/*
 * fs/f2fs/file.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/stat.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/falloc.h>
#include <linux/types.h>
#include <linux/compat.h>
#include <linux/uaccess.h>
#include <linux/mount.h>
#include <linux/pagevec.h>
#include <linux/uio.h>
#include <linux/uuid.h>
#include <linux/file.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"
#include "acl.h"
#include "gc.h"
#include "trace.h"
#include <trace/events/f2fs.h>
static vm_fault_t f2fs_filemap_fault(struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vmf->vma->vm_file);
	vm_fault_t ret;

	down_read(&F2FS_I(inode)->i_mmap_sem);
	ret = filemap_fault(vmf);
	up_read(&F2FS_I(inode)->i_mmap_sem);

	return ret;
}
static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	int err;

	if (unlikely(f2fs_cp_error(sbi))) {
		err = -EIO;
		goto err;
	}

	sb_start_pagefault(inode->i_sb);

	f2fs_bug_on(sbi, f2fs_has_inline_data(inode));

	/* block allocation */
	f2fs_lock_op(sbi);
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_reserve_block(&dn, page->index);
	if (err) {
		f2fs_unlock_op(sbi);
		goto out;
	}
	f2fs_put_dnode(&dn);
	f2fs_unlock_op(sbi);

	f2fs_balance_fs(sbi, dn.node_changed);

	file_update_time(vmf->vma->vm_file);
	down_read(&F2FS_I(inode)->i_mmap_sem);
	lock_page(page);
	if (unlikely(page->mapping != inode->i_mapping ||
			page_offset(page) > i_size_read(inode) ||
			!PageUptodate(page))) {
		unlock_page(page);
		err = -EFAULT;
		goto out_sem;
	}

	/*
	 * check to see if the page is mapped already (no holes)
	 */
	if (PageMappedToDisk(page))
		goto out_sem;

	/* page is wholly or partially inside EOF */
	if (((loff_t)(page->index + 1) << PAGE_SHIFT) >
						i_size_read(inode)) {
		loff_t offset;

		offset = i_size_read(inode) & ~PAGE_MASK;
		zero_user_segment(page, offset, PAGE_SIZE);
	}
	set_page_dirty(page);
	if (!PageUptodate(page))
		SetPageUptodate(page);

	f2fs_update_iostat(sbi, APP_MAPPED_IO, F2FS_BLKSIZE);

	trace_f2fs_vm_page_mkwrite(page, DATA);

	/* fill the page */
	f2fs_wait_on_page_writeback(page, DATA, false);

	/* wait for GCed page writeback via META_MAPPING */
	if (f2fs_post_read_required(inode))
		f2fs_wait_on_block_writeback(sbi, dn.data_blkaddr);

out_sem:
	up_read(&F2FS_I(inode)->i_mmap_sem);
out:
	sb_end_pagefault(inode->i_sb);
	f2fs_update_time(sbi, REQ_TIME);
err:
	return block_page_mkwrite_return(err);
}
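/*
 * Note on the mkwrite path above: block allocation happens under
 * f2fs_lock_op() before the page is redirtied, and the page is
 * revalidated under i_mmap_sem, so a racing truncate cannot leave a
 * dirty page beyond i_size.
 */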
static const struct vm_operations_struct f2fs_file_vm_ops = {
	.fault		= f2fs_filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= f2fs_vm_page_mkwrite,
};
static int get_parent_ino(struct inode *inode, nid_t *pino)
{
	struct dentry *dentry;

	inode = igrab(inode);
	dentry = d_find_any_alias(inode);
	iput(inode);
	if (!dentry)
		return 0;

	*pino = parent_ino(dentry);
	dput(dentry);
	return 1;
}
static inline enum cp_reason_type need_do_checkpoint(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	enum cp_reason_type cp_reason = CP_NO_NEEDED;

	if (!S_ISREG(inode->i_mode))
		cp_reason = CP_NON_REGULAR;
	else if (inode->i_nlink != 1)
		cp_reason = CP_HARDLINK;
	else if (is_sbi_flag_set(sbi, SBI_NEED_CP))
		cp_reason = CP_SB_NEED_CP;
	else if (file_wrong_pino(inode))
		cp_reason = CP_WRONG_PINO;
	else if (!f2fs_space_for_roll_forward(sbi))
		cp_reason = CP_NO_SPC_ROLL;
	else if (!f2fs_is_checkpointed_node(sbi, F2FS_I(inode)->i_pino))
		cp_reason = CP_NODE_NEED_CP;
	else if (test_opt(sbi, FASTBOOT))
		cp_reason = CP_FASTBOOT_MODE;
	else if (F2FS_OPTION(sbi).active_logs == 2)
		cp_reason = CP_SPEC_LOG_NUM;
	else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT &&
		f2fs_need_dentry_mark(sbi, inode->i_ino) &&
		f2fs_exist_written_data(sbi, F2FS_I(inode)->i_pino,
							TRANS_DIR_INO))
		cp_reason = CP_RECOVER_DIR;

	return cp_reason;
}
static bool need_inode_page_update(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct page *i = find_get_page(NODE_MAPPING(sbi), ino);
	bool ret = false;

	/* But we need to avoid that there are some inode updates */
	if ((i && PageDirty(i)) || f2fs_need_inode_block_update(sbi, ino))
		ret = true;
	f2fs_put_page(i, 0);
	return ret;
}
static void try_to_fix_pino(struct inode *inode)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	nid_t pino;

	down_write(&fi->i_sem);
	if (file_wrong_pino(inode) && inode->i_nlink == 1 &&
			get_parent_ino(inode, &pino)) {
		f2fs_i_pino_write(inode, pino);
		file_got_pino(inode);
	}
	up_write(&fi->i_sem);
}
static int f2fs_do_sync_file(struct file *file, loff_t start, loff_t end,
						int datasync, bool atomic)
{
	struct inode *inode = file->f_mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t ino = inode->i_ino;
	int ret = 0;
	enum cp_reason_type cp_reason = 0;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
	};

	if (unlikely(f2fs_readonly(inode->i_sb)))
		return 0;

	trace_f2fs_sync_file_enter(inode);

	/* if fdatasync is triggered, let's do in-place-update */
	if (datasync || get_dirty_pages(inode) <= SM_I(sbi)->min_fsync_blocks)
		set_inode_flag(inode, FI_NEED_IPU);
	ret = file_write_and_wait_range(file, start, end);
	clear_inode_flag(inode, FI_NEED_IPU);

	if (ret) {
		trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
		return ret;
	}

	/* if the inode is dirty, let's recover all the time */
	if (!f2fs_skip_inode_update(inode, datasync)) {
		f2fs_write_inode(inode, NULL);
		goto go_write;
	}

	/*
	 * if there is no written data, don't waste time to write recovery info.
	 */
	if (!is_inode_flag_set(inode, FI_APPEND_WRITE) &&
			!f2fs_exist_written_data(sbi, ino, APPEND_INO)) {

		/* it may call write_inode just prior to fsync */
		if (need_inode_page_update(sbi, ino))
			goto go_write;

		if (is_inode_flag_set(inode, FI_UPDATE_WRITE) ||
				f2fs_exist_written_data(sbi, ino, UPDATE_INO))
			goto flush_out;
		goto out;
	}
go_write:
	/*
	 * Both of fdatasync() and fsync() are able to be recovered from
	 * sudden power-off.
	 */
	down_read(&F2FS_I(inode)->i_sem);
	cp_reason = need_do_checkpoint(inode);
	up_read(&F2FS_I(inode)->i_sem);

	if (cp_reason) {
		/* all the dirty node pages should be flushed for POR */
		ret = f2fs_sync_fs(inode->i_sb, 1);

		/*
		 * We've secured consistency through sync_fs. Following pino
		 * will be used only for fsynced inodes after checkpoint.
		 */
		try_to_fix_pino(inode);
		clear_inode_flag(inode, FI_APPEND_WRITE);
		clear_inode_flag(inode, FI_UPDATE_WRITE);
		goto out;
	}
sync_nodes:
	atomic_inc(&sbi->wb_sync_req[NODE]);
	ret = f2fs_fsync_node_pages(sbi, inode, &wbc, atomic);
	atomic_dec(&sbi->wb_sync_req[NODE]);
	if (ret)
		goto out;

	/* if cp_error was enabled, we should avoid infinite loop */
	if (unlikely(f2fs_cp_error(sbi))) {
		ret = -EIO;
		goto out;
	}

	if (f2fs_need_inode_block_update(sbi, ino)) {
		f2fs_mark_inode_dirty_sync(inode, true);
		f2fs_write_inode(inode, NULL);
		goto sync_nodes;
	}

	/*
	 * If it's atomic_write, it's just fine to keep write ordering. So
	 * here we don't need to wait for node write completion, since we use
	 * node chain which serializes node blocks. If one of node writes are
	 * reordered, we can see simply broken chain, resulting in stopping
	 * roll-forward recovery. It means we'll recover all or none node blocks
	 * given fsync mark.
	 */
	if (!atomic) {
		ret = f2fs_wait_on_node_pages_writeback(sbi, ino);
		if (ret)
			goto out;
	}

	/* once recovery info is written, don't need to tack this */
	f2fs_remove_ino_entry(sbi, ino, APPEND_INO);
	clear_inode_flag(inode, FI_APPEND_WRITE);
flush_out:
	if (!atomic && F2FS_OPTION(sbi).fsync_mode != FSYNC_MODE_NOBARRIER)
		ret = f2fs_issue_flush(sbi, inode->i_ino);
	if (!ret) {
		f2fs_remove_ino_entry(sbi, ino, UPDATE_INO);
		clear_inode_flag(inode, FI_UPDATE_WRITE);
		f2fs_remove_ino_entry(sbi, ino, FLUSH_INO);
	}
	f2fs_update_time(sbi, REQ_TIME);
out:
	trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
	f2fs_trace_ios(NULL, 1);
	return ret;
}
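/*
 * Summary of the fsync policy above: when a checkpoint reason is found
 * (hard link, wrong pino, no roll-forward space, etc.) a full checkpoint
 * is issued via f2fs_sync_fs(); otherwise only this inode's dirty node
 * pages are flushed and a cache flush is sent, which is enough for
 * roll-forward recovery after sudden power-off.
 */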
int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{
	if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(file)))))
		return -EIO;

	return f2fs_do_sync_file(file, start, end, datasync, false);
}
static pgoff_t __get_first_dirty_index(struct address_space *mapping,
						pgoff_t pgofs, int whence)
{
	struct page *page;
	int nr_pages;

	if (whence != SEEK_DATA)
		return 0;

	/* find first dirty page index */
	nr_pages = find_get_pages_tag(mapping, &pgofs, PAGECACHE_TAG_DIRTY,
				      1, &page);
	if (!nr_pages)
		return ULONG_MAX;
	pgofs = page->index;
	put_page(page);
	return pgofs;
}
static bool __found_offset(block_t blkaddr, pgoff_t dirty, pgoff_t pgofs,
							int whence)
{
	switch (whence) {
	case SEEK_DATA:
		if ((blkaddr == NEW_ADDR && dirty == pgofs) ||
			is_valid_blkaddr(blkaddr))
			return true;
		break;
	case SEEK_HOLE:
		if (blkaddr == NULL_ADDR)
			return true;
		break;
	}
	return false;
}
static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes = inode->i_sb->s_maxbytes;
	struct dnode_of_data dn;
	pgoff_t pgofs, end_offset, dirty;
	loff_t data_ofs = offset;
	loff_t isize;
	int err = 0;

	inode_lock(inode);

	isize = i_size_read(inode);
	if (offset >= isize)
		goto fail;

	/* handle inline data case */
	if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode)) {
		if (whence == SEEK_HOLE)
			data_ofs = isize;
		goto found;
	}

	pgofs = (pgoff_t)(offset >> PAGE_SHIFT);

	dirty = __get_first_dirty_index(inode->i_mapping, pgofs, whence);

	for (; data_ofs < isize; data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = f2fs_get_dnode_of_data(&dn, pgofs, LOOKUP_NODE);
		if (err && err != -ENOENT) {
			goto fail;
		} else if (err == -ENOENT) {
			/* direct node does not exist */
			if (whence == SEEK_DATA) {
				pgofs = f2fs_get_next_page_offset(&dn, pgofs);
				continue;
			} else {
				goto found;
			}
		}

		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);

		/* find data/hole in dnode block */
		for (; dn.ofs_in_node < end_offset;
				dn.ofs_in_node++, pgofs++,
				data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
			block_t blkaddr;

			blkaddr = datablock_addr(dn.inode,
					dn.node_page, dn.ofs_in_node);

			if (__found_offset(blkaddr, dirty, pgofs, whence)) {
				f2fs_put_dnode(&dn);
				goto found;
			}
		}
		f2fs_put_dnode(&dn);
	}

	if (whence == SEEK_DATA)
		goto fail;
found:
	if (whence == SEEK_HOLE && data_ofs > isize)
		data_ofs = isize;
	inode_unlock(inode);
	return vfs_setpos(file, data_ofs, maxbytes);
fail:
	inode_unlock(inode);
	return -ENXIO;
}
static loff_t f2fs_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes = inode->i_sb->s_maxbytes;

	switch (whence) {
	case SEEK_SET:
	case SEEK_CUR:
	case SEEK_END:
		return generic_file_llseek_size(file, offset, whence,
						maxbytes, i_size_read(inode));
	case SEEK_DATA:
	case SEEK_HOLE:
		if (offset < 0)
			return -ENXIO;
		return f2fs_seek_block(file, offset, whence);
	}

	return -EINVAL;
}
static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(file);
	int err;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;

	/* we don't need to use inline_data strictly */
	err = f2fs_convert_inline_inode(inode);
	if (err)
		return err;

	vma->vm_ops = &f2fs_file_vm_ops;
	return 0;
}
static int f2fs_file_open(struct inode *inode, struct file *filp)
{
	int err = fscrypt_file_open(inode, filp);

	if (err)
		return err;

	filp->f_mode |= FMODE_NOWAIT;

	return dquot_file_open(inode, filp);
}
void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct f2fs_node *raw_node;
	int nr_free = 0, ofs = dn->ofs_in_node, len = count;
	__le32 *addr;
	int base = 0;

	if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode))
		base = get_extra_isize(dn->inode);

	raw_node = F2FS_NODE(dn->node_page);
	addr = blkaddr_in_node(raw_node) + base + ofs;

	for (; count > 0; count--, addr++, dn->ofs_in_node++) {
		block_t blkaddr = le32_to_cpu(*addr);

		if (blkaddr == NULL_ADDR)
			continue;

		dn->data_blkaddr = NULL_ADDR;
		f2fs_set_data_blkaddr(dn);
		f2fs_invalidate_blocks(sbi, blkaddr);
		if (dn->ofs_in_node == 0 && IS_INODE(dn->node_page))
			clear_inode_flag(dn->inode, FI_FIRST_BLOCK_WRITTEN);
		nr_free++;
	}

	if (nr_free) {
		pgoff_t fofs;
		/*
		 * once we invalidate valid blkaddr in range [ofs, ofs + count],
		 * we will invalidate all blkaddr in the whole range.
		 */
		fofs = f2fs_start_bidx_of_node(ofs_of_node(dn->node_page),
							dn->inode) + ofs;
		f2fs_update_extent_cache_range(dn, fofs, 0, len);
		dec_valid_block_count(sbi, dn->inode, nr_free);
	}
	dn->ofs_in_node = ofs;

	f2fs_update_time(sbi, REQ_TIME);
	trace_f2fs_truncate_data_blocks_range(dn->inode, dn->nid,
					 dn->ofs_in_node, nr_free);
}
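/*
 * The helper above invalidates the whole cached extent range
 * [fofs, fofs + len) even when only some addresses in [ofs, ofs + count)
 * were valid, so the extent cache can never refer to a freed block.
 */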
void f2fs_truncate_data_blocks(struct dnode_of_data *dn)
{
	f2fs_truncate_data_blocks_range(dn, ADDRS_PER_BLOCK);
}
static int truncate_partial_data_page(struct inode *inode, u64 from,
								bool cache_only)
{
	loff_t offset = from & (PAGE_SIZE - 1);
	pgoff_t index = from >> PAGE_SHIFT;
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	if (!offset && !cache_only)
		return 0;

	if (cache_only) {
		page = find_lock_page(mapping, index);
		if (page && PageUptodate(page))
			goto truncate_out;
		f2fs_put_page(page, 1);
		return 0;
	}

	page = f2fs_get_lock_data_page(inode, index, true);
	if (IS_ERR(page))
		return PTR_ERR(page) == -ENOENT ? 0 : PTR_ERR(page);
truncate_out:
	f2fs_wait_on_page_writeback(page, DATA, true);
	zero_user(page, offset, PAGE_SIZE - offset);

	/* An encrypted inode should have a key and truncate the last page. */
	f2fs_bug_on(F2FS_I_SB(inode), cache_only && f2fs_encrypted_inode(inode));
	if (!cache_only)
		set_page_dirty(page);
	f2fs_put_page(page, 1);
	return 0;
}
int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	pgoff_t free_from;
	int count = 0, err = 0;
	struct page *ipage;
	bool truncate_page = false;

	trace_f2fs_truncate_blocks_enter(inode, from);

	free_from = (pgoff_t)F2FS_BLK_ALIGN(from);

	if (free_from >= sbi->max_file_blocks)
		goto free_partial;

	if (lock)
		f2fs_lock_op(sbi);

	ipage = f2fs_get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto out;
	}

	if (f2fs_has_inline_data(inode)) {
		f2fs_truncate_inline_inode(inode, ipage, from);
		f2fs_put_page(ipage, 1);
		truncate_page = true;
		goto out;
	}

	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = f2fs_get_dnode_of_data(&dn, free_from, LOOKUP_NODE_RA);
	if (err) {
		if (err == -ENOENT)
			goto free_next;
		goto out;
	}

	count = ADDRS_PER_PAGE(dn.node_page, inode);

	count -= dn.ofs_in_node;
	f2fs_bug_on(sbi, count < 0);

	if (dn.ofs_in_node || IS_INODE(dn.node_page)) {
		f2fs_truncate_data_blocks_range(&dn, count);
		free_from += count;
	}

	f2fs_put_dnode(&dn);
free_next:
	err = f2fs_truncate_inode_blocks(inode, free_from);
out:
	if (lock)
		f2fs_unlock_op(sbi);
free_partial:
	/* lastly zero out the first data page */
	if (!err)
		err = truncate_partial_data_page(inode, from, truncate_page);

	trace_f2fs_truncate_blocks_exit(inode, err);
	return err;
}
int f2fs_truncate(struct inode *inode)
{
	int err;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;

	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
				S_ISLNK(inode->i_mode)))
		return 0;

	trace_f2fs_truncate(inode);

#ifdef CONFIG_F2FS_FAULT_INJECTION
	if (time_to_inject(F2FS_I_SB(inode), FAULT_TRUNCATE)) {
		f2fs_show_injection_info(FAULT_TRUNCATE);
		return -EIO;
	}
#endif
	/* we should check inline_data size */
	if (!f2fs_may_inline_data(inode)) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			return err;
	}

	err = f2fs_truncate_blocks(inode, i_size_read(inode), true);
	if (err)
		return err;

	inode->i_mtime = inode->i_ctime = current_time(inode);
	f2fs_mark_inode_dirty_sync(inode, false);
	return 0;
}
int f2fs_getattr(const struct path *path, struct kstat *stat,
		 u32 request_mask, unsigned int query_flags)
{
	struct inode *inode = d_inode(path->dentry);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_inode *ri;
	unsigned int flags;

	if (f2fs_has_extra_attr(inode) &&
			f2fs_sb_has_inode_crtime(inode->i_sb) &&
			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_crtime)) {
		stat->result_mask |= STATX_BTIME;
		stat->btime.tv_sec = fi->i_crtime.tv_sec;
		stat->btime.tv_nsec = fi->i_crtime.tv_nsec;
	}

	flags = fi->i_flags & F2FS_FL_USER_VISIBLE;
	if (flags & F2FS_APPEND_FL)
		stat->attributes |= STATX_ATTR_APPEND;
	if (flags & F2FS_COMPR_FL)
		stat->attributes |= STATX_ATTR_COMPRESSED;
	if (f2fs_encrypted_inode(inode))
		stat->attributes |= STATX_ATTR_ENCRYPTED;
	if (flags & F2FS_IMMUTABLE_FL)
		stat->attributes |= STATX_ATTR_IMMUTABLE;
	if (flags & F2FS_NODUMP_FL)
		stat->attributes |= STATX_ATTR_NODUMP;

	stat->attributes_mask |= (STATX_ATTR_APPEND |
				  STATX_ATTR_COMPRESSED |
				  STATX_ATTR_ENCRYPTED |
				  STATX_ATTR_IMMUTABLE |
				  STATX_ATTR_NODUMP);

	generic_fillattr(inode, stat);

	/* we need to show initial sectors used for inline_data/dentries */
	if ((S_ISREG(inode->i_mode) && f2fs_has_inline_data(inode)) ||
					f2fs_has_inline_dentry(inode))
		stat->blocks += (stat->size + 511) >> 9;

	return 0;
}
#ifdef CONFIG_F2FS_FS_POSIX_ACL
static void __setattr_copy(struct inode *inode, const struct iattr *attr)
{
	unsigned int ia_valid = attr->ia_valid;

	if (ia_valid & ATTR_UID)
		inode->i_uid = attr->ia_uid;
	if (ia_valid & ATTR_GID)
		inode->i_gid = attr->ia_gid;
	if (ia_valid & ATTR_ATIME)
		inode->i_atime = timespec64_trunc(attr->ia_atime,
						  inode->i_sb->s_time_gran);
	if (ia_valid & ATTR_MTIME)
		inode->i_mtime = timespec64_trunc(attr->ia_mtime,
						  inode->i_sb->s_time_gran);
	if (ia_valid & ATTR_CTIME)
		inode->i_ctime = timespec64_trunc(attr->ia_ctime,
						  inode->i_sb->s_time_gran);
	if (ia_valid & ATTR_MODE) {
		umode_t mode = attr->ia_mode;

		if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
			mode &= ~S_ISGID;
		set_acl_inode(inode, mode);
	}
}
#else
#define __setattr_copy setattr_copy
#endif
int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	int err;
	bool size_changed = false;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;

	err = setattr_prepare(dentry, attr);
	if (err)
		return err;

	err = fscrypt_prepare_setattr(dentry, attr);
	if (err)
		return err;

	if (is_quota_modification(inode, attr)) {
		err = dquot_initialize(inode);
		if (err)
			return err;
	}
	if ((attr->ia_valid & ATTR_UID &&
		!uid_eq(attr->ia_uid, inode->i_uid)) ||
		(attr->ia_valid & ATTR_GID &&
		!gid_eq(attr->ia_gid, inode->i_gid))) {
		err = dquot_transfer(inode, attr);
		if (err)
			return err;
	}

	if (attr->ia_valid & ATTR_SIZE) {
		if (attr->ia_size <= i_size_read(inode)) {
			down_write(&F2FS_I(inode)->i_mmap_sem);
			truncate_setsize(inode, attr->ia_size);
			err = f2fs_truncate(inode);
			up_write(&F2FS_I(inode)->i_mmap_sem);
			if (err)
				return err;
		} else {
			/*
			 * do not trim all blocks after i_size if target size is
			 * larger than i_size.
			 */
			down_write(&F2FS_I(inode)->i_mmap_sem);
			truncate_setsize(inode, attr->ia_size);
			up_write(&F2FS_I(inode)->i_mmap_sem);

			/* should convert inline inode here */
			if (!f2fs_may_inline_data(inode)) {
				err = f2fs_convert_inline_inode(inode);
				if (err)
					return err;
			}
			inode->i_mtime = inode->i_ctime = current_time(inode);
		}

		down_write(&F2FS_I(inode)->i_sem);
		F2FS_I(inode)->last_disk_size = i_size_read(inode);
		up_write(&F2FS_I(inode)->i_sem);

		size_changed = true;
	}

	__setattr_copy(inode, attr);

	if (attr->ia_valid & ATTR_MODE) {
		err = posix_acl_chmod(inode, f2fs_get_inode_mode(inode));
		if (err || is_inode_flag_set(inode, FI_ACL_MODE)) {
			inode->i_mode = F2FS_I(inode)->i_acl_mode;
			clear_inode_flag(inode, FI_ACL_MODE);
		}
	}

	/* file size may have changed here */
	f2fs_mark_inode_dirty_sync(inode, size_changed);

	/* inode change will produce dirty node pages flushed by checkpoint */
	f2fs_balance_fs(F2FS_I_SB(inode), true);

	return err;
}
const struct inode_operations f2fs_file_inode_operations = {
	.getattr	= f2fs_getattr,
	.setattr	= f2fs_setattr,
	.get_acl	= f2fs_get_acl,
	.set_acl	= f2fs_set_acl,
#ifdef CONFIG_F2FS_FS_XATTR
	.listxattr	= f2fs_listxattr,
#endif
	.fiemap		= f2fs_fiemap,
};
static int fill_zero(struct inode *inode, pgoff_t index,
					loff_t start, loff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page;

	if (!len)
		return 0;

	f2fs_balance_fs(sbi, true);

	f2fs_lock_op(sbi);
	page = f2fs_get_new_data_page(inode, NULL, index, false);
	f2fs_unlock_op(sbi);

	if (IS_ERR(page))
		return PTR_ERR(page);

	f2fs_wait_on_page_writeback(page, DATA, true);
	zero_user(page, start, len);
	set_page_dirty(page);
	f2fs_put_page(page, 1);
	return 0;
}
int f2fs_truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end)
{
	int err;

	while (pg_start < pg_end) {
		struct dnode_of_data dn;
		pgoff_t end_offset, count;

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = f2fs_get_dnode_of_data(&dn, pg_start, LOOKUP_NODE);
		if (err) {
			if (err == -ENOENT) {
				pg_start = f2fs_get_next_page_offset(&dn,
								pg_start);
				continue;
			}
			return err;
		}

		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
		count = min(end_offset - dn.ofs_in_node, pg_end - pg_start);

		f2fs_bug_on(F2FS_I_SB(inode), count == 0 || count > end_offset);

		f2fs_truncate_data_blocks_range(&dn, count);
		f2fs_put_dnode(&dn);

		pg_start += count;
	}
	return 0;
}
static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
	pgoff_t pg_start, pg_end;
	loff_t off_start, off_end;
	int ret;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
	pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;

	off_start = offset & (PAGE_SIZE - 1);
	off_end = (offset + len) & (PAGE_SIZE - 1);

	if (pg_start == pg_end) {
		ret = fill_zero(inode, pg_start, off_start,
						off_end - off_start);
		if (ret)
			return ret;
	} else {
		if (off_start) {
			ret = fill_zero(inode, pg_start++, off_start,
						PAGE_SIZE - off_start);
			if (ret)
				return ret;
		}
		if (off_end) {
			ret = fill_zero(inode, pg_end, 0, off_end);
			if (ret)
				return ret;
		}

		if (pg_start < pg_end) {
			struct address_space *mapping = inode->i_mapping;
			loff_t blk_start, blk_end;
			struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

			f2fs_balance_fs(sbi, true);

			blk_start = (loff_t)pg_start << PAGE_SHIFT;
			blk_end = (loff_t)pg_end << PAGE_SHIFT;
			down_write(&F2FS_I(inode)->i_mmap_sem);
			truncate_inode_pages_range(mapping, blk_start,
							blk_end - 1);

			f2fs_lock_op(sbi);
			ret = f2fs_truncate_hole(inode, pg_start, pg_end);
			f2fs_unlock_op(sbi);
			up_write(&F2FS_I(inode)->i_mmap_sem);
		}
	}

	return ret;
}
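/*
 * punch_hole() zeroes the partial head/tail pages in place and only
 * deallocates whole blocks in between; truncate_inode_pages_range() and
 * i_mmap_sem keep mmap'ed readers from seeing stale data meanwhile.
 */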
static int __read_out_blkaddrs(struct inode *inode, block_t *blkaddr,
				int *do_replace, pgoff_t off, pgoff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	int ret, done, i;

next_dnode:
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	ret = f2fs_get_dnode_of_data(&dn, off, LOOKUP_NODE_RA);
	if (ret && ret != -ENOENT) {
		return ret;
	} else if (ret == -ENOENT) {
		if (dn.max_level == 0)
			return -ENOENT;
		done = min((pgoff_t)ADDRS_PER_BLOCK - dn.ofs_in_node, len);
		blkaddr += done;
		do_replace += done;
		goto next;
	}

	done = min((pgoff_t)ADDRS_PER_PAGE(dn.node_page, inode) -
							dn.ofs_in_node, len);
	for (i = 0; i < done; i++, blkaddr++, do_replace++, dn.ofs_in_node++) {
		*blkaddr = datablock_addr(dn.inode,
					dn.node_page, dn.ofs_in_node);
		if (!f2fs_is_checkpointed_data(sbi, *blkaddr)) {

			if (test_opt(sbi, LFS)) {
				f2fs_put_dnode(&dn);
				return -ENOTSUPP;
			}

			/* do not invalidate this block address */
			f2fs_update_data_blkaddr(&dn, NULL_ADDR);
			*do_replace = 1;
		}
	}
	f2fs_put_dnode(&dn);
next:
	len -= done;
	off += done;
	if (len)
		goto next_dnode;
	return 0;
}
static int __roll_back_blkaddrs(struct inode *inode, block_t *blkaddr,
				int *do_replace, pgoff_t off, int len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	int ret, i;

	for (i = 0; i < len; i++, do_replace++, blkaddr++) {
		if (*do_replace == 0)
			continue;

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		ret = f2fs_get_dnode_of_data(&dn, off + i, LOOKUP_NODE_RA);
		if (ret) {
			dec_valid_block_count(sbi, inode, 1);
			f2fs_invalidate_blocks(sbi, *blkaddr);
		} else {
			f2fs_update_data_blkaddr(&dn, *blkaddr);
		}
		f2fs_put_dnode(&dn);
	}
	return 0;
}
static int __clone_blkaddrs(struct inode *src_inode, struct inode *dst_inode,
			block_t *blkaddr, int *do_replace,
			pgoff_t src, pgoff_t dst, pgoff_t len, bool full)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(src_inode);
	pgoff_t i = 0;
	int ret;

	while (i < len) {
		if (blkaddr[i] == NULL_ADDR && !full) {
			i++;
			continue;
		}

		if (do_replace[i] || blkaddr[i] == NULL_ADDR) {
			struct dnode_of_data dn;
			struct node_info ni;
			size_t new_size;
			pgoff_t ilen;

			set_new_dnode(&dn, dst_inode, NULL, NULL, 0);
			ret = f2fs_get_dnode_of_data(&dn, dst + i, ALLOC_NODE);
			if (ret)
				return ret;

			f2fs_get_node_info(sbi, dn.nid, &ni);
			ilen = min((pgoff_t)
				ADDRS_PER_PAGE(dn.node_page, dst_inode) -
						dn.ofs_in_node, len - i);
			do {
				dn.data_blkaddr = datablock_addr(dn.inode,
						dn.node_page, dn.ofs_in_node);
				f2fs_truncate_data_blocks_range(&dn, 1);

				if (do_replace[i]) {
					f2fs_i_blocks_write(src_inode,
							1, false, false);
					f2fs_i_blocks_write(dst_inode,
							1, true, false);
					f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
					blkaddr[i], ni.version, true, false);

					do_replace[i] = 0;
				}
				dn.ofs_in_node++;
				i++;
				new_size = (dst + i) << PAGE_SHIFT;
				if (dst_inode->i_size < new_size)
					f2fs_i_size_write(dst_inode, new_size);
			} while (--ilen && (do_replace[i] || blkaddr[i] == NULL_ADDR));

			f2fs_put_dnode(&dn);
		} else {
			struct page *psrc, *pdst;

			psrc = f2fs_get_lock_data_page(src_inode,
							src + i, true);
			if (IS_ERR(psrc))
				return PTR_ERR(psrc);
			pdst = f2fs_get_new_data_page(dst_inode, NULL, dst + i,
								true);
			if (IS_ERR(pdst)) {
				f2fs_put_page(psrc, 1);
				return PTR_ERR(pdst);
			}
			f2fs_copy_page(psrc, pdst);
			set_page_dirty(pdst);
			f2fs_put_page(pdst, 1);
			f2fs_put_page(psrc, 1);

			ret = f2fs_truncate_hole(src_inode,
						src + i, src + i + 1);
			if (ret)
				return ret;
			i++;
		}
	}
	return 0;
}
static int __exchange_data_block(struct inode *src_inode,
			struct inode *dst_inode, pgoff_t src, pgoff_t dst,
			pgoff_t len, bool full)
{
	block_t *src_blkaddr;
	int *do_replace;
	pgoff_t olen;
	int ret;

	while (len) {
		olen = min((pgoff_t)4 * ADDRS_PER_BLOCK, len);

		src_blkaddr = f2fs_kvzalloc(F2FS_I_SB(src_inode),
					array_size(olen, sizeof(block_t)),
					GFP_KERNEL);
		if (!src_blkaddr)
			return -ENOMEM;

		do_replace = f2fs_kvzalloc(F2FS_I_SB(src_inode),
					array_size(olen, sizeof(int)),
					GFP_KERNEL);
		if (!do_replace) {
			kvfree(src_blkaddr);
			return -ENOMEM;
		}

		ret = __read_out_blkaddrs(src_inode, src_blkaddr,
					do_replace, src, olen);
		if (ret)
			goto roll_back;

		ret = __clone_blkaddrs(src_inode, dst_inode, src_blkaddr,
					do_replace, src, dst, olen, full);
		if (ret)
			goto roll_back;

		src += olen;
		dst += olen;
		len -= olen;

		kvfree(src_blkaddr);
		kvfree(do_replace);
	}
	return 0;

roll_back:
	__roll_back_blkaddrs(src_inode, src_blkaddr, do_replace, src, olen);
	kvfree(src_blkaddr);
	kvfree(do_replace);
	return ret;
}
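/*
 * The exchange is done in chunks of at most 4 * ADDRS_PER_BLOCK entries:
 * source block addresses are read out (and detached from the source
 * dnode when they must be replaced), cloned into the destination, and
 * restored via __roll_back_blkaddrs() if any step fails.
 */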
static int f2fs_do_collapse(struct inode *inode, pgoff_t start, pgoff_t end)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	pgoff_t nrpages = (i_size_read(inode) + PAGE_SIZE - 1) / PAGE_SIZE;
	int ret;

	f2fs_balance_fs(sbi, true);
	f2fs_lock_op(sbi);

	f2fs_drop_extent_tree(inode);

	ret = __exchange_data_block(inode, inode, end, start, nrpages - end, true);
	f2fs_unlock_op(sbi);
	return ret;
}
static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
{
	pgoff_t pg_start, pg_end;
	loff_t new_size;
	int ret;

	if (offset + len >= i_size_read(inode))
		return -EINVAL;

	/* collapse range should be aligned to block size of f2fs. */
	if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
		return -EINVAL;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	pg_start = offset >> PAGE_SHIFT;
	pg_end = (offset + len) >> PAGE_SHIFT;

	/* avoid gc operation during block exchange */
	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

	down_write(&F2FS_I(inode)->i_mmap_sem);
	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	if (ret)
		goto out_unlock;

	truncate_pagecache(inode, offset);

	ret = f2fs_do_collapse(inode, pg_start, pg_end);
	if (ret)
		goto out_unlock;

	/* write out all moved pages, if possible */
	filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	truncate_pagecache(inode, offset);

	new_size = i_size_read(inode) - len;
	truncate_pagecache(inode, new_size);

	ret = f2fs_truncate_blocks(inode, new_size, true);
	if (!ret)
		f2fs_i_size_write(inode, new_size);
out_unlock:
	up_write(&F2FS_I(inode)->i_mmap_sem);
	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
	return ret;
}
static int f2fs_do_zero_range(struct dnode_of_data *dn, pgoff_t start,
								pgoff_t end)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	pgoff_t index = start;
	unsigned int ofs_in_node = dn->ofs_in_node;
	blkcnt_t count = 0;
	int ret;

	for (; index < end; index++, dn->ofs_in_node++) {
		if (datablock_addr(dn->inode, dn->node_page,
					dn->ofs_in_node) == NULL_ADDR)
			count++;
	}

	dn->ofs_in_node = ofs_in_node;
	ret = f2fs_reserve_new_blocks(dn, count);
	if (ret)
		return ret;

	dn->ofs_in_node = ofs_in_node;
	for (index = start; index < end; index++, dn->ofs_in_node++) {
		dn->data_blkaddr = datablock_addr(dn->inode,
					dn->node_page, dn->ofs_in_node);
		/*
		 * f2fs_reserve_new_blocks will not guarantee entire block
		 * allocation.
		 */
		if (dn->data_blkaddr == NULL_ADDR) {
			ret = -ENOSPC;
			break;
		}
		if (dn->data_blkaddr != NEW_ADDR) {
			f2fs_invalidate_blocks(sbi, dn->data_blkaddr);
			dn->data_blkaddr = NEW_ADDR;
			f2fs_set_data_blkaddr(dn);
		}
	}

	f2fs_update_extent_cache_range(dn, start, 0, index - start);

	return ret;
}
static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
								int mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct address_space *mapping = inode->i_mapping;
	pgoff_t index, pg_start, pg_end;
	loff_t new_size = i_size_read(inode);
	loff_t off_start, off_end;
	int ret = 0;

	ret = inode_newsize_ok(inode, (len + offset));
	if (ret)
		return ret;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	down_write(&F2FS_I(inode)->i_mmap_sem);
	ret = filemap_write_and_wait_range(mapping, offset, offset + len - 1);
	if (ret)
		goto out_sem;

	truncate_pagecache_range(inode, offset, offset + len - 1);

	pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
	pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;

	off_start = offset & (PAGE_SIZE - 1);
	off_end = (offset + len) & (PAGE_SIZE - 1);

	if (pg_start == pg_end) {
		ret = fill_zero(inode, pg_start, off_start,
						off_end - off_start);
		if (ret)
			goto out_sem;

		new_size = max_t(loff_t, new_size, offset + len);
	} else {
		if (off_start) {
			ret = fill_zero(inode, pg_start++, off_start,
						PAGE_SIZE - off_start);
			if (ret)
				goto out_sem;

			new_size = max_t(loff_t, new_size,
					(loff_t)pg_start << PAGE_SHIFT);
		}

		for (index = pg_start; index < pg_end;) {
			struct dnode_of_data dn;
			unsigned int end_offset;
			pgoff_t end;

			f2fs_lock_op(sbi);

			set_new_dnode(&dn, inode, NULL, NULL, 0);
			ret = f2fs_get_dnode_of_data(&dn, index, ALLOC_NODE);
			if (ret) {
				f2fs_unlock_op(sbi);
				goto out;
			}

			end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
			end = min(pg_end, end_offset - dn.ofs_in_node + index);

			ret = f2fs_do_zero_range(&dn, index, end);
			f2fs_put_dnode(&dn);
			f2fs_unlock_op(sbi);

			f2fs_balance_fs(sbi, dn.node_changed);

			if (ret)
				goto out;

			index = end;
			new_size = max_t(loff_t, new_size,
					(loff_t)index << PAGE_SHIFT);
		}

		if (off_end) {
			ret = fill_zero(inode, pg_end, 0, off_end);
			if (ret)
				goto out;

			new_size = max_t(loff_t, new_size, offset + len);
		}
	}

out:
	if (new_size > i_size_read(inode)) {
		if (mode & FALLOC_FL_KEEP_SIZE)
			file_set_keep_isize(inode);
		else
			f2fs_i_size_write(inode, new_size);
	}
out_sem:
	up_write(&F2FS_I(inode)->i_mmap_sem);

	return ret;
}
static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	pgoff_t nr, pg_start, pg_end, delta, idx;
	loff_t new_size;
	int ret = 0;

	new_size = i_size_read(inode) + len;
	ret = inode_newsize_ok(inode, new_size);
	if (ret)
		return ret;

	if (offset >= i_size_read(inode))
		return -EINVAL;

	/* insert range should be aligned to block size of f2fs. */
	if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
		return -EINVAL;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	f2fs_balance_fs(sbi, true);

	/* avoid gc operation during block exchange */
	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

	down_write(&F2FS_I(inode)->i_mmap_sem);
	ret = f2fs_truncate_blocks(inode, i_size_read(inode), true);
	if (ret)
		goto out;

	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	if (ret)
		goto out;

	truncate_pagecache(inode, offset);

	pg_start = offset >> PAGE_SHIFT;
	pg_end = (offset + len) >> PAGE_SHIFT;
	delta = pg_end - pg_start;
	idx = (i_size_read(inode) + PAGE_SIZE - 1) / PAGE_SIZE;

	while (!ret && idx > pg_start) {
		nr = idx - pg_start;
		if (nr > delta)
			nr = delta;
		idx -= nr;

		f2fs_lock_op(sbi);
		f2fs_drop_extent_tree(inode);

		ret = __exchange_data_block(inode, inode, idx,
					idx + delta, nr, false);
		f2fs_unlock_op(sbi);
	}

	/* write out all moved pages, if possible */
	filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	truncate_pagecache(inode, offset);

	if (!ret)
		f2fs_i_size_write(inode, new_size);
out:
	up_write(&F2FS_I(inode)->i_mmap_sem);
	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
	return ret;
}
static int expand_inode_data(struct inode *inode, loff_t offset,
					loff_t len, int mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_map_blocks map = { .m_next_pgofs = NULL,
			.m_next_extent = NULL, .m_seg_type = NO_CHECK_TYPE };
	pgoff_t pg_end;
	loff_t new_size = i_size_read(inode);
	loff_t off_end;
	int err;

	err = inode_newsize_ok(inode, (len + offset));
	if (err)
		return err;

	err = f2fs_convert_inline_inode(inode);
	if (err)
		return err;

	f2fs_balance_fs(sbi, true);

	pg_end = ((unsigned long long)offset + len) >> PAGE_SHIFT;
	off_end = (offset + len) & (PAGE_SIZE - 1);

	map.m_lblk = ((unsigned long long)offset) >> PAGE_SHIFT;
	map.m_len = pg_end - map.m_lblk;
	if (off_end)
		map.m_len++;

	err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO);
	if (err) {
		pgoff_t last_off;

		if (!map.m_len)
			return err;

		last_off = map.m_lblk + map.m_len - 1;

		/* update new size to the failed position */
		new_size = (last_off == pg_end) ? offset + len :
					(loff_t)(last_off + 1) << PAGE_SHIFT;
	} else {
		new_size = ((loff_t)pg_end << PAGE_SHIFT) + off_end;
	}

	if (new_size > i_size_read(inode)) {
		if (mode & FALLOC_FL_KEEP_SIZE)
			file_set_keep_isize(inode);
		else
			f2fs_i_size_write(inode, new_size);
	}

	return err;
}
static long f2fs_fallocate(struct file *file, int mode,
				loff_t offset, loff_t len)
{
	struct inode *inode = file_inode(file);
	long ret = 0;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;

	/* f2fs only supports ->fallocate for regular files */
	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	if (f2fs_encrypted_inode(inode) &&
		(mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)))
		return -EOPNOTSUPP;

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
			FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |
			FALLOC_FL_INSERT_RANGE))
		return -EOPNOTSUPP;

	inode_lock(inode);

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		if (offset >= inode->i_size)
			goto out;

		ret = punch_hole(inode, offset, len);
	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
		ret = f2fs_collapse_range(inode, offset, len);
	} else if (mode & FALLOC_FL_ZERO_RANGE) {
		ret = f2fs_zero_range(inode, offset, len, mode);
	} else if (mode & FALLOC_FL_INSERT_RANGE) {
		ret = f2fs_insert_range(inode, offset, len);
	} else {
		ret = expand_inode_data(inode, offset, len, mode);
	}

	if (!ret) {
		inode->i_mtime = inode->i_ctime = current_time(inode);
		f2fs_mark_inode_dirty_sync(inode, false);
		f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	}

out:
	inode_unlock(inode);

	trace_f2fs_fallocate(inode, mode, offset, len, ret);
	return ret;
}
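/*
 * Mode dispatch above mirrors the VFS fallocate flags: punch-hole,
 * collapse-range, zero-range and insert-range are mutually exclusive,
 * and anything else falls through to plain preallocation via
 * expand_inode_data().
 */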
static int f2fs_release_file(struct inode *inode, struct file *filp)
{
	/*
	 * f2fs_release_file is called at every close. So we should
	 * not drop any inmemory pages on a close issued by another process.
	 */
	if (!(filp->f_mode & FMODE_WRITE) ||
			atomic_read(&inode->i_writecount) != 1)
		return 0;

	/* some remained atomic pages should be discarded */
	if (f2fs_is_atomic_file(inode))
		f2fs_drop_inmem_pages(inode);
	if (f2fs_is_volatile_file(inode)) {
		set_inode_flag(inode, FI_DROP_CACHE);
		filemap_fdatawrite(inode->i_mapping);
		clear_inode_flag(inode, FI_DROP_CACHE);
		clear_inode_flag(inode, FI_VOLATILE_FILE);
		stat_dec_volatile_write(inode);
	}
	return 0;
}
static int f2fs_file_flush(struct file *file, fl_owner_t id)
{
	struct inode *inode = file_inode(file);

	/*
	 * If the process doing a transaction is crashed, we should do
	 * roll-back. Otherwise, other readers/writers can see a corrupted
	 * database until all the writers close their files. Since this
	 * should be done before dropping the file lock, it needs to be
	 * done in ->flush.
	 */
	if (f2fs_is_atomic_file(inode) &&
			F2FS_I(inode)->inmem_task == current)
		f2fs_drop_inmem_pages(inode);
	return 0;
}
static int f2fs_ioc_getflags(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	unsigned int flags = fi->i_flags;

	if (file_is_encrypt(inode))
		flags |= F2FS_ENCRYPT_FL;
	if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode))
		flags |= F2FS_INLINE_DATA_FL;

	flags &= F2FS_FL_USER_VISIBLE;

	return put_user(flags, (int __user *)arg);
}
static int __f2fs_ioc_setflags(struct inode *inode, unsigned int flags)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	unsigned int oldflags;

	/* Is it quota file? Do not allow user to mess with it */
	if (IS_NOQUOTA(inode))
		return -EPERM;

	flags = f2fs_mask_flags(inode->i_mode, flags);

	oldflags = fi->i_flags;

	if ((flags ^ oldflags) & (F2FS_APPEND_FL | F2FS_IMMUTABLE_FL))
		if (!capable(CAP_LINUX_IMMUTABLE))
			return -EPERM;

	flags = flags & F2FS_FL_USER_MODIFIABLE;
	flags |= oldflags & ~F2FS_FL_USER_MODIFIABLE;
	fi->i_flags = flags;

	if (fi->i_flags & F2FS_PROJINHERIT_FL)
		set_inode_flag(inode, FI_PROJ_INHERIT);
	else
		clear_inode_flag(inode, FI_PROJ_INHERIT);

	inode->i_ctime = current_time(inode);
	f2fs_set_inode_flags(inode);
	f2fs_mark_inode_dirty_sync(inode, false);
	return 0;
}
static int f2fs_ioc_setflags(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	unsigned int flags;
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (get_user(flags, (int __user *)arg))
		return -EFAULT;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	ret = __f2fs_ioc_setflags(inode, flags);

	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}
static int f2fs_ioc_getversion(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);

	return put_user(inode->i_generation, (int __user *)arg);
}
static int f2fs_ioc_start_atomic_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

	if (f2fs_is_atomic_file(inode))
		goto out;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		goto out;

	if (!get_dirty_pages(inode))
		goto skip_flush;

	f2fs_msg(F2FS_I_SB(inode)->sb, KERN_WARNING,
		"Unexpected flush for atomic writes: ino=%lu, npages=%u",
					inode->i_ino, get_dirty_pages(inode));
	ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
	if (ret)
		goto out;
skip_flush:
	set_inode_flag(inode, FI_ATOMIC_FILE);
	clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);

	F2FS_I(inode)->inmem_task = current;
	stat_inc_atomic_write(inode);
	stat_update_max_atomic_write(inode);
out:
	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}
static int f2fs_ioc_commit_atomic_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

	if (f2fs_is_volatile_file(inode)) {
		ret = -EINVAL;
		goto err_out;
	}

	if (f2fs_is_atomic_file(inode)) {
		ret = f2fs_commit_inmem_pages(inode);
		if (ret)
			goto err_out;

		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
		if (!ret) {
			clear_inode_flag(inode, FI_ATOMIC_FILE);
			F2FS_I(inode)->i_gc_failures[GC_FAILURE_ATOMIC] = 0;
			stat_dec_atomic_write(inode);
		}
	} else {
		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 1, false);
	}
err_out:
	if (is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST)) {
		clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
		ret = -EINVAL;
	}
	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}
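/*
 * Atomic-write protocol: F2FS_IOC_START_ATOMIC_WRITE redirects writes
 * into in-memory pages; the commit ioctl above flushes them with
 * f2fs_commit_inmem_pages() and then fsyncs with atomic=true, so the
 * serialized node chain makes the update all-or-nothing across a
 * power failure.
 */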
static int f2fs_ioc_start_volatile_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (f2fs_is_volatile_file(inode))
		goto out;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		goto out;

	stat_inc_volatile_write(inode);
	stat_update_max_volatile_write(inode);

	set_inode_flag(inode, FI_VOLATILE_FILE);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}
static int f2fs_ioc_release_volatile_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (!f2fs_is_volatile_file(inode))
		goto out;

	if (!f2fs_is_first_block_written(inode)) {
		ret = truncate_partial_data_page(inode, 0, true);
		goto out;
	}

	ret = punch_hole(inode, 0, F2FS_BLKSIZE);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}
static int f2fs_ioc_abort_volatile_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (f2fs_is_atomic_file(inode))
		f2fs_drop_inmem_pages(inode);
	if (f2fs_is_volatile_file(inode)) {
		clear_inode_flag(inode, FI_VOLATILE_FILE);
		stat_dec_volatile_write(inode);
		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
	}

	inode_unlock(inode);

	mnt_drop_write_file(filp);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	return ret;
}
static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct super_block *sb = sbi->sb;
	__u32 in;
	int ret = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (get_user(in, (__u32 __user *)arg))
		return -EFAULT;

	if (in != F2FS_GOING_DOWN_FULLSYNC) {
		ret = mnt_want_write_file(filp);
		if (ret)
			return ret;
	}

	switch (in) {
	case F2FS_GOING_DOWN_FULLSYNC:
		sb = freeze_bdev(sb->s_bdev);
		if (IS_ERR(sb)) {
			ret = PTR_ERR(sb);
			goto out;
		}
		if (sb) {
			f2fs_stop_checkpoint(sbi, false);
			thaw_bdev(sb->s_bdev, sb);
		}
		break;
	case F2FS_GOING_DOWN_METASYNC:
		/* do checkpoint only */
		ret = f2fs_sync_fs(sb, 1);
		if (ret)
			goto out;
		f2fs_stop_checkpoint(sbi, false);
		break;
	case F2FS_GOING_DOWN_NOSYNC:
		f2fs_stop_checkpoint(sbi, false);
		break;
	case F2FS_GOING_DOWN_METAFLUSH:
		f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_META_IO);
		f2fs_stop_checkpoint(sbi, false);
		break;
	default:
		ret = -EINVAL;
		goto out;
	}

	f2fs_stop_gc_thread(sbi);
	f2fs_stop_discard_thread(sbi);

	f2fs_drop_discard_cmd(sbi);
	clear_opt(sbi, DISCARD);

	f2fs_update_time(sbi, REQ_TIME);
out:
	if (in != F2FS_GOING_DOWN_FULLSYNC)
		mnt_drop_write_file(filp);
	return ret;
}
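/*
 * Shutdown levels above differ only in how much is made durable first:
 * FULLSYNC freezes the block device, METASYNC checkpoints, METAFLUSH
 * just writes the meta pages, and NOSYNC stops checkpointing
 * immediately. All of them stop GC and discard afterwards.
 */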
static int f2fs_ioc_fitrim(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct super_block *sb = inode->i_sb;
	struct request_queue *q = bdev_get_queue(sb->s_bdev);
	struct fstrim_range range;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!blk_queue_discard(q))
		return -EOPNOTSUPP;

	if (copy_from_user(&range, (struct fstrim_range __user *)arg,
				sizeof(range)))
		return -EFAULT;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	range.minlen = max((unsigned int)range.minlen,
				q->limits.discard_granularity);
	ret = f2fs_trim_fs(F2FS_SB(sb), &range);
	mnt_drop_write_file(filp);
	if (ret < 0)
		return ret;

	if (copy_to_user((struct fstrim_range __user *)arg, &range,
				sizeof(range)))
		return -EFAULT;
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	return 0;
}
static bool uuid_is_nonzero(__u8 u[16])
{
	int i;

	for (i = 0; i < 16; i++)
		if (u[i])
			return true;
	return false;
}
static int f2fs_ioc_set_encryption_policy(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);

	if (!f2fs_sb_has_encrypt(inode->i_sb))
		return -EOPNOTSUPP;

	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);

	return fscrypt_ioctl_set_policy(filp, (const void __user *)arg);
}
static int f2fs_ioc_get_encryption_policy(struct file *filp, unsigned long arg)
{
	if (!f2fs_sb_has_encrypt(file_inode(filp)->i_sb))
		return -EOPNOTSUPP;

	return fscrypt_ioctl_get_policy(filp, (void __user *)arg);
}
static int f2fs_ioc_get_encryption_pwsalt(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int err;

	if (!f2fs_sb_has_encrypt(inode->i_sb))
		return -EOPNOTSUPP;

	err = mnt_want_write_file(filp);
	if (err)
		return err;

	down_write(&sbi->sb_lock);

	if (uuid_is_nonzero(sbi->raw_super->encrypt_pw_salt))
		goto got_it;

	/* update superblock with uuid */
	generate_random_uuid(sbi->raw_super->encrypt_pw_salt);

	err = f2fs_commit_super(sbi, false);
	if (err) {
		/* undo new data */
		memset(sbi->raw_super->encrypt_pw_salt, 0, 16);
		goto out_err;
	}
got_it:
	if (copy_to_user((__u8 __user *)arg, sbi->raw_super->encrypt_pw_salt,
									16))
		err = -EFAULT;
out_err:
	up_write(&sbi->sb_lock);
	mnt_drop_write_file(filp);
	return err;
}
static int f2fs_ioc_gc(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	__u32 sync;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (get_user(sync, (__u32 __user *)arg))
		return -EFAULT;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	if (!sync) {
		if (!mutex_trylock(&sbi->gc_mutex)) {
			ret = -EBUSY;
			goto out;
		}
	} else {
		mutex_lock(&sbi->gc_mutex);
	}

	ret = f2fs_gc(sbi, sync, true, NULL_SEGNO);
out:
	mnt_drop_write_file(filp);
	return ret;
}
static int f2fs_ioc_gc_range(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_gc_range range;
	u64 end;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&range, (struct f2fs_gc_range __user *)arg,
							sizeof(range)))
		return -EFAULT;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	end = range.start + range.len;
	if (range.start < MAIN_BLKADDR(sbi) || end >= MAX_BLKADDR(sbi))
		return -EINVAL;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

do_more:
	if (!range.sync) {
		if (!mutex_trylock(&sbi->gc_mutex)) {
			ret = -EBUSY;
			goto out;
		}
	} else {
		mutex_lock(&sbi->gc_mutex);
	}

	ret = f2fs_gc(sbi, range.sync, true, GET_SEGNO(sbi, range.start));
	range.start += sbi->blocks_per_seg;
	if (range.start <= end)
		goto do_more;
out:
	mnt_drop_write_file(filp);
	return ret;
}
static int f2fs_ioc_f2fs_write_checkpoint(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	ret = f2fs_sync_fs(sbi->sb, 1);

	mnt_drop_write_file(filp);
	return ret;
}
static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
					struct file *filp,
					struct f2fs_defragment *range)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_map_blocks map = { .m_next_extent = NULL,
					.m_seg_type = NO_CHECK_TYPE };
	struct extent_info ei = {0, 0, 0};
	pgoff_t pg_start, pg_end, next_pgofs;
	unsigned int blk_per_seg = sbi->blocks_per_seg;
	unsigned int total = 0, sec_num;
	block_t blk_end = 0;
	bool fragmented = false;
	int err;

	/* if in-place-update policy is enabled, don't waste time here */
	if (f2fs_should_update_inplace(inode, NULL))
		return -EINVAL;

	pg_start = range->start >> PAGE_SHIFT;
	pg_end = (range->start + range->len) >> PAGE_SHIFT;

	f2fs_balance_fs(sbi, true);

	inode_lock(inode);

	/* writeback all dirty pages in the range */
	err = filemap_write_and_wait_range(inode->i_mapping, range->start,
						range->start + range->len - 1);
	if (err)
		goto out;

	/*
	 * lookup mapping info in extent cache, skip defragmenting if physical
	 * block addresses are continuous.
	 */
	if (f2fs_lookup_extent_cache(inode, pg_start, &ei)) {
		if (ei.fofs + ei.len >= pg_end)
			goto out;
	}

	map.m_lblk = pg_start;
	map.m_next_pgofs = &next_pgofs;

	/*
	 * lookup mapping info in dnode page cache, skip defragmenting if all
	 * physical block addresses are continuous even if there are hole(s)
	 * in logical blocks.
	 */
	while (map.m_lblk < pg_end) {
		map.m_len = pg_end - map.m_lblk;
		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
		if (err)
			goto out;

		if (!(map.m_flags & F2FS_MAP_FLAGS)) {
			map.m_lblk = next_pgofs;
			continue;
		}

		if (blk_end && blk_end != map.m_pblk)
			fragmented = true;

		/* record total count of block that we're going to move */
		total += map.m_len;

		blk_end = map.m_pblk + map.m_len;

		map.m_lblk += map.m_len;
	}

	if (!fragmented)
		goto out;

	sec_num = (total + BLKS_PER_SEC(sbi) - 1) / BLKS_PER_SEC(sbi);

	/*
	 * make sure there are enough free sections for LFS allocation; this
	 * avoids the defragmenter running in SSR mode once the free sections
	 * are used up.
	 */
	if (has_not_enough_free_secs(sbi, 0, sec_num)) {
		err = -EAGAIN;
		goto out;
	}

	map.m_lblk = pg_start;
	map.m_len = pg_end - pg_start;
	total = 0;

	while (map.m_lblk < pg_end) {
		pgoff_t idx;
		int cnt = 0;

do_map:
		map.m_len = pg_end - map.m_lblk;
		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
		if (err)
			goto clear_out;

		if (!(map.m_flags & F2FS_MAP_FLAGS)) {
			map.m_lblk = next_pgofs;
			continue;
		}

		set_inode_flag(inode, FI_DO_DEFRAG);

		idx = map.m_lblk;
		while (idx < map.m_lblk + map.m_len && cnt < blk_per_seg) {
			struct page *page;

			page = f2fs_get_lock_data_page(inode, idx, true);
			if (IS_ERR(page)) {
				err = PTR_ERR(page);
				goto clear_out;
			}

			set_page_dirty(page);
			f2fs_put_page(page, 1);

			idx++;
			cnt++;
			total++;
		}

		map.m_lblk = idx;

		if (idx < pg_end && cnt < blk_per_seg)
			goto do_map;

		clear_inode_flag(inode, FI_DO_DEFRAG);

		err = filemap_fdatawrite(inode->i_mapping);
		if (err)
			goto out;
	}
clear_out:
	clear_inode_flag(inode, FI_DO_DEFRAG);
out:
	inode_unlock(inode);
	if (!err)
		range->len = (u64)total << PAGE_SHIFT;
	return err;
}
static int f2fs_ioc_defragment(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_defragment range;
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!S_ISREG(inode->i_mode) || f2fs_is_atomic_file(inode))
		return -EINVAL;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	if (copy_from_user(&range, (struct f2fs_defragment __user *)arg,
							sizeof(range)))
		return -EFAULT;

	/* verify alignment of offset & size */
	if (range.start & (F2FS_BLKSIZE - 1) || range.len & (F2FS_BLKSIZE - 1))
		return -EINVAL;

	if (unlikely((range.start + range.len) >> PAGE_SHIFT >
					sbi->max_file_blocks))
		return -EINVAL;

	err = mnt_want_write_file(filp);
	if (err)
		return err;

	err = f2fs_defragment_range(sbi, filp, &range);
	mnt_drop_write_file(filp);

	f2fs_update_time(sbi, REQ_TIME);
	if (err < 0)
		return err;

	if (copy_to_user((struct f2fs_defragment __user *)arg, &range,
							sizeof(range)))
		return -EFAULT;

	return 0;
}
static int f2fs_move_file_range(struct file *file_in, loff_t pos_in,
			struct file *file_out, loff_t pos_out, size_t len)
{
	struct inode *src = file_inode(file_in);
	struct inode *dst = file_inode(file_out);
	struct f2fs_sb_info *sbi = F2FS_I_SB(src);
	size_t olen = len, dst_max_i_size = 0;
	size_t dst_osize;
	int ret;

	if (file_in->f_path.mnt != file_out->f_path.mnt ||
				src->i_sb != dst->i_sb)
		return -EXDEV;

	if (unlikely(f2fs_readonly(src->i_sb)))
		return -EROFS;

	if (!S_ISREG(src->i_mode) || !S_ISREG(dst->i_mode))
		return -EINVAL;

	if (f2fs_encrypted_inode(src) || f2fs_encrypted_inode(dst))
		return -EOPNOTSUPP;

	if (src == dst) {
		if (pos_in == pos_out)
			return 0;
		if (pos_out > pos_in && pos_out < pos_in + len)
			return -EINVAL;
	}

	inode_lock(src);
	down_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
	if (src != dst) {
		ret = -EBUSY;
		if (!inode_trylock(dst))
			goto out;
		if (!down_write_trylock(&F2FS_I(dst)->i_gc_rwsem[WRITE])) {
			inode_unlock(dst);
			goto out;
		}
	}

	ret = -EINVAL;
	if (pos_in + len > src->i_size || pos_in + len < pos_in)
		goto out_unlock;
	if (len == 0)
		olen = len = src->i_size - pos_in;
	if (pos_in + len == src->i_size)
		len = ALIGN(src->i_size, F2FS_BLKSIZE) - pos_in;
	if (len == 0) {
		ret = 0;
		goto out_unlock;
	}

	dst_osize = dst->i_size;
	if (pos_out + olen > dst->i_size)
		dst_max_i_size = pos_out + olen;

	/* verify the end result is block aligned */
	if (!IS_ALIGNED(pos_in, F2FS_BLKSIZE) ||
			!IS_ALIGNED(pos_in + len, F2FS_BLKSIZE) ||
			!IS_ALIGNED(pos_out, F2FS_BLKSIZE))
		goto out_unlock;

	ret = f2fs_convert_inline_inode(src);
	if (ret)
		goto out_unlock;

	ret = f2fs_convert_inline_inode(dst);
	if (ret)
		goto out_unlock;

	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(src->i_mapping,
					pos_in, pos_in + len);
	if (ret)
		goto out_unlock;

	ret = filemap_write_and_wait_range(dst->i_mapping,
					pos_out, pos_out + len);
	if (ret)
		goto out_unlock;

	f2fs_balance_fs(sbi, true);
	f2fs_lock_op(sbi);
	ret = __exchange_data_block(src, dst, pos_in >> F2FS_BLKSIZE_BITS,
				pos_out >> F2FS_BLKSIZE_BITS,
				len >> F2FS_BLKSIZE_BITS, false);

	if (!ret) {
		if (dst_max_i_size)
			f2fs_i_size_write(dst, dst_max_i_size);
		else if (dst_osize != dst->i_size)
			f2fs_i_size_write(dst, dst_osize);
	}
	f2fs_unlock_op(sbi);
out_unlock:
	if (src != dst) {
		up_write(&F2FS_I(dst)->i_gc_rwsem[WRITE]);
		inode_unlock(dst);
	}
out:
	up_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
	inode_unlock(src);
	return ret;
}
static int f2fs_ioc_move_range(struct file *filp, unsigned long arg)
{
	struct f2fs_move_range range;
	struct fd dst;
	int err;

	if (!(filp->f_mode & FMODE_READ) ||
			!(filp->f_mode & FMODE_WRITE))
		return -EBADF;

	if (copy_from_user(&range, (struct f2fs_move_range __user *)arg,
							sizeof(range)))
		return -EFAULT;

	dst = fdget(range.dst_fd);
	if (!dst.file)
		return -EBADF;

	if (!(dst.file->f_mode & FMODE_WRITE)) {
		err = -EBADF;
		goto err_out;
	}

	err = mnt_want_write_file(filp);
	if (err)
		goto err_out;

	err = f2fs_move_file_range(filp, range.pos_in, dst.file,
					range.pos_out, range.len);

	mnt_drop_write_file(filp);
	if (err)
		goto err_out;

	if (copy_to_user((struct f2fs_move_range __user *)arg,
						&range, sizeof(range)))
		err = -EFAULT;
err_out:
	fdput(dst);
	return err;
}
static int f2fs_ioc_flush_device(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct sit_info *sm = SIT_I(sbi);
	unsigned int start_segno = 0, end_segno = 0;
	unsigned int dev_start_segno = 0, dev_end_segno = 0;
	struct f2fs_flush_device range;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	if (copy_from_user(&range, (struct f2fs_flush_device __user *)arg,
							sizeof(range)))
		return -EFAULT;

	if (sbi->s_ndevs <= 1 || sbi->s_ndevs - 1 <= range.dev_num ||
			sbi->segs_per_sec != 1) {
		f2fs_msg(sbi->sb, KERN_WARNING,
			"Can't flush %u in %d for segs_per_sec %u != 1\n",
				range.dev_num, sbi->s_ndevs,
				sbi->segs_per_sec);
		return -EINVAL;
	}

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	if (range.dev_num != 0)
		dev_start_segno = GET_SEGNO(sbi, FDEV(range.dev_num).start_blk);
	dev_end_segno = GET_SEGNO(sbi, FDEV(range.dev_num).end_blk);

	start_segno = sm->last_victim[FLUSH_DEVICE];
	if (start_segno < dev_start_segno || start_segno >= dev_end_segno)
		start_segno = dev_start_segno;
	end_segno = min(start_segno + range.segments, dev_end_segno);

	while (start_segno < end_segno) {
		if (!mutex_trylock(&sbi->gc_mutex)) {
			ret = -EBUSY;
			goto out;
		}
		sm->last_victim[GC_CB] = end_segno + 1;
		sm->last_victim[GC_GREEDY] = end_segno + 1;
		sm->last_victim[ALLOC_NEXT] = end_segno + 1;
		ret = f2fs_gc(sbi, true, true, start_segno);
		if (ret == -EAGAIN)
			ret = 0;
		else if (ret < 0)
			break;
		start_segno++;
	}
out:
	mnt_drop_write_file(filp);
	return ret;
}
static int f2fs_ioc_get_features(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	u32 sb_feature = le32_to_cpu(F2FS_I_SB(inode)->raw_super->feature);

	/* Must validate to set it with SQLite behavior in Android. */
	sb_feature |= F2FS_FEATURE_ATOMIC_WRITE;

	return put_user(sb_feature, (u32 __user *)arg);
}
#ifdef CONFIG_QUOTA
static int f2fs_ioc_setproject(struct file *filp, __u32 projid)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct super_block *sb = sbi->sb;
	struct dquot *transfer_to[MAXQUOTAS] = {};
	struct page *ipage;
	kprojid_t kprojid;
	int err;

	if (!f2fs_sb_has_project_quota(sb)) {
		if (projid != F2FS_DEF_PROJID)
			return -EOPNOTSUPP;
		else
			return 0;
	}

	if (!f2fs_has_extra_attr(inode))
		return -EOPNOTSUPP;

	kprojid = make_kprojid(&init_user_ns, (projid_t)projid);

	if (projid_eq(kprojid, F2FS_I(inode)->i_projid))
		return 0;

	err = mnt_want_write_file(filp);
	if (err)
		return err;

	err = -EPERM;
	inode_lock(inode);

	/* Is it quota file? Do not allow user to mess with it */
	if (IS_NOQUOTA(inode))
		goto out_unlock;

	ipage = f2fs_get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto out_unlock;
	}

	if (!F2FS_FITS_IN_INODE(F2FS_INODE(ipage), fi->i_extra_isize,
								i_projid)) {
		err = -EOVERFLOW;
		f2fs_put_page(ipage, 1);
		goto out_unlock;
	}
	f2fs_put_page(ipage, 1);

	err = dquot_initialize(inode);
	if (err)
		goto out_unlock;

	transfer_to[PRJQUOTA] = dqget(sb, make_kqid_projid(kprojid));
	if (!IS_ERR(transfer_to[PRJQUOTA])) {
		err = __dquot_transfer(inode, transfer_to);
		dqput(transfer_to[PRJQUOTA]);
		if (err)
			goto out_unlock;
	}

	F2FS_I(inode)->i_projid = kprojid;
	inode->i_ctime = current_time(inode);

	f2fs_mark_inode_dirty_sync(inode, true);
out_unlock:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return err;
}
#else
static int f2fs_ioc_setproject(struct file *filp, __u32 projid)
{
	if (projid != F2FS_DEF_PROJID)
		return -EOPNOTSUPP;
	return 0;
}
#endif
/* Transfer internal flags to xflags */
static inline __u32 f2fs_iflags_to_xflags(unsigned long iflags)
{
	__u32 xflags = 0;

	if (iflags & F2FS_SYNC_FL)
		xflags |= FS_XFLAG_SYNC;
	if (iflags & F2FS_IMMUTABLE_FL)
		xflags |= FS_XFLAG_IMMUTABLE;
	if (iflags & F2FS_APPEND_FL)
		xflags |= FS_XFLAG_APPEND;
	if (iflags & F2FS_NODUMP_FL)
		xflags |= FS_XFLAG_NODUMP;
	if (iflags & F2FS_NOATIME_FL)
		xflags |= FS_XFLAG_NOATIME;
	if (iflags & F2FS_PROJINHERIT_FL)
		xflags |= FS_XFLAG_PROJINHERIT;
	return xflags;
}

#define F2FS_SUPPORTED_FS_XFLAGS (FS_XFLAG_SYNC | FS_XFLAG_IMMUTABLE | \
				  FS_XFLAG_APPEND | FS_XFLAG_NODUMP | \
				  FS_XFLAG_NOATIME | FS_XFLAG_PROJINHERIT)
/* Transfer xflags to internal flags */
static inline unsigned long f2fs_xflags_to_iflags(__u32 xflags)
{
	unsigned long iflags = 0;

	if (xflags & FS_XFLAG_SYNC)
		iflags |= F2FS_SYNC_FL;
	if (xflags & FS_XFLAG_IMMUTABLE)
		iflags |= F2FS_IMMUTABLE_FL;
	if (xflags & FS_XFLAG_APPEND)
		iflags |= F2FS_APPEND_FL;
	if (xflags & FS_XFLAG_NODUMP)
		iflags |= F2FS_NODUMP_FL;
	if (xflags & FS_XFLAG_NOATIME)
		iflags |= F2FS_NOATIME_FL;
	if (xflags & FS_XFLAG_PROJINHERIT)
		iflags |= F2FS_PROJINHERIT_FL;

	return iflags;
}
static int f2fs_ioc_fsgetxattr(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct fsxattr fa;

	memset(&fa, 0, sizeof(struct fsxattr));
	fa.fsx_xflags = f2fs_iflags_to_xflags(fi->i_flags &
						F2FS_FL_USER_VISIBLE);

	if (f2fs_sb_has_project_quota(inode->i_sb))
		fa.fsx_projid = (__u32)from_kprojid(&init_user_ns,
							fi->i_projid);

	if (copy_to_user((struct fsxattr __user *)arg, &fa, sizeof(fa)))
		return -EFAULT;
	return 0;
}

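/*
 * Illustrative userspace sketch (not part of this file): the generic
 * FS_IOC_FSGETXATTR command from <linux/fs.h> reaches
 * f2fs_ioc_fsgetxattr() through f2fs_ioctl(), since F2FS_IOC_FSGETXATTR
 * is defined as an alias of the generic number.
 *
 *	#include <linux/fs.h>
 *	#include <sys/ioctl.h>
 *
 *	static unsigned int get_projid(int fd)
 *	{
 *		struct fsxattr fa;
 *
 *		if (ioctl(fd, FS_IOC_FSGETXATTR, &fa) < 0)
 *			return 0;
 *		return fa.fsx_projid;
 *	}
 */
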
static int f2fs_ioc_fssetxattr(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct fsxattr fa;
	unsigned int flags;
	int err;

	if (copy_from_user(&fa, (struct fsxattr __user *)arg, sizeof(fa)))
		return -EFAULT;

	/* Make sure caller has proper permission */
	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (fa.fsx_xflags & ~F2FS_SUPPORTED_FS_XFLAGS)
		return -EOPNOTSUPP;

	flags = f2fs_xflags_to_iflags(fa.fsx_xflags);
	if (f2fs_mask_flags(inode->i_mode, flags) != flags)
		return -EOPNOTSUPP;

	err = mnt_want_write_file(filp);
	if (err)
		return err;

	inode_lock(inode);
	flags = (fi->i_flags & ~F2FS_FL_XFLAG_VISIBLE) |
				(flags & F2FS_FL_XFLAG_VISIBLE);
	err = __f2fs_ioc_setflags(inode, flags);
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	if (err)
		return err;

	err = f2fs_ioc_setproject(filp, fa.fsx_projid);
	if (err)
		return err;

	return 0;
}

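/*
 * Illustrative userspace sketch (not part of this file): changing the
 * project ID with a read-modify-write of struct fsxattr so the existing
 * xflags are preserved; assumes a project-quota-enabled f2fs mount.
 *
 *	static int set_projid(int fd, unsigned int projid)
 *	{
 *		struct fsxattr fa;
 *
 *		if (ioctl(fd, FS_IOC_FSGETXATTR, &fa) < 0)
 *			return -1;
 *		fa.fsx_projid = projid;
 *		return ioctl(fd, FS_IOC_FSSETXATTR, &fa);
 *	}
 */
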
int f2fs_pin_file_control(struct inode *inode, bool inc)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	/* Use i_gc_failures for normal file as a risk signal. */
	if (inc)
		f2fs_i_gc_failures_write(inode,
				fi->i_gc_failures[GC_FAILURE_PIN] + 1);

	if (fi->i_gc_failures[GC_FAILURE_PIN] > sbi->gc_pin_file_threshold) {
		f2fs_msg(sbi->sb, KERN_WARNING,
			"%s: Enable GC = ino %lx after %x GC trials\n",
			__func__, inode->i_ino,
			fi->i_gc_failures[GC_FAILURE_PIN]);
		clear_inode_flag(inode, FI_PIN_FILE);
		return -EAGAIN;
	}
	return 0;
}

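/*
 * In other words: each failed GC attempt against a pinned file bumps
 * i_gc_failures[GC_FAILURE_PIN], and once the count crosses
 * gc_pin_file_threshold (tunable via sysfs) the pin is dropped so GC
 * can migrate the blocks again. With a threshold of N, a pinned file
 * survives at most N failed GC passes before being unpinned.
 */
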
static int f2fs_ioc_set_pin_file(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	__u32 pin;
	int ret = 0;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (get_user(pin, (__u32 __user *)arg))
		return -EFAULT;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	if (f2fs_readonly(F2FS_I_SB(inode)->sb))
		return -EROFS;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (f2fs_should_update_outplace(inode, NULL)) {
		ret = -EINVAL;
		goto out;
	}

	if (!pin) {
		clear_inode_flag(inode, FI_PIN_FILE);
		F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN] = 1;
		goto done;
	}

	if (f2fs_pin_file_control(inode, false)) {
		ret = -EAGAIN;
		goto out;
	}
	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		goto out;

	set_inode_flag(inode, FI_PIN_FILE);
	ret = F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN];
done:
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_get_pin_file(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	__u32 pin = 0;

	if (is_inode_flag_set(inode, FI_PIN_FILE))
		pin = F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN];
	return put_user(pin, (u32 __user *)arg);
}

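/*
 * Illustrative userspace sketch (not part of this file): pinning a file
 * and reading back its GC-failure count. As with F2FS_IOC_GET_FEATURES,
 * the ioctl numbers are not exported through a uapi header here, so the
 * caller is assumed to define them locally.
 *
 *	static int pin_file(int fd, int pinned)
 *	{
 *		unsigned int pin = pinned ? 1 : 0;
 *
 *		if (ioctl(fd, F2FS_IOC_SET_PIN_FILE, &pin) < 0)
 *			return -1;
 *		return ioctl(fd, F2FS_IOC_GET_PIN_FILE, &pin) < 0 ? -1 : pin;
 *	}
 */
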
int f2fs_precache_extents(struct inode *inode)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_map_blocks map;
	pgoff_t m_next_extent;
	loff_t end;
	int err = 0;

	if (is_inode_flag_set(inode, FI_NO_EXTENT))
		return -EOPNOTSUPP;

	map.m_lblk = 0;
	map.m_next_pgofs = NULL;
	map.m_next_extent = &m_next_extent;
	map.m_seg_type = NO_CHECK_TYPE;
	end = F2FS_I_SB(inode)->max_file_blocks;

	while (map.m_lblk < end) {
		map.m_len = end - map.m_lblk;

		down_write(&fi->i_gc_rwsem[WRITE]);
		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_PRECACHE);
		up_write(&fi->i_gc_rwsem[WRITE]);
		if (err)
			return err;

		map.m_lblk = m_next_extent;
	}

	return err;
}

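/*
 * The loop above walks the file one mapped run at a time: each
 * F2FS_GET_BLOCK_PRECACHE call warms the extent information for one run
 * and reports the next starting offset through map.m_next_extent.
 * Userspace triggers it with an argument-less ioctl (illustrative
 * sketch, not part of this file):
 *
 *	ioctl(fd, F2FS_IOC_PRECACHE_EXTENTS);
 */
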
static int f2fs_ioc_precache_extents(struct file *filp, unsigned long arg)
{
	return f2fs_precache_extents(file_inode(filp));
}

long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(filp)))))
		return -EIO;

	switch (cmd) {
	case F2FS_IOC_GETFLAGS:
		return f2fs_ioc_getflags(filp, arg);
	case F2FS_IOC_SETFLAGS:
		return f2fs_ioc_setflags(filp, arg);
	case F2FS_IOC_GETVERSION:
		return f2fs_ioc_getversion(filp, arg);
	case F2FS_IOC_START_ATOMIC_WRITE:
		return f2fs_ioc_start_atomic_write(filp);
	case F2FS_IOC_COMMIT_ATOMIC_WRITE:
		return f2fs_ioc_commit_atomic_write(filp);
	case F2FS_IOC_START_VOLATILE_WRITE:
		return f2fs_ioc_start_volatile_write(filp);
	case F2FS_IOC_RELEASE_VOLATILE_WRITE:
		return f2fs_ioc_release_volatile_write(filp);
	case F2FS_IOC_ABORT_VOLATILE_WRITE:
		return f2fs_ioc_abort_volatile_write(filp);
	case F2FS_IOC_SHUTDOWN:
		return f2fs_ioc_shutdown(filp, arg);
	case FITRIM:
		return f2fs_ioc_fitrim(filp, arg);
	case F2FS_IOC_SET_ENCRYPTION_POLICY:
		return f2fs_ioc_set_encryption_policy(filp, arg);
	case F2FS_IOC_GET_ENCRYPTION_POLICY:
		return f2fs_ioc_get_encryption_policy(filp, arg);
	case F2FS_IOC_GET_ENCRYPTION_PWSALT:
		return f2fs_ioc_get_encryption_pwsalt(filp, arg);
	case F2FS_IOC_GARBAGE_COLLECT:
		return f2fs_ioc_gc(filp, arg);
	case F2FS_IOC_GARBAGE_COLLECT_RANGE:
		return f2fs_ioc_gc_range(filp, arg);
	case F2FS_IOC_WRITE_CHECKPOINT:
		return f2fs_ioc_write_checkpoint(filp, arg);
	case F2FS_IOC_DEFRAGMENT:
		return f2fs_ioc_defragment(filp, arg);
	case F2FS_IOC_MOVE_RANGE:
		return f2fs_ioc_move_range(filp, arg);
	case F2FS_IOC_FLUSH_DEVICE:
		return f2fs_ioc_flush_device(filp, arg);
	case F2FS_IOC_GET_FEATURES:
		return f2fs_ioc_get_features(filp, arg);
	case F2FS_IOC_FSGETXATTR:
		return f2fs_ioc_fsgetxattr(filp, arg);
	case F2FS_IOC_FSSETXATTR:
		return f2fs_ioc_fssetxattr(filp, arg);
	case F2FS_IOC_GET_PIN_FILE:
		return f2fs_ioc_get_pin_file(filp, arg);
	case F2FS_IOC_SET_PIN_FILE:
		return f2fs_ioc_set_pin_file(filp, arg);
	case F2FS_IOC_PRECACHE_EXTENTS:
		return f2fs_ioc_precache_extents(filp, arg);
	default:
		return -ENOTTY;
	}
}

static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	ssize_t ret;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;

	if ((iocb->ki_flags & IOCB_NOWAIT) && !(iocb->ki_flags & IOCB_DIRECT))
		return -EINVAL;

	if (!inode_trylock(inode)) {
		if (iocb->ki_flags & IOCB_NOWAIT)
			return -EAGAIN;
		inode_lock(inode);
	}

	ret = generic_write_checks(iocb, from);
	if (ret > 0) {
		bool preallocated = false;
		size_t target_size = 0;
		int err;

		if (iov_iter_fault_in_readable(from, iov_iter_count(from)))
			set_inode_flag(inode, FI_NO_PREALLOC);

		if ((iocb->ki_flags & IOCB_NOWAIT) &&
				(iocb->ki_flags & IOCB_DIRECT)) {
			if (!f2fs_overwrite_io(inode, iocb->ki_pos,
						iov_iter_count(from)) ||
					f2fs_has_inline_data(inode) ||
					f2fs_force_buffered_io(inode, WRITE)) {
				clear_inode_flag(inode, FI_NO_PREALLOC);
				inode_unlock(inode);
				return -EAGAIN;
			}
		} else {
			preallocated = true;
			target_size = iocb->ki_pos + iov_iter_count(from);

			err = f2fs_preallocate_blocks(iocb, from);
			if (err) {
				clear_inode_flag(inode, FI_NO_PREALLOC);
				inode_unlock(inode);
				return err;
			}
		}
		ret = __generic_file_write_iter(iocb, from);
		clear_inode_flag(inode, FI_NO_PREALLOC);

		/* if we couldn't write data, we should deallocate blocks. */
		if (preallocated && i_size_read(inode) < target_size)
			f2fs_truncate(inode);

		if (ret > 0)
			f2fs_update_iostat(F2FS_I_SB(inode), APP_WRITE_IO, ret);
	}
	inode_unlock(inode);

	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	return ret;
}

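/*
 * Illustrative userspace sketch (not part of this file): the
 * IOCB_NOWAIT checks above are what a pwritev2(..., RWF_NOWAIT) caller
 * on an O_DIRECT fd exercises. Anything that cannot complete without
 * blocking -- a non-overwrite, inline data, or a forced fallback to
 * buffered I/O -- returns -EAGAIN instead of sleeping.
 *
 *	#include <sys/uio.h>
 *
 *	static ssize_t try_write(int fd, struct iovec *iov, off_t off)
 *	{
 *		return pwritev2(fd, iov, 1, off, RWF_NOWAIT);
 *	}
 */
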
#ifdef CONFIG_COMPAT
long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case F2FS_IOC32_GETFLAGS:
		cmd = F2FS_IOC_GETFLAGS;
		break;
	case F2FS_IOC32_SETFLAGS:
		cmd = F2FS_IOC_SETFLAGS;
		break;
	case F2FS_IOC32_GETVERSION:
		cmd = F2FS_IOC_GETVERSION;
		break;
	case F2FS_IOC_START_ATOMIC_WRITE:
	case F2FS_IOC_COMMIT_ATOMIC_WRITE:
	case F2FS_IOC_START_VOLATILE_WRITE:
	case F2FS_IOC_RELEASE_VOLATILE_WRITE:
	case F2FS_IOC_ABORT_VOLATILE_WRITE:
	case F2FS_IOC_SHUTDOWN:
	case F2FS_IOC_SET_ENCRYPTION_POLICY:
	case F2FS_IOC_GET_ENCRYPTION_PWSALT:
	case F2FS_IOC_GET_ENCRYPTION_POLICY:
	case F2FS_IOC_GARBAGE_COLLECT:
	case F2FS_IOC_GARBAGE_COLLECT_RANGE:
	case F2FS_IOC_WRITE_CHECKPOINT:
	case F2FS_IOC_DEFRAGMENT:
	case F2FS_IOC_MOVE_RANGE:
	case F2FS_IOC_FLUSH_DEVICE:
	case F2FS_IOC_GET_FEATURES:
	case F2FS_IOC_FSGETXATTR:
	case F2FS_IOC_FSSETXATTR:
	case F2FS_IOC_GET_PIN_FILE:
	case F2FS_IOC_SET_PIN_FILE:
	case F2FS_IOC_PRECACHE_EXTENTS:
		break;
	default:
		return -ENOIOCTLCMD;
	}
	return f2fs_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
}
#endif

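/*
 * Only the three FS_IOC32_* flag/version commands need renumbering in
 * the compat path; the f2fs-private ioctls use fixed-width types in
 * their argument structures, so their command numbers come out the same
 * for 32-bit and 64-bit callers and can be passed straight through
 * after compat_ptr().
 */
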
const struct file_operations f2fs_file_operations = {
	.llseek		= f2fs_llseek,
	.read_iter	= generic_file_read_iter,
	.write_iter	= f2fs_file_write_iter,
	.open		= f2fs_file_open,
	.release	= f2fs_release_file,
	.mmap		= f2fs_file_mmap,
	.flush		= f2fs_file_flush,
	.fsync		= f2fs_sync_file,
	.fallocate	= f2fs_fallocate,
	.unlocked_ioctl	= f2fs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= f2fs_compat_ioctl,
#endif
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
};