// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/file.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/stat.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/falloc.h>
#include <linux/types.h>
#include <linux/compat.h>
#include <linux/uaccess.h>
#include <linux/mount.h>
#include <linux/pagevec.h>
#include <linux/uio.h>
#include <linux/uuid.h>
#include <linux/file.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"
#include "acl.h"
#include "gc.h"
#include "trace.h"
#include <trace/events/f2fs.h>

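/*
 * Page-fault path: every fault on an f2fs-backed mapping takes
 * F2FS_I(inode)->i_mmap_sem for read, so truncate and punch-hole (which
 * take it for write) cannot change the block mapping while a fault is in
 * flight.
 */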
static vm_fault_t f2fs_filemap_fault(struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vmf->vma->vm_file);
	vm_fault_t ret;

	down_read(&F2FS_I(inode)->i_mmap_sem);
	ret = filemap_fault(vmf);
	up_read(&F2FS_I(inode)->i_mmap_sem);

	return ret;
}

static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn = { .node_changed = false };
	int err;

	if (unlikely(f2fs_cp_error(sbi))) {
		err = -EIO;
		goto err;
	}

	sb_start_pagefault(inode->i_sb);

	f2fs_bug_on(sbi, f2fs_has_inline_data(inode));

	file_update_time(vmf->vma->vm_file);
	down_read(&F2FS_I(inode)->i_mmap_sem);
	lock_page(page);
	if (unlikely(page->mapping != inode->i_mapping ||
			page_offset(page) > i_size_read(inode) ||
			!PageUptodate(page))) {
		unlock_page(page);
		err = -EFAULT;
		goto out_sem;
	}

	/* block allocation */
	__do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true);
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_get_block(&dn, page->index);
	f2fs_put_dnode(&dn);
	__do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);
	if (err) {
		unlock_page(page);
		goto out_sem;
	}

	/* fill the page */
	f2fs_wait_on_page_writeback(page, DATA, false);

	/* wait for GCed page writeback via META_MAPPING */
	f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);

	/*
	 * check to see if the page is mapped already (no holes)
	 */
	if (PageMappedToDisk(page))
		goto out_sem;

	/* page is wholly or partially inside EOF */
	if (((loff_t)(page->index + 1) << PAGE_SHIFT) >
						i_size_read(inode)) {
		loff_t offset;

		offset = i_size_read(inode) & ~PAGE_MASK;
		zero_user_segment(page, offset, PAGE_SIZE);
	}
	set_page_dirty(page);
	if (!PageUptodate(page))
		SetPageUptodate(page);

	f2fs_update_iostat(sbi, APP_MAPPED_IO, F2FS_BLKSIZE);
	f2fs_update_time(sbi, REQ_TIME);

	trace_f2fs_vm_page_mkwrite(page, DATA);
out_sem:
	up_read(&F2FS_I(inode)->i_mmap_sem);

	f2fs_balance_fs(sbi, dn.node_changed);

	sb_end_pagefault(inode->i_sb);
err:
	return block_page_mkwrite_return(err);
}

static const struct vm_operations_struct f2fs_file_vm_ops = {
	.fault		= f2fs_filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= f2fs_vm_page_mkwrite,
};

static int get_parent_ino(struct inode *inode, nid_t *pino)
{
	struct dentry *dentry;

	inode = igrab(inode);
	dentry = d_find_any_alias(inode);
	iput(inode);
	if (!dentry)
		return 0;

	*pino = parent_ino(dentry);
	dput(dentry);
	return 1;
}

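/*
 * Decide whether an fsync() can complete with roll-forward recovery
 * metadata alone, or must force a full checkpoint.  Any reason other than
 * CP_NO_NEEDED makes f2fs_do_sync_file() call f2fs_sync_fs() instead of
 * just flushing this inode's dirty node pages.
 */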
static inline enum cp_reason_type need_do_checkpoint(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	enum cp_reason_type cp_reason = CP_NO_NEEDED;

	if (!S_ISREG(inode->i_mode))
		cp_reason = CP_NON_REGULAR;
	else if (inode->i_nlink != 1)
		cp_reason = CP_HARDLINK;
	else if (is_sbi_flag_set(sbi, SBI_NEED_CP))
		cp_reason = CP_SB_NEED_CP;
	else if (file_wrong_pino(inode))
		cp_reason = CP_WRONG_PINO;
	else if (!f2fs_space_for_roll_forward(sbi))
		cp_reason = CP_NO_SPC_ROLL;
	else if (!f2fs_is_checkpointed_node(sbi, F2FS_I(inode)->i_pino))
		cp_reason = CP_NODE_NEED_CP;
	else if (test_opt(sbi, FASTBOOT))
		cp_reason = CP_FASTBOOT_MODE;
	else if (F2FS_OPTION(sbi).active_logs == 2)
		cp_reason = CP_SPEC_LOG_NUM;
	else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT &&
		f2fs_need_dentry_mark(sbi, inode->i_ino) &&
		f2fs_exist_written_data(sbi, F2FS_I(inode)->i_pino,
							TRANS_DIR_INO))
		cp_reason = CP_RECOVER_DIR;

	return cp_reason;
}

static bool need_inode_page_update(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct page *i = find_get_page(NODE_MAPPING(sbi), ino);
	bool ret = false;

	/* But we need to avoid that there are some inode updates */
	if ((i && PageDirty(i)) || f2fs_need_inode_block_update(sbi, ino))
		ret = true;
	f2fs_put_page(i, 0);
	return ret;
}

static void try_to_fix_pino(struct inode *inode)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	nid_t pino;

	down_write(&fi->i_sem);
	if (file_wrong_pino(inode) && inode->i_nlink == 1 &&
			get_parent_ino(inode, &pino)) {
		f2fs_i_pino_write(inode, pino);
		file_got_pino(inode);
	}
	up_write(&fi->i_sem);
}

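/*
 * Core fsync/fdatasync implementation.  The fast path writes file data and
 * then flushes only this inode's dirty node pages, which roll-forward
 * recovery can replay; the slow path, chosen by need_do_checkpoint(),
 * issues a full checkpoint.  @atomic is set when committing atomic writes,
 * in which case the node chain itself serializes the writeback.
 */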
static int f2fs_do_sync_file(struct file *file, loff_t start, loff_t end,
						int datasync, bool atomic)
{
	struct inode *inode = file->f_mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t ino = inode->i_ino;
	int ret = 0;
	enum cp_reason_type cp_reason = 0;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.for_reclaim = 0,
	};
	unsigned int seq_id = 0;

	if (unlikely(f2fs_readonly(inode->i_sb) ||
				is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
		return 0;

	trace_f2fs_sync_file_enter(inode);

	/* if fdatasync is triggered, let's do in-place-update */
	if (datasync || get_dirty_pages(inode) <= SM_I(sbi)->min_fsync_blocks)
		set_inode_flag(inode, FI_NEED_IPU);
	ret = file_write_and_wait_range(file, start, end);
	clear_inode_flag(inode, FI_NEED_IPU);

	if (ret) {
		trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
		return ret;
	}

	/* if the inode is dirty, let's recover all the time */
	if (!f2fs_skip_inode_update(inode, datasync)) {
		f2fs_write_inode(inode, NULL);
		goto go_write;
	}

	/*
	 * if there is no written data, don't waste time writing recovery info.
	 */
	if (!is_inode_flag_set(inode, FI_APPEND_WRITE) &&
			!f2fs_exist_written_data(sbi, ino, APPEND_INO)) {

		/* it may call write_inode just prior to fsync */
		if (need_inode_page_update(sbi, ino))
			goto go_write;

		if (is_inode_flag_set(inode, FI_UPDATE_WRITE) ||
				f2fs_exist_written_data(sbi, ino, UPDATE_INO))
			goto flush_out;
		goto out;
	}
go_write:
	/*
	 * Both fdatasync() and fsync() can be recovered from a sudden
	 * power-off.
	 */
	down_read(&F2FS_I(inode)->i_sem);
	cp_reason = need_do_checkpoint(inode);
	up_read(&F2FS_I(inode)->i_sem);

	if (cp_reason) {
		/* all the dirty node pages should be flushed for POR */
		ret = f2fs_sync_fs(inode->i_sb, 1);

		/*
		 * We've secured consistency through sync_fs. Following pino
		 * will be used only for fsynced inodes after checkpoint.
		 */
		try_to_fix_pino(inode);
		clear_inode_flag(inode, FI_APPEND_WRITE);
		clear_inode_flag(inode, FI_UPDATE_WRITE);
		goto out;
	}
sync_nodes:
	atomic_inc(&sbi->wb_sync_req[NODE]);
	ret = f2fs_fsync_node_pages(sbi, inode, &wbc, atomic, &seq_id);
	atomic_dec(&sbi->wb_sync_req[NODE]);
	if (ret)
		goto out;

	/* if cp_error was enabled, we should avoid infinite loop */
	if (unlikely(f2fs_cp_error(sbi))) {
		ret = -EIO;
		goto out;
	}

	if (f2fs_need_inode_block_update(sbi, ino)) {
		f2fs_mark_inode_dirty_sync(inode, true);
		f2fs_write_inode(inode, NULL);
		goto sync_nodes;
	}

	/*
	 * If it's atomic_write, it's just fine to keep write ordering. So
	 * here we don't need to wait for node write completion, since we use
	 * node chain which serializes node blocks. If one of node writes are
	 * reordered, we can see simply broken chain, resulting in stopping
	 * roll-forward recovery. It means we'll recover all or none node blocks
	 * given fsync mark.
	 */
	if (!atomic) {
		ret = f2fs_wait_on_node_pages_writeback(sbi, seq_id);
		if (ret)
			goto out;
	}

	/* once recovery info is written, don't need to track this */
	f2fs_remove_ino_entry(sbi, ino, APPEND_INO);
	clear_inode_flag(inode, FI_APPEND_WRITE);
flush_out:
	if (!atomic && F2FS_OPTION(sbi).fsync_mode != FSYNC_MODE_NOBARRIER)
		ret = f2fs_issue_flush(sbi, inode->i_ino);
	if (!ret) {
		f2fs_remove_ino_entry(sbi, ino, UPDATE_INO);
		clear_inode_flag(inode, FI_UPDATE_WRITE);
		f2fs_remove_ino_entry(sbi, ino, FLUSH_INO);
	}
	f2fs_update_time(sbi, REQ_TIME);
out:
	trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
	f2fs_trace_ios(NULL, 1);
	return ret;
}

int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{
	if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(file)))))
		return -EIO;
	return f2fs_do_sync_file(file, start, end, datasync, false);
}

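/*
 * Helpers for SEEK_DATA/SEEK_HOLE.  A dirty page that has not been
 * allocated on disk yet still counts as data, so the first dirty page
 * index is consulted alongside the on-disk block addresses.
 */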
static pgoff_t __get_first_dirty_index(struct address_space *mapping,
						pgoff_t pgofs, int whence)
{
	struct page *page;
	int nr_pages;

	if (whence != SEEK_DATA)
		return 0;

	/* find first dirty page index */
	nr_pages = find_get_pages_tag(mapping, &pgofs, PAGECACHE_TAG_DIRTY,
				      1, &page);
	if (!nr_pages)
		return ULONG_MAX;
	pgofs = page->index;
	put_page(page);
	return pgofs;
}

static bool __found_offset(struct f2fs_sb_info *sbi, block_t blkaddr,
				pgoff_t dirty, pgoff_t pgofs, int whence)
{
	switch (whence) {
	case SEEK_DATA:
		if ((blkaddr == NEW_ADDR && dirty == pgofs) ||
			is_valid_data_blkaddr(sbi, blkaddr))
			return true;
		break;
	case SEEK_HOLE:
		if (blkaddr == NULL_ADDR)
			return true;
		break;
	}
	return false;
}

static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes = inode->i_sb->s_maxbytes;
	struct dnode_of_data dn;
	pgoff_t pgofs, end_offset, dirty;
	loff_t data_ofs = offset;
	loff_t isize;
	int err = 0;

	inode_lock(inode);

	isize = i_size_read(inode);
	if (offset >= isize)
		goto fail;

	/* handle inline data case */
	if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode)) {
		if (whence == SEEK_HOLE)
			data_ofs = isize;
		goto found;
	}

	pgofs = (pgoff_t)(offset >> PAGE_SHIFT);

	dirty = __get_first_dirty_index(inode->i_mapping, pgofs, whence);

	for (; data_ofs < isize; data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = f2fs_get_dnode_of_data(&dn, pgofs, LOOKUP_NODE);
		if (err && err != -ENOENT) {
			goto fail;
		} else if (err == -ENOENT) {
			/* direct node does not exist */
			if (whence == SEEK_DATA) {
				pgofs = f2fs_get_next_page_offset(&dn, pgofs);
				continue;
			} else {
				goto found;
			}
		}

		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);

		/* find data/hole in dnode block */
		for (; dn.ofs_in_node < end_offset;
				dn.ofs_in_node++, pgofs++,
				data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
			block_t blkaddr;

			blkaddr = datablock_addr(dn.inode,
					dn.node_page, dn.ofs_in_node);

			if (__is_valid_data_blkaddr(blkaddr) &&
				!f2fs_is_valid_blkaddr(F2FS_I_SB(inode),
						blkaddr, DATA_GENERIC)) {
				f2fs_put_dnode(&dn);
				goto fail;
			}

			if (__found_offset(F2FS_I_SB(inode), blkaddr, dirty,
							pgofs, whence)) {
				f2fs_put_dnode(&dn);
				goto found;
			}
		}
		f2fs_put_dnode(&dn);
	}

	if (whence == SEEK_DATA)
		goto fail;
found:
	if (whence == SEEK_HOLE && data_ofs > isize)
		data_ofs = isize;
	inode_unlock(inode);
	return vfs_setpos(file, data_ofs, maxbytes);
fail:
	inode_unlock(inode);
	return -ENXIO;
}

static loff_t f2fs_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes = inode->i_sb->s_maxbytes;

	switch (whence) {
	case SEEK_SET:
	case SEEK_CUR:
	case SEEK_END:
		return generic_file_llseek_size(file, offset, whence,
						maxbytes, i_size_read(inode));
	case SEEK_DATA:
	case SEEK_HOLE:
		if (offset < 0)
			return -ENXIO;
		return f2fs_seek_block(file, offset, whence);
	}

	return -EINVAL;
}

static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(file);
	int err;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;

	/* we don't need to use inline_data strictly */
	err = f2fs_convert_inline_inode(inode);
	if (err)
		return err;

	file_accessed(file);
	vma->vm_ops = &f2fs_file_vm_ops;
	return 0;
}

static int f2fs_file_open(struct inode *inode, struct file *filp)
{
	int err = fscrypt_file_open(inode, filp);

	if (err)
		return err;

	filp->f_mode |= FMODE_NOWAIT;

	return dquot_file_open(inode, filp);
}

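/*
 * Free @count block addresses starting at dn->ofs_in_node in the given
 * dnode, dropping the matching extent-cache range and the inode's
 * valid-block count.  The caller is expected to have looked up (and to
 * hold) dn->node_page.
 */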
void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct f2fs_node *raw_node;
	int nr_free = 0, ofs = dn->ofs_in_node, len = count;
	__le32 *addr;
	int base = 0;

	if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode))
		base = get_extra_isize(dn->inode);

	raw_node = F2FS_NODE(dn->node_page);
	addr = blkaddr_in_node(raw_node) + base + ofs;

	for (; count > 0; count--, addr++, dn->ofs_in_node++) {
		block_t blkaddr = le32_to_cpu(*addr);

		if (blkaddr == NULL_ADDR)
			continue;

		dn->data_blkaddr = NULL_ADDR;
		f2fs_set_data_blkaddr(dn);

		if (__is_valid_data_blkaddr(blkaddr) &&
			!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC))
			continue;

		f2fs_invalidate_blocks(sbi, blkaddr);
		if (dn->ofs_in_node == 0 && IS_INODE(dn->node_page))
			clear_inode_flag(dn->inode, FI_FIRST_BLOCK_WRITTEN);
		nr_free++;
	}

	if (nr_free) {
		pgoff_t fofs;
		/*
		 * once we invalidate valid blkaddr in range [ofs, ofs + count],
		 * we will invalidate all blkaddr in the whole range.
		 */
		fofs = f2fs_start_bidx_of_node(ofs_of_node(dn->node_page),
							dn->inode) + ofs;
		f2fs_update_extent_cache_range(dn, fofs, 0, len);
		dec_valid_block_count(sbi, dn->inode, nr_free);
	}
	dn->ofs_in_node = ofs;

	f2fs_update_time(sbi, REQ_TIME);
	trace_f2fs_truncate_data_blocks_range(dn->inode, dn->nid,
					 dn->ofs_in_node, nr_free);
}

void f2fs_truncate_data_blocks(struct dnode_of_data *dn)
{
	f2fs_truncate_data_blocks_range(dn, ADDRS_PER_BLOCK);
}

static int truncate_partial_data_page(struct inode *inode, u64 from,
							bool cache_only)
{
	loff_t offset = from & (PAGE_SIZE - 1);
	pgoff_t index = from >> PAGE_SHIFT;
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	if (!offset && !cache_only)
		return 0;

	if (cache_only) {
		page = find_lock_page(mapping, index);
		if (page && PageUptodate(page))
			goto truncate_out;
		f2fs_put_page(page, 1);
		return 0;
	}

	page = f2fs_get_lock_data_page(inode, index, true);
	if (IS_ERR(page))
		return PTR_ERR(page) == -ENOENT ? 0 : PTR_ERR(page);
truncate_out:
	f2fs_wait_on_page_writeback(page, DATA, true);
	zero_user(page, offset, PAGE_SIZE - offset);

	/* An encrypted inode should have a key and truncate the last page. */
	f2fs_bug_on(F2FS_I_SB(inode), cache_only && f2fs_encrypted_inode(inode));
	if (!cache_only)
		set_page_dirty(page);
	f2fs_put_page(page, 1);
	return 0;
}

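/*
 * Truncate all blocks past @from.  Inline inodes are truncated in place;
 * otherwise the blocks in the dnode containing @from are freed directly
 * and everything beyond it is handed to f2fs_truncate_inode_blocks().
 * Finally the partial page at @from is zeroed so stale data is not
 * exposed past the new EOF.
 */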
int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock,
							bool buf_write)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	pgoff_t free_from;
	int count = 0, err = 0;
	struct page *ipage;
	bool truncate_page = false;
	int flag = buf_write ? F2FS_GET_BLOCK_PRE_AIO : F2FS_GET_BLOCK_PRE_DIO;

	trace_f2fs_truncate_blocks_enter(inode, from);

	free_from = (pgoff_t)F2FS_BLK_ALIGN(from);

	if (free_from >= sbi->max_file_blocks)
		goto free_partial;

	if (lock)
		__do_map_lock(sbi, flag, true);

	ipage = f2fs_get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto out;
	}

	if (f2fs_has_inline_data(inode)) {
		f2fs_truncate_inline_inode(inode, ipage, from);
		f2fs_put_page(ipage, 1);
		truncate_page = true;
		goto out;
	}

	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = f2fs_get_dnode_of_data(&dn, free_from, LOOKUP_NODE_RA);
	if (err) {
		if (err == -ENOENT)
			goto free_next;
		goto out;
	}

	count = ADDRS_PER_PAGE(dn.node_page, inode);

	count -= dn.ofs_in_node;
	f2fs_bug_on(sbi, count < 0);

	if (dn.ofs_in_node || IS_INODE(dn.node_page)) {
		f2fs_truncate_data_blocks_range(&dn, count);
		free_from += count;
	}

	f2fs_put_dnode(&dn);
free_next:
	err = f2fs_truncate_inode_blocks(inode, free_from);
out:
	if (lock)
		__do_map_lock(sbi, flag, false);
free_partial:
	/* lastly zero out the first data page */
	if (!err)
		err = truncate_partial_data_page(inode, from, truncate_page);

	trace_f2fs_truncate_blocks_exit(inode, err);
	return err;
}

int f2fs_truncate(struct inode *inode)
{
	int err;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;

	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
				S_ISLNK(inode->i_mode)))
		return 0;

	trace_f2fs_truncate(inode);

	if (time_to_inject(F2FS_I_SB(inode), FAULT_TRUNCATE)) {
		f2fs_show_injection_info(FAULT_TRUNCATE);
		return -EIO;
	}

	/* we should check inline_data size */
	if (!f2fs_may_inline_data(inode)) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			return err;
	}

	err = f2fs_truncate_blocks(inode, i_size_read(inode), true, false);
	if (err)
		return err;

	inode->i_mtime = inode->i_ctime = current_time(inode);
	f2fs_mark_inode_dirty_sync(inode, false);
	return 0;
}

int f2fs_getattr(const struct path *path, struct kstat *stat,
		 u32 request_mask, unsigned int query_flags)
{
	struct inode *inode = d_inode(path->dentry);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_inode *ri;
	unsigned int flags;

	if (f2fs_has_extra_attr(inode) &&
			f2fs_sb_has_inode_crtime(inode->i_sb) &&
			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_crtime)) {
		stat->result_mask |= STATX_BTIME;
		stat->btime.tv_sec = fi->i_crtime.tv_sec;
		stat->btime.tv_nsec = fi->i_crtime.tv_nsec;
	}

	flags = fi->i_flags & F2FS_FL_USER_VISIBLE;
	if (flags & F2FS_APPEND_FL)
		stat->attributes |= STATX_ATTR_APPEND;
	if (flags & F2FS_COMPR_FL)
		stat->attributes |= STATX_ATTR_COMPRESSED;
	if (f2fs_encrypted_inode(inode))
		stat->attributes |= STATX_ATTR_ENCRYPTED;
	if (flags & F2FS_IMMUTABLE_FL)
		stat->attributes |= STATX_ATTR_IMMUTABLE;
	if (flags & F2FS_NODUMP_FL)
		stat->attributes |= STATX_ATTR_NODUMP;

	stat->attributes_mask |= (STATX_ATTR_APPEND |
				  STATX_ATTR_COMPRESSED |
				  STATX_ATTR_ENCRYPTED |
				  STATX_ATTR_IMMUTABLE |
				  STATX_ATTR_NODUMP);

	generic_fillattr(inode, stat);

	/* we need to show initial sectors used for inline_data/dentries */
	if ((S_ISREG(inode->i_mode) && f2fs_has_inline_data(inode)) ||
					f2fs_has_inline_dentry(inode))
		stat->blocks += (stat->size + 511) >> 9;

	return 0;
}

#ifdef CONFIG_F2FS_FS_POSIX_ACL
static void __setattr_copy(struct inode *inode, const struct iattr *attr)
{
	unsigned int ia_valid = attr->ia_valid;

	if (ia_valid & ATTR_UID)
		inode->i_uid = attr->ia_uid;
	if (ia_valid & ATTR_GID)
		inode->i_gid = attr->ia_gid;
	if (ia_valid & ATTR_ATIME)
		inode->i_atime = timespec64_trunc(attr->ia_atime,
						  inode->i_sb->s_time_gran);
	if (ia_valid & ATTR_MTIME)
		inode->i_mtime = timespec64_trunc(attr->ia_mtime,
						  inode->i_sb->s_time_gran);
	if (ia_valid & ATTR_CTIME)
		inode->i_ctime = timespec64_trunc(attr->ia_ctime,
						  inode->i_sb->s_time_gran);
	if (ia_valid & ATTR_MODE) {
		umode_t mode = attr->ia_mode;

		if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
			mode &= ~S_ISGID;
		set_acl_inode(inode, mode);
	}
}
#else
#define __setattr_copy setattr_copy
#endif

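/*
 * ->setattr: uid/gid changes are done under lock_op() so quota and inode
 * state move together; size changes take i_gc_rwsem[WRITE] and i_mmap_sem
 * to keep GC and page faults away while the mapping shrinks or grows.
 */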
int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	int err;
	bool size_changed = false;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;

	err = setattr_prepare(dentry, attr);
	if (err)
		return err;

	err = fscrypt_prepare_setattr(dentry, attr);
	if (err)
		return err;

	if (is_quota_modification(inode, attr)) {
		err = dquot_initialize(inode);
		if (err)
			return err;
	}
	if ((attr->ia_valid & ATTR_UID &&
		!uid_eq(attr->ia_uid, inode->i_uid)) ||
		(attr->ia_valid & ATTR_GID &&
		!gid_eq(attr->ia_gid, inode->i_gid))) {
		f2fs_lock_op(F2FS_I_SB(inode));
		err = dquot_transfer(inode, attr);
		if (err) {
			set_sbi_flag(F2FS_I_SB(inode),
					SBI_QUOTA_NEED_REPAIR);
			f2fs_unlock_op(F2FS_I_SB(inode));
			return err;
		}
		/*
		 * update uid/gid under lock_op(), so that dquot and inode can
		 * be updated atomically.
		 */
		if (attr->ia_valid & ATTR_UID)
			inode->i_uid = attr->ia_uid;
		if (attr->ia_valid & ATTR_GID)
			inode->i_gid = attr->ia_gid;
		f2fs_mark_inode_dirty_sync(inode, true);
		f2fs_unlock_op(F2FS_I_SB(inode));
	}

	if (attr->ia_valid & ATTR_SIZE) {
		bool to_smaller = (attr->ia_size <= i_size_read(inode));

		down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
		down_write(&F2FS_I(inode)->i_mmap_sem);

		truncate_setsize(inode, attr->ia_size);

		if (to_smaller)
			err = f2fs_truncate(inode);
		/*
		 * do not trim all blocks after i_size if target size is
		 * larger than i_size.
		 */
		up_write(&F2FS_I(inode)->i_mmap_sem);
		up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

		if (err)
			return err;

		if (!to_smaller) {
			/* should convert inline inode here */
			if (!f2fs_may_inline_data(inode)) {
				err = f2fs_convert_inline_inode(inode);
				if (err)
					return err;
			}
			inode->i_mtime = inode->i_ctime = current_time(inode);
		}

		down_write(&F2FS_I(inode)->i_sem);
		F2FS_I(inode)->last_disk_size = i_size_read(inode);
		up_write(&F2FS_I(inode)->i_sem);

		size_changed = true;
	}

	__setattr_copy(inode, attr);

	if (attr->ia_valid & ATTR_MODE) {
		err = posix_acl_chmod(inode, f2fs_get_inode_mode(inode));
		if (err || is_inode_flag_set(inode, FI_ACL_MODE)) {
			inode->i_mode = F2FS_I(inode)->i_acl_mode;
			clear_inode_flag(inode, FI_ACL_MODE);
		}
	}

	/* file size may have changed here */
	f2fs_mark_inode_dirty_sync(inode, size_changed);

	/* inode change will produce dirty node pages flushed by checkpoint */
	f2fs_balance_fs(F2FS_I_SB(inode), true);

	return err;
}

const struct inode_operations f2fs_file_inode_operations = {
	.getattr	= f2fs_getattr,
	.setattr	= f2fs_setattr,
	.get_acl	= f2fs_get_acl,
	.set_acl	= f2fs_set_acl,
#ifdef CONFIG_F2FS_FS_XATTR
	.listxattr	= f2fs_listxattr,
#endif
	.fiemap		= f2fs_fiemap,
};

static int fill_zero(struct inode *inode, pgoff_t index,
					loff_t start, loff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page;

	if (!len)
		return 0;

	f2fs_balance_fs(sbi, true);

	f2fs_lock_op(sbi);
	page = f2fs_get_new_data_page(inode, NULL, index, false);
	f2fs_unlock_op(sbi);

	if (IS_ERR(page))
		return PTR_ERR(page);

	f2fs_wait_on_page_writeback(page, DATA, true);
	zero_user(page, start, len);
	set_page_dirty(page);
	f2fs_put_page(page, 1);
	return 0;
}

int f2fs_truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end)
{
	int err;

	while (pg_start < pg_end) {
		struct dnode_of_data dn;
		pgoff_t end_offset, count;

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = f2fs_get_dnode_of_data(&dn, pg_start, LOOKUP_NODE);
		if (err) {
			if (err == -ENOENT) {
				pg_start = f2fs_get_next_page_offset(&dn,
								pg_start);
				continue;
			}
			return err;
		}

		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
		count = min(end_offset - dn.ofs_in_node, pg_end - pg_start);

		f2fs_bug_on(F2FS_I_SB(inode), count == 0 || count > end_offset);

		f2fs_truncate_data_blocks_range(&dn, count);
		f2fs_put_dnode(&dn);

		pg_start += count;
	}
	return 0;
}

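/*
 * FALLOC_FL_PUNCH_HOLE: zero the partial head/tail pages with fill_zero()
 * and drop every whole block in between via f2fs_truncate_hole().
 */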
static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
	pgoff_t pg_start, pg_end;
	loff_t off_start, off_end;
	int ret;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
	pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;

	off_start = offset & (PAGE_SIZE - 1);
	off_end = (offset + len) & (PAGE_SIZE - 1);

	if (pg_start == pg_end) {
		ret = fill_zero(inode, pg_start, off_start,
						off_end - off_start);
		if (ret)
			return ret;
	} else {
		if (off_start) {
			ret = fill_zero(inode, pg_start++, off_start,
						PAGE_SIZE - off_start);
			if (ret)
				return ret;
		}
		if (off_end) {
			ret = fill_zero(inode, pg_end, 0, off_end);
			if (ret)
				return ret;
		}

		if (pg_start < pg_end) {
			struct address_space *mapping = inode->i_mapping;
			loff_t blk_start, blk_end;
			struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

			f2fs_balance_fs(sbi, true);

			blk_start = (loff_t)pg_start << PAGE_SHIFT;
			blk_end = (loff_t)pg_end << PAGE_SHIFT;

			down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
			down_write(&F2FS_I(inode)->i_mmap_sem);

			truncate_inode_pages_range(mapping, blk_start,
					blk_end - 1);

			f2fs_lock_op(sbi);
			ret = f2fs_truncate_hole(inode, pg_start, pg_end);
			f2fs_unlock_op(sbi);

			up_write(&F2FS_I(inode)->i_mmap_sem);
			up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
		}
	}

	return ret;
}

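/*
 * Block-exchange machinery used by collapse/insert range and the
 * move-range ioctl: __read_out_blkaddrs() records (and detaches) source
 * block addresses, __clone_blkaddrs() rewires or copies them into the
 * destination, and __roll_back_blkaddrs() restores the source mapping on
 * failure.
 */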
static int __read_out_blkaddrs(struct inode *inode, block_t *blkaddr,
				int *do_replace, pgoff_t off, pgoff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	int ret, done, i;

next_dnode:
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	ret = f2fs_get_dnode_of_data(&dn, off, LOOKUP_NODE_RA);
	if (ret && ret != -ENOENT) {
		return ret;
	} else if (ret == -ENOENT) {
		if (dn.max_level == 0)
			return -ENOENT;
		done = min((pgoff_t)ADDRS_PER_BLOCK - dn.ofs_in_node, len);
		blkaddr += done;
		do_replace += done;
		goto next;
	}

	done = min((pgoff_t)ADDRS_PER_PAGE(dn.node_page, inode) -
						dn.ofs_in_node, len);
	for (i = 0; i < done; i++, blkaddr++, do_replace++, dn.ofs_in_node++) {
		*blkaddr = datablock_addr(dn.inode,
					dn.node_page, dn.ofs_in_node);
		if (!f2fs_is_checkpointed_data(sbi, *blkaddr)) {

			if (test_opt(sbi, LFS)) {
				f2fs_put_dnode(&dn);
				return -ENOTSUPP;
			}

			/* do not invalidate this block address */
			f2fs_update_data_blkaddr(&dn, NULL_ADDR);
			*do_replace = 1;
		}
	}
	f2fs_put_dnode(&dn);
next:
	len -= done;
	off += done;
	if (len)
		goto next_dnode;
	return 0;
}

static int __roll_back_blkaddrs(struct inode *inode, block_t *blkaddr,
				int *do_replace, pgoff_t off, int len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	int ret, i;

	for (i = 0; i < len; i++, do_replace++, blkaddr++) {
		if (*do_replace == 0)
			continue;

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		ret = f2fs_get_dnode_of_data(&dn, off + i, LOOKUP_NODE_RA);
		if (ret) {
			dec_valid_block_count(sbi, inode, 1);
			f2fs_invalidate_blocks(sbi, *blkaddr);
		} else {
			f2fs_update_data_blkaddr(&dn, *blkaddr);
		}
		f2fs_put_dnode(&dn);
	}
	return 0;
}

static int __clone_blkaddrs(struct inode *src_inode, struct inode *dst_inode,
			block_t *blkaddr, int *do_replace,
			pgoff_t src, pgoff_t dst, pgoff_t len, bool full)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(src_inode);
	pgoff_t i = 0;
	int ret;

	while (i < len) {
		if (blkaddr[i] == NULL_ADDR && !full) {
			i++;
			continue;
		}

		if (do_replace[i] || blkaddr[i] == NULL_ADDR) {
			struct dnode_of_data dn;
			struct node_info ni;
			size_t new_size;
			pgoff_t ilen;

			set_new_dnode(&dn, dst_inode, NULL, NULL, 0);
			ret = f2fs_get_dnode_of_data(&dn, dst + i, ALLOC_NODE);
			if (ret)
				return ret;

			ret = f2fs_get_node_info(sbi, dn.nid, &ni);
			if (ret) {
				f2fs_put_dnode(&dn);
				return ret;
			}

			ilen = min((pgoff_t)
				ADDRS_PER_PAGE(dn.node_page, dst_inode) -
						dn.ofs_in_node, len - i);
			do {
				dn.data_blkaddr = datablock_addr(dn.inode,
						dn.node_page, dn.ofs_in_node);
				f2fs_truncate_data_blocks_range(&dn, 1);

				if (do_replace[i]) {
					f2fs_i_blocks_write(src_inode,
							1, false, false);
					f2fs_i_blocks_write(dst_inode,
							1, true, false);
					f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
					blkaddr[i], ni.version, true, false);

					do_replace[i] = 0;
				}
				dn.ofs_in_node++;
				i++;
				new_size = (dst + i) << PAGE_SHIFT;
				if (dst_inode->i_size < new_size)
					f2fs_i_size_write(dst_inode, new_size);
			} while (--ilen && (do_replace[i] || blkaddr[i] == NULL_ADDR));

			f2fs_put_dnode(&dn);
		} else {
			struct page *psrc, *pdst;

			psrc = f2fs_get_lock_data_page(src_inode,
							src + i, true);
			if (IS_ERR(psrc))
				return PTR_ERR(psrc);
			pdst = f2fs_get_new_data_page(dst_inode, NULL, dst + i,
								true);
			if (IS_ERR(pdst)) {
				f2fs_put_page(psrc, 1);
				return PTR_ERR(pdst);
			}
			f2fs_copy_page(psrc, pdst);
			set_page_dirty(pdst);
			f2fs_put_page(pdst, 1);
			f2fs_put_page(psrc, 1);

			ret = f2fs_truncate_hole(src_inode,
						src + i, src + i + 1);
			if (ret)
				return ret;
			i++;
		}
	}
	return 0;
}

static int __exchange_data_block(struct inode *src_inode,
			struct inode *dst_inode, pgoff_t src, pgoff_t dst,
			pgoff_t len, bool full)
{
	block_t *src_blkaddr;
	int *do_replace;
	pgoff_t olen;
	int ret;

	while (len) {
		olen = min((pgoff_t)4 * ADDRS_PER_BLOCK, len);

		src_blkaddr = f2fs_kvzalloc(F2FS_I_SB(src_inode),
					array_size(olen, sizeof(block_t)),
					GFP_KERNEL);
		if (!src_blkaddr)
			return -ENOMEM;

		do_replace = f2fs_kvzalloc(F2FS_I_SB(src_inode),
					array_size(olen, sizeof(int)),
					GFP_KERNEL);
		if (!do_replace) {
			kvfree(src_blkaddr);
			return -ENOMEM;
		}

		ret = __read_out_blkaddrs(src_inode, src_blkaddr,
					do_replace, src, olen);
		if (ret)
			goto roll_back;

		ret = __clone_blkaddrs(src_inode, dst_inode, src_blkaddr,
					do_replace, src, dst, olen, full);
		if (ret)
			goto roll_back;

		src += olen;
		dst += olen;
		len -= olen;

		kvfree(src_blkaddr);
		kvfree(do_replace);
	}
	return 0;

roll_back:
	__roll_back_blkaddrs(src_inode, src_blkaddr, do_replace, src, olen);
	kvfree(src_blkaddr);
	kvfree(do_replace);
	return ret;
}

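/*
 * FALLOC_FL_COLLAPSE_RANGE helper: with GC and mmap excluded, shift every
 * block after the collapsed range toward the start of the file, one
 * exchange batch at a time.
 */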
static int f2fs_do_collapse(struct inode *inode, loff_t offset, loff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	pgoff_t nrpages = (i_size_read(inode) + PAGE_SIZE - 1) / PAGE_SIZE;
	pgoff_t start = offset >> PAGE_SHIFT;
	pgoff_t end = (offset + len) >> PAGE_SHIFT;
	int ret;

	f2fs_balance_fs(sbi, true);

	/* avoid gc operation during block exchange */
	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
	down_write(&F2FS_I(inode)->i_mmap_sem);

	f2fs_lock_op(sbi);
	f2fs_drop_extent_tree(inode);
	truncate_pagecache(inode, offset);
	ret = __exchange_data_block(inode, inode, end, start, nrpages - end, true);
	f2fs_unlock_op(sbi);

	up_write(&F2FS_I(inode)->i_mmap_sem);
	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
	return ret;
}

static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
{
	loff_t new_size;
	int ret;

	if (offset + len >= i_size_read(inode))
		return -EINVAL;

	/* collapse range should be aligned to block size of f2fs. */
	if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
		return -EINVAL;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	if (ret)
		return ret;

	ret = f2fs_do_collapse(inode, offset, len);
	if (ret)
		return ret;

	/* write out all moved pages, if possible */
	down_write(&F2FS_I(inode)->i_mmap_sem);
	filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	truncate_pagecache(inode, offset);

	new_size = i_size_read(inode) - len;
	truncate_pagecache(inode, new_size);

	ret = f2fs_truncate_blocks(inode, new_size, true, false);
	up_write(&F2FS_I(inode)->i_mmap_sem);
	if (!ret)
		f2fs_i_size_write(inode, new_size);
	return ret;
}

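/*
 * FALLOC_FL_ZERO_RANGE helper: reserve any missing blocks in the dnode,
 * then turn each mapped address into NEW_ADDR so subsequent reads see
 * zeroes.
 */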
static int f2fs_do_zero_range(struct dnode_of_data *dn, pgoff_t start,
								pgoff_t end)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	pgoff_t index = start;
	unsigned int ofs_in_node = dn->ofs_in_node;
	blkcnt_t count = 0;
	int ret;

	for (; index < end; index++, dn->ofs_in_node++) {
		if (datablock_addr(dn->inode, dn->node_page,
					dn->ofs_in_node) == NULL_ADDR)
			count++;
	}

	dn->ofs_in_node = ofs_in_node;
	ret = f2fs_reserve_new_blocks(dn, count);
	if (ret)
		return ret;

	dn->ofs_in_node = ofs_in_node;
	for (index = start; index < end; index++, dn->ofs_in_node++) {
		dn->data_blkaddr = datablock_addr(dn->inode,
					dn->node_page, dn->ofs_in_node);
		/*
		 * f2fs_reserve_new_blocks will not guarantee entire block
		 * allocation.
		 */
		if (dn->data_blkaddr == NULL_ADDR) {
			ret = -ENOSPC;
			break;
		}
		if (dn->data_blkaddr != NEW_ADDR) {
			f2fs_invalidate_blocks(sbi, dn->data_blkaddr);
			dn->data_blkaddr = NEW_ADDR;
			f2fs_set_data_blkaddr(dn);
		}
	}

	f2fs_update_extent_cache_range(dn, start, 0, index - start);

	return ret;
}

static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
								int mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct address_space *mapping = inode->i_mapping;
	pgoff_t index, pg_start, pg_end;
	loff_t new_size = i_size_read(inode);
	loff_t off_start, off_end;
	int ret = 0;

	ret = inode_newsize_ok(inode, (len + offset));
	if (ret)
		return ret;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	ret = filemap_write_and_wait_range(mapping, offset, offset + len - 1);
	if (ret)
		return ret;

	pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
	pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;

	off_start = offset & (PAGE_SIZE - 1);
	off_end = (offset + len) & (PAGE_SIZE - 1);

	if (pg_start == pg_end) {
		ret = fill_zero(inode, pg_start, off_start,
						off_end - off_start);
		if (ret)
			return ret;

		new_size = max_t(loff_t, new_size, offset + len);
	} else {
		if (off_start) {
			ret = fill_zero(inode, pg_start++, off_start,
						PAGE_SIZE - off_start);
			if (ret)
				return ret;

			new_size = max_t(loff_t, new_size,
					(loff_t)pg_start << PAGE_SHIFT);
		}

		for (index = pg_start; index < pg_end;) {
			struct dnode_of_data dn;
			unsigned int end_offset;
			pgoff_t end;

			down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
			down_write(&F2FS_I(inode)->i_mmap_sem);

			truncate_pagecache_range(inode,
				(loff_t)index << PAGE_SHIFT,
				((loff_t)pg_end << PAGE_SHIFT) - 1);

			f2fs_lock_op(sbi);

			set_new_dnode(&dn, inode, NULL, NULL, 0);
			ret = f2fs_get_dnode_of_data(&dn, index, ALLOC_NODE);
			if (ret) {
				f2fs_unlock_op(sbi);
				up_write(&F2FS_I(inode)->i_mmap_sem);
				up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
				goto out;
			}

			end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
			end = min(pg_end, end_offset - dn.ofs_in_node + index);

			ret = f2fs_do_zero_range(&dn, index, end);
			f2fs_put_dnode(&dn);

			f2fs_unlock_op(sbi);
			up_write(&F2FS_I(inode)->i_mmap_sem);
			up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

			f2fs_balance_fs(sbi, dn.node_changed);

			if (ret)
				goto out;

			index = end;
			new_size = max_t(loff_t, new_size,
					(loff_t)index << PAGE_SHIFT);
		}

		if (off_end) {
			ret = fill_zero(inode, pg_end, 0, off_end);
			if (ret)
				goto out;

			new_size = max_t(loff_t, new_size, offset + len);
		}
	}

out:
	if (new_size > i_size_read(inode)) {
		if (mode & FALLOC_FL_KEEP_SIZE)
			file_set_keep_isize(inode);
		else
			f2fs_i_size_write(inode, new_size);
	}
	return ret;
}

static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	pgoff_t nr, pg_start, pg_end, delta, idx;
	loff_t new_size;
	int ret = 0;

	new_size = i_size_read(inode) + len;
	ret = inode_newsize_ok(inode, new_size);
	if (ret)
		return ret;

	if (offset >= i_size_read(inode))
		return -EINVAL;

	/* insert range should be aligned to block size of f2fs. */
	if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
		return -EINVAL;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	f2fs_balance_fs(sbi, true);

	down_write(&F2FS_I(inode)->i_mmap_sem);
	ret = f2fs_truncate_blocks(inode, i_size_read(inode), true, false);
	up_write(&F2FS_I(inode)->i_mmap_sem);
	if (ret)
		return ret;

	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	if (ret)
		return ret;

	pg_start = offset >> PAGE_SHIFT;
	pg_end = (offset + len) >> PAGE_SHIFT;
	delta = pg_end - pg_start;
	idx = (i_size_read(inode) + PAGE_SIZE - 1) / PAGE_SIZE;

	/* avoid gc operation during block exchange */
	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
	down_write(&F2FS_I(inode)->i_mmap_sem);
	truncate_pagecache(inode, offset);

	while (!ret && idx > pg_start) {
		nr = idx - pg_start;
		if (nr > delta)
			nr = delta;
		idx -= nr;

		f2fs_lock_op(sbi);
		f2fs_drop_extent_tree(inode);

		ret = __exchange_data_block(inode, inode, idx,
					idx + delta, nr, false);
		f2fs_unlock_op(sbi);
	}
	up_write(&F2FS_I(inode)->i_mmap_sem);
	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

	/* write out all moved pages, if possible */
	down_write(&F2FS_I(inode)->i_mmap_sem);
	filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	truncate_pagecache(inode, offset);
	up_write(&F2FS_I(inode)->i_mmap_sem);

	if (!ret)
		f2fs_i_size_write(inode, new_size);
	return ret;
}

static int expand_inode_data(struct inode *inode, loff_t offset,
					loff_t len, int mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_map_blocks map = { .m_next_pgofs = NULL,
			.m_next_extent = NULL, .m_seg_type = NO_CHECK_TYPE };
	pgoff_t pg_end;
	loff_t new_size = i_size_read(inode);
	loff_t off_end;
	int err;

	err = inode_newsize_ok(inode, (len + offset));
	if (err)
		return err;

	err = f2fs_convert_inline_inode(inode);
	if (err)
		return err;

	f2fs_balance_fs(sbi, true);

	pg_end = ((unsigned long long)offset + len) >> PAGE_SHIFT;
	off_end = (offset + len) & (PAGE_SIZE - 1);

	map.m_lblk = ((unsigned long long)offset) >> PAGE_SHIFT;
	map.m_len = pg_end - map.m_lblk;
	if (off_end)
		map.m_len++;

	err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO);
	if (err) {
		pgoff_t last_off;

		if (!map.m_len)
			return err;

		last_off = map.m_lblk + map.m_len - 1;

		/* update new size to the failed position */
		new_size = (last_off == pg_end) ? offset + len :
					(loff_t)(last_off + 1) << PAGE_SHIFT;
	} else {
		new_size = ((loff_t)pg_end << PAGE_SHIFT) + off_end;
	}

	if (new_size > i_size_read(inode)) {
		if (mode & FALLOC_FL_KEEP_SIZE)
			file_set_keep_isize(inode);
		else
			f2fs_i_size_write(inode, new_size);
	}

	return err;
}

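/*
 * ->fallocate entry point.  The mode bits map onto the helpers above.
 * From userspace the punch-hole case, for example, is typically driven
 * like this (illustrative sketch only; error handling omitted):
 *
 *	int fd = open("/mnt/f2fs/file", O_RDWR);
 *
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 *		  4096, 8192);
 *
 * Per the fallocate(2) contract, FALLOC_FL_PUNCH_HOLE must be paired with
 * FALLOC_FL_KEEP_SIZE.
 */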
static long f2fs_fallocate(struct file *file, int mode,
				loff_t offset, loff_t len)
{
	struct inode *inode = file_inode(file);
	long ret = 0;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;

	/* f2fs only supports ->fallocate for regular files */
	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	if (f2fs_encrypted_inode(inode) &&
		(mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)))
		return -EOPNOTSUPP;

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
			FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |
			FALLOC_FL_INSERT_RANGE))
		return -EOPNOTSUPP;

	inode_lock(inode);

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		if (offset >= inode->i_size)
			goto out;

		ret = punch_hole(inode, offset, len);
	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
		ret = f2fs_collapse_range(inode, offset, len);
	} else if (mode & FALLOC_FL_ZERO_RANGE) {
		ret = f2fs_zero_range(inode, offset, len, mode);
	} else if (mode & FALLOC_FL_INSERT_RANGE) {
		ret = f2fs_insert_range(inode, offset, len);
	} else {
		ret = expand_inode_data(inode, offset, len, mode);
	}

	if (!ret) {
		inode->i_mtime = inode->i_ctime = current_time(inode);
		f2fs_mark_inode_dirty_sync(inode, false);
		f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	}

out:
	inode_unlock(inode);

	trace_f2fs_fallocate(inode, mode, offset, len, ret);
	return ret;
}

static int f2fs_release_file(struct inode *inode, struct file *filp)
{
	/*
	 * f2fs_release_file is called at every close call, so we should
	 * not drop any in-memory pages on a close issued by another process.
	 */
	if (!(filp->f_mode & FMODE_WRITE) ||
			atomic_read(&inode->i_writecount) != 1)
		return 0;

	/* any remaining atomic pages should be discarded */
	if (f2fs_is_atomic_file(inode))
		f2fs_drop_inmem_pages(inode);
	if (f2fs_is_volatile_file(inode)) {
		set_inode_flag(inode, FI_DROP_CACHE);
		filemap_fdatawrite(inode->i_mapping);
		clear_inode_flag(inode, FI_DROP_CACHE);
		clear_inode_flag(inode, FI_VOLATILE_FILE);
		stat_dec_volatile_write(inode);
	}
	return 0;
}

static int f2fs_file_flush(struct file *file, fl_owner_t id)
{
	struct inode *inode = file_inode(file);

	/*
	 * If the process doing a transaction crashes, we should roll back.
	 * Otherwise, other readers/writers can see a corrupted database
	 * until all the writers close the file. Since this should be done
	 * before dropping the file lock, it needs to happen in ->flush.
	 */
	if (f2fs_is_atomic_file(inode) &&
			F2FS_I(inode)->inmem_task == current)
		f2fs_drop_inmem_pages(inode);
	return 0;
}

static int f2fs_ioc_getflags(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	unsigned int flags = fi->i_flags;

	if (f2fs_encrypted_inode(inode))
		flags |= F2FS_ENCRYPT_FL;
	if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode))
		flags |= F2FS_INLINE_DATA_FL;

	flags &= F2FS_FL_USER_VISIBLE;

	return put_user(flags, (int __user *)arg);
}

static int __f2fs_ioc_setflags(struct inode *inode, unsigned int flags)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	unsigned int oldflags;

	/* Is it quota file? Do not allow user to mess with it */
	if (IS_NOQUOTA(inode))
		return -EPERM;

	flags = f2fs_mask_flags(inode->i_mode, flags);

	oldflags = fi->i_flags;

	if ((flags ^ oldflags) & (F2FS_APPEND_FL | F2FS_IMMUTABLE_FL))
		if (!capable(CAP_LINUX_IMMUTABLE))
			return -EPERM;

	flags = flags & F2FS_FL_USER_MODIFIABLE;
	flags |= oldflags & ~F2FS_FL_USER_MODIFIABLE;
	fi->i_flags = flags;

	if (fi->i_flags & F2FS_PROJINHERIT_FL)
		set_inode_flag(inode, FI_PROJ_INHERIT);
	else
		clear_inode_flag(inode, FI_PROJ_INHERIT);

	inode->i_ctime = current_time(inode);
	f2fs_set_inode_flags(inode);
	f2fs_mark_inode_dirty_sync(inode, false);
	return 0;
}

static int f2fs_ioc_setflags(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	unsigned int flags;
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (get_user(flags, (int __user *)arg))
		return -EFAULT;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	ret = __f2fs_ioc_setflags(inode, flags);

	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_getversion(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);

	return put_user(inode->i_generation, (int __user *)arg);
}

static int f2fs_ioc_start_atomic_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (f2fs_is_atomic_file(inode)) {
		if (is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST))
			ret = -EINVAL;
		goto out;
	}

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		goto out;

	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

	if (!get_dirty_pages(inode))
		goto skip_flush;

	f2fs_msg(F2FS_I_SB(inode)->sb, KERN_WARNING,
		"Unexpected flush for atomic writes: ino=%lu, npages=%u",
					inode->i_ino, get_dirty_pages(inode));
	ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
	if (ret) {
		up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
		goto out;
	}
skip_flush:
	set_inode_flag(inode, FI_ATOMIC_FILE);
	clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	F2FS_I(inode)->inmem_task = current;
	stat_inc_atomic_write(inode);
	stat_update_max_atomic_write(inode);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_commit_atomic_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	f2fs_balance_fs(F2FS_I_SB(inode), true);

	inode_lock(inode);

	if (f2fs_is_volatile_file(inode)) {
		ret = -EINVAL;
		goto err_out;
	}

	if (f2fs_is_atomic_file(inode)) {
		ret = f2fs_commit_inmem_pages(inode);
		if (ret)
			goto err_out;

		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
		if (!ret) {
			clear_inode_flag(inode, FI_ATOMIC_FILE);
			F2FS_I(inode)->i_gc_failures[GC_FAILURE_ATOMIC] = 0;
			stat_dec_atomic_write(inode);
		}
	} else {
		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 1, false);
	}
err_out:
	if (is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST)) {
		clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
		ret = -EINVAL;
	}
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_start_volatile_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (f2fs_is_volatile_file(inode))
		goto out;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		goto out;

	stat_inc_volatile_write(inode);
	stat_update_max_volatile_write(inode);

	set_inode_flag(inode, FI_VOLATILE_FILE);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_release_volatile_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (!f2fs_is_volatile_file(inode))
		goto out;

	if (!f2fs_is_first_block_written(inode)) {
		ret = truncate_partial_data_page(inode, 0, true);
		goto out;
	}

	ret = punch_hole(inode, 0, F2FS_BLKSIZE);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_abort_volatile_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (f2fs_is_atomic_file(inode))
		f2fs_drop_inmem_pages(inode);
	if (f2fs_is_volatile_file(inode)) {
		clear_inode_flag(inode, FI_VOLATILE_FILE);
		stat_dec_volatile_write(inode);
		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
	}

	clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);

	inode_unlock(inode);

	mnt_drop_write_file(filp);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	return ret;
}

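/*
 * F2FS_IOC_SHUTDOWN: stop the filesystem with varying degrees of sync,
 * from freezing the block device (FULLSYNC) down to an immediate stop
 * (NOSYNC).  An illustrative userspace call (sketch only; the ioctl number
 * and the F2FS_GOING_DOWN_* values come from the f2fs headers):
 *
 *	__u32 how = F2FS_GOING_DOWN_METASYNC;
 *
 *	ioctl(fd, F2FS_IOC_SHUTDOWN, &how);
 */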
static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct super_block *sb = sbi->sb;
	__u32 in;
	int ret = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (get_user(in, (__u32 __user *)arg))
		return -EFAULT;

	if (in != F2FS_GOING_DOWN_FULLSYNC) {
		ret = mnt_want_write_file(filp);
		if (ret)
			return ret;
	}

	switch (in) {
	case F2FS_GOING_DOWN_FULLSYNC:
		sb = freeze_bdev(sb->s_bdev);
		if (IS_ERR(sb)) {
			ret = PTR_ERR(sb);
			goto out;
		}
		if (sb) {
			f2fs_stop_checkpoint(sbi, false);
			set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
			thaw_bdev(sb->s_bdev, sb);
		}
		break;
	case F2FS_GOING_DOWN_METASYNC:
		/* do checkpoint only */
		ret = f2fs_sync_fs(sb, 1);
		if (ret)
			goto out;
		f2fs_stop_checkpoint(sbi, false);
		set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
		break;
	case F2FS_GOING_DOWN_NOSYNC:
		f2fs_stop_checkpoint(sbi, false);
		set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
		break;
	case F2FS_GOING_DOWN_METAFLUSH:
		f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_META_IO);
		f2fs_stop_checkpoint(sbi, false);
		set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
		break;
	default:
		ret = -EINVAL;
		goto out;
	}

	f2fs_stop_gc_thread(sbi);
	f2fs_stop_discard_thread(sbi);

	f2fs_drop_discard_cmd(sbi);
	clear_opt(sbi, DISCARD);

	f2fs_update_time(sbi, REQ_TIME);
out:
	if (in != F2FS_GOING_DOWN_FULLSYNC)
		mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_fitrim(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct super_block *sb = inode->i_sb;
	struct request_queue *q = bdev_get_queue(sb->s_bdev);
	struct fstrim_range range;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!f2fs_hw_support_discard(F2FS_SB(sb)))
		return -EOPNOTSUPP;

	if (copy_from_user(&range, (struct fstrim_range __user *)arg,
				sizeof(range)))
		return -EFAULT;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	range.minlen = max((unsigned int)range.minlen,
				q->limits.discard_granularity);
	ret = f2fs_trim_fs(F2FS_SB(sb), &range);
	mnt_drop_write_file(filp);
	if (ret < 0)
		return ret;

	if (copy_to_user((struct fstrim_range __user *)arg, &range,
				sizeof(range)))
		return -EFAULT;
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	return 0;
}

static bool uuid_is_nonzero(__u8 u[16])
{
	int i;

	for (i = 0; i < 16; i++)
		if (u[i])
			return true;
	return false;
}

static int f2fs_ioc_set_encryption_policy(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);

	if (!f2fs_sb_has_encrypt(inode->i_sb))
		return -EOPNOTSUPP;

	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);

	return fscrypt_ioctl_set_policy(filp, (const void __user *)arg);
}

static int f2fs_ioc_get_encryption_policy(struct file *filp, unsigned long arg)
{
	if (!f2fs_sb_has_encrypt(file_inode(filp)->i_sb))
		return -EOPNOTSUPP;
	return fscrypt_ioctl_get_policy(filp, (void __user *)arg);
}

static int f2fs_ioc_get_encryption_pwsalt(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int err;

	if (!f2fs_sb_has_encrypt(inode->i_sb))
		return -EOPNOTSUPP;

	err = mnt_want_write_file(filp);
	if (err)
		return err;

	down_write(&sbi->sb_lock);

	if (uuid_is_nonzero(sbi->raw_super->encrypt_pw_salt))
		goto got_it;

	/* update superblock with uuid */
	generate_random_uuid(sbi->raw_super->encrypt_pw_salt);

	err = f2fs_commit_super(sbi, false);
	if (err) {
		/* undo new data */
		memset(sbi->raw_super->encrypt_pw_salt, 0, 16);
		goto out_err;
	}
got_it:
	if (copy_to_user((__u8 __user *)arg, sbi->raw_super->encrypt_pw_salt,
									16))
		err = -EFAULT;
out_err:
	up_write(&sbi->sb_lock);
	mnt_drop_write_file(filp);
	return err;
}

static int f2fs_ioc_gc(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	__u32 sync;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (get_user(sync, (__u32 __user *)arg))
		return -EFAULT;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	if (!sync) {
		if (!mutex_trylock(&sbi->gc_mutex)) {
			ret = -EBUSY;
			goto out;
		}
	} else {
		mutex_lock(&sbi->gc_mutex);
	}

	ret = f2fs_gc(sbi, sync, true, NULL_SEGNO);
out:
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_gc_range(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_gc_range range;
	u64 end;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&range, (struct f2fs_gc_range __user *)arg,
							sizeof(range)))
		return -EFAULT;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	end = range.start + range.len;
	if (range.start < MAIN_BLKADDR(sbi) || end >= MAX_BLKADDR(sbi)) {
		return -EINVAL;
	}

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

do_more:
	if (!range.sync) {
		if (!mutex_trylock(&sbi->gc_mutex)) {
			ret = -EBUSY;
			goto out;
		}
	} else {
		mutex_lock(&sbi->gc_mutex);
	}

	ret = f2fs_gc(sbi, range.sync, true, GET_SEGNO(sbi, range.start));
	range.start += sbi->blocks_per_seg;
	if (range.start <= end)
		goto do_more;
out:
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_write_checkpoint(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
		f2fs_msg(sbi->sb, KERN_INFO,
			"Skipping Checkpoint. Checkpoints currently disabled.");
		return -EINVAL;
	}

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	ret = f2fs_sync_fs(sbi->sb, 1);

	mnt_drop_write_file(filp);
	return ret;
}

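/*
 * Defragmentation works in two passes: first walk the mapping to see
 * whether the physical blocks are already contiguous (and bail out if so),
 * then dirty the pages of each fragmented stretch so writeback reallocates
 * them contiguously under LFS allocation.
 */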
static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
					struct file *filp,
					struct f2fs_defragment *range)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_map_blocks map = { .m_next_extent = NULL,
					.m_seg_type = NO_CHECK_TYPE };
	struct extent_info ei = {0, 0, 0};
	pgoff_t pg_start, pg_end, next_pgofs;
	unsigned int blk_per_seg = sbi->blocks_per_seg;
	unsigned int total = 0, sec_num;
	block_t blk_end = 0;
	bool fragmented = false;
	int err;

	/* if in-place-update policy is enabled, don't waste time here */
	if (f2fs_should_update_inplace(inode, NULL))
		return -EINVAL;

	pg_start = range->start >> PAGE_SHIFT;
	pg_end = (range->start + range->len) >> PAGE_SHIFT;

	f2fs_balance_fs(sbi, true);

	inode_lock(inode);

	/* writeback all dirty pages in the range */
	err = filemap_write_and_wait_range(inode->i_mapping, range->start,
						range->start + range->len - 1);
	if (err)
		goto out;

	/*
	 * lookup mapping info in extent cache, skip defragmenting if physical
	 * block addresses are continuous.
	 */
	if (f2fs_lookup_extent_cache(inode, pg_start, &ei)) {
		if (ei.fofs + ei.len >= pg_end)
			goto out;
	}

	map.m_lblk = pg_start;
	map.m_next_pgofs = &next_pgofs;

	/*
	 * lookup mapping info in dnode page cache, skip defragmenting if all
	 * physical block addresses are continuous even if there are hole(s)
	 * in logical blocks.
	 */
	while (map.m_lblk < pg_end) {
		map.m_len = pg_end - map.m_lblk;
		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
		if (err)
			goto out;

		if (!(map.m_flags & F2FS_MAP_FLAGS)) {
			map.m_lblk = next_pgofs;
			continue;
		}

		if (blk_end && blk_end != map.m_pblk)
			fragmented = true;

		/* record total count of block that we're going to move */
		total += map.m_len;

		blk_end = map.m_pblk + map.m_len;

		map.m_lblk += map.m_len;
	}

	if (!fragmented)
		goto out;

	sec_num = (total + BLKS_PER_SEC(sbi) - 1) / BLKS_PER_SEC(sbi);

	/*
	 * make sure there are enough free sections for LFS allocation, this can
	 * avoid defragment running in SSR mode when free sections are allocated
	 * intensively
	 */
	if (has_not_enough_free_secs(sbi, 0, sec_num)) {
		err = -EAGAIN;
		goto out;
	}

	map.m_lblk = pg_start;
	map.m_len = pg_end - pg_start;
	total = 0;

	while (map.m_lblk < pg_end) {
		pgoff_t idx;
		int cnt = 0;

do_map:
		map.m_len = pg_end - map.m_lblk;
		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
		if (err)
			goto clear_out;

		if (!(map.m_flags & F2FS_MAP_FLAGS)) {
			map.m_lblk = next_pgofs;
			continue;
		}

		set_inode_flag(inode, FI_DO_DEFRAG);

		idx = map.m_lblk;
		while (idx < map.m_lblk + map.m_len && cnt < blk_per_seg) {
			struct page *page;

			page = f2fs_get_lock_data_page(inode, idx, true);
			if (IS_ERR(page)) {
				err = PTR_ERR(page);
				goto clear_out;
			}

			set_page_dirty(page);
			f2fs_put_page(page, 1);

			idx++;
			cnt++;
			total++;
		}

		map.m_lblk = idx;

		if (idx < pg_end && cnt < blk_per_seg)
			goto do_map;

		clear_inode_flag(inode, FI_DO_DEFRAG);

		err = filemap_fdatawrite(inode->i_mapping);
		if (err)
			goto out;
	}
clear_out:
	clear_inode_flag(inode, FI_DO_DEFRAG);
out:
	inode_unlock(inode);
	if (!err)
		range->len = (u64)total << PAGE_SHIFT;
	return err;
}

static int f2fs_ioc_defragment(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_defragment range;
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!S_ISREG(inode->i_mode) || f2fs_is_atomic_file(inode))
		return -EINVAL;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	if (copy_from_user(&range, (struct f2fs_defragment __user *)arg,
							sizeof(range)))
		return -EFAULT;

	/* verify alignment of offset & size */
	if (range.start & (F2FS_BLKSIZE - 1) || range.len & (F2FS_BLKSIZE - 1))
		return -EINVAL;

	if (unlikely((range.start + range.len) >> PAGE_SHIFT >
					sbi->max_file_blocks))
		return -EINVAL;

	err = mnt_want_write_file(filp);
	if (err)
		return err;

	err = f2fs_defragment_range(sbi, filp, &range);
	mnt_drop_write_file(filp);

	f2fs_update_time(sbi, REQ_TIME);
	if (err < 0)
		return err;

	if (copy_to_user((struct f2fs_defragment __user *)arg, &range,
							sizeof(range)))
		return -EFAULT;

	return 0;
}

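/*
 * F2FS_IOC_MOVE_RANGE backend: after validating alignment and flushing
 * both files, the source blocks are handed to __exchange_data_block() so
 * the destination inode takes over the on-disk blocks without copying
 * data pages where possible.
 */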
static int f2fs_move_file_range(struct file *file_in, loff_t pos_in,
			struct file *file_out, loff_t pos_out, size_t len)
{
	struct inode *src = file_inode(file_in);
	struct inode *dst = file_inode(file_out);
	struct f2fs_sb_info *sbi = F2FS_I_SB(src);
	size_t olen = len, dst_max_i_size = 0;
	size_t dst_osize;
	int ret;

	if (file_in->f_path.mnt != file_out->f_path.mnt ||
				src->i_sb != dst->i_sb)
		return -EXDEV;

	if (unlikely(f2fs_readonly(src->i_sb)))
		return -EROFS;

	if (!S_ISREG(src->i_mode) || !S_ISREG(dst->i_mode))
		return -EINVAL;

	if (f2fs_encrypted_inode(src) || f2fs_encrypted_inode(dst))
		return -EOPNOTSUPP;

	if (src == dst) {
		if (pos_in == pos_out)
			return 0;
		if (pos_out > pos_in && pos_out < pos_in + len)
			return -EINVAL;
	}

	inode_lock(src);
	if (src != dst) {
		ret = -EBUSY;
		if (!inode_trylock(dst))
			goto out;
	}

	ret = -EINVAL;
	if (pos_in + len > src->i_size || pos_in + len < pos_in)
		goto out_unlock;
	if (len == 0)
		olen = len = src->i_size - pos_in;
	if (pos_in + len == src->i_size)
		len = ALIGN(src->i_size, F2FS_BLKSIZE) - pos_in;
	if (len == 0) {
		ret = 0;
		goto out_unlock;
	}

	dst_osize = dst->i_size;
	if (pos_out + olen > dst->i_size)
		dst_max_i_size = pos_out + olen;

	/* verify the end result is block aligned */
	if (!IS_ALIGNED(pos_in, F2FS_BLKSIZE) ||
		!IS_ALIGNED(pos_in + len, F2FS_BLKSIZE) ||
		!IS_ALIGNED(pos_out, F2FS_BLKSIZE))
		goto out_unlock;

	ret = f2fs_convert_inline_inode(src);
	if (ret)
		goto out_unlock;

	ret = f2fs_convert_inline_inode(dst);
	if (ret)
		goto out_unlock;

	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(src->i_mapping,
					pos_in, pos_in + len);
	if (ret)
		goto out_unlock;

	ret = filemap_write_and_wait_range(dst->i_mapping,
					pos_out, pos_out + len);
	if (ret)
		goto out_unlock;

	f2fs_balance_fs(sbi, true);

	down_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
	if (src != dst) {
		ret = -EBUSY;
		if (!down_write_trylock(&F2FS_I(dst)->i_gc_rwsem[WRITE]))
			goto out_src;
	}

	f2fs_lock_op(sbi);
	ret = __exchange_data_block(src, dst, pos_in >> F2FS_BLKSIZE_BITS,
				pos_out >> F2FS_BLKSIZE_BITS,
				len >> F2FS_BLKSIZE_BITS, false);

	if (!ret) {
		if (dst_max_i_size)
			f2fs_i_size_write(dst, dst_max_i_size);
		else if (dst_osize != dst->i_size)
			f2fs_i_size_write(dst, dst_osize);
	}
	f2fs_unlock_op(sbi);

	if (src != dst)
		up_write(&F2FS_I(dst)->i_gc_rwsem[WRITE]);
out_src:
	up_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
out_unlock:
	if (src != dst)
		inode_unlock(dst);
out:
	inode_unlock(src);
	return ret;
}

static int f2fs_ioc_move_range(struct file *filp, unsigned long arg)
{
	struct f2fs_move_range range;
	struct fd dst;
	int err;

	if (!(filp->f_mode & FMODE_READ) ||
			!(filp->f_mode & FMODE_WRITE))
		return -EBADF;

	if (copy_from_user(&range, (struct f2fs_move_range __user *)arg,
							sizeof(range)))
		return -EFAULT;

	dst = fdget(range.dst_fd);
	if (!dst.file)
		return -EBADF;

	if (!(dst.file->f_mode & FMODE_WRITE)) {
		err = -EBADF;
		goto err_out;
	}

	err = mnt_want_write_file(filp);
	if (err)
		goto err_out;

	err = f2fs_move_file_range(filp, range.pos_in, dst.file,
					range.pos_out, range.len);

	mnt_drop_write_file(filp);
	if (err)
		goto err_out;

	if (copy_to_user((struct f2fs_move_range __user *)arg,
						&range, sizeof(range)))
		err = -EFAULT;
err_out:
	fdput(dst);
	return err;
}

2539 static int f2fs_ioc_flush_device(struct file
*filp
, unsigned long arg
)
2541 struct inode
*inode
= file_inode(filp
);
2542 struct f2fs_sb_info
*sbi
= F2FS_I_SB(inode
);
2543 struct sit_info
*sm
= SIT_I(sbi
);
2544 unsigned int start_segno
= 0, end_segno
= 0;
2545 unsigned int dev_start_segno
= 0, dev_end_segno
= 0;
2546 struct f2fs_flush_device range
;
2549 if (!capable(CAP_SYS_ADMIN
))
2552 if (f2fs_readonly(sbi
->sb
))
2555 if (unlikely(is_sbi_flag_set(sbi
, SBI_CP_DISABLED
)))
2558 if (copy_from_user(&range
, (struct f2fs_flush_device __user
*)arg
,
2562 if (sbi
->s_ndevs
<= 1 || sbi
->s_ndevs
- 1 <= range
.dev_num
||
2563 sbi
->segs_per_sec
!= 1) {
2564 f2fs_msg(sbi
->sb
, KERN_WARNING
,
2565 "Can't flush %u in %d for segs_per_sec %u != 1\n",
2566 range
.dev_num
, sbi
->s_ndevs
,
2571 ret
= mnt_want_write_file(filp
);
2575 if (range
.dev_num
!= 0)
2576 dev_start_segno
= GET_SEGNO(sbi
, FDEV(range
.dev_num
).start_blk
);
2577 dev_end_segno
= GET_SEGNO(sbi
, FDEV(range
.dev_num
).end_blk
);
2579 start_segno
= sm
->last_victim
[FLUSH_DEVICE
];
2580 if (start_segno
< dev_start_segno
|| start_segno
>= dev_end_segno
)
2581 start_segno
= dev_start_segno
;
2582 end_segno
= min(start_segno
+ range
.segments
, dev_end_segno
);
2584 while (start_segno
< end_segno
) {
2585 if (!mutex_trylock(&sbi
->gc_mutex
)) {
2589 sm
->last_victim
[GC_CB
] = end_segno
+ 1;
2590 sm
->last_victim
[GC_GREEDY
] = end_segno
+ 1;
2591 sm
->last_victim
[ALLOC_NEXT
] = end_segno
+ 1;
2592 ret
= f2fs_gc(sbi
, true, true, start_segno
);
2600 mnt_drop_write_file(filp
);
2604 static int f2fs_ioc_get_features(struct file
*filp
, unsigned long arg
)
2606 struct inode
*inode
= file_inode(filp
);
2607 u32 sb_feature
= le32_to_cpu(F2FS_I_SB(inode
)->raw_super
->feature
);
2609 /* Must validate to set it with SQLite behavior in Android. */
2610 sb_feature
|= F2FS_FEATURE_ATOMIC_WRITE
;
2612 return put_user(sb_feature
, (u32 __user
*)arg
);
2616 int f2fs_transfer_project_quota(struct inode
*inode
, kprojid_t kprojid
)
2618 struct dquot
*transfer_to
[MAXQUOTAS
] = {};
2619 struct f2fs_sb_info
*sbi
= F2FS_I_SB(inode
);
2620 struct super_block
*sb
= sbi
->sb
;
2623 transfer_to
[PRJQUOTA
] = dqget(sb
, make_kqid_projid(kprojid
));
2624 if (!IS_ERR(transfer_to
[PRJQUOTA
])) {
2625 err
= __dquot_transfer(inode
, transfer_to
);
2627 set_sbi_flag(sbi
, SBI_QUOTA_NEED_REPAIR
);
2628 dqput(transfer_to
[PRJQUOTA
]);
static int f2fs_ioc_setproject(struct file *filp, __u32 projid)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct super_block *sb = sbi->sb;
	struct page *ipage;
	kprojid_t kprojid;
	int err;

	if (!f2fs_sb_has_project_quota(sb)) {
		if (projid != F2FS_DEF_PROJID)
			return -EOPNOTSUPP;
		else
			return 0;
	}

	if (!f2fs_has_extra_attr(inode))
		return -EOPNOTSUPP;

	kprojid = make_kprojid(&init_user_ns, (projid_t)projid);

	if (projid_eq(kprojid, F2FS_I(inode)->i_projid))
		return 0;

	err = -EPERM;
	/* Is it quota file? Do not allow user to mess with it */
	if (IS_NOQUOTA(inode))
		return err;

	ipage = f2fs_get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage))
		return PTR_ERR(ipage);

	if (!F2FS_FITS_IN_INODE(F2FS_INODE(ipage), fi->i_extra_isize,
								i_projid)) {
		err = -EOVERFLOW;
		f2fs_put_page(ipage, 1);
		return err;
	}
	f2fs_put_page(ipage, 1);

	err = dquot_initialize(inode);
	if (err)
		return err;

	f2fs_lock_op(sbi);
	err = f2fs_transfer_project_quota(inode, kprojid);
	if (err)
		goto out_unlock;

	F2FS_I(inode)->i_projid = kprojid;
	inode->i_ctime = current_time(inode);
	f2fs_mark_inode_dirty_sync(inode, true);
out_unlock:
	f2fs_unlock_op(sbi);
	return err;
}
#else
int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid)
{
	return 0;
}

static int f2fs_ioc_setproject(struct file *filp, __u32 projid)
{
	if (projid != F2FS_DEF_PROJID)
		return -EOPNOTSUPP;
	return 0;
}
#endif
/* Transfer internal flags to xflags */
static inline __u32 f2fs_iflags_to_xflags(unsigned long iflags)
{
	__u32 xflags = 0;

	if (iflags & F2FS_SYNC_FL)
		xflags |= FS_XFLAG_SYNC;
	if (iflags & F2FS_IMMUTABLE_FL)
		xflags |= FS_XFLAG_IMMUTABLE;
	if (iflags & F2FS_APPEND_FL)
		xflags |= FS_XFLAG_APPEND;
	if (iflags & F2FS_NODUMP_FL)
		xflags |= FS_XFLAG_NODUMP;
	if (iflags & F2FS_NOATIME_FL)
		xflags |= FS_XFLAG_NOATIME;
	if (iflags & F2FS_PROJINHERIT_FL)
		xflags |= FS_XFLAG_PROJINHERIT;
	return xflags;
}
#define F2FS_SUPPORTED_FS_XFLAGS (FS_XFLAG_SYNC | FS_XFLAG_IMMUTABLE | \
				  FS_XFLAG_APPEND | FS_XFLAG_NODUMP | \
				  FS_XFLAG_NOATIME | FS_XFLAG_PROJINHERIT)
/* Transfer xflags to internal flags */
static inline unsigned long f2fs_xflags_to_iflags(__u32 xflags)
{
	unsigned long iflags = 0;

	if (xflags & FS_XFLAG_SYNC)
		iflags |= F2FS_SYNC_FL;
	if (xflags & FS_XFLAG_IMMUTABLE)
		iflags |= F2FS_IMMUTABLE_FL;
	if (xflags & FS_XFLAG_APPEND)
		iflags |= F2FS_APPEND_FL;
	if (xflags & FS_XFLAG_NODUMP)
		iflags |= F2FS_NODUMP_FL;
	if (xflags & FS_XFLAG_NOATIME)
		iflags |= F2FS_NOATIME_FL;
	if (xflags & FS_XFLAG_PROJINHERIT)
		iflags |= F2FS_PROJINHERIT_FL;
	return iflags;
}
static int f2fs_ioc_fsgetxattr(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct fsxattr fa;

	memset(&fa, 0, sizeof(struct fsxattr));
	fa.fsx_xflags = f2fs_iflags_to_xflags(fi->i_flags &
				F2FS_FL_USER_VISIBLE);

	if (f2fs_sb_has_project_quota(inode->i_sb))
		fa.fsx_projid = (__u32)from_kprojid(&init_user_ns,
							fi->i_projid);

	if (copy_to_user((struct fsxattr __user *)arg, &fa, sizeof(fa)))
		return -EFAULT;
	return 0;
}
static int f2fs_ioctl_check_project(struct inode *inode, struct fsxattr *fa)
{
	/*
	 * Project Quota ID state is only allowed to change from within the init
	 * namespace. Enforce that restriction only if we are trying to change
	 * the quota ID state. Everything else is allowed in user namespaces.
	 */
	if (current_user_ns() == &init_user_ns)
		return 0;

	if (__kprojid_val(F2FS_I(inode)->i_projid) != fa->fsx_projid)
		return -EINVAL;

	if (F2FS_I(inode)->i_flags & F2FS_PROJINHERIT_FL) {
		if (!(fa->fsx_xflags & FS_XFLAG_PROJINHERIT))
			return -EINVAL;
	} else {
		if (fa->fsx_xflags & FS_XFLAG_PROJINHERIT)
			return -EINVAL;
	}

	return 0;
}
static int f2fs_ioc_fssetxattr(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct fsxattr fa;
	unsigned int flags;
	int err;

	if (copy_from_user(&fa, (struct fsxattr __user *)arg, sizeof(fa)))
		return -EFAULT;

	/* Make sure caller has proper permission */
	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (fa.fsx_xflags & ~F2FS_SUPPORTED_FS_XFLAGS)
		return -EOPNOTSUPP;

	flags = f2fs_xflags_to_iflags(fa.fsx_xflags);
	if (f2fs_mask_flags(inode->i_mode, flags) != flags)
		return -EOPNOTSUPP;

	err = mnt_want_write_file(filp);
	if (err)
		return err;

	inode_lock(inode);
	err = f2fs_ioctl_check_project(inode, &fa);
	if (err)
		goto out;
	flags = (fi->i_flags & ~F2FS_FL_XFLAG_VISIBLE) |
				(flags & F2FS_FL_XFLAG_VISIBLE);
	err = __f2fs_ioc_setflags(inode, flags);
	if (err)
		goto out;

	err = f2fs_ioc_setproject(filp, fa.fsx_projid);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return err;
}
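/*
 * Illustrative userspace sketch (not part of this file): the standard
 * FS_IOC_FSGETXATTR/FS_IOC_FSSETXATTR round trip served by the two
 * handlers above. struct fsxattr and the FS_XFLAG_* bits come from the
 * uapi <linux/fs.h>. Read-modify-write is the safe pattern, since any
 * xflag outside F2FS_SUPPORTED_FS_XFLAGS is rejected with -EOPNOTSUPP.
 *
 *	struct fsxattr fa;
 *
 *	if (ioctl(fd, FS_IOC_FSGETXATTR, &fa) < 0)
 *		err(1, "FS_IOC_FSGETXATTR");
 *	fa.fsx_xflags |= FS_XFLAG_PROJINHERIT;	// children inherit project ID
 *	fa.fsx_projid = 42;			// needs project quota enabled
 *	if (ioctl(fd, FS_IOC_FSSETXATTR, &fa) < 0)
 *		err(1, "FS_IOC_FSSETXATTR");
 */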
int f2fs_pin_file_control(struct inode *inode, bool inc)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	/* Use i_gc_failures for normal file as a risk signal. */
	if (inc)
		f2fs_i_gc_failures_write(inode,
				fi->i_gc_failures[GC_FAILURE_PIN] + 1);

	if (fi->i_gc_failures[GC_FAILURE_PIN] > sbi->gc_pin_file_threshold) {
		f2fs_msg(sbi->sb, KERN_WARNING,
			"%s: Enable GC = ino %lx after %x GC trials\n",
			__func__, inode->i_ino,
			fi->i_gc_failures[GC_FAILURE_PIN]);
		clear_inode_flag(inode, FI_PIN_FILE);
		return -EAGAIN;
	}
	return 0;
}
static int f2fs_ioc_set_pin_file(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	__u32 pin;
	int ret = 0;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (get_user(pin, (__u32 __user *)arg))
		return -EFAULT;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	if (f2fs_readonly(F2FS_I_SB(inode)->sb))
		return -EROFS;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (f2fs_should_update_outplace(inode, NULL)) {
		ret = -EINVAL;
		goto out;
	}

	if (!pin) {
		clear_inode_flag(inode, FI_PIN_FILE);
		f2fs_i_gc_failures_write(inode, 0);
		goto done;
	}

	if (f2fs_pin_file_control(inode, false)) {
		ret = -EAGAIN;
		goto out;
	}
	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		goto out;

	set_inode_flag(inode, FI_PIN_FILE);
	ret = F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN];
done:
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}
static int f2fs_ioc_get_pin_file(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	__u32 pin = 0;

	if (is_inode_flag_set(inode, FI_PIN_FILE))
		pin = F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN];
	return put_user(pin, (u32 __user *)arg);
}
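/*
 * Illustrative userspace sketch (not part of this file): pinning a file
 * so its blocks are updated in place and skipped by GC, then reading the
 * pin state back. The pointer-to-__u32 argument convention matches the
 * get_user()/put_user() calls above; the ioctl names are restated from
 * the f2fs headers.
 *
 *	__u32 pin = 1, failures;
 *
 *	if (ioctl(fd, F2FS_IOC_SET_PIN_FILE, &pin) < 0)
 *		perror("F2FS_IOC_SET_PIN_FILE");
 *	if (ioctl(fd, F2FS_IOC_GET_PIN_FILE, &failures) == 0)
 *		printf("pin-mode GC failures so far: %u\n", failures);
 */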
int f2fs_precache_extents(struct inode *inode)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_map_blocks map;
	pgoff_t m_next_extent;
	loff_t end;
	int err = 0;

	if (is_inode_flag_set(inode, FI_NO_EXTENT))
		return -EOPNOTSUPP;

	map.m_lblk = 0;
	map.m_next_pgofs = NULL;
	map.m_next_extent = &m_next_extent;
	map.m_seg_type = NO_CHECK_TYPE;
	end = F2FS_I_SB(inode)->max_file_blocks;

	while (map.m_lblk < end) {
		map.m_len = end - map.m_lblk;

		down_write(&fi->i_gc_rwsem[WRITE]);
		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_PRECACHE);
		up_write(&fi->i_gc_rwsem[WRITE]);
		if (err)
			return err;

		map.m_lblk = m_next_extent;
	}

	return err;
}

static int f2fs_ioc_precache_extents(struct file *filp, unsigned long arg)
{
	return f2fs_precache_extents(file_inode(filp));
}
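/*
 * Illustrative userspace sketch (not part of this file): walking the whole
 * file once with F2FS_IOC_PRECACHE_EXTENTS so that later block lookups hit
 * the extent cache. The ioctl takes no argument; the name is restated from
 * the f2fs headers.
 *
 *	if (ioctl(fd, F2FS_IOC_PRECACHE_EXTENTS) < 0)
 *		perror("F2FS_IOC_PRECACHE_EXTENTS");
 */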
long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(filp)))))
		return -EIO;

	switch (cmd) {
	case F2FS_IOC_GETFLAGS:
		return f2fs_ioc_getflags(filp, arg);
	case F2FS_IOC_SETFLAGS:
		return f2fs_ioc_setflags(filp, arg);
	case F2FS_IOC_GETVERSION:
		return f2fs_ioc_getversion(filp, arg);
	case F2FS_IOC_START_ATOMIC_WRITE:
		return f2fs_ioc_start_atomic_write(filp);
	case F2FS_IOC_COMMIT_ATOMIC_WRITE:
		return f2fs_ioc_commit_atomic_write(filp);
	case F2FS_IOC_START_VOLATILE_WRITE:
		return f2fs_ioc_start_volatile_write(filp);
	case F2FS_IOC_RELEASE_VOLATILE_WRITE:
		return f2fs_ioc_release_volatile_write(filp);
	case F2FS_IOC_ABORT_VOLATILE_WRITE:
		return f2fs_ioc_abort_volatile_write(filp);
	case F2FS_IOC_SHUTDOWN:
		return f2fs_ioc_shutdown(filp, arg);
	case FITRIM:
		return f2fs_ioc_fitrim(filp, arg);
	case F2FS_IOC_SET_ENCRYPTION_POLICY:
		return f2fs_ioc_set_encryption_policy(filp, arg);
	case F2FS_IOC_GET_ENCRYPTION_POLICY:
		return f2fs_ioc_get_encryption_policy(filp, arg);
	case F2FS_IOC_GET_ENCRYPTION_PWSALT:
		return f2fs_ioc_get_encryption_pwsalt(filp, arg);
	case F2FS_IOC_GARBAGE_COLLECT:
		return f2fs_ioc_gc(filp, arg);
	case F2FS_IOC_GARBAGE_COLLECT_RANGE:
		return f2fs_ioc_gc_range(filp, arg);
	case F2FS_IOC_WRITE_CHECKPOINT:
		return f2fs_ioc_write_checkpoint(filp, arg);
	case F2FS_IOC_DEFRAGMENT:
		return f2fs_ioc_defragment(filp, arg);
	case F2FS_IOC_MOVE_RANGE:
		return f2fs_ioc_move_range(filp, arg);
	case F2FS_IOC_FLUSH_DEVICE:
		return f2fs_ioc_flush_device(filp, arg);
	case F2FS_IOC_GET_FEATURES:
		return f2fs_ioc_get_features(filp, arg);
	case F2FS_IOC_FSGETXATTR:
		return f2fs_ioc_fsgetxattr(filp, arg);
	case F2FS_IOC_FSSETXATTR:
		return f2fs_ioc_fssetxattr(filp, arg);
	case F2FS_IOC_GET_PIN_FILE:
		return f2fs_ioc_get_pin_file(filp, arg);
	case F2FS_IOC_SET_PIN_FILE:
		return f2fs_ioc_set_pin_file(filp, arg);
	case F2FS_IOC_PRECACHE_EXTENTS:
		return f2fs_ioc_precache_extents(filp, arg);
	default:
		return -ENOTTY;
	}
}
static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	ssize_t ret;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;

	if ((iocb->ki_flags & IOCB_NOWAIT) && !(iocb->ki_flags & IOCB_DIRECT))
		return -EINVAL;

	if (!inode_trylock(inode)) {
		if (iocb->ki_flags & IOCB_NOWAIT)
			return -EAGAIN;
		inode_lock(inode);
	}

	ret = generic_write_checks(iocb, from);
	if (ret > 0) {
		bool preallocated = false;
		size_t target_size = 0;
		int err;

		if (iov_iter_fault_in_readable(from, iov_iter_count(from)))
			set_inode_flag(inode, FI_NO_PREALLOC);

		if ((iocb->ki_flags & IOCB_NOWAIT) &&
			(iocb->ki_flags & IOCB_DIRECT)) {
			if (!f2fs_overwrite_io(inode, iocb->ki_pos,
						iov_iter_count(from)) ||
				f2fs_has_inline_data(inode) ||
				f2fs_force_buffered_io(inode,
						iocb, from)) {
				clear_inode_flag(inode,
						FI_NO_PREALLOC);
				inode_unlock(inode);
				return -EAGAIN;
			}

		} else {
			preallocated = true;
			target_size = iocb->ki_pos + iov_iter_count(from);

			err = f2fs_preallocate_blocks(iocb, from);
			if (err) {
				clear_inode_flag(inode, FI_NO_PREALLOC);
				inode_unlock(inode);
				return err;
			}
		}
		ret = __generic_file_write_iter(iocb, from);
		clear_inode_flag(inode, FI_NO_PREALLOC);

		/* if we couldn't write data, we should deallocate blocks. */
		if (preallocated && i_size_read(inode) < target_size)
			f2fs_truncate(inode);

		if (ret > 0)
			f2fs_update_iostat(F2FS_I_SB(inode), APP_WRITE_IO, ret);
	}
	inode_unlock(inode);

	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	return ret;
}
#ifdef CONFIG_COMPAT
long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case F2FS_IOC32_GETFLAGS:
		cmd = F2FS_IOC_GETFLAGS;
		break;
	case F2FS_IOC32_SETFLAGS:
		cmd = F2FS_IOC_SETFLAGS;
		break;
	case F2FS_IOC32_GETVERSION:
		cmd = F2FS_IOC_GETVERSION;
		break;
	case F2FS_IOC_START_ATOMIC_WRITE:
	case F2FS_IOC_COMMIT_ATOMIC_WRITE:
	case F2FS_IOC_START_VOLATILE_WRITE:
	case F2FS_IOC_RELEASE_VOLATILE_WRITE:
	case F2FS_IOC_ABORT_VOLATILE_WRITE:
	case F2FS_IOC_SHUTDOWN:
	case F2FS_IOC_SET_ENCRYPTION_POLICY:
	case F2FS_IOC_GET_ENCRYPTION_PWSALT:
	case F2FS_IOC_GET_ENCRYPTION_POLICY:
	case F2FS_IOC_GARBAGE_COLLECT:
	case F2FS_IOC_GARBAGE_COLLECT_RANGE:
	case F2FS_IOC_WRITE_CHECKPOINT:
	case F2FS_IOC_DEFRAGMENT:
	case F2FS_IOC_MOVE_RANGE:
	case F2FS_IOC_FLUSH_DEVICE:
	case F2FS_IOC_GET_FEATURES:
	case F2FS_IOC_FSGETXATTR:
	case F2FS_IOC_FSSETXATTR:
	case F2FS_IOC_GET_PIN_FILE:
	case F2FS_IOC_SET_PIN_FILE:
	case F2FS_IOC_PRECACHE_EXTENTS:
		break;
	default:
		return -ENOIOCTLCMD;
	}
	return f2fs_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
}
#endif
const struct file_operations f2fs_file_operations = {
	.llseek		= f2fs_llseek,
	.read_iter	= generic_file_read_iter,
	.write_iter	= f2fs_file_write_iter,
	.open		= f2fs_file_open,
	.release	= f2fs_release_file,
	.mmap		= f2fs_file_mmap,
	.flush		= f2fs_file_flush,
	.fsync		= f2fs_sync_file,
	.fallocate	= f2fs_fallocate,
	.unlocked_ioctl	= f2fs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= f2fs_compat_ioctl,
#endif
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
};