/*
 * fs/f2fs/file.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/stat.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/falloc.h>
#include <linux/types.h>
#include <linux/compat.h>
#include <linux/uaccess.h>
#include <linux/mount.h>
#include <linux/pagevec.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"
#include "acl.h"
#include "trace.h"
#include <trace/events/f2fs.h>
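
/*
 * Write-fault handler: make the faulting page writable. A block is
 * reserved for the page under f2fs_lock_op() first, and any part of
 * the page beyond EOF is zeroed before the page is dirtied, so a
 * newly mapped page never exposes stale data.
 */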
static int f2fs_vm_page_mkwrite(struct vm_area_struct *vma,
						struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vma->vm_file);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	int err;

	f2fs_balance_fs(sbi);

	sb_start_pagefault(inode->i_sb);

	f2fs_bug_on(sbi, f2fs_has_inline_data(inode));

	/* block allocation */
	f2fs_lock_op(sbi);
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_reserve_block(&dn, page->index);
	if (err) {
		f2fs_unlock_op(sbi);
		goto out;
	}
	f2fs_put_dnode(&dn);
	f2fs_unlock_op(sbi);

	file_update_time(vma->vm_file);
	lock_page(page);
	if (unlikely(page->mapping != inode->i_mapping ||
			page_offset(page) > i_size_read(inode) ||
			!PageUptodate(page))) {
		unlock_page(page);
		err = -EFAULT;
		goto out;
	}

	/*
	 * check to see if the page is mapped already (no holes)
	 */
	if (PageMappedToDisk(page))
		goto mapped;

	/* page is wholly or partially inside EOF */
	if (((page->index + 1) << PAGE_CACHE_SHIFT) > i_size_read(inode)) {
		unsigned offset;
		offset = i_size_read(inode) & ~PAGE_CACHE_MASK;
		zero_user_segment(page, offset, PAGE_CACHE_SIZE);
	}
	set_page_dirty(page);
	SetPageUptodate(page);

	trace_f2fs_vm_page_mkwrite(page, DATA);
mapped:
	/* fill the page */
	f2fs_wait_on_page_writeback(page, DATA);
out:
	sb_end_pagefault(inode->i_sb);
	return block_page_mkwrite_return(err);
}
static const struct vm_operations_struct f2fs_file_vm_ops = {
	.fault		= filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= f2fs_vm_page_mkwrite,
};
static int get_parent_ino(struct inode *inode, nid_t *pino)
{
	struct dentry *dentry;

	inode = igrab(inode);
	dentry = d_find_any_alias(inode);
	iput(inode);
	if (!dentry)
		return 0;

	if (update_dent_inode(inode, &dentry->d_name)) {
		dput(dentry);
		return 0;
	}

	*pino = parent_ino(dentry);
	dput(dentry);
	return 1;
}
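
/*
 * Decide whether this fsync must trigger a full checkpoint instead of
 * the cheaper roll-forward recovery path: anything roll-forward cannot
 * reproduce (hard links, a wrong parent ino, too little log space, an
 * un-checkpointed parent node, xattr updates in the current checkpoint
 * version, fastboot mode, or only two active logs) forces a checkpoint.
 */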
static inline bool need_do_checkpoint(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	bool need_cp = false;

	if (!S_ISREG(inode->i_mode) || inode->i_nlink != 1)
		need_cp = true;
	else if (file_wrong_pino(inode))
		need_cp = true;
	else if (!space_for_roll_forward(sbi))
		need_cp = true;
	else if (!is_checkpointed_node(sbi, F2FS_I(inode)->i_pino))
		need_cp = true;
	else if (F2FS_I(inode)->xattr_ver == cur_cp_version(F2FS_CKPT(sbi)))
		need_cp = true;
	else if (test_opt(sbi, FASTBOOT))
		need_cp = true;
	else if (sbi->active_logs == 2)
		need_cp = true;

	return need_cp;
}
static bool need_inode_page_update(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct page *i = find_get_page(NODE_MAPPING(sbi), ino);
	bool ret = false;

	/* we need to catch any pending inode updates as well */
	if ((i && PageDirty(i)) || need_inode_block_update(sbi, ino))
		ret = true;
	f2fs_put_page(i, 0);
	return ret;
}
static void try_to_fix_pino(struct inode *inode)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	nid_t pino;

	down_write(&fi->i_sem);
	fi->xattr_ver = 0;
	if (file_wrong_pino(inode) && inode->i_nlink == 1 &&
			get_parent_ino(inode, &pino)) {
		fi->i_pino = pino;
		file_got_pino(inode);
		up_write(&fi->i_sem);

		mark_inode_dirty_sync(inode);
		f2fs_write_inode(inode, NULL);
	} else {
		up_write(&fi->i_sem);
	}
}
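
/*
 * fsync/fdatasync entry point. The fast path writes data pages (using
 * in-place updates for small fdatasync bursts) and then either flushes
 * only this inode's node pages for roll-forward recovery, or falls
 * back to a full checkpoint when need_do_checkpoint() says recovery
 * alone would be insufficient.
 */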
int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct inode *inode = file->f_mapping->host;
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t ino = inode->i_ino;
	int ret = 0;
	bool need_cp = false;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.for_reclaim = 0,
	};

	if (unlikely(f2fs_readonly(inode->i_sb)))
		return 0;

	trace_f2fs_sync_file_enter(inode);

	/* if fdatasync is triggered, let's do in-place-update */
	if (get_dirty_pages(inode) <= SM_I(sbi)->min_fsync_blocks)
		set_inode_flag(fi, FI_NEED_IPU);
	ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
	clear_inode_flag(fi, FI_NEED_IPU);

	if (ret) {
		trace_f2fs_sync_file_exit(inode, need_cp, datasync, ret);
		return ret;
	}

	/* if the inode is dirty, let's recover all the time */
	if (!datasync && is_inode_flag_set(fi, FI_DIRTY_INODE)) {
		update_inode_page(inode);
		goto go_write;
	}

	/*
	 * if there is no written data, don't waste time writing recovery info.
	 */
	if (!is_inode_flag_set(fi, FI_APPEND_WRITE) &&
			!exist_written_data(sbi, ino, APPEND_INO)) {

		/* it may call write_inode just prior to fsync */
		if (need_inode_page_update(sbi, ino))
			goto go_write;

		if (is_inode_flag_set(fi, FI_UPDATE_WRITE) ||
				exist_written_data(sbi, ino, UPDATE_INO))
			goto flush_out;
		goto out;
	}
go_write:
	/* guarantee free sections for fsync */
	f2fs_balance_fs(sbi);

	/*
	 * Both fdatasync() and fsync() can be recovered from a sudden
	 * power failure.
	 */
	down_read(&fi->i_sem);
	need_cp = need_do_checkpoint(inode);
	up_read(&fi->i_sem);

	if (need_cp) {
		/* all the dirty node pages should be flushed for POR */
		ret = f2fs_sync_fs(inode->i_sb, 1);

		/*
		 * We've secured consistency through sync_fs. Following pino
		 * will be used only for fsynced inodes after checkpoint.
		 */
		try_to_fix_pino(inode);
		goto out;
	}
sync_nodes:
	sync_node_pages(sbi, ino, &wbc);

	/* if cp_error was enabled, we should avoid infinite loop */
	if (unlikely(f2fs_cp_error(sbi)))
		goto out;

	if (need_inode_block_update(sbi, ino)) {
		mark_inode_dirty_sync(inode);
		f2fs_write_inode(inode, NULL);
		goto sync_nodes;
	}

	ret = wait_on_node_pages_writeback(sbi, ino);
	if (ret)
		goto out;

	/* once recovery info is written, we don't need to track this */
	remove_dirty_inode(sbi, ino, APPEND_INO);
	clear_inode_flag(fi, FI_APPEND_WRITE);
flush_out:
	remove_dirty_inode(sbi, ino, UPDATE_INO);
	clear_inode_flag(fi, FI_UPDATE_WRITE);
	ret = f2fs_issue_flush(sbi);
out:
	trace_f2fs_sync_file_exit(inode, need_cp, datasync, ret);
	f2fs_trace_ios(NULL, NULL, 1);
	return ret;
}
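
/*
 * Helpers for lseek(SEEK_DATA/SEEK_HOLE). A dirty page that has not
 * been allocated an on-disk block yet (NEW_ADDR) still counts as data,
 * so the first dirty page index is looked up in the page cache and
 * compared against the block addresses walked via dnodes.
 */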
static pgoff_t __get_first_dirty_index(struct address_space *mapping,
						pgoff_t pgofs, int whence)
{
	struct pagevec pvec;
	int nr_pages;

	if (whence != SEEK_DATA)
		return 0;

	/* find first dirty page index */
	pagevec_init(&pvec, 0);
	nr_pages = pagevec_lookup_tag(&pvec, mapping, &pgofs,
					PAGECACHE_TAG_DIRTY, 1);
	pgofs = nr_pages ? pvec.pages[0]->index : LONG_MAX;
	pagevec_release(&pvec);
	return pgofs;
}
static bool __found_offset(block_t blkaddr, pgoff_t dirty, pgoff_t pgofs,
							int whence)
{
	switch (whence) {
	case SEEK_DATA:
		if ((blkaddr == NEW_ADDR && dirty == pgofs) ||
			(blkaddr != NEW_ADDR && blkaddr != NULL_ADDR))
			return true;
		break;
	case SEEK_HOLE:
		if (blkaddr == NULL_ADDR)
			return true;
		break;
	}
	return false;
}
static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes = inode->i_sb->s_maxbytes;
	struct dnode_of_data dn;
	pgoff_t pgofs, end_offset, dirty;
	loff_t data_ofs = offset;
	loff_t isize;
	int err = 0;

	mutex_lock(&inode->i_mutex);

	isize = i_size_read(inode);
	if (offset >= isize)
		goto fail;

	/* handle inline data case */
	if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode)) {
		if (whence == SEEK_HOLE)
			data_ofs = isize;
		goto found;
	}

	pgofs = (pgoff_t)(offset >> PAGE_CACHE_SHIFT);

	dirty = __get_first_dirty_index(inode->i_mapping, pgofs, whence);

	for (; data_ofs < isize; data_ofs = (loff_t)pgofs << PAGE_CACHE_SHIFT) {
		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = get_dnode_of_data(&dn, pgofs, LOOKUP_NODE_RA);
		if (err && err != -ENOENT) {
			goto fail;
		} else if (err == -ENOENT) {
			/* direct node does not exist */
			if (whence == SEEK_DATA) {
				pgofs = PGOFS_OF_NEXT_DNODE(pgofs,
							F2FS_I(inode));
				continue;
			} else {
				goto found;
			}
		}

		end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));

		/* find data/hole in dnode block */
		for (; dn.ofs_in_node < end_offset;
				dn.ofs_in_node++, pgofs++,
				data_ofs = (loff_t)pgofs << PAGE_CACHE_SHIFT) {
			block_t blkaddr;
			blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);

			if (__found_offset(blkaddr, dirty, pgofs, whence)) {
				f2fs_put_dnode(&dn);
				goto found;
			}
		}
		f2fs_put_dnode(&dn);
	}

	if (whence == SEEK_DATA)
		goto fail;
found:
	if (whence == SEEK_HOLE && data_ofs > isize)
		data_ofs = isize;
	mutex_unlock(&inode->i_mutex);
	return vfs_setpos(file, data_ofs, maxbytes);
fail:
	mutex_unlock(&inode->i_mutex);
	return -ENXIO;
}
static loff_t f2fs_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes = inode->i_sb->s_maxbytes;

	switch (whence) {
	case SEEK_SET:
	case SEEK_CUR:
	case SEEK_END:
		return generic_file_llseek_size(file, offset, whence,
						maxbytes, i_size_read(inode));
	case SEEK_DATA:
	case SEEK_HOLE:
		if (offset < 0)
			return -ENXIO;
		return f2fs_seek_block(file, offset, whence);
	}

	return -EINVAL;
}
static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(file);

	/* we don't need to use inline_data strictly */
	if (f2fs_has_inline_data(inode)) {
		int err = f2fs_convert_inline_inode(inode);
		if (err)
			return err;
	}

	file_accessed(file);
	vma->vm_ops = &f2fs_file_vm_ops;
	return 0;
}
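
/*
 * Free up to @count block addresses in the dnode starting at
 * dn->ofs_in_node: each valid address is invalidated and NULL_ADDR is
 * written back, then the freed blocks are subtracted from the inode's
 * valid block count. Returns the number of blocks actually freed.
 */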
int truncate_data_blocks_range(struct dnode_of_data *dn, int count)
{
	int nr_free = 0, ofs = dn->ofs_in_node;
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct f2fs_node *raw_node;
	__le32 *addr;

	raw_node = F2FS_NODE(dn->node_page);
	addr = blkaddr_in_node(raw_node) + ofs;

	for (; count > 0; count--, addr++, dn->ofs_in_node++) {
		block_t blkaddr = le32_to_cpu(*addr);
		if (blkaddr == NULL_ADDR)
			continue;

		dn->data_blkaddr = NULL_ADDR;
		update_extent_cache(dn);
		invalidate_blocks(sbi, blkaddr);
		nr_free++;
	}
	if (nr_free) {
		dec_valid_block_count(sbi, dn->inode, nr_free);
		set_page_dirty(dn->node_page);
		sync_inode_page(dn);
	}
	dn->ofs_in_node = ofs;

	trace_f2fs_truncate_data_blocks_range(dn->inode, dn->nid,
					dn->ofs_in_node, nr_free);
	return nr_free;
}
void truncate_data_blocks(struct dnode_of_data *dn)
{
	truncate_data_blocks_range(dn, ADDRS_PER_BLOCK);
}
static int truncate_partial_data_page(struct inode *inode, u64 from)
{
	unsigned offset = from & (PAGE_CACHE_SIZE - 1);
	struct page *page;

	if (!offset)
		return 0;

	page = find_data_page(inode, from >> PAGE_CACHE_SHIFT, false);
	if (IS_ERR(page))
		return 0;

	lock_page(page);
	if (unlikely(!PageUptodate(page) ||
			page->mapping != inode->i_mapping))
		goto out;

	f2fs_wait_on_page_writeback(page, DATA);
	zero_user(page, offset, PAGE_CACHE_SIZE - offset);
	set_page_dirty(page);
out:
	f2fs_put_page(page, 1);
	return 0;
}
int truncate_blocks(struct inode *inode, u64 from, bool lock)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	unsigned int blocksize = inode->i_sb->s_blocksize;
	struct dnode_of_data dn;
	pgoff_t free_from;
	int count = 0, err = 0;
	struct page *ipage;

	trace_f2fs_truncate_blocks_enter(inode, from);

	free_from = (pgoff_t)F2FS_BYTES_TO_BLK(from + blocksize - 1);

	if (lock)
		f2fs_lock_op(sbi);

	ipage = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto out;
	}

	if (f2fs_has_inline_data(inode)) {
		f2fs_put_page(ipage, 1);
		goto out;
	}

	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = get_dnode_of_data(&dn, free_from, LOOKUP_NODE);
	if (err) {
		if (err == -ENOENT)
			goto free_next;
		goto out;
	}

	count = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));

	count -= dn.ofs_in_node;
	f2fs_bug_on(sbi, count < 0);

	if (dn.ofs_in_node || IS_INODE(dn.node_page)) {
		truncate_data_blocks_range(&dn, count);
		free_from += count;
	}

	f2fs_put_dnode(&dn);
free_next:
	err = truncate_inode_blocks(inode, free_from);
out:
	if (lock)
		f2fs_unlock_op(sbi);

	/* lastly zero out the first data page */
	if (!err)
		err = truncate_partial_data_page(inode, from);

	trace_f2fs_truncate_blocks_exit(inode, err);
	return err;
}
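
/*
 * Top-level truncate to the current i_size: regular files, directories
 * and symlinks only. Inline inodes that no longer qualify for inline
 * storage are converted to the regular layout first.
 */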
void f2fs_truncate(struct inode *inode)
{
	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
				S_ISLNK(inode->i_mode)))
		return;

	trace_f2fs_truncate(inode);

	/* we should check inline_data size */
	if (f2fs_has_inline_data(inode) && !f2fs_may_inline(inode)) {
		if (f2fs_convert_inline_inode(inode))
			return;
	}

	if (!truncate_blocks(inode, i_size_read(inode), true)) {
		inode->i_mtime = inode->i_ctime = CURRENT_TIME;
		mark_inode_dirty(inode);
	}
}
int f2fs_getattr(struct vfsmount *mnt,
			 struct dentry *dentry, struct kstat *stat)
{
	struct inode *inode = dentry->d_inode;
	generic_fillattr(inode, stat);
	stat->blocks <<= 3;
	return 0;
}
#ifdef CONFIG_F2FS_FS_POSIX_ACL
static void __setattr_copy(struct inode *inode, const struct iattr *attr)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	unsigned int ia_valid = attr->ia_valid;

	if (ia_valid & ATTR_UID)
		inode->i_uid = attr->ia_uid;
	if (ia_valid & ATTR_GID)
		inode->i_gid = attr->ia_gid;
	if (ia_valid & ATTR_ATIME)
		inode->i_atime = timespec_trunc(attr->ia_atime,
						inode->i_sb->s_time_gran);
	if (ia_valid & ATTR_MTIME)
		inode->i_mtime = timespec_trunc(attr->ia_mtime,
						inode->i_sb->s_time_gran);
	if (ia_valid & ATTR_CTIME)
		inode->i_ctime = timespec_trunc(attr->ia_ctime,
						inode->i_sb->s_time_gran);
	if (ia_valid & ATTR_MODE) {
		umode_t mode = attr->ia_mode;

		if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
			mode &= ~S_ISGID;
		set_acl_inode(fi, mode);
	}
}
#else
#define __setattr_copy setattr_copy
#endif
int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	struct f2fs_inode_info *fi = F2FS_I(inode);
	int err;

	err = inode_change_ok(inode, attr);
	if (err)
		return err;

	if (attr->ia_valid & ATTR_SIZE) {
		if (attr->ia_size != i_size_read(inode)) {
			truncate_setsize(inode, attr->ia_size);
			f2fs_truncate(inode);
			f2fs_balance_fs(F2FS_I_SB(inode));
		} else {
			/*
			 * giving a chance to truncate blocks past EOF which
			 * are fallocated with FALLOC_FL_KEEP_SIZE.
			 */
			f2fs_truncate(inode);
		}
	}

	__setattr_copy(inode, attr);

	if (attr->ia_valid & ATTR_MODE) {
		err = posix_acl_chmod(inode, get_inode_mode(inode));
		if (err || is_inode_flag_set(fi, FI_ACL_MODE)) {
			inode->i_mode = fi->i_acl_mode;
			clear_inode_flag(fi, FI_ACL_MODE);
		}
	}

	mark_inode_dirty(inode);
	return err;
}
const struct inode_operations f2fs_file_inode_operations = {
	.getattr	= f2fs_getattr,
	.setattr	= f2fs_setattr,
	.get_acl	= f2fs_get_acl,
	.set_acl	= f2fs_set_acl,
#ifdef CONFIG_F2FS_FS_XATTR
	.setxattr	= generic_setxattr,
	.getxattr	= generic_getxattr,
	.listxattr	= f2fs_listxattr,
	.removexattr	= generic_removexattr,
#endif
	.fiemap		= f2fs_fiemap,
};
static void fill_zero(struct inode *inode, pgoff_t index,
					loff_t start, loff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page;

	if (!len)
		return;

	f2fs_balance_fs(sbi);

	f2fs_lock_op(sbi);
	page = get_new_data_page(inode, NULL, index, false);
	f2fs_unlock_op(sbi);

	if (!IS_ERR(page)) {
		f2fs_wait_on_page_writeback(page, DATA);
		zero_user(page, start, len);
		set_page_dirty(page);
		f2fs_put_page(page, 1);
	}
}
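
/*
 * Hole punching works in two steps: the partial head and tail pages
 * are zeroed in the page cache via fill_zero(), and every fully
 * covered page in between has its page-cache pages dropped and its
 * on-disk blocks freed via truncate_hole().
 */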
int truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end)
{
	pgoff_t index;
	int err;

	for (index = pg_start; index < pg_end; index++) {
		struct dnode_of_data dn;

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
		if (err) {
			if (err == -ENOENT)
				continue;
			return err;
		}

		if (dn.data_blkaddr != NULL_ADDR)
			truncate_data_blocks_range(&dn, 1);
		f2fs_put_dnode(&dn);
	}
	return 0;
}
static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
	pgoff_t pg_start, pg_end;
	loff_t off_start, off_end;
	int ret = 0;

	if (!S_ISREG(inode->i_mode))
		return -EOPNOTSUPP;

	/* skip punching hole beyond i_size */
	if (offset >= inode->i_size)
		return ret;

	if (f2fs_has_inline_data(inode)) {
		ret = f2fs_convert_inline_inode(inode);
		if (ret)
			return ret;
	}

	pg_start = ((unsigned long long) offset) >> PAGE_CACHE_SHIFT;
	pg_end = ((unsigned long long) offset + len) >> PAGE_CACHE_SHIFT;

	off_start = offset & (PAGE_CACHE_SIZE - 1);
	off_end = (offset + len) & (PAGE_CACHE_SIZE - 1);

	if (pg_start == pg_end) {
		fill_zero(inode, pg_start, off_start,
						off_end - off_start);
	} else {
		if (off_start)
			fill_zero(inode, pg_start++, off_start,
					PAGE_CACHE_SIZE - off_start);
		if (off_end)
			fill_zero(inode, pg_end, 0, off_end);

		if (pg_start < pg_end) {
			struct address_space *mapping = inode->i_mapping;
			loff_t blk_start, blk_end;
			struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

			f2fs_balance_fs(sbi);

			blk_start = pg_start << PAGE_CACHE_SHIFT;
			blk_end = pg_end << PAGE_CACHE_SHIFT;
			truncate_inode_pages_range(mapping, blk_start,
					blk_end - 1);

			f2fs_lock_op(sbi);
			ret = truncate_hole(inode, pg_start, pg_end);
			f2fs_unlock_op(sbi);
		}
	}

	return ret;
}
static int expand_inode_data(struct inode *inode, loff_t offset,
					loff_t len, int mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	pgoff_t index, pg_start, pg_end;
	loff_t new_size = i_size_read(inode);
	loff_t off_start, off_end;
	int ret = 0;

	f2fs_balance_fs(sbi);

	ret = inode_newsize_ok(inode, (len + offset));
	if (ret)
		return ret;

	if (f2fs_has_inline_data(inode)) {
		ret = f2fs_convert_inline_inode(inode);
		if (ret)
			return ret;
	}

	pg_start = ((unsigned long long) offset) >> PAGE_CACHE_SHIFT;
	pg_end = ((unsigned long long) offset + len) >> PAGE_CACHE_SHIFT;

	off_start = offset & (PAGE_CACHE_SIZE - 1);
	off_end = (offset + len) & (PAGE_CACHE_SIZE - 1);

	f2fs_lock_op(sbi);

	for (index = pg_start; index <= pg_end; index++) {
		struct dnode_of_data dn;

		if (index == pg_end && !off_end)
			goto noalloc;

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		ret = f2fs_reserve_block(&dn, index);
		if (ret)
			break;
noalloc:
		if (pg_start == pg_end)
			new_size = offset + len;
		else if (index == pg_start && off_start)
			new_size = (index + 1) << PAGE_CACHE_SHIFT;
		else if (index == pg_end)
			new_size = (index << PAGE_CACHE_SHIFT) + off_end;
		else
			new_size += PAGE_CACHE_SIZE;
	}

	if (!(mode & FALLOC_FL_KEEP_SIZE) &&
		i_size_read(inode) < new_size) {
		i_size_write(inode, new_size);
		mark_inode_dirty(inode);
		update_inode_page(inode);
	}
	f2fs_unlock_op(sbi);

	return ret;
}
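
/*
 * fallocate(2) entry point; only FALLOC_FL_KEEP_SIZE and
 * FALLOC_FL_PUNCH_HOLE are supported here. An illustrative userspace
 * sketch (not part of this file, error handling omitted):
 *
 *	fallocate(fd, 0, 0, 1 << 20);			// preallocate 1MB
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE |
 *			FALLOC_FL_KEEP_SIZE, off, len);	// deallocate range
 */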
static long f2fs_fallocate(struct file *file, int mode,
				loff_t offset, loff_t len)
{
	struct inode *inode = file_inode(file);
	long ret;

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return -EOPNOTSUPP;

	mutex_lock(&inode->i_mutex);

	if (mode & FALLOC_FL_PUNCH_HOLE)
		ret = punch_hole(inode, offset, len);
	else
		ret = expand_inode_data(inode, offset, len, mode);

	if (!ret) {
		inode->i_mtime = inode->i_ctime = CURRENT_TIME;
		mark_inode_dirty(inode);
	}

	mutex_unlock(&inode->i_mutex);

	trace_f2fs_fallocate(inode, mode, offset, len, ret);
	return ret;
}
static int f2fs_release_file(struct inode *inode, struct file *filp)
{
	/* any remaining atomic pages should be discarded */
	if (f2fs_is_atomic_file(inode))
		commit_inmem_pages(inode, true);
	if (f2fs_is_volatile_file(inode)) {
		set_inode_flag(F2FS_I(inode), FI_DROP_CACHE);
		filemap_fdatawrite(inode->i_mapping);
		clear_inode_flag(F2FS_I(inode), FI_DROP_CACHE);
	}
	return 0;
}
#define F2FS_REG_FLMASK		(~(FS_DIRSYNC_FL | FS_TOPDIR_FL))
#define F2FS_OTHER_FLMASK	(FS_NODUMP_FL | FS_NOATIME_FL)

static inline __u32 f2fs_mask_flags(umode_t mode, __u32 flags)
{
	if (S_ISDIR(mode))
		return flags;
	else if (S_ISREG(mode))
		return flags & F2FS_REG_FLMASK;
	else
		return flags & F2FS_OTHER_FLMASK;
}
static int f2fs_ioc_getflags(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	unsigned int flags = fi->i_flags & FS_FL_USER_VISIBLE;
	return put_user(flags, (int __user *)arg);
}
static int f2fs_ioc_setflags(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	unsigned int flags = fi->i_flags & FS_FL_USER_VISIBLE;
	unsigned int oldflags;
	int ret;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	if (!inode_owner_or_capable(inode)) {
		ret = -EACCES;
		goto out;
	}

	if (get_user(flags, (int __user *)arg)) {
		ret = -EFAULT;
		goto out;
	}

	flags = f2fs_mask_flags(inode->i_mode, flags);

	mutex_lock(&inode->i_mutex);

	oldflags = fi->i_flags;

	if ((flags ^ oldflags) & (FS_APPEND_FL | FS_IMMUTABLE_FL)) {
		if (!capable(CAP_LINUX_IMMUTABLE)) {
			mutex_unlock(&inode->i_mutex);
			ret = -EPERM;
			goto out;
		}
	}

	flags = flags & FS_FL_USER_MODIFIABLE;
	flags |= oldflags & ~FS_FL_USER_MODIFIABLE;
	fi->i_flags = flags;
	mutex_unlock(&inode->i_mutex);

	f2fs_set_inode_flags(inode);
	inode->i_ctime = CURRENT_TIME;
	mark_inode_dirty(inode);
out:
	mnt_drop_write_file(filp);
	return ret;
}
static int f2fs_ioc_getversion(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);

	return put_user(inode->i_generation, (int __user *)arg);
}
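
/*
 * Atomic/volatile write ioctls. F2FS_IOC_START_ATOMIC_WRITE stages
 * subsequent writes in memory; F2FS_IOC_COMMIT_ATOMIC_WRITE writes
 * them out together and fsyncs, so after a crash either all staged
 * writes are visible or none are. Volatile files keep their data in
 * memory only, for caches that may be thrown away. An illustrative
 * userspace sketch, assuming the F2FS_IOC_* numbers are taken from
 * the f2fs ioctl definitions (error handling omitted):
 *
 *	int fd = open("db.sqlite", O_RDWR);
 *	ioctl(fd, F2FS_IOC_START_ATOMIC_WRITE);
 *	write(fd, buf, len);				// staged in memory
 *	ioctl(fd, F2FS_IOC_COMMIT_ATOMIC_WRITE);	// commit + fsync
 */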
static int f2fs_ioc_start_atomic_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	f2fs_balance_fs(F2FS_I_SB(inode));

	if (f2fs_is_atomic_file(inode))
		return 0;

	set_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE);

	return f2fs_convert_inline_inode(inode);
}
static int f2fs_ioc_commit_atomic_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (f2fs_is_volatile_file(inode))
		return 0;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	if (f2fs_is_atomic_file(inode))
		commit_inmem_pages(inode, false);

	ret = f2fs_sync_file(filp, 0, LONG_MAX, 0);
	mnt_drop_write_file(filp);
	clear_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE);
	return ret;
}
static int f2fs_ioc_start_volatile_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (f2fs_is_volatile_file(inode))
		return 0;

	set_inode_flag(F2FS_I(inode), FI_VOLATILE_FILE);

	return f2fs_convert_inline_inode(inode);
}
static int f2fs_ioc_release_volatile_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (!f2fs_is_volatile_file(inode))
		return 0;

	punch_hole(inode, 0, F2FS_BLKSIZE);
	return 0;
}
static int f2fs_ioc_abort_volatile_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	f2fs_balance_fs(F2FS_I_SB(inode));

	if (f2fs_is_atomic_file(inode)) {
		commit_inmem_pages(inode, false);
		clear_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE);
	}

	if (f2fs_is_volatile_file(inode)) {
		clear_inode_flag(F2FS_I(inode), FI_VOLATILE_FILE);
		filemap_fdatawrite(inode->i_mapping);
		set_inode_flag(F2FS_I(inode), FI_VOLATILE_FILE);
	}
	mnt_drop_write_file(filp);
	return ret;
}
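
/*
 * FITRIM is the generic filesystem discard ioctl from <linux/fs.h>;
 * the requested minimum extent length is clamped to the device's
 * discard granularity before the trim runs. An illustrative userspace
 * sketch (error handling omitted):
 *
 *	struct fstrim_range range = {
 *		.start = 0, .len = ULLONG_MAX, .minlen = 0,
 *	};
 *	ioctl(fd, FITRIM, &range);	// range.len returns trimmed bytes
 */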
static int f2fs_ioc_fitrim(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct super_block *sb = inode->i_sb;
	struct request_queue *q = bdev_get_queue(sb->s_bdev);
	struct fstrim_range range;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!blk_queue_discard(q))
		return -EOPNOTSUPP;

	if (copy_from_user(&range, (struct fstrim_range __user *)arg,
				sizeof(range)))
		return -EFAULT;

	range.minlen = max((unsigned int)range.minlen,
				q->limits.discard_granularity);
	ret = f2fs_trim_fs(F2FS_SB(sb), &range);
	if (ret < 0)
		return ret;

	if (copy_to_user((struct fstrim_range __user *)arg, &range,
				sizeof(range)))
		return -EFAULT;

	return 0;
}
long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case F2FS_IOC_GETFLAGS:
		return f2fs_ioc_getflags(filp, arg);
	case F2FS_IOC_SETFLAGS:
		return f2fs_ioc_setflags(filp, arg);
	case F2FS_IOC_GETVERSION:
		return f2fs_ioc_getversion(filp, arg);
	case F2FS_IOC_START_ATOMIC_WRITE:
		return f2fs_ioc_start_atomic_write(filp);
	case F2FS_IOC_COMMIT_ATOMIC_WRITE:
		return f2fs_ioc_commit_atomic_write(filp);
	case F2FS_IOC_START_VOLATILE_WRITE:
		return f2fs_ioc_start_volatile_write(filp);
	case F2FS_IOC_RELEASE_VOLATILE_WRITE:
		return f2fs_ioc_release_volatile_write(filp);
	case F2FS_IOC_ABORT_VOLATILE_WRITE:
		return f2fs_ioc_abort_volatile_write(filp);
	case FITRIM:
		return f2fs_ioc_fitrim(filp, arg);
	default:
		return -ENOTTY;
	}
}
#ifdef CONFIG_COMPAT
long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case F2FS_IOC32_GETFLAGS:
		cmd = F2FS_IOC_GETFLAGS;
		break;
	case F2FS_IOC32_SETFLAGS:
		cmd = F2FS_IOC_SETFLAGS;
		break;
	default:
		return -ENOIOCTLCMD;
	}
	return f2fs_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
}
#endif
const struct file_operations f2fs_file_operations = {
	.llseek		= f2fs_llseek,
	.read		= new_sync_read,
	.write		= new_sync_write,
	.read_iter	= generic_file_read_iter,
	.write_iter	= generic_file_write_iter,
	.open		= generic_file_open,
	.release	= f2fs_release_file,
	.mmap		= f2fs_file_mmap,
	.fsync		= f2fs_sync_file,
	.fallocate	= f2fs_fallocate,
	.unlocked_ioctl	= f2fs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= f2fs_compat_ioctl,
#endif
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
};