// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/file.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/stat.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/falloc.h>
#include <linux/types.h>
#include <linux/compat.h>
#include <linux/uaccess.h>
#include <linux/mount.h>
#include <linux/pagevec.h>
#include <linux/uio.h>
#include <linux/uuid.h>
#include <linux/file.h>
#include <linux/nls.h>
#include <linux/sched/signal.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"
#include "acl.h"
#include "gc.h"
#include "trace.h"
#include <trace/events/f2fs.h>
#include <uapi/linux/f2fs.h>
static vm_fault_t f2fs_filemap_fault(struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vmf->vma->vm_file);
	vm_fault_t ret;

	down_read(&F2FS_I(inode)->i_mmap_sem);
	ret = filemap_fault(vmf);
	up_read(&F2FS_I(inode)->i_mmap_sem);

	if (!ret)
		f2fs_update_iostat(F2FS_I_SB(inode), APP_MAPPED_READ_IO,
							F2FS_BLKSIZE);

	trace_f2fs_filemap_fault(inode, vmf->pgoff, (unsigned long)ret);

	return ret;
}
static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	bool need_alloc = true;
	int err = 0;

	if (unlikely(f2fs_cp_error(sbi))) {
		err = -EIO;
		goto err;
	}

	if (!f2fs_is_checkpoint_ready(sbi)) {
		err = -ENOSPC;
		goto err;
	}

#ifdef CONFIG_F2FS_FS_COMPRESSION
	if (f2fs_compressed_file(inode)) {
		int ret = f2fs_is_compressed_cluster(inode, page->index);

		if (ret < 0) {
			err = ret;
			goto err;
		} else if (ret) {
			if (ret < F2FS_I(inode)->i_cluster_size) {
				err = -EAGAIN;
				goto err;
			}
			need_alloc = false;
		}
	}
#endif
	/* should do this outside of any locked page */
	if (need_alloc)
		f2fs_balance_fs(sbi, true);

	sb_start_pagefault(inode->i_sb);

	f2fs_bug_on(sbi, f2fs_has_inline_data(inode));

	file_update_time(vmf->vma->vm_file);
	down_read(&F2FS_I(inode)->i_mmap_sem);
	lock_page(page);
	if (unlikely(page->mapping != inode->i_mapping ||
			page_offset(page) > i_size_read(inode) ||
			!PageUptodate(page))) {
		unlock_page(page);
		err = -EFAULT;
		goto out_sem;
	}

	if (need_alloc) {
		/* block allocation */
		f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true);
		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = f2fs_get_block(&dn, page->index);
		f2fs_put_dnode(&dn);
		f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);
	}

#ifdef CONFIG_F2FS_FS_COMPRESSION
	if (!need_alloc) {
		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = f2fs_get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
		f2fs_put_dnode(&dn);
	}
#endif
	if (err) {
		unlock_page(page);
		goto out_sem;
	}

	f2fs_wait_on_page_writeback(page, DATA, false, true);

	/* wait for GCed page writeback via META_MAPPING */
	f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);

	/*
	 * check to see if the page is mapped already (no holes)
	 */
	if (PageMappedToDisk(page))
		goto out_sem;

	/* page is wholly or partially inside EOF */
	if (((loff_t)(page->index + 1) << PAGE_SHIFT) >
						i_size_read(inode)) {
		loff_t offset;

		offset = i_size_read(inode) & ~PAGE_MASK;
		zero_user_segment(page, offset, PAGE_SIZE);
	}
	set_page_dirty(page);
	if (!PageUptodate(page))
		SetPageUptodate(page);

	f2fs_update_iostat(sbi, APP_MAPPED_IO, F2FS_BLKSIZE);
	f2fs_update_time(sbi, REQ_TIME);

	trace_f2fs_vm_page_mkwrite(page, DATA);
out_sem:
	up_read(&F2FS_I(inode)->i_mmap_sem);

	sb_end_pagefault(inode->i_sb);
err:
	return block_page_mkwrite_return(err);
}
static const struct vm_operations_struct f2fs_file_vm_ops = {
	.fault		= f2fs_filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= f2fs_vm_page_mkwrite,
};
static int get_parent_ino(struct inode *inode, nid_t *pino)
{
	struct dentry *dentry;

	/*
	 * Make sure to get the non-deleted alias.  The alias associated with
	 * the open file descriptor being fsync()'ed may be deleted already.
	 */
	dentry = d_find_alias(inode);
	if (!dentry)
		return 0;

	*pino = parent_ino(dentry);
	dput(dentry);
	return 1;
}
static inline enum cp_reason_type need_do_checkpoint(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	enum cp_reason_type cp_reason = CP_NO_NEEDED;

	if (!S_ISREG(inode->i_mode))
		cp_reason = CP_NON_REGULAR;
	else if (f2fs_compressed_file(inode))
		cp_reason = CP_COMPRESSED;
	else if (inode->i_nlink != 1)
		cp_reason = CP_HARDLINK;
	else if (is_sbi_flag_set(sbi, SBI_NEED_CP))
		cp_reason = CP_SB_NEED_CP;
	else if (file_wrong_pino(inode))
		cp_reason = CP_WRONG_PINO;
	else if (!f2fs_space_for_roll_forward(sbi))
		cp_reason = CP_NO_SPC_ROLL;
	else if (!f2fs_is_checkpointed_node(sbi, F2FS_I(inode)->i_pino))
		cp_reason = CP_NODE_NEED_CP;
	else if (test_opt(sbi, FASTBOOT))
		cp_reason = CP_FASTBOOT_MODE;
	else if (F2FS_OPTION(sbi).active_logs == 2)
		cp_reason = CP_SPEC_LOG_NUM;
	else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT &&
		f2fs_need_dentry_mark(sbi, inode->i_ino) &&
		f2fs_exist_written_data(sbi, F2FS_I(inode)->i_pino,
							TRANS_DIR_INO))
		cp_reason = CP_RECOVER_DIR;

	return cp_reason;
}
static bool need_inode_page_update(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct page *i = find_get_page(NODE_MAPPING(sbi), ino);
	bool ret = false;
	/* we still need to detect any pending inode updates */
	if ((i && PageDirty(i)) || f2fs_need_inode_block_update(sbi, ino))
		ret = true;
	f2fs_put_page(i, 0);
	return ret;
}
static void try_to_fix_pino(struct inode *inode)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	nid_t pino;

	down_write(&fi->i_sem);
	if (file_wrong_pino(inode) && inode->i_nlink == 1 &&
			get_parent_ino(inode, &pino)) {
		f2fs_i_pino_write(inode, pino);
		file_got_pino(inode);
	}
	up_write(&fi->i_sem);
}
static int f2fs_do_sync_file(struct file *file, loff_t start, loff_t end,
						int datasync, bool atomic)
{
	struct inode *inode = file->f_mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t ino = inode->i_ino;
	int ret = 0;
	enum cp_reason_type cp_reason = 0;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.for_reclaim = 0,
	};
	unsigned int seq_id = 0;

	if (unlikely(f2fs_readonly(inode->i_sb) ||
				is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
		return 0;

	trace_f2fs_sync_file_enter(inode);

	if (S_ISDIR(inode->i_mode))
		goto go_write;

	/* if fdatasync is triggered, let's do in-place-update */
	if (datasync || get_dirty_pages(inode) <= SM_I(sbi)->min_fsync_blocks)
		set_inode_flag(inode, FI_NEED_IPU);
	ret = file_write_and_wait_range(file, start, end);
	clear_inode_flag(inode, FI_NEED_IPU);

	if (ret) {
		trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
		return ret;
	}

	/* if the inode is dirty, let's recover all the time */
	if (!f2fs_skip_inode_update(inode, datasync)) {
		f2fs_write_inode(inode, NULL);
		goto go_write;
	}

	/*
	 * if there is no written data, don't waste time writing recovery info.
	 */
	if (!is_inode_flag_set(inode, FI_APPEND_WRITE) &&
			!f2fs_exist_written_data(sbi, ino, APPEND_INO)) {

		/* it may call write_inode just prior to fsync */
		if (need_inode_page_update(sbi, ino))
			goto go_write;

		if (is_inode_flag_set(inode, FI_UPDATE_WRITE) ||
				f2fs_exist_written_data(sbi, ino, UPDATE_INO))
			goto flush_out;
		goto out;
	}
go_write:
	/*
	 * Both fdatasync() and fsync() can be recovered after a sudden
	 * power-off.
	 */
	down_read(&F2FS_I(inode)->i_sem);
	cp_reason = need_do_checkpoint(inode);
	up_read(&F2FS_I(inode)->i_sem);

	if (cp_reason) {
		/* all the dirty node pages should be flushed for POR */
		ret = f2fs_sync_fs(inode->i_sb, 1);

		/*
		 * We've secured consistency through sync_fs. The following
		 * pino will be used only for fsynced inodes after checkpoint.
		 */
		try_to_fix_pino(inode);
		clear_inode_flag(inode, FI_APPEND_WRITE);
		clear_inode_flag(inode, FI_UPDATE_WRITE);
		goto out;
	}
sync_nodes:
	atomic_inc(&sbi->wb_sync_req[NODE]);
	ret = f2fs_fsync_node_pages(sbi, inode, &wbc, atomic, &seq_id);
	atomic_dec(&sbi->wb_sync_req[NODE]);
	if (ret)
		goto out;

	/* if cp_error was enabled, we should avoid an infinite loop */
	if (unlikely(f2fs_cp_error(sbi))) {
		ret = -EIO;
		goto out;
	}

	if (f2fs_need_inode_block_update(sbi, ino)) {
		f2fs_mark_inode_dirty_sync(inode, true);
		f2fs_write_inode(inode, NULL);
		goto sync_nodes;
	}

	/*
	 * If it's an atomic_write, it's fine to keep only write ordering. So
	 * here we don't need to wait for node write completion, since we use
	 * the node chain which serializes node blocks. If one of the node
	 * writes is reordered, we simply see a broken chain, which stops
	 * roll-forward recovery. It means we'll recover all or none of the
	 * node blocks given the fsync mark.
	 */
	if (!atomic) {
		ret = f2fs_wait_on_node_pages_writeback(sbi, seq_id);
		if (ret)
			goto out;
	}

	/* once recovery info is written, we don't need to track this */
	f2fs_remove_ino_entry(sbi, ino, APPEND_INO);
	clear_inode_flag(inode, FI_APPEND_WRITE);
flush_out:
	if (!atomic && F2FS_OPTION(sbi).fsync_mode != FSYNC_MODE_NOBARRIER)
		ret = f2fs_issue_flush(sbi, inode->i_ino);
	if (!ret) {
		f2fs_remove_ino_entry(sbi, ino, UPDATE_INO);
		clear_inode_flag(inode, FI_UPDATE_WRITE);
		f2fs_remove_ino_entry(sbi, ino, FLUSH_INO);
	}
	f2fs_update_time(sbi, REQ_TIME);
out:
	trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
	f2fs_trace_ios(NULL, 1);
	return ret;
}
int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{
	if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(file)))))
		return -EIO;
	return f2fs_do_sync_file(file, start, end, datasync, false);
}
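
/*
 * Note (summary of the logic above, not from the original source): for a
 * plain fsync(), f2fs_do_sync_file() either issues a full checkpoint when
 * need_do_checkpoint() reports a cp_reason, or only fsyncs the inode's node
 * pages and relies on the roll-forward recovery metadata tracked through the
 * APPEND_INO/UPDATE_INO entries to restore the file after a sudden power-off.
 */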
static bool __found_offset(struct address_space *mapping, block_t blkaddr,
				pgoff_t index, int whence)
{
	switch (whence) {
	case SEEK_DATA:
		if (__is_valid_data_blkaddr(blkaddr))
			return true;
		if (blkaddr == NEW_ADDR &&
		    xa_get_mark(&mapping->i_pages, index, PAGECACHE_TAG_DIRTY))
			return true;
		break;
	case SEEK_HOLE:
		if (blkaddr == NULL_ADDR)
			return true;
		break;
	}
	return false;
}
static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes = inode->i_sb->s_maxbytes;
	struct dnode_of_data dn;
	pgoff_t pgofs, end_offset;
	loff_t data_ofs = offset;
	loff_t isize;
	int err = 0;

	inode_lock(inode);

	isize = i_size_read(inode);
	if (offset >= isize)
		goto fail;

	/* handle inline data case */
	if (f2fs_has_inline_data(inode)) {
		if (whence == SEEK_HOLE) {
			data_ofs = isize;
			goto found;
		} else if (whence == SEEK_DATA) {
			data_ofs = offset;
			goto found;
		}
	}

	pgofs = (pgoff_t)(offset >> PAGE_SHIFT);

	for (; data_ofs < isize; data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = f2fs_get_dnode_of_data(&dn, pgofs, LOOKUP_NODE);
		if (err && err != -ENOENT) {
			goto fail;
		} else if (err == -ENOENT) {
			/* direct node does not exist */
			if (whence == SEEK_DATA) {
				pgofs = f2fs_get_next_page_offset(&dn, pgofs);
				continue;
			} else {
				goto found;
			}
		}

		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);

		/* find data/hole in dnode block */
		for (; dn.ofs_in_node < end_offset;
				dn.ofs_in_node++, pgofs++,
				data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
			block_t blkaddr;

			blkaddr = f2fs_data_blkaddr(&dn);

			if (__is_valid_data_blkaddr(blkaddr) &&
				!f2fs_is_valid_blkaddr(F2FS_I_SB(inode),
					blkaddr, DATA_GENERIC_ENHANCE)) {
				f2fs_put_dnode(&dn);
				goto fail;
			}

			if (__found_offset(file->f_mapping, blkaddr,
							pgofs, whence)) {
				f2fs_put_dnode(&dn);
				goto found;
			}
		}
		f2fs_put_dnode(&dn);
	}

	if (whence == SEEK_DATA)
		goto fail;
found:
	if (whence == SEEK_HOLE && data_ofs > isize)
		data_ofs = isize;
	inode_unlock(inode);
	return vfs_setpos(file, data_ofs, maxbytes);
fail:
	inode_unlock(inode);
	return -ENXIO;
}
static loff_t f2fs_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes = inode->i_sb->s_maxbytes;

	switch (whence) {
	case SEEK_SET:
	case SEEK_CUR:
	case SEEK_END:
		return generic_file_llseek_size(file, offset, whence,
						maxbytes, i_size_read(inode));
	case SEEK_DATA:
	case SEEK_HOLE:
		if (offset < 0)
			return -ENXIO;
		return f2fs_seek_block(file, offset, whence);
	}

	return -EINVAL;
}
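
/*
 * Userspace usage sketch (illustrative only, not part of this file): the
 * SEEK_DATA/SEEK_HOLE support above is what makes sparse-file scanning work
 * on f2fs:
 *
 *	off_t data = lseek(fd, 0, SEEK_DATA);	 // first non-hole offset
 *	off_t hole = lseek(fd, data, SEEK_HOLE); // end of that data extent
 *
 * Both calls return -1 with errno == ENXIO when no such offset exists.
 */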
static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(file);
	int err;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;

	if (!f2fs_is_compress_backend_ready(inode))
		return -EOPNOTSUPP;

	/* we don't need to use inline_data strictly */
	err = f2fs_convert_inline_inode(inode);
	if (err)
		return err;

	file_accessed(file);
	vma->vm_ops = &f2fs_file_vm_ops;
	set_inode_flag(inode, FI_MMAP_FILE);
	return 0;
}
static int f2fs_file_open(struct inode *inode, struct file *filp)
{
	int err = fscrypt_file_open(inode, filp);

	if (err)
		return err;

	if (!f2fs_is_compress_backend_ready(inode))
		return -EOPNOTSUPP;

	err = fsverity_file_open(inode, filp);
	if (err)
		return err;

	filp->f_mode |= FMODE_NOWAIT;

	return dquot_file_open(inode, filp);
}
void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct f2fs_node *raw_node;
	int nr_free = 0, ofs = dn->ofs_in_node, len = count;
	__le32 *addr;
	int base = 0;
	bool compressed_cluster = false;
	int cluster_index = 0, valid_blocks = 0;
	int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
	bool released = !atomic_read(&F2FS_I(dn->inode)->i_compr_blocks);

	if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode))
		base = get_extra_isize(dn->inode);

	raw_node = F2FS_NODE(dn->node_page);
	addr = blkaddr_in_node(raw_node) + base + ofs;

	/* Assumption: truncation starts on a cluster boundary */
	for (; count > 0; count--, addr++, dn->ofs_in_node++, cluster_index++) {
		block_t blkaddr = le32_to_cpu(*addr);

		if (f2fs_compressed_file(dn->inode) &&
					!(cluster_index & (cluster_size - 1))) {
			if (compressed_cluster)
				f2fs_i_compr_blocks_update(dn->inode,
							valid_blocks, false);
			compressed_cluster = (blkaddr == COMPRESS_ADDR);
			valid_blocks = 0;
		}

		if (blkaddr == NULL_ADDR)
			continue;

		dn->data_blkaddr = NULL_ADDR;
		f2fs_set_data_blkaddr(dn);

		if (__is_valid_data_blkaddr(blkaddr)) {
			if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
					DATA_GENERIC_ENHANCE))
				continue;
			if (compressed_cluster)
				valid_blocks++;
		}

		if (dn->ofs_in_node == 0 && IS_INODE(dn->node_page))
			clear_inode_flag(dn->inode, FI_FIRST_BLOCK_WRITTEN);

		f2fs_invalidate_blocks(sbi, blkaddr);

		if (!released || blkaddr != COMPRESS_ADDR)
			nr_free++;
	}

	if (compressed_cluster)
		f2fs_i_compr_blocks_update(dn->inode, valid_blocks, false);

	if (nr_free) {
		pgoff_t fofs;
		/*
		 * once we invalidate a valid blkaddr in range [ofs, ofs + count],
		 * we will invalidate all blkaddrs in the whole range.
		 */
		fofs = f2fs_start_bidx_of_node(ofs_of_node(dn->node_page),
							dn->inode) + ofs;
		f2fs_update_extent_cache_range(dn, fofs, 0, len);
		dec_valid_block_count(sbi, dn->inode, nr_free);
	}
	dn->ofs_in_node = ofs;

	f2fs_update_time(sbi, REQ_TIME);
	trace_f2fs_truncate_data_blocks_range(dn->inode, dn->nid,
					 dn->ofs_in_node, nr_free);
}
void f2fs_truncate_data_blocks(struct dnode_of_data *dn)
{
	f2fs_truncate_data_blocks_range(dn, ADDRS_PER_BLOCK(dn->inode));
}
static int truncate_partial_data_page(struct inode *inode, u64 from,
								bool cache_only)
{
	loff_t offset = from & (PAGE_SIZE - 1);
	pgoff_t index = from >> PAGE_SHIFT;
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	if (!offset && !cache_only)
		return 0;

	if (cache_only) {
		page = find_lock_page(mapping, index);
		if (page && PageUptodate(page))
			goto truncate_out;
		f2fs_put_page(page, 1);
		return 0;
	}

	page = f2fs_get_lock_data_page(inode, index, true);
	if (IS_ERR(page))
		return PTR_ERR(page) == -ENOENT ? 0 : PTR_ERR(page);
truncate_out:
	f2fs_wait_on_page_writeback(page, DATA, true, true);
	zero_user(page, offset, PAGE_SIZE - offset);

	/* An encrypted inode should have a key and truncate the last page. */
	f2fs_bug_on(F2FS_I_SB(inode), cache_only && IS_ENCRYPTED(inode));
	if (!cache_only)
		set_page_dirty(page);
	f2fs_put_page(page, 1);
	return 0;
}
int f2fs_do_truncate_blocks(struct inode *inode, u64 from, bool lock)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	pgoff_t free_from;
	int count = 0, err = 0;
	struct page *ipage;
	bool truncate_page = false;

	trace_f2fs_truncate_blocks_enter(inode, from);

	free_from = (pgoff_t)F2FS_BLK_ALIGN(from);

	if (free_from >= sbi->max_file_blocks)
		goto free_partial;

	if (lock)
		f2fs_lock_op(sbi);

	ipage = f2fs_get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto out;
	}

	if (f2fs_has_inline_data(inode)) {
		f2fs_truncate_inline_inode(inode, ipage, from);
		f2fs_put_page(ipage, 1);
		truncate_page = true;
		goto out;
	}

	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = f2fs_get_dnode_of_data(&dn, free_from, LOOKUP_NODE_RA);
	if (err) {
		if (err == -ENOENT)
			goto free_next;
		goto out;
	}

	count = ADDRS_PER_PAGE(dn.node_page, inode);

	count -= dn.ofs_in_node;
	f2fs_bug_on(sbi, count < 0);

	if (dn.ofs_in_node || IS_INODE(dn.node_page)) {
		f2fs_truncate_data_blocks_range(&dn, count);
		free_from += count;
	}

	f2fs_put_dnode(&dn);
free_next:
	err = f2fs_truncate_inode_blocks(inode, free_from);
out:
	if (lock)
		f2fs_unlock_op(sbi);
free_partial:
	/* lastly zero out the first data page */
	if (!err)
		err = truncate_partial_data_page(inode, from, truncate_page);

	trace_f2fs_truncate_blocks_exit(inode, err);
	return err;
}
int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock)
{
	u64 free_from = from;
	int err;

#ifdef CONFIG_F2FS_FS_COMPRESSION
	/*
	 * for compressed files, only cluster-size-aligned truncation is
	 * supported.
	 */
	if (f2fs_compressed_file(inode))
		free_from = round_up(from,
				F2FS_I(inode)->i_cluster_size << PAGE_SHIFT);
#endif

	err = f2fs_do_truncate_blocks(inode, free_from, lock);
	if (err)
		return err;

#ifdef CONFIG_F2FS_FS_COMPRESSION
	if (from != free_from) {
		err = f2fs_truncate_partial_cluster(inode, from, lock);
		if (err)
			return err;
	}
#endif

	return 0;
}
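
/*
 * Worked example (illustrative): with 4KiB pages and a cluster size of 4
 * blocks, truncating a compressed file to byte 10000 first rounds free_from
 * up to the 16KiB cluster boundary (round_up(10000, 4 << PAGE_SHIFT)), frees
 * whole clusters from there on, and then lets
 * f2fs_truncate_partial_cluster() deal with the partially-truncated cluster
 * in front of it.
 */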
int f2fs_truncate(struct inode *inode)
{
	int err;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;

	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
				S_ISLNK(inode->i_mode)))
		return 0;

	trace_f2fs_truncate(inode);

	if (time_to_inject(F2FS_I_SB(inode), FAULT_TRUNCATE)) {
		f2fs_show_injection_info(F2FS_I_SB(inode), FAULT_TRUNCATE);
		return -EIO;
	}

	/* we should check inline_data size */
	if (!f2fs_may_inline_data(inode)) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			return err;
	}

	err = f2fs_truncate_blocks(inode, i_size_read(inode), true);
	if (err)
		return err;

	inode->i_mtime = inode->i_ctime = current_time(inode);
	f2fs_mark_inode_dirty_sync(inode, false);
	return 0;
}
int f2fs_getattr(const struct path *path, struct kstat *stat,
		 u32 request_mask, unsigned int query_flags)
{
	struct inode *inode = d_inode(path->dentry);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_inode *ri;
	unsigned int flags;

	if (f2fs_has_extra_attr(inode) &&
			f2fs_sb_has_inode_crtime(F2FS_I_SB(inode)) &&
			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_crtime)) {
		stat->result_mask |= STATX_BTIME;
		stat->btime.tv_sec = fi->i_crtime.tv_sec;
		stat->btime.tv_nsec = fi->i_crtime.tv_nsec;
	}

	flags = fi->i_flags;
	if (flags & F2FS_COMPR_FL)
		stat->attributes |= STATX_ATTR_COMPRESSED;
	if (flags & F2FS_APPEND_FL)
		stat->attributes |= STATX_ATTR_APPEND;
	if (IS_ENCRYPTED(inode))
		stat->attributes |= STATX_ATTR_ENCRYPTED;
	if (flags & F2FS_IMMUTABLE_FL)
		stat->attributes |= STATX_ATTR_IMMUTABLE;
	if (flags & F2FS_NODUMP_FL)
		stat->attributes |= STATX_ATTR_NODUMP;
	if (IS_VERITY(inode))
		stat->attributes |= STATX_ATTR_VERITY;

	stat->attributes_mask |= (STATX_ATTR_COMPRESSED |
				  STATX_ATTR_APPEND |
				  STATX_ATTR_ENCRYPTED |
				  STATX_ATTR_IMMUTABLE |
				  STATX_ATTR_NODUMP |
				  STATX_ATTR_VERITY);

	generic_fillattr(inode, stat);

	/* we need to show initial sectors used for inline_data/dentries */
	if ((S_ISREG(inode->i_mode) && f2fs_has_inline_data(inode)) ||
					f2fs_has_inline_dentry(inode))
		stat->blocks += (stat->size + 511) >> 9;

	return 0;
}
#ifdef CONFIG_F2FS_FS_POSIX_ACL
static void __setattr_copy(struct inode *inode, const struct iattr *attr)
{
	unsigned int ia_valid = attr->ia_valid;

	if (ia_valid & ATTR_UID)
		inode->i_uid = attr->ia_uid;
	if (ia_valid & ATTR_GID)
		inode->i_gid = attr->ia_gid;
	if (ia_valid & ATTR_ATIME)
		inode->i_atime = attr->ia_atime;
	if (ia_valid & ATTR_MTIME)
		inode->i_mtime = attr->ia_mtime;
	if (ia_valid & ATTR_CTIME)
		inode->i_ctime = attr->ia_ctime;
	if (ia_valid & ATTR_MODE) {
		umode_t mode = attr->ia_mode;

		if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
			mode &= ~S_ISGID;
		set_acl_inode(inode, mode);
	}
}
#else
#define __setattr_copy setattr_copy
#endif
int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	int err;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;

	if ((attr->ia_valid & ATTR_SIZE) &&
		!f2fs_is_compress_backend_ready(inode))
		return -EOPNOTSUPP;

	err = setattr_prepare(dentry, attr);
	if (err)
		return err;

	err = fscrypt_prepare_setattr(dentry, attr);
	if (err)
		return err;

	err = fsverity_prepare_setattr(dentry, attr);
	if (err)
		return err;

	if (is_quota_modification(inode, attr)) {
		err = dquot_initialize(inode);
		if (err)
			return err;
	}
	if ((attr->ia_valid & ATTR_UID &&
		!uid_eq(attr->ia_uid, inode->i_uid)) ||
		(attr->ia_valid & ATTR_GID &&
		!gid_eq(attr->ia_gid, inode->i_gid))) {
		f2fs_lock_op(F2FS_I_SB(inode));
		err = dquot_transfer(inode, attr);
		if (err) {
			set_sbi_flag(F2FS_I_SB(inode),
					SBI_QUOTA_NEED_REPAIR);
			f2fs_unlock_op(F2FS_I_SB(inode));
			return err;
		}
		/*
		 * update uid/gid under lock_op(), so that dquot and inode can
		 * be updated atomically.
		 */
		if (attr->ia_valid & ATTR_UID)
			inode->i_uid = attr->ia_uid;
		if (attr->ia_valid & ATTR_GID)
			inode->i_gid = attr->ia_gid;
		f2fs_mark_inode_dirty_sync(inode, true);
		f2fs_unlock_op(F2FS_I_SB(inode));
	}

	if (attr->ia_valid & ATTR_SIZE) {
		loff_t old_size = i_size_read(inode);

		if (attr->ia_size > MAX_INLINE_DATA(inode)) {
			/*
			 * should convert the inline inode before i_size_write
			 * to keep the size within the inline_data limit while
			 * the inline flag is set.
			 */
			err = f2fs_convert_inline_inode(inode);
			if (err)
				return err;
		}

		down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
		down_write(&F2FS_I(inode)->i_mmap_sem);

		truncate_setsize(inode, attr->ia_size);

		if (attr->ia_size <= old_size)
			err = f2fs_truncate(inode);
		/*
		 * do not trim all blocks after i_size if target size is
		 * larger than i_size.
		 */
		up_write(&F2FS_I(inode)->i_mmap_sem);
		up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
		if (err)
			return err;

		spin_lock(&F2FS_I(inode)->i_size_lock);
		inode->i_mtime = inode->i_ctime = current_time(inode);
		F2FS_I(inode)->last_disk_size = i_size_read(inode);
		spin_unlock(&F2FS_I(inode)->i_size_lock);
	}

	__setattr_copy(inode, attr);

	if (attr->ia_valid & ATTR_MODE) {
		err = posix_acl_chmod(inode, f2fs_get_inode_mode(inode));
		if (err || is_inode_flag_set(inode, FI_ACL_MODE)) {
			inode->i_mode = F2FS_I(inode)->i_acl_mode;
			clear_inode_flag(inode, FI_ACL_MODE);
		}
	}

	/* file size may have changed here */
	f2fs_mark_inode_dirty_sync(inode, true);

	/* inode change will produce dirty node pages flushed by checkpoint */
	f2fs_balance_fs(F2FS_I_SB(inode), true);

	return err;
}
const struct inode_operations f2fs_file_inode_operations = {
	.getattr	= f2fs_getattr,
	.setattr	= f2fs_setattr,
	.get_acl	= f2fs_get_acl,
	.set_acl	= f2fs_set_acl,
	.listxattr	= f2fs_listxattr,
	.fiemap		= f2fs_fiemap,
};
static int fill_zero(struct inode *inode, pgoff_t index,
					loff_t start, loff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page;

	if (!len)
		return 0;

	f2fs_balance_fs(sbi, true);

	f2fs_lock_op(sbi);
	page = f2fs_get_new_data_page(inode, NULL, index, false);
	f2fs_unlock_op(sbi);

	if (IS_ERR(page))
		return PTR_ERR(page);

	f2fs_wait_on_page_writeback(page, DATA, true, true);
	zero_user(page, start, len);
	set_page_dirty(page);
	f2fs_put_page(page, 1);
	return 0;
}
int f2fs_truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end)
{
	int err;

	while (pg_start < pg_end) {
		struct dnode_of_data dn;
		pgoff_t end_offset, count;

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = f2fs_get_dnode_of_data(&dn, pg_start, LOOKUP_NODE);
		if (err) {
			if (err == -ENOENT) {
				pg_start = f2fs_get_next_page_offset(&dn,
								pg_start);
				continue;
			}
			return err;
		}

		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
		count = min(end_offset - dn.ofs_in_node, pg_end - pg_start);

		f2fs_bug_on(F2FS_I_SB(inode), count == 0 || count > end_offset);

		f2fs_truncate_data_blocks_range(&dn, count);
		f2fs_put_dnode(&dn);

		pg_start += count;
	}
	return 0;
}
static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
	pgoff_t pg_start, pg_end;
	loff_t off_start, off_end;
	int ret;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
	pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;

	off_start = offset & (PAGE_SIZE - 1);
	off_end = (offset + len) & (PAGE_SIZE - 1);

	if (pg_start == pg_end) {
		ret = fill_zero(inode, pg_start, off_start,
						off_end - off_start);
		if (ret)
			return ret;
	} else {
		if (off_start) {
			ret = fill_zero(inode, pg_start++, off_start,
						PAGE_SIZE - off_start);
			if (ret)
				return ret;
		}
		if (off_end) {
			ret = fill_zero(inode, pg_end, 0, off_end);
			if (ret)
				return ret;
		}

		if (pg_start < pg_end) {
			struct address_space *mapping = inode->i_mapping;
			loff_t blk_start, blk_end;
			struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

			f2fs_balance_fs(sbi, true);

			blk_start = (loff_t)pg_start << PAGE_SHIFT;
			blk_end = (loff_t)pg_end << PAGE_SHIFT;

			down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
			down_write(&F2FS_I(inode)->i_mmap_sem);

			truncate_inode_pages_range(mapping, blk_start,
					blk_end - 1);

			f2fs_lock_op(sbi);
			ret = f2fs_truncate_hole(inode, pg_start, pg_end);
			f2fs_unlock_op(sbi);

			up_write(&F2FS_I(inode)->i_mmap_sem);
			up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
		}
	}

	return ret;
}
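
/*
 * Note (summary, not from the original source): punch_hole() splits the
 * request into up to three parts. Partial pages at the head and/or tail are
 * zeroed in the page cache via fill_zero(), while the fully-covered pages in
 * between have their blocks actually deallocated through f2fs_truncate_hole().
 */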
static int __read_out_blkaddrs(struct inode *inode, block_t *blkaddr,
				int *do_replace, pgoff_t off, pgoff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	int ret, done, i;

next_dnode:
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	ret = f2fs_get_dnode_of_data(&dn, off, LOOKUP_NODE_RA);
	if (ret && ret != -ENOENT) {
		return ret;
	} else if (ret == -ENOENT) {
		if (dn.max_level == 0)
			return -ENOENT;
		done = min((pgoff_t)ADDRS_PER_BLOCK(inode) -
						dn.ofs_in_node, len);
		blkaddr += done;
		do_replace += done;
		goto next;
	}

	done = min((pgoff_t)ADDRS_PER_PAGE(dn.node_page, inode) -
							dn.ofs_in_node, len);
	for (i = 0; i < done; i++, blkaddr++, do_replace++, dn.ofs_in_node++) {
		*blkaddr = f2fs_data_blkaddr(&dn);

		if (__is_valid_data_blkaddr(*blkaddr) &&
			!f2fs_is_valid_blkaddr(sbi, *blkaddr,
					DATA_GENERIC_ENHANCE)) {
			f2fs_put_dnode(&dn);
			return -EFSCORRUPTED;
		}

		if (!f2fs_is_checkpointed_data(sbi, *blkaddr)) {

			if (f2fs_lfs_mode(sbi)) {
				f2fs_put_dnode(&dn);
				return -EOPNOTSUPP;
			}

			/* do not invalidate this block address */
			f2fs_update_data_blkaddr(&dn, NULL_ADDR);
			*do_replace = 1;
		}
	}
	f2fs_put_dnode(&dn);
next:
	len -= done;
	off += done;
	if (len)
		goto next_dnode;
	return 0;
}
static int __roll_back_blkaddrs(struct inode *inode, block_t *blkaddr,
				int *do_replace, pgoff_t off, int len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	int ret, i;

	for (i = 0; i < len; i++, do_replace++, blkaddr++) {
		if (*do_replace == 0)
			continue;

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		ret = f2fs_get_dnode_of_data(&dn, off + i, LOOKUP_NODE_RA);
		if (ret) {
			dec_valid_block_count(sbi, inode, 1);
			f2fs_invalidate_blocks(sbi, *blkaddr);
		} else {
			f2fs_update_data_blkaddr(&dn, *blkaddr);
		}
		f2fs_put_dnode(&dn);
	}
	return 0;
}
static int __clone_blkaddrs(struct inode *src_inode, struct inode *dst_inode,
			block_t *blkaddr, int *do_replace,
			pgoff_t src, pgoff_t dst, pgoff_t len, bool full)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(src_inode);
	pgoff_t i = 0;
	int ret;

	while (i < len) {
		if (blkaddr[i] == NULL_ADDR && !full) {
			i++;
			continue;
		}

		if (do_replace[i] || blkaddr[i] == NULL_ADDR) {
			struct dnode_of_data dn;
			struct node_info ni;
			size_t new_size;
			pgoff_t ilen;

			set_new_dnode(&dn, dst_inode, NULL, NULL, 0);
			ret = f2fs_get_dnode_of_data(&dn, dst + i, ALLOC_NODE);
			if (ret)
				return ret;

			ret = f2fs_get_node_info(sbi, dn.nid, &ni);
			if (ret) {
				f2fs_put_dnode(&dn);
				return ret;
			}

			ilen = min((pgoff_t)
				ADDRS_PER_PAGE(dn.node_page, dst_inode) -
						dn.ofs_in_node, len - i);
			do {
				dn.data_blkaddr = f2fs_data_blkaddr(&dn);
				f2fs_truncate_data_blocks_range(&dn, 1);

				if (do_replace[i]) {
					f2fs_i_blocks_write(src_inode,
							1, false, false);
					f2fs_i_blocks_write(dst_inode,
							1, true, false);
					f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
					blkaddr[i], ni.version, true, false);
				}
				do_replace[i] = 0;

				dn.ofs_in_node++;
				i++;
				new_size = (loff_t)(dst + i) << PAGE_SHIFT;
				if (dst_inode->i_size < new_size)
					f2fs_i_size_write(dst_inode, new_size);
			} while (--ilen && (do_replace[i] || blkaddr[i] == NULL_ADDR));

			f2fs_put_dnode(&dn);
		} else {
			struct page *psrc, *pdst;

			psrc = f2fs_get_lock_data_page(src_inode,
							src + i, true);
			if (IS_ERR(psrc))
				return PTR_ERR(psrc);
			pdst = f2fs_get_new_data_page(dst_inode, NULL, dst + i,
								true);
			if (IS_ERR(pdst)) {
				f2fs_put_page(psrc, 1);
				return PTR_ERR(pdst);
			}
			f2fs_copy_page(psrc, pdst);
			set_page_dirty(pdst);
			f2fs_put_page(pdst, 1);
			f2fs_put_page(psrc, 1);

			ret = f2fs_truncate_hole(src_inode,
						src + i, src + i + 1);
			if (ret)
				return ret;
			i++;
		}
	}
	return 0;
}
static int __exchange_data_block(struct inode *src_inode,
			struct inode *dst_inode, pgoff_t src, pgoff_t dst,
			pgoff_t len, bool full)
{
	block_t *src_blkaddr;
	int *do_replace;
	pgoff_t olen;
	int ret;

	while (len) {
		olen = min((pgoff_t)4 * ADDRS_PER_BLOCK(src_inode), len);

		src_blkaddr = f2fs_kvzalloc(F2FS_I_SB(src_inode),
					array_size(olen, sizeof(block_t)),
					GFP_NOFS);
		if (!src_blkaddr)
			return -ENOMEM;

		do_replace = f2fs_kvzalloc(F2FS_I_SB(src_inode),
					array_size(olen, sizeof(int)),
					GFP_NOFS);
		if (!do_replace) {
			kvfree(src_blkaddr);
			return -ENOMEM;
		}

		ret = __read_out_blkaddrs(src_inode, src_blkaddr,
					do_replace, src, olen);
		if (ret)
			goto roll_back;

		ret = __clone_blkaddrs(src_inode, dst_inode, src_blkaddr,
					do_replace, src, dst, olen, full);
		if (ret)
			goto roll_back;

		src += olen;
		dst += olen;
		len -= olen;

		kvfree(src_blkaddr);
		kvfree(do_replace);
	}
	return 0;

roll_back:
	__roll_back_blkaddrs(src_inode, src_blkaddr, do_replace, src, olen);
	kvfree(src_blkaddr);
	kvfree(do_replace);
	return ret;
}
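
/*
 * Note (summary): __exchange_data_block() works the range in chunks of
 * 4 * ADDRS_PER_BLOCK() entries. It snapshots the source block addresses
 * (__read_out_blkaddrs), rewires or copies them into the destination range
 * (__clone_blkaddrs), and on failure restores the saved addresses via
 * __roll_back_blkaddrs(). Both collapse range and insert range below are
 * built on this helper with src and dst ranges inside the same inode.
 */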
static int f2fs_do_collapse(struct inode *inode, loff_t offset, loff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	pgoff_t nrpages = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
	pgoff_t start = offset >> PAGE_SHIFT;
	pgoff_t end = (offset + len) >> PAGE_SHIFT;
	int ret;

	f2fs_balance_fs(sbi, true);

	/* avoid gc operation during block exchange */
	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
	down_write(&F2FS_I(inode)->i_mmap_sem);

	f2fs_lock_op(sbi);
	f2fs_drop_extent_tree(inode);
	truncate_pagecache(inode, offset);
	ret = __exchange_data_block(inode, inode, end, start, nrpages - end, true);
	f2fs_unlock_op(sbi);

	up_write(&F2FS_I(inode)->i_mmap_sem);
	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
	return ret;
}
static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
{
	loff_t new_size;
	int ret;

	if (offset + len >= i_size_read(inode))
		return -EINVAL;

	/* collapse range should be aligned to block size of f2fs. */
	if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
		return -EINVAL;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	if (ret)
		return ret;

	ret = f2fs_do_collapse(inode, offset, len);
	if (ret)
		return ret;

	/* write out all moved pages, if possible */
	down_write(&F2FS_I(inode)->i_mmap_sem);
	filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	truncate_pagecache(inode, offset);

	new_size = i_size_read(inode) - len;
	ret = f2fs_truncate_blocks(inode, new_size, true);
	up_write(&F2FS_I(inode)->i_mmap_sem);
	if (!ret)
		f2fs_i_size_write(inode, new_size);
	return ret;
}
static int f2fs_do_zero_range(struct dnode_of_data *dn, pgoff_t start,
								pgoff_t end)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	pgoff_t index = start;
	unsigned int ofs_in_node = dn->ofs_in_node;
	blkcnt_t count = 0;
	int ret;

	for (; index < end; index++, dn->ofs_in_node++) {
		if (f2fs_data_blkaddr(dn) == NULL_ADDR)
			count++;
	}

	dn->ofs_in_node = ofs_in_node;
	ret = f2fs_reserve_new_blocks(dn, count);
	if (ret)
		return ret;

	dn->ofs_in_node = ofs_in_node;
	for (index = start; index < end; index++, dn->ofs_in_node++) {
		dn->data_blkaddr = f2fs_data_blkaddr(dn);
		/*
		 * f2fs_reserve_new_blocks will not guarantee entire block
		 * allocation.
		 */
		if (dn->data_blkaddr == NULL_ADDR) {
			ret = -ENOSPC;
			break;
		}
		if (dn->data_blkaddr != NEW_ADDR) {
			f2fs_invalidate_blocks(sbi, dn->data_blkaddr);
			dn->data_blkaddr = NEW_ADDR;
			f2fs_set_data_blkaddr(dn);
		}
	}

	f2fs_update_extent_cache_range(dn, start, 0, index - start);

	return ret;
}
static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
								int mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct address_space *mapping = inode->i_mapping;
	pgoff_t index, pg_start, pg_end;
	loff_t new_size = i_size_read(inode);
	loff_t off_start, off_end;
	int ret = 0;

	ret = inode_newsize_ok(inode, (len + offset));
	if (ret)
		return ret;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	ret = filemap_write_and_wait_range(mapping, offset, offset + len - 1);
	if (ret)
		return ret;

	pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
	pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;

	off_start = offset & (PAGE_SIZE - 1);
	off_end = (offset + len) & (PAGE_SIZE - 1);

	if (pg_start == pg_end) {
		ret = fill_zero(inode, pg_start, off_start,
						off_end - off_start);
		if (ret)
			return ret;

		new_size = max_t(loff_t, new_size, offset + len);
	} else {
		if (off_start) {
			ret = fill_zero(inode, pg_start++, off_start,
						PAGE_SIZE - off_start);
			if (ret)
				return ret;

			new_size = max_t(loff_t, new_size,
					(loff_t)pg_start << PAGE_SHIFT);
		}

		for (index = pg_start; index < pg_end;) {
			struct dnode_of_data dn;
			unsigned int end_offset;
			pgoff_t end;

			down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
			down_write(&F2FS_I(inode)->i_mmap_sem);

			truncate_pagecache_range(inode,
				(loff_t)index << PAGE_SHIFT,
				((loff_t)pg_end << PAGE_SHIFT) - 1);

			f2fs_lock_op(sbi);

			set_new_dnode(&dn, inode, NULL, NULL, 0);
			ret = f2fs_get_dnode_of_data(&dn, index, ALLOC_NODE);
			if (ret) {
				f2fs_unlock_op(sbi);
				up_write(&F2FS_I(inode)->i_mmap_sem);
				up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
				goto out;
			}

			end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
			end = min(pg_end, end_offset - dn.ofs_in_node + index);

			ret = f2fs_do_zero_range(&dn, index, end);
			f2fs_put_dnode(&dn);

			f2fs_unlock_op(sbi);
			up_write(&F2FS_I(inode)->i_mmap_sem);
			up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

			f2fs_balance_fs(sbi, dn.node_changed);

			if (ret)
				goto out;

			index = end;
			new_size = max_t(loff_t, new_size,
					(loff_t)index << PAGE_SHIFT);
		}

		if (off_end) {
			ret = fill_zero(inode, pg_end, 0, off_end);
			if (ret)
				goto out;

			new_size = max_t(loff_t, new_size, offset + len);
		}
	}

out:
	if (new_size > i_size_read(inode)) {
		if (mode & FALLOC_FL_KEEP_SIZE)
			file_set_keep_isize(inode);
		else
			f2fs_i_size_write(inode, new_size);
	}
	return ret;
}
static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	pgoff_t nr, pg_start, pg_end, delta, idx;
	loff_t new_size;
	int ret = 0;

	new_size = i_size_read(inode) + len;
	ret = inode_newsize_ok(inode, new_size);
	if (ret)
		return ret;

	if (offset >= i_size_read(inode))
		return -EINVAL;

	/* insert range should be aligned to block size of f2fs. */
	if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
		return -EINVAL;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	f2fs_balance_fs(sbi, true);

	down_write(&F2FS_I(inode)->i_mmap_sem);
	ret = f2fs_truncate_blocks(inode, i_size_read(inode), true);
	up_write(&F2FS_I(inode)->i_mmap_sem);
	if (ret)
		return ret;

	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	if (ret)
		return ret;

	pg_start = offset >> PAGE_SHIFT;
	pg_end = (offset + len) >> PAGE_SHIFT;
	delta = pg_end - pg_start;
	idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);

	/* avoid gc operation during block exchange */
	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
	down_write(&F2FS_I(inode)->i_mmap_sem);
	truncate_pagecache(inode, offset);

	while (!ret && idx > pg_start) {
		nr = idx - pg_start;
		if (nr > delta)
			nr = delta;
		idx -= nr;

		f2fs_lock_op(sbi);
		f2fs_drop_extent_tree(inode);

		ret = __exchange_data_block(inode, inode, idx,
					idx + delta, nr, false);
		f2fs_unlock_op(sbi);
	}
	up_write(&F2FS_I(inode)->i_mmap_sem);
	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

	/* write out all moved pages, if possible */
	down_write(&F2FS_I(inode)->i_mmap_sem);
	filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	truncate_pagecache(inode, offset);
	up_write(&F2FS_I(inode)->i_mmap_sem);

	if (!ret)
		f2fs_i_size_write(inode, new_size);
	return ret;
}
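
/*
 * Note (summary, not from the original source): f2fs_insert_range() shifts
 * blocks towards higher offsets starting from the end of the file (idx counts
 * down towards pg_start), so that every source chunk is moved to its
 * destination before it could be overwritten; moving front-to-back would
 * clobber not-yet-moved blocks whenever the shift distance is smaller than
 * the remaining range.
 */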
static int expand_inode_data(struct inode *inode, loff_t offset,
					loff_t len, int mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_map_blocks map = { .m_next_pgofs = NULL,
			.m_next_extent = NULL, .m_seg_type = NO_CHECK_TYPE,
			.m_may_create = true };
	pgoff_t pg_end;
	loff_t new_size = i_size_read(inode);
	loff_t off_end;
	int err;

	err = inode_newsize_ok(inode, (len + offset));
	if (err)
		return err;

	err = f2fs_convert_inline_inode(inode);
	if (err)
		return err;

	f2fs_balance_fs(sbi, true);

	pg_end = ((unsigned long long)offset + len) >> PAGE_SHIFT;
	off_end = (offset + len) & (PAGE_SIZE - 1);

	map.m_lblk = ((unsigned long long)offset) >> PAGE_SHIFT;
	map.m_len = pg_end - map.m_lblk;
	if (off_end)
		map.m_len++;

	if (!map.m_len)
		return 0;

	if (f2fs_is_pinned_file(inode)) {
		block_t len = (map.m_len >> sbi->log_blocks_per_seg) <<
					sbi->log_blocks_per_seg;
		block_t done = 0;

		if (map.m_len % sbi->blocks_per_seg)
			len += sbi->blocks_per_seg;

		map.m_len = sbi->blocks_per_seg;
next_alloc:
		if (has_not_enough_free_secs(sbi, 0,
			GET_SEC_FROM_SEG(sbi, overprovision_segments(sbi)))) {
			down_write(&sbi->gc_lock);
			err = f2fs_gc(sbi, true, false, NULL_SEGNO);
			if (err && err != -ENODATA && err != -EAGAIN)
				goto out_err;
		}

		down_write(&sbi->pin_sem);

		f2fs_lock_op(sbi);
		f2fs_allocate_new_segment(sbi, CURSEG_COLD_DATA_PINNED);
		f2fs_unlock_op(sbi);

		map.m_seg_type = CURSEG_COLD_DATA_PINNED;
		err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_DIO);

		up_write(&sbi->pin_sem);

		done += map.m_len;
		len -= map.m_len;
		map.m_lblk += map.m_len;
		if (!err && len)
			goto next_alloc;

		map.m_len = done;
	} else {
		err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO);
	}
out_err:
	if (err) {
		pgoff_t last_off;

		if (!map.m_len)
			return err;

		last_off = map.m_lblk + map.m_len - 1;

		/* update new size to the failed position */
		new_size = (last_off == pg_end) ? offset + len :
					(loff_t)(last_off + 1) << PAGE_SHIFT;
	} else {
		new_size = ((loff_t)pg_end << PAGE_SHIFT) + off_end;
	}

	if (new_size > i_size_read(inode)) {
		if (mode & FALLOC_FL_KEEP_SIZE)
			file_set_keep_isize(inode);
		else
			f2fs_i_size_write(inode, new_size);
	}

	return err;
}
static long f2fs_fallocate(struct file *file, int mode,
				loff_t offset, loff_t len)
{
	struct inode *inode = file_inode(file);
	long ret = 0;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;
	if (!f2fs_is_checkpoint_ready(F2FS_I_SB(inode)))
		return -ENOSPC;
	if (!f2fs_is_compress_backend_ready(inode))
		return -EOPNOTSUPP;

	/* f2fs only supports ->fallocate for regular files */
	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	if (IS_ENCRYPTED(inode) &&
		(mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)))
		return -EOPNOTSUPP;

	if (f2fs_compressed_file(inode) &&
		(mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_COLLAPSE_RANGE |
			FALLOC_FL_ZERO_RANGE | FALLOC_FL_INSERT_RANGE)))
		return -EOPNOTSUPP;

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
			FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |
			FALLOC_FL_INSERT_RANGE))
		return -EOPNOTSUPP;

	inode_lock(inode);

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		if (offset >= inode->i_size)
			goto out;

		ret = punch_hole(inode, offset, len);
	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
		ret = f2fs_collapse_range(inode, offset, len);
	} else if (mode & FALLOC_FL_ZERO_RANGE) {
		ret = f2fs_zero_range(inode, offset, len, mode);
	} else if (mode & FALLOC_FL_INSERT_RANGE) {
		ret = f2fs_insert_range(inode, offset, len);
	} else {
		ret = expand_inode_data(inode, offset, len, mode);
	}

	if (!ret) {
		inode->i_mtime = inode->i_ctime = current_time(inode);
		f2fs_mark_inode_dirty_sync(inode, false);
		f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	}

out:
	inode_unlock(inode);

	trace_f2fs_fallocate(inode, mode, offset, len, ret);
	return ret;
}
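
/*
 * Userspace usage sketch (illustrative, not part of this file):
 *
 *	fallocate(fd, 0, 0, 1 << 20);			   // preallocate 1MiB
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE |
 *		      FALLOC_FL_KEEP_SIZE, off, len);	   // punch a hole
 *	fallocate(fd, FALLOC_FL_COLLAPSE_RANGE, off, len); // remove a range
 *
 * PUNCH_HOLE must be combined with KEEP_SIZE, which the VFS enforces before
 * this handler is reached.
 */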
static int f2fs_release_file(struct inode *inode, struct file *filp)
{
	/*
	 * f2fs_release_file is called at every close call, so we should not
	 * drop any in-memory pages due to a close issued by another process.
	 */
	if (!(filp->f_mode & FMODE_WRITE) ||
			atomic_read(&inode->i_writecount) != 1)
		return 0;

	/* some remaining atomic pages should be discarded */
	if (f2fs_is_atomic_file(inode))
		f2fs_drop_inmem_pages(inode);
	if (f2fs_is_volatile_file(inode)) {
		set_inode_flag(inode, FI_DROP_CACHE);
		filemap_fdatawrite(inode->i_mapping);
		clear_inode_flag(inode, FI_DROP_CACHE);
		clear_inode_flag(inode, FI_VOLATILE_FILE);
		stat_dec_volatile_write(inode);
	}
	return 0;
}
static int f2fs_file_flush(struct file *file, fl_owner_t id)
{
	struct inode *inode = file_inode(file);

	/*
	 * If the process doing a transaction crashed, we should roll back.
	 * Otherwise, other readers/writers could see a corrupted database
	 * until all the writers close the file. Since this has to happen
	 * before the file lock is dropped, it needs to be done in ->flush.
	 */
	if (f2fs_is_atomic_file(inode) &&
			F2FS_I(inode)->inmem_task == current)
		f2fs_drop_inmem_pages(inode);
	return 0;
}
static int f2fs_setflags_common(struct inode *inode, u32 iflags, u32 mask)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	u32 masked_flags = fi->i_flags & mask;

	f2fs_bug_on(F2FS_I_SB(inode), (iflags & ~mask));

	/* Is it quota file? Do not allow user to mess with it */
	if (IS_NOQUOTA(inode))
		return -EPERM;

	if ((iflags ^ masked_flags) & F2FS_CASEFOLD_FL) {
		if (!f2fs_sb_has_casefold(F2FS_I_SB(inode)))
			return -EOPNOTSUPP;
		if (!f2fs_empty_dir(inode))
			return -ENOTEMPTY;
	}

	if (iflags & (F2FS_COMPR_FL | F2FS_NOCOMP_FL)) {
		if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
			return -EOPNOTSUPP;
		if ((iflags & F2FS_COMPR_FL) && (iflags & F2FS_NOCOMP_FL))
			return -EINVAL;
	}

	if ((iflags ^ masked_flags) & F2FS_COMPR_FL) {
		if (masked_flags & F2FS_COMPR_FL) {
			if (!f2fs_disable_compressed_file(inode))
				return -EINVAL;
		}
		if (iflags & F2FS_NOCOMP_FL)
			return -EINVAL;
		if (iflags & F2FS_COMPR_FL) {
			if (!f2fs_may_compress(inode))
				return -EINVAL;
			if (S_ISREG(inode->i_mode) && inode->i_size)
				return -EINVAL;

			set_compress_context(inode);
		}
	}
	if ((iflags ^ masked_flags) & F2FS_NOCOMP_FL) {
		if (masked_flags & F2FS_COMPR_FL)
			return -EINVAL;
	}

	fi->i_flags = iflags | (fi->i_flags & ~mask);
	f2fs_bug_on(F2FS_I_SB(inode), (fi->i_flags & F2FS_COMPR_FL) &&
					(fi->i_flags & F2FS_NOCOMP_FL));

	if (fi->i_flags & F2FS_PROJINHERIT_FL)
		set_inode_flag(inode, FI_PROJ_INHERIT);
	else
		clear_inode_flag(inode, FI_PROJ_INHERIT);

	inode->i_ctime = current_time(inode);
	f2fs_set_inode_flags(inode);
	f2fs_mark_inode_dirty_sync(inode, true);
	return 0;
}
/* FS_IOC_GETFLAGS and FS_IOC_SETFLAGS support */

/*
 * To make a new on-disk f2fs i_flag gettable via FS_IOC_GETFLAGS, add an entry
 * for it to f2fs_fsflags_map[], and add its FS_*_FL equivalent to
 * F2FS_GETTABLE_FS_FL.  To also make it settable via FS_IOC_SETFLAGS, also add
 * its FS_*_FL equivalent to F2FS_SETTABLE_FS_FL.
 */
static const struct {
	u32 iflag;
	u32 fsflag;
} f2fs_fsflags_map[] = {
	{ F2FS_COMPR_FL,	FS_COMPR_FL },
	{ F2FS_SYNC_FL,		FS_SYNC_FL },
	{ F2FS_IMMUTABLE_FL,	FS_IMMUTABLE_FL },
	{ F2FS_APPEND_FL,	FS_APPEND_FL },
	{ F2FS_NODUMP_FL,	FS_NODUMP_FL },
	{ F2FS_NOATIME_FL,	FS_NOATIME_FL },
	{ F2FS_NOCOMP_FL,	FS_NOCOMP_FL },
	{ F2FS_INDEX_FL,	FS_INDEX_FL },
	{ F2FS_DIRSYNC_FL,	FS_DIRSYNC_FL },
	{ F2FS_PROJINHERIT_FL,	FS_PROJINHERIT_FL },
	{ F2FS_CASEFOLD_FL,	FS_CASEFOLD_FL },
};
#define F2FS_GETTABLE_FS_FL (		\
		FS_COMPR_FL |		\
		FS_SYNC_FL |		\
		FS_IMMUTABLE_FL |	\
		FS_APPEND_FL |		\
		FS_NODUMP_FL |		\
		FS_NOATIME_FL |		\
		FS_NOCOMP_FL |		\
		FS_INDEX_FL |		\
		FS_DIRSYNC_FL |		\
		FS_PROJINHERIT_FL |	\
		FS_ENCRYPT_FL |		\
		FS_INLINE_DATA_FL |	\
		FS_NOCOW_FL |		\
		FS_VERITY_FL |		\
		FS_CASEFOLD_FL)

#define F2FS_SETTABLE_FS_FL (		\
		FS_COMPR_FL |		\
		FS_SYNC_FL |		\
		FS_IMMUTABLE_FL |	\
		FS_APPEND_FL |		\
		FS_NODUMP_FL |		\
		FS_NOATIME_FL |		\
		FS_NOCOMP_FL |		\
		FS_DIRSYNC_FL |		\
		FS_PROJINHERIT_FL |	\
		FS_CASEFOLD_FL)
/* Convert f2fs on-disk i_flags to FS_IOC_{GET,SET}FLAGS flags */
static inline u32 f2fs_iflags_to_fsflags(u32 iflags)
{
	u32 fsflags = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(f2fs_fsflags_map); i++)
		if (iflags & f2fs_fsflags_map[i].iflag)
			fsflags |= f2fs_fsflags_map[i].fsflag;

	return fsflags;
}

/* Convert FS_IOC_{GET,SET}FLAGS flags to f2fs on-disk i_flags */
static inline u32 f2fs_fsflags_to_iflags(u32 fsflags)
{
	u32 iflags = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(f2fs_fsflags_map); i++)
		if (fsflags & f2fs_fsflags_map[i].fsflag)
			iflags |= f2fs_fsflags_map[i].iflag;

	return iflags;
}
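
/*
 * Userspace usage sketch (illustrative): these helpers back the generic
 * chattr(1)/lsattr(1) flag interface, e.g.:
 *
 *	int flags;
 *	ioctl(fd, FS_IOC_GETFLAGS, &flags);
 *	flags |= FS_NOATIME_FL;
 *	ioctl(fd, FS_IOC_SETFLAGS, &flags);
 */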
static int f2fs_ioc_getflags(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	u32 fsflags = f2fs_iflags_to_fsflags(fi->i_flags);

	if (IS_ENCRYPTED(inode))
		fsflags |= FS_ENCRYPT_FL;
	if (IS_VERITY(inode))
		fsflags |= FS_VERITY_FL;
	if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode))
		fsflags |= FS_INLINE_DATA_FL;
	if (is_inode_flag_set(inode, FI_PIN_FILE))
		fsflags |= FS_NOCOW_FL;

	fsflags &= F2FS_GETTABLE_FS_FL;

	return put_user(fsflags, (int __user *)arg);
}
static int f2fs_ioc_setflags(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	u32 fsflags, old_fsflags;
	u32 iflags;
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (get_user(fsflags, (int __user *)arg))
		return -EFAULT;

	if (fsflags & ~F2FS_GETTABLE_FS_FL)
		return -EOPNOTSUPP;
	fsflags &= F2FS_SETTABLE_FS_FL;

	iflags = f2fs_fsflags_to_iflags(fsflags);
	if (f2fs_mask_flags(inode->i_mode, iflags) != iflags)
		return -EOPNOTSUPP;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	old_fsflags = f2fs_iflags_to_fsflags(fi->i_flags);
	ret = vfs_ioc_setflags_prepare(inode, old_fsflags, fsflags);
	if (ret)
		goto out;

	ret = f2fs_setflags_common(inode, iflags,
			f2fs_fsflags_to_iflags(F2FS_SETTABLE_FS_FL));
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}
static int f2fs_ioc_getversion(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);

	return put_user(inode->i_generation, (int __user *)arg);
}
static int f2fs_ioc_start_atomic_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	if (filp->f_flags & O_DIRECT)
		return -EINVAL;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	f2fs_disable_compressed_file(inode);

	if (f2fs_is_atomic_file(inode)) {
		if (is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST))
			ret = -EINVAL;
		goto out;
	}

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		goto out;

	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

	/*
	 * Should wait for end_io so that F2FS_WB_CP_DATA is counted
	 * correctly by f2fs_is_atomic_file().
	 */
	if (get_dirty_pages(inode))
		f2fs_warn(F2FS_I_SB(inode), "Unexpected flush for atomic writes: ino=%lu, npages=%u",
			  inode->i_ino, get_dirty_pages(inode));
	ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
	if (ret) {
		up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
		goto out;
	}

	spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
	if (list_empty(&fi->inmem_ilist))
		list_add_tail(&fi->inmem_ilist, &sbi->inode_list[ATOMIC_FILE]);
	sbi->atomic_files++;
	spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);

	/* add inode in inmem_list first and set atomic_file */
	set_inode_flag(inode, FI_ATOMIC_FILE);
	clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	F2FS_I(inode)->inmem_task = current;
	stat_update_max_atomic_write(inode);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}
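
/*
 * Usage sketch (illustrative): an application such as SQLite performs an
 * atomic transaction via the ioctl pair defined in <uapi/linux/f2fs.h>:
 *
 *	ioctl(fd, F2FS_IOC_START_ATOMIC_WRITE);
 *	... buffered writes to fd ...
 *	ioctl(fd, F2FS_IOC_COMMIT_ATOMIC_WRITE);
 *
 * Until the commit, the written pages are kept as in-memory (inmem) pages
 * and can be dropped as a whole on abort or crash.
 */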
static int f2fs_ioc_commit_atomic_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	f2fs_balance_fs(F2FS_I_SB(inode), true);

	inode_lock(inode);

	if (f2fs_is_volatile_file(inode)) {
		ret = -EINVAL;
		goto err_out;
	}

	if (f2fs_is_atomic_file(inode)) {
		ret = f2fs_commit_inmem_pages(inode);
		if (ret)
			goto err_out;

		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
		if (!ret)
			f2fs_drop_inmem_pages(inode);
	} else {
		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 1, false);
	}
err_out:
	if (is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST)) {
		clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
		ret = -EINVAL;
	}
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}
static int f2fs_ioc_start_volatile_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (f2fs_is_volatile_file(inode))
		goto out;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		goto out;

	stat_inc_volatile_write(inode);
	stat_update_max_volatile_write(inode);

	set_inode_flag(inode, FI_VOLATILE_FILE);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}
static int f2fs_ioc_release_volatile_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (!f2fs_is_volatile_file(inode))
		goto out;

	if (!f2fs_is_first_block_written(inode)) {
		ret = truncate_partial_data_page(inode, 0, true);
		goto out;
	}

	ret = punch_hole(inode, 0, F2FS_BLKSIZE);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}
2179 static int f2fs_ioc_abort_volatile_write(struct file *filp)
2181 struct inode *inode = file_inode(filp);
2182 int ret;
2184 if (!inode_owner_or_capable(inode))
2185 return -EACCES;
2187 ret = mnt_want_write_file(filp);
2188 if (ret)
2189 return ret;
2191 inode_lock(inode);
2193 if (f2fs_is_atomic_file(inode))
2194 f2fs_drop_inmem_pages(inode);
2195 if (f2fs_is_volatile_file(inode)) {
2196 clear_inode_flag(inode, FI_VOLATILE_FILE);
2197 stat_dec_volatile_write(inode);
2198 ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
2201 clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
2203 inode_unlock(inode);
2205 mnt_drop_write_file(filp);
2206 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
2207 return ret;
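/*
 * F2FS_IOC_SHUTDOWN: take the filesystem down in one of several modes,
 * from a full bdev freeze/sync to an immediate stop with no sync at
 * all.  Except for the NEED_FSCK case, the GC and discard threads are
 * stopped once the checkpoint has been shut down.
 */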
2210 static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
2212 struct inode *inode = file_inode(filp);
2213 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2214 struct super_block *sb = sbi->sb;
2215 __u32 in;
2216 int ret = 0;
2218 if (!capable(CAP_SYS_ADMIN))
2219 return -EPERM;
2221 if (get_user(in, (__u32 __user *)arg))
2222 return -EFAULT;
2224 if (in != F2FS_GOING_DOWN_FULLSYNC) {
2225 ret = mnt_want_write_file(filp);
2226 if (ret) {
2227 if (ret == -EROFS) {
2228 ret = 0;
2229 f2fs_stop_checkpoint(sbi, false);
2230 set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
2231 trace_f2fs_shutdown(sbi, in, ret);
2233 return ret;
2237 switch (in) {
2238 case F2FS_GOING_DOWN_FULLSYNC:
2239 ret = freeze_bdev(sb->s_bdev);
2240 if (ret)
2241 goto out;
2242 f2fs_stop_checkpoint(sbi, false);
2243 set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
2244 thaw_bdev(sb->s_bdev);
2245 break;
2246 case F2FS_GOING_DOWN_METASYNC:
2247 /* do checkpoint only */
2248 ret = f2fs_sync_fs(sb, 1);
2249 if (ret)
2250 goto out;
2251 f2fs_stop_checkpoint(sbi, false);
2252 set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
2253 break;
2254 case F2FS_GOING_DOWN_NOSYNC:
2255 f2fs_stop_checkpoint(sbi, false);
2256 set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
2257 break;
2258 case F2FS_GOING_DOWN_METAFLUSH:
2259 f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_META_IO);
2260 f2fs_stop_checkpoint(sbi, false);
2261 set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
2262 break;
2263 case F2FS_GOING_DOWN_NEED_FSCK:
2264 set_sbi_flag(sbi, SBI_NEED_FSCK);
2265 set_sbi_flag(sbi, SBI_CP_DISABLED_QUICK);
2266 set_sbi_flag(sbi, SBI_IS_DIRTY);
2267 /* do checkpoint only */
2268 ret = f2fs_sync_fs(sb, 1);
2269 goto out;
2270 default:
2271 ret = -EINVAL;
2272 goto out;
2275 f2fs_stop_gc_thread(sbi);
2276 f2fs_stop_discard_thread(sbi);
2278 f2fs_drop_discard_cmd(sbi);
2279 clear_opt(sbi, DISCARD);
2281 f2fs_update_time(sbi, REQ_TIME);
2282 out:
2283 if (in != F2FS_GOING_DOWN_FULLSYNC)
2284 mnt_drop_write_file(filp);
2286 trace_f2fs_shutdown(sbi, in, ret);
2288 return ret;
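/*
 * FITRIM: discard free space within the caller-supplied range.  The
 * minimum extent length is clamped to the device's discard granularity
 * before f2fs_trim_fs() does the actual work.
 */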
2291 static int f2fs_ioc_fitrim(struct file *filp, unsigned long arg)
2293 struct inode *inode = file_inode(filp);
2294 struct super_block *sb = inode->i_sb;
2295 struct request_queue *q = bdev_get_queue(sb->s_bdev);
2296 struct fstrim_range range;
2297 int ret;
2299 if (!capable(CAP_SYS_ADMIN))
2300 return -EPERM;
2302 if (!f2fs_hw_support_discard(F2FS_SB(sb)))
2303 return -EOPNOTSUPP;
2305 if (copy_from_user(&range, (struct fstrim_range __user *)arg,
2306 sizeof(range)))
2307 return -EFAULT;
2309 ret = mnt_want_write_file(filp);
2310 if (ret)
2311 return ret;
2313 range.minlen = max((unsigned int)range.minlen,
2314 q->limits.discard_granularity);
2315 ret = f2fs_trim_fs(F2FS_SB(sb), &range);
2316 mnt_drop_write_file(filp);
2317 if (ret < 0)
2318 return ret;
2320 if (copy_to_user((struct fstrim_range __user *)arg, &range,
2321 sizeof(range)))
2322 return -EFAULT;
2323 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
2324 return 0;
2327 static bool uuid_is_nonzero(__u8 u[16])
2329 int i;
2331 for (i = 0; i < 16; i++)
2332 if (u[i])
2333 return true;
2334 return false;
2337 static int f2fs_ioc_set_encryption_policy(struct file *filp, unsigned long arg)
2339 struct inode *inode = file_inode(filp);
2341 if (!f2fs_sb_has_encrypt(F2FS_I_SB(inode)))
2342 return -EOPNOTSUPP;
2344 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
2346 return fscrypt_ioctl_set_policy(filp, (const void __user *)arg);
2349 static int f2fs_ioc_get_encryption_policy(struct file *filp, unsigned long arg)
2351 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2352 return -EOPNOTSUPP;
2353 return fscrypt_ioctl_get_policy(filp, (void __user *)arg);
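/*
 * FS_IOC_GET_ENCRYPTION_PWSALT: return the filesystem's password salt,
 * generating a random one and persisting it in the superblock on first
 * use.
 */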
2356 static int f2fs_ioc_get_encryption_pwsalt(struct file *filp, unsigned long arg)
2358 struct inode *inode = file_inode(filp);
2359 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2360 int err;
2362 if (!f2fs_sb_has_encrypt(sbi))
2363 return -EOPNOTSUPP;
2365 err = mnt_want_write_file(filp);
2366 if (err)
2367 return err;
2369 down_write(&sbi->sb_lock);
2371 if (uuid_is_nonzero(sbi->raw_super->encrypt_pw_salt))
2372 goto got_it;
2374 /* update superblock with uuid */
2375 generate_random_uuid(sbi->raw_super->encrypt_pw_salt);
2377 err = f2fs_commit_super(sbi, false);
2378 if (err) {
2379 /* undo new data */
2380 memset(sbi->raw_super->encrypt_pw_salt, 0, 16);
2381 goto out_err;
2383 got_it:
2384 if (copy_to_user((__u8 __user *)arg, sbi->raw_super->encrypt_pw_salt,
2385 16))
2386 err = -EFAULT;
2387 out_err:
2388 up_write(&sbi->sb_lock);
2389 mnt_drop_write_file(filp);
2390 return err;
2393 static int f2fs_ioc_get_encryption_policy_ex(struct file *filp,
2394 unsigned long arg)
2396 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2397 return -EOPNOTSUPP;
2399 return fscrypt_ioctl_get_policy_ex(filp, (void __user *)arg);
2402 static int f2fs_ioc_add_encryption_key(struct file *filp, unsigned long arg)
2404 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2405 return -EOPNOTSUPP;
2407 return fscrypt_ioctl_add_key(filp, (void __user *)arg);
2410 static int f2fs_ioc_remove_encryption_key(struct file *filp, unsigned long arg)
2412 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2413 return -EOPNOTSUPP;
2415 return fscrypt_ioctl_remove_key(filp, (void __user *)arg);
2418 static int f2fs_ioc_remove_encryption_key_all_users(struct file *filp,
2419 unsigned long arg)
2421 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2422 return -EOPNOTSUPP;
2424 return fscrypt_ioctl_remove_key_all_users(filp, (void __user *)arg);
2427 static int f2fs_ioc_get_encryption_key_status(struct file *filp,
2428 unsigned long arg)
2430 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2431 return -EOPNOTSUPP;
2433 return fscrypt_ioctl_get_key_status(filp, (void __user *)arg);
2436 static int f2fs_ioc_get_encryption_nonce(struct file *filp, unsigned long arg)
2438 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2439 return -EOPNOTSUPP;
2441 return fscrypt_ioctl_get_nonce(filp, (void __user *)arg);
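/*
 * F2FS_IOC_GARBAGE_COLLECT: trigger one round of garbage collection.
 * A non-sync request backs off with -EBUSY if the GC lock is already
 * held; a sync request blocks until the lock can be taken.
 */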
2444 static int f2fs_ioc_gc(struct file *filp, unsigned long arg)
2446 struct inode *inode = file_inode(filp);
2447 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2448 __u32 sync;
2449 int ret;
2451 if (!capable(CAP_SYS_ADMIN))
2452 return -EPERM;
2454 if (get_user(sync, (__u32 __user *)arg))
2455 return -EFAULT;
2457 if (f2fs_readonly(sbi->sb))
2458 return -EROFS;
2460 ret = mnt_want_write_file(filp);
2461 if (ret)
2462 return ret;
2464 if (!sync) {
2465 if (!down_write_trylock(&sbi->gc_lock)) {
2466 ret = -EBUSY;
2467 goto out;
2469 } else {
2470 down_write(&sbi->gc_lock);
2473 ret = f2fs_gc(sbi, sync, true, NULL_SEGNO);
2474 out:
2475 mnt_drop_write_file(filp);
2476 return ret;
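/*
 * Garbage-collect a caller-supplied block range, one section per
 * f2fs_gc() call, advancing by BLKS_PER_SEC() until the whole range
 * has been processed.
 */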
2479 static int __f2fs_ioc_gc_range(struct file *filp, struct f2fs_gc_range *range)
2481 struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp));
2482 u64 end;
2483 int ret;
2485 if (!capable(CAP_SYS_ADMIN))
2486 return -EPERM;
2487 if (f2fs_readonly(sbi->sb))
2488 return -EROFS;
2490 end = range->start + range->len;
2491 if (end < range->start || range->start < MAIN_BLKADDR(sbi) ||
2492 end >= MAX_BLKADDR(sbi))
2493 return -EINVAL;
2495 ret = mnt_want_write_file(filp);
2496 if (ret)
2497 return ret;
2499 do_more:
2500 if (!range->sync) {
2501 if (!down_write_trylock(&sbi->gc_lock)) {
2502 ret = -EBUSY;
2503 goto out;
2505 } else {
2506 down_write(&sbi->gc_lock);
2509 ret = f2fs_gc(sbi, range->sync, true, GET_SEGNO(sbi, range->start));
2510 if (ret) {
2511 if (ret == -EBUSY)
2512 ret = -EAGAIN;
2513 goto out;
2515 range->start += BLKS_PER_SEC(sbi);
2516 if (range->start <= end)
2517 goto do_more;
2518 out:
2519 mnt_drop_write_file(filp);
2520 return ret;
2523 static int f2fs_ioc_gc_range(struct file *filp, unsigned long arg)
2525 struct f2fs_gc_range range;
2527 if (copy_from_user(&range, (struct f2fs_gc_range __user *)arg,
2528 sizeof(range)))
2529 return -EFAULT;
2530 return __f2fs_ioc_gc_range(filp, &range);
2533 static int f2fs_ioc_write_checkpoint(struct file *filp, unsigned long arg)
2535 struct inode *inode = file_inode(filp);
2536 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2537 int ret;
2539 if (!capable(CAP_SYS_ADMIN))
2540 return -EPERM;
2542 if (f2fs_readonly(sbi->sb))
2543 return -EROFS;
2545 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
2546 f2fs_info(sbi, "Skipping Checkpoint. Checkpoints currently disabled.");
2547 return -EINVAL;
2550 ret = mnt_want_write_file(filp);
2551 if (ret)
2552 return ret;
2554 ret = f2fs_sync_fs(sbi->sb, 1);
2556 mnt_drop_write_file(filp);
2557 return ret;
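/*
 * Defragment a file range: detect fragmentation via the extent cache
 * and the block map, then redirty the affected pages in segment-sized
 * batches so writeback reallocates them contiguously.  Returns -EAGAIN
 * when there are not enough free sections for LFS allocation.
 */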
2560 static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
2561 struct file *filp,
2562 struct f2fs_defragment *range)
2564 struct inode *inode = file_inode(filp);
2565 struct f2fs_map_blocks map = { .m_next_extent = NULL,
2566 .m_seg_type = NO_CHECK_TYPE ,
2567 .m_may_create = false };
2568 struct extent_info ei = {0, 0, 0};
2569 pgoff_t pg_start, pg_end, next_pgofs;
2570 unsigned int blk_per_seg = sbi->blocks_per_seg;
2571 unsigned int total = 0, sec_num;
2572 block_t blk_end = 0;
2573 bool fragmented = false;
2574 int err;
2576 /* if in-place-update policy is enabled, don't waste time here */
2577 if (f2fs_should_update_inplace(inode, NULL))
2578 return -EINVAL;
2580 pg_start = range->start >> PAGE_SHIFT;
2581 pg_end = (range->start + range->len) >> PAGE_SHIFT;
2583 f2fs_balance_fs(sbi, true);
2585 inode_lock(inode);
2587 /* writeback all dirty pages in the range */
2588 err = filemap_write_and_wait_range(inode->i_mapping, range->start,
2589 range->start + range->len - 1);
2590 if (err)
2591 goto out;
2593 /*
2594  * Look up mapping info in the extent cache; skip defragmenting if the
2595  * physical block addresses are already contiguous.
2596  */
2597 if (f2fs_lookup_extent_cache(inode, pg_start, &ei)) {
2598 if (ei.fofs + ei.len >= pg_end)
2599 goto out;
2602 map.m_lblk = pg_start;
2603 map.m_next_pgofs = &next_pgofs;
2605 /*
2606  * Look up mapping info in the dnode page cache; skip defragmenting if
2607  * all the physical block addresses are contiguous, even when there are
2608  * holes in the logical blocks.
2609  */
2610 while (map.m_lblk < pg_end) {
2611 map.m_len = pg_end - map.m_lblk;
2612 err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
2613 if (err)
2614 goto out;
2616 if (!(map.m_flags & F2FS_MAP_FLAGS)) {
2617 map.m_lblk = next_pgofs;
2618 continue;
2621 if (blk_end && blk_end != map.m_pblk)
2622 fragmented = true;
2624 /* record the total count of blocks that we're going to move */
2625 total += map.m_len;
2627 blk_end = map.m_pblk + map.m_len;
2629 map.m_lblk += map.m_len;
2632 if (!fragmented) {
2633 total = 0;
2634 goto out;
2637 sec_num = DIV_ROUND_UP(total, BLKS_PER_SEC(sbi));
2639 /*
2640  * Make sure there are enough free sections for LFS allocation; this
2641  * avoids running defragmentation in SSR mode when free sections are
2642  * being allocated intensively.
2643  */
2644 if (has_not_enough_free_secs(sbi, 0, sec_num)) {
2645 err = -EAGAIN;
2646 goto out;
2649 map.m_lblk = pg_start;
2650 map.m_len = pg_end - pg_start;
2651 total = 0;
2653 while (map.m_lblk < pg_end) {
2654 pgoff_t idx;
2655 int cnt = 0;
2657 do_map:
2658 map.m_len = pg_end - map.m_lblk;
2659 err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
2660 if (err)
2661 goto clear_out;
2663 if (!(map.m_flags & F2FS_MAP_FLAGS)) {
2664 map.m_lblk = next_pgofs;
2665 goto check;
2668 set_inode_flag(inode, FI_DO_DEFRAG);
2670 idx = map.m_lblk;
2671 while (idx < map.m_lblk + map.m_len && cnt < blk_per_seg) {
2672 struct page *page;
2674 page = f2fs_get_lock_data_page(inode, idx, true);
2675 if (IS_ERR(page)) {
2676 err = PTR_ERR(page);
2677 goto clear_out;
2680 set_page_dirty(page);
2681 f2fs_put_page(page, 1);
2683 idx++;
2684 cnt++;
2685 total++;
2688 map.m_lblk = idx;
2689 check:
2690 if (map.m_lblk < pg_end && cnt < blk_per_seg)
2691 goto do_map;
2693 clear_inode_flag(inode, FI_DO_DEFRAG);
2695 err = filemap_fdatawrite(inode->i_mapping);
2696 if (err)
2697 goto out;
2699 clear_out:
2700 clear_inode_flag(inode, FI_DO_DEFRAG);
2701 out:
2702 inode_unlock(inode);
2703 if (!err)
2704 range->len = (u64)total << PAGE_SHIFT;
2705 return err;
2708 static int f2fs_ioc_defragment(struct file *filp, unsigned long arg)
2710 struct inode *inode = file_inode(filp);
2711 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2712 struct f2fs_defragment range;
2713 int err;
2715 if (!capable(CAP_SYS_ADMIN))
2716 return -EPERM;
2718 if (!S_ISREG(inode->i_mode) || f2fs_is_atomic_file(inode))
2719 return -EINVAL;
2721 if (f2fs_readonly(sbi->sb))
2722 return -EROFS;
2724 if (copy_from_user(&range, (struct f2fs_defragment __user *)arg,
2725 sizeof(range)))
2726 return -EFAULT;
2728 /* verify alignment of offset & size */
2729 if (range.start & (F2FS_BLKSIZE - 1) || range.len & (F2FS_BLKSIZE - 1))
2730 return -EINVAL;
2732 if (unlikely((range.start + range.len) >> PAGE_SHIFT >
2733 sbi->max_file_blocks))
2734 return -EINVAL;
2736 err = mnt_want_write_file(filp);
2737 if (err)
2738 return err;
2740 err = f2fs_defragment_range(sbi, filp, &range);
2741 mnt_drop_write_file(filp);
2743 f2fs_update_time(sbi, REQ_TIME);
2744 if (err < 0)
2745 return err;
2747 if (copy_to_user((struct f2fs_defragment __user *)arg, &range,
2748 sizeof(range)))
2749 return -EFAULT;
2751 return 0;
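/*
 * Move a block-aligned data range from one file to another by
 * exchanging the underlying blocks.  Both files must be regular,
 * unencrypted and on the same mount; a destination that falls inside
 * the source range of the same file is rejected.
 */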
2754 static int f2fs_move_file_range(struct file *file_in, loff_t pos_in,
2755 struct file *file_out, loff_t pos_out, size_t len)
2757 struct inode *src = file_inode(file_in);
2758 struct inode *dst = file_inode(file_out);
2759 struct f2fs_sb_info *sbi = F2FS_I_SB(src);
2760 size_t olen = len, dst_max_i_size = 0;
2761 size_t dst_osize;
2762 int ret;
2764 if (file_in->f_path.mnt != file_out->f_path.mnt ||
2765 src->i_sb != dst->i_sb)
2766 return -EXDEV;
2768 if (unlikely(f2fs_readonly(src->i_sb)))
2769 return -EROFS;
2771 if (!S_ISREG(src->i_mode) || !S_ISREG(dst->i_mode))
2772 return -EINVAL;
2774 if (IS_ENCRYPTED(src) || IS_ENCRYPTED(dst))
2775 return -EOPNOTSUPP;
2777 if (pos_out < 0 || pos_in < 0)
2778 return -EINVAL;
2780 if (src == dst) {
2781 if (pos_in == pos_out)
2782 return 0;
2783 if (pos_out > pos_in && pos_out < pos_in + len)
2784 return -EINVAL;
2787 inode_lock(src);
2788 if (src != dst) {
2789 ret = -EBUSY;
2790 if (!inode_trylock(dst))
2791 goto out;
2794 ret = -EINVAL;
2795 if (pos_in + len > src->i_size || pos_in + len < pos_in)
2796 goto out_unlock;
2797 if (len == 0)
2798 olen = len = src->i_size - pos_in;
2799 if (pos_in + len == src->i_size)
2800 len = ALIGN(src->i_size, F2FS_BLKSIZE) - pos_in;
2801 if (len == 0) {
2802 ret = 0;
2803 goto out_unlock;
2806 dst_osize = dst->i_size;
2807 if (pos_out + olen > dst->i_size)
2808 dst_max_i_size = pos_out + olen;
2810 /* verify the end result is block aligned */
2811 if (!IS_ALIGNED(pos_in, F2FS_BLKSIZE) ||
2812 !IS_ALIGNED(pos_in + len, F2FS_BLKSIZE) ||
2813 !IS_ALIGNED(pos_out, F2FS_BLKSIZE))
2814 goto out_unlock;
2816 ret = f2fs_convert_inline_inode(src);
2817 if (ret)
2818 goto out_unlock;
2820 ret = f2fs_convert_inline_inode(dst);
2821 if (ret)
2822 goto out_unlock;
2824 /* write out all dirty pages from offset */
2825 ret = filemap_write_and_wait_range(src->i_mapping,
2826 pos_in, pos_in + len);
2827 if (ret)
2828 goto out_unlock;
2830 ret = filemap_write_and_wait_range(dst->i_mapping,
2831 pos_out, pos_out + len);
2832 if (ret)
2833 goto out_unlock;
2835 f2fs_balance_fs(sbi, true);
2837 down_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
2838 if (src != dst) {
2839 ret = -EBUSY;
2840 if (!down_write_trylock(&F2FS_I(dst)->i_gc_rwsem[WRITE]))
2841 goto out_src;
2844 f2fs_lock_op(sbi);
2845 ret = __exchange_data_block(src, dst, pos_in >> F2FS_BLKSIZE_BITS,
2846 pos_out >> F2FS_BLKSIZE_BITS,
2847 len >> F2FS_BLKSIZE_BITS, false);
2849 if (!ret) {
2850 if (dst_max_i_size)
2851 f2fs_i_size_write(dst, dst_max_i_size);
2852 else if (dst_osize != dst->i_size)
2853 f2fs_i_size_write(dst, dst_osize);
2855 f2fs_unlock_op(sbi);
2857 if (src != dst)
2858 up_write(&F2FS_I(dst)->i_gc_rwsem[WRITE]);
2859 out_src:
2860 up_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
2861 out_unlock:
2862 if (src != dst)
2863 inode_unlock(dst);
2864 out:
2865 inode_unlock(src);
2866 return ret;
2869 static int __f2fs_ioc_move_range(struct file *filp,
2870 struct f2fs_move_range *range)
2872 struct fd dst;
2873 int err;
2875 if (!(filp->f_mode & FMODE_READ) ||
2876 !(filp->f_mode & FMODE_WRITE))
2877 return -EBADF;
2879 dst = fdget(range->dst_fd);
2880 if (!dst.file)
2881 return -EBADF;
2883 if (!(dst.file->f_mode & FMODE_WRITE)) {
2884 err = -EBADF;
2885 goto err_out;
2888 err = mnt_want_write_file(filp);
2889 if (err)
2890 goto err_out;
2892 err = f2fs_move_file_range(filp, range->pos_in, dst.file,
2893 range->pos_out, range->len);
2895 mnt_drop_write_file(filp);
2896 err_out:
2897 fdput(dst);
2898 return err;
2901 static int f2fs_ioc_move_range(struct file *filp, unsigned long arg)
2903 struct f2fs_move_range range;
2905 if (copy_from_user(&range, (struct f2fs_move_range __user *)arg,
2906 sizeof(range)))
2907 return -EFAULT;
2908 return __f2fs_ioc_move_range(filp, &range);
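/*
 * F2FS_IOC_FLUSH_DEVICE: migrate data off one device of a multi-device
 * filesystem by garbage-collecting a window of its segments, steering
 * victim selection beyond the flushed region.
 */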
2911 static int f2fs_ioc_flush_device(struct file *filp, unsigned long arg)
2913 struct inode *inode = file_inode(filp);
2914 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2915 struct sit_info *sm = SIT_I(sbi);
2916 unsigned int start_segno = 0, end_segno = 0;
2917 unsigned int dev_start_segno = 0, dev_end_segno = 0;
2918 struct f2fs_flush_device range;
2919 int ret;
2921 if (!capable(CAP_SYS_ADMIN))
2922 return -EPERM;
2924 if (f2fs_readonly(sbi->sb))
2925 return -EROFS;
2927 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
2928 return -EINVAL;
2930 if (copy_from_user(&range, (struct f2fs_flush_device __user *)arg,
2931 sizeof(range)))
2932 return -EFAULT;
2934 if (!f2fs_is_multi_device(sbi) || sbi->s_ndevs - 1 <= range.dev_num ||
2935 __is_large_section(sbi)) {
2936 f2fs_warn(sbi, "Can't flush %u in %d for segs_per_sec %u != 1",
2937 range.dev_num, sbi->s_ndevs, sbi->segs_per_sec);
2938 return -EINVAL;
2941 ret = mnt_want_write_file(filp);
2942 if (ret)
2943 return ret;
2945 if (range.dev_num != 0)
2946 dev_start_segno = GET_SEGNO(sbi, FDEV(range.dev_num).start_blk);
2947 dev_end_segno = GET_SEGNO(sbi, FDEV(range.dev_num).end_blk);
2949 start_segno = sm->last_victim[FLUSH_DEVICE];
2950 if (start_segno < dev_start_segno || start_segno >= dev_end_segno)
2951 start_segno = dev_start_segno;
2952 end_segno = min(start_segno + range.segments, dev_end_segno);
2954 while (start_segno < end_segno) {
2955 if (!down_write_trylock(&sbi->gc_lock)) {
2956 ret = -EBUSY;
2957 goto out;
2959 sm->last_victim[GC_CB] = end_segno + 1;
2960 sm->last_victim[GC_GREEDY] = end_segno + 1;
2961 sm->last_victim[ALLOC_NEXT] = end_segno + 1;
2962 ret = f2fs_gc(sbi, true, true, start_segno);
2963 if (ret == -EAGAIN)
2964 ret = 0;
2965 else if (ret < 0)
2966 break;
2967 start_segno++;
2969 out:
2970 mnt_drop_write_file(filp);
2971 return ret;
2974 static int f2fs_ioc_get_features(struct file *filp, unsigned long arg)
2976 struct inode *inode = file_inode(filp);
2977 u32 sb_feature = le32_to_cpu(F2FS_I_SB(inode)->raw_super->feature);
2979 /* Always advertise atomic write support; SQLite on Android probes this feature. */
2980 sb_feature |= F2FS_FEATURE_ATOMIC_WRITE;
2982 return put_user(sb_feature, (u32 __user *)arg);
2985 #ifdef CONFIG_QUOTA
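/* Transfer the inode's quota usage to a new project ID. */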
2986 int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid)
2988 struct dquot *transfer_to[MAXQUOTAS] = {};
2989 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2990 struct super_block *sb = sbi->sb;
2991 int err = 0;
2993 transfer_to[PRJQUOTA] = dqget(sb, make_kqid_projid(kprojid));
2994 if (!IS_ERR(transfer_to[PRJQUOTA])) {
2995 err = __dquot_transfer(inode, transfer_to);
2996 if (err)
2997 set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
2998 dqput(transfer_to[PRJQUOTA]);
3000 return err;
3003 static int f2fs_ioc_setproject(struct file *filp, __u32 projid)
3005 struct inode *inode = file_inode(filp);
3006 struct f2fs_inode_info *fi = F2FS_I(inode);
3007 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3008 struct page *ipage;
3009 kprojid_t kprojid;
3010 int err;
3012 if (!f2fs_sb_has_project_quota(sbi)) {
3013 if (projid != F2FS_DEF_PROJID)
3014 return -EOPNOTSUPP;
3015 else
3016 return 0;
3019 if (!f2fs_has_extra_attr(inode))
3020 return -EOPNOTSUPP;
3022 kprojid = make_kprojid(&init_user_ns, (projid_t)projid);
3024 if (projid_eq(kprojid, F2FS_I(inode)->i_projid))
3025 return 0;
3027 err = -EPERM;
3028 /* Is it quota file? Do not allow user to mess with it */
3029 if (IS_NOQUOTA(inode))
3030 return err;
3032 ipage = f2fs_get_node_page(sbi, inode->i_ino);
3033 if (IS_ERR(ipage))
3034 return PTR_ERR(ipage);
3036 if (!F2FS_FITS_IN_INODE(F2FS_INODE(ipage), fi->i_extra_isize,
3037 i_projid)) {
3038 err = -EOVERFLOW;
3039 f2fs_put_page(ipage, 1);
3040 return err;
3042 f2fs_put_page(ipage, 1);
3044 err = dquot_initialize(inode);
3045 if (err)
3046 return err;
3048 f2fs_lock_op(sbi);
3049 err = f2fs_transfer_project_quota(inode, kprojid);
3050 if (err)
3051 goto out_unlock;
3053 F2FS_I(inode)->i_projid = kprojid;
3054 inode->i_ctime = current_time(inode);
3055 f2fs_mark_inode_dirty_sync(inode, true);
3056 out_unlock:
3057 f2fs_unlock_op(sbi);
3058 return err;
3060 #else
3061 int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid)
3063 return 0;
3066 static int f2fs_ioc_setproject(struct file *filp, __u32 projid)
3068 if (projid != F2FS_DEF_PROJID)
3069 return -EOPNOTSUPP;
3070 return 0;
3072 #endif
3074 /* FS_IOC_FSGETXATTR and FS_IOC_FSSETXATTR support */
3076 /*
3077  * To make a new on-disk f2fs i_flag gettable via FS_IOC_FSGETXATTR and settable
3078  * via FS_IOC_FSSETXATTR, add an entry for it to f2fs_xflags_map[], and add its
3079  * FS_XFLAG_* equivalent to F2FS_SUPPORTED_XFLAGS.
3080  */
3082 static const struct {
3083 u32 iflag;
3084 u32 xflag;
3085 } f2fs_xflags_map[] = {
3086 { F2FS_SYNC_FL, FS_XFLAG_SYNC },
3087 { F2FS_IMMUTABLE_FL, FS_XFLAG_IMMUTABLE },
3088 { F2FS_APPEND_FL, FS_XFLAG_APPEND },
3089 { F2FS_NODUMP_FL, FS_XFLAG_NODUMP },
3090 { F2FS_NOATIME_FL, FS_XFLAG_NOATIME },
3091 { F2FS_PROJINHERIT_FL, FS_XFLAG_PROJINHERIT },
3094 #define F2FS_SUPPORTED_XFLAGS ( \
3095 FS_XFLAG_SYNC | \
3096 FS_XFLAG_IMMUTABLE | \
3097 FS_XFLAG_APPEND | \
3098 FS_XFLAG_NODUMP | \
3099 FS_XFLAG_NOATIME | \
3100 FS_XFLAG_PROJINHERIT)
3102 /* Convert f2fs on-disk i_flags to FS_IOC_FS{GET,SET}XATTR flags */
3103 static inline u32 f2fs_iflags_to_xflags(u32 iflags)
3105 u32 xflags = 0;
3106 int i;
3108 for (i = 0; i < ARRAY_SIZE(f2fs_xflags_map); i++)
3109 if (iflags & f2fs_xflags_map[i].iflag)
3110 xflags |= f2fs_xflags_map[i].xflag;
3112 return xflags;
3115 /* Convert FS_IOC_FS{GET,SET}XATTR flags to f2fs on-disk i_flags */
3116 static inline u32 f2fs_xflags_to_iflags(u32 xflags)
3118 u32 iflags = 0;
3119 int i;
3121 for (i = 0; i < ARRAY_SIZE(f2fs_xflags_map); i++)
3122 if (xflags & f2fs_xflags_map[i].xflag)
3123 iflags |= f2fs_xflags_map[i].iflag;
3125 return iflags;
3128 static void f2fs_fill_fsxattr(struct inode *inode, struct fsxattr *fa)
3130 struct f2fs_inode_info *fi = F2FS_I(inode);
3132 simple_fill_fsxattr(fa, f2fs_iflags_to_xflags(fi->i_flags));
3134 if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)))
3135 fa->fsx_projid = from_kprojid(&init_user_ns, fi->i_projid);
3138 static int f2fs_ioc_fsgetxattr(struct file *filp, unsigned long arg)
3140 struct inode *inode = file_inode(filp);
3141 struct fsxattr fa;
3143 f2fs_fill_fsxattr(inode, &fa);
3145 if (copy_to_user((struct fsxattr __user *)arg, &fa, sizeof(fa)))
3146 return -EFAULT;
3147 return 0;
3150 static int f2fs_ioc_fssetxattr(struct file *filp, unsigned long arg)
3152 struct inode *inode = file_inode(filp);
3153 struct fsxattr fa, old_fa;
3154 u32 iflags;
3155 int err;
3157 if (copy_from_user(&fa, (struct fsxattr __user *)arg, sizeof(fa)))
3158 return -EFAULT;
3160 /* Make sure caller has proper permission */
3161 if (!inode_owner_or_capable(inode))
3162 return -EACCES;
3164 if (fa.fsx_xflags & ~F2FS_SUPPORTED_XFLAGS)
3165 return -EOPNOTSUPP;
3167 iflags = f2fs_xflags_to_iflags(fa.fsx_xflags);
3168 if (f2fs_mask_flags(inode->i_mode, iflags) != iflags)
3169 return -EOPNOTSUPP;
3171 err = mnt_want_write_file(filp);
3172 if (err)
3173 return err;
3175 inode_lock(inode);
3177 f2fs_fill_fsxattr(inode, &old_fa);
3178 err = vfs_ioc_fssetxattr_check(inode, &old_fa, &fa);
3179 if (err)
3180 goto out;
3182 err = f2fs_setflags_common(inode, iflags,
3183 f2fs_xflags_to_iflags(F2FS_SUPPORTED_XFLAGS));
3184 if (err)
3185 goto out;
3187 err = f2fs_ioc_setproject(filp, fa.fsx_projid);
3188 out:
3189 inode_unlock(inode);
3190 mnt_drop_write_file(filp);
3191 return err;
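/*
 * Account GC failures against a pinned file; once the count exceeds
 * gc_pin_file_threshold the pin is dropped so GC may move its blocks
 * again.
 */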
3194 int f2fs_pin_file_control(struct inode *inode, bool inc)
3196 struct f2fs_inode_info *fi = F2FS_I(inode);
3197 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3199 /* Use i_gc_failures for normal file as a risk signal. */
3200 if (inc)
3201 f2fs_i_gc_failures_write(inode,
3202 fi->i_gc_failures[GC_FAILURE_PIN] + 1);
3204 if (fi->i_gc_failures[GC_FAILURE_PIN] > sbi->gc_pin_file_threshold) {
3205 f2fs_warn(sbi, "%s: Enable GC = ino %lx after %x GC trials",
3206 __func__, inode->i_ino,
3207 fi->i_gc_failures[GC_FAILURE_PIN]);
3208 clear_inode_flag(inode, FI_PIN_FILE);
3209 return -EAGAIN;
3211 return 0;
3214 static int f2fs_ioc_set_pin_file(struct file *filp, unsigned long arg)
3216 struct inode *inode = file_inode(filp);
3217 __u32 pin;
3218 int ret = 0;
3220 if (get_user(pin, (__u32 __user *)arg))
3221 return -EFAULT;
3223 if (!S_ISREG(inode->i_mode))
3224 return -EINVAL;
3226 if (f2fs_readonly(F2FS_I_SB(inode)->sb))
3227 return -EROFS;
3229 ret = mnt_want_write_file(filp);
3230 if (ret)
3231 return ret;
3233 inode_lock(inode);
3235 if (f2fs_should_update_outplace(inode, NULL)) {
3236 ret = -EINVAL;
3237 goto out;
3240 if (!pin) {
3241 clear_inode_flag(inode, FI_PIN_FILE);
3242 f2fs_i_gc_failures_write(inode, 0);
3243 goto done;
3246 if (f2fs_pin_file_control(inode, false)) {
3247 ret = -EAGAIN;
3248 goto out;
3251 ret = f2fs_convert_inline_inode(inode);
3252 if (ret)
3253 goto out;
3255 if (!f2fs_disable_compressed_file(inode)) {
3256 ret = -EOPNOTSUPP;
3257 goto out;
3260 set_inode_flag(inode, FI_PIN_FILE);
3261 ret = F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN];
3262 done:
3263 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
3264 out:
3265 inode_unlock(inode);
3266 mnt_drop_write_file(filp);
3267 return ret;
3270 static int f2fs_ioc_get_pin_file(struct file *filp, unsigned long arg)
3272 struct inode *inode = file_inode(filp);
3273 __u32 pin = 0;
3275 if (is_inode_flag_set(inode, FI_PIN_FILE))
3276 pin = F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN];
3277 return put_user(pin, (u32 __user *)arg);
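/*
 * Walk the file's entire block map once so the extent cache is
 * populated ahead of time.
 */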
3280 int f2fs_precache_extents(struct inode *inode)
3282 struct f2fs_inode_info *fi = F2FS_I(inode);
3283 struct f2fs_map_blocks map;
3284 pgoff_t m_next_extent;
3285 loff_t end;
3286 int err;
3288 if (is_inode_flag_set(inode, FI_NO_EXTENT))
3289 return -EOPNOTSUPP;
3291 map.m_lblk = 0;
3292 map.m_next_pgofs = NULL;
3293 map.m_next_extent = &m_next_extent;
3294 map.m_seg_type = NO_CHECK_TYPE;
3295 map.m_may_create = false;
3296 end = F2FS_I_SB(inode)->max_file_blocks;
3298 while (map.m_lblk < end) {
3299 map.m_len = end - map.m_lblk;
3301 down_write(&fi->i_gc_rwsem[WRITE]);
3302 err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_PRECACHE);
3303 up_write(&fi->i_gc_rwsem[WRITE]);
3304 if (err)
3305 return err;
3307 map.m_lblk = m_next_extent;
3310 return err;
3313 static int f2fs_ioc_precache_extents(struct file *filp, unsigned long arg)
3315 return f2fs_precache_extents(file_inode(filp));
3318 static int f2fs_ioc_resize_fs(struct file *filp, unsigned long arg)
3320 struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp));
3321 __u64 block_count;
3323 if (!capable(CAP_SYS_ADMIN))
3324 return -EPERM;
3326 if (f2fs_readonly(sbi->sb))
3327 return -EROFS;
3329 if (copy_from_user(&block_count, (void __user *)arg,
3330 sizeof(block_count)))
3331 return -EFAULT;
3333 return f2fs_resize_fs(sbi, block_count);
3336 static int f2fs_ioc_enable_verity(struct file *filp, unsigned long arg)
3338 struct inode *inode = file_inode(filp);
3340 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
3342 if (!f2fs_sb_has_verity(F2FS_I_SB(inode))) {
3343 f2fs_warn(F2FS_I_SB(inode),
3344 "Can't enable fs-verity on inode %lu: the verity feature is not enabled on this filesystem.\n",
3345 inode->i_ino);
3346 return -EOPNOTSUPP;
3349 return fsverity_ioctl_enable(filp, (const void __user *)arg);
3352 static int f2fs_ioc_measure_verity(struct file *filp, unsigned long arg)
3354 if (!f2fs_sb_has_verity(F2FS_I_SB(file_inode(filp))))
3355 return -EOPNOTSUPP;
3357 return fsverity_ioctl_measure(filp, (void __user *)arg);
3360 static int f2fs_ioc_getfslabel(struct file *filp, unsigned long arg)
3362 struct inode *inode = file_inode(filp);
3363 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3364 char *vbuf;
3365 int count;
3366 int err = 0;
3368 vbuf = f2fs_kzalloc(sbi, MAX_VOLUME_NAME, GFP_KERNEL);
3369 if (!vbuf)
3370 return -ENOMEM;
3372 down_read(&sbi->sb_lock);
3373 count = utf16s_to_utf8s(sbi->raw_super->volume_name,
3374 ARRAY_SIZE(sbi->raw_super->volume_name),
3375 UTF16_LITTLE_ENDIAN, vbuf, MAX_VOLUME_NAME);
3376 up_read(&sbi->sb_lock);
3378 if (copy_to_user((char __user *)arg, vbuf,
3379 min(FSLABEL_MAX, count)))
3380 err = -EFAULT;
3382 kfree(vbuf);
3383 return err;
3386 static int f2fs_ioc_setfslabel(struct file *filp, unsigned long arg)
3388 struct inode *inode = file_inode(filp);
3389 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3390 char *vbuf;
3391 int err = 0;
3393 if (!capable(CAP_SYS_ADMIN))
3394 return -EPERM;
3396 vbuf = strndup_user((const char __user *)arg, FSLABEL_MAX);
3397 if (IS_ERR(vbuf))
3398 return PTR_ERR(vbuf);
3400 err = mnt_want_write_file(filp);
3401 if (err)
3402 goto out;
3404 down_write(&sbi->sb_lock);
3406 memset(sbi->raw_super->volume_name, 0,
3407 sizeof(sbi->raw_super->volume_name));
3408 utf8s_to_utf16s(vbuf, strlen(vbuf), UTF16_LITTLE_ENDIAN,
3409 sbi->raw_super->volume_name,
3410 ARRAY_SIZE(sbi->raw_super->volume_name));
3412 err = f2fs_commit_super(sbi, false);
3414 up_write(&sbi->sb_lock);
3416 mnt_drop_write_file(filp);
3417 out:
3418 kfree(vbuf);
3419 return err;
3422 static int f2fs_get_compress_blocks(struct file *filp, unsigned long arg)
3424 struct inode *inode = file_inode(filp);
3425 __u64 blocks;
3427 if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
3428 return -EOPNOTSUPP;
3430 if (!f2fs_compressed_file(inode))
3431 return -EINVAL;
3433 blocks = atomic_read(&F2FS_I(inode)->i_compr_blocks);
3434 return put_user(blocks, (u64 __user *)arg);
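/*
 * Walk @count block addresses and, for each compressed cluster, turn
 * the reserved NEW_ADDR slots back into NULL_ADDR, dropping them from
 * the inode's valid block count.  Returns the number of released
 * blocks, or -EFSCORRUPTED on an invalid block address.
 */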
3437 static int release_compress_blocks(struct dnode_of_data *dn, pgoff_t count)
3439 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
3440 unsigned int released_blocks = 0;
3441 int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
3442 block_t blkaddr;
3443 int i;
3445 for (i = 0; i < count; i++) {
3446 blkaddr = data_blkaddr(dn->inode, dn->node_page,
3447 dn->ofs_in_node + i);
3449 if (!__is_valid_data_blkaddr(blkaddr))
3450 continue;
3451 if (unlikely(!f2fs_is_valid_blkaddr(sbi, blkaddr,
3452 DATA_GENERIC_ENHANCE)))
3453 return -EFSCORRUPTED;
3456 while (count) {
3457 int compr_blocks = 0;
3459 for (i = 0; i < cluster_size; i++, dn->ofs_in_node++) {
3460 blkaddr = f2fs_data_blkaddr(dn);
3462 if (i == 0) {
3463 if (blkaddr == COMPRESS_ADDR)
3464 continue;
3465 dn->ofs_in_node += cluster_size;
3466 goto next;
3469 if (__is_valid_data_blkaddr(blkaddr))
3470 compr_blocks++;
3472 if (blkaddr != NEW_ADDR)
3473 continue;
3475 dn->data_blkaddr = NULL_ADDR;
3476 f2fs_set_data_blkaddr(dn);
3479 f2fs_i_compr_blocks_update(dn->inode, compr_blocks, false);
3480 dec_valid_block_count(sbi, dn->inode,
3481 cluster_size - compr_blocks);
3483 released_blocks += cluster_size - compr_blocks;
3484 next:
3485 count -= cluster_size;
3488 return released_blocks;
3491 static int f2fs_release_compress_blocks(struct file *filp, unsigned long arg)
3493 struct inode *inode = file_inode(filp);
3494 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3495 pgoff_t page_idx = 0, last_idx;
3496 unsigned int released_blocks = 0;
3497 int ret;
3498 int writecount;
3500 if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
3501 return -EOPNOTSUPP;
3503 if (!f2fs_compressed_file(inode))
3504 return -EINVAL;
3506 if (f2fs_readonly(sbi->sb))
3507 return -EROFS;
3509 ret = mnt_want_write_file(filp);
3510 if (ret)
3511 return ret;
3513 f2fs_balance_fs(F2FS_I_SB(inode), true);
3515 inode_lock(inode);
3517 writecount = atomic_read(&inode->i_writecount);
3518 if ((filp->f_mode & FMODE_WRITE && writecount != 1) ||
3519 (!(filp->f_mode & FMODE_WRITE) && writecount)) {
3520 ret = -EBUSY;
3521 goto out;
3524 if (IS_IMMUTABLE(inode)) {
3525 ret = -EINVAL;
3526 goto out;
3529 ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
3530 if (ret)
3531 goto out;
3533 F2FS_I(inode)->i_flags |= F2FS_IMMUTABLE_FL;
3534 f2fs_set_inode_flags(inode);
3535 inode->i_ctime = current_time(inode);
3536 f2fs_mark_inode_dirty_sync(inode, true);
3538 if (!atomic_read(&F2FS_I(inode)->i_compr_blocks))
3539 goto out;
3541 down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3542 down_write(&F2FS_I(inode)->i_mmap_sem);
3544 last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
3546 while (page_idx < last_idx) {
3547 struct dnode_of_data dn;
3548 pgoff_t end_offset, count;
3550 set_new_dnode(&dn, inode, NULL, NULL, 0);
3551 ret = f2fs_get_dnode_of_data(&dn, page_idx, LOOKUP_NODE);
3552 if (ret) {
3553 if (ret == -ENOENT) {
3554 page_idx = f2fs_get_next_page_offset(&dn,
3555 page_idx);
3556 ret = 0;
3557 continue;
3559 break;
3562 end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
3563 count = min(end_offset - dn.ofs_in_node, last_idx - page_idx);
3564 count = round_up(count, F2FS_I(inode)->i_cluster_size);
3566 ret = release_compress_blocks(&dn, count);
3568 f2fs_put_dnode(&dn);
3570 if (ret < 0)
3571 break;
3573 page_idx += count;
3574 released_blocks += ret;
3577 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3578 up_write(&F2FS_I(inode)->i_mmap_sem);
3579 out:
3580 inode_unlock(inode);
3582 mnt_drop_write_file(filp);
3584 if (ret >= 0) {
3585 ret = put_user(released_blocks, (u64 __user *)arg);
3586 } else if (released_blocks &&
3587 atomic_read(&F2FS_I(inode)->i_compr_blocks)) {
3588 set_sbi_flag(sbi, SBI_NEED_FSCK);
3589 f2fs_warn(sbi, "%s: partial blocks were released i_ino=%lx "
3590 "iblocks=%llu, released=%u, compr_blocks=%u, "
3591 "run fsck to fix.",
3592 __func__, inode->i_ino, inode->i_blocks,
3593 released_blocks,
3594 atomic_read(&F2FS_I(inode)->i_compr_blocks));
3597 return ret;
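/*
 * The inverse of release_compress_blocks(): re-reserve NEW_ADDR slots
 * for the unwritten part of each compressed cluster.  Returns the
 * number of blocks reserved, or -ENOSPC when the full reservation
 * cannot be granted.
 */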
3600 static int reserve_compress_blocks(struct dnode_of_data *dn, pgoff_t count)
3602 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
3603 unsigned int reserved_blocks = 0;
3604 int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
3605 block_t blkaddr;
3606 int i;
3608 for (i = 0; i < count; i++) {
3609 blkaddr = data_blkaddr(dn->inode, dn->node_page,
3610 dn->ofs_in_node + i);
3612 if (!__is_valid_data_blkaddr(blkaddr))
3613 continue;
3614 if (unlikely(!f2fs_is_valid_blkaddr(sbi, blkaddr,
3615 DATA_GENERIC_ENHANCE)))
3616 return -EFSCORRUPTED;
3619 while (count) {
3620 int compr_blocks = 0;
3621 blkcnt_t reserved;
3622 int ret;
3624 for (i = 0; i < cluster_size; i++, dn->ofs_in_node++) {
3625 blkaddr = f2fs_data_blkaddr(dn);
3627 if (i == 0) {
3628 if (blkaddr == COMPRESS_ADDR)
3629 continue;
3630 dn->ofs_in_node += cluster_size;
3631 goto next;
3634 if (__is_valid_data_blkaddr(blkaddr)) {
3635 compr_blocks++;
3636 continue;
3639 dn->data_blkaddr = NEW_ADDR;
3640 f2fs_set_data_blkaddr(dn);
3643 reserved = cluster_size - compr_blocks;
3644 ret = inc_valid_block_count(sbi, dn->inode, &reserved);
3645 if (ret)
3646 return ret;
3648 if (reserved != cluster_size - compr_blocks)
3649 return -ENOSPC;
3651 f2fs_i_compr_blocks_update(dn->inode, compr_blocks, true);
3653 reserved_blocks += reserved;
3654 next:
3655 count -= cluster_size;
3658 return reserved_blocks;
3661 static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg)
3663 struct inode *inode = file_inode(filp);
3664 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3665 pgoff_t page_idx = 0, last_idx;
3666 unsigned int reserved_blocks = 0;
3667 int ret;
3669 if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
3670 return -EOPNOTSUPP;
3672 if (!f2fs_compressed_file(inode))
3673 return -EINVAL;
3675 if (f2fs_readonly(sbi->sb))
3676 return -EROFS;
3678 ret = mnt_want_write_file(filp);
3679 if (ret)
3680 return ret;
3682 if (atomic_read(&F2FS_I(inode)->i_compr_blocks))
3683 goto out;
3685 f2fs_balance_fs(F2FS_I_SB(inode), true);
3687 inode_lock(inode);
3689 if (!IS_IMMUTABLE(inode)) {
3690 ret = -EINVAL;
3691 goto unlock_inode;
3694 down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3695 down_write(&F2FS_I(inode)->i_mmap_sem);
3697 last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
3699 while (page_idx < last_idx) {
3700 struct dnode_of_data dn;
3701 pgoff_t end_offset, count;
3703 set_new_dnode(&dn, inode, NULL, NULL, 0);
3704 ret = f2fs_get_dnode_of_data(&dn, page_idx, LOOKUP_NODE);
3705 if (ret) {
3706 if (ret == -ENOENT) {
3707 page_idx = f2fs_get_next_page_offset(&dn,
3708 page_idx);
3709 ret = 0;
3710 continue;
3712 break;
3715 end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
3716 count = min(end_offset - dn.ofs_in_node, last_idx - page_idx);
3717 count = round_up(count, F2FS_I(inode)->i_cluster_size);
3719 ret = reserve_compress_blocks(&dn, count);
3721 f2fs_put_dnode(&dn);
3723 if (ret < 0)
3724 break;
3726 page_idx += count;
3727 reserved_blocks += ret;
3730 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3731 up_write(&F2FS_I(inode)->i_mmap_sem);
3733 if (ret >= 0) {
3734 F2FS_I(inode)->i_flags &= ~F2FS_IMMUTABLE_FL;
3735 f2fs_set_inode_flags(inode);
3736 inode->i_ctime = current_time(inode);
3737 f2fs_mark_inode_dirty_sync(inode, true);
3739 unlock_inode:
3740 inode_unlock(inode);
3741 out:
3742 mnt_drop_write_file(filp);
3744 if (ret >= 0) {
3745 ret = put_user(reserved_blocks, (u64 __user *)arg);
3746 } else if (reserved_blocks &&
3747 atomic_read(&F2FS_I(inode)->i_compr_blocks)) {
3748 set_sbi_flag(sbi, SBI_NEED_FSCK);
3749 f2fs_warn(sbi, "%s: partial blocks were reserved i_ino=%lx "
3750 "iblocks=%llu, reserved=%u, compr_blocks=%u, "
3751 "run fsck to fix.",
3752 __func__, inode->i_ino, inode->i_blocks,
3753 reserved_blocks,
3754 atomic_read(&F2FS_I(inode)->i_compr_blocks));
3757 return ret;
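/*
 * Discard and/or zero out one contiguous block extent according to the
 * F2FS_TRIM_FILE_* flags.  Encrypted files are zeroed through
 * fscrypt_zeroout_range() so the range still reads back as zeroes
 * after decryption.
 */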
3760 static int f2fs_secure_erase(struct block_device *bdev, struct inode *inode,
3761 pgoff_t off, block_t block, block_t len, u32 flags)
3763 struct request_queue *q = bdev_get_queue(bdev);
3764 sector_t sector = SECTOR_FROM_BLOCK(block);
3765 sector_t nr_sects = SECTOR_FROM_BLOCK(len);
3766 int ret = 0;
3768 if (!q)
3769 return -ENXIO;
3771 if (flags & F2FS_TRIM_FILE_DISCARD)
3772 ret = blkdev_issue_discard(bdev, sector, nr_sects, GFP_NOFS,
3773 blk_queue_secure_erase(q) ?
3774 BLKDEV_DISCARD_SECURE : 0);
3776 if (!ret && (flags & F2FS_TRIM_FILE_ZEROOUT)) {
3777 if (IS_ENCRYPTED(inode))
3778 ret = fscrypt_zeroout_range(inode, off, block, len);
3779 else
3780 ret = blkdev_issue_zeroout(bdev, sector, nr_sects,
3781 GFP_NOFS, 0);
3784 return ret;
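/*
 * F2FS_IOC_SEC_TRIM_FILE: securely erase a file range by walking its
 * block map, batching physically contiguous extents on the same device
 * and handing each batch to f2fs_secure_erase().
 */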
3787 static int f2fs_sec_trim_file(struct file *filp, unsigned long arg)
3789 struct inode *inode = file_inode(filp);
3790 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3791 struct address_space *mapping = inode->i_mapping;
3792 struct block_device *prev_bdev = NULL;
3793 struct f2fs_sectrim_range range;
3794 pgoff_t index, pg_end, prev_index = 0;
3795 block_t prev_block = 0, len = 0;
3796 loff_t end_addr;
3797 bool to_end = false;
3798 int ret = 0;
3800 if (!(filp->f_mode & FMODE_WRITE))
3801 return -EBADF;
3803 if (copy_from_user(&range, (struct f2fs_sectrim_range __user *)arg,
3804 sizeof(range)))
3805 return -EFAULT;
3807 if (range.flags == 0 || (range.flags & ~F2FS_TRIM_FILE_MASK) ||
3808 !S_ISREG(inode->i_mode))
3809 return -EINVAL;
3811 if (((range.flags & F2FS_TRIM_FILE_DISCARD) &&
3812 !f2fs_hw_support_discard(sbi)) ||
3813 ((range.flags & F2FS_TRIM_FILE_ZEROOUT) &&
3814 IS_ENCRYPTED(inode) && f2fs_is_multi_device(sbi)))
3815 return -EOPNOTSUPP;
3817 file_start_write(filp);
3818 inode_lock(inode);
3820 if (f2fs_is_atomic_file(inode) || f2fs_compressed_file(inode) ||
3821 range.start >= inode->i_size) {
3822 ret = -EINVAL;
3823 goto err;
3826 if (range.len == 0)
3827 goto err;
3829 if (inode->i_size - range.start > range.len) {
3830 end_addr = range.start + range.len;
3831 } else {
3832 end_addr = range.len == (u64)-1 ?
3833 sbi->sb->s_maxbytes : inode->i_size;
3834 to_end = true;
3837 if (!IS_ALIGNED(range.start, F2FS_BLKSIZE) ||
3838 (!to_end && !IS_ALIGNED(end_addr, F2FS_BLKSIZE))) {
3839 ret = -EINVAL;
3840 goto err;
3843 index = F2FS_BYTES_TO_BLK(range.start);
3844 pg_end = DIV_ROUND_UP(end_addr, F2FS_BLKSIZE);
3846 ret = f2fs_convert_inline_inode(inode);
3847 if (ret)
3848 goto err;
3850 down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3851 down_write(&F2FS_I(inode)->i_mmap_sem);
3853 ret = filemap_write_and_wait_range(mapping, range.start,
3854 to_end ? LLONG_MAX : end_addr - 1);
3855 if (ret)
3856 goto out;
3858 truncate_inode_pages_range(mapping, range.start,
3859 to_end ? -1 : end_addr - 1);
3861 while (index < pg_end) {
3862 struct dnode_of_data dn;
3863 pgoff_t end_offset, count;
3864 int i;
3866 set_new_dnode(&dn, inode, NULL, NULL, 0);
3867 ret = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
3868 if (ret) {
3869 if (ret == -ENOENT) {
3870 index = f2fs_get_next_page_offset(&dn, index);
3871 continue;
3873 goto out;
3876 end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
3877 count = min(end_offset - dn.ofs_in_node, pg_end - index);
3878 for (i = 0; i < count; i++, index++, dn.ofs_in_node++) {
3879 struct block_device *cur_bdev;
3880 block_t blkaddr = f2fs_data_blkaddr(&dn);
3882 if (!__is_valid_data_blkaddr(blkaddr))
3883 continue;
3885 if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
3886 DATA_GENERIC_ENHANCE)) {
3887 ret = -EFSCORRUPTED;
3888 f2fs_put_dnode(&dn);
3889 goto out;
3892 cur_bdev = f2fs_target_device(sbi, blkaddr, NULL);
3893 if (f2fs_is_multi_device(sbi)) {
3894 int di = f2fs_target_device_index(sbi, blkaddr);
3896 blkaddr -= FDEV(di).start_blk;
3899 if (len) {
3900 if (prev_bdev == cur_bdev &&
3901 index == prev_index + len &&
3902 blkaddr == prev_block + len) {
3903 len++;
3904 } else {
3905 ret = f2fs_secure_erase(prev_bdev,
3906 inode, prev_index, prev_block,
3907 len, range.flags);
3908 if (ret) {
3909 f2fs_put_dnode(&dn);
3910 goto out;
3913 len = 0;
3917 if (!len) {
3918 prev_bdev = cur_bdev;
3919 prev_index = index;
3920 prev_block = blkaddr;
3921 len = 1;
3925 f2fs_put_dnode(&dn);
3927 if (fatal_signal_pending(current)) {
3928 ret = -EINTR;
3929 goto out;
3931 cond_resched();
3934 if (len)
3935 ret = f2fs_secure_erase(prev_bdev, inode, prev_index,
3936 prev_block, len, range.flags);
3937 out:
3938 up_write(&F2FS_I(inode)->i_mmap_sem);
3939 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3940 err:
3941 inode_unlock(inode);
3942 file_end_write(filp);
3944 return ret;
3947 static int f2fs_ioc_get_compress_option(struct file *filp, unsigned long arg)
3949 struct inode *inode = file_inode(filp);
3950 struct f2fs_comp_option option;
3952 if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
3953 return -EOPNOTSUPP;
3955 inode_lock_shared(inode);
3957 if (!f2fs_compressed_file(inode)) {
3958 inode_unlock_shared(inode);
3959 return -ENODATA;
3962 option.algorithm = F2FS_I(inode)->i_compress_algorithm;
3963 option.log_cluster_size = F2FS_I(inode)->i_log_cluster_size;
3965 inode_unlock_shared(inode);
3967 if (copy_to_user((struct f2fs_comp_option __user *)arg, &option,
3968 sizeof(option)))
3969 return -EFAULT;
3971 return 0;
3974 static int f2fs_ioc_set_compress_option(struct file *filp, unsigned long arg)
3976 struct inode *inode = file_inode(filp);
3977 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3978 struct f2fs_comp_option option;
3979 int ret = 0;
3981 if (!f2fs_sb_has_compression(sbi))
3982 return -EOPNOTSUPP;
3984 if (!(filp->f_mode & FMODE_WRITE))
3985 return -EBADF;
3987 if (copy_from_user(&option, (struct f2fs_comp_option __user *)arg,
3988 sizeof(option)))
3989 return -EFAULT;
3991 if (!f2fs_compressed_file(inode) ||
3992 option.log_cluster_size < MIN_COMPRESS_LOG_SIZE ||
3993 option.log_cluster_size > MAX_COMPRESS_LOG_SIZE ||
3994 option.algorithm >= COMPRESS_MAX)
3995 return -EINVAL;
3997 file_start_write(filp);
3998 inode_lock(inode);
4000 if (f2fs_is_mmap_file(inode) || get_dirty_pages(inode)) {
4001 ret = -EBUSY;
4002 goto out;
4005 if (inode->i_size != 0) {
4006 ret = -EFBIG;
4007 goto out;
4010 F2FS_I(inode)->i_compress_algorithm = option.algorithm;
4011 F2FS_I(inode)->i_log_cluster_size = option.log_cluster_size;
4012 F2FS_I(inode)->i_cluster_size = 1 << option.log_cluster_size;
4013 f2fs_mark_inode_dirty_sync(inode, true);
4015 if (!f2fs_is_compress_backend_ready(inode))
4016 f2fs_warn(sbi, "compression algorithm is successfully set, "
4017 "but current kernel doesn't support this algorithm.");
4018 out:
4019 inode_unlock(inode);
4020 file_end_write(filp);
4022 return ret;
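/*
 * Read a run of pages into the page cache and immediately mark them
 * dirty again, so that writeback pushes them through the
 * (de)compression path; used by the compress/decompress ioctls below.
 */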
4025 static int redirty_blocks(struct inode *inode, pgoff_t page_idx, int len)
4027 DEFINE_READAHEAD(ractl, NULL, inode->i_mapping, page_idx);
4028 struct address_space *mapping = inode->i_mapping;
4029 struct page *page;
4030 pgoff_t redirty_idx = page_idx;
4031 int i, page_len = 0, ret = 0;
4033 page_cache_ra_unbounded(&ractl, len, 0);
4035 for (i = 0; i < len; i++, page_idx++) {
4036 page = read_cache_page(mapping, page_idx, NULL, NULL);
4037 if (IS_ERR(page)) {
4038 ret = PTR_ERR(page);
4039 break;
4041 page_len++;
4044 for (i = 0; i < page_len; i++, redirty_idx++) {
4045 page = find_lock_page(mapping, redirty_idx);
4046 if (!page) {
4047 ret = -ENOENT;
break;
}
4048 set_page_dirty(page);
4049 f2fs_put_page(page, 1);
4050 f2fs_put_page(page, 0);
4053 return ret;
4056 static int f2fs_ioc_decompress_file(struct file *filp, unsigned long arg)
4058 struct inode *inode = file_inode(filp);
4059 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
4060 struct f2fs_inode_info *fi = F2FS_I(inode);
4061 pgoff_t page_idx = 0, last_idx;
4062 unsigned int blk_per_seg = sbi->blocks_per_seg;
4063 int cluster_size = F2FS_I(inode)->i_cluster_size;
4064 int count, ret;
4066 if (!f2fs_sb_has_compression(sbi) ||
4067 F2FS_OPTION(sbi).compress_mode != COMPR_MODE_USER)
4068 return -EOPNOTSUPP;
4070 if (!(filp->f_mode & FMODE_WRITE))
4071 return -EBADF;
4073 if (!f2fs_compressed_file(inode))
4074 return -EINVAL;
4076 f2fs_balance_fs(F2FS_I_SB(inode), true);
4078 file_start_write(filp);
4079 inode_lock(inode);
4081 if (!f2fs_is_compress_backend_ready(inode)) {
4082 ret = -EOPNOTSUPP;
4083 goto out;
4086 if (f2fs_is_mmap_file(inode)) {
4087 ret = -EBUSY;
4088 goto out;
4091 ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
4092 if (ret)
4093 goto out;
4095 if (!atomic_read(&fi->i_compr_blocks))
4096 goto out;
4098 last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
4100 count = last_idx - page_idx;
4101 while (count) {
4102 int len = min(cluster_size, count);
4104 ret = redirty_blocks(inode, page_idx, len);
4105 if (ret < 0)
4106 break;
4108 if (get_dirty_pages(inode) >= blk_per_seg)
4109 filemap_fdatawrite(inode->i_mapping);
4111 count -= len;
4112 page_idx += len;
4115 if (!ret)
4116 ret = filemap_write_and_wait_range(inode->i_mapping, 0,
4117 LLONG_MAX);
4119 if (ret)
4120 f2fs_warn(sbi, "%s: The file might be partially decompressed "
4121 "(errno=%d). Please delete the file.\n",
4122 __func__, ret);
4123 out:
4124 inode_unlock(inode);
4125 file_end_write(filp);
4127 return ret;
4130 static int f2fs_ioc_compress_file(struct file *filp, unsigned long arg)
4132 struct inode *inode = file_inode(filp);
4133 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
4134 pgoff_t page_idx = 0, last_idx;
4135 unsigned int blk_per_seg = sbi->blocks_per_seg;
4136 int cluster_size = F2FS_I(inode)->i_cluster_size;
4137 int count, ret;
4139 if (!f2fs_sb_has_compression(sbi) ||
4140 F2FS_OPTION(sbi).compress_mode != COMPR_MODE_USER)
4141 return -EOPNOTSUPP;
4143 if (!(filp->f_mode & FMODE_WRITE))
4144 return -EBADF;
4146 if (!f2fs_compressed_file(inode))
4147 return -EINVAL;
4149 f2fs_balance_fs(F2FS_I_SB(inode), true);
4151 file_start_write(filp);
4152 inode_lock(inode);
4154 if (!f2fs_is_compress_backend_ready(inode)) {
4155 ret = -EOPNOTSUPP;
4156 goto out;
4159 if (f2fs_is_mmap_file(inode)) {
4160 ret = -EBUSY;
4161 goto out;
4164 ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
4165 if (ret)
4166 goto out;
4168 set_inode_flag(inode, FI_ENABLE_COMPRESS);
4170 last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
4172 count = last_idx - page_idx;
4173 while (count) {
4174 int len = min(cluster_size, count);
4176 ret = redirty_blocks(inode, page_idx, len);
4177 if (ret < 0)
4178 break;
4180 if (get_dirty_pages(inode) >= blk_per_seg)
4181 filemap_fdatawrite(inode->i_mapping);
4183 count -= len;
4184 page_idx += len;
4187 if (!ret)
4188 ret = filemap_write_and_wait_range(inode->i_mapping, 0,
4189 LLONG_MAX);
4191 clear_inode_flag(inode, FI_ENABLE_COMPRESS);
4193 if (ret)
4194 f2fs_warn(sbi, "%s: The file might be partially compressed "
4195 "(errno=%d). Please delete the file.\n",
4196 __func__, ret);
4197 out:
4198 inode_unlock(inode);
4199 file_end_write(filp);
4201 return ret;
4204 static long __f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
4206 switch (cmd) {
4207 case FS_IOC_GETFLAGS:
4208 return f2fs_ioc_getflags(filp, arg);
4209 case FS_IOC_SETFLAGS:
4210 return f2fs_ioc_setflags(filp, arg);
4211 case FS_IOC_GETVERSION:
4212 return f2fs_ioc_getversion(filp, arg);
4213 case F2FS_IOC_START_ATOMIC_WRITE:
4214 return f2fs_ioc_start_atomic_write(filp);
4215 case F2FS_IOC_COMMIT_ATOMIC_WRITE:
4216 return f2fs_ioc_commit_atomic_write(filp);
4217 case F2FS_IOC_START_VOLATILE_WRITE:
4218 return f2fs_ioc_start_volatile_write(filp);
4219 case F2FS_IOC_RELEASE_VOLATILE_WRITE:
4220 return f2fs_ioc_release_volatile_write(filp);
4221 case F2FS_IOC_ABORT_VOLATILE_WRITE:
4222 return f2fs_ioc_abort_volatile_write(filp);
4223 case F2FS_IOC_SHUTDOWN:
4224 return f2fs_ioc_shutdown(filp, arg);
4225 case FITRIM:
4226 return f2fs_ioc_fitrim(filp, arg);
4227 case FS_IOC_SET_ENCRYPTION_POLICY:
4228 return f2fs_ioc_set_encryption_policy(filp, arg);
4229 case FS_IOC_GET_ENCRYPTION_POLICY:
4230 return f2fs_ioc_get_encryption_policy(filp, arg);
4231 case FS_IOC_GET_ENCRYPTION_PWSALT:
4232 return f2fs_ioc_get_encryption_pwsalt(filp, arg);
4233 case FS_IOC_GET_ENCRYPTION_POLICY_EX:
4234 return f2fs_ioc_get_encryption_policy_ex(filp, arg);
4235 case FS_IOC_ADD_ENCRYPTION_KEY:
4236 return f2fs_ioc_add_encryption_key(filp, arg);
4237 case FS_IOC_REMOVE_ENCRYPTION_KEY:
4238 return f2fs_ioc_remove_encryption_key(filp, arg);
4239 case FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS:
4240 return f2fs_ioc_remove_encryption_key_all_users(filp, arg);
4241 case FS_IOC_GET_ENCRYPTION_KEY_STATUS:
4242 return f2fs_ioc_get_encryption_key_status(filp, arg);
4243 case FS_IOC_GET_ENCRYPTION_NONCE:
4244 return f2fs_ioc_get_encryption_nonce(filp, arg);
4245 case F2FS_IOC_GARBAGE_COLLECT:
4246 return f2fs_ioc_gc(filp, arg);
4247 case F2FS_IOC_GARBAGE_COLLECT_RANGE:
4248 return f2fs_ioc_gc_range(filp, arg);
4249 case F2FS_IOC_WRITE_CHECKPOINT:
4250 return f2fs_ioc_write_checkpoint(filp, arg);
4251 case F2FS_IOC_DEFRAGMENT:
4252 return f2fs_ioc_defragment(filp, arg);
4253 case F2FS_IOC_MOVE_RANGE:
4254 return f2fs_ioc_move_range(filp, arg);
4255 case F2FS_IOC_FLUSH_DEVICE:
4256 return f2fs_ioc_flush_device(filp, arg);
4257 case F2FS_IOC_GET_FEATURES:
4258 return f2fs_ioc_get_features(filp, arg);
4259 case FS_IOC_FSGETXATTR:
4260 return f2fs_ioc_fsgetxattr(filp, arg);
4261 case FS_IOC_FSSETXATTR:
4262 return f2fs_ioc_fssetxattr(filp, arg);
4263 case F2FS_IOC_GET_PIN_FILE:
4264 return f2fs_ioc_get_pin_file(filp, arg);
4265 case F2FS_IOC_SET_PIN_FILE:
4266 return f2fs_ioc_set_pin_file(filp, arg);
4267 case F2FS_IOC_PRECACHE_EXTENTS:
4268 return f2fs_ioc_precache_extents(filp, arg);
4269 case F2FS_IOC_RESIZE_FS:
4270 return f2fs_ioc_resize_fs(filp, arg);
4271 case FS_IOC_ENABLE_VERITY:
4272 return f2fs_ioc_enable_verity(filp, arg);
4273 case FS_IOC_MEASURE_VERITY:
4274 return f2fs_ioc_measure_verity(filp, arg);
4275 case FS_IOC_GETFSLABEL:
4276 return f2fs_ioc_getfslabel(filp, arg);
4277 case FS_IOC_SETFSLABEL:
4278 return f2fs_ioc_setfslabel(filp, arg);
4279 case F2FS_IOC_GET_COMPRESS_BLOCKS:
4280 return f2fs_get_compress_blocks(filp, arg);
4281 case F2FS_IOC_RELEASE_COMPRESS_BLOCKS:
4282 return f2fs_release_compress_blocks(filp, arg);
4283 case F2FS_IOC_RESERVE_COMPRESS_BLOCKS:
4284 return f2fs_reserve_compress_blocks(filp, arg);
4285 case F2FS_IOC_SEC_TRIM_FILE:
4286 return f2fs_sec_trim_file(filp, arg);
4287 case F2FS_IOC_GET_COMPRESS_OPTION:
4288 return f2fs_ioc_get_compress_option(filp, arg);
4289 case F2FS_IOC_SET_COMPRESS_OPTION:
4290 return f2fs_ioc_set_compress_option(filp, arg);
4291 case F2FS_IOC_DECOMPRESS_FILE:
4292 return f2fs_ioc_decompress_file(filp, arg);
4293 case F2FS_IOC_COMPRESS_FILE:
4294 return f2fs_ioc_compress_file(filp, arg);
4295 default:
4296 return -ENOTTY;
4300 long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
4302 if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(filp)))))
4303 return -EIO;
4304 if (!f2fs_is_checkpoint_ready(F2FS_I_SB(file_inode(filp))))
4305 return -ENOSPC;
4307 return __f2fs_ioctl(filp, cmd, arg);
4310 static ssize_t f2fs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
4312 struct file *file = iocb->ki_filp;
4313 struct inode *inode = file_inode(file);
4314 int ret;
4316 if (!f2fs_is_compress_backend_ready(inode))
4317 return -EOPNOTSUPP;
4319 ret = generic_file_read_iter(iocb, iter);
4321 if (ret > 0)
4322 f2fs_update_iostat(F2FS_I_SB(inode), APP_READ_IO, ret);
4324 return ret;
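/*
 * Write entry point: honours IOCB_NOWAIT constraints, converts inline
 * data ahead of direct I/O, and preallocates blocks up front, trimming
 * them back with f2fs_truncate() if the write falls short.
 */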
4327 static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
4329 struct file *file = iocb->ki_filp;
4330 struct inode *inode = file_inode(file);
4331 ssize_t ret;
4333 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) {
4334 ret = -EIO;
4335 goto out;
4338 if (!f2fs_is_compress_backend_ready(inode)) {
4339 ret = -EOPNOTSUPP;
4340 goto out;
4343 if (iocb->ki_flags & IOCB_NOWAIT) {
4344 if (!inode_trylock(inode)) {
4345 ret = -EAGAIN;
4346 goto out;
4348 } else {
4349 inode_lock(inode);
4352 ret = generic_write_checks(iocb, from);
4353 if (ret > 0) {
4354 bool preallocated = false;
4355 size_t target_size = 0;
4356 int err;
4358 if (iov_iter_fault_in_readable(from, iov_iter_count(from)))
4359 set_inode_flag(inode, FI_NO_PREALLOC);
4361 if ((iocb->ki_flags & IOCB_NOWAIT)) {
4362 if (!f2fs_overwrite_io(inode, iocb->ki_pos,
4363 iov_iter_count(from)) ||
4364 f2fs_has_inline_data(inode) ||
4365 f2fs_force_buffered_io(inode, iocb, from)) {
4366 clear_inode_flag(inode, FI_NO_PREALLOC);
4367 inode_unlock(inode);
4368 ret = -EAGAIN;
4369 goto out;
4371 goto write;
4374 if (is_inode_flag_set(inode, FI_NO_PREALLOC))
4375 goto write;
4377 if (iocb->ki_flags & IOCB_DIRECT) {
4378 /*
4379  * Convert inline data for Direct I/O before entering
4380  * f2fs_direct_IO().
4381  */
4382 err = f2fs_convert_inline_inode(inode);
4383 if (err)
4384 goto out_err;
4385 /*
4386  * If f2fs_force_buffered_io() is true, we have to allocate
4387  * blocks all the time, since f2fs_direct_IO() will fall
4388  * back to buffered IO.
4389  */
4390 if (!f2fs_force_buffered_io(inode, iocb, from) &&
4391 allow_outplace_dio(inode, iocb, from))
4392 goto write;
4394 preallocated = true;
4395 target_size = iocb->ki_pos + iov_iter_count(from);
4397 err = f2fs_preallocate_blocks(iocb, from);
4398 if (err) {
4399 out_err:
4400 clear_inode_flag(inode, FI_NO_PREALLOC);
4401 inode_unlock(inode);
4402 ret = err;
4403 goto out;
4405 write:
4406 ret = __generic_file_write_iter(iocb, from);
4407 clear_inode_flag(inode, FI_NO_PREALLOC);
4409 /* if we couldn't write data, we should deallocate blocks. */
4410 if (preallocated && i_size_read(inode) < target_size)
4411 f2fs_truncate(inode);
4413 if (ret > 0)
4414 f2fs_update_iostat(F2FS_I_SB(inode), APP_WRITE_IO, ret);
4416 inode_unlock(inode);
4417 out:
4418 trace_f2fs_file_write_iter(inode, iocb->ki_pos,
4419 iov_iter_count(from), ret);
4420 if (ret > 0)
4421 ret = generic_write_sync(iocb, ret);
4422 return ret;
4425 #ifdef CONFIG_COMPAT
4426 struct compat_f2fs_gc_range {
4427 u32 sync;
4428 compat_u64 start;
4429 compat_u64 len;
4431 #define F2FS_IOC32_GARBAGE_COLLECT_RANGE _IOW(F2FS_IOCTL_MAGIC, 11,\
4432 struct compat_f2fs_gc_range)
static int f2fs_compat_ioc_gc_range(struct file *file, unsigned long arg)
{
        struct compat_f2fs_gc_range __user *urange;
        struct f2fs_gc_range range;
        int err;

        urange = compat_ptr(arg);
        err = get_user(range.sync, &urange->sync);
        err |= get_user(range.start, &urange->start);
        err |= get_user(range.len, &urange->len);
        if (err)
                return -EFAULT;

        return __f2fs_ioc_gc_range(file, &range);
}

struct compat_f2fs_move_range {
        u32 dst_fd;
        compat_u64 pos_in;
        compat_u64 pos_out;
        compat_u64 len;
};

#define F2FS_IOC32_MOVE_RANGE                   _IOWR(F2FS_IOCTL_MAGIC, 9, \
                                                struct compat_f2fs_move_range)

static int f2fs_compat_ioc_move_range(struct file *file, unsigned long arg)
{
        struct compat_f2fs_move_range __user *urange;
        struct f2fs_move_range range;
        int err;

        urange = compat_ptr(arg);
        err = get_user(range.dst_fd, &urange->dst_fd);
        err |= get_user(range.pos_in, &urange->pos_in);
        err |= get_user(range.pos_out, &urange->pos_out);
        err |= get_user(range.len, &urange->len);
        if (err)
                return -EFAULT;

        return __f2fs_ioc_move_range(file, &range);
}
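
/*
 * Compat ioctl entry point: flag/version ioctls are renumbered to
 * their native equivalents, the two range ioctls are thunked through
 * the layout-fixing helpers above, and every other supported command
 * has an identical 32-bit and 64-bit ABI, so it is forwarded to
 * __f2fs_ioctl() unchanged (with the pointer argument widened via
 * compat_ptr()).
 */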
long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
        if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(file)))))
                return -EIO;
        if (!f2fs_is_checkpoint_ready(F2FS_I_SB(file_inode(file))))
                return -ENOSPC;

        switch (cmd) {
        case FS_IOC32_GETFLAGS:
                cmd = FS_IOC_GETFLAGS;
                break;
        case FS_IOC32_SETFLAGS:
                cmd = FS_IOC_SETFLAGS;
                break;
        case FS_IOC32_GETVERSION:
                cmd = FS_IOC_GETVERSION;
                break;
        case F2FS_IOC32_GARBAGE_COLLECT_RANGE:
                return f2fs_compat_ioc_gc_range(file, arg);
        case F2FS_IOC32_MOVE_RANGE:
                return f2fs_compat_ioc_move_range(file, arg);
        case F2FS_IOC_START_ATOMIC_WRITE:
        case F2FS_IOC_COMMIT_ATOMIC_WRITE:
        case F2FS_IOC_START_VOLATILE_WRITE:
        case F2FS_IOC_RELEASE_VOLATILE_WRITE:
        case F2FS_IOC_ABORT_VOLATILE_WRITE:
        case F2FS_IOC_SHUTDOWN:
        case FITRIM:
        case FS_IOC_SET_ENCRYPTION_POLICY:
        case FS_IOC_GET_ENCRYPTION_PWSALT:
        case FS_IOC_GET_ENCRYPTION_POLICY:
        case FS_IOC_GET_ENCRYPTION_POLICY_EX:
        case FS_IOC_ADD_ENCRYPTION_KEY:
        case FS_IOC_REMOVE_ENCRYPTION_KEY:
        case FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS:
        case FS_IOC_GET_ENCRYPTION_KEY_STATUS:
        case FS_IOC_GET_ENCRYPTION_NONCE:
        case F2FS_IOC_GARBAGE_COLLECT:
        case F2FS_IOC_WRITE_CHECKPOINT:
        case F2FS_IOC_DEFRAGMENT:
        case F2FS_IOC_FLUSH_DEVICE:
        case F2FS_IOC_GET_FEATURES:
        case FS_IOC_FSGETXATTR:
        case FS_IOC_FSSETXATTR:
        case F2FS_IOC_GET_PIN_FILE:
        case F2FS_IOC_SET_PIN_FILE:
        case F2FS_IOC_PRECACHE_EXTENTS:
        case F2FS_IOC_RESIZE_FS:
        case FS_IOC_ENABLE_VERITY:
        case FS_IOC_MEASURE_VERITY:
        case FS_IOC_GETFSLABEL:
        case FS_IOC_SETFSLABEL:
        case F2FS_IOC_GET_COMPRESS_BLOCKS:
        case F2FS_IOC_RELEASE_COMPRESS_BLOCKS:
        case F2FS_IOC_RESERVE_COMPRESS_BLOCKS:
        case F2FS_IOC_SEC_TRIM_FILE:
        case F2FS_IOC_GET_COMPRESS_OPTION:
        case F2FS_IOC_SET_COMPRESS_OPTION:
        case F2FS_IOC_DECOMPRESS_FILE:
        case F2FS_IOC_COMPRESS_FILE:
                break;
        default:
                return -ENOIOCTLCMD;
        }
        return __f2fs_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
}
#endif
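
/*
 * A minimal sketch (not from this file) of how this table is hooked
 * up: regular-file inodes are pointed at it when they are
 * instantiated in f2fs's inode setup code, e.g.
 *
 *      inode->i_fop = &f2fs_file_operations;
 *
 * after which the VFS dispatches read_iter/write_iter/ioctl and
 * friends to the handlers defined above.
 */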
const struct file_operations f2fs_file_operations = {
        .llseek         = f2fs_llseek,
        .read_iter      = f2fs_file_read_iter,
        .write_iter     = f2fs_file_write_iter,
        .open           = f2fs_file_open,
        .release        = f2fs_release_file,
        .mmap           = f2fs_file_mmap,
        .flush          = f2fs_file_flush,
        .fsync          = f2fs_sync_file,
        .fallocate      = f2fs_fallocate,
        .unlocked_ioctl = f2fs_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = f2fs_compat_ioctl,
#endif
        .splice_read    = generic_file_splice_read,
        .splice_write   = iter_file_splice_write,
};