// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/inode.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/backing-dev.h>
#include <linux/writeback.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"

#include <trace/events/f2fs.h>

void f2fs_mark_inode_dirty_sync(struct inode *inode, bool sync)
{
	if (is_inode_flag_set(inode, FI_NEW_INODE))
		return;

	if (f2fs_inode_dirtied(inode, sync))
		return;

	mark_inode_dirty_sync(inode);
}

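/*
 * Mirror the on-disk F2FS inode flags (F2FS_*_FL) into the generic
 * VFS inode flags (S_*) so the rest of the kernel sees them.
 */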
void f2fs_set_inode_flags(struct inode *inode)
{
	unsigned int flags = F2FS_I(inode)->i_flags;
	unsigned int new_fl = 0;

	if (flags & F2FS_SYNC_FL)
		new_fl |= S_SYNC;
	if (flags & F2FS_APPEND_FL)
		new_fl |= S_APPEND;
	if (flags & F2FS_IMMUTABLE_FL)
		new_fl |= S_IMMUTABLE;
	if (flags & F2FS_NOATIME_FL)
		new_fl |= S_NOATIME;
	if (flags & F2FS_DIRSYNC_FL)
		new_fl |= S_DIRSYNC;
	if (file_is_encrypt(inode))
		new_fl |= S_ENCRYPTED;
	if (file_is_verity(inode))
		new_fl |= S_VERITY;
	if (flags & F2FS_CASEFOLD_FL)
		new_fl |= S_CASEFOLD;
	inode_set_flags(inode, new_fl,
			S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC|
			S_ENCRYPTED|S_VERITY|S_CASEFOLD);
}

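/*
 * For special inodes (chr/blk/fifo/sock), decode the device number that
 * was stored in the first block-address slots of the on-disk inode.
 */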
static void __get_inode_rdev(struct inode *inode, struct f2fs_inode *ri)
{
	int extra_size = get_extra_isize(inode);

	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
			S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
		if (ri->i_addr[extra_size])
			inode->i_rdev = old_decode_dev(
					le32_to_cpu(ri->i_addr[extra_size]));
		else
			inode->i_rdev = new_decode_dev(
					le32_to_cpu(ri->i_addr[extra_size + 1]));
	}
}

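/*
 * Return 0 if the inode's first data block is already written, 1 if it has
 * no valid data block address yet, or -EFSCORRUPTED if the address is bogus.
 */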
static int __written_first_block(struct f2fs_sb_info *sbi,
					struct f2fs_inode *ri)
{
	block_t addr = le32_to_cpu(ri->i_addr[offset_in_addr(ri)]);

	if (!__is_valid_data_blkaddr(addr))
		return 1;
	if (!f2fs_is_valid_blkaddr(sbi, addr, DATA_GENERIC_ENHANCE))
		return -EFSCORRUPTED;
	return 0;
}

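/*
 * Encode the device number of a chr/blk inode into the first block-address
 * slots of the on-disk inode, using the old or new dev_t encoding.
 */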
static void __set_inode_rdev(struct inode *inode, struct f2fs_inode *ri)
{
	int extra_size = get_extra_isize(inode);

	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		if (old_valid_dev(inode->i_rdev)) {
			ri->i_addr[extra_size] =
				cpu_to_le32(old_encode_dev(inode->i_rdev));
			ri->i_addr[extra_size + 1] = 0;
		} else {
			ri->i_addr[extra_size] = 0;
			ri->i_addr[extra_size + 1] =
				cpu_to_le32(new_encode_dev(inode->i_rdev));
			ri->i_addr[extra_size + 2] = 0;
		}
	}
}

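/*
 * If any non-zero word remains in the inline data area, re-set the
 * FI_DATA_EXIST flag (and the raw inline bits) and dirty the inode page.
 */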
static void __recover_inline_status(struct inode *inode, struct page *ipage)
{
	void *inline_data = inline_data_addr(inode, ipage);
	__le32 *start = inline_data;
	__le32 *end = start + MAX_INLINE_DATA(inode) / sizeof(__le32);

	while (start < end) {
		if (*start++) {
			f2fs_wait_on_page_writeback(ipage, NODE, true, true);

			set_inode_flag(inode, FI_DATA_EXIST);
			set_raw_inline(inode, F2FS_INODE(ipage));
			set_page_dirty(ipage);
			return;
		}
	}
	return;
}

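/*
 * Inode checksumming applies only when the superblock feature is enabled and
 * the on-disk inode has an extra area large enough to hold the checksum.
 */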
static bool f2fs_enable_inode_chksum(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_inode *ri = &F2FS_NODE(page)->i;

	if (!f2fs_sb_has_inode_chksum(sbi))
		return false;

	if (!IS_INODE(page) || !(ri->i_inline & F2FS_EXTRA_ATTR))
		return false;

	if (!F2FS_FITS_IN_INODE(ri, le16_to_cpu(ri->i_extra_isize),
				i_inode_checksum))
		return false;

	return true;
}

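/*
 * Compute the inode checksum: seed with the node's ino and i_generation, then
 * checksum the inode block while treating the stored i_inode_checksum field
 * itself as zero.
 */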
static __u32 f2fs_inode_chksum(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_node *node = F2FS_NODE(page);
	struct f2fs_inode *ri = &node->i;
	__le32 ino = node->footer.ino;
	__le32 gen = ri->i_generation;
	__u32 chksum, chksum_seed;
	__u32 dummy_cs = 0;
	unsigned int offset = offsetof(struct f2fs_inode, i_inode_checksum);
	unsigned int cs_size = sizeof(dummy_cs);

	chksum = f2fs_chksum(sbi, sbi->s_chksum_seed, (__u8 *)&ino,
							sizeof(ino));
	chksum_seed = f2fs_chksum(sbi, chksum, (__u8 *)&gen, sizeof(gen));

	chksum = f2fs_chksum(sbi, chksum_seed, (__u8 *)ri, offset);
	chksum = f2fs_chksum(sbi, chksum, (__u8 *)&dummy_cs, cs_size);
	offset += cs_size;
	chksum = f2fs_chksum(sbi, chksum, (__u8 *)ri + offset,
						F2FS_BLKSIZE - offset);
	return chksum;
}

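/*
 * Verify the stored inode checksum against a freshly computed one; dirty or
 * writeback pages are skipped unless CONFIG_F2FS_CHECK_FS is enabled.
 */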
bool f2fs_inode_chksum_verify(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_inode *ri;
	__u32 provided, calculated;

	if (unlikely(is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN)))
		return true;

#ifdef CONFIG_F2FS_CHECK_FS
	if (!f2fs_enable_inode_chksum(sbi, page))
#else
	if (!f2fs_enable_inode_chksum(sbi, page) ||
			PageDirty(page) || PageWriteback(page))
#endif
		return true;

	ri = &F2FS_NODE(page)->i;
	provided = le32_to_cpu(ri->i_inode_checksum);
	calculated = f2fs_inode_chksum(sbi, page);

	if (provided != calculated)
		f2fs_warn(sbi, "checksum invalid, nid = %lu, ino_of_node = %x, %x vs. %x",
			  page->index, ino_of_node(page), provided, calculated);

	return provided == calculated;
}

void f2fs_inode_chksum_set(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_inode *ri = &F2FS_NODE(page)->i;

	if (!f2fs_enable_inode_chksum(sbi, page))
		return;

	ri->i_inode_checksum = cpu_to_le32(f2fs_inode_chksum(sbi, page));
}

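/*
 * Reject obviously corrupted on-disk inodes (bad i_blocks, node footer, extra
 * attribute sizes, inline flags or extent info) and ask for fsck.
 */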
static bool sanity_check_inode(struct inode *inode, struct page *node_page)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	unsigned long long iblocks;

	iblocks = le64_to_cpu(F2FS_INODE(node_page)->i_blocks);
	if (!iblocks) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: corrupted inode i_blocks i_ino=%lx iblocks=%llu, run fsck to fix.",
			  __func__, inode->i_ino, iblocks);
		return false;
	}

	if (ino_of_node(node_page) != nid_of_node(node_page)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: corrupted inode footer i_ino=%lx, ino,nid: [%u, %u] run fsck to fix.",
			  __func__, inode->i_ino,
			  ino_of_node(node_page), nid_of_node(node_page));
		return false;
	}

	if (f2fs_sb_has_flexible_inline_xattr(sbi)
			&& !f2fs_has_extra_attr(inode)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: corrupted inode ino=%lx, run fsck to fix.",
			  __func__, inode->i_ino);
		return false;
	}

	if (f2fs_has_extra_attr(inode) &&
			!f2fs_sb_has_extra_attr(sbi)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: inode (ino=%lx) is with extra_attr, but extra_attr feature is off",
			  __func__, inode->i_ino);
		return false;
	}

	if (fi->i_extra_isize > F2FS_TOTAL_EXTRA_ATTR_SIZE ||
			fi->i_extra_isize % sizeof(__le32)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: inode (ino=%lx) has corrupted i_extra_isize: %d, max: %zu",
			  __func__, inode->i_ino, fi->i_extra_isize,
			  F2FS_TOTAL_EXTRA_ATTR_SIZE);
		return false;
	}

	if (f2fs_has_extra_attr(inode) &&
		f2fs_sb_has_flexible_inline_xattr(sbi) &&
		f2fs_has_inline_xattr(inode) &&
		(!fi->i_inline_xattr_size ||
		fi->i_inline_xattr_size > MAX_INLINE_XATTR_SIZE)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: inode (ino=%lx) has corrupted i_inline_xattr_size: %d, max: %zu",
			  __func__, inode->i_ino, fi->i_inline_xattr_size,
			  MAX_INLINE_XATTR_SIZE);
		return false;
	}

	if (F2FS_I(inode)->extent_tree) {
		struct extent_info *ei = &F2FS_I(inode)->extent_tree->largest;

		if (ei->len &&
			(!f2fs_is_valid_blkaddr(sbi, ei->blk,
						DATA_GENERIC_ENHANCE) ||
			!f2fs_is_valid_blkaddr(sbi, ei->blk + ei->len - 1,
						DATA_GENERIC_ENHANCE))) {
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			f2fs_warn(sbi, "%s: inode (ino=%lx) extent info [%u, %u, %u] is incorrect, run fsck to fix",
				  __func__, inode->i_ino,
				  ei->blk, ei->fofs, ei->len);
			return false;
		}
	}

	if (f2fs_has_inline_data(inode) &&
			(!S_ISREG(inode->i_mode) && !S_ISLNK(inode->i_mode))) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: inode (ino=%lx, mode=%u) should not have inline_data, run fsck to fix",
			  __func__, inode->i_ino, inode->i_mode);
		return false;
	}

	if (f2fs_has_inline_dentry(inode) && !S_ISDIR(inode->i_mode)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: inode (ino=%lx, mode=%u) should not have inline_dentry, run fsck to fix",
			  __func__, inode->i_ino, inode->i_mode);
		return false;
	}

	return true;
}

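/*
 * Fill the in-memory inode from its on-disk f2fs_inode, recovering inline
 * status, rdev and the cold bit, and sanity-checking the result.
 */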
static int do_read_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct page *node_page;
	struct f2fs_inode *ri;
	projid_t i_projid;
	int err;

	/* Check if ino is within scope */
	if (f2fs_check_nid_range(sbi, inode->i_ino))
		return -EINVAL;

	node_page = f2fs_get_node_page(sbi, inode->i_ino);
	if (IS_ERR(node_page))
		return PTR_ERR(node_page);

	ri = F2FS_INODE(node_page);

	inode->i_mode = le16_to_cpu(ri->i_mode);
	i_uid_write(inode, le32_to_cpu(ri->i_uid));
	i_gid_write(inode, le32_to_cpu(ri->i_gid));
	set_nlink(inode, le32_to_cpu(ri->i_links));
	inode->i_size = le64_to_cpu(ri->i_size);
	inode->i_blocks = SECTOR_FROM_BLOCK(le64_to_cpu(ri->i_blocks) - 1);

	inode->i_atime.tv_sec = le64_to_cpu(ri->i_atime);
	inode->i_ctime.tv_sec = le64_to_cpu(ri->i_ctime);
	inode->i_mtime.tv_sec = le64_to_cpu(ri->i_mtime);
	inode->i_atime.tv_nsec = le32_to_cpu(ri->i_atime_nsec);
	inode->i_ctime.tv_nsec = le32_to_cpu(ri->i_ctime_nsec);
	inode->i_mtime.tv_nsec = le32_to_cpu(ri->i_mtime_nsec);
	inode->i_generation = le32_to_cpu(ri->i_generation);
	if (S_ISDIR(inode->i_mode))
		fi->i_current_depth = le32_to_cpu(ri->i_current_depth);
	else if (S_ISREG(inode->i_mode))
		fi->i_gc_failures[GC_FAILURE_PIN] =
					le16_to_cpu(ri->i_gc_failures);
	fi->i_xattr_nid = le32_to_cpu(ri->i_xattr_nid);
	fi->i_flags = le32_to_cpu(ri->i_flags);
	if (S_ISREG(inode->i_mode))
		fi->i_flags &= ~F2FS_PROJINHERIT_FL;
	fi->flags = 0;
	fi->i_advise = ri->i_advise;
	fi->i_pino = le32_to_cpu(ri->i_pino);
	fi->i_dir_level = ri->i_dir_level;

	if (f2fs_init_extent_tree(inode, &ri->i_ext))
		set_page_dirty(node_page);

	get_inline_info(inode, ri);

	fi->i_extra_isize = f2fs_has_extra_attr(inode) ?
					le16_to_cpu(ri->i_extra_isize) : 0;

	if (f2fs_sb_has_flexible_inline_xattr(sbi)) {
		fi->i_inline_xattr_size = le16_to_cpu(ri->i_inline_xattr_size);
	} else if (f2fs_has_inline_xattr(inode) ||
				f2fs_has_inline_dentry(inode)) {
		fi->i_inline_xattr_size = DEFAULT_INLINE_XATTR_ADDRS;
	} else {
		/*
		 * Previous inline data or directory always reserved 200 bytes
		 * in inode layout, even if inline_xattr is disabled. In order
		 * to keep inline_dentry's structure for backward compatibility,
		 * we get the space back only from inline_data.
		 */
		fi->i_inline_xattr_size = 0;
	}

	if (!sanity_check_inode(inode, node_page)) {
		f2fs_put_page(node_page, 1);
		return -EFSCORRUPTED;
	}

	/* check data exist */
	if (f2fs_has_inline_data(inode) && !f2fs_exist_data(inode))
		__recover_inline_status(inode, node_page);

	/* try to recover cold bit for non-dir inode */
	if (!S_ISDIR(inode->i_mode) && !is_cold_node(node_page)) {
		set_cold_node(node_page, false);
		set_page_dirty(node_page);
	}

	/* get rdev by using inline_info */
	__get_inode_rdev(inode, ri);

	if (S_ISREG(inode->i_mode)) {
		err = __written_first_block(sbi, ri);
		if (err < 0) {
			f2fs_put_page(node_page, 1);
			return err;
		}
		if (!err)
			set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
	}

	if (!f2fs_need_inode_block_update(sbi, inode->i_ino))
		fi->last_disk_size = inode->i_size;

	if (fi->i_flags & F2FS_PROJINHERIT_FL)
		set_inode_flag(inode, FI_PROJ_INHERIT);

	if (f2fs_has_extra_attr(inode) && f2fs_sb_has_project_quota(sbi) &&
			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_projid))
		i_projid = (projid_t)le32_to_cpu(ri->i_projid);
	else
		i_projid = F2FS_DEF_PROJID;
	fi->i_projid = make_kprojid(&init_user_ns, i_projid);

	if (f2fs_has_extra_attr(inode) && f2fs_sb_has_inode_crtime(sbi) &&
			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_crtime)) {
		fi->i_crtime.tv_sec = le64_to_cpu(ri->i_crtime);
		fi->i_crtime.tv_nsec = le32_to_cpu(ri->i_crtime_nsec);
	}

	F2FS_I(inode)->i_disk_time[0] = inode->i_atime;
	F2FS_I(inode)->i_disk_time[1] = inode->i_ctime;
	F2FS_I(inode)->i_disk_time[2] = inode->i_mtime;
	F2FS_I(inode)->i_disk_time[3] = F2FS_I(inode)->i_crtime;
	f2fs_put_page(node_page, 1);

	stat_inc_inline_xattr(inode);
	stat_inc_inline_inode(inode);
	stat_inc_inline_dir(inode);

	return 0;
}

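/*
 * Look up or read an inode, then wire up the per-type inode, file and
 * address-space operations. Returns an ERR_PTR on failure.
 */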
struct inode *f2fs_iget(struct super_block *sb, unsigned long ino)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct inode *inode;
	int ret = 0;

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);

	if (!(inode->i_state & I_NEW)) {
		trace_f2fs_iget(inode);
		return inode;
	}
	if (ino == F2FS_NODE_INO(sbi) || ino == F2FS_META_INO(sbi))
		goto make_now;

	ret = do_read_inode(inode);
	if (ret)
		goto bad_inode;
make_now:
	if (ino == F2FS_NODE_INO(sbi)) {
		inode->i_mapping->a_ops = &f2fs_node_aops;
		mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
	} else if (ino == F2FS_META_INO(sbi)) {
		inode->i_mapping->a_ops = &f2fs_meta_aops;
		mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
	} else if (S_ISREG(inode->i_mode)) {
		inode->i_op = &f2fs_file_inode_operations;
		inode->i_fop = &f2fs_file_operations;
		inode->i_mapping->a_ops = &f2fs_dblock_aops;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &f2fs_dir_inode_operations;
		inode->i_fop = &f2fs_dir_operations;
		inode->i_mapping->a_ops = &f2fs_dblock_aops;
		inode_nohighmem(inode);
	} else if (S_ISLNK(inode->i_mode)) {
		if (file_is_encrypt(inode))
			inode->i_op = &f2fs_encrypted_symlink_inode_operations;
		else
			inode->i_op = &f2fs_symlink_inode_operations;
		inode_nohighmem(inode);
		inode->i_mapping->a_ops = &f2fs_dblock_aops;
	} else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
			S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
		inode->i_op = &f2fs_special_inode_operations;
		init_special_inode(inode, inode->i_mode, inode->i_rdev);
	} else {
		ret = -EIO;
		goto bad_inode;
	}
	f2fs_set_inode_flags(inode);
	unlock_new_inode(inode);
	trace_f2fs_iget(inode);
	return inode;

bad_inode:
	f2fs_inode_synced(inode);
	iget_failed(inode);
	trace_f2fs_iget_exit(inode, ret);
	return ERR_PTR(ret);
}

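/* Same as f2fs_iget(), but retry on transient -ENOMEM. */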
struct inode *f2fs_iget_retry(struct super_block *sb, unsigned long ino)
{
	struct inode *inode;
retry:
	inode = f2fs_iget(sb, ino);
	if (IS_ERR(inode)) {
		if (PTR_ERR(inode) == -ENOMEM) {
			congestion_wait(BLK_RW_ASYNC, HZ/50);
			goto retry;
		}
	}
	return inode;
}

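/*
 * Write the in-memory inode state back into its node page and mark the
 * page dirty; the inode is considered synced afterwards.
 */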
void f2fs_update_inode(struct inode *inode, struct page *node_page)
{
	struct f2fs_inode *ri;
	struct extent_tree *et = F2FS_I(inode)->extent_tree;

	f2fs_wait_on_page_writeback(node_page, NODE, true, true);
	set_page_dirty(node_page);

	f2fs_inode_synced(inode);

	ri = F2FS_INODE(node_page);

	ri->i_mode = cpu_to_le16(inode->i_mode);
	ri->i_advise = F2FS_I(inode)->i_advise;
	ri->i_uid = cpu_to_le32(i_uid_read(inode));
	ri->i_gid = cpu_to_le32(i_gid_read(inode));
	ri->i_links = cpu_to_le32(inode->i_nlink);
	ri->i_size = cpu_to_le64(i_size_read(inode));
	ri->i_blocks = cpu_to_le64(SECTOR_TO_BLOCK(inode->i_blocks) + 1);

	if (et) {
		read_lock(&et->lock);
		set_raw_extent(&et->largest, &ri->i_ext);
		read_unlock(&et->lock);
	} else {
		memset(&ri->i_ext, 0, sizeof(ri->i_ext));
	}
	set_raw_inline(inode, ri);

	ri->i_atime = cpu_to_le64(inode->i_atime.tv_sec);
	ri->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
	ri->i_mtime = cpu_to_le64(inode->i_mtime.tv_sec);
	ri->i_atime_nsec = cpu_to_le32(inode->i_atime.tv_nsec);
	ri->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
	ri->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
	if (S_ISDIR(inode->i_mode))
		ri->i_current_depth =
			cpu_to_le32(F2FS_I(inode)->i_current_depth);
	else if (S_ISREG(inode->i_mode))
		ri->i_gc_failures =
			cpu_to_le16(F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN]);
	ri->i_xattr_nid = cpu_to_le32(F2FS_I(inode)->i_xattr_nid);
	ri->i_flags = cpu_to_le32(F2FS_I(inode)->i_flags);
	ri->i_pino = cpu_to_le32(F2FS_I(inode)->i_pino);
	ri->i_generation = cpu_to_le32(inode->i_generation);
	ri->i_dir_level = F2FS_I(inode)->i_dir_level;

	if (f2fs_has_extra_attr(inode)) {
		ri->i_extra_isize = cpu_to_le16(F2FS_I(inode)->i_extra_isize);

		if (f2fs_sb_has_flexible_inline_xattr(F2FS_I_SB(inode)))
			ri->i_inline_xattr_size =
				cpu_to_le16(F2FS_I(inode)->i_inline_xattr_size);

		if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)) &&
			F2FS_FITS_IN_INODE(ri, F2FS_I(inode)->i_extra_isize,
								i_projid)) {
			projid_t i_projid;

			i_projid = from_kprojid(&init_user_ns,
						F2FS_I(inode)->i_projid);
			ri->i_projid = cpu_to_le32(i_projid);
		}

		if (f2fs_sb_has_inode_crtime(F2FS_I_SB(inode)) &&
			F2FS_FITS_IN_INODE(ri, F2FS_I(inode)->i_extra_isize,
								i_crtime)) {
			ri->i_crtime =
				cpu_to_le64(F2FS_I(inode)->i_crtime.tv_sec);
			ri->i_crtime_nsec =
				cpu_to_le32(F2FS_I(inode)->i_crtime.tv_nsec);
		}
	}

	__set_inode_rdev(inode, ri);

	/* deleted inode */
	if (inode->i_nlink == 0)
		clear_inline_node(node_page);

	F2FS_I(inode)->i_disk_time[0] = inode->i_atime;
	F2FS_I(inode)->i_disk_time[1] = inode->i_ctime;
	F2FS_I(inode)->i_disk_time[2] = inode->i_mtime;
	F2FS_I(inode)->i_disk_time[3] = F2FS_I(inode)->i_crtime;

#ifdef CONFIG_F2FS_CHECK_FS
	f2fs_inode_chksum_set(F2FS_I_SB(inode), node_page);
#endif
}

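/*
 * Fetch the inode's node page and write the inode back into it, retrying on
 * -ENOMEM and stopping checkpointing on other fatal errors.
 */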
void f2fs_update_inode_page(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *node_page;
retry:
	node_page = f2fs_get_node_page(sbi, inode->i_ino);
	if (IS_ERR(node_page)) {
		int err = PTR_ERR(node_page);

		if (err == -ENOMEM) {
			cond_resched();
			goto retry;
		} else if (err != -ENOENT) {
			f2fs_stop_checkpoint(sbi, false);
		}
		return;
	}
	f2fs_update_inode(inode, node_page);
	f2fs_put_page(node_page, 1);
}

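/*
 * ->write_inode() for f2fs: flush a dirty inode into its node page and
 * balance the filesystem when there is writeback work to do.
 */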
int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (inode->i_ino == F2FS_NODE_INO(sbi) ||
			inode->i_ino == F2FS_META_INO(sbi))
		return 0;

	if (!is_inode_flag_set(inode, FI_DIRTY_INODE))
		return 0;

	if (!f2fs_is_checkpoint_ready(sbi))
		return -ENOSPC;

	/*
	 * We need to balance fs here to prevent from producing dirty node pages
	 * during the urgent cleaning time when running out of free sections.
	 */
	f2fs_update_inode_page(inode);
	if (wbc && wbc->nr_to_write)
		f2fs_balance_fs(sbi, true);
	return 0;
}

/*
 * Called at the last iput() if i_nlink is zero
 */
void f2fs_evict_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t xnid = F2FS_I(inode)->i_xattr_nid;
	int err = 0;

	/* any remaining atomic pages should be discarded */
	if (f2fs_is_atomic_file(inode))
		f2fs_drop_inmem_pages(inode);

	trace_f2fs_evict_inode(inode);
	truncate_inode_pages_final(&inode->i_data);

	if (inode->i_ino == F2FS_NODE_INO(sbi) ||
			inode->i_ino == F2FS_META_INO(sbi))
		goto out_clear;

	f2fs_bug_on(sbi, get_dirty_pages(inode));
	f2fs_remove_dirty_inode(inode);

	f2fs_destroy_extent_tree(inode);

	if (inode->i_nlink || is_bad_inode(inode))
		goto no_delete;

	err = dquot_initialize(inode);
	if (err) {
		err = 0;
		set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
	}

	f2fs_remove_ino_entry(sbi, inode->i_ino, APPEND_INO);
	f2fs_remove_ino_entry(sbi, inode->i_ino, UPDATE_INO);
	f2fs_remove_ino_entry(sbi, inode->i_ino, FLUSH_INO);

	sb_start_intwrite(inode->i_sb);
	set_inode_flag(inode, FI_NO_ALLOC);
	i_size_write(inode, 0);
retry:
	if (F2FS_HAS_BLOCKS(inode))
		err = f2fs_truncate(inode);

	if (time_to_inject(sbi, FAULT_EVICT_INODE)) {
		f2fs_show_injection_info(FAULT_EVICT_INODE);
		err = -EIO;
	}

	if (!err) {
		f2fs_lock_op(sbi);
		err = f2fs_remove_inode_page(inode);
		f2fs_unlock_op(sbi);
		if (err == -ENOENT)
			err = 0;
	}

	/* give more chances, if ENOMEM case */
	if (err == -ENOMEM) {
		err = 0;
		goto retry;
	}

	if (err) {
		f2fs_update_inode_page(inode);
		if (dquot_initialize_needed(inode))
			set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
	}
	sb_end_intwrite(inode->i_sb);
no_delete:
	dquot_drop(inode);

	stat_dec_inline_xattr(inode);
	stat_dec_inline_dir(inode);
	stat_dec_inline_inode(inode);

	if (likely(!f2fs_cp_error(sbi) &&
				!is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
		f2fs_bug_on(sbi, is_inode_flag_set(inode, FI_DIRTY_INODE));
	else
		f2fs_inode_synced(inode);

	/* ino == 0, if f2fs_new_inode() failed */
	if (inode->i_ino)
		invalidate_mapping_pages(NODE_MAPPING(sbi), inode->i_ino,
							inode->i_ino);
	if (xnid)
		invalidate_mapping_pages(NODE_MAPPING(sbi), xnid, xnid);
	if (inode->i_nlink) {
		if (is_inode_flag_set(inode, FI_APPEND_WRITE))
			f2fs_add_ino_entry(sbi, inode->i_ino, APPEND_INO);
		if (is_inode_flag_set(inode, FI_UPDATE_WRITE))
			f2fs_add_ino_entry(sbi, inode->i_ino, UPDATE_INO);
	}
	if (is_inode_flag_set(inode, FI_FREE_NID)) {
		f2fs_alloc_nid_failed(sbi, inode->i_ino);
		clear_inode_flag(inode, FI_FREE_NID);
	} else {
		/*
		 * If the xattr nid is corrupted, we can reach the error condition
		 * err & !f2fs_exist_written_data(sbi, inode->i_ino, ORPHAN_INO)).
		 * In that case, f2fs_check_nid_range() is enough to give a clue.
		 */
	}
out_clear:
	fscrypt_put_encryption_info(inode);
	fsverity_cleanup_inode(inode);
	clear_inode(inode);
}

/* caller should call f2fs_lock_op() */
void f2fs_handle_failed_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct node_info ni;
	int err;

	/*
	 * clear nlink of the inode in order to release its resources
	 * immediately.
	 */
	clear_nlink(inode);

	/*
	 * we must call this to avoid the inode remaining dirty, which would
	 * cause a panic when flushing dirty inodes in gdirty_list.
	 */
	f2fs_update_inode_page(inode);
	f2fs_inode_synced(inode);

	/* don't make bad inode, since it becomes a regular file. */
	unlock_new_inode(inode);

	/*
	 * Note: we should add the inode to the orphan list before
	 * f2fs_unlock_op(), so we can prevent losing this orphan when
	 * encountering a checkpoint followed by a sudden power-off.
	 */
	err = f2fs_get_node_info(sbi, inode->i_ino, &ni);
	if (err) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "May lose orphan inode, run fsck to fix.");
		goto out;
	}

	if (ni.blk_addr != NULL_ADDR) {
		err = f2fs_acquire_orphan_inode(sbi);
		if (err) {
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			f2fs_warn(sbi, "Too many orphan inodes, run fsck to fix.");
		} else {
			f2fs_add_orphan_inode(inode);
		}
		f2fs_alloc_nid_done(sbi, inode->i_ino);
	} else {
		set_inode_flag(inode, FI_FREE_NID);
	}

out:
	f2fs_unlock_op(sbi);

	/* iput will drop the inode object */
	iput(inode);
}