/*
 * fs/f2fs/inode.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/backing-dev.h>
#include <linux/writeback.h>

#include "f2fs.h"
#include "node.h"

#include <trace/events/f2fs.h>

void f2fs_mark_inode_dirty_sync(struct inode *inode)
{
	if (f2fs_inode_dirtied(inode))
		return;
	mark_inode_dirty_sync(inode);
}

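/*
 * Propagate the on-disk FS_*_FL flags into the generic S_* flags of
 * the VFS inode, then mark the inode dirty so the change is written
 * back.
 */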
void f2fs_set_inode_flags(struct inode *inode)
{
	unsigned int flags = F2FS_I(inode)->i_flags;
	unsigned int new_fl = 0;

	if (flags & FS_SYNC_FL)
		new_fl |= S_SYNC;
	if (flags & FS_APPEND_FL)
		new_fl |= S_APPEND;
	if (flags & FS_IMMUTABLE_FL)
		new_fl |= S_IMMUTABLE;
	if (flags & FS_NOATIME_FL)
		new_fl |= S_NOATIME;
	if (flags & FS_DIRSYNC_FL)
		new_fl |= S_DIRSYNC;
	inode_set_flags(inode, new_fl,
			S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
	f2fs_mark_inode_dirty_sync(inode);
}

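/*
 * Special files (character/block devices, FIFOs and sockets) have no
 * data blocks, so the device number is kept in the first data address
 * slots of the raw inode: a non-zero i_addr[0] holds the old 16-bit
 * dev_t encoding, otherwise i_addr[1] holds the new 32-bit encoding.
 */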
static void __get_inode_rdev(struct inode *inode, struct f2fs_inode *ri)
{
	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
			S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
		if (ri->i_addr[0])
			inode->i_rdev =
				old_decode_dev(le32_to_cpu(ri->i_addr[0]));
		else
			inode->i_rdev =
				new_decode_dev(le32_to_cpu(ri->i_addr[1]));
	}
}

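/*
 * Returns 0 when the first data block of the inode has been written,
 * 1 when it has not, and -EFAULT when i_addr[0] falls outside the
 * valid block address range (i.e. on-disk corruption).
 */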
static int __written_first_block(struct f2fs_sb_info *sbi,
					struct f2fs_inode *ri)
{
	block_t addr = le32_to_cpu(ri->i_addr[0]);

	if (!__is_valid_data_blkaddr(addr))
		return 1;
	if (!f2fs_is_valid_blkaddr(sbi, addr, DATA_GENERIC))
		return -EFAULT;
	return 0;
}

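/*
 * Mirror of __get_inode_rdev(): store i_rdev in i_addr[0] when the old
 * 16-bit encoding fits, otherwise zero i_addr[0] and keep the new
 * 32-bit encoding in i_addr[1].
 */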
static void __set_inode_rdev(struct inode *inode, struct f2fs_inode *ri)
{
	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		if (old_valid_dev(inode->i_rdev)) {
			ri->i_addr[0] =
				cpu_to_le32(old_encode_dev(inode->i_rdev));
			ri->i_addr[1] = 0;
		} else {
			ri->i_addr[0] = 0;
			ri->i_addr[1] =
				cpu_to_le32(new_encode_dev(inode->i_rdev));
			ri->i_addr[2] = 0;
		}
	}
}

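/*
 * Scan the inline data area; if any non-zero word is found, the inode
 * really carries inline data, so restore FI_DATA_EXIST both in memory
 * and in the raw inode, and mark the node page dirty.
 */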
static void __recover_inline_status(struct inode *inode, struct page *ipage)
{
	void *inline_data = inline_data_addr(ipage);
	__le32 *start = inline_data;
	__le32 *end = start + MAX_INLINE_DATA / sizeof(__le32);

	while (start < end) {
		if (*start++) {
			f2fs_wait_on_page_writeback(ipage, NODE, true);

			set_inode_flag(inode, FI_DATA_EXIST);
			set_raw_inline(inode, F2FS_INODE(ipage));
			set_page_dirty(ipage);
			return;
		}
	}
	return;
}

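/*
 * Validate on-disk fields before trusting them: i_blocks must be
 * non-zero (an inode accounts for at least its own node block), the
 * node footer's ino must match its nid, and the cached largest extent
 * must reference valid block addresses. On any mismatch, flag the
 * filesystem for fsck and reject the inode.
 */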
static bool sanity_check_inode(struct inode *inode, struct page *node_page)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	unsigned long long iblocks;

	iblocks = le64_to_cpu(F2FS_INODE(node_page)->i_blocks);
	if (!iblocks) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_msg(sbi->sb, KERN_WARNING,
			"%s: corrupted inode i_blocks i_ino=%lx iblocks=%llu, "
			"run fsck to fix.",
			__func__, inode->i_ino, iblocks);
		return false;
	}

	if (ino_of_node(node_page) != nid_of_node(node_page)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_msg(sbi->sb, KERN_WARNING,
			"%s: corrupted inode footer i_ino=%lx, ino,nid: "
			"[%u, %u] run fsck to fix.",
			__func__, inode->i_ino,
			ino_of_node(node_page), nid_of_node(node_page));
		return false;
	}

	if (F2FS_I(inode)->extent_tree) {
		struct extent_info *ei = &F2FS_I(inode)->extent_tree->largest;

		if (ei->len &&
			(!f2fs_is_valid_blkaddr(sbi, ei->blk, DATA_GENERIC) ||
			!f2fs_is_valid_blkaddr(sbi, ei->blk + ei->len - 1,
							DATA_GENERIC))) {
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			f2fs_msg(sbi->sb, KERN_WARNING,
				"%s: inode (ino=%lx) extent info [%u, %u, %u] "
				"is incorrect, run fsck to fix",
				__func__, inode->i_ino,
				ei->blk, ei->fofs, ei->len);
			return false;
		}
	}
	return true;
}

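/*
 * Read the raw inode from its node page and populate the VFS inode
 * together with the f2fs-private fields.
 */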
static int do_read_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct page *node_page;
	struct f2fs_inode *ri;
	int err;

	/* Check if ino is within scope */
	if (check_nid_range(sbi, inode->i_ino)) {
		f2fs_msg(inode->i_sb, KERN_ERR, "bad inode number: %lu",
				(unsigned long) inode->i_ino);
		WARN_ON(1);
		return -EINVAL;
	}

	node_page = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(node_page))
		return PTR_ERR(node_page);

	ri = F2FS_INODE(node_page);

	inode->i_mode = le16_to_cpu(ri->i_mode);
	i_uid_write(inode, le32_to_cpu(ri->i_uid));
	i_gid_write(inode, le32_to_cpu(ri->i_gid));
	set_nlink(inode, le32_to_cpu(ri->i_links));
	inode->i_size = le64_to_cpu(ri->i_size);
	inode->i_blocks = le64_to_cpu(ri->i_blocks);

	inode->i_atime.tv_sec = le64_to_cpu(ri->i_atime);
	inode->i_ctime.tv_sec = le64_to_cpu(ri->i_ctime);
	inode->i_mtime.tv_sec = le64_to_cpu(ri->i_mtime);
	inode->i_atime.tv_nsec = le32_to_cpu(ri->i_atime_nsec);
	inode->i_ctime.tv_nsec = le32_to_cpu(ri->i_ctime_nsec);
	inode->i_mtime.tv_nsec = le32_to_cpu(ri->i_mtime_nsec);
	inode->i_generation = le32_to_cpu(ri->i_generation);

	fi->i_current_depth = le32_to_cpu(ri->i_current_depth);
	fi->i_xattr_nid = le32_to_cpu(ri->i_xattr_nid);
	fi->i_flags = le32_to_cpu(ri->i_flags);
	fi->flags = 0;
	fi->i_advise = ri->i_advise;
	fi->i_pino = le32_to_cpu(ri->i_pino);
	fi->i_dir_level = ri->i_dir_level;

	if (f2fs_init_extent_tree(inode, &ri->i_ext))
		set_page_dirty(node_page);

	get_inline_info(inode, ri);

	if (!sanity_check_inode(inode, node_page)) {
		f2fs_put_page(node_page, 1);
		return -EINVAL;
	}

	/* check data exist */
	if (f2fs_has_inline_data(inode) && !f2fs_exist_data(inode))
		__recover_inline_status(inode, node_page);

	/* get rdev by using inline_info */
	__get_inode_rdev(inode, ri);

	err = __written_first_block(sbi, ri);
	if (err < 0) {
		f2fs_put_page(node_page, 1);
		return err;
	}
	if (!err)
		set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);

	if (!need_inode_block_update(sbi, inode->i_ino))
		fi->last_disk_size = inode->i_size;

	f2fs_put_page(node_page, 1);

	stat_inc_inline_xattr(inode);
	stat_inc_inline_inode(inode);
	stat_inc_inline_dir(inode);

	return 0;
}

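/*
 * Look up or instantiate the inode for @ino. A freshly read inode gets
 * its inode/file/address-space operations assigned according to its
 * type; the internal node and meta inodes skip do_read_inode().
 */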
struct inode *f2fs_iget(struct super_block *sb, unsigned long ino)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct inode *inode;
	int ret = 0;

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);

	if (!(inode->i_state & I_NEW)) {
		trace_f2fs_iget(inode);
		return inode;
	}
	if (ino == F2FS_NODE_INO(sbi) || ino == F2FS_META_INO(sbi))
		goto make_now;

	ret = do_read_inode(inode);
	if (ret)
		goto bad_inode;
make_now:
	if (ino == F2FS_NODE_INO(sbi)) {
		inode->i_mapping->a_ops = &f2fs_node_aops;
		mapping_set_gfp_mask(inode->i_mapping, GFP_F2FS_ZERO);
	} else if (ino == F2FS_META_INO(sbi)) {
		inode->i_mapping->a_ops = &f2fs_meta_aops;
		mapping_set_gfp_mask(inode->i_mapping, GFP_F2FS_ZERO);
	} else if (S_ISREG(inode->i_mode)) {
		inode->i_op = &f2fs_file_inode_operations;
		inode->i_fop = &f2fs_file_operations;
		inode->i_mapping->a_ops = &f2fs_dblock_aops;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &f2fs_dir_inode_operations;
		inode->i_fop = &f2fs_dir_operations;
		inode->i_mapping->a_ops = &f2fs_dblock_aops;
		mapping_set_gfp_mask(inode->i_mapping, GFP_F2FS_HIGH_ZERO);
	} else if (S_ISLNK(inode->i_mode)) {
		if (f2fs_encrypted_inode(inode))
			inode->i_op = &f2fs_encrypted_symlink_inode_operations;
		else
			inode->i_op = &f2fs_symlink_inode_operations;
		inode_nohighmem(inode);
		inode->i_mapping->a_ops = &f2fs_dblock_aops;
	} else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
			S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
		inode->i_op = &f2fs_special_inode_operations;
		init_special_inode(inode, inode->i_mode, inode->i_rdev);
	} else {
		ret = -EIO;
		goto bad_inode;
	}
	unlock_new_inode(inode);
	trace_f2fs_iget(inode);
	return inode;

bad_inode:
	f2fs_inode_synced(inode);
	iget_failed(inode);
	trace_f2fs_iget_exit(inode, ret);
	return ERR_PTR(ret);
}

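/*
 * Same as f2fs_iget(), except that a transient -ENOMEM is retried
 * after a short congestion wait instead of being returned.
 */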
struct inode *f2fs_iget_retry(struct super_block *sb, unsigned long ino)
{
	struct inode *inode;
retry:
	inode = f2fs_iget(sb, ino);
	if (IS_ERR(inode)) {
		if (PTR_ERR(inode) == -ENOMEM) {
			congestion_wait(BLK_RW_ASYNC, HZ/50);
			goto retry;
		}
	}
	return inode;
}

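/*
 * Copy the in-core inode back into its raw node page. Returns the
 * result of set_page_dirty(), i.e. non-zero iff the page was newly
 * dirtied.
 */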
int update_inode(struct inode *inode, struct page *node_page)
{
	struct f2fs_inode *ri;

	f2fs_inode_synced(inode);

	f2fs_wait_on_page_writeback(node_page, NODE, true);

	ri = F2FS_INODE(node_page);

	ri->i_mode = cpu_to_le16(inode->i_mode);
	ri->i_advise = F2FS_I(inode)->i_advise;
	ri->i_uid = cpu_to_le32(i_uid_read(inode));
	ri->i_gid = cpu_to_le32(i_gid_read(inode));
	ri->i_links = cpu_to_le32(inode->i_nlink);
	ri->i_size = cpu_to_le64(i_size_read(inode));
	ri->i_blocks = cpu_to_le64(inode->i_blocks);

	if (F2FS_I(inode)->extent_tree)
		set_raw_extent(&F2FS_I(inode)->extent_tree->largest,
							&ri->i_ext);
	else
		memset(&ri->i_ext, 0, sizeof(ri->i_ext));
	set_raw_inline(inode, ri);

	ri->i_atime = cpu_to_le64(inode->i_atime.tv_sec);
	ri->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
	ri->i_mtime = cpu_to_le64(inode->i_mtime.tv_sec);
	ri->i_atime_nsec = cpu_to_le32(inode->i_atime.tv_nsec);
	ri->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
	ri->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
	ri->i_current_depth = cpu_to_le32(F2FS_I(inode)->i_current_depth);
	ri->i_xattr_nid = cpu_to_le32(F2FS_I(inode)->i_xattr_nid);
	ri->i_flags = cpu_to_le32(F2FS_I(inode)->i_flags);
	ri->i_pino = cpu_to_le32(F2FS_I(inode)->i_pino);
	ri->i_generation = cpu_to_le32(inode->i_generation);
	ri->i_dir_level = F2FS_I(inode)->i_dir_level;

	__set_inode_rdev(inode, ri);
	set_cold_node(inode, node_page);

	/* deleted inode */
	if (inode->i_nlink == 0)
		clear_inline_node(node_page);

	return set_page_dirty(node_page);
}

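/*
 * Fetch the inode's node page and write the inode back into it.
 * -ENOMEM is retried; -ENOENT means the inode is already gone and is
 * treated as synced; any other error stops checkpointing.
 */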
int update_inode_page(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *node_page;
	int ret = 0;
retry:
	node_page = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(node_page)) {
		int err = PTR_ERR(node_page);
		if (err == -ENOMEM) {
			cond_resched();
			goto retry;
		} else if (err != -ENOENT) {
			f2fs_stop_checkpoint(sbi, false);
		}
		f2fs_inode_synced(inode);
		return 0;
	}
	ret = update_inode(inode, node_page);
	f2fs_put_page(node_page, 1);
	return ret;
}

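/*
 * ->write_inode() callback: the internal node/meta inodes and inodes
 * without FI_DIRTY_INODE are skipped; everything else is written back
 * via update_inode_page().
 */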
int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (inode->i_ino == F2FS_NODE_INO(sbi) ||
			inode->i_ino == F2FS_META_INO(sbi))
		return 0;

	if (!is_inode_flag_set(inode, FI_DIRTY_INODE))
		return 0;

	/*
	 * We need to balance fs here to prevent from producing dirty node
	 * pages during the urgent cleaning time when running out of free
	 * sections.
	 */
	if (update_inode_page(inode))
		f2fs_balance_fs(sbi, true);
	return 0;
}

/*
 * Called at the last iput() if i_nlink is zero
 */
void f2fs_evict_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t xnid = F2FS_I(inode)->i_xattr_nid;
	int err = 0;

	/* any remaining atomic pages should be discarded */
	if (f2fs_is_atomic_file(inode))
		drop_inmem_pages(inode);

	trace_f2fs_evict_inode(inode);
	truncate_inode_pages_final(&inode->i_data);

	if (inode->i_ino == F2FS_NODE_INO(sbi) ||
			inode->i_ino == F2FS_META_INO(sbi))
		goto out_clear;

	f2fs_bug_on(sbi, get_dirty_pages(inode));
	remove_dirty_inode(inode);

	f2fs_destroy_extent_tree(inode);

	if (inode->i_nlink || is_bad_inode(inode))
		goto no_delete;

#ifdef CONFIG_F2FS_FAULT_INJECTION
	if (time_to_inject(sbi, FAULT_EVICT_INODE))
		goto no_delete;
#endif

	sb_start_intwrite(inode->i_sb);
	set_inode_flag(inode, FI_NO_ALLOC);
	i_size_write(inode, 0);
retry:
	if (F2FS_HAS_BLOCKS(inode))
		err = f2fs_truncate(inode);

	if (!err) {
		f2fs_lock_op(sbi);
		err = remove_inode_page(inode);
		f2fs_unlock_op(sbi);
	}

	/* give more chances in the ENOMEM case */
	if (err == -ENOMEM) {
		err = 0;
		goto retry;
	}

	if (err)
		update_inode_page(inode);
	sb_end_intwrite(inode->i_sb);
no_delete:
	stat_dec_inline_xattr(inode);
	stat_dec_inline_dir(inode);
	stat_dec_inline_inode(inode);

	invalidate_mapping_pages(NODE_MAPPING(sbi), inode->i_ino, inode->i_ino);
	if (xnid)
		invalidate_mapping_pages(NODE_MAPPING(sbi), xnid, xnid);
	if (is_inode_flag_set(inode, FI_APPEND_WRITE))
		add_ino_entry(sbi, inode->i_ino, APPEND_INO);
	if (is_inode_flag_set(inode, FI_UPDATE_WRITE))
		add_ino_entry(sbi, inode->i_ino, UPDATE_INO);
	if (is_inode_flag_set(inode, FI_FREE_NID)) {
		alloc_nid_failed(sbi, inode->i_ino);
		clear_inode_flag(inode, FI_FREE_NID);
	}
	f2fs_bug_on(sbi, err &&
		!exist_written_data(sbi, inode->i_ino, ORPHAN_INO));
out_clear:
	fscrypt_put_encryption_info(inode, NULL);
	clear_inode(inode);
}

/* caller should call f2fs_lock_op() */
void handle_failed_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct node_info ni;

	/* don't call make_bad_inode(): that would turn it into a regular file */
	unlock_new_inode(inode);

	/*
	 * Note: we should add the inode to the orphan list before
	 * f2fs_unlock_op(), so that this orphan is not lost if a
	 * checkpoint completes and a sudden power-off follows.
	 */
	get_node_info(sbi, inode->i_ino, &ni);

	if (ni.blk_addr != NULL_ADDR) {
		int err = acquire_orphan_inode(sbi);
		if (err) {
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			f2fs_msg(sbi->sb, KERN_WARNING,
				"Too many orphan inodes, run fsck to fix.");
		} else {
			add_orphan_inode(inode);
		}
		alloc_nid_done(sbi, inode->i_ino);
	} else {
		set_inode_flag(inode, FI_FREE_NID);
	}

	f2fs_unlock_op(sbi);

	/* iput will drop the inode object */
	iput(inode);
}