fs/nilfs2/inode.c
// SPDX-License-Identifier: GPL-2.0+
/*
 * inode.c - NILFS inode operations.
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * Written by Ryusuke Konishi.
 */

#include <linux/buffer_head.h>
#include <linux/gfp.h>
#include <linux/mpage.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/uio.h>
#include <linux/fiemap.h>
#include "nilfs.h"
#include "btnode.h"
#include "segment.h"
#include "page.h"
#include "mdt.h"
#include "cpfile.h"
#include "ifile.h"

/**
 * struct nilfs_iget_args - arguments used during comparison between inodes
 * @ino: inode number
 * @cno: checkpoint number
 * @root: pointer to NILFS root object (mounted checkpoint)
 * @for_gc: inode for GC flag
 */
struct nilfs_iget_args {
	u64 ino;
	__u64 cno;
	struct nilfs_root *root;
	int for_gc;
};

static int nilfs_iget_test(struct inode *inode, void *opaque);

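/**
 * nilfs_inode_add_blocks - update the block count of an inode
 * @inode: inode whose block count is to be incremented
 * @n: number of blocks newly allocated to the file
 *
 * Adds @n blocks' worth of bytes to @inode and, if the inode belongs to a
 * mounted checkpoint, adds @n to the block counter of its NILFS root
 * object.  nilfs_inode_sub_blocks() below is the exact inverse.
 */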
void nilfs_inode_add_blocks(struct inode *inode, int n)
{
	struct nilfs_root *root = NILFS_I(inode)->i_root;

	inode_add_bytes(inode, i_blocksize(inode) * n);
	if (root)
		atomic64_add(n, &root->blocks_count);
}

void nilfs_inode_sub_blocks(struct inode *inode, int n)
{
	struct nilfs_root *root = NILFS_I(inode)->i_root;

	inode_sub_bytes(inode, i_blocksize(inode) * n);
	if (root)
		atomic64_sub(n, &root->blocks_count);
}

/**
 * nilfs_get_block() - get a file block on the filesystem (callback function)
 * @inode: inode struct of the target file
 * @blkoff: file block number
 * @bh_result: buffer head to be mapped on
 * @create: indicate whether allocating the block or not when it has not
 *          been allocated yet.
 *
 * This function does not issue an actual read request for the specified
 * data block; that is done by the VFS.
 */
int nilfs_get_block(struct inode *inode, sector_t blkoff,
		    struct buffer_head *bh_result, int create)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
	__u64 blknum = 0;
	int err = 0, ret;
	unsigned int maxblocks = bh_result->b_size >> inode->i_blkbits;

	down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	ret = nilfs_bmap_lookup_contig(ii->i_bmap, blkoff, &blknum, maxblocks);
	up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	if (ret >= 0) {	/* found */
		map_bh(bh_result, inode->i_sb, blknum);
		if (ret > 0)
			bh_result->b_size = (ret << inode->i_blkbits);
		goto out;
	}
	/* data block was not found */
	if (ret == -ENOENT && create) {
		struct nilfs_transaction_info ti;

		bh_result->b_blocknr = 0;
		err = nilfs_transaction_begin(inode->i_sb, &ti, 1);
		if (unlikely(err))
			goto out;
		err = nilfs_bmap_insert(ii->i_bmap, blkoff,
					(unsigned long)bh_result);
		if (unlikely(err != 0)) {
			if (err == -EEXIST) {
				/*
				 * The get_block() function could be called
				 * from multiple callers for an inode.
				 * However, the page having this block must
				 * be locked in this case.
				 */
				nilfs_msg(inode->i_sb, KERN_WARNING,
					  "%s (ino=%lu): a race condition while inserting a data block at offset=%llu",
					  __func__, inode->i_ino,
					  (unsigned long long)blkoff);
				err = 0;
			}
			nilfs_transaction_abort(inode->i_sb);
			goto out;
		}
		nilfs_mark_inode_dirty_sync(inode);
		nilfs_transaction_commit(inode->i_sb); /* never fails */
		/* Error handling should be detailed */
		set_buffer_new(bh_result);
		set_buffer_delay(bh_result);
		map_bh(bh_result, inode->i_sb, 0);
		/* Disk block number must be changed to proper value */

	} else if (ret == -ENOENT) {
		/*
		 * not found is not error (e.g. hole); must return without
		 * the mapped state flag.
		 */
		;
	} else {
		err = ret;
	}

out:
	return err;
}

/**
 * nilfs_readpage() - implement the readpage() method of the nilfs_aops
 * address_space_operations.
 * @file: file struct of the file to be read
 * @page: the page to be read
 */
static int nilfs_readpage(struct file *file, struct page *page)
{
	return mpage_readpage(page, nilfs_get_block);
}

static void nilfs_readahead(struct readahead_control *rac)
{
	mpage_readahead(rac, nilfs_get_block);
}

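/**
 * nilfs_writepages - implement the writepages() method of nilfs_aops
 * @mapping: address space whose dirty pages are to be written
 * @wbc: writeback control describing the request
 *
 * NILFS normally writes dirty pages during segment construction, so this
 * method only takes action for data-integrity sync (WB_SYNC_ALL), where
 * it constructs a dsync segment covering the requested range.  On a
 * read-only filesystem, dirty pages are discarded and -EROFS is returned.
 */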
static int nilfs_writepages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	int err = 0;

	if (sb_rdonly(inode->i_sb)) {
		nilfs_clear_dirty_pages(mapping, false);
		return -EROFS;
	}

	if (wbc->sync_mode == WB_SYNC_ALL)
		err = nilfs_construct_dsync_segment(inode->i_sb, inode,
						    wbc->range_start,
						    wbc->range_end);
	return err;
}

static int nilfs_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	int err;

	if (sb_rdonly(inode->i_sb)) {
		/*
		 * The filesystem was remounted read-only because of an
		 * error or metadata corruption, but dirty pages still
		 * remain queued for background flushing.  Simply discard
		 * this dirty page here.
		 */
		nilfs_clear_dirty_page(page, false);
		unlock_page(page);
		return -EROFS;
	}

	redirty_page_for_writepage(wbc, page);
	unlock_page(page);

	if (wbc->sync_mode == WB_SYNC_ALL) {
		err = nilfs_construct_segment(inode->i_sb);
		if (unlikely(err))
			return err;
	} else if (wbc->for_reclaim)
		nilfs_flush_segment(inode->i_sb, inode->i_ino);

	return 0;
}

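/**
 * nilfs_set_page_dirty - implement the set_page_dirty() method of nilfs_aops
 * @page: page to be marked dirty
 *
 * In addition to marking the page dirty in the page cache, this method
 * dirties the page's mapped buffers (skipping hole blocks) and accounts
 * the newly dirtied blocks via nilfs_set_file_dirty() so that the segment
 * constructor will pick up the file.
 */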
static int nilfs_set_page_dirty(struct page *page)
{
	struct inode *inode = page->mapping->host;
	int ret = __set_page_dirty_nobuffers(page);

	if (page_has_buffers(page)) {
		unsigned int nr_dirty = 0;
		struct buffer_head *bh, *head;

		/*
		 * This page is locked by callers, and no other thread
		 * concurrently marks its buffers dirty since they are
		 * only dirtied through routines in fs/buffer.c in
		 * which call sites of mark_buffer_dirty are protected
		 * by page lock.
		 */
		bh = head = page_buffers(page);
		do {
			/* Do not mark hole blocks dirty */
			if (buffer_dirty(bh) || !buffer_mapped(bh))
				continue;

			set_buffer_dirty(bh);
			nr_dirty++;
		} while (bh = bh->b_this_page, bh != head);

		if (nr_dirty)
			nilfs_set_file_dirty(inode, nr_dirty);
	} else if (ret) {
		unsigned int nr_dirty = 1 << (PAGE_SHIFT - inode->i_blkbits);

		nilfs_set_file_dirty(inode, nr_dirty);
	}
	return ret;
}

void nilfs_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;

	if (to > inode->i_size) {
		truncate_pagecache(inode, inode->i_size);
		nilfs_truncate(inode);
	}
}

static int nilfs_write_begin(struct file *file, struct address_space *mapping,
			     loff_t pos, unsigned len, unsigned flags,
			     struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	int err = nilfs_transaction_begin(inode->i_sb, NULL, 1);

	if (unlikely(err))
		return err;

	err = block_write_begin(mapping, pos, len, flags, pagep,
				nilfs_get_block);
	if (unlikely(err)) {
		nilfs_write_failed(mapping, pos + len);
		nilfs_transaction_abort(inode->i_sb);
	}
	return err;
}

static int nilfs_write_end(struct file *file, struct address_space *mapping,
			   loff_t pos, unsigned len, unsigned copied,
			   struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;
	unsigned int start = pos & (PAGE_SIZE - 1);
	unsigned int nr_dirty;
	int err;

	nr_dirty = nilfs_page_count_clean_buffers(page, start,
						  start + copied);
	copied = generic_write_end(file, mapping, pos, len, copied, page,
				   fsdata);
	nilfs_set_file_dirty(inode, nr_dirty);
	err = nilfs_transaction_commit(inode->i_sb);
	return err ? : copied;
}

static ssize_t
nilfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	struct inode *inode = file_inode(iocb->ki_filp);

	if (iov_iter_rw(iter) == WRITE)
		return 0;

	/* Needs synchronization with the cleaner */
	return blockdev_direct_IO(iocb, inode, iter, nilfs_get_block);
}

const struct address_space_operations nilfs_aops = {
	.writepage		= nilfs_writepage,
	.readpage		= nilfs_readpage,
	.writepages		= nilfs_writepages,
	.set_page_dirty		= nilfs_set_page_dirty,
	.readahead		= nilfs_readahead,
	.write_begin		= nilfs_write_begin,
	.write_end		= nilfs_write_end,
	/* .releasepage		= nilfs_releasepage, */
	.invalidatepage		= block_invalidatepage,
	.direct_IO		= nilfs_direct_IO,
	.is_partially_uptodate	= block_is_partially_uptodate,
};

static int nilfs_insert_inode_locked(struct inode *inode,
				     struct nilfs_root *root,
				     unsigned long ino)
{
	struct nilfs_iget_args args = {
		.ino = ino, .root = root, .cno = 0, .for_gc = 0
	};

	return insert_inode_locked4(inode, ino, nilfs_iget_test, &args);
}

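/**
 * nilfs_new_inode - allocate a new inode in the checkpoint @dir belongs to
 * @dir: parent directory inode
 * @mode: file type and access mode of the new inode
 *
 * Allocates an ifile entry for the new inode, initializes the in-core
 * inode (ownership, timestamps, inherited flags, generation number, bmap)
 * and inserts it into the inode hash in the locked, new state.
 *
 * Return: a pointer to the new inode on success, or an ERR_PTR on failure.
 */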
struct inode *nilfs_new_inode(struct inode *dir, umode_t mode)
{
	struct super_block *sb = dir->i_sb;
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct inode *inode;
	struct nilfs_inode_info *ii;
	struct nilfs_root *root;
	int err = -ENOMEM;
	ino_t ino;

	inode = new_inode(sb);
	if (unlikely(!inode))
		goto failed;

	mapping_set_gfp_mask(inode->i_mapping,
			   mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS));

	root = NILFS_I(dir)->i_root;
	ii = NILFS_I(inode);
	ii->i_state = BIT(NILFS_I_NEW);
	ii->i_root = root;

	err = nilfs_ifile_create_inode(root->ifile, &ino, &ii->i_bh);
	if (unlikely(err))
		goto failed_ifile_create_inode;
	/* reference count of i_bh inherits from nilfs_mdt_read_block() */

	atomic64_inc(&root->inodes_count);
	inode_init_owner(inode, dir, mode);
	inode->i_ino = ino;
	inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);

	if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)) {
		err = nilfs_bmap_read(ii->i_bmap, NULL);
		if (err < 0)
			goto failed_after_creation;

		set_bit(NILFS_I_BMAP, &ii->i_state);
		/* No lock is needed; iget() ensures it. */
	}

	ii->i_flags = nilfs_mask_flags(
		mode, NILFS_I(dir)->i_flags & NILFS_FL_INHERITED);

	/* ii->i_file_acl = 0; */
	/* ii->i_dir_acl = 0; */
	ii->i_dir_start_lookup = 0;
	nilfs_set_inode_flags(inode);
	spin_lock(&nilfs->ns_next_gen_lock);
	inode->i_generation = nilfs->ns_next_generation++;
	spin_unlock(&nilfs->ns_next_gen_lock);
	if (nilfs_insert_inode_locked(inode, root, ino) < 0) {
		err = -EIO;
		goto failed_after_creation;
	}

	err = nilfs_init_acl(inode, dir);
	if (unlikely(err))
		/*
		 * This should never occur.  If nilfs_init_acl() ever gains
		 * a real implementation, proper cancellation of the jobs
		 * above must be considered.
		 */
		goto failed_after_creation;

	return inode;

failed_after_creation:
	clear_nlink(inode);
	unlock_new_inode(inode);
	iput(inode);  /*
		       * raw_inode will be deleted through
		       * nilfs_evict_inode().
		       */
	goto failed;

failed_ifile_create_inode:
	make_bad_inode(inode);
	iput(inode);
failed:
	return ERR_PTR(err);
}

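/**
 * nilfs_set_inode_flags - propagate NILFS inode flags to the VFS inode
 * @inode: inode whose i_flags are to be updated
 *
 * Translates the FS_*_FL flags stored in the NILFS inode into the
 * corresponding in-core S_* flags of the VFS inode.
 */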
void nilfs_set_inode_flags(struct inode *inode)
{
	unsigned int flags = NILFS_I(inode)->i_flags;
	unsigned int new_fl = 0;

	if (flags & FS_SYNC_FL)
		new_fl |= S_SYNC;
	if (flags & FS_APPEND_FL)
		new_fl |= S_APPEND;
	if (flags & FS_IMMUTABLE_FL)
		new_fl |= S_IMMUTABLE;
	if (flags & FS_NOATIME_FL)
		new_fl |= S_NOATIME;
	if (flags & FS_DIRSYNC_FL)
		new_fl |= S_DIRSYNC;
	inode_set_flags(inode, new_fl, S_SYNC | S_APPEND | S_IMMUTABLE |
			S_NOATIME | S_DIRSYNC);
}

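/**
 * nilfs_read_inode_common - fill an in-core inode from an on-disk inode
 * @inode: in-core inode to be filled
 * @raw_inode: on-disk inode to read from
 *
 * Note that i_atime is initialized from the on-disk mtime; the NILFS disk
 * inode does not record a separate access time.
 *
 * Return: 0 on success, -ESTALE if the on-disk inode has a zero link count
 * (i.e. it has been deleted), or a negative error code from
 * nilfs_bmap_read().
 */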
int nilfs_read_inode_common(struct inode *inode,
			    struct nilfs_inode *raw_inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	int err;

	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
	i_uid_write(inode, le32_to_cpu(raw_inode->i_uid));
	i_gid_write(inode, le32_to_cpu(raw_inode->i_gid));
	set_nlink(inode, le16_to_cpu(raw_inode->i_links_count));
	inode->i_size = le64_to_cpu(raw_inode->i_size);
	inode->i_atime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
	inode->i_ctime.tv_sec = le64_to_cpu(raw_inode->i_ctime);
	inode->i_mtime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
	inode->i_atime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
	inode->i_ctime.tv_nsec = le32_to_cpu(raw_inode->i_ctime_nsec);
	inode->i_mtime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
	if (inode->i_nlink == 0)
		return -ESTALE; /* this inode is deleted */

	inode->i_blocks = le64_to_cpu(raw_inode->i_blocks);
	ii->i_flags = le32_to_cpu(raw_inode->i_flags);
#if 0
	ii->i_file_acl = le32_to_cpu(raw_inode->i_file_acl);
	ii->i_dir_acl = S_ISREG(inode->i_mode) ?
		0 : le32_to_cpu(raw_inode->i_dir_acl);
#endif
	ii->i_dir_start_lookup = 0;
	inode->i_generation = le32_to_cpu(raw_inode->i_generation);

	if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
	    S_ISLNK(inode->i_mode)) {
		err = nilfs_bmap_read(ii->i_bmap, raw_inode);
		if (err < 0)
			return err;
		set_bit(NILFS_I_BMAP, &ii->i_state);
		/* No lock is needed; iget() ensures it. */
	}
	return 0;
}

static int __nilfs_read_inode(struct super_block *sb,
			      struct nilfs_root *root, unsigned long ino,
			      struct inode *inode)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct buffer_head *bh;
	struct nilfs_inode *raw_inode;
	int err;

	down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	err = nilfs_ifile_get_inode_block(root->ifile, ino, &bh);
	if (unlikely(err))
		goto bad_inode;

	raw_inode = nilfs_ifile_map_inode(root->ifile, ino, bh);

	err = nilfs_read_inode_common(inode, raw_inode);
	if (err)
		goto failed_unmap;

	if (S_ISREG(inode->i_mode)) {
		inode->i_op = &nilfs_file_inode_operations;
		inode->i_fop = &nilfs_file_operations;
		inode->i_mapping->a_ops = &nilfs_aops;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &nilfs_dir_inode_operations;
		inode->i_fop = &nilfs_dir_operations;
		inode->i_mapping->a_ops = &nilfs_aops;
	} else if (S_ISLNK(inode->i_mode)) {
		inode->i_op = &nilfs_symlink_inode_operations;
		inode_nohighmem(inode);
		inode->i_mapping->a_ops = &nilfs_aops;
	} else {
		inode->i_op = &nilfs_special_inode_operations;
		init_special_inode(
			inode, inode->i_mode,
			huge_decode_dev(le64_to_cpu(raw_inode->i_device_code)));
	}
	nilfs_ifile_unmap_inode(root->ifile, ino, bh);
	brelse(bh);
	up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	nilfs_set_inode_flags(inode);
	mapping_set_gfp_mask(inode->i_mapping,
			   mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS));
	return 0;

failed_unmap:
	nilfs_ifile_unmap_inode(root->ifile, ino, bh);
	brelse(bh);

bad_inode:
	up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	return err;
}

static int nilfs_iget_test(struct inode *inode, void *opaque)
{
	struct nilfs_iget_args *args = opaque;
	struct nilfs_inode_info *ii;

	if (args->ino != inode->i_ino || args->root != NILFS_I(inode)->i_root)
		return 0;

	ii = NILFS_I(inode);
	if (!test_bit(NILFS_I_GCINODE, &ii->i_state))
		return !args->for_gc;

	return args->for_gc && args->cno == ii->i_cno;
}

static int nilfs_iget_set(struct inode *inode, void *opaque)
{
	struct nilfs_iget_args *args = opaque;

	inode->i_ino = args->ino;
	if (args->for_gc) {
		NILFS_I(inode)->i_state = BIT(NILFS_I_GCINODE);
		NILFS_I(inode)->i_cno = args->cno;
		NILFS_I(inode)->i_root = NULL;
	} else {
		if (args->root && args->ino == NILFS_ROOT_INO)
			nilfs_get_root(args->root);
		NILFS_I(inode)->i_root = args->root;
	}
	return 0;
}

struct inode *nilfs_ilookup(struct super_block *sb, struct nilfs_root *root,
			    unsigned long ino)
{
	struct nilfs_iget_args args = {
		.ino = ino, .root = root, .cno = 0, .for_gc = 0
	};

	return ilookup5(sb, ino, nilfs_iget_test, &args);
}

struct inode *nilfs_iget_locked(struct super_block *sb, struct nilfs_root *root,
				unsigned long ino)
{
	struct nilfs_iget_args args = {
		.ino = ino, .root = root, .cno = 0, .for_gc = 0
	};

	return iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args);
}

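/**
 * nilfs_iget - obtain an inode through the inode cache, reading it if needed
 * @sb: super block instance
 * @root: NILFS root (mounted checkpoint) the inode belongs to
 * @ino: inode number
 *
 * Looks the inode up via nilfs_iget_locked() and, if it was not cached,
 * reads it in with __nilfs_read_inode().  A typical call (illustrative
 * sketch only) looks like:
 *
 *	inode = nilfs_iget(sb, root, ino);
 *	if (IS_ERR(inode))
 *		return PTR_ERR(inode);
 *
 * Return: the inode on success, or an ERR_PTR on failure.
 */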
struct inode *nilfs_iget(struct super_block *sb, struct nilfs_root *root,
			 unsigned long ino)
{
	struct inode *inode;
	int err;

	inode = nilfs_iget_locked(sb, root, ino);
	if (unlikely(!inode))
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	err = __nilfs_read_inode(sb, root, ino, inode);
	if (unlikely(err)) {
		iget_failed(inode);
		return ERR_PTR(err);
	}
	unlock_new_inode(inode);
	return inode;
}

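/**
 * nilfs_iget_for_gc - obtain a GC inode for garbage collection
 * @sb: super block instance
 * @ino: inode number
 * @cno: checkpoint number the inode is read from
 *
 * GC inodes are keyed by (ino, cno) rather than by root, and carry the
 * NILFS_I_GCINODE state instead of belonging to a mounted checkpoint;
 * see nilfs_iget_test() and nilfs_iget_set() above.
 */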
struct inode *nilfs_iget_for_gc(struct super_block *sb, unsigned long ino,
				__u64 cno)
{
	struct nilfs_iget_args args = {
		.ino = ino, .root = NULL, .cno = cno, .for_gc = 1
	};
	struct inode *inode;
	int err;

	inode = iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args);
	if (unlikely(!inode))
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	err = nilfs_init_gcinode(inode);
	if (unlikely(err)) {
		iget_failed(inode);
		return ERR_PTR(err);
	}
	unlock_new_inode(inode);
	return inode;
}

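/**
 * nilfs_write_inode_common - copy an in-core inode to an on-disk inode
 * @inode: in-core inode to be copied out
 * @raw_inode: on-disk inode buffer to write into
 * @has_bmap: whether the bmap data should be written as well
 *
 * Converts the in-core fields to little-endian on-disk form.  For root
 * metadata files, the unused tail of the on-disk inode is zero-filled so
 * that super root blocks stay fully initialized.
 */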
void nilfs_write_inode_common(struct inode *inode,
			      struct nilfs_inode *raw_inode, int has_bmap)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);

	raw_inode->i_mode = cpu_to_le16(inode->i_mode);
	raw_inode->i_uid = cpu_to_le32(i_uid_read(inode));
	raw_inode->i_gid = cpu_to_le32(i_gid_read(inode));
	raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
	raw_inode->i_size = cpu_to_le64(inode->i_size);
	raw_inode->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
	raw_inode->i_mtime = cpu_to_le64(inode->i_mtime.tv_sec);
	raw_inode->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
	raw_inode->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
	raw_inode->i_blocks = cpu_to_le64(inode->i_blocks);

	raw_inode->i_flags = cpu_to_le32(ii->i_flags);
	raw_inode->i_generation = cpu_to_le32(inode->i_generation);

	if (NILFS_ROOT_METADATA_FILE(inode->i_ino)) {
		struct the_nilfs *nilfs = inode->i_sb->s_fs_info;

		/* zero-fill unused portion in the case of super root block */
		raw_inode->i_xattr = 0;
		raw_inode->i_pad = 0;
		memset((void *)raw_inode + sizeof(*raw_inode), 0,
		       nilfs->ns_inode_size - sizeof(*raw_inode));
	}

	if (has_bmap)
		nilfs_bmap_write(ii->i_bmap, raw_inode);
	else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
		raw_inode->i_device_code =
			cpu_to_le64(huge_encode_dev(inode->i_rdev));
	/*
	 * When extending inode, nilfs->ns_inode_size should be checked
	 * for substitutions of appended fields.
	 */
}

void nilfs_update_inode(struct inode *inode, struct buffer_head *ibh, int flags)
{
	ino_t ino = inode->i_ino;
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct inode *ifile = ii->i_root->ifile;
	struct nilfs_inode *raw_inode;

	raw_inode = nilfs_ifile_map_inode(ifile, ino, ibh);

	if (test_and_clear_bit(NILFS_I_NEW, &ii->i_state))
		memset(raw_inode, 0, NILFS_MDT(ifile)->mi_entry_size);
	if (flags & I_DIRTY_DATASYNC)
		set_bit(NILFS_I_INODE_SYNC, &ii->i_state);

	nilfs_write_inode_common(inode, raw_inode, 0);
		/*
		 * XXX: call with has_bmap = 0 is a workaround to avoid
		 * deadlock of bmap.  This delays update of i_bmap to just
		 * before writing.
		 */

	nilfs_ifile_unmap_inode(ifile, ino, ibh);
}

#define NILFS_MAX_TRUNCATE_BLOCKS	16384  /* 64MB for 4KB block */

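/**
 * nilfs_truncate_bmap - truncate the bmap of an inode down to a given block
 * @ii: nilfs inode information
 * @from: first file block number to be deleted
 *
 * Truncates in chunks of at most NILFS_MAX_TRUNCATE_BLOCKS per pass,
 * relaxing lock pressure between passes, until the last key in the bmap
 * drops below @from.
 */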
static void nilfs_truncate_bmap(struct nilfs_inode_info *ii,
				unsigned long from)
{
	__u64 b;
	int ret;

	if (!test_bit(NILFS_I_BMAP, &ii->i_state))
		return;
repeat:
	ret = nilfs_bmap_last_key(ii->i_bmap, &b);
	if (ret == -ENOENT)
		return;
	else if (ret < 0)
		goto failed;

	if (b < from)
		return;

	b -= min_t(__u64, NILFS_MAX_TRUNCATE_BLOCKS, b - from);
	ret = nilfs_bmap_truncate(ii->i_bmap, b);
	nilfs_relax_pressure_in_lock(ii->vfs_inode.i_sb);
	if (!ret || (ret == -ENOMEM &&
		     nilfs_bmap_truncate(ii->i_bmap, b) == 0))
		goto repeat;

failed:
	nilfs_msg(ii->vfs_inode.i_sb, KERN_WARNING,
		  "error %d truncating bmap (ino=%lu)", ret,
		  ii->vfs_inode.i_ino);
}

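/**
 * nilfs_truncate - truncate a file to the current i_size
 * @inode: inode of the file to be truncated
 *
 * Zeroes the partial tail block, shrinks the bmap accordingly, and commits
 * the change in a transaction.  This is a no-op for inodes without a bmap
 * and for append-only or immutable files.
 */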
void nilfs_truncate(struct inode *inode)
{
	unsigned long blkoff;
	unsigned int blocksize;
	struct nilfs_transaction_info ti;
	struct super_block *sb = inode->i_sb;
	struct nilfs_inode_info *ii = NILFS_I(inode);

	if (!test_bit(NILFS_I_BMAP, &ii->i_state))
		return;
	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
		return;

	blocksize = sb->s_blocksize;
	blkoff = (inode->i_size + blocksize - 1) >> sb->s_blocksize_bits;
	nilfs_transaction_begin(sb, &ti, 0); /* never fails */

	block_truncate_page(inode->i_mapping, inode->i_size, nilfs_get_block);

	nilfs_truncate_bmap(ii, blkoff);

	inode->i_mtime = inode->i_ctime = current_time(inode);
	if (IS_SYNC(inode))
		nilfs_set_transaction_flag(NILFS_TI_SYNC);

	nilfs_mark_inode_dirty(inode);
	nilfs_set_file_dirty(inode, 0);
	nilfs_transaction_commit(sb);
	/*
	 * May construct a logical segment and may fail in sync mode.
	 * But truncate has no return value.
	 */
}

static void nilfs_clear_inode(struct inode *inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);

	/*
	 * Free resources allocated in nilfs_read_inode(), here.
	 */
	BUG_ON(!list_empty(&ii->i_dirty));
	brelse(ii->i_bh);
	ii->i_bh = NULL;

	if (nilfs_is_metadata_file_inode(inode))
		nilfs_mdt_clear(inode);

	if (test_bit(NILFS_I_BMAP, &ii->i_state))
		nilfs_bmap_clear(ii->i_bmap);

	nilfs_btnode_cache_clear(&ii->i_btnode_cache);

	if (ii->i_root && inode->i_ino == NILFS_ROOT_INO)
		nilfs_put_root(ii->i_root);
}

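/**
 * nilfs_evict_inode - implement the evict_inode() superblock operation
 * @inode: inode being evicted
 *
 * For an unlinked inode on a writable checkpoint, truncates its blocks,
 * deletes its ifile entry and decrements the inode counter of the root;
 * otherwise only the page cache and in-core resources are released.
 */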
void nilfs_evict_inode(struct inode *inode)
{
	struct nilfs_transaction_info ti;
	struct super_block *sb = inode->i_sb;
	struct nilfs_inode_info *ii = NILFS_I(inode);
	int ret;

	if (inode->i_nlink || !ii->i_root || unlikely(is_bad_inode(inode))) {
		truncate_inode_pages_final(&inode->i_data);
		clear_inode(inode);
		nilfs_clear_inode(inode);
		return;
	}
	nilfs_transaction_begin(sb, &ti, 0); /* never fails */

	truncate_inode_pages_final(&inode->i_data);

	/* TODO: some of the following operations may fail. */
	nilfs_truncate_bmap(ii, 0);
	nilfs_mark_inode_dirty(inode);
	clear_inode(inode);

	ret = nilfs_ifile_delete_inode(ii->i_root->ifile, inode->i_ino);
	if (!ret)
		atomic64_dec(&ii->i_root->inodes_count);

	nilfs_clear_inode(inode);

	if (IS_SYNC(inode))
		nilfs_set_transaction_flag(NILFS_TI_SYNC);
	nilfs_transaction_commit(sb);
	/*
	 * May construct a logical segment and may fail in sync mode.
	 * But delete_inode has no return value.
	 */
}

int nilfs_setattr(struct dentry *dentry, struct iattr *iattr)
{
	struct nilfs_transaction_info ti;
	struct inode *inode = d_inode(dentry);
	struct super_block *sb = inode->i_sb;
	int err;

	err = setattr_prepare(dentry, iattr);
	if (err)
		return err;

	err = nilfs_transaction_begin(sb, &ti, 0);
	if (unlikely(err))
		return err;

	if ((iattr->ia_valid & ATTR_SIZE) &&
	    iattr->ia_size != i_size_read(inode)) {
		inode_dio_wait(inode);
		truncate_setsize(inode, iattr->ia_size);
		nilfs_truncate(inode);
	}

	setattr_copy(inode, iattr);
	mark_inode_dirty(inode);

	if (iattr->ia_valid & ATTR_MODE) {
		err = nilfs_acl_chmod(inode);
		if (unlikely(err))
			goto out_err;
	}

	return nilfs_transaction_commit(sb);

out_err:
	nilfs_transaction_abort(sb);
	return err;
}

int nilfs_permission(struct inode *inode, int mask)
{
	struct nilfs_root *root = NILFS_I(inode)->i_root;

	if ((mask & MAY_WRITE) && root &&
	    root->cno != NILFS_CPTREE_CURRENT_CNO)
		return -EROFS; /* snapshot is not writable */

	return generic_permission(inode, mask);
}

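/**
 * nilfs_load_inode_block - load and cache the inode block of an inode
 * @inode: inode whose on-disk block is wanted
 * @pbh: pointer to store the buffer head of the inode block
 *
 * Returns the cached ii->i_bh when available; otherwise reads the block
 * from the ifile and caches it, rechecking under ns_inode_lock in case of
 * a racing reader.  The returned buffer head holds an extra reference.
 */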
int nilfs_load_inode_block(struct inode *inode, struct buffer_head **pbh)
{
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
	struct nilfs_inode_info *ii = NILFS_I(inode);
	int err;

	spin_lock(&nilfs->ns_inode_lock);
	if (ii->i_bh == NULL) {
		spin_unlock(&nilfs->ns_inode_lock);
		err = nilfs_ifile_get_inode_block(ii->i_root->ifile,
						  inode->i_ino, pbh);
		if (unlikely(err))
			return err;
		spin_lock(&nilfs->ns_inode_lock);
		if (ii->i_bh == NULL)
			ii->i_bh = *pbh;
		else {
			brelse(*pbh);
			*pbh = ii->i_bh;
		}
	} else
		*pbh = ii->i_bh;

	get_bh(*pbh);
	spin_unlock(&nilfs->ns_inode_lock);
	return 0;
}

int nilfs_inode_dirty(struct inode *inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
	int ret = 0;

	if (!list_empty(&ii->i_dirty)) {
		spin_lock(&nilfs->ns_inode_lock);
		ret = test_bit(NILFS_I_DIRTY, &ii->i_state) ||
			test_bit(NILFS_I_BUSY, &ii->i_state);
		spin_unlock(&nilfs->ns_inode_lock);
	}
	return ret;
}

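/**
 * nilfs_set_file_dirty - register a file as dirty with the log writer
 * @inode: inode of the file
 * @nr_dirty: number of newly dirtied blocks to account
 *
 * Accounts @nr_dirty blocks and, on the clean-to-dirty transition, grabs
 * a reference to the inode and queues it on the ns_dirty_files list so
 * the segment constructor will write it out.
 */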
int nilfs_set_file_dirty(struct inode *inode, unsigned int nr_dirty)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;

	atomic_add(nr_dirty, &nilfs->ns_ndirtyblks);

	if (test_and_set_bit(NILFS_I_DIRTY, &ii->i_state))
		return 0;

	spin_lock(&nilfs->ns_inode_lock);
	if (!test_bit(NILFS_I_QUEUED, &ii->i_state) &&
	    !test_bit(NILFS_I_BUSY, &ii->i_state)) {
		/*
		 * Because this routine may race with nilfs_dispose_list(),
		 * we have to check NILFS_I_QUEUED here, too.
		 */
		if (list_empty(&ii->i_dirty) && igrab(inode) == NULL) {
			/*
			 * This will happen when somebody is freeing
			 * this inode.
			 */
			nilfs_msg(inode->i_sb, KERN_WARNING,
				  "cannot set file dirty (ino=%lu): the file is being freed",
				  inode->i_ino);
			spin_unlock(&nilfs->ns_inode_lock);
			return -EINVAL; /*
					 * NILFS_I_DIRTY may remain for
					 * freeing inode.
					 */
		}
		list_move_tail(&ii->i_dirty, &nilfs->ns_dirty_files);
		set_bit(NILFS_I_QUEUED, &ii->i_state);
	}
	spin_unlock(&nilfs->ns_inode_lock);
	return 0;
}

int __nilfs_mark_inode_dirty(struct inode *inode, int flags)
{
	struct buffer_head *ibh;
	int err;

	err = nilfs_load_inode_block(inode, &ibh);
	if (unlikely(err)) {
		nilfs_msg(inode->i_sb, KERN_WARNING,
			  "cannot mark inode dirty (ino=%lu): error %d loading inode block",
			  inode->i_ino, err);
		return err;
	}
	nilfs_update_inode(inode, ibh, flags);
	mark_buffer_dirty(ibh);
	nilfs_mdt_mark_dirty(NILFS_I(inode)->i_root->ifile);
	brelse(ibh);
	return 0;
}

/**
 * nilfs_dirty_inode - reflect changes on given inode to an inode block.
 * @inode: inode of the file to be registered.
 * @flags: flags to mark the inode dirty (I_DIRTY_DATASYNC, etc.)
 *
 * nilfs_dirty_inode() loads an inode block containing the specified
 * @inode and copies data from a nilfs_inode to a corresponding inode
 * entry in the inode block. This operation is excluded from the segment
 * construction. This function can be called both as a single operation
 * and as a part of indivisible file operations.
 */
void nilfs_dirty_inode(struct inode *inode, int flags)
{
	struct nilfs_transaction_info ti;
	struct nilfs_mdt_info *mdi = NILFS_MDT(inode);

	if (is_bad_inode(inode)) {
		nilfs_msg(inode->i_sb, KERN_WARNING,
			  "tried to mark bad_inode dirty. ignored.");
		dump_stack();
		return;
	}
	if (mdi) {
		nilfs_mdt_mark_dirty(inode);
		return;
	}
	nilfs_transaction_begin(inode->i_sb, &ti, 0);
	__nilfs_mark_inode_dirty(inode, flags);
	nilfs_transaction_commit(inode->i_sb); /* never fails */
}

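/**
 * nilfs_fiemap - implement the fiemap inode operation
 * @inode: inode to report extents for
 * @fieinfo: fiemap extent information
 * @start: byte offset to start mapping from
 * @len: length of the range to map
 *
 * Walks the file range, merging contiguous mapped blocks into extents and
 * reporting uncommitted (delayed-allocation) ranges with
 * FIEMAP_EXTENT_DELALLOC.
 */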
int nilfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		 __u64 start, __u64 len)
{
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
	__u64 logical = 0, phys = 0, size = 0;
	__u32 flags = 0;
	loff_t isize;
	sector_t blkoff, end_blkoff;
	sector_t delalloc_blkoff;
	unsigned long delalloc_blklen;
	unsigned int blkbits = inode->i_blkbits;
	int ret, n;

	ret = fiemap_prep(inode, fieinfo, start, &len, 0);
	if (ret)
		return ret;

	inode_lock(inode);

	isize = i_size_read(inode);

	blkoff = start >> blkbits;
	end_blkoff = (start + len - 1) >> blkbits;

	delalloc_blklen = nilfs_find_uncommitted_extent(inode, blkoff,
							&delalloc_blkoff);

	do {
		__u64 blkphy;
		unsigned int maxblocks;

		if (delalloc_blklen && blkoff == delalloc_blkoff) {
			if (size) {
				/* End of the current extent */
				ret = fiemap_fill_next_extent(
					fieinfo, logical, phys, size, flags);
				if (ret)
					break;
			}
			if (blkoff > end_blkoff)
				break;

			flags = FIEMAP_EXTENT_MERGED | FIEMAP_EXTENT_DELALLOC;
			logical = blkoff << blkbits;
			phys = 0;
			size = delalloc_blklen << blkbits;

			blkoff = delalloc_blkoff + delalloc_blklen;
			delalloc_blklen = nilfs_find_uncommitted_extent(
				inode, blkoff, &delalloc_blkoff);
			continue;
		}

		/*
		 * Limit the number of blocks that we look up so as
		 * not to get into the next delayed allocation extent.
		 */
		maxblocks = INT_MAX;
		if (delalloc_blklen)
			maxblocks = min_t(sector_t, delalloc_blkoff - blkoff,
					  maxblocks);
		blkphy = 0;

		down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
		n = nilfs_bmap_lookup_contig(
			NILFS_I(inode)->i_bmap, blkoff, &blkphy, maxblocks);
		up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);

		if (n < 0) {
			int past_eof;

			if (unlikely(n != -ENOENT))
				break; /* error */

			/* HOLE */
			blkoff++;
			past_eof = ((blkoff << blkbits) >= isize);

			if (size) {
				/* End of the current extent */

				if (past_eof)
					flags |= FIEMAP_EXTENT_LAST;

				ret = fiemap_fill_next_extent(
					fieinfo, logical, phys, size, flags);
				if (ret)
					break;
				size = 0;
			}
			if (blkoff > end_blkoff || past_eof)
				break;
		} else {
			if (size) {
				if (phys && blkphy << blkbits == phys + size) {
					/* The current extent goes on */
					size += n << blkbits;
				} else {
					/* Terminate the current extent */
					ret = fiemap_fill_next_extent(
						fieinfo, logical, phys, size,
						flags);
					if (ret || blkoff > end_blkoff)
						break;

					/* Start another extent */
					flags = FIEMAP_EXTENT_MERGED;
					logical = blkoff << blkbits;
					phys = blkphy << blkbits;
					size = n << blkbits;
				}
			} else {
				/* Start a new extent */
				flags = FIEMAP_EXTENT_MERGED;
				logical = blkoff << blkbits;
				phys = blkphy << blkbits;
				size = n << blkbits;
			}
			blkoff += n;
		}
		cond_resched();
	} while (true);

	/* If ret is 1 then we just hit the end of the extent array */
	if (ret == 1)
		ret = 0;

	inode_unlock(inode);
	return ret;
}