// SPDX-License-Identifier: GPL-2.0+
/*
 * inode.c - NILFS inode operations.
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * Written by Ryusuke Konishi.
 */

#include <linux/buffer_head.h>
#include <linux/gfp.h>
#include <linux/mpage.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/uio.h>
#include "nilfs.h"
#include "btnode.h"
#include "segment.h"
#include "page.h"
#include "mdt.h"
#include "cpfile.h"
#include "ifile.h"
/**
 * struct nilfs_iget_args - arguments used during comparison between inodes
 * @ino: inode number
 * @cno: checkpoint number
 * @root: pointer on NILFS root object (mounted checkpoint)
 * @for_gc: inode for GC flag
 */
struct nilfs_iget_args {
	u64 ino;
	__u64 cno;
	struct nilfs_root *root;
	int for_gc;
};

static int nilfs_iget_test(struct inode *inode, void *opaque);
void nilfs_inode_add_blocks(struct inode *inode, int n)
{
	struct nilfs_root *root = NILFS_I(inode)->i_root;

	inode_add_bytes(inode, i_blocksize(inode) * n);
	if (root)
		atomic64_add(n, &root->blocks_count);
}

void nilfs_inode_sub_blocks(struct inode *inode, int n)
{
	struct nilfs_root *root = NILFS_I(inode)->i_root;

	inode_sub_bytes(inode, i_blocksize(inode) * n);
	if (root)
		atomic64_sub(n, &root->blocks_count);
}
/**
 * nilfs_get_block() - get a file block on the filesystem (callback function)
 * @inode - inode struct of the target file
 * @blkoff - file block number
 * @bh_result - buffer head to be mapped on
 * @create - indicate whether allocating the block or not when it has not
 *      been allocated yet.
 *
 * This function does not issue actual read request of the specified data
 * block. It is done by VFS.
 */
int nilfs_get_block(struct inode *inode, sector_t blkoff,
		    struct buffer_head *bh_result, int create)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
	__u64 blknum = 0;
	int err = 0, ret;
	unsigned int maxblocks = bh_result->b_size >> inode->i_blkbits;

	down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	ret = nilfs_bmap_lookup_contig(ii->i_bmap, blkoff, &blknum, maxblocks);
	up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	if (ret >= 0) {	/* found */
		map_bh(bh_result, inode->i_sb, blknum);
		if (ret > 0)
			bh_result->b_size = (ret << inode->i_blkbits);
		goto out;
	}
	/* data block was not found */
	if (ret == -ENOENT && create) {
		struct nilfs_transaction_info ti;

		bh_result->b_blocknr = 0;
		err = nilfs_transaction_begin(inode->i_sb, &ti, 1);
		if (unlikely(err))
			goto out;
		err = nilfs_bmap_insert(ii->i_bmap, blkoff,
					(unsigned long)bh_result);
		if (unlikely(err != 0)) {
			if (err == -EEXIST) {
				/*
				 * The get_block() function could be called
				 * from multiple callers for an inode.
				 * However, the page having this block must
				 * be locked in this case.
				 */
				nilfs_msg(inode->i_sb, KERN_WARNING,
					  "%s (ino=%lu): a race condition while inserting a data block at offset=%llu",
					  __func__, inode->i_ino,
					  (unsigned long long)blkoff);
				err = 0;
			}
			nilfs_transaction_abort(inode->i_sb);
			goto out;
		}
		nilfs_mark_inode_dirty_sync(inode);
		nilfs_transaction_commit(inode->i_sb); /* never fails */
		/* Error handling should be detailed */
		set_buffer_new(bh_result);
		set_buffer_delay(bh_result);
		map_bh(bh_result, inode->i_sb, 0);
		/* Disk block number must be changed to proper value */

	} else if (ret == -ENOENT) {
		/*
		 * not found is not error (e.g. hole); must return without
		 * the mapped state flag.
		 */
		;
	} else {
		err = ret;
	}

 out:
	return err;
}
/**
 * nilfs_readpage() - implement readpage() method of nilfs_aops {}
 * address_space_operations.
 * @file - file struct of the file to be read
 * @page - the page to be read
 */
static int nilfs_readpage(struct file *file, struct page *page)
{
	return mpage_readpage(page, nilfs_get_block);
}
/**
 * nilfs_readpages() - implement readpages() method of nilfs_aops {}
 * address_space_operations.
 * @file - file struct of the file to be read
 * @mapping - address_space struct used for reading multiple pages
 * @pages - the pages to be read
 * @nr_pages - number of pages to be read
 */
static int nilfs_readpages(struct file *file, struct address_space *mapping,
			   struct list_head *pages, unsigned int nr_pages)
{
	return mpage_readpages(mapping, pages, nr_pages, nilfs_get_block);
}
static int nilfs_writepages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	int err = 0;

	if (sb_rdonly(inode->i_sb)) {
		nilfs_clear_dirty_pages(mapping, false);
		return -EROFS;
	}

	if (wbc->sync_mode == WB_SYNC_ALL)
		err = nilfs_construct_dsync_segment(inode->i_sb, inode,
						    wbc->range_start,
						    wbc->range_end);
	return err;
}
static int nilfs_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	int err;

	if (sb_rdonly(inode->i_sb)) {
		/*
		 * It means that filesystem was remounted in read-only
		 * mode because of error or metadata corruption. But we
		 * have dirty pages that try to be flushed in background.
		 * So, here we simply discard this dirty page.
		 */
		nilfs_clear_dirty_page(page, false);
		unlock_page(page);
		return -EROFS;
	}

	redirty_page_for_writepage(wbc, page);
	unlock_page(page);

	if (wbc->sync_mode == WB_SYNC_ALL) {
		err = nilfs_construct_segment(inode->i_sb);
		if (unlikely(err))
			return err;
	} else if (wbc->for_reclaim)
		nilfs_flush_segment(inode->i_sb, inode->i_ino);

	return 0;
}
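
/*
 * nilfs_set_page_dirty() counts the number of blocks that actually become
 * dirty and reports that count to nilfs_set_file_dirty(), so the segment
 * constructor can account dirty blocks per inode.  When the page has
 * buffers, only mapped, previously-clean buffers are counted; otherwise
 * the whole page worth of blocks is assumed to become dirty.
 */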
static int nilfs_set_page_dirty(struct page *page)
{
	struct inode *inode = page->mapping->host;
	int ret = __set_page_dirty_nobuffers(page);

	if (page_has_buffers(page)) {
		unsigned int nr_dirty = 0;
		struct buffer_head *bh, *head;

		/*
		 * This page is locked by callers, and no other thread
		 * concurrently marks its buffers dirty since they are
		 * only dirtied through routines in fs/buffer.c in
		 * which call sites of mark_buffer_dirty are protected
		 * by page lock.
		 */
		bh = head = page_buffers(page);
		do {
			/* Do not mark hole blocks dirty */
			if (buffer_dirty(bh) || !buffer_mapped(bh))
				continue;

			set_buffer_dirty(bh);
			nr_dirty++;
		} while (bh = bh->b_this_page, bh != head);

		if (nr_dirty)
			nilfs_set_file_dirty(inode, nr_dirty);
	} else if (ret) {
		unsigned int nr_dirty = 1 << (PAGE_SHIFT - inode->i_blkbits);

		nilfs_set_file_dirty(inode, nr_dirty);
	}
	return ret;
}
void nilfs_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;

	if (to > inode->i_size) {
		truncate_pagecache(inode, inode->i_size);
		nilfs_truncate(inode);
	}
}
static int nilfs_write_begin(struct file *file, struct address_space *mapping,
			     loff_t pos, unsigned len, unsigned flags,
			     struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	int err = nilfs_transaction_begin(inode->i_sb, NULL, 1);

	if (unlikely(err))
		return err;

	err = block_write_begin(mapping, pos, len, flags, pagep,
				nilfs_get_block);
	if (unlikely(err)) {
		nilfs_write_failed(mapping, pos + len);
		nilfs_transaction_abort(inode->i_sb);
	}
	return err;
}
static int nilfs_write_end(struct file *file, struct address_space *mapping,
			   loff_t pos, unsigned len, unsigned copied,
			   struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;
	unsigned int start = pos & (PAGE_SIZE - 1);
	unsigned int nr_dirty;
	int err;

	nr_dirty = nilfs_page_count_clean_buffers(page, start,
						  start + copied);
	copied = generic_write_end(file, mapping, pos, len, copied, page,
				   fsdata);
	nilfs_set_file_dirty(inode, nr_dirty);
	err = nilfs_transaction_commit(inode->i_sb);
	return err ? : copied;
}
static ssize_t
nilfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	struct inode *inode = file_inode(iocb->ki_filp);

	if (iov_iter_rw(iter) == WRITE)
		return 0;

	/* Needs synchronization with the cleaner */
	return blockdev_direct_IO(iocb, inode, iter, nilfs_get_block);
}
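
/*
 * Address space operations used for NILFS data files.  Read paths go
 * through the generic mpage helpers with nilfs_get_block(); write paths
 * feed dirty-block accounting into the segment constructor through the
 * callbacks above.
 */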
const struct address_space_operations nilfs_aops = {
	.writepage		= nilfs_writepage,
	.readpage		= nilfs_readpage,
	.writepages		= nilfs_writepages,
	.set_page_dirty		= nilfs_set_page_dirty,
	.readpages		= nilfs_readpages,
	.write_begin		= nilfs_write_begin,
	.write_end		= nilfs_write_end,
	/* .releasepage		= nilfs_releasepage, */
	.invalidatepage		= block_invalidatepage,
	.direct_IO		= nilfs_direct_IO,
	.is_partially_uptodate	= block_is_partially_uptodate,
};
static int nilfs_insert_inode_locked(struct inode *inode,
				     struct nilfs_root *root,
				     unsigned long ino)
{
	struct nilfs_iget_args args = {
		.ino = ino, .root = root, .cno = 0, .for_gc = 0
	};

	return insert_inode_locked4(inode, ino, nilfs_iget_test, &args);
}
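
/*
 * nilfs_new_inode() allocates an on-disk inode in the ifile of the
 * checkpoint that @dir belongs to, initializes the in-core inode and
 * hashes it into the inode cache.  On failure, the partially created
 * inode is disposed of through the error paths below and an ERR_PTR
 * is returned.
 */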
struct inode *nilfs_new_inode(struct inode *dir, umode_t mode)
{
	struct super_block *sb = dir->i_sb;
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct inode *inode;
	struct nilfs_inode_info *ii;
	struct nilfs_root *root;
	int err = -ENOMEM;
	ino_t ino;

	inode = new_inode(sb);
	if (unlikely(!inode))
		goto failed;

	mapping_set_gfp_mask(inode->i_mapping,
			   mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS));

	root = NILFS_I(dir)->i_root;
	ii = NILFS_I(inode);
	ii->i_state = BIT(NILFS_I_NEW);
	ii->i_root = root;

	err = nilfs_ifile_create_inode(root->ifile, &ino, &ii->i_bh);
	if (unlikely(err))
		goto failed_ifile_create_inode;
	/* reference count of i_bh inherits from nilfs_mdt_read_block() */

	atomic64_inc(&root->inodes_count);
	inode_init_owner(inode, dir, mode);
	inode->i_ino = ino;
	inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);

	if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)) {
		err = nilfs_bmap_read(ii->i_bmap, NULL);
		if (err < 0)
			goto failed_after_creation;

		set_bit(NILFS_I_BMAP, &ii->i_state);
		/* No lock is needed; iget() ensures it. */
	}

	ii->i_flags = nilfs_mask_flags(
		mode, NILFS_I(dir)->i_flags & NILFS_FL_INHERITED);

	/* ii->i_file_acl = 0; */
	/* ii->i_dir_acl = 0; */
	ii->i_dir_start_lookup = 0;
	nilfs_set_inode_flags(inode);
	spin_lock(&nilfs->ns_next_gen_lock);
	inode->i_generation = nilfs->ns_next_generation++;
	spin_unlock(&nilfs->ns_next_gen_lock);
	if (nilfs_insert_inode_locked(inode, root, ino) < 0) {
		err = -EIO;
		goto failed_after_creation;
	}

	err = nilfs_init_acl(inode, dir);
	if (unlikely(err))
		/*
		 * Never occur.  When supporting nilfs_init_acl(),
		 * proper cancellation of above jobs should be considered.
		 */
		goto failed_after_creation;

	return inode;

 failed_after_creation:
	clear_nlink(inode);
	unlock_new_inode(inode);
	iput(inode);  /*
		       * raw_inode will be deleted through
		       * nilfs_evict_inode().
		       */
	goto failed;

 failed_ifile_create_inode:
	make_bad_inode(inode);
	iput(inode);
 failed:
	return ERR_PTR(err);
}
void nilfs_set_inode_flags(struct inode *inode)
{
	unsigned int flags = NILFS_I(inode)->i_flags;
	unsigned int new_fl = 0;

	if (flags & FS_SYNC_FL)
		new_fl |= S_SYNC;
	if (flags & FS_APPEND_FL)
		new_fl |= S_APPEND;
	if (flags & FS_IMMUTABLE_FL)
		new_fl |= S_IMMUTABLE;
	if (flags & FS_NOATIME_FL)
		new_fl |= S_NOATIME;
	if (flags & FS_DIRSYNC_FL)
		new_fl |= S_DIRSYNC;
	inode_set_flags(inode, new_fl, S_SYNC | S_APPEND | S_IMMUTABLE |
			S_NOATIME | S_DIRSYNC);
}
int nilfs_read_inode_common(struct inode *inode,
			    struct nilfs_inode *raw_inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	int err;

	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
	i_uid_write(inode, le32_to_cpu(raw_inode->i_uid));
	i_gid_write(inode, le32_to_cpu(raw_inode->i_gid));
	set_nlink(inode, le16_to_cpu(raw_inode->i_links_count));
	inode->i_size = le64_to_cpu(raw_inode->i_size);
	inode->i_atime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
	inode->i_ctime.tv_sec = le64_to_cpu(raw_inode->i_ctime);
	inode->i_mtime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
	inode->i_atime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
	inode->i_ctime.tv_nsec = le32_to_cpu(raw_inode->i_ctime_nsec);
	inode->i_mtime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
	if (inode->i_nlink == 0)
		return -ESTALE; /* this inode is deleted */

	inode->i_blocks = le64_to_cpu(raw_inode->i_blocks);
	ii->i_flags = le32_to_cpu(raw_inode->i_flags);
#if 0
	ii->i_file_acl = le32_to_cpu(raw_inode->i_file_acl);
	ii->i_dir_acl = S_ISREG(inode->i_mode) ?
		0 : le32_to_cpu(raw_inode->i_dir_acl);
#endif
	ii->i_dir_start_lookup = 0;
	inode->i_generation = le32_to_cpu(raw_inode->i_generation);

	if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
	    S_ISLNK(inode->i_mode)) {
		err = nilfs_bmap_read(ii->i_bmap, raw_inode);
		if (err < 0)
			return err;
		set_bit(NILFS_I_BMAP, &ii->i_state);
		/* No lock is needed; iget() ensures it. */
	}
	return 0;
}
static int __nilfs_read_inode(struct super_block *sb,
			      struct nilfs_root *root, unsigned long ino,
			      struct inode *inode)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct buffer_head *bh;
	struct nilfs_inode *raw_inode;
	int err;

	down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	err = nilfs_ifile_get_inode_block(root->ifile, ino, &bh);
	if (unlikely(err))
		goto bad_inode;

	raw_inode = nilfs_ifile_map_inode(root->ifile, ino, bh);

	err = nilfs_read_inode_common(inode, raw_inode);
	if (err)
		goto failed_unmap;

	if (S_ISREG(inode->i_mode)) {
		inode->i_op = &nilfs_file_inode_operations;
		inode->i_fop = &nilfs_file_operations;
		inode->i_mapping->a_ops = &nilfs_aops;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &nilfs_dir_inode_operations;
		inode->i_fop = &nilfs_dir_operations;
		inode->i_mapping->a_ops = &nilfs_aops;
	} else if (S_ISLNK(inode->i_mode)) {
		inode->i_op = &nilfs_symlink_inode_operations;
		inode_nohighmem(inode);
		inode->i_mapping->a_ops = &nilfs_aops;
	} else {
		inode->i_op = &nilfs_special_inode_operations;
		init_special_inode(
			inode, inode->i_mode,
			huge_decode_dev(le64_to_cpu(raw_inode->i_device_code)));
	}
	nilfs_ifile_unmap_inode(root->ifile, ino, bh);
	brelse(bh);
	up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	nilfs_set_inode_flags(inode);
	mapping_set_gfp_mask(inode->i_mapping,
			   mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS));
	return 0;

 failed_unmap:
	nilfs_ifile_unmap_inode(root->ifile, ino, bh);
	brelse(bh);

 bad_inode:
	up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	return err;
}
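
/*
 * nilfs_iget_test() and nilfs_iget_set() let the inode cache distinguish
 * inodes of different checkpoints (roots) that share the same inode
 * number, and additionally separate GC inodes, which are keyed by
 * checkpoint number instead of a root object.
 */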
static int nilfs_iget_test(struct inode *inode, void *opaque)
{
	struct nilfs_iget_args *args = opaque;
	struct nilfs_inode_info *ii;

	if (args->ino != inode->i_ino || args->root != NILFS_I(inode)->i_root)
		return 0;

	ii = NILFS_I(inode);
	if (!test_bit(NILFS_I_GCINODE, &ii->i_state))
		return !args->for_gc;

	return args->for_gc && args->cno == ii->i_cno;
}
static int nilfs_iget_set(struct inode *inode, void *opaque)
{
	struct nilfs_iget_args *args = opaque;

	inode->i_ino = args->ino;
	if (args->for_gc) {
		NILFS_I(inode)->i_state = BIT(NILFS_I_GCINODE);
		NILFS_I(inode)->i_cno = args->cno;
		NILFS_I(inode)->i_root = NULL;
	} else {
		if (args->root && args->ino == NILFS_ROOT_INO)
			nilfs_get_root(args->root);
		NILFS_I(inode)->i_root = args->root;
	}
	return 0;
}
struct inode *nilfs_ilookup(struct super_block *sb, struct nilfs_root *root,
			    unsigned long ino)
{
	struct nilfs_iget_args args = {
		.ino = ino, .root = root, .cno = 0, .for_gc = 0
	};

	return ilookup5(sb, ino, nilfs_iget_test, &args);
}
struct inode *nilfs_iget_locked(struct super_block *sb, struct nilfs_root *root,
				unsigned long ino)
{
	struct nilfs_iget_args args = {
		.ino = ino, .root = root, .cno = 0, .for_gc = 0
	};

	return iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args);
}
struct inode *nilfs_iget(struct super_block *sb, struct nilfs_root *root,
			 unsigned long ino)
{
	struct inode *inode;
	int err;

	inode = nilfs_iget_locked(sb, root, ino);
	if (unlikely(!inode))
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	err = __nilfs_read_inode(sb, root, ino, inode);
	if (unlikely(err)) {
		iget_failed(inode);
		return ERR_PTR(err);
	}
	unlock_new_inode(inode);
	return inode;
}
struct inode *nilfs_iget_for_gc(struct super_block *sb, unsigned long ino,
				__u64 cno)
{
	struct nilfs_iget_args args = {
		.ino = ino, .root = NULL, .cno = cno, .for_gc = 1
	};
	struct inode *inode;
	int err;

	inode = iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args);
	if (unlikely(!inode))
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	err = nilfs_init_gcinode(inode);
	if (unlikely(err)) {
		iget_failed(inode);
		return ERR_PTR(err);
	}
	unlock_new_inode(inode);
	return inode;
}
void nilfs_write_inode_common(struct inode *inode,
			      struct nilfs_inode *raw_inode, int has_bmap)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);

	raw_inode->i_mode = cpu_to_le16(inode->i_mode);
	raw_inode->i_uid = cpu_to_le32(i_uid_read(inode));
	raw_inode->i_gid = cpu_to_le32(i_gid_read(inode));
	raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
	raw_inode->i_size = cpu_to_le64(inode->i_size);
	raw_inode->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
	raw_inode->i_mtime = cpu_to_le64(inode->i_mtime.tv_sec);
	raw_inode->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
	raw_inode->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
	raw_inode->i_blocks = cpu_to_le64(inode->i_blocks);

	raw_inode->i_flags = cpu_to_le32(ii->i_flags);
	raw_inode->i_generation = cpu_to_le32(inode->i_generation);

	if (NILFS_ROOT_METADATA_FILE(inode->i_ino)) {
		struct the_nilfs *nilfs = inode->i_sb->s_fs_info;

		/* zero-fill unused portion in the case of super root block */
		raw_inode->i_xattr = 0;
		raw_inode->i_pad = 0;
		memset((void *)raw_inode + sizeof(*raw_inode), 0,
		       nilfs->ns_inode_size - sizeof(*raw_inode));
	}

	if (has_bmap)
		nilfs_bmap_write(ii->i_bmap, raw_inode);
	else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
		raw_inode->i_device_code =
			cpu_to_le64(huge_encode_dev(inode->i_rdev));
	/*
	 * When extending inode, nilfs->ns_inode_size should be checked
	 * for substitutions of appended fields.
	 */
}
void nilfs_update_inode(struct inode *inode, struct buffer_head *ibh, int flags)
{
	ino_t ino = inode->i_ino;
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct inode *ifile = ii->i_root->ifile;
	struct nilfs_inode *raw_inode;

	raw_inode = nilfs_ifile_map_inode(ifile, ino, ibh);

	if (test_and_clear_bit(NILFS_I_NEW, &ii->i_state))
		memset(raw_inode, 0, NILFS_MDT(ifile)->mi_entry_size);
	if (flags & I_DIRTY_DATASYNC)
		set_bit(NILFS_I_INODE_SYNC, &ii->i_state);

	nilfs_write_inode_common(inode, raw_inode, 0);
		/*
		 * XXX: call with has_bmap = 0 is a workaround to avoid
		 * deadlock of bmap.  This delays update of i_bmap to just
		 * before writing.
		 */

	nilfs_ifile_unmap_inode(ifile, ino, ibh);
}
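
/*
 * nilfs_truncate_bmap() shortens the bmap in bounded steps of at most
 * NILFS_MAX_TRUNCATE_BLOCKS blocks, calling
 * nilfs_relax_pressure_in_lock() between steps so that a huge truncate
 * does not monopolize the segment construction lock.
 */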
#define NILFS_MAX_TRUNCATE_BLOCKS	16384  /* 64MB for 4KB block */

static void nilfs_truncate_bmap(struct nilfs_inode_info *ii,
				unsigned long from)
{
	__u64 b;
	int ret;

	if (!test_bit(NILFS_I_BMAP, &ii->i_state))
		return;
repeat:
	ret = nilfs_bmap_last_key(ii->i_bmap, &b);
	if (ret == -ENOENT)
		return;
	else if (ret < 0)
		goto failed;

	if (b < from)
		return;

	b -= min_t(__u64, NILFS_MAX_TRUNCATE_BLOCKS, b - from);
	ret = nilfs_bmap_truncate(ii->i_bmap, b);
	nilfs_relax_pressure_in_lock(ii->vfs_inode.i_sb);
	if (!ret || (ret == -ENOMEM &&
		     nilfs_bmap_truncate(ii->i_bmap, b) == 0))
		goto repeat;

failed:
	nilfs_msg(ii->vfs_inode.i_sb, KERN_WARNING,
		  "error %d truncating bmap (ino=%lu)", ret,
		  ii->vfs_inode.i_ino);
}
void nilfs_truncate(struct inode *inode)
{
	unsigned long blkoff;
	unsigned int blocksize;
	struct nilfs_transaction_info ti;
	struct super_block *sb = inode->i_sb;
	struct nilfs_inode_info *ii = NILFS_I(inode);

	if (!test_bit(NILFS_I_BMAP, &ii->i_state))
		return;
	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
		return;

	blocksize = sb->s_blocksize;
	blkoff = (inode->i_size + blocksize - 1) >> sb->s_blocksize_bits;
	nilfs_transaction_begin(sb, &ti, 0); /* never fails */

	block_truncate_page(inode->i_mapping, inode->i_size, nilfs_get_block);

	nilfs_truncate_bmap(ii, blkoff);

	inode->i_mtime = inode->i_ctime = current_time(inode);
	if (IS_SYNC(inode))
		nilfs_set_transaction_flag(NILFS_TI_SYNC);

	nilfs_mark_inode_dirty(inode);
	nilfs_set_file_dirty(inode, 0);
	nilfs_transaction_commit(sb);
	/*
	 * May construct a logical segment and may fail in sync mode.
	 * But truncate has no return value.
	 */
}
static void nilfs_clear_inode(struct inode *inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);

	/*
	 * Free resources allocated in nilfs_read_inode(), here.
	 */
	BUG_ON(!list_empty(&ii->i_dirty));
	brelse(ii->i_bh);
	ii->i_bh = NULL;

	if (nilfs_is_metadata_file_inode(inode))
		nilfs_mdt_clear(inode);

	if (test_bit(NILFS_I_BMAP, &ii->i_state))
		nilfs_bmap_clear(ii->i_bmap);

	nilfs_btnode_cache_clear(&ii->i_btnode_cache);

	if (ii->i_root && inode->i_ino == NILFS_ROOT_INO)
		nilfs_put_root(ii->i_root);
}
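
/*
 * nilfs_evict_inode() is called when the last reference to an inode is
 * dropped.  For unlinked inodes it truncates the bmap, deletes the
 * entry from the ifile and decrements the per-root inode count; inodes
 * that are still linked, GC inodes and bad inodes are simply cleared.
 */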
void nilfs_evict_inode(struct inode *inode)
{
	struct nilfs_transaction_info ti;
	struct super_block *sb = inode->i_sb;
	struct nilfs_inode_info *ii = NILFS_I(inode);
	int ret;

	if (inode->i_nlink || !ii->i_root || unlikely(is_bad_inode(inode))) {
		truncate_inode_pages_final(&inode->i_data);
		clear_inode(inode);
		nilfs_clear_inode(inode);
		return;
	}
	nilfs_transaction_begin(sb, &ti, 0); /* never fails */

	truncate_inode_pages_final(&inode->i_data);

	/* TODO: some of the following operations may fail.  */
	nilfs_truncate_bmap(ii, 0);
	nilfs_mark_inode_dirty(inode);
	clear_inode(inode);

	ret = nilfs_ifile_delete_inode(ii->i_root->ifile, inode->i_ino);
	if (!ret)
		atomic64_dec(&ii->i_root->inodes_count);

	nilfs_clear_inode(inode);

	if (IS_SYNC(inode))
		nilfs_set_transaction_flag(NILFS_TI_SYNC);
	nilfs_transaction_commit(sb);
	/*
	 * May construct a logical segment and may fail in sync mode.
	 * But delete_inode has no return value.
	 */
}
int nilfs_setattr(struct dentry *dentry, struct iattr *iattr)
{
	struct nilfs_transaction_info ti;
	struct inode *inode = d_inode(dentry);
	struct super_block *sb = inode->i_sb;
	int err;

	err = setattr_prepare(dentry, iattr);
	if (err)
		return err;

	err = nilfs_transaction_begin(sb, &ti, 0);
	if (unlikely(err))
		return err;

	if ((iattr->ia_valid & ATTR_SIZE) &&
	    iattr->ia_size != i_size_read(inode)) {
		inode_dio_wait(inode);
		truncate_setsize(inode, iattr->ia_size);
		nilfs_truncate(inode);
	}

	setattr_copy(inode, iattr);
	mark_inode_dirty(inode);

	if (iattr->ia_valid & ATTR_MODE) {
		err = nilfs_acl_chmod(inode);
		if (unlikely(err))
			goto out_err;
	}

	return nilfs_transaction_commit(sb);

out_err:
	nilfs_transaction_abort(sb);
	return err;
}
int nilfs_permission(struct inode *inode, int mask)
{
	struct nilfs_root *root = NILFS_I(inode)->i_root;

	if ((mask & MAY_WRITE) && root &&
	    root->cno != NILFS_CPTREE_CURRENT_CNO)
		return -EROFS; /* snapshot is not writable */

	return generic_permission(inode, mask);
}
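
/*
 * nilfs_load_inode_block() returns the buffer head of the ifile block
 * that holds the on-disk inode, caching it in ii->i_bh under
 * ns_inode_lock so that repeated lookups and a racing loader do not
 * leak buffer references.
 */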
int nilfs_load_inode_block(struct inode *inode, struct buffer_head **pbh)
{
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
	struct nilfs_inode_info *ii = NILFS_I(inode);
	int err;

	spin_lock(&nilfs->ns_inode_lock);
	if (ii->i_bh == NULL) {
		spin_unlock(&nilfs->ns_inode_lock);
		err = nilfs_ifile_get_inode_block(ii->i_root->ifile,
						  inode->i_ino, pbh);
		if (unlikely(err))
			return err;
		spin_lock(&nilfs->ns_inode_lock);
		if (ii->i_bh == NULL)
			ii->i_bh = *pbh;
		else {
			brelse(*pbh);
			*pbh = ii->i_bh;
		}
	} else
		*pbh = ii->i_bh;

	get_bh(*pbh);
	spin_unlock(&nilfs->ns_inode_lock);
	return 0;
}
int nilfs_inode_dirty(struct inode *inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
	int ret = 0;

	if (!list_empty(&ii->i_dirty)) {
		spin_lock(&nilfs->ns_inode_lock);
		ret = test_bit(NILFS_I_DIRTY, &ii->i_state) ||
			test_bit(NILFS_I_BUSY, &ii->i_state);
		spin_unlock(&nilfs->ns_inode_lock);
	}
	return ret;
}
int nilfs_set_file_dirty(struct inode *inode, unsigned int nr_dirty)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;

	atomic_add(nr_dirty, &nilfs->ns_ndirtyblks);

	if (test_and_set_bit(NILFS_I_DIRTY, &ii->i_state))
		return 0;

	spin_lock(&nilfs->ns_inode_lock);
	if (!test_bit(NILFS_I_QUEUED, &ii->i_state) &&
	    !test_bit(NILFS_I_BUSY, &ii->i_state)) {
		/*
		 * Because this routine may race with nilfs_dispose_list(),
		 * we have to check NILFS_I_QUEUED here, too.
		 */
		if (list_empty(&ii->i_dirty) && igrab(inode) == NULL) {
			/*
			 * This will happen when somebody is freeing
			 * this inode.
			 */
			nilfs_msg(inode->i_sb, KERN_WARNING,
				  "cannot set file dirty (ino=%lu): the file is being freed",
				  inode->i_ino);
			spin_unlock(&nilfs->ns_inode_lock);
			return -EINVAL; /*
					 * NILFS_I_DIRTY may remain for
					 * freeing inode.
					 */
		}
		list_move_tail(&ii->i_dirty, &nilfs->ns_dirty_files);
		set_bit(NILFS_I_QUEUED, &ii->i_state);
	}
	spin_unlock(&nilfs->ns_inode_lock);
	return 0;
}
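
/*
 * __nilfs_mark_inode_dirty() copies the in-core inode into its ifile
 * block and marks both the buffer and the ifile dirty, so that the
 * change is picked up by the next segment construction.
 */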
int __nilfs_mark_inode_dirty(struct inode *inode, int flags)
{
	struct buffer_head *ibh;
	int err;

	err = nilfs_load_inode_block(inode, &ibh);
	if (unlikely(err)) {
		nilfs_msg(inode->i_sb, KERN_WARNING,
			  "cannot mark inode dirty (ino=%lu): error %d loading inode block",
			  inode->i_ino, err);
		return err;
	}
	nilfs_update_inode(inode, ibh, flags);
	mark_buffer_dirty(ibh);
	nilfs_mdt_mark_dirty(NILFS_I(inode)->i_root->ifile);
	brelse(ibh);
	return 0;
}
/**
 * nilfs_dirty_inode - reflect changes on given inode to an inode block.
 * @inode: inode of the file to be registered.
 *
 * nilfs_dirty_inode() loads a inode block containing the specified
 * @inode and copies data from a nilfs_inode to a corresponding inode
 * entry in the inode block. This operation is excluded from the segment
 * construction. This function can be called both as a single operation
 * and as a part of indivisible file operations.
 */
void nilfs_dirty_inode(struct inode *inode, int flags)
{
	struct nilfs_transaction_info ti;
	struct nilfs_mdt_info *mdi = NILFS_MDT(inode);

	if (is_bad_inode(inode)) {
		nilfs_msg(inode->i_sb, KERN_WARNING,
			  "tried to mark bad_inode dirty. ignored.");
		dump_stack();
		return;
	}
	if (mdi) {
		nilfs_mdt_mark_dirty(inode);
		return;
	}
	nilfs_transaction_begin(inode->i_sb, &ti, 0);
	__nilfs_mark_inode_dirty(inode, flags);
	nilfs_transaction_commit(inode->i_sb); /* never fails */
}
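
/*
 * nilfs_fiemap() reports extents by merging two sources: delayed
 * allocation extents found in the page cache via
 * nilfs_find_uncommitted_extent(), and committed extents looked up
 * through the bmap.  Contiguous bmap results are coalesced before being
 * handed to fiemap_fill_next_extent().
 */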
int nilfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		 __u64 start, __u64 len)
{
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
	__u64 logical = 0, phys = 0, size = 0;
	__u32 flags = 0;
	loff_t isize;
	sector_t blkoff, end_blkoff;
	sector_t delalloc_blkoff;
	unsigned long delalloc_blklen;
	unsigned int blkbits = inode->i_blkbits;
	int ret, n;

	ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC);
	if (ret)
		return ret;

	inode_lock(inode);

	isize = i_size_read(inode);

	blkoff = start >> blkbits;
	end_blkoff = (start + len - 1) >> blkbits;

	delalloc_blklen = nilfs_find_uncommitted_extent(inode, blkoff,
							&delalloc_blkoff);

	do {
		__u64 blkphy;
		unsigned int maxblocks;

		if (delalloc_blklen && blkoff == delalloc_blkoff) {
			if (size) {
				/* End of the current extent */
				ret = fiemap_fill_next_extent(
					fieinfo, logical, phys, size, flags);
				if (ret)
					break;
			}
			if (blkoff > end_blkoff)
				break;

			flags = FIEMAP_EXTENT_MERGED | FIEMAP_EXTENT_DELALLOC;
			logical = blkoff << blkbits;
			phys = 0;
			size = delalloc_blklen << blkbits;

			blkoff = delalloc_blkoff + delalloc_blklen;
			delalloc_blklen = nilfs_find_uncommitted_extent(
				inode, blkoff, &delalloc_blkoff);
			continue;
		}

		/*
		 * Limit the number of blocks that we look up so as
		 * not to get into the next delayed allocation extent.
		 */
		maxblocks = INT_MAX;
		if (delalloc_blklen)
			maxblocks = min_t(sector_t, delalloc_blkoff - blkoff,
					  maxblocks);
		blkphy = 0;

		down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
		n = nilfs_bmap_lookup_contig(
			NILFS_I(inode)->i_bmap, blkoff, &blkphy, maxblocks);
		up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);

		if (n < 0) {
			int past_eof;

			if (unlikely(n != -ENOENT))
				break; /* error */

			/* HOLE */
			blkoff++;
			past_eof = ((blkoff << blkbits) >= isize);

			if (size) {
				/* End of the current extent */

				if (past_eof)
					flags |= FIEMAP_EXTENT_LAST;

				ret = fiemap_fill_next_extent(
					fieinfo, logical, phys, size, flags);
				if (ret)
					break;
				size = 0;
			}
			if (blkoff > end_blkoff || past_eof)
				break;
		} else {
			if (size) {
				if (phys && blkphy << blkbits == phys + size) {
					/* The current extent goes on */
					size += n << blkbits;
				} else {
					/* Terminate the current extent */
					ret = fiemap_fill_next_extent(
						fieinfo, logical, phys, size,
						flags);
					if (ret || blkoff > end_blkoff)
						break;

					/* Start another extent */
					flags = FIEMAP_EXTENT_MERGED;
					logical = blkoff << blkbits;
					phys = blkphy << blkbits;
					size = n << blkbits;
				}
			} else {
				/* Start a new extent */
				flags = FIEMAP_EXTENT_MERGED;
				logical = blkoff << blkbits;
				phys = blkphy << blkbits;
				size = n << blkbits;
			}
			blkoff += n;
		}
		cond_resched();
	} while (true);

	/* If ret is 1 then we just hit the end of the extent array */
	if (ret == 1)
		ret = 0;

	inode_unlock(inode);
	return ret;
}