// SPDX-License-Identifier: GPL-2.0+
/*
 * NILFS inode operations.
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * Written by Ryusuke Konishi.
 */

#include <linux/buffer_head.h>
#include <linux/gfp.h>
#include <linux/mpage.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/uio.h>
#include <linux/fiemap.h>
#include <linux/random.h>
#include "nilfs.h"
#include "btnode.h"
#include "segment.h"
#include "page.h"
#include "mdt.h"
#include "cpfile.h"
#include "ifile.h"

/**
 * struct nilfs_iget_args - arguments used during comparison between inodes
 * @ino: inode number
 * @cno: checkpoint number
 * @root: pointer on NILFS root object (mounted checkpoint)
 * @type: inode type
 */
struct nilfs_iget_args {
	u64 ino;
	__u64 cno;
	struct nilfs_root *root;
	unsigned int type;
};

static int nilfs_iget_test(struct inode *inode, void *opaque);

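/*
 * nilfs_inode_add_blocks - charge newly allocated blocks to an inode
 * @inode: target inode
 * @n: number of blocks newly allocated
 *
 * Adds the byte count of @n blocks to the inode and, when the inode is
 * bound to a NILFS root object, to the per-checkpoint block counter.
 */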
void nilfs_inode_add_blocks(struct inode *inode, int n)
{
	struct nilfs_root *root = NILFS_I(inode)->i_root;

	inode_add_bytes(inode, i_blocksize(inode) * n);
	if (root)
		atomic64_add(n, &root->blocks_count);
}

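/*
 * nilfs_inode_sub_blocks - subtract freed blocks from an inode
 * @inode: target inode
 * @n: number of blocks freed
 *
 * Counterpart of nilfs_inode_add_blocks(); decreases the inode byte count
 * and the per-checkpoint block counter.
 */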
void nilfs_inode_sub_blocks(struct inode *inode, int n)
{
	struct nilfs_root *root = NILFS_I(inode)->i_root;

	inode_sub_bytes(inode, i_blocksize(inode) * n);
	if (root)
		atomic64_sub(n, &root->blocks_count);
}

/**
 * nilfs_get_block() - get a file block on the filesystem (callback function)
 * @inode: inode struct of the target file
 * @blkoff: file block number
 * @bh_result: buffer head to be mapped on
 * @create: indicate whether allocating the block or not when it has not
 *          been allocated yet.
 *
 * This function does not issue actual read request of the specified data
 * block. It is done by VFS.
 */
int nilfs_get_block(struct inode *inode, sector_t blkoff,
		    struct buffer_head *bh_result, int create)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
	__u64 blknum = 0;
	int err = 0, ret;
	unsigned int maxblocks = bh_result->b_size >> inode->i_blkbits;

	down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	ret = nilfs_bmap_lookup_contig(ii->i_bmap, blkoff, &blknum, maxblocks);
	up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	if (ret >= 0) {	/* found */
		map_bh(bh_result, inode->i_sb, blknum);
		if (ret > 0)
			bh_result->b_size = (ret << inode->i_blkbits);
		goto out;
	}
	/* data block was not found */
	if (ret == -ENOENT && create) {
		struct nilfs_transaction_info ti;

		bh_result->b_blocknr = 0;
		err = nilfs_transaction_begin(inode->i_sb, &ti, 1);
		if (unlikely(err))
			goto out;
		err = nilfs_bmap_insert(ii->i_bmap, blkoff,
					(unsigned long)bh_result);
		if (unlikely(err != 0)) {
			if (err == -EEXIST) {
				/*
				 * The get_block() function could be called
				 * from multiple callers for an inode.
				 * However, the page having this block must
				 * be locked in this case.
				 */
				nilfs_warn(inode->i_sb,
					   "%s (ino=%lu): a race condition while inserting a data block at offset=%llu",
					   __func__, inode->i_ino,
					   (unsigned long long)blkoff);
				err = -EAGAIN;
			}
			nilfs_transaction_abort(inode->i_sb);
			goto out;
		}
		nilfs_mark_inode_dirty_sync(inode);
		nilfs_transaction_commit(inode->i_sb); /* never fails */
		/* Error handling should be detailed */
		set_buffer_new(bh_result);
		set_buffer_delay(bh_result);
		map_bh(bh_result, inode->i_sb, 0);
		/* Disk block number must be changed to proper value */

	} else if (ret == -ENOENT) {
		/*
		 * not found is not error (e.g. hole); must return without
		 * the mapped state flag.
		 */
		;
	} else {
		err = ret;
	}

out:
	return err;
}

/**
 * nilfs_read_folio() - implement read_folio() method of nilfs_aops {}
 * address_space_operations.
 * @file: file struct of the file to be read
 * @folio: the folio to be read
 */
static int nilfs_read_folio(struct file *file, struct folio *folio)
{
	return mpage_read_folio(folio, nilfs_get_block);
}

static void nilfs_readahead(struct readahead_control *rac)
{
	mpage_readahead(rac, nilfs_get_block);
}

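/*
 * nilfs_writepages - writepages method of nilfs_aops
 *
 * On a read-only mount the dirty pages are simply discarded and -EROFS is
 * returned.  For WB_SYNC_ALL writeback, a data-sync log covering the
 * requested range is constructed; for other writeback modes nothing is
 * submitted here.
 */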
static int nilfs_writepages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	int err = 0;

	if (sb_rdonly(inode->i_sb)) {
		nilfs_clear_dirty_pages(mapping);
		return -EROFS;
	}

	if (wbc->sync_mode == WB_SYNC_ALL)
		err = nilfs_construct_dsync_segment(inode->i_sb, inode,
						    wbc->range_start,
						    wbc->range_end);
	return err;
}

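/*
 * nilfs_dirty_folio - dirty_folio method of nilfs_aops
 *
 * In addition to marking the folio dirty in the page cache, the buffers
 * attached to it are dirtied (hole blocks excepted) and the number of
 * newly dirtied blocks is registered with nilfs_set_file_dirty() so that
 * the file is picked up for log writing.
 */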
static bool nilfs_dirty_folio(struct address_space *mapping,
			      struct folio *folio)
{
	struct inode *inode = mapping->host;
	struct buffer_head *head;
	unsigned int nr_dirty = 0;
	bool ret = filemap_dirty_folio(mapping, folio);

	/*
	 * The page may not be locked, eg if called from try_to_unmap_one()
	 */
	spin_lock(&mapping->i_private_lock);
	head = folio_buffers(folio);
	if (head) {
		struct buffer_head *bh = head;

		do {
			/* Do not mark hole blocks dirty */
			if (buffer_dirty(bh) || !buffer_mapped(bh))
				continue;

			set_buffer_dirty(bh);
			nr_dirty++;
		} while (bh = bh->b_this_page, bh != head);
	} else if (ret) {
		nr_dirty = 1 << (folio_shift(folio) - inode->i_blkbits);
	}
	spin_unlock(&mapping->i_private_lock);

	if (nr_dirty)
		nilfs_set_file_dirty(inode, nr_dirty);
	return ret;
}

void nilfs_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;

	if (to > inode->i_size) {
		truncate_pagecache(inode, inode->i_size);
		nilfs_truncate(inode);
	}
}

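/*
 * nilfs_write_begin - write_begin method of nilfs_aops
 *
 * Opens a transaction before preparing the write so that the block
 * insertion done by nilfs_get_block() is covered by it.  On failure the
 * partially instantiated tail is trimmed via nilfs_write_failed() and the
 * transaction is aborted.
 */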
static int nilfs_write_begin(struct file *file, struct address_space *mapping,
			     loff_t pos, unsigned len,
			     struct folio **foliop, void **fsdata)
{
	struct inode *inode = mapping->host;
	int err = nilfs_transaction_begin(inode->i_sb, NULL, 1);

	if (unlikely(err))
		return err;

	err = block_write_begin(mapping, pos, len, foliop, nilfs_get_block);
	if (unlikely(err)) {
		nilfs_write_failed(mapping, pos + len);
		nilfs_transaction_abort(inode->i_sb);
	}
	return err;
}

static int nilfs_write_end(struct file *file, struct address_space *mapping,
			   loff_t pos, unsigned len, unsigned copied,
			   struct folio *folio, void *fsdata)
{
	struct inode *inode = mapping->host;
	unsigned int start = pos & (PAGE_SIZE - 1);
	unsigned int nr_dirty;
	int err;

	nr_dirty = nilfs_page_count_clean_buffers(folio, start,
						  start + copied);
	copied = generic_write_end(file, mapping, pos, len, copied, folio,
				   fsdata);
	nilfs_set_file_dirty(inode, nr_dirty);
	err = nilfs_transaction_commit(inode->i_sb);
	return err ? : copied;
}

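/*
 * nilfs_direct_IO - direct_IO method of nilfs_aops
 *
 * Direct writes fall back to buffered I/O by returning 0; only direct
 * reads are passed down to blockdev_direct_IO().
 */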
static ssize_t
nilfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	struct inode *inode = file_inode(iocb->ki_filp);

	if (iov_iter_rw(iter) == WRITE)
		return 0;

	/* Needs synchronization with the cleaner */
	return blockdev_direct_IO(iocb, inode, iter, nilfs_get_block);
}

const struct address_space_operations nilfs_aops = {
	.read_folio		= nilfs_read_folio,
	.writepages		= nilfs_writepages,
	.dirty_folio		= nilfs_dirty_folio,
	.readahead		= nilfs_readahead,
	.write_begin		= nilfs_write_begin,
	.write_end		= nilfs_write_end,
	.invalidate_folio	= block_invalidate_folio,
	.direct_IO		= nilfs_direct_IO,
	.migrate_folio		= buffer_migrate_folio_norefs,
	.is_partially_uptodate  = block_is_partially_uptodate,
};

const struct address_space_operations nilfs_buffer_cache_aops = {
	.invalidate_folio	= block_invalidate_folio,
};

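/*
 * nilfs_insert_inode_locked - attach a new inode to the inode hash, locked
 *
 * Thin wrapper around insert_inode_locked4() that supplies the comparison
 * arguments used by nilfs_iget_test() for a normal (non-GC, non-shadow)
 * inode of the given root.
 */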
static int nilfs_insert_inode_locked(struct inode *inode,
				     struct nilfs_root *root,
				     unsigned long ino)
{
	struct nilfs_iget_args args = {
		.ino = ino, .root = root, .cno = 0, .type = NILFS_I_TYPE_NORMAL
	};

	return insert_inode_locked4(inode, ino, nilfs_iget_test, &args);
}

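/*
 * nilfs_new_inode - allocate a new inode under the given directory
 *
 * Allocates an in-core inode, creates the corresponding ifile entry,
 * initializes ownership, timestamps and inherited flags, and hashes the
 * inode.  Returns the new inode or an ERR_PTR() value on failure.
 */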
struct inode *nilfs_new_inode(struct inode *dir, umode_t mode)
{
	struct super_block *sb = dir->i_sb;
	struct inode *inode;
	struct nilfs_inode_info *ii;
	struct nilfs_root *root;
	struct buffer_head *bh;
	int err = -ENOMEM;
	ino_t ino;

	inode = new_inode(sb);
	if (unlikely(!inode))
		goto failed;

	mapping_set_gfp_mask(inode->i_mapping,
			     mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS));

	root = NILFS_I(dir)->i_root;
	ii = NILFS_I(inode);
	ii->i_state = BIT(NILFS_I_NEW);
	ii->i_type = NILFS_I_TYPE_NORMAL;
	ii->i_root = root;

	err = nilfs_ifile_create_inode(root->ifile, &ino, &bh);
	if (unlikely(err))
		goto failed_ifile_create_inode;
	/* reference count of i_bh inherits from nilfs_mdt_read_block() */
	ii->i_bh = bh;

	atomic64_inc(&root->inodes_count);
	inode_init_owner(&nop_mnt_idmap, inode, dir, mode);
	inode->i_ino = ino;
	simple_inode_init_ts(inode);

	if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)) {
		err = nilfs_bmap_read(ii->i_bmap, NULL);
		if (err < 0)
			goto failed_after_creation;

		set_bit(NILFS_I_BMAP, &ii->i_state);
		/* No lock is needed; iget() ensures it. */
	}

	ii->i_flags = nilfs_mask_flags(
		mode, NILFS_I(dir)->i_flags & NILFS_FL_INHERITED);

	/* ii->i_file_acl = 0; */
	/* ii->i_dir_acl = 0; */
	ii->i_dir_start_lookup = 0;
	nilfs_set_inode_flags(inode);
	inode->i_generation = get_random_u32();
	if (nilfs_insert_inode_locked(inode, root, ino) < 0) {
		err = -EIO;
		goto failed_after_creation;
	}

	err = nilfs_init_acl(inode, dir);
	if (unlikely(err))
		/*
		 * Never occur.  When supporting nilfs_init_acl(),
		 * proper cancellation of above jobs should be considered.
		 */
		goto failed_after_creation;

	return inode;

failed_after_creation:
	clear_nlink(inode);
	if (inode->i_state & I_NEW)
		unlock_new_inode(inode);
	iput(inode);  /*
		       * raw_inode will be deleted through
		       * nilfs_evict_inode().
		       */
	goto failed;

failed_ifile_create_inode:
	make_bad_inode(inode);
	iput(inode);
failed:
	return ERR_PTR(err);
}

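/*
 * nilfs_set_inode_flags - propagate NILFS on-disk flags to VFS inode flags
 *
 * Translates the FS_*_FL flags stored in nilfs_inode_info into the
 * corresponding S_* bits of the VFS inode.
 */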
void nilfs_set_inode_flags(struct inode *inode)
{
	unsigned int flags = NILFS_I(inode)->i_flags;
	unsigned int new_fl = 0;

	if (flags & FS_SYNC_FL)
		new_fl |= S_SYNC;
	if (flags & FS_APPEND_FL)
		new_fl |= S_APPEND;
	if (flags & FS_IMMUTABLE_FL)
		new_fl |= S_IMMUTABLE;
	if (flags & FS_NOATIME_FL)
		new_fl |= S_NOATIME;
	if (flags & FS_DIRSYNC_FL)
		new_fl |= S_DIRSYNC;
	inode_set_flags(inode, new_fl, S_SYNC | S_APPEND | S_IMMUTABLE |
			S_NOATIME | S_DIRSYNC);
}

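/*
 * nilfs_read_inode_common - fill a VFS inode from an on-disk nilfs inode
 *
 * Copies mode, ownership, link count, size, timestamps, flags and the
 * generation number from @raw_inode, validates the inode, and reads the
 * bmap for regular files, directories and symlinks.
 */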
int nilfs_read_inode_common(struct inode *inode,
			    struct nilfs_inode *raw_inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	int err;

	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
	i_uid_write(inode, le32_to_cpu(raw_inode->i_uid));
	i_gid_write(inode, le32_to_cpu(raw_inode->i_gid));
	set_nlink(inode, le16_to_cpu(raw_inode->i_links_count));
	inode->i_size = le64_to_cpu(raw_inode->i_size);
	inode_set_atime(inode, le64_to_cpu(raw_inode->i_mtime),
			le32_to_cpu(raw_inode->i_mtime_nsec));
	inode_set_ctime(inode, le64_to_cpu(raw_inode->i_ctime),
			le32_to_cpu(raw_inode->i_ctime_nsec));
	inode_set_mtime(inode, le64_to_cpu(raw_inode->i_mtime),
			le32_to_cpu(raw_inode->i_mtime_nsec));
	if (nilfs_is_metadata_file_inode(inode) && !S_ISREG(inode->i_mode))
		return -EIO; /* this inode is for metadata and corrupted */
	if (inode->i_nlink == 0)
		return -ESTALE; /* this inode is deleted */

	inode->i_blocks = le64_to_cpu(raw_inode->i_blocks);
	ii->i_flags = le32_to_cpu(raw_inode->i_flags);
#if 0
	ii->i_file_acl = le32_to_cpu(raw_inode->i_file_acl);
	ii->i_dir_acl = S_ISREG(inode->i_mode) ?
		0 : le32_to_cpu(raw_inode->i_dir_acl);
#endif
	ii->i_dir_start_lookup = 0;
	inode->i_generation = le32_to_cpu(raw_inode->i_generation);

	if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
	    S_ISLNK(inode->i_mode)) {
		err = nilfs_bmap_read(ii->i_bmap, raw_inode);
		if (err < 0)
			return err;
		set_bit(NILFS_I_BMAP, &ii->i_state);
		/* No lock is needed; iget() ensures it. */
	}
	return 0;
}

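/*
 * __nilfs_read_inode - read an inode from the ifile and set it up
 *
 * Looks up the on-disk inode block under the DAT semaphore, fills the
 * in-core inode via nilfs_read_inode_common(), and installs the inode
 * and address space operations that match the file type.
 */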
static int __nilfs_read_inode(struct super_block *sb,
			      struct nilfs_root *root, unsigned long ino,
			      struct inode *inode)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct buffer_head *bh;
	struct nilfs_inode *raw_inode;
	int err;

	down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	err = nilfs_ifile_get_inode_block(root->ifile, ino, &bh);
	if (unlikely(err))
		goto bad_inode;

	raw_inode = nilfs_ifile_map_inode(root->ifile, ino, bh);

	err = nilfs_read_inode_common(inode, raw_inode);
	if (err)
		goto failed_unmap;

	if (S_ISREG(inode->i_mode)) {
		inode->i_op = &nilfs_file_inode_operations;
		inode->i_fop = &nilfs_file_operations;
		inode->i_mapping->a_ops = &nilfs_aops;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &nilfs_dir_inode_operations;
		inode->i_fop = &nilfs_dir_operations;
		inode->i_mapping->a_ops = &nilfs_aops;
	} else if (S_ISLNK(inode->i_mode)) {
		inode->i_op = &nilfs_symlink_inode_operations;
		inode_nohighmem(inode);
		inode->i_mapping->a_ops = &nilfs_aops;
	} else {
		inode->i_op = &nilfs_special_inode_operations;
		init_special_inode(
			inode, inode->i_mode,
			huge_decode_dev(le64_to_cpu(raw_inode->i_device_code)));
	}
	nilfs_ifile_unmap_inode(raw_inode);
	brelse(bh);
	up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	nilfs_set_inode_flags(inode);
	mapping_set_gfp_mask(inode->i_mapping,
			     mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS));
	return 0;

failed_unmap:
	nilfs_ifile_unmap_inode(raw_inode);
	brelse(bh);

bad_inode:
	up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	return err;
}

static int nilfs_iget_test(struct inode *inode, void *opaque)
{
	struct nilfs_iget_args *args = opaque;
	struct nilfs_inode_info *ii;

	if (args->ino != inode->i_ino || args->root != NILFS_I(inode)->i_root)
		return 0;

	ii = NILFS_I(inode);
	if (ii->i_type != args->type)
		return 0;

	return !(args->type & NILFS_I_TYPE_GC) || args->cno == ii->i_cno;
}

static int nilfs_iget_set(struct inode *inode, void *opaque)
{
	struct nilfs_iget_args *args = opaque;

	inode->i_ino = args->ino;
	NILFS_I(inode)->i_cno = args->cno;
	NILFS_I(inode)->i_root = args->root;
	NILFS_I(inode)->i_type = args->type;
	if (args->root && args->ino == NILFS_ROOT_INO)
		nilfs_get_root(args->root);
	return 0;
}

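/*
 * nilfs_ilookup - look up an inode in the inode cache without loading it
 *
 * Returns the cached inode of @root with the given inode number, or NULL
 * if it is not present in the inode hash.
 */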
struct inode *nilfs_ilookup(struct super_block *sb, struct nilfs_root *root,
			    unsigned long ino)
{
	struct nilfs_iget_args args = {
		.ino = ino, .root = root, .cno = 0, .type = NILFS_I_TYPE_NORMAL
	};

	return ilookup5(sb, ino, nilfs_iget_test, &args);
}

struct inode *nilfs_iget_locked(struct super_block *sb, struct nilfs_root *root,
				unsigned long ino)
{
	struct nilfs_iget_args args = {
		.ino = ino, .root = root, .cno = 0, .type = NILFS_I_TYPE_NORMAL
	};

	return iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args);
}

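/*
 * nilfs_iget - obtain an inode of a mounted checkpoint
 *
 * Returns the cached inode when available; otherwise reads it from the
 * ifile with __nilfs_read_inode().  Deleted inodes yield -ESTALE and
 * allocation failures -ENOMEM, both as ERR_PTR() values.
 */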
struct inode *nilfs_iget(struct super_block *sb, struct nilfs_root *root,
			 unsigned long ino)
{
	struct inode *inode;
	int err;

	inode = nilfs_iget_locked(sb, root, ino);
	if (unlikely(!inode))
		return ERR_PTR(-ENOMEM);

	if (!(inode->i_state & I_NEW)) {
		if (!inode->i_nlink) {
			iput(inode);
			return ERR_PTR(-ESTALE);
		}
		return inode;
	}

	err = __nilfs_read_inode(sb, root, ino, inode);
	if (unlikely(err)) {
		iget_failed(inode);
		return ERR_PTR(err);
	}
	unlock_new_inode(inode);
	return inode;
}

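/*
 * nilfs_iget_for_gc - obtain a rootless inode used by the garbage collector
 *
 * GC inodes are keyed by (ino, cno) rather than by a nilfs_root, so a
 * separate inode is instantiated per checkpoint and initialized with
 * nilfs_init_gcinode().
 */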
struct inode *nilfs_iget_for_gc(struct super_block *sb, unsigned long ino,
				__u64 cno)
{
	struct nilfs_iget_args args = {
		.ino = ino, .root = NULL, .cno = cno, .type = NILFS_I_TYPE_GC
	};
	struct inode *inode;
	int err;

	inode = iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args);
	if (unlikely(!inode))
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	err = nilfs_init_gcinode(inode);
	if (unlikely(err)) {
		iget_failed(inode);
		return ERR_PTR(err);
	}
	unlock_new_inode(inode);
	return inode;
}

/**
 * nilfs_attach_btree_node_cache - attach a B-tree node cache to the inode
 * @inode: inode object
 *
 * nilfs_attach_btree_node_cache() attaches a B-tree node cache to @inode,
 * or does nothing if the inode already has it.  This function allocates
 * an additional inode to maintain page cache of B-tree nodes one-on-one.
 *
 * Return Value: On success, 0 is returned. On errors, one of the following
 * negative error codes is returned.
 *
 * %-ENOMEM - Insufficient memory available.
 */
int nilfs_attach_btree_node_cache(struct inode *inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct inode *btnc_inode;
	struct nilfs_iget_args args;

	if (ii->i_assoc_inode)
		return 0;

	args.ino = inode->i_ino;
	args.root = ii->i_root;
	args.cno = ii->i_cno;
	args.type = ii->i_type | NILFS_I_TYPE_BTNC;

	btnc_inode = iget5_locked(inode->i_sb, inode->i_ino, nilfs_iget_test,
				  nilfs_iget_set, &args);
	if (unlikely(!btnc_inode))
		return -ENOMEM;
	if (btnc_inode->i_state & I_NEW) {
		nilfs_init_btnc_inode(btnc_inode);
		unlock_new_inode(btnc_inode);
	}
	NILFS_I(btnc_inode)->i_assoc_inode = inode;
	NILFS_I(btnc_inode)->i_bmap = ii->i_bmap;
	ii->i_assoc_inode = btnc_inode;

	return 0;
}

/**
 * nilfs_detach_btree_node_cache - detach the B-tree node cache from the inode
 * @inode: inode object
 *
 * nilfs_detach_btree_node_cache() detaches the B-tree node cache and its
 * holder inode bound to @inode, or does nothing if @inode doesn't have it.
 */
void nilfs_detach_btree_node_cache(struct inode *inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct inode *btnc_inode = ii->i_assoc_inode;

	if (btnc_inode) {
		NILFS_I(btnc_inode)->i_assoc_inode = NULL;
		ii->i_assoc_inode = NULL;
		iput(btnc_inode);
	}
}

/**
 * nilfs_iget_for_shadow - obtain inode for shadow mapping
 * @inode: inode object that uses shadow mapping
 *
 * nilfs_iget_for_shadow() allocates a pair of inodes that holds page
 * caches for shadow mapping.  The page cache for data pages is set up
 * in one inode and the one for b-tree node pages is set up in the
 * other inode, which is attached to the former inode.
 *
 * Return Value: On success, a pointer to the inode for data pages is
 * returned. On errors, one of the following negative error codes is
 * returned in a pointer type.
 *
 * %-ENOMEM - Insufficient memory available.
 */
struct inode *nilfs_iget_for_shadow(struct inode *inode)
{
	struct nilfs_iget_args args = {
		.ino = inode->i_ino, .root = NULL, .cno = 0,
		.type = NILFS_I_TYPE_SHADOW
	};
	struct inode *s_inode;
	int err;

	s_inode = iget5_locked(inode->i_sb, inode->i_ino, nilfs_iget_test,
			       nilfs_iget_set, &args);
	if (unlikely(!s_inode))
		return ERR_PTR(-ENOMEM);
	if (!(s_inode->i_state & I_NEW))
		return inode;

	NILFS_I(s_inode)->i_flags = 0;
	memset(NILFS_I(s_inode)->i_bmap, 0, sizeof(struct nilfs_bmap));
	mapping_set_gfp_mask(s_inode->i_mapping, GFP_NOFS);
	s_inode->i_mapping->a_ops = &nilfs_buffer_cache_aops;

	err = nilfs_attach_btree_node_cache(s_inode);
	if (unlikely(err)) {
		iget_failed(s_inode);
		return ERR_PTR(err);
	}
	unlock_new_inode(s_inode);

	return s_inode;
}

/**
 * nilfs_write_inode_common - export common inode information to on-disk inode
 * @inode:     inode object
 * @raw_inode: on-disk inode
 *
 * This function writes standard information from the on-memory inode @inode
 * to @raw_inode on ifile, cpfile or a super root block.  Since inode bmap
 * data is not exported, nilfs_bmap_write() must be called separately during
 * log writing.
 */
void nilfs_write_inode_common(struct inode *inode,
			      struct nilfs_inode *raw_inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);

	raw_inode->i_mode = cpu_to_le16(inode->i_mode);
	raw_inode->i_uid = cpu_to_le32(i_uid_read(inode));
	raw_inode->i_gid = cpu_to_le32(i_gid_read(inode));
	raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
	raw_inode->i_size = cpu_to_le64(inode->i_size);
	raw_inode->i_ctime = cpu_to_le64(inode_get_ctime_sec(inode));
	raw_inode->i_mtime = cpu_to_le64(inode_get_mtime_sec(inode));
	raw_inode->i_ctime_nsec = cpu_to_le32(inode_get_ctime_nsec(inode));
	raw_inode->i_mtime_nsec = cpu_to_le32(inode_get_mtime_nsec(inode));
	raw_inode->i_blocks = cpu_to_le64(inode->i_blocks);

	raw_inode->i_flags = cpu_to_le32(ii->i_flags);
	raw_inode->i_generation = cpu_to_le32(inode->i_generation);

	/*
	 * When extending inode, nilfs->ns_inode_size should be checked
	 * for substitutions of appended fields.
	 */
}

void nilfs_update_inode(struct inode *inode, struct buffer_head *ibh, int flags)
{
	ino_t ino = inode->i_ino;
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct inode *ifile = ii->i_root->ifile;
	struct nilfs_inode *raw_inode;

	raw_inode = nilfs_ifile_map_inode(ifile, ino, ibh);

	if (test_and_clear_bit(NILFS_I_NEW, &ii->i_state))
		memset(raw_inode, 0, NILFS_MDT(ifile)->mi_entry_size);
	if (flags & I_DIRTY_DATASYNC)
		set_bit(NILFS_I_INODE_SYNC, &ii->i_state);

	nilfs_write_inode_common(inode, raw_inode);

	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
		raw_inode->i_device_code =
			cpu_to_le64(huge_encode_dev(inode->i_rdev));

	nilfs_ifile_unmap_inode(raw_inode);
}

#define NILFS_MAX_TRUNCATE_BLOCKS	16384  /* 64MB for 4KB block */

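/*
 * nilfs_truncate_bmap - truncate the bmap of an inode down to a block offset
 *
 * Repeatedly truncates the bmap in chunks of at most
 * NILFS_MAX_TRUNCATE_BLOCKS blocks, relaxing segment-construction pressure
 * between the steps, until every block at or beyond @from is removed.
 */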
static void nilfs_truncate_bmap(struct nilfs_inode_info *ii,
				unsigned long from)
{
	__u64 b;
	int ret;

	if (!test_bit(NILFS_I_BMAP, &ii->i_state))
		return;
repeat:
	ret = nilfs_bmap_last_key(ii->i_bmap, &b);
	if (ret == -ENOENT)
		return;
	else if (ret < 0)
		goto failed;

	if (b < from)
		return;

	b -= min_t(__u64, NILFS_MAX_TRUNCATE_BLOCKS, b - from);
	ret = nilfs_bmap_truncate(ii->i_bmap, b);
	nilfs_relax_pressure_in_lock(ii->vfs_inode.i_sb);
	if (!ret || (ret == -ENOMEM &&
		     nilfs_bmap_truncate(ii->i_bmap, b) == 0))
		goto repeat;

failed:
	nilfs_warn(ii->vfs_inode.i_sb, "error %d truncating bmap (ino=%lu)",
		   ret, ii->vfs_inode.i_ino);
}

void nilfs_truncate(struct inode *inode)
{
	unsigned long blkoff;
	unsigned int blocksize;
	struct nilfs_transaction_info ti;
	struct super_block *sb = inode->i_sb;
	struct nilfs_inode_info *ii = NILFS_I(inode);

	if (!test_bit(NILFS_I_BMAP, &ii->i_state))
		return;
	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
		return;

	blocksize = sb->s_blocksize;
	blkoff = (inode->i_size + blocksize - 1) >> sb->s_blocksize_bits;
	nilfs_transaction_begin(sb, &ti, 0); /* never fails */

	block_truncate_page(inode->i_mapping, inode->i_size, nilfs_get_block);

	nilfs_truncate_bmap(ii, blkoff);

	inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
	if (IS_SYNC(inode))
		nilfs_set_transaction_flag(NILFS_TI_SYNC);

	nilfs_mark_inode_dirty(inode);
	nilfs_set_file_dirty(inode, 0);
	nilfs_transaction_commit(sb);
	/*
	 * May construct a logical segment and may fail in sync mode.
	 * But truncate has no return value.
	 */
}

static void nilfs_clear_inode(struct inode *inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);

	/*
	 * Free resources allocated in nilfs_read_inode(), here.
	 */
	BUG_ON(!list_empty(&ii->i_dirty));
	brelse(ii->i_bh);
	ii->i_bh = NULL;

	if (nilfs_is_metadata_file_inode(inode))
		nilfs_mdt_clear(inode);

	if (test_bit(NILFS_I_BMAP, &ii->i_state))
		nilfs_bmap_clear(ii->i_bmap);

	if (!(ii->i_type & NILFS_I_TYPE_BTNC))
		nilfs_detach_btree_node_cache(inode);

	if (ii->i_root && inode->i_ino == NILFS_ROOT_INO)
		nilfs_put_root(ii->i_root);
}

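/*
 * nilfs_evict_inode - evict_inode method for NILFS
 *
 * For inodes that still have links, have no root, or are bad, only the
 * page cache and in-core state are torn down.  Otherwise the bmap is
 * truncated and the ifile entry is deleted inside a transaction, unless
 * the filesystem has become read-only or the log writer is gone.
 */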
void nilfs_evict_inode(struct inode *inode)
{
	struct nilfs_transaction_info ti;
	struct super_block *sb = inode->i_sb;
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct the_nilfs *nilfs;
	int ret;

	if (inode->i_nlink || !ii->i_root || unlikely(is_bad_inode(inode))) {
		truncate_inode_pages_final(&inode->i_data);
		clear_inode(inode);
		nilfs_clear_inode(inode);
		return;
	}
	nilfs_transaction_begin(sb, &ti, 0); /* never fails */

	truncate_inode_pages_final(&inode->i_data);

	nilfs = sb->s_fs_info;
	if (unlikely(sb_rdonly(sb) || !nilfs->ns_writer)) {
		/*
		 * If this inode is about to be disposed after the file system
		 * has been degraded to read-only due to file system corruption
		 * or after the writer has been detached, do not make any
		 * changes that cause writes, just clear it.
		 * Do this check after read-locking ns_segctor_sem by
		 * nilfs_transaction_begin() in order to avoid a race with
		 * the writer detach operation.
		 */
		clear_inode(inode);
		nilfs_clear_inode(inode);
		nilfs_transaction_abort(sb);
		return;
	}

	/* TODO: some of the following operations may fail.  */
	nilfs_truncate_bmap(ii, 0);
	nilfs_mark_inode_dirty(inode);
	clear_inode(inode);

	ret = nilfs_ifile_delete_inode(ii->i_root->ifile, inode->i_ino);
	if (!ret)
		atomic64_dec(&ii->i_root->inodes_count);

	nilfs_clear_inode(inode);

	if (IS_SYNC(inode))
		nilfs_set_transaction_flag(NILFS_TI_SYNC);
	nilfs_transaction_commit(sb);
	/*
	 * May construct a logical segment and may fail in sync mode.
	 * But delete_inode has no return value.
	 */
}

int nilfs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
		  struct iattr *iattr)
{
	struct nilfs_transaction_info ti;
	struct inode *inode = d_inode(dentry);
	struct super_block *sb = inode->i_sb;
	int err;

	err = setattr_prepare(&nop_mnt_idmap, dentry, iattr);
	if (err)
		return err;

	err = nilfs_transaction_begin(sb, &ti, 0);
	if (unlikely(err))
		return err;

	if ((iattr->ia_valid & ATTR_SIZE) &&
	    iattr->ia_size != i_size_read(inode)) {
		inode_dio_wait(inode);
		truncate_setsize(inode, iattr->ia_size);
		nilfs_truncate(inode);
	}

	setattr_copy(&nop_mnt_idmap, inode, iattr);
	mark_inode_dirty(inode);

	if (iattr->ia_valid & ATTR_MODE) {
		err = nilfs_acl_chmod(inode);
		if (unlikely(err))
			goto out_err;
	}

	return nilfs_transaction_commit(sb);

out_err:
	nilfs_transaction_abort(sb);
	return err;
}

int nilfs_permission(struct mnt_idmap *idmap, struct inode *inode,
		     int mask)
{
	struct nilfs_root *root = NILFS_I(inode)->i_root;

	if ((mask & MAY_WRITE) && root &&
	    root->cno != NILFS_CPTREE_CURRENT_CNO)
		return -EROFS; /* snapshot is not writable */

	return generic_permission(&nop_mnt_idmap, inode, mask);
}

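/*
 * nilfs_load_inode_block - load and pin the ifile block holding an inode
 *
 * Returns the buffer head of the on-disk inode block with an extra
 * reference, caching it in nilfs_inode_info->i_bh under ns_inode_lock and
 * refreshing the cached buffer if it is no longer up to date.
 */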
int nilfs_load_inode_block(struct inode *inode, struct buffer_head **pbh)
{
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
	struct nilfs_inode_info *ii = NILFS_I(inode);
	int err;

	spin_lock(&nilfs->ns_inode_lock);
	if (ii->i_bh == NULL || unlikely(!buffer_uptodate(ii->i_bh))) {
		spin_unlock(&nilfs->ns_inode_lock);
		err = nilfs_ifile_get_inode_block(ii->i_root->ifile,
						  inode->i_ino, pbh);
		if (unlikely(err))
			return err;
		spin_lock(&nilfs->ns_inode_lock);
		if (ii->i_bh == NULL)
			ii->i_bh = *pbh;
		else if (unlikely(!buffer_uptodate(ii->i_bh))) {
			__brelse(ii->i_bh);
			ii->i_bh = *pbh;
		} else {
			brelse(*pbh);
			*pbh = ii->i_bh;
		}
	} else
		*pbh = ii->i_bh;

	get_bh(*pbh);
	spin_unlock(&nilfs->ns_inode_lock);
	return 0;
}

int nilfs_inode_dirty(struct inode *inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
	int ret = 0;

	if (!list_empty(&ii->i_dirty)) {
		spin_lock(&nilfs->ns_inode_lock);
		ret = test_bit(NILFS_I_DIRTY, &ii->i_state) ||
			test_bit(NILFS_I_BUSY, &ii->i_state);
		spin_unlock(&nilfs->ns_inode_lock);
	}
	return ret;
}

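/*
 * nilfs_set_file_dirty - register a file with dirty blocks for log writing
 *
 * Accounts @nr_dirty blocks in ns_ndirtyblks and, the first time the file
 * becomes dirty, grabs a reference to the inode and queues it on the
 * ns_dirty_files list.  Fails with -EINVAL when the inode is already being
 * freed.
 */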
int nilfs_set_file_dirty(struct inode *inode, unsigned int nr_dirty)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;

	atomic_add(nr_dirty, &nilfs->ns_ndirtyblks);

	if (test_and_set_bit(NILFS_I_DIRTY, &ii->i_state))
		return 0;

	spin_lock(&nilfs->ns_inode_lock);
	if (!test_bit(NILFS_I_QUEUED, &ii->i_state) &&
	    !test_bit(NILFS_I_BUSY, &ii->i_state)) {
		/*
		 * Because this routine may race with nilfs_dispose_list(),
		 * we have to check NILFS_I_QUEUED here, too.
		 */
		if (list_empty(&ii->i_dirty) && igrab(inode) == NULL) {
			/*
			 * This will happen when somebody is freeing
			 * this inode.
			 */
			nilfs_warn(inode->i_sb,
				   "cannot set file dirty (ino=%lu): the file is being freed",
				   inode->i_ino);
			spin_unlock(&nilfs->ns_inode_lock);
			return -EINVAL; /*
					 * NILFS_I_DIRTY may remain for
					 * freeing inode.
					 */
		}
		list_move_tail(&ii->i_dirty, &nilfs->ns_dirty_files);
		set_bit(NILFS_I_QUEUED, &ii->i_state);
	}
	spin_unlock(&nilfs->ns_inode_lock);
	return 0;
}

int __nilfs_mark_inode_dirty(struct inode *inode, int flags)
{
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
	struct buffer_head *ibh;
	int err;

	/*
	 * Do not dirty inodes after the log writer has been detached
	 * and its nilfs_root struct has been freed.
	 */
	if (unlikely(nilfs_purging(nilfs)))
		return 0;

	err = nilfs_load_inode_block(inode, &ibh);
	if (unlikely(err)) {
		nilfs_warn(inode->i_sb,
			   "cannot mark inode dirty (ino=%lu): error %d loading inode block",
			   inode->i_ino, err);
		return err;
	}
	nilfs_update_inode(inode, ibh, flags);
	mark_buffer_dirty(ibh);
	nilfs_mdt_mark_dirty(NILFS_I(inode)->i_root->ifile);
	brelse(ibh);
	return 0;
}

/**
 * nilfs_dirty_inode - reflect changes on given inode to an inode block.
 * @inode: inode of the file to be registered.
 * @flags: flags to determine the dirty state of the inode
 *
 * nilfs_dirty_inode() loads an inode block containing the specified
 * @inode and copies data from a nilfs_inode to a corresponding inode
 * entry in the inode block. This operation is excluded from the segment
 * construction. This function can be called both as a single operation
 * and as a part of indivisible file operations.
 */
void nilfs_dirty_inode(struct inode *inode, int flags)
{
	struct nilfs_transaction_info ti;
	struct nilfs_mdt_info *mdi = NILFS_MDT(inode);

	if (is_bad_inode(inode)) {
		nilfs_warn(inode->i_sb,
			   "tried to mark bad_inode dirty. ignored.");
		dump_stack();
		return;
	}
	if (mdi) {
		nilfs_mdt_mark_dirty(inode);
		return;
	}
	nilfs_transaction_begin(inode->i_sb, &ti, 0);
	__nilfs_mark_inode_dirty(inode, flags);
	nilfs_transaction_commit(inode->i_sb); /* never fails */
}

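/*
 * nilfs_fiemap - fiemap method for NILFS
 *
 * Walks the requested range, merging contiguous mapped blocks reported by
 * nilfs_bmap_lookup_contig() with delayed-allocation extents found by
 * nilfs_find_uncommitted_extent(), and reports each resulting extent via
 * fiemap_fill_next_extent().
 */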
int nilfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		 __u64 start, __u64 len)
{
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
	__u64 logical = 0, phys = 0, size = 0;
	__u32 flags = 0;
	loff_t isize;
	sector_t blkoff, end_blkoff;
	sector_t delalloc_blkoff;
	unsigned long delalloc_blklen;
	unsigned int blkbits = inode->i_blkbits;
	int ret, n;

	ret = fiemap_prep(inode, fieinfo, start, &len, 0);
	if (ret)
		return ret;

	inode_lock(inode);

	isize = i_size_read(inode);

	blkoff = start >> blkbits;
	end_blkoff = (start + len - 1) >> blkbits;

	delalloc_blklen = nilfs_find_uncommitted_extent(inode, blkoff,
							&delalloc_blkoff);

	do {
		__u64 blkphy;
		unsigned int maxblocks;

		if (delalloc_blklen && blkoff == delalloc_blkoff) {
			if (size) {
				/* End of the current extent */
				ret = fiemap_fill_next_extent(
					fieinfo, logical, phys, size, flags);
				if (ret)
					break;
			}
			if (blkoff > end_blkoff)
				break;

			flags = FIEMAP_EXTENT_MERGED | FIEMAP_EXTENT_DELALLOC;
			logical = blkoff << blkbits;
			phys = 0;
			size = delalloc_blklen << blkbits;

			blkoff = delalloc_blkoff + delalloc_blklen;
			delalloc_blklen = nilfs_find_uncommitted_extent(
				inode, blkoff, &delalloc_blkoff);
			continue;
		}

		/*
		 * Limit the number of blocks that we look up so as
		 * not to get into the next delayed allocation extent.
		 */
		maxblocks = INT_MAX;
		if (delalloc_blklen)
			maxblocks = min_t(sector_t, delalloc_blkoff - blkoff,
					  maxblocks);
		blkphy = 0;

		down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
		n = nilfs_bmap_lookup_contig(
			NILFS_I(inode)->i_bmap, blkoff, &blkphy, maxblocks);
		up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);

		if (n < 0) {
			int past_eof;

			if (unlikely(n != -ENOENT))
				break; /* error */

			/* HOLE */
			blkoff++;
			past_eof = ((blkoff << blkbits) >= isize);

			if (size) {
				/* End of the current extent */

				if (past_eof)
					flags |= FIEMAP_EXTENT_LAST;

				ret = fiemap_fill_next_extent(
					fieinfo, logical, phys, size, flags);
				if (ret)
					break;
				size = 0;
			}
			if (blkoff > end_blkoff || past_eof)
				break;
		} else {
			if (size) {
				if (phys && blkphy << blkbits == phys + size) {
					/* The current extent goes on */
					size += n << blkbits;
				} else {
					/* Terminate the current extent */
					ret = fiemap_fill_next_extent(
						fieinfo, logical, phys, size,
						flags);
					if (ret || blkoff > end_blkoff)
						break;

					/* Start another extent */
					flags = FIEMAP_EXTENT_MERGED;
					logical = blkoff << blkbits;
					phys = blkphy << blkbits;
					size = n << blkbits;
				}
			} else {
				/* Start a new extent */
				flags = FIEMAP_EXTENT_MERGED;
				logical = blkoff << blkbits;
				phys = blkphy << blkbits;
				size = n << blkbits;
			}
			blkoff += n;
		}
		cond_resched();
	} while (true);

	/* If ret is 1 then we just hit the end of the extent array */
	if (ret == 1)
		ret = 0;

	inode_unlock(inode);
	return ret;
}