/*
 * inode.c - NILFS inode operations.
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Written by Ryusuke Konishi.
 */
#include <linux/buffer_head.h>
#include <linux/gfp.h>
#include <linux/mpage.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/uio.h>
#include "nilfs.h"
#include "btnode.h"
#include "segment.h"
#include "page.h"
#include "mdt.h"
#include "cpfile.h"
#include "ifile.h"
/**
 * struct nilfs_iget_args - arguments used during comparison between inodes
 * @ino: inode number
 * @cno: checkpoint number
 * @root: pointer on NILFS root object (mounted checkpoint)
 * @for_gc: inode for GC flag
 */
struct nilfs_iget_args {
	u64 ino;
	__u64 cno;
	struct nilfs_root *root;
	int for_gc;
};
static int nilfs_iget_test(struct inode *inode, void *opaque);
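
/*
 * Illustrative sketch (added note, not part of the original file): the
 * lookup helpers below fill nilfs_iget_args the same way the
 * initializers later in this file do.  A plain lookup and a GC lookup
 * differ only in which fields are set:
 *
 *	struct nilfs_iget_args args = {
 *		.ino = ino, .root = root, .cno = 0, .for_gc = 0
 *	};
 *	struct nilfs_iget_args gc_args = {
 *		.ino = ino, .root = NULL, .cno = cno, .for_gc = 1
 *	};
 *
 * nilfs_iget_test() then matches an inode only when the inode number,
 * the root, and the GC/checkpoint fields all agree.
 */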
void nilfs_inode_add_blocks(struct inode *inode, int n)
{
	struct nilfs_root *root = NILFS_I(inode)->i_root;

	inode_add_bytes(inode, i_blocksize(inode) * n);
	if (root)
		atomic64_add(n, &root->blocks_count);
}
void nilfs_inode_sub_blocks(struct inode *inode, int n)
{
	struct nilfs_root *root = NILFS_I(inode)->i_root;

	inode_sub_bytes(inode, i_blocksize(inode) * n);
	if (root)
		atomic64_sub(n, &root->blocks_count);
}
/**
 * nilfs_get_block() - get a file block on the filesystem (callback function)
 * @inode - inode struct of the target file
 * @blkoff - file block number
 * @bh_result - buffer head to be mapped on
 * @create - indicate whether allocating the block or not when it has not
 *      been allocated yet.
 *
 * This function does not issue actual read request of the specified data
 * block. It is done by VFS.
 */
int nilfs_get_block(struct inode *inode, sector_t blkoff,
		    struct buffer_head *bh_result, int create)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
	__u64 blknum = 0;
	int err = 0, ret;
	unsigned int maxblocks = bh_result->b_size >> inode->i_blkbits;

	down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	ret = nilfs_bmap_lookup_contig(ii->i_bmap, blkoff, &blknum, maxblocks);
	up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	if (ret >= 0) {	/* found */
		map_bh(bh_result, inode->i_sb, blknum);
		if (ret > 0)
			bh_result->b_size = (ret << inode->i_blkbits);
		goto out;
	}
	/* data block was not found */
	if (ret == -ENOENT && create) {
		struct nilfs_transaction_info ti;

		bh_result->b_blocknr = 0;
		err = nilfs_transaction_begin(inode->i_sb, &ti, 1);
		if (unlikely(err))
			goto out;
		err = nilfs_bmap_insert(ii->i_bmap, blkoff,
					(unsigned long)bh_result);
		if (unlikely(err != 0)) {
			if (err == -EEXIST) {
				/*
				 * The get_block() function could be called
				 * from multiple callers for an inode.
				 * However, the page having this block must
				 * be locked in this case.
				 */
				nilfs_msg(inode->i_sb, KERN_WARNING,
					  "%s (ino=%lu): a race condition while inserting a data block at offset=%llu",
					  __func__, inode->i_ino,
					  (unsigned long long)blkoff);
				err = 0;
			}
			nilfs_transaction_abort(inode->i_sb);
			goto out;
		}
		nilfs_mark_inode_dirty_sync(inode);
		nilfs_transaction_commit(inode->i_sb); /* never fails */
		/* Error handling should be detailed */
		set_buffer_new(bh_result);
		set_buffer_delay(bh_result);
		map_bh(bh_result, inode->i_sb, 0);
		/* Disk block number must be changed to proper value */

	} else if (ret == -ENOENT) {
		/*
		 * not found is not error (e.g. hole); must return without
		 * the mapped state flag.
		 */
		;
	} else {
		err = ret;
	}

 out:
	return err;
}
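
/*
 * Added note (an assumption about the generic callers, not original
 * text): helpers such as mpage_readpage() and block_write_begin() act
 * only on the buffer state set above.  A mapped buffer is read from or
 * written to blknum, a mapped buffer that also has the delay bit is a
 * delayed allocation to be resolved by the segment constructor, and an
 * unmapped buffer is a hole that gets zero-filled.
 */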
/**
 * nilfs_readpage() - implement readpage() method of nilfs_aops {}
 * address_space_operations.
 * @file - file struct of the file to be read
 * @page - the page to be read
 */
static int nilfs_readpage(struct file *file, struct page *page)
{
	return mpage_readpage(page, nilfs_get_block);
}
/**
 * nilfs_readpages() - implement readpages() method of nilfs_aops {}
 * address_space_operations.
 * @file - file struct of the file to be read
 * @mapping - address_space struct used for reading multiple pages
 * @pages - the pages to be read
 * @nr_pages - number of pages to be read
 */
static int nilfs_readpages(struct file *file, struct address_space *mapping,
			   struct list_head *pages, unsigned int nr_pages)
{
	return mpage_readpages(mapping, pages, nr_pages, nilfs_get_block);
}
static int nilfs_writepages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	int err = 0;

	if (sb_rdonly(inode->i_sb)) {
		nilfs_clear_dirty_pages(mapping, false);
		return -EROFS;
	}

	if (wbc->sync_mode == WB_SYNC_ALL)
		err = nilfs_construct_dsync_segment(inode->i_sb, inode,
						    wbc->range_start,
						    wbc->range_end);
	return err;
}
static int nilfs_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	int err;

	if (sb_rdonly(inode->i_sb)) {
		/*
		 * It means that filesystem was remounted in read-only
		 * mode because of error or metadata corruption. But we
		 * have dirty pages that try to be flushed in background.
		 * So, here we simply discard this dirty page.
		 */
		nilfs_clear_dirty_page(page, false);
		unlock_page(page);
		return -EROFS;
	}

	redirty_page_for_writepage(wbc, page);
	unlock_page(page);

	if (wbc->sync_mode == WB_SYNC_ALL) {
		err = nilfs_construct_segment(inode->i_sb);
		if (unlikely(err))
			return err;
	} else if (wbc->for_reclaim)
		nilfs_flush_segment(inode->i_sb, inode->i_ino);

	return 0;
}
static int nilfs_set_page_dirty(struct page *page)
{
	struct inode *inode = page->mapping->host;
	int ret = __set_page_dirty_nobuffers(page);

	if (page_has_buffers(page)) {
		unsigned int nr_dirty = 0;
		struct buffer_head *bh, *head;

		/*
		 * This page is locked by callers, and no other thread
		 * concurrently marks its buffers dirty since they are
		 * only dirtied through routines in fs/buffer.c in
		 * which call sites of mark_buffer_dirty are protected
		 * by page lock.
		 */
		bh = head = page_buffers(page);
		do {
			/* Do not mark hole blocks dirty */
			if (buffer_dirty(bh) || !buffer_mapped(bh))
				continue;

			set_buffer_dirty(bh);
			nr_dirty++;
		} while (bh = bh->b_this_page, bh != head);

		if (nr_dirty)
			nilfs_set_file_dirty(inode, nr_dirty);
	} else if (ret) {
		unsigned int nr_dirty = 1 << (PAGE_SHIFT - inode->i_blkbits);

		nilfs_set_file_dirty(inode, nr_dirty);
	}
	return ret;
}
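
/*
 * Worked example (added note, not from the original file): with 4 KiB
 * pages (PAGE_SHIFT == 12) and a 1 KiB block size (i_blkbits == 10),
 * the bufferless branch above accounts 1 << (12 - 10) == 4 dirty
 * blocks, one per block-sized slice of the page.
 */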
void nilfs_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;

	if (to > inode->i_size) {
		truncate_pagecache(inode, inode->i_size);
		nilfs_truncate(inode);
	}
}
static int nilfs_write_begin(struct file *file, struct address_space *mapping,
			     loff_t pos, unsigned len, unsigned flags,
			     struct page **pagep, void **fsdata)

{
	struct inode *inode = mapping->host;
	int err = nilfs_transaction_begin(inode->i_sb, NULL, 1);

	if (unlikely(err))
		return err;

	err = block_write_begin(mapping, pos, len, flags, pagep,
				nilfs_get_block);
	if (unlikely(err)) {
		nilfs_write_failed(mapping, pos + len);
		nilfs_transaction_abort(inode->i_sb);
	}
	return err;
}
static int nilfs_write_end(struct file *file, struct address_space *mapping,
			   loff_t pos, unsigned len, unsigned copied,
			   struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;
	unsigned int start = pos & (PAGE_SIZE - 1);
	unsigned int nr_dirty;
	int err;

	nr_dirty = nilfs_page_count_clean_buffers(page, start,
						  start + copied);
	copied = generic_write_end(file, mapping, pos, len, copied, page,
				   fsdata);
	nilfs_set_file_dirty(inode, nr_dirty);
	err = nilfs_transaction_commit(inode->i_sb);
	return err ? : copied;
}
static ssize_t
nilfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	struct inode *inode = file_inode(iocb->ki_filp);

	if (iov_iter_rw(iter) == WRITE)
		return 0;

	/* Needs synchronization with the cleaner */
	return blockdev_direct_IO(iocb, inode, iter, nilfs_get_block);
}
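
/*
 * Added note (behaviour of the generic write path, stated as an
 * assumption rather than taken from this file): returning 0 for a WRITE
 * makes generic_file_direct_write() see nothing written, so the caller
 * falls back to the buffered write path and the data later reaches disk
 * through the segment constructor as usual.
 */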
const struct address_space_operations nilfs_aops = {
	.writepage		= nilfs_writepage,
	.readpage		= nilfs_readpage,
	.writepages		= nilfs_writepages,
	.set_page_dirty		= nilfs_set_page_dirty,
	.readpages		= nilfs_readpages,
	.write_begin		= nilfs_write_begin,
	.write_end		= nilfs_write_end,
	/* .releasepage		= nilfs_releasepage, */
	.invalidatepage		= block_invalidatepage,
	.direct_IO		= nilfs_direct_IO,
	.is_partially_uptodate	= block_is_partially_uptodate,
};
static int nilfs_insert_inode_locked(struct inode *inode,
				     struct nilfs_root *root,
				     unsigned long ino)
{
	struct nilfs_iget_args args = {
		.ino = ino, .root = root, .cno = 0, .for_gc = 0
	};

	return insert_inode_locked4(inode, ino, nilfs_iget_test, &args);
}
struct inode *nilfs_new_inode(struct inode *dir, umode_t mode)
{
	struct super_block *sb = dir->i_sb;
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct inode *inode;
	struct nilfs_inode_info *ii;
	struct nilfs_root *root;
	int err = -ENOMEM;
	ino_t ino;

	inode = new_inode(sb);
	if (unlikely(!inode))
		goto failed;

	mapping_set_gfp_mask(inode->i_mapping,
			   mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS));

	root = NILFS_I(dir)->i_root;
	ii = NILFS_I(inode);
	ii->i_state = BIT(NILFS_I_NEW);
	ii->i_root = root;

	err = nilfs_ifile_create_inode(root->ifile, &ino, &ii->i_bh);
	if (unlikely(err))
		goto failed_ifile_create_inode;
	/* reference count of i_bh inherits from nilfs_mdt_read_block() */

	atomic64_inc(&root->inodes_count);
	inode_init_owner(inode, dir, mode);
	inode->i_ino = ino;
	inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);

	if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)) {
		err = nilfs_bmap_read(ii->i_bmap, NULL);
		if (err < 0)
			goto failed_after_creation;

		set_bit(NILFS_I_BMAP, &ii->i_state);
		/* No lock is needed; iget() ensures it. */
	}

	ii->i_flags = nilfs_mask_flags(
		mode, NILFS_I(dir)->i_flags & NILFS_FL_INHERITED);

	/* ii->i_file_acl = 0; */
	/* ii->i_dir_acl = 0; */
	ii->i_dir_start_lookup = 0;
	nilfs_set_inode_flags(inode);
	spin_lock(&nilfs->ns_next_gen_lock);
	inode->i_generation = nilfs->ns_next_generation++;
	spin_unlock(&nilfs->ns_next_gen_lock);
	if (nilfs_insert_inode_locked(inode, root, ino) < 0) {
		err = -EIO;
		goto failed_after_creation;
	}

	err = nilfs_init_acl(inode, dir);
	if (unlikely(err))
		/*
		 * Never occur.  When supporting nilfs_init_acl(),
		 * proper cancellation of above jobs should be considered.
		 */
		goto failed_after_creation;

	return inode;

 failed_after_creation:
	clear_nlink(inode);
	unlock_new_inode(inode);
	iput(inode);  /*
		       * raw_inode will be deleted through
		       * nilfs_evict_inode().
		       */
	goto failed;

 failed_ifile_create_inode:
	make_bad_inode(inode);
	iput(inode);
 failed:
	return ERR_PTR(err);
}
void nilfs_set_inode_flags(struct inode *inode)
{
	unsigned int flags = NILFS_I(inode)->i_flags;
	unsigned int new_fl = 0;

	if (flags & FS_SYNC_FL)
		new_fl |= S_SYNC;
	if (flags & FS_APPEND_FL)
		new_fl |= S_APPEND;
	if (flags & FS_IMMUTABLE_FL)
		new_fl |= S_IMMUTABLE;
	if (flags & FS_NOATIME_FL)
		new_fl |= S_NOATIME;
	if (flags & FS_DIRSYNC_FL)
		new_fl |= S_DIRSYNC;
	inode_set_flags(inode, new_fl, S_SYNC | S_APPEND | S_IMMUTABLE |
			S_NOATIME | S_DIRSYNC);
}
int nilfs_read_inode_common(struct inode *inode,
			    struct nilfs_inode *raw_inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	int err;

	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
	i_uid_write(inode, le32_to_cpu(raw_inode->i_uid));
	i_gid_write(inode, le32_to_cpu(raw_inode->i_gid));
	set_nlink(inode, le16_to_cpu(raw_inode->i_links_count));
	inode->i_size = le64_to_cpu(raw_inode->i_size);
	inode->i_atime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
	inode->i_ctime.tv_sec = le64_to_cpu(raw_inode->i_ctime);
	inode->i_mtime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
	inode->i_atime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
	inode->i_ctime.tv_nsec = le32_to_cpu(raw_inode->i_ctime_nsec);
	inode->i_mtime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
	if (inode->i_nlink == 0)
		return -ESTALE; /* this inode is deleted */

	inode->i_blocks = le64_to_cpu(raw_inode->i_blocks);
	ii->i_flags = le32_to_cpu(raw_inode->i_flags);
#if 0
	ii->i_file_acl = le32_to_cpu(raw_inode->i_file_acl);
	ii->i_dir_acl = S_ISREG(inode->i_mode) ?
		0 : le32_to_cpu(raw_inode->i_dir_acl);
#endif
	ii->i_dir_start_lookup = 0;
	inode->i_generation = le32_to_cpu(raw_inode->i_generation);

	if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
	    S_ISLNK(inode->i_mode)) {
		err = nilfs_bmap_read(ii->i_bmap, raw_inode);
		if (err < 0)
			return err;
		set_bit(NILFS_I_BMAP, &ii->i_state);
		/* No lock is needed; iget() ensures it. */
	}
	return 0;
}
static int __nilfs_read_inode(struct super_block *sb,
			      struct nilfs_root *root, unsigned long ino,
			      struct inode *inode)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct buffer_head *bh;
	struct nilfs_inode *raw_inode;
	int err;

	down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	err = nilfs_ifile_get_inode_block(root->ifile, ino, &bh);
	if (unlikely(err))
		goto bad_inode;

	raw_inode = nilfs_ifile_map_inode(root->ifile, ino, bh);

	err = nilfs_read_inode_common(inode, raw_inode);
	if (err)
		goto failed_unmap;

	if (S_ISREG(inode->i_mode)) {
		inode->i_op = &nilfs_file_inode_operations;
		inode->i_fop = &nilfs_file_operations;
		inode->i_mapping->a_ops = &nilfs_aops;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &nilfs_dir_inode_operations;
		inode->i_fop = &nilfs_dir_operations;
		inode->i_mapping->a_ops = &nilfs_aops;
	} else if (S_ISLNK(inode->i_mode)) {
		inode->i_op = &nilfs_symlink_inode_operations;
		inode_nohighmem(inode);
		inode->i_mapping->a_ops = &nilfs_aops;
	} else {
		inode->i_op = &nilfs_special_inode_operations;
		init_special_inode(
			inode, inode->i_mode,
			huge_decode_dev(le64_to_cpu(raw_inode->i_device_code)));
	}
	nilfs_ifile_unmap_inode(root->ifile, ino, bh);
	brelse(bh);
	up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	nilfs_set_inode_flags(inode);
	mapping_set_gfp_mask(inode->i_mapping,
			   mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS));
	return 0;

 failed_unmap:
	nilfs_ifile_unmap_inode(root->ifile, ino, bh);
	brelse(bh);

 bad_inode:
	up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	return err;
}
static int nilfs_iget_test(struct inode *inode, void *opaque)
{
	struct nilfs_iget_args *args = opaque;
	struct nilfs_inode_info *ii;

	if (args->ino != inode->i_ino || args->root != NILFS_I(inode)->i_root)
		return 0;

	ii = NILFS_I(inode);
	if (!test_bit(NILFS_I_GCINODE, &ii->i_state))
		return !args->for_gc;

	return args->for_gc && args->cno == ii->i_cno;
}
static int nilfs_iget_set(struct inode *inode, void *opaque)
{
	struct nilfs_iget_args *args = opaque;

	inode->i_ino = args->ino;
	if (args->for_gc) {
		NILFS_I(inode)->i_state = BIT(NILFS_I_GCINODE);
		NILFS_I(inode)->i_cno = args->cno;
		NILFS_I(inode)->i_root = NULL;
	} else {
		if (args->root && args->ino == NILFS_ROOT_INO)
			nilfs_get_root(args->root);
		NILFS_I(inode)->i_root = args->root;
	}
	return 0;
}
struct inode *nilfs_ilookup(struct super_block *sb, struct nilfs_root *root,
			    unsigned long ino)
{
	struct nilfs_iget_args args = {
		.ino = ino, .root = root, .cno = 0, .for_gc = 0
	};

	return ilookup5(sb, ino, nilfs_iget_test, &args);
}
struct inode *nilfs_iget_locked(struct super_block *sb, struct nilfs_root *root,
				unsigned long ino)
{
	struct nilfs_iget_args args = {
		.ino = ino, .root = root, .cno = 0, .for_gc = 0
	};

	return iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args);
}
struct inode *nilfs_iget(struct super_block *sb, struct nilfs_root *root,
			 unsigned long ino)
{
	struct inode *inode;
	int err;

	inode = nilfs_iget_locked(sb, root, ino);
	if (unlikely(!inode))
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	err = __nilfs_read_inode(sb, root, ino, inode);
	if (unlikely(err)) {
		iget_failed(inode);
		return ERR_PTR(err);
	}
	unlock_new_inode(inode);
	return inode;
}
struct inode *nilfs_iget_for_gc(struct super_block *sb, unsigned long ino,
				__u64 cno)
{
	struct nilfs_iget_args args = {
		.ino = ino, .root = NULL, .cno = cno, .for_gc = 1
	};
	struct inode *inode;
	int err;

	inode = iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args);
	if (unlikely(!inode))
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	err = nilfs_init_gcinode(inode);
	if (unlikely(err)) {
		iget_failed(inode);
		return ERR_PTR(err);
	}
	unlock_new_inode(inode);
	return inode;
}
void nilfs_write_inode_common(struct inode *inode,
			      struct nilfs_inode *raw_inode, int has_bmap)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);

	raw_inode->i_mode = cpu_to_le16(inode->i_mode);
	raw_inode->i_uid = cpu_to_le32(i_uid_read(inode));
	raw_inode->i_gid = cpu_to_le32(i_gid_read(inode));
	raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
	raw_inode->i_size = cpu_to_le64(inode->i_size);
	raw_inode->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
	raw_inode->i_mtime = cpu_to_le64(inode->i_mtime.tv_sec);
	raw_inode->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
	raw_inode->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
	raw_inode->i_blocks = cpu_to_le64(inode->i_blocks);

	raw_inode->i_flags = cpu_to_le32(ii->i_flags);
	raw_inode->i_generation = cpu_to_le32(inode->i_generation);

	if (NILFS_ROOT_METADATA_FILE(inode->i_ino)) {
		struct the_nilfs *nilfs = inode->i_sb->s_fs_info;

		/* zero-fill unused portion in the case of super root block */
		raw_inode->i_xattr = 0;
		raw_inode->i_pad = 0;
		memset((void *)raw_inode + sizeof(*raw_inode), 0,
		       nilfs->ns_inode_size - sizeof(*raw_inode));
	}

	if (has_bmap)
		nilfs_bmap_write(ii->i_bmap, raw_inode);
	else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
		raw_inode->i_device_code =
			cpu_to_le64(huge_encode_dev(inode->i_rdev));
	/*
	 * When extending inode, nilfs->ns_inode_size should be checked
	 * for substitutions of appended fields.
	 */
}
void nilfs_update_inode(struct inode *inode, struct buffer_head *ibh, int flags)
{
	ino_t ino = inode->i_ino;
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct inode *ifile = ii->i_root->ifile;
	struct nilfs_inode *raw_inode;

	raw_inode = nilfs_ifile_map_inode(ifile, ino, ibh);

	if (test_and_clear_bit(NILFS_I_NEW, &ii->i_state))
		memset(raw_inode, 0, NILFS_MDT(ifile)->mi_entry_size);
	if (flags & I_DIRTY_DATASYNC)
		set_bit(NILFS_I_INODE_SYNC, &ii->i_state);

	nilfs_write_inode_common(inode, raw_inode, 0);
		/*
		 * XXX: call with has_bmap = 0 is a workaround to avoid
		 * deadlock of bmap.  This delays update of i_bmap to just
		 * before writing.
		 */

	nilfs_ifile_unmap_inode(ifile, ino, ibh);
}
#define NILFS_MAX_TRUNCATE_BLOCKS	16384  /* 64MB for 4KB block */
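
/*
 * Added note (simple arithmetic, not original text): 16384 blocks of
 * 4 KiB each is 64 MiB, which is where the "64MB for 4KB block" figure
 * above comes from; each nilfs_truncate_bmap() pass below trims at most
 * that much file data before relaxing lock pressure.
 */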
static void nilfs_truncate_bmap(struct nilfs_inode_info *ii,
				unsigned long from)
{
	__u64 b;
	int ret;

	if (!test_bit(NILFS_I_BMAP, &ii->i_state))
		return;
repeat:
	ret = nilfs_bmap_last_key(ii->i_bmap, &b);
	if (ret == -ENOENT)
		return;
	else if (ret < 0)
		goto failed;

	if (b < from)
		return;

	b -= min_t(__u64, NILFS_MAX_TRUNCATE_BLOCKS, b - from);
	ret = nilfs_bmap_truncate(ii->i_bmap, b);
	nilfs_relax_pressure_in_lock(ii->vfs_inode.i_sb);
	if (!ret || (ret == -ENOMEM &&
		     nilfs_bmap_truncate(ii->i_bmap, b) == 0))
		goto repeat;

failed:
	nilfs_msg(ii->vfs_inode.i_sb, KERN_WARNING,
		  "error %d truncating bmap (ino=%lu)", ret,
		  ii->vfs_inode.i_ino);
}
void nilfs_truncate(struct inode *inode)
{
	unsigned long blkoff;
	unsigned int blocksize;
	struct nilfs_transaction_info ti;
	struct super_block *sb = inode->i_sb;
	struct nilfs_inode_info *ii = NILFS_I(inode);

	if (!test_bit(NILFS_I_BMAP, &ii->i_state))
		return;
	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
		return;

	blocksize = sb->s_blocksize;
	blkoff = (inode->i_size + blocksize - 1) >> sb->s_blocksize_bits;
	nilfs_transaction_begin(sb, &ti, 0); /* never fails */

	block_truncate_page(inode->i_mapping, inode->i_size, nilfs_get_block);

	nilfs_truncate_bmap(ii, blkoff);

	inode->i_mtime = inode->i_ctime = current_time(inode);
	if (IS_SYNC(inode))
		nilfs_set_transaction_flag(NILFS_TI_SYNC);

	nilfs_mark_inode_dirty(inode);
	nilfs_set_file_dirty(inode, 0);
	nilfs_transaction_commit(sb);
	/*
	 * May construct a logical segment and may fail in sync mode.
	 * But truncate has no return value.
	 */
}
static void nilfs_clear_inode(struct inode *inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);

	/*
	 * Free resources allocated in nilfs_read_inode(), here.
	 */
	BUG_ON(!list_empty(&ii->i_dirty));
	brelse(ii->i_bh);
	ii->i_bh = NULL;

	if (nilfs_is_metadata_file_inode(inode))
		nilfs_mdt_clear(inode);

	if (test_bit(NILFS_I_BMAP, &ii->i_state))
		nilfs_bmap_clear(ii->i_bmap);

	nilfs_btnode_cache_clear(&ii->i_btnode_cache);

	if (ii->i_root && inode->i_ino == NILFS_ROOT_INO)
		nilfs_put_root(ii->i_root);
}
void nilfs_evict_inode(struct inode *inode)
{
	struct nilfs_transaction_info ti;
	struct super_block *sb = inode->i_sb;
	struct nilfs_inode_info *ii = NILFS_I(inode);
	int ret;

	if (inode->i_nlink || !ii->i_root || unlikely(is_bad_inode(inode))) {
		truncate_inode_pages_final(&inode->i_data);
		clear_inode(inode);
		nilfs_clear_inode(inode);
		return;
	}
	nilfs_transaction_begin(sb, &ti, 0); /* never fails */

	truncate_inode_pages_final(&inode->i_data);

	/* TODO: some of the following operations may fail.  */
	nilfs_truncate_bmap(ii, 0);
	nilfs_mark_inode_dirty(inode);
	clear_inode(inode);

	ret = nilfs_ifile_delete_inode(ii->i_root->ifile, inode->i_ino);
	if (!ret)
		atomic64_dec(&ii->i_root->inodes_count);

	nilfs_clear_inode(inode);

	if (IS_SYNC(inode))
		nilfs_set_transaction_flag(NILFS_TI_SYNC);
	nilfs_transaction_commit(sb);
	/*
	 * May construct a logical segment and may fail in sync mode.
	 * But delete_inode has no return value.
	 */
}
int nilfs_setattr(struct dentry *dentry, struct iattr *iattr)
{
	struct nilfs_transaction_info ti;
	struct inode *inode = d_inode(dentry);
	struct super_block *sb = inode->i_sb;
	int err;

	err = setattr_prepare(dentry, iattr);
	if (err)
		return err;

	err = nilfs_transaction_begin(sb, &ti, 0);
	if (unlikely(err))
		return err;

	if ((iattr->ia_valid & ATTR_SIZE) &&
	    iattr->ia_size != i_size_read(inode)) {
		inode_dio_wait(inode);
		truncate_setsize(inode, iattr->ia_size);
		nilfs_truncate(inode);
	}

	setattr_copy(inode, iattr);
	mark_inode_dirty(inode);

	if (iattr->ia_valid & ATTR_MODE) {
		err = nilfs_acl_chmod(inode);
		if (unlikely(err))
			goto out_err;
	}

	return nilfs_transaction_commit(sb);

out_err:
	nilfs_transaction_abort(sb);
	return err;
}
int nilfs_permission(struct inode *inode, int mask)
{
	struct nilfs_root *root = NILFS_I(inode)->i_root;

	if ((mask & MAY_WRITE) && root &&
	    root->cno != NILFS_CPTREE_CURRENT_CNO)
		return -EROFS; /* snapshot is not writable */

	return generic_permission(inode, mask);
}
int nilfs_load_inode_block(struct inode *inode, struct buffer_head **pbh)
{
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
	struct nilfs_inode_info *ii = NILFS_I(inode);
	int err;

	spin_lock(&nilfs->ns_inode_lock);
	if (ii->i_bh == NULL) {
		spin_unlock(&nilfs->ns_inode_lock);
		err = nilfs_ifile_get_inode_block(ii->i_root->ifile,
						  inode->i_ino, pbh);
		if (unlikely(err))
			return err;
		spin_lock(&nilfs->ns_inode_lock);
		if (ii->i_bh == NULL)
			ii->i_bh = *pbh;
		else {
			brelse(*pbh);
			*pbh = ii->i_bh;
		}
	} else
		*pbh = ii->i_bh;

	get_bh(*pbh);
	spin_unlock(&nilfs->ns_inode_lock);
	return 0;
}
int nilfs_inode_dirty(struct inode *inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
	int ret = 0;

	if (!list_empty(&ii->i_dirty)) {
		spin_lock(&nilfs->ns_inode_lock);
		ret = test_bit(NILFS_I_DIRTY, &ii->i_state) ||
			test_bit(NILFS_I_BUSY, &ii->i_state);
		spin_unlock(&nilfs->ns_inode_lock);
	}
	return ret;
}
int nilfs_set_file_dirty(struct inode *inode, unsigned int nr_dirty)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;

	atomic_add(nr_dirty, &nilfs->ns_ndirtyblks);

	if (test_and_set_bit(NILFS_I_DIRTY, &ii->i_state))
		return 0;

	spin_lock(&nilfs->ns_inode_lock);
	if (!test_bit(NILFS_I_QUEUED, &ii->i_state) &&
	    !test_bit(NILFS_I_BUSY, &ii->i_state)) {
		/*
		 * Because this routine may race with nilfs_dispose_list(),
		 * we have to check NILFS_I_QUEUED here, too.
		 */
		if (list_empty(&ii->i_dirty) && igrab(inode) == NULL) {
			/*
			 * This will happen when somebody is freeing
			 * this inode.
			 */
			nilfs_msg(inode->i_sb, KERN_WARNING,
				  "cannot set file dirty (ino=%lu): the file is being freed",
				  inode->i_ino);
			spin_unlock(&nilfs->ns_inode_lock);
			return -EINVAL; /*
					 * NILFS_I_DIRTY may remain for
					 * freeing inode.
					 */
		}
		list_move_tail(&ii->i_dirty, &nilfs->ns_dirty_files);
		set_bit(NILFS_I_QUEUED, &ii->i_state);
	}
	spin_unlock(&nilfs->ns_inode_lock);
	return 0;
}
int __nilfs_mark_inode_dirty(struct inode *inode, int flags)
{
	struct buffer_head *ibh;
	int err;

	err = nilfs_load_inode_block(inode, &ibh);
	if (unlikely(err)) {
		nilfs_msg(inode->i_sb, KERN_WARNING,
			  "cannot mark inode dirty (ino=%lu): error %d loading inode block",
			  inode->i_ino, err);
		return err;
	}
	nilfs_update_inode(inode, ibh, flags);
	mark_buffer_dirty(ibh);
	nilfs_mdt_mark_dirty(NILFS_I(inode)->i_root->ifile);
	brelse(ibh);
	return 0;
}
/**
 * nilfs_dirty_inode - reflect changes on given inode to an inode block.
 * @inode: inode of the file to be registered.
 *
 * nilfs_dirty_inode() loads an inode block containing the specified
 * @inode and copies data from a nilfs_inode to a corresponding inode
 * entry in the inode block. This operation is excluded from the segment
 * construction. This function can be called both as a single operation
 * and as a part of indivisible file operations.
 */
void nilfs_dirty_inode(struct inode *inode, int flags)
{
	struct nilfs_transaction_info ti;
	struct nilfs_mdt_info *mdi = NILFS_MDT(inode);

	if (is_bad_inode(inode)) {
		nilfs_msg(inode->i_sb, KERN_WARNING,
			  "tried to mark bad_inode dirty. ignored.");
		dump_stack();
		return;
	}
	if (mdi) {
		nilfs_mdt_mark_dirty(inode);
		return;
	}
	nilfs_transaction_begin(inode->i_sb, &ti, 0);
	__nilfs_mark_inode_dirty(inode, flags);
	nilfs_transaction_commit(inode->i_sb); /* never fails */
}
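
/*
 * Added usage note (an assumption about the calling convention, not
 * part of the original file): this function is wired up as the
 * super_operations->dirty_inode callback, so a mark_inode_dirty() on a
 * nilfs inode eventually funnels into the transaction wrapped around
 * __nilfs_mark_inode_dirty() above.
 */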
int nilfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		 __u64 start, __u64 len)
{
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
	__u64 logical = 0, phys = 0, size = 0;
	__u32 flags = 0;
	loff_t isize;
	sector_t blkoff, end_blkoff;
	sector_t delalloc_blkoff;
	unsigned long delalloc_blklen;
	unsigned int blkbits = inode->i_blkbits;
	int ret, n;

	ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC);
	if (ret)
		return ret;

	inode_lock(inode);

	isize = i_size_read(inode);

	blkoff = start >> blkbits;
	end_blkoff = (start + len - 1) >> blkbits;

	delalloc_blklen = nilfs_find_uncommitted_extent(inode, blkoff,
							&delalloc_blkoff);

	do {
		__u64 blkphy;
		unsigned int maxblocks;

		if (delalloc_blklen && blkoff == delalloc_blkoff) {
			if (size) {
				/* End of the current extent */
				ret = fiemap_fill_next_extent(
					fieinfo, logical, phys, size, flags);
				if (ret)
					break;
			}
			if (blkoff > end_blkoff)
				break;

			flags = FIEMAP_EXTENT_MERGED | FIEMAP_EXTENT_DELALLOC;
			logical = blkoff << blkbits;
			phys = 0;
			size = delalloc_blklen << blkbits;

			blkoff = delalloc_blkoff + delalloc_blklen;
			delalloc_blklen = nilfs_find_uncommitted_extent(
				inode, blkoff, &delalloc_blkoff);
			continue;
		}

		/*
		 * Limit the number of blocks that we look up so as
		 * not to get into the next delayed allocation extent.
		 */
		maxblocks = INT_MAX;
		if (delalloc_blklen)
			maxblocks = min_t(sector_t, delalloc_blkoff - blkoff,
					  maxblocks);
		blkphy = 0;

		down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
		n = nilfs_bmap_lookup_contig(
			NILFS_I(inode)->i_bmap, blkoff, &blkphy, maxblocks);
		up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);

		if (n < 0) {
			int past_eof;

			if (unlikely(n != -ENOENT))
				break; /* error */

			/* HOLE */
			blkoff++;
			past_eof = ((blkoff << blkbits) >= isize);

			if (size) {
				/* End of the current extent */

				if (past_eof)
					flags |= FIEMAP_EXTENT_LAST;

				ret = fiemap_fill_next_extent(
					fieinfo, logical, phys, size, flags);
				if (ret)
					break;
				size = 0;
			}
			if (blkoff > end_blkoff || past_eof)
				break;
		} else {
			if (size) {
				if (phys && blkphy << blkbits == phys + size) {
					/* The current extent goes on */
					size += n << blkbits;
				} else {
					/* Terminate the current extent */
					ret = fiemap_fill_next_extent(
						fieinfo, logical, phys, size,
						flags);
					if (ret || blkoff > end_blkoff)
						break;

					/* Start another extent */
					flags = FIEMAP_EXTENT_MERGED;
					logical = blkoff << blkbits;
					phys = blkphy << blkbits;
					size = n << blkbits;
				}
			} else {
				/* Start a new extent */
				flags = FIEMAP_EXTENT_MERGED;
				logical = blkoff << blkbits;
				phys = blkphy << blkbits;
				size = n << blkbits;
			}
			blkoff += n;
		}
		cond_resched();
	} while (true);

	/* If ret is 1 then we just hit the end of the extent array */
	if (ret == 1)
		ret = 0;

	inode_unlock(inode);
	return ret;
}