2 * mdt.c - meta data file for NILFS
4 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
20 * Written by Ryusuke Konishi <ryusuke@osrg.net>
23 #include <linux/buffer_head.h>
24 #include <linux/mpage.h>
26 #include <linux/writeback.h>
27 #include <linux/backing-dev.h>
28 #include <linux/swap.h>
29 #include <linux/slab.h>
36 #include <trace/events/nilfs2.h>
38 #define NILFS_MDT_MAX_RA_BLOCKS (16 - 1)
42 nilfs_mdt_insert_new_block(struct inode
*inode
, unsigned long block
,
43 struct buffer_head
*bh
,
44 void (*init_block
)(struct inode
*,
45 struct buffer_head
*, void *))
47 struct nilfs_inode_info
*ii
= NILFS_I(inode
);
51 /* Caller exclude read accesses using page lock */
53 /* set_buffer_new(bh); */
56 ret
= nilfs_bmap_insert(ii
->i_bmap
, block
, (unsigned long)bh
);
60 set_buffer_mapped(bh
);
62 kaddr
= kmap_atomic(bh
->b_page
);
63 memset(kaddr
+ bh_offset(bh
), 0, 1 << inode
->i_blkbits
);
65 init_block(inode
, bh
, kaddr
);
66 flush_dcache_page(bh
->b_page
);
69 set_buffer_uptodate(bh
);
70 mark_buffer_dirty(bh
);
71 nilfs_mdt_mark_dirty(inode
);
73 trace_nilfs2_mdt_insert_new_block(inode
, inode
->i_ino
, block
);
78 static int nilfs_mdt_create_block(struct inode
*inode
, unsigned long block
,
79 struct buffer_head
**out_bh
,
80 void (*init_block
)(struct inode
*,
84 struct super_block
*sb
= inode
->i_sb
;
85 struct nilfs_transaction_info ti
;
86 struct buffer_head
*bh
;
89 nilfs_transaction_begin(sb
, &ti
, 0);
92 bh
= nilfs_grab_buffer(inode
, inode
->i_mapping
, block
, 0);
97 if (buffer_uptodate(bh
))
101 if (buffer_uptodate(bh
))
104 bh
->b_bdev
= sb
->s_bdev
;
105 err
= nilfs_mdt_insert_new_block(inode
, block
, bh
, init_block
);
112 unlock_page(bh
->b_page
);
113 page_cache_release(bh
->b_page
);
118 err
= nilfs_transaction_commit(sb
);
120 nilfs_transaction_abort(sb
);
126 nilfs_mdt_submit_block(struct inode
*inode
, unsigned long blkoff
,
127 int mode
, struct buffer_head
**out_bh
)
129 struct buffer_head
*bh
;
133 bh
= nilfs_grab_buffer(inode
, inode
->i_mapping
, blkoff
, 0);
137 ret
= -EEXIST
; /* internal code */
138 if (buffer_uptodate(bh
))
142 if (!trylock_buffer(bh
)) {
146 } else /* mode == READ */
149 if (buffer_uptodate(bh
)) {
154 ret
= nilfs_bmap_lookup(NILFS_I(inode
)->i_bmap
, blkoff
, &blknum
);
159 map_bh(bh
, inode
->i_sb
, (sector_t
)blknum
);
161 bh
->b_end_io
= end_buffer_read_sync
;
166 trace_nilfs2_mdt_submit_block(inode
, inode
->i_ino
, blkoff
, mode
);
172 unlock_page(bh
->b_page
);
173 page_cache_release(bh
->b_page
);
179 static int nilfs_mdt_read_block(struct inode
*inode
, unsigned long block
,
180 int readahead
, struct buffer_head
**out_bh
)
182 struct buffer_head
*first_bh
, *bh
;
183 unsigned long blkoff
;
184 int i
, nr_ra_blocks
= NILFS_MDT_MAX_RA_BLOCKS
;
187 err
= nilfs_mdt_submit_block(inode
, block
, READ
, &first_bh
);
188 if (err
== -EEXIST
) /* internal code */
196 for (i
= 0; i
< nr_ra_blocks
; i
++, blkoff
++) {
197 err
= nilfs_mdt_submit_block(inode
, blkoff
, READA
, &bh
);
198 if (likely(!err
|| err
== -EEXIST
))
200 else if (err
!= -EBUSY
)
202 /* abort readahead if bmap lookup failed */
203 if (!buffer_locked(first_bh
))
208 wait_on_buffer(first_bh
);
212 if (!buffer_uptodate(first_bh
))
225 * nilfs_mdt_get_block - read or create a buffer on meta data file.
226 * @inode: inode of the meta data file
227 * @blkoff: block offset
228 * @create: create flag
229 * @init_block: initializer used for newly allocated block
230 * @out_bh: output of a pointer to the buffer_head
232 * nilfs_mdt_get_block() looks up the specified buffer and tries to create
233 * a new buffer if @create is not zero. On success, the returned buffer is
234 * assured to be either existing or formatted using a buffer lock on success.
235 * @out_bh is substituted only when zero is returned.
237 * Return Value: On success, it returns 0. On error, the following negative
238 * error code is returned.
240 * %-ENOMEM - Insufficient memory available.
244 * %-ENOENT - the specified block does not exist (hole block)
246 * %-EROFS - Read only filesystem (for create mode)
248 int nilfs_mdt_get_block(struct inode
*inode
, unsigned long blkoff
, int create
,
249 void (*init_block
)(struct inode
*,
250 struct buffer_head
*, void *),
251 struct buffer_head
**out_bh
)
255 /* Should be rewritten with merging nilfs_mdt_read_block() */
257 ret
= nilfs_mdt_read_block(inode
, blkoff
, !create
, out_bh
);
258 if (!create
|| ret
!= -ENOENT
)
261 ret
= nilfs_mdt_create_block(inode
, blkoff
, out_bh
, init_block
);
262 if (unlikely(ret
== -EEXIST
)) {
263 /* create = 0; */ /* limit read-create loop retries */
270 * nilfs_mdt_find_block - find and get a buffer on meta data file.
271 * @inode: inode of the meta data file
272 * @start: start block offset (inclusive)
273 * @end: end block offset (inclusive)
274 * @blkoff: block offset
275 * @out_bh: place to store a pointer to buffer_head struct
277 * nilfs_mdt_find_block() looks up an existing block in range of
278 * [@start, @end] and stores pointer to a buffer head of the block to
279 * @out_bh, and block offset to @blkoff, respectively. @out_bh and
280 * @blkoff are substituted only when zero is returned.
282 * Return Value: On success, it returns 0. On error, the following negative
283 * error code is returned.
285 * %-ENOMEM - Insufficient memory available.
289 * %-ENOENT - no block was found in the range
291 int nilfs_mdt_find_block(struct inode
*inode
, unsigned long start
,
292 unsigned long end
, unsigned long *blkoff
,
293 struct buffer_head
**out_bh
)
298 if (unlikely(start
> end
))
301 ret
= nilfs_mdt_read_block(inode
, start
, true, out_bh
);
306 if (unlikely(ret
!= -ENOENT
|| start
== ULONG_MAX
))
309 ret
= nilfs_bmap_seek_key(NILFS_I(inode
)->i_bmap
, start
+ 1, &next
);
312 ret
= nilfs_mdt_read_block(inode
, next
, true, out_bh
);
324 * nilfs_mdt_delete_block - make a hole on the meta data file.
325 * @inode: inode of the meta data file
326 * @block: block offset
328 * Return Value: On success, zero is returned.
329 * On error, one of the following negative error code is returned.
331 * %-ENOMEM - Insufficient memory available.
335 int nilfs_mdt_delete_block(struct inode
*inode
, unsigned long block
)
337 struct nilfs_inode_info
*ii
= NILFS_I(inode
);
340 err
= nilfs_bmap_delete(ii
->i_bmap
, block
);
341 if (!err
|| err
== -ENOENT
) {
342 nilfs_mdt_mark_dirty(inode
);
343 nilfs_mdt_forget_block(inode
, block
);
349 * nilfs_mdt_forget_block - discard dirty state and try to remove the page
350 * @inode: inode of the meta data file
351 * @block: block offset
353 * nilfs_mdt_forget_block() clears a dirty flag of the specified buffer, and
354 * tries to release the page including the buffer from a page cache.
356 * Return Value: On success, 0 is returned. On error, one of the following
357 * negative error code is returned.
359 * %-EBUSY - page has an active buffer.
361 * %-ENOENT - page cache has no page addressed by the offset.
363 int nilfs_mdt_forget_block(struct inode
*inode
, unsigned long block
)
365 pgoff_t index
= (pgoff_t
)block
>>
366 (PAGE_CACHE_SHIFT
- inode
->i_blkbits
);
368 unsigned long first_block
;
372 page
= find_lock_page(inode
->i_mapping
, index
);
376 wait_on_page_writeback(page
);
378 first_block
= (unsigned long)index
<<
379 (PAGE_CACHE_SHIFT
- inode
->i_blkbits
);
380 if (page_has_buffers(page
)) {
381 struct buffer_head
*bh
;
383 bh
= nilfs_page_get_nth_block(page
, block
- first_block
);
384 nilfs_forget_buffer(bh
);
386 still_dirty
= PageDirty(page
);
388 page_cache_release(page
);
391 invalidate_inode_pages2_range(inode
->i_mapping
, index
, index
) != 0)
/**
 * nilfs_mdt_mark_block_dirty - mark a block on the meta data file dirty.
 * @inode: inode of the meta data file
 * @block: block offset
 *
 * Return Value: On success, it returns 0. On error, the following negative
 * error code is returned.
 *
 * %-ENOMEM - Insufficient memory available.
 *
 * %-EIO - I/O error
 *
 * %-ENOENT - the specified block does not exist (hole block)
 */
int nilfs_mdt_mark_block_dirty(struct inode *inode, unsigned long block)
{
	struct buffer_head *bh;
	int err;

	/* no readahead: only this one block is needed */
	err = nilfs_mdt_read_block(inode, block, 0, &bh);
	if (unlikely(err))
		return err;
	mark_buffer_dirty(bh);
	nilfs_mdt_mark_dirty(inode);
	brelse(bh);	/* drop the reference taken by the read */
	return 0;
}
424 int nilfs_mdt_fetch_dirty(struct inode
*inode
)
426 struct nilfs_inode_info
*ii
= NILFS_I(inode
);
428 if (nilfs_bmap_test_and_clear_dirty(ii
->i_bmap
)) {
429 set_bit(NILFS_I_DIRTY
, &ii
->i_state
);
432 return test_bit(NILFS_I_DIRTY
, &ii
->i_state
);
436 nilfs_mdt_write_page(struct page
*page
, struct writeback_control
*wbc
)
438 struct inode
*inode
= page
->mapping
->host
;
439 struct super_block
*sb
;
442 if (inode
&& (inode
->i_sb
->s_flags
& MS_RDONLY
)) {
444 * It means that filesystem was remounted in read-only
445 * mode because of error or metadata corruption. But we
446 * have dirty pages that try to be flushed in background.
447 * So, here we simply discard this dirty page.
449 nilfs_clear_dirty_page(page
, false);
454 redirty_page_for_writepage(wbc
, page
);
462 if (wbc
->sync_mode
== WB_SYNC_ALL
)
463 err
= nilfs_construct_segment(sb
);
464 else if (wbc
->for_reclaim
)
465 nilfs_flush_segment(sb
, inode
->i_ino
);
471 static const struct address_space_operations def_mdt_aops
= {
472 .writepage
= nilfs_mdt_write_page
,
475 static const struct inode_operations def_mdt_iops
;
476 static const struct file_operations def_mdt_fops
;
479 int nilfs_mdt_init(struct inode
*inode
, gfp_t gfp_mask
, size_t objsz
)
481 struct nilfs_mdt_info
*mi
;
483 mi
= kzalloc(max(sizeof(*mi
), objsz
), GFP_NOFS
);
487 init_rwsem(&mi
->mi_sem
);
488 inode
->i_private
= mi
;
490 inode
->i_mode
= S_IFREG
;
491 mapping_set_gfp_mask(inode
->i_mapping
, gfp_mask
);
493 inode
->i_op
= &def_mdt_iops
;
494 inode
->i_fop
= &def_mdt_fops
;
495 inode
->i_mapping
->a_ops
= &def_mdt_aops
;
500 void nilfs_mdt_set_entry_size(struct inode
*inode
, unsigned entry_size
,
501 unsigned header_size
)
503 struct nilfs_mdt_info
*mi
= NILFS_MDT(inode
);
505 mi
->mi_entry_size
= entry_size
;
506 mi
->mi_entries_per_block
= (1 << inode
->i_blkbits
) / entry_size
;
507 mi
->mi_first_entry_offset
= DIV_ROUND_UP(header_size
, entry_size
);
511 * nilfs_mdt_setup_shadow_map - setup shadow map and bind it to metadata file
512 * @inode: inode of the metadata file
513 * @shadow: shadow mapping
515 int nilfs_mdt_setup_shadow_map(struct inode
*inode
,
516 struct nilfs_shadow_map
*shadow
)
518 struct nilfs_mdt_info
*mi
= NILFS_MDT(inode
);
520 INIT_LIST_HEAD(&shadow
->frozen_buffers
);
521 address_space_init_once(&shadow
->frozen_data
);
522 nilfs_mapping_init(&shadow
->frozen_data
, inode
);
523 address_space_init_once(&shadow
->frozen_btnodes
);
524 nilfs_mapping_init(&shadow
->frozen_btnodes
, inode
);
525 mi
->mi_shadow
= shadow
;
530 * nilfs_mdt_save_to_shadow_map - copy bmap and dirty pages to shadow map
531 * @inode: inode of the metadata file
533 int nilfs_mdt_save_to_shadow_map(struct inode
*inode
)
535 struct nilfs_mdt_info
*mi
= NILFS_MDT(inode
);
536 struct nilfs_inode_info
*ii
= NILFS_I(inode
);
537 struct nilfs_shadow_map
*shadow
= mi
->mi_shadow
;
540 ret
= nilfs_copy_dirty_pages(&shadow
->frozen_data
, inode
->i_mapping
);
544 ret
= nilfs_copy_dirty_pages(&shadow
->frozen_btnodes
,
545 &ii
->i_btnode_cache
);
549 nilfs_bmap_save(ii
->i_bmap
, &shadow
->bmap_store
);
554 int nilfs_mdt_freeze_buffer(struct inode
*inode
, struct buffer_head
*bh
)
556 struct nilfs_shadow_map
*shadow
= NILFS_MDT(inode
)->mi_shadow
;
557 struct buffer_head
*bh_frozen
;
559 int blkbits
= inode
->i_blkbits
;
561 page
= grab_cache_page(&shadow
->frozen_data
, bh
->b_page
->index
);
565 if (!page_has_buffers(page
))
566 create_empty_buffers(page
, 1 << blkbits
, 0);
568 bh_frozen
= nilfs_page_get_nth_block(page
, bh_offset(bh
) >> blkbits
);
570 if (!buffer_uptodate(bh_frozen
))
571 nilfs_copy_buffer(bh_frozen
, bh
);
572 if (list_empty(&bh_frozen
->b_assoc_buffers
)) {
573 list_add_tail(&bh_frozen
->b_assoc_buffers
,
574 &shadow
->frozen_buffers
);
575 set_buffer_nilfs_redirected(bh
);
577 brelse(bh_frozen
); /* already frozen */
581 page_cache_release(page
);
586 nilfs_mdt_get_frozen_buffer(struct inode
*inode
, struct buffer_head
*bh
)
588 struct nilfs_shadow_map
*shadow
= NILFS_MDT(inode
)->mi_shadow
;
589 struct buffer_head
*bh_frozen
= NULL
;
593 page
= find_lock_page(&shadow
->frozen_data
, bh
->b_page
->index
);
595 if (page_has_buffers(page
)) {
596 n
= bh_offset(bh
) >> inode
->i_blkbits
;
597 bh_frozen
= nilfs_page_get_nth_block(page
, n
);
600 page_cache_release(page
);
605 static void nilfs_release_frozen_buffers(struct nilfs_shadow_map
*shadow
)
607 struct list_head
*head
= &shadow
->frozen_buffers
;
608 struct buffer_head
*bh
;
610 while (!list_empty(head
)) {
611 bh
= list_first_entry(head
, struct buffer_head
,
613 list_del_init(&bh
->b_assoc_buffers
);
614 brelse(bh
); /* drop ref-count to make it releasable */
619 * nilfs_mdt_restore_from_shadow_map - restore dirty pages and bmap state
620 * @inode: inode of the metadata file
622 void nilfs_mdt_restore_from_shadow_map(struct inode
*inode
)
624 struct nilfs_mdt_info
*mi
= NILFS_MDT(inode
);
625 struct nilfs_inode_info
*ii
= NILFS_I(inode
);
626 struct nilfs_shadow_map
*shadow
= mi
->mi_shadow
;
628 down_write(&mi
->mi_sem
);
630 if (mi
->mi_palloc_cache
)
631 nilfs_palloc_clear_cache(inode
);
633 nilfs_clear_dirty_pages(inode
->i_mapping
, true);
634 nilfs_copy_back_pages(inode
->i_mapping
, &shadow
->frozen_data
);
636 nilfs_clear_dirty_pages(&ii
->i_btnode_cache
, true);
637 nilfs_copy_back_pages(&ii
->i_btnode_cache
, &shadow
->frozen_btnodes
);
639 nilfs_bmap_restore(ii
->i_bmap
, &shadow
->bmap_store
);
641 up_write(&mi
->mi_sem
);
645 * nilfs_mdt_clear_shadow_map - truncate pages in shadow map caches
646 * @inode: inode of the metadata file
648 void nilfs_mdt_clear_shadow_map(struct inode
*inode
)
650 struct nilfs_mdt_info
*mi
= NILFS_MDT(inode
);
651 struct nilfs_shadow_map
*shadow
= mi
->mi_shadow
;
653 down_write(&mi
->mi_sem
);
654 nilfs_release_frozen_buffers(shadow
);
655 truncate_inode_pages(&shadow
->frozen_data
, 0);
656 truncate_inode_pages(&shadow
->frozen_btnodes
, 0);
657 up_write(&mi
->mi_sem
);