/*
 * segment.c - NILFS segment constructor.
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Written by Ryusuke Konishi <ryusuke@osrg.net>
 */
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/bitops.h>
#include <linux/bio.h>
#include <linux/completion.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/crc32.h>
#include <linux/pagevec.h>
#include <linux/slab.h>
#include "nilfs.h"
#include "btnode.h"
#include "page.h"
#include "segment.h"
#include "sufile.h"
#include "cpfile.h"
#include "ifile.h"
#include "segbuf.h"
#define SC_N_INODEVEC	16	/* Size of locally allocated inode vector */

#define SC_MAX_SEGDELTA	64	/* Upper limit of the number of segments
				   appended in collection retry loop */
/* Construction mode */
enum {
	SC_LSEG_SR = 1,	/* Make a logical segment having a super root */
	SC_LSEG_DSYNC,	/* Flush data blocks of a given file and make
			   a logical segment without a super root */
	SC_FLUSH_FILE,	/* Flush data files, leads to segment writes without
			   creating a checkpoint */
	SC_FLUSH_DAT,	/* Flush DAT file. This also creates segments without
			   creating a checkpoint */
};
/* Stage numbers of dirty block collection */
enum {
	NILFS_ST_INIT = 0,
	NILFS_ST_GC,		/* Collecting dirty blocks for GC */
	NILFS_ST_FILE,
	NILFS_ST_IFILE,
	NILFS_ST_CPFILE,
	NILFS_ST_SUFILE,
	NILFS_ST_DAT,
	NILFS_ST_SR,		/* Super root */
	NILFS_ST_DSYNC,		/* Data sync blocks */
	NILFS_ST_DONE,
};
/* State flags of collection */
#define NILFS_CF_NODE		0x0001	/* Collecting node blocks */
#define NILFS_CF_IFILE_STARTED	0x0002	/* IFILE stage has started */
#define NILFS_CF_SUFREED	0x0004	/* segment usages have been freed */
#define NILFS_CF_HISTORY_MASK	(NILFS_CF_IFILE_STARTED | NILFS_CF_SUFREED)
/* Operations depending on the construction mode and file type */
struct nilfs_sc_operations {
	int (*collect_data)(struct nilfs_sc_info *, struct buffer_head *,
			    struct inode *);
	int (*collect_node)(struct nilfs_sc_info *, struct buffer_head *,
			    struct inode *);
	int (*collect_bmap)(struct nilfs_sc_info *, struct buffer_head *,
			    struct inode *);
	void (*write_data_binfo)(struct nilfs_sc_info *,
				 struct nilfs_segsum_pointer *,
				 union nilfs_binfo *);
	void (*write_node_binfo)(struct nilfs_sc_info *,
				 struct nilfs_segsum_pointer *,
				 union nilfs_binfo *);
};
static void nilfs_segctor_start_timer(struct nilfs_sc_info *);
static void nilfs_segctor_do_flush(struct nilfs_sc_info *, int);
static void nilfs_segctor_do_immediate_flush(struct nilfs_sc_info *);
static void nilfs_dispose_list(struct the_nilfs *, struct list_head *, int);
#define nilfs_cnt32_gt(a, b)   \
	(typecheck(__u32, a) && typecheck(__u32, b) && \
	 ((__s32)(b) - (__s32)(a) < 0))
#define nilfs_cnt32_ge(a, b)   \
	(typecheck(__u32, a) && typecheck(__u32, b) && \
	 ((__s32)(a) - (__s32)(b) >= 0))
#define nilfs_cnt32_lt(a, b)	nilfs_cnt32_gt(b, a)
#define nilfs_cnt32_le(a, b)	nilfs_cnt32_ge(b, a)
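/*
 * Note: like the jiffies time_after() helpers, the nilfs_cnt32_*()
 * comparisons above cast the difference of two __u32 sequence numbers
 * to __s32, so they order correctly across counter wraparound as long
 * as the two values are less than 2^31 apart.
 */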
static int nilfs_prepare_segment_lock(struct nilfs_transaction_info *ti)
{
	struct nilfs_transaction_info *cur_ti = current->journal_info;
	void *save = NULL;

	if (cur_ti) {
		if (cur_ti->ti_magic == NILFS_TI_MAGIC)
			return ++cur_ti->ti_count;

		/*
		 * If journal_info field is occupied by other FS,
		 * it is saved and will be restored on
		 * nilfs_transaction_commit().
		 */
		printk(KERN_WARNING
		       "NILFS warning: journal info from a different FS\n");
		save = current->journal_info;
	}
	if (!ti) {
		ti = kmem_cache_alloc(nilfs_transaction_cachep, GFP_NOFS);
		if (!ti)
			return -ENOMEM;
		ti->ti_flags = NILFS_TI_DYNAMIC_ALLOC;
	} else {
		ti->ti_flags = 0;
	}
	ti->ti_count = 0;
	ti->ti_save = save;
	ti->ti_magic = NILFS_TI_MAGIC;
	current->journal_info = ti;
	return 0;
}
/**
 * nilfs_transaction_begin - start indivisible file operations.
 * @sb: super block
 * @ti: nilfs_transaction_info
 * @vacancy_check: flags for vacancy rate checks
 *
 * nilfs_transaction_begin() acquires a reader/writer semaphore, called
 * the segment semaphore, to make a segment construction and write tasks
 * exclusive. The function is used with nilfs_transaction_commit() in pairs.
 * The region enclosed by these two functions can be nested. To avoid a
 * deadlock, the semaphore is only acquired or released in the outermost call.
 *
 * This function allocates a nilfs_transaction_info struct to keep context
 * information on it. It is initialized and hooked onto the current task in
 * the outermost call. If a pre-allocated struct is given to @ti, it is used
 * instead; otherwise a new struct is assigned from a slab.
 *
 * When @vacancy_check flag is set, this function will check the amount of
 * free space, and will wait for the GC to reclaim disk space if low capacity.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-ENOMEM - Insufficient memory available.
 *
 * %-ENOSPC - No space left on device
 */
int nilfs_transaction_begin(struct super_block *sb,
			    struct nilfs_transaction_info *ti,
			    int vacancy_check)
{
	struct the_nilfs *nilfs;
	int ret = nilfs_prepare_segment_lock(ti);

	if (unlikely(ret < 0))
		return ret;
	if (ret > 0)
		return 0;

	sb_start_intwrite(sb);

	nilfs = sb->s_fs_info;
	down_read(&nilfs->ns_segctor_sem);
	if (vacancy_check && nilfs_near_disk_full(nilfs)) {
		up_read(&nilfs->ns_segctor_sem);
		ret = -ENOSPC;
		goto failed;
	}
	return 0;

 failed:
	ti = current->journal_info;
	current->journal_info = ti->ti_save;
	if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC)
		kmem_cache_free(nilfs_transaction_cachep, ti);
	sb_end_intwrite(sb);
	return ret;
}
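/*
 * Illustrative sketch (not taken from the original source): a typical
 * caller brackets an update with the pair
 *
 *	struct nilfs_transaction_info ti;
 *	int err = nilfs_transaction_begin(sb, &ti, 1);
 *
 *	if (!err) {
 *		... dirty blocks under the segment semaphore ...
 *		err = nilfs_transaction_commit(sb);
 *	}
 *
 * calling nilfs_transaction_abort(sb) instead of commit on failure.
 */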
/**
 * nilfs_transaction_commit - commit indivisible file operations.
 * @sb: super block
 *
 * nilfs_transaction_commit() releases the read semaphore which is
 * acquired by nilfs_transaction_begin(). This is only performed
 * in outermost call of this function. If a commit flag is set,
 * nilfs_transaction_commit() sets a timer to start the segment
 * constructor. If a sync flag is set, it starts construction
 * directly.
 */
int nilfs_transaction_commit(struct super_block *sb)
{
	struct nilfs_transaction_info *ti = current->journal_info;
	struct the_nilfs *nilfs = sb->s_fs_info;
	int err = 0;

	BUG_ON(ti == NULL || ti->ti_magic != NILFS_TI_MAGIC);
	ti->ti_flags |= NILFS_TI_COMMIT;
	if (ti->ti_count > 0) {
		ti->ti_count--;
		return 0;
	}
	if (nilfs->ns_writer) {
		struct nilfs_sc_info *sci = nilfs->ns_writer;

		if (ti->ti_flags & NILFS_TI_COMMIT)
			nilfs_segctor_start_timer(sci);
		if (atomic_read(&nilfs->ns_ndirtyblks) > sci->sc_watermark)
			nilfs_segctor_do_flush(sci, 0);
	}
	up_read(&nilfs->ns_segctor_sem);
	current->journal_info = ti->ti_save;

	if (ti->ti_flags & NILFS_TI_SYNC)
		err = nilfs_construct_segment(sb);
	if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC)
		kmem_cache_free(nilfs_transaction_cachep, ti);
	sb_end_intwrite(sb);
	return err;
}
void nilfs_transaction_abort(struct super_block *sb)
{
	struct nilfs_transaction_info *ti = current->journal_info;
	struct the_nilfs *nilfs = sb->s_fs_info;

	BUG_ON(ti == NULL || ti->ti_magic != NILFS_TI_MAGIC);
	if (ti->ti_count > 0) {
		ti->ti_count--;
		return;
	}
	up_read(&nilfs->ns_segctor_sem);

	current->journal_info = ti->ti_save;
	if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC)
		kmem_cache_free(nilfs_transaction_cachep, ti);
	sb_end_intwrite(sb);
}
void nilfs_relax_pressure_in_lock(struct super_block *sb)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct nilfs_sc_info *sci = nilfs->ns_writer;

	if (!sci || !sci->sc_flush_request)
		return;

	set_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags);
	up_read(&nilfs->ns_segctor_sem);

	down_write(&nilfs->ns_segctor_sem);
	if (sci->sc_flush_request &&
	    test_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags)) {
		struct nilfs_transaction_info *ti = current->journal_info;

		ti->ti_flags |= NILFS_TI_WRITER;
		nilfs_segctor_do_immediate_flush(sci);
		ti->ti_flags &= ~NILFS_TI_WRITER;
	}
	downgrade_write(&nilfs->ns_segctor_sem);
}
static void nilfs_transaction_lock(struct super_block *sb,
				   struct nilfs_transaction_info *ti,
				   int gcflag)
{
	struct nilfs_transaction_info *cur_ti = current->journal_info;
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct nilfs_sc_info *sci = nilfs->ns_writer;

	WARN_ON(cur_ti);
	ti->ti_flags = NILFS_TI_WRITER;
	ti->ti_count = 0;
	ti->ti_save = cur_ti;
	ti->ti_magic = NILFS_TI_MAGIC;
	current->journal_info = ti;

	for (;;) {
		down_write(&nilfs->ns_segctor_sem);
		if (!test_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags))
			break;

		nilfs_segctor_do_immediate_flush(sci);

		up_write(&nilfs->ns_segctor_sem);
		yield();
	}
	if (gcflag)
		ti->ti_flags |= NILFS_TI_GC;
}
static void nilfs_transaction_unlock(struct super_block *sb)
{
	struct nilfs_transaction_info *ti = current->journal_info;
	struct the_nilfs *nilfs = sb->s_fs_info;

	BUG_ON(ti == NULL || ti->ti_magic != NILFS_TI_MAGIC);
	BUG_ON(ti->ti_count > 0);

	up_write(&nilfs->ns_segctor_sem);
	current->journal_info = ti->ti_save;
}
static void *nilfs_segctor_map_segsum_entry(struct nilfs_sc_info *sci,
					    struct nilfs_segsum_pointer *ssp,
					    unsigned bytes)
{
	struct nilfs_segment_buffer *segbuf = sci->sc_curseg;
	unsigned blocksize = sci->sc_super->s_blocksize;
	void *p;

	if (unlikely(ssp->offset + bytes > blocksize)) {
		ssp->offset = 0;
		BUG_ON(NILFS_SEGBUF_BH_IS_LAST(ssp->bh,
					       &segbuf->sb_segsum_buffers));
		ssp->bh = NILFS_SEGBUF_NEXT_BH(ssp->bh);
	}
	p = ssp->bh->b_data + ssp->offset;
	ssp->offset += bytes;
	return p;
}
/**
 * nilfs_segctor_reset_segment_buffer - reset the current segment buffer
 * @sci: nilfs_sc_info
 */
static int nilfs_segctor_reset_segment_buffer(struct nilfs_sc_info *sci)
{
	struct nilfs_segment_buffer *segbuf = sci->sc_curseg;
	struct buffer_head *sumbh;
	unsigned sumbytes;
	unsigned flags = 0;
	int err;

	if (nilfs_doing_gc())
		flags = NILFS_SS_GC;
	err = nilfs_segbuf_reset(segbuf, flags, sci->sc_seg_ctime, sci->sc_cno);
	if (unlikely(err))
		return err;

	sumbh = NILFS_SEGBUF_FIRST_BH(&segbuf->sb_segsum_buffers);
	sumbytes = segbuf->sb_sum.sumbytes;
	sci->sc_finfo_ptr.bh = sumbh;  sci->sc_finfo_ptr.offset = sumbytes;
	sci->sc_binfo_ptr.bh = sumbh;  sci->sc_binfo_ptr.offset = sumbytes;
	sci->sc_blk_cnt = sci->sc_datablk_cnt = 0;
	return 0;
}
static int nilfs_segctor_feed_segment(struct nilfs_sc_info *sci)
{
	sci->sc_nblk_this_inc += sci->sc_curseg->sb_sum.nblocks;
	if (NILFS_SEGBUF_IS_LAST(sci->sc_curseg, &sci->sc_segbufs))
		return -E2BIG; /* The current segment is filled up
				  (internal code) */
	sci->sc_curseg = NILFS_NEXT_SEGBUF(sci->sc_curseg);
	return nilfs_segctor_reset_segment_buffer(sci);
}
static int nilfs_segctor_add_super_root(struct nilfs_sc_info *sci)
{
	struct nilfs_segment_buffer *segbuf = sci->sc_curseg;
	int err;

	if (segbuf->sb_sum.nblocks >= segbuf->sb_rest_blocks) {
		err = nilfs_segctor_feed_segment(sci);
		if (err)
			return err;
		segbuf = sci->sc_curseg;
	}
	err = nilfs_segbuf_extend_payload(segbuf, &segbuf->sb_super_root);
	if (likely(!err))
		segbuf->sb_sum.flags |= NILFS_SS_SR;
	return err;
}
/*
 * Functions for making segment summary and payloads
 */
static int nilfs_segctor_segsum_block_required(
	struct nilfs_sc_info *sci, const struct nilfs_segsum_pointer *ssp,
	unsigned binfo_size)
{
	unsigned blocksize = sci->sc_super->s_blocksize;
	/* Sizes of finfo and binfo are small enough compared with blocksize */

	return ssp->offset + binfo_size +
		(!sci->sc_blk_cnt ? sizeof(struct nilfs_finfo) : 0) >
		blocksize;
}
static void nilfs_segctor_begin_finfo(struct nilfs_sc_info *sci,
				      struct inode *inode)
{
	sci->sc_curseg->sb_sum.nfinfo++;
	sci->sc_binfo_ptr = sci->sc_finfo_ptr;
	nilfs_segctor_map_segsum_entry(
		sci, &sci->sc_binfo_ptr, sizeof(struct nilfs_finfo));

	if (NILFS_I(inode)->i_root &&
	    !test_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags))
		set_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags);
	/* skip finfo */
}
static void nilfs_segctor_end_finfo(struct nilfs_sc_info *sci,
				    struct inode *inode)
{
	struct nilfs_finfo *finfo;
	struct nilfs_inode_info *ii;
	struct nilfs_segment_buffer *segbuf;
	__u64 cno;

	if (sci->sc_blk_cnt == 0)
		return;

	ii = NILFS_I(inode);

	if (test_bit(NILFS_I_GCINODE, &ii->i_state))
		cno = ii->i_cno;
	else if (NILFS_ROOT_METADATA_FILE(inode->i_ino))
		cno = 0;
	else
		cno = sci->sc_cno;

	finfo = nilfs_segctor_map_segsum_entry(sci, &sci->sc_finfo_ptr,
					       sizeof(*finfo));
	finfo->fi_ino = cpu_to_le64(inode->i_ino);
	finfo->fi_nblocks = cpu_to_le32(sci->sc_blk_cnt);
	finfo->fi_ndatablk = cpu_to_le32(sci->sc_datablk_cnt);
	finfo->fi_cno = cpu_to_le64(cno);

	segbuf = sci->sc_curseg;
	segbuf->sb_sum.sumbytes = sci->sc_binfo_ptr.offset +
		sci->sc_super->s_blocksize * (segbuf->sb_sum.nsumblk - 1);
	sci->sc_finfo_ptr = sci->sc_binfo_ptr;
	sci->sc_blk_cnt = sci->sc_datablk_cnt = 0;
}
static int nilfs_segctor_add_file_block(struct nilfs_sc_info *sci,
					struct buffer_head *bh,
					struct inode *inode,
					unsigned binfo_size)
{
	struct nilfs_segment_buffer *segbuf;
	int required, err = 0;

 retry:
	segbuf = sci->sc_curseg;
	required = nilfs_segctor_segsum_block_required(
		sci, &sci->sc_binfo_ptr, binfo_size);
	if (segbuf->sb_sum.nblocks + required + 1 > segbuf->sb_rest_blocks) {
		nilfs_segctor_end_finfo(sci, inode);
		err = nilfs_segctor_feed_segment(sci);
		if (err)
			return err;
		goto retry;
	}
	if (unlikely(required)) {
		err = nilfs_segbuf_extend_segsum(segbuf);
		if (unlikely(err))
			goto failed;
	}
	if (sci->sc_blk_cnt == 0)
		nilfs_segctor_begin_finfo(sci, inode);

	nilfs_segctor_map_segsum_entry(sci, &sci->sc_binfo_ptr, binfo_size);
	/* Substitution to vblocknr is delayed until update_blocknr() */
	nilfs_segbuf_add_file_buffer(segbuf, bh);
	sci->sc_blk_cnt++;
 failed:
	return err;
}
/*
 * Callback functions that enumerate, mark, and collect dirty blocks
 */
static int nilfs_collect_file_data(struct nilfs_sc_info *sci,
				   struct buffer_head *bh, struct inode *inode)
{
	int err;

	err = nilfs_bmap_propagate(NILFS_I(inode)->i_bmap, bh);
	if (err < 0)
		return err;

	err = nilfs_segctor_add_file_block(sci, bh, inode,
					   sizeof(struct nilfs_binfo_v));
	if (!err)
		sci->sc_datablk_cnt++;
	return err;
}
static int nilfs_collect_file_node(struct nilfs_sc_info *sci,
				   struct buffer_head *bh,
				   struct inode *inode)
{
	return nilfs_bmap_propagate(NILFS_I(inode)->i_bmap, bh);
}

static int nilfs_collect_file_bmap(struct nilfs_sc_info *sci,
				   struct buffer_head *bh,
				   struct inode *inode)
{
	WARN_ON(!buffer_dirty(bh));
	return nilfs_segctor_add_file_block(sci, bh, inode, sizeof(__le64));
}
static void nilfs_write_file_data_binfo(struct nilfs_sc_info *sci,
					struct nilfs_segsum_pointer *ssp,
					union nilfs_binfo *binfo)
{
	struct nilfs_binfo_v *binfo_v = nilfs_segctor_map_segsum_entry(
		sci, ssp, sizeof(*binfo_v));
	*binfo_v = binfo->bi_v;
}

static void nilfs_write_file_node_binfo(struct nilfs_sc_info *sci,
					struct nilfs_segsum_pointer *ssp,
					union nilfs_binfo *binfo)
{
	__le64 *vblocknr = nilfs_segctor_map_segsum_entry(
		sci, ssp, sizeof(*vblocknr));
	*vblocknr = binfo->bi_v.bi_vblocknr;
}
static struct nilfs_sc_operations nilfs_sc_file_ops = {
	.collect_data = nilfs_collect_file_data,
	.collect_node = nilfs_collect_file_node,
	.collect_bmap = nilfs_collect_file_bmap,
	.write_data_binfo = nilfs_write_file_data_binfo,
	.write_node_binfo = nilfs_write_file_node_binfo,
};
static int nilfs_collect_dat_data(struct nilfs_sc_info *sci,
				  struct buffer_head *bh, struct inode *inode)
{
	int err;

	err = nilfs_bmap_propagate(NILFS_I(inode)->i_bmap, bh);
	if (err < 0)
		return err;

	err = nilfs_segctor_add_file_block(sci, bh, inode, sizeof(__le64));
	if (!err)
		sci->sc_datablk_cnt++;
	return err;
}

static int nilfs_collect_dat_bmap(struct nilfs_sc_info *sci,
				  struct buffer_head *bh, struct inode *inode)
{
	WARN_ON(!buffer_dirty(bh));
	return nilfs_segctor_add_file_block(sci, bh, inode,
					    sizeof(struct nilfs_binfo_dat));
}
static void nilfs_write_dat_data_binfo(struct nilfs_sc_info *sci,
				       struct nilfs_segsum_pointer *ssp,
				       union nilfs_binfo *binfo)
{
	__le64 *blkoff = nilfs_segctor_map_segsum_entry(sci, ssp,
							sizeof(*blkoff));
	*blkoff = binfo->bi_dat.bi_blkoff;
}

static void nilfs_write_dat_node_binfo(struct nilfs_sc_info *sci,
				       struct nilfs_segsum_pointer *ssp,
				       union nilfs_binfo *binfo)
{
	struct nilfs_binfo_dat *binfo_dat =
		nilfs_segctor_map_segsum_entry(sci, ssp, sizeof(*binfo_dat));
	*binfo_dat = binfo->bi_dat;
}
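/*
 * Three operation vectors cover the construction modes: regular files
 * and most metadata files use nilfs_sc_file_ops; the DAT uses
 * nilfs_sc_dat_ops, whose binfo entries carry on-disk block addresses
 * instead of virtual block numbers; data-sync logs use
 * nilfs_sc_dsync_ops, which collects data blocks only.
 */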
static struct nilfs_sc_operations nilfs_sc_dat_ops = {
	.collect_data = nilfs_collect_dat_data,
	.collect_node = nilfs_collect_file_node,
	.collect_bmap = nilfs_collect_dat_bmap,
	.write_data_binfo = nilfs_write_dat_data_binfo,
	.write_node_binfo = nilfs_write_dat_node_binfo,
};

static struct nilfs_sc_operations nilfs_sc_dsync_ops = {
	.collect_data = nilfs_collect_file_data,
	.collect_node = NULL,
	.collect_bmap = NULL,
	.write_data_binfo = nilfs_write_file_data_binfo,
	.write_node_binfo = NULL,
};
static size_t nilfs_lookup_dirty_data_buffers(struct inode *inode,
					      struct list_head *listp,
					      size_t nlimit,
					      loff_t start, loff_t end)
{
	struct address_space *mapping = inode->i_mapping;
	struct pagevec pvec;
	pgoff_t index = 0, last = ULONG_MAX;
	size_t ndirties = 0;
	int i;

	if (unlikely(start != 0 || end != LLONG_MAX)) {
		/*
		 * A valid range is given for sync-ing data pages. The
		 * range is rounded to per-page; extra dirty buffers
		 * may be included if blocksize < pagesize.
		 */
		index = start >> PAGE_SHIFT;
		last = end >> PAGE_SHIFT;
	}
	pagevec_init(&pvec, 0);
 repeat:
	if (unlikely(index > last) ||
	    !pagevec_lookup_tag(&pvec, mapping, &index, PAGECACHE_TAG_DIRTY,
				min_t(pgoff_t, last - index,
				      PAGEVEC_SIZE - 1) + 1))
		return ndirties;

	for (i = 0; i < pagevec_count(&pvec); i++) {
		struct buffer_head *bh, *head;
		struct page *page = pvec.pages[i];

		if (unlikely(page->index > last))
			break;

		lock_page(page);
		if (!page_has_buffers(page))
			create_empty_buffers(page, 1 << inode->i_blkbits, 0);
		unlock_page(page);

		bh = head = page_buffers(page);
		do {
			if (!buffer_dirty(bh) || buffer_async_write(bh))
				continue;
			get_bh(bh);
			list_add_tail(&bh->b_assoc_buffers, listp);
			ndirties++;
			if (unlikely(ndirties >= nlimit)) {
				pagevec_release(&pvec);
				cond_resched();
				return ndirties;
			}
		} while (bh = bh->b_this_page, bh != head);
	}
	pagevec_release(&pvec);
	cond_resched();
	goto repeat;
}
static void nilfs_lookup_dirty_node_buffers(struct inode *inode,
					    struct list_head *listp)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct address_space *mapping = &ii->i_btnode_cache;
	struct pagevec pvec;
	struct buffer_head *bh, *head;
	unsigned int i;
	pgoff_t index = 0;

	pagevec_init(&pvec, 0);

	while (pagevec_lookup_tag(&pvec, mapping, &index, PAGECACHE_TAG_DIRTY,
				  PAGEVEC_SIZE)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			bh = head = page_buffers(pvec.pages[i]);
			do {
				if (buffer_dirty(bh) &&
				    !buffer_async_write(bh)) {
					get_bh(bh);
					list_add_tail(&bh->b_assoc_buffers,
						      listp);
				}
				bh = bh->b_this_page;
			} while (bh != head);
		}
		pagevec_release(&pvec);
		cond_resched();
	}
}
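/*
 * nilfs_dispose_list() detaches inodes from @head in batches of
 * SC_N_INODEVEC while holding ns_inode_lock, then drops the lock
 * before calling iput() on each batch, since iput() may sleep.
 */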
static void nilfs_dispose_list(struct the_nilfs *nilfs,
			       struct list_head *head, int force)
{
	struct nilfs_inode_info *ii, *n;
	struct nilfs_inode_info *ivec[SC_N_INODEVEC], **pii;
	unsigned nv = 0;

	while (!list_empty(head)) {
		spin_lock(&nilfs->ns_inode_lock);
		list_for_each_entry_safe(ii, n, head, i_dirty) {
			list_del_init(&ii->i_dirty);
			if (force) {
				if (unlikely(ii->i_bh)) {
					brelse(ii->i_bh);
					ii->i_bh = NULL;
				}
			} else if (test_bit(NILFS_I_DIRTY, &ii->i_state)) {
				set_bit(NILFS_I_QUEUED, &ii->i_state);
				list_add_tail(&ii->i_dirty,
					      &nilfs->ns_dirty_files);
				continue;
			}
			ivec[nv++] = ii;
			if (nv == SC_N_INODEVEC)
				break;
		}
		spin_unlock(&nilfs->ns_inode_lock);

		for (pii = ivec; nv > 0; pii++, nv--)
			iput(&(*pii)->vfs_inode);
	}
}
static void nilfs_iput_work_func(struct work_struct *work)
{
	struct nilfs_sc_info *sci = container_of(work, struct nilfs_sc_info,
						 sc_iput_work);
	struct the_nilfs *nilfs = sci->sc_super->s_fs_info;

	nilfs_dispose_list(nilfs, &sci->sc_iput_queue, 0);
}
static int nilfs_test_metadata_dirty(struct the_nilfs *nilfs,
				     struct nilfs_root *root)
{
	int ret = 0;

	if (nilfs_mdt_fetch_dirty(root->ifile))
		ret++;
	if (nilfs_mdt_fetch_dirty(nilfs->ns_cpfile))
		ret++;
	if (nilfs_mdt_fetch_dirty(nilfs->ns_sufile))
		ret++;
	if ((ret || nilfs_doing_gc()) && nilfs_mdt_fetch_dirty(nilfs->ns_dat))
		ret++;
	return ret;
}
static int nilfs_segctor_clean(struct nilfs_sc_info *sci)
{
	return list_empty(&sci->sc_dirty_files) &&
		!test_bit(NILFS_SC_DIRTY, &sci->sc_flags) &&
		sci->sc_nfreesegs == 0 &&
		(!nilfs_doing_gc() || list_empty(&sci->sc_gc_inodes));
}
static int nilfs_segctor_confirm(struct nilfs_sc_info *sci)
{
	struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
	int ret = 0;

	if (nilfs_test_metadata_dirty(nilfs, sci->sc_root))
		set_bit(NILFS_SC_DIRTY, &sci->sc_flags);

	spin_lock(&nilfs->ns_inode_lock);
	if (list_empty(&nilfs->ns_dirty_files) && nilfs_segctor_clean(sci))
		ret++;

	spin_unlock(&nilfs->ns_inode_lock);
	return ret;
}
static void nilfs_segctor_clear_metadata_dirty(struct nilfs_sc_info *sci)
{
	struct the_nilfs *nilfs = sci->sc_super->s_fs_info;

	nilfs_mdt_clear_dirty(sci->sc_root->ifile);
	nilfs_mdt_clear_dirty(nilfs->ns_cpfile);
	nilfs_mdt_clear_dirty(nilfs->ns_sufile);
	nilfs_mdt_clear_dirty(nilfs->ns_dat);
}
static int nilfs_segctor_create_checkpoint(struct nilfs_sc_info *sci)
{
	struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
	struct buffer_head *bh_cp;
	struct nilfs_checkpoint *raw_cp;
	int err;

	/* XXX: this interface will be changed */
	err = nilfs_cpfile_get_checkpoint(nilfs->ns_cpfile, nilfs->ns_cno, 1,
					  &raw_cp, &bh_cp);
	if (likely(!err)) {
		/*
		 * The following code is duplicated with cpfile. But, it is
		 * needed to collect the checkpoint even if it was not newly
		 * created.
		 */
		mark_buffer_dirty(bh_cp);
		nilfs_mdt_mark_dirty(nilfs->ns_cpfile);
		nilfs_cpfile_put_checkpoint(
			nilfs->ns_cpfile, nilfs->ns_cno, bh_cp);
	} else
		WARN_ON(err == -EINVAL || err == -ENOENT);

	return err;
}
static int nilfs_segctor_fill_in_checkpoint(struct nilfs_sc_info *sci)
{
	struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
	struct buffer_head *bh_cp;
	struct nilfs_checkpoint *raw_cp;
	int err;

	err = nilfs_cpfile_get_checkpoint(nilfs->ns_cpfile, nilfs->ns_cno, 0,
					  &raw_cp, &bh_cp);
	if (unlikely(err)) {
		WARN_ON(err == -EINVAL || err == -ENOENT);
		goto failed_ibh;
	}
	raw_cp->cp_snapshot_list.ssl_next = 0;
	raw_cp->cp_snapshot_list.ssl_prev = 0;
	raw_cp->cp_inodes_count =
		cpu_to_le64(atomic64_read(&sci->sc_root->inodes_count));
	raw_cp->cp_blocks_count =
		cpu_to_le64(atomic64_read(&sci->sc_root->blocks_count));
	raw_cp->cp_nblk_inc =
		cpu_to_le64(sci->sc_nblk_inc + sci->sc_nblk_this_inc);
	raw_cp->cp_create = cpu_to_le64(sci->sc_seg_ctime);
	raw_cp->cp_cno = cpu_to_le64(nilfs->ns_cno);

	if (test_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags))
		nilfs_checkpoint_clear_minor(raw_cp);
	else
		nilfs_checkpoint_set_minor(raw_cp);

	nilfs_write_inode_common(sci->sc_root->ifile,
				 &raw_cp->cp_ifile_inode, 1);
	nilfs_cpfile_put_checkpoint(nilfs->ns_cpfile, nilfs->ns_cno, bh_cp);
	return 0;

 failed_ibh:
	return err;
}
static void nilfs_fill_in_file_bmap(struct inode *ifile,
				    struct nilfs_inode_info *ii)
{
	struct buffer_head *ibh;
	struct nilfs_inode *raw_inode;

	if (test_bit(NILFS_I_BMAP, &ii->i_state)) {
		ibh = ii->i_bh;
		BUG_ON(!ibh);
		raw_inode = nilfs_ifile_map_inode(ifile, ii->vfs_inode.i_ino,
						  ibh);
		nilfs_bmap_write(ii->i_bmap, raw_inode);
		nilfs_ifile_unmap_inode(ifile, ii->vfs_inode.i_ino, ibh);
	}
}
static void nilfs_segctor_fill_in_file_bmap(struct nilfs_sc_info *sci)
{
	struct nilfs_inode_info *ii;

	list_for_each_entry(ii, &sci->sc_dirty_files, i_dirty) {
		nilfs_fill_in_file_bmap(sci->sc_root->ifile, ii);
		set_bit(NILFS_I_COLLECTED, &ii->i_state);
	}
}
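/*
 * The super root block packs the on-disk inodes of the DAT, cpfile and
 * sufile after the nilfs_super_root header, at fixed offsets derived
 * from the inode size; the tail of the block is zero-filled so its
 * checksum stays stable.
 */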
static void nilfs_segctor_fill_in_super_root(struct nilfs_sc_info *sci,
					     struct the_nilfs *nilfs)
{
	struct buffer_head *bh_sr;
	struct nilfs_super_root *raw_sr;
	unsigned isz, srsz;

	bh_sr = NILFS_LAST_SEGBUF(&sci->sc_segbufs)->sb_super_root;
	raw_sr = (struct nilfs_super_root *)bh_sr->b_data;
	isz = nilfs->ns_inode_size;
	srsz = NILFS_SR_BYTES(isz);

	raw_sr->sr_bytes = cpu_to_le16(srsz);
	raw_sr->sr_nongc_ctime
		= cpu_to_le64(nilfs_doing_gc() ?
			      nilfs->ns_nongc_ctime : sci->sc_seg_ctime);
	raw_sr->sr_flags = 0;

	nilfs_write_inode_common(nilfs->ns_dat, (void *)raw_sr +
				 NILFS_SR_DAT_OFFSET(isz), 1);
	nilfs_write_inode_common(nilfs->ns_cpfile, (void *)raw_sr +
				 NILFS_SR_CPFILE_OFFSET(isz), 1);
	nilfs_write_inode_common(nilfs->ns_sufile, (void *)raw_sr +
				 NILFS_SR_SUFILE_OFFSET(isz), 1);
	memset((void *)raw_sr + srsz, 0, nilfs->ns_blocksize - srsz);
}
static void nilfs_redirty_inodes(struct list_head *head)
{
	struct nilfs_inode_info *ii;

	list_for_each_entry(ii, head, i_dirty) {
		if (test_bit(NILFS_I_COLLECTED, &ii->i_state))
			clear_bit(NILFS_I_COLLECTED, &ii->i_state);
	}
}
static void nilfs_drop_collected_inodes(struct list_head *head)
{
	struct nilfs_inode_info *ii;

	list_for_each_entry(ii, head, i_dirty) {
		if (!test_and_clear_bit(NILFS_I_COLLECTED, &ii->i_state))
			continue;

		clear_bit(NILFS_I_INODE_SYNC, &ii->i_state);
		set_bit(NILFS_I_UPDATED, &ii->i_state);
	}
}
static int nilfs_segctor_apply_buffers(struct nilfs_sc_info *sci,
				       struct inode *inode,
				       struct list_head *listp,
				       int (*collect)(struct nilfs_sc_info *,
						      struct buffer_head *,
						      struct inode *))
{
	struct buffer_head *bh, *n;
	int err = 0;

	if (collect) {
		list_for_each_entry_safe(bh, n, listp, b_assoc_buffers) {
			list_del_init(&bh->b_assoc_buffers);
			err = collect(sci, bh, inode);
			if (unlikely(err)) {
				brelse(bh);
				goto dispose_buffers;
			}
		}
		return 0;
	}

 dispose_buffers:
	while (!list_empty(listp)) {
		bh = list_first_entry(listp, struct buffer_head,
				      b_assoc_buffers);
		list_del_init(&bh->b_assoc_buffers);
		brelse(bh);
	}
	return err;
}
static size_t nilfs_segctor_buffer_rest(struct nilfs_sc_info *sci)
{
	/* Remaining number of blocks within segment buffer */
	return sci->sc_segbuf_nblocks -
		(sci->sc_nblk_this_inc + sci->sc_curseg->sb_sum.nblocks);
}
static int nilfs_segctor_scan_file(struct nilfs_sc_info *sci,
				   struct inode *inode,
				   struct nilfs_sc_operations *sc_ops)
{
	LIST_HEAD(data_buffers);
	LIST_HEAD(node_buffers);
	int err;

	if (!(sci->sc_stage.flags & NILFS_CF_NODE)) {
		size_t n, rest = nilfs_segctor_buffer_rest(sci);

		n = nilfs_lookup_dirty_data_buffers(
			inode, &data_buffers, rest + 1, 0, LLONG_MAX);
		if (n > rest) {
			err = nilfs_segctor_apply_buffers(
				sci, inode, &data_buffers,
				sc_ops->collect_data);
			BUG_ON(!err); /* always receive -E2BIG or true error */
			goto break_or_fail;
		}
	}
	nilfs_lookup_dirty_node_buffers(inode, &node_buffers);

	if (!(sci->sc_stage.flags & NILFS_CF_NODE)) {
		err = nilfs_segctor_apply_buffers(
			sci, inode, &data_buffers, sc_ops->collect_data);
		if (unlikely(err)) {
			/* dispose node list */
			nilfs_segctor_apply_buffers(
				sci, inode, &node_buffers, NULL);
			goto break_or_fail;
		}
		sci->sc_stage.flags |= NILFS_CF_NODE;
	}
	/* Collect node */
	err = nilfs_segctor_apply_buffers(
		sci, inode, &node_buffers, sc_ops->collect_node);
	if (unlikely(err))
		goto break_or_fail;

	nilfs_bmap_lookup_dirty_buffers(NILFS_I(inode)->i_bmap, &node_buffers);
	err = nilfs_segctor_apply_buffers(
		sci, inode, &node_buffers, sc_ops->collect_bmap);
	if (unlikely(err))
		goto break_or_fail;

	nilfs_segctor_end_finfo(sci, inode);
	sci->sc_stage.flags &= ~NILFS_CF_NODE;

 break_or_fail:
	return err;
}
static int nilfs_segctor_scan_file_dsync(struct nilfs_sc_info *sci,
					 struct inode *inode)
{
	LIST_HEAD(data_buffers);
	size_t n, rest = nilfs_segctor_buffer_rest(sci);
	int err;

	n = nilfs_lookup_dirty_data_buffers(inode, &data_buffers, rest + 1,
					    sci->sc_dsync_start,
					    sci->sc_dsync_end);

	err = nilfs_segctor_apply_buffers(sci, inode, &data_buffers,
					  nilfs_collect_file_data);
	if (!err) {
		nilfs_segctor_end_finfo(sci, inode);
		BUG_ON(n > rest);
		/* always receive -E2BIG or true error if n > rest */
	}
	return err;
}
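/*
 * Dirty block collection walks the NILFS_ST_* stages in order (GC
 * inodes, regular files, ifile, cpfile, sufile, DAT, super root);
 * sci->sc_stage records the position so collection can resume after an
 * -E2BIG retry without redoing completed stages.
 */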
static int nilfs_segctor_collect_blocks(struct nilfs_sc_info *sci, int mode)
{
	struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
	struct list_head *head;
	struct nilfs_inode_info *ii;
	size_t ndone;
	int err = 0;

	switch (sci->sc_stage.scnt) {
	case NILFS_ST_INIT:
		/* Pre-processes */
		sci->sc_stage.flags = 0;

		if (!test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags)) {
			sci->sc_nblk_inc = 0;
			sci->sc_curseg->sb_sum.flags = NILFS_SS_LOGBGN;
			if (mode == SC_LSEG_DSYNC) {
				sci->sc_stage.scnt = NILFS_ST_DSYNC;
				goto dsync_mode;
			}
		}

		sci->sc_stage.dirty_file_ptr = NULL;
		sci->sc_stage.gc_inode_ptr = NULL;
		if (mode == SC_FLUSH_DAT) {
			sci->sc_stage.scnt = NILFS_ST_DAT;
			goto dat_stage;
		}
		sci->sc_stage.scnt++;	/* Fall through */
	case NILFS_ST_GC:
		if (nilfs_doing_gc()) {
			head = &sci->sc_gc_inodes;
			ii = list_prepare_entry(sci->sc_stage.gc_inode_ptr,
						head, i_dirty);
			list_for_each_entry_continue(ii, head, i_dirty) {
				err = nilfs_segctor_scan_file(
					sci, &ii->vfs_inode,
					&nilfs_sc_file_ops);
				if (unlikely(err)) {
					sci->sc_stage.gc_inode_ptr = list_entry(
						ii->i_dirty.prev,
						struct nilfs_inode_info,
						i_dirty);
					goto break_or_fail;
				}
				set_bit(NILFS_I_COLLECTED, &ii->i_state);
			}
			sci->sc_stage.gc_inode_ptr = NULL;
		}
		sci->sc_stage.scnt++;	/* Fall through */
	case NILFS_ST_FILE:
		head = &sci->sc_dirty_files;
		ii = list_prepare_entry(sci->sc_stage.dirty_file_ptr, head,
					i_dirty);
		list_for_each_entry_continue(ii, head, i_dirty) {
			clear_bit(NILFS_I_DIRTY, &ii->i_state);

			err = nilfs_segctor_scan_file(sci, &ii->vfs_inode,
						      &nilfs_sc_file_ops);
			if (unlikely(err)) {
				sci->sc_stage.dirty_file_ptr =
					list_entry(ii->i_dirty.prev,
						   struct nilfs_inode_info,
						   i_dirty);
				goto break_or_fail;
			}
			/* sci->sc_stage.dirty_file_ptr = NILFS_I(inode); */
			/* XXX: required ? */
		}
		sci->sc_stage.dirty_file_ptr = NULL;
		if (mode == SC_FLUSH_FILE) {
			sci->sc_stage.scnt = NILFS_ST_DONE;
			return 0;
		}
		sci->sc_stage.scnt++;
		sci->sc_stage.flags |= NILFS_CF_IFILE_STARTED;
		/* Fall through */
	case NILFS_ST_IFILE:
		err = nilfs_segctor_scan_file(sci, sci->sc_root->ifile,
					      &nilfs_sc_file_ops);
		if (unlikely(err))
			break;
		sci->sc_stage.scnt++;
		/* Creating a checkpoint */
		err = nilfs_segctor_create_checkpoint(sci);
		if (unlikely(err))
			break;
		/* Fall through */
	case NILFS_ST_CPFILE:
		err = nilfs_segctor_scan_file(sci, nilfs->ns_cpfile,
					      &nilfs_sc_file_ops);
		if (unlikely(err))
			break;
		sci->sc_stage.scnt++;	/* Fall through */
	case NILFS_ST_SUFILE:
		err = nilfs_sufile_freev(nilfs->ns_sufile, sci->sc_freesegs,
					 sci->sc_nfreesegs, &ndone);
		if (unlikely(err)) {
			nilfs_sufile_cancel_freev(nilfs->ns_sufile,
						  sci->sc_freesegs, ndone,
						  NULL);
			break;
		}
		sci->sc_stage.flags |= NILFS_CF_SUFREED;

		err = nilfs_segctor_scan_file(sci, nilfs->ns_sufile,
					      &nilfs_sc_file_ops);
		if (unlikely(err))
			break;
		sci->sc_stage.scnt++;	/* Fall through */
	case NILFS_ST_DAT:
 dat_stage:
		err = nilfs_segctor_scan_file(sci, nilfs->ns_dat,
					      &nilfs_sc_dat_ops);
		if (unlikely(err))
			break;
		if (mode == SC_FLUSH_DAT) {
			sci->sc_stage.scnt = NILFS_ST_DONE;
			return 0;
		}
		sci->sc_stage.scnt++;	/* Fall through */
	case NILFS_ST_SR:
		if (mode == SC_LSEG_SR) {
			/* Appending a super root */
			err = nilfs_segctor_add_super_root(sci);
			if (unlikely(err))
				break;
		}
		/* End of a logical segment */
		sci->sc_curseg->sb_sum.flags |= NILFS_SS_LOGEND;
		sci->sc_stage.scnt = NILFS_ST_DONE;
		return 0;
	case NILFS_ST_DSYNC:
 dsync_mode:
		sci->sc_curseg->sb_sum.flags |= NILFS_SS_SYNDT;
		ii = sci->sc_dsync_inode;
		if (!test_bit(NILFS_I_BUSY, &ii->i_state))
			break;

		err = nilfs_segctor_scan_file_dsync(sci, &ii->vfs_inode);
		if (unlikely(err))
			break;
		sci->sc_curseg->sb_sum.flags |= NILFS_SS_LOGEND;
		sci->sc_stage.scnt = NILFS_ST_DONE;
		return 0;
	case NILFS_ST_DONE:
		return 0;
	default:
		BUG();
	}

 break_or_fail:
	return err;
}
/**
 * nilfs_segctor_begin_construction - setup segment buffer to make a new log
 * @sci: nilfs_sc_info
 * @nilfs: nilfs object
 */
static int nilfs_segctor_begin_construction(struct nilfs_sc_info *sci,
					    struct the_nilfs *nilfs)
{
	struct nilfs_segment_buffer *segbuf, *prev;
	__u64 nextnum;
	int err, alloc = 0;

	segbuf = nilfs_segbuf_new(sci->sc_super);
	if (unlikely(!segbuf))
		return -ENOMEM;

	if (list_empty(&sci->sc_write_logs)) {
		nilfs_segbuf_map(segbuf, nilfs->ns_segnum,
				 nilfs->ns_pseg_offset, nilfs);
		if (segbuf->sb_rest_blocks < NILFS_PSEG_MIN_BLOCKS) {
			nilfs_shift_to_next_segment(nilfs);
			nilfs_segbuf_map(segbuf, nilfs->ns_segnum, 0, nilfs);
		}

		segbuf->sb_sum.seg_seq = nilfs->ns_seg_seq;
		nextnum = nilfs->ns_nextnum;

		if (nilfs->ns_segnum == nilfs->ns_nextnum)
			/* Start from the head of a new full segment */
			alloc++;
	} else {
		/* Continue logs */
		prev = NILFS_LAST_SEGBUF(&sci->sc_write_logs);
		nilfs_segbuf_map_cont(segbuf, prev);
		segbuf->sb_sum.seg_seq = prev->sb_sum.seg_seq;
		nextnum = prev->sb_nextnum;

		if (segbuf->sb_rest_blocks < NILFS_PSEG_MIN_BLOCKS) {
			nilfs_segbuf_map(segbuf, prev->sb_nextnum, 0, nilfs);
			segbuf->sb_sum.seg_seq++;
			alloc++;
		}
	}

	err = nilfs_sufile_mark_dirty(nilfs->ns_sufile, segbuf->sb_segnum);
	if (err)
		goto failed;

	if (alloc) {
		err = nilfs_sufile_alloc(nilfs->ns_sufile, &nextnum);
		if (err)
			goto failed;
	}
	nilfs_segbuf_set_next_segnum(segbuf, nextnum, nilfs);

	BUG_ON(!list_empty(&sci->sc_segbufs));
	list_add_tail(&segbuf->sb_list, &sci->sc_segbufs);
	sci->sc_segbuf_nblocks = segbuf->sb_rest_blocks;
	return 0;

 failed:
	nilfs_segbuf_free(segbuf);
	return err;
}
static int nilfs_segctor_extend_segments(struct nilfs_sc_info *sci,
					 struct the_nilfs *nilfs, int nadd)
{
	struct nilfs_segment_buffer *segbuf, *prev;
	struct inode *sufile = nilfs->ns_sufile;
	__u64 nextnextnum;
	LIST_HEAD(list);
	int err, ret, i;

	prev = NILFS_LAST_SEGBUF(&sci->sc_segbufs);
	/*
	 * Since the segment specified with nextnum might be allocated during
	 * the previous construction, the buffer including its segusage may
	 * not be dirty. The following call ensures that the buffer is dirty
	 * and will pin the buffer on memory until the sufile is written.
	 */
	err = nilfs_sufile_mark_dirty(sufile, prev->sb_nextnum);
	if (unlikely(err))
		return err;

	for (i = 0; i < nadd; i++) {
		/* extend segment info */
		err = -ENOMEM;
		segbuf = nilfs_segbuf_new(sci->sc_super);
		if (unlikely(!segbuf))
			goto failed;

		/* map this buffer to region of segment on-disk */
		nilfs_segbuf_map(segbuf, prev->sb_nextnum, 0, nilfs);
		sci->sc_segbuf_nblocks += segbuf->sb_rest_blocks;

		/* allocate the next next full segment */
		err = nilfs_sufile_alloc(sufile, &nextnextnum);
		if (unlikely(err))
			goto failed_segbuf;

		segbuf->sb_sum.seg_seq = prev->sb_sum.seg_seq + 1;
		nilfs_segbuf_set_next_segnum(segbuf, nextnextnum, nilfs);

		list_add_tail(&segbuf->sb_list, &list);
		prev = segbuf;
	}
	list_splice_tail(&list, &sci->sc_segbufs);
	return 0;

 failed_segbuf:
	nilfs_segbuf_free(segbuf);
 failed:
	list_for_each_entry(segbuf, &list, sb_list) {
		ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
		WARN_ON(ret); /* never fails */
	}
	nilfs_destroy_logs(&list);
	return err;
}
static void nilfs_free_incomplete_logs(struct list_head *logs,
				       struct the_nilfs *nilfs)
{
	struct nilfs_segment_buffer *segbuf, *prev;
	struct inode *sufile = nilfs->ns_sufile;
	int ret;

	segbuf = NILFS_FIRST_SEGBUF(logs);
	if (nilfs->ns_nextnum != segbuf->sb_nextnum) {
		ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
		WARN_ON(ret); /* never fails */
	}
	if (atomic_read(&segbuf->sb_err)) {
		/* Case 1: The first segment failed */
		if (segbuf->sb_pseg_start != segbuf->sb_fseg_start)
			/*
			 * Case 1a: Partial segment appended into an existing
			 * segment
			 */
			nilfs_terminate_segment(nilfs, segbuf->sb_fseg_start,
						segbuf->sb_fseg_end);
		else /* Case 1b: New full segment */
			set_nilfs_discontinued(nilfs);
	}

	prev = segbuf;
	list_for_each_entry_continue(segbuf, logs, sb_list) {
		if (prev->sb_nextnum != segbuf->sb_nextnum) {
			ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
			WARN_ON(ret); /* never fails */
		}
		if (atomic_read(&segbuf->sb_err) &&
		    segbuf->sb_segnum != nilfs->ns_nextnum)
			/* Case 2: extended segment (!= next) failed */
			nilfs_sufile_set_error(sufile, segbuf->sb_segnum);
		prev = segbuf;
	}
}
static void nilfs_segctor_update_segusage(struct nilfs_sc_info *sci,
					  struct inode *sufile)
{
	struct nilfs_segment_buffer *segbuf;
	unsigned long live_blocks;
	int ret;

	list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
		live_blocks = segbuf->sb_sum.nblocks +
			(segbuf->sb_pseg_start - segbuf->sb_fseg_start);
		ret = nilfs_sufile_set_segment_usage(sufile, segbuf->sb_segnum,
						     live_blocks,
						     sci->sc_seg_ctime);
		WARN_ON(ret); /* always succeeds because the segusage is dirty */
	}
}
static void nilfs_cancel_segusage(struct list_head *logs, struct inode *sufile)
{
	struct nilfs_segment_buffer *segbuf;
	int ret;

	segbuf = NILFS_FIRST_SEGBUF(logs);
	ret = nilfs_sufile_set_segment_usage(sufile, segbuf->sb_segnum,
					     segbuf->sb_pseg_start -
					     segbuf->sb_fseg_start, 0);
	WARN_ON(ret); /* always succeeds because the segusage is dirty */

	list_for_each_entry_continue(segbuf, logs, sb_list) {
		ret = nilfs_sufile_set_segment_usage(sufile, segbuf->sb_segnum,
						     0, 0);
		WARN_ON(ret); /* always succeeds */
	}
}
static void nilfs_segctor_truncate_segments(struct nilfs_sc_info *sci,
					    struct nilfs_segment_buffer *last,
					    struct inode *sufile)
{
	struct nilfs_segment_buffer *segbuf = last;
	int ret;

	list_for_each_entry_continue(segbuf, &sci->sc_segbufs, sb_list) {
		sci->sc_segbuf_nblocks -= segbuf->sb_rest_blocks;
		ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
		WARN_ON(ret);
	}
	nilfs_truncate_logs(&sci->sc_segbufs, last);
}
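/*
 * The collection retry loop below doubles the number of appended
 * segment buffers (bounded by SC_MAX_SEGDELTA) each time collection
 * overflows with -E2BIG, restoring sci->sc_stage from prev_stage so
 * the stages rerun over the enlarged buffer chain.
 */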
static int nilfs_segctor_collect(struct nilfs_sc_info *sci,
				 struct the_nilfs *nilfs, int mode)
{
	struct nilfs_cstage prev_stage = sci->sc_stage;
	int err, nadd = 1;

	/* Collection retry loop */
	for (;;) {
		sci->sc_nblk_this_inc = 0;
		sci->sc_curseg = NILFS_FIRST_SEGBUF(&sci->sc_segbufs);

		err = nilfs_segctor_reset_segment_buffer(sci);
		if (unlikely(err))
			goto failed;

		err = nilfs_segctor_collect_blocks(sci, mode);
		sci->sc_nblk_this_inc += sci->sc_curseg->sb_sum.nblocks;
		if (!err)
			break;

		if (unlikely(err != -E2BIG))
			goto failed;

		/* The current segment is filled up */
		if (mode != SC_LSEG_SR || sci->sc_stage.scnt < NILFS_ST_CPFILE)
			break;

		nilfs_clear_logs(&sci->sc_segbufs);

		if (sci->sc_stage.flags & NILFS_CF_SUFREED) {
			err = nilfs_sufile_cancel_freev(nilfs->ns_sufile,
							sci->sc_freesegs,
							sci->sc_nfreesegs,
							NULL);
			WARN_ON(err); /* do not happen */
			sci->sc_stage.flags &= ~NILFS_CF_SUFREED;
		}

		err = nilfs_segctor_extend_segments(sci, nilfs, nadd);
		if (unlikely(err))
			return err;

		nadd = min_t(int, nadd << 1, SC_MAX_SEGDELTA);
		sci->sc_stage = prev_stage;
	}
	nilfs_segctor_truncate_segments(sci, sci->sc_curseg, nilfs->ns_sufile);
	return 0;

 failed:
	return err;
}
static void nilfs_list_replace_buffer(struct buffer_head *old_bh,
				      struct buffer_head *new_bh)
{
	BUG_ON(!list_empty(&new_bh->b_assoc_buffers));

	list_replace_init(&old_bh->b_assoc_buffers, &new_bh->b_assoc_buffers);
	/* The caller must release old_bh */
}
static int
nilfs_segctor_update_payload_blocknr(struct nilfs_sc_info *sci,
				     struct nilfs_segment_buffer *segbuf,
				     int mode)
{
	struct inode *inode = NULL;
	sector_t blocknr;
	unsigned long nfinfo = segbuf->sb_sum.nfinfo;
	unsigned long nblocks = 0, ndatablk = 0;
	struct nilfs_sc_operations *sc_op = NULL;
	struct nilfs_segsum_pointer ssp;
	struct nilfs_finfo *finfo = NULL;
	union nilfs_binfo binfo;
	struct buffer_head *bh, *bh_org;
	ino_t ino = 0;
	int err = 0;

	if (!nfinfo)
		goto out;

	blocknr = segbuf->sb_pseg_start + segbuf->sb_sum.nsumblk;
	ssp.bh = NILFS_SEGBUF_FIRST_BH(&segbuf->sb_segsum_buffers);
	ssp.offset = sizeof(struct nilfs_segment_summary);

	list_for_each_entry(bh, &segbuf->sb_payload_buffers, b_assoc_buffers) {
		if (bh == segbuf->sb_super_root)
			break;
		if (!finfo) {
			finfo =	nilfs_segctor_map_segsum_entry(
				sci, &ssp, sizeof(*finfo));
			ino = le64_to_cpu(finfo->fi_ino);
			nblocks = le32_to_cpu(finfo->fi_nblocks);
			ndatablk = le32_to_cpu(finfo->fi_ndatablk);

			inode = bh->b_page->mapping->host;

			if (mode == SC_LSEG_DSYNC)
				sc_op = &nilfs_sc_dsync_ops;
			else if (ino == NILFS_DAT_INO)
				sc_op = &nilfs_sc_dat_ops;
			else /* file blocks */
				sc_op = &nilfs_sc_file_ops;
		}
		bh_org = bh;
		get_bh(bh_org);
		err = nilfs_bmap_assign(NILFS_I(inode)->i_bmap, &bh, blocknr,
					&binfo);
		if (bh != bh_org)
			nilfs_list_replace_buffer(bh_org, bh);
		brelse(bh_org);
		if (unlikely(err))
			goto failed_bmap;

		if (ndatablk > 0)
			sc_op->write_data_binfo(sci, &ssp, &binfo);
		else
			sc_op->write_node_binfo(sci, &ssp, &binfo);

		blocknr++;
		if (--nblocks == 0) {
			finfo = NULL;
			if (--nfinfo == 0)
				break;
		} else if (ndatablk > 0)
			ndatablk--;
	}
 out:
	return 0;

 failed_bmap:
	return err;
}
static int nilfs_segctor_assign(struct nilfs_sc_info *sci, int mode)
{
	struct nilfs_segment_buffer *segbuf;
	int err;

	list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
		err = nilfs_segctor_update_payload_blocknr(sci, segbuf, mode);
		if (unlikely(err))
			return err;
		nilfs_segbuf_fill_in_segsum(segbuf);
	}
	return 0;
}
static void nilfs_begin_page_io(struct page *page)
{
	if (!page || PageWriteback(page))
		/*
		 * For split b-tree node pages, this function may be called
		 * twice. We ignore the 2nd or later calls by this check.
		 */
		return;

	lock_page(page);
	clear_page_dirty_for_io(page);
	set_page_writeback(page);
	unlock_page(page);
}
static void nilfs_segctor_prepare_write(struct nilfs_sc_info *sci)
{
	struct nilfs_segment_buffer *segbuf;
	struct page *bd_page = NULL, *fs_page = NULL;

	list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
		struct buffer_head *bh;

		list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
				    b_assoc_buffers) {
			if (bh->b_page != bd_page) {
				if (bd_page) {
					lock_page(bd_page);
					clear_page_dirty_for_io(bd_page);
					set_page_writeback(bd_page);
					unlock_page(bd_page);
				}
				bd_page = bh->b_page;
			}
		}

		list_for_each_entry(bh, &segbuf->sb_payload_buffers,
				    b_assoc_buffers) {
			set_buffer_async_write(bh);
			if (bh == segbuf->sb_super_root) {
				if (bh->b_page != bd_page) {
					lock_page(bd_page);
					clear_page_dirty_for_io(bd_page);
					set_page_writeback(bd_page);
					unlock_page(bd_page);
					bd_page = bh->b_page;
				}
				break;
			}
			if (bh->b_page != fs_page) {
				nilfs_begin_page_io(fs_page);
				fs_page = bh->b_page;
			}
		}
	}
	if (bd_page) {
		lock_page(bd_page);
		clear_page_dirty_for_io(bd_page);
		set_page_writeback(bd_page);
		unlock_page(bd_page);
	}
	nilfs_begin_page_io(fs_page);
}
static int nilfs_segctor_write(struct nilfs_sc_info *sci,
			       struct the_nilfs *nilfs)
{
	int ret;

	ret = nilfs_write_logs(&sci->sc_segbufs, nilfs);
	list_splice_tail_init(&sci->sc_segbufs, &sci->sc_write_logs);
	return ret;
}
static void nilfs_end_page_io(struct page *page, int err)
{
	if (!page)
		return;

	if (buffer_nilfs_node(page_buffers(page)) && !PageWriteback(page)) {
		/*
		 * For b-tree node pages, this function may be called twice
		 * or more because they might be split in a segment.
		 */
		if (PageDirty(page)) {
			/*
			 * For pages holding split b-tree node buffers, dirty
			 * flag on the buffers may be cleared discretely.
			 * In that case, the page is once redirtied for
			 * remaining buffers, and it must be cancelled if
			 * all the buffers get cleaned later.
			 */
			lock_page(page);
			if (nilfs_page_buffers_clean(page))
				__nilfs_clear_page_dirty(page);
			unlock_page(page);
		}
		return;
	}

	if (!err) {
		if (!nilfs_page_buffers_clean(page))
			__set_page_dirty_nobuffers(page);
		ClearPageError(page);
	} else {
		__set_page_dirty_nobuffers(page);
		SetPageError(page);
	}

	end_page_writeback(page);
}
static void nilfs_abort_logs(struct list_head *logs, int err)
{
	struct nilfs_segment_buffer *segbuf;
	struct page *bd_page = NULL, *fs_page = NULL;
	struct buffer_head *bh;

	if (list_empty(logs))
		return;

	list_for_each_entry(segbuf, logs, sb_list) {
		list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
				    b_assoc_buffers) {
			if (bh->b_page != bd_page) {
				if (bd_page)
					end_page_writeback(bd_page);
				bd_page = bh->b_page;
			}
		}

		list_for_each_entry(bh, &segbuf->sb_payload_buffers,
				    b_assoc_buffers) {
			clear_buffer_async_write(bh);
			if (bh == segbuf->sb_super_root) {
				if (bh->b_page != bd_page) {
					end_page_writeback(bd_page);
					bd_page = bh->b_page;
				}
				break;
			}
			if (bh->b_page != fs_page) {
				nilfs_end_page_io(fs_page, err);
				fs_page = bh->b_page;
			}
		}
	}
	if (bd_page)
		end_page_writeback(bd_page);

	nilfs_end_page_io(fs_page, err);
}
static void nilfs_segctor_abort_construction(struct nilfs_sc_info *sci,
					     struct the_nilfs *nilfs, int err)
{
	LIST_HEAD(logs);
	int ret;

	list_splice_tail_init(&sci->sc_write_logs, &logs);
	ret = nilfs_wait_on_logs(&logs);
	nilfs_abort_logs(&logs, ret ? : err);

	list_splice_tail_init(&sci->sc_segbufs, &logs);
	nilfs_cancel_segusage(&logs, nilfs->ns_sufile);
	nilfs_free_incomplete_logs(&logs, nilfs);

	if (sci->sc_stage.flags & NILFS_CF_SUFREED) {
		ret = nilfs_sufile_cancel_freev(nilfs->ns_sufile,
						sci->sc_freesegs,
						sci->sc_nfreesegs,
						NULL);
		WARN_ON(ret); /* do not happen */
	}

	nilfs_destroy_logs(&logs);
}
static void nilfs_set_next_segment(struct the_nilfs *nilfs,
				   struct nilfs_segment_buffer *segbuf)
{
	nilfs->ns_segnum = segbuf->sb_segnum;
	nilfs->ns_nextnum = segbuf->sb_nextnum;
	nilfs->ns_pseg_offset = segbuf->sb_pseg_start - segbuf->sb_fseg_start
		+ segbuf->sb_sum.nblocks;
	nilfs->ns_seg_seq = segbuf->sb_sum.seg_seq;
	nilfs->ns_ctime = segbuf->sb_sum.ctime;
}
static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci)
{
	struct nilfs_segment_buffer *segbuf;
	struct page *bd_page = NULL, *fs_page = NULL;
	struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
	int update_sr = false;

	list_for_each_entry(segbuf, &sci->sc_write_logs, sb_list) {
		struct buffer_head *bh;

		list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
				    b_assoc_buffers) {
			set_buffer_uptodate(bh);
			clear_buffer_dirty(bh);
			if (bh->b_page != bd_page) {
				if (bd_page)
					end_page_writeback(bd_page);
				bd_page = bh->b_page;
			}
		}
		/*
		 * We assume that the buffers which belong to the same page
		 * continue over the buffer list.
		 * Under this assumption, the last BHs of pages is
		 * identifiable by the discontinuity of bh->b_page
		 * (page != fs_page).
		 *
		 * For B-tree node blocks, however, this assumption is not
		 * guaranteed. The cleanup code of B-tree node pages needs
		 * special care.
		 */
		list_for_each_entry(bh, &segbuf->sb_payload_buffers,
				    b_assoc_buffers) {
			const unsigned long set_bits = (1 << BH_Uptodate);
			const unsigned long clear_bits =
				(1 << BH_Dirty | 1 << BH_Async_Write |
				 1 << BH_Delay | 1 << BH_NILFS_Volatile |
				 1 << BH_NILFS_Redirected);

			set_mask_bits(&bh->b_state, clear_bits, set_bits);
			if (bh == segbuf->sb_super_root) {
				if (bh->b_page != bd_page) {
					end_page_writeback(bd_page);
					bd_page = bh->b_page;
				}
				update_sr = true;
				break;
			}
			if (bh->b_page != fs_page) {
				nilfs_end_page_io(fs_page, 0);
				fs_page = bh->b_page;
			}
		}

		if (!nilfs_segbuf_simplex(segbuf)) {
			if (segbuf->sb_sum.flags & NILFS_SS_LOGBGN) {
				set_bit(NILFS_SC_UNCLOSED, &sci->sc_flags);
				sci->sc_lseg_stime = jiffies;
			}
			if (segbuf->sb_sum.flags & NILFS_SS_LOGEND)
				clear_bit(NILFS_SC_UNCLOSED, &sci->sc_flags);
		}
	}
	/*
	 * Since pages may continue over multiple segment buffers,
	 * end of the last page must be checked outside of the loop.
	 */
	if (bd_page)
		end_page_writeback(bd_page);

	nilfs_end_page_io(fs_page, 0);

	nilfs_drop_collected_inodes(&sci->sc_dirty_files);

	if (nilfs_doing_gc())
		nilfs_drop_collected_inodes(&sci->sc_gc_inodes);
	else
		nilfs->ns_nongc_ctime = sci->sc_seg_ctime;

	sci->sc_nblk_inc += sci->sc_nblk_this_inc;

	segbuf = NILFS_LAST_SEGBUF(&sci->sc_write_logs);
	nilfs_set_next_segment(nilfs, segbuf);

	if (update_sr) {
		nilfs->ns_flushed_device = 0;
		nilfs_set_last_segment(nilfs, segbuf->sb_pseg_start,
				       segbuf->sb_sum.seg_seq, nilfs->ns_cno++);

		clear_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags);
		clear_bit(NILFS_SC_DIRTY, &sci->sc_flags);
		set_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags);
		nilfs_segctor_clear_metadata_dirty(sci);
	} else
		clear_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags);
}
static int nilfs_segctor_wait(struct nilfs_sc_info *sci)
{
	int ret;

	ret = nilfs_wait_on_logs(&sci->sc_write_logs);
	if (!ret) {
		nilfs_segctor_complete_write(sci);
		nilfs_destroy_logs(&sci->sc_write_logs);
	}
	return ret;
}
static int nilfs_segctor_collect_dirty_files(struct nilfs_sc_info *sci,
					     struct the_nilfs *nilfs)
{
	struct nilfs_inode_info *ii, *n;
	struct inode *ifile = sci->sc_root->ifile;

	spin_lock(&nilfs->ns_inode_lock);
 retry:
	list_for_each_entry_safe(ii, n, &nilfs->ns_dirty_files, i_dirty) {
		if (!ii->i_bh) {
			struct buffer_head *ibh;
			int err;

			spin_unlock(&nilfs->ns_inode_lock);
			err = nilfs_ifile_get_inode_block(
				ifile, ii->vfs_inode.i_ino, &ibh);
			if (unlikely(err)) {
				nilfs_warning(sci->sc_super, __func__,
					      "failed to get inode block.\n");
				return err;
			}
			mark_buffer_dirty(ibh);
			nilfs_mdt_mark_dirty(ifile);
			spin_lock(&nilfs->ns_inode_lock);
			if (likely(!ii->i_bh))
				ii->i_bh = ibh;
			else
				brelse(ibh);
			goto retry;
		}

		clear_bit(NILFS_I_QUEUED, &ii->i_state);
		set_bit(NILFS_I_BUSY, &ii->i_state);
		list_move_tail(&ii->i_dirty, &sci->sc_dirty_files);
	}
	spin_unlock(&nilfs->ns_inode_lock);

	return 0;
}
static void nilfs_segctor_drop_written_files(struct nilfs_sc_info *sci,
					     struct the_nilfs *nilfs)
{
	struct nilfs_inode_info *ii, *n;
	int during_mount = !(sci->sc_super->s_flags & MS_ACTIVE);
	int defer_iput = false;

	spin_lock(&nilfs->ns_inode_lock);
	list_for_each_entry_safe(ii, n, &sci->sc_dirty_files, i_dirty) {
		if (!test_and_clear_bit(NILFS_I_UPDATED, &ii->i_state) ||
		    test_bit(NILFS_I_DIRTY, &ii->i_state))
			continue;

		clear_bit(NILFS_I_BUSY, &ii->i_state);
		brelse(ii->i_bh);
		ii->i_bh = NULL;
		list_del_init(&ii->i_dirty);
		if (!ii->vfs_inode.i_nlink || during_mount) {
			/*
			 * Defer calling iput() to avoid deadlocks if
			 * i_nlink == 0 or mount is not yet finished.
			 */
			list_add_tail(&ii->i_dirty, &sci->sc_iput_queue);
			defer_iput = true;
		} else {
			spin_unlock(&nilfs->ns_inode_lock);
			iput(&ii->vfs_inode);
			spin_lock(&nilfs->ns_inode_lock);
		}
	}
	spin_unlock(&nilfs->ns_inode_lock);

	if (defer_iput)
		schedule_work(&sci->sc_iput_work);
}
/*
 * Main procedure of segment constructor
 */
static int nilfs_segctor_do_construct(struct nilfs_sc_info *sci, int mode)
{
	struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
	int err;

	sci->sc_stage.scnt = NILFS_ST_INIT;
	sci->sc_cno = nilfs->ns_cno;

	err = nilfs_segctor_collect_dirty_files(sci, nilfs);
	if (unlikely(err))
		goto out;

	if (nilfs_test_metadata_dirty(nilfs, sci->sc_root))
		set_bit(NILFS_SC_DIRTY, &sci->sc_flags);

	if (nilfs_segctor_clean(sci))
		goto out;

	do {
		sci->sc_stage.flags &= ~NILFS_CF_HISTORY_MASK;

		err = nilfs_segctor_begin_construction(sci, nilfs);
		if (unlikely(err))
			goto out;

		/* Update time stamp */
		sci->sc_seg_ctime = get_seconds();

		err = nilfs_segctor_collect(sci, nilfs, mode);
		if (unlikely(err))
			goto failed;

		/* Avoid empty segment */
		if (sci->sc_stage.scnt == NILFS_ST_DONE &&
		    nilfs_segbuf_empty(sci->sc_curseg)) {
			nilfs_segctor_abort_construction(sci, nilfs, 1);
			goto out;
		}

		err = nilfs_segctor_assign(sci, mode);
		if (unlikely(err))
			goto failed;

		if (sci->sc_stage.flags & NILFS_CF_IFILE_STARTED)
			nilfs_segctor_fill_in_file_bmap(sci);

		if (mode == SC_LSEG_SR &&
		    sci->sc_stage.scnt >= NILFS_ST_CPFILE) {
			err = nilfs_segctor_fill_in_checkpoint(sci);
			if (unlikely(err))
				goto failed_to_write;

			nilfs_segctor_fill_in_super_root(sci, nilfs);
		}
		nilfs_segctor_update_segusage(sci, nilfs->ns_sufile);

		/* Write partial segments */
		nilfs_segctor_prepare_write(sci);

		nilfs_add_checksums_on_logs(&sci->sc_segbufs,
					    nilfs->ns_crc_seed);

		err = nilfs_segctor_write(sci, nilfs);
		if (unlikely(err))
			goto failed_to_write;

		if (sci->sc_stage.scnt == NILFS_ST_DONE ||
		    nilfs->ns_blocksize_bits != PAGE_CACHE_SHIFT) {
			/*
			 * At this point, we avoid double buffering
			 * for blocksize < pagesize because page dirty
			 * flag is turned off during write and dirty
			 * buffers are not properly collected for
			 * pages crossing over segments.
			 */
			err = nilfs_segctor_wait(sci);
			if (err)
				goto failed_to_write;
		}
	} while (sci->sc_stage.scnt != NILFS_ST_DONE);

 out:
	nilfs_segctor_drop_written_files(sci, nilfs);
	return err;

 failed_to_write:
	if (sci->sc_stage.flags & NILFS_CF_IFILE_STARTED)
		nilfs_redirty_inodes(&sci->sc_dirty_files);

 failed:
	if (nilfs_doing_gc())
		nilfs_redirty_inodes(&sci->sc_gc_inodes);
	nilfs_segctor_abort_construction(sci, nilfs, err);
	return err;
}
/**
 * nilfs_segctor_start_timer - set timer of background write
 * @sci: nilfs_sc_info
 *
 * If the timer has already been set, it ignores the new request.
 * This function MUST be called within a section locking the segment
 * semaphore.
 */
static void nilfs_segctor_start_timer(struct nilfs_sc_info *sci)
{
	spin_lock(&sci->sc_state_lock);
	if (!(sci->sc_state & NILFS_SEGCTOR_COMMIT)) {
		sci->sc_timer.expires = jiffies + sci->sc_interval;
		add_timer(&sci->sc_timer);
		sci->sc_state |= NILFS_SEGCTOR_COMMIT;
	}
	spin_unlock(&sci->sc_state_lock);
}
static void nilfs_segctor_do_flush(struct nilfs_sc_info *sci, int bn)
{
	spin_lock(&sci->sc_state_lock);
	if (!(sci->sc_flush_request & (1 << bn))) {
		unsigned long prev_req = sci->sc_flush_request;

		sci->sc_flush_request |= (1 << bn);
		if (!prev_req)
			wake_up(&sci->sc_wait_daemon);
	}
	spin_unlock(&sci->sc_state_lock);
}
/**
 * nilfs_flush_segment - trigger a segment construction for resource control
 * @sb: super block
 * @ino: inode number of the file to be flushed out.
 */
void nilfs_flush_segment(struct super_block *sb, ino_t ino)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct nilfs_sc_info *sci = nilfs->ns_writer;

	if (!sci || nilfs_doing_construction())
		return;
	nilfs_segctor_do_flush(sci, NILFS_MDT_INODE(sb, ino) ? ino : 0);
					/* assign bit 0 to data files */
}
struct nilfs_segctor_wait_request {
	wait_queue_t	wq;
	__u32		seq;
	int		err;
	atomic_t	done;
};
static int nilfs_segctor_sync(struct nilfs_sc_info *sci)
{
	struct nilfs_segctor_wait_request wait_req;
	int err = 0;

	spin_lock(&sci->sc_state_lock);
	init_wait(&wait_req.wq);
	wait_req.err = 0;
	atomic_set(&wait_req.done, 0);
	wait_req.seq = ++sci->sc_seq_request;
	spin_unlock(&sci->sc_state_lock);

	init_waitqueue_entry(&wait_req.wq, current);
	add_wait_queue(&sci->sc_wait_request, &wait_req.wq);
	set_current_state(TASK_INTERRUPTIBLE);
	wake_up(&sci->sc_wait_daemon);

	for (;;) {
		if (atomic_read(&wait_req.done)) {
			err = wait_req.err;
			break;
		}
		if (!signal_pending(current)) {
			schedule();
			continue;
		}
		err = -ERESTARTSYS;
		break;
	}
	finish_wait(&sci->sc_wait_request, &wait_req.wq);
	return err;
}
static void nilfs_segctor_wakeup(struct nilfs_sc_info *sci, int err)
{
	struct nilfs_segctor_wait_request *wrq, *n;
	unsigned long flags;

	spin_lock_irqsave(&sci->sc_wait_request.lock, flags);
	list_for_each_entry_safe(wrq, n, &sci->sc_wait_request.task_list,
				 wq.task_list) {
		if (!atomic_read(&wrq->done) &&
		    nilfs_cnt32_ge(sci->sc_seq_done, wrq->seq)) {
			wrq->err = err;
			atomic_set(&wrq->done, 1);
		}
		if (atomic_read(&wrq->done)) {
			wrq->wq.func(&wrq->wq,
				     TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE,
				     0, NULL);
		}
	}
	spin_unlock_irqrestore(&sci->sc_wait_request.lock, flags);
}
/**
 * nilfs_construct_segment - construct a logical segment
 * @sb: super block
 *
 * Return Value: On success, 0 is returned. On errors, one of the following
 * negative error codes is returned.
 *
 * %-EROFS - Read only filesystem.
 *
 * %-EIO - I/O error
 *
 * %-ENOSPC - No space left on device (only in a panic state).
 *
 * %-ERESTARTSYS - Interrupted.
 *
 * %-ENOMEM - Insufficient memory available.
 */
int nilfs_construct_segment(struct super_block *sb)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct nilfs_sc_info *sci = nilfs->ns_writer;
	struct nilfs_transaction_info *ti;
	int err;

	if (!sci)
		return -EROFS;

	/* A call inside transactions causes a deadlock. */
	BUG_ON((ti = current->journal_info) && ti->ti_magic == NILFS_TI_MAGIC);

	err = nilfs_segctor_sync(sci);
	return err;
}
/**
 * nilfs_construct_dsync_segment - construct a data-only logical segment
 * @sb: super block
 * @inode: inode whose data blocks should be written out
 * @start: start byte offset
 * @end: end byte offset (inclusive)
 *
 * Return Value: On success, 0 is returned. On errors, one of the following
 * negative error codes is returned.
 *
 * %-EROFS - Read only filesystem.
 *
 * %-EIO - I/O error
 *
 * %-ENOSPC - No space left on device (only in a panic state).
 *
 * %-ERESTARTSYS - Interrupted.
 *
 * %-ENOMEM - Insufficient memory available.
 */
int nilfs_construct_dsync_segment(struct super_block *sb, struct inode *inode,
				  loff_t start, loff_t end)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct nilfs_sc_info *sci = nilfs->ns_writer;
	struct nilfs_inode_info *ii;
	struct nilfs_transaction_info ti;
	int err = 0;

	if (!sci)
		return -EROFS;

	nilfs_transaction_lock(sb, &ti, 0);

	ii = NILFS_I(inode);
	if (test_bit(NILFS_I_INODE_SYNC, &ii->i_state) ||
	    nilfs_test_opt(nilfs, STRICT_ORDER) ||
	    test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags) ||
	    nilfs_discontinued(nilfs)) {
		nilfs_transaction_unlock(sb);
		err = nilfs_segctor_sync(sci);
		return err;
	}

	spin_lock(&nilfs->ns_inode_lock);
	if (!test_bit(NILFS_I_QUEUED, &ii->i_state) &&
	    !test_bit(NILFS_I_BUSY, &ii->i_state)) {
		spin_unlock(&nilfs->ns_inode_lock);
		nilfs_transaction_unlock(sb);
		return 0;
	}
	spin_unlock(&nilfs->ns_inode_lock);
	sci->sc_dsync_inode = ii;
	sci->sc_dsync_start = start;
	sci->sc_dsync_end = end;

	err = nilfs_segctor_do_construct(sci, SC_LSEG_DSYNC);
	if (!err)
		nilfs->ns_flushed_device = 0;

	nilfs_transaction_unlock(sb);
	return err;
}
#define FLUSH_FILE_BIT	(0x1)	/* data file only */
#define FLUSH_DAT_BIT	(1 << NILFS_DAT_INO)	/* DAT only */
/**
 * nilfs_segctor_accept - record accepted sequence count of log-write requests
 * @sci: segment constructor object
 */
static void nilfs_segctor_accept(struct nilfs_sc_info *sci)
{
	spin_lock(&sci->sc_state_lock);
	sci->sc_seq_accepted = sci->sc_seq_request;
	spin_unlock(&sci->sc_state_lock);
	del_timer_sync(&sci->sc_timer);
}
/**
 * nilfs_segctor_notify - notify the result of request to caller threads
 * @sci: segment constructor object
 * @mode: mode of log forming
 * @err: error code to be notified
 */
static void nilfs_segctor_notify(struct nilfs_sc_info *sci, int mode, int err)
{
	/* Clear requests (even when the construction failed) */
	spin_lock(&sci->sc_state_lock);

	if (mode == SC_LSEG_SR) {
		sci->sc_state &= ~NILFS_SEGCTOR_COMMIT;
		sci->sc_seq_done = sci->sc_seq_accepted;
		nilfs_segctor_wakeup(sci, err);
		sci->sc_flush_request = 0;
	} else {
		if (mode == SC_FLUSH_FILE)
			sci->sc_flush_request &= ~FLUSH_FILE_BIT;
		else if (mode == SC_FLUSH_DAT)
			sci->sc_flush_request &= ~FLUSH_DAT_BIT;

		/* re-enable timer if checkpoint creation was not done */
		if ((sci->sc_state & NILFS_SEGCTOR_COMMIT) &&
		    time_before(jiffies, sci->sc_timer.expires))
			add_timer(&sci->sc_timer);
	}
	spin_unlock(&sci->sc_state_lock);
}
/**
 * nilfs_segctor_construct - form logs and write them to disk
 * @sci: segment constructor object
 * @mode: mode of log forming
 */
static int nilfs_segctor_construct(struct nilfs_sc_info *sci, int mode)
{
	struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
	struct nilfs_super_block **sbp;
	int err = 0;

	nilfs_segctor_accept(sci);

	if (nilfs_discontinued(nilfs))
		mode = SC_LSEG_SR;
	if (!nilfs_segctor_confirm(sci))
		err = nilfs_segctor_do_construct(sci, mode);

	if (likely(!err)) {
		if (mode != SC_FLUSH_DAT)
			atomic_set(&nilfs->ns_ndirtyblks, 0);
		if (test_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags) &&
		    nilfs_discontinued(nilfs)) {
			down_write(&nilfs->ns_sem);
			err = -EIO;
			sbp = nilfs_prepare_super(sci->sc_super,
						  nilfs_sb_will_flip(nilfs));
			if (likely(sbp)) {
				nilfs_set_log_cursor(sbp[0], nilfs);
				err = nilfs_commit_super(sci->sc_super,
							 NILFS_SB_COMMIT);
			}
			up_write(&nilfs->ns_sem);
		}
	}

	nilfs_segctor_notify(sci, mode, err);
	return err;
}
static void nilfs_construction_timeout(unsigned long data)
{
	struct task_struct *p = (struct task_struct *)data;

	wake_up_process(p);
}
static void
nilfs_remove_written_gcinodes(struct the_nilfs *nilfs, struct list_head *head)
{
	struct nilfs_inode_info *ii, *n;

	list_for_each_entry_safe(ii, n, head, i_dirty) {
		if (!test_bit(NILFS_I_UPDATED, &ii->i_state))
			continue;
		list_del_init(&ii->i_dirty);
		truncate_inode_pages(&ii->vfs_inode.i_data, 0);
		nilfs_btnode_cache_clear(&ii->i_btnode_cache);
		iput(&ii->vfs_inode);
	}
}
int nilfs_clean_segments(struct super_block *sb, struct nilfs_argv *argv,
			 void **kbufs)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct nilfs_sc_info *sci = nilfs->ns_writer;
	struct nilfs_transaction_info ti;
	int err;

	if (unlikely(!sci))
		return -EROFS;

	nilfs_transaction_lock(sb, &ti, 1);

	err = nilfs_mdt_save_to_shadow_map(nilfs->ns_dat);
	if (unlikely(err))
		goto out_unlock;

	err = nilfs_ioctl_prepare_clean_segments(nilfs, argv, kbufs);
	if (unlikely(err)) {
		nilfs_mdt_restore_from_shadow_map(nilfs->ns_dat);
		goto out_unlock;
	}

	sci->sc_freesegs = kbufs[4];
	sci->sc_nfreesegs = argv[4].v_nmembs;
	list_splice_tail_init(&nilfs->ns_gc_inodes, &sci->sc_gc_inodes);

	for (;;) {
		err = nilfs_segctor_construct(sci, SC_LSEG_SR);
		nilfs_remove_written_gcinodes(nilfs, &sci->sc_gc_inodes);

		if (likely(!err))
			break;

		nilfs_warning(sb, __func__,
			      "segment construction failed. (err=%d)", err);
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(sci->sc_interval);
	}
	if (nilfs_test_opt(nilfs, DISCARD)) {
		int ret = nilfs_discard_segments(nilfs, sci->sc_freesegs,
						 sci->sc_nfreesegs);
		if (ret) {
			printk(KERN_WARNING
			       "NILFS warning: error %d on discard request, "
			       "turning discards off for the device\n", ret);
			nilfs_clear_opt(nilfs, DISCARD);
		}
	}

 out_unlock:
	sci->sc_freesegs = NULL;
	sci->sc_nfreesegs = 0;
	nilfs_mdt_clear_shadow_map(nilfs->ns_dat);
	nilfs_transaction_unlock(sb);
	return err;
}
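
/*
 * Editorial sketch: nilfs_clean_segments() is driven by the cleaner's
 * GC ioctl.  Assuming the conventional argument layout of that ioctl
 * (five vectors, with the numbers of segments to free in slot 4, which
 * is why index 4 is dereferenced above), a hypothetical caller has the
 * shape:
 *
 *	struct nilfs_argv argv[5];
 *	void *kbufs[5];
 *
 *	// ... copy the five vectors in from userspace ...
 *	err = nilfs_clean_segments(sb, argv, kbufs);
 */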
static void nilfs_segctor_thread_construct(struct nilfs_sc_info *sci, int mode)
{
	struct nilfs_transaction_info ti;

	nilfs_transaction_lock(sci->sc_super, &ti, 0);
	nilfs_segctor_construct(sci, mode);

	/*
	 * Unclosed segment should be retried.  We do this using sc_timer.
	 * Timeout of sc_timer will invoke a complete construction, which
	 * closes the current logical segment.
	 */
	if (test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags))
		nilfs_segctor_start_timer(sci);

	nilfs_transaction_unlock(sci->sc_super);
}
static void nilfs_segctor_do_immediate_flush(struct nilfs_sc_info *sci)
{
	int mode = 0;
	int err;

	spin_lock(&sci->sc_state_lock);
	mode = (sci->sc_flush_request & FLUSH_DAT_BIT) ?
		SC_FLUSH_DAT : SC_FLUSH_FILE;
	spin_unlock(&sci->sc_state_lock);

	if (mode) {
		err = nilfs_segctor_do_construct(sci, mode);

		spin_lock(&sci->sc_state_lock);
		sci->sc_flush_request &= (mode == SC_FLUSH_FILE) ?
			~FLUSH_FILE_BIT : ~FLUSH_DAT_BIT;
		spin_unlock(&sci->sc_state_lock);
	}
	clear_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags);
}
static int nilfs_segctor_flush_mode(struct nilfs_sc_info *sci)
{
	if (!test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags) ||
	    time_before(jiffies, sci->sc_lseg_stime + sci->sc_mjcp_freq)) {
		if (!(sci->sc_flush_request & ~FLUSH_FILE_BIT))
			return SC_FLUSH_FILE;
		else if (!(sci->sc_flush_request & ~FLUSH_DAT_BIT))
			return SC_FLUSH_DAT;
	}
	return SC_LSEG_SR;
}
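
/*
 * Editorial summary: the decision above reduces to a small table.
 * While no segment is left unclosed, or the unclosed one is younger
 * than sc_mjcp_freq:
 *
 *	sc_flush_request bits		resulting mode
 *	---------------------		--------------
 *	only FLUSH_FILE_BIT		SC_FLUSH_FILE
 *	only FLUSH_DAT_BIT		SC_FLUSH_DAT
 *	anything else			SC_LSEG_SR (full construction)
 */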
/**
 * nilfs_segctor_thread - main loop of the segment constructor thread.
 * @arg: pointer to a struct nilfs_sc_info.
 *
 * nilfs_segctor_thread() initializes a timer and serves as a daemon
 * to execute segment constructions.
 */
static int nilfs_segctor_thread(void *arg)
{
	struct nilfs_sc_info *sci = (struct nilfs_sc_info *)arg;
	struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
	int timeout = 0;

	sci->sc_timer.data = (unsigned long)current;
	sci->sc_timer.function = nilfs_construction_timeout;

	/* start sync. */
	sci->sc_task = current;
	wake_up(&sci->sc_wait_task); /* for nilfs_segctor_start_thread() */
	printk(KERN_INFO
	       "segctord starting. Construction interval = %lu seconds, "
	       "CP frequency < %lu seconds\n",
	       sci->sc_interval / HZ, sci->sc_mjcp_freq / HZ);

	spin_lock(&sci->sc_state_lock);
 loop:
	for (;;) {
		int mode;

		if (sci->sc_state & NILFS_SEGCTOR_QUIT)
			goto end_thread;

		if (timeout || sci->sc_seq_request != sci->sc_seq_done)
			mode = SC_LSEG_SR;
		else if (!sci->sc_flush_request)
			break;
		else
			mode = nilfs_segctor_flush_mode(sci);

		spin_unlock(&sci->sc_state_lock);
		nilfs_segctor_thread_construct(sci, mode);
		spin_lock(&sci->sc_state_lock);
		timeout = 0;
	}

	if (freezing(current)) {
		spin_unlock(&sci->sc_state_lock);
		try_to_freeze();
		spin_lock(&sci->sc_state_lock);
	} else {
		DEFINE_WAIT(wait);
		int should_sleep = 1;

		prepare_to_wait(&sci->sc_wait_daemon, &wait,
				TASK_INTERRUPTIBLE);

		if (sci->sc_seq_request != sci->sc_seq_done)
			should_sleep = 0;
		else if (sci->sc_flush_request)
			should_sleep = 0;
		else if (sci->sc_state & NILFS_SEGCTOR_COMMIT)
			should_sleep = time_before(jiffies,
					sci->sc_timer.expires);

		if (should_sleep) {
			spin_unlock(&sci->sc_state_lock);
			schedule();
			spin_lock(&sci->sc_state_lock);
		}
		finish_wait(&sci->sc_wait_daemon, &wait);
		timeout = ((sci->sc_state & NILFS_SEGCTOR_COMMIT) &&
			   time_after_eq(jiffies, sci->sc_timer.expires));

		if (nilfs_sb_dirty(nilfs) && nilfs_sb_need_update(nilfs))
			set_nilfs_discontinued(nilfs);
	}
	goto loop;

 end_thread:
	spin_unlock(&sci->sc_state_lock);

	/* end sync. */
	sci->sc_task = NULL;
	wake_up(&sci->sc_wait_task); /* for nilfs_segctor_kill_thread() */
	return 0;
}
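
/*
 * Editorial summary: the loop above only sleeps on sc_wait_daemon when
 * (a) no sync request is pending (sc_seq_request == sc_seq_done),
 * (b) no flush bit is set in sc_flush_request, and (c) either no
 * commit is armed or its timer has not yet expired.  Condition (c) is
 * what eventually forces the full construction that closes a segment
 * left unclosed by nilfs_segctor_thread_construct().
 */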
static int nilfs_segctor_start_thread(struct nilfs_sc_info *sci)
{
	struct task_struct *t;

	t = kthread_run(nilfs_segctor_thread, sci, "segctord");
	if (IS_ERR(t)) {
		int err = PTR_ERR(t);

		printk(KERN_ERR "NILFS: error %d creating segctord thread\n",
		       err);
		return err;
	}
	wait_event(sci->sc_wait_task, sci->sc_task != NULL);
	return 0;
}
static void nilfs_segctor_kill_thread(struct nilfs_sc_info *sci)
	__acquires(&sci->sc_state_lock)
	__releases(&sci->sc_state_lock)
{
	sci->sc_state |= NILFS_SEGCTOR_QUIT;

	while (sci->sc_task) {
		wake_up(&sci->sc_wait_daemon);
		spin_unlock(&sci->sc_state_lock);
		wait_event(sci->sc_wait_task, sci->sc_task == NULL);
		spin_lock(&sci->sc_state_lock);
	}
}
/*
 * Setup & clean-up functions
 */
static struct nilfs_sc_info *nilfs_segctor_new(struct super_block *sb,
					       struct nilfs_root *root)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct nilfs_sc_info *sci;

	sci = kzalloc(sizeof(*sci), GFP_KERNEL);
	if (!sci)
		return NULL;

	sci->sc_super = sb;

	nilfs_get_root(root);
	sci->sc_root = root;

	init_waitqueue_head(&sci->sc_wait_request);
	init_waitqueue_head(&sci->sc_wait_daemon);
	init_waitqueue_head(&sci->sc_wait_task);
	spin_lock_init(&sci->sc_state_lock);
	INIT_LIST_HEAD(&sci->sc_dirty_files);
	INIT_LIST_HEAD(&sci->sc_segbufs);
	INIT_LIST_HEAD(&sci->sc_write_logs);
	INIT_LIST_HEAD(&sci->sc_gc_inodes);
	INIT_LIST_HEAD(&sci->sc_iput_queue);
	INIT_WORK(&sci->sc_iput_work, nilfs_iput_work_func);
	init_timer(&sci->sc_timer);

	sci->sc_interval = HZ * NILFS_SC_DEFAULT_TIMEOUT;
	sci->sc_mjcp_freq = HZ * NILFS_SC_DEFAULT_SR_FREQ;
	sci->sc_watermark = NILFS_SC_DEFAULT_WATERMARK;

	if (nilfs->ns_interval)
		sci->sc_interval = HZ * nilfs->ns_interval;
	if (nilfs->ns_watermark)
		sci->sc_watermark = nilfs->ns_watermark;
	return sci;
}
static void nilfs_segctor_write_out(struct nilfs_sc_info *sci)
{
	int ret, retrycount = NILFS_SC_CLEANUP_RETRY;

	/*
	 * The segctord thread was stopped and its timer was removed,
	 * but some tasks remain.
	 */
	do {
		struct nilfs_transaction_info ti;

		nilfs_transaction_lock(sci->sc_super, &ti, 0);
		ret = nilfs_segctor_construct(sci, SC_LSEG_SR);
		nilfs_transaction_unlock(sci->sc_super);

		flush_work(&sci->sc_iput_work);

	} while (ret && retrycount-- > 0);
}
/**
 * nilfs_segctor_destroy - destroy the segment constructor.
 * @sci: nilfs_sc_info
 *
 * nilfs_segctor_destroy() kills the segctord thread and frees
 * the nilfs_sc_info struct.
 * Caller must hold the segment semaphore.
 */
static void nilfs_segctor_destroy(struct nilfs_sc_info *sci)
{
	struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
	int flag;

	up_write(&nilfs->ns_segctor_sem);

	spin_lock(&sci->sc_state_lock);
	nilfs_segctor_kill_thread(sci);
	flag = ((sci->sc_state & NILFS_SEGCTOR_COMMIT) || sci->sc_flush_request
		|| sci->sc_seq_request != sci->sc_seq_done);
	spin_unlock(&sci->sc_state_lock);

	if (flush_work(&sci->sc_iput_work))
		flag = true;

	if (flag || !nilfs_segctor_confirm(sci))
		nilfs_segctor_write_out(sci);

	if (!list_empty(&sci->sc_dirty_files)) {
		nilfs_warning(sci->sc_super, __func__,
			      "dirty file(s) after the final construction\n");
		nilfs_dispose_list(nilfs, &sci->sc_dirty_files, 1);
	}

	if (!list_empty(&sci->sc_iput_queue)) {
		nilfs_warning(sci->sc_super, __func__,
			      "iput queue is not empty\n");
		nilfs_dispose_list(nilfs, &sci->sc_iput_queue, 1);
	}

	WARN_ON(!list_empty(&sci->sc_segbufs));
	WARN_ON(!list_empty(&sci->sc_write_logs));

	nilfs_put_root(sci->sc_root);

	down_write(&nilfs->ns_segctor_sem);

	del_timer_sync(&sci->sc_timer);
	kfree(sci);
}
/**
 * nilfs_attach_log_writer - attach log writer
 * @sb: super block instance
 * @root: root object of the current filesystem tree
 *
 * This allocates a log writer object, initializes it, and starts the
 * log writer.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-ENOMEM - Insufficient memory available.
 */
int nilfs_attach_log_writer(struct super_block *sb, struct nilfs_root *root)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	int err;

	if (nilfs->ns_writer) {
		/*
		 * This happens if the filesystem was remounted
		 * read/write after nilfs_error degenerated it into a
		 * read-only mount.
		 */
		nilfs_detach_log_writer(sb);
	}

	nilfs->ns_writer = nilfs_segctor_new(sb, root);
	if (!nilfs->ns_writer)
		return -ENOMEM;

	err = nilfs_segctor_start_thread(nilfs->ns_writer);
	if (err) {
		kfree(nilfs->ns_writer);
		nilfs->ns_writer = NULL;
	}
	return err;
}
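
/*
 * Usage sketch (editorial): attach/detach bracket the writable life of
 * a mount.  A hypothetical mount or remount-rw path would pair them
 * like this:
 *
 *	err = nilfs_attach_log_writer(sb, root);
 *	if (err)
 *		goto failed;
 *	// ... filesystem is writable; segctord is running ...
 *	nilfs_detach_log_writer(sb);	// at unmount / remount-ro
 */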
/**
 * nilfs_detach_log_writer - destroy log writer
 * @sb: super block instance
 *
 * This kills the log writer daemon, frees the log writer object, and
 * destroys the list of dirty files.
 */
void nilfs_detach_log_writer(struct super_block *sb)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	LIST_HEAD(garbage_list);

	down_write(&nilfs->ns_segctor_sem);
	if (nilfs->ns_writer) {
		nilfs_segctor_destroy(nilfs->ns_writer);
		nilfs->ns_writer = NULL;
	}

	/* Forcibly free the list of dirty files */
	spin_lock(&nilfs->ns_inode_lock);
	if (!list_empty(&nilfs->ns_dirty_files)) {
		list_splice_init(&nilfs->ns_dirty_files, &garbage_list);
		nilfs_warning(sb, __func__,
			      "Hit dirty file after stopped log writer\n");
	}
	spin_unlock(&nilfs->ns_inode_lock);
	up_write(&nilfs->ns_segctor_sem);

	nilfs_dispose_list(nilfs, &garbage_list, 1);
}