/*
 * segment.c - NILFS segment constructor.
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Written by Ryusuke Konishi <ryusuke@osrg.net>
 */
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/bio.h>
#include <linux/completion.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/crc32.h>
#include <linux/pagevec.h>
#include <linux/slab.h>
#include "nilfs.h"
#include "btnode.h"
#include "page.h"
#include "segment.h"
#include "sufile.h"
#include "cpfile.h"
#include "ifile.h"
#include "segbuf.h"

/*
 * Segment constructor
 */
#define SC_N_INODEVEC	16	/* Size of locally allocated inode vector */

#define SC_MAX_SEGDELTA 64	/* Upper limit of the number of segments
				   appended in collection retry loop */

/* Construction mode */
enum {
	SC_LSEG_SR = 1,	/* Make a logical segment having a super root */
	SC_LSEG_DSYNC,	/* Flush data blocks of a given file and make
			   a logical segment without a super root */
	SC_FLUSH_FILE,	/* Flush data files, leads to segment writes without
			   creating a checkpoint */
	SC_FLUSH_DAT,	/* Flush DAT file.  This also creates segments without
			   a checkpoint */
};

/* Stage numbers of dirty block collection */
enum {
	NILFS_ST_INIT = 0,
	NILFS_ST_GC,	/* Collecting dirty blocks for GC */
	NILFS_ST_FILE,
	NILFS_ST_IFILE,
	NILFS_ST_CPFILE,
	NILFS_ST_SUFILE,
	NILFS_ST_DAT,
	NILFS_ST_SR,	/* Super root */
	NILFS_ST_DSYNC,	/* Data sync blocks */
	NILFS_ST_DONE,
};

/* State flags of collection */
#define NILFS_CF_NODE		0x0001	/* Collecting node blocks */
#define NILFS_CF_IFILE_STARTED	0x0002	/* IFILE stage has started */
#define NILFS_CF_SUFREED	0x0004	/* segment usages have been freed */
#define NILFS_CF_HISTORY_MASK	(NILFS_CF_IFILE_STARTED | NILFS_CF_SUFREED)

/* Operations depending on the construction mode and file type */
struct nilfs_sc_operations {
	int (*collect_data)(struct nilfs_sc_info *, struct buffer_head *,
			    struct inode *);
	int (*collect_node)(struct nilfs_sc_info *, struct buffer_head *,
			    struct inode *);
	int (*collect_bmap)(struct nilfs_sc_info *, struct buffer_head *,
			    struct inode *);
	void (*write_data_binfo)(struct nilfs_sc_info *,
				 struct nilfs_segsum_pointer *,
				 union nilfs_binfo *);
	void (*write_node_binfo)(struct nilfs_sc_info *,
				 struct nilfs_segsum_pointer *,
				 union nilfs_binfo *);
};

/*
 * Other definitions
 */
static void nilfs_segctor_start_timer(struct nilfs_sc_info *);
static void nilfs_segctor_do_flush(struct nilfs_sc_info *, int);
static void nilfs_segctor_do_immediate_flush(struct nilfs_sc_info *);
static void nilfs_dispose_list(struct the_nilfs *, struct list_head *, int);
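
/*
 * Descriptive note: the macros below are wrap-around-safe comparisons of
 * 32-bit sequence counters, analogous to the kernel's time_after()-style
 * jiffies helpers.  Casting the difference to __s32 gives the right answer
 * as long as the two counters are less than 2^31 apart.  They are used
 * further down to compare request and completion sequence numbers of the
 * segment constructor.
 */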
#define nilfs_cnt32_gt(a, b)   \
	(typecheck(__u32, a) && typecheck(__u32, b) &&		\
	 ((__s32)(b) - (__s32)(a) < 0))
#define nilfs_cnt32_ge(a, b)   \
	(typecheck(__u32, a) && typecheck(__u32, b) &&		\
	 ((__s32)(a) - (__s32)(b) >= 0))
#define nilfs_cnt32_lt(a, b)	nilfs_cnt32_gt(b, a)
#define nilfs_cnt32_le(a, b)	nilfs_cnt32_ge(b, a)
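
/*
 * Descriptive note: nilfs_prepare_segment_lock() returns a positive nest
 * count if the current task is already inside a nilfs transaction (only
 * the count is bumped), zero if a fresh transaction context was installed
 * on current->journal_info, or a negative error code on allocation failure.
 */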
static int nilfs_prepare_segment_lock(struct nilfs_transaction_info *ti)
{
	struct nilfs_transaction_info *cur_ti = current->journal_info;
	void *save = NULL;

	if (cur_ti) {
		if (cur_ti->ti_magic == NILFS_TI_MAGIC)
			return ++cur_ti->ti_count;
		else {
			/*
			 * If journal_info field is occupied by other FS,
			 * it is saved and will be restored on
			 * nilfs_transaction_commit().
			 */
			printk(KERN_WARNING
			       "NILFS warning: journal info from a different "
			       "FS\n");
			save = current->journal_info;
		}
	}
	if (!ti) {
		ti = kmem_cache_alloc(nilfs_transaction_cachep, GFP_NOFS);
		if (!ti)
			return -ENOMEM;
		ti->ti_flags = NILFS_TI_DYNAMIC_ALLOC;
	} else {
		ti->ti_flags = 0;
	}
	ti->ti_count = 0;
	ti->ti_save = save;
	ti->ti_magic = NILFS_TI_MAGIC;
	current->journal_info = ti;
	return 0;
}

/**
 * nilfs_transaction_begin - start indivisible file operations.
 * @sb: super block
 * @ti: nilfs_transaction_info
 * @vacancy_check: flags for vacancy rate checks
 *
 * nilfs_transaction_begin() acquires a reader/writer semaphore, called
 * the segment semaphore, to make a segment construction and write tasks
 * exclusive.  The function is used with nilfs_transaction_commit() in pairs.
 * The region enclosed by these two functions can be nested.  To avoid a
 * deadlock, the semaphore is only acquired or released in the outermost call.
 *
 * This function allocates a nilfs_transaction_info struct to keep context
 * information on it.  It is initialized and hooked onto the current task in
 * the outermost call.  If a pre-allocated struct is given to @ti, it is used
 * instead; otherwise a new struct is assigned from a slab.
 *
 * When @vacancy_check flag is set, this function will check the amount of
 * free space, and will wait for the GC to reclaim disk space if low capacity.
 *
 * Return Value: On success, 0 is returned.  On error, one of the following
 * negative error codes is returned.
 *
 * %-ENOMEM - Insufficient memory available.
 *
 * %-ENOSPC - No space left on device
 */
int nilfs_transaction_begin(struct super_block *sb,
			    struct nilfs_transaction_info *ti,
			    int vacancy_check)
{
	struct the_nilfs *nilfs;
	int ret = nilfs_prepare_segment_lock(ti);

	if (unlikely(ret < 0))
		return ret;
	if (ret > 0)
		return 0;

	sb_start_intwrite(sb);

	nilfs = sb->s_fs_info;
	down_read(&nilfs->ns_segctor_sem);
	if (vacancy_check && nilfs_near_disk_full(nilfs)) {
		up_read(&nilfs->ns_segctor_sem);
		ret = -ENOSPC;
		goto failed;
	}
	return 0;

 failed:
	ti = current->journal_info;
	current->journal_info = ti->ti_save;
	if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC)
		kmem_cache_free(nilfs_transaction_cachep, ti);
	sb_end_intwrite(sb);
	return ret;
}
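
/*
 * Illustrative usage of the transaction helpers (a sketch, not code from
 * this file; do_some_update() is a hypothetical operation that dirties
 * blocks):
 *
 *	struct nilfs_transaction_info ti;
 *	int err;
 *
 *	err = nilfs_transaction_begin(sb, &ti, 1);
 *	if (unlikely(err))
 *		return err;
 *	err = do_some_update(inode);
 *	if (err) {
 *		nilfs_transaction_abort(sb);
 *		return err;
 *	}
 *	return nilfs_transaction_commit(sb);
 */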

/**
 * nilfs_transaction_commit - commit indivisible file operations.
 * @sb: super block
 *
 * nilfs_transaction_commit() releases the read semaphore which is
 * acquired by nilfs_transaction_begin().  This is only performed
 * in outermost call of this function.  If a commit flag is set,
 * nilfs_transaction_commit() sets a timer to start the segment
 * constructor.  If a sync flag is set, it starts construction
 * directly.
 */
int nilfs_transaction_commit(struct super_block *sb)
{
	struct nilfs_transaction_info *ti = current->journal_info;
	struct the_nilfs *nilfs = sb->s_fs_info;
	int err = 0;

	BUG_ON(ti == NULL || ti->ti_magic != NILFS_TI_MAGIC);
	ti->ti_flags |= NILFS_TI_COMMIT;
	if (ti->ti_count > 0) {
		ti->ti_count--;
		return 0;
	}
	if (nilfs->ns_writer) {
		struct nilfs_sc_info *sci = nilfs->ns_writer;

		if (ti->ti_flags & NILFS_TI_COMMIT)
			nilfs_segctor_start_timer(sci);
		if (atomic_read(&nilfs->ns_ndirtyblks) > sci->sc_watermark)
			nilfs_segctor_do_flush(sci, 0);
	}
	up_read(&nilfs->ns_segctor_sem);
	current->journal_info = ti->ti_save;

	if (ti->ti_flags & NILFS_TI_SYNC)
		err = nilfs_construct_segment(sb);
	if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC)
		kmem_cache_free(nilfs_transaction_cachep, ti);
	sb_end_intwrite(sb);
	return err;
}

void nilfs_transaction_abort(struct super_block *sb)
{
	struct nilfs_transaction_info *ti = current->journal_info;
	struct the_nilfs *nilfs = sb->s_fs_info;

	BUG_ON(ti == NULL || ti->ti_magic != NILFS_TI_MAGIC);
	if (ti->ti_count > 0) {
		ti->ti_count--;
		return;
	}
	up_read(&nilfs->ns_segctor_sem);

	current->journal_info = ti->ti_save;
	if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC)
		kmem_cache_free(nilfs_transaction_cachep, ti);
	sb_end_intwrite(sb);
}

void nilfs_relax_pressure_in_lock(struct super_block *sb)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct nilfs_sc_info *sci = nilfs->ns_writer;

	if (!sci || !sci->sc_flush_request)
		return;

	set_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags);
	up_read(&nilfs->ns_segctor_sem);

	down_write(&nilfs->ns_segctor_sem);
	if (sci->sc_flush_request &&
	    test_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags)) {
		struct nilfs_transaction_info *ti = current->journal_info;

		ti->ti_flags |= NILFS_TI_WRITER;
		nilfs_segctor_do_immediate_flush(sci);
		ti->ti_flags &= ~NILFS_TI_WRITER;
	}
	downgrade_write(&nilfs->ns_segctor_sem);
}
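
/*
 * Descriptive note: nilfs_transaction_lock()/nilfs_transaction_unlock()
 * are the writer-side counterparts of the helpers above.  They take the
 * segment semaphore in write mode on behalf of the segment constructor,
 * retrying while a prioritized flush request (NILFS_SC_PRIOR_FLUSH) is
 * pending so that nilfs_relax_pressure_in_lock() can make progress first.
 */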
static void nilfs_transaction_lock(struct super_block *sb,
				   struct nilfs_transaction_info *ti,
				   int gcflag)
{
	struct nilfs_transaction_info *cur_ti = current->journal_info;
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct nilfs_sc_info *sci = nilfs->ns_writer;

	WARN_ON(cur_ti);
	ti->ti_flags = NILFS_TI_WRITER;
	ti->ti_count = 0;
	ti->ti_save = cur_ti;
	ti->ti_magic = NILFS_TI_MAGIC;
	INIT_LIST_HEAD(&ti->ti_garbage);
	current->journal_info = ti;

	for (;;) {
		down_write(&nilfs->ns_segctor_sem);
		if (!test_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags))
			break;

		nilfs_segctor_do_immediate_flush(sci);

		up_write(&nilfs->ns_segctor_sem);
		yield();
	}
	if (gcflag)
		ti->ti_flags |= NILFS_TI_GC;
}

static void nilfs_transaction_unlock(struct super_block *sb)
{
	struct nilfs_transaction_info *ti = current->journal_info;
	struct the_nilfs *nilfs = sb->s_fs_info;

	BUG_ON(ti == NULL || ti->ti_magic != NILFS_TI_MAGIC);
	BUG_ON(ti->ti_count > 0);

	up_write(&nilfs->ns_segctor_sem);
	current->journal_info = ti->ti_save;
	if (!list_empty(&ti->ti_garbage))
		nilfs_dispose_list(nilfs, &ti->ti_garbage, 0);
}

static void *nilfs_segctor_map_segsum_entry(struct nilfs_sc_info *sci,
					    struct nilfs_segsum_pointer *ssp,
					    unsigned bytes)
{
	struct nilfs_segment_buffer *segbuf = sci->sc_curseg;
	unsigned blocksize = sci->sc_super->s_blocksize;
	void *p;

	if (unlikely(ssp->offset + bytes > blocksize)) {
		ssp->offset = 0;
		BUG_ON(NILFS_SEGBUF_BH_IS_LAST(ssp->bh,
					       &segbuf->sb_segsum_buffers));
		ssp->bh = NILFS_SEGBUF_NEXT_BH(ssp->bh);
	}
	p = ssp->bh->b_data + ssp->offset;
	ssp->offset += bytes;
	return p;
}

/**
 * nilfs_segctor_reset_segment_buffer - reset the current segment buffer
 * @sci: nilfs_sc_info
 */
static int nilfs_segctor_reset_segment_buffer(struct nilfs_sc_info *sci)
{
	struct nilfs_segment_buffer *segbuf = sci->sc_curseg;
	struct buffer_head *sumbh;
	unsigned sumbytes;
	unsigned flags = 0;
	int err;

	if (nilfs_doing_gc())
		flags = NILFS_SS_GC;
	err = nilfs_segbuf_reset(segbuf, flags, sci->sc_seg_ctime, sci->sc_cno);
	if (unlikely(err))
		return err;

	sumbh = NILFS_SEGBUF_FIRST_BH(&segbuf->sb_segsum_buffers);
	sumbytes = segbuf->sb_sum.sumbytes;
	sci->sc_finfo_ptr.bh = sumbh;  sci->sc_finfo_ptr.offset = sumbytes;
	sci->sc_binfo_ptr.bh = sumbh;  sci->sc_binfo_ptr.offset = sumbytes;
	sci->sc_blk_cnt = sci->sc_datablk_cnt = 0;
	return 0;
}

static int nilfs_segctor_feed_segment(struct nilfs_sc_info *sci)
{
	sci->sc_nblk_this_inc += sci->sc_curseg->sb_sum.nblocks;
	if (NILFS_SEGBUF_IS_LAST(sci->sc_curseg, &sci->sc_segbufs))
		return -E2BIG; /* The current segment is filled up
				  (internal code) */
	sci->sc_curseg = NILFS_NEXT_SEGBUF(sci->sc_curseg);
	return nilfs_segctor_reset_segment_buffer(sci);
}

static int nilfs_segctor_add_super_root(struct nilfs_sc_info *sci)
{
	struct nilfs_segment_buffer *segbuf = sci->sc_curseg;
	int err;

	if (segbuf->sb_sum.nblocks >= segbuf->sb_rest_blocks) {
		err = nilfs_segctor_feed_segment(sci);
		if (err)
			return err;
		segbuf = sci->sc_curseg;
	}
	err = nilfs_segbuf_extend_payload(segbuf, &segbuf->sb_super_root);
	if (likely(!err))
		segbuf->sb_sum.flags |= NILFS_SS_SR;
	return err;
}

/*
 * Functions for making segment summary and payloads
 */
static int nilfs_segctor_segsum_block_required(
	struct nilfs_sc_info *sci, const struct nilfs_segsum_pointer *ssp,
	unsigned binfo_size)
{
	unsigned blocksize = sci->sc_super->s_blocksize;
	/* Sizes of finfo and binfo are small enough relative to blocksize */

	return ssp->offset + binfo_size +
		(!sci->sc_blk_cnt ? sizeof(struct nilfs_finfo) : 0) >
		blocksize;
}

static void nilfs_segctor_begin_finfo(struct nilfs_sc_info *sci,
				      struct inode *inode)
{
	sci->sc_curseg->sb_sum.nfinfo++;
	sci->sc_binfo_ptr = sci->sc_finfo_ptr;
	nilfs_segctor_map_segsum_entry(
		sci, &sci->sc_binfo_ptr, sizeof(struct nilfs_finfo));

	if (NILFS_I(inode)->i_root &&
	    !test_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags))
		set_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags);
	/* skip finfo */
}

static void nilfs_segctor_end_finfo(struct nilfs_sc_info *sci,
				    struct inode *inode)
{
	struct nilfs_finfo *finfo;
	struct nilfs_inode_info *ii;
	struct nilfs_segment_buffer *segbuf;
	__u64 cno;

	if (sci->sc_blk_cnt == 0)
		return;

	ii = NILFS_I(inode);

	if (test_bit(NILFS_I_GCINODE, &ii->i_state))
		cno = ii->i_cno;
	else if (NILFS_ROOT_METADATA_FILE(inode->i_ino))
		cno = 0;
	else
		cno = sci->sc_cno;

	finfo = nilfs_segctor_map_segsum_entry(sci, &sci->sc_finfo_ptr,
					       sizeof(*finfo));
	finfo->fi_ino = cpu_to_le64(inode->i_ino);
	finfo->fi_nblocks = cpu_to_le32(sci->sc_blk_cnt);
	finfo->fi_ndatablk = cpu_to_le32(sci->sc_datablk_cnt);
	finfo->fi_cno = cpu_to_le64(cno);

	segbuf = sci->sc_curseg;
	segbuf->sb_sum.sumbytes = sci->sc_binfo_ptr.offset +
		sci->sc_super->s_blocksize * (segbuf->sb_sum.nsumblk - 1);
	sci->sc_finfo_ptr = sci->sc_binfo_ptr;
	sci->sc_blk_cnt = sci->sc_datablk_cnt = 0;
}

static int nilfs_segctor_add_file_block(struct nilfs_sc_info *sci,
					struct buffer_head *bh,
					struct inode *inode,
					unsigned binfo_size)
{
	struct nilfs_segment_buffer *segbuf;
	int required, err = 0;

 retry:
	segbuf = sci->sc_curseg;
	required = nilfs_segctor_segsum_block_required(
		sci, &sci->sc_binfo_ptr, binfo_size);
	if (segbuf->sb_sum.nblocks + required + 1 > segbuf->sb_rest_blocks) {
		nilfs_segctor_end_finfo(sci, inode);
		err = nilfs_segctor_feed_segment(sci);
		if (err)
			return err;
		goto retry;
	}
	if (unlikely(required)) {
		err = nilfs_segbuf_extend_segsum(segbuf);
		if (unlikely(err))
			goto failed;
	}
	if (sci->sc_blk_cnt == 0)
		nilfs_segctor_begin_finfo(sci, inode);

	nilfs_segctor_map_segsum_entry(sci, &sci->sc_binfo_ptr, binfo_size);
	/* Substitution to vblocknr is delayed until update_blocknr() */
	nilfs_segbuf_add_file_buffer(segbuf, bh);
	sci->sc_blk_cnt++;
 failed:
	return err;
}

/*
 * Callback functions that enumerate, mark, and collect dirty blocks
 */
static int nilfs_collect_file_data(struct nilfs_sc_info *sci,
				   struct buffer_head *bh, struct inode *inode)
{
	int err;

	err = nilfs_bmap_propagate(NILFS_I(inode)->i_bmap, bh);
	if (err < 0)
		return err;

	err = nilfs_segctor_add_file_block(sci, bh, inode,
					   sizeof(struct nilfs_binfo_v));
	if (!err)
		sci->sc_datablk_cnt++;
	return err;
}

static int nilfs_collect_file_node(struct nilfs_sc_info *sci,
				   struct buffer_head *bh,
				   struct inode *inode)
{
	return nilfs_bmap_propagate(NILFS_I(inode)->i_bmap, bh);
}

static int nilfs_collect_file_bmap(struct nilfs_sc_info *sci,
				   struct buffer_head *bh,
				   struct inode *inode)
{
	WARN_ON(!buffer_dirty(bh));
	return nilfs_segctor_add_file_block(sci, bh, inode, sizeof(__le64));
}

static void nilfs_write_file_data_binfo(struct nilfs_sc_info *sci,
					struct nilfs_segsum_pointer *ssp,
					union nilfs_binfo *binfo)
{
	struct nilfs_binfo_v *binfo_v = nilfs_segctor_map_segsum_entry(
		sci, ssp, sizeof(*binfo_v));
	*binfo_v = binfo->bi_v;
}

static void nilfs_write_file_node_binfo(struct nilfs_sc_info *sci,
					struct nilfs_segsum_pointer *ssp,
					union nilfs_binfo *binfo)
{
	__le64 *vblocknr = nilfs_segctor_map_segsum_entry(
		sci, ssp, sizeof(*vblocknr));
	*vblocknr = binfo->bi_v.bi_vblocknr;
}

static struct nilfs_sc_operations nilfs_sc_file_ops = {
	.collect_data = nilfs_collect_file_data,
	.collect_node = nilfs_collect_file_node,
	.collect_bmap = nilfs_collect_file_bmap,
	.write_data_binfo = nilfs_write_file_data_binfo,
	.write_node_binfo = nilfs_write_file_node_binfo,
};

static int nilfs_collect_dat_data(struct nilfs_sc_info *sci,
				  struct buffer_head *bh, struct inode *inode)
{
	int err;

	err = nilfs_bmap_propagate(NILFS_I(inode)->i_bmap, bh);
	if (err < 0)
		return err;

	err = nilfs_segctor_add_file_block(sci, bh, inode, sizeof(__le64));
	if (!err)
		sci->sc_datablk_cnt++;
	return err;
}

static int nilfs_collect_dat_bmap(struct nilfs_sc_info *sci,
				  struct buffer_head *bh, struct inode *inode)
{
	WARN_ON(!buffer_dirty(bh));
	return nilfs_segctor_add_file_block(sci, bh, inode,
					    sizeof(struct nilfs_binfo_dat));
}

static void nilfs_write_dat_data_binfo(struct nilfs_sc_info *sci,
				       struct nilfs_segsum_pointer *ssp,
				       union nilfs_binfo *binfo)
{
	__le64 *blkoff = nilfs_segctor_map_segsum_entry(sci, ssp,
							sizeof(*blkoff));
	*blkoff = binfo->bi_dat.bi_blkoff;
}

static void nilfs_write_dat_node_binfo(struct nilfs_sc_info *sci,
				       struct nilfs_segsum_pointer *ssp,
				       union nilfs_binfo *binfo)
{
	struct nilfs_binfo_dat *binfo_dat =
		nilfs_segctor_map_segsum_entry(sci, ssp, sizeof(*binfo_dat));
	*binfo_dat = binfo->bi_dat;
}

static struct nilfs_sc_operations nilfs_sc_dat_ops = {
	.collect_data = nilfs_collect_dat_data,
	.collect_node = nilfs_collect_file_node,
	.collect_bmap = nilfs_collect_dat_bmap,
	.write_data_binfo = nilfs_write_dat_data_binfo,
	.write_node_binfo = nilfs_write_dat_node_binfo,
};

static struct nilfs_sc_operations nilfs_sc_dsync_ops = {
	.collect_data = nilfs_collect_file_data,
	.collect_node = NULL,
	.collect_bmap = NULL,
	.write_data_binfo = nilfs_write_file_data_binfo,
	.write_node_binfo = NULL,
};
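
/*
 * Summary of the three operation vectors above (descriptive note):
 * nilfs_sc_file_ops handles regular files and most metadata files and
 * records virtual block numbers in the segment summary,
 * nilfs_sc_dat_ops handles the DAT file, whose blocks are not
 * virtualized and therefore record raw disk block information, and
 * nilfs_sc_dsync_ops is the reduced set used for data-only sync, which
 * collects no node or bmap blocks at all.
 */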
static size_t nilfs_lookup_dirty_data_buffers(struct inode *inode,
					      struct list_head *listp,
					      size_t nlimit,
					      loff_t start, loff_t end)
{
	struct address_space *mapping = inode->i_mapping;
	struct pagevec pvec;
	pgoff_t index = 0, last = ULONG_MAX;
	size_t ndirties = 0;
	int i;

	if (unlikely(start != 0 || end != LLONG_MAX)) {
		/*
		 * A valid range is given for sync-ing data pages. The
		 * range is rounded to per-page; extra dirty buffers
		 * may be included if blocksize < pagesize.
		 */
		index = start >> PAGE_SHIFT;
		last = end >> PAGE_SHIFT;
	}
	pagevec_init(&pvec, 0);
 repeat:
	if (unlikely(index > last) ||
	    !pagevec_lookup_tag(&pvec, mapping, &index, PAGECACHE_TAG_DIRTY,
				min_t(pgoff_t, last - index,
				      PAGEVEC_SIZE - 1) + 1))
		return ndirties;

	for (i = 0; i < pagevec_count(&pvec); i++) {
		struct buffer_head *bh, *head;
		struct page *page = pvec.pages[i];

		if (unlikely(page->index > last))
			break;

		lock_page(page);
		if (!page_has_buffers(page))
			create_empty_buffers(page, 1 << inode->i_blkbits, 0);
		unlock_page(page);

		bh = head = page_buffers(page);
		do {
			if (!buffer_dirty(bh) || buffer_async_write(bh))
				continue;
			get_bh(bh);
			list_add_tail(&bh->b_assoc_buffers, listp);
			ndirties++;
			if (unlikely(ndirties >= nlimit)) {
				pagevec_release(&pvec);
				cond_resched();
				return ndirties;
			}
		} while (bh = bh->b_this_page, bh != head);
	}
	pagevec_release(&pvec);
	cond_resched();
	goto repeat;
}

static void nilfs_lookup_dirty_node_buffers(struct inode *inode,
					    struct list_head *listp)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct address_space *mapping = &ii->i_btnode_cache;
	struct pagevec pvec;
	struct buffer_head *bh, *head;
	unsigned int i;
	pgoff_t index = 0;

	pagevec_init(&pvec, 0);

	while (pagevec_lookup_tag(&pvec, mapping, &index, PAGECACHE_TAG_DIRTY,
				  PAGEVEC_SIZE)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			bh = head = page_buffers(pvec.pages[i]);
			do {
				if (buffer_dirty(bh) &&
				    !buffer_async_write(bh)) {
					get_bh(bh);
					list_add_tail(&bh->b_assoc_buffers,
						      listp);
				}
				bh = bh->b_this_page;
			} while (bh != head);
		}
		pagevec_release(&pvec);
		cond_resched();
	}
}

static void nilfs_dispose_list(struct the_nilfs *nilfs,
			       struct list_head *head, int force)
{
	struct nilfs_inode_info *ii, *n;
	struct nilfs_inode_info *ivec[SC_N_INODEVEC], **pii;
	unsigned nv = 0;

	while (!list_empty(head)) {
		spin_lock(&nilfs->ns_inode_lock);
		list_for_each_entry_safe(ii, n, head, i_dirty) {
			list_del_init(&ii->i_dirty);
			if (force) {
				if (unlikely(ii->i_bh)) {
					brelse(ii->i_bh);
					ii->i_bh = NULL;
				}
			} else if (test_bit(NILFS_I_DIRTY, &ii->i_state)) {
				set_bit(NILFS_I_QUEUED, &ii->i_state);
				list_add_tail(&ii->i_dirty,
					      &nilfs->ns_dirty_files);
				continue;
			}
			ivec[nv++] = ii;
			if (nv == SC_N_INODEVEC)
				break;
		}
		spin_unlock(&nilfs->ns_inode_lock);

		for (pii = ivec; nv > 0; pii++, nv--)
			iput(&(*pii)->vfs_inode);
	}
}

static int nilfs_test_metadata_dirty(struct the_nilfs *nilfs,
				     struct nilfs_root *root)
{
	int ret = 0;

	if (nilfs_mdt_fetch_dirty(root->ifile))
		ret++;
	if (nilfs_mdt_fetch_dirty(nilfs->ns_cpfile))
		ret++;
	if (nilfs_mdt_fetch_dirty(nilfs->ns_sufile))
		ret++;
	if ((ret || nilfs_doing_gc()) && nilfs_mdt_fetch_dirty(nilfs->ns_dat))
		ret++;
	return ret;
}

static int nilfs_segctor_clean(struct nilfs_sc_info *sci)
{
	return list_empty(&sci->sc_dirty_files) &&
		!test_bit(NILFS_SC_DIRTY, &sci->sc_flags) &&
		sci->sc_nfreesegs == 0 &&
		(!nilfs_doing_gc() || list_empty(&sci->sc_gc_inodes));
}

static int nilfs_segctor_confirm(struct nilfs_sc_info *sci)
{
	struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
	int ret = 0;

	if (nilfs_test_metadata_dirty(nilfs, sci->sc_root))
		set_bit(NILFS_SC_DIRTY, &sci->sc_flags);

	spin_lock(&nilfs->ns_inode_lock);
	if (list_empty(&nilfs->ns_dirty_files) && nilfs_segctor_clean(sci))
		ret++;

	spin_unlock(&nilfs->ns_inode_lock);
	return ret;
}

static void nilfs_segctor_clear_metadata_dirty(struct nilfs_sc_info *sci)
{
	struct the_nilfs *nilfs = sci->sc_super->s_fs_info;

	nilfs_mdt_clear_dirty(sci->sc_root->ifile);
	nilfs_mdt_clear_dirty(nilfs->ns_cpfile);
	nilfs_mdt_clear_dirty(nilfs->ns_sufile);
	nilfs_mdt_clear_dirty(nilfs->ns_dat);
}

static int nilfs_segctor_create_checkpoint(struct nilfs_sc_info *sci)
{
	struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
	struct buffer_head *bh_cp;
	struct nilfs_checkpoint *raw_cp;
	int err;

	/* XXX: this interface will be changed */
	err = nilfs_cpfile_get_checkpoint(nilfs->ns_cpfile, nilfs->ns_cno, 1,
					  &raw_cp, &bh_cp);
	if (likely(!err)) {
		/*
		 * The following code is duplicated with cpfile.  But, it is
		 * needed to collect the checkpoint even if it was not newly
		 * created.
		 */
		mark_buffer_dirty(bh_cp);
		nilfs_mdt_mark_dirty(nilfs->ns_cpfile);
		nilfs_cpfile_put_checkpoint(
			nilfs->ns_cpfile, nilfs->ns_cno, bh_cp);
	} else
		WARN_ON(err == -EINVAL || err == -ENOENT);

	return err;
}

static int nilfs_segctor_fill_in_checkpoint(struct nilfs_sc_info *sci)
{
	struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
	struct buffer_head *bh_cp;
	struct nilfs_checkpoint *raw_cp;
	int err;

	err = nilfs_cpfile_get_checkpoint(nilfs->ns_cpfile, nilfs->ns_cno, 0,
					  &raw_cp, &bh_cp);
	if (unlikely(err)) {
		WARN_ON(err == -EINVAL || err == -ENOENT);
		goto failed_ibh;
	}
	raw_cp->cp_snapshot_list.ssl_next = 0;
	raw_cp->cp_snapshot_list.ssl_prev = 0;
	raw_cp->cp_inodes_count =
		cpu_to_le64(atomic64_read(&sci->sc_root->inodes_count));
	raw_cp->cp_blocks_count =
		cpu_to_le64(atomic64_read(&sci->sc_root->blocks_count));
	raw_cp->cp_nblk_inc =
		cpu_to_le64(sci->sc_nblk_inc + sci->sc_nblk_this_inc);
	raw_cp->cp_create = cpu_to_le64(sci->sc_seg_ctime);
	raw_cp->cp_cno = cpu_to_le64(nilfs->ns_cno);

	if (test_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags))
		nilfs_checkpoint_clear_minor(raw_cp);
	else
		nilfs_checkpoint_set_minor(raw_cp);

	nilfs_write_inode_common(sci->sc_root->ifile,
				 &raw_cp->cp_ifile_inode, 1);
	nilfs_cpfile_put_checkpoint(nilfs->ns_cpfile, nilfs->ns_cno, bh_cp);
	return 0;

 failed_ibh:
	return err;
}

static void nilfs_fill_in_file_bmap(struct inode *ifile,
				    struct nilfs_inode_info *ii)
{
	struct buffer_head *ibh;
	struct nilfs_inode *raw_inode;

	if (test_bit(NILFS_I_BMAP, &ii->i_state)) {
		ibh = ii->i_bh;
		BUG_ON(!ibh);
		raw_inode = nilfs_ifile_map_inode(ifile, ii->vfs_inode.i_ino,
						  ibh);
		nilfs_bmap_write(ii->i_bmap, raw_inode);
		nilfs_ifile_unmap_inode(ifile, ii->vfs_inode.i_ino, ibh);
	}
}

static void nilfs_segctor_fill_in_file_bmap(struct nilfs_sc_info *sci)
{
	struct nilfs_inode_info *ii;

	list_for_each_entry(ii, &sci->sc_dirty_files, i_dirty) {
		nilfs_fill_in_file_bmap(sci->sc_root->ifile, ii);
		set_bit(NILFS_I_COLLECTED, &ii->i_state);
	}
}
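
/*
 * Descriptive note: the super root block written below packs the on-disk
 * inodes of the three core metadata files (DAT, cpfile and sufile) at
 * fixed offsets derived from the inode size; see NILFS_SR_DAT_OFFSET()
 * and friends.  The remainder of the block is zero-filled so that the
 * log checksum is computed over fully defined contents.
 */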
static void nilfs_segctor_fill_in_super_root(struct nilfs_sc_info *sci,
					     struct the_nilfs *nilfs)
{
	struct buffer_head *bh_sr;
	struct nilfs_super_root *raw_sr;
	unsigned isz, srsz;

	bh_sr = NILFS_LAST_SEGBUF(&sci->sc_segbufs)->sb_super_root;
	raw_sr = (struct nilfs_super_root *)bh_sr->b_data;
	isz = nilfs->ns_inode_size;
	srsz = NILFS_SR_BYTES(isz);

	raw_sr->sr_bytes = cpu_to_le16(srsz);
	raw_sr->sr_nongc_ctime
		= cpu_to_le64(nilfs_doing_gc() ?
			      nilfs->ns_nongc_ctime : sci->sc_seg_ctime);
	raw_sr->sr_flags = 0;

	nilfs_write_inode_common(nilfs->ns_dat, (void *)raw_sr +
				 NILFS_SR_DAT_OFFSET(isz), 1);
	nilfs_write_inode_common(nilfs->ns_cpfile, (void *)raw_sr +
				 NILFS_SR_CPFILE_OFFSET(isz), 1);
	nilfs_write_inode_common(nilfs->ns_sufile, (void *)raw_sr +
				 NILFS_SR_SUFILE_OFFSET(isz), 1);
	memset((void *)raw_sr + srsz, 0, nilfs->ns_blocksize - srsz);
}

static void nilfs_redirty_inodes(struct list_head *head)
{
	struct nilfs_inode_info *ii;

	list_for_each_entry(ii, head, i_dirty) {
		if (test_bit(NILFS_I_COLLECTED, &ii->i_state))
			clear_bit(NILFS_I_COLLECTED, &ii->i_state);
	}
}

static void nilfs_drop_collected_inodes(struct list_head *head)
{
	struct nilfs_inode_info *ii;

	list_for_each_entry(ii, head, i_dirty) {
		if (!test_and_clear_bit(NILFS_I_COLLECTED, &ii->i_state))
			continue;

		clear_bit(NILFS_I_INODE_DIRTY, &ii->i_state);
		set_bit(NILFS_I_UPDATED, &ii->i_state);
	}
}

static int nilfs_segctor_apply_buffers(struct nilfs_sc_info *sci,
				       struct inode *inode,
				       struct list_head *listp,
				       int (*collect)(struct nilfs_sc_info *,
						      struct buffer_head *,
						      struct inode *))
{
	struct buffer_head *bh, *n;
	int err = 0;

	if (collect) {
		list_for_each_entry_safe(bh, n, listp, b_assoc_buffers) {
			list_del_init(&bh->b_assoc_buffers);
			err = collect(sci, bh, inode);
			brelse(bh);
			if (unlikely(err))
				goto dispose_buffers;
		}
		return 0;
	}

 dispose_buffers:
	while (!list_empty(listp)) {
		bh = list_first_entry(listp, struct buffer_head,
				      b_assoc_buffers);
		list_del_init(&bh->b_assoc_buffers);
		brelse(bh);
	}
	return err;
}

static size_t nilfs_segctor_buffer_rest(struct nilfs_sc_info *sci)
{
	/* Remaining number of blocks within segment buffer */
	return sci->sc_segbuf_nblocks -
		(sci->sc_nblk_this_inc + sci->sc_curseg->sb_sum.nblocks);
}

static int nilfs_segctor_scan_file(struct nilfs_sc_info *sci,
				   struct inode *inode,
				   struct nilfs_sc_operations *sc_ops)
{
	LIST_HEAD(data_buffers);
	LIST_HEAD(node_buffers);
	int err;

	if (!(sci->sc_stage.flags & NILFS_CF_NODE)) {
		size_t n, rest = nilfs_segctor_buffer_rest(sci);

		n = nilfs_lookup_dirty_data_buffers(
			inode, &data_buffers, rest + 1, 0, LLONG_MAX);
		if (n > rest) {
			err = nilfs_segctor_apply_buffers(
				sci, inode, &data_buffers,
				sc_ops->collect_data);
			BUG_ON(!err); /* always receive -E2BIG or true error */
			goto break_or_fail;
		}
	}
	nilfs_lookup_dirty_node_buffers(inode, &node_buffers);

	if (!(sci->sc_stage.flags & NILFS_CF_NODE)) {
		err = nilfs_segctor_apply_buffers(
			sci, inode, &data_buffers, sc_ops->collect_data);
		if (unlikely(err)) {
			/* dispose node list */
			nilfs_segctor_apply_buffers(
				sci, inode, &node_buffers, NULL);
			goto break_or_fail;
		}
		sci->sc_stage.flags |= NILFS_CF_NODE;
	}
	/* Collect node */
	err = nilfs_segctor_apply_buffers(
		sci, inode, &node_buffers, sc_ops->collect_node);
	if (unlikely(err))
		goto break_or_fail;

	nilfs_bmap_lookup_dirty_buffers(NILFS_I(inode)->i_bmap, &node_buffers);
	err = nilfs_segctor_apply_buffers(
		sci, inode, &node_buffers, sc_ops->collect_bmap);
	if (unlikely(err))
		goto break_or_fail;

	nilfs_segctor_end_finfo(sci, inode);
	sci->sc_stage.flags &= ~NILFS_CF_NODE;

 break_or_fail:
	return err;
}

static int nilfs_segctor_scan_file_dsync(struct nilfs_sc_info *sci,
					 struct inode *inode)
{
	LIST_HEAD(data_buffers);
	size_t n, rest = nilfs_segctor_buffer_rest(sci);
	int err;

	n = nilfs_lookup_dirty_data_buffers(inode, &data_buffers, rest + 1,
					    sci->sc_dsync_start,
					    sci->sc_dsync_end);

	err = nilfs_segctor_apply_buffers(sci, inode, &data_buffers,
					  nilfs_collect_file_data);
	if (!err) {
		nilfs_segctor_end_finfo(sci, inode);
		BUG_ON(n > rest);
		/* always receive -E2BIG or true error if n > rest */
	}
	return err;
}
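
/*
 * Descriptive note: nilfs_segctor_collect_blocks() is a resumable state
 * machine.  Each NILFS_ST_* stage enumerates one class of dirty blocks
 * (GC inodes, regular files, ifile, cpfile, sufile, DAT, then the super
 * root).  When the current segment chain fills up it returns -E2BIG with
 * its position (scnt, dirty_file_ptr, gc_inode_ptr) preserved in
 * sc_stage, so a later pass can continue from where this one stopped.
 */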
static int nilfs_segctor_collect_blocks(struct nilfs_sc_info *sci, int mode)
{
	struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
	struct list_head *head;
	struct nilfs_inode_info *ii;
	size_t ndone;
	int err = 0;

	switch (sci->sc_stage.scnt) {
	case NILFS_ST_INIT:
		/* Pre-processes */
		sci->sc_stage.flags = 0;

		if (!test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags)) {
			sci->sc_nblk_inc = 0;
			sci->sc_curseg->sb_sum.flags = NILFS_SS_LOGBGN;
			if (mode == SC_LSEG_DSYNC) {
				sci->sc_stage.scnt = NILFS_ST_DSYNC;
				goto dsync_mode;
			}
		}

		sci->sc_stage.dirty_file_ptr = NULL;
		sci->sc_stage.gc_inode_ptr = NULL;
		if (mode == SC_FLUSH_DAT) {
			sci->sc_stage.scnt = NILFS_ST_DAT;
			goto dat_stage;
		}
		sci->sc_stage.scnt++;  /* Fall through */
	case NILFS_ST_GC:
		if (nilfs_doing_gc()) {
			head = &sci->sc_gc_inodes;
			ii = list_prepare_entry(sci->sc_stage.gc_inode_ptr,
						head, i_dirty);
			list_for_each_entry_continue(ii, head, i_dirty) {
				err = nilfs_segctor_scan_file(
					sci, &ii->vfs_inode,
					&nilfs_sc_file_ops);
				if (unlikely(err)) {
					sci->sc_stage.gc_inode_ptr = list_entry(
						ii->i_dirty.prev,
						struct nilfs_inode_info,
						i_dirty);
					goto break_or_fail;
				}
				set_bit(NILFS_I_COLLECTED, &ii->i_state);
			}
			sci->sc_stage.gc_inode_ptr = NULL;
		}
		sci->sc_stage.scnt++;  /* Fall through */
	case NILFS_ST_FILE:
		head = &sci->sc_dirty_files;
		ii = list_prepare_entry(sci->sc_stage.dirty_file_ptr, head,
					i_dirty);
		list_for_each_entry_continue(ii, head, i_dirty) {
			clear_bit(NILFS_I_DIRTY, &ii->i_state);

			err = nilfs_segctor_scan_file(sci, &ii->vfs_inode,
						      &nilfs_sc_file_ops);
			if (unlikely(err)) {
				sci->sc_stage.dirty_file_ptr =
					list_entry(ii->i_dirty.prev,
						   struct nilfs_inode_info,
						   i_dirty);
				goto break_or_fail;
			}
			/* sci->sc_stage.dirty_file_ptr = NILFS_I(inode); */
			/* XXX: required ? */
		}
		sci->sc_stage.dirty_file_ptr = NULL;
		if (mode == SC_FLUSH_FILE) {
			sci->sc_stage.scnt = NILFS_ST_DONE;
			return 0;
		}
		sci->sc_stage.scnt++;
		sci->sc_stage.flags |= NILFS_CF_IFILE_STARTED;
		/* Fall through */
	case NILFS_ST_IFILE:
		err = nilfs_segctor_scan_file(sci, sci->sc_root->ifile,
					      &nilfs_sc_file_ops);
		if (unlikely(err))
			break;
		sci->sc_stage.scnt++;
		/* Creating a checkpoint */
		err = nilfs_segctor_create_checkpoint(sci);
		if (unlikely(err))
			break;
		/* Fall through */
	case NILFS_ST_CPFILE:
		err = nilfs_segctor_scan_file(sci, nilfs->ns_cpfile,
					      &nilfs_sc_file_ops);
		if (unlikely(err))
			break;
		sci->sc_stage.scnt++;  /* Fall through */
	case NILFS_ST_SUFILE:
		err = nilfs_sufile_freev(nilfs->ns_sufile, sci->sc_freesegs,
					 sci->sc_nfreesegs, &ndone);
		if (unlikely(err)) {
			nilfs_sufile_cancel_freev(nilfs->ns_sufile,
						  sci->sc_freesegs, ndone,
						  NULL);
			break;
		}
		sci->sc_stage.flags |= NILFS_CF_SUFREED;

		err = nilfs_segctor_scan_file(sci, nilfs->ns_sufile,
					      &nilfs_sc_file_ops);
		if (unlikely(err))
			break;
		sci->sc_stage.scnt++;  /* Fall through */
	case NILFS_ST_DAT:
 dat_stage:
		err = nilfs_segctor_scan_file(sci, nilfs->ns_dat,
					      &nilfs_sc_dat_ops);
		if (unlikely(err))
			break;
		if (mode == SC_FLUSH_DAT) {
			sci->sc_stage.scnt = NILFS_ST_DONE;
			return 0;
		}
		sci->sc_stage.scnt++;  /* Fall through */
	case NILFS_ST_SR:
		if (mode == SC_LSEG_SR) {
			/* Appending a super root */
			err = nilfs_segctor_add_super_root(sci);
			if (unlikely(err))
				break;
		}
		/* End of a logical segment */
		sci->sc_curseg->sb_sum.flags |= NILFS_SS_LOGEND;
		sci->sc_stage.scnt = NILFS_ST_DONE;
		return 0;
	case NILFS_ST_DSYNC:
 dsync_mode:
		sci->sc_curseg->sb_sum.flags |= NILFS_SS_SYNDT;
		ii = sci->sc_dsync_inode;
		if (!test_bit(NILFS_I_BUSY, &ii->i_state))
			break;

		err = nilfs_segctor_scan_file_dsync(sci, &ii->vfs_inode);
		if (unlikely(err))
			break;
		sci->sc_curseg->sb_sum.flags |= NILFS_SS_LOGEND;
		sci->sc_stage.scnt = NILFS_ST_DONE;
		return 0;
	case NILFS_ST_DONE:
		return 0;
	default:
		BUG();
	}

 break_or_fail:
	return err;
}

/**
 * nilfs_segctor_begin_construction - setup segment buffer to make a new log
 * @sci: nilfs_sc_info
 * @nilfs: nilfs object
 */
static int nilfs_segctor_begin_construction(struct nilfs_sc_info *sci,
					    struct the_nilfs *nilfs)
{
	struct nilfs_segment_buffer *segbuf, *prev;
	__u64 nextnum;
	int err, alloc = 0;

	segbuf = nilfs_segbuf_new(sci->sc_super);
	if (unlikely(!segbuf))
		return -ENOMEM;

	if (list_empty(&sci->sc_write_logs)) {
		nilfs_segbuf_map(segbuf, nilfs->ns_segnum,
				 nilfs->ns_pseg_offset, nilfs);
		if (segbuf->sb_rest_blocks < NILFS_PSEG_MIN_BLOCKS) {
			nilfs_shift_to_next_segment(nilfs);
			nilfs_segbuf_map(segbuf, nilfs->ns_segnum, 0, nilfs);
		}

		segbuf->sb_sum.seg_seq = nilfs->ns_seg_seq;
		nextnum = nilfs->ns_nextnum;

		if (nilfs->ns_segnum == nilfs->ns_nextnum)
			/* Start from the head of a new full segment */
			alloc++;
	} else {
		/* Continue logs */
		prev = NILFS_LAST_SEGBUF(&sci->sc_write_logs);
		nilfs_segbuf_map_cont(segbuf, prev);
		segbuf->sb_sum.seg_seq = prev->sb_sum.seg_seq;
		nextnum = prev->sb_nextnum;

		if (segbuf->sb_rest_blocks < NILFS_PSEG_MIN_BLOCKS) {
			nilfs_segbuf_map(segbuf, prev->sb_nextnum, 0, nilfs);
			segbuf->sb_sum.seg_seq++;
			alloc++;
		}
	}

	err = nilfs_sufile_mark_dirty(nilfs->ns_sufile, segbuf->sb_segnum);
	if (err)
		goto failed;

	if (alloc) {
		err = nilfs_sufile_alloc(nilfs->ns_sufile, &nextnum);
		if (err)
			goto failed;
	}
	nilfs_segbuf_set_next_segnum(segbuf, nextnum, nilfs);

	BUG_ON(!list_empty(&sci->sc_segbufs));
	list_add_tail(&segbuf->sb_list, &sci->sc_segbufs);
	sci->sc_segbuf_nblocks = segbuf->sb_rest_blocks;
	return 0;

 failed:
	nilfs_segbuf_free(segbuf);
	return err;
}

static int nilfs_segctor_extend_segments(struct nilfs_sc_info *sci,
					 struct the_nilfs *nilfs, int nadd)
{
	struct nilfs_segment_buffer *segbuf, *prev;
	struct inode *sufile = nilfs->ns_sufile;
	__u64 nextnextnum;
	LIST_HEAD(list);
	int err, ret, i;

	prev = NILFS_LAST_SEGBUF(&sci->sc_segbufs);
	/*
	 * Since the segment specified with nextnum might be allocated during
	 * the previous construction, the buffer including its segusage may
	 * not be dirty.  The following call ensures that the buffer is dirty
	 * and will pin the buffer on memory until the sufile is written.
	 */
	err = nilfs_sufile_mark_dirty(sufile, prev->sb_nextnum);
	if (unlikely(err))
		return err;

	for (i = 0; i < nadd; i++) {
		/* extend segment info */
		err = -ENOMEM;
		segbuf = nilfs_segbuf_new(sci->sc_super);
		if (unlikely(!segbuf))
			goto failed;

		/* map this buffer to region of segment on-disk */
		nilfs_segbuf_map(segbuf, prev->sb_nextnum, 0, nilfs);
		sci->sc_segbuf_nblocks += segbuf->sb_rest_blocks;

		/* allocate the next next full segment */
		err = nilfs_sufile_alloc(sufile, &nextnextnum);
		if (unlikely(err))
			goto failed_segbuf;

		segbuf->sb_sum.seg_seq = prev->sb_sum.seg_seq + 1;
		nilfs_segbuf_set_next_segnum(segbuf, nextnextnum, nilfs);

		list_add_tail(&segbuf->sb_list, &list);
		prev = segbuf;
	}
	list_splice_tail(&list, &sci->sc_segbufs);
	return 0;

 failed_segbuf:
	nilfs_segbuf_free(segbuf);
 failed:
	list_for_each_entry(segbuf, &list, sb_list) {
		ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
		WARN_ON(ret); /* never fails */
	}
	nilfs_destroy_logs(&list);
	return err;
}

static void nilfs_free_incomplete_logs(struct list_head *logs,
				       struct the_nilfs *nilfs)
{
	struct nilfs_segment_buffer *segbuf, *prev;
	struct inode *sufile = nilfs->ns_sufile;
	int ret;

	segbuf = NILFS_FIRST_SEGBUF(logs);
	if (nilfs->ns_nextnum != segbuf->sb_nextnum) {
		ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
		WARN_ON(ret); /* never fails */
	}
	if (atomic_read(&segbuf->sb_err)) {
		/* Case 1: The first segment failed */
		if (segbuf->sb_pseg_start != segbuf->sb_fseg_start)
			/* Case 1a:  Partial segment appended into an existing
			   segment */
			nilfs_terminate_segment(nilfs, segbuf->sb_fseg_start,
						segbuf->sb_fseg_end);
		else /* Case 1b:  New full segment */
			set_nilfs_discontinued(nilfs);
	}

	prev = segbuf;
	list_for_each_entry_continue(segbuf, logs, sb_list) {
		if (prev->sb_nextnum != segbuf->sb_nextnum) {
			ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
			WARN_ON(ret); /* never fails */
		}
		if (atomic_read(&segbuf->sb_err) &&
		    segbuf->sb_segnum != nilfs->ns_nextnum)
			/* Case 2: extended segment (!= next) failed */
			nilfs_sufile_set_error(sufile, segbuf->sb_segnum);
		prev = segbuf;
	}
}

static void nilfs_segctor_update_segusage(struct nilfs_sc_info *sci,
					  struct inode *sufile)
{
	struct nilfs_segment_buffer *segbuf;
	unsigned long live_blocks;
	int ret;

	list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
		live_blocks = segbuf->sb_sum.nblocks +
			(segbuf->sb_pseg_start - segbuf->sb_fseg_start);
		ret = nilfs_sufile_set_segment_usage(sufile, segbuf->sb_segnum,
						     live_blocks,
						     sci->sc_seg_ctime);
		WARN_ON(ret); /* always succeed because the segusage is dirty */
	}
}

static void nilfs_cancel_segusage(struct list_head *logs, struct inode *sufile)
{
	struct nilfs_segment_buffer *segbuf;
	int ret;

	segbuf = NILFS_FIRST_SEGBUF(logs);
	ret = nilfs_sufile_set_segment_usage(sufile, segbuf->sb_segnum,
					     segbuf->sb_pseg_start -
					     segbuf->sb_fseg_start, 0);
	WARN_ON(ret); /* always succeed because the segusage is dirty */

	list_for_each_entry_continue(segbuf, logs, sb_list) {
		ret = nilfs_sufile_set_segment_usage(sufile, segbuf->sb_segnum,
						     0, 0);
		WARN_ON(ret); /* always succeed */
	}
}

static void nilfs_segctor_truncate_segments(struct nilfs_sc_info *sci,
					    struct nilfs_segment_buffer *last,
					    struct inode *sufile)
{
	struct nilfs_segment_buffer *segbuf = last;
	int ret;

	list_for_each_entry_continue(segbuf, &sci->sc_segbufs, sb_list) {
		sci->sc_segbuf_nblocks -= segbuf->sb_rest_blocks;
		ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
		WARN_ON(ret);
	}
	nilfs_truncate_logs(&sci->sc_segbufs, last);
}
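
/*
 * Collection retry logic (descriptive note): when collection stops with
 * -E2BIG, nilfs_segctor_collect() either gives up and lets the partial
 * result be written out (non-SR modes or early stages), or extends the
 * segment buffer chain, restores the stage saved on entry, and collects
 * again.  The number of segments added doubles on each retry up to
 * SC_MAX_SEGDELTA, and surplus buffers are truncated once collection
 * succeeds.
 */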
static int nilfs_segctor_collect(struct nilfs_sc_info *sci,
				 struct the_nilfs *nilfs, int mode)
{
	struct nilfs_cstage prev_stage = sci->sc_stage;
	int err, nadd = 1;

	/* Collection retry loop */
	for (;;) {
		sci->sc_nblk_this_inc = 0;
		sci->sc_curseg = NILFS_FIRST_SEGBUF(&sci->sc_segbufs);

		err = nilfs_segctor_reset_segment_buffer(sci);
		if (unlikely(err))
			goto failed;

		err = nilfs_segctor_collect_blocks(sci, mode);
		sci->sc_nblk_this_inc += sci->sc_curseg->sb_sum.nblocks;
		if (!err)
			break;

		if (unlikely(err != -E2BIG))
			goto failed;

		/* The current segment is filled up */
		if (mode != SC_LSEG_SR || sci->sc_stage.scnt < NILFS_ST_CPFILE)
			break;

		nilfs_clear_logs(&sci->sc_segbufs);

		if (sci->sc_stage.flags & NILFS_CF_SUFREED) {
			err = nilfs_sufile_cancel_freev(nilfs->ns_sufile,
							sci->sc_freesegs,
							sci->sc_nfreesegs,
							NULL);
			WARN_ON(err); /* do not happen */
			sci->sc_stage.flags &= ~NILFS_CF_SUFREED;
		}

		err = nilfs_segctor_extend_segments(sci, nilfs, nadd);
		if (unlikely(err))
			return err;

		nadd = min_t(int, nadd << 1, SC_MAX_SEGDELTA);
		sci->sc_stage = prev_stage;
	}
	nilfs_segctor_truncate_segments(sci, sci->sc_curseg, nilfs->ns_sufile);
	return 0;

 failed:
	return err;
}

static void nilfs_list_replace_buffer(struct buffer_head *old_bh,
				      struct buffer_head *new_bh)
{
	BUG_ON(!list_empty(&new_bh->b_assoc_buffers));

	list_replace_init(&old_bh->b_assoc_buffers, &new_bh->b_assoc_buffers);
	/* The caller must release old_bh */
}

static int
nilfs_segctor_update_payload_blocknr(struct nilfs_sc_info *sci,
				     struct nilfs_segment_buffer *segbuf,
				     int mode)
{
	struct inode *inode = NULL;
	sector_t blocknr;
	unsigned long nfinfo = segbuf->sb_sum.nfinfo;
	unsigned long nblocks = 0, ndatablk = 0;
	struct nilfs_sc_operations *sc_op = NULL;
	struct nilfs_segsum_pointer ssp;
	struct nilfs_finfo *finfo = NULL;
	union nilfs_binfo binfo;
	struct buffer_head *bh, *bh_org;
	ino_t ino = 0;
	int err = 0;

	if (!nfinfo)
		goto out;

	blocknr = segbuf->sb_pseg_start + segbuf->sb_sum.nsumblk;
	ssp.bh = NILFS_SEGBUF_FIRST_BH(&segbuf->sb_segsum_buffers);
	ssp.offset = sizeof(struct nilfs_segment_summary);

	list_for_each_entry(bh, &segbuf->sb_payload_buffers, b_assoc_buffers) {
		if (bh == segbuf->sb_super_root)
			break;
		if (!finfo) {
			finfo =	nilfs_segctor_map_segsum_entry(
				sci, &ssp, sizeof(*finfo));
			ino = le64_to_cpu(finfo->fi_ino);
			nblocks = le32_to_cpu(finfo->fi_nblocks);
			ndatablk = le32_to_cpu(finfo->fi_ndatablk);

			inode = bh->b_page->mapping->host;

			if (mode == SC_LSEG_DSYNC)
				sc_op = &nilfs_sc_dsync_ops;
			else if (ino == NILFS_DAT_INO)
				sc_op = &nilfs_sc_dat_ops;
			else /* file blocks */
				sc_op = &nilfs_sc_file_ops;
		}
		bh_org = bh;
		get_bh(bh_org);
		err = nilfs_bmap_assign(NILFS_I(inode)->i_bmap, &bh, blocknr,
					&binfo);
		if (bh != bh_org)
			nilfs_list_replace_buffer(bh_org, bh);
		brelse(bh_org);
		if (unlikely(err))
			goto failed_bmap;

		if (ndatablk > 0)
			sc_op->write_data_binfo(sci, &ssp, &binfo);
		else
			sc_op->write_node_binfo(sci, &ssp, &binfo);

		blocknr++;
		if (--nblocks == 0) {
			finfo = NULL;
			if (--nfinfo == 0)
				break;
		} else if (ndatablk > 0)
			ndatablk--;
	}
 out:
	return 0;

 failed_bmap:
	return err;
}

static int nilfs_segctor_assign(struct nilfs_sc_info *sci, int mode)
{
	struct nilfs_segment_buffer *segbuf;
	int err;

	list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
		err = nilfs_segctor_update_payload_blocknr(sci, segbuf, mode);
		if (unlikely(err))
			return err;
		nilfs_segbuf_fill_in_segsum(segbuf);
	}
	return 0;
}

static void nilfs_begin_page_io(struct page *page)
{
	if (!page || PageWriteback(page))
		/*
		 * For split b-tree node pages, this function may be called
		 * twice.  We ignore the 2nd or later calls by this check.
		 */
		return;

	lock_page(page);
	clear_page_dirty_for_io(page);
	set_page_writeback(page);
	unlock_page(page);
}
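
/*
 * Descriptive note: the prepare/write/complete sequence below relies on
 * the buffer_async_write flag.  Buffers are flagged here while their
 * pages are put under writeback, which makes the dirty-buffer lookup
 * functions above skip them, and the flag is cleared again in
 * nilfs_abort_logs() and nilfs_segctor_complete_write().
 */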
static void nilfs_segctor_prepare_write(struct nilfs_sc_info *sci)
{
	struct nilfs_segment_buffer *segbuf;
	struct page *bd_page = NULL, *fs_page = NULL;

	list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
		struct buffer_head *bh;

		list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
				    b_assoc_buffers) {
			set_buffer_async_write(bh);
			if (bh->b_page != bd_page) {
				if (bd_page) {
					lock_page(bd_page);
					clear_page_dirty_for_io(bd_page);
					set_page_writeback(bd_page);
					unlock_page(bd_page);
				}
				bd_page = bh->b_page;
			}
		}

		list_for_each_entry(bh, &segbuf->sb_payload_buffers,
				    b_assoc_buffers) {
			set_buffer_async_write(bh);
			if (bh == segbuf->sb_super_root) {
				if (bh->b_page != bd_page) {
					lock_page(bd_page);
					clear_page_dirty_for_io(bd_page);
					set_page_writeback(bd_page);
					unlock_page(bd_page);
					bd_page = bh->b_page;
				}
				break;
			}
			if (bh->b_page != fs_page) {
				nilfs_begin_page_io(fs_page);
				fs_page = bh->b_page;
			}
		}
	}
	if (bd_page) {
		lock_page(bd_page);
		clear_page_dirty_for_io(bd_page);
		set_page_writeback(bd_page);
		unlock_page(bd_page);
	}
	nilfs_begin_page_io(fs_page);
}

static int nilfs_segctor_write(struct nilfs_sc_info *sci,
			       struct the_nilfs *nilfs)
{
	int ret;

	ret = nilfs_write_logs(&sci->sc_segbufs, nilfs);
	list_splice_tail_init(&sci->sc_segbufs, &sci->sc_write_logs);
	return ret;
}

static void nilfs_end_page_io(struct page *page, int err)
{
	if (!page)
		return;

	if (buffer_nilfs_node(page_buffers(page)) && !PageWriteback(page)) {
		/*
		 * For b-tree node pages, this function may be called twice
		 * or more because they might be split in a segment.
		 */
		if (PageDirty(page)) {
			/*
			 * For pages holding split b-tree node buffers, dirty
			 * flag on the buffers may be cleared discretely.
			 * In that case, the page is once redirtied for
			 * remaining buffers, and it must be cancelled if
			 * all the buffers get cleaned later.
			 */
			lock_page(page);
			if (nilfs_page_buffers_clean(page))
				__nilfs_clear_page_dirty(page);
			unlock_page(page);
		}
		return;
	}

	if (!err) {
		if (!nilfs_page_buffers_clean(page))
			__set_page_dirty_nobuffers(page);
		ClearPageError(page);
	} else {
		__set_page_dirty_nobuffers(page);
		SetPageError(page);
	}

	end_page_writeback(page);
}

static void nilfs_abort_logs(struct list_head *logs, int err)
{
	struct nilfs_segment_buffer *segbuf;
	struct page *bd_page = NULL, *fs_page = NULL;
	struct buffer_head *bh;

	if (list_empty(logs))
		return;

	list_for_each_entry(segbuf, logs, sb_list) {
		list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
				    b_assoc_buffers) {
			clear_buffer_async_write(bh);
			if (bh->b_page != bd_page) {
				if (bd_page)
					end_page_writeback(bd_page);
				bd_page = bh->b_page;
			}
		}

		list_for_each_entry(bh, &segbuf->sb_payload_buffers,
				    b_assoc_buffers) {
			clear_buffer_async_write(bh);
			if (bh == segbuf->sb_super_root) {
				if (bh->b_page != bd_page) {
					end_page_writeback(bd_page);
					bd_page = bh->b_page;
				}
				break;
			}
			if (bh->b_page != fs_page) {
				nilfs_end_page_io(fs_page, err);
				fs_page = bh->b_page;
			}
		}
	}
	if (bd_page)
		end_page_writeback(bd_page);

	nilfs_end_page_io(fs_page, err);
}

static void nilfs_segctor_abort_construction(struct nilfs_sc_info *sci,
					     struct the_nilfs *nilfs, int err)
{
	LIST_HEAD(logs);
	int ret;

	list_splice_tail_init(&sci->sc_write_logs, &logs);
	ret = nilfs_wait_on_logs(&logs);
	nilfs_abort_logs(&logs, ret ? : err);

	list_splice_tail_init(&sci->sc_segbufs, &logs);
	nilfs_cancel_segusage(&logs, nilfs->ns_sufile);
	nilfs_free_incomplete_logs(&logs, nilfs);

	if (sci->sc_stage.flags & NILFS_CF_SUFREED) {
		ret = nilfs_sufile_cancel_freev(nilfs->ns_sufile,
						sci->sc_freesegs,
						sci->sc_nfreesegs,
						NULL);
		WARN_ON(ret); /* do not happen */
	}

	nilfs_destroy_logs(&logs);
}

static void nilfs_set_next_segment(struct the_nilfs *nilfs,
				   struct nilfs_segment_buffer *segbuf)
{
	nilfs->ns_segnum = segbuf->sb_segnum;
	nilfs->ns_nextnum = segbuf->sb_nextnum;
	nilfs->ns_pseg_offset = segbuf->sb_pseg_start - segbuf->sb_fseg_start
		+ segbuf->sb_sum.nblocks;
	nilfs->ns_seg_seq = segbuf->sb_sum.seg_seq;
	nilfs->ns_ctime = segbuf->sb_sum.ctime;
}

static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci)
{
	struct nilfs_segment_buffer *segbuf;
	struct page *bd_page = NULL, *fs_page = NULL;
	struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
	int update_sr = false;

	list_for_each_entry(segbuf, &sci->sc_write_logs, sb_list) {
		struct buffer_head *bh;

		list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
				    b_assoc_buffers) {
			set_buffer_uptodate(bh);
			clear_buffer_dirty(bh);
			clear_buffer_async_write(bh);
			if (bh->b_page != bd_page) {
				if (bd_page)
					end_page_writeback(bd_page);
				bd_page = bh->b_page;
			}
		}
		/*
		 * We assume that the buffers which belong to the same page
		 * continue over the buffer list.
		 * Under this assumption, the last BHs of pages are
		 * identifiable by the discontinuity of bh->b_page
		 * (page != fs_page).
		 *
		 * For B-tree node blocks, however, this assumption is not
		 * guaranteed.  The cleanup code of B-tree node pages needs
		 * special care.
		 */
		list_for_each_entry(bh, &segbuf->sb_payload_buffers,
				    b_assoc_buffers) {
			set_buffer_uptodate(bh);
			clear_buffer_dirty(bh);
			clear_buffer_async_write(bh);
			clear_buffer_delay(bh);
			clear_buffer_nilfs_volatile(bh);
			clear_buffer_nilfs_redirected(bh);
			if (bh == segbuf->sb_super_root) {
				if (bh->b_page != bd_page) {
					end_page_writeback(bd_page);
					bd_page = bh->b_page;
				}
				update_sr = true;
				break;
			}
			if (bh->b_page != fs_page) {
				nilfs_end_page_io(fs_page, 0);
				fs_page = bh->b_page;
			}
		}

		if (!nilfs_segbuf_simplex(segbuf)) {
			if (segbuf->sb_sum.flags & NILFS_SS_LOGBGN) {
				set_bit(NILFS_SC_UNCLOSED, &sci->sc_flags);
				sci->sc_lseg_stime = jiffies;
			}
			if (segbuf->sb_sum.flags & NILFS_SS_LOGEND)
				clear_bit(NILFS_SC_UNCLOSED, &sci->sc_flags);
		}
	}
	/*
	 * Since pages may continue over multiple segment buffers,
	 * end of the last page must be checked outside of the loop.
	 */
	if (bd_page)
		end_page_writeback(bd_page);

	nilfs_end_page_io(fs_page, 0);

	nilfs_drop_collected_inodes(&sci->sc_dirty_files);

	if (nilfs_doing_gc())
		nilfs_drop_collected_inodes(&sci->sc_gc_inodes);
	else
		nilfs->ns_nongc_ctime = sci->sc_seg_ctime;

	sci->sc_nblk_inc += sci->sc_nblk_this_inc;

	segbuf = NILFS_LAST_SEGBUF(&sci->sc_write_logs);
	nilfs_set_next_segment(nilfs, segbuf);

	if (update_sr) {
		nilfs_set_last_segment(nilfs, segbuf->sb_pseg_start,
				       segbuf->sb_sum.seg_seq, nilfs->ns_cno++);

		clear_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags);
		clear_bit(NILFS_SC_DIRTY, &sci->sc_flags);
		set_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags);
		nilfs_segctor_clear_metadata_dirty(sci);
	} else
		clear_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags);
}

static int nilfs_segctor_wait(struct nilfs_sc_info *sci)
{
	int ret;

	ret = nilfs_wait_on_logs(&sci->sc_write_logs);
	if (!ret) {
		nilfs_segctor_complete_write(sci);
		nilfs_destroy_logs(&sci->sc_write_logs);
	}
	return ret;
}

static int nilfs_segctor_collect_dirty_files(struct nilfs_sc_info *sci,
					     struct the_nilfs *nilfs)
{
	struct nilfs_inode_info *ii, *n;
	struct inode *ifile = sci->sc_root->ifile;

	spin_lock(&nilfs->ns_inode_lock);
 retry:
	list_for_each_entry_safe(ii, n, &nilfs->ns_dirty_files, i_dirty) {
		if (!ii->i_bh) {
			struct buffer_head *ibh;
			int err;

			spin_unlock(&nilfs->ns_inode_lock);
			err = nilfs_ifile_get_inode_block(
				ifile, ii->vfs_inode.i_ino, &ibh);
			if (unlikely(err)) {
				nilfs_warning(sci->sc_super, __func__,
					      "failed to get inode block.\n");
				return err;
			}
			mark_buffer_dirty(ibh);
			nilfs_mdt_mark_dirty(ifile);
			spin_lock(&nilfs->ns_inode_lock);
			if (likely(!ii->i_bh))
				ii->i_bh = ibh;
			else
				brelse(ibh);
			goto retry;
		}

		clear_bit(NILFS_I_QUEUED, &ii->i_state);
		set_bit(NILFS_I_BUSY, &ii->i_state);
		list_move_tail(&ii->i_dirty, &sci->sc_dirty_files);
	}
	spin_unlock(&nilfs->ns_inode_lock);

	return 0;
}

static void nilfs_segctor_drop_written_files(struct nilfs_sc_info *sci,
					     struct the_nilfs *nilfs)
{
	struct nilfs_transaction_info *ti = current->journal_info;
	struct nilfs_inode_info *ii, *n;

	spin_lock(&nilfs->ns_inode_lock);
	list_for_each_entry_safe(ii, n, &sci->sc_dirty_files, i_dirty) {
		if (!test_and_clear_bit(NILFS_I_UPDATED, &ii->i_state) ||
		    test_bit(NILFS_I_DIRTY, &ii->i_state))
			continue;

		clear_bit(NILFS_I_BUSY, &ii->i_state);
		brelse(ii->i_bh);
		ii->i_bh = NULL;
		list_move_tail(&ii->i_dirty, &ti->ti_garbage);
	}
	spin_unlock(&nilfs->ns_inode_lock);
}
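
/*
 * Overview of one construction pass (descriptive note): dirty files are
 * gathered, blocks are collected stage by stage, disk block numbers are
 * assigned, the checkpoint and super root are filled in when requested,
 * checksums are added, and the logs are submitted.  On success the
 * written files are dropped from the dirty list; on failure everything
 * is redirtied and the incomplete logs are freed.
 */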
1920 * Main procedure of segment constructor
1922 static int nilfs_segctor_do_construct(struct nilfs_sc_info *sci, int mode)
1924 struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
1925 int err;
1927 sci->sc_stage.scnt = NILFS_ST_INIT;
1928 sci->sc_cno = nilfs->ns_cno;
1930 err = nilfs_segctor_collect_dirty_files(sci, nilfs);
1931 if (unlikely(err))
1932 goto out;
1934 if (nilfs_test_metadata_dirty(nilfs, sci->sc_root))
1935 set_bit(NILFS_SC_DIRTY, &sci->sc_flags);
1937 if (nilfs_segctor_clean(sci))
1938 goto out;
1940 do {
1941 sci->sc_stage.flags &= ~NILFS_CF_HISTORY_MASK;
1943 err = nilfs_segctor_begin_construction(sci, nilfs);
1944 if (unlikely(err))
1945 goto out;
1947 /* Update time stamp */
1948 sci->sc_seg_ctime = get_seconds();
1950 err = nilfs_segctor_collect(sci, nilfs, mode);
1951 if (unlikely(err))
1952 goto failed;
1954 /* Avoid empty segment */
1955 if (sci->sc_stage.scnt == NILFS_ST_DONE &&
1956 nilfs_segbuf_empty(sci->sc_curseg)) {
1957 nilfs_segctor_abort_construction(sci, nilfs, 1);
1958 goto out;
1961 err = nilfs_segctor_assign(sci, mode);
1962 if (unlikely(err))
1963 goto failed;
1965 if (sci->sc_stage.flags & NILFS_CF_IFILE_STARTED)
1966 nilfs_segctor_fill_in_file_bmap(sci);
1968 if (mode == SC_LSEG_SR &&
1969 sci->sc_stage.scnt >= NILFS_ST_CPFILE) {
1970 err = nilfs_segctor_fill_in_checkpoint(sci);
1971 if (unlikely(err))
1972 goto failed_to_write;
1974 nilfs_segctor_fill_in_super_root(sci, nilfs);
1976 nilfs_segctor_update_segusage(sci, nilfs->ns_sufile);
1978 /* Write partial segments */
1979 nilfs_segctor_prepare_write(sci);
1981 nilfs_add_checksums_on_logs(&sci->sc_segbufs,
1982 nilfs->ns_crc_seed);
1984 err = nilfs_segctor_write(sci, nilfs);
1985 if (unlikely(err))
1986 goto failed_to_write;
1988 if (sci->sc_stage.scnt == NILFS_ST_DONE ||
1989 nilfs->ns_blocksize_bits != PAGE_CACHE_SHIFT) {
1991 * At this point, we avoid double buffering
1992 * for blocksize < pagesize because page dirty
1993 * flag is turned off during write and dirty
1994 * buffers are not properly collected for
1995 * pages crossing over segments.
1997 err = nilfs_segctor_wait(sci);
1998 if (err)
1999 goto failed_to_write;
2001 } while (sci->sc_stage.scnt != NILFS_ST_DONE);
2003 out:
2004 nilfs_segctor_drop_written_files(sci, nilfs);
2005 return err;
2007 failed_to_write:
2008 if (sci->sc_stage.flags & NILFS_CF_IFILE_STARTED)
2009 nilfs_redirty_inodes(&sci->sc_dirty_files);
2011 failed:
2012 if (nilfs_doing_gc())
2013 nilfs_redirty_inodes(&sci->sc_gc_inodes);
2014 nilfs_segctor_abort_construction(sci, nilfs, err);
2015 goto out;
2016 }
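/*
 * [Editor's aside -- illustrative sketch, not part of segment.c] The
 * wait-versus-pipeline choice near the end of the loop above can be read
 * as a small predicate: the constructor only leaves the just-submitted
 * logs in flight (double buffering) when more stages remain and a block
 * is exactly one page. The helper below restates that test; the name
 * must_wait_for_logs and its parameters are invented for illustration.
 */
#include <stdbool.h>

static bool must_wait_for_logs(bool all_stages_done,
			       unsigned int blocksize_bits,
			       unsigned int page_shift)
{
	/*
	 * With blocksize < pagesize, the page dirty flag cannot track
	 * buffers that cross segment boundaries, so wait immediately.
	 */
	return all_stages_done || blocksize_bits != page_shift;
}

int main(void)
{
	/* 1 KiB blocks (10 bits) on 4 KiB pages (shift 12): always wait */
	return must_wait_for_logs(0, 10, 12) ? 0 : 1;
}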
2018 /**
2019 * nilfs_segctor_start_timer - set timer of background write
2020 * @sci: nilfs_sc_info
2021 *
2022 * If the timer has already been set, it ignores the new request.
2023 * This function MUST be called within a section locking the segment
2024 * semaphore.
2025 */
2026 static void nilfs_segctor_start_timer(struct nilfs_sc_info *sci)
2027 {
2028 spin_lock(&sci->sc_state_lock);
2029 if (!(sci->sc_state & NILFS_SEGCTOR_COMMIT)) {
2030 sci->sc_timer.expires = jiffies + sci->sc_interval;
2031 add_timer(&sci->sc_timer);
2032 sci->sc_state |= NILFS_SEGCTOR_COMMIT;
2033 }
2034 spin_unlock(&sci->sc_state_lock);
2035 }
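/*
 * [Editor's aside -- illustrative sketch, not part of segment.c]
 * nilfs_segctor_start_timer() above arms the timer at most once per
 * commit window: while NILFS_SEGCTOR_COMMIT is set, further requests
 * neither re-arm the timer nor push back its deadline. A standalone
 * model of that one-shot behaviour, with invented names:
 */
#include <assert.h>
#include <stdbool.h>

static bool commit_pending;          /* models the NILFS_SEGCTOR_COMMIT flag */
static unsigned long timer_deadline; /* models sci->sc_timer.expires */

static void start_timer_once(unsigned long now, unsigned long interval)
{
	/* the real code holds sc_state_lock around this test */
	if (!commit_pending) {
		timer_deadline = now + interval;
		commit_pending = true;
	}
}

int main(void)
{
	start_timer_once(100, 10);
	start_timer_once(105, 10);      /* ignored: deadline stays at 110 */
	assert(timer_deadline == 110);
	return 0;
}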
2037 static void nilfs_segctor_do_flush(struct nilfs_sc_info *sci, int bn)
2038 {
2039 spin_lock(&sci->sc_state_lock);
2040 if (!(sci->sc_flush_request & (1 << bn))) {
2041 unsigned long prev_req = sci->sc_flush_request;
2043 sci->sc_flush_request |= (1 << bn);
2044 if (!prev_req)
2045 wake_up(&sci->sc_wait_daemon);
2046 }
2047 spin_unlock(&sci->sc_state_lock);
2048 }
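/*
 * [Editor's aside -- illustrative sketch, not part of segment.c] Note how
 * nilfs_segctor_do_flush() above wakes the daemon only on the 0 -> nonzero
 * transition of the request bitmap, so a burst of flush requests costs a
 * single wake_up(). The single-threaded model below (invented names)
 * isolates that transition test:
 */
#include <assert.h>

static unsigned long flush_request;  /* models sci->sc_flush_request */
static int wakeups;                  /* counts daemon wake-ups */

static void request_flush(int bn)
{
	if (!(flush_request & (1UL << bn))) {
		unsigned long prev = flush_request;

		flush_request |= 1UL << bn;
		if (!prev)
			wakeups++;    /* first pending bit: wake the daemon */
	}
}

int main(void)
{
	request_flush(0);
	request_flush(3);
	request_flush(0);             /* duplicates add no wake-ups */
	assert(wakeups == 1);
	return 0;
}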
2050 /**
2051 * nilfs_flush_segment - trigger a segment construction for resource control
2052 * @sb: super block
2053 * @ino: inode number of the file to be flushed out.
2054 */
2055 void nilfs_flush_segment(struct super_block *sb, ino_t ino)
2056 {
2057 struct the_nilfs *nilfs = sb->s_fs_info;
2058 struct nilfs_sc_info *sci = nilfs->ns_writer;
2060 if (!sci || nilfs_doing_construction())
2061 return;
2062 nilfs_segctor_do_flush(sci, NILFS_MDT_INODE(sb, ino) ? ino : 0);
2063 /* assign bit 0 to data files */
2064 }
2066 struct nilfs_segctor_wait_request {
2067 wait_queue_t wq;
2068 __u32 seq;
2069 int err;
2070 atomic_t done;
2071 };
2073 static int nilfs_segctor_sync(struct nilfs_sc_info *sci)
2074 {
2075 struct nilfs_segctor_wait_request wait_req;
2076 int err = 0;
2078 spin_lock(&sci->sc_state_lock);
2079 init_wait(&wait_req.wq);
2080 wait_req.err = 0;
2081 atomic_set(&wait_req.done, 0);
2082 wait_req.seq = ++sci->sc_seq_request;
2083 spin_unlock(&sci->sc_state_lock);
2085 init_waitqueue_entry(&wait_req.wq, current);
2086 add_wait_queue(&sci->sc_wait_request, &wait_req.wq);
2087 set_current_state(TASK_INTERRUPTIBLE);
2088 wake_up(&sci->sc_wait_daemon);
2090 for (;;) {
2091 if (atomic_read(&wait_req.done)) {
2092 err = wait_req.err;
2093 break;
2094 }
2095 if (!signal_pending(current)) {
2096 schedule();
2097 continue;
2098 }
2099 err = -ERESTARTSYS;
2100 break;
2101 }
2102 finish_wait(&sci->sc_wait_request, &wait_req.wq);
2103 return err;
2104 }
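/*
 * [Editor's aside -- illustrative sketch, not part of segment.c] The
 * synchronization above rests on three advancing counters: sc_seq_request
 * (bumped by each waiter), sc_seq_accepted (snapshot taken when a
 * construction starts, see nilfs_segctor_accept() below) and sc_seq_done
 * (published on completion). A waiter is released once "done" catches up
 * with its ticket. The single-threaded model below uses invented names
 * for the same handshake:
 */
#include <assert.h>
#include <stdint.h>

struct seqs { uint32_t request, accepted, done; };

static uint32_t submit(struct seqs *s)  { return ++s->request; }   /* waiter side */
static void accept_all(struct seqs *s)  { s->accepted = s->request; }
static void complete(struct seqs *s)    { s->done = s->accepted; }

int main(void)
{
	struct seqs s = { 0, 0, 0 };
	uint32_t ticket = submit(&s);

	accept_all(&s);                          /* daemon starts a construction */
	complete(&s);                            /* ... and publishes completion */
	assert((int32_t)(s.done - ticket) >= 0); /* our request is covered */
	return 0;
}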
2106 static void nilfs_segctor_wakeup(struct nilfs_sc_info *sci, int err)
2107 {
2108 struct nilfs_segctor_wait_request *wrq, *n;
2109 unsigned long flags;
2111 spin_lock_irqsave(&sci->sc_wait_request.lock, flags);
2112 list_for_each_entry_safe(wrq, n, &sci->sc_wait_request.task_list,
2113 wq.task_list) {
2114 if (!atomic_read(&wrq->done) &&
2115 nilfs_cnt32_ge(sci->sc_seq_done, wrq->seq)) {
2116 wrq->err = err;
2117 atomic_set(&wrq->done, 1);
2118 }
2119 if (atomic_read(&wrq->done)) {
2120 wrq->wq.func(&wrq->wq,
2121 TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE,
2122 0, NULL);
2123 }
2124 }
2125 spin_unlock_irqrestore(&sci->sc_wait_request.lock, flags);
2126 }
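/*
 * [Editor's aside -- illustrative sketch, not part of segment.c] The
 * nilfs_cnt32_ge() test used in the wake-up loop above is the classic
 * wraparound-safe comparison for 32-bit sequence numbers: subtract and
 * look at the sign bit. A standalone version (invented name seq32_ge):
 */
#include <assert.h>
#include <stdint.h>

static int seq32_ge(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) >= 0;        /* "a >= b" modulo 2^32 */
}

int main(void)
{
	assert(seq32_ge(5, 3));
	assert(seq32_ge(2, 0xfffffffeU));    /* 2 is "after" a recent wrap */
	assert(!seq32_ge(0xfffffffeU, 2));
	return 0;
}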
2128 /**
2129 * nilfs_construct_segment - construct a logical segment
2130 * @sb: super block
2131 *
2132 * Return Value: On success, 0 is returned. On errors, one of the following
2133 * negative error codes is returned.
2134 *
2135 * %-EROFS - Read only filesystem.
2136 *
2137 * %-EIO - I/O error
2138 *
2139 * %-ENOSPC - No space left on device (only in a panic state).
2140 *
2141 * %-ERESTARTSYS - Interrupted.
2142 *
2143 * %-ENOMEM - Insufficient memory available.
2144 */
2145 int nilfs_construct_segment(struct super_block *sb)
2146 {
2147 struct the_nilfs *nilfs = sb->s_fs_info;
2148 struct nilfs_sc_info *sci = nilfs->ns_writer;
2149 struct nilfs_transaction_info *ti;
2150 int err;
2152 if (!sci)
2153 return -EROFS;
2155 /* A call inside transactions causes a deadlock. */
2156 BUG_ON((ti = current->journal_info) && ti->ti_magic == NILFS_TI_MAGIC);
2158 err = nilfs_segctor_sync(sci);
2159 return err;
2160 }
2162 /**
2163 * nilfs_construct_dsync_segment - construct a data-only logical segment
2164 * @sb: super block
2165 * @inode: inode whose data blocks should be written out
2166 * @start: start byte offset
2167 * @end: end byte offset (inclusive)
2168 *
2169 * Return Value: On success, 0 is returned. On errors, one of the following
2170 * negative error codes is returned.
2171 *
2172 * %-EROFS - Read only filesystem.
2173 *
2174 * %-EIO - I/O error
2175 *
2176 * %-ENOSPC - No space left on device (only in a panic state).
2177 *
2178 * %-ERESTARTSYS - Interrupted.
2179 *
2180 * %-ENOMEM - Insufficient memory available.
2181 */
2182 int nilfs_construct_dsync_segment(struct super_block *sb, struct inode *inode,
2183 loff_t start, loff_t end)
2184 {
2185 struct the_nilfs *nilfs = sb->s_fs_info;
2186 struct nilfs_sc_info *sci = nilfs->ns_writer;
2187 struct nilfs_inode_info *ii;
2188 struct nilfs_transaction_info ti;
2189 int err = 0;
2191 if (!sci)
2192 return -EROFS;
2194 nilfs_transaction_lock(sb, &ti, 0);
2196 ii = NILFS_I(inode);
2197 if (test_bit(NILFS_I_INODE_DIRTY, &ii->i_state) ||
2198 nilfs_test_opt(nilfs, STRICT_ORDER) ||
2199 test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags) ||
2200 nilfs_discontinued(nilfs)) {
2201 nilfs_transaction_unlock(sb);
2202 err = nilfs_segctor_sync(sci);
2203 return err;
2204 }
2206 spin_lock(&nilfs->ns_inode_lock);
2207 if (!test_bit(NILFS_I_QUEUED, &ii->i_state) &&
2208 !test_bit(NILFS_I_BUSY, &ii->i_state)) {
2209 spin_unlock(&nilfs->ns_inode_lock);
2210 nilfs_transaction_unlock(sb);
2211 return 0;
2212 }
2213 spin_unlock(&nilfs->ns_inode_lock);
2214 sci->sc_dsync_inode = ii;
2215 sci->sc_dsync_start = start;
2216 sci->sc_dsync_end = end;
2218 err = nilfs_segctor_do_construct(sci, SC_LSEG_DSYNC);
2220 nilfs_transaction_unlock(sb);
2221 return err;
2222 }
2224 #define FLUSH_FILE_BIT (0x1) /* data file only */
2225 #define FLUSH_DAT_BIT (1 << NILFS_DAT_INO) /* DAT only */
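/*
 * [Editor's aside -- illustrative sketch, not part of segment.c] The two
 * defines above fix the layout of sc_flush_request: bit 0 stands for all
 * ordinary data files, while a metadata file is represented by the bit
 * matching its inode number (nilfs_flush_segment() above picks the bin
 * with "NILFS_MDT_INODE(sb, ino) ? ino : 0"). The model below assumes
 * NILFS_DAT_INO == 3; that value is stated here as an assumption, not
 * taken from this file.
 */
#include <assert.h>

#define MODEL_FILE_BIT 0x1u                /* mirrors FLUSH_FILE_BIT */
#define MODEL_DAT_INO  3                   /* assumed value of NILFS_DAT_INO */

static unsigned int flush_bit_for(unsigned long ino, int is_metadata)
{
	return 1u << (is_metadata ? ino : 0);
}

int main(void)
{
	assert(flush_bit_for(42, 0) == MODEL_FILE_BIT);  /* any data file -> bit 0 */
	assert(flush_bit_for(MODEL_DAT_INO, 1) == (1u << MODEL_DAT_INO));
	return 0;
}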
2227 /**
2228 * nilfs_segctor_accept - record accepted sequence count of log-write requests
2229 * @sci: segment constructor object
2230 */
2231 static void nilfs_segctor_accept(struct nilfs_sc_info *sci)
2232 {
2233 spin_lock(&sci->sc_state_lock);
2234 sci->sc_seq_accepted = sci->sc_seq_request;
2235 spin_unlock(&sci->sc_state_lock);
2236 del_timer_sync(&sci->sc_timer);
2237 }
2239 /**
2240 * nilfs_segctor_notify - notify the result of request to caller threads
2241 * @sci: segment constructor object
2242 * @mode: mode of log forming
2243 * @err: error code to be notified
2244 */
2245 static void nilfs_segctor_notify(struct nilfs_sc_info *sci, int mode, int err)
2246 {
2247 /* Clear requests (even when the construction failed) */
2248 spin_lock(&sci->sc_state_lock);
2250 if (mode == SC_LSEG_SR) {
2251 sci->sc_state &= ~NILFS_SEGCTOR_COMMIT;
2252 sci->sc_seq_done = sci->sc_seq_accepted;
2253 nilfs_segctor_wakeup(sci, err);
2254 sci->sc_flush_request = 0;
2255 } else {
2256 if (mode == SC_FLUSH_FILE)
2257 sci->sc_flush_request &= ~FLUSH_FILE_BIT;
2258 else if (mode == SC_FLUSH_DAT)
2259 sci->sc_flush_request &= ~FLUSH_DAT_BIT;
2261 /* re-enable timer if checkpoint creation was not done */
2262 if ((sci->sc_state & NILFS_SEGCTOR_COMMIT) &&
2263 time_before(jiffies, sci->sc_timer.expires))
2264 add_timer(&sci->sc_timer);
2265 }
2266 spin_unlock(&sci->sc_state_lock);
2267 }
2269 /**
2270 * nilfs_segctor_construct - form logs and write them to disk
2271 * @sci: segment constructor object
2272 * @mode: mode of log forming
2273 */
2274 static int nilfs_segctor_construct(struct nilfs_sc_info *sci, int mode)
2275 {
2276 struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
2277 struct nilfs_super_block **sbp;
2278 int err = 0;
2280 nilfs_segctor_accept(sci);
2282 if (nilfs_discontinued(nilfs))
2283 mode = SC_LSEG_SR;
2284 if (!nilfs_segctor_confirm(sci))
2285 err = nilfs_segctor_do_construct(sci, mode);
2287 if (likely(!err)) {
2288 if (mode != SC_FLUSH_DAT)
2289 atomic_set(&nilfs->ns_ndirtyblks, 0);
2290 if (test_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags) &&
2291 nilfs_discontinued(nilfs)) {
2292 down_write(&nilfs->ns_sem);
2293 err = -EIO;
2294 sbp = nilfs_prepare_super(sci->sc_super,
2295 nilfs_sb_will_flip(nilfs));
2296 if (likely(sbp)) {
2297 nilfs_set_log_cursor(sbp[0], nilfs);
2298 err = nilfs_commit_super(sci->sc_super,
2299 NILFS_SB_COMMIT);
2300 }
2301 up_write(&nilfs->ns_sem);
2302 }
2303 }
2305 nilfs_segctor_notify(sci, mode, err);
2306 return err;
2307 }
2309 static void nilfs_construction_timeout(unsigned long data)
2310 {
2311 struct task_struct *p = (struct task_struct *)data;
2312 wake_up_process(p);
2313 }
2315 static void
2316 nilfs_remove_written_gcinodes(struct the_nilfs *nilfs, struct list_head *head)
2317 {
2318 struct nilfs_inode_info *ii, *n;
2320 list_for_each_entry_safe(ii, n, head, i_dirty) {
2321 if (!test_bit(NILFS_I_UPDATED, &ii->i_state))
2322 continue;
2323 list_del_init(&ii->i_dirty);
2324 truncate_inode_pages(&ii->vfs_inode.i_data, 0);
2325 nilfs_btnode_cache_clear(&ii->i_btnode_cache);
2326 iput(&ii->vfs_inode);
2327 }
2328 }
2330 int nilfs_clean_segments(struct super_block *sb, struct nilfs_argv *argv,
2331 void **kbufs)
2332 {
2333 struct the_nilfs *nilfs = sb->s_fs_info;
2334 struct nilfs_sc_info *sci = nilfs->ns_writer;
2335 struct nilfs_transaction_info ti;
2336 int err;
2338 if (unlikely(!sci))
2339 return -EROFS;
2341 nilfs_transaction_lock(sb, &ti, 1);
2343 err = nilfs_mdt_save_to_shadow_map(nilfs->ns_dat);
2344 if (unlikely(err))
2345 goto out_unlock;
2347 err = nilfs_ioctl_prepare_clean_segments(nilfs, argv, kbufs);
2348 if (unlikely(err)) {
2349 nilfs_mdt_restore_from_shadow_map(nilfs->ns_dat);
2350 goto out_unlock;
2351 }
2353 sci->sc_freesegs = kbufs[4];
2354 sci->sc_nfreesegs = argv[4].v_nmembs;
2355 list_splice_tail_init(&nilfs->ns_gc_inodes, &sci->sc_gc_inodes);
2357 for (;;) {
2358 err = nilfs_segctor_construct(sci, SC_LSEG_SR);
2359 nilfs_remove_written_gcinodes(nilfs, &sci->sc_gc_inodes);
2361 if (likely(!err))
2362 break;
2364 nilfs_warning(sb, __func__,
2365 "segment construction failed. (err=%d)", err);
2366 set_current_state(TASK_INTERRUPTIBLE);
2367 schedule_timeout(sci->sc_interval);
2368 }
2369 if (nilfs_test_opt(nilfs, DISCARD)) {
2370 int ret = nilfs_discard_segments(nilfs, sci->sc_freesegs,
2371 sci->sc_nfreesegs);
2372 if (ret) {
2373 printk(KERN_WARNING
2374 "NILFS warning: error %d on discard request, "
2375 "turning discards off for the device\n", ret);
2376 nilfs_clear_opt(nilfs, DISCARD);
2377 }
2378 }
2380 out_unlock:
2381 sci->sc_freesegs = NULL;
2382 sci->sc_nfreesegs = 0;
2383 nilfs_mdt_clear_shadow_map(nilfs->ns_dat);
2384 nilfs_transaction_unlock(sb);
2385 return err;
2386 }
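/*
 * [Editor's aside -- illustrative sketch, not part of segment.c]
 * nilfs_clean_segments() is driven by the garbage-collector ioctl path
 * (it calls nilfs_ioctl_prepare_clean_segments() above) and, unusually,
 * retries a failed construction indefinitely, sleeping sc_interval
 * between attempts, presumably so a GC pass is never left half-applied.
 * Skeleton of that retry contract, with invented names and a stub that
 * fails twice before succeeding:
 */
#include <assert.h>

static int attempts;
static int construct_once(void) { return ++attempts < 3 ? -1 : 0; }
static void sleep_interval(void) { /* stands in for schedule_timeout() */ }

static int gc_write_logs(void)
{
	int err;

	for (;;) {
		err = construct_once();   /* write the logs for this GC pass */
		if (!err)
			break;
		sleep_interval();         /* back off, then try again */
	}
	return err;                       /* always 0 on exit from the loop */
}

int main(void)
{
	assert(gc_write_logs() == 0 && attempts == 3);
	return 0;
}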
2388 static void nilfs_segctor_thread_construct(struct nilfs_sc_info *sci, int mode)
2389 {
2390 struct nilfs_transaction_info ti;
2392 nilfs_transaction_lock(sci->sc_super, &ti, 0);
2393 nilfs_segctor_construct(sci, mode);
2395 /*
2396 * An unclosed segment should be retried. We do this using sc_timer:
2397 * when sc_timer times out, a full construction is run, which closes
2398 * the current logical segment.
2399 */
2400 if (test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags))
2401 nilfs_segctor_start_timer(sci);
2403 nilfs_transaction_unlock(sci->sc_super);
2404 }
2406 static void nilfs_segctor_do_immediate_flush(struct nilfs_sc_info *sci)
2407 {
2408 int mode = 0;
2409 int err;
2411 spin_lock(&sci->sc_state_lock);
2412 mode = (sci->sc_flush_request & FLUSH_DAT_BIT) ?
2413 SC_FLUSH_DAT : SC_FLUSH_FILE;
2414 spin_unlock(&sci->sc_state_lock);
2416 if (mode) {
2417 err = nilfs_segctor_do_construct(sci, mode);
2419 spin_lock(&sci->sc_state_lock);
2420 sci->sc_flush_request &= (mode == SC_FLUSH_FILE) ?
2421 ~FLUSH_FILE_BIT : ~FLUSH_DAT_BIT;
2422 spin_unlock(&sci->sc_state_lock);
2423 }
2424 clear_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags);
2425 }
2427 static int nilfs_segctor_flush_mode(struct nilfs_sc_info *sci)
2428 {
2429 if (!test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags) ||
2430 time_before(jiffies, sci->sc_lseg_stime + sci->sc_mjcp_freq)) {
2431 if (!(sci->sc_flush_request & ~FLUSH_FILE_BIT))
2432 return SC_FLUSH_FILE;
2433 else if (!(sci->sc_flush_request & ~FLUSH_DAT_BIT))
2434 return SC_FLUSH_DAT;
2435 }
2436 return SC_LSEG_SR;
2437 }
2439 /**
2440 * nilfs_segctor_thread - main loop of the segment constructor thread.
2441 * @arg: pointer to a struct nilfs_sc_info.
2442 *
2443 * nilfs_segctor_thread() initializes a timer and serves as a daemon
2444 * to execute segment constructions.
2445 */
2446 static int nilfs_segctor_thread(void *arg)
2447 {
2448 struct nilfs_sc_info *sci = (struct nilfs_sc_info *)arg;
2449 struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
2450 int timeout = 0;
2452 sci->sc_timer.data = (unsigned long)current;
2453 sci->sc_timer.function = nilfs_construction_timeout;
2455 /* start sync. */
2456 sci->sc_task = current;
2457 wake_up(&sci->sc_wait_task); /* for nilfs_segctor_start_thread() */
2458 printk(KERN_INFO
2459 "segctord starting. Construction interval = %lu seconds, "
2460 "CP frequency < %lu seconds\n",
2461 sci->sc_interval / HZ, sci->sc_mjcp_freq / HZ);
2463 spin_lock(&sci->sc_state_lock);
2464 loop:
2465 for (;;) {
2466 int mode;
2468 if (sci->sc_state & NILFS_SEGCTOR_QUIT)
2469 goto end_thread;
2471 if (timeout || sci->sc_seq_request != sci->sc_seq_done)
2472 mode = SC_LSEG_SR;
2473 else if (!sci->sc_flush_request)
2474 break;
2475 else
2476 mode = nilfs_segctor_flush_mode(sci);
2478 spin_unlock(&sci->sc_state_lock);
2479 nilfs_segctor_thread_construct(sci, mode);
2480 spin_lock(&sci->sc_state_lock);
2481 timeout = 0;
2482 }
2485 if (freezing(current)) {
2486 spin_unlock(&sci->sc_state_lock);
2487 try_to_freeze();
2488 spin_lock(&sci->sc_state_lock);
2489 } else {
2490 DEFINE_WAIT(wait);
2491 int should_sleep = 1;
2493 prepare_to_wait(&sci->sc_wait_daemon, &wait,
2494 TASK_INTERRUPTIBLE);
2496 if (sci->sc_seq_request != sci->sc_seq_done)
2497 should_sleep = 0;
2498 else if (sci->sc_flush_request)
2499 should_sleep = 0;
2500 else if (sci->sc_state & NILFS_SEGCTOR_COMMIT)
2501 should_sleep = time_before(jiffies,
2502 sci->sc_timer.expires);
2504 if (should_sleep) {
2505 spin_unlock(&sci->sc_state_lock);
2506 schedule();
2507 spin_lock(&sci->sc_state_lock);
2508 }
2509 finish_wait(&sci->sc_wait_daemon, &wait);
2510 timeout = ((sci->sc_state & NILFS_SEGCTOR_COMMIT) &&
2511 time_after_eq(jiffies, sci->sc_timer.expires));
2513 if (nilfs_sb_dirty(nilfs) && nilfs_sb_need_update(nilfs))
2514 set_nilfs_discontinued(nilfs);
2515 }
2516 goto loop;
2518 end_thread:
2519 spin_unlock(&sci->sc_state_lock);
2521 /* end sync. */
2522 sci->sc_task = NULL;
2523 wake_up(&sci->sc_wait_task); /* for nilfs_segctor_kill_thread() */
2524 return 0;
2525 }
2527 static int nilfs_segctor_start_thread(struct nilfs_sc_info *sci)
2528 {
2529 struct task_struct *t;
2531 t = kthread_run(nilfs_segctor_thread, sci, "segctord");
2532 if (IS_ERR(t)) {
2533 int err = PTR_ERR(t);
2535 printk(KERN_ERR "NILFS: error %d creating segctord thread\n",
2536 err);
2537 return err;
2538 }
2539 wait_event(sci->sc_wait_task, sci->sc_task != NULL);
2540 return 0;
2541 }
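/*
 * [Editor's aside -- illustrative sketch, not part of segment.c] Thread
 * start-up above is a two-step handshake: kthread_run() launches
 * segctord, then the parent sleeps on sc_wait_task until the child
 * publishes itself in sc_task. The pthread model below (all names
 * invented) shows the same "wait until the worker announces it is
 * running" pattern:
 */
#include <pthread.h>

static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t started = PTHREAD_COND_INITIALIZER;
static int running;                   /* models sci->sc_task != NULL */

static void *worker(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&m);
	running = 1;                  /* models "sci->sc_task = current" */
	pthread_cond_signal(&started);/* models wake_up(&sci->sc_wait_task) */
	pthread_mutex_unlock(&m);
	return 0;
}

static int start_worker(void)
{
	pthread_t t;

	if (pthread_create(&t, 0, worker, 0))
		return -1;
	pthread_mutex_lock(&m);
	while (!running)              /* models wait_event(sc_wait_task, ...) */
		pthread_cond_wait(&started, &m);
	pthread_mutex_unlock(&m);
	return pthread_join(t, 0);
}

int main(void) { return start_worker(); }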
2543 static void nilfs_segctor_kill_thread(struct nilfs_sc_info *sci)
2544 __acquires(&sci->sc_state_lock)
2545 __releases(&sci->sc_state_lock)
2546 {
2547 sci->sc_state |= NILFS_SEGCTOR_QUIT;
2549 while (sci->sc_task) {
2550 wake_up(&sci->sc_wait_daemon);
2551 spin_unlock(&sci->sc_state_lock);
2552 wait_event(sci->sc_wait_task, sci->sc_task == NULL);
2553 spin_lock(&sci->sc_state_lock);
2554 }
2555 }
2557 /*
2558 * Setup & clean-up functions
2559 */
2560 static struct nilfs_sc_info *nilfs_segctor_new(struct super_block *sb,
2561 struct nilfs_root *root)
2562 {
2563 struct the_nilfs *nilfs = sb->s_fs_info;
2564 struct nilfs_sc_info *sci;
2566 sci = kzalloc(sizeof(*sci), GFP_KERNEL);
2567 if (!sci)
2568 return NULL;
2570 sci->sc_super = sb;
2572 nilfs_get_root(root);
2573 sci->sc_root = root;
2575 init_waitqueue_head(&sci->sc_wait_request);
2576 init_waitqueue_head(&sci->sc_wait_daemon);
2577 init_waitqueue_head(&sci->sc_wait_task);
2578 spin_lock_init(&sci->sc_state_lock);
2579 INIT_LIST_HEAD(&sci->sc_dirty_files);
2580 INIT_LIST_HEAD(&sci->sc_segbufs);
2581 INIT_LIST_HEAD(&sci->sc_write_logs);
2582 INIT_LIST_HEAD(&sci->sc_gc_inodes);
2583 init_timer(&sci->sc_timer);
2585 sci->sc_interval = HZ * NILFS_SC_DEFAULT_TIMEOUT;
2586 sci->sc_mjcp_freq = HZ * NILFS_SC_DEFAULT_SR_FREQ;
2587 sci->sc_watermark = NILFS_SC_DEFAULT_WATERMARK;
2589 if (nilfs->ns_interval)
2590 sci->sc_interval = HZ * nilfs->ns_interval;
2591 if (nilfs->ns_watermark)
2592 sci->sc_watermark = nilfs->ns_watermark;
2593 return sci;
2594 }
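/*
 * [Editor's aside -- illustrative sketch, not part of segment.c] The tail
 * of nilfs_segctor_new() above layers per-mount tunables over
 * compile-time defaults: a zero in ns_interval or ns_watermark means
 * "not configured", so the NILFS_SC_DEFAULT_* value stands. The helper
 * below restates that rule with invented names:
 */
#include <assert.h>

static unsigned long tunable_or_default(unsigned long configured,
					unsigned long def)
{
	return configured ? configured : def;  /* 0 means "use the default" */
}

int main(void)
{
	assert(tunable_or_default(0, 5) == 5);   /* unset: keep the default */
	assert(tunable_or_default(30, 5) == 30); /* mount-time value wins */
	return 0;
}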
2596 static void nilfs_segctor_write_out(struct nilfs_sc_info *sci)
2597 {
2598 int ret, retrycount = NILFS_SC_CLEANUP_RETRY;
2600 /* The segctord thread was stopped and its timer was removed,
2601 but some tasks remain. */
2602 do {
2603 struct nilfs_transaction_info ti;
2605 nilfs_transaction_lock(sci->sc_super, &ti, 0);
2606 ret = nilfs_segctor_construct(sci, SC_LSEG_SR);
2607 nilfs_transaction_unlock(sci->sc_super);
2609 } while (ret && retrycount-- > 0);
2610 }
2612 /**
2613 * nilfs_segctor_destroy - destroy the segment constructor.
2614 * @sci: nilfs_sc_info
2615 *
2616 * nilfs_segctor_destroy() kills the segctord thread and frees
2617 * the nilfs_sc_info struct.
2618 * Caller must hold the segment semaphore.
2619 */
2620 static void nilfs_segctor_destroy(struct nilfs_sc_info *sci)
2621 {
2622 struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
2623 int flag;
2625 up_write(&nilfs->ns_segctor_sem);
2627 spin_lock(&sci->sc_state_lock);
2628 nilfs_segctor_kill_thread(sci);
2629 flag = ((sci->sc_state & NILFS_SEGCTOR_COMMIT) || sci->sc_flush_request
2630 || sci->sc_seq_request != sci->sc_seq_done);
2631 spin_unlock(&sci->sc_state_lock);
2633 if (flag || !nilfs_segctor_confirm(sci))
2634 nilfs_segctor_write_out(sci);
2636 if (!list_empty(&sci->sc_dirty_files)) {
2637 nilfs_warning(sci->sc_super, __func__,
2638 "dirty file(s) after the final construction\n");
2639 nilfs_dispose_list(nilfs, &sci->sc_dirty_files, 1);
2640 }
2642 WARN_ON(!list_empty(&sci->sc_segbufs));
2643 WARN_ON(!list_empty(&sci->sc_write_logs));
2645 nilfs_put_root(sci->sc_root);
2647 down_write(&nilfs->ns_segctor_sem);
2649 del_timer_sync(&sci->sc_timer);
2650 kfree(sci);
2651 }
2653 /**
2654 * nilfs_attach_log_writer - attach log writer
2655 * @sb: super block instance
2656 * @root: root object of the current filesystem tree
2657 *
2658 * This allocates a log writer object, initializes it, and starts the
2659 * log writer.
2660 *
2661 * Return Value: On success, 0 is returned. On error, one of the following
2662 * negative error codes is returned.
2663 *
2664 * %-ENOMEM - Insufficient memory available.
2665 */
2666 int nilfs_attach_log_writer(struct super_block *sb, struct nilfs_root *root)
2667 {
2668 struct the_nilfs *nilfs = sb->s_fs_info;
2669 int err;
2671 if (nilfs->ns_writer) {
2672 /*
2673 * This happens if the filesystem was remounted
2674 * read/write after nilfs_error degenerated it into a
2675 * read-only mount.
2676 */
2677 nilfs_detach_log_writer(sb);
2678 }
2680 nilfs->ns_writer = nilfs_segctor_new(sb, root);
2681 if (!nilfs->ns_writer)
2682 return -ENOMEM;
2684 err = nilfs_segctor_start_thread(nilfs->ns_writer);
2685 if (err) {
2686 kfree(nilfs->ns_writer);
2687 nilfs->ns_writer = NULL;
2688 }
2689 return err;
2690 }
2692 /**
2693 * nilfs_detach_log_writer - destroy log writer
2694 * @sb: super block instance
2695 *
2696 * This kills the log writer daemon, frees the log writer object, and
2697 * destroys the list of dirty files.
2698 */
2699 void nilfs_detach_log_writer(struct super_block *sb)
2700 {
2701 struct the_nilfs *nilfs = sb->s_fs_info;
2702 LIST_HEAD(garbage_list);
2704 down_write(&nilfs->ns_segctor_sem);
2705 if (nilfs->ns_writer) {
2706 nilfs_segctor_destroy(nilfs->ns_writer);
2707 nilfs->ns_writer = NULL;
2708 }
2710 /* Forcibly free the list of dirty files */
2711 spin_lock(&nilfs->ns_inode_lock);
2712 if (!list_empty(&nilfs->ns_dirty_files)) {
2713 list_splice_init(&nilfs->ns_dirty_files, &garbage_list);
2714 nilfs_warning(sb, __func__,
2715 "Hit dirty file after stopped log writer\n");
2717 spin_unlock(&nilfs->ns_inode_lock);
2718 up_write(&nilfs->ns_segctor_sem);
2720 nilfs_dispose_list(nilfs, &garbage_list, 1);
2721 }