/*
 * the_nilfs.c - the_nilfs shared structure.
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Written by Ryusuke Konishi.
 */

#include <linux/buffer_head.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/random.h>
#include <linux/crc32.h>
#include "nilfs.h"
#include "segment.h"
#include "alloc.h"
#include "cpfile.h"
#include "sufile.h"
#include "dat.h"
#include "segbuf.h"

static int nilfs_valid_sb(struct nilfs_super_block *sbp);

void nilfs_set_last_segment(struct the_nilfs *nilfs,
			    sector_t start_blocknr, u64 seq, __u64 cno)
{
	spin_lock(&nilfs->ns_last_segment_lock);
	nilfs->ns_last_pseg = start_blocknr;
	nilfs->ns_last_seq = seq;
	nilfs->ns_last_cno = cno;

	if (!nilfs_sb_dirty(nilfs)) {
		if (nilfs->ns_prev_seq == nilfs->ns_last_seq)
			goto stay_cursor;

		set_nilfs_sb_dirty(nilfs);
	}
	nilfs->ns_prev_seq = nilfs->ns_last_seq;

 stay_cursor:
	spin_unlock(&nilfs->ns_last_segment_lock);
}

/**
 * alloc_nilfs - allocate a nilfs object
 * @sb: super block instance
 *
 * Return Value: On success, pointer to the_nilfs is returned.
 * On error, NULL is returned.
 */
struct the_nilfs *alloc_nilfs(struct super_block *sb)
{
	struct the_nilfs *nilfs;

	nilfs = kzalloc(sizeof(*nilfs), GFP_KERNEL);
	if (!nilfs)
		return NULL;

	nilfs->ns_sb = sb;
	nilfs->ns_bdev = sb->s_bdev;
	atomic_set(&nilfs->ns_ndirtyblks, 0);
	init_rwsem(&nilfs->ns_sem);
	mutex_init(&nilfs->ns_snapshot_mount_mutex);
	INIT_LIST_HEAD(&nilfs->ns_dirty_files);
	INIT_LIST_HEAD(&nilfs->ns_gc_inodes);
	spin_lock_init(&nilfs->ns_inode_lock);
	spin_lock_init(&nilfs->ns_next_gen_lock);
	spin_lock_init(&nilfs->ns_last_segment_lock);
	nilfs->ns_cptree = RB_ROOT;
	spin_lock_init(&nilfs->ns_cptree_lock);
	init_rwsem(&nilfs->ns_segctor_sem);
	nilfs->ns_sb_update_freq = NILFS_SB_FREQ;

	return nilfs;
}

/**
 * destroy_nilfs - destroy nilfs object
 * @nilfs: nilfs object to be released
 */
void destroy_nilfs(struct the_nilfs *nilfs)
{
	might_sleep();
	if (nilfs_init(nilfs)) {
		nilfs_sysfs_delete_device_group(nilfs);
		brelse(nilfs->ns_sbh[0]);
		brelse(nilfs->ns_sbh[1]);
	}
	kfree(nilfs);
}
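
/*
 * nilfs_load_super_root() reads the super root block at @sr_block and
 * instantiates the three metadata files embedded in it: the DAT, the
 * checkpoint file (cpfile), and the segment usage file (sufile).  The
 * entry/record sizes used for each are taken from the on-disk super
 * block under ns_sem.
 */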
static int nilfs_load_super_root(struct the_nilfs *nilfs,
				 struct super_block *sb, sector_t sr_block)
{
	struct buffer_head *bh_sr;
	struct nilfs_super_root *raw_sr;
	struct nilfs_super_block **sbp = nilfs->ns_sbp;
	struct nilfs_inode *rawi;
	unsigned int dat_entry_size, segment_usage_size, checkpoint_size;
	unsigned int inode_size;
	int err;

	err = nilfs_read_super_root_block(nilfs, sr_block, &bh_sr, 1);
	if (unlikely(err))
		return err;

	down_read(&nilfs->ns_sem);
	dat_entry_size = le16_to_cpu(sbp[0]->s_dat_entry_size);
	checkpoint_size = le16_to_cpu(sbp[0]->s_checkpoint_size);
	segment_usage_size = le16_to_cpu(sbp[0]->s_segment_usage_size);
	up_read(&nilfs->ns_sem);

	inode_size = nilfs->ns_inode_size;

	rawi = (void *)bh_sr->b_data + NILFS_SR_DAT_OFFSET(inode_size);
	err = nilfs_dat_read(sb, dat_entry_size, rawi, &nilfs->ns_dat);
	if (err)
		goto failed;

	rawi = (void *)bh_sr->b_data + NILFS_SR_CPFILE_OFFSET(inode_size);
	err = nilfs_cpfile_read(sb, checkpoint_size, rawi, &nilfs->ns_cpfile);
	if (err)
		goto failed_dat;

	rawi = (void *)bh_sr->b_data + NILFS_SR_SUFILE_OFFSET(inode_size);
	err = nilfs_sufile_read(sb, segment_usage_size, rawi,
				&nilfs->ns_sufile);
	if (err)
		goto failed_cpfile;

	raw_sr = (struct nilfs_super_root *)bh_sr->b_data;
	nilfs->ns_nongc_ctime = le64_to_cpu(raw_sr->sr_nongc_ctime);

 failed:
	brelse(bh_sr);
	return err;

 failed_cpfile:
	iput(nilfs->ns_cpfile);

 failed_dat:
	iput(nilfs->ns_dat);
	goto failed;
}

static void nilfs_init_recovery_info(struct nilfs_recovery_info *ri)
{
	memset(ri, 0, sizeof(*ri));
	INIT_LIST_HEAD(&ri->ri_used_segments);
}

static void nilfs_clear_recovery_info(struct nilfs_recovery_info *ri)
{
	nilfs_dispose_segment_list(&ri->ri_used_segments);
}

/**
 * nilfs_store_log_cursor - load log cursor from a super block
 * @nilfs: nilfs object
 * @sbp: buffer storing super block to be read
 *
 * nilfs_store_log_cursor() reads the last position of the log
 * containing a super root from a given super block, and initializes
 * relevant information on the nilfs object in preparation for log
 * scanning and recovery.
 */
static int nilfs_store_log_cursor(struct the_nilfs *nilfs,
				  struct nilfs_super_block *sbp)
{
	int ret = 0;

	nilfs->ns_last_pseg = le64_to_cpu(sbp->s_last_pseg);
	nilfs->ns_last_cno = le64_to_cpu(sbp->s_last_cno);
	nilfs->ns_last_seq = le64_to_cpu(sbp->s_last_seq);

	nilfs->ns_prev_seq = nilfs->ns_last_seq;
	nilfs->ns_seg_seq = nilfs->ns_last_seq;
	nilfs->ns_segnum =
		nilfs_get_segnum_of_block(nilfs, nilfs->ns_last_pseg);
	nilfs->ns_cno = nilfs->ns_last_cno + 1;
	if (nilfs->ns_segnum >= nilfs->ns_nsegments) {
		nilfs_msg(nilfs->ns_sb, KERN_ERR,
			  "pointed segment number is out of range: segnum=%llu, nsegments=%lu",
			  (unsigned long long)nilfs->ns_segnum,
			  nilfs->ns_nsegments);
		ret = -EINVAL;
	}
	return ret;
}

/**
 * load_nilfs - load and recover the nilfs
 * @nilfs: the_nilfs structure to be loaded
 * @sb: super block instance used to recover past segments
 *
 * load_nilfs() searches and loads the latest super root,
 * attaches the last segment, and does recovery if needed.
 * The caller must call this exclusively for simultaneous mounts.
 */
int load_nilfs(struct the_nilfs *nilfs, struct super_block *sb)
{
	struct nilfs_recovery_info ri;
	unsigned int s_flags = sb->s_flags;
	int really_read_only = bdev_read_only(nilfs->ns_bdev);
	int valid_fs = nilfs_valid_fs(nilfs);
	int err;

	if (!valid_fs) {
		nilfs_msg(sb, KERN_WARNING, "mounting unchecked fs");
		if (s_flags & MS_RDONLY) {
			nilfs_msg(sb, KERN_INFO,
				  "recovery required for readonly filesystem");
			nilfs_msg(sb, KERN_INFO,
				  "write access will be enabled during recovery");
		}
	}

	nilfs_init_recovery_info(&ri);

	err = nilfs_search_super_root(nilfs, &ri);
	if (unlikely(err)) {
		struct nilfs_super_block **sbp = nilfs->ns_sbp;
		int blocksize;

		if (err != -EINVAL)
			goto scan_error;

		if (!nilfs_valid_sb(sbp[1])) {
			nilfs_msg(sb, KERN_WARNING,
				  "unable to fall back to spare super block");
			goto scan_error;
		}
		nilfs_msg(sb, KERN_INFO,
			  "trying rollback from an earlier position");

		/*
		 * restore super block with its spare and reconfigure
		 * relevant states of the nilfs object.
		 */
		memcpy(sbp[0], sbp[1], nilfs->ns_sbsize);
		nilfs->ns_crc_seed = le32_to_cpu(sbp[0]->s_crc_seed);
		nilfs->ns_sbwtime = le64_to_cpu(sbp[0]->s_wtime);

		/* verify consistency between two super blocks */
		blocksize = BLOCK_SIZE << le32_to_cpu(sbp[0]->s_log_block_size);
		if (blocksize != nilfs->ns_blocksize) {
			nilfs_msg(sb, KERN_WARNING,
				  "blocksize differs between two super blocks (%d != %d)",
				  blocksize, nilfs->ns_blocksize);
			goto scan_error;
		}

		err = nilfs_store_log_cursor(nilfs, sbp[0]);
		if (err)
			goto scan_error;

		/* drop clean flag to allow roll-forward and recovery */
		nilfs->ns_mount_state &= ~NILFS_VALID_FS;
		valid_fs = 0;

		err = nilfs_search_super_root(nilfs, &ri);
		if (err)
			goto scan_error;
	}

	err = nilfs_load_super_root(nilfs, sb, ri.ri_super_root);
	if (unlikely(err)) {
		nilfs_msg(sb, KERN_ERR, "error %d while loading super root",
			  err);
		goto failed;
	}

	if (valid_fs)
		goto skip_recovery;

	if (s_flags & MS_RDONLY) {
		__u64 features;

		if (nilfs_test_opt(nilfs, NORECOVERY)) {
			nilfs_msg(sb, KERN_INFO,
				  "norecovery option specified, skipping roll-forward recovery");
			goto skip_recovery;
		}
		features = le64_to_cpu(nilfs->ns_sbp[0]->s_feature_compat_ro) &
			~NILFS_FEATURE_COMPAT_RO_SUPP;
		if (features) {
			nilfs_msg(sb, KERN_ERR,
				  "couldn't proceed with recovery because of unsupported optional features (%llx)",
				  (unsigned long long)features);
			err = -EROFS;
			goto failed_unload;
		}
		if (really_read_only) {
			nilfs_msg(sb, KERN_ERR,
				  "write access unavailable, cannot proceed");
			err = -EROFS;
			goto failed_unload;
		}
		sb->s_flags &= ~MS_RDONLY;
	} else if (nilfs_test_opt(nilfs, NORECOVERY)) {
		nilfs_msg(sb, KERN_ERR,
			  "recovery cancelled because norecovery option was specified for a read/write mount");
		err = -EINVAL;
		goto failed_unload;
	}

	err = nilfs_salvage_orphan_logs(nilfs, sb, &ri);
	if (err)
		goto failed_unload;

	down_write(&nilfs->ns_sem);
	nilfs->ns_mount_state |= NILFS_VALID_FS; /* set "clean" flag */
	err = nilfs_cleanup_super(sb);
	up_write(&nilfs->ns_sem);

	if (err) {
		nilfs_msg(sb, KERN_ERR,
			  "error %d updating super block. recovery unfinished.",
			  err);
		goto failed_unload;
	}
	nilfs_msg(sb, KERN_INFO, "recovery complete");

 skip_recovery:
	nilfs_clear_recovery_info(&ri);
	sb->s_flags = s_flags;
	return 0;

 scan_error:
	nilfs_msg(sb, KERN_ERR, "error %d while searching super root", err);
	goto failed;

 failed_unload:
	iput(nilfs->ns_cpfile);
	iput(nilfs->ns_sufile);
	iput(nilfs->ns_dat);

 failed:
	nilfs_clear_recovery_info(&ri);
	sb->s_flags = s_flags;
	return err;
}
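
/*
 * The maximum file size is the smaller of the page cache limit
 * (MAX_LFS_FILESIZE) and, when blkbits + NILFS_BMAP_KEY_BIT is below
 * 64, the largest offset addressable through the bmap key space for
 * the given block size.
 */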
static unsigned long long nilfs_max_size(unsigned int blkbits)
{
	unsigned int max_bits;
	unsigned long long res = MAX_LFS_FILESIZE; /* page cache limit */

	max_bits = blkbits + NILFS_BMAP_KEY_BIT; /* bmap size limit */
	if (max_bits < 64)
		res = min_t(unsigned long long, res, (1ULL << max_bits) - 1);
	return res;
}
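
/*
 * Reserved segment calculation below: for example, with nsegs == 1000
 * and ns_r_segments_percentage == 5, DIV_ROUND_UP(1000 * 5, 100) == 50
 * segments are reserved (but never fewer than NILFS_MIN_NRSVSEGS).
 */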
/**
 * nilfs_nrsvsegs - calculate the number of reserved segments
 * @nilfs: nilfs object
 * @nsegs: total number of segments
 */
unsigned long nilfs_nrsvsegs(struct the_nilfs *nilfs, unsigned long nsegs)
{
	return max_t(unsigned long, NILFS_MIN_NRSVSEGS,
		     DIV_ROUND_UP(nsegs * nilfs->ns_r_segments_percentage,
				  100));
}

void nilfs_set_nsegments(struct the_nilfs *nilfs, unsigned long nsegs)
{
	nilfs->ns_nsegments = nsegs;
	nilfs->ns_nrsvsegs = nilfs_nrsvsegs(nilfs, nsegs);
}

static int nilfs_store_disk_layout(struct the_nilfs *nilfs,
				   struct nilfs_super_block *sbp)
{
	if (le32_to_cpu(sbp->s_rev_level) < NILFS_MIN_SUPP_REV) {
		nilfs_msg(nilfs->ns_sb, KERN_ERR,
			  "unsupported revision (superblock rev.=%d.%d, current rev.=%d.%d). Please check the version of mkfs.nilfs(2).",
			  le32_to_cpu(sbp->s_rev_level),
			  le16_to_cpu(sbp->s_minor_rev_level),
			  NILFS_CURRENT_REV, NILFS_MINOR_REV);
		return -EINVAL;
	}
	nilfs->ns_sbsize = le16_to_cpu(sbp->s_bytes);
	if (nilfs->ns_sbsize > BLOCK_SIZE)
		return -EINVAL;

	nilfs->ns_inode_size = le16_to_cpu(sbp->s_inode_size);
	if (nilfs->ns_inode_size > nilfs->ns_blocksize) {
		nilfs_msg(nilfs->ns_sb, KERN_ERR,
			  "too large inode size: %d bytes",
			  nilfs->ns_inode_size);
		return -EINVAL;
	} else if (nilfs->ns_inode_size < NILFS_MIN_INODE_SIZE) {
		nilfs_msg(nilfs->ns_sb, KERN_ERR,
			  "too small inode size: %d bytes",
			  nilfs->ns_inode_size);
		return -EINVAL;
	}

	nilfs->ns_first_ino = le32_to_cpu(sbp->s_first_ino);

	nilfs->ns_blocks_per_segment = le32_to_cpu(sbp->s_blocks_per_segment);
	if (nilfs->ns_blocks_per_segment < NILFS_SEG_MIN_BLOCKS) {
		nilfs_msg(nilfs->ns_sb, KERN_ERR,
			  "too short segment: %lu blocks",
			  nilfs->ns_blocks_per_segment);
		return -EINVAL;
	}

	nilfs->ns_first_data_block = le64_to_cpu(sbp->s_first_data_block);
	nilfs->ns_r_segments_percentage =
		le32_to_cpu(sbp->s_r_segments_percentage);
	if (nilfs->ns_r_segments_percentage < 1 ||
	    nilfs->ns_r_segments_percentage > 99) {
		nilfs_msg(nilfs->ns_sb, KERN_ERR,
			  "invalid reserved segments percentage: %lu",
			  nilfs->ns_r_segments_percentage);
		return -EINVAL;
	}

	nilfs_set_nsegments(nilfs, le64_to_cpu(sbp->s_nsegments));
	nilfs->ns_crc_seed = le32_to_cpu(sbp->s_crc_seed);
	return 0;
}
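
/*
 * nilfs_valid_sb() checks the magic number and verifies the checksum
 * of a super block.  The CRC is computed over s_bytes bytes with the
 * s_sum field itself replaced by four zero bytes (the static "sum"
 * buffer below), so the stored checksum does not feed into itself.
 */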
static int nilfs_valid_sb(struct nilfs_super_block *sbp)
{
	static unsigned char sum[4];
	const int sumoff = offsetof(struct nilfs_super_block, s_sum);
	size_t bytes;
	u32 crc;

	if (!sbp || le16_to_cpu(sbp->s_magic) != NILFS_SUPER_MAGIC)
		return 0;
	bytes = le16_to_cpu(sbp->s_bytes);
	if (bytes < sumoff + 4 || bytes > BLOCK_SIZE)
		return 0;
	crc = crc32_le(le32_to_cpu(sbp->s_crc_seed), (unsigned char *)sbp,
		       sumoff);
	crc = crc32_le(crc, sum, 4);
	crc = crc32_le(crc, (unsigned char *)sbp + sumoff + 4,
		       bytes - sumoff - 4);
	return crc == le32_to_cpu(sbp->s_sum);
}
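
/*
 * nilfs_sb2_bad_offset() reports whether the given byte offset of the
 * secondary super block falls inside the area covered by segments
 * (s_nsegments * s_blocks_per_segment blocks); a valid secondary super
 * block must sit beyond that area, near the end of the device.
 */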
static int nilfs_sb2_bad_offset(struct nilfs_super_block *sbp, u64 offset)
{
	return offset < ((le64_to_cpu(sbp->s_nsegments) *
			  le32_to_cpu(sbp->s_blocks_per_segment)) <<
			 (le32_to_cpu(sbp->s_log_block_size) + 10));
}

static void nilfs_release_super_block(struct the_nilfs *nilfs)
{
	int i;

	for (i = 0; i < 2; i++) {
		if (nilfs->ns_sbp[i]) {
			brelse(nilfs->ns_sbh[i]);
			nilfs->ns_sbh[i] = NULL;
			nilfs->ns_sbp[i] = NULL;
		}
	}
}

void nilfs_fall_back_super_block(struct the_nilfs *nilfs)
{
	brelse(nilfs->ns_sbh[0]);
	nilfs->ns_sbh[0] = nilfs->ns_sbh[1];
	nilfs->ns_sbp[0] = nilfs->ns_sbp[1];
	nilfs->ns_sbh[1] = NULL;
	nilfs->ns_sbp[1] = NULL;
}

void nilfs_swap_super_block(struct the_nilfs *nilfs)
{
	struct buffer_head *tsbh = nilfs->ns_sbh[0];
	struct nilfs_super_block *tsbp = nilfs->ns_sbp[0];

	nilfs->ns_sbh[0] = nilfs->ns_sbh[1];
	nilfs->ns_sbp[0] = nilfs->ns_sbp[1];
	nilfs->ns_sbh[1] = tsbh;
	nilfs->ns_sbp[1] = tsbp;
}
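
/*
 * nilfs_load_super_block() reads both super block copies (the primary
 * one at NILFS_SB_OFFSET_BYTES and the secondary one near the end of
 * the device), validates them, and makes the newer valid copy, judged
 * by s_last_cno, the working copy in ns_sbp[0]/ns_sbh[0].
 */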
static int nilfs_load_super_block(struct the_nilfs *nilfs,
				  struct super_block *sb, int blocksize,
				  struct nilfs_super_block **sbpp)
{
	struct nilfs_super_block **sbp = nilfs->ns_sbp;
	struct buffer_head **sbh = nilfs->ns_sbh;
	u64 sb2off = NILFS_SB2_OFFSET_BYTES(nilfs->ns_bdev->bd_inode->i_size);
	int valid[2], swp = 0;

	sbp[0] = nilfs_read_super_block(sb, NILFS_SB_OFFSET_BYTES, blocksize,
					&sbh[0]);
	sbp[1] = nilfs_read_super_block(sb, sb2off, blocksize, &sbh[1]);

	if (!sbp[0]) {
		if (!sbp[1]) {
			nilfs_msg(sb, KERN_ERR, "unable to read superblock");
			return -EIO;
		}
		nilfs_msg(sb, KERN_WARNING,
			  "unable to read primary superblock (blocksize = %d)",
			  blocksize);
	} else if (!sbp[1]) {
		nilfs_msg(sb, KERN_WARNING,
			  "unable to read secondary superblock (blocksize = %d)",
			  blocksize);
	}

	/*
	 * Compare two super blocks and set 1 in swp if the secondary
	 * super block is valid and newer.  Otherwise, set 0 in swp.
	 */
	valid[0] = nilfs_valid_sb(sbp[0]);
	valid[1] = nilfs_valid_sb(sbp[1]);
	swp = valid[1] && (!valid[0] ||
			   le64_to_cpu(sbp[1]->s_last_cno) >
			   le64_to_cpu(sbp[0]->s_last_cno));

	if (valid[swp] && nilfs_sb2_bad_offset(sbp[swp], sb2off)) {
		brelse(sbh[1]);
		sbh[1] = NULL;
		sbp[1] = NULL;
		valid[1] = 0;
		swp = 0;
	}
	if (!valid[swp]) {
		nilfs_release_super_block(nilfs);
		nilfs_msg(sb, KERN_ERR, "couldn't find nilfs on the device");
		return -EINVAL;
	}

	if (!valid[!swp])
		nilfs_msg(sb, KERN_WARNING,
			  "broken superblock, retrying with spare superblock (blocksize = %d)",
			  blocksize);
	if (swp)
		nilfs_swap_super_block(nilfs);

	nilfs->ns_sbwcount = 0;
	nilfs->ns_sbwtime = le64_to_cpu(sbp[0]->s_wtime);
	nilfs->ns_prot_seq = le64_to_cpu(sbp[valid[1] & !swp]->s_last_seq);
	*sbpp = sbp[0];
	return 0;
}

/**
 * init_nilfs - initialize a NILFS instance.
 * @nilfs: the_nilfs structure
 * @sb: super block
 * @data: mount options
 *
 * init_nilfs() performs common initialization per block device (e.g.
 * reading the super block, getting disk layout information, initializing
 * shared fields in the_nilfs).
 *
 * Return Value: On success, 0 is returned. On error, a negative error
 * code is returned.
 */
int init_nilfs(struct the_nilfs *nilfs, struct super_block *sb, char *data)
{
	struct nilfs_super_block *sbp;
	int blocksize;
	int err;

	down_write(&nilfs->ns_sem);

	blocksize = sb_min_blocksize(sb, NILFS_MIN_BLOCK_SIZE);
	if (!blocksize) {
		nilfs_msg(sb, KERN_ERR, "unable to set blocksize");
		err = -EINVAL;
		goto out;
	}
	err = nilfs_load_super_block(nilfs, sb, blocksize, &sbp);
	if (err)
		goto out;

	err = nilfs_store_magic_and_option(sb, sbp, data);
	if (err)
		goto failed_sbh;

	err = nilfs_check_feature_compatibility(sb, sbp);
	if (err)
		goto failed_sbh;

	blocksize = BLOCK_SIZE << le32_to_cpu(sbp->s_log_block_size);
	if (blocksize < NILFS_MIN_BLOCK_SIZE ||
	    blocksize > NILFS_MAX_BLOCK_SIZE) {
		nilfs_msg(sb, KERN_ERR,
			  "couldn't mount because of unsupported filesystem blocksize %d",
			  blocksize);
		err = -EINVAL;
		goto failed_sbh;
	}
	if (sb->s_blocksize != blocksize) {
		int hw_blocksize = bdev_logical_block_size(sb->s_bdev);

		if (blocksize < hw_blocksize) {
			nilfs_msg(sb, KERN_ERR,
				  "blocksize %d too small for device (sector-size = %d)",
				  blocksize, hw_blocksize);
			err = -EINVAL;
			goto failed_sbh;
		}
		nilfs_release_super_block(nilfs);
		sb_set_blocksize(sb, blocksize);

		err = nilfs_load_super_block(nilfs, sb, blocksize, &sbp);
		if (err)
			goto out;
			/*
			 * Not to failed_sbh; sbh is released automatically
			 * when reloading fails.
			 */
	}
	nilfs->ns_blocksize_bits = sb->s_blocksize_bits;
	nilfs->ns_blocksize = blocksize;

	get_random_bytes(&nilfs->ns_next_generation,
			 sizeof(nilfs->ns_next_generation));

	err = nilfs_store_disk_layout(nilfs, sbp);
	if (err)
		goto failed_sbh;

	sb->s_maxbytes = nilfs_max_size(sb->s_blocksize_bits);

	nilfs->ns_mount_state = le16_to_cpu(sbp->s_state);

	err = nilfs_store_log_cursor(nilfs, sbp);
	if (err)
		goto failed_sbh;

	err = nilfs_sysfs_create_device_group(sb);
	if (err)
		goto failed_sbh;

	set_nilfs_init(nilfs);
	err = 0;
 out:
	up_write(&nilfs->ns_sem);
	return err;

 failed_sbh:
	nilfs_release_super_block(nilfs);
	goto out;
}
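
/*
 * nilfs_discard_segments() issues discard requests for the listed
 * segments, coalescing runs of physically contiguous segments into a
 * single blkdev_issue_discard() call to keep the request count low.
 */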
int nilfs_discard_segments(struct the_nilfs *nilfs, __u64 *segnump,
			   size_t nsegs)
{
	sector_t seg_start, seg_end;
	sector_t start = 0, nblocks = 0;
	unsigned int sects_per_block;
	__u64 *sn;
	int ret = 0;

	sects_per_block = (1 << nilfs->ns_blocksize_bits) /
		bdev_logical_block_size(nilfs->ns_bdev);
	for (sn = segnump; sn < segnump + nsegs; sn++) {
		nilfs_get_segment_range(nilfs, *sn, &seg_start, &seg_end);

		if (!nblocks) {
			start = seg_start;
			nblocks = seg_end - seg_start + 1;
		} else if (start + nblocks == seg_start) {
			nblocks += seg_end - seg_start + 1;
		} else {
			ret = blkdev_issue_discard(nilfs->ns_bdev,
						   start * sects_per_block,
						   nblocks * sects_per_block,
						   GFP_NOFS, 0);
			if (ret < 0)
				return ret;
			/*
			 * Start a new extent with the current segment so it
			 * is not dropped after flushing the previous run.
			 */
			start = seg_start;
			nblocks = seg_end - seg_start + 1;
		}
	}
	if (nblocks)
		ret = blkdev_issue_discard(nilfs->ns_bdev,
					   start * sects_per_block,
					   nblocks * sects_per_block,
					   GFP_NOFS, 0);
	return ret;
}

int nilfs_count_free_blocks(struct the_nilfs *nilfs, sector_t *nblocks)
{
	unsigned long ncleansegs;

	down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	ncleansegs = nilfs_sufile_get_ncleansegs(nilfs->ns_sufile);
	up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	*nblocks = (sector_t)ncleansegs * nilfs->ns_blocks_per_segment;
	return 0;
}
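
/*
 * nilfs_near_disk_full() is a heuristic: it estimates how many extra
 * segments the currently dirty blocks will consume and reports "near
 * full" when the clean segments left would not cover that estimate
 * plus the reserved segments.
 */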
int nilfs_near_disk_full(struct the_nilfs *nilfs)
{
	unsigned long ncleansegs, nincsegs;

	ncleansegs = nilfs_sufile_get_ncleansegs(nilfs->ns_sufile);
	nincsegs = atomic_read(&nilfs->ns_ndirtyblks) /
		nilfs->ns_blocks_per_segment + 1;

	return ncleansegs <= nilfs->ns_nrsvsegs + nincsegs;
}

struct nilfs_root *nilfs_lookup_root(struct the_nilfs *nilfs, __u64 cno)
{
	struct rb_node *n;
	struct nilfs_root *root;

	spin_lock(&nilfs->ns_cptree_lock);
	n = nilfs->ns_cptree.rb_node;
	while (n) {
		root = rb_entry(n, struct nilfs_root, rb_node);

		if (cno < root->cno) {
			n = n->rb_left;
		} else if (cno > root->cno) {
			n = n->rb_right;
		} else {
			atomic_inc(&root->count);
			spin_unlock(&nilfs->ns_cptree_lock);
			return root;
		}
	}
	spin_unlock(&nilfs->ns_cptree_lock);

	return NULL;
}
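
/*
 * nilfs_find_or_create_root() allocates a candidate root outside the
 * lock and then walks the checkpoint tree again under ns_cptree_lock;
 * if another task inserted a root for the same checkpoint number in
 * the meantime, the existing one is reused and the candidate is freed.
 */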
struct nilfs_root *
nilfs_find_or_create_root(struct the_nilfs *nilfs, __u64 cno)
{
	struct rb_node **p, *parent;
	struct nilfs_root *root, *new;
	int err;

	root = nilfs_lookup_root(nilfs, cno);
	if (root)
		return root;

	new = kzalloc(sizeof(*root), GFP_KERNEL);
	if (!new)
		return NULL;

	spin_lock(&nilfs->ns_cptree_lock);

	p = &nilfs->ns_cptree.rb_node;
	parent = NULL;

	while (*p) {
		parent = *p;
		root = rb_entry(parent, struct nilfs_root, rb_node);

		if (cno < root->cno) {
			p = &(*p)->rb_left;
		} else if (cno > root->cno) {
			p = &(*p)->rb_right;
		} else {
			atomic_inc(&root->count);
			spin_unlock(&nilfs->ns_cptree_lock);
			kfree(new);
			return root;
		}
	}

	new->cno = cno;
	new->ifile = NULL;
	new->nilfs = nilfs;
	atomic_set(&new->count, 1);
	atomic64_set(&new->inodes_count, 0);
	atomic64_set(&new->blocks_count, 0);

	rb_link_node(&new->rb_node, parent, p);
	rb_insert_color(&new->rb_node, &nilfs->ns_cptree);

	spin_unlock(&nilfs->ns_cptree_lock);

	err = nilfs_sysfs_create_snapshot_group(new);
	if (err) {
		kfree(new);
		new = NULL;
	}

	return new;
}

void nilfs_put_root(struct nilfs_root *root)
{
	if (atomic_dec_and_test(&root->count)) {
		struct the_nilfs *nilfs = root->nilfs;

		nilfs_sysfs_delete_snapshot_group(root);

		spin_lock(&nilfs->ns_cptree_lock);
		rb_erase(&root->rb_node, &nilfs->ns_cptree);
		spin_unlock(&nilfs->ns_cptree_lock);
		iput(root->ifile);

		kfree(root);
	}
}