/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/kthread.h>
#include <linux/export.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/gfs2_ondisk.h>
#include <linux/quotaops.h>
#include <linux/lockdep.h>
#include <linux/module.h>

#include "trace_gfs2.h"

/**
 * gfs2_tune_init - Fill a gfs2_tune structure with default values
 * @gt: tune
 *
 */

static void gfs2_tune_init(struct gfs2_tune *gt)
{
	spin_lock_init(&gt->gt_spin);

	gt->gt_quota_warn_period = 10;
	gt->gt_quota_scale_num = 1;
	gt->gt_quota_scale_den = 1;
	gt->gt_new_files_jdata = 0;
	gt->gt_max_readahead = 1 << 18;
	gt->gt_complain_secs = 10;
}
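
/*
 * Worked example for the defaults above: gt_max_readahead = 1 << 18 is
 * 262144 bytes, i.e. a 256 KiB readahead limit, and gt_complain_secs = 10
 * is, as the name suggests, a ten second interval.
 */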

static struct gfs2_sbd *init_sbd(struct super_block *sb)
{
	struct gfs2_sbd *sdp;
	struct address_space *mapping;

	sdp = kzalloc(sizeof(struct gfs2_sbd), GFP_KERNEL);
	if (!sdp)
		return NULL;

	sb->s_fs_info = sdp;
	sdp->sd_vfs = sb;
	sdp->sd_lkstats = alloc_percpu(struct gfs2_pcpu_lkstats);
	if (!sdp->sd_lkstats) {
		kfree(sdp);
		return NULL;
	}

	set_bit(SDF_NOJOURNALID, &sdp->sd_flags);
	gfs2_tune_init(&sdp->sd_tune);

	init_waitqueue_head(&sdp->sd_glock_wait);
	atomic_set(&sdp->sd_glock_disposal, 0);
	init_completion(&sdp->sd_locking_init);
	init_completion(&sdp->sd_wdack);
	spin_lock_init(&sdp->sd_statfs_spin);

	spin_lock_init(&sdp->sd_rindex_spin);
	sdp->sd_rindex_tree.rb_node = NULL;

	INIT_LIST_HEAD(&sdp->sd_jindex_list);
	spin_lock_init(&sdp->sd_jindex_spin);
	mutex_init(&sdp->sd_jindex_mutex);
	init_completion(&sdp->sd_journal_ready);

	INIT_LIST_HEAD(&sdp->sd_quota_list);
	mutex_init(&sdp->sd_quota_mutex);
	mutex_init(&sdp->sd_quota_sync_mutex);
	init_waitqueue_head(&sdp->sd_quota_wait);
	INIT_LIST_HEAD(&sdp->sd_trunc_list);
	spin_lock_init(&sdp->sd_trunc_lock);
	spin_lock_init(&sdp->sd_bitmap_lock);

	mapping = &sdp->sd_aspace;

	address_space_init_once(mapping);
	mapping->a_ops = &gfs2_rgrp_aops;
	mapping->host = sb->s_bdev->bd_inode;
	mapping_set_gfp_mask(mapping, GFP_NOFS);
	mapping->private_data = NULL;
	mapping->writeback_index = 0;

	spin_lock_init(&sdp->sd_log_lock);
	atomic_set(&sdp->sd_log_pinned, 0);
	INIT_LIST_HEAD(&sdp->sd_log_le_revoke);
	INIT_LIST_HEAD(&sdp->sd_log_le_ordered);
	spin_lock_init(&sdp->sd_ordered_lock);

	init_waitqueue_head(&sdp->sd_log_waitq);
	init_waitqueue_head(&sdp->sd_logd_waitq);
	spin_lock_init(&sdp->sd_ail_lock);
	INIT_LIST_HEAD(&sdp->sd_ail1_list);
	INIT_LIST_HEAD(&sdp->sd_ail2_list);

	init_rwsem(&sdp->sd_log_flush_lock);
	atomic_set(&sdp->sd_log_in_flight, 0);
	atomic_set(&sdp->sd_reserving_log, 0);
	init_waitqueue_head(&sdp->sd_reserving_log_wait);
	init_waitqueue_head(&sdp->sd_log_flush_wait);
	atomic_set(&sdp->sd_freeze_state, SFS_UNFROZEN);
	mutex_init(&sdp->sd_freeze_mutex);

	return sdp;
}

/**
 * gfs2_check_sb - Check superblock
 * @sdp: the filesystem
 * @silent: Don't print a message if the check fails
 *
 * Checks that the version code of the FS is one that we understand how to
 * read and that the sizes of the various on-disk structures have not
 * changed.
 */

static int gfs2_check_sb(struct gfs2_sbd *sdp, int silent)
{
	struct gfs2_sb_host *sb = &sdp->sd_sb;

	if (sb->sb_magic != GFS2_MAGIC ||
	    sb->sb_type != GFS2_METATYPE_SB) {
		if (!silent)
			pr_warn("not a GFS2 filesystem\n");
		return -EINVAL;
	}

	/* If format numbers match exactly, we're done. */

	if (sb->sb_fs_format == GFS2_FORMAT_FS &&
	    sb->sb_multihost_format == GFS2_FORMAT_MULTI)
		return 0;

	fs_warn(sdp, "Unknown on-disk format, unable to mount\n");
	return -EINVAL;
}

static void end_bio_io_page(struct bio *bio)
{
	struct page *page = bio->bi_private;

	if (!bio->bi_error)
		SetPageUptodate(page);
	else
		pr_warn("error %d reading superblock\n", bio->bi_error);
	unlock_page(page);
}

static void gfs2_sb_in(struct gfs2_sbd *sdp, const void *buf)
{
	struct gfs2_sb_host *sb = &sdp->sd_sb;
	struct super_block *s = sdp->sd_vfs;
	const struct gfs2_sb *str = buf;

	sb->sb_magic = be32_to_cpu(str->sb_header.mh_magic);
	sb->sb_type = be32_to_cpu(str->sb_header.mh_type);
	sb->sb_format = be32_to_cpu(str->sb_header.mh_format);
	sb->sb_fs_format = be32_to_cpu(str->sb_fs_format);
	sb->sb_multihost_format = be32_to_cpu(str->sb_multihost_format);
	sb->sb_bsize = be32_to_cpu(str->sb_bsize);
	sb->sb_bsize_shift = be32_to_cpu(str->sb_bsize_shift);
	sb->sb_master_dir.no_addr = be64_to_cpu(str->sb_master_dir.no_addr);
	sb->sb_master_dir.no_formal_ino = be64_to_cpu(str->sb_master_dir.no_formal_ino);
	sb->sb_root_dir.no_addr = be64_to_cpu(str->sb_root_dir.no_addr);
	sb->sb_root_dir.no_formal_ino = be64_to_cpu(str->sb_root_dir.no_formal_ino);

	memcpy(sb->sb_lockproto, str->sb_lockproto, GFS2_LOCKNAME_LEN);
	memcpy(sb->sb_locktable, str->sb_locktable, GFS2_LOCKNAME_LEN);
	memcpy(s->s_uuid, str->sb_uuid, 16);
}
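
/*
 * Illustrative note on the conversions above: every on-disk superblock field
 * is stored big-endian, so be32_to_cpu()/be64_to_cpu() translate it to host
 * byte order. On a little-endian machine a stored sb_bsize of bytes
 * 00 00 10 00 becomes 4096; on a big-endian machine the conversion is a no-op.
 */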

/**
 * gfs2_read_super - Read the gfs2 super block from disk
 * @sdp: The GFS2 super block
 * @sector: The location of the super block
 * @silent: Don't print a message if the check fails
 *
 * This uses the bio functions to read the super block from disk
 * because we want to be 100% sure that we never read cached data.
 * A super block is read twice only during each GFS2 mount and is
 * never written to by the filesystem. The first time it is read no
 * locks are held, and the only details which are looked at are those
 * relating to the locking protocol. Once locking is up and working,
 * the sb is read again under the lock to establish the location of
 * the master directory (contains pointers to journals etc) and the
 * root directory.
 *
 * Returns: 0 on success or error
 */

static int gfs2_read_super(struct gfs2_sbd *sdp, sector_t sector, int silent)
{
	struct super_block *sb = sdp->sd_vfs;
	struct gfs2_sb *p;
	struct page *page;
	struct bio *bio;

	page = alloc_page(GFP_NOFS);
	if (unlikely(!page))
		return -ENOMEM;

	ClearPageUptodate(page);
	ClearPageDirty(page);
	lock_page(page);

	bio = bio_alloc(GFP_NOFS, 1);
	bio->bi_iter.bi_sector = sector * (sb->s_blocksize >> 9);
	bio->bi_bdev = sb->s_bdev;
	bio_add_page(bio, page, PAGE_SIZE, 0);

	bio->bi_end_io = end_bio_io_page;
	bio->bi_private = page;
	submit_bio(READ_SYNC | REQ_META, bio);
	wait_on_page_locked(page);
	bio_put(bio);
	if (!PageUptodate(page)) {
		__free_page(page);
		return -EIO;
	}
	p = kmap(page);
	gfs2_sb_in(sdp, p);
	kunmap(page);
	__free_page(page);
	return gfs2_check_sb(sdp, silent);
}
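
/*
 * Worked example for the sector arithmetic above, assuming the block size
 * currently set on the superblock is 4096 bytes: s_blocksize >> 9 = 8, so
 * filesystem block 16 (GFS2_SB_ADDR of 128 basic 512-byte blocks shifted
 * down by sd_fsb2bb_shift) maps to 512-byte sector 128, i.e. byte offset
 * 64 KiB on the device.
 */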

/**
 * gfs2_read_sb - Read super block
 * @sdp: The GFS2 superblock
 * @silent: Don't print message if mount fails
 *
 */

static int gfs2_read_sb(struct gfs2_sbd *sdp, int silent)
{
	u32 hash_blocks, ind_blocks, leaf_blocks;
	u32 tmp_blocks;
	unsigned int x;
	int error;

	error = gfs2_read_super(sdp, GFS2_SB_ADDR >> sdp->sd_fsb2bb_shift, silent);
	if (error) {
		if (!silent)
			fs_err(sdp, "can't read superblock\n");
		return error;
	}

	sdp->sd_fsb2bb_shift = sdp->sd_sb.sb_bsize_shift -
			       GFS2_BASIC_BLOCK_SHIFT;
	sdp->sd_fsb2bb = 1 << sdp->sd_fsb2bb_shift;
	sdp->sd_diptrs = (sdp->sd_sb.sb_bsize -
			  sizeof(struct gfs2_dinode)) / sizeof(u64);
	sdp->sd_inptrs = (sdp->sd_sb.sb_bsize -
			  sizeof(struct gfs2_meta_header)) / sizeof(u64);
	sdp->sd_jbsize = sdp->sd_sb.sb_bsize - sizeof(struct gfs2_meta_header);
	sdp->sd_hash_bsize = sdp->sd_sb.sb_bsize / 2;
	sdp->sd_hash_bsize_shift = sdp->sd_sb.sb_bsize_shift - 1;
	sdp->sd_hash_ptrs = sdp->sd_hash_bsize / sizeof(u64);
	sdp->sd_qc_per_block = (sdp->sd_sb.sb_bsize -
				sizeof(struct gfs2_meta_header)) /
			       sizeof(struct gfs2_quota_change);
	sdp->sd_blocks_per_bitmap = (sdp->sd_sb.sb_bsize -
				     sizeof(struct gfs2_meta_header))
				    * GFS2_NBBY; /* not the rgrp bitmap, subsequent bitmaps only */

	/* Compute maximum reservation required to add an entry to a directory */

	hash_blocks = DIV_ROUND_UP(sizeof(u64) * (1 << GFS2_DIR_MAX_DEPTH),
				   sdp->sd_jbsize);

	ind_blocks = 0;
	for (tmp_blocks = hash_blocks; tmp_blocks > sdp->sd_diptrs;) {
		tmp_blocks = DIV_ROUND_UP(tmp_blocks, sdp->sd_inptrs);
		ind_blocks += tmp_blocks;
	}

	leaf_blocks = 2 + GFS2_DIR_MAX_DEPTH;

	sdp->sd_max_dirres = hash_blocks + ind_blocks + leaf_blocks;

	sdp->sd_heightsize[0] = sdp->sd_sb.sb_bsize -
				sizeof(struct gfs2_dinode);
	sdp->sd_heightsize[1] = sdp->sd_sb.sb_bsize * sdp->sd_diptrs;
	for (x = 2;; x++) {
		u64 space, d;
		u32 m;

		space = sdp->sd_heightsize[x - 1] * sdp->sd_inptrs;
		d = space;
		m = do_div(d, sdp->sd_inptrs);

		if (d != sdp->sd_heightsize[x - 1] || m)
			break;
		sdp->sd_heightsize[x] = space;
	}
	sdp->sd_max_height = x;
	sdp->sd_heightsize[x] = ~0;
	gfs2_assert(sdp, sdp->sd_max_height <= GFS2_MAX_META_HEIGHT);

	sdp->sd_jheightsize[0] = sdp->sd_sb.sb_bsize -
				 sizeof(struct gfs2_dinode);
	sdp->sd_jheightsize[1] = sdp->sd_jbsize * sdp->sd_diptrs;
	for (x = 2;; x++) {
		u64 space, d;
		u32 m;

		space = sdp->sd_jheightsize[x - 1] * sdp->sd_inptrs;
		d = space;
		m = do_div(d, sdp->sd_inptrs);

		if (d != sdp->sd_jheightsize[x - 1] || m)
			break;
		sdp->sd_jheightsize[x] = space;
	}
	sdp->sd_max_jheight = x;
	sdp->sd_jheightsize[x] = ~0;
	gfs2_assert(sdp, sdp->sd_max_jheight <= GFS2_MAX_META_HEIGHT);

	sdp->sd_max_dents_per_leaf = (sdp->sd_sb.sb_bsize -
				      sizeof(struct gfs2_leaf)) /
				     GFS2_MIN_DIRENT_SIZE;
	return 0;
}
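
/*
 * Worked example of the geometry above, assuming sb_bsize = 4096 and
 * sb_bsize_shift = 12 (the exact pointer counts depend on the on-disk
 * structure sizes in gfs2_ondisk.h):
 *
 *   sd_fsb2bb_shift = 12 - GFS2_BASIC_BLOCK_SHIFT (9) = 3
 *   sd_fsb2bb       = 1 << 3 = 8 basic 512-byte blocks per fs block
 *   sd_hash_bsize   = 4096 / 2 = 2048 bytes of hash table per block
 *   sd_hash_ptrs    = 2048 / sizeof(u64) = 256 hash pointers per block
 */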

static int init_names(struct gfs2_sbd *sdp, int silent)
{
	char *proto, *table;
	int error = 0;

	proto = sdp->sd_args.ar_lockproto;
	table = sdp->sd_args.ar_locktable;

	/* Try to autodetect */

	if (!proto[0] || !table[0]) {
		error = gfs2_read_super(sdp, GFS2_SB_ADDR >> sdp->sd_fsb2bb_shift, silent);
		if (error)
			return error;

		if (!proto[0])
			proto = sdp->sd_sb.sb_lockproto;
		if (!table[0])
			table = sdp->sd_sb.sb_locktable;
	}

	if (!table[0])
		table = sdp->sd_vfs->s_id;

	strlcpy(sdp->sd_proto_name, proto, GFS2_FSNAME_LEN);
	strlcpy(sdp->sd_table_name, table, GFS2_FSNAME_LEN);

	table = sdp->sd_table_name;
	while ((table = strchr(table, '/')))
		*table = '_';

	return error;
}
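
/*
 * Example of the autodetection above: with no "lockproto=" or "locktable="
 * mount options, both names are taken from the on-disk superblock as written
 * by mkfs.gfs2 -t, e.g. proto "lock_dlm" and table "mycluster:myfs" (the
 * cluster and filesystem names here are made-up examples). Any '/' in the
 * table name is rewritten to '_' so the copy is safe to use as an identifier.
 */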

static int init_locking(struct gfs2_sbd *sdp, struct gfs2_holder *mount_gh,
			int undo)
{
	int error = 0;

	if (undo)
		goto fail_trans;

	error = gfs2_glock_nq_num(sdp,
				  GFS2_MOUNT_LOCK, &gfs2_nondisk_glops,
				  LM_ST_EXCLUSIVE, LM_FLAG_NOEXP | GL_NOCACHE,
				  mount_gh);
	if (error) {
		fs_err(sdp, "can't acquire mount glock: %d\n", error);
		goto fail;
	}

	error = gfs2_glock_nq_num(sdp,
				  GFS2_LIVE_LOCK, &gfs2_nondisk_glops,
				  LM_ST_SHARED,
				  LM_FLAG_NOEXP | GL_EXACT,
				  &sdp->sd_live_gh);
	if (error) {
		fs_err(sdp, "can't acquire live glock: %d\n", error);
		goto fail_mount;
	}

	error = gfs2_glock_get(sdp, GFS2_RENAME_LOCK, &gfs2_nondisk_glops,
			       CREATE, &sdp->sd_rename_gl);
	if (error) {
		fs_err(sdp, "can't create rename glock: %d\n", error);
		goto fail_live;
	}

	error = gfs2_glock_get(sdp, GFS2_FREEZE_LOCK, &gfs2_freeze_glops,
			       CREATE, &sdp->sd_freeze_gl);
	if (error) {
		fs_err(sdp, "can't create transaction glock: %d\n", error);
		goto fail_rename;
	}

	return 0;

fail_trans:
	gfs2_glock_put(sdp->sd_freeze_gl);
fail_rename:
	gfs2_glock_put(sdp->sd_rename_gl);
fail_live:
	gfs2_glock_dq_uninit(&sdp->sd_live_gh);
fail_mount:
	gfs2_glock_dq_uninit(mount_gh);
fail:
	return error;
}

static int gfs2_lookup_root(struct super_block *sb, struct dentry **dptr,
			    u64 no_addr, const char *name)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct dentry *dentry;
	struct inode *inode;

	inode = gfs2_inode_lookup(sb, DT_DIR, no_addr, 0);
	if (IS_ERR(inode)) {
		fs_err(sdp, "can't read in %s inode: %ld\n", name, PTR_ERR(inode));
		return PTR_ERR(inode);
	}
	dentry = d_make_root(inode);
	if (!dentry) {
		fs_err(sdp, "can't alloc %s dentry\n", name);
		return -ENOMEM;
	}
	*dptr = dentry;
	return 0;
}

static int init_sb(struct gfs2_sbd *sdp, int silent)
{
	struct super_block *sb = sdp->sd_vfs;
	struct gfs2_holder sb_gh;
	u64 no_addr;
	int ret;

	ret = gfs2_glock_nq_num(sdp, GFS2_SB_LOCK, &gfs2_meta_glops,
				LM_ST_SHARED, 0, &sb_gh);
	if (ret) {
		fs_err(sdp, "can't acquire superblock glock: %d\n", ret);
		return ret;
	}

	ret = gfs2_read_sb(sdp, silent);
	if (ret) {
		fs_err(sdp, "can't read superblock: %d\n", ret);
		goto out;
	}

	/* Set up the buffer cache and SB for real */
	if (sdp->sd_sb.sb_bsize < bdev_logical_block_size(sb->s_bdev)) {
		ret = -EINVAL;
		fs_err(sdp, "FS block size (%u) is too small for device "
		       "block size (%u)\n",
		       sdp->sd_sb.sb_bsize, bdev_logical_block_size(sb->s_bdev));
		goto out;
	}
	if (sdp->sd_sb.sb_bsize > PAGE_SIZE) {
		ret = -EINVAL;
		fs_err(sdp, "FS block size (%u) is too big for machine "
		       "page size (%u)\n",
		       sdp->sd_sb.sb_bsize, (unsigned int)PAGE_SIZE);
		goto out;
	}
	sb_set_blocksize(sb, sdp->sd_sb.sb_bsize);

	/* Get the root inode */
	no_addr = sdp->sd_sb.sb_root_dir.no_addr;
	ret = gfs2_lookup_root(sb, &sdp->sd_root_dir, no_addr, "root");
	if (ret)
		goto out;

	/* Get the master inode */
	no_addr = sdp->sd_sb.sb_master_dir.no_addr;
	ret = gfs2_lookup_root(sb, &sdp->sd_master_dir, no_addr, "master");
	if (ret) {
		dput(sdp->sd_root_dir);
		goto out;
	}
	sb->s_root = dget(sdp->sd_args.ar_meta ? sdp->sd_master_dir : sdp->sd_root_dir);
out:
	gfs2_glock_dq_uninit(&sb_gh);
	return ret;
}

static void gfs2_others_may_mount(struct gfs2_sbd *sdp)
{
	char *message = "FIRSTMOUNT=Done";
	char *envp[] = { message, NULL };

	fs_info(sdp, "first mount done, others may mount\n");

	if (sdp->sd_lockstruct.ls_ops->lm_first_done)
		sdp->sd_lockstruct.ls_ops->lm_first_done(sdp);

	kobject_uevent_env(&sdp->sd_kobj, KOBJ_CHANGE, envp);
}

/**
 * gfs2_jindex_hold - Grab a lock on the jindex
 * @sdp: The GFS2 superblock
 * @ji_gh: the holder for the jindex glock
 *
 * Returns: errno
 */

static int gfs2_jindex_hold(struct gfs2_sbd *sdp, struct gfs2_holder *ji_gh)
{
	struct gfs2_inode *dip = GFS2_I(sdp->sd_jindex);
	struct qstr name;
	char buf[20];
	struct gfs2_jdesc *jd;
	int error;

	name.name = buf;

	mutex_lock(&sdp->sd_jindex_mutex);

	for (;;) {
		error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, ji_gh);
		if (error)
			break;

		name.len = sprintf(buf, "journal%u", sdp->sd_journals);
		name.hash = gfs2_disk_hash(name.name, name.len);

		error = gfs2_dir_check(sdp->sd_jindex, &name, NULL);
		if (error == -ENOENT) {
			error = 0;
			break;
		}

		gfs2_glock_dq_uninit(ji_gh);

		if (error)
			break;

		error = -ENOMEM;
		jd = kzalloc(sizeof(struct gfs2_jdesc), GFP_KERNEL);
		if (!jd)
			break;

		INIT_LIST_HEAD(&jd->extent_list);
		INIT_LIST_HEAD(&jd->jd_revoke_list);

		INIT_WORK(&jd->jd_work, gfs2_recover_func);
		jd->jd_inode = gfs2_lookupi(sdp->sd_jindex, &name, 1);
		if (!jd->jd_inode || IS_ERR(jd->jd_inode)) {
			if (!jd->jd_inode)
				error = -ENOENT;
			else
				error = PTR_ERR(jd->jd_inode);
			kfree(jd);
			break;
		}

		spin_lock(&sdp->sd_jindex_spin);
		jd->jd_jid = sdp->sd_journals++;
		list_add_tail(&jd->jd_list, &sdp->sd_jindex_list);
		spin_unlock(&sdp->sd_jindex_spin);
	}

	mutex_unlock(&sdp->sd_jindex_mutex);

	return error;
}
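
/*
 * Example of the enumeration above: the jindex directory holds one entry per
 * journal, named "journal0", "journal1", and so on. The loop keeps looking up
 * "journal<sd_journals>" until gfs2_dir_check() returns -ENOENT, at which
 * point sdp->sd_journals holds the number of journals found.
 */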

/**
 * check_journal_clean - Make sure a journal is clean for a spectator mount
 * @sdp: The GFS2 superblock
 * @jd: The journal descriptor
 *
 * Returns: 0 if the journal is clean or locked, else an error
 */
static int check_journal_clean(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd)
{
	int error;
	struct gfs2_holder j_gh;
	struct gfs2_log_header_host head;
	struct gfs2_inode *ip;

	ip = GFS2_I(jd->jd_inode);
	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_NOEXP |
				   GL_EXACT | GL_NOCACHE, &j_gh);
	if (error) {
		fs_err(sdp, "Error locking journal for spectator mount.\n");
		return -EPERM;
	}
	error = gfs2_jdesc_check(jd);
	if (error) {
		fs_err(sdp, "Error checking journal for spectator mount.\n");
		goto out_unlock;
	}
	error = gfs2_find_jhead(jd, &head);
	if (error) {
		fs_err(sdp, "Error parsing journal for spectator mount.\n");
		goto out_unlock;
	}
	if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT)) {
		error = -EPERM;
		fs_err(sdp, "jid=%u: Journal is dirty, so the first mounter "
		       "must not be a spectator.\n", jd->jd_jid);
	}

out_unlock:
	gfs2_glock_dq_uninit(&j_gh);
	return error;
}

static int init_journal(struct gfs2_sbd *sdp, int undo)
{
	struct inode *master = d_inode(sdp->sd_master_dir);
	struct gfs2_holder ji_gh;
	struct gfs2_inode *ip;
	int jindex = 1;
	int error = 0;

	if (undo) {
		jindex = 0;
		goto fail_jinode_gh;
	}

	sdp->sd_jindex = gfs2_lookup_simple(master, "jindex");
	if (IS_ERR(sdp->sd_jindex)) {
		error = PTR_ERR(sdp->sd_jindex);
		fs_err(sdp, "can't lookup journal index: %d\n", error);
		return error;
	}

	/* Load in the journal index special file */

	error = gfs2_jindex_hold(sdp, &ji_gh);
	if (error) {
		fs_err(sdp, "can't read journal index: %d\n", error);
		goto fail;
	}

	error = -EUSERS;
	if (!gfs2_jindex_size(sdp)) {
		fs_err(sdp, "no journals!\n");
		goto fail_jindex;
	}

	if (sdp->sd_args.ar_spectator) {
		sdp->sd_jdesc = gfs2_jdesc_find(sdp, 0);
		atomic_set(&sdp->sd_log_blks_free, sdp->sd_jdesc->jd_blocks);
		atomic_set(&sdp->sd_log_thresh1, 2*sdp->sd_jdesc->jd_blocks/5);
		atomic_set(&sdp->sd_log_thresh2, 4*sdp->sd_jdesc->jd_blocks/5);
	} else {
		if (sdp->sd_lockstruct.ls_jid >= gfs2_jindex_size(sdp)) {
			fs_err(sdp, "can't mount journal #%u\n",
			       sdp->sd_lockstruct.ls_jid);
			fs_err(sdp, "there are only %u journals (0 - %u)\n",
			       gfs2_jindex_size(sdp),
			       gfs2_jindex_size(sdp) - 1);
			goto fail_jindex;
		}
		sdp->sd_jdesc = gfs2_jdesc_find(sdp, sdp->sd_lockstruct.ls_jid);

		error = gfs2_glock_nq_num(sdp, sdp->sd_lockstruct.ls_jid,
					  &gfs2_journal_glops,
					  LM_ST_EXCLUSIVE, LM_FLAG_NOEXP,
					  &sdp->sd_journal_gh);
		if (error) {
			fs_err(sdp, "can't acquire journal glock: %d\n", error);
			goto fail_jindex;
		}

		ip = GFS2_I(sdp->sd_jdesc->jd_inode);
		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED,
					   LM_FLAG_NOEXP | GL_EXACT | GL_NOCACHE,
					   &sdp->sd_jinode_gh);
		if (error) {
			fs_err(sdp, "can't acquire journal inode glock: %d\n",
			       error);
			goto fail_journal_gh;
		}

		error = gfs2_jdesc_check(sdp->sd_jdesc);
		if (error) {
			fs_err(sdp, "my journal (%u) is bad: %d\n",
			       sdp->sd_jdesc->jd_jid, error);
			goto fail_jinode_gh;
		}
		atomic_set(&sdp->sd_log_blks_free, sdp->sd_jdesc->jd_blocks);
		atomic_set(&sdp->sd_log_thresh1, 2*sdp->sd_jdesc->jd_blocks/5);
		atomic_set(&sdp->sd_log_thresh2, 4*sdp->sd_jdesc->jd_blocks/5);

		/* Map the extents for this journal's blocks */
		gfs2_map_journal_extents(sdp, sdp->sd_jdesc);
	}
	trace_gfs2_log_blocks(sdp, atomic_read(&sdp->sd_log_blks_free));

	if (sdp->sd_lockstruct.ls_first) {
		unsigned int x;

		for (x = 0; x < sdp->sd_journals; x++) {
			struct gfs2_jdesc *jd = gfs2_jdesc_find(sdp, x);

			if (sdp->sd_args.ar_spectator) {
				error = check_journal_clean(sdp, jd);
				if (error)
					goto fail_jinode_gh;
				continue;
			}
			error = gfs2_recover_journal(jd, true);
			if (error) {
				fs_err(sdp, "error recovering journal %u: %d\n",
				       x, error);
				goto fail_jinode_gh;
			}
		}

		gfs2_others_may_mount(sdp);
	} else if (!sdp->sd_args.ar_spectator) {
		error = gfs2_recover_journal(sdp->sd_jdesc, true);
		if (error) {
			fs_err(sdp, "error recovering my journal: %d\n", error);
			goto fail_jinode_gh;
		}
	}

	sdp->sd_log_idle = 1;
	set_bit(SDF_JOURNAL_CHECKED, &sdp->sd_flags);
	gfs2_glock_dq_uninit(&ji_gh);
	jindex = 0;
	INIT_WORK(&sdp->sd_freeze_work, gfs2_freeze_func);
	return 0;

fail_jinode_gh:
	if (!sdp->sd_args.ar_spectator)
		gfs2_glock_dq_uninit(&sdp->sd_jinode_gh);
fail_journal_gh:
	if (!sdp->sd_args.ar_spectator)
		gfs2_glock_dq_uninit(&sdp->sd_journal_gh);
fail_jindex:
	gfs2_jindex_free(sdp);
	if (jindex)
		gfs2_glock_dq_uninit(&ji_gh);
fail:
	iput(sdp->sd_jindex);
	return error;
}
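
/*
 * Worked example for the log thresholds set above: with a journal of, say,
 * 32768 blocks, sd_log_blks_free starts at 32768, sd_log_thresh1 = 2*32768/5
 * = 13107 and sd_log_thresh2 = 4*32768/5 = 26214, i.e. two fifths and four
 * fifths of the journal size (the 32768 figure is only an example value).
 */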

static struct lock_class_key gfs2_quota_imutex_key;

static int init_inodes(struct gfs2_sbd *sdp, int undo)
{
	int error = 0;
	struct inode *master = d_inode(sdp->sd_master_dir);

	if (undo)
		goto fail_qinode;

	error = init_journal(sdp, undo);
	complete_all(&sdp->sd_journal_ready);
	if (error)
		goto fail;

	/* Read in the master statfs inode */
	sdp->sd_statfs_inode = gfs2_lookup_simple(master, "statfs");
	if (IS_ERR(sdp->sd_statfs_inode)) {
		error = PTR_ERR(sdp->sd_statfs_inode);
		fs_err(sdp, "can't read in statfs inode: %d\n", error);
		goto fail_journal;
	}

	/* Read in the resource index inode */
	sdp->sd_rindex = gfs2_lookup_simple(master, "rindex");
	if (IS_ERR(sdp->sd_rindex)) {
		error = PTR_ERR(sdp->sd_rindex);
		fs_err(sdp, "can't get resource index inode: %d\n", error);
		goto fail_statfs;
	}
	sdp->sd_rindex_uptodate = 0;

	/* Read in the quota inode */
	sdp->sd_quota_inode = gfs2_lookup_simple(master, "quota");
	if (IS_ERR(sdp->sd_quota_inode)) {
		error = PTR_ERR(sdp->sd_quota_inode);
		fs_err(sdp, "can't get quota file inode: %d\n", error);
		goto fail_rindex;
	}
	/*
	 * i_mutex on quota files is special. Since this inode is a hidden
	 * system file, we are safe to define locking ourselves.
	 */
	lockdep_set_class(&sdp->sd_quota_inode->i_mutex,
			  &gfs2_quota_imutex_key);

	error = gfs2_rindex_update(sdp);
	if (error)
		goto fail_qinode;

	return 0;

fail_qinode:
	iput(sdp->sd_quota_inode);
fail_rindex:
	gfs2_clear_rgrpd(sdp);
	iput(sdp->sd_rindex);
fail_statfs:
	iput(sdp->sd_statfs_inode);
fail_journal:
	init_journal(sdp, UNDO);
fail:
	return error;
}

static int init_per_node(struct gfs2_sbd *sdp, int undo)
{
	struct inode *pn = NULL;
	char buf[30];
	int error = 0;
	struct gfs2_inode *ip;
	struct inode *master = d_inode(sdp->sd_master_dir);

	if (sdp->sd_args.ar_spectator)
		return 0;

	if (undo)
		goto fail_qc_gh;

	pn = gfs2_lookup_simple(master, "per_node");
	if (IS_ERR(pn)) {
		error = PTR_ERR(pn);
		fs_err(sdp, "can't find per_node directory: %d\n", error);
		return error;
	}

	sprintf(buf, "statfs_change%u", sdp->sd_jdesc->jd_jid);
	sdp->sd_sc_inode = gfs2_lookup_simple(pn, buf);
	if (IS_ERR(sdp->sd_sc_inode)) {
		error = PTR_ERR(sdp->sd_sc_inode);
		fs_err(sdp, "can't find local \"sc\" file: %d\n", error);
		goto fail;
	}

	sprintf(buf, "quota_change%u", sdp->sd_jdesc->jd_jid);
	sdp->sd_qc_inode = gfs2_lookup_simple(pn, buf);
	if (IS_ERR(sdp->sd_qc_inode)) {
		error = PTR_ERR(sdp->sd_qc_inode);
		fs_err(sdp, "can't find local \"qc\" file: %d\n", error);
		goto fail_ut_i;
	}

	iput(pn);
	pn = NULL;

	ip = GFS2_I(sdp->sd_sc_inode);
	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0,
				   &sdp->sd_sc_gh);
	if (error) {
		fs_err(sdp, "can't lock local \"sc\" file: %d\n", error);
		goto fail_qc_i;
	}

	ip = GFS2_I(sdp->sd_qc_inode);
	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0,
				   &sdp->sd_qc_gh);
	if (error) {
		fs_err(sdp, "can't lock local \"qc\" file: %d\n", error);
		goto fail_ut_gh;
	}

	return 0;

fail_qc_gh:
	gfs2_glock_dq_uninit(&sdp->sd_qc_gh);
fail_ut_gh:
	gfs2_glock_dq_uninit(&sdp->sd_sc_gh);
fail_qc_i:
	iput(sdp->sd_qc_inode);
fail_ut_i:
	iput(sdp->sd_sc_inode);
fail:
	iput(pn);
	return error;
}

static const match_table_t nolock_tokens = {
	{ Opt_jid, "jid=%d\n", },
	{ Opt_err, NULL },
};

static const struct lm_lockops nolock_ops = {
	.lm_proto_name = "lock_nolock",
	.lm_put_lock = gfs2_glock_free,
	.lm_tokens = &nolock_tokens,
};

/**
 * gfs2_lm_mount - mount a locking protocol
 * @sdp: the filesystem
 * @silent: if 1, don't complain if the FS isn't a GFS2 fs
 *
 * Returns: errno
 */

static int gfs2_lm_mount(struct gfs2_sbd *sdp, int silent)
{
	const struct lm_lockops *lm;
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	struct gfs2_args *args = &sdp->sd_args;
	const char *proto = sdp->sd_proto_name;
	const char *table = sdp->sd_table_name;
	char *o, *options;
	int ret;

	if (!strcmp("lock_nolock", proto)) {
		lm = &nolock_ops;
		sdp->sd_args.ar_localflocks = 1;
#ifdef CONFIG_GFS2_FS_LOCKING_DLM
	} else if (!strcmp("lock_dlm", proto)) {
		lm = &gfs2_dlm_ops;
#endif
	} else {
		pr_info("can't find protocol %s\n", proto);
		return -ENOENT;
	}

	fs_info(sdp, "Trying to join cluster \"%s\", \"%s\"\n", proto, table);

	ls->ls_ops = lm;
	ls->ls_first = 1;

	for (options = args->ar_hostdata; (o = strsep(&options, ":")); ) {
		substring_t tmp[MAX_OPT_ARGS];
		int token, option;

		if (!o || !*o)
			continue;

		token = match_token(o, *lm->lm_tokens, tmp);
		switch (token) {
		case Opt_jid:
			ret = match_int(&tmp[0], &option);
			if (ret || option < 0)
				goto hostdata_error;
			if (test_and_clear_bit(SDF_NOJOURNALID, &sdp->sd_flags))
				ls->ls_jid = option;
			break;
		case Opt_id:
		case Opt_nodir:
			/* Obsolete, but left for backward compat purposes */
			break;
		case Opt_first:
			ret = match_int(&tmp[0], &option);
			if (ret || (option != 0 && option != 1))
				goto hostdata_error;
			ls->ls_first = option;
			break;
		case Opt_err:
		default:
hostdata_error:
			fs_info(sdp, "unknown hostdata (%s)\n", o);
			return -EINVAL;
		}
	}

	if (lm->lm_mount == NULL) {
		fs_info(sdp, "Now mounting FS...\n");
		complete_all(&sdp->sd_locking_init);
		return 0;
	}
	ret = lm->lm_mount(sdp, table);
	if (ret == 0)
		fs_info(sdp, "Joined cluster. Now mounting FS...\n");
	complete_all(&sdp->sd_locking_init);
	return ret;
}

void gfs2_lm_unmount(struct gfs2_sbd *sdp)
{
	const struct lm_lockops *lm = sdp->sd_lockstruct.ls_ops;

	if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)) &&
	    lm->lm_unmount)
		lm->lm_unmount(sdp);
}

static int wait_on_journal(struct gfs2_sbd *sdp)
{
	if (sdp->sd_lockstruct.ls_ops->lm_mount == NULL)
		return 0;

	return wait_on_bit(&sdp->sd_flags, SDF_NOJOURNALID, TASK_INTERRUPTIBLE)
		? -EINTR : 0;
}

void gfs2_online_uevent(struct gfs2_sbd *sdp)
{
	struct super_block *sb = sdp->sd_vfs;
	char ro[20];
	char spectator[20];
	char *envp[] = { ro, spectator, NULL };

	sprintf(ro, "RDONLY=%d", (sb->s_flags & MS_RDONLY) ? 1 : 0);
	sprintf(spectator, "SPECTATOR=%d", sdp->sd_args.ar_spectator ? 1 : 0);
	kobject_uevent_env(&sdp->sd_kobj, KOBJ_ONLINE, envp);
}
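
/*
 * Example of the uevent above: a read-write, non-spectator mount emits
 * KOBJ_ONLINE with the environment strings "RDONLY=0" and "SPECTATOR=0",
 * while a spectator mount (which is forced read-only) emits "RDONLY=1" and
 * "SPECTATOR=1".
 */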

/**
 * fill_super - Read in superblock
 * @sb: The VFS superblock
 * @args: Mount options
 * @silent: Don't complain if it's not a GFS2 filesystem
 *
 * Returns: errno
 */

static int fill_super(struct super_block *sb, struct gfs2_args *args, int silent)
{
	struct gfs2_sbd *sdp;
	struct gfs2_holder mount_gh;
	int error;

	sdp = init_sbd(sb);
	if (!sdp) {
		pr_warn("can't alloc struct gfs2_sbd\n");
		return -ENOMEM;
	}
	sdp->sd_args = *args;

	if (sdp->sd_args.ar_spectator) {
		sb->s_flags |= MS_RDONLY;
		set_bit(SDF_RORECOVERY, &sdp->sd_flags);
	}
	if (sdp->sd_args.ar_posix_acl)
		sb->s_flags |= MS_POSIXACL;
	if (sdp->sd_args.ar_nobarrier)
		set_bit(SDF_NOBARRIERS, &sdp->sd_flags);

	sb->s_flags |= MS_NOSEC;
	sb->s_magic = GFS2_MAGIC;
	sb->s_op = &gfs2_super_ops;
	sb->s_d_op = &gfs2_dops;
	sb->s_export_op = &gfs2_export_ops;
	sb->s_xattr = gfs2_xattr_handlers;
	sb->s_qcop = &gfs2_quotactl_ops;
	sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP;
	sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE;
	sb->s_time_gran = 1;
	sb->s_maxbytes = MAX_LFS_FILESIZE;

	/* Set up the buffer cache and fill in some fake block size values
	   to allow us to read-in the on-disk superblock. */
	sdp->sd_sb.sb_bsize = sb_min_blocksize(sb, GFS2_BASIC_BLOCK);
	sdp->sd_sb.sb_bsize_shift = sb->s_blocksize_bits;
	sdp->sd_fsb2bb_shift = sdp->sd_sb.sb_bsize_shift -
			       GFS2_BASIC_BLOCK_SHIFT;
	sdp->sd_fsb2bb = 1 << sdp->sd_fsb2bb_shift;
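
	/*
	 * Worked example for the provisional values above: sb_min_blocksize()
	 * picks at least GFS2_BASIC_BLOCK (512) bytes, so on a device with
	 * 512-byte logical blocks sb_bsize = 512, sb_bsize_shift = 9,
	 * sd_fsb2bb_shift = 0 and sd_fsb2bb = 1. These fake values are only
	 * good enough to locate the on-disk superblock; gfs2_read_sb()
	 * replaces them with the real geometry.
	 */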

	sdp->sd_tune.gt_logd_secs = sdp->sd_args.ar_commit;
	sdp->sd_tune.gt_quota_quantum = sdp->sd_args.ar_quota_quantum;
	if (sdp->sd_args.ar_statfs_quantum) {
		sdp->sd_tune.gt_statfs_slow = 0;
		sdp->sd_tune.gt_statfs_quantum = sdp->sd_args.ar_statfs_quantum;
	} else {
		sdp->sd_tune.gt_statfs_slow = 1;
		sdp->sd_tune.gt_statfs_quantum = 30;
	}

	error = init_names(sdp, silent);
	if (error) {
		/* In this case, we haven't initialized sysfs, so we have to
		   manually free the sdp. */
		free_percpu(sdp->sd_lkstats);
		kfree(sdp);
		sb->s_fs_info = NULL;
		return error;
	}

	snprintf(sdp->sd_fsname, GFS2_FSNAME_LEN, "%s", sdp->sd_table_name);

	error = gfs2_sys_fs_add(sdp);
	/*
	 * If we hit an error here, gfs2_sys_fs_add will have called function
	 * kobject_put which causes the sysfs usage count to go to zero, which
	 * causes sysfs to call function gfs2_sbd_release, which frees sdp.
	 * Subsequent error paths here will call gfs2_sys_fs_del, which also
	 * kobject_put to free sdp.
	 */
	if (error)
		return error;

	gfs2_create_debugfs_file(sdp);

	error = gfs2_lm_mount(sdp, silent);
	if (error)
		goto fail_debug;

	error = init_locking(sdp, &mount_gh, DO);
	if (error)
		goto fail_lm;

	error = init_sb(sdp, silent);
	if (error)
		goto fail_locking;

	error = wait_on_journal(sdp);
	if (error)
		goto fail_sb;

	/*
	 * If user space has failed to join the cluster or some similar
	 * failure has occurred, then the journal id will contain a
	 * negative (error) number. This will then be returned to the
	 * caller (of the mount syscall). We do this even for spectator
	 * mounts (which just write a jid of 0 to indicate "ok" even though
	 * the jid is unused in the spectator case)
	 */
	if (sdp->sd_lockstruct.ls_jid < 0) {
		error = sdp->sd_lockstruct.ls_jid;
		sdp->sd_lockstruct.ls_jid = 0;
		goto fail_sb;
	}

	if (sdp->sd_args.ar_spectator)
		snprintf(sdp->sd_fsname, GFS2_FSNAME_LEN, "%s.s",
			 sdp->sd_table_name);
	else
		snprintf(sdp->sd_fsname, GFS2_FSNAME_LEN, "%s.%u",
			 sdp->sd_table_name, sdp->sd_lockstruct.ls_jid);

	error = init_inodes(sdp, DO);
	if (error)
		goto fail_sb;

	error = init_per_node(sdp, DO);
	if (error)
		goto fail_inodes;

	error = gfs2_statfs_init(sdp);
	if (error) {
		fs_err(sdp, "can't initialize statfs subsystem: %d\n", error);
		goto fail_per_node;
	}

	if (!(sb->s_flags & MS_RDONLY)) {
		error = gfs2_make_fs_rw(sdp);
		if (error) {
			fs_err(sdp, "can't make FS RW: %d\n", error);
			goto fail_per_node;
		}
	}

	gfs2_glock_dq_uninit(&mount_gh);
	gfs2_online_uevent(sdp);
	return 0;

fail_per_node:
	init_per_node(sdp, UNDO);
fail_inodes:
	init_inodes(sdp, UNDO);
fail_sb:
	if (sdp->sd_root_dir)
		dput(sdp->sd_root_dir);
	if (sdp->sd_master_dir)
		dput(sdp->sd_master_dir);
	if (sb->s_root)
		dput(sb->s_root);
	sb->s_root = NULL;
fail_locking:
	init_locking(sdp, &mount_gh, UNDO);
fail_lm:
	complete_all(&sdp->sd_journal_ready);
	gfs2_gl_hash_clear(sdp);
	gfs2_lm_unmount(sdp);
fail_debug:
	gfs2_delete_debugfs_file(sdp);
	free_percpu(sdp->sd_lkstats);
	/* gfs2_sys_fs_del must be the last thing we do, since it causes
	 * sysfs to call function gfs2_sbd_release, which frees sdp. */
	gfs2_sys_fs_del(sdp);
	sb->s_fs_info = NULL;
	return error;
}

static int set_gfs2_super(struct super_block *s, void *data)
{
	s->s_bdev = data;
	s->s_dev = s->s_bdev->bd_dev;

	/*
	 * We set the bdi here to the queue backing, file systems can
	 * overwrite this in ->fill_super()
	 */
	s->s_bdi = &bdev_get_queue(s->s_bdev)->backing_dev_info;
	return 0;
}

static int test_gfs2_super(struct super_block *s, void *ptr)
{
	struct block_device *bdev = ptr;

	return (bdev == s->s_bdev);
}

/**
 * gfs2_mount - Get the GFS2 superblock
 * @fs_type: The GFS2 filesystem type
 * @flags: Mount flags
 * @dev_name: The name of the device
 * @data: The mount arguments
 *
 * Q. Why not use get_sb_bdev() ?
 * A. We need to select one of two root directories to mount, independent
 *    of whether this is the initial, or subsequent, mount of this sb
 *
 * Returns: the root dentry, or an ERR_PTR on error
 */

static struct dentry *gfs2_mount(struct file_system_type *fs_type, int flags,
				 const char *dev_name, void *data)
{
	struct block_device *bdev;
	struct super_block *s;
	fmode_t mode = FMODE_READ | FMODE_EXCL;
	int error;
	struct gfs2_args args;
	struct gfs2_sbd *sdp;

	if (!(flags & MS_RDONLY))
		mode |= FMODE_WRITE;

	bdev = blkdev_get_by_path(dev_name, mode, fs_type);
	if (IS_ERR(bdev))
		return ERR_CAST(bdev);

	/*
	 * once the super is inserted into the list by sget, s_umount
	 * will protect the lockfs code from trying to start a snapshot
	 * while we are mounting
	 */
	mutex_lock(&bdev->bd_fsfreeze_mutex);
	if (bdev->bd_fsfreeze_count > 0) {
		mutex_unlock(&bdev->bd_fsfreeze_mutex);
		error = -EBUSY;
		goto error_bdev;
	}
	s = sget(fs_type, test_gfs2_super, set_gfs2_super, flags, bdev);
	mutex_unlock(&bdev->bd_fsfreeze_mutex);
	error = PTR_ERR(s);
	if (IS_ERR(s))
		goto error_bdev;

	if (s->s_root) {
		/*
		 * s_umount nests inside bd_mutex during
		 * __invalidate_device(). blkdev_put() acquires
		 * bd_mutex and can't be called under s_umount. Drop
		 * s_umount temporarily. This is safe as we're
		 * holding an active reference.
		 */
		up_write(&s->s_umount);
		blkdev_put(bdev, mode);
		down_write(&s->s_umount);
	} else {
		/* s_mode must be set before deactivate_locked_super calls */
		s->s_mode = mode;
	}

	memset(&args, 0, sizeof(args));
	args.ar_quota = GFS2_QUOTA_DEFAULT;
	args.ar_data = GFS2_DATA_DEFAULT;
	args.ar_commit = 30;
	args.ar_statfs_quantum = 30;
	args.ar_quota_quantum = 60;
	args.ar_errors = GFS2_ERRORS_DEFAULT;

	error = gfs2_mount_args(&args, data);
	if (error) {
		pr_warn("can't parse mount arguments\n");
		goto error_super;
	}

	if (s->s_root) {
		error = -EBUSY;
		if ((flags ^ s->s_flags) & MS_RDONLY)
			goto error_super;
	} else {
		snprintf(s->s_id, sizeof(s->s_id), "%pg", bdev);
		sb_set_blocksize(s, block_size(bdev));
		error = fill_super(s, &args, flags & MS_SILENT ? 1 : 0);
		if (error)
			goto error_super;
		s->s_flags |= MS_ACTIVE;
		bdev->bd_super = s;
	}

	sdp = s->s_fs_info;
	if (args.ar_meta)
		return dget(sdp->sd_master_dir);
	else
		return dget(sdp->sd_root_dir);

error_super:
	deactivate_locked_super(s);
	return ERR_PTR(error);
error_bdev:
	blkdev_put(bdev, mode);
	return ERR_PTR(error);
}
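
/*
 * Example of the root selection above: when args.ar_meta is not set, the
 * mount returns a dentry for sd_root_dir, the normal filesystem root. With
 * the "meta" mount option set, it returns sd_master_dir instead, the hidden
 * metadata directory that holds jindex, rindex, quota and the per_node tree.
 */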

static int set_meta_super(struct super_block *s, void *ptr)
{
	return -EINVAL;
}

static struct dentry *gfs2_mount_meta(struct file_system_type *fs_type,
				      int flags, const char *dev_name, void *data)
{
	struct super_block *s;
	struct gfs2_sbd *sdp;
	struct path path;
	int error;

	error = kern_path(dev_name, LOOKUP_FOLLOW, &path);
	if (error) {
		pr_warn("path_lookup on %s returned error %d\n",
			dev_name, error);
		return ERR_PTR(error);
	}
	s = sget(&gfs2_fs_type, test_gfs2_super, set_meta_super, flags,
		 d_inode(path.dentry)->i_sb->s_bdev);
	path_put(&path);
	if (IS_ERR(s)) {
		pr_warn("gfs2 mount does not exist\n");
		return ERR_CAST(s);
	}
	if ((flags ^ s->s_flags) & MS_RDONLY) {
		deactivate_locked_super(s);
		return ERR_PTR(-EBUSY);
	}
	sdp = s->s_fs_info;
	return dget(sdp->sd_master_dir);
}

static void gfs2_kill_sb(struct super_block *sb)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;

	if (sdp == NULL) {
		kill_block_super(sb);
		return;
	}

	gfs2_log_flush(sdp, NULL, SYNC_FLUSH);
	dput(sdp->sd_root_dir);
	dput(sdp->sd_master_dir);
	sdp->sd_root_dir = NULL;
	sdp->sd_master_dir = NULL;
	shrink_dcache_sb(sb);
	gfs2_delete_debugfs_file(sdp);
	free_percpu(sdp->sd_lkstats);
	kill_block_super(sb);
}

struct file_system_type gfs2_fs_type = {
	.name = "gfs2",
	.fs_flags = FS_REQUIRES_DEV,
	.mount = gfs2_mount,
	.kill_sb = gfs2_kill_sb,
	.owner = THIS_MODULE,
};
MODULE_ALIAS_FS("gfs2");

struct file_system_type gfs2meta_fs_type = {
	.name = "gfs2meta",
	.fs_flags = FS_REQUIRES_DEV,
	.mount = gfs2_mount_meta,
	.owner = THIS_MODULE,
};
MODULE_ALIAS_FS("gfs2meta");