// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sched.h>
#include <linux/cred.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/module.h>
#include <linux/kobject.h>
#include <linux/uaccess.h>
#include <linux/gfs2_ondisk.h>
#include <linux/blkdev.h>

#include "gfs2.h"
#include "incore.h"
#include "sys.h"
#include "super.h"
#include "glock.h"
#include "quota.h"
#include "util.h"
#include "glops.h"
#include "recovery.h"

struct gfs2_attr {
        struct attribute attr;
        ssize_t (*show)(struct gfs2_sbd *, char *);
        ssize_t (*store)(struct gfs2_sbd *, const char *, size_t);
};

static ssize_t gfs2_attr_show(struct kobject *kobj, struct attribute *attr,
                              char *buf)
{
        struct gfs2_sbd *sdp = container_of(kobj, struct gfs2_sbd, sd_kobj);
        struct gfs2_attr *a = container_of(attr, struct gfs2_attr, attr);
        return a->show ? a->show(sdp, buf) : 0;
}

static ssize_t gfs2_attr_store(struct kobject *kobj, struct attribute *attr,
                               const char *buf, size_t len)
{
        struct gfs2_sbd *sdp = container_of(kobj, struct gfs2_sbd, sd_kobj);
        struct gfs2_attr *a = container_of(attr, struct gfs2_attr, attr);
        return a->store ? a->store(sdp, buf, len) : len;
}

static const struct sysfs_ops gfs2_attr_ops = {
        .show  = gfs2_attr_show,
        .store = gfs2_attr_store,
};

static struct kset *gfs2_kset;

static ssize_t id_show(struct gfs2_sbd *sdp, char *buf)
{
        return snprintf(buf, PAGE_SIZE, "%u:%u\n",
                        MAJOR(sdp->sd_vfs->s_dev), MINOR(sdp->sd_vfs->s_dev));
}

static ssize_t status_show(struct gfs2_sbd *sdp, char *buf)
{
        unsigned long f = sdp->sd_flags;
        ssize_t s;

        s = snprintf(buf, PAGE_SIZE,
                     "Journal Checked: %d\n"
                     "Journal Live: %d\n"
                     "Journal ID: %d\n"
                     "Spectator: %d\n"
                     "Withdrawn: %d\n"
                     "No barriers: %d\n"
                     "No recovery: %d\n"
                     "Demote: %d\n"
                     "No Journal ID: %d\n"
                     "Mounted RO: %d\n"
                     "RO Recovery: %d\n"
                     "Skip DLM Unlock: %d\n"
                     "Force AIL Flush: %d\n"
                     "FS Freeze Initiator: %d\n"
                     "FS Frozen: %d\n"
                     "Withdrawing: %d\n"
                     "Withdraw In Prog: %d\n"
                     "Remote Withdraw: %d\n"
                     "Withdraw Recovery: %d\n"
                     "Killing: %d\n"
                     "sd_log_error: %d\n"
                     "sd_log_flush_lock: %d\n"
                     "sd_log_num_revoke: %u\n"
                     "sd_log_in_flight: %d\n"
                     "sd_log_blks_needed: %d\n"
                     "sd_log_blks_free: %d\n"
                     "sd_log_flush_head: %d\n"
                     "sd_log_flush_tail: %d\n"
                     "sd_log_blks_reserved: %d\n"
                     "sd_log_revokes_available: %d\n"
                     "sd_log_pinned: %d\n"
                     "sd_log_thresh1: %d\n"
                     "sd_log_thresh2: %d\n",
                     test_bit(SDF_JOURNAL_CHECKED, &f),
                     test_bit(SDF_JOURNAL_LIVE, &f),
                     (sdp->sd_jdesc ? sdp->sd_jdesc->jd_jid : 0),
                     (sdp->sd_args.ar_spectator ? 1 : 0),
                     test_bit(SDF_WITHDRAWN, &f),
                     test_bit(SDF_NOBARRIERS, &f),
                     test_bit(SDF_NORECOVERY, &f),
                     test_bit(SDF_DEMOTE, &f),
                     test_bit(SDF_NOJOURNALID, &f),
                     (sb_rdonly(sdp->sd_vfs) ? 1 : 0),
                     test_bit(SDF_RORECOVERY, &f),
                     test_bit(SDF_SKIP_DLM_UNLOCK, &f),
                     test_bit(SDF_FORCE_AIL_FLUSH, &f),
                     test_bit(SDF_FREEZE_INITIATOR, &f),
                     test_bit(SDF_FROZEN, &f),
                     test_bit(SDF_WITHDRAWING, &f),
                     test_bit(SDF_WITHDRAW_IN_PROG, &f),
                     test_bit(SDF_REMOTE_WITHDRAW, &f),
                     test_bit(SDF_WITHDRAW_RECOVERY, &f),
                     test_bit(SDF_KILL, &f),
                     sdp->sd_log_error,
                     rwsem_is_locked(&sdp->sd_log_flush_lock),
                     sdp->sd_log_num_revoke,
                     atomic_read(&sdp->sd_log_in_flight),
                     atomic_read(&sdp->sd_log_blks_needed),
                     atomic_read(&sdp->sd_log_blks_free),
                     sdp->sd_log_flush_head,
                     sdp->sd_log_flush_tail,
                     sdp->sd_log_blks_reserved,
                     atomic_read(&sdp->sd_log_revokes_available),
                     atomic_read(&sdp->sd_log_pinned),
                     atomic_read(&sdp->sd_log_thresh1),
                     atomic_read(&sdp->sd_log_thresh2));
        return s;
}

static ssize_t fsname_show(struct gfs2_sbd *sdp, char *buf)
{
        return snprintf(buf, PAGE_SIZE, "%s\n", sdp->sd_fsname);
}

static ssize_t uuid_show(struct gfs2_sbd *sdp, char *buf)
{
        struct super_block *s = sdp->sd_vfs;

        buf[0] = '\0';
        if (uuid_is_null(&s->s_uuid))
                return 0;
        return snprintf(buf, PAGE_SIZE, "%pUB\n", &s->s_uuid);
}

static ssize_t freeze_show(struct gfs2_sbd *sdp, char *buf)
{
        struct super_block *sb = sdp->sd_vfs;
        int frozen = (sb->s_writers.frozen == SB_UNFROZEN) ? 0 : 1;

        return snprintf(buf, PAGE_SIZE, "%d\n", frozen);
}

static ssize_t freeze_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
{
        int error, n;

        error = kstrtoint(buf, 0, &n);
        if (error)
                return error;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        switch (n) {
        case 0:
                error = thaw_super(sdp->sd_vfs, FREEZE_HOLDER_USERSPACE);
                break;
        case 1:
                error = freeze_super(sdp->sd_vfs, FREEZE_HOLDER_USERSPACE);
                break;
        default:
                return -EINVAL;
        }

        if (error) {
                fs_warn(sdp, "freeze %d error %d\n", n, error);
                return error;
        }

        return len;
}

static ssize_t withdraw_show(struct gfs2_sbd *sdp, char *buf)
{
        unsigned int b = gfs2_withdrawing_or_withdrawn(sdp);
        return snprintf(buf, PAGE_SIZE, "%u\n", b);
}

static ssize_t withdraw_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
{
        int error, val;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        error = kstrtoint(buf, 0, &val);
        if (error)
                return error;

        if (val != 1)
                return -EINVAL;

        gfs2_lm(sdp, "withdrawing from cluster at user's request\n");
        gfs2_withdraw(sdp);

        return len;
}

static ssize_t statfs_sync_store(struct gfs2_sbd *sdp, const char *buf,
                                 size_t len)
{
        int error, val;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        error = kstrtoint(buf, 0, &val);
        if (error)
                return error;

        if (val != 1)
                return -EINVAL;

        gfs2_statfs_sync(sdp->sd_vfs, 0);
        return len;
}

static ssize_t quota_sync_store(struct gfs2_sbd *sdp, const char *buf,
                                size_t len)
{
        int error, val;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        error = kstrtoint(buf, 0, &val);
        if (error)
                return error;

        if (val != 1)
                return -EINVAL;

        gfs2_quota_sync(sdp->sd_vfs, 0);
        return len;
}

static ssize_t quota_refresh_user_store(struct gfs2_sbd *sdp, const char *buf,
                                        size_t len)
{
        struct kqid qid;
        int error;
        u32 id;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        error = kstrtou32(buf, 0, &id);
        if (error)
                return error;

        qid = make_kqid(current_user_ns(), USRQUOTA, id);
        if (!qid_valid(qid))
                return -EINVAL;

        error = gfs2_quota_refresh(sdp, qid);
        return error ? error : len;
}

static ssize_t quota_refresh_group_store(struct gfs2_sbd *sdp, const char *buf,
                                         size_t len)
{
        struct kqid qid;
        int error;
        u32 id;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        error = kstrtou32(buf, 0, &id);
        if (error)
                return error;

        qid = make_kqid(current_user_ns(), GRPQUOTA, id);
        if (!qid_valid(qid))
                return -EINVAL;

        error = gfs2_quota_refresh(sdp, qid);
        return error ? error : len;
}

static ssize_t demote_rq_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
{
        struct gfs2_glock *gl;
        const struct gfs2_glock_operations *glops;
        unsigned int glmode = LM_ST_UNLOCKED;
        unsigned int gltype = 0;
        unsigned long long glnum = 0;
        char mode[16];
        int rv;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        rv = sscanf(buf, "%u:%llu %15s", &gltype, &glnum,
                    mode);
        if (rv != 3)
                return -EINVAL;

        if (strcmp(mode, "EX") == 0)
                glmode = LM_ST_UNLOCKED;
        else if ((strcmp(mode, "CW") == 0) || (strcmp(mode, "DF") == 0))
                glmode = LM_ST_DEFERRED;
        else if ((strcmp(mode, "PR") == 0) || (strcmp(mode, "SH") == 0))
                glmode = LM_ST_SHARED;
        else
                return -EINVAL;

        if (gltype > LM_TYPE_JOURNAL)
                return -EINVAL;
        if (gltype == LM_TYPE_NONDISK && glnum == GFS2_FREEZE_LOCK)
                glops = &gfs2_freeze_glops;
        else
                glops = gfs2_glops_list[gltype];
        if (glops == NULL)
                return -EINVAL;
        if (!test_and_set_bit(SDF_DEMOTE, &sdp->sd_flags))
                fs_info(sdp, "demote interface used\n");
        rv = gfs2_glock_get(sdp, glnum, glops, NO_CREATE, &gl);
        if (rv)
                return rv;
        gfs2_glock_cb(gl, glmode);
        gfs2_glock_put(gl);
        return len;
}
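
/*
 * Illustrative note (added in editing, not from the original source):
 * demote_rq expects input of the form "<gltype>:<glnum> <mode>", matching
 * the sscanf() format above, e.g. from a shell:
 *
 *      echo "2:8388609 EX" > /sys/fs/gfs2/<locktable>/demote_rq
 *
 * The glock type and number here are made-up values chosen only to show the
 * syntax; valid modes are EX, CW, DF, PR and SH, as parsed above.
 */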

#define GFS2_ATTR(name, mode, show, store) \
static struct gfs2_attr gfs2_attr_##name = __ATTR(name, mode, show, store)

GFS2_ATTR(id, 0444, id_show, NULL);
GFS2_ATTR(fsname, 0444, fsname_show, NULL);
GFS2_ATTR(uuid, 0444, uuid_show, NULL);
GFS2_ATTR(freeze, 0644, freeze_show, freeze_store);
GFS2_ATTR(withdraw, 0644, withdraw_show, withdraw_store);
GFS2_ATTR(statfs_sync, 0200, NULL, statfs_sync_store);
GFS2_ATTR(quota_sync, 0200, NULL, quota_sync_store);
GFS2_ATTR(quota_refresh_user, 0200, NULL, quota_refresh_user_store);
GFS2_ATTR(quota_refresh_group, 0200, NULL, quota_refresh_group_store);
GFS2_ATTR(demote_rq, 0200, NULL, demote_rq_store);
GFS2_ATTR(status, 0400, status_show, NULL);

static struct attribute *gfs2_attrs[] = {
        &gfs2_attr_id.attr,
        &gfs2_attr_fsname.attr,
        &gfs2_attr_uuid.attr,
        &gfs2_attr_freeze.attr,
        &gfs2_attr_withdraw.attr,
        &gfs2_attr_statfs_sync.attr,
        &gfs2_attr_quota_sync.attr,
        &gfs2_attr_quota_refresh_user.attr,
        &gfs2_attr_quota_refresh_group.attr,
        &gfs2_attr_demote_rq.attr,
        &gfs2_attr_status.attr,
        NULL,
};
ATTRIBUTE_GROUPS(gfs2);
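
/*
 * Usage sketch (added in editing, not part of the original file): with the
 * "gfs2" kset registered in gfs2_sys_init() below, each attribute above is
 * expected to appear as a file under /sys/fs/gfs2/<locktable>/, e.g.
 *
 *      cat /sys/fs/gfs2/mycluster:myfs/id
 *      echo 1 > /sys/fs/gfs2/mycluster:myfs/statfs_sync
 *
 * "mycluster:myfs" is a hypothetical locktable name used only as an example.
 */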

static void gfs2_sbd_release(struct kobject *kobj)
{
        struct gfs2_sbd *sdp = container_of(kobj, struct gfs2_sbd, sd_kobj);

        complete(&sdp->sd_kobj_unregister);
}

static struct kobj_type gfs2_ktype = {
        .release = gfs2_sbd_release,
        .default_groups = gfs2_groups,
        .sysfs_ops = &gfs2_attr_ops,
};

/*
 * lock_module. Originally from lock_dlm
 */

static ssize_t proto_name_show(struct gfs2_sbd *sdp, char *buf)
{
        const struct lm_lockops *ops = sdp->sd_lockstruct.ls_ops;
        return sprintf(buf, "%s\n", ops->lm_proto_name);
}

static ssize_t block_show(struct gfs2_sbd *sdp, char *buf)
{
        struct lm_lockstruct *ls = &sdp->sd_lockstruct;
        ssize_t ret;
        int val = 0;

        if (test_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags))
                val = 1;
        ret = sprintf(buf, "%d\n", val);
        return ret;
}

static ssize_t block_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
{
        struct lm_lockstruct *ls = &sdp->sd_lockstruct;
        int ret, val;

        ret = kstrtoint(buf, 0, &val);
        if (ret)
                return ret;

        if (val == 1)
                set_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags);
        else if (val == 0) {
                clear_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags);
                smp_mb__after_atomic();
                gfs2_glock_thaw(sdp);
        } else {
                ret = -EINVAL;
        }
        return ret ? ret : len;
}

static ssize_t wdack_show(struct gfs2_sbd *sdp, char *buf)
{
        int val = completion_done(&sdp->sd_wdack) ? 1 : 0;

        return sprintf(buf, "%d\n", val);
}

static ssize_t wdack_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
{
        int ret, val;

        ret = kstrtoint(buf, 0, &val);
        if (ret)
                return ret;

        if ((val == 1) &&
            !strcmp(sdp->sd_lockstruct.ls_ops->lm_proto_name, "lock_dlm"))
                complete(&sdp->sd_wdack);
        else
                ret = -EINVAL;
        return ret ? ret : len;
}

static ssize_t lkfirst_show(struct gfs2_sbd *sdp, char *buf)
{
        struct lm_lockstruct *ls = &sdp->sd_lockstruct;
        return sprintf(buf, "%d\n", ls->ls_first);
}

static ssize_t lkfirst_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
{
        unsigned first;
        int rv;

        rv = sscanf(buf, "%u", &first);
        if (rv != 1 || first > 1)
                return -EINVAL;
        rv = wait_for_completion_killable(&sdp->sd_locking_init);
        if (rv)
                return rv;
        spin_lock(&sdp->sd_jindex_spin);
        rv = -EBUSY;
        if (test_bit(SDF_NOJOURNALID, &sdp->sd_flags) == 0)
                goto out;
        rv = -EINVAL;
        if (sdp->sd_args.ar_spectator)
                goto out;
        if (sdp->sd_lockstruct.ls_ops->lm_mount == NULL)
                goto out;
        sdp->sd_lockstruct.ls_first = first;
        rv = 0;
out:
        spin_unlock(&sdp->sd_jindex_spin);
        return rv ? rv : len;
}

static ssize_t first_done_show(struct gfs2_sbd *sdp, char *buf)
{
        struct lm_lockstruct *ls = &sdp->sd_lockstruct;
        return sprintf(buf, "%d\n", !!test_bit(DFL_FIRST_MOUNT_DONE, &ls->ls_recover_flags));
}

int gfs2_recover_set(struct gfs2_sbd *sdp, unsigned jid)
{
        struct gfs2_jdesc *jd;
        int rv;

        /* Wait for our primary journal to be initialized */
        wait_for_completion(&sdp->sd_journal_ready);

        spin_lock(&sdp->sd_jindex_spin);
        rv = -EBUSY;
        /*
         * If we're a spectator, we use journal0, but it's not really ours.
         * So we need to wait for its recovery too. If we skip it we'd never
         * queue work to the recovery workqueue, and so its completion would
         * never clear the DFL_BLOCK_LOCKS flag, so all our locks would
         * permanently stop working.
         */
        if (sdp->sd_jdesc->jd_jid == jid && !sdp->sd_args.ar_spectator)
                goto out;
        rv = -ENOENT;
        list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
                if (jd->jd_jid != jid && !sdp->sd_args.ar_spectator)
                        continue;
                rv = gfs2_recover_journal(jd, false);
                break;
        }
out:
        spin_unlock(&sdp->sd_jindex_spin);
        return rv;
}

static ssize_t recover_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
{
        unsigned jid;
        int rv;

        rv = sscanf(buf, "%u", &jid);
        if (rv != 1)
                return -EINVAL;

        if (test_bit(SDF_NORECOVERY, &sdp->sd_flags)) {
                rv = -ESHUTDOWN;
                goto out;
        }

        rv = gfs2_recover_set(sdp, jid);
out:
        return rv ? rv : len;
}

static ssize_t recover_done_show(struct gfs2_sbd *sdp, char *buf)
{
        struct lm_lockstruct *ls = &sdp->sd_lockstruct;
        return sprintf(buf, "%d\n", ls->ls_recover_jid_done);
}

static ssize_t recover_status_show(struct gfs2_sbd *sdp, char *buf)
{
        struct lm_lockstruct *ls = &sdp->sd_lockstruct;
        return sprintf(buf, "%d\n", ls->ls_recover_jid_status);
}

static ssize_t jid_show(struct gfs2_sbd *sdp, char *buf)
{
        return sprintf(buf, "%d\n", sdp->sd_lockstruct.ls_jid);
}

static ssize_t jid_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
{
        int jid;
        int rv;

        rv = sscanf(buf, "%d", &jid);
        if (rv != 1)
                return -EINVAL;
        rv = wait_for_completion_killable(&sdp->sd_locking_init);
        if (rv)
                return rv;
        spin_lock(&sdp->sd_jindex_spin);
        rv = -EINVAL;
        if (sdp->sd_lockstruct.ls_ops->lm_mount == NULL)
                goto out;
        rv = -EBUSY;
        if (test_bit(SDF_NOJOURNALID, &sdp->sd_flags) == 0)
                goto out;
        rv = 0;
        if (sdp->sd_args.ar_spectator && jid > 0)
                rv = jid = -EINVAL;
        sdp->sd_lockstruct.ls_jid = jid;
        clear_bit(SDF_NOJOURNALID, &sdp->sd_flags);
        smp_mb__after_atomic();
        wake_up_bit(&sdp->sd_flags, SDF_NOJOURNALID);
out:
        spin_unlock(&sdp->sd_jindex_spin);
        return rv ? rv : len;
}

#define GDLM_ATTR(_name,_mode,_show,_store) \
static struct gfs2_attr gdlm_attr_##_name = __ATTR(_name,_mode,_show,_store)

GDLM_ATTR(proto_name, 0444, proto_name_show, NULL);
GDLM_ATTR(block, 0644, block_show, block_store);
GDLM_ATTR(withdraw, 0644, wdack_show, wdack_store);
GDLM_ATTR(jid, 0644, jid_show, jid_store);
GDLM_ATTR(first, 0644, lkfirst_show, lkfirst_store);
GDLM_ATTR(first_done, 0444, first_done_show, NULL);
GDLM_ATTR(recover, 0600, NULL, recover_store);
GDLM_ATTR(recover_done, 0444, recover_done_show, NULL);
GDLM_ATTR(recover_status, 0444, recover_status_show, NULL);

static struct attribute *lock_module_attrs[] = {
        &gdlm_attr_proto_name.attr,
        &gdlm_attr_block.attr,
        &gdlm_attr_withdraw.attr,
        &gdlm_attr_jid.attr,
        &gdlm_attr_first.attr,
        &gdlm_attr_first_done.attr,
        &gdlm_attr_recover.attr,
        &gdlm_attr_recover_done.attr,
        &gdlm_attr_recover_status.attr,
        NULL,
};

/*
 * get and set struct gfs2_tune fields
 */

static ssize_t quota_scale_show(struct gfs2_sbd *sdp, char *buf)
{
        return snprintf(buf, PAGE_SIZE, "%u %u\n",
                        sdp->sd_tune.gt_quota_scale_num,
                        sdp->sd_tune.gt_quota_scale_den);
}

static ssize_t quota_scale_store(struct gfs2_sbd *sdp, const char *buf,
                                 size_t len)
{
        struct gfs2_tune *gt = &sdp->sd_tune;
        unsigned int x, y;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        if (sscanf(buf, "%u %u", &x, &y) != 2 || !y)
                return -EINVAL;

        spin_lock(&gt->gt_spin);
        gt->gt_quota_scale_num = x;
        gt->gt_quota_scale_den = y;
        spin_unlock(&gt->gt_spin);
        return len;
}

static ssize_t tune_set(struct gfs2_sbd *sdp, unsigned int *field,
                        int check_zero, const char *buf, size_t len)
{
        struct gfs2_tune *gt = &sdp->sd_tune;
        unsigned int x;
        int error;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        error = kstrtouint(buf, 0, &x);
        if (error)
                return error;

        if (check_zero && !x)
                return -EINVAL;

        spin_lock(&gt->gt_spin);
        *field = x;
        spin_unlock(&gt->gt_spin);
        return len;
}

#define TUNE_ATTR_3(name, show, store) \
static struct gfs2_attr tune_attr_##name = __ATTR(name, 0644, show, store)

#define TUNE_ATTR_2(name, store) \
static ssize_t name##_show(struct gfs2_sbd *sdp, char *buf) \
{ \
        return snprintf(buf, PAGE_SIZE, "%u\n", sdp->sd_tune.gt_##name); \
} \
TUNE_ATTR_3(name, name##_show, store)

#define TUNE_ATTR(name, check_zero) \
static ssize_t name##_store(struct gfs2_sbd *sdp, const char *buf, size_t len)\
{ \
        return tune_set(sdp, &sdp->sd_tune.gt_##name, check_zero, buf, len); \
} \
TUNE_ATTR_2(name, name##_store)

TUNE_ATTR(quota_warn_period, 0);
TUNE_ATTR(quota_quantum, 0);
TUNE_ATTR(max_readahead, 0);
TUNE_ATTR(complain_secs, 0);
TUNE_ATTR(statfs_slow, 0);
TUNE_ATTR(new_files_jdata, 0);
TUNE_ATTR(statfs_quantum, 1);
TUNE_ATTR_3(quota_scale, quota_scale_show, quota_scale_store);
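
/*
 * Expansion sketch (added in editing, for illustration only): a line such as
 * TUNE_ATTR(quota_quantum, 0) above generates roughly
 *
 *      static ssize_t quota_quantum_store(struct gfs2_sbd *sdp,
 *                                         const char *buf, size_t len)
 *      {
 *              return tune_set(sdp, &sdp->sd_tune.gt_quota_quantum, 0,
 *                              buf, len);
 *      }
 *      static ssize_t quota_quantum_show(struct gfs2_sbd *sdp, char *buf)
 *      {
 *              return snprintf(buf, PAGE_SIZE, "%u\n",
 *                              sdp->sd_tune.gt_quota_quantum);
 *      }
 *      static struct gfs2_attr tune_attr_quota_quantum =
 *              __ATTR(quota_quantum, 0644, quota_quantum_show,
 *                     quota_quantum_store);
 */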

static struct attribute *tune_attrs[] = {
        &tune_attr_quota_warn_period.attr,
        &tune_attr_quota_quantum.attr,
        &tune_attr_max_readahead.attr,
        &tune_attr_complain_secs.attr,
        &tune_attr_statfs_slow.attr,
        &tune_attr_statfs_quantum.attr,
        &tune_attr_quota_scale.attr,
        &tune_attr_new_files_jdata.attr,
        NULL,
};

static const struct attribute_group tune_group = {
        .name = "tune",
        .attrs = tune_attrs,
};

static const struct attribute_group lock_module_group = {
        .name = "lock_module",
        .attrs = lock_module_attrs,
};
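
/*
 * Layout note (added in editing): because these groups carry .name strings,
 * their attributes are expected to appear in per-filesystem subdirectories,
 * e.g. /sys/fs/gfs2/<locktable>/tune/quota_quantum and
 * /sys/fs/gfs2/<locktable>/lock_module/recover.
 */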

int gfs2_sys_fs_add(struct gfs2_sbd *sdp)
{
        struct super_block *sb = sdp->sd_vfs;
        int error;
        char ro[20];
        char spectator[20];
        char *envp[] = { ro, spectator, NULL };

        sprintf(ro, "RDONLY=%d", sb_rdonly(sb));
        sprintf(spectator, "SPECTATOR=%d", sdp->sd_args.ar_spectator ? 1 : 0);

        init_completion(&sdp->sd_kobj_unregister);
        sdp->sd_kobj.kset = gfs2_kset;
        error = kobject_init_and_add(&sdp->sd_kobj, &gfs2_ktype, NULL,
                                     "%s", sdp->sd_table_name);
        if (error)
                goto fail_reg;

        error = sysfs_create_group(&sdp->sd_kobj, &tune_group);
        if (error)
                goto fail_reg;

        error = sysfs_create_group(&sdp->sd_kobj, &lock_module_group);
        if (error)
                goto fail_tune;

        error = sysfs_create_link(&sdp->sd_kobj,
                                  &disk_to_dev(sb->s_bdev->bd_disk)->kobj,
                                  "device");
        if (error)
                goto fail_lock_module;

        kobject_uevent_env(&sdp->sd_kobj, KOBJ_ADD, envp);
        return 0;

fail_lock_module:
        sysfs_remove_group(&sdp->sd_kobj, &lock_module_group);
fail_tune:
        sysfs_remove_group(&sdp->sd_kobj, &tune_group);
fail_reg:
        fs_err(sdp, "error %d adding sysfs files\n", error);
        kobject_put(&sdp->sd_kobj);
        wait_for_completion(&sdp->sd_kobj_unregister);
        sb->s_fs_info = NULL;
        return error;
}

void gfs2_sys_fs_del(struct gfs2_sbd *sdp)
{
        sysfs_remove_link(&sdp->sd_kobj, "device");
        sysfs_remove_group(&sdp->sd_kobj, &tune_group);
        sysfs_remove_group(&sdp->sd_kobj, &lock_module_group);
        kobject_put(&sdp->sd_kobj);
        wait_for_completion(&sdp->sd_kobj_unregister);
}

static int gfs2_uevent(const struct kobject *kobj, struct kobj_uevent_env *env)
{
        const struct gfs2_sbd *sdp = container_of(kobj, struct gfs2_sbd, sd_kobj);
        const struct super_block *s = sdp->sd_vfs;

        add_uevent_var(env, "LOCKTABLE=%s", sdp->sd_table_name);
        add_uevent_var(env, "LOCKPROTO=%s", sdp->sd_proto_name);
        if (!test_bit(SDF_NOJOURNALID, &sdp->sd_flags))
                add_uevent_var(env, "JOURNALID=%d", sdp->sd_lockstruct.ls_jid);
        if (!uuid_is_null(&s->s_uuid))
                add_uevent_var(env, "UUID=%pUB", &s->s_uuid);
        return 0;
}

static const struct kset_uevent_ops gfs2_uevent_ops = {
        .uevent = gfs2_uevent,
};

int gfs2_sys_init(void)
{
        gfs2_kset = kset_create_and_add("gfs2", &gfs2_uevent_ops, fs_kobj);
        if (!gfs2_kset)
                return -ENOMEM;
        return 0;
}

void gfs2_sys_uninit(void)
{
        kset_unregister(gfs2_kset);
}