// SPDX-License-Identifier: GPL-2.0
/*
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  super.c contains code to handle: - mount structures
 *                                   - filesystem drivers list
 *                                   - umount system call
 *
 * GK 2/5/95  -  Changed to support mounting the root fs via NFS
 *
 *  Added kerneld support: Jacques Gelinas and Bjorn Ekwall
 *  Added change_root: Werner Almesberger & Hans Lermen, Feb '96
 *  Added options to /proc/mounts:
 *    Torbjörn Lindh (torbjorn.lindh@gopta.se), April 14, 1996.
 *  Added devfs support: Richard Gooch <rgooch@atnf.csiro.au>, 13-JAN-1998
 *  Heavily rewritten for 'one fs - one tree' dcache architecture. AV, Mar 2000
 */
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/mount.h>
#include <linux/security.h>
#include <linux/writeback.h>		/* for the emergency remount stuff */
#include <linux/idr.h>
#include <linux/mutex.h>
#include <linux/backing-dev.h>
#include <linux/rculist_bl.h>
#include <linux/cleancache.h>
#include <linux/fsnotify.h>
#include <linux/lockdep.h>
#include <linux/user_namespace.h>
#include "internal.h"
static int thaw_super_locked(struct super_block *sb);

static LIST_HEAD(super_blocks);
static DEFINE_SPINLOCK(sb_lock);

static char *sb_writers_name[SB_FREEZE_LEVELS] = {
	"sb_writers",
	"sb_pagefaults",
	"sb_internal",
};
/*
 * One thing we have to be careful of with a per-sb shrinker is that we don't
 * drop the last active reference to the superblock from within the shrinker.
 * If that happens we could trigger unregistering the shrinker from within the
 * shrinker path and that leads to deadlock on the shrinker_rwsem. Hence we
 * take a passive reference to the superblock to avoid this from occurring.
 */
static unsigned long super_cache_scan(struct shrinker *shrink,
				      struct shrink_control *sc)
{
	struct super_block *sb;
	long	fs_objects = 0;
	long	total_objects;
	long	freed = 0;
	long	dentries;
	long	inodes;

	sb = container_of(shrink, struct super_block, s_shrink);

	/*
	 * Deadlock avoidance.  We may hold various FS locks, and we don't want
	 * to recurse into the FS that called us in clear_inode() and friends..
	 */
	if (!(sc->gfp_mask & __GFP_FS))
		return SHRINK_STOP;

	if (!trylock_super(sb))
		return SHRINK_STOP;

	if (sb->s_op->nr_cached_objects)
		fs_objects = sb->s_op->nr_cached_objects(sb, sc);

	inodes = list_lru_shrink_count(&sb->s_inode_lru, sc);
	dentries = list_lru_shrink_count(&sb->s_dentry_lru, sc);
	total_objects = dentries + inodes + fs_objects + 1;
	if (!total_objects)
		total_objects = 1;

	/* proportion the scan between the caches */
	dentries = mult_frac(sc->nr_to_scan, dentries, total_objects);
	inodes = mult_frac(sc->nr_to_scan, inodes, total_objects);
	fs_objects = mult_frac(sc->nr_to_scan, fs_objects, total_objects);

	/*
	 * prune the dcache first as the icache is pinned by it, then
	 * prune the icache, followed by the filesystem specific caches
	 *
	 * Ensure that we always scan at least one object - memcg kmem
	 * accounting uses this to fully empty the caches.
	 */
	sc->nr_to_scan = dentries + 1;
	freed = prune_dcache_sb(sb, sc);
	sc->nr_to_scan = inodes + 1;
	freed += prune_icache_sb(sb, sc);

	if (fs_objects) {
		sc->nr_to_scan = fs_objects + 1;
		freed += sb->s_op->free_cached_objects(sb, sc);
	}

	up_read(&sb->s_umount);
	return freed;
}
static unsigned long super_cache_count(struct shrinker *shrink,
				       struct shrink_control *sc)
{
	struct super_block *sb;
	long	total_objects = 0;

	sb = container_of(shrink, struct super_block, s_shrink);

	/*
	 * We don't call trylock_super() here as it is a scalability bottleneck,
	 * so we're exposed to partial setup state. The shrinker rwsem does not
	 * protect filesystem operations backing list_lru_shrink_count() or
	 * s_op->nr_cached_objects(). Counts can change between
	 * super_cache_count and super_cache_scan, so we really don't need locks
	 * here.
	 *
	 * However, if we are currently mounting the superblock, the underlying
	 * filesystem might be in a state of partial construction and hence it
	 * is dangerous to access it.  trylock_super() uses a SB_BORN check to
	 * avoid this situation, so do the same here. The memory barrier is
	 * matched with the one in mount_fs() as we don't hold locks here.
	 */
	if (!(sb->s_flags & SB_BORN))
		return 0;
	smp_rmb();

	if (sb->s_op && sb->s_op->nr_cached_objects)
		total_objects = sb->s_op->nr_cached_objects(sb, sc);

	total_objects += list_lru_shrink_count(&sb->s_dentry_lru, sc);
	total_objects += list_lru_shrink_count(&sb->s_inode_lru, sc);

	total_objects = vfs_pressure_ratio(total_objects);
	return total_objects;
}
static void destroy_super_work(struct work_struct *work)
{
	struct super_block *s = container_of(work, struct super_block,
							destroy_work);
	int i;

	for (i = 0; i < SB_FREEZE_LEVELS; i++)
		percpu_free_rwsem(&s->s_writers.rw_sem[i]);
	kfree(s);
}

static void destroy_super_rcu(struct rcu_head *head)
{
	struct super_block *s = container_of(head, struct super_block, rcu);
	INIT_WORK(&s->destroy_work, destroy_super_work);
	schedule_work(&s->destroy_work);
}
/* Free a superblock that has never been seen by anyone */
static void destroy_unused_super(struct super_block *s)
{
	if (!s)
		return;
	up_write(&s->s_umount);
	list_lru_destroy(&s->s_dentry_lru);
	list_lru_destroy(&s->s_inode_lru);
	security_sb_free(s);
	put_user_ns(s->s_user_ns);
	kfree(s->s_subtype);
	free_prealloced_shrinker(&s->s_shrink);
	/* no delays needed */
	destroy_super_work(&s->destroy_work);
}
/**
 *	alloc_super	-	create new superblock
 *	@type:	filesystem type superblock should belong to
 *	@flags: the mount flags
 *	@user_ns: User namespace for the super_block
 *
 *	Allocates and initializes a new &struct super_block.  alloc_super()
 *	returns a pointer to a new superblock or %NULL if allocation had failed.
 */
static struct super_block *alloc_super(struct file_system_type *type, int flags,
				       struct user_namespace *user_ns)
{
	struct super_block *s = kzalloc(sizeof(struct super_block), GFP_USER);
	static const struct super_operations default_op;
	int i;

	if (!s)
		return NULL;

	INIT_LIST_HEAD(&s->s_mounts);
	s->s_user_ns = get_user_ns(user_ns);
	init_rwsem(&s->s_umount);
	lockdep_set_class(&s->s_umount, &type->s_umount_key);
	/*
	 * sget() can have s_umount recursion.
	 *
	 * When it cannot find a suitable sb, it allocates a new
	 * one (this one), and tries again to find a suitable old
	 * one.
	 *
	 * In case that succeeds, it will acquire the s_umount
	 * lock of the old one. Since these are clearly distinct
	 * locks, and this object isn't exposed yet, there's no
	 * risk of deadlocks.
	 *
	 * Annotate this by putting this lock in a different
	 * subclass.
	 */
	down_write_nested(&s->s_umount, SINGLE_DEPTH_NESTING);

	if (security_sb_alloc(s))
		goto fail;

	for (i = 0; i < SB_FREEZE_LEVELS; i++) {
		if (__percpu_init_rwsem(&s->s_writers.rw_sem[i],
					sb_writers_name[i],
					&type->s_writers_key[i]))
			goto fail;
	}
	init_waitqueue_head(&s->s_writers.wait_unfrozen);
	s->s_bdi = &noop_backing_dev_info;
	s->s_flags = flags;
	if (s->s_user_ns != &init_user_ns)
		s->s_iflags |= SB_I_NODEV;
	INIT_HLIST_NODE(&s->s_instances);
	INIT_HLIST_BL_HEAD(&s->s_roots);
	mutex_init(&s->s_sync_lock);
	INIT_LIST_HEAD(&s->s_inodes);
	spin_lock_init(&s->s_inode_list_lock);
	INIT_LIST_HEAD(&s->s_inodes_wb);
	spin_lock_init(&s->s_inode_wblist_lock);

	if (list_lru_init_memcg(&s->s_dentry_lru))
		goto fail;
	if (list_lru_init_memcg(&s->s_inode_lru))
		goto fail;
	s->s_count = 1;
	atomic_set(&s->s_active, 1);
	mutex_init(&s->s_vfs_rename_mutex);
	lockdep_set_class(&s->s_vfs_rename_mutex, &type->s_vfs_rename_key);
	init_rwsem(&s->s_dquot.dqio_sem);
	s->s_maxbytes = MAX_NON_LFS;
	s->s_op = &default_op;
	s->s_time_gran = 1000000000;
	s->cleancache_poolid = CLEANCACHE_NO_POOL;

	s->s_shrink.seeks = DEFAULT_SEEKS;
	s->s_shrink.scan_objects = super_cache_scan;
	s->s_shrink.count_objects = super_cache_count;
	s->s_shrink.batch = 1024;
	s->s_shrink.flags = SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE;
	if (prealloc_shrinker(&s->s_shrink))
		goto fail;
	return s;

fail:
	destroy_unused_super(s);
	return NULL;
}
/* Superblock refcounting  */

/*
 * Drop a superblock's refcount.  The caller must hold sb_lock.
 */
static void __put_super(struct super_block *s)
{
	if (!--s->s_count) {
		list_del_init(&s->s_list);
		WARN_ON(s->s_dentry_lru.node);
		WARN_ON(s->s_inode_lru.node);
		WARN_ON(!list_empty(&s->s_mounts));
		security_sb_free(s);
		put_user_ns(s->s_user_ns);
		kfree(s->s_subtype);
		call_rcu(&s->rcu, destroy_super_rcu);
	}
}
/**
 *	put_super	-	drop a temporary reference to superblock
 *	@sb: superblock in question
 *
 *	Drops a temporary reference, frees superblock if there are no
 *	references left.
 */
static void put_super(struct super_block *sb)
{
	spin_lock(&sb_lock);
	__put_super(sb);
	spin_unlock(&sb_lock);
}
/**
 *	deactivate_locked_super	-	drop an active reference to superblock
 *	@s: superblock to deactivate
 *
 *	Drops an active reference to superblock, converting it into a temporary
 *	one if there are no other active references left.  In that case we
 *	tell fs driver to shut it down and drop the temporary reference we
 *	had just acquired.
 *
 *	Caller holds exclusive lock on superblock; that lock is released.
 */
void deactivate_locked_super(struct super_block *s)
{
	struct file_system_type *fs = s->s_type;
	if (atomic_dec_and_test(&s->s_active)) {
		cleancache_invalidate_fs(s);
		unregister_shrinker(&s->s_shrink);
		fs->kill_sb(s);

		/*
		 * Since list_lru_destroy() may sleep, we cannot call it from
		 * put_super(), where we hold the sb_lock. Therefore we destroy
		 * the lru lists right now.
		 */
		list_lru_destroy(&s->s_dentry_lru);
		list_lru_destroy(&s->s_inode_lru);

		put_filesystem(fs);
		put_super(s);
	} else {
		up_write(&s->s_umount);
	}
}

EXPORT_SYMBOL(deactivate_locked_super);
/**
 *	deactivate_super	-	drop an active reference to superblock
 *	@s: superblock to deactivate
 *
 *	Variant of deactivate_locked_super(), except that superblock is *not*
 *	locked by caller.  If we are going to drop the final active reference,
 *	lock will be acquired prior to that.
 */
void deactivate_super(struct super_block *s)
{
	if (!atomic_add_unless(&s->s_active, -1, 1)) {
		down_write(&s->s_umount);
		deactivate_locked_super(s);
	}
}

EXPORT_SYMBOL(deactivate_super);
/*
 *	grab_super - acquire an active reference
 *	@s: reference we are trying to make active
 *
 *	Tries to acquire an active reference.  grab_super() is used when we
 *	had just found a superblock in super_blocks or fs_type->fs_supers
 *	and want to turn it into a full-blown active reference.  grab_super()
 *	is called with sb_lock held and drops it.  Returns 1 in case of
 *	success, 0 if we had failed (the superblock was already dead or
 *	dying when grab_super() had been called).  Note that this is only
 *	called for superblocks not in rundown mode (== ones still on ->fs_supers
 *	of their type), so increment of ->s_count is OK here.
 */
static int grab_super(struct super_block *s) __releases(sb_lock)
{
	s->s_count++;
	spin_unlock(&sb_lock);
	down_write(&s->s_umount);
	if ((s->s_flags & SB_BORN) && atomic_inc_not_zero(&s->s_active)) {
		put_super(s);
		return 1;
	}
	up_write(&s->s_umount);
	put_super(s);
	return 0;
}
/*
 *	trylock_super - try to grab ->s_umount shared
 *	@sb: reference we are trying to grab
 *
 *	Try to prevent fs shutdown.  This is used in places where we
 *	cannot take an active reference but we need to ensure that the
 *	filesystem is not shut down while we are working on it. It returns
 *	false if we cannot acquire s_umount or if we lose the race and
 *	the filesystem already got into shutdown, and returns true with the
 *	s_umount lock held in read mode in case of success. On successful
 *	return, the caller must drop the s_umount lock when done.
 *
 *	Note that unlike get_super() et al. this one does *not* bump ->s_count.
 *	The reason why it's safe is that we are OK with doing trylock instead
 *	of down_read().  There are a couple of places that are OK with that, but
 *	it's very much not a general-purpose interface.
 */
bool trylock_super(struct super_block *sb)
{
	if (down_read_trylock(&sb->s_umount)) {
		if (!hlist_unhashed(&sb->s_instances) &&
		    sb->s_root && (sb->s_flags & SB_BORN))
			return true;
		up_read(&sb->s_umount);
	}
	return false;
}
/**
 *	generic_shutdown_super	-	common helper for ->kill_sb()
 *	@sb: superblock to kill
 *
 *	generic_shutdown_super() does all fs-independent work on superblock
 *	shutdown.  Typical ->kill_sb() should pick all fs-specific objects
 *	that need destruction out of superblock, call generic_shutdown_super()
 *	and release aforementioned objects.  Note: dentries and inodes _are_
 *	taken care of and do not need specific handling.
 *
 *	Upon calling this function, the filesystem may no longer alter or
 *	rearrange the set of dentries belonging to this super_block, nor may it
 *	change the attachments of dentries to inodes.
 */
void generic_shutdown_super(struct super_block *sb)
{
	const struct super_operations *sop = sb->s_op;

	if (sb->s_root) {
		shrink_dcache_for_umount(sb);
		sync_filesystem(sb);
		sb->s_flags &= ~SB_ACTIVE;

		fsnotify_unmount_inodes(sb);
		cgroup_writeback_umount();

		evict_inodes(sb);

		if (sb->s_dio_done_wq) {
			destroy_workqueue(sb->s_dio_done_wq);
			sb->s_dio_done_wq = NULL;
		}

		if (sop->put_super)
			sop->put_super(sb);

		if (!list_empty(&sb->s_inodes)) {
			printk("VFS: Busy inodes after unmount of %s. "
			   "Self-destruct in 5 seconds.  Have a nice day...\n",
			   sb->s_id);
		}
	}
	spin_lock(&sb_lock);
	/* should be initialized for __put_super_and_need_restart() */
	hlist_del_init(&sb->s_instances);
	spin_unlock(&sb_lock);
	up_write(&sb->s_umount);
	if (sb->s_bdi != &noop_backing_dev_info) {
		bdi_put(sb->s_bdi);
		sb->s_bdi = &noop_backing_dev_info;
	}
}

EXPORT_SYMBOL(generic_shutdown_super);
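
/*
 * Illustrative sketch (not part of the original file): a typical ->kill_sb()
 * following the pattern described in the kernel-doc above -- pick the
 * fs-specific objects out of the superblock, let the VFS do the generic
 * teardown, then release them.  The "myfs_*" names are hypothetical.
 */
#if 0
static void myfs_kill_sb(struct super_block *sb)
{
	struct myfs_fs_info *fsi = sb->s_fs_info;	/* hypothetical private data */

	/* The VFS tears down dentries, inodes and generic state here. */
	generic_shutdown_super(sb);

	/* Only afterwards release the fs-specific objects. */
	kfree(fsi);
	sb->s_fs_info = NULL;
}
#endif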
/**
 *	sget_userns -	find or create a superblock
 *	@type:	filesystem type superblock should belong to
 *	@test:	comparison callback
 *	@set:	setup callback
 *	@flags:	mount flags
 *	@user_ns: User namespace for the super_block
 *	@data:	argument to each of them
 */
struct super_block *sget_userns(struct file_system_type *type,
			int (*test)(struct super_block *,void *),
			int (*set)(struct super_block *,void *),
			int flags, struct user_namespace *user_ns,
			void *data)
{
	struct super_block *s = NULL;
	struct super_block *old;
	int err;

	if (!(flags & (SB_KERNMOUNT|SB_SUBMOUNT)) &&
	    !(type->fs_flags & FS_USERNS_MOUNT) &&
	    !capable(CAP_SYS_ADMIN))
		return ERR_PTR(-EPERM);
retry:
	spin_lock(&sb_lock);
	if (test) {
		hlist_for_each_entry(old, &type->fs_supers, s_instances) {
			if (!test(old, data))
				continue;
			if (user_ns != old->s_user_ns) {
				spin_unlock(&sb_lock);
				destroy_unused_super(s);
				return ERR_PTR(-EBUSY);
			}
			if (!grab_super(old))
				goto retry;
			destroy_unused_super(s);
			return old;
		}
	}
	if (!s) {
		spin_unlock(&sb_lock);
		s = alloc_super(type, (flags & ~SB_SUBMOUNT), user_ns);
		if (!s)
			return ERR_PTR(-ENOMEM);
		goto retry;
	}

	err = set(s, data);
	if (err) {
		spin_unlock(&sb_lock);
		destroy_unused_super(s);
		return ERR_PTR(err);
	}
	s->s_type = type;
	strlcpy(s->s_id, type->name, sizeof(s->s_id));
	list_add_tail(&s->s_list, &super_blocks);
	hlist_add_head(&s->s_instances, &type->fs_supers);
	spin_unlock(&sb_lock);
	get_filesystem(type);
	register_shrinker_prepared(&s->s_shrink);
	return s;
}

EXPORT_SYMBOL(sget_userns);
/**
 *	sget	-	find or create a superblock
 *	@type:	filesystem type superblock should belong to
 *	@test:	comparison callback
 *	@set:	setup callback
 *	@flags:	mount flags
 *	@data:	argument to each of them
 */
struct super_block *sget(struct file_system_type *type,
			int (*test)(struct super_block *,void *),
			int (*set)(struct super_block *,void *),
			int flags,
			void *data)
{
	struct user_namespace *user_ns = current_user_ns();

	/* We don't yet pass the user namespace of the parent
	 * mount through to here so always use &init_user_ns
	 * until that changes.
	 */
	if (flags & SB_SUBMOUNT)
		user_ns = &init_user_ns;

	/* Ensure the requestor has permissions over the target filesystem */
	if (!(flags & (SB_KERNMOUNT|SB_SUBMOUNT)) && !ns_capable(user_ns, CAP_SYS_ADMIN))
		return ERR_PTR(-EPERM);

	return sget_userns(type, test, set, flags, user_ns, data);
}

EXPORT_SYMBOL(sget);
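
/*
 * Illustrative sketch (not part of the original file): how a filesystem
 * might use sget() to share one superblock per backing object, in the same
 * style as the ns_test_super()/ns_set_super() pair further down in this
 * file.  The "myfs_*" names and the "ctx" cookie are hypothetical.
 */
#if 0
static int myfs_test_super(struct super_block *sb, void *data)
{
	return sb->s_fs_info == data;		/* same backing object? */
}

static int myfs_set_super(struct super_block *sb, void *data)
{
	sb->s_fs_info = data;			/* remember it for ->test */
	return set_anon_super(sb, NULL);	/* allocate an anonymous dev_t */
}

	/* in ->mount(): reuse an existing sb for "ctx" or create a new one */
	sb = sget(fs_type, myfs_test_super, myfs_set_super, flags, ctx);
#endif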
void drop_super(struct super_block *sb)
{
	up_read(&sb->s_umount);
	put_super(sb);
}

EXPORT_SYMBOL(drop_super);

void drop_super_exclusive(struct super_block *sb)
{
	up_write(&sb->s_umount);
	put_super(sb);
}
EXPORT_SYMBOL(drop_super_exclusive);
static void __iterate_supers(void (*f)(struct super_block *))
{
	struct super_block *sb, *p = NULL;

	spin_lock(&sb_lock);
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (hlist_unhashed(&sb->s_instances))
			continue;
		sb->s_count++;
		spin_unlock(&sb_lock);

		f(sb);

		spin_lock(&sb_lock);
		if (p)
			__put_super(p);
		p = sb;
	}
	if (p)
		__put_super(p);
	spin_unlock(&sb_lock);
}
/**
 *	iterate_supers - call function for all active superblocks
 *	@f: function to call
 *	@arg: argument to pass to it
 *
 *	Scans the superblock list and calls given function, passing it
 *	locked superblock and given argument.
 */
void iterate_supers(void (*f)(struct super_block *, void *), void *arg)
{
	struct super_block *sb, *p = NULL;

	spin_lock(&sb_lock);
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (hlist_unhashed(&sb->s_instances))
			continue;
		sb->s_count++;
		spin_unlock(&sb_lock);

		down_read(&sb->s_umount);
		if (sb->s_root && (sb->s_flags & SB_BORN))
			f(sb, arg);
		up_read(&sb->s_umount);

		spin_lock(&sb_lock);
		if (p)
			__put_super(p);
		p = sb;
	}
	if (p)
		__put_super(p);
	spin_unlock(&sb_lock);
}
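
/*
 * Illustrative sketch (not part of the original file): iterate_supers() is
 * how "global" operations such as sys_sync() visit every mounted filesystem.
 * The callback below is hypothetical; it runs with sb->s_umount held shared
 * and the SB_BORN check already done by iterate_supers().
 */
#if 0
static void myfs_sync_one_sb(struct super_block *sb, void *arg)
{
	if (!sb_rdonly(sb))
		sync_filesystem(sb);
}

	iterate_supers(myfs_sync_one_sb, NULL);
#endif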
/**
 *	iterate_supers_type - call function for superblocks of given type
 *	@type: fs type
 *	@f: function to call
 *	@arg: argument to pass to it
 *
 *	Scans the superblock list and calls given function, passing it
 *	locked superblock and given argument.
 */
void iterate_supers_type(struct file_system_type *type,
	void (*f)(struct super_block *, void *), void *arg)
{
	struct super_block *sb, *p = NULL;

	spin_lock(&sb_lock);
	hlist_for_each_entry(sb, &type->fs_supers, s_instances) {
		sb->s_count++;
		spin_unlock(&sb_lock);

		down_read(&sb->s_umount);
		if (sb->s_root && (sb->s_flags & SB_BORN))
			f(sb, arg);
		up_read(&sb->s_umount);

		spin_lock(&sb_lock);
		if (p)
			__put_super(p);
		p = sb;
	}
	if (p)
		__put_super(p);
	spin_unlock(&sb_lock);
}

EXPORT_SYMBOL(iterate_supers_type);
static struct super_block *__get_super(struct block_device *bdev, bool excl)
{
	struct super_block *sb;

	if (!bdev)
		return NULL;

	spin_lock(&sb_lock);
rescan:
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (hlist_unhashed(&sb->s_instances))
			continue;
		if (sb->s_bdev == bdev) {
			sb->s_count++;
			spin_unlock(&sb_lock);
			if (!excl)
				down_read(&sb->s_umount);
			else
				down_write(&sb->s_umount);
			/* still alive? */
			if (sb->s_root && (sb->s_flags & SB_BORN))
				return sb;
			if (!excl)
				up_read(&sb->s_umount);
			else
				up_write(&sb->s_umount);
			/* nope, got unmounted */
			spin_lock(&sb_lock);
			__put_super(sb);
			goto rescan;
		}
	}
	spin_unlock(&sb_lock);
	return NULL;
}

/**
 *	get_super - get the superblock of a device
 *	@bdev: device to get the superblock for
 *
 *	Scans the superblock list and finds the superblock of the file system
 *	mounted on the device given. %NULL is returned if no match is found.
 */
struct super_block *get_super(struct block_device *bdev)
{
	return __get_super(bdev, false);
}
EXPORT_SYMBOL(get_super);
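
/*
 * Illustrative sketch (not part of the original file): a typical get_super()
 * caller, modelled on the block layer's fsync_bdev() -- look up the
 * superblock mounted on a device, sync it, and drop the temporary reference
 * with drop_super().
 */
#if 0
	struct super_block *sb = get_super(bdev);

	if (sb) {
		int res = sync_filesystem(sb);
		drop_super(sb);		/* releases s_umount and ->s_count */
		return res;
	}
	return sync_blockdev(bdev);	/* nothing mounted: sync raw device */
#endif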
static struct super_block *__get_super_thawed(struct block_device *bdev,
					      bool excl)
{
	while (1) {
		struct super_block *s = __get_super(bdev, excl);
		if (!s || s->s_writers.frozen == SB_UNFROZEN)
			return s;
		if (!excl)
			up_read(&s->s_umount);
		else
			up_write(&s->s_umount);
		wait_event(s->s_writers.wait_unfrozen,
			   s->s_writers.frozen == SB_UNFROZEN);
		put_super(s);
	}
}

/**
 *	get_super_thawed - get thawed superblock of a device
 *	@bdev: device to get the superblock for
 *
 *	Scans the superblock list and finds the superblock of the file system
 *	mounted on the device. The superblock is returned once it is thawed
 *	(or immediately if it was not frozen). %NULL is returned if no match
 *	is found.
 */
struct super_block *get_super_thawed(struct block_device *bdev)
{
	return __get_super_thawed(bdev, false);
}
EXPORT_SYMBOL(get_super_thawed);

/**
 *	get_super_exclusive_thawed - get thawed superblock of a device
 *	@bdev: device to get the superblock for
 *
 *	Scans the superblock list and finds the superblock of the file system
 *	mounted on the device. The superblock is returned once it is thawed
 *	(or immediately if it was not frozen) and s_umount semaphore is held
 *	in exclusive mode. %NULL is returned if no match is found.
 */
struct super_block *get_super_exclusive_thawed(struct block_device *bdev)
{
	return __get_super_thawed(bdev, true);
}
EXPORT_SYMBOL(get_super_exclusive_thawed);
/**
 * get_active_super - get an active reference to the superblock of a device
 * @bdev: device to get the superblock for
 *
 * Scans the superblock list and finds the superblock of the file system
 * mounted on the device given.  Returns the superblock with an active
 * reference or %NULL if none was found.
 */
struct super_block *get_active_super(struct block_device *bdev)
{
	struct super_block *sb;

	if (!bdev)
		return NULL;

restart:
	spin_lock(&sb_lock);
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (hlist_unhashed(&sb->s_instances))
			continue;
		if (sb->s_bdev == bdev) {
			if (!grab_super(sb))
				goto restart;
			up_write(&sb->s_umount);
			return sb;
		}
	}
	spin_unlock(&sb_lock);
	return NULL;
}

struct super_block *user_get_super(dev_t dev)
{
	struct super_block *sb;

	spin_lock(&sb_lock);
rescan:
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (hlist_unhashed(&sb->s_instances))
			continue;
		if (sb->s_dev == dev) {
			sb->s_count++;
			spin_unlock(&sb_lock);
			down_read(&sb->s_umount);
			/* still alive? */
			if (sb->s_root && (sb->s_flags & SB_BORN))
				return sb;
			up_read(&sb->s_umount);
			/* nope, got unmounted */
			spin_lock(&sb_lock);
			__put_super(sb);
			goto rescan;
		}
	}
	spin_unlock(&sb_lock);
	return NULL;
}
/**
 *	do_remount_sb - asks filesystem to change mount options.
 *	@sb:	superblock in question
 *	@sb_flags: revised superblock flags
 *	@data:	the rest of options
 *	@force: whether or not to force the change
 *
 *	Alters the mount options of a mounted file system.
 */
int do_remount_sb(struct super_block *sb, int sb_flags, void *data, int force)
{
	int retval;
	int remount_ro;

	if (sb->s_writers.frozen != SB_UNFROZEN)
		return -EBUSY;

#ifdef CONFIG_BLOCK
	if (!(sb_flags & SB_RDONLY) && bdev_read_only(sb->s_bdev))
		return -EACCES;
#endif

	remount_ro = (sb_flags & SB_RDONLY) && !sb_rdonly(sb);

	if (remount_ro) {
		if (!hlist_empty(&sb->s_pins)) {
			up_write(&sb->s_umount);
			group_pin_kill(&sb->s_pins);
			down_write(&sb->s_umount);
			if (!sb->s_root)
				return 0;
			if (sb->s_writers.frozen != SB_UNFROZEN)
				return -EBUSY;
			remount_ro = (sb_flags & SB_RDONLY) && !sb_rdonly(sb);
		}
	}
	shrink_dcache_sb(sb);

	/* If we are remounting RDONLY and current sb is read/write,
	   make sure there are no rw files opened */
	if (remount_ro) {
		if (force) {
			sb->s_readonly_remount = 1;
			smp_wmb();
		} else {
			retval = sb_prepare_remount_readonly(sb);
			if (retval)
				return retval;
		}
	}

	if (sb->s_op->remount_fs) {
		retval = sb->s_op->remount_fs(sb, &sb_flags, data);
		if (retval) {
			if (!force)
				goto cancel_readonly;
			/* If forced remount, go ahead despite any errors */
			WARN(1, "forced remount of a %s fs returned %i\n",
			     sb->s_type->name, retval);
		}
	}
	sb->s_flags = (sb->s_flags & ~MS_RMT_MASK) | (sb_flags & MS_RMT_MASK);
	/* Needs to be ordered wrt mnt_is_readonly() */
	smp_wmb();
	sb->s_readonly_remount = 0;

	/*
	 * Some filesystems modify their metadata via some other path than the
	 * bdev buffer cache (eg. use a private mapping, or directories in
	 * pagecache, etc). Also file data modifications go via their own
	 * mappings. So if we try to remount read-only and then copy the
	 * filesystem from the bdev, we could get stale data, so invalidate it
	 * to give a best effort at coherency.
	 */
	if (remount_ro && sb->s_bdev)
		invalidate_bdev(sb->s_bdev);
	return 0;

cancel_readonly:
	sb->s_readonly_remount = 0;
	return retval;
}
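
/*
 * Illustrative sketch (not part of the original file): a minimal
 * ->remount_fs() as seen from the filesystem side of the call above.
 * do_remount_sb() has already blocked writers for rw->ro transitions via
 * sb_prepare_remount_readonly().  The function name is hypothetical.
 */
#if 0
static int myfs_remount(struct super_block *sb, int *flags, char *data)
{
	sync_filesystem(sb);		/* common first step in ->remount_fs */
	if (*flags & SB_RDONLY) {
		/* going read-only: flush fs-private dirty state here */
	}
	return 0;	/* nonzero aborts the remount unless it was forced */
}
#endif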
static void do_emergency_remount_callback(struct super_block *sb)
{
	down_write(&sb->s_umount);
	if (sb->s_root && sb->s_bdev && (sb->s_flags & SB_BORN) &&
	    !sb_rdonly(sb)) {
		/*
		 * What lock protects sb->s_flags??
		 */
		do_remount_sb(sb, SB_RDONLY, NULL, 1);
	}
	up_write(&sb->s_umount);
}

static void do_emergency_remount(struct work_struct *work)
{
	__iterate_supers(do_emergency_remount_callback);
	kfree(work);
	printk("Emergency Remount complete\n");
}
void emergency_remount(void)
{
	struct work_struct *work;

	work = kmalloc(sizeof(*work), GFP_ATOMIC);
	if (work) {
		INIT_WORK(work, do_emergency_remount);
		schedule_work(work);
	}
}
static void do_thaw_all_callback(struct super_block *sb)
{
	down_write(&sb->s_umount);
	if (sb->s_root && sb->s_flags & SB_BORN) {
		emergency_thaw_bdev(sb);
		thaw_super_locked(sb);
	} else {
		up_write(&sb->s_umount);
	}
}

static void do_thaw_all(struct work_struct *work)
{
	__iterate_supers(do_thaw_all_callback);
	kfree(work);
	printk(KERN_WARNING "Emergency Thaw complete\n");
}

/**
 * emergency_thaw_all -- forcibly thaw every frozen filesystem
 *
 * Used for emergency unfreeze of all filesystems via SysRq
 */
void emergency_thaw_all(void)
{
	struct work_struct *work;

	work = kmalloc(sizeof(*work), GFP_ATOMIC);
	if (work) {
		INIT_WORK(work, do_thaw_all);
		schedule_work(work);
	}
}
/*
 * Unnamed block devices are dummy devices used by virtual
 * filesystems which don't use real block-devices.  -- jrs
 */

static DEFINE_IDA(unnamed_dev_ida);
static DEFINE_SPINLOCK(unnamed_dev_lock);/* protects the above */
/* Many userspace utilities consider an FSID of 0 invalid.
 * Always return at least 1 from get_anon_bdev.
 */
static int unnamed_dev_start = 1;

int get_anon_bdev(dev_t *p)
{
	int dev;
	int error;

 retry:
	if (ida_pre_get(&unnamed_dev_ida, GFP_ATOMIC) == 0)
		return -ENOMEM;
	spin_lock(&unnamed_dev_lock);
	error = ida_get_new_above(&unnamed_dev_ida, unnamed_dev_start, &dev);
	if (!error)
		unnamed_dev_start = dev + 1;
	spin_unlock(&unnamed_dev_lock);
	if (error == -EAGAIN)
		/* We raced and lost with another CPU. */
		goto retry;
	else if (error)
		return -EAGAIN;

	if (dev >= (1 << MINORBITS)) {
		spin_lock(&unnamed_dev_lock);
		ida_remove(&unnamed_dev_ida, dev);
		if (unnamed_dev_start > dev)
			unnamed_dev_start = dev;
		spin_unlock(&unnamed_dev_lock);
		return -EMFILE;
	}
	*p = MKDEV(0, dev & MINORMASK);
	return 0;
}
EXPORT_SYMBOL(get_anon_bdev);

void free_anon_bdev(dev_t dev)
{
	int slot = MINOR(dev);
	spin_lock(&unnamed_dev_lock);
	ida_remove(&unnamed_dev_ida, slot);
	if (slot < unnamed_dev_start)
		unnamed_dev_start = slot;
	spin_unlock(&unnamed_dev_lock);
}
EXPORT_SYMBOL(free_anon_bdev);
int set_anon_super(struct super_block *s, void *data)
{
	return get_anon_bdev(&s->s_dev);
}
EXPORT_SYMBOL(set_anon_super);

void kill_anon_super(struct super_block *sb)
{
	dev_t dev = sb->s_dev;
	generic_shutdown_super(sb);
	free_anon_bdev(dev);
}
EXPORT_SYMBOL(kill_anon_super);

void kill_litter_super(struct super_block *sb)
{
	if (sb->s_root)
		d_genocide(sb->s_root);
	kill_anon_super(sb);
}
EXPORT_SYMBOL(kill_litter_super);
static int ns_test_super(struct super_block *sb, void *data)
{
	return sb->s_fs_info == data;
}

static int ns_set_super(struct super_block *sb, void *data)
{
	sb->s_fs_info = data;
	return set_anon_super(sb, NULL);
}

struct dentry *mount_ns(struct file_system_type *fs_type,
	int flags, void *data, void *ns, struct user_namespace *user_ns,
	int (*fill_super)(struct super_block *, void *, int))
{
	struct super_block *sb;

	/* Don't allow mounting unless the caller has CAP_SYS_ADMIN
	 * over the namespace.
	 */
	if (!(flags & SB_KERNMOUNT) && !ns_capable(user_ns, CAP_SYS_ADMIN))
		return ERR_PTR(-EPERM);

	sb = sget_userns(fs_type, ns_test_super, ns_set_super, flags,
			 user_ns, ns);
	if (IS_ERR(sb))
		return ERR_CAST(sb);

	if (!sb->s_root) {
		int err;
		err = fill_super(sb, data, flags & SB_SILENT ? 1 : 0);
		if (err) {
			deactivate_locked_super(sb);
			return ERR_PTR(err);
		}

		sb->s_flags |= SB_ACTIVE;
	}

	return dget(sb->s_root);
}

EXPORT_SYMBOL(mount_ns);
#ifdef CONFIG_BLOCK
static int set_bdev_super(struct super_block *s, void *data)
{
	s->s_bdev = data;
	s->s_dev = s->s_bdev->bd_dev;
	s->s_bdi = bdi_get(s->s_bdev->bd_bdi);

	return 0;
}

static int test_bdev_super(struct super_block *s, void *data)
{
	return (void *)s->s_bdev == data;
}
struct dentry *mount_bdev(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data,
	int (*fill_super)(struct super_block *, void *, int))
{
	struct block_device *bdev;
	struct super_block *s;
	fmode_t mode = FMODE_READ | FMODE_EXCL;
	int error = 0;

	if (!(flags & SB_RDONLY))
		mode |= FMODE_WRITE;

	bdev = blkdev_get_by_path(dev_name, mode, fs_type);
	if (IS_ERR(bdev))
		return ERR_CAST(bdev);

	/*
	 * once the super is inserted into the list by sget, s_umount
	 * will protect the lockfs code from trying to start a snapshot
	 * while we are mounting
	 */
	mutex_lock(&bdev->bd_fsfreeze_mutex);
	if (bdev->bd_fsfreeze_count > 0) {
		mutex_unlock(&bdev->bd_fsfreeze_mutex);
		error = -EBUSY;
		goto error_bdev;
	}
	s = sget(fs_type, test_bdev_super, set_bdev_super, flags | SB_NOSEC,
		 bdev);
	mutex_unlock(&bdev->bd_fsfreeze_mutex);
	if (IS_ERR(s))
		goto error_s;

	if (s->s_root) {
		if ((flags ^ s->s_flags) & SB_RDONLY) {
			deactivate_locked_super(s);
			error = -EBUSY;
			goto error_bdev;
		}

		/*
		 * s_umount nests inside bd_mutex during
		 * __invalidate_device().  blkdev_put() acquires
		 * bd_mutex and can't be called under s_umount.  Drop
		 * s_umount temporarily.  This is safe as we're
		 * holding an active reference.
		 */
		up_write(&s->s_umount);
		blkdev_put(bdev, mode);
		down_write(&s->s_umount);
	} else {
		s->s_mode = mode;
		snprintf(s->s_id, sizeof(s->s_id), "%pg", bdev);
		sb_set_blocksize(s, block_size(bdev));
		error = fill_super(s, data, flags & SB_SILENT ? 1 : 0);
		if (error) {
			deactivate_locked_super(s);
			goto error;
		}

		s->s_flags |= SB_ACTIVE;
		bdev->bd_super = s;
	}

	return dget(s->s_root);

error_s:
	error = PTR_ERR(s);
error_bdev:
	blkdev_put(bdev, mode);
error:
	return ERR_PTR(error);
}
EXPORT_SYMBOL(mount_bdev);
void kill_block_super(struct super_block *sb)
{
	struct block_device *bdev = sb->s_bdev;
	fmode_t mode = sb->s_mode;

	bdev->bd_super = NULL;
	generic_shutdown_super(sb);
	sync_blockdev(bdev);
	WARN_ON_ONCE(!(mode & FMODE_EXCL));
	blkdev_put(bdev, mode | FMODE_EXCL);
}

EXPORT_SYMBOL(kill_block_super);
#endif
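
/*
 * Illustrative sketch (not part of the original file): a block-device
 * filesystem pairing mount_bdev() with kill_block_super(), as most
 * disk-based filesystems do.  "myfs_mount" and "myfs_fill_super" are
 * hypothetical.
 */
#if 0
static struct dentry *myfs_mount(struct file_system_type *fs_type,
				 int flags, const char *dev_name, void *data)
{
	return mount_bdev(fs_type, flags, dev_name, data, myfs_fill_super);
}

static struct file_system_type myfs_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "myfs",
	.mount		= myfs_mount,
	.kill_sb	= kill_block_super,
	.fs_flags	= FS_REQUIRES_DEV,	/* needs a real block device */
};
#endif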
struct dentry *mount_nodev(struct file_system_type *fs_type,
	int flags, void *data,
	int (*fill_super)(struct super_block *, void *, int))
{
	int error;
	struct super_block *s = sget(fs_type, NULL, set_anon_super, flags, NULL);

	if (IS_ERR(s))
		return ERR_CAST(s);

	error = fill_super(s, data, flags & SB_SILENT ? 1 : 0);
	if (error) {
		deactivate_locked_super(s);
		return ERR_PTR(error);
	}
	s->s_flags |= SB_ACTIVE;
	return dget(s->s_root);
}
EXPORT_SYMBOL(mount_nodev);
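
/*
 * Illustrative sketch (not part of the original file): a minimal in-memory
 * filesystem wiring mount_nodev() and kill_litter_super() together, in the
 * style of hugetlbfs and similar pseudo-filesystems.  The "myfs_*" names
 * are hypothetical.
 */
#if 0
static struct dentry *myfs_mount(struct file_system_type *fs_type,
				 int flags, const char *dev_name, void *data)
{
	return mount_nodev(fs_type, flags, data, myfs_fill_super);
}

static struct file_system_type myfs_fs_type = {
	.name		= "myfs",
	.mount		= myfs_mount,
	.kill_sb	= kill_litter_super,	/* drops pinned dentries, then kill_anon_super() */
};
#endif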
static int compare_single(struct super_block *s, void *p)
{
	return 1;
}

struct dentry *mount_single(struct file_system_type *fs_type,
	int flags, void *data,
	int (*fill_super)(struct super_block *, void *, int))
{
	struct super_block *s;
	int error;

	s = sget(fs_type, compare_single, set_anon_super, flags, NULL);
	if (IS_ERR(s))
		return ERR_CAST(s);
	if (!s->s_root) {
		error = fill_super(s, data, flags & SB_SILENT ? 1 : 0);
		if (error) {
			deactivate_locked_super(s);
			return ERR_PTR(error);
		}
		s->s_flags |= SB_ACTIVE;
	} else {
		do_remount_sb(s, flags, data, 0);
	}
	return dget(s->s_root);
}
EXPORT_SYMBOL(mount_single);
struct dentry *
mount_fs(struct file_system_type *type, int flags, const char *name, void *data)
{
	struct dentry *root;
	struct super_block *sb;
	char *secdata = NULL;
	int error = -ENOMEM;

	if (data && !(type->fs_flags & FS_BINARY_MOUNTDATA)) {
		secdata = alloc_secdata();
		if (!secdata)
			goto out;

		error = security_sb_copy_data(data, secdata);
		if (error)
			goto out_free_secdata;
	}

	root = type->mount(type, flags, name, data);
	if (IS_ERR(root)) {
		error = PTR_ERR(root);
		goto out_free_secdata;
	}
	sb = root->d_sb;
	BUG_ON(!sb);
	WARN_ON(!sb->s_bdi);

	/*
	 * Write barrier is for super_cache_count(). We place it before setting
	 * SB_BORN as the data dependency between the two functions is the
	 * superblock structure contents that we just set up, not the SB_BORN
	 * flag.
	 */
	smp_wmb();
	sb->s_flags |= SB_BORN;

	error = security_sb_kern_mount(sb, flags, secdata);
	if (error)
		goto out_sb;

	/*
	 * filesystems should never set s_maxbytes larger than MAX_LFS_FILESIZE
	 * but s_maxbytes was an unsigned long long for many releases. Throw
	 * this warning for a little while to try and catch filesystems that
	 * violate this rule.
	 */
	WARN((sb->s_maxbytes < 0), "%s set sb->s_maxbytes to "
		"negative value (%lld)\n", type->name, sb->s_maxbytes);

	up_write(&sb->s_umount);
	free_secdata(secdata);
	return root;
out_sb:
	dput(root);
	deactivate_locked_super(sb);
out_free_secdata:
	free_secdata(secdata);
out:
	return ERR_PTR(error);
}
/*
 * Setup private BDI for given superblock. It gets automatically cleaned up
 * in generic_shutdown_super().
 */
int super_setup_bdi_name(struct super_block *sb, char *fmt, ...)
{
	struct backing_dev_info *bdi;
	int err;
	va_list args;

	bdi = bdi_alloc(GFP_KERNEL);
	if (!bdi)
		return -ENOMEM;

	bdi->name = sb->s_type->name;

	va_start(args, fmt);
	err = bdi_register_va(bdi, fmt, args);
	va_end(args);
	if (err) {
		bdi_put(bdi);
		return err;
	}
	WARN_ON(sb->s_bdi != &noop_backing_dev_info);
	sb->s_bdi = bdi;

	return 0;
}
EXPORT_SYMBOL(super_setup_bdi_name);

/*
 * Setup private BDI for given superblock. It gets automatically cleaned up
 * in generic_shutdown_super().
 */
int super_setup_bdi(struct super_block *sb)
{
	static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);

	return super_setup_bdi_name(sb, "%.28s-%ld", sb->s_type->name,
				    atomic_long_inc_return(&bdi_seq));
}
EXPORT_SYMBOL(super_setup_bdi);
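
/*
 * Illustrative sketch (not part of the original file): a fill_super callback
 * would typically set up its private BDI early, before any writeback can
 * start.  The function name is hypothetical.
 */
#if 0
static int myfs_fill_super(struct super_block *sb, void *data, int silent)
{
	int err;

	err = super_setup_bdi(sb);	/* private bdi, put in generic_shutdown_super() */
	if (err)
		return err;
	/* ... allocate the root inode, set sb->s_root, etc. ... */
	return 0;
}
#endif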
/*
 * This is an internal function, please use sb_end_{write,pagefault,intwrite}
 * instead.
 */
void __sb_end_write(struct super_block *sb, int level)
{
	percpu_up_read(sb->s_writers.rw_sem + level-1);
}
EXPORT_SYMBOL(__sb_end_write);

/*
 * This is an internal function, please use sb_start_{write,pagefault,intwrite}
 * instead.
 */
int __sb_start_write(struct super_block *sb, int level, bool wait)
{
	bool force_trylock = false;
	int ret = 1;

#ifdef CONFIG_LOCKDEP
	/*
	 * We want lockdep to tell us about possible deadlocks with freezing
	 * but it's a bit tricky to properly instrument it. Getting a freeze
	 * protection works as getting a read lock but there are subtle
	 * problems. XFS for example gets freeze protection on internal level
	 * twice in some cases, which is OK only because we already hold a
	 * freeze protection also on higher level. Due to these cases we have
	 * to use wait == false (trylock mode) which must not fail.
	 */
	if (wait) {
		int i;

		for (i = 0; i < level - 1; i++)
			if (percpu_rwsem_is_held(sb->s_writers.rw_sem + i)) {
				force_trylock = true;
				break;
			}
	}
#endif
	if (wait && !force_trylock)
		percpu_down_read(sb->s_writers.rw_sem + level-1);
	else
		ret = percpu_down_read_trylock(sb->s_writers.rw_sem + level-1);

	WARN_ON(force_trylock && !ret);
	return ret;
}
EXPORT_SYMBOL(__sb_start_write);
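
/*
 * Illustrative sketch (not part of the original file): callers take freeze
 * protection around writes through the sb_start_write()/sb_end_write()
 * wrappers (defined in include/linux/fs.h) rather than calling
 * __sb_start_write() directly.  "do_the_write" is hypothetical.
 */
#if 0
	sb_start_write(inode->i_sb);	/* blocks while SB_FREEZE_WRITE is held */
	err = do_the_write(inode, buf, len);
	sb_end_write(inode->i_sb);
#endif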
/**
 * sb_wait_write - wait until all writers to given file system finish
 * @sb: the super for which we wait
 * @level: type of writers we wait for (normal vs page fault)
 *
 * This function waits until there are no writers of given type to given file
 * system.
 */
static void sb_wait_write(struct super_block *sb, int level)
{
	percpu_down_write(sb->s_writers.rw_sem + level-1);
}

/*
 * We are going to return to userspace and forget about these locks, the
 * ownership goes to the caller of thaw_super() which does unlock().
 */
static void lockdep_sb_freeze_release(struct super_block *sb)
{
	int level;

	for (level = SB_FREEZE_LEVELS - 1; level >= 0; level--)
		percpu_rwsem_release(sb->s_writers.rw_sem + level, 0, _THIS_IP_);
}

/*
 * Tell lockdep we are holding these locks before we call ->unfreeze_fs(sb).
 */
static void lockdep_sb_freeze_acquire(struct super_block *sb)
{
	int level;

	for (level = 0; level < SB_FREEZE_LEVELS; ++level)
		percpu_rwsem_acquire(sb->s_writers.rw_sem + level, 0, _THIS_IP_);
}

static void sb_freeze_unlock(struct super_block *sb)
{
	int level;

	for (level = SB_FREEZE_LEVELS - 1; level >= 0; level--)
		percpu_up_write(sb->s_writers.rw_sem + level);
}
/**
 * freeze_super - lock the filesystem and force it into a consistent state
 * @sb: the super to lock
 *
 * Syncs the super to make sure the filesystem is consistent and calls the fs's
 * freeze_fs.  Subsequent calls to this without first thawing the fs will return
 * -EBUSY.
 *
 * During this function, sb->s_writers.frozen goes through these values:
 *
 * SB_UNFROZEN: File system is normal, all writes progress as usual.
 *
 * SB_FREEZE_WRITE: The file system is in the process of being frozen.  New
 * writes should be blocked, though page faults are still allowed. We wait for
 * all writes to complete and then proceed to the next stage.
 *
 * SB_FREEZE_PAGEFAULT: Freezing continues. Now also page faults are blocked
 * but internal fs threads can still modify the filesystem (although they
 * should not dirty new pages or inodes), writeback can run etc. After waiting
 * for all running page faults we sync the filesystem which will clean all
 * dirty pages and inodes (no new dirty pages or inodes can be created when
 * sync is running).
 *
 * SB_FREEZE_FS: The file system is frozen. Now all internal sources of fs
 * modification are blocked (e.g. XFS preallocation truncation on inode
 * reclaim). This is usually implemented by blocking new transactions for
 * filesystems that have them and need this additional guard. After all
 * internal writers are finished we call ->freeze_fs() to finish filesystem
 * freezing. Then we transition to SB_FREEZE_COMPLETE state. This state is
 * mostly auxiliary for filesystems to verify they do not modify frozen fs.
 *
 * sb->s_writers.frozen is protected by sb->s_umount.
 */
int freeze_super(struct super_block *sb)
{
	int ret;

	atomic_inc(&sb->s_active);
	down_write(&sb->s_umount);
	if (sb->s_writers.frozen != SB_UNFROZEN) {
		deactivate_locked_super(sb);
		return -EBUSY;
	}

	if (!(sb->s_flags & SB_BORN)) {
		up_write(&sb->s_umount);
		return 0;	/* sic - it's "nothing to do" */
	}

	if (sb_rdonly(sb)) {
		/* Nothing to do really... */
		sb->s_writers.frozen = SB_FREEZE_COMPLETE;
		up_write(&sb->s_umount);
		return 0;
	}

	sb->s_writers.frozen = SB_FREEZE_WRITE;
	/* Release s_umount to preserve sb_start_write -> s_umount ordering */
	up_write(&sb->s_umount);
	sb_wait_write(sb, SB_FREEZE_WRITE);
	down_write(&sb->s_umount);

	/* Now we go and block page faults... */
	sb->s_writers.frozen = SB_FREEZE_PAGEFAULT;
	sb_wait_write(sb, SB_FREEZE_PAGEFAULT);

	/* All writers are done so after syncing there won't be dirty data */
	sync_filesystem(sb);

	/* Now wait for internal filesystem counter */
	sb->s_writers.frozen = SB_FREEZE_FS;
	sb_wait_write(sb, SB_FREEZE_FS);

	if (sb->s_op->freeze_fs) {
		ret = sb->s_op->freeze_fs(sb);
		if (ret) {
			printk(KERN_ERR
				"VFS:Filesystem freeze failed\n");
			sb->s_writers.frozen = SB_UNFROZEN;
			sb_freeze_unlock(sb);
			wake_up(&sb->s_writers.wait_unfrozen);
			deactivate_locked_super(sb);
			return ret;
		}
	}
	/*
	 * For debugging purposes so that fs can warn if it sees write activity
	 * when frozen is set to SB_FREEZE_COMPLETE, and for thaw_super().
	 */
	sb->s_writers.frozen = SB_FREEZE_COMPLETE;
	lockdep_sb_freeze_release(sb);
	up_write(&sb->s_umount);
	return 0;
}
EXPORT_SYMBOL(freeze_super);
/**
 * thaw_super -- unlock filesystem
 * @sb: the super to thaw
 *
 * Unlocks the filesystem and marks it writeable again after freeze_super().
 */
static int thaw_super_locked(struct super_block *sb)
{
	int error;

	if (sb->s_writers.frozen != SB_FREEZE_COMPLETE) {
		up_write(&sb->s_umount);
		return -EINVAL;
	}

	if (sb_rdonly(sb)) {
		sb->s_writers.frozen = SB_UNFROZEN;
		goto out;
	}

	lockdep_sb_freeze_acquire(sb);

	if (sb->s_op->unfreeze_fs) {
		error = sb->s_op->unfreeze_fs(sb);
		if (error) {
			printk(KERN_ERR
				"VFS:Filesystem thaw failed\n");
			lockdep_sb_freeze_release(sb);
			up_write(&sb->s_umount);
			return error;
		}
	}

	sb->s_writers.frozen = SB_UNFROZEN;
	sb_freeze_unlock(sb);
out:
	wake_up(&sb->s_writers.wait_unfrozen);
	deactivate_locked_super(sb);
	return 0;
}

int thaw_super(struct super_block *sb)
{
	down_write(&sb->s_umount);
	return thaw_super_locked(sb);
}
EXPORT_SYMBOL(thaw_super);
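
/*
 * Illustrative sketch (not part of the original file): the FIFREEZE/FITHAW
 * ioctls are the usual entry points for this pair, roughly as below.
 */
#if 0
	error = freeze_super(sb);	/* returns -EBUSY if already frozen */
	if (error)
		return error;
	/* ... take the snapshot/backup while the fs is consistent ... */
	error = thaw_super(sb);		/* returns -EINVAL if not frozen */
#endif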