// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/fs/namespace.c
 *
 * (C) Copyright Al Viro 2000, 2001
 *
 * Based on code from fs/super.c, copyright Linus Torvalds and others.
 * Heavily rewritten.
 */
#include <linux/syscalls.h>
#include <linux/export.h>
#include <linux/capability.h>
#include <linux/mnt_namespace.h>
#include <linux/user_namespace.h>
#include <linux/namei.h>
#include <linux/security.h>
#include <linux/cred.h>
#include <linux/idr.h>
#include <linux/init.h>		/* init_rootfs */
#include <linux/fs_struct.h>	/* get_fs_root et.al. */
#include <linux/fsnotify.h>	/* fsnotify_vfsmount_delete */
#include <linux/file.h>
#include <linux/uaccess.h>
#include <linux/proc_ns.h>
#include <linux/magic.h>
#include <linux/memblock.h>
#include <linux/proc_fs.h>
#include <linux/task_work.h>
#include <linux/sched/task.h>
#include <uapi/linux/mount.h>
#include <linux/fs_context.h>
#include <linux/shmem_fs.h>
#include <linux/mnt_idmapping.h>
#include <linux/nospec.h>

#include "pnode.h"
#include "internal.h"
/* Maximum number of mounts in a mount namespace */
static unsigned int sysctl_mount_max __read_mostly = 100000;

static unsigned int m_hash_mask __ro_after_init;
static unsigned int m_hash_shift __ro_after_init;
static unsigned int mp_hash_mask __ro_after_init;
static unsigned int mp_hash_shift __ro_after_init;
static __initdata unsigned long mhash_entries;
static int __init set_mhash_entries(char *str)
{
	if (!str)
		return 0;
	mhash_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("mhash_entries=", set_mhash_entries);

static __initdata unsigned long mphash_entries;
static int __init set_mphash_entries(char *str)
{
	if (!str)
		return 0;
	mphash_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("mphash_entries=", set_mphash_entries);
static u64 event;
static DEFINE_IDA(mnt_id_ida);
static DEFINE_IDA(mnt_group_ida);

/* Don't allow confusion with old 32bit mount ID */
#define MNT_UNIQUE_ID_OFFSET (1ULL << 31)
static atomic64_t mnt_id_ctr = ATOMIC64_INIT(MNT_UNIQUE_ID_OFFSET);

static struct hlist_head *mount_hashtable __ro_after_init;
static struct hlist_head *mountpoint_hashtable __ro_after_init;
static struct kmem_cache *mnt_cache __ro_after_init;
static DECLARE_RWSEM(namespace_sem);
static HLIST_HEAD(unmounted);	/* protected by namespace_sem */
static LIST_HEAD(ex_mountpoints); /* protected by namespace_sem */
static DEFINE_RWLOCK(mnt_ns_tree_lock);
static struct rb_root mnt_ns_tree = RB_ROOT; /* protected by mnt_ns_tree_lock */
struct mount_kattr {
	unsigned int attr_set;
	unsigned int attr_clr;
	unsigned int propagation;
	unsigned int lookup_flags;
	bool recurse;
	struct user_namespace *mnt_userns;
	struct mnt_idmap *mnt_idmap;
};

/* /sys/fs */
struct kobject *fs_kobj __ro_after_init;
EXPORT_SYMBOL_GPL(fs_kobj);
/*
 * vfsmount lock may be taken for read to prevent changes to the
 * vfsmount hash, ie. during mountpoint lookups or walking back
 * up the tree.
 *
 * It should be taken for write in all cases where the vfsmount
 * tree or hash is modified or when a vfsmount structure is modified.
 */
__cacheline_aligned_in_smp DEFINE_SEQLOCK(mount_lock);
static int mnt_ns_cmp(u64 seq, const struct mnt_namespace *ns)
{
	u64 seq_b = ns->seq;

	if (seq < seq_b)
		return -1;
	if (seq > seq_b)
		return 1;
	return 0;
}

static inline struct mnt_namespace *node_to_mnt_ns(const struct rb_node *node)
{
	if (!node)
		return NULL;
	return rb_entry(node, struct mnt_namespace, mnt_ns_tree_node);
}

static bool mnt_ns_less(struct rb_node *a, const struct rb_node *b)
{
	struct mnt_namespace *ns_a = node_to_mnt_ns(a);
	struct mnt_namespace *ns_b = node_to_mnt_ns(b);
	u64 seq_a = ns_a->seq;

	return mnt_ns_cmp(seq_a, ns_b) < 0;
}
static void mnt_ns_tree_add(struct mnt_namespace *ns)
{
	guard(write_lock)(&mnt_ns_tree_lock);
	rb_add(&ns->mnt_ns_tree_node, &mnt_ns_tree, mnt_ns_less);
}

static void mnt_ns_release(struct mnt_namespace *ns)
{
	lockdep_assert_not_held(&mnt_ns_tree_lock);

	/* keep alive for {list,stat}mount() */
	if (refcount_dec_and_test(&ns->passive)) {
		put_user_ns(ns->user_ns);
		kfree(ns);
	}
}
DEFINE_FREE(mnt_ns_release, struct mnt_namespace *, if (_T) mnt_ns_release(_T))

static void mnt_ns_tree_remove(struct mnt_namespace *ns)
{
	/* remove from global mount namespace list */
	if (!is_anon_ns(ns)) {
		guard(write_lock)(&mnt_ns_tree_lock);
		rb_erase(&ns->mnt_ns_tree_node, &mnt_ns_tree);
	}

	mnt_ns_release(ns);
}
/*
 * Returns the mount namespace which either has the specified id, or has the
 * next smallest id after the specified one.
 */
static struct mnt_namespace *mnt_ns_find_id_at(u64 mnt_ns_id)
{
	struct rb_node *node = mnt_ns_tree.rb_node;
	struct mnt_namespace *ret = NULL;

	lockdep_assert_held(&mnt_ns_tree_lock);

	while (node) {
		struct mnt_namespace *n = node_to_mnt_ns(node);

		if (mnt_ns_id <= n->seq) {
			ret = node_to_mnt_ns(node);
			if (mnt_ns_id == n->seq)
				break;
			node = node->rb_left;
		} else {
			node = node->rb_right;
		}
	}
	return ret;
}

/*
 * Lookup a mount namespace by id and take a passive reference count. Taking a
 * passive reference means the mount namespace can be emptied if e.g., the last
 * task holding an active reference exits. To access the mounts of the
 * namespace the @namespace_sem must first be acquired. If the namespace has
 * already shut down before acquiring @namespace_sem, {list,stat}mount() will
 * see that the mount rbtree of the namespace is empty.
 */
static struct mnt_namespace *lookup_mnt_ns(u64 mnt_ns_id)
{
	struct mnt_namespace *ns;

	guard(read_lock)(&mnt_ns_tree_lock);
	ns = mnt_ns_find_id_at(mnt_ns_id);
	if (!ns || ns->seq != mnt_ns_id)
		return NULL;

	refcount_inc(&ns->passive);
	return ns;
}
static inline void lock_mount_hash(void)
{
	write_seqlock(&mount_lock);
}

static inline void unlock_mount_hash(void)
{
	write_sequnlock(&mount_lock);
}

static inline struct hlist_head *m_hash(struct vfsmount *mnt, struct dentry *dentry)
{
	unsigned long tmp = ((unsigned long)mnt / L1_CACHE_BYTES);
	tmp += ((unsigned long)dentry / L1_CACHE_BYTES);
	tmp = tmp + (tmp >> m_hash_shift);
	return &mount_hashtable[tmp & m_hash_mask];
}

static inline struct hlist_head *mp_hash(struct dentry *dentry)
{
	unsigned long tmp = ((unsigned long)dentry / L1_CACHE_BYTES);
	tmp = tmp + (tmp >> mp_hash_shift);
	return &mountpoint_hashtable[tmp & mp_hash_mask];
}
static int mnt_alloc_id(struct mount *mnt)
{
	int res = ida_alloc(&mnt_id_ida, GFP_KERNEL);

	if (res < 0)
		return res;
	mnt->mnt_id = res;
	mnt->mnt_id_unique = atomic64_inc_return(&mnt_id_ctr);
	return 0;
}

static void mnt_free_id(struct mount *mnt)
{
	ida_free(&mnt_id_ida, mnt->mnt_id);
}

/*
 * Allocate a new peer group ID
 */
static int mnt_alloc_group_id(struct mount *mnt)
{
	int res = ida_alloc_min(&mnt_group_ida, 1, GFP_KERNEL);

	if (res < 0)
		return res;
	mnt->mnt_group_id = res;
	return 0;
}

/*
 * Release a peer group ID
 */
void mnt_release_group_id(struct mount *mnt)
{
	ida_free(&mnt_group_ida, mnt->mnt_group_id);
	mnt->mnt_group_id = 0;
}
/*
 * vfsmount lock must be held for read
 */
static inline void mnt_add_count(struct mount *mnt, int n)
{
#ifdef CONFIG_SMP
	this_cpu_add(mnt->mnt_pcp->mnt_count, n);
#else
	preempt_disable();
	mnt->mnt_count += n;
	preempt_enable();
#endif
}

/*
 * vfsmount lock must be held for write
 */
int mnt_get_count(struct mount *mnt)
{
#ifdef CONFIG_SMP
	int count = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_count;
	}

	return count;
#else
	return mnt->mnt_count;
#endif
}
static struct mount *alloc_vfsmnt(const char *name)
{
	struct mount *mnt = kmem_cache_zalloc(mnt_cache, GFP_KERNEL);
	if (mnt) {
		int err;

		err = mnt_alloc_id(mnt);
		if (err)
			goto out_free_cache;

		if (name) {
			mnt->mnt_devname = kstrdup_const(name,
							 GFP_KERNEL_ACCOUNT);
			if (!mnt->mnt_devname)
				goto out_free_id;
		}

#ifdef CONFIG_SMP
		mnt->mnt_pcp = alloc_percpu(struct mnt_pcp);
		if (!mnt->mnt_pcp)
			goto out_free_devname;

		this_cpu_add(mnt->mnt_pcp->mnt_count, 1);
#else
		mnt->mnt_count = 1;
		mnt->mnt_writers = 0;
#endif

		INIT_HLIST_NODE(&mnt->mnt_hash);
		INIT_LIST_HEAD(&mnt->mnt_child);
		INIT_LIST_HEAD(&mnt->mnt_mounts);
		INIT_LIST_HEAD(&mnt->mnt_list);
		INIT_LIST_HEAD(&mnt->mnt_expire);
		INIT_LIST_HEAD(&mnt->mnt_share);
		INIT_LIST_HEAD(&mnt->mnt_slave_list);
		INIT_LIST_HEAD(&mnt->mnt_slave);
		INIT_HLIST_NODE(&mnt->mnt_mp_list);
		INIT_LIST_HEAD(&mnt->mnt_umounting);
		INIT_HLIST_HEAD(&mnt->mnt_stuck_children);
		mnt->mnt.mnt_idmap = &nop_mnt_idmap;
	}
	return mnt;

#ifdef CONFIG_SMP
out_free_devname:
	kfree_const(mnt->mnt_devname);
#endif
out_free_id:
	mnt_free_id(mnt);
out_free_cache:
	kmem_cache_free(mnt_cache, mnt);
	return NULL;
}
/*
 * Most r/o checks on a fs are for operations that take
 * discrete amounts of time, like a write() or unlink().
 * We must keep track of when those operations start
 * (for permission checks) and when they end, so that
 * we can determine when writes are able to occur to
 * a filesystem.
 */
/*
 * __mnt_is_readonly: check whether a mount is read-only
 * @mnt: the mount to check for its write status
 *
 * This shouldn't be used directly outside of the VFS.
 * It does not guarantee that the filesystem will stay
 * r/w, just that it is right *now*. This can not and
 * should not be used in place of IS_RDONLY(inode).
 * mnt_want/drop_write() will _keep_ the filesystem
 * r/w.
 */
bool __mnt_is_readonly(struct vfsmount *mnt)
{
	return (mnt->mnt_flags & MNT_READONLY) || sb_rdonly(mnt->mnt_sb);
}
EXPORT_SYMBOL_GPL(__mnt_is_readonly);
static inline void mnt_inc_writers(struct mount *mnt)
{
#ifdef CONFIG_SMP
	this_cpu_inc(mnt->mnt_pcp->mnt_writers);
#else
	mnt->mnt_writers++;
#endif
}

static inline void mnt_dec_writers(struct mount *mnt)
{
#ifdef CONFIG_SMP
	this_cpu_dec(mnt->mnt_pcp->mnt_writers);
#else
	mnt->mnt_writers--;
#endif
}

static unsigned int mnt_get_writers(struct mount *mnt)
{
#ifdef CONFIG_SMP
	unsigned int count = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_writers;
	}

	return count;
#else
	return mnt->mnt_writers;
#endif
}
static int mnt_is_readonly(struct vfsmount *mnt)
{
	if (READ_ONCE(mnt->mnt_sb->s_readonly_remount))
		return 1;
	/*
	 * The barrier pairs with the barrier in sb_start_ro_state_change()
	 * making sure if we don't see s_readonly_remount set yet, we also will
	 * not see any superblock / mount flag changes done by remount.
	 * It also pairs with the barrier in sb_end_ro_state_change()
	 * assuring that if we see s_readonly_remount already cleared, we will
	 * see the values of superblock / mount flags updated by remount.
	 */
	smp_rmb();
	return __mnt_is_readonly(mnt);
}
/*
 * Most r/o & frozen checks on a fs are for operations that take discrete
 * amounts of time, like a write() or unlink().  We must keep track of when
 * those operations start (for permission checks) and when they end, so that we
 * can determine when writes are able to occur to a filesystem.
 */
/**
 * mnt_get_write_access - get write access to a mount without freeze protection
 * @m: the mount on which to take a write
 *
 * This tells the low-level filesystem that a write is about to be performed to
 * it, and makes sure that writes are allowed (mount is read-write) before
 * returning success. This operation does not protect against filesystem being
 * frozen. When the write operation is finished, mnt_put_write_access() must be
 * called. This is effectively a refcount.
 */
int mnt_get_write_access(struct vfsmount *m)
{
	struct mount *mnt = real_mount(m);
	int ret = 0;

	preempt_disable();
	mnt_inc_writers(mnt);
	/*
	 * The store to mnt_inc_writers must be visible before we pass
	 * MNT_WRITE_HOLD loop below, so that the slowpath can see our
	 * incremented count after it has set MNT_WRITE_HOLD.
	 */
	smp_mb();
	might_lock(&mount_lock.lock);
	while (READ_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD) {
		if (!IS_ENABLED(CONFIG_PREEMPT_RT)) {
			cpu_relax();
		} else {
			/*
			 * This prevents priority inversion, if the task
			 * setting MNT_WRITE_HOLD got preempted on a remote
			 * CPU, and it prevents life lock if the task setting
			 * MNT_WRITE_HOLD has a lower priority and is bound to
			 * the same CPU as the task that is spinning here.
			 */
			preempt_enable();
			lock_mount_hash();
			unlock_mount_hash();
			preempt_disable();
		}
	}
	/*
	 * The barrier pairs with the barrier sb_start_ro_state_change() making
	 * sure that if we see MNT_WRITE_HOLD cleared, we will also see
	 * s_readonly_remount set (or even SB_RDONLY / MNT_READONLY flags) in
	 * mnt_is_readonly() and bail in case we are racing with remount
	 * read-only.
	 */
	smp_rmb();
	if (mnt_is_readonly(m)) {
		mnt_dec_writers(mnt);
		ret = -EROFS;
	}
	preempt_enable();

	return ret;
}
EXPORT_SYMBOL_GPL(mnt_get_write_access);
/**
 * mnt_want_write - get write access to a mount
 * @m: the mount on which to take a write
 *
 * This tells the low-level filesystem that a write is about to be performed to
 * it, and makes sure that writes are allowed (mount is read-write, filesystem
 * is not frozen) before returning success. When the write operation is
 * finished, mnt_drop_write() must be called. This is effectively a refcount.
 */
int mnt_want_write(struct vfsmount *m)
{
	int ret;

	sb_start_write(m->mnt_sb);
	ret = mnt_get_write_access(m);
	if (ret)
		sb_end_write(m->mnt_sb);
	return ret;
}
EXPORT_SYMBOL_GPL(mnt_want_write);
/**
 * mnt_get_write_access_file - get write access to a file's mount
 * @file: the file whose mount on which to take a write
 *
 * This is like mnt_get_write_access, but if @file is already open for write it
 * skips incrementing mnt_writers (since the open file already has a reference)
 * and instead only does the check for emergency r/o remounts.  This must be
 * paired with mnt_put_write_access_file.
 */
int mnt_get_write_access_file(struct file *file)
{
	if (file->f_mode & FMODE_WRITER) {
		/*
		 * Superblock may have become readonly while there are still
		 * writable fd's, e.g. due to a fs error with errors=remount-ro
		 */
		if (__mnt_is_readonly(file->f_path.mnt))
			return -EROFS;
		return 0;
	}
	return mnt_get_write_access(file->f_path.mnt);
}
/**
 * mnt_want_write_file - get write access to a file's mount
 * @file: the file whose mount on which to take a write
 *
 * This is like mnt_want_write, but if the file is already open for writing it
 * skips incrementing mnt_writers (since the open file already has a reference)
 * and instead only does the freeze protection and the check for emergency r/o
 * remounts.  This must be paired with mnt_drop_write_file.
 */
int mnt_want_write_file(struct file *file)
{
	int ret;

	sb_start_write(file_inode(file)->i_sb);
	ret = mnt_get_write_access_file(file);
	if (ret)
		sb_end_write(file_inode(file)->i_sb);
	return ret;
}
EXPORT_SYMBOL_GPL(mnt_want_write_file);
/**
 * mnt_put_write_access - give up write access to a mount
 * @mnt: the mount on which to give up write access
 *
 * Tells the low-level filesystem that we are done
 * performing writes to it.  Must be matched with
 * mnt_get_write_access() call above.
 */
void mnt_put_write_access(struct vfsmount *mnt)
{
	preempt_disable();
	mnt_dec_writers(real_mount(mnt));
	preempt_enable();
}
EXPORT_SYMBOL_GPL(mnt_put_write_access);

/**
 * mnt_drop_write - give up write access to a mount
 * @mnt: the mount on which to give up write access
 *
 * Tells the low-level filesystem that we are done performing writes to it and
 * also allows filesystem to be frozen again.  Must be matched with
 * mnt_want_write() call above.
 */
void mnt_drop_write(struct vfsmount *mnt)
{
	mnt_put_write_access(mnt);
	sb_end_write(mnt->mnt_sb);
}
EXPORT_SYMBOL(mnt_drop_write);
void mnt_put_write_access_file(struct file *file)
{
	if (!(file->f_mode & FMODE_WRITER))
		mnt_put_write_access(file->f_path.mnt);
}

void mnt_drop_write_file(struct file *file)
{
	mnt_put_write_access_file(file);
	sb_end_write(file_inode(file)->i_sb);
}
EXPORT_SYMBOL(mnt_drop_write_file);
/**
 * mnt_hold_writers - prevent write access to the given mount
 * @mnt: mnt to prevent write access to
 *
 * Prevents write access to @mnt if there are no active writers for @mnt.
 * This function needs to be called and return successfully before changing
 * properties of @mnt that need to remain stable for callers with write access
 * to @mnt.
 *
 * After this function has been called successfully callers must pair it with
 * a call to mnt_unhold_writers() in order to stop preventing write access to
 * @mnt.
 *
 * Context: This function expects lock_mount_hash() to be held serializing
 *          setting MNT_WRITE_HOLD.
 * Return: On success 0 is returned.
 *	   On error, -EBUSY is returned.
 */
static inline int mnt_hold_writers(struct mount *mnt)
{
	mnt->mnt.mnt_flags |= MNT_WRITE_HOLD;
	/*
	 * After storing MNT_WRITE_HOLD, we'll read the counters. This store
	 * should be visible before we do.
	 */
	smp_mb();

	/*
	 * With writers on hold, if this value is zero, then there are
	 * definitely no active writers (although held writers may subsequently
	 * increment the count, they'll have to wait, and decrement it after
	 * seeing MNT_READONLY).
	 *
	 * It is OK to have counter incremented on one CPU and decremented on
	 * another: the sum will add up correctly. The danger would be when we
	 * sum up each counter, if we read a counter before it is incremented,
	 * but then read another CPU's count which it has been subsequently
	 * decremented from -- we would see more decrements than we should.
	 * MNT_WRITE_HOLD protects against this scenario, because
	 * mnt_want_write first increments count, then smp_mb, then spins on
	 * MNT_WRITE_HOLD, so it can't be decremented by another CPU while
	 * we're counting up here.
	 */
	if (mnt_get_writers(mnt) > 0)
		return -EBUSY;

	return 0;
}
/**
 * mnt_unhold_writers - stop preventing write access to the given mount
 * @mnt: mnt to stop preventing write access to
 *
 * Stop preventing write access to @mnt allowing callers to gain write access
 * to @mnt again.
 *
 * This function can only be called after a successful call to
 * mnt_hold_writers().
 *
 * Context: This function expects lock_mount_hash() to be held.
 */
static inline void mnt_unhold_writers(struct mount *mnt)
{
	/*
	 * MNT_READONLY must become visible before ~MNT_WRITE_HOLD, so writers
	 * that become unheld will see MNT_READONLY.
	 */
	smp_wmb();
	mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD;
}
static int mnt_make_readonly(struct mount *mnt)
{
	int ret;

	ret = mnt_hold_writers(mnt);
	if (!ret)
		mnt->mnt.mnt_flags |= MNT_READONLY;
	mnt_unhold_writers(mnt);
	return ret;
}
int sb_prepare_remount_readonly(struct super_block *sb)
{
	struct mount *mnt;
	int err = 0;

	/* Racy optimization.  Recheck the counter under MNT_WRITE_HOLD */
	if (atomic_long_read(&sb->s_remove_count))
		return -EBUSY;

	lock_mount_hash();
	list_for_each_entry(mnt, &sb->s_mounts, mnt_instance) {
		if (!(mnt->mnt.mnt_flags & MNT_READONLY)) {
			err = mnt_hold_writers(mnt);
			if (err)
				break;
		}
	}
	if (!err && atomic_long_read(&sb->s_remove_count))
		err = -EBUSY;

	if (!err)
		sb_start_ro_state_change(sb);
	list_for_each_entry(mnt, &sb->s_mounts, mnt_instance) {
		if (mnt->mnt.mnt_flags & MNT_WRITE_HOLD)
			mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD;
	}
	unlock_mount_hash();

	return err;
}
static void free_vfsmnt(struct mount *mnt)
{
	mnt_idmap_put(mnt_idmap(&mnt->mnt));
	kfree_const(mnt->mnt_devname);
#ifdef CONFIG_SMP
	free_percpu(mnt->mnt_pcp);
#endif
	kmem_cache_free(mnt_cache, mnt);
}

static void delayed_free_vfsmnt(struct rcu_head *head)
{
	free_vfsmnt(container_of(head, struct mount, mnt_rcu));
}
/* call under rcu_read_lock */
int __legitimize_mnt(struct vfsmount *bastard, unsigned seq)
{
	struct mount *mnt;
	if (read_seqretry(&mount_lock, seq))
		return 1;
	if (bastard == NULL)
		return 0;
	mnt = real_mount(bastard);
	mnt_add_count(mnt, 1);
	smp_mb();			// see mntput_no_expire()
	if (likely(!read_seqretry(&mount_lock, seq)))
		return 0;
	if (bastard->mnt_flags & MNT_SYNC_UMOUNT) {
		mnt_add_count(mnt, -1);
		return 1;
	}
	lock_mount_hash();
	if (unlikely(bastard->mnt_flags & MNT_DOOMED)) {
		mnt_add_count(mnt, -1);
		unlock_mount_hash();
		return 1;
	}
	unlock_mount_hash();
	/* caller will mntput() */
	return -1;
}

/* call under rcu_read_lock */
static bool legitimize_mnt(struct vfsmount *bastard, unsigned seq)
{
	int res = __legitimize_mnt(bastard, seq);
	if (likely(!res))
		return true;
	if (unlikely(res < 0)) {
		rcu_read_unlock();
		mntput(bastard);
		rcu_read_lock();
	}
	return false;
}
/**
 * __lookup_mnt - find first child mount
 *
 * @mnt:	parent mount
 * @dentry:	mountpoint
 *
 * If @mnt has a child mount @c mounted @dentry find and return it.
 *
 * Note that the child mount @c need not be unique. There are cases
 * where shadow mounts are created. For example, during mount
 * propagation when a source mount @mnt whose root got overmounted by a
 * mount @o after path lookup but before @namespace_sem could be
 * acquired gets copied and propagated. So @mnt gets copied including
 * @o. When @mnt is propagated to a destination mount @d that already
 * has another mount @n mounted at the same mountpoint then the source
 * mount @mnt will be tucked beneath @n, i.e., @n will be mounted on
 * @mnt and @mnt mounted on @d. Now both @n and @o are mounted at @mnt
 * on @d.
 *
 * Return: The first child of @mnt mounted @dentry or NULL.
 */
struct mount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry)
{
	struct hlist_head *head = m_hash(mnt, dentry);
	struct mount *p;

	hlist_for_each_entry_rcu(p, head, mnt_hash)
		if (&p->mnt_parent->mnt == mnt && p->mnt_mountpoint == dentry)
			return p;
	return NULL;
}
/*
 * lookup_mnt - Return the first child mount mounted at path
 *
 * "First" means first mounted chronologically.  If you create the
 * following mounts:
 *
 * mount /dev/sda1 /mnt
 * mount /dev/sda2 /mnt
 * mount /dev/sda3 /mnt
 *
 * Then lookup_mnt() on the base /mnt dentry in the root mount will
 * return successively the root dentry and vfsmount of /dev/sda1, then
 * /dev/sda2, then /dev/sda3, then NULL.
 *
 * lookup_mnt takes a reference to the found vfsmount.
 */
struct vfsmount *lookup_mnt(const struct path *path)
{
	struct mount *child_mnt;
	struct vfsmount *m;
	unsigned seq;

	rcu_read_lock();
	do {
		seq = read_seqbegin(&mount_lock);
		child_mnt = __lookup_mnt(path->mnt, path->dentry);
		m = child_mnt ? &child_mnt->mnt : NULL;
	} while (!legitimize_mnt(m, seq));
	rcu_read_unlock();
	return m;
}
/*
 * __is_local_mountpoint - Test to see if dentry is a mountpoint in the
 *                         current mount namespace.
 *
 * The common case is dentries are not mountpoints at all and that
 * test is handled inline.  For the slow case when we are actually
 * dealing with a mountpoint of some kind, walk through all of the
 * mounts in the current mount namespace and test to see if the dentry
 * is a mountpoint.
 *
 * The mount_hashtable is not usable in the context because we
 * need to identify all mounts that may be in the current mount
 * namespace not just a mount that happens to have some specified
 * parent mount.
 */
bool __is_local_mountpoint(struct dentry *dentry)
{
	struct mnt_namespace *ns = current->nsproxy->mnt_ns;
	struct mount *mnt, *n;
	bool is_covered = false;

	down_read(&namespace_sem);
	rbtree_postorder_for_each_entry_safe(mnt, n, &ns->mounts, mnt_node) {
		is_covered = (mnt->mnt_mountpoint == dentry);
		if (is_covered)
			break;
	}
	up_read(&namespace_sem);

	return is_covered;
}
static struct mountpoint *lookup_mountpoint(struct dentry *dentry)
{
	struct hlist_head *chain = mp_hash(dentry);
	struct mountpoint *mp;

	hlist_for_each_entry(mp, chain, m_hash) {
		if (mp->m_dentry == dentry) {
			mp->m_count++;
			return mp;
		}
	}
	return NULL;
}
static struct mountpoint *get_mountpoint(struct dentry *dentry)
{
	struct mountpoint *mp, *new = NULL;
	int ret;

	if (d_mountpoint(dentry)) {
		/* might be worth a WARN_ON() */
		if (d_unlinked(dentry))
			return ERR_PTR(-ENOENT);
mountpoint:
		read_seqlock_excl(&mount_lock);
		mp = lookup_mountpoint(dentry);
		read_sequnlock_excl(&mount_lock);
		if (mp)
			goto done;
	}

	if (!new)
		new = kmalloc(sizeof(struct mountpoint), GFP_KERNEL);
	if (!new)
		return ERR_PTR(-ENOMEM);

	/* Exactly one process may set d_mounted */
	ret = d_set_mounted(dentry);

	/* Someone else set d_mounted? */
	if (ret == -EBUSY)
		goto mountpoint;

	/* The dentry is not available as a mountpoint? */
	mp = ERR_PTR(ret);
	if (ret)
		goto done;

	/* Add the new mountpoint to the hash table */
	read_seqlock_excl(&mount_lock);
	new->m_dentry = dget(dentry);
	new->m_count = 1;
	hlist_add_head(&new->m_hash, mp_hash(dentry));
	INIT_HLIST_HEAD(&new->m_list);
	read_sequnlock_excl(&mount_lock);

	mp = new;
	new = NULL;
done:
	kfree(new);
	return mp;
}
/*
 * vfsmount lock must be held.  Additionally, the caller is responsible
 * for serializing calls for given disposal list.
 */
static void __put_mountpoint(struct mountpoint *mp, struct list_head *list)
{
	if (!--mp->m_count) {
		struct dentry *dentry = mp->m_dentry;
		BUG_ON(!hlist_empty(&mp->m_list));
		spin_lock(&dentry->d_lock);
		dentry->d_flags &= ~DCACHE_MOUNTED;
		spin_unlock(&dentry->d_lock);
		dput_to_list(dentry, list);
		hlist_del(&mp->m_hash);
		kfree(mp);
	}
}

/* called with namespace_lock and vfsmount lock */
static void put_mountpoint(struct mountpoint *mp)
{
	__put_mountpoint(mp, &ex_mountpoints);
}

static inline int check_mnt(struct mount *mnt)
{
	return mnt->mnt_ns == current->nsproxy->mnt_ns;
}
/*
 * vfsmount lock must be held for write
 */
static void touch_mnt_namespace(struct mnt_namespace *ns)
{
	if (ns) {
		ns->event = ++event;
		wake_up_interruptible(&ns->poll);
	}
}

/*
 * vfsmount lock must be held for write
 */
static void __touch_mnt_namespace(struct mnt_namespace *ns)
{
	if (ns && ns->event != event) {
		ns->event = ++event;
		wake_up_interruptible(&ns->poll);
	}
}
/*
 * vfsmount lock must be held for write
 */
static struct mountpoint *unhash_mnt(struct mount *mnt)
{
	struct mountpoint *mp;
	mnt->mnt_parent = mnt;
	mnt->mnt_mountpoint = mnt->mnt.mnt_root;
	list_del_init(&mnt->mnt_child);
	hlist_del_init_rcu(&mnt->mnt_hash);
	hlist_del_init(&mnt->mnt_mp_list);
	mp = mnt->mnt_mp;
	mnt->mnt_mp = NULL;
	return mp;
}

/*
 * vfsmount lock must be held for write
 */
static void umount_mnt(struct mount *mnt)
{
	put_mountpoint(unhash_mnt(mnt));
}
/*
 * vfsmount lock must be held for write
 */
void mnt_set_mountpoint(struct mount *mnt,
			struct mountpoint *mp,
			struct mount *child_mnt)
{
	mp->m_count++;
	mnt_add_count(mnt, 1);	/* essentially, that's mntget */
	child_mnt->mnt_mountpoint = mp->m_dentry;
	child_mnt->mnt_parent = mnt;
	child_mnt->mnt_mp = mp;
	hlist_add_head(&child_mnt->mnt_mp_list, &mp->m_list);
}
/**
 * mnt_set_mountpoint_beneath - mount a mount beneath another one
 *
 * @new_parent: the source mount
 * @top_mnt:    the mount beneath which @new_parent is mounted
 * @new_mp:     the new mountpoint of @top_mnt on @new_parent
 *
 * Remove @top_mnt from its current mountpoint @top_mnt->mnt_mp and
 * parent @top_mnt->mnt_parent and mount it on top of @new_parent at
 * @new_mp. And mount @new_parent on the old parent and old
 * mountpoint of @top_mnt.
 *
 * Context: This function expects namespace_lock() and lock_mount_hash()
 *          to have been acquired in that order.
 */
static void mnt_set_mountpoint_beneath(struct mount *new_parent,
				       struct mount *top_mnt,
				       struct mountpoint *new_mp)
{
	struct mount *old_top_parent = top_mnt->mnt_parent;
	struct mountpoint *old_top_mp = top_mnt->mnt_mp;

	mnt_set_mountpoint(old_top_parent, old_top_mp, new_parent);
	mnt_change_mountpoint(new_parent, new_mp, top_mnt);
}
static void __attach_mnt(struct mount *mnt, struct mount *parent)
{
	hlist_add_head_rcu(&mnt->mnt_hash,
			   m_hash(&parent->mnt, mnt->mnt_mountpoint));
	list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
}

/**
 * attach_mnt - mount a mount, attach to @mount_hashtable and parent's
 *		list of child mounts
 * @parent:  the parent
 * @mnt:     the new mount
 * @mp:      the new mountpoint
 * @beneath: whether to mount @mnt beneath or on top of @parent
 *
 * If @beneath is false, mount @mnt at @mp on @parent. Then attach @mnt
 * to @parent's child mount list and to @mount_hashtable.
 *
 * If @beneath is true, remove @mnt from its current parent and
 * mountpoint and mount it on @mp on @parent, and mount @parent on the
 * old parent and old mountpoint of @mnt. Finally, attach @parent to
 * @mount_hashtable and @parent->mnt_parent->mnt_mounts.
 *
 * Note, when __attach_mnt() is called @mnt->mnt_parent already points
 * to the correct parent.
 *
 * Context: This function expects namespace_lock() and lock_mount_hash()
 *          to have been acquired in that order.
 */
static void attach_mnt(struct mount *mnt, struct mount *parent,
		       struct mountpoint *mp, bool beneath)
{
	if (beneath)
		mnt_set_mountpoint_beneath(mnt, parent, mp);
	else
		mnt_set_mountpoint(parent, mp, mnt);
	/*
	 * Note, @mnt->mnt_parent has to be used. If @mnt was mounted
	 * beneath @parent then @mnt will need to be attached to
	 * @parent's old parent, not @parent. IOW, @mnt->mnt_parent
	 * isn't the same mount as @parent.
	 */
	__attach_mnt(mnt, mnt->mnt_parent);
}
void mnt_change_mountpoint(struct mount *parent, struct mountpoint *mp, struct mount *mnt)
{
	struct mountpoint *old_mp = mnt->mnt_mp;
	struct mount *old_parent = mnt->mnt_parent;

	list_del_init(&mnt->mnt_child);
	hlist_del_init(&mnt->mnt_mp_list);
	hlist_del_init_rcu(&mnt->mnt_hash);

	attach_mnt(mnt, parent, mp, false);

	put_mountpoint(old_mp);
	mnt_add_count(old_parent, -1);
}

static inline struct mount *node_to_mount(struct rb_node *node)
{
	return node ? rb_entry(node, struct mount, mnt_node) : NULL;
}
static void mnt_add_to_ns(struct mnt_namespace *ns, struct mount *mnt)
{
	struct rb_node **link = &ns->mounts.rb_node;
	struct rb_node *parent = NULL;

	WARN_ON(mnt->mnt.mnt_flags & MNT_ONRB);

	while (*link) {
		parent = *link;
		if (mnt->mnt_id_unique < node_to_mount(parent)->mnt_id_unique)
			link = &parent->rb_left;
		else
			link = &parent->rb_right;
	}
	rb_link_node(&mnt->mnt_node, parent, link);
	rb_insert_color(&mnt->mnt_node, &ns->mounts);
	mnt->mnt.mnt_flags |= MNT_ONRB;
}
/*
 * vfsmount lock must be held for write
 */
static void commit_tree(struct mount *mnt)
{
	struct mount *parent = mnt->mnt_parent;
	struct mount *m;
	LIST_HEAD(head);
	struct mnt_namespace *n = parent->mnt_ns;

	BUG_ON(parent == mnt);

	list_add_tail(&head, &mnt->mnt_list);
	while (!list_empty(&head)) {
		m = list_first_entry(&head, typeof(*m), mnt_list);
		list_del(&m->mnt_list);

		mnt_add_to_ns(n, m);
	}
	n->nr_mounts += n->pending_mounts;
	n->pending_mounts = 0;

	__attach_mnt(mnt, parent);
	touch_mnt_namespace(n);
}
static struct mount *next_mnt(struct mount *p, struct mount *root)
{
	struct list_head *next = p->mnt_mounts.next;
	if (next == &p->mnt_mounts) {
		while (1) {
			if (p == root)
				return NULL;
			next = p->mnt_child.next;
			if (next != &p->mnt_parent->mnt_mounts)
				break;
			p = p->mnt_parent;
		}
	}
	return list_entry(next, struct mount, mnt_child);
}

static struct mount *skip_mnt_tree(struct mount *p)
{
	struct list_head *prev = p->mnt_mounts.prev;
	while (prev != &p->mnt_mounts) {
		p = list_entry(prev, struct mount, mnt_child);
		prev = p->mnt_mounts.prev;
	}
	return p;
}
/**
 * vfs_create_mount - Create a mount for a configured superblock
 * @fc: The configuration context with the superblock attached
 *
 * Create a mount to an already configured superblock.  If necessary, the
 * caller should invoke vfs_get_tree() before calling this.
 *
 * Note that this does not attach the mount to anything.
 */
struct vfsmount *vfs_create_mount(struct fs_context *fc)
{
	struct mount *mnt;

	if (!fc->root)
		return ERR_PTR(-EINVAL);

	mnt = alloc_vfsmnt(fc->source ?: "none");
	if (!mnt)
		return ERR_PTR(-ENOMEM);

	if (fc->sb_flags & SB_KERNMOUNT)
		mnt->mnt.mnt_flags = MNT_INTERNAL;

	atomic_inc(&fc->root->d_sb->s_active);
	mnt->mnt.mnt_sb		= fc->root->d_sb;
	mnt->mnt.mnt_root	= dget(fc->root);
	mnt->mnt_mountpoint	= mnt->mnt.mnt_root;
	mnt->mnt_parent		= mnt;

	lock_mount_hash();
	list_add_tail(&mnt->mnt_instance, &mnt->mnt.mnt_sb->s_mounts);
	unlock_mount_hash();
	return &mnt->mnt;
}
EXPORT_SYMBOL(vfs_create_mount);

struct vfsmount *fc_mount(struct fs_context *fc)
{
	int err = vfs_get_tree(fc);
	if (!err) {
		up_write(&fc->root->d_sb->s_umount);
		return vfs_create_mount(fc);
	}
	return ERR_PTR(err);
}
EXPORT_SYMBOL(fc_mount);
struct vfsmount *vfs_kern_mount(struct file_system_type *type,
				int flags, const char *name,
				void *data)
{
	struct fs_context *fc;
	struct vfsmount *mnt;
	int ret = 0;

	if (!type)
		return ERR_PTR(-EINVAL);

	fc = fs_context_for_mount(type, flags);
	if (IS_ERR(fc))
		return ERR_CAST(fc);

	if (name)
		ret = vfs_parse_fs_string(fc, "source",
					  name, strlen(name));
	if (!ret)
		ret = parse_monolithic_mount_data(fc, data);
	if (!ret)
		mnt = fc_mount(fc);
	else
		mnt = ERR_PTR(ret);

	put_fs_context(fc);
	return mnt;
}
EXPORT_SYMBOL_GPL(vfs_kern_mount);
struct vfsmount *
vfs_submount(const struct dentry *mountpoint, struct file_system_type *type,
	     const char *name, void *data)
{
	/* Until it is worked out how to pass the user namespace
	 * through from the parent mount to the submount don't support
	 * unprivileged mounts with submounts.
	 */
	if (mountpoint->d_sb->s_user_ns != &init_user_ns)
		return ERR_PTR(-EPERM);

	return vfs_kern_mount(type, SB_SUBMOUNT, name, data);
}
EXPORT_SYMBOL_GPL(vfs_submount);
static struct mount *clone_mnt(struct mount *old, struct dentry *root,
					int flag)
{
	struct super_block *sb = old->mnt.mnt_sb;
	struct mount *mnt;
	int err;

	mnt = alloc_vfsmnt(old->mnt_devname);
	if (!mnt)
		return ERR_PTR(-ENOMEM);

	if (flag & (CL_SLAVE | CL_PRIVATE | CL_SHARED_TO_SLAVE))
		mnt->mnt_group_id = 0; /* not a peer of original */
	else
		mnt->mnt_group_id = old->mnt_group_id;

	if ((flag & CL_MAKE_SHARED) && !mnt->mnt_group_id) {
		err = mnt_alloc_group_id(mnt);
		if (err)
			goto out_free;
	}

	mnt->mnt.mnt_flags = old->mnt.mnt_flags;
	mnt->mnt.mnt_flags &= ~(MNT_WRITE_HOLD|MNT_MARKED|MNT_INTERNAL|MNT_ONRB);

	atomic_inc(&sb->s_active);
	mnt->mnt.mnt_idmap = mnt_idmap_get(mnt_idmap(&old->mnt));

	mnt->mnt.mnt_sb = sb;
	mnt->mnt.mnt_root = dget(root);
	mnt->mnt_mountpoint = mnt->mnt.mnt_root;
	mnt->mnt_parent = mnt;
	lock_mount_hash();
	list_add_tail(&mnt->mnt_instance, &sb->s_mounts);
	unlock_mount_hash();

	if ((flag & CL_SLAVE) ||
	    ((flag & CL_SHARED_TO_SLAVE) && IS_MNT_SHARED(old))) {
		list_add(&mnt->mnt_slave, &old->mnt_slave_list);
		mnt->mnt_master = old;
		CLEAR_MNT_SHARED(mnt);
	} else if (!(flag & CL_PRIVATE)) {
		if ((flag & CL_MAKE_SHARED) || IS_MNT_SHARED(old))
			list_add(&mnt->mnt_share, &old->mnt_share);
		if (IS_MNT_SLAVE(old))
			list_add(&mnt->mnt_slave, &old->mnt_slave);
		mnt->mnt_master = old->mnt_master;
	} else {
		CLEAR_MNT_SHARED(mnt);
	}
	if (flag & CL_MAKE_SHARED)
		set_mnt_shared(mnt);

	/* stick the duplicate mount on the same expiry list
	 * as the original if that was on one */
	if (flag & CL_EXPIRE) {
		if (!list_empty(&old->mnt_expire))
			list_add(&mnt->mnt_expire, &old->mnt_expire);
	}

	return mnt;

 out_free:
	mnt_free_id(mnt);
	free_vfsmnt(mnt);
	return ERR_PTR(err);
}
static void cleanup_mnt(struct mount *mnt)
{
	struct hlist_node *p;
	struct mount *m;
	/*
	 * The warning here probably indicates that somebody messed
	 * up a mnt_want/drop_write() pair.  If this happens, the
	 * filesystem was probably unable to make r/w->r/o transitions.
	 * The locking used to deal with mnt_count decrement provides barriers,
	 * so mnt_get_writers() below is safe.
	 */
	WARN_ON(mnt_get_writers(mnt));
	if (unlikely(mnt->mnt_pins.first))
		mnt_pin_kill(mnt);
	hlist_for_each_entry_safe(m, p, &mnt->mnt_stuck_children, mnt_umount) {
		hlist_del(&m->mnt_umount);
		mntput(&m->mnt);
	}
	fsnotify_vfsmount_delete(&mnt->mnt);
	dput(mnt->mnt.mnt_root);
	deactivate_super(mnt->mnt.mnt_sb);
	mnt_free_id(mnt);
	call_rcu(&mnt->mnt_rcu, delayed_free_vfsmnt);
}

static void __cleanup_mnt(struct rcu_head *head)
{
	cleanup_mnt(container_of(head, struct mount, mnt_rcu));
}

static LLIST_HEAD(delayed_mntput_list);
static void delayed_mntput(struct work_struct *unused)
{
	struct llist_node *node = llist_del_all(&delayed_mntput_list);
	struct mount *m, *t;

	llist_for_each_entry_safe(m, t, node, mnt_llist)
		cleanup_mnt(m);
}
static DECLARE_DELAYED_WORK(delayed_mntput_work, delayed_mntput);
static void mntput_no_expire(struct mount *mnt)
{
	LIST_HEAD(list);
	int count;

	rcu_read_lock();
	if (likely(READ_ONCE(mnt->mnt_ns))) {
		/*
		 * Since we don't do lock_mount_hash() here,
		 * ->mnt_ns can change under us.  However, if it's
		 * non-NULL, then there's a reference that won't
		 * be dropped until after an RCU delay done after
		 * turning ->mnt_ns NULL.  So if we observe it
		 * non-NULL under rcu_read_lock(), the reference
		 * we are dropping is not the final one.
		 */
		mnt_add_count(mnt, -1);
		rcu_read_unlock();
		return;
	}
	lock_mount_hash();
	/*
	 * make sure that if __legitimize_mnt() has not seen us grab
	 * mount_lock, we'll see their refcount increment here.
	 */
	smp_mb();
	mnt_add_count(mnt, -1);
	count = mnt_get_count(mnt);
	if (count != 0) {
		WARN_ON(count < 0);
		rcu_read_unlock();
		unlock_mount_hash();
		return;
	}
	if (unlikely(mnt->mnt.mnt_flags & MNT_DOOMED)) {
		rcu_read_unlock();
		unlock_mount_hash();
		return;
	}
	mnt->mnt.mnt_flags |= MNT_DOOMED;
	rcu_read_unlock();

	list_del(&mnt->mnt_instance);

	if (unlikely(!list_empty(&mnt->mnt_mounts))) {
		struct mount *p, *tmp;
		list_for_each_entry_safe(p, tmp, &mnt->mnt_mounts, mnt_child) {
			__put_mountpoint(unhash_mnt(p), &list);
			hlist_add_head(&p->mnt_umount, &mnt->mnt_stuck_children);
		}
	}
	unlock_mount_hash();
	shrink_dentry_list(&list);

	if (likely(!(mnt->mnt.mnt_flags & MNT_INTERNAL))) {
		struct task_struct *task = current;
		if (likely(!(task->flags & PF_KTHREAD))) {
			init_task_work(&mnt->mnt_rcu, __cleanup_mnt);
			if (!task_work_add(task, &mnt->mnt_rcu, TWA_RESUME))
				return;
		}
		if (llist_add(&mnt->mnt_llist, &delayed_mntput_list))
			schedule_delayed_work(&delayed_mntput_work, 1);
		return;
	}
	cleanup_mnt(mnt);
}
void mntput(struct vfsmount *mnt)
{
	if (mnt) {
		struct mount *m = real_mount(mnt);
		/* avoid cacheline pingpong */
		if (unlikely(m->mnt_expiry_mark))
			WRITE_ONCE(m->mnt_expiry_mark, 0);
		mntput_no_expire(m);
	}
}
EXPORT_SYMBOL(mntput);

struct vfsmount *mntget(struct vfsmount *mnt)
{
	if (mnt)
		mnt_add_count(real_mount(mnt), 1);
	return mnt;
}
EXPORT_SYMBOL(mntget);
/*
 * Make a mount point inaccessible to new lookups.
 * Because there may still be current users, the caller MUST WAIT
 * for an RCU grace period before destroying the mount point.
 */
void mnt_make_shortterm(struct vfsmount *mnt)
{
	if (mnt)
		real_mount(mnt)->mnt_ns = NULL;
}
/**
 * path_is_mountpoint() - Check if path is a mount in the current namespace.
 * @path: path to check
 *
 * d_mountpoint() can only be used reliably to establish if a dentry is
 * not mounted in any namespace and that common case is handled inline.
 * d_mountpoint() isn't aware of the possibility there may be multiple
 * mounts using a given dentry in a different namespace. This function
 * checks if the passed in path is a mountpoint rather than the dentry
 * alone.
 */
bool path_is_mountpoint(const struct path *path)
{
	unsigned seq;
	bool res;

	if (!d_mountpoint(path->dentry))
		return false;

	rcu_read_lock();
	do {
		seq = read_seqbegin(&mount_lock);
		res = __path_is_mountpoint(path);
	} while (read_seqretry(&mount_lock, seq));
	rcu_read_unlock();

	return res;
}
EXPORT_SYMBOL(path_is_mountpoint);

struct vfsmount *mnt_clone_internal(const struct path *path)
{
	struct mount *p;
	p = clone_mnt(real_mount(path->mnt), path->dentry, CL_PRIVATE);
	if (IS_ERR(p))
		return ERR_CAST(p);
	p->mnt.mnt_flags |= MNT_INTERNAL;
	return &p->mnt;
}
/*
 * Returns the mount which either has the specified mnt_id, or has the next
 * smallest id after the specified one.
 */
static struct mount *mnt_find_id_at(struct mnt_namespace *ns, u64 mnt_id)
{
	struct rb_node *node = ns->mounts.rb_node;
	struct mount *ret = NULL;

	while (node) {
		struct mount *m = node_to_mount(node);

		if (mnt_id <= m->mnt_id_unique) {
			ret = node_to_mount(node);
			if (mnt_id == m->mnt_id_unique)
				break;
			node = node->rb_left;
		} else {
			node = node->rb_right;
		}
	}
	return ret;
}

/*
 * Returns the mount which either has the specified mnt_id, or has the next
 * greater id before the specified one.
 */
static struct mount *mnt_find_id_at_reverse(struct mnt_namespace *ns, u64 mnt_id)
{
	struct rb_node *node = ns->mounts.rb_node;
	struct mount *ret = NULL;

	while (node) {
		struct mount *m = node_to_mount(node);

		if (mnt_id >= m->mnt_id_unique) {
			ret = node_to_mount(node);
			if (mnt_id == m->mnt_id_unique)
				break;
			node = node->rb_right;
		} else {
			node = node->rb_left;
		}
	}
	return ret;
}
#ifdef CONFIG_PROC_FS

/* iterator; we want it to have access to namespace_sem, thus here... */
static void *m_start(struct seq_file *m, loff_t *pos)
{
	struct proc_mounts *p = m->private;

	down_read(&namespace_sem);

	return mnt_find_id_at(p->ns, *pos);
}

static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct mount *next = NULL, *mnt = v;
	struct rb_node *node = rb_next(&mnt->mnt_node);

	++*pos;
	if (node) {
		next = node_to_mount(node);
		*pos = next->mnt_id_unique;
	}
	return next;
}

static void m_stop(struct seq_file *m, void *v)
{
	up_read(&namespace_sem);
}

static int m_show(struct seq_file *m, void *v)
{
	struct proc_mounts *p = m->private;
	struct mount *r = v;
	return p->show(m, &r->mnt);
}

const struct seq_operations mounts_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= m_show,
};

#endif /* CONFIG_PROC_FS */
/**
 * may_umount_tree - check if a mount tree is busy
 * @m: root of mount tree
 *
 * This is called to check if a tree of mounts has any
 * open files, pwds, chroots or sub mounts that are
 * busy.
 */
int may_umount_tree(struct vfsmount *m)
{
	struct mount *mnt = real_mount(m);
	int actual_refs = 0;
	int minimum_refs = 0;
	struct mount *p;
	BUG_ON(!m);

	/* write lock needed for mnt_get_count */
	lock_mount_hash();
	for (p = mnt; p; p = next_mnt(p, mnt)) {
		actual_refs += mnt_get_count(p);
		minimum_refs += 2;
	}
	unlock_mount_hash();

	if (actual_refs > minimum_refs)
		return 0;

	return 1;
}

EXPORT_SYMBOL(may_umount_tree);

/**
 * may_umount - check if a mount point is busy
 * @mnt: root of mount
 *
 * This is called to check if a mount point has any
 * open files, pwds, chroots or sub mounts. If the
 * mount has sub mounts this will return busy
 * regardless of whether the sub mounts are busy.
 *
 * Doesn't take quota and stuff into account. IOW, in some cases it will
 * give false negatives. The main reason why it's here is that we need
 * a non-destructive way to look for easily umountable filesystems.
 */
int may_umount(struct vfsmount *mnt)
{
	int ret = 1;
	down_read(&namespace_sem);
	lock_mount_hash();
	if (propagate_mount_busy(real_mount(mnt), 2))
		ret = 0;
	unlock_mount_hash();
	up_read(&namespace_sem);
	return ret;
}

EXPORT_SYMBOL(may_umount);
static void namespace_unlock(void)
{
	struct hlist_head head;
	struct hlist_node *p;
	struct mount *m;
	LIST_HEAD(list);

	hlist_move_list(&unmounted, &head);
	list_splice_init(&ex_mountpoints, &list);

	up_write(&namespace_sem);

	shrink_dentry_list(&list);

	if (likely(hlist_empty(&head)))
		return;

	synchronize_rcu_expedited();

	hlist_for_each_entry_safe(m, p, &head, mnt_umount) {
		hlist_del(&m->mnt_umount);
		mntput(&m->mnt);
	}
}

static inline void namespace_lock(void)
{
	down_write(&namespace_sem);
}

enum umount_tree_flags {
	UMOUNT_SYNC = 1,
	UMOUNT_PROPAGATE = 2,
	UMOUNT_CONNECTED = 4,
};
static bool disconnect_mount(struct mount *mnt, enum umount_tree_flags how)
{
	/* Leaving mounts connected is only valid for lazy umounts */
	if (how & UMOUNT_SYNC)
		return true;

	/* A mount without a parent has nothing to be connected to */
	if (!mnt_has_parent(mnt))
		return true;

	/* Because the reference counting rules change when mounts are
	 * unmounted and connected, umounted mounts may not be
	 * connected to mounted mounts.
	 */
	if (!(mnt->mnt_parent->mnt.mnt_flags & MNT_UMOUNT))
		return true;

	/* Has it been requested that the mount remain connected? */
	if (how & UMOUNT_CONNECTED)
		return false;

	/* Is the mount locked such that it needs to remain connected? */
	if (IS_MNT_LOCKED(mnt))
		return false;

	/* By default disconnect the mount */
	return true;
}
1753 * namespace_sem must be held for write
1755 static void umount_tree(struct mount
*mnt
, enum umount_tree_flags how
)
1757 LIST_HEAD(tmp_list
);
1760 if (how
& UMOUNT_PROPAGATE
)
1761 propagate_mount_unlock(mnt
);
1763 /* Gather the mounts to umount */
1764 for (p
= mnt
; p
; p
= next_mnt(p
, mnt
)) {
1765 p
->mnt
.mnt_flags
|= MNT_UMOUNT
;
1766 if (p
->mnt
.mnt_flags
& MNT_ONRB
)
1767 move_from_ns(p
, &tmp_list
);
1769 list_move(&p
->mnt_list
, &tmp_list
);
1772 /* Hide the mounts from mnt_mounts */
1773 list_for_each_entry(p
, &tmp_list
, mnt_list
) {
1774 list_del_init(&p
->mnt_child
);
1777 /* Add propagated mounts to the tmp_list */
1778 if (how
& UMOUNT_PROPAGATE
)
1779 propagate_umount(&tmp_list
);
1781 while (!list_empty(&tmp_list
)) {
1782 struct mnt_namespace
*ns
;
1784 p
= list_first_entry(&tmp_list
, struct mount
, mnt_list
);
1785 list_del_init(&p
->mnt_expire
);
1786 list_del_init(&p
->mnt_list
);
1790 __touch_mnt_namespace(ns
);
1793 if (how
& UMOUNT_SYNC
)
1794 p
->mnt
.mnt_flags
|= MNT_SYNC_UMOUNT
;
1796 disconnect
= disconnect_mount(p
, how
);
1797 if (mnt_has_parent(p
)) {
1798 mnt_add_count(p
->mnt_parent
, -1);
1800 /* Don't forget about p */
1801 list_add_tail(&p
->mnt_child
, &p
->mnt_parent
->mnt_mounts
);
1806 change_mnt_propagation(p
, MS_PRIVATE
);
1808 hlist_add_head(&p
->mnt_umount
, &unmounted
);
static void shrink_submounts(struct mount *mnt);

static int do_umount_root(struct super_block *sb)
{
	int ret = 0;

	down_write(&sb->s_umount);
	if (!sb_rdonly(sb)) {
		struct fs_context *fc;

		fc = fs_context_for_reconfigure(sb->s_root, SB_RDONLY,
						SB_RDONLY);
		if (IS_ERR(fc)) {
			ret = PTR_ERR(fc);
		} else {
			ret = parse_monolithic_mount_data(fc, NULL);
			if (!ret)
				ret = reconfigure_super(fc);
			put_fs_context(fc);
		}
	}
	up_write(&sb->s_umount);
	return ret;
}
static int do_umount(struct mount *mnt, int flags)
{
	struct super_block *sb = mnt->mnt.mnt_sb;
	int retval;

	retval = security_sb_umount(&mnt->mnt, flags);
	if (retval)
		return retval;

	/*
	 * Allow userspace to request a mountpoint be expired rather than
	 * unmounting unconditionally. Unmount only happens if:
	 *  (1) the mark is already set (the mark is cleared by mntput())
	 *  (2) the usage count == 1 [parent vfsmount] + 1 [sys_umount]
	 */
	if (flags & MNT_EXPIRE) {
		if (&mnt->mnt == current->fs->root.mnt ||
		    flags & (MNT_FORCE | MNT_DETACH))
			return -EINVAL;

		/*
		 * probably don't strictly need the lock here if we examined
		 * all race cases, but it's a slowpath.
		 */
		lock_mount_hash();
		if (mnt_get_count(mnt) != 2) {
			unlock_mount_hash();
			return -EBUSY;
		}
		unlock_mount_hash();

		if (!xchg(&mnt->mnt_expiry_mark, 1))
			return -EAGAIN;
	}

	/*
	 * If we may have to abort operations to get out of this
	 * mount, and they will themselves hold resources we must
	 * allow the fs to do things. In the Unix tradition of
	 * 'Gee thats tricky lets do it in userspace' the umount_begin
	 * might fail to complete on the first run through as other tasks
	 * must return, and the like. That's for the mount program to worry
	 * about for the moment.
	 */

	if (flags & MNT_FORCE && sb->s_op->umount_begin) {
		sb->s_op->umount_begin(sb);
	}

	/*
	 * No sense to grab the lock for this test, but test itself looks
	 * somewhat bogus. Suggestions for better replacement?
	 * Ho-hum... In principle, we might treat that as umount + switch
	 * to rootfs. GC would eventually take care of the old vfsmount.
	 * Actually it makes sense, especially if rootfs would contain a
	 * /reboot - static binary that would close all descriptors and
	 * call reboot(9). Then init(8) could umount root and exec /reboot.
	 */
	if (&mnt->mnt == current->fs->root.mnt && !(flags & MNT_DETACH)) {
		/*
		 * Special case for "unmounting" root ...
		 * we just try to remount it readonly.
		 */
		if (!ns_capable(sb->s_user_ns, CAP_SYS_ADMIN))
			return -EPERM;
		return do_umount_root(sb);
	}

	namespace_lock();
	lock_mount_hash();

	/* Recheck MNT_LOCKED with the locks held */
	retval = -EINVAL;
	if (mnt->mnt.mnt_flags & MNT_LOCKED)
		goto out;

	event++;
	if (flags & MNT_DETACH) {
		if (mnt->mnt.mnt_flags & MNT_ONRB ||
		    !list_empty(&mnt->mnt_list))
			umount_tree(mnt, UMOUNT_PROPAGATE);
		retval = 0;
	} else {
		shrink_submounts(mnt);
		retval = -EBUSY;
		if (!propagate_mount_busy(mnt, 2)) {
			if (mnt->mnt.mnt_flags & MNT_ONRB ||
			    !list_empty(&mnt->mnt_list))
				umount_tree(mnt, UMOUNT_PROPAGATE|UMOUNT_SYNC);
			retval = 0;
		}
	}
out:
	unlock_mount_hash();
	namespace_unlock();
	return retval;
}
/*
 * __detach_mounts - lazily unmount all mounts on the specified dentry
 *
 * During unlink, rmdir, and d_drop it is possible to lose the path
 * to an existing mountpoint, and wind up leaking the mount.
 * detach_mounts allows lazily unmounting those mounts instead of
 * leaking them.
 *
 * The caller may hold dentry->d_inode->i_mutex.
 */
void __detach_mounts(struct dentry *dentry)
{
	struct mountpoint *mp;
	struct mount *mnt;

	namespace_lock();
	lock_mount_hash();
	mp = lookup_mountpoint(dentry);
	if (!mp)
		goto out_unlock;

	event++;
	while (!hlist_empty(&mp->m_list)) {
		mnt = hlist_entry(mp->m_list.first, struct mount, mnt_mp_list);
		if (mnt->mnt.mnt_flags & MNT_UMOUNT) {
			umount_mnt(mnt);
			hlist_add_head(&mnt->mnt_umount, &unmounted);
		}
		else umount_tree(mnt, UMOUNT_CONNECTED);
	}
	put_mountpoint(mp);
out_unlock:
	unlock_mount_hash();
	namespace_unlock();
}
/*
 * Is the caller allowed to modify his namespace?
 */
bool may_mount(void)
{
	return ns_capable(current->nsproxy->mnt_ns->user_ns, CAP_SYS_ADMIN);
}

static void warn_mandlock(void)
{
	pr_warn_once("=======================================================\n"
		     "WARNING: The mand mount option has been deprecated\n"
		     "         and is ignored by this kernel. Remove the mand\n"
		     "         option from the mount to silence this warning.\n"
		     "=======================================================\n");
}
static int can_umount(const struct path *path, int flags)
{
	struct mount *mnt = real_mount(path->mnt);

	if (!may_mount())
		return -EPERM;
	if (!path_mounted(path))
		return -EINVAL;
	if (!check_mnt(mnt))
		return -EINVAL;
	if (mnt->mnt.mnt_flags & MNT_LOCKED) /* Check optimistically */
		return -EINVAL;
	if (flags & MNT_FORCE && !capable(CAP_SYS_ADMIN))
		return -EPERM;
	return 0;
}

// caller is responsible for flags being sane
int path_umount(struct path *path, int flags)
{
	struct mount *mnt = real_mount(path->mnt);
	int ret;

	ret = can_umount(path, flags);
	if (!ret)
		ret = do_umount(mnt, flags);

	/* we mustn't call path_put() as that would clear mnt_expiry_mark */
	dput(path->dentry);
	mntput_no_expire(mnt);
	return ret;
}
static int ksys_umount(char __user *name, int flags)
{
	int lookup_flags = LOOKUP_MOUNTPOINT;
	struct path path;
	int ret;

	// basic validity checks done first
	if (flags & ~(MNT_FORCE | MNT_DETACH | MNT_EXPIRE | UMOUNT_NOFOLLOW))
		return -EINVAL;

	if (!(flags & UMOUNT_NOFOLLOW))
		lookup_flags |= LOOKUP_FOLLOW;
	ret = user_path_at(AT_FDCWD, name, lookup_flags, &path);
	if (ret)
		return ret;
	return path_umount(&path, flags);
}

SYSCALL_DEFINE2(umount, char __user *, name, int, flags)
{
	return ksys_umount(name, flags);
}

#ifdef __ARCH_WANT_SYS_OLDUMOUNT

/*
 *	The 2.0 compatible umount. No flags.
 */
SYSCALL_DEFINE1(oldumount, char __user *, name)
{
	return ksys_umount(name, 0);
}

#endif
*dentry
)
2058 /* Is this a proxy for a mount namespace? */
2059 return dentry
->d_op
== &ns_dentry_operations
&&
2060 dentry
->d_fsdata
== &mntns_operations
;
2063 struct ns_common
*from_mnt_ns(struct mnt_namespace
*mnt
)
2068 struct mnt_namespace
*__lookup_next_mnt_ns(struct mnt_namespace
*mntns
, bool previous
)
2070 guard(read_lock
)(&mnt_ns_tree_lock
);
2072 struct rb_node
*node
;
2075 node
= rb_prev(&mntns
->mnt_ns_tree_node
);
2077 node
= rb_next(&mntns
->mnt_ns_tree_node
);
2079 return ERR_PTR(-ENOENT
);
2081 mntns
= node_to_mnt_ns(node
);
2082 node
= &mntns
->mnt_ns_tree_node
;
2084 if (!ns_capable_noaudit(mntns
->user_ns
, CAP_SYS_ADMIN
))
2088 * Holding mnt_ns_tree_lock prevents the mount namespace from
2089 * being freed but it may well be on it's deathbed. We want an
2090 * active reference, not just a passive one here as we're
2091 * persisting the mount namespace.
2093 if (!refcount_inc_not_zero(&mntns
->ns
.count
))
static bool mnt_ns_loop(struct dentry *dentry)
{
	/* Could bind mounting the mount namespace inode cause a
	 * mount namespace loop?
	 */
	struct mnt_namespace *mnt_ns;
	if (!is_mnt_ns_file(dentry))
		return false;

	mnt_ns = to_mnt_ns(get_proc_ns(dentry->d_inode));
	return current->nsproxy->mnt_ns->seq >= mnt_ns->seq;
}
struct mount *copy_tree(struct mount *src_root, struct dentry *dentry,
					int flag)
{
	struct mount *res, *src_parent, *src_root_child, *src_mnt,
		*dst_parent, *dst_mnt;

	if (!(flag & CL_COPY_UNBINDABLE) && IS_MNT_UNBINDABLE(src_root))
		return ERR_PTR(-EINVAL);

	if (!(flag & CL_COPY_MNT_NS_FILE) && is_mnt_ns_file(dentry))
		return ERR_PTR(-EINVAL);

	res = dst_mnt = clone_mnt(src_root, dentry, flag);
	if (IS_ERR(dst_mnt))
		return dst_mnt;

	src_parent = src_root;
	dst_mnt->mnt_mountpoint = src_root->mnt_mountpoint;

	list_for_each_entry(src_root_child, &src_root->mnt_mounts, mnt_child) {
		if (!is_subdir(src_root_child->mnt_mountpoint, dentry))
			continue;

		for (src_mnt = src_root_child; src_mnt;
		    src_mnt = next_mnt(src_mnt, src_root_child)) {
			if (!(flag & CL_COPY_UNBINDABLE) &&
			    IS_MNT_UNBINDABLE(src_mnt)) {
				if (src_mnt->mnt.mnt_flags & MNT_LOCKED) {
					/* Both unbindable and locked. */
					dst_mnt = ERR_PTR(-EPERM);
					goto out;
				} else {
					src_mnt = skip_mnt_tree(src_mnt);
					continue;
				}
			}
			if (!(flag & CL_COPY_MNT_NS_FILE) &&
			    is_mnt_ns_file(src_mnt->mnt.mnt_root)) {
				src_mnt = skip_mnt_tree(src_mnt);
				continue;
			}
			while (src_parent != src_mnt->mnt_parent) {
				src_parent = src_parent->mnt_parent;
				dst_mnt = dst_mnt->mnt_parent;
			}

			src_parent = src_mnt;
			dst_parent = dst_mnt;
			dst_mnt = clone_mnt(src_mnt, src_mnt->mnt.mnt_root, flag);
			if (IS_ERR(dst_mnt))
				goto out;
			lock_mount_hash();
			list_add_tail(&dst_mnt->mnt_list, &res->mnt_list);
			attach_mnt(dst_mnt, dst_parent, src_parent->mnt_mp, false);
			unlock_mount_hash();
		}
	}
	return res;

out:
	if (res) {
		lock_mount_hash();
		umount_tree(res, UMOUNT_SYNC);
		unlock_mount_hash();
	}
	return dst_mnt;
}
/* Caller should check returned pointer for errors */

struct vfsmount *collect_mounts(const struct path *path)
{
	struct mount *tree;
	namespace_lock();
	if (!check_mnt(real_mount(path->mnt)))
		tree = ERR_PTR(-EINVAL);
	else
		tree = copy_tree(real_mount(path->mnt), path->dentry,
				 CL_COPY_ALL | CL_PRIVATE);
	namespace_unlock();
	if (IS_ERR(tree))
		return ERR_CAST(tree);
	return &tree->mnt;
}

static void free_mnt_ns(struct mnt_namespace *);
static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *, bool);

void dissolve_on_fput(struct vfsmount *mnt)
{
	struct mnt_namespace *ns;
	namespace_lock();
	lock_mount_hash();
	ns = real_mount(mnt)->mnt_ns;
	if (ns) {
		if (is_anon_ns(ns))
			umount_tree(real_mount(mnt), UMOUNT_CONNECTED);
		else
			ns = NULL;
	}
	unlock_mount_hash();
	namespace_unlock();
	if (ns)
		free_mnt_ns(ns);
}
void drop_collected_mounts(struct vfsmount *mnt)
{
	namespace_lock();
	lock_mount_hash();
	umount_tree(real_mount(mnt), 0);
	unlock_mount_hash();
	namespace_unlock();
}

bool has_locked_children(struct mount *mnt, struct dentry *dentry)
{
	struct mount *child;

	list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) {
		if (!is_subdir(child->mnt_mountpoint, dentry))
			continue;

		if (child->mnt.mnt_flags & MNT_LOCKED)
			return true;
	}
	return false;
}
/**
 * clone_private_mount - create a private clone of a path
 * @path: path to clone
 *
 * This creates a new vfsmount, which will be the clone of @path.  The new mount
 * will not be attached anywhere in the namespace and will be private (i.e.
 * changes to the originating mount won't be propagated into this).
 *
 * Release with mntput().
 */
struct vfsmount *clone_private_mount(const struct path *path)
{
	struct mount *old_mnt = real_mount(path->mnt);
	struct mount *new_mnt;

	down_read(&namespace_sem);
	if (IS_MNT_UNBINDABLE(old_mnt))
		goto invalid;

	if (!check_mnt(old_mnt))
		goto invalid;

	if (has_locked_children(old_mnt, path->dentry))
		goto invalid;

	new_mnt = clone_mnt(old_mnt, path->dentry, CL_PRIVATE);
	up_read(&namespace_sem);

	if (IS_ERR(new_mnt))
		return ERR_CAST(new_mnt);

	/* Longterm mount to be removed by kern_unmount*() */
	new_mnt->mnt_ns = MNT_NS_INTERNAL;

	return &new_mnt->mnt;

invalid:
	up_read(&namespace_sem);
	return ERR_PTR(-EINVAL);
}
EXPORT_SYMBOL_GPL(clone_private_mount);
int iterate_mounts(int (*f)(struct vfsmount *, void *), void *arg,
		   struct vfsmount *root)
{
	struct mount *mnt;
	int res = f(root, arg);
	if (res)
		return res;
	list_for_each_entry(mnt, &real_mount(root)->mnt_list, mnt_list) {
		res = f(&mnt->mnt, arg);
		if (res)
			return res;
	}
	return 0;
}
static void lock_mnt_tree(struct mount *mnt)
{
	struct mount *p;

	for (p = mnt; p; p = next_mnt(p, mnt)) {
		int flags = p->mnt.mnt_flags;
		/* Don't allow unprivileged users to change mount flags */
		flags |= MNT_LOCK_ATIME;

		if (flags & MNT_READONLY)
			flags |= MNT_LOCK_READONLY;

		if (flags & MNT_NODEV)
			flags |= MNT_LOCK_NODEV;

		if (flags & MNT_NOSUID)
			flags |= MNT_LOCK_NOSUID;

		if (flags & MNT_NOEXEC)
			flags |= MNT_LOCK_NOEXEC;
		/* Don't allow unprivileged users to reveal what is under a mount */
		if (list_empty(&p->mnt_expire))
			flags |= MNT_LOCKED;
		p->mnt.mnt_flags = flags;
	}
}
static void cleanup_group_ids(struct mount *mnt, struct mount *end)
{
	struct mount *p;

	for (p = mnt; p != end; p = next_mnt(p, mnt)) {
		if (p->mnt_group_id && !IS_MNT_SHARED(p))
			mnt_release_group_id(p);
	}
}
static int invent_group_ids(struct mount *mnt, bool recurse)
{
	struct mount *p;

	for (p = mnt; p; p = recurse ? next_mnt(p, mnt) : NULL) {
		if (!p->mnt_group_id && !IS_MNT_SHARED(p)) {
			int err = mnt_alloc_group_id(p);
			if (err) {
				cleanup_group_ids(mnt, p);
				return err;
			}
		}
	}

	return 0;
}
int count_mounts(struct mnt_namespace *ns, struct mount *mnt)
{
	unsigned int max = READ_ONCE(sysctl_mount_max);
	unsigned int mounts = 0;
	struct mount *p;

	if (ns->nr_mounts >= max)
		return -ENOSPC;
	max -= ns->nr_mounts;
	if (ns->pending_mounts >= max)
		return -ENOSPC;
	max -= ns->pending_mounts;

	for (p = mnt; p; p = next_mnt(p, mnt))
		mounts++;

	if (mounts > max)
		return -ENOSPC;

	ns->pending_mounts += mounts;
	return 0;
}
enum mnt_tree_flags_t {
	MNT_TREE_MOVE = BIT(0),
	MNT_TREE_BENEATH = BIT(1),
};
/**
 * attach_recursive_mnt - attach a source mount tree
 * @source_mnt: mount tree to be attached
 * @top_mnt:    mount that @source_mnt will be mounted on or mounted beneath
 * @dest_mp:    the mountpoint @source_mnt will be mounted at
 * @flags:      modify how @source_mnt is supposed to be attached
 *
 * NOTE: the table below explains the semantics when a source mount
 * of a given type is attached to a destination mount of a given type.
 * ---------------------------------------------------------------------------
 * |                      BIND MOUNT OPERATION                               |
 * |**************************************************************************
 * | source-->| shared       | private      | slave       | unbindable |
 * | dest     |              |              |             |            |
 * |**************************************************************************
 * |  shared  | shared (++)  | shared (+)   | shared(+++) | invalid    |
 * |non-shared| shared (+)   | private      | slave (*)   | invalid    |
 * ***************************************************************************
 * A bind operation clones the source mount and mounts the clone on the
 * destination mount.
 *
 * (++)  the cloned mount is propagated to all the mounts in the propagation
 *       tree of the destination mount and the cloned mount is added to
 *       the peer group of the source mount.
 * (+)   the cloned mount is created under the destination mount and is marked
 *       as shared. The cloned mount is added to the peer group of the source
 *       mount.
 * (+++) the mount is propagated to all the mounts in the propagation tree
 *       of the destination mount and the cloned mount is made slave
 *       of the same master as that of the source mount. The cloned mount
 *       is marked as 'shared and slave'.
 * (*)   the cloned mount is made a slave of the same master as that of the
 *       source mount.
 *
 * ---------------------------------------------------------------------------
 * |                      MOVE MOUNT OPERATION                               |
 * |**************************************************************************
 * | source-->| shared       | private      | slave        | unbindable |
 * | dest     |              |              |              |            |
 * |**************************************************************************
 * |  shared  | shared (+)   | shared (+)   | shared(+++)  | invalid    |
 * |non-shared| shared (+*)  | private      | slave (*)    | unbindable |
 * ***************************************************************************
 *
 * (+)   the mount is moved to the destination. And is then propagated to
 *       all the mounts in the propagation tree of the destination mount.
 * (+*)  the mount is moved to the destination.
 * (+++) the mount is moved to the destination and is then propagated to
 *       all the mounts belonging to the destination mount's propagation tree.
 *       the mount is marked as 'shared and slave'.
 * (*)   the mount continues to be a slave at the new location.
 *
 * if the source mount is a tree, the operations explained above are
 * applied to each mount in the tree.
 * Must be called without spinlocks held, since this function can sleep
 * in allocations.
 *
 * Context: The function expects namespace_lock() to be held.
 * Return: If @source_mnt was successfully attached 0 is returned.
 *         Otherwise a negative error code is returned.
 */
static int attach_recursive_mnt(struct mount *source_mnt,
				struct mount *top_mnt,
				struct mountpoint *dest_mp,
				enum mnt_tree_flags_t flags)
{
	struct user_namespace *user_ns = current->nsproxy->mnt_ns->user_ns;
	HLIST_HEAD(tree_list);
	struct mnt_namespace *ns = top_mnt->mnt_ns;
	struct mountpoint *smp;
	struct mount *child, *dest_mnt, *p;
	struct hlist_node *n;
	int err = 0;
	bool moving = flags & MNT_TREE_MOVE, beneath = flags & MNT_TREE_BENEATH;

	/*
	 * Preallocate a mountpoint in case the new mounts need to be
	 * mounted beneath mounts on the same mountpoint.
	 */
	smp = get_mountpoint(source_mnt->mnt.mnt_root);
	if (IS_ERR(smp))
		return PTR_ERR(smp);

	/* Is there space to add these mounts to the mount namespace? */
	if (!moving) {
		err = count_mounts(ns, source_mnt);
		if (err)
			goto out;
	}

	if (beneath)
		dest_mnt = top_mnt->mnt_parent;
	else
		dest_mnt = top_mnt;

	if (IS_MNT_SHARED(dest_mnt)) {
		err = invent_group_ids(source_mnt, true);
		if (err)
			goto out;
		err = propagate_mnt(dest_mnt, dest_mp, source_mnt, &tree_list);
	}
	lock_mount_hash();
	if (err)
		goto out_cleanup_ids;

	if (IS_MNT_SHARED(dest_mnt)) {
		for (p = source_mnt; p; p = next_mnt(p, source_mnt))
			set_mnt_shared(p);
	}

	if (moving) {
		if (beneath)
			dest_mp = smp;
		unhash_mnt(source_mnt);
		attach_mnt(source_mnt, top_mnt, dest_mp, beneath);
		touch_mnt_namespace(source_mnt->mnt_ns);
	} else {
		if (source_mnt->mnt_ns) {
			LIST_HEAD(head);

			/* move from anon - the caller will destroy */
			for (p = source_mnt; p; p = next_mnt(p, source_mnt))
				move_from_ns(p, &head);
			list_del_init(&head);
		}
		if (beneath)
			mnt_set_mountpoint_beneath(source_mnt, top_mnt, smp);
		else
			mnt_set_mountpoint(dest_mnt, dest_mp, source_mnt);
		commit_tree(source_mnt);
	}

	hlist_for_each_entry_safe(child, n, &tree_list, mnt_hash) {
		struct mount *q;

		hlist_del_init(&child->mnt_hash);
		q = __lookup_mnt(&child->mnt_parent->mnt,
				 child->mnt_mountpoint);
		if (q)
			mnt_change_mountpoint(child, smp, q);
		/* Notice when we are propagating across user namespaces */
		if (child->mnt_parent->mnt_ns->user_ns != user_ns)
			lock_mnt_tree(child);
		child->mnt.mnt_flags &= ~MNT_LOCKED;
		commit_tree(child);
	}
	put_mountpoint(smp);
	unlock_mount_hash();

	return 0;

 out_cleanup_ids:
	while (!hlist_empty(&tree_list)) {
		child = hlist_entry(tree_list.first, struct mount, mnt_hash);
		child->mnt_parent->mnt_ns->pending_mounts = 0;
		umount_tree(child, UMOUNT_SYNC);
	}
	unlock_mount_hash();
	cleanup_group_ids(source_mnt, NULL);
 out:
	ns->pending_mounts = 0;

	read_seqlock_excl(&mount_lock);
	put_mountpoint(smp);
	read_sequnlock_excl(&mount_lock);

	return err;
}
/**
 * do_lock_mount - lock mount and mountpoint
 * @path:    target path
 * @beneath: whether the intention is to mount beneath @path
 *
 * Follow the mount stack on @path until the top mount @mnt is found. If
 * the initial @path->{mnt,dentry} is a mountpoint lookup the first
 * mount stacked on top of it. Then simply follow @{mnt,mnt->mnt_root}
 * until nothing is stacked on top of it anymore.
 *
 * Acquire the inode_lock() on the top mount's ->mnt_root to protect
 * against concurrent removal of the new mountpoint from another mount
 * namespace.
 *
 * If @beneath is requested, acquire inode_lock() on @mnt's mountpoint
 * @mp on @mnt->mnt_parent must be acquired. This protects against a
 * concurrent unlink of @mp->mnt_dentry from another mount namespace
 * where @mnt doesn't have a child mount mounted @mp. A concurrent
 * removal of @mnt->mnt_root doesn't matter as nothing will be mounted
 * on top of it for @beneath.
 *
 * In addition, @beneath needs to make sure that @mnt hasn't been
 * unmounted or moved from its current mountpoint in between dropping
 * @mount_lock and acquiring @namespace_sem. For the !@beneath case @mnt
 * being unmounted would be detected later by e.g., calling
 * check_mnt(mnt) in the function it's called from. For the @beneath
 * case however, it's useful to detect it directly in do_lock_mount().
 * If @mnt hasn't been unmounted then @mnt->mnt_mountpoint still points
 * to @mnt->mnt_mp->m_dentry. But if @mnt has been unmounted it will
 * point to @mnt->mnt_root and @mnt->mnt_mp will be NULL.
 *
 * Return: Either the target mountpoint on the top mount or the top
 *         mount's mountpoint.
 */
static struct mountpoint *do_lock_mount(struct path *path, bool beneath)
{
	struct vfsmount *mnt = path->mnt;
	struct dentry *dentry;
	struct mountpoint *mp = ERR_PTR(-ENOENT);

	for (;;) {
		struct mount *m;

		if (beneath) {
			m = real_mount(mnt);
			read_seqlock_excl(&mount_lock);
			dentry = dget(m->mnt_mountpoint);
			read_sequnlock_excl(&mount_lock);
		} else {
			dentry = path->dentry;
		}

		inode_lock(dentry->d_inode);
		if (unlikely(cant_mount(dentry))) {
			inode_unlock(dentry->d_inode);
			goto out;
		}

		namespace_lock();

		if (beneath && (!is_mounted(mnt) || m->mnt_mountpoint != dentry)) {
			namespace_unlock();
			inode_unlock(dentry->d_inode);
			goto out;
		}

		mnt = lookup_mnt(path);
		if (likely(!mnt))
			break;

		namespace_unlock();
		inode_unlock(dentry->d_inode);
		if (beneath)
			dput(dentry);
		path_put(path);
		path->mnt = mnt;
		path->dentry = dget(mnt->mnt_root);
	}

	mp = get_mountpoint(dentry);
	if (IS_ERR(mp)) {
		namespace_unlock();
		inode_unlock(dentry->d_inode);
	}

out:
	if (beneath)
		dput(dentry);

	return mp;
}

static inline struct mountpoint *lock_mount(struct path *path)
{
	return do_lock_mount(path, false);
}
static void unlock_mount(struct mountpoint *where)
{
	struct dentry *dentry = where->m_dentry;

	read_seqlock_excl(&mount_lock);
	put_mountpoint(where);
	read_sequnlock_excl(&mount_lock);

	namespace_unlock();
	inode_unlock(dentry->d_inode);
}
static int graft_tree(struct mount *mnt, struct mount *p, struct mountpoint *mp)
{
	if (mnt->mnt.mnt_sb->s_flags & SB_NOUSER)
		return -EINVAL;

	if (d_is_dir(mp->m_dentry) !=
	      d_is_dir(mnt->mnt.mnt_root))
		return -ENOTDIR;

	return attach_recursive_mnt(mnt, p, mp, 0);
}
/*
 * Sanity check the flags to change_mnt_propagation.
 */
static int flags_to_propagation_type(int ms_flags)
{
	int type = ms_flags & ~(MS_REC | MS_SILENT);

	/* Fail if any non-propagation flags are set */
	if (type & ~(MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE))
		return 0;
	/* Only one propagation flag should be set */
	if (!is_power_of_2(type))
		return 0;
	return type;
}
/*
 * recursively change the type of the mountpoint.
 */
static int do_change_type(struct path *path, int ms_flags)
{
	struct mount *m;
	struct mount *mnt = real_mount(path->mnt);
	int recurse = ms_flags & MS_REC;
	int type;
	int err = 0;

	if (!path_mounted(path))
		return -EINVAL;

	type = flags_to_propagation_type(ms_flags);
	if (!type)
		return -EINVAL;

	namespace_lock();
	if (type == MS_SHARED) {
		err = invent_group_ids(mnt, recurse);
		if (err)
			goto out_unlock;
	}

	lock_mount_hash();
	for (m = mnt; m; m = (recurse ? next_mnt(m, mnt) : NULL))
		change_mnt_propagation(m, type);
	unlock_mount_hash();

 out_unlock:
	namespace_unlock();
	return err;
}
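/*
 * Illustrative userspace sketch (not part of this file): the propagation-only
 * forms of mount(2) that end up in do_change_type() pass no source, fstype or
 * data, only one of MS_SHARED/MS_PRIVATE/MS_SLAVE/MS_UNBINDABLE, optionally
 * combined with MS_REC.  The paths below are examples only.
 */
#include <stdio.h>
#include <sys/mount.h>

int change_type_example(void)
{
	/* make the whole tree under / private, as container runtimes do */
	if (mount(NULL, "/", NULL, MS_REC | MS_PRIVATE, NULL) == -1) {
		perror("mount(MS_REC|MS_PRIVATE)");
		return -1;
	}
	/* turn a single mount into a shared one */
	if (mount(NULL, "/mnt", NULL, MS_SHARED, NULL) == -1) {
		perror("mount(MS_SHARED)");
		return -1;
	}
	return 0;
}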
2728 static struct mount
*__do_loopback(struct path
*old_path
, int recurse
)
2730 struct mount
*mnt
= ERR_PTR(-EINVAL
), *old
= real_mount(old_path
->mnt
);
2732 if (IS_MNT_UNBINDABLE(old
))
2735 if (!check_mnt(old
) && old_path
->dentry
->d_op
!= &ns_dentry_operations
)
2738 if (!recurse
&& has_locked_children(old
, old_path
->dentry
))
2742 mnt
= copy_tree(old
, old_path
->dentry
, CL_COPY_MNT_NS_FILE
);
2744 mnt
= clone_mnt(old
, old_path
->dentry
, 0);
2747 mnt
->mnt
.mnt_flags
&= ~MNT_LOCKED
;
2753 * do loopback mount.
2755 static int do_loopback(struct path
*path
, const char *old_name
,
2758 struct path old_path
;
2759 struct mount
*mnt
= NULL
, *parent
;
2760 struct mountpoint
*mp
;
2762 if (!old_name
|| !*old_name
)
2764 err
= kern_path(old_name
, LOOKUP_FOLLOW
|LOOKUP_AUTOMOUNT
, &old_path
);
2769 if (mnt_ns_loop(old_path
.dentry
))
2772 mp
= lock_mount(path
);
2778 parent
= real_mount(path
->mnt
);
2779 if (!check_mnt(parent
))
2782 mnt
= __do_loopback(&old_path
, recurse
);
2788 err
= graft_tree(mnt
, parent
, mp
);
2791 umount_tree(mnt
, UMOUNT_SYNC
);
2792 unlock_mount_hash();
2797 path_put(&old_path
);
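/*
 * Illustrative userspace sketch (not part of this file): do_loopback() is what
 * services a bind-mount request, i.e. mount(2) with MS_BIND and optionally
 * MS_REC for a recursive bind of a whole subtree.  Paths are examples only.
 */
#include <stdio.h>
#include <sys/mount.h>

int bind_mount_example(void)
{
	/* non-recursive bind: submounts below /src are not copied */
	if (mount("/src", "/dst", NULL, MS_BIND, NULL) == -1) {
		perror("mount(MS_BIND)");
		return -1;
	}
	/* recursive bind: the whole tree under /src is cloned */
	if (mount("/src", "/dst-all", NULL, MS_BIND | MS_REC, NULL) == -1) {
		perror("mount(MS_BIND|MS_REC)");
		return -1;
	}
	return 0;
}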
2801 static struct file
*open_detached_copy(struct path
*path
, bool recursive
)
2803 struct user_namespace
*user_ns
= current
->nsproxy
->mnt_ns
->user_ns
;
2804 struct mnt_namespace
*ns
= alloc_mnt_ns(user_ns
, true);
2805 struct mount
*mnt
, *p
;
2809 return ERR_CAST(ns
);
2812 mnt
= __do_loopback(path
, recursive
);
2816 return ERR_CAST(mnt
);
2820 for (p
= mnt
; p
; p
= next_mnt(p
, mnt
)) {
2821 mnt_add_to_ns(ns
, p
);
2826 unlock_mount_hash();
2830 path
->mnt
= &mnt
->mnt
;
2831 file
= dentry_open(path
, O_PATH
, current_cred());
2833 dissolve_on_fput(path
->mnt
);
2835 file
->f_mode
|= FMODE_NEED_UNMOUNT
;
2839 SYSCALL_DEFINE3(open_tree
, int, dfd
, const char __user
*, filename
, unsigned, flags
)
2843 int lookup_flags
= LOOKUP_AUTOMOUNT
| LOOKUP_FOLLOW
;
2844 bool detached
= flags
& OPEN_TREE_CLONE
;
2848 BUILD_BUG_ON(OPEN_TREE_CLOEXEC
!= O_CLOEXEC
);
2850 if (flags
& ~(AT_EMPTY_PATH
| AT_NO_AUTOMOUNT
| AT_RECURSIVE
|
2851 AT_SYMLINK_NOFOLLOW
| OPEN_TREE_CLONE
|
2855 if ((flags
& (AT_RECURSIVE
| OPEN_TREE_CLONE
)) == AT_RECURSIVE
)
2858 if (flags
& AT_NO_AUTOMOUNT
)
2859 lookup_flags
&= ~LOOKUP_AUTOMOUNT
;
2860 if (flags
& AT_SYMLINK_NOFOLLOW
)
2861 lookup_flags
&= ~LOOKUP_FOLLOW
;
2862 if (flags
& AT_EMPTY_PATH
)
2863 lookup_flags
|= LOOKUP_EMPTY
;
2865 if (detached
&& !may_mount())
2868 fd
= get_unused_fd_flags(flags
& O_CLOEXEC
);
2872 error
= user_path_at(dfd
, filename
, lookup_flags
, &path
);
2873 if (unlikely(error
)) {
2874 file
= ERR_PTR(error
);
2877 file
= open_detached_copy(&path
, flags
& AT_RECURSIVE
);
2879 file
= dentry_open(&path
, O_PATH
, current_cred());
2884 return PTR_ERR(file
);
2886 fd_install(fd
, file
);
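/*
 * Illustrative userspace sketch (not part of this file): open_tree() with
 * OPEN_TREE_CLONE returns an O_PATH-like fd referring to a detached copy of a
 * mount tree, which can later be attached with move_mount() using
 * MOVE_MOUNT_F_EMPTY_PATH.  Raw syscall numbers are used because older libcs
 * lack wrappers, and AT_RECURSIVE is defined locally in case the libc headers
 * predate it; paths are examples only.
 */
#include <fcntl.h>		/* AT_FDCWD */
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/mount.h>	/* OPEN_TREE_CLONE, MOVE_MOUNT_* */

#ifndef AT_RECURSIVE
#define AT_RECURSIVE 0x8000	/* from <linux/fcntl.h> */
#endif

int open_tree_example(void)
{
	int fd = syscall(SYS_open_tree, AT_FDCWD, "/src",
			 OPEN_TREE_CLONE | OPEN_TREE_CLOEXEC | AT_RECURSIVE);
	if (fd == -1) {
		perror("open_tree");
		return -1;
	}
	/* attach the detached copy at /dst */
	if (syscall(SYS_move_mount, fd, "", AT_FDCWD, "/dst",
		    MOVE_MOUNT_F_EMPTY_PATH) == -1) {
		perror("move_mount");
		close(fd);
		return -1;
	}
	close(fd);
	return 0;
}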
/*
 * Don't allow locked mount flags to be cleared.
 *
 * No locks need to be held here while testing the various MNT_LOCK
 * flags because those flags can never be cleared once they are set.
 */
static bool can_change_locked_flags(struct mount *mnt, unsigned int mnt_flags)
{
	unsigned int fl = mnt->mnt.mnt_flags;

	if ((fl & MNT_LOCK_READONLY) &&
	    !(mnt_flags & MNT_READONLY))
		return false;

	if ((fl & MNT_LOCK_NODEV) &&
	    !(mnt_flags & MNT_NODEV))
		return false;

	if ((fl & MNT_LOCK_NOSUID) &&
	    !(mnt_flags & MNT_NOSUID))
		return false;

	if ((fl & MNT_LOCK_NOEXEC) &&
	    !(mnt_flags & MNT_NOEXEC))
		return false;

	if ((fl & MNT_LOCK_ATIME) &&
	    ((fl & MNT_ATIME_MASK) != (mnt_flags & MNT_ATIME_MASK)))
		return false;

	return true;
}
static int change_mount_ro_state(struct mount *mnt, unsigned int mnt_flags)
{
	bool readonly_request = (mnt_flags & MNT_READONLY);

	if (readonly_request == __mnt_is_readonly(&mnt->mnt))
		return 0;

	if (readonly_request)
		return mnt_make_readonly(mnt);

	mnt->mnt.mnt_flags &= ~MNT_READONLY;
	return 0;
}
static void set_mount_attributes(struct mount *mnt, unsigned int mnt_flags)
{
	mnt_flags |= mnt->mnt.mnt_flags & ~MNT_USER_SETTABLE_MASK;
	mnt->mnt.mnt_flags = mnt_flags;
	touch_mnt_namespace(mnt->mnt_ns);
}
2944 static void mnt_warn_timestamp_expiry(struct path
*mountpoint
, struct vfsmount
*mnt
)
2946 struct super_block
*sb
= mnt
->mnt_sb
;
2948 if (!__mnt_is_readonly(mnt
) &&
2949 (!(sb
->s_iflags
& SB_I_TS_EXPIRY_WARNED
)) &&
2950 (ktime_get_real_seconds() + TIME_UPTIME_SEC_MAX
> sb
->s_time_max
)) {
2951 char *buf
, *mntpath
;
2953 buf
= (char *)__get_free_page(GFP_KERNEL
);
2955 mntpath
= d_path(mountpoint
, buf
, PAGE_SIZE
);
2957 mntpath
= ERR_PTR(-ENOMEM
);
2958 if (IS_ERR(mntpath
))
2959 mntpath
= "(unknown)";
2961 pr_warn("%s filesystem being %s at %s supports timestamps until %ptTd (0x%llx)\n",
2963 is_mounted(mnt
) ? "remounted" : "mounted",
2964 mntpath
, &sb
->s_time_max
,
2965 (unsigned long long)sb
->s_time_max
);
2967 sb
->s_iflags
|= SB_I_TS_EXPIRY_WARNED
;
2969 free_page((unsigned long)buf
);
2974 * Handle reconfiguration of the mountpoint only without alteration of the
2975 * superblock it refers to. This is triggered by specifying MS_REMOUNT|MS_BIND
2978 static int do_reconfigure_mnt(struct path
*path
, unsigned int mnt_flags
)
2980 struct super_block
*sb
= path
->mnt
->mnt_sb
;
2981 struct mount
*mnt
= real_mount(path
->mnt
);
2984 if (!check_mnt(mnt
))
2987 if (!path_mounted(path
))
2990 if (!can_change_locked_flags(mnt
, mnt_flags
))
2994 * We're only checking whether the superblock is read-only not
2995 * changing it, so only take down_read(&sb->s_umount).
2997 down_read(&sb
->s_umount
);
2999 ret
= change_mount_ro_state(mnt
, mnt_flags
);
3001 set_mount_attributes(mnt
, mnt_flags
);
3002 unlock_mount_hash();
3003 up_read(&sb
->s_umount
);
3005 mnt_warn_timestamp_expiry(path
, &mnt
->mnt
);
3011 * change filesystem flags. dir should be a physical root of filesystem.
3012 * If you've mounted a non-root directory somewhere and want to do remount
3013 * on it - tough luck.
3015 static int do_remount(struct path
*path
, int ms_flags
, int sb_flags
,
3016 int mnt_flags
, void *data
)
3019 struct super_block
*sb
= path
->mnt
->mnt_sb
;
3020 struct mount
*mnt
= real_mount(path
->mnt
);
3021 struct fs_context
*fc
;
3023 if (!check_mnt(mnt
))
3026 if (!path_mounted(path
))
3029 if (!can_change_locked_flags(mnt
, mnt_flags
))
3032 fc
= fs_context_for_reconfigure(path
->dentry
, sb_flags
, MS_RMT_MASK
);
3037 * Indicate to the filesystem that the remount request is coming
3038 * from the legacy mount system call.
3042 err
= parse_monolithic_mount_data(fc
, data
);
3044 down_write(&sb
->s_umount
);
3046 if (ns_capable(sb
->s_user_ns
, CAP_SYS_ADMIN
)) {
3047 err
= reconfigure_super(fc
);
3050 set_mount_attributes(mnt
, mnt_flags
);
3051 unlock_mount_hash();
3054 up_write(&sb
->s_umount
);
3057 mnt_warn_timestamp_expiry(path
, &mnt
->mnt
);
static inline int tree_contains_unbindable(struct mount *mnt)
{
	struct mount *p;

	for (p = mnt; p; p = next_mnt(p, mnt)) {
		if (IS_MNT_UNBINDABLE(p))
			return 1;
	}
	return 0;
}
/*
 * Check that there aren't references to earlier/same mount namespaces in the
 * specified subtree.  Such references can act as pins for mount namespaces
 * that aren't checked by the mount-cycle checking code, thereby allowing
 * cycles to be made.
 */
static bool check_for_nsfs_mounts(struct mount *subtree)
{
	struct mount *p;
	bool ret = false;

	lock_mount_hash();
	for (p = subtree; p; p = next_mnt(p, subtree))
		if (mnt_ns_loop(p->mnt.mnt_root))
			goto out;

	ret = true;
out:
	unlock_mount_hash();
	return ret;
}
3095 static int do_set_group(struct path
*from_path
, struct path
*to_path
)
3097 struct mount
*from
, *to
;
3100 from
= real_mount(from_path
->mnt
);
3101 to
= real_mount(to_path
->mnt
);
3106 /* To and From must be mounted */
3107 if (!is_mounted(&from
->mnt
))
3109 if (!is_mounted(&to
->mnt
))
3113 /* We should be allowed to modify mount namespaces of both mounts */
3114 if (!ns_capable(from
->mnt_ns
->user_ns
, CAP_SYS_ADMIN
))
3116 if (!ns_capable(to
->mnt_ns
->user_ns
, CAP_SYS_ADMIN
))
3120 /* To and From paths should be mount roots */
3121 if (!path_mounted(from_path
))
3123 if (!path_mounted(to_path
))
3126 /* Setting sharing groups is only allowed across same superblock */
3127 if (from
->mnt
.mnt_sb
!= to
->mnt
.mnt_sb
)
3130 /* From mount root should be wider than To mount root */
3131 if (!is_subdir(to
->mnt
.mnt_root
, from
->mnt
.mnt_root
))
3134 /* From mount should not have locked children in place of To's root */
3135 if (has_locked_children(from
, to
->mnt
.mnt_root
))
3138 /* Setting sharing groups is only allowed on private mounts */
3139 if (IS_MNT_SHARED(to
) || IS_MNT_SLAVE(to
))
3142 /* From should not be private */
3143 if (!IS_MNT_SHARED(from
) && !IS_MNT_SLAVE(from
))
3146 if (IS_MNT_SLAVE(from
)) {
3147 struct mount
*m
= from
->mnt_master
;
3149 list_add(&to
->mnt_slave
, &m
->mnt_slave_list
);
3153 if (IS_MNT_SHARED(from
)) {
3154 to
->mnt_group_id
= from
->mnt_group_id
;
3155 list_add(&to
->mnt_share
, &from
->mnt_share
);
3158 unlock_mount_hash();
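/*
 * Illustrative userspace sketch (not part of this file): do_set_group() is
 * reached via move_mount(2) with MOVE_MOUNT_SET_GROUP, which copies the peer
 * group / slave relationship of one mount onto another mount of the same
 * superblock instead of moving anything.  Raw syscall use and paths are
 * examples only.
 */
#include <fcntl.h>		/* AT_FDCWD */
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/mount.h>	/* MOVE_MOUNT_SET_GROUP */

int set_group_example(void)
{
	if (syscall(SYS_move_mount, AT_FDCWD, "/orig", AT_FDCWD, "/copy",
		    MOVE_MOUNT_SET_GROUP) == -1) {
		perror("move_mount(MOVE_MOUNT_SET_GROUP)");
		return -1;
	}
	return 0;
}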
/**
 * path_overmounted - check if path is overmounted
 * @path: path to check
 *
 * Check if path is overmounted, i.e., if there's a mount on top of
 * @path->mnt with @path->dentry as mountpoint.
 *
 * Context: This function expects namespace_lock() to be held.
 * Return: If path is overmounted true is returned, false if not.
 */
static inline bool path_overmounted(const struct path *path)
{
	rcu_read_lock();
	if (unlikely(__lookup_mnt(path->mnt, path->dentry))) {
		rcu_read_unlock();
		return true;
	}
	rcu_read_unlock();
	return false;
}
/**
 * can_move_mount_beneath - check that we can mount beneath the top mount
 * @from: mount to mount beneath
 * @to:   mount under which to mount
 * @mp:   mountpoint of @to
 *
 * - Make sure that @to->dentry is actually the root of a mount under
 *   which we can mount another mount.
 * - Make sure that nothing can be mounted beneath the caller's current
 *   root or the rootfs of the namespace.
 * - Make sure that the caller can unmount the topmost mount ensuring
 *   that the caller could reveal the underlying mountpoint.
 * - Ensure that nothing has been mounted on top of @from before we
 *   grabbed @namespace_sem to avoid creating pointless shadow mounts.
 * - Prevent mounting beneath a mount if the propagation relationship
 *   between the source mount, parent mount, and top mount would lead to
 *   nonsensical mount trees.
 *
 * Context: This function expects namespace_lock() to be held.
 * Return: On success 0, and on error a negative error code is returned.
 */
3209 static int can_move_mount_beneath(const struct path
*from
,
3210 const struct path
*to
,
3211 const struct mountpoint
*mp
)
3213 struct mount
*mnt_from
= real_mount(from
->mnt
),
3214 *mnt_to
= real_mount(to
->mnt
),
3215 *parent_mnt_to
= mnt_to
->mnt_parent
;
3217 if (!mnt_has_parent(mnt_to
))
3220 if (!path_mounted(to
))
3223 if (IS_MNT_LOCKED(mnt_to
))
3226 /* Avoid creating shadow mounts during mount propagation. */
3227 if (path_overmounted(from
))
3231 * Mounting beneath the rootfs only makes sense when the
3232 * semantics of pivot_root(".", ".") are used.
3234 if (&mnt_to
->mnt
== current
->fs
->root
.mnt
)
3236 if (parent_mnt_to
== current
->nsproxy
->mnt_ns
->root
)
3239 for (struct mount
*p
= mnt_from
; mnt_has_parent(p
); p
= p
->mnt_parent
)
3244 * If the parent mount propagates to the child mount this would
3245 * mean mounting @mnt_from on @mnt_to->mnt_parent and then
3246 * propagating a copy @c of @mnt_from on top of @mnt_to. This
3247 * defeats the whole purpose of mounting beneath another mount.
3249 if (propagation_would_overmount(parent_mnt_to
, mnt_to
, mp
))
3253 * If @mnt_to->mnt_parent propagates to @mnt_from this would
3254 * mean propagating a copy @c of @mnt_from on top of @mnt_from.
3255 * Afterwards @mnt_from would be mounted on top of
3256 * @mnt_to->mnt_parent and @mnt_to would be unmounted from
3257 * @mnt->mnt_parent and remounted on @mnt_from. But since @c is
3258 * already mounted on @mnt_from, @mnt_to would ultimately be
3259 * remounted on top of @c. Afterwards, @mnt_from would be
3260 * covered by a copy @c of @mnt_from and @c would be covered by
3261 * @mnt_from itself. This defeats the whole purpose of mounting
3262 * @mnt_from beneath @mnt_to.
3264 if (propagation_would_overmount(parent_mnt_to
, mnt_from
, mp
))
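/*
 * Illustrative userspace sketch (not part of this file): mounting beneath the
 * top mount is requested with move_mount(2) and MOVE_MOUNT_BENEATH; the checks
 * in can_move_mount_beneath() decide whether such a request is allowed.  Raw
 * syscall use and the target path are examples only; @detached_fd is assumed
 * to come from open_tree(OPEN_TREE_CLONE) or fsmount().
 */
#include <fcntl.h>		/* AT_FDCWD */
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/mount.h>	/* MOVE_MOUNT_BENEATH, MOVE_MOUNT_F_EMPTY_PATH */

int mount_beneath_example(int detached_fd)
{
	/* attach the detached tree underneath the mount covering /mnt */
	if (syscall(SYS_move_mount, detached_fd, "", AT_FDCWD, "/mnt",
		    MOVE_MOUNT_F_EMPTY_PATH | MOVE_MOUNT_BENEATH) == -1) {
		perror("move_mount(MOVE_MOUNT_BENEATH)");
		return -1;
	}
	return 0;
}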
3270 static int do_move_mount(struct path
*old_path
, struct path
*new_path
,
3273 struct mnt_namespace
*ns
;
3276 struct mount
*parent
;
3277 struct mountpoint
*mp
, *old_mp
;
3280 enum mnt_tree_flags_t flags
= 0;
3282 mp
= do_lock_mount(new_path
, beneath
);
3286 old
= real_mount(old_path
->mnt
);
3287 p
= real_mount(new_path
->mnt
);
3288 parent
= old
->mnt_parent
;
3289 attached
= mnt_has_parent(old
);
3291 flags
|= MNT_TREE_MOVE
;
3292 old_mp
= old
->mnt_mp
;
3296 /* The mountpoint must be in our namespace. */
3300 /* The thing moved must be mounted... */
3301 if (!is_mounted(&old
->mnt
))
3304 /* ... and either ours or the root of anon namespace */
3305 if (!(attached
? check_mnt(old
) : is_anon_ns(ns
)))
3308 if (old
->mnt
.mnt_flags
& MNT_LOCKED
)
3311 if (!path_mounted(old_path
))
3314 if (d_is_dir(new_path
->dentry
) !=
3315 d_is_dir(old_path
->dentry
))
3318 * Don't move a mount residing in a shared parent.
3320 if (attached
&& IS_MNT_SHARED(parent
))
3324 err
= can_move_mount_beneath(old_path
, new_path
, mp
);
3330 flags
|= MNT_TREE_BENEATH
;
3334 * Don't move a mount tree containing unbindable mounts to a destination
3335 * mount which is shared.
3337 if (IS_MNT_SHARED(p
) && tree_contains_unbindable(old
))
3340 if (!check_for_nsfs_mounts(old
))
3342 for (; mnt_has_parent(p
); p
= p
->mnt_parent
)
3346 err
= attach_recursive_mnt(old
, real_mount(new_path
->mnt
), mp
, flags
);
3350 /* if the mount is moved, it should no longer be expire
3352 list_del_init(&old
->mnt_expire
);
3354 put_mountpoint(old_mp
);
3359 mntput_no_expire(parent
);
3366 static int do_move_mount_old(struct path
*path
, const char *old_name
)
3368 struct path old_path
;
3371 if (!old_name
|| !*old_name
)
3374 err
= kern_path(old_name
, LOOKUP_FOLLOW
, &old_path
);
3378 err
= do_move_mount(&old_path
, path
, false);
3379 path_put(&old_path
);
3384 * add a mount into a namespace's mount tree
3386 static int do_add_mount(struct mount
*newmnt
, struct mountpoint
*mp
,
3387 const struct path
*path
, int mnt_flags
)
3389 struct mount
*parent
= real_mount(path
->mnt
);
3391 mnt_flags
&= ~MNT_INTERNAL_FLAGS
;
3393 if (unlikely(!check_mnt(parent
))) {
3394 /* that's acceptable only for automounts done in private ns */
3395 if (!(mnt_flags
& MNT_SHRINKABLE
))
3397 /* ... and for those we'd better have mountpoint still alive */
3398 if (!parent
->mnt_ns
)
3402 /* Refuse the same filesystem on the same mount point */
3403 if (path
->mnt
->mnt_sb
== newmnt
->mnt
.mnt_sb
&& path_mounted(path
))
3406 if (d_is_symlink(newmnt
->mnt
.mnt_root
))
3409 newmnt
->mnt
.mnt_flags
= mnt_flags
;
3410 return graft_tree(newmnt
, parent
, mp
);
3413 static bool mount_too_revealing(const struct super_block
*sb
, int *new_mnt_flags
);
3416 * Create a new mount using a superblock configuration and request it
3417 * be added to the namespace tree.
3419 static int do_new_mount_fc(struct fs_context
*fc
, struct path
*mountpoint
,
3420 unsigned int mnt_flags
)
3422 struct vfsmount
*mnt
;
3423 struct mountpoint
*mp
;
3424 struct super_block
*sb
= fc
->root
->d_sb
;
3427 error
= security_sb_kern_mount(sb
);
3428 if (!error
&& mount_too_revealing(sb
, &mnt_flags
))
3431 if (unlikely(error
)) {
3436 up_write(&sb
->s_umount
);
3438 mnt
= vfs_create_mount(fc
);
3440 return PTR_ERR(mnt
);
3442 mnt_warn_timestamp_expiry(mountpoint
, mnt
);
3444 mp
= lock_mount(mountpoint
);
3449 error
= do_add_mount(real_mount(mnt
), mp
, mountpoint
, mnt_flags
);
3457 * create a new mount for userspace and request it to be added into the
3460 static int do_new_mount(struct path
*path
, const char *fstype
, int sb_flags
,
3461 int mnt_flags
, const char *name
, void *data
)
3463 struct file_system_type
*type
;
3464 struct fs_context
*fc
;
3465 const char *subtype
= NULL
;
3471 type
= get_fs_type(fstype
);
3475 if (type
->fs_flags
& FS_HAS_SUBTYPE
) {
3476 subtype
= strchr(fstype
, '.');
3480 put_filesystem(type
);
3486 fc
= fs_context_for_mount(type
, sb_flags
);
3487 put_filesystem(type
);
3492 * Indicate to the filesystem that the mount request is coming
3493 * from the legacy mount system call.
3498 err
= vfs_parse_fs_string(fc
, "subtype",
3499 subtype
, strlen(subtype
));
3501 err
= vfs_parse_fs_string(fc
, "source", name
, strlen(name
));
3503 err
= parse_monolithic_mount_data(fc
, data
);
3504 if (!err
&& !mount_capable(fc
))
3507 err
= vfs_get_tree(fc
);
3509 err
= do_new_mount_fc(fc
, path
, mnt_flags
);
3515 int finish_automount(struct vfsmount
*m
, const struct path
*path
)
3517 struct dentry
*dentry
= path
->dentry
;
3518 struct mountpoint
*mp
;
3527 mnt
= real_mount(m
);
3528 /* The new mount record should have at least 2 refs to prevent it being
3529 * expired before we get a chance to add it
3531 BUG_ON(mnt_get_count(mnt
) < 2);
3533 if (m
->mnt_sb
== path
->mnt
->mnt_sb
&&
3534 m
->mnt_root
== dentry
) {
3540 * we don't want to use lock_mount() - in this case finding something
3541 * that overmounts our mountpoint to be means "quitely drop what we've
3542 * got", not "try to mount it on top".
3544 inode_lock(dentry
->d_inode
);
3546 if (unlikely(cant_mount(dentry
))) {
3548 goto discard_locked
;
3550 if (path_overmounted(path
)) {
3552 goto discard_locked
;
3554 mp
= get_mountpoint(dentry
);
3557 goto discard_locked
;
3560 err
= do_add_mount(mnt
, mp
, path
, path
->mnt
->mnt_flags
| MNT_SHRINKABLE
);
3569 inode_unlock(dentry
->d_inode
);
3571 /* remove m from any expiration list it may be on */
3572 if (!list_empty(&mnt
->mnt_expire
)) {
3574 list_del_init(&mnt
->mnt_expire
);
/**
 * mnt_set_expiry - Put a mount on an expiration list
 * @mnt: The mount to list.
 * @expiry_list: The list to add the mount to.
 */
void mnt_set_expiry(struct vfsmount *mnt, struct list_head *expiry_list)
{
	namespace_lock();

	list_add_tail(&real_mount(mnt)->mnt_expire, expiry_list);

	namespace_unlock();
}
EXPORT_SYMBOL(mnt_set_expiry);
3598 * process a list of expirable mountpoints with the intent of discarding any
3599 * mountpoints that aren't in use and haven't been touched since last we came
3602 void mark_mounts_for_expiry(struct list_head
*mounts
)
3604 struct mount
*mnt
, *next
;
3605 LIST_HEAD(graveyard
);
3607 if (list_empty(mounts
))
3613 /* extract from the expiration list every vfsmount that matches the
3614 * following criteria:
3615 * - only referenced by its parent vfsmount
3616 * - still marked for expiry (marked on the last call here; marks are
3617 * cleared by mntput())
3619 list_for_each_entry_safe(mnt
, next
, mounts
, mnt_expire
) {
3620 if (!xchg(&mnt
->mnt_expiry_mark
, 1) ||
3621 propagate_mount_busy(mnt
, 1))
3623 list_move(&mnt
->mnt_expire
, &graveyard
);
3625 while (!list_empty(&graveyard
)) {
3626 mnt
= list_first_entry(&graveyard
, struct mount
, mnt_expire
);
3627 touch_mnt_namespace(mnt
->mnt_ns
);
3628 umount_tree(mnt
, UMOUNT_PROPAGATE
|UMOUNT_SYNC
);
3630 unlock_mount_hash();
3634 EXPORT_SYMBOL_GPL(mark_mounts_for_expiry
);
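/*
 * Illustrative sketch (not part of this file): an automounting filesystem
 * typically keeps its own expiry list, adds every automounted vfsmount to it
 * with mnt_set_expiry(), and calls mark_mounts_for_expiry() from a periodic
 * worker; mounts still busy on the second pass survive, idle ones are
 * unmounted.  The list and worker below are hypothetical.
 */
static LIST_HEAD(example_automount_expiry_list);

static void example_expiry_worker(struct work_struct *work)
{
	mark_mounts_for_expiry(&example_automount_expiry_list);
	/* a real user would re-arm its delayed work here, e.g. every 30s */
}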
3637 * Ripoff of 'select_parent()'
3639 * search the list of submounts for a given mountpoint, and move any
3640 * shrinkable submounts to the 'graveyard' list.
3642 static int select_submounts(struct mount
*parent
, struct list_head
*graveyard
)
3644 struct mount
*this_parent
= parent
;
3645 struct list_head
*next
;
3649 next
= this_parent
->mnt_mounts
.next
;
3651 while (next
!= &this_parent
->mnt_mounts
) {
3652 struct list_head
*tmp
= next
;
3653 struct mount
*mnt
= list_entry(tmp
, struct mount
, mnt_child
);
3656 if (!(mnt
->mnt
.mnt_flags
& MNT_SHRINKABLE
))
3659 * Descend a level if the d_mounts list is non-empty.
3661 if (!list_empty(&mnt
->mnt_mounts
)) {
3666 if (!propagate_mount_busy(mnt
, 1)) {
3667 list_move_tail(&mnt
->mnt_expire
, graveyard
);
3672 * All done at this level ... ascend and resume the search
3674 if (this_parent
!= parent
) {
3675 next
= this_parent
->mnt_child
.next
;
3676 this_parent
= this_parent
->mnt_parent
;
3683 * process a list of expirable mountpoints with the intent of discarding any
3684 * submounts of a specific parent mountpoint
3686 * mount_lock must be held for write
3688 static void shrink_submounts(struct mount
*mnt
)
3690 LIST_HEAD(graveyard
);
3693 /* extract submounts of 'mountpoint' from the expiration list */
3694 while (select_submounts(mnt
, &graveyard
)) {
3695 while (!list_empty(&graveyard
)) {
3696 m
= list_first_entry(&graveyard
, struct mount
,
3698 touch_mnt_namespace(m
->mnt_ns
);
3699 umount_tree(m
, UMOUNT_PROPAGATE
|UMOUNT_SYNC
);
3704 static void *copy_mount_options(const void __user
* data
)
3707 unsigned left
, offset
;
3712 copy
= kmalloc(PAGE_SIZE
, GFP_KERNEL
);
3714 return ERR_PTR(-ENOMEM
);
3716 left
= copy_from_user(copy
, data
, PAGE_SIZE
);
3719 * Not all architectures have an exact copy_from_user(). Resort to
3722 offset
= PAGE_SIZE
- left
;
3725 if (get_user(c
, (const char __user
*)data
+ offset
))
3732 if (left
== PAGE_SIZE
) {
3734 return ERR_PTR(-EFAULT
);
static char *copy_mount_string(const void __user *data)
{
	return data ? strndup_user(data, PATH_MAX) : NULL;
}
/*
 * Flags is a 32-bit value that allows up to 31 non-fs dependent flags to
 * be given to the mount() call (ie: read-only, no-dev, no-suid etc).
 *
 * data is a (void *) that can point to any structure up to
 * PAGE_SIZE-1 bytes, which can contain arbitrary fs-dependent
 * information (or be NULL).
 *
 * Pre-0.97 versions of mount() didn't have a flags word.
 * When the flags word was introduced its top half was required
 * to have the magic value 0xC0ED, and this remained so until 2.4.0-test9.
 * Therefore, if this magic number is present, it carries no information
 * and must be discarded.
 */
3759 int path_mount(const char *dev_name
, struct path
*path
,
3760 const char *type_page
, unsigned long flags
, void *data_page
)
3762 unsigned int mnt_flags
= 0, sb_flags
;
3766 if ((flags
& MS_MGC_MSK
) == MS_MGC_VAL
)
3767 flags
&= ~MS_MGC_MSK
;
3769 /* Basic sanity checks */
3771 ((char *)data_page
)[PAGE_SIZE
- 1] = 0;
3773 if (flags
& MS_NOUSER
)
3776 ret
= security_sb_mount(dev_name
, path
, type_page
, flags
, data_page
);
3781 if (flags
& SB_MANDLOCK
)
3784 /* Default to relatime unless overriden */
3785 if (!(flags
& MS_NOATIME
))
3786 mnt_flags
|= MNT_RELATIME
;
3788 /* Separate the per-mountpoint flags */
3789 if (flags
& MS_NOSUID
)
3790 mnt_flags
|= MNT_NOSUID
;
3791 if (flags
& MS_NODEV
)
3792 mnt_flags
|= MNT_NODEV
;
3793 if (flags
& MS_NOEXEC
)
3794 mnt_flags
|= MNT_NOEXEC
;
3795 if (flags
& MS_NOATIME
)
3796 mnt_flags
|= MNT_NOATIME
;
3797 if (flags
& MS_NODIRATIME
)
3798 mnt_flags
|= MNT_NODIRATIME
;
3799 if (flags
& MS_STRICTATIME
)
3800 mnt_flags
&= ~(MNT_RELATIME
| MNT_NOATIME
);
3801 if (flags
& MS_RDONLY
)
3802 mnt_flags
|= MNT_READONLY
;
3803 if (flags
& MS_NOSYMFOLLOW
)
3804 mnt_flags
|= MNT_NOSYMFOLLOW
;
3806 /* The default atime for remount is preservation */
3807 if ((flags
& MS_REMOUNT
) &&
3808 ((flags
& (MS_NOATIME
| MS_NODIRATIME
| MS_RELATIME
|
3809 MS_STRICTATIME
)) == 0)) {
3810 mnt_flags
&= ~MNT_ATIME_MASK
;
3811 mnt_flags
|= path
->mnt
->mnt_flags
& MNT_ATIME_MASK
;
3814 sb_flags
= flags
& (SB_RDONLY
|
3823 if ((flags
& (MS_REMOUNT
| MS_BIND
)) == (MS_REMOUNT
| MS_BIND
))
3824 return do_reconfigure_mnt(path
, mnt_flags
);
3825 if (flags
& MS_REMOUNT
)
3826 return do_remount(path
, flags
, sb_flags
, mnt_flags
, data_page
);
3827 if (flags
& MS_BIND
)
3828 return do_loopback(path
, dev_name
, flags
& MS_REC
);
3829 if (flags
& (MS_SHARED
| MS_PRIVATE
| MS_SLAVE
| MS_UNBINDABLE
))
3830 return do_change_type(path
, flags
);
3831 if (flags
& MS_MOVE
)
3832 return do_move_mount_old(path
, dev_name
);
3834 return do_new_mount(path
, type_page
, sb_flags
, mnt_flags
, dev_name
,
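/*
 * Illustrative userspace sketch (not part of this file): a single mount(2)
 * call carries superblock flags (e.g. MS_RDONLY), per-mountpoint flags
 * (e.g. MS_NOSUID, MS_NODEV) and filesystem-specific options in the data
 * string, which is exactly the split performed by path_mount() above.
 * Device, path and options are examples only.
 */
#include <stdio.h>
#include <sys/mount.h>

int new_mount_example(void)
{
	if (mount("/dev/sdb1", "/mnt/data", "ext4",
		  MS_RDONLY | MS_NOSUID | MS_NODEV,
		  "errors=remount-ro") == -1) {
		perror("mount");
		return -1;
	}
	return 0;
}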
3838 long do_mount(const char *dev_name
, const char __user
*dir_name
,
3839 const char *type_page
, unsigned long flags
, void *data_page
)
3844 ret
= user_path_at(AT_FDCWD
, dir_name
, LOOKUP_FOLLOW
, &path
);
3847 ret
= path_mount(dev_name
, &path
, type_page
, flags
, data_page
);
3852 static struct ucounts
*inc_mnt_namespaces(struct user_namespace
*ns
)
3854 return inc_ucount(ns
, current_euid(), UCOUNT_MNT_NAMESPACES
);
3857 static void dec_mnt_namespaces(struct ucounts
*ucounts
)
3859 dec_ucount(ucounts
, UCOUNT_MNT_NAMESPACES
);
3862 static void free_mnt_ns(struct mnt_namespace
*ns
)
3864 if (!is_anon_ns(ns
))
3865 ns_free_inum(&ns
->ns
);
3866 dec_mnt_namespaces(ns
->ucounts
);
3867 mnt_ns_tree_remove(ns
);
3871 * Assign a sequence number so we can detect when we attempt to bind
3872 * mount a reference to an older mount namespace into the current
3873 * mount namespace, preventing reference counting loops. A 64bit
3874 * number incrementing at 10Ghz will take 12,427 years to wrap which
3875 * is effectively never, so we can ignore the possibility.
3877 static atomic64_t mnt_ns_seq
= ATOMIC64_INIT(1);
3879 static struct mnt_namespace
*alloc_mnt_ns(struct user_namespace
*user_ns
, bool anon
)
3881 struct mnt_namespace
*new_ns
;
3882 struct ucounts
*ucounts
;
3885 ucounts
= inc_mnt_namespaces(user_ns
);
3887 return ERR_PTR(-ENOSPC
);
3889 new_ns
= kzalloc(sizeof(struct mnt_namespace
), GFP_KERNEL_ACCOUNT
);
3891 dec_mnt_namespaces(ucounts
);
3892 return ERR_PTR(-ENOMEM
);
3895 ret
= ns_alloc_inum(&new_ns
->ns
);
3898 dec_mnt_namespaces(ucounts
);
3899 return ERR_PTR(ret
);
3902 new_ns
->ns
.ops
= &mntns_operations
;
3904 new_ns
->seq
= atomic64_inc_return(&mnt_ns_seq
);
3905 refcount_set(&new_ns
->ns
.count
, 1);
3906 refcount_set(&new_ns
->passive
, 1);
3907 new_ns
->mounts
= RB_ROOT
;
3908 RB_CLEAR_NODE(&new_ns
->mnt_ns_tree_node
);
3909 init_waitqueue_head(&new_ns
->poll
);
3910 new_ns
->user_ns
= get_user_ns(user_ns
);
3911 new_ns
->ucounts
= ucounts
;
3916 struct mnt_namespace
*copy_mnt_ns(unsigned long flags
, struct mnt_namespace
*ns
,
3917 struct user_namespace
*user_ns
, struct fs_struct
*new_fs
)
3919 struct mnt_namespace
*new_ns
;
3920 struct vfsmount
*rootmnt
= NULL
, *pwdmnt
= NULL
;
3921 struct mount
*p
, *q
;
3928 if (likely(!(flags
& CLONE_NEWNS
))) {
3935 new_ns
= alloc_mnt_ns(user_ns
, false);
3940 /* First pass: copy the tree topology */
3941 copy_flags
= CL_COPY_UNBINDABLE
| CL_EXPIRE
;
3942 if (user_ns
!= ns
->user_ns
)
3943 copy_flags
|= CL_SHARED_TO_SLAVE
;
3944 new = copy_tree(old
, old
->mnt
.mnt_root
, copy_flags
);
3947 ns_free_inum(&new_ns
->ns
);
3948 dec_mnt_namespaces(new_ns
->ucounts
);
3949 mnt_ns_release(new_ns
);
3950 return ERR_CAST(new);
3952 if (user_ns
!= ns
->user_ns
) {
3955 unlock_mount_hash();
3960 * Second pass: switch the tsk->fs->* elements and mark new vfsmounts
3961 * as belonging to new namespace. We have already acquired a private
3962 * fs_struct, so tsk->fs->lock is not needed.
3967 mnt_add_to_ns(new_ns
, q
);
3968 new_ns
->nr_mounts
++;
3970 if (&p
->mnt
== new_fs
->root
.mnt
) {
3971 new_fs
->root
.mnt
= mntget(&q
->mnt
);
3974 if (&p
->mnt
== new_fs
->pwd
.mnt
) {
3975 new_fs
->pwd
.mnt
= mntget(&q
->mnt
);
3979 p
= next_mnt(p
, old
);
3980 q
= next_mnt(q
, new);
3983 // an mntns binding we'd skipped?
3984 while (p
->mnt
.mnt_root
!= q
->mnt
.mnt_root
)
3985 p
= next_mnt(skip_mnt_tree(p
), old
);
3987 mnt_ns_tree_add(new_ns
);
3998 struct dentry
*mount_subtree(struct vfsmount
*m
, const char *name
)
4000 struct mount
*mnt
= real_mount(m
);
4001 struct mnt_namespace
*ns
;
4002 struct super_block
*s
;
4006 ns
= alloc_mnt_ns(&init_user_ns
, true);
4009 return ERR_CAST(ns
);
4013 mnt_add_to_ns(ns
, mnt
);
4015 err
= vfs_path_lookup(m
->mnt_root
, m
,
4016 name
, LOOKUP_FOLLOW
|LOOKUP_AUTOMOUNT
, &path
);
4021 return ERR_PTR(err
);
4023 /* trade a vfsmount reference for active sb one */
4024 s
= path
.mnt
->mnt_sb
;
4025 atomic_inc(&s
->s_active
);
4027 /* lock the sucker */
4028 down_write(&s
->s_umount
);
4029 /* ... and return the root of (sub)tree on it */
4032 EXPORT_SYMBOL(mount_subtree
);
4034 SYSCALL_DEFINE5(mount
, char __user
*, dev_name
, char __user
*, dir_name
,
4035 char __user
*, type
, unsigned long, flags
, void __user
*, data
)
4042 kernel_type
= copy_mount_string(type
);
4043 ret
= PTR_ERR(kernel_type
);
4044 if (IS_ERR(kernel_type
))
4047 kernel_dev
= copy_mount_string(dev_name
);
4048 ret
= PTR_ERR(kernel_dev
);
4049 if (IS_ERR(kernel_dev
))
4052 options
= copy_mount_options(data
);
4053 ret
= PTR_ERR(options
);
4054 if (IS_ERR(options
))
4057 ret
= do_mount(kernel_dev
, dir_name
, kernel_type
, flags
, options
);
#define FSMOUNT_VALID_FLAGS                                                    \
	(MOUNT_ATTR_RDONLY | MOUNT_ATTR_NOSUID | MOUNT_ATTR_NODEV |           \
	 MOUNT_ATTR_NOEXEC | MOUNT_ATTR__ATIME | MOUNT_ATTR_NODIRATIME |      \
	 MOUNT_ATTR_NOSYMFOLLOW)

#define MOUNT_SETATTR_VALID_FLAGS (FSMOUNT_VALID_FLAGS | MOUNT_ATTR_IDMAP)

#define MOUNT_SETATTR_PROPAGATION_FLAGS \
	(MS_UNBINDABLE | MS_PRIVATE | MS_SLAVE | MS_SHARED)
static unsigned int attr_flags_to_mnt_flags(u64 attr_flags)
{
	unsigned int mnt_flags = 0;

	if (attr_flags & MOUNT_ATTR_RDONLY)
		mnt_flags |= MNT_READONLY;
	if (attr_flags & MOUNT_ATTR_NOSUID)
		mnt_flags |= MNT_NOSUID;
	if (attr_flags & MOUNT_ATTR_NODEV)
		mnt_flags |= MNT_NODEV;
	if (attr_flags & MOUNT_ATTR_NOEXEC)
		mnt_flags |= MNT_NOEXEC;
	if (attr_flags & MOUNT_ATTR_NODIRATIME)
		mnt_flags |= MNT_NODIRATIME;
	if (attr_flags & MOUNT_ATTR_NOSYMFOLLOW)
		mnt_flags |= MNT_NOSYMFOLLOW;

	return mnt_flags;
}
4099 * Create a kernel mount representation for a new, prepared superblock
4100 * (specified by fs_fd) and attach to an open_tree-like file descriptor.
4102 SYSCALL_DEFINE3(fsmount
, int, fs_fd
, unsigned int, flags
,
4103 unsigned int, attr_flags
)
4105 struct mnt_namespace
*ns
;
4106 struct fs_context
*fc
;
4108 struct path newmount
;
4110 unsigned int mnt_flags
= 0;
4116 if ((flags
& ~(FSMOUNT_CLOEXEC
)) != 0)
4119 if (attr_flags
& ~FSMOUNT_VALID_FLAGS
)
4122 mnt_flags
= attr_flags_to_mnt_flags(attr_flags
);
4124 switch (attr_flags
& MOUNT_ATTR__ATIME
) {
4125 case MOUNT_ATTR_STRICTATIME
:
4127 case MOUNT_ATTR_NOATIME
:
4128 mnt_flags
|= MNT_NOATIME
;
4130 case MOUNT_ATTR_RELATIME
:
4131 mnt_flags
|= MNT_RELATIME
;
4137 CLASS(fd
, f
)(fs_fd
);
4141 if (fd_file(f
)->f_op
!= &fscontext_fops
)
4144 fc
= fd_file(f
)->private_data
;
4146 ret
= mutex_lock_interruptible(&fc
->uapi_mutex
);
4150 /* There must be a valid superblock or we can't mount it */
4156 if (mount_too_revealing(fc
->root
->d_sb
, &mnt_flags
)) {
4157 pr_warn("VFS: Mount too revealing\n");
4162 if (fc
->phase
!= FS_CONTEXT_AWAITING_MOUNT
)
4165 if (fc
->sb_flags
& SB_MANDLOCK
)
4168 newmount
.mnt
= vfs_create_mount(fc
);
4169 if (IS_ERR(newmount
.mnt
)) {
4170 ret
= PTR_ERR(newmount
.mnt
);
4173 newmount
.dentry
= dget(fc
->root
);
4174 newmount
.mnt
->mnt_flags
= mnt_flags
;
4176 /* We've done the mount bit - now move the file context into more or
4177 * less the same state as if we'd done an fspick(). We don't want to
4178 * do any memory allocation or anything like that at this point as we
4179 * don't want to have to handle any errors incurred.
4181 vfs_clean_context(fc
);
4183 ns
= alloc_mnt_ns(current
->nsproxy
->mnt_ns
->user_ns
, true);
4188 mnt
= real_mount(newmount
.mnt
);
4191 mnt_add_to_ns(ns
, mnt
);
4192 mntget(newmount
.mnt
);
4194 /* Attach to an apparent O_PATH fd with a note that we need to unmount
4195 * it, not just simply put it.
4197 file
= dentry_open(&newmount
, O_PATH
, fc
->cred
);
4199 dissolve_on_fput(newmount
.mnt
);
4200 ret
= PTR_ERR(file
);
4203 file
->f_mode
|= FMODE_NEED_UNMOUNT
;
4205 ret
= get_unused_fd_flags((flags
& FSMOUNT_CLOEXEC
) ? O_CLOEXEC
: 0);
4207 fd_install(ret
, file
);
4212 path_put(&newmount
);
4214 mutex_unlock(&fc
->uapi_mutex
);
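/*
 * Illustrative userspace sketch (not part of this file): the new mount API
 * splits mount(2) into fsopen() + fsconfig() to build a superblock, fsmount()
 * to turn it into a detached mount object, and move_mount() to attach it.
 * Raw syscall numbers are used because older libcs lack wrappers; the
 * filesystem, source device and target path below are examples only.
 */
#include <fcntl.h>		/* AT_FDCWD */
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/mount.h>	/* FSOPEN_*, FSCONFIG_*, FSMOUNT_*, MOVE_MOUNT_* */

int fsmount_example(void)
{
	int fsfd, mntfd;

	fsfd = syscall(SYS_fsopen, "ext4", FSOPEN_CLOEXEC);
	if (fsfd == -1) {
		perror("fsopen");
		return -1;
	}
	syscall(SYS_fsconfig, fsfd, FSCONFIG_SET_STRING, "source", "/dev/sdb1", 0);
	syscall(SYS_fsconfig, fsfd, FSCONFIG_SET_FLAG, "ro", NULL, 0);
	if (syscall(SYS_fsconfig, fsfd, FSCONFIG_CMD_CREATE, NULL, NULL, 0) == -1) {
		perror("fsconfig(FSCONFIG_CMD_CREATE)");
		close(fsfd);
		return -1;
	}
	mntfd = syscall(SYS_fsmount, fsfd, FSMOUNT_CLOEXEC, MOUNT_ATTR_NODEV);
	if (mntfd == -1) {
		perror("fsmount");
		close(fsfd);
		return -1;
	}
	if (syscall(SYS_move_mount, mntfd, "", AT_FDCWD, "/mnt/data",
		    MOVE_MOUNT_F_EMPTY_PATH) == -1) {
		perror("move_mount");
		close(mntfd);
		close(fsfd);
		return -1;
	}
	close(mntfd);
	close(fsfd);
	return 0;
}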
4219 * Move a mount from one place to another. In combination with
4220 * fsopen()/fsmount() this is used to install a new mount and in combination
4221 * with open_tree(OPEN_TREE_CLONE [| AT_RECURSIVE]) it can be used to copy
4224 * Note the flags value is a combination of MOVE_MOUNT_* flags.
4226 SYSCALL_DEFINE5(move_mount
,
4227 int, from_dfd
, const char __user
*, from_pathname
,
4228 int, to_dfd
, const char __user
*, to_pathname
,
4229 unsigned int, flags
)
4231 struct path from_path
, to_path
;
4232 unsigned int lflags
;
4238 if (flags
& ~MOVE_MOUNT__MASK
)
4241 if ((flags
& (MOVE_MOUNT_BENEATH
| MOVE_MOUNT_SET_GROUP
)) ==
4242 (MOVE_MOUNT_BENEATH
| MOVE_MOUNT_SET_GROUP
))
4245 /* If someone gives a pathname, they aren't permitted to move
4246 * from an fd that requires unmount as we can't get at the flag
4247 * to clear it afterwards.
4250 if (flags
& MOVE_MOUNT_F_SYMLINKS
) lflags
|= LOOKUP_FOLLOW
;
4251 if (flags
& MOVE_MOUNT_F_AUTOMOUNTS
) lflags
|= LOOKUP_AUTOMOUNT
;
4252 if (flags
& MOVE_MOUNT_F_EMPTY_PATH
) lflags
|= LOOKUP_EMPTY
;
4254 ret
= user_path_at(from_dfd
, from_pathname
, lflags
, &from_path
);
4259 if (flags
& MOVE_MOUNT_T_SYMLINKS
) lflags
|= LOOKUP_FOLLOW
;
4260 if (flags
& MOVE_MOUNT_T_AUTOMOUNTS
) lflags
|= LOOKUP_AUTOMOUNT
;
4261 if (flags
& MOVE_MOUNT_T_EMPTY_PATH
) lflags
|= LOOKUP_EMPTY
;
4263 ret
= user_path_at(to_dfd
, to_pathname
, lflags
, &to_path
);
4267 ret
= security_move_mount(&from_path
, &to_path
);
4271 if (flags
& MOVE_MOUNT_SET_GROUP
)
4272 ret
= do_set_group(&from_path
, &to_path
);
4274 ret
= do_move_mount(&from_path
, &to_path
,
4275 (flags
& MOVE_MOUNT_BENEATH
));
4280 path_put(&from_path
);
/*
 * Return true if path is reachable from root
 *
 * namespace_sem or mount_lock is held
 */
bool is_path_reachable(struct mount *mnt, struct dentry *dentry,
			 const struct path *root)
{
	while (&mnt->mnt != root->mnt && mnt_has_parent(mnt)) {
		dentry = mnt->mnt_mountpoint;
		mnt = mnt->mnt_parent;
	}
	return &mnt->mnt == root->mnt && is_subdir(dentry, root->dentry);
}

bool path_is_under(const struct path *path1, const struct path *path2)
{
	bool res;

	read_seqlock_excl(&mount_lock);
	res = is_path_reachable(real_mount(path1->mnt), path1->dentry, path2);
	read_sequnlock_excl(&mount_lock);
	return res;
}
EXPORT_SYMBOL(path_is_under);
/*
 * pivot_root Semantics:
 * Moves the root file system of the current process to the directory put_old,
 * makes new_root as the new root file system of the current process, and sets
 * root/cwd of all processes which had them on the current root to new_root.
 *
 * Restrictions:
 * The new_root and put_old must be directories, and must not be on the
 * same file system as the current process root. The put_old must be
 * underneath new_root, i.e. adding a non-zero number of /.. to the string
 * pointed to by put_old must yield the same directory as new_root. No other
 * file system may be mounted on put_old. After all, new_root is a mountpoint.
 *
 * Also, the current root cannot be on the 'rootfs' (initial ramfs) filesystem.
 * See Documentation/filesystems/ramfs-rootfs-initramfs.rst for alternatives
 * in this situation.
 *
 * Notes:
 *  - we don't move root/cwd if they are not at the root (reason: if something
 *    cared enough to change them, it's probably wrong to force them elsewhere)
 *  - it's okay to pick a root that isn't the root of a file system, e.g.
 *    /nfs/my_root where /nfs is the mount point. It must be a mountpoint,
 *    though, so you may need to say mount --bind /nfs/my_root /nfs/my_root
 *    first.
 */
4334 SYSCALL_DEFINE2(pivot_root
, const char __user
*, new_root
,
4335 const char __user
*, put_old
)
4337 struct path
new, old
, root
;
4338 struct mount
*new_mnt
, *root_mnt
, *old_mnt
, *root_parent
, *ex_parent
;
4339 struct mountpoint
*old_mp
, *root_mp
;
4345 error
= user_path_at(AT_FDCWD
, new_root
,
4346 LOOKUP_FOLLOW
| LOOKUP_DIRECTORY
, &new);
4350 error
= user_path_at(AT_FDCWD
, put_old
,
4351 LOOKUP_FOLLOW
| LOOKUP_DIRECTORY
, &old
);
4355 error
= security_sb_pivotroot(&old
, &new);
4359 get_fs_root(current
->fs
, &root
);
4360 old_mp
= lock_mount(&old
);
4361 error
= PTR_ERR(old_mp
);
4366 new_mnt
= real_mount(new.mnt
);
4367 root_mnt
= real_mount(root
.mnt
);
4368 old_mnt
= real_mount(old
.mnt
);
4369 ex_parent
= new_mnt
->mnt_parent
;
4370 root_parent
= root_mnt
->mnt_parent
;
4371 if (IS_MNT_SHARED(old_mnt
) ||
4372 IS_MNT_SHARED(ex_parent
) ||
4373 IS_MNT_SHARED(root_parent
))
4375 if (!check_mnt(root_mnt
) || !check_mnt(new_mnt
))
4377 if (new_mnt
->mnt
.mnt_flags
& MNT_LOCKED
)
4380 if (d_unlinked(new.dentry
))
4383 if (new_mnt
== root_mnt
|| old_mnt
== root_mnt
)
4384 goto out4
; /* loop, on the same file system */
4386 if (!path_mounted(&root
))
4387 goto out4
; /* not a mountpoint */
4388 if (!mnt_has_parent(root_mnt
))
4389 goto out4
; /* not attached */
4390 if (!path_mounted(&new))
4391 goto out4
; /* not a mountpoint */
4392 if (!mnt_has_parent(new_mnt
))
4393 goto out4
; /* not attached */
4394 /* make sure we can reach put_old from new_root */
4395 if (!is_path_reachable(old_mnt
, old
.dentry
, &new))
4397 /* make certain new is below the root */
4398 if (!is_path_reachable(new_mnt
, new.dentry
, &root
))
4401 umount_mnt(new_mnt
);
4402 root_mp
= unhash_mnt(root_mnt
); /* we'll need its mountpoint */
4403 if (root_mnt
->mnt
.mnt_flags
& MNT_LOCKED
) {
4404 new_mnt
->mnt
.mnt_flags
|= MNT_LOCKED
;
4405 root_mnt
->mnt
.mnt_flags
&= ~MNT_LOCKED
;
4407 /* mount old root on put_old */
4408 attach_mnt(root_mnt
, old_mnt
, old_mp
, false);
4409 /* mount new_root on / */
4410 attach_mnt(new_mnt
, root_parent
, root_mp
, false);
4411 mnt_add_count(root_parent
, -1);
4412 touch_mnt_namespace(current
->nsproxy
->mnt_ns
);
4413 /* A moved mount should not expire automatically */
4414 list_del_init(&new_mnt
->mnt_expire
);
4415 put_mountpoint(root_mp
);
4416 unlock_mount_hash();
4417 chroot_fs_refs(&root
, &new);
4420 unlock_mount(old_mp
);
4422 mntput_no_expire(ex_parent
);
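/*
 * Illustrative userspace sketch (not part of this file): the classic
 * container-setup sequence that satisfies the restrictions documented above
 * is to make the new root a mount point via a bind mount, pivot into it, and
 * then lazily detach the old root.  Paths are examples only; glibc has no
 * pivot_root() wrapper, so the raw syscall is used.
 */
#include <stdio.h>
#include <sys/mount.h>
#include <sys/syscall.h>
#include <unistd.h>

int pivot_root_example(void)
{
	/* new_root must be a mount point: bind it onto itself if needed */
	if (mount("/newroot", "/newroot", NULL, MS_BIND | MS_REC, NULL) == -1) {
		perror("mount(MS_BIND)");
		return -1;
	}
	if (syscall(SYS_pivot_root, "/newroot", "/newroot/oldroot") == -1) {
		perror("pivot_root");
		return -1;
	}
	if (chdir("/") == -1)
		return -1;
	/* the old root is now visible at /oldroot and can be detached */
	if (umount2("/oldroot", MNT_DETACH) == -1) {
		perror("umount2(MNT_DETACH)");
		return -1;
	}
	return 0;
}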
4433 static unsigned int recalc_flags(struct mount_kattr
*kattr
, struct mount
*mnt
)
4435 unsigned int flags
= mnt
->mnt
.mnt_flags
;
4437 /* flags to clear */
4438 flags
&= ~kattr
->attr_clr
;
4439 /* flags to raise */
4440 flags
|= kattr
->attr_set
;
4445 static int can_idmap_mount(const struct mount_kattr
*kattr
, struct mount
*mnt
)
4447 struct vfsmount
*m
= &mnt
->mnt
;
4448 struct user_namespace
*fs_userns
= m
->mnt_sb
->s_user_ns
;
4450 if (!kattr
->mnt_idmap
)
4454 * Creating an idmapped mount with the filesystem wide idmapping
4455 * doesn't make sense so block that. We don't allow mushy semantics.
4457 if (kattr
->mnt_userns
== m
->mnt_sb
->s_user_ns
)
4461 * Once a mount has been idmapped we don't allow it to change its
4462 * mapping. It makes things simpler and callers can just create
4463 * another bind-mount they can idmap if they want to.
4465 if (is_idmapped_mnt(m
))
4468 /* The underlying filesystem doesn't support idmapped mounts yet. */
4469 if (!(m
->mnt_sb
->s_type
->fs_flags
& FS_ALLOW_IDMAP
))
4472 /* The filesystem has turned off idmapped mounts. */
4473 if (m
->mnt_sb
->s_iflags
& SB_I_NOIDMAP
)
4476 /* We're not controlling the superblock. */
4477 if (!ns_capable(fs_userns
, CAP_SYS_ADMIN
))
4480 /* Mount has already been visible in the filesystem hierarchy. */
4481 if (!is_anon_ns(mnt
->mnt_ns
))
4488 * mnt_allow_writers() - check whether the attribute change allows writers
4489 * @kattr: the new mount attributes
4490 * @mnt: the mount to which @kattr will be applied
4492 * Check whether thew new mount attributes in @kattr allow concurrent writers.
4494 * Return: true if writers need to be held, false if not
4496 static inline bool mnt_allow_writers(const struct mount_kattr
*kattr
,
4497 const struct mount
*mnt
)
4499 return (!(kattr
->attr_set
& MNT_READONLY
) ||
4500 (mnt
->mnt
.mnt_flags
& MNT_READONLY
)) &&
4504 static int mount_setattr_prepare(struct mount_kattr
*kattr
, struct mount
*mnt
)
4509 for (m
= mnt
; m
; m
= next_mnt(m
, mnt
)) {
4510 if (!can_change_locked_flags(m
, recalc_flags(kattr
, m
))) {
4515 err
= can_idmap_mount(kattr
, m
);
4519 if (!mnt_allow_writers(kattr
, m
)) {
4520 err
= mnt_hold_writers(m
);
4525 if (!kattr
->recurse
)
4533 * If we had to call mnt_hold_writers() MNT_WRITE_HOLD will
4534 * be set in @mnt_flags. The loop unsets MNT_WRITE_HOLD for all
4535 * mounts and needs to take care to include the first mount.
4537 for (p
= mnt
; p
; p
= next_mnt(p
, mnt
)) {
4538 /* If we had to hold writers unblock them. */
4539 if (p
->mnt
.mnt_flags
& MNT_WRITE_HOLD
)
4540 mnt_unhold_writers(p
);
4543 * We're done once the first mount we changed got
4544 * MNT_WRITE_HOLD unset.
4553 static void do_idmap_mount(const struct mount_kattr
*kattr
, struct mount
*mnt
)
4555 if (!kattr
->mnt_idmap
)
4559 * Pairs with smp_load_acquire() in mnt_idmap().
4561 * Since we only allow a mount to change the idmapping once and
4562 * verified this in can_idmap_mount() we know that the mount has
4563 * @nop_mnt_idmap attached to it. So there's no need to drop any
4566 smp_store_release(&mnt
->mnt
.mnt_idmap
, mnt_idmap_get(kattr
->mnt_idmap
));
4569 static void mount_setattr_commit(struct mount_kattr
*kattr
, struct mount
*mnt
)
4573 for (m
= mnt
; m
; m
= next_mnt(m
, mnt
)) {
4576 do_idmap_mount(kattr
, m
);
4577 flags
= recalc_flags(kattr
, m
);
4578 WRITE_ONCE(m
->mnt
.mnt_flags
, flags
);
4580 /* If we had to hold writers unblock them. */
4581 if (m
->mnt
.mnt_flags
& MNT_WRITE_HOLD
)
4582 mnt_unhold_writers(m
);
4584 if (kattr
->propagation
)
4585 change_mnt_propagation(m
, kattr
->propagation
);
4586 if (!kattr
->recurse
)
4589 touch_mnt_namespace(mnt
->mnt_ns
);
4592 static int do_mount_setattr(struct path
*path
, struct mount_kattr
*kattr
)
4594 struct mount
*mnt
= real_mount(path
->mnt
);
4597 if (!path_mounted(path
))
4600 if (kattr
->mnt_userns
) {
4601 struct mnt_idmap
*mnt_idmap
;
4603 mnt_idmap
= alloc_mnt_idmap(kattr
->mnt_userns
);
4604 if (IS_ERR(mnt_idmap
))
4605 return PTR_ERR(mnt_idmap
);
4606 kattr
->mnt_idmap
= mnt_idmap
;
4609 if (kattr
->propagation
) {
4611 * Only take namespace_lock() if we're actually changing
4615 if (kattr
->propagation
== MS_SHARED
) {
4616 err
= invent_group_ids(mnt
, kattr
->recurse
);
4627 /* Ensure that this isn't anything purely vfs internal. */
4628 if (!is_mounted(&mnt
->mnt
))
4632 * If this is an attached mount make sure it's located in the callers
4633 * mount namespace. If it's not don't let the caller interact with it.
4635 * If this mount doesn't have a parent it's most often simply a
4636 * detached mount with an anonymous mount namespace. IOW, something
4637 * that's simply not attached yet. But there are apparently also users
4638 * that do change mount properties on the rootfs itself. That obviously
4639 * neither has a parent nor is it a detached mount so we cannot
4640 * unconditionally check for detached mounts.
4642 if ((mnt_has_parent(mnt
) || !is_anon_ns(mnt
->mnt_ns
)) && !check_mnt(mnt
))
4646 * First, we get the mount tree in a shape where we can change mount
4647 * properties without failure. If we succeeded to do so we commit all
4648 * changes and if we failed we clean up.
4650 err
= mount_setattr_prepare(kattr
, mnt
);
4652 mount_setattr_commit(kattr
, mnt
);
4655 unlock_mount_hash();
4657 if (kattr
->propagation
) {
4659 cleanup_group_ids(mnt
, NULL
);
4666 static int build_mount_idmapped(const struct mount_attr
*attr
, size_t usize
,
4667 struct mount_kattr
*kattr
, unsigned int flags
)
4669 struct ns_common
*ns
;
4670 struct user_namespace
*mnt_userns
;
4672 if (!((attr
->attr_set
| attr
->attr_clr
) & MOUNT_ATTR_IDMAP
))
4676 * We currently do not support clearing an idmapped mount. If this ever
4677 * is a use-case we can revisit this but for now let's keep it simple
4680 if (attr
->attr_clr
& MOUNT_ATTR_IDMAP
)
4683 if (attr
->userns_fd
> INT_MAX
)
4686 CLASS(fd
, f
)(attr
->userns_fd
);
4690 if (!proc_ns_file(fd_file(f
)))
4693 ns
= get_proc_ns(file_inode(fd_file(f
)));
4694 if (ns
->ops
->type
!= CLONE_NEWUSER
)
4698 * The initial idmapping cannot be used to create an idmapped
4699 * mount. We use the initial idmapping as an indicator of a mount
4700 * that is not idmapped. It can simply be passed into helpers that
4701 * are aware of idmapped mounts as a convenient shortcut. A user
4702 * can just create a dedicated identity mapping to achieve the same
4705 mnt_userns
= container_of(ns
, struct user_namespace
, ns
);
4706 if (mnt_userns
== &init_user_ns
)
4709 /* We're not controlling the target namespace. */
4710 if (!ns_capable(mnt_userns
, CAP_SYS_ADMIN
))
4713 kattr
->mnt_userns
= get_user_ns(mnt_userns
);
4717 static int build_mount_kattr(const struct mount_attr
*attr
, size_t usize
,
4718 struct mount_kattr
*kattr
, unsigned int flags
)
4720 unsigned int lookup_flags
= LOOKUP_AUTOMOUNT
| LOOKUP_FOLLOW
;
4722 if (flags
& AT_NO_AUTOMOUNT
)
4723 lookup_flags
&= ~LOOKUP_AUTOMOUNT
;
4724 if (flags
& AT_SYMLINK_NOFOLLOW
)
4725 lookup_flags
&= ~LOOKUP_FOLLOW
;
4726 if (flags
& AT_EMPTY_PATH
)
4727 lookup_flags
|= LOOKUP_EMPTY
;
4729 *kattr
= (struct mount_kattr
) {
4730 .lookup_flags
= lookup_flags
,
4731 .recurse
= !!(flags
& AT_RECURSIVE
),
4734 if (attr
->propagation
& ~MOUNT_SETATTR_PROPAGATION_FLAGS
)
4736 if (hweight32(attr
->propagation
& MOUNT_SETATTR_PROPAGATION_FLAGS
) > 1)
4738 kattr
->propagation
= attr
->propagation
;
4740 if ((attr
->attr_set
| attr
->attr_clr
) & ~MOUNT_SETATTR_VALID_FLAGS
)
4743 kattr
->attr_set
= attr_flags_to_mnt_flags(attr
->attr_set
);
4744 kattr
->attr_clr
= attr_flags_to_mnt_flags(attr
->attr_clr
);
4747 * Since the MOUNT_ATTR_<atime> values are an enum, not a bitmap,
4748 * users wanting to transition to a different atime setting cannot
4749 * simply specify the atime setting in @attr_set, but must also
4750 * specify MOUNT_ATTR__ATIME in the @attr_clr field.
4751 * So ensure that MOUNT_ATTR__ATIME can't be partially set in
4752 * @attr_clr and that @attr_set can't have any atime bits set if
4753 * MOUNT_ATTR__ATIME isn't set in @attr_clr.
4755 if (attr
->attr_clr
& MOUNT_ATTR__ATIME
) {
4756 if ((attr
->attr_clr
& MOUNT_ATTR__ATIME
) != MOUNT_ATTR__ATIME
)
4760 * Clear all previous time settings as they are mutually
4763 kattr
->attr_clr
|= MNT_RELATIME
| MNT_NOATIME
;
4764 switch (attr
->attr_set
& MOUNT_ATTR__ATIME
) {
4765 case MOUNT_ATTR_RELATIME
:
4766 kattr
->attr_set
|= MNT_RELATIME
;
4768 case MOUNT_ATTR_NOATIME
:
4769 kattr
->attr_set
|= MNT_NOATIME
;
4771 case MOUNT_ATTR_STRICTATIME
:
4777 if (attr
->attr_set
& MOUNT_ATTR__ATIME
)
4781 return build_mount_idmapped(attr
, usize
, kattr
, flags
);
4784 static void finish_mount_kattr(struct mount_kattr
*kattr
)
4786 put_user_ns(kattr
->mnt_userns
);
4787 kattr
->mnt_userns
= NULL
;
4789 if (kattr
->mnt_idmap
)
4790 mnt_idmap_put(kattr
->mnt_idmap
);
4793 SYSCALL_DEFINE5(mount_setattr
, int, dfd
, const char __user
*, path
,
4794 unsigned int, flags
, struct mount_attr __user
*, uattr
,
4799 struct mount_attr attr
;
4800 struct mount_kattr kattr
;
4802 BUILD_BUG_ON(sizeof(struct mount_attr
) != MOUNT_ATTR_SIZE_VER0
);
4804 if (flags
& ~(AT_EMPTY_PATH
|
4806 AT_SYMLINK_NOFOLLOW
|
4810 if (unlikely(usize
> PAGE_SIZE
))
4812 if (unlikely(usize
< MOUNT_ATTR_SIZE_VER0
))
4818 err
= copy_struct_from_user(&attr
, sizeof(attr
), uattr
, usize
);
4822 /* Don't bother walking through the mounts if this is a nop. */
4823 if (attr
.attr_set
== 0 &&
4824 attr
.attr_clr
== 0 &&
4825 attr
.propagation
== 0)
4828 err
= build_mount_kattr(&attr
, usize
, &kattr
, flags
);
4832 err
= user_path_at(dfd
, path
, kattr
.lookup_flags
, &target
);
4834 err
= do_mount_setattr(&target
, &kattr
);
4837 finish_mount_kattr(&kattr
);
4841 int show_path(struct seq_file
*m
, struct dentry
*root
)
4843 if (root
->d_sb
->s_op
->show_path
)
4844 return root
->d_sb
->s_op
->show_path(m
, root
);
4846 seq_dentry(m
, root
, " \t\n\\");
static struct vfsmount *lookup_mnt_in_ns(u64 id, struct mnt_namespace *ns)
{
	struct mount *mnt = mnt_find_id_at(ns, id);

	if (!mnt || mnt->mnt_id_unique != id)
		return NULL;

	return &mnt->mnt;
}

struct kstatmount {
	struct statmount __user *buf;
	size_t bufsize;
	struct vfsmount *mnt;
	u64 mask;
	struct path root;
	struct statmount sm;
	struct seq_file seq;
};

static u64 mnt_to_attr_flags(struct vfsmount *mnt)
{
	unsigned int mnt_flags = READ_ONCE(mnt->mnt_flags);
	u64 attr_flags = 0;

	if (mnt_flags & MNT_READONLY)
		attr_flags |= MOUNT_ATTR_RDONLY;
	if (mnt_flags & MNT_NOSUID)
		attr_flags |= MOUNT_ATTR_NOSUID;
	if (mnt_flags & MNT_NODEV)
		attr_flags |= MOUNT_ATTR_NODEV;
	if (mnt_flags & MNT_NOEXEC)
		attr_flags |= MOUNT_ATTR_NOEXEC;
	if (mnt_flags & MNT_NODIRATIME)
		attr_flags |= MOUNT_ATTR_NODIRATIME;
	if (mnt_flags & MNT_NOSYMFOLLOW)
		attr_flags |= MOUNT_ATTR_NOSYMFOLLOW;

	if (mnt_flags & MNT_NOATIME)
		attr_flags |= MOUNT_ATTR_NOATIME;
	else if (mnt_flags & MNT_RELATIME)
		attr_flags |= MOUNT_ATTR_RELATIME;
	else
		attr_flags |= MOUNT_ATTR_STRICTATIME;

	if (is_idmapped_mnt(mnt))
		attr_flags |= MOUNT_ATTR_IDMAP;

	return attr_flags;
}

static u64 mnt_to_propagation_flags(struct mount *m)
{
	u64 propagation = 0;

	if (IS_MNT_SHARED(m))
		propagation |= MS_SHARED;
	if (IS_MNT_SLAVE(m))
		propagation |= MS_SLAVE;
	if (IS_MNT_UNBINDABLE(m))
		propagation |= MS_UNBINDABLE;
	if (!propagation)
		propagation |= MS_PRIVATE;

	return propagation;
}

static void statmount_sb_basic(struct kstatmount *s)
{
	struct super_block *sb = s->mnt->mnt_sb;

	s->sm.mask |= STATMOUNT_SB_BASIC;
	s->sm.sb_dev_major = MAJOR(sb->s_dev);
	s->sm.sb_dev_minor = MINOR(sb->s_dev);
	s->sm.sb_magic = sb->s_magic;
	s->sm.sb_flags = sb->s_flags & (SB_RDONLY|SB_SYNCHRONOUS|SB_DIRSYNC|SB_LAZYTIME);
}

static void statmount_mnt_basic(struct kstatmount *s)
{
	struct mount *m = real_mount(s->mnt);

	s->sm.mask |= STATMOUNT_MNT_BASIC;
	s->sm.mnt_id = m->mnt_id_unique;
	s->sm.mnt_parent_id = m->mnt_parent->mnt_id_unique;
	s->sm.mnt_id_old = m->mnt_id;
	s->sm.mnt_parent_id_old = m->mnt_parent->mnt_id;
	s->sm.mnt_attr = mnt_to_attr_flags(&m->mnt);
	s->sm.mnt_propagation = mnt_to_propagation_flags(m);
	s->sm.mnt_peer_group = IS_MNT_SHARED(m) ? m->mnt_group_id : 0;
	s->sm.mnt_master = IS_MNT_SLAVE(m) ? m->mnt_master->mnt_group_id : 0;
}

static void statmount_propagate_from(struct kstatmount *s)
{
	struct mount *m = real_mount(s->mnt);

	s->sm.mask |= STATMOUNT_PROPAGATE_FROM;
	if (IS_MNT_SLAVE(m))
		s->sm.propagate_from = get_dominating_id(m, &current->fs->root);
}

static int statmount_mnt_root(struct kstatmount *s, struct seq_file *seq)
{
	int ret;
	size_t start = seq->count;

	ret = show_path(seq, s->mnt->mnt_root);
	if (ret)
		return ret;

	if (unlikely(seq_has_overflowed(seq)))
		return -EAGAIN;

	/*
	 * Unescape the result. It would be better if supplied string was not
	 * escaped in the first place, but that's a pretty invasive change.
	 */
	seq->buf[seq->count] = '\0';
	seq->count = start;
	seq_commit(seq, string_unescape_inplace(seq->buf + start, UNESCAPE_OCTAL));
	return 0;
}

static int statmount_mnt_point(struct kstatmount *s, struct seq_file *seq)
{
	struct vfsmount *mnt = s->mnt;
	struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt };
	int err;

	err = seq_path_root(seq, &mnt_path, &s->root, "");
	return err == SEQ_SKIP ? 0 : err;
}

static int statmount_fs_type(struct kstatmount *s, struct seq_file *seq)
{
	struct super_block *sb = s->mnt->mnt_sb;

	seq_puts(seq, sb->s_type->name);
	return 0;
}

static void statmount_fs_subtype(struct kstatmount *s, struct seq_file *seq)
{
	struct super_block *sb = s->mnt->mnt_sb;

	if (sb->s_subtype)
		seq_puts(seq, sb->s_subtype);
}

static int statmount_sb_source(struct kstatmount *s, struct seq_file *seq)
{
	struct super_block *sb = s->mnt->mnt_sb;
	struct mount *r = real_mount(s->mnt);

	if (sb->s_op->show_devname) {
		size_t start = seq->count;
		int ret;

		ret = sb->s_op->show_devname(seq, s->mnt->mnt_root);
		if (ret)
			return ret;

		if (unlikely(seq_has_overflowed(seq)))
			return -EAGAIN;

		/* Unescape the result */
		seq->buf[seq->count] = '\0';
		seq->count = start;
		seq_commit(seq, string_unescape_inplace(seq->buf + start, UNESCAPE_OCTAL));
	} else if (r->mnt_devname) {
		seq_puts(seq, r->mnt_devname);
	}
	return 0;
}

static void statmount_mnt_ns_id(struct kstatmount *s, struct mnt_namespace *ns)
{
	s->sm.mask |= STATMOUNT_MNT_NS_ID;
	s->sm.mnt_ns_id = ns->seq;
}

static int statmount_mnt_opts(struct kstatmount *s, struct seq_file *seq)
{
	struct vfsmount *mnt = s->mnt;
	struct super_block *sb = mnt->mnt_sb;
	int err;

	if (sb->s_op->show_options) {
		size_t start = seq->count;

		err = sb->s_op->show_options(seq, mnt->mnt_root);
		if (err)
			return err;

		if (unlikely(seq_has_overflowed(seq)))
			return -EAGAIN;

		if (seq->count == start)
			return 0;

		/* skip leading comma */
		memmove(seq->buf + start, seq->buf + start + 1,
			seq->count - start - 1);
		seq->count--;
	}

	return 0;
}

static inline int statmount_opt_process(struct seq_file *seq, size_t start)
{
	char *buf_end, *opt_end, *src, *dst;
	int count = 0;

	if (unlikely(seq_has_overflowed(seq)))
		return -EAGAIN;

	buf_end = seq->buf + seq->count;
	dst = seq->buf + start;
	src = dst + 1;	/* skip initial comma */

	if (src >= buf_end) {
		seq->count = start;
		return 0;
	}

	*buf_end = '\0';
	for (; src < buf_end; src = opt_end + 1) {
		opt_end = strchrnul(src, ',');
		*opt_end = '\0';
		dst += string_unescape(src, dst, 0, UNESCAPE_OCTAL) + 1;
		if (WARN_ON_ONCE(++count == INT_MAX))
			return -EOVERFLOW;
	}
	seq->count = dst - 1 - seq->buf;
	return count;
}

static int statmount_opt_array(struct kstatmount *s, struct seq_file *seq)
{
	struct vfsmount *mnt = s->mnt;
	struct super_block *sb = mnt->mnt_sb;
	size_t start = seq->count;
	int err;

	if (!sb->s_op->show_options)
		return 0;

	err = sb->s_op->show_options(seq, mnt->mnt_root);
	if (err)
		return err;

	err = statmount_opt_process(seq, start);
	if (err < 0)
		return err;

	s->sm.opt_num = err;
	return 0;
}

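/*
 * Illustrative note (not part of this file): STATMOUNT_OPT_ARRAY reaches
 * userspace as sm->opt_num NUL-terminated strings starting at offset
 * sm->opt_array in the string table, one entry per mount option:
 *
 *	const char *opt = sm->str + sm->opt_array;
 *	for (unsigned int i = 0; i < sm->opt_num; i++) {
 *		printf("option: %s\n", opt);
 *		opt += strlen(opt) + 1;	// step over the terminating NUL
 *	}
 *
 * statmount_opt_process() above produces exactly this layout by unescaping
 * each comma-separated option in place and counting the entries.
 */
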
static int statmount_opt_sec_array(struct kstatmount *s, struct seq_file *seq)
{
	struct vfsmount *mnt = s->mnt;
	struct super_block *sb = mnt->mnt_sb;
	size_t start = seq->count;
	int err;

	err = security_sb_show_options(seq, sb);
	if (err)
		return err;

	err = statmount_opt_process(seq, start);
	if (err < 0)
		return err;

	s->sm.opt_sec_num = err;
	return 0;
}

static int statmount_string(struct kstatmount *s, u64 flag)
{
	int ret = 0;
	size_t kbufsize;
	struct seq_file *seq = &s->seq;
	struct statmount *sm = &s->sm;
	u32 start = seq->count;

	switch (flag) {
	case STATMOUNT_FS_TYPE:
		sm->fs_type = start;
		ret = statmount_fs_type(s, seq);
		break;
	case STATMOUNT_MNT_ROOT:
		sm->mnt_root = start;
		ret = statmount_mnt_root(s, seq);
		break;
	case STATMOUNT_MNT_POINT:
		sm->mnt_point = start;
		ret = statmount_mnt_point(s, seq);
		break;
	case STATMOUNT_MNT_OPTS:
		sm->mnt_opts = start;
		ret = statmount_mnt_opts(s, seq);
		break;
	case STATMOUNT_OPT_ARRAY:
		sm->opt_array = start;
		ret = statmount_opt_array(s, seq);
		break;
	case STATMOUNT_OPT_SEC_ARRAY:
		sm->opt_sec_array = start;
		ret = statmount_opt_sec_array(s, seq);
		break;
	case STATMOUNT_FS_SUBTYPE:
		sm->fs_subtype = start;
		statmount_fs_subtype(s, seq);
		break;
	case STATMOUNT_SB_SOURCE:
		sm->sb_source = start;
		ret = statmount_sb_source(s, seq);
		break;
	default:
		WARN_ON_ONCE(true);
		return -EINVAL;
	}

	/*
	 * If nothing was emitted, return to avoid setting the flag
	 * and terminating the buffer.
	 */
	if (seq->count == start)
		return ret;
	if (unlikely(check_add_overflow(sizeof(*sm), seq->count, &kbufsize)))
		return -EOVERFLOW;
	if (kbufsize >= s->bufsize)
		return -EOVERFLOW;

	/* signal a retry */
	if (unlikely(seq_has_overflowed(seq)))
		return -EAGAIN;

	if (ret)
		return ret;

	seq->buf[seq->count++] = '\0';
	sm->mask |= flag;
	return 0;
}

static int copy_statmount_to_user(struct kstatmount *s)
{
	struct statmount *sm = &s->sm;
	struct seq_file *seq = &s->seq;
	char __user *str = ((char __user *)s->buf) + sizeof(*sm);
	size_t copysize = min_t(size_t, s->bufsize, sizeof(*sm));

	if (seq->count && copy_to_user(str, seq->buf, seq->count))
		return -EFAULT;

	/* Return the number of bytes copied to the buffer */
	sm->size = copysize + seq->count;
	if (copy_to_user(s->buf, sm, copysize))
		return -EFAULT;

	return 0;
}

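/*
 * Illustrative layout note (not part of this file): userspace receives
 * struct statmount followed directly by the string table assembled in the
 * seq_file above, and string members such as mnt_root or mnt_point are byte
 * offsets into that table (the str[] flexible array in the UAPI struct).  A
 * caller would read them roughly as:
 *
 *	struct statmount *sm = buf;	// buffer passed to statmount(2)
 *	if (sm->mask & STATMOUNT_MNT_POINT)
 *		printf("mounted on %s\n", sm->str + sm->mnt_point);
 *
 * This mirrors copy_statmount_to_user(): copysize bytes of the header are
 * copied first and seq->count bytes of NUL-terminated strings follow.
 */
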
static struct mount *listmnt_next(struct mount *curr, bool reverse)
{
	struct rb_node *node;

	if (reverse)
		node = rb_prev(&curr->mnt_node);
	else
		node = rb_next(&curr->mnt_node);

	return node_to_mount(node);
}

static int grab_requested_root(struct mnt_namespace *ns, struct path *root)
{
	struct mount *first, *child;

	rwsem_assert_held(&namespace_sem);

	/* We're looking at our own ns, just use get_fs_root. */
	if (ns == current->nsproxy->mnt_ns) {
		get_fs_root(current->fs, root);
		return 0;
	}

	/*
	 * We have to find the first mount in our ns and use that, however it
	 * may not exist, so handle that properly.
	 */
	if (RB_EMPTY_ROOT(&ns->mounts))
		return -ENOENT;

	first = child = ns->root;
	for (;;) {
		child = listmnt_next(child, false);
		if (!child)
			return -ENOENT;
		if (child->mnt_parent == first)
			break;
	}

	root->mnt = mntget(&child->mnt);
	root->dentry = dget(root->mnt->mnt_root);
	return 0;
}

static int do_statmount(struct kstatmount *s, u64 mnt_id, u64 mnt_ns_id,
			struct mnt_namespace *ns)
{
	struct path root __free(path_put) = {};
	struct mount *m;
	int err;

	/* Has the namespace already been emptied? */
	if (mnt_ns_id && RB_EMPTY_ROOT(&ns->mounts))
		return -ENOENT;

	s->mnt = lookup_mnt_in_ns(mnt_id, ns);
	if (!s->mnt)
		return -ENOENT;

	err = grab_requested_root(ns, &root);
	if (err)
		return err;

	/*
	 * Don't trigger audit denials. We just want to determine what
	 * mounts to show users.
	 */
	m = real_mount(s->mnt);
	if (!is_path_reachable(m, m->mnt.mnt_root, &root) &&
	    !ns_capable_noaudit(ns->user_ns, CAP_SYS_ADMIN))
		return -EPERM;

	err = security_sb_statfs(s->mnt->mnt_root);
	if (err)
		return err;

	s->root = root;
	if (s->mask & STATMOUNT_SB_BASIC)
		statmount_sb_basic(s);

	if (s->mask & STATMOUNT_MNT_BASIC)
		statmount_mnt_basic(s);

	if (s->mask & STATMOUNT_PROPAGATE_FROM)
		statmount_propagate_from(s);

	if (s->mask & STATMOUNT_FS_TYPE)
		err = statmount_string(s, STATMOUNT_FS_TYPE);

	if (!err && s->mask & STATMOUNT_MNT_ROOT)
		err = statmount_string(s, STATMOUNT_MNT_ROOT);

	if (!err && s->mask & STATMOUNT_MNT_POINT)
		err = statmount_string(s, STATMOUNT_MNT_POINT);

	if (!err && s->mask & STATMOUNT_MNT_OPTS)
		err = statmount_string(s, STATMOUNT_MNT_OPTS);

	if (!err && s->mask & STATMOUNT_OPT_ARRAY)
		err = statmount_string(s, STATMOUNT_OPT_ARRAY);

	if (!err && s->mask & STATMOUNT_OPT_SEC_ARRAY)
		err = statmount_string(s, STATMOUNT_OPT_SEC_ARRAY);

	if (!err && s->mask & STATMOUNT_FS_SUBTYPE)
		err = statmount_string(s, STATMOUNT_FS_SUBTYPE);

	if (!err && s->mask & STATMOUNT_SB_SOURCE)
		err = statmount_string(s, STATMOUNT_SB_SOURCE);

	if (!err && s->mask & STATMOUNT_MNT_NS_ID)
		statmount_mnt_ns_id(s, ns);

	if (err)
		return err;

	return 0;
}

static inline bool retry_statmount(const long ret, size_t *seq_size)
{
	if (likely(ret != -EAGAIN))
		return false;
	if (unlikely(check_mul_overflow(*seq_size, 2, seq_size)))
		return false;
	if (unlikely(*seq_size > MAX_RW_COUNT))
		return false;
	return true;
}

#define STATMOUNT_STRING_REQ (STATMOUNT_MNT_ROOT | STATMOUNT_MNT_POINT | \
			      STATMOUNT_FS_TYPE | STATMOUNT_MNT_OPTS | \
			      STATMOUNT_FS_SUBTYPE | STATMOUNT_SB_SOURCE | \
			      STATMOUNT_OPT_ARRAY | STATMOUNT_OPT_SEC_ARRAY)

static int prepare_kstatmount(struct kstatmount *ks, struct mnt_id_req *kreq,
			      struct statmount __user *buf, size_t bufsize,
			      size_t seq_size)
{
	if (!access_ok(buf, bufsize))
		return -EFAULT;

	memset(ks, 0, sizeof(*ks));
	ks->mask = kreq->param;
	ks->buf = buf;
	ks->bufsize = bufsize;

	if (ks->mask & STATMOUNT_STRING_REQ) {
		if (bufsize == sizeof(ks->sm))
			return -EOVERFLOW;

		ks->seq.buf = kvmalloc(seq_size, GFP_KERNEL_ACCOUNT);
		if (!ks->seq.buf)
			return -ENOMEM;

		ks->seq.size = seq_size;
	}

	return 0;
}

static int copy_mnt_id_req(const struct mnt_id_req __user *req,
			   struct mnt_id_req *kreq)
{
	int ret;
	size_t usize;

	BUILD_BUG_ON(sizeof(struct mnt_id_req) != MNT_ID_REQ_SIZE_VER1);

	ret = get_user(usize, &req->size);
	if (ret)
		return ret;
	if (unlikely(usize > PAGE_SIZE))
		return -E2BIG;
	if (unlikely(usize < MNT_ID_REQ_SIZE_VER0))
		return -EINVAL;
	memset(kreq, 0, sizeof(*kreq));
	ret = copy_struct_from_user(kreq, sizeof(*kreq), req, usize);
	if (ret)
		return ret;
	if (kreq->spare != 0)
		return -EINVAL;
	/* The first valid unique mount id is MNT_UNIQUE_ID_OFFSET + 1. */
	if (kreq->mnt_id <= MNT_UNIQUE_ID_OFFSET)
		return -EINVAL;
	return 0;
}

/*
 * If the user requested a specific mount namespace id, look that up and return
 * that, or if not simply grab a passive reference on our mount namespace and
 * return that.
 */
static struct mnt_namespace *grab_requested_mnt_ns(const struct mnt_id_req *kreq)
{
	struct mnt_namespace *mnt_ns;

	if (kreq->mnt_ns_id && kreq->spare)
		return ERR_PTR(-EINVAL);

	if (kreq->mnt_ns_id)
		return lookup_mnt_ns(kreq->mnt_ns_id);

	if (kreq->spare) {
		struct ns_common *ns;

		CLASS(fd, f)(kreq->spare);
		if (fd_empty(f))
			return ERR_PTR(-EBADF);

		if (!proc_ns_file(fd_file(f)))
			return ERR_PTR(-EINVAL);

		ns = get_proc_ns(file_inode(fd_file(f)));
		if (ns->ops->type != CLONE_NEWNS)
			return ERR_PTR(-EINVAL);

		mnt_ns = to_mnt_ns(ns);
	} else {
		mnt_ns = current->nsproxy->mnt_ns;
	}

	refcount_inc(&mnt_ns->passive);
	return mnt_ns;
}

SYSCALL_DEFINE4(statmount, const struct mnt_id_req __user *, req,
		struct statmount __user *, buf, size_t, bufsize,
		unsigned int, flags)
{
	struct mnt_namespace *ns __free(mnt_ns_release) = NULL;
	struct kstatmount *ks __free(kfree) = NULL;
	struct mnt_id_req kreq;
	/* We currently support retrieval of 3 strings. */
	size_t seq_size = 3 * PATH_MAX;
	int ret;

	if (flags)
		return -EINVAL;

	ret = copy_mnt_id_req(req, &kreq);
	if (ret)
		return ret;

	ns = grab_requested_mnt_ns(&kreq);
	if (!ns)
		return -ENOENT;

	if (kreq.mnt_ns_id && (ns != current->nsproxy->mnt_ns) &&
	    !ns_capable_noaudit(ns->user_ns, CAP_SYS_ADMIN))
		return -ENOENT;

	ks = kmalloc(sizeof(*ks), GFP_KERNEL_ACCOUNT);
	if (!ks)
		return -ENOMEM;

retry:
	ret = prepare_kstatmount(ks, &kreq, buf, bufsize, seq_size);
	if (ret)
		return ret;

	scoped_guard(rwsem_read, &namespace_sem)
		ret = do_statmount(ks, kreq.mnt_id, kreq.mnt_ns_id, ns);

	if (!ret)
		ret = copy_statmount_to_user(ks);
	kvfree(ks->seq.buf);
	if (retry_statmount(ret, &seq_size))
		goto retry;
	return ret;
}

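/*
 * Illustrative userspace sketch (not part of this file): statmount(2) has no
 * libc wrapper at the time of writing, so callers typically use syscall(2).
 * This assumes __NR_statmount plus struct mnt_id_req/statmount from recent
 * kernel headers, and a 64-bit unique mount id (e.g. from listmount(2) or
 * STATX_MNT_ID_UNIQUE), not the old 32-bit id.
 *
 *	struct mnt_id_req req = {
 *		.size   = MNT_ID_REQ_SIZE_VER0,
 *		.mnt_id = mnt_id,
 *		.param  = STATMOUNT_SB_BASIC | STATMOUNT_MNT_POINT,
 *	};
 *	char buf[4096];
 *	long ret = syscall(__NR_statmount, &req, buf, sizeof(buf), 0);
 *	if (ret < 0 && errno == EOVERFLOW)
 *		; // retry with a larger buf
 *
 * The kernel grows its internal seq buffer on -EAGAIN via retry_statmount();
 * userspace analogously retries with a larger buffer when it sees EOVERFLOW.
 */
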
static ssize_t do_listmount(struct mnt_namespace *ns, u64 mnt_parent_id,
			    u64 last_mnt_id, u64 *mnt_ids, size_t nr_mnt_ids,
			    bool reverse)
{
	struct path root __free(path_put) = {};
	struct path orig;
	struct mount *r, *first;
	ssize_t ret;

	rwsem_assert_held(&namespace_sem);

	ret = grab_requested_root(ns, &root);
	if (ret)
		return ret;

	if (mnt_parent_id == LSMT_ROOT) {
		orig = root;
	} else {
		orig.mnt = lookup_mnt_in_ns(mnt_parent_id, ns);
		if (!orig.mnt)
			return -ENOENT;
		orig.dentry = orig.mnt->mnt_root;
	}

	/*
	 * Don't trigger audit denials. We just want to determine what
	 * mounts to show users.
	 */
	if (!is_path_reachable(real_mount(orig.mnt), orig.dentry, &root) &&
	    !ns_capable_noaudit(ns->user_ns, CAP_SYS_ADMIN))
		return -EPERM;

	ret = security_sb_statfs(orig.dentry);
	if (ret)
		return ret;

	if (!last_mnt_id) {
		if (reverse)
			first = node_to_mount(rb_last(&ns->mounts));
		else
			first = node_to_mount(rb_first(&ns->mounts));
	} else {
		if (reverse)
			first = mnt_find_id_at_reverse(ns, last_mnt_id - 1);
		else
			first = mnt_find_id_at(ns, last_mnt_id + 1);
	}

	for (ret = 0, r = first; r && nr_mnt_ids; r = listmnt_next(r, reverse)) {
		if (r->mnt_id_unique == mnt_parent_id)
			continue;
		if (!is_path_reachable(r, r->mnt.mnt_root, &orig))
			continue;
		*mnt_ids = r->mnt_id_unique;
		mnt_ids++;
		nr_mnt_ids--;
		ret++;
	}
	return ret;
}

SYSCALL_DEFINE4(listmount, const struct mnt_id_req __user *, req,
		u64 __user *, mnt_ids, size_t, nr_mnt_ids, unsigned int, flags)
{
	u64 *kmnt_ids __free(kvfree) = NULL;
	const size_t maxcount = 1000000;
	struct mnt_namespace *ns __free(mnt_ns_release) = NULL;
	struct mnt_id_req kreq;
	u64 last_mnt_id;
	ssize_t ret;

	if (flags & ~LISTMOUNT_REVERSE)
		return -EINVAL;

	/*
	 * If the mount namespace really has more than 1 million mounts the
	 * caller must iterate over the mount namespace (and reconsider their
	 * system design...).
	 */
	if (unlikely(nr_mnt_ids > maxcount))
		return -EOVERFLOW;

	if (!access_ok(mnt_ids, nr_mnt_ids * sizeof(*mnt_ids)))
		return -EFAULT;

	ret = copy_mnt_id_req(req, &kreq);
	if (ret)
		return ret;

	last_mnt_id = kreq.param;
	/* The first valid unique mount id is MNT_UNIQUE_ID_OFFSET + 1. */
	if (last_mnt_id != 0 && last_mnt_id <= MNT_UNIQUE_ID_OFFSET)
		return -EINVAL;

	kmnt_ids = kvmalloc_array(nr_mnt_ids, sizeof(*kmnt_ids),
				  GFP_KERNEL_ACCOUNT);
	if (!kmnt_ids)
		return -ENOMEM;

	ns = grab_requested_mnt_ns(&kreq);
	if (!ns)
		return -ENOENT;

	if (kreq.mnt_ns_id && (ns != current->nsproxy->mnt_ns) &&
	    !ns_capable_noaudit(ns->user_ns, CAP_SYS_ADMIN))
		return -ENOENT;

	scoped_guard(rwsem_read, &namespace_sem)
		ret = do_listmount(ns, kreq.mnt_id, last_mnt_id, kmnt_ids,
				   nr_mnt_ids, (flags & LISTMOUNT_REVERSE));
	if (ret <= 0)
		return ret;

	if (copy_to_user(mnt_ids, kmnt_ids, ret * sizeof(*mnt_ids)))
		return -EFAULT;

	return ret;
}

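/*
 * Illustrative userspace sketch (not part of this file): paging through a
 * namespace with listmount(2).  req.param carries the last mount id seen so
 * far (0 on the first call), matching last_mnt_id above.  Assumes
 * __NR_listmount, LSMT_ROOT and struct mnt_id_req from recent kernel headers.
 *
 *	struct mnt_id_req req = {
 *		.size   = MNT_ID_REQ_SIZE_VER0,
 *		.mnt_id = LSMT_ROOT,	// start from the namespace root
 *	};
 *	uint64_t ids[256];
 *	ssize_t n;
 *
 *	while ((n = syscall(__NR_listmount, &req, ids, 256, 0)) > 0) {
 *		// consume ids[0..n-1] ...
 *		req.param = ids[n - 1];	// resume after the last id seen
 *	}
 *
 * Each returned id can then be passed to statmount(2) via mnt_id_req.mnt_id.
 */
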
static void __init init_mount_tree(void)
{
	struct vfsmount *mnt;
	struct mount *m;
	struct mnt_namespace *ns;
	struct path root;

	mnt = vfs_kern_mount(&rootfs_fs_type, 0, "rootfs", NULL);
	if (IS_ERR(mnt))
		panic("Can't create rootfs");

	ns = alloc_mnt_ns(&init_user_ns, false);
	if (IS_ERR(ns))
		panic("Can't allocate initial namespace");
	m = real_mount(mnt);
	ns->root = m;
	ns->nr_mounts = 1;
	mnt_add_to_ns(ns, m);
	init_task.nsproxy->mnt_ns = ns;
	get_mnt_ns(ns);

	root.mnt = mnt;
	root.dentry = mnt->mnt_root;
	mnt->mnt_flags |= MNT_LOCKED;

	set_fs_pwd(current->fs, &root);
	set_fs_root(current->fs, &root);

	mnt_ns_tree_add(ns);
}

void __init mnt_init(void)
{
	int err;

	mnt_cache = kmem_cache_create("mnt_cache", sizeof(struct mount),
			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT, NULL);

	mount_hashtable = alloc_large_system_hash("Mount-cache",
				sizeof(struct hlist_head),
				mhash_entries, 19,
				HASH_ZERO,
				&m_hash_shift, &m_hash_mask, 0, 0);
	mountpoint_hashtable = alloc_large_system_hash("Mountpoint-cache",
				sizeof(struct hlist_head),
				mphash_entries, 19,
				HASH_ZERO,
				&mp_hash_shift, &mp_hash_mask, 0, 0);

	if (!mount_hashtable || !mountpoint_hashtable)
		panic("Failed to allocate mount hash table\n");

	kernfs_init();

	err = sysfs_init();
	if (err)
		printk(KERN_WARNING "%s: sysfs_init error: %d\n",
			__func__, err);
	fs_kobj = kobject_create_and_add("fs", NULL);
	if (!fs_kobj)
		printk(KERN_WARNING "%s: kobj create error\n", __func__);
	shmem_init();
	init_rootfs();
	init_mount_tree();
}

void put_mnt_ns(struct mnt_namespace *ns)
{
	if (!refcount_dec_and_test(&ns->ns.count))
		return;
	drop_collected_mounts(&ns->root->mnt);
	free_mnt_ns(ns);
}

struct vfsmount *kern_mount(struct file_system_type *type)
{
	struct vfsmount *mnt;
	mnt = vfs_kern_mount(type, SB_KERNMOUNT, type->name, NULL);
	if (!IS_ERR(mnt)) {
		/*
		 * it is a longterm mount, don't release mnt until
		 * we unmount before file sys is unregistered
		 */
		real_mount(mnt)->mnt_ns = MNT_NS_INTERNAL;
	}
	return mnt;
}
EXPORT_SYMBOL_GPL(kern_mount);

void kern_unmount(struct vfsmount *mnt)
{
	/* release long term mount so mount point can be released */
	if (!IS_ERR(mnt)) {
		mnt_make_shortterm(mnt);
		synchronize_rcu();	/* yecchhh... */
		mntput(mnt);
	}
}
EXPORT_SYMBOL(kern_unmount);

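/*
 * Illustrative in-kernel sketch (not part of this file): a filesystem that
 * needs a private long-term internal mount typically pairs these helpers.
 * example_fs_type and the functions below are hypothetical:
 *
 *	static struct vfsmount *example_mnt;
 *
 *	static int __init example_init(void)
 *	{
 *		example_mnt = kern_mount(&example_fs_type);
 *		return PTR_ERR_OR_ZERO(example_mnt);
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		kern_unmount(example_mnt);
 *	}
 *
 * kern_mount() marks the mount MNT_NS_INTERNAL so it never belongs to a
 * mount namespace; kern_unmount() drops that long-term reference again.
 */
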
void kern_unmount_array(struct vfsmount *mnt[], unsigned int num)
{
	unsigned int i;

	for (i = 0; i < num; i++)
		mnt_make_shortterm(mnt[i]);
	synchronize_rcu_expedited();
	for (i = 0; i < num; i++)
		mntput(mnt[i]);
}
EXPORT_SYMBOL(kern_unmount_array);

bool our_mnt(struct vfsmount *mnt)
{
	return check_mnt(real_mount(mnt));
}

bool current_chrooted(void)
{
	/* Does the current process have a non-standard root */
	struct path ns_root;
	struct path fs_root;
	bool chrooted;

	/* Find the namespace root */
	ns_root.mnt = &current->nsproxy->mnt_ns->root->mnt;
	ns_root.dentry = ns_root.mnt->mnt_root;
	path_get(&ns_root);
	while (d_mountpoint(ns_root.dentry) && follow_down_one(&ns_root))
		;

	get_fs_root(current->fs, &fs_root);

	chrooted = !path_equal(&fs_root, &ns_root);

	path_put(&fs_root);
	path_put(&ns_root);

	return chrooted;
}

static bool mnt_already_visible(struct mnt_namespace *ns,
				const struct super_block *sb,
				int *new_mnt_flags)
{
	int new_flags = *new_mnt_flags;
	struct mount *mnt, *n;
	bool visible = false;

	down_read(&namespace_sem);
	rbtree_postorder_for_each_entry_safe(mnt, n, &ns->mounts, mnt_node) {
		struct mount *child;
		int mnt_flags;

		if (mnt->mnt.mnt_sb->s_type != sb->s_type)
			continue;

		/* This mount is not fully visible if its root directory
		 * is not the root directory of the filesystem.
		 */
		if (mnt->mnt.mnt_root != mnt->mnt.mnt_sb->s_root)
			continue;

		/* A local view of the mount flags */
		mnt_flags = mnt->mnt.mnt_flags;

		/* Don't miss readonly hidden in the superblock flags */
		if (sb_rdonly(mnt->mnt.mnt_sb))
			mnt_flags |= MNT_LOCK_READONLY;

		/* Verify the mount flags are equal to or more permissive
		 * than the proposed new mount.
		 */
		if ((mnt_flags & MNT_LOCK_READONLY) &&
		    !(new_flags & MNT_READONLY))
			continue;
		if ((mnt_flags & MNT_LOCK_ATIME) &&
		    ((mnt_flags & MNT_ATIME_MASK) != (new_flags & MNT_ATIME_MASK)))
			continue;

		/* This mount is not fully visible if there are any
		 * locked child mounts that cover anything except for
		 * empty directories.
		 */
		list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) {
			struct inode *inode = child->mnt_mountpoint->d_inode;
			/* Only worry about locked mounts */
			if (!(child->mnt.mnt_flags & MNT_LOCKED))
				continue;
			/* Is the directory permanently empty? */
			if (!is_empty_dir_inode(inode))
				goto next;
		}
		/* Preserve the locked attributes */
		*new_mnt_flags |= mnt_flags & (MNT_LOCK_READONLY | \
					       MNT_LOCK_ATIME);
		visible = true;
		goto found;
	next:	;
	}
found:
	up_read(&namespace_sem);
	return visible;
}

static bool mount_too_revealing(const struct super_block *sb, int *new_mnt_flags)
{
	const unsigned long required_iflags = SB_I_NOEXEC | SB_I_NODEV;
	struct mnt_namespace *ns = current->nsproxy->mnt_ns;
	unsigned long s_iflags;

	if (ns->user_ns == &init_user_ns)
		return false;

	/* Can this filesystem be too revealing? */
	s_iflags = sb->s_iflags;
	if (!(s_iflags & SB_I_USERNS_VISIBLE))
		return false;

	if ((s_iflags & required_iflags) != required_iflags) {
		WARN_ONCE(1, "Expected s_iflags to contain 0x%lx\n",
			  required_iflags);
		return true;
	}

	return !mnt_already_visible(ns, sb, new_mnt_flags);
}

bool mnt_may_suid(struct vfsmount *mnt)
{
	/*
	 * Foreign mounts (accessed via fchdir or through /proc
	 * symlinks) are always treated as if they are nosuid. This
	 * prevents namespaces from trusting potentially unsafe
	 * suid/sgid bits, file caps, or security labels that originate
	 * in other namespaces.
	 */
	return !(mnt->mnt_flags & MNT_NOSUID) && check_mnt(real_mount(mnt)) &&
	       current_in_userns(mnt->mnt_sb->s_user_ns);
}

static struct ns_common *mntns_get(struct task_struct *task)
{
	struct ns_common *ns = NULL;
	struct nsproxy *nsproxy;

	task_lock(task);
	nsproxy = task->nsproxy;
	if (nsproxy) {
		ns = &nsproxy->mnt_ns->ns;
		get_mnt_ns(to_mnt_ns(ns));
	}
	task_unlock(task);

	return ns;
}

static void mntns_put(struct ns_common *ns)
{
	put_mnt_ns(to_mnt_ns(ns));
}

static int mntns_install(struct nsset *nsset, struct ns_common *ns)
{
	struct nsproxy *nsproxy = nsset->nsproxy;
	struct fs_struct *fs = nsset->fs;
	struct mnt_namespace *mnt_ns = to_mnt_ns(ns), *old_mnt_ns;
	struct user_namespace *user_ns = nsset->cred->user_ns;
	struct path root;
	int err;

	if (!ns_capable(mnt_ns->user_ns, CAP_SYS_ADMIN) ||
	    !ns_capable(user_ns, CAP_SYS_CHROOT) ||
	    !ns_capable(user_ns, CAP_SYS_ADMIN))
		return -EPERM;

	if (is_anon_ns(mnt_ns))
		return -EINVAL;

	if (fs->users != 1)
		return -EINVAL;

	get_mnt_ns(mnt_ns);
	old_mnt_ns = nsproxy->mnt_ns;
	nsproxy->mnt_ns = mnt_ns;

	/* Find the root */
	err = vfs_path_lookup(mnt_ns->root->mnt.mnt_root, &mnt_ns->root->mnt,
				"/", LOOKUP_DOWN, &root);
	if (err) {
		/* revert to old namespace */
		nsproxy->mnt_ns = old_mnt_ns;
		put_mnt_ns(mnt_ns);
		return err;
	}

	put_mnt_ns(old_mnt_ns);

	/* Update the pwd and root */
	set_fs_pwd(fs, &root);
	set_fs_root(fs, &root);

	path_put(&root);
	return 0;
}

static struct user_namespace *mntns_owner(struct ns_common *ns)
{
	return to_mnt_ns(ns)->user_ns;
}

const struct proc_ns_operations mntns_operations = {
	.name		= "mnt",
	.type		= CLONE_NEWNS,
	.get		= mntns_get,
	.put		= mntns_put,
	.install	= mntns_install,
	.owner		= mntns_owner,
};

#ifdef CONFIG_SYSCTL
static struct ctl_table fs_namespace_sysctls[] = {
	{
		.procname	= "mount-max",
		.data		= &sysctl_mount_max,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ONE,
	},
};

static int __init init_fs_namespace_sysctls(void)
{
	register_sysctl_init("fs", fs_namespace_sysctls);
	return 0;
}
fs_initcall(init_fs_namespace_sysctls);

#endif /* CONFIG_SYSCTL */