// SPDX-License-Identifier: GPL-2.0-only
/*
 * (C) Copyright Al Viro 2000, 2001
 *
 * Based on code from fs/super.c, copyright Linus Torvalds and others.
 */

#include <linux/syscalls.h>
#include <linux/export.h>
#include <linux/capability.h>
#include <linux/mnt_namespace.h>
#include <linux/user_namespace.h>
#include <linux/namei.h>
#include <linux/security.h>
#include <linux/cred.h>
#include <linux/idr.h>
#include <linux/init.h>		/* init_rootfs */
#include <linux/fs_struct.h>	/* get_fs_root et.al. */
#include <linux/fsnotify.h>	/* fsnotify_vfsmount_delete */
#include <linux/file.h>
#include <linux/uaccess.h>
#include <linux/proc_ns.h>
#include <linux/magic.h>
#include <linux/memblock.h>
#include <linux/proc_fs.h>
#include <linux/task_work.h>
#include <linux/sched/task.h>
#include <uapi/linux/mount.h>
#include <linux/fs_context.h>
#include <linux/shmem_fs.h>
#include <linux/mnt_idmapping.h>
#include <linux/pidfs.h>

#include "pnode.h"
#include "internal.h"
/* Maximum number of mounts in a mount namespace */
static unsigned int sysctl_mount_max __read_mostly = 100000;

static unsigned int m_hash_mask __ro_after_init;
static unsigned int m_hash_shift __ro_after_init;
static unsigned int mp_hash_mask __ro_after_init;
static unsigned int mp_hash_shift __ro_after_init;
static __initdata unsigned long mhash_entries;
static int __init set_mhash_entries(char *str)
{
	if (!str)
		return 0;
	mhash_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("mhash_entries=", set_mhash_entries);

static __initdata unsigned long mphash_entries;
static int __init set_mphash_entries(char *str)
{
	if (!str)
		return 0;
	mphash_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("mphash_entries=", set_mphash_entries);
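
/*
 * Illustrative example (not part of this file): both hash tables can be
 * sized explicitly from the kernel command line, e.g.
 *
 *	mhash_entries=65536 mphash_entries=16384
 *
 * The values are parsed by the __setup() handlers above; when the
 * parameters are absent the tables are sized from available memory at
 * boot time instead.
 */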
static DEFINE_XARRAY_FLAGS(mnt_id_xa, XA_FLAGS_ALLOC);
static DEFINE_IDA(mnt_group_ida);

/* Don't allow confusion with old 32bit mount ID */
#define MNT_UNIQUE_ID_OFFSET (1ULL << 31)
static u64 mnt_id_ctr = MNT_UNIQUE_ID_OFFSET;

static struct hlist_head *mount_hashtable __ro_after_init;
static struct hlist_head *mountpoint_hashtable __ro_after_init;
static struct kmem_cache *mnt_cache __ro_after_init;
static DECLARE_RWSEM(namespace_sem);
static HLIST_HEAD(unmounted);		/* protected by namespace_sem */
static LIST_HEAD(ex_mountpoints);	/* protected by namespace_sem */
static DEFINE_SEQLOCK(mnt_ns_tree_lock);

static struct rb_root mnt_ns_tree = RB_ROOT; /* protected by mnt_ns_tree_lock */
static LIST_HEAD(mnt_ns_list); /* protected by mnt_ns_tree_lock */
struct mount_kattr {
	unsigned int attr_set;
	unsigned int attr_clr;
	unsigned int propagation;
	unsigned int lookup_flags;
	bool recurse;
	struct user_namespace *mnt_userns;
	struct mnt_idmap *mnt_idmap;
};
struct kobject *fs_kobj __ro_after_init;
EXPORT_SYMBOL_GPL(fs_kobj);

/*
 * vfsmount lock may be taken for read to prevent changes to the
 * vfsmount hash, ie. during mountpoint lookups or walking back
 * up the tree.
 *
 * It should be taken for write in all cases where the vfsmount
 * tree or hash is modified or when a vfsmount structure is modified.
 */
__cacheline_aligned_in_smp DEFINE_SEQLOCK(mount_lock);
static inline struct mnt_namespace *node_to_mnt_ns(const struct rb_node *node)
{
	return rb_entry(node, struct mnt_namespace, mnt_ns_tree_node);
}

static int mnt_ns_cmp(struct rb_node *a, const struct rb_node *b)
{
	struct mnt_namespace *ns_a = node_to_mnt_ns(a);
	struct mnt_namespace *ns_b = node_to_mnt_ns(b);
	u64 seq_a = ns_a->seq;
	u64 seq_b = ns_b->seq;

	if (seq_a < seq_b)
		return -1;
	if (seq_a > seq_b)
		return 1;
	return 0;
}
static inline void mnt_ns_tree_write_lock(void)
{
	write_seqlock(&mnt_ns_tree_lock);
}

static inline void mnt_ns_tree_write_unlock(void)
{
	write_sequnlock(&mnt_ns_tree_lock);
}

static void mnt_ns_tree_add(struct mnt_namespace *ns)
{
	struct rb_node *node, *prev;

	mnt_ns_tree_write_lock();
	node = rb_find_add_rcu(&ns->mnt_ns_tree_node, &mnt_ns_tree, mnt_ns_cmp);
	WARN_ON_ONCE(node);

	/*
	 * If there's no previous entry simply add it after the
	 * head and if there is add it after the previous entry.
	 */
	prev = rb_prev(&ns->mnt_ns_tree_node);
	if (!prev)
		list_add_rcu(&ns->mnt_ns_list, &mnt_ns_list);
	else
		list_add_rcu(&ns->mnt_ns_list, &node_to_mnt_ns(prev)->mnt_ns_list);
	mnt_ns_tree_write_unlock();
}
static void mnt_ns_release(struct mnt_namespace *ns)
{
	/* keep alive for {list,stat}mount() */
	if (refcount_dec_and_test(&ns->passive)) {
		put_user_ns(ns->user_ns);
		kfree(ns);
	}
}
DEFINE_FREE(mnt_ns_release, struct mnt_namespace *, if (_T) mnt_ns_release(_T))

static void mnt_ns_release_rcu(struct rcu_head *rcu)
{
	mnt_ns_release(container_of(rcu, struct mnt_namespace, mnt_ns_rcu));
}

static void mnt_ns_tree_remove(struct mnt_namespace *ns)
{
	/* remove from global mount namespace list */
	if (!is_anon_ns(ns)) {
		mnt_ns_tree_write_lock();
		rb_erase(&ns->mnt_ns_tree_node, &mnt_ns_tree);
		list_bidir_del_rcu(&ns->mnt_ns_list);
		mnt_ns_tree_write_unlock();
	}

	call_rcu(&ns->mnt_ns_rcu, mnt_ns_release_rcu);
}
static int mnt_ns_find(const void *key, const struct rb_node *node)
{
	const u64 mnt_ns_id = *(u64 *)key;
	const struct mnt_namespace *ns = node_to_mnt_ns(node);

	if (mnt_ns_id < ns->seq)
		return -1;
	if (mnt_ns_id > ns->seq)
		return 1;
	return 0;
}
/*
 * Lookup a mount namespace by id and take a passive reference count. Taking a
 * passive reference means the mount namespace can be emptied if e.g., the last
 * task holding an active reference exits. To access the mounts of the
 * namespace the @namespace_sem must first be acquired. If the namespace has
 * already shut down before acquiring @namespace_sem, {list,stat}mount() will
 * see that the mount rbtree of the namespace is empty.
 *
 * Note the lookup is lockless protected by a sequence counter. We only
 * need to guard against false negatives as false positives aren't
 * possible. So if we didn't find a mount namespace and the sequence
 * counter has changed we need to retry. If the sequence counter is
 * still the same we know the search actually failed.
 */
static struct mnt_namespace *lookup_mnt_ns(u64 mnt_ns_id)
{
	struct mnt_namespace *ns;
	struct rb_node *node;
	unsigned int seq;

	guard(rcu)();
	do {
		seq = read_seqbegin(&mnt_ns_tree_lock);
		node = rb_find_rcu(&mnt_ns_id, &mnt_ns_tree, mnt_ns_find);
		if (node)
			break;
	} while (read_seqretry(&mnt_ns_tree_lock, seq));

	if (!node)
		return NULL;

	/*
	 * The last reference count is put with RCU delay so we can
	 * unconditionally acquire a reference here.
	 */
	ns = node_to_mnt_ns(node);
	refcount_inc(&ns->passive);
	return ns;
}
static inline void lock_mount_hash(void)
{
	write_seqlock(&mount_lock);
}

static inline void unlock_mount_hash(void)
{
	write_sequnlock(&mount_lock);
}

static inline struct hlist_head *m_hash(struct vfsmount *mnt, struct dentry *dentry)
{
	unsigned long tmp = ((unsigned long)mnt / L1_CACHE_BYTES);
	tmp += ((unsigned long)dentry / L1_CACHE_BYTES);
	tmp = tmp + (tmp >> m_hash_shift);
	return &mount_hashtable[tmp & m_hash_mask];
}

static inline struct hlist_head *mp_hash(struct dentry *dentry)
{
	unsigned long tmp = ((unsigned long)dentry / L1_CACHE_BYTES);
	tmp = tmp + (tmp >> mp_hash_shift);
	return &mountpoint_hashtable[tmp & mp_hash_mask];
}
static int mnt_alloc_id(struct mount *mnt)
{
	int res;

	xa_lock(&mnt_id_xa);
	res = __xa_alloc(&mnt_id_xa, &mnt->mnt_id, mnt, XA_LIMIT(1, INT_MAX), GFP_KERNEL);
	if (!res)
		mnt->mnt_id_unique = ++mnt_id_ctr;
	xa_unlock(&mnt_id_xa);
	return res;
}

static void mnt_free_id(struct mount *mnt)
{
	xa_erase(&mnt_id_xa, mnt->mnt_id);
}

/*
 * Allocate a new peer group ID
 */
static int mnt_alloc_group_id(struct mount *mnt)
{
	int res = ida_alloc_min(&mnt_group_ida, 1, GFP_KERNEL);

	if (res < 0)
		return res;
	mnt->mnt_group_id = res;
	return 0;
}

/*
 * Release a peer group ID
 */
void mnt_release_group_id(struct mount *mnt)
{
	ida_free(&mnt_group_ida, mnt->mnt_group_id);
	mnt->mnt_group_id = 0;
}
/*
 * vfsmount lock must be held for read
 */
static inline void mnt_add_count(struct mount *mnt, int n)
{
	this_cpu_add(mnt->mnt_pcp->mnt_count, n);
}

/*
 * vfsmount lock must be held for write
 */
int mnt_get_count(struct mount *mnt)
{
#ifdef CONFIG_SMP
	int count = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_count;
	}

	return count;
#else
	return mnt->mnt_count;
#endif
}
static struct mount *alloc_vfsmnt(const char *name)
{
	struct mount *mnt = kmem_cache_zalloc(mnt_cache, GFP_KERNEL);

	if (mnt) {
		int err;

		err = mnt_alloc_id(mnt);
		if (err)
			goto out_free_cache;

		mnt->mnt_devname = kstrdup_const(name, GFP_KERNEL_ACCOUNT);
		if (!mnt->mnt_devname)
			goto out_free_id;

#ifdef CONFIG_SMP
		mnt->mnt_pcp = alloc_percpu(struct mnt_pcp);
		if (!mnt->mnt_pcp)
			goto out_free_devname;

		this_cpu_add(mnt->mnt_pcp->mnt_count, 1);
#else
		mnt->mnt_count = 1;
		mnt->mnt_writers = 0;
#endif

		INIT_HLIST_NODE(&mnt->mnt_hash);
		INIT_LIST_HEAD(&mnt->mnt_child);
		INIT_LIST_HEAD(&mnt->mnt_mounts);
		INIT_LIST_HEAD(&mnt->mnt_list);
		INIT_LIST_HEAD(&mnt->mnt_expire);
		INIT_LIST_HEAD(&mnt->mnt_share);
		INIT_LIST_HEAD(&mnt->mnt_slave_list);
		INIT_LIST_HEAD(&mnt->mnt_slave);
		INIT_HLIST_NODE(&mnt->mnt_mp_list);
		INIT_LIST_HEAD(&mnt->mnt_umounting);
		INIT_HLIST_HEAD(&mnt->mnt_stuck_children);
		RB_CLEAR_NODE(&mnt->mnt_node);
		mnt->mnt.mnt_idmap = &nop_mnt_idmap;
	}
	return mnt;

#ifdef CONFIG_SMP
out_free_devname:
	kfree_const(mnt->mnt_devname);
#endif
out_free_id:
	mnt_free_id(mnt);
out_free_cache:
	kmem_cache_free(mnt_cache, mnt);
	return NULL;
}
/*
 * Most r/o checks on a fs are for operations that take
 * discrete amounts of time, like a write() or unlink().
 * We must keep track of when those operations start
 * (for permission checks) and when they end, so that
 * we can determine when writes are able to occur to
 * a filesystem.
 */
/*
 * __mnt_is_readonly: check whether a mount is read-only
 * @mnt: the mount to check for its write status
 *
 * This shouldn't be used directly outside of the VFS.
 * It does not guarantee that the filesystem will stay
 * r/w, just that it is right *now*. This can not and
 * should not be used in place of IS_RDONLY(inode).
 * mnt_want/drop_write() will _keep_ the filesystem
 * r/w.
 */
bool __mnt_is_readonly(struct vfsmount *mnt)
{
	return (mnt->mnt_flags & MNT_READONLY) || sb_rdonly(mnt->mnt_sb);
}
EXPORT_SYMBOL_GPL(__mnt_is_readonly);
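
/*
 * Example (illustrative only): a quick read-only snapshot check on a
 * path the caller already holds:
 *
 *	if (__mnt_is_readonly(path->mnt))
 *		return -EROFS;
 *
 * As noted above this is only a point-in-time answer; use
 * mnt_want_write() and friends when the result must stay valid for the
 * duration of an operation.
 */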
static inline void mnt_inc_writers(struct mount *mnt)
{
	this_cpu_inc(mnt->mnt_pcp->mnt_writers);
}

static inline void mnt_dec_writers(struct mount *mnt)
{
	this_cpu_dec(mnt->mnt_pcp->mnt_writers);
}

static unsigned int mnt_get_writers(struct mount *mnt)
{
#ifdef CONFIG_SMP
	unsigned int count = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_writers;
	}

	return count;
#else
	return mnt->mnt_writers;
#endif
}
static int mnt_is_readonly(struct vfsmount *mnt)
{
	if (READ_ONCE(mnt->mnt_sb->s_readonly_remount))
		return 1;
	/*
	 * The barrier pairs with the barrier in sb_start_ro_state_change()
	 * making sure if we don't see s_readonly_remount set yet, we also will
	 * not see any superblock / mount flag changes done by remount.
	 * It also pairs with the barrier in sb_end_ro_state_change()
	 * assuring that if we see s_readonly_remount already cleared, we will
	 * see the values of superblock / mount flags updated by remount.
	 */
	smp_rmb();
	return __mnt_is_readonly(mnt);
}
470 * Most r/o & frozen checks on a fs are for operations that take discrete
471 * amounts of time, like a write() or unlink(). We must keep track of when
472 * those operations start (for permission checks) and when they end, so that we
473 * can determine when writes are able to occur to a filesystem.
476 * mnt_get_write_access - get write access to a mount without freeze protection
477 * @m: the mount on which to take a write
479 * This tells the low-level filesystem that a write is about to be performed to
480 * it, and makes sure that writes are allowed (mnt it read-write) before
481 * returning success. This operation does not protect against filesystem being
482 * frozen. When the write operation is finished, mnt_put_write_access() must be
483 * called. This is effectively a refcount.
485 int mnt_get_write_access(struct vfsmount
*m
)
487 struct mount
*mnt
= real_mount(m
);
491 mnt_inc_writers(mnt
);
493 * The store to mnt_inc_writers must be visible before we pass
494 * MNT_WRITE_HOLD loop below, so that the slowpath can see our
495 * incremented count after it has set MNT_WRITE_HOLD.
498 might_lock(&mount_lock
.lock
);
499 while (READ_ONCE(mnt
->mnt
.mnt_flags
) & MNT_WRITE_HOLD
) {
500 if (!IS_ENABLED(CONFIG_PREEMPT_RT
)) {
504 * This prevents priority inversion, if the task
505 * setting MNT_WRITE_HOLD got preempted on a remote
506 * CPU, and it prevents life lock if the task setting
507 * MNT_WRITE_HOLD has a lower priority and is bound to
508 * the same CPU as the task that is spinning here.
517 * The barrier pairs with the barrier sb_start_ro_state_change() making
518 * sure that if we see MNT_WRITE_HOLD cleared, we will also see
519 * s_readonly_remount set (or even SB_RDONLY / MNT_READONLY flags) in
520 * mnt_is_readonly() and bail in case we are racing with remount
524 if (mnt_is_readonly(m
)) {
525 mnt_dec_writers(mnt
);
532 EXPORT_SYMBOL_GPL(mnt_get_write_access
);
/**
 * mnt_want_write - get write access to a mount
 * @m: the mount on which to take a write
 *
 * This tells the low-level filesystem that a write is about to be performed to
 * it, and makes sure that writes are allowed (mount is read-write, filesystem
 * is not frozen) before returning success. When the write operation is
 * finished, mnt_drop_write() must be called. This is effectively a refcount.
 */
int mnt_want_write(struct vfsmount *m)
{
	int ret;

	sb_start_write(m->mnt_sb);
	ret = mnt_get_write_access(m);
	if (ret)
		sb_end_write(m->mnt_sb);
	return ret;
}
EXPORT_SYMBOL_GPL(mnt_want_write);
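
/*
 * Typical caller pattern (illustrative, assuming the caller holds a
 * struct path *path it is about to modify):
 *
 *	err = mnt_want_write(path->mnt);
 *	if (err)
 *		return err;
 *	... perform the modification ...
 *	mnt_drop_write(path->mnt);
 *
 * The pair provides both the read-only check and sb_start_write()
 * freeze protection for the duration of the write.
 */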
/**
 * mnt_get_write_access_file - get write access to a file's mount
 * @file: the file whose mount on which to take a write
 *
 * This is like mnt_get_write_access, but if @file is already open for write it
 * skips incrementing mnt_writers (since the open file already has a reference)
 * and instead only does the check for emergency r/o remounts. This must be
 * paired with mnt_put_write_access_file.
 */
int mnt_get_write_access_file(struct file *file)
{
	if (file->f_mode & FMODE_WRITER) {
		/*
		 * Superblock may have become readonly while there are still
		 * writable fd's, e.g. due to a fs error with errors=remount-ro
		 */
		if (__mnt_is_readonly(file->f_path.mnt))
			return -EROFS;
		return 0;
	}
	return mnt_get_write_access(file->f_path.mnt);
}
/**
 * mnt_want_write_file - get write access to a file's mount
 * @file: the file whose mount on which to take a write
 *
 * This is like mnt_want_write, but if the file is already open for writing it
 * skips incrementing mnt_writers (since the open file already has a reference)
 * and instead only does the freeze protection and the check for emergency r/o
 * remounts. This must be paired with mnt_drop_write_file.
 */
int mnt_want_write_file(struct file *file)
{
	int ret;

	sb_start_write(file_inode(file)->i_sb);
	ret = mnt_get_write_access_file(file);
	if (ret)
		sb_end_write(file_inode(file)->i_sb);
	return ret;
}
EXPORT_SYMBOL_GPL(mnt_want_write_file);
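
/*
 * Illustrative pairing for the file-based variant, e.g. from an ioctl
 * handler that modifies the file it was invoked on:
 *
 *	err = mnt_want_write_file(file);
 *	if (err)
 *		return err;
 *	... modify the file ...
 *	mnt_drop_write_file(file);
 */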
/**
 * mnt_put_write_access - give up write access to a mount
 * @mnt: the mount on which to give up write access
 *
 * Tells the low-level filesystem that we are done
 * performing writes to it.  Must be matched with
 * mnt_get_write_access() call above.
 */
void mnt_put_write_access(struct vfsmount *mnt)
{
	preempt_disable();
	mnt_dec_writers(real_mount(mnt));
	preempt_enable();
}
EXPORT_SYMBOL_GPL(mnt_put_write_access);

/**
 * mnt_drop_write - give up write access to a mount
 * @mnt: the mount on which to give up write access
 *
 * Tells the low-level filesystem that we are done performing writes to it and
 * also allows filesystem to be frozen again.  Must be matched with
 * mnt_want_write() call above.
 */
void mnt_drop_write(struct vfsmount *mnt)
{
	mnt_put_write_access(mnt);
	sb_end_write(mnt->mnt_sb);
}
EXPORT_SYMBOL_GPL(mnt_drop_write);
void mnt_put_write_access_file(struct file *file)
{
	if (!(file->f_mode & FMODE_WRITER))
		mnt_put_write_access(file->f_path.mnt);
}

void mnt_drop_write_file(struct file *file)
{
	mnt_put_write_access_file(file);
	sb_end_write(file_inode(file)->i_sb);
}
EXPORT_SYMBOL(mnt_drop_write_file);
/**
 * mnt_hold_writers - prevent write access to the given mount
 * @mnt: mnt to prevent write access to
 *
 * Prevents write access to @mnt if there are no active writers for @mnt.
 * This function needs to be called and return successfully before changing
 * properties of @mnt that need to remain stable for callers with write access
 * to @mnt.
 *
 * After this function has been called successfully callers must pair it with
 * a call to mnt_unhold_writers() in order to stop preventing write access to
 * @mnt.
 *
 * Context: This function expects lock_mount_hash() to be held serializing
 *          setting MNT_WRITE_HOLD.
 * Return: On success 0 is returned.
 *	   On error, -EBUSY is returned.
 */
static inline int mnt_hold_writers(struct mount *mnt)
{
	mnt->mnt.mnt_flags |= MNT_WRITE_HOLD;
	/*
	 * After storing MNT_WRITE_HOLD, we'll read the counters. This store
	 * should be visible before we do.
	 */
	smp_mb();

	/*
	 * With writers on hold, if this value is zero, then there are
	 * definitely no active writers (although held writers may subsequently
	 * increment the count, they'll have to wait, and decrement it after
	 * seeing MNT_READONLY).
	 *
	 * It is OK to have counter incremented on one CPU and decremented on
	 * another: the sum will add up correctly. The danger would be when we
	 * sum up each counter, if we read a counter before it is incremented,
	 * but then read another CPU's count which it has been subsequently
	 * decremented from -- we would see more decrements than we should.
	 * MNT_WRITE_HOLD protects against this scenario, because
	 * mnt_want_write first increments count, then smp_mb, then spins on
	 * MNT_WRITE_HOLD, so it can't be decremented by another CPU while
	 * we're counting up here.
	 */
	if (mnt_get_writers(mnt) > 0)
		return -EBUSY;

	return 0;
}
/**
 * mnt_unhold_writers - stop preventing write access to the given mount
 * @mnt: mnt to stop preventing write access to
 *
 * Stop preventing write access to @mnt allowing callers to gain write access
 * to @mnt again.
 *
 * This function can only be called after a successful call to
 * mnt_hold_writers().
 *
 * Context: This function expects lock_mount_hash() to be held.
 */
static inline void mnt_unhold_writers(struct mount *mnt)
{
	/*
	 * MNT_READONLY must become visible before ~MNT_WRITE_HOLD, so writers
	 * that become unheld will see MNT_READONLY.
	 */
	smp_wmb();
	mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD;
}
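
/*
 * Sketch of the intended usage (mnt_make_readonly() below is the
 * in-tree example): with lock_mount_hash() held,
 *
 *	ret = mnt_hold_writers(mnt);
 *	if (!ret)
 *		... change properties writers must not observe mid-way ...
 *	mnt_unhold_writers(mnt);
 */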
static int mnt_make_readonly(struct mount *mnt)
{
	int ret;

	ret = mnt_hold_writers(mnt);
	if (!ret)
		mnt->mnt.mnt_flags |= MNT_READONLY;
	mnt_unhold_writers(mnt);
	return ret;
}
int sb_prepare_remount_readonly(struct super_block *sb)
{
	struct mount *mnt;
	int err = 0;

	/* Racy optimization.  Recheck the counter under MNT_WRITE_HOLD */
	if (atomic_long_read(&sb->s_remove_count))
		return -EBUSY;

	lock_mount_hash();
	list_for_each_entry(mnt, &sb->s_mounts, mnt_instance) {
		if (!(mnt->mnt.mnt_flags & MNT_READONLY)) {
			err = mnt_hold_writers(mnt);
			if (err)
				break;
		}
	}
	if (!err && atomic_long_read(&sb->s_remove_count))
		err = -EBUSY;

	if (!err)
		sb_start_ro_state_change(sb);
	list_for_each_entry(mnt, &sb->s_mounts, mnt_instance) {
		if (mnt->mnt.mnt_flags & MNT_WRITE_HOLD)
			mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD;
	}
	unlock_mount_hash();

	return err;
}
static void free_vfsmnt(struct mount *mnt)
{
	mnt_idmap_put(mnt_idmap(&mnt->mnt));
	kfree_const(mnt->mnt_devname);
	free_percpu(mnt->mnt_pcp);
	kmem_cache_free(mnt_cache, mnt);
}

static void delayed_free_vfsmnt(struct rcu_head *head)
{
	free_vfsmnt(container_of(head, struct mount, mnt_rcu));
}
/* call under rcu_read_lock */
int __legitimize_mnt(struct vfsmount *bastard, unsigned seq)
{
	struct mount *mnt;

	if (read_seqretry(&mount_lock, seq))
		return 1;
	if (bastard == NULL)
		return 0;
	mnt = real_mount(bastard);
	mnt_add_count(mnt, 1);
	smp_mb();			// see mntput_no_expire()
	if (likely(!read_seqretry(&mount_lock, seq)))
		return 0;
	if (bastard->mnt_flags & MNT_SYNC_UMOUNT) {
		mnt_add_count(mnt, -1);
		return 1;
	}
	lock_mount_hash();
	if (unlikely(bastard->mnt_flags & MNT_DOOMED)) {
		mnt_add_count(mnt, -1);
		unlock_mount_hash();
		return 1;
	}
	unlock_mount_hash();
	/* caller will mntput() */
	return -1;
}

/* call under rcu_read_lock */
static bool legitimize_mnt(struct vfsmount *bastard, unsigned seq)
{
	int res = __legitimize_mnt(bastard, seq);

	if (likely(!res))
		return true;
	if (unlikely(res < 0)) {
		rcu_read_unlock();
		mntput(bastard);
		rcu_read_lock();
	}
	return false;
}
/**
 * __lookup_mnt - find first child mount
 *
 * @mnt:	parent mount
 * @dentry:	mountpoint
 *
 * If @mnt has a child mount @c mounted @dentry find and return it.
 *
 * Note that the child mount @c need not be unique. There are cases
 * where shadow mounts are created. For example, during mount
 * propagation when a source mount @mnt whose root got overmounted by a
 * mount @o after path lookup but before @namespace_sem could be
 * acquired gets copied and propagated. So @mnt gets copied including
 * @o. When @mnt is propagated to a destination mount @d that already
 * has another mount @n mounted at the same mountpoint then the source
 * mount @mnt will be tucked beneath @n, i.e., @n will be mounted on
 * @mnt and @mnt mounted on @d. Now both @n and @o are mounted at @mnt
 * on @dentry.
 *
 * Return: The first child of @mnt mounted @dentry or NULL.
 */
struct mount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry)
{
	struct hlist_head *head = m_hash(mnt, dentry);
	struct mount *p;

	hlist_for_each_entry_rcu(p, head, mnt_hash)
		if (&p->mnt_parent->mnt == mnt && p->mnt_mountpoint == dentry)
			return p;
	return NULL;
}
/*
 * lookup_mnt - Return the first child mount mounted at path
 *
 * "First" means first mounted chronologically.  If you create the
 * following mounts:
 *
 * mount /dev/sda1 /mnt
 * mount /dev/sda2 /mnt
 * mount /dev/sda3 /mnt
 *
 * Then lookup_mnt() on the base /mnt dentry in the root mount will
 * return successively the root dentry and vfsmount of /dev/sda1, then
 * /dev/sda2, then /dev/sda3, then NULL.
 *
 * lookup_mnt takes a reference to the found vfsmount.
 */
struct vfsmount *lookup_mnt(const struct path *path)
{
	struct mount *child_mnt;
	struct vfsmount *m;
	unsigned seq;

	rcu_read_lock();
	do {
		seq = read_seqbegin(&mount_lock);
		child_mnt = __lookup_mnt(path->mnt, path->dentry);
		m = child_mnt ? &child_mnt->mnt : NULL;
	} while (!legitimize_mnt(m, seq));
	rcu_read_unlock();
	return m;
}
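
/*
 * Illustrative caller: the mount returned by lookup_mnt() is referenced
 * and must be released with mntput():
 *
 *	struct vfsmount *child = lookup_mnt(&path);
 *
 *	if (child) {
 *		... inspect the child mount ...
 *		mntput(child);
 *	}
 */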
/*
 * __is_local_mountpoint - Test to see if dentry is a mountpoint in the
 *                         current mount namespace.
 *
 * The common case is dentries are not mountpoints at all and that
 * test is handled inline.  For the slow case when we are actually
 * dealing with a mountpoint of some kind, walk through all of the
 * mounts in the current mount namespace and test to see if the dentry
 * is a mountpoint.
 *
 * The mount_hashtable is not usable in the context because we
 * need to identify all mounts that may be in the current mount
 * namespace not just a mount that happens to have some specified
 * parent mount.
 */
bool __is_local_mountpoint(struct dentry *dentry)
{
	struct mnt_namespace *ns = current->nsproxy->mnt_ns;
	struct mount *mnt, *n;
	bool is_covered = false;

	down_read(&namespace_sem);
	rbtree_postorder_for_each_entry_safe(mnt, n, &ns->mounts, mnt_node) {
		is_covered = (mnt->mnt_mountpoint == dentry);
		if (is_covered)
			break;
	}
	up_read(&namespace_sem);

	return is_covered;
}
static struct mountpoint *lookup_mountpoint(struct dentry *dentry)
{
	struct hlist_head *chain = mp_hash(dentry);
	struct mountpoint *mp;

	hlist_for_each_entry(mp, chain, m_hash) {
		if (mp->m_dentry == dentry) {
			mp->m_count++;
			return mp;
		}
	}
	return NULL;
}
static struct mountpoint *get_mountpoint(struct dentry *dentry)
{
	struct mountpoint *mp, *new = NULL;
	int ret;

	if (d_mountpoint(dentry)) {
		/* might be worth a WARN_ON() */
		if (d_unlinked(dentry))
			return ERR_PTR(-ENOENT);
mountpoint:
		read_seqlock_excl(&mount_lock);
		mp = lookup_mountpoint(dentry);
		read_sequnlock_excl(&mount_lock);
		if (mp)
			goto done;
	}

	if (!new)
		new = kmalloc(sizeof(struct mountpoint), GFP_KERNEL);
	if (!new)
		return ERR_PTR(-ENOMEM);

	/* Exactly one process may set d_mounted */
	ret = d_set_mounted(dentry);

	/* Someone else set d_mounted? */
	if (ret == -EBUSY)
		goto mountpoint;

	/* The dentry is not available as a mountpoint? */
	mp = ERR_PTR(ret);
	if (ret)
		goto done;

	/* Add the new mountpoint to the hash table */
	read_seqlock_excl(&mount_lock);
	new->m_dentry = dget(dentry);
	new->m_count = 1;
	hlist_add_head(&new->m_hash, mp_hash(dentry));
	INIT_HLIST_HEAD(&new->m_list);
	read_sequnlock_excl(&mount_lock);

	mp = new;
	new = NULL;
done:
	kfree(new);
	return mp;
}
/*
 * vfsmount lock must be held.  Additionally, the caller is responsible
 * for serializing calls for given disposal list.
 */
static void __put_mountpoint(struct mountpoint *mp, struct list_head *list)
{
	if (!--mp->m_count) {
		struct dentry *dentry = mp->m_dentry;
		BUG_ON(!hlist_empty(&mp->m_list));
		spin_lock(&dentry->d_lock);
		dentry->d_flags &= ~DCACHE_MOUNTED;
		spin_unlock(&dentry->d_lock);
		dput_to_list(dentry, list);
		hlist_del(&mp->m_hash);
		kfree(mp);
	}
}

/* called with namespace_lock and vfsmount lock */
static void put_mountpoint(struct mountpoint *mp)
{
	__put_mountpoint(mp, &ex_mountpoints);
}
static inline int check_mnt(struct mount *mnt)
{
	return mnt->mnt_ns == current->nsproxy->mnt_ns;
}
/*
 * vfsmount lock must be held for write
 */
static void touch_mnt_namespace(struct mnt_namespace *ns)
{
	if (ns) {
		ns->event = ++event;
		wake_up_interruptible(&ns->poll);
	}
}

/*
 * vfsmount lock must be held for write
 */
static void __touch_mnt_namespace(struct mnt_namespace *ns)
{
	if (ns && ns->event != event) {
		ns->event = event;
		wake_up_interruptible(&ns->poll);
	}
}
/*
 * vfsmount lock must be held for write
 */
static struct mountpoint *unhash_mnt(struct mount *mnt)
{
	struct mountpoint *mp;
	mnt->mnt_parent = mnt;
	mnt->mnt_mountpoint = mnt->mnt.mnt_root;
	list_del_init(&mnt->mnt_child);
	hlist_del_init_rcu(&mnt->mnt_hash);
	hlist_del_init(&mnt->mnt_mp_list);
	mp = mnt->mnt_mp;
	mnt->mnt_mp = NULL;
	return mp;
}

/*
 * vfsmount lock must be held for write
 */
static void umount_mnt(struct mount *mnt)
{
	put_mountpoint(unhash_mnt(mnt));
}
/*
 * vfsmount lock must be held for write
 */
void mnt_set_mountpoint(struct mount *mnt,
			struct mountpoint *mp,
			struct mount *child_mnt)
{
	mp->m_count++;
	mnt_add_count(mnt, 1);	/* essentially, that's mntget */
	child_mnt->mnt_mountpoint = mp->m_dentry;
	child_mnt->mnt_parent = mnt;
	child_mnt->mnt_mp = mp;
	hlist_add_head(&child_mnt->mnt_mp_list, &mp->m_list);
}
/**
 * mnt_set_mountpoint_beneath - mount a mount beneath another one
 *
 * @new_parent: the source mount
 * @top_mnt:    the mount beneath which @new_parent is mounted
 * @new_mp:     the new mountpoint of @top_mnt on @new_parent
 *
 * Remove @top_mnt from its current mountpoint @top_mnt->mnt_mp and
 * parent @top_mnt->mnt_parent and mount it on top of @new_parent at
 * @new_mp. And mount @new_parent on the old parent and old
 * mountpoint of @top_mnt.
 *
 * Context: This function expects namespace_lock() and lock_mount_hash()
 *          to have been acquired in that order.
 */
static void mnt_set_mountpoint_beneath(struct mount *new_parent,
				       struct mount *top_mnt,
				       struct mountpoint *new_mp)
{
	struct mount *old_top_parent = top_mnt->mnt_parent;
	struct mountpoint *old_top_mp = top_mnt->mnt_mp;

	mnt_set_mountpoint(old_top_parent, old_top_mp, new_parent);
	mnt_change_mountpoint(new_parent, new_mp, top_mnt);
}
static void __attach_mnt(struct mount *mnt, struct mount *parent)
{
	hlist_add_head_rcu(&mnt->mnt_hash,
			   m_hash(&parent->mnt, mnt->mnt_mountpoint));
	list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
}
/**
 * attach_mnt - mount a mount, attach to @mount_hashtable and parent's
 *              list of child mounts
 * @parent:  the parent
 * @mnt:     the new mount
 * @mp:      the new mountpoint
 * @beneath: whether to mount @mnt beneath or on top of @parent
 *
 * If @beneath is false, mount @mnt at @mp on @parent. Then attach @mnt
 * to @parent's child mount list and to @mount_hashtable.
 *
 * If @beneath is true, remove @mnt from its current parent and
 * mountpoint and mount it on @mp on @parent, and mount @parent on the
 * old parent and old mountpoint of @mnt. Finally, attach @parent to
 * @mnt_hashtable and @parent->mnt_parent->mnt_mounts.
 *
 * Note, when __attach_mnt() is called @mnt->mnt_parent already points
 * to the correct parent.
 *
 * Context: This function expects namespace_lock() and lock_mount_hash()
 *          to have been acquired in that order.
 */
static void attach_mnt(struct mount *mnt, struct mount *parent,
		       struct mountpoint *mp, bool beneath)
{
	if (beneath)
		mnt_set_mountpoint_beneath(mnt, parent, mp);
	else
		mnt_set_mountpoint(parent, mp, mnt);
	/*
	 * Note, @mnt->mnt_parent has to be used. If @mnt was mounted
	 * beneath @parent then @mnt will need to be attached to
	 * @parent's old parent, not @parent. IOW, @mnt->mnt_parent
	 * isn't the same mount as @parent.
	 */
	__attach_mnt(mnt, mnt->mnt_parent);
}
void mnt_change_mountpoint(struct mount *parent, struct mountpoint *mp, struct mount *mnt)
{
	struct mountpoint *old_mp = mnt->mnt_mp;
	struct mount *old_parent = mnt->mnt_parent;

	list_del_init(&mnt->mnt_child);
	hlist_del_init(&mnt->mnt_mp_list);
	hlist_del_init_rcu(&mnt->mnt_hash);

	attach_mnt(mnt, parent, mp, false);

	put_mountpoint(old_mp);
	mnt_add_count(old_parent, -1);
}
static inline struct mount *node_to_mount(struct rb_node *node)
{
	return node ? rb_entry(node, struct mount, mnt_node) : NULL;
}
static void mnt_add_to_ns(struct mnt_namespace *ns, struct mount *mnt)
{
	struct rb_node **link = &ns->mounts.rb_node;
	struct rb_node *parent = NULL;
	bool mnt_first_node = true, mnt_last_node = true;

	WARN_ON(mnt_ns_attached(mnt));
	mnt->mnt_ns = ns;
	while (*link) {
		parent = *link;
		if (mnt->mnt_id_unique < node_to_mount(parent)->mnt_id_unique) {
			link = &parent->rb_left;
			mnt_last_node = false;
		} else {
			link = &parent->rb_right;
			mnt_first_node = false;
		}
	}

	if (mnt_last_node)
		ns->mnt_last_node = &mnt->mnt_node;
	if (mnt_first_node)
		ns->mnt_first_node = &mnt->mnt_node;
	rb_link_node(&mnt->mnt_node, parent, link);
	rb_insert_color(&mnt->mnt_node, &ns->mounts);
}
/*
 * vfsmount lock must be held for write
 */
static void commit_tree(struct mount *mnt)
{
	struct mount *parent = mnt->mnt_parent;
	struct mount *m;
	LIST_HEAD(head);
	struct mnt_namespace *n = parent->mnt_ns;

	BUG_ON(parent == mnt);

	list_add_tail(&head, &mnt->mnt_list);
	while (!list_empty(&head)) {
		m = list_first_entry(&head, typeof(*m), mnt_list);
		list_del(&m->mnt_list);

		mnt_add_to_ns(n, m);
	}
	n->nr_mounts += n->pending_mounts;
	n->pending_mounts = 0;

	__attach_mnt(mnt, parent);
	touch_mnt_namespace(n);
}
static struct mount *next_mnt(struct mount *p, struct mount *root)
{
	struct list_head *next = p->mnt_mounts.next;
	if (next == &p->mnt_mounts) {
		while (1) {
			if (p == root)
				return NULL;
			next = p->mnt_child.next;
			if (next != &p->mnt_parent->mnt_mounts)
				break;
			p = p->mnt_parent;
		}
	}
	return list_entry(next, struct mount, mnt_child);
}

static struct mount *skip_mnt_tree(struct mount *p)
{
	struct list_head *prev = p->mnt_mounts.prev;
	while (prev != &p->mnt_mounts) {
		p = list_entry(prev, struct mount, mnt_child);
		prev = p->mnt_mounts.prev;
	}
	return p;
}
/**
 * vfs_create_mount - Create a mount for a configured superblock
 * @fc: The configuration context with the superblock attached
 *
 * Create a mount to an already configured superblock.  If necessary, the
 * caller should invoke vfs_get_tree() before calling this.
 *
 * Note that this does not attach the mount to anything.
 */
struct vfsmount *vfs_create_mount(struct fs_context *fc)
{
	struct mount *mnt;

	if (!fc->root)
		return ERR_PTR(-EINVAL);

	mnt = alloc_vfsmnt(fc->source ?: "none");
	if (!mnt)
		return ERR_PTR(-ENOMEM);

	if (fc->sb_flags & SB_KERNMOUNT)
		mnt->mnt.mnt_flags = MNT_INTERNAL;

	atomic_inc(&fc->root->d_sb->s_active);
	mnt->mnt.mnt_sb		= fc->root->d_sb;
	mnt->mnt.mnt_root	= dget(fc->root);
	mnt->mnt_mountpoint	= mnt->mnt.mnt_root;
	mnt->mnt_parent		= mnt;

	lock_mount_hash();
	list_add_tail(&mnt->mnt_instance, &mnt->mnt.mnt_sb->s_mounts);
	unlock_mount_hash();
	return &mnt->mnt;
}
EXPORT_SYMBOL(vfs_create_mount);
struct vfsmount *fc_mount(struct fs_context *fc)
{
	int err = vfs_get_tree(fc);
	if (!err) {
		up_write(&fc->root->d_sb->s_umount);
		return vfs_create_mount(fc);
	}
	return ERR_PTR(err);
}
EXPORT_SYMBOL(fc_mount);
struct vfsmount *vfs_kern_mount(struct file_system_type *type,
				int flags, const char *name,
				void *data)
{
	struct fs_context *fc;
	struct vfsmount *mnt;
	int ret = 0;

	if (!type)
		return ERR_PTR(-EINVAL);

	fc = fs_context_for_mount(type, flags);
	if (IS_ERR(fc))
		return ERR_CAST(fc);

	if (name)
		ret = vfs_parse_fs_string(fc, "source",
					  name, strlen(name));
	if (!ret)
		ret = parse_monolithic_mount_data(fc, data);
	if (!ret)
		mnt = fc_mount(fc);
	else
		mnt = ERR_PTR(ret);

	put_fs_context(fc);
	return mnt;
}
EXPORT_SYMBOL_GPL(vfs_kern_mount);
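
/*
 * Illustrative in-kernel mount of a filesystem type (error handling
 * trimmed); "some_fs_type" is a placeholder for a real
 * struct file_system_type, and kern_mount() is the usual wrapper for
 * purely internal mounts:
 *
 *	struct vfsmount *mnt;
 *
 *	mnt = vfs_kern_mount(&some_fs_type, 0, "none", NULL);
 *	if (IS_ERR(mnt))
 *		return PTR_ERR(mnt);
 *	...
 *	mntput(mnt);
 */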
struct vfsmount *
vfs_submount(const struct dentry *mountpoint, struct file_system_type *type,
	     const char *name, void *data)
{
	/* Until it is worked out how to pass the user namespace
	 * through from the parent mount to the submount don't support
	 * unprivileged mounts with submounts.
	 */
	if (mountpoint->d_sb->s_user_ns != &init_user_ns)
		return ERR_PTR(-EPERM);

	return vfs_kern_mount(type, SB_SUBMOUNT, name, data);
}
EXPORT_SYMBOL_GPL(vfs_submount);
static struct mount *clone_mnt(struct mount *old, struct dentry *root,
			       int flag)
{
	struct super_block *sb = old->mnt.mnt_sb;
	struct mount *mnt;
	int err;

	mnt = alloc_vfsmnt(old->mnt_devname);
	if (!mnt)
		return ERR_PTR(-ENOMEM);

	if (flag & (CL_SLAVE | CL_PRIVATE | CL_SHARED_TO_SLAVE))
		mnt->mnt_group_id = 0; /* not a peer of original */
	else
		mnt->mnt_group_id = old->mnt_group_id;

	if ((flag & CL_MAKE_SHARED) && !mnt->mnt_group_id) {
		err = mnt_alloc_group_id(mnt);
		if (err)
			goto out_free;
	}

	mnt->mnt.mnt_flags = old->mnt.mnt_flags;
	mnt->mnt.mnt_flags &= ~(MNT_WRITE_HOLD|MNT_MARKED|MNT_INTERNAL);

	atomic_inc(&sb->s_active);
	mnt->mnt.mnt_idmap = mnt_idmap_get(mnt_idmap(&old->mnt));

	mnt->mnt.mnt_sb = sb;
	mnt->mnt.mnt_root = dget(root);
	mnt->mnt_mountpoint = mnt->mnt.mnt_root;
	mnt->mnt_parent = mnt;
	lock_mount_hash();
	list_add_tail(&mnt->mnt_instance, &sb->s_mounts);
	unlock_mount_hash();

	if ((flag & CL_SLAVE) ||
	    ((flag & CL_SHARED_TO_SLAVE) && IS_MNT_SHARED(old))) {
		list_add(&mnt->mnt_slave, &old->mnt_slave_list);
		mnt->mnt_master = old;
		CLEAR_MNT_SHARED(mnt);
	} else if (!(flag & CL_PRIVATE)) {
		if ((flag & CL_MAKE_SHARED) || IS_MNT_SHARED(old))
			list_add(&mnt->mnt_share, &old->mnt_share);
		if (IS_MNT_SLAVE(old))
			list_add(&mnt->mnt_slave, &old->mnt_slave);
		mnt->mnt_master = old->mnt_master;
	} else {
		CLEAR_MNT_SHARED(mnt);
	}
	if (flag & CL_MAKE_SHARED)
		set_mnt_shared(mnt);

	/* stick the duplicate mount on the same expiry list
	 * as the original if that was on one */
	if (flag & CL_EXPIRE) {
		if (!list_empty(&old->mnt_expire))
			list_add(&mnt->mnt_expire, &old->mnt_expire);
	}

	return mnt;

 out_free:
	mnt_free_id(mnt);
	free_vfsmnt(mnt);
	return ERR_PTR(err);
}
static void cleanup_mnt(struct mount *mnt)
{
	struct hlist_node *p;
	struct mount *m;
	/*
	 * The warning here probably indicates that somebody messed
	 * up a mnt_want/drop_write() pair. If this happens, the
	 * filesystem was probably unable to make r/w->r/o transitions.
	 * The locking used to deal with mnt_count decrement provides barriers,
	 * so mnt_get_writers() below is safe.
	 */
	WARN_ON(mnt_get_writers(mnt));
	if (unlikely(mnt->mnt_pins.first))
		mnt_pin_kill(mnt);
	hlist_for_each_entry_safe(m, p, &mnt->mnt_stuck_children, mnt_umount) {
		hlist_del(&m->mnt_umount);
		mntput(&m->mnt);
	}
	fsnotify_vfsmount_delete(&mnt->mnt);
	dput(mnt->mnt.mnt_root);
	deactivate_super(mnt->mnt.mnt_sb);
	mnt_free_id(mnt);
	call_rcu(&mnt->mnt_rcu, delayed_free_vfsmnt);
}

static void __cleanup_mnt(struct rcu_head *head)
{
	cleanup_mnt(container_of(head, struct mount, mnt_rcu));
}

static LLIST_HEAD(delayed_mntput_list);
static void delayed_mntput(struct work_struct *unused)
{
	struct llist_node *node = llist_del_all(&delayed_mntput_list);
	struct mount *m, *t;

	llist_for_each_entry_safe(m, t, node, mnt_llist)
		cleanup_mnt(m);
}
static DECLARE_DELAYED_WORK(delayed_mntput_work, delayed_mntput);

static void mntput_no_expire(struct mount *mnt)
{
	LIST_HEAD(list);
	int count;

	rcu_read_lock();
	if (likely(READ_ONCE(mnt->mnt_ns))) {
		/*
		 * Since we don't do lock_mount_hash() here,
		 * ->mnt_ns can change under us.  However, if it's
		 * non-NULL, then there's a reference that won't
		 * be dropped until after an RCU delay done after
		 * turning ->mnt_ns NULL.  So if we observe it
		 * non-NULL under rcu_read_lock(), the reference
		 * we are dropping is not the final one.
		 */
		mnt_add_count(mnt, -1);
		rcu_read_unlock();
		return;
	}
	lock_mount_hash();
	/*
	 * make sure that if __legitimize_mnt() has not seen us grab
	 * mount_lock, we'll see their refcount increment here.
	 */
	smp_mb();
	mnt_add_count(mnt, -1);
	count = mnt_get_count(mnt);
	if (count != 0) {
		WARN_ON(count < 0);
		rcu_read_unlock();
		unlock_mount_hash();
		return;
	}
	if (unlikely(mnt->mnt.mnt_flags & MNT_DOOMED)) {
		rcu_read_unlock();
		unlock_mount_hash();
		return;
	}
	mnt->mnt.mnt_flags |= MNT_DOOMED;
	rcu_read_unlock();

	list_del(&mnt->mnt_instance);

	if (unlikely(!list_empty(&mnt->mnt_mounts))) {
		struct mount *p, *tmp;
		list_for_each_entry_safe(p, tmp, &mnt->mnt_mounts, mnt_child) {
			__put_mountpoint(unhash_mnt(p), &list);
			hlist_add_head(&p->mnt_umount, &mnt->mnt_stuck_children);
		}
	}
	unlock_mount_hash();
	shrink_dentry_list(&list);

	if (likely(!(mnt->mnt.mnt_flags & MNT_INTERNAL))) {
		struct task_struct *task = current;
		if (likely(!(task->flags & PF_KTHREAD))) {
			init_task_work(&mnt->mnt_rcu, __cleanup_mnt);
			if (!task_work_add(task, &mnt->mnt_rcu, TWA_RESUME))
				return;
		}
		if (llist_add(&mnt->mnt_llist, &delayed_mntput_list))
			schedule_delayed_work(&delayed_mntput_work, 1);
		return;
	}
	cleanup_mnt(mnt);
}
void mntput(struct vfsmount *mnt)
{
	if (mnt) {
		struct mount *m = real_mount(mnt);
		/* avoid cacheline pingpong */
		if (unlikely(m->mnt_expiry_mark))
			WRITE_ONCE(m->mnt_expiry_mark, 0);
		mntput_no_expire(m);
	}
}
EXPORT_SYMBOL(mntput);

struct vfsmount *mntget(struct vfsmount *mnt)
{
	if (mnt)
		mnt_add_count(real_mount(mnt), 1);
	return mnt;
}
EXPORT_SYMBOL(mntget);
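
/*
 * Reference-count pairing (illustrative): every mntget() must be
 * balanced by an mntput() once the caller is done with the mount:
 *
 *	struct vfsmount *m = mntget(path->mnt);
 *	...
 *	mntput(m);
 */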
/*
 * Make a mount point inaccessible to new lookups.
 * Because there may still be current users, the caller MUST WAIT
 * for an RCU grace period before destroying the mount point.
 */
void mnt_make_shortterm(struct vfsmount *mnt)
{
	if (mnt)
		real_mount(mnt)->mnt_ns = NULL;
}
/**
 * path_is_mountpoint() - Check if path is a mount in the current namespace.
 * @path: path to check
 *
 * d_mountpoint() can only be used reliably to establish if a dentry is
 * not mounted in any namespace and that common case is handled inline.
 * d_mountpoint() isn't aware of the possibility there may be multiple
 * mounts using a given dentry in a different namespace. This function
 * checks if the passed in path is a mountpoint rather than the dentry
 * alone.
 */
bool path_is_mountpoint(const struct path *path)
{
	unsigned seq;
	bool res;

	if (!d_mountpoint(path->dentry))
		return false;

	rcu_read_lock();
	do {
		seq = read_seqbegin(&mount_lock);
		res = __path_is_mountpoint(path);
	} while (read_seqretry(&mount_lock, seq));
	rcu_read_unlock();

	return res;
}
EXPORT_SYMBOL(path_is_mountpoint);
struct vfsmount *mnt_clone_internal(const struct path *path)
{
	struct mount *p;
	p = clone_mnt(real_mount(path->mnt), path->dentry, CL_PRIVATE);
	if (IS_ERR(p))
		return ERR_CAST(p);
	p->mnt.mnt_flags |= MNT_INTERNAL;
	return &p->mnt;
}
/*
 * Returns the mount which either has the specified mnt_id, or has the next
 * smallest id after the specified one.
 */
static struct mount *mnt_find_id_at(struct mnt_namespace *ns, u64 mnt_id)
{
	struct rb_node *node = ns->mounts.rb_node;
	struct mount *ret = NULL;

	while (node) {
		struct mount *m = node_to_mount(node);

		if (mnt_id <= m->mnt_id_unique) {
			ret = node_to_mount(node);
			if (mnt_id == m->mnt_id_unique)
				break;
			node = node->rb_left;
		} else {
			node = node->rb_right;
		}
	}
	return ret;
}

/*
 * Returns the mount which either has the specified mnt_id, or has the next
 * greater id before the specified one.
 */
static struct mount *mnt_find_id_at_reverse(struct mnt_namespace *ns, u64 mnt_id)
{
	struct rb_node *node = ns->mounts.rb_node;
	struct mount *ret = NULL;

	while (node) {
		struct mount *m = node_to_mount(node);

		if (mnt_id >= m->mnt_id_unique) {
			ret = node_to_mount(node);
			if (mnt_id == m->mnt_id_unique)
				break;
			node = node->rb_right;
		} else {
			node = node->rb_left;
		}
	}
	return ret;
}
#ifdef CONFIG_PROC_FS

/* iterator; we want it to have access to namespace_sem, thus here... */
static void *m_start(struct seq_file *m, loff_t *pos)
{
	struct proc_mounts *p = m->private;

	down_read(&namespace_sem);

	return mnt_find_id_at(p->ns, *pos);
}

static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct mount *next = NULL, *mnt = v;
	struct rb_node *node = rb_next(&mnt->mnt_node);

	++*pos;
	if (node) {
		next = node_to_mount(node);
		*pos = next->mnt_id_unique;
	}
	return next;
}

static void m_stop(struct seq_file *m, void *v)
{
	up_read(&namespace_sem);
}

static int m_show(struct seq_file *m, void *v)
{
	struct proc_mounts *p = m->private;
	struct mount *r = v;
	return p->show(m, &r->mnt);
}

const struct seq_operations mounts_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= m_show,
};

#endif /* CONFIG_PROC_FS */
/**
 * may_umount_tree - check if a mount tree is busy
 * @m: root of mount tree
 *
 * This is called to check if a tree of mounts has any
 * open files, pwds, chroots or sub mounts that are
 * busy.
 */
int may_umount_tree(struct vfsmount *m)
{
	struct mount *mnt = real_mount(m);
	int actual_refs = 0;
	int minimum_refs = 0;
	struct mount *p;
	BUG_ON(!m);

	/* write lock needed for mnt_get_count */
	lock_mount_hash();
	for (p = mnt; p; p = next_mnt(p, mnt)) {
		actual_refs += mnt_get_count(p);
		minimum_refs += 2;
	}
	unlock_mount_hash();

	if (actual_refs > minimum_refs)
		return 0;

	return 1;
}
EXPORT_SYMBOL(may_umount_tree);
/**
 * may_umount - check if a mount point is busy
 * @mnt: root of mount
 *
 * This is called to check if a mount point has any
 * open files, pwds, chroots or sub mounts. If the
 * mount has sub mounts this will return busy
 * regardless of whether the sub mounts are busy.
 *
 * Doesn't take quota and stuff into account. IOW, in some cases it will
 * give false negatives. The main reason why it's here is that we need
 * a non-destructive way to look for easily umountable filesystems.
 */
int may_umount(struct vfsmount *mnt)
{
	int ret = 1;
	down_read(&namespace_sem);
	lock_mount_hash();
	if (propagate_mount_busy(real_mount(mnt), 2))
		ret = 0;
	unlock_mount_hash();
	up_read(&namespace_sem);
	return ret;
}
EXPORT_SYMBOL(may_umount);
static void namespace_unlock(void)
{
	struct hlist_head head;
	struct hlist_node *p;
	struct mount *m;
	LIST_HEAD(list);

	hlist_move_list(&unmounted, &head);
	list_splice_init(&ex_mountpoints, &list);

	up_write(&namespace_sem);

	shrink_dentry_list(&list);

	if (likely(hlist_empty(&head)))
		return;

	synchronize_rcu_expedited();

	hlist_for_each_entry_safe(m, p, &head, mnt_umount) {
		hlist_del(&m->mnt_umount);
		mntput(&m->mnt);
	}
}

static inline void namespace_lock(void)
{
	down_write(&namespace_sem);
}
enum umount_tree_flags {
	UMOUNT_SYNC = 1,
	UMOUNT_PROPAGATE = 2,
	UMOUNT_CONNECTED = 4,
};

static bool disconnect_mount(struct mount *mnt, enum umount_tree_flags how)
{
	/* Leaving mounts connected is only valid for lazy umounts */
	if (how & UMOUNT_SYNC)
		return true;

	/* A mount without a parent has nothing to be connected to */
	if (!mnt_has_parent(mnt))
		return true;

	/* Because the reference counting rules change when mounts are
	 * unmounted and connected, umounted mounts may not be
	 * connected to mounted mounts.
	 */
	if (!(mnt->mnt_parent->mnt.mnt_flags & MNT_UMOUNT))
		return true;

	/* Has it been requested that the mount remain connected? */
	if (how & UMOUNT_CONNECTED)
		return false;

	/* Is the mount locked such that it needs to remain connected? */
	if (IS_MNT_LOCKED(mnt))
		return false;

	/* By default disconnect the mount */
	return true;
}
/*
 * mount_lock must be held
 * namespace_sem must be held for write
 */
static void umount_tree(struct mount *mnt, enum umount_tree_flags how)
{
	LIST_HEAD(tmp_list);
	struct mount *p;

	if (how & UMOUNT_PROPAGATE)
		propagate_mount_unlock(mnt);

	/* Gather the mounts to umount */
	for (p = mnt; p; p = next_mnt(p, mnt)) {
		p->mnt.mnt_flags |= MNT_UMOUNT;
		if (mnt_ns_attached(p))
			move_from_ns(p, &tmp_list);
		else
			list_move(&p->mnt_list, &tmp_list);
	}

	/* Hide the mounts from mnt_mounts */
	list_for_each_entry(p, &tmp_list, mnt_list) {
		list_del_init(&p->mnt_child);
	}

	/* Add propagated mounts to the tmp_list */
	if (how & UMOUNT_PROPAGATE)
		propagate_umount(&tmp_list);

	while (!list_empty(&tmp_list)) {
		struct mnt_namespace *ns;
		bool disconnect;

		p = list_first_entry(&tmp_list, struct mount, mnt_list);
		list_del_init(&p->mnt_expire);
		list_del_init(&p->mnt_list);
		ns = p->mnt_ns;
		if (ns) {
			ns->nr_mounts--;
			__touch_mnt_namespace(ns);
		}
		p->mnt_ns = NULL;
		if (how & UMOUNT_SYNC)
			p->mnt.mnt_flags |= MNT_SYNC_UMOUNT;

		disconnect = disconnect_mount(p, how);
		if (mnt_has_parent(p)) {
			mnt_add_count(p->mnt_parent, -1);
			if (!disconnect) {
				/* Don't forget about p */
				list_add_tail(&p->mnt_child, &p->mnt_parent->mnt_mounts);
			} else {
				umount_mnt(p);
			}
		}
		change_mnt_propagation(p, MS_PRIVATE);
		if (disconnect)
			hlist_add_head(&p->mnt_umount, &unmounted);
	}
}
static void shrink_submounts(struct mount *mnt);

static int do_umount_root(struct super_block *sb)
{
	int ret = 0;

	down_write(&sb->s_umount);
	if (!sb_rdonly(sb)) {
		struct fs_context *fc;

		fc = fs_context_for_reconfigure(sb->s_root, SB_RDONLY,
						SB_RDONLY);
		if (IS_ERR(fc)) {
			ret = PTR_ERR(fc);
		} else {
			ret = parse_monolithic_mount_data(fc, NULL);
			if (!ret)
				ret = reconfigure_super(fc);
			put_fs_context(fc);
		}
	}
	up_write(&sb->s_umount);
	return ret;
}
static int do_umount(struct mount *mnt, int flags)
{
	struct super_block *sb = mnt->mnt.mnt_sb;
	int retval;

	retval = security_sb_umount(&mnt->mnt, flags);
	if (retval)
		return retval;

	/*
	 * Allow userspace to request a mountpoint be expired rather than
	 * unmounting unconditionally. Unmount only happens if:
	 *  (1) the mark is already set (the mark is cleared by mntput())
	 *  (2) the usage count == 1 [parent vfsmount] + 1 [sys_umount]
	 */
	if (flags & MNT_EXPIRE) {
		if (&mnt->mnt == current->fs->root.mnt ||
		    flags & (MNT_FORCE | MNT_DETACH))
			return -EINVAL;

		/*
		 * probably don't strictly need the lock here if we examined
		 * all race cases, but it's a slowpath.
		 */
		lock_mount_hash();
		if (mnt_get_count(mnt) != 2) {
			unlock_mount_hash();
			return -EBUSY;
		}
		unlock_mount_hash();

		if (!xchg(&mnt->mnt_expiry_mark, 1))
			return -EAGAIN;
	}

	/*
	 * If we may have to abort operations to get out of this
	 * mount, and they will themselves hold resources we must
	 * allow the fs to do things. In the Unix tradition of
	 * 'Gee that's tricky, let's do it in userspace' the umount_begin
	 * might fail to complete on the first run through as other tasks
	 * must return, and the like. That's for the mount program to worry
	 * about for the moment.
	 */
	if (flags & MNT_FORCE && sb->s_op->umount_begin) {
		sb->s_op->umount_begin(sb);
	}

	/*
	 * No sense to grab the lock for this test, but test itself looks
	 * somewhat bogus. Suggestions for better replacement?
	 * Ho-hum... In principle, we might treat that as umount + switch
	 * to rootfs. GC would eventually take care of the old vfsmount.
	 * Actually it makes sense, especially if rootfs would contain a
	 * /reboot - static binary that would close all descriptors and
	 * call reboot(9). Then init(8) could umount root and exec /reboot.
	 */
	if (&mnt->mnt == current->fs->root.mnt && !(flags & MNT_DETACH)) {
		/*
		 * Special case for "unmounting" root ...
		 * we just try to remount it readonly.
		 */
		if (!ns_capable(sb->s_user_ns, CAP_SYS_ADMIN))
			return -EPERM;
		return do_umount_root(sb);
	}

	namespace_lock();
	lock_mount_hash();

	/* Recheck MNT_LOCKED with the locks held */
	retval = -EINVAL;
	if (mnt->mnt.mnt_flags & MNT_LOCKED)
		goto out;

	event++;
	if (flags & MNT_DETACH) {
		if (mnt_ns_attached(mnt) || !list_empty(&mnt->mnt_list))
			umount_tree(mnt, UMOUNT_PROPAGATE);
		retval = 0;
	} else {
		shrink_submounts(mnt);
		retval = -EBUSY;
		if (!propagate_mount_busy(mnt, 2)) {
			if (mnt_ns_attached(mnt) || !list_empty(&mnt->mnt_list))
				umount_tree(mnt, UMOUNT_PROPAGATE|UMOUNT_SYNC);
			retval = 0;
		}
	}
out:
	unlock_mount_hash();
	namespace_unlock();
	return retval;
}
/*
 * __detach_mounts - lazily unmount all mounts on the specified dentry
 *
 * During unlink, rmdir, and d_drop it is possible to lose the path
 * to an existing mountpoint, and wind up leaking the mount.
 * detach_mounts allows lazily unmounting those mounts instead of
 * leaking them.
 *
 * The caller may hold dentry->d_inode->i_mutex.
 */
void __detach_mounts(struct dentry *dentry)
{
	struct mountpoint *mp;
	struct mount *mnt;

	namespace_lock();
	lock_mount_hash();
	mp = lookup_mountpoint(dentry);
	if (!mp)
		goto out_unlock;

	while (!hlist_empty(&mp->m_list)) {
		mnt = hlist_entry(mp->m_list.first, struct mount, mnt_mp_list);
		if (mnt->mnt.mnt_flags & MNT_UMOUNT) {
			umount_mnt(mnt);
			hlist_add_head(&mnt->mnt_umount, &unmounted);
		}
		else umount_tree(mnt, UMOUNT_CONNECTED);
	}
	put_mountpoint(mp);
out_unlock:
	unlock_mount_hash();
	namespace_unlock();
}
/*
 * Is the caller allowed to modify his namespace?
 */
bool may_mount(void)
{
	return ns_capable(current->nsproxy->mnt_ns->user_ns, CAP_SYS_ADMIN);
}

static void warn_mandlock(void)
{
	pr_warn_once("=======================================================\n"
		     "WARNING: The mand mount option has been deprecated and\n"
		     "         is ignored by this kernel. Remove the mand\n"
		     "         option from the mount to silence this warning.\n"
		     "=======================================================\n");
}
static int can_umount(const struct path *path, int flags)
{
	struct mount *mnt = real_mount(path->mnt);

	if (!may_mount())
		return -EPERM;
	if (!path_mounted(path))
		return -EINVAL;
	if (!check_mnt(mnt))
		return -EINVAL;
	if (mnt->mnt.mnt_flags & MNT_LOCKED) /* Check optimistically */
		return -EINVAL;
	if (flags & MNT_FORCE && !capable(CAP_SYS_ADMIN))
		return -EPERM;
	return 0;
}
2044 int path_umount(struct path
*path
, int flags
)
2046 struct mount
*mnt
= real_mount(path
->mnt
);
2049 ret
= can_umount(path
, flags
);
2051 ret
= do_umount(mnt
, flags
);
2053 /* we mustn't call path_put() as that would clear mnt_expiry_mark */
2055 mntput_no_expire(mnt
);
static int ksys_umount(char __user *name, int flags)
{
	int lookup_flags = LOOKUP_MOUNTPOINT;
	struct path path;
	int ret;

	// basic validity checks done first
	if (flags & ~(MNT_FORCE | MNT_DETACH | MNT_EXPIRE | UMOUNT_NOFOLLOW))
		return -EINVAL;

	if (!(flags & UMOUNT_NOFOLLOW))
		lookup_flags |= LOOKUP_FOLLOW;
	ret = user_path_at(AT_FDCWD, name, lookup_flags, &path);
	if (ret)
		return ret;
	return path_umount(&path, flags);
}

SYSCALL_DEFINE2(umount, char __user *, name, int, flags)
{
	return ksys_umount(name, flags);
}
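
/*
 * Userspace view (illustrative): the syscall above backs umount(2) and
 * umount2(2), e.g.
 *
 *	umount2("/mnt/data", MNT_DETACH);	// lazy unmount
 *	umount2("/mnt/data", UMOUNT_NOFOLLOW);	// don't follow symlinks
 *
 * with the flag validation performed in ksys_umount() above.
 */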
#ifdef __ARCH_WANT_SYS_OLDUMOUNT

/*
 *	The 2.0 compatible umount. No flags.
 */
SYSCALL_DEFINE1(oldumount, char __user *, name)
{
	return ksys_umount(name, 0);
}

#endif

static bool is_mnt_ns_file(struct dentry *dentry)
{
	struct ns_common *ns;

	/* Is this a proxy for a mount namespace? */
	if (dentry->d_op != &ns_dentry_operations)
		return false;

	ns = d_inode(dentry)->i_private;

	return ns->ops == &mntns_operations;
}

struct ns_common *from_mnt_ns(struct mnt_namespace *mnt)
{
	return &mnt->ns;
}

struct mnt_namespace *get_sequential_mnt_ns(struct mnt_namespace *mntns, bool previous)
{
	guard(rcu)();

	for (;;) {
		struct list_head *list;

		if (previous)
			list = rcu_dereference(list_bidir_prev_rcu(&mntns->mnt_ns_list));
		else
			list = rcu_dereference(list_next_rcu(&mntns->mnt_ns_list));
		if (list_is_head(list, &mnt_ns_list))
			return ERR_PTR(-ENOENT);

		mntns = list_entry_rcu(list, struct mnt_namespace, mnt_ns_list);

		/*
		 * The last passive reference count is put with RCU
		 * delay so accessing the mount namespace is not just
		 * safe but all relevant members are still valid.
		 */
		if (!ns_capable_noaudit(mntns->user_ns, CAP_SYS_ADMIN))
			continue;

		/*
		 * We need an active reference count as we're persisting
		 * the mount namespace and it might already be on its
		 * way out.
		 */
		if (!refcount_inc_not_zero(&mntns->ns.count))
			continue;

		return mntns;
	}
}

static bool mnt_ns_loop(struct dentry *dentry)
{
	/* Could bind mounting the mount namespace inode cause a
	 * mount namespace loop?
	 */
	struct mnt_namespace *mnt_ns;
	if (!is_mnt_ns_file(dentry))
		return false;

	mnt_ns = to_mnt_ns(get_proc_ns(dentry->d_inode));
	return current->nsproxy->mnt_ns->seq >= mnt_ns->seq;
}
struct mount *copy_tree(struct mount *src_root, struct dentry *dentry,
			int flag)
{
	struct mount *res, *src_parent, *src_root_child, *src_mnt,
		*dst_parent, *dst_mnt;

	if (!(flag & CL_COPY_UNBINDABLE) && IS_MNT_UNBINDABLE(src_root))
		return ERR_PTR(-EINVAL);

	if (!(flag & CL_COPY_MNT_NS_FILE) && is_mnt_ns_file(dentry))
		return ERR_PTR(-EINVAL);

	res = dst_mnt = clone_mnt(src_root, dentry, flag);
	if (IS_ERR(dst_mnt))
		return dst_mnt;

	src_parent = src_root;
	dst_mnt->mnt_mountpoint = src_root->mnt_mountpoint;

	list_for_each_entry(src_root_child, &src_root->mnt_mounts, mnt_child) {
		if (!is_subdir(src_root_child->mnt_mountpoint, dentry))
			continue;

		for (src_mnt = src_root_child; src_mnt;
		     src_mnt = next_mnt(src_mnt, src_root_child)) {
			if (!(flag & CL_COPY_UNBINDABLE) &&
			    IS_MNT_UNBINDABLE(src_mnt)) {
				if (src_mnt->mnt.mnt_flags & MNT_LOCKED) {
					/* Both unbindable and locked. */
					dst_mnt = ERR_PTR(-EPERM);
					goto out;
				} else {
					src_mnt = skip_mnt_tree(src_mnt);
					continue;
				}
			}
			if (!(flag & CL_COPY_MNT_NS_FILE) &&
			    is_mnt_ns_file(src_mnt->mnt.mnt_root)) {
				src_mnt = skip_mnt_tree(src_mnt);
				continue;
			}
			while (src_parent != src_mnt->mnt_parent) {
				src_parent = src_parent->mnt_parent;
				dst_mnt = dst_mnt->mnt_parent;
			}

			src_parent = src_mnt;
			dst_parent = dst_mnt;
			dst_mnt = clone_mnt(src_mnt, src_mnt->mnt.mnt_root, flag);
			if (IS_ERR(dst_mnt))
				goto out;
			lock_mount_hash();
			list_add_tail(&dst_mnt->mnt_list, &res->mnt_list);
			attach_mnt(dst_mnt, dst_parent, src_parent->mnt_mp, false);
			unlock_mount_hash();
		}
	}
	return res;

out:
	if (res) {
		lock_mount_hash();
		umount_tree(res, UMOUNT_SYNC);
		unlock_mount_hash();
	}
	return dst_mnt;
}

/* Caller should check returned pointer for errors */
struct vfsmount *collect_mounts(const struct path *path)
{
	struct mount *tree;

	namespace_lock();
	if (!check_mnt(real_mount(path->mnt)))
		tree = ERR_PTR(-EINVAL);
	else
		tree = copy_tree(real_mount(path->mnt), path->dentry,
				 CL_COPY_ALL | CL_PRIVATE);
	namespace_unlock();
	if (IS_ERR(tree))
		return ERR_CAST(tree);
	return &tree->mnt;
}

static void free_mnt_ns(struct mnt_namespace *);
static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *, bool);

void dissolve_on_fput(struct vfsmount *mnt)
{
	struct mnt_namespace *ns;

	namespace_lock();
	lock_mount_hash();
	ns = real_mount(mnt)->mnt_ns;
	if (ns) {
		if (is_anon_ns(ns))
			umount_tree(real_mount(mnt), UMOUNT_CONNECTED);
		else
			ns = NULL;
	}
	unlock_mount_hash();
	namespace_unlock();
	if (ns)
		free_mnt_ns(ns);
}

void drop_collected_mounts(struct vfsmount *mnt)
{
	namespace_lock();
	lock_mount_hash();
	umount_tree(real_mount(mnt), 0);
	unlock_mount_hash();
	namespace_unlock();
}
bool has_locked_children(struct mount *mnt, struct dentry *dentry)
{
	struct mount *child;

	list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) {
		if (!is_subdir(child->mnt_mountpoint, dentry))
			continue;

		if (child->mnt.mnt_flags & MNT_LOCKED)
			return true;
	}
	return false;
}
/**
 * clone_private_mount - create a private clone of a path
 * @path: path to clone
 *
 * This creates a new vfsmount, which will be the clone of @path. The new mount
 * will not be attached anywhere in the namespace and will be private (i.e.
 * changes to the originating mount won't be propagated into this).
 *
 * Release with mntput().
 */
struct vfsmount *clone_private_mount(const struct path *path)
{
	struct mount *old_mnt = real_mount(path->mnt);
	struct mount *new_mnt;

	down_read(&namespace_sem);
	if (IS_MNT_UNBINDABLE(old_mnt))
		goto invalid;

	if (!check_mnt(old_mnt))
		goto invalid;

	if (has_locked_children(old_mnt, path->dentry))
		goto invalid;

	new_mnt = clone_mnt(old_mnt, path->dentry, CL_PRIVATE);
	up_read(&namespace_sem);

	if (IS_ERR(new_mnt))
		return ERR_CAST(new_mnt);

	/* Longterm mount to be removed by kern_unmount*() */
	new_mnt->mnt_ns = MNT_NS_INTERNAL;

	return &new_mnt->mnt;

invalid:
	up_read(&namespace_sem);
	return ERR_PTR(-EINVAL);
}
EXPORT_SYMBOL_GPL(clone_private_mount);
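
/*
 * Illustrative caller (e.g. a filesystem that wants a private, detached
 * copy of a tree it was handed):
 *
 *	struct vfsmount *m = clone_private_mount(&path);
 *
 *	if (IS_ERR(m))
 *		return PTR_ERR(m);
 *	... use m without affecting the original mount ...
 *	mntput(m);
 */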
2332 int iterate_mounts(int (*f
)(struct vfsmount
*, void *), void *arg
,
2333 struct vfsmount
*root
)
2336 int res
= f(root
, arg
);
2339 list_for_each_entry(mnt
, &real_mount(root
)->mnt_list
, mnt_list
) {
2340 res
= f(&mnt
->mnt
, arg
);
2347 static void lock_mnt_tree(struct mount
*mnt
)
2351 for (p
= mnt
; p
; p
= next_mnt(p
, mnt
)) {
2352 int flags
= p
->mnt
.mnt_flags
;
2353 /* Don't allow unprivileged users to change mount flags */
2354 flags
|= MNT_LOCK_ATIME
;
2356 if (flags
& MNT_READONLY
)
2357 flags
|= MNT_LOCK_READONLY
;
2359 if (flags
& MNT_NODEV
)
2360 flags
|= MNT_LOCK_NODEV
;
2362 if (flags
& MNT_NOSUID
)
2363 flags
|= MNT_LOCK_NOSUID
;
2365 if (flags
& MNT_NOEXEC
)
2366 flags
|= MNT_LOCK_NOEXEC
;
2367 /* Don't allow unprivileged users to reveal what is under a mount */
2368 if (list_empty(&p
->mnt_expire
))
2369 flags
|= MNT_LOCKED
;
2370 p
->mnt
.mnt_flags
= flags
;
static void cleanup_group_ids(struct mount *mnt, struct mount *end)
{
	struct mount *p;

	for (p = mnt; p != end; p = next_mnt(p, mnt)) {
		if (p->mnt_group_id && !IS_MNT_SHARED(p))
			mnt_release_group_id(p);
	}
}

static int invent_group_ids(struct mount *mnt, bool recurse)
{
	struct mount *p;

	for (p = mnt; p; p = recurse ? next_mnt(p, mnt) : NULL) {
		if (!p->mnt_group_id && !IS_MNT_SHARED(p)) {
			int err = mnt_alloc_group_id(p);
			if (err) {
				cleanup_group_ids(mnt, p);
				return err;
			}
		}
	}

	return 0;
}
int count_mounts(struct mnt_namespace *ns, struct mount *mnt)
{
	unsigned int max = READ_ONCE(sysctl_mount_max);
	unsigned int mounts = 0;
	struct mount *p;

	if (ns->nr_mounts >= max)
		return -ENOSPC;
	max -= ns->nr_mounts;
	if (ns->pending_mounts >= max)
		return -ENOSPC;
	max -= ns->pending_mounts;
	for (p = mnt; p; p = next_mnt(p, mnt))
		mounts++;

	if (mounts > max)
		return -ENOSPC;

	ns->pending_mounts += mounts;
	return 0;
}

enum mnt_tree_flags_t {
	MNT_TREE_MOVE = BIT(0),
	MNT_TREE_BENEATH = BIT(1),
};
/**
 * attach_recursive_mnt - attach a source mount tree
 * @source_mnt: mount tree to be attached
 * @top_mnt:    mount that @source_mnt will be mounted on or mounted beneath
 * @dest_mp:    the mountpoint @source_mnt will be mounted at
 * @flags:      modify how @source_mnt is supposed to be attached
 *
 * NOTE: the table below explains the semantics when a source mount
 * of a given type is attached to a destination mount of a given type.
 * ---------------------------------------------------------------------------
 * |                        BIND MOUNT OPERATION                             |
 * |**************************************************************************
 * | source-->| shared       | private      | slave        | unbindable |
 * | dest     |              |              |              |            |
 * |**************************************************************************
 * |  shared  | shared (++)  | shared (+)   | shared (+++) | invalid    |
 * |non-shared| shared (+)   | private      | slave (*)    | invalid    |
 * ***************************************************************************
 * A bind operation clones the source mount and mounts the clone on the
 * destination mount.
 *
 * (++)  the cloned mount is propagated to all the mounts in the propagation
 *       tree of the destination mount and the cloned mount is added to
 *       the peer group of the source mount.
 * (+)   the cloned mount is created under the destination mount and is marked
 *       as shared. The cloned mount is added to the peer group of the source
 *       mount.
 * (+++) the mount is propagated to all the mounts in the propagation tree
 *       of the destination mount and the cloned mount is made slave
 *       of the same master as that of the source mount. The cloned mount
 *       is marked as 'shared and slave'.
 * (*)   the cloned mount is made a slave of the same master as that of the
 *       source mount.
 *
 * ---------------------------------------------------------------------------
 * |                        MOVE MOUNT OPERATION                             |
 * |**************************************************************************
 * | source-->| shared       | private      | slave        | unbindable |
 * | dest     |              |              |              |            |
 * |**************************************************************************
 * |  shared  | shared (+)   | shared (+)   | shared (+++) | invalid    |
 * |non-shared| shared (+*)  | private      | slave (*)    | unbindable |
 * ***************************************************************************
 *
 * (+)   the mount is moved to the destination and is then propagated to
 *       all the mounts in the propagation tree of the destination mount.
 * (+*)  the mount is moved to the destination.
 * (+++) the mount is moved to the destination and is then propagated to
 *       all the mounts belonging to the destination mount's propagation tree;
 *       the mount is marked as 'shared and slave'.
 * (*)   the mount continues to be a slave at the new location.
 *
 * If the source mount is a tree, the operations explained above are
 * applied to each mount in the tree.
 * Must be called without spinlocks held, since this function can sleep
 * in allocations.
 *
 * Context: The function expects namespace_lock() to be held.
 * Return: If @source_mnt was successfully attached 0 is returned.
 *         Otherwise a negative error code is returned.
 */
2496 static int attach_recursive_mnt(struct mount
*source_mnt
,
2497 struct mount
*top_mnt
,
2498 struct mountpoint
*dest_mp
,
2499 enum mnt_tree_flags_t flags
)
2501 struct user_namespace
*user_ns
= current
->nsproxy
->mnt_ns
->user_ns
;
2502 HLIST_HEAD(tree_list
);
2503 struct mnt_namespace
*ns
= top_mnt
->mnt_ns
;
2504 struct mountpoint
*smp
;
2505 struct mount
*child
, *dest_mnt
, *p
;
2506 struct hlist_node
*n
;
2508 bool moving
= flags
& MNT_TREE_MOVE
, beneath
= flags
& MNT_TREE_BENEATH
;
2511 * Preallocate a mountpoint in case the new mounts need to be
2512 * mounted beneath mounts on the same mountpoint.
2514 smp
= get_mountpoint(source_mnt
->mnt
.mnt_root
);
2516 return PTR_ERR(smp
);
2518 /* Is there space to add these mounts to the mount namespace? */
2520 err
= count_mounts(ns
, source_mnt
);
2526 dest_mnt
= top_mnt
->mnt_parent
;
2530 if (IS_MNT_SHARED(dest_mnt
)) {
2531 err
= invent_group_ids(source_mnt
, true);
2534 err
= propagate_mnt(dest_mnt
, dest_mp
, source_mnt
, &tree_list
);
2538 goto out_cleanup_ids
;
2540 if (IS_MNT_SHARED(dest_mnt
)) {
2541 for (p
= source_mnt
; p
; p
= next_mnt(p
, source_mnt
))
2548 unhash_mnt(source_mnt
);
2549 attach_mnt(source_mnt
, top_mnt
, dest_mp
, beneath
);
2550 touch_mnt_namespace(source_mnt
->mnt_ns
);
2552 if (source_mnt
->mnt_ns
) {
2555 /* move from anon - the caller will destroy */
2556 for (p
= source_mnt
; p
; p
= next_mnt(p
, source_mnt
))
2557 move_from_ns(p
, &head
);
2558 list_del_init(&head
);
2561 mnt_set_mountpoint_beneath(source_mnt
, top_mnt
, smp
);
2563 mnt_set_mountpoint(dest_mnt
, dest_mp
, source_mnt
);
2564 commit_tree(source_mnt
);
2567 hlist_for_each_entry_safe(child
, n
, &tree_list
, mnt_hash
) {
2569 hlist_del_init(&child
->mnt_hash
);
2570 q
= __lookup_mnt(&child
->mnt_parent
->mnt
,
2571 child
->mnt_mountpoint
);
2573 mnt_change_mountpoint(child
, smp
, q
);
2574 /* Notice when we are propagating across user namespaces */
2575 if (child
->mnt_parent
->mnt_ns
->user_ns
!= user_ns
)
2576 lock_mnt_tree(child
);
2577 child
->mnt
.mnt_flags
&= ~MNT_LOCKED
;
2580 put_mountpoint(smp
);
2581 unlock_mount_hash();
2586 while (!hlist_empty(&tree_list
)) {
2587 child
= hlist_entry(tree_list
.first
, struct mount
, mnt_hash
);
2588 child
->mnt_parent
->mnt_ns
->pending_mounts
= 0;
2589 umount_tree(child
, UMOUNT_SYNC
);
2591 unlock_mount_hash();
2592 cleanup_group_ids(source_mnt
, NULL
);
2594 ns
->pending_mounts
= 0;
2596 read_seqlock_excl(&mount_lock
);
2597 put_mountpoint(smp
);
2598 read_sequnlock_excl(&mount_lock
);
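/*
 * Illustrative userspace sketch (not part of this file) of the bind-mount
 * table above: binding a shared source onto a non-shared destination yields
 * a clone that is itself shared and joins the source's peer group. Paths
 * are hypothetical:
 *
 *	mount(NULL, "/srv/shared", NULL, MS_SHARED, NULL);
 *	mount("/srv/shared", "/mnt/priv/dir", NULL, MS_BIND, NULL);
 *	// the new mount at /mnt/priv/dir shares events with /srv/shared's peers
 */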
/**
 * do_lock_mount - lock mount and mountpoint
 * @path:    target path
 * @beneath: whether the intention is to mount beneath @path
 *
 * Follow the mount stack on @path until the top mount @mnt is found. If
 * the initial @path->{mnt,dentry} is a mountpoint lookup the first
 * mount stacked on top of it. Then simply follow @{mnt,mnt->mnt_root}
 * until nothing is stacked on top of it anymore.
 *
 * Acquire the inode_lock() on the top mount's ->mnt_root to protect
 * against concurrent removal of the new mountpoint from another mount
 * namespace.
 *
 * If @beneath is requested, the inode_lock() must instead be acquired on
 * @mnt's mountpoint @mp on @mnt->mnt_parent. This protects against a
 * concurrent unlink of @mp->mnt_dentry from another mount namespace
 * where @mnt doesn't have a child mount mounted @mp. A concurrent
 * removal of @mnt->mnt_root doesn't matter as nothing will be mounted
 * on top of it for @beneath.
 *
 * In addition, @beneath needs to make sure that @mnt hasn't been
 * unmounted or moved from its current mountpoint in between dropping
 * @mount_lock and acquiring @namespace_sem. For the !@beneath case @mnt
 * being unmounted would be detected later by e.g., calling
 * check_mnt(mnt) in the function it's called from. For the @beneath
 * case however, it's useful to detect it directly in do_lock_mount().
 * If @mnt hasn't been unmounted then @mnt->mnt_mountpoint still points
 * to @mnt->mnt_mp->m_dentry. But if @mnt has been unmounted it will
 * point to @mnt->mnt_root and @mnt->mnt_mp will be NULL.
 *
 * Return: Either the target mountpoint on the top mount or the top
 *         mount's mountpoint.
 */
2637 static struct mountpoint
*do_lock_mount(struct path
*path
, bool beneath
)
2639 struct vfsmount
*mnt
= path
->mnt
;
2640 struct dentry
*dentry
;
2641 struct mountpoint
*mp
= ERR_PTR(-ENOENT
);
2647 m
= real_mount(mnt
);
2648 read_seqlock_excl(&mount_lock
);
2649 dentry
= dget(m
->mnt_mountpoint
);
2650 read_sequnlock_excl(&mount_lock
);
2652 dentry
= path
->dentry
;
2655 inode_lock(dentry
->d_inode
);
2656 if (unlikely(cant_mount(dentry
))) {
2657 inode_unlock(dentry
->d_inode
);
2663 if (beneath
&& (!is_mounted(mnt
) || m
->mnt_mountpoint
!= dentry
)) {
2665 inode_unlock(dentry
->d_inode
);
2669 mnt
= lookup_mnt(path
);
2674 inode_unlock(dentry
->d_inode
);
2679 path
->dentry
= dget(mnt
->mnt_root
);
2682 mp
= get_mountpoint(dentry
);
2685 inode_unlock(dentry
->d_inode
);
2695 static inline struct mountpoint
*lock_mount(struct path
*path
)
2697 return do_lock_mount(path
, false);
2700 static void unlock_mount(struct mountpoint
*where
)
2702 struct dentry
*dentry
= where
->m_dentry
;
2704 read_seqlock_excl(&mount_lock
);
2705 put_mountpoint(where
);
2706 read_sequnlock_excl(&mount_lock
);
2709 inode_unlock(dentry
->d_inode
);
2712 static int graft_tree(struct mount
*mnt
, struct mount
*p
, struct mountpoint
*mp
)
2714 if (mnt
->mnt
.mnt_sb
->s_flags
& SB_NOUSER
)
2717 if (d_is_dir(mp
->m_dentry
) !=
2718 d_is_dir(mnt
->mnt
.mnt_root
))
2721 return attach_recursive_mnt(mnt
, p
, mp
, 0);
/*
 * Sanity check the flags to change_mnt_propagation.
 */
static int flags_to_propagation_type(int ms_flags)
{
	int type = ms_flags & ~(MS_REC | MS_SILENT);

	/* Fail if any non-propagation flags are set */
	if (type & ~(MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE))
		return 0;
	/* Only one propagation flag should be set */
	if (!is_power_of_2(type))
		return 0;
	return type;
}
/*
 * recursively change the type of the mountpoint.
 */
static int do_change_type(struct path *path, int ms_flags)
{
	struct mount *m;
	struct mount *mnt = real_mount(path->mnt);
	int recurse = ms_flags & MS_REC;
	int type;
	int err = 0;

	if (!path_mounted(path))
		return -EINVAL;

	type = flags_to_propagation_type(ms_flags);
	if (!type)
		return -EINVAL;

	namespace_lock();
	if (type == MS_SHARED) {
		err = invent_group_ids(mnt, recurse);
		if (err)
			goto out_unlock;
	}

	lock_mount_hash();
	for (m = mnt; m; m = (recurse ? next_mnt(m, mnt) : NULL))
		change_mnt_propagation(m, type);
	unlock_mount_hash();

 out_unlock:
	namespace_unlock();
	return err;
}
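/*
 * Illustrative userspace sketch (not part of this file): the propagation
 * types handled above are requested through mount(2) with exactly one of
 * MS_SHARED/MS_PRIVATE/MS_SLAVE/MS_UNBINDABLE, optionally combined with
 * MS_REC; the path is hypothetical:
 *
 *	mount(NULL, "/mnt", NULL, MS_PRIVATE | MS_REC, NULL);
 */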
2776 static struct mount
*__do_loopback(struct path
*old_path
, int recurse
)
2778 struct mount
*mnt
= ERR_PTR(-EINVAL
), *old
= real_mount(old_path
->mnt
);
2780 if (IS_MNT_UNBINDABLE(old
))
2783 if (!check_mnt(old
)) {
2784 const struct dentry_operations
*d_op
= old_path
->dentry
->d_op
;
2786 if (d_op
!= &ns_dentry_operations
&&
2787 d_op
!= &pidfs_dentry_operations
)
2791 if (!recurse
&& has_locked_children(old
, old_path
->dentry
))
2795 mnt
= copy_tree(old
, old_path
->dentry
, CL_COPY_MNT_NS_FILE
);
2797 mnt
= clone_mnt(old
, old_path
->dentry
, 0);
2800 mnt
->mnt
.mnt_flags
&= ~MNT_LOCKED
;
2806 * do loopback mount.
2808 static int do_loopback(struct path
*path
, const char *old_name
,
2811 struct path old_path
;
2812 struct mount
*mnt
= NULL
, *parent
;
2813 struct mountpoint
*mp
;
2815 if (!old_name
|| !*old_name
)
2817 err
= kern_path(old_name
, LOOKUP_FOLLOW
|LOOKUP_AUTOMOUNT
, &old_path
);
2822 if (mnt_ns_loop(old_path
.dentry
))
2825 mp
= lock_mount(path
);
2831 parent
= real_mount(path
->mnt
);
2832 if (!check_mnt(parent
))
2835 mnt
= __do_loopback(&old_path
, recurse
);
2841 err
= graft_tree(mnt
, parent
, mp
);
2844 umount_tree(mnt
, UMOUNT_SYNC
);
2845 unlock_mount_hash();
2850 path_put(&old_path
);
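/*
 * Illustrative userspace sketch (not part of this file): do_loopback() above
 * is what services a bind mount requested through the legacy mount(2) call;
 * paths are hypothetical:
 *
 *	mount("/srv/data", "/mnt/data", NULL, MS_BIND | MS_REC, NULL);
 */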
2854 static struct file
*open_detached_copy(struct path
*path
, bool recursive
)
2856 struct user_namespace
*user_ns
= current
->nsproxy
->mnt_ns
->user_ns
;
2857 struct mnt_namespace
*ns
= alloc_mnt_ns(user_ns
, true);
2858 struct mount
*mnt
, *p
;
2862 return ERR_CAST(ns
);
2865 mnt
= __do_loopback(path
, recursive
);
2869 return ERR_CAST(mnt
);
2873 for (p
= mnt
; p
; p
= next_mnt(p
, mnt
)) {
2874 mnt_add_to_ns(ns
, p
);
2879 unlock_mount_hash();
2883 path
->mnt
= &mnt
->mnt
;
2884 file
= dentry_open(path
, O_PATH
, current_cred());
2886 dissolve_on_fput(path
->mnt
);
2888 file
->f_mode
|= FMODE_NEED_UNMOUNT
;
2892 SYSCALL_DEFINE3(open_tree
, int, dfd
, const char __user
*, filename
, unsigned, flags
)
2896 int lookup_flags
= LOOKUP_AUTOMOUNT
| LOOKUP_FOLLOW
;
2897 bool detached
= flags
& OPEN_TREE_CLONE
;
2901 BUILD_BUG_ON(OPEN_TREE_CLOEXEC
!= O_CLOEXEC
);
2903 if (flags
& ~(AT_EMPTY_PATH
| AT_NO_AUTOMOUNT
| AT_RECURSIVE
|
2904 AT_SYMLINK_NOFOLLOW
| OPEN_TREE_CLONE
|
2908 if ((flags
& (AT_RECURSIVE
| OPEN_TREE_CLONE
)) == AT_RECURSIVE
)
2911 if (flags
& AT_NO_AUTOMOUNT
)
2912 lookup_flags
&= ~LOOKUP_AUTOMOUNT
;
2913 if (flags
& AT_SYMLINK_NOFOLLOW
)
2914 lookup_flags
&= ~LOOKUP_FOLLOW
;
2915 if (flags
& AT_EMPTY_PATH
)
2916 lookup_flags
|= LOOKUP_EMPTY
;
2918 if (detached
&& !may_mount())
2921 fd
= get_unused_fd_flags(flags
& O_CLOEXEC
);
2925 error
= user_path_at(dfd
, filename
, lookup_flags
, &path
);
2926 if (unlikely(error
)) {
2927 file
= ERR_PTR(error
);
2930 file
= open_detached_copy(&path
, flags
& AT_RECURSIVE
);
2932 file
= dentry_open(&path
, O_PATH
, current_cred());
2937 return PTR_ERR(file
);
2939 fd_install(fd
, file
);
/*
 * Don't allow locked mount flags to be cleared.
 *
 * No locks need to be held here while testing the various MNT_LOCK
 * flags because those flags can never be cleared once they are set.
 */
static bool can_change_locked_flags(struct mount *mnt, unsigned int mnt_flags)
{
	unsigned int fl = mnt->mnt.mnt_flags;

	if ((fl & MNT_LOCK_READONLY) &&
	    !(mnt_flags & MNT_READONLY))
		return false;

	if ((fl & MNT_LOCK_NODEV) &&
	    !(mnt_flags & MNT_NODEV))
		return false;

	if ((fl & MNT_LOCK_NOSUID) &&
	    !(mnt_flags & MNT_NOSUID))
		return false;

	if ((fl & MNT_LOCK_NOEXEC) &&
	    !(mnt_flags & MNT_NOEXEC))
		return false;

	if ((fl & MNT_LOCK_ATIME) &&
	    ((fl & MNT_ATIME_MASK) != (mnt_flags & MNT_ATIME_MASK)))
		return false;

	return true;
}
static int change_mount_ro_state(struct mount *mnt, unsigned int mnt_flags)
{
	bool readonly_request = (mnt_flags & MNT_READONLY);

	if (readonly_request == __mnt_is_readonly(&mnt->mnt))
		return 0;

	if (readonly_request)
		return mnt_make_readonly(mnt);

	mnt->mnt.mnt_flags &= ~MNT_READONLY;
	return 0;
}

static void set_mount_attributes(struct mount *mnt, unsigned int mnt_flags)
{
	mnt_flags |= mnt->mnt.mnt_flags & ~MNT_USER_SETTABLE_MASK;
	mnt->mnt.mnt_flags = mnt_flags;
	touch_mnt_namespace(mnt->mnt_ns);
}
2997 static void mnt_warn_timestamp_expiry(struct path
*mountpoint
, struct vfsmount
*mnt
)
2999 struct super_block
*sb
= mnt
->mnt_sb
;
3001 if (!__mnt_is_readonly(mnt
) &&
3002 (!(sb
->s_iflags
& SB_I_TS_EXPIRY_WARNED
)) &&
3003 (ktime_get_real_seconds() + TIME_UPTIME_SEC_MAX
> sb
->s_time_max
)) {
3004 char *buf
, *mntpath
;
3006 buf
= (char *)__get_free_page(GFP_KERNEL
);
3008 mntpath
= d_path(mountpoint
, buf
, PAGE_SIZE
);
3010 mntpath
= ERR_PTR(-ENOMEM
);
3011 if (IS_ERR(mntpath
))
3012 mntpath
= "(unknown)";
3014 pr_warn("%s filesystem being %s at %s supports timestamps until %ptTd (0x%llx)\n",
3016 is_mounted(mnt
) ? "remounted" : "mounted",
3017 mntpath
, &sb
->s_time_max
,
3018 (unsigned long long)sb
->s_time_max
);
3020 sb
->s_iflags
|= SB_I_TS_EXPIRY_WARNED
;
3022 free_page((unsigned long)buf
);
3027 * Handle reconfiguration of the mountpoint only without alteration of the
3028 * superblock it refers to. This is triggered by specifying MS_REMOUNT|MS_BIND
3031 static int do_reconfigure_mnt(struct path
*path
, unsigned int mnt_flags
)
3033 struct super_block
*sb
= path
->mnt
->mnt_sb
;
3034 struct mount
*mnt
= real_mount(path
->mnt
);
3037 if (!check_mnt(mnt
))
3040 if (!path_mounted(path
))
3043 if (!can_change_locked_flags(mnt
, mnt_flags
))
3047 * We're only checking whether the superblock is read-only not
3048 * changing it, so only take down_read(&sb->s_umount).
3050 down_read(&sb
->s_umount
);
3052 ret
= change_mount_ro_state(mnt
, mnt_flags
);
3054 set_mount_attributes(mnt
, mnt_flags
);
3055 unlock_mount_hash();
3056 up_read(&sb
->s_umount
);
3058 mnt_warn_timestamp_expiry(path
, &mnt
->mnt
);
3064 * change filesystem flags. dir should be a physical root of filesystem.
3065 * If you've mounted a non-root directory somewhere and want to do remount
3066 * on it - tough luck.
3068 static int do_remount(struct path
*path
, int ms_flags
, int sb_flags
,
3069 int mnt_flags
, void *data
)
3072 struct super_block
*sb
= path
->mnt
->mnt_sb
;
3073 struct mount
*mnt
= real_mount(path
->mnt
);
3074 struct fs_context
*fc
;
3076 if (!check_mnt(mnt
))
3079 if (!path_mounted(path
))
3082 if (!can_change_locked_flags(mnt
, mnt_flags
))
3085 fc
= fs_context_for_reconfigure(path
->dentry
, sb_flags
, MS_RMT_MASK
);
3090 * Indicate to the filesystem that the remount request is coming
3091 * from the legacy mount system call.
3095 err
= parse_monolithic_mount_data(fc
, data
);
3097 down_write(&sb
->s_umount
);
3099 if (ns_capable(sb
->s_user_ns
, CAP_SYS_ADMIN
)) {
3100 err
= reconfigure_super(fc
);
3103 set_mount_attributes(mnt
, mnt_flags
);
3104 unlock_mount_hash();
3107 up_write(&sb
->s_umount
);
3110 mnt_warn_timestamp_expiry(path
, &mnt
->mnt
);
static inline int tree_contains_unbindable(struct mount *mnt)
{
	struct mount *p;

	for (p = mnt; p; p = next_mnt(p, mnt)) {
		if (IS_MNT_UNBINDABLE(p))
			return 1;
	}
	return 0;
}

/*
 * Check that there aren't references to earlier/same mount namespaces in the
 * specified subtree.  Such references can act as pins for mount namespaces
 * that aren't checked by the mount-cycle checking code, thereby allowing
 * cycles to be made.
 */
static bool check_for_nsfs_mounts(struct mount *subtree)
{
	struct mount *p;
	bool ret = false;

	lock_mount_hash();
	for (p = subtree; p; p = next_mnt(p, subtree))
		if (mnt_ns_loop(p->mnt.mnt_root))
			goto out;

	ret = true;
out:
	unlock_mount_hash();
	return ret;
}
3148 static int do_set_group(struct path
*from_path
, struct path
*to_path
)
3150 struct mount
*from
, *to
;
3153 from
= real_mount(from_path
->mnt
);
3154 to
= real_mount(to_path
->mnt
);
3159 /* To and From must be mounted */
3160 if (!is_mounted(&from
->mnt
))
3162 if (!is_mounted(&to
->mnt
))
3166 /* We should be allowed to modify mount namespaces of both mounts */
3167 if (!ns_capable(from
->mnt_ns
->user_ns
, CAP_SYS_ADMIN
))
3169 if (!ns_capable(to
->mnt_ns
->user_ns
, CAP_SYS_ADMIN
))
3173 /* To and From paths should be mount roots */
3174 if (!path_mounted(from_path
))
3176 if (!path_mounted(to_path
))
3179 /* Setting sharing groups is only allowed across same superblock */
3180 if (from
->mnt
.mnt_sb
!= to
->mnt
.mnt_sb
)
3183 /* From mount root should be wider than To mount root */
3184 if (!is_subdir(to
->mnt
.mnt_root
, from
->mnt
.mnt_root
))
3187 /* From mount should not have locked children in place of To's root */
3188 if (has_locked_children(from
, to
->mnt
.mnt_root
))
3191 /* Setting sharing groups is only allowed on private mounts */
3192 if (IS_MNT_SHARED(to
) || IS_MNT_SLAVE(to
))
3195 /* From should not be private */
3196 if (!IS_MNT_SHARED(from
) && !IS_MNT_SLAVE(from
))
3199 if (IS_MNT_SLAVE(from
)) {
3200 struct mount
*m
= from
->mnt_master
;
3202 list_add(&to
->mnt_slave
, &m
->mnt_slave_list
);
3206 if (IS_MNT_SHARED(from
)) {
3207 to
->mnt_group_id
= from
->mnt_group_id
;
3208 list_add(&to
->mnt_share
, &from
->mnt_share
);
3211 unlock_mount_hash();
/**
 * path_overmounted - check if path is overmounted
 * @path: path to check
 *
 * Check if path is overmounted, i.e., if there's a mount on top of
 * @path->mnt with @path->dentry as mountpoint.
 *
 * Context: This function expects namespace_lock() to be held.
 * Return: If path is overmounted true is returned, false if not.
 */
static inline bool path_overmounted(const struct path *path)
{
	rcu_read_lock();
	if (unlikely(__lookup_mnt(path->mnt, path->dentry))) {
		rcu_read_unlock();
		return true;
	}
	rcu_read_unlock();
	return false;
}
/**
 * can_move_mount_beneath - check that we can mount beneath the top mount
 * @from: mount to mount beneath
 * @to:   mount under which to mount
 * @mp:   mountpoint of @to
 *
 * - Make sure that @to->dentry is actually the root of a mount under
 *   which we can mount another mount.
 * - Make sure that nothing can be mounted beneath the caller's current
 *   root or the rootfs of the namespace.
 * - Make sure that the caller can unmount the topmost mount ensuring
 *   that the caller could reveal the underlying mountpoint.
 * - Ensure that nothing has been mounted on top of @from before we
 *   grabbed @namespace_sem to avoid creating pointless shadow mounts.
 * - Prevent mounting beneath a mount if the propagation relationship
 *   between the source mount, parent mount, and top mount would lead to
 *   nonsensical mount trees.
 *
 * Context: This function expects namespace_lock() to be held.
 * Return: On success 0, and on error a negative error code is returned.
 */
static int can_move_mount_beneath(const struct path *from,
				  const struct path *to,
				  const struct mountpoint *mp)
{
	struct mount *mnt_from = real_mount(from->mnt),
		     *mnt_to = real_mount(to->mnt),
		     *parent_mnt_to = mnt_to->mnt_parent;

	if (!mnt_has_parent(mnt_to))
		return -EINVAL;

	if (!path_mounted(to))
		return -EINVAL;

	if (IS_MNT_LOCKED(mnt_to))
		return -EINVAL;

	/* Avoid creating shadow mounts during mount propagation. */
	if (path_overmounted(from))
		return -EINVAL;

	/*
	 * Mounting beneath the rootfs only makes sense when the
	 * semantics of pivot_root(".", ".") are used.
	 */
	if (&mnt_to->mnt == current->fs->root.mnt)
		return -EINVAL;
	if (parent_mnt_to == current->nsproxy->mnt_ns->root)
		return -EINVAL;

	for (struct mount *p = mnt_from; mnt_has_parent(p); p = p->mnt_parent)
		if (p == mnt_to)
			return -EINVAL;

	/*
	 * If the parent mount propagates to the child mount this would
	 * mean mounting @mnt_from on @mnt_to->mnt_parent and then
	 * propagating a copy @c of @mnt_from on top of @mnt_to. This
	 * defeats the whole purpose of mounting beneath another mount.
	 */
	if (propagation_would_overmount(parent_mnt_to, mnt_to, mp))
		return -EINVAL;

	/*
	 * If @mnt_to->mnt_parent propagates to @mnt_from this would
	 * mean propagating a copy @c of @mnt_from on top of @mnt_from.
	 * Afterwards @mnt_from would be mounted on top of
	 * @mnt_to->mnt_parent and @mnt_to would be unmounted from
	 * @mnt->mnt_parent and remounted on @mnt_from. But since @c is
	 * already mounted on @mnt_from, @mnt_to would ultimately be
	 * remounted on top of @c. Afterwards, @mnt_from would be
	 * covered by a copy @c of @mnt_from and @c would be covered by
	 * @mnt_from itself. This defeats the whole purpose of mounting
	 * @mnt_from beneath @mnt_to.
	 */
	if (propagation_would_overmount(parent_mnt_to, mnt_from, mp))
		return -EINVAL;

	return 0;
}
3323 static int do_move_mount(struct path
*old_path
, struct path
*new_path
,
3326 struct mnt_namespace
*ns
;
3329 struct mount
*parent
;
3330 struct mountpoint
*mp
, *old_mp
;
3333 enum mnt_tree_flags_t flags
= 0;
3335 mp
= do_lock_mount(new_path
, beneath
);
3339 old
= real_mount(old_path
->mnt
);
3340 p
= real_mount(new_path
->mnt
);
3341 parent
= old
->mnt_parent
;
3342 attached
= mnt_has_parent(old
);
3344 flags
|= MNT_TREE_MOVE
;
3345 old_mp
= old
->mnt_mp
;
3349 /* The mountpoint must be in our namespace. */
3353 /* The thing moved must be mounted... */
3354 if (!is_mounted(&old
->mnt
))
3357 /* ... and either ours or the root of anon namespace */
3358 if (!(attached
? check_mnt(old
) : is_anon_ns(ns
)))
3361 if (old
->mnt
.mnt_flags
& MNT_LOCKED
)
3364 if (!path_mounted(old_path
))
3367 if (d_is_dir(new_path
->dentry
) !=
3368 d_is_dir(old_path
->dentry
))
3371 * Don't move a mount residing in a shared parent.
3373 if (attached
&& IS_MNT_SHARED(parent
))
3377 err
= can_move_mount_beneath(old_path
, new_path
, mp
);
3383 flags
|= MNT_TREE_BENEATH
;
3387 * Don't move a mount tree containing unbindable mounts to a destination
3388 * mount which is shared.
3390 if (IS_MNT_SHARED(p
) && tree_contains_unbindable(old
))
3393 if (!check_for_nsfs_mounts(old
))
3395 for (; mnt_has_parent(p
); p
= p
->mnt_parent
)
3399 err
= attach_recursive_mnt(old
, real_mount(new_path
->mnt
), mp
, flags
);
3403 /* if the mount is moved, it should no longer be expire
3405 list_del_init(&old
->mnt_expire
);
3407 put_mountpoint(old_mp
);
3412 mntput_no_expire(parent
);
3419 static int do_move_mount_old(struct path
*path
, const char *old_name
)
3421 struct path old_path
;
3424 if (!old_name
|| !*old_name
)
3427 err
= kern_path(old_name
, LOOKUP_FOLLOW
, &old_path
);
3431 err
= do_move_mount(&old_path
, path
, false);
3432 path_put(&old_path
);
3437 * add a mount into a namespace's mount tree
3439 static int do_add_mount(struct mount
*newmnt
, struct mountpoint
*mp
,
3440 const struct path
*path
, int mnt_flags
)
3442 struct mount
*parent
= real_mount(path
->mnt
);
3444 mnt_flags
&= ~MNT_INTERNAL_FLAGS
;
3446 if (unlikely(!check_mnt(parent
))) {
3447 /* that's acceptable only for automounts done in private ns */
3448 if (!(mnt_flags
& MNT_SHRINKABLE
))
3450 /* ... and for those we'd better have mountpoint still alive */
3451 if (!parent
->mnt_ns
)
3455 /* Refuse the same filesystem on the same mount point */
3456 if (path
->mnt
->mnt_sb
== newmnt
->mnt
.mnt_sb
&& path_mounted(path
))
3459 if (d_is_symlink(newmnt
->mnt
.mnt_root
))
3462 newmnt
->mnt
.mnt_flags
= mnt_flags
;
3463 return graft_tree(newmnt
, parent
, mp
);
3466 static bool mount_too_revealing(const struct super_block
*sb
, int *new_mnt_flags
);
3469 * Create a new mount using a superblock configuration and request it
3470 * be added to the namespace tree.
3472 static int do_new_mount_fc(struct fs_context
*fc
, struct path
*mountpoint
,
3473 unsigned int mnt_flags
)
3475 struct vfsmount
*mnt
;
3476 struct mountpoint
*mp
;
3477 struct super_block
*sb
= fc
->root
->d_sb
;
3480 error
= security_sb_kern_mount(sb
);
3481 if (!error
&& mount_too_revealing(sb
, &mnt_flags
))
3484 if (unlikely(error
)) {
3489 up_write(&sb
->s_umount
);
3491 mnt
= vfs_create_mount(fc
);
3493 return PTR_ERR(mnt
);
3495 mnt_warn_timestamp_expiry(mountpoint
, mnt
);
3497 mp
= lock_mount(mountpoint
);
3502 error
= do_add_mount(real_mount(mnt
), mp
, mountpoint
, mnt_flags
);
3510 * create a new mount for userspace and request it to be added into the
3513 static int do_new_mount(struct path
*path
, const char *fstype
, int sb_flags
,
3514 int mnt_flags
, const char *name
, void *data
)
3516 struct file_system_type
*type
;
3517 struct fs_context
*fc
;
3518 const char *subtype
= NULL
;
3524 type
= get_fs_type(fstype
);
3528 if (type
->fs_flags
& FS_HAS_SUBTYPE
) {
3529 subtype
= strchr(fstype
, '.');
3533 put_filesystem(type
);
3539 fc
= fs_context_for_mount(type
, sb_flags
);
3540 put_filesystem(type
);
3545 * Indicate to the filesystem that the mount request is coming
3546 * from the legacy mount system call.
3551 err
= vfs_parse_fs_string(fc
, "subtype",
3552 subtype
, strlen(subtype
));
3554 err
= vfs_parse_fs_string(fc
, "source", name
, strlen(name
));
3556 err
= parse_monolithic_mount_data(fc
, data
);
3557 if (!err
&& !mount_capable(fc
))
3560 err
= vfs_get_tree(fc
);
3562 err
= do_new_mount_fc(fc
, path
, mnt_flags
);
3568 int finish_automount(struct vfsmount
*m
, const struct path
*path
)
3570 struct dentry
*dentry
= path
->dentry
;
3571 struct mountpoint
*mp
;
3580 mnt
= real_mount(m
);
3581 /* The new mount record should have at least 2 refs to prevent it being
3582 * expired before we get a chance to add it
3584 BUG_ON(mnt_get_count(mnt
) < 2);
3586 if (m
->mnt_sb
== path
->mnt
->mnt_sb
&&
3587 m
->mnt_root
== dentry
) {
3593 * we don't want to use lock_mount() - in this case finding something
3594 * that overmounts our mountpoint to be means "quitely drop what we've
3595 * got", not "try to mount it on top".
3597 inode_lock(dentry
->d_inode
);
3599 if (unlikely(cant_mount(dentry
))) {
3601 goto discard_locked
;
3603 if (path_overmounted(path
)) {
3605 goto discard_locked
;
3607 mp
= get_mountpoint(dentry
);
3610 goto discard_locked
;
3613 err
= do_add_mount(mnt
, mp
, path
, path
->mnt
->mnt_flags
| MNT_SHRINKABLE
);
3622 inode_unlock(dentry
->d_inode
);
3624 /* remove m from any expiration list it may be on */
3625 if (!list_empty(&mnt
->mnt_expire
)) {
3627 list_del_init(&mnt
->mnt_expire
);
3636 * mnt_set_expiry - Put a mount on an expiration list
3637 * @mnt: The mount to list.
3638 * @expiry_list: The list to add the mount to.
3640 void mnt_set_expiry(struct vfsmount
*mnt
, struct list_head
*expiry_list
)
3644 list_add_tail(&real_mount(mnt
)->mnt_expire
, expiry_list
);
3648 EXPORT_SYMBOL(mnt_set_expiry
);
3651 * process a list of expirable mountpoints with the intent of discarding any
3652 * mountpoints that aren't in use and haven't been touched since last we came
3655 void mark_mounts_for_expiry(struct list_head
*mounts
)
3657 struct mount
*mnt
, *next
;
3658 LIST_HEAD(graveyard
);
3660 if (list_empty(mounts
))
3666 /* extract from the expiration list every vfsmount that matches the
3667 * following criteria:
3668 * - only referenced by its parent vfsmount
3669 * - still marked for expiry (marked on the last call here; marks are
3670 * cleared by mntput())
3672 list_for_each_entry_safe(mnt
, next
, mounts
, mnt_expire
) {
3673 if (!xchg(&mnt
->mnt_expiry_mark
, 1) ||
3674 propagate_mount_busy(mnt
, 1))
3676 list_move(&mnt
->mnt_expire
, &graveyard
);
3678 while (!list_empty(&graveyard
)) {
3679 mnt
= list_first_entry(&graveyard
, struct mount
, mnt_expire
);
3680 touch_mnt_namespace(mnt
->mnt_ns
);
3681 umount_tree(mnt
, UMOUNT_PROPAGATE
|UMOUNT_SYNC
);
3683 unlock_mount_hash();
3687 EXPORT_SYMBOL_GPL(mark_mounts_for_expiry
);
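/*
 * Illustrative kernel-side sketch (not part of this file) of the expiry
 * helpers above, roughly as an automounting filesystem might use them; the
 * list name and the periodic work item are hypothetical:
 *
 *	static LIST_HEAD(example_expiry_list);
 *
 *	// when an automounted submount is created:
 *	mnt_set_expiry(newmnt, &example_expiry_list);
 *
 *	// from a periodic work item:
 *	mark_mounts_for_expiry(&example_expiry_list);
 */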
3690 * Ripoff of 'select_parent()'
3692 * search the list of submounts for a given mountpoint, and move any
3693 * shrinkable submounts to the 'graveyard' list.
3695 static int select_submounts(struct mount
*parent
, struct list_head
*graveyard
)
3697 struct mount
*this_parent
= parent
;
3698 struct list_head
*next
;
3702 next
= this_parent
->mnt_mounts
.next
;
3704 while (next
!= &this_parent
->mnt_mounts
) {
3705 struct list_head
*tmp
= next
;
3706 struct mount
*mnt
= list_entry(tmp
, struct mount
, mnt_child
);
3709 if (!(mnt
->mnt
.mnt_flags
& MNT_SHRINKABLE
))
3712 * Descend a level if the d_mounts list is non-empty.
3714 if (!list_empty(&mnt
->mnt_mounts
)) {
3719 if (!propagate_mount_busy(mnt
, 1)) {
3720 list_move_tail(&mnt
->mnt_expire
, graveyard
);
3725 * All done at this level ... ascend and resume the search
3727 if (this_parent
!= parent
) {
3728 next
= this_parent
->mnt_child
.next
;
3729 this_parent
= this_parent
->mnt_parent
;
3736 * process a list of expirable mountpoints with the intent of discarding any
3737 * submounts of a specific parent mountpoint
3739 * mount_lock must be held for write
3741 static void shrink_submounts(struct mount
*mnt
)
3743 LIST_HEAD(graveyard
);
3746 /* extract submounts of 'mountpoint' from the expiration list */
3747 while (select_submounts(mnt
, &graveyard
)) {
3748 while (!list_empty(&graveyard
)) {
3749 m
= list_first_entry(&graveyard
, struct mount
,
3751 touch_mnt_namespace(m
->mnt_ns
);
3752 umount_tree(m
, UMOUNT_PROPAGATE
|UMOUNT_SYNC
);
3757 static void *copy_mount_options(const void __user
* data
)
3760 unsigned left
, offset
;
3765 copy
= kmalloc(PAGE_SIZE
, GFP_KERNEL
);
3767 return ERR_PTR(-ENOMEM
);
3769 left
= copy_from_user(copy
, data
, PAGE_SIZE
);
3772 * Not all architectures have an exact copy_from_user(). Resort to
3775 offset
= PAGE_SIZE
- left
;
3778 if (get_user(c
, (const char __user
*)data
+ offset
))
3785 if (left
== PAGE_SIZE
) {
3787 return ERR_PTR(-EFAULT
);
3793 static char *copy_mount_string(const void __user
*data
)
3795 return data
? strndup_user(data
, PATH_MAX
) : NULL
;
/*
 * Flags is a 32-bit value that allows up to 31 non-fs dependent flags to
 * be given to the mount() call (ie: read-only, no-dev, no-suid etc).
 *
 * data is a (void *) that can point to any structure up to
 * PAGE_SIZE-1 bytes, which can contain arbitrary fs-dependent
 * information (or be NULL).
 *
 * Pre-0.97 versions of mount() didn't have a flags word.
 * When the flags word was introduced its top half was required
 * to have the magic value 0xC0ED, and this remained so until 2.4.0-test9.
 * Therefore, if this magic number is present, it carries no information
 * and must be discarded.
 */
3812 int path_mount(const char *dev_name
, struct path
*path
,
3813 const char *type_page
, unsigned long flags
, void *data_page
)
3815 unsigned int mnt_flags
= 0, sb_flags
;
3819 if ((flags
& MS_MGC_MSK
) == MS_MGC_VAL
)
3820 flags
&= ~MS_MGC_MSK
;
3822 /* Basic sanity checks */
3824 ((char *)data_page
)[PAGE_SIZE
- 1] = 0;
3826 if (flags
& MS_NOUSER
)
3829 ret
= security_sb_mount(dev_name
, path
, type_page
, flags
, data_page
);
3834 if (flags
& SB_MANDLOCK
)
3837 /* Default to relatime unless overriden */
3838 if (!(flags
& MS_NOATIME
))
3839 mnt_flags
|= MNT_RELATIME
;
3841 /* Separate the per-mountpoint flags */
3842 if (flags
& MS_NOSUID
)
3843 mnt_flags
|= MNT_NOSUID
;
3844 if (flags
& MS_NODEV
)
3845 mnt_flags
|= MNT_NODEV
;
3846 if (flags
& MS_NOEXEC
)
3847 mnt_flags
|= MNT_NOEXEC
;
3848 if (flags
& MS_NOATIME
)
3849 mnt_flags
|= MNT_NOATIME
;
3850 if (flags
& MS_NODIRATIME
)
3851 mnt_flags
|= MNT_NODIRATIME
;
3852 if (flags
& MS_STRICTATIME
)
3853 mnt_flags
&= ~(MNT_RELATIME
| MNT_NOATIME
);
3854 if (flags
& MS_RDONLY
)
3855 mnt_flags
|= MNT_READONLY
;
3856 if (flags
& MS_NOSYMFOLLOW
)
3857 mnt_flags
|= MNT_NOSYMFOLLOW
;
3859 /* The default atime for remount is preservation */
3860 if ((flags
& MS_REMOUNT
) &&
3861 ((flags
& (MS_NOATIME
| MS_NODIRATIME
| MS_RELATIME
|
3862 MS_STRICTATIME
)) == 0)) {
3863 mnt_flags
&= ~MNT_ATIME_MASK
;
3864 mnt_flags
|= path
->mnt
->mnt_flags
& MNT_ATIME_MASK
;
3867 sb_flags
= flags
& (SB_RDONLY
|
3876 if ((flags
& (MS_REMOUNT
| MS_BIND
)) == (MS_REMOUNT
| MS_BIND
))
3877 return do_reconfigure_mnt(path
, mnt_flags
);
3878 if (flags
& MS_REMOUNT
)
3879 return do_remount(path
, flags
, sb_flags
, mnt_flags
, data_page
);
3880 if (flags
& MS_BIND
)
3881 return do_loopback(path
, dev_name
, flags
& MS_REC
);
3882 if (flags
& (MS_SHARED
| MS_PRIVATE
| MS_SLAVE
| MS_UNBINDABLE
))
3883 return do_change_type(path
, flags
);
3884 if (flags
& MS_MOVE
)
3885 return do_move_mount_old(path
, dev_name
);
3887 return do_new_mount(path
, type_page
, sb_flags
, mnt_flags
, dev_name
,
3891 int do_mount(const char *dev_name
, const char __user
*dir_name
,
3892 const char *type_page
, unsigned long flags
, void *data_page
)
3897 ret
= user_path_at(AT_FDCWD
, dir_name
, LOOKUP_FOLLOW
, &path
);
3900 ret
= path_mount(dev_name
, &path
, type_page
, flags
, data_page
);
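/*
 * Illustrative userspace sketch (not part of this file) of how the flag
 * dispatch in path_mount() is exercised through the legacy mount(2) call:
 * MS_REMOUNT|MS_BIND only reconfigures the mountpoint (do_reconfigure_mnt()),
 * leaving the superblock untouched; the path is hypothetical:
 *
 *	mount(NULL, "/mnt", NULL, MS_REMOUNT | MS_BIND | MS_RDONLY, NULL);
 */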
3905 static struct ucounts
*inc_mnt_namespaces(struct user_namespace
*ns
)
3907 return inc_ucount(ns
, current_euid(), UCOUNT_MNT_NAMESPACES
);
3910 static void dec_mnt_namespaces(struct ucounts
*ucounts
)
3912 dec_ucount(ucounts
, UCOUNT_MNT_NAMESPACES
);
3915 static void free_mnt_ns(struct mnt_namespace
*ns
)
3917 if (!is_anon_ns(ns
))
3918 ns_free_inum(&ns
->ns
);
3919 dec_mnt_namespaces(ns
->ucounts
);
3920 mnt_ns_tree_remove(ns
);
/*
 * Assign a sequence number so we can detect when we attempt to bind
 * mount a reference to an older mount namespace into the current
 * mount namespace, preventing reference counting loops. A 64bit
 * number incrementing at 10GHz will take 12,427 years to wrap which
 * is effectively never, so we can ignore the possibility.
 */
static atomic64_t mnt_ns_seq = ATOMIC64_INIT(1);
3932 static struct mnt_namespace
*alloc_mnt_ns(struct user_namespace
*user_ns
, bool anon
)
3934 struct mnt_namespace
*new_ns
;
3935 struct ucounts
*ucounts
;
3938 ucounts
= inc_mnt_namespaces(user_ns
);
3940 return ERR_PTR(-ENOSPC
);
3942 new_ns
= kzalloc(sizeof(struct mnt_namespace
), GFP_KERNEL_ACCOUNT
);
3944 dec_mnt_namespaces(ucounts
);
3945 return ERR_PTR(-ENOMEM
);
3948 ret
= ns_alloc_inum(&new_ns
->ns
);
3951 dec_mnt_namespaces(ucounts
);
3952 return ERR_PTR(ret
);
3955 new_ns
->ns
.ops
= &mntns_operations
;
3957 new_ns
->seq
= atomic64_inc_return(&mnt_ns_seq
);
3958 refcount_set(&new_ns
->ns
.count
, 1);
3959 refcount_set(&new_ns
->passive
, 1);
3960 new_ns
->mounts
= RB_ROOT
;
3961 INIT_LIST_HEAD(&new_ns
->mnt_ns_list
);
3962 RB_CLEAR_NODE(&new_ns
->mnt_ns_tree_node
);
3963 init_waitqueue_head(&new_ns
->poll
);
3964 new_ns
->user_ns
= get_user_ns(user_ns
);
3965 new_ns
->ucounts
= ucounts
;
3970 struct mnt_namespace
*copy_mnt_ns(unsigned long flags
, struct mnt_namespace
*ns
,
3971 struct user_namespace
*user_ns
, struct fs_struct
*new_fs
)
3973 struct mnt_namespace
*new_ns
;
3974 struct vfsmount
*rootmnt
= NULL
, *pwdmnt
= NULL
;
3975 struct mount
*p
, *q
;
3982 if (likely(!(flags
& CLONE_NEWNS
))) {
3989 new_ns
= alloc_mnt_ns(user_ns
, false);
3994 /* First pass: copy the tree topology */
3995 copy_flags
= CL_COPY_UNBINDABLE
| CL_EXPIRE
;
3996 if (user_ns
!= ns
->user_ns
)
3997 copy_flags
|= CL_SHARED_TO_SLAVE
;
3998 new = copy_tree(old
, old
->mnt
.mnt_root
, copy_flags
);
4001 ns_free_inum(&new_ns
->ns
);
4002 dec_mnt_namespaces(new_ns
->ucounts
);
4003 mnt_ns_release(new_ns
);
4004 return ERR_CAST(new);
4006 if (user_ns
!= ns
->user_ns
) {
4009 unlock_mount_hash();
4014 * Second pass: switch the tsk->fs->* elements and mark new vfsmounts
4015 * as belonging to new namespace. We have already acquired a private
4016 * fs_struct, so tsk->fs->lock is not needed.
4021 mnt_add_to_ns(new_ns
, q
);
4022 new_ns
->nr_mounts
++;
4024 if (&p
->mnt
== new_fs
->root
.mnt
) {
4025 new_fs
->root
.mnt
= mntget(&q
->mnt
);
4028 if (&p
->mnt
== new_fs
->pwd
.mnt
) {
4029 new_fs
->pwd
.mnt
= mntget(&q
->mnt
);
4033 p
= next_mnt(p
, old
);
4034 q
= next_mnt(q
, new);
4037 // an mntns binding we'd skipped?
4038 while (p
->mnt
.mnt_root
!= q
->mnt
.mnt_root
)
4039 p
= next_mnt(skip_mnt_tree(p
), old
);
4048 mnt_ns_tree_add(new_ns
);
4052 struct dentry
*mount_subtree(struct vfsmount
*m
, const char *name
)
4054 struct mount
*mnt
= real_mount(m
);
4055 struct mnt_namespace
*ns
;
4056 struct super_block
*s
;
4060 ns
= alloc_mnt_ns(&init_user_ns
, true);
4063 return ERR_CAST(ns
);
4067 mnt_add_to_ns(ns
, mnt
);
4069 err
= vfs_path_lookup(m
->mnt_root
, m
,
4070 name
, LOOKUP_FOLLOW
|LOOKUP_AUTOMOUNT
, &path
);
4075 return ERR_PTR(err
);
4077 /* trade a vfsmount reference for active sb one */
4078 s
= path
.mnt
->mnt_sb
;
4079 atomic_inc(&s
->s_active
);
4081 /* lock the sucker */
4082 down_write(&s
->s_umount
);
4083 /* ... and return the root of (sub)tree on it */
4086 EXPORT_SYMBOL(mount_subtree
);
4088 SYSCALL_DEFINE5(mount
, char __user
*, dev_name
, char __user
*, dir_name
,
4089 char __user
*, type
, unsigned long, flags
, void __user
*, data
)
4096 kernel_type
= copy_mount_string(type
);
4097 ret
= PTR_ERR(kernel_type
);
4098 if (IS_ERR(kernel_type
))
4101 kernel_dev
= copy_mount_string(dev_name
);
4102 ret
= PTR_ERR(kernel_dev
);
4103 if (IS_ERR(kernel_dev
))
4106 options
= copy_mount_options(data
);
4107 ret
= PTR_ERR(options
);
4108 if (IS_ERR(options
))
4111 ret
= do_mount(kernel_dev
, dir_name
, kernel_type
, flags
, options
);
4122 #define FSMOUNT_VALID_FLAGS \
4123 (MOUNT_ATTR_RDONLY | MOUNT_ATTR_NOSUID | MOUNT_ATTR_NODEV | \
4124 MOUNT_ATTR_NOEXEC | MOUNT_ATTR__ATIME | MOUNT_ATTR_NODIRATIME | \
4125 MOUNT_ATTR_NOSYMFOLLOW)
4127 #define MOUNT_SETATTR_VALID_FLAGS (FSMOUNT_VALID_FLAGS | MOUNT_ATTR_IDMAP)
4129 #define MOUNT_SETATTR_PROPAGATION_FLAGS \
4130 (MS_UNBINDABLE | MS_PRIVATE | MS_SLAVE | MS_SHARED)
4132 static unsigned int attr_flags_to_mnt_flags(u64 attr_flags
)
4134 unsigned int mnt_flags
= 0;
4136 if (attr_flags
& MOUNT_ATTR_RDONLY
)
4137 mnt_flags
|= MNT_READONLY
;
4138 if (attr_flags
& MOUNT_ATTR_NOSUID
)
4139 mnt_flags
|= MNT_NOSUID
;
4140 if (attr_flags
& MOUNT_ATTR_NODEV
)
4141 mnt_flags
|= MNT_NODEV
;
4142 if (attr_flags
& MOUNT_ATTR_NOEXEC
)
4143 mnt_flags
|= MNT_NOEXEC
;
4144 if (attr_flags
& MOUNT_ATTR_NODIRATIME
)
4145 mnt_flags
|= MNT_NODIRATIME
;
4146 if (attr_flags
& MOUNT_ATTR_NOSYMFOLLOW
)
4147 mnt_flags
|= MNT_NOSYMFOLLOW
;
4153 * Create a kernel mount representation for a new, prepared superblock
4154 * (specified by fs_fd) and attach to an open_tree-like file descriptor.
4156 SYSCALL_DEFINE3(fsmount
, int, fs_fd
, unsigned int, flags
,
4157 unsigned int, attr_flags
)
4159 struct mnt_namespace
*ns
;
4160 struct fs_context
*fc
;
4162 struct path newmount
;
4164 unsigned int mnt_flags
= 0;
4170 if ((flags
& ~(FSMOUNT_CLOEXEC
)) != 0)
4173 if (attr_flags
& ~FSMOUNT_VALID_FLAGS
)
4176 mnt_flags
= attr_flags_to_mnt_flags(attr_flags
);
4178 switch (attr_flags
& MOUNT_ATTR__ATIME
) {
4179 case MOUNT_ATTR_STRICTATIME
:
4181 case MOUNT_ATTR_NOATIME
:
4182 mnt_flags
|= MNT_NOATIME
;
4184 case MOUNT_ATTR_RELATIME
:
4185 mnt_flags
|= MNT_RELATIME
;
4191 CLASS(fd
, f
)(fs_fd
);
4195 if (fd_file(f
)->f_op
!= &fscontext_fops
)
4198 fc
= fd_file(f
)->private_data
;
4200 ret
= mutex_lock_interruptible(&fc
->uapi_mutex
);
4204 /* There must be a valid superblock or we can't mount it */
4210 if (mount_too_revealing(fc
->root
->d_sb
, &mnt_flags
)) {
4211 pr_warn("VFS: Mount too revealing\n");
4216 if (fc
->phase
!= FS_CONTEXT_AWAITING_MOUNT
)
4219 if (fc
->sb_flags
& SB_MANDLOCK
)
4222 newmount
.mnt
= vfs_create_mount(fc
);
4223 if (IS_ERR(newmount
.mnt
)) {
4224 ret
= PTR_ERR(newmount
.mnt
);
4227 newmount
.dentry
= dget(fc
->root
);
4228 newmount
.mnt
->mnt_flags
= mnt_flags
;
4230 /* We've done the mount bit - now move the file context into more or
4231 * less the same state as if we'd done an fspick(). We don't want to
4232 * do any memory allocation or anything like that at this point as we
4233 * don't want to have to handle any errors incurred.
4235 vfs_clean_context(fc
);
4237 ns
= alloc_mnt_ns(current
->nsproxy
->mnt_ns
->user_ns
, true);
4242 mnt
= real_mount(newmount
.mnt
);
4245 mnt_add_to_ns(ns
, mnt
);
4246 mntget(newmount
.mnt
);
4248 /* Attach to an apparent O_PATH fd with a note that we need to unmount
4249 * it, not just simply put it.
4251 file
= dentry_open(&newmount
, O_PATH
, fc
->cred
);
4253 dissolve_on_fput(newmount
.mnt
);
4254 ret
= PTR_ERR(file
);
4257 file
->f_mode
|= FMODE_NEED_UNMOUNT
;
4259 ret
= get_unused_fd_flags((flags
& FSMOUNT_CLOEXEC
) ? O_CLOEXEC
: 0);
4261 fd_install(ret
, file
);
4266 path_put(&newmount
);
4268 mutex_unlock(&fc
->uapi_mutex
);
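/*
 * Illustrative userspace sketch (not part of this file) of the new mount API
 * sequence that ends in fsmount(); the device and target are hypothetical
 * and raw syscall(2) is used in case libc lacks wrappers:
 *
 *	int fsfd = syscall(SYS_fsopen, "ext4", FSOPEN_CLOEXEC);
 *
 *	syscall(SYS_fsconfig, fsfd, FSCONFIG_SET_STRING, "source",
 *		"/dev/sda1", 0);
 *	syscall(SYS_fsconfig, fsfd, FSCONFIG_CMD_CREATE, NULL, NULL, 0);
 *	int mfd = syscall(SYS_fsmount, fsfd, FSMOUNT_CLOEXEC,
 *			  MOUNT_ATTR_NODEV);
 *	syscall(SYS_move_mount, mfd, "", AT_FDCWD, "/mnt",
 *		MOVE_MOUNT_F_EMPTY_PATH);
 */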
4273 * Move a mount from one place to another. In combination with
4274 * fsopen()/fsmount() this is used to install a new mount and in combination
4275 * with open_tree(OPEN_TREE_CLONE [| AT_RECURSIVE]) it can be used to copy
4278 * Note the flags value is a combination of MOVE_MOUNT_* flags.
4280 SYSCALL_DEFINE5(move_mount
,
4281 int, from_dfd
, const char __user
*, from_pathname
,
4282 int, to_dfd
, const char __user
*, to_pathname
,
4283 unsigned int, flags
)
4285 struct path from_path
, to_path
;
4286 unsigned int lflags
;
4292 if (flags
& ~MOVE_MOUNT__MASK
)
4295 if ((flags
& (MOVE_MOUNT_BENEATH
| MOVE_MOUNT_SET_GROUP
)) ==
4296 (MOVE_MOUNT_BENEATH
| MOVE_MOUNT_SET_GROUP
))
4299 /* If someone gives a pathname, they aren't permitted to move
4300 * from an fd that requires unmount as we can't get at the flag
4301 * to clear it afterwards.
4304 if (flags
& MOVE_MOUNT_F_SYMLINKS
) lflags
|= LOOKUP_FOLLOW
;
4305 if (flags
& MOVE_MOUNT_F_AUTOMOUNTS
) lflags
|= LOOKUP_AUTOMOUNT
;
4306 if (flags
& MOVE_MOUNT_F_EMPTY_PATH
) lflags
|= LOOKUP_EMPTY
;
4308 ret
= user_path_at(from_dfd
, from_pathname
, lflags
, &from_path
);
4313 if (flags
& MOVE_MOUNT_T_SYMLINKS
) lflags
|= LOOKUP_FOLLOW
;
4314 if (flags
& MOVE_MOUNT_T_AUTOMOUNTS
) lflags
|= LOOKUP_AUTOMOUNT
;
4315 if (flags
& MOVE_MOUNT_T_EMPTY_PATH
) lflags
|= LOOKUP_EMPTY
;
4317 ret
= user_path_at(to_dfd
, to_pathname
, lflags
, &to_path
);
4321 ret
= security_move_mount(&from_path
, &to_path
);
4325 if (flags
& MOVE_MOUNT_SET_GROUP
)
4326 ret
= do_set_group(&from_path
, &to_path
);
4328 ret
= do_move_mount(&from_path
, &to_path
,
4329 (flags
& MOVE_MOUNT_BENEATH
));
4334 path_put(&from_path
);
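/*
 * Illustrative userspace sketch (not part of this file): MOVE_MOUNT_BENEATH
 * mounts the source under the top mount at the target instead of on top of
 * it, so the new mount is revealed once the old top mount is unmounted;
 * paths are hypothetical:
 *
 *	syscall(SYS_move_mount, AT_FDCWD, "/staging/rootfs",
 *		AT_FDCWD, "/mnt", MOVE_MOUNT_BENEATH);
 */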
/*
 * Return true if path is reachable from root
 *
 * namespace_sem or mount_lock is held
 */
bool is_path_reachable(struct mount *mnt, struct dentry *dentry,
			 const struct path *root)
{
	while (&mnt->mnt != root->mnt && mnt_has_parent(mnt)) {
		dentry = mnt->mnt_mountpoint;
		mnt = mnt->mnt_parent;
	}
	return &mnt->mnt == root->mnt && is_subdir(dentry, root->dentry);
}

bool path_is_under(const struct path *path1, const struct path *path2)
{
	bool res;

	read_seqlock_excl(&mount_lock);
	res = is_path_reachable(real_mount(path1->mnt), path1->dentry, path2);
	read_sequnlock_excl(&mount_lock);
	return res;
}
EXPORT_SYMBOL(path_is_under);
/*
 * pivot_root Semantics:
 * Moves the root file system of the current process to the directory put_old,
 * makes new_root the new root file system of the current process, and sets
 * root/cwd of all processes which had them on the current root to new_root.
 *
 * Restrictions:
 * The new_root and put_old must be directories, and must not be on the
 * same file system as the current process root. The put_old must be
 * underneath new_root, i.e. adding a non-zero number of /.. to the string
 * pointed to by put_old must yield the same directory as new_root. No other
 * file system may be mounted on put_old. After all, new_root is a mountpoint.
 *
 * Also, the current root cannot be on the 'rootfs' (initial ramfs) filesystem.
 * See Documentation/filesystems/ramfs-rootfs-initramfs.rst for alternatives
 * in this situation.
 *
 * Notes:
 *  - we don't move root/cwd if they are not at the root (reason: if something
 *    cared enough to change them, it's probably wrong to force them elsewhere)
 *  - it's okay to pick a root that isn't the root of a file system, e.g.
 *    /nfs/my_root where /nfs is the mount point. It must be a mountpoint,
 *    though, so you may need to say mount --bind /nfs/my_root /nfs/my_root
 *    first.
 */
4388 SYSCALL_DEFINE2(pivot_root
, const char __user
*, new_root
,
4389 const char __user
*, put_old
)
4391 struct path
new, old
, root
;
4392 struct mount
*new_mnt
, *root_mnt
, *old_mnt
, *root_parent
, *ex_parent
;
4393 struct mountpoint
*old_mp
, *root_mp
;
4399 error
= user_path_at(AT_FDCWD
, new_root
,
4400 LOOKUP_FOLLOW
| LOOKUP_DIRECTORY
, &new);
4404 error
= user_path_at(AT_FDCWD
, put_old
,
4405 LOOKUP_FOLLOW
| LOOKUP_DIRECTORY
, &old
);
4409 error
= security_sb_pivotroot(&old
, &new);
4413 get_fs_root(current
->fs
, &root
);
4414 old_mp
= lock_mount(&old
);
4415 error
= PTR_ERR(old_mp
);
4420 new_mnt
= real_mount(new.mnt
);
4421 root_mnt
= real_mount(root
.mnt
);
4422 old_mnt
= real_mount(old
.mnt
);
4423 ex_parent
= new_mnt
->mnt_parent
;
4424 root_parent
= root_mnt
->mnt_parent
;
4425 if (IS_MNT_SHARED(old_mnt
) ||
4426 IS_MNT_SHARED(ex_parent
) ||
4427 IS_MNT_SHARED(root_parent
))
4429 if (!check_mnt(root_mnt
) || !check_mnt(new_mnt
))
4431 if (new_mnt
->mnt
.mnt_flags
& MNT_LOCKED
)
4434 if (d_unlinked(new.dentry
))
4437 if (new_mnt
== root_mnt
|| old_mnt
== root_mnt
)
4438 goto out4
; /* loop, on the same file system */
4440 if (!path_mounted(&root
))
4441 goto out4
; /* not a mountpoint */
4442 if (!mnt_has_parent(root_mnt
))
4443 goto out4
; /* not attached */
4444 if (!path_mounted(&new))
4445 goto out4
; /* not a mountpoint */
4446 if (!mnt_has_parent(new_mnt
))
4447 goto out4
; /* not attached */
4448 /* make sure we can reach put_old from new_root */
4449 if (!is_path_reachable(old_mnt
, old
.dentry
, &new))
4451 /* make certain new is below the root */
4452 if (!is_path_reachable(new_mnt
, new.dentry
, &root
))
4455 umount_mnt(new_mnt
);
4456 root_mp
= unhash_mnt(root_mnt
); /* we'll need its mountpoint */
4457 if (root_mnt
->mnt
.mnt_flags
& MNT_LOCKED
) {
4458 new_mnt
->mnt
.mnt_flags
|= MNT_LOCKED
;
4459 root_mnt
->mnt
.mnt_flags
&= ~MNT_LOCKED
;
4461 /* mount old root on put_old */
4462 attach_mnt(root_mnt
, old_mnt
, old_mp
, false);
4463 /* mount new_root on / */
4464 attach_mnt(new_mnt
, root_parent
, root_mp
, false);
4465 mnt_add_count(root_parent
, -1);
4466 touch_mnt_namespace(current
->nsproxy
->mnt_ns
);
4467 /* A moved mount should not expire automatically */
4468 list_del_init(&new_mnt
->mnt_expire
);
4469 put_mountpoint(root_mp
);
4470 unlock_mount_hash();
4471 chroot_fs_refs(&root
, &new);
4474 unlock_mount(old_mp
);
4476 mntput_no_expire(ex_parent
);
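/*
 * Illustrative userspace sketch (not part of this file) of the pivot_root(2)
 * sequence commonly used by container runtimes; "/newroot" is hypothetical
 * and must already be a mount point (e.g. via a bind mount onto itself):
 *
 *	mkdir("/newroot/oldroot", 0700);
 *	syscall(SYS_pivot_root, "/newroot", "/newroot/oldroot");
 *	chdir("/");
 *	umount2("/oldroot", MNT_DETACH);
 */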
static unsigned int recalc_flags(struct mount_kattr *kattr, struct mount *mnt)
{
	unsigned int flags = mnt->mnt.mnt_flags;

	/* flags to clear */
	flags &= ~kattr->attr_clr;
	/* flags to raise */
	flags |= kattr->attr_set;

	return flags;
}
4499 static int can_idmap_mount(const struct mount_kattr
*kattr
, struct mount
*mnt
)
4501 struct vfsmount
*m
= &mnt
->mnt
;
4502 struct user_namespace
*fs_userns
= m
->mnt_sb
->s_user_ns
;
4504 if (!kattr
->mnt_idmap
)
4508 * Creating an idmapped mount with the filesystem wide idmapping
4509 * doesn't make sense so block that. We don't allow mushy semantics.
4511 if (kattr
->mnt_userns
== m
->mnt_sb
->s_user_ns
)
4515 * Once a mount has been idmapped we don't allow it to change its
4516 * mapping. It makes things simpler and callers can just create
4517 * another bind-mount they can idmap if they want to.
4519 if (is_idmapped_mnt(m
))
4522 /* The underlying filesystem doesn't support idmapped mounts yet. */
4523 if (!(m
->mnt_sb
->s_type
->fs_flags
& FS_ALLOW_IDMAP
))
4526 /* The filesystem has turned off idmapped mounts. */
4527 if (m
->mnt_sb
->s_iflags
& SB_I_NOIDMAP
)
4530 /* We're not controlling the superblock. */
4531 if (!ns_capable(fs_userns
, CAP_SYS_ADMIN
))
4534 /* Mount has already been visible in the filesystem hierarchy. */
4535 if (!is_anon_ns(mnt
->mnt_ns
))
/**
 * mnt_allow_writers() - check whether the attribute change allows writers
 * @kattr: the new mount attributes
 * @mnt: the mount to which @kattr will be applied
 *
 * Check whether the new mount attributes in @kattr allow concurrent writers.
 *
 * Return: true if writers need to be held, false if not
 */
static inline bool mnt_allow_writers(const struct mount_kattr *kattr,
				     const struct mount *mnt)
{
	return (!(kattr->attr_set & MNT_READONLY) ||
		(mnt->mnt.mnt_flags & MNT_READONLY)) &&
	       !kattr->mnt_idmap;
}
4558 static int mount_setattr_prepare(struct mount_kattr
*kattr
, struct mount
*mnt
)
4563 for (m
= mnt
; m
; m
= next_mnt(m
, mnt
)) {
4564 if (!can_change_locked_flags(m
, recalc_flags(kattr
, m
))) {
4569 err
= can_idmap_mount(kattr
, m
);
4573 if (!mnt_allow_writers(kattr
, m
)) {
4574 err
= mnt_hold_writers(m
);
4579 if (!kattr
->recurse
)
4587 * If we had to call mnt_hold_writers() MNT_WRITE_HOLD will
4588 * be set in @mnt_flags. The loop unsets MNT_WRITE_HOLD for all
4589 * mounts and needs to take care to include the first mount.
4591 for (p
= mnt
; p
; p
= next_mnt(p
, mnt
)) {
4592 /* If we had to hold writers unblock them. */
4593 if (p
->mnt
.mnt_flags
& MNT_WRITE_HOLD
)
4594 mnt_unhold_writers(p
);
4597 * We're done once the first mount we changed got
4598 * MNT_WRITE_HOLD unset.
4607 static void do_idmap_mount(const struct mount_kattr
*kattr
, struct mount
*mnt
)
4609 if (!kattr
->mnt_idmap
)
4613 * Pairs with smp_load_acquire() in mnt_idmap().
4615 * Since we only allow a mount to change the idmapping once and
4616 * verified this in can_idmap_mount() we know that the mount has
4617 * @nop_mnt_idmap attached to it. So there's no need to drop any
4620 smp_store_release(&mnt
->mnt
.mnt_idmap
, mnt_idmap_get(kattr
->mnt_idmap
));
4623 static void mount_setattr_commit(struct mount_kattr
*kattr
, struct mount
*mnt
)
4627 for (m
= mnt
; m
; m
= next_mnt(m
, mnt
)) {
4630 do_idmap_mount(kattr
, m
);
4631 flags
= recalc_flags(kattr
, m
);
4632 WRITE_ONCE(m
->mnt
.mnt_flags
, flags
);
4634 /* If we had to hold writers unblock them. */
4635 if (m
->mnt
.mnt_flags
& MNT_WRITE_HOLD
)
4636 mnt_unhold_writers(m
);
4638 if (kattr
->propagation
)
4639 change_mnt_propagation(m
, kattr
->propagation
);
4640 if (!kattr
->recurse
)
4643 touch_mnt_namespace(mnt
->mnt_ns
);
4646 static int do_mount_setattr(struct path
*path
, struct mount_kattr
*kattr
)
4648 struct mount
*mnt
= real_mount(path
->mnt
);
4651 if (!path_mounted(path
))
4654 if (kattr
->mnt_userns
) {
4655 struct mnt_idmap
*mnt_idmap
;
4657 mnt_idmap
= alloc_mnt_idmap(kattr
->mnt_userns
);
4658 if (IS_ERR(mnt_idmap
))
4659 return PTR_ERR(mnt_idmap
);
4660 kattr
->mnt_idmap
= mnt_idmap
;
4663 if (kattr
->propagation
) {
4665 * Only take namespace_lock() if we're actually changing
4669 if (kattr
->propagation
== MS_SHARED
) {
4670 err
= invent_group_ids(mnt
, kattr
->recurse
);
4681 /* Ensure that this isn't anything purely vfs internal. */
4682 if (!is_mounted(&mnt
->mnt
))
4686 * If this is an attached mount make sure it's located in the callers
4687 * mount namespace. If it's not don't let the caller interact with it.
4689 * If this mount doesn't have a parent it's most often simply a
4690 * detached mount with an anonymous mount namespace. IOW, something
4691 * that's simply not attached yet. But there are apparently also users
4692 * that do change mount properties on the rootfs itself. That obviously
4693 * neither has a parent nor is it a detached mount so we cannot
4694 * unconditionally check for detached mounts.
4696 if ((mnt_has_parent(mnt
) || !is_anon_ns(mnt
->mnt_ns
)) && !check_mnt(mnt
))
4700 * First, we get the mount tree in a shape where we can change mount
4701 * properties without failure. If we succeeded to do so we commit all
4702 * changes and if we failed we clean up.
4704 err
= mount_setattr_prepare(kattr
, mnt
);
4706 mount_setattr_commit(kattr
, mnt
);
4709 unlock_mount_hash();
4711 if (kattr
->propagation
) {
4713 cleanup_group_ids(mnt
, NULL
);
4720 static int build_mount_idmapped(const struct mount_attr
*attr
, size_t usize
,
4721 struct mount_kattr
*kattr
, unsigned int flags
)
4723 struct ns_common
*ns
;
4724 struct user_namespace
*mnt_userns
;
4726 if (!((attr
->attr_set
| attr
->attr_clr
) & MOUNT_ATTR_IDMAP
))
4730 * We currently do not support clearing an idmapped mount. If this ever
4731 * is a use-case we can revisit this but for now let's keep it simple
4734 if (attr
->attr_clr
& MOUNT_ATTR_IDMAP
)
4737 if (attr
->userns_fd
> INT_MAX
)
4740 CLASS(fd
, f
)(attr
->userns_fd
);
4744 if (!proc_ns_file(fd_file(f
)))
4747 ns
= get_proc_ns(file_inode(fd_file(f
)));
4748 if (ns
->ops
->type
!= CLONE_NEWUSER
)
4752 * The initial idmapping cannot be used to create an idmapped
4753 * mount. We use the initial idmapping as an indicator of a mount
4754 * that is not idmapped. It can simply be passed into helpers that
4755 * are aware of idmapped mounts as a convenient shortcut. A user
4756 * can just create a dedicated identity mapping to achieve the same
4759 mnt_userns
= container_of(ns
, struct user_namespace
, ns
);
4760 if (mnt_userns
== &init_user_ns
)
4763 /* We're not controlling the target namespace. */
4764 if (!ns_capable(mnt_userns
, CAP_SYS_ADMIN
))
4767 kattr
->mnt_userns
= get_user_ns(mnt_userns
);
static int build_mount_kattr(const struct mount_attr *attr, size_t usize,
			     struct mount_kattr *kattr, unsigned int flags)
{
	unsigned int lookup_flags = LOOKUP_AUTOMOUNT | LOOKUP_FOLLOW;

	if (flags & AT_NO_AUTOMOUNT)
		lookup_flags &= ~LOOKUP_AUTOMOUNT;
	if (flags & AT_SYMLINK_NOFOLLOW)
		lookup_flags &= ~LOOKUP_FOLLOW;
	if (flags & AT_EMPTY_PATH)
		lookup_flags |= LOOKUP_EMPTY;

	*kattr = (struct mount_kattr) {
		.lookup_flags	= lookup_flags,
		.recurse	= !!(flags & AT_RECURSIVE),
	};

	if (attr->propagation & ~MOUNT_SETATTR_PROPAGATION_FLAGS)
		return -EINVAL;
	if (hweight32(attr->propagation & MOUNT_SETATTR_PROPAGATION_FLAGS) > 1)
		return -EINVAL;
	kattr->propagation = attr->propagation;

	if ((attr->attr_set | attr->attr_clr) & ~MOUNT_SETATTR_VALID_FLAGS)
		return -EINVAL;

	kattr->attr_set = attr_flags_to_mnt_flags(attr->attr_set);
	kattr->attr_clr = attr_flags_to_mnt_flags(attr->attr_clr);

	/*
	 * Since the MOUNT_ATTR_<atime> values are an enum, not a bitmap,
	 * users wanting to transition to a different atime setting cannot
	 * simply specify the atime setting in @attr_set, but must also
	 * specify MOUNT_ATTR__ATIME in the @attr_clr field.
	 * So ensure that MOUNT_ATTR__ATIME can't be partially set in
	 * @attr_clr and that @attr_set can't have any atime bits set if
	 * MOUNT_ATTR__ATIME isn't set in @attr_clr.
	 */
	if (attr->attr_clr & MOUNT_ATTR__ATIME) {
		if ((attr->attr_clr & MOUNT_ATTR__ATIME) != MOUNT_ATTR__ATIME)
			return -EINVAL;

		/*
		 * Clear all previous time settings as they are mutually
		 * exclusive.
		 */
		kattr->attr_clr |= MNT_RELATIME | MNT_NOATIME;
		switch (attr->attr_set & MOUNT_ATTR__ATIME) {
		case MOUNT_ATTR_RELATIME:
			kattr->attr_set |= MNT_RELATIME;
			break;
		case MOUNT_ATTR_NOATIME:
			kattr->attr_set |= MNT_NOATIME;
			break;
		case MOUNT_ATTR_STRICTATIME:
			break;
		default:
			return -EINVAL;
		}
	} else {
		if (attr->attr_set & MOUNT_ATTR__ATIME)
			return -EINVAL;
	}

	return build_mount_idmapped(attr, usize, kattr, flags);
}
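
/*
 * Illustrative userspace sketch (not part of this file; assumes the uapi
 * definitions from <linux/mount.h>, <fcntl.h> and raw syscall(2) access):
 * because the MOUNT_ATTR_<atime> values form an enum rather than a bitmap,
 * a caller switching a mount to relatime must also clear the whole atime
 * field via @attr_clr, exactly as validated above:
 *
 *	struct mount_attr attr = {
 *		.attr_set = MOUNT_ATTR_RELATIME,
 *		.attr_clr = MOUNT_ATTR__ATIME,
 *	};
 *	syscall(SYS_mount_setattr, AT_FDCWD, "/mnt", 0, &attr, sizeof(attr));
 */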
static void finish_mount_kattr(struct mount_kattr *kattr)
{
	put_user_ns(kattr->mnt_userns);
	kattr->mnt_userns = NULL;

	if (kattr->mnt_idmap)
		mnt_idmap_put(kattr->mnt_idmap);
}
SYSCALL_DEFINE5(mount_setattr, int, dfd, const char __user *, path,
		unsigned int, flags, struct mount_attr __user *, uattr,
		size_t, usize)
{
	int err;
	struct path target;
	struct mount_attr attr;
	struct mount_kattr kattr;

	BUILD_BUG_ON(sizeof(struct mount_attr) != MOUNT_ATTR_SIZE_VER0);

	if (flags & ~(AT_EMPTY_PATH |
		      AT_RECURSIVE |
		      AT_SYMLINK_NOFOLLOW |
		      AT_NO_AUTOMOUNT))
		return -EINVAL;

	if (unlikely(usize > PAGE_SIZE))
		return -E2BIG;
	if (unlikely(usize < MOUNT_ATTR_SIZE_VER0))
		return -EINVAL;

	if (!may_mount())
		return -EPERM;

	err = copy_struct_from_user(&attr, sizeof(attr), uattr, usize);
	if (err)
		return err;

	/* Don't bother walking through the mounts if this is a nop. */
	if (attr.attr_set == 0 &&
	    attr.attr_clr == 0 &&
	    attr.propagation == 0)
		return 0;

	err = build_mount_kattr(&attr, usize, &kattr, flags);
	if (err)
		return err;

	err = user_path_at(dfd, path, kattr.lookup_flags, &target);
	if (!err) {
		err = do_mount_setattr(&target, &kattr);
		path_put(&target);
	}
	finish_mount_kattr(&kattr);
	return err;
}
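
/*
 * Usage sketch (userspace, illustrative only; assumes <linux/mount.h>,
 * <fcntl.h> and <sys/syscall.h>): make a whole subtree read-only and
 * nosuid in one call. A request with attr_set, attr_clr and propagation
 * all zero is the documented no-op above and simply returns 0.
 *
 *	struct mount_attr attr = {
 *		.attr_set = MOUNT_ATTR_RDONLY | MOUNT_ATTR_NOSUID,
 *	};
 *	int ret = syscall(SYS_mount_setattr, AT_FDCWD, "/mnt/data",
 *			  AT_RECURSIVE, &attr, sizeof(attr));
 */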
int show_path(struct seq_file *m, struct dentry *root)
{
	if (root->d_sb->s_op->show_path)
		return root->d_sb->s_op->show_path(m, root);

	seq_dentry(m, root, " \t\n\\");
	return 0;
}
static struct vfsmount *lookup_mnt_in_ns(u64 id, struct mnt_namespace *ns)
{
	struct mount *mnt = mnt_find_id_at(ns, id);

	if (!mnt || mnt->mnt_id_unique != id)
		return NULL;

	return &mnt->mnt;
}
struct kstatmount {
	struct statmount __user *buf;
	size_t bufsize;
	struct vfsmount *mnt;
	u64 mask;
	struct path root;
	struct statmount sm;
	struct seq_file seq;
};
static u64 mnt_to_attr_flags(struct vfsmount *mnt)
{
	unsigned int mnt_flags = READ_ONCE(mnt->mnt_flags);
	u64 attr_flags = 0;

	if (mnt_flags & MNT_READONLY)
		attr_flags |= MOUNT_ATTR_RDONLY;
	if (mnt_flags & MNT_NOSUID)
		attr_flags |= MOUNT_ATTR_NOSUID;
	if (mnt_flags & MNT_NODEV)
		attr_flags |= MOUNT_ATTR_NODEV;
	if (mnt_flags & MNT_NOEXEC)
		attr_flags |= MOUNT_ATTR_NOEXEC;
	if (mnt_flags & MNT_NODIRATIME)
		attr_flags |= MOUNT_ATTR_NODIRATIME;
	if (mnt_flags & MNT_NOSYMFOLLOW)
		attr_flags |= MOUNT_ATTR_NOSYMFOLLOW;

	if (mnt_flags & MNT_NOATIME)
		attr_flags |= MOUNT_ATTR_NOATIME;
	else if (mnt_flags & MNT_RELATIME)
		attr_flags |= MOUNT_ATTR_RELATIME;
	else
		attr_flags |= MOUNT_ATTR_STRICTATIME;

	if (is_idmapped_mnt(mnt))
		attr_flags |= MOUNT_ATTR_IDMAP;

	return attr_flags;
}
static u64 mnt_to_propagation_flags(struct mount *m)
{
	u64 propagation = 0;

	if (IS_MNT_SHARED(m))
		propagation |= MS_SHARED;
	if (IS_MNT_SLAVE(m))
		propagation |= MS_SLAVE;
	if (IS_MNT_UNBINDABLE(m))
		propagation |= MS_UNBINDABLE;
	if (!propagation)
		propagation |= MS_PRIVATE;

	return propagation;
}
static void statmount_sb_basic(struct kstatmount *s)
{
	struct super_block *sb = s->mnt->mnt_sb;

	s->sm.mask |= STATMOUNT_SB_BASIC;
	s->sm.sb_dev_major = MAJOR(sb->s_dev);
	s->sm.sb_dev_minor = MINOR(sb->s_dev);
	s->sm.sb_magic = sb->s_magic;
	s->sm.sb_flags = sb->s_flags & (SB_RDONLY|SB_SYNCHRONOUS|SB_DIRSYNC|SB_LAZYTIME);
}
static void statmount_mnt_basic(struct kstatmount *s)
{
	struct mount *m = real_mount(s->mnt);

	s->sm.mask |= STATMOUNT_MNT_BASIC;
	s->sm.mnt_id = m->mnt_id_unique;
	s->sm.mnt_parent_id = m->mnt_parent->mnt_id_unique;
	s->sm.mnt_id_old = m->mnt_id;
	s->sm.mnt_parent_id_old = m->mnt_parent->mnt_id;
	s->sm.mnt_attr = mnt_to_attr_flags(&m->mnt);
	s->sm.mnt_propagation = mnt_to_propagation_flags(m);
	s->sm.mnt_peer_group = IS_MNT_SHARED(m) ? m->mnt_group_id : 0;
	s->sm.mnt_master = IS_MNT_SLAVE(m) ? m->mnt_master->mnt_group_id : 0;
}
static void statmount_propagate_from(struct kstatmount *s)
{
	struct mount *m = real_mount(s->mnt);

	s->sm.mask |= STATMOUNT_PROPAGATE_FROM;
	if (IS_MNT_SLAVE(m))
		s->sm.propagate_from = get_dominating_id(m, &current->fs->root);
}
static int statmount_mnt_root(struct kstatmount *s, struct seq_file *seq)
{
	int ret;
	size_t start = seq->count;

	ret = show_path(seq, s->mnt->mnt_root);
	if (ret)
		return ret;

	if (unlikely(seq_has_overflowed(seq)))
		return -EAGAIN;

	/*
	 * Unescape the result. It would be better if supplied string was not
	 * escaped in the first place, but that's a pretty invasive change.
	 */
	seq->buf[seq->count] = '\0';
	seq->count = start;
	seq_commit(seq, string_unescape_inplace(seq->buf + start, UNESCAPE_OCTAL));
	return 0;
}
static int statmount_mnt_point(struct kstatmount *s, struct seq_file *seq)
{
	struct vfsmount *mnt = s->mnt;
	struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt };
	int err;

	err = seq_path_root(seq, &mnt_path, &s->root, "");
	return err == SEQ_SKIP ? 0 : err;
}
static int statmount_fs_type(struct kstatmount *s, struct seq_file *seq)
{
	struct super_block *sb = s->mnt->mnt_sb;

	seq_puts(seq, sb->s_type->name);
	return 0;
}
static void statmount_fs_subtype(struct kstatmount *s, struct seq_file *seq)
{
	struct super_block *sb = s->mnt->mnt_sb;

	if (sb->s_subtype)
		seq_puts(seq, sb->s_subtype);
}
static int statmount_sb_source(struct kstatmount *s, struct seq_file *seq)
{
	struct super_block *sb = s->mnt->mnt_sb;
	struct mount *r = real_mount(s->mnt);

	if (sb->s_op->show_devname) {
		size_t start = seq->count;
		int ret;

		ret = sb->s_op->show_devname(seq, s->mnt->mnt_root);
		if (ret)
			return ret;

		if (unlikely(seq_has_overflowed(seq)))
			return -EAGAIN;

		/* Unescape the result */
		seq->buf[seq->count] = '\0';
		seq->count = start;
		seq_commit(seq, string_unescape_inplace(seq->buf + start, UNESCAPE_OCTAL));
	} else if (r->mnt_devname) {
		seq_puts(seq, r->mnt_devname);
	}
	return 0;
}
static void statmount_mnt_ns_id(struct kstatmount *s, struct mnt_namespace *ns)
{
	s->sm.mask |= STATMOUNT_MNT_NS_ID;
	s->sm.mnt_ns_id = ns->seq;
}
static int statmount_mnt_opts(struct kstatmount *s, struct seq_file *seq)
{
	struct vfsmount *mnt = s->mnt;
	struct super_block *sb = mnt->mnt_sb;
	int err;

	if (sb->s_op->show_options) {
		size_t start = seq->count;

		err = security_sb_show_options(seq, sb);
		if (err)
			return err;

		err = sb->s_op->show_options(seq, mnt->mnt_root);
		if (err)
			return err;

		if (unlikely(seq_has_overflowed(seq)))
			return -EAGAIN;

		if (seq->count == start)
			return 0;

		/* skip leading comma */
		memmove(seq->buf + start, seq->buf + start + 1,
			seq->count - start - 1);
		seq->count--;
	}

	return 0;
}
static inline int statmount_opt_process(struct seq_file *seq, size_t start)
{
	char *buf_end, *opt_end, *src, *dst;
	int count = 0;

	if (unlikely(seq_has_overflowed(seq)))
		return -EAGAIN;

	buf_end = seq->buf + seq->count;
	dst = seq->buf + start;
	src = dst + 1;	/* skip initial comma */

	if (src >= buf_end) {
		seq->count = start;
		return 0;
	}

	*buf_end = '\0';
	for (; src < buf_end; src = opt_end + 1) {
		opt_end = strchrnul(src, ',');
		*opt_end = '\0';
		dst += string_unescape(src, dst, 0, UNESCAPE_OCTAL) + 1;
		if (WARN_ON_ONCE(++count == INT_MAX))
			return -EOVERFLOW;
	}
	seq->count = dst - 1 - seq->buf;
	return count;
}
static int statmount_opt_array(struct kstatmount *s, struct seq_file *seq)
{
	struct vfsmount *mnt = s->mnt;
	struct super_block *sb = mnt->mnt_sb;
	size_t start = seq->count;
	int err;

	if (!sb->s_op->show_options)
		return 0;

	err = sb->s_op->show_options(seq, mnt->mnt_root);
	if (err)
		return err;

	err = statmount_opt_process(seq, start);
	if (err < 0)
		return err;

	s->sm.opt_num = err;
	return 0;
}
static int statmount_opt_sec_array(struct kstatmount *s, struct seq_file *seq)
{
	struct vfsmount *mnt = s->mnt;
	struct super_block *sb = mnt->mnt_sb;
	size_t start = seq->count;
	int err;

	err = security_sb_show_options(seq, sb);
	if (err)
		return err;

	err = statmount_opt_process(seq, start);
	if (err < 0)
		return err;

	s->sm.opt_sec_num = err;
	return 0;
}
static int statmount_string(struct kstatmount *s, u64 flag)
{
	int ret = 0;
	size_t kbufsize;
	struct seq_file *seq = &s->seq;
	struct statmount *sm = &s->sm;
	u32 start = seq->count;

	switch (flag) {
	case STATMOUNT_FS_TYPE:
		sm->fs_type = start;
		ret = statmount_fs_type(s, seq);
		break;
	case STATMOUNT_MNT_ROOT:
		sm->mnt_root = start;
		ret = statmount_mnt_root(s, seq);
		break;
	case STATMOUNT_MNT_POINT:
		sm->mnt_point = start;
		ret = statmount_mnt_point(s, seq);
		break;
	case STATMOUNT_MNT_OPTS:
		sm->mnt_opts = start;
		ret = statmount_mnt_opts(s, seq);
		break;
	case STATMOUNT_OPT_ARRAY:
		sm->opt_array = start;
		ret = statmount_opt_array(s, seq);
		break;
	case STATMOUNT_OPT_SEC_ARRAY:
		sm->opt_sec_array = start;
		ret = statmount_opt_sec_array(s, seq);
		break;
	case STATMOUNT_FS_SUBTYPE:
		sm->fs_subtype = start;
		statmount_fs_subtype(s, seq);
		break;
	case STATMOUNT_SB_SOURCE:
		sm->sb_source = start;
		ret = statmount_sb_source(s, seq);
		break;
	default:
		WARN_ON_ONCE(true);
		return -EINVAL;
	}

	/*
	 * If nothing was emitted, return to avoid setting the flag
	 * and terminating the buffer.
	 */
	if (seq->count == start)
		return ret;
	if (unlikely(check_add_overflow(sizeof(*sm), seq->count, &kbufsize)))
		return -EOVERFLOW;
	if (kbufsize >= s->bufsize)
		return -EOVERFLOW;

	/* signal a retry */
	if (unlikely(seq_has_overflowed(seq)))
		return -EAGAIN;

	if (ret)
		return ret;

	seq->buf[seq->count++] = '\0';
	sm->mask |= flag;
	return 0;
}
static int copy_statmount_to_user(struct kstatmount *s)
{
	struct statmount *sm = &s->sm;
	struct seq_file *seq = &s->seq;
	char __user *str = ((char __user *)s->buf) + sizeof(*sm);
	size_t copysize = min_t(size_t, s->bufsize, sizeof(*sm));

	if (seq->count && copy_to_user(str, seq->buf, seq->count))
		return -EFAULT;

	/* Return the number of bytes copied to the buffer */
	sm->size = copysize + seq->count;
	if (copy_to_user(s->buf, sm, copysize))
		return -EFAULT;

	return 0;
}
static struct mount *listmnt_next(struct mount *curr, bool reverse)
{
	struct rb_node *node;

	if (reverse)
		node = rb_prev(&curr->mnt_node);
	else
		node = rb_next(&curr->mnt_node);

	return node_to_mount(node);
}
static int grab_requested_root(struct mnt_namespace *ns, struct path *root)
{
	struct mount *first, *child;

	rwsem_assert_held(&namespace_sem);

	/* We're looking at our own ns, just use get_fs_root. */
	if (ns == current->nsproxy->mnt_ns) {
		get_fs_root(current->fs, root);
		return 0;
	}

	/*
	 * We have to find the first mount in our ns and use that, however it
	 * may not exist, so handle that properly.
	 */
	if (RB_EMPTY_ROOT(&ns->mounts))
		return -ENOENT;

	first = child = ns->root;
	for (;;) {
		child = listmnt_next(child, false);
		if (!child)
			return -ENOENT;
		if (child->mnt_parent == first)
			break;
	}

	root->mnt = mntget(&child->mnt);
	root->dentry = dget(root->mnt->mnt_root);
	return 0;
}
static int do_statmount(struct kstatmount *s, u64 mnt_id, u64 mnt_ns_id,
			struct mnt_namespace *ns)
{
	struct path root __free(path_put) = {};
	struct mount *m;
	int err;

	/* Has the namespace already been emptied? */
	if (mnt_ns_id && RB_EMPTY_ROOT(&ns->mounts))
		return -ENOENT;

	s->mnt = lookup_mnt_in_ns(mnt_id, ns);
	if (!s->mnt)
		return -ENOENT;

	err = grab_requested_root(ns, &root);
	if (err)
		return err;

	/*
	 * Don't trigger audit denials. We just want to determine what
	 * mounts to show users.
	 */
	m = real_mount(s->mnt);
	if (!is_path_reachable(m, m->mnt.mnt_root, &root) &&
	    !ns_capable_noaudit(ns->user_ns, CAP_SYS_ADMIN))
		return -EPERM;

	err = security_sb_statfs(s->mnt->mnt_root);
	if (err)
		return err;

	s->root = root;
	if (s->mask & STATMOUNT_SB_BASIC)
		statmount_sb_basic(s);

	if (s->mask & STATMOUNT_MNT_BASIC)
		statmount_mnt_basic(s);

	if (s->mask & STATMOUNT_PROPAGATE_FROM)
		statmount_propagate_from(s);

	if (s->mask & STATMOUNT_FS_TYPE)
		err = statmount_string(s, STATMOUNT_FS_TYPE);

	if (!err && s->mask & STATMOUNT_MNT_ROOT)
		err = statmount_string(s, STATMOUNT_MNT_ROOT);

	if (!err && s->mask & STATMOUNT_MNT_POINT)
		err = statmount_string(s, STATMOUNT_MNT_POINT);

	if (!err && s->mask & STATMOUNT_MNT_OPTS)
		err = statmount_string(s, STATMOUNT_MNT_OPTS);

	if (!err && s->mask & STATMOUNT_OPT_ARRAY)
		err = statmount_string(s, STATMOUNT_OPT_ARRAY);

	if (!err && s->mask & STATMOUNT_OPT_SEC_ARRAY)
		err = statmount_string(s, STATMOUNT_OPT_SEC_ARRAY);

	if (!err && s->mask & STATMOUNT_FS_SUBTYPE)
		err = statmount_string(s, STATMOUNT_FS_SUBTYPE);

	if (!err && s->mask & STATMOUNT_SB_SOURCE)
		err = statmount_string(s, STATMOUNT_SB_SOURCE);

	if (!err && s->mask & STATMOUNT_MNT_NS_ID)
		statmount_mnt_ns_id(s, ns);

	if (err)
		return err;

	return 0;
}
static inline bool retry_statmount(const long ret, size_t *seq_size)
{
	if (likely(ret != -EAGAIN))
		return false;
	if (unlikely(check_mul_overflow(*seq_size, 2, seq_size)))
		return false;
	if (unlikely(*seq_size > MAX_RW_COUNT))
		return false;
	return true;
}

#define STATMOUNT_STRING_REQ (STATMOUNT_MNT_ROOT | STATMOUNT_MNT_POINT | \
			      STATMOUNT_FS_TYPE | STATMOUNT_MNT_OPTS | \
			      STATMOUNT_FS_SUBTYPE | STATMOUNT_SB_SOURCE | \
			      STATMOUNT_OPT_ARRAY | STATMOUNT_OPT_SEC_ARRAY)
static int prepare_kstatmount(struct kstatmount *ks, struct mnt_id_req *kreq,
			      struct statmount __user *buf, size_t bufsize,
			      size_t seq_size)
{
	if (!access_ok(buf, bufsize))
		return -EFAULT;

	memset(ks, 0, sizeof(*ks));
	ks->mask = kreq->param;
	ks->buf = buf;
	ks->bufsize = bufsize;

	if (ks->mask & STATMOUNT_STRING_REQ) {
		if (bufsize == sizeof(ks->sm))
			return -EOVERFLOW;

		ks->seq.buf = kvmalloc(seq_size, GFP_KERNEL_ACCOUNT);
		if (!ks->seq.buf)
			return -ENOMEM;

		ks->seq.size = seq_size;
	}

	return 0;
}
static int copy_mnt_id_req(const struct mnt_id_req __user *req,
			   struct mnt_id_req *kreq)
{
	int ret;
	size_t usize;

	BUILD_BUG_ON(sizeof(struct mnt_id_req) != MNT_ID_REQ_SIZE_VER1);

	ret = get_user(usize, &req->size);
	if (ret)
		return ret;
	if (unlikely(usize > PAGE_SIZE))
		return -E2BIG;
	if (unlikely(usize < MNT_ID_REQ_SIZE_VER0))
		return -EINVAL;
	memset(kreq, 0, sizeof(*kreq));
	ret = copy_struct_from_user(kreq, sizeof(*kreq), req, usize);
	if (ret)
		return ret;
	if (kreq->spare != 0)
		return -EINVAL;
	/* The first valid unique mount id is MNT_UNIQUE_ID_OFFSET + 1. */
	if (kreq->mnt_id <= MNT_UNIQUE_ID_OFFSET)
		return -EINVAL;
	return 0;
}
/*
 * If the user requested a specific mount namespace id, look that up and return
 * that, or if not simply grab a passive reference on our mount namespace and
 * return that.
 */
static struct mnt_namespace *grab_requested_mnt_ns(const struct mnt_id_req *kreq)
{
	struct mnt_namespace *mnt_ns;

	if (kreq->mnt_ns_id && kreq->spare)
		return ERR_PTR(-EINVAL);

	if (kreq->mnt_ns_id)
		return lookup_mnt_ns(kreq->mnt_ns_id);

	if (kreq->spare) {
		struct ns_common *ns;

		CLASS(fd, f)(kreq->spare);
		if (fd_empty(f))
			return ERR_PTR(-EBADF);

		if (!proc_ns_file(fd_file(f)))
			return ERR_PTR(-EINVAL);

		ns = get_proc_ns(file_inode(fd_file(f)));
		if (ns->ops->type != CLONE_NEWNS)
			return ERR_PTR(-EINVAL);

		mnt_ns = to_mnt_ns(ns);
	} else {
		mnt_ns = current->nsproxy->mnt_ns;
	}

	refcount_inc(&mnt_ns->passive);
	return mnt_ns;
}
SYSCALL_DEFINE4(statmount, const struct mnt_id_req __user *, req,
		struct statmount __user *, buf, size_t, bufsize,
		unsigned int, flags)
{
	struct mnt_namespace *ns __free(mnt_ns_release) = NULL;
	struct kstatmount *ks __free(kfree) = NULL;
	struct mnt_id_req kreq;
	/* We currently support retrieval of 3 strings. */
	size_t seq_size = 3 * PATH_MAX;
	int ret;

	if (flags)
		return -EINVAL;

	ret = copy_mnt_id_req(req, &kreq);
	if (ret)
		return ret;

	ns = grab_requested_mnt_ns(&kreq);

	if (kreq.mnt_ns_id && (ns != current->nsproxy->mnt_ns) &&
	    !ns_capable_noaudit(ns->user_ns, CAP_SYS_ADMIN))
		return -ENOENT;

	ks = kmalloc(sizeof(*ks), GFP_KERNEL_ACCOUNT);
	if (!ks)
		return -ENOMEM;

retry:
	ret = prepare_kstatmount(ks, &kreq, buf, bufsize, seq_size);
	if (ret)
		return ret;

	scoped_guard(rwsem_read, &namespace_sem)
		ret = do_statmount(ks, kreq.mnt_id, kreq.mnt_ns_id, ns);

	if (!ret)
		ret = copy_statmount_to_user(ks);
	kvfree(ks->seq.buf);
	if (retry_statmount(ret, &seq_size))
		goto retry;
	return ret;
}
static ssize_t do_listmount(struct mnt_namespace *ns, u64 mnt_parent_id,
			    u64 last_mnt_id, u64 *mnt_ids, size_t nr_mnt_ids,
			    bool reverse)
{
	struct path root __free(path_put) = {};
	struct path orig;
	struct mount *r, *first;
	ssize_t ret;

	rwsem_assert_held(&namespace_sem);

	ret = grab_requested_root(ns, &root);
	if (ret)
		return ret;

	if (mnt_parent_id == LSMT_ROOT) {
		orig = root;
	} else {
		orig.mnt = lookup_mnt_in_ns(mnt_parent_id, ns);
		if (!orig.mnt)
			return -ENOENT;
		orig.dentry = orig.mnt->mnt_root;
	}

	/*
	 * Don't trigger audit denials. We just want to determine what
	 * mounts to show users.
	 */
	if (!is_path_reachable(real_mount(orig.mnt), orig.dentry, &root) &&
	    !ns_capable_noaudit(ns->user_ns, CAP_SYS_ADMIN))
		return -EPERM;

	ret = security_sb_statfs(orig.dentry);
	if (ret)
		return ret;

	if (!last_mnt_id) {
		if (reverse)
			first = node_to_mount(ns->mnt_last_node);
		else
			first = node_to_mount(ns->mnt_first_node);
	} else {
		if (reverse)
			first = mnt_find_id_at_reverse(ns, last_mnt_id - 1);
		else
			first = mnt_find_id_at(ns, last_mnt_id + 1);
	}

	for (ret = 0, r = first; r && nr_mnt_ids; r = listmnt_next(r, reverse)) {
		if (r->mnt_id_unique == mnt_parent_id)
			continue;
		if (!is_path_reachable(r, r->mnt.mnt_root, &orig))
			continue;
		*mnt_ids = r->mnt_id_unique;
		mnt_ids++;
		nr_mnt_ids--;
		ret++;
	}
	return ret;
}
SYSCALL_DEFINE4(listmount, const struct mnt_id_req __user *, req,
		u64 __user *, mnt_ids, size_t, nr_mnt_ids, unsigned int, flags)
{
	u64 *kmnt_ids __free(kvfree) = NULL;
	const size_t maxcount = 1000000;
	struct mnt_namespace *ns __free(mnt_ns_release) = NULL;
	struct mnt_id_req kreq;
	u64 last_mnt_id;
	ssize_t ret;

	if (flags & ~LISTMOUNT_REVERSE)
		return -EINVAL;

	/*
	 * If the mount namespace really has more than 1 million mounts the
	 * caller must iterate over the mount namespace (and reconsider their
	 * system design...).
	 */
	if (unlikely(nr_mnt_ids > maxcount))
		return -EOVERFLOW;

	if (!access_ok(mnt_ids, nr_mnt_ids * sizeof(*mnt_ids)))
		return -EFAULT;

	ret = copy_mnt_id_req(req, &kreq);
	if (ret)
		return ret;

	last_mnt_id = kreq.param;
	/* The first valid unique mount id is MNT_UNIQUE_ID_OFFSET + 1. */
	if (last_mnt_id != 0 && last_mnt_id <= MNT_UNIQUE_ID_OFFSET)
		return -EINVAL;

	kmnt_ids = kvmalloc_array(nr_mnt_ids, sizeof(*kmnt_ids),
				  GFP_KERNEL_ACCOUNT);
	if (!kmnt_ids)
		return -ENOMEM;

	ns = grab_requested_mnt_ns(&kreq);

	if (kreq.mnt_ns_id && (ns != current->nsproxy->mnt_ns) &&
	    !ns_capable_noaudit(ns->user_ns, CAP_SYS_ADMIN))
		return -ENOENT;

	scoped_guard(rwsem_read, &namespace_sem)
		ret = do_listmount(ns, kreq.mnt_id, last_mnt_id, kmnt_ids,
				   nr_mnt_ids, (flags & LISTMOUNT_REVERSE));
	if (ret <= 0)
		return ret;

	if (copy_to_user(mnt_ids, kmnt_ids, ret * sizeof(*mnt_ids)))
		return -EFAULT;

	return ret;
}
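
/*
 * Usage sketch (userspace, illustrative only; assumes the uapi definitions
 * from <linux/mount.h>): list the mount ids reachable from the root of the
 * caller's mount namespace. Passing the last returned id back in req.param
 * continues the listing, and LISTMOUNT_REVERSE walks ids in descending
 * order.
 *
 *	uint64_t ids[256];
 *	struct mnt_id_req req = {
 *		.size	= MNT_ID_REQ_SIZE_VER0,
 *		.mnt_id	= LSMT_ROOT,
 *	};
 *	ssize_t n = syscall(SYS_listmount, &req, ids, 256, 0);
 */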
static void __init init_mount_tree(void)
{
	struct vfsmount *mnt;
	struct mount *m;
	struct mnt_namespace *ns;
	struct path root;

	mnt = vfs_kern_mount(&rootfs_fs_type, 0, "rootfs", NULL);
	if (IS_ERR(mnt))
		panic("Can't create rootfs");

	ns = alloc_mnt_ns(&init_user_ns, false);
	if (IS_ERR(ns))
		panic("Can't allocate initial namespace");
	m = real_mount(mnt);
	ns->root = m;
	ns->nr_mounts = 1;
	mnt_add_to_ns(ns, m);
	init_task.nsproxy->mnt_ns = ns;
	get_mnt_ns(ns);

	root.mnt = mnt;
	root.dentry = mnt->mnt_root;
	mnt->mnt_flags |= MNT_LOCKED;

	set_fs_pwd(current->fs, &root);
	set_fs_root(current->fs, &root);

	mnt_ns_tree_add(ns);
}
void __init mnt_init(void)
{
	int err;

	mnt_cache = kmem_cache_create("mnt_cache", sizeof(struct mount),
			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT, NULL);

	mount_hashtable = alloc_large_system_hash("Mount-cache",
				sizeof(struct hlist_head),
				mhash_entries, 19,
				HASH_ZERO,
				&m_hash_shift, &m_hash_mask, 0, 0);
	mountpoint_hashtable = alloc_large_system_hash("Mountpoint-cache",
				sizeof(struct hlist_head),
				mphash_entries, 19,
				HASH_ZERO,
				&mp_hash_shift, &mp_hash_mask, 0, 0);

	if (!mount_hashtable || !mountpoint_hashtable)
		panic("Failed to allocate mount hash table\n");

	kernfs_init();

	err = sysfs_init();
	if (err)
		printk(KERN_WARNING "%s: sysfs_init error: %d\n",
			__func__, err);
	fs_kobj = kobject_create_and_add("fs", NULL);
	if (!fs_kobj)
		printk(KERN_WARNING "%s: kobj create error\n", __func__);
	shmem_init();
	init_rootfs();
	init_mount_tree();
}
void put_mnt_ns(struct mnt_namespace *ns)
{
	if (!refcount_dec_and_test(&ns->ns.count))
		return;
	drop_collected_mounts(&ns->root->mnt);
	free_mnt_ns(ns);
}
struct vfsmount *kern_mount(struct file_system_type *type)
{
	struct vfsmount *mnt;
	mnt = vfs_kern_mount(type, SB_KERNMOUNT, type->name, NULL);
	if (!IS_ERR(mnt)) {
		/*
		 * it is a longterm mount, don't release mnt until
		 * we unmount before file sys is unregistered
		 */
		real_mount(mnt)->mnt_ns = MNT_NS_INTERNAL;
	}
	return mnt;
}
EXPORT_SYMBOL_GPL(kern_mount);
void kern_unmount(struct vfsmount *mnt)
{
	/* release long term mount so mount point can be released */
	if (!IS_ERR(mnt)) {
		mnt_make_shortterm(mnt);
		synchronize_rcu();	/* yecchhh... */
		mntput(mnt);
	}
}
EXPORT_SYMBOL(kern_unmount);
void kern_unmount_array(struct vfsmount *mnt[], unsigned int num)
{
	unsigned int i;

	for (i = 0; i < num; i++)
		mnt_make_shortterm(mnt[i]);
	synchronize_rcu_expedited();
	for (i = 0; i < num; i++)
		mntput(mnt[i]);
}
EXPORT_SYMBOL(kern_unmount_array);
bool our_mnt(struct vfsmount *mnt)
{
	return check_mnt(real_mount(mnt));
}
bool current_chrooted(void)
{
	/* Does the current process have a non-standard root */
	struct path ns_root;
	struct path fs_root;
	bool chrooted;

	/* Find the namespace root */
	ns_root.mnt = &current->nsproxy->mnt_ns->root->mnt;
	ns_root.dentry = ns_root.mnt->mnt_root;
	path_get(&ns_root);
	while (d_mountpoint(ns_root.dentry) && follow_down_one(&ns_root))
		;

	get_fs_root(current->fs, &fs_root);

	chrooted = !path_equal(&fs_root, &ns_root);

	path_put(&fs_root);
	path_put(&ns_root);

	return chrooted;
}
static bool mnt_already_visible(struct mnt_namespace *ns,
				const struct super_block *sb,
				int *new_mnt_flags)
{
	int new_flags = *new_mnt_flags;
	struct mount *mnt, *n;
	bool visible = false;

	down_read(&namespace_sem);
	rbtree_postorder_for_each_entry_safe(mnt, n, &ns->mounts, mnt_node) {
		struct mount *child;
		int mnt_flags;

		if (mnt->mnt.mnt_sb->s_type != sb->s_type)
			continue;

		/* This mount is not fully visible if its root directory
		 * is not the root directory of the filesystem.
		 */
		if (mnt->mnt.mnt_root != mnt->mnt.mnt_sb->s_root)
			continue;

		/* A local view of the mount flags */
		mnt_flags = mnt->mnt.mnt_flags;

		/* Don't miss readonly hidden in the superblock flags */
		if (sb_rdonly(mnt->mnt.mnt_sb))
			mnt_flags |= MNT_LOCK_READONLY;

		/* Verify the mount flags are equal to or more permissive
		 * than the proposed new mount.
		 */
		if ((mnt_flags & MNT_LOCK_READONLY) &&
		    !(new_flags & MNT_READONLY))
			continue;
		if ((mnt_flags & MNT_LOCK_ATIME) &&
		    ((mnt_flags & MNT_ATIME_MASK) != (new_flags & MNT_ATIME_MASK)))
			continue;

		/* This mount is not fully visible if there are any
		 * locked child mounts that cover anything except for
		 * empty directories.
		 */
		list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) {
			struct inode *inode = child->mnt_mountpoint->d_inode;
			/* Only worry about locked mounts */
			if (!(child->mnt.mnt_flags & MNT_LOCKED))
				continue;
			/* Is the directory permanently empty? */
			if (!is_empty_dir_inode(inode))
				goto next;
		}
		/* Preserve the locked attributes */
		*new_mnt_flags |= mnt_flags & (MNT_LOCK_READONLY | \
					       MNT_LOCK_ATIME);
		visible = true;
		goto found;
	next:	;
	}
found:
	up_read(&namespace_sem);
	return visible;
}
static bool mount_too_revealing(const struct super_block *sb, int *new_mnt_flags)
{
	const unsigned long required_iflags = SB_I_NOEXEC | SB_I_NODEV;
	struct mnt_namespace *ns = current->nsproxy->mnt_ns;
	unsigned long s_iflags;

	if (ns->user_ns == &init_user_ns)
		return false;

	/* Can this filesystem be too revealing? */
	s_iflags = sb->s_iflags;
	if (!(s_iflags & SB_I_USERNS_VISIBLE))
		return false;

	if ((s_iflags & required_iflags) != required_iflags) {
		WARN_ONCE(1, "Expected s_iflags to contain 0x%lx\n",
			  required_iflags);
		return true;
	}

	return !mnt_already_visible(ns, sb, new_mnt_flags);
}
bool mnt_may_suid(struct vfsmount *mnt)
{
	/*
	 * Foreign mounts (accessed via fchdir or through /proc
	 * symlinks) are always treated as if they are nosuid. This
	 * prevents namespaces from trusting potentially unsafe
	 * suid/sgid bits, file caps, or security labels that originate
	 * in other namespaces.
	 */
	return !(mnt->mnt_flags & MNT_NOSUID) && check_mnt(real_mount(mnt)) &&
	       current_in_userns(mnt->mnt_sb->s_user_ns);
}
static struct ns_common *mntns_get(struct task_struct *task)
{
	struct ns_common *ns = NULL;
	struct nsproxy *nsproxy;

	task_lock(task);
	nsproxy = task->nsproxy;
	if (nsproxy) {
		ns = &nsproxy->mnt_ns->ns;
		get_mnt_ns(to_mnt_ns(ns));
	}
	task_unlock(task);

	return ns;
}
static void mntns_put(struct ns_common *ns)
{
	put_mnt_ns(to_mnt_ns(ns));
}
static int mntns_install(struct nsset *nsset, struct ns_common *ns)
{
	struct nsproxy *nsproxy = nsset->nsproxy;
	struct fs_struct *fs = nsset->fs;
	struct mnt_namespace *mnt_ns = to_mnt_ns(ns), *old_mnt_ns;
	struct user_namespace *user_ns = nsset->cred->user_ns;
	struct path root;
	int err;

	if (!ns_capable(mnt_ns->user_ns, CAP_SYS_ADMIN) ||
	    !ns_capable(user_ns, CAP_SYS_CHROOT) ||
	    !ns_capable(user_ns, CAP_SYS_ADMIN))
		return -EPERM;

	if (is_anon_ns(mnt_ns))
		return -EINVAL;

	if (fs->users != 1)
		return -EINVAL;

	get_mnt_ns(mnt_ns);
	old_mnt_ns = nsproxy->mnt_ns;
	nsproxy->mnt_ns = mnt_ns;

	/* Find the root */
	err = vfs_path_lookup(mnt_ns->root->mnt.mnt_root, &mnt_ns->root->mnt,
				"/", LOOKUP_DOWN, &root);
	if (err) {
		/* revert to old namespace */
		nsproxy->mnt_ns = old_mnt_ns;
		put_mnt_ns(mnt_ns);
		return err;
	}

	put_mnt_ns(old_mnt_ns);

	/* Update the pwd and root */
	set_fs_pwd(fs, &root);
	set_fs_root(fs, &root);

	path_put(&root);
	return 0;
}
static struct user_namespace *mntns_owner(struct ns_common *ns)
{
	return to_mnt_ns(ns)->user_ns;
}
const struct proc_ns_operations mntns_operations = {
	.name		= "mnt",
	.type		= CLONE_NEWNS,
	.get		= mntns_get,
	.put		= mntns_put,
	.install	= mntns_install,
	.owner		= mntns_owner,
};
#ifdef CONFIG_SYSCTL
static struct ctl_table fs_namespace_sysctls[] = {
	{
		.procname	= "mount-max",
		.data		= &sysctl_mount_max,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ONE,
	},
};

static int __init init_fs_namespace_sysctls(void)
{
	register_sysctl_init("fs", fs_namespace_sysctls);
	return 0;
}
fs_initcall(init_fs_namespace_sysctls);

#endif /* CONFIG_SYSCTL */