/*
 *  linux/fs/pnode.c
 *
 * (C) Copyright IBM Corporation 2005.
 *	Released under GPL v2.
 *	Author : Ram Pai (linuxram@us.ibm.com)
 *
 */
#include <linux/mnt_namespace.h>
#include <linux/mount.h>
#include <linux/fs.h>
#include <linux/nsproxy.h>
#include "internal.h"
#include "pnode.h"
/* return the next shared peer mount of @p */
static inline struct mount *next_peer(struct mount *p)
{
	return list_entry(p->mnt_share.next, struct mount, mnt_share);
}
static inline struct mount *first_slave(struct mount *p)
{
	return list_entry(p->mnt_slave_list.next, struct mount, mnt_slave);
}
static inline struct mount *next_slave(struct mount *p)
{
	return list_entry(p->mnt_slave.next, struct mount, mnt_slave);
}
static struct mount *get_peer_under_root(struct mount *mnt,
					 struct mnt_namespace *ns,
					 const struct path *root)
{
	struct mount *m = mnt;

	do {
		/* Check the namespace first for optimization */
		if (m->mnt_ns == ns && is_path_reachable(m, m->mnt.mnt_root, root))
			return m;

		m = next_peer(m);
	} while (m != mnt);

	return NULL;
}
/*
 * Get ID of closest dominating peer group having a representative
 * under the given root.
 *
 * Caller must hold namespace_sem
 */
int get_dominating_id(struct mount *mnt, const struct path *root)
{
	struct mount *m;

	for (m = mnt->mnt_master; m != NULL; m = m->mnt_master) {
		struct mount *d = get_peer_under_root(m, mnt->mnt_ns, root);
		if (d)
			return d->mnt_group_id;
	}

	return 0;
}
static int do_make_slave(struct mount *mnt)
{
	struct mount *master, *slave_mnt;

	if (list_empty(&mnt->mnt_share)) {
		if (IS_MNT_SHARED(mnt)) {
			mnt_release_group_id(mnt);
			CLEAR_MNT_SHARED(mnt);
		}
		master = mnt->mnt_master;
		if (!master) {
			struct list_head *p = &mnt->mnt_slave_list;
			while (!list_empty(p)) {
				slave_mnt = list_first_entry(p,
						struct mount, mnt_slave);
				list_del_init(&slave_mnt->mnt_slave);
				slave_mnt->mnt_master = NULL;
			}
			return 0;
		}
	} else {
		struct mount *m;
		/*
		 * slave 'mnt' to a peer mount that has the
		 * same root dentry. If none is available then
		 * slave it to anything that is available.
		 */
		for (m = master = next_peer(mnt); m != mnt; m = next_peer(m)) {
			if (m->mnt.mnt_root == mnt->mnt.mnt_root) {
				master = m;
				break;
			}
		}
		list_del_init(&mnt->mnt_share);
		mnt->mnt_group_id = 0;
		CLEAR_MNT_SHARED(mnt);
	}
	list_for_each_entry(slave_mnt, &mnt->mnt_slave_list, mnt_slave)
		slave_mnt->mnt_master = master;
	list_move(&mnt->mnt_slave, &master->mnt_slave_list);
	list_splice(&mnt->mnt_slave_list, master->mnt_slave_list.prev);
	INIT_LIST_HEAD(&mnt->mnt_slave_list);
	mnt->mnt_master = master;
	return 0;
}
/*
 * vfsmount lock must be held for write
 */
void change_mnt_propagation(struct mount *mnt, int type)
{
	if (type == MS_SHARED) {
		set_mnt_shared(mnt);
		return;
	}
	do_make_slave(mnt);
	if (type != MS_SLAVE) {
		list_del_init(&mnt->mnt_slave);
		mnt->mnt_master = NULL;
		if (type == MS_UNBINDABLE)
			mnt->mnt.mnt_flags |= MNT_UNBINDABLE;
		else
			mnt->mnt.mnt_flags &= ~MNT_UNBINDABLE;
	}
}
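
/*
 * Example usage from userspace (illustrative only; paths are hypothetical):
 * the propagation type of an existing mount is changed with mount(2) and
 * exactly one of MS_SHARED, MS_PRIVATE, MS_SLAVE or MS_UNBINDABLE, which
 * is what ends up in change_mnt_propagation() above:
 *
 *	#include <sys/mount.h>
 *
 *	mount(NULL, "/mnt", NULL, MS_SHARED, NULL);	// start/join a peer group
 *	mount(NULL, "/mnt", NULL, MS_SLAVE, NULL);	// receive, but do not send, propagation
 *	mount(NULL, "/mnt", NULL, MS_UNBINDABLE, NULL);	// private, and refuses bind mounts
 */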
/*
 * get the next mount in the propagation tree.
 * @m: the mount seen last
 * @origin: the original mount from where the tree walk initiated
 *
 * Note that peer groups form contiguous segments of slave lists.
 * We rely on that in get_source() to be able to find out if
 * vfsmount found while iterating with propagation_next() is
 * a peer of one we'd found earlier.
 */
static struct mount *propagation_next(struct mount *m,
					 struct mount *origin)
{
	/* are there any slaves of this mount? */
	if (!IS_MNT_NEW(m) && !list_empty(&m->mnt_slave_list))
		return first_slave(m);

	while (1) {
		struct mount *master = m->mnt_master;

		if (master == origin->mnt_master) {
			struct mount *next = next_peer(m);
			return (next == origin) ? NULL : next;
		} else if (m->mnt_slave.next != &master->mnt_slave_list)
			return next_slave(m);

		/* back at master */
		m = master;
	}
}
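
/*
 * Worked example (hypothetical topology): take a peer group {A, B} where
 * B has slaves C and D, and C in turn has slave E.  Starting from A,
 * successive calls m = propagation_next(m, A) visit B, C, E, D and then
 * return NULL once the walk arrives back at A.
 */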
static struct mount *next_group(struct mount *m, struct mount *origin)
{
	while (1) {
		while (1) {
			struct mount *next;
			if (!IS_MNT_NEW(m) && !list_empty(&m->mnt_slave_list))
				return first_slave(m);
			next = next_peer(m);
			if (m->mnt_group_id == origin->mnt_group_id) {
				if (next == origin)
					return NULL;
			} else if (m->mnt_slave.next != &next->mnt_slave)
				break;
			m = next;
		}
		/* m is the last peer */
		while (1) {
			struct mount *master = m->mnt_master;
			if (m->mnt_slave.next != &master->mnt_slave_list)
				return next_slave(m);
			m = next_peer(master);
			if (master->mnt_group_id == origin->mnt_group_id)
				break;
			if (master->mnt_slave.next == &m->mnt_slave)
				break;
			m = master;
		}
		if (m == origin)
			return NULL;
	}
}
/* all accesses are serialized by namespace_sem */
static struct user_namespace *user_ns;
static struct mount *last_dest, *first_source, *last_source, *dest_master;
static struct mountpoint *mp;
static struct hlist_head *list;

static inline bool peers(struct mount *m1, struct mount *m2)
{
	return m1->mnt_group_id == m2->mnt_group_id && m1->mnt_group_id;
}
static int propagate_one(struct mount *m)
{
	struct mount *child;
	int type;
	/* skip ones added by this propagate_mnt() */
	if (IS_MNT_NEW(m))
		return 0;
	/* skip if mountpoint isn't covered by it */
	if (!is_subdir(mp->m_dentry, m->mnt.mnt_root))
		return 0;
	if (peers(m, last_dest)) {
		type = CL_MAKE_SHARED;
	} else {
		struct mount *n, *p;
		bool done;
		for (n = m; ; n = p) {
			p = n->mnt_master;
			if (p == dest_master || IS_MNT_MARKED(p))
				break;
		}
		do {
			struct mount *parent = last_source->mnt_parent;
			if (last_source == first_source)
				break;
			done = parent->mnt_master == p;
			if (done && peers(n, parent))
				break;
			last_source = last_source->mnt_master;
		} while (!done);

		type = CL_SLAVE;
		/* beginning of peer group among the slaves? */
		if (IS_MNT_SHARED(m))
			type |= CL_MAKE_SHARED;
	}

	/* Notice when we are propagating across user namespaces */
	if (m->mnt_ns->user_ns != user_ns)
		type |= CL_UNPRIVILEGED;
	child = copy_tree(last_source, last_source->mnt.mnt_root, type);
	if (IS_ERR(child))
		return PTR_ERR(child);
	child->mnt.mnt_flags &= ~MNT_LOCKED;
	mnt_set_mountpoint(m, mp, child);
	last_dest = m;
	last_source = child;
	if (m->mnt_master != dest_master) {
		read_seqlock_excl(&mount_lock);
		SET_MNT_MARK(m->mnt_master);
		read_sequnlock_excl(&mount_lock);
	}
	hlist_add_head(&child->mnt_hash, list);
	return count_mounts(m->mnt_ns, child);
}
/*
 * mount 'source_mnt' under the destination 'dest_mnt' at
 * mountpoint 'dest_mp'. And propagate that mount to
 * all the peer and slave mounts of 'dest_mnt'.
 * Link all the new mounts into a propagation tree headed at
 * source_mnt. Also link all the new mounts using ->mnt_list
 * headed at source_mnt's ->mnt_list
 *
 * @dest_mnt: destination mount.
 * @dest_mp: destination mountpoint.
 * @source_mnt: source mount.
 * @tree_list : list of heads of trees to be attached.
 */
int propagate_mnt(struct mount *dest_mnt, struct mountpoint *dest_mp,
		    struct mount *source_mnt, struct hlist_head *tree_list)
{
	struct mount *m, *n;
	int ret = 0;

	/*
	 * we don't want to bother passing tons of arguments to
	 * propagate_one(); everything is serialized by namespace_sem,
	 * so globals will do just fine.
	 */
	user_ns = current->nsproxy->mnt_ns->user_ns;
	last_dest = dest_mnt;
	first_source = source_mnt;
	last_source = source_mnt;
	mp = dest_mp;
	list = tree_list;
	dest_master = dest_mnt->mnt_master;

	/* all peers of dest_mnt, except dest_mnt itself */
	for (n = next_peer(dest_mnt); n != dest_mnt; n = next_peer(n)) {
		ret = propagate_one(n);
		if (ret)
			goto out;
	}

	/* all slave groups */
	for (m = next_group(dest_mnt, dest_mnt); m;
			m = next_group(m, dest_mnt)) {
		/* everything in that slave group */
		n = m;
		do {
			ret = propagate_one(n);
			if (ret)
				goto out;
			n = next_peer(n);
		} while (n != m);
	}
out:
	read_seqlock_excl(&mount_lock);
	hlist_for_each_entry(n, tree_list, mnt_hash) {
		m = n->mnt_parent;
		if (m->mnt_master != dest_mnt->mnt_master)
			CLEAR_MNT_MARK(m->mnt_master);
	}
	read_sequnlock_excl(&mount_lock);
	return ret;
}
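
/*
 * Illustration (paths are hypothetical): once two mounts are peers, a new
 * mount created under one of them is replicated under the other by
 * propagate_mnt():
 *
 *	mount(NULL, "/a", NULL, MS_SHARED, NULL);	// /a starts a peer group
 *	mount("/a", "/b", NULL, MS_BIND, NULL);		// the new mount at /b joins it
 *	mount("none", "/a/sub", "tmpfs", 0, NULL);	// a copy also appears at /b/sub
 */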
static struct mount *find_topper(struct mount *mnt)
{
	/* If there is exactly one mount covering mnt completely return it. */
	struct mount *child;

	if (!list_is_singular(&mnt->mnt_mounts))
		return NULL;

	child = list_first_entry(&mnt->mnt_mounts, struct mount, mnt_child);
	if (child->mnt_mountpoint != mnt->mnt.mnt_root)
		return NULL;

	return child;
}
/*
 * return true if the refcount is greater than count
 */
static inline int do_refcount_check(struct mount *mnt, int count)
{
	return mnt_get_count(mnt) > count;
}
/*
 * check if the mount 'mnt' can be unmounted successfully.
 * @mnt: the mount to be checked for unmount
 * NOTE: unmounting 'mnt' would naturally propagate to all
 * other mounts its parent propagates to.
 * Check if any of these mounts that **do not have submounts**
 * have more references than 'refcnt'. If so return busy.
 *
 * vfsmount lock must be held for write
 */
int propagate_mount_busy(struct mount *mnt, int refcnt)
{
	struct mount *m, *child, *topper;
	struct mount *parent = mnt->mnt_parent;

	if (mnt == parent)
		return do_refcount_check(mnt, refcnt);

	/*
	 * quickly check if the current mount can be unmounted.
	 * If not, we don't have to go checking for all other
	 * mounts
	 */
	if (!list_empty(&mnt->mnt_mounts) || do_refcount_check(mnt, refcnt))
		return 1;

	for (m = propagation_next(parent, parent); m;
			m = propagation_next(m, parent)) {
		int count = 1;
		child = __lookup_mnt(&m->mnt, mnt->mnt_mountpoint);
		if (!child)
			continue;

		/* Is there exactly one mount on the child that covers
		 * it completely whose reference should be ignored?
		 */
		topper = find_topper(child);
		if (topper)
			count += 1;
		else if (!list_empty(&child->mnt_mounts))
			continue;

		if (do_refcount_check(child, count))
			return 1;
	}
	return 0;
}
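
/*
 * Illustration (paths are hypothetical): continuing the shared /a and /b
 * example, if the propagated copy at /b/sub is busy (say, it is some
 * process's working directory), a plain umount("/a/sub") fails with
 * EBUSY, because this check is applied to every propagated copy as well.
 */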
/*
 * Clear MNT_LOCKED when it can be shown to be safe.
 *
 * mount_lock lock must be held for write
 */
void propagate_mount_unlock(struct mount *mnt)
{
	struct mount *parent = mnt->mnt_parent;
	struct mount *m, *child;

	BUG_ON(parent == mnt);

	for (m = propagation_next(parent, parent); m;
			m = propagation_next(m, parent)) {
		child = __lookup_mnt(&m->mnt, mnt->mnt_mountpoint);
		if (child)
			child->mnt.mnt_flags &= ~MNT_LOCKED;
	}
}
/*
 * Mark all mounts that the MNT_LOCKED logic will allow to be unmounted.
 */
static void mark_umount_candidates(struct mount *mnt)
{
	struct mount *parent = mnt->mnt_parent;
	struct mount *m;

	BUG_ON(parent == mnt);

	for (m = propagation_next(parent, parent); m;
			m = propagation_next(m, parent)) {
		struct mount *child = __lookup_mnt(&m->mnt,
						mnt->mnt_mountpoint);
		if (!child || (child->mnt.mnt_flags & MNT_UMOUNT))
			continue;
		if (!IS_MNT_LOCKED(child) || IS_MNT_MARKED(m)) {
			SET_MNT_MARK(child);
		}
	}
}
/*
 * NOTE: unmounting 'mnt' naturally propagates to all other mounts its
 * parent propagates to.
 */
static void __propagate_umount(struct mount *mnt)
{
	struct mount *parent = mnt->mnt_parent;
	struct mount *m;

	BUG_ON(parent == mnt);

	for (m = propagation_next(parent, parent); m;
			m = propagation_next(m, parent)) {
		struct mount *topper;
		struct mount *child = __lookup_mnt(&m->mnt,
						mnt->mnt_mountpoint);
		/*
		 * umount the child only if the child has no children
		 * and the child is marked safe to unmount.
		 */
		if (!child || !IS_MNT_MARKED(child))
			continue;
		CLEAR_MNT_MARK(child);

		/* If there is exactly one mount covering all of child
		 * replace child with that mount.
		 */
		topper = find_topper(child);
		if (topper)
			mnt_change_mountpoint(child->mnt_parent, child->mnt_mp,
					      topper);

		if (list_empty(&child->mnt_mounts)) {
			list_del_init(&child->mnt_child);
			child->mnt.mnt_flags |= MNT_UMOUNT;
			list_move_tail(&child->mnt_list, &mnt->mnt_list);
		}
	}
}
/*
 * collect all mounts that receive propagation from the mounts in @list,
 * and return these additional mounts in the same list.
 * @list: the list of mounts to be unmounted.
 *
 * vfsmount lock must be held for write
 */
int propagate_umount(struct list_head *list)
{
	struct mount *mnt;

	list_for_each_entry_reverse(mnt, list, mnt_list)
		mark_umount_candidates(mnt);

	list_for_each_entry(mnt, list, mnt_list)
		__propagate_umount(mnt);
	return 0;
}
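
/*
 * Illustration (paths are hypothetical): in the shared /a and /b example,
 * a successful umount("/a/sub") also takes down the propagated copy at
 * /b/sub; the helpers above are what collect those extra mounts onto the
 * umount list.
 */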