/*
 *  linux/fs/pnode.c
 *
 * (C) Copyright IBM Corporation 2005.
 *	Released under GPL v2.
 *	Author : Ram Pai (linuxram@us.ibm.com)
 *
 */
#include <linux/mnt_namespace.h>
#include <linux/mount.h>
#include <linux/fs.h>
#include <linux/nsproxy.h>
#include "internal.h"
#include "pnode.h"

/* return the next shared peer mount of @p */
static inline struct mount *next_peer(struct mount *p)
{
	return list_entry(p->mnt_share.next, struct mount, mnt_share);
}
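
/* return the first mount on @p's slave list (callers ensure it is non-empty) */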
static inline struct mount *first_slave(struct mount *p)
{
	return list_entry(p->mnt_slave_list.next, struct mount, mnt_slave);
}
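
/* return the mount that follows @p on the slave list they share */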
static inline struct mount *next_slave(struct mount *p)
{
	return list_entry(p->mnt_slave.next, struct mount, mnt_slave);
}
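
/*
 * Return a peer of @mnt that belongs to namespace @ns and whose root
 * is reachable from @root, or NULL if the peer group has no such mount.
 */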
static struct mount *get_peer_under_root(struct mount *mnt,
					 struct mnt_namespace *ns,
					 const struct path *root)
{
	struct mount *m = mnt;

	do {
		/* Check the namespace first for optimization */
		if (m->mnt_ns == ns && is_path_reachable(m, m->mnt.mnt_root, root))
			return m;

		m = next_peer(m);
	} while (m != mnt);

	return NULL;
}

/*
 * Get ID of closest dominating peer group having a representative
 * under the given root.
 *
 * Caller must hold namespace_sem
 */
int get_dominating_id(struct mount *mnt, const struct path *root)
{
	struct mount *m;

	for (m = mnt->mnt_master; m != NULL; m = m->mnt_master) {
		struct mount *d = get_peer_under_root(m, mnt->mnt_ns, root);
		if (d)
			return d->mnt_group_id;
	}

	return 0;
}
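
/*
 * Demote @mnt from shared to slave: take it out of its peer group and
 * hang it, together with everything on its own slave list, off a new
 * master (preferably a former peer sharing the same root dentry).
 */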
static int do_make_slave(struct mount *mnt)
{
	struct mount *peer_mnt = mnt, *master = mnt->mnt_master;
	struct mount *slave_mnt;

	/*
	 * slave 'mnt' to a peer mount that has the
	 * same root dentry. If none is available then
	 * slave it to anything that is available.
	 */
	while ((peer_mnt = next_peer(peer_mnt)) != mnt &&
	       peer_mnt->mnt.mnt_root != mnt->mnt.mnt_root)
		;

	if (peer_mnt == mnt) {
		peer_mnt = next_peer(mnt);
		if (peer_mnt == mnt)
			peer_mnt = NULL;
	}
	if (IS_MNT_SHARED(mnt) && list_empty(&mnt->mnt_share))
		mnt_release_group_id(mnt);

	list_del_init(&mnt->mnt_share);
	mnt->mnt_group_id = 0;

	if (peer_mnt)
		master = peer_mnt;
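
	/*
	 * With a master available, hand our slaves over to it and become
	 * a slave ourselves; otherwise every slave is set free.
	 */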
	if (master) {
		list_for_each_entry(slave_mnt, &mnt->mnt_slave_list, mnt_slave)
			slave_mnt->mnt_master = master;
		list_move(&mnt->mnt_slave, &master->mnt_slave_list);
		list_splice(&mnt->mnt_slave_list, master->mnt_slave_list.prev);
		INIT_LIST_HEAD(&mnt->mnt_slave_list);
	} else {
		struct list_head *p = &mnt->mnt_slave_list;
		while (!list_empty(p)) {
			slave_mnt = list_first_entry(p,
					struct mount, mnt_slave);
			list_del_init(&slave_mnt->mnt_slave);
			slave_mnt->mnt_master = NULL;
		}
	}
	mnt->mnt_master = master;
	CLEAR_MNT_SHARED(mnt);
	return 0;
}
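
/*
 * Set the propagation type of @mnt: MS_SHARED marks it (or keeps it)
 * shared, MS_SLAVE demotes it to a slave of its former peers, while
 * MS_PRIVATE and MS_UNBINDABLE detach it from peers and master alike
 * (the latter additionally forbidding bind mounts from it).
 */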

/*
 * vfsmount lock must be held for write
 */
void change_mnt_propagation(struct mount *mnt, int type)
{
	if (type == MS_SHARED) {
		set_mnt_shared(mnt);
		return;
	}
	do_make_slave(mnt);
	if (type != MS_SLAVE) {
		list_del_init(&mnt->mnt_slave);
		mnt->mnt_master = NULL;
		if (type == MS_UNBINDABLE)
			mnt->mnt.mnt_flags |= MNT_UNBINDABLE;
		else
			mnt->mnt.mnt_flags &= ~MNT_UNBINDABLE;
	}
}

/*
 * get the next mount in the propagation tree.
 * @m: the mount seen last
 * @origin: the original mount from where the tree walk initiated
 *
 * Note that peer groups form contiguous segments of slave lists.
 * We rely on that in get_source() to be able to find out if a
 * vfsmount found while iterating with propagation_next() is
 * a peer of one we'd found earlier.
 */
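
/*
 * The walk is depth-first; a sketch of the order: descend from @m into
 * its slaves first, then advance along the current peer/slave list,
 * climbing back up through ->mnt_master whenever a list is exhausted.
 */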
static struct mount *propagation_next(struct mount *m,
					 struct mount *origin)
{
	/* are there any slaves of this mount? */
	if (!IS_MNT_NEW(m) && !list_empty(&m->mnt_slave_list))
		return first_slave(m);

	while (1) {
		struct mount *master = m->mnt_master;

		if (master == origin->mnt_master) {
			struct mount *next = next_peer(m);
			return (next == origin) ? NULL : next;
		} else if (m->mnt_slave.next != &master->mnt_slave_list)
			return next_slave(m);

		/* back at master */
		m = master;
	}
}

/*
 * return the source mount to be used for cloning
 *
 * @dest	the current destination mount
 * @last_dest	the last seen destination mount
 * @last_src	the last seen source mount
 * @type	return CL_SLAVE if the new mount has to be
 *		cloned as a slave.
 */
static struct mount *get_source(struct mount *dest,
				struct mount *last_dest,
				struct mount *last_src,
				int *type)
{
	struct mount *p_last_src = NULL;
	struct mount *p_last_dest = NULL;
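
	/*
	 * Climb the master chain from last_dest until we reach dest's
	 * peer group, mirroring each step on the source side so that
	 * last_src remains the counterpart of last_dest.
	 */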
	while (last_dest != dest->mnt_master) {
		p_last_dest = last_dest;
		p_last_src = last_src;
		last_dest = last_dest->mnt_master;
		last_src = last_src->mnt_master;
	}

	if (p_last_dest) {
		do {
			p_last_dest = next_peer(p_last_dest);
		} while (IS_MNT_NEW(p_last_dest));
		/* is that a peer of the earlier? */
		if (dest == p_last_dest) {
			*type = CL_MAKE_SHARED;
			return p_last_src;
		}
	}
	/* slave of the earlier, then */
	*type = CL_SLAVE;
	/* beginning of peer group among the slaves? */
	if (IS_MNT_SHARED(dest))
		*type |= CL_MAKE_SHARED;
	return last_src;
}

/*
 * mount 'source_mnt' under the destination 'dest_mnt' at the
 * mountpoint 'dest_mp', and propagate that mount to all the
 * peer and slave mounts of 'dest_mnt'.
 * Link all the new mounts into a propagation tree headed at
 * source_mnt. Also link all the new mounts using ->mnt_list
 * headed at source_mnt's ->mnt_list.
 *
 * @dest_mnt: destination mount.
 * @dest_mp: destination mountpoint.
 * @source_mnt: source mount.
 * @tree_list: list of heads of trees to be attached.
 */
int propagate_mnt(struct mount *dest_mnt, struct mountpoint *dest_mp,
		  struct mount *source_mnt, struct list_head *tree_list)
{
	struct user_namespace *user_ns = current->nsproxy->mnt_ns->user_ns;
	struct mount *m, *child;
	int ret = 0;
	struct mount *prev_dest_mnt = dest_mnt;
	struct mount *prev_src_mnt = source_mnt;
	LIST_HEAD(tmp_list);
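
	/* walk every mount that receives propagation from dest_mnt */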
	for (m = propagation_next(dest_mnt, dest_mnt); m;
			m = propagation_next(m, dest_mnt)) {
		int type;
		struct mount *source;

		if (IS_MNT_NEW(m))
			continue;

		source = get_source(m, prev_dest_mnt, prev_src_mnt, &type);

		/* Notice when we are propagating across user namespaces */
		if (m->mnt_ns->user_ns != user_ns)
			type |= CL_UNPRIVILEGED;

		child = copy_tree(source, source->mnt.mnt_root, type);
		if (IS_ERR(child)) {
			ret = PTR_ERR(child);
			list_splice(tree_list, tmp_list.prev);
			goto out;
		}

		if (is_subdir(dest_mp->m_dentry, m->mnt.mnt_root)) {
			mnt_set_mountpoint(m, dest_mp, child);
			list_add_tail(&child->mnt_hash, tree_list);
		} else {
			/*
			 * This can happen if the parent mount was bind mounted
			 * on some subdirectory of a shared/slave mount.
			 */
			list_add_tail(&child->mnt_hash, &tmp_list);
		}
		prev_dest_mnt = m;
		prev_src_mnt = child;
	}
out:
	br_write_lock(&vfsmount_lock);
	while (!list_empty(&tmp_list)) {
		child = list_first_entry(&tmp_list, struct mount, mnt_hash);
		umount_tree(child, 0);
	}
	br_write_unlock(&vfsmount_lock);
	return ret;
}

/*
 * return true if the refcount of 'mnt', discounting references held
 * by ghost mounts, is greater than 'count'
 */
static inline int do_refcount_check(struct mount *mnt, int count)
{
	int mycount = mnt_get_count(mnt) - mnt->mnt_ghosts;
	return (mycount > count);
}

/*
 * check if the mount 'mnt' can be unmounted successfully.
 * @mnt: the mount to be checked for unmount
 * NOTE: unmounting 'mnt' would naturally propagate to all
 * other mounts its parent propagates to.
 * Check if any of these mounts that **do not have submounts**
 * have more references than 'refcnt'. If so return busy.
 *
 * vfsmount lock must be held for write
 */
int propagate_mount_busy(struct mount *mnt, int refcnt)
{
	struct mount *m, *child;
	struct mount *parent = mnt->mnt_parent;
	int ret = 0;

	if (mnt == parent)
		return do_refcount_check(mnt, refcnt);

	/*
	 * quickly check if the current mount can be unmounted.
	 * If not, there is no point checking any of the other
	 * mounts.
	 */
	if (!list_empty(&mnt->mnt_mounts) || do_refcount_check(mnt, refcnt))
		return 1;
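
	/*
	 * Check mnt's counterpart at every mount its parent propagates
	 * to; a busy counterpart without submounts makes the whole
	 * unmount fail.
	 */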
	for (m = propagation_next(parent, parent); m;
			m = propagation_next(m, parent)) {
		child = __lookup_mnt(&m->mnt, mnt->mnt_mountpoint, 0);
		if (child && list_empty(&child->mnt_mounts) &&
		    (ret = do_refcount_check(child, 1)))
			break;
	}
	return ret;
}

/*
 * NOTE: unmounting 'mnt' naturally propagates to all other mounts its
 * parent propagates to.
 */
static void __propagate_umount(struct mount *mnt)
{
	struct mount *parent = mnt->mnt_parent;
	struct mount *m;

	BUG_ON(parent == mnt);

	for (m = propagation_next(parent, parent); m;
			m = propagation_next(m, parent)) {

		struct mount *child = __lookup_mnt(&m->mnt,
					mnt->mnt_mountpoint, 0);
		/*
		 * umount the child only if the child has no
		 * other children
		 */
		if (child && list_empty(&child->mnt_mounts))
			list_move_tail(&child->mnt_hash, &mnt->mnt_hash);
	}
}

/*
 * collect all mounts that receive propagation from the mount in @list,
 * and return these additional mounts in the same list.
 * @list: the list of mounts to be unmounted.
 *
 * vfsmount lock must be held for write
 */
int propagate_umount(struct list_head *list)
{
	struct mount *mnt;

	list_for_each_entry(mnt, list, mnt_hash)
		__propagate_umount(mnt);
	return 0;
}