/*
 *  linux/fs/pnode.c
 *
 * (C) Copyright IBM Corporation 2005.
 *	Released under GPL v2.
 *	Author : Ram Pai (linuxram@us.ibm.com)
 *
 */
#include <linux/mnt_namespace.h>
#include <linux/mount.h>
#include <linux/fs.h>
#include <linux/nsproxy.h>
#include "internal.h"
#include "pnode.h"

/* return the next shared peer mount of @p */
static inline struct mount *next_peer(struct mount *p)
{
	return list_entry(p->mnt_share.next, struct mount, mnt_share);
}
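
/* return the first mount on the slave list of @p */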
static inline struct mount *first_slave(struct mount *p)
{
	return list_entry(p->mnt_slave_list.next, struct mount, mnt_slave);
}
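
/* return the next mount on the same slave list as @p */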
static inline struct mount *next_slave(struct mount *p)
{
	return list_entry(p->mnt_slave.next, struct mount, mnt_slave);
}
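
/*
 * return a peer of @mnt that belongs to the namespace @ns and whose root
 * is reachable from @root, if there is one
 */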
static struct mount *get_peer_under_root(struct mount *mnt,
					 struct mnt_namespace *ns,
					 const struct path *root)
{
	struct mount *m = mnt;

	do {
		/* Check the namespace first for optimization */
		if (m->mnt_ns == ns && is_path_reachable(m, m->mnt.mnt_root, root))
			return m;

		m = next_peer(m);
	} while (m != mnt);

	return NULL;
}

/*
 * Get ID of closest dominating peer group having a representative
 * under the given root.
 *
 * Caller must hold namespace_sem
 */
int get_dominating_id(struct mount *mnt, const struct path *root)
{
	struct mount *m;

	for (m = mnt->mnt_master; m != NULL; m = m->mnt_master) {
		struct mount *d = get_peer_under_root(m, mnt->mnt_ns, root);
		if (d)
			return d->mnt_group_id;
	}

	return 0;
}
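
/*
 * Make @mnt a slave:
 * - if @mnt has peers, one of them (preferably one with the same root
 *   dentry) becomes the new master; @mnt leaves the peer group and its
 *   own slaves are handed over to that master;
 * - if @mnt has no peers but already has a master, its slaves are handed
 *   over to that master;
 * - if @mnt has neither peers nor a master, its slaves simply become
 *   masterless.
 */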
static int do_make_slave(struct mount *mnt)
{
	struct mount *master, *slave_mnt;

	if (list_empty(&mnt->mnt_share)) {
		if (IS_MNT_SHARED(mnt)) {
			mnt_release_group_id(mnt);
			CLEAR_MNT_SHARED(mnt);
		}
		master = mnt->mnt_master;
		if (!master) {
			struct list_head *p = &mnt->mnt_slave_list;
			while (!list_empty(p)) {
				slave_mnt = list_first_entry(p,
						struct mount, mnt_slave);
				list_del_init(&slave_mnt->mnt_slave);
				slave_mnt->mnt_master = NULL;
			}
			return 0;
		}
	} else {
		struct mount *m;
		/*
		 * slave 'mnt' to a peer mount that has the
		 * same root dentry. If none is available then
		 * slave it to anything that is available.
		 */
		for (m = master = next_peer(mnt); m != mnt; m = next_peer(m)) {
			if (m->mnt.mnt_root == mnt->mnt.mnt_root) {
				master = m;
				break;
			}
		}
		list_del_init(&mnt->mnt_share);
		mnt->mnt_group_id = 0;
		CLEAR_MNT_SHARED(mnt);
	}
	list_for_each_entry(slave_mnt, &mnt->mnt_slave_list, mnt_slave)
		slave_mnt->mnt_master = master;
	list_move(&mnt->mnt_slave, &master->mnt_slave_list);
	list_splice(&mnt->mnt_slave_list, master->mnt_slave_list.prev);
	INIT_LIST_HEAD(&mnt->mnt_slave_list);
	mnt->mnt_master = master;
	return 0;
}

/*
 * vfsmount lock must be held for write
 */
void change_mnt_propagation(struct mount *mnt, int type)
{
	if (type == MS_SHARED) {
		set_mnt_shared(mnt);
		return;
	}
	do_make_slave(mnt);
	if (type != MS_SLAVE) {
		list_del_init(&mnt->mnt_slave);
		mnt->mnt_master = NULL;
		if (type == MS_UNBINDABLE)
			mnt->mnt.mnt_flags |= MNT_UNBINDABLE;
		else
			mnt->mnt.mnt_flags &= ~MNT_UNBINDABLE;
	}
}

/*
 * get the next mount in the propagation tree.
 * @m: the mount seen last
 * @origin: the original mount from where the tree walk initiated
 *
 * Note that peer groups form contiguous segments of slave lists.
 * We rely on that in get_source() to be able to find out if
 * vfsmount found while iterating with propagation_next() is
 * a peer of one we'd found earlier.
 */
static struct mount *propagation_next(struct mount *m,
					 struct mount *origin)
{
	/* are there any slaves of this mount? */
	if (!IS_MNT_NEW(m) && !list_empty(&m->mnt_slave_list))
		return first_slave(m);

	while (1) {
		struct mount *master = m->mnt_master;

		if (master == origin->mnt_master) {
			struct mount *next = next_peer(m);
			return (next == origin) ? NULL : next;
		} else if (m->mnt_slave.next != &master->mnt_slave_list)
			return next_slave(m);

		/* back at master */
		m = master;
	}
}
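
/*
 * Walk one representative of each peer group that receives propagation
 * from @origin; used by propagate_mnt() to visit all slave groups.
 * Returns NULL when the walk is complete.
 */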
static struct mount *next_group(struct mount *m, struct mount *origin)
{
	while (1) {
		while (1) {
			struct mount *next;
			if (!IS_MNT_NEW(m) && !list_empty(&m->mnt_slave_list))
				return first_slave(m);
			next = next_peer(m);
			if (m->mnt_group_id == origin->mnt_group_id) {
				if (next == origin)
					return NULL;
			} else if (m->mnt_slave.next != &next->mnt_slave)
				break;
			m = next;
		}
		/* m is the last peer */
		while (1) {
			struct mount *master = m->mnt_master;
			if (m->mnt_slave.next != &master->mnt_slave_list)
				return next_slave(m);
			m = next_peer(master);
			if (master->mnt_group_id == origin->mnt_group_id)
				break;
			if (master->mnt_slave.next == &m->mnt_slave)
				break;
			m = master;
		}
		if (m == origin)
			return NULL;
	}
}

/* all accesses are serialized by namespace_sem */
static struct user_namespace *user_ns;
static struct mount *last_dest, *first_source, *last_source, *dest_master;
static struct mountpoint *mp;
static struct hlist_head *list;
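
/* true if @m1 and @m2 are in the same peer group (both shared, same group id) */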
static inline bool peers(struct mount *m1, struct mount *m2)
{
	return m1->mnt_group_id == m2->mnt_group_id && m1->mnt_group_id;
}
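
/*
 * Propagate the new mount to one mount @m that receives propagation:
 * clone the tree rooted at last_source and attach the clone at the
 * corresponding mountpoint on @m.  Called by propagate_mnt() for every
 * peer and slave of the destination mount.
 */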
static int propagate_one(struct mount *m)
{
	struct mount *child;
	int type;
	/* skip ones added by this propagate_mnt() */
	if (IS_MNT_NEW(m))
		return 0;
	/* skip if mountpoint isn't covered by it */
	if (!is_subdir(mp->m_dentry, m->mnt.mnt_root))
		return 0;
	if (peers(m, last_dest)) {
		type = CL_MAKE_SHARED;
	} else {
		struct mount *n, *p;
		bool done;
		for (n = m; ; n = p) {
			p = n->mnt_master;
			if (p == dest_master || IS_MNT_MARKED(p))
				break;
		}
		do {
			struct mount *parent = last_source->mnt_parent;
			if (last_source == first_source)
				break;
			done = parent->mnt_master == p;
			if (done && peers(n, parent))
				break;
			last_source = last_source->mnt_master;
		} while (!done);

		type = CL_SLAVE;
		/* beginning of peer group among the slaves? */
		if (IS_MNT_SHARED(m))
			type |= CL_MAKE_SHARED;
	}

	/* Notice when we are propagating across user namespaces */
	if (m->mnt_ns->user_ns != user_ns)
		type |= CL_UNPRIVILEGED;
	child = copy_tree(last_source, last_source->mnt.mnt_root, type);
	if (IS_ERR(child))
		return PTR_ERR(child);
	child->mnt.mnt_flags &= ~MNT_LOCKED;
	mnt_set_mountpoint(m, mp, child);
	last_dest = m;
	last_source = child;
	if (m->mnt_master != dest_master) {
		read_seqlock_excl(&mount_lock);
		SET_MNT_MARK(m->mnt_master);
		read_sequnlock_excl(&mount_lock);
	}
	hlist_add_head(&child->mnt_hash, list);
	return count_mounts(m->mnt_ns, child);
}

/*
 * mount 'source_mnt' under the destination 'dest_mnt' at
 * the mountpoint 'dest_mp'. And propagate that mount to
 * all the peer and slave mounts of 'dest_mnt'.
 * Link all the new mounts into a propagation tree headed at
 * source_mnt. Also link all the new mounts using ->mnt_list
 * headed at source_mnt's ->mnt_list
 *
 * @dest_mnt: destination mount.
 * @dest_mp: destination mountpoint.
 * @source_mnt: source mount.
 * @tree_list : list of heads of trees to be attached.
 */
int propagate_mnt(struct mount *dest_mnt, struct mountpoint *dest_mp,
		    struct mount *source_mnt, struct hlist_head *tree_list)
{
	struct mount *m, *n;
	int ret = 0;

	/*
	 * we don't want to bother passing tons of arguments to
	 * propagate_one(); everything is serialized by namespace_sem,
	 * so globals will do just fine.
	 */
	user_ns = current->nsproxy->mnt_ns->user_ns;
	last_dest = dest_mnt;
	first_source = source_mnt;
	last_source = source_mnt;
	mp = dest_mp;
	list = tree_list;
	dest_master = dest_mnt->mnt_master;

	/* all peers of dest_mnt, except dest_mnt itself */
	for (n = next_peer(dest_mnt); n != dest_mnt; n = next_peer(n)) {
		ret = propagate_one(n);
		if (ret)
			goto out;
	}

	/* all slave groups */
	for (m = next_group(dest_mnt, dest_mnt); m;
			m = next_group(m, dest_mnt)) {
		/* everything in that slave group */
		n = m;
		do {
			ret = propagate_one(n);
			if (ret)
				goto out;
			n = next_peer(n);
		} while (n != m);
	}
out:
	read_seqlock_excl(&mount_lock);
	hlist_for_each_entry(n, tree_list, mnt_hash) {
		m = n->mnt_parent;
		if (m->mnt_master != dest_mnt->mnt_master)
			CLEAR_MNT_MARK(m->mnt_master);
	}
	read_sequnlock_excl(&mount_lock);
	return ret;
}

static struct mount *find_topper(struct mount *mnt)
{
	/* If there is exactly one mount covering mnt completely return it. */
	struct mount *child;

	if (!list_is_singular(&mnt->mnt_mounts))
		return NULL;

	child = list_first_entry(&mnt->mnt_mounts, struct mount, mnt_child);
	if (child->mnt_mountpoint != mnt->mnt.mnt_root)
		return NULL;

	return child;
}

/*
 * return true if the refcount is greater than count
 */
static inline int do_refcount_check(struct mount *mnt, int count)
{
	return mnt_get_count(mnt) > count;
}

/*
 * check if the mount 'mnt' can be unmounted successfully.
 * @mnt: the mount to be checked for unmount
 * NOTE: unmounting 'mnt' would naturally propagate to all
 * other mounts its parent propagates to.
 * Check if any of these mounts that **do not have submounts**
 * have more references than 'refcnt'. If so return busy.
 *
 * vfsmount lock must be held for write
 */
int propagate_mount_busy(struct mount *mnt, int refcnt)
{
	struct mount *m, *child, *topper;
	struct mount *parent = mnt->mnt_parent;

	if (mnt == parent)
		return do_refcount_check(mnt, refcnt);

	/*
	 * quickly check if the current mount can be unmounted.
	 * If not, we don't have to go checking for all other
	 * mounts
	 */
	if (!list_empty(&mnt->mnt_mounts) || do_refcount_check(mnt, refcnt))
		return 1;

	for (m = propagation_next(parent, parent); m;
			m = propagation_next(m, parent)) {
		int count = 1;
		child = __lookup_mnt(&m->mnt, mnt->mnt_mountpoint);
		if (!child)
			continue;

		/* Is there exactly one mount on the child that covers
		 * it completely whose reference should be ignored?
		 */
		topper = find_topper(child);
		if (topper)
			count += 1;
		else if (!list_empty(&child->mnt_mounts))
			continue;

		if (do_refcount_check(child, count))
			return 1;
	}
	return 0;
}

/*
 * Clear MNT_LOCKED when it can be shown to be safe.
 *
 * mount_lock lock must be held for write
 */
void propagate_mount_unlock(struct mount *mnt)
{
	struct mount *parent = mnt->mnt_parent;
	struct mount *m, *child;

	BUG_ON(parent == mnt);

	for (m = propagation_next(parent, parent); m;
			m = propagation_next(m, parent)) {
		child = __lookup_mnt(&m->mnt, mnt->mnt_mountpoint);
		if (child)
			child->mnt.mnt_flags &= ~MNT_LOCKED;
	}
}

/*
 * Mark all mounts that the MNT_LOCKED logic will allow to be unmounted.
 */
static void mark_umount_candidates(struct mount *mnt)
{
	struct mount *parent = mnt->mnt_parent;
	struct mount *m;

	BUG_ON(parent == mnt);

	for (m = propagation_next(parent, parent); m;
			m = propagation_next(m, parent)) {
		struct mount *child = __lookup_mnt(&m->mnt,
						mnt->mnt_mountpoint);
		if (!child || (child->mnt.mnt_flags & MNT_UMOUNT))
			continue;
		if (!IS_MNT_LOCKED(child) || IS_MNT_MARKED(m)) {
			SET_MNT_MARK(child);
		}
	}
}

/*
 * NOTE: unmounting 'mnt' naturally propagates to all other mounts its
 * parent propagates to.
 */
static void __propagate_umount(struct mount *mnt)
{
	struct mount *parent = mnt->mnt_parent;
	struct mount *m;

	BUG_ON(parent == mnt);

	for (m = propagation_next(parent, parent); m;
			m = propagation_next(m, parent)) {
		struct mount *topper;
		struct mount *child = __lookup_mnt(&m->mnt,
						mnt->mnt_mountpoint);
		/*
		 * umount the child only if the child has no children
		 * and the child is marked safe to unmount.
		 */
		if (!child || !IS_MNT_MARKED(child))
			continue;
		CLEAR_MNT_MARK(child);

		/* If there is exactly one mount covering all of child
		 * replace child with that mount.
		 */
		topper = find_topper(child);
		if (topper)
			mnt_change_mountpoint(child->mnt_parent, child->mnt_mp,
					      topper);

		if (list_empty(&child->mnt_mounts)) {
			list_del_init(&child->mnt_child);
			child->mnt.mnt_flags |= MNT_UMOUNT;
			list_move_tail(&child->mnt_list, &mnt->mnt_list);
		}
	}
}

/*
 * collect all mounts that receive propagation from the mount in @list,
 * and return these additional mounts in the same list.
 * @list: the list of mounts to be unmounted.
 *
 * vfsmount lock must be held for write
 */
int propagate_umount(struct list_head *list)
{
	struct mount *mnt;

	list_for_each_entry_reverse(mnt, list, mnt_list)
		mark_umount_candidates(mnt);

	list_for_each_entry(mnt, list, mnt_list)
		__propagate_umount(mnt);
	return 0;
}