/* fs/fs_struct.c */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/path.h>
#include <linux/slab.h>
#include <linux/fs_struct.h>

/*
 * Replace the fs->{rootmnt,root} with {mnt,dentry}. Put the old values.
 * It can block.
 */
void set_fs_root(struct fs_struct *fs, struct path *path)
{
	struct path old_root;

	write_lock(&fs->lock);
	old_root = fs->root;
	fs->root = *path;
	path_get(path);
	write_unlock(&fs->lock);
	if (old_root.dentry)
		path_put(&old_root);
}

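/*
 * Illustrative sketch only (assumed caller, not part of this file): the
 * typical user of set_fs_root() is the chroot(2) path, roughly:
 *
 *	error = user_path_dir(filename, &path);	// resolve and pin the new root
 *	...permission checks...
 *	set_fs_root(current->fs, &path);	// takes its own reference
 *	path_put(&path);			// drop the lookup reference
 *
 * Helper names here are inferred from context, not quoted code.
 */
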
/*
 * Replace the fs->{pwdmnt,pwd} with {mnt,dentry}. Put the old values.
 * It can block.
 */
void set_fs_pwd(struct fs_struct *fs, struct path *path)
{
	struct path old_pwd;

	write_lock(&fs->lock);
	old_pwd = fs->pwd;
	fs->pwd = *path;
	path_get(path);
	write_unlock(&fs->lock);

	if (old_pwd.dentry)
		path_put(&old_pwd);
}

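/*
 * set_fs_pwd() is the chdir(2)/fchdir(2) counterpart of set_fs_root()
 * above (an assumption from context): both take their own reference on
 * *path via path_get(), so the caller keeps, and must later drop, its
 * lookup reference.
 */
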
void chroot_fs_refs(struct path *old_root, struct path *new_root)
{
	struct task_struct *g, *p;
	struct fs_struct *fs;
	int count = 0;

	read_lock(&tasklist_lock);
	do_each_thread(g, p) {
		task_lock(p);
		fs = p->fs;
		if (fs) {
			write_lock(&fs->lock);
			if (fs->root.dentry == old_root->dentry
			    && fs->root.mnt == old_root->mnt) {
				path_get(new_root);
				fs->root = *new_root;
				count++;
			}
			if (fs->pwd.dentry == old_root->dentry
			    && fs->pwd.mnt == old_root->mnt) {
				path_get(new_root);
				fs->pwd = *new_root;
				count++;
			}
			write_unlock(&fs->lock);
		}
		task_unlock(p);
	} while_each_thread(g, p);
	read_unlock(&tasklist_lock);
	while (count--)
		path_put(old_root);
}

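/*
 * Note on the count/path_put() loop above: tasklist_lock, task_lock() and
 * fs->lock are held while tasks are rewritten, and path_put() may block
 * when it drops the final reference to a dentry or vfsmount, so the old
 * references are only released after all locks are dropped, once per
 * reference that was replaced.
 */
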
void free_fs_struct(struct fs_struct *fs)
{
	path_put(&fs->root);
	path_put(&fs->pwd);
	kmem_cache_free(fs_cachep, fs);
}

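/*
 * free_fs_struct() above assumes fs->users has already dropped to zero
 * (see exit_fs() and unshare_fs_struct() below): it releases the root and
 * pwd references and returns the object to fs_cachep.
 */
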
void exit_fs(struct task_struct *tsk)
{
	struct fs_struct *fs = tsk->fs;

	if (fs) {
		int kill;
		task_lock(tsk);
		write_lock(&fs->lock);
		tsk->fs = NULL;
		kill = !--fs->users;
		write_unlock(&fs->lock);
		task_unlock(tsk);
		if (kill)
			free_fs_struct(fs);
	}
}

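/*
 * In exit_fs() the final free is deliberately done after fs->lock and
 * task_lock() have been released: free_fs_struct() ends up in path_put(),
 * which may block, so only the refcount drop happens under the locks.
 */
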
struct fs_struct *copy_fs_struct(struct fs_struct *old)
{
	struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
	/* We don't need to lock fs - think why ;-) */
	if (fs) {
		fs->users = 1;
		fs->in_exec = 0;
		rwlock_init(&fs->lock);
		fs->umask = old->umask;
		read_lock(&old->lock);
		fs->root = old->root;
		path_get(&old->root);
		fs->pwd = old->pwd;
		path_get(&old->pwd);
		read_unlock(&old->lock);
	}
	return fs;
}

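/*
 * On the "think why" above: the freshly allocated fs is not yet reachable
 * by any other task, so its own lock is unnecessary; old->lock is read-held
 * only to copy a consistent root/pwd pair before taking the references.
 *
 * Illustrative sketch only (roughly what copy_fs() in kernel/fork.c does;
 * names are inferred from context, not quoted code):
 *
 *	if (clone_flags & CLONE_FS) {
 *		// share: bump old fs->users under fs->lock
 *	} else {
 *		tsk->fs = copy_fs_struct(current->fs);
 *		if (!tsk->fs)
 *			return -ENOMEM;
 *	}
 */
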
int unshare_fs_struct(void)
{
	struct fs_struct *fs = current->fs;
	struct fs_struct *new_fs = copy_fs_struct(fs);
	int kill;

	if (!new_fs)
		return -ENOMEM;

	task_lock(current);
	write_lock(&fs->lock);
	kill = !--fs->users;
	current->fs = new_fs;
	write_unlock(&fs->lock);
	task_unlock(current);

	if (kill)
		free_fs_struct(fs);

	return 0;
}
EXPORT_SYMBOL_GPL(unshare_fs_struct);

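/*
 * unshare_fs_struct() gives the caller a private copy of its fs_struct so
 * that later chdir()/umask changes no longer affect the tasks it was
 * shared with; in-kernel service threads such as nfsd/lockd are the
 * expected users of this export (an assumption from context, not stated
 * in this file).
 */
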
int current_umask(void)
{
	return current->fs->umask;
}
EXPORT_SYMBOL(current_umask);

/* to be mentioned only in INIT_TASK */
struct fs_struct init_fs = {
	.users		= 1,
	.lock		= __RW_LOCK_UNLOCKED(init_fs.lock),
	.umask		= 0022,
};

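/*
 * init_fs is the fs_struct referenced by INIT_TASK (and hence by kernel
 * threads that never unshare it). Its root and pwd are left zeroed here
 * and are expected to be filled in during early boot via set_fs_root()/
 * set_fs_pwd() once the rootfs is mounted (an assumption from context).
 */
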
void daemonize_fs_struct(void)
{
	struct fs_struct *fs = current->fs;

	if (fs) {
		int kill;

		task_lock(current);

		write_lock(&init_fs.lock);
		init_fs.users++;
		write_unlock(&init_fs.lock);

		write_lock(&fs->lock);
		current->fs = &init_fs;
		kill = !--fs->users;
		write_unlock(&fs->lock);

		task_unlock(current);
		if (kill)
			free_fs_struct(fs);
	}
}
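
/*
 * daemonize_fs_struct() is the helper used when a task daemonizes (the
 * daemonize() path, as assumed from context): it switches the caller onto
 * the shared init_fs, bumping init_fs.users under init_fs.lock, drops its
 * old fs_struct, and frees it outside the locks if this was the last user.
 */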