/* fs/fs_struct.c */
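/*
 * fs_struct handling: each task's root directory, current working
 * directory and umask live in a struct fs_struct, shared between
 * tasks that were cloned with CLONE_FS.
 */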
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/path.h>
#include <linux/slab.h>
#include <linux/fs_struct.h>
#include "internal.h"

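/*
 * fs->root and fs->pwd hold long-term references: take the normal
 * path reference and additionally mark the mount as a long-term user.
 */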
static inline void path_get_longterm(struct path *path)
{
	path_get(path);
	mnt_make_longterm(path->mnt);
}

static inline void path_put_longterm(struct path *path)
{
	mnt_make_shortterm(path->mnt);
	path_put(path);
}

/*
 * Replace the fs->{rootmnt,root} with {mnt,dentry}. Put the old values.
 * It can block.
 */
void set_fs_root(struct fs_struct *fs, struct path *path)
{
	struct path old_root;

	spin_lock(&fs->lock);
	write_seqcount_begin(&fs->seq);
	old_root = fs->root;
	fs->root = *path;
	path_get_longterm(path);
	write_seqcount_end(&fs->seq);
	spin_unlock(&fs->lock);
	if (old_root.dentry)
		path_put_longterm(&old_root);
}

/*
 * Replace the fs->{pwdmnt,pwd} with {mnt,dentry}. Put the old values.
 * It can block.
 */
void set_fs_pwd(struct fs_struct *fs, struct path *path)
{
	struct path old_pwd;

	spin_lock(&fs->lock);
	write_seqcount_begin(&fs->seq);
	old_pwd = fs->pwd;
	fs->pwd = *path;
	path_get_longterm(path);
	write_seqcount_end(&fs->seq);
	spin_unlock(&fs->lock);

	if (old_pwd.dentry)
		path_put_longterm(&old_pwd);
}

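/*
 * Walk all tasks and move any fs->root or fs->pwd that still points at
 * old_root over to new_root, taking a long-term reference on new_root
 * for each switch and dropping the matching references on old_root at
 * the end.  Used when the root is replaced, e.g. by pivot_root().
 */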
void chroot_fs_refs(struct path *old_root, struct path *new_root)
{
	struct task_struct *g, *p;
	struct fs_struct *fs;
	int count = 0;

	read_lock(&tasklist_lock);
	do_each_thread(g, p) {
		task_lock(p);
		fs = p->fs;
		if (fs) {
			spin_lock(&fs->lock);
			write_seqcount_begin(&fs->seq);
			if (fs->root.dentry == old_root->dentry
			    && fs->root.mnt == old_root->mnt) {
				path_get_longterm(new_root);
				fs->root = *new_root;
				count++;
			}
			if (fs->pwd.dentry == old_root->dentry
			    && fs->pwd.mnt == old_root->mnt) {
				path_get_longterm(new_root);
				fs->pwd = *new_root;
				count++;
			}
			write_seqcount_end(&fs->seq);
			spin_unlock(&fs->lock);
		}
		task_unlock(p);
	} while_each_thread(g, p);
	read_unlock(&tasklist_lock);
	while (count--)
		path_put_longterm(old_root);
}

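/*
 * Drop the long-term references held by an fs_struct and free it.
 */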
void free_fs_struct(struct fs_struct *fs)
{
	path_put_longterm(&fs->root);
	path_put_longterm(&fs->pwd);
	kmem_cache_free(fs_cachep, fs);
}

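/*
 * Detach the exiting task from its fs_struct; free it if this was the
 * last user.
 */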
void exit_fs(struct task_struct *tsk)
{
	struct fs_struct *fs = tsk->fs;

	if (fs) {
		int kill;
		task_lock(tsk);
		spin_lock(&fs->lock);
		write_seqcount_begin(&fs->seq);
		tsk->fs = NULL;
		kill = !--fs->users;
		write_seqcount_end(&fs->seq);
		spin_unlock(&fs->lock);
		task_unlock(tsk);
		if (kill)
			free_fs_struct(fs);
	}
}

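/*
 * Allocate a private copy of @old with its own long-term references on
 * root and pwd.  The new copy starts with a single user.
 */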
struct fs_struct *copy_fs_struct(struct fs_struct *old)
{
	struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
	/* We don't need to lock fs - think why ;-) */
	if (fs) {
		fs->users = 1;
		fs->in_exec = 0;
		spin_lock_init(&fs->lock);
		seqcount_init(&fs->seq);
		fs->umask = old->umask;

		spin_lock(&old->lock);
		fs->root = old->root;
		path_get_longterm(&fs->root);
		fs->pwd = old->pwd;
		path_get_longterm(&fs->pwd);
		spin_unlock(&old->lock);
	}
	return fs;
}

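/*
 * Replace current->fs with a private copy, dropping the task's share
 * of the old fs_struct (and freeing it if that was the last user).
 */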
int unshare_fs_struct(void)
{
	struct fs_struct *fs = current->fs;
	struct fs_struct *new_fs = copy_fs_struct(fs);
	int kill;

	if (!new_fs)
		return -ENOMEM;

	task_lock(current);
	spin_lock(&fs->lock);
	kill = !--fs->users;
	current->fs = new_fs;
	spin_unlock(&fs->lock);
	task_unlock(current);

	if (kill)
		free_fs_struct(fs);

	return 0;
}
EXPORT_SYMBOL_GPL(unshare_fs_struct);

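/* Return the umask of the current task. */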
int current_umask(void)
{
	return current->fs->umask;
}
EXPORT_SYMBOL(current_umask);

/* to be mentioned only in INIT_TASK */
struct fs_struct init_fs = {
	.users		= 1,
	.lock		= __SPIN_LOCK_UNLOCKED(init_fs.lock),
	.seq		= SEQCNT_ZERO,
	.umask		= 0022,
};

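/*
 * Switch the current task over to the shared init_fs, dropping its own
 * fs_struct (and freeing it if it was the last user).  Used by
 * daemonize() for kernel threads.
 */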
void daemonize_fs_struct(void)
{
	struct fs_struct *fs = current->fs;

	if (fs) {
		int kill;

		task_lock(current);

		spin_lock(&init_fs.lock);
		init_fs.users++;
		spin_unlock(&init_fs.lock);

		spin_lock(&fs->lock);
		current->fs = &init_fs;
		kill = !--fs->users;
		spin_unlock(&fs->lock);

		task_unlock(current);
		if (kill)
			free_fs_struct(fs);
	}
}