/*
 * fs/dnotify.c
 * (retrieved from linux/fpc-iii.git, blob eaecc4cfe5402584c6e3b9510628170c3be463bf)
 */
/*
 * Directory notifications for Linux.
 *
 * Copyright (C) 2000,2001,2002 Stephen Rothwell
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
16 #include <linux/fs.h>
17 #include <linux/module.h>
18 #include <linux/sched.h>
19 #include <linux/dnotify.h>
20 #include <linux/init.h>
21 #include <linux/spinlock.h>
22 #include <linux/slab.h>
23 #include <linux/file.h>
25 int dir_notify_enable __read_mostly = 1;
27 static struct kmem_cache *dn_cache __read_mostly;
29 static void redo_inode_mask(struct inode *inode)
31 unsigned long new_mask;
32 struct dnotify_struct *dn;
34 new_mask = 0;
35 for (dn = inode->i_dnotify; dn != NULL; dn = dn->dn_next)
36 new_mask |= dn->dn_mask & ~DN_MULTISHOT;
37 inode->i_dnotify_mask = new_mask;
40 void dnotify_flush(struct file *filp, fl_owner_t id)
42 struct dnotify_struct *dn;
43 struct dnotify_struct **prev;
44 struct inode *inode;
46 inode = filp->f_path.dentry->d_inode;
47 if (!S_ISDIR(inode->i_mode))
48 return;
49 spin_lock(&inode->i_lock);
50 prev = &inode->i_dnotify;
51 while ((dn = *prev) != NULL) {
52 if ((dn->dn_owner == id) && (dn->dn_filp == filp)) {
53 *prev = dn->dn_next;
54 redo_inode_mask(inode);
55 kmem_cache_free(dn_cache, dn);
56 break;
58 prev = &dn->dn_next;
60 spin_unlock(&inode->i_lock);
63 int fcntl_dirnotify(int fd, struct file *filp, unsigned long arg)
65 struct dnotify_struct *dn;
66 struct dnotify_struct *odn;
67 struct dnotify_struct **prev;
68 struct inode *inode;
69 fl_owner_t id = current->files;
70 struct file *f;
71 int error = 0;
73 if ((arg & ~DN_MULTISHOT) == 0) {
74 dnotify_flush(filp, id);
75 return 0;
77 if (!dir_notify_enable)
78 return -EINVAL;
79 inode = filp->f_path.dentry->d_inode;
80 if (!S_ISDIR(inode->i_mode))
81 return -ENOTDIR;
82 dn = kmem_cache_alloc(dn_cache, GFP_KERNEL);
83 if (dn == NULL)
84 return -ENOMEM;
85 spin_lock(&inode->i_lock);
86 prev = &inode->i_dnotify;
87 while ((odn = *prev) != NULL) {
88 if ((odn->dn_owner == id) && (odn->dn_filp == filp)) {
89 odn->dn_fd = fd;
90 odn->dn_mask |= arg;
91 inode->i_dnotify_mask |= arg & ~DN_MULTISHOT;
92 goto out_free;
94 prev = &odn->dn_next;
97 rcu_read_lock();
98 f = fcheck(fd);
99 rcu_read_unlock();
100 /* we'd lost the race with close(), sod off silently */
101 /* note that inode->i_lock prevents reordering problems
102 * between accesses to descriptor table and ->i_dnotify */
103 if (f != filp)
104 goto out_free;
106 error = __f_setown(filp, task_pid(current), PIDTYPE_PID, 0);
107 if (error)
108 goto out_free;
110 dn->dn_mask = arg;
111 dn->dn_fd = fd;
112 dn->dn_filp = filp;
113 dn->dn_owner = id;
114 inode->i_dnotify_mask |= arg & ~DN_MULTISHOT;
115 dn->dn_next = inode->i_dnotify;
116 inode->i_dnotify = dn;
117 spin_unlock(&inode->i_lock);
119 if (filp->f_op && filp->f_op->dir_notify)
120 return filp->f_op->dir_notify(filp, arg);
121 return 0;
123 out_free:
124 spin_unlock(&inode->i_lock);
125 kmem_cache_free(dn_cache, dn);
126 return error;
129 void __inode_dir_notify(struct inode *inode, unsigned long event)
131 struct dnotify_struct * dn;
132 struct dnotify_struct **prev;
133 struct fown_struct * fown;
134 int changed = 0;
136 spin_lock(&inode->i_lock);
137 prev = &inode->i_dnotify;
138 while ((dn = *prev) != NULL) {
139 if ((dn->dn_mask & event) == 0) {
140 prev = &dn->dn_next;
141 continue;
143 fown = &dn->dn_filp->f_owner;
144 send_sigio(fown, dn->dn_fd, POLL_MSG);
145 if (dn->dn_mask & DN_MULTISHOT)
146 prev = &dn->dn_next;
147 else {
148 *prev = dn->dn_next;
149 changed = 1;
150 kmem_cache_free(dn_cache, dn);
153 if (changed)
154 redo_inode_mask(inode);
155 spin_unlock(&inode->i_lock);
158 EXPORT_SYMBOL(__inode_dir_notify);
/*
 * This is hopelessly wrong, but unfixable without API changes.  At
 * least it doesn't oops the kernel...
 *
 * To safely access ->d_parent we need to keep d_move away from it.  Use the
 * dentry's d_lock for this.
 */
167 void dnotify_parent(struct dentry *dentry, unsigned long event)
169 struct dentry *parent;
171 if (!dir_notify_enable)
172 return;
174 spin_lock(&dentry->d_lock);
175 parent = dentry->d_parent;
176 if (parent->d_inode->i_dnotify_mask & event) {
177 dget(parent);
178 spin_unlock(&dentry->d_lock);
179 __inode_dir_notify(parent->d_inode, event);
180 dput(parent);
181 } else {
182 spin_unlock(&dentry->d_lock);
185 EXPORT_SYMBOL_GPL(dnotify_parent);
187 static int __init dnotify_init(void)
189 dn_cache = kmem_cache_create("dnotify_cache",
190 sizeof(struct dnotify_struct), 0, SLAB_PANIC, NULL);
191 return 0;
194 module_init(dnotify_init)