/*
 *  linux/fs/char_dev.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/config.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/string.h>

#include <linux/major.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/smp_lock.h>
#include <linux/seq_file.h>

#include <linux/kobject.h>
#include <linux/kobj_map.h>
#include <linux/cdev.h>
#include <linux/mutex.h>

#ifdef CONFIG_KMOD
#include <linux/kmod.h>
#endif
static struct kobj_map *cdev_map;

static DEFINE_MUTEX(chrdevs_lock);

static struct char_device_struct {
	struct char_device_struct *next;
	unsigned int major;
	unsigned int baseminor;
	int minorct;
	char name[64];
	struct file_operations *fops;
	struct cdev *cdev;		/* will die */
} *chrdevs[CHRDEV_MAJOR_HASH_SIZE];
/* index in the above */
static inline int major_to_index(int major)
{
	return major % CHRDEV_MAJOR_HASH_SIZE;
}
#ifdef CONFIG_PROC_FS

void chrdev_show(struct seq_file *f, off_t offset)
{
	struct char_device_struct *cd;

	if (offset < CHRDEV_MAJOR_HASH_SIZE) {
		mutex_lock(&chrdevs_lock);
		for (cd = chrdevs[offset]; cd; cd = cd->next)
			seq_printf(f, "%3d %s\n", cd->major, cd->name);
		mutex_unlock(&chrdevs_lock);
	}
}

#endif /* CONFIG_PROC_FS */
/*
 * Register a single major with a specified minor range.
 *
 * If major == 0 this function will dynamically allocate a major and return
 * its number.
 *
 * If major > 0 this function will attempt to reserve the passed range of
 * minors and will return zero on success.
 *
 * Returns a -ve errno on failure.
 */
static struct char_device_struct *
__register_chrdev_region(unsigned int major, unsigned int baseminor,
			 int minorct, const char *name)
{
	struct char_device_struct *cd, **cp;
	int ret = 0;
	int i;

	cd = kzalloc(sizeof(struct char_device_struct), GFP_KERNEL);
	if (cd == NULL)
		return ERR_PTR(-ENOMEM);

	mutex_lock(&chrdevs_lock);

	/* temporary: pick the highest free slot when a dynamic major is wanted */
	if (major == 0) {
		for (i = ARRAY_SIZE(chrdevs)-1; i > 0; i--) {
			if (chrdevs[i] == NULL)
				break;
		}

		if (i == 0) {
			ret = -EBUSY;
			goto out;
		}
		major = i;
		ret = major;
	}

	cd->major = major;
	cd->baseminor = baseminor;
	cd->minorct = minorct;
	strncpy(cd->name, name, 64);

	i = major_to_index(major);

	/* keep the hash chain sorted by major, then baseminor */
	for (cp = &chrdevs[i]; *cp; cp = &(*cp)->next)
		if ((*cp)->major > major ||
		    ((*cp)->major == major && (*cp)->baseminor >= baseminor))
			break;
	/* the new range must not overlap the entry it would precede */
	if (*cp && (*cp)->major == major &&
	    (*cp)->baseminor < baseminor + minorct) {
		ret = -EBUSY;
		goto out;
	}
	cd->next = *cp;
	*cp = cd;
	mutex_unlock(&chrdevs_lock);
	return cd;
out:
	mutex_unlock(&chrdevs_lock);
	kfree(cd);
	return ERR_PTR(ret);
}
static struct char_device_struct *
__unregister_chrdev_region(unsigned major, unsigned baseminor, int minorct)
{
	struct char_device_struct *cd = NULL, **cp;
	int i = major_to_index(major);

	mutex_lock(&chrdevs_lock);
	for (cp = &chrdevs[i]; *cp; cp = &(*cp)->next)
		if ((*cp)->major == major &&
		    (*cp)->baseminor == baseminor &&
		    (*cp)->minorct == minorct)
			break;
	if (*cp) {
		cd = *cp;
		*cp = cd->next;
	}
	mutex_unlock(&chrdevs_lock);
	return cd;
}
/*
 * Register a range of device numbers.  @from must include the major;
 * @count consecutive numbers are reserved under @name, splitting the
 * request at major boundaries.  Returns zero on success or a negative
 * errno on failure.
 */
int register_chrdev_region(dev_t from, unsigned count, const char *name)
{
	struct char_device_struct *cd;
	dev_t to = from + count;
	dev_t n, next;

	for (n = from; n < to; n = next) {
		next = MKDEV(MAJOR(n)+1, 0);
		if (next > to)
			next = to;
		cd = __register_chrdev_region(MAJOR(n), MINOR(n),
			       next - n, name);
		if (IS_ERR(cd))
			goto fail;
	}
	return 0;
fail:
	/* roll back the chunks registered before the failure */
	to = n;
	for (n = from; n < to; n = next) {
		next = MKDEV(MAJOR(n)+1, 0);
		kfree(__unregister_chrdev_region(MAJOR(n), MINOR(n), next - n));
	}
	return PTR_ERR(cd);
}
/*
 * Allocate a range of @count device numbers with a dynamically chosen
 * major, starting at minor @baseminor.  The first number in the range is
 * returned through @dev.  Returns zero on success or a negative errno.
 */
int alloc_chrdev_region(dev_t *dev, unsigned baseminor, unsigned count,
			const char *name)
{
	struct char_device_struct *cd;
	cd = __register_chrdev_region(0, baseminor, count, name);
	if (IS_ERR(cd))
		return PTR_ERR(cd);
	*dev = MKDEV(cd->major, cd->baseminor);
	return 0;
}
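/*
 * Minimal usage sketch (hypothetical driver code, placeholder names such
 * as example_devt): a driver that needs a few minors reserves them once
 * at init time.  With a dynamic major it calls alloc_chrdev_region();
 * with a fixed, historically assigned major it would call
 * register_chrdev_region() on MKDEV(major, baseminor) instead.
 */
static dev_t example_devt;	/* first reserved device number */

static int example_reserve_numbers(void)
{
	int err;

	/* dynamic major, minors 0..3, listed as "example" in /proc/devices */
	err = alloc_chrdev_region(&example_devt, 0, 4, "example");
	if (err)
		return err;

	printk(KERN_INFO "example: using major %d\n", MAJOR(example_devt));
	return 0;
}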
/*
 * Legacy interface: claims minors 0..255 of @major and binds them all to
 * a single set of file operations.  Passing major == 0 picks a free major
 * and returns it; otherwise zero is returned on success.
 */
int register_chrdev(unsigned int major, const char *name,
		    const struct file_operations *fops)
{
	struct char_device_struct *cd;
	struct cdev *cdev;
	char *s;
	int err = -ENOMEM;

	cd = __register_chrdev_region(major, 0, 256, name);
	if (IS_ERR(cd))
		return PTR_ERR(cd);

	cdev = cdev_alloc();
	if (!cdev)
		goto out2;

	cdev->owner = fops->owner;
	cdev->ops = fops;
	kobject_set_name(&cdev->kobj, "%s", name);
	/* '/' is not allowed in kobject names; replace it with '!' */
	for (s = strchr(kobject_name(&cdev->kobj), '/'); s; s = strchr(s, '/'))
		*s = '!';

	err = cdev_add(cdev, MKDEV(cd->major, 0), 256);
	if (err)
		goto out;

	cd->cdev = cdev;

	return major ? 0 : cd->major;
out:
	kobject_put(&cdev->kobj);
out2:
	kfree(__unregister_chrdev_region(cd->major, 0, 256));
	return err;
}
void unregister_chrdev_region(dev_t from, unsigned count)
{
	dev_t to = from + count;
	dev_t n, next;

	for (n = from; n < to; n = next) {
		next = MKDEV(MAJOR(n)+1, 0);
		if (next > to)
			next = to;
		kfree(__unregister_chrdev_region(MAJOR(n), MINOR(n), next - n));
	}
}
int unregister_chrdev(unsigned int major, const char *name)
{
	struct char_device_struct *cd;
	cd = __unregister_chrdev_region(major, 0, 256);
	if (cd && cd->cdev)
		cdev_del(cd->cdev);
	kfree(cd);
	return 0;
}
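/*
 * Sketch of the legacy pair in use (hypothetical driver, placeholder names
 * such as example_major): register_chrdev(0, ...) asks for a free major and
 * returns it, and unregister_chrdev() releases it again at module exit.
 */
static int example_major;

static int example_legacy_register(const struct file_operations *fops)
{
	int ret = register_chrdev(0, "example_legacy", fops);

	if (ret < 0)
		return ret;		/* registration failed */
	example_major = ret;		/* dynamically chosen major */
	return 0;
}

static void example_legacy_unregister(void)
{
	unregister_chrdev(example_major, "example_legacy");
}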
static DEFINE_SPINLOCK(cdev_lock);

static struct kobject *cdev_get(struct cdev *p)
{
	struct module *owner = p->owner;
	struct kobject *kobj;

	if (owner && !try_module_get(owner))
		return NULL;
	kobj = kobject_get(&p->kobj);
	if (!kobj)
		module_put(owner);
	return kobj;
}

void cdev_put(struct cdev *p)
{
	if (p) {
		struct module *owner = p->owner;
		kobject_put(&p->kobj);
		module_put(owner);
	}
}
/*
 * Called every time a character special file is opened
 */
int chrdev_open(struct inode * inode, struct file * filp)
{
	struct cdev *p;
	struct cdev *new = NULL;
	int ret = 0;

	spin_lock(&cdev_lock);
	p = inode->i_cdev;
	if (!p) {
		struct kobject *kobj;
		int idx;
		spin_unlock(&cdev_lock);
		kobj = kobj_lookup(cdev_map, inode->i_rdev, &idx);
		if (!kobj)
			return -ENXIO;
		new = container_of(kobj, struct cdev, kobj);
		spin_lock(&cdev_lock);
		/* someone else may have attached a cdev while the lock was
		 * dropped for the lookup; recheck before caching ours */
		p = inode->i_cdev;
		if (!p) {
			inode->i_cdev = p = new;
			inode->i_cindex = idx;
			list_add(&inode->i_devices, &p->list);
			new = NULL;
		} else if (!cdev_get(p))
			ret = -ENXIO;
	} else if (!cdev_get(p))
		ret = -ENXIO;
	spin_unlock(&cdev_lock);
	cdev_put(new);
	if (ret)
		return ret;
	filp->f_op = fops_get(p->ops);
	if (!filp->f_op) {
		cdev_put(p);
		return -ENXIO;
	}
	if (filp->f_op->open) {
		lock_kernel();
		ret = filp->f_op->open(inode, filp);
		unlock_kernel();
	}
	if (ret)
		cdev_put(p);
	return ret;
}
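/*
 * Sketch of what a driver's ->open typically does once chrdev_open() above
 * has installed the real fops (hypothetical driver, placeholder names): the
 * struct cdev cached in inode->i_cdev is normally embedded in a larger
 * per-device structure, which container_of() recovers.
 */
struct example_dev {
	struct cdev cdev;	/* the cdev registered for this device */
	/* ... per-device state ... */
};

static int example_open(struct inode *inode, struct file *filp)
{
	struct example_dev *dev;

	dev = container_of(inode->i_cdev, struct example_dev, cdev);
	filp->private_data = dev;	/* available in read/write/release */
	return 0;
}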
void cd_forget(struct inode *inode)
{
	spin_lock(&cdev_lock);
	list_del_init(&inode->i_devices);
	inode->i_cdev = NULL;
	spin_unlock(&cdev_lock);
}

static void cdev_purge(struct cdev *cdev)
{
	spin_lock(&cdev_lock);
	while (!list_empty(&cdev->list)) {
		struct inode *inode;
		inode = container_of(cdev->list.next, struct inode, i_devices);
		list_del_init(&inode->i_devices);
		inode->i_cdev = NULL;
	}
	spin_unlock(&cdev_lock);
}
/*
 * Dummy default file-operations: the only thing this does
 * is contain the open that then fills in the correct operations
 * depending on the special file...
 */
const struct file_operations def_chr_fops = {
	.open = chrdev_open,
};
static struct kobject *exact_match(dev_t dev, int *part, void *data)
{
	struct cdev *p = data;
	return &p->kobj;
}

static int exact_lock(dev_t dev, void *data)
{
	struct cdev *p = data;
	return cdev_get(p) ? 0 : -1;
}
/*
 * Make @count device numbers starting at @dev answer to @p.  The device
 * becomes live as soon as this returns successfully.
 */
int cdev_add(struct cdev *p, dev_t dev, unsigned count)
{
	p->dev = dev;
	p->count = count;
	return kobj_map(cdev_map, dev, count, NULL, exact_match, exact_lock, p);
}
static void cdev_unmap(dev_t dev, unsigned count)
{
	kobj_unmap(cdev_map, dev, count);
}

void cdev_del(struct cdev *p)
{
	cdev_unmap(p->dev, p->count);
	kobject_put(&p->kobj);
}
static void cdev_default_release(struct kobject *kobj)
{
	struct cdev *p = container_of(kobj, struct cdev, kobj);
	cdev_purge(p);
}

static void cdev_dynamic_release(struct kobject *kobj)
{
	struct cdev *p = container_of(kobj, struct cdev, kobj);
	cdev_purge(p);
	kfree(p);
}

static struct kobj_type ktype_cdev_default = {
	.release	= cdev_default_release,
};

static struct kobj_type ktype_cdev_dynamic = {
	.release	= cdev_dynamic_release,
};
struct cdev *cdev_alloc(void)
{
	struct cdev *p = kzalloc(sizeof(struct cdev), GFP_KERNEL);
	if (p) {
		p->kobj.ktype = &ktype_cdev_dynamic;
		INIT_LIST_HEAD(&p->list);
		kobject_init(&p->kobj);
	}
	return p;
}

void cdev_init(struct cdev *cdev, const struct file_operations *fops)
{
	memset(cdev, 0, sizeof *cdev);
	INIT_LIST_HEAD(&cdev->list);
	cdev->kobj.ktype = &ktype_cdev_default;
	kobject_init(&cdev->kobj);
	cdev->ops = fops;
}
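/*
 * Registration sketch tying the pieces together (hypothetical driver,
 * placeholder names, building on the example_dev sketch above): an
 * embedded cdev is initialised with cdev_init(), its owner is set, and it
 * goes live with cdev_add().  The device number is assumed to have been
 * reserved beforehand, e.g. with alloc_chrdev_region().
 */
static const struct file_operations example_fops = {
	.owner	= THIS_MODULE,
	.open	= example_open,		/* from the sketch above */
};

static int example_setup_cdev(struct example_dev *dev, dev_t devno)
{
	cdev_init(&dev->cdev, &example_fops);
	dev->cdev.owner = THIS_MODULE;

	/* one minor per device; fails if the number is already mapped */
	return cdev_add(&dev->cdev, devno, 1);
}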
static struct kobject *base_probe(dev_t dev, int *part, void *data)
{
	if (request_module("char-major-%d-%d", MAJOR(dev), MINOR(dev)) > 0)
		/* Make old-style 2.4 aliases work */
		request_module("char-major-%d", MAJOR(dev));
	return NULL;
}
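/*
 * Autoload sketch (hypothetical driver module owning major 42): the
 * request_module() calls above are matched by an alias that the driver
 * declares, so opening an unbound device node with that major pulls the
 * module in on demand.
 */
MODULE_ALIAS_CHARDEV_MAJOR(42);		/* i.e. MODULE_ALIAS("char-major-42-*") */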
void __init chrdev_init(void)
{
	cdev_map = kobj_map_init(base_probe, &chrdevs_lock);
}
/* Let modules do char dev stuff */
EXPORT_SYMBOL(register_chrdev_region);
EXPORT_SYMBOL(unregister_chrdev_region);
EXPORT_SYMBOL(alloc_chrdev_region);
EXPORT_SYMBOL(cdev_init);
EXPORT_SYMBOL(cdev_alloc);
EXPORT_SYMBOL(cdev_del);
EXPORT_SYMBOL(cdev_add);
EXPORT_SYMBOL(register_chrdev);
EXPORT_SYMBOL(unregister_chrdev);