[PATCH] slab: fix drain_array() so that it works correctly with the shared_array
[linux/fpc-iii.git] / fs / char_dev.c
blob5c36345c9bf78376b6996604d98aba2679d6ed09
1 /*
2 * linux/fs/char_dev.c
4 * Copyright (C) 1991, 1992 Linus Torvalds
5 */
7 #include <linux/config.h>
8 #include <linux/init.h>
9 #include <linux/fs.h>
10 #include <linux/slab.h>
11 #include <linux/string.h>
13 #include <linux/major.h>
14 #include <linux/errno.h>
15 #include <linux/module.h>
16 #include <linux/smp_lock.h>
17 #include <linux/devfs_fs_kernel.h>
19 #include <linux/kobject.h>
20 #include <linux/kobj_map.h>
21 #include <linux/cdev.h>
22 #include <linux/mutex.h>
24 #ifdef CONFIG_KMOD
25 #include <linux/kmod.h>
26 #endif
/* Map of dev_t ranges -> struct cdev, populated by cdev_add(). */
static struct kobj_map *cdev_map;

#define MAX_PROBE_HASH 255	/* random */

/* Serialises all access to the chrdevs[] hash table below. */
static DEFINE_MUTEX(chrdevs_lock);
/*
 * One registered major/minor range of character devices.  Entries hang
 * off the chrdevs[] hash table, chained via ->next and kept sorted by
 * (major, baseminor) within each chain.  Protected by chrdevs_lock.
 */
static struct char_device_struct {
	struct char_device_struct *next;	/* next entry on this hash chain */
	unsigned int major;
	unsigned int baseminor;			/* first minor of the range */
	int minorct;				/* number of minors in the range */
	char name[64];
	struct file_operations *fops;
	struct cdev *cdev;		/* will die */
} *chrdevs[MAX_PROBE_HASH];
44 /* index in the above */
45 static inline int major_to_index(int major)
47 return major % MAX_PROBE_HASH;
/* Cursor state for the /proc iteration over chrdevs[]. */
struct chrdev_info {
	int index;			/* current chain in chrdevs[] */
	struct char_device_struct *cd;	/* current entry; NULL when exhausted */
};
55 void *get_next_chrdev(void *dev)
57 struct chrdev_info *info;
59 if (dev == NULL) {
60 info = kmalloc(sizeof(*info), GFP_KERNEL);
61 if (!info)
62 goto out;
63 info->index=0;
64 info->cd = chrdevs[info->index];
65 if (info->cd)
66 goto out;
67 } else {
68 info = dev;
71 while (info->index < ARRAY_SIZE(chrdevs)) {
72 if (info->cd)
73 info->cd = info->cd->next;
74 if (info->cd)
75 goto out;
77 * No devices on this chain, move to the next
79 info->index++;
80 info->cd = (info->index < ARRAY_SIZE(chrdevs)) ?
81 chrdevs[info->index] : NULL;
82 if (info->cd)
83 goto out;
86 out:
87 return info;
90 void *acquire_chrdev_list(void)
92 mutex_lock(&chrdevs_lock);
93 return get_next_chrdev(NULL);
96 void release_chrdev_list(void *dev)
98 mutex_unlock(&chrdevs_lock);
99 kfree(dev);
103 int count_chrdev_list(void)
105 struct char_device_struct *cd;
106 int i, count;
108 count = 0;
110 for (i = 0; i < ARRAY_SIZE(chrdevs) ; i++) {
111 for (cd = chrdevs[i]; cd; cd = cd->next)
112 count++;
115 return count;
118 int get_chrdev_info(void *dev, int *major, char **name)
120 struct chrdev_info *info = dev;
122 if (info->cd == NULL)
123 return 1;
125 *major = info->cd->major;
126 *name = info->cd->name;
127 return 0;
131 * Register a single major with a specified minor range.
133 * If major == 0 this functions will dynamically allocate a major and return
134 * its number.
136 * If major > 0 this function will attempt to reserve the passed range of
137 * minors and will return zero on success.
139 * Returns a -ve errno on failure.
141 static struct char_device_struct *
142 __register_chrdev_region(unsigned int major, unsigned int baseminor,
143 int minorct, const char *name)
145 struct char_device_struct *cd, **cp;
146 int ret = 0;
147 int i;
149 cd = kmalloc(sizeof(struct char_device_struct), GFP_KERNEL);
150 if (cd == NULL)
151 return ERR_PTR(-ENOMEM);
153 memset(cd, 0, sizeof(struct char_device_struct));
155 mutex_lock(&chrdevs_lock);
157 /* temporary */
158 if (major == 0) {
159 for (i = ARRAY_SIZE(chrdevs)-1; i > 0; i--) {
160 if (chrdevs[i] == NULL)
161 break;
164 if (i == 0) {
165 ret = -EBUSY;
166 goto out;
168 major = i;
169 ret = major;
172 cd->major = major;
173 cd->baseminor = baseminor;
174 cd->minorct = minorct;
175 strncpy(cd->name,name, 64);
177 i = major_to_index(major);
179 for (cp = &chrdevs[i]; *cp; cp = &(*cp)->next)
180 if ((*cp)->major > major ||
181 ((*cp)->major == major && (*cp)->baseminor >= baseminor))
182 break;
183 if (*cp && (*cp)->major == major &&
184 (*cp)->baseminor < baseminor + minorct) {
185 ret = -EBUSY;
186 goto out;
188 cd->next = *cp;
189 *cp = cd;
190 mutex_unlock(&chrdevs_lock);
191 return cd;
192 out:
193 mutex_unlock(&chrdevs_lock);
194 kfree(cd);
195 return ERR_PTR(ret);
198 static struct char_device_struct *
199 __unregister_chrdev_region(unsigned major, unsigned baseminor, int minorct)
201 struct char_device_struct *cd = NULL, **cp;
202 int i = major_to_index(major);
204 mutex_lock(&chrdevs_lock);
205 for (cp = &chrdevs[i]; *cp; cp = &(*cp)->next)
206 if ((*cp)->major == major &&
207 (*cp)->baseminor == baseminor &&
208 (*cp)->minorct == minorct)
209 break;
210 if (*cp) {
211 cd = *cp;
212 *cp = cd->next;
214 mutex_unlock(&chrdevs_lock);
215 return cd;
218 int register_chrdev_region(dev_t from, unsigned count, const char *name)
220 struct char_device_struct *cd;
221 dev_t to = from + count;
222 dev_t n, next;
224 for (n = from; n < to; n = next) {
225 next = MKDEV(MAJOR(n)+1, 0);
226 if (next > to)
227 next = to;
228 cd = __register_chrdev_region(MAJOR(n), MINOR(n),
229 next - n, name);
230 if (IS_ERR(cd))
231 goto fail;
233 return 0;
234 fail:
235 to = n;
236 for (n = from; n < to; n = next) {
237 next = MKDEV(MAJOR(n)+1, 0);
238 kfree(__unregister_chrdev_region(MAJOR(n), MINOR(n), next - n));
240 return PTR_ERR(cd);
243 int alloc_chrdev_region(dev_t *dev, unsigned baseminor, unsigned count,
244 const char *name)
246 struct char_device_struct *cd;
247 cd = __register_chrdev_region(0, baseminor, count, name);
248 if (IS_ERR(cd))
249 return PTR_ERR(cd);
250 *dev = MKDEV(cd->major, cd->baseminor);
251 return 0;
254 int register_chrdev(unsigned int major, const char *name,
255 struct file_operations *fops)
257 struct char_device_struct *cd;
258 struct cdev *cdev;
259 char *s;
260 int err = -ENOMEM;
262 cd = __register_chrdev_region(major, 0, 256, name);
263 if (IS_ERR(cd))
264 return PTR_ERR(cd);
266 cdev = cdev_alloc();
267 if (!cdev)
268 goto out2;
270 cdev->owner = fops->owner;
271 cdev->ops = fops;
272 kobject_set_name(&cdev->kobj, "%s", name);
273 for (s = strchr(kobject_name(&cdev->kobj),'/'); s; s = strchr(s, '/'))
274 *s = '!';
276 err = cdev_add(cdev, MKDEV(cd->major, 0), 256);
277 if (err)
278 goto out;
280 cd->cdev = cdev;
282 return major ? 0 : cd->major;
283 out:
284 kobject_put(&cdev->kobj);
285 out2:
286 kfree(__unregister_chrdev_region(cd->major, 0, 256));
287 return err;
290 void unregister_chrdev_region(dev_t from, unsigned count)
292 dev_t to = from + count;
293 dev_t n, next;
295 for (n = from; n < to; n = next) {
296 next = MKDEV(MAJOR(n)+1, 0);
297 if (next > to)
298 next = to;
299 kfree(__unregister_chrdev_region(MAJOR(n), MINOR(n), next - n));
303 int unregister_chrdev(unsigned int major, const char *name)
305 struct char_device_struct *cd;
306 cd = __unregister_chrdev_region(major, 0, 256);
307 if (cd && cd->cdev)
308 cdev_del(cd->cdev);
309 kfree(cd);
310 return 0;
313 static DEFINE_SPINLOCK(cdev_lock);
315 static struct kobject *cdev_get(struct cdev *p)
317 struct module *owner = p->owner;
318 struct kobject *kobj;
320 if (owner && !try_module_get(owner))
321 return NULL;
322 kobj = kobject_get(&p->kobj);
323 if (!kobj)
324 module_put(owner);
325 return kobj;
328 void cdev_put(struct cdev *p)
330 if (p) {
331 struct module *owner = p->owner;
332 kobject_put(&p->kobj);
333 module_put(owner);
338 * Called every time a character special file is opened
340 int chrdev_open(struct inode * inode, struct file * filp)
342 struct cdev *p;
343 struct cdev *new = NULL;
344 int ret = 0;
346 spin_lock(&cdev_lock);
347 p = inode->i_cdev;
348 if (!p) {
349 struct kobject *kobj;
350 int idx;
351 spin_unlock(&cdev_lock);
352 kobj = kobj_lookup(cdev_map, inode->i_rdev, &idx);
353 if (!kobj)
354 return -ENXIO;
355 new = container_of(kobj, struct cdev, kobj);
356 spin_lock(&cdev_lock);
357 p = inode->i_cdev;
358 if (!p) {
359 inode->i_cdev = p = new;
360 inode->i_cindex = idx;
361 list_add(&inode->i_devices, &p->list);
362 new = NULL;
363 } else if (!cdev_get(p))
364 ret = -ENXIO;
365 } else if (!cdev_get(p))
366 ret = -ENXIO;
367 spin_unlock(&cdev_lock);
368 cdev_put(new);
369 if (ret)
370 return ret;
371 filp->f_op = fops_get(p->ops);
372 if (!filp->f_op) {
373 cdev_put(p);
374 return -ENXIO;
376 if (filp->f_op->open) {
377 lock_kernel();
378 ret = filp->f_op->open(inode,filp);
379 unlock_kernel();
381 if (ret)
382 cdev_put(p);
383 return ret;
386 void cd_forget(struct inode *inode)
388 spin_lock(&cdev_lock);
389 list_del_init(&inode->i_devices);
390 inode->i_cdev = NULL;
391 spin_unlock(&cdev_lock);
394 static void cdev_purge(struct cdev *cdev)
396 spin_lock(&cdev_lock);
397 while (!list_empty(&cdev->list)) {
398 struct inode *inode;
399 inode = container_of(cdev->list.next, struct inode, i_devices);
400 list_del_init(&inode->i_devices);
401 inode->i_cdev = NULL;
403 spin_unlock(&cdev_lock);
407 * Dummy default file-operations: the only thing this does
408 * is contain the open that then fills in the correct operations
409 * depending on the special file...
411 struct file_operations def_chr_fops = {
412 .open = chrdev_open,
415 static struct kobject *exact_match(dev_t dev, int *part, void *data)
417 struct cdev *p = data;
418 return &p->kobj;
421 static int exact_lock(dev_t dev, void *data)
423 struct cdev *p = data;
424 return cdev_get(p) ? 0 : -1;
427 int cdev_add(struct cdev *p, dev_t dev, unsigned count)
429 p->dev = dev;
430 p->count = count;
431 return kobj_map(cdev_map, dev, count, NULL, exact_match, exact_lock, p);
434 static void cdev_unmap(dev_t dev, unsigned count)
436 kobj_unmap(cdev_map, dev, count);
439 void cdev_del(struct cdev *p)
441 cdev_unmap(p->dev, p->count);
442 kobject_put(&p->kobj);
446 static void cdev_default_release(struct kobject *kobj)
448 struct cdev *p = container_of(kobj, struct cdev, kobj);
449 cdev_purge(p);
452 static void cdev_dynamic_release(struct kobject *kobj)
454 struct cdev *p = container_of(kobj, struct cdev, kobj);
455 cdev_purge(p);
456 kfree(p);
459 static struct kobj_type ktype_cdev_default = {
460 .release = cdev_default_release,
463 static struct kobj_type ktype_cdev_dynamic = {
464 .release = cdev_dynamic_release,
467 struct cdev *cdev_alloc(void)
469 struct cdev *p = kmalloc(sizeof(struct cdev), GFP_KERNEL);
470 if (p) {
471 memset(p, 0, sizeof(struct cdev));
472 p->kobj.ktype = &ktype_cdev_dynamic;
473 INIT_LIST_HEAD(&p->list);
474 kobject_init(&p->kobj);
476 return p;
479 void cdev_init(struct cdev *cdev, struct file_operations *fops)
481 memset(cdev, 0, sizeof *cdev);
482 INIT_LIST_HEAD(&cdev->list);
483 cdev->kobj.ktype = &ktype_cdev_default;
484 kobject_init(&cdev->kobj);
485 cdev->ops = fops;
488 static struct kobject *base_probe(dev_t dev, int *part, void *data)
490 if (request_module("char-major-%d-%d", MAJOR(dev), MINOR(dev)) > 0)
491 /* Make old-style 2.4 aliases work */
492 request_module("char-major-%d", MAJOR(dev));
493 return NULL;
496 void __init chrdev_init(void)
498 cdev_map = kobj_map_init(base_probe, &chrdevs_lock);
/* Let modules do char dev stuff */
EXPORT_SYMBOL(register_chrdev_region);
EXPORT_SYMBOL(unregister_chrdev_region);
EXPORT_SYMBOL(alloc_chrdev_region);
EXPORT_SYMBOL(cdev_init);
EXPORT_SYMBOL(cdev_alloc);
EXPORT_SYMBOL(cdev_del);
EXPORT_SYMBOL(cdev_add);
EXPORT_SYMBOL(register_chrdev);
EXPORT_SYMBOL(unregister_chrdev);