/*
 *  linux/fs/char_dev.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/string.h>

#include <linux/major.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/smp_lock.h>
#include <linux/seq_file.h>

#include <linux/kobject.h>
#include <linux/kobj_map.h>
#include <linux/cdev.h>
#include <linux/mutex.h>
#include <linux/backing-dev.h>

#ifdef CONFIG_KMOD
#include <linux/kmod.h>
#endif

/*
 * capabilities for /dev/mem, /dev/kmem and similar directly mappable character
 * devices
 * - permits shared-mmap for read, write and/or exec
 * - does not permit private mmap in NOMMU mode (can't do COW)
 * - no readahead or I/O queue unplugging required
 */
struct backing_dev_info directly_mappable_cdev_bdi = {
        .capabilities   = (
#ifdef CONFIG_MMU
                /* permit private copies of the data to be taken */
                BDI_CAP_MAP_COPY |
#endif
                /* permit direct mmap, for read, write or exec */
                BDI_CAP_MAP_DIRECT |
                BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP | BDI_CAP_EXEC_MAP),
};
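/*
 * Illustrative use (an assumption about typical callers, not taken from this
 * file): a driver whose device memory may be mmap()ed directly can point an
 * open file at this BDI from its open() method, e.g.
 *
 *      file->f_mapping->backing_dev_info = &directly_mappable_cdev_bdi;
 *
 * so that the VM honours the capability flags above instead of the defaults.
 */
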
static struct kobj_map *cdev_map;

static DEFINE_MUTEX(chrdevs_lock);

static struct char_device_struct {
        struct char_device_struct *next;
        unsigned int major;
        unsigned int baseminor;
        int minorct;
        char name[64];
        struct file_operations *fops;
        struct cdev *cdev;              /* will die */
} *chrdevs[CHRDEV_MAJOR_HASH_SIZE];

/* index in the above */
static inline int major_to_index(int major)
{
        return major % CHRDEV_MAJOR_HASH_SIZE;
}

#ifdef CONFIG_PROC_FS

void chrdev_show(struct seq_file *f, off_t offset)
{
        struct char_device_struct *cd;

        if (offset < CHRDEV_MAJOR_HASH_SIZE) {
                mutex_lock(&chrdevs_lock);
                for (cd = chrdevs[offset]; cd; cd = cd->next)
                        seq_printf(f, "%3d %s\n", cd->major, cd->name);
                mutex_unlock(&chrdevs_lock);
        }
}

#endif /* CONFIG_PROC_FS */

/*
 * Register a single major with a specified minor range.
 *
 * If major == 0 this function will dynamically allocate a major and return
 * its number.
 *
 * If major > 0 this function will attempt to reserve the passed range of
 * minors and will return zero on success.
 *
 * Returns a -ve errno on failure.
 */
static struct char_device_struct *
__register_chrdev_region(unsigned int major, unsigned int baseminor,
                         int minorct, const char *name)
{
        struct char_device_struct *cd, **cp;
        int ret = 0;
        int i;

        cd = kzalloc(sizeof(struct char_device_struct), GFP_KERNEL);
        if (cd == NULL)
                return ERR_PTR(-ENOMEM);

        mutex_lock(&chrdevs_lock);

        /* temporary */
        if (major == 0) {
                for (i = ARRAY_SIZE(chrdevs)-1; i > 0; i--) {
                        if (chrdevs[i] == NULL)
                                break;
                }

                if (i == 0) {
                        ret = -EBUSY;
                        goto out;
                }
                major = i;
                ret = major;
        }

        cd->major = major;
        cd->baseminor = baseminor;
        cd->minorct = minorct;
        strncpy(cd->name, name, 64);

        i = major_to_index(major);

        for (cp = &chrdevs[i]; *cp; cp = &(*cp)->next)
                if ((*cp)->major > major ||
                    ((*cp)->major == major && (*cp)->baseminor >= baseminor))
                        break;
        if (*cp && (*cp)->major == major &&
            (*cp)->baseminor < baseminor + minorct) {
                ret = -EBUSY;
                goto out;
        }
        cd->next = *cp;
        *cp = cd;
        mutex_unlock(&chrdevs_lock);
        return cd;
out:
        mutex_unlock(&chrdevs_lock);
        kfree(cd);
        return ERR_PTR(ret);
}

static struct char_device_struct *
__unregister_chrdev_region(unsigned major, unsigned baseminor, int minorct)
{
        struct char_device_struct *cd = NULL, **cp;
        int i = major_to_index(major);

        mutex_lock(&chrdevs_lock);
        for (cp = &chrdevs[i]; *cp; cp = &(*cp)->next)
                if ((*cp)->major == major &&
                    (*cp)->baseminor == baseminor &&
                    (*cp)->minorct == minorct)
                        break;
        if (*cp) {
                cd = *cp;
                *cp = cd->next;
        }
        mutex_unlock(&chrdevs_lock);
        return cd;
}

int register_chrdev_region(dev_t from, unsigned count, const char *name)
{
        struct char_device_struct *cd;
        dev_t to = from + count;
        dev_t n, next;

        for (n = from; n < to; n = next) {
                next = MKDEV(MAJOR(n)+1, 0);
                if (next > to)
                        next = to;
                cd = __register_chrdev_region(MAJOR(n), MINOR(n),
                               next - n, name);
                if (IS_ERR(cd))
                        goto fail;
        }
        return 0;
fail:
        to = n;
        for (n = from; n < to; n = next) {
                next = MKDEV(MAJOR(n)+1, 0);
                kfree(__unregister_chrdev_region(MAJOR(n), MINOR(n), next - n));
        }
        return PTR_ERR(cd);
}

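/*
 * Illustrative sketch (assumed driver code, not part of char_dev.c): how a
 * driver with a fixed, pre-assigned major might reserve its minors with
 * register_chrdev_region().  The major 240 and the name "example_fixed"
 * are placeholders chosen for this example only.
 */
#if 0   /* example only, not compiled */
static int __init example_fixed_init(void)
{
        dev_t base = MKDEV(240, 0);     /* hypothetical fixed major, minors 0-3 */
        int err;

        err = register_chrdev_region(base, 4, "example_fixed");
        if (err)
                return err;             /* range already taken, or other error */
        /* cdev_init()/cdev_add() for the four minors would follow here */
        return 0;
}

static void __exit example_fixed_exit(void)
{
        unregister_chrdev_region(MKDEV(240, 0), 4);
}
#endif
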
int alloc_chrdev_region(dev_t *dev, unsigned baseminor, unsigned count,
                        const char *name)
{
        struct char_device_struct *cd;
        cd = __register_chrdev_region(0, baseminor, count, name);
        if (IS_ERR(cd))
                return PTR_ERR(cd);
        *dev = MKDEV(cd->major, cd->baseminor);
        return 0;
}

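/*
 * Illustrative sketch (assumed driver code, not part of char_dev.c): the
 * usual dynamic-major pattern, pairing alloc_chrdev_region() with
 * cdev_alloc() and cdev_add().  The example_dyn_* names are hypothetical.
 */
#if 0   /* example only, not compiled */
static const struct file_operations example_dyn_fops = {
        .owner  = THIS_MODULE,
        /* the real driver's .open/.read/.write would go here */
};

static dev_t example_dyn_dev;
static struct cdev *example_dyn_cdev;

static int __init example_dyn_init(void)
{
        int err;

        /* reserve one minor under a dynamically chosen major */
        err = alloc_chrdev_region(&example_dyn_dev, 0, 1, "example_dyn");
        if (err)
                return err;

        example_dyn_cdev = cdev_alloc();
        if (!example_dyn_cdev) {
                err = -ENOMEM;
                goto out_region;
        }
        example_dyn_cdev->ops = &example_dyn_fops;
        example_dyn_cdev->owner = THIS_MODULE;

        /* from here on, opens of example_dyn_dev reach example_dyn_fops */
        err = cdev_add(example_dyn_cdev, example_dyn_dev, 1);
        if (err)
                goto out_cdev;
        return 0;

out_cdev:
        kobject_put(&example_dyn_cdev->kobj);   /* frees the dynamic cdev */
out_region:
        unregister_chrdev_region(example_dyn_dev, 1);
        return err;
}
#endif
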
/**
 * register_chrdev() - Register a major number for character devices.
 * @major: major device number or 0 for dynamic allocation
 * @name: name of this range of devices
 * @fops: file operations associated with these devices
 *
 * If @major == 0 this function will dynamically allocate a major and return
 * its number.
 *
 * If @major > 0 this function will attempt to reserve a device with the given
 * major number and will return zero on success.
 *
 * Returns a -ve errno on failure.
 *
 * The name of this device has nothing to do with the name of the device in
 * /dev. It only helps to keep track of the different owners of devices. If
 * your module implements only one type of device it's OK to use e.g. the name
 * of the module here.
 *
 * This function registers a range of 256 minor numbers. The first minor number
 * is 0.
 */
int register_chrdev(unsigned int major, const char *name,
                    const struct file_operations *fops)
{
        struct char_device_struct *cd;
        struct cdev *cdev;
        char *s;
        int err = -ENOMEM;

        cd = __register_chrdev_region(major, 0, 256, name);
        if (IS_ERR(cd))
                return PTR_ERR(cd);

        cdev = cdev_alloc();
        if (!cdev)
                goto out2;

        cdev->owner = fops->owner;
        cdev->ops = fops;
        kobject_set_name(&cdev->kobj, "%s", name);
        for (s = strchr(kobject_name(&cdev->kobj),'/'); s; s = strchr(s, '/'))
                *s = '!';

        err = cdev_add(cdev, MKDEV(cd->major, 0), 256);
        if (err)
                goto out;

        cd->cdev = cdev;

        return major ? 0 : cd->major;
out:
        kobject_put(&cdev->kobj);
out2:
        kfree(__unregister_chrdev_region(cd->major, 0, 256));
        return err;
}

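/*
 * Illustrative sketch (assumed driver code, not part of char_dev.c): the
 * older one-call interface.  register_chrdev() claims all 256 minors of the
 * major and builds the cdev internally, so the caller does no cdev
 * management.  "example_legacy" and its fops are hypothetical names.
 */
#if 0   /* example only, not compiled */
static const struct file_operations example_legacy_fops = {
        .owner  = THIS_MODULE,
};

static int example_legacy_major;

static int __init example_legacy_init(void)
{
        int ret = register_chrdev(0, "example_legacy", &example_legacy_fops);
        if (ret < 0)
                return ret;
        example_legacy_major = ret;     /* dynamic major is returned on success */
        return 0;
}

static void __exit example_legacy_exit(void)
{
        unregister_chrdev(example_legacy_major, "example_legacy");
}
#endif
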
void unregister_chrdev_region(dev_t from, unsigned count)
{
        dev_t to = from + count;
        dev_t n, next;

        for (n = from; n < to; n = next) {
                next = MKDEV(MAJOR(n)+1, 0);
                if (next > to)
                        next = to;
                kfree(__unregister_chrdev_region(MAJOR(n), MINOR(n), next - n));
        }
}

int unregister_chrdev(unsigned int major, const char *name)
{
        struct char_device_struct *cd;
        cd = __unregister_chrdev_region(major, 0, 256);
        if (cd && cd->cdev)
                cdev_del(cd->cdev);
        kfree(cd);
        return 0;
}

static DEFINE_SPINLOCK(cdev_lock);

static struct kobject *cdev_get(struct cdev *p)
{
        struct module *owner = p->owner;
        struct kobject *kobj;

        if (owner && !try_module_get(owner))
                return NULL;
        kobj = kobject_get(&p->kobj);
        if (!kobj)
                module_put(owner);
        return kobj;
}

void cdev_put(struct cdev *p)
{
        if (p) {
                struct module *owner = p->owner;
                kobject_put(&p->kobj);
                module_put(owner);
        }
}

/*
 * Called every time a character special file is opened
 */
int chrdev_open(struct inode * inode, struct file * filp)
{
        struct cdev *p;
        struct cdev *new = NULL;
        int ret = 0;

        spin_lock(&cdev_lock);
        p = inode->i_cdev;
        if (!p) {
                struct kobject *kobj;
                int idx;
                spin_unlock(&cdev_lock);
                kobj = kobj_lookup(cdev_map, inode->i_rdev, &idx);
                if (!kobj)
                        return -ENXIO;
                new = container_of(kobj, struct cdev, kobj);
                spin_lock(&cdev_lock);
                p = inode->i_cdev;
                if (!p) {
                        inode->i_cdev = p = new;
                        inode->i_cindex = idx;
                        list_add(&inode->i_devices, &p->list);
                        new = NULL;
                } else if (!cdev_get(p))
                        ret = -ENXIO;
        } else if (!cdev_get(p))
                ret = -ENXIO;
        spin_unlock(&cdev_lock);
        cdev_put(new);
        if (ret)
                return ret;
        filp->f_op = fops_get(p->ops);
        if (!filp->f_op) {
                cdev_put(p);
                return -ENXIO;
        }
        if (filp->f_op->open) {
                lock_kernel();
                ret = filp->f_op->open(inode,filp);
                unlock_kernel();
        }
        if (ret)
                cdev_put(p);
        return ret;
}

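/*
 * The first open of a character-device inode takes the slow path above:
 * kobj_lookup() resolves inode->i_rdev through cdev_map (possibly invoking
 * base_probe() to load a module), and the resulting cdev is cached in
 * inode->i_cdev and linked onto cdev->list so cdev_purge()/cd_forget() can
 * find it again.  Subsequent opens of the same inode only need cdev_get()
 * under cdev_lock.  In both cases the cdev's file_operations then replace
 * the dummy def_chr_fops in filp->f_op before the driver's own open() runs.
 */
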
void cd_forget(struct inode *inode)
{
        spin_lock(&cdev_lock);
        list_del_init(&inode->i_devices);
        inode->i_cdev = NULL;
        spin_unlock(&cdev_lock);
}

static void cdev_purge(struct cdev *cdev)
{
        spin_lock(&cdev_lock);
        while (!list_empty(&cdev->list)) {
                struct inode *inode;
                inode = container_of(cdev->list.next, struct inode, i_devices);
                list_del_init(&inode->i_devices);
                inode->i_cdev = NULL;
        }
        spin_unlock(&cdev_lock);
}

/*
 * Dummy default file-operations: the only thing this does
 * is contain the open that then fills in the correct operations
 * depending on the special file...
 */
const struct file_operations def_chr_fops = {
        .open = chrdev_open,
};
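/*
 * def_chr_fops is what init_special_inode() installs as i_fop for every
 * character-device inode, so chrdev_open() above is the common entry point
 * regardless of which filesystem the device node lives on.
 */
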
static struct kobject *exact_match(dev_t dev, int *part, void *data)
{
        struct cdev *p = data;
        return &p->kobj;
}

static int exact_lock(dev_t dev, void *data)
{
        struct cdev *p = data;
        return cdev_get(p) ? 0 : -1;
}

int cdev_add(struct cdev *p, dev_t dev, unsigned count)
{
        p->dev = dev;
        p->count = count;
        return kobj_map(cdev_map, dev, count, NULL, exact_match, exact_lock, p);
}

static void cdev_unmap(dev_t dev, unsigned count)
{
        kobj_unmap(cdev_map, dev, count);
}

void cdev_del(struct cdev *p)
{
        cdev_unmap(p->dev, p->count);
        kobject_put(&p->kobj);
}

static void cdev_default_release(struct kobject *kobj)
{
        struct cdev *p = container_of(kobj, struct cdev, kobj);
        cdev_purge(p);
}

static void cdev_dynamic_release(struct kobject *kobj)
{
        struct cdev *p = container_of(kobj, struct cdev, kobj);
        cdev_purge(p);
        kfree(p);
}

static struct kobj_type ktype_cdev_default = {
        .release        = cdev_default_release,
};

static struct kobj_type ktype_cdev_dynamic = {
        .release        = cdev_dynamic_release,
};

struct cdev *cdev_alloc(void)
{
        struct cdev *p = kzalloc(sizeof(struct cdev), GFP_KERNEL);
        if (p) {
                p->kobj.ktype = &ktype_cdev_dynamic;
                INIT_LIST_HEAD(&p->list);
                kobject_init(&p->kobj);
        }
        return p;
}

void cdev_init(struct cdev *cdev, const struct file_operations *fops)
{
        memset(cdev, 0, sizeof *cdev);
        INIT_LIST_HEAD(&cdev->list);
        cdev->kobj.ktype = &ktype_cdev_default;
        kobject_init(&cdev->kobj);
        cdev->ops = fops;
}
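/*
 * Illustrative sketch (assumed driver code, not part of char_dev.c): the
 * embedded-cdev pattern that cdev_init() is meant for - the cdev lives
 * inside a driver-private structure instead of coming from cdev_alloc().
 * The example_embed_* names are hypothetical.
 */
#if 0   /* example only, not compiled */
struct example_embed_dev {
        struct cdev cdev;               /* embedded, not allocated separately */
        /* driver-private state would follow */
};

static const struct file_operations example_embed_fops = {
        .owner  = THIS_MODULE,
};

static int example_embed_setup(struct example_embed_dev *d, dev_t devno)
{
        cdev_init(&d->cdev, &example_embed_fops);
        d->cdev.owner = THIS_MODULE;
        /* in the driver's open(), container_of(inode->i_cdev,
           struct example_embed_dev, cdev) recovers d */
        return cdev_add(&d->cdev, devno, 1);
}
#endif
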
static struct kobject *base_probe(dev_t dev, int *part, void *data)
{
        if (request_module("char-major-%d-%d", MAJOR(dev), MINOR(dev)) > 0)
                /* Make old-style 2.4 aliases work */
                request_module("char-major-%d", MAJOR(dev));
        return NULL;
}

void __init chrdev_init(void)
{
        cdev_map = kobj_map_init(base_probe, &chrdevs_lock);
}

/* Let modules do char dev stuff */
EXPORT_SYMBOL(register_chrdev_region);
EXPORT_SYMBOL(unregister_chrdev_region);
EXPORT_SYMBOL(alloc_chrdev_region);
EXPORT_SYMBOL(cdev_init);
EXPORT_SYMBOL(cdev_alloc);
EXPORT_SYMBOL(cdev_del);
EXPORT_SYMBOL(cdev_add);
EXPORT_SYMBOL(register_chrdev);
EXPORT_SYMBOL(unregister_chrdev);
EXPORT_SYMBOL(directly_mappable_cdev_bdi);