Linux 4.19.133
fs/char_dev.c  (linux/fpc-iii.git, blob 5fffd5050fb7a3538e43c503daa1804c6c95eb84)
// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/char_dev.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/kdev_t.h>
#include <linux/slab.h>
#include <linux/string.h>

#include <linux/major.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/seq_file.h>

#include <linux/kobject.h>
#include <linux/kobj_map.h>
#include <linux/cdev.h>
#include <linux/mutex.h>
#include <linux/backing-dev.h>
#include <linux/tty.h>

#include "internal.h"
static struct kobj_map *cdev_map;

static DEFINE_MUTEX(chrdevs_lock);

#define CHRDEV_MAJOR_HASH_SIZE 255

static struct char_device_struct {
	struct char_device_struct *next;
	unsigned int major;
	unsigned int baseminor;
	int minorct;
	char name[64];
	struct cdev *cdev;		/* will die */
} *chrdevs[CHRDEV_MAJOR_HASH_SIZE];
/* index in the above */
static inline int major_to_index(unsigned major)
{
	return major % CHRDEV_MAJOR_HASH_SIZE;
}
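
/*
 * Worked example of the hashing above: with CHRDEV_MAJOR_HASH_SIZE == 255,
 * a registered major of 300 lands in bucket 300 % 255 == 45, sharing that
 * bucket with major 45 itself; the per-bucket lists are therefore always
 * searched by exact major.
 */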

#ifdef CONFIG_PROC_FS

void chrdev_show(struct seq_file *f, off_t offset)
{
	struct char_device_struct *cd;

	mutex_lock(&chrdevs_lock);
	for (cd = chrdevs[major_to_index(offset)]; cd; cd = cd->next) {
		if (cd->major == offset)
			seq_printf(f, "%3d %s\n", cd->major, cd->name);
	}
	mutex_unlock(&chrdevs_lock);
}

#endif /* CONFIG_PROC_FS */
static int find_dynamic_major(void)
{
	int i;
	struct char_device_struct *cd;

	for (i = ARRAY_SIZE(chrdevs)-1; i >= CHRDEV_MAJOR_DYN_END; i--) {
		if (chrdevs[i] == NULL)
			return i;
	}

	for (i = CHRDEV_MAJOR_DYN_EXT_START;
	     i >= CHRDEV_MAJOR_DYN_EXT_END; i--) {
		for (cd = chrdevs[major_to_index(i)]; cd; cd = cd->next)
			if (cd->major == i)
				break;

		if (cd == NULL)
			return i;
	}

	return -EBUSY;
}
/*
 * Register a single major with a specified minor range.
 *
 * If major == 0 this function will dynamically allocate a major and return
 * its number.
 *
 * If major > 0 this function will attempt to reserve the passed range of
 * minors and will return zero on success.
 *
 * Returns a -ve errno on failure.
 */
static struct char_device_struct *
__register_chrdev_region(unsigned int major, unsigned int baseminor,
			   int minorct, const char *name)
{
	struct char_device_struct *cd, **cp;
	int ret = 0;
	int i;

	cd = kzalloc(sizeof(struct char_device_struct), GFP_KERNEL);
	if (cd == NULL)
		return ERR_PTR(-ENOMEM);

	mutex_lock(&chrdevs_lock);

	if (major == 0) {
		ret = find_dynamic_major();
		if (ret < 0) {
			pr_err("CHRDEV \"%s\" dynamic allocation region is full\n",
			       name);
			goto out;
		}
		major = ret;
	}

	if (major >= CHRDEV_MAJOR_MAX) {
		pr_err("CHRDEV \"%s\" major requested (%u) is greater than the maximum (%u)\n",
		       name, major, CHRDEV_MAJOR_MAX-1);
		ret = -EINVAL;
		goto out;
	}

	cd->major = major;
	cd->baseminor = baseminor;
	cd->minorct = minorct;
	strlcpy(cd->name, name, sizeof(cd->name));

	i = major_to_index(major);

	for (cp = &chrdevs[i]; *cp; cp = &(*cp)->next)
		if ((*cp)->major > major ||
		    ((*cp)->major == major &&
		     (((*cp)->baseminor >= baseminor) ||
		      ((*cp)->baseminor + (*cp)->minorct > baseminor))))
			break;

	/* Check for overlapping minor ranges.  */
	if (*cp && (*cp)->major == major) {
		int old_min = (*cp)->baseminor;
		int old_max = (*cp)->baseminor + (*cp)->minorct - 1;
		int new_min = baseminor;
		int new_max = baseminor + minorct - 1;

		/* New driver overlaps from the left.  */
		if (new_max >= old_min && new_max <= old_max) {
			ret = -EBUSY;
			goto out;
		}

		/* New driver overlaps from the right.  */
		if (new_min <= old_max && new_min >= old_min) {
			ret = -EBUSY;
			goto out;
		}

		if (new_min < old_min && new_max > old_max) {
			ret = -EBUSY;
			goto out;
		}
	}

	cd->next = *cp;
	*cp = cd;
	mutex_unlock(&chrdevs_lock);
	return cd;
out:
	mutex_unlock(&chrdevs_lock);
	kfree(cd);
	return ERR_PTR(ret);
}
static struct char_device_struct *
__unregister_chrdev_region(unsigned major, unsigned baseminor, int minorct)
{
	struct char_device_struct *cd = NULL, **cp;
	int i = major_to_index(major);

	mutex_lock(&chrdevs_lock);
	for (cp = &chrdevs[i]; *cp; cp = &(*cp)->next)
		if ((*cp)->major == major &&
		    (*cp)->baseminor == baseminor &&
		    (*cp)->minorct == minorct)
			break;
	if (*cp) {
		cd = *cp;
		*cp = cd->next;
	}
	mutex_unlock(&chrdevs_lock);
	return cd;
}
/**
 * register_chrdev_region() - register a range of device numbers
 * @from: the first in the desired range of device numbers; must include
 *        the major number.
 * @count: the number of consecutive device numbers required
 * @name: the name of the device or driver.
 *
 * Return value is zero on success, a negative error code on failure.
 */
int register_chrdev_region(dev_t from, unsigned count, const char *name)
{
	struct char_device_struct *cd;
	dev_t to = from + count;
	dev_t n, next;

	for (n = from; n < to; n = next) {
		next = MKDEV(MAJOR(n)+1, 0);
		if (next > to)
			next = to;
		cd = __register_chrdev_region(MAJOR(n), MINOR(n),
			       next - n, name);
		if (IS_ERR(cd))
			goto fail;
	}
	return 0;
fail:
	to = n;
	for (n = from; n < to; n = next) {
		next = MKDEV(MAJOR(n)+1, 0);
		kfree(__unregister_chrdev_region(MAJOR(n), MINOR(n), next - n));
	}
	return PTR_ERR(cd);
}
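
/*
 * Illustrative usage sketch for register_chrdev_region(): a driver that owns
 * a fixed major can reserve a small range of minors and release it again on
 * exit.  The major 240, count 4 and name "example" are hypothetical.
 *
 *	dev_t base = MKDEV(240, 0);
 *	int ret;
 *
 *	ret = register_chrdev_region(base, 4, "example");
 *	if (ret)
 *		return ret;
 *	...
 *	unregister_chrdev_region(base, 4);
 */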
/**
 * alloc_chrdev_region() - register a range of char device numbers
 * @dev: output parameter for first assigned number
 * @baseminor: first of the requested range of minor numbers
 * @count: the number of minor numbers required
 * @name: the name of the associated device or driver
 *
 * Allocates a range of char device numbers.  The major number will be
 * chosen dynamically, and returned (along with the first minor number)
 * in @dev.  Returns zero or a negative error code.
 */
int alloc_chrdev_region(dev_t *dev, unsigned baseminor, unsigned count,
			const char *name)
{
	struct char_device_struct *cd;
	cd = __register_chrdev_region(0, baseminor, count, name);
	if (IS_ERR(cd))
		return PTR_ERR(cd);
	*dev = MKDEV(cd->major, cd->baseminor);
	return 0;
}
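
/*
 * Illustrative usage sketch for alloc_chrdev_region(): the common pattern is
 * to let the kernel pick a major, then wire a cdev onto the same range.  The
 * names my_devt, my_cdev, my_fops and MY_MINORS are hypothetical.
 *
 *	int ret;
 *
 *	ret = alloc_chrdev_region(&my_devt, 0, MY_MINORS, "my_driver");
 *	if (ret)
 *		return ret;
 *	cdev_init(&my_cdev, &my_fops);
 *	my_cdev.owner = THIS_MODULE;
 *	ret = cdev_add(&my_cdev, my_devt, MY_MINORS);
 *	if (ret)
 *		unregister_chrdev_region(my_devt, MY_MINORS);
 */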
/**
 * __register_chrdev() - create and register a cdev occupying a range of minors
 * @major: major device number or 0 for dynamic allocation
 * @baseminor: first of the requested range of minor numbers
 * @count: the number of minor numbers required
 * @name: name of this range of devices
 * @fops: file operations associated with these devices
 *
 * If @major == 0 this function will dynamically allocate a major and return
 * its number.
 *
 * If @major > 0 this function will attempt to reserve a device with the given
 * major number and will return zero on success.
 *
 * Returns a -ve errno on failure.
 *
 * The name of this device has nothing to do with the name of the device in
 * /dev. It only helps to keep track of the different owners of devices. If
 * your module has only one type of devices it's ok to use e.g. the name
 * of the module here.
 */
int __register_chrdev(unsigned int major, unsigned int baseminor,
		      unsigned int count, const char *name,
		      const struct file_operations *fops)
{
	struct char_device_struct *cd;
	struct cdev *cdev;
	int err = -ENOMEM;

	cd = __register_chrdev_region(major, baseminor, count, name);
	if (IS_ERR(cd))
		return PTR_ERR(cd);

	cdev = cdev_alloc();
	if (!cdev)
		goto out2;

	cdev->owner = fops->owner;
	cdev->ops = fops;
	kobject_set_name(&cdev->kobj, "%s", name);

	err = cdev_add(cdev, MKDEV(cd->major, baseminor), count);
	if (err)
		goto out;

	cd->cdev = cdev;

	return major ? 0 : cd->major;
out:
	kobject_put(&cdev->kobj);
out2:
	kfree(__unregister_chrdev_region(cd->major, baseminor, count));
	return err;
}
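
/*
 * Usage note: most callers do not use __register_chrdev() directly; the
 * register_chrdev()/unregister_chrdev() helpers in <linux/fs.h> wrap it with
 * baseminor 0 and a count of 256 minors.  A minimal sketch (my_fops is
 * hypothetical):
 *
 *	int major = register_chrdev(0, "my_driver", &my_fops);
 *	if (major < 0)
 *		return major;
 *	...
 *	unregister_chrdev(major, "my_driver");
 */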
/**
 * unregister_chrdev_region() - unregister a range of device numbers
 * @from: the first in the range of numbers to unregister
 * @count: the number of device numbers to unregister
 *
 * This function will unregister a range of @count device numbers,
 * starting with @from.  The caller should normally be the one who
 * allocated those numbers in the first place...
 */
void unregister_chrdev_region(dev_t from, unsigned count)
{
	dev_t to = from + count;
	dev_t n, next;

	for (n = from; n < to; n = next) {
		next = MKDEV(MAJOR(n)+1, 0);
		if (next > to)
			next = to;
		kfree(__unregister_chrdev_region(MAJOR(n), MINOR(n), next - n));
	}
}
/**
 * __unregister_chrdev - unregister and destroy a cdev
 * @major: major device number
 * @baseminor: first of the range of minor numbers
 * @count: the number of minor numbers this cdev is occupying
 * @name: name of this range of devices
 *
 * Unregister and destroy the cdev occupying the region described by
 * @major, @baseminor and @count.  This function undoes what
 * __register_chrdev() did.
 */
void __unregister_chrdev(unsigned int major, unsigned int baseminor,
			 unsigned int count, const char *name)
{
	struct char_device_struct *cd;

	cd = __unregister_chrdev_region(major, baseminor, count);
	if (cd && cd->cdev)
		cdev_del(cd->cdev);
	kfree(cd);
}
static DEFINE_SPINLOCK(cdev_lock);

static struct kobject *cdev_get(struct cdev *p)
{
	struct module *owner = p->owner;
	struct kobject *kobj;

	if (owner && !try_module_get(owner))
		return NULL;
	kobj = kobject_get_unless_zero(&p->kobj);
	if (!kobj)
		module_put(owner);
	return kobj;
}
void cdev_put(struct cdev *p)
{
	if (p) {
		struct module *owner = p->owner;
		kobject_put(&p->kobj);
		module_put(owner);
	}
}
/*
 * Called every time a character special file is opened
 */
static int chrdev_open(struct inode *inode, struct file *filp)
{
	const struct file_operations *fops;
	struct cdev *p;
	struct cdev *new = NULL;
	int ret = 0;

	spin_lock(&cdev_lock);
	p = inode->i_cdev;
	if (!p) {
		struct kobject *kobj;
		int idx;
		spin_unlock(&cdev_lock);
		kobj = kobj_lookup(cdev_map, inode->i_rdev, &idx);
		if (!kobj)
			return -ENXIO;
		new = container_of(kobj, struct cdev, kobj);
		spin_lock(&cdev_lock);
		/* Check i_cdev again in case somebody beat us to it while
		   we dropped the lock. */
		p = inode->i_cdev;
		if (!p) {
			inode->i_cdev = p = new;
			list_add(&inode->i_devices, &p->list);
			new = NULL;
		} else if (!cdev_get(p))
			ret = -ENXIO;
	} else if (!cdev_get(p))
		ret = -ENXIO;
	spin_unlock(&cdev_lock);
	cdev_put(new);
	if (ret)
		return ret;

	ret = -ENXIO;
	fops = fops_get(p->ops);
	if (!fops)
		goto out_cdev_put;

	replace_fops(filp, fops);
	if (filp->f_op->open) {
		ret = filp->f_op->open(inode, filp);
		if (ret)
			goto out_cdev_put;
	}

	return 0;

 out_cdev_put:
	cdev_put(p);
	return ret;
}
void cd_forget(struct inode *inode)
{
	spin_lock(&cdev_lock);
	list_del_init(&inode->i_devices);
	inode->i_cdev = NULL;
	inode->i_mapping = &inode->i_data;
	spin_unlock(&cdev_lock);
}
static void cdev_purge(struct cdev *cdev)
{
	spin_lock(&cdev_lock);
	while (!list_empty(&cdev->list)) {
		struct inode *inode;
		inode = container_of(cdev->list.next, struct inode, i_devices);
		list_del_init(&inode->i_devices);
		inode->i_cdev = NULL;
	}
	spin_unlock(&cdev_lock);
}
/*
 * Dummy default file-operations: the only thing this does
 * is contain the open that then fills in the correct operations
 * depending on the special file...
 */
const struct file_operations def_chr_fops = {
	.open = chrdev_open,
	.llseek = noop_llseek,
};
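
/*
 * Note: the character-device branch of init_special_inode() points such
 * inodes at def_chr_fops, so the very first open of a char device node goes
 * through chrdev_open() above, which then swaps in the driver's real
 * file_operations via replace_fops().
 */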
static struct kobject *exact_match(dev_t dev, int *part, void *data)
{
	struct cdev *p = data;
	return &p->kobj;
}

static int exact_lock(dev_t dev, void *data)
{
	struct cdev *p = data;
	return cdev_get(p) ? 0 : -1;
}
/**
 * cdev_add() - add a char device to the system
 * @p: the cdev structure for the device
 * @dev: the first device number for which this device is responsible
 * @count: the number of consecutive minor numbers corresponding to this
 *         device
 *
 * cdev_add() adds the device represented by @p to the system, making it
 * live immediately.  A negative error code is returned on failure.
 */
int cdev_add(struct cdev *p, dev_t dev, unsigned count)
{
	int error;

	p->dev = dev;
	p->count = count;

	error = kobj_map(cdev_map, dev, count, NULL,
			 exact_match, exact_lock, p);
	if (error)
		return error;

	kobject_get(p->kobj.parent);

	return 0;
}
/**
 * cdev_set_parent() - set the parent kobject for a char device
 * @p: the cdev structure
 * @kobj: the kobject to take a reference to
 *
 * cdev_set_parent() sets a parent kobject which will be referenced
 * appropriately so the parent is not freed before the cdev.  This
 * should be called before cdev_add.
 */
void cdev_set_parent(struct cdev *p, struct kobject *kobj)
{
	WARN_ON(!kobj->state_initialized);
	p->kobj.parent = kobj;
}
/**
 * cdev_device_add() - add a char device and its corresponding
 *	struct device, linking them together
 * @dev: the device structure
 * @cdev: the cdev structure
 *
 * cdev_device_add() adds the char device represented by @cdev to the system,
 * just as cdev_add does.  It then adds @dev to the system using device_add().
 * The dev_t for the char device will be taken from the struct device which
 * needs to be initialized first.  This helper function correctly takes a
 * reference to the parent device so the parent will not get released until
 * all references to the cdev are released.
 *
 * This helper uses dev->devt for the device number.  If it is not set
 * it will not add the cdev and it will be equivalent to device_add.
 *
 * This function should be used whenever the struct cdev and the
 * struct device are members of the same structure whose lifetime is
 * managed by the struct device.
 *
 * NOTE: Callers must assume that userspace was able to open the cdev and
 * can call cdev fops callbacks at any time, even if this function fails.
 */
int cdev_device_add(struct cdev *cdev, struct device *dev)
{
	int rc = 0;

	if (dev->devt) {
		cdev_set_parent(cdev, &dev->kobj);

		rc = cdev_add(cdev, dev->devt, 1);
		if (rc)
			return rc;
	}

	rc = device_add(dev);
	if (rc)
		cdev_del(cdev);

	return rc;
}
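
/*
 * Illustrative usage sketch for cdev_device_add(): both objects typically
 * live in one driver-private structure whose lifetime is tied to the struct
 * device.  struct my_dev, md, my_fops and my_devt below are hypothetical.
 *
 *	struct my_dev {
 *		struct device dev;
 *		struct cdev cdev;
 *	};
 *
 *	device_initialize(&md->dev);
 *	md->dev.devt = my_devt;
 *	cdev_init(&md->cdev, &my_fops);
 *	md->cdev.owner = THIS_MODULE;
 *	ret = cdev_device_add(&md->cdev, &md->dev);
 */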
/**
 * cdev_device_del() - inverse of cdev_device_add
 * @dev: the device structure
 * @cdev: the cdev structure
 *
 * cdev_device_del() is a helper function to call cdev_del and device_del.
 * It should be used whenever cdev_device_add is used.
 *
 * If dev->devt is not set it will not remove the cdev and will be equivalent
 * to device_del.
 *
 * NOTE: This guarantees that associated sysfs callbacks are not running
 * or runnable, however any cdevs already open will remain and their fops
 * will still be callable even after this function returns.
 */
void cdev_device_del(struct cdev *cdev, struct device *dev)
{
	device_del(dev);
	if (dev->devt)
		cdev_del(cdev);
}

static void cdev_unmap(dev_t dev, unsigned count)
{
	kobj_unmap(cdev_map, dev, count);
}
/**
 * cdev_del() - remove a cdev from the system
 * @p: the cdev structure to be removed
 *
 * cdev_del() removes @p from the system, possibly freeing the structure
 * itself.
 *
 * NOTE: This guarantees that the cdev device will no longer be able to be
 * opened, however any cdevs already open will remain and their fops will
 * still be callable even after cdev_del returns.
 */
void cdev_del(struct cdev *p)
{
	cdev_unmap(p->dev, p->count);
	kobject_put(&p->kobj);
}
static void cdev_default_release(struct kobject *kobj)
{
	struct cdev *p = container_of(kobj, struct cdev, kobj);
	struct kobject *parent = kobj->parent;

	cdev_purge(p);
	kobject_put(parent);
}

static void cdev_dynamic_release(struct kobject *kobj)
{
	struct cdev *p = container_of(kobj, struct cdev, kobj);
	struct kobject *parent = kobj->parent;

	cdev_purge(p);
	kfree(p);
	kobject_put(parent);
}
static struct kobj_type ktype_cdev_default = {
	.release	= cdev_default_release,
};

static struct kobj_type ktype_cdev_dynamic = {
	.release	= cdev_dynamic_release,
};
/**
 * cdev_alloc() - allocate a cdev structure
 *
 * Allocates and returns a cdev structure, or NULL on failure.
 */
struct cdev *cdev_alloc(void)
{
	struct cdev *p = kzalloc(sizeof(struct cdev), GFP_KERNEL);
	if (p) {
		INIT_LIST_HEAD(&p->list);
		kobject_init(&p->kobj, &ktype_cdev_dynamic);
	}
	return p;
}
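
/*
 * Illustrative sketch of the cdev_alloc() path, used when the cdev is not
 * embedded in a larger structure: the kobject release (cdev_dynamic_release
 * above) frees it, so dropping the last reference is enough.  my_fops and
 * devt below are hypothetical.
 *
 *	struct cdev *p = cdev_alloc();
 *	if (!p)
 *		return -ENOMEM;
 *	p->owner = THIS_MODULE;
 *	p->ops = &my_fops;
 *	ret = cdev_add(p, devt, 1);
 *	if (ret)
 *		kobject_put(&p->kobj);
 *
 * On teardown, cdev_del(p) drops the map entry and the final reference,
 * after which cdev_dynamic_release() kfree()s p.
 */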
/**
 * cdev_init() - initialize a cdev structure
 * @cdev: the structure to initialize
 * @fops: the file_operations for this device
 *
 * Initializes @cdev, remembering @fops, making it ready to add to the
 * system with cdev_add().
 */
void cdev_init(struct cdev *cdev, const struct file_operations *fops)
{
	memset(cdev, 0, sizeof *cdev);
	INIT_LIST_HEAD(&cdev->list);
	kobject_init(&cdev->kobj, &ktype_cdev_default);
	cdev->ops = fops;
}
static struct kobject *base_probe(dev_t dev, int *part, void *data)
{
	if (request_module("char-major-%d-%d", MAJOR(dev), MINOR(dev)) > 0)
		/* Make old-style 2.4 aliases work */
		request_module("char-major-%d", MAJOR(dev));
	return NULL;
}

void __init chrdev_init(void)
{
	cdev_map = kobj_map_init(base_probe, &chrdevs_lock);
}
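
/*
 * Usage note on base_probe()/chrdev_init(): opening an unmapped char device
 * node triggers a "char-major-<major>-<minor>" module request, so a module
 * can arrange to be auto-loaded by declaring MODULE_ALIAS_CHARDEV(major,
 * minor) or MODULE_ALIAS_CHARDEV_MAJOR(major) from <linux/module.h>.
 */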
/* Let modules do char dev stuff */
EXPORT_SYMBOL(register_chrdev_region);
EXPORT_SYMBOL(unregister_chrdev_region);
EXPORT_SYMBOL(alloc_chrdev_region);
EXPORT_SYMBOL(cdev_init);
EXPORT_SYMBOL(cdev_alloc);
EXPORT_SYMBOL(cdev_del);
EXPORT_SYMBOL(cdev_add);
EXPORT_SYMBOL(cdev_set_parent);
EXPORT_SYMBOL(cdev_device_add);
EXPORT_SYMBOL(cdev_device_del);
EXPORT_SYMBOL(__register_chrdev);
EXPORT_SYMBOL(__unregister_chrdev);