/*
 *  linux/fs/char_dev.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */
#include <linux/config.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/string.h>

#include <linux/major.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/smp_lock.h>
#include <linux/seq_file.h>

#include <linux/kobject.h>
#include <linux/kobj_map.h>
#include <linux/cdev.h>
#include <linux/mutex.h>

#ifdef CONFIG_KMOD
#include <linux/kmod.h>
#endif
static struct kobj_map *cdev_map;

static DEFINE_MUTEX(chrdevs_lock);
static struct char_device_struct {
        struct char_device_struct *next;
        unsigned int major;
        unsigned int baseminor;
        int minorct;
        char name[64];
        struct file_operations *fops;
        struct cdev *cdev;              /* will die */
} *chrdevs[CHRDEV_MAJOR_HASH_SIZE];
/* index in the above */
static inline int major_to_index(int major)
{
        return major % CHRDEV_MAJOR_HASH_SIZE;
}
#ifdef CONFIG_PROC_FS

/* /proc/devices iterator: print one hash bucket of registered majors */
void chrdev_show(struct seq_file *f, off_t offset)
{
        struct char_device_struct *cd;

        if (offset < CHRDEV_MAJOR_HASH_SIZE) {
                mutex_lock(&chrdevs_lock);
                for (cd = chrdevs[offset]; cd; cd = cd->next)
                        seq_printf(f, "%3d %s\n", cd->major, cd->name);
                mutex_unlock(&chrdevs_lock);
        }
}

#endif /* CONFIG_PROC_FS */
/*
 * Register a single major with a specified minor range.
 *
 * If major == 0 this function will dynamically allocate a major and return
 * its number.
 *
 * If major > 0 this function will attempt to reserve the passed range of
 * minors and will return zero on success.
 *
 * Returns a -ve errno on failure.
 */
static struct char_device_struct *
__register_chrdev_region(unsigned int major, unsigned int baseminor,
                         int minorct, const char *name)
{
        struct char_device_struct *cd, **cp;
        int ret = 0;
        int i;

        cd = kzalloc(sizeof(struct char_device_struct), GFP_KERNEL);
        if (cd == NULL)
                return ERR_PTR(-ENOMEM);

        mutex_lock(&chrdevs_lock);

        /* major == 0: pick an unused major dynamically, searching from the top */
        if (major == 0) {
                for (i = ARRAY_SIZE(chrdevs)-1; i > 0; i--) {
                        if (chrdevs[i] == NULL)
                                break;
                }

                if (i == 0) {
                        ret = -EBUSY;
                        goto out;
                }
                major = i;
                ret = major;
        }

        cd->major = major;
        cd->baseminor = baseminor;
        cd->minorct = minorct;
        strncpy(cd->name, name, 64);

        i = major_to_index(major);

        /* keep the hash chain sorted and reject overlapping minor ranges */
        for (cp = &chrdevs[i]; *cp; cp = &(*cp)->next)
                if ((*cp)->major > major ||
                    ((*cp)->major == major && (*cp)->baseminor >= baseminor))
                        break;
        if (*cp && (*cp)->major == major &&
            (*cp)->baseminor < baseminor + minorct) {
                ret = -EBUSY;
                goto out;
        }
        cd->next = *cp;
        *cp = cd;
        mutex_unlock(&chrdevs_lock);
        return cd;
out:
        mutex_unlock(&chrdevs_lock);
        kfree(cd);
        return ERR_PTR(ret);
}
static struct char_device_struct *
__unregister_chrdev_region(unsigned major, unsigned baseminor, int minorct)
{
        struct char_device_struct *cd = NULL, **cp;
        int i = major_to_index(major);

        mutex_lock(&chrdevs_lock);
        for (cp = &chrdevs[i]; *cp; cp = &(*cp)->next)
                if ((*cp)->major == major &&
                    (*cp)->baseminor == baseminor &&
                    (*cp)->minorct == minorct)
                        break;
        if (*cp) {
                cd = *cp;
                *cp = cd->next;
        }
        mutex_unlock(&chrdevs_lock);
        return cd;
}
int register_chrdev_region(dev_t from, unsigned count, const char *name)
{
        struct char_device_struct *cd;
        dev_t to = from + count;
        dev_t n, next;

        for (n = from; n < to; n = next) {
                next = MKDEV(MAJOR(n)+1, 0);
                if (next > to)
                        next = to;
                cd = __register_chrdev_region(MAJOR(n), MINOR(n),
                               next - n, name);
                if (IS_ERR(cd))
                        goto fail;
        }
        return 0;
fail:
        to = n;
        for (n = from; n < to; n = next) {
                next = MKDEV(MAJOR(n)+1, 0);
                kfree(__unregister_chrdev_region(MAJOR(n), MINOR(n), next - n));
        }
        return PTR_ERR(cd);
}
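
/*
 * alloc_chrdev_region() - register a range of char device numbers with a
 * dynamically chosen major
 * @dev:       output: the first number in the allocated range
 * @baseminor: first of the requested range of minor numbers
 * @count:     the number of minor numbers required
 * @name:      the name of the associated device or driver
 *
 * Returns zero on success and a negative errno on failure.
 */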
int alloc_chrdev_region(dev_t *dev, unsigned baseminor, unsigned count,
                        const char *name)
{
        struct char_device_struct *cd;
        cd = __register_chrdev_region(0, baseminor, count, name);
        if (IS_ERR(cd))
                return PTR_ERR(cd);
        *dev = MKDEV(cd->major, cd->baseminor);
        return 0;
}
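
/*
 * Illustrative sketch (not part of this file): a driver would typically
 * reserve its device numbers at init time roughly like this, where
 * "mydev" and MYDEV_MINORS are hypothetical names:
 *
 *      dev_t devt;
 *      int err = alloc_chrdev_region(&devt, 0, MYDEV_MINORS, "mydev");
 *      if (err)
 *              return err;
 *      ...
 *      unregister_chrdev_region(devt, MYDEV_MINORS);
 */

/*
 * register_chrdev() - old-style single-major registration
 *
 * Claims the whole minor range (0-255) of @major, allocates a cdev
 * internally and wires it up to @fops.  If @major is 0 a free major is
 * chosen and returned; otherwise zero is returned on success and a
 * negative errno on failure.
 */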
int register_chrdev(unsigned int major, const char *name,
                    const struct file_operations *fops)
{
        struct char_device_struct *cd;
        struct cdev *cdev;
        char *s;
        int err = -ENOMEM;

        cd = __register_chrdev_region(major, 0, 256, name);
        if (IS_ERR(cd))
                return PTR_ERR(cd);

        cdev = cdev_alloc();
        if (!cdev)
                goto out2;

        cdev->owner = fops->owner;
        cdev->ops = fops;
        kobject_set_name(&cdev->kobj, "%s", name);
        /* '/' is not allowed in a kobject name; replace it */
        for (s = strchr(kobject_name(&cdev->kobj),'/'); s; s = strchr(s, '/'))
                *s = '!';

        err = cdev_add(cdev, MKDEV(cd->major, 0), 256);
        if (err)
                goto out;

        cd->cdev = cdev;

        return major ? 0 : cd->major;
out:
        kobject_put(&cdev->kobj);
out2:
        kfree(__unregister_chrdev_region(cd->major, 0, 256));
        return err;
}
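
/*
 * unregister_chrdev_region() - return a range of device numbers
 * @from:  the first in the range of numbers to unregister
 * @count: the number of device numbers to unregister
 *
 * Undoes register_chrdev_region()/alloc_chrdev_region(); must be called
 * with the same range that was registered.
 */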
void unregister_chrdev_region(dev_t from, unsigned count)
{
        dev_t to = from + count;
        dev_t n, next;

        for (n = from; n < to; n = next) {
                next = MKDEV(MAJOR(n)+1, 0);
                if (next > to)
                        next = to;
                kfree(__unregister_chrdev_region(MAJOR(n), MINOR(n), next - n));
        }
}
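
/*
 * unregister_chrdev() - undo an old-style register_chrdev() call,
 * deleting the internally allocated cdev and releasing the 0-255 minor
 * range of @major.
 */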
int unregister_chrdev(unsigned int major, const char *name)
{
        struct char_device_struct *cd;
        cd = __unregister_chrdev_region(major, 0, 256);
        if (cd && cd->cdev)
                cdev_del(cd->cdev);
        kfree(cd);
        return 0;
}
static DEFINE_SPINLOCK(cdev_lock);
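
/*
 * cdev_get()/cdev_put() take and drop a reference on a cdev together
 * with a reference on the module that owns it, so the backing code
 * cannot be unloaded while a character device is being opened or is
 * still open.
 */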
static struct kobject *cdev_get(struct cdev *p)
{
        struct module *owner = p->owner;
        struct kobject *kobj;

        if (owner && !try_module_get(owner))
                return NULL;
        kobj = kobject_get(&p->kobj);
        if (!kobj)
                module_put(owner);
        return kobj;
}
void cdev_put(struct cdev *p)
{
        if (p) {
                struct module *owner = p->owner;
                kobject_put(&p->kobj);
                module_put(owner);
        }
}
/*
 * Called every time a character special file is opened
 */
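/*
 * The first open of an inode looks the cdev up in cdev_map and caches
 * it in inode->i_cdev; subsequent opens just take another reference.
 * The inode is added to the cdev's list so the cached pointer can be
 * severed again later (see cdev_purge() and cd_forget()).
 */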
int chrdev_open(struct inode * inode, struct file * filp)
{
        struct cdev *p;
        struct cdev *new = NULL;
        int ret = 0;

        spin_lock(&cdev_lock);
        p = inode->i_cdev;
        if (!p) {
                struct kobject *kobj;
                int idx;
                spin_unlock(&cdev_lock);
                kobj = kobj_lookup(cdev_map, inode->i_rdev, &idx);
                if (!kobj)
                        return -ENXIO;
                new = container_of(kobj, struct cdev, kobj);
                spin_lock(&cdev_lock);
                p = inode->i_cdev;
                if (!p) {
                        inode->i_cdev = p = new;
                        inode->i_cindex = idx;
                        list_add(&inode->i_devices, &p->list);
                        new = NULL;
                } else if (!cdev_get(p))
                        ret = -ENXIO;
        } else if (!cdev_get(p))
                ret = -ENXIO;
        spin_unlock(&cdev_lock);
        cdev_put(new);
        if (ret)
                return ret;
        filp->f_op = fops_get(p->ops);
        if (!filp->f_op) {
                cdev_put(p);
                return -ENXIO;
        }
        if (filp->f_op->open) {
                lock_kernel();
                ret = filp->f_op->open(inode, filp);
                unlock_kernel();
        }
        if (ret)
                cdev_put(p);
        return ret;
}
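
/*
 * cd_forget() - called when a character inode is dropped; severs the
 * inode->i_cdev caching link set up by chrdev_open().
 */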
void cd_forget(struct inode *inode)
{
        spin_lock(&cdev_lock);
        list_del_init(&inode->i_devices);
        inode->i_cdev = NULL;
        spin_unlock(&cdev_lock);
}
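
/*
 * cdev_purge() - detach every inode still pointing at a dying cdev so
 * that no stale inode->i_cdev reference can outlive the structure.
 */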
static void cdev_purge(struct cdev *cdev)
{
        spin_lock(&cdev_lock);
        while (!list_empty(&cdev->list)) {
                struct inode *inode;
                inode = container_of(cdev->list.next, struct inode, i_devices);
                list_del_init(&inode->i_devices);
                inode->i_cdev = NULL;
        }
        spin_unlock(&cdev_lock);
}
/*
 * Dummy default file-operations: the only thing this does
 * is contain the open that then fills in the correct operations
 * depending on the special file...
 */
const struct file_operations def_chr_fops = {
        .open = chrdev_open,
};
static struct kobject *exact_match(dev_t dev, int *part, void *data)
{
        struct cdev *p = data;
        return &p->kobj;
}
static int exact_lock(dev_t dev, void *data)
{
        struct cdev *p = data;
        return cdev_get(p) ? 0 : -1;
}
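
/*
 * cdev_add() - make a character device live
 * @p:     the cdev structure for the device
 * @dev:   the first device number it responds to
 * @count: the number of consecutive minors it covers
 *
 * Maps the [dev, dev+count) range onto @p in cdev_map so chrdev_open()
 * can find it.  Returns a negative errno on failure.
 */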
int cdev_add(struct cdev *p, dev_t dev, unsigned count)
{
        p->dev = dev;
        p->count = count;
        return kobj_map(cdev_map, dev, count, NULL, exact_match, exact_lock, p);
}
static void cdev_unmap(dev_t dev, unsigned count)
{
        kobj_unmap(cdev_map, dev, count);
}
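
/*
 * cdev_del() - remove a cdev from the system
 *
 * Unmaps its device numbers and drops the registration reference; the
 * structure itself is only released (and, for cdev_alloc()ed devices,
 * freed) once the last kobject reference goes away.
 */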
void cdev_del(struct cdev *p)
{
        cdev_unmap(p->dev, p->count);
        kobject_put(&p->kobj);
}
static void cdev_default_release(struct kobject *kobj)
{
        struct cdev *p = container_of(kobj, struct cdev, kobj);
        cdev_purge(p);
}
static void cdev_dynamic_release(struct kobject *kobj)
{
        struct cdev *p = container_of(kobj, struct cdev, kobj);
        cdev_purge(p);
        kfree(p);
}
static struct kobj_type ktype_cdev_default = {
        .release        = cdev_default_release,
};

static struct kobj_type ktype_cdev_dynamic = {
        .release        = cdev_dynamic_release,
};
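
/*
 * cdev_alloc() - allocate and initialise a cdev
 *
 * The returned cdev uses ktype_cdev_dynamic, so it is kfree()d
 * automatically when its last reference is dropped.  Returns NULL on
 * allocation failure.
 */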
struct cdev *cdev_alloc(void)
{
        struct cdev *p = kzalloc(sizeof(struct cdev), GFP_KERNEL);
        if (p) {
                p->kobj.ktype = &ktype_cdev_dynamic;
                INIT_LIST_HEAD(&p->list);
                kobject_init(&p->kobj);
        }
        return p;
}
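
/*
 * cdev_init() - initialise a caller-owned (usually embedded) cdev
 * @cdev: the structure to initialise
 * @fops: the file_operations for this device
 *
 * Unlike cdev_alloc(), the storage belongs to the caller, so the
 * default ktype does not free it on release.
 */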
void cdev_init(struct cdev *cdev, const struct file_operations *fops)
{
        memset(cdev, 0, sizeof *cdev);
        INIT_LIST_HEAD(&cdev->list);
        cdev->kobj.ktype = &ktype_cdev_default;
        kobject_init(&cdev->kobj);
        cdev->ops = fops;
}
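
/*
 * Illustrative sketch (not part of this file): the usual sequence in a
 * driver, with "my_cdev", "my_fops", "my_devt" and MY_MINORS as
 * hypothetical names:
 *
 *      static struct cdev my_cdev;
 *
 *      cdev_init(&my_cdev, &my_fops);
 *      my_cdev.owner = THIS_MODULE;
 *      err = cdev_add(&my_cdev, my_devt, MY_MINORS);
 *      if (err)
 *              goto fail;
 *      ...
 *      cdev_del(&my_cdev);
 */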
/*
 * Fallback probe for device numbers with no cdev mapped yet: ask kmod
 * to load a module that provides the device.
 */
static struct kobject *base_probe(dev_t dev, int *part, void *data)
{
        if (request_module("char-major-%d-%d", MAJOR(dev), MINOR(dev)) > 0)
                /* Make old-style 2.4 aliases work */
                request_module("char-major-%d", MAJOR(dev));
        return NULL;
}
void __init chrdev_init(void)
{
        cdev_map = kobj_map_init(base_probe, &chrdevs_lock);
}
/* Let modules do char dev stuff */
EXPORT_SYMBOL(register_chrdev_region);
EXPORT_SYMBOL(unregister_chrdev_region);
EXPORT_SYMBOL(alloc_chrdev_region);
EXPORT_SYMBOL(cdev_init);
EXPORT_SYMBOL(cdev_alloc);
EXPORT_SYMBOL(cdev_del);
EXPORT_SYMBOL(cdev_add);
EXPORT_SYMBOL(register_chrdev);
EXPORT_SYMBOL(unregister_chrdev);