1 /* $NetBSD: subr_devsw.c,v 1.27 2009/08/18 02:44:37 yamt Exp $ */
4 * Copyright (c) 2001, 2002, 2007, 2008 The NetBSD Foundation, Inc.
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by MAEKAWA Masahide <gehenna@NetBSD.org>, and by Andrew Doran.
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
35 * subr_devsw.c: registers device drivers by name and by major
36 * number, and provides wrapper methods for performing I/O and
37 * other tasks on device drivers, keying on the device number
40 * When the system is built, the config(8) command generates
41 * static tables of device drivers built into the kernel image
42 * along with their associated methods. These are recorded in
43 * the cdevsw0 and bdevsw0 tables. Drivers can also be added to
44 * and removed from the system dynamically.
48 * When the system initially boots only the statically allocated
49 * indexes (bdevsw0, cdevsw0) are used. If these overflow due to
50 * allocation, we allocate a fixed block of memory to hold the new,
51 * expanded index. This "fork" of the table is only ever performed
52 * once in order to guarantee that other threads may safely access
55 * o Once a thread has a "reference" to the table via an earlier
56 * open() call, we know that the entry in the table must exist
57 * and so it is safe to access it.
59 * o Regardless of whether other threads see the old or new
60 * pointers, they will point to a correct device switch
61 * structure for the operation being performed.
63 * XXX Currently, the wrapper methods such as cdev_read() verify
64 * that a device driver does in fact exist before calling the
65 * associated driver method. This should be changed so that
66 * once the device is has been referenced by a vnode (opened),
67 * calling the other methods should be valid until that reference
71 #include <sys/cdefs.h>
72 __KERNEL_RCSID(0, "$NetBSD: subr_devsw.c,v 1.27 2009/08/18 02:44:37 yamt Exp $");
74 #include <sys/param.h>
77 #include <sys/systm.h>
/* Debug printf: compiled out unless DEVSW_DEBUG is defined. */
#ifdef DEVSW_DEBUG
#define	DPRINTF(x)	printf x
#else /* DEVSW_DEBUG */
#define	DPRINTF(x)
#endif /* DEVSW_DEBUG */

#define	MAXDEVSW	512	/* the maximum of major device number */
#define	BDEVSW_SIZE	(sizeof(struct bdevsw *))
#define	CDEVSW_SIZE	(sizeof(struct cdevsw *))
#define	DEVSWCONV_SIZE	(sizeof(struct devsw_conv))
94 extern const struct bdevsw
**bdevsw
, *bdevsw0
[];
95 extern const struct cdevsw
**cdevsw
, *cdevsw0
[];
96 extern struct devsw_conv
*devsw_conv
, devsw_conv0
[];
97 extern const int sys_bdevsws
, sys_cdevsws
;
98 extern int max_bdevsws
, max_cdevsws
, max_devsw_convs
;
100 static int bdevsw_attach(const struct bdevsw
*, devmajor_t
*);
101 static int cdevsw_attach(const struct cdevsw
*, devmajor_t
*);
102 static void devsw_detach_locked(const struct bdevsw
*, const struct cdevsw
*);
104 kmutex_t device_lock
;
110 KASSERT(sys_bdevsws
< MAXDEVSW
- 1);
111 KASSERT(sys_cdevsws
< MAXDEVSW
- 1);
112 mutex_init(&device_lock
, MUTEX_DEFAULT
, IPL_NONE
);
116 devsw_attach(const char *devname
,
117 const struct bdevsw
*bdev
, devmajor_t
*bmajor
,
118 const struct cdevsw
*cdev
, devmajor_t
*cmajor
)
120 struct devsw_conv
*conv
;
125 if (devname
== NULL
|| cdev
== NULL
)
128 mutex_enter(&device_lock
);
130 for (i
= 0 ; i
< max_devsw_convs
; i
++) {
131 conv
= &devsw_conv
[i
];
132 if (conv
->d_name
== NULL
|| strcmp(devname
, conv
->d_name
) != 0)
136 *bmajor
= conv
->d_bmajor
;
138 *cmajor
= conv
->d_cmajor
;
140 if (*bmajor
!= conv
->d_bmajor
|| *cmajor
!= conv
->d_cmajor
) {
144 if ((*bmajor
>= 0 && bdev
== NULL
) || *cmajor
< 0) {
149 if ((*bmajor
>= 0 && bdevsw
[*bmajor
] != NULL
) ||
150 cdevsw
[*cmajor
] != NULL
) {
156 bdevsw
[*bmajor
] = bdev
;
157 cdevsw
[*cmajor
] = cdev
;
159 mutex_exit(&device_lock
);
163 error
= bdevsw_attach(bdev
, bmajor
);
166 error
= cdevsw_attach(cdev
, cmajor
);
168 devsw_detach_locked(bdev
, NULL
);
172 for (i
= 0 ; i
< max_devsw_convs
; i
++) {
173 if (devsw_conv
[i
].d_name
== NULL
)
176 if (i
== max_devsw_convs
) {
177 struct devsw_conv
*newptr
;
180 old
= max_devsw_convs
;
183 newptr
= kmem_zalloc(new * DEVSWCONV_SIZE
, KM_NOSLEEP
);
184 if (newptr
== NULL
) {
185 devsw_detach_locked(bdev
, cdev
);
189 newptr
[old
].d_name
= NULL
;
190 newptr
[old
].d_bmajor
= -1;
191 newptr
[old
].d_cmajor
= -1;
192 memcpy(newptr
, devsw_conv
, old
* DEVSWCONV_SIZE
);
193 if (devsw_conv
!= devsw_conv0
)
194 kmem_free(devsw_conv
, old
* DEVSWCONV_SIZE
);
196 max_devsw_convs
= new;
199 len
= strlen(devname
) + 1;
200 name
= kmem_alloc(len
, KM_NOSLEEP
);
202 devsw_detach_locked(bdev
, cdev
);
206 strlcpy(name
, devname
, len
);
208 devsw_conv
[i
].d_name
= name
;
209 devsw_conv
[i
].d_bmajor
= *bmajor
;
210 devsw_conv
[i
].d_cmajor
= *cmajor
;
212 mutex_exit(&device_lock
);
215 mutex_exit(&device_lock
);
220 bdevsw_attach(const struct bdevsw
*devsw
, devmajor_t
*devmajor
)
222 const struct bdevsw
**newptr
;
226 KASSERT(mutex_owned(&device_lock
));
232 for (bmajor
= sys_bdevsws
; bmajor
< max_bdevsws
; bmajor
++) {
233 if (bdevsw
[bmajor
] != NULL
)
235 for (i
= 0 ; i
< max_devsw_convs
; i
++) {
236 if (devsw_conv
[i
].d_bmajor
== bmajor
)
239 if (i
!= max_devsw_convs
)
246 if (*devmajor
>= MAXDEVSW
) {
247 printf("bdevsw_attach: block majors exhausted");
251 if (*devmajor
>= max_bdevsws
) {
252 KASSERT(bdevsw
== bdevsw0
);
253 newptr
= kmem_zalloc(MAXDEVSW
* BDEVSW_SIZE
, KM_NOSLEEP
);
256 memcpy(newptr
, bdevsw
, max_bdevsws
* BDEVSW_SIZE
);
258 max_bdevsws
= MAXDEVSW
;
261 if (bdevsw
[*devmajor
] != NULL
)
264 bdevsw
[*devmajor
] = devsw
;
270 cdevsw_attach(const struct cdevsw
*devsw
, devmajor_t
*devmajor
)
272 const struct cdevsw
**newptr
;
276 KASSERT(mutex_owned(&device_lock
));
279 for (cmajor
= sys_cdevsws
; cmajor
< max_cdevsws
; cmajor
++) {
280 if (cdevsw
[cmajor
] != NULL
)
282 for (i
= 0 ; i
< max_devsw_convs
; i
++) {
283 if (devsw_conv
[i
].d_cmajor
== cmajor
)
286 if (i
!= max_devsw_convs
)
293 if (*devmajor
>= MAXDEVSW
) {
294 printf("cdevsw_attach: character majors exhausted");
298 if (*devmajor
>= max_cdevsws
) {
299 KASSERT(cdevsw
== cdevsw0
);
300 newptr
= kmem_zalloc(MAXDEVSW
* CDEVSW_SIZE
, KM_NOSLEEP
);
303 memcpy(newptr
, cdevsw
, max_cdevsws
* CDEVSW_SIZE
);
305 max_cdevsws
= MAXDEVSW
;
308 if (cdevsw
[*devmajor
] != NULL
)
311 cdevsw
[*devmajor
] = devsw
;
317 devsw_detach_locked(const struct bdevsw
*bdev
, const struct cdevsw
*cdev
)
321 KASSERT(mutex_owned(&device_lock
));
324 for (i
= 0 ; i
< max_bdevsws
; i
++) {
325 if (bdevsw
[i
] != bdev
)
332 for (i
= 0 ; i
< max_cdevsws
; i
++) {
333 if (cdevsw
[i
] != cdev
)
342 devsw_detach(const struct bdevsw
*bdev
, const struct cdevsw
*cdev
)
345 mutex_enter(&device_lock
);
346 devsw_detach_locked(bdev
, cdev
);
347 mutex_exit(&device_lock
);
352 * Look up a block device by number.
354 * => Caller must ensure that the device is attached.
356 const struct bdevsw
*
357 bdevsw_lookup(dev_t dev
)
364 if (bmajor
< 0 || bmajor
>= max_bdevsws
)
367 return (bdevsw
[bmajor
]);
371 * Look up a character device by number.
373 * => Caller must ensure that the device is attached.
375 const struct cdevsw
*
376 cdevsw_lookup(dev_t dev
)
383 if (cmajor
< 0 || cmajor
>= max_cdevsws
)
386 return (cdevsw
[cmajor
]);
390 * Look up a block device by reference to its operations set.
392 * => Caller must ensure that the device is not detached, and therefore
393 * that the returned major is still valid when dereferenced.
396 bdevsw_lookup_major(const struct bdevsw
*bdev
)
400 for (bmajor
= 0 ; bmajor
< max_bdevsws
; bmajor
++) {
401 if (bdevsw
[bmajor
] == bdev
)
409 * Look up a character device by reference to its operations set.
411 * => Caller must ensure that the device is not detached, and therefore
412 * that the returned major is still valid when dereferenced.
415 cdevsw_lookup_major(const struct cdevsw
*cdev
)
419 for (cmajor
= 0 ; cmajor
< max_cdevsws
; cmajor
++) {
420 if (cdevsw
[cmajor
] == cdev
)
428 * Convert from block major number to name.
430 * => Caller must ensure that the device is not detached, and therefore
431 * that the name pointer is still valid when dereferenced.
434 devsw_blk2name(devmajor_t bmajor
)
443 mutex_enter(&device_lock
);
444 if (bmajor
< 0 || bmajor
>= max_bdevsws
|| bdevsw
[bmajor
] == NULL
) {
445 mutex_exit(&device_lock
);
448 for (i
= 0 ; i
< max_devsw_convs
; i
++) {
449 if (devsw_conv
[i
].d_bmajor
== bmajor
) {
450 cmajor
= devsw_conv
[i
].d_cmajor
;
454 if (cmajor
>= 0 && cmajor
< max_cdevsws
&& cdevsw
[cmajor
] != NULL
)
455 name
= devsw_conv
[i
].d_name
;
456 mutex_exit(&device_lock
);
462 * Convert char major number to device driver name.
465 cdevsw_getname(devmajor_t major
)
475 mutex_enter(&device_lock
);
476 for (i
= 0 ; i
< max_devsw_convs
; i
++) {
477 if (devsw_conv
[i
].d_cmajor
== major
) {
478 name
= devsw_conv
[i
].d_name
;
482 mutex_exit(&device_lock
);
487 * Convert block major number to device driver name.
490 bdevsw_getname(devmajor_t major
)
500 mutex_enter(&device_lock
);
501 for (i
= 0 ; i
< max_devsw_convs
; i
++) {
502 if (devsw_conv
[i
].d_bmajor
== major
) {
503 name
= devsw_conv
[i
].d_name
;
507 mutex_exit(&device_lock
);
512 * Convert from device name to block major number.
514 * => Caller must ensure that the device is not detached, and therefore
515 * that the major number is still valid when dereferenced.
518 devsw_name2blk(const char *name
, char *devname
, size_t devnamelen
)
520 struct devsw_conv
*conv
;
527 mutex_enter(&device_lock
);
528 for (i
= 0 ; i
< max_devsw_convs
; i
++) {
531 conv
= &devsw_conv
[i
];
532 if (conv
->d_name
== NULL
)
534 len
= strlen(conv
->d_name
);
535 if (strncmp(conv
->d_name
, name
, len
) != 0)
537 if (*(name
+len
) && !isdigit(*(name
+ len
)))
539 bmajor
= conv
->d_bmajor
;
540 if (bmajor
< 0 || bmajor
>= max_bdevsws
||
541 bdevsw
[bmajor
] == NULL
)
543 if (devname
!= NULL
) {
545 if (strlen(conv
->d_name
) >= devnamelen
)
546 printf("devsw_name2blk: too short buffer");
547 #endif /* DEVSW_DEBUG */
548 strncpy(devname
, conv
->d_name
, devnamelen
);
549 devname
[devnamelen
- 1] = '\0';
551 mutex_exit(&device_lock
);
555 mutex_exit(&device_lock
);
560 * Convert from device name to char major number.
562 * => Caller must ensure that the device is not detached, and therefore
563 * that the major number is still valid when dereferenced.
566 devsw_name2chr(const char *name
, char *devname
, size_t devnamelen
)
568 struct devsw_conv
*conv
;
575 mutex_enter(&device_lock
);
576 for (i
= 0 ; i
< max_devsw_convs
; i
++) {
579 conv
= &devsw_conv
[i
];
580 if (conv
->d_name
== NULL
)
582 len
= strlen(conv
->d_name
);
583 if (strncmp(conv
->d_name
, name
, len
) != 0)
585 if (*(name
+len
) && !isdigit(*(name
+ len
)))
587 cmajor
= conv
->d_cmajor
;
588 if (cmajor
< 0 || cmajor
>= max_cdevsws
||
589 cdevsw
[cmajor
] == NULL
)
591 if (devname
!= NULL
) {
593 if (strlen(conv
->d_name
) >= devnamelen
)
594 printf("devsw_name2chr: too short buffer");
595 #endif /* DEVSW_DEBUG */
596 strncpy(devname
, conv
->d_name
, devnamelen
);
597 devname
[devnamelen
- 1] = '\0';
599 mutex_exit(&device_lock
);
603 mutex_exit(&device_lock
);
608 * Convert from character dev_t to block dev_t.
610 * => Caller must ensure that the device is not detached, and therefore
611 * that the major number is still valid when dereferenced.
614 devsw_chr2blk(dev_t cdev
)
616 devmajor_t bmajor
, cmajor
;
620 cmajor
= major(cdev
);
624 mutex_enter(&device_lock
);
625 if (cmajor
< 0 || cmajor
>= max_cdevsws
|| cdevsw
[cmajor
] == NULL
) {
626 mutex_exit(&device_lock
);
629 for (i
= 0 ; i
< max_devsw_convs
; i
++) {
630 if (devsw_conv
[i
].d_cmajor
== cmajor
) {
631 bmajor
= devsw_conv
[i
].d_bmajor
;
635 if (bmajor
>= 0 && bmajor
< max_bdevsws
&& bdevsw
[bmajor
] != NULL
)
636 rv
= makedev(bmajor
, minor(cdev
));
637 mutex_exit(&device_lock
);
643 * Convert from block dev_t to character dev_t.
645 * => Caller must ensure that the device is not detached, and therefore
646 * that the major number is still valid when dereferenced.
649 devsw_blk2chr(dev_t bdev
)
651 devmajor_t bmajor
, cmajor
;
655 bmajor
= major(bdev
);
659 mutex_enter(&device_lock
);
660 if (bmajor
< 0 || bmajor
>= max_bdevsws
|| bdevsw
[bmajor
] == NULL
) {
661 mutex_exit(&device_lock
);
664 for (i
= 0 ; i
< max_devsw_convs
; i
++) {
665 if (devsw_conv
[i
].d_bmajor
== bmajor
) {
666 cmajor
= devsw_conv
[i
].d_cmajor
;
670 if (cmajor
>= 0 && cmajor
< max_cdevsws
&& cdevsw
[cmajor
] != NULL
)
671 rv
= makedev(cmajor
, minor(bdev
));
672 mutex_exit(&device_lock
);
/*
 * Device access methods.
 *
 * Non-MPSAFE drivers (D_MPSAFE clear) are still protected by the big
 * kernel lock; these macros take/release it around each driver call.
 * They expect a local `int mpflag` in the calling function.
 */

#define	DEV_LOCK(d)						\
	if ((mpflag = (d->d_flag & D_MPSAFE)) == 0) {		\
		KERNEL_LOCK(1, NULL);				\
	}

#define	DEV_UNLOCK(d)						\
	if (mpflag == 0) {					\
		KERNEL_UNLOCK_ONE(NULL);			\
	}
692 bdev_open(dev_t dev
, int flag
, int devtype
, lwp_t
*l
)
694 const struct bdevsw
*d
;
698 * For open we need to lock, in order to synchronize
699 * with attach/detach.
701 mutex_enter(&device_lock
);
702 d
= bdevsw_lookup(dev
);
703 mutex_exit(&device_lock
);
708 rv
= (*d
->d_open
)(dev
, flag
, devtype
, l
);
715 bdev_close(dev_t dev
, int flag
, int devtype
, lwp_t
*l
)
717 const struct bdevsw
*d
;
720 if ((d
= bdevsw_lookup(dev
)) == NULL
)
724 rv
= (*d
->d_close
)(dev
, flag
, devtype
, l
);
731 bdev_strategy(struct buf
*bp
)
733 const struct bdevsw
*d
;
736 if ((d
= bdevsw_lookup(bp
->b_dev
)) == NULL
) {
738 bp
->b_resid
= bp
->b_bcount
;
744 (*d
->d_strategy
)(bp
);
749 bdev_ioctl(dev_t dev
, u_long cmd
, void *data
, int flag
, lwp_t
*l
)
751 const struct bdevsw
*d
;
754 if ((d
= bdevsw_lookup(dev
)) == NULL
)
758 rv
= (*d
->d_ioctl
)(dev
, cmd
, data
, flag
, l
);
765 bdev_dump(dev_t dev
, daddr_t addr
, void *data
, size_t sz
)
767 const struct bdevsw
*d
;
771 * Dump can be called without the device open. Since it can
772 * currently only be called with the system paused (and in a
773 * potentially unstable state), we don't perform any locking.
775 if ((d
= bdevsw_lookup(dev
)) == NULL
)
779 rv
= (*d
->d_dump
)(dev
, addr
, data
, sz
);
788 const struct bdevsw
*d
;
790 if ((d
= bdevsw_lookup(dev
)) == NULL
)
792 return d
->d_flag
& D_TYPEMASK
;
796 cdev_open(dev_t dev
, int flag
, int devtype
, lwp_t
*l
)
798 const struct cdevsw
*d
;
802 * For open we need to lock, in order to synchronize
803 * with attach/detach.
805 mutex_enter(&device_lock
);
806 d
= cdevsw_lookup(dev
);
807 mutex_exit(&device_lock
);
812 rv
= (*d
->d_open
)(dev
, flag
, devtype
, l
);
819 cdev_close(dev_t dev
, int flag
, int devtype
, lwp_t
*l
)
821 const struct cdevsw
*d
;
824 if ((d
= cdevsw_lookup(dev
)) == NULL
)
828 rv
= (*d
->d_close
)(dev
, flag
, devtype
, l
);
835 cdev_read(dev_t dev
, struct uio
*uio
, int flag
)
837 const struct cdevsw
*d
;
840 if ((d
= cdevsw_lookup(dev
)) == NULL
)
844 rv
= (*d
->d_read
)(dev
, uio
, flag
);
851 cdev_write(dev_t dev
, struct uio
*uio
, int flag
)
853 const struct cdevsw
*d
;
856 if ((d
= cdevsw_lookup(dev
)) == NULL
)
860 rv
= (*d
->d_write
)(dev
, uio
, flag
);
867 cdev_ioctl(dev_t dev
, u_long cmd
, void *data
, int flag
, lwp_t
*l
)
869 const struct cdevsw
*d
;
872 if ((d
= cdevsw_lookup(dev
)) == NULL
)
876 rv
= (*d
->d_ioctl
)(dev
, cmd
, data
, flag
, l
);
883 cdev_stop(struct tty
*tp
, int flag
)
885 const struct cdevsw
*d
;
888 if ((d
= cdevsw_lookup(tp
->t_dev
)) == NULL
)
892 (*d
->d_stop
)(tp
, flag
);
899 const struct cdevsw
*d
;
901 if ((d
= cdevsw_lookup(dev
)) == NULL
)
904 /* XXX Check if necessary. */
905 if (d
->d_tty
== NULL
)
908 return (*d
->d_tty
)(dev
);
912 cdev_poll(dev_t dev
, int flag
, lwp_t
*l
)
914 const struct cdevsw
*d
;
917 if ((d
= cdevsw_lookup(dev
)) == NULL
)
921 rv
= (*d
->d_poll
)(dev
, flag
, l
);
928 cdev_mmap(dev_t dev
, off_t off
, int flag
)
930 const struct cdevsw
*d
;
934 if ((d
= cdevsw_lookup(dev
)) == NULL
)
935 return (paddr_t
)-1LL;
938 rv
= (*d
->d_mmap
)(dev
, off
, flag
);
945 cdev_kqfilter(dev_t dev
, struct knote
*kn
)
947 const struct cdevsw
*d
;
950 if ((d
= cdevsw_lookup(dev
)) == NULL
)
954 rv
= (*d
->d_kqfilter
)(dev
, kn
);
963 const struct cdevsw
*d
;
965 if ((d
= cdevsw_lookup(dev
)) == NULL
)
967 return d
->d_flag
& D_TYPEMASK
;