/*	$NetBSD: subr_devsw.c,v 1.27 2009/08/18 02:44:37 yamt Exp $	*/

/*-
 * Copyright (c) 2001, 2002, 2007, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by MAEKAWA Masahide <gehenna@NetBSD.org>, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Overview
 *
 *	subr_devsw.c: registers device drivers by name and by major
 *	number, and provides wrapper methods for performing I/O and
 *	other tasks on device drivers, keying on the device number
 *	(dev_t).
 *
 *	When the system is built, the config(8) command generates
 *	static tables of device drivers built into the kernel image
 *	along with their associated methods.  These are recorded in
 *	the cdevsw0 and bdevsw0 tables.  Drivers can also be added to
 *	and removed from the system dynamically.
 *
 * Allocation
 *
 *	When the system initially boots only the statically allocated
 *	indexes (bdevsw0, cdevsw0) are used.  If these overflow due to
 *	allocation, we allocate a fixed block of memory to hold the new,
 *	expanded index.  This "fork" of the table is only ever performed
 *	once in order to guarantee that other threads may safely access
 *	the device tables:
 *
 *	o Once a thread has a "reference" to the table via an earlier
 *	  open() call, we know that the entry in the table must exist
 *	  and so it is safe to access it.
 *
 *	o Regardless of whether other threads see the old or new
 *	  pointers, they will point to a correct device switch
 *	  structure for the operation being performed.
 *
 *	XXX Currently, the wrapper methods such as cdev_read() verify
 *	that a device driver does in fact exist before calling the
 *	associated driver method.  This should be changed so that
 *	once the device has been referenced by a vnode (opened),
 *	calling the other methods should be valid until that reference
 *	is dropped.
 */
71 #include <sys/cdefs.h>
72 __KERNEL_RCSID(0, "$NetBSD: subr_devsw.c,v 1.27 2009/08/18 02:44:37 yamt Exp $");
74 #include <sys/param.h>
75 #include <sys/conf.h>
76 #include <sys/kmem.h>
77 #include <sys/systm.h>
78 #include <sys/poll.h>
79 #include <sys/tty.h>
80 #include <sys/cpu.h>
81 #include <sys/buf.h>
83 #ifdef DEVSW_DEBUG
84 #define DPRINTF(x) printf x
85 #else /* DEVSW_DEBUG */
86 #define DPRINTF(x)
87 #endif /* DEVSW_DEBUG */
89 #define MAXDEVSW 512 /* the maximum of major device number */
90 #define BDEVSW_SIZE (sizeof(struct bdevsw *))
91 #define CDEVSW_SIZE (sizeof(struct cdevsw *))
92 #define DEVSWCONV_SIZE (sizeof(struct devsw_conv))
94 extern const struct bdevsw **bdevsw, *bdevsw0[];
95 extern const struct cdevsw **cdevsw, *cdevsw0[];
96 extern struct devsw_conv *devsw_conv, devsw_conv0[];
97 extern const int sys_bdevsws, sys_cdevsws;
98 extern int max_bdevsws, max_cdevsws, max_devsw_convs;
100 static int bdevsw_attach(const struct bdevsw *, devmajor_t *);
101 static int cdevsw_attach(const struct cdevsw *, devmajor_t *);
102 static void devsw_detach_locked(const struct bdevsw *, const struct cdevsw *);
104 kmutex_t device_lock;
106 void
107 devsw_init(void)
110 KASSERT(sys_bdevsws < MAXDEVSW - 1);
111 KASSERT(sys_cdevsws < MAXDEVSW - 1);
112 mutex_init(&device_lock, MUTEX_DEFAULT, IPL_NONE);
116 devsw_attach(const char *devname,
117 const struct bdevsw *bdev, devmajor_t *bmajor,
118 const struct cdevsw *cdev, devmajor_t *cmajor)
120 struct devsw_conv *conv;
121 char *name;
122 int error, i;
123 size_t len;
125 if (devname == NULL || cdev == NULL)
126 return (EINVAL);
128 mutex_enter(&device_lock);
130 for (i = 0 ; i < max_devsw_convs ; i++) {
131 conv = &devsw_conv[i];
132 if (conv->d_name == NULL || strcmp(devname, conv->d_name) != 0)
133 continue;
135 if (*bmajor < 0)
136 *bmajor = conv->d_bmajor;
137 if (*cmajor < 0)
138 *cmajor = conv->d_cmajor;
140 if (*bmajor != conv->d_bmajor || *cmajor != conv->d_cmajor) {
141 error = EINVAL;
142 goto fail;
144 if ((*bmajor >= 0 && bdev == NULL) || *cmajor < 0) {
145 error = EINVAL;
146 goto fail;
149 if ((*bmajor >= 0 && bdevsw[*bmajor] != NULL) ||
150 cdevsw[*cmajor] != NULL) {
151 error = EEXIST;
152 goto fail;
155 if (bdev != NULL)
156 bdevsw[*bmajor] = bdev;
157 cdevsw[*cmajor] = cdev;
159 mutex_exit(&device_lock);
160 return (0);
163 error = bdevsw_attach(bdev, bmajor);
164 if (error != 0)
165 goto fail;
166 error = cdevsw_attach(cdev, cmajor);
167 if (error != 0) {
168 devsw_detach_locked(bdev, NULL);
169 goto fail;
172 for (i = 0 ; i < max_devsw_convs ; i++) {
173 if (devsw_conv[i].d_name == NULL)
174 break;
176 if (i == max_devsw_convs) {
177 struct devsw_conv *newptr;
178 int old, new;
180 old = max_devsw_convs;
181 new = old + 1;
183 newptr = kmem_zalloc(new * DEVSWCONV_SIZE, KM_NOSLEEP);
184 if (newptr == NULL) {
185 devsw_detach_locked(bdev, cdev);
186 error = ENOMEM;
187 goto fail;
189 newptr[old].d_name = NULL;
190 newptr[old].d_bmajor = -1;
191 newptr[old].d_cmajor = -1;
192 memcpy(newptr, devsw_conv, old * DEVSWCONV_SIZE);
193 if (devsw_conv != devsw_conv0)
194 kmem_free(devsw_conv, old * DEVSWCONV_SIZE);
195 devsw_conv = newptr;
196 max_devsw_convs = new;
199 len = strlen(devname) + 1;
200 name = kmem_alloc(len, KM_NOSLEEP);
201 if (name == NULL) {
202 devsw_detach_locked(bdev, cdev);
203 error = ENOMEM;
204 goto fail;
206 strlcpy(name, devname, len);
208 devsw_conv[i].d_name = name;
209 devsw_conv[i].d_bmajor = *bmajor;
210 devsw_conv[i].d_cmajor = *cmajor;
212 mutex_exit(&device_lock);
213 return (0);
214 fail:
215 mutex_exit(&device_lock);
216 return (error);
219 static int
220 bdevsw_attach(const struct bdevsw *devsw, devmajor_t *devmajor)
222 const struct bdevsw **newptr;
223 devmajor_t bmajor;
224 int i;
226 KASSERT(mutex_owned(&device_lock));
228 if (devsw == NULL)
229 return (0);
231 if (*devmajor < 0) {
232 for (bmajor = sys_bdevsws ; bmajor < max_bdevsws ; bmajor++) {
233 if (bdevsw[bmajor] != NULL)
234 continue;
235 for (i = 0 ; i < max_devsw_convs ; i++) {
236 if (devsw_conv[i].d_bmajor == bmajor)
237 break;
239 if (i != max_devsw_convs)
240 continue;
241 break;
243 *devmajor = bmajor;
246 if (*devmajor >= MAXDEVSW) {
247 printf("bdevsw_attach: block majors exhausted");
248 return (ENOMEM);
251 if (*devmajor >= max_bdevsws) {
252 KASSERT(bdevsw == bdevsw0);
253 newptr = kmem_zalloc(MAXDEVSW * BDEVSW_SIZE, KM_NOSLEEP);
254 if (newptr == NULL)
255 return (ENOMEM);
256 memcpy(newptr, bdevsw, max_bdevsws * BDEVSW_SIZE);
257 bdevsw = newptr;
258 max_bdevsws = MAXDEVSW;
261 if (bdevsw[*devmajor] != NULL)
262 return (EEXIST);
264 bdevsw[*devmajor] = devsw;
266 return (0);
269 static int
270 cdevsw_attach(const struct cdevsw *devsw, devmajor_t *devmajor)
272 const struct cdevsw **newptr;
273 devmajor_t cmajor;
274 int i;
276 KASSERT(mutex_owned(&device_lock));
278 if (*devmajor < 0) {
279 for (cmajor = sys_cdevsws ; cmajor < max_cdevsws ; cmajor++) {
280 if (cdevsw[cmajor] != NULL)
281 continue;
282 for (i = 0 ; i < max_devsw_convs ; i++) {
283 if (devsw_conv[i].d_cmajor == cmajor)
284 break;
286 if (i != max_devsw_convs)
287 continue;
288 break;
290 *devmajor = cmajor;
293 if (*devmajor >= MAXDEVSW) {
294 printf("cdevsw_attach: character majors exhausted");
295 return (ENOMEM);
298 if (*devmajor >= max_cdevsws) {
299 KASSERT(cdevsw == cdevsw0);
300 newptr = kmem_zalloc(MAXDEVSW * CDEVSW_SIZE, KM_NOSLEEP);
301 if (newptr == NULL)
302 return (ENOMEM);
303 memcpy(newptr, cdevsw, max_cdevsws * CDEVSW_SIZE);
304 cdevsw = newptr;
305 max_cdevsws = MAXDEVSW;
308 if (cdevsw[*devmajor] != NULL)
309 return (EEXIST);
311 cdevsw[*devmajor] = devsw;
313 return (0);
316 static void
317 devsw_detach_locked(const struct bdevsw *bdev, const struct cdevsw *cdev)
319 int i;
321 KASSERT(mutex_owned(&device_lock));
323 if (bdev != NULL) {
324 for (i = 0 ; i < max_bdevsws ; i++) {
325 if (bdevsw[i] != bdev)
326 continue;
327 bdevsw[i] = NULL;
328 break;
331 if (cdev != NULL) {
332 for (i = 0 ; i < max_cdevsws ; i++) {
333 if (cdevsw[i] != cdev)
334 continue;
335 cdevsw[i] = NULL;
336 break;
342 devsw_detach(const struct bdevsw *bdev, const struct cdevsw *cdev)
345 mutex_enter(&device_lock);
346 devsw_detach_locked(bdev, cdev);
347 mutex_exit(&device_lock);
348 return 0;
352 * Look up a block device by number.
354 * => Caller must ensure that the device is attached.
356 const struct bdevsw *
357 bdevsw_lookup(dev_t dev)
359 devmajor_t bmajor;
361 if (dev == NODEV)
362 return (NULL);
363 bmajor = major(dev);
364 if (bmajor < 0 || bmajor >= max_bdevsws)
365 return (NULL);
367 return (bdevsw[bmajor]);
371 * Look up a character device by number.
373 * => Caller must ensure that the device is attached.
375 const struct cdevsw *
376 cdevsw_lookup(dev_t dev)
378 devmajor_t cmajor;
380 if (dev == NODEV)
381 return (NULL);
382 cmajor = major(dev);
383 if (cmajor < 0 || cmajor >= max_cdevsws)
384 return (NULL);
386 return (cdevsw[cmajor]);
390 * Look up a block device by reference to its operations set.
392 * => Caller must ensure that the device is not detached, and therefore
393 * that the returned major is still valid when dereferenced.
395 devmajor_t
396 bdevsw_lookup_major(const struct bdevsw *bdev)
398 devmajor_t bmajor;
400 for (bmajor = 0 ; bmajor < max_bdevsws ; bmajor++) {
401 if (bdevsw[bmajor] == bdev)
402 return (bmajor);
405 return (NODEVMAJOR);
409 * Look up a character device by reference to its operations set.
411 * => Caller must ensure that the device is not detached, and therefore
412 * that the returned major is still valid when dereferenced.
414 devmajor_t
415 cdevsw_lookup_major(const struct cdevsw *cdev)
417 devmajor_t cmajor;
419 for (cmajor = 0 ; cmajor < max_cdevsws ; cmajor++) {
420 if (cdevsw[cmajor] == cdev)
421 return (cmajor);
424 return (NODEVMAJOR);
428 * Convert from block major number to name.
430 * => Caller must ensure that the device is not detached, and therefore
431 * that the name pointer is still valid when dereferenced.
433 const char *
434 devsw_blk2name(devmajor_t bmajor)
436 const char *name;
437 devmajor_t cmajor;
438 int i;
440 name = NULL;
441 cmajor = -1;
443 mutex_enter(&device_lock);
444 if (bmajor < 0 || bmajor >= max_bdevsws || bdevsw[bmajor] == NULL) {
445 mutex_exit(&device_lock);
446 return (NULL);
448 for (i = 0 ; i < max_devsw_convs; i++) {
449 if (devsw_conv[i].d_bmajor == bmajor) {
450 cmajor = devsw_conv[i].d_cmajor;
451 break;
454 if (cmajor >= 0 && cmajor < max_cdevsws && cdevsw[cmajor] != NULL)
455 name = devsw_conv[i].d_name;
456 mutex_exit(&device_lock);
458 return (name);
462 * Convert char major number to device driver name.
464 const char *
465 cdevsw_getname(devmajor_t major)
467 const char *name;
468 int i;
470 name = NULL;
472 if (major < 0)
473 return (NULL);
475 mutex_enter(&device_lock);
476 for (i = 0 ; i < max_devsw_convs; i++) {
477 if (devsw_conv[i].d_cmajor == major) {
478 name = devsw_conv[i].d_name;
479 break;
482 mutex_exit(&device_lock);
483 return (name);
487 * Convert block major number to device driver name.
489 const char *
490 bdevsw_getname(devmajor_t major)
492 const char *name;
493 int i;
495 name = NULL;
497 if (major < 0)
498 return (NULL);
500 mutex_enter(&device_lock);
501 for (i = 0 ; i < max_devsw_convs; i++) {
502 if (devsw_conv[i].d_bmajor == major) {
503 name = devsw_conv[i].d_name;
504 break;
507 mutex_exit(&device_lock);
508 return (name);
512 * Convert from device name to block major number.
514 * => Caller must ensure that the device is not detached, and therefore
515 * that the major number is still valid when dereferenced.
517 devmajor_t
518 devsw_name2blk(const char *name, char *devname, size_t devnamelen)
520 struct devsw_conv *conv;
521 devmajor_t bmajor;
522 int i;
524 if (name == NULL)
525 return (NODEVMAJOR);
527 mutex_enter(&device_lock);
528 for (i = 0 ; i < max_devsw_convs ; i++) {
529 size_t len;
531 conv = &devsw_conv[i];
532 if (conv->d_name == NULL)
533 continue;
534 len = strlen(conv->d_name);
535 if (strncmp(conv->d_name, name, len) != 0)
536 continue;
537 if (*(name +len) && !isdigit(*(name + len)))
538 continue;
539 bmajor = conv->d_bmajor;
540 if (bmajor < 0 || bmajor >= max_bdevsws ||
541 bdevsw[bmajor] == NULL)
542 break;
543 if (devname != NULL) {
544 #ifdef DEVSW_DEBUG
545 if (strlen(conv->d_name) >= devnamelen)
546 printf("devsw_name2blk: too short buffer");
547 #endif /* DEVSW_DEBUG */
548 strncpy(devname, conv->d_name, devnamelen);
549 devname[devnamelen - 1] = '\0';
551 mutex_exit(&device_lock);
552 return (bmajor);
555 mutex_exit(&device_lock);
556 return (NODEVMAJOR);
560 * Convert from device name to char major number.
562 * => Caller must ensure that the device is not detached, and therefore
563 * that the major number is still valid when dereferenced.
565 devmajor_t
566 devsw_name2chr(const char *name, char *devname, size_t devnamelen)
568 struct devsw_conv *conv;
569 devmajor_t cmajor;
570 int i;
572 if (name == NULL)
573 return (NODEVMAJOR);
575 mutex_enter(&device_lock);
576 for (i = 0 ; i < max_devsw_convs ; i++) {
577 size_t len;
579 conv = &devsw_conv[i];
580 if (conv->d_name == NULL)
581 continue;
582 len = strlen(conv->d_name);
583 if (strncmp(conv->d_name, name, len) != 0)
584 continue;
585 if (*(name +len) && !isdigit(*(name + len)))
586 continue;
587 cmajor = conv->d_cmajor;
588 if (cmajor < 0 || cmajor >= max_cdevsws ||
589 cdevsw[cmajor] == NULL)
590 break;
591 if (devname != NULL) {
592 #ifdef DEVSW_DEBUG
593 if (strlen(conv->d_name) >= devnamelen)
594 printf("devsw_name2chr: too short buffer");
595 #endif /* DEVSW_DEBUG */
596 strncpy(devname, conv->d_name, devnamelen);
597 devname[devnamelen - 1] = '\0';
599 mutex_exit(&device_lock);
600 return (cmajor);
603 mutex_exit(&device_lock);
604 return (NODEVMAJOR);
608 * Convert from character dev_t to block dev_t.
610 * => Caller must ensure that the device is not detached, and therefore
611 * that the major number is still valid when dereferenced.
613 dev_t
614 devsw_chr2blk(dev_t cdev)
616 devmajor_t bmajor, cmajor;
617 int i;
618 dev_t rv;
620 cmajor = major(cdev);
621 bmajor = NODEVMAJOR;
622 rv = NODEV;
624 mutex_enter(&device_lock);
625 if (cmajor < 0 || cmajor >= max_cdevsws || cdevsw[cmajor] == NULL) {
626 mutex_exit(&device_lock);
627 return (NODEV);
629 for (i = 0 ; i < max_devsw_convs ; i++) {
630 if (devsw_conv[i].d_cmajor == cmajor) {
631 bmajor = devsw_conv[i].d_bmajor;
632 break;
635 if (bmajor >= 0 && bmajor < max_bdevsws && bdevsw[bmajor] != NULL)
636 rv = makedev(bmajor, minor(cdev));
637 mutex_exit(&device_lock);
639 return (rv);
643 * Convert from block dev_t to character dev_t.
645 * => Caller must ensure that the device is not detached, and therefore
646 * that the major number is still valid when dereferenced.
648 dev_t
649 devsw_blk2chr(dev_t bdev)
651 devmajor_t bmajor, cmajor;
652 int i;
653 dev_t rv;
655 bmajor = major(bdev);
656 cmajor = NODEVMAJOR;
657 rv = NODEV;
659 mutex_enter(&device_lock);
660 if (bmajor < 0 || bmajor >= max_bdevsws || bdevsw[bmajor] == NULL) {
661 mutex_exit(&device_lock);
662 return (NODEV);
664 for (i = 0 ; i < max_devsw_convs ; i++) {
665 if (devsw_conv[i].d_bmajor == bmajor) {
666 cmajor = devsw_conv[i].d_cmajor;
667 break;
670 if (cmajor >= 0 && cmajor < max_cdevsws && cdevsw[cmajor] != NULL)
671 rv = makedev(cmajor, minor(bdev));
672 mutex_exit(&device_lock);
674 return (rv);
/*
 * Device access methods.
 *
 * Drivers not flagged D_MPSAFE still require the kernel lock around
 * their methods; `mpflag' records whether the lock was taken so
 * DEV_UNLOCK can pair with DEV_LOCK.
 */

#define	DEV_LOCK(d)						\
	if ((mpflag = (d->d_flag & D_MPSAFE)) == 0) {		\
		KERNEL_LOCK(1, NULL);				\
	}

#define	DEV_UNLOCK(d)						\
	if (mpflag == 0) {					\
		KERNEL_UNLOCK_ONE(NULL);			\
	}
692 bdev_open(dev_t dev, int flag, int devtype, lwp_t *l)
694 const struct bdevsw *d;
695 int rv, mpflag;
698 * For open we need to lock, in order to synchronize
699 * with attach/detach.
701 mutex_enter(&device_lock);
702 d = bdevsw_lookup(dev);
703 mutex_exit(&device_lock);
704 if (d == NULL)
705 return ENXIO;
707 DEV_LOCK(d);
708 rv = (*d->d_open)(dev, flag, devtype, l);
709 DEV_UNLOCK(d);
711 return rv;
715 bdev_close(dev_t dev, int flag, int devtype, lwp_t *l)
717 const struct bdevsw *d;
718 int rv, mpflag;
720 if ((d = bdevsw_lookup(dev)) == NULL)
721 return ENXIO;
723 DEV_LOCK(d);
724 rv = (*d->d_close)(dev, flag, devtype, l);
725 DEV_UNLOCK(d);
727 return rv;
730 void
731 bdev_strategy(struct buf *bp)
733 const struct bdevsw *d;
734 int mpflag;
736 if ((d = bdevsw_lookup(bp->b_dev)) == NULL) {
737 bp->b_error = ENXIO;
738 bp->b_resid = bp->b_bcount;
739 biodone(bp);
740 return;
743 DEV_LOCK(d);
744 (*d->d_strategy)(bp);
745 DEV_UNLOCK(d);
749 bdev_ioctl(dev_t dev, u_long cmd, void *data, int flag, lwp_t *l)
751 const struct bdevsw *d;
752 int rv, mpflag;
754 if ((d = bdevsw_lookup(dev)) == NULL)
755 return ENXIO;
757 DEV_LOCK(d);
758 rv = (*d->d_ioctl)(dev, cmd, data, flag, l);
759 DEV_UNLOCK(d);
761 return rv;
765 bdev_dump(dev_t dev, daddr_t addr, void *data, size_t sz)
767 const struct bdevsw *d;
768 int rv;
771 * Dump can be called without the device open. Since it can
772 * currently only be called with the system paused (and in a
773 * potentially unstable state), we don't perform any locking.
775 if ((d = bdevsw_lookup(dev)) == NULL)
776 return ENXIO;
778 /* DEV_LOCK(d); */
779 rv = (*d->d_dump)(dev, addr, data, sz);
780 /* DEV_UNLOCK(d); */
782 return rv;
786 bdev_type(dev_t dev)
788 const struct bdevsw *d;
790 if ((d = bdevsw_lookup(dev)) == NULL)
791 return D_OTHER;
792 return d->d_flag & D_TYPEMASK;
796 cdev_open(dev_t dev, int flag, int devtype, lwp_t *l)
798 const struct cdevsw *d;
799 int rv, mpflag;
802 * For open we need to lock, in order to synchronize
803 * with attach/detach.
805 mutex_enter(&device_lock);
806 d = cdevsw_lookup(dev);
807 mutex_exit(&device_lock);
808 if (d == NULL)
809 return ENXIO;
811 DEV_LOCK(d);
812 rv = (*d->d_open)(dev, flag, devtype, l);
813 DEV_UNLOCK(d);
815 return rv;
819 cdev_close(dev_t dev, int flag, int devtype, lwp_t *l)
821 const struct cdevsw *d;
822 int rv, mpflag;
824 if ((d = cdevsw_lookup(dev)) == NULL)
825 return ENXIO;
827 DEV_LOCK(d);
828 rv = (*d->d_close)(dev, flag, devtype, l);
829 DEV_UNLOCK(d);
831 return rv;
835 cdev_read(dev_t dev, struct uio *uio, int flag)
837 const struct cdevsw *d;
838 int rv, mpflag;
840 if ((d = cdevsw_lookup(dev)) == NULL)
841 return ENXIO;
843 DEV_LOCK(d);
844 rv = (*d->d_read)(dev, uio, flag);
845 DEV_UNLOCK(d);
847 return rv;
851 cdev_write(dev_t dev, struct uio *uio, int flag)
853 const struct cdevsw *d;
854 int rv, mpflag;
856 if ((d = cdevsw_lookup(dev)) == NULL)
857 return ENXIO;
859 DEV_LOCK(d);
860 rv = (*d->d_write)(dev, uio, flag);
861 DEV_UNLOCK(d);
863 return rv;
867 cdev_ioctl(dev_t dev, u_long cmd, void *data, int flag, lwp_t *l)
869 const struct cdevsw *d;
870 int rv, mpflag;
872 if ((d = cdevsw_lookup(dev)) == NULL)
873 return ENXIO;
875 DEV_LOCK(d);
876 rv = (*d->d_ioctl)(dev, cmd, data, flag, l);
877 DEV_UNLOCK(d);
879 return rv;
882 void
883 cdev_stop(struct tty *tp, int flag)
885 const struct cdevsw *d;
886 int mpflag;
888 if ((d = cdevsw_lookup(tp->t_dev)) == NULL)
889 return;
891 DEV_LOCK(d);
892 (*d->d_stop)(tp, flag);
893 DEV_UNLOCK(d);
896 struct tty *
897 cdev_tty(dev_t dev)
899 const struct cdevsw *d;
901 if ((d = cdevsw_lookup(dev)) == NULL)
902 return NULL;
904 /* XXX Check if necessary. */
905 if (d->d_tty == NULL)
906 return NULL;
908 return (*d->d_tty)(dev);
912 cdev_poll(dev_t dev, int flag, lwp_t *l)
914 const struct cdevsw *d;
915 int rv, mpflag;
917 if ((d = cdevsw_lookup(dev)) == NULL)
918 return POLLERR;
920 DEV_LOCK(d);
921 rv = (*d->d_poll)(dev, flag, l);
922 DEV_UNLOCK(d);
924 return rv;
927 paddr_t
928 cdev_mmap(dev_t dev, off_t off, int flag)
930 const struct cdevsw *d;
931 paddr_t rv;
932 int mpflag;
934 if ((d = cdevsw_lookup(dev)) == NULL)
935 return (paddr_t)-1LL;
937 DEV_LOCK(d);
938 rv = (*d->d_mmap)(dev, off, flag);
939 DEV_UNLOCK(d);
941 return rv;
945 cdev_kqfilter(dev_t dev, struct knote *kn)
947 const struct cdevsw *d;
948 int rv, mpflag;
950 if ((d = cdevsw_lookup(dev)) == NULL)
951 return ENXIO;
953 DEV_LOCK(d);
954 rv = (*d->d_kqfilter)(dev, kn);
955 DEV_UNLOCK(d);
957 return rv;
961 cdev_type(dev_t dev)
963 const struct cdevsw *d;
965 if ((d = cdevsw_lookup(dev)) == NULL)
966 return D_OTHER;
967 return d->d_flag & D_TYPEMASK;