/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2015, Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2012, Alexey Zaytsev <alexey.zaytsev@gmail.com>
 * Copyright 2017, Joyent Inc.
 */

#include <sys/modctl.h>
#include <sys/blkdev.h>
#include <sys/types.h>
#include <sys/errno.h>
#include <sys/param.h>
#include <sys/stropts.h>
#include <sys/stream.h>
#include <sys/strsubr.h>
#include <sys/devops.h>
#include <sys/ksynch.h>
#include <sys/modctl.h>
#include <sys/debug.h>
#include <sys/containerof.h>
#include "virtiovar.h"
#include "virtioreg.h"
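
/* Feature bits (legacy virtio-blk) */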
#define	VIRTIO_BLK_F_BARRIER	(1<<0)
#define	VIRTIO_BLK_F_SIZE_MAX	(1<<1)
#define	VIRTIO_BLK_F_SEG_MAX	(1<<2)
#define	VIRTIO_BLK_F_GEOMETRY	(1<<4)
#define	VIRTIO_BLK_F_RO		(1<<5)
#define	VIRTIO_BLK_F_BLK_SIZE	(1<<6)
#define	VIRTIO_BLK_F_SCSI	(1<<7)
#define	VIRTIO_BLK_F_FLUSH	(1<<9)
#define	VIRTIO_BLK_F_TOPOLOGY	(1<<10)

/* Configuration registers */
#define	VIRTIO_BLK_CONFIG_CAPACITY	0	/* 64bit */
#define	VIRTIO_BLK_CONFIG_SIZE_MAX	8	/* 32bit */
#define	VIRTIO_BLK_CONFIG_SEG_MAX	12	/* 32bit */
#define	VIRTIO_BLK_CONFIG_GEOMETRY_C	16	/* 16bit */
#define	VIRTIO_BLK_CONFIG_GEOMETRY_H	18	/* 8bit */
#define	VIRTIO_BLK_CONFIG_GEOMETRY_S	19	/* 8bit */
#define	VIRTIO_BLK_CONFIG_BLK_SIZE	20	/* 32bit */
#define	VIRTIO_BLK_CONFIG_TOPO_PBEXP	24	/* 8bit */
#define	VIRTIO_BLK_CONFIG_TOPO_ALIGN	25	/* 8bit */
#define	VIRTIO_BLK_CONFIG_TOPO_MIN_SZ	26	/* 16bit */
#define	VIRTIO_BLK_CONFIG_TOPO_OPT_SZ	28	/* 32bit */
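
/* Request type codes (vioblk_req_hdr.type) */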
#define	VIRTIO_BLK_T_IN			0
#define	VIRTIO_BLK_T_OUT		1
#define	VIRTIO_BLK_T_SCSI_CMD		2
#define	VIRTIO_BLK_T_SCSI_CMD_OUT	3
#define	VIRTIO_BLK_T_FLUSH		4
#define	VIRTIO_BLK_T_FLUSH_OUT		5
#define	VIRTIO_BLK_T_GET_ID		8
#define	VIRTIO_BLK_T_BARRIER		0x80000000

#define	VIRTIO_BLK_ID_BYTES	20	/* devid */
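
/* Request status codes returned by the device in the status byte */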
#define	VIRTIO_BLK_S_OK		0
#define	VIRTIO_BLK_S_IOERR	1
#define	VIRTIO_BLK_S_UNSUPP	2

#define	DEF_MAXINDIRECT		(128)
#define	DEF_MAXSECTOR		(4096)
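
/*
 * Poison value written to req->xfer once a request completes; the interrupt
 * handler uses it to detect a descriptor completing twice.
 */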
#define	VIOBLK_POISON		0xdead0001dead0001

static char vioblk_ident[] = "VirtIO block driver";

/* Request header structure */
struct vioblk_req_hdr {
	uint32_t		type;	/* VIRTIO_BLK_T_* */
	uint32_t		ioprio;
	uint64_t		sector;
};

/* Per-request state tracked by the driver */
struct vioblk_req {
	struct vioblk_req_hdr	hdr;
	uint8_t			status;
	uint8_t			unused[3];
	unsigned int		ndmac;
	ddi_dma_handle_t	dmah;
	ddi_dma_handle_t	bd_dmah;
	ddi_dma_cookie_t	dmac;
	bd_xfer_t		*xfer;
};

struct vioblk_stats {
	struct kstat_named	sts_rw_outofmemory;
	struct kstat_named	sts_rw_badoffset;
	struct kstat_named	sts_rw_queuemax;
	struct kstat_named	sts_rw_cookiesmax;
	struct kstat_named	sts_rw_cacheflush;
	struct kstat_named	sts_intr_queuemax;
	struct kstat_named	sts_intr_total;
	struct kstat_named	sts_io_errors;
	struct kstat_named	sts_unsupp_errors;
	struct kstat_named	sts_nxio_errors;
};

struct vioblk_lstats {
	uint64_t		rw_cacheflush;
	uint64_t		intr_total;
	unsigned int		rw_cookiesmax;
	unsigned int		intr_queuemax;
	unsigned int		io_errors;
	unsigned int		unsupp_errors;
	unsigned int		nxio_errors;
};

struct vioblk_softc {
	dev_info_t		*sc_dev; /* mirrors virtio_softc->sc_dev */
	struct virtio_softc	sc_virtio;
	struct virtqueue	*sc_vq;
	bd_handle_t		bd_h;
	struct vioblk_req	*sc_reqs;
	struct vioblk_stats	*ks_data;
	kstat_t			*sc_intrstat;
	uint64_t		sc_capacity;
	uint64_t		sc_nblks;
	struct vioblk_lstats	sc_stats;
	boolean_t		sc_in_poll_mode;
	boolean_t		sc_readonly;
	int			sc_blk_size;
	int			sc_pblk_size;
	int			sc_seg_max;
	int			sc_seg_size_max;
	kmutex_t		lock_devid;
	kcondvar_t		cv_devid;
	char			devid[VIRTIO_BLK_ID_BYTES + 1];
};

static int vioblk_get_id(struct vioblk_softc *sc);

static int vioblk_read(void *arg, bd_xfer_t *xfer);
static int vioblk_write(void *arg, bd_xfer_t *xfer);
static int vioblk_flush(void *arg, bd_xfer_t *xfer);
static void vioblk_driveinfo(void *arg, bd_drive_t *drive);
static int vioblk_mediainfo(void *arg, bd_media_t *media);
static int vioblk_devid_init(void *, dev_info_t *, ddi_devid_t *);
uint_t vioblk_int_handler(caddr_t arg1, caddr_t arg2);

static bd_ops_t vioblk_ops = {
	BD_OPS_VERSION_0,
	vioblk_driveinfo,
	vioblk_mediainfo,
	vioblk_devid_init,
	vioblk_flush,
	vioblk_read,
	vioblk_write,
};

static int vioblk_quiesce(dev_info_t *);
static int vioblk_attach(dev_info_t *, ddi_attach_cmd_t);
static int vioblk_detach(dev_info_t *, ddi_detach_cmd_t);

static struct dev_ops vioblk_dev_ops = {
	DEVO_REV,
	0,
	ddi_no_info,
	nulldev,	/* identify */
	nulldev,	/* probe */
	vioblk_attach,	/* attach */
	vioblk_detach,	/* detach */
	nodev,		/* reset */
	NULL,		/* cb_ops */
	NULL,		/* bus_ops */
	NULL,		/* power */
	vioblk_quiesce	/* quiesce */
};

/* Standard module linkage initialization for a loadable kernel driver */
extern struct mod_ops mod_driverops;

static struct modldrv modldrv = {
	&mod_driverops,		/* Type of module.  This one is a driver */
	vioblk_ident,		/* short description */
	&vioblk_dev_ops		/* driver specific ops */
};

static struct modlinkage modlinkage = {
	MODREV_1,
	{
		(void *)&modldrv,
		NULL
	}
};

ddi_device_acc_attr_t vioblk_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,	/* virtio is always native byte order */
	DDI_STORECACHING_OK_ACC,
	DDI_DEFAULT_ACC
};

/* DMA attr for the header/status blocks. */
static ddi_dma_attr_t vioblk_req_dma_attr = {
	DMA_ATTR_V0,			/* dma_attr version	*/
	0,				/* dma_attr_addr_lo	*/
	0xFFFFFFFFFFFFFFFFull,		/* dma_attr_addr_hi	*/
	0x00000000FFFFFFFFull,		/* dma_attr_count_max	*/
	1,				/* dma_attr_align	*/
	1,				/* dma_attr_burstsizes	*/
	1,				/* dma_attr_minxfer	*/
	0xFFFFFFFFull,			/* dma_attr_maxxfer	*/
	0xFFFFFFFFFFFFFFFFull,		/* dma_attr_seg		*/
	1,				/* dma_attr_sgllen	*/
	1,				/* dma_attr_granular	*/
	0,				/* dma_attr_flags	*/
};

/* DMA attr for the data blocks. */
static ddi_dma_attr_t vioblk_bd_dma_attr = {
	DMA_ATTR_V0,			/* dma_attr version	*/
	0,				/* dma_attr_addr_lo	*/
	0xFFFFFFFFFFFFFFFFull,		/* dma_attr_addr_hi	*/
	0x00000000FFFFFFFFull,		/* dma_attr_count_max	*/
	1,				/* dma_attr_align	*/
	1,				/* dma_attr_burstsizes	*/
	1,				/* dma_attr_minxfer	*/
	0,				/* dma_attr_maxxfer, set in attach */
	0xFFFFFFFFFFFFFFFFull,		/* dma_attr_seg		*/
	0,				/* dma_attr_sgllen, set in attach */
	1,				/* dma_attr_granular	*/
	0,				/* dma_attr_flags	*/
};
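
/*
 * Common request path: add the request header as an indirect descriptor,
 * then the data cookies (for reads and writes), then the one-byte status
 * buffer, and push the whole chain to the device.
 */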
static int
vioblk_rw(struct vioblk_softc *sc, bd_xfer_t *xfer, int type,
    uint32_t len)
{
	struct vioblk_req *req;
	struct vq_entry *ve_hdr;
	int total_cookies, write;

	write = (type == VIRTIO_BLK_T_OUT ||
	    type == VIRTIO_BLK_T_FLUSH_OUT) ? 1 : 0;
	total_cookies = 2;

	if ((xfer->x_blkno + xfer->x_nblks) > sc->sc_nblks) {
		sc->ks_data->sts_rw_badoffset.value.ui64++;
		return (EINVAL);
	}

	/* allocate top entry */
	ve_hdr = vq_alloc_entry(sc->sc_vq);
	if (ve_hdr == NULL) {
		sc->ks_data->sts_rw_outofmemory.value.ui64++;
		return (ENOMEM);
	}

	/* getting request */
	req = &sc->sc_reqs[ve_hdr->qe_index];
	req->hdr.type = type;
	req->hdr.sector = xfer->x_blkno;
	req->xfer = xfer;

	/* Header */
	virtio_ve_add_indirect_buf(ve_hdr, req->dmac.dmac_laddress,
	    sizeof (struct vioblk_req_hdr), B_TRUE);

	/* Payload */
	if (len > 0) {
		virtio_ve_add_cookie(ve_hdr, xfer->x_dmah, xfer->x_dmac,
		    xfer->x_ndmac, write ? B_TRUE : B_FALSE);
		total_cookies += xfer->x_ndmac;
	}

	/* Status */
	virtio_ve_add_indirect_buf(ve_hdr,
	    req->dmac.dmac_laddress + sizeof (struct vioblk_req_hdr),
	    sizeof (uint8_t), B_FALSE);

	/* sending the whole chain to the device */
	virtio_push_chain(ve_hdr, B_TRUE);

	if (sc->sc_stats.rw_cookiesmax < total_cookies)
		sc->sc_stats.rw_cookiesmax = total_cookies;

	return (DDI_SUCCESS);
}

/*
 * Now in polling mode.  Interrupts are off, so we
 * 1) poll for the already queued requests to complete.
 * 2) push our request.
 * 3) wait for our request to complete.
 */
static int
vioblk_rw_poll(struct vioblk_softc *sc, bd_xfer_t *xfer,
    int type, uint32_t len)
{
	clock_t tmout;
	int ret;

	ASSERT(xfer->x_flags & BD_XFER_POLL);

	/* Prevent a hard hang. */
	tmout = drv_usectohz(30000000);

	/* Poll for an empty queue */
	while (vq_num_used(sc->sc_vq)) {
		/* Check if any pending requests completed. */
		ret = vioblk_int_handler((caddr_t)&sc->sc_virtio, NULL);
		if (ret != DDI_INTR_CLAIMED) {
			/* Nothing completed yet; back off briefly. */
			drv_usecwait(10);
			if (--tmout < 0)
				return (ETIMEDOUT);
		}
	}

	ret = vioblk_rw(sc, xfer, type, len);
	if (ret)
		return (ret);

	tmout = drv_usectohz(30000000);
	/* Poll for an empty queue again. */
	while (vq_num_used(sc->sc_vq)) {
		/* Check if any pending requests completed. */
		ret = vioblk_int_handler((caddr_t)&sc->sc_virtio, NULL);
		if (ret != DDI_INTR_CLAIMED) {
			/* Nothing completed yet; back off briefly. */
			drv_usecwait(10);
			if (--tmout < 0)
				return (ETIMEDOUT);
		}
	}

	return (DDI_SUCCESS);
}

static int
vioblk_read(void *arg, bd_xfer_t *xfer)
{
	int ret;
	struct vioblk_softc *sc = (void *)arg;

	if (xfer->x_flags & BD_XFER_POLL) {
		if (!sc->sc_in_poll_mode) {
			virtio_stop_vq_intr(sc->sc_vq);
			sc->sc_in_poll_mode = 1;
		}

		ret = vioblk_rw_poll(sc, xfer, VIRTIO_BLK_T_IN,
		    xfer->x_nblks * DEV_BSIZE);
	} else {
		if (sc->sc_in_poll_mode) {
			virtio_start_vq_intr(sc->sc_vq);
			sc->sc_in_poll_mode = 0;
		}

		ret = vioblk_rw(sc, xfer, VIRTIO_BLK_T_IN,
		    xfer->x_nblks * DEV_BSIZE);
	}

	return (ret);
}

static int
vioblk_write(void *arg, bd_xfer_t *xfer)
{
	int ret;
	struct vioblk_softc *sc = (void *)arg;

	if (xfer->x_flags & BD_XFER_POLL) {
		if (!sc->sc_in_poll_mode) {
			virtio_stop_vq_intr(sc->sc_vq);
			sc->sc_in_poll_mode = 1;
		}

		ret = vioblk_rw_poll(sc, xfer, VIRTIO_BLK_T_OUT,
		    xfer->x_nblks * DEV_BSIZE);
	} else {
		if (sc->sc_in_poll_mode) {
			virtio_start_vq_intr(sc->sc_vq);
			sc->sc_in_poll_mode = 0;
		}

		ret = vioblk_rw(sc, xfer, VIRTIO_BLK_T_OUT,
		    xfer->x_nblks * DEV_BSIZE);
	}

	return (ret);
}

static int
vioblk_flush(void *arg, bd_xfer_t *xfer)
{
	int ret;
	struct vioblk_softc *sc = (void *)arg;

	ASSERT((xfer->x_flags & BD_XFER_POLL) == 0);

	ret = vioblk_rw(sc, xfer, VIRTIO_BLK_T_FLUSH_OUT,
	    xfer->x_nblks * DEV_BSIZE);

	if (!ret)
		sc->sc_stats.rw_cacheflush++;

	return (ret);
}

static void
vioblk_driveinfo(void *arg, bd_drive_t *drive)
{
	struct vioblk_softc *sc = (void *)arg;

	drive->d_qsize = sc->sc_vq->vq_num;
	drive->d_removable = B_FALSE;
	drive->d_hotpluggable = B_TRUE;

	drive->d_vendor = "Virtio";
	drive->d_vendor_len = strlen(drive->d_vendor);

	drive->d_product = "Block Device";
	drive->d_product_len = strlen(drive->d_product);

	(void) vioblk_get_id(sc);
	drive->d_serial = sc->devid;
	drive->d_serial_len = strlen(drive->d_serial);

	drive->d_revision = "0000";
	drive->d_revision_len = strlen(drive->d_revision);
}

static int
vioblk_mediainfo(void *arg, bd_media_t *media)
{
	struct vioblk_softc *sc = (void *)arg;

	media->m_nblks = sc->sc_nblks;
	media->m_blksize = sc->sc_blk_size;
	media->m_readonly = sc->sc_readonly;
	media->m_pblksize = sc->sc_pblk_size;
	return (0);
}
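
/*
 * Issue a VIRTIO_BLK_T_GET_ID request and wait (up to 3 seconds) for the
 * interrupt handler to signal cv_devid once the device id has been read
 * into sc->devid.
 */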
static int
vioblk_get_id(struct vioblk_softc *sc)
{
	clock_t deadline;
	int ret;
	bd_xfer_t xfer;

	deadline = ddi_get_lbolt() + (clock_t)drv_usectohz(3 * 1000000);
	(void) memset(&xfer, 0, sizeof (bd_xfer_t));
	xfer.x_nblks = 1;

	ret = ddi_dma_alloc_handle(sc->sc_dev, &vioblk_bd_dma_attr,
	    DDI_DMA_SLEEP, NULL, &xfer.x_dmah);
	if (ret != DDI_SUCCESS)
		goto out_alloc;

	ret = ddi_dma_addr_bind_handle(xfer.x_dmah, NULL, (caddr_t)&sc->devid,
	    VIRTIO_BLK_ID_BYTES, DDI_DMA_READ | DDI_DMA_CONSISTENT,
	    DDI_DMA_SLEEP, NULL, &xfer.x_dmac, &xfer.x_ndmac);
	if (ret != DDI_DMA_MAPPED) {
		ret = DDI_FAILURE;
		goto out_map;
	}

	mutex_enter(&sc->lock_devid);

	ret = vioblk_rw(sc, &xfer, VIRTIO_BLK_T_GET_ID,
	    VIRTIO_BLK_ID_BYTES);
	if (ret) {
		mutex_exit(&sc->lock_devid);
		goto out_rw;
	}

	/* wait for reply */
	ret = cv_timedwait(&sc->cv_devid, &sc->lock_devid, deadline);
	mutex_exit(&sc->lock_devid);

	(void) ddi_dma_unbind_handle(xfer.x_dmah);
	ddi_dma_free_handle(&xfer.x_dmah);

	/* timeout */
	if (ret < 0) {
		dev_err(sc->sc_dev, CE_WARN,
		    "Cannot get devid from the device");
		return (DDI_FAILURE);
	}

	return (0);

out_rw:
	(void) ddi_dma_unbind_handle(xfer.x_dmah);
out_map:
	ddi_dma_free_handle(&xfer.x_dmah);
out_alloc:
	return (ret);
}

static int
vioblk_devid_init(void *arg, dev_info_t *devinfo, ddi_devid_t *devid)
{
	struct vioblk_softc *sc = (void *)arg;
	int ret;

	ret = vioblk_get_id(sc);
	if (ret != DDI_SUCCESS)
		return (ret);

	ret = ddi_devid_init(devinfo, DEVID_ATA_SERIAL,
	    VIRTIO_BLK_ID_BYTES, sc->devid, devid);
	if (ret != DDI_SUCCESS) {
		dev_err(devinfo, CE_WARN, "Cannot build devid from the device");
		return (ret);
	}

	dev_debug(sc->sc_dev, CE_NOTE,
	    "devid %x%x%x%x%x%x%x%x%x%x%x%x%x%x%x%x%x%x%x%x",
	    sc->devid[0], sc->devid[1], sc->devid[2], sc->devid[3],
	    sc->devid[4], sc->devid[5], sc->devid[6], sc->devid[7],
	    sc->devid[8], sc->devid[9], sc->devid[10], sc->devid[11],
	    sc->devid[12], sc->devid[13], sc->devid[14], sc->devid[15],
	    sc->devid[16], sc->devid[17], sc->devid[18], sc->devid[19]);

	return (0);
}

static void
vioblk_show_features(struct vioblk_softc *sc, const char *prefix,
    uint32_t features)
{
	char buf[512];
	char *bufp = buf;
	char *bufend = buf + sizeof (buf);

	/* LINTED E_PTRDIFF_OVERFLOW */
	bufp += snprintf(bufp, bufend - bufp, prefix);

	/* LINTED E_PTRDIFF_OVERFLOW */
	bufp += virtio_show_features(features, bufp, bufend - bufp);

	/* LINTED E_PTRDIFF_OVERFLOW */
	bufp += snprintf(bufp, bufend - bufp, "Vioblk ( ");

	if (features & VIRTIO_BLK_F_BARRIER)
		/* LINTED E_PTRDIFF_OVERFLOW */
		bufp += snprintf(bufp, bufend - bufp, "BARRIER ");
	if (features & VIRTIO_BLK_F_SIZE_MAX)
		/* LINTED E_PTRDIFF_OVERFLOW */
		bufp += snprintf(bufp, bufend - bufp, "SIZE_MAX ");
	if (features & VIRTIO_BLK_F_SEG_MAX)
		/* LINTED E_PTRDIFF_OVERFLOW */
		bufp += snprintf(bufp, bufend - bufp, "SEG_MAX ");
	if (features & VIRTIO_BLK_F_GEOMETRY)
		/* LINTED E_PTRDIFF_OVERFLOW */
		bufp += snprintf(bufp, bufend - bufp, "GEOMETRY ");
	if (features & VIRTIO_BLK_F_RO)
		/* LINTED E_PTRDIFF_OVERFLOW */
		bufp += snprintf(bufp, bufend - bufp, "RO ");
	if (features & VIRTIO_BLK_F_BLK_SIZE)
		/* LINTED E_PTRDIFF_OVERFLOW */
		bufp += snprintf(bufp, bufend - bufp, "BLK_SIZE ");
	if (features & VIRTIO_BLK_F_SCSI)
		/* LINTED E_PTRDIFF_OVERFLOW */
		bufp += snprintf(bufp, bufend - bufp, "SCSI ");
	if (features & VIRTIO_BLK_F_FLUSH)
		/* LINTED E_PTRDIFF_OVERFLOW */
		bufp += snprintf(bufp, bufend - bufp, "FLUSH ");
	if (features & VIRTIO_BLK_F_TOPOLOGY)
		/* LINTED E_PTRDIFF_OVERFLOW */
		bufp += snprintf(bufp, bufend - bufp, "TOPOLOGY ");

	/* LINTED E_PTRDIFF_OVERFLOW */
	bufp += snprintf(bufp, bufend - bufp, ")");

	dev_debug(sc->sc_dev, CE_NOTE, "%s", buf);
}

static int
vioblk_dev_features(struct vioblk_softc *sc)
{
	uint32_t host_features;

	host_features = virtio_negotiate_features(&sc->sc_virtio,
	    VIRTIO_BLK_F_RO |
	    VIRTIO_BLK_F_GEOMETRY |
	    VIRTIO_BLK_F_BLK_SIZE |
	    VIRTIO_BLK_F_FLUSH |
	    VIRTIO_BLK_F_TOPOLOGY |
	    VIRTIO_BLK_F_SEG_MAX |
	    VIRTIO_BLK_F_SIZE_MAX |
	    VIRTIO_F_RING_INDIRECT_DESC);

	vioblk_show_features(sc, "Host features: ", host_features);
	vioblk_show_features(sc, "Negotiated features: ",
	    sc->sc_virtio.sc_features);

	if (!(sc->sc_virtio.sc_features & VIRTIO_F_RING_INDIRECT_DESC)) {
		dev_err(sc->sc_dev, CE_NOTE,
		    "Host does not support RING_INDIRECT_DESC, bye.");
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}
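
/*
 * Interrupt handler for the I/O virtqueue: reap completed chains, translate
 * the virtio status byte into an errno, and hand the transfers back to
 * blkdev (or wake up vioblk_get_id() for devid requests).
 */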
uint_t
vioblk_int_handler(caddr_t arg1, caddr_t arg2)
{
	struct virtio_softc *vsc = (void *)arg1;
	struct vioblk_softc *sc = __containerof(vsc,
	    struct vioblk_softc, sc_virtio);
	struct vq_entry *ve;
	uint32_t len;
	int i = 0, error;

	while ((ve = virtio_pull_chain(sc->sc_vq, &len))) {
		struct vioblk_req *req = &sc->sc_reqs[ve->qe_index];
		bd_xfer_t *xfer = req->xfer;
		uint8_t status = req->status;
		uint32_t type = req->hdr.type;

		if (req->xfer == (void *)VIOBLK_POISON) {
			dev_err(sc->sc_dev, CE_WARN, "Poisoned descriptor!");
			virtio_free_chain(ve);
			return (DDI_INTR_CLAIMED);
		}

		req->xfer = (void *)VIOBLK_POISON;

		/* Note: blkdev tears down the payload mapping for us. */
		virtio_free_chain(ve);

		/* returning payload back to blkdev */
		switch (status) {
		case VIRTIO_BLK_S_OK:
			error = 0;
			break;
		case VIRTIO_BLK_S_IOERR:
			error = EIO;
			sc->sc_stats.io_errors++;
			break;
		case VIRTIO_BLK_S_UNSUPP:
			sc->sc_stats.unsupp_errors++;
			error = ENOTTY;
			break;
		default:
			sc->sc_stats.nxio_errors++;
			error = ENXIO;
			break;
		}

		if (type == VIRTIO_BLK_T_GET_ID) {
			/* notify devid_init */
			mutex_enter(&sc->lock_devid);
			cv_broadcast(&sc->cv_devid);
			mutex_exit(&sc->lock_devid);
		} else {
			bd_xfer_done(xfer, error);
		}

		i++;
	}

	if (sc->sc_stats.intr_queuemax < i)
		sc->sc_stats.intr_queuemax = i;
	sc->sc_stats.intr_total++;

	return (DDI_INTR_CLAIMED);
}

uint_t
vioblk_config_handler(caddr_t arg1, caddr_t arg2)
{
	return (DDI_INTR_CLAIMED);
}

static int
vioblk_register_ints(struct vioblk_softc *sc)
{
	int ret;

	struct virtio_int_handler vioblk_conf_h = {
		vioblk_config_handler
	};

	struct virtio_int_handler vioblk_vq_h[] = {
		{ vioblk_int_handler },
		{ NULL }
	};

	ret = virtio_register_ints(&sc->sc_virtio,
	    &vioblk_conf_h, vioblk_vq_h);

	return (ret);
}

static void
vioblk_free_reqs(struct vioblk_softc *sc)
{
	int i, qsize;

	qsize = sc->sc_vq->vq_num;

	for (i = 0; i < qsize; i++) {
		struct vioblk_req *req = &sc->sc_reqs[i];

		if (req->ndmac)
			(void) ddi_dma_unbind_handle(req->dmah);

		if (req->dmah)
			ddi_dma_free_handle(&req->dmah);
	}

	kmem_free(sc->sc_reqs, sizeof (struct vioblk_req) * qsize);
}
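
/*
 * Pre-allocate one vioblk_req per virtqueue descriptor and bind a small
 * DMA buffer (header plus status byte) for each, so the I/O path never
 * has to allocate.
 */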
static int
vioblk_alloc_reqs(struct vioblk_softc *sc)
{
	int i, qsize;
	int ret;

	qsize = sc->sc_vq->vq_num;

	sc->sc_reqs = kmem_zalloc(sizeof (struct vioblk_req) * qsize, KM_SLEEP);

	for (i = 0; i < qsize; i++) {
		struct vioblk_req *req = &sc->sc_reqs[i];

		ret = ddi_dma_alloc_handle(sc->sc_dev, &vioblk_req_dma_attr,
		    DDI_DMA_SLEEP, NULL, &req->dmah);
		if (ret != DDI_SUCCESS) {
			dev_err(sc->sc_dev, CE_WARN,
			    "Can't allocate dma handle for req "
			    "buffer %d", i);
			goto exit;
		}

		ret = ddi_dma_addr_bind_handle(req->dmah, NULL,
		    (caddr_t)&req->hdr,
		    sizeof (struct vioblk_req_hdr) + sizeof (uint8_t),
		    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
		    NULL, &req->dmac, &req->ndmac);
		if (ret != DDI_DMA_MAPPED) {
			dev_err(sc->sc_dev, CE_WARN,
			    "Can't bind req buffer %d", i);
			goto exit;
		}
	}

	return (0);

exit:
	vioblk_free_reqs(sc);
	return (ENOMEM);
}

static int
vioblk_ksupdate(kstat_t *ksp, int rw)
{
	struct vioblk_softc *sc = ksp->ks_private;

	if (rw == KSTAT_WRITE)
		return (EACCES);

	sc->ks_data->sts_rw_cookiesmax.value.ui32 = sc->sc_stats.rw_cookiesmax;
	sc->ks_data->sts_intr_queuemax.value.ui32 = sc->sc_stats.intr_queuemax;
	sc->ks_data->sts_unsupp_errors.value.ui32 = sc->sc_stats.unsupp_errors;
	sc->ks_data->sts_nxio_errors.value.ui32 = sc->sc_stats.nxio_errors;
	sc->ks_data->sts_io_errors.value.ui32 = sc->sc_stats.io_errors;
	sc->ks_data->sts_rw_cacheflush.value.ui64 = sc->sc_stats.rw_cacheflush;
	sc->ks_data->sts_intr_total.value.ui64 = sc->sc_stats.intr_total;

	return (0);
}
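
/*
 * Attach: set up kstats, map BAR0, negotiate features, size the DMA
 * attributes from the negotiated limits, allocate the virtqueue and the
 * request array, and register the disk with blkdev.
 */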
static int
vioblk_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
{
	int ret = DDI_SUCCESS;
	int instance;
	struct vioblk_softc *sc;
	struct virtio_softc *vsc;
	struct vioblk_stats *ks_data;

	instance = ddi_get_instance(devinfo);

	switch (cmd) {
	case DDI_ATTACH:
		break;

	case DDI_RESUME:
		dev_err(devinfo, CE_WARN, "resume not supported yet");
		return (DDI_FAILURE);

	default:
		dev_err(devinfo, CE_WARN, "cmd 0x%x not recognized", cmd);
		return (DDI_FAILURE);
	}

	sc = kmem_zalloc(sizeof (struct vioblk_softc), KM_SLEEP);
	ddi_set_driver_private(devinfo, sc);

	vsc = &sc->sc_virtio;

	/* Duplicate for faster access / less typing */
	sc->sc_dev = devinfo;
	vsc->sc_dev = devinfo;

	cv_init(&sc->cv_devid, NULL, CV_DRIVER, NULL);
	mutex_init(&sc->lock_devid, NULL, MUTEX_DRIVER, NULL);

	/*
	 * Initialize interrupt kstat.  This should not normally fail, since
	 * we don't use a persistent stat.  We do it this way to avoid having
	 * to test for it at run time on the hot path.
	 */
	sc->sc_intrstat = kstat_create("vioblk", instance,
	    "intrs", "controller", KSTAT_TYPE_NAMED,
	    sizeof (struct vioblk_stats) / sizeof (kstat_named_t),
	    KSTAT_FLAG_PERSISTENT);
	if (sc->sc_intrstat == NULL) {
		dev_err(devinfo, CE_WARN, "kstat_create failed");
		goto exit_intrstat;
	}

	ks_data = (struct vioblk_stats *)sc->sc_intrstat->ks_data;
	kstat_named_init(&ks_data->sts_rw_outofmemory,
	    "total_rw_outofmemory", KSTAT_DATA_UINT64);
	kstat_named_init(&ks_data->sts_rw_badoffset,
	    "total_rw_badoffset", KSTAT_DATA_UINT64);
	kstat_named_init(&ks_data->sts_intr_total,
	    "total_intr", KSTAT_DATA_UINT64);
	kstat_named_init(&ks_data->sts_io_errors,
	    "total_io_errors", KSTAT_DATA_UINT32);
	kstat_named_init(&ks_data->sts_unsupp_errors,
	    "total_unsupp_errors", KSTAT_DATA_UINT32);
	kstat_named_init(&ks_data->sts_nxio_errors,
	    "total_nxio_errors", KSTAT_DATA_UINT32);
	kstat_named_init(&ks_data->sts_rw_cacheflush,
	    "total_rw_cacheflush", KSTAT_DATA_UINT64);
	kstat_named_init(&ks_data->sts_rw_cookiesmax,
	    "max_rw_cookies", KSTAT_DATA_UINT32);
	kstat_named_init(&ks_data->sts_intr_queuemax,
	    "max_intr_queue", KSTAT_DATA_UINT32);
	sc->ks_data = ks_data;
	sc->sc_intrstat->ks_private = sc;
	sc->sc_intrstat->ks_update = vioblk_ksupdate;
	kstat_install(sc->sc_intrstat);

	/* map BAR0 */
	ret = ddi_regs_map_setup(devinfo, 1,
	    (caddr_t *)&sc->sc_virtio.sc_io_addr,
	    0, 0, &vioblk_attr, &sc->sc_virtio.sc_ioh);
	if (ret != DDI_SUCCESS) {
		dev_err(devinfo, CE_WARN, "unable to map bar0: [%d]", ret);
		goto exit_map;
	}

	virtio_device_reset(&sc->sc_virtio);
	virtio_set_status(&sc->sc_virtio, VIRTIO_CONFIG_DEVICE_STATUS_ACK);
	virtio_set_status(&sc->sc_virtio, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER);

	if (vioblk_register_ints(sc)) {
		dev_err(devinfo, CE_WARN, "Unable to add interrupt");
		goto exit_int;
	}

	ret = vioblk_dev_features(sc);
	if (ret)
		goto exit_features;

	if (sc->sc_virtio.sc_features & VIRTIO_BLK_F_RO)
		sc->sc_readonly = B_TRUE;
	else
		sc->sc_readonly = B_FALSE;

	sc->sc_capacity = virtio_read_device_config_8(&sc->sc_virtio,
	    VIRTIO_BLK_CONFIG_CAPACITY);
	sc->sc_nblks = sc->sc_capacity;

	sc->sc_blk_size = DEV_BSIZE;
	if (sc->sc_virtio.sc_features & VIRTIO_BLK_F_BLK_SIZE) {
		sc->sc_blk_size = virtio_read_device_config_4(&sc->sc_virtio,
		    VIRTIO_BLK_CONFIG_BLK_SIZE);
	}

	sc->sc_pblk_size = sc->sc_blk_size;
	if (sc->sc_virtio.sc_features & VIRTIO_BLK_F_TOPOLOGY) {
		sc->sc_pblk_size <<= virtio_read_device_config_1(&sc->sc_virtio,
		    VIRTIO_BLK_CONFIG_TOPO_PBEXP);
	}

	/* Flushing is not supported. */
	if (!(sc->sc_virtio.sc_features & VIRTIO_BLK_F_FLUSH)) {
		vioblk_ops.o_sync_cache = NULL;
	}

	sc->sc_seg_max = DEF_MAXINDIRECT;
	/* The max number of segments (cookies) in a request */
	if (sc->sc_virtio.sc_features & VIRTIO_BLK_F_SEG_MAX) {
		sc->sc_seg_max = virtio_read_device_config_4(&sc->sc_virtio,
		    VIRTIO_BLK_CONFIG_SEG_MAX);

		/* That's what Linux does. */
		if (!sc->sc_seg_max)
			sc->sc_seg_max = 1;

		/*
		 * SEG_MAX corresponds to the number of _data_
		 * blocks in a request
		 */
		sc->sc_seg_max += 2;
	}
	/* 2 descriptors taken for header/status */
	vioblk_bd_dma_attr.dma_attr_sgllen = sc->sc_seg_max - 2;

	/* The maximum size for a cookie in a request. */
	sc->sc_seg_size_max = DEF_MAXSECTOR;
	if (sc->sc_virtio.sc_features & VIRTIO_BLK_F_SIZE_MAX) {
		sc->sc_seg_size_max = virtio_read_device_config_4(
		    &sc->sc_virtio, VIRTIO_BLK_CONFIG_SIZE_MAX);
	}

	/* The maximum request size */
	vioblk_bd_dma_attr.dma_attr_maxxfer =
	    vioblk_bd_dma_attr.dma_attr_sgllen * sc->sc_seg_size_max;

	dev_debug(devinfo, CE_NOTE,
	    "nblks=%" PRIu64 " blksize=%d (%d) num_seg=%d, "
	    "seg_size=%d, maxxfer=%" PRIu64,
	    sc->sc_nblks, sc->sc_blk_size, sc->sc_pblk_size,
	    vioblk_bd_dma_attr.dma_attr_sgllen,
	    sc->sc_seg_size_max,
	    vioblk_bd_dma_attr.dma_attr_maxxfer);

	sc->sc_vq = virtio_alloc_vq(&sc->sc_virtio, 0, 0,
	    sc->sc_seg_max, "I/O request");
	if (sc->sc_vq == NULL) {
		goto exit_alloc1;
	}

	ret = vioblk_alloc_reqs(sc);
	if (ret) {
		goto exit_alloc2;
	}

	sc->bd_h = bd_alloc_handle(sc, &vioblk_ops, &vioblk_bd_dma_attr,
	    KM_SLEEP);

	virtio_set_status(&sc->sc_virtio,
	    VIRTIO_CONFIG_DEVICE_STATUS_DRIVER_OK);
	virtio_start_vq_intr(sc->sc_vq);

	ret = virtio_enable_ints(&sc->sc_virtio);
	if (ret)
		goto exit_enable_ints;

	ret = bd_attach_handle(devinfo, sc->bd_h);
	if (ret != DDI_SUCCESS) {
		dev_err(devinfo, CE_WARN, "Failed to attach blkdev");
		goto exit_attach_bd;
	}

	return (DDI_SUCCESS);

exit_attach_bd:
	/*
	 * There is no virtio_disable_ints(); it's done in
	 * virtio_release_ints().  If they ever get split, don't forget
	 * to add a call here.
	 */
exit_enable_ints:
	virtio_stop_vq_intr(sc->sc_vq);
	bd_free_handle(sc->bd_h);
	vioblk_free_reqs(sc);
exit_alloc2:
	virtio_free_vq(sc->sc_vq);
exit_alloc1:
exit_features:
	virtio_release_ints(&sc->sc_virtio);
exit_int:
	virtio_set_status(&sc->sc_virtio, VIRTIO_CONFIG_DEVICE_STATUS_FAILED);
	ddi_regs_map_free(&sc->sc_virtio.sc_ioh);
exit_map:
	kstat_delete(sc->sc_intrstat);
exit_intrstat:
	mutex_destroy(&sc->lock_devid);
	cv_destroy(&sc->cv_devid);
	kmem_free(sc, sizeof (struct vioblk_softc));
	return (DDI_FAILURE);
}

static int
vioblk_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
{
	struct vioblk_softc *sc = ddi_get_driver_private(devinfo);

	switch (cmd) {
	case DDI_DETACH:
		break;

	case DDI_PM_SUSPEND:
		cmn_err(CE_WARN, "suspend not supported yet");
		return (DDI_FAILURE);

	default:
		cmn_err(CE_WARN, "cmd 0x%x unrecognized", cmd);
		return (DDI_FAILURE);
	}

	(void) bd_detach_handle(sc->bd_h);
	virtio_stop_vq_intr(sc->sc_vq);
	virtio_release_ints(&sc->sc_virtio);
	vioblk_free_reqs(sc);
	virtio_free_vq(sc->sc_vq);
	virtio_device_reset(&sc->sc_virtio);
	ddi_regs_map_free(&sc->sc_virtio.sc_ioh);
	kstat_delete(sc->sc_intrstat);
	kmem_free(sc, sizeof (struct vioblk_softc));

	return (DDI_SUCCESS);
}

static int
vioblk_quiesce(dev_info_t *devinfo)
{
	struct vioblk_softc *sc = ddi_get_driver_private(devinfo);

	virtio_stop_vq_intr(sc->sc_vq);
	virtio_device_reset(&sc->sc_virtio);

	return (DDI_SUCCESS);
}

int
_init(void)
{
	int rv;

	bd_mod_init(&vioblk_dev_ops);

	if ((rv = mod_install(&modlinkage)) != 0) {
		bd_mod_fini(&vioblk_dev_ops);
	}

	return (rv);
}

int
_fini(void)
{
	int rv;

	if ((rv = mod_remove(&modlinkage)) == 0) {
		bd_mod_fini(&vioblk_dev_ops);
	}

	return (rv);
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}