4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
23 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
24 * Use is subject to license terms.
28 #include <sys/errno.h>
29 #include <sys/types.h>
34 #include <sys/sunddi.h>
37 #include <sys/modctl.h>
38 #include <sys/ddi_impldefs.h>
39 #include <sys/sysmacros.h>
40 #include <sys/ddidevmap.h>
41 #include <sys/xendev.h>
42 #include <public/io/protocols.h>
43 #include <xen/io/blkif_impl.h>
45 #include "blk_common.h"
48 /* blk interface status */
55 * frontend xenbus state changed to XenbusStateConnected,
60 * frontend xenbus state changed to XenbusStateClosed,
61 * interface disconnected
66 /* backend device status */
70 /* backend device is ready (hotplug script finishes successfully) */
79 * frontend's xenbus state has changed to
80 * XenbusStateInitialised, is ready for connecting
85 typedef struct blk_ring_state_s
{
87 boolean_t rs_sleeping_on_ring
;
93 static char *blk_stats
[] = {
101 typedef struct blk_stats_s
{
102 uint64_t bs_req_reads
;
103 uint64_t bs_req_writes
;
104 uint64_t bs_req_barriers
;
105 uint64_t bs_req_flushes
;
113 blk_stats_t ri_stats
;
117 blk_ring_cb_t ri_ringup
;
118 caddr_t ri_ringup_arg
;
119 blk_ring_cb_t ri_ringdown
;
120 caddr_t ri_ringdown_arg
;
122 /* blk interface, backend, and frontend status */
123 enum blk_if_state ri_if_status
;
124 enum blk_be_state ri_be_status
;
125 enum blk_fe_state ri_fe_status
;
129 enum blkif_protocol ri_protocol
;
133 xendev_ring_t
*ri_ring
;
134 blk_ring_state_t ri_state
;
/*
 * Internal helper prototypes.  State-change callbacks are registered with
 * xvdi_add_event_handler() in blk_ring_init(); blk_intr() is the ddi
 * interrupt handler installed in blk_start_connect().
 */
static void blk_oe_state_change(dev_info_t *dip, ddi_eventcookie_t id,
    void *arg, void *impl_data);
static void blk_hp_state_change(dev_info_t *dip, ddi_eventcookie_t id,
    void *arg, void *impl_data);
static int blk_check_state_transition(blk_ring_t ring, XenbusState oestate);
static int blk_start_connect(blk_ring_t ring);
static void blk_start_disconnect(blk_ring_t ring);
static void blk_ring_close(blk_ring_t ring);
static int blk_bindto_frontend(blk_ring_t ring);
static void blk_unbindfrom_frontend(blk_ring_t ring);
static uint_t blk_intr(caddr_t arg);

/* kstat setup/teardown and snapshot callback */
static int blk_kstat_init(blk_ring_t ring);
static void blk_kstat_fini(blk_ring_t ring);
static int blk_kstat_update(kstat_t *ksp, int flag);

/* 32/64-bit frontend ABI <-> native request conversion */
static void blk_ring_request_32(blkif_request_t *dst,
    blkif_x86_32_request_t *src);
static void blk_ring_request_64(blkif_request_t *dst,
    blkif_x86_64_request_t *src);

/* native -> 32/64-bit frontend ABI response conversion */
static void blk_ring_response_32(blkif_x86_32_response_t *dst,
    blkif_response_t *src);
static void blk_ring_response_64(blkif_x86_64_response_t *dst,
    blkif_response_t *src);
169 blk_ring_init(blk_ringinit_args_t
*args
, blk_ring_t
*ringp
)
175 ring
= kmem_zalloc(sizeof (struct blk_ring_s
), KM_SLEEP
);
176 mutex_init(&ring
->ri_mutex
, NULL
, MUTEX_DRIVER
, NULL
);
177 ring
->ri_dip
= args
->ar_dip
;
178 ring
->ri_intr
= args
->ar_intr
;
179 ring
->ri_intr_arg
= args
->ar_intr_arg
;
180 ring
->ri_ringup
= args
->ar_ringup
;
181 ring
->ri_ringup_arg
= args
->ar_ringup_arg
;
182 ring
->ri_ringdown
= args
->ar_ringdown
;
183 ring
->ri_ringdown_arg
= args
->ar_ringdown_arg
;
185 ring
->ri_if_status
= BLK_IF_UNKNOWN
;
186 ring
->ri_be_status
= BLK_BE_UNKNOWN
;
187 ring
->ri_fe_status
= BLK_FE_UNKNOWN
;
188 ring
->ri_state
.rs_sleeping_on_ring
= B_FALSE
;
189 ring
->ri_state
.rs_ring_up
= B_FALSE
;
191 mutex_init(&ring
->ri_state
.rs_mutex
, NULL
, MUTEX_DRIVER
, NULL
);
192 cv_init(&ring
->ri_state
.rs_cv
, NULL
, CV_DRIVER
, NULL
);
194 e
= blk_kstat_init(ring
);
195 if (e
!= DDI_SUCCESS
) {
196 goto ringinitfail_kstat
;
199 /* Watch frontend and hotplug state change */
200 if (xvdi_add_event_handler(ring
->ri_dip
, XS_OE_STATE
,
201 blk_oe_state_change
, ring
) != DDI_SUCCESS
) {
202 goto ringinitfail_oestate
;
204 if (xvdi_add_event_handler(ring
->ri_dip
, XS_HP_STATE
,
205 blk_hp_state_change
, ring
) != DDI_SUCCESS
) {
206 goto ringinitfail_hpstate
;
210 * Kick-off hotplug script
212 if (xvdi_post_event(ring
->ri_dip
, XEN_HP_ADD
) != DDI_SUCCESS
) {
213 cmn_err(CE_WARN
, "blk@%s: failed to start hotplug script",
214 ddi_get_name_addr(ring
->ri_dip
));
215 goto ringinitfail_postevent
;
219 * start waiting for hotplug event and otherend state event
220 * mainly for debugging, frontend will not take any op seeing this
222 (void) xvdi_switch_state(ring
->ri_dip
, XBT_NULL
, XenbusStateInitWait
);
225 return (DDI_SUCCESS
);
227 ringinitfail_postevent
:
228 xvdi_remove_event_handler(ring
->ri_dip
, XS_HP_STATE
);
229 ringinitfail_hpstate
:
230 xvdi_remove_event_handler(ring
->ri_dip
, XS_OE_STATE
);
231 ringinitfail_oestate
:
232 blk_kstat_fini(ring
);
234 cv_destroy(&ring
->ri_state
.rs_cv
);
235 mutex_destroy(&ring
->ri_state
.rs_mutex
);
236 mutex_destroy(&ring
->ri_mutex
);
237 kmem_free(ring
, sizeof (struct blk_ring_s
));
238 return (DDI_FAILURE
);
246 blk_ring_fini(blk_ring_t
*ringp
)
253 mutex_enter(&ring
->ri_mutex
);
254 if (ring
->ri_if_status
!= BLK_IF_DISCONNECTED
) {
255 blk_ring_close(ring
);
257 mutex_exit(&ring
->ri_mutex
);
259 xvdi_remove_event_handler(ring
->ri_dip
, NULL
);
260 blk_kstat_fini(ring
);
261 cv_destroy(&ring
->ri_state
.rs_cv
);
262 mutex_destroy(&ring
->ri_state
.rs_mutex
);
263 mutex_destroy(&ring
->ri_mutex
);
264 kmem_free(ring
, sizeof (struct blk_ring_s
));
274 blk_kstat_init(blk_ring_t ring
)
276 int nstat
= sizeof (blk_stats
) / sizeof (blk_stats
[0]);
277 char **cp
= blk_stats
;
280 ring
->ri_kstats
= kstat_create(ddi_get_name(ring
->ri_dip
),
281 ddi_get_instance(ring
->ri_dip
), "req_statistics", "block",
282 KSTAT_TYPE_NAMED
, nstat
, 0);
283 if (ring
->ri_kstats
== NULL
) {
284 return (DDI_FAILURE
);
287 ring
->ri_kstats
->ks_private
= ring
;
288 ring
->ri_kstats
->ks_update
= blk_kstat_update
;
290 knp
= ring
->ri_kstats
->ks_data
;
292 kstat_named_init(knp
, *cp
, KSTAT_DATA_UINT64
);
298 kstat_install(ring
->ri_kstats
);
300 return (DDI_SUCCESS
);
308 blk_kstat_fini(blk_ring_t ring
)
310 kstat_delete(ring
->ri_kstats
);
318 blk_kstat_update(kstat_t
*ksp
, int flag
)
325 if (flag
!= KSTAT_READ
) {
329 ring
= ksp
->ks_private
;
330 stats
= &ring
->ri_stats
;
334 * Assignment order should match that of the names in
337 (knp
++)->value
.ui64
= stats
->bs_req_reads
;
338 (knp
++)->value
.ui64
= stats
->bs_req_writes
;
339 (knp
++)->value
.ui64
= stats
->bs_req_barriers
;
340 (knp
++)->value
.ui64
= stats
->bs_req_flushes
;
341 (knp
++)->value
.ui64
= 0; /* oo_req */
348 * blk_oe_state_change()
352 blk_oe_state_change(dev_info_t
*dip
, ddi_eventcookie_t id
, void *arg
,
355 XenbusState new_state
;
359 ring
= (blk_ring_t
)arg
;
360 new_state
= *(XenbusState
*)impl_data
;
362 mutex_enter(&ring
->ri_mutex
);
364 if (blk_check_state_transition(ring
, new_state
) == DDI_FAILURE
) {
365 mutex_exit(&ring
->ri_mutex
);
370 case XenbusStateInitialised
:
371 ASSERT(ring
->ri_if_status
== BLK_IF_UNKNOWN
);
373 /* frontend is ready for connecting */
374 ring
->ri_fe_status
= BLK_FE_READY
;
376 if (ring
->ri_be_status
== BLK_BE_READY
) {
377 mutex_exit(&ring
->ri_mutex
);
378 if (blk_start_connect(ring
) != DDI_SUCCESS
)
379 (void) blk_start_disconnect(ring
);
380 mutex_enter(&ring
->ri_mutex
);
383 case XenbusStateClosing
:
384 (void) xvdi_switch_state(dip
, XBT_NULL
, XenbusStateClosing
);
386 case XenbusStateClosed
:
388 (void) xvdi_post_event(ring
->ri_dip
, XEN_HP_REMOVE
);
389 if (ring
->ri_ringdown
!= NULL
) {
390 (*(ring
->ri_ringdown
))(ring
->ri_ringdown_arg
);
392 blk_ring_close(ring
);
394 /* reset state in case of reconnect */
395 ring
->ri_if_status
= BLK_IF_UNKNOWN
;
396 ring
->ri_be_status
= BLK_BE_UNKNOWN
;
397 ring
->ri_fe_status
= BLK_FE_UNKNOWN
;
398 ring
->ri_state
.rs_sleeping_on_ring
= B_FALSE
;
399 ring
->ri_state
.rs_ring_up
= B_FALSE
;
406 mutex_exit(&ring
->ri_mutex
);
411 * blk_hp_state_change()
415 blk_hp_state_change(dev_info_t
*dip
, ddi_eventcookie_t id
, void *arg
,
418 xendev_hotplug_state_t hpstate
;
422 ring
= (blk_ring_t
)arg
;
423 hpstate
= *(xendev_hotplug_state_t
*)impl_data
;
425 mutex_enter(&ring
->ri_mutex
);
426 if (hpstate
== Connected
) {
427 /* Hotplug script has completed successfully */
428 if (ring
->ri_be_status
== BLK_BE_UNKNOWN
) {
429 ring
->ri_be_status
= BLK_BE_READY
;
430 if (ring
->ri_fe_status
== BLK_FE_READY
) {
431 mutex_exit(&ring
->ri_mutex
);
432 /* try to connect to frontend */
433 if (blk_start_connect(ring
) != DDI_SUCCESS
)
434 (void) blk_start_disconnect(ring
);
435 mutex_enter(&ring
->ri_mutex
);
439 mutex_exit(&ring
->ri_mutex
);
444 * blk_check_state_transition()
445 * check the XenbusState change to see if the change is a valid transition
446 * or not. The new state is written by frontend domain, or by running
447 * xenstore-write to change it manually in dom0.
450 blk_check_state_transition(blk_ring_t ring
, XenbusState oestate
)
452 switch (ring
->ri_if_status
) {
454 if (ring
->ri_fe_status
== BLK_FE_UNKNOWN
) {
455 if ((oestate
== XenbusStateUnknown
) ||
456 (oestate
== XenbusStateConnected
))
457 goto statechkfail_bug
;
458 else if ((oestate
== XenbusStateInitialising
) ||
459 (oestate
== XenbusStateInitWait
))
460 goto statechkfail_nop
;
462 if ((oestate
== XenbusStateUnknown
) ||
463 (oestate
== XenbusStateInitialising
) ||
464 (oestate
== XenbusStateInitWait
) ||
465 (oestate
== XenbusStateConnected
))
466 goto statechkfail_bug
;
467 else if (oestate
== XenbusStateInitialised
)
468 goto statechkfail_nop
;
472 case BLK_IF_CONNECTED
:
473 if ((oestate
== XenbusStateUnknown
) ||
474 (oestate
== XenbusStateInitialising
) ||
475 (oestate
== XenbusStateInitWait
) ||
476 (oestate
== XenbusStateInitialised
))
477 goto statechkfail_bug
;
478 else if (oestate
== XenbusStateConnected
)
479 goto statechkfail_nop
;
482 case BLK_IF_DISCONNECTED
:
484 goto statechkfail_bug
;
487 return (DDI_SUCCESS
);
490 cmn_err(CE_NOTE
, "blk@%s: unexpected otherend "
491 "state change to %d!, when status is %d",
492 ddi_get_name_addr(ring
->ri_dip
), oestate
,
496 return (DDI_FAILURE
);
501 * blk_start_connect()
502 * Kick-off connect process
503 * If ri_fe_status == BLK_FE_READY and ri_be_status == BLK_BE_READY
504 * the ri_if_status will be changed to BLK_IF_CONNECTED on success,
505 * otherwise, ri_if_status will not be changed
508 blk_start_connect(blk_ring_t ring
)
510 xenbus_transaction_t xbt
;
521 * Start connect to frontend only when backend device are ready
522 * and frontend has moved to XenbusStateInitialised, which means
525 ASSERT(ring
->ri_fe_status
== BLK_FE_READY
);
526 ASSERT(ring
->ri_be_status
== BLK_BE_READY
);
528 xsnode
= xvdi_get_xsname(dip
);
529 if (xsnode
== NULL
) {
530 goto startconnectfail_get_xsname
;
533 ring
->ri_fe
= xvdi_get_oeid(dip
);
534 if (ring
->ri_fe
== (domid_t
)-1) {
535 goto startconnectfail_get_oeid
;
538 e
= xvdi_switch_state(dip
, XBT_NULL
, XenbusStateInitialised
);
540 goto startconnectfail_switch_init
;
543 e
= blk_bindto_frontend(ring
);
544 if (e
!= DDI_SUCCESS
) {
545 goto startconnectfail_bindto_frontend
;
547 ring
->ri_if_status
= BLK_IF_CONNECTED
;
549 e
= ddi_add_intr(dip
, 0, NULL
, NULL
, blk_intr
, (caddr_t
)ring
);
550 if (e
!= DDI_SUCCESS
) {
551 goto startconnectfail_add_intr
;
555 e
= xenbus_transaction_start(&xbt
);
557 xvdi_fatal_error(dip
, e
, "transaction start");
558 goto startconnectfail_transaction_start
;
561 /* xentop requires the instance in xenstore */
562 e
= xenbus_printf(xbt
, xsnode
, "instance", "%d",
563 ddi_get_instance(ring
->ri_dip
));
565 cmn_err(CE_WARN
, "xdb@%s: failed to write 'instance'",
566 ddi_get_name_addr(dip
));
567 xvdi_fatal_error(dip
, e
, "writing 'instance'");
568 (void) xenbus_transaction_end(xbt
, 1);
569 goto startconnectfail_xenbus_printf
;
572 /* If feature-barrier isn't present in xenstore, add it */
573 e
= xenbus_read(xbt
, xsnode
, "feature-barrier", (void **)&barrier
,
576 e
= xenbus_printf(xbt
, xsnode
, "feature-barrier", "%d", 1);
578 cmn_err(CE_WARN
, "xdb@%s: failed to write "
579 "'feature-barrier'", ddi_get_name_addr(dip
));
580 xvdi_fatal_error(dip
, e
, "writing 'feature-barrier'");
581 (void) xenbus_transaction_end(xbt
, 1);
582 goto startconnectfail_xenbus_printf
;
585 kmem_free(barrier
, len
);
588 e
= xvdi_switch_state(dip
, xbt
, XenbusStateConnected
);
590 xvdi_fatal_error(dip
, e
, "writing 'state'");
591 (void) xenbus_transaction_end(xbt
, 1);
592 goto startconnectfail_switch_connected
;
595 e
= xenbus_transaction_end(xbt
, 0);
598 /* transaction is ended, don't need to abort it */
601 xvdi_fatal_error(dip
, e
, "completing transaction");
602 goto startconnectfail_transaction_end
;
605 mutex_enter(&ring
->ri_state
.rs_mutex
);
606 ring
->ri_state
.rs_ring_up
= B_TRUE
;
607 if (ring
->ri_state
.rs_sleeping_on_ring
) {
608 ring
->ri_state
.rs_sleeping_on_ring
= B_FALSE
;
609 cv_signal(&ring
->ri_state
.rs_cv
);
611 mutex_exit(&ring
->ri_state
.rs_mutex
);
613 if (ring
->ri_ringup
!= NULL
) {
614 (*(ring
->ri_ringup
))(ring
->ri_ringup_arg
);
617 return (DDI_SUCCESS
);
620 startconnectfail_transaction_end
:
621 startconnectfail_switch_connected
:
622 startconnectfail_xenbus_printf
:
623 startconnectfail_transaction_start
:
624 ddi_remove_intr(dip
, 0, NULL
);
625 startconnectfail_add_intr
:
626 blk_unbindfrom_frontend(ring
);
627 ring
->ri_fe
= (domid_t
)-1;
628 startconnectfail_bindto_frontend
:
629 (void) xvdi_switch_state(dip
, XBT_NULL
, XenbusStateClosed
);
630 startconnectfail_switch_init
:
631 startconnectfail_get_oeid
:
632 startconnectfail_get_xsname
:
633 return (DDI_FAILURE
);
638 * blk_start_disconnect()
639 * Kick-off disconnect process. ri_if_status will not be changed
642 blk_start_disconnect(blk_ring_t ring
)
644 /* Kick-off disconnect process */
645 (void) xvdi_switch_state(ring
->ri_dip
, XBT_NULL
, XenbusStateClosing
);
651 * Disconnect from frontend and close backend device
652 * ifstatus will be changed to BLK_DISCONNECTED
653 * Xenbus state will be changed to XenbusStateClosed
656 blk_ring_close(blk_ring_t ring
)
661 /* mutex protect ri_if_status only here */
662 ASSERT(MUTEX_HELD(&ring
->ri_mutex
));
666 if (ring
->ri_if_status
!= BLK_IF_CONNECTED
) {
670 ring
->ri_if_status
= BLK_IF_DISCONNECTED
;
671 mutex_exit(&ring
->ri_mutex
);
673 /* stop accepting I/O request from frontend */
674 ddi_remove_intr(dip
, 0, NULL
);
676 blk_unbindfrom_frontend(ring
);
677 ring
->ri_fe
= (domid_t
)-1;
678 (void) xvdi_switch_state(dip
, XBT_NULL
, XenbusStateClosed
);
679 mutex_enter(&ring
->ri_mutex
);
684 * blk_bindto_frontend()
687 blk_bindto_frontend(blk_ring_t ring
)
689 evtchn_port_t evtchn
;
701 * Gather info from frontend
703 oename
= xvdi_get_oename(dip
);
704 if (oename
== NULL
) {
705 return (DDI_FAILURE
);
708 e
= xenbus_gather(XBT_NULL
, oename
, "ring-ref", "%lu", &gref
,
709 "event-channel", "%u", &evtchn
, NULL
);
711 xvdi_fatal_error(dip
, e
,
712 "Getting ring-ref and evtchn from frontend");
713 return (DDI_FAILURE
);
716 e
= xenbus_gather(XBT_NULL
, oename
, "protocol", "%63s",
719 (void) strcpy(protocol
, "unspecified, assuming native");
720 } else if (strcmp(protocol
, XEN_IO_PROTO_ABI_NATIVE
) == 0) {
721 ring
->ri_protocol
= BLKIF_PROTOCOL_NATIVE
;
722 ring
->ri_nentry
= BLKIF_RING_SIZE
;
723 ring
->ri_entrysize
= sizeof (union blkif_sring_entry
);
724 } else if (strcmp(protocol
, XEN_IO_PROTO_ABI_X86_32
) == 0) {
725 ring
->ri_protocol
= BLKIF_PROTOCOL_X86_32
;
726 ring
->ri_nentry
= BLKIF_X86_32_RING_SIZE
;
727 ring
->ri_entrysize
= sizeof (union blkif_x86_32_sring_entry
);
728 } else if (strcmp(protocol
, XEN_IO_PROTO_ABI_X86_64
) == 0) {
729 ring
->ri_protocol
= BLKIF_PROTOCOL_X86_64
;
730 ring
->ri_nentry
= BLKIF_X86_64_RING_SIZE
;
731 ring
->ri_entrysize
= sizeof (union blkif_x86_64_sring_entry
);
733 xvdi_fatal_error(dip
, e
, "unknown fe protocol");
734 return (DDI_FAILURE
);
740 e
= xvdi_map_ring(dip
, ring
->ri_nentry
, ring
->ri_entrysize
, gref
,
742 if (e
!= DDI_SUCCESS
) {
743 return (DDI_FAILURE
);
749 e
= xvdi_bind_evtchn(dip
, evtchn
);
750 if (e
!= DDI_SUCCESS
) {
751 xvdi_unmap_ring(ring
->ri_ring
);
752 return (DDI_FAILURE
);
756 return (DDI_SUCCESS
);
761 * blk_unbindfrom_frontend()
764 blk_unbindfrom_frontend(blk_ring_t ring
)
766 xvdi_free_evtchn(ring
->ri_dip
);
767 xvdi_unmap_ring(ring
->ri_ring
);
775 blk_intr(caddr_t arg
)
779 ring
= (blk_ring_t
)arg
;
780 if (ring
->ri_if_status
!= BLK_IF_CONNECTED
) {
781 return (DDI_INTR_CLAIMED
);
784 (void) (*ring
->ri_intr
)(ring
->ri_intr_arg
);
785 return (DDI_INTR_CLAIMED
);
790 * blk_ring_request_get()
793 blk_ring_request_get(blk_ring_t ring
, blkif_request_t
*req
)
795 blkif_request_t
*src
;
799 mutex_enter(&ring
->ri_mutex
);
801 if (ring
->ri_if_status
!= BLK_IF_CONNECTED
) {
802 mutex_exit(&ring
->ri_mutex
);
806 src
= xvdi_ring_get_request(ring
->ri_ring
);
808 mutex_exit(&ring
->ri_mutex
);
812 switch (ring
->ri_protocol
) {
813 case BLKIF_PROTOCOL_NATIVE
:
814 bcopy(src
, req
, sizeof (*req
));
816 case BLKIF_PROTOCOL_X86_32
:
817 blk_ring_request_32(req
, (blkif_x86_32_request_t
*)src
);
819 case BLKIF_PROTOCOL_X86_64
:
820 blk_ring_request_64(req
, (blkif_x86_64_request_t
*)src
);
823 cmn_err(CE_WARN
, "blkif@%s: unrecognised protocol: %d",
824 ddi_get_name_addr(ring
->ri_dip
),
827 mutex_exit(&ring
->ri_mutex
);
829 stats
= &ring
->ri_stats
;
830 switch (req
->operation
) {
832 stats
->bs_req_reads
++;
835 stats
->bs_req_writes
++;
837 case BLKIF_OP_WRITE_BARRIER
:
838 stats
->bs_req_barriers
++;
840 case BLKIF_OP_FLUSH_DISKCACHE
:
841 stats
->bs_req_flushes
++;
850 * blk_ring_request_requeue()
851 * if a request is requeued, caller will have to poll for request
855 blk_ring_request_requeue(blk_ring_t ring
)
857 mutex_enter(&ring
->ri_mutex
);
859 if (ring
->ri_if_status
!= BLK_IF_CONNECTED
) {
860 mutex_exit(&ring
->ri_mutex
);
864 ring
->ri_ring
->xr_sring
.br
.req_cons
--;
866 mutex_exit(&ring
->ri_mutex
);
871 * blk_ring_response_put()
874 blk_ring_response_put(blk_ring_t ring
, blkif_response_t
*src
)
876 blkif_response_t
*rsp
;
880 mutex_enter(&ring
->ri_mutex
);
882 if (ring
->ri_if_status
!= BLK_IF_CONNECTED
) {
883 mutex_exit(&ring
->ri_mutex
);
887 rsp
= xvdi_ring_get_response(ring
->ri_ring
);
890 switch (ring
->ri_protocol
) {
891 case BLKIF_PROTOCOL_NATIVE
:
892 bcopy(src
, rsp
, sizeof (*rsp
));
894 case BLKIF_PROTOCOL_X86_32
:
895 blk_ring_response_32((blkif_x86_32_response_t
*)rsp
, src
);
897 case BLKIF_PROTOCOL_X86_64
:
898 blk_ring_response_64((blkif_x86_64_response_t
*)rsp
, src
);
901 cmn_err(CE_WARN
, "blk@%s: unrecognised protocol: %d",
902 ddi_get_name_addr(ring
->ri_dip
),
906 e
= xvdi_ring_push_response(ring
->ri_ring
);
908 xvdi_notify_oe(ring
->ri_dip
);
911 mutex_exit(&ring
->ri_mutex
);
916 * blk_ring_request_32()
919 blk_ring_request_32(blkif_request_t
*dst
, blkif_x86_32_request_t
*src
)
921 int i
, n
= BLKIF_MAX_SEGMENTS_PER_REQUEST
;
922 dst
->operation
= src
->operation
;
923 dst
->nr_segments
= src
->nr_segments
;
924 dst
->handle
= src
->handle
;
926 dst
->sector_number
= src
->sector_number
;
927 if (n
> src
->nr_segments
)
928 n
= src
->nr_segments
;
929 for (i
= 0; i
< n
; i
++)
930 dst
->seg
[i
] = src
->seg
[i
];
935 * blk_ring_request_64()
938 blk_ring_request_64(blkif_request_t
*dst
, blkif_x86_64_request_t
*src
)
940 int i
, n
= BLKIF_MAX_SEGMENTS_PER_REQUEST
;
941 dst
->operation
= src
->operation
;
942 dst
->nr_segments
= src
->nr_segments
;
943 dst
->handle
= src
->handle
;
945 dst
->sector_number
= src
->sector_number
;
946 if (n
> src
->nr_segments
)
947 n
= src
->nr_segments
;
948 for (i
= 0; i
< n
; i
++)
949 dst
->seg
[i
] = src
->seg
[i
];
954 * blk_ring_response_32()
957 blk_ring_response_32(blkif_x86_32_response_t
*dst
, blkif_response_t
*src
)
960 dst
->operation
= src
->operation
;
961 dst
->status
= src
->status
;
966 * blk_ring_response_64()
969 blk_ring_response_64(blkif_x86_64_response_t
*dst
, blkif_response_t
*src
)
972 dst
->operation
= src
->operation
;
973 dst
->status
= src
->status
;
978 * blk_ring_request_dump()
981 blk_ring_request_dump(blkif_request_t
*req
)
986 * Exploit the public interface definitions for BLKIF_OP_READ
989 char *op_name
[] = { "read", "write", "barrier", "flush" };
991 cmn_err(CE_NOTE
, " op=%s", op_name
[req
->operation
]);
992 cmn_err(CE_NOTE
, " num of segments=%d", req
->nr_segments
);
993 cmn_err(CE_NOTE
, " handle=%d", req
->handle
);
994 cmn_err(CE_NOTE
, " id=0x%llx", (unsigned long long)req
->id
);
995 cmn_err(CE_NOTE
, " start sector=%llu",
996 (unsigned long long)req
->sector_number
);
997 for (i
= 0; i
< req
->nr_segments
; i
++) {
998 cmn_err(CE_NOTE
, " gref=%d, first sec=%d,"
999 "last sec=%d", req
->seg
[i
].gref
, req
->seg
[i
].first_sect
,
1000 req
->seg
[i
].last_sect
);
1006 * blk_ring_response_dump()
1009 blk_ring_response_dump(blkif_response_t
*resp
)
1012 * Exploit the public interface definitions for BLKIF_OP_READ
1015 char *op_name
[] = { "read", "write", "barrier", "flush" };
1017 cmn_err(CE_NOTE
, " op=%d:%s", resp
->operation
,
1018 op_name
[resp
->operation
]);
1019 cmn_err(CE_NOTE
, " op=%d", resp
->operation
);
1020 cmn_err(CE_NOTE
, " status=%d", resp
->status
);