/*
 * Xen SCSI frontend driver
 *
 * Copyright (c) 2008, FUJITSU Limited
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/wait.h>
#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/blkdev.h>
#include <linux/pfn.h>
#include <linux/slab.h>
#include <linux/bitops.h>

#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>

#include <xen/xen.h>
#include <xen/xenbus.h>
#include <xen/grant_table.h>
#include <xen/events.h>
#include <xen/page.h>

#include <xen/interface/grant_table.h>
#include <xen/interface/io/vscsiif.h>
#include <xen/interface/io/protocols.h>

#include <asm/xen/hypervisor.h>
#define VSCSIFRONT_OP_ADD_LUN	1
#define VSCSIFRONT_OP_DEL_LUN	2
#define VSCSIFRONT_OP_READD_LUN	3

#define VSCSIIF_DEFAULT_CMD_PER_LUN 10
#define VSCSIIF_MAX_TARGET  64
#define VSCSIIF_MAX_LUN     255

#define VSCSIIF_RING_SIZE	__CONST_RING_SIZE(vscsiif, PAGE_SIZE)
#define VSCSIIF_MAX_REQS	VSCSIIF_RING_SIZE

#define vscsiif_grants_sg(_sg)	(PFN_UP((_sg) *		\
				sizeof(struct scsiif_request_segment)))
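/*
 * vscsiif_grants_sg(n) is the number of grant pages needed to hold n
 * struct scsiif_request_segment descriptors: when a request carries more
 * segments than fit into the ring slot, the descriptors themselves live
 * in frontend memory and are handed to the backend via extra grants (the
 * "SG grant" / indirect descriptor case handled in map_data_for_request()
 * below).
 */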
struct vscsifrnt_shadow {
        /* command between backend and frontend */
        unsigned char act;
        uint8_t nr_segments;
        uint16_t rqid;
        uint16_t ref_rqid;

        bool inflight;

        unsigned int nr_grants;		/* number of grants in gref[] */
        struct scsiif_request_segment *sg;	/* scatter/gather elements */
        struct scsiif_request_segment seg[VSCSIIF_SG_TABLESIZE];

        /* Do reset or abort function. */
        wait_queue_head_t wq_reset;	/* reset work queue           */
        int wait_reset;			/* reset work queue condition */
        int32_t rslt_reset;		/* reset response status:     */
                                        /* SUCCESS or FAILED or:      */
#define RSLT_RESET_WAITING	0
#define RSLT_RESET_ERR		-1

        /* Requested struct scsi_cmnd is stored from kernel. */
        struct scsi_cmnd *sc;
        int gref[vscsiif_grants_sg(SG_ALL) + SG_ALL];
};
enum vscsi_state {
        STATE_INACTIVE,		/* pre init / post shutdown */
        STATE_ACTIVE,		/* active */
        STATE_ERROR,		/* error */
};

struct vscsifrnt_info {
        struct xenbus_device *dev;

        struct Scsi_Host *host;
        enum vscsi_state host_active;

        unsigned int evtchn;
        unsigned int irq;

        grant_ref_t ring_ref;
        struct vscsiif_front_ring ring;
        struct vscsiif_response ring_rsp;

        spinlock_t shadow_lock;
        DECLARE_BITMAP(shadow_free_bitmap, VSCSIIF_MAX_REQS);
        struct vscsifrnt_shadow *shadow[VSCSIIF_MAX_REQS];

        /* Following items are protected by the host lock. */
        wait_queue_head_t wq_sync;
        wait_queue_head_t wq_pause;
        unsigned int wait_ring_available:1;
        unsigned int waiting_pause:1;
        unsigned int pause:1;
        unsigned int callers;

        char dev_state_path[64];
        struct task_struct *curr;
};
static DEFINE_MUTEX(scsifront_mutex);
static void scsifront_wake_up(struct vscsifrnt_info *info)
{
        info->wait_ring_available = 0;
        wake_up(&info->wq_sync);
}
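/*
 * Request ids double as indices into info->shadow[]: a free id is taken
 * from shadow_free_bitmap when a request is queued and returned when the
 * matching response arrives, so the rqid in a response always leads back
 * to the bookkeeping of the request it answers.
 */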
static int scsifront_get_rqid(struct vscsifrnt_info *info)
{
        unsigned long flags;
        int free;

        spin_lock_irqsave(&info->shadow_lock, flags);

        free = find_first_bit(info->shadow_free_bitmap, VSCSIIF_MAX_REQS);
        __clear_bit(free, info->shadow_free_bitmap);

        spin_unlock_irqrestore(&info->shadow_lock, flags);

        return free;
}
static int _scsifront_put_rqid(struct vscsifrnt_info *info, uint32_t id)
{
        int empty = bitmap_empty(info->shadow_free_bitmap, VSCSIIF_MAX_REQS);

        __set_bit(id, info->shadow_free_bitmap);
        info->shadow[id] = NULL;

        return empty || info->wait_ring_available;
}
static void scsifront_put_rqid(struct vscsifrnt_info *info, uint32_t id)
{
        unsigned long flags;
        int kick;

        spin_lock_irqsave(&info->shadow_lock, flags);
        kick = _scsifront_put_rqid(info, id);
        spin_unlock_irqrestore(&info->shadow_lock, flags);

        if (kick)
                scsifront_wake_up(info);
}
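/*
 * Copy one request into the shared ring and kick the backend through the
 * event channel if the ring macros say a notification is needed.  Returns
 * nonzero if the ring is full or no request id is available; the caller
 * then retries later (queuecommand reports the host as busy, the reset
 * path sleeps on wq_sync until a slot frees up).
 */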
static int scsifront_do_request(struct vscsifrnt_info *info,
                                struct vscsifrnt_shadow *shadow)
{
        struct vscsiif_front_ring *ring = &(info->ring);
        struct vscsiif_request *ring_req;
        struct scsi_cmnd *sc = shadow->sc;
        uint32_t id;
        int i, notify;

        if (RING_FULL(&info->ring))
                return -EBUSY;

        id = scsifront_get_rqid(info);	/* use id in response */
        if (id >= VSCSIIF_MAX_REQS)
                return -EBUSY;

        info->shadow[id] = shadow;
        shadow->rqid = id;

        ring_req = RING_GET_REQUEST(&(info->ring), ring->req_prod_pvt);
        ring->req_prod_pvt++;

        ring_req->rqid        = id;
        ring_req->act         = shadow->act;
        ring_req->ref_rqid    = shadow->ref_rqid;
        ring_req->nr_segments = shadow->nr_segments;

        ring_req->id      = sc->device->id;
        ring_req->lun     = sc->device->lun;
        ring_req->channel = sc->device->channel;
        ring_req->cmd_len = sc->cmd_len;

        BUG_ON(sc->cmd_len > VSCSIIF_MAX_COMMAND_SIZE);

        memcpy(ring_req->cmnd, sc->cmnd, sc->cmd_len);

        ring_req->sc_data_direction   = (uint8_t)sc->sc_data_direction;
        ring_req->timeout_per_command = scsi_cmd_to_rq(sc)->timeout / HZ;

        for (i = 0; i < (shadow->nr_segments & ~VSCSIIF_SG_GRANT); i++)
                ring_req->seg[i] = shadow->seg[i];

        shadow->inflight = true;

        RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(ring, notify);
        if (notify)
                notify_remote_via_irq(info->irq);

        return 0;
}
static void scsifront_set_error(struct vscsifrnt_info *info, const char *msg)
{
        shost_printk(KERN_ERR, info->host, KBUILD_MODNAME "%s\n"
                     "Disabling device for further use\n", msg);
        info->host_active = STATE_ERROR;
}
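/*
 * Revoke the grants of a finished request.  If the backend still holds a
 * mapping of one of the granted pages, ending the grant would let the
 * backend write into memory that may get reused, so the device is marked
 * STATE_ERROR instead and no further requests are accepted.
 */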
static void scsifront_gnttab_done(struct vscsifrnt_info *info,
                                  struct vscsifrnt_shadow *shadow)
{
        int i;

        if (shadow->sc->sc_data_direction == DMA_NONE)
                return;

        for (i = 0; i < shadow->nr_grants; i++) {
                if (unlikely(!gnttab_try_end_foreign_access(shadow->gref[i]))) {
                        scsifront_set_error(info, "grant still in use by backend");
                        return;
                }
        }

        kfree(shadow->sg);
}
static unsigned int scsifront_host_byte(int32_t rslt)
{
        switch (XEN_VSCSIIF_RSLT_HOST(rslt)) {
        case XEN_VSCSIIF_RSLT_HOST_OK:
                return DID_OK;
        case XEN_VSCSIIF_RSLT_HOST_NO_CONNECT:
                return DID_NO_CONNECT;
        case XEN_VSCSIIF_RSLT_HOST_BUS_BUSY:
                return DID_BUS_BUSY;
        case XEN_VSCSIIF_RSLT_HOST_TIME_OUT:
                return DID_TIME_OUT;
        case XEN_VSCSIIF_RSLT_HOST_BAD_TARGET:
                return DID_BAD_TARGET;
        case XEN_VSCSIIF_RSLT_HOST_ABORT:
                return DID_ABORT;
        case XEN_VSCSIIF_RSLT_HOST_PARITY:
                return DID_PARITY;
        case XEN_VSCSIIF_RSLT_HOST_ERROR:
                return DID_ERROR;
        case XEN_VSCSIIF_RSLT_HOST_RESET:
                return DID_RESET;
        case XEN_VSCSIIF_RSLT_HOST_BAD_INTR:
                return DID_BAD_INTR;
        case XEN_VSCSIIF_RSLT_HOST_PASSTHROUGH:
                return DID_PASSTHROUGH;
        case XEN_VSCSIIF_RSLT_HOST_SOFT_ERROR:
                return DID_SOFT_ERROR;
        case XEN_VSCSIIF_RSLT_HOST_IMM_RETRY:
                return DID_IMM_RETRY;
        case XEN_VSCSIIF_RSLT_HOST_REQUEUE:
                return DID_REQUEUE;
        case XEN_VSCSIIF_RSLT_HOST_TRANSPORT_DISRUPTED:
                return DID_TRANSPORT_DISRUPTED;
        case XEN_VSCSIIF_RSLT_HOST_TRANSPORT_FAILFAST:
                return DID_TRANSPORT_FAILFAST;
        case XEN_VSCSIIF_RSLT_HOST_TRANSPORT_MARGINAL:
                return DID_TRANSPORT_MARGINAL;
        default:
                return DID_ERROR;
        }
}
static void scsifront_cdb_cmd_done(struct vscsifrnt_info *info,
                                   struct vscsiif_response *ring_rsp)
{
        struct vscsifrnt_shadow *shadow;
        struct scsi_cmnd *sc;
        uint32_t id;
        uint8_t sense_len;

        id = ring_rsp->rqid;
        shadow = info->shadow[id];
        sc = shadow->sc;

        BUG_ON(sc == NULL);

        scsifront_gnttab_done(info, shadow);
        if (info->host_active == STATE_ERROR)
                return;
        scsifront_put_rqid(info, id);

        set_host_byte(sc, scsifront_host_byte(ring_rsp->rslt));
        set_status_byte(sc, XEN_VSCSIIF_RSLT_STATUS(ring_rsp->rslt));
        scsi_set_resid(sc, ring_rsp->residual_len);

        sense_len = min_t(uint8_t, VSCSIIF_SENSE_BUFFERSIZE,
                          ring_rsp->sense_len);

        if (sense_len)
                memcpy(sc->sense_buffer, ring_rsp->sense_buffer, sense_len);

        scsi_done(sc);
}
static void scsifront_sync_cmd_done(struct vscsifrnt_info *info,
                                    struct vscsiif_response *ring_rsp)
{
        uint16_t id = ring_rsp->rqid;
        unsigned long flags;
        struct vscsifrnt_shadow *shadow = info->shadow[id];
        int kick;

        spin_lock_irqsave(&info->shadow_lock, flags);
        shadow->wait_reset = 1;
        switch (shadow->rslt_reset) {
        case RSLT_RESET_WAITING:
                if (ring_rsp->rslt == XEN_VSCSIIF_RSLT_RESET_SUCCESS)
                        shadow->rslt_reset = SUCCESS;
                else
                        shadow->rslt_reset = FAILED;
                break;
        case RSLT_RESET_ERR:
                kick = _scsifront_put_rqid(info, id);
                spin_unlock_irqrestore(&info->shadow_lock, flags);
                kfree(shadow);
                if (kick)
                        scsifront_wake_up(info);
                return;
        default:
                scsifront_set_error(info, "bad reset state");
                break;
        }
        spin_unlock_irqrestore(&info->shadow_lock, flags);

        wake_up(&shadow->wq_reset);
}
static void scsifront_do_response(struct vscsifrnt_info *info,
                                  struct vscsiif_response *ring_rsp)
{
        struct vscsifrnt_shadow *shadow;

        if (ring_rsp->rqid >= VSCSIIF_MAX_REQS ||
            !info->shadow[ring_rsp->rqid]->inflight) {
                scsifront_set_error(info, "illegal rqid returned by backend!");
                return;
        }
        shadow = info->shadow[ring_rsp->rqid];
        shadow->inflight = false;

        if (shadow->act == VSCSIIF_ACT_SCSI_CDB)
                scsifront_cdb_cmd_done(info, ring_rsp);
        else
                scsifront_sync_cmd_done(info, ring_rsp);
}
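/*
 * Drain all responses the backend has produced so far.  rsp_prod is read
 * once and checked for overflow before any response is consumed, so a
 * broken or malicious backend cannot make the frontend loop over bogus
 * ring indices.  *eoiflag starts out as "spurious" and is cleared as soon
 * as at least one real response has been processed.
 */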
static int scsifront_ring_drain(struct vscsifrnt_info *info,
                                unsigned int *eoiflag)
{
        struct vscsiif_response ring_rsp;
        RING_IDX i, rp;
        int more_to_do = 0;

        rp = READ_ONCE(info->ring.sring->rsp_prod);
        virt_rmb();	/* ordering required respective to backend */
        if (RING_RESPONSE_PROD_OVERFLOW(&info->ring, rp)) {
                scsifront_set_error(info, "illegal number of responses");
                return 0;
        }
        for (i = info->ring.rsp_cons; i != rp; i++) {
                RING_COPY_RESPONSE(&info->ring, i, &ring_rsp);
                scsifront_do_response(info, &ring_rsp);
                if (info->host_active == STATE_ERROR)
                        return 0;
                *eoiflag &= ~XEN_EOI_FLAG_SPURIOUS;
        }

        info->ring.rsp_cons = i;

        if (i != info->ring.req_prod_pvt)
                RING_FINAL_CHECK_FOR_RESPONSES(&info->ring, more_to_do);
        else
                info->ring.sring->rsp_event = i + 1;

        return more_to_do;
}
static int scsifront_cmd_done(struct vscsifrnt_info *info,
                              unsigned int *eoiflag)
{
        int more_to_do;
        unsigned long flags;

        spin_lock_irqsave(info->host->host_lock, flags);

        more_to_do = scsifront_ring_drain(info, eoiflag);

        info->wait_ring_available = 0;

        spin_unlock_irqrestore(info->host->host_lock, flags);

        wake_up(&info->wq_sync);

        return more_to_do;
}
static irqreturn_t scsifront_irq_fn(int irq, void *dev_id)
{
        struct vscsifrnt_info *info = dev_id;
        unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS;

        if (info->host_active == STATE_ERROR) {
                xen_irq_lateeoi(irq, XEN_EOI_FLAG_SPURIOUS);
                return IRQ_HANDLED;
        }

        while (scsifront_cmd_done(info, &eoiflag))
                /* Yield point for this unbounded loop. */
                cond_resched();

        xen_irq_lateeoi(irq, eoiflag);

        return IRQ_HANDLED;
}
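/*
 * Complete every request that is still outstanding with DID_RESET.  Used
 * on resume, when the old ring is gone and the backend will never answer
 * the in-flight requests of the previous connection.
 */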
static void scsifront_finish_all(struct vscsifrnt_info *info)
{
        unsigned int i, dummy;
        struct vscsiif_response resp;

        scsifront_ring_drain(info, &dummy);

        for (i = 0; i < VSCSIIF_MAX_REQS; i++) {
                if (test_bit(i, info->shadow_free_bitmap))
                        continue;
                resp.rqid = i;
                resp.sense_len = 0;
                resp.rslt = DID_RESET << 16;
                resp.residual_len = 0;
                scsifront_do_response(info, &resp);
        }
}
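/*
 * Set up the grant references for a request's data pages.  Up to
 * VSCSIIF_SG_TABLESIZE segments are described directly in the ring slot;
 * larger transfers (up to the sg_tablesize negotiated via
 * "feature-sg-grant") put the segment descriptors into separately granted
 * pages, and the ring slot then carries grants for those descriptor
 * pages, flagged with VSCSIIF_SG_GRANT in nr_segments.
 */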
static int map_data_for_request(struct vscsifrnt_info *info,
                                struct scsi_cmnd *sc,
                                struct vscsifrnt_shadow *shadow)
{
        grant_ref_t gref_head;
        struct page *page;
        int err, ref, ref_cnt = 0;
        int grant_ro = (sc->sc_data_direction == DMA_TO_DEVICE);
        unsigned int i, off, len, bytes;
        unsigned int data_len = scsi_bufflen(sc);
        unsigned int data_grants = 0, seg_grants = 0;
        struct scatterlist *sg;
        struct scsiif_request_segment *seg;

        if (sc->sc_data_direction == DMA_NONE || !data_len)
                return 0;

        scsi_for_each_sg(sc, sg, scsi_sg_count(sc), i)
                data_grants += PFN_UP(sg->offset + sg->length);

        if (data_grants > VSCSIIF_SG_TABLESIZE) {
                if (data_grants > info->host->sg_tablesize) {
                        shost_printk(KERN_ERR, info->host, KBUILD_MODNAME
                             "Unable to map request_buffer for command!\n");
                        return -E2BIG;
                }
                seg_grants = vscsiif_grants_sg(data_grants);
                shadow->sg = kcalloc(data_grants,
                        sizeof(struct scsiif_request_segment), GFP_ATOMIC);
                if (!shadow->sg)
                        return -ENOMEM;
        }
        seg = shadow->sg ? : shadow->seg;

        err = gnttab_alloc_grant_references(seg_grants + data_grants,
                                            &gref_head);
        if (err) {
                kfree(shadow->sg);
                shost_printk(KERN_ERR, info->host, KBUILD_MODNAME
                             "gnttab_alloc_grant_references() error\n");
                return -ENOMEM;
        }

        if (seg_grants) {
                page = virt_to_page(seg);
                off = offset_in_page(seg);
                len = sizeof(struct scsiif_request_segment) * data_grants;
                while (len > 0) {
                        bytes = min_t(unsigned int, len, PAGE_SIZE - off);

                        ref = gnttab_claim_grant_reference(&gref_head);
                        BUG_ON(ref == -ENOSPC);

                        gnttab_grant_foreign_access_ref(ref,
                                info->dev->otherend_id,
                                xen_page_to_gfn(page), 1);
                        shadow->gref[ref_cnt] = ref;
                        shadow->seg[ref_cnt].gref   = ref;
                        shadow->seg[ref_cnt].offset = (uint16_t)off;
                        shadow->seg[ref_cnt].length = (uint16_t)bytes;

                        page++;
                        len -= bytes;
                        off = 0;
                        ref_cnt++;
                }
                BUG_ON(seg_grants < ref_cnt);
                seg_grants = ref_cnt;
        }

        scsi_for_each_sg(sc, sg, scsi_sg_count(sc), i) {
                page = sg_page(sg);
                off = sg->offset;
                len = sg->length;

                while (len > 0 && data_len > 0) {
                        /*
                         * sg sends a scatterlist that is larger than
                         * the data_len it wants transferred for certain
                         * IO sizes.
                         */
                        bytes = min_t(unsigned int, len, PAGE_SIZE - off);
                        bytes = min(bytes, data_len);

                        ref = gnttab_claim_grant_reference(&gref_head);
                        BUG_ON(ref == -ENOSPC);

                        gnttab_grant_foreign_access_ref(ref,
                                info->dev->otherend_id,
                                xen_page_to_gfn(page),
                                grant_ro);

                        shadow->gref[ref_cnt] = ref;
                        seg->gref   = ref;
                        seg->offset = (uint16_t)off;
                        seg->length = (uint16_t)bytes;

                        page++;
                        seg++;
                        len -= bytes;
                        data_len -= bytes;
                        off = 0;
                        ref_cnt++;
                }
        }

        if (seg_grants)
                shadow->nr_segments = VSCSIIF_SG_GRANT | seg_grants;
        else
                shadow->nr_segments = (uint8_t)ref_cnt;
        shadow->nr_grants = ref_cnt;

        return 0;
}
static int scsifront_enter(struct vscsifrnt_info *info)
{
        if (info->pause)
                return 1;
        info->callers++;
        return 0;
}

static void scsifront_return(struct vscsifrnt_info *info)
{
        info->callers--;
        if (info->callers)
                return;

        if (!info->waiting_pause)
                return;

        info->waiting_pause = 0;
        wake_up(&info->wq_pause);
}
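/*
 * scsifront_enter()/scsifront_return() bracket every section that talks
 * to the backend: enter fails while the device is pausing for suspend,
 * and the last caller to leave wakes up a suspend waiting on wq_pause.
 */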
static int scsifront_queuecommand(struct Scsi_Host *shost,
                                  struct scsi_cmnd *sc)
{
        struct vscsifrnt_info *info = shost_priv(shost);
        struct vscsifrnt_shadow *shadow = scsi_cmd_priv(sc);
        unsigned long flags;
        int err;

        if (info->host_active == STATE_ERROR)
                return SCSI_MLQUEUE_HOST_BUSY;

        sc->result = 0;

        shadow->sc  = sc;
        shadow->act = VSCSIIF_ACT_SCSI_CDB;

        spin_lock_irqsave(shost->host_lock, flags);
        if (scsifront_enter(info)) {
                spin_unlock_irqrestore(shost->host_lock, flags);
                return SCSI_MLQUEUE_HOST_BUSY;
        }

        err = map_data_for_request(info, sc, shadow);
        if (err < 0) {
                pr_debug("%s: err %d\n", __func__, err);
                scsifront_return(info);
                spin_unlock_irqrestore(shost->host_lock, flags);
                if (err == -ENOMEM)
                        return SCSI_MLQUEUE_HOST_BUSY;
                sc->result = DID_ERROR << 16;
                scsi_done(sc);
                return 0;
        }

        if (scsifront_do_request(info, shadow)) {
                scsifront_gnttab_done(info, shadow);
                goto busy;
        }

        scsifront_return(info);
        spin_unlock_irqrestore(shost->host_lock, flags);

        return 0;

busy:
        scsifront_return(info);
        spin_unlock_irqrestore(shost->host_lock, flags);
        pr_debug("%s: busy\n", __func__);
        return SCSI_MLQUEUE_HOST_BUSY;
}
/*
 * Any exception handling (reset or abort) must be forwarded to the backend.
 * We have to wait until an answer is returned. This answer contains the
 * result to be returned to the requestor.
 */
static int scsifront_action_handler(struct scsi_cmnd *sc, uint8_t act)
{
        struct Scsi_Host *host = sc->device->host;
        struct vscsifrnt_info *info = shost_priv(host);
        struct vscsifrnt_shadow *shadow, *s = scsi_cmd_priv(sc);
        int err = 0;

        if (info->host_active == STATE_ERROR)
                return FAILED;

        shadow = kzalloc(sizeof(*shadow), GFP_NOIO);
        if (!shadow)
                return FAILED;

        shadow->act = act;
        shadow->rslt_reset = RSLT_RESET_WAITING;
        shadow->sc = sc;
        shadow->ref_rqid = s->rqid;
        init_waitqueue_head(&shadow->wq_reset);

        spin_lock_irq(host->host_lock);

        for (;;) {
                if (scsifront_enter(info))
                        goto fail;

                if (!scsifront_do_request(info, shadow))
                        break;

                scsifront_return(info);
                if (err)
                        goto fail;
                info->wait_ring_available = 1;
                spin_unlock_irq(host->host_lock);
                err = wait_event_interruptible(info->wq_sync,
                                               !info->wait_ring_available);
                spin_lock_irq(host->host_lock);
        }

        spin_unlock_irq(host->host_lock);
        err = wait_event_interruptible(shadow->wq_reset, shadow->wait_reset);
        spin_lock_irq(host->host_lock);

        if (!err) {
                err = shadow->rslt_reset;
                scsifront_put_rqid(info, shadow->rqid);
                kfree(shadow);
        } else {
                spin_lock(&info->shadow_lock);
                shadow->rslt_reset = RSLT_RESET_ERR;
                spin_unlock(&info->shadow_lock);
                err = FAILED;
        }

        scsifront_return(info);
        spin_unlock_irq(host->host_lock);
        return err;

fail:
        spin_unlock_irq(host->host_lock);
        kfree(shadow);
        return FAILED;
}
static int scsifront_eh_abort_handler(struct scsi_cmnd *sc)
{
        pr_debug("%s\n", __func__);
        return scsifront_action_handler(sc, VSCSIIF_ACT_SCSI_ABORT);
}

static int scsifront_dev_reset_handler(struct scsi_cmnd *sc)
{
        pr_debug("%s\n", __func__);
        return scsifront_action_handler(sc, VSCSIIF_ACT_SCSI_RESET);
}
static int scsifront_sdev_configure(struct scsi_device *sdev)
{
        struct vscsifrnt_info *info = shost_priv(sdev->host);
        int err;

        if (info->host_active == STATE_ERROR)
                return -EIO;

        if (current == info->curr) {
                err = xenbus_printf(XBT_NIL, info->dev->nodename,
                              info->dev_state_path, "%d", XenbusStateConnected);
                if (err) {
                        xenbus_dev_error(info->dev, err,
                                "%s: writing dev_state_path", __func__);
                        return err;
                }
        }

        return 0;
}
static void scsifront_sdev_destroy(struct scsi_device *sdev)
{
        struct vscsifrnt_info *info = shost_priv(sdev->host);
        int err;

        if (current == info->curr) {
                err = xenbus_printf(XBT_NIL, info->dev->nodename,
                              info->dev_state_path, "%d", XenbusStateClosed);
                if (err)
                        xenbus_dev_error(info->dev, err,
                                "%s: writing dev_state_path", __func__);
        }
}
static const struct scsi_host_template scsifront_sht = {
        .module			= THIS_MODULE,
        .name			= "Xen SCSI frontend driver",
        .queuecommand		= scsifront_queuecommand,
        .eh_abort_handler	= scsifront_eh_abort_handler,
        .eh_device_reset_handler = scsifront_dev_reset_handler,
        .slave_configure	= scsifront_sdev_configure,
        .slave_destroy		= scsifront_sdev_destroy,
        .cmd_per_lun		= VSCSIIF_DEFAULT_CMD_PER_LUN,
        .can_queue		= VSCSIIF_MAX_REQS,
        .this_id		= -1,
        .cmd_size		= sizeof(struct vscsifrnt_shadow),
        .sg_tablesize		= VSCSIIF_SG_TABLESIZE,
        .proc_name		= "scsifront",
};
static int scsifront_alloc_ring(struct vscsifrnt_info *info)
{
        struct xenbus_device *dev = info->dev;
        struct vscsiif_sring *sring;
        int err;

        /***** Frontend to Backend ring start *****/
        err = xenbus_setup_ring(dev, GFP_KERNEL, (void **)&sring, 1,
                                &info->ring_ref);
        if (err)
                return err;

        XEN_FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE);

        err = xenbus_alloc_evtchn(dev, &info->evtchn);
        if (err) {
                xenbus_dev_fatal(dev, err, "xenbus_alloc_evtchn");
                goto free_gnttab;
        }

        err = bind_evtchn_to_irq_lateeoi(info->evtchn);
        if (err <= 0) {
                xenbus_dev_fatal(dev, err, "bind_evtchn_to_irq");
                goto free_gnttab;
        }

        info->irq = err;

        err = request_threaded_irq(info->irq, NULL, scsifront_irq_fn,
                                   IRQF_ONESHOT, "scsifront", info);
        if (err) {
                xenbus_dev_fatal(dev, err, "request_threaded_irq");
                goto free_irq;
        }

        return 0;

/* free resource */
free_irq:
        unbind_from_irqhandler(info->irq, info);
free_gnttab:
        xenbus_teardown_ring((void **)&sring, 1, &info->ring_ref);

        return err;
}
static void scsifront_free_ring(struct vscsifrnt_info *info)
{
        unbind_from_irqhandler(info->irq, info);
        xenbus_teardown_ring((void **)&info->ring.sring, 1, &info->ring_ref);
}
static int scsifront_init_ring(struct vscsifrnt_info *info)
{
        struct xenbus_device *dev = info->dev;
        struct xenbus_transaction xbt;
        int err;

        pr_debug("%s\n", __func__);

        err = scsifront_alloc_ring(info);
        if (err)
                return err;
        pr_debug("%s: %u %u\n", __func__, info->ring_ref, info->evtchn);

again:
        err = xenbus_transaction_start(&xbt);
        if (err)
                xenbus_dev_fatal(dev, err, "starting transaction");

        err = xenbus_printf(xbt, dev->nodename, "ring-ref", "%u",
                            info->ring_ref);
        if (err) {
                xenbus_dev_fatal(dev, err, "%s", "writing ring-ref");
                goto fail;
        }

        err = xenbus_printf(xbt, dev->nodename, "event-channel", "%u",
                            info->evtchn);
        if (err) {
                xenbus_dev_fatal(dev, err, "%s", "writing event-channel");
                goto fail;
        }

        err = xenbus_transaction_end(xbt, 0);
        if (err) {
                if (err == -EAGAIN)
                        goto again;
                xenbus_dev_fatal(dev, err, "completing transaction");
                goto free_sring;
        }

        return 0;

fail:
        xenbus_transaction_end(xbt, 1);
free_sring:
        scsifront_free_ring(info);

        return err;
}
static int scsifront_probe(struct xenbus_device *dev,
                           const struct xenbus_device_id *id)
{
        struct vscsifrnt_info *info;
        struct Scsi_Host *host;
        int err = -ENOMEM;
        char name[TASK_COMM_LEN];

        host = scsi_host_alloc(&scsifront_sht, sizeof(*info));
        if (!host) {
                xenbus_dev_fatal(dev, err, "fail to allocate scsi host");
                return err;
        }
        info = shost_priv(host);
        info->host = host;

        dev_set_drvdata(&dev->dev, info);
        info->dev = dev;

        bitmap_fill(info->shadow_free_bitmap, VSCSIIF_MAX_REQS);

        err = scsifront_init_ring(info);
        if (err) {
                scsi_host_put(host);
                return err;
        }

        init_waitqueue_head(&info->wq_sync);
        init_waitqueue_head(&info->wq_pause);
        spin_lock_init(&info->shadow_lock);

        snprintf(name, TASK_COMM_LEN, "vscsiif.%d", host->host_no);

        host->max_id      = VSCSIIF_MAX_TARGET;
        host->max_channel = 0;
        host->max_lun     = VSCSIIF_MAX_LUN;
        host->max_sectors = (host->sg_tablesize - 1) * PAGE_SIZE / 512;
        host->max_cmd_len = VSCSIIF_MAX_COMMAND_SIZE;

        err = scsi_add_host(host, &dev->dev);
        if (err) {
                dev_err(&dev->dev, "fail to add scsi host %d\n", err);
                goto free_sring;
        }
        info->host_active = STATE_ACTIVE;

        xenbus_switch_state(dev, XenbusStateInitialised);

        return 0;

free_sring:
        scsifront_free_ring(info);
        scsi_host_put(host);
        return err;
}
static int scsifront_resume(struct xenbus_device *dev)
{
        struct vscsifrnt_info *info = dev_get_drvdata(&dev->dev);
        struct Scsi_Host *host = info->host;
        int err;

        spin_lock_irq(host->host_lock);

        /* Finish all still pending commands. */
        scsifront_finish_all(info);

        spin_unlock_irq(host->host_lock);

        /* Reconnect to dom0. */
        scsifront_free_ring(info);
        err = scsifront_init_ring(info);
        if (err) {
                dev_err(&dev->dev, "fail to resume %d\n", err);
                scsi_host_put(host);
                return err;
        }

        xenbus_switch_state(dev, XenbusStateInitialised);

        return 0;
}
static int scsifront_suspend(struct xenbus_device *dev)
{
        struct vscsifrnt_info *info = dev_get_drvdata(&dev->dev);
        struct Scsi_Host *host = info->host;
        int err = 0;

        /* No new commands for the backend. */
        spin_lock_irq(host->host_lock);
        info->pause = 1;
        while (info->callers && !err) {
                info->waiting_pause = 1;
                info->wait_ring_available = 0;
                spin_unlock_irq(host->host_lock);
                wake_up(&info->wq_sync);
                err = wait_event_interruptible(info->wq_pause,
                                               !info->waiting_pause);
                spin_lock_irq(host->host_lock);
        }
        spin_unlock_irq(host->host_lock);
        return err;
}
static void scsifront_remove(struct xenbus_device *dev)
{
        struct vscsifrnt_info *info = dev_get_drvdata(&dev->dev);

        pr_debug("%s: %s removed\n", __func__, dev->nodename);

        mutex_lock(&scsifront_mutex);
        if (info->host_active != STATE_INACTIVE) {
                /* Scsi_host not yet removed */
                scsi_remove_host(info->host);
                info->host_active = STATE_INACTIVE;
        }
        mutex_unlock(&scsifront_mutex);

        scsifront_free_ring(info);
        scsi_host_put(info->host);
}
static void scsifront_disconnect(struct vscsifrnt_info *info)
{
        struct xenbus_device *dev = info->dev;
        struct Scsi_Host *host = info->host;

        pr_debug("%s: %s disconnect\n", __func__, dev->nodename);

        /*
         * When this function is executed, all devices of
         * Frontend have been deleted.
         * Therefore, it need not block I/O before remove_host.
         */

        mutex_lock(&scsifront_mutex);
        if (info->host_active != STATE_INACTIVE) {
                scsi_remove_host(host);
                info->host_active = STATE_INACTIVE;
        }
        mutex_unlock(&scsifront_mutex);

        xenbus_frontend_closed(dev);
}
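/*
 * Walk the backend's "vscsi-devs" directory in xenstore and add, remove
 * or re-announce LUNs according to op and the per-device state node.
 * info->curr marks the task doing the rescan, so the slave_configure/
 * slave_destroy callbacks triggered synchronously from here know they
 * may write the matching frontend state node back to xenstore.
 */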
static void scsifront_do_lun_hotplug(struct vscsifrnt_info *info, int op)
{
        struct xenbus_device *dev = info->dev;
        int i, err;
        char str[64];
        char **dir;
        unsigned int dir_n = 0;
        unsigned int device_state;
        unsigned int hst, chn, tgt, lun;
        struct scsi_device *sdev;

        if (info->host_active == STATE_ERROR)
                return;

        dir = xenbus_directory(XBT_NIL, dev->otherend, "vscsi-devs", &dir_n);
        if (IS_ERR(dir))
                return;

        /* mark current task as the one allowed to modify device states */
        BUG_ON(info->curr);
        info->curr = current;

        for (i = 0; i < dir_n; i++) {
                /* read status */
                snprintf(str, sizeof(str), "vscsi-devs/%s/state", dir[i]);
                err = xenbus_scanf(XBT_NIL, dev->otherend, str, "%u",
                                   &device_state);
                if (XENBUS_EXIST_ERR(err))
                        continue;

                /* virtual SCSI device */
                snprintf(str, sizeof(str), "vscsi-devs/%s/v-dev", dir[i]);
                err = xenbus_scanf(XBT_NIL, dev->otherend, str,
                                   "%u:%u:%u:%u", &hst, &chn, &tgt, &lun);
                if (XENBUS_EXIST_ERR(err))
                        continue;

                /*
                 * Front device state path, used in slave_configure called
                 * on successful scsi_add_device, and in slave_destroy called
                 * on remove of a device.
                 */
                snprintf(info->dev_state_path, sizeof(info->dev_state_path),
                         "vscsi-devs/%s/state", dir[i]);

                switch (op) {
                case VSCSIFRONT_OP_ADD_LUN:
                        if (device_state != XenbusStateInitialised)
                                break;

                        if (scsi_add_device(info->host, chn, tgt, lun)) {
                                dev_err(&dev->dev, "scsi_add_device\n");
                                err = xenbus_printf(XBT_NIL, dev->nodename,
                                              info->dev_state_path,
                                              "%d", XenbusStateClosed);
                                if (err)
                                        xenbus_dev_error(dev, err,
                                                "%s: writing dev_state_path", __func__);
                        }
                        break;
                case VSCSIFRONT_OP_DEL_LUN:
                        if (device_state != XenbusStateClosing)
                                break;

                        sdev = scsi_device_lookup(info->host, chn, tgt, lun);
                        if (sdev) {
                                scsi_remove_device(sdev);
                                scsi_device_put(sdev);
                        }
                        break;
                case VSCSIFRONT_OP_READD_LUN:
                        if (device_state == XenbusStateConnected) {
                                err = xenbus_printf(XBT_NIL, dev->nodename,
                                              info->dev_state_path,
                                              "%d", XenbusStateConnected);
                                if (err)
                                        xenbus_dev_error(dev, err,
                                                "%s: writing dev_state_path", __func__);
                        }
                        break;
                default:
                        break;
                }
        }

        info->curr = NULL;

        kfree(dir);
}
static void scsifront_read_backend_params(struct xenbus_device *dev,
                                          struct vscsifrnt_info *info)
{
        unsigned int sg_grant, nr_segs;
        struct Scsi_Host *host = info->host;

        sg_grant = xenbus_read_unsigned(dev->otherend, "feature-sg-grant", 0);
        nr_segs = min_t(unsigned int, sg_grant, SG_ALL);
        nr_segs = max_t(unsigned int, nr_segs, VSCSIIF_SG_TABLESIZE);
        nr_segs = min_t(unsigned int, nr_segs,
                        VSCSIIF_SG_TABLESIZE * PAGE_SIZE /
                        sizeof(struct scsiif_request_segment));

        if (!info->pause && sg_grant)
                dev_info(&dev->dev, "using up to %d SG entries\n", nr_segs);
        else if (info->pause && nr_segs < host->sg_tablesize)
                dev_warn(&dev->dev,
                         "SG entries decreased from %d to %u - device may not work properly anymore\n",
                         host->sg_tablesize, nr_segs);

        host->sg_tablesize = nr_segs;
        host->max_sectors = (nr_segs - 1) * PAGE_SIZE / 512;
}
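/*
 * React to backend state changes: hot plug/unplug LUNs, renegotiate
 * parameters on (re)connect and tear the host down when the backend
 * closes.  After a suspend/resume cycle info->pause is set and the
 * already known LUNs are re-announced instead of being added again.
 */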
static void scsifront_backend_changed(struct xenbus_device *dev,
                                      enum xenbus_state backend_state)
{
        struct vscsifrnt_info *info = dev_get_drvdata(&dev->dev);

        pr_debug("%s: %p %u %u\n", __func__, dev, dev->state, backend_state);

        switch (backend_state) {
        case XenbusStateUnknown:
        case XenbusStateInitialising:
        case XenbusStateInitWait:
        case XenbusStateInitialised:
                break;

        case XenbusStateConnected:
                scsifront_read_backend_params(dev, info);

                if (info->pause) {
                        scsifront_do_lun_hotplug(info, VSCSIFRONT_OP_READD_LUN);
                        xenbus_switch_state(dev, XenbusStateConnected);
                        info->pause = 0;
                        return;
                }

                if (xenbus_read_driver_state(dev->nodename) ==
                    XenbusStateInitialised)
                        scsifront_do_lun_hotplug(info, VSCSIFRONT_OP_ADD_LUN);

                if (dev->state != XenbusStateConnected)
                        xenbus_switch_state(dev, XenbusStateConnected);
                break;

        case XenbusStateClosed:
                if (dev->state == XenbusStateClosed)
                        break;
                fallthrough;	/* Missed the backend's Closing state */
        case XenbusStateClosing:
                scsifront_disconnect(info);
                break;

        case XenbusStateReconfiguring:
                scsifront_do_lun_hotplug(info, VSCSIFRONT_OP_DEL_LUN);
                xenbus_switch_state(dev, XenbusStateReconfiguring);
                break;

        case XenbusStateReconfigured:
                scsifront_do_lun_hotplug(info, VSCSIFRONT_OP_ADD_LUN);
                xenbus_switch_state(dev, XenbusStateConnected);
                break;
        }
}
static const struct xenbus_device_id scsifront_ids[] = {
        { "vscsi" },
        { "" }
};
static struct xenbus_driver scsifront_driver = {
        .ids			= scsifront_ids,
        .probe			= scsifront_probe,
        .remove			= scsifront_remove,
        .resume			= scsifront_resume,
        .suspend		= scsifront_suspend,
        .otherend_changed	= scsifront_backend_changed,
};
static int __init scsifront_init(void)
{
        if (!xen_domain())
                return -ENODEV;

        return xenbus_register_frontend(&scsifront_driver);
}
module_init(scsifront_init);
static void __exit scsifront_exit(void)
{
        xenbus_unregister_driver(&scsifront_driver);
}
module_exit(scsifront_exit);
MODULE_DESCRIPTION("Xen SCSI frontend driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("xen:vscsi");
MODULE_AUTHOR("Juergen Gross <jgross@suse.com>");