/*
 * Xen SCSI frontend driver
 *
 * Copyright (c) 2008, FUJITSU Limited
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/wait.h>
#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/blkdev.h>
#include <linux/pfn.h>
#include <linux/slab.h>
#include <linux/bitops.h>

#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>

#include <xen/xen.h>
#include <xen/xenbus.h>
#include <xen/grant_table.h>
#include <xen/events.h>
#include <xen/page.h>

#include <xen/interface/grant_table.h>
#include <xen/interface/io/vscsiif.h>
#include <xen/interface/io/protocols.h>

#include <asm/xen/hypervisor.h>

#define GRANT_INVALID_REF       0

#define VSCSIFRONT_OP_ADD_LUN   1
#define VSCSIFRONT_OP_DEL_LUN   2
#define VSCSIFRONT_OP_READD_LUN 3

#define VSCSIIF_DEFAULT_CMD_PER_LUN 10
#define VSCSIIF_MAX_TARGET  64
#define VSCSIIF_MAX_LUN     255

#define VSCSIIF_RING_SIZE       __CONST_RING_SIZE(vscsiif, PAGE_SIZE)
#define VSCSIIF_MAX_REQS        VSCSIIF_RING_SIZE

#define vscsiif_grants_sg(_sg)  (PFN_UP((_sg) * \
                                sizeof(struct scsiif_request_segment)))

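/*
 * Per-request bookkeeping shared between request submission and response
 * handling: a shadow entry keeps the grant references handed to the
 * backend so they can be revoked on completion, plus the wait queue used
 * by synchronous reset/abort requests.
 */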
struct vscsifrnt_shadow {
        /* command between backend and frontend */
        unsigned char act;
        uint16_t rqid;

        unsigned int nr_grants;         /* number of grants in gref[] */
        struct scsiif_request_segment *sg;      /* scatter/gather elements */

        /* Do reset or abort function. */
        wait_queue_head_t wq_reset;     /* reset work queue           */
        int wait_reset;                 /* reset work queue condition */
        int32_t rslt_reset;             /* reset response status:     */
                                        /* SUCCESS or FAILED or:      */
#define RSLT_RESET_WAITING      0
#define RSLT_RESET_ERR          -1

        /* Requested struct scsi_cmnd is stored from kernel. */
        struct scsi_cmnd *sc;
        int gref[vscsiif_grants_sg(SG_ALL) + SG_ALL];
};

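/*
 * Per-host frontend state: the xenbus device, the shared ring with its
 * grant reference and event channel, the table of in-flight shadow
 * entries, and the flags coordinating suspend/resume with callers that
 * are still inside the driver.
 */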
struct vscsifrnt_info {
        struct xenbus_device *dev;

        struct Scsi_Host *host;
        int host_active;

        unsigned int evtchn;
        unsigned int irq;

        grant_ref_t ring_ref;
        struct vscsiif_front_ring ring;
        struct vscsiif_response ring_rsp;

        spinlock_t shadow_lock;
        DECLARE_BITMAP(shadow_free_bitmap, VSCSIIF_MAX_REQS);
        struct vscsifrnt_shadow *shadow[VSCSIIF_MAX_REQS];

        /* Following items are protected by the host lock. */
        wait_queue_head_t wq_sync;
        wait_queue_head_t wq_pause;
        unsigned int wait_ring_available:1;
        unsigned int waiting_pause:1;
        unsigned int pause:1;
        unsigned callers;

        char dev_state_path[64];
        struct task_struct *curr;
};

static DEFINE_MUTEX(scsifront_mutex);

static void scsifront_wake_up(struct vscsifrnt_info *info)
{
        info->wait_ring_available = 0;
        wake_up(&info->wq_sync);
}

static int scsifront_get_rqid(struct vscsifrnt_info *info)
{
        unsigned long flags;
        int free;

        spin_lock_irqsave(&info->shadow_lock, flags);

        free = find_first_bit(info->shadow_free_bitmap, VSCSIIF_MAX_REQS);
        __clear_bit(free, info->shadow_free_bitmap);

        spin_unlock_irqrestore(&info->shadow_lock, flags);

        return free;
}

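/*
 * The underscore variant expects shadow_lock to be held by the caller.
 * The return value tells the caller whether scsifront_wake_up() should
 * be invoked: either the free bitmap was completely empty (the ring was
 * exhausted) or someone is explicitly waiting for ring space.
 */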
static int _scsifront_put_rqid(struct vscsifrnt_info *info, uint32_t id)
{
        int empty = bitmap_empty(info->shadow_free_bitmap, VSCSIIF_MAX_REQS);

        __set_bit(id, info->shadow_free_bitmap);
        info->shadow[id] = NULL;

        return empty || info->wait_ring_available;
}

static void scsifront_put_rqid(struct vscsifrnt_info *info, uint32_t id)
{
        int kick;
        unsigned long flags;

        spin_lock_irqsave(&info->shadow_lock, flags);
        kick = _scsifront_put_rqid(info, id);
        spin_unlock_irqrestore(&info->shadow_lock, flags);

        if (kick)
                scsifront_wake_up(info);
}

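/*
 * Reserve a request id and the next free slot on the shared ring.
 * Returns NULL when no id is available; the caller has to retry once
 * outstanding responses have freed ring entries again.
 */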
static struct vscsiif_request *scsifront_pre_req(struct vscsifrnt_info *info)
{
        struct vscsiif_front_ring *ring = &(info->ring);
        struct vscsiif_request *ring_req;
        uint32_t id;

        id = scsifront_get_rqid(info);  /* use id in response */
        if (id >= VSCSIIF_MAX_REQS)
                return NULL;

        ring_req = RING_GET_REQUEST(&(info->ring), ring->req_prod_pvt);

        ring->req_prod_pvt++;

        ring_req->rqid = (uint16_t)id;

        return ring_req;
}

static void scsifront_do_request(struct vscsifrnt_info *info)
{
        struct vscsiif_front_ring *ring = &(info->ring);
        int notify;

        RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(ring, notify);
        if (notify)
                notify_remote_via_irq(info->irq);
}

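/*
 * Revoke the grants handed to the backend for a completed request. A
 * grant still marked as in use by the backend at this point indicates
 * a serious protocol violation by the backend.
 */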
static void scsifront_gnttab_done(struct vscsifrnt_info *info, uint32_t id)
{
        struct vscsifrnt_shadow *s = info->shadow[id];
        int i;

        if (s->sc->sc_data_direction == DMA_NONE)
                return;

        for (i = 0; i < s->nr_grants; i++) {
                if (unlikely(gnttab_query_foreign_access(s->gref[i]) != 0)) {
                        shost_printk(KERN_ALERT, info->host, KBUILD_MODNAME
                                     "grant still in use by backend\n");
                        BUG();
                }
                gnttab_end_foreign_access(s->gref[i], 0, 0UL);
        }

        kfree(s->sg);
}

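/*
 * Complete a normal SCSI command: release grants and the request id,
 * then hand result, residual and sense data back to the midlayer.
 */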
static void scsifront_cdb_cmd_done(struct vscsifrnt_info *info,
                                   struct vscsiif_response *ring_rsp)
{
        struct scsi_cmnd *sc;
        uint32_t id;
        uint8_t sense_len;

        id = ring_rsp->rqid;
        sc = info->shadow[id]->sc;

        BUG_ON(sc == NULL);

        scsifront_gnttab_done(info, id);
        scsifront_put_rqid(info, id);

        sc->result = ring_rsp->rslt;
        scsi_set_resid(sc, ring_rsp->residual_len);
        sense_len = min_t(uint8_t, VSCSIIF_SENSE_BUFFERSIZE,
                          ring_rsp->sense_len);

        if (sense_len)
                memcpy(sc->sense_buffer, ring_rsp->sense_buffer, sense_len);

        sc->scsi_done(sc);
}

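/*
 * Complete a synchronous (reset/abort) request. If the waiter was
 * interrupted and already marked the shadow entry RSLT_RESET_ERR, the
 * entry is cleaned up here instead; otherwise the result is stored and
 * the waiter is woken.
 */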
static void scsifront_sync_cmd_done(struct vscsifrnt_info *info,
                                    struct vscsiif_response *ring_rsp)
{
        uint16_t id = ring_rsp->rqid;
        unsigned long flags;
        struct vscsifrnt_shadow *shadow = info->shadow[id];
        int kick;

        spin_lock_irqsave(&info->shadow_lock, flags);
        shadow->wait_reset = 1;
        switch (shadow->rslt_reset) {
        case RSLT_RESET_WAITING:
                shadow->rslt_reset = ring_rsp->rslt;
                break;
        case RSLT_RESET_ERR:
                kick = _scsifront_put_rqid(info, id);
                spin_unlock_irqrestore(&info->shadow_lock, flags);
                kfree(shadow);
                if (kick)
                        scsifront_wake_up(info);
                return;
        default:
                shost_printk(KERN_ERR, info->host, KBUILD_MODNAME
                             "bad reset state %d, possibly leaking %u\n",
                             shadow->rslt_reset, id);
                break;
        }
        spin_unlock_irqrestore(&info->shadow_lock, flags);

        wake_up(&shadow->wq_reset);
}

static void scsifront_do_response(struct vscsifrnt_info *info,
                                  struct vscsiif_response *ring_rsp)
{
        if (WARN(ring_rsp->rqid >= VSCSIIF_MAX_REQS ||
                 test_bit(ring_rsp->rqid, info->shadow_free_bitmap),
                 "illegal rqid %u returned by backend!\n", ring_rsp->rqid))
                return;

        if (info->shadow[ring_rsp->rqid]->act == VSCSIIF_ACT_SCSI_CDB)
                scsifront_cdb_cmd_done(info, ring_rsp);
        else
                scsifront_sync_cmd_done(info, ring_rsp);
}

static int scsifront_ring_drain(struct vscsifrnt_info *info)
{
        struct vscsiif_response *ring_rsp;
        RING_IDX i, rp;
        int more_to_do = 0;

        rp = info->ring.sring->rsp_prod;
        rmb();  /* ordering required respective to dom0 */
        for (i = info->ring.rsp_cons; i != rp; i++) {
                ring_rsp = RING_GET_RESPONSE(&info->ring, i);
                scsifront_do_response(info, ring_rsp);
        }

        info->ring.rsp_cons = i;

        if (i != info->ring.req_prod_pvt)
                RING_FINAL_CHECK_FOR_RESPONSES(&info->ring, more_to_do);
        else
                info->ring.sring->rsp_event = i + 1;

        return more_to_do;
}

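/*
 * Drain the response ring under the host lock and wake anybody waiting
 * for free ring slots. Returns whether more responses may still show
 * up, which keeps the irq thread looping.
 */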
static int scsifront_cmd_done(struct vscsifrnt_info *info)
{
        int more_to_do;
        unsigned long flags;

        spin_lock_irqsave(info->host->host_lock, flags);

        more_to_do = scsifront_ring_drain(info);

        info->wait_ring_available = 0;

        spin_unlock_irqrestore(info->host->host_lock, flags);

        wake_up(&info->wq_sync);

        return more_to_do;
}

static irqreturn_t scsifront_irq_fn(int irq, void *dev_id)
{
        struct vscsifrnt_info *info = dev_id;

        while (scsifront_cmd_done(info))
                /* Yield point for this unbounded loop. */
                cond_resched();

        return IRQ_HANDLED;
}

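/*
 * Used on resume: any request still pending after the final ring drain
 * cannot complete anymore, so fabricate a DID_RESET response for each
 * occupied shadow slot.
 */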
static void scsifront_finish_all(struct vscsifrnt_info *info)
{
        unsigned int i;
        struct vscsiif_response resp;

        scsifront_ring_drain(info);

        for (i = 0; i < VSCSIIF_MAX_REQS; i++) {
                if (test_bit(i, info->shadow_free_bitmap))
                        continue;
                resp.rqid = i;
                resp.sense_len = 0;
                resp.rslt = DID_RESET << 16;
                resp.residual_len = 0;
                scsifront_do_response(info, &resp);
        }
}

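/*
 * Grant the backend access to the data buffers of a command. Small
 * scatter/gather lists fit into the segment array embedded in the ring
 * request; larger ones (up to the negotiated sg_tablesize) use the
 * VSCSIIF_SG_GRANT indirection, where the ring request carries grants
 * for pages that in turn hold the real segment descriptors.
 */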
static int map_data_for_request(struct vscsifrnt_info *info,
                                struct scsi_cmnd *sc,
                                struct vscsiif_request *ring_req,
                                struct vscsifrnt_shadow *shadow)
{
        grant_ref_t gref_head;
        struct page *page;
        int err, ref, ref_cnt = 0;
        int grant_ro = (sc->sc_data_direction == DMA_TO_DEVICE);
        unsigned int i, off, len, bytes;
        unsigned int data_len = scsi_bufflen(sc);
        unsigned int data_grants = 0, seg_grants = 0;
        struct scatterlist *sg;
        struct scsiif_request_segment *seg;

        ring_req->nr_segments = 0;
        if (sc->sc_data_direction == DMA_NONE || !data_len)
                return 0;

        scsi_for_each_sg(sc, sg, scsi_sg_count(sc), i)
                data_grants += PFN_UP(sg->offset + sg->length);

        if (data_grants > VSCSIIF_SG_TABLESIZE) {
                if (data_grants > info->host->sg_tablesize) {
                        shost_printk(KERN_ERR, info->host, KBUILD_MODNAME
                             "Unable to map request_buffer for command!\n");
                        return -E2BIG;
                }
                seg_grants = vscsiif_grants_sg(data_grants);
                shadow->sg = kcalloc(data_grants,
                        sizeof(struct scsiif_request_segment), GFP_ATOMIC);
                if (!shadow->sg)
                        return -ENOMEM;
        }
        seg = shadow->sg ? : ring_req->seg;

        err = gnttab_alloc_grant_references(seg_grants + data_grants,
                                            &gref_head);
        if (err) {
                kfree(shadow->sg);
                shost_printk(KERN_ERR, info->host, KBUILD_MODNAME
                             "gnttab_alloc_grant_references() error\n");
                return -ENOMEM;
        }

        if (seg_grants) {
                page = virt_to_page(seg);
                off = (unsigned long)seg & ~PAGE_MASK;
                len = sizeof(struct scsiif_request_segment) * data_grants;
                while (len > 0) {
                        bytes = min_t(unsigned int, len, PAGE_SIZE - off);

                        ref = gnttab_claim_grant_reference(&gref_head);
                        BUG_ON(ref == -ENOSPC);

                        gnttab_grant_foreign_access_ref(ref,
                                info->dev->otherend_id,
                                xen_page_to_gfn(page), 1);
                        shadow->gref[ref_cnt] = ref;
                        ring_req->seg[ref_cnt].gref   = ref;
                        ring_req->seg[ref_cnt].offset = (uint16_t)off;
                        ring_req->seg[ref_cnt].length = (uint16_t)bytes;

                        page++;
                        len -= bytes;
                        off = 0;
                        ref_cnt++;
                }
                BUG_ON(seg_grants < ref_cnt);
                seg_grants = ref_cnt;
        }

        scsi_for_each_sg(sc, sg, scsi_sg_count(sc), i) {
                page = sg_page(sg);
                off = sg->offset;
                len = sg->length;

                while (len > 0 && data_len > 0) {
                        /*
                         * sg sends a scatterlist that is larger than
                         * the data_len it wants transferred for certain
                         * IO sizes.
                         */
                        bytes = min_t(unsigned int, len, PAGE_SIZE - off);
                        bytes = min(bytes, data_len);

                        ref = gnttab_claim_grant_reference(&gref_head);
                        BUG_ON(ref == -ENOSPC);

                        gnttab_grant_foreign_access_ref(ref,
                                info->dev->otherend_id,
                                xen_page_to_gfn(page),
                                grant_ro);

                        shadow->gref[ref_cnt] = ref;
                        seg->gref   = ref;
                        seg->offset = (uint16_t)off;
                        seg->length = (uint16_t)bytes;

                        page++;
                        seg++;
                        len -= bytes;
                        data_len -= bytes;
                        off = 0;
                        ref_cnt++;
                }
        }

        if (seg_grants)
                ring_req->nr_segments = VSCSIIF_SG_GRANT | seg_grants;
        else
                ring_req->nr_segments = (uint8_t)ref_cnt;
        shadow->nr_grants = ref_cnt;

        return 0;
}

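/*
 * Allocate a ring slot and fill in the fixed part of the request
 * (addressing, CDB, data direction, timeout) from the scsi_cmnd.
 */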
static struct vscsiif_request *scsifront_command2ring(
                struct vscsifrnt_info *info, struct scsi_cmnd *sc,
                struct vscsifrnt_shadow *shadow)
{
        struct vscsiif_request *ring_req;

        memset(shadow, 0, sizeof(*shadow));

        ring_req = scsifront_pre_req(info);
        if (!ring_req)
                return NULL;

        info->shadow[ring_req->rqid] = shadow;
        shadow->rqid = ring_req->rqid;

        ring_req->id      = sc->device->id;
        ring_req->lun     = sc->device->lun;
        ring_req->channel = sc->device->channel;
        ring_req->cmd_len = sc->cmd_len;

        BUG_ON(sc->cmd_len > VSCSIIF_MAX_COMMAND_SIZE);

        memcpy(ring_req->cmnd, sc->cmnd, sc->cmd_len);

        ring_req->sc_data_direction   = (uint8_t)sc->sc_data_direction;
        ring_req->timeout_per_command = sc->request->timeout / HZ;

        return ring_req;
}

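/*
 * scsifront_enter()/scsifront_return() bracket every section that may
 * touch the ring. While a suspend is pausing the device, new entries
 * are refused; the last caller leaving wakes the suspend path waiting
 * in scsifront_suspend().
 */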
static int scsifront_enter(struct vscsifrnt_info *info)
{
        if (info->pause)
                return 1;
        info->callers++;
        return 0;
}

static void scsifront_return(struct vscsifrnt_info *info)
{
        info->callers--;
        if (info->callers)
                return;

        if (!info->waiting_pause)
                return;

        info->waiting_pause = 0;
        wake_up(&info->wq_pause);
}

static int scsifront_queuecommand(struct Scsi_Host *shost,
                                  struct scsi_cmnd *sc)
{
        struct vscsifrnt_info *info = shost_priv(shost);
        struct vscsiif_request *ring_req;
        struct vscsifrnt_shadow *shadow = scsi_cmd_priv(sc);
        unsigned long flags;
        int err;
        uint16_t rqid;

        spin_lock_irqsave(shost->host_lock, flags);
        if (scsifront_enter(info)) {
                spin_unlock_irqrestore(shost->host_lock, flags);
                return SCSI_MLQUEUE_HOST_BUSY;
        }
        if (RING_FULL(&info->ring))
                goto busy;

        ring_req = scsifront_command2ring(info, sc, shadow);
        if (!ring_req)
                goto busy;

        sc->result = 0;

        rqid = ring_req->rqid;
        ring_req->act = VSCSIIF_ACT_SCSI_CDB;

        shadow->sc  = sc;
        shadow->act = VSCSIIF_ACT_SCSI_CDB;

        err = map_data_for_request(info, sc, ring_req, shadow);
        if (err < 0) {
                pr_debug("%s: err %d\n", __func__, err);
                scsifront_put_rqid(info, rqid);
                scsifront_return(info);
                spin_unlock_irqrestore(shost->host_lock, flags);
                if (err == -ENOMEM)
                        return SCSI_MLQUEUE_HOST_BUSY;
                sc->result = DID_ERROR << 16;
                sc->scsi_done(sc);
                return 0;
        }

        scsifront_do_request(info);
        scsifront_return(info);
        spin_unlock_irqrestore(shost->host_lock, flags);

        return 0;

busy:
        scsifront_return(info);
        spin_unlock_irqrestore(shost->host_lock, flags);
        pr_debug("%s: busy\n", __func__);
        return SCSI_MLQUEUE_HOST_BUSY;
}

/*
 * Any exception handling (reset or abort) must be forwarded to the backend.
 * We have to wait until an answer is returned. This answer contains the
 * result to be returned to the requestor.
 */
static int scsifront_action_handler(struct scsi_cmnd *sc, uint8_t act)
{
        struct Scsi_Host *host = sc->device->host;
        struct vscsifrnt_info *info = shost_priv(host);
        struct vscsifrnt_shadow *shadow, *s = scsi_cmd_priv(sc);
        struct vscsiif_request *ring_req;
        int err = 0;

        shadow = kmalloc(sizeof(*shadow), GFP_NOIO);
        if (!shadow)
                return FAILED;

        spin_lock_irq(host->host_lock);

        for (;;) {
                if (!RING_FULL(&info->ring)) {
                        ring_req = scsifront_command2ring(info, sc, shadow);
                        if (ring_req)
                                break;
                }
                if (err || info->pause) {
                        spin_unlock_irq(host->host_lock);
                        kfree(shadow);
                        return FAILED;
                }
                info->wait_ring_available = 1;
                spin_unlock_irq(host->host_lock);
                err = wait_event_interruptible(info->wq_sync,
                                               !info->wait_ring_available);
                spin_lock_irq(host->host_lock);
        }

        if (scsifront_enter(info)) {
                spin_unlock_irq(host->host_lock);
                kfree(shadow);
                return FAILED;
        }

        ring_req->act = act;
        ring_req->ref_rqid = s->rqid;

        shadow->act = act;
        shadow->rslt_reset = RSLT_RESET_WAITING;
        init_waitqueue_head(&shadow->wq_reset);

        ring_req->nr_segments = 0;

        scsifront_do_request(info);

        spin_unlock_irq(host->host_lock);
        err = wait_event_interruptible(shadow->wq_reset, shadow->wait_reset);
        spin_lock_irq(host->host_lock);

        if (!err) {
                err = shadow->rslt_reset;
                scsifront_put_rqid(info, shadow->rqid);
                kfree(shadow);
        } else {
                spin_lock(&info->shadow_lock);
                shadow->rslt_reset = RSLT_RESET_ERR;
                spin_unlock(&info->shadow_lock);
                err = FAILED;
        }

        scsifront_return(info);
        spin_unlock_irq(host->host_lock);
        return err;
}

static int scsifront_eh_abort_handler(struct scsi_cmnd *sc)
{
        pr_debug("%s\n", __func__);
        return scsifront_action_handler(sc, VSCSIIF_ACT_SCSI_ABORT);
}

static int scsifront_dev_reset_handler(struct scsi_cmnd *sc)
{
        pr_debug("%s\n", __func__);
        return scsifront_action_handler(sc, VSCSIIF_ACT_SCSI_RESET);
}

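/*
 * slave_configure/slave_destroy run for every device (un)plugged via
 * scsifront_do_lun_hotplug(); only the task performing the hotplug
 * (info->curr) publishes the resulting state to the xenstore node the
 * backend watches.
 */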
static int scsifront_sdev_configure(struct scsi_device *sdev)
{
        struct vscsifrnt_info *info = shost_priv(sdev->host);

        if (info && current == info->curr)
                xenbus_printf(XBT_NIL, info->dev->nodename,
                              info->dev_state_path, "%d", XenbusStateConnected);

        return 0;
}

static void scsifront_sdev_destroy(struct scsi_device *sdev)
{
        struct vscsifrnt_info *info = shost_priv(sdev->host);

        if (info && current == info->curr)
                xenbus_printf(XBT_NIL, info->dev->nodename,
                              info->dev_state_path, "%d", XenbusStateClosed);
}

static struct scsi_host_template scsifront_sht = {
        .module                 = THIS_MODULE,
        .name                   = "Xen SCSI frontend driver",
        .queuecommand           = scsifront_queuecommand,
        .eh_abort_handler       = scsifront_eh_abort_handler,
        .eh_device_reset_handler = scsifront_dev_reset_handler,
        .slave_configure        = scsifront_sdev_configure,
        .slave_destroy          = scsifront_sdev_destroy,
        .cmd_per_lun            = VSCSIIF_DEFAULT_CMD_PER_LUN,
        .can_queue              = VSCSIIF_MAX_REQS,
        .this_id                = -1,
        .cmd_size               = sizeof(struct vscsifrnt_shadow),
        .sg_tablesize           = VSCSIIF_SG_TABLESIZE,
        .use_clustering         = DISABLE_CLUSTERING,
        .proc_name              = "scsifront",
};

static int scsifront_alloc_ring(struct vscsifrnt_info *info)
{
        struct xenbus_device *dev = info->dev;
        struct vscsiif_sring *sring;
        grant_ref_t gref;
        int err = -ENOMEM;

        /***** Frontend to Backend ring start *****/
        sring = (struct vscsiif_sring *)__get_free_page(GFP_KERNEL);
        if (!sring) {
                xenbus_dev_fatal(dev, err,
                        "fail to allocate shared ring (Front to Back)");
                return err;
        }
        SHARED_RING_INIT(sring);
        FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE);

        err = xenbus_grant_ring(dev, sring, 1, &gref);
        if (err < 0) {
                free_page((unsigned long)sring);
                xenbus_dev_fatal(dev, err,
                        "fail to grant shared ring (Front to Back)");
                return err;
        }
        info->ring_ref = gref;

        err = xenbus_alloc_evtchn(dev, &info->evtchn);
        if (err) {
                xenbus_dev_fatal(dev, err, "xenbus_alloc_evtchn");
                goto free_gnttab;
        }

        err = bind_evtchn_to_irq(info->evtchn);
        if (err <= 0) {
                xenbus_dev_fatal(dev, err, "bind_evtchn_to_irq");
                goto free_gnttab;
        }

        info->irq = err;

        err = request_threaded_irq(info->irq, NULL, scsifront_irq_fn,
                                   IRQF_ONESHOT, "scsifront", info);
        if (err) {
                xenbus_dev_fatal(dev, err, "request_threaded_irq");
                goto free_irq;
        }

        return 0;

/* free resource */
free_irq:
        unbind_from_irqhandler(info->irq, info);
free_gnttab:
        gnttab_end_foreign_access(info->ring_ref, 0,
                                  (unsigned long)info->ring.sring);

        return err;
}

static void scsifront_free_ring(struct vscsifrnt_info *info)
{
        unbind_from_irqhandler(info->irq, info);
        gnttab_end_foreign_access(info->ring_ref, 0,
                                  (unsigned long)info->ring.sring);
}

static int scsifront_init_ring(struct vscsifrnt_info *info)
{
        struct xenbus_device *dev = info->dev;
        struct xenbus_transaction xbt;
        int err;

        pr_debug("%s\n", __func__);

        err = scsifront_alloc_ring(info);
        if (err)
                return err;
        pr_debug("%s: %u %u\n", __func__, info->ring_ref, info->evtchn);

again:
        err = xenbus_transaction_start(&xbt);
        if (err)
                xenbus_dev_fatal(dev, err, "starting transaction");

        err = xenbus_printf(xbt, dev->nodename, "ring-ref", "%u",
                            info->ring_ref);
        if (err) {
                xenbus_dev_fatal(dev, err, "%s", "writing ring-ref");
                goto fail;
        }

        err = xenbus_printf(xbt, dev->nodename, "event-channel", "%u",
                            info->evtchn);
        if (err) {
                xenbus_dev_fatal(dev, err, "%s", "writing event-channel");
                goto fail;
        }

        err = xenbus_transaction_end(xbt, 0);
        if (err) {
                if (err == -EAGAIN)
                        goto again;
                xenbus_dev_fatal(dev, err, "completing transaction");
                goto free_sring;
        }

        return 0;

fail:
        xenbus_transaction_end(xbt, 1);
free_sring:
        scsifront_free_ring(info);

        return err;
}

static int scsifront_probe(struct xenbus_device *dev,
                           const struct xenbus_device_id *id)
{
        struct vscsifrnt_info *info;
        struct Scsi_Host *host;
        int err = -ENOMEM;
        char name[TASK_COMM_LEN];

        host = scsi_host_alloc(&scsifront_sht, sizeof(*info));
        if (!host) {
                xenbus_dev_fatal(dev, err, "fail to allocate scsi host");
                return err;
        }
        info = (struct vscsifrnt_info *)host->hostdata;

        dev_set_drvdata(&dev->dev, info);
        info->dev = dev;

        bitmap_fill(info->shadow_free_bitmap, VSCSIIF_MAX_REQS);

        err = scsifront_init_ring(info);
        if (err) {
                scsi_host_put(host);
                return err;
        }

        init_waitqueue_head(&info->wq_sync);
        init_waitqueue_head(&info->wq_pause);
        spin_lock_init(&info->shadow_lock);

        snprintf(name, TASK_COMM_LEN, "vscsiif.%d", host->host_no);

        host->max_id      = VSCSIIF_MAX_TARGET;
        host->max_channel = 0;
        host->max_lun     = VSCSIIF_MAX_LUN;
        host->max_sectors = (host->sg_tablesize - 1) * PAGE_SIZE / 512;
        host->max_cmd_len = VSCSIIF_MAX_COMMAND_SIZE;

        err = scsi_add_host(host, &dev->dev);
        if (err) {
                dev_err(&dev->dev, "fail to add scsi host %d\n", err);
                goto free_sring;
        }
        info->host = host;
        info->host_active = 1;

        xenbus_switch_state(dev, XenbusStateInitialised);

        return 0;

free_sring:
        scsifront_free_ring(info);
        scsi_host_put(host);
        return err;
}

static int scsifront_resume(struct xenbus_device *dev)
{
        struct vscsifrnt_info *info = dev_get_drvdata(&dev->dev);
        struct Scsi_Host *host = info->host;
        int err;

        spin_lock_irq(host->host_lock);

        /* Finish all still pending commands. */
        scsifront_finish_all(info);

        spin_unlock_irq(host->host_lock);

        /* Reconnect to dom0. */
        scsifront_free_ring(info);
        err = scsifront_init_ring(info);
        if (err) {
                dev_err(&dev->dev, "fail to resume %d\n", err);
                scsi_host_put(host);
                return err;
        }

        xenbus_switch_state(dev, XenbusStateInitialised);

        return 0;
}

static int scsifront_suspend(struct xenbus_device *dev)
{
        struct vscsifrnt_info *info = dev_get_drvdata(&dev->dev);
        struct Scsi_Host *host = info->host;
        int err = 0;

        /* No new commands for the backend. */
        spin_lock_irq(host->host_lock);
        info->pause = 1;
        while (info->callers && !err) {
                info->waiting_pause = 1;
                info->wait_ring_available = 0;
                spin_unlock_irq(host->host_lock);
                wake_up(&info->wq_sync);
                err = wait_event_interruptible(info->wq_pause,
                                               !info->waiting_pause);
                spin_lock_irq(host->host_lock);
        }
        spin_unlock_irq(host->host_lock);
        return err;
}

static int scsifront_remove(struct xenbus_device *dev)
{
        struct vscsifrnt_info *info = dev_get_drvdata(&dev->dev);

        pr_debug("%s: %s removed\n", __func__, dev->nodename);

        mutex_lock(&scsifront_mutex);
        if (info->host_active) {
                /* Scsi_Host not yet removed */
                scsi_remove_host(info->host);
                info->host_active = 0;
        }
        mutex_unlock(&scsifront_mutex);

        scsifront_free_ring(info);
        scsi_host_put(info->host);

        return 0;
}

static void scsifront_disconnect(struct vscsifrnt_info *info)
{
        struct xenbus_device *dev = info->dev;
        struct Scsi_Host *host = info->host;

        pr_debug("%s: %s disconnect\n", __func__, dev->nodename);

        /*
         * When this function is executed, all devices of
         * the frontend have already been deleted.
         * Therefore, there is no need to block I/O before remove_host.
         */

        mutex_lock(&scsifront_mutex);
        if (info->host_active) {
                scsi_remove_host(host);
                info->host_active = 0;
        }
        mutex_unlock(&scsifront_mutex);

        xenbus_frontend_closed(dev);
}

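/*
 * Walk the backend's "vscsi-devs" directory in xenstore. Each entry
 * carries a state node and a "v-dev" node in h:c:t:l notation; depending
 * on op, devices are added, removed or re-announced, and the per-device
 * state node is updated accordingly.
 */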
static void scsifront_do_lun_hotplug(struct vscsifrnt_info *info, int op)
{
        struct xenbus_device *dev = info->dev;
        int i, err;
        char str[64];
        char **dir;
        unsigned int dir_n = 0;
        unsigned int device_state;
        unsigned int hst, chn, tgt, lun;
        struct scsi_device *sdev;

        dir = xenbus_directory(XBT_NIL, dev->otherend, "vscsi-devs", &dir_n);
        if (IS_ERR(dir))
                return;

        /* mark current task as the one allowed to modify device states */
        BUG_ON(info->curr);
        info->curr = current;

        for (i = 0; i < dir_n; i++) {
                /* read status */
                snprintf(str, sizeof(str), "vscsi-devs/%s/state", dir[i]);
                err = xenbus_scanf(XBT_NIL, dev->otherend, str, "%u",
                                   &device_state);
                if (XENBUS_EXIST_ERR(err))
                        continue;

                /* virtual SCSI device */
                snprintf(str, sizeof(str), "vscsi-devs/%s/v-dev", dir[i]);
                err = xenbus_scanf(XBT_NIL, dev->otherend, str,
                                   "%u:%u:%u:%u", &hst, &chn, &tgt, &lun);
                if (XENBUS_EXIST_ERR(err))
                        continue;

                /*
                 * Front device state path, used in slave_configure called
                 * on successful scsi_add_device, and in slave_destroy called
                 * on remove of a device.
                 */
                snprintf(info->dev_state_path, sizeof(info->dev_state_path),
                         "vscsi-devs/%s/state", dir[i]);

                switch (op) {
                case VSCSIFRONT_OP_ADD_LUN:
                        if (device_state != XenbusStateInitialised)
                                break;

                        if (scsi_add_device(info->host, chn, tgt, lun)) {
                                dev_err(&dev->dev, "scsi_add_device\n");
                                xenbus_printf(XBT_NIL, dev->nodename,
                                              info->dev_state_path,
                                              "%d", XenbusStateClosed);
                        }
                        break;
                case VSCSIFRONT_OP_DEL_LUN:
                        if (device_state != XenbusStateClosing)
                                break;

                        sdev = scsi_device_lookup(info->host, chn, tgt, lun);
                        if (sdev) {
                                scsi_remove_device(sdev);
                                scsi_device_put(sdev);
                        }
                        break;
                case VSCSIFRONT_OP_READD_LUN:
                        if (device_state == XenbusStateConnected)
                                xenbus_printf(XBT_NIL, dev->nodename,
                                              info->dev_state_path,
                                              "%d", XenbusStateConnected);
                        break;
                default:
                        break;
                }
        }

        info->curr = NULL;

        kfree(dir);
}

static void scsifront_read_backend_params(struct xenbus_device *dev,
                                          struct vscsifrnt_info *info)
{
        unsigned int sg_grant, nr_segs;
        int ret;
        struct Scsi_Host *host = info->host;

        ret = xenbus_scanf(XBT_NIL, dev->otherend, "feature-sg-grant", "%u",
                           &sg_grant);
        if (ret != 1)
                sg_grant = 0;
        nr_segs = min_t(unsigned int, sg_grant, SG_ALL);
        nr_segs = max_t(unsigned int, nr_segs, VSCSIIF_SG_TABLESIZE);
        nr_segs = min_t(unsigned int, nr_segs,
                        VSCSIIF_SG_TABLESIZE * PAGE_SIZE /
                        sizeof(struct scsiif_request_segment));

        if (!info->pause && sg_grant)
                dev_info(&dev->dev, "using up to %d SG entries\n", nr_segs);
        else if (info->pause && nr_segs < host->sg_tablesize)
                dev_warn(&dev->dev,
                         "SG entries decreased from %d to %u - device may not work properly anymore\n",
                         host->sg_tablesize, nr_segs);

        host->sg_tablesize = nr_segs;
        host->max_sectors = (nr_segs - 1) * PAGE_SIZE / 512;
}

static void scsifront_backend_changed(struct xenbus_device *dev,
                                      enum xenbus_state backend_state)
{
        struct vscsifrnt_info *info = dev_get_drvdata(&dev->dev);

        pr_debug("%s: %p %u %u\n", __func__, dev, dev->state, backend_state);

        switch (backend_state) {
        case XenbusStateUnknown:
        case XenbusStateInitialising:
        case XenbusStateInitWait:
        case XenbusStateInitialised:
                break;

        case XenbusStateConnected:
                scsifront_read_backend_params(dev, info);

                if (info->pause) {
                        scsifront_do_lun_hotplug(info, VSCSIFRONT_OP_READD_LUN);
                        xenbus_switch_state(dev, XenbusStateConnected);
                        info->pause = 0;
                        return;
                }

                if (xenbus_read_driver_state(dev->nodename) ==
                    XenbusStateInitialised)
                        scsifront_do_lun_hotplug(info, VSCSIFRONT_OP_ADD_LUN);

                if (dev->state != XenbusStateConnected)
                        xenbus_switch_state(dev, XenbusStateConnected);
                break;

        case XenbusStateClosed:
                if (dev->state == XenbusStateClosed)
                        break;
                /* Missed the backend's Closing state -- fallthrough */
        case XenbusStateClosing:
                scsifront_disconnect(info);
                break;

        case XenbusStateReconfiguring:
                scsifront_do_lun_hotplug(info, VSCSIFRONT_OP_DEL_LUN);
                xenbus_switch_state(dev, XenbusStateReconfiguring);
                break;

        case XenbusStateReconfigured:
                scsifront_do_lun_hotplug(info, VSCSIFRONT_OP_ADD_LUN);
                xenbus_switch_state(dev, XenbusStateConnected);
                break;
        }
}

static const struct xenbus_device_id scsifront_ids[] = {
        { "vscsi" },
        { "" }
};

static struct xenbus_driver scsifront_driver = {
        .ids                    = scsifront_ids,
        .probe                  = scsifront_probe,
        .remove                 = scsifront_remove,
        .resume                 = scsifront_resume,
        .suspend                = scsifront_suspend,
        .otherend_changed       = scsifront_backend_changed,
};

static int __init scsifront_init(void)
{
        if (!xen_domain())
                return -ENODEV;

        return xenbus_register_frontend(&scsifront_driver);
}
module_init(scsifront_init);

static void __exit scsifront_exit(void)
{
        xenbus_unregister_driver(&scsifront_driver);
}
module_exit(scsifront_exit);

MODULE_DESCRIPTION("Xen SCSI frontend driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("xen:vscsi");
MODULE_AUTHOR("Juergen Gross <jgross@suse.com>");