Linux 3.4.102
drivers/scsi/storvsc_drv.c
1 /*
2 * Copyright (c) 2009, Microsoft Corporation.
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15 * Place - Suite 330, Boston, MA 02111-1307 USA.
17 * Authors:
18 * Haiyang Zhang <haiyangz@microsoft.com>
19 * Hank Janssen <hjanssen@microsoft.com>
20 * K. Y. Srinivasan <kys@microsoft.com>
23 #include <linux/kernel.h>
24 #include <linux/wait.h>
25 #include <linux/sched.h>
26 #include <linux/completion.h>
27 #include <linux/string.h>
28 #include <linux/mm.h>
29 #include <linux/delay.h>
30 #include <linux/init.h>
31 #include <linux/slab.h>
32 #include <linux/module.h>
33 #include <linux/device.h>
34 #include <linux/hyperv.h>
35 #include <linux/mempool.h>
36 #include <scsi/scsi.h>
37 #include <scsi/scsi_cmnd.h>
38 #include <scsi/scsi_host.h>
39 #include <scsi/scsi_device.h>
40 #include <scsi/scsi_tcq.h>
41 #include <scsi/scsi_eh.h>
42 #include <scsi/scsi_devinfo.h>
43 #include <scsi/scsi_dbg.h>
46 * All wire protocol details (storage protocol between the guest and the host)
47 * are consolidated here.
49 * Begin protocol definitions.
53 * Version history:
54 * V1 Beta: 0.1
55 * V1 RC < 2008/1/31: 1.0
56 * V1 RC > 2008/1/31: 2.0
57 * Win7: 4.2
60 #define VMSTOR_CURRENT_MAJOR 4
61 #define VMSTOR_CURRENT_MINOR 2
64 /* Packet structure describing virtual storage requests. */
65 enum vstor_packet_operation {
66 VSTOR_OPERATION_COMPLETE_IO = 1,
67 VSTOR_OPERATION_REMOVE_DEVICE = 2,
68 VSTOR_OPERATION_EXECUTE_SRB = 3,
69 VSTOR_OPERATION_RESET_LUN = 4,
70 VSTOR_OPERATION_RESET_ADAPTER = 5,
71 VSTOR_OPERATION_RESET_BUS = 6,
72 VSTOR_OPERATION_BEGIN_INITIALIZATION = 7,
73 VSTOR_OPERATION_END_INITIALIZATION = 8,
74 VSTOR_OPERATION_QUERY_PROTOCOL_VERSION = 9,
75 VSTOR_OPERATION_QUERY_PROPERTIES = 10,
76 VSTOR_OPERATION_ENUMERATE_BUS = 11,
77 VSTOR_OPERATION_MAXIMUM = 11
81 * Platform neutral description of a scsi request -
82 * this remains the same across the wire regardless of 32/64 bit
83 * note: it's patterned off the SCSI_PASS_THROUGH structure
85 #define STORVSC_MAX_CMD_LEN 0x10
86 #define STORVSC_SENSE_BUFFER_SIZE 0x12
87 #define STORVSC_MAX_BUF_LEN_WITH_PADDING 0x14
89 struct vmscsi_request {
90 u16 length;
91 u8 srb_status;
92 u8 scsi_status;
94 u8 port_number;
95 u8 path_id;
96 u8 target_id;
97 u8 lun;
99 u8 cdb_length;
100 u8 sense_info_length;
101 u8 data_in;
102 u8 reserved;
104 u32 data_transfer_length;
106 union {
107 u8 cdb[STORVSC_MAX_CMD_LEN];
108 u8 sense_data[STORVSC_SENSE_BUFFER_SIZE];
109 u8 reserved_array[STORVSC_MAX_BUF_LEN_WITH_PADDING];
111 } __attribute((packed));
115 * This structure is sent during the initialization phase to get the different
116 * properties of the channel.
118 struct vmstorage_channel_properties {
119 u16 protocol_version;
120 u8 path_id;
121 u8 target_id;
123 /* Note: port number is only really known on the client side */
124 u32 port_number;
125 u32 flags;
126 u32 max_transfer_bytes;
129 * This id is unique for each channel and will correspond with
130 * vendor specific data in the inquiry data.
133 u64 unique_id;
134 } __packed;
136 /* This structure is sent during the storage protocol negotiations. */
137 struct vmstorage_protocol_version {
138 /* Major (MSW) and minor (LSW) version numbers. */
139 u16 major_minor;
142 * Revision number is auto-incremented whenever this file is changed
143 * (See FILL_VMSTOR_REVISION macro above). Mismatch does not
144 * definitely indicate incompatibility--but it does indicate mismatched
145 * builds.
146 * This is only used on the Windows side. Just set it to 0.
148 u16 revision;
149 } __packed;
151 /* Channel Property Flags */
152 #define STORAGE_CHANNEL_REMOVABLE_FLAG 0x1
153 #define STORAGE_CHANNEL_EMULATED_IDE_FLAG 0x2
155 struct vstor_packet {
156 /* Requested operation type */
157 enum vstor_packet_operation operation;
159 /* Flags - see below for values */
160 u32 flags;
162 /* Status of the request returned from the server side. */
163 u32 status;
165 /* Data payload area */
166 union {
168 * Structure used to forward SCSI commands from the
169 * client to the server.
171 struct vmscsi_request vm_srb;
173 /* Structure used to query channel properties. */
174 struct vmstorage_channel_properties storage_channel_properties;
176 /* Used during version negotiations. */
177 struct vmstorage_protocol_version version;
179 } __packed;
182 * Packet Flags:
184 * This flag indicates that the server should send back a completion for this
185 * packet.
188 #define REQUEST_COMPLETION_FLAG 0x1
190 /* Matches Windows-end */
191 enum storvsc_request_type {
192 WRITE_TYPE = 0,
193 READ_TYPE,
194 UNKNOWN_TYPE,
198 * SRB status codes and masks; a subset of the codes used here.
201 #define SRB_STATUS_AUTOSENSE_VALID 0x80
202 #define SRB_STATUS_INVALID_LUN 0x20
203 #define SRB_STATUS_SUCCESS 0x01
204 #define SRB_STATUS_ERROR 0x04
207 * This is the end of Protocol specific defines.
212 * We set up a mempool to allocate request structures for this driver
213 * on a per-lun basis. The following define specifies the number of
214 * elements in the pool.
217 #define STORVSC_MIN_BUF_NR 64
218 static int storvsc_ringbuffer_size = (20 * PAGE_SIZE);
220 module_param(storvsc_ringbuffer_size, int, S_IRUGO);
221 MODULE_PARM_DESC(storvsc_ringbuffer_size, "Ring buffer size (bytes)");
223 #define STORVSC_MAX_IO_REQUESTS 128
226 * In Hyper-V, each port/path/target maps to 1 scsi host adapter. In
227 * reality, the path/target is not used (i.e., always set to 0) so our
228 * scsi host adapter essentially has 1 bus with 1 target that contains
229 * up to 256 luns.
231 #define STORVSC_MAX_LUNS_PER_TARGET 64
232 #define STORVSC_MAX_TARGETS 1
233 #define STORVSC_MAX_CHANNELS 1
237 struct storvsc_cmd_request {
238 struct list_head entry;
239 struct scsi_cmnd *cmd;
241 unsigned int bounce_sgl_count;
242 struct scatterlist *bounce_sgl;
244 struct hv_device *device;
246 /* Synchronize the request/response if needed */
247 struct completion wait_event;
249 unsigned char *sense_buffer;
250 struct hv_multipage_buffer data_buffer;
251 struct vstor_packet vstor_packet;
255 /* A storvsc device is a device object that contains a vmbus channel */
256 struct storvsc_device {
257 struct hv_device *device;
259 bool destroy;
260 bool drain_notify;
261 atomic_t num_outstanding_req;
262 struct Scsi_Host *host;
264 wait_queue_head_t waiting_to_drain;
267 * Each unique Port/Path/Target represents 1 channel, i.e., one scsi
268 * controller. In reality, the path id and target id are always 0
269 * and the port is set by us
271 unsigned int port_number;
272 unsigned char path_id;
273 unsigned char target_id;
275 /* Used for vsc/vsp channel reset process */
276 struct storvsc_cmd_request init_request;
277 struct storvsc_cmd_request reset_request;
280 struct stor_mem_pools {
281 struct kmem_cache *request_pool;
282 mempool_t *request_mempool;
285 struct hv_host_device {
286 struct hv_device *dev;
287 unsigned int port;
288 unsigned char path;
289 unsigned char target;
292 struct storvsc_scan_work {
293 struct work_struct work;
294 struct Scsi_Host *host;
295 uint lun;
298 static void storvsc_bus_scan(struct work_struct *work)
300 struct storvsc_scan_work *wrk;
301 int id, order_id;
303 wrk = container_of(work, struct storvsc_scan_work, work);
304 for (id = 0; id < wrk->host->max_id; ++id) {
305 if (wrk->host->reverse_ordering)
306 order_id = wrk->host->max_id - id - 1;
307 else
308 order_id = id;
310 scsi_scan_target(&wrk->host->shost_gendev, 0,
311 order_id, SCAN_WILD_CARD, 1);
313 kfree(wrk);
316 static void storvsc_remove_lun(struct work_struct *work)
318 struct storvsc_scan_work *wrk;
319 struct scsi_device *sdev;
321 wrk = container_of(work, struct storvsc_scan_work, work);
322 if (!scsi_host_get(wrk->host))
323 goto done;
325 sdev = scsi_device_lookup(wrk->host, 0, 0, wrk->lun);
327 if (sdev) {
328 scsi_remove_device(sdev);
329 scsi_device_put(sdev);
331 scsi_host_put(wrk->host);
333 done:
334 kfree(wrk);
338 * Major/minor macros. Minor version is in LSB, meaning that earlier flat
339 * version numbers will be interpreted as "0.x" (i.e., 1 becomes 0.1).
342 static inline u16 storvsc_get_version(u8 major, u8 minor)
344 u16 version;
346 version = ((major << 8) | minor);
347 return version;
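/*
 * Worked example (editorial illustration, not part of the upstream file):
 * the helper above puts the major number in the high byte and the minor in
 * the low byte, so
 *
 *	storvsc_get_version(VMSTOR_CURRENT_MAJOR, VMSTOR_CURRENT_MINOR)
 *
 * evaluates to 0x0402 for the 4.2 (Win7-era) protocol advertised by this
 * driver.
 */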
351 * We can get incoming messages from the host that are not in response to
352 * messages that we have sent out. An example of this would be messages
353 * received by the guest to notify dynamic addition/removal of LUNs. To
354 * deal with potential race conditions where the driver may be in the
355 * midst of being unloaded when we might receive an unsolicited message
356 * from the host, we have implemented a mechanism to guarantee sequential
357 * consistency:
359 * 1) Once the device is marked as being destroyed, we will fail all
360 * outgoing messages.
361 * 2) We permit incoming messages when the device is being destroyed,
362 * only to properly account for messages already sent out.
365 static inline struct storvsc_device *get_out_stor_device(
366 struct hv_device *device)
368 struct storvsc_device *stor_device;
370 stor_device = hv_get_drvdata(device);
372 if (stor_device && stor_device->destroy)
373 stor_device = NULL;
375 return stor_device;
379 static inline void storvsc_wait_to_drain(struct storvsc_device *dev)
381 dev->drain_notify = true;
382 wait_event(dev->waiting_to_drain,
383 atomic_read(&dev->num_outstanding_req) == 0);
384 dev->drain_notify = false;
387 static inline struct storvsc_device *get_in_stor_device(
388 struct hv_device *device)
390 struct storvsc_device *stor_device;
392 stor_device = hv_get_drvdata(device);
394 if (!stor_device)
395 goto get_in_err;
398 * If the device is being destroyed; allow incoming
399 * traffic only to cleanup outstanding requests.
402 if (stor_device->destroy &&
403 (atomic_read(&stor_device->num_outstanding_req) == 0))
404 stor_device = NULL;
406 get_in_err:
407 return stor_device;
411 static void destroy_bounce_buffer(struct scatterlist *sgl,
412 unsigned int sg_count)
414 int i;
415 struct page *page_buf;
417 for (i = 0; i < sg_count; i++) {
418 page_buf = sg_page((&sgl[i]));
419 if (page_buf != NULL)
420 __free_page(page_buf);
423 kfree(sgl);
426 static int do_bounce_buffer(struct scatterlist *sgl, unsigned int sg_count)
428 int i;
430 /* No need to check */
431 if (sg_count < 2)
432 return -1;
434 /* We have at least 2 sg entries */
435 for (i = 0; i < sg_count; i++) {
436 if (i == 0) {
437 /* make sure 1st one does not have hole */
438 if (sgl[i].offset + sgl[i].length != PAGE_SIZE)
439 return i;
440 } else if (i == sg_count - 1) {
441 /* make sure last one does not have hole */
442 if (sgl[i].offset != 0)
443 return i;
444 } else {
445 /* make sure no hole in the middle */
446 if (sgl[i].length != PAGE_SIZE || sgl[i].offset != 0)
447 return i;
450 return -1;
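/*
 * Illustrative example (editorial addition, assuming 4 KiB pages): a
 * three-element list laid out as {offset 512, len 3584}, {offset 0,
 * len 4096}, {offset 0, len 1024} passes the checks above (no holes, so no
 * bounce buffer is needed), while a middle element of {offset 0, len 2048}
 * would make do_bounce_buffer() return its index and force the request
 * through a bounce buffer.
 */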
453 static struct scatterlist *create_bounce_buffer(struct scatterlist *sgl,
454 unsigned int sg_count,
455 unsigned int len,
456 int write)
458 int i;
459 int num_pages;
460 struct scatterlist *bounce_sgl;
461 struct page *page_buf;
462 unsigned int buf_len = ((write == WRITE_TYPE) ? 0 : PAGE_SIZE);
464 num_pages = ALIGN(len, PAGE_SIZE) >> PAGE_SHIFT;
466 bounce_sgl = kcalloc(num_pages, sizeof(struct scatterlist), GFP_ATOMIC);
467 if (!bounce_sgl)
468 return NULL;
470 sg_init_table(bounce_sgl, num_pages);
471 for (i = 0; i < num_pages; i++) {
472 page_buf = alloc_page(GFP_ATOMIC);
473 if (!page_buf)
474 goto cleanup;
475 sg_set_page(&bounce_sgl[i], page_buf, buf_len, 0);
478 return bounce_sgl;
480 cleanup:
481 destroy_bounce_buffer(bounce_sgl, num_pages);
482 return NULL;
485 /* Disgusting wrapper functions */
486 static inline unsigned long sg_kmap_atomic(struct scatterlist *sgl, int idx)
488 void *addr = kmap_atomic(sg_page(sgl + idx));
489 return (unsigned long)addr;
492 static inline void sg_kunmap_atomic(unsigned long addr)
494 kunmap_atomic((void *)addr);
498 /* Assume the original sgl has enough room */
499 static unsigned int copy_from_bounce_buffer(struct scatterlist *orig_sgl,
500 struct scatterlist *bounce_sgl,
501 unsigned int orig_sgl_count,
502 unsigned int bounce_sgl_count)
504 int i;
505 int j = 0;
506 unsigned long src, dest;
507 unsigned int srclen, destlen, copylen;
508 unsigned int total_copied = 0;
509 unsigned long bounce_addr = 0;
510 unsigned long dest_addr = 0;
511 unsigned long flags;
513 local_irq_save(flags);
515 for (i = 0; i < orig_sgl_count; i++) {
516 dest_addr = sg_kmap_atomic(orig_sgl,i) + orig_sgl[i].offset;
517 dest = dest_addr;
518 destlen = orig_sgl[i].length;
520 if (bounce_addr == 0)
521 bounce_addr = sg_kmap_atomic(bounce_sgl,j);
523 while (destlen) {
524 src = bounce_addr + bounce_sgl[j].offset;
525 srclen = bounce_sgl[j].length - bounce_sgl[j].offset;
527 copylen = min(srclen, destlen);
528 memcpy((void *)dest, (void *)src, copylen);
530 total_copied += copylen;
531 bounce_sgl[j].offset += copylen;
532 destlen -= copylen;
533 dest += copylen;
535 if (bounce_sgl[j].offset == bounce_sgl[j].length) {
536 /* full */
537 sg_kunmap_atomic(bounce_addr);
538 j++;
541 * It is possible that the number of elements
542 * in the bounce buffer may not be equal to
543 * the number of elements in the original
544 * scatter list. Handle this correctly.
547 if (j == bounce_sgl_count) {
549 * We are done; cleanup and return.
551 sg_kunmap_atomic(dest_addr - orig_sgl[i].offset);
552 local_irq_restore(flags);
553 return total_copied;
556 /* if we need to use another bounce buffer */
557 if (destlen || i != orig_sgl_count - 1)
558 bounce_addr = sg_kmap_atomic(bounce_sgl,j);
559 } else if (destlen == 0 && i == orig_sgl_count - 1) {
560 /* unmap the last bounce that is < PAGE_SIZE */
561 sg_kunmap_atomic(bounce_addr);
565 sg_kunmap_atomic(dest_addr - orig_sgl[i].offset);
568 local_irq_restore(flags);
570 return total_copied;
573 /* Assume the bounce_sgl has enough room, i.e., it was built by create_bounce_buffer() */
574 static unsigned int copy_to_bounce_buffer(struct scatterlist *orig_sgl,
575 struct scatterlist *bounce_sgl,
576 unsigned int orig_sgl_count)
578 int i;
579 int j = 0;
580 unsigned long src, dest;
581 unsigned int srclen, destlen, copylen;
582 unsigned int total_copied = 0;
583 unsigned long bounce_addr = 0;
584 unsigned long src_addr = 0;
585 unsigned long flags;
587 local_irq_save(flags);
589 for (i = 0; i < orig_sgl_count; i++) {
590 src_addr = sg_kmap_atomic(orig_sgl,i) + orig_sgl[i].offset;
591 src = src_addr;
592 srclen = orig_sgl[i].length;
594 if (bounce_addr == 0)
595 bounce_addr = sg_kmap_atomic(bounce_sgl,j);
597 while (srclen) {
598 /* assume bounce offset always == 0 */
599 dest = bounce_addr + bounce_sgl[j].length;
600 destlen = PAGE_SIZE - bounce_sgl[j].length;
602 copylen = min(srclen, destlen);
603 memcpy((void *)dest, (void *)src, copylen);
605 total_copied += copylen;
606 bounce_sgl[j].length += copylen;
607 srclen -= copylen;
608 src += copylen;
610 if (bounce_sgl[j].length == PAGE_SIZE) {
611 /* full..move to next entry */
612 sg_kunmap_atomic(bounce_addr);
613 j++;
615 /* if we need to use another bounce buffer */
616 if (srclen || i != orig_sgl_count - 1)
617 bounce_addr = sg_kmap_atomic(bounce_sgl,j);
619 } else if (srclen == 0 && i == orig_sgl_count - 1) {
620 /* unmap the last bounce that is < PAGE_SIZE */
621 sg_kunmap_atomic(bounce_addr);
625 sg_kunmap_atomic(src_addr - orig_sgl[i].offset);
628 local_irq_restore(flags);
630 return total_copied;
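/*
 * Editorial summary of the handshake implemented below: the guest sends
 * BEGIN_INITIALIZATION, then QUERY_PROTOCOL_VERSION (offering the 4.2
 * protocol via storvsc_get_version()), then QUERY_PROPERTIES (whose reply
 * provides the path and target ids), and finally END_INITIALIZATION. Each
 * step is a single vstor_packet with REQUEST_COMPLETION_FLAG set, and the
 * guest waits up to 5 seconds for the host's COMPLETE_IO response before
 * giving up with -ETIMEDOUT.
 */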
633 static int storvsc_channel_init(struct hv_device *device)
635 struct storvsc_device *stor_device;
636 struct storvsc_cmd_request *request;
637 struct vstor_packet *vstor_packet;
638 int ret, t;
640 stor_device = get_out_stor_device(device);
641 if (!stor_device)
642 return -ENODEV;
644 request = &stor_device->init_request;
645 vstor_packet = &request->vstor_packet;
648 * Now, initiate the vsc/vsp initialization protocol on the open
649 * channel
651 memset(request, 0, sizeof(struct storvsc_cmd_request));
652 init_completion(&request->wait_event);
653 vstor_packet->operation = VSTOR_OPERATION_BEGIN_INITIALIZATION;
654 vstor_packet->flags = REQUEST_COMPLETION_FLAG;
656 ret = vmbus_sendpacket(device->channel, vstor_packet,
657 sizeof(struct vstor_packet),
658 (unsigned long)request,
659 VM_PKT_DATA_INBAND,
660 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
661 if (ret != 0)
662 goto cleanup;
664 t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
665 if (t == 0) {
666 ret = -ETIMEDOUT;
667 goto cleanup;
670 if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO ||
671 vstor_packet->status != 0)
672 goto cleanup;
675 /* reuse the packet for version range supported */
676 memset(vstor_packet, 0, sizeof(struct vstor_packet));
677 vstor_packet->operation = VSTOR_OPERATION_QUERY_PROTOCOL_VERSION;
678 vstor_packet->flags = REQUEST_COMPLETION_FLAG;
680 vstor_packet->version.major_minor =
681 storvsc_get_version(VMSTOR_CURRENT_MAJOR, VMSTOR_CURRENT_MINOR);
684 * The revision number is only used in Windows; set it to 0.
686 vstor_packet->version.revision = 0;
688 ret = vmbus_sendpacket(device->channel, vstor_packet,
689 sizeof(struct vstor_packet),
690 (unsigned long)request,
691 VM_PKT_DATA_INBAND,
692 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
693 if (ret != 0)
694 goto cleanup;
696 t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
697 if (t == 0) {
698 ret = -ETIMEDOUT;
699 goto cleanup;
702 if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO ||
703 vstor_packet->status != 0)
704 goto cleanup;
707 memset(vstor_packet, 0, sizeof(struct vstor_packet));
708 vstor_packet->operation = VSTOR_OPERATION_QUERY_PROPERTIES;
709 vstor_packet->flags = REQUEST_COMPLETION_FLAG;
710 vstor_packet->storage_channel_properties.port_number =
711 stor_device->port_number;
713 ret = vmbus_sendpacket(device->channel, vstor_packet,
714 sizeof(struct vstor_packet),
715 (unsigned long)request,
716 VM_PKT_DATA_INBAND,
717 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
719 if (ret != 0)
720 goto cleanup;
722 t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
723 if (t == 0) {
724 ret = -ETIMEDOUT;
725 goto cleanup;
728 if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO ||
729 vstor_packet->status != 0)
730 goto cleanup;
732 stor_device->path_id = vstor_packet->storage_channel_properties.path_id;
733 stor_device->target_id
734 = vstor_packet->storage_channel_properties.target_id;
736 memset(vstor_packet, 0, sizeof(struct vstor_packet));
737 vstor_packet->operation = VSTOR_OPERATION_END_INITIALIZATION;
738 vstor_packet->flags = REQUEST_COMPLETION_FLAG;
740 ret = vmbus_sendpacket(device->channel, vstor_packet,
741 sizeof(struct vstor_packet),
742 (unsigned long)request,
743 VM_PKT_DATA_INBAND,
744 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
746 if (ret != 0)
747 goto cleanup;
749 t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
750 if (t == 0) {
751 ret = -ETIMEDOUT;
752 goto cleanup;
755 if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO ||
756 vstor_packet->status != 0)
757 goto cleanup;
760 cleanup:
761 return ret;
765 static void storvsc_command_completion(struct storvsc_cmd_request *cmd_request)
767 struct scsi_cmnd *scmnd = cmd_request->cmd;
768 struct hv_host_device *host_dev = shost_priv(scmnd->device->host);
769 void (*scsi_done_fn)(struct scsi_cmnd *);
770 struct scsi_sense_hdr sense_hdr;
771 struct vmscsi_request *vm_srb;
772 struct storvsc_scan_work *wrk;
773 struct stor_mem_pools *memp = scmnd->device->hostdata;
775 vm_srb = &cmd_request->vstor_packet.vm_srb;
776 if (cmd_request->bounce_sgl_count) {
777 if (vm_srb->data_in == READ_TYPE)
778 copy_from_bounce_buffer(scsi_sglist(scmnd),
779 cmd_request->bounce_sgl,
780 scsi_sg_count(scmnd),
781 cmd_request->bounce_sgl_count);
782 destroy_bounce_buffer(cmd_request->bounce_sgl,
783 cmd_request->bounce_sgl_count);
787 * If there is an error, offline the device since all
788 * error recovery strategies would have already been
789 * deployed on the host side.
791 if (vm_srb->srb_status == SRB_STATUS_ERROR)
792 scmnd->result = DID_TARGET_FAILURE << 16;
793 else
794 scmnd->result = vm_srb->scsi_status;
797 * If the LUN is invalid, remove the device.
799 if (vm_srb->srb_status == SRB_STATUS_INVALID_LUN) {
800 struct storvsc_device *stor_dev;
801 struct hv_device *dev = host_dev->dev;
802 struct Scsi_Host *host;
804 stor_dev = get_in_stor_device(dev);
805 host = stor_dev->host;
807 wrk = kmalloc(sizeof(struct storvsc_scan_work),
808 GFP_ATOMIC);
809 if (!wrk) {
810 scmnd->result = DID_TARGET_FAILURE << 16;
811 } else {
812 wrk->host = host;
813 wrk->lun = vm_srb->lun;
814 INIT_WORK(&wrk->work, storvsc_remove_lun);
815 schedule_work(&wrk->work);
819 if (scmnd->result) {
820 if (scsi_normalize_sense(scmnd->sense_buffer,
821 SCSI_SENSE_BUFFERSIZE, &sense_hdr))
822 scsi_print_sense_hdr("storvsc", &sense_hdr);
825 scsi_set_resid(scmnd,
826 cmd_request->data_buffer.len -
827 vm_srb->data_transfer_length);
829 scsi_done_fn = scmnd->scsi_done;
831 scmnd->host_scribble = NULL;
832 scmnd->scsi_done = NULL;
834 scsi_done_fn(scmnd);
836 mempool_free(cmd_request, memp->request_mempool);
839 static void storvsc_on_io_completion(struct hv_device *device,
840 struct vstor_packet *vstor_packet,
841 struct storvsc_cmd_request *request)
843 struct storvsc_device *stor_device;
844 struct vstor_packet *stor_pkt;
846 stor_device = hv_get_drvdata(device);
847 stor_pkt = &request->vstor_packet;
850 * The current SCSI handling on the host side does
851 * not correctly handle:
852 * INQUIRY command with page code parameter set to 0x80
853 * MODE_SENSE command with cmd[2] == 0x1c
855 * Set up the srb and scsi status so this won't be fatal.
856 * We do this so we can distinguish truly fatal failures
857 * (srb status == 0x4) and off-line the device in that case.
860 if ((stor_pkt->vm_srb.cdb[0] == INQUIRY) ||
861 (stor_pkt->vm_srb.cdb[0] == MODE_SENSE)) {
862 vstor_packet->vm_srb.scsi_status = 0;
863 vstor_packet->vm_srb.srb_status = SRB_STATUS_SUCCESS;
867 /* Copy over the status...etc */
868 stor_pkt->vm_srb.scsi_status = vstor_packet->vm_srb.scsi_status;
869 stor_pkt->vm_srb.srb_status = vstor_packet->vm_srb.srb_status;
870 stor_pkt->vm_srb.sense_info_length =
871 vstor_packet->vm_srb.sense_info_length;
873 if (vstor_packet->vm_srb.scsi_status != 0 ||
874 vstor_packet->vm_srb.srb_status != SRB_STATUS_SUCCESS){
875 dev_warn(&device->device,
876 "cmd 0x%x scsi status 0x%x srb status 0x%x\n",
877 stor_pkt->vm_srb.cdb[0],
878 vstor_packet->vm_srb.scsi_status,
879 vstor_packet->vm_srb.srb_status);
882 if ((vstor_packet->vm_srb.scsi_status & 0xFF) == 0x02) {
883 /* CHECK_CONDITION */
884 if (vstor_packet->vm_srb.srb_status &
885 SRB_STATUS_AUTOSENSE_VALID) {
886 /* autosense data available */
887 dev_warn(&device->device,
888 "stor pkt %p autosense data valid - len %d\n",
889 request,
890 vstor_packet->vm_srb.sense_info_length);
892 memcpy(request->sense_buffer,
893 vstor_packet->vm_srb.sense_data,
894 vstor_packet->vm_srb.sense_info_length);
899 stor_pkt->vm_srb.data_transfer_length =
900 vstor_packet->vm_srb.data_transfer_length;
902 storvsc_command_completion(request);
904 if (atomic_dec_and_test(&stor_device->num_outstanding_req) &&
905 stor_device->drain_notify)
906 wake_up(&stor_device->waiting_to_drain);
911 static void storvsc_on_receive(struct hv_device *device,
912 struct vstor_packet *vstor_packet,
913 struct storvsc_cmd_request *request)
915 struct storvsc_scan_work *work;
916 struct storvsc_device *stor_device;
918 switch (vstor_packet->operation) {
919 case VSTOR_OPERATION_COMPLETE_IO:
920 storvsc_on_io_completion(device, vstor_packet, request);
921 break;
923 case VSTOR_OPERATION_REMOVE_DEVICE:
924 case VSTOR_OPERATION_ENUMERATE_BUS:
925 stor_device = get_in_stor_device(device);
926 work = kmalloc(sizeof(struct storvsc_scan_work), GFP_ATOMIC);
927 if (!work)
928 return;
930 INIT_WORK(&work->work, storvsc_bus_scan);
931 work->host = stor_device->host;
932 schedule_work(&work->work);
933 break;
935 default:
936 break;
940 static void storvsc_on_channel_callback(void *context)
942 struct hv_device *device = (struct hv_device *)context;
943 struct storvsc_device *stor_device;
944 u32 bytes_recvd;
945 u64 request_id;
946 unsigned char packet[ALIGN(sizeof(struct vstor_packet), 8)];
947 struct storvsc_cmd_request *request;
948 int ret;
951 stor_device = get_in_stor_device(device);
952 if (!stor_device)
953 return;
955 do {
956 ret = vmbus_recvpacket(device->channel, packet,
957 ALIGN(sizeof(struct vstor_packet), 8),
958 &bytes_recvd, &request_id);
959 if (ret == 0 && bytes_recvd > 0) {
961 request = (struct storvsc_cmd_request *)
962 (unsigned long)request_id;
964 if ((request == &stor_device->init_request) ||
965 (request == &stor_device->reset_request)) {
967 memcpy(&request->vstor_packet, packet,
968 sizeof(struct vstor_packet));
969 complete(&request->wait_event);
970 } else {
971 storvsc_on_receive(device,
972 (struct vstor_packet *)packet,
973 request);
975 } else {
976 break;
978 } while (1);
980 return;
983 static int storvsc_connect_to_vsp(struct hv_device *device, u32 ring_size)
985 struct vmstorage_channel_properties props;
986 int ret;
988 memset(&props, 0, sizeof(struct vmstorage_channel_properties));
990 ret = vmbus_open(device->channel,
991 ring_size,
992 ring_size,
993 (void *)&props,
994 sizeof(struct vmstorage_channel_properties),
995 storvsc_on_channel_callback, device);
997 if (ret != 0)
998 return ret;
1000 ret = storvsc_channel_init(device);
1002 return ret;
1005 static int storvsc_dev_remove(struct hv_device *device)
1007 struct storvsc_device *stor_device;
1008 unsigned long flags;
1010 stor_device = hv_get_drvdata(device);
1012 spin_lock_irqsave(&device->channel->inbound_lock, flags);
1013 stor_device->destroy = true;
1014 spin_unlock_irqrestore(&device->channel->inbound_lock, flags);
1017 * At this point, all outbound traffic should be disabled. We
1018 * only allow inbound traffic (responses) to proceed so that
1019 * outstanding requests can be completed.
1022 storvsc_wait_to_drain(stor_device);
1025 * Since we have already drained, we don't need to busy wait
1026 * as was done in final_release_stor_device()
1027 * Note that we cannot set the ext pointer to NULL until
1028 * we have drained - to drain the outgoing packets, we need to
1029 * allow incoming packets.
1031 spin_lock_irqsave(&device->channel->inbound_lock, flags);
1032 hv_set_drvdata(device, NULL);
1033 spin_unlock_irqrestore(&device->channel->inbound_lock, flags);
1035 /* Close the channel */
1036 vmbus_close(device->channel);
1038 kfree(stor_device);
1039 return 0;
1042 static int storvsc_do_io(struct hv_device *device,
1043 struct storvsc_cmd_request *request)
1045 struct storvsc_device *stor_device;
1046 struct vstor_packet *vstor_packet;
1047 int ret = 0;
1049 vstor_packet = &request->vstor_packet;
1050 stor_device = get_out_stor_device(device);
1052 if (!stor_device)
1053 return -ENODEV;
1056 request->device = device;
1059 vstor_packet->flags |= REQUEST_COMPLETION_FLAG;
1061 vstor_packet->vm_srb.length = sizeof(struct vmscsi_request);
1064 vstor_packet->vm_srb.sense_info_length = STORVSC_SENSE_BUFFER_SIZE;
1067 vstor_packet->vm_srb.data_transfer_length =
1068 request->data_buffer.len;
1070 vstor_packet->operation = VSTOR_OPERATION_EXECUTE_SRB;
1072 if (request->data_buffer.len) {
1073 ret = vmbus_sendpacket_multipagebuffer(device->channel,
1074 &request->data_buffer,
1075 vstor_packet,
1076 sizeof(struct vstor_packet),
1077 (unsigned long)request);
1078 } else {
1079 ret = vmbus_sendpacket(device->channel, vstor_packet,
1080 sizeof(struct vstor_packet),
1081 (unsigned long)request,
1082 VM_PKT_DATA_INBAND,
1083 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
1086 if (ret != 0)
1087 return ret;
1089 atomic_inc(&stor_device->num_outstanding_req);
1091 return ret;
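/*
 * Editorial note on the two send paths above: requests that carry data are
 * sent with vmbus_sendpacket_multipagebuffer() so the host can reach the
 * guest pages described by data_buffer's pfn_array, while zero-length
 * requests go out as plain in-band packets. In both cases the request
 * pointer is used as the transaction id, which is how the channel callback
 * later matches the host's completion back to this request.
 */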
1094 static int storvsc_device_alloc(struct scsi_device *sdevice)
1096 struct stor_mem_pools *memp;
1097 int number = STORVSC_MIN_BUF_NR;
1099 memp = kzalloc(sizeof(struct stor_mem_pools), GFP_KERNEL);
1100 if (!memp)
1101 return -ENOMEM;
1103 memp->request_pool =
1104 kmem_cache_create(dev_name(&sdevice->sdev_dev),
1105 sizeof(struct storvsc_cmd_request), 0,
1106 SLAB_HWCACHE_ALIGN, NULL);
1108 if (!memp->request_pool)
1109 goto err0;
1111 memp->request_mempool = mempool_create(number, mempool_alloc_slab,
1112 mempool_free_slab,
1113 memp->request_pool);
1115 if (!memp->request_mempool)
1116 goto err1;
1118 sdevice->hostdata = memp;
1120 return 0;
1122 err1:
1123 kmem_cache_destroy(memp->request_pool);
1125 err0:
1126 kfree(memp);
1127 return -ENOMEM;
1130 static void storvsc_device_destroy(struct scsi_device *sdevice)
1132 struct stor_mem_pools *memp = sdevice->hostdata;
1134 if (!memp)
1135 return;
1137 mempool_destroy(memp->request_mempool);
1138 kmem_cache_destroy(memp->request_pool);
1139 kfree(memp);
1140 sdevice->hostdata = NULL;
1143 static int storvsc_device_configure(struct scsi_device *sdevice)
1145 scsi_adjust_queue_depth(sdevice, MSG_SIMPLE_TAG,
1146 STORVSC_MAX_IO_REQUESTS);
1148 blk_queue_max_segment_size(sdevice->request_queue, PAGE_SIZE);
1150 blk_queue_bounce_limit(sdevice->request_queue, BLK_BOUNCE_ANY);
1152 return 0;
1155 static int storvsc_get_chs(struct scsi_device *sdev, struct block_device * bdev,
1156 sector_t capacity, int *info)
1158 sector_t nsect = capacity;
1159 sector_t cylinders = nsect;
1160 int heads, sectors_pt;
1163 * We are making up these values; let us keep it simple.
1165 heads = 0xff;
1166 sectors_pt = 0x3f; /* Sectors per track */
1167 sector_div(cylinders, heads * sectors_pt);
1168 if ((sector_t)(cylinders + 1) * heads * sectors_pt < nsect)
1169 cylinders = 0xffff;
1171 info[0] = heads;
1172 info[1] = sectors_pt;
1173 info[2] = (int)cylinders;
1175 return 0;
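/*
 * Worked example (editorial illustration): for a 16 GiB virtual disk the
 * capacity is 33554432 512-byte sectors, so with heads = 255 and
 * sectors_pt = 63 the geometry reported above is roughly
 * 33554432 / (255 * 63) = 2088 cylinders.
 */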
1178 static int storvsc_host_reset_handler(struct scsi_cmnd *scmnd)
1180 struct hv_host_device *host_dev = shost_priv(scmnd->device->host);
1181 struct hv_device *device = host_dev->dev;
1183 struct storvsc_device *stor_device;
1184 struct storvsc_cmd_request *request;
1185 struct vstor_packet *vstor_packet;
1186 int ret, t;
1189 stor_device = get_out_stor_device(device);
1190 if (!stor_device)
1191 return FAILED;
1193 request = &stor_device->reset_request;
1194 vstor_packet = &request->vstor_packet;
1196 init_completion(&request->wait_event);
1198 vstor_packet->operation = VSTOR_OPERATION_RESET_BUS;
1199 vstor_packet->flags = REQUEST_COMPLETION_FLAG;
1200 vstor_packet->vm_srb.path_id = stor_device->path_id;
1202 ret = vmbus_sendpacket(device->channel, vstor_packet,
1203 sizeof(struct vstor_packet),
1204 (unsigned long)&stor_device->reset_request,
1205 VM_PKT_DATA_INBAND,
1206 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
1207 if (ret != 0)
1208 return FAILED;
1210 t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
1211 if (t == 0)
1212 return TIMEOUT_ERROR;
1216 * At this point, all outstanding requests in the adapter
1217 * should have been flushed out and returned to us.
1218 * There is a potential race here where the host may be in
1219 * the process of responding when we return from here.
1220 * Just wait for all in-transit packets to be accounted for
1221 * before we return from here.
1223 storvsc_wait_to_drain(stor_device);
1225 return SUCCESS;
1228 static bool storvsc_scsi_cmd_ok(struct scsi_cmnd *scmnd)
1230 bool allowed = true;
1231 u8 scsi_op = scmnd->cmnd[0];
1233 switch (scsi_op) {
1235 * smartd sends this command and the host does not handle
1236 * this. So, don't send it.
1238 case SET_WINDOW:
1239 scmnd->result = ILLEGAL_REQUEST << 16;
1240 allowed = false;
1241 break;
1242 default:
1243 break;
1245 return allowed;
1248 static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
1250 int ret;
1251 struct hv_host_device *host_dev = shost_priv(host);
1252 struct hv_device *dev = host_dev->dev;
1253 struct storvsc_cmd_request *cmd_request;
1254 unsigned int request_size = 0;
1255 int i;
1256 struct scatterlist *sgl;
1257 unsigned int sg_count = 0;
1258 struct vmscsi_request *vm_srb;
1259 struct stor_mem_pools *memp = scmnd->device->hostdata;
1261 if (!storvsc_scsi_cmd_ok(scmnd)) {
1262 scmnd->scsi_done(scmnd);
1263 return 0;
1266 request_size = sizeof(struct storvsc_cmd_request);
1268 cmd_request = mempool_alloc(memp->request_mempool,
1269 GFP_ATOMIC);
1272 * We might be invoked in an interrupt context; hence
1273 * mempool_alloc() can fail.
1275 if (!cmd_request)
1276 return SCSI_MLQUEUE_DEVICE_BUSY;
1278 memset(cmd_request, 0, sizeof(struct storvsc_cmd_request));
1280 /* Setup the cmd request */
1281 cmd_request->cmd = scmnd;
1283 scmnd->host_scribble = (unsigned char *)cmd_request;
1285 vm_srb = &cmd_request->vstor_packet.vm_srb;
1288 /* Build the SRB */
1289 switch (scmnd->sc_data_direction) {
1290 case DMA_TO_DEVICE:
1291 vm_srb->data_in = WRITE_TYPE;
1292 break;
1293 case DMA_FROM_DEVICE:
1294 vm_srb->data_in = READ_TYPE;
1295 break;
1296 default:
1297 vm_srb->data_in = UNKNOWN_TYPE;
1298 break;
1302 vm_srb->port_number = host_dev->port;
1303 vm_srb->path_id = scmnd->device->channel;
1304 vm_srb->target_id = scmnd->device->id;
1305 vm_srb->lun = scmnd->device->lun;
1307 vm_srb->cdb_length = scmnd->cmd_len;
1309 memcpy(vm_srb->cdb, scmnd->cmnd, vm_srb->cdb_length);
1311 cmd_request->sense_buffer = scmnd->sense_buffer;
1314 cmd_request->data_buffer.len = scsi_bufflen(scmnd);
1315 if (scsi_sg_count(scmnd)) {
1316 sgl = (struct scatterlist *)scsi_sglist(scmnd);
1317 sg_count = scsi_sg_count(scmnd);
1319 /* check if we need to bounce the sgl */
1320 if (do_bounce_buffer(sgl, scsi_sg_count(scmnd)) != -1) {
1321 cmd_request->bounce_sgl =
1322 create_bounce_buffer(sgl, scsi_sg_count(scmnd),
1323 scsi_bufflen(scmnd),
1324 vm_srb->data_in);
1325 if (!cmd_request->bounce_sgl) {
1326 ret = SCSI_MLQUEUE_HOST_BUSY;
1327 goto queue_error;
1330 cmd_request->bounce_sgl_count =
1331 ALIGN(scsi_bufflen(scmnd), PAGE_SIZE) >>
1332 PAGE_SHIFT;
1334 if (vm_srb->data_in == WRITE_TYPE)
1335 copy_to_bounce_buffer(sgl,
1336 cmd_request->bounce_sgl,
1337 scsi_sg_count(scmnd));
1339 sgl = cmd_request->bounce_sgl;
1340 sg_count = cmd_request->bounce_sgl_count;
1343 cmd_request->data_buffer.offset = sgl[0].offset;
1345 for (i = 0; i < sg_count; i++)
1346 cmd_request->data_buffer.pfn_array[i] =
1347 page_to_pfn(sg_page((&sgl[i])));
1349 } else if (scsi_sglist(scmnd)) {
1350 cmd_request->data_buffer.offset =
1351 virt_to_phys(scsi_sglist(scmnd)) & (PAGE_SIZE-1);
1352 cmd_request->data_buffer.pfn_array[0] =
1353 virt_to_phys(scsi_sglist(scmnd)) >> PAGE_SHIFT;
1356 /* Invokes the vsc to start an IO */
1357 ret = storvsc_do_io(dev, cmd_request);
1359 if (ret == -EAGAIN) {
1360 /* no more space */
1362 if (cmd_request->bounce_sgl_count) {
1363 destroy_bounce_buffer(cmd_request->bounce_sgl,
1364 cmd_request->bounce_sgl_count);
1366 ret = SCSI_MLQUEUE_DEVICE_BUSY;
1367 goto queue_error;
1371 return 0;
1373 queue_error:
1374 mempool_free(cmd_request, memp->request_mempool);
1375 scmnd->host_scribble = NULL;
1376 return ret;
1379 static struct scsi_host_template scsi_driver = {
1380 .module = THIS_MODULE,
1381 .name = "storvsc_host_t",
1382 .bios_param = storvsc_get_chs,
1383 .queuecommand = storvsc_queuecommand,
1384 .eh_host_reset_handler = storvsc_host_reset_handler,
1385 .slave_alloc = storvsc_device_alloc,
1386 .slave_destroy = storvsc_device_destroy,
1387 .slave_configure = storvsc_device_configure,
1388 .cmd_per_lun = 1,
1389 /* STORVSC_MAX_IO_REQUESTS (128) max_queue * 1 target */
1390 .can_queue = STORVSC_MAX_IO_REQUESTS*STORVSC_MAX_TARGETS,
1391 .this_id = -1,
1392 /* no use setting to 0 since ll_blk_rw reset it to 1 */
1393 /* currently 32 */
1394 .sg_tablesize = MAX_MULTIPAGE_BUFFER_COUNT,
1395 .use_clustering = DISABLE_CLUSTERING,
1396 /* Make sure we don't get an sg segment that crosses a page boundary */
1397 .dma_boundary = PAGE_SIZE-1,
1400 enum {
1401 SCSI_GUID,
1402 IDE_GUID,
1405 static const struct hv_vmbus_device_id id_table[] = {
1406 /* SCSI guid */
1407 { VMBUS_DEVICE(0xd9, 0x63, 0x61, 0xba, 0xa1, 0x04, 0x29, 0x4d,
1408 0xb6, 0x05, 0x72, 0xe2, 0xff, 0xb1, 0xdc, 0x7f)
1409 .driver_data = SCSI_GUID },
1410 /* IDE guid */
1411 { VMBUS_DEVICE(0x32, 0x26, 0x41, 0x32, 0xcb, 0x86, 0xa2, 0x44,
1412 0x9b, 0x5c, 0x50, 0xd1, 0x41, 0x73, 0x54, 0xf5)
1413 .driver_data = IDE_GUID },
1414 { },
1417 MODULE_DEVICE_TABLE(vmbus, id_table);
1419 static int storvsc_probe(struct hv_device *device,
1420 const struct hv_vmbus_device_id *dev_id)
1422 int ret;
1423 struct Scsi_Host *host;
1424 struct hv_host_device *host_dev;
1425 bool dev_is_ide = ((dev_id->driver_data == IDE_GUID) ? true : false);
1426 int target = 0;
1427 struct storvsc_device *stor_device;
1429 host = scsi_host_alloc(&scsi_driver,
1430 sizeof(struct hv_host_device));
1431 if (!host)
1432 return -ENOMEM;
1434 host_dev = shost_priv(host);
1435 memset(host_dev, 0, sizeof(struct hv_host_device));
1437 host_dev->port = host->host_no;
1438 host_dev->dev = device;
1441 stor_device = kzalloc(sizeof(struct storvsc_device), GFP_KERNEL);
1442 if (!stor_device) {
1443 ret = -ENOMEM;
1444 goto err_out0;
1447 stor_device->destroy = false;
1448 init_waitqueue_head(&stor_device->waiting_to_drain);
1449 stor_device->device = device;
1450 stor_device->host = host;
1451 hv_set_drvdata(device, stor_device);
1453 stor_device->port_number = host->host_no;
1454 ret = storvsc_connect_to_vsp(device, storvsc_ringbuffer_size);
1455 if (ret)
1456 goto err_out1;
1458 host_dev->path = stor_device->path_id;
1459 host_dev->target = stor_device->target_id;
1461 /* max # of devices per target */
1462 host->max_lun = STORVSC_MAX_LUNS_PER_TARGET;
1463 /* max # of targets per channel */
1464 host->max_id = STORVSC_MAX_TARGETS;
1465 /* max # of channels */
1466 host->max_channel = STORVSC_MAX_CHANNELS - 1;
1467 /* max cmd length */
1468 host->max_cmd_len = STORVSC_MAX_CMD_LEN;
1470 /* Register the HBA and start the scsi bus scan */
1471 ret = scsi_add_host(host, &device->device);
1472 if (ret != 0)
1473 goto err_out2;
1475 if (!dev_is_ide) {
1476 scsi_scan_host(host);
1477 } else {
1478 target = (device->dev_instance.b[5] << 8 |
1479 device->dev_instance.b[4]);
1480 ret = scsi_add_device(host, 0, target, 0);
1481 if (ret) {
1482 scsi_remove_host(host);
1483 goto err_out2;
1486 return 0;
1488 err_out2:
1490 * Once we have connected with the host, we would need to
1491 * invoke storvsc_dev_remove() to roll back this state; this
1492 * call also frees up the stor_device, hence the jump around the
1493 * err_out1 label.
1495 storvsc_dev_remove(device);
1496 goto err_out0;
1498 err_out1:
1499 kfree(stor_device);
1501 err_out0:
1502 scsi_host_put(host);
1503 return ret;
1506 static int storvsc_remove(struct hv_device *dev)
1508 struct storvsc_device *stor_device = hv_get_drvdata(dev);
1509 struct Scsi_Host *host = stor_device->host;
1511 scsi_remove_host(host);
1512 storvsc_dev_remove(dev);
1513 scsi_host_put(host);
1515 return 0;
1518 static struct hv_driver storvsc_drv = {
1519 .name = KBUILD_MODNAME,
1520 .id_table = id_table,
1521 .probe = storvsc_probe,
1522 .remove = storvsc_remove,
1525 static int __init storvsc_drv_init(void)
1527 u32 max_outstanding_req_per_channel;
1530 * Divide the ring buffer data size (which is 1 page less
1531 * than the ring buffer size since that page is reserved for
1532 * the ring buffer indices) by the max request size (which is
1533 * vmbus_channel_packet_multipage_buffer + struct vstor_packet + u64)
1535 max_outstanding_req_per_channel =
1536 ((storvsc_ringbuffer_size - PAGE_SIZE) /
1537 ALIGN(MAX_MULTIPAGE_BUFFER_PACKET +
1538 sizeof(struct vstor_packet) + sizeof(u64),
1539 sizeof(u64)));
1541 if (max_outstanding_req_per_channel <
1542 STORVSC_MAX_IO_REQUESTS)
1543 return -EINVAL;
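/*
 * Illustrative arithmetic (editorial addition, assuming 4 KiB pages): the
 * default ring of 20 * PAGE_SIZE leaves 20 * 4096 - 4096 = 77824 bytes of
 * data area after the page reserved for the ring indices. Divided by the
 * aligned per-request size (multi-page buffer descriptor + struct
 * vstor_packet + a u64 transaction id, a few hundred bytes in total), this
 * comfortably exceeds the STORVSC_MAX_IO_REQUESTS (128) floor enforced
 * above.
 */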
1545 return vmbus_driver_register(&storvsc_drv);
1548 static void __exit storvsc_drv_exit(void)
1550 vmbus_driver_unregister(&storvsc_drv);
1553 MODULE_LICENSE("GPL");
1554 MODULE_VERSION(HV_DRV_VERSION);
1555 MODULE_DESCRIPTION("Microsoft Hyper-V virtual storage driver");
1556 module_init(storvsc_drv_init);
1557 module_exit(storvsc_drv_exit);