/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/blkdev.h>
#include <linux/major.h>
#include <linux/delay.h>
#include <linux/hdreg.h>
#include <linux/slab.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_dbg.h>

#include "hyperv_storage.h"


#define BLKVSC_MINORS	64

enum blkvsc_device_type {
        UNKNOWN_DEV_TYPE,
        HARDDISK_TYPE,
};

enum blkvsc_op_type {
        DO_INQUIRY,
        DO_CAPACITY,
        DO_FLUSH,
};

/*
 * This request ties the struct request and struct
 * blkvsc_request/hv_storvsc_request together. A struct request may be
 * represented by 1 or more struct blkvsc_request.
 */
struct blkvsc_request_group {
        int outstanding;
        int status;
        struct list_head blkvsc_req_list;       /* list of blkvsc_requests */
};

struct blkvsc_request {
        /* blkvsc_request_group.blkvsc_req_list */
        struct list_head req_entry;

        /* block_device_context.pending_list */
        struct list_head pend_entry;

        /* This may be null if we generate a request internally */
        struct request *req;

        struct block_device_context *dev;

        /* The group this request is part of. Maybe null */
        struct blkvsc_request_group *group;

        int write;
        sector_t sector_start;
        unsigned long sector_count;

        unsigned char sense_buffer[SCSI_SENSE_BUFFERSIZE];
        unsigned char cmd_len;
        unsigned char cmnd[MAX_COMMAND_SIZE];

        struct hv_storvsc_request request;
};

/* Per device structure */
struct block_device_context {
        /* point back to our device context */
        struct hv_device *device_ctx;
        struct kmem_cache *request_pool;
        spinlock_t lock;
        struct gendisk *gd;
        enum blkvsc_device_type device_type;
        struct list_head pending_list;

        unsigned char device_id[64];
        unsigned int device_id_len;
        int num_outstanding_reqs;
        int shutting_down;
        unsigned int sector_size;
        sector_t capacity;
        unsigned int port;
        unsigned char path;
        unsigned char target;
        int users;
};

/*
 * There is a circular dependency involving blkvsc_request_completion()
 * and blkvsc_do_request().
 */
static void blkvsc_request_completion(struct hv_storvsc_request *request);

static int blkvsc_ringbuffer_size = BLKVSC_RING_BUFFER_SIZE;

module_param(blkvsc_ringbuffer_size, int, S_IRUGO);
MODULE_PARM_DESC(blkvsc_ringbuffer_size, "Ring buffer size (in bytes)");

/*
 * There is a circular dependency involving blkvsc_probe()
 * and block_ops.
 */
static int blkvsc_probe(struct hv_device *dev);
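
/*
 * blkvsc_device_add() - Register the block device with the storvsc layer
 * and derive the path/target ids from the device instance GUID.
 */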
static int blkvsc_device_add(struct hv_device *device,
                             void *additional_info)
{
        struct storvsc_device_info *device_info;
        int ret = 0;

        device_info = (struct storvsc_device_info *)additional_info;

        device_info->ring_buffer_size = blkvsc_ringbuffer_size;

        ret = storvsc_dev_add(device, additional_info);
        if (ret != 0)
                return ret;

        /*
         * We need to use the device instance guid to set the path and target
         * id. For IDE devices, the device instance id is formatted as
         * <bus id> - <device id> - 8899 - 000000000000.
         */
        device_info->path_id = device->dev_instance.b[3] << 24 |
                               device->dev_instance.b[2] << 16 |
                               device->dev_instance.b[1] << 8  |
                               device->dev_instance.b[0];

        device_info->target_id = device->dev_instance.b[5] << 8 |
                                 device->dev_instance.b[4];

        return ret;
}
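
/*
 * blkvsc_submit_request() - Translate a blkvsc_request into a vstor packet
 * and hand it to storvsc_do_io(); num_outstanding_reqs is bumped on success.
 */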
static int blkvsc_submit_request(struct blkvsc_request *blkvsc_req,
                void (*request_completion)(struct hv_storvsc_request *))
{
        struct block_device_context *blkdev = blkvsc_req->dev;
        struct hv_storvsc_request *storvsc_req;
        struct vmscsi_request *vm_srb;
        int ret;

        storvsc_req = &blkvsc_req->request;
        vm_srb = &storvsc_req->vstor_packet.vm_srb;

        vm_srb->data_in = blkvsc_req->write ? WRITE_TYPE : READ_TYPE;

        storvsc_req->on_io_completion = request_completion;
        storvsc_req->context = blkvsc_req;

        vm_srb->port_number = blkdev->port;
        vm_srb->path_id = blkdev->path;
        vm_srb->target_id = blkdev->target;
        vm_srb->lun = 0;         /* this is not really used at all */

        vm_srb->cdb_length = blkvsc_req->cmd_len;

        memcpy(vm_srb->cdb, blkvsc_req->cmnd, vm_srb->cdb_length);

        storvsc_req->sense_buffer = blkvsc_req->sense_buffer;

        ret = storvsc_do_io(blkdev->device_ctx,
                            &blkvsc_req->request);
        if (ret == 0)
                blkdev->num_outstanding_reqs++;

        return ret;
}
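
/*
 * blkvsc_open() - Block device open callback; counts users under the lock.
 */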
static int blkvsc_open(struct block_device *bdev, fmode_t mode)
{
        struct block_device_context *blkdev = bdev->bd_disk->private_data;
        unsigned long flags;

        spin_lock_irqsave(&blkdev->lock, flags);

        blkdev->users++;

        spin_unlock_irqrestore(&blkdev->lock, flags);

        return 0;
}
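
/*
 * blkvsc_getgeo() - Report a synthetic disk geometry derived from capacity.
 */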
static int blkvsc_getgeo(struct block_device *bd, struct hd_geometry *hg)
{
        sector_t nsect = get_capacity(bd->bd_disk);
        sector_t cylinders = nsect;

        /*
         * We are making up these values; let us keep it simple.
         */
        hg->heads = 0xff;
        hg->sectors = 0x3f;
        sector_div(cylinders, hg->heads * hg->sectors);
        hg->cylinders = cylinders;
        if ((sector_t)(hg->cylinders + 1) * hg->heads * hg->sectors < nsect)
                hg->cylinders = 0xffff;
        return 0;
}
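
/*
 * blkvsc_init_rw() - Build the READ_16/WRITE_16 CDB for a request,
 * honoring REQ_FUA and filling in the big-endian LBA and length.
 */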
static void blkvsc_init_rw(struct blkvsc_request *blkvsc_req)
{
        blkvsc_req->cmd_len = 16;

        if (rq_data_dir(blkvsc_req->req)) {
                blkvsc_req->write = 1;
                blkvsc_req->cmnd[0] = WRITE_16;
        } else {
                blkvsc_req->write = 0;
                blkvsc_req->cmnd[0] = READ_16;
        }

        blkvsc_req->cmnd[1] |=
                (blkvsc_req->req->cmd_flags & REQ_FUA) ? 0x8 : 0;

        *(unsigned long long *)&blkvsc_req->cmnd[2] =
                cpu_to_be64(blkvsc_req->sector_start);
        *(unsigned int *)&blkvsc_req->cmnd[10] =
                cpu_to_be32(blkvsc_req->sector_count);
}
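
/*
 * blkvsc_ioctl() - Only HDIO_GET_IDENTITY is supported; it copies the
 * cached device id to user space.
 */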
static int blkvsc_ioctl(struct block_device *bd, fmode_t mode,
                        unsigned cmd, unsigned long arg)
{
        struct block_device_context *blkdev = bd->bd_disk->private_data;
        int ret = 0;

        switch (cmd) {
        case HDIO_GET_IDENTITY:
                if (copy_to_user((void __user *)arg, blkdev->device_id,
                                 blkdev->device_id_len))
                        ret = -EFAULT;
                break;
        default:
                ret = -EINVAL;
                break;
        }

        return ret;
}
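
/*
 * blkvsc_cmd_completion() - Completion handler for internally generated
 * commands (INQUIRY, READ_CAPACITY, SYNCHRONIZE_CACHE).
 */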
static void blkvsc_cmd_completion(struct hv_storvsc_request *request)
{
        struct blkvsc_request *blkvsc_req =
                        (struct blkvsc_request *)request->context;
        struct block_device_context *blkdev =
                        (struct block_device_context *)blkvsc_req->dev;
        struct scsi_sense_hdr sense_hdr;
        struct vmscsi_request *vm_srb;
        unsigned long flags;

        vm_srb = &blkvsc_req->request.vstor_packet.vm_srb;

        spin_lock_irqsave(&blkdev->lock, flags);
        blkdev->num_outstanding_reqs--;
        spin_unlock_irqrestore(&blkdev->lock, flags);

        if (vm_srb->scsi_status)
                if (scsi_normalize_sense(blkvsc_req->sense_buffer,
                                         SCSI_SENSE_BUFFERSIZE, &sense_hdr))
                        scsi_print_sense_hdr("blkvsc", &sense_hdr);

        complete(&blkvsc_req->request.wait_event);
}
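
/*
 * blkvsc_do_operation() - Issue one of the internal ops (DO_INQUIRY,
 * DO_CAPACITY, DO_FLUSH) synchronously and parse the returned data.
 */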
static int blkvsc_do_operation(struct block_device_context *blkdev,
                               enum blkvsc_op_type op)
{
        struct blkvsc_request *blkvsc_req;
        struct page *page_buf;
        unsigned char *buf;
        unsigned char device_type;
        struct scsi_sense_hdr sense_hdr;
        struct vmscsi_request *vm_srb;
        unsigned long flags;
        int ret = 0;

        blkvsc_req = kmem_cache_zalloc(blkdev->request_pool, GFP_KERNEL);
        if (!blkvsc_req)
                return -ENOMEM;

        page_buf = alloc_page(GFP_KERNEL);
        if (!page_buf) {
                kmem_cache_free(blkdev->request_pool, blkvsc_req);
                return -ENOMEM;
        }

        vm_srb = &blkvsc_req->request.vstor_packet.vm_srb;
        init_completion(&blkvsc_req->request.wait_event);
        blkvsc_req->dev = blkdev;
        blkvsc_req->req = NULL;
        blkvsc_req->write = 0;

        blkvsc_req->request.data_buffer.pfn_array[0] =
                page_to_pfn(page_buf);
        blkvsc_req->request.data_buffer.offset = 0;

        switch (op) {
        case DO_INQUIRY:
                blkvsc_req->cmnd[0] = INQUIRY;
                blkvsc_req->cmnd[1] = 0x1;              /* Get product data */
                blkvsc_req->cmnd[2] = 0x83;             /* mode page 83 */
                blkvsc_req->cmnd[4] = 64;
                blkvsc_req->cmd_len = 6;
                blkvsc_req->request.data_buffer.len = 64;
                break;

        case DO_CAPACITY:
                blkdev->sector_size = 0;
                blkdev->capacity = 0;

                blkvsc_req->cmnd[0] = READ_CAPACITY;
                blkvsc_req->cmd_len = 16;
                blkvsc_req->request.data_buffer.len = 8;
                break;

        case DO_FLUSH:
                blkvsc_req->cmnd[0] = SYNCHRONIZE_CACHE;
                blkvsc_req->cmd_len = 10;
                blkvsc_req->request.data_buffer.pfn_array[0] = 0;
                blkvsc_req->request.data_buffer.len = 0;
                break;

        default:
                ret = -EINVAL;
                goto cleanup;
        }

        spin_lock_irqsave(&blkdev->lock, flags);
        blkvsc_submit_request(blkvsc_req, blkvsc_cmd_completion);
        spin_unlock_irqrestore(&blkdev->lock, flags);

        wait_for_completion_interruptible(&blkvsc_req->request.wait_event);

        /* check error */
        if (vm_srb->scsi_status) {
                scsi_normalize_sense(blkvsc_req->sense_buffer,
                                     SCSI_SENSE_BUFFERSIZE, &sense_hdr);
                goto cleanup;
        }

        buf = kmap(page_buf);

        switch (op) {
        case DO_INQUIRY:
                device_type = buf[0] & 0x1F;

                if (device_type == 0x0)
                        blkdev->device_type = HARDDISK_TYPE;
                else
                        blkdev->device_type = UNKNOWN_DEV_TYPE;

                blkdev->device_id_len = buf[7];
                if (blkdev->device_id_len > 64)
                        blkdev->device_id_len = 64;

                memcpy(blkdev->device_id, &buf[8], blkdev->device_id_len);
                break;

        case DO_CAPACITY:
                /* returned data is big-endian */
                blkdev->capacity =
                        ((buf[0] << 24) | (buf[1] << 16) |
                         (buf[2] << 8) | buf[3]) + 1;

                blkdev->sector_size =
                        (buf[4] << 24) | (buf[5] << 16) |
                        (buf[6] << 8) | buf[7];
                break;

        default:
                break;
        }

        kunmap(page_buf);

cleanup:
        __free_page(page_buf);

        kmem_cache_free(blkdev->request_pool, blkvsc_req);

        return ret;
}
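
/*
 * blkvsc_cancel_pending_reqs() - Fail everything on the pending list,
 * completing any partially completed request groups first.
 */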
static int blkvsc_cancel_pending_reqs(struct block_device_context *blkdev)
{
        struct blkvsc_request *pend_req, *tmp;
        struct blkvsc_request *comp_req, *tmp2;
        struct vmscsi_request *vm_srb;
        int ret = 0;

        /* Flush the pending list first */
        list_for_each_entry_safe(pend_req, tmp, &blkdev->pending_list,
                                 pend_entry) {
                /*
                 * The pend_req could be part of a partially completed
                 * request. If so, complete those req first until we
                 * hit the pend_req.
                 */
                list_for_each_entry_safe(comp_req, tmp2,
                                         &pend_req->group->blkvsc_req_list,
                                         req_entry) {

                        if (comp_req == pend_req)
                                break;

                        list_del(&comp_req->req_entry);

                        if (comp_req->req) {
                                vm_srb =
                                &comp_req->request.vstor_packet.
                                vm_srb;
                                ret = __blk_end_request(comp_req->req,
                                        (!vm_srb->scsi_status ? 0 : -EIO),
                                        comp_req->sector_count *
                                        blkdev->sector_size);

                                /* FIXME: shouldn't this do more than return? */
                                if (ret)
                                        goto out;
                        }

                        kmem_cache_free(blkdev->request_pool, comp_req);
                }

                list_del(&pend_req->pend_entry);

                list_del(&pend_req->req_entry);

                if (pend_req->req) {
                        if (!__blk_end_request(pend_req->req, -EIO,
                                               pend_req->sector_count *
                                               blkdev->sector_size)) {
                                /*
                                 * All the sectors have been xferred ie the
                                 * request is done.
                                 */
                                kmem_cache_free(blkdev->request_pool,
                                                pend_req->group);
                        }
                }

                kmem_cache_free(blkdev->request_pool, pend_req);
        }

out:
        return ret;
}

/*
 * blkvsc_remove() - Callback when our device is removed
 */
static int blkvsc_remove(struct hv_device *dev)
{
        struct block_device_context *blkdev = dev_get_drvdata(&dev->device);
        unsigned long flags;

        /* Get to a known state */
        spin_lock_irqsave(&blkdev->lock, flags);

        blkdev->shutting_down = 1;

        blk_stop_queue(blkdev->gd->queue);

        blkvsc_cancel_pending_reqs(blkdev);

        spin_unlock_irqrestore(&blkdev->lock, flags);

        blkvsc_do_operation(blkdev, DO_FLUSH);

        if (blkdev->users == 0) {
                del_gendisk(blkdev->gd);
                put_disk(blkdev->gd);
                blk_cleanup_queue(blkdev->gd->queue);

                storvsc_dev_remove(blkdev->device_ctx);

                kmem_cache_destroy(blkdev->request_pool);
                kfree(blkdev);
        }

        return 0;
}
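
/*
 * blkvsc_shutdown() - Flush the device and wait for outstanding I/O to
 * drain before the VM is shut down.
 */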
static void blkvsc_shutdown(struct hv_device *dev)
{
        struct block_device_context *blkdev = dev_get_drvdata(&dev->device);
        unsigned long flags;

        spin_lock_irqsave(&blkdev->lock, flags);

        blkdev->shutting_down = 1;

        blk_stop_queue(blkdev->gd->queue);

        blkvsc_cancel_pending_reqs(blkdev);

        spin_unlock_irqrestore(&blkdev->lock, flags);

        blkvsc_do_operation(blkdev, DO_FLUSH);

        /*
         * Now wait for all outgoing I/O to be drained.
         */
        storvsc_wait_to_drain((struct storvsc_device *)dev->ext);
}
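
/*
 * blkvsc_release() - Block device release callback; tears the disk down
 * if this was the last user and we are shutting down.
 */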
static int blkvsc_release(struct gendisk *disk, fmode_t mode)
{
        struct block_device_context *blkdev = disk->private_data;
        unsigned long flags;

        spin_lock_irqsave(&blkdev->lock, flags);

        if ((--blkdev->users == 0) && (blkdev->shutting_down)) {
                blk_stop_queue(blkdev->gd->queue);
                spin_unlock_irqrestore(&blkdev->lock, flags);

                blkvsc_do_operation(blkdev, DO_FLUSH);
                del_gendisk(blkdev->gd);
                put_disk(blkdev->gd);
                blk_cleanup_queue(blkdev->gd->queue);

                storvsc_dev_remove(blkdev->device_ctx);

                kmem_cache_destroy(blkdev->request_pool);
                kfree(blkdev);
        } else
                spin_unlock_irqrestore(&blkdev->lock, flags);

        return 0;
}

/*
 * We break the request into 1 or more blkvsc_requests and submit
 * them. If we can't submit them all, we put them on the
 * pending_list. The blkvsc_request() will work on the pending_list.
 */
static int blkvsc_do_request(struct block_device_context *blkdev,
                             struct request *req)
{
        struct bio *bio = NULL;
        struct bio_vec *bvec = NULL;
        struct bio_vec *prev_bvec = NULL;
        struct blkvsc_request *blkvsc_req = NULL;
        struct blkvsc_request *tmp;
        int databuf_idx = 0;
        int seg_idx = 0;
        sector_t start_sector;
        unsigned long num_sectors = 0;
        int ret = 0;
        int pending = 0;
        struct blkvsc_request_group *group = NULL;

        /* Create a group to tie req to list of blkvsc_reqs */
        group = kmem_cache_zalloc(blkdev->request_pool, GFP_ATOMIC);
        if (!group)
                return -ENOMEM;

        INIT_LIST_HEAD(&group->blkvsc_req_list);
        group->outstanding = group->status = 0;

        start_sector = blk_rq_pos(req);

        /* foreach bio in the request */
        for (bio = req->bio; bio; bio = bio->bi_next) {
                /*
                 * Map this bio into an existing or new storvsc request
                 */
                bio_for_each_segment(bvec, bio, seg_idx) {
                        /* Get a new storvsc request */
                        if ((!blkvsc_req) ||
                            (databuf_idx >= MAX_MULTIPAGE_BUFFER_COUNT) ||
                            /* hole at the begin of page */
                            (bvec->bv_offset != 0) ||
                            /* hole at the end of page */
                            (prev_bvec &&
                             (prev_bvec->bv_len != PAGE_SIZE))) {
                                /* submit the prev one */
                                if (blkvsc_req) {
                                        blkvsc_req->sector_start =
                                                start_sector;
                                        sector_div(blkvsc_req->sector_start,
                                                (blkdev->sector_size >> 9));

                                        blkvsc_req->sector_count =
                                                num_sectors /
                                                (blkdev->sector_size >> 9);
                                        blkvsc_init_rw(blkvsc_req);
                                }

                                /*
                                 * Create new blkvsc_req to represent
                                 * the current bvec
                                 */
                                blkvsc_req = kmem_cache_zalloc(
                                        blkdev->request_pool, GFP_ATOMIC);
                                if (!blkvsc_req) {
                                        /* free up everything */
                                        list_for_each_entry_safe(
                                                blkvsc_req, tmp,
                                                &group->blkvsc_req_list,
                                                req_entry) {
                                                list_del(
                                                &blkvsc_req->req_entry);
                                                kmem_cache_free(
                                                blkdev->request_pool,
                                                blkvsc_req);
                                        }

                                        kmem_cache_free(
                                        blkdev->request_pool, group);
                                        return -ENOMEM;
                                }

                                memset(blkvsc_req, 0,
                                       sizeof(struct blkvsc_request));

                                blkvsc_req->dev = blkdev;
                                blkvsc_req->req = req;
                                blkvsc_req->request.data_buffer.offset =
                                        bvec->bv_offset;
                                blkvsc_req->request.data_buffer.len = 0;

                                /* Add to the group */
                                blkvsc_req->group = group;
                                blkvsc_req->group->outstanding++;
                                list_add_tail(&blkvsc_req->req_entry,
                                        &blkvsc_req->group->blkvsc_req_list);

                                start_sector += num_sectors;
                                num_sectors = 0;
                                databuf_idx = 0;
                        }

                        /*
                         * Add the curr bvec/segment to the curr
                         * blkvsc_req
                         */
                        blkvsc_req->request.data_buffer.
                                pfn_array[databuf_idx]
                                        = page_to_pfn(bvec->bv_page);
                        blkvsc_req->request.data_buffer.len
                                += bvec->bv_len;

                        prev_bvec = bvec;

                        databuf_idx++;
                        num_sectors += bvec->bv_len >> 9;

                } /* bio_for_each_segment */

        } /* rq_for_each_bio */

        /* Handle the last one */
        if (blkvsc_req) {
                blkvsc_req->sector_start = start_sector;
                sector_div(blkvsc_req->sector_start,
                           (blkdev->sector_size >> 9));

                blkvsc_req->sector_count = num_sectors /
                                           (blkdev->sector_size >> 9);

                blkvsc_init_rw(blkvsc_req);
        }

        list_for_each_entry(blkvsc_req, &group->blkvsc_req_list, req_entry) {
                if (pending) {
                        list_add_tail(&blkvsc_req->pend_entry,
                                      &blkdev->pending_list);
                } else {
                        ret = blkvsc_submit_request(blkvsc_req,
                                                blkvsc_request_completion);
                        if (ret) {
                                pending = 1;
                                list_add_tail(&blkvsc_req->pend_entry,
                                              &blkdev->pending_list);
                        }
                }
        }

        return pending;
}
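
/*
 * blkvsc_do_pending_reqs() - Try to (re)submit everything on the
 * pending_list; stop at the first submission failure.
 */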
static int blkvsc_do_pending_reqs(struct block_device_context *blkdev)
{
        struct blkvsc_request *pend_req, *tmp;
        int ret = 0;

        /* Flush the pending list first */
        list_for_each_entry_safe(pend_req, tmp, &blkdev->pending_list,
                                 pend_entry) {

                ret = blkvsc_submit_request(pend_req,
                                            blkvsc_request_completion);
                if (ret != 0)
                        break;

                list_del(&pend_req->pend_entry);
        }

        return ret;
}
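
/*
 * blkvsc_request() - Block layer request_fn; drains pending requests
 * first, then maps new requests via blkvsc_do_request().
 */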
static void blkvsc_request(struct request_queue *queue)
{
        struct block_device_context *blkdev = NULL;
        struct request *req;
        int ret = 0;

        while ((req = blk_peek_request(queue)) != NULL) {

                blkdev = req->rq_disk->private_data;
                if (blkdev->shutting_down || req->cmd_type != REQ_TYPE_FS) {
                        __blk_end_request_cur(req, 0);
                        continue;
                }

                ret = blkvsc_do_pending_reqs(blkdev);

                if (ret != 0) {
                        blk_stop_queue(queue);
                        break;
                }

                blk_start_request(req);

                ret = blkvsc_do_request(blkdev, req);
                if (ret > 0) {
                        blk_stop_queue(queue);
                        break;
                } else if (ret < 0) {
                        blk_requeue_request(queue, req);
                        blk_stop_queue(queue);
                        break;
                }
        }
}

static const struct hv_vmbus_device_id id_table[] = {
        /* IDE guid */
        { VMBUS_DEVICE(0x32, 0x26, 0x41, 0x32, 0xcb, 0x86, 0xa2, 0x44,
                       0x9b, 0x5c, 0x50, 0xd1, 0x41, 0x73, 0x54, 0xf5) },
        { },
};

MODULE_DEVICE_TABLE(vmbus, id_table);

/* The one and only one */
static struct hv_driver blkvsc_drv = {
        .id_table = id_table,
        .probe = blkvsc_probe,
        .remove = blkvsc_remove,
        .shutdown = blkvsc_shutdown,
};

static const struct block_device_operations block_ops = {
        .owner = THIS_MODULE,
        .open = blkvsc_open,
        .release = blkvsc_release,
        .getgeo = blkvsc_getgeo,
        .ioctl = blkvsc_ioctl,
};

/*
 * blkvsc_drv_init -  BlkVsc driver initialization.
 */
static int blkvsc_drv_init(void)
{
        BUILD_BUG_ON(sizeof(sector_t) != 8);
        return vmbus_driver_register(&blkvsc_drv);
}

static void blkvsc_drv_exit(void)
{
        vmbus_driver_unregister(&blkvsc_drv);
}

/*
 * blkvsc_probe - Add a new device for this driver
 */
static int blkvsc_probe(struct hv_device *dev)
{
        struct block_device_context *blkdev = NULL;
        struct storvsc_device_info device_info;
        struct storvsc_major_info major_info;
        int ret = 0;

        blkdev = kzalloc(sizeof(struct block_device_context), GFP_KERNEL);
        if (!blkdev) {
                ret = -ENOMEM;
                goto cleanup;
        }

        INIT_LIST_HEAD(&blkdev->pending_list);

        /* Initialize what we can here */
        spin_lock_init(&blkdev->lock);

        blkdev->request_pool = kmem_cache_create(dev_name(&dev->device),
                                        sizeof(struct blkvsc_request), 0,
                                        SLAB_HWCACHE_ALIGN, NULL);
        if (!blkdev->request_pool) {
                ret = -ENOMEM;
                goto cleanup;
        }

        ret = blkvsc_device_add(dev, &device_info);
        if (ret != 0)
                goto cleanup;

        blkdev->device_ctx = dev;
        /* this identifies the device 0 or 1 */
        blkdev->target = device_info.target_id;
        /* this identifies the ide ctrl 0 or 1 */
        blkdev->path = device_info.path_id;

        dev_set_drvdata(&dev->device, blkdev);

        ret = storvsc_get_major_info(&device_info, &major_info);
        if (ret)
                goto remove;

        if (major_info.do_register) {
                ret = register_blkdev(major_info.major, major_info.devname);
                if (ret != 0) {
                        DPRINT_ERR(BLKVSC_DRV,
                                   "register_blkdev() failed! ret %d", ret);
                        goto remove;
                }
        }

        DPRINT_INFO(BLKVSC_DRV, "blkvsc registered for major %d!!",
                    major_info.major);

        blkdev->gd = alloc_disk(BLKVSC_MINORS);
        if (!blkdev->gd) {
                ret = -ENOMEM;
                goto remove;
        }

        blkdev->gd->queue = blk_init_queue(blkvsc_request, &blkdev->lock);

        blk_queue_max_segment_size(blkdev->gd->queue, PAGE_SIZE);
        blk_queue_max_segments(blkdev->gd->queue, MAX_MULTIPAGE_BUFFER_COUNT);
        blk_queue_segment_boundary(blkdev->gd->queue, PAGE_SIZE-1);
        blk_queue_bounce_limit(blkdev->gd->queue, BLK_BOUNCE_ANY);
        blk_queue_dma_alignment(blkdev->gd->queue, 511);

        blkdev->gd->major = major_info.major;
        if (major_info.index == 1 || major_info.index == 3)
                blkdev->gd->first_minor = BLKVSC_MINORS;
        else
                blkdev->gd->first_minor = 0;
        blkdev->gd->fops = &block_ops;
        blkdev->gd->private_data = blkdev;
        blkdev->gd->driverfs_dev = &(blkdev->device_ctx->device);
        sprintf(blkdev->gd->disk_name, "hd%c", 'a' + major_info.index);

        blkvsc_do_operation(blkdev, DO_INQUIRY);
        blkvsc_do_operation(blkdev, DO_CAPACITY);

        set_capacity(blkdev->gd, blkdev->capacity * (blkdev->sector_size/512));
        blk_queue_logical_block_size(blkdev->gd->queue, blkdev->sector_size);

        add_disk(blkdev->gd);

        DPRINT_INFO(BLKVSC_DRV, "%s added!! capacity %lu sector_size %d",
                    blkdev->gd->disk_name, (unsigned long)blkdev->capacity,
                    blkdev->sector_size);

        return ret;

remove:
        storvsc_dev_remove(dev);

cleanup:
        if (blkdev) {
                if (blkdev->request_pool) {
                        kmem_cache_destroy(blkdev->request_pool);
                        blkdev->request_pool = NULL;
                }
                kfree(blkdev);
        }

        return ret;
}
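
/*
 * blkvsc_request_completion() - Completion handler for regular I/O;
 * finishes the struct request once every blkvsc_req in its group is done.
 */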
static void blkvsc_request_completion(struct hv_storvsc_request *request)
{
        struct blkvsc_request *blkvsc_req =
                        (struct blkvsc_request *)request->context;
        struct block_device_context *blkdev =
                        (struct block_device_context *)blkvsc_req->dev;
        struct blkvsc_request *comp_req, *tmp;
        struct vmscsi_request *vm_srb;
        unsigned long flags;

        spin_lock_irqsave(&blkdev->lock, flags);

        blkdev->num_outstanding_reqs--;
        blkvsc_req->group->outstanding--;

        /*
         * Only start processing when all the blkvsc_reqs are
         * completed. This guarantees no out-of-order blkvsc_req
         * completion when calling end_that_request_first()
         */
        if (blkvsc_req->group->outstanding == 0) {
                list_for_each_entry_safe(comp_req, tmp,
                                         &blkvsc_req->group->blkvsc_req_list,
                                         req_entry) {

                        list_del(&comp_req->req_entry);

                        vm_srb =
                        &comp_req->request.vstor_packet.vm_srb;
                        if (!__blk_end_request(comp_req->req,
                                (!vm_srb->scsi_status ? 0 : -EIO),
                                comp_req->sector_count * blkdev->sector_size)) {
                                /*
                                 * All the sectors have been xferred ie the
                                 * request is done.
                                 */
                                kmem_cache_free(blkdev->request_pool,
                                                comp_req->group);
                        }

                        kmem_cache_free(blkdev->request_pool, comp_req);
                }

                if (!blkdev->shutting_down) {
                        blkvsc_do_pending_reqs(blkdev);
                        blk_start_queue(blkdev->gd->queue);
                        blkvsc_request(blkdev->gd->queue);
                }
        }

        spin_unlock_irqrestore(&blkdev->lock, flags);
}

static void __exit blkvsc_exit(void)
{
        blkvsc_drv_exit();
}

MODULE_LICENSE("GPL");
MODULE_VERSION(HV_DRV_VERSION);
MODULE_DESCRIPTION("Microsoft Hyper-V virtual block driver");
module_init(blkvsc_drv_init);
module_exit(blkvsc_exit);