/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/blkdev.h>
#include <linux/major.h>
#include <linux/delay.h>
#include <linux/hdreg.h>
#include <linux/slab.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_dbg.h>

#include "hyperv.h"
#include "hyperv_storage.h"

#define BLKVSC_MINORS	64

enum blkvsc_device_type {
	UNKNOWN_DEV_TYPE,
	HARDDISK_TYPE,
	DVD_TYPE,
};

enum blkvsc_op_type {
	DO_INQUIRY,
	DO_CAPACITY,
	DO_FLUSH,
};

/*
 * This request ties the struct request and struct
 * blkvsc_request/hv_storvsc_request together. A struct request may be
 * represented by 1 or more struct blkvsc_request.
 */
struct blkvsc_request_group {
	int outstanding;
	int status;
	struct list_head blkvsc_req_list;	/* list of blkvsc_requests */
};
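
/*
 * How the pieces below fit together: the block layer hands the driver one
 * struct request; blkvsc_do_request() carves it into one or more struct
 * blkvsc_request entries, each backed by a single hv_storvsc_request, and
 * links them on group->blkvsc_req_list.  group->outstanding counts the
 * members still in flight, so the original request is only completed once
 * every member has come back.
 */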

struct blkvsc_request {
	/* blkvsc_request_group.blkvsc_req_list */
	struct list_head req_entry;

	/* block_device_context.pending_list */
	struct list_head pend_entry;

	/* This may be null if we generate a request internally */
	struct request *req;

	struct block_device_context *dev;

	/* The group this request is part of. May be null */
	struct blkvsc_request_group *group;

	int write;
	sector_t sector_start;
	unsigned long sector_count;

	unsigned char sense_buffer[SCSI_SENSE_BUFFERSIZE];
	unsigned char cmd_len;
	unsigned char cmnd[MAX_COMMAND_SIZE];

	struct hv_storvsc_request request;
};

/* Per device structure */
struct block_device_context {
	/* point back to our device context */
	struct hv_device *device_ctx;
	struct kmem_cache *request_pool;
	spinlock_t lock;
	struct gendisk *gd;
	enum blkvsc_device_type device_type;
	struct list_head pending_list;

	unsigned char device_id[64];
	unsigned int device_id_len;
	int num_outstanding_reqs;
	int shutting_down;
	unsigned int sector_size;
	sector_t capacity;
	unsigned int port;
	unsigned char path;
	unsigned char target;
	int users;
};

static const char *drv_name = "blkvsc";

/* {32412632-86cb-44a2-9b5c-50d1417354f5} */
static const struct hv_guid dev_type = {
	.data = {
		0x32, 0x26, 0x41, 0x32, 0xcb, 0x86, 0xa2, 0x44,
		0x9b, 0x5c, 0x50, 0xd1, 0x41, 0x73, 0x54, 0xf5
	}
};

/*
 * There is a circular dependency involving blkvsc_request_completion()
 * and blkvsc_do_request().
 */
static void blkvsc_request_completion(struct hv_storvsc_request *request);

static int blkvsc_ringbuffer_size = BLKVSC_RING_BUFFER_SIZE;

module_param(blkvsc_ringbuffer_size, int, S_IRUGO);
MODULE_PARM_DESC(blkvsc_ringbuffer_size, "Ring buffer size (in bytes)");

/*
 * There is a circular dependency involving blkvsc_probe()
 * and block_ops.
 */
static int blkvsc_probe(struct hv_device *dev);

static int blkvsc_device_add(struct hv_device *device,
			     void *additional_info)
{
	struct storvsc_device_info *device_info;
	int ret = 0;

	device_info = (struct storvsc_device_info *)additional_info;

	device_info->ring_buffer_size = blkvsc_ringbuffer_size;

	ret = storvsc_dev_add(device, additional_info);

	/*
	 * We need to use the device instance guid to set the path and target
	 * id. For IDE devices, the device instance id is formatted as
	 * <bus id> - <device id> - 8899 - 000000000000.
	 */
	device_info->path_id = device->dev_instance.data[3] << 24 |
			       device->dev_instance.data[2] << 16 |
			       device->dev_instance.data[1] << 8  |
			       device->dev_instance.data[0];

	device_info->target_id = device->dev_instance.data[5] << 8 |
				 device->dev_instance.data[4];

	return ret;
}
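
/*
 * Illustration (not in the original source): data[0..3] of the instance
 * GUID carry the bus id field and data[4..5] the device id field; the
 * fields are presumably stored little-endian, which is why data[3]
 * supplies the most significant byte of path_id and data[5] the high
 * byte of target_id in the shifts above.
 */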

static int blkvsc_submit_request(struct blkvsc_request *blkvsc_req,
		void (*request_completion)(struct hv_storvsc_request *))
{
	struct block_device_context *blkdev = blkvsc_req->dev;
	struct hv_storvsc_request *storvsc_req;
	struct vmscsi_request *vm_srb;
	int ret;

	storvsc_req = &blkvsc_req->request;
	vm_srb = &storvsc_req->vstor_packet.vm_srb;

	vm_srb->data_in = blkvsc_req->write ? WRITE_TYPE : READ_TYPE;

	storvsc_req->on_io_completion = request_completion;
	storvsc_req->context = blkvsc_req;

	vm_srb->port_number = blkdev->port;
	vm_srb->path_id = blkdev->path;
	vm_srb->target_id = blkdev->target;
	vm_srb->lun = 0;	/* this is not really used at all */

	vm_srb->cdb_length = blkvsc_req->cmd_len;

	memcpy(vm_srb->cdb, blkvsc_req->cmnd, vm_srb->cdb_length);

	storvsc_req->sense_buffer = blkvsc_req->sense_buffer;

	ret = storvsc_do_io(blkdev->device_ctx,
			    &blkvsc_req->request);
	if (ret == 0)
		blkdev->num_outstanding_reqs++;

	return ret;
}
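
/*
 * blkvsc_submit_request() is the translation point between the block layer
 * view (blkvsc_request) and the storvsc/VMBus view: it fills a
 * vmscsi_request with the device's port/path/target (LUN is always 0),
 * copies in the CDB prepared by blkvsc_init_rw() or blkvsc_do_operation(),
 * points the sense buffer at the blkvsc_request and hands the packet to
 * storvsc_do_io().
 */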

static int blkvsc_open(struct block_device *bdev, fmode_t mode)
{
	struct block_device_context *blkdev = bdev->bd_disk->private_data;
	unsigned long flags;

	spin_lock_irqsave(&blkdev->lock, flags);

	blkdev->users++;

	spin_unlock_irqrestore(&blkdev->lock, flags);

	return 0;
}

static int blkvsc_getgeo(struct block_device *bd, struct hd_geometry *hg)
{
	sector_t nsect = get_capacity(bd->bd_disk);
	sector_t cylinders = nsect;

	/*
	 * We are making up these values; let us keep it simple.
	 */
	hg->heads = 0xff;
	hg->sectors = 0x3f;
	sector_div(cylinders, hg->heads * hg->sectors);
	hg->cylinders = cylinders;
	if ((sector_t)(hg->cylinders + 1) * hg->heads * hg->sectors < nsect)
		hg->cylinders = 0xffff;
	return 0;
}
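
/*
 * Note on the synthetic geometry above: hd_geometry.cylinders is only a
 * 16-bit field, so for large disks the computed cylinder count does not
 * fit and the assignment truncates; the final check detects this and
 * pins cylinders at 0xffff rather than report a geometry smaller than
 * the real capacity.
 */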

static void blkvsc_init_rw(struct blkvsc_request *blkvsc_req)
{
	blkvsc_req->cmd_len = 16;

	if (rq_data_dir(blkvsc_req->req)) {
		blkvsc_req->write = 1;
		blkvsc_req->cmnd[0] = WRITE_16;
	} else {
		blkvsc_req->write = 0;
		blkvsc_req->cmnd[0] = READ_16;
	}

	blkvsc_req->cmnd[1] |=
		(blkvsc_req->req->cmd_flags & REQ_FUA) ? 0x8 : 0;

	*(unsigned long long *)&blkvsc_req->cmnd[2] =
		cpu_to_be64(blkvsc_req->sector_start);
	*(unsigned int *)&blkvsc_req->cmnd[10] =
		cpu_to_be32(blkvsc_req->sector_count);
}
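
/*
 * CDB layout produced by blkvsc_init_rw(): byte 0 is READ_16 or WRITE_16,
 * bit 3 of byte 1 is FUA, bytes 2-9 carry the starting LBA as a 64-bit
 * big-endian value and bytes 10-13 the transfer length as a 32-bit
 * big-endian value, matching the cpu_to_be64()/cpu_to_be32() stores above.
 */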

static int blkvsc_ioctl(struct block_device *bd, fmode_t mode,
			unsigned cmd, unsigned long arg)
{
	struct block_device_context *blkdev = bd->bd_disk->private_data;
	int ret = 0;

	switch (cmd) {
	case HDIO_GET_IDENTITY:
		if (copy_to_user((void __user *)arg, blkdev->device_id,
				 blkdev->device_id_len))
			ret = -EFAULT;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static void blkvsc_cmd_completion(struct hv_storvsc_request *request)
{
	struct blkvsc_request *blkvsc_req =
			(struct blkvsc_request *)request->context;
	struct block_device_context *blkdev =
			(struct block_device_context *)blkvsc_req->dev;
	struct scsi_sense_hdr sense_hdr;
	struct vmscsi_request *vm_srb;
	unsigned long flags;

	vm_srb = &blkvsc_req->request.vstor_packet.vm_srb;

	spin_lock_irqsave(&blkdev->lock, flags);
	blkdev->num_outstanding_reqs--;
	spin_unlock_irqrestore(&blkdev->lock, flags);

	if (vm_srb->scsi_status)
		if (scsi_normalize_sense(blkvsc_req->sense_buffer,
					 SCSI_SENSE_BUFFERSIZE, &sense_hdr))
			scsi_print_sense_hdr("blkvsc", &sense_hdr);

	complete(&blkvsc_req->request.wait_event);
}

static int blkvsc_do_operation(struct block_device_context *blkdev,
			       enum blkvsc_op_type op)
{
	struct blkvsc_request *blkvsc_req;
	struct page *page_buf;
	unsigned char *buf;
	unsigned char device_type;
	struct scsi_sense_hdr sense_hdr;
	struct vmscsi_request *vm_srb;
	unsigned long flags;

	blkvsc_req = kmem_cache_zalloc(blkdev->request_pool, GFP_KERNEL);
	if (!blkvsc_req)
		return -ENOMEM;

	page_buf = alloc_page(GFP_KERNEL);
	if (!page_buf) {
		kmem_cache_free(blkdev->request_pool, blkvsc_req);
		return -ENOMEM;
	}

	vm_srb = &blkvsc_req->request.vstor_packet.vm_srb;
	init_completion(&blkvsc_req->request.wait_event);
	blkvsc_req->dev = blkdev;
	blkvsc_req->req = NULL;
	blkvsc_req->write = 0;

	blkvsc_req->request.data_buffer.pfn_array[0] = page_to_pfn(page_buf);
	blkvsc_req->request.data_buffer.offset = 0;

	switch (op) {
	case DO_INQUIRY:
		blkvsc_req->cmnd[0] = INQUIRY;
		blkvsc_req->cmnd[1] = 0x1;	/* Get product data */
		blkvsc_req->cmnd[2] = 0x83;	/* VPD page 0x83 */
		blkvsc_req->cmnd[4] = 64;
		blkvsc_req->cmd_len = 6;
		blkvsc_req->request.data_buffer.len = 64;
		break;

	case DO_CAPACITY:
		blkdev->sector_size = 0;
		blkdev->capacity = 0;

		blkvsc_req->cmnd[0] = READ_CAPACITY;
		blkvsc_req->cmd_len = 16;
		blkvsc_req->request.data_buffer.len = 8;
		break;

	case DO_FLUSH:
		blkvsc_req->cmnd[0] = SYNCHRONIZE_CACHE;
		blkvsc_req->cmd_len = 10;
		blkvsc_req->request.data_buffer.pfn_array[0] = 0;
		blkvsc_req->request.data_buffer.len = 0;
		break;
	}

	spin_lock_irqsave(&blkdev->lock, flags);
	blkvsc_submit_request(blkvsc_req, blkvsc_cmd_completion);
	spin_unlock_irqrestore(&blkdev->lock, flags);

	wait_for_completion_interruptible(&blkvsc_req->request.wait_event);

	/* check error */
	if (vm_srb->scsi_status) {
		scsi_normalize_sense(blkvsc_req->sense_buffer,
				     SCSI_SENSE_BUFFERSIZE, &sense_hdr);
		goto cleanup;
	}

	buf = kmap(page_buf);

	switch (op) {
	case DO_INQUIRY:
		device_type = buf[0] & 0x1F;

		if (device_type == 0x0)
			blkdev->device_type = HARDDISK_TYPE;
		else
			blkdev->device_type = UNKNOWN_DEV_TYPE;

		blkdev->device_id_len = buf[7];
		if (blkdev->device_id_len > 64)
			blkdev->device_id_len = 64;

		memcpy(blkdev->device_id, &buf[8], blkdev->device_id_len);
		break;

	case DO_CAPACITY:
		/* READ CAPACITY returns big-endian values */
		blkdev->capacity =
			((buf[0] << 24) | (buf[1] << 16) |
			 (buf[2] << 8) | buf[3]) + 1;

		blkdev->sector_size =
			(buf[4] << 24) | (buf[5] << 16) |
			(buf[6] << 8) | buf[7];
		break;

	default:
		break;
	}

	kunmap(page_buf);

cleanup:
	__free_page(page_buf);

	kmem_cache_free(blkdev->request_pool, blkvsc_req);

	return 0;
}
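
/*
 * blkvsc_do_operation() services the driver's internally generated
 * commands: DO_INQUIRY issues INQUIRY with the EVPD bit set for VPD page
 * 0x83 to learn the device type and unique id, DO_CAPACITY issues
 * READ_CAPACITY to fill in blkdev->capacity and blkdev->sector_size, and
 * DO_FLUSH issues SYNCHRONIZE_CACHE with no data buffer.  The caller
 * blocks on request.wait_event until blkvsc_cmd_completion() fires.
 */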

static int blkvsc_cancel_pending_reqs(struct block_device_context *blkdev)
{
	struct blkvsc_request *pend_req, *tmp;
	struct blkvsc_request *comp_req, *tmp2;
	struct vmscsi_request *vm_srb;
	int ret = 0;

	/* Flush the pending list first */
	list_for_each_entry_safe(pend_req, tmp, &blkdev->pending_list,
				 pend_entry) {
		/*
		 * The pend_req could be part of a partially completed
		 * request. If so, complete those req first until we
		 * hit the pend_req.
		 */
		list_for_each_entry_safe(comp_req, tmp2,
					 &pend_req->group->blkvsc_req_list,
					 req_entry) {

			if (comp_req == pend_req)
				break;

			list_del(&comp_req->req_entry);

			if (comp_req->req) {
				vm_srb =
					&comp_req->request.vstor_packet.
					vm_srb;
				ret = __blk_end_request(comp_req->req,
					(!vm_srb->scsi_status ? 0 : -EIO),
					comp_req->sector_count *
					blkdev->sector_size);

				/* FIXME: shouldn't this do more than return? */
				if (ret)
					goto out;
			}

			kmem_cache_free(blkdev->request_pool, comp_req);
		}

		list_del(&pend_req->pend_entry);

		list_del(&pend_req->req_entry);

		if (!__blk_end_request(pend_req->req, -EIO,
				       pend_req->sector_count *
				       blkdev->sector_size)) {
			/*
			 * All the sectors have been transferred, i.e. the
			 * request is done.
			 */
			kmem_cache_free(blkdev->request_pool,
					pend_req->group);
		}

		kmem_cache_free(blkdev->request_pool, pend_req);
	}

out:
	return ret;
}

/*
 * blkvsc_remove() - Callback when our device is removed
 */
static int blkvsc_remove(struct hv_device *dev)
{
	struct block_device_context *blkdev = dev_get_drvdata(&dev->device);
	unsigned long flags;

	/* Get to a known state */
	spin_lock_irqsave(&blkdev->lock, flags);

	blkdev->shutting_down = 1;

	blk_stop_queue(blkdev->gd->queue);

	blkvsc_cancel_pending_reqs(blkdev);

	spin_unlock_irqrestore(&blkdev->lock, flags);

	blkvsc_do_operation(blkdev, DO_FLUSH);

	if (blkdev->users == 0) {
		del_gendisk(blkdev->gd);
		put_disk(blkdev->gd);
		blk_cleanup_queue(blkdev->gd->queue);

		storvsc_dev_remove(blkdev->device_ctx);

		kmem_cache_destroy(blkdev->request_pool);

		kfree(blkdev);
	}

	return 0;
}

static void blkvsc_shutdown(struct hv_device *dev)
{
	struct block_device_context *blkdev = dev_get_drvdata(&dev->device);
	unsigned long flags;

	if (!blkdev)
		return;

	spin_lock_irqsave(&blkdev->lock, flags);

	blkdev->shutting_down = 1;

	blk_stop_queue(blkdev->gd->queue);

	blkvsc_cancel_pending_reqs(blkdev);

	spin_unlock_irqrestore(&blkdev->lock, flags);

	blkvsc_do_operation(blkdev, DO_FLUSH);

	/*
	 * Now wait for all outgoing I/O to be drained.
	 */
	storvsc_wait_to_drain((struct storvsc_device *)dev->ext);
}

static int blkvsc_release(struct gendisk *disk, fmode_t mode)
{
	struct block_device_context *blkdev = disk->private_data;
	unsigned long flags;

	spin_lock_irqsave(&blkdev->lock, flags);

	if ((--blkdev->users == 0) && (blkdev->shutting_down)) {
		blk_stop_queue(blkdev->gd->queue);
		spin_unlock_irqrestore(&blkdev->lock, flags);

		blkvsc_do_operation(blkdev, DO_FLUSH);
		del_gendisk(blkdev->gd);
		put_disk(blkdev->gd);
		blk_cleanup_queue(blkdev->gd->queue);

		storvsc_dev_remove(blkdev->device_ctx);

		kmem_cache_destroy(blkdev->request_pool);
		kfree(blkdev);
	} else
		spin_unlock_irqrestore(&blkdev->lock, flags);

	return 0;
}

/*
 * We break the request into 1 or more blkvsc_requests and submit
 * them.  If we can't submit them all, we put them on the
 * pending_list. The blkvsc_request() will work on the pending_list.
 */
static int blkvsc_do_request(struct block_device_context *blkdev,
			     struct request *req)
{
	struct bio *bio = NULL;
	struct bio_vec *bvec = NULL;
	struct bio_vec *prev_bvec = NULL;
	struct blkvsc_request *blkvsc_req = NULL;
	struct blkvsc_request *tmp;
	int databuf_idx = 0;
	int seg_idx = 0;
	sector_t start_sector;
	unsigned long num_sectors = 0;
	int ret = 0;
	int pending = 0;
	struct blkvsc_request_group *group = NULL;

	/* Create a group to tie req to list of blkvsc_reqs */
	group = kmem_cache_zalloc(blkdev->request_pool, GFP_ATOMIC);
	if (!group)
		return -ENOMEM;

	INIT_LIST_HEAD(&group->blkvsc_req_list);
	group->outstanding = group->status = 0;

	start_sector = blk_rq_pos(req);

	/* foreach bio in the request */
	for (bio = req->bio; bio; bio = bio->bi_next) {
		/*
		 * Map this bio into an existing or new storvsc request
		 */
		bio_for_each_segment(bvec, bio, seg_idx) {
			/* Get a new storvsc request */
			if ((!blkvsc_req) ||
			    (databuf_idx >= MAX_MULTIPAGE_BUFFER_COUNT)
			    /* hole at the begin of page */
			    || (bvec->bv_offset != 0) ||
			    /* hole at the end of page */
			    (prev_bvec &&
			     (prev_bvec->bv_len != PAGE_SIZE))) {
				/* submit the prev one */
				if (blkvsc_req) {
					blkvsc_req->sector_start =
						start_sector;
					sector_div(
						blkvsc_req->sector_start,
						(blkdev->sector_size >> 9));

					blkvsc_req->sector_count =
						num_sectors /
						(blkdev->sector_size >> 9);
					blkvsc_init_rw(blkvsc_req);
				}

				/*
				 * Create new blkvsc_req to represent
				 * the current bvec
				 */
				blkvsc_req = kmem_cache_zalloc(
					blkdev->request_pool, GFP_ATOMIC);
				if (!blkvsc_req) {
					/* free up everything */
					list_for_each_entry_safe(
						blkvsc_req, tmp,
						&group->blkvsc_req_list,
						req_entry) {
						list_del(
						    &blkvsc_req->req_entry);
						kmem_cache_free(
						    blkdev->request_pool,
						    blkvsc_req);
					}

					kmem_cache_free(
						blkdev->request_pool, group);
					return -ENOMEM;
				}

				memset(blkvsc_req, 0,
				       sizeof(struct blkvsc_request));

				blkvsc_req->dev = blkdev;
				blkvsc_req->req = req;
				blkvsc_req->request.data_buffer.offset =
					bvec->bv_offset;
				blkvsc_req->request.data_buffer.len = 0;

				/* Add to the group */
				blkvsc_req->group = group;
				blkvsc_req->group->outstanding++;
				list_add_tail(&blkvsc_req->req_entry,
					&blkvsc_req->group->blkvsc_req_list);

				start_sector += num_sectors;
				num_sectors = 0;
				databuf_idx = 0;
			}

			/*
			 * Add the curr bvec/segment to the curr
			 * blkvsc_req
			 */
			blkvsc_req->request.data_buffer.
				pfn_array[databuf_idx]
				= page_to_pfn(bvec->bv_page);
			blkvsc_req->request.data_buffer.len
				+= bvec->bv_len;

			prev_bvec = bvec;

			databuf_idx++;
			num_sectors += bvec->bv_len >> 9;

		} /* bio_for_each_segment */

	} /* rq_for_each_bio */

	/* Handle the last one */
	if (blkvsc_req) {
		blkvsc_req->sector_start = start_sector;
		sector_div(blkvsc_req->sector_start,
			   (blkdev->sector_size >> 9));

		blkvsc_req->sector_count = num_sectors /
					   (blkdev->sector_size >> 9);

		blkvsc_init_rw(blkvsc_req);
	}

	list_for_each_entry(blkvsc_req, &group->blkvsc_req_list, req_entry) {
		if (pending) {
			list_add_tail(&blkvsc_req->pend_entry,
				      &blkdev->pending_list);
		} else {
			ret = blkvsc_submit_request(blkvsc_req,
						    blkvsc_request_completion);
			if (ret == -1) {
				pending = 1;
				list_add_tail(&blkvsc_req->pend_entry,
					      &blkdev->pending_list);
			}
		}
	}

	return pending;
}
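
/*
 * Submission strategy recap: bio segments are packed into the multipage
 * buffer of the current blkvsc_request until the pfn array is full or a
 * segment is not page-contiguous, at which point the current request is
 * finalized with blkvsc_init_rw() and a new one is started.  Any request
 * that cannot be submitted immediately is parked on blkdev->pending_list
 * and retried from blkvsc_do_pending_reqs().
 */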

static int blkvsc_do_pending_reqs(struct block_device_context *blkdev)
{
	struct blkvsc_request *pend_req, *tmp;
	int ret = 0;

	/* Flush the pending list first */
	list_for_each_entry_safe(pend_req, tmp, &blkdev->pending_list,
				 pend_entry) {

		ret = blkvsc_submit_request(pend_req,
					    blkvsc_request_completion);
		if (ret != 0)
			break;
		else
			list_del(&pend_req->pend_entry);
	}

	return ret;
}

static void blkvsc_request(struct request_queue *queue)
{
	struct block_device_context *blkdev = NULL;
	struct request *req;
	int ret = 0;

	while ((req = blk_peek_request(queue)) != NULL) {

		blkdev = req->rq_disk->private_data;
		if (blkdev->shutting_down || req->cmd_type != REQ_TYPE_FS) {
			__blk_end_request_cur(req, 0);
			continue;
		}

		ret = blkvsc_do_pending_reqs(blkdev);

		if (ret != 0) {
			blk_stop_queue(queue);
			break;
		}

		blk_start_request(req);

		ret = blkvsc_do_request(blkdev, req);
		if (ret > 0) {
			blk_stop_queue(queue);
			break;
		} else if (ret < 0) {
			blk_requeue_request(queue, req);
			blk_stop_queue(queue);
			break;
		}
	}
}

/* The one and only one */
static struct hv_driver blkvsc_drv = {
	.probe = blkvsc_probe,
	.remove = blkvsc_remove,
	.shutdown = blkvsc_shutdown,
};

static const struct block_device_operations block_ops = {
	.owner = THIS_MODULE,
	.open = blkvsc_open,
	.release = blkvsc_release,
	.getgeo = blkvsc_getgeo,
	.ioctl = blkvsc_ioctl,
};

/*
 * blkvsc_drv_init - BlkVsc driver initialization.
 */
static int blkvsc_drv_init(void)
{
	struct hv_driver *drv = &blkvsc_drv;
	int ret;

	BUILD_BUG_ON(sizeof(sector_t) != 8);

	memcpy(&drv->dev_type, &dev_type, sizeof(struct hv_guid));
	drv->driver.name = drv_name;

	/* The driver belongs to vmbus */
	ret = vmbus_child_driver_register(&drv->driver);

	return ret;
}

static void blkvsc_drv_exit(void)
{
	vmbus_child_driver_unregister(&blkvsc_drv.driver);
}

/*
 * blkvsc_probe - Add a new device for this driver
 */
static int blkvsc_probe(struct hv_device *dev)
{
	struct block_device_context *blkdev = NULL;
	struct storvsc_device_info device_info;
	struct storvsc_major_info major_info;
	int ret = 0;

	blkdev = kzalloc(sizeof(struct block_device_context), GFP_KERNEL);
	if (!blkdev)
		return -ENOMEM;

	INIT_LIST_HEAD(&blkdev->pending_list);

	/* Initialize what we can here */
	spin_lock_init(&blkdev->lock);

	blkdev->request_pool = kmem_cache_create(dev_name(&dev->device),
					sizeof(struct blkvsc_request), 0,
					SLAB_HWCACHE_ALIGN, NULL);
	if (!blkdev->request_pool) {
		ret = -ENOMEM;
		goto cleanup;
	}

	ret = blkvsc_device_add(dev, &device_info);
	if (ret != 0)
		goto cleanup;

	blkdev->device_ctx = dev;
	/* this identified the device 0 or 1 */
	blkdev->target = device_info.target_id;
	/* this identified the ide ctrl 0 or 1 */
	blkdev->path = device_info.path_id;

	dev_set_drvdata(&dev->device, blkdev);

	ret = storvsc_get_major_info(&device_info, &major_info);
	if (ret)
		goto remove;

	if (major_info.do_register) {
		ret = register_blkdev(major_info.major, major_info.devname);

		if (ret != 0) {
			DPRINT_ERR(BLKVSC_DRV,
				   "register_blkdev() failed! ret %d", ret);
			goto remove;
		}
	}

	DPRINT_INFO(BLKVSC_DRV, "blkvsc registered for major %d!!",
		    major_info.major);

	blkdev->gd = alloc_disk(BLKVSC_MINORS);
	if (!blkdev->gd) {
		ret = -ENODEV;
		goto remove;
	}

	blkdev->gd->queue = blk_init_queue(blkvsc_request, &blkdev->lock);

	blk_queue_max_segment_size(blkdev->gd->queue, PAGE_SIZE);
	blk_queue_max_segments(blkdev->gd->queue, MAX_MULTIPAGE_BUFFER_COUNT);
	blk_queue_segment_boundary(blkdev->gd->queue, PAGE_SIZE-1);
	blk_queue_bounce_limit(blkdev->gd->queue, BLK_BOUNCE_ANY);
	blk_queue_dma_alignment(blkdev->gd->queue, 511);

	blkdev->gd->major = major_info.major;
	if (major_info.index == 1 || major_info.index == 3)
		blkdev->gd->first_minor = BLKVSC_MINORS;
	else
		blkdev->gd->first_minor = 0;
	blkdev->gd->fops = &block_ops;
	blkdev->gd->private_data = blkdev;
	blkdev->gd->driverfs_dev = &(blkdev->device_ctx->device);
	sprintf(blkdev->gd->disk_name, "hd%c", 'a' + major_info.index);

	blkvsc_do_operation(blkdev, DO_INQUIRY);
	blkvsc_do_operation(blkdev, DO_CAPACITY);

	set_capacity(blkdev->gd, blkdev->capacity * (blkdev->sector_size/512));
	blk_queue_logical_block_size(blkdev->gd->queue, blkdev->sector_size);
	/* go! */
	add_disk(blkdev->gd);

	DPRINT_INFO(BLKVSC_DRV, "%s added!! capacity %lu sector_size %d",
		    blkdev->gd->disk_name, (unsigned long)blkdev->capacity,
		    blkdev->sector_size);

	return ret;

remove:
	storvsc_dev_remove(dev);

cleanup:
	if (blkdev->request_pool) {
		kmem_cache_destroy(blkdev->request_pool);
		blkdev->request_pool = NULL;
	}
	kfree(blkdev);

	return ret;
}
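
/*
 * Probe sequence recap: allocate the per-device context and request
 * cache, register the storvsc device to learn the IDE path/target ids,
 * grab a block major if needed, size the queue limits to match the
 * multipage packet format (PAGE_SIZE segments, at most
 * MAX_MULTIPAGE_BUFFER_COUNT of them), then issue DO_INQUIRY and
 * DO_CAPACITY before add_disk() makes the "hd*" disk visible.
 */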

static void blkvsc_request_completion(struct hv_storvsc_request *request)
{
	struct blkvsc_request *blkvsc_req =
			(struct blkvsc_request *)request->context;
	struct block_device_context *blkdev =
			(struct block_device_context *)blkvsc_req->dev;
	unsigned long flags;
	struct blkvsc_request *comp_req, *tmp;
	struct vmscsi_request *vm_srb;

	spin_lock_irqsave(&blkdev->lock, flags);

	blkdev->num_outstanding_reqs--;
	blkvsc_req->group->outstanding--;

	/*
	 * Only start processing when all the blkvsc_reqs are
	 * completed. This guarantees no out-of-order blkvsc_req
	 * completion when calling end_that_request_first()
	 */
	if (blkvsc_req->group->outstanding == 0) {
		list_for_each_entry_safe(comp_req, tmp,
					 &blkvsc_req->group->blkvsc_req_list,
					 req_entry) {

			list_del(&comp_req->req_entry);

			vm_srb =
				&comp_req->request.vstor_packet.vm_srb;
			if (!__blk_end_request(comp_req->req,
				(!vm_srb->scsi_status ? 0 : -EIO),
				comp_req->sector_count * blkdev->sector_size)) {
				/*
				 * All the sectors have been transferred,
				 * i.e. the request is done.
				 */
				kmem_cache_free(blkdev->request_pool,
						comp_req->group);
			}

			kmem_cache_free(blkdev->request_pool, comp_req);
		}

		if (!blkdev->shutting_down) {
			blkvsc_do_pending_reqs(blkdev);
			blk_start_queue(blkdev->gd->queue);
			blkvsc_request(blkdev->gd->queue);
		}
	}

	spin_unlock_irqrestore(&blkdev->lock, flags);
}

static void __exit blkvsc_exit(void)
{
	blkvsc_drv_exit();
}

MODULE_LICENSE("GPL");
MODULE_VERSION(HV_DRV_VERSION);
MODULE_DESCRIPTION("Microsoft Hyper-V virtual block driver");
module_init(blkvsc_drv_init);
module_exit(blkvsc_exit);