#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/interrupt.h>
#include <linux/virtio.h>
#include <linux/virtio_blk.h>
#include <linux/scatterlist.h>
#include <linux/string_helpers.h>
#include <scsi/scsi_cmnd.h>
#include <linux/idr.h>
#include <linux/blk-mq.h>
#include <linux/blk-mq-virtio.h>
#include <linux/numa.h>

#define PART_BITS 4
#define VQ_NAME_LEN 16

static int major;
static DEFINE_IDA(vd_index_ida);

static struct workqueue_struct *virtblk_wq;
struct virtio_blk_vq {
	struct virtqueue *vq;
	spinlock_t lock;
	char name[VQ_NAME_LEN];
} ____cacheline_aligned_in_smp;
struct virtio_blk {
	struct virtio_device *vdev;

	/* The disk structure for the kernel. */
	struct gendisk *disk;

	/* Block layer tags. */
	struct blk_mq_tag_set tag_set;

	/* Process context for config space updates */
	struct work_struct config_work;

	/* What host tells us, plus 2 for header & tailer. */
	unsigned int sg_elems;

	/* Ida index - used to track minor number allocations. */
	int index;

	/* num of vqs */
	int num_vqs;
	struct virtio_blk_vq *vqs;
};
struct virtblk_req {
#ifdef CONFIG_VIRTIO_BLK_SCSI
	struct scsi_request sreq;	/* for SCSI passthrough, must be first */
	u8 sense[SCSI_SENSE_BUFFERSIZE];
	struct virtio_scsi_inhdr in_hdr;
#endif
	struct virtio_blk_outhdr out_hdr;
	u8 status;
	struct scatterlist sg[];
};
static inline blk_status_t virtblk_result(struct virtblk_req *vbr)
{
	switch (vbr->status) {
	case VIRTIO_BLK_S_OK:
		return BLK_STS_OK;
	case VIRTIO_BLK_S_UNSUPP:
		return BLK_STS_NOTSUPP;
	default:
		return BLK_STS_IOERR;
	}
}
/*
 * If this is a packet command we need a couple of additional headers. Behind
 * the normal outhdr we put a segment with the scsi command block, and before
 * the normal inhdr we put the sense data and the inhdr with additional status
 * information.
 */
#ifdef CONFIG_VIRTIO_BLK_SCSI
static int virtblk_add_req_scsi(struct virtqueue *vq, struct virtblk_req *vbr,
		struct scatterlist *data_sg, bool have_data)
{
	struct scatterlist hdr, status, cmd, sense, inhdr, *sgs[6];
	unsigned int num_out = 0, num_in = 0;

	sg_init_one(&hdr, &vbr->out_hdr, sizeof(vbr->out_hdr));
	sgs[num_out++] = &hdr;
	sg_init_one(&cmd, vbr->sreq.cmd, vbr->sreq.cmd_len);
	sgs[num_out++] = &cmd;

	if (have_data) {
		if (vbr->out_hdr.type & cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_OUT))
			sgs[num_out++] = data_sg;
		else
			sgs[num_out + num_in++] = data_sg;
	}

	sg_init_one(&sense, vbr->sense, SCSI_SENSE_BUFFERSIZE);
	sgs[num_out + num_in++] = &sense;
	sg_init_one(&inhdr, &vbr->in_hdr, sizeof(vbr->in_hdr));
	sgs[num_out + num_in++] = &inhdr;
	sg_init_one(&status, &vbr->status, sizeof(vbr->status));
	sgs[num_out + num_in++] = &status;

	return virtqueue_add_sgs(vq, sgs, num_out, num_in, vbr, GFP_ATOMIC);
}
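/*
 * Illustration (added commentary, not part of the original source): for a
 * SCSI command that reads one data segment, the chain built above would be
 *
 *   sgs[0] = &hdr      out_hdr       (driver -> device)
 *   sgs[1] = &cmd      CDB           (driver -> device)
 *   sgs[2] = data_sg   read payload  (device -> driver)
 *   sgs[3] = &sense    sense buffer  (device -> driver)
 *   sgs[4] = &inhdr    SCSI inhdr    (device -> driver)
 *   sgs[5] = &status   status byte   (device -> driver)
 *
 * i.e. num_out == 2 and num_in == 4 when virtqueue_add_sgs() is called.
 */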
static inline void virtblk_scsi_request_done(struct request *req)
{
	struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
	struct virtio_blk *vblk = req->q->queuedata;
	struct scsi_request *sreq = &vbr->sreq;

	sreq->resid_len = virtio32_to_cpu(vblk->vdev, vbr->in_hdr.residual);
	sreq->sense_len = virtio32_to_cpu(vblk->vdev, vbr->in_hdr.sense_len);
	sreq->result = virtio32_to_cpu(vblk->vdev, vbr->in_hdr.errors);
}
static int virtblk_ioctl(struct block_device *bdev, fmode_t mode,
			     unsigned int cmd, unsigned long data)
{
	struct gendisk *disk = bdev->bd_disk;
	struct virtio_blk *vblk = disk->private_data;

	/*
	 * Only allow the generic SCSI ioctls if the host can support it.
	 */
	if (!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_SCSI))
		return -ENOTTY;

	return scsi_cmd_blk_ioctl(bdev, mode, cmd,
				  (void __user *)data);
}
#else
static inline int virtblk_add_req_scsi(struct virtqueue *vq,
		struct virtblk_req *vbr, struct scatterlist *data_sg,
		bool have_data)
{
	return -EIO;
}

static inline void virtblk_scsi_request_done(struct request *req)
{
}
#define virtblk_ioctl	NULL
#endif /* CONFIG_VIRTIO_BLK_SCSI */
static int virtblk_add_req(struct virtqueue *vq, struct virtblk_req *vbr,
		struct scatterlist *data_sg, bool have_data)
{
	struct scatterlist hdr, status, *sgs[3];
	unsigned int num_out = 0, num_in = 0;

	sg_init_one(&hdr, &vbr->out_hdr, sizeof(vbr->out_hdr));
	sgs[num_out++] = &hdr;

	if (have_data) {
		if (vbr->out_hdr.type & cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_OUT))
			sgs[num_out++] = data_sg;
		else
			sgs[num_out + num_in++] = data_sg;
	}

	sg_init_one(&status, &vbr->status, sizeof(vbr->status));
	sgs[num_out + num_in++] = &status;

	return virtqueue_add_sgs(vq, sgs, num_out, num_in, vbr, GFP_ATOMIC);
}
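/*
 * Illustration (added commentary, not part of the original source): for a
 * normal write the chain is out_hdr, data_sg, status (num_out == 2,
 * num_in == 1); for a read the data segment is counted on the
 * device-to-driver side instead (num_out == 1, num_in == 2).
 */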
static inline void virtblk_request_done(struct request *req)
{
	struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);

	switch (req_op(req)) {
	case REQ_OP_SCSI_IN:
	case REQ_OP_SCSI_OUT:
		virtblk_scsi_request_done(req);
		break;
	}

	blk_mq_end_request(req, virtblk_result(vbr));
}
static void virtblk_done(struct virtqueue *vq)
{
	struct virtio_blk *vblk = vq->vdev->priv;
	bool req_done = false;
	int qid = vq->index;
	struct virtblk_req *vbr;
	unsigned long flags;
	unsigned int len;

	spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
	do {
		virtqueue_disable_cb(vq);
		while ((vbr = virtqueue_get_buf(vblk->vqs[qid].vq, &len)) != NULL) {
			struct request *req = blk_mq_rq_from_pdu(vbr);

			blk_mq_complete_request(req);
			req_done = true;
		}
		if (unlikely(virtqueue_is_broken(vq)))
			break;
	} while (!virtqueue_enable_cb(vq));

	/* In case queue is stopped waiting for more buffers. */
	if (req_done)
		blk_mq_start_stopped_hw_queues(vblk->disk->queue, true);
	spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
}
static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
			   const struct blk_mq_queue_data *bd)
{
	struct virtio_blk *vblk = hctx->queue->queuedata;
	struct request *req = bd->rq;
	struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
	unsigned long flags;
	unsigned int num;
	int qid = hctx->queue_num;
	int err;
	bool notify = false;
	u32 type;

	BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems);

	switch (req_op(req)) {
	case REQ_OP_READ:
	case REQ_OP_WRITE:
		type = 0;
		break;
	case REQ_OP_FLUSH:
		type = VIRTIO_BLK_T_FLUSH;
		break;
	case REQ_OP_SCSI_IN:
	case REQ_OP_SCSI_OUT:
		type = VIRTIO_BLK_T_SCSI_CMD;
		break;
	case REQ_OP_DRV_IN:
		type = VIRTIO_BLK_T_GET_ID;
		break;
	default:
		WARN_ON_ONCE(1);
		return BLK_STS_IOERR;
	}

	vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, type);
	vbr->out_hdr.sector = type ?
		0 : cpu_to_virtio64(vblk->vdev, blk_rq_pos(req));
	vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(req));

	blk_mq_start_request(req);

	num = blk_rq_map_sg(hctx->queue, req, vbr->sg);
	if (num) {
		if (rq_data_dir(req) == WRITE)
			vbr->out_hdr.type |= cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_OUT);
		else
			vbr->out_hdr.type |= cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_IN);
	}

	spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
	if (blk_rq_is_scsi(req))
		err = virtblk_add_req_scsi(vblk->vqs[qid].vq, vbr, vbr->sg, num);
	else
		err = virtblk_add_req(vblk->vqs[qid].vq, vbr, vbr->sg, num);
	if (err) {
		virtqueue_kick(vblk->vqs[qid].vq);
		blk_mq_stop_hw_queue(hctx);
		spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
		/* Out of mem doesn't actually happen, since we fall back
		 * to direct descriptors */
		if (err == -ENOMEM || err == -ENOSPC)
			return BLK_STS_RESOURCE;
		return BLK_STS_IOERR;
	}

	if (bd->last && virtqueue_kick_prepare(vblk->vqs[qid].vq))
		notify = true;
	spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);

	if (notify)
		virtqueue_notify(vblk->vqs[qid].vq);
	return BLK_STS_OK;
}
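/*
 * Note (added commentary): the doorbell is deliberately rung outside the
 * virtqueue lock. virtqueue_kick_prepare() only decides, under the lock,
 * whether the host needs a notification for this batch (bd->last marks the
 * end of a blk-mq batch); the potentially expensive virtqueue_notify(),
 * often a VM exit, then happens after the lock is dropped.
 */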
/* return id (s/n) string for *disk to *id_str
 */
static int virtblk_get_id(struct gendisk *disk, char *id_str)
{
	struct virtio_blk *vblk = disk->private_data;
	struct request_queue *q = vblk->disk->queue;
	struct request *req;
	int err;

	req = blk_get_request(q, REQ_OP_DRV_IN, GFP_KERNEL);
	if (IS_ERR(req))
		return PTR_ERR(req);

	err = blk_rq_map_kern(q, req, id_str, VIRTIO_BLK_ID_BYTES, GFP_KERNEL);
	if (err)
		goto out;

	blk_execute_rq(vblk->disk->queue, vblk->disk, req, false);
	err = blk_status_to_errno(virtblk_result(blk_mq_rq_to_pdu(req)));
out:
	blk_put_request(req);
	return err;
}
/* We provide getgeo only to please some old bootloader/partitioning tools */
static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo)
{
	struct virtio_blk *vblk = bd->bd_disk->private_data;

	/* see if the host passed in geometry config */
	if (virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_GEOMETRY)) {
		virtio_cread(vblk->vdev, struct virtio_blk_config,
			     geometry.cylinders, &geo->cylinders);
		virtio_cread(vblk->vdev, struct virtio_blk_config,
			     geometry.heads, &geo->heads);
		virtio_cread(vblk->vdev, struct virtio_blk_config,
			     geometry.sectors, &geo->sectors);
	} else {
		/* some standard values, similar to sd */
		geo->heads = 1 << 6;
		geo->sectors = 1 << 5;
		geo->cylinders = get_capacity(bd->bd_disk) >> 11;
	}
	return 0;
}
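/*
 * Worked example (added commentary): with the sd-like fallback of 64 heads
 * and 32 sectors per track, a 1 GiB disk (2097152 512-byte sectors) reports
 * 2097152 >> 11 = 1024 cylinders, and 1024 * 64 * 32 sectors round-trips to
 * exactly the disk capacity.
 */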
static const struct block_device_operations virtblk_fops = {
	.ioctl  = virtblk_ioctl,
	.owner  = THIS_MODULE,
	.getgeo = virtblk_getgeo,
};
static int index_to_minor(int index)
{
	return index << PART_BITS;
}

static int minor_to_index(int minor)
{
	return minor >> PART_BITS;
}
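/*
 * Illustration (added commentary): with PART_BITS == 4 each disk owns a
 * block of 16 minor numbers, so index 2 maps to first_minor 32 and covers
 * minors 32..47 (the whole-disk node plus up to 15 partitions).
 */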
static ssize_t virtblk_serial_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);
	int err;

	/* sysfs gives us a PAGE_SIZE buffer */
	BUILD_BUG_ON(PAGE_SIZE < VIRTIO_BLK_ID_BYTES);

	buf[VIRTIO_BLK_ID_BYTES] = '\0';
	err = virtblk_get_id(disk, buf);
	if (!err)
		return strlen(buf);

	if (err == -EIO) /* Unsupported? Make it empty. */
		return 0;

	return err;
}

static DEVICE_ATTR(serial, S_IRUGO, virtblk_serial_show, NULL);
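/* Usage note (added commentary): this attribute is what exposes the device
 * serial to userspace, e.g. readable via /sys/block/vda/serial. */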
static void virtblk_config_changed_work(struct work_struct *work)
{
	struct virtio_blk *vblk =
		container_of(work, struct virtio_blk, config_work);
	struct virtio_device *vdev = vblk->vdev;
	struct request_queue *q = vblk->disk->queue;
	char cap_str_2[10], cap_str_10[10];
	char *envp[] = { "RESIZE=1", NULL };
	unsigned long long nblocks;
	u64 capacity;

	/* Host must always specify the capacity. */
	virtio_cread(vdev, struct virtio_blk_config, capacity, &capacity);

	/* If capacity is too big, truncate with warning. */
	if ((sector_t)capacity != capacity) {
		dev_warn(&vdev->dev, "Capacity %llu too large: truncating\n",
			 (unsigned long long)capacity);
		capacity = (sector_t)-1;
	}

	nblocks = DIV_ROUND_UP_ULL(capacity, queue_logical_block_size(q) >> 9);

	string_get_size(nblocks, queue_logical_block_size(q),
			STRING_UNITS_2, cap_str_2, sizeof(cap_str_2));
	string_get_size(nblocks, queue_logical_block_size(q),
			STRING_UNITS_10, cap_str_10, sizeof(cap_str_10));

	dev_notice(&vdev->dev,
		   "new size: %llu %d-byte logical blocks (%s/%s)\n",
		   nblocks,
		   queue_logical_block_size(q),
		   cap_str_10,
		   cap_str_2);

	set_capacity(vblk->disk, capacity);
	revalidate_disk(vblk->disk);
	kobject_uevent_env(&disk_to_dev(vblk->disk)->kobj, KOBJ_CHANGE, envp);
}
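/*
 * Worked example (added commentary): capacity is reported in 512-byte
 * sectors. For capacity == 4194304 with a 4096-byte logical block size,
 * nblocks = 4194304 / (4096 >> 9) = 524288, which string_get_size() renders
 * as "2.00 GiB" (binary units) and "2.15 GB" (decimal units).
 */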
static void virtblk_config_changed(struct virtio_device *vdev)
{
	struct virtio_blk *vblk = vdev->priv;

	queue_work(virtblk_wq, &vblk->config_work);
}
static int init_vq(struct virtio_blk *vblk)
{
	int err;
	int i;
	vq_callback_t **callbacks;
	const char **names;
	struct virtqueue **vqs;
	unsigned short num_vqs;
	struct virtio_device *vdev = vblk->vdev;
	struct irq_affinity desc = { 0, };

	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_MQ,
				   struct virtio_blk_config, num_queues,
				   &num_vqs);
	if (err)
		num_vqs = 1;

	vblk->vqs = kmalloc_array(num_vqs, sizeof(*vblk->vqs), GFP_KERNEL);
	if (!vblk->vqs)
		return -ENOMEM;

	names = kmalloc_array(num_vqs, sizeof(*names), GFP_KERNEL);
	callbacks = kmalloc_array(num_vqs, sizeof(*callbacks), GFP_KERNEL);
	vqs = kmalloc_array(num_vqs, sizeof(*vqs), GFP_KERNEL);
	if (!names || !callbacks || !vqs) {
		err = -ENOMEM;
		goto out;
	}

	for (i = 0; i < num_vqs; i++) {
		callbacks[i] = virtblk_done;
		snprintf(vblk->vqs[i].name, VQ_NAME_LEN, "req.%d", i);
		names[i] = vblk->vqs[i].name;
	}

	/* Discover virtqueues and write information to configuration. */
	err = virtio_find_vqs(vdev, num_vqs, vqs, callbacks, names, &desc);
	if (err)
		goto out;

	for (i = 0; i < num_vqs; i++) {
		spin_lock_init(&vblk->vqs[i].lock);
		vblk->vqs[i].vq = vqs[i];
	}
	vblk->num_vqs = num_vqs;

out:
	kfree(vqs);
	kfree(callbacks);
	kfree(names);
	if (err)
		kfree(vblk->vqs);
	return err;
}
/*
 * Legacy naming scheme used for virtio devices. We are stuck with it for
 * virtio blk but don't ever use it for any new driver.
 */
static int virtblk_name_format(char *prefix, int index, char *buf, int buflen)
{
	const int base = 'z' - 'a' + 1;
	char *begin = buf + strlen(prefix);
	char *end = buf + buflen;
	char *p;
	int unit;

	p = end - 1;
	*p = '\0';
	unit = base;
	do {
		if (p == begin)
			return -EINVAL;
		*--p = 'a' + (index % unit);
		index = (index / unit) - 1;
	} while (index >= 0);

	memmove(begin, p, end - p);
	memcpy(buf, prefix, strlen(prefix));

	return 0;
}
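/*
 * Worked examples (added commentary): index 0 -> "vda", 25 -> "vdz",
 * 26 -> "vdaa", 701 -> "vdzz", 702 -> "vdaaa". The "- 1" in the loop makes
 * this a bijective base-26 numbering, so "vdz" is followed by "vdaa" with no
 * ambiguity between one- and two-letter suffixes.
 */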
static int virtblk_get_cache_mode(struct virtio_device *vdev)
{
	u8 writeback;
	int err;

	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE,
				   struct virtio_blk_config, wce,
				   &writeback);

	/*
	 * If WCE is not configurable and flush is not available,
	 * assume no writeback cache is in use.
	 */
	if (err)
		writeback = virtio_has_feature(vdev, VIRTIO_BLK_F_FLUSH);

	return writeback;
}
static void virtblk_update_cache_mode(struct virtio_device *vdev)
{
	u8 writeback = virtblk_get_cache_mode(vdev);
	struct virtio_blk *vblk = vdev->priv;

	blk_queue_write_cache(vblk->disk->queue, writeback, false);
	revalidate_disk(vblk->disk);
}
static const char *const virtblk_cache_types[] = {
	"write through", "write back"
};
static ssize_t
virtblk_cache_type_store(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t count)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct virtio_blk *vblk = disk->private_data;
	struct virtio_device *vdev = vblk->vdev;
	int i;

	BUG_ON(!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_CONFIG_WCE));
	i = sysfs_match_string(virtblk_cache_types, buf);
	if (i < 0)
		return i;

	virtio_cwrite8(vdev, offsetof(struct virtio_blk_config, wce), i);
	virtblk_update_cache_mode(vdev);
	return count;
}
static ssize_t
virtblk_cache_type_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct virtio_blk *vblk = disk->private_data;
	u8 writeback = virtblk_get_cache_mode(vblk->vdev);

	BUG_ON(writeback >= ARRAY_SIZE(virtblk_cache_types));
	return snprintf(buf, 40, "%s\n", virtblk_cache_types[writeback]);
}
static const struct device_attribute dev_attr_cache_type_ro =
	__ATTR(cache_type, S_IRUGO,
	       virtblk_cache_type_show, NULL);
static const struct device_attribute dev_attr_cache_type_rw =
	__ATTR(cache_type, S_IRUGO|S_IWUSR,
	       virtblk_cache_type_show, virtblk_cache_type_store);
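/* Usage note (added commentary): these attributes let userspace inspect the
 * cache mode and, when VIRTIO_BLK_F_CONFIG_WCE is negotiated, change it,
 * e.g. "echo 'write through' > /sys/block/vda/cache_type". */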
static int virtblk_init_request(struct blk_mq_tag_set *set, struct request *rq,
		unsigned int hctx_idx, unsigned int numa_node)
{
	struct virtio_blk *vblk = set->driver_data;
	struct virtblk_req *vbr = blk_mq_rq_to_pdu(rq);

#ifdef CONFIG_VIRTIO_BLK_SCSI
	vbr->sreq.sense = vbr->sense;
#endif
	sg_init_table(vbr->sg, vblk->sg_elems);
	return 0;
}
static int virtblk_map_queues(struct blk_mq_tag_set *set)
{
	struct virtio_blk *vblk = set->driver_data;

	return blk_mq_virtio_map_queues(set, vblk->vdev, 0);
}
#ifdef CONFIG_VIRTIO_BLK_SCSI
static void virtblk_initialize_rq(struct request *req)
{
	struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);

	scsi_req_init(&vbr->sreq);
}
#endif
static const struct blk_mq_ops virtio_mq_ops = {
	.queue_rq	= virtio_queue_rq,
	.complete	= virtblk_request_done,
	.init_request	= virtblk_init_request,
#ifdef CONFIG_VIRTIO_BLK_SCSI
	.initialize_rq_fn = virtblk_initialize_rq,
#endif
	.map_queues	= virtblk_map_queues,
};
static unsigned int virtblk_queue_depth;
module_param_named(queue_depth, virtblk_queue_depth, uint, 0444);
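/* Usage note (added commentary): the depth can be overridden at load time,
 * e.g. "modprobe virtio_blk queue_depth=128" or virtio_blk.queue_depth=128
 * on the kernel command line; mode 0444 makes it read-only via sysfs. */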
static int virtblk_probe(struct virtio_device *vdev)
{
	struct virtio_blk *vblk;
	struct request_queue *q;
	int err, index;

	u64 cap;
	u32 v, blk_size, sg_elems, opt_io_size;
	u16 min_io_size;
	u8 physical_block_exp, alignment_offset;

	if (!vdev->config->get) {
		dev_err(&vdev->dev, "%s failure: config access disabled\n",
			__func__);
		return -EINVAL;
	}

	err = ida_simple_get(&vd_index_ida, 0, minor_to_index(1 << MINORBITS),
			     GFP_KERNEL);
	if (err < 0)
		goto out;
	index = err;

	/* We need to know how many segments before we allocate. */
	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_SEG_MAX,
				   struct virtio_blk_config, seg_max,
				   &sg_elems);

	/* We need at least one SG element, whatever they say. */
	if (err || !sg_elems)
		sg_elems = 1;

	/* We need an extra sg elements at head and tail. */
	sg_elems += 2;
	vdev->priv = vblk = kmalloc(sizeof(*vblk), GFP_KERNEL);
	if (!vblk) {
		err = -ENOMEM;
		goto out_free_index;
	}

	vblk->vdev = vdev;
	vblk->sg_elems = sg_elems;

	INIT_WORK(&vblk->config_work, virtblk_config_changed_work);

	err = init_vq(vblk);
	if (err)
		goto out_free_vblk;

	/* FIXME: How many partitions?  How long is a piece of string? */
	vblk->disk = alloc_disk(1 << PART_BITS);
	if (!vblk->disk) {
		err = -ENOMEM;
		goto out_free_vq;
	}

	/* Default queue sizing is to fill the ring. */
	if (!virtblk_queue_depth) {
		virtblk_queue_depth = vblk->vqs[0].vq->num_free;
		/* ... but without indirect descs, we use 2 descs per req */
		if (!virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC))
			virtblk_queue_depth /= 2;
	}

	memset(&vblk->tag_set, 0, sizeof(vblk->tag_set));
	vblk->tag_set.ops = &virtio_mq_ops;
	vblk->tag_set.queue_depth = virtblk_queue_depth;
	vblk->tag_set.numa_node = NUMA_NO_NODE;
	vblk->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
	vblk->tag_set.cmd_size =
		sizeof(struct virtblk_req) +
		sizeof(struct scatterlist) * sg_elems;
	vblk->tag_set.driver_data = vblk;
	vblk->tag_set.nr_hw_queues = vblk->num_vqs;

	err = blk_mq_alloc_tag_set(&vblk->tag_set);
	if (err)
		goto out_put_disk;

	q = blk_mq_init_queue(&vblk->tag_set);
	if (IS_ERR(q)) {
		err = -ENOMEM;
		goto out_free_tags;
	}
	vblk->disk->queue = q;

	q->queuedata = vblk;

	virtblk_name_format("vd", index, vblk->disk->disk_name, DISK_NAME_LEN);

	vblk->disk->major = major;
	vblk->disk->first_minor = index_to_minor(index);
	vblk->disk->private_data = vblk;
	vblk->disk->fops = &virtblk_fops;
	vblk->disk->flags |= GENHD_FL_EXT_DEVT;
	vblk->index = index;

	/* configure queue flush support */
	virtblk_update_cache_mode(vdev);

	/* If disk is read-only in the host, the guest should obey */
	if (virtio_has_feature(vdev, VIRTIO_BLK_F_RO))
		set_disk_ro(vblk->disk, 1);

	/* Host must always specify the capacity. */
	virtio_cread(vdev, struct virtio_blk_config, capacity, &cap);

	/* If capacity is too big, truncate with warning. */
	if ((sector_t)cap != cap) {
		dev_warn(&vdev->dev, "Capacity %llu too large: truncating\n",
			 (unsigned long long)cap);
		cap = (sector_t)-1;
	}
	set_capacity(vblk->disk, cap);

	/* We can handle whatever the host told us to handle. */
	blk_queue_max_segments(q, vblk->sg_elems-2);

	/* No real sector limit. */
	blk_queue_max_hw_sectors(q, -1U);

	/* Host can optionally specify maximum segment size and number of
	 * segments. */
	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_SIZE_MAX,
				   struct virtio_blk_config, size_max, &v);
	if (!err)
		blk_queue_max_segment_size(q, v);
	else
		blk_queue_max_segment_size(q, -1U);

	/* Host can optionally specify the block size of the device */
	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_BLK_SIZE,
				   struct virtio_blk_config, blk_size,
				   &blk_size);
	if (!err)
		blk_queue_logical_block_size(q, blk_size);
	else
		blk_size = queue_logical_block_size(q);

	/* Use topology information if available */
	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
				   struct virtio_blk_config, physical_block_exp,
				   &physical_block_exp);
	if (!err && physical_block_exp)
		blk_queue_physical_block_size(q,
				blk_size * (1 << physical_block_exp));

	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
				   struct virtio_blk_config, alignment_offset,
				   &alignment_offset);
	if (!err && alignment_offset)
		blk_queue_alignment_offset(q, blk_size * alignment_offset);

	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
				   struct virtio_blk_config, min_io_size,
				   &min_io_size);
	if (!err && min_io_size)
		blk_queue_io_min(q, blk_size * min_io_size);

	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
				   struct virtio_blk_config, opt_io_size,
				   &opt_io_size);
	if (!err && opt_io_size)
		blk_queue_io_opt(q, blk_size * opt_io_size);

	virtio_device_ready(vdev);

	device_add_disk(&vdev->dev, vblk->disk);
	err = device_create_file(disk_to_dev(vblk->disk), &dev_attr_serial);
	if (err)
		goto out_del_disk;

	if (virtio_has_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE))
		err = device_create_file(disk_to_dev(vblk->disk),
					 &dev_attr_cache_type_rw);
	else
		err = device_create_file(disk_to_dev(vblk->disk),
					 &dev_attr_cache_type_ro);
	if (err)
		goto out_del_disk;
	return 0;

out_del_disk:
	del_gendisk(vblk->disk);
	blk_cleanup_queue(vblk->disk->queue);
out_free_tags:
	blk_mq_free_tag_set(&vblk->tag_set);
out_put_disk:
	put_disk(vblk->disk);
out_free_vq:
	vdev->config->del_vqs(vdev);
out_free_vblk:
	kfree(vblk);
out_free_index:
	ida_simple_remove(&vd_index_ida, index);
out:
	return err;
}
static void virtblk_remove(struct virtio_device *vdev)
{
	struct virtio_blk *vblk = vdev->priv;
	int index = vblk->index;
	int refc;

	/* Make sure no work handler is accessing the device. */
	flush_work(&vblk->config_work);

	del_gendisk(vblk->disk);
	blk_cleanup_queue(vblk->disk->queue);

	blk_mq_free_tag_set(&vblk->tag_set);

	/* Stop all the virtqueues. */
	vdev->config->reset(vdev);

	refc = kref_read(&disk_to_dev(vblk->disk)->kobj.kref);
	put_disk(vblk->disk);
	vdev->config->del_vqs(vdev);
	kfree(vblk->vqs);
	kfree(vblk);

	/* Only free device id if we don't have any users */
	if (refc == 1)
		ida_simple_remove(&vd_index_ida, index);
}
#ifdef CONFIG_PM_SLEEP
static int virtblk_freeze(struct virtio_device *vdev)
{
	struct virtio_blk *vblk = vdev->priv;

	/* Ensure we don't receive any more interrupts */
	vdev->config->reset(vdev);

	/* Make sure no work handler is accessing the device. */
	flush_work(&vblk->config_work);

	blk_mq_quiesce_queue(vblk->disk->queue);

	vdev->config->del_vqs(vdev);
	return 0;
}
static int virtblk_restore(struct virtio_device *vdev)
{
	struct virtio_blk *vblk = vdev->priv;
	int ret;

	ret = init_vq(vdev->priv);
	if (ret)
		return ret;

	virtio_device_ready(vdev);

	blk_mq_unquiesce_queue(vblk->disk->queue);
	return 0;
}
#endif
static const struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_BLOCK, VIRTIO_DEV_ANY_ID },
	{ 0 },
};
static unsigned int features_legacy[] = {
	VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY,
	VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
#ifdef CONFIG_VIRTIO_BLK_SCSI
	VIRTIO_BLK_F_SCSI,
#endif
	VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE,
	VIRTIO_BLK_F_MQ,
};

static unsigned int features[] = {
	VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY,
	VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
	VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE,
	VIRTIO_BLK_F_MQ,
};
static struct virtio_driver virtio_blk = {
	.feature_table			= features,
	.feature_table_size		= ARRAY_SIZE(features),
	.feature_table_legacy		= features_legacy,
	.feature_table_size_legacy	= ARRAY_SIZE(features_legacy),
	.driver.name			= KBUILD_MODNAME,
	.driver.owner			= THIS_MODULE,
	.id_table			= id_table,
	.probe				= virtblk_probe,
	.remove				= virtblk_remove,
	.config_changed			= virtblk_config_changed,
#ifdef CONFIG_PM_SLEEP
	.freeze				= virtblk_freeze,
	.restore			= virtblk_restore,
#endif
};
static int __init init(void)
{
	int error;

	virtblk_wq = alloc_workqueue("virtio-blk", 0, 0);
	if (!virtblk_wq)
		return -ENOMEM;

	major = register_blkdev(0, "virtblk");
	if (major < 0) {
		error = major;
		goto out_destroy_workqueue;
	}

	error = register_virtio_driver(&virtio_blk);
	if (error)
		goto out_unregister_blkdev;
	return 0;

out_unregister_blkdev:
	unregister_blkdev(major, "virtblk");
out_destroy_workqueue:
	destroy_workqueue(virtblk_wq);
	return error;
}
static void __exit fini(void)
{
	unregister_virtio_driver(&virtio_blk);
	unregister_blkdev(major, "virtblk");
	destroy_workqueue(virtblk_wq);
}
module_init(init);
module_exit(fini);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio block driver");
MODULE_LICENSE("GPL");