/* Linux 4.19.133: drivers/block/virtio_blk.c (linux/fpc-iii.git) */

//#define DEBUG
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/interrupt.h>
#include <linux/virtio.h>
#include <linux/virtio_blk.h>
#include <linux/scatterlist.h>
#include <linux/string_helpers.h>
#include <scsi/scsi_cmnd.h>
#include <linux/idr.h>
#include <linux/blk-mq.h>
#include <linux/blk-mq-virtio.h>
#include <linux/numa.h>

#define PART_BITS 4
#define VQ_NAME_LEN 16

static int major;
static DEFINE_IDA(vd_index_ida);

static struct workqueue_struct *virtblk_wq;

struct virtio_blk_vq {
	struct virtqueue *vq;
	spinlock_t lock;
	char name[VQ_NAME_LEN];
} ____cacheline_aligned_in_smp;

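/*
 * The alignment above presumably keeps each per-vq lock and state on its
 * own cache line, so queues serviced on different CPUs avoid false sharing.
 */
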
struct virtio_blk {
	/*
	 * This mutex must be held by anything that may run after
	 * virtblk_remove() sets vblk->vdev to NULL.
	 *
	 * blk-mq, virtqueue processing, and sysfs attribute code paths are
	 * shut down before vblk->vdev is set to NULL and therefore do not need
	 * to hold this mutex.
	 */
	struct mutex vdev_mutex;
	struct virtio_device *vdev;

	/* The disk structure for the kernel. */
	struct gendisk *disk;

	/* Block layer tags. */
	struct blk_mq_tag_set tag_set;

	/* Process context for config space updates */
	struct work_struct config_work;

	/*
	 * Tracks references from block_device_operations open/release and
	 * virtio_driver probe/remove so this object can be freed once no
	 * longer in use.
	 */
	refcount_t refs;

	/* What the host tells us, plus 2 for the request header and status byte. */
	unsigned int sg_elems;

	/* Ida index - used to track minor number allocations. */
	int index;

	/* num of vqs */
	int num_vqs;
	struct virtio_blk_vq *vqs;
};

struct virtblk_req {
#ifdef CONFIG_VIRTIO_BLK_SCSI
	struct scsi_request sreq;	/* for SCSI passthrough, must be first */
	u8 sense[SCSI_SENSE_BUFFERSIZE];
	struct virtio_scsi_inhdr in_hdr;
#endif
	struct virtio_blk_outhdr out_hdr;
	u8 status;
	struct scatterlist sg[];
};

static inline blk_status_t virtblk_result(struct virtblk_req *vbr)
{
	switch (vbr->status) {
	case VIRTIO_BLK_S_OK:
		return BLK_STS_OK;
	case VIRTIO_BLK_S_UNSUPP:
		return BLK_STS_NOTSUPP;
	default:
		return BLK_STS_IOERR;
	}
}

/*
 * If this is a packet command we need a couple of additional headers.  Behind
 * the normal outhdr we put a segment with the scsi command block, and before
 * the normal inhdr we put the sense data and the inhdr with additional status
 * information.
 */
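/*
 * For illustration (derived from the code below, not part of the original
 * file): a SCSI passthrough read with data uses a descriptor chain of
 *   device-readable:  [out_hdr][cdb]
 *   device-writable:  [data..][sense][in_hdr][status]
 * and a write moves [data..] into the device-readable half.
 */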
#ifdef CONFIG_VIRTIO_BLK_SCSI
static int virtblk_add_req_scsi(struct virtqueue *vq, struct virtblk_req *vbr,
		struct scatterlist *data_sg, bool have_data)
{
	struct scatterlist hdr, status, cmd, sense, inhdr, *sgs[6];
	unsigned int num_out = 0, num_in = 0;

	sg_init_one(&hdr, &vbr->out_hdr, sizeof(vbr->out_hdr));
	sgs[num_out++] = &hdr;
	sg_init_one(&cmd, vbr->sreq.cmd, vbr->sreq.cmd_len);
	sgs[num_out++] = &cmd;

	if (have_data) {
		if (vbr->out_hdr.type & cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_OUT))
			sgs[num_out++] = data_sg;
		else
			sgs[num_out + num_in++] = data_sg;
	}

	sg_init_one(&sense, vbr->sense, SCSI_SENSE_BUFFERSIZE);
	sgs[num_out + num_in++] = &sense;
	sg_init_one(&inhdr, &vbr->in_hdr, sizeof(vbr->in_hdr));
	sgs[num_out + num_in++] = &inhdr;
	sg_init_one(&status, &vbr->status, sizeof(vbr->status));
	sgs[num_out + num_in++] = &status;

	return virtqueue_add_sgs(vq, sgs, num_out, num_in, vbr, GFP_ATOMIC);
}

static inline void virtblk_scsi_request_done(struct request *req)
{
	struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
	struct virtio_blk *vblk = req->q->queuedata;
	struct scsi_request *sreq = &vbr->sreq;

	sreq->resid_len = virtio32_to_cpu(vblk->vdev, vbr->in_hdr.residual);
	sreq->sense_len = virtio32_to_cpu(vblk->vdev, vbr->in_hdr.sense_len);
	sreq->result = virtio32_to_cpu(vblk->vdev, vbr->in_hdr.errors);
}

static int virtblk_ioctl(struct block_device *bdev, fmode_t mode,
			     unsigned int cmd, unsigned long data)
{
	struct gendisk *disk = bdev->bd_disk;
	struct virtio_blk *vblk = disk->private_data;

	/*
	 * Only allow the generic SCSI ioctls if the host supports them.
	 */
	if (!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_SCSI))
		return -ENOTTY;

	return scsi_cmd_blk_ioctl(bdev, mode, cmd,
				  (void __user *)data);
}

#else
static inline int virtblk_add_req_scsi(struct virtqueue *vq,
		struct virtblk_req *vbr, struct scatterlist *data_sg,
		bool have_data)
{
	return -EIO;
}
static inline void virtblk_scsi_request_done(struct request *req)
{
}
#define virtblk_ioctl	NULL
#endif /* CONFIG_VIRTIO_BLK_SCSI */

static int virtblk_add_req(struct virtqueue *vq, struct virtblk_req *vbr,
		struct scatterlist *data_sg, bool have_data)
{
	struct scatterlist hdr, status, *sgs[3];
	unsigned int num_out = 0, num_in = 0;

	sg_init_one(&hdr, &vbr->out_hdr, sizeof(vbr->out_hdr));
	sgs[num_out++] = &hdr;

	if (have_data) {
		if (vbr->out_hdr.type & cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_OUT))
			sgs[num_out++] = data_sg;
		else
			sgs[num_out + num_in++] = data_sg;
	}

	sg_init_one(&status, &vbr->status, sizeof(vbr->status));
	sgs[num_out + num_in++] = &status;

	return virtqueue_add_sgs(vq, sgs, num_out, num_in, vbr, GFP_ATOMIC);
}

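/*
 * For illustration (derived from virtblk_add_req() above): a plain write is
 * queued as device-readable [out_hdr][data..] plus device-writable [status];
 * a read keeps only [out_hdr] readable and makes [data..][status] writable.
 */
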
static inline void virtblk_request_done(struct request *req)
{
	struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);

	switch (req_op(req)) {
	case REQ_OP_SCSI_IN:
	case REQ_OP_SCSI_OUT:
		virtblk_scsi_request_done(req);
		break;
	}

	blk_mq_end_request(req, virtblk_result(vbr));
}

static void virtblk_done(struct virtqueue *vq)
{
	struct virtio_blk *vblk = vq->vdev->priv;
	bool req_done = false;
	int qid = vq->index;
	struct virtblk_req *vbr;
	unsigned long flags;
	unsigned int len;

	spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
	do {
		virtqueue_disable_cb(vq);
		while ((vbr = virtqueue_get_buf(vblk->vqs[qid].vq, &len)) != NULL) {
			struct request *req = blk_mq_rq_from_pdu(vbr);

			blk_mq_complete_request(req);
			req_done = true;
		}
		if (unlikely(virtqueue_is_broken(vq)))
			break;
	} while (!virtqueue_enable_cb(vq));

	/* In case queue is stopped waiting for more buffers. */
	if (req_done)
		blk_mq_start_stopped_hw_queues(vblk->disk->queue, true);
	spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
}

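/*
 * The restart above pairs with blk_mq_stop_hw_queue() in virtio_queue_rq()
 * below: a hardware queue stopped on -ENOSPC is kicked back to life once
 * completions free ring space.
 */
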
static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
			   const struct blk_mq_queue_data *bd)
{
	struct virtio_blk *vblk = hctx->queue->queuedata;
	struct request *req = bd->rq;
	struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
	unsigned long flags;
	unsigned int num;
	int qid = hctx->queue_num;
	int err;
	bool notify = false;
	u32 type;

	BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems);

	switch (req_op(req)) {
	case REQ_OP_READ:
	case REQ_OP_WRITE:
		type = 0;
		break;
	case REQ_OP_FLUSH:
		type = VIRTIO_BLK_T_FLUSH;
		break;
	case REQ_OP_SCSI_IN:
	case REQ_OP_SCSI_OUT:
		type = VIRTIO_BLK_T_SCSI_CMD;
		break;
	case REQ_OP_DRV_IN:
		type = VIRTIO_BLK_T_GET_ID;
		break;
	default:
		WARN_ON_ONCE(1);
		return BLK_STS_IOERR;
	}

	vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, type);
	vbr->out_hdr.sector = type ?
		0 : cpu_to_virtio64(vblk->vdev, blk_rq_pos(req));
	vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(req));

	blk_mq_start_request(req);

	num = blk_rq_map_sg(hctx->queue, req, vbr->sg);
	if (num) {
		if (rq_data_dir(req) == WRITE)
			vbr->out_hdr.type |= cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_OUT);
		else
			vbr->out_hdr.type |= cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_IN);
	}

	spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
	if (blk_rq_is_scsi(req))
		err = virtblk_add_req_scsi(vblk->vqs[qid].vq, vbr, vbr->sg, num);
	else
		err = virtblk_add_req(vblk->vqs[qid].vq, vbr, vbr->sg, num);
	if (err) {
		virtqueue_kick(vblk->vqs[qid].vq);
		/* Don't stop the queue if -ENOMEM: we may have failed to
		 * bounce the buffer due to global resource outage.
		 */
		if (err == -ENOSPC)
			blk_mq_stop_hw_queue(hctx);
		spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
		switch (err) {
		case -ENOSPC:
			return BLK_STS_DEV_RESOURCE;
		case -ENOMEM:
			return BLK_STS_RESOURCE;
		default:
			return BLK_STS_IOERR;
		}
	}

	if (bd->last && virtqueue_kick_prepare(vblk->vqs[qid].vq))
		notify = true;
	spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);

	if (notify)
		virtqueue_notify(vblk->vqs[qid].vq);
	return BLK_STS_OK;
}

/* Return the serial number (id) string for *disk into *id_str. */
static int virtblk_get_id(struct gendisk *disk, char *id_str)
{
	struct virtio_blk *vblk = disk->private_data;
	struct request_queue *q = vblk->disk->queue;
	struct request *req;
	int err;

	req = blk_get_request(q, REQ_OP_DRV_IN, 0);
	if (IS_ERR(req))
		return PTR_ERR(req);

	err = blk_rq_map_kern(q, req, id_str, VIRTIO_BLK_ID_BYTES, GFP_KERNEL);
	if (err)
		goto out;

	blk_execute_rq(vblk->disk->queue, vblk->disk, req, false);
	err = blk_status_to_errno(virtblk_result(blk_mq_rq_to_pdu(req)));
out:
	blk_put_request(req);
	return err;
}

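/* virtblk_get_id() backs the "serial" sysfs attribute defined further down. */
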
static void virtblk_get(struct virtio_blk *vblk)
{
	refcount_inc(&vblk->refs);
}

static void virtblk_put(struct virtio_blk *vblk)
{
	if (refcount_dec_and_test(&vblk->refs)) {
		ida_simple_remove(&vd_index_ida, vblk->index);
		mutex_destroy(&vblk->vdev_mutex);
		kfree(vblk);
	}
}

static int virtblk_open(struct block_device *bd, fmode_t mode)
{
	struct virtio_blk *vblk = bd->bd_disk->private_data;
	int ret = 0;

	mutex_lock(&vblk->vdev_mutex);

	if (vblk->vdev)
		virtblk_get(vblk);
	else
		ret = -ENXIO;

	mutex_unlock(&vblk->vdev_mutex);
	return ret;
}

static void virtblk_release(struct gendisk *disk, fmode_t mode)
{
	struct virtio_blk *vblk = disk->private_data;

	virtblk_put(vblk);
}

/* We provide getgeo only to please some old bootloader/partitioning tools */
static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo)
{
	struct virtio_blk *vblk = bd->bd_disk->private_data;
	int ret = 0;

	mutex_lock(&vblk->vdev_mutex);

	if (!vblk->vdev) {
		ret = -ENXIO;
		goto out;
	}

	/* see if the host passed in geometry config */
	if (virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_GEOMETRY)) {
		virtio_cread(vblk->vdev, struct virtio_blk_config,
			     geometry.cylinders, &geo->cylinders);
		virtio_cread(vblk->vdev, struct virtio_blk_config,
			     geometry.heads, &geo->heads);
		virtio_cread(vblk->vdev, struct virtio_blk_config,
			     geometry.sectors, &geo->sectors);
	} else {
		/* some standard values, similar to sd */
		geo->heads = 1 << 6;
		geo->sectors = 1 << 5;
		geo->cylinders = get_capacity(bd->bd_disk) >> 11;
	}
out:
	mutex_unlock(&vblk->vdev_mutex);
	return ret;
}

static const struct block_device_operations virtblk_fops = {
	.ioctl = virtblk_ioctl,
	.owner = THIS_MODULE,
	.open = virtblk_open,
	.release = virtblk_release,
	.getgeo = virtblk_getgeo,
};

static int index_to_minor(int index)
{
	return index << PART_BITS;
}

static int minor_to_index(int minor)
{
	return minor >> PART_BITS;
}

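/*
 * Worked example: with PART_BITS = 4 each disk gets 16 minor numbers, so
 * index 0 starts at minor 0, index 1 at minor 16, and so on; minor_to_index()
 * simply inverts that shift.
 */
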
static ssize_t virtblk_serial_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);
	int err;

	/* sysfs gives us a PAGE_SIZE buffer */
	BUILD_BUG_ON(PAGE_SIZE < VIRTIO_BLK_ID_BYTES);

	buf[VIRTIO_BLK_ID_BYTES] = '\0';
	err = virtblk_get_id(disk, buf);
	if (!err)
		return strlen(buf);

	if (err == -EIO) /* Unsupported? Make it empty. */
		return 0;

	return err;
}

static DEVICE_ATTR(serial, 0444, virtblk_serial_show, NULL);

/* The queue's logical block size must be set before calling this */
static void virtblk_update_capacity(struct virtio_blk *vblk, bool resize)
{
	struct virtio_device *vdev = vblk->vdev;
	struct request_queue *q = vblk->disk->queue;
	char cap_str_2[10], cap_str_10[10];
	unsigned long long nblocks;
	u64 capacity;

	/* Host must always specify the capacity. */
	virtio_cread(vdev, struct virtio_blk_config, capacity, &capacity);

	/* If capacity is too big, truncate with warning. */
	if ((sector_t)capacity != capacity) {
		dev_warn(&vdev->dev, "Capacity %llu too large: truncating\n",
			 (unsigned long long)capacity);
		capacity = (sector_t)-1;
	}

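	/*
	 * The config-space capacity is counted in 512-byte sectors (per the
	 * virtio-blk spec), so divide by sectors-per-logical-block here,
	 * e.g. 8 for a 4096-byte logical block.
	 */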
	nblocks = DIV_ROUND_UP_ULL(capacity, queue_logical_block_size(q) >> 9);

	string_get_size(nblocks, queue_logical_block_size(q),
			STRING_UNITS_2, cap_str_2, sizeof(cap_str_2));
	string_get_size(nblocks, queue_logical_block_size(q),
			STRING_UNITS_10, cap_str_10, sizeof(cap_str_10));

	dev_notice(&vdev->dev,
		   "[%s] %s%llu %d-byte logical blocks (%s/%s)\n",
		   vblk->disk->disk_name,
		   resize ? "new size: " : "",
		   nblocks,
		   queue_logical_block_size(q),
		   cap_str_10,
		   cap_str_2);

	set_capacity(vblk->disk, capacity);
}

static void virtblk_config_changed_work(struct work_struct *work)
{
	struct virtio_blk *vblk =
		container_of(work, struct virtio_blk, config_work);
	char *envp[] = { "RESIZE=1", NULL };

	virtblk_update_capacity(vblk, true);
	revalidate_disk(vblk->disk);
	kobject_uevent_env(&disk_to_dev(vblk->disk)->kobj, KOBJ_CHANGE, envp);
}

static void virtblk_config_changed(struct virtio_device *vdev)
{
	struct virtio_blk *vblk = vdev->priv;

	queue_work(virtblk_wq, &vblk->config_work);
}

static int init_vq(struct virtio_blk *vblk)
{
	int err;
	int i;
	vq_callback_t **callbacks;
	const char **names;
	struct virtqueue **vqs;
	unsigned short num_vqs;
	struct virtio_device *vdev = vblk->vdev;
	struct irq_affinity desc = { 0, };

	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_MQ,
				   struct virtio_blk_config, num_queues,
				   &num_vqs);
	if (err)
		num_vqs = 1;

	num_vqs = min_t(unsigned int, nr_cpu_ids, num_vqs);

	vblk->vqs = kmalloc_array(num_vqs, sizeof(*vblk->vqs), GFP_KERNEL);
	if (!vblk->vqs)
		return -ENOMEM;

	names = kmalloc_array(num_vqs, sizeof(*names), GFP_KERNEL);
	callbacks = kmalloc_array(num_vqs, sizeof(*callbacks), GFP_KERNEL);
	vqs = kmalloc_array(num_vqs, sizeof(*vqs), GFP_KERNEL);
	if (!names || !callbacks || !vqs) {
		err = -ENOMEM;
		goto out;
	}

	for (i = 0; i < num_vqs; i++) {
		callbacks[i] = virtblk_done;
		snprintf(vblk->vqs[i].name, VQ_NAME_LEN, "req.%d", i);
		names[i] = vblk->vqs[i].name;
	}

	/* Discover virtqueues and write information to configuration. */
	err = virtio_find_vqs(vdev, num_vqs, vqs, callbacks, names, &desc);
	if (err)
		goto out;

	for (i = 0; i < num_vqs; i++) {
		spin_lock_init(&vblk->vqs[i].lock);
		vblk->vqs[i].vq = vqs[i];
	}
	vblk->num_vqs = num_vqs;

out:
	kfree(vqs);
	kfree(callbacks);
	kfree(names);
	if (err)
		kfree(vblk->vqs);
	return err;
}

/*
 * Legacy naming scheme used for virtio devices. We are stuck with it for
 * virtio blk but don't ever use it for any new driver.
 */
static int virtblk_name_format(char *prefix, int index, char *buf, int buflen)
{
	const int base = 'z' - 'a' + 1;
	char *begin = buf + strlen(prefix);
	char *end = buf + buflen;
	char *p;
	int unit;

	p = end - 1;
	*p = '\0';
	unit = base;
	do {
		if (p == begin)
			return -EINVAL;
		*--p = 'a' + (index % unit);
		index = (index / unit) - 1;
	} while (index >= 0);

	memmove(begin, p, end - p);
	memcpy(buf, prefix, strlen(prefix));

	return 0;
}

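/*
 * Illustrative outputs of the scheme above (assuming prefix "vd"):
 * index 0 -> "vda", 25 -> "vdz", 26 -> "vdaa", 27 -> "vdab".
 */
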
static int virtblk_get_cache_mode(struct virtio_device *vdev)
{
	u8 writeback;
	int err;

	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE,
				   struct virtio_blk_config, wce,
				   &writeback);

	/*
	 * If WCE is not configurable and flush is not available,
	 * assume no writeback cache is in use.
	 */
	if (err)
		writeback = virtio_has_feature(vdev, VIRTIO_BLK_F_FLUSH);

	return writeback;
}

static void virtblk_update_cache_mode(struct virtio_device *vdev)
{
	u8 writeback = virtblk_get_cache_mode(vdev);
	struct virtio_blk *vblk = vdev->priv;

	blk_queue_write_cache(vblk->disk->queue, writeback, false);
	revalidate_disk(vblk->disk);
}

static const char *const virtblk_cache_types[] = {
	"write through", "write back"
};

static ssize_t
virtblk_cache_type_store(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t count)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct virtio_blk *vblk = disk->private_data;
	struct virtio_device *vdev = vblk->vdev;
	int i;

	BUG_ON(!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_CONFIG_WCE));
	i = sysfs_match_string(virtblk_cache_types, buf);
	if (i < 0)
		return i;

	virtio_cwrite8(vdev, offsetof(struct virtio_blk_config, wce), i);
	virtblk_update_cache_mode(vdev);
	return count;
}

static ssize_t
virtblk_cache_type_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct virtio_blk *vblk = disk->private_data;
	u8 writeback = virtblk_get_cache_mode(vblk->vdev);

	BUG_ON(writeback >= ARRAY_SIZE(virtblk_cache_types));
	return snprintf(buf, 40, "%s\n", virtblk_cache_types[writeback]);
}

static const struct device_attribute dev_attr_cache_type_ro =
	__ATTR(cache_type, 0444,
	       virtblk_cache_type_show, NULL);
static const struct device_attribute dev_attr_cache_type_rw =
	__ATTR(cache_type, 0644,
	       virtblk_cache_type_show, virtblk_cache_type_store);

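/*
 * Only one of these is registered per disk: virtblk_probe() picks the
 * writable variant when VIRTIO_BLK_F_CONFIG_WCE is negotiated and the
 * read-only one otherwise.
 */
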
static int virtblk_init_request(struct blk_mq_tag_set *set, struct request *rq,
		unsigned int hctx_idx, unsigned int numa_node)
{
	struct virtio_blk *vblk = set->driver_data;
	struct virtblk_req *vbr = blk_mq_rq_to_pdu(rq);

#ifdef CONFIG_VIRTIO_BLK_SCSI
	vbr->sreq.sense = vbr->sense;
#endif
	sg_init_table(vbr->sg, vblk->sg_elems);
	return 0;
}

static int virtblk_map_queues(struct blk_mq_tag_set *set)
{
	struct virtio_blk *vblk = set->driver_data;

	return blk_mq_virtio_map_queues(set, vblk->vdev, 0);
}

#ifdef CONFIG_VIRTIO_BLK_SCSI
static void virtblk_initialize_rq(struct request *req)
{
	struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);

	scsi_req_init(&vbr->sreq);
}
#endif

static const struct blk_mq_ops virtio_mq_ops = {
	.queue_rq = virtio_queue_rq,
	.complete = virtblk_request_done,
	.init_request = virtblk_init_request,
#ifdef CONFIG_VIRTIO_BLK_SCSI
	.initialize_rq_fn = virtblk_initialize_rq,
#endif
	.map_queues = virtblk_map_queues,
};

static unsigned int virtblk_queue_depth;
module_param_named(queue_depth, virtblk_queue_depth, uint, 0444);

static int virtblk_probe(struct virtio_device *vdev)
{
	struct virtio_blk *vblk;
	struct request_queue *q;
	int err, index;

	u32 v, blk_size, sg_elems, opt_io_size;
	u16 min_io_size;
	u8 physical_block_exp, alignment_offset;

	if (!vdev->config->get) {
		dev_err(&vdev->dev, "%s failure: config access disabled\n",
			__func__);
		return -EINVAL;
	}

	err = ida_simple_get(&vd_index_ida, 0, minor_to_index(1 << MINORBITS),
			     GFP_KERNEL);
	if (err < 0)
		goto out;
	index = err;

	/* We need to know how many segments before we allocate. */
	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_SEG_MAX,
				   struct virtio_blk_config, seg_max,
				   &sg_elems);

	/* We need at least one SG element, whatever they say. */
	if (err || !sg_elems)
		sg_elems = 1;

	/* We need extra sg elements at head and tail. */
	sg_elems += 2;
	vdev->priv = vblk = kmalloc(sizeof(*vblk), GFP_KERNEL);
	if (!vblk) {
		err = -ENOMEM;
		goto out_free_index;
	}

	/* This reference is dropped in virtblk_remove(). */
	refcount_set(&vblk->refs, 1);
	mutex_init(&vblk->vdev_mutex);

	vblk->vdev = vdev;
	vblk->sg_elems = sg_elems;

	INIT_WORK(&vblk->config_work, virtblk_config_changed_work);

	err = init_vq(vblk);
	if (err)
		goto out_free_vblk;

	/* FIXME: How many partitions? How long is a piece of string? */
	vblk->disk = alloc_disk(1 << PART_BITS);
	if (!vblk->disk) {
		err = -ENOMEM;
		goto out_free_vq;
	}

	/* Default queue sizing is to fill the ring. */
	if (!virtblk_queue_depth) {
		virtblk_queue_depth = vblk->vqs[0].vq->num_free;
		/* ... but without indirect descs, we use 2 descs per req */
		if (!virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC))
			virtblk_queue_depth /= 2;
	}

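	/*
	 * Illustrative numbers: a 256-entry ring yields a default depth of
	 * 256 with indirect descriptors, or 128 without them, matching the
	 * two-descriptors-per-request estimate above.
	 */
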
	memset(&vblk->tag_set, 0, sizeof(vblk->tag_set));
	vblk->tag_set.ops = &virtio_mq_ops;
	vblk->tag_set.queue_depth = virtblk_queue_depth;
	vblk->tag_set.numa_node = NUMA_NO_NODE;
	vblk->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
	vblk->tag_set.cmd_size =
		sizeof(struct virtblk_req) +
		sizeof(struct scatterlist) * sg_elems;
	vblk->tag_set.driver_data = vblk;
	vblk->tag_set.nr_hw_queues = vblk->num_vqs;

	err = blk_mq_alloc_tag_set(&vblk->tag_set);
	if (err)
		goto out_put_disk;

	q = blk_mq_init_queue(&vblk->tag_set);
	if (IS_ERR(q)) {
		err = -ENOMEM;
		goto out_free_tags;
	}
	vblk->disk->queue = q;

	q->queuedata = vblk;

	virtblk_name_format("vd", index, vblk->disk->disk_name, DISK_NAME_LEN);

	vblk->disk->major = major;
	vblk->disk->first_minor = index_to_minor(index);
	vblk->disk->private_data = vblk;
	vblk->disk->fops = &virtblk_fops;
	vblk->disk->flags |= GENHD_FL_EXT_DEVT;
	vblk->index = index;

	/* configure queue flush support */
	virtblk_update_cache_mode(vdev);

	/* If disk is read-only in the host, the guest should obey */
	if (virtio_has_feature(vdev, VIRTIO_BLK_F_RO))
		set_disk_ro(vblk->disk, 1);

	/* We can handle whatever the host told us to handle. */
	blk_queue_max_segments(q, vblk->sg_elems - 2);

	/* No real sector limit. */
	blk_queue_max_hw_sectors(q, -1U);

	/* Host can optionally specify maximum segment size and number of
	 * segments. */
	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_SIZE_MAX,
				   struct virtio_blk_config, size_max, &v);
	if (!err)
		blk_queue_max_segment_size(q, v);
	else
		blk_queue_max_segment_size(q, -1U);

	/* Host can optionally specify the block size of the device */
	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_BLK_SIZE,
				   struct virtio_blk_config, blk_size,
				   &blk_size);
	if (!err)
		blk_queue_logical_block_size(q, blk_size);
	else
		blk_size = queue_logical_block_size(q);

	/* Use topology information if available */
	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
				   struct virtio_blk_config, physical_block_exp,
				   &physical_block_exp);
	if (!err && physical_block_exp)
		blk_queue_physical_block_size(q,
				blk_size * (1 << physical_block_exp));

	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
				   struct virtio_blk_config, alignment_offset,
				   &alignment_offset);
	if (!err && alignment_offset)
		blk_queue_alignment_offset(q, blk_size * alignment_offset);

	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
				   struct virtio_blk_config, min_io_size,
				   &min_io_size);
	if (!err && min_io_size)
		blk_queue_io_min(q, blk_size * min_io_size);

	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
				   struct virtio_blk_config, opt_io_size,
				   &opt_io_size);
	if (!err && opt_io_size)
		blk_queue_io_opt(q, blk_size * opt_io_size);

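	/*
	 * The topology fields above are presumably expressed in logical
	 * blocks, hence the blk_size multipliers when converting to bytes.
	 */
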
	virtblk_update_capacity(vblk, false);
	virtio_device_ready(vdev);

	device_add_disk(&vdev->dev, vblk->disk);
	err = device_create_file(disk_to_dev(vblk->disk), &dev_attr_serial);
	if (err)
		goto out_del_disk;

	if (virtio_has_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE))
		err = device_create_file(disk_to_dev(vblk->disk),
					 &dev_attr_cache_type_rw);
	else
		err = device_create_file(disk_to_dev(vblk->disk),
					 &dev_attr_cache_type_ro);
	if (err)
		goto out_del_disk;
	return 0;

out_del_disk:
	del_gendisk(vblk->disk);
	blk_cleanup_queue(vblk->disk->queue);
out_free_tags:
	blk_mq_free_tag_set(&vblk->tag_set);
out_put_disk:
	put_disk(vblk->disk);
out_free_vq:
	vdev->config->del_vqs(vdev);
	kfree(vblk->vqs);
out_free_vblk:
	kfree(vblk);
out_free_index:
	ida_simple_remove(&vd_index_ida, index);
out:
	return err;
}

static void virtblk_remove(struct virtio_device *vdev)
{
	struct virtio_blk *vblk = vdev->priv;

	/* Make sure no work handler is accessing the device. */
	flush_work(&vblk->config_work);

	del_gendisk(vblk->disk);
	blk_cleanup_queue(vblk->disk->queue);

	blk_mq_free_tag_set(&vblk->tag_set);

	mutex_lock(&vblk->vdev_mutex);

	/* Stop all the virtqueues. */
	vdev->config->reset(vdev);

	/* Virtqueues are stopped, nothing can use vblk->vdev anymore. */
	vblk->vdev = NULL;

	put_disk(vblk->disk);
	vdev->config->del_vqs(vdev);
	kfree(vblk->vqs);

	mutex_unlock(&vblk->vdev_mutex);

	virtblk_put(vblk);
}

#ifdef CONFIG_PM_SLEEP
static int virtblk_freeze(struct virtio_device *vdev)
{
	struct virtio_blk *vblk = vdev->priv;

	/* Ensure we don't receive any more interrupts */
	vdev->config->reset(vdev);

	/* Make sure no work handler is accessing the device. */
	flush_work(&vblk->config_work);

	blk_mq_quiesce_queue(vblk->disk->queue);

	vdev->config->del_vqs(vdev);
	return 0;
}

static int virtblk_restore(struct virtio_device *vdev)
{
	struct virtio_blk *vblk = vdev->priv;
	int ret;

	ret = init_vq(vdev->priv);
	if (ret)
		return ret;

	virtio_device_ready(vdev);

	blk_mq_unquiesce_queue(vblk->disk->queue);
	return 0;
}
#endif

static const struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_BLOCK, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

static unsigned int features_legacy[] = {
	VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY,
	VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
#ifdef CONFIG_VIRTIO_BLK_SCSI
	VIRTIO_BLK_F_SCSI,
#endif
	VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE,
	VIRTIO_BLK_F_MQ,
};

static unsigned int features[] = {
	VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY,
	VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
	VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE,
	VIRTIO_BLK_F_MQ,
};

static struct virtio_driver virtio_blk = {
	.feature_table			= features,
	.feature_table_size		= ARRAY_SIZE(features),
	.feature_table_legacy		= features_legacy,
	.feature_table_size_legacy	= ARRAY_SIZE(features_legacy),
	.driver.name			= KBUILD_MODNAME,
	.driver.owner			= THIS_MODULE,
	.id_table			= id_table,
	.probe				= virtblk_probe,
	.remove				= virtblk_remove,
	.config_changed			= virtblk_config_changed,
#ifdef CONFIG_PM_SLEEP
	.freeze				= virtblk_freeze,
	.restore			= virtblk_restore,
#endif
};

static int __init init(void)
{
	int error;

	virtblk_wq = alloc_workqueue("virtio-blk", 0, 0);
	if (!virtblk_wq)
		return -ENOMEM;

	major = register_blkdev(0, "virtblk");
	if (major < 0) {
		error = major;
		goto out_destroy_workqueue;
	}

	error = register_virtio_driver(&virtio_blk);
	if (error)
		goto out_unregister_blkdev;
	return 0;

out_unregister_blkdev:
	unregister_blkdev(major, "virtblk");
out_destroy_workqueue:
	destroy_workqueue(virtblk_wq);
	return error;
}

static void __exit fini(void)
{
	unregister_virtio_driver(&virtio_blk);
	unregister_blkdev(major, "virtblk");
	destroy_workqueue(virtblk_wq);
}

module_init(init);
module_exit(fini);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio block driver");
MODULE_LICENSE("GPL");