// SPDX-License-Identifier: GPL-2.0-only
//#define DEBUG
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/interrupt.h>
#include <linux/virtio.h>
#include <linux/virtio_blk.h>
#include <linux/scatterlist.h>
#include <linux/string_helpers.h>
#include <scsi/scsi_cmnd.h>
#include <linux/idr.h>
#include <linux/blk-mq.h>
#include <linux/blk-mq-virtio.h>
#include <linux/numa.h>
#define PART_BITS 4
#define VQ_NAME_LEN 16
#define MAX_DISCARD_SEGMENTS 256u

static int major;
static DEFINE_IDA(vd_index_ida);

static struct workqueue_struct *virtblk_wq;
struct virtio_blk_vq {
        struct virtqueue *vq;
        spinlock_t lock;
        char name[VQ_NAME_LEN];
} ____cacheline_aligned_in_smp;
struct virtio_blk {
        struct virtio_device *vdev;

        /* The disk structure for the kernel. */
        struct gendisk *disk;

        /* Block layer tags. */
        struct blk_mq_tag_set tag_set;

        /* Process context for config space updates */
        struct work_struct config_work;

        /* What the host tells us, plus 2 for header & trailer. */
        unsigned int sg_elems;

        /* Ida index - used to track minor number allocations. */
        int index;

        /* num of vqs */
        int num_vqs;
        struct virtio_blk_vq *vqs;
};
struct virtblk_req {
#ifdef CONFIG_VIRTIO_BLK_SCSI
        struct scsi_request sreq;       /* for SCSI passthrough, must be first */
        u8 sense[SCSI_SENSE_BUFFERSIZE];
        struct virtio_scsi_inhdr in_hdr;
#endif
        struct virtio_blk_outhdr out_hdr;
        u8 status;
        struct scatterlist sg[];
};
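/*
 * On the wire, every request is a single descriptor chain: the
 * device-readable out_hdr, then the optional data scatterlist, then the
 * one-byte device-writable status that virtblk_result() maps to a
 * blk_status_t.
 */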
static inline blk_status_t virtblk_result(struct virtblk_req *vbr)
{
        switch (vbr->status) {
        case VIRTIO_BLK_S_OK:
                return BLK_STS_OK;
        case VIRTIO_BLK_S_UNSUPP:
                return BLK_STS_NOTSUPP;
        default:
                return BLK_STS_IOERR;
        }
}
/*
 * If this is a packet command we need a couple of additional headers.  Behind
 * the normal outhdr we put a segment with the scsi command block, and before
 * the normal inhdr we put the sense data and the inhdr with additional status
 * information.
 */
#ifdef CONFIG_VIRTIO_BLK_SCSI
static int virtblk_add_req_scsi(struct virtqueue *vq, struct virtblk_req *vbr,
                struct scatterlist *data_sg, bool have_data)
{
        struct scatterlist hdr, status, cmd, sense, inhdr, *sgs[6];
        unsigned int num_out = 0, num_in = 0;

        sg_init_one(&hdr, &vbr->out_hdr, sizeof(vbr->out_hdr));
        sgs[num_out++] = &hdr;
        sg_init_one(&cmd, vbr->sreq.cmd, vbr->sreq.cmd_len);
        sgs[num_out++] = &cmd;

        if (have_data) {
                if (vbr->out_hdr.type & cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_OUT))
                        sgs[num_out++] = data_sg;
                else
                        sgs[num_out + num_in++] = data_sg;
        }

        sg_init_one(&sense, vbr->sense, SCSI_SENSE_BUFFERSIZE);
        sgs[num_out + num_in++] = &sense;
        sg_init_one(&inhdr, &vbr->in_hdr, sizeof(vbr->in_hdr));
        sgs[num_out + num_in++] = &inhdr;
        sg_init_one(&status, &vbr->status, sizeof(vbr->status));
        sgs[num_out + num_in++] = &status;

        return virtqueue_add_sgs(vq, sgs, num_out, num_in, vbr, GFP_ATOMIC);
}
static inline void virtblk_scsi_request_done(struct request *req)
{
        struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
        struct virtio_blk *vblk = req->q->queuedata;
        struct scsi_request *sreq = &vbr->sreq;

        sreq->resid_len = virtio32_to_cpu(vblk->vdev, vbr->in_hdr.residual);
        sreq->sense_len = virtio32_to_cpu(vblk->vdev, vbr->in_hdr.sense_len);
        sreq->result = virtio32_to_cpu(vblk->vdev, vbr->in_hdr.errors);
}
static int virtblk_ioctl(struct block_device *bdev, fmode_t mode,
                             unsigned int cmd, unsigned long data)
{
        struct gendisk *disk = bdev->bd_disk;
        struct virtio_blk *vblk = disk->private_data;

        /*
         * Only allow the generic SCSI ioctls if the host can support it.
         */
        if (!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_SCSI))
                return -ENOTTY;

        return scsi_cmd_blk_ioctl(bdev, mode, cmd,
                                  (void __user *)data);
}
#else
static inline int virtblk_add_req_scsi(struct virtqueue *vq,
                struct virtblk_req *vbr, struct scatterlist *data_sg,
                bool have_data)
{
        return -EIO;
}

static inline void virtblk_scsi_request_done(struct request *req)
{
}

#define virtblk_ioctl   NULL
#endif /* CONFIG_VIRTIO_BLK_SCSI */
static int virtblk_add_req(struct virtqueue *vq, struct virtblk_req *vbr,
                struct scatterlist *data_sg, bool have_data)
{
        struct scatterlist hdr, status, *sgs[3];
        unsigned int num_out = 0, num_in = 0;

        sg_init_one(&hdr, &vbr->out_hdr, sizeof(vbr->out_hdr));
        sgs[num_out++] = &hdr;

        if (have_data) {
                if (vbr->out_hdr.type & cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_OUT))
                        sgs[num_out++] = data_sg;
                else
                        sgs[num_out + num_in++] = data_sg;
        }

        sg_init_one(&status, &vbr->status, sizeof(vbr->status));
        sgs[num_out + num_in++] = &status;

        return virtqueue_add_sgs(vq, sgs, num_out, num_in, vbr, GFP_ATOMIC);
}
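/*
 * Layout produced above, for reference: a read becomes out_hdr
 * (device-readable), then the data sg (device-writable), then status,
 * i.e. num_out = 1 and num_in = 2; a write keeps the data on the
 * device-readable side instead, giving num_out = 2 and num_in = 1.
 */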
static int virtblk_setup_discard_write_zeroes(struct request *req, bool unmap)
{
        unsigned short segments = blk_rq_nr_discard_segments(req);
        unsigned short n = 0;
        struct virtio_blk_discard_write_zeroes *range;
        struct bio *bio;
        u32 flags = 0;

        if (unmap)
                flags |= VIRTIO_BLK_WRITE_ZEROES_FLAG_UNMAP;

        range = kmalloc_array(segments, sizeof(*range), GFP_ATOMIC);
        if (!range)
                return -ENOMEM;

        __rq_for_each_bio(bio, req) {
                u64 sector = bio->bi_iter.bi_sector;
                u32 num_sectors = bio->bi_iter.bi_size >> SECTOR_SHIFT;

                range[n].flags = cpu_to_le32(flags);
                range[n].num_sectors = cpu_to_le32(num_sectors);
                range[n].sector = cpu_to_le64(sector);
                n++;
        }

        req->special_vec.bv_page = virt_to_page(range);
        req->special_vec.bv_offset = offset_in_page(range);
        req->special_vec.bv_len = sizeof(*range) * segments;
        req->rq_flags |= RQF_SPECIAL_PAYLOAD;

        return 0;
}
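/*
 * The range[] array built above becomes the request payload via
 * special_vec and is freed in virtblk_request_done(). Each element
 * carries a little-endian sector, sector count, and flags word, matching
 * the virtio spec layout of struct virtio_blk_discard_write_zeroes.
 */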
static inline void virtblk_request_done(struct request *req)
{
        struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);

        if (req->rq_flags & RQF_SPECIAL_PAYLOAD) {
                kfree(page_address(req->special_vec.bv_page) +
                      req->special_vec.bv_offset);
        }

        switch (req_op(req)) {
        case REQ_OP_SCSI_IN:
        case REQ_OP_SCSI_OUT:
                virtblk_scsi_request_done(req);
                break;
        }

        blk_mq_end_request(req, virtblk_result(vbr));
}
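/*
 * Completion callback for a virtqueue. The disable_cb/enable_cb loop
 * below is the standard virtio idiom: callbacks stay disabled while the
 * used ring is drained, and virtqueue_enable_cb() returning false means
 * more buffers arrived in the meantime, so we go around again rather
 * than risk missing a completion.
 */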
static void virtblk_done(struct virtqueue *vq)
{
        struct virtio_blk *vblk = vq->vdev->priv;
        bool req_done = false;
        int qid = vq->index;
        struct virtblk_req *vbr;
        unsigned long flags;
        unsigned int len;

        spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
        do {
                virtqueue_disable_cb(vq);
                while ((vbr = virtqueue_get_buf(vblk->vqs[qid].vq, &len)) != NULL) {
                        struct request *req = blk_mq_rq_from_pdu(vbr);

                        blk_mq_complete_request(req);
                        req_done = true;
                }
                if (unlikely(virtqueue_is_broken(vq)))
                        break;
        } while (!virtqueue_enable_cb(vq));

        /* In case queue is stopped waiting for more buffers. */
        if (req_done)
                blk_mq_start_stopped_hw_queues(vblk->disk->queue, true);
        spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
}
static void virtio_commit_rqs(struct blk_mq_hw_ctx *hctx)
{
        struct virtio_blk *vblk = hctx->queue->queuedata;
        struct virtio_blk_vq *vq = &vblk->vqs[hctx->queue_num];
        bool kick;

        spin_lock_irq(&vq->lock);
        kick = virtqueue_kick_prepare(vq->vq);
        spin_unlock_irq(&vq->lock);

        if (kick)
                virtqueue_notify(vq->vq);
}
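/*
 * Submission path. Note that the BUG_ON() below relies on sg_elems
 * already including the two slots reserved for the out_hdr and status
 * segments, so the data segments of a request must fit in sg_elems - 2.
 */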
static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
                           const struct blk_mq_queue_data *bd)
{
        struct virtio_blk *vblk = hctx->queue->queuedata;
        struct request *req = bd->rq;
        struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
        unsigned long flags;
        unsigned int num;
        int qid = hctx->queue_num;
        int err;
        bool notify = false;
        bool unmap = false;
        u32 type;

        BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems);

        switch (req_op(req)) {
        case REQ_OP_READ:
        case REQ_OP_WRITE:
                type = 0;
                break;
        case REQ_OP_FLUSH:
                type = VIRTIO_BLK_T_FLUSH;
                break;
        case REQ_OP_DISCARD:
                type = VIRTIO_BLK_T_DISCARD;
                break;
        case REQ_OP_WRITE_ZEROES:
                type = VIRTIO_BLK_T_WRITE_ZEROES;
                unmap = !(req->cmd_flags & REQ_NOUNMAP);
                break;
        case REQ_OP_SCSI_IN:
        case REQ_OP_SCSI_OUT:
                type = VIRTIO_BLK_T_SCSI_CMD;
                break;
        case REQ_OP_DRV_IN:
                type = VIRTIO_BLK_T_GET_ID;
                break;
        default:
                WARN_ON_ONCE(1);
                return BLK_STS_IOERR;
        }

        vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, type);
        vbr->out_hdr.sector = type ?
                0 : cpu_to_virtio64(vblk->vdev, blk_rq_pos(req));
        vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(req));

        blk_mq_start_request(req);

        if (type == VIRTIO_BLK_T_DISCARD || type == VIRTIO_BLK_T_WRITE_ZEROES) {
                err = virtblk_setup_discard_write_zeroes(req, unmap);
                if (err)
                        return BLK_STS_RESOURCE;
        }

        num = blk_rq_map_sg(hctx->queue, req, vbr->sg);
        if (num) {
                if (rq_data_dir(req) == WRITE)
                        vbr->out_hdr.type |= cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_OUT);
                else
                        vbr->out_hdr.type |= cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_IN);
        }

        spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
        if (blk_rq_is_scsi(req))
                err = virtblk_add_req_scsi(vblk->vqs[qid].vq, vbr, vbr->sg, num);
        else
                err = virtblk_add_req(vblk->vqs[qid].vq, vbr, vbr->sg, num);
        if (err) {
                virtqueue_kick(vblk->vqs[qid].vq);
                blk_mq_stop_hw_queue(hctx);
                spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
                /* Out of mem doesn't actually happen, since we fall back
                 * to direct descriptors */
                if (err == -ENOMEM || err == -ENOSPC)
                        return BLK_STS_DEV_RESOURCE;
                return BLK_STS_IOERR;
        }

        if (bd->last && virtqueue_kick_prepare(vblk->vqs[qid].vq))
                notify = true;
        spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);

        if (notify)
                virtqueue_notify(vblk->vqs[qid].vq);
        return BLK_STS_OK;
}
/* return id (s/n) string for *disk to *id_str */
static int virtblk_get_id(struct gendisk *disk, char *id_str)
{
        struct virtio_blk *vblk = disk->private_data;
        struct request_queue *q = vblk->disk->queue;
        struct request *req;
        int err;

        req = blk_get_request(q, REQ_OP_DRV_IN, 0);
        if (IS_ERR(req))
                return PTR_ERR(req);

        err = blk_rq_map_kern(q, req, id_str, VIRTIO_BLK_ID_BYTES, GFP_KERNEL);
        if (err)
                goto out;

        blk_execute_rq(vblk->disk->queue, vblk->disk, req, false);
        err = blk_status_to_errno(virtblk_result(blk_mq_rq_to_pdu(req)));
out:
        blk_put_request(req);
        return err;
}
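/*
 * The GET_ID request above travels as an ordinary REQ_OP_DRV_IN request;
 * the device fills id_str with a VIRTIO_BLK_ID_BYTES (20-byte) serial
 * string, which serial_show() below exposes through sysfs.
 */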
/* We provide getgeo only to please some old bootloader/partitioning tools */
static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo)
{
        struct virtio_blk *vblk = bd->bd_disk->private_data;

        /* see if the host passed in geometry config */
        if (virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_GEOMETRY)) {
                virtio_cread(vblk->vdev, struct virtio_blk_config,
                             geometry.cylinders, &geo->cylinders);
                virtio_cread(vblk->vdev, struct virtio_blk_config,
                             geometry.heads, &geo->heads);
                virtio_cread(vblk->vdev, struct virtio_blk_config,
                             geometry.sectors, &geo->sectors);
        } else {
                /* some standard values, similar to sd */
                geo->heads = 1 << 6;
                geo->sectors = 1 << 5;
                geo->cylinders = get_capacity(bd->bd_disk) >> 11;
        }
        return 0;
}
static const struct block_device_operations virtblk_fops = {
        .ioctl = virtblk_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl = blkdev_compat_ptr_ioctl,
#endif
        .owner = THIS_MODULE,
        .getgeo = virtblk_getgeo,
};
static int index_to_minor(int index)
{
        return index << PART_BITS;
}

static int minor_to_index(int minor)
{
        return minor >> PART_BITS;
}
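/*
 * With PART_BITS = 4, each disk owns 16 minors: index 0 maps to minor 0
 * (vda and its partitions), index 1 to minor 16 (vdb), and so on.
 */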
static ssize_t serial_show(struct device *dev,
                           struct device_attribute *attr, char *buf)
{
        struct gendisk *disk = dev_to_disk(dev);
        int err;

        /* sysfs gives us a PAGE_SIZE buffer */
        BUILD_BUG_ON(PAGE_SIZE < VIRTIO_BLK_ID_BYTES);

        buf[VIRTIO_BLK_ID_BYTES] = '\0';
        err = virtblk_get_id(disk, buf);
        if (!err)
                return strlen(buf);

        if (err == -EIO) /* Unsupported?  Make it empty. */
                return 0;

        return err;
}

static DEVICE_ATTR_RO(serial);
/* The queue's logical block size must be set before calling this */
static void virtblk_update_capacity(struct virtio_blk *vblk, bool resize)
{
        struct virtio_device *vdev = vblk->vdev;
        struct request_queue *q = vblk->disk->queue;
        char cap_str_2[10], cap_str_10[10];
        unsigned long long nblocks;
        u64 capacity;

        /* Host must always specify the capacity. */
        virtio_cread(vdev, struct virtio_blk_config, capacity, &capacity);

        /* If capacity is too big, truncate with warning. */
        if ((sector_t)capacity != capacity) {
                dev_warn(&vdev->dev, "Capacity %llu too large: truncating\n",
                         (unsigned long long)capacity);
                capacity = (sector_t)-1;
        }

        nblocks = DIV_ROUND_UP_ULL(capacity, queue_logical_block_size(q) >> 9);

        string_get_size(nblocks, queue_logical_block_size(q),
                        STRING_UNITS_2, cap_str_2, sizeof(cap_str_2));
        string_get_size(nblocks, queue_logical_block_size(q),
                        STRING_UNITS_10, cap_str_10, sizeof(cap_str_10));

        dev_notice(&vdev->dev,
                   "[%s] %s%llu %d-byte logical blocks (%s/%s)\n",
                   vblk->disk->disk_name,
                   resize ? "new size: " : "",
                   nblocks,
                   queue_logical_block_size(q),
                   cap_str_10,
                   cap_str_2);

        set_capacity(vblk->disk, capacity);
}
static void virtblk_config_changed_work(struct work_struct *work)
{
        struct virtio_blk *vblk =
                container_of(work, struct virtio_blk, config_work);
        char *envp[] = { "RESIZE=1", NULL };

        virtblk_update_capacity(vblk, true);
        revalidate_disk(vblk->disk);
        kobject_uevent_env(&disk_to_dev(vblk->disk)->kobj, KOBJ_CHANGE, envp);
}

static void virtblk_config_changed(struct virtio_device *vdev)
{
        struct virtio_blk *vblk = vdev->priv;

        queue_work(virtblk_wq, &vblk->config_work);
}
static int init_vq(struct virtio_blk *vblk)
{
        int err;
        int i;
        vq_callback_t **callbacks;
        const char **names;
        struct virtqueue **vqs;
        unsigned short num_vqs;
        struct virtio_device *vdev = vblk->vdev;
        struct irq_affinity desc = { 0, };

        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_MQ,
                                   struct virtio_blk_config, num_queues,
                                   &num_vqs);
        if (err)
                num_vqs = 1;

        num_vqs = min_t(unsigned int, nr_cpu_ids, num_vqs);

        vblk->vqs = kmalloc_array(num_vqs, sizeof(*vblk->vqs), GFP_KERNEL);
        if (!vblk->vqs)
                return -ENOMEM;

        names = kmalloc_array(num_vqs, sizeof(*names), GFP_KERNEL);
        callbacks = kmalloc_array(num_vqs, sizeof(*callbacks), GFP_KERNEL);
        vqs = kmalloc_array(num_vqs, sizeof(*vqs), GFP_KERNEL);
        if (!names || !callbacks || !vqs) {
                err = -ENOMEM;
                goto out;
        }

        for (i = 0; i < num_vqs; i++) {
                callbacks[i] = virtblk_done;
                snprintf(vblk->vqs[i].name, VQ_NAME_LEN, "req.%d", i);
                names[i] = vblk->vqs[i].name;
        }

        /* Discover virtqueues and write information to configuration. */
        err = virtio_find_vqs(vdev, num_vqs, vqs, callbacks, names, &desc);
        if (err)
                goto out;

        for (i = 0; i < num_vqs; i++) {
                spin_lock_init(&vblk->vqs[i].lock);
                vblk->vqs[i].vq = vqs[i];
        }
        vblk->num_vqs = num_vqs;

out:
        kfree(vqs);
        kfree(callbacks);
        kfree(names);
        if (err)
                kfree(vblk->vqs);
        return err;
}
/*
 * Legacy naming scheme used for virtio devices.  We are stuck with it for
 * virtio blk but don't ever use it for any new driver.
 */
static int virtblk_name_format(char *prefix, int index, char *buf, int buflen)
{
        const int base = 'z' - 'a' + 1;
        char *begin = buf + strlen(prefix);
        char *end = buf + buflen;
        char *p;
        int unit;

        p = end - 1;
        *p = '\0';
        unit = base;
        do {
                if (p == begin)
                        return -EINVAL;
                *--p = 'a' + (index % unit);
                index = (index / unit) - 1;
        } while (index >= 0);

        memmove(begin, p, end - p);
        memcpy(buf, prefix, strlen(prefix));

        return 0;
}
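/*
 * Worked examples of the scheme above: index 0 -> "vda", 25 -> "vdz",
 * 26 -> "vdaa", 701 -> "vdzz", 702 -> "vdaaa".
 */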
static int virtblk_get_cache_mode(struct virtio_device *vdev)
{
        u8 writeback;
        int err;

        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE,
                                   struct virtio_blk_config, wce,
                                   &writeback);

        /*
         * If WCE is not configurable and flush is not available,
         * assume no writeback cache is in use.
         */
        if (err)
                writeback = virtio_has_feature(vdev, VIRTIO_BLK_F_FLUSH);

        return writeback;
}
static void virtblk_update_cache_mode(struct virtio_device *vdev)
{
        u8 writeback = virtblk_get_cache_mode(vdev);
        struct virtio_blk *vblk = vdev->priv;

        blk_queue_write_cache(vblk->disk->queue, writeback, false);
        revalidate_disk(vblk->disk);
}
static const char *const virtblk_cache_types[] = {
        "write through", "write back"
};
static ssize_t
cache_type_store(struct device *dev, struct device_attribute *attr,
                 const char *buf, size_t count)
{
        struct gendisk *disk = dev_to_disk(dev);
        struct virtio_blk *vblk = disk->private_data;
        struct virtio_device *vdev = vblk->vdev;
        int i;

        BUG_ON(!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_CONFIG_WCE));
        i = sysfs_match_string(virtblk_cache_types, buf);
        if (i < 0)
                return i;

        virtio_cwrite8(vdev, offsetof(struct virtio_blk_config, wce), i);
        virtblk_update_cache_mode(vdev);
        return count;
}
static ssize_t
cache_type_show(struct device *dev, struct device_attribute *attr, char *buf)
{
        struct gendisk *disk = dev_to_disk(dev);
        struct virtio_blk *vblk = disk->private_data;
        u8 writeback = virtblk_get_cache_mode(vblk->vdev);

        BUG_ON(writeback >= ARRAY_SIZE(virtblk_cache_types));
        return snprintf(buf, 40, "%s\n", virtblk_cache_types[writeback]);
}

static DEVICE_ATTR_RW(cache_type);
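/*
 * Usage sketch (assuming a disk named vda):
 *   cat /sys/block/vda/cache_type
 *   echo "write back" > /sys/block/vda/cache_type
 * The attribute is only writable when VIRTIO_BLK_F_CONFIG_WCE was
 * negotiated; otherwise virtblk_attrs_are_visible() below degrades it
 * to read-only.
 */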
static struct attribute *virtblk_attrs[] = {
        &dev_attr_serial.attr,
        &dev_attr_cache_type.attr,
        NULL,
};
static umode_t virtblk_attrs_are_visible(struct kobject *kobj,
                struct attribute *a, int n)
{
        struct device *dev = container_of(kobj, struct device, kobj);
        struct gendisk *disk = dev_to_disk(dev);
        struct virtio_blk *vblk = disk->private_data;
        struct virtio_device *vdev = vblk->vdev;

        if (a == &dev_attr_cache_type.attr &&
            !virtio_has_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE))
                return S_IRUGO;

        return a->mode;
}
static const struct attribute_group virtblk_attr_group = {
        .attrs = virtblk_attrs,
        .is_visible = virtblk_attrs_are_visible,
};

static const struct attribute_group *virtblk_attr_groups[] = {
        &virtblk_attr_group,
        NULL,
};
static int virtblk_init_request(struct blk_mq_tag_set *set, struct request *rq,
                unsigned int hctx_idx, unsigned int numa_node)
{
        struct virtio_blk *vblk = set->driver_data;
        struct virtblk_req *vbr = blk_mq_rq_to_pdu(rq);

#ifdef CONFIG_VIRTIO_BLK_SCSI
        vbr->sreq.sense = vbr->sense;
#endif
        sg_init_table(vbr->sg, vblk->sg_elems);
        return 0;
}
static int virtblk_map_queues(struct blk_mq_tag_set *set)
{
        struct virtio_blk *vblk = set->driver_data;

        return blk_mq_virtio_map_queues(&set->map[HCTX_TYPE_DEFAULT],
                                        vblk->vdev, 0);
}
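/*
 * blk_mq_virtio_map_queues() spreads the hardware contexts according to
 * the device's interrupt affinity (falling back to the default CPU
 * mapping when the transport reports none), so each hctx ends up paired
 * with the virtqueue whose interrupt targets the same CPUs.
 */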
#ifdef CONFIG_VIRTIO_BLK_SCSI
static void virtblk_initialize_rq(struct request *req)
{
        struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);

        scsi_req_init(&vbr->sreq);
}
#endif
static const struct blk_mq_ops virtio_mq_ops = {
        .queue_rq       = virtio_queue_rq,
        .commit_rqs     = virtio_commit_rqs,
        .complete       = virtblk_request_done,
        .init_request   = virtblk_init_request,
#ifdef CONFIG_VIRTIO_BLK_SCSI
        .initialize_rq_fn = virtblk_initialize_rq,
#endif
        .map_queues     = virtblk_map_queues,
};
static unsigned int virtblk_queue_depth;
module_param_named(queue_depth, virtblk_queue_depth, uint, 0444);
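/*
 * Leaving queue_depth at 0 (the default) sizes the queue from the first
 * virtqueue in virtblk_probe(): the full ring when indirect descriptors
 * are available, half of it otherwise, since each request then needs two
 * descriptors.
 */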
static int virtblk_probe(struct virtio_device *vdev)
{
        struct virtio_blk *vblk;
        struct request_queue *q;
        int err, index;

        u32 v, blk_size, max_size, sg_elems, opt_io_size;
        u16 min_io_size;
        u8 physical_block_exp, alignment_offset;

        if (!vdev->config->get) {
                dev_err(&vdev->dev, "%s failure: config access disabled\n",
                        __func__);
                return -EINVAL;
        }

        err = ida_simple_get(&vd_index_ida, 0, minor_to_index(1 << MINORBITS),
                             GFP_KERNEL);
        if (err < 0)
                goto out;
        index = err;

        /* We need to know how many segments before we allocate. */
        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_SEG_MAX,
                                   struct virtio_blk_config, seg_max,
                                   &sg_elems);

        /* We need at least one SG element, whatever they say. */
        if (err || !sg_elems)
                sg_elems = 1;

        /* We need extra sg elements at head and tail. */
        sg_elems += 2;
        vdev->priv = vblk = kmalloc(sizeof(*vblk), GFP_KERNEL);
        if (!vblk) {
                err = -ENOMEM;
                goto out_free_index;
        }

        vblk->vdev = vdev;
        vblk->sg_elems = sg_elems;
        INIT_WORK(&vblk->config_work, virtblk_config_changed_work);

        err = init_vq(vblk);
        if (err)
                goto out_free_vblk;

        /* FIXME: How many partitions?  How long is a piece of string? */
        vblk->disk = alloc_disk(1 << PART_BITS);
        if (!vblk->disk) {
                err = -ENOMEM;
                goto out_free_vq;
        }

        /* Default queue sizing is to fill the ring. */
        if (!virtblk_queue_depth) {
                virtblk_queue_depth = vblk->vqs[0].vq->num_free;
                /* ... but without indirect descs, we use 2 descs per req */
                if (!virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC))
                        virtblk_queue_depth /= 2;
        }
        memset(&vblk->tag_set, 0, sizeof(vblk->tag_set));
        vblk->tag_set.ops = &virtio_mq_ops;
        vblk->tag_set.queue_depth = virtblk_queue_depth;
        vblk->tag_set.numa_node = NUMA_NO_NODE;
        vblk->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
        vblk->tag_set.cmd_size =
                sizeof(struct virtblk_req) +
                sizeof(struct scatterlist) * sg_elems;
        vblk->tag_set.driver_data = vblk;
        vblk->tag_set.nr_hw_queues = vblk->num_vqs;

        err = blk_mq_alloc_tag_set(&vblk->tag_set);
        if (err)
                goto out_put_disk;
        q = blk_mq_init_queue(&vblk->tag_set);
        if (IS_ERR(q)) {
                err = -ENOMEM;
                goto out_free_tags;
        }
        vblk->disk->queue = q;

        q->queuedata = vblk;

        virtblk_name_format("vd", index, vblk->disk->disk_name, DISK_NAME_LEN);

        vblk->disk->major = major;
        vblk->disk->first_minor = index_to_minor(index);
        vblk->disk->private_data = vblk;
        vblk->disk->fops = &virtblk_fops;
        vblk->disk->flags |= GENHD_FL_EXT_DEVT;
        vblk->index = index;

        /* configure queue flush support */
        virtblk_update_cache_mode(vdev);

        /* If disk is read-only in the host, the guest should obey */
        if (virtio_has_feature(vdev, VIRTIO_BLK_F_RO))
                set_disk_ro(vblk->disk, 1);

        /* We can handle whatever the host told us to handle. */
        blk_queue_max_segments(q, vblk->sg_elems - 2);

        /* No real sector limit. */
        blk_queue_max_hw_sectors(q, -1U);
        max_size = virtio_max_dma_size(vdev);

        /* Host can optionally specify maximum segment size and number of
         * segments. */
        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_SIZE_MAX,
                                   struct virtio_blk_config, size_max, &v);
        if (!err)
                max_size = min(max_size, v);

        blk_queue_max_segment_size(q, max_size);

        /* Host can optionally specify the block size of the device */
        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_BLK_SIZE,
                                   struct virtio_blk_config, blk_size,
                                   &blk_size);
        if (!err)
                blk_queue_logical_block_size(q, blk_size);
        else
                blk_size = queue_logical_block_size(q);

        /* Use topology information if available */
        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
                                   struct virtio_blk_config, physical_block_exp,
                                   &physical_block_exp);
        if (!err && physical_block_exp)
                blk_queue_physical_block_size(q,
                                blk_size * (1 << physical_block_exp));

        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
                                   struct virtio_blk_config, alignment_offset,
                                   &alignment_offset);
        if (!err && alignment_offset)
                blk_queue_alignment_offset(q, blk_size * alignment_offset);

        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
                                   struct virtio_blk_config, min_io_size,
                                   &min_io_size);
        if (!err && min_io_size)
                blk_queue_io_min(q, blk_size * min_io_size);

        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
                                   struct virtio_blk_config, opt_io_size,
                                   &opt_io_size);
        if (!err && opt_io_size)
                blk_queue_io_opt(q, blk_size * opt_io_size);
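
        /*
         * Example of the topology math above: with 512-byte logical
         * blocks, a physical_block_exp of 3 advertises a 4096-byte
         * physical block size to the block layer.
         */
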
        if (virtio_has_feature(vdev, VIRTIO_BLK_F_DISCARD)) {
                q->limits.discard_granularity = blk_size;

                virtio_cread(vdev, struct virtio_blk_config,
                             discard_sector_alignment, &v);
                q->limits.discard_alignment = v ? v << SECTOR_SHIFT : 0;

                virtio_cread(vdev, struct virtio_blk_config,
                             max_discard_sectors, &v);
                blk_queue_max_discard_sectors(q, v ? v : UINT_MAX);

                virtio_cread(vdev, struct virtio_blk_config, max_discard_seg,
                             &v);
                blk_queue_max_discard_segments(q,
                                               min_not_zero(v,
                                                            MAX_DISCARD_SEGMENTS));

                blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
        }
        if (virtio_has_feature(vdev, VIRTIO_BLK_F_WRITE_ZEROES)) {
                virtio_cread(vdev, struct virtio_blk_config,
                             max_write_zeroes_sectors, &v);
                blk_queue_max_write_zeroes_sectors(q, v ? v : UINT_MAX);
        }

        virtblk_update_capacity(vblk, false);
        virtio_device_ready(vdev);

        device_add_disk(&vdev->dev, vblk->disk, virtblk_attr_groups);
        return 0;
out_free_tags:
        blk_mq_free_tag_set(&vblk->tag_set);
out_put_disk:
        put_disk(vblk->disk);
out_free_vq:
        vdev->config->del_vqs(vdev);
out_free_vblk:
        kfree(vblk);
out_free_index:
        ida_simple_remove(&vd_index_ida, index);
out:
        return err;
}
static void virtblk_remove(struct virtio_device *vdev)
{
        struct virtio_blk *vblk = vdev->priv;
        int index = vblk->index;
        int refc;

        /* Make sure no work handler is accessing the device. */
        flush_work(&vblk->config_work);

        del_gendisk(vblk->disk);
        blk_cleanup_queue(vblk->disk->queue);

        blk_mq_free_tag_set(&vblk->tag_set);

        /* Stop all the virtqueues. */
        vdev->config->reset(vdev);

        refc = kref_read(&disk_to_dev(vblk->disk)->kobj.kref);
        put_disk(vblk->disk);
        vdev->config->del_vqs(vdev);
        kfree(vblk->vqs);
        kfree(vblk);

        /* Only free device id if we don't have any users */
        if (refc == 1)
                ida_simple_remove(&vd_index_ida, index);
}
#ifdef CONFIG_PM_SLEEP
static int virtblk_freeze(struct virtio_device *vdev)
{
        struct virtio_blk *vblk = vdev->priv;

        /* Ensure we don't receive any more interrupts */
        vdev->config->reset(vdev);

        /* Make sure no work handler is accessing the device. */
        flush_work(&vblk->config_work);

        blk_mq_quiesce_queue(vblk->disk->queue);

        vdev->config->del_vqs(vdev);
        return 0;
}

static int virtblk_restore(struct virtio_device *vdev)
{
        struct virtio_blk *vblk = vdev->priv;
        int ret;

        ret = init_vq(vdev->priv);
        if (ret)
                return ret;

        virtio_device_ready(vdev);

        blk_mq_unquiesce_queue(vblk->disk->queue);
        return 0;
}
#endif
static const struct virtio_device_id id_table[] = {
        { VIRTIO_ID_BLOCK, VIRTIO_DEV_ANY_ID },
        { 0 },
};
static unsigned int features_legacy[] = {
        VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY,
        VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
#ifdef CONFIG_VIRTIO_BLK_SCSI
        VIRTIO_BLK_F_SCSI,
#endif
        VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE,
        VIRTIO_BLK_F_MQ, VIRTIO_BLK_F_DISCARD, VIRTIO_BLK_F_WRITE_ZEROES,
};
static unsigned int features[] = {
        VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY,
        VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
        VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE,
        VIRTIO_BLK_F_MQ, VIRTIO_BLK_F_DISCARD, VIRTIO_BLK_F_WRITE_ZEROES,
};
static struct virtio_driver virtio_blk = {
        .feature_table                  = features,
        .feature_table_size             = ARRAY_SIZE(features),
        .feature_table_legacy           = features_legacy,
        .feature_table_size_legacy      = ARRAY_SIZE(features_legacy),
        .driver.name                    = KBUILD_MODNAME,
        .driver.owner                   = THIS_MODULE,
        .id_table                       = id_table,
        .probe                          = virtblk_probe,
        .remove                         = virtblk_remove,
        .config_changed                 = virtblk_config_changed,
#ifdef CONFIG_PM_SLEEP
        .freeze                         = virtblk_freeze,
        .restore                        = virtblk_restore,
#endif
};
static int __init init(void)
{
        int error;

        virtblk_wq = alloc_workqueue("virtio-blk", 0, 0);
        if (!virtblk_wq)
                return -ENOMEM;

        major = register_blkdev(0, "virtblk");
        if (major < 0) {
                error = major;
                goto out_destroy_workqueue;
        }

        error = register_virtio_driver(&virtio_blk);
        if (error)
                goto out_unregister_blkdev;
        return 0;

out_unregister_blkdev:
        unregister_blkdev(major, "virtblk");
out_destroy_workqueue:
        destroy_workqueue(virtblk_wq);
        return error;
}
static void __exit fini(void)
{
        unregister_virtio_driver(&virtio_blk);
        unregister_blkdev(major, "virtblk");
        destroy_workqueue(virtblk_wq);
}
module_init(init);
module_exit(fini);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio block driver");
MODULE_LICENSE("GPL");