Revert "NFS: Make close(2) asynchronous when closing NFS O_DIRECT files"
[linux/fpc-iii.git] / drivers / block / virtio_blk.c
blobd4d05f064d390772a2f99acbf882eaa983788511
//#define DEBUG
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/virtio.h>
#include <linux/virtio_blk.h>
#include <linux/scatterlist.h>
#include <linux/string_helpers.h>
#include <scsi/scsi_cmnd.h>
#include <linux/idr.h>
#include <linux/blk-mq.h>
#include <linux/numa.h>

#define PART_BITS 4
#define VQ_NAME_LEN 16

static int major;
static DEFINE_IDA(vd_index_ida);

static struct workqueue_struct *virtblk_wq;

struct virtio_blk_vq {
	struct virtqueue *vq;
	spinlock_t lock;
	char name[VQ_NAME_LEN];
} ____cacheline_aligned_in_smp;

struct virtio_blk {
	struct virtio_device *vdev;

	/* The disk structure for the kernel. */
	struct gendisk *disk;

	/* Block layer tags. */
	struct blk_mq_tag_set tag_set;

	/* Process context for config space updates */
	struct work_struct config_work;

	/* What the host tells us, plus 2 for header & trailer. */
	unsigned int sg_elems;

	/* Ida index - used to track minor number allocations. */
	int index;

	/* number of vqs */
	int num_vqs;
	struct virtio_blk_vq *vqs;
};

struct virtblk_req {
	struct request *req;
	struct virtio_blk_outhdr out_hdr;
	struct virtio_scsi_inhdr in_hdr;
	u8 status;
	struct scatterlist sg[];
};

static inline int virtblk_result(struct virtblk_req *vbr)
{
	switch (vbr->status) {
	case VIRTIO_BLK_S_OK:
		return 0;
	case VIRTIO_BLK_S_UNSUPP:
		return -ENOTTY;
	default:
		return -EIO;
	}
}
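
/*
 * Buffer layout on the virtqueue, in the order the segments are added:
 *   out_hdr [, scsi cmd] [, data-out | data-in] [, sense, inhdr], status
 * Device-readable (out) segments must precede device-writable (in) ones,
 * which is why write data counts toward num_out and read data toward
 * num_in below.
 */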
static int __virtblk_add_req(struct virtqueue *vq,
			     struct virtblk_req *vbr,
			     struct scatterlist *data_sg,
			     bool have_data)
{
	struct scatterlist hdr, status, cmd, sense, inhdr, *sgs[6];
	unsigned int num_out = 0, num_in = 0;
	__virtio32 type = vbr->out_hdr.type & ~cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_OUT);

	sg_init_one(&hdr, &vbr->out_hdr, sizeof(vbr->out_hdr));
	sgs[num_out++] = &hdr;

	/*
	 * If this is a packet command we need a couple of additional headers.
	 * Behind the normal outhdr we put a segment with the scsi command
	 * block, and before the normal inhdr we put the sense data and the
	 * inhdr with additional status information.
	 */
	if (type == cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_SCSI_CMD)) {
		sg_init_one(&cmd, vbr->req->cmd, vbr->req->cmd_len);
		sgs[num_out++] = &cmd;
	}

	if (have_data) {
		if (vbr->out_hdr.type & cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_OUT))
			sgs[num_out++] = data_sg;
		else
			sgs[num_out + num_in++] = data_sg;
	}

	if (type == cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_SCSI_CMD)) {
		sg_init_one(&sense, vbr->req->sense, SCSI_SENSE_BUFFERSIZE);
		sgs[num_out + num_in++] = &sense;
		sg_init_one(&inhdr, &vbr->in_hdr, sizeof(vbr->in_hdr));
		sgs[num_out + num_in++] = &inhdr;
	}

	sg_init_one(&status, &vbr->status, sizeof(vbr->status));
	sgs[num_out + num_in++] = &status;

	return virtqueue_add_sgs(vq, sgs, num_out, num_in, vbr, GFP_ATOMIC);
}

static inline void virtblk_request_done(struct request *req)
{
	struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
	struct virtio_blk *vblk = req->q->queuedata;
	int error = virtblk_result(vbr);

	if (req->cmd_type == REQ_TYPE_BLOCK_PC) {
		req->resid_len = virtio32_to_cpu(vblk->vdev, vbr->in_hdr.residual);
		req->sense_len = virtio32_to_cpu(vblk->vdev, vbr->in_hdr.sense_len);
		req->errors = virtio32_to_cpu(vblk->vdev, vbr->in_hdr.errors);
	} else if (req->cmd_type == REQ_TYPE_DRV_PRIV) {
		req->errors = (error != 0);
	}

	blk_mq_end_request(req, error);
}

static void virtblk_done(struct virtqueue *vq)
{
	struct virtio_blk *vblk = vq->vdev->priv;
	bool req_done = false;
	int qid = vq->index;
	struct virtblk_req *vbr;
	unsigned long flags;
	unsigned int len;

	spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
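	/*
	 * Drain the ring with callbacks disabled, then re-enable them and
	 * re-check: virtqueue_enable_cb() returns false if more buffers
	 * arrived in the meantime, so we loop instead of missing them.
	 */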
	do {
		virtqueue_disable_cb(vq);
		while ((vbr = virtqueue_get_buf(vblk->vqs[qid].vq, &len)) != NULL) {
			blk_mq_complete_request(vbr->req);
			req_done = true;
		}
		if (unlikely(virtqueue_is_broken(vq)))
			break;
	} while (!virtqueue_enable_cb(vq));

	/* In case queue is stopped waiting for more buffers. */
	if (req_done)
		blk_mq_start_stopped_hw_queues(vblk->disk->queue, true);
	spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
}

static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
			   const struct blk_mq_queue_data *bd)
{
	struct virtio_blk *vblk = hctx->queue->queuedata;
	struct request *req = bd->rq;
	struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
	unsigned long flags;
	unsigned int num;
	int qid = hctx->queue_num;
	int err;
	bool notify = false;

	BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems);

	vbr->req = req;
	if (req->cmd_flags & REQ_FLUSH) {
		vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_FLUSH);
		vbr->out_hdr.sector = 0;
		vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(vbr->req));
	} else {
		switch (req->cmd_type) {
		case REQ_TYPE_FS:
			vbr->out_hdr.type = 0;
			vbr->out_hdr.sector = cpu_to_virtio64(vblk->vdev, blk_rq_pos(vbr->req));
			vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(vbr->req));
			break;
		case REQ_TYPE_BLOCK_PC:
			vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_SCSI_CMD);
			vbr->out_hdr.sector = 0;
			vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(vbr->req));
			break;
		case REQ_TYPE_DRV_PRIV:
			vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_GET_ID);
			vbr->out_hdr.sector = 0;
			vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(vbr->req));
			break;
		default:
			/* We don't put anything else in the queue. */
			BUG();
		}
	}

	blk_mq_start_request(req);

	num = blk_rq_map_sg(hctx->queue, vbr->req, vbr->sg);
	if (num) {
		if (rq_data_dir(vbr->req) == WRITE)
			vbr->out_hdr.type |= cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_OUT);
		else
			vbr->out_hdr.type |= cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_IN);
	}

	spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
	err = __virtblk_add_req(vblk->vqs[qid].vq, vbr, vbr->sg, num);
	if (err) {
		virtqueue_kick(vblk->vqs[qid].vq);
		blk_mq_stop_hw_queue(hctx);
		spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
		/* Out of mem doesn't actually happen, since we fall back
		 * to direct descriptors */
		if (err == -ENOMEM || err == -ENOSPC)
			return BLK_MQ_RQ_QUEUE_BUSY;
		return BLK_MQ_RQ_QUEUE_ERROR;
	}
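
	/*
	 * Batch notifications: only prepare a kick on the last request of
	 * the blk-mq batch, and issue the (potentially expensive) device
	 * notify after the lock is dropped.
	 */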
	if (bd->last && virtqueue_kick_prepare(vblk->vqs[qid].vq))
		notify = true;
	spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);

	if (notify)
		virtqueue_notify(vblk->vqs[qid].vq);
	return BLK_MQ_RQ_QUEUE_OK;
}

/* Return the device id (s/n) string for *disk into *id_str. */
static int virtblk_get_id(struct gendisk *disk, char *id_str)
{
	struct virtio_blk *vblk = disk->private_data;
	struct request *req;
	struct bio *bio;
	int err;

	bio = bio_map_kern(vblk->disk->queue, id_str, VIRTIO_BLK_ID_BYTES,
			   GFP_KERNEL);
	if (IS_ERR(bio))
		return PTR_ERR(bio);

	req = blk_make_request(vblk->disk->queue, bio, GFP_KERNEL);
	if (IS_ERR(req)) {
		bio_put(bio);
		return PTR_ERR(req);
	}

	req->cmd_type = REQ_TYPE_DRV_PRIV;
	err = blk_execute_rq(vblk->disk->queue, vblk->disk, req, false);
	blk_put_request(req);

	return err;
}

static int virtblk_ioctl(struct block_device *bdev, fmode_t mode,
			 unsigned int cmd, unsigned long data)
{
	struct gendisk *disk = bdev->bd_disk;
	struct virtio_blk *vblk = disk->private_data;

	/*
	 * Only allow the generic SCSI ioctls if the host supports them.
	 */
	if (!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_SCSI))
		return -ENOTTY;

	return scsi_cmd_blk_ioctl(bdev, mode, cmd,
				  (void __user *)data);
}

/* We provide getgeo only to please some old bootloader/partitioning tools */
static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo)
{
	struct virtio_blk *vblk = bd->bd_disk->private_data;

	/* see if the host passed in geometry config */
	if (virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_GEOMETRY)) {
		virtio_cread(vblk->vdev, struct virtio_blk_config,
			     geometry.cylinders, &geo->cylinders);
		virtio_cread(vblk->vdev, struct virtio_blk_config,
			     geometry.heads, &geo->heads);
		virtio_cread(vblk->vdev, struct virtio_blk_config,
			     geometry.sectors, &geo->sectors);
	} else {
		/* some standard values, similar to sd */
		geo->heads = 1 << 6;
		geo->sectors = 1 << 5;
		geo->cylinders = get_capacity(bd->bd_disk) >> 11;
	}
	return 0;
}

static const struct block_device_operations virtblk_fops = {
	.ioctl  = virtblk_ioctl,
	.owner  = THIS_MODULE,
	.getgeo = virtblk_getgeo,
};
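
/*
 * With PART_BITS == 4 each disk reserves 16 minor numbers: the whole-disk
 * node plus up to 15 partitions, so index 0 -> minor 0, index 1 -> 16, etc.
 */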
static int index_to_minor(int index)
{
	return index << PART_BITS;
}

static int minor_to_index(int minor)
{
	return minor >> PART_BITS;
}

static ssize_t virtblk_serial_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);
	int err;

	/* sysfs gives us a PAGE_SIZE buffer */
	BUILD_BUG_ON(PAGE_SIZE < VIRTIO_BLK_ID_BYTES);

	buf[VIRTIO_BLK_ID_BYTES] = '\0';
	err = virtblk_get_id(disk, buf);
	if (!err)
		return strlen(buf);

	if (err == -EIO) /* Unsupported? Make it empty. */
		return 0;

	return err;
}

static DEVICE_ATTR(serial, S_IRUGO, virtblk_serial_show, NULL);

static void virtblk_config_changed_work(struct work_struct *work)
{
	struct virtio_blk *vblk =
		container_of(work, struct virtio_blk, config_work);
	struct virtio_device *vdev = vblk->vdev;
	struct request_queue *q = vblk->disk->queue;
	char cap_str_2[10], cap_str_10[10];
	char *envp[] = { "RESIZE=1", NULL };
	u64 capacity;

	/* Host must always specify the capacity. */
	virtio_cread(vdev, struct virtio_blk_config, capacity, &capacity);

	/* If capacity is too big, truncate with warning. */
	if ((sector_t)capacity != capacity) {
		dev_warn(&vdev->dev, "Capacity %llu too large: truncating\n",
			 (unsigned long long)capacity);
		capacity = (sector_t)-1;
	}

	string_get_size(capacity, queue_logical_block_size(q),
			STRING_UNITS_2, cap_str_2, sizeof(cap_str_2));
	string_get_size(capacity, queue_logical_block_size(q),
			STRING_UNITS_10, cap_str_10, sizeof(cap_str_10));

	dev_notice(&vdev->dev,
		   "new size: %llu %d-byte logical blocks (%s/%s)\n",
		   (unsigned long long)capacity,
		   queue_logical_block_size(q),
		   cap_str_10, cap_str_2);

	set_capacity(vblk->disk, capacity);
	revalidate_disk(vblk->disk);
	kobject_uevent_env(&disk_to_dev(vblk->disk)->kobj, KOBJ_CHANGE, envp);
}

static void virtblk_config_changed(struct virtio_device *vdev)
{
	struct virtio_blk *vblk = vdev->priv;

	queue_work(virtblk_wq, &vblk->config_work);
}

static int init_vq(struct virtio_blk *vblk)
{
	int err = 0;
	int i;
	vq_callback_t **callbacks;
	const char **names;
	struct virtqueue **vqs;
	unsigned short num_vqs;
	struct virtio_device *vdev = vblk->vdev;

	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_MQ,
				   struct virtio_blk_config, num_queues,
				   &num_vqs);
	if (err)
		num_vqs = 1;

	vblk->vqs = kmalloc(sizeof(*vblk->vqs) * num_vqs, GFP_KERNEL);
	if (!vblk->vqs) {
		err = -ENOMEM;
		goto out;
	}

	names = kmalloc(sizeof(*names) * num_vqs, GFP_KERNEL);
	if (!names) {
		err = -ENOMEM;
		goto err_names;
	}

	callbacks = kmalloc(sizeof(*callbacks) * num_vqs, GFP_KERNEL);
	if (!callbacks) {
		err = -ENOMEM;
		goto err_callbacks;
	}

	vqs = kmalloc(sizeof(*vqs) * num_vqs, GFP_KERNEL);
	if (!vqs) {
		err = -ENOMEM;
		goto err_vqs;
	}

	for (i = 0; i < num_vqs; i++) {
		callbacks[i] = virtblk_done;
		snprintf(vblk->vqs[i].name, VQ_NAME_LEN, "req.%d", i);
		names[i] = vblk->vqs[i].name;
	}

	/* Discover virtqueues and write information to configuration. */
	err = vdev->config->find_vqs(vdev, num_vqs, vqs, callbacks, names);
	if (err)
		goto err_find_vqs;

	for (i = 0; i < num_vqs; i++) {
		spin_lock_init(&vblk->vqs[i].lock);
		vblk->vqs[i].vq = vqs[i];
	}
	vblk->num_vqs = num_vqs;
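
	/*
	 * Fall through on success as well: the temporary arrays are only
	 * needed for find_vqs(); vblk->vqs holds the persistent state and
	 * is freed below only if an error actually occurred.
	 */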
err_find_vqs:
	kfree(vqs);
err_vqs:
	kfree(callbacks);
err_callbacks:
	kfree(names);
err_names:
	if (err)
		kfree(vblk->vqs);
out:
	return err;
}

/*
 * Legacy naming scheme used for virtio devices.  We are stuck with it for
 * virtio blk but don't ever use it for any new driver.
 */
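/* e.g. with prefix "vd": index 0 -> "vda", 25 -> "vdz", 26 -> "vdaa". */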
static int virtblk_name_format(char *prefix, int index, char *buf, int buflen)
{
	const int base = 'z' - 'a' + 1;
	char *begin = buf + strlen(prefix);
	char *end = buf + buflen;
	char *p;
	int unit;

	p = end - 1;
	*p = '\0';
	unit = base;
	do {
		if (p == begin)
			return -EINVAL;
		*--p = 'a' + (index % unit);
		index = (index / unit) - 1;
	} while (index >= 0);

	memmove(begin, p, end - p);
	memcpy(buf, prefix, strlen(prefix));

	return 0;
}

static int virtblk_get_cache_mode(struct virtio_device *vdev)
{
	u8 writeback;
	int err;

	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE,
				   struct virtio_blk_config, wce,
				   &writeback);
	if (err)
		writeback = virtio_has_feature(vdev, VIRTIO_BLK_F_WCE) ||
			    virtio_has_feature(vdev, VIRTIO_F_VERSION_1);

	return writeback;
}

static void virtblk_update_cache_mode(struct virtio_device *vdev)
{
	u8 writeback = virtblk_get_cache_mode(vdev);
	struct virtio_blk *vblk = vdev->priv;

	if (writeback)
		blk_queue_flush(vblk->disk->queue, REQ_FLUSH);
	else
		blk_queue_flush(vblk->disk->queue, 0);

	revalidate_disk(vblk->disk);
}

static const char *const virtblk_cache_types[] = {
	"write through", "write back"
};

static ssize_t
virtblk_cache_type_store(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t count)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct virtio_blk *vblk = disk->private_data;
	struct virtio_device *vdev = vblk->vdev;
	int i;

	BUG_ON(!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_CONFIG_WCE));
	for (i = ARRAY_SIZE(virtblk_cache_types); --i >= 0; )
		if (sysfs_streq(buf, virtblk_cache_types[i]))
			break;

	if (i < 0)
		return -EINVAL;

	virtio_cwrite8(vdev, offsetof(struct virtio_blk_config, wce), i);
	virtblk_update_cache_mode(vdev);
	return count;
}

static ssize_t
virtblk_cache_type_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct virtio_blk *vblk = disk->private_data;
	u8 writeback = virtblk_get_cache_mode(vblk->vdev);

	BUG_ON(writeback >= ARRAY_SIZE(virtblk_cache_types));
	return snprintf(buf, 40, "%s\n", virtblk_cache_types[writeback]);
}
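
/*
 * Exposed as "cache_type" in the disk's sysfs directory (for example
 * /sys/block/vda/cache_type); writing "write through" or "write back"
 * updates the wce config byte and re-applies the flush policy.
 */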
static const struct device_attribute dev_attr_cache_type_ro =
	__ATTR(cache_type, S_IRUGO,
	       virtblk_cache_type_show, NULL);
static const struct device_attribute dev_attr_cache_type_rw =
	__ATTR(cache_type, S_IRUGO|S_IWUSR,
	       virtblk_cache_type_show, virtblk_cache_type_store);

static int virtblk_init_request(void *data, struct request *rq,
		unsigned int hctx_idx, unsigned int request_idx,
		unsigned int numa_node)
{
	struct virtio_blk *vblk = data;
	struct virtblk_req *vbr = blk_mq_rq_to_pdu(rq);

	sg_init_table(vbr->sg, vblk->sg_elems);
	return 0;
}

static struct blk_mq_ops virtio_mq_ops = {
	.queue_rq	= virtio_queue_rq,
	.map_queue	= blk_mq_map_queue,
	.complete	= virtblk_request_done,
	.init_request	= virtblk_init_request,
};
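
/* 0 (the default) lets probe size the queue depth to fill the virtqueue. */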
static unsigned int virtblk_queue_depth;
module_param_named(queue_depth, virtblk_queue_depth, uint, 0444);

static int virtblk_probe(struct virtio_device *vdev)
{
	struct virtio_blk *vblk;
	struct request_queue *q;
	int err, index;

	u64 cap;
	u32 v, blk_size, sg_elems, opt_io_size;
	u16 min_io_size;
	u8 physical_block_exp, alignment_offset;

	if (!vdev->config->get) {
		dev_err(&vdev->dev, "%s failure: config access disabled\n",
			__func__);
		return -EINVAL;
	}

	err = ida_simple_get(&vd_index_ida, 0, minor_to_index(1 << MINORBITS),
			     GFP_KERNEL);
	if (err < 0)
		goto out;
	index = err;

	/* We need to know how many segments before we allocate. */
	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_SEG_MAX,
				   struct virtio_blk_config, seg_max,
				   &sg_elems);

	/* We need at least one SG element, whatever they say. */
	if (err || !sg_elems)
		sg_elems = 1;

	/* We need extra sg elements at head and tail. */
	sg_elems += 2;
	vdev->priv = vblk = kmalloc(sizeof(*vblk), GFP_KERNEL);
	if (!vblk) {
		err = -ENOMEM;
		goto out_free_index;
	}

	vblk->vdev = vdev;
	vblk->sg_elems = sg_elems;

	INIT_WORK(&vblk->config_work, virtblk_config_changed_work);

	err = init_vq(vblk);
	if (err)
		goto out_free_vblk;

	/* FIXME: How many partitions?  How long is a piece of string? */
	vblk->disk = alloc_disk(1 << PART_BITS);
	if (!vblk->disk) {
		err = -ENOMEM;
		goto out_free_vq;
	}

	/* Default queue sizing is to fill the ring. */
	if (!virtblk_queue_depth) {
		virtblk_queue_depth = vblk->vqs[0].vq->num_free;
		/* ... but without indirect descs, we use 2 descs per req */
		if (!virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC))
			virtblk_queue_depth /= 2;
	}
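
	/*
	 * cmd_size makes blk-mq embed our per-request state (virtblk_req
	 * plus its trailing sg[] array) in each preallocated request, so
	 * virtio_queue_rq() needs no per-I/O allocations.
	 */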
	memset(&vblk->tag_set, 0, sizeof(vblk->tag_set));
	vblk->tag_set.ops = &virtio_mq_ops;
	vblk->tag_set.queue_depth = virtblk_queue_depth;
	vblk->tag_set.numa_node = NUMA_NO_NODE;
	vblk->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
	vblk->tag_set.cmd_size =
		sizeof(struct virtblk_req) +
		sizeof(struct scatterlist) * sg_elems;
	vblk->tag_set.driver_data = vblk;
	vblk->tag_set.nr_hw_queues = vblk->num_vqs;

	err = blk_mq_alloc_tag_set(&vblk->tag_set);
	if (err)
		goto out_put_disk;

	q = vblk->disk->queue = blk_mq_init_queue(&vblk->tag_set);
	if (IS_ERR(q)) {
		err = -ENOMEM;
		goto out_free_tags;
	}

	q->queuedata = vblk;

	virtblk_name_format("vd", index, vblk->disk->disk_name, DISK_NAME_LEN);

	vblk->disk->major = major;
	vblk->disk->first_minor = index_to_minor(index);
	vblk->disk->private_data = vblk;
	vblk->disk->fops = &virtblk_fops;
	vblk->disk->driverfs_dev = &vdev->dev;
	vblk->index = index;

	/* configure queue flush support */
	virtblk_update_cache_mode(vdev);

	/* If disk is read-only in the host, the guest should obey */
	if (virtio_has_feature(vdev, VIRTIO_BLK_F_RO))
		set_disk_ro(vblk->disk, 1);

	/* Host must always specify the capacity. */
	virtio_cread(vdev, struct virtio_blk_config, capacity, &cap);

	/* If capacity is too big, truncate with warning. */
	if ((sector_t)cap != cap) {
		dev_warn(&vdev->dev, "Capacity %llu too large: truncating\n",
			 (unsigned long long)cap);
		cap = (sector_t)-1;
	}
	set_capacity(vblk->disk, cap);

	/* We can handle whatever the host told us to handle. */
	blk_queue_max_segments(q, vblk->sg_elems-2);

	/* No need to bounce any requests */
	blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);

	/* No real sector limit. */
	blk_queue_max_hw_sectors(q, -1U);

	/* Host can optionally specify maximum segment size and number of
	 * segments. */
	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_SIZE_MAX,
				   struct virtio_blk_config, size_max, &v);
	if (!err)
		blk_queue_max_segment_size(q, v);
	else
		blk_queue_max_segment_size(q, -1U);

	/* Host can optionally specify the block size of the device */
	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_BLK_SIZE,
				   struct virtio_blk_config, blk_size,
				   &blk_size);
	if (!err)
		blk_queue_logical_block_size(q, blk_size);
	else
		blk_size = queue_logical_block_size(q);

	/* Use topology information if available */
	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
				   struct virtio_blk_config, physical_block_exp,
				   &physical_block_exp);
	if (!err && physical_block_exp)
		blk_queue_physical_block_size(q,
				blk_size * (1 << physical_block_exp));

	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
				   struct virtio_blk_config, alignment_offset,
				   &alignment_offset);
	if (!err && alignment_offset)
		blk_queue_alignment_offset(q, blk_size * alignment_offset);

	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
				   struct virtio_blk_config, min_io_size,
				   &min_io_size);
	if (!err && min_io_size)
		blk_queue_io_min(q, blk_size * min_io_size);

	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
				   struct virtio_blk_config, opt_io_size,
				   &opt_io_size);
	if (!err && opt_io_size)
		blk_queue_io_opt(q, blk_size * opt_io_size);

	virtio_device_ready(vdev);

	add_disk(vblk->disk);
	err = device_create_file(disk_to_dev(vblk->disk), &dev_attr_serial);
	if (err)
		goto out_del_disk;

	if (virtio_has_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE))
		err = device_create_file(disk_to_dev(vblk->disk),
					 &dev_attr_cache_type_rw);
	else
		err = device_create_file(disk_to_dev(vblk->disk),
					 &dev_attr_cache_type_ro);
	if (err)
		goto out_del_disk;
	return 0;

out_del_disk:
	del_gendisk(vblk->disk);
	blk_cleanup_queue(vblk->disk->queue);
out_free_tags:
	blk_mq_free_tag_set(&vblk->tag_set);
out_put_disk:
	put_disk(vblk->disk);
out_free_vq:
	vdev->config->del_vqs(vdev);
out_free_vblk:
	kfree(vblk);
out_free_index:
	ida_simple_remove(&vd_index_ida, index);
out:
	return err;
}

static void virtblk_remove(struct virtio_device *vdev)
{
	struct virtio_blk *vblk = vdev->priv;
	int index = vblk->index;
	int refc;

	/* Make sure no work handler is accessing the device. */
	flush_work(&vblk->config_work);

	del_gendisk(vblk->disk);
	blk_cleanup_queue(vblk->disk->queue);

	blk_mq_free_tag_set(&vblk->tag_set);

	/* Stop all the virtqueues. */
	vdev->config->reset(vdev);

	refc = atomic_read(&disk_to_dev(vblk->disk)->kobj.kref.refcount);
	put_disk(vblk->disk);
	vdev->config->del_vqs(vdev);
	kfree(vblk->vqs);
	kfree(vblk);

	/* Only free device id if we don't have any users */
	if (refc == 1)
		ida_simple_remove(&vd_index_ida, index);
}

#ifdef CONFIG_PM_SLEEP
static int virtblk_freeze(struct virtio_device *vdev)
{
	struct virtio_blk *vblk = vdev->priv;

	/* Ensure we don't receive any more interrupts */
	vdev->config->reset(vdev);

	/* Make sure no work handler is accessing the device. */
	flush_work(&vblk->config_work);

	blk_mq_stop_hw_queues(vblk->disk->queue);

	vdev->config->del_vqs(vdev);
	return 0;
}

static int virtblk_restore(struct virtio_device *vdev)
{
	struct virtio_blk *vblk = vdev->priv;
	int ret;

	ret = init_vq(vdev->priv);
	if (ret)
		return ret;

	virtio_device_ready(vdev);

	blk_mq_start_stopped_hw_queues(vblk->disk->queue, true);
	return 0;
}
#endif

static const struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_BLOCK, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

static unsigned int features_legacy[] = {
	VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY,
	VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE, VIRTIO_BLK_F_SCSI,
	VIRTIO_BLK_F_WCE, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE,
	VIRTIO_BLK_F_MQ,
};

static unsigned int features[] = {
	VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY,
	VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
	VIRTIO_BLK_F_TOPOLOGY,
	VIRTIO_BLK_F_MQ,
};
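
/*
 * Note: the modern feature table omits the SCSI passthrough and
 * writeback-cache bits that only the legacy table advertises.
 */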

static struct virtio_driver virtio_blk = {
	.feature_table			= features,
	.feature_table_size		= ARRAY_SIZE(features),
	.feature_table_legacy		= features_legacy,
	.feature_table_size_legacy	= ARRAY_SIZE(features_legacy),
	.driver.name			= KBUILD_MODNAME,
	.driver.owner			= THIS_MODULE,
	.id_table			= id_table,
	.probe				= virtblk_probe,
	.remove				= virtblk_remove,
	.config_changed			= virtblk_config_changed,
#ifdef CONFIG_PM_SLEEP
	.freeze				= virtblk_freeze,
	.restore			= virtblk_restore,
#endif
};

static int __init init(void)
{
	int error;

	virtblk_wq = alloc_workqueue("virtio-blk", 0, 0);
	if (!virtblk_wq)
		return -ENOMEM;

	major = register_blkdev(0, "virtblk");
	if (major < 0) {
		error = major;
		goto out_destroy_workqueue;
	}

	error = register_virtio_driver(&virtio_blk);
	if (error)
		goto out_unregister_blkdev;
	return 0;

out_unregister_blkdev:
	unregister_blkdev(major, "virtblk");
out_destroy_workqueue:
	destroy_workqueue(virtblk_wq);
	return error;
}

static void __exit fini(void)
{
	unregister_virtio_driver(&virtio_blk);
	unregister_blkdev(major, "virtblk");
	destroy_workqueue(virtblk_wq);
}
module_init(init);
module_exit(fini);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio block driver");
MODULE_LICENSE("GPL");