drivers/s390/kvm/virtio_ccw.c
/*
 * ccw based virtio transport
 *
 * Copyright IBM Corp. 2012
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
 */

#include <linux/kernel_stat.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/err.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/virtio_ring.h>
#include <linux/pfn.h>
#include <linux/async.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/kvm_para.h>
#include <asm/setup.h>
#include <asm/irq.h>
#include <asm/cio.h>
#include <asm/ccwdev.h>
#include <asm/virtio-ccw.h>

/*
 * virtio related functions
 */

struct vq_config_block {
        __u16 index;
        __u16 num;
} __packed;

#define VIRTIO_CCW_CONFIG_SIZE 0x100
/* same as PCI config space size, should be enough for all drivers */

struct virtio_ccw_device {
        struct virtio_device vdev;
        __u8 *status;
        __u8 config[VIRTIO_CCW_CONFIG_SIZE];
        struct ccw_device *cdev;
        __u32 curr_io;
        int err;
        wait_queue_head_t wait_q;
        spinlock_t lock;
        struct list_head virtqueues;
        unsigned long indicators;
        unsigned long indicators2;
        struct vq_config_block *config_block;
};

struct vq_info_block {
        __u64 queue;
        __u32 align;
        __u16 index;
        __u16 num;
} __packed;

struct virtio_feature_desc {
        __u32 features;
        __u8 index;
} __packed;

struct virtio_ccw_vq_info {
        struct virtqueue *vq;
        int num;
        void *queue;
        struct vq_info_block *info_block;
        struct list_head node;
        long cookie;
};

#define CCW_CMD_SET_VQ 0x13
#define CCW_CMD_VDEV_RESET 0x33
#define CCW_CMD_SET_IND 0x43
#define CCW_CMD_SET_CONF_IND 0x53
#define CCW_CMD_READ_FEAT 0x12
#define CCW_CMD_WRITE_FEAT 0x11
#define CCW_CMD_READ_CONF 0x22
#define CCW_CMD_WRITE_CONF 0x21
#define CCW_CMD_WRITE_STATUS 0x31
#define CCW_CMD_READ_VQ_CONF 0x32

#define VIRTIO_CCW_DOING_SET_VQ 0x00010000
#define VIRTIO_CCW_DOING_RESET 0x00040000
#define VIRTIO_CCW_DOING_READ_FEAT 0x00080000
#define VIRTIO_CCW_DOING_WRITE_FEAT 0x00100000
#define VIRTIO_CCW_DOING_READ_CONFIG 0x00200000
#define VIRTIO_CCW_DOING_WRITE_CONFIG 0x00400000
#define VIRTIO_CCW_DOING_WRITE_STATUS 0x00800000
#define VIRTIO_CCW_DOING_SET_IND 0x01000000
#define VIRTIO_CCW_DOING_READ_VQ_CONF 0x02000000
#define VIRTIO_CCW_DOING_SET_CONF_IND 0x04000000
#define VIRTIO_CCW_INTPARM_MASK 0xffff0000

static struct virtio_ccw_device *to_vc_device(struct virtio_device *vdev)
{
        return container_of(vdev, struct virtio_ccw_device, vdev);
}
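
/*
 * Channel I/O to the device is synchronous from the driver's point of
 * view: ccw_io_helper() starts a channel program and then sleeps until
 * the interrupt handler clears the corresponding VIRTIO_CCW_DOING_* bit
 * in curr_io; doing_io() is the wait condition for that.
 */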

static int doing_io(struct virtio_ccw_device *vcdev, __u32 flag)
{
        unsigned long flags;
        __u32 ret;

        spin_lock_irqsave(get_ccwdev_lock(vcdev->cdev), flags);
        if (vcdev->err)
                ret = 0;
        else
                ret = vcdev->curr_io & flag;
        spin_unlock_irqrestore(get_ccwdev_lock(vcdev->cdev), flags);
        return ret;
}

static int ccw_io_helper(struct virtio_ccw_device *vcdev,
                         struct ccw1 *ccw, __u32 intparm)
{
        int ret;
        unsigned long flags;
        int flag = intparm & VIRTIO_CCW_INTPARM_MASK;

        do {
                spin_lock_irqsave(get_ccwdev_lock(vcdev->cdev), flags);
                ret = ccw_device_start(vcdev->cdev, ccw, intparm, 0, 0);
                if (!ret) {
                        if (!vcdev->curr_io)
                                vcdev->err = 0;
                        vcdev->curr_io |= flag;
                }
                spin_unlock_irqrestore(get_ccwdev_lock(vcdev->cdev), flags);
                cpu_relax();
        } while (ret == -EBUSY);
        wait_event(vcdev->wait_q, doing_io(vcdev, flag) == 0);
        return ret ? ret : vcdev->err;
}
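
/*
 * Queue notifications go to the host via diagnose 0x500 with function
 * code KVM_S390_VIRTIO_CCW_NOTIFY; the host may hand back a cookie that
 * is passed in again on the next notification for this queue.
 */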

static inline long do_kvm_notify(struct subchannel_id schid,
                                 unsigned long queue_index,
                                 long cookie)
{
        register unsigned long __nr asm("1") = KVM_S390_VIRTIO_CCW_NOTIFY;
        register struct subchannel_id __schid asm("2") = schid;
        register unsigned long __index asm("3") = queue_index;
        register long __rc asm("2");
        register long __cookie asm("4") = cookie;

        asm volatile ("diag 2,4,0x500\n"
                      : "=d" (__rc) : "d" (__nr), "d" (__schid), "d" (__index),
                      "d"(__cookie)
                      : "memory", "cc");
        return __rc;
}

static void virtio_ccw_kvm_notify(struct virtqueue *vq)
{
        struct virtio_ccw_vq_info *info = vq->priv;
        struct virtio_ccw_device *vcdev;
        struct subchannel_id schid;

        vcdev = to_vc_device(info->vq->vdev);
        ccw_device_get_schid(vcdev->cdev, &schid);
        info->cookie = do_kvm_notify(schid, vq->index, info->cookie);
}

static int virtio_ccw_read_vq_conf(struct virtio_ccw_device *vcdev,
                                   struct ccw1 *ccw, int index)
{
        vcdev->config_block->index = index;
        ccw->cmd_code = CCW_CMD_READ_VQ_CONF;
        ccw->flags = 0;
        ccw->count = sizeof(struct vq_config_block);
        ccw->cda = (__u32)(unsigned long)(vcdev->config_block);
        ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_VQ_CONF);
        return vcdev->config_block->num;
}

static void virtio_ccw_del_vq(struct virtqueue *vq, struct ccw1 *ccw)
{
        struct virtio_ccw_device *vcdev = to_vc_device(vq->vdev);
        struct virtio_ccw_vq_info *info = vq->priv;
        unsigned long flags;
        unsigned long size;
        int ret;
        unsigned int index = vq->index;

        /* Remove from our list. */
        spin_lock_irqsave(&vcdev->lock, flags);
        list_del(&info->node);
        spin_unlock_irqrestore(&vcdev->lock, flags);

        /* Release from host. */
        info->info_block->queue = 0;
        info->info_block->align = 0;
        info->info_block->index = index;
        info->info_block->num = 0;
        ccw->cmd_code = CCW_CMD_SET_VQ;
        ccw->flags = 0;
        ccw->count = sizeof(*info->info_block);
        ccw->cda = (__u32)(unsigned long)(info->info_block);
        ret = ccw_io_helper(vcdev, ccw,
                            VIRTIO_CCW_DOING_SET_VQ | index);
        /*
         * -ENODEV isn't considered an error: The device is gone anyway.
         * This may happen on device detach.
         */
        if (ret && (ret != -ENODEV))
                dev_warn(&vq->vdev->dev, "Error %d while deleting queue %d",
                         ret, index);

        vring_del_virtqueue(vq);
        size = PAGE_ALIGN(vring_size(info->num, KVM_VIRTIO_CCW_RING_ALIGN));
        free_pages_exact(info->queue, size);
        kfree(info->info_block);
        kfree(info);
}

static void virtio_ccw_del_vqs(struct virtio_device *vdev)
{
        struct virtqueue *vq, *n;
        struct ccw1 *ccw;

        ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
        if (!ccw)
                return;

        list_for_each_entry_safe(vq, n, &vdev->vqs, list)
                virtio_ccw_del_vq(vq, ccw);

        kfree(ccw);
}
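
/*
 * Set up a single virtqueue: query the queue size from the host, allocate
 * the ring memory, create the vring and register its location with the
 * host via CCW_CMD_SET_VQ.
 */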

static struct virtqueue *virtio_ccw_setup_vq(struct virtio_device *vdev,
                                             int i, vq_callback_t *callback,
                                             const char *name,
                                             struct ccw1 *ccw)
{
        struct virtio_ccw_device *vcdev = to_vc_device(vdev);
        int err;
        struct virtqueue *vq = NULL;
        struct virtio_ccw_vq_info *info;
        unsigned long size = 0; /* silence the compiler */
        unsigned long flags;

        /* Allocate queue. */
        info = kzalloc(sizeof(struct virtio_ccw_vq_info), GFP_KERNEL);
        if (!info) {
                dev_warn(&vcdev->cdev->dev, "no info\n");
                err = -ENOMEM;
                goto out_err;
        }
        info->info_block = kzalloc(sizeof(*info->info_block),
                                   GFP_DMA | GFP_KERNEL);
        if (!info->info_block) {
                dev_warn(&vcdev->cdev->dev, "no info block\n");
                err = -ENOMEM;
                goto out_err;
        }
        info->num = virtio_ccw_read_vq_conf(vcdev, ccw, i);
        size = PAGE_ALIGN(vring_size(info->num, KVM_VIRTIO_CCW_RING_ALIGN));
        info->queue = alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO);
        if (info->queue == NULL) {
                dev_warn(&vcdev->cdev->dev, "no queue\n");
                err = -ENOMEM;
                goto out_err;
        }

        vq = vring_new_virtqueue(i, info->num, KVM_VIRTIO_CCW_RING_ALIGN, vdev,
                                 true, info->queue, virtio_ccw_kvm_notify,
                                 callback, name);
        if (!vq) {
                /* For now, we fail if we can't get the requested size. */
                dev_warn(&vcdev->cdev->dev, "no vq\n");
                err = -ENOMEM;
                goto out_err;
        }

        /* Register it with the host. */
        info->info_block->queue = (__u64)info->queue;
        info->info_block->align = KVM_VIRTIO_CCW_RING_ALIGN;
        info->info_block->index = i;
        info->info_block->num = info->num;
        ccw->cmd_code = CCW_CMD_SET_VQ;
        ccw->flags = 0;
        ccw->count = sizeof(*info->info_block);
        ccw->cda = (__u32)(unsigned long)(info->info_block);
        err = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_VQ | i);
        if (err) {
                dev_warn(&vcdev->cdev->dev, "SET_VQ failed\n");
                goto out_err;
        }

        info->vq = vq;
        vq->priv = info;

        /* Save it to our list. */
        spin_lock_irqsave(&vcdev->lock, flags);
        list_add(&info->node, &vcdev->virtqueues);
        spin_unlock_irqrestore(&vcdev->lock, flags);

        return vq;

out_err:
        if (vq)
                vring_del_virtqueue(vq);
        if (info) {
                if (info->queue)
                        free_pages_exact(info->queue, size);
                kfree(info->info_block);
        }
        kfree(info);
        return ERR_PTR(err);
}
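
/*
 * Set up all requested virtqueues and register the indicator bitmaps for
 * queue notifications and configuration changes; the indicator addresses
 * handed to the host must lie below 2G.
 */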

static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs,
                               struct virtqueue *vqs[],
                               vq_callback_t *callbacks[],
                               const char *names[])
{
        struct virtio_ccw_device *vcdev = to_vc_device(vdev);
        unsigned long *indicatorp = NULL;
        int ret, i;
        struct ccw1 *ccw;

        ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
        if (!ccw)
                return -ENOMEM;

        for (i = 0; i < nvqs; ++i) {
                vqs[i] = virtio_ccw_setup_vq(vdev, i, callbacks[i], names[i],
                                             ccw);
                if (IS_ERR(vqs[i])) {
                        ret = PTR_ERR(vqs[i]);
                        vqs[i] = NULL;
                        goto out;
                }
        }
        ret = -ENOMEM;
        /* We need a data area under 2G to communicate. */
        indicatorp = kmalloc(sizeof(&vcdev->indicators), GFP_DMA | GFP_KERNEL);
        if (!indicatorp)
                goto out;
        *indicatorp = (unsigned long) &vcdev->indicators;
        /* Register queue indicators with host. */
        vcdev->indicators = 0;
        ccw->cmd_code = CCW_CMD_SET_IND;
        ccw->flags = 0;
        ccw->count = sizeof(vcdev->indicators);
        ccw->cda = (__u32)(unsigned long) indicatorp;
        ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_IND);
        if (ret)
                goto out;
        /* Register indicators2 with host for config changes */
        *indicatorp = (unsigned long) &vcdev->indicators2;
        vcdev->indicators2 = 0;
        ccw->cmd_code = CCW_CMD_SET_CONF_IND;
        ccw->flags = 0;
        ccw->count = sizeof(vcdev->indicators2);
        ccw->cda = (__u32)(unsigned long) indicatorp;
        ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_CONF_IND);
        if (ret)
                goto out;

        kfree(indicatorp);
        kfree(ccw);
        return 0;
out:
        kfree(indicatorp);
        kfree(ccw);
        virtio_ccw_del_vqs(vdev);
        return ret;
}

static void virtio_ccw_reset(struct virtio_device *vdev)
{
        struct virtio_ccw_device *vcdev = to_vc_device(vdev);
        struct ccw1 *ccw;

        ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
        if (!ccw)
                return;

        /* Zero status bits. */
        *vcdev->status = 0;

        /* Send a reset ccw on device. */
        ccw->cmd_code = CCW_CMD_VDEV_RESET;
        ccw->flags = 0;
        ccw->count = 0;
        ccw->cda = 0;
        ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_RESET);
        kfree(ccw);
}

static u32 virtio_ccw_get_features(struct virtio_device *vdev)
{
        struct virtio_ccw_device *vcdev = to_vc_device(vdev);
        struct virtio_feature_desc *features;
        int ret, rc;
        struct ccw1 *ccw;

        ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
        if (!ccw)
                return 0;

        features = kzalloc(sizeof(*features), GFP_DMA | GFP_KERNEL);
        if (!features) {
                rc = 0;
                goto out_free;
        }
        /* Read the feature bits from the host. */
        /* TODO: Features > 32 bits */
        features->index = 0;
        ccw->cmd_code = CCW_CMD_READ_FEAT;
        ccw->flags = 0;
        ccw->count = sizeof(*features);
        ccw->cda = (__u32)(unsigned long)features;
        ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_FEAT);
        if (ret) {
                rc = 0;
                goto out_free;
        }

        rc = le32_to_cpu(features->features);

out_free:
        kfree(features);
        kfree(ccw);
        return rc;
}

static void virtio_ccw_finalize_features(struct virtio_device *vdev)
{
        struct virtio_ccw_device *vcdev = to_vc_device(vdev);
        struct virtio_feature_desc *features;
        int i;
        struct ccw1 *ccw;

        ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
        if (!ccw)
                return;

        features = kzalloc(sizeof(*features), GFP_DMA | GFP_KERNEL);
        if (!features)
                goto out_free;

        /* Give virtio_ring a chance to accept features. */
        vring_transport_features(vdev);

        for (i = 0; i < sizeof(*vdev->features) / sizeof(features->features);
             i++) {
                int highbits = i % 2 ? 32 : 0;
                features->index = i;
                features->features = cpu_to_le32(vdev->features[i / 2]
                                                 >> highbits);
                /* Write the feature bits to the host. */
                ccw->cmd_code = CCW_CMD_WRITE_FEAT;
                ccw->flags = 0;
                ccw->count = sizeof(*features);
                ccw->cda = (__u32)(unsigned long)features;
                ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_FEAT);
        }

out_free:
        kfree(features);
        kfree(ccw);
}

static void virtio_ccw_get_config(struct virtio_device *vdev,
                                  unsigned int offset, void *buf, unsigned len)
{
        struct virtio_ccw_device *vcdev = to_vc_device(vdev);
        int ret;
        struct ccw1 *ccw;
        void *config_area;

        ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
        if (!ccw)
                return;

        config_area = kzalloc(VIRTIO_CCW_CONFIG_SIZE, GFP_DMA | GFP_KERNEL);
        if (!config_area)
                goto out_free;

        /* Read the config area from the host. */
        ccw->cmd_code = CCW_CMD_READ_CONF;
        ccw->flags = 0;
        ccw->count = offset + len;
        ccw->cda = (__u32)(unsigned long)config_area;
        ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_CONFIG);
        if (ret)
                goto out_free;

        memcpy(vcdev->config, config_area, sizeof(vcdev->config));
        memcpy(buf, &vcdev->config[offset], len);

out_free:
        kfree(config_area);
        kfree(ccw);
}

static void virtio_ccw_set_config(struct virtio_device *vdev,
                                  unsigned int offset, const void *buf,
                                  unsigned len)
{
        struct virtio_ccw_device *vcdev = to_vc_device(vdev);
        struct ccw1 *ccw;
        void *config_area;

        ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
        if (!ccw)
                return;

        config_area = kzalloc(VIRTIO_CCW_CONFIG_SIZE, GFP_DMA | GFP_KERNEL);
        if (!config_area)
                goto out_free;

        memcpy(&vcdev->config[offset], buf, len);
        /* Write the config area to the host. */
        memcpy(config_area, vcdev->config, sizeof(vcdev->config));
        ccw->cmd_code = CCW_CMD_WRITE_CONF;
        ccw->flags = 0;
        ccw->count = offset + len;
        ccw->cda = (__u32)(unsigned long)config_area;
        ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_CONFIG);

out_free:
        kfree(config_area);
        kfree(ccw);
}

static u8 virtio_ccw_get_status(struct virtio_device *vdev)
{
        struct virtio_ccw_device *vcdev = to_vc_device(vdev);

        return *vcdev->status;
}

static void virtio_ccw_set_status(struct virtio_device *vdev, u8 status)
{
        struct virtio_ccw_device *vcdev = to_vc_device(vdev);
        struct ccw1 *ccw;

        ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
        if (!ccw)
                return;

        /* Write the status to the host. */
        *vcdev->status = status;
        ccw->cmd_code = CCW_CMD_WRITE_STATUS;
        ccw->flags = 0;
        ccw->count = sizeof(status);
        ccw->cda = (__u32)(unsigned long)vcdev->status;
        ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_STATUS);
        kfree(ccw);
}

static struct virtio_config_ops virtio_ccw_config_ops = {
        .get_features = virtio_ccw_get_features,
        .finalize_features = virtio_ccw_finalize_features,
        .get = virtio_ccw_get_config,
        .set = virtio_ccw_set_config,
        .get_status = virtio_ccw_get_status,
        .set_status = virtio_ccw_set_status,
        .reset = virtio_ccw_reset,
        .find_vqs = virtio_ccw_find_vqs,
        .del_vqs = virtio_ccw_del_vqs,
};

/*
 * ccw bus driver related functions
 */

static void virtio_ccw_release_dev(struct device *_d)
{
        struct virtio_device *dev = container_of(_d, struct virtio_device,
                                                 dev);
        struct virtio_ccw_device *vcdev = to_vc_device(dev);

        kfree(vcdev->status);
        kfree(vcdev->config_block);
        kfree(vcdev);
}

static int irb_is_error(struct irb *irb)
{
        if (scsw_cstat(&irb->scsw) != 0)
                return 1;
        if (scsw_dstat(&irb->scsw) & ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END))
                return 1;
        if (scsw_cc(&irb->scsw) != 0)
                return 1;
        return 0;
}

static struct virtqueue *virtio_ccw_vq_by_ind(struct virtio_ccw_device *vcdev,
                                              int index)
{
        struct virtio_ccw_vq_info *info;
        unsigned long flags;
        struct virtqueue *vq;

        vq = NULL;
        spin_lock_irqsave(&vcdev->lock, flags);
        list_for_each_entry(info, &vcdev->virtqueues, node) {
                if (info->vq->index == index) {
                        vq = info->vq;
                        break;
                }
        }
        spin_unlock_irqrestore(&vcdev->lock, flags);
        return vq;
}
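
/*
 * Interrupt handler for the ccw device: completion of a channel program
 * started by ccw_io_helper() clears the matching VIRTIO_CCW_DOING_* bit
 * and wakes up the waiter; set bits in the indicators are forwarded to
 * the respective virtqueues, and indicators2 signals a config change.
 */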

static void virtio_ccw_int_handler(struct ccw_device *cdev,
                                   unsigned long intparm,
                                   struct irb *irb)
{
        __u32 activity = intparm & VIRTIO_CCW_INTPARM_MASK;
        struct virtio_ccw_device *vcdev = dev_get_drvdata(&cdev->dev);
        int i;
        struct virtqueue *vq;
        struct virtio_driver *drv;

        /* Check if it's a notification from the host. */
        if ((intparm == 0) &&
            (scsw_stctl(&irb->scsw) ==
             (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND))) {
                /* OK */
        }
        if (irb_is_error(irb))
                vcdev->err = -EIO; /* XXX - use real error */
        if (vcdev->curr_io & activity) {
                switch (activity) {
                case VIRTIO_CCW_DOING_READ_FEAT:
                case VIRTIO_CCW_DOING_WRITE_FEAT:
                case VIRTIO_CCW_DOING_READ_CONFIG:
                case VIRTIO_CCW_DOING_WRITE_CONFIG:
                case VIRTIO_CCW_DOING_WRITE_STATUS:
                case VIRTIO_CCW_DOING_SET_VQ:
                case VIRTIO_CCW_DOING_SET_IND:
                case VIRTIO_CCW_DOING_SET_CONF_IND:
                case VIRTIO_CCW_DOING_RESET:
                case VIRTIO_CCW_DOING_READ_VQ_CONF:
                        vcdev->curr_io &= ~activity;
                        wake_up(&vcdev->wait_q);
                        break;
                default:
                        /* don't know what to do... */
                        dev_warn(&cdev->dev, "Suspicious activity '%08x'\n",
                                 activity);
                        WARN_ON(1);
                        break;
                }
        }
        for_each_set_bit(i, &vcdev->indicators,
                         sizeof(vcdev->indicators) * BITS_PER_BYTE) {
                /* The bit clear must happen before the vring kick. */
                clear_bit(i, &vcdev->indicators);
                barrier();
                vq = virtio_ccw_vq_by_ind(vcdev, i);
                vring_interrupt(0, vq);
        }
        if (test_bit(0, &vcdev->indicators2)) {
                drv = container_of(vcdev->vdev.dev.driver,
                                   struct virtio_driver, driver);

                if (drv && drv->config_changed)
                        drv->config_changed(&vcdev->vdev);
                clear_bit(0, &vcdev->indicators2);
        }
}

/*
 * We usually want to autoonline all devices, but give the admin
 * a way to exempt devices from this.
 */
#define __DEV_WORDS ((__MAX_SUBCHANNEL + (8*sizeof(long) - 1)) / \
                     (8*sizeof(long)))
static unsigned long devs_no_auto[__MAX_SSID + 1][__DEV_WORDS];

static char *no_auto = "";

module_param(no_auto, charp, 0444);
MODULE_PARM_DESC(no_auto, "list of ccw bus id ranges not to be auto-onlined");

static int virtio_ccw_check_autoonline(struct ccw_device *cdev)
{
        struct ccw_dev_id id;

        ccw_device_get_id(cdev, &id);
        if (test_bit(id.devno, devs_no_auto[id.ssid]))
                return 0;
        return 1;
}

static void virtio_ccw_auto_online(void *data, async_cookie_t cookie)
{
        struct ccw_device *cdev = data;
        int ret;

        ret = ccw_device_set_online(cdev);
        if (ret)
                dev_warn(&cdev->dev, "Failed to set online: %d\n", ret);
}

static int virtio_ccw_probe(struct ccw_device *cdev)
{
        cdev->handler = virtio_ccw_int_handler;

        if (virtio_ccw_check_autoonline(cdev))
                async_schedule(virtio_ccw_auto_online, cdev);
        return 0;
}

static void virtio_ccw_remove(struct ccw_device *cdev)
{
        struct virtio_ccw_device *vcdev = dev_get_drvdata(&cdev->dev);

        if (cdev->online) {
                unregister_virtio_device(&vcdev->vdev);
                dev_set_drvdata(&cdev->dev, NULL);
        }
        cdev->handler = NULL;
}

static int virtio_ccw_offline(struct ccw_device *cdev)
{
        struct virtio_ccw_device *vcdev = dev_get_drvdata(&cdev->dev);

        unregister_virtio_device(&vcdev->vdev);
        dev_set_drvdata(&cdev->dev, NULL);
        return 0;
}
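
/*
 * Bring the device online: allocate the virtio_ccw_device; the status
 * byte and the vq config block are allocated with GFP_DMA so they stay
 * below 2G, as required for data areas passed to the host, and the
 * device is then registered with the virtio core.
 */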

static int virtio_ccw_online(struct ccw_device *cdev)
{
        int ret;
        struct virtio_ccw_device *vcdev;

        vcdev = kzalloc(sizeof(*vcdev), GFP_KERNEL);
        if (!vcdev) {
                dev_warn(&cdev->dev, "Could not get memory for virtio\n");
                ret = -ENOMEM;
                goto out_free;
        }
        vcdev->config_block = kzalloc(sizeof(*vcdev->config_block),
                                      GFP_DMA | GFP_KERNEL);
        if (!vcdev->config_block) {
                ret = -ENOMEM;
                goto out_free;
        }
        vcdev->status = kzalloc(sizeof(*vcdev->status), GFP_DMA | GFP_KERNEL);
        if (!vcdev->status) {
                ret = -ENOMEM;
                goto out_free;
        }

        vcdev->vdev.dev.parent = &cdev->dev;
        vcdev->vdev.dev.release = virtio_ccw_release_dev;
        vcdev->vdev.config = &virtio_ccw_config_ops;
        vcdev->cdev = cdev;
        init_waitqueue_head(&vcdev->wait_q);
        INIT_LIST_HEAD(&vcdev->virtqueues);
        spin_lock_init(&vcdev->lock);

        dev_set_drvdata(&cdev->dev, vcdev);
        vcdev->vdev.id.vendor = cdev->id.cu_type;
        vcdev->vdev.id.device = cdev->id.cu_model;
        ret = register_virtio_device(&vcdev->vdev);
        if (ret) {
                dev_warn(&cdev->dev, "Failed to register virtio device: %d\n",
                         ret);
                goto out_put;
        }
        return 0;
out_put:
        dev_set_drvdata(&cdev->dev, NULL);
        put_device(&vcdev->vdev.dev);
        return ret;
out_free:
        if (vcdev) {
                kfree(vcdev->status);
                kfree(vcdev->config_block);
        }
        kfree(vcdev);
        return ret;
}

static int virtio_ccw_cio_notify(struct ccw_device *cdev, int event)
{
        /* TODO: Check whether we need special handling here. */
        return 0;
}

static struct ccw_device_id virtio_ids[] = {
        { CCW_DEVICE(0x3832, 0) },
        {},
};
MODULE_DEVICE_TABLE(ccw, virtio_ids);

static struct ccw_driver virtio_ccw_driver = {
        .driver = {
                .owner = THIS_MODULE,
                .name = "virtio_ccw",
        },
        .ids = virtio_ids,
        .probe = virtio_ccw_probe,
        .remove = virtio_ccw_remove,
        .set_offline = virtio_ccw_offline,
        .set_online = virtio_ccw_online,
        .notify = virtio_ccw_cio_notify,
        .int_class = IRQIO_VIR,
};
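
/*
 * Helpers for parsing the no_auto= parameter: a comma separated list of
 * ccw bus ids or bus id ranges (e.g. 0.0.1234 or 0.0.1000-0.0.1fff) that
 * should not be automatically set online.
 */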

static int __init pure_hex(char **cp, unsigned int *val, int min_digit,
                           int max_digit, int max_val)
{
        int diff;

        diff = 0;
        *val = 0;

        while (diff <= max_digit) {
                int value = hex_to_bin(**cp);

                if (value < 0)
                        break;
                *val = *val * 16 + value;
                (*cp)++;
                diff++;
        }

        if ((diff < min_digit) || (diff > max_digit) || (*val > max_val))
                return 1;

        return 0;
}

static int __init parse_busid(char *str, unsigned int *cssid,
                              unsigned int *ssid, unsigned int *devno)
{
        char *str_work;
        int rc, ret;

        rc = 1;

        if (*str == '\0')
                goto out;

        str_work = str;
        ret = pure_hex(&str_work, cssid, 1, 2, __MAX_CSSID);
        if (ret || (str_work[0] != '.'))
                goto out;
        str_work++;
        ret = pure_hex(&str_work, ssid, 1, 1, __MAX_SSID);
        if (ret || (str_work[0] != '.'))
                goto out;
        str_work++;
        ret = pure_hex(&str_work, devno, 4, 4, __MAX_SUBCHANNEL);
        if (ret || (str_work[0] != '\0'))
                goto out;

        rc = 0;
out:
        return rc;
}

static void __init no_auto_parse(void)
{
        unsigned int from_cssid, to_cssid, from_ssid, to_ssid, from, to;
        char *parm, *str;
        int rc;

        str = no_auto;
        while ((parm = strsep(&str, ","))) {
                rc = parse_busid(strsep(&parm, "-"), &from_cssid,
                                 &from_ssid, &from);
                if (rc)
                        continue;
                if (parm != NULL) {
                        rc = parse_busid(parm, &to_cssid,
                                         &to_ssid, &to);
                        if ((from_ssid > to_ssid) ||
                            ((from_ssid == to_ssid) && (from > to)))
                                rc = -EINVAL;
                } else {
                        to_cssid = from_cssid;
                        to_ssid = from_ssid;
                        to = from;
                }
                if (rc)
                        continue;
                while ((from_ssid < to_ssid) ||
                       ((from_ssid == to_ssid) && (from <= to))) {
                        set_bit(from, devs_no_auto[from_ssid]);
                        from++;
                        if (from > __MAX_SUBCHANNEL) {
                                from_ssid++;
                                from = 0;
                        }
                }
        }
}

static int __init virtio_ccw_init(void)
{
        /* parse no_auto string before we do anything further */
        no_auto_parse();
        return ccw_driver_register(&virtio_ccw_driver);
}
module_init(virtio_ccw_init);

static void __exit virtio_ccw_exit(void)
{
        ccw_driver_unregister(&virtio_ccw_driver);
}
module_exit(virtio_ccw_exit);