drivers/vhost/vdpa.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018-2020 Intel Corporation.
 * Copyright (C) 2020 Red Hat, Inc.
 *
 * Author: Tiwei Bie <tiwei.bie@intel.com>
 *         Jason Wang <jasowang@redhat.com>
 *
 * Thanks Michael S. Tsirkin for the valuable comments and
 * suggestions. And thanks to Cunming Liang and Zhihong Wang for all
 * their support.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/iommu.h>
#include <linux/uuid.h>
#include <linux/vdpa.h>
#include <linux/nospec.h>
#include <linux/vhost.h>
#include <linux/virtio_net.h>

#include "vhost.h"

enum {
	VHOST_VDPA_FEATURES =
		(1ULL << VIRTIO_F_NOTIFY_ON_EMPTY) |
		(1ULL << VIRTIO_F_ANY_LAYOUT) |
		(1ULL << VIRTIO_F_VERSION_1) |
		(1ULL << VIRTIO_F_IOMMU_PLATFORM) |
		(1ULL << VIRTIO_F_RING_PACKED) |
		(1ULL << VIRTIO_F_ORDER_PLATFORM) |
		(1ULL << VIRTIO_RING_F_INDIRECT_DESC) |
		(1ULL << VIRTIO_RING_F_EVENT_IDX),

	VHOST_VDPA_NET_FEATURES = VHOST_VDPA_FEATURES |
		(1ULL << VIRTIO_NET_F_CSUM) |
		(1ULL << VIRTIO_NET_F_GUEST_CSUM) |
		(1ULL << VIRTIO_NET_F_MTU) |
		(1ULL << VIRTIO_NET_F_MAC) |
		(1ULL << VIRTIO_NET_F_GUEST_TSO4) |
		(1ULL << VIRTIO_NET_F_GUEST_TSO6) |
		(1ULL << VIRTIO_NET_F_GUEST_ECN) |
		(1ULL << VIRTIO_NET_F_GUEST_UFO) |
		(1ULL << VIRTIO_NET_F_HOST_TSO4) |
		(1ULL << VIRTIO_NET_F_HOST_TSO6) |
		(1ULL << VIRTIO_NET_F_HOST_ECN) |
		(1ULL << VIRTIO_NET_F_HOST_UFO) |
		(1ULL << VIRTIO_NET_F_MRG_RXBUF) |
		(1ULL << VIRTIO_NET_F_STATUS) |
		(1ULL << VIRTIO_NET_F_SPEED_DUPLEX),
};

/* Currently, only network backend w/o multiqueue is supported. */
#define VHOST_VDPA_VQ_MAX	2

#define VHOST_VDPA_DEV_MAX	(1U << MINORBITS)

struct vhost_vdpa {
	struct vhost_dev vdev;
	struct iommu_domain *domain;
	struct vhost_virtqueue *vqs;
	struct completion completion;
	struct vdpa_device *vdpa;
	struct device dev;
	struct cdev cdev;
	atomic_t opened;
	int nvqs;
	int virtio_id;
	int minor;
};

static DEFINE_IDA(vhost_vdpa_ida);

static dev_t vhost_vdpa_major;

static const u64 vhost_vdpa_features[] = {
	[VIRTIO_ID_NET] = VHOST_VDPA_NET_FEATURES,
};

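/*
 * Kick and interrupt plumbing: handle_vq_kick() runs from the vhost worker
 * when the guest kicks a virtqueue and forwards the kick to the parent vDPA
 * device, while vhost_vdpa_virtqueue_cb() is called by the vDPA driver on a
 * device interrupt and signals the call eventfd that userspace registered
 * with VHOST_SET_VRING_CALL.
 */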
static void handle_vq_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_vdpa *v = container_of(vq->dev, struct vhost_vdpa, vdev);
	const struct vdpa_config_ops *ops = v->vdpa->config;

	ops->kick_vq(v->vdpa, vq - v->vqs);
}

static irqreturn_t vhost_vdpa_virtqueue_cb(void *private)
{
	struct vhost_virtqueue *vq = private;
	struct eventfd_ctx *call_ctx = vq->call_ctx;

	if (call_ctx)
		eventfd_signal(call_ctx, 1);

	return IRQ_HANDLED;
}

static void vhost_vdpa_reset(struct vhost_vdpa *v)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;

	ops->set_status(vdpa, 0);
}

static long vhost_vdpa_get_device_id(struct vhost_vdpa *v, u8 __user *argp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u32 device_id;

	device_id = ops->get_device_id(vdpa);

	if (copy_to_user(argp, &device_id, sizeof(device_id)))
		return -EFAULT;

	return 0;
}

static long vhost_vdpa_get_status(struct vhost_vdpa *v, u8 __user *statusp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u8 status;

	status = ops->get_status(vdpa);

	if (copy_to_user(statusp, &status, sizeof(status)))
		return -EFAULT;

	return 0;
}

static long vhost_vdpa_set_status(struct vhost_vdpa *v, u8 __user *statusp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u8 status;

	if (copy_from_user(&status, statusp, sizeof(status)))
		return -EFAULT;

	/*
	 * Userspace shouldn't remove status bits unless it is resetting
	 * the status to 0.
	 */
	if (status != 0 && (ops->get_status(vdpa) & ~status) != 0)
		return -EINVAL;

	ops->set_status(vdpa, status);

	return 0;
}

static int vhost_vdpa_config_validate(struct vhost_vdpa *v,
				      struct vhost_vdpa_config *c)
{
	long size = 0;

	switch (v->virtio_id) {
	case VIRTIO_ID_NET:
		size = sizeof(struct virtio_net_config);
		break;
	}

	if (c->len == 0)
		return -EINVAL;

	if (c->len > size - c->off)
		return -E2BIG;

	return 0;
}

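/*
 * Config space access: userspace passes a struct vhost_vdpa_config header
 * (off + len) followed by the payload buffer. The requested window is
 * bounds-checked against the device's config structure by
 * vhost_vdpa_config_validate() before get_config()/set_config() is called.
 */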
static long vhost_vdpa_get_config(struct vhost_vdpa *v,
				  struct vhost_vdpa_config __user *c)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vhost_vdpa_config config;
	unsigned long size = offsetof(struct vhost_vdpa_config, buf);
	u8 *buf;

	if (copy_from_user(&config, c, size))
		return -EFAULT;
	if (vhost_vdpa_config_validate(v, &config))
		return -EINVAL;
	buf = kvzalloc(config.len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ops->get_config(vdpa, config.off, buf, config.len);

	if (copy_to_user(c->buf, buf, config.len)) {
		kvfree(buf);
		return -EFAULT;
	}

	kvfree(buf);
	return 0;
}

static long vhost_vdpa_set_config(struct vhost_vdpa *v,
				  struct vhost_vdpa_config __user *c)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vhost_vdpa_config config;
	unsigned long size = offsetof(struct vhost_vdpa_config, buf);
	u8 *buf;

	if (copy_from_user(&config, c, size))
		return -EFAULT;
	if (vhost_vdpa_config_validate(v, &config))
		return -EINVAL;
	buf = kvzalloc(config.len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, c->buf, config.len)) {
		kvfree(buf);
		return -EFAULT;
	}

	ops->set_config(vdpa, config.off, buf, config.len);

	kvfree(buf);
	return 0;
}

static long vhost_vdpa_get_features(struct vhost_vdpa *v, u64 __user *featurep)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u64 features;

	features = ops->get_features(vdpa);
	features &= vhost_vdpa_features[v->virtio_id];

	if (copy_to_user(featurep, &features, sizeof(features)))
		return -EFAULT;

	return 0;
}

static long vhost_vdpa_set_features(struct vhost_vdpa *v, u64 __user *featurep)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u64 features;

	/*
	 * It's not allowed to change the features after they have
	 * been negotiated.
	 */
	if (ops->get_status(vdpa) & VIRTIO_CONFIG_S_FEATURES_OK)
		return -EBUSY;

	if (copy_from_user(&features, featurep, sizeof(features)))
		return -EFAULT;

	if (features & ~vhost_vdpa_features[v->virtio_id])
		return -EINVAL;

	if (ops->set_features(vdpa, features))
		return -EINVAL;

	return 0;
}

static long vhost_vdpa_get_vring_num(struct vhost_vdpa *v, u16 __user *argp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u16 num;

	num = ops->get_vq_num_max(vdpa);

	if (copy_to_user(argp, &num, sizeof(num)))
		return -EFAULT;

	return 0;
}

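/*
 * Per-virtqueue ioctls: the first u32 of the argument selects the virtqueue
 * index. VHOST_VDPA_SET_VRING_ENABLE is handled here directly; the generic
 * vring ioctls go through vhost_vring_ioctl() first, and the result is then
 * propagated to the vDPA device via the config ops (set_vq_address,
 * set_vq_state, set_vq_cb, set_vq_num).
 */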
static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
				   void __user *argp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vdpa_callback cb;
	struct vhost_virtqueue *vq;
	struct vhost_vring_state s;
	u32 idx;
	long r;

	r = get_user(idx, (u32 __user *)argp);
	if (r < 0)
		return r;

	if (idx >= v->nvqs)
		return -ENOBUFS;

	idx = array_index_nospec(idx, v->nvqs);
	vq = &v->vqs[idx];

	if (cmd == VHOST_VDPA_SET_VRING_ENABLE) {
		if (copy_from_user(&s, argp, sizeof(s)))
			return -EFAULT;
		ops->set_vq_ready(vdpa, idx, s.num);
		return 0;
	}

	if (cmd == VHOST_GET_VRING_BASE)
		vq->last_avail_idx = ops->get_vq_state(v->vdpa, idx);

	r = vhost_vring_ioctl(&v->vdev, cmd, argp);
	if (r)
		return r;

	switch (cmd) {
	case VHOST_SET_VRING_ADDR:
		if (ops->set_vq_address(vdpa, idx,
					(u64)(uintptr_t)vq->desc,
					(u64)(uintptr_t)vq->avail,
					(u64)(uintptr_t)vq->used))
			r = -EINVAL;
		break;

	case VHOST_SET_VRING_BASE:
		if (ops->set_vq_state(vdpa, idx, vq->last_avail_idx))
			r = -EINVAL;
		break;

	case VHOST_SET_VRING_CALL:
		if (vq->call_ctx) {
			cb.callback = vhost_vdpa_virtqueue_cb;
			cb.private = vq;
		} else {
			cb.callback = NULL;
			cb.private = NULL;
		}
		ops->set_vq_cb(vdpa, idx, &cb);
		break;

	case VHOST_SET_VRING_NUM:
		ops->set_vq_num(vdpa, idx, vq->num);
		break;
	}

	return r;
}

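/*
 * Top-level ioctl dispatcher for the vhost-vdpa character device. A rough,
 * purely illustrative ordering that a userspace driver could follow (the
 * exact sequence is up to userspace, not mandated here):
 *
 *	ioctl(fd, VHOST_GET_FEATURES, &features);
 *	ioctl(fd, VHOST_SET_FEATURES, &features);    // before FEATURES_OK
 *	ioctl(fd, VHOST_SET_VRING_..., ...);         // num/addr/base/call/kick
 *	ioctl(fd, VHOST_VDPA_SET_VRING_ENABLE, &state);
 *	ioctl(fd, VHOST_VDPA_SET_STATUS, &status);
 *
 * Unknown commands fall back to vhost_dev_ioctl() and then to the vring
 * handler above; the dirty-log ioctls are not supported.
 */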
static long vhost_vdpa_unlocked_ioctl(struct file *filep,
				      unsigned int cmd, unsigned long arg)
{
	struct vhost_vdpa *v = filep->private_data;
	struct vhost_dev *d = &v->vdev;
	void __user *argp = (void __user *)arg;
	long r;

	mutex_lock(&d->mutex);

	switch (cmd) {
	case VHOST_VDPA_GET_DEVICE_ID:
		r = vhost_vdpa_get_device_id(v, argp);
		break;
	case VHOST_VDPA_GET_STATUS:
		r = vhost_vdpa_get_status(v, argp);
		break;
	case VHOST_VDPA_SET_STATUS:
		r = vhost_vdpa_set_status(v, argp);
		break;
	case VHOST_VDPA_GET_CONFIG:
		r = vhost_vdpa_get_config(v, argp);
		break;
	case VHOST_VDPA_SET_CONFIG:
		r = vhost_vdpa_set_config(v, argp);
		break;
	case VHOST_GET_FEATURES:
		r = vhost_vdpa_get_features(v, argp);
		break;
	case VHOST_SET_FEATURES:
		r = vhost_vdpa_set_features(v, argp);
		break;
	case VHOST_VDPA_GET_VRING_NUM:
		r = vhost_vdpa_get_vring_num(v, argp);
		break;
	case VHOST_SET_LOG_BASE:
	case VHOST_SET_LOG_FD:
		r = -ENOIOCTLCMD;
		break;
	default:
		r = vhost_dev_ioctl(&v->vdev, cmd, argp);
		if (r == -ENOIOCTLCMD)
			r = vhost_vdpa_vring_ioctl(v, cmd, argp);
		break;
	}

	mutex_unlock(&d->mutex);
	return r;
}

static void vhost_vdpa_iotlb_unmap(struct vhost_vdpa *v, u64 start, u64 last)
{
	struct vhost_dev *dev = &v->vdev;
	struct vhost_iotlb *iotlb = dev->iotlb;
	struct vhost_iotlb_map *map;
	struct page *page;
	unsigned long pfn, pinned;

	while ((map = vhost_iotlb_itree_first(iotlb, start, last)) != NULL) {
		pinned = map->size >> PAGE_SHIFT;
		for (pfn = map->addr >> PAGE_SHIFT;
		     pinned > 0; pfn++, pinned--) {
			page = pfn_to_page(pfn);
			if (map->perm & VHOST_ACCESS_WO)
				set_page_dirty_lock(page);
			unpin_user_page(page);
		}
		atomic64_sub(map->size >> PAGE_SHIFT, &dev->mm->pinned_vm);
		vhost_iotlb_map_free(iotlb, map);
	}
}

static void vhost_vdpa_iotlb_free(struct vhost_vdpa *v)
{
	struct vhost_dev *dev = &v->vdev;

	vhost_vdpa_iotlb_unmap(v, 0ULL, 0ULL - 1);
	kfree(dev->iotlb);
	dev->iotlb = NULL;
}

static int perm_to_iommu_flags(u32 perm)
{
	int flags = 0;

	switch (perm) {
	case VHOST_ACCESS_WO:
		flags |= IOMMU_WRITE;
		break;
	case VHOST_ACCESS_RO:
		flags |= IOMMU_READ;
		break;
	case VHOST_ACCESS_RW:
		flags |= (IOMMU_WRITE | IOMMU_READ);
		break;
	default:
		WARN(1, "invalid vhost IOTLB permission\n");
		break;
	}

	return flags | IOMMU_CACHE;
}

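/*
 * DMA mapping helpers. Three backends are used, in order of preference:
 * a device-specific dma_map/dma_unmap op, a whole-table set_map op that is
 * handed the vhost IOTLB, or, failing both, the IOMMU domain attached in
 * vhost_vdpa_alloc_domain().
 */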
static int vhost_vdpa_map(struct vhost_vdpa *v,
			  u64 iova, u64 size, u64 pa, u32 perm)
{
	struct vhost_dev *dev = &v->vdev;
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	int r = 0;

	r = vhost_iotlb_add_range(dev->iotlb, iova, iova + size - 1,
				  pa, perm);
	if (r)
		return r;

	if (ops->dma_map)
		r = ops->dma_map(vdpa, iova, size, pa, perm);
	else if (ops->set_map)
		r = ops->set_map(vdpa, dev->iotlb);
	else
		r = iommu_map(v->domain, iova, pa, size,
			      perm_to_iommu_flags(perm));

	return r;
}

static void vhost_vdpa_unmap(struct vhost_vdpa *v, u64 iova, u64 size)
{
	struct vhost_dev *dev = &v->vdev;
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;

	vhost_vdpa_iotlb_unmap(v, iova, iova + size - 1);

	if (ops->dma_map)
		ops->dma_unmap(vdpa, iova, size);
	else if (ops->set_map)
		ops->set_map(vdpa, dev->iotlb);
	else
		iommu_unmap(v->domain, iova, size);
}

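/*
 * Handle a VHOST_IOTLB_UPDATE message: pin the userspace range with
 * pin_user_pages() (accounted against RLIMIT_MEMLOCK), coalesce physically
 * contiguous pages into chunks, and map each chunk with vhost_vdpa_map().
 * On failure the whole range is unmapped again.
 */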
static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
					   struct vhost_iotlb_msg *msg)
{
	struct vhost_dev *dev = &v->vdev;
	struct vhost_iotlb *iotlb = dev->iotlb;
	struct page **page_list;
	unsigned long list_size = PAGE_SIZE / sizeof(struct page *);
	unsigned int gup_flags = FOLL_LONGTERM;
	unsigned long npages, cur_base, map_pfn, last_pfn = 0;
	unsigned long locked, lock_limit, pinned, i;
	u64 iova = msg->iova;
	int ret = 0;

	if (vhost_iotlb_itree_first(iotlb, msg->iova,
				    msg->iova + msg->size - 1))
		return -EEXIST;

	page_list = (struct page **) __get_free_page(GFP_KERNEL);
	if (!page_list)
		return -ENOMEM;

	if (msg->perm & VHOST_ACCESS_WO)
		gup_flags |= FOLL_WRITE;

	npages = PAGE_ALIGN(msg->size + (iova & ~PAGE_MASK)) >> PAGE_SHIFT;
	if (!npages)
		return -EINVAL;

	down_read(&dev->mm->mmap_sem);

	locked = atomic64_add_return(npages, &dev->mm->pinned_vm);
	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	if (locked > lock_limit) {
		ret = -ENOMEM;
		goto out;
	}

	cur_base = msg->uaddr & PAGE_MASK;
	iova &= PAGE_MASK;

	while (npages) {
		pinned = min_t(unsigned long, npages, list_size);
		ret = pin_user_pages(cur_base, pinned,
				     gup_flags, page_list, NULL);
		if (ret != pinned)
			goto out;

		if (!last_pfn)
			map_pfn = page_to_pfn(page_list[0]);

		for (i = 0; i < ret; i++) {
			unsigned long this_pfn = page_to_pfn(page_list[i]);
			u64 csize;

			if (last_pfn && (this_pfn != last_pfn + 1)) {
				/* Pin a contiguous chunk of memory */
				csize = (last_pfn - map_pfn + 1) << PAGE_SHIFT;
				if (vhost_vdpa_map(v, iova, csize,
						   map_pfn << PAGE_SHIFT,
						   msg->perm))
					goto out;
				map_pfn = this_pfn;
				iova += csize;
			}

			last_pfn = this_pfn;
		}

		cur_base += ret << PAGE_SHIFT;
		npages -= ret;
	}

	/* Map the remaining chunk */
	ret = vhost_vdpa_map(v, iova, (last_pfn - map_pfn + 1) << PAGE_SHIFT,
			     map_pfn << PAGE_SHIFT, msg->perm);
out:
	if (ret) {
		vhost_vdpa_unmap(v, msg->iova, msg->size);
		atomic64_sub(npages, &dev->mm->pinned_vm);
	}
	up_read(&dev->mm->mmap_sem);
	free_page((unsigned long)page_list);
	return ret;
}

static int vhost_vdpa_process_iotlb_msg(struct vhost_dev *dev,
					struct vhost_iotlb_msg *msg)
{
	struct vhost_vdpa *v = container_of(dev, struct vhost_vdpa, vdev);
	int r = 0;

	r = vhost_dev_check_owner(dev);
	if (r)
		return r;

	switch (msg->type) {
	case VHOST_IOTLB_UPDATE:
		r = vhost_vdpa_process_iotlb_update(v, msg);
		break;
	case VHOST_IOTLB_INVALIDATE:
		vhost_vdpa_unmap(v, msg->iova, msg->size);
		break;
	default:
		r = -EINVAL;
		break;
	}

	return r;
}

static ssize_t vhost_vdpa_chr_write_iter(struct kiocb *iocb,
					 struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct vhost_vdpa *v = file->private_data;
	struct vhost_dev *dev = &v->vdev;

	return vhost_chr_write_iter(dev, from);
}

static int vhost_vdpa_alloc_domain(struct vhost_vdpa *v)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct device *dma_dev = vdpa_get_dma_dev(vdpa);
	struct bus_type *bus;
	int ret;

	/* Device wants to do DMA by itself */
	if (ops->set_map || ops->dma_map)
		return 0;

	bus = dma_dev->bus;
	if (!bus)
		return -EFAULT;

	if (!iommu_capable(bus, IOMMU_CAP_CACHE_COHERENCY))
		return -ENOTSUPP;

	v->domain = iommu_domain_alloc(bus);
	if (!v->domain)
		return -EIO;

	ret = iommu_attach_device(v->domain, dma_dev);
	if (ret)
		goto err_attach;

	return 0;

err_attach:
	iommu_domain_free(v->domain);
	return ret;
}

static void vhost_vdpa_free_domain(struct vhost_vdpa *v)
{
	struct vdpa_device *vdpa = v->vdpa;
	struct device *dma_dev = vdpa_get_dma_dev(vdpa);

	if (v->domain) {
		iommu_detach_device(v->domain, dma_dev);
		iommu_domain_free(v->domain);
	}

	v->domain = NULL;
}

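/*
 * Character device lifecycle: only one open of the device node is allowed at
 * a time (tracked by v->opened). open() resets the device and initializes the
 * vhost device, its IOTLB and, if needed, an IOMMU domain; release() tears
 * all of that down and completes v->completion so vhost_vdpa_remove() can
 * finish.
 */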
static int vhost_vdpa_open(struct inode *inode, struct file *filep)
{
	struct vhost_vdpa *v;
	struct vhost_dev *dev;
	struct vhost_virtqueue **vqs;
	int nvqs, i, r, opened;

	v = container_of(inode->i_cdev, struct vhost_vdpa, cdev);

	opened = atomic_cmpxchg(&v->opened, 0, 1);
	if (opened)
		return -EBUSY;

	nvqs = v->nvqs;
	vhost_vdpa_reset(v);

	vqs = kmalloc_array(nvqs, sizeof(*vqs), GFP_KERNEL);
	if (!vqs) {
		r = -ENOMEM;
		goto err;
	}

	dev = &v->vdev;
	for (i = 0; i < nvqs; i++) {
		vqs[i] = &v->vqs[i];
		vqs[i]->handle_kick = handle_vq_kick;
	}
	vhost_dev_init(dev, vqs, nvqs, 0, 0, 0,
		       vhost_vdpa_process_iotlb_msg);

	dev->iotlb = vhost_iotlb_alloc(0, 0);
	if (!dev->iotlb) {
		r = -ENOMEM;
		goto err_init_iotlb;
	}

	r = vhost_vdpa_alloc_domain(v);
	if (r)
		goto err_init_iotlb;

	filep->private_data = v;

	return 0;

err_init_iotlb:
	vhost_dev_cleanup(&v->vdev);
err:
	atomic_dec(&v->opened);
	return r;
}

static int vhost_vdpa_release(struct inode *inode, struct file *filep)
{
	struct vhost_vdpa *v = filep->private_data;
	struct vhost_dev *d = &v->vdev;

	mutex_lock(&d->mutex);
	filep->private_data = NULL;
	vhost_vdpa_reset(v);
	vhost_dev_stop(&v->vdev);
	vhost_vdpa_iotlb_free(v);
	vhost_vdpa_free_domain(v);
	vhost_dev_cleanup(&v->vdev);
	kfree(v->vdev.vqs);
	mutex_unlock(&d->mutex);

	atomic_dec(&v->opened);
	complete(&v->completion);

	return 0;
}

static const struct file_operations vhost_vdpa_fops = {
	.owner		= THIS_MODULE,
	.open		= vhost_vdpa_open,
	.release	= vhost_vdpa_release,
	.write_iter	= vhost_vdpa_chr_write_iter,
	.unlocked_ioctl	= vhost_vdpa_unlocked_ioctl,
	.compat_ioctl	= compat_ptr_ioctl,
};

static void vhost_vdpa_release_dev(struct device *device)
{
	struct vhost_vdpa *v =
	       container_of(device, struct vhost_vdpa, dev);

	ida_simple_remove(&vhost_vdpa_ida, v->minor);
	kfree(v->vqs);
	kfree(v);
}

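/*
 * vDPA bus glue: probe() creates one vhost-vdpa character device per vDPA
 * device (currently network devices only); remove() deletes it and waits,
 * via v->completion, for any remaining open file descriptor to be released.
 */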
static int vhost_vdpa_probe(struct vdpa_device *vdpa)
{
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vhost_vdpa *v;
	int minor, nvqs = VHOST_VDPA_VQ_MAX;
	int r;

	/* Currently, we only accept network devices. */
	if (ops->get_device_id(vdpa) != VIRTIO_ID_NET)
		return -ENOTSUPP;

	v = kzalloc(sizeof(*v), GFP_KERNEL | __GFP_RETRY_MAYFAIL);
	if (!v)
		return -ENOMEM;

	minor = ida_simple_get(&vhost_vdpa_ida, 0,
			       VHOST_VDPA_DEV_MAX, GFP_KERNEL);
	if (minor < 0) {
		kfree(v);
		return minor;
	}

	atomic_set(&v->opened, 0);
	v->minor = minor;
	v->vdpa = vdpa;
	v->nvqs = nvqs;
	v->virtio_id = ops->get_device_id(vdpa);

	device_initialize(&v->dev);
	v->dev.release = vhost_vdpa_release_dev;
	v->dev.parent = &vdpa->dev;
	v->dev.devt = MKDEV(MAJOR(vhost_vdpa_major), minor);
	v->vqs = kmalloc_array(nvqs, sizeof(struct vhost_virtqueue),
			       GFP_KERNEL);
	if (!v->vqs) {
		r = -ENOMEM;
		goto err;
	}

	r = dev_set_name(&v->dev, "vhost-vdpa-%u", minor);
	if (r)
		goto err;

	cdev_init(&v->cdev, &vhost_vdpa_fops);
	v->cdev.owner = THIS_MODULE;

	r = cdev_device_add(&v->cdev, &v->dev);
	if (r)
		goto err;

	init_completion(&v->completion);
	vdpa_set_drvdata(vdpa, v);

	return 0;

err:
	put_device(&v->dev);
	return r;
}

static void vhost_vdpa_remove(struct vdpa_device *vdpa)
{
	struct vhost_vdpa *v = vdpa_get_drvdata(vdpa);
	int opened;

	cdev_device_del(&v->cdev, &v->dev);

	do {
		opened = atomic_cmpxchg(&v->opened, 0, 1);
		if (!opened)
			break;
		wait_for_completion(&v->completion);
	} while (1);

	put_device(&v->dev);
}

static struct vdpa_driver vhost_vdpa_driver = {
	.driver = {
		.name	= "vhost_vdpa",
	},
	.probe	= vhost_vdpa_probe,
	.remove	= vhost_vdpa_remove,
};

static int __init vhost_vdpa_init(void)
{
	int r;

	r = alloc_chrdev_region(&vhost_vdpa_major, 0, VHOST_VDPA_DEV_MAX,
				"vhost-vdpa");
	if (r)
		goto err_alloc_chrdev;

	r = vdpa_register_driver(&vhost_vdpa_driver);
	if (r)
		goto err_vdpa_register_driver;

	return 0;

err_vdpa_register_driver:
	unregister_chrdev_region(vhost_vdpa_major, VHOST_VDPA_DEV_MAX);
err_alloc_chrdev:
	return r;
}
module_init(vhost_vdpa_init);

static void __exit vhost_vdpa_exit(void)
{
	vdpa_unregister_driver(&vhost_vdpa_driver);
	unregister_chrdev_region(vhost_vdpa_major, VHOST_VDPA_DEV_MAX);
}
module_exit(vhost_vdpa_exit);

MODULE_VERSION("0.0.1");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("vDPA-based vhost backend for virtio");