// SPDX-License-Identifier: GPL-2.0-only
/*
 * VDPA device simulator core.
 *
 * Copyright (c) 2020, Red Hat Inc. All rights reserved.
 *     Author: Jason Wang <jasowang@redhat.com>
 *
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/dma-map-ops.h>
#include <linux/vringh.h>
#include <linux/vdpa.h>
#include <linux/vhost_iotlb.h>
#include <uapi/linux/vdpa.h>
#include <uapi/linux/vhost_types.h>

#include "vdpa_sim.h"

#define DRV_VERSION  "0.1"
#define DRV_AUTHOR   "Jason Wang <jasowang@redhat.com>"
#define DRV_DESC     "vDPA Device Simulator core"
#define DRV_LICENSE  "GPL v2"

static int batch_mapping = 1;
module_param(batch_mapping, int, 0444);
MODULE_PARM_DESC(batch_mapping, "Batched mapping 1 - Enable; 0 - Disable");

static int max_iotlb_entries = 2048;
module_param(max_iotlb_entries, int, 0444);
MODULE_PARM_DESC(max_iotlb_entries,
		 "Maximum number of iotlb entries for each address space. 0 means unlimited. (default: 2048)");

static bool use_va = true;
module_param(use_va, bool, 0444);
MODULE_PARM_DESC(use_va, "Enable/disable the device's ability to use VA");

#define VDPASIM_QUEUE_ALIGN PAGE_SIZE
#define VDPASIM_QUEUE_MAX 256
#define VDPASIM_VENDOR_ID 0

struct vdpasim_mm_work {
	struct kthread_work work;
	struct vdpasim *vdpasim;
	struct mm_struct *mm_to_bind;
	int ret;
};

static void vdpasim_mm_work_fn(struct kthread_work *work)
{
	struct vdpasim_mm_work *mm_work =
		container_of(work, struct vdpasim_mm_work, work);
	struct vdpasim *vdpasim = mm_work->vdpasim;

	mm_work->ret = 0;

	//TODO: should we attach the cgroup of the mm owner?
	vdpasim->mm_bound = mm_work->mm_to_bind;
}

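/*
 * Queue the mm-bind work item on the device's kthread worker and wait for it
 * to finish, so later work items observe the updated mm_bound pointer.
 */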
static void vdpasim_worker_change_mm_sync(struct vdpasim *vdpasim,
					  struct vdpasim_mm_work *mm_work)
{
	struct kthread_work *work = &mm_work->work;

	kthread_init_work(work, vdpasim_mm_work_fn);
	kthread_queue_work(vdpasim->worker, work);

	kthread_flush_work(work);
}

static struct vdpasim *vdpa_to_sim(struct vdpa_device *vdpa)
{
	return container_of(vdpa, struct vdpasim, vdpa);
}

static void vdpasim_vq_notify(struct vringh *vring)
{
	struct vdpasim_virtqueue *vq =
		container_of(vring, struct vdpasim_virtqueue, vring);

	if (!vq->cb)
		return;

	vq->cb(vq->private);
}

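/*
 * (Re)initialize a virtqueue's vringh instance from the addresses programmed
 * by the driver. The rings are accessed through the IOTLB, either by VA when
 * a userspace mm is bound or by translated addresses otherwise.
 */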
static void vdpasim_queue_ready(struct vdpasim *vdpasim, unsigned int idx)
{
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
	uint16_t last_avail_idx = vq->vring.last_avail_idx;
	struct vring_desc *desc = (struct vring_desc *)
				  (uintptr_t)vq->desc_addr;
	struct vring_avail *avail = (struct vring_avail *)
				    (uintptr_t)vq->driver_addr;
	struct vring_used *used = (struct vring_used *)
				  (uintptr_t)vq->device_addr;

	if (use_va && vdpasim->mm_bound) {
		vringh_init_iotlb_va(&vq->vring, vdpasim->features, vq->num,
				     true, desc, avail, used);
	} else {
		vringh_init_iotlb(&vq->vring, vdpasim->features, vq->num,
				  true, desc, avail, used);
	}

	vq->vring.last_avail_idx = last_avail_idx;

	/*
	 * Since vdpa_sim does not support receive inflight descriptors as a
	 * destination of a migration, let's set both avail_idx and used_idx
	 * the same at vq start. This is how vhost-user works in a
	 * VHOST_SET_VRING_BASE call.
	 *
	 * Although the simple fix is to set last_used_idx at
	 * vdpasim_set_vq_state, it would be reset at vdpasim_queue_ready.
	 */
	vq->vring.last_used_idx = last_avail_idx;
	vq->vring.notify = vdpasim_vq_notify;
}

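/* Return a virtqueue to its initial, unconfigured state. */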
static void vdpasim_vq_reset(struct vdpasim *vdpasim,
			     struct vdpasim_virtqueue *vq)
{
	vq->ready = false;
	vq->desc_addr = 0;
	vq->driver_addr = 0;
	vq->device_addr = 0;
	vq->cb = NULL;
	vq->private = NULL;
	vringh_init_iotlb(&vq->vring, vdpasim->dev_attr.supported_features,
			  VDPASIM_QUEUE_MAX, false, NULL, NULL, NULL);

	vq->vring.notify = NULL;
}

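/*
 * Reset all virtqueues and, when VDPA_RESET_F_CLEAN_MAP is set, restore each
 * address space to the default 1:1 passthrough mapping.
 */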
static void vdpasim_do_reset(struct vdpasim *vdpasim, u32 flags)
{
	int i;

	spin_lock(&vdpasim->iommu_lock);

	for (i = 0; i < vdpasim->dev_attr.nvqs; i++) {
		vdpasim_vq_reset(vdpasim, &vdpasim->vqs[i]);
		vringh_set_iotlb(&vdpasim->vqs[i].vring, &vdpasim->iommu[0],
				 &vdpasim->iommu_lock);
	}

	if (flags & VDPA_RESET_F_CLEAN_MAP) {
		for (i = 0; i < vdpasim->dev_attr.nas; i++) {
			vhost_iotlb_reset(&vdpasim->iommu[i]);
			vhost_iotlb_add_range(&vdpasim->iommu[i], 0, ULONG_MAX,
					      0, VHOST_MAP_RW);
			vdpasim->iommu_pt[i] = true;
		}
	}

	vdpasim->running = false;
	spin_unlock(&vdpasim->iommu_lock);

	vdpasim->features = 0;
	vdpasim->status = 0;
	++vdpasim->generation;
}

static const struct vdpa_config_ops vdpasim_config_ops;
static const struct vdpa_config_ops vdpasim_batch_config_ops;

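/*
 * Run the device-specific work function, temporarily switching the kthread
 * to the bound userspace mm when the device operates on virtual addresses.
 */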
static void vdpasim_work_fn(struct kthread_work *work)
{
	struct vdpasim *vdpasim = container_of(work, struct vdpasim, work);
	struct mm_struct *mm = vdpasim->mm_bound;

	if (use_va && mm) {
		if (!mmget_not_zero(mm))
			return;
		kthread_use_mm(mm);
	}

	vdpasim->dev_attr.work_fn(vdpasim);

	if (use_va && mm) {
		kthread_unuse_mm(mm);
		mmput(mm);
	}
}

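/*
 * Allocate and initialize a simulated vDPA device from the device attributes
 * and the initial configuration provided by the management interface.
 */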
struct vdpasim *vdpasim_create(struct vdpasim_dev_attr *dev_attr,
			       const struct vdpa_dev_set_config *config)
{
	const struct vdpa_config_ops *ops;
	struct vdpa_device *vdpa;
	struct vdpasim *vdpasim;
	struct device *dev;
	int i, ret = -ENOMEM;

	if (!dev_attr->alloc_size)
		return ERR_PTR(-EINVAL);

	if (config->mask & BIT_ULL(VDPA_ATTR_DEV_FEATURES)) {
		if (config->device_features &
		    ~dev_attr->supported_features)
			return ERR_PTR(-EINVAL);
		dev_attr->supported_features =
			config->device_features;
	}

	if (batch_mapping)
		ops = &vdpasim_batch_config_ops;
	else
		ops = &vdpasim_config_ops;

	vdpa = __vdpa_alloc_device(NULL, ops,
				   dev_attr->ngroups, dev_attr->nas,
				   dev_attr->alloc_size,
				   dev_attr->name, use_va);
	if (IS_ERR(vdpa)) {
		ret = PTR_ERR(vdpa);
		goto err_alloc;
	}

	vdpasim = vdpa_to_sim(vdpa);
	vdpasim->dev_attr = *dev_attr;
	dev = &vdpasim->vdpa.dev;

	kthread_init_work(&vdpasim->work, vdpasim_work_fn);
	vdpasim->worker = kthread_create_worker(0, "vDPA sim worker: %s",
						dev_attr->name);
	if (IS_ERR(vdpasim->worker))
		goto err_iommu;

	mutex_init(&vdpasim->mutex);
	spin_lock_init(&vdpasim->iommu_lock);

	dev->dma_mask = &dev->coherent_dma_mask;
	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
		goto err_iommu;
	vdpasim->vdpa.mdev = dev_attr->mgmt_dev;

	vdpasim->config = kzalloc(dev_attr->config_size, GFP_KERNEL);
	if (!vdpasim->config)
		goto err_iommu;

	vdpasim->vqs = kcalloc(dev_attr->nvqs, sizeof(struct vdpasim_virtqueue),
			       GFP_KERNEL);
	if (!vdpasim->vqs)
		goto err_iommu;

	vdpasim->iommu = kmalloc_array(vdpasim->dev_attr.nas,
				       sizeof(*vdpasim->iommu), GFP_KERNEL);
	if (!vdpasim->iommu)
		goto err_iommu;

	vdpasim->iommu_pt = kmalloc_array(vdpasim->dev_attr.nas,
					  sizeof(*vdpasim->iommu_pt), GFP_KERNEL);
	if (!vdpasim->iommu_pt)
		goto err_iommu;

	for (i = 0; i < vdpasim->dev_attr.nas; i++) {
		vhost_iotlb_init(&vdpasim->iommu[i], max_iotlb_entries, 0);
		vhost_iotlb_add_range(&vdpasim->iommu[i], 0, ULONG_MAX, 0,
				      VHOST_MAP_RW);
		vdpasim->iommu_pt[i] = true;
	}

	for (i = 0; i < dev_attr->nvqs; i++)
		vringh_set_iotlb(&vdpasim->vqs[i].vring, &vdpasim->iommu[0],
				 &vdpasim->iommu_lock);

	vdpasim->vdpa.dma_dev = dev;

	return vdpasim;

err_iommu:
	put_device(dev);
err_alloc:
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(vdpasim_create);

void vdpasim_schedule_work(struct vdpasim *vdpasim)
{
	kthread_queue_work(vdpasim->worker, &vdpasim->work);
}
EXPORT_SYMBOL_GPL(vdpasim_schedule_work);

static int vdpasim_set_vq_address(struct vdpa_device *vdpa, u16 idx,
				  u64 desc_area, u64 driver_area,
				  u64 device_area)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	vq->desc_addr = desc_area;
	vq->driver_addr = driver_area;
	vq->device_addr = device_area;

	return 0;
}

static void vdpasim_set_vq_num(struct vdpa_device *vdpa, u16 idx, u32 num)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	vq->num = num;
}

static u16 vdpasim_get_vq_size(struct vdpa_device *vdpa, u16 idx)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	if (vdpasim->status & VIRTIO_CONFIG_S_DRIVER_OK)
		return vq->num;
	else
		return VDPASIM_QUEUE_MAX;
}

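/*
 * A kick received while the device is suspended (but still DRIVER_OK) is
 * recorded and replayed on resume; otherwise, if the queue is ready, the
 * device work function is scheduled.
 */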
static void vdpasim_kick_vq(struct vdpa_device *vdpa, u16 idx)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	if (!vdpasim->running &&
	    (vdpasim->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
		vdpasim->pending_kick = true;
		return;
	}

	if (vq->ready)
		vdpasim_schedule_work(vdpasim);
}

static void vdpasim_set_vq_cb(struct vdpa_device *vdpa, u16 idx,
			      struct vdpa_callback *cb)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	vq->cb = cb->callback;
	vq->private = cb->private;
}

static void vdpasim_set_vq_ready(struct vdpa_device *vdpa, u16 idx, bool ready)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
	bool old_ready;

	mutex_lock(&vdpasim->mutex);
	old_ready = vq->ready;
	vq->ready = ready;
	if (vq->ready && !old_ready) {
		vdpasim_queue_ready(vdpasim, idx);
	}
	mutex_unlock(&vdpasim->mutex);
}

static bool vdpasim_get_vq_ready(struct vdpa_device *vdpa, u16 idx)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	return vq->ready;
}

static int vdpasim_set_vq_state(struct vdpa_device *vdpa, u16 idx,
				const struct vdpa_vq_state *state)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
	struct vringh *vrh = &vq->vring;

	mutex_lock(&vdpasim->mutex);
	vrh->last_avail_idx = state->split.avail_index;
	mutex_unlock(&vdpasim->mutex);

	return 0;
}

static int vdpasim_get_vq_state(struct vdpa_device *vdpa, u16 idx,
				struct vdpa_vq_state *state)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
	struct vringh *vrh = &vq->vring;

	state->split.avail_index = vrh->last_avail_idx;

	return 0;
}

static int vdpasim_get_vq_stats(struct vdpa_device *vdpa, u16 idx,
				struct sk_buff *msg,
				struct netlink_ext_ack *extack)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	if (vdpasim->dev_attr.get_stats)
		return vdpasim->dev_attr.get_stats(vdpasim, idx,
						   msg, extack);
	return -EOPNOTSUPP;
}

static u32 vdpasim_get_vq_align(struct vdpa_device *vdpa)
{
	return VDPASIM_QUEUE_ALIGN;
}

static u32 vdpasim_get_vq_group(struct vdpa_device *vdpa, u16 idx)
{
	/* RX and TX belong to group 0, CVQ belongs to group 1 */
	if (idx == 2)
		return 1;
	else
		return 0;
}

static u64 vdpasim_get_device_features(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	return vdpasim->dev_attr.supported_features;
}

static u64 vdpasim_get_backend_features(const struct vdpa_device *vdpa)
{
	return BIT_ULL(VHOST_BACKEND_F_ENABLE_AFTER_DRIVER_OK);
}

static int vdpasim_set_driver_features(struct vdpa_device *vdpa, u64 features)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	/* DMA mapping must be done by driver */
	if (!(features & (1ULL << VIRTIO_F_ACCESS_PLATFORM)))
		return -EINVAL;

	vdpasim->features = features & vdpasim->dev_attr.supported_features;

	return 0;
}

static u64 vdpasim_get_driver_features(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	return vdpasim->features;
}

static void vdpasim_set_config_cb(struct vdpa_device *vdpa,
				  struct vdpa_callback *cb)
{
	/* We don't support config interrupt */
}

static u16 vdpasim_get_vq_num_max(struct vdpa_device *vdpa)
{
	return VDPASIM_QUEUE_MAX;
}

static u32 vdpasim_get_device_id(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	return vdpasim->dev_attr.id;
}

static u32 vdpasim_get_vendor_id(struct vdpa_device *vdpa)
{
	return VDPASIM_VENDOR_ID;
}

static u8 vdpasim_get_status(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	u8 status;

	mutex_lock(&vdpasim->mutex);
	status = vdpasim->status;
	mutex_unlock(&vdpasim->mutex);

	return status;
}

static void vdpasim_set_status(struct vdpa_device *vdpa, u8 status)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	mutex_lock(&vdpasim->mutex);
	vdpasim->status = status;
	vdpasim->running = (status & VIRTIO_CONFIG_S_DRIVER_OK) != 0;
	mutex_unlock(&vdpasim->mutex);
}

static int vdpasim_compat_reset(struct vdpa_device *vdpa, u32 flags)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	mutex_lock(&vdpasim->mutex);
	vdpasim_do_reset(vdpasim, flags);
	mutex_unlock(&vdpasim->mutex);

	return 0;
}

static int vdpasim_reset(struct vdpa_device *vdpa)
{
	return vdpasim_compat_reset(vdpa, 0);
}

static int vdpasim_suspend(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	mutex_lock(&vdpasim->mutex);
	vdpasim->running = false;
	mutex_unlock(&vdpasim->mutex);

	return 0;
}

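/*
 * Resume the device and replay any kicks that were recorded while it was
 * suspended.
 */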
static int vdpasim_resume(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	int i;

	mutex_lock(&vdpasim->mutex);
	vdpasim->running = true;

	if (vdpasim->pending_kick) {
		/* Process pending descriptors */
		for (i = 0; i < vdpasim->dev_attr.nvqs; ++i)
			vdpasim_kick_vq(vdpa, i);

		vdpasim->pending_kick = false;
	}

	mutex_unlock(&vdpasim->mutex);

	return 0;
}

static size_t vdpasim_get_config_size(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	return vdpasim->dev_attr.config_size;
}

static void vdpasim_get_config(struct vdpa_device *vdpa, unsigned int offset,
			       void *buf, unsigned int len)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	if (offset + len > vdpasim->dev_attr.config_size)
		return;

	if (vdpasim->dev_attr.get_config)
		vdpasim->dev_attr.get_config(vdpasim, vdpasim->config);

	memcpy(buf, vdpasim->config + offset, len);
}

static void vdpasim_set_config(struct vdpa_device *vdpa, unsigned int offset,
			       const void *buf, unsigned int len)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	if (offset + len > vdpasim->dev_attr.config_size)
		return;

	memcpy(vdpasim->config + offset, buf, len);

	if (vdpasim->dev_attr.set_config)
		vdpasim->dev_attr.set_config(vdpasim, vdpasim->config);
}

static u32 vdpasim_get_generation(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	return vdpasim->generation;
}

static struct vdpa_iova_range vdpasim_get_iova_range(struct vdpa_device *vdpa)
{
	struct vdpa_iova_range range = {
		.first = 0ULL,
		.last = ULLONG_MAX,
	};

	return range;
}

static int vdpasim_set_group_asid(struct vdpa_device *vdpa, unsigned int group,
				  unsigned int asid)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vhost_iotlb *iommu;
	int i;

	if (group > vdpasim->dev_attr.ngroups)
		return -EINVAL;

	if (asid >= vdpasim->dev_attr.nas)
		return -EINVAL;

	iommu = &vdpasim->iommu[asid];

	mutex_lock(&vdpasim->mutex);

	for (i = 0; i < vdpasim->dev_attr.nvqs; i++)
		if (vdpasim_get_vq_group(vdpa, i) == group)
			vringh_set_iotlb(&vdpasim->vqs[i].vring, iommu,
					 &vdpasim->iommu_lock);

	mutex_unlock(&vdpasim->mutex);

	return 0;
}

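/*
 * Replace the whole IOTLB of an address space with the mappings passed in
 * from the vhost layer (used when batched mapping is enabled).
 */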
static int vdpasim_set_map(struct vdpa_device *vdpa, unsigned int asid,
			   struct vhost_iotlb *iotlb)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vhost_iotlb_map *map;
	struct vhost_iotlb *iommu;
	u64 start = 0ULL, last = 0ULL - 1;
	int ret;

	if (asid >= vdpasim->dev_attr.nas)
		return -EINVAL;

	spin_lock(&vdpasim->iommu_lock);

	iommu = &vdpasim->iommu[asid];
	vhost_iotlb_reset(iommu);
	vdpasim->iommu_pt[asid] = false;

	for (map = vhost_iotlb_itree_first(iotlb, start, last); map;
	     map = vhost_iotlb_itree_next(map, start, last)) {
		ret = vhost_iotlb_add_range(iommu, map->start,
					    map->last, map->addr, map->perm);
		if (ret)
			goto err;
	}
	spin_unlock(&vdpasim->iommu_lock);
	return 0;

err:
	vhost_iotlb_reset(iommu);
	spin_unlock(&vdpasim->iommu_lock);
	return ret;
}

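/* Restore an address space to the default 1:1 passthrough mapping. */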
static int vdpasim_reset_map(struct vdpa_device *vdpa, unsigned int asid)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	if (asid >= vdpasim->dev_attr.nas)
		return -EINVAL;

	spin_lock(&vdpasim->iommu_lock);
	if (vdpasim->iommu_pt[asid])
		goto out;

	vhost_iotlb_reset(&vdpasim->iommu[asid]);
	vhost_iotlb_add_range(&vdpasim->iommu[asid], 0, ULONG_MAX,
			      0, VHOST_MAP_RW);
	vdpasim->iommu_pt[asid] = true;
out:
	spin_unlock(&vdpasim->iommu_lock);
	return 0;
}

static int vdpasim_bind_mm(struct vdpa_device *vdpa, struct mm_struct *mm)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_mm_work mm_work;

	mm_work.vdpasim = vdpasim;
	mm_work.mm_to_bind = mm;

	vdpasim_worker_change_mm_sync(vdpasim, &mm_work);

	return mm_work.ret;
}

static void vdpasim_unbind_mm(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_mm_work mm_work;

	mm_work.vdpasim = vdpasim;
	mm_work.mm_to_bind = NULL;

	vdpasim_worker_change_mm_sync(vdpasim, &mm_work);
}

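/*
 * Incremental mapping update: the first mapping after passthrough mode drops
 * the 1:1 mapping before the new range is inserted.
 */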
static int vdpasim_dma_map(struct vdpa_device *vdpa, unsigned int asid,
			   u64 iova, u64 size,
			   u64 pa, u32 perm, void *opaque)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	int ret;

	if (asid >= vdpasim->dev_attr.nas)
		return -EINVAL;

	spin_lock(&vdpasim->iommu_lock);
	if (vdpasim->iommu_pt[asid]) {
		vhost_iotlb_reset(&vdpasim->iommu[asid]);
		vdpasim->iommu_pt[asid] = false;
	}
	ret = vhost_iotlb_add_range_ctx(&vdpasim->iommu[asid], iova,
					iova + size - 1, pa, perm, opaque);
	spin_unlock(&vdpasim->iommu_lock);

	return ret;
}

static int vdpasim_dma_unmap(struct vdpa_device *vdpa, unsigned int asid,
			     u64 iova, u64 size)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	if (asid >= vdpasim->dev_attr.nas)
		return -EINVAL;

	if (vdpasim->iommu_pt[asid]) {
		vhost_iotlb_reset(&vdpasim->iommu[asid]);
		vdpasim->iommu_pt[asid] = false;
	}

	spin_lock(&vdpasim->iommu_lock);
	vhost_iotlb_del_range(&vdpasim->iommu[asid], iova, iova + size - 1);
	spin_unlock(&vdpasim->iommu_lock);

	return 0;
}

static void vdpasim_free(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	int i;

	kthread_cancel_work_sync(&vdpasim->work);
	kthread_destroy_worker(vdpasim->worker);

	for (i = 0; i < vdpasim->dev_attr.nvqs; i++) {
		vringh_kiov_cleanup(&vdpasim->vqs[i].out_iov);
		vringh_kiov_cleanup(&vdpasim->vqs[i].in_iov);
	}

	vdpasim->dev_attr.free(vdpasim);

	for (i = 0; i < vdpasim->dev_attr.nas; i++)
		vhost_iotlb_reset(&vdpasim->iommu[i]);
	kfree(vdpasim->iommu);
	kfree(vdpasim->iommu_pt);
	kfree(vdpasim->vqs);
	kfree(vdpasim->config);
}

static const struct vdpa_config_ops vdpasim_config_ops = {
	.set_vq_address         = vdpasim_set_vq_address,
	.set_vq_num             = vdpasim_set_vq_num,
	.kick_vq                = vdpasim_kick_vq,
	.set_vq_cb              = vdpasim_set_vq_cb,
	.set_vq_ready           = vdpasim_set_vq_ready,
	.get_vq_ready           = vdpasim_get_vq_ready,
	.set_vq_state           = vdpasim_set_vq_state,
	.get_vendor_vq_stats    = vdpasim_get_vq_stats,
	.get_vq_state           = vdpasim_get_vq_state,
	.get_vq_align           = vdpasim_get_vq_align,
	.get_vq_group           = vdpasim_get_vq_group,
	.get_device_features    = vdpasim_get_device_features,
	.get_backend_features   = vdpasim_get_backend_features,
	.set_driver_features    = vdpasim_set_driver_features,
	.get_driver_features    = vdpasim_get_driver_features,
	.set_config_cb          = vdpasim_set_config_cb,
	.get_vq_num_max         = vdpasim_get_vq_num_max,
	.get_vq_size            = vdpasim_get_vq_size,
	.get_device_id          = vdpasim_get_device_id,
	.get_vendor_id          = vdpasim_get_vendor_id,
	.get_status             = vdpasim_get_status,
	.set_status             = vdpasim_set_status,
	.reset                  = vdpasim_reset,
	.compat_reset           = vdpasim_compat_reset,
	.suspend                = vdpasim_suspend,
	.resume                 = vdpasim_resume,
	.get_config_size        = vdpasim_get_config_size,
	.get_config             = vdpasim_get_config,
	.set_config             = vdpasim_set_config,
	.get_generation         = vdpasim_get_generation,
	.get_iova_range         = vdpasim_get_iova_range,
	.set_group_asid         = vdpasim_set_group_asid,
	.dma_map                = vdpasim_dma_map,
	.dma_unmap              = vdpasim_dma_unmap,
	.reset_map              = vdpasim_reset_map,
	.bind_mm                = vdpasim_bind_mm,
	.unbind_mm              = vdpasim_unbind_mm,
	.free                   = vdpasim_free,
};

static const struct vdpa_config_ops vdpasim_batch_config_ops = {
	.set_vq_address         = vdpasim_set_vq_address,
	.set_vq_num             = vdpasim_set_vq_num,
	.kick_vq                = vdpasim_kick_vq,
	.set_vq_cb              = vdpasim_set_vq_cb,
	.set_vq_ready           = vdpasim_set_vq_ready,
	.get_vq_ready           = vdpasim_get_vq_ready,
	.set_vq_state           = vdpasim_set_vq_state,
	.get_vendor_vq_stats    = vdpasim_get_vq_stats,
	.get_vq_state           = vdpasim_get_vq_state,
	.get_vq_align           = vdpasim_get_vq_align,
	.get_vq_group           = vdpasim_get_vq_group,
	.get_device_features    = vdpasim_get_device_features,
	.get_backend_features   = vdpasim_get_backend_features,
	.set_driver_features    = vdpasim_set_driver_features,
	.get_driver_features    = vdpasim_get_driver_features,
	.set_config_cb          = vdpasim_set_config_cb,
	.get_vq_num_max         = vdpasim_get_vq_num_max,
	.get_device_id          = vdpasim_get_device_id,
	.get_vendor_id          = vdpasim_get_vendor_id,
	.get_status             = vdpasim_get_status,
	.set_status             = vdpasim_set_status,
	.reset                  = vdpasim_reset,
	.compat_reset           = vdpasim_compat_reset,
	.suspend                = vdpasim_suspend,
	.resume                 = vdpasim_resume,
	.get_config_size        = vdpasim_get_config_size,
	.get_config             = vdpasim_get_config,
	.set_config             = vdpasim_set_config,
	.get_generation         = vdpasim_get_generation,
	.get_iova_range         = vdpasim_get_iova_range,
	.set_group_asid         = vdpasim_set_group_asid,
	.set_map                = vdpasim_set_map,
	.reset_map              = vdpasim_reset_map,
	.bind_mm                = vdpasim_bind_mm,
	.unbind_mm              = vdpasim_unbind_mm,
	.free                   = vdpasim_free,
};

MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE(DRV_LICENSE);
MODULE_AUTHOR(DRV_AUTHOR);
MODULE_DESCRIPTION(DRV_DESC);