// SPDX-License-Identifier: GPL-2.0-only
/*
 * VDPA device simulator core.
 *
 * Copyright (c) 2020, Red Hat Inc. All rights reserved.
 *     Author: Jason Wang <jasowang@redhat.com>
 *
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/dma-map-ops.h>
#include <linux/vringh.h>
#include <linux/vdpa.h>
#include <linux/vhost_iotlb.h>

#include "vdpa_sim.h"
#define DRV_VERSION  "0.1"
#define DRV_AUTHOR   "Jason Wang <jasowang@redhat.com>"
#define DRV_DESC     "vDPA Device Simulator core"
#define DRV_LICENSE  "GPL v2"
static int batch_mapping = 1;
module_param(batch_mapping, int, 0444);
MODULE_PARM_DESC(batch_mapping, "Batched mapping: 1 - Enable; 0 - Disable");

static int max_iotlb_entries = 2048;
module_param(max_iotlb_entries, int, 0444);
MODULE_PARM_DESC(max_iotlb_entries,
		 "Maximum number of iotlb entries. 0 means unlimited. (default: 2048)");
#define VDPASIM_QUEUE_ALIGN PAGE_SIZE
#define VDPASIM_QUEUE_MAX 256
#define VDPASIM_VENDOR_ID 0
static struct vdpasim *vdpa_to_sim(struct vdpa_device *vdpa)
{
	return container_of(vdpa, struct vdpasim, vdpa);
}
static struct vdpasim *dev_to_sim(struct device *dev)
{
	struct vdpa_device *vdpa = dev_to_vdpa(dev);

	return vdpa_to_sim(vdpa);
}
static void vdpasim_vq_notify(struct vringh *vring)
{
	struct vdpasim_virtqueue *vq =
		container_of(vring, struct vdpasim_virtqueue, vring);

	if (!vq->cb)
		return;

	vq->cb(vq->private);
}
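/*
 * Bind a virtqueue's guest-supplied addresses to its vringh instance.
 * In virtio 1.0 terms, desc_addr/driver_addr/device_addr are the
 * descriptor table, the available ring and the used ring; vringh then
 * accesses them through the simulator's IOTLB.
 */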
static void vdpasim_queue_ready(struct vdpasim *vdpasim, unsigned int idx)
{
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	vringh_init_iotlb(&vq->vring, vdpasim->dev_attr.supported_features,
			  VDPASIM_QUEUE_MAX, false,
			  (struct vring_desc *)(uintptr_t)vq->desc_addr,
			  (struct vring_avail *)
			  (uintptr_t)vq->driver_addr,
			  (struct vring_used *)
			  (uintptr_t)vq->device_addr);

	vq->vring.notify = vdpasim_vq_notify;
}
static void vdpasim_vq_reset(struct vdpasim *vdpasim,
			     struct vdpasim_virtqueue *vq)
{
	vq->ready = false;
	vq->desc_addr = 0;
	vq->driver_addr = 0;
	vq->device_addr = 0;
	vq->cb = NULL;
	vq->private = NULL;
	vringh_init_iotlb(&vq->vring, vdpasim->dev_attr.supported_features,
			  VDPASIM_QUEUE_MAX, false, NULL, NULL, NULL);

	vq->vring.notify = NULL;
}
static void vdpasim_reset(struct vdpasim *vdpasim)
{
	int i;

	for (i = 0; i < vdpasim->dev_attr.nvqs; i++)
		vdpasim_vq_reset(vdpasim, &vdpasim->vqs[i]);

	spin_lock(&vdpasim->iommu_lock);
	vhost_iotlb_reset(vdpasim->iommu);
	spin_unlock(&vdpasim->iommu_lock);

	vdpasim->features = 0;
	vdpasim->status = 0;
	++vdpasim->generation;
}
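/* Map a DMA API transfer direction onto a vhost IOTLB permission. */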
static int dir_to_perm(enum dma_data_direction dir)
{
	int perm = -EFAULT;

	switch (dir) {
	case DMA_FROM_DEVICE:
		perm = VHOST_MAP_WO;
		break;
	case DMA_TO_DEVICE:
		perm = VHOST_MAP_RO;
		break;
	case DMA_BIDIRECTIONAL:
		perm = VHOST_MAP_RW;
		break;
	default:
		break;
	}

	return perm;
}
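/*
 * DMA ops for the simulator: DMA API calls coming from the virtio side
 * are turned into vhost IOTLB entries.  Physical addresses double as
 * IOVAs (identity mapping), so no IOVA allocator is required.
 */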
static dma_addr_t vdpasim_map_page(struct device *dev, struct page *page,
				   unsigned long offset, size_t size,
				   enum dma_data_direction dir,
				   unsigned long attrs)
{
	struct vdpasim *vdpasim = dev_to_sim(dev);
	struct vhost_iotlb *iommu = vdpasim->iommu;
	u64 pa = (page_to_pfn(page) << PAGE_SHIFT) + offset;
	int ret, perm = dir_to_perm(dir);

	if (perm < 0)
		return DMA_MAPPING_ERROR;

	/* For simplicity, use identical mapping to avoid e.g. an IOVA
	 * allocator.
	 */
	spin_lock(&vdpasim->iommu_lock);
	ret = vhost_iotlb_add_range(iommu, pa, pa + size - 1,
				    pa, perm);
	spin_unlock(&vdpasim->iommu_lock);
	if (ret)
		return DMA_MAPPING_ERROR;

	return (dma_addr_t)(pa);
}
static void vdpasim_unmap_page(struct device *dev, dma_addr_t dma_addr,
			       size_t size, enum dma_data_direction dir,
			       unsigned long attrs)
{
	struct vdpasim *vdpasim = dev_to_sim(dev);
	struct vhost_iotlb *iommu = vdpasim->iommu;

	spin_lock(&vdpasim->iommu_lock);
	vhost_iotlb_del_range(iommu, (u64)dma_addr,
			      (u64)dma_addr + size - 1);
	spin_unlock(&vdpasim->iommu_lock);
}
static void *vdpasim_alloc_coherent(struct device *dev, size_t size,
				    dma_addr_t *dma_addr, gfp_t flag,
				    unsigned long attrs)
{
	struct vdpasim *vdpasim = dev_to_sim(dev);
	struct vhost_iotlb *iommu = vdpasim->iommu;
	void *addr = kmalloc(size, flag);
	int ret;

	spin_lock(&vdpasim->iommu_lock);
	if (!addr) {
		*dma_addr = DMA_MAPPING_ERROR;
	} else {
		u64 pa = virt_to_phys(addr);

		ret = vhost_iotlb_add_range(iommu, (u64)pa,
					    (u64)pa + size - 1,
					    pa, VHOST_MAP_RW);
		if (ret) {
			*dma_addr = DMA_MAPPING_ERROR;
			kfree(addr);
			addr = NULL;
		} else
			*dma_addr = (dma_addr_t)pa;
	}
	spin_unlock(&vdpasim->iommu_lock);

	return addr;
}
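/*
 * Since DMA addresses are identity-mapped physical addresses (see
 * vdpasim_map_page() above), the kernel virtual address of a coherent
 * buffer can be recovered from its DMA address with phys_to_virt() at
 * free time.
 */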
static void vdpasim_free_coherent(struct device *dev, size_t size,
				  void *vaddr, dma_addr_t dma_addr,
				  unsigned long attrs)
{
	struct vdpasim *vdpasim = dev_to_sim(dev);
	struct vhost_iotlb *iommu = vdpasim->iommu;

	spin_lock(&vdpasim->iommu_lock);
	vhost_iotlb_del_range(iommu, (u64)dma_addr,
			      (u64)dma_addr + size - 1);
	spin_unlock(&vdpasim->iommu_lock);

	kfree(phys_to_virt((uintptr_t)dma_addr));
}
static const struct dma_map_ops vdpasim_dma_ops = {
	.map_page = vdpasim_map_page,
	.unmap_page = vdpasim_unmap_page,
	.alloc = vdpasim_alloc_coherent,
	.free = vdpasim_free_coherent,
};
static const struct vdpa_config_ops vdpasim_config_ops;
static const struct vdpa_config_ops vdpasim_batch_config_ops;
struct vdpasim *vdpasim_create(struct vdpasim_dev_attr *dev_attr)
{
	const struct vdpa_config_ops *ops;
	struct vdpasim *vdpasim;
	struct device *dev;
	int i, ret = -ENOMEM;

	if (batch_mapping)
		ops = &vdpasim_batch_config_ops;
	else
		ops = &vdpasim_config_ops;

	vdpasim = vdpa_alloc_device(struct vdpasim, vdpa, NULL, ops,
				    dev_attr->nvqs);
	if (!vdpasim)
		goto err_alloc;

	vdpasim->dev_attr = *dev_attr;
	INIT_WORK(&vdpasim->work, dev_attr->work_fn);
	spin_lock_init(&vdpasim->lock);
	spin_lock_init(&vdpasim->iommu_lock);

	dev = &vdpasim->vdpa.dev;
	dev->dma_mask = &dev->coherent_dma_mask;
	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
		goto err_iommu;
	set_dma_ops(dev, &vdpasim_dma_ops);

	vdpasim->config = kzalloc(dev_attr->config_size, GFP_KERNEL);
	if (!vdpasim->config)
		goto err_iommu;

	vdpasim->vqs = kcalloc(dev_attr->nvqs, sizeof(struct vdpasim_virtqueue),
			       GFP_KERNEL);
	if (!vdpasim->vqs)
		goto err_iommu;

	vdpasim->iommu = vhost_iotlb_alloc(max_iotlb_entries, 0);
	if (!vdpasim->iommu)
		goto err_iommu;

	vdpasim->buffer = kvmalloc(dev_attr->buffer_size, GFP_KERNEL);
	if (!vdpasim->buffer)
		goto err_iommu;

	for (i = 0; i < dev_attr->nvqs; i++)
		vringh_set_iotlb(&vdpasim->vqs[i].vring, vdpasim->iommu);

	vdpasim->vdpa.dma_dev = dev;

	return vdpasim;

err_iommu:
	put_device(dev);
err_alloc:
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(vdpasim_create);
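/*
 * Illustrative sketch (not part of this file): a device simulator built
 * on this core fills in a struct vdpasim_dev_attr and calls
 * vdpasim_create() from its init path.  The my_sim_* names and
 * MY_SIM_FEATURES below are placeholders, loosely modeled on
 * vdpa_sim_net, not symbols defined by this core:
 *
 *	static struct vdpasim_dev_attr dev_attr = {
 *		.id                 = VIRTIO_ID_NET,
 *		.supported_features = MY_SIM_FEATURES,
 *		.nvqs               = 2,
 *		.config_size        = sizeof(struct virtio_net_config),
 *		.buffer_size        = PAGE_SIZE,
 *		.get_config         = my_sim_get_config,
 *		.work_fn            = my_sim_work,
 *	};
 *
 *	struct vdpasim *sim = vdpasim_create(&dev_attr);
 *
 *	if (IS_ERR(sim))
 *		return PTR_ERR(sim);
 */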
static int vdpasim_set_vq_address(struct vdpa_device *vdpa, u16 idx,
				  u64 desc_area, u64 driver_area,
				  u64 device_area)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	vq->desc_addr = desc_area;
	vq->driver_addr = driver_area;
	vq->device_addr = device_area;

	return 0;
}
static void vdpasim_set_vq_num(struct vdpa_device *vdpa, u16 idx, u32 num)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	vq->num = num;
}
static void vdpasim_kick_vq(struct vdpa_device *vdpa, u16 idx)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	if (vq->ready)
		schedule_work(&vdpasim->work);
}
static void vdpasim_set_vq_cb(struct vdpa_device *vdpa, u16 idx,
			      struct vdpa_callback *cb)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	vq->cb = cb->callback;
	vq->private = cb->private;
}
static void vdpasim_set_vq_ready(struct vdpa_device *vdpa, u16 idx, bool ready)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	spin_lock(&vdpasim->lock);
	vq->ready = ready;
	if (vq->ready)
		vdpasim_queue_ready(vdpasim, idx);
	spin_unlock(&vdpasim->lock);
}
static bool vdpasim_get_vq_ready(struct vdpa_device *vdpa, u16 idx)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	return vq->ready;
}
static int vdpasim_set_vq_state(struct vdpa_device *vdpa, u16 idx,
				const struct vdpa_vq_state *state)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
	struct vringh *vrh = &vq->vring;

	spin_lock(&vdpasim->lock);
	vrh->last_avail_idx = state->avail_index;
	spin_unlock(&vdpasim->lock);

	return 0;
}
static int vdpasim_get_vq_state(struct vdpa_device *vdpa, u16 idx,
				struct vdpa_vq_state *state)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
	struct vringh *vrh = &vq->vring;

	state->avail_index = vrh->last_avail_idx;

	return 0;
}
static u32 vdpasim_get_vq_align(struct vdpa_device *vdpa)
{
	return VDPASIM_QUEUE_ALIGN;
}
static u64 vdpasim_get_features(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	return vdpasim->dev_attr.supported_features;
}
static int vdpasim_set_features(struct vdpa_device *vdpa, u64 features)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	/* DMA mapping must be done by driver */
	if (!(features & (1ULL << VIRTIO_F_ACCESS_PLATFORM)))
		return -EINVAL;

	vdpasim->features = features & vdpasim->dev_attr.supported_features;

	return 0;
}
static void vdpasim_set_config_cb(struct vdpa_device *vdpa,
				  struct vdpa_callback *cb)
{
	/* We don't support config interrupt */
}
static u16 vdpasim_get_vq_num_max(struct vdpa_device *vdpa)
{
	return VDPASIM_QUEUE_MAX;
}
static u32 vdpasim_get_device_id(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	return vdpasim->dev_attr.id;
}
static u32 vdpasim_get_vendor_id(struct vdpa_device *vdpa)
{
	return VDPASIM_VENDOR_ID;
}
static u8 vdpasim_get_status(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	u8 status;

	spin_lock(&vdpasim->lock);
	status = vdpasim->status;
	spin_unlock(&vdpasim->lock);

	return status;
}
static void vdpasim_set_status(struct vdpa_device *vdpa, u8 status)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	spin_lock(&vdpasim->lock);
	vdpasim->status = status;
	if (status == 0)
		vdpasim_reset(vdpasim);
	spin_unlock(&vdpasim->lock);
}
static void vdpasim_get_config(struct vdpa_device *vdpa, unsigned int offset,
			       void *buf, unsigned int len)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	if (offset + len > vdpasim->dev_attr.config_size)
		return;

	if (vdpasim->dev_attr.get_config)
		vdpasim->dev_attr.get_config(vdpasim, vdpasim->config);

	memcpy(buf, vdpasim->config + offset, len);
}
static void vdpasim_set_config(struct vdpa_device *vdpa, unsigned int offset,
			       const void *buf, unsigned int len)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	if (offset + len > vdpasim->dev_attr.config_size)
		return;

	memcpy(vdpasim->config + offset, buf, len);

	if (vdpasim->dev_attr.set_config)
		vdpasim->dev_attr.set_config(vdpasim, vdpasim->config);
}
static u32 vdpasim_get_generation(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	return vdpasim->generation;
}
static struct vdpa_iova_range vdpasim_get_iova_range(struct vdpa_device *vdpa)
{
	struct vdpa_iova_range range = {
		.first = 0ULL,
		.last = ULLONG_MAX,
	};

	return range;
}
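/*
 * Batched mapping mode: the bus driver hands over a complete IOTLB,
 * which is replayed into the simulator's own IOTLB under iommu_lock,
 * rolling back to an empty IOTLB if any entry fails to insert.
 */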
static int vdpasim_set_map(struct vdpa_device *vdpa,
			   struct vhost_iotlb *iotlb)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vhost_iotlb_map *map;
	u64 start = 0ULL, last = 0ULL - 1;
	int ret;

	spin_lock(&vdpasim->iommu_lock);
	vhost_iotlb_reset(vdpasim->iommu);

	for (map = vhost_iotlb_itree_first(iotlb, start, last); map;
	     map = vhost_iotlb_itree_next(map, start, last)) {
		ret = vhost_iotlb_add_range(vdpasim->iommu, map->start,
					    map->last, map->addr, map->perm);
		if (ret)
			goto err;
	}
	spin_unlock(&vdpasim->iommu_lock);
	return 0;

err:
	vhost_iotlb_reset(vdpasim->iommu);
	spin_unlock(&vdpasim->iommu_lock);
	return ret;
}
static int vdpasim_dma_map(struct vdpa_device *vdpa, u64 iova, u64 size,
			   u64 pa, u32 perm)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	int ret;

	spin_lock(&vdpasim->iommu_lock);
	ret = vhost_iotlb_add_range(vdpasim->iommu, iova, iova + size - 1, pa,
				    perm);
	spin_unlock(&vdpasim->iommu_lock);

	return ret;
}
static int vdpasim_dma_unmap(struct vdpa_device *vdpa, u64 iova, u64 size)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	spin_lock(&vdpasim->iommu_lock);
	vhost_iotlb_del_range(vdpasim->iommu, iova, iova + size - 1);
	spin_unlock(&vdpasim->iommu_lock);

	return 0;
}
static void vdpasim_free(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	cancel_work_sync(&vdpasim->work);
	kvfree(vdpasim->buffer);
	if (vdpasim->iommu)
		vhost_iotlb_free(vdpasim->iommu);
	kfree(vdpasim->vqs);
	kfree(vdpasim->config);
}
static const struct vdpa_config_ops vdpasim_config_ops = {
	.set_vq_address         = vdpasim_set_vq_address,
	.set_vq_num             = vdpasim_set_vq_num,
	.kick_vq                = vdpasim_kick_vq,
	.set_vq_cb              = vdpasim_set_vq_cb,
	.set_vq_ready           = vdpasim_set_vq_ready,
	.get_vq_ready           = vdpasim_get_vq_ready,
	.set_vq_state           = vdpasim_set_vq_state,
	.get_vq_state           = vdpasim_get_vq_state,
	.get_vq_align           = vdpasim_get_vq_align,
	.get_features           = vdpasim_get_features,
	.set_features           = vdpasim_set_features,
	.set_config_cb          = vdpasim_set_config_cb,
	.get_vq_num_max         = vdpasim_get_vq_num_max,
	.get_device_id          = vdpasim_get_device_id,
	.get_vendor_id          = vdpasim_get_vendor_id,
	.get_status             = vdpasim_get_status,
	.set_status             = vdpasim_set_status,
	.get_config             = vdpasim_get_config,
	.set_config             = vdpasim_set_config,
	.get_generation         = vdpasim_get_generation,
	.get_iova_range         = vdpasim_get_iova_range,
	.dma_map                = vdpasim_dma_map,
	.dma_unmap              = vdpasim_dma_unmap,
	.free                   = vdpasim_free,
};
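/*
 * The batched variant below is identical except that .dma_map and
 * .dma_unmap are replaced by .set_map, matching the batch_mapping
 * module parameter.
 */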
static const struct vdpa_config_ops vdpasim_batch_config_ops = {
	.set_vq_address         = vdpasim_set_vq_address,
	.set_vq_num             = vdpasim_set_vq_num,
	.kick_vq                = vdpasim_kick_vq,
	.set_vq_cb              = vdpasim_set_vq_cb,
	.set_vq_ready           = vdpasim_set_vq_ready,
	.get_vq_ready           = vdpasim_get_vq_ready,
	.set_vq_state           = vdpasim_set_vq_state,
	.get_vq_state           = vdpasim_get_vq_state,
	.get_vq_align           = vdpasim_get_vq_align,
	.get_features           = vdpasim_get_features,
	.set_features           = vdpasim_set_features,
	.set_config_cb          = vdpasim_set_config_cb,
	.get_vq_num_max         = vdpasim_get_vq_num_max,
	.get_device_id          = vdpasim_get_device_id,
	.get_vendor_id          = vdpasim_get_vendor_id,
	.get_status             = vdpasim_get_status,
	.set_status             = vdpasim_set_status,
	.get_config             = vdpasim_get_config,
	.set_config             = vdpasim_set_config,
	.get_generation         = vdpasim_get_generation,
	.get_iova_range         = vdpasim_get_iova_range,
	.set_map                = vdpasim_set_map,
	.free                   = vdpasim_free,
};
MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE(DRV_LICENSE);
MODULE_AUTHOR(DRV_AUTHOR);
MODULE_DESCRIPTION(DRV_DESC);
);