// SPDX-License-Identifier: GPL-2.0
/*
 * Virtio driver for the paravirtualized IOMMU
 *
 * Copyright (C) 2019 Arm Limited
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/amba/bus.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/freezer.h>
#include <linux/interval_tree.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/of_iommu.h>
#include <linux/of_platform.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ids.h>
#include <linux/wait.h>

#include <uapi/linux/virtio_iommu.h>
#define MSI_IOVA_BASE			0x8000000
#define MSI_IOVA_LENGTH			0x100000

#define VIOMMU_REQUEST_VQ		0
#define VIOMMU_EVENT_VQ			1
#define VIOMMU_NR_VQS			2
struct viommu_dev {
	struct iommu_device		iommu;
	struct device			*dev;
	struct virtio_device		*vdev;

	struct ida			domain_ids;

	struct virtqueue		*vqs[VIOMMU_NR_VQS];
	spinlock_t			request_lock;
	struct list_head		requests;
	void				*evts;

	/* Device configuration */
	struct iommu_domain_geometry	geometry;
	u64				pgsize_bitmap;
	u32				first_domain;
	u32				last_domain;
	/* Supported MAP flags */
	u32				map_flags;
	u32				probe_size;
};
struct viommu_mapping {
	phys_addr_t			paddr;
	struct interval_tree_node	iova;
	u32				flags;
};
struct viommu_domain {
	struct iommu_domain		domain;
	struct viommu_dev		*viommu;
	struct mutex			mutex; /* protects viommu pointer */
	unsigned int			id;
	u32				map_flags;

	spinlock_t			mappings_lock;
	struct rb_root_cached		mappings;

	unsigned long			nr_endpoints;
};
struct viommu_endpoint {
	struct device			*dev;
	struct viommu_dev		*viommu;
	struct viommu_domain		*vdomain;
	struct list_head		resv_regions;
};
struct viommu_request {
	struct list_head		list;
	void				*writeback;
	unsigned int			write_offset;
	unsigned int			len;
	char				buf[];
};

#define VIOMMU_FAULT_RESV_MASK		0xffffff00

struct viommu_event {
	union {
		u32			head;
		struct virtio_iommu_fault fault;
	};
};
#define to_viommu_domain(domain)	\
	container_of(domain, struct viommu_domain, domain)
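/* Convert the request status written by the device in the buffer tail into an errno */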
static int viommu_get_req_errno(void *buf, size_t len)
{
	struct virtio_iommu_req_tail *tail = buf + len - sizeof(*tail);

	switch (tail->status) {
	case VIRTIO_IOMMU_S_OK:
		return 0;
	case VIRTIO_IOMMU_S_UNSUPP:
		return -ENOSYS;
	case VIRTIO_IOMMU_S_INVAL:
		return -EINVAL;
	case VIRTIO_IOMMU_S_RANGE:
		return -ERANGE;
	case VIRTIO_IOMMU_S_NOENT:
		return -ENOENT;
	case VIRTIO_IOMMU_S_FAULT:
		return -EFAULT;
	case VIRTIO_IOMMU_S_NOMEM:
		return -ENOMEM;
	case VIRTIO_IOMMU_S_IOERR:
	case VIRTIO_IOMMU_S_DEVERR:
	default:
		return -EIO;
	}
}
static void viommu_set_req_status(void *buf, size_t len, int status)
{
	struct virtio_iommu_req_tail *tail = buf + len - sizeof(*tail);

	tail->status = status;
}
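/*
 * Compute the offset, within a request buffer of @len bytes, where the
 * device-writable part (probe properties, if any, and the request tail)
 * begins.
 */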
static off_t viommu_get_write_desc_offset(struct viommu_dev *viommu,
					  struct virtio_iommu_req_head *req,
					  size_t len)
{
	size_t tail_size = sizeof(struct virtio_iommu_req_tail);

	if (req->type == VIRTIO_IOMMU_T_PROBE)
		return len - viommu->probe_size - tail_size;

	return len - tail_size;
}
/*
 * __viommu_sync_req - Complete all in-flight requests
 *
 * Wait for all added requests to complete. When this function returns, all
 * requests that were in-flight at the time of the call have completed.
 */
static int __viommu_sync_req(struct viommu_dev *viommu)
{
	unsigned int len;
	size_t write_len;
	struct viommu_request *req;
	struct virtqueue *vq = viommu->vqs[VIOMMU_REQUEST_VQ];

	assert_spin_locked(&viommu->request_lock);

	virtqueue_kick(vq);

	while (!list_empty(&viommu->requests)) {
		len = 0;
		req = virtqueue_get_buf(vq, &len);
		if (!req)
			continue;

		if (!len)
			viommu_set_req_status(req->buf, req->len,
					      VIRTIO_IOMMU_S_IOERR);

		write_len = req->len - req->write_offset;
		if (req->writeback && len == write_len)
			memcpy(req->writeback, req->buf + req->write_offset,
			       write_len);

		list_del(&req->list);
		kfree(req);
	}

	return 0;
}
static int viommu_sync_req(struct viommu_dev *viommu)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&viommu->request_lock, flags);
	ret = __viommu_sync_req(viommu);
	if (ret)
		dev_dbg(viommu->dev, "could not sync requests (%d)\n", ret);
	spin_unlock_irqrestore(&viommu->request_lock, flags);

	return ret;
}
/*
 * __viommu_add_request - Add one request to the queue
 * @buf: pointer to the request buffer
 * @len: length of the request buffer
 * @writeback: copy data back to the buffer when the request completes.
 *
 * Add a request to the queue. Only synchronize the queue if it's already full.
 * Otherwise don't kick the queue nor wait for requests to complete.
 *
 * When @writeback is true, data written by the device, including the request
 * status, is copied into @buf after the request completes. This is unsafe if
 * the caller allocates @buf on stack and drops the lock between add_req() and
 * sync_req().
 *
 * Return 0 if the request was successfully added to the queue.
 */
static int __viommu_add_req(struct viommu_dev *viommu, void *buf, size_t len,
			    bool writeback)
{
	int ret;
	off_t write_offset;
	struct viommu_request *req;
	struct scatterlist top_sg, bottom_sg;
	struct scatterlist *sg[2] = { &top_sg, &bottom_sg };
	struct virtqueue *vq = viommu->vqs[VIOMMU_REQUEST_VQ];

	assert_spin_locked(&viommu->request_lock);

	write_offset = viommu_get_write_desc_offset(viommu, buf, len);
	if (write_offset <= 0)
		return -EINVAL;

	req = kzalloc(sizeof(*req) + len, GFP_ATOMIC);
	if (!req)
		return -ENOMEM;

	req->len = len;
	if (writeback) {
		req->writeback = buf + write_offset;
		req->write_offset = write_offset;
	}
	memcpy(&req->buf, buf, write_offset);

	sg_init_one(&top_sg, req->buf, write_offset);
	sg_init_one(&bottom_sg, req->buf + write_offset, len - write_offset);

	ret = virtqueue_add_sgs(vq, sg, 1, 1, req, GFP_ATOMIC);
	if (ret == -ENOSPC) {
		/* If the queue is full, sync and retry */
		if (!__viommu_sync_req(viommu))
			ret = virtqueue_add_sgs(vq, sg, 1, 1, req, GFP_ATOMIC);
	}
	if (ret)
		goto err_free;

	list_add_tail(&req->list, &viommu->requests);
	return 0;

err_free:
	kfree(req);
	return ret;
}
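/* Add a request to the queue without kicking it or waiting for completion */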
static int viommu_add_req(struct viommu_dev *viommu, void *buf, size_t len)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&viommu->request_lock, flags);
	ret = __viommu_add_req(viommu, buf, len, false);
	if (ret)
		dev_dbg(viommu->dev, "could not add request: %d\n", ret);
	spin_unlock_irqrestore(&viommu->request_lock, flags);

	return ret;
}
/*
 * Send a request and wait for it to complete. Return the request status (as an
 * errno)
 */
static int viommu_send_req_sync(struct viommu_dev *viommu, void *buf,
				size_t len)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&viommu->request_lock, flags);

	ret = __viommu_add_req(viommu, buf, len, true);
	if (ret) {
		dev_dbg(viommu->dev, "could not add request (%d)\n", ret);
		goto out_unlock;
	}

	ret = __viommu_sync_req(viommu);
	if (ret) {
		dev_dbg(viommu->dev, "could not sync requests (%d)\n", ret);
		/* Fall-through (get the actual request status) */
	}

	ret = viommu_get_req_errno(buf, len);
out_unlock:
	spin_unlock_irqrestore(&viommu->request_lock, flags);
	return ret;
}
/*
 * viommu_add_mapping - add a mapping to the internal tree
 *
 * On success, return 0. Otherwise return -ENOMEM.
 */
static int viommu_add_mapping(struct viommu_domain *vdomain, unsigned long iova,
			      phys_addr_t paddr, size_t size, u32 flags)
{
	unsigned long irqflags;
	struct viommu_mapping *mapping;

	mapping = kzalloc(sizeof(*mapping), GFP_ATOMIC);
	if (!mapping)
		return -ENOMEM;

	mapping->paddr		= paddr;
	mapping->iova.start	= iova;
	mapping->iova.last	= iova + size - 1;
	mapping->flags		= flags;

	spin_lock_irqsave(&vdomain->mappings_lock, irqflags);
	interval_tree_insert(&mapping->iova, &vdomain->mappings);
	spin_unlock_irqrestore(&vdomain->mappings_lock, irqflags);

	return 0;
}
/*
 * viommu_del_mappings - remove mappings from the internal tree
 *
 * @vdomain: the domain
 * @iova: start of the range
 * @size: size of the range. A size of 0 corresponds to the entire address
 *	space
 *
 * On success, returns the number of unmapped bytes (>= size)
 */
static size_t viommu_del_mappings(struct viommu_domain *vdomain,
				  unsigned long iova, size_t size)
{
	size_t unmapped = 0;
	unsigned long flags;
	unsigned long last = iova + size - 1;
	struct viommu_mapping *mapping = NULL;
	struct interval_tree_node *node, *next;

	spin_lock_irqsave(&vdomain->mappings_lock, flags);
	next = interval_tree_iter_first(&vdomain->mappings, iova, last);
	while (next) {
		node = next;
		mapping = container_of(node, struct viommu_mapping, iova);
		next = interval_tree_iter_next(node, iova, last);

		/* Trying to split a mapping? */
		if (mapping->iova.start < iova)
			break;

		/*
		 * Virtio-iommu doesn't allow UNMAP to split a mapping created
		 * with a single MAP request, so remove the full mapping.
		 */
		unmapped += mapping->iova.last - mapping->iova.start + 1;

		interval_tree_remove(node, &vdomain->mappings);
		kfree(mapping);
	}
	spin_unlock_irqrestore(&vdomain->mappings_lock, flags);

	return unmapped;
}
/*
 * viommu_replay_mappings - re-send MAP requests
 *
 * When reattaching a domain that was previously detached from all endpoints,
 * mappings were deleted from the device. Re-create the mappings available in
 * the internal tree.
 */
static int viommu_replay_mappings(struct viommu_domain *vdomain)
{
	int ret = 0;
	unsigned long flags;
	struct viommu_mapping *mapping;
	struct interval_tree_node *node;
	struct virtio_iommu_req_map map;

	spin_lock_irqsave(&vdomain->mappings_lock, flags);
	node = interval_tree_iter_first(&vdomain->mappings, 0, -1UL);
	while (node) {
		mapping = container_of(node, struct viommu_mapping, iova);
		map = (struct virtio_iommu_req_map) {
			.head.type	= VIRTIO_IOMMU_T_MAP,
			.domain		= cpu_to_le32(vdomain->id),
			.virt_start	= cpu_to_le64(mapping->iova.start),
			.virt_end	= cpu_to_le64(mapping->iova.last),
			.phys_start	= cpu_to_le64(mapping->paddr),
			.flags		= cpu_to_le32(mapping->flags),
		};

		ret = viommu_send_req_sync(vdomain->viommu, &map, sizeof(map));
		if (ret)
			break;

		node = interval_tree_iter_next(node, 0, -1UL);
	}
	spin_unlock_irqrestore(&vdomain->mappings_lock, flags);

	return ret;
}
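/* Record a reserved memory region described by a RESV_MEM probe property */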
static int viommu_add_resv_mem(struct viommu_endpoint *vdev,
			       struct virtio_iommu_probe_resv_mem *mem,
			       size_t len)
{
	size_t size;
	u64 start64, end64;
	phys_addr_t start, end;
	struct iommu_resv_region *region = NULL;
	unsigned long prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;

	start = start64 = le64_to_cpu(mem->start);
	end = end64 = le64_to_cpu(mem->end);
	size = end64 - start64 + 1;

	/* Catch any overflow, including the unlikely end64 - start64 + 1 == 0 */
	if (start != start64 || end != end64 || size < end64 - start64)
		return -EOVERFLOW;

	if (len < sizeof(*mem))
		return -EINVAL;

	switch (mem->subtype) {
	default:
		dev_warn(vdev->dev, "unknown resv mem subtype 0x%x\n",
			 mem->subtype);
		fallthrough;
	case VIRTIO_IOMMU_RESV_MEM_T_RESERVED:
		region = iommu_alloc_resv_region(start, size, 0,
						 IOMMU_RESV_RESERVED);
		break;
	case VIRTIO_IOMMU_RESV_MEM_T_MSI:
		region = iommu_alloc_resv_region(start, size, prot,
						 IOMMU_RESV_MSI);
		break;
	}
	if (!region)
		return -ENOMEM;

	list_add(&region->list, &vdev->resv_regions);
	return 0;
}
static int viommu_probe_endpoint(struct viommu_dev *viommu, struct device *dev)
{
	int ret;
	u16 type, len;
	size_t cur = 0;
	size_t probe_len;
	struct virtio_iommu_req_probe *probe;
	struct virtio_iommu_probe_property *prop;
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct viommu_endpoint *vdev = dev_iommu_priv_get(dev);

	if (!fwspec->num_ids)
		return -EINVAL;

	probe_len = sizeof(*probe) + viommu->probe_size +
		    sizeof(struct virtio_iommu_req_tail);
	probe = kzalloc(probe_len, GFP_KERNEL);
	if (!probe)
		return -ENOMEM;

	probe->head.type = VIRTIO_IOMMU_T_PROBE;
	/*
	 * For now, assume that properties of an endpoint that outputs multiple
	 * IDs are consistent. Only probe the first one.
	 */
	probe->endpoint = cpu_to_le32(fwspec->ids[0]);

	ret = viommu_send_req_sync(viommu, probe, probe_len);
	if (ret)
		goto out_free;

	prop = (void *)probe->properties;
	type = le16_to_cpu(prop->type) & VIRTIO_IOMMU_PROBE_T_MASK;

	while (type != VIRTIO_IOMMU_PROBE_T_NONE &&
	       cur < viommu->probe_size) {
		len = le16_to_cpu(prop->length) + sizeof(*prop);

		switch (type) {
		case VIRTIO_IOMMU_PROBE_T_RESV_MEM:
			ret = viommu_add_resv_mem(vdev, (void *)prop, len);
			break;
		default:
			dev_err(dev, "unknown viommu prop 0x%x\n", type);
		}

		if (ret)
			dev_err(dev, "failed to parse viommu prop 0x%x\n", type);

		cur += len;
		if (cur >= viommu->probe_size)
			break;

		prop = (void *)probe->properties + cur;
		type = le16_to_cpu(prop->type) & VIRTIO_IOMMU_PROBE_T_MASK;
	}

out_free:
	kfree(probe);
	return ret;
}
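/* Decode and log a fault event reported by the device */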
static int viommu_fault_handler(struct viommu_dev *viommu,
				struct virtio_iommu_fault *fault)
{
	char *reason_str;

	u8 reason	= fault->reason;
	u32 flags	= le32_to_cpu(fault->flags);
	u32 endpoint	= le32_to_cpu(fault->endpoint);
	u64 address	= le64_to_cpu(fault->address);

	switch (reason) {
	case VIRTIO_IOMMU_FAULT_R_DOMAIN:
		reason_str = "domain";
		break;
	case VIRTIO_IOMMU_FAULT_R_MAPPING:
		reason_str = "page";
		break;
	case VIRTIO_IOMMU_FAULT_R_UNKNOWN:
	default:
		reason_str = "unknown";
		break;
	}

	/* TODO: find EP by ID and report_iommu_fault */
	if (flags & VIRTIO_IOMMU_FAULT_F_ADDRESS)
		dev_err_ratelimited(viommu->dev, "%s fault from EP %u at %#llx [%s%s%s]\n",
				    reason_str, endpoint, address,
				    flags & VIRTIO_IOMMU_FAULT_F_READ ? "R" : "",
				    flags & VIRTIO_IOMMU_FAULT_F_WRITE ? "W" : "",
				    flags & VIRTIO_IOMMU_FAULT_F_EXEC ? "X" : "");
	else
		dev_err_ratelimited(viommu->dev, "%s fault from EP %u\n",
				    reason_str, endpoint);

	return 0;
}
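/* Event virtqueue callback: process incoming fault events and return the buffers to the queue */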
static void viommu_event_handler(struct virtqueue *vq)
{
	int ret;
	unsigned int len;
	struct scatterlist sg[1];
	struct viommu_event *evt;
	struct viommu_dev *viommu = vq->vdev->priv;

	while ((evt = virtqueue_get_buf(vq, &len)) != NULL) {
		if (len > sizeof(*evt)) {
			dev_err(viommu->dev,
				"invalid event buffer (len %u != %zu)\n",
				len, sizeof(*evt));
		} else if (!(evt->head & VIOMMU_FAULT_RESV_MASK)) {
			viommu_fault_handler(viommu, &evt->fault);
		}

		sg_init_one(sg, evt, sizeof(*evt));
		ret = virtqueue_add_inbuf(vq, sg, 1, evt, GFP_ATOMIC);
		if (ret)
			dev_err(viommu->dev, "could not add event buffer\n");
	}

	virtqueue_kick(vq);
}
static struct iommu_domain *viommu_domain_alloc(unsigned type)
{
	struct viommu_domain *vdomain;

	if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
		return NULL;

	vdomain = kzalloc(sizeof(*vdomain), GFP_KERNEL);
	if (!vdomain)
		return NULL;

	mutex_init(&vdomain->mutex);
	spin_lock_init(&vdomain->mappings_lock);
	vdomain->mappings = RB_ROOT_CACHED;

	if (type == IOMMU_DOMAIN_DMA &&
	    iommu_get_dma_cookie(&vdomain->domain)) {
		kfree(vdomain);
		return NULL;
	}

	return &vdomain->domain;
}
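/*
 * A domain is finalised on the first attach, once the owning viommu instance
 * (and thus its domain ID range and page granule) is known.
 */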
static int viommu_domain_finalise(struct viommu_endpoint *vdev,
				  struct iommu_domain *domain)
{
	int ret;
	unsigned long viommu_page_size;
	struct viommu_dev *viommu = vdev->viommu;
	struct viommu_domain *vdomain = to_viommu_domain(domain);

	viommu_page_size = 1UL << __ffs(viommu->pgsize_bitmap);
	if (viommu_page_size > PAGE_SIZE) {
		dev_err(vdev->dev,
			"granule 0x%lx larger than system page size 0x%lx\n",
			viommu_page_size, PAGE_SIZE);
		return -EINVAL;
	}

	ret = ida_alloc_range(&viommu->domain_ids, viommu->first_domain,
			      viommu->last_domain, GFP_KERNEL);
	if (ret < 0)
		return ret;

	vdomain->id		= (unsigned int)ret;

	domain->pgsize_bitmap	= viommu->pgsize_bitmap;
	domain->geometry	= viommu->geometry;

	vdomain->map_flags	= viommu->map_flags;
	vdomain->viommu		= viommu;

	return 0;
}
static void viommu_domain_free(struct iommu_domain *domain)
{
	struct viommu_domain *vdomain = to_viommu_domain(domain);

	iommu_put_dma_cookie(domain);

	/* Free all remaining mappings (size 2^64) */
	viommu_del_mappings(vdomain, 0, 0);

	if (vdomain->viommu)
		ida_free(&vdomain->viommu->domain_ids, vdomain->id);

	kfree(vdomain);
}
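/* Attach an endpoint to a domain, sending one ATTACH request per endpoint ID */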
static int viommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	int i;
	int ret = 0;
	struct virtio_iommu_req_attach req;
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct viommu_endpoint *vdev = dev_iommu_priv_get(dev);
	struct viommu_domain *vdomain = to_viommu_domain(domain);

	mutex_lock(&vdomain->mutex);
	if (!vdomain->viommu) {
		/*
		 * Properly initialize the domain now that we know which viommu
		 * owns it.
		 */
		ret = viommu_domain_finalise(vdev, domain);
	} else if (vdomain->viommu != vdev->viommu) {
		dev_err(dev, "cannot attach to foreign vIOMMU\n");
		ret = -EXDEV;
	}
	mutex_unlock(&vdomain->mutex);

	if (ret)
		return ret;

	/*
	 * In the virtio-iommu device, when attaching the endpoint to a new
	 * domain, it is detached from the old one and, if as a result the
	 * old domain isn't attached to any endpoint, all mappings are removed
	 * from the old domain and it is freed.
	 *
	 * In the driver the old domain still exists, and its mappings will be
	 * recreated if it gets reattached to an endpoint. Otherwise it will be
	 * freed explicitly.
	 *
	 * vdev->vdomain is protected by group->mutex
	 */
	if (vdev->vdomain)
		vdev->vdomain->nr_endpoints--;

	req = (struct virtio_iommu_req_attach) {
		.head.type	= VIRTIO_IOMMU_T_ATTACH,
		.domain		= cpu_to_le32(vdomain->id),
	};

	for (i = 0; i < fwspec->num_ids; i++) {
		req.endpoint = cpu_to_le32(fwspec->ids[i]);

		ret = viommu_send_req_sync(vdomain->viommu, &req, sizeof(req));
		if (ret)
			return ret;
	}

	if (!vdomain->nr_endpoints) {
		/*
		 * This endpoint is the first to be attached to the domain.
		 * Replay existing mappings (e.g. SW MSI).
		 */
		ret = viommu_replay_mappings(vdomain);
		if (ret)
			return ret;
	}

	vdomain->nr_endpoints++;
	vdev->vdomain = vdomain;

	return 0;
}
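/*
 * Record the new mapping in the driver's interval tree before sending the MAP
 * request; the tree entry is used to replay mappings on reattach and is
 * removed again if the request fails.
 */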
static int viommu_map(struct iommu_domain *domain, unsigned long iova,
		      phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
	int ret;
	u32 flags;
	struct virtio_iommu_req_map map;
	struct viommu_domain *vdomain = to_viommu_domain(domain);

	flags = (prot & IOMMU_READ ? VIRTIO_IOMMU_MAP_F_READ : 0) |
		(prot & IOMMU_WRITE ? VIRTIO_IOMMU_MAP_F_WRITE : 0) |
		(prot & IOMMU_MMIO ? VIRTIO_IOMMU_MAP_F_MMIO : 0);

	if (flags & ~vdomain->map_flags)
		return -EINVAL;

	ret = viommu_add_mapping(vdomain, iova, paddr, size, flags);
	if (ret)
		return ret;

	map = (struct virtio_iommu_req_map) {
		.head.type	= VIRTIO_IOMMU_T_MAP,
		.domain		= cpu_to_le32(vdomain->id),
		.virt_start	= cpu_to_le64(iova),
		.phys_start	= cpu_to_le64(paddr),
		.virt_end	= cpu_to_le64(iova + size - 1),
		.flags		= cpu_to_le32(flags),
	};

	if (!vdomain->nr_endpoints)
		return 0;

	ret = viommu_send_req_sync(vdomain->viommu, &map, sizeof(map));
	if (ret)
		viommu_del_mappings(vdomain, iova, size);

	return ret;
}
static size_t viommu_unmap(struct iommu_domain *domain, unsigned long iova,
			   size_t size, struct iommu_iotlb_gather *gather)
{
	int ret = 0;
	size_t unmapped;
	struct virtio_iommu_req_unmap unmap;
	struct viommu_domain *vdomain = to_viommu_domain(domain);

	unmapped = viommu_del_mappings(vdomain, iova, size);
	if (unmapped < size)
		return 0;

	/* Device already removed all mappings after detach. */
	if (!vdomain->nr_endpoints)
		return unmapped;

	unmap = (struct virtio_iommu_req_unmap) {
		.head.type	= VIRTIO_IOMMU_T_UNMAP,
		.domain		= cpu_to_le32(vdomain->id),
		.virt_start	= cpu_to_le64(iova),
		.virt_end	= cpu_to_le64(iova + unmapped - 1),
	};

	ret = viommu_add_req(vdomain->viommu, &unmap, sizeof(unmap));
	return ret ? 0 : unmapped;
}
static phys_addr_t viommu_iova_to_phys(struct iommu_domain *domain,
				       dma_addr_t iova)
{
	u64 paddr = 0;
	unsigned long flags;
	struct viommu_mapping *mapping;
	struct interval_tree_node *node;
	struct viommu_domain *vdomain = to_viommu_domain(domain);

	spin_lock_irqsave(&vdomain->mappings_lock, flags);
	node = interval_tree_iter_first(&vdomain->mappings, iova, iova);
	if (node) {
		mapping = container_of(node, struct viommu_mapping, iova);
		paddr = mapping->paddr + (iova - mapping->iova.start);
	}
	spin_unlock_irqrestore(&vdomain->mappings_lock, flags);

	return paddr;
}
static void viommu_iotlb_sync(struct iommu_domain *domain,
			      struct iommu_iotlb_gather *gather)
{
	struct viommu_domain *vdomain = to_viommu_domain(domain);

	viommu_sync_req(vdomain->viommu);
}
static void viommu_get_resv_regions(struct device *dev, struct list_head *head)
{
	struct iommu_resv_region *entry, *new_entry, *msi = NULL;
	struct viommu_endpoint *vdev = dev_iommu_priv_get(dev);
	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;

	list_for_each_entry(entry, &vdev->resv_regions, list) {
		if (entry->type == IOMMU_RESV_MSI)
			msi = entry;

		new_entry = kmemdup(entry, sizeof(*entry), GFP_KERNEL);
		if (!new_entry)
			return;
		list_add_tail(&new_entry->list, head);
	}

	/*
	 * If the device didn't register any bypass MSI window, add a
	 * software-mapped region.
	 */
	if (!msi) {
		msi = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH,
					      prot, IOMMU_RESV_SW_MSI);
		if (!msi)
			return;

		list_add_tail(&msi->list, head);
	}

	iommu_dma_get_resv_regions(dev, head);
}
static struct iommu_ops viommu_ops;
static struct virtio_driver virtio_iommu_drv;

static int viommu_match_node(struct device *dev, const void *data)
{
	return dev->parent->fwnode == data;
}
static struct viommu_dev *viommu_get_by_fwnode(struct fwnode_handle *fwnode)
{
	struct device *dev = driver_find_device(&virtio_iommu_drv.driver, NULL,
						fwnode, viommu_match_node);
	put_device(dev);

	return dev ? dev_to_virtio(dev)->priv : NULL;
}
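/* Set up the per-endpoint structure for a device translated by this vIOMMU */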
static struct iommu_device *viommu_probe_device(struct device *dev)
{
	int ret;
	struct viommu_endpoint *vdev;
	struct viommu_dev *viommu = NULL;
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);

	if (!fwspec || fwspec->ops != &viommu_ops)
		return ERR_PTR(-ENODEV);

	viommu = viommu_get_by_fwnode(fwspec->iommu_fwnode);
	if (!viommu)
		return ERR_PTR(-ENODEV);

	vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
	if (!vdev)
		return ERR_PTR(-ENOMEM);

	vdev->dev = dev;
	vdev->viommu = viommu;
	INIT_LIST_HEAD(&vdev->resv_regions);
	dev_iommu_priv_set(dev, vdev);

	if (viommu->probe_size) {
		/* Get additional information for this endpoint */
		ret = viommu_probe_endpoint(viommu, dev);
		if (ret)
			goto err_free_dev;
	}

	return &viommu->iommu;

err_free_dev:
	generic_iommu_put_resv_regions(dev, &vdev->resv_regions);
	kfree(vdev);

	return ERR_PTR(ret);
}
static void viommu_release_device(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct viommu_endpoint *vdev;

	if (!fwspec || fwspec->ops != &viommu_ops)
		return;

	vdev = dev_iommu_priv_get(dev);

	generic_iommu_put_resv_regions(dev, &vdev->resv_regions);
	kfree(vdev);
}
static struct iommu_group *viommu_device_group(struct device *dev)
{
	if (dev_is_pci(dev))
		return pci_device_group(dev);
	else
		return generic_device_group(dev);
}
static int viommu_of_xlate(struct device *dev, struct of_phandle_args *args)
{
	return iommu_fwspec_add_ids(dev, args->args, 1);
}
static struct iommu_ops viommu_ops = {
	.domain_alloc		= viommu_domain_alloc,
	.domain_free		= viommu_domain_free,
	.attach_dev		= viommu_attach_dev,
	.map			= viommu_map,
	.unmap			= viommu_unmap,
	.iova_to_phys		= viommu_iova_to_phys,
	.iotlb_sync		= viommu_iotlb_sync,
	.probe_device		= viommu_probe_device,
	.release_device		= viommu_release_device,
	.device_group		= viommu_device_group,
	.get_resv_regions	= viommu_get_resv_regions,
	.put_resv_regions	= generic_iommu_put_resv_regions,
	.of_xlate		= viommu_of_xlate,
};
static int viommu_init_vqs(struct viommu_dev *viommu)
{
	struct virtio_device *vdev = dev_to_virtio(viommu->dev);
	const char *names[] = { "request", "event" };
	vq_callback_t *callbacks[] = {
		NULL, /* No async requests */
		viommu_event_handler,
	};

	return virtio_find_vqs(vdev, VIOMMU_NR_VQS, viommu->vqs, callbacks,
			       names, NULL);
}
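/* Pre-fill the event virtqueue with buffers that the device uses to report faults */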
static int viommu_fill_evtq(struct viommu_dev *viommu)
{
	int i, ret;
	struct scatterlist sg[1];
	struct viommu_event *evts;
	struct virtqueue *vq = viommu->vqs[VIOMMU_EVENT_VQ];
	size_t nr_evts = vq->num_free;

	viommu->evts = evts = devm_kmalloc_array(viommu->dev, nr_evts,
						 sizeof(*evts), GFP_KERNEL);
	if (!evts)
		return -ENOMEM;

	for (i = 0; i < nr_evts; i++) {
		sg_init_one(sg, &evts[i], sizeof(*evts));
		ret = virtqueue_add_inbuf(vq, sg, 1, &evts[i], GFP_KERNEL);
		if (ret)
			return ret;
	}

	return 0;
}
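/*
 * Probe a virtio-iommu device: read the configuration space, set up the
 * virtqueues and register the IOMMU with the core and the supported bus types.
 */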
static int viommu_probe(struct virtio_device *vdev)
{
	struct device *parent_dev = vdev->dev.parent;
	struct viommu_dev *viommu = NULL;
	struct device *dev = &vdev->dev;
	u64 input_start = 0;
	u64 input_end = -1UL;
	int ret;

	if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1) ||
	    !virtio_has_feature(vdev, VIRTIO_IOMMU_F_MAP_UNMAP))
		return -ENODEV;

	viommu = devm_kzalloc(dev, sizeof(*viommu), GFP_KERNEL);
	if (!viommu)
		return -ENOMEM;

	spin_lock_init(&viommu->request_lock);
	ida_init(&viommu->domain_ids);
	viommu->dev = dev;
	viommu->vdev = vdev;
	INIT_LIST_HEAD(&viommu->requests);

	ret = viommu_init_vqs(viommu);
	if (ret)
		return ret;

	virtio_cread_le(vdev, struct virtio_iommu_config, page_size_mask,
			&viommu->pgsize_bitmap);

	if (!viommu->pgsize_bitmap) {
		ret = -EINVAL;
		goto err_free_vqs;
	}

	viommu->map_flags = VIRTIO_IOMMU_MAP_F_READ | VIRTIO_IOMMU_MAP_F_WRITE;
	viommu->last_domain = ~0U;

	/* Optional features */
	virtio_cread_le_feature(vdev, VIRTIO_IOMMU_F_INPUT_RANGE,
				struct virtio_iommu_config, input_range.start,
				&input_start);

	virtio_cread_le_feature(vdev, VIRTIO_IOMMU_F_INPUT_RANGE,
				struct virtio_iommu_config, input_range.end,
				&input_end);

	virtio_cread_le_feature(vdev, VIRTIO_IOMMU_F_DOMAIN_RANGE,
				struct virtio_iommu_config, domain_range.start,
				&viommu->first_domain);

	virtio_cread_le_feature(vdev, VIRTIO_IOMMU_F_DOMAIN_RANGE,
				struct virtio_iommu_config, domain_range.end,
				&viommu->last_domain);

	virtio_cread_le_feature(vdev, VIRTIO_IOMMU_F_PROBE,
				struct virtio_iommu_config, probe_size,
				&viommu->probe_size);

	viommu->geometry = (struct iommu_domain_geometry) {
		.aperture_start	= input_start,
		.aperture_end	= input_end,
		.force_aperture	= true,
	};

	if (virtio_has_feature(vdev, VIRTIO_IOMMU_F_MMIO))
		viommu->map_flags |= VIRTIO_IOMMU_MAP_F_MMIO;

	viommu_ops.pgsize_bitmap = viommu->pgsize_bitmap;

	virtio_device_ready(vdev);

	/* Populate the event queue with buffers */
	ret = viommu_fill_evtq(viommu);
	if (ret)
		goto err_free_vqs;

	ret = iommu_device_sysfs_add(&viommu->iommu, dev, NULL, "%s",
				     virtio_bus_name(vdev));
	if (ret)
		goto err_free_vqs;

	iommu_device_set_ops(&viommu->iommu, &viommu_ops);
	iommu_device_set_fwnode(&viommu->iommu, parent_dev->fwnode);

	iommu_device_register(&viommu->iommu);

#ifdef CONFIG_PCI
	if (pci_bus_type.iommu_ops != &viommu_ops) {
		ret = bus_set_iommu(&pci_bus_type, &viommu_ops);
		if (ret)
			goto err_unregister;
	}
#endif
#ifdef CONFIG_ARM_AMBA
	if (amba_bustype.iommu_ops != &viommu_ops) {
		ret = bus_set_iommu(&amba_bustype, &viommu_ops);
		if (ret)
			goto err_unregister;
	}
#endif
	if (platform_bus_type.iommu_ops != &viommu_ops) {
		ret = bus_set_iommu(&platform_bus_type, &viommu_ops);
		if (ret)
			goto err_unregister;
	}

	vdev->priv = viommu;

	dev_info(dev, "input address: %u bits\n",
		 order_base_2(viommu->geometry.aperture_end));
	dev_info(dev, "page mask: %#llx\n", viommu->pgsize_bitmap);

	return 0;

err_unregister:
	iommu_device_sysfs_remove(&viommu->iommu);
	iommu_device_unregister(&viommu->iommu);
err_free_vqs:
	vdev->config->del_vqs(vdev);

	return ret;
}
static void viommu_remove(struct virtio_device *vdev)
{
	struct viommu_dev *viommu = vdev->priv;

	iommu_device_sysfs_remove(&viommu->iommu);
	iommu_device_unregister(&viommu->iommu);

	/* Stop all virtqueues */
	vdev->config->reset(vdev);
	vdev->config->del_vqs(vdev);

	dev_info(&vdev->dev, "device removed\n");
}
static void viommu_config_changed(struct virtio_device *vdev)
{
	dev_warn(&vdev->dev, "config changed\n");
}
static unsigned int features[] = {
	VIRTIO_IOMMU_F_MAP_UNMAP,
	VIRTIO_IOMMU_F_INPUT_RANGE,
	VIRTIO_IOMMU_F_DOMAIN_RANGE,
	VIRTIO_IOMMU_F_PROBE,
	VIRTIO_IOMMU_F_MMIO,
};
static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_IOMMU, VIRTIO_DEV_ANY_ID },
	{ 0 },
};
static struct virtio_driver virtio_iommu_drv = {
	.driver.name		= KBUILD_MODNAME,
	.driver.owner		= THIS_MODULE,
	.id_table		= id_table,
	.feature_table		= features,
	.feature_table_size	= ARRAY_SIZE(features),
	.probe			= viommu_probe,
	.remove			= viommu_remove,
	.config_changed		= viommu_config_changed,
};

module_virtio_driver(virtio_iommu_drv);

MODULE_DESCRIPTION("Virtio IOMMU driver");
MODULE_AUTHOR("Jean-Philippe Brucker <jean-philippe.brucker@arm.com>");
MODULE_LICENSE("GPL v2");