// SPDX-License-Identifier: GPL-2.0-only
/*
 * A fairly generic DMA-API to IOMMU-API glue layer.
 *
 * Copyright (C) 2014-2015 ARM Ltd.
 *
 * based in part on arch/arm/mm/dma-mapping.c:
 * Copyright (C) 2000-2004 Russell King
 */

#include <linux/acpi_iort.h>
#include <linux/device.h>
#include <linux/dma-contiguous.h>
#include <linux/dma-iommu.h>
#include <linux/dma-noncoherent.h>
#include <linux/gfp.h>
#include <linux/huge_mm.h>
#include <linux/iommu.h>
#include <linux/iova.h>
#include <linux/irq.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>
#include <linux/crash_dump.h>
struct iommu_dma_msi_page {
	struct list_head	list;
	dma_addr_t		iova;
	phys_addr_t		phys;
};

enum iommu_dma_cookie_type {
	IOMMU_DMA_IOVA_COOKIE,
	IOMMU_DMA_MSI_COOKIE,
};

struct iommu_dma_cookie {
	enum iommu_dma_cookie_type	type;
	union {
		/* Full allocator for IOMMU_DMA_IOVA_COOKIE */
		struct iova_domain	iovad;
		/* Trivial linear page allocator for IOMMU_DMA_MSI_COOKIE */
		dma_addr_t		msi_iova;
	};
	struct list_head		msi_page_list;

	/* Domain for flush queue callback; NULL if flush queue not in use */
	struct iommu_domain		*fq_domain;
};
static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie)
{
	if (cookie->type == IOMMU_DMA_IOVA_COOKIE)
		return cookie->iovad.granule;
	return PAGE_SIZE;
}

static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type)
{
	struct iommu_dma_cookie *cookie;

	cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
	if (cookie) {
		INIT_LIST_HEAD(&cookie->msi_page_list);
		cookie->type = type;
	}
	return cookie;
}
/**
 * iommu_get_dma_cookie - Acquire DMA-API resources for a domain
 * @domain: IOMMU domain to prepare for DMA-API usage
 *
 * IOMMU drivers should normally call this from their domain_alloc
 * callback when domain->type == IOMMU_DOMAIN_DMA.
 */
int iommu_get_dma_cookie(struct iommu_domain *domain)
{
	if (domain->iova_cookie)
		return -EEXIST;

	domain->iova_cookie = cookie_alloc(IOMMU_DMA_IOVA_COOKIE);
	if (!domain->iova_cookie)
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL(iommu_get_dma_cookie);
/**
 * iommu_get_msi_cookie - Acquire just MSI remapping resources
 * @domain: IOMMU domain to prepare
 * @base: Start address of IOVA region for MSI mappings
 *
 * Users who manage their own IOVA allocation and do not want DMA API support,
 * but would still like to take advantage of automatic MSI remapping, can use
 * this to initialise their own domain appropriately. Users should reserve a
 * contiguous IOVA region, starting at @base, large enough to accommodate the
 * number of PAGE_SIZE mappings necessary to cover every MSI doorbell address
 * used by the devices attached to @domain.
 */
int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
{
	struct iommu_dma_cookie *cookie;

	if (domain->type != IOMMU_DOMAIN_UNMANAGED)
		return -EINVAL;

	if (domain->iova_cookie)
		return -EEXIST;

	cookie = cookie_alloc(IOMMU_DMA_MSI_COOKIE);
	if (!cookie)
		return -ENOMEM;

	cookie->msi_iova = base;
	domain->iova_cookie = cookie;
	return 0;
}
EXPORT_SYMBOL(iommu_get_msi_cookie);
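
/*
 * For instance, a caller managing its own unmanaged domain might carve out an
 * IOVA window it promises not to allocate from and hand the base to this
 * helper (MY_MSI_IOVA_BASE is a hypothetical placeholder):
 *
 *	domain = iommu_domain_alloc(dev->bus);
 *	...
 *	ret = iommu_get_msi_cookie(domain, MY_MSI_IOVA_BASE);
 *	if (ret)
 *		goto err_free_domain;
 */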
/**
 * iommu_put_dma_cookie - Release a domain's DMA mapping resources
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie() or
 *          iommu_get_msi_cookie()
 *
 * IOMMU drivers should normally call this from their domain_free callback.
 */
void iommu_put_dma_cookie(struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iommu_dma_msi_page *msi, *tmp;

	if (!cookie)
		return;

	if (cookie->type == IOMMU_DMA_IOVA_COOKIE && cookie->iovad.granule)
		put_iova_domain(&cookie->iovad);

	list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list) {
		list_del(&msi->list);
		kfree(msi);
	}
	kfree(cookie);
	domain->iova_cookie = NULL;
}
EXPORT_SYMBOL(iommu_put_dma_cookie);
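
/*
 * For instance, an IOMMU driver would typically pair iommu_get_dma_cookie()
 * with iommu_put_dma_cookie() in its domain lifecycle callbacks; the my_*()
 * names below are hypothetical:
 *
 *	static struct iommu_domain *my_domain_alloc(unsigned int type)
 *	{
 *		struct my_domain *md = kzalloc(sizeof(*md), GFP_KERNEL);
 *
 *		if (!md)
 *			return NULL;
 *		if (type == IOMMU_DOMAIN_DMA &&
 *		    iommu_get_dma_cookie(&md->domain)) {
 *			kfree(md);
 *			return NULL;
 *		}
 *		return &md->domain;
 *	}
 *
 *	static void my_domain_free(struct iommu_domain *domain)
 *	{
 *		iommu_put_dma_cookie(domain);
 *		kfree(to_my_domain(domain));
 *	}
 */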
/**
 * iommu_dma_get_resv_regions - Reserved region driver helper
 * @dev: Device from iommu_get_resv_regions()
 * @list: Reserved region list from iommu_get_resv_regions()
 *
 * IOMMU drivers can use this to implement their .get_resv_regions callback
 * for general non-IOMMU-specific reservations. Currently, this covers GICv3
 * ITS region reservation on ACPI based ARM platforms that may require HW MSI
 * reservation.
 */
void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list)
{
	if (!is_of_node(dev_iommu_fwspec_get(dev)->iommu_fwnode))
		iort_iommu_msi_get_resv_regions(dev, list);
}
EXPORT_SYMBOL(iommu_dma_get_resv_regions);
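
/*
 * For instance, a driver's .get_resv_regions callback would usually add its
 * own hardware-specific regions and then chain into this helper; the my_*()
 * names are hypothetical:
 *
 *	static void my_get_resv_regions(struct device *dev,
 *					struct list_head *head)
 *	{
 *		my_add_hw_regions(dev, head);
 *		iommu_dma_get_resv_regions(dev, head);
 *	}
 */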
static int cookie_init_hw_msi_region(struct iommu_dma_cookie *cookie,
		phys_addr_t start, phys_addr_t end)
{
	struct iova_domain *iovad = &cookie->iovad;
	struct iommu_dma_msi_page *msi_page;
	int i, num_pages;

	start -= iova_offset(iovad, start);
	num_pages = iova_align(iovad, end - start) >> iova_shift(iovad);

	for (i = 0; i < num_pages; i++) {
		msi_page = kmalloc(sizeof(*msi_page), GFP_KERNEL);
		if (!msi_page)
			return -ENOMEM;

		msi_page->phys = start;
		msi_page->iova = start;
		INIT_LIST_HEAD(&msi_page->list);
		list_add(&msi_page->list, &cookie->msi_page_list);
		start += iovad->granule;
	}

	return 0;
}
static int iova_reserve_pci_windows(struct pci_dev *dev,
		struct iova_domain *iovad)
{
	struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus);
	struct resource_entry *window;
	unsigned long lo, hi;
	phys_addr_t start = 0, end;

	resource_list_for_each_entry(window, &bridge->windows) {
		if (resource_type(window->res) != IORESOURCE_MEM)
			continue;

		lo = iova_pfn(iovad, window->res->start - window->offset);
		hi = iova_pfn(iovad, window->res->end - window->offset);
		reserve_iova(iovad, lo, hi);
	}

	/* Get reserved DMA windows from host bridge */
	resource_list_for_each_entry(window, &bridge->dma_ranges) {
		end = window->res->start - window->offset;
resv_iova:
		if (end > start) {
			lo = iova_pfn(iovad, start);
			hi = iova_pfn(iovad, end);
			reserve_iova(iovad, lo, hi);
		} else if (end < start) {
			/* dma_ranges list should be sorted */
			dev_err(&dev->dev, "Failed to reserve IOVA\n");
			return -EINVAL;
		}

		start = window->res->end - window->offset + 1;
		/* If window is last entry */
		if (window->node.next == &bridge->dma_ranges &&
		    end != ~(phys_addr_t)0) {
			end = ~(phys_addr_t)0;
			goto resv_iova;
		}
	}

	return 0;
}
static int iova_reserve_iommu_regions(struct device *dev,
		struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	struct iommu_resv_region *region;
	LIST_HEAD(resv_regions);
	int ret = 0;

	if (dev_is_pci(dev)) {
		ret = iova_reserve_pci_windows(to_pci_dev(dev), iovad);
		if (ret)
			return ret;
	}

	iommu_get_resv_regions(dev, &resv_regions);
	list_for_each_entry(region, &resv_regions, list) {
		unsigned long lo, hi;

		/* We ARE the software that manages these! */
		if (region->type == IOMMU_RESV_SW_MSI)
			continue;

		lo = iova_pfn(iovad, region->start);
		hi = iova_pfn(iovad, region->start + region->length - 1);
		reserve_iova(iovad, lo, hi);

		if (region->type == IOMMU_RESV_MSI)
			ret = cookie_init_hw_msi_region(cookie, region->start,
					region->start + region->length);
		if (ret)
			break;
	}
	iommu_put_resv_regions(dev, &resv_regions);

	return ret;
}
static void iommu_dma_flush_iotlb_all(struct iova_domain *iovad)
{
	struct iommu_dma_cookie *cookie;
	struct iommu_domain *domain;

	cookie = container_of(iovad, struct iommu_dma_cookie, iovad);
	domain = cookie->fq_domain;
	/*
	 * The IOMMU driver supporting DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE
	 * implies that ops->flush_iotlb_all must be non-NULL.
	 */
	domain->ops->flush_iotlb_all(domain);
}
/**
 * iommu_dma_init_domain - Initialise a DMA mapping domain
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
 * @base: IOVA at which the mappable address space starts
 * @size: Size of IOVA space
 * @dev: Device the domain is being initialised for
 *
 * @base and @size should be exact multiples of IOMMU page granularity to
 * avoid rounding surprises. If necessary, we reserve the page at address 0
 * to ensure it is an invalid IOVA. It is safe to reinitialise a domain, but
 * any change which could make prior IOVAs invalid will fail.
 */
static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
		u64 size, struct device *dev)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	unsigned long order, base_pfn;
	struct iova_domain *iovad;
	int attr;

	if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
		return -EINVAL;

	iovad = &cookie->iovad;

	/* Use the smallest supported page size for IOVA granularity */
	order = __ffs(domain->pgsize_bitmap);
	base_pfn = max_t(unsigned long, 1, base >> order);

	/* Check the domain allows at least some access to the device... */
	if (domain->geometry.force_aperture) {
		if (base > domain->geometry.aperture_end ||
		    base + size <= domain->geometry.aperture_start) {
			pr_warn("specified DMA range outside IOMMU capability\n");
			return -EFAULT;
		}
		/* ...then finally give it a kicking to make sure it fits */
		base_pfn = max_t(unsigned long, base_pfn,
				domain->geometry.aperture_start >> order);
	}

	/* start_pfn is always nonzero for an already-initialised domain */
	if (iovad->start_pfn) {
		if (1UL << order != iovad->granule ||
		    base_pfn != iovad->start_pfn) {
			pr_warn("Incompatible range for DMA domain\n");
			return -EFAULT;
		}

		return 0;
	}

	init_iova_domain(iovad, 1UL << order, base_pfn);

	if (!cookie->fq_domain && !iommu_domain_get_attr(domain,
			DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE, &attr) && attr) {
		cookie->fq_domain = domain;
		init_iova_flush_queue(iovad, iommu_dma_flush_iotlb_all, NULL);
	}

	if (!dev)
		return 0;

	return iova_reserve_iommu_regions(dev, domain);
}
static int iommu_dma_deferred_attach(struct device *dev,
		struct iommu_domain *domain)
{
	const struct iommu_ops *ops = domain->ops;

	if (!is_kdump_kernel())
		return 0;

	if (unlikely(ops->is_attach_deferred &&
			ops->is_attach_deferred(domain, dev)))
		return iommu_attach_device(domain, dev);

	return 0;
}
/**
 * dma_info_to_prot - Translate DMA API directions and attributes to IOMMU API
 *                    page flags.
 * @dir: Direction of DMA transfer
 * @coherent: Is the DMA master cache-coherent?
 * @attrs: DMA attributes for the mapping
 *
 * Return: corresponding IOMMU API page protection flags
 */
static int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
		     unsigned long attrs)
{
	int prot = coherent ? IOMMU_CACHE : 0;

	if (attrs & DMA_ATTR_PRIVILEGED)
		prot |= IOMMU_PRIV;

	switch (dir) {
	case DMA_BIDIRECTIONAL:
		return prot | IOMMU_READ | IOMMU_WRITE;
	case DMA_TO_DEVICE:
		return prot | IOMMU_READ;
	case DMA_FROM_DEVICE:
		return prot | IOMMU_WRITE;
	default:
		return 0;
	}
}
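
/*
 * For example, per the switch above: a non-coherent master mapping a buffer
 * DMA_FROM_DEVICE with DMA_ATTR_PRIVILEGED set gets IOMMU_WRITE | IOMMU_PRIV,
 * while a coherent master mapping DMA_BIDIRECTIONAL with no attributes gets
 * IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE.
 */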
static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
		size_t size, u64 dma_limit, struct device *dev)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	unsigned long shift, iova_len, iova = 0;

	if (cookie->type == IOMMU_DMA_MSI_COOKIE) {
		cookie->msi_iova += size;
		return cookie->msi_iova - size;
	}

	shift = iova_shift(iovad);
	iova_len = size >> shift;
	/*
	 * Freeing non-power-of-two-sized allocations back into the IOVA caches
	 * will come back to bite us badly, so we have to waste a bit of space
	 * rounding up anything cacheable to make sure that can't happen. The
	 * order of the unadjusted size will still match upon freeing.
	 */
	if (iova_len < (1 << (IOVA_RANGE_CACHE_MAX_SIZE - 1)))
		iova_len = roundup_pow_of_two(iova_len);

	dma_limit = min_not_zero(dma_limit, dev->bus_dma_limit);

	if (domain->geometry.force_aperture)
		dma_limit = min(dma_limit, (u64)domain->geometry.aperture_end);

	/* Try to get PCI devices a SAC address */
	if (dma_limit > DMA_BIT_MASK(32) && dev_is_pci(dev))
		iova = alloc_iova_fast(iovad, iova_len,
				       DMA_BIT_MASK(32) >> shift, false);

	if (!iova)
		iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift,
				       true);

	return (dma_addr_t)iova << shift;
}
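
/*
 * Worked example of the rounding above: with a 4K IOVA granule, a 68K request
 * is 17 granules, which is below the IOVA range-cache limit, so
 * roundup_pow_of_two() turns it into a 32-granule allocation before
 * alloc_iova_fast() is called.
 */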
static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
		dma_addr_t iova, size_t size)
{
	struct iova_domain *iovad = &cookie->iovad;

	/* The MSI case is only ever cleaning up its most recent allocation */
	if (cookie->type == IOMMU_DMA_MSI_COOKIE)
		cookie->msi_iova -= size;
	else if (cookie->fq_domain)	/* non-strict mode */
		queue_iova(iovad, iova_pfn(iovad, iova),
				size >> iova_shift(iovad), 0);
	else
		free_iova_fast(iovad, iova_pfn(iovad, iova),
				size >> iova_shift(iovad));
}
static void __iommu_dma_unmap(struct device *dev, dma_addr_t dma_addr,
		size_t size)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	size_t iova_off = iova_offset(iovad, dma_addr);
	struct iommu_iotlb_gather iotlb_gather;
	size_t unmapped;

	dma_addr -= iova_off;
	size = iova_align(iovad, size + iova_off);
	iommu_iotlb_gather_init(&iotlb_gather);

	unmapped = iommu_unmap_fast(domain, dma_addr, size, &iotlb_gather);
	WARN_ON(unmapped != size);

	if (!cookie->fq_domain)
		iommu_tlb_sync(domain, &iotlb_gather);
	iommu_dma_free_iova(cookie, dma_addr, size);
}
static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
		size_t size, int prot, u64 dma_mask)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	size_t iova_off = iova_offset(iovad, phys);
	dma_addr_t iova;

	if (unlikely(iommu_dma_deferred_attach(dev, domain)))
		return DMA_MAPPING_ERROR;

	size = iova_align(iovad, size + iova_off);

	iova = iommu_dma_alloc_iova(domain, size, dma_mask, dev);
	if (!iova)
		return DMA_MAPPING_ERROR;

	if (iommu_map_atomic(domain, iova, phys - iova_off, size, prot)) {
		iommu_dma_free_iova(cookie, iova, size);
		return DMA_MAPPING_ERROR;
	}
	return iova + iova_off;
}
static void __iommu_dma_free_pages(struct page **pages, int count)
{
	while (count--)
		__free_page(pages[count]);
	kvfree(pages);
}
static struct page **__iommu_dma_alloc_pages(struct device *dev,
		unsigned int count, unsigned long order_mask, gfp_t gfp)
{
	struct page **pages;
	unsigned int i = 0, nid = dev_to_node(dev);

	order_mask &= (2U << MAX_ORDER) - 1;
	if (!order_mask)
		return NULL;

	pages = kvzalloc(count * sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return NULL;

	/* IOMMU can map any pages, so himem can also be used here */
	gfp |= __GFP_NOWARN | __GFP_HIGHMEM;

	while (count) {
		struct page *page = NULL;
		unsigned int order_size;

		/*
		 * Higher-order allocations are a convenience rather
		 * than a necessity, hence using __GFP_NORETRY until
		 * falling back to minimum-order allocations.
		 */
		for (order_mask &= (2U << __fls(count)) - 1;
		     order_mask; order_mask &= ~order_size) {
			unsigned int order = __fls(order_mask);
			gfp_t alloc_flags = gfp;

			order_size = 1U << order;
			if (order_mask > order_size)
				alloc_flags |= __GFP_NORETRY;
			page = alloc_pages_node(nid, alloc_flags, order);
			if (!page)
				continue;
			if (!order)
				break;
			if (!PageCompound(page)) {
				split_page(page, order);
				break;
			} else if (!split_huge_page(page)) {
				break;
			}
			__free_pages(page, order);
		}
		if (!page) {
			__iommu_dma_free_pages(pages, i);
			return NULL;
		}
		count -= order_size;
		while (order_size--)
			pages[i++] = page++;
	}
	return pages;
}
/**
 * iommu_dma_alloc_remap - Allocate and map a buffer contiguous in IOVA space
 * @dev: Device to allocate memory for. Must be a real device
 *	 attached to an iommu_dma_domain
 * @size: Size of buffer in bytes
 * @dma_handle: Out argument for allocated DMA handle
 * @gfp: Allocation flags
 * @attrs: DMA attributes for this allocation
 *
 * If @size is less than PAGE_SIZE, then a full CPU page will be allocated,
 * but an IOMMU which supports smaller pages might not map the whole thing.
 *
 * Return: Mapped virtual address, or NULL on failure.
 */
static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	bool coherent = dev_is_dma_coherent(dev);
	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
	pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);
	unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;
	struct page **pages;
	struct sg_table sgt;
	dma_addr_t iova;
	void *vaddr;

	*dma_handle = DMA_MAPPING_ERROR;

	if (unlikely(iommu_dma_deferred_attach(dev, domain)))
		return NULL;

	min_size = alloc_sizes & -alloc_sizes;
	if (min_size < PAGE_SIZE) {
		min_size = PAGE_SIZE;
		alloc_sizes |= PAGE_SIZE;
	} else {
		size = ALIGN(size, min_size);
	}
	if (attrs & DMA_ATTR_ALLOC_SINGLE_PAGES)
		alloc_sizes = min_size;

	count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	pages = __iommu_dma_alloc_pages(dev, count, alloc_sizes >> PAGE_SHIFT,
					gfp);
	if (!pages)
		return NULL;

	size = iova_align(iovad, size);
	iova = iommu_dma_alloc_iova(domain, size, dev->coherent_dma_mask, dev);
	if (!iova)
		goto out_free_pages;

	if (sg_alloc_table_from_pages(&sgt, pages, count, 0, size, GFP_KERNEL))
		goto out_free_iova;

	if (!(ioprot & IOMMU_CACHE)) {
		struct scatterlist *sg;
		int i;

		for_each_sg(sgt.sgl, sg, sgt.orig_nents, i)
			arch_dma_prep_coherent(sg_page(sg), sg->length);
	}

	if (iommu_map_sg_atomic(domain, iova, sgt.sgl, sgt.orig_nents, ioprot)
			< size)
		goto out_free_sg;

	vaddr = dma_common_pages_remap(pages, size, prot,
			__builtin_return_address(0));
	if (!vaddr)
		goto out_unmap;

	*dma_handle = iova;
	sg_free_table(&sgt);
	return vaddr;

out_unmap:
	__iommu_dma_unmap(dev, iova, size);
out_free_sg:
	sg_free_table(&sgt);
out_free_iova:
	iommu_dma_free_iova(cookie, iova, size);
out_free_pages:
	__iommu_dma_free_pages(pages, count);
	return NULL;
}
/**
 * __iommu_dma_mmap - Map a buffer into provided user VMA
 * @pages: Array representing buffer from __iommu_dma_alloc()
 * @size: Size of buffer in bytes
 * @vma: VMA describing requested userspace mapping
 *
 * Maps the pages of the buffer in @pages into @vma. The caller is responsible
 * for verifying the correct size and protection of @vma beforehand.
 */
static int __iommu_dma_mmap(struct page **pages, size_t size,
		struct vm_area_struct *vma)
{
	return vm_map_pages(vma, pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
}
static void iommu_dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
	phys_addr_t phys;

	if (dev_is_dma_coherent(dev))
		return;

	phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
	arch_sync_dma_for_cpu(phys, size, dir);
}

static void iommu_dma_sync_single_for_device(struct device *dev,
		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
	phys_addr_t phys;

	if (dev_is_dma_coherent(dev))
		return;

	phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
	arch_sync_dma_for_device(phys, size, dir);
}
static void iommu_dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nelems,
		enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (dev_is_dma_coherent(dev))
		return;

	for_each_sg(sgl, sg, nelems, i)
		arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir);
}

static void iommu_dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nelems,
		enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (dev_is_dma_coherent(dev))
		return;

	for_each_sg(sgl, sg, nelems, i)
		arch_sync_dma_for_device(sg_phys(sg), sg->length, dir);
}
static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	phys_addr_t phys = page_to_phys(page) + offset;
	bool coherent = dev_is_dma_coherent(dev);
	int prot = dma_info_to_prot(dir, coherent, attrs);
	dma_addr_t dma_handle;

	dma_handle = __iommu_dma_map(dev, phys, size, prot, dma_get_mask(dev));
	if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
	    dma_handle != DMA_MAPPING_ERROR)
		arch_sync_dma_for_device(phys, size, dir);
	return dma_handle;
}

static void iommu_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		iommu_dma_sync_single_for_cpu(dev, dma_handle, size, dir);
	__iommu_dma_unmap(dev, dma_handle, size);
}
/*
 * Prepare a successfully-mapped scatterlist to give back to the caller.
 *
 * At this point the segments are already laid out by iommu_dma_map_sg() to
 * avoid individually crossing any boundaries, so we merely need to check a
 * segment's start address to avoid concatenating across one.
 */
static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
		dma_addr_t dma_addr)
{
	struct scatterlist *s, *cur = sg;
	unsigned long seg_mask = dma_get_seg_boundary(dev);
	unsigned int cur_len = 0, max_len = dma_get_max_seg_size(dev);
	int i, count = 0;

	for_each_sg(sg, s, nents, i) {
		/* Restore this segment's original unaligned fields first */
		unsigned int s_iova_off = sg_dma_address(s);
		unsigned int s_length = sg_dma_len(s);
		unsigned int s_iova_len = s->length;

		s->offset += s_iova_off;
		s->length = s_length;
		sg_dma_address(s) = DMA_MAPPING_ERROR;
		sg_dma_len(s) = 0;

		/*
		 * Now fill in the real DMA data. If...
		 * - there is a valid output segment to append to
		 * - and this segment starts on an IOVA page boundary
		 * - but doesn't fall at a segment boundary
		 * - and wouldn't make the resulting output segment too long
		 */
		if (cur_len && !s_iova_off && (dma_addr & seg_mask) &&
		    (max_len - cur_len >= s_length)) {
			/* ...then concatenate it with the previous one */
			cur_len += s_length;
		} else {
			/* Otherwise start the next output segment */
			if (i > 0)
				cur = sg_next(cur);
			cur_len = s_length;
			count++;

			sg_dma_address(cur) = dma_addr + s_iova_off;
		}

		sg_dma_len(cur) = cur_len;
		dma_addr += s_iova_len;

		if (s_length + s_iova_off < s_iova_len)
			cur_len = 0;
	}
	return count;
}
/*
 * If mapping failed, then just restore the original list,
 * but making sure the DMA fields are invalidated.
 */
static void __invalidate_sg(struct scatterlist *sg, int nents)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (sg_dma_address(s) != DMA_MAPPING_ERROR)
			s->offset += sg_dma_address(s);
		if (sg_dma_len(s))
			s->length = sg_dma_len(s);
		sg_dma_address(s) = DMA_MAPPING_ERROR;
		sg_dma_len(s) = 0;
	}
}
/*
 * The DMA API client is passing in a scatterlist which could describe
 * any old buffer layout, but the IOMMU API requires everything to be
 * aligned to IOMMU pages. Hence the need for this complicated bit of
 * impedance-matching, to be able to hand off a suitably-aligned list,
 * but still preserve the original offsets and sizes for the caller.
 */
static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	struct scatterlist *s, *prev = NULL;
	int prot = dma_info_to_prot(dir, dev_is_dma_coherent(dev), attrs);
	dma_addr_t iova;
	size_t iova_len = 0;
	unsigned long mask = dma_get_seg_boundary(dev);
	int i;

	if (unlikely(iommu_dma_deferred_attach(dev, domain)))
		return 0;

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		iommu_dma_sync_sg_for_device(dev, sg, nents, dir);

	/*
	 * Work out how much IOVA space we need, and align the segments to
	 * IOVA granules for the IOMMU driver to handle. With some clever
	 * trickery we can modify the list in-place, but reversibly, by
	 * stashing the unaligned parts in the as-yet-unused DMA fields.
	 */
	for_each_sg(sg, s, nents, i) {
		size_t s_iova_off = iova_offset(iovad, s->offset);
		size_t s_length = s->length;
		size_t pad_len = (mask - iova_len + 1) & mask;

		sg_dma_address(s) = s_iova_off;
		sg_dma_len(s) = s_length;
		s->offset -= s_iova_off;
		s_length = iova_align(iovad, s_length + s_iova_off);
		s->length = s_length;

		/*
		 * Due to the alignment of our single IOVA allocation, we can
		 * depend on these assumptions about the segment boundary mask:
		 * - If mask size >= IOVA size, then the IOVA range cannot
		 *   possibly fall across a boundary, so we don't care.
		 * - If mask size < IOVA size, then the IOVA range must start
		 *   exactly on a boundary, therefore we can lay things out
		 *   based purely on segment lengths without needing to know
		 *   the actual addresses beforehand.
		 * - The mask must be a power of 2, so pad_len == 0 if
		 *   iova_len == 0, thus we cannot dereference prev the first
		 *   time through here (i.e. before it has a meaningful value).
		 */
		if (pad_len && pad_len < s_length - 1) {
			prev->length += pad_len;
			iova_len += pad_len;
		}

		iova_len += s_length;
		prev = s;
	}

	iova = iommu_dma_alloc_iova(domain, iova_len, dma_get_mask(dev), dev);
	if (!iova)
		goto out_restore_sg;

	/*
	 * We'll leave any physical concatenation to the IOMMU driver's
	 * implementation - it knows better than we do.
	 */
	if (iommu_map_sg_atomic(domain, iova, sg, nents, prot) < iova_len)
		goto out_free_iova;

	return __finalise_sg(dev, sg, nents, iova);

out_free_iova:
	iommu_dma_free_iova(cookie, iova, iova_len);
out_restore_sg:
	__invalidate_sg(sg, nents);
	return 0;
}
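
/*
 * Worked example of the stashing above, assuming a 4K IOVA granule: a segment
 * with s->offset == 0x104 and s->length == 0x1e00 gets s_iova_off == 0x104,
 * so sg_dma_address() temporarily holds 0x104 and sg_dma_len() holds 0x1e00,
 * while s->offset is rounded down to 0 and s->length is padded up to
 * iova_align(0x1f04) == 0x2000 for the IOMMU driver; __finalise_sg() later
 * restores the original values and fills in the real DMA address and length.
 */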
static void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	dma_addr_t start, end;
	struct scatterlist *tmp;
	int i;

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		iommu_dma_sync_sg_for_cpu(dev, sg, nents, dir);

	/*
	 * The scatterlist segments are mapped into a single
	 * contiguous IOVA allocation, so this is incredibly easy.
	 */
	start = sg_dma_address(sg);
	for_each_sg(sg_next(sg), tmp, nents - 1, i) {
		if (sg_dma_len(tmp) == 0)
			break;
		sg = tmp;
	}
	end = sg_dma_address(sg) + sg_dma_len(sg);
	__iommu_dma_unmap(dev, start, end - start);
}
static dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	return __iommu_dma_map(dev, phys, size,
			dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO,
			dma_get_mask(dev));
}

static void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	__iommu_dma_unmap(dev, handle, size);
}
static void __iommu_dma_free(struct device *dev, size_t size, void *cpu_addr)
{
	size_t alloc_size = PAGE_ALIGN(size);
	int count = alloc_size >> PAGE_SHIFT;
	struct page *page = NULL, **pages = NULL;

	/* Non-coherent atomic allocation? Easy */
	if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
	    dma_free_from_pool(dev, cpu_addr, alloc_size))
		return;

	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
		/*
		 * If the address is remapped, then it's either non-coherent
		 * or highmem CMA, or an iommu_dma_alloc_remap() construction.
		 */
		pages = dma_common_find_pages(cpu_addr);
		if (!pages)
			page = vmalloc_to_page(cpu_addr);
		dma_common_free_remap(cpu_addr, alloc_size);
	} else {
		/* Lowmem means a coherent atomic or CMA allocation */
		page = virt_to_page(cpu_addr);
	}

	if (pages)
		__iommu_dma_free_pages(pages, count);
	if (page)
		dma_free_contiguous(dev, page, alloc_size);
}

static void iommu_dma_free(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t handle, unsigned long attrs)
{
	__iommu_dma_unmap(dev, handle, size);
	__iommu_dma_free(dev, size, cpu_addr);
}
static void *iommu_dma_alloc_pages(struct device *dev, size_t size,
		struct page **pagep, gfp_t gfp, unsigned long attrs)
{
	bool coherent = dev_is_dma_coherent(dev);
	size_t alloc_size = PAGE_ALIGN(size);
	int node = dev_to_node(dev);
	struct page *page = NULL;
	void *cpu_addr;

	page = dma_alloc_contiguous(dev, alloc_size, gfp);
	if (!page)
		page = alloc_pages_node(node, gfp, get_order(alloc_size));
	if (!page)
		return NULL;

	if (IS_ENABLED(CONFIG_DMA_REMAP) && (!coherent || PageHighMem(page))) {
		pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);

		cpu_addr = dma_common_contiguous_remap(page, alloc_size,
				prot, __builtin_return_address(0));
		if (!cpu_addr)
			goto out_free_pages;

		if (!coherent)
			arch_dma_prep_coherent(page, size);
	} else {
		cpu_addr = page_address(page);
	}

	*pagep = page;
	memset(cpu_addr, 0, alloc_size);
	return cpu_addr;
out_free_pages:
	dma_free_contiguous(dev, page, alloc_size);
	return NULL;
}
static void *iommu_dma_alloc(struct device *dev, size_t size,
		dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
{
	bool coherent = dev_is_dma_coherent(dev);
	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
	struct page *page = NULL;
	void *cpu_addr;

	gfp |= __GFP_ZERO;

	if (IS_ENABLED(CONFIG_DMA_REMAP) && gfpflags_allow_blocking(gfp) &&
	    !(attrs & DMA_ATTR_FORCE_CONTIGUOUS))
		return iommu_dma_alloc_remap(dev, size, handle, gfp, attrs);

	if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
	    !gfpflags_allow_blocking(gfp) && !coherent)
		cpu_addr = dma_alloc_from_pool(dev, PAGE_ALIGN(size), &page,
					       gfp);
	else
		cpu_addr = iommu_dma_alloc_pages(dev, size, &page, gfp, attrs);
	if (!cpu_addr)
		return NULL;

	*handle = __iommu_dma_map(dev, page_to_phys(page), size, ioprot,
			dev->coherent_dma_mask);
	if (*handle == DMA_MAPPING_ERROR) {
		__iommu_dma_free(dev, size, cpu_addr);
		return NULL;
	}

	return cpu_addr;
}
static int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long pfn, off = vma->vm_pgoff;
	int ret;

	vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);

	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (off >= nr_pages || vma_pages(vma) > nr_pages - off)
		return -ENXIO;

	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
		struct page **pages = dma_common_find_pages(cpu_addr);

		if (pages)
			return __iommu_dma_mmap(pages, size, vma);
		pfn = vmalloc_to_pfn(cpu_addr);
	} else {
		pfn = page_to_pfn(virt_to_page(cpu_addr));
	}

	return remap_pfn_range(vma, vma->vm_start, pfn + off,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}
static int iommu_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	struct page *page;
	int ret;

	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
		struct page **pages = dma_common_find_pages(cpu_addr);

		if (pages) {
			return sg_alloc_table_from_pages(sgt, pages,
					PAGE_ALIGN(size) >> PAGE_SHIFT,
					0, size, GFP_KERNEL);
		}

		page = vmalloc_to_page(cpu_addr);
	} else {
		page = virt_to_page(cpu_addr);
	}

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (!ret)
		sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	return ret;
}

static unsigned long iommu_dma_get_merge_boundary(struct device *dev)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);

	return (1UL << __ffs(domain->pgsize_bitmap)) - 1;
}
static const struct dma_map_ops iommu_dma_ops = {
	.alloc			= iommu_dma_alloc,
	.free			= iommu_dma_free,
	.mmap			= iommu_dma_mmap,
	.get_sgtable		= iommu_dma_get_sgtable,
	.map_page		= iommu_dma_map_page,
	.unmap_page		= iommu_dma_unmap_page,
	.map_sg			= iommu_dma_map_sg,
	.unmap_sg		= iommu_dma_unmap_sg,
	.sync_single_for_cpu	= iommu_dma_sync_single_for_cpu,
	.sync_single_for_device	= iommu_dma_sync_single_for_device,
	.sync_sg_for_cpu	= iommu_dma_sync_sg_for_cpu,
	.sync_sg_for_device	= iommu_dma_sync_sg_for_device,
	.map_resource		= iommu_dma_map_resource,
	.unmap_resource		= iommu_dma_unmap_resource,
	.get_merge_boundary	= iommu_dma_get_merge_boundary,
};
/*
 * The IOMMU core code allocates the default DMA domain, which the underlying
 * IOMMU driver needs to support via the dma-iommu layer.
 */
void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);

	if (!domain)
		goto out_err;

	/*
	 * The IOMMU core code allocates the default DMA domain, which the
	 * underlying IOMMU driver needs to support via the dma-iommu layer.
	 */
	if (domain->type == IOMMU_DOMAIN_DMA) {
		if (iommu_dma_init_domain(domain, dma_base, size, dev))
			goto out_err;
		dev->dma_ops = &iommu_dma_ops;
	}

	return;
out_err:
	pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
		dev_name(dev));
}
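
/*
 * For instance, on arm64 this is reached from the architecture's
 * arch_setup_dma_ops() once firmware has described the device's IOMMU,
 * roughly along these lines (simplified sketch):
 *
 *	void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
 *				const struct iommu_ops *iommu, bool coherent)
 *	{
 *		dev->dma_coherent = coherent;
 *		if (iommu)
 *			iommu_setup_dma_ops(dev, dma_base, size);
 *	}
 *
 * After that, ordinary dma_map_single()/dma_map_sg() calls on the device are
 * routed through iommu_dma_ops above.
 */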
static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
		phys_addr_t msi_addr, struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iommu_dma_msi_page *msi_page;
	dma_addr_t iova;
	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
	size_t size = cookie_msi_granule(cookie);

	msi_addr &= ~(phys_addr_t)(size - 1);
	list_for_each_entry(msi_page, &cookie->msi_page_list, list)
		if (msi_page->phys == msi_addr)
			return msi_page;

	msi_page = kzalloc(sizeof(*msi_page), GFP_KERNEL);
	if (!msi_page)
		return NULL;

	iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
	if (!iova)
		goto out_free_page;

	if (iommu_map(domain, iova, msi_addr, size, prot))
		goto out_free_iova;

	INIT_LIST_HEAD(&msi_page->list);
	msi_page->phys = msi_addr;
	msi_page->iova = iova;
	list_add(&msi_page->list, &cookie->msi_page_list);
	return msi_page;

out_free_iova:
	iommu_dma_free_iova(cookie, iova, size);
out_free_page:
	kfree(msi_page);
	return NULL;
}
int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr)
{
	struct device *dev = msi_desc_to_dev(desc);
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iommu_dma_msi_page *msi_page;
	static DEFINE_MUTEX(msi_prepare_lock); /* see below */

	if (!domain || !domain->iova_cookie) {
		desc->iommu_cookie = NULL;
		return 0;
	}

	/*
	 * In fact the whole prepare operation should already be serialised by
	 * irq_domain_mutex further up the callchain, but that's pretty subtle
	 * on its own, so consider this locking as failsafe documentation...
	 */
	mutex_lock(&msi_prepare_lock);
	msi_page = iommu_dma_get_msi_page(dev, msi_addr, domain);
	mutex_unlock(&msi_prepare_lock);

	msi_desc_set_iommu_cookie(desc, msi_page);

	if (!msi_page)
		return -ENOMEM;
	return 0;
}
void iommu_dma_compose_msi_msg(struct msi_desc *desc,
			       struct msi_msg *msg)
{
	struct device *dev = msi_desc_to_dev(desc);
	const struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	const struct iommu_dma_msi_page *msi_page;

	msi_page = msi_desc_get_iommu_cookie(desc);

	if (!domain || !domain->iova_cookie || WARN_ON(!msi_page))
		return;

	msg->address_hi = upper_32_bits(msi_page->iova);
	msg->address_lo &= cookie_msi_granule(domain->iova_cookie) - 1;
	msg->address_lo += lower_32_bits(msi_page->iova);
}
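
/*
 * For instance, an MSI irqchip driver (the GICv3 ITS is one real user) calls
 * the two helpers above from its MSI domain ops, roughly as below; the my_*()
 * names and doorbell plumbing are hypothetical placeholders:
 *
 *	static int my_msi_prepare(struct irq_domain *d, struct device *dev,
 *				  int nvec, msi_alloc_info_t *info)
 *	{
 *		...
 *		return iommu_dma_prepare_msi(info->desc, my_doorbell_phys);
 *	}
 *
 *	static void my_irq_compose_msi_msg(struct irq_data *d,
 *					   struct msi_msg *msg)
 *	{
 *		msg->address_lo = lower_32_bits(my_doorbell_phys);
 *		msg->address_hi = upper_32_bits(my_doorbell_phys);
 *		msg->data = my_hwirq(d);
 *		iommu_dma_compose_msi_msg(irq_data_get_msi_desc(d), msg);
 *	}
 */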
static int iommu_dma_init(void)
{
	return iova_cache_get();
}
arch_initcall(iommu_dma_init);