// SPDX-License-Identifier: GPL-2.0-only
/*
 * A fairly generic DMA-API to IOMMU-API glue layer.
 *
 * Copyright (C) 2014-2015 ARM Ltd.
 *
 * based in part on arch/arm/mm/dma-mapping.c:
 * Copyright (C) 2000-2004 Russell King
 */

#include <linux/acpi_iort.h>
#include <linux/device.h>
#include <linux/dma-contiguous.h>
#include <linux/dma-iommu.h>
#include <linux/dma-noncoherent.h>
#include <linux/gfp.h>
#include <linux/huge_mm.h>
#include <linux/iommu.h>
#include <linux/iova.h>
#include <linux/irq.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>
struct iommu_dma_msi_page {
	struct list_head	list;
	dma_addr_t		iova;
	phys_addr_t		phys;
};

enum iommu_dma_cookie_type {
	IOMMU_DMA_IOVA_COOKIE,
	IOMMU_DMA_MSI_COOKIE,
};

struct iommu_dma_cookie {
	enum iommu_dma_cookie_type	type;
	union {
		/* Full allocator for IOMMU_DMA_IOVA_COOKIE */
		struct iova_domain	iovad;
		/* Trivial linear page allocator for IOMMU_DMA_MSI_COOKIE */
		dma_addr_t		msi_iova;
	};
	struct list_head		msi_page_list;
	spinlock_t			msi_lock;

	/* Domain for flush queue callback; NULL if flush queue not in use */
	struct iommu_domain		*fq_domain;
};
static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie)
{
	if (cookie->type == IOMMU_DMA_IOVA_COOKIE)
		return cookie->iovad.granule;
	return PAGE_SIZE;
}
static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type)
{
	struct iommu_dma_cookie *cookie;

	cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
	if (cookie) {
		spin_lock_init(&cookie->msi_lock);
		INIT_LIST_HEAD(&cookie->msi_page_list);
		cookie->type = type;
	}
	return cookie;
}
/**
 * iommu_get_dma_cookie - Acquire DMA-API resources for a domain
 * @domain: IOMMU domain to prepare for DMA-API usage
 *
 * IOMMU drivers should normally call this from their domain_alloc
 * callback when domain->type == IOMMU_DOMAIN_DMA.
 */
int iommu_get_dma_cookie(struct iommu_domain *domain)
{
	if (domain->iova_cookie)
		return -EEXIST;

	domain->iova_cookie = cookie_alloc(IOMMU_DMA_IOVA_COOKIE);
	if (!domain->iova_cookie)
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL(iommu_get_dma_cookie);
/**
 * iommu_get_msi_cookie - Acquire just MSI remapping resources
 * @domain: IOMMU domain to prepare
 * @base: Start address of IOVA region for MSI mappings
 *
 * Users who manage their own IOVA allocation and do not want DMA API support,
 * but would still like to take advantage of automatic MSI remapping, can use
 * this to initialise their own domain appropriately. Users should reserve a
 * contiguous IOVA region, starting at @base, large enough to accommodate the
 * number of PAGE_SIZE mappings necessary to cover every MSI doorbell address
 * used by the devices attached to @domain.
 */
int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
{
	struct iommu_dma_cookie *cookie;

	if (domain->type != IOMMU_DOMAIN_UNMANAGED)
		return -EINVAL;

	if (domain->iova_cookie)
		return -EEXIST;

	cookie = cookie_alloc(IOMMU_DMA_MSI_COOKIE);
	if (!cookie)
		return -ENOMEM;

	cookie->msi_iova = base;
	domain->iova_cookie = cookie;
	return 0;
}
EXPORT_SYMBOL(iommu_get_msi_cookie);
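
/*
 * Illustrative sketch (not part of this file): a caller that manages its own
 * IOVA space for an unmanaged domain, such as a VFIO-style user, might reserve
 * an IOVA region and hand it over for MSI remapping roughly as below.  The
 * "resv_base" value and region size are made-up for the example.
 *
 *	dma_addr_t resv_base = 0x08000000;	// hypothetical reserved IOVA
 *
 *	domain = iommu_domain_alloc(bus);	// IOMMU_DOMAIN_UNMANAGED
 *	if (!domain)
 *		return -ENOMEM;
 *	if (iommu_get_msi_cookie(domain, resv_base))
 *		goto out_free_domain;
 *	// MSI doorbells for devices attached to this domain are now mapped
 *	// linearly, starting at resv_base, as they are prepared.
 */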
/**
 * iommu_put_dma_cookie - Release a domain's DMA mapping resources
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie() or
 *          iommu_get_msi_cookie()
 *
 * IOMMU drivers should normally call this from their domain_free callback.
 */
void iommu_put_dma_cookie(struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iommu_dma_msi_page *msi, *tmp;

	if (!cookie)
		return;

	if (cookie->type == IOMMU_DMA_IOVA_COOKIE && cookie->iovad.granule)
		put_iova_domain(&cookie->iovad);

	list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list) {
		list_del(&msi->list);
		kfree(msi);
	}
	kfree(cookie);
	domain->iova_cookie = NULL;
}
EXPORT_SYMBOL(iommu_put_dma_cookie);
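
/*
 * Illustrative sketch (not from this file): an IOMMU driver typically pairs
 * the cookie helpers in its domain lifecycle callbacks.  "struct my_domain"
 * and the callback names below are hypothetical.
 *
 *	static struct iommu_domain *my_domain_alloc(unsigned type)
 *	{
 *		struct my_domain *dom = kzalloc(sizeof(*dom), GFP_KERNEL);
 *
 *		if (!dom)
 *			return NULL;
 *		if (type == IOMMU_DOMAIN_DMA &&
 *		    iommu_get_dma_cookie(&dom->domain)) {
 *			kfree(dom);
 *			return NULL;
 *		}
 *		return &dom->domain;
 *	}
 *
 *	static void my_domain_free(struct iommu_domain *domain)
 *	{
 *		iommu_put_dma_cookie(domain);
 *		// ...driver-specific teardown...
 *	}
 */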
/**
 * iommu_dma_get_resv_regions - Reserved region driver helper
 * @dev: Device from iommu_get_resv_regions()
 * @list: Reserved region list from iommu_get_resv_regions()
 *
 * IOMMU drivers can use this to implement their .get_resv_regions callback
 * for general non-IOMMU-specific reservations. Currently, this covers GICv3
 * ITS region reservation on ACPI based ARM platforms that may require HW MSI
 * reservation.
 */
void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list)
{
	if (!is_of_node(dev_iommu_fwspec_get(dev)->iommu_fwnode))
		iort_iommu_msi_get_resv_regions(dev, list);
}
EXPORT_SYMBOL(iommu_dma_get_resv_regions);
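
/*
 * Illustrative sketch (not from this file): a driver's .get_resv_regions
 * callback usually adds its own hardware-specific regions and then defers to
 * the helper above for the generic ones.  "MY_MSI_BASE" and "MY_MSI_SIZE" are
 * hypothetical.
 *
 *	static void my_get_resv_regions(struct device *dev,
 *					struct list_head *head)
 *	{
 *		struct iommu_resv_region *region;
 *		int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
 *
 *		region = iommu_alloc_resv_region(MY_MSI_BASE, MY_MSI_SIZE,
 *						 prot, IOMMU_RESV_SW_MSI);
 *		if (region)
 *			list_add_tail(&region->list, head);
 *
 *		iommu_dma_get_resv_regions(dev, head);
 *	}
 */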
static int cookie_init_hw_msi_region(struct iommu_dma_cookie *cookie,
		phys_addr_t start, phys_addr_t end)
{
	struct iova_domain *iovad = &cookie->iovad;
	struct iommu_dma_msi_page *msi_page;
	int i, num_pages;

	start -= iova_offset(iovad, start);
	num_pages = iova_align(iovad, end - start) >> iova_shift(iovad);

	msi_page = kcalloc(num_pages, sizeof(*msi_page), GFP_KERNEL);
	if (!msi_page)
		return -ENOMEM;

	for (i = 0; i < num_pages; i++) {
		msi_page[i].phys = start;
		msi_page[i].iova = start;
		INIT_LIST_HEAD(&msi_page[i].list);
		list_add(&msi_page[i].list, &cookie->msi_page_list);
		start += iovad->granule;
	}

	return 0;
}
static int iova_reserve_pci_windows(struct pci_dev *dev,
		struct iova_domain *iovad)
{
	struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus);
	struct resource_entry *window;
	unsigned long lo, hi;
	phys_addr_t start = 0, end;

	resource_list_for_each_entry(window, &bridge->windows) {
		if (resource_type(window->res) != IORESOURCE_MEM)
			continue;

		lo = iova_pfn(iovad, window->res->start - window->offset);
		hi = iova_pfn(iovad, window->res->end - window->offset);
		reserve_iova(iovad, lo, hi);
	}

	/* Get reserved DMA windows from host bridge */
	resource_list_for_each_entry(window, &bridge->dma_ranges) {
		end = window->res->start - window->offset;
resv_iova:
		if (end > start) {
			lo = iova_pfn(iovad, start);
			hi = iova_pfn(iovad, end);
			reserve_iova(iovad, lo, hi);
		} else {
			/* dma_ranges list should be sorted */
			dev_err(&dev->dev, "Failed to reserve IOVA\n");
			return -EINVAL;
		}

		start = window->res->end - window->offset + 1;
		/* If window is last entry */
		if (window->node.next == &bridge->dma_ranges &&
		    end != ~(phys_addr_t)0) {
			end = ~(phys_addr_t)0;
			goto resv_iova;
		}
	}

	return 0;
}
static int iova_reserve_iommu_regions(struct device *dev,
		struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	struct iommu_resv_region *region;
	LIST_HEAD(resv_regions);
	int ret = 0;

	if (dev_is_pci(dev)) {
		ret = iova_reserve_pci_windows(to_pci_dev(dev), iovad);
		if (ret)
			return ret;
	}

	iommu_get_resv_regions(dev, &resv_regions);
	list_for_each_entry(region, &resv_regions, list) {
		unsigned long lo, hi;

		/* We ARE the software that manages these! */
		if (region->type == IOMMU_RESV_SW_MSI)
			continue;

		lo = iova_pfn(iovad, region->start);
		hi = iova_pfn(iovad, region->start + region->length - 1);
		reserve_iova(iovad, lo, hi);

		if (region->type == IOMMU_RESV_MSI)
			ret = cookie_init_hw_msi_region(cookie, region->start,
					region->start + region->length);
		if (ret)
			break;
	}
	iommu_put_resv_regions(dev, &resv_regions);

	return ret;
}
static void iommu_dma_flush_iotlb_all(struct iova_domain *iovad)
{
	struct iommu_dma_cookie *cookie;
	struct iommu_domain *domain;

	cookie = container_of(iovad, struct iommu_dma_cookie, iovad);
	domain = cookie->fq_domain;
	/*
	 * The IOMMU driver supporting DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE
	 * implies that ops->flush_iotlb_all must be non-NULL.
	 */
	domain->ops->flush_iotlb_all(domain);
}
/**
 * iommu_dma_init_domain - Initialise a DMA mapping domain
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
 * @base: IOVA at which the mappable address space starts
 * @size: Size of IOVA space
 * @dev: Device the domain is being initialised for
 *
 * @base and @size should be exact multiples of IOMMU page granularity to
 * avoid rounding surprises. If necessary, we reserve the page at address 0
 * to ensure it is an invalid IOVA. It is safe to reinitialise a domain, but
 * any change which could make prior IOVAs invalid will fail.
 */
static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
		u64 size, struct device *dev)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	unsigned long order, base_pfn;
	struct iova_domain *iovad;
	int attr;

	if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
		return -EINVAL;

	iovad = &cookie->iovad;

	/* Use the smallest supported page size for IOVA granularity */
	order = __ffs(domain->pgsize_bitmap);
	base_pfn = max_t(unsigned long, 1, base >> order);

	/* Check the domain allows at least some access to the device... */
	if (domain->geometry.force_aperture) {
		if (base > domain->geometry.aperture_end ||
		    base + size <= domain->geometry.aperture_start) {
			pr_warn("specified DMA range outside IOMMU capability\n");
			return -EFAULT;
		}
		/* ...then finally give it a kicking to make sure it fits */
		base_pfn = max_t(unsigned long, base_pfn,
				domain->geometry.aperture_start >> order);
	}

	/* start_pfn is always nonzero for an already-initialised domain */
	if (iovad->start_pfn) {
		if (1UL << order != iovad->granule ||
		    base_pfn != iovad->start_pfn) {
			pr_warn("Incompatible range for DMA domain\n");
			return -EFAULT;
		}

		return 0;
	}

	init_iova_domain(iovad, 1UL << order, base_pfn);

	if (!cookie->fq_domain && !iommu_domain_get_attr(domain,
			DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE, &attr) && attr) {
		cookie->fq_domain = domain;
		init_iova_flush_queue(iovad, iommu_dma_flush_iotlb_all, NULL);
	}

	if (!dev)
		return 0;

	return iova_reserve_iommu_regions(dev, domain);
}
/**
 * dma_info_to_prot - Translate DMA API directions and attributes to IOMMU API
 *                    page flags.
 * @dir: Direction of DMA transfer
 * @coherent: Is the DMA master cache-coherent?
 * @attrs: DMA attributes for the mapping
 *
 * Return: corresponding IOMMU API page protection flags
 */
static int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
		unsigned long attrs)
{
	int prot = coherent ? IOMMU_CACHE : 0;

	if (attrs & DMA_ATTR_PRIVILEGED)
		prot |= IOMMU_PRIV;

	switch (dir) {
	case DMA_BIDIRECTIONAL:
		return prot | IOMMU_READ | IOMMU_WRITE;
	case DMA_TO_DEVICE:
		return prot | IOMMU_READ;
	case DMA_FROM_DEVICE:
		return prot | IOMMU_WRITE;
	default:
		return 0;
	}
}
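
/*
 * Worked example (illustrative): for a non-coherent master doing a
 * DMA_TO_DEVICE transfer with DMA_ATTR_PRIVILEGED set, the above resolves to
 * IOMMU_READ | IOMMU_PRIV; the same transfer from a coherent master without
 * the attribute yields IOMMU_READ | IOMMU_CACHE.
 */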
static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
		size_t size, dma_addr_t dma_limit, struct device *dev)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	unsigned long shift, iova_len, iova = 0;

	if (cookie->type == IOMMU_DMA_MSI_COOKIE) {
		cookie->msi_iova += size;
		return cookie->msi_iova - size;
	}

	shift = iova_shift(iovad);
	iova_len = size >> shift;
	/*
	 * Freeing non-power-of-two-sized allocations back into the IOVA caches
	 * will come back to bite us badly, so we have to waste a bit of space
	 * rounding up anything cacheable to make sure that can't happen. The
	 * order of the unadjusted size will still match upon freeing.
	 */
	if (iova_len < (1 << (IOVA_RANGE_CACHE_MAX_SIZE - 1)))
		iova_len = roundup_pow_of_two(iova_len);

	if (dev->bus_dma_mask)
		dma_limit &= dev->bus_dma_mask;

	if (domain->geometry.force_aperture)
		dma_limit = min(dma_limit, domain->geometry.aperture_end);

	/* Try to get PCI devices a SAC address */
	if (dma_limit > DMA_BIT_MASK(32) && dev_is_pci(dev))
		iova = alloc_iova_fast(iovad, iova_len,
				       DMA_BIT_MASK(32) >> shift, false);

	if (!iova)
		iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift,
				       true);

	return (dma_addr_t)iova << shift;
}
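
/*
 * Worked example (illustrative, assuming a 4K IOVA granule): a 20K mapping
 * gives iova_len = 5, which is within the rcache-able range and is therefore
 * rounded up to 8 granules (32K) before allocation, while a 4M mapping (1024
 * granules) is beyond that range and is left unrounded.
 */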
static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
		dma_addr_t iova, size_t size)
{
	struct iova_domain *iovad = &cookie->iovad;

	/* The MSI case is only ever cleaning up its most recent allocation */
	if (cookie->type == IOMMU_DMA_MSI_COOKIE)
		cookie->msi_iova -= size;
	else if (cookie->fq_domain)	/* non-strict mode */
		queue_iova(iovad, iova_pfn(iovad, iova),
				size >> iova_shift(iovad), 0);
	else
		free_iova_fast(iovad, iova_pfn(iovad, iova),
				size >> iova_shift(iovad));
}
static void __iommu_dma_unmap(struct device *dev, dma_addr_t dma_addr,
		size_t size)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	size_t iova_off = iova_offset(iovad, dma_addr);
	struct iommu_iotlb_gather iotlb_gather;
	size_t unmapped;

	dma_addr -= iova_off;
	size = iova_align(iovad, size + iova_off);
	iommu_iotlb_gather_init(&iotlb_gather);

	unmapped = iommu_unmap_fast(domain, dma_addr, size, &iotlb_gather);
	WARN_ON(unmapped != size);

	if (!cookie->fq_domain)
		iommu_tlb_sync(domain, &iotlb_gather);
	iommu_dma_free_iova(cookie, dma_addr, size);
}
static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
		size_t size, int prot)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	size_t iova_off = iova_offset(iovad, phys);
	dma_addr_t iova;

	size = iova_align(iovad, size + iova_off);

	iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
	if (!iova)
		return DMA_MAPPING_ERROR;

	if (iommu_map(domain, iova, phys - iova_off, size, prot)) {
		iommu_dma_free_iova(cookie, iova, size);
		return DMA_MAPPING_ERROR;
	}
	return iova + iova_off;
}
static void __iommu_dma_free_pages(struct page **pages, int count)
{
	while (count--)
		__free_page(pages[count]);
	kvfree(pages);
}
static struct page **__iommu_dma_alloc_pages(struct device *dev,
		unsigned int count, unsigned long order_mask, gfp_t gfp)
{
	struct page **pages;
	unsigned int i = 0, nid = dev_to_node(dev);

	order_mask &= (2U << MAX_ORDER) - 1;
	if (!order_mask)
		return NULL;

	pages = kvzalloc(count * sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return NULL;

	/* IOMMU can map any pages, so himem can also be used here */
	gfp |= __GFP_NOWARN | __GFP_HIGHMEM;

	while (count) {
		struct page *page = NULL;
		unsigned int order_size;

		/*
		 * Higher-order allocations are a convenience rather
		 * than a necessity, hence using __GFP_NORETRY until
		 * falling back to minimum-order allocations.
		 */
		for (order_mask &= (2U << __fls(count)) - 1;
		     order_mask; order_mask &= ~order_size) {
			unsigned int order = __fls(order_mask);
			gfp_t alloc_flags = gfp;

			order_size = 1U << order;
			if (order_mask > order_size)
				alloc_flags |= __GFP_NORETRY;
			page = alloc_pages_node(nid, alloc_flags, order);
			if (!page)
				continue;
			if (!order)
				break;
			if (!PageCompound(page)) {
				split_page(page, order);
				break;
			} else if (!split_huge_page(page)) {
				break;
			}
			__free_pages(page, order);
		}
		if (!page) {
			__iommu_dma_free_pages(pages, i);
			return NULL;
		}
		count -= order_size;
		while (order_size--)
			pages[i++] = page++;
	}
	return pages;
}
/**
 * iommu_dma_alloc_remap - Allocate and map a buffer contiguous in IOVA space
 * @dev: Device to allocate memory for. Must be a real device
 *	 attached to an iommu_dma_domain
 * @size: Size of buffer in bytes
 * @dma_handle: Out argument for allocated DMA handle
 * @gfp: Allocation flags
 * @attrs: DMA attributes for this allocation
 *
 * If @size is less than PAGE_SIZE, then a full CPU page will be allocated,
 * but an IOMMU which supports smaller pages might not map the whole thing.
 *
 * Return: Mapped virtual address, or NULL on failure.
 */
static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	bool coherent = dev_is_dma_coherent(dev);
	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
	pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);
	unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;
	struct page **pages;
	struct sg_table sgt;
	dma_addr_t iova;
	void *vaddr;

	*dma_handle = DMA_MAPPING_ERROR;

	min_size = alloc_sizes & -alloc_sizes;
	if (min_size < PAGE_SIZE) {
		min_size = PAGE_SIZE;
		alloc_sizes |= PAGE_SIZE;
	} else {
		size = ALIGN(size, min_size);
	}
	if (attrs & DMA_ATTR_ALLOC_SINGLE_PAGES)
		alloc_sizes = min_size;

	count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	pages = __iommu_dma_alloc_pages(dev, count, alloc_sizes >> PAGE_SHIFT,
					gfp);
	if (!pages)
		return NULL;

	size = iova_align(iovad, size);
	iova = iommu_dma_alloc_iova(domain, size, dev->coherent_dma_mask, dev);
	if (!iova)
		goto out_free_pages;

	if (sg_alloc_table_from_pages(&sgt, pages, count, 0, size, GFP_KERNEL))
		goto out_free_iova;

	if (!(ioprot & IOMMU_CACHE)) {
		struct scatterlist *sg;
		int i;

		for_each_sg(sgt.sgl, sg, sgt.orig_nents, i)
			arch_dma_prep_coherent(sg_page(sg), sg->length);
	}

	if (iommu_map_sg(domain, iova, sgt.sgl, sgt.orig_nents, ioprot)
			< size)
		goto out_free_sg;

	vaddr = dma_common_pages_remap(pages, size, prot,
			__builtin_return_address(0));
	if (!vaddr)
		goto out_unmap;

	*dma_handle = iova;
	sg_free_table(&sgt);
	return vaddr;

out_unmap:
	__iommu_dma_unmap(dev, iova, size);
out_free_sg:
	sg_free_table(&sgt);
out_free_iova:
	iommu_dma_free_iova(cookie, iova, size);
out_free_pages:
	__iommu_dma_free_pages(pages, count);
	return NULL;
}
/**
 * __iommu_dma_mmap - Map a buffer into provided user VMA
 * @pages: Array representing buffer from __iommu_dma_alloc()
 * @size: Size of buffer in bytes
 * @vma: VMA describing requested userspace mapping
 *
 * Maps the pages of the buffer in @pages into @vma. The caller is responsible
 * for verifying the correct size and protection of @vma beforehand.
 */
static int __iommu_dma_mmap(struct page **pages, size_t size,
		struct vm_area_struct *vma)
{
	return vm_map_pages(vma, pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
}
static void iommu_dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
	phys_addr_t phys;

	if (dev_is_dma_coherent(dev))
		return;

	phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
	arch_sync_dma_for_cpu(dev, phys, size, dir);
}
static void iommu_dma_sync_single_for_device(struct device *dev,
		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
	phys_addr_t phys;

	if (dev_is_dma_coherent(dev))
		return;

	phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
	arch_sync_dma_for_device(dev, phys, size, dir);
}
static void iommu_dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nelems,
		enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (dev_is_dma_coherent(dev))
		return;

	for_each_sg(sgl, sg, nelems, i)
		arch_sync_dma_for_cpu(dev, sg_phys(sg), sg->length, dir);
}
static void iommu_dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nelems,
		enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (dev_is_dma_coherent(dev))
		return;

	for_each_sg(sgl, sg, nelems, i)
		arch_sync_dma_for_device(dev, sg_phys(sg), sg->length, dir);
}
static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	phys_addr_t phys = page_to_phys(page) + offset;
	bool coherent = dev_is_dma_coherent(dev);
	int prot = dma_info_to_prot(dir, coherent, attrs);
	dma_addr_t dma_handle;

	dma_handle = __iommu_dma_map(dev, phys, size, prot);
	if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
	    dma_handle != DMA_MAPPING_ERROR)
		arch_sync_dma_for_device(dev, phys, size, dir);
	return dma_handle;
}
static void iommu_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		iommu_dma_sync_single_for_cpu(dev, dma_handle, size, dir);
	__iommu_dma_unmap(dev, dma_handle, size);
}
/*
 * Prepare a successfully-mapped scatterlist to give back to the caller.
 *
 * At this point the segments are already laid out by iommu_dma_map_sg() to
 * avoid individually crossing any boundaries, so we merely need to check a
 * segment's start address to avoid concatenating across one.
 */
static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
		dma_addr_t dma_addr)
{
	struct scatterlist *s, *cur = sg;
	unsigned long seg_mask = dma_get_seg_boundary(dev);
	unsigned int cur_len = 0, max_len = dma_get_max_seg_size(dev);
	int i, count = 0;

	for_each_sg(sg, s, nents, i) {
		/* Restore this segment's original unaligned fields first */
		unsigned int s_iova_off = sg_dma_address(s);
		unsigned int s_length = sg_dma_len(s);
		unsigned int s_iova_len = s->length;

		s->offset += s_iova_off;
		s->length = s_length;
		sg_dma_address(s) = DMA_MAPPING_ERROR;
		sg_dma_len(s) = 0;

		/*
		 * Now fill in the real DMA data. If...
		 * - there is a valid output segment to append to
		 * - and this segment starts on an IOVA page boundary
		 * - but doesn't fall at a segment boundary
		 * - and wouldn't make the resulting output segment too long
		 */
		if (cur_len && !s_iova_off && (dma_addr & seg_mask) &&
		    (max_len - cur_len >= s_length)) {
			/* ...then concatenate it with the previous one */
			cur_len += s_length;
		} else {
			/* Otherwise start the next output segment */
			if (i > 0)
				cur = sg_next(cur);
			cur_len = s_length;
			count++;

			sg_dma_address(cur) = dma_addr + s_iova_off;
		}

		sg_dma_len(cur) = cur_len;
		dma_addr += s_iova_len;

		if (s_length + s_iova_off < s_iova_len)
			cur_len = 0;
	}
	return count;
}
/*
 * If mapping failed, then just restore the original list,
 * but making sure the DMA fields are invalidated.
 */
static void __invalidate_sg(struct scatterlist *sg, int nents)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (sg_dma_address(s) != DMA_MAPPING_ERROR)
			s->offset += sg_dma_address(s);
		if (sg_dma_len(s))
			s->length = sg_dma_len(s);
		sg_dma_address(s) = DMA_MAPPING_ERROR;
		sg_dma_len(s) = 0;
	}
}
/*
 * The DMA API client is passing in a scatterlist which could describe
 * any old buffer layout, but the IOMMU API requires everything to be
 * aligned to IOMMU pages. Hence the need for this complicated bit of
 * impedance-matching, to be able to hand off a suitably-aligned list,
 * but still preserve the original offsets and sizes for the caller.
 */
static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	struct scatterlist *s, *prev = NULL;
	int prot = dma_info_to_prot(dir, dev_is_dma_coherent(dev), attrs);
	dma_addr_t iova;
	size_t iova_len = 0;
	unsigned long mask = dma_get_seg_boundary(dev);
	int i;

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		iommu_dma_sync_sg_for_device(dev, sg, nents, dir);

	/*
	 * Work out how much IOVA space we need, and align the segments to
	 * IOVA granules for the IOMMU driver to handle. With some clever
	 * trickery we can modify the list in-place, but reversibly, by
	 * stashing the unaligned parts in the as-yet-unused DMA fields.
	 */
	for_each_sg(sg, s, nents, i) {
		size_t s_iova_off = iova_offset(iovad, s->offset);
		size_t s_length = s->length;
		size_t pad_len = (mask - iova_len + 1) & mask;

		sg_dma_address(s) = s_iova_off;
		sg_dma_len(s) = s_length;
		s->offset -= s_iova_off;
		s_length = iova_align(iovad, s_length + s_iova_off);
		s->length = s_length;

		/*
		 * Due to the alignment of our single IOVA allocation, we can
		 * depend on these assumptions about the segment boundary mask:
		 * - If mask size >= IOVA size, then the IOVA range cannot
		 *   possibly fall across a boundary, so we don't care.
		 * - If mask size < IOVA size, then the IOVA range must start
		 *   exactly on a boundary, therefore we can lay things out
		 *   based purely on segment lengths without needing to know
		 *   the actual addresses beforehand.
		 * - The mask must be a power of 2, so pad_len == 0 if
		 *   iova_len == 0, thus we cannot dereference prev the first
		 *   time through here (i.e. before it has a meaningful value).
		 */
		if (pad_len && pad_len < s_length - 1) {
			prev->length += pad_len;
			iova_len += pad_len;
		}

		iova_len += s_length;
		prev = s;
	}

	iova = iommu_dma_alloc_iova(domain, iova_len, dma_get_mask(dev), dev);
	if (!iova)
		goto out_restore_sg;

	/*
	 * We'll leave any physical concatenation to the IOMMU driver's
	 * implementation - it knows better than we do.
	 */
	if (iommu_map_sg(domain, iova, sg, nents, prot) < iova_len)
		goto out_free_iova;

	return __finalise_sg(dev, sg, nents, iova);

out_free_iova:
	iommu_dma_free_iova(cookie, iova, iova_len);
out_restore_sg:
	__invalidate_sg(sg, nents);
	return 0;
}
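
/*
 * Worked example (illustrative, 4K IOVA granule): a scatterlist of two
 * segments, 0x1000 bytes at page offset 0x200 and 0x800 bytes at offset 0x0,
 * is padded out above to 0x2000 + 0x1000 of IOVA space; after a successful
 * iommu_map_sg(), __finalise_sg() restores the original offsets and lengths,
 * with each segment's DMA address pointing into the one contiguous IOVA range.
 */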
static void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	dma_addr_t start, end;
	struct scatterlist *tmp;
	int i;

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		iommu_dma_sync_sg_for_cpu(dev, sg, nents, dir);

	/*
	 * The scatterlist segments are mapped into a single
	 * contiguous IOVA allocation, so this is incredibly easy.
	 */
	start = sg_dma_address(sg);
	for_each_sg(sg_next(sg), tmp, nents - 1, i) {
		if (sg_dma_len(tmp) == 0)
			break;
		sg = tmp;
	}
	end = sg_dma_address(sg) + sg_dma_len(sg);
	__iommu_dma_unmap(dev, start, end - start);
}
static dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	return __iommu_dma_map(dev, phys, size,
			dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO);
}
static void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	__iommu_dma_unmap(dev, handle, size);
}
static void __iommu_dma_free(struct device *dev, size_t size, void *cpu_addr)
{
	size_t alloc_size = PAGE_ALIGN(size);
	int count = alloc_size >> PAGE_SHIFT;
	struct page *page = NULL, **pages = NULL;

	/* Non-coherent atomic allocation? Easy */
	if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
	    dma_free_from_pool(cpu_addr, alloc_size))
		return;

	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
		/*
		 * If the address is remapped, then it's either non-coherent
		 * or highmem CMA, or an iommu_dma_alloc_remap() construction.
		 */
		pages = dma_common_find_pages(cpu_addr);
		if (!pages)
			page = vmalloc_to_page(cpu_addr);
		dma_common_free_remap(cpu_addr, alloc_size);
	} else {
		/* Lowmem means a coherent atomic or CMA allocation */
		page = virt_to_page(cpu_addr);
	}

	if (pages)
		__iommu_dma_free_pages(pages, count);
	if (page)
		dma_free_contiguous(dev, page, alloc_size);
}
static void iommu_dma_free(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t handle, unsigned long attrs)
{
	__iommu_dma_unmap(dev, handle, size);
	__iommu_dma_free(dev, size, cpu_addr);
}
static void *iommu_dma_alloc_pages(struct device *dev, size_t size,
		struct page **pagep, gfp_t gfp, unsigned long attrs)
{
	bool coherent = dev_is_dma_coherent(dev);
	size_t alloc_size = PAGE_ALIGN(size);
	int node = dev_to_node(dev);
	struct page *page = NULL;
	void *cpu_addr;

	page = dma_alloc_contiguous(dev, alloc_size, gfp);
	if (!page)
		page = alloc_pages_node(node, gfp, get_order(alloc_size));
	if (!page)
		return NULL;

	if (IS_ENABLED(CONFIG_DMA_REMAP) && (!coherent || PageHighMem(page))) {
		pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);

		cpu_addr = dma_common_contiguous_remap(page, alloc_size,
				prot, __builtin_return_address(0));
		if (!cpu_addr)
			goto out_free_pages;

		if (!coherent)
			arch_dma_prep_coherent(page, size);
	} else {
		cpu_addr = page_address(page);
	}

	*pagep = page;
	memset(cpu_addr, 0, alloc_size);
	return cpu_addr;
out_free_pages:
	dma_free_contiguous(dev, page, alloc_size);
	return NULL;
}
static void *iommu_dma_alloc(struct device *dev, size_t size,
		dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
{
	bool coherent = dev_is_dma_coherent(dev);
	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
	struct page *page = NULL;
	void *cpu_addr;

	gfp |= __GFP_ZERO;

	if (IS_ENABLED(CONFIG_DMA_REMAP) && gfpflags_allow_blocking(gfp) &&
	    !(attrs & DMA_ATTR_FORCE_CONTIGUOUS))
		return iommu_dma_alloc_remap(dev, size, handle, gfp, attrs);

	if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
	    !gfpflags_allow_blocking(gfp) && !coherent)
		cpu_addr = dma_alloc_from_pool(PAGE_ALIGN(size), &page, gfp);
	else
		cpu_addr = iommu_dma_alloc_pages(dev, size, &page, gfp, attrs);
	if (!cpu_addr)
		return NULL;

	*handle = __iommu_dma_map(dev, page_to_phys(page), size, ioprot);
	if (*handle == DMA_MAPPING_ERROR) {
		__iommu_dma_free(dev, size, cpu_addr);
		return NULL;
	}

	return cpu_addr;
}
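
/*
 * Illustrative sketch (not from this file): consumers never call the above
 * directly; a driver's ordinary dma_alloc_coherent() ends up here once
 * iommu_setup_dma_ops() has installed iommu_dma_ops for the device.
 *
 *	void *buf;
 *	dma_addr_t dma_handle;
 *
 *	buf = dma_alloc_coherent(dev, SZ_64K, &dma_handle, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	// ...use buf / dma_handle...
 *	dma_free_coherent(dev, SZ_64K, buf, dma_handle);
 */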
static int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long pfn, off = vma->vm_pgoff;
	int ret;

	vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);

	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (off >= nr_pages || vma_pages(vma) > nr_pages - off)
		return -ENXIO;

	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
		struct page **pages = dma_common_find_pages(cpu_addr);

		if (pages)
			return __iommu_dma_mmap(pages, size, vma);
		pfn = vmalloc_to_pfn(cpu_addr);
	} else {
		pfn = page_to_pfn(virt_to_page(cpu_addr));
	}

	return remap_pfn_range(vma, vma->vm_start, pfn + off,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}
static int iommu_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	struct page *page;
	int ret;

	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
		struct page **pages = dma_common_find_pages(cpu_addr);

		if (pages) {
			return sg_alloc_table_from_pages(sgt, pages,
					PAGE_ALIGN(size) >> PAGE_SHIFT,
					0, size, GFP_KERNEL);
		}

		page = vmalloc_to_page(cpu_addr);
	} else {
		page = virt_to_page(cpu_addr);
	}

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (!ret)
		sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	return ret;
}
static unsigned long iommu_dma_get_merge_boundary(struct device *dev)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);

	return (1UL << __ffs(domain->pgsize_bitmap)) - 1;
}
static const struct dma_map_ops iommu_dma_ops = {
	.alloc			= iommu_dma_alloc,
	.free			= iommu_dma_free,
	.mmap			= iommu_dma_mmap,
	.get_sgtable		= iommu_dma_get_sgtable,
	.map_page		= iommu_dma_map_page,
	.unmap_page		= iommu_dma_unmap_page,
	.map_sg			= iommu_dma_map_sg,
	.unmap_sg		= iommu_dma_unmap_sg,
	.sync_single_for_cpu	= iommu_dma_sync_single_for_cpu,
	.sync_single_for_device	= iommu_dma_sync_single_for_device,
	.sync_sg_for_cpu	= iommu_dma_sync_sg_for_cpu,
	.sync_sg_for_device	= iommu_dma_sync_sg_for_device,
	.map_resource		= iommu_dma_map_resource,
	.unmap_resource		= iommu_dma_unmap_resource,
	.get_merge_boundary	= iommu_dma_get_merge_boundary,
};
/*
 * The IOMMU core code allocates the default DMA domain, which the underlying
 * IOMMU driver needs to support via the dma-iommu layer.
 */
void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);

	if (!domain)
		goto out_err;

	/*
	 * The IOMMU core code allocates the default DMA domain, which the
	 * underlying IOMMU driver needs to support via the dma-iommu layer.
	 */
	if (domain->type == IOMMU_DOMAIN_DMA) {
		if (iommu_dma_init_domain(domain, dma_base, size, dev))
			goto out_err;
		dev->dma_ops = &iommu_dma_ops;
	}

	return;
out_err:
	pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
		dev_name(dev));
}
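
/*
 * Illustrative sketch (not from this file): architecture code calls
 * iommu_setup_dma_ops() from its arch_setup_dma_ops() implementation once the
 * device's IOMMU (if any) is known; on arm64 this looks roughly like:
 *
 *	void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
 *				const struct iommu_ops *iommu, bool coherent)
 *	{
 *		dev->dma_coherent = coherent;
 *		if (iommu)
 *			iommu_setup_dma_ops(dev, dma_base, size);
 *	}
 */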
static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
		phys_addr_t msi_addr, struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iommu_dma_msi_page *msi_page;
	dma_addr_t iova;
	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
	size_t size = cookie_msi_granule(cookie);

	msi_addr &= ~(phys_addr_t)(size - 1);
	list_for_each_entry(msi_page, &cookie->msi_page_list, list)
		if (msi_page->phys == msi_addr)
			return msi_page;

	msi_page = kzalloc(sizeof(*msi_page), GFP_ATOMIC);
	if (!msi_page)
		return NULL;

	iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
	if (!iova)
		goto out_free_page;

	if (iommu_map(domain, iova, msi_addr, size, prot))
		goto out_free_iova;

	INIT_LIST_HEAD(&msi_page->list);
	msi_page->phys = msi_addr;
	msi_page->iova = iova;
	list_add(&msi_page->list, &cookie->msi_page_list);
	return msi_page;

out_free_iova:
	iommu_dma_free_iova(cookie, iova, size);
out_free_page:
	kfree(msi_page);
	return NULL;
}
int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr)
{
	struct device *dev = msi_desc_to_dev(desc);
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iommu_dma_cookie *cookie;
	struct iommu_dma_msi_page *msi_page;
	unsigned long flags;

	if (!domain || !domain->iova_cookie) {
		desc->iommu_cookie = NULL;
		return 0;
	}

	cookie = domain->iova_cookie;

	/*
	 * We disable IRQs to rule out a possible inversion against
	 * irq_desc_lock if, say, someone tries to retarget the affinity
	 * of an MSI from within an IPI handler.
	 */
	spin_lock_irqsave(&cookie->msi_lock, flags);
	msi_page = iommu_dma_get_msi_page(dev, msi_addr, domain);
	spin_unlock_irqrestore(&cookie->msi_lock, flags);

	msi_desc_set_iommu_cookie(desc, msi_page);

	if (!msi_page)
		return -ENOMEM;
	return 0;
}
void iommu_dma_compose_msi_msg(struct msi_desc *desc,
			       struct msi_msg *msg)
{
	struct device *dev = msi_desc_to_dev(desc);
	const struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	const struct iommu_dma_msi_page *msi_page;

	msi_page = msi_desc_get_iommu_cookie(desc);

	if (!domain || !domain->iova_cookie || WARN_ON(!msi_page))
		return;

	msg->address_hi = upper_32_bits(msi_page->iova);
	msg->address_lo &= cookie_msi_granule(domain->iova_cookie) - 1;
	msg->address_lo += lower_32_bits(msi_page->iova);
}
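
/*
 * Illustrative sketch (not from this file): an MSI irqchip pairs the two
 * helpers above, preparing the cookie from a sleepable context when the IRQ
 * is allocated and fixing up the doorbell address when the message is
 * composed.  The "its_" naming is only indicative of a GICv3-ITS-like driver.
 *
 *	// In the irq_domain .alloc path:
 *	err = iommu_dma_prepare_msi(info->desc,
 *				    its->phys_base + GITS_TRANSLATER);
 *
 *	// In the irq_chip .irq_compose_msi_msg path:
 *	msg->address_lo = lower_32_bits(addr);
 *	msg->address_hi = upper_32_bits(addr);
 *	iommu_dma_compose_msi_msg(irq_data_get_msi_desc(d), msg);
 */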
static int iommu_dma_init(void)
{
	return iova_cache_get();
}
arch_initcall(iommu_dma_init);