// SPDX-License-Identifier: GPL-2.0-only
/*
 * A fairly generic DMA-API to IOMMU-API glue layer.
 *
 * Copyright (C) 2014-2015 ARM Ltd.
 *
 * based in part on arch/arm/mm/dma-mapping.c:
 * Copyright (C) 2000-2004 Russell King
 */

#include <linux/acpi_iort.h>
#include <linux/device.h>
#include <linux/dma-contiguous.h>
#include <linux/dma-iommu.h>
#include <linux/dma-noncoherent.h>
#include <linux/gfp.h>
#include <linux/huge_mm.h>
#include <linux/iommu.h>
#include <linux/iova.h>
#include <linux/irq.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>

struct iommu_dma_msi_page {
        struct list_head        list;
        dma_addr_t              iova;
        phys_addr_t             phys;
};

enum iommu_dma_cookie_type {
        IOMMU_DMA_IOVA_COOKIE,
        IOMMU_DMA_MSI_COOKIE,
};

struct iommu_dma_cookie {
        enum iommu_dma_cookie_type      type;
        union {
                /* Full allocator for IOMMU_DMA_IOVA_COOKIE */
                struct iova_domain      iovad;
                /* Trivial linear page allocator for IOMMU_DMA_MSI_COOKIE */
                dma_addr_t              msi_iova;
        };
        struct list_head                msi_page_list;

        /* Domain for flush queue callback; NULL if flush queue not in use */
        struct iommu_domain             *fq_domain;
};

static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie)
{
        if (cookie->type == IOMMU_DMA_IOVA_COOKIE)
                return cookie->iovad.granule;
        return PAGE_SIZE;
}

static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type)
{
        struct iommu_dma_cookie *cookie;

        cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
        if (cookie) {
                INIT_LIST_HEAD(&cookie->msi_page_list);
                cookie->type = type;
        }
        return cookie;
}

/**
 * iommu_get_dma_cookie - Acquire DMA-API resources for a domain
 * @domain: IOMMU domain to prepare for DMA-API usage
 *
 * IOMMU drivers should normally call this from their domain_alloc
 * callback when domain->type == IOMMU_DOMAIN_DMA.
 */
int iommu_get_dma_cookie(struct iommu_domain *domain)
{
        if (domain->iova_cookie)
                return -EEXIST;

        domain->iova_cookie = cookie_alloc(IOMMU_DMA_IOVA_COOKIE);
        if (!domain->iova_cookie)
                return -ENOMEM;

        return 0;
}
EXPORT_SYMBOL(iommu_get_dma_cookie);

/**
 * iommu_get_msi_cookie - Acquire just MSI remapping resources
 * @domain: IOMMU domain to prepare
 * @base: Start address of IOVA region for MSI mappings
 *
 * Users who manage their own IOVA allocation and do not want DMA API support,
 * but would still like to take advantage of automatic MSI remapping, can use
 * this to initialise their own domain appropriately. Users should reserve a
 * contiguous IOVA region, starting at @base, large enough to accommodate the
 * number of PAGE_SIZE mappings necessary to cover every MSI doorbell address
 * used by the devices attached to @domain.
 */
int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
{
        struct iommu_dma_cookie *cookie;

        if (domain->type != IOMMU_DOMAIN_UNMANAGED)
                return -EINVAL;

        if (domain->iova_cookie)
                return -EEXIST;

        cookie = cookie_alloc(IOMMU_DMA_MSI_COOKIE);
        if (!cookie)
                return -ENOMEM;

        cookie->msi_iova = base;
        domain->iova_cookie = cookie;
        return 0;
}
EXPORT_SYMBOL(iommu_get_msi_cookie);

/**
 * iommu_put_dma_cookie - Release a domain's DMA mapping resources
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie() or
 *          iommu_get_msi_cookie()
 *
 * IOMMU drivers should normally call this from their domain_free callback.
 */
void iommu_put_dma_cookie(struct iommu_domain *domain)
{
        struct iommu_dma_cookie *cookie = domain->iova_cookie;
        struct iommu_dma_msi_page *msi, *tmp;

        if (!cookie)
                return;

        if (cookie->type == IOMMU_DMA_IOVA_COOKIE && cookie->iovad.granule)
                put_iova_domain(&cookie->iovad);

        list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list) {
                list_del(&msi->list);
                kfree(msi);
        }
        kfree(cookie);
        domain->iova_cookie = NULL;
}
EXPORT_SYMBOL(iommu_put_dma_cookie);

/**
 * iommu_dma_get_resv_regions - Reserved region driver helper
 * @dev: Device from iommu_get_resv_regions()
 * @list: Reserved region list from iommu_get_resv_regions()
 *
 * IOMMU drivers can use this to implement their .get_resv_regions callback
 * for general non-IOMMU-specific reservations. Currently, this covers GICv3
 * ITS region reservation on ACPI based ARM platforms that may require HW MSI
 * reservation.
 */
void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list)
{
        if (!is_of_node(dev_iommu_fwspec_get(dev)->iommu_fwnode))
                iort_iommu_msi_get_resv_regions(dev, list);
}
EXPORT_SYMBOL(iommu_dma_get_resv_regions);

static int cookie_init_hw_msi_region(struct iommu_dma_cookie *cookie,
                phys_addr_t start, phys_addr_t end)
{
        struct iova_domain *iovad = &cookie->iovad;
        struct iommu_dma_msi_page *msi_page;
        int i, num_pages;

        start -= iova_offset(iovad, start);
        num_pages = iova_align(iovad, end - start) >> iova_shift(iovad);

        for (i = 0; i < num_pages; i++) {
                msi_page = kmalloc(sizeof(*msi_page), GFP_KERNEL);
                if (!msi_page)
                        return -ENOMEM;

                msi_page->phys = start;
                msi_page->iova = start;
                INIT_LIST_HEAD(&msi_page->list);
                list_add(&msi_page->list, &cookie->msi_page_list);
                start += iovad->granule;
        }

        return 0;
}

static int iova_reserve_pci_windows(struct pci_dev *dev,
                struct iova_domain *iovad)
{
        struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus);
        struct resource_entry *window;
        unsigned long lo, hi;
        phys_addr_t start = 0, end;

        resource_list_for_each_entry(window, &bridge->windows) {
                if (resource_type(window->res) != IORESOURCE_MEM)
                        continue;

                lo = iova_pfn(iovad, window->res->start - window->offset);
                hi = iova_pfn(iovad, window->res->end - window->offset);
                reserve_iova(iovad, lo, hi);
        }

        /* Get reserved DMA windows from host bridge */
        resource_list_for_each_entry(window, &bridge->dma_ranges) {
                end = window->res->start - window->offset;
resv_iova:
                if (end > start) {
                        lo = iova_pfn(iovad, start);
                        hi = iova_pfn(iovad, end);
                        reserve_iova(iovad, lo, hi);
                } else if (end < start) {
                        /* dma_ranges list should be sorted */
                        dev_err(&dev->dev, "Failed to reserve IOVA\n");
                        return -EINVAL;
                }

                start = window->res->end - window->offset + 1;
                /* If window is last entry */
                if (window->node.next == &bridge->dma_ranges &&
                    end != ~(phys_addr_t)0) {
                        end = ~(phys_addr_t)0;
                        goto resv_iova;
                }
        }

        return 0;
}

static int iova_reserve_iommu_regions(struct device *dev,
                struct iommu_domain *domain)
{
        struct iommu_dma_cookie *cookie = domain->iova_cookie;
        struct iova_domain *iovad = &cookie->iovad;
        struct iommu_resv_region *region;
        LIST_HEAD(resv_regions);
        int ret = 0;

        if (dev_is_pci(dev)) {
                ret = iova_reserve_pci_windows(to_pci_dev(dev), iovad);
                if (ret)
                        return ret;
        }

        iommu_get_resv_regions(dev, &resv_regions);
        list_for_each_entry(region, &resv_regions, list) {
                unsigned long lo, hi;

                /* We ARE the software that manages these! */
                if (region->type == IOMMU_RESV_SW_MSI)
                        continue;

                lo = iova_pfn(iovad, region->start);
                hi = iova_pfn(iovad, region->start + region->length - 1);
                reserve_iova(iovad, lo, hi);

                if (region->type == IOMMU_RESV_MSI)
                        ret = cookie_init_hw_msi_region(cookie, region->start,
                                        region->start + region->length);
                if (ret)
                        break;
        }
        iommu_put_resv_regions(dev, &resv_regions);

        return ret;
}

static void iommu_dma_flush_iotlb_all(struct iova_domain *iovad)
{
        struct iommu_dma_cookie *cookie;
        struct iommu_domain *domain;

        cookie = container_of(iovad, struct iommu_dma_cookie, iovad);
        domain = cookie->fq_domain;
        /*
         * The IOMMU driver supporting DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE
         * implies that ops->flush_iotlb_all must be non-NULL.
         */
        domain->ops->flush_iotlb_all(domain);
}

/**
 * iommu_dma_init_domain - Initialise a DMA mapping domain
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
 * @base: IOVA at which the mappable address space starts
 * @size: Size of IOVA space
 * @dev: Device the domain is being initialised for
 *
 * @base and @size should be exact multiples of IOMMU page granularity to
 * avoid rounding surprises. If necessary, we reserve the page at address 0
 * to ensure it is an invalid IOVA. It is safe to reinitialise a domain, but
 * any change which could make prior IOVAs invalid will fail.
 */
static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
                u64 size, struct device *dev)
{
        struct iommu_dma_cookie *cookie = domain->iova_cookie;
        unsigned long order, base_pfn;
        struct iova_domain *iovad;
        int attr;

        if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
                return -EINVAL;

        iovad = &cookie->iovad;

        /* Use the smallest supported page size for IOVA granularity */
        order = __ffs(domain->pgsize_bitmap);
        base_pfn = max_t(unsigned long, 1, base >> order);

        /* Check the domain allows at least some access to the device... */
        if (domain->geometry.force_aperture) {
                if (base > domain->geometry.aperture_end ||
                    base + size <= domain->geometry.aperture_start) {
                        pr_warn("specified DMA range outside IOMMU capability\n");
                        return -EFAULT;
                }
                /* ...then finally give it a kicking to make sure it fits */
                base_pfn = max_t(unsigned long, base_pfn,
                                domain->geometry.aperture_start >> order);
        }

        /* start_pfn is always nonzero for an already-initialised domain */
        if (iovad->start_pfn) {
                if (1UL << order != iovad->granule ||
                    base_pfn != iovad->start_pfn) {
                        pr_warn("Incompatible range for DMA domain\n");
                        return -EFAULT;
                }

                return 0;
        }

        init_iova_domain(iovad, 1UL << order, base_pfn);

        if (!cookie->fq_domain && !iommu_domain_get_attr(domain,
                        DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE, &attr) && attr) {
                cookie->fq_domain = domain;
                init_iova_flush_queue(iovad, iommu_dma_flush_iotlb_all, NULL);
        }

        if (!dev)
                return 0;

        return iova_reserve_iommu_regions(dev, domain);
}

/**
 * dma_info_to_prot - Translate DMA API directions and attributes to IOMMU API
 *                    page flags.
 * @dir: Direction of DMA transfer
 * @coherent: Is the DMA master cache-coherent?
 * @attrs: DMA attributes for the mapping
 *
 * Return: corresponding IOMMU API page protection flags
 */
static int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
                unsigned long attrs)
{
        int prot = coherent ? IOMMU_CACHE : 0;

        if (attrs & DMA_ATTR_PRIVILEGED)
                prot |= IOMMU_PRIV;

        switch (dir) {
        case DMA_BIDIRECTIONAL:
                return prot | IOMMU_READ | IOMMU_WRITE;
        case DMA_TO_DEVICE:
                return prot | IOMMU_READ;
        case DMA_FROM_DEVICE:
                return prot | IOMMU_WRITE;
        default:
                return 0;
        }
}

static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
                size_t size, dma_addr_t dma_limit, struct device *dev)
{
        struct iommu_dma_cookie *cookie = domain->iova_cookie;
        struct iova_domain *iovad = &cookie->iovad;
        unsigned long shift, iova_len, iova = 0;

        if (cookie->type == IOMMU_DMA_MSI_COOKIE) {
                cookie->msi_iova += size;
                return cookie->msi_iova - size;
        }

        shift = iova_shift(iovad);
        iova_len = size >> shift;
        /*
         * Freeing non-power-of-two-sized allocations back into the IOVA caches
         * will come back to bite us badly, so we have to waste a bit of space
         * rounding up anything cacheable to make sure that can't happen. The
         * order of the unadjusted size will still match upon freeing.
         */
        if (iova_len < (1 << (IOVA_RANGE_CACHE_MAX_SIZE - 1)))
                iova_len = roundup_pow_of_two(iova_len);

        if (dev->bus_dma_mask)
                dma_limit &= dev->bus_dma_mask;

        if (domain->geometry.force_aperture)
                dma_limit = min(dma_limit, domain->geometry.aperture_end);

        /* Try to get PCI devices a SAC address */
        if (dma_limit > DMA_BIT_MASK(32) && dev_is_pci(dev))
                iova = alloc_iova_fast(iovad, iova_len,
                                       DMA_BIT_MASK(32) >> shift, false);

        if (!iova)
                iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift,
                                       true);

        return (dma_addr_t)iova << shift;
}

static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
                dma_addr_t iova, size_t size)
{
        struct iova_domain *iovad = &cookie->iovad;

        /* The MSI case is only ever cleaning up its most recent allocation */
        if (cookie->type == IOMMU_DMA_MSI_COOKIE)
                cookie->msi_iova -= size;
        else if (cookie->fq_domain)     /* non-strict mode */
                queue_iova(iovad, iova_pfn(iovad, iova),
                                size >> iova_shift(iovad), 0);
        else
                free_iova_fast(iovad, iova_pfn(iovad, iova),
                                size >> iova_shift(iovad));
}

static void __iommu_dma_unmap(struct device *dev, dma_addr_t dma_addr,
                size_t size)
{
        struct iommu_domain *domain = iommu_get_dma_domain(dev);
        struct iommu_dma_cookie *cookie = domain->iova_cookie;
        struct iova_domain *iovad = &cookie->iovad;
        size_t iova_off = iova_offset(iovad, dma_addr);
        struct iommu_iotlb_gather iotlb_gather;
        size_t unmapped;

        dma_addr -= iova_off;
        size = iova_align(iovad, size + iova_off);
        iommu_iotlb_gather_init(&iotlb_gather);

        unmapped = iommu_unmap_fast(domain, dma_addr, size, &iotlb_gather);
        WARN_ON(unmapped != size);

        if (!cookie->fq_domain)
                iommu_tlb_sync(domain, &iotlb_gather);
        iommu_dma_free_iova(cookie, dma_addr, size);
}

static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
                size_t size, int prot)
{
        struct iommu_domain *domain = iommu_get_dma_domain(dev);
        struct iommu_dma_cookie *cookie = domain->iova_cookie;
        struct iova_domain *iovad = &cookie->iovad;
        size_t iova_off = iova_offset(iovad, phys);
        dma_addr_t iova;

        size = iova_align(iovad, size + iova_off);

        iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
        if (!iova)
                return DMA_MAPPING_ERROR;

        if (iommu_map(domain, iova, phys - iova_off, size, prot)) {
                iommu_dma_free_iova(cookie, iova, size);
                return DMA_MAPPING_ERROR;
        }
        return iova + iova_off;
}

static void __iommu_dma_free_pages(struct page **pages, int count)
{
        while (count--)
                __free_page(pages[count]);
        kvfree(pages);
}

static struct page **__iommu_dma_alloc_pages(struct device *dev,
                unsigned int count, unsigned long order_mask, gfp_t gfp)
{
        struct page **pages;
        unsigned int i = 0, nid = dev_to_node(dev);

        order_mask &= (2U << MAX_ORDER) - 1;
        if (!order_mask)
                return NULL;

        pages = kvzalloc(count * sizeof(*pages), GFP_KERNEL);
        if (!pages)
                return NULL;

        /* IOMMU can map any pages, so himem can also be used here */
        gfp |= __GFP_NOWARN | __GFP_HIGHMEM;

        while (count) {
                struct page *page = NULL;
                unsigned int order_size;

                /*
                 * Higher-order allocations are a convenience rather
                 * than a necessity, hence using __GFP_NORETRY until
                 * falling back to minimum-order allocations.
                 */
                for (order_mask &= (2U << __fls(count)) - 1;
                     order_mask; order_mask &= ~order_size) {
                        unsigned int order = __fls(order_mask);
                        gfp_t alloc_flags = gfp;

                        order_size = 1U << order;
                        if (order_mask > order_size)
                                alloc_flags |= __GFP_NORETRY;
                        page = alloc_pages_node(nid, alloc_flags, order);
                        if (!page)
                                continue;
                        if (!order)
                                break;
                        if (!PageCompound(page)) {
                                split_page(page, order);
                                break;
                        } else if (!split_huge_page(page)) {
                                break;
                        }
                        __free_pages(page, order);
                }
                if (!page) {
                        __iommu_dma_free_pages(pages, i);
                        return NULL;
                }
                count -= order_size;
                while (order_size--)
                        pages[i++] = page++;
        }
        return pages;
}

/**
 * iommu_dma_alloc_remap - Allocate and map a buffer contiguous in IOVA space
 * @dev: Device to allocate memory for. Must be a real device
 *       attached to an iommu_dma_domain
 * @size: Size of buffer in bytes
 * @dma_handle: Out argument for allocated DMA handle
 * @gfp: Allocation flags
 * @attrs: DMA attributes for this allocation
 *
 * If @size is less than PAGE_SIZE, then a full CPU page will be allocated,
 * but an IOMMU which supports smaller pages might not map the whole thing.
 *
 * Return: Mapped virtual address, or NULL on failure.
 */
static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
                dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
        struct iommu_domain *domain = iommu_get_dma_domain(dev);
        struct iommu_dma_cookie *cookie = domain->iova_cookie;
        struct iova_domain *iovad = &cookie->iovad;
        bool coherent = dev_is_dma_coherent(dev);
        int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
        pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);
        unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;
        struct page **pages;
        struct sg_table sgt;
        dma_addr_t iova;
        void *vaddr;

        *dma_handle = DMA_MAPPING_ERROR;

        min_size = alloc_sizes & -alloc_sizes;
        if (min_size < PAGE_SIZE) {
                min_size = PAGE_SIZE;
                alloc_sizes |= PAGE_SIZE;
        } else {
                size = ALIGN(size, min_size);
        }
        if (attrs & DMA_ATTR_ALLOC_SINGLE_PAGES)
                alloc_sizes = min_size;

        count = PAGE_ALIGN(size) >> PAGE_SHIFT;
        pages = __iommu_dma_alloc_pages(dev, count, alloc_sizes >> PAGE_SHIFT,
                                        gfp);
        if (!pages)
                return NULL;

        size = iova_align(iovad, size);
        iova = iommu_dma_alloc_iova(domain, size, dev->coherent_dma_mask, dev);
        if (!iova)
                goto out_free_pages;

        if (sg_alloc_table_from_pages(&sgt, pages, count, 0, size, GFP_KERNEL))
                goto out_free_iova;

        if (!(ioprot & IOMMU_CACHE)) {
                struct scatterlist *sg;
                int i;

                for_each_sg(sgt.sgl, sg, sgt.orig_nents, i)
                        arch_dma_prep_coherent(sg_page(sg), sg->length);
        }

        if (iommu_map_sg(domain, iova, sgt.sgl, sgt.orig_nents, ioprot)
                        < size)
                goto out_free_sg;

        vaddr = dma_common_pages_remap(pages, size, prot,
                        __builtin_return_address(0));
        if (!vaddr)
                goto out_unmap;

        *dma_handle = iova;
        sg_free_table(&sgt);
        return vaddr;

out_unmap:
        __iommu_dma_unmap(dev, iova, size);
out_free_sg:
        sg_free_table(&sgt);
out_free_iova:
        iommu_dma_free_iova(cookie, iova, size);
out_free_pages:
        __iommu_dma_free_pages(pages, count);
        return NULL;
}

/**
 * __iommu_dma_mmap - Map a buffer into provided user VMA
 * @pages: Array representing buffer from __iommu_dma_alloc()
 * @size: Size of buffer in bytes
 * @vma: VMA describing requested userspace mapping
 *
 * Maps the pages of the buffer in @pages into @vma. The caller is responsible
 * for verifying the correct size and protection of @vma beforehand.
 */
static int __iommu_dma_mmap(struct page **pages, size_t size,
                struct vm_area_struct *vma)
{
        return vm_map_pages(vma, pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
}

static void iommu_dma_sync_single_for_cpu(struct device *dev,
                dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
        phys_addr_t phys;

        if (dev_is_dma_coherent(dev))
                return;

        phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
        arch_sync_dma_for_cpu(dev, phys, size, dir);
}

static void iommu_dma_sync_single_for_device(struct device *dev,
                dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
        phys_addr_t phys;

        if (dev_is_dma_coherent(dev))
                return;

        phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
        arch_sync_dma_for_device(dev, phys, size, dir);
}

static void iommu_dma_sync_sg_for_cpu(struct device *dev,
                struct scatterlist *sgl, int nelems,
                enum dma_data_direction dir)
{
        struct scatterlist *sg;
        int i;

        if (dev_is_dma_coherent(dev))
                return;

        for_each_sg(sgl, sg, nelems, i)
                arch_sync_dma_for_cpu(dev, sg_phys(sg), sg->length, dir);
}

static void iommu_dma_sync_sg_for_device(struct device *dev,
                struct scatterlist *sgl, int nelems,
                enum dma_data_direction dir)
{
        struct scatterlist *sg;
        int i;

        if (dev_is_dma_coherent(dev))
                return;

        for_each_sg(sgl, sg, nelems, i)
                arch_sync_dma_for_device(dev, sg_phys(sg), sg->length, dir);
}

static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
                unsigned long offset, size_t size, enum dma_data_direction dir,
                unsigned long attrs)
{
        phys_addr_t phys = page_to_phys(page) + offset;
        bool coherent = dev_is_dma_coherent(dev);
        int prot = dma_info_to_prot(dir, coherent, attrs);
        dma_addr_t dma_handle;

        dma_handle = __iommu_dma_map(dev, phys, size, prot);
        if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
            dma_handle != DMA_MAPPING_ERROR)
                arch_sync_dma_for_device(dev, phys, size, dir);
        return dma_handle;
}

static void iommu_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
                size_t size, enum dma_data_direction dir, unsigned long attrs)
{
        if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
                iommu_dma_sync_single_for_cpu(dev, dma_handle, size, dir);
        __iommu_dma_unmap(dev, dma_handle, size);
}

/*
 * Prepare a successfully-mapped scatterlist to give back to the caller.
 *
 * At this point the segments are already laid out by iommu_dma_map_sg() to
 * avoid individually crossing any boundaries, so we merely need to check a
 * segment's start address to avoid concatenating across one.
 */
static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
                dma_addr_t dma_addr)
{
        struct scatterlist *s, *cur = sg;
        unsigned long seg_mask = dma_get_seg_boundary(dev);
        unsigned int cur_len = 0, max_len = dma_get_max_seg_size(dev);
        int i, count = 0;

        for_each_sg(sg, s, nents, i) {
                /* Restore this segment's original unaligned fields first */
                unsigned int s_iova_off = sg_dma_address(s);
                unsigned int s_length = sg_dma_len(s);
                unsigned int s_iova_len = s->length;

                s->offset += s_iova_off;
                s->length = s_length;
                sg_dma_address(s) = DMA_MAPPING_ERROR;
                sg_dma_len(s) = 0;

                /*
                 * Now fill in the real DMA data. If...
                 * - there is a valid output segment to append to
                 * - and this segment starts on an IOVA page boundary
                 * - but doesn't fall at a segment boundary
                 * - and wouldn't make the resulting output segment too long
                 */
                if (cur_len && !s_iova_off && (dma_addr & seg_mask) &&
                    (max_len - cur_len >= s_length)) {
                        /* ...then concatenate it with the previous one */
                        cur_len += s_length;
                } else {
                        /* Otherwise start the next output segment */
                        if (i > 0)
                                cur = sg_next(cur);
                        cur_len = s_length;
                        count++;

                        sg_dma_address(cur) = dma_addr + s_iova_off;
                }

                sg_dma_len(cur) = cur_len;
                dma_addr += s_iova_len;

                if (s_length + s_iova_off < s_iova_len)
                        cur_len = 0;
        }
        return count;
}

/*
 * If mapping failed, then just restore the original list,
 * but making sure the DMA fields are invalidated.
 */
static void __invalidate_sg(struct scatterlist *sg, int nents)
{
        struct scatterlist *s;
        int i;

        for_each_sg(sg, s, nents, i) {
                if (sg_dma_address(s) != DMA_MAPPING_ERROR)
                        s->offset += sg_dma_address(s);
                if (sg_dma_len(s))
                        s->length = sg_dma_len(s);
                sg_dma_address(s) = DMA_MAPPING_ERROR;
                sg_dma_len(s) = 0;
        }
}

/*
 * The DMA API client is passing in a scatterlist which could describe
 * any old buffer layout, but the IOMMU API requires everything to be
 * aligned to IOMMU pages. Hence the need for this complicated bit of
 * impedance-matching, to be able to hand off a suitably-aligned list,
 * but still preserve the original offsets and sizes for the caller.
 */
static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
                int nents, enum dma_data_direction dir, unsigned long attrs)
{
        struct iommu_domain *domain = iommu_get_dma_domain(dev);
        struct iommu_dma_cookie *cookie = domain->iova_cookie;
        struct iova_domain *iovad = &cookie->iovad;
        struct scatterlist *s, *prev = NULL;
        int prot = dma_info_to_prot(dir, dev_is_dma_coherent(dev), attrs);
        dma_addr_t iova;
        size_t iova_len = 0;
        unsigned long mask = dma_get_seg_boundary(dev);
        int i;

        if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
                iommu_dma_sync_sg_for_device(dev, sg, nents, dir);

        /*
         * Work out how much IOVA space we need, and align the segments to
         * IOVA granules for the IOMMU driver to handle. With some clever
         * trickery we can modify the list in-place, but reversibly, by
         * stashing the unaligned parts in the as-yet-unused DMA fields.
         */
        for_each_sg(sg, s, nents, i) {
                size_t s_iova_off = iova_offset(iovad, s->offset);
                size_t s_length = s->length;
                size_t pad_len = (mask - iova_len + 1) & mask;

                sg_dma_address(s) = s_iova_off;
                sg_dma_len(s) = s_length;
                s->offset -= s_iova_off;
                s_length = iova_align(iovad, s_length + s_iova_off);
                s->length = s_length;

                /*
                 * Due to the alignment of our single IOVA allocation, we can
                 * depend on these assumptions about the segment boundary mask:
                 * - If mask size >= IOVA size, then the IOVA range cannot
                 *   possibly fall across a boundary, so we don't care.
                 * - If mask size < IOVA size, then the IOVA range must start
                 *   exactly on a boundary, therefore we can lay things out
                 *   based purely on segment lengths without needing to know
                 *   the actual addresses beforehand.
                 * - The mask must be a power of 2, so pad_len == 0 if
                 *   iova_len == 0, thus we cannot dereference prev the first
                 *   time through here (i.e. before it has a meaningful value).
                 */
                if (pad_len && pad_len < s_length - 1) {
                        prev->length += pad_len;
                        iova_len += pad_len;
                }

                iova_len += s_length;
                prev = s;
        }

        iova = iommu_dma_alloc_iova(domain, iova_len, dma_get_mask(dev), dev);
        if (!iova)
                goto out_restore_sg;

        /*
         * We'll leave any physical concatenation to the IOMMU driver's
         * implementation - it knows better than we do.
         */
        if (iommu_map_sg(domain, iova, sg, nents, prot) < iova_len)
                goto out_free_iova;

        return __finalise_sg(dev, sg, nents, iova);

out_free_iova:
        iommu_dma_free_iova(cookie, iova, iova_len);
out_restore_sg:
        __invalidate_sg(sg, nents);
        return 0;
}

static void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
                int nents, enum dma_data_direction dir, unsigned long attrs)
{
        dma_addr_t start, end;
        struct scatterlist *tmp;
        int i;

        if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
                iommu_dma_sync_sg_for_cpu(dev, sg, nents, dir);

        /*
         * The scatterlist segments are mapped into a single
         * contiguous IOVA allocation, so this is incredibly easy.
         */
        start = sg_dma_address(sg);
        for_each_sg(sg_next(sg), tmp, nents - 1, i) {
                if (sg_dma_len(tmp) == 0)
                        break;
                sg = tmp;
        }
        end = sg_dma_address(sg) + sg_dma_len(sg);
        __iommu_dma_unmap(dev, start, end - start);
}

static dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
                size_t size, enum dma_data_direction dir, unsigned long attrs)
{
        return __iommu_dma_map(dev, phys, size,
                        dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO);
}

static void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
                size_t size, enum dma_data_direction dir, unsigned long attrs)
{
        __iommu_dma_unmap(dev, handle, size);
}

static void __iommu_dma_free(struct device *dev, size_t size, void *cpu_addr)
{
        size_t alloc_size = PAGE_ALIGN(size);
        int count = alloc_size >> PAGE_SHIFT;
        struct page *page = NULL, **pages = NULL;

        /* Non-coherent atomic allocation? Easy */
        if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
            dma_free_from_pool(cpu_addr, alloc_size))
                return;

        if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
                /*
                 * If the address is remapped, then it's either non-coherent
                 * or highmem CMA, or an iommu_dma_alloc_remap() construction.
                 */
                pages = dma_common_find_pages(cpu_addr);
                if (!pages)
                        page = vmalloc_to_page(cpu_addr);
                dma_common_free_remap(cpu_addr, alloc_size);
        } else {
                /* Lowmem means a coherent atomic or CMA allocation */
                page = virt_to_page(cpu_addr);
        }

        if (pages)
                __iommu_dma_free_pages(pages, count);
        if (page)
                dma_free_contiguous(dev, page, alloc_size);
}

static void iommu_dma_free(struct device *dev, size_t size, void *cpu_addr,
                dma_addr_t handle, unsigned long attrs)
{
        __iommu_dma_unmap(dev, handle, size);
        __iommu_dma_free(dev, size, cpu_addr);
}

static void *iommu_dma_alloc_pages(struct device *dev, size_t size,
                struct page **pagep, gfp_t gfp, unsigned long attrs)
{
        bool coherent = dev_is_dma_coherent(dev);
        size_t alloc_size = PAGE_ALIGN(size);
        int node = dev_to_node(dev);
        struct page *page = NULL;
        void *cpu_addr;

        page = dma_alloc_contiguous(dev, alloc_size, gfp);
        if (!page)
                page = alloc_pages_node(node, gfp, get_order(alloc_size));
        if (!page)
                return NULL;

        if (IS_ENABLED(CONFIG_DMA_REMAP) && (!coherent || PageHighMem(page))) {
                pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);

                cpu_addr = dma_common_contiguous_remap(page, alloc_size,
                                prot, __builtin_return_address(0));
                if (!cpu_addr)
                        goto out_free_pages;

                if (!coherent)
                        arch_dma_prep_coherent(page, size);
        } else {
                cpu_addr = page_address(page);
        }

        *pagep = page;
        memset(cpu_addr, 0, alloc_size);
        return cpu_addr;
out_free_pages:
        dma_free_contiguous(dev, page, alloc_size);
        return NULL;
}

static void *iommu_dma_alloc(struct device *dev, size_t size,
                dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
{
        bool coherent = dev_is_dma_coherent(dev);
        int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
        struct page *page = NULL;
        void *cpu_addr;

        gfp |= __GFP_ZERO;

        if (IS_ENABLED(CONFIG_DMA_REMAP) && gfpflags_allow_blocking(gfp) &&
            !(attrs & DMA_ATTR_FORCE_CONTIGUOUS))
                return iommu_dma_alloc_remap(dev, size, handle, gfp, attrs);

        if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
            !gfpflags_allow_blocking(gfp) && !coherent)
                cpu_addr = dma_alloc_from_pool(PAGE_ALIGN(size), &page, gfp);
        else
                cpu_addr = iommu_dma_alloc_pages(dev, size, &page, gfp, attrs);
        if (!cpu_addr)
                return NULL;

        *handle = __iommu_dma_map(dev, page_to_phys(page), size, ioprot);
        if (*handle == DMA_MAPPING_ERROR) {
                __iommu_dma_free(dev, size, cpu_addr);
                return NULL;
        }

        return cpu_addr;
}

static int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
                void *cpu_addr, dma_addr_t dma_addr, size_t size,
                unsigned long attrs)
{
        unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
        unsigned long pfn, off = vma->vm_pgoff;
        int ret;

        vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);

        if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
                return ret;

        if (off >= nr_pages || vma_pages(vma) > nr_pages - off)
                return -ENXIO;

        if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
                struct page **pages = dma_common_find_pages(cpu_addr);

                if (pages)
                        return __iommu_dma_mmap(pages, size, vma);
                pfn = vmalloc_to_pfn(cpu_addr);
        } else {
                pfn = page_to_pfn(virt_to_page(cpu_addr));
        }

        return remap_pfn_range(vma, vma->vm_start, pfn + off,
                               vma->vm_end - vma->vm_start,
                               vma->vm_page_prot);
}

static int iommu_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
                void *cpu_addr, dma_addr_t dma_addr, size_t size,
                unsigned long attrs)
{
        struct page *page;
        int ret;

        if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
                struct page **pages = dma_common_find_pages(cpu_addr);

                if (pages) {
                        return sg_alloc_table_from_pages(sgt, pages,
                                        PAGE_ALIGN(size) >> PAGE_SHIFT,
                                        0, size, GFP_KERNEL);
                }

                page = vmalloc_to_page(cpu_addr);
        } else {
                page = virt_to_page(cpu_addr);
        }

        ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
        if (!ret)
                sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
        return ret;
}

static unsigned long iommu_dma_get_merge_boundary(struct device *dev)
{
        struct iommu_domain *domain = iommu_get_dma_domain(dev);

        return (1UL << __ffs(domain->pgsize_bitmap)) - 1;
}

static const struct dma_map_ops iommu_dma_ops = {
        .alloc                  = iommu_dma_alloc,
        .free                   = iommu_dma_free,
        .mmap                   = iommu_dma_mmap,
        .get_sgtable            = iommu_dma_get_sgtable,
        .map_page               = iommu_dma_map_page,
        .unmap_page             = iommu_dma_unmap_page,
        .map_sg                 = iommu_dma_map_sg,
        .unmap_sg               = iommu_dma_unmap_sg,
        .sync_single_for_cpu    = iommu_dma_sync_single_for_cpu,
        .sync_single_for_device = iommu_dma_sync_single_for_device,
        .sync_sg_for_cpu        = iommu_dma_sync_sg_for_cpu,
        .sync_sg_for_device     = iommu_dma_sync_sg_for_device,
        .map_resource           = iommu_dma_map_resource,
        .unmap_resource         = iommu_dma_unmap_resource,
        .get_merge_boundary     = iommu_dma_get_merge_boundary,
};

/*
 * The IOMMU core code allocates the default DMA domain, which the underlying
 * IOMMU driver needs to support via the dma-iommu layer.
 */
void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size)
{
        struct iommu_domain *domain = iommu_get_domain_for_dev(dev);

        if (!domain)
                goto out_err;

        /*
         * The IOMMU core code allocates the default DMA domain, which the
         * underlying IOMMU driver needs to support via the dma-iommu layer.
         */
        if (domain->type == IOMMU_DOMAIN_DMA) {
                if (iommu_dma_init_domain(domain, dma_base, size, dev))
                        goto out_err;
                dev->dma_ops = &iommu_dma_ops;
        }

        return;
out_err:
        pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
                dev_name(dev));
}

static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
                phys_addr_t msi_addr, struct iommu_domain *domain)
{
        struct iommu_dma_cookie *cookie = domain->iova_cookie;
        struct iommu_dma_msi_page *msi_page;
        dma_addr_t iova;
        int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
        size_t size = cookie_msi_granule(cookie);

        msi_addr &= ~(phys_addr_t)(size - 1);
        list_for_each_entry(msi_page, &cookie->msi_page_list, list)
                if (msi_page->phys == msi_addr)
                        return msi_page;

        msi_page = kzalloc(sizeof(*msi_page), GFP_KERNEL);
        if (!msi_page)
                return NULL;

        iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
        if (!iova)
                goto out_free_page;

        if (iommu_map(domain, iova, msi_addr, size, prot))
                goto out_free_iova;

        INIT_LIST_HEAD(&msi_page->list);
        msi_page->phys = msi_addr;
        msi_page->iova = iova;
        list_add(&msi_page->list, &cookie->msi_page_list);
        return msi_page;

out_free_iova:
        iommu_dma_free_iova(cookie, iova, size);
out_free_page:
        kfree(msi_page);
        return NULL;
}

int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr)
{
        struct device *dev = msi_desc_to_dev(desc);
        struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
        struct iommu_dma_msi_page *msi_page;
        static DEFINE_MUTEX(msi_prepare_lock); /* see below */

        if (!domain || !domain->iova_cookie) {
                desc->iommu_cookie = NULL;
                return 0;
        }

        /*
         * In fact the whole prepare operation should already be serialised by
         * irq_domain_mutex further up the callchain, but that's pretty subtle
         * on its own, so consider this locking as failsafe documentation...
         */
        mutex_lock(&msi_prepare_lock);
        msi_page = iommu_dma_get_msi_page(dev, msi_addr, domain);
        mutex_unlock(&msi_prepare_lock);

        msi_desc_set_iommu_cookie(desc, msi_page);

        if (!msi_page)
                return -ENOMEM;
        return 0;
}

void iommu_dma_compose_msi_msg(struct msi_desc *desc,
                               struct msi_msg *msg)
{
        struct device *dev = msi_desc_to_dev(desc);
        const struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
        const struct iommu_dma_msi_page *msi_page;

        msi_page = msi_desc_get_iommu_cookie(desc);

        if (!domain || !domain->iova_cookie || WARN_ON(!msi_page))
                return;

        msg->address_hi = upper_32_bits(msi_page->iova);
        msg->address_lo &= cookie_msi_granule(domain->iova_cookie) - 1;
        msg->address_lo += lower_32_bits(msi_page->iova);
}

static int iommu_dma_init(void)
{
        return iova_cache_get();
}
arch_initcall(iommu_dma_init);