// SPDX-License-Identifier: GPL-2.0-only
/*
 * A fairly generic DMA-API to IOMMU-API glue layer.
 *
 * Copyright (C) 2014-2015 ARM Ltd.
 *
 * based in part on arch/arm/mm/dma-mapping.c:
 * Copyright (C) 2000-2004 Russell King
 */
#include <linux/acpi_iort.h>
#include <linux/device.h>
#include <linux/dma-map-ops.h>
#include <linux/dma-iommu.h>
#include <linux/gfp.h>
#include <linux/huge_mm.h>
#include <linux/iommu.h>
#include <linux/iova.h>
#include <linux/irq.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/swiotlb.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>
#include <linux/crash_dump.h>
#include <linux/dma-direct.h>
struct iommu_dma_msi_page {
	struct list_head	list;
	dma_addr_t		iova;
	phys_addr_t		phys;
};
enum iommu_dma_cookie_type {
	IOMMU_DMA_IOVA_COOKIE,
	IOMMU_DMA_MSI_COOKIE,
};
struct iommu_dma_cookie {
	enum iommu_dma_cookie_type	type;
	union {
		/* Full allocator for IOMMU_DMA_IOVA_COOKIE */
		struct iova_domain	iovad;
		/* Trivial linear page allocator for IOMMU_DMA_MSI_COOKIE */
		dma_addr_t		msi_iova;
	};
	struct list_head		msi_page_list;

	/* Domain for flush queue callback; NULL if flush queue not in use */
	struct iommu_domain		*fq_domain;
};
void iommu_dma_free_cpu_cached_iovas(unsigned int cpu,
		struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;

	free_cpu_cached_iovas(cpu, iovad);
}
static void iommu_dma_entry_dtor(unsigned long data)
{
	struct page *freelist = (struct page *)data;

	while (freelist) {
		unsigned long p = (unsigned long)page_address(freelist);

		freelist = freelist->freelist;
		free_page(p);
	}
}
static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie)
{
	if (cookie->type == IOMMU_DMA_IOVA_COOKIE)
		return cookie->iovad.granule;
	return PAGE_SIZE;
}
static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type)
{
	struct iommu_dma_cookie *cookie;

	cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
	if (cookie) {
		INIT_LIST_HEAD(&cookie->msi_page_list);
		cookie->type = type;
	}
	return cookie;
}
/**
 * iommu_get_dma_cookie - Acquire DMA-API resources for a domain
 * @domain: IOMMU domain to prepare for DMA-API usage
 *
 * IOMMU drivers should normally call this from their domain_alloc
 * callback when domain->type == IOMMU_DOMAIN_DMA.
 */
int iommu_get_dma_cookie(struct iommu_domain *domain)
{
	if (domain->iova_cookie)
		return -EEXIST;

	domain->iova_cookie = cookie_alloc(IOMMU_DMA_IOVA_COOKIE);
	if (!domain->iova_cookie)
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL(iommu_get_dma_cookie);
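
/*
 * Example (illustrative sketch only, not part of this file): an IOMMU driver
 * would typically acquire the cookie from its domain_alloc() callback. The
 * "struct my_domain" container below is hypothetical:
 *
 *	static struct iommu_domain *my_domain_alloc(unsigned int type)
 *	{
 *		struct my_domain *dom = kzalloc(sizeof(*dom), GFP_KERNEL);
 *
 *		if (!dom)
 *			return NULL;
 *		if (type == IOMMU_DOMAIN_DMA &&
 *		    iommu_get_dma_cookie(&dom->domain)) {
 *			kfree(dom);
 *			return NULL;
 *		}
 *		return &dom->domain;
 *	}
 */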
/**
 * iommu_get_msi_cookie - Acquire just MSI remapping resources
 * @domain: IOMMU domain to prepare
 * @base: Start address of IOVA region for MSI mappings
 *
 * Users who manage their own IOVA allocation and do not want DMA API support,
 * but would still like to take advantage of automatic MSI remapping, can use
 * this to initialise their own domain appropriately. Users should reserve a
 * contiguous IOVA region, starting at @base, large enough to accommodate the
 * number of PAGE_SIZE mappings necessary to cover every MSI doorbell address
 * used by the devices attached to @domain.
 */
int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
{
	struct iommu_dma_cookie *cookie;

	if (domain->type != IOMMU_DOMAIN_UNMANAGED)
		return -EINVAL;

	if (domain->iova_cookie)
		return -EEXIST;

	cookie = cookie_alloc(IOMMU_DMA_MSI_COOKIE);
	if (!cookie)
		return -ENOMEM;

	cookie->msi_iova = base;
	domain->iova_cookie = cookie;
	return 0;
}
EXPORT_SYMBOL(iommu_get_msi_cookie);
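
/*
 * Example (illustrative sketch only): a user of an IOMMU_DOMAIN_UNMANAGED
 * domain that runs its own IOVA allocator can carve out a window for MSI
 * doorbells and hand its base to this helper. The window below is an
 * arbitrary assumption, not a value defined anywhere in this file:
 *
 *	#define MY_MSI_IOVA_BASE	0x08000000UL
 *	#define MY_MSI_IOVA_SIZE	SZ_1M
 *
 *	// keep [MY_MSI_IOVA_BASE, +MY_MSI_IOVA_SIZE) out of the caller's
 *	// own allocator, then:
 *	ret = iommu_get_msi_cookie(domain, MY_MSI_IOVA_BASE);
 */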
/**
 * iommu_put_dma_cookie - Release a domain's DMA mapping resources
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie() or
 *          iommu_get_msi_cookie()
 *
 * IOMMU drivers should normally call this from their domain_free callback.
 */
void iommu_put_dma_cookie(struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iommu_dma_msi_page *msi, *tmp;

	if (!cookie)
		return;

	if (cookie->type == IOMMU_DMA_IOVA_COOKIE && cookie->iovad.granule)
		put_iova_domain(&cookie->iovad);

	list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list) {
		list_del(&msi->list);
		kfree(msi);
	}
	kfree(cookie);
	domain->iova_cookie = NULL;
}
EXPORT_SYMBOL(iommu_put_dma_cookie);
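
/*
 * Example (illustrative sketch only): the matching release normally lives in
 * the driver's domain_free() callback, e.g. with a hypothetical container
 * helper to_my_domain():
 *
 *	static void my_domain_free(struct iommu_domain *domain)
 *	{
 *		iommu_put_dma_cookie(domain);
 *		kfree(to_my_domain(domain));
 *	}
 */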
/**
 * iommu_dma_get_resv_regions - Reserved region driver helper
 * @dev: Device from iommu_get_resv_regions()
 * @list: Reserved region list from iommu_get_resv_regions()
 *
 * IOMMU drivers can use this to implement their .get_resv_regions callback
 * for general non-IOMMU-specific reservations. Currently, this covers GICv3
 * ITS region reservation on ACPI based ARM platforms that may require HW MSI
 * reservation.
 */
void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list)
{
	if (!is_of_node(dev_iommu_fwspec_get(dev)->iommu_fwnode))
		iort_iommu_msi_get_resv_regions(dev, list);
}
EXPORT_SYMBOL(iommu_dma_get_resv_regions);
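
/*
 * Example (illustrative sketch only): a driver wires this up from its own
 * .get_resv_regions callback, adding any driver-specific regions on top:
 *
 *	static void my_get_resv_regions(struct device *dev,
 *					struct list_head *head)
 *	{
 *		iommu_dma_get_resv_regions(dev, head);
 *		// then append driver-specific iommu_resv_region entries
 *	}
 */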
static int cookie_init_hw_msi_region(struct iommu_dma_cookie *cookie,
		phys_addr_t start, phys_addr_t end)
{
	struct iova_domain *iovad = &cookie->iovad;
	struct iommu_dma_msi_page *msi_page;
	int i, num_pages;

	start -= iova_offset(iovad, start);
	num_pages = iova_align(iovad, end - start) >> iova_shift(iovad);

	for (i = 0; i < num_pages; i++) {
		msi_page = kmalloc(sizeof(*msi_page), GFP_KERNEL);
		if (!msi_page)
			return -ENOMEM;

		msi_page->phys = start;
		msi_page->iova = start;
		INIT_LIST_HEAD(&msi_page->list);
		list_add(&msi_page->list, &cookie->msi_page_list);
		start += iovad->granule;
	}

	return 0;
}
static int iova_reserve_pci_windows(struct pci_dev *dev,
		struct iova_domain *iovad)
{
	struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus);
	struct resource_entry *window;
	unsigned long lo, hi;
	phys_addr_t start = 0, end;

	resource_list_for_each_entry(window, &bridge->windows) {
		if (resource_type(window->res) != IORESOURCE_MEM)
			continue;

		lo = iova_pfn(iovad, window->res->start - window->offset);
		hi = iova_pfn(iovad, window->res->end - window->offset);
		reserve_iova(iovad, lo, hi);
	}

	/* Get reserved DMA windows from host bridge */
	resource_list_for_each_entry(window, &bridge->dma_ranges) {
		end = window->res->start - window->offset;
resv_iova:
		if (end > start) {
			lo = iova_pfn(iovad, start);
			hi = iova_pfn(iovad, end);
			reserve_iova(iovad, lo, hi);
		} else if (end < start) {
			/* dma_ranges list should be sorted */
			dev_err(&dev->dev, "Failed to reserve IOVA\n");
			return -EINVAL;
		}

		start = window->res->end - window->offset + 1;
		/* If window is last entry */
		if (window->node.next == &bridge->dma_ranges &&
		    end != ~(phys_addr_t)0) {
			end = ~(phys_addr_t)0;
			goto resv_iova;
		}
	}

	return 0;
}
static int iova_reserve_iommu_regions(struct device *dev,
		struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	struct iommu_resv_region *region;
	LIST_HEAD(resv_regions);
	int ret = 0;

	if (dev_is_pci(dev)) {
		ret = iova_reserve_pci_windows(to_pci_dev(dev), iovad);
		if (ret)
			return ret;
	}

	iommu_get_resv_regions(dev, &resv_regions);
	list_for_each_entry(region, &resv_regions, list) {
		unsigned long lo, hi;

		/* We ARE the software that manages these! */
		if (region->type == IOMMU_RESV_SW_MSI)
			continue;

		lo = iova_pfn(iovad, region->start);
		hi = iova_pfn(iovad, region->start + region->length - 1);
		reserve_iova(iovad, lo, hi);

		if (region->type == IOMMU_RESV_MSI)
			ret = cookie_init_hw_msi_region(cookie, region->start,
					region->start + region->length);
		if (ret)
			break;
	}
	iommu_put_resv_regions(dev, &resv_regions);

	return ret;
}
static void iommu_dma_flush_iotlb_all(struct iova_domain *iovad)
{
	struct iommu_dma_cookie *cookie;
	struct iommu_domain *domain;

	cookie = container_of(iovad, struct iommu_dma_cookie, iovad);
	domain = cookie->fq_domain;
	/*
	 * An IOMMU driver that supports DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE
	 * must also provide a non-NULL ops->flush_iotlb_all.
	 */
	domain->ops->flush_iotlb_all(domain);
}
/**
 * iommu_dma_init_domain - Initialise a DMA mapping domain
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
 * @base: IOVA at which the mappable address space starts
 * @size: Size of IOVA space
 * @dev: Device the domain is being initialised for
 *
 * @base and @size should be exact multiples of IOMMU page granularity to
 * avoid rounding surprises. If necessary, we reserve the page at address 0
 * to ensure it is an invalid IOVA. It is safe to reinitialise a domain, but
 * any change which could make prior IOVAs invalid will fail.
 */
static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
		u64 size, struct device *dev)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	unsigned long order, base_pfn;
	struct iova_domain *iovad;
	int attr;

	if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
		return -EINVAL;

	iovad = &cookie->iovad;

	/* Use the smallest supported page size for IOVA granularity */
	order = __ffs(domain->pgsize_bitmap);
	base_pfn = max_t(unsigned long, 1, base >> order);

	/* Check the domain allows at least some access to the device... */
	if (domain->geometry.force_aperture) {
		if (base > domain->geometry.aperture_end ||
		    base + size <= domain->geometry.aperture_start) {
			pr_warn("specified DMA range outside IOMMU capability\n");
			return -EFAULT;
		}
		/* ...then finally give it a kicking to make sure it fits */
		base_pfn = max_t(unsigned long, base_pfn,
				domain->geometry.aperture_start >> order);
	}

	/* start_pfn is always nonzero for an already-initialised domain */
	if (iovad->start_pfn) {
		if (1UL << order != iovad->granule ||
		    base_pfn != iovad->start_pfn) {
			pr_warn("Incompatible range for DMA domain\n");
			return -EFAULT;
		}

		return 0;
	}

	init_iova_domain(iovad, 1UL << order, base_pfn);

	if (!cookie->fq_domain && !iommu_domain_get_attr(domain,
			DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE, &attr) && attr) {
		if (init_iova_flush_queue(iovad, iommu_dma_flush_iotlb_all,
					  iommu_dma_entry_dtor))
			pr_warn("iova flush queue initialization failed\n");
		else
			cookie->fq_domain = domain;
	}

	if (!dev)
		return 0;

	return iova_reserve_iommu_regions(dev, domain);
}
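
/*
 * Worked example for the granule/base_pfn logic above (numbers purely
 * illustrative): with domain->pgsize_bitmap = SZ_4K | SZ_2M | SZ_1G, the
 * smallest supported page size gives order = __ffs(...) = 12, i.e. a 4KB
 * IOVA granule. A @base of 0 then yields base_pfn = max(1, 0 >> 12) = 1,
 * which is what keeps IOVA page 0 reserved as an invalid address.
 */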
static int iommu_dma_deferred_attach(struct device *dev,
		struct iommu_domain *domain)
{
	const struct iommu_ops *ops = domain->ops;

	if (!is_kdump_kernel())
		return 0;

	if (unlikely(ops->is_attach_deferred &&
			ops->is_attach_deferred(domain, dev)))
		return iommu_attach_device(domain, dev);

	return 0;
}
/**
 * dma_info_to_prot - Translate DMA API directions and attributes to IOMMU API
 *                    page flags.
 * @dir: Direction of DMA transfer
 * @coherent: Is the DMA master cache-coherent?
 * @attrs: DMA attributes for the mapping
 *
 * Return: corresponding IOMMU API page protection flags
 */
static int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
		unsigned long attrs)
{
	int prot = coherent ? IOMMU_CACHE : 0;

	if (attrs & DMA_ATTR_PRIVILEGED)
		prot |= IOMMU_PRIV;

	switch (dir) {
	case DMA_BIDIRECTIONAL:
		return prot | IOMMU_READ | IOMMU_WRITE;
	case DMA_TO_DEVICE:
		return prot | IOMMU_READ;
	case DMA_FROM_DEVICE:
		return prot | IOMMU_WRITE;
	default:
		return 0;
	}
}
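
/*
 * Example translations (illustrative): for a cache-coherent device,
 * dma_info_to_prot(DMA_TO_DEVICE, true, 0) yields IOMMU_READ | IOMMU_CACHE,
 * while dma_info_to_prot(DMA_FROM_DEVICE, false, DMA_ATTR_PRIVILEGED)
 * yields IOMMU_WRITE | IOMMU_PRIV.
 */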
static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
		size_t size, u64 dma_limit, struct device *dev)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	unsigned long shift, iova_len, iova = 0;

	if (cookie->type == IOMMU_DMA_MSI_COOKIE) {
		cookie->msi_iova += size;
		return cookie->msi_iova - size;
	}

	shift = iova_shift(iovad);
	iova_len = size >> shift;
	/*
	 * Freeing non-power-of-two-sized allocations back into the IOVA caches
	 * will come back to bite us badly, so we have to waste a bit of space
	 * rounding up anything cacheable to make sure that can't happen. The
	 * order of the unadjusted size will still match upon freeing.
	 */
	if (iova_len < (1 << (IOVA_RANGE_CACHE_MAX_SIZE - 1)))
		iova_len = roundup_pow_of_two(iova_len);

	dma_limit = min_not_zero(dma_limit, dev->bus_dma_limit);

	if (domain->geometry.force_aperture)
		dma_limit = min(dma_limit, (u64)domain->geometry.aperture_end);

	/* Try to get PCI devices a SAC address */
	if (dma_limit > DMA_BIT_MASK(32) && dev_is_pci(dev))
		iova = alloc_iova_fast(iovad, iova_len,
				       DMA_BIT_MASK(32) >> shift, false);

	if (!iova)
		iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift,
				       true);

	return (dma_addr_t)iova << shift;
}
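
/*
 * Worked example for the rounding above (illustrative): with a 4KB granule,
 * a 40KB request gives iova_len = 10 granules, which is rounded up to 16 so
 * that the allocation fits cleanly back into the IOVA range caches on free;
 * the extra IOVA space is simply never mapped.
 */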
static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
		dma_addr_t iova, size_t size, struct page *freelist)
{
	struct iova_domain *iovad = &cookie->iovad;

	/* The MSI case is only ever cleaning up its most recent allocation */
	if (cookie->type == IOMMU_DMA_MSI_COOKIE)
		cookie->msi_iova -= size;
	else if (cookie->fq_domain)	/* non-strict mode */
		queue_iova(iovad, iova_pfn(iovad, iova),
				size >> iova_shift(iovad),
				(unsigned long)freelist);
	else
		free_iova_fast(iovad, iova_pfn(iovad, iova),
				size >> iova_shift(iovad));
}
static void __iommu_dma_unmap(struct device *dev, dma_addr_t dma_addr,
		size_t size)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	size_t iova_off = iova_offset(iovad, dma_addr);
	struct iommu_iotlb_gather iotlb_gather;
	size_t unmapped;

	dma_addr -= iova_off;
	size = iova_align(iovad, size + iova_off);
	iommu_iotlb_gather_init(&iotlb_gather);

	unmapped = iommu_unmap_fast(domain, dma_addr, size, &iotlb_gather);
	WARN_ON(unmapped != size);

	if (!cookie->fq_domain)
		iommu_iotlb_sync(domain, &iotlb_gather);
	iommu_dma_free_iova(cookie, dma_addr, size, iotlb_gather.freelist);
}
static void __iommu_dma_unmap_swiotlb(struct device *dev, dma_addr_t dma_addr,
		size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	phys_addr_t phys;

	phys = iommu_iova_to_phys(domain, dma_addr);
	if (WARN_ON(!phys))
		return;

	__iommu_dma_unmap(dev, dma_addr, size);

	if (unlikely(is_swiotlb_buffer(phys)))
		swiotlb_tbl_unmap_single(dev, phys, size,
				iova_align(iovad, size), dir, attrs);
}
static bool dev_is_untrusted(struct device *dev)
{
	return dev_is_pci(dev) && to_pci_dev(dev)->untrusted;
}
static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
		size_t size, int prot, u64 dma_mask)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	size_t iova_off = iova_offset(iovad, phys);
	dma_addr_t iova;

	if (unlikely(iommu_dma_deferred_attach(dev, domain)))
		return DMA_MAPPING_ERROR;

	size = iova_align(iovad, size + iova_off);

	iova = iommu_dma_alloc_iova(domain, size, dma_mask, dev);
	if (!iova)
		return DMA_MAPPING_ERROR;

	if (iommu_map_atomic(domain, iova, phys - iova_off, size, prot)) {
		iommu_dma_free_iova(cookie, iova, size, NULL);
		return DMA_MAPPING_ERROR;
	}
	return iova + iova_off;
}
static dma_addr_t __iommu_dma_map_swiotlb(struct device *dev, phys_addr_t phys,
		size_t org_size, dma_addr_t dma_mask, bool coherent,
		enum dma_data_direction dir, unsigned long attrs)
{
	int prot = dma_info_to_prot(dir, coherent, attrs);
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	size_t aligned_size = org_size;
	void *padding_start;
	size_t padding_size;
	dma_addr_t iova;

	/*
	 * If both the physical buffer start address and size are
	 * page aligned, we don't need to use a bounce page.
	 */
	if (IS_ENABLED(CONFIG_SWIOTLB) && dev_is_untrusted(dev) &&
	    iova_offset(iovad, phys | org_size)) {
		aligned_size = iova_align(iovad, org_size);
		phys = swiotlb_tbl_map_single(dev, phys, org_size,
					      aligned_size, dir, attrs);

		if (phys == DMA_MAPPING_ERROR)
			return DMA_MAPPING_ERROR;

		/* Cleanup the padding area. */
		padding_start = phys_to_virt(phys);
		padding_size = aligned_size;

		if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
		    (dir == DMA_TO_DEVICE ||
		     dir == DMA_BIDIRECTIONAL)) {
			padding_start += org_size;
			padding_size -= org_size;
		}

		memset(padding_start, 0, padding_size);
	}

	iova = __iommu_dma_map(dev, phys, aligned_size, prot, dma_mask);
	if ((iova == DMA_MAPPING_ERROR) && is_swiotlb_buffer(phys))
		swiotlb_tbl_unmap_single(dev, phys, org_size,
					 aligned_size, dir, attrs);
	return iova;
}
static void __iommu_dma_free_pages(struct page **pages, int count)
{
	while (count--)
		__free_page(pages[count]);
	kvfree(pages);
}
static struct page **__iommu_dma_alloc_pages(struct device *dev,
		unsigned int count, unsigned long order_mask, gfp_t gfp)
{
	struct page **pages;
	unsigned int i = 0, nid = dev_to_node(dev);

	order_mask &= (2U << MAX_ORDER) - 1;
	if (!order_mask)
		return NULL;

	pages = kvzalloc(count * sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return NULL;

	/* IOMMU can map any pages, so himem can also be used here */
	gfp |= __GFP_NOWARN | __GFP_HIGHMEM;

	/* It makes no sense to muck about with huge pages */
	gfp &= ~__GFP_COMP;

	while (count) {
		struct page *page = NULL;
		unsigned int order_size;

		/*
		 * Higher-order allocations are a convenience rather
		 * than a necessity, hence using __GFP_NORETRY until
		 * falling back to minimum-order allocations.
		 */
		for (order_mask &= (2U << __fls(count)) - 1;
		     order_mask; order_mask &= ~order_size) {
			unsigned int order = __fls(order_mask);
			gfp_t alloc_flags = gfp;

			order_size = 1U << order;
			if (order_mask > order_size)
				alloc_flags |= __GFP_NORETRY;
			page = alloc_pages_node(nid, alloc_flags, order);
			if (!page)
				continue;
			if (order)
				split_page(page, order);
			break;
		}
		if (!page) {
			__iommu_dma_free_pages(pages, i);
			return NULL;
		}
		count -= order_size;
		while (order_size--)
			pages[i++] = page++;
	}
	return pages;
}
/**
 * iommu_dma_alloc_remap - Allocate and map a buffer contiguous in IOVA space
 * @dev: Device to allocate memory for. Must be a real device
 *	 attached to an iommu_dma_domain
 * @size: Size of buffer in bytes
 * @dma_handle: Out argument for allocated DMA handle
 * @gfp: Allocation flags
 * @prot: pgprot_t to use for the remapped mapping
 * @attrs: DMA attributes for this allocation
 *
 * If @size is less than PAGE_SIZE, then a full CPU page will be allocated,
 * but an IOMMU which supports smaller pages might not map the whole thing.
 *
 * Return: Mapped virtual address, or NULL on failure.
 */
static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, pgprot_t prot,
		unsigned long attrs)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	bool coherent = dev_is_dma_coherent(dev);
	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
	unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;
	struct page **pages;
	struct sg_table sgt;
	dma_addr_t iova;
	void *vaddr;

	*dma_handle = DMA_MAPPING_ERROR;

	if (unlikely(iommu_dma_deferred_attach(dev, domain)))
		return NULL;

	min_size = alloc_sizes & -alloc_sizes;
	if (min_size < PAGE_SIZE) {
		min_size = PAGE_SIZE;
		alloc_sizes |= PAGE_SIZE;
	} else {
		size = ALIGN(size, min_size);
	}
	if (attrs & DMA_ATTR_ALLOC_SINGLE_PAGES)
		alloc_sizes = min_size;

	count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	pages = __iommu_dma_alloc_pages(dev, count, alloc_sizes >> PAGE_SHIFT,
					gfp);
	if (!pages)
		return NULL;

	size = iova_align(iovad, size);
	iova = iommu_dma_alloc_iova(domain, size, dev->coherent_dma_mask, dev);
	if (!iova)
		goto out_free_pages;

	if (sg_alloc_table_from_pages(&sgt, pages, count, 0, size, GFP_KERNEL))
		goto out_free_iova;

	if (!(ioprot & IOMMU_CACHE)) {
		struct scatterlist *sg;
		int i;

		for_each_sg(sgt.sgl, sg, sgt.orig_nents, i)
			arch_dma_prep_coherent(sg_page(sg), sg->length);
	}

	if (iommu_map_sg_atomic(domain, iova, sgt.sgl, sgt.orig_nents, ioprot)
			< size)
		goto out_free_sg;

	vaddr = dma_common_pages_remap(pages, size, prot,
			__builtin_return_address(0));
	if (!vaddr)
		goto out_unmap;

	*dma_handle = iova;
	sg_free_table(&sgt);
	return vaddr;

out_unmap:
	__iommu_dma_unmap(dev, iova, size);
out_free_sg:
	sg_free_table(&sgt);
out_free_iova:
	iommu_dma_free_iova(cookie, iova, size, NULL);
out_free_pages:
	__iommu_dma_free_pages(pages, count);
	return NULL;
}
static void iommu_dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
	phys_addr_t phys;

	if (dev_is_dma_coherent(dev) && !dev_is_untrusted(dev))
		return;

	phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
	if (!dev_is_dma_coherent(dev))
		arch_sync_dma_for_cpu(phys, size, dir);

	if (is_swiotlb_buffer(phys))
		swiotlb_tbl_sync_single(dev, phys, size, dir, SYNC_FOR_CPU);
}
static void iommu_dma_sync_single_for_device(struct device *dev,
		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
	phys_addr_t phys;

	if (dev_is_dma_coherent(dev) && !dev_is_untrusted(dev))
		return;

	phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
	if (is_swiotlb_buffer(phys))
		swiotlb_tbl_sync_single(dev, phys, size, dir, SYNC_FOR_DEVICE);

	if (!dev_is_dma_coherent(dev))
		arch_sync_dma_for_device(phys, size, dir);
}
static void iommu_dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nelems,
		enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (dev_is_dma_coherent(dev) && !dev_is_untrusted(dev))
		return;

	for_each_sg(sgl, sg, nelems, i) {
		if (!dev_is_dma_coherent(dev))
			arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir);

		if (is_swiotlb_buffer(sg_phys(sg)))
			swiotlb_tbl_sync_single(dev, sg_phys(sg), sg->length,
						dir, SYNC_FOR_CPU);
	}
}
static void iommu_dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nelems,
		enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (dev_is_dma_coherent(dev) && !dev_is_untrusted(dev))
		return;

	for_each_sg(sgl, sg, nelems, i) {
		if (is_swiotlb_buffer(sg_phys(sg)))
			swiotlb_tbl_sync_single(dev, sg_phys(sg), sg->length,
						dir, SYNC_FOR_DEVICE);

		if (!dev_is_dma_coherent(dev))
			arch_sync_dma_for_device(sg_phys(sg), sg->length, dir);
	}
}
static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	phys_addr_t phys = page_to_phys(page) + offset;
	bool coherent = dev_is_dma_coherent(dev);
	dma_addr_t dma_handle;

	dma_handle = __iommu_dma_map_swiotlb(dev, phys, size, dma_get_mask(dev),
			coherent, dir, attrs);
	if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
	    dma_handle != DMA_MAPPING_ERROR)
		arch_sync_dma_for_device(phys, size, dir);
	return dma_handle;
}
static void iommu_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		iommu_dma_sync_single_for_cpu(dev, dma_handle, size, dir);
	__iommu_dma_unmap_swiotlb(dev, dma_handle, size, dir, attrs);
}
/*
 * Prepare a successfully-mapped scatterlist to give back to the caller.
 *
 * At this point the segments are already laid out by iommu_dma_map_sg() to
 * avoid individually crossing any boundaries, so we merely need to check a
 * segment's start address to avoid concatenating across one.
 */
static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
		dma_addr_t dma_addr)
{
	struct scatterlist *s, *cur = sg;
	unsigned long seg_mask = dma_get_seg_boundary(dev);
	unsigned int cur_len = 0, max_len = dma_get_max_seg_size(dev);
	int i, count = 0;

	for_each_sg(sg, s, nents, i) {
		/* Restore this segment's original unaligned fields first */
		unsigned int s_iova_off = sg_dma_address(s);
		unsigned int s_length = sg_dma_len(s);
		unsigned int s_iova_len = s->length;

		s->offset += s_iova_off;
		s->length = s_length;
		sg_dma_address(s) = DMA_MAPPING_ERROR;
		sg_dma_len(s) = 0;

		/*
		 * Now fill in the real DMA data. If...
		 * - there is a valid output segment to append to
		 * - and this segment starts on an IOVA page boundary
		 * - but doesn't fall at a segment boundary
		 * - and wouldn't make the resulting output segment too long
		 */
		if (cur_len && !s_iova_off && (dma_addr & seg_mask) &&
		    (max_len - cur_len >= s_length)) {
			/* ...then concatenate it with the previous one */
			cur_len += s_length;
		} else {
			/* Otherwise start the next output segment */
			if (i > 0)
				cur = sg_next(cur);
			cur_len = s_length;
			count++;

			sg_dma_address(cur) = dma_addr + s_iova_off;
		}

		sg_dma_len(cur) = cur_len;
		dma_addr += s_iova_len;

		if (s_length + s_iova_off < s_iova_len)
			cur_len = 0;
	}
	return count;
}
/*
 * If mapping failed, then just restore the original list,
 * but making sure the DMA fields are invalidated.
 */
static void __invalidate_sg(struct scatterlist *sg, int nents)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (sg_dma_address(s) != DMA_MAPPING_ERROR)
			s->offset += sg_dma_address(s);
		if (sg_dma_len(s))
			s->length = sg_dma_len(s);
		sg_dma_address(s) = DMA_MAPPING_ERROR;
		sg_dma_len(s) = 0;
	}
}
static void iommu_dma_unmap_sg_swiotlb(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		__iommu_dma_unmap_swiotlb(dev, sg_dma_address(s),
				sg_dma_len(s), dir, attrs);
}
static int iommu_dma_map_sg_swiotlb(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		sg_dma_address(s) = __iommu_dma_map_swiotlb(dev, sg_phys(s),
				s->length, dma_get_mask(dev),
				dev_is_dma_coherent(dev), dir, attrs);
		if (sg_dma_address(s) == DMA_MAPPING_ERROR)
			goto out_unmap;
		sg_dma_len(s) = s->length;
	}

	return nents;

out_unmap:
	iommu_dma_unmap_sg_swiotlb(dev, sg, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
	return 0;
}
/*
 * The DMA API client is passing in a scatterlist which could describe
 * any old buffer layout, but the IOMMU API requires everything to be
 * aligned to IOMMU pages. Hence the need for this complicated bit of
 * impedance-matching, to be able to hand off a suitably-aligned list,
 * but still preserve the original offsets and sizes for the caller.
 */
static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	struct scatterlist *s, *prev = NULL;
	int prot = dma_info_to_prot(dir, dev_is_dma_coherent(dev), attrs);
	dma_addr_t iova;
	size_t iova_len = 0;
	unsigned long mask = dma_get_seg_boundary(dev);
	int i;

	if (unlikely(iommu_dma_deferred_attach(dev, domain)))
		return 0;

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		iommu_dma_sync_sg_for_device(dev, sg, nents, dir);

	if (dev_is_untrusted(dev))
		return iommu_dma_map_sg_swiotlb(dev, sg, nents, dir, attrs);

	/*
	 * Work out how much IOVA space we need, and align the segments to
	 * IOVA granules for the IOMMU driver to handle. With some clever
	 * trickery we can modify the list in-place, but reversibly, by
	 * stashing the unaligned parts in the as-yet-unused DMA fields.
	 */
	for_each_sg(sg, s, nents, i) {
		size_t s_iova_off = iova_offset(iovad, s->offset);
		size_t s_length = s->length;
		size_t pad_len = (mask - iova_len + 1) & mask;

		sg_dma_address(s) = s_iova_off;
		sg_dma_len(s) = s_length;
		s->offset -= s_iova_off;
		s_length = iova_align(iovad, s_length + s_iova_off);
		s->length = s_length;

		/*
		 * Due to the alignment of our single IOVA allocation, we can
		 * depend on these assumptions about the segment boundary mask:
		 * - If mask size >= IOVA size, then the IOVA range cannot
		 *   possibly fall across a boundary, so we don't care.
		 * - If mask size < IOVA size, then the IOVA range must start
		 *   exactly on a boundary, therefore we can lay things out
		 *   based purely on segment lengths without needing to know
		 *   the actual addresses beforehand.
		 * - The mask must be a power of 2, so pad_len == 0 if
		 *   iova_len == 0, thus we cannot dereference prev the first
		 *   time through here (i.e. before it has a meaningful value).
		 */
		if (pad_len && pad_len < s_length - 1) {
			prev->length += pad_len;
			iova_len += pad_len;
		}

		iova_len += s_length;
		prev = s;
	}

	iova = iommu_dma_alloc_iova(domain, iova_len, dma_get_mask(dev), dev);
	if (!iova)
		goto out_restore_sg;

	/*
	 * We'll leave any physical concatenation to the IOMMU driver's
	 * implementation - it knows better than we do.
	 */
	if (iommu_map_sg_atomic(domain, iova, sg, nents, prot) < iova_len)
		goto out_free_iova;

	return __finalise_sg(dev, sg, nents, iova);

out_free_iova:
	iommu_dma_free_iova(cookie, iova, iova_len, NULL);
out_restore_sg:
	__invalidate_sg(sg, nents);
	return 0;
}
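
/*
 * Worked example of the stash/restore trick above (illustrative, 4KB
 * granule): a segment with s->offset = 0x100 and s->length = 0x300 is
 * temporarily turned into an IOVA-aligned segment with offset 0 and length
 * 0x1000, while the original 0x100/0x300 are parked in sg_dma_address() and
 * sg_dma_len(). Once the single IOVA allocation has been mapped,
 * __finalise_sg() puts the original offset and length back and writes the
 * real DMA address (iova + 0x100) and length into the DMA fields.
 */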
static void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	dma_addr_t start, end;
	struct scatterlist *tmp;
	int i;

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		iommu_dma_sync_sg_for_cpu(dev, sg, nents, dir);

	if (dev_is_untrusted(dev)) {
		iommu_dma_unmap_sg_swiotlb(dev, sg, nents, dir, attrs);
		return;
	}

	/*
	 * The scatterlist segments are mapped into a single
	 * contiguous IOVA allocation, so this is incredibly easy.
	 */
	start = sg_dma_address(sg);
	for_each_sg(sg_next(sg), tmp, nents - 1, i) {
		if (sg_dma_len(tmp) == 0)
			break;
		sg = tmp;
	}
	end = sg_dma_address(sg) + sg_dma_len(sg);
	__iommu_dma_unmap(dev, start, end - start);
}
static dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	return __iommu_dma_map(dev, phys, size,
			dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO,
			dma_get_mask(dev));
}
static void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	__iommu_dma_unmap(dev, handle, size);
}
static void __iommu_dma_free(struct device *dev, size_t size, void *cpu_addr)
{
	size_t alloc_size = PAGE_ALIGN(size);
	int count = alloc_size >> PAGE_SHIFT;
	struct page *page = NULL, **pages = NULL;

	/* Non-coherent atomic allocation? Easy */
	if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
	    dma_free_from_pool(dev, cpu_addr, alloc_size))
		return;

	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
		/*
		 * If the address is remapped, then it's either non-coherent
		 * or highmem CMA, or an iommu_dma_alloc_remap() construction.
		 */
		pages = dma_common_find_pages(cpu_addr);
		if (!pages)
			page = vmalloc_to_page(cpu_addr);
		dma_common_free_remap(cpu_addr, alloc_size);
	} else {
		/* Lowmem means a coherent atomic or CMA allocation */
		page = virt_to_page(cpu_addr);
	}

	if (pages)
		__iommu_dma_free_pages(pages, count);
	if (page)
		dma_free_contiguous(dev, page, alloc_size);
}
static void iommu_dma_free(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t handle, unsigned long attrs)
{
	__iommu_dma_unmap(dev, handle, size);
	__iommu_dma_free(dev, size, cpu_addr);
}
static void *iommu_dma_alloc_pages(struct device *dev, size_t size,
		struct page **pagep, gfp_t gfp, unsigned long attrs)
{
	bool coherent = dev_is_dma_coherent(dev);
	size_t alloc_size = PAGE_ALIGN(size);
	int node = dev_to_node(dev);
	struct page *page = NULL;
	void *cpu_addr;

	page = dma_alloc_contiguous(dev, alloc_size, gfp);
	if (!page)
		page = alloc_pages_node(node, gfp, get_order(alloc_size));
	if (!page)
		return NULL;

	if (IS_ENABLED(CONFIG_DMA_REMAP) && (!coherent || PageHighMem(page))) {
		pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);

		cpu_addr = dma_common_contiguous_remap(page, alloc_size,
				prot, __builtin_return_address(0));
		if (!cpu_addr)
			goto out_free_pages;

		if (!coherent)
			arch_dma_prep_coherent(page, size);
	} else {
		cpu_addr = page_address(page);
	}

	*pagep = page;
	memset(cpu_addr, 0, alloc_size);
	return cpu_addr;

out_free_pages:
	dma_free_contiguous(dev, page, alloc_size);
	return NULL;
}
static void *iommu_dma_alloc(struct device *dev, size_t size,
		dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
{
	bool coherent = dev_is_dma_coherent(dev);
	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
	struct page *page = NULL;
	void *cpu_addr;

	gfp |= __GFP_ZERO;

	if (IS_ENABLED(CONFIG_DMA_REMAP) && gfpflags_allow_blocking(gfp) &&
	    !(attrs & DMA_ATTR_FORCE_CONTIGUOUS)) {
		return iommu_dma_alloc_remap(dev, size, handle, gfp,
				dma_pgprot(dev, PAGE_KERNEL, attrs), attrs);
	}

	if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
	    !gfpflags_allow_blocking(gfp) && !coherent)
		page = dma_alloc_from_pool(dev, PAGE_ALIGN(size), &cpu_addr,
					   gfp, NULL);
	else
		cpu_addr = iommu_dma_alloc_pages(dev, size, &page, gfp, attrs);
	if (!cpu_addr)
		return NULL;

	*handle = __iommu_dma_map(dev, page_to_phys(page), size, ioprot,
			dev->coherent_dma_mask);
	if (*handle == DMA_MAPPING_ERROR) {
		__iommu_dma_free(dev, size, cpu_addr);
		return NULL;
	}

	return cpu_addr;
}
#ifdef CONFIG_DMA_REMAP
static void *iommu_dma_alloc_noncoherent(struct device *dev, size_t size,
		dma_addr_t *handle, enum dma_data_direction dir, gfp_t gfp)
{
	if (!gfpflags_allow_blocking(gfp)) {
		struct page *page;

		page = dma_common_alloc_pages(dev, size, handle, dir, gfp);
		if (!page)
			return NULL;
		return page_address(page);
	}

	return iommu_dma_alloc_remap(dev, size, handle, gfp | __GFP_ZERO,
				     PAGE_KERNEL, 0);
}

static void iommu_dma_free_noncoherent(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t handle, enum dma_data_direction dir)
{
	__iommu_dma_unmap(dev, handle, size);
	__iommu_dma_free(dev, size, cpu_addr);
}
#else
#define iommu_dma_alloc_noncoherent		NULL
#define iommu_dma_free_noncoherent		NULL
#endif /* CONFIG_DMA_REMAP */
static int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long pfn, off = vma->vm_pgoff;
	int ret;

	vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);

	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (off >= nr_pages || vma_pages(vma) > nr_pages - off)
		return -ENXIO;

	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
		struct page **pages = dma_common_find_pages(cpu_addr);

		if (pages)
			return vm_map_pages(vma, pages, nr_pages);
		pfn = vmalloc_to_pfn(cpu_addr);
	} else {
		pfn = page_to_pfn(virt_to_page(cpu_addr));
	}

	return remap_pfn_range(vma, vma->vm_start, pfn + off,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}
static int iommu_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	struct page *page;
	int ret;

	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
		struct page **pages = dma_common_find_pages(cpu_addr);

		if (pages) {
			return sg_alloc_table_from_pages(sgt, pages,
					PAGE_ALIGN(size) >> PAGE_SHIFT,
					0, size, GFP_KERNEL);
		}

		page = vmalloc_to_page(cpu_addr);
	} else {
		page = virt_to_page(cpu_addr);
	}

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (!ret)
		sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	return ret;
}
static unsigned long iommu_dma_get_merge_boundary(struct device *dev)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);

	return (1UL << __ffs(domain->pgsize_bitmap)) - 1;
}
static const struct dma_map_ops iommu_dma_ops = {
	.alloc			= iommu_dma_alloc,
	.free			= iommu_dma_free,
	.alloc_pages		= dma_common_alloc_pages,
	.free_pages		= dma_common_free_pages,
	.alloc_noncoherent	= iommu_dma_alloc_noncoherent,
	.free_noncoherent	= iommu_dma_free_noncoherent,
	.mmap			= iommu_dma_mmap,
	.get_sgtable		= iommu_dma_get_sgtable,
	.map_page		= iommu_dma_map_page,
	.unmap_page		= iommu_dma_unmap_page,
	.map_sg			= iommu_dma_map_sg,
	.unmap_sg		= iommu_dma_unmap_sg,
	.sync_single_for_cpu	= iommu_dma_sync_single_for_cpu,
	.sync_single_for_device	= iommu_dma_sync_single_for_device,
	.sync_sg_for_cpu	= iommu_dma_sync_sg_for_cpu,
	.sync_sg_for_device	= iommu_dma_sync_sg_for_device,
	.map_resource		= iommu_dma_map_resource,
	.unmap_resource		= iommu_dma_unmap_resource,
	.get_merge_boundary	= iommu_dma_get_merge_boundary,
};
/*
 * The IOMMU core code allocates the default DMA domain, which the underlying
 * IOMMU driver needs to support via the dma-iommu layer.
 */
void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);

	if (!domain)
		goto out_err;

	/*
	 * The IOMMU core code allocates the default DMA domain, which the
	 * underlying IOMMU driver needs to support via the dma-iommu layer.
	 */
	if (domain->type == IOMMU_DOMAIN_DMA) {
		if (iommu_dma_init_domain(domain, dma_base, size, dev))
			goto out_err;
		dev->dma_ops = &iommu_dma_ops;
	}

	return;
out_err:
	pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
		dev_name(dev));
}
static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
		phys_addr_t msi_addr, struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iommu_dma_msi_page *msi_page;
	dma_addr_t iova;
	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
	size_t size = cookie_msi_granule(cookie);

	msi_addr &= ~(phys_addr_t)(size - 1);
	list_for_each_entry(msi_page, &cookie->msi_page_list, list)
		if (msi_page->phys == msi_addr)
			return msi_page;

	msi_page = kzalloc(sizeof(*msi_page), GFP_KERNEL);
	if (!msi_page)
		return NULL;

	iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
	if (!iova)
		goto out_free_page;

	if (iommu_map(domain, iova, msi_addr, size, prot))
		goto out_free_iova;

	INIT_LIST_HEAD(&msi_page->list);
	msi_page->phys = msi_addr;
	msi_page->iova = iova;
	list_add(&msi_page->list, &cookie->msi_page_list);
	return msi_page;

out_free_iova:
	iommu_dma_free_iova(cookie, iova, size, NULL);
out_free_page:
	kfree(msi_page);
	return NULL;
}
int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr)
{
	struct device *dev = msi_desc_to_dev(desc);
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iommu_dma_msi_page *msi_page;
	static DEFINE_MUTEX(msi_prepare_lock); /* see below */

	if (!domain || !domain->iova_cookie) {
		desc->iommu_cookie = NULL;
		return 0;
	}

	/*
	 * In fact the whole prepare operation should already be serialised by
	 * irq_domain_mutex further up the callchain, but that's pretty subtle
	 * on its own, so consider this locking as failsafe documentation...
	 */
	mutex_lock(&msi_prepare_lock);
	msi_page = iommu_dma_get_msi_page(dev, msi_addr, domain);
	mutex_unlock(&msi_prepare_lock);

	msi_desc_set_iommu_cookie(desc, msi_page);

	if (!msi_page)
		return -ENOMEM;
	return 0;
}
void iommu_dma_compose_msi_msg(struct msi_desc *desc,
			       struct msi_msg *msg)
{
	struct device *dev = msi_desc_to_dev(desc);
	const struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	const struct iommu_dma_msi_page *msi_page;

	msi_page = msi_desc_get_iommu_cookie(desc);

	if (!domain || !domain->iova_cookie || WARN_ON(!msi_page))
		return;

	msg->address_hi = upper_32_bits(msi_page->iova);
	msg->address_lo &= cookie_msi_granule(domain->iova_cookie) - 1;
	msg->address_lo += lower_32_bits(msi_page->iova);
}
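
/*
 * Example (illustrative sketch only): an MSI controller driver pairs the two
 * helpers above, calling iommu_dma_prepare_msi() from its prepare path and
 * iommu_dma_compose_msi_msg() when composing the message. The doorbell
 * address and hwirq helpers below are hypothetical:
 *
 *	static void my_irq_compose_msi_msg(struct irq_data *d,
 *					   struct msi_msg *msg)
 *	{
 *		msg->address_lo = lower_32_bits(my_doorbell_addr);
 *		msg->address_hi = upper_32_bits(my_doorbell_addr);
 *		msg->data = my_hwirq(d);
 *		iommu_dma_compose_msi_msg(irq_data_get_msi_desc(d), msg);
 *	}
 */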
static int iommu_dma_init(void)
{
	return iova_cache_get();
}
arch_initcall(iommu_dma_init);