/*
 * A fairly generic DMA-API to IOMMU-API glue layer.
 *
 * Copyright (C) 2014-2015 ARM Ltd.
 *
 * based in part on arch/arm/mm/dma-mapping.c:
 * Copyright (C) 2000-2004 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/acpi_iort.h>
#include <linux/device.h>
#include <linux/dma-iommu.h>
#include <linux/gfp.h>
#include <linux/huge_mm.h>
#include <linux/iommu.h>
#include <linux/iova.h>
#include <linux/irq.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>

#define IOMMU_MAPPING_ERROR	0
struct iommu_dma_msi_page {
	struct list_head	list;
	dma_addr_t		iova;
	phys_addr_t		phys;
};

enum iommu_dma_cookie_type {
	IOMMU_DMA_IOVA_COOKIE,
	IOMMU_DMA_MSI_COOKIE,
};

struct iommu_dma_cookie {
	enum iommu_dma_cookie_type	type;
	union {
		/* Full allocator for IOMMU_DMA_IOVA_COOKIE */
		struct iova_domain	iovad;
		/* Trivial linear page allocator for IOMMU_DMA_MSI_COOKIE */
		dma_addr_t		msi_iova;
	};
	struct list_head		msi_page_list;
	spinlock_t			msi_lock;

	/* Domain for flush queue callback; NULL if flush queue not in use */
	struct iommu_domain		*fq_domain;
};
static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie)
{
	if (cookie->type == IOMMU_DMA_IOVA_COOKIE)
		return cookie->iovad.granule;
	return PAGE_SIZE;
}

static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type)
{
	struct iommu_dma_cookie *cookie;

	cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
	if (cookie) {
		spin_lock_init(&cookie->msi_lock);
		INIT_LIST_HEAD(&cookie->msi_page_list);
		cookie->type = type;
	}
	return cookie;
}

int iommu_dma_init(void)
{
	return iova_cache_get();
}
/**
 * iommu_get_dma_cookie - Acquire DMA-API resources for a domain
 * @domain: IOMMU domain to prepare for DMA-API usage
 *
 * IOMMU drivers should normally call this from their domain_alloc
 * callback when domain->type == IOMMU_DOMAIN_DMA.
 */
int iommu_get_dma_cookie(struct iommu_domain *domain)
{
	if (domain->iova_cookie)
		return -EEXIST;

	domain->iova_cookie = cookie_alloc(IOMMU_DMA_IOVA_COOKIE);
	if (!domain->iova_cookie)
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL(iommu_get_dma_cookie);
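
/*
 * Example (illustrative only, not part of this file): a hypothetical IOMMU
 * driver's domain_alloc callback would typically pair iommu_get_dma_cookie()
 * with a matching iommu_put_dma_cookie() in domain_free. The "my_*" names
 * below are made up for the sketch:
 *
 *	static struct iommu_domain *my_domain_alloc(unsigned type)
 *	{
 *		struct my_domain *dom = kzalloc(sizeof(*dom), GFP_KERNEL);
 *
 *		if (!dom)
 *			return NULL;
 *		if (type == IOMMU_DOMAIN_DMA &&
 *		    iommu_get_dma_cookie(&dom->domain)) {
 *			kfree(dom);
 *			return NULL;
 *		}
 *		return &dom->domain;
 *	}
 */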
/**
 * iommu_get_msi_cookie - Acquire just MSI remapping resources
 * @domain: IOMMU domain to prepare
 * @base: Start address of IOVA region for MSI mappings
 *
 * Users who manage their own IOVA allocation and do not want DMA API support,
 * but would still like to take advantage of automatic MSI remapping, can use
 * this to initialise their own domain appropriately. Users should reserve a
 * contiguous IOVA region, starting at @base, large enough to accommodate the
 * number of PAGE_SIZE mappings necessary to cover every MSI doorbell address
 * used by the devices attached to @domain.
 */
int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
{
	struct iommu_dma_cookie *cookie;

	if (domain->type != IOMMU_DOMAIN_UNMANAGED)
		return -EINVAL;

	if (domain->iova_cookie)
		return -EEXIST;

	cookie = cookie_alloc(IOMMU_DMA_MSI_COOKIE);
	if (!cookie)
		return -ENOMEM;

	cookie->msi_iova = base;
	domain->iova_cookie = cookie;
	return 0;
}
EXPORT_SYMBOL(iommu_get_msi_cookie);
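
/*
 * Example (illustrative sketch): a caller managing its own unmanaged domain
 * might carve out an arbitrary, assumed IOVA region for MSIs and hand it to
 * this helper:
 *
 *	#define MY_MSI_IOVA_BASE	0x8000000	// hypothetical value
 *
 *	domain = iommu_domain_alloc(bus);
 *	if (domain && iommu_get_msi_cookie(domain, MY_MSI_IOVA_BASE))
 *		goto out_free_domain;
 *
 * The caller must then keep [MY_MSI_IOVA_BASE, base + N * PAGE_SIZE) out of
 * its own IOVA allocator, where N covers every MSI doorbell in use.
 */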
/**
 * iommu_put_dma_cookie - Release a domain's DMA mapping resources
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie() or
 *          iommu_get_msi_cookie()
 *
 * IOMMU drivers should normally call this from their domain_free callback.
 */
void iommu_put_dma_cookie(struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iommu_dma_msi_page *msi, *tmp;

	if (!cookie)
		return;

	if (cookie->type == IOMMU_DMA_IOVA_COOKIE && cookie->iovad.granule)
		put_iova_domain(&cookie->iovad);

	list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list) {
		list_del(&msi->list);
		kfree(msi);
	}
	kfree(cookie);
	domain->iova_cookie = NULL;
}
EXPORT_SYMBOL(iommu_put_dma_cookie);
/**
 * iommu_dma_get_resv_regions - Reserved region driver helper
 * @dev: Device from iommu_get_resv_regions()
 * @list: Reserved region list from iommu_get_resv_regions()
 *
 * IOMMU drivers can use this to implement their .get_resv_regions callback
 * for general non-IOMMU-specific reservations. Currently, this covers GICv3
 * ITS region reservation on ACPI based ARM platforms that may require HW MSI
 * reservation.
 */
void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list)
{
	if (!is_of_node(dev->iommu_fwspec->iommu_fwnode))
		iort_iommu_msi_get_resv_regions(dev, list);
}
EXPORT_SYMBOL(iommu_dma_get_resv_regions);
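
/*
 * Example (illustrative only): an IOMMU driver's .get_resv_regions callback
 * would normally add any driver-specific regions of its own and then chain
 * into the helper above. "MY_SW_MSI_BASE"/"MY_SW_MSI_SIZE" are hypothetical:
 *
 *	static void my_get_resv_regions(struct device *dev,
 *					struct list_head *head)
 *	{
 *		struct iommu_resv_region *region;
 *		int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
 *
 *		region = iommu_alloc_resv_region(MY_SW_MSI_BASE, MY_SW_MSI_SIZE,
 *						 prot, IOMMU_RESV_SW_MSI);
 *		if (region)
 *			list_add_tail(&region->list, head);
 *
 *		iommu_dma_get_resv_regions(dev, head);
 *	}
 */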
static int cookie_init_hw_msi_region(struct iommu_dma_cookie *cookie,
		phys_addr_t start, phys_addr_t end)
{
	struct iova_domain *iovad = &cookie->iovad;
	struct iommu_dma_msi_page *msi_page;
	int i, num_pages;

	start -= iova_offset(iovad, start);
	num_pages = iova_align(iovad, end - start) >> iova_shift(iovad);

	msi_page = kcalloc(num_pages, sizeof(*msi_page), GFP_KERNEL);
	if (!msi_page)
		return -ENOMEM;

	for (i = 0; i < num_pages; i++) {
		msi_page[i].phys = start;
		msi_page[i].iova = start;
		INIT_LIST_HEAD(&msi_page[i].list);
		list_add(&msi_page[i].list, &cookie->msi_page_list);
		start += iovad->granule;
	}

	return 0;
}

static void iova_reserve_pci_windows(struct pci_dev *dev,
		struct iova_domain *iovad)
{
	struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus);
	struct resource_entry *window;
	unsigned long lo, hi;

	resource_list_for_each_entry(window, &bridge->windows) {
		if (resource_type(window->res) != IORESOURCE_MEM)
			continue;

		lo = iova_pfn(iovad, window->res->start - window->offset);
		hi = iova_pfn(iovad, window->res->end - window->offset);
		reserve_iova(iovad, lo, hi);
	}
}
static int iova_reserve_iommu_regions(struct device *dev,
		struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	struct iommu_resv_region *region;
	LIST_HEAD(resv_regions);
	int ret = 0;

	if (dev_is_pci(dev))
		iova_reserve_pci_windows(to_pci_dev(dev), iovad);

	iommu_get_resv_regions(dev, &resv_regions);
	list_for_each_entry(region, &resv_regions, list) {
		unsigned long lo, hi;

		/* We ARE the software that manages these! */
		if (region->type == IOMMU_RESV_SW_MSI)
			continue;

		lo = iova_pfn(iovad, region->start);
		hi = iova_pfn(iovad, region->start + region->length - 1);
		reserve_iova(iovad, lo, hi);

		if (region->type == IOMMU_RESV_MSI)
			ret = cookie_init_hw_msi_region(cookie, region->start,
					region->start + region->length);
		if (ret)
			break;
	}
	iommu_put_resv_regions(dev, &resv_regions);

	return ret;
}
static void iommu_dma_flush_iotlb_all(struct iova_domain *iovad)
{
	struct iommu_dma_cookie *cookie;
	struct iommu_domain *domain;

	cookie = container_of(iovad, struct iommu_dma_cookie, iovad);
	domain = cookie->fq_domain;
	/*
	 * The IOMMU driver supporting DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE
	 * implies that ops->flush_iotlb_all must be non-NULL.
	 */
	domain->ops->flush_iotlb_all(domain);
}
/**
 * iommu_dma_init_domain - Initialise a DMA mapping domain
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
 * @base: IOVA at which the mappable address space starts
 * @size: Size of IOVA space
 * @dev: Device the domain is being initialised for
 *
 * @base and @size should be exact multiples of IOMMU page granularity to
 * avoid rounding surprises. If necessary, we reserve the page at address 0
 * to ensure it is an invalid IOVA. It is safe to reinitialise a domain, but
 * any change which could make prior IOVAs invalid will fail.
 */
int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
		u64 size, struct device *dev)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	unsigned long order, base_pfn, end_pfn;
	int attr;

	if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
		return -EINVAL;

	/* Use the smallest supported page size for IOVA granularity */
	order = __ffs(domain->pgsize_bitmap);
	base_pfn = max_t(unsigned long, 1, base >> order);
	end_pfn = (base + size - 1) >> order;

	/* Check the domain allows at least some access to the device... */
	if (domain->geometry.force_aperture) {
		if (base > domain->geometry.aperture_end ||
		    base + size <= domain->geometry.aperture_start) {
			pr_warn("specified DMA range outside IOMMU capability\n");
			return -EFAULT;
		}
		/* ...then finally give it a kicking to make sure it fits */
		base_pfn = max_t(unsigned long, base_pfn,
				domain->geometry.aperture_start >> order);
	}

	/* start_pfn is always nonzero for an already-initialised domain */
	if (iovad->start_pfn) {
		if (1UL << order != iovad->granule ||
		    base_pfn != iovad->start_pfn) {
			pr_warn("Incompatible range for DMA domain\n");
			return -EFAULT;
		}

		return 0;
	}

	init_iova_domain(iovad, 1UL << order, base_pfn);

	if (!cookie->fq_domain && !iommu_domain_get_attr(domain,
			DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE, &attr) && attr) {
		cookie->fq_domain = domain;
		init_iova_flush_queue(iovad, iommu_dma_flush_iotlb_all, NULL);
	}

	if (!dev)
		return 0;

	return iova_reserve_iommu_regions(dev, domain);
}
EXPORT_SYMBOL(iommu_dma_init_domain);
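
/*
 * Example (illustrative sketch): arch code setting up DMA ops for a device
 * typically calls this with the device's DMA window, e.g. from the arm64
 * arch_setup_dma_ops() path (error handling simplified here):
 *
 *	if (iommu_dma_init_domain(domain, dma_base, dma_size, dev))
 *		goto out_err;	// fall back to non-IOMMU DMA ops
 */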
/**
 * dma_info_to_prot - Translate DMA API directions and attributes to IOMMU API
 *                    page flags.
 * @dir: Direction of DMA transfer
 * @coherent: Is the DMA master cache-coherent?
 * @attrs: DMA attributes for the mapping
 *
 * Return: corresponding IOMMU API page protection flags
 */
int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
		     unsigned long attrs)
{
	int prot = coherent ? IOMMU_CACHE : 0;

	if (attrs & DMA_ATTR_PRIVILEGED)
		prot |= IOMMU_PRIV;

	switch (dir) {
	case DMA_BIDIRECTIONAL:
		return prot | IOMMU_READ | IOMMU_WRITE;
	case DMA_TO_DEVICE:
		return prot | IOMMU_READ;
	case DMA_FROM_DEVICE:
		return prot | IOMMU_WRITE;
	default:
		return 0;
	}
}
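
/*
 * Worked example: a DMA_FROM_DEVICE mapping (device writes memory) for a
 * non-coherent master with no special attributes yields
 *
 *	dma_info_to_prot(DMA_FROM_DEVICE, false, 0) == IOMMU_WRITE
 *
 * while the same mapping for a coherent master also gets IOMMU_CACHE, and
 * passing DMA_ATTR_PRIVILEGED additionally ORs in IOMMU_PRIV.
 */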
static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
		size_t size, dma_addr_t dma_limit, struct device *dev)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	unsigned long shift, iova_len, iova = 0;

	if (cookie->type == IOMMU_DMA_MSI_COOKIE) {
		cookie->msi_iova += size;
		return cookie->msi_iova - size;
	}

	shift = iova_shift(iovad);
	iova_len = size >> shift;
	/*
	 * Freeing non-power-of-two-sized allocations back into the IOVA caches
	 * will come back to bite us badly, so we have to waste a bit of space
	 * rounding up anything cacheable to make sure that can't happen. The
	 * order of the unadjusted size will still match upon freeing.
	 */
	if (iova_len < (1 << (IOVA_RANGE_CACHE_MAX_SIZE - 1)))
		iova_len = roundup_pow_of_two(iova_len);

	if (dev->bus_dma_mask)
		dma_limit &= dev->bus_dma_mask;

	if (domain->geometry.force_aperture)
		dma_limit = min(dma_limit, domain->geometry.aperture_end);

	/* Try to get PCI devices a SAC address */
	if (dma_limit > DMA_BIT_MASK(32) && dev_is_pci(dev))
		iova = alloc_iova_fast(iovad, iova_len,
				       DMA_BIT_MASK(32) >> shift, false);

	if (!iova)
		iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift,
				       true);

	return (dma_addr_t)iova << shift;
}

static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
		dma_addr_t iova, size_t size)
{
	struct iova_domain *iovad = &cookie->iovad;

	/* The MSI case is only ever cleaning up its most recent allocation */
	if (cookie->type == IOMMU_DMA_MSI_COOKIE)
		cookie->msi_iova -= size;
	else if (cookie->fq_domain)	/* non-strict mode */
		queue_iova(iovad, iova_pfn(iovad, iova),
				size >> iova_shift(iovad), 0);
	else
		free_iova_fast(iovad, iova_pfn(iovad, iova),
				size >> iova_shift(iovad));
}
static void __iommu_dma_unmap(struct iommu_domain *domain, dma_addr_t dma_addr,
		size_t size)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	size_t iova_off = iova_offset(iovad, dma_addr);

	dma_addr -= iova_off;
	size = iova_align(iovad, size + iova_off);

	WARN_ON(iommu_unmap_fast(domain, dma_addr, size) != size);
	if (!cookie->fq_domain)
		iommu_tlb_sync(domain);
	iommu_dma_free_iova(cookie, dma_addr, size);
}

static void __iommu_dma_free_pages(struct page **pages, int count)
{
	while (count--)
		__free_page(pages[count]);
	kvfree(pages);
}
static struct page **__iommu_dma_alloc_pages(unsigned int count,
		unsigned long order_mask, gfp_t gfp)
{
	struct page **pages;
	unsigned int i = 0, array_size = count * sizeof(*pages);

	order_mask &= (2U << MAX_ORDER) - 1;
	if (!order_mask)
		return NULL;

	if (array_size <= PAGE_SIZE)
		pages = kzalloc(array_size, GFP_KERNEL);
	else
		pages = vzalloc(array_size);
	if (!pages)
		return NULL;

	/* IOMMU can map any pages, so himem can also be used here */
	gfp |= __GFP_NOWARN | __GFP_HIGHMEM;

	while (count) {
		struct page *page = NULL;
		unsigned int order_size;

		/*
		 * Higher-order allocations are a convenience rather
		 * than a necessity, hence using __GFP_NORETRY until
		 * falling back to minimum-order allocations.
		 */
		for (order_mask &= (2U << __fls(count)) - 1;
		     order_mask; order_mask &= ~order_size) {
			unsigned int order = __fls(order_mask);

			order_size = 1U << order;
			page = alloc_pages((order_mask - order_size) ?
					   gfp | __GFP_NORETRY : gfp, order);
			if (!page)
				continue;
			if (!order)
				break;
			if (!PageCompound(page)) {
				split_page(page, order);
				break;
			} else if (!split_huge_page(page)) {
				break;
			}
			__free_pages(page, order);
		}
		if (!page) {
			__iommu_dma_free_pages(pages, i);
			return NULL;
		}
		count -= order_size;
		while (order_size--)
			pages[i++] = page++;
	}
	return pages;
}
/**
 * iommu_dma_free - Free a buffer allocated by iommu_dma_alloc()
 * @dev: Device which owns this buffer
 * @pages: Array of buffer pages as returned by iommu_dma_alloc()
 * @size: Size of buffer in bytes
 * @handle: DMA address of buffer
 *
 * Frees both the pages associated with the buffer, and the array
 * describing them
 */
void iommu_dma_free(struct device *dev, struct page **pages, size_t size,
		dma_addr_t *handle)
{
	__iommu_dma_unmap(iommu_get_dma_domain(dev), *handle, size);
	__iommu_dma_free_pages(pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
	*handle = IOMMU_MAPPING_ERROR;
}
/**
 * iommu_dma_alloc - Allocate and map a buffer contiguous in IOVA space
 * @dev: Device to allocate memory for. Must be a real device
 *	 attached to an iommu_dma_domain
 * @size: Size of buffer in bytes
 * @gfp: Allocation flags
 * @attrs: DMA attributes for this allocation
 * @prot: IOMMU mapping flags
 * @handle: Out argument for allocated DMA handle
 * @flush_page: Arch callback which must ensure PAGE_SIZE bytes from the
 *		given VA/PA are visible to the given non-coherent device.
 *
 * If @size is less than PAGE_SIZE, then a full CPU page will be allocated,
 * but an IOMMU which supports smaller pages might not map the whole thing.
 *
 * Return: Array of struct page pointers describing the buffer,
 *	   or NULL on failure.
 */
struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp,
		unsigned long attrs, int prot, dma_addr_t *handle,
		void (*flush_page)(struct device *, const void *, phys_addr_t))
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	struct page **pages;
	struct sg_table sgt;
	dma_addr_t iova;
	unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;

	*handle = IOMMU_MAPPING_ERROR;

	min_size = alloc_sizes & -alloc_sizes;
	if (min_size < PAGE_SIZE) {
		min_size = PAGE_SIZE;
		alloc_sizes |= PAGE_SIZE;
	} else {
		size = ALIGN(size, min_size);
	}
	if (attrs & DMA_ATTR_ALLOC_SINGLE_PAGES)
		alloc_sizes = min_size;

	count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	pages = __iommu_dma_alloc_pages(count, alloc_sizes >> PAGE_SHIFT, gfp);
	if (!pages)
		return NULL;

	size = iova_align(iovad, size);
	iova = iommu_dma_alloc_iova(domain, size, dev->coherent_dma_mask, dev);
	if (!iova)
		goto out_free_pages;

	if (sg_alloc_table_from_pages(&sgt, pages, count, 0, size, GFP_KERNEL))
		goto out_free_iova;

	if (!(prot & IOMMU_CACHE)) {
		struct sg_mapping_iter miter;
		/*
		 * The CPU-centric flushing implied by SG_MITER_TO_SG isn't
		 * sufficient here, so skip it by using the "wrong" direction.
		 */
		sg_miter_start(&miter, sgt.sgl, sgt.orig_nents, SG_MITER_FROM_SG);
		while (sg_miter_next(&miter))
			flush_page(dev, miter.addr, page_to_phys(miter.page));
		sg_miter_stop(&miter);
	}

	if (iommu_map_sg(domain, iova, sgt.sgl, sgt.orig_nents, prot)
			< size)
		goto out_free_sg;

	*handle = iova;
	sg_free_table(&sgt);
	return pages;

out_free_sg:
	sg_free_table(&sgt);
out_free_iova:
	iommu_dma_free_iova(cookie, iova, size);
out_free_pages:
	__iommu_dma_free_pages(pages, count);
	return NULL;
}
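
/*
 * Example (illustrative sketch): an arch dma_map_ops ->alloc implementation
 * for a non-coherent device might use this roughly as arm64 does, then remap
 * the page array into a contiguous kernel VA. "my_flush_page" is a made-up
 * name for the arch cache-maintenance callback:
 *
 *	pages = iommu_dma_alloc(dev, size, gfp, attrs, ioprot, handle,
 *				my_flush_page);
 *	if (!pages)
 *		return NULL;
 *	vaddr = dma_common_pages_remap(pages, size, VM_USERMAP, prot,
 *				       __builtin_return_address(0));
 *	if (!vaddr)
 *		iommu_dma_free(dev, pages, size, handle);
 */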
/**
 * iommu_dma_mmap - Map a buffer into provided user VMA
 * @pages: Array representing buffer from iommu_dma_alloc()
 * @size: Size of buffer in bytes
 * @vma: VMA describing requested userspace mapping
 *
 * Maps the pages of the buffer in @pages into @vma. The caller is responsible
 * for verifying the correct size and protection of @vma beforehand.
 */
int iommu_dma_mmap(struct page **pages, size_t size, struct vm_area_struct *vma)
{
	unsigned long uaddr = vma->vm_start;
	unsigned int i, count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	int ret = -ENXIO;

	for (i = vma->vm_pgoff; i < count && uaddr < vma->vm_end; i++) {
		ret = vm_insert_page(vma, uaddr, pages[i]);
		if (ret)
			break;
		uaddr += PAGE_SIZE;
	}
	return ret;
}
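
/*
 * Example (illustrative sketch): an arch ->mmap implementation that kept the
 * page array from iommu_dma_alloc() around could, after adjusting
 * vma->vm_page_prot for whatever non-cacheable attributes the architecture
 * requires and checking vma_pages(vma) against the buffer size, simply do:
 *
 *	return iommu_dma_mmap(pages, size, vma);
 */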
static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
		size_t size, int prot, struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	size_t iova_off = 0;
	dma_addr_t iova;

	if (cookie->type == IOMMU_DMA_IOVA_COOKIE) {
		iova_off = iova_offset(&cookie->iovad, phys);
		size = iova_align(&cookie->iovad, size + iova_off);
	}

	iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
	if (!iova)
		return IOMMU_MAPPING_ERROR;

	if (iommu_map(domain, iova, phys - iova_off, size, prot)) {
		iommu_dma_free_iova(cookie, iova, size);
		return IOMMU_MAPPING_ERROR;
	}
	return iova + iova_off;
}

dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, int prot)
{
	return __iommu_dma_map(dev, page_to_phys(page) + offset, size, prot,
			iommu_get_dma_domain(dev));
}

void iommu_dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	__iommu_dma_unmap(iommu_get_dma_domain(dev), handle, size);
}
/*
 * Prepare a successfully-mapped scatterlist to give back to the caller.
 *
 * At this point the segments are already laid out by iommu_dma_map_sg() to
 * avoid individually crossing any boundaries, so we merely need to check a
 * segment's start address to avoid concatenating across one.
 */
static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
		dma_addr_t dma_addr)
{
	struct scatterlist *s, *cur = sg;
	unsigned long seg_mask = dma_get_seg_boundary(dev);
	unsigned int cur_len = 0, max_len = dma_get_max_seg_size(dev);
	int i, count = 0;

	for_each_sg(sg, s, nents, i) {
		/* Restore this segment's original unaligned fields first */
		unsigned int s_iova_off = sg_dma_address(s);
		unsigned int s_length = sg_dma_len(s);
		unsigned int s_iova_len = s->length;

		s->offset += s_iova_off;
		s->length = s_length;
		sg_dma_address(s) = IOMMU_MAPPING_ERROR;
		sg_dma_len(s) = 0;

		/*
		 * Now fill in the real DMA data. If...
		 * - there is a valid output segment to append to
		 * - and this segment starts on an IOVA page boundary
		 * - but doesn't fall at a segment boundary
		 * - and wouldn't make the resulting output segment too long
		 */
		if (cur_len && !s_iova_off && (dma_addr & seg_mask) &&
		    (cur_len + s_length <= max_len)) {
			/* ...then concatenate it with the previous one */
			cur_len += s_length;
		} else {
			/* Otherwise start the next output segment */
			if (i > 0)
				cur = sg_next(cur);
			cur_len = s_length;
			count++;

			sg_dma_address(cur) = dma_addr + s_iova_off;
		}

		sg_dma_len(cur) = cur_len;
		dma_addr += s_iova_len;

		if (s_length + s_iova_off < s_iova_len)
			cur_len = 0;
	}
	return count;
}

/*
 * If mapping failed, then just restore the original list,
 * but making sure the DMA fields are invalidated.
 */
static void __invalidate_sg(struct scatterlist *sg, int nents)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (sg_dma_address(s) != IOMMU_MAPPING_ERROR)
			s->offset += sg_dma_address(s);
		if (sg_dma_len(s))
			s->length = sg_dma_len(s);
		sg_dma_address(s) = IOMMU_MAPPING_ERROR;
		sg_dma_len(s) = 0;
	}
}
/*
 * The DMA API client is passing in a scatterlist which could describe
 * any old buffer layout, but the IOMMU API requires everything to be
 * aligned to IOMMU pages. Hence the need for this complicated bit of
 * impedance-matching, to be able to hand off a suitably-aligned list,
 * but still preserve the original offsets and sizes for the caller.
 */
int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, int prot)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	struct scatterlist *s, *prev = NULL;
	dma_addr_t iova;
	size_t iova_len = 0;
	unsigned long mask = dma_get_seg_boundary(dev);
	int i;

	/*
	 * Work out how much IOVA space we need, and align the segments to
	 * IOVA granules for the IOMMU driver to handle. With some clever
	 * trickery we can modify the list in-place, but reversibly, by
	 * stashing the unaligned parts in the as-yet-unused DMA fields.
	 */
	for_each_sg(sg, s, nents, i) {
		size_t s_iova_off = iova_offset(iovad, s->offset);
		size_t s_length = s->length;
		size_t pad_len = (mask - iova_len + 1) & mask;

		sg_dma_address(s) = s_iova_off;
		sg_dma_len(s) = s_length;
		s->offset -= s_iova_off;
		s_length = iova_align(iovad, s_length + s_iova_off);
		s->length = s_length;

		/*
		 * Due to the alignment of our single IOVA allocation, we can
		 * depend on these assumptions about the segment boundary mask:
		 * - If mask size >= IOVA size, then the IOVA range cannot
		 *   possibly fall across a boundary, so we don't care.
		 * - If mask size < IOVA size, then the IOVA range must start
		 *   exactly on a boundary, therefore we can lay things out
		 *   based purely on segment lengths without needing to know
		 *   the actual addresses beforehand.
		 * - The mask must be a power of 2, so pad_len == 0 if
		 *   iova_len == 0, thus we cannot dereference prev the first
		 *   time through here (i.e. before it has a meaningful value).
		 */
		if (pad_len && pad_len < s_length - 1) {
			prev->length += pad_len;
			iova_len += pad_len;
		}

		iova_len += s_length;
		prev = s;
	}

	iova = iommu_dma_alloc_iova(domain, iova_len, dma_get_mask(dev), dev);
	if (!iova)
		goto out_restore_sg;

	/*
	 * We'll leave any physical concatenation to the IOMMU driver's
	 * implementation - it knows better than we do.
	 */
	if (iommu_map_sg(domain, iova, sg, nents, prot) < iova_len)
		goto out_free_iova;

	return __finalise_sg(dev, sg, nents, iova);

out_free_iova:
	iommu_dma_free_iova(cookie, iova, iova_len);
out_restore_sg:
	__invalidate_sg(sg, nents);
	return 0;
}
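
/*
 * Worked example (illustrative): with 4K IOVA granules, a two-entry list of
 * {offset = 0x100, length = 0xe00} and {offset = 0, length = 0x2000} is
 * temporarily rewritten so that the first segment covers a whole granule
 * (offset 0, length 0x1000) with the original 0x100/0xe00 stashed in its DMA
 * fields; a single 0x3000-byte IOVA range then backs both segments, and
 * __finalise_sg() restores the caller-visible offsets and lengths while
 * filling in the real dma_address/dma_len values.
 */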
void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir, unsigned long attrs)
{
	dma_addr_t start, end;
	struct scatterlist *tmp;
	int i;

	/*
	 * The scatterlist segments are mapped into a single
	 * contiguous IOVA allocation, so this is incredibly easy.
	 */
	start = sg_dma_address(sg);
	for_each_sg(sg_next(sg), tmp, nents - 1, i) {
		if (sg_dma_len(tmp) == 0)
			break;
		sg = tmp;
	}
	end = sg_dma_address(sg) + sg_dma_len(sg);
	__iommu_dma_unmap(iommu_get_dma_domain(dev), start, end - start);
}

dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	return __iommu_dma_map(dev, phys, size,
			dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO,
			iommu_get_dma_domain(dev));
}

void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	__iommu_dma_unmap(iommu_get_dma_domain(dev), handle, size);
}

int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dma_addr == IOMMU_MAPPING_ERROR;
}
static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
		phys_addr_t msi_addr, struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iommu_dma_msi_page *msi_page;
	dma_addr_t iova;
	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
	size_t size = cookie_msi_granule(cookie);

	msi_addr &= ~(phys_addr_t)(size - 1);
	list_for_each_entry(msi_page, &cookie->msi_page_list, list)
		if (msi_page->phys == msi_addr)
			return msi_page;

	msi_page = kzalloc(sizeof(*msi_page), GFP_ATOMIC);
	if (!msi_page)
		return NULL;

	iova = __iommu_dma_map(dev, msi_addr, size, prot, domain);
	if (iommu_dma_mapping_error(dev, iova))
		goto out_free_page;

	INIT_LIST_HEAD(&msi_page->list);
	msi_page->phys = msi_addr;
	msi_page->iova = iova;
	list_add(&msi_page->list, &cookie->msi_page_list);
	return msi_page;

out_free_page:
	kfree(msi_page);
	return NULL;
}

void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg)
{
	struct device *dev = msi_desc_to_dev(irq_get_msi_desc(irq));
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iommu_dma_cookie *cookie;
	struct iommu_dma_msi_page *msi_page;
	phys_addr_t msi_addr = (u64)msg->address_hi << 32 | msg->address_lo;
	unsigned long flags;

	if (!domain || !domain->iova_cookie)
		return;

	cookie = domain->iova_cookie;

	/*
	 * We disable IRQs to rule out a possible inversion against
	 * irq_desc_lock if, say, someone tries to retarget the affinity
	 * of an MSI from within an IPI handler.
	 */
	spin_lock_irqsave(&cookie->msi_lock, flags);
	msi_page = iommu_dma_get_msi_page(dev, msi_addr, domain);
	spin_unlock_irqrestore(&cookie->msi_lock, flags);

	if (WARN_ON(!msi_page)) {
		/*
		 * We're called from a void callback, so the best we can do is
		 * 'fail' by filling the message with obviously bogus values.
		 * Since we got this far due to an IOMMU being present, it's
		 * not like the existing address would have worked anyway...
		 */
		msg->address_hi = ~0U;
		msg->address_lo = ~0U;
		msg->data = ~0U;
	} else {
		msg->address_hi = upper_32_bits(msi_page->iova);
		msg->address_lo &= cookie_msi_granule(cookie) - 1;
		msg->address_lo += lower_32_bits(msi_page->iova);
	}
}