// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2000  Ani Joshi <ajoshi@unixbox.com>
 * Copyright (C) 2000, 2001, 06  Ralf Baechle <ralf@linux-mips.org>
 * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
 */
#include <linux/dma-direct.h>
#include <linux/dma-noncoherent.h>
#include <linux/dma-contiguous.h>
#include <linux/highmem.h>

#include <asm/cache.h>
#include <asm/cpu-type.h>
#include <asm/dma-coherence.h>
#include <asm/io.h>
/*
 * The affected CPUs below in 'cpu_needs_post_dma_flush()' can speculatively
 * fill random cachelines with stale data at any time, requiring an extra
 * flush post-DMA.
 *
 * Warning on the terminology - Linux calls an uncached area coherent; MIPS
 * terminology calls memory areas with hardware maintained coherency coherent.
 *
 * Note that the R14000 and R16000 should also be checked for in this condition.
 * However this function is only called on non-I/O-coherent systems and only the
 * R10000 and R12000 are used in such systems, the SGI IP28 Indigo² rsp.
 * SGI IP32 aka O2.
 */
static inline bool cpu_needs_post_dma_flush(void)
{
	switch (boot_cpu_type()) {
	case CPU_R10000:
	case CPU_R12000:
	case CPU_BMIPS5000:
		return true;
	default:
		/*
		 * Presence of MAARs suggests that the CPU supports
		 * speculatively prefetching data, and therefore requires
		 * the post-DMA flush/invalidate.
		 */
		return cpu_has_maar;
	}
}
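
/*
 * Make a freshly allocated "coherent" buffer cache-clean: write back and
 * invalidate its cachelines so nothing dirty can later be evicted over
 * data the device has written. The dma-direct allocator calls this before
 * handing the buffer out through its uncached alias.
 */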
void arch_dma_prep_coherent(struct page *page, size_t size)
{
	dma_cache_wback_inv((unsigned long)page_address(page), size);
}
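
/*
 * Translate between the cached kernel view of a coherent allocation and
 * its uncached alias. On MIPS the uncached window (KSEG1, rsp. uncached
 * XKPHYS) starts at UNCAC_BASE, so both directions are plain address
 * arithmetic.
 */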
void *uncached_kernel_address(void *addr)
{
	return (void *)(__pa(addr) + UNCAC_BASE);
}

void *cached_kernel_address(void *addr)
{
	return __va(addr) - UNCAC_BASE;
}
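
/*
 * Cache maintenance for one virtually contiguous range: write back dirty
 * lines before the device reads (DMA_TO_DEVICE), invalidate before the
 * CPU reads what the device wrote (DMA_FROM_DEVICE), and do both for
 * DMA_BIDIRECTIONAL.
 */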
static inline void dma_sync_virt(void *addr, size_t size,
		enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		dma_cache_wback((unsigned long)addr, size);
		break;

	case DMA_FROM_DEVICE:
		dma_cache_inv((unsigned long)addr, size);
		break;

	case DMA_BIDIRECTIONAL:
		dma_cache_wback_inv((unsigned long)addr, size);
		break;

	default:
		BUG();
	}
}
/*
 * A single sg entry may refer to multiple physically contiguous pages. But
 * we still need to process highmem pages individually. If highmem is not
 * configured then the bulk of this loop gets optimized out.
 */
static inline void dma_sync_phys(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
	struct page *page = pfn_to_page(paddr >> PAGE_SHIFT);
	unsigned long offset = paddr & ~PAGE_MASK;
	size_t left = size;

	do {
		size_t len = left;

		if (PageHighMem(page)) {
			void *addr;

			if (offset + len > PAGE_SIZE)
				len = PAGE_SIZE - offset;

			addr = kmap_atomic(page);
			dma_sync_virt(addr + offset, len, dir);
			kunmap_atomic(addr);
		} else
			dma_sync_virt(page_address(page) + offset, size, dir);
		offset = 0;
		page++;
		left -= len;
	} while (left);
}
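
/*
 * Transfer ownership of the buffer to the device. On a non-I/O-coherent
 * system the caches must be maintained here for every direction.
 */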
void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
	dma_sync_phys(paddr, size, dir);
}
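
/*
 * Transfer ownership back to the CPU. Only CPUs that can speculatively
 * fill cachelines while the device owns the buffer (see
 * cpu_needs_post_dma_flush() above) need a second invalidation here.
 */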
#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU
void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
	if (cpu_needs_post_dma_flush())
		dma_sync_phys(paddr, size, dir);
}
#endif
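
/*
 * Backend for dma_cache_sync() on memory from non-coherent allocations;
 * the caller hands in a kernel virtual address, so no highmem handling
 * is needed.
 */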
void arch_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	dma_sync_virt(vaddr, size, direction);
}
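
/*
 * With CONFIG_DMA_PERDEV_COHERENT, I/O coherence is a per-device property
 * set up by platform code; record it so the DMA core picks the coherent
 * or non-coherent paths for this device.
 */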
#ifdef CONFIG_DMA_PERDEV_COHERENT
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
		const struct iommu_ops *iommu, bool coherent)
{
	dev->dma_coherent = coherent;
}
#endif
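
/*
 * Typical streaming-DMA flow reaching the hooks above (sketch; the
 * driver-side buffer and device are illustrative, not from this file):
 *
 *	void *buf = kmalloc(len, GFP_KERNEL);
 *	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
 *		-> arch_sync_dma_for_device(): invalidate the cachelines
 *	... device DMAs into buf ...
 *	dma_unmap_single(dev, dma, len, DMA_FROM_DEVICE);
 *		-> arch_sync_dma_for_cpu(): post-DMA flush where needed
 */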