// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2009-2010 PetaLogix
 * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
 *
 * Provide default implementations of the DMA mapping callbacks for
 * directly mapped busses.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/dma-debug.h>
#include <linux/export.h>
#include <linux/bug.h>
#include <asm/cacheflush.h>
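
/*
 * With NOT_COHERENT_CACHE defined, coherent allocations are served by
 * consistent_alloc()/consistent_free(), which hand out uncached mappings;
 * streaming mappings are kept coherent by the explicit cache maintenance
 * in __dma_sync() below.
 */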
#define NOT_COHERENT_CACHE

static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t flag,
				       unsigned long attrs)
{
#ifdef NOT_COHERENT_CACHE
	return consistent_alloc(flag, size, dma_handle);
#else
	void *ret;
	struct page *page;
	int node = dev_to_node(dev);

	/* ignore region specifiers */
	flag &= ~(__GFP_HIGHMEM);

	page = alloc_pages_node(node, flag, get_order(size));
	if (page == NULL)
		return NULL;
	ret = page_address(page);
	memset(ret, 0, size);
	*dma_handle = virt_to_phys(ret);

	return ret;
#endif
}

static void dma_direct_free_coherent(struct device *dev, size_t size,
				     void *vaddr, dma_addr_t dma_handle,
				     unsigned long attrs)
{
#ifdef NOT_COHERENT_CACHE
	consistent_free(size, vaddr);
#else
	free_pages((unsigned long)vaddr, get_order(size));
#endif
}
static inline void __dma_sync(unsigned long paddr,
			      size_t size, enum dma_data_direction direction)
{
	switch (direction) {
	case DMA_TO_DEVICE:
	case DMA_BIDIRECTIONAL:
		flush_dcache_range(paddr, paddr + size);
		break;
	case DMA_FROM_DEVICE:
		invalidate_dcache_range(paddr, paddr + size);
		break;
	default:
		BUG();
	}
}
static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
			     int nents, enum dma_data_direction direction,
			     unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	/* FIXME this part of code is untested */
	for_each_sg(sgl, sg, nents, i) {
		sg->dma_address = sg_phys(sg);

		if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
			continue;

		__dma_sync(sg_phys(sg), sg->length, direction);
	}

	return nents;
}

static int dma_direct_dma_supported(struct device *dev, u64 mask)
{
	return 1;
}
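
/*
 * Single-page mapping: the DMA address is the page's physical address plus
 * the offset; the cache is synced unless DMA_ATTR_SKIP_CPU_SYNC is set.
 */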
static inline dma_addr_t dma_direct_map_page(struct device *dev,
					     struct page *page,
					     unsigned long offset,
					     size_t size,
					     enum dma_data_direction direction,
					     unsigned long attrs)
{
	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		__dma_sync(page_to_phys(page) + offset, size, direction);

	return page_to_phys(page) + offset;
}

static inline void dma_direct_unmap_page(struct device *dev,
					 dma_addr_t dma_address,
					 size_t size,
					 enum dma_data_direction direction,
					 unsigned long attrs)
{
	/*
	 * No cache cleanup is needed here; dma_address is already a
	 * physical address, so it can be passed to __dma_sync() directly.
	 */
	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		__dma_sync(dma_address, size, direction);
}

static inline void
dma_direct_sync_single_for_cpu(struct device *dev,
			       dma_addr_t dma_handle, size_t size,
			       enum dma_data_direction direction)
{
	/*
	 * It's pointless to flush the cache as the memory segment
	 * is given to the CPU.
	 */
	if (direction == DMA_FROM_DEVICE)
		__dma_sync(dma_handle, size, direction);
}

static inline void
dma_direct_sync_single_for_device(struct device *dev,
				  dma_addr_t dma_handle, size_t size,
				  enum dma_data_direction direction)
{
	/*
	 * It's pointless to invalidate the cache if the device isn't
	 * supposed to write to the relevant region.
	 */
	if (direction == DMA_TO_DEVICE)
		__dma_sync(dma_handle, size, direction);
}

static inline void
dma_direct_sync_sg_for_cpu(struct device *dev,
			   struct scatterlist *sgl, int nents,
			   enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	/* FIXME this part of code is untested */
	if (direction == DMA_FROM_DEVICE)
		for_each_sg(sgl, sg, nents, i)
			__dma_sync(sg->dma_address, sg->length, direction);
}

static inline void
dma_direct_sync_sg_for_device(struct device *dev,
			      struct scatterlist *sgl, int nents,
			      enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	/* FIXME this part of code is untested */
	if (direction == DMA_TO_DEVICE)
		for_each_sg(sgl, sg, nents, i)
			__dma_sync(sg->dma_address, sg->length, direction);
}

static int dma_direct_mmap_coherent(struct device *dev,
				    struct vm_area_struct *vma,
				    void *cpu_addr, dma_addr_t handle,
				    size_t size, unsigned long attrs)
{
#ifdef CONFIG_MMU
	unsigned long user_count = vma_pages(vma);
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long off = vma->vm_pgoff;
	unsigned long pfn;

	if (off >= count || user_count > (count - off))
		return -ENXIO;

#ifdef NOT_COHERENT_CACHE
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	pfn = consistent_virt_to_pfn(cpu_addr);
#else
	pfn = virt_to_pfn(cpu_addr);
#endif
	return remap_pfn_range(vma, vma->vm_start, pfn + off,
			       vma->vm_end - vma->vm_start, vma->vm_page_prot);
#else
	return -ENXIO;
#endif
}
const struct dma_map_ops dma_direct_ops = {
	.alloc			= dma_direct_alloc_coherent,
	.free			= dma_direct_free_coherent,
	.mmap			= dma_direct_mmap_coherent,
	.map_sg			= dma_direct_map_sg,
	.dma_supported		= dma_direct_dma_supported,
	.map_page		= dma_direct_map_page,
	.unmap_page		= dma_direct_unmap_page,
	.sync_single_for_cpu	= dma_direct_sync_single_for_cpu,
	.sync_single_for_device	= dma_direct_sync_single_for_device,
	.sync_sg_for_cpu	= dma_direct_sync_sg_for_cpu,
	.sync_sg_for_device	= dma_direct_sync_sg_for_device,
};
EXPORT_SYMBOL(dma_direct_ops);

/* Number of entries preallocated for DMA-API debugging */
#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)

static int __init dma_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

	return 0;
}
fs_initcall(dma_init);