// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2009-2010 PetaLogix
 * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
 *
 * Provide default implementations of the DMA mapping callbacks for
 * directly mapped busses.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/dma-debug.h>
#include <linux/export.h>
#include <linux/bug.h>

#define NOT_COHERENT_CACHE
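
/*
 * Note: the data caches here are not coherent with DMA, so "coherent"
 * buffers are carved out of an uncached mapping by the architecture's
 * consistent_alloc()/consistent_free() helpers rather than coming
 * straight from the page allocator.
 */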
static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t flag,
				       unsigned long attrs)
{
#ifdef NOT_COHERENT_CACHE
	return consistent_alloc(flag, size, dma_handle);
#else
	void *ret;
	struct page *page;
	int node = dev_to_node(dev);

	/* ignore region specifiers */
	flag &= ~(__GFP_HIGHMEM);

	page = alloc_pages_node(node, flag, get_order(size));
	if (page == NULL)
		return NULL;
	ret = page_address(page);
	memset(ret, 0, size);
	*dma_handle = virt_to_phys(ret);

	return ret;
#endif
}
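
/*
 * Release a buffer from dma_direct_alloc_coherent(); the free path
 * must match whichever allocator the corresponding alloc path used.
 */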
static void dma_direct_free_coherent(struct device *dev, size_t size,
				     void *vaddr, dma_addr_t dma_handle,
				     unsigned long attrs)
{
#ifdef NOT_COHERENT_CACHE
	consistent_free(size, vaddr);
#else
	free_pages((unsigned long)vaddr, get_order(size));
#endif
}
static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
			     int nents, enum dma_data_direction direction,
			     unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	/* FIXME this part of code is untested */
	for_each_sg(sgl, sg, nents, i) {
		sg->dma_address = sg_phys(sg);

		if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
			continue;

		__dma_sync(sg_phys(sg), sg->length, direction);
	}

	return nents;
}
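
/* A directly mapped bus can reach all of system memory. */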
static int dma_direct_dma_supported(struct device *dev, u64 mask)
{
	return 1;
}
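
/*
 * Streaming mapping of a single page: the DMA address is the physical
 * address of the page plus the offset; the cache is synced up front
 * unless DMA_ATTR_SKIP_CPU_SYNC was requested.
 */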
static inline dma_addr_t dma_direct_map_page(struct device *dev,
					     struct page *page,
					     unsigned long offset,
					     size_t size,
					     enum dma_data_direction direction,
					     unsigned long attrs)
{
	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		__dma_sync(page_to_phys(page) + offset, size, direction);
	return page_to_phys(page) + offset;
}

static inline void dma_direct_unmap_page(struct device *dev,
					 dma_addr_t dma_address,
					 size_t size,
					 enum dma_data_direction direction,
					 unsigned long attrs)
{
	/*
	 * No separate cache cleanup is necessary here: dma_address is
	 * already a physical address, so it can be handed straight to
	 * __dma_sync (unlike __dma_sync_page, which converts a virtual
	 * address with __virt_to_phys first).
	 */
	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		__dma_sync(dma_address, size, direction);
}
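
/*
 * The single-buffer sync hooks only touch the cache in the direction
 * that matters: before the CPU reads a buffer the device wrote
 * (DMA_FROM_DEVICE), or before the device reads a buffer the CPU
 * wrote (DMA_TO_DEVICE).
 */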
static inline void
dma_direct_sync_single_for_cpu(struct device *dev,
			       dma_addr_t dma_handle, size_t size,
			       enum dma_data_direction direction)
{
	/*
	 * It's pointless to flush the cache as the memory segment
	 * is given to the CPU.
	 */
	if (direction == DMA_FROM_DEVICE)
		__dma_sync(dma_handle, size, direction);
}

static inline void
dma_direct_sync_single_for_device(struct device *dev,
				  dma_addr_t dma_handle, size_t size,
				  enum dma_data_direction direction)
{
	/*
	 * It's pointless to invalidate the cache if the device isn't
	 * supposed to write to the relevant region.
	 */
	if (direction == DMA_TO_DEVICE)
		__dma_sync(dma_handle, size, direction);
}

static inline void
dma_direct_sync_sg_for_cpu(struct device *dev,
			   struct scatterlist *sgl, int nents,
			   enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	/* FIXME this part of code is untested */
	if (direction == DMA_FROM_DEVICE)
		for_each_sg(sgl, sg, nents, i)
			__dma_sync(sg->dma_address, sg->length, direction);
}

static inline void
dma_direct_sync_sg_for_device(struct device *dev,
			      struct scatterlist *sgl, int nents,
			      enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	/* FIXME this part of code is untested */
	if (direction == DMA_TO_DEVICE)
		for_each_sg(sgl, sg, nents, i)
			__dma_sync(sg->dma_address, sg->length, direction);
}
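
/*
 * Map a coherent buffer into user space. The vma offset is validated
 * against the buffer size, and with a non-coherent cache the user
 * mapping is made uncached to match the kernel-side consistent
 * mapping.
 */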
static int dma_direct_mmap_coherent(struct device *dev,
				    struct vm_area_struct *vma,
				    void *cpu_addr, dma_addr_t handle,
				    size_t size, unsigned long attrs)
{
#ifdef CONFIG_MMU
	unsigned long user_count = vma_pages(vma);
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long off = vma->vm_pgoff;
	unsigned long pfn;

	if (off >= count || user_count > (count - off))
		return -ENXIO;

#ifdef NOT_COHERENT_CACHE
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	pfn = consistent_virt_to_pfn(cpu_addr);
#else
	pfn = virt_to_pfn(cpu_addr);
#endif
	return remap_pfn_range(vma, vma->vm_start, pfn + off,
			       vma->vm_end - vma->vm_start, vma->vm_page_prot);
#else
	return -ENXIO;
#endif
}
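
/*
 * The hooks above are collected in a dma_map_ops table; the generic
 * DMA API dispatches through it for devices on the directly mapped
 * bus (typically wired up via the architecture's get_arch_dma_ops()).
 */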
const struct dma_map_ops dma_direct_ops = {
	.alloc			= dma_direct_alloc_coherent,
	.free			= dma_direct_free_coherent,
	.mmap			= dma_direct_mmap_coherent,
	.map_sg			= dma_direct_map_sg,
	.dma_supported		= dma_direct_dma_supported,
	.map_page		= dma_direct_map_page,
	.unmap_page		= dma_direct_unmap_page,
	.sync_single_for_cpu	= dma_direct_sync_single_for_cpu,
	.sync_single_for_device	= dma_direct_sync_single_for_device,
	.sync_sg_for_cpu	= dma_direct_sync_sg_for_cpu,
	.sync_sg_for_device	= dma_direct_sync_sg_for_device,
};
EXPORT_SYMBOL(dma_direct_ops);
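
/*
 * Driver-side usage sketch (illustrative only, not part of this file):
 * a driver obtains a coherent buffer through the generic DMA API,
 * which dispatches into the ops table above.
 *
 *	void *buf;
 *	dma_addr_t dma;
 *
 *	buf = dma_alloc_coherent(dev, size, &dma, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(dev, size, buf, dma);
 */
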
/* Number of entries preallocated for DMA-API debugging */
#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)

static int __init dma_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

	return 0;
}
fs_initcall(dma_init);