/*
 * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
 *
 * Provide default implementations of the DMA mapping callbacks for
 * directly mapped buses.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dma-debug.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/export.h>
#include <linux/pci.h>

#include <asm/vio.h>
#include <asm/bug.h>
#include <asm/machdep.h>
#include <asm/swiotlb.h>
#include <asm/iommu.h>

/*
 * Generic direct DMA implementation
 *
 * This implementation supports a per-device offset that can be applied if
 * the address at which memory is visible to devices is not 0. Platform code
 * can set archdata.dma_data to an unsigned long holding the offset. By
 * default the offset is PCI_DRAM_OFFSET.
 */
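
/*
 * Return the highest page frame number the device can reach through its
 * coherent DMA mask; when the device is routed through swiotlb, the
 * platform's max_direct_dma_addr caps it further.
 */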
static u64 __maybe_unused get_pfn_limit(struct device *dev)
{
	u64 pfn = (dev->coherent_dma_mask >> PAGE_SHIFT) + 1;
	struct dev_archdata __maybe_unused *sd = &dev->archdata;

#ifdef CONFIG_SWIOTLB
	if (sd->max_direct_dma_addr && sd->dma_ops == &swiotlb_dma_ops)
		pfn = min_t(u64, pfn, sd->max_direct_dma_addr >> PAGE_SHIFT);
#endif

	return pfn;
}
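
/*
 * On 64-bit, the direct ops are usable only if the device's mask covers
 * the last byte of RAM once the per-device offset is applied; 32-bit
 * platforms always qualify.
 */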
static int dma_direct_dma_supported(struct device *dev, u64 mask)
{
#ifdef CONFIG_PPC64
	u64 limit = get_dma_offset(dev) + (memblock_end_of_DRAM() - 1);

	/* Limit fits in the mask, we are good */
	if (mask >= limit)
		return 1;

#ifdef CONFIG_FSL_SOC
	/* Freescale gets another chance via ZONE_DMA/ZONE_DMA32, however
	 * that will have to be refined if/when they support iommus
	 */
	return 1;
#endif
	return 0;
#else
	return 1;
#endif
}
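
/*
 * Allocate zeroed memory the device can address directly. Non-coherent
 * platforms take an uncached mapping from __dma_alloc_coherent(); everyone
 * else grabs pages from the page allocator and reports the physical
 * address plus the per-device offset as the DMA handle.
 */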
void *__dma_direct_alloc_coherent(struct device *dev, size_t size,
				  dma_addr_t *dma_handle, gfp_t flag,
				  struct dma_attrs *attrs)
{
	void *ret;
#ifdef CONFIG_NOT_COHERENT_CACHE
	ret = __dma_alloc_coherent(dev, size, dma_handle, flag);
	if (ret == NULL)
		return NULL;
	*dma_handle += get_dma_offset(dev);
	return ret;
#else
	struct page *page;
	int node = dev_to_node(dev);
#ifdef CONFIG_FSL_SOC
	u64 pfn = get_pfn_limit(dev);
	int zone;

	/*
	 * This code should be OK on other platforms, but we have drivers that
	 * don't set coherent_dma_mask. As a workaround we just ifdef it. This
	 * whole routine needs some serious cleanup.
	 */

	zone = dma_pfn_limit_to_zone(pfn);
	if (zone < 0) {
		dev_err(dev, "%s: No suitable zone for pfn %#llx\n",
			__func__, pfn);
		return NULL;
	}

	switch (zone) {
	case ZONE_DMA:
		flag |= GFP_DMA;
		break;
#ifdef CONFIG_ZONE_DMA32
	case ZONE_DMA32:
		flag |= GFP_DMA32;
		break;
#endif
	}
#endif /* CONFIG_FSL_SOC */

	/* ignore region specifiers */
	flag &= ~(__GFP_HIGHMEM);

	page = alloc_pages_node(node, flag, get_order(size));
	if (page == NULL)
		return NULL;
	ret = page_address(page);
	memset(ret, 0, size);
	*dma_handle = __pa(ret) + get_dma_offset(dev);

	return ret;
#endif
}
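
/* Counterpart to __dma_direct_alloc_coherent() for both cache models. */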
void __dma_direct_free_coherent(struct device *dev, size_t size,
				void *vaddr, dma_addr_t dma_handle,
				struct dma_attrs *attrs)
{
#ifdef CONFIG_NOT_COHERENT_CACHE
	__dma_free_coherent(size, vaddr);
#else
	free_pages((unsigned long)vaddr, get_order(size));
#endif
}
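
/*
 * Front end for the direct ops: fall back to the iommu when the device's
 * coherent mask is too small for the direct path.
 */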
static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t flag,
				       struct dma_attrs *attrs)
{
	struct iommu_table *iommu;

	/* The coherent mask may be smaller than the real mask, check if
	 * we can really use the direct ops
	 */
	if (dma_direct_dma_supported(dev, dev->coherent_dma_mask))
		return __dma_direct_alloc_coherent(dev, size, dma_handle,
						   flag, attrs);

	/* OK, we can't... do we have an iommu? If not, fail. */
	iommu = get_iommu_table_base(dev);
	if (!iommu)
		return NULL;

	/* Try to use the iommu */
	return iommu_alloc_coherent(dev, iommu, size, dma_handle,
				    dev->coherent_dma_mask, flag,
				    dev_to_node(dev));
}

static void dma_direct_free_coherent(struct device *dev, size_t size,
				     void *vaddr, dma_addr_t dma_handle,
				     struct dma_attrs *attrs)
{
	struct iommu_table *iommu;

	/* See comments in dma_direct_alloc_coherent() */
	if (dma_direct_dma_supported(dev, dev->coherent_dma_mask))
		return __dma_direct_free_coherent(dev, size, vaddr, dma_handle,
						  attrs);

	/* Maybe we used an iommu ... */
	iommu = get_iommu_table_base(dev);

	/* If we hit this, we should never have allocated in the first
	 * place, so how come we are freeing?
	 */
	if (WARN_ON(!iommu))
		return;
	iommu_free_coherent(iommu, size, vaddr, dma_handle);
}
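
/*
 * Map a coherent buffer into userspace. Non-coherent platforms must hand
 * out the uncached alias of the buffer, hence the pgprot change.
 */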
int dma_direct_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
			     void *cpu_addr, dma_addr_t handle, size_t size,
			     struct dma_attrs *attrs)
{
	unsigned long pfn;

#ifdef CONFIG_NOT_COHERENT_CACHE
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	pfn = __dma_get_coherent_pfn((unsigned long)cpu_addr);
#else
	pfn = page_to_pfn(virt_to_page(cpu_addr));
#endif
	return remap_pfn_range(vma, vma->vm_start,
			       pfn + vma->vm_pgoff,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}
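
/*
 * Direct scatterlist mapping needs no translation setup: each DMA address
 * is simply the segment's physical address plus the device offset, with a
 * cache sync on non-coherent CPUs.
 */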
static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
			     int nents, enum dma_data_direction direction,
			     struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		sg->dma_address = sg_phys(sg) + get_dma_offset(dev);
		sg->dma_length = sg->length;
		__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
	}

	return nents;
}
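
/* Nothing was set up at map time, so there is nothing to tear down. */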
static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sg,
				int nents, enum dma_data_direction direction,
				struct dma_attrs *attrs)
{
}
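
/*
 * The required mask must cover the highest address the device could be
 * asked to reach: end of DRAM plus the device offset, rounded up to a
 * full power-of-two mask. For example, with 4GB of RAM and no offset,
 * end is 0x100000000, so the reported mask is 0x1ffffffff (33 bits).
 */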
static u64 dma_direct_get_required_mask(struct device *dev)
{
	u64 end, mask;

	end = memblock_end_of_DRAM() + get_dma_offset(dev);

	mask = 1ULL << (fls64(end) - 1);
	mask += mask - 1;

	return mask;
}

static inline dma_addr_t dma_direct_map_page(struct device *dev,
					     struct page *page,
					     unsigned long offset,
					     size_t size,
					     enum dma_data_direction dir,
					     struct dma_attrs *attrs)
{
	BUG_ON(dir == DMA_NONE);
	__dma_sync_page(page, offset, size, dir);
	return page_to_phys(page) + offset + get_dma_offset(dev);
}
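
/* A direct mapping holds no resources, so unmapping a page is a no-op. */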
static inline void dma_direct_unmap_page(struct device *dev,
					 dma_addr_t dma_address,
					 size_t size,
					 enum dma_data_direction direction,
					 struct dma_attrs *attrs)
{
}

#ifdef CONFIG_NOT_COHERENT_CACHE
static inline void dma_direct_sync_sg(struct device *dev,
		struct scatterlist *sgl, int nents,
		enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i)
		__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
}

static inline void dma_direct_sync_single(struct device *dev,
					  dma_addr_t dma_handle, size_t size,
					  enum dma_data_direction direction)
{
	__dma_sync(bus_to_virt(dma_handle), size, direction);
}
#endif
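
/*
 * The cache-sync hooks are only needed (and only built) when the CPU is
 * not DMA-coherent.
 */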
struct dma_map_ops dma_direct_ops = {
	.alloc				= dma_direct_alloc_coherent,
	.free				= dma_direct_free_coherent,
	.mmap				= dma_direct_mmap_coherent,
	.map_sg				= dma_direct_map_sg,
	.unmap_sg			= dma_direct_unmap_sg,
	.dma_supported			= dma_direct_dma_supported,
	.map_page			= dma_direct_map_page,
	.unmap_page			= dma_direct_unmap_page,
	.get_required_mask		= dma_direct_get_required_mask,
#ifdef CONFIG_NOT_COHERENT_CACHE
	.sync_single_for_cpu		= dma_direct_sync_single,
	.sync_single_for_device		= dma_direct_sync_single,
	.sync_sg_for_cpu		= dma_direct_sync_sg,
	.sync_sg_for_device		= dma_direct_sync_sg,
#endif
};
EXPORT_SYMBOL(dma_direct_ops);

int dma_set_coherent_mask(struct device *dev, u64 mask)
{
	if (!dma_supported(dev, mask)) {
		/*
		 * We need to special case the direct DMA ops which can
		 * support a fallback for coherent allocations. There
		 * is no dma_op->set_coherent_mask() so we have to do
		 * things the hard way:
		 */
		if (get_dma_ops(dev) != &dma_direct_ops ||
		    get_iommu_table_base(dev) == NULL ||
		    !dma_iommu_dma_supported(dev, mask))
			return -EIO;
	}

	dev->coherent_dma_mask = mask;
	return 0;
}
EXPORT_SYMBOL_GPL(dma_set_coherent_mask);
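
/* Number of dma-debug entries preallocated in dma_init() below. */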
#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)

int __dma_set_mask(struct device *dev, u64 dma_mask)
{
	struct dma_map_ops *dma_ops = get_dma_ops(dev);

	if ((dma_ops != NULL) && (dma_ops->set_dma_mask != NULL))
		return dma_ops->set_dma_mask(dev, dma_mask);
	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
		return -EIO;
	*dev->dma_mask = dma_mask;
	return 0;
}
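
/*
 * Setting the DMA mask tries, in order: the machine-specific override
 * (ppc_md), the per-host-bridge hook for PCI devices, then the device's
 * own dma_map_ops via __dma_set_mask().
 */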
int dma_set_mask(struct device *dev, u64 dma_mask)
{
	if (ppc_md.dma_set_mask)
		return ppc_md.dma_set_mask(dev, dma_mask);

	if (dev_is_pci(dev)) {
		struct pci_dev *pdev = to_pci_dev(dev);
		struct pci_controller *phb = pci_bus_to_host(pdev->bus);

		if (phb->controller_ops.dma_set_mask)
			return phb->controller_ops.dma_set_mask(pdev, dma_mask);
	}

	return __dma_set_mask(dev, dma_mask);
}
EXPORT_SYMBOL(dma_set_mask);

u64 __dma_get_required_mask(struct device *dev)
{
	struct dma_map_ops *dma_ops = get_dma_ops(dev);

	if (unlikely(dma_ops == NULL))
		return 0;

	if (dma_ops->get_required_mask)
		return dma_ops->get_required_mask(dev);

	return DMA_BIT_MASK(8 * sizeof(dma_addr_t));
}

u64 dma_get_required_mask(struct device *dev)
{
	if (ppc_md.dma_get_required_mask)
		return ppc_md.dma_get_required_mask(dev);

	if (dev_is_pci(dev)) {
		struct pci_dev *pdev = to_pci_dev(dev);
		struct pci_controller *phb = pci_bus_to_host(pdev->bus);

		if (phb->controller_ops.dma_get_required_mask)
			return phb->controller_ops.dma_get_required_mask(pdev);
	}

	return __dma_get_required_mask(dev);
}
EXPORT_SYMBOL_GPL(dma_get_required_mask);
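
/* Register the buses we know about with the dma-debug infrastructure. */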
static int __init dma_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
#ifdef CONFIG_PCI
	dma_debug_add_bus(&pci_bus_type);
#endif
#ifdef CONFIG_IBMVIO
	dma_debug_add_bus(&vio_bus_type);
#endif

	return 0;
}
fs_initcall(dma_init);