/*
 * Coherent per-device memory handling.
 */
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>

struct dma_coherent_mem {
        void            *virt_base;
        dma_addr_t      device_base;
        unsigned long   pfn_base;
        int             size;
        int             flags;
        unsigned long   *bitmap;
        spinlock_t      spinlock;
        bool            use_dev_dma_pfn_offset;
};

static struct dma_coherent_mem *dma_coherent_default_memory __ro_after_init;

static inline struct dma_coherent_mem *dev_get_coherent_memory(struct device *dev)
{
        if (dev && dev->dma_mem)
                return dev->dma_mem;
        return NULL;
}

static inline dma_addr_t dma_get_device_base(struct device *dev,
                                             struct dma_coherent_mem *mem)
{
        /* Translate the pool's PFN base into a bus address for this device. */
        if (mem->use_dev_dma_pfn_offset)
                return (mem->pfn_base - dev->dma_pfn_offset) << PAGE_SHIFT;
        return mem->device_base;
}

static int dma_init_coherent_memory(
        phys_addr_t phys_addr, dma_addr_t device_addr, size_t size, int flags,
        struct dma_coherent_mem **mem)
{
        struct dma_coherent_mem *dma_mem = NULL;
        void __iomem *mem_base = NULL;
        int pages = size >> PAGE_SHIFT;
        int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);
        int ret;

        if (!size) {
                ret = -EINVAL;
                goto out;
        }

        /* Map the region write-combined; allocations are tracked in a bitmap. */
        mem_base = memremap(phys_addr, size, MEMREMAP_WC);
        if (!mem_base) {
                ret = -EINVAL;
                goto out;
        }
        dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
        if (!dma_mem) {
                ret = -ENOMEM;
                goto out;
        }
        dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
        if (!dma_mem->bitmap) {
                ret = -ENOMEM;
                goto out;
        }

        dma_mem->virt_base = mem_base;
        dma_mem->device_base = device_addr;
        dma_mem->pfn_base = PFN_DOWN(phys_addr);
        dma_mem->size = pages;
        dma_mem->flags = flags;
        spin_lock_init(&dma_mem->spinlock);

        *mem = dma_mem;
        return 0;

out:
        kfree(dma_mem);
        if (mem_base)
                memunmap(mem_base);
        return ret;
}

static void dma_release_coherent_memory(struct dma_coherent_mem *mem)
{
        if (!mem)
                return;

        memunmap(mem->virt_base);
        kfree(mem->bitmap);
        kfree(mem);
}

static int dma_assign_coherent_memory(struct device *dev,
                                      struct dma_coherent_mem *mem)
{
        if (!dev)
                return -ENODEV;
        if (dev->dma_mem)
                return -EBUSY;

        dev->dma_mem = mem;
        return 0;
}

int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
                                dma_addr_t device_addr, size_t size, int flags)
{
        struct dma_coherent_mem *mem;
        int ret;

        ret = dma_init_coherent_memory(phys_addr, device_addr, size, flags,
                                       &mem);
        if (ret)
                return ret;

        ret = dma_assign_coherent_memory(dev, mem);
        if (ret)
                dma_release_coherent_memory(mem);
        return ret;
}
EXPORT_SYMBOL(dma_declare_coherent_memory);
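
/*
 * Illustrative sketch (not part of this file): a platform driver might
 * carve out a device-private pool at probe time. The function name
 * foo_probe and the address/size values are hypothetical.
 *
 *      static int foo_probe(struct platform_device *pdev)
 *      {
 *              // Claim 1 MiB at bus address 0x30000000 as this device's
 *              // exclusive coherent pool; dma_alloc_coherent() for this
 *              // device will then be satisfied from it.
 *              return dma_declare_coherent_memory(&pdev->dev, 0x30000000,
 *                                                 0x30000000, SZ_1M,
 *                                                 DMA_MEMORY_EXCLUSIVE);
 *      }
 */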

void dma_release_declared_memory(struct device *dev)
{
        struct dma_coherent_mem *mem = dev->dma_mem;

        if (!mem)
                return;
        dma_release_coherent_memory(mem);
        dev->dma_mem = NULL;
}
EXPORT_SYMBOL(dma_release_declared_memory);

void *dma_mark_declared_memory_occupied(struct device *dev,
                                        dma_addr_t device_addr, size_t size)
{
        struct dma_coherent_mem *mem = dev->dma_mem;
        unsigned long flags;
        int pos, err;

        size += device_addr & ~PAGE_MASK;

        if (!mem)
                return ERR_PTR(-EINVAL);

        spin_lock_irqsave(&mem->spinlock, flags);
        pos = PFN_DOWN(device_addr - dma_get_device_base(dev, mem));
        err = bitmap_allocate_region(mem->bitmap, pos, get_order(size));
        spin_unlock_irqrestore(&mem->spinlock, flags);

        if (err != 0)
                return ERR_PTR(err);
        return mem->virt_base + (pos << PAGE_SHIFT);
}
EXPORT_SYMBOL(dma_mark_declared_memory_occupied);

static void *__dma_alloc_from_coherent(struct dma_coherent_mem *mem,
                ssize_t size, dma_addr_t *dma_handle)
{
        int order = get_order(size);
        unsigned long flags;
        int pageno;
        void *ret;

        spin_lock_irqsave(&mem->spinlock, flags);

        if (unlikely(size > (mem->size << PAGE_SHIFT)))
                goto err;

        pageno = bitmap_find_free_region(mem->bitmap, mem->size, order);
        if (unlikely(pageno < 0))
                goto err;

        /*
         * Memory was found in the coherent area.
         */
        *dma_handle = mem->device_base + (pageno << PAGE_SHIFT);
        ret = mem->virt_base + (pageno << PAGE_SHIFT);
        spin_unlock_irqrestore(&mem->spinlock, flags);
        memset(ret, 0, size);
        return ret;

err:
        spin_unlock_irqrestore(&mem->spinlock, flags);
        return NULL;
}

/**
 * dma_alloc_from_dev_coherent() - allocate memory from device coherent pool
 * @dev:	device from which we allocate memory
 * @size:	size of requested memory area
 * @dma_handle:	This will be filled with the correct dma handle
 * @ret:	This pointer will be filled with the virtual address
 *		to the allocated area.
 *
 * This function should only be called from per-arch dma_alloc_coherent()
 * to support allocation from per-device coherent memory pools.
 * (An illustrative caller sketch follows the function below.)
 *
 * Returns 0 if dma_alloc_coherent should continue with allocating from
 * generic memory areas, or !0 if dma_alloc_coherent should return @ret.
 */
int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
                dma_addr_t *dma_handle, void **ret)
{
        struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);

        if (!mem)
                return 0;

        *ret = __dma_alloc_from_coherent(mem, size, dma_handle);
        if (*ret)
                return 1;

        /*
         * In the case where the allocation cannot be satisfied from the
         * per-device area, try to fall back to generic memory if the
         * constraints allow it.
         */
        return mem->flags & DMA_MEMORY_EXCLUSIVE;
}
EXPORT_SYMBOL(dma_alloc_from_dev_coherent);
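
/*
 * Illustrative sketch (hypothetical arch glue, not part of this file):
 * how an arch dma_alloc_coherent() implementation consults the
 * per-device pool before falling back to its generic allocator. The
 * helper arch_alloc_from_generic_pool() is an assumed placeholder.
 *
 *      void *arch_dma_alloc(struct device *dev, size_t size,
 *                           dma_addr_t *handle, gfp_t gfp)
 *      {
 *              void *vaddr;
 *
 *              // A non-zero return means the per-device pool handled the
 *              // request; vaddr may still be NULL if the pool is exhausted
 *              // and DMA_MEMORY_EXCLUSIVE forbids falling back.
 *              if (dma_alloc_from_dev_coherent(dev, size, handle, &vaddr))
 *                      return vaddr;
 *
 *              return arch_alloc_from_generic_pool(dev, size, handle, gfp);
 *      }
 */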

void *dma_alloc_from_global_coherent(ssize_t size, dma_addr_t *dma_handle)
{
        if (!dma_coherent_default_memory)
                return NULL;

        return __dma_alloc_from_coherent(dma_coherent_default_memory, size,
                                         dma_handle);
}

static int __dma_release_from_coherent(struct dma_coherent_mem *mem,
                                       int order, void *vaddr)
{
        if (mem && vaddr >= mem->virt_base && vaddr <
                   (mem->virt_base + (mem->size << PAGE_SHIFT))) {
                int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
                unsigned long flags;

                spin_lock_irqsave(&mem->spinlock, flags);
                bitmap_release_region(mem->bitmap, page, order);
                spin_unlock_irqrestore(&mem->spinlock, flags);
                return 1;
        }
        return 0;
}

/**
 * dma_release_from_dev_coherent() - free memory to device coherent memory pool
 * @dev:	device from which the memory was allocated
 * @order:	the order of pages allocated
 * @vaddr:	virtual address of allocated pages
 *
 * This checks whether the memory was allocated from the per-device
 * coherent memory pool and if so, releases that memory.
 * (See the illustrative caller sketch after the function.)
 *
 * Returns 1 if we correctly released the memory, or 0 if the caller should
 * proceed with releasing memory from generic pools.
 */
int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr)
{
        struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);

        return __dma_release_from_coherent(mem, order, vaddr);
}
EXPORT_SYMBOL(dma_release_from_dev_coherent);
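
/*
 * Illustrative sketch (hypothetical arch glue, not part of this file):
 * the matching free path offers the buffer back to the per-device pool
 * first and only then frees from the generic allocator. The helper
 * arch_free_to_generic_pool() is an assumed placeholder.
 *
 *      void arch_dma_free(struct device *dev, size_t size, void *vaddr,
 *                         dma_addr_t handle)
 *      {
 *              if (dma_release_from_dev_coherent(dev, get_order(size), vaddr))
 *                      return;
 *
 *              arch_free_to_generic_pool(dev, size, vaddr, handle);
 *      }
 */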

int dma_release_from_global_coherent(int order, void *vaddr)
{
        if (!dma_coherent_default_memory)
                return 0;

        return __dma_release_from_coherent(dma_coherent_default_memory, order,
                                           vaddr);
}

static int __dma_mmap_from_coherent(struct dma_coherent_mem *mem,
                struct vm_area_struct *vma, void *vaddr, size_t size, int *ret)
{
        if (mem && vaddr >= mem->virt_base && vaddr + size <=
                   (mem->virt_base + (mem->size << PAGE_SHIFT))) {
                unsigned long off = vma->vm_pgoff;
                int start = (vaddr - mem->virt_base) >> PAGE_SHIFT;
                int user_count = vma_pages(vma);
                int count = PAGE_ALIGN(size) >> PAGE_SHIFT;

                *ret = -ENXIO;
                if (off < count && user_count <= count - off) {
                        unsigned long pfn = mem->pfn_base + start + off;
                        *ret = remap_pfn_range(vma, vma->vm_start, pfn,
                                               user_count << PAGE_SHIFT,
                                               vma->vm_page_prot);
                }
                return 1;
        }
        return 0;
}

/**
 * dma_mmap_from_dev_coherent() - mmap memory from the device coherent pool
 * @dev:	device from which the memory was allocated
 * @vma:	vm_area for the userspace memory
 * @vaddr:	cpu address returned by dma_alloc_from_dev_coherent
 * @size:	size of the memory buffer allocated
 * @ret:	result from remap_pfn_range()
 *
 * This checks whether the memory was allocated from the per-device
 * coherent memory pool and if so, maps that memory to the provided vma.
 * (An illustrative caller sketch follows the function below.)
 *
 * Returns 1 if we correctly mapped the memory, or 0 if the caller should
 * proceed with mapping memory from generic pools.
 */
int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
                               void *vaddr, size_t size, int *ret)
{
        struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);

        return __dma_mmap_from_coherent(mem, vma, vaddr, size, ret);
}
EXPORT_SYMBOL(dma_mmap_from_dev_coherent);
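
/*
 * Illustrative sketch (hypothetical arch glue, not part of this file):
 * an arch dma_mmap() hook tries the per-device pool first; when the
 * call returns 1, ret already holds the remap_pfn_range() result. The
 * helper arch_mmap_from_generic_pool() is an assumed placeholder.
 *
 *      int arch_dma_mmap(struct device *dev, struct vm_area_struct *vma,
 *                        void *cpu_addr, dma_addr_t handle, size_t size)
 *      {
 *              int ret;
 *
 *              if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
 *                      return ret;
 *
 *              return arch_mmap_from_generic_pool(dev, vma, cpu_addr,
 *                                                 handle, size);
 *      }
 */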

int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *vaddr,
                                  size_t size, int *ret)
{
        if (!dma_coherent_default_memory)
                return 0;

        return __dma_mmap_from_coherent(dma_coherent_default_memory, vma,
                                        vaddr, size, ret);
}

/*
 * Support for reserved memory regions defined in device tree
 * (see the example "shared-dma-pool" node at the end of this file).
 */
#ifdef CONFIG_OF_RESERVED_MEM
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_reserved_mem.h>

static struct reserved_mem *dma_reserved_default_memory __initdata;

static int rmem_dma_device_init(struct reserved_mem *rmem, struct device *dev)
{
        struct dma_coherent_mem *mem = rmem->priv;
        int ret;

        if (!mem) {
                ret = dma_init_coherent_memory(rmem->base, rmem->base,
                                               rmem->size,
                                               DMA_MEMORY_EXCLUSIVE, &mem);
                if (ret) {
                        pr_err("Reserved memory: failed to init DMA memory pool at %pa, size %ld MiB\n",
                               &rmem->base, (unsigned long)rmem->size / SZ_1M);
                        return ret;
                }
        }
        mem->use_dev_dma_pfn_offset = true;
        rmem->priv = mem;
        dma_assign_coherent_memory(dev, mem);
        return 0;
}

static void rmem_dma_device_release(struct reserved_mem *rmem,
                                    struct device *dev)
{
        if (dev)
                dev->dma_mem = NULL;
}

static const struct reserved_mem_ops rmem_dma_ops = {
        .device_init    = rmem_dma_device_init,
        .device_release = rmem_dma_device_release,
};

static int __init rmem_dma_setup(struct reserved_mem *rmem)
{
        unsigned long node = rmem->fdt_node;

        if (of_get_flat_dt_prop(node, "reusable", NULL))
                return -EINVAL;

#ifdef CONFIG_ARM
        if (!of_get_flat_dt_prop(node, "no-map", NULL)) {
                pr_err("Reserved memory: regions without no-map are not yet supported\n");
                return -EINVAL;
        }

        if (of_get_flat_dt_prop(node, "linux,dma-default", NULL)) {
                WARN(dma_reserved_default_memory,
                     "Reserved memory: region for default DMA coherent area is redefined\n");
                dma_reserved_default_memory = rmem;
        }
#endif

        rmem->ops = &rmem_dma_ops;
        pr_info("Reserved memory: created DMA memory pool at %pa, size %ld MiB\n",
                &rmem->base, (unsigned long)rmem->size / SZ_1M);
        return 0;
}

static int __init dma_init_reserved_memory(void)
{
        const struct reserved_mem_ops *ops;
        int ret;

        if (!dma_reserved_default_memory)
                return -ENOMEM;

        ops = dma_reserved_default_memory->ops;

        /*
         * We rely on rmem_dma_device_init() not propagating an error from
         * dma_assign_coherent_memory() for a NULL device.
         */
        ret = ops->device_init(dma_reserved_default_memory, NULL);
        if (!ret) {
                dma_coherent_default_memory = dma_reserved_default_memory->priv;
                pr_info("DMA: default coherent area is set\n");
        }
        return ret;
}

core_initcall(dma_init_reserved_memory);

RESERVEDMEM_OF_DECLARE(dma, "shared-dma-pool", rmem_dma_setup);
#endif
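
/*
 * Example (illustrative, not from this file): a device-tree fragment
 * that rmem_dma_setup() would accept. The addresses, sizes, and label
 * names are hypothetical.
 *
 *      reserved-memory {
 *              #address-cells = <1>;
 *              #size-cells = <1>;
 *              ranges;
 *
 *              dma_pool: dma-pool@30000000 {
 *                      compatible = "shared-dma-pool";
 *                      reg = <0x30000000 0x100000>;
 *                      no-map;
 *              };
 *      };
 *
 *      // A device then claims the pool with:
 *      //      memory-region = <&dma_pool>;
 *      // Adding "linux,dma-default;" to the node instead makes it the
 *      // global pool used by dma_alloc_from_global_coherent().
 */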