// SPDX-License-Identifier: GPL-2.0
/*
 * Coherent per-device memory handling.
 */
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
struct dma_coherent_mem {
	void		*virt_base;
	dma_addr_t	device_base;
	unsigned long	pfn_base;
	int		size;
	int		flags;
	unsigned long	*bitmap;
	spinlock_t	spinlock;
	bool		use_dev_dma_pfn_offset;
};
static struct dma_coherent_mem *dma_coherent_default_memory __ro_after_init;
static inline struct dma_coherent_mem *dev_get_coherent_memory(struct device *dev)
{
	if (dev && dev->dma_mem)
		return dev->dma_mem;
	return dma_coherent_default_memory;
}
static inline dma_addr_t dma_get_device_base(struct device *dev,
					     struct dma_coherent_mem *mem)
{
	if (mem->use_dev_dma_pfn_offset)
		return (mem->pfn_base - dev->dma_pfn_offset) << PAGE_SHIFT;
	return mem->device_base;
}
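
/*
 * Illustrative example (the numbers are hypothetical, not from this file):
 * for a pool whose CPU physical base is 0x80000000 (pfn_base 0x80000) on a
 * bus where dev->dma_pfn_offset is 0x40000, the device-visible base
 * computed above is (0x80000 - 0x40000) << PAGE_SHIFT = 0x40000000 with
 * 4 KiB pages.
 */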
static int dma_init_coherent_memory(
	phys_addr_t phys_addr, dma_addr_t device_addr, size_t size, int flags,
	struct dma_coherent_mem **mem)
{
	struct dma_coherent_mem *dma_mem = NULL;
	void __iomem *mem_base = NULL;
	int pages = size >> PAGE_SHIFT;
	int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);
	int ret;

	if (!size) {
		ret = -EINVAL;
		goto out;
	}

	mem_base = memremap(phys_addr, size, MEMREMAP_WC);
	if (!mem_base) {
		ret = -EINVAL;
		goto out;
	}
	dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
	if (!dma_mem) {
		ret = -ENOMEM;
		goto out;
	}
	dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
	if (!dma_mem->bitmap) {
		ret = -ENOMEM;
		goto out;
	}

	dma_mem->virt_base = mem_base;
	dma_mem->device_base = device_addr;
	dma_mem->pfn_base = PFN_DOWN(phys_addr);
	dma_mem->size = pages;
	dma_mem->flags = flags;
	spin_lock_init(&dma_mem->spinlock);

	*mem = dma_mem;
	return 0;

out:
	kfree(dma_mem);
	if (mem_base)
		memunmap(mem_base);
	return ret;
}
static void dma_release_coherent_memory(struct dma_coherent_mem *mem)
{
	if (!mem)
		return;

	memunmap(mem->virt_base);
	kfree(mem->bitmap);
	kfree(mem);
}
static int dma_assign_coherent_memory(struct device *dev,
				      struct dma_coherent_mem *mem)
{
	if (!dev)
		return -ENODEV;
	if (dev->dma_mem)
		return -EBUSY;

	dev->dma_mem = mem;
	return 0;
}
int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
				dma_addr_t device_addr, size_t size, int flags)
{
	struct dma_coherent_mem *mem;
	int ret;

	ret = dma_init_coherent_memory(phys_addr, device_addr, size, flags,
				       &mem);
	if (ret)
		return ret;

	ret = dma_assign_coherent_memory(dev, mem);
	if (ret)
		dma_release_coherent_memory(mem);
	return ret;
}
EXPORT_SYMBOL(dma_declare_coherent_memory);
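
/*
 * Example (an illustrative sketch, not part of this file): a hypothetical
 * platform driver with a dedicated 1 MiB SRAM at bus/CPU address
 * 0x30000000 could claim it as its private coherent pool at probe time:
 *
 *	ret = dma_declare_coherent_memory(&pdev->dev, 0x30000000,
 *					  0x30000000, SZ_1M,
 *					  DMA_MEMORY_EXCLUSIVE);
 *	if (ret)
 *		return ret;
 *
 * After this, dma_alloc_coherent(&pdev->dev, ...) is satisfied from the
 * declared region via dma_alloc_from_dev_coherent() below.
 */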
void dma_release_declared_memory(struct device *dev)
{
	struct dma_coherent_mem *mem = dev->dma_mem;

	if (!mem)
		return;
	dma_release_coherent_memory(mem);
	dev->dma_mem = NULL;
}
EXPORT_SYMBOL(dma_release_declared_memory);
void *dma_mark_declared_memory_occupied(struct device *dev,
					dma_addr_t device_addr, size_t size)
{
	struct dma_coherent_mem *mem = dev->dma_mem;
	unsigned long flags;
	int pos, err;

	size += device_addr & ~PAGE_MASK;

	if (!mem)
		return ERR_PTR(-EINVAL);

	spin_lock_irqsave(&mem->spinlock, flags);
	pos = PFN_DOWN(device_addr - dma_get_device_base(dev, mem));
	err = bitmap_allocate_region(mem->bitmap, pos, get_order(size));
	spin_unlock_irqrestore(&mem->spinlock, flags);

	if (err != 0)
		return ERR_PTR(err);
	return mem->virt_base + (pos << PAGE_SHIFT);
}
EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
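
/*
 * Example (hypothetical): reserving the first two pages of a declared
 * region, e.g. for a buffer the hardware places at a fixed offset.
 * "device_base" stands for the bus address the region was declared with:
 *
 *	void *virt = dma_mark_declared_memory_occupied(dev, device_base,
 *						       2 * PAGE_SIZE);
 *	if (IS_ERR(virt))
 *		return PTR_ERR(virt);
 */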
static void *__dma_alloc_from_coherent(struct dma_coherent_mem *mem,
		ssize_t size, dma_addr_t *dma_handle)
{
	int order = get_order(size);
	unsigned long flags;
	int pageno;
	void *ret;

	spin_lock_irqsave(&mem->spinlock, flags);

	if (unlikely(size > (mem->size << PAGE_SHIFT)))
		goto err;

	pageno = bitmap_find_free_region(mem->bitmap, mem->size, order);
	if (unlikely(pageno < 0))
		goto err;

	/*
	 * Memory was found in the coherent area.
	 */
	*dma_handle = mem->device_base + (pageno << PAGE_SHIFT);
	ret = mem->virt_base + (pageno << PAGE_SHIFT);
	spin_unlock_irqrestore(&mem->spinlock, flags);
	memset(ret, 0, size);
	return ret;

err:
	spin_unlock_irqrestore(&mem->spinlock, flags);
	return NULL;
}
/**
 * dma_alloc_from_dev_coherent() - allocate memory from device coherent pool
 * @dev:	device from which we allocate memory
 * @size:	size of requested memory area
 * @dma_handle:	This will be filled with the correct dma handle
 * @ret:	This pointer will be filled with the virtual address
 *		to allocated area.
 *
 * This function should only be called from per-arch dma_alloc_coherent()
 * to support allocation from per-device coherent memory pools.
 *
 * Returns 0 if dma_alloc_coherent should continue with allocating from
 * generic memory areas, or !0 if dma_alloc_coherent should return @ret.
 */
int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
		dma_addr_t *dma_handle, void **ret)
{
	struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);

	if (!mem)
		return 0;

	*ret = __dma_alloc_from_coherent(mem, size, dma_handle);
	if (*ret)
		return 1;

	/*
	 * In the case where the allocation cannot be satisfied from the
	 * per-device area, try to fall back to generic memory if the
	 * constraints allow it.
	 */
	return mem->flags & DMA_MEMORY_EXCLUSIVE;
}
EXPORT_SYMBOL(dma_alloc_from_dev_coherent);
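
/*
 * Sketch of the intended call pattern (simplified from a generic
 * dma_alloc_coherent() path, not a verbatim copy of it):
 *
 *	void *cpu_addr;
 *
 *	if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr))
 *		return cpu_addr;   // pool hit, or exclusive-pool miss (NULL)
 *	// otherwise fall through to the generic allocator
 */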
void *dma_alloc_from_global_coherent(ssize_t size, dma_addr_t *dma_handle)
{
	if (!dma_coherent_default_memory)
		return NULL;

	return __dma_alloc_from_coherent(dma_coherent_default_memory, size,
					 dma_handle);
}
static int __dma_release_from_coherent(struct dma_coherent_mem *mem,
				       int order, void *vaddr)
{
	if (mem && vaddr >= mem->virt_base && vaddr <
		   (mem->virt_base + (mem->size << PAGE_SHIFT))) {
		int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
		unsigned long flags;

		spin_lock_irqsave(&mem->spinlock, flags);
		bitmap_release_region(mem->bitmap, page, order);
		spin_unlock_irqrestore(&mem->spinlock, flags);
		return 1;
	}
	return 0;
}
/**
 * dma_release_from_dev_coherent() - free memory to device coherent memory pool
 * @dev:	device from which the memory was allocated
 * @order:	the order of pages allocated
 * @vaddr:	virtual address of allocated pages
 *
 * This checks whether the memory was allocated from the per-device
 * coherent memory pool and if so, releases that memory.
 *
 * Returns 1 if we correctly released the memory, or 0 if the caller should
 * proceed with releasing memory from generic pools.
 */
int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr)
{
	struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);

	return __dma_release_from_coherent(mem, order, vaddr);
}
EXPORT_SYMBOL(dma_release_from_dev_coherent);
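
/*
 * Sketch of the intended call pattern on the free path (simplified;
 * "page_order" stands for the order the buffer was allocated with):
 *
 *	if (dma_release_from_dev_coherent(dev, page_order, cpu_addr))
 *		return;		// buffer came from the device pool
 *	// otherwise release through the generic allocator
 */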
int dma_release_from_global_coherent(int order, void *vaddr)
{
	if (!dma_coherent_default_memory)
		return 0;

	return __dma_release_from_coherent(dma_coherent_default_memory, order,
					   vaddr);
}
static int __dma_mmap_from_coherent(struct dma_coherent_mem *mem,
		struct vm_area_struct *vma, void *vaddr, size_t size, int *ret)
{
	if (mem && vaddr >= mem->virt_base && vaddr + size <=
		   (mem->virt_base + (mem->size << PAGE_SHIFT))) {
		unsigned long off = vma->vm_pgoff;
		int start = (vaddr - mem->virt_base) >> PAGE_SHIFT;
		int user_count = vma_pages(vma);
		int count = PAGE_ALIGN(size) >> PAGE_SHIFT;

		*ret = -ENXIO;
		if (off < count && user_count <= count - off) {
			unsigned long pfn = mem->pfn_base + start + off;
			*ret = remap_pfn_range(vma, vma->vm_start, pfn,
					       user_count << PAGE_SHIFT,
					       vma->vm_page_prot);
		}
		return 1;
	}
	return 0;
}
/**
 * dma_mmap_from_dev_coherent() - mmap memory from the device coherent pool
 * @dev:	device from which the memory was allocated
 * @vma:	vm_area for the userspace memory
 * @vaddr:	cpu address returned by dma_alloc_from_dev_coherent
 * @size:	size of the memory buffer allocated
 * @ret:	result from remap_pfn_range()
 *
 * This checks whether the memory was allocated from the per-device
 * coherent memory pool and if so, maps that memory to the provided vma.
 *
 * Returns 1 if we correctly mapped the memory, or 0 if the caller should
 * proceed with mapping memory from generic pools.
 */
int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
			       void *vaddr, size_t size, int *ret)
{
	struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);

	return __dma_mmap_from_coherent(mem, vma, vaddr, size, ret);
}
EXPORT_SYMBOL(dma_mmap_from_dev_coherent);
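
/*
 * Sketch of the intended call pattern from a driver's mmap handler
 * (simplified; "err" collects the remap_pfn_range() result):
 *
 *	int err;
 *
 *	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &err))
 *		return err;	// handled by the device pool (may be -ENXIO)
 *	// otherwise map through the generic path
 */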
int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *vaddr,
				  size_t size, int *ret)
{
	if (!dma_coherent_default_memory)
		return 0;

	return __dma_mmap_from_coherent(dma_coherent_default_memory, vma,
					vaddr, size, ret);
}
/*
 * Support for reserved memory regions defined in device tree
 */
#ifdef CONFIG_OF_RESERVED_MEM
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_reserved_mem.h>
static struct reserved_mem *dma_reserved_default_memory __initdata;
static int rmem_dma_device_init(struct reserved_mem *rmem, struct device *dev)
{
	struct dma_coherent_mem *mem = rmem->priv;
	int ret;

	if (!mem) {
		ret = dma_init_coherent_memory(rmem->base, rmem->base,
					       rmem->size,
					       DMA_MEMORY_EXCLUSIVE, &mem);
		if (ret) {
			pr_err("Reserved memory: failed to init DMA memory pool at %pa, size %ld MiB\n",
			       &rmem->base, (unsigned long)rmem->size / SZ_1M);
			return ret;
		}
	}
	mem->use_dev_dma_pfn_offset = true;
	rmem->priv = mem;
	dma_assign_coherent_memory(dev, mem);
	return 0;
}
static void rmem_dma_device_release(struct reserved_mem *rmem,
				    struct device *dev)
{
	if (dev)
		dev->dma_mem = NULL;
}
static const struct reserved_mem_ops rmem_dma_ops = {
	.device_init	= rmem_dma_device_init,
	.device_release	= rmem_dma_device_release,
};
static int __init rmem_dma_setup(struct reserved_mem *rmem)
{
	unsigned long node = rmem->fdt_node;

	if (of_get_flat_dt_prop(node, "reusable", NULL))
		return -EINVAL;

	if (!of_get_flat_dt_prop(node, "no-map", NULL)) {
		pr_err("Reserved memory: regions without no-map are not yet supported\n");
		return -EINVAL;
	}

	if (of_get_flat_dt_prop(node, "linux,dma-default", NULL)) {
		WARN(dma_reserved_default_memory,
		     "Reserved memory: region for default DMA coherent area is redefined\n");
		dma_reserved_default_memory = rmem;
	}

	rmem->ops = &rmem_dma_ops;
	pr_info("Reserved memory: created DMA memory pool at %pa, size %ld MiB\n",
		&rmem->base, (unsigned long)rmem->size / SZ_1M);
	return 0;
}
static int __init dma_init_reserved_memory(void)
{
	const struct reserved_mem_ops *ops;
	int ret;

	if (!dma_reserved_default_memory)
		return -ENOMEM;

	ops = dma_reserved_default_memory->ops;

	/*
	 * We rely on rmem_dma_device_init() not propagating the error that
	 * dma_assign_coherent_memory() returns for a NULL device.
	 */
	ret = ops->device_init(dma_reserved_default_memory, NULL);
	if (!ret) {
		dma_coherent_default_memory = dma_reserved_default_memory->priv;
		pr_info("DMA: default coherent area is set\n");
	}

	return ret;
}
core_initcall(dma_init_reserved_memory);
RESERVEDMEM_OF_DECLARE(dma, "shared-dma-pool", rmem_dma_setup);
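
/*
 * Example device-tree usage (illustrative; node names and addresses are
 * hypothetical). A region compatible with "shared-dma-pool" is matched by
 * rmem_dma_setup() above:
 *
 *	reserved-memory {
 *		#address-cells = <1>;
 *		#size-cells = <1>;
 *		ranges;
 *
 *		dma_pool: dma-pool@60000000 {
 *			compatible = "shared-dma-pool";
 *			reg = <0x60000000 0x100000>;
 *			no-map;
 *		};
 *	};
 *
 * A device then references the pool with "memory-region = <&dma_pool>;".
 */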