// SPDX-License-Identifier: GPL-2.0
/*
 * Coherent per-device memory handling.
 */
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
struct dma_coherent_mem {
	void		*virt_base;
	dma_addr_t	device_base;
	unsigned long	pfn_base;
	int		size;
	unsigned long	*bitmap;
	spinlock_t	spinlock;
	bool		use_dev_dma_pfn_offset;
};

static struct dma_coherent_mem *dma_coherent_default_memory __ro_after_init;
static inline struct dma_coherent_mem *dev_get_coherent_memory(struct device *dev)
{
	if (dev && dev->dma_mem)
		return dev->dma_mem;
	return NULL;
}
static inline dma_addr_t dma_get_device_base(struct device *dev,
					     struct dma_coherent_mem *mem)
{
	if (mem->use_dev_dma_pfn_offset)
		return (mem->pfn_base - dev->dma_pfn_offset) << PAGE_SHIFT;
	return mem->device_base;
}
static int dma_init_coherent_memory(phys_addr_t phys_addr,
				    dma_addr_t device_addr, size_t size,
				    struct dma_coherent_mem **mem)
{
	struct dma_coherent_mem *dma_mem = NULL;
	void *mem_base = NULL;
	int pages = size >> PAGE_SHIFT;
	int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);
	int ret;

	if (!size) {
		ret = -EINVAL;
		goto out;
	}

	mem_base = memremap(phys_addr, size, MEMREMAP_WC);
	if (!mem_base) {
		ret = -EINVAL;
		goto out;
	}
	dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
	if (!dma_mem) {
		ret = -ENOMEM;
		goto out;
	}
	dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
	if (!dma_mem->bitmap) {
		ret = -ENOMEM;
		goto out;
	}

	dma_mem->virt_base = mem_base;
	dma_mem->device_base = device_addr;
	dma_mem->pfn_base = PFN_DOWN(phys_addr);
	dma_mem->size = pages;
	spin_lock_init(&dma_mem->spinlock);

	*mem = dma_mem;
	return 0;

out:
	kfree(dma_mem);
	if (mem_base)
		memunmap(mem_base);
	return ret;
}
static void dma_release_coherent_memory(struct dma_coherent_mem *mem)
{
	if (!mem)
		return;

	memunmap(mem->virt_base);
	kfree(mem->bitmap);
	kfree(mem);
}
static int dma_assign_coherent_memory(struct device *dev,
				      struct dma_coherent_mem *mem)
{
	if (!dev)
		return -ENODEV;

	if (dev->dma_mem)
		return -EBUSY;

	dev->dma_mem = mem;
	return 0;
}
int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
				dma_addr_t device_addr, size_t size)
{
	struct dma_coherent_mem *mem;
	int ret;

	ret = dma_init_coherent_memory(phys_addr, device_addr, size, &mem);
	if (ret)
		return ret;

	ret = dma_assign_coherent_memory(dev, mem);
	if (ret)
		dma_release_coherent_memory(mem);
	return ret;
}
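
/*
 * Illustrative usage sketch (not part of this file): a platform driver
 * carving a device-local memory window into a coherent pool at probe
 * time. The my_probe() name and resource layout are assumptions for the
 * example only; it assumes the bus address equals the physical address
 * and that <linux/platform_device.h> is included.
 *
 *	static int my_probe(struct platform_device *pdev)
 *	{
 *		struct resource *res;
 *		int ret;
 *
 *		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 *		if (!res)
 *			return -ENODEV;
 *
 *		ret = dma_declare_coherent_memory(&pdev->dev, res->start,
 *						  res->start,
 *						  resource_size(res));
 *		if (ret)
 *			return ret;
 *		...
 *	}
 */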
static void *__dma_alloc_from_coherent(struct device *dev,
				       struct dma_coherent_mem *mem,
				       ssize_t size, dma_addr_t *dma_handle)
{
	int order = get_order(size);
	unsigned long flags;
	int pageno;
	void *ret;

	spin_lock_irqsave(&mem->spinlock, flags);

	if (unlikely(size > ((dma_addr_t)mem->size << PAGE_SHIFT)))
		goto err;

	pageno = bitmap_find_free_region(mem->bitmap, mem->size, order);
	if (unlikely(pageno < 0))
		goto err;

	/*
	 * Memory was found in the coherent area.
	 */
	*dma_handle = dma_get_device_base(dev, mem) +
			((dma_addr_t)pageno << PAGE_SHIFT);
	ret = mem->virt_base + ((dma_addr_t)pageno << PAGE_SHIFT);
	spin_unlock_irqrestore(&mem->spinlock, flags);
	memset(ret, 0, size);
	return ret;
err:
	spin_unlock_irqrestore(&mem->spinlock, flags);
	return NULL;
}
/**
 * dma_alloc_from_dev_coherent() - allocate memory from device coherent pool
 * @dev:	device from which we allocate memory
 * @size:	size of requested memory area
 * @dma_handle:	This will be filled with the correct dma handle
 * @ret:	This pointer will be filled with the virtual address
 *		of the allocated area.
 *
 * This function should only be called from the per-arch dma_alloc_coherent()
 * to support allocation from per-device coherent memory pools.
 *
 * Returns 0 if dma_alloc_coherent should continue with allocating from
 * generic memory areas, or !0 if dma_alloc_coherent should return @ret.
 */
int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
		dma_addr_t *dma_handle, void **ret)
{
	struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);

	if (!mem)
		return 0;

	*ret = __dma_alloc_from_coherent(dev, mem, size, dma_handle);
	return 1;
}
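
/*
 * Illustrative caller sketch (assumption, not this file's code): how an
 * allocator front end is expected to consult the per-device pool before
 * falling back to a generic allocator. Note that a nonzero return with
 * *ret == NULL means the pool exists but is exhausted; there is no
 * fallback to generic memory in that case.
 *
 *	void *vaddr;
 *
 *	if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &vaddr))
 *		return vaddr;
 *	// otherwise: no per-device pool, use the generic path
 */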
void *dma_alloc_from_global_coherent(struct device *dev, ssize_t size,
				     dma_addr_t *dma_handle)
{
	if (!dma_coherent_default_memory)
		return NULL;

	return __dma_alloc_from_coherent(dev, dma_coherent_default_memory,
					 size, dma_handle);
}
static int __dma_release_from_coherent(struct dma_coherent_mem *mem,
				       int order, void *vaddr)
{
	if (mem && vaddr >= mem->virt_base && vaddr <
		   (mem->virt_base + ((dma_addr_t)mem->size << PAGE_SHIFT))) {
		int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
		unsigned long flags;

		spin_lock_irqsave(&mem->spinlock, flags);
		bitmap_release_region(mem->bitmap, page, order);
		spin_unlock_irqrestore(&mem->spinlock, flags);
		return 1;
	}
	return 0;
}
/**
 * dma_release_from_dev_coherent() - free memory to device coherent memory pool
 * @dev:	device from which the memory was allocated
 * @order:	the order of pages allocated
 * @vaddr:	virtual address of allocated pages
 *
 * This checks whether the memory was allocated from the per-device
 * coherent memory pool and if so, releases that memory.
 *
 * Returns 1 if we correctly released the memory, or 0 if the caller should
 * proceed with releasing memory from generic pools.
 */
int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr)
{
	struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);

	return __dma_release_from_coherent(mem, order, vaddr);
}
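
/*
 * Illustrative caller sketch (assumption): the matching free-side check
 * in an arch dma_free_coherent() implementation.
 *
 *	if (dma_release_from_dev_coherent(dev, get_order(size), vaddr))
 *		return;		// buffer came from the per-device pool
 *	// otherwise release through the generic allocator
 */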
int dma_release_from_global_coherent(int order, void *vaddr)
{
	if (!dma_coherent_default_memory)
		return 0;

	return __dma_release_from_coherent(dma_coherent_default_memory, order,
					   vaddr);
}
static int __dma_mmap_from_coherent(struct dma_coherent_mem *mem,
		struct vm_area_struct *vma, void *vaddr, size_t size, int *ret)
{
	if (mem && vaddr >= mem->virt_base && vaddr + size <=
		   (mem->virt_base + ((dma_addr_t)mem->size << PAGE_SHIFT))) {
		unsigned long off = vma->vm_pgoff;
		int start = (vaddr - mem->virt_base) >> PAGE_SHIFT;
		unsigned long user_count = vma_pages(vma);
		int count = PAGE_ALIGN(size) >> PAGE_SHIFT;

		*ret = -ENXIO;
		if (off < count && user_count <= count - off) {
			unsigned long pfn = mem->pfn_base + start + off;
			*ret = remap_pfn_range(vma, vma->vm_start, pfn,
					       user_count << PAGE_SHIFT,
					       vma->vm_page_prot);
		}
		return 1;
	}
	return 0;
}
/**
 * dma_mmap_from_dev_coherent() - mmap memory from the device coherent pool
 * @dev:	device from which the memory was allocated
 * @vma:	vm_area for the userspace memory
 * @vaddr:	cpu address returned by dma_alloc_from_dev_coherent
 * @size:	size of the memory buffer allocated
 * @ret:	result from remap_pfn_range()
 *
 * This checks whether the memory was allocated from the per-device
 * coherent memory pool and if so, maps that memory to the provided vma.
 *
 * Returns 1 if @vaddr belongs to the device coherent pool and the caller
 * should return @ret, or 0 if they should proceed with mapping memory from
 * generic areas.
 */
int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
			       void *vaddr, size_t size, int *ret)
{
	struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);

	return __dma_mmap_from_coherent(mem, vma, vaddr, size, ret);
}
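
/*
 * Illustrative caller sketch (assumption): how a dma_mmap_coherent()
 * style helper would use the 1/0 return convention above.
 *
 *	int ret;
 *
 *	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
 *		return ret;	// pool buffer: ret holds the
 *				// remap_pfn_range() result, or -ENXIO
 *				// if the vma didn't fit the buffer
 *	// otherwise map through the generic implementation
 */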
int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *vaddr,
				  size_t size, int *ret)
{
	if (!dma_coherent_default_memory)
		return 0;

	return __dma_mmap_from_coherent(dma_coherent_default_memory, vma,
					vaddr, size, ret);
}
/*
 * Support for reserved memory regions defined in device tree
 */
#ifdef CONFIG_OF_RESERVED_MEM
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_reserved_mem.h>

static struct reserved_mem *dma_reserved_default_memory __initdata;
static int rmem_dma_device_init(struct reserved_mem *rmem, struct device *dev)
{
	struct dma_coherent_mem *mem = rmem->priv;
	int ret;

	if (!mem) {
		ret = dma_init_coherent_memory(rmem->base, rmem->base,
					       rmem->size, &mem);
		if (ret) {
			pr_err("Reserved memory: failed to init DMA memory pool at %pa, size %ld MiB\n",
			       &rmem->base, (unsigned long)rmem->size / SZ_1M);
			return ret;
		}
	}
	mem->use_dev_dma_pfn_offset = true;
	rmem->priv = mem;
	dma_assign_coherent_memory(dev, mem);
	return 0;
}
static void rmem_dma_device_release(struct reserved_mem *rmem,
				    struct device *dev)
{
	if (dev)
		dev->dma_mem = NULL;
}

static const struct reserved_mem_ops rmem_dma_ops = {
	.device_init	= rmem_dma_device_init,
	.device_release	= rmem_dma_device_release,
};
static int __init rmem_dma_setup(struct reserved_mem *rmem)
{
	unsigned long node = rmem->fdt_node;

	if (of_get_flat_dt_prop(node, "reusable", NULL))
		return -EINVAL;

#ifdef CONFIG_ARM
	if (!of_get_flat_dt_prop(node, "no-map", NULL)) {
		pr_err("Reserved memory: regions without no-map are not yet supported\n");
		return -EINVAL;
	}

	if (of_get_flat_dt_prop(node, "linux,dma-default", NULL)) {
		WARN(dma_reserved_default_memory,
		     "Reserved memory: region for default DMA coherent area is redefined\n");
		dma_reserved_default_memory = rmem;
	}
#endif

	rmem->ops = &rmem_dma_ops;
	pr_info("Reserved memory: created DMA memory pool at %pa, size %ld MiB\n",
		&rmem->base, (unsigned long)rmem->size / SZ_1M);
	return 0;
}
static int __init dma_init_reserved_memory(void)
{
	const struct reserved_mem_ops *ops;
	int ret;

	if (!dma_reserved_default_memory)
		return -ENOMEM;

	ops = dma_reserved_default_memory->ops;

	/*
	 * We rely on rmem_dma_device_init() not propagating the error
	 * that dma_assign_coherent_memory() returns for a NULL device.
	 */
	ret = ops->device_init(dma_reserved_default_memory, NULL);

	if (!ret) {
		dma_coherent_default_memory = dma_reserved_default_memory->priv;
		pr_info("DMA: default coherent area is set\n");
	}

	return ret;
}
core_initcall(dma_init_reserved_memory);

RESERVEDMEM_OF_DECLARE(dma, "shared-dma-pool", rmem_dma_setup);