// SPDX-License-Identifier: GPL-2.0
/*
 * Coherent per-device memory handling.
 */
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>
struct dma_coherent_mem {
	void		*virt_base;
	dma_addr_t	device_base;
	unsigned long	pfn_base;
	int		size;
	unsigned long	*bitmap;
	spinlock_t	spinlock;
	bool		use_dev_dma_pfn_offset;
};
static struct dma_coherent_mem *dma_coherent_default_memory __ro_after_init;
static inline struct dma_coherent_mem *dev_get_coherent_memory(struct device *dev)
{
	if (dev && dev->dma_mem)
		return dev->dma_mem;
	return NULL;
}
static inline dma_addr_t dma_get_device_base(struct device *dev,
					     struct dma_coherent_mem *mem)
{
	if (mem->use_dev_dma_pfn_offset)
		return phys_to_dma(dev, PFN_PHYS(mem->pfn_base));
	return mem->device_base;
}
static int dma_init_coherent_memory(phys_addr_t phys_addr,
				    dma_addr_t device_addr, size_t size,
				    struct dma_coherent_mem **mem)
{
	struct dma_coherent_mem *dma_mem = NULL;
	void *mem_base = NULL;
	int pages = size >> PAGE_SHIFT;
	int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);
	int ret;

	if (!size) {
		ret = -EINVAL;
		goto out;
	}

	mem_base = memremap(phys_addr, size, MEMREMAP_WC);
	if (!mem_base) {
		ret = -EINVAL;
		goto out;
	}
	dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
	if (!dma_mem) {
		ret = -ENOMEM;
		goto out;
	}
	dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
	if (!dma_mem->bitmap) {
		ret = -ENOMEM;
		goto out;
	}

	dma_mem->virt_base = mem_base;
	dma_mem->device_base = device_addr;
	dma_mem->pfn_base = PFN_DOWN(phys_addr);
	dma_mem->size = pages;
	spin_lock_init(&dma_mem->spinlock);

	*mem = dma_mem;
	return 0;

out:
	kfree(dma_mem);
	if (mem_base)
		memunmap(mem_base);
	return ret;
}
static void dma_release_coherent_memory(struct dma_coherent_mem *mem)
{
	if (!mem)
		return;

	memunmap(mem->virt_base);
	kfree(mem->bitmap);
	kfree(mem);
}
static int dma_assign_coherent_memory(struct device *dev,
				      struct dma_coherent_mem *mem)
{
	if (!dev)
		return -ENODEV;

	if (dev->dma_mem)
		return -EBUSY;

	dev->dma_mem = mem;
	return 0;
}
/*
 * Declare a region of memory to be handed out by dma_alloc_coherent() when it
 * is asked for coherent memory for this device. This shall only be used
 * from platform code, usually based on the device tree description.
 *
 * phys_addr is the CPU physical address to which the memory is currently
 * assigned (this will be ioremapped so the CPU can access the region).
 *
 * device_addr is the DMA address the device needs to be programmed with to
 * actually address this memory (this will be handed out as the dma_addr_t in
 * dma_alloc_coherent()).
 *
 * size is the size of the area (must be a multiple of PAGE_SIZE).
 *
 * As a simplification for the platforms, only *one* such region of memory may
 * be declared per device.
 */
int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
				dma_addr_t device_addr, size_t size)
{
	struct dma_coherent_mem *mem;
	int ret;

	ret = dma_init_coherent_memory(phys_addr, device_addr, size, &mem);
	if (ret)
		return ret;

	ret = dma_assign_coherent_memory(dev, mem);
	if (ret)
		dma_release_coherent_memory(mem);
	return ret;
}
static void *__dma_alloc_from_coherent(struct device *dev,
				       struct dma_coherent_mem *mem,
				       ssize_t size, dma_addr_t *dma_handle)
{
	int order = get_order(size);
	unsigned long flags;
	int pageno;
	void *ret;

	spin_lock_irqsave(&mem->spinlock, flags);

	if (unlikely(size > ((dma_addr_t)mem->size << PAGE_SHIFT)))
		goto err;

	pageno = bitmap_find_free_region(mem->bitmap, mem->size, order);
	if (unlikely(pageno < 0))
		goto err;

	/*
	 * Memory was found in the coherent area.
	 */
	*dma_handle = dma_get_device_base(dev, mem) +
			((dma_addr_t)pageno << PAGE_SHIFT);
	ret = mem->virt_base + ((dma_addr_t)pageno << PAGE_SHIFT);
	spin_unlock_irqrestore(&mem->spinlock, flags);
	memset(ret, 0, size);
	return ret;
err:
	spin_unlock_irqrestore(&mem->spinlock, flags);
	return NULL;
}
/**
 * dma_alloc_from_dev_coherent() - allocate memory from device coherent pool
 * @dev:	device from which we allocate memory
 * @size:	size of requested memory area
 * @dma_handle:	This will be filled with the correct dma handle
 * @ret:	This pointer will be filled with the virtual address to the
 *		allocated area
 *
 * This function should be only called from per-arch dma_alloc_coherent()
 * to support allocation from per-device coherent memory pools.
 *
 * Returns 0 if dma_alloc_coherent should continue with allocating from
 * generic memory areas, or !0 if dma_alloc_coherent should return @ret.
 */
int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
		dma_addr_t *dma_handle, void **ret)
{
	struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);

	if (!mem)
		return 0;

	*ret = __dma_alloc_from_coherent(dev, mem, size, dma_handle);
	return 1;
}
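
/*
 * Example caller pattern (a sketch, not code from any particular
 * architecture): the dma_alloc_coherent() implementation consults the
 * per-device pool first and falls back to its generic path only when this
 * returns 0. Note that *ret may still be NULL when a pool exists but is
 * exhausted; the caller must then fail rather than fall back:
 *
 *	void *cpu_addr;
 *
 *	if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr))
 *		return cpu_addr;
 *	return alloc_from_generic_pool(dev, size, dma_handle);
 *
 * alloc_from_generic_pool() is a hypothetical name for whatever generic
 * allocation path the caller uses next.
 */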
void *dma_alloc_from_global_coherent(struct device *dev, ssize_t size,
		dma_addr_t *dma_handle)
{
	if (!dma_coherent_default_memory)
		return NULL;

	return __dma_alloc_from_coherent(dev, dma_coherent_default_memory, size,
					 dma_handle);
}
static int __dma_release_from_coherent(struct dma_coherent_mem *mem,
				       int order, void *vaddr)
{
	if (mem && vaddr >= mem->virt_base && vaddr <
		   (mem->virt_base + ((dma_addr_t)mem->size << PAGE_SHIFT))) {
		int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
		unsigned long flags;

		spin_lock_irqsave(&mem->spinlock, flags);
		bitmap_release_region(mem->bitmap, page, order);
		spin_unlock_irqrestore(&mem->spinlock, flags);
		return 1;
	}
	return 0;
}
/**
 * dma_release_from_dev_coherent() - free memory to device coherent memory pool
 * @dev:	device from which the memory was allocated
 * @order:	the order of pages allocated
 * @vaddr:	virtual address of allocated pages
 *
 * This checks whether the memory was allocated from the per-device
 * coherent memory pool and if so, releases that memory.
 *
 * Returns 1 if we correctly released the memory, or 0 if the caller should
 * proceed with releasing memory from generic pools.
 */
int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr)
{
	struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);

	return __dma_release_from_coherent(mem, order, vaddr);
}
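
/*
 * Example caller pattern (a sketch mirroring the allocation side): the
 * dma_free_coherent() implementation tries the per-device pool first and
 * only releases through its generic path when this returns 0:
 *
 *	if (dma_release_from_dev_coherent(dev, get_order(size), cpu_addr))
 *		return;
 *	free_to_generic_pool(dev, size, cpu_addr, dma_handle);
 *
 * free_to_generic_pool() is a hypothetical name for the caller's generic
 * release path.
 */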
int dma_release_from_global_coherent(int order, void *vaddr)
{
	if (!dma_coherent_default_memory)
		return 0;

	return __dma_release_from_coherent(dma_coherent_default_memory, order,
			vaddr);
}
static int __dma_mmap_from_coherent(struct dma_coherent_mem *mem,
		struct vm_area_struct *vma, void *vaddr, size_t size, int *ret)
{
	if (mem && vaddr >= mem->virt_base && vaddr + size <=
		   (mem->virt_base + ((dma_addr_t)mem->size << PAGE_SHIFT))) {
		unsigned long off = vma->vm_pgoff;
		int start = (vaddr - mem->virt_base) >> PAGE_SHIFT;
		unsigned long user_count = vma_pages(vma);
		int count = PAGE_ALIGN(size) >> PAGE_SHIFT;

		*ret = -ENXIO;
		if (off < count && user_count <= count - off) {
			unsigned long pfn = mem->pfn_base + start + off;
			*ret = remap_pfn_range(vma, vma->vm_start, pfn,
					       user_count << PAGE_SHIFT,
					       vma->vm_page_prot);
		}
		return 1;
	}
	return 0;
}
/**
 * dma_mmap_from_dev_coherent() - mmap memory from the device coherent pool
 * @dev:	device from which the memory was allocated
 * @vma:	vm_area for the userspace memory
 * @vaddr:	cpu address returned by dma_alloc_from_dev_coherent
 * @size:	size of the memory buffer allocated
 * @ret:	result from remap_pfn_range()
 *
 * This checks whether the memory was allocated from the per-device
 * coherent memory pool and if so, maps that memory to the provided vma.
 *
 * Returns 1 if @vaddr belongs to the device coherent pool and the caller
 * should return @ret, or 0 if they should proceed with mapping memory from
 * generic areas.
 */
int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
			       void *vaddr, size_t size, int *ret)
{
	struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);

	return __dma_mmap_from_coherent(mem, vma, vaddr, size, ret);
}
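
/*
 * Example caller pattern (a sketch): a dma_mmap() implementation lets the
 * per-device pool claim the vma first; when this returns 1 the mapping
 * attempt (successful or not) has already happened and @ret holds the
 * result:
 *
 *	int ret;
 *
 *	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
 *		return ret;
 *	return generic_mmap_path(dev, vma, cpu_addr, size);
 *
 * generic_mmap_path() is a hypothetical name for the caller's fallback.
 */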
int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *vaddr,
				  size_t size, int *ret)
{
	if (!dma_coherent_default_memory)
		return 0;

	return __dma_mmap_from_coherent(dma_coherent_default_memory, vma,
					vaddr, size, ret);
}
/*
 * Support for reserved memory regions defined in device tree
 */
#ifdef CONFIG_OF_RESERVED_MEM
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_reserved_mem.h>
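
/*
 * Example device-tree usage (a sketch; node names, addresses and sizes are
 * made up for illustration). A "shared-dma-pool" reserved-memory node is
 * matched by the RESERVEDMEM_OF_DECLARE() entry at the bottom of this file,
 * and a device claims the pool through its memory-region property:
 *
 *	reserved-memory {
 *		#address-cells = <1>;
 *		#size-cells = <1>;
 *		ranges;
 *
 *		dma_pool: dma-pool@90000000 {
 *			compatible = "shared-dma-pool";
 *			reg = <0x90000000 0x100000>;
 *			no-map;
 *		};
 *	};
 *
 *	crypto@a0000000 {
 *		memory-region = <&dma_pool>;
 *	};
 */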
static struct reserved_mem *dma_reserved_default_memory __initdata;
static int rmem_dma_device_init(struct reserved_mem *rmem, struct device *dev)
{
	struct dma_coherent_mem *mem = rmem->priv;
	int ret;

	if (!mem) {
		ret = dma_init_coherent_memory(rmem->base, rmem->base,
					       rmem->size, &mem);
		if (ret) {
			pr_err("Reserved memory: failed to init DMA memory pool at %pa, size %ld MiB\n",
				&rmem->base, (unsigned long)rmem->size / SZ_1M);
			return ret;
		}
	}
	mem->use_dev_dma_pfn_offset = true;
	rmem->priv = mem;
	dma_assign_coherent_memory(dev, mem);
	return 0;
}
static void rmem_dma_device_release(struct reserved_mem *rmem,
				    struct device *dev)
{
	if (dev)
		dev->dma_mem = NULL;
}
static const struct reserved_mem_ops rmem_dma_ops = {
	.device_init	= rmem_dma_device_init,
	.device_release	= rmem_dma_device_release,
};
static int __init rmem_dma_setup(struct reserved_mem *rmem)
{
	unsigned long node = rmem->fdt_node;

	if (of_get_flat_dt_prop(node, "reusable", NULL))
		return -EINVAL;

#ifdef CONFIG_ARM
	if (!of_get_flat_dt_prop(node, "no-map", NULL)) {
		pr_err("Reserved memory: regions without no-map are not yet supported\n");
		return -EINVAL;
	}

	if (of_get_flat_dt_prop(node, "linux,dma-default", NULL)) {
		WARN(dma_reserved_default_memory,
		     "Reserved memory: region for default DMA coherent area is redefined\n");
		dma_reserved_default_memory = rmem;
	}
#endif

	rmem->ops = &rmem_dma_ops;
	pr_info("Reserved memory: created DMA memory pool at %pa, size %ld MiB\n",
		&rmem->base, (unsigned long)rmem->size / SZ_1M);
	return 0;
}
static int __init dma_init_reserved_memory(void)
{
	const struct reserved_mem_ops *ops;
	int ret;

	if (!dma_reserved_default_memory)
		return -ENOMEM;

	ops = dma_reserved_default_memory->ops;

	/*
	 * We rely on rmem_dma_device_init() not propagating an error from
	 * dma_assign_coherent_memory() for the "NULL" device.
	 */
	ret = ops->device_init(dma_reserved_default_memory, NULL);
	if (!ret) {
		dma_coherent_default_memory = dma_reserved_default_memory->priv;
		pr_info("DMA: default coherent area is set\n");
	}

	return ret;
}
core_initcall(dma_init_reserved_memory);

RESERVEDMEM_OF_DECLARE(dma, "shared-dma-pool", rmem_dma_setup);
#endif