// SPDX-License-Identifier: GPL-2.0
/*
 * DMABUF CMA heap exporter
 *
 * Copyright (C) 2012, 2019, 2020 Linaro Ltd.
 * Author: <benjamin.gaignard@linaro.org> for ST-Ericsson.
 *
 * Also utilizing parts of Andrew Davis' SRAM heap:
 * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
 *	Andrew F. Davis <afd@ti.com>
 */
#include <linux/cma.h>
#include <linux/dma-buf.h>
#include <linux/dma-heap.h>
#include <linux/dma-map-ops.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
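
/*
 * Bookkeeping objects: one cma_heap per exported CMA area, one
 * cma_heap_buffer per allocation, and one dma_heap_attachment per device
 * currently attached to a buffer.
 */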
struct cma_heap {
	struct dma_heap *heap;
	struct cma *cma;
};

struct cma_heap_buffer {
	struct cma_heap *heap;
	struct list_head attachments;
	struct mutex lock;
	unsigned long len;
	struct page *cma_pages;
	struct page **pages;
	pgoff_t pagecount;
	int vmap_cnt;
	void *vaddr;
};

struct dma_heap_attachment {
	struct device *dev;
	struct sg_table table;
	struct list_head list;
	bool mapped;
};
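
/*
 * Attach/detach: build a scatterlist view of the buffer's pages for each
 * attaching device and track it on the buffer's attachment list.
 */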
static int cma_heap_attach(struct dma_buf *dmabuf,
			   struct dma_buf_attachment *attachment)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a;
	int ret;

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a)
		return -ENOMEM;

	ret = sg_alloc_table_from_pages(&a->table, buffer->pages,
					buffer->pagecount, 0,
					buffer->pagecount << PAGE_SHIFT,
					GFP_KERNEL);
	if (ret) {
		kfree(a);
		return ret;
	}

	a->dev = attachment->dev;
	INIT_LIST_HEAD(&a->list);
	a->mapped = false;

	attachment->priv = a;

	mutex_lock(&buffer->lock);
	list_add(&a->list, &buffer->attachments);
	mutex_unlock(&buffer->lock);

	return 0;
}

static void cma_heap_detach(struct dma_buf *dmabuf,
			    struct dma_buf_attachment *attachment)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a = attachment->priv;

	mutex_lock(&buffer->lock);
	list_del(&a->list);
	mutex_unlock(&buffer->lock);

	sg_free_table(&a->table);
	kfree(a);
}
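
/*
 * Map/unmap the attachment's scatterlist for DMA on behalf of the attached
 * device; the 'mapped' flag gates the CPU-access cache maintenance below.
 */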
static struct sg_table *cma_heap_map_dma_buf(struct dma_buf_attachment *attachment,
					     enum dma_data_direction direction)
{
	struct dma_heap_attachment *a = attachment->priv;
	struct sg_table *table = &a->table;
	int ret;

	ret = dma_map_sgtable(attachment->dev, table, direction, 0);
	if (ret)
		return ERR_PTR(-ENOMEM);
	a->mapped = true;
	return table;
}

static void cma_heap_unmap_dma_buf(struct dma_buf_attachment *attachment,
				   struct sg_table *table,
				   enum dma_data_direction direction)
{
	struct dma_heap_attachment *a = attachment->priv;

	a->mapped = false;
	dma_unmap_sgtable(attachment->dev, table, direction, 0);
}
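
/*
 * CPU access bracketing: sync every currently mapped attachment (and any
 * kernel vmap) so the CPU and device views of the buffer stay coherent.
 */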
static int cma_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
					     enum dma_data_direction direction)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a;

	if (buffer->vmap_cnt)
		invalidate_kernel_vmap_range(buffer->vaddr, buffer->len);

	mutex_lock(&buffer->lock);
	list_for_each_entry(a, &buffer->attachments, list) {
		if (!a->mapped)
			continue;
		dma_sync_sgtable_for_cpu(a->dev, &a->table, direction);
	}
	mutex_unlock(&buffer->lock);

	return 0;
}

static int cma_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
					   enum dma_data_direction direction)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a;

	if (buffer->vmap_cnt)
		flush_kernel_vmap_range(buffer->vaddr, buffer->len);

	mutex_lock(&buffer->lock);
	list_for_each_entry(a, &buffer->attachments, list) {
		if (!a->mapped)
			continue;
		dma_sync_sgtable_for_device(a->dev, &a->table, direction);
	}
	mutex_unlock(&buffer->lock);

	return 0;
}
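
/*
 * Userspace mmap support: faults are served directly from the buffer's
 * page array, so only shared mappings are allowed (no private COW copies).
 */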
static vm_fault_t cma_heap_vm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct cma_heap_buffer *buffer = vma->vm_private_data;

	if (vmf->pgoff >= buffer->pagecount)
		return VM_FAULT_SIGBUS;

	vmf->page = buffer->pages[vmf->pgoff];
	get_page(vmf->page);

	return 0;
}

static const struct vm_operations_struct dma_heap_vm_ops = {
	.fault = cma_heap_vm_fault,
};

static int cma_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;

	if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0)
		return -EINVAL;

	vma->vm_ops = &dma_heap_vm_ops;
	vma->vm_private_data = buffer;

	return 0;
}
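
/*
 * Kernel mapping helpers: the first vmap builds a contiguous kernel mapping
 * of the buffer's pages, later callers just take a reference (vmap_cnt).
 */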
static void *cma_heap_do_vmap(struct cma_heap_buffer *buffer)
{
	void *vaddr;

	vaddr = vmap(buffer->pages, buffer->pagecount, VM_MAP, PAGE_KERNEL);
	if (!vaddr)
		return ERR_PTR(-ENOMEM);

	return vaddr;
}

static int cma_heap_vmap(struct dma_buf *dmabuf, struct dma_buf_map *map)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;
	void *vaddr;
	int ret = 0;

	mutex_lock(&buffer->lock);
	if (buffer->vmap_cnt) {
		buffer->vmap_cnt++;
		dma_buf_map_set_vaddr(map, buffer->vaddr);
		goto out;
	}

	vaddr = cma_heap_do_vmap(buffer);
	if (IS_ERR(vaddr)) {
		ret = PTR_ERR(vaddr);
		goto out;
	}
	buffer->vaddr = vaddr;
	buffer->vmap_cnt++;
	dma_buf_map_set_vaddr(map, buffer->vaddr);
out:
	mutex_unlock(&buffer->lock);

	return ret;
}

static void cma_heap_vunmap(struct dma_buf *dmabuf, struct dma_buf_map *map)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	if (!--buffer->vmap_cnt) {
		vunmap(buffer->vaddr);
		buffer->vaddr = NULL;
	}
	mutex_unlock(&buffer->lock);
	dma_buf_map_clear(map);
}
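
/*
 * Final release: drop any leftover kernel mapping, free the page array and
 * hand the contiguous allocation back to the CMA area.
 */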
static void cma_heap_dma_buf_release(struct dma_buf *dmabuf)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;
	struct cma_heap *cma_heap = buffer->heap;

	if (buffer->vmap_cnt > 0) {
		WARN(1, "%s: buffer still mapped in the kernel\n", __func__);
		vunmap(buffer->vaddr);
		buffer->vaddr = NULL;
	}

	/* free page list */
	kfree(buffer->pages);
	/* release memory */
	cma_release(cma_heap->cma, buffer->cma_pages, buffer->pagecount);
	kfree(buffer);
}

static const struct dma_buf_ops cma_heap_buf_ops = {
	.attach = cma_heap_attach,
	.detach = cma_heap_detach,
	.map_dma_buf = cma_heap_map_dma_buf,
	.unmap_dma_buf = cma_heap_unmap_dma_buf,
	.begin_cpu_access = cma_heap_dma_buf_begin_cpu_access,
	.end_cpu_access = cma_heap_dma_buf_end_cpu_access,
	.mmap = cma_heap_mmap,
	.vmap = cma_heap_vmap,
	.vunmap = cma_heap_vunmap,
	.release = cma_heap_dma_buf_release,
};
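
/*
 * Allocation path: reserve physically contiguous pages from the heap's CMA
 * area, zero them, build a page array for mmap/vmap, and export the result
 * as a dma-buf file descriptor.
 */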
static int cma_heap_allocate(struct dma_heap *heap,
			     unsigned long len,
			     unsigned long fd_flags,
			     unsigned long heap_flags)
{
	struct cma_heap *cma_heap = dma_heap_get_drvdata(heap);
	struct cma_heap_buffer *buffer;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	size_t size = PAGE_ALIGN(len);
	pgoff_t pagecount = size >> PAGE_SHIFT;
	unsigned long align = get_order(size);
	struct page *cma_pages;
	struct dma_buf *dmabuf;
	int ret = -ENOMEM;
	pgoff_t pg;

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	INIT_LIST_HEAD(&buffer->attachments);
	mutex_init(&buffer->lock);
	buffer->len = size;

	if (align > CONFIG_CMA_ALIGNMENT)
		align = CONFIG_CMA_ALIGNMENT;

	cma_pages = cma_alloc(cma_heap->cma, pagecount, align, false);
	if (!cma_pages)
		goto free_buffer;

	/* Clear the cma pages */
	if (PageHighMem(cma_pages)) {
		unsigned long nr_clear_pages = pagecount;
		struct page *page = cma_pages;

		while (nr_clear_pages > 0) {
			void *vaddr = kmap_atomic(page);

			memset(vaddr, 0, PAGE_SIZE);
			kunmap_atomic(vaddr);
			/*
			 * Avoid wasting time zeroing memory if the process
			 * has been killed by SIGKILL
			 */
			if (fatal_signal_pending(current))
				goto free_cma;
			page++;
			nr_clear_pages--;
		}
	} else {
		memset(page_address(cma_pages), 0, size);
	}

	buffer->pages = kmalloc_array(pagecount, sizeof(*buffer->pages), GFP_KERNEL);
	if (!buffer->pages) {
		ret = -ENOMEM;
		goto free_cma;
	}

	for (pg = 0; pg < pagecount; pg++)
		buffer->pages[pg] = &cma_pages[pg];

	buffer->cma_pages = cma_pages;
	buffer->heap = cma_heap;
	buffer->pagecount = pagecount;

	/* create the dmabuf */
	exp_info.ops = &cma_heap_buf_ops;
	exp_info.size = buffer->len;
	exp_info.flags = fd_flags;
	exp_info.priv = buffer;
	dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(dmabuf)) {
		ret = PTR_ERR(dmabuf);
		goto free_pages;
	}

	ret = dma_buf_fd(dmabuf, fd_flags);
	if (ret < 0) {
		dma_buf_put(dmabuf);
		/* just return, as put will call release and that will free */
		return ret;
	}

	return ret;

free_pages:
	kfree(buffer->pages);
free_cma:
	cma_release(cma_heap->cma, cma_pages, pagecount);
free_buffer:
	kfree(buffer);

	return ret;
}

static const struct dma_heap_ops cma_heap_ops = {
	.allocate = cma_heap_allocate,
};
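
/*
 * Registration: wrap a CMA area in a dma-heap so it is exposed under
 * /dev/dma_heap/<name>; only the system default CMA area is added here.
 */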
static int __add_cma_heap(struct cma *cma, void *data)
{
	struct cma_heap *cma_heap;
	struct dma_heap_export_info exp_info;

	cma_heap = kzalloc(sizeof(*cma_heap), GFP_KERNEL);
	if (!cma_heap)
		return -ENOMEM;
	cma_heap->cma = cma;

	exp_info.name = cma_get_name(cma);
	exp_info.ops = &cma_heap_ops;
	exp_info.priv = cma_heap;

	cma_heap->heap = dma_heap_add(&exp_info);
	if (IS_ERR(cma_heap->heap)) {
		int ret = PTR_ERR(cma_heap->heap);

		kfree(cma_heap);
		return ret;
	}

	return 0;
}

static int add_default_cma_heap(void)
{
	struct cma *default_cma = dev_get_cma_area(NULL);
	int ret = 0;

	if (default_cma)
		ret = __add_cma_heap(default_cma, NULL);

	return ret;
}
module_init(add_default_cma_heap);
MODULE_DESCRIPTION("DMA-BUF CMA Heap");
MODULE_LICENSE("GPL v2");