// SPDX-License-Identifier: GPL-2.0
/*
 * DMABUF CMA heap exporter
 *
 * Copyright (C) 2012, 2019 Linaro Ltd.
 * Author: <benjamin.gaignard@linaro.org> for ST-Ericsson.
 */

#include <linux/cma.h>
#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/dma-heap.h>
#include <linux/dma-contiguous.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/sched/signal.h>

#include "heap-helpers.h"
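
/*
 * Per-heap private data: binds the registered dma_heap handle to the
 * CMA area it allocates from. Stored as drvdata via exp_info.priv.
 */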
struct cma_heap {
	struct dma_heap *heap;
	struct cma *cma;
};
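
/*
 * Release callback invoked by the heap helpers when the last reference
 * to the dma-buf is dropped: free the page pointer array and return the
 * contiguous range to the CMA area.
 */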
static void cma_heap_free(struct heap_helper_buffer *buffer)
{
	struct cma_heap *cma_heap = dma_heap_get_drvdata(buffer->heap);
	unsigned long nr_pages = buffer->pagecount;
	struct page *cma_pages = buffer->priv_virt;

	/* free page list */
	kfree(buffer->pages);
	/* release memory */
	cma_release(cma_heap->cma, cma_pages, nr_pages);
	kfree(buffer);
}

/* dmabuf heap CMA operations functions */
static int cma_heap_allocate(struct dma_heap *heap,
			     unsigned long len,
			     unsigned long fd_flags,
			     unsigned long heap_flags)
{
	struct cma_heap *cma_heap = dma_heap_get_drvdata(heap);
	struct heap_helper_buffer *helper_buffer;
	struct page *cma_pages;
	size_t size = PAGE_ALIGN(len);
	unsigned long nr_pages = size >> PAGE_SHIFT;
	unsigned long align = get_order(size);
	struct dma_buf *dmabuf;
	int ret = -ENOMEM;
	pgoff_t pg;

	if (align > CONFIG_CMA_ALIGNMENT)
		align = CONFIG_CMA_ALIGNMENT;

	helper_buffer = kzalloc(sizeof(*helper_buffer), GFP_KERNEL);
	if (!helper_buffer)
		return -ENOMEM;

	init_heap_helper_buffer(helper_buffer, cma_heap_free);
	helper_buffer->heap = heap;
	helper_buffer->size = len;

	cma_pages = cma_alloc(cma_heap->cma, nr_pages, align, false);
	if (!cma_pages)
		goto free_buf;
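
	/*
	 * Zero the buffer before exposing it to userspace. Highmem pages
	 * have no permanent kernel mapping, so map and clear them one at
	 * a time; lowmem pages can be cleared through page_address().
	 */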
	if (PageHighMem(cma_pages)) {
		unsigned long nr_clear_pages = nr_pages;
		struct page *page = cma_pages;

		while (nr_clear_pages > 0) {
			void *vaddr = kmap_atomic(page);

			memset(vaddr, 0, PAGE_SIZE);
			kunmap_atomic(vaddr);
			/*
			 * Avoid wasting time zeroing memory if the process
			 * has been killed by SIGKILL
			 */
			if (fatal_signal_pending(current))
				goto free_cma;
			page++;
			nr_clear_pages--;
		}
	} else {
		memset(page_address(cma_pages), 0, size);
	}
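
	/*
	 * Build an array of page pointers covering the contiguous range;
	 * the heap helpers use it to construct scatterlists and to service
	 * mmap faults on the exported dma-buf.
	 */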
	helper_buffer->pagecount = nr_pages;
	helper_buffer->pages = kmalloc_array(helper_buffer->pagecount,
					     sizeof(*helper_buffer->pages),
					     GFP_KERNEL);
	if (!helper_buffer->pages) {
		ret = -ENOMEM;
		goto free_cma;
	}

	for (pg = 0; pg < helper_buffer->pagecount; pg++)
		helper_buffer->pages[pg] = &cma_pages[pg];

	/* create the dmabuf */
	dmabuf = heap_helper_export_dmabuf(helper_buffer, fd_flags);
	if (IS_ERR(dmabuf)) {
		ret = PTR_ERR(dmabuf);
		goto free_pages;
	}

	helper_buffer->dmabuf = dmabuf;
	helper_buffer->priv_virt = cma_pages;

	ret = dma_buf_fd(dmabuf, fd_flags);
	if (ret < 0) {
		dma_buf_put(dmabuf);
		/* just return, as put will call release and that will free */
		return ret;
	}

	return ret;
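
	/* Error unwind: undo the allocations in reverse order. */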
free_pages:
	kfree(helper_buffer->pages);
free_cma:
	cma_release(cma_heap->cma, cma_pages, nr_pages);
free_buf:
	kfree(helper_buffer);
	return ret;
}

static const struct dma_heap_ops cma_heap_ops = {
	.allocate = cma_heap_allocate,
};

static int __add_cma_heap(struct cma *cma, void *data)
{
	struct cma_heap *cma_heap;
	struct dma_heap_export_info exp_info;

	cma_heap = kzalloc(sizeof(*cma_heap), GFP_KERNEL);
	if (!cma_heap)
		return -ENOMEM;
	cma_heap->cma = cma;

	exp_info.name = cma_get_name(cma);
	exp_info.ops = &cma_heap_ops;
	exp_info.priv = cma_heap;

	cma_heap->heap = dma_heap_add(&exp_info);
	if (IS_ERR(cma_heap->heap)) {
		int ret = PTR_ERR(cma_heap->heap);

		kfree(cma_heap);
		return ret;
	}

	return 0;
}
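
/*
 * Register only the default CMA area (the one returned by
 * dev_get_cma_area(NULL)) as a dma-buf heap at module load; it appears
 * under /dev/dma_heap/ with the name reported by cma_get_name().
 */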
static int add_default_cma_heap(void)
{
	struct cma *default_cma = dev_get_cma_area(NULL);
	int ret = 0;

	if (default_cma)
		ret = __add_cma_heap(default_cma, NULL);

	return ret;
}
module_init(add_default_cma_heap);
MODULE_DESCRIPTION("DMA-BUF CMA Heap");
MODULE_LICENSE("GPL v2");
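
/*
 * Illustrative userspace sketch (not part of this module): allocating a
 * buffer from this heap through the dma-heap character device with the
 * DMA_HEAP_IOCTL_ALLOC ioctl. The device name "reserved" below is an
 * assumption; in practice it is whatever cma_get_name() reported for the
 * default CMA area. Guarded out so the kernel file still builds.
 */
#if 0
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/dma-heap.h>

static int alloc_from_cma_heap(size_t len)
{
	struct dma_heap_allocation_data data;
	int heap_fd, ret;

	memset(&data, 0, sizeof(data));
	data.len = len;				/* rounded up to PAGE_SIZE by the heap */
	data.fd_flags = O_RDWR | O_CLOEXEC;	/* flags for the new dma-buf fd */

	/* assumed heap name; see the comment above */
	heap_fd = open("/dev/dma_heap/reserved", O_RDONLY | O_CLOEXEC);
	if (heap_fd < 0)
		return -1;

	ret = ioctl(heap_fd, DMA_HEAP_IOCTL_ALLOC, &data);
	close(heap_fd);
	if (ret < 0)
		return -1;

	/* data.fd is a dma-buf fd: mmap() it or pass it to a driver */
	return data.fd;
}
#endif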