// SPDX-License-Identifier: GPL-2.0
#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <uapi/linux/dma-heap.h>

#include "heap-helpers.h"

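/*
 * Initialize the fields of a heap_helper_buffer before it is exported;
 * the caller-supplied free() callback is invoked once the last dma-buf
 * reference to the buffer goes away.
 */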
void init_heap_helper_buffer(struct heap_helper_buffer *buffer,
			     void (*free)(struct heap_helper_buffer *))
{
	buffer->priv_virt = NULL;
	mutex_init(&buffer->lock);
	buffer->vmap_cnt = 0;
	buffer->vaddr = NULL;
	buffer->pagecount = 0;
	buffer->pages = NULL;
	INIT_LIST_HEAD(&buffer->attachments);
	buffer->free = free;
}

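/* Wrap the buffer in a dma-buf that uses the shared heap_helper_ops. */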
struct dma_buf *heap_helper_export_dmabuf(struct heap_helper_buffer *buffer,
					  int fd_flags)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &heap_helper_ops;
	exp_info.size = buffer->size;
	exp_info.flags = fd_flags;
	exp_info.priv = buffer;

	return dma_buf_export(&exp_info);
}

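/* Map the buffer's pages into a contiguous kernel virtual range. */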
static void *dma_heap_map_kernel(struct heap_helper_buffer *buffer)
{
	void *vaddr;

	vaddr = vmap(buffer->pages, buffer->pagecount, VM_MAP, PAGE_KERNEL);
	if (!vaddr)
		return ERR_PTR(-ENOMEM);

	return vaddr;
}

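/*
 * Tear down the buffer: drop any leftover kernel mapping and hand the
 * pages back to the heap via the free() callback set at init time.
 */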
static void dma_heap_buffer_destroy(struct heap_helper_buffer *buffer)
{
	if (buffer->vmap_cnt > 0) {
		WARN(1, "%s: buffer still mapped in the kernel\n", __func__);
		vunmap(buffer->vaddr);
	}

	buffer->free(buffer);
}

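/*
 * vmap_get()/vmap_put() reference count the kernel mapping so repeated
 * dma-buf vmap calls share a single vmap() of the pages.
 */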
static void *dma_heap_buffer_vmap_get(struct heap_helper_buffer *buffer)
{
	void *vaddr;

	if (buffer->vmap_cnt) {
		buffer->vmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = dma_heap_map_kernel(buffer);
	if (IS_ERR(vaddr))
		return vaddr;
	buffer->vaddr = vaddr;
	buffer->vmap_cnt++;

	return vaddr;
}

static void dma_heap_buffer_vmap_put(struct heap_helper_buffer *buffer)
{
	if (!--buffer->vmap_cnt) {
		vunmap(buffer->vaddr);
		buffer->vaddr = NULL;
	}
}

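/* Per-attachment bookkeeping: the attaching device and its sg_table. */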
struct dma_heaps_attachment {
	struct device *dev;
	struct sg_table table;
	struct list_head list;
};

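/*
 * Build an sg_table covering the whole buffer for the attaching device
 * and track the attachment so CPU-access cache maintenance can reach it.
 */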
static int dma_heap_attach(struct dma_buf *dmabuf,
			   struct dma_buf_attachment *attachment)
{
	struct dma_heaps_attachment *a;
	struct heap_helper_buffer *buffer = dmabuf->priv;
	int ret;

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a)
		return -ENOMEM;

	ret = sg_alloc_table_from_pages(&a->table, buffer->pages,
					buffer->pagecount, 0,
					buffer->pagecount << PAGE_SHIFT,
					GFP_KERNEL);
	if (ret) {
		kfree(a);
		return ret;
	}

	a->dev = attachment->dev;
	INIT_LIST_HEAD(&a->list);

	attachment->priv = a;

	mutex_lock(&buffer->lock);
	list_add(&a->list, &buffer->attachments);
	mutex_unlock(&buffer->lock);

	return 0;
}

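/* Undo dma_heap_attach(): unlink the attachment and free its sg_table. */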
static void dma_heap_detach(struct dma_buf *dmabuf,
			    struct dma_buf_attachment *attachment)
{
	struct dma_heaps_attachment *a = attachment->priv;
	struct heap_helper_buffer *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	list_del(&a->list);
	mutex_unlock(&buffer->lock);

	sg_free_table(&a->table);
	kfree(a);
}

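/* Map the attachment's sg_table for DMA by the attached device. */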
static
struct sg_table *dma_heap_map_dma_buf(struct dma_buf_attachment *attachment,
				      enum dma_data_direction direction)
{
	struct dma_heaps_attachment *a = attachment->priv;
	struct sg_table *table;

	table = &a->table;

	if (!dma_map_sg(attachment->dev, table->sgl, table->nents,
			direction))
		table = ERR_PTR(-ENOMEM);
	return table;
}

static void dma_heap_unmap_dma_buf(struct dma_buf_attachment *attachment,
				   struct sg_table *table,
				   enum dma_data_direction direction)
{
	dma_unmap_sg(attachment->dev, table->sgl, table->nents, direction);
}

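/*
 * Userspace mappings are populated on demand: the fault handler inserts
 * the backing page for the faulting offset.
 */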
static vm_fault_t dma_heap_vm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct heap_helper_buffer *buffer = vma->vm_private_data;

	/* pgoff indexes pages[], so it must be strictly below pagecount */
	if (vmf->pgoff >= buffer->pagecount)
		return VM_FAULT_SIGBUS;

	vmf->page = buffer->pages[vmf->pgoff];
	get_page(vmf->page);

	return 0;
}

static const struct vm_operations_struct dma_heap_vm_ops = {
	.fault = dma_heap_vm_fault,
};

static int dma_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct heap_helper_buffer *buffer = dmabuf->priv;

	if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0)
		return -EINVAL;

	vma->vm_ops = &dma_heap_vm_ops;
	vma->vm_private_data = buffer;

	return 0;
}

static void dma_heap_dma_buf_release(struct dma_buf *dmabuf)
{
	struct heap_helper_buffer *buffer = dmabuf->priv;

	dma_heap_buffer_destroy(buffer);
}

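/*
 * CPU access bracketing: sync every attachment's sg_table (and any
 * kernel vmap) so the CPU and device views of the buffer stay coherent.
 */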
static int dma_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
					     enum dma_data_direction direction)
{
	struct heap_helper_buffer *buffer = dmabuf->priv;
	struct dma_heaps_attachment *a;
	int ret = 0;

	mutex_lock(&buffer->lock);

	if (buffer->vmap_cnt)
		invalidate_kernel_vmap_range(buffer->vaddr, buffer->size);

	list_for_each_entry(a, &buffer->attachments, list) {
		dma_sync_sg_for_cpu(a->dev, a->table.sgl, a->table.nents,
				    direction);
	}
	mutex_unlock(&buffer->lock);

	return ret;
}

static int dma_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
					   enum dma_data_direction direction)
{
	struct heap_helper_buffer *buffer = dmabuf->priv;
	struct dma_heaps_attachment *a;

	mutex_lock(&buffer->lock);

	if (buffer->vmap_cnt)
		flush_kernel_vmap_range(buffer->vaddr, buffer->size);

	list_for_each_entry(a, &buffer->attachments, list) {
		dma_sync_sg_for_device(a->dev, a->table.sgl, a->table.nents,
				       direction);
	}
	mutex_unlock(&buffer->lock);

	return 0;
}

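/* dma-buf vmap/vunmap hooks, serialized by the buffer lock. */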
static void *dma_heap_dma_buf_vmap(struct dma_buf *dmabuf)
{
	struct heap_helper_buffer *buffer = dmabuf->priv;
	void *vaddr;

	mutex_lock(&buffer->lock);
	vaddr = dma_heap_buffer_vmap_get(buffer);
	mutex_unlock(&buffer->lock);

	return vaddr;
}

static void dma_heap_dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
{
	struct heap_helper_buffer *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	dma_heap_buffer_vmap_put(buffer);
	mutex_unlock(&buffer->lock);
}

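/* dma_buf_ops shared by all heaps that use these helpers. */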
const struct dma_buf_ops heap_helper_ops = {
	.map_dma_buf = dma_heap_map_dma_buf,
	.unmap_dma_buf = dma_heap_unmap_dma_buf,
	.mmap = dma_heap_mmap,
	.release = dma_heap_dma_buf_release,
	.attach = dma_heap_attach,
	.detach = dma_heap_detach,
	.begin_cpu_access = dma_heap_dma_buf_begin_cpu_access,
	.end_cpu_access = dma_heap_dma_buf_end_cpu_access,
	.vmap = dma_heap_dma_buf_vmap,
	.vunmap = dma_heap_dma_buf_vunmap,
};