// SPDX-License-Identifier: GPL-2.0
/*
 * DMABUF System heap exporter
 *
 * Copyright (C) 2011 Google, Inc.
 * Copyright (C) 2019, 2020 Linaro Ltd.
 *
 * Portions based off of Andrew Davis' SRAM heap:
 * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
 *	Andrew F. Davis <afd@ti.com>
 */

#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/dma-heap.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

static struct dma_heap *sys_heap;

struct system_heap_buffer {
	struct dma_heap *heap;
	struct list_head attachments;
	struct mutex lock;
	unsigned long len;
	struct sg_table sg_table;
	int vmap_cnt;
	void *vaddr;
};

struct dma_heap_attachment {
	struct device *dev;
	struct sg_table *table;
	struct list_head list;
	bool mapped;
};

#define HIGH_ORDER_GFP  (((GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN \
				| __GFP_NORETRY) & ~__GFP_RECLAIM) \
				| __GFP_COMP)
#define LOW_ORDER_GFP (GFP_HIGHUSER | __GFP_ZERO | __GFP_COMP)
static gfp_t order_flags[] = {HIGH_ORDER_GFP, LOW_ORDER_GFP, LOW_ORDER_GFP};
/*
 * The selection of the orders used for allocation (1MB, 64K, 4K) is designed
 * to match with the sizes often found in IOMMUs. Using order 4 pages instead
 * of order 0 pages can significantly improve the performance of many IOMMUs
 * by reducing TLB pressure and time spent updating page tables.
 */
static const unsigned int orders[] = {8, 4, 0};
#define NUM_ORDERS ARRAY_SIZE(orders)

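/*
 * With the common 4K PAGE_SIZE those orders work out to:
 *	order 8 -> 2^8 * 4K = 1M chunks
 *	order 4 -> 2^4 * 4K = 64K chunks
 *	order 0 -> single 4K pages
 */
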
static struct sg_table *dup_sg_table(struct sg_table *table)
{
	struct sg_table *new_table;
	int ret, i;
	struct scatterlist *sg, *new_sg;

	new_table = kzalloc(sizeof(*new_table), GFP_KERNEL);
	if (!new_table)
		return ERR_PTR(-ENOMEM);

	ret = sg_alloc_table(new_table, table->orig_nents, GFP_KERNEL);
	if (ret) {
		kfree(new_table);
		return ERR_PTR(-ENOMEM);
	}

	new_sg = new_table->sgl;
	for_each_sgtable_sg(table, sg, i) {
		sg_set_page(new_sg, sg_page(sg), sg->length, sg->offset);
		new_sg = sg_next(new_sg);
	}

	return new_table;
}

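/*
 * Give each attachment its own copy of the buffer's scatterlist so that
 * per-device DMA mappings can be created and torn down independently.
 */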
static int system_heap_attach(struct dma_buf *dmabuf,
			      struct dma_buf_attachment *attachment)
{
	struct system_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a;
	struct sg_table *table;

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a)
		return -ENOMEM;

	table = dup_sg_table(&buffer->sg_table);
	if (IS_ERR(table)) {
		kfree(a);
		return -ENOMEM;
	}

	a->table = table;
	a->dev = attachment->dev;
	INIT_LIST_HEAD(&a->list);
	a->mapped = false;

	attachment->priv = a;

	mutex_lock(&buffer->lock);
	list_add(&a->list, &buffer->attachments);
	mutex_unlock(&buffer->lock);

	return 0;
}

static void system_heap_detach(struct dma_buf *dmabuf,
			       struct dma_buf_attachment *attachment)
{
	struct system_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a = attachment->priv;

	mutex_lock(&buffer->lock);
	list_del(&a->list);
	mutex_unlock(&buffer->lock);

	sg_free_table(a->table);
	kfree(a->table);
	kfree(a);
}

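/*
 * Map the attachment's private copy of the scatterlist for DMA on the
 * attaching device; the returned table is what the importer uses.
 */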
static struct sg_table *system_heap_map_dma_buf(struct dma_buf_attachment *attachment,
						enum dma_data_direction direction)
{
	struct dma_heap_attachment *a = attachment->priv;
	struct sg_table *table = a->table;
	int ret;

	ret = dma_map_sgtable(attachment->dev, table, direction, 0);
	if (ret)
		return ERR_PTR(ret);

	a->mapped = true;
	return table;
}

static void system_heap_unmap_dma_buf(struct dma_buf_attachment *attachment,
				      struct sg_table *table,
				      enum dma_data_direction direction)
{
	struct dma_heap_attachment *a = attachment->priv;

	a->mapped = false;
	dma_unmap_sgtable(attachment->dev, table, direction, 0);
}

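/*
 * Before the CPU touches the buffer, invalidate any kernel vmap alias and
 * sync every currently mapped attachment for CPU access.
 */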
static int system_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
						enum dma_data_direction direction)
{
	struct system_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a;

	mutex_lock(&buffer->lock);

	if (buffer->vmap_cnt)
		invalidate_kernel_vmap_range(buffer->vaddr, buffer->len);

	list_for_each_entry(a, &buffer->attachments, list) {
		if (!a->mapped)
			continue;
		dma_sync_sgtable_for_cpu(a->dev, a->table, direction);
	}
	mutex_unlock(&buffer->lock);

	return 0;
}

static int system_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
					      enum dma_data_direction direction)
{
	struct system_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a;

	mutex_lock(&buffer->lock);

	if (buffer->vmap_cnt)
		flush_kernel_vmap_range(buffer->vaddr, buffer->len);

	list_for_each_entry(a, &buffer->attachments, list) {
		if (!a->mapped)
			continue;
		dma_sync_sgtable_for_device(a->dev, a->table, direction);
	}
	mutex_unlock(&buffer->lock);

	return 0;
}

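/*
 * Map the buffer into userspace page by page; the backing pages are not
 * guaranteed to be physically contiguous, so each one is inserted with
 * remap_pfn_range().
 */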
static int system_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct system_heap_buffer *buffer = dmabuf->priv;
	struct sg_table *table = &buffer->sg_table;
	unsigned long addr = vma->vm_start;
	struct sg_page_iter piter;
	int ret;

	for_each_sgtable_page(table, &piter, vma->vm_pgoff) {
		struct page *page = sg_page_iter_page(&piter);

		ret = remap_pfn_range(vma, addr, page_to_pfn(page), PAGE_SIZE,
				      vma->vm_page_prot);
		if (ret)
			return ret;
		addr += PAGE_SIZE;
		if (addr >= vma->vm_end)
			return 0;
	}
	return 0;
}

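/*
 * Build a contiguous kernel mapping of the buffer by collecting every page
 * in the scatterlist and handing the array to vmap().
 */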
static void *system_heap_do_vmap(struct system_heap_buffer *buffer)
{
	struct sg_table *table = &buffer->sg_table;
	int npages = PAGE_ALIGN(buffer->len) / PAGE_SIZE;
	struct page **pages = vmalloc(sizeof(struct page *) * npages);
	struct page **tmp = pages;
	struct sg_page_iter piter;
	void *vaddr;

	if (!pages)
		return ERR_PTR(-ENOMEM);

	for_each_sgtable_page(table, &piter, 0) {
		WARN_ON(tmp - pages >= npages);
		*tmp++ = sg_page_iter_page(&piter);
	}

	vaddr = vmap(pages, npages, VM_MAP, PAGE_KERNEL);
	vfree(pages);

	if (!vaddr)
		return ERR_PTR(-ENOMEM);

	return vaddr;
}

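/*
 * Kernel mappings are refcounted via vmap_cnt under buffer->lock, so
 * repeated vmap calls share one mapping until the matching vunmaps.
 */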
static int system_heap_vmap(struct dma_buf *dmabuf, struct dma_buf_map *map)
{
	struct system_heap_buffer *buffer = dmabuf->priv;
	void *vaddr;
	int ret = 0;

	mutex_lock(&buffer->lock);
	if (buffer->vmap_cnt) {
		buffer->vmap_cnt++;
		dma_buf_map_set_vaddr(map, buffer->vaddr);
		goto out;
	}

	vaddr = system_heap_do_vmap(buffer);
	if (IS_ERR(vaddr)) {
		ret = PTR_ERR(vaddr);
		goto out;
	}

	buffer->vaddr = vaddr;
	buffer->vmap_cnt++;
	dma_buf_map_set_vaddr(map, buffer->vaddr);
out:
	mutex_unlock(&buffer->lock);

	return ret;
}

static void system_heap_vunmap(struct dma_buf *dmabuf, struct dma_buf_map *map)
{
	struct system_heap_buffer *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	if (!--buffer->vmap_cnt) {
		vunmap(buffer->vaddr);
		buffer->vaddr = NULL;
	}
	mutex_unlock(&buffer->lock);
	dma_buf_map_clear(map);
}

static void system_heap_dma_buf_release(struct dma_buf *dmabuf)
{
	struct system_heap_buffer *buffer = dmabuf->priv;
	struct sg_table *table;
	struct scatterlist *sg;
	int i;

	table = &buffer->sg_table;
	for_each_sg(table->sgl, sg, table->nents, i) {
		struct page *page = sg_page(sg);

		__free_pages(page, compound_order(page));
	}
	sg_free_table(table);
	kfree(buffer);
}

static const struct dma_buf_ops system_heap_buf_ops = {
	.attach = system_heap_attach,
	.detach = system_heap_detach,
	.map_dma_buf = system_heap_map_dma_buf,
	.unmap_dma_buf = system_heap_unmap_dma_buf,
	.begin_cpu_access = system_heap_dma_buf_begin_cpu_access,
	.end_cpu_access = system_heap_dma_buf_end_cpu_access,
	.mmap = system_heap_mmap,
	.vmap = system_heap_vmap,
	.vunmap = system_heap_vunmap,
	.release = system_heap_dma_buf_release,
};

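/*
 * Try the largest order that still fits the remaining size and does not
 * exceed max_order, falling back to smaller orders when a higher-order
 * allocation fails.
 */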
static struct page *alloc_largest_available(unsigned long size,
					    unsigned int max_order)
{
	struct page *page;
	int i;

	for (i = 0; i < NUM_ORDERS; i++) {
		if (size < (PAGE_SIZE << orders[i]))
			continue;
		if (max_order < orders[i])
			continue;

		page = alloc_pages(order_flags[i], orders[i]);
		if (!page)
			continue;
		return page;
	}
	return NULL;
}

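/*
 * Allocate len bytes as a list of pages (largest orders first), wrap them
 * in an sg_table and export the result as a dma-buf file descriptor.
 */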
static int system_heap_allocate(struct dma_heap *heap,
				unsigned long len,
				unsigned long fd_flags,
				unsigned long heap_flags)
{
	struct system_heap_buffer *buffer;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	unsigned long size_remaining = len;
	unsigned int max_order = orders[0];
	struct dma_buf *dmabuf;
	struct sg_table *table;
	struct scatterlist *sg;
	struct list_head pages;
	struct page *page, *tmp_page;
	int i, ret = -ENOMEM;

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	INIT_LIST_HEAD(&buffer->attachments);
	mutex_init(&buffer->lock);
	buffer->heap = heap;
	buffer->len = len;

	INIT_LIST_HEAD(&pages);
	i = 0;
	while (size_remaining > 0) {
		/*
		 * Avoid trying to allocate memory if the process
		 * has been killed by SIGKILL
		 */
		if (fatal_signal_pending(current))
			goto free_buffer;

		page = alloc_largest_available(size_remaining, max_order);
		if (!page)
			goto free_buffer;

		list_add_tail(&page->lru, &pages);
		size_remaining -= page_size(page);
		max_order = compound_order(page);
		i++;
	}

	table = &buffer->sg_table;
	if (sg_alloc_table(table, i, GFP_KERNEL))
		goto free_buffer;

	sg = table->sgl;
	list_for_each_entry_safe(page, tmp_page, &pages, lru) {
		sg_set_page(sg, page, page_size(page), 0);
		sg = sg_next(sg);
		list_del(&page->lru);
	}

	/* create the dmabuf */
	exp_info.ops = &system_heap_buf_ops;
	exp_info.size = buffer->len;
	exp_info.flags = fd_flags;
	exp_info.priv = buffer;
	dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(dmabuf)) {
		ret = PTR_ERR(dmabuf);
		goto free_pages;
	}

	ret = dma_buf_fd(dmabuf, fd_flags);
	if (ret < 0) {
		dma_buf_put(dmabuf);
		/* just return, as put will call release and that will free */
		return ret;
	}
	return ret;

free_pages:
	for_each_sgtable_sg(table, sg, i) {
		struct page *p = sg_page(sg);

		__free_pages(p, compound_order(p));
	}
	sg_free_table(table);
free_buffer:
	list_for_each_entry_safe(page, tmp_page, &pages, lru)
		__free_pages(page, compound_order(page));
	kfree(buffer);

	return ret;
}

static const struct dma_heap_ops system_heap_ops = {
	.allocate = system_heap_allocate,
};

static int system_heap_create(void)
{
	struct dma_heap_export_info exp_info;

	exp_info.name = "system";
	exp_info.ops = &system_heap_ops;
	exp_info.priv = NULL;

	sys_heap = dma_heap_add(&exp_info);
	if (IS_ERR(sys_heap))
		return PTR_ERR(sys_heap);

	return 0;
}
module_init(system_heap_create);
MODULE_LICENSE("GPL v2");
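
/*
 * Illustrative userspace usage (not part of this module): once registered,
 * the heap appears as /dev/dma_heap/system and buffers are typically
 * allocated through the generic dma-heap ioctl, e.g.:
 *
 *	struct dma_heap_allocation_data data = {
 *		.len = 4096,
 *		.fd_flags = O_RDWR | O_CLOEXEC,
 *	};
 *	int heap_fd = open("/dev/dma_heap/system", O_RDWR);
 *	int ret = ioctl(heap_fd, DMA_HEAP_IOCTL_ALLOC, &data);
 *
 * On success, data.fd holds a dma-buf fd that can be mmap()ed or passed to
 * a device driver for import.
 */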