// SPDX-License-Identifier: GPL-2.0
/*
 * DMABUF System heap exporter
 *
 * Copyright (C) 2011 Google, Inc.
 * Copyright (C) 2019, 2020 Linaro Ltd.
 *
 * Portions based off of Andrew Davis' SRAM heap:
 * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
 *      Andrew F. Davis <afd@ti.com>
 */
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/dma-heap.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

static struct dma_heap *sys_heap;
struct system_heap_buffer {
        struct dma_heap *heap;
        struct list_head attachments;
        struct mutex lock;
        unsigned long len;
        struct sg_table sg_table;
        int vmap_cnt;
        void *vaddr;
};

struct dma_heap_attachment {
        struct device *dev;
        struct sg_table *table;
        struct list_head list;
        bool mapped;
};
#define HIGH_ORDER_GFP  (((GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN \
                                | __GFP_NORETRY) & ~__GFP_RECLAIM) \
                                | __GFP_COMP)
#define LOW_ORDER_GFP (GFP_HIGHUSER | __GFP_ZERO | __GFP_COMP)
static gfp_t order_flags[] = {HIGH_ORDER_GFP, LOW_ORDER_GFP, LOW_ORDER_GFP};
/*
 * The selection of the orders used for allocation (1MB, 64K, 4K) is designed
 * to match with the sizes often found in IOMMUs. Using order 4 pages instead
 * of order 0 pages can significantly improve the performance of many IOMMUs
 * by reducing TLB pressure and time spent updating page tables.
 */
static const unsigned int orders[] = {8, 4, 0};
#define NUM_ORDERS ARRAY_SIZE(orders)
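
/*
 * For reference (assuming the common 4K PAGE_SIZE): order 8 covers
 * 2^8 pages = 1MB, order 4 covers 2^4 pages = 64K, and order 0 is a
 * single 4K page, which is where the sizes quoted above come from.
 */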

/* Duplicate the buffer's scatterlist so each attachment can hold its own mapping. */
static struct sg_table *dup_sg_table(struct sg_table *table)
{
        struct sg_table *new_table;
        int ret, i;
        struct scatterlist *sg, *new_sg;

        new_table = kzalloc(sizeof(*new_table), GFP_KERNEL);
        if (!new_table)
                return ERR_PTR(-ENOMEM);

        ret = sg_alloc_table(new_table, table->orig_nents, GFP_KERNEL);
        if (ret) {
                kfree(new_table);
                return ERR_PTR(-ENOMEM);
        }

        new_sg = new_table->sgl;
        for_each_sgtable_sg(table, sg, i) {
                sg_set_page(new_sg, sg_page(sg), sg->length, sg->offset);
                new_sg = sg_next(new_sg);
        }

        return new_table;
}

static int system_heap_attach(struct dma_buf *dmabuf,
                              struct dma_buf_attachment *attachment)
{
        struct system_heap_buffer *buffer = dmabuf->priv;
        struct dma_heap_attachment *a;
        struct sg_table *table;

        a = kzalloc(sizeof(*a), GFP_KERNEL);
        if (!a)
                return -ENOMEM;

        table = dup_sg_table(&buffer->sg_table);
        if (IS_ERR(table)) {
                kfree(a);
                return -ENOMEM;
        }

        a->table = table;
        a->dev = attachment->dev;
        INIT_LIST_HEAD(&a->list);
        a->mapped = false;

        attachment->priv = a;

        mutex_lock(&buffer->lock);
        list_add(&a->list, &buffer->attachments);
        mutex_unlock(&buffer->lock);

        return 0;
}

static void system_heap_detach(struct dma_buf *dmabuf,
                               struct dma_buf_attachment *attachment)
{
        struct system_heap_buffer *buffer = dmabuf->priv;
        struct dma_heap_attachment *a = attachment->priv;

        mutex_lock(&buffer->lock);
        list_del(&a->list);
        mutex_unlock(&buffer->lock);

        sg_free_table(a->table);
        kfree(a->table);
        kfree(a);
}

static struct sg_table *system_heap_map_dma_buf(struct dma_buf_attachment *attachment,
                                                enum dma_data_direction direction)
{
        struct dma_heap_attachment *a = attachment->priv;
        struct sg_table *table = a->table;
        int ret;

        ret = dma_map_sgtable(attachment->dev, table, direction, 0);
        if (ret)
                return ERR_PTR(ret);

        a->mapped = true;
        return table;
}

static void system_heap_unmap_dma_buf(struct dma_buf_attachment *attachment,
                                      struct sg_table *table,
                                      enum dma_data_direction direction)
{
        struct dma_heap_attachment *a = attachment->priv;

        a->mapped = false;
        dma_unmap_sgtable(attachment->dev, table, direction, 0);
}

static int system_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
                                                enum dma_data_direction direction)
{
        struct system_heap_buffer *buffer = dmabuf->priv;
        struct dma_heap_attachment *a;

        mutex_lock(&buffer->lock);

        if (buffer->vmap_cnt)
                invalidate_kernel_vmap_range(buffer->vaddr, buffer->len);

        list_for_each_entry(a, &buffer->attachments, list) {
                if (!a->mapped)
                        continue;
                dma_sync_sgtable_for_cpu(a->dev, a->table, direction);
        }
        mutex_unlock(&buffer->lock);

        return 0;
}

static int system_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
                                              enum dma_data_direction direction)
{
        struct system_heap_buffer *buffer = dmabuf->priv;
        struct dma_heap_attachment *a;

        mutex_lock(&buffer->lock);

        if (buffer->vmap_cnt)
                flush_kernel_vmap_range(buffer->vaddr, buffer->len);

        list_for_each_entry(a, &buffer->attachments, list) {
                if (!a->mapped)
                        continue;
                dma_sync_sgtable_for_device(a->dev, a->table, direction);
        }
        mutex_unlock(&buffer->lock);

        return 0;
}

static int system_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
        struct system_heap_buffer *buffer = dmabuf->priv;
        struct sg_table *table = &buffer->sg_table;
        unsigned long addr = vma->vm_start;
        struct sg_page_iter piter;
        int ret;

        for_each_sgtable_page(table, &piter, vma->vm_pgoff) {
                struct page *page = sg_page_iter_page(&piter);

                ret = remap_pfn_range(vma, addr, page_to_pfn(page), PAGE_SIZE,
                                      vma->vm_page_prot);
                if (ret)
                        return ret;
                addr += PAGE_SIZE;
                if (addr >= vma->vm_end)
                        return 0;
        }
        return 0;
}
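
/*
 * Note: userspace reaches the mmap op above by calling mmap() on the
 * exported dma-buf fd. A minimal sketch, assuming buf_fd and len come
 * from a prior allocation on this heap:
 *
 *      void *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
 *                     buf_fd, 0);
 */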

/* Map the buffer's pages into one contiguous kernel virtual range. */
static void *system_heap_do_vmap(struct system_heap_buffer *buffer)
{
        struct sg_table *table = &buffer->sg_table;
        int npages = PAGE_ALIGN(buffer->len) / PAGE_SIZE;
        struct page **pages = vmalloc(sizeof(struct page *) * npages);
        struct page **tmp = pages;
        struct sg_page_iter piter;
        void *vaddr;

        if (!pages)
                return ERR_PTR(-ENOMEM);

        for_each_sgtable_page(table, &piter, 0) {
                WARN_ON(tmp - pages >= npages);
                *tmp++ = sg_page_iter_page(&piter);
        }

        vaddr = vmap(pages, npages, VM_MAP, PAGE_KERNEL);
        vfree(pages);

        if (!vaddr)
                return ERR_PTR(-ENOMEM);

        return vaddr;
}

static int system_heap_vmap(struct dma_buf *dmabuf, struct dma_buf_map *map)
{
        struct system_heap_buffer *buffer = dmabuf->priv;
        void *vaddr;
        int ret = 0;

        mutex_lock(&buffer->lock);
        if (buffer->vmap_cnt) {
                buffer->vmap_cnt++;
                dma_buf_map_set_vaddr(map, buffer->vaddr);
                goto out;
        }

        vaddr = system_heap_do_vmap(buffer);
        if (IS_ERR(vaddr)) {
                ret = PTR_ERR(vaddr);
                goto out;
        }

        buffer->vaddr = vaddr;
        buffer->vmap_cnt++;
        dma_buf_map_set_vaddr(map, buffer->vaddr);
out:
        mutex_unlock(&buffer->lock);

        return ret;
}

static void system_heap_vunmap(struct dma_buf *dmabuf, struct dma_buf_map *map)
{
        struct system_heap_buffer *buffer = dmabuf->priv;

        mutex_lock(&buffer->lock);
        if (!--buffer->vmap_cnt) {
                vunmap(buffer->vaddr);
                buffer->vaddr = NULL;
        }
        mutex_unlock(&buffer->lock);
        dma_buf_map_clear(map);
}

static void system_heap_dma_buf_release(struct dma_buf *dmabuf)
{
        struct system_heap_buffer *buffer = dmabuf->priv;
        struct sg_table *table;
        struct scatterlist *sg;
        int i;

        table = &buffer->sg_table;
        for_each_sg(table->sgl, sg, table->nents, i) {
                struct page *page = sg_page(sg);

                __free_pages(page, compound_order(page));
        }
        sg_free_table(table);
        kfree(buffer);
}

static const struct dma_buf_ops system_heap_buf_ops = {
        .attach = system_heap_attach,
        .detach = system_heap_detach,
        .map_dma_buf = system_heap_map_dma_buf,
        .unmap_dma_buf = system_heap_unmap_dma_buf,
        .begin_cpu_access = system_heap_dma_buf_begin_cpu_access,
        .end_cpu_access = system_heap_dma_buf_end_cpu_access,
        .mmap = system_heap_mmap,
        .vmap = system_heap_vmap,
        .vunmap = system_heap_vunmap,
        .release = system_heap_dma_buf_release,
};

static struct page *alloc_largest_available(unsigned long size,
                                            unsigned int max_order)
{
        struct page *page;
        int i;

        for (i = 0; i < NUM_ORDERS; i++) {
                if (size < (PAGE_SIZE << orders[i]))
                        continue;
                if (max_order < orders[i])
                        continue;

                page = alloc_pages(order_flags[i], orders[i]);
                if (!page)
                        continue;
                return page;
        }
        return NULL;
}
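
/*
 * Illustration (hypothetical request, 4K pages assumed): asking for
 * 1MB + 68K is satisfied greedily as one order-8 chunk (1MB), one
 * order-4 chunk (64K) and one order-0 page (4K). Because the caller
 * lowers max_order to the order of the last page returned, a failed
 * high-order allocation is never retried for the rest of the buffer.
 */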

static int system_heap_allocate(struct dma_heap *heap,
                                unsigned long len,
                                unsigned long fd_flags,
                                unsigned long heap_flags)
{
        struct system_heap_buffer *buffer;
        DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
        unsigned long size_remaining = len;
        unsigned int max_order = orders[0];
        struct dma_buf *dmabuf;
        struct sg_table *table;
        struct scatterlist *sg;
        struct list_head pages;
        struct page *page, *tmp_page;
        int i, ret = -ENOMEM;

        buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
        if (!buffer)
                return -ENOMEM;

        INIT_LIST_HEAD(&buffer->attachments);
        mutex_init(&buffer->lock);
        buffer->heap = heap;
        buffer->len = len;

        INIT_LIST_HEAD(&pages);
        i = 0;
        while (size_remaining > 0) {
                /*
                 * Avoid trying to allocate memory if the process
                 * has been killed by SIGKILL
                 */
                if (fatal_signal_pending(current))
                        goto free_buffer;

                page = alloc_largest_available(size_remaining, max_order);
                if (!page)
                        goto free_buffer;

                list_add_tail(&page->lru, &pages);
                size_remaining -= page_size(page);
                max_order = compound_order(page);
                i++;
        }

        table = &buffer->sg_table;
        if (sg_alloc_table(table, i, GFP_KERNEL))
                goto free_buffer;

        sg = table->sgl;
        list_for_each_entry_safe(page, tmp_page, &pages, lru) {
                sg_set_page(sg, page, page_size(page), 0);
                sg = sg_next(sg);
                list_del(&page->lru);
        }

        /* create the dmabuf */
        exp_info.ops = &system_heap_buf_ops;
        exp_info.size = buffer->len;
        exp_info.flags = fd_flags;
        exp_info.priv = buffer;
        dmabuf = dma_buf_export(&exp_info);
        if (IS_ERR(dmabuf)) {
                ret = PTR_ERR(dmabuf);
                goto free_pages;
        }

        ret = dma_buf_fd(dmabuf, fd_flags);
        if (ret < 0) {
                dma_buf_put(dmabuf);
                /* just return, as put will call release and that will free */
                return ret;
        }
        return ret;

free_pages:
        for_each_sgtable_sg(table, sg, i) {
                struct page *p = sg_page(sg);

                __free_pages(p, compound_order(p));
        }
        sg_free_table(table);
free_buffer:
        list_for_each_entry_safe(page, tmp_page, &pages, lru)
                __free_pages(page, compound_order(page));
        kfree(buffer);

        return ret;
}

static const struct dma_heap_ops system_heap_ops = {
        .allocate = system_heap_allocate,
};

static int system_heap_create(void)
{
        struct dma_heap_export_info exp_info;

        exp_info.name = "system";
        exp_info.ops = &system_heap_ops;
        exp_info.priv = NULL;

        sys_heap = dma_heap_add(&exp_info);
        if (IS_ERR(sys_heap))
                return PTR_ERR(sys_heap);

        return 0;
}
module_init(system_heap_create);
MODULE_LICENSE("GPL v2");
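
/*
 * Usage note (not part of the driver itself): userspace allocates from this
 * heap through /dev/dma_heap/system using the DMA_HEAP_IOCTL_ALLOC ioctl
 * declared in the UAPI header <linux/dma-heap.h>. A minimal sketch; error
 * handling is omitted and the 4096-byte length is an arbitrary example:
 *
 *      struct dma_heap_allocation_data data = {
 *              .len = 4096,
 *              .fd_flags = O_RDWR | O_CLOEXEC,
 *      };
 *      int heap_fd = open("/dev/dma_heap/system", O_RDONLY);
 *
 *      ioctl(heap_fd, DMA_HEAP_IOCTL_ALLOC, &data);
 *      close(heap_fd);
 *
 * On success, data.fd holds the exported dma-buf fd backed by the buffer
 * assembled in system_heap_allocate() above.
 */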