// SPDX-License-Identifier: GPL-2.0
/*
 * Framework for userspace DMA-BUF allocations
 *
 * Copyright (C) 2011 Google, Inc.
 * Copyright (C) 2019 Linaro Ltd.
 */
9 #include <linux/cdev.h>
10 #include <linux/device.h>
11 #include <linux/dma-buf.h>
12 #include <linux/dma-heap.h>
13 #include <linux/err.h>
14 #include <linux/list.h>
15 #include <linux/nospec.h>
16 #include <linux/syscalls.h>
17 #include <linux/uaccess.h>
18 #include <linux/xarray.h>
19 #include <uapi/linux/dma-heap.h>
21 #define DEVNAME "dma_heap"
23 #define NUM_HEAP_MINORS 128
26 * struct dma_heap - represents a dmabuf heap in the system
27 * @name: used for debugging/device-node name
28 * @ops: ops struct for this heap
29 * @priv: private data for this heap
30 * @heap_devt: heap device node
31 * @list: list head connecting to list of heaps
32 * @heap_cdev: heap char device
34 * Represents a heap of memory from which buffers can be made.
38 const struct dma_heap_ops
*ops
;
41 struct list_head list
;
42 struct cdev heap_cdev
;
45 static LIST_HEAD(heap_list
);
46 static DEFINE_MUTEX(heap_list_lock
);
47 static dev_t dma_heap_devt
;
48 static struct class *dma_heap_class
;
49 static DEFINE_XARRAY_ALLOC(dma_heap_minors
);
51 static int dma_heap_buffer_alloc(struct dma_heap
*heap
, size_t len
,
55 struct dma_buf
*dmabuf
;
59 * Allocations from all heaps have to begin
60 * and end on page boundaries.
62 len
= PAGE_ALIGN(len
);
66 dmabuf
= heap
->ops
->allocate(heap
, len
, fd_flags
, heap_flags
);
68 return PTR_ERR(dmabuf
);
70 fd
= dma_buf_fd(dmabuf
, fd_flags
);
73 /* just return, as put will call release and that will free */
78 static int dma_heap_open(struct inode
*inode
, struct file
*file
)
80 struct dma_heap
*heap
;
82 heap
= xa_load(&dma_heap_minors
, iminor(inode
));
84 pr_err("dma_heap: minor %d unknown.\n", iminor(inode
));
88 /* instance data as context */
89 file
->private_data
= heap
;
90 nonseekable_open(inode
, file
);
95 static long dma_heap_ioctl_allocate(struct file
*file
, void *data
)
97 struct dma_heap_allocation_data
*heap_allocation
= data
;
98 struct dma_heap
*heap
= file
->private_data
;
101 if (heap_allocation
->fd
)
104 if (heap_allocation
->fd_flags
& ~DMA_HEAP_VALID_FD_FLAGS
)
107 if (heap_allocation
->heap_flags
& ~DMA_HEAP_VALID_HEAP_FLAGS
)
110 fd
= dma_heap_buffer_alloc(heap
, heap_allocation
->len
,
111 heap_allocation
->fd_flags
,
112 heap_allocation
->heap_flags
);
116 heap_allocation
->fd
= fd
;
121 static unsigned int dma_heap_ioctl_cmds
[] = {
122 DMA_HEAP_IOCTL_ALLOC
,
125 static long dma_heap_ioctl(struct file
*file
, unsigned int ucmd
,
128 char stack_kdata
[128];
129 char *kdata
= stack_kdata
;
131 unsigned int in_size
, out_size
, drv_size
, ksize
;
132 int nr
= _IOC_NR(ucmd
);
135 if (nr
>= ARRAY_SIZE(dma_heap_ioctl_cmds
))
138 nr
= array_index_nospec(nr
, ARRAY_SIZE(dma_heap_ioctl_cmds
));
139 /* Get the kernel ioctl cmd that matches */
140 kcmd
= dma_heap_ioctl_cmds
[nr
];
142 /* Figure out the delta between user cmd size and kernel cmd size */
143 drv_size
= _IOC_SIZE(kcmd
);
144 out_size
= _IOC_SIZE(ucmd
);
146 if ((ucmd
& kcmd
& IOC_IN
) == 0)
148 if ((ucmd
& kcmd
& IOC_OUT
) == 0)
150 ksize
= max(max(in_size
, out_size
), drv_size
);
152 /* If necessary, allocate buffer for ioctl argument */
153 if (ksize
> sizeof(stack_kdata
)) {
154 kdata
= kmalloc(ksize
, GFP_KERNEL
);
159 if (copy_from_user(kdata
, (void __user
*)arg
, in_size
) != 0) {
164 /* zero out any difference between the kernel/user structure size */
166 memset(kdata
+ in_size
, 0, ksize
- in_size
);
169 case DMA_HEAP_IOCTL_ALLOC
:
170 ret
= dma_heap_ioctl_allocate(file
, kdata
);
177 if (copy_to_user((void __user
*)arg
, kdata
, out_size
) != 0)
180 if (kdata
!= stack_kdata
)
185 static const struct file_operations dma_heap_fops
= {
186 .owner
= THIS_MODULE
,
187 .open
= dma_heap_open
,
188 .unlocked_ioctl
= dma_heap_ioctl
,
190 .compat_ioctl
= dma_heap_ioctl
,
195 * dma_heap_get_drvdata - get per-heap driver data
196 * @heap: DMA-Heap to retrieve private data for
199 * The per-heap data for the heap.
201 void *dma_heap_get_drvdata(struct dma_heap
*heap
)
207 * dma_heap_get_name - get heap name
208 * @heap: DMA-Heap to retrieve the name of
211 * The char* for the heap name.
213 const char *dma_heap_get_name(struct dma_heap
*heap
)
219 * dma_heap_add - adds a heap to dmabuf heaps
220 * @exp_info: information needed to register this heap
222 struct dma_heap
*dma_heap_add(const struct dma_heap_export_info
*exp_info
)
224 struct dma_heap
*heap
, *h
, *err_ret
;
225 struct device
*dev_ret
;
229 if (!exp_info
->name
|| !strcmp(exp_info
->name
, "")) {
230 pr_err("dma_heap: Cannot add heap without a name\n");
231 return ERR_PTR(-EINVAL
);
234 if (!exp_info
->ops
|| !exp_info
->ops
->allocate
) {
235 pr_err("dma_heap: Cannot add heap with invalid ops struct\n");
236 return ERR_PTR(-EINVAL
);
239 heap
= kzalloc(sizeof(*heap
), GFP_KERNEL
);
241 return ERR_PTR(-ENOMEM
);
243 heap
->name
= exp_info
->name
;
244 heap
->ops
= exp_info
->ops
;
245 heap
->priv
= exp_info
->priv
;
247 /* Find unused minor number */
248 ret
= xa_alloc(&dma_heap_minors
, &minor
, heap
,
249 XA_LIMIT(0, NUM_HEAP_MINORS
- 1), GFP_KERNEL
);
251 pr_err("dma_heap: Unable to get minor number for heap\n");
252 err_ret
= ERR_PTR(ret
);
257 heap
->heap_devt
= MKDEV(MAJOR(dma_heap_devt
), minor
);
259 cdev_init(&heap
->heap_cdev
, &dma_heap_fops
);
260 ret
= cdev_add(&heap
->heap_cdev
, heap
->heap_devt
, 1);
262 pr_err("dma_heap: Unable to add char device\n");
263 err_ret
= ERR_PTR(ret
);
267 dev_ret
= device_create(dma_heap_class
,
272 if (IS_ERR(dev_ret
)) {
273 pr_err("dma_heap: Unable to create device\n");
274 err_ret
= ERR_CAST(dev_ret
);
278 mutex_lock(&heap_list_lock
);
279 /* check the name is unique */
280 list_for_each_entry(h
, &heap_list
, list
) {
281 if (!strcmp(h
->name
, exp_info
->name
)) {
282 mutex_unlock(&heap_list_lock
);
283 pr_err("dma_heap: Already registered heap named %s\n",
285 err_ret
= ERR_PTR(-EINVAL
);
290 /* Add heap to the list */
291 list_add(&heap
->list
, &heap_list
);
292 mutex_unlock(&heap_list_lock
);
297 device_destroy(dma_heap_class
, heap
->heap_devt
);
299 cdev_del(&heap
->heap_cdev
);
301 xa_erase(&dma_heap_minors
, minor
);
307 static char *dma_heap_devnode(const struct device
*dev
, umode_t
*mode
)
309 return kasprintf(GFP_KERNEL
, "dma_heap/%s", dev_name(dev
));
312 static int dma_heap_init(void)
316 ret
= alloc_chrdev_region(&dma_heap_devt
, 0, NUM_HEAP_MINORS
, DEVNAME
);
320 dma_heap_class
= class_create(DEVNAME
);
321 if (IS_ERR(dma_heap_class
)) {
322 unregister_chrdev_region(dma_heap_devt
, NUM_HEAP_MINORS
);
323 return PTR_ERR(dma_heap_class
);
325 dma_heap_class
->devnode
= dma_heap_devnode
;
329 subsys_initcall(dma_heap_init
);