drivers/gpu/drm/drm_gem_dma_helper.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * drm gem DMA helper functions
 *
 * Copyright (C) 2012 Sascha Hauer, Pengutronix
 *
 * Based on Samsung Exynos code
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 */

#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>

#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_vma_manager.h>
/**
 * DOC: dma helpers
 *
 * The DRM GEM/DMA helpers are a means to provide buffer objects that are
 * presented to the device as a contiguous chunk of memory. This is useful
 * for devices that do not support scatter-gather DMA (either directly or
 * by using an intimately attached IOMMU).
 *
 * For devices that access the memory bus through an (external) IOMMU, the
 * buffer objects are allocated using a traditional page-based allocator
 * and may be scattered through physical memory. However, they are
 * contiguous in the IOVA space, so they appear contiguous to the devices
 * using them.
 *
 * For other devices, the helpers rely on CMA to provide buffer objects
 * that are physically contiguous in memory.
 *
 * For GEM callback helpers in struct &drm_gem_object functions, see likewise
 * named functions with an _object_ infix (e.g., drm_gem_dma_object_vmap() wraps
 * drm_gem_dma_vmap()). These helpers perform the necessary type conversion.
 */
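/*
 * Editor's illustrative sketch (not part of the upstream file): a driver
 * typically wires these helpers up through the DRM_GEM_DMA_DRIVER_OPS
 * macro, which fills in the GEM-related &drm_driver callbacks with the
 * functions in this file. The "foo" names and the fops instance are
 * hypothetical placeholders.
 *
 *      static const struct drm_driver foo_driver = {
 *              .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
 *              .fops = &foo_fops,
 *              .name = "foo",
 *              .desc = "hypothetical example driver",
 *              DRM_GEM_DMA_DRIVER_OPS,
 *      };
 */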
static const struct drm_gem_object_funcs drm_gem_dma_default_funcs = {
        .free = drm_gem_dma_object_free,
        .print_info = drm_gem_dma_object_print_info,
        .get_sg_table = drm_gem_dma_object_get_sg_table,
        .vmap = drm_gem_dma_object_vmap,
        .mmap = drm_gem_dma_object_mmap,
        .vm_ops = &drm_gem_dma_vm_ops,
};
/**
 * __drm_gem_dma_create - Create a GEM DMA object without allocating memory
 * @drm: DRM device
 * @size: size of the object to allocate
 * @private: true if used for internal purposes
 *
 * This function creates and initializes a GEM DMA object of the given size,
 * but doesn't allocate any memory to back the object.
 *
 * Returns:
 * A struct drm_gem_dma_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */
static struct drm_gem_dma_object *
__drm_gem_dma_create(struct drm_device *drm, size_t size, bool private)
{
        struct drm_gem_dma_object *dma_obj;
        struct drm_gem_object *gem_obj;
        int ret = 0;

        if (drm->driver->gem_create_object) {
                gem_obj = drm->driver->gem_create_object(drm, size);
                if (IS_ERR(gem_obj))
                        return ERR_CAST(gem_obj);
                dma_obj = to_drm_gem_dma_obj(gem_obj);
        } else {
                dma_obj = kzalloc(sizeof(*dma_obj), GFP_KERNEL);
                if (!dma_obj)
                        return ERR_PTR(-ENOMEM);
                gem_obj = &dma_obj->base;
        }

        if (!gem_obj->funcs)
                gem_obj->funcs = &drm_gem_dma_default_funcs;

        if (private) {
                drm_gem_private_object_init(drm, gem_obj, size);

                /* Always use writecombine for dma-buf mappings */
                dma_obj->map_noncoherent = false;
        } else {
                ret = drm_gem_object_init(drm, gem_obj, size);
        }
        if (ret)
                goto error;

        ret = drm_gem_create_mmap_offset(gem_obj);
        if (ret) {
                drm_gem_object_release(gem_obj);
                goto error;
        }

        return dma_obj;

error:
        kfree(dma_obj);
        return ERR_PTR(ret);
}
/**
 * drm_gem_dma_create - allocate an object with the given size
 * @drm: DRM device
 * @size: size of the object to allocate
 *
 * This function creates a DMA GEM object and allocates memory as backing store.
 * The allocated memory will occupy a contiguous chunk of bus address space.
 *
 * For devices that are directly connected to the memory bus, the allocated
 * memory will be physically contiguous. For devices that access memory
 * through an IOMMU, the allocated memory is not expected to be physically
 * contiguous because contiguous IOVAs are sufficient to meet a device's DMA
 * requirements.
 *
 * Returns:
 * A struct drm_gem_dma_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */
struct drm_gem_dma_object *drm_gem_dma_create(struct drm_device *drm,
                                              size_t size)
{
        struct drm_gem_dma_object *dma_obj;
        int ret;

        size = round_up(size, PAGE_SIZE);

        dma_obj = __drm_gem_dma_create(drm, size, false);
        if (IS_ERR(dma_obj))
                return dma_obj;

        if (dma_obj->map_noncoherent) {
                dma_obj->vaddr = dma_alloc_noncoherent(drm->dev, size,
                                                       &dma_obj->dma_addr,
                                                       DMA_TO_DEVICE,
                                                       GFP_KERNEL | __GFP_NOWARN);
        } else {
                dma_obj->vaddr = dma_alloc_wc(drm->dev, size,
                                              &dma_obj->dma_addr,
                                              GFP_KERNEL | __GFP_NOWARN);
        }
        if (!dma_obj->vaddr) {
                drm_dbg(drm, "failed to allocate buffer with size %zu\n",
                        size);
                ret = -ENOMEM;
                goto error;
        }

        return dma_obj;

error:
        drm_gem_object_put(&dma_obj->base);
        return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(drm_gem_dma_create);
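/*
 * Editor's illustrative sketch (not part of the upstream file): a driver
 * allocating a contiguous buffer with drm_gem_dma_create() and dropping
 * its reference when done. The final drm_gem_object_put() ends up in
 * drm_gem_dma_free() via the .free object callback. The buffer_size
 * variable is a hypothetical placeholder.
 *
 *      struct drm_gem_dma_object *dma_obj;
 *
 *      dma_obj = drm_gem_dma_create(drm, buffer_size);
 *      if (IS_ERR(dma_obj))
 *              return PTR_ERR(dma_obj);
 *
 *      ... use dma_obj->vaddr and dma_obj->dma_addr ...
 *
 *      drm_gem_object_put(&dma_obj->base);
 */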
/**
 * drm_gem_dma_create_with_handle - allocate an object with the given size and
 *     return a GEM handle to it
 * @file_priv: DRM file-private structure to register the handle for
 * @drm: DRM device
 * @size: size of the object to allocate
 * @handle: return location for the GEM handle
 *
 * This function creates a DMA GEM object, allocating a chunk of memory as
 * backing store. The GEM object is then added to the list of objects
 * associated with the given file and a handle to it is returned.
 *
 * The allocated memory will occupy a contiguous chunk of bus address space.
 * See drm_gem_dma_create() for more details.
 *
 * Returns:
 * A struct drm_gem_dma_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */
static struct drm_gem_dma_object *
drm_gem_dma_create_with_handle(struct drm_file *file_priv,
                               struct drm_device *drm, size_t size,
                               uint32_t *handle)
{
        struct drm_gem_dma_object *dma_obj;
        struct drm_gem_object *gem_obj;
        int ret;

        dma_obj = drm_gem_dma_create(drm, size);
        if (IS_ERR(dma_obj))
                return dma_obj;

        gem_obj = &dma_obj->base;

        /*
         * Allocate an ID in the IDR table where the object is registered;
         * the handle is the ID that userspace gets to see.
         */
        ret = drm_gem_handle_create(file_priv, gem_obj, handle);
        /* drop reference from allocate - handle holds it now. */
        drm_gem_object_put(gem_obj);
        if (ret)
                return ERR_PTR(ret);

        return dma_obj;
}
/**
 * drm_gem_dma_free - free resources associated with a DMA GEM object
 * @dma_obj: DMA GEM object to free
 *
 * This function frees the backing memory of the DMA GEM object, cleans up the
 * GEM object state and frees the memory used to store the object itself.
 * If the buffer is imported and the virtual address is set, it is released.
 */
void drm_gem_dma_free(struct drm_gem_dma_object *dma_obj)
{
        struct drm_gem_object *gem_obj = &dma_obj->base;
        struct iosys_map map = IOSYS_MAP_INIT_VADDR(dma_obj->vaddr);

        if (gem_obj->import_attach) {
                if (dma_obj->vaddr)
                        dma_buf_vunmap_unlocked(gem_obj->import_attach->dmabuf, &map);
                drm_prime_gem_destroy(gem_obj, dma_obj->sgt);
        } else if (dma_obj->vaddr) {
                if (dma_obj->map_noncoherent)
                        dma_free_noncoherent(gem_obj->dev->dev, dma_obj->base.size,
                                             dma_obj->vaddr, dma_obj->dma_addr,
                                             DMA_TO_DEVICE);
                else
                        dma_free_wc(gem_obj->dev->dev, dma_obj->base.size,
                                    dma_obj->vaddr, dma_obj->dma_addr);
        }

        drm_gem_object_release(gem_obj);

        kfree(dma_obj);
}
EXPORT_SYMBOL_GPL(drm_gem_dma_free);
/**
 * drm_gem_dma_dumb_create_internal - create a dumb buffer object
 * @file_priv: DRM file-private structure to create the dumb buffer for
 * @drm: DRM device
 * @args: IOCTL data
 *
 * This aligns the pitch and size arguments to the minimum required. This is
 * an internal helper that can be wrapped by a driver to account for hardware
 * with more specific alignment requirements. It should not be used directly
 * as a driver's &drm_driver.dumb_create callback.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_dma_dumb_create_internal(struct drm_file *file_priv,
                                     struct drm_device *drm,
                                     struct drm_mode_create_dumb *args)
{
        unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
        struct drm_gem_dma_object *dma_obj;

        if (args->pitch < min_pitch)
                args->pitch = min_pitch;

        if (args->size < args->pitch * args->height)
                args->size = args->pitch * args->height;

        dma_obj = drm_gem_dma_create_with_handle(file_priv, drm, args->size,
                                                 &args->handle);
        return PTR_ERR_OR_ZERO(dma_obj);
}
EXPORT_SYMBOL_GPL(drm_gem_dma_dumb_create_internal);
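/*
 * Editor's illustrative sketch (not part of the upstream file): a driver
 * whose scanout hardware needs, say, a 128-byte aligned pitch can wrap
 * the internal helper and register the wrapper via
 * DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE(foo_dumb_create). The "foo"
 * name and the alignment value are hypothetical.
 *
 *      static int foo_dumb_create(struct drm_file *file_priv,
 *                                 struct drm_device *drm,
 *                                 struct drm_mode_create_dumb *args)
 *      {
 *              unsigned int pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
 *
 *              args->pitch = ALIGN(pitch, 128);
 *              args->size = args->pitch * args->height;
 *
 *              return drm_gem_dma_dumb_create_internal(file_priv, drm, args);
 *      }
 */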
/**
 * drm_gem_dma_dumb_create - create a dumb buffer object
 * @file_priv: DRM file-private structure to create the dumb buffer for
 * @drm: DRM device
 * @args: IOCTL data
 *
 * This function computes the pitch of the dumb buffer and rounds it up to an
 * integer number of bytes per pixel. Drivers for hardware that doesn't have
 * any additional restrictions on the pitch can directly use this function as
 * their &drm_driver.dumb_create callback.
 *
 * For hardware with additional restrictions, drivers can adjust the fields
 * set up by userspace and pass the IOCTL data along to the
 * drm_gem_dma_dumb_create_internal() function.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_dma_dumb_create(struct drm_file *file_priv,
                            struct drm_device *drm,
                            struct drm_mode_create_dumb *args)
{
        struct drm_gem_dma_object *dma_obj;

        args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
        args->size = args->pitch * args->height;

        dma_obj = drm_gem_dma_create_with_handle(file_priv, drm, args->size,
                                                 &args->handle);
        return PTR_ERR_OR_ZERO(dma_obj);
}
EXPORT_SYMBOL_GPL(drm_gem_dma_dumb_create);
const struct vm_operations_struct drm_gem_dma_vm_ops = {
        .open = drm_gem_vm_open,
        .close = drm_gem_vm_close,
};
EXPORT_SYMBOL_GPL(drm_gem_dma_vm_ops);
#ifndef CONFIG_MMU
/**
 * drm_gem_dma_get_unmapped_area - propose address for mapping in noMMU cases
 * @filp: file object
 * @addr: memory address
 * @len: buffer size
 * @pgoff: page offset
 * @flags: memory flags
 *
 * This function is used on noMMU platforms to propose an address mapping
 * for a given buffer. It is intended to be used as a direct handler for
 * the struct &file_operations.get_unmapped_area operation.
 *
 * Returns:
 * mapping address on success or a negative error code on failure.
 */
unsigned long drm_gem_dma_get_unmapped_area(struct file *filp,
                                            unsigned long addr,
                                            unsigned long len,
                                            unsigned long pgoff,
                                            unsigned long flags)
{
        struct drm_gem_dma_object *dma_obj;
        struct drm_gem_object *obj = NULL;
        struct drm_file *priv = filp->private_data;
        struct drm_device *dev = priv->minor->dev;
        struct drm_vma_offset_node *node;

        if (drm_dev_is_unplugged(dev))
                return -ENODEV;

        drm_vma_offset_lock_lookup(dev->vma_offset_manager);
        node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
                                                  pgoff,
                                                  len >> PAGE_SHIFT);
        if (likely(node)) {
                obj = container_of(node, struct drm_gem_object, vma_node);
                /*
                 * When the object is being freed, after it hits 0-refcnt it
                 * proceeds to tear down the object. In the process it will
                 * attempt to remove the VMA offset and so acquire this
                 * mgr->vm_lock. Therefore if we find an object with a 0-refcnt
                 * that matches our range, we know it is in the process of being
                 * destroyed and will be freed as soon as we release the lock -
                 * so we have to check for the 0-refcnted object and treat it as
                 * invalid.
                 */
                if (!kref_get_unless_zero(&obj->refcount))
                        obj = NULL;
        }

        drm_vma_offset_unlock_lookup(dev->vma_offset_manager);

        if (!obj)
                return -EINVAL;

        if (!drm_vma_node_is_allowed(node, priv)) {
                drm_gem_object_put(obj);
                return -EACCES;
        }

        dma_obj = to_drm_gem_dma_obj(obj);

        drm_gem_object_put(obj);

        return dma_obj->vaddr ? (unsigned long)dma_obj->vaddr : -EINVAL;
}
EXPORT_SYMBOL_GPL(drm_gem_dma_get_unmapped_area);
#endif
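/*
 * Editor's illustrative sketch (not part of the upstream file): drivers
 * normally do not reference drm_gem_dma_get_unmapped_area() directly.
 * The DEFINE_DRM_GEM_DMA_FOPS() macro declares a &file_operations
 * instance that hooks it up as .get_unmapped_area on !MMU configurations
 * (and leaves it out otherwise). The "foo" name is a hypothetical
 * placeholder.
 *
 *      DEFINE_DRM_GEM_DMA_FOPS(foo_fops);
 */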
/**
 * drm_gem_dma_print_info() - Print &drm_gem_dma_object info for debugfs
 * @dma_obj: DMA GEM object
 * @p: DRM printer
 * @indent: Tab indentation level
 *
 * This function prints dma_addr and vaddr for use in e.g. debugfs output.
 */
void drm_gem_dma_print_info(const struct drm_gem_dma_object *dma_obj,
                            struct drm_printer *p, unsigned int indent)
{
        drm_printf_indent(p, indent, "dma_addr=%pad\n", &dma_obj->dma_addr);
        drm_printf_indent(p, indent, "vaddr=%p\n", dma_obj->vaddr);
}
EXPORT_SYMBOL(drm_gem_dma_print_info);
/**
 * drm_gem_dma_get_sg_table - provide a scatter/gather table of pinned
 *     pages for a DMA GEM object
 * @dma_obj: DMA GEM object
 *
 * This function exports a scatter/gather table by calling the standard
 * DMA mapping API.
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or an
 * ERR_PTR()-encoded negative error code on failure.
 */
struct sg_table *drm_gem_dma_get_sg_table(struct drm_gem_dma_object *dma_obj)
{
        struct drm_gem_object *obj = &dma_obj->base;
        struct sg_table *sgt;
        int ret;

        sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
        if (!sgt)
                return ERR_PTR(-ENOMEM);

        ret = dma_get_sgtable(obj->dev->dev, sgt, dma_obj->vaddr,
                              dma_obj->dma_addr, obj->size);
        if (ret < 0)
                goto out;

        return sgt;

out:
        kfree(sgt);
        return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(drm_gem_dma_get_sg_table);
/**
 * drm_gem_dma_prime_import_sg_table - produce a DMA GEM object from another
 *     driver's scatter/gather table of pinned pages
 * @dev: device to import into
 * @attach: DMA-BUF attachment
 * @sgt: scatter/gather table of pinned pages
 *
 * This function imports a scatter/gather table exported via DMA-BUF by
 * another driver. Imported buffers must be physically contiguous in memory
 * (i.e. the scatter/gather table must contain a single entry). Drivers that
 * use the DMA helpers should set this as their
 * &drm_driver.gem_prime_import_sg_table callback.
 *
 * Returns:
 * A pointer to a newly created GEM object or an ERR_PTR-encoded negative
 * error code on failure.
 */
struct drm_gem_object *
drm_gem_dma_prime_import_sg_table(struct drm_device *dev,
                                  struct dma_buf_attachment *attach,
                                  struct sg_table *sgt)
{
        struct drm_gem_dma_object *dma_obj;

        /* check if the entries in the sg_table are contiguous */
        if (drm_prime_get_contiguous_size(sgt) < attach->dmabuf->size)
                return ERR_PTR(-EINVAL);

        /* Create a DMA GEM buffer. */
        dma_obj = __drm_gem_dma_create(dev, attach->dmabuf->size, true);
        if (IS_ERR(dma_obj))
                return ERR_CAST(dma_obj);

        dma_obj->dma_addr = sg_dma_address(sgt->sgl);
        dma_obj->sgt = sgt;

        drm_dbg_prime(dev, "dma_addr = %pad, size = %zu\n", &dma_obj->dma_addr,
                      attach->dmabuf->size);

        return &dma_obj->base;
}
EXPORT_SYMBOL_GPL(drm_gem_dma_prime_import_sg_table);
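/*
 * Editor's illustrative sketch (not part of the upstream file): wiring the
 * import helper up explicitly instead of through DRM_GEM_DMA_DRIVER_OPS.
 * The "foo" name is a hypothetical placeholder.
 *
 *      static const struct drm_driver foo_driver = {
 *              ...
 *              .dumb_create = drm_gem_dma_dumb_create,
 *              .gem_prime_import_sg_table = drm_gem_dma_prime_import_sg_table,
 *      };
 */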
/**
 * drm_gem_dma_vmap - map a DMA GEM object into the kernel's virtual
 *     address space
 * @dma_obj: DMA GEM object
 * @map: Returns the kernel virtual address of the DMA GEM object's backing
 *       store.
 *
 * This function maps a buffer into the kernel's virtual address space.
 * Since the DMA buffers are already mapped into the kernel virtual address
 * space this simply returns the cached virtual address.
 *
 * Returns:
 * 0 on success, or a negative error code otherwise.
 */
int drm_gem_dma_vmap(struct drm_gem_dma_object *dma_obj,
                     struct iosys_map *map)
{
        iosys_map_set_vaddr(map, dma_obj->vaddr);

        return 0;
}
EXPORT_SYMBOL_GPL(drm_gem_dma_vmap);
/**
 * drm_gem_dma_mmap - memory-map an exported DMA GEM object
 * @dma_obj: DMA GEM object
 * @vma: VMA for the area to be mapped
 *
 * This function maps a buffer into a userspace process's address space.
 * In addition to the usual GEM VMA setup it immediately faults in the entire
 * object instead of using on-demand faulting.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_dma_mmap(struct drm_gem_dma_object *dma_obj, struct vm_area_struct *vma)
{
        struct drm_gem_object *obj = &dma_obj->base;
        int ret;

        /*
         * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the
         * vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to map
         * the whole buffer.
         */
        vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node);
        vm_flags_mod(vma, VM_DONTEXPAND, VM_PFNMAP);

        if (dma_obj->map_noncoherent) {
                vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);

                ret = dma_mmap_pages(dma_obj->base.dev->dev,
                                     vma, vma->vm_end - vma->vm_start,
                                     virt_to_page(dma_obj->vaddr));
        } else {
                ret = dma_mmap_wc(dma_obj->base.dev->dev, vma, dma_obj->vaddr,
                                  dma_obj->dma_addr,
                                  vma->vm_end - vma->vm_start);
        }
        if (ret)
                drm_gem_vm_close(vma);

        return ret;
}
EXPORT_SYMBOL_GPL(drm_gem_dma_mmap);
/**
 * drm_gem_dma_prime_import_sg_table_vmap - PRIME import another driver's
 *     scatter/gather table and get the virtual address of the buffer
 * @dev: DRM device
 * @attach: DMA-BUF attachment
 * @sgt: Scatter/gather table of pinned pages
 *
 * This function imports a scatter/gather table using
 * drm_gem_dma_prime_import_sg_table() and uses dma_buf_vmap() to get the kernel
 * virtual address. This ensures that a DMA GEM object always has its virtual
 * address set. This address is released when the object is freed.
 *
 * This function can be used as the &drm_driver.gem_prime_import_sg_table
 * callback. The &DRM_GEM_DMA_DRIVER_OPS_VMAP macro provides a shortcut to set
 * the necessary DRM driver operations.
 *
 * Returns:
 * A pointer to a newly created GEM object or an ERR_PTR-encoded negative
 * error code on failure.
 */
struct drm_gem_object *
drm_gem_dma_prime_import_sg_table_vmap(struct drm_device *dev,
                                       struct dma_buf_attachment *attach,
                                       struct sg_table *sgt)
{
        struct drm_gem_dma_object *dma_obj;
        struct drm_gem_object *obj;
        struct iosys_map map;
        int ret;

        ret = dma_buf_vmap_unlocked(attach->dmabuf, &map);
        if (ret) {
                DRM_ERROR("Failed to vmap PRIME buffer\n");
                return ERR_PTR(ret);
        }

        obj = drm_gem_dma_prime_import_sg_table(dev, attach, sgt);
        if (IS_ERR(obj)) {
                dma_buf_vunmap_unlocked(attach->dmabuf, &map);
                return obj;
        }

        dma_obj = to_drm_gem_dma_obj(obj);
        dma_obj->vaddr = map.vaddr;

        return obj;
}
EXPORT_SYMBOL(drm_gem_dma_prime_import_sg_table_vmap);
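/*
 * Editor's illustrative sketch (not part of the upstream file): drivers
 * that want imported buffers to always come with a kernel mapping (for
 * example because they access the buffer with the CPU) use the vmap
 * variant of the driver ops instead of DRM_GEM_DMA_DRIVER_OPS. The "foo"
 * names are hypothetical placeholders.
 *
 *      static const struct drm_driver foo_driver = {
 *              .driver_features = DRIVER_GEM | DRIVER_MODESET,
 *              .fops = &foo_fops,
 *              .name = "foo",
 *              DRM_GEM_DMA_DRIVER_OPS_VMAP,
 *      };
 */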
MODULE_DESCRIPTION("DRM DMA memory-management helpers");
MODULE_IMPORT_NS("DMA_BUF");
MODULE_LICENSE("GPL");