/*
 * drm gem CMA (contiguous memory allocator) helper functions
 *
 * Copyright (C) 2012 Sascha Hauer, Pengutronix
 *
 * Based on Samsung Exynos code
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/export.h>
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>

#include <drm/drmP.h>
#include <drm/drm.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_vma_manager.h>
/**
 * DOC: cma helpers
 *
 * The Contiguous Memory Allocator reserves a pool of memory at early boot
 * that is used to service requests for large blocks of contiguous memory.
 *
 * The DRM GEM/CMA helpers use this allocator as a means to provide buffer
 * objects that are physically contiguous in memory. This is useful for
 * display drivers that are unable to map scattered buffers via an IOMMU.
 */
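
/*
 * Illustrative sketch, not part of the original helpers: the typical wiring
 * of the CMA helpers into a driver. The "foo" names and the feature flags
 * are hypothetical.
 *
 *	DEFINE_DRM_GEM_CMA_FOPS(foo_fops);
 *
 *	static struct drm_driver foo_driver = {
 *		.driver_features	= DRIVER_GEM | DRIVER_MODESET,
 *		.fops			= &foo_fops,
 *		.gem_free_object_unlocked = drm_gem_cma_free_object,
 *		.gem_vm_ops		= &drm_gem_cma_vm_ops,
 *		.dumb_create		= drm_gem_cma_dumb_create,
 *	};
 */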
/**
 * __drm_gem_cma_create - Create a GEM CMA object without allocating memory
 * @drm: DRM device
 * @size: size of the object to allocate
 *
 * This function creates and initializes a GEM CMA object of the given size,
 * but doesn't allocate any memory to back the object.
 *
 * Returns:
 * A struct drm_gem_cma_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */
static struct drm_gem_cma_object *
__drm_gem_cma_create(struct drm_device *drm, size_t size)
{
	struct drm_gem_cma_object *cma_obj;
	struct drm_gem_object *gem_obj;
	int ret;

	if (drm->driver->gem_create_object)
		gem_obj = drm->driver->gem_create_object(drm, size);
	else
		gem_obj = kzalloc(sizeof(*cma_obj), GFP_KERNEL);
	if (!gem_obj)
		return ERR_PTR(-ENOMEM);
	cma_obj = container_of(gem_obj, struct drm_gem_cma_object, base);

	ret = drm_gem_object_init(drm, gem_obj, size);
	if (ret)
		goto error;

	ret = drm_gem_create_mmap_offset(gem_obj);
	if (ret) {
		drm_gem_object_release(gem_obj);
		goto error;
	}

	return cma_obj;

error:
	kfree(cma_obj);
	return ERR_PTR(ret);
}
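
/*
 * Illustrative sketch: a driver can have __drm_gem_cma_create() place the CMA
 * object inside a larger, driver-private structure by providing a
 * &drm_driver.gem_create_object hook. "foo_gem_object" is a hypothetical type.
 *
 *	struct foo_gem_object {
 *		struct drm_gem_cma_object base;
 *		bool secure;
 *	};
 *
 *	static struct drm_gem_object *
 *	foo_gem_create_object(struct drm_device *drm, size_t size)
 *	{
 *		struct foo_gem_object *obj;
 *
 *		obj = kzalloc(sizeof(*obj), GFP_KERNEL);
 *		if (!obj)
 *			return NULL;
 *
 *		return &obj->base.base;
 *	}
 */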
/**
 * drm_gem_cma_create - allocate an object with the given size
 * @drm: DRM device
 * @size: size of the object to allocate
 *
 * This function creates a CMA GEM object and allocates a contiguous chunk of
 * memory as backing store. The backing memory has the writecombine attribute
 * set.
 *
 * Returns:
 * A struct drm_gem_cma_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */
struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm,
					      size_t size)
{
	struct drm_gem_cma_object *cma_obj;
	int ret;

	size = round_up(size, PAGE_SIZE);

	cma_obj = __drm_gem_cma_create(drm, size);
	if (IS_ERR(cma_obj))
		return cma_obj;

	cma_obj->vaddr = dma_alloc_wc(drm->dev, size, &cma_obj->paddr,
				      GFP_KERNEL | __GFP_NOWARN);
	if (!cma_obj->vaddr) {
		dev_dbg(drm->dev, "failed to allocate buffer with size %zu\n",
			size);
		ret = -ENOMEM;
		goto error;
	}

	return cma_obj;

error:
	drm_gem_object_put_unlocked(&cma_obj->base);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(drm_gem_cma_create);
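
/*
 * Illustrative sketch: allocating a contiguous scanout buffer from a driver;
 * "priv" and the buffer dimensions are hypothetical.
 *
 *	struct drm_gem_cma_object *cma_obj;
 *
 *	cma_obj = drm_gem_cma_create(drm, priv->pitch * priv->height);
 *	if (IS_ERR(cma_obj))
 *		return PTR_ERR(cma_obj);
 *
 *	priv->fb_dma = cma_obj->paddr;	(DMA address, e.g. for scanout)
 *	priv->fb_cpu = cma_obj->vaddr;	(writecombine kernel mapping)
 */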
/**
 * drm_gem_cma_create_with_handle - allocate an object with the given size and
 *     return a GEM handle to it
 * @file_priv: DRM file-private structure to register the handle for
 * @drm: DRM device
 * @size: size of the object to allocate
 * @handle: return location for the GEM handle
 *
 * This function creates a CMA GEM object, allocating a physically contiguous
 * chunk of memory as backing store. The GEM object is then added to the list
 * of objects associated with the given file and a handle to it is returned.
 *
 * Returns:
 * A struct drm_gem_cma_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */
static struct drm_gem_cma_object *
drm_gem_cma_create_with_handle(struct drm_file *file_priv,
			       struct drm_device *drm, size_t size,
			       uint32_t *handle)
{
	struct drm_gem_cma_object *cma_obj;
	struct drm_gem_object *gem_obj;
	int ret;

	cma_obj = drm_gem_cma_create(drm, size);
	if (IS_ERR(cma_obj))
		return cma_obj;

	gem_obj = &cma_obj->base;

	/*
	 * Allocate an ID in the IDR table where the object is registered;
	 * the handle returned to userspace carries that ID.
	 */
	ret = drm_gem_handle_create(file_priv, gem_obj, handle);
	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_put_unlocked(gem_obj);
	if (ret)
		return ERR_PTR(ret);

	return cma_obj;
}
/**
 * drm_gem_cma_free_object - free resources associated with a CMA GEM object
 * @gem_obj: GEM object to free
 *
 * This function frees the backing memory of the CMA GEM object, cleans up the
 * GEM object state and frees the memory used to store the object itself.
 * Drivers using the CMA helpers should set this as their
 * &drm_driver.gem_free_object_unlocked callback.
 */
void drm_gem_cma_free_object(struct drm_gem_object *gem_obj)
{
	struct drm_gem_cma_object *cma_obj;

	cma_obj = to_drm_gem_cma_obj(gem_obj);

	if (cma_obj->vaddr) {
		dma_free_wc(gem_obj->dev->dev, cma_obj->base.size,
			    cma_obj->vaddr, cma_obj->paddr);
	} else if (gem_obj->import_attach) {
		drm_prime_gem_destroy(gem_obj, cma_obj->sgt);
	}

	drm_gem_object_release(gem_obj);

	kfree(cma_obj);
}
EXPORT_SYMBOL_GPL(drm_gem_cma_free_object);
/**
 * drm_gem_cma_dumb_create_internal - create a dumb buffer object
 * @file_priv: DRM file-private structure to create the dumb buffer for
 * @drm: DRM device
 * @args: IOCTL data
 *
 * This aligns the pitch and size arguments to the minimum required. This is
 * an internal helper that can be wrapped by a driver to account for hardware
 * with more specific alignment requirements. It should not be used directly
 * as a driver's &drm_driver.dumb_create callback.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_cma_dumb_create_internal(struct drm_file *file_priv,
				     struct drm_device *drm,
				     struct drm_mode_create_dumb *args)
{
	unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	struct drm_gem_cma_object *cma_obj;

	if (args->pitch < min_pitch)
		args->pitch = min_pitch;

	if (args->size < args->pitch * args->height)
		args->size = args->pitch * args->height;

	cma_obj = drm_gem_cma_create_with_handle(file_priv, drm, args->size,
						 &args->handle);
	return PTR_ERR_OR_ZERO(cma_obj);
}
EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_create_internal);
/**
 * drm_gem_cma_dumb_create - create a dumb buffer object
 * @file_priv: DRM file-private structure to create the dumb buffer for
 * @drm: DRM device
 * @args: IOCTL data
 *
 * This function computes the pitch of the dumb buffer and rounds it up to an
 * integer number of bytes per pixel. Drivers for hardware that doesn't have
 * any additional restrictions on the pitch can directly use this function as
 * their &drm_driver.dumb_create callback.
 *
 * For hardware with additional restrictions, drivers can adjust the fields
 * set up by userspace and pass the IOCTL data along to the
 * drm_gem_cma_dumb_create_internal() function.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_cma_dumb_create(struct drm_file *file_priv,
			    struct drm_device *drm,
			    struct drm_mode_create_dumb *args)
{
	struct drm_gem_cma_object *cma_obj;

	args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	args->size = args->pitch * args->height;

	cma_obj = drm_gem_cma_create_with_handle(file_priv, drm, args->size,
						 &args->handle);
	return PTR_ERR_OR_ZERO(cma_obj);
}
EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_create);
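
/*
 * Illustrative sketch: a driver whose scanout engine needs, say, 64-byte
 * aligned pitches can wrap drm_gem_cma_dumb_create_internal() as described
 * above. The "foo" name and the alignment value are hypothetical.
 *
 *	static int foo_dumb_create(struct drm_file *file_priv,
 *				   struct drm_device *drm,
 *				   struct drm_mode_create_dumb *args)
 *	{
 *		unsigned int pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
 *
 *		args->pitch = ALIGN(pitch, 64);
 *		args->size = args->pitch * args->height;
 *
 *		return drm_gem_cma_dumb_create_internal(file_priv, drm, args);
 *	}
 */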
const struct vm_operations_struct drm_gem_cma_vm_ops = {
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};
EXPORT_SYMBOL_GPL(drm_gem_cma_vm_ops);
static int drm_gem_cma_mmap_obj(struct drm_gem_cma_object *cma_obj,
				struct vm_area_struct *vma)
{
	int ret;

	/*
	 * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the
	 * vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to
	 * map the whole buffer.
	 */
	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_pgoff = 0;

	ret = dma_mmap_wc(cma_obj->base.dev->dev, vma, cma_obj->vaddr,
			  cma_obj->paddr, vma->vm_end - vma->vm_start);
	if (ret)
		drm_gem_vm_close(vma);

	return ret;
}
/**
 * drm_gem_cma_mmap - memory-map a CMA GEM object
 * @filp: file object
 * @vma: VMA for the area to be mapped
 *
 * This function implements an augmented version of the GEM DRM file mmap
 * operation for CMA objects: In addition to the usual GEM VMA setup it
 * immediately faults in the entire object instead of using on-demand
 * faulting. Drivers that employ the CMA helpers should use this function
 * as their ->mmap() handler in the DRM device file's file_operations
 * structure.
 *
 * Instead of directly referencing this function, drivers should use the
 * DEFINE_DRM_GEM_CMA_FOPS() macro.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_cma_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_gem_cma_object *cma_obj;
	struct drm_gem_object *gem_obj;
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret)
		return ret;

	gem_obj = vma->vm_private_data;
	cma_obj = to_drm_gem_cma_obj(gem_obj);

	return drm_gem_cma_mmap_obj(cma_obj, vma);
}
EXPORT_SYMBOL_GPL(drm_gem_cma_mmap);
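
/*
 * Illustrative sketch: DEFINE_DRM_GEM_CMA_FOPS() (declared in
 * <drm/drm_gem_cma_helper.h>) expands to a file_operations structure that
 * routes .mmap to drm_gem_cma_mmap(). The "foo" name is hypothetical.
 *
 *	DEFINE_DRM_GEM_CMA_FOPS(foo_fops);
 *
 *	static struct drm_driver foo_driver = {
 *		...
 *		.fops = &foo_fops,
 *	};
 */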
#ifndef CONFIG_MMU
/**
 * drm_gem_cma_get_unmapped_area - propose address for mapping in noMMU cases
 * @filp: file object
 * @addr: memory address
 * @len: buffer size
 * @pgoff: page offset
 * @flags: memory flags
 *
 * This function is used on noMMU platforms to propose an address mapping
 * for a given buffer. It's intended to be used as a direct handler for the
 * struct &file_operations.get_unmapped_area operation.
 *
 * Returns:
 * mapping address on success or a negative error code on failure.
 */
unsigned long drm_gem_cma_get_unmapped_area(struct file *filp,
					    unsigned long addr,
					    unsigned long len,
					    unsigned long pgoff,
					    unsigned long flags)
{
	struct drm_gem_cma_object *cma_obj;
	struct drm_gem_object *obj = NULL;
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_vma_offset_node *node;

	if (drm_dev_is_unplugged(dev))
		return -ENODEV;

	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
						  pgoff,
						  len >> PAGE_SHIFT);
	if (likely(node)) {
		obj = container_of(node, struct drm_gem_object, vma_node);
		/*
		 * When the object is being freed, after it hits 0-refcnt it
		 * proceeds to tear down the object. In the process it will
		 * attempt to remove the VMA offset and so acquire this
		 * mgr->vm_lock. Therefore if we find an object with a 0-refcnt
		 * that matches our range, we know it is in the process of
		 * being destroyed and will be freed as soon as we release the
		 * lock - so we have to check for the 0-refcnted object and
		 * treat it as invalid.
		 */
		if (!kref_get_unless_zero(&obj->refcount))
			obj = NULL;
	}

	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);

	if (!obj)
		return -EINVAL;

	if (!drm_vma_node_is_allowed(node, priv)) {
		drm_gem_object_put_unlocked(obj);
		return -EACCES;
	}

	cma_obj = to_drm_gem_cma_obj(obj);

	drm_gem_object_put_unlocked(obj);

	return cma_obj->vaddr ? (unsigned long)cma_obj->vaddr : -EINVAL;
}
EXPORT_SYMBOL_GPL(drm_gem_cma_get_unmapped_area);
#endif
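
/*
 * Illustrative note: drivers normally don't reference this symbol directly
 * either; DEFINE_DRM_GEM_CMA_FOPS() pulls it in via the
 * DRM_GEM_CMA_UNMAPPED_AREA_FOPS helper macro, which expands to
 *
 *	.get_unmapped_area = drm_gem_cma_get_unmapped_area,
 *
 * on noMMU configurations and to nothing otherwise.
 */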
/**
 * drm_gem_cma_print_info() - Print &drm_gem_cma_object info for debugfs
 * @p: DRM printer
 * @indent: Tab indentation level
 * @obj: GEM object
 *
 * This function can be used as the &drm_driver.gem_print_info callback.
 * It prints paddr and vaddr for use in e.g. debugfs output.
 */
void drm_gem_cma_print_info(struct drm_printer *p, unsigned int indent,
			    const struct drm_gem_object *obj)
{
	const struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);

	drm_printf_indent(p, indent, "paddr=%pad\n", &cma_obj->paddr);
	drm_printf_indent(p, indent, "vaddr=%p\n", cma_obj->vaddr);
}
EXPORT_SYMBOL(drm_gem_cma_print_info);
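
/*
 * Illustrative sketch: wired up as below, each object listed in debugfs gains
 * two indented lines of the form "paddr=..." and "vaddr=...".
 *
 *	static struct drm_driver foo_driver = {
 *		...
 *		.gem_print_info = drm_gem_cma_print_info,
 *	};
 */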
/**
 * drm_gem_cma_prime_get_sg_table - provide a scatter/gather table of pinned
 *     pages for a CMA GEM object
 * @obj: GEM object
 *
 * This function exports a scatter/gather table suitable for PRIME usage by
 * calling the standard DMA mapping API. Drivers using the CMA helpers should
 * set this as their &drm_driver.gem_prime_get_sg_table callback.
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or NULL on failure.
 */
struct sg_table *drm_gem_cma_prime_get_sg_table(struct drm_gem_object *obj)
{
	struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);
	struct sg_table *sgt;
	int ret;

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return NULL;

	ret = dma_get_sgtable(obj->dev->dev, sgt, cma_obj->vaddr,
			      cma_obj->paddr, obj->size);
	if (ret < 0)
		goto out;

	return sgt;

out:
	kfree(sgt);
	return NULL;
}
EXPORT_SYMBOL_GPL(drm_gem_cma_prime_get_sg_table);
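
/*
 * Illustrative sketch: since a CMA buffer is contiguous, the table built by
 * dma_get_sgtable() here typically holds a single entry covering the whole
 * buffer. The PRIME export side of a driver is wired up as, e.g.:
 *
 *	static struct drm_driver foo_driver = {
 *		...
 *		.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
 *		.gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
 *	};
 */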
/**
 * drm_gem_cma_prime_import_sg_table - produce a CMA GEM object from another
 *     driver's scatter/gather table of pinned pages
 * @dev: device to import into
 * @attach: DMA-BUF attachment
 * @sgt: scatter/gather table of pinned pages
 *
 * This function imports a scatter/gather table exported via DMA-BUF by
 * another driver. Imported buffers must be physically contiguous in memory
 * (i.e. the entries of the scatter/gather table must describe a single
 * contiguous range). Drivers that use the CMA helpers should set this as
 * their &drm_driver.gem_prime_import_sg_table callback.
 *
 * Returns:
 * A pointer to a newly created GEM object or an ERR_PTR-encoded negative
 * error code on failure.
 */
struct drm_gem_object *
drm_gem_cma_prime_import_sg_table(struct drm_device *dev,
				  struct dma_buf_attachment *attach,
				  struct sg_table *sgt)
{
	struct drm_gem_cma_object *cma_obj;

	if (sgt->nents != 1) {
		/* check if the entries in the sg_table are contiguous */
		dma_addr_t next_addr = sg_dma_address(sgt->sgl);
		struct scatterlist *s;
		unsigned int i;

		for_each_sg(sgt->sgl, s, sgt->nents, i) {
			/*
			 * sg_dma_address(s) is only valid for entries
			 * that have sg_dma_len(s) != 0
			 */
			if (!sg_dma_len(s))
				continue;

			if (sg_dma_address(s) != next_addr)
				return ERR_PTR(-EINVAL);

			next_addr = sg_dma_address(s) + sg_dma_len(s);
		}
	}

	/* Create a CMA GEM buffer. */
	cma_obj = __drm_gem_cma_create(dev, attach->dmabuf->size);
	if (IS_ERR(cma_obj))
		return ERR_CAST(cma_obj);

	cma_obj->paddr = sg_dma_address(sgt->sgl);
	cma_obj->sgt = sgt;

	DRM_DEBUG_PRIME("dma_addr = %pad, size = %zu\n", &cma_obj->paddr,
			attach->dmabuf->size);

	return &cma_obj->base;
}
EXPORT_SYMBOL_GPL(drm_gem_cma_prime_import_sg_table);
/**
 * drm_gem_cma_prime_mmap - memory-map an exported CMA GEM object
 * @obj: GEM object
 * @vma: VMA for the area to be mapped
 *
 * This function maps a buffer imported via DRM PRIME into a userspace
 * process's address space. Drivers that use the CMA helpers should set this
 * as their &drm_driver.gem_prime_mmap callback.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_cma_prime_mmap(struct drm_gem_object *obj,
			   struct vm_area_struct *vma)
{
	struct drm_gem_cma_object *cma_obj;
	int ret;

	ret = drm_gem_mmap_obj(obj, obj->size, vma);
	if (ret < 0)
		return ret;

	cma_obj = to_drm_gem_cma_obj(obj);
	return drm_gem_cma_mmap_obj(cma_obj, vma);
}
EXPORT_SYMBOL_GPL(drm_gem_cma_prime_mmap);
/**
 * drm_gem_cma_prime_vmap - map a CMA GEM object into the kernel's virtual
 *     address space
 * @obj: GEM object
 *
 * This function maps a buffer exported via DRM PRIME into the kernel's
 * virtual address space. Since the CMA buffers are already mapped into the
 * kernel virtual address space this simply returns the cached virtual
 * address. Drivers using the CMA helpers should set this as their DRM
 * driver's &drm_driver.gem_prime_vmap callback.
 *
 * Returns:
 * The kernel virtual address of the CMA GEM object's backing store.
 */
void *drm_gem_cma_prime_vmap(struct drm_gem_object *obj)
{
	struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);

	return cma_obj->vaddr;
}
EXPORT_SYMBOL_GPL(drm_gem_cma_prime_vmap);
/**
 * drm_gem_cma_prime_vunmap - unmap a CMA GEM object from the kernel's virtual
 *     address space
 * @obj: GEM object
 * @vaddr: kernel virtual address where the CMA GEM object was mapped
 *
 * This function removes a buffer exported via DRM PRIME from the kernel's
 * virtual address space. This is a no-op because CMA buffers cannot be
 * unmapped from kernel space. Drivers using the CMA helpers should set this
 * as their &drm_driver.gem_prime_vunmap callback.
 */
void drm_gem_cma_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	/* Nothing to do */
}
EXPORT_SYMBOL_GPL(drm_gem_cma_prime_vunmap);
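
/*
 * Illustrative sketch: the complete PRIME wiring for a CMA-based driver,
 * pulling together the callbacks defined above. The "foo" name is
 * hypothetical.
 *
 *	static struct drm_driver foo_driver = {
 *		...
 *		.prime_handle_to_fd	= drm_gem_prime_handle_to_fd,
 *		.prime_fd_to_handle	= drm_gem_prime_fd_to_handle,
 *		.gem_prime_get_sg_table	= drm_gem_cma_prime_get_sg_table,
 *		.gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
 *		.gem_prime_vmap		= drm_gem_cma_prime_vmap,
 *		.gem_prime_vunmap	= drm_gem_cma_prime_vunmap,
 *		.gem_prime_mmap		= drm_gem_cma_prime_mmap,
 *	};
 */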