drivers/gpu/drm/drm_gem_cma_helper.c

/*
 * drm gem CMA (contiguous memory allocator) helper functions
 *
 * Copyright (C) 2012 Sascha Hauer, Pengutronix
 *
 * Based on Samsung Exynos code
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/export.h>
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>

#include <drm/drmP.h>
#include <drm/drm.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_vma_manager.h>

/*
 * __drm_gem_cma_create - Create a GEM CMA object without allocating memory
 * @drm: The drm device
 * @size: The GEM object size
 *
 * This function creates and initializes a GEM CMA object of the given size,
 * but doesn't allocate any memory to back the object.
 *
 * Returns a struct drm_gem_cma_object * on success or an ERR_PTR()-encoded
 * negative error code on failure.
 */
static struct drm_gem_cma_object *
__drm_gem_cma_create(struct drm_device *drm, unsigned int size)
{
	struct drm_gem_cma_object *cma_obj;
	struct drm_gem_object *gem_obj;
	int ret;

	cma_obj = kzalloc(sizeof(*cma_obj), GFP_KERNEL);
	if (!cma_obj)
		return ERR_PTR(-ENOMEM);

	gem_obj = &cma_obj->base;

	ret = drm_gem_object_init(drm, gem_obj, size);
	if (ret)
		goto error;

	ret = drm_gem_create_mmap_offset(gem_obj);
	if (ret) {
		drm_gem_object_release(gem_obj);
		goto error;
	}

	return cma_obj;

error:
	kfree(cma_obj);
	return ERR_PTR(ret);
}

/*
 * drm_gem_cma_create - allocate an object with the given size
 *
 * Returns a struct drm_gem_cma_object * on success or an ERR_PTR()-encoded
 * negative error code on failure.
 */
struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm,
		unsigned int size)
{
	struct drm_gem_cma_object *cma_obj;
	int ret;

	size = round_up(size, PAGE_SIZE);

	cma_obj = __drm_gem_cma_create(drm, size);
	if (IS_ERR(cma_obj))
		return cma_obj;

	cma_obj->vaddr = dma_alloc_writecombine(drm->dev, size,
			&cma_obj->paddr, GFP_KERNEL | __GFP_NOWARN);
	if (!cma_obj->vaddr) {
		dev_err(drm->dev, "failed to allocate buffer with size %u\n",
			size);
		ret = -ENOMEM;
		goto error;
	}

	return cma_obj;

error:
	drm_gem_cma_free_object(&cma_obj->base);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(drm_gem_cma_create);

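/*
 * Usage sketch (hypothetical driver code, not part of this helper):
 * allocating and releasing a CMA-backed GEM buffer with the function
 * above. The name example_cma_alloc() and its 'len' parameter are
 * illustrative assumptions only.
 */
#if 0
static int example_cma_alloc(struct drm_device *drm, unsigned int len)
{
	struct drm_gem_cma_object *cma_obj;

	/* drm_gem_cma_create() rounds 'len' up to a page multiple. */
	cma_obj = drm_gem_cma_create(drm, len);
	if (IS_ERR(cma_obj))
		return PTR_ERR(cma_obj);

	/*
	 * cma_obj->paddr is the DMA address to hand to the hardware;
	 * cma_obj->vaddr is the matching kernel virtual mapping.
	 */

	drm_gem_cma_free_object(&cma_obj->base);
	return 0;
}
#endif
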
/*
 * drm_gem_cma_create_with_handle - allocate an object with the given
 * size and create a gem handle on it
 *
 * Returns a struct drm_gem_cma_object * on success or an ERR_PTR()-encoded
 * negative error code on failure.
 */
static struct drm_gem_cma_object *drm_gem_cma_create_with_handle(
		struct drm_file *file_priv,
		struct drm_device *drm, unsigned int size,
		unsigned int *handle)
{
	struct drm_gem_cma_object *cma_obj;
	struct drm_gem_object *gem_obj;
	int ret;

	cma_obj = drm_gem_cma_create(drm, size);
	if (IS_ERR(cma_obj))
		return cma_obj;

	gem_obj = &cma_obj->base;

	/*
	 * Allocate an id in the idr table where the object is registered;
	 * the handle carries the id that userspace sees.
	 */
	ret = drm_gem_handle_create(file_priv, gem_obj, handle);
	if (ret)
		goto err_handle_create;

	/* Drop the reference from allocation - the handle holds it now. */
	drm_gem_object_unreference_unlocked(gem_obj);

	return cma_obj;

err_handle_create:
	drm_gem_cma_free_object(gem_obj);

	return ERR_PTR(ret);
}

/*
 * drm_gem_cma_free_object - (struct drm_driver)->gem_free_object callback
 * function
 */
void drm_gem_cma_free_object(struct drm_gem_object *gem_obj)
{
	struct drm_gem_cma_object *cma_obj;

	drm_gem_free_mmap_offset(gem_obj);

	cma_obj = to_drm_gem_cma_obj(gem_obj);

	if (cma_obj->vaddr) {
		dma_free_writecombine(gem_obj->dev->dev, cma_obj->base.size,
				      cma_obj->vaddr, cma_obj->paddr);
	} else if (gem_obj->import_attach) {
		drm_prime_gem_destroy(gem_obj, cma_obj->sgt);
	}

	drm_gem_object_release(gem_obj);

	kfree(cma_obj);
}
EXPORT_SYMBOL_GPL(drm_gem_cma_free_object);

/*
 * drm_gem_cma_dumb_create - (struct drm_driver)->dumb_create callback
 * function
 *
 * This aligns the pitch and size arguments to the minimum required. Wrap
 * this in your own function if you need bigger alignment.
 */
int drm_gem_cma_dumb_create(struct drm_file *file_priv,
		struct drm_device *dev, struct drm_mode_create_dumb *args)
{
	struct drm_gem_cma_object *cma_obj;
	int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);

	if (args->pitch < min_pitch)
		args->pitch = min_pitch;

	if (args->size < args->pitch * args->height)
		args->size = args->pitch * args->height;

	cma_obj = drm_gem_cma_create_with_handle(file_priv, dev,
			args->size, &args->handle);
	return PTR_ERR_OR_ZERO(cma_obj);
}
EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_create);

/*
 * drm_gem_cma_dumb_map_offset - (struct drm_driver)->dumb_map_offset callback
 * function
 */
int drm_gem_cma_dumb_map_offset(struct drm_file *file_priv,
		struct drm_device *drm, uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *gem_obj;

	mutex_lock(&drm->struct_mutex);

	gem_obj = drm_gem_object_lookup(drm, file_priv, handle);
	if (!gem_obj) {
		dev_err(drm->dev, "failed to lookup gem object\n");
		mutex_unlock(&drm->struct_mutex);
		return -EINVAL;
	}

	*offset = drm_vma_node_offset_addr(&gem_obj->vma_node);

	drm_gem_object_unreference(gem_obj);

	mutex_unlock(&drm->struct_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_map_offset);

const struct vm_operations_struct drm_gem_cma_vm_ops = {
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};
EXPORT_SYMBOL_GPL(drm_gem_cma_vm_ops);

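/*
 * Wiring sketch (hypothetical, not part of this helper): a driver built
 * on these helpers typically plugs them into its struct drm_driver as
 * shown; only the fields relevant to this file are listed, the rest of
 * the driver is assumed.
 */
#if 0
static struct drm_driver example_driver = {
	.gem_free_object	= drm_gem_cma_free_object,
	.gem_vm_ops		= &drm_gem_cma_vm_ops,
	.dumb_create		= drm_gem_cma_dumb_create,
	.dumb_map_offset	= drm_gem_cma_dumb_map_offset,
	.dumb_destroy		= drm_gem_dumb_destroy,
};
#endif
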
static int drm_gem_cma_mmap_obj(struct drm_gem_cma_object *cma_obj,
				struct vm_area_struct *vma)
{
	int ret;

	/*
	 * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the
	 * vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to map
	 * the whole buffer.
	 */
	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_pgoff = 0;

	ret = dma_mmap_writecombine(cma_obj->base.dev->dev, vma,
				    cma_obj->vaddr, cma_obj->paddr,
				    vma->vm_end - vma->vm_start);
	if (ret)
		drm_gem_vm_close(vma);

	return ret;
}

/*
 * drm_gem_cma_mmap - (struct file_operations)->mmap callback function
 */
int drm_gem_cma_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_gem_cma_object *cma_obj;
	struct drm_gem_object *gem_obj;
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret)
		return ret;

	gem_obj = vma->vm_private_data;
	cma_obj = to_drm_gem_cma_obj(gem_obj);

	return drm_gem_cma_mmap_obj(cma_obj, vma);
}
EXPORT_SYMBOL_GPL(drm_gem_cma_mmap);

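/*
 * Wiring sketch (hypothetical, not part of this helper): the mmap entry
 * point above slots into the driver's file_operations next to the
 * generic DRM file hooks.
 */
#if 0
static const struct file_operations example_fops = {
	.owner		= THIS_MODULE,
	.open		= drm_open,
	.release	= drm_release,
	.unlocked_ioctl	= drm_ioctl,
	.mmap		= drm_gem_cma_mmap,
	.poll		= drm_poll,
	.read		= drm_read,
};
#endif
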
#ifdef CONFIG_DEBUG_FS
void drm_gem_cma_describe(struct drm_gem_cma_object *cma_obj, struct seq_file *m)
{
	struct drm_gem_object *obj = &cma_obj->base;
	struct drm_device *dev = obj->dev;
	uint64_t off;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	off = drm_vma_node_start(&obj->vma_node);

	seq_printf(m, "%2d (%2d) %08llx %pad %p %zu",
			obj->name, obj->refcount.refcount.counter,
			off, &cma_obj->paddr, cma_obj->vaddr, obj->size);

	seq_printf(m, "\n");
}
EXPORT_SYMBOL_GPL(drm_gem_cma_describe);
#endif

/* low-level interface prime helpers */
struct sg_table *drm_gem_cma_prime_get_sg_table(struct drm_gem_object *obj)
{
	struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);
	struct sg_table *sgt;
	int ret;

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return NULL;

	ret = dma_get_sgtable(obj->dev->dev, sgt, cma_obj->vaddr,
			      cma_obj->paddr, obj->size);
	if (ret < 0)
		goto out;

	return sgt;

out:
	kfree(sgt);
	return NULL;
}
EXPORT_SYMBOL_GPL(drm_gem_cma_prime_get_sg_table);

struct drm_gem_object *
drm_gem_cma_prime_import_sg_table(struct drm_device *dev, size_t size,
				  struct sg_table *sgt)
{
	struct drm_gem_cma_object *cma_obj;

	if (sgt->nents != 1)
		return ERR_PTR(-EINVAL);

	/* Create a CMA GEM buffer. */
	cma_obj = __drm_gem_cma_create(dev, size);
	if (IS_ERR(cma_obj))
		return ERR_CAST(cma_obj);

	cma_obj->paddr = sg_dma_address(sgt->sgl);
	cma_obj->sgt = sgt;

	DRM_DEBUG_PRIME("dma_addr = %pad, size = %zu\n", &cma_obj->paddr, size);

	return &cma_obj->base;
}
EXPORT_SYMBOL_GPL(drm_gem_cma_prime_import_sg_table);

int drm_gem_cma_prime_mmap(struct drm_gem_object *obj,
			   struct vm_area_struct *vma)
{
	struct drm_gem_cma_object *cma_obj;
	struct drm_device *dev = obj->dev;
	int ret;

	mutex_lock(&dev->struct_mutex);
	ret = drm_gem_mmap_obj(obj, obj->size, vma);
	mutex_unlock(&dev->struct_mutex);
	if (ret < 0)
		return ret;

	cma_obj = to_drm_gem_cma_obj(obj);
	return drm_gem_cma_mmap_obj(cma_obj, vma);
}
EXPORT_SYMBOL_GPL(drm_gem_cma_prime_mmap);

void *drm_gem_cma_prime_vmap(struct drm_gem_object *obj)
{
	struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);

	return cma_obj->vaddr;
}
EXPORT_SYMBOL_GPL(drm_gem_cma_prime_vmap);

void drm_gem_cma_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	/* Nothing to do */
}
EXPORT_SYMBOL_GPL(drm_gem_cma_prime_vunmap);

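/*
 * Wiring sketch (hypothetical, not part of this helper): exposing CMA
 * buffers over PRIME/dma-buf by pointing the generic PRIME hooks at the
 * helpers above; only the PRIME-related drm_driver fields are shown.
 */
#if 0
static struct drm_driver example_prime_driver = {
	.driver_features		= DRIVER_GEM | DRIVER_PRIME,
	.prime_handle_to_fd		= drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle		= drm_gem_prime_fd_to_handle,
	.gem_prime_export		= drm_gem_prime_export,
	.gem_prime_import		= drm_gem_prime_import,
	.gem_prime_get_sg_table		= drm_gem_cma_prime_get_sg_table,
	.gem_prime_import_sg_table	= drm_gem_cma_prime_import_sg_table,
	.gem_prime_vmap			= drm_gem_cma_prime_vmap,
	.gem_prime_vunmap		= drm_gem_cma_prime_vunmap,
	.gem_prime_mmap			= drm_gem_cma_prime_mmap,
};
#endif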