treewide: remove redundant IS_ERR() before error code check
drivers/gpu/drm/vgem/vgem_drv.c
/*
 * Copyright 2011 Red Hat, Inc.
 * Copyright © 2014 The Chromium OS Authors
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software")
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER
 * IN AN ACTION OF CONTRACT, TORT, OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *	Adam Jackson <ajax@redhat.com>
 *	Ben Widawsky <ben@bwidawsk.net>
 */
/**
 * This is vgem, a (non-hardware-backed) GEM service. This is used by Mesa's
 * software renderer and the X server for efficient buffer sharing.
 */
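/*
 * Example userspace usage (a minimal sketch: the card node path and the
 * omitted error handling are illustrative, while the ioctls and structs are
 * the standard dumb-buffer UAPI from <drm/drm.h> and <drm/drm_mode.h>):
 *
 *	int fd = open("/dev/dri/card0", O_RDWR);
 *
 *	struct drm_mode_create_dumb create = {
 *		.width = 1024, .height = 768, .bpp = 32,
 *	};
 *	ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create);
 *
 *	struct drm_mode_map_dumb map = { .handle = create.handle };
 *	ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map);
 *
 *	void *ptr = mmap(NULL, create.size, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, map.offset);
 *
 * The handle can then be exported as a dma-buf fd with
 * DRM_IOCTL_PRIME_HANDLE_TO_FD and shared with a real GPU driver.
 */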
#include <linux/dma-buf.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/shmem_fs.h>
#include <linux/vmalloc.h>

#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_prime.h>

#include "vgem_drv.h"

#define DRIVER_NAME	"vgem"
#define DRIVER_DESC	"Virtual GEM provider"
#define DRIVER_DATE	"20120112"
#define DRIVER_MAJOR	1
#define DRIVER_MINOR	0
static struct vgem_device {
	struct drm_device drm;
	struct platform_device *platform;
} *vgem_device;

static void vgem_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_vgem_gem_object *vgem_obj = to_vgem_bo(obj);

	kvfree(vgem_obj->pages);
	mutex_destroy(&vgem_obj->pages_lock);

	if (obj->import_attach)
		drm_prime_gem_destroy(obj, vgem_obj->table);

	drm_gem_object_release(obj);
	kfree(vgem_obj);
}
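/*
 * Page fault handler for mmaps of a vgem object. If the backing pages are
 * currently pinned, the already-resident page is inserted directly;
 * otherwise the page is read in from the object's shmem mapping and any
 * shmem_read_mapping_page() error is translated to a VM_FAULT_* code.
 */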
static vm_fault_t vgem_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_vgem_gem_object *obj = vma->vm_private_data;
	/* We don't use vmf->pgoff since that has the fake offset */
	unsigned long vaddr = vmf->address;
	vm_fault_t ret = VM_FAULT_SIGBUS;
	loff_t num_pages;
	pgoff_t page_offset;
	page_offset = (vaddr - vma->vm_start) >> PAGE_SHIFT;

	num_pages = DIV_ROUND_UP(obj->base.size, PAGE_SIZE);

	if (page_offset >= num_pages)
		return VM_FAULT_SIGBUS;

	mutex_lock(&obj->pages_lock);
	if (obj->pages) {
		get_page(obj->pages[page_offset]);
		vmf->page = obj->pages[page_offset];
		ret = 0;
	}
	mutex_unlock(&obj->pages_lock);
	if (ret) {
		struct page *page;

		page = shmem_read_mapping_page(
					file_inode(obj->base.filp)->i_mapping,
					page_offset);
		if (!IS_ERR(page)) {
			vmf->page = page;
			ret = 0;
		} else switch (PTR_ERR(page)) {
			case -ENOSPC:
			case -ENOMEM:
				ret = VM_FAULT_OOM;
				break;
			case -EBUSY:
				ret = VM_FAULT_RETRY;
				break;
			case -EFAULT:
			case -EINVAL:
				ret = VM_FAULT_SIGBUS;
				break;
			default:
				WARN_ON(PTR_ERR(page));
				ret = VM_FAULT_SIGBUS;
				break;
		}
	}
	return ret;
}

static const struct vm_operations_struct vgem_gem_vm_ops = {
	.fault = vgem_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};
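/*
 * Per-open-file setup: allocate the struct vgem_file that holds the fence
 * state used by the VGEM_FENCE_ATTACH/SIGNAL ioctls (see vgem_fence.c).
 */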
static int vgem_open(struct drm_device *dev, struct drm_file *file)
{
	struct vgem_file *vfile;
	int ret;

	vfile = kzalloc(sizeof(*vfile), GFP_KERNEL);
	if (!vfile)
		return -ENOMEM;

	file->driver_priv = vfile;

	ret = vgem_fence_open(vfile);
	if (ret) {
		kfree(vfile);
		return ret;
	}

	return 0;
}

static void vgem_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct vgem_file *vfile = file->driver_priv;

	vgem_fence_close(vfile);
	kfree(vfile);
}

static struct drm_vgem_gem_object *__vgem_gem_create(struct drm_device *dev,
						     unsigned long size)
{
	struct drm_vgem_gem_object *obj;
	int ret;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return ERR_PTR(-ENOMEM);

	ret = drm_gem_object_init(dev, &obj->base, roundup(size, PAGE_SIZE));
	if (ret) {
		kfree(obj);
		return ERR_PTR(ret);
	}

	mutex_init(&obj->pages_lock);

	return obj;
}

static void __vgem_gem_destroy(struct drm_vgem_gem_object *obj)
{
	drm_gem_object_release(&obj->base);
	kfree(obj);
}

static struct drm_gem_object *vgem_gem_create(struct drm_device *dev,
					      struct drm_file *file,
					      unsigned int *handle,
					      unsigned long size)
{
	struct drm_vgem_gem_object *obj;
	int ret;

	obj = __vgem_gem_create(dev, size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	ret = drm_gem_handle_create(file, &obj->base, handle);
	drm_gem_object_put_unlocked(&obj->base);
	if (ret)
		return ERR_PTR(ret);

	return &obj->base;
}
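/*
 * DRM_IOCTL_MODE_CREATE_DUMB backend: the buffer size is height * pitch,
 * with the pitch derived from width and bits-per-pixel rounded up to whole
 * bytes; the resulting handle, pitch and size are reported back to the
 * caller.
 */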
static int vgem_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
				struct drm_mode_create_dumb *args)
{
	struct drm_gem_object *gem_object;
	u64 pitch, size;

	pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
	size = args->height * pitch;
	if (size == 0)
		return -EINVAL;

	gem_object = vgem_gem_create(dev, file, &args->handle, size);
	if (IS_ERR(gem_object))
		return PTR_ERR(gem_object);

	args->size = gem_object->size;
	args->pitch = pitch;

	DRM_DEBUG("Created object of size %lld\n", size);

	return 0;
}
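/*
 * DRM_IOCTL_MODE_MAP_DUMB backend: look up the handle and return the fake
 * offset that userspace then passes to mmap() on the DRM fd.
 */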
static int vgem_gem_dumb_map(struct drm_file *file, struct drm_device *dev,
			     uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret;

	obj = drm_gem_object_lookup(file, handle);
	if (!obj)
		return -ENOENT;

	if (!obj->filp) {
		ret = -EINVAL;
		goto unref;
	}

	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto unref;

	*offset = drm_vma_node_offset_addr(&obj->vma_node);
unref:
	drm_gem_object_put_unlocked(obj);

	return ret;
}

static struct drm_ioctl_desc vgem_ioctls[] = {
	DRM_IOCTL_DEF_DRV(VGEM_FENCE_ATTACH, vgem_fence_attach_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VGEM_FENCE_SIGNAL, vgem_fence_signal_ioctl, DRM_RENDER_ALLOW),
};
static int vgem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	unsigned long flags = vma->vm_flags;
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret)
		return ret;

	/* Keep the WC mapping set by drm_gem_mmap() but our pages
	 * are ordinary and not special.
	 */
	vma->vm_flags = flags | VM_DONTEXPAND | VM_DONTDUMP;
	return 0;
}
static const struct file_operations vgem_driver_fops = {
	.owner		= THIS_MODULE,
	.open		= drm_open,
	.mmap		= vgem_mmap,
	.poll		= drm_poll,
	.read		= drm_read,
	.unlocked_ioctl = drm_ioctl,
	.compat_ioctl	= drm_compat_ioctl,
	.release	= drm_release,
};
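/*
 * Pin/unpin helpers: pages_pin_count is a refcount protected by pages_lock.
 * The first pin populates bo->pages via drm_gem_get_pages(); the last unpin
 * returns the pages, marking them dirty and accessed.
 */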
static struct page **vgem_pin_pages(struct drm_vgem_gem_object *bo)
{
	mutex_lock(&bo->pages_lock);
	if (bo->pages_pin_count++ == 0) {
		struct page **pages;

		pages = drm_gem_get_pages(&bo->base);
		if (IS_ERR(pages)) {
			bo->pages_pin_count--;
			mutex_unlock(&bo->pages_lock);
			return pages;
		}

		bo->pages = pages;
	}
	mutex_unlock(&bo->pages_lock);

	return bo->pages;
}

static void vgem_unpin_pages(struct drm_vgem_gem_object *bo)
{
	mutex_lock(&bo->pages_lock);
	if (--bo->pages_pin_count == 0) {
		drm_gem_put_pages(&bo->base, bo->pages, true, true);
		bo->pages = NULL;
	}
	mutex_unlock(&bo->pages_lock);
}

static int vgem_prime_pin(struct drm_gem_object *obj)
{
	struct drm_vgem_gem_object *bo = to_vgem_bo(obj);
	long n_pages = obj->size >> PAGE_SHIFT;
	struct page **pages;

	pages = vgem_pin_pages(bo);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	/* Flush the object from the CPU cache so that importers can rely
	 * on coherent indirect access via the exported dma-address.
	 */
	drm_clflush_pages(pages, n_pages);

	return 0;
}

static void vgem_prime_unpin(struct drm_gem_object *obj)
{
	struct drm_vgem_gem_object *bo = to_vgem_bo(obj);

	vgem_unpin_pages(bo);
}

static struct sg_table *vgem_prime_get_sg_table(struct drm_gem_object *obj)
{
	struct drm_vgem_gem_object *bo = to_vgem_bo(obj);

	return drm_prime_pages_to_sg(bo->pages, bo->base.size >> PAGE_SHIFT);
}

static struct drm_gem_object *vgem_prime_import(struct drm_device *dev,
						struct dma_buf *dma_buf)
{
	struct vgem_device *vgem = container_of(dev, typeof(*vgem), drm);

	return drm_gem_prime_import_dev(dev, dma_buf, &vgem->platform->dev);
}
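/*
 * Import of a dma-buf exported by another driver: wrap the sg_table in a new
 * vgem object and extract its page array. The pages are treated as
 * permanently pinned for the lifetime of the import.
 */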
static struct drm_gem_object *vgem_prime_import_sg_table(struct drm_device *dev,
			struct dma_buf_attachment *attach, struct sg_table *sg)
{
	struct drm_vgem_gem_object *obj;
	int npages;

	obj = __vgem_gem_create(dev, attach->dmabuf->size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	npages = PAGE_ALIGN(attach->dmabuf->size) / PAGE_SIZE;

	obj->table = sg;
	obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!obj->pages) {
		__vgem_gem_destroy(obj);
		return ERR_PTR(-ENOMEM);
	}

	obj->pages_pin_count++; /* perma-pinned */
	drm_prime_sg_to_page_addr_arrays(obj->table, obj->pages, NULL,
					 npages);
	return &obj->base;
}
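/*
 * Kernel vmap of the object: pin the backing pages and map them
 * write-combined, matching the WC userspace mappings set up elsewhere in
 * this driver.
 */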
static void *vgem_prime_vmap(struct drm_gem_object *obj)
{
	struct drm_vgem_gem_object *bo = to_vgem_bo(obj);
	long n_pages = obj->size >> PAGE_SHIFT;
	struct page **pages;

	pages = vgem_pin_pages(bo);
	if (IS_ERR(pages))
		return NULL;

	return vmap(pages, n_pages, 0, pgprot_writecombine(PAGE_KERNEL));
}

static void vgem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	struct drm_vgem_gem_object *bo = to_vgem_bo(obj);

	vunmap(vaddr);
	vgem_unpin_pages(bo);
}
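/*
 * mmap of an exported dma-buf: let the underlying shmem file handle the VMA,
 * then point vma->vm_file at that file and force a write-combined,
 * non-expandable mapping.
 */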
static int vgem_prime_mmap(struct drm_gem_object *obj,
			   struct vm_area_struct *vma)
{
	int ret;

	if (obj->size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (!obj->filp)
		return -ENODEV;

	ret = call_mmap(obj->filp, vma);
	if (ret)
		return ret;

	fput(vma->vm_file);
	vma->vm_file = get_file(obj->filp);
	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));

	return 0;
}

static void vgem_release(struct drm_device *dev)
{
	struct vgem_device *vgem = container_of(dev, typeof(*vgem), drm);

	platform_device_unregister(vgem->platform);
	drm_dev_fini(&vgem->drm);

	kfree(vgem);
}

static struct drm_driver vgem_driver = {
	.driver_features		= DRIVER_GEM | DRIVER_RENDER,
	.release			= vgem_release,
	.open				= vgem_open,
	.postclose			= vgem_postclose,
	.gem_free_object_unlocked	= vgem_gem_free_object,
	.gem_vm_ops			= &vgem_gem_vm_ops,
	.ioctls				= vgem_ioctls,
	.num_ioctls			= ARRAY_SIZE(vgem_ioctls),
	.fops				= &vgem_driver_fops,

	.dumb_create			= vgem_gem_dumb_create,
	.dumb_map_offset		= vgem_gem_dumb_map,

	.prime_handle_to_fd		= drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle		= drm_gem_prime_fd_to_handle,
	.gem_prime_pin			= vgem_prime_pin,
	.gem_prime_unpin		= vgem_prime_unpin,
	.gem_prime_import		= vgem_prime_import,
	.gem_prime_import_sg_table	= vgem_prime_import_sg_table,
	.gem_prime_get_sg_table		= vgem_prime_get_sg_table,
	.gem_prime_vmap			= vgem_prime_vmap,
	.gem_prime_vunmap		= vgem_prime_vunmap,
	.gem_prime_mmap			= vgem_prime_mmap,

	.name	= DRIVER_NAME,
	.desc	= DRIVER_DESC,
	.date	= DRIVER_DATE,
	.major	= DRIVER_MAJOR,
	.minor	= DRIVER_MINOR,
};
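/*
 * Module init: vgem has no hardware, so a dummy platform device is
 * registered purely to provide a struct device for DMA mapping (given a
 * 64-bit DMA mask); the DRM device is then initialised against it and
 * registered with the core.
 */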
static int __init vgem_init(void)
{
	int ret;

	vgem_device = kzalloc(sizeof(*vgem_device), GFP_KERNEL);
	if (!vgem_device)
		return -ENOMEM;

	vgem_device->platform =
		platform_device_register_simple("vgem", -1, NULL, 0);
	if (IS_ERR(vgem_device->platform)) {
		ret = PTR_ERR(vgem_device->platform);
		goto out_free;
	}

	dma_coerce_mask_and_coherent(&vgem_device->platform->dev,
				     DMA_BIT_MASK(64));
	ret = drm_dev_init(&vgem_device->drm, &vgem_driver,
			   &vgem_device->platform->dev);
	if (ret)
		goto out_unregister;

	/* Final step: expose the device/driver to userspace */
	ret = drm_dev_register(&vgem_device->drm, 0);
	if (ret)
		goto out_fini;

	return 0;

out_fini:
	drm_dev_fini(&vgem_device->drm);
out_unregister:
	platform_device_unregister(vgem_device->platform);
out_free:
	kfree(vgem_device);
	return ret;
}

static void __exit vgem_exit(void)
{
	drm_dev_unregister(&vgem_device->drm);
	drm_dev_put(&vgem_device->drm);
}

module_init(vgem_init);
module_exit(vgem_exit);

MODULE_AUTHOR("Red Hat, Inc.");
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");