/*
 * Copyright 2011 Red Hat, Inc.
 * Copyright © 2014 The Chromium OS Authors
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER
 * IN AN ACTION OF CONTRACT, TORT, OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *      Adam Jackson <ajax@redhat.com>
 *      Ben Widawsky <ben@bwidawsk.net>
 */

/*
 * This is vgem, a (non-hardware-backed) GEM service.  This is used by Mesa's
 * software renderer and the X server for efficient buffer sharing.
 */

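/*
 * Illustrative only, not part of this driver: a minimal sketch of how
 * userspace typically exercises vgem through the generic DRM dumb-buffer
 * interface.  The device node path and the omitted error handling are
 * assumptions for the sketch, not something this file defines.
 *
 *      int fd = open("/dev/dri/card0", O_RDWR);   (assumed vgem node)
 *
 *      struct drm_mode_create_dumb create = {
 *              .width = 1024, .height = 768, .bpp = 32,
 *      };
 *      ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create);
 *
 *      struct drm_mode_map_dumb map = { .handle = create.handle };
 *      ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map);
 *      void *ptr = mmap(NULL, create.size, PROT_READ | PROT_WRITE,
 *                       MAP_SHARED, fd, map.offset);
 */
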
#include <linux/module.h>
#include <linux/ramfs.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>

#include "vgem_drv.h"

#define DRIVER_NAME     "vgem"
#define DRIVER_DESC     "Virtual GEM provider"
#define DRIVER_DATE     "20120112"
#define DRIVER_MAJOR    1
#define DRIVER_MINOR    0

static struct vgem_device {
        struct drm_device drm;
        struct platform_device *platform;
} *vgem_device;

static void vgem_gem_free_object(struct drm_gem_object *obj)
{
        struct drm_vgem_gem_object *vgem_obj = to_vgem_bo(obj);

        kvfree(vgem_obj->pages);
        mutex_destroy(&vgem_obj->pages_lock);

        if (obj->import_attach)
                drm_prime_gem_destroy(obj, vgem_obj->table);

        drm_gem_object_release(obj);
        kfree(vgem_obj);
}

static int vgem_gem_fault(struct vm_fault *vmf)
{
        struct vm_area_struct *vma = vmf->vma;
        struct drm_vgem_gem_object *obj = vma->vm_private_data;
        /* We don't use vmf->pgoff since that has the fake offset */
        unsigned long vaddr = vmf->address;
        int ret;
        loff_t num_pages;
        pgoff_t page_offset;

        page_offset = (vaddr - vma->vm_start) >> PAGE_SHIFT;
        num_pages = DIV_ROUND_UP(obj->base.size, PAGE_SIZE);

        if (page_offset > num_pages)
                return VM_FAULT_SIGBUS;

        ret = -ENOENT;
        mutex_lock(&obj->pages_lock);
        if (obj->pages) {
                /* Pinned backing store: hand out the cached page directly. */
                get_page(obj->pages[page_offset]);
                vmf->page = obj->pages[page_offset];
                ret = 0;
        }
        mutex_unlock(&obj->pages_lock);
        if (ret) {
                struct page *page;

                page = shmem_read_mapping_page(
                                        file_inode(obj->base.filp)->i_mapping,
                                        page_offset);
                if (!IS_ERR(page)) {
                        vmf->page = page;
                        ret = 0;
                } else switch (PTR_ERR(page)) {
                        case -ENOSPC:
                        case -ENOMEM:
                                ret = VM_FAULT_OOM;
                                break;
                        case -EBUSY:
                                ret = VM_FAULT_RETRY;
                                break;
                        case -EFAULT:
                        case -EINVAL:
                                ret = VM_FAULT_SIGBUS;
                                break;
                        default:
                                WARN_ON(PTR_ERR(page));
                                ret = VM_FAULT_SIGBUS;
                                break;
                }
        }
        return ret;
}

static const struct vm_operations_struct vgem_gem_vm_ops = {
        .fault = vgem_gem_fault,
        .open = drm_gem_vm_open,
        .close = drm_gem_vm_close,
};

static int vgem_open(struct drm_device *dev, struct drm_file *file)
{
        struct vgem_file *vfile;
        int ret;

        vfile = kzalloc(sizeof(*vfile), GFP_KERNEL);
        if (!vfile)
                return -ENOMEM;

        file->driver_priv = vfile;

        ret = vgem_fence_open(vfile);
        if (ret) {
                kfree(vfile);
                return ret;
        }

        return 0;
}

static void vgem_postclose(struct drm_device *dev, struct drm_file *file)
{
        struct vgem_file *vfile = file->driver_priv;

        vgem_fence_close(vfile);
        kfree(vfile);
}

static struct drm_vgem_gem_object *__vgem_gem_create(struct drm_device *dev,
                                                     unsigned long size)
{
        struct drm_vgem_gem_object *obj;
        int ret;

        obj = kzalloc(sizeof(*obj), GFP_KERNEL);
        if (!obj)
                return ERR_PTR(-ENOMEM);

        ret = drm_gem_object_init(dev, &obj->base, roundup(size, PAGE_SIZE));
        if (ret) {
                kfree(obj);
                return ERR_PTR(ret);
        }

        mutex_init(&obj->pages_lock);

        return obj;
}

static void __vgem_gem_destroy(struct drm_vgem_gem_object *obj)
{
        drm_gem_object_release(&obj->base);
        kfree(obj);
}

static struct drm_gem_object *vgem_gem_create(struct drm_device *dev,
                                              struct drm_file *file,
                                              unsigned int *handle,
                                              unsigned long size)
{
        struct drm_vgem_gem_object *obj;
        int ret;

        obj = __vgem_gem_create(dev, size);
        if (IS_ERR(obj))
                return ERR_CAST(obj);

        ret = drm_gem_handle_create(file, &obj->base, handle);
        if (ret)
                goto err;

        /* Drop the creation reference; the handle now keeps the object alive. */
        drm_gem_object_put_unlocked(&obj->base);

        return &obj->base;

err:
        __vgem_gem_destroy(obj);
        return ERR_PTR(ret);
}

static int vgem_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
                                struct drm_mode_create_dumb *args)
{
        struct drm_gem_object *gem_object;
        u64 pitch, size;

        pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
        size = args->height * pitch;
        if (size == 0)
                return -EINVAL;

        gem_object = vgem_gem_create(dev, file, &args->handle, size);
        if (IS_ERR(gem_object))
                return PTR_ERR(gem_object);

        args->size = gem_object->size;
        args->pitch = pitch;

        DRM_DEBUG_DRIVER("Created object of size %lld\n", size);

        return 0;
}

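/*
 * Worked example (illustrative): a 1024x768, 32 bpp dumb buffer gives
 * pitch = 1024 * DIV_ROUND_UP(32, 8) = 4096 bytes and
 * size = 768 * 4096 = 3145728 bytes (3 MiB), which drm_gem_object_init()
 * then rounds up to a whole number of pages.
 */
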
static int vgem_gem_dumb_map(struct drm_file *file, struct drm_device *dev,
                             uint32_t handle, uint64_t *offset)
{
        struct drm_gem_object *obj;
        int ret;

        obj = drm_gem_object_lookup(file, handle);
        if (!obj)
                return -ENOENT;

        if (!obj->filp) {
                ret = -EINVAL;
                goto unref;
        }

        ret = drm_gem_create_mmap_offset(obj);
        if (ret)
                goto unref;

        *offset = drm_vma_node_offset_addr(&obj->vma_node);
unref:
        drm_gem_object_put_unlocked(obj);

        return ret;
}

static struct drm_ioctl_desc vgem_ioctls[] = {
        DRM_IOCTL_DEF_DRV(VGEM_FENCE_ATTACH, vgem_fence_attach_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(VGEM_FENCE_SIGNAL, vgem_fence_signal_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
};

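/*
 * Illustrative only: how userspace might attach and later signal a vgem
 * fence on a buffer, using the structures from uapi/drm/vgem_drm.h.  The
 * handle value and the omitted error handling are assumptions for the
 * sketch.
 *
 *      struct drm_vgem_fence_attach attach = {
 *              .handle = handle,               (a valid GEM handle)
 *              .flags = VGEM_FENCE_WRITE,
 *      };
 *      ioctl(fd, DRM_IOCTL_VGEM_FENCE_ATTACH, &attach);
 *
 *      ... hand the buffer to another device/process ...
 *
 *      struct drm_vgem_fence_signal signal = { .fence = attach.out_fence };
 *      ioctl(fd, DRM_IOCTL_VGEM_FENCE_SIGNAL, &signal);
 */
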
static int vgem_mmap(struct file *filp, struct vm_area_struct *vma)
{
        unsigned long flags = vma->vm_flags;
        int ret;

        ret = drm_gem_mmap(filp, vma);
        if (ret)
                return ret;

        /* Keep the WC mapping set up by drm_gem_mmap(), but our pages
         * are ordinary and not special.
         */
        vma->vm_flags = flags | VM_DONTEXPAND | VM_DONTDUMP;
        return 0;
}

static const struct file_operations vgem_driver_fops = {
        .owner          = THIS_MODULE,
        .open           = drm_open,
        .mmap           = vgem_mmap,
        .poll           = drm_poll,
        .read           = drm_read,
        .unlocked_ioctl = drm_ioctl,
        .compat_ioctl   = drm_compat_ioctl,
        .release        = drm_release,
};

static struct page **vgem_pin_pages(struct drm_vgem_gem_object *bo)
{
        mutex_lock(&bo->pages_lock);
        if (bo->pages_pin_count++ == 0) {
                struct page **pages;

                pages = drm_gem_get_pages(&bo->base);
                if (IS_ERR(pages)) {
                        bo->pages_pin_count--;
                        mutex_unlock(&bo->pages_lock);
                        return pages;
                }

                bo->pages = pages;
        }
        mutex_unlock(&bo->pages_lock);

        return bo->pages;
}

static void vgem_unpin_pages(struct drm_vgem_gem_object *bo)
{
        mutex_lock(&bo->pages_lock);
        if (--bo->pages_pin_count == 0) {
                drm_gem_put_pages(&bo->base, bo->pages, true, true);
                bo->pages = NULL;
        }
        mutex_unlock(&bo->pages_lock);
}

static int vgem_prime_pin(struct drm_gem_object *obj)
{
        struct drm_vgem_gem_object *bo = to_vgem_bo(obj);
        long n_pages = obj->size >> PAGE_SHIFT;
        struct page **pages;

        pages = vgem_pin_pages(bo);
        if (IS_ERR(pages))
                return PTR_ERR(pages);

        /* Flush the object from the CPU cache so that importers can rely
         * on coherent indirect access via the exported dma-address.
         */
        drm_clflush_pages(pages, n_pages);

        return 0;
}

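/*
 * Illustrative only: the pin + flush above is what makes a vgem buffer
 * safe for other devices once it has been exported as a dma-buf.  A
 * userspace export might look like this (handle and error handling are
 * assumptions for the sketch):
 *
 *      struct drm_prime_handle prime = {
 *              .handle = handle,
 *              .flags = DRM_CLOEXEC | DRM_RDWR,
 *      };
 *      ioctl(fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &prime);
 *      int dmabuf_fd = prime.fd;
 */
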
static void vgem_prime_unpin(struct drm_gem_object *obj)
{
        struct drm_vgem_gem_object *bo = to_vgem_bo(obj);

        vgem_unpin_pages(bo);
}

static struct sg_table *vgem_prime_get_sg_table(struct drm_gem_object *obj)
{
        struct drm_vgem_gem_object *bo = to_vgem_bo(obj);

        return drm_prime_pages_to_sg(bo->pages, bo->base.size >> PAGE_SHIFT);
}

static struct drm_gem_object *vgem_prime_import(struct drm_device *dev,
                                                struct dma_buf *dma_buf)
{
        struct vgem_device *vgem = container_of(dev, typeof(*vgem), drm);

        return drm_gem_prime_import_dev(dev, dma_buf, &vgem->platform->dev);
}

static struct drm_gem_object *vgem_prime_import_sg_table(struct drm_device *dev,
                        struct dma_buf_attachment *attach, struct sg_table *sg)
{
        struct drm_vgem_gem_object *obj;
        int npages;

        obj = __vgem_gem_create(dev, attach->dmabuf->size);
        if (IS_ERR(obj))
                return ERR_CAST(obj);

        npages = PAGE_ALIGN(attach->dmabuf->size) / PAGE_SIZE;

        obj->table = sg;
        obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
        if (!obj->pages) {
                __vgem_gem_destroy(obj);
                return ERR_PTR(-ENOMEM);
        }

        obj->pages_pin_count++; /* perma-pinned */
        drm_prime_sg_to_page_addr_arrays(obj->table, obj->pages, NULL,
                                         npages);
        return &obj->base;
}

static void *vgem_prime_vmap(struct drm_gem_object *obj)
{
        struct drm_vgem_gem_object *bo = to_vgem_bo(obj);
        long n_pages = obj->size >> PAGE_SHIFT;
        struct page **pages;

        pages = vgem_pin_pages(bo);
        if (IS_ERR(pages))
                return NULL;

        return vmap(pages, n_pages, 0, pgprot_writecombine(PAGE_KERNEL));
}

static void vgem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
        struct drm_vgem_gem_object *bo = to_vgem_bo(obj);

        vunmap(vaddr);
        vgem_unpin_pages(bo);
}

static int vgem_prime_mmap(struct drm_gem_object *obj,
                           struct vm_area_struct *vma)
{
        int ret;

        if (obj->size < vma->vm_end - vma->vm_start)
                return -EINVAL;

        if (!obj->filp)
                return -ENODEV;

        ret = call_mmap(obj->filp, vma);
        if (ret)
                return ret;

        /* Redirect the VMA to the shmem backing file. */
        fput(vma->vm_file);
        vma->vm_file = get_file(obj->filp);
        vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
        vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));

        return 0;
}

static void vgem_release(struct drm_device *dev)
{
        struct vgem_device *vgem = container_of(dev, typeof(*vgem), drm);

        platform_device_unregister(vgem->platform);
        drm_dev_fini(&vgem->drm);

        kfree(vgem);
}

static struct drm_driver vgem_driver = {
        .driver_features                = DRIVER_GEM | DRIVER_PRIME,
        .release                        = vgem_release,
        .open                           = vgem_open,
        .postclose                      = vgem_postclose,
        .gem_free_object_unlocked       = vgem_gem_free_object,
        .gem_vm_ops                     = &vgem_gem_vm_ops,
        .ioctls                         = vgem_ioctls,
        .num_ioctls                     = ARRAY_SIZE(vgem_ioctls),
        .fops                           = &vgem_driver_fops,

        .dumb_create                    = vgem_gem_dumb_create,
        .dumb_map_offset                = vgem_gem_dumb_map,

        .prime_handle_to_fd             = drm_gem_prime_handle_to_fd,
        .prime_fd_to_handle             = drm_gem_prime_fd_to_handle,
        .gem_prime_pin                  = vgem_prime_pin,
        .gem_prime_unpin                = vgem_prime_unpin,
        .gem_prime_import               = vgem_prime_import,
        .gem_prime_export               = drm_gem_prime_export,
        .gem_prime_import_sg_table      = vgem_prime_import_sg_table,
        .gem_prime_get_sg_table         = vgem_prime_get_sg_table,
        .gem_prime_vmap                 = vgem_prime_vmap,
        .gem_prime_vunmap               = vgem_prime_vunmap,
        .gem_prime_mmap                 = vgem_prime_mmap,

        .name   = DRIVER_NAME,
        .desc   = DRIVER_DESC,
        .date   = DRIVER_DATE,
        .major  = DRIVER_MAJOR,
        .minor  = DRIVER_MINOR,
};

static int __init vgem_init(void)
{
        int ret;

        vgem_device = kzalloc(sizeof(*vgem_device), GFP_KERNEL);
        if (!vgem_device)
                return -ENOMEM;

        ret = drm_dev_init(&vgem_device->drm, &vgem_driver, NULL);
        if (ret)
                goto out_free;

        vgem_device->platform =
                platform_device_register_simple("vgem", -1, NULL, 0);
        if (IS_ERR(vgem_device->platform)) {
                ret = PTR_ERR(vgem_device->platform);
                goto out_fini;
        }

        dma_coerce_mask_and_coherent(&vgem_device->platform->dev,
                                     DMA_BIT_MASK(64));

        /* Final step: expose the device/driver to userspace */
        ret = drm_dev_register(&vgem_device->drm, 0);
        if (ret)
                goto out_unregister;

        return 0;

out_unregister:
        platform_device_unregister(vgem_device->platform);
out_fini:
        drm_dev_fini(&vgem_device->drm);
out_free:
        kfree(vgem_device);
        return ret;
}

static void __exit vgem_exit(void)
{
        drm_dev_unregister(&vgem_device->drm);
        drm_dev_unref(&vgem_device->drm);
}

module_init(vgem_init);
module_exit(vgem_exit);

MODULE_AUTHOR("Red Hat, Inc.");
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");