drivers/gpu/drm/drm_gem.c
/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include "drmP.h"
/** @file drm_gem.c
 *
 * This file provides some of the base ioctls and library routines for
 * the graphics memory manager implemented by each device driver.
 *
 * Because various devices have different requirements in terms of
 * synchronization and migration strategies, implementing that is left up to
 * the driver, and all that the general API provides should be generic --
 * allocating objects, reading/writing data with the cpu, freeing objects.
 * Even there, platform-dependent optimizations for reading/writing data with
 * the CPU mean we'll likely hook those out to driver-specific calls.  However,
 * the DRI2 implementation wants to have at least allocate/mmap be generic.
 *
 * The goal was to have swap-backed object allocation managed through
 * struct file.  However, file descriptors as handles to a struct file have
 * two major failings:
 * - Process limits prevent more than 1024 or so being used at a time by
 *   default.
 * - Inability to allocate high fds will aggravate the X Server's select()
 *   handling, and likely that of many GL client applications as well.
 *
 * This led to a plan of using our own integer IDs (called handles, following
 * DRM terminology) to mimic fds, and implement the fd syscalls we need as
 * ioctls.  The objects themselves will still include the struct file so
 * that we can transition to fds if the required kernel infrastructure shows
 * up at a later date, and as our interface with shmfs for memory allocation.
 */

/*
 * We make up offsets for buffer objects so we can recognize them at
 * mmap time.
 */
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE  ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
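
/*
 * Illustrative arithmetic (a sketch, assuming PAGE_SHIFT == 12): the start
 * offset evaluates to (0xFFFFFFFF >> 12) + 1 == 0x100000 pages, i.e. a byte
 * offset of 4 GiB, just past the largest page offset a 32-bit legacy map
 * token can occupy, and the size reserves 0xFFFFF * 16 pages (roughly
 * 64 GiB) of mmap offset space for GEM objects.
 */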
/**
 * Initialize the GEM device fields
 */

int
drm_gem_init(struct drm_device *dev)
{
	struct drm_gem_mm *mm;

	spin_lock_init(&dev->object_name_lock);
	idr_init(&dev->object_name_idr);
	atomic_set(&dev->object_count, 0);
	atomic_set(&dev->object_memory, 0);
	atomic_set(&dev->pin_count, 0);
	atomic_set(&dev->pin_memory, 0);
	atomic_set(&dev->gtt_count, 0);
	atomic_set(&dev->gtt_memory, 0);

	mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
	if (!mm) {
		DRM_ERROR("out of memory\n");
		return -ENOMEM;
	}

	dev->mm_private = mm;

	if (drm_ht_create(&mm->offset_hash, 19)) {
		kfree(mm);
		return -ENOMEM;
	}

	if (drm_mm_init(&mm->offset_manager, DRM_FILE_PAGE_OFFSET_START,
			DRM_FILE_PAGE_OFFSET_SIZE)) {
		drm_ht_remove(&mm->offset_hash);
		kfree(mm);
		return -ENOMEM;
	}

	return 0;
}

void
drm_gem_destroy(struct drm_device *dev)
{
	struct drm_gem_mm *mm = dev->mm_private;

	drm_mm_takedown(&mm->offset_manager);
	drm_ht_remove(&mm->offset_hash);
	kfree(mm);
	dev->mm_private = NULL;
}
/**
 * Allocate a GEM object of the specified size with shmfs backing store
 */
struct drm_gem_object *
drm_gem_object_alloc(struct drm_device *dev, size_t size)
{
	struct drm_gem_object *obj;

	BUG_ON((size & (PAGE_SIZE - 1)) != 0);

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		goto free;

	obj->dev = dev;
	obj->filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
	if (IS_ERR(obj->filp))
		goto free;

	kref_init(&obj->refcount);
	kref_init(&obj->handlecount);
	obj->size = size;
	if (dev->driver->gem_init_object != NULL &&
	    dev->driver->gem_init_object(obj) != 0) {
		goto fput;
	}
	atomic_inc(&dev->object_count);
	atomic_add(obj->size, &dev->object_memory);
	return obj;
fput:
	fput(obj->filp);
free:
	kfree(obj);
	return NULL;
}
EXPORT_SYMBOL(drm_gem_object_alloc);
/**
 * Removes the mapping from handle to filp for this object.
 */
static int
drm_gem_handle_delete(struct drm_file *filp, u32 handle)
{
	struct drm_device *dev;
	struct drm_gem_object *obj;

	/* This is gross. The idr system doesn't let us try a delete and
	 * return an error code.  It just spews if you fail at deleting.
	 * So, we have to grab a lock around finding the object and then
	 * doing the delete on it and dropping the refcount, or the user
	 * could race us to double-decrement the refcount and cause a
	 * use-after-free later.  Given the frequency of our handle lookups,
	 * we may want to use ida for number allocation and a hash table
	 * for the pointers, anyway.
	 */
	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_find(&filp->object_idr, handle);
	if (obj == NULL) {
		spin_unlock(&filp->table_lock);
		return -EINVAL;
	}
	dev = obj->dev;

	/* Release reference and decrement refcount. */
	idr_remove(&filp->object_idr, handle);
	spin_unlock(&filp->table_lock);

	drm_gem_object_handle_unreference_unlocked(obj);

	return 0;
}
/**
 * Create a handle for this object. This adds a handle reference
 * to the object, which includes a regular reference count. Callers
 * will likely want to unreference the object afterwards.
 */
int
drm_gem_handle_create(struct drm_file *file_priv,
		       struct drm_gem_object *obj,
		       u32 *handlep)
{
	int ret;

	/*
	 * Get the user-visible handle using idr.
	 */
again:
	/* ensure there is space available to allocate a handle */
	if (idr_pre_get(&file_priv->object_idr, GFP_KERNEL) == 0)
		return -ENOMEM;

	/* do the allocation under our spinlock */
	spin_lock(&file_priv->table_lock);
	ret = idr_get_new_above(&file_priv->object_idr, obj, 1, (int *)handlep);
	spin_unlock(&file_priv->table_lock);
	if (ret == -EAGAIN)
		goto again;

	if (ret != 0)
		return ret;

	drm_gem_object_handle_reference(obj);
	return 0;
}
EXPORT_SYMBOL(drm_gem_handle_create);
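
/*
 * Illustrative sketch of the usual driver-side "create" path (the helper
 * name and its arguments are hypothetical, not part of this file): allocate
 * an object, publish it to userspace as a handle, then drop the allocation
 * reference, mirroring what drm_gem_open_ioctl() below does.
 */
#if 0
static int example_gem_create(struct drm_device *dev,
			      struct drm_file *file_priv,
			      size_t size, u32 *handlep)
{
	struct drm_gem_object *obj;
	int ret;

	obj = drm_gem_object_alloc(dev, roundup(size, PAGE_SIZE));
	if (obj == NULL)
		return -ENOMEM;

	ret = drm_gem_handle_create(file_priv, obj, handlep);
	/* The handle now keeps the object alive; drop the allocation's ref. */
	drm_gem_object_unreference_unlocked(obj);

	return ret;
}
#endif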
/** Returns a reference to the object named by the handle. */
struct drm_gem_object *
drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
		      u32 handle)
{
	struct drm_gem_object *obj;

	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_find(&filp->object_idr, handle);
	if (obj == NULL) {
		spin_unlock(&filp->table_lock);
		return NULL;
	}

	drm_gem_object_reference(obj);

	spin_unlock(&filp->table_lock);

	return obj;
}
EXPORT_SYMBOL(drm_gem_object_lookup);
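
/*
 * Illustrative sketch of handle lookup in an ioctl handler (hypothetical
 * helper, not part of this file): resolve the handle, use the object, and
 * drop the reference that drm_gem_object_lookup() took.
 */
#if 0
static int example_gem_query_size(struct drm_device *dev,
				  struct drm_file *file_priv,
				  u32 handle, size_t *size)
{
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(dev, file_priv, handle);
	if (obj == NULL)
		return -EBADF;

	*size = obj->size;
	drm_gem_object_unreference_unlocked(obj);

	return 0;
}
#endif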
/**
 * Releases the handle to an mm object.
 */
int
drm_gem_close_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_close *args = data;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	ret = drm_gem_handle_delete(file_priv, args->handle);

	return ret;
}

/**
 * Create a global name for an object, returning the name.
 *
 * Note that the name does not hold a reference; when the object
 * is freed, the name goes away.
 */
int
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_flink *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EBADF;

again:
	if (idr_pre_get(&dev->object_name_idr, GFP_KERNEL) == 0) {
		ret = -ENOMEM;
		goto err;
	}

	spin_lock(&dev->object_name_lock);
	if (!obj->name) {
		ret = idr_get_new_above(&dev->object_name_idr, obj, 1,
					&obj->name);
		args->name = (uint64_t) obj->name;
		spin_unlock(&dev->object_name_lock);

		if (ret == -EAGAIN)
			goto again;

		if (ret != 0)
			goto err;

		/* Allocate a reference for the name table. */
		drm_gem_object_reference(obj);
	} else {
		args->name = (uint64_t) obj->name;
		spin_unlock(&dev->object_name_lock);
		ret = 0;
	}

err:
	drm_gem_object_unreference_unlocked(obj);
	return ret;
}
/**
 * Open an object using the global name, returning a handle and the size.
 *
 * This handle (of course) holds a reference to the object, so the object
 * will not go away until the handle is deleted.
 */
int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
		   struct drm_file *file_priv)
{
	struct drm_gem_open *args = data;
	struct drm_gem_object *obj;
	int ret;
	u32 handle;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	spin_lock(&dev->object_name_lock);
	obj = idr_find(&dev->object_name_idr, (int) args->name);
	if (obj)
		drm_gem_object_reference(obj);
	spin_unlock(&dev->object_name_lock);
	if (!obj)
		return -ENOENT;

	ret = drm_gem_handle_create(file_priv, obj, &handle);
	drm_gem_object_unreference_unlocked(obj);
	if (ret)
		return ret;

	args->handle = handle;
	args->size = obj->size;

	return 0;
}
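
/*
 * Illustrative userspace sequence for sharing an object between two DRM
 * clients via the two ioctls above (a sketch; error handling omitted):
 *
 *	struct drm_gem_flink flink = { .handle = handle };
 *	ioctl(fd, DRM_IOCTL_GEM_FLINK, &flink);
 *	// pass flink.name to the other client out of band
 *
 *	struct drm_gem_open open = { .name = name };
 *	ioctl(fd, DRM_IOCTL_GEM_OPEN, &open);
 *	// open.handle and open.size now describe the shared object
 *
 * The name itself holds no reference, so the exporting client must keep
 * its handle alive for as long as the name is expected to stay valid.
 */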
/**
 * Called at device open time, sets up the structure for handling refcounting
 * of mm objects.
 */
void
drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
{
	idr_init(&file_private->object_idr);
	spin_lock_init(&file_private->table_lock);
}

/**
 * Called at device close to release the file's
 * handle references on objects.
 */
static int
drm_gem_object_release_handle(int id, void *ptr, void *data)
{
	struct drm_gem_object *obj = ptr;

	drm_gem_object_handle_unreference_unlocked(obj);

	return 0;
}

/**
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */
void
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
	idr_for_each(&file_private->object_idr,
		     &drm_gem_object_release_handle, NULL);

	idr_destroy(&file_private->object_idr);
}

static void
drm_gem_object_free_common(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	fput(obj->filp);
	atomic_dec(&dev->object_count);
	atomic_sub(obj->size, &dev->object_memory);
	kfree(obj);
}
/**
 * Called after the last reference to the object has been lost.
 * Must be called holding struct_mutex.
 *
 * Frees the object
 */
void
drm_gem_object_free(struct kref *kref)
{
	struct drm_gem_object *obj = (struct drm_gem_object *) kref;
	struct drm_device *dev = obj->dev;

	BUG_ON(!mutex_is_locked(&dev->struct_mutex));

	if (dev->driver->gem_free_object != NULL)
		dev->driver->gem_free_object(obj);

	drm_gem_object_free_common(obj);
}
EXPORT_SYMBOL(drm_gem_object_free);
/**
 * Called after the last reference to the object has been lost.
 * Must be called without holding struct_mutex
 *
 * Frees the object
 */
void
drm_gem_object_free_unlocked(struct kref *kref)
{
	struct drm_gem_object *obj = (struct drm_gem_object *) kref;
	struct drm_device *dev = obj->dev;

	if (dev->driver->gem_free_object_unlocked != NULL)
		dev->driver->gem_free_object_unlocked(obj);
	else if (dev->driver->gem_free_object != NULL) {
		mutex_lock(&dev->struct_mutex);
		dev->driver->gem_free_object(obj);
		mutex_unlock(&dev->struct_mutex);
	}

	drm_gem_object_free_common(obj);
}
EXPORT_SYMBOL(drm_gem_object_free_unlocked);

static void drm_gem_object_ref_bug(struct kref *list_kref)
{
	BUG();
}

/**
 * Called after the last handle to the object has been closed
 *
 * Removes any name for the object. Note that this must be
 * called before drm_gem_object_free or we'll be touching
 * freed memory
 */
void
drm_gem_object_handle_free(struct kref *kref)
{
	struct drm_gem_object *obj = container_of(kref,
						  struct drm_gem_object,
						  handlecount);
	struct drm_device *dev = obj->dev;

	/* Remove any name for this object */
	spin_lock(&dev->object_name_lock);
	if (obj->name) {
		idr_remove(&dev->object_name_idr, obj->name);
		obj->name = 0;
		spin_unlock(&dev->object_name_lock);
		/*
		 * The object name held a reference to this object, drop
		 * that now.
		 *
		 * This cannot be the last reference, since the handle holds one too.
		 */
		kref_put(&obj->refcount, drm_gem_object_ref_bug);
	} else
		spin_unlock(&dev->object_name_lock);
}
EXPORT_SYMBOL(drm_gem_object_handle_free);
void drm_gem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;

	drm_gem_object_reference(obj);
}
EXPORT_SYMBOL(drm_gem_vm_open);

void drm_gem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;

	drm_gem_object_unreference_unlocked(obj);
}
EXPORT_SYMBOL(drm_gem_vm_close);
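
/*
 * Illustrative sketch (hypothetical names, not part of this file): a driver
 * typically points dev->driver->gem_vm_ops at a table like this so that
 * drm_gem_mmap() below can install it on the VMA; the fault handler is the
 * driver-specific piece.
 */
#if 0
static const struct vm_operations_struct example_gem_vm_ops = {
	.fault = example_gem_fault,	/* driver fault handler */
	.open = drm_gem_vm_open,	/* takes a reference per mapping */
	.close = drm_gem_vm_close,	/* drops that reference */
};
#endif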
/**
 * drm_gem_mmap - memory map routine for GEM objects
 * @filp: DRM file pointer
 * @vma: VMA for the area to be mapped
 *
 * If a driver supports GEM object mapping, mmap calls on the DRM file
 * descriptor will end up here.
 *
 * If we find the object based on the offset passed in (vma->vm_pgoff will
 * contain the fake offset we created when the GTT map ioctl was called on
 * the object), we set up the driver fault handler so that any accesses
 * to the object can be trapped, to perform migration, GTT binding, surface
 * register allocation, or performance monitoring.
 */
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_local_map *map = NULL;
	struct drm_gem_object *obj;
	struct drm_hash_item *hash;
	int ret = 0;

	mutex_lock(&dev->struct_mutex);

	if (drm_ht_find_item(&mm->offset_hash, vma->vm_pgoff, &hash)) {
		mutex_unlock(&dev->struct_mutex);
		return drm_mmap(filp, vma);
	}

	map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
	if (!map ||
	    ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN))) {
		ret = -EPERM;
		goto out_unlock;
	}

	/* Check for valid size. */
	if (map->size < vma->vm_end - vma->vm_start) {
		ret = -EINVAL;
		goto out_unlock;
	}

	obj = map->handle;
	if (!obj->dev->driver->gem_vm_ops) {
		ret = -EINVAL;
		goto out_unlock;
	}

	vma->vm_flags |= VM_RESERVED | VM_IO | VM_PFNMAP | VM_DONTEXPAND;
	vma->vm_ops = obj->dev->driver->gem_vm_ops;
	vma->vm_private_data = map->handle;
	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));

	/* Take a ref for this mapping of the object, so that the fault
	 * handler can dereference the mmap offset's pointer to the object.
	 * This reference is cleaned up by the corresponding vm_close
	 * (which should happen whether the vma was created by this call, or
	 * by a vm_open due to mremap or partial unmap or whatever).
	 */
	drm_gem_object_reference(obj);

	vma->vm_file = filp;	/* Needed for drm_vm_open() */
	drm_vm_open_locked(vma);

out_unlock:
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
EXPORT_SYMBOL(drm_gem_mmap);