/**
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 *
 * Created: Fri Jan 19 10:48:35 2001 by faith@acm.org
 *
 * Copyright 2001 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/drm_core.h>

unsigned int drm_debug = 0;	/* 1 to enable debug output */
EXPORT_SYMBOL(drm_debug);

unsigned int drm_rnodes = 0;	/* 1 to enable experimental render nodes API */
EXPORT_SYMBOL(drm_rnodes);

/* 1 to allow user space to request universal planes (experimental) */
unsigned int drm_universal_planes = 0;
EXPORT_SYMBOL(drm_universal_planes);

unsigned int drm_vblank_offdelay = 5000;	/* Default to 5000 msecs. */
EXPORT_SYMBOL(drm_vblank_offdelay);

unsigned int drm_timestamp_precision = 20;	/* Default to 20 usecs. */
EXPORT_SYMBOL(drm_timestamp_precision);

/*
 * Default to use monotonic timestamps for wait-for-vblank and page-flip
 * complete events.
 */
unsigned int drm_timestamp_monotonic = 1;

MODULE_AUTHOR(CORE_AUTHOR);
MODULE_DESCRIPTION(CORE_DESC);
MODULE_LICENSE("GPL and additional rights");
MODULE_PARM_DESC(debug, "Enable debug output");
MODULE_PARM_DESC(rnodes, "Enable experimental render nodes API");
MODULE_PARM_DESC(vblankoffdelay, "Delay until vblank irq auto-disable [msecs]");
MODULE_PARM_DESC(timestamp_precision_usec, "Max. error on timestamps [usecs]");
MODULE_PARM_DESC(timestamp_monotonic, "Use monotonic timestamps");

module_param_named(debug, drm_debug, int, 0600);
module_param_named(rnodes, drm_rnodes, int, 0600);
module_param_named(universal_planes, drm_universal_planes, int, 0600);
module_param_named(vblankoffdelay, drm_vblank_offdelay, int, 0600);
module_param_named(timestamp_precision_usec, drm_timestamp_precision, int, 0600);
module_param_named(timestamp_monotonic, drm_timestamp_monotonic, int, 0600);

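/*
 * Usage note (illustrative, not part of the original file): because the
 * parameters above are created with module_param_named() under the "drm"
 * module, they can be set at load time or on the kernel command line, e.g.:
 *
 *	modprobe drm debug=1 rnodes=1
 *	drm.vblankoffdelay=5000		(kernel command line)
 *
 * and, given the 0600 permissions, they stay writable at runtime via
 * /sys/module/drm/parameters/<name>.
 */
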
static DEFINE_SPINLOCK(drm_minor_lock);
struct idr drm_minors_idr;

struct class *drm_class;
struct dentry *drm_debugfs_root;

int drm_err(const char *func, const char *format, ...)
{
	struct va_format vaf;
	va_list args;
	int r;

	va_start(args, format);

	vaf.fmt = format;
	vaf.va = &args;

	r = printk(KERN_ERR "[" DRM_NAME ":%s] *ERROR* %pV", func, &vaf);

	va_end(args);

	return r;
}
EXPORT_SYMBOL(drm_err);

void drm_ut_debug_printk(const char *function_name, const char *format, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, format);
	vaf.fmt = format;
	vaf.va = &args;

	printk(KERN_DEBUG "[" DRM_NAME ":%s] %pV", function_name, &vaf);

	va_end(args);
}
EXPORT_SYMBOL(drm_ut_debug_printk);

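/*
 * Note (sketch, assuming the DRM_ERROR()/DRM_DEBUG() wrappers from drmP.h):
 * callers normally do not invoke drm_err() or drm_ut_debug_printk() directly
 * but go through the logging macros, roughly:
 *
 *	DRM_ERROR("init failed: %d\n", ret);
 *	DRM_DEBUG("new minor assigned %d\n", minor_id);
 *
 * which expand to the two helpers above with the calling function's name
 * prefixed to the message.
 */
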
struct drm_master *drm_master_create(struct drm_minor *minor)
{
	struct drm_master *master;

	master = kzalloc(sizeof(*master), GFP_KERNEL);
	if (!master)
		return NULL;

	kref_init(&master->refcount);
	spin_lock_init(&master->lock.spinlock);
	init_waitqueue_head(&master->lock.lock_queue);
	drm_ht_create(&master->magiclist, DRM_MAGIC_HASH_ORDER);
	INIT_LIST_HEAD(&master->magicfree);
	master->minor = minor;

	return master;
}

struct drm_master *drm_master_get(struct drm_master *master)
{
	kref_get(&master->refcount);
	return master;
}
EXPORT_SYMBOL(drm_master_get);

static void drm_master_destroy(struct kref *kref)
{
	struct drm_master *master = container_of(kref, struct drm_master, refcount);
	struct drm_magic_entry *pt, *next;
	struct drm_device *dev = master->minor->dev;
	struct drm_map_list *r_list, *list_temp;

	mutex_lock(&dev->struct_mutex);
	if (dev->driver->master_destroy)
		dev->driver->master_destroy(dev, master);

	list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) {
		if (r_list->master == master) {
			drm_rmmap_locked(dev, r_list->map);
			r_list = NULL;
		}
	}

	if (master->unique) {
		kfree(master->unique);
		master->unique = NULL;
		master->unique_len = 0;
	}

	list_for_each_entry_safe(pt, next, &master->magicfree, head) {
		list_del(&pt->head);
		drm_ht_remove_item(&master->magiclist, &pt->hash_item);
		kfree(pt);
	}

	drm_ht_remove(&master->magiclist);

	mutex_unlock(&dev->struct_mutex);
	kfree(master);
}

void drm_master_put(struct drm_master **master)
{
	kref_put(&(*master)->refcount, drm_master_destroy);
	*master = NULL;
}
EXPORT_SYMBOL(drm_master_put);

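/*
 * Reference pairing (illustrative sketch; "priv" is a hypothetical holder,
 * not a structure defined in this file): every drm_master_get() must be
 * balanced by a drm_master_put(), which also clears the caller's pointer:
 *
 *	priv->master = drm_master_get(file_priv->master);
 *	...
 *	drm_master_put(&priv->master);
 */
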
int drm_setmaster_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	int ret = 0;

	mutex_lock(&dev->master_mutex);
	if (file_priv->is_master)
		goto out_unlock;

	if (file_priv->minor->master) {
		ret = -EINVAL;
		goto out_unlock;
	}

	if (!file_priv->master) {
		ret = -EINVAL;
		goto out_unlock;
	}

	file_priv->minor->master = drm_master_get(file_priv->master);
	file_priv->is_master = 1;
	if (dev->driver->master_set) {
		ret = dev->driver->master_set(dev, file_priv, false);
		if (unlikely(ret != 0)) {
			file_priv->is_master = 0;
			drm_master_put(&file_priv->minor->master);
		}
	}

out_unlock:
	mutex_unlock(&dev->master_mutex);
	return ret;
}

int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	int ret = -EINVAL;

	mutex_lock(&dev->master_mutex);
	if (!file_priv->is_master)
		goto out_unlock;

	if (!file_priv->minor->master)
		goto out_unlock;

	ret = 0;
	if (dev->driver->master_drop)
		dev->driver->master_drop(dev, file_priv, false);
	drm_master_put(&file_priv->minor->master);
	file_priv->is_master = 0;

out_unlock:
	mutex_unlock(&dev->master_mutex);
	return ret;
}

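/*
 * User-space view (illustrative sketch): the two ioctls above back
 * DRM_IOCTL_SET_MASTER and DRM_IOCTL_DROP_MASTER, which take no argument:
 *
 *	if (ioctl(fd, DRM_IOCTL_SET_MASTER, 0))
 *		... this fd was not allowed to become master ...
 *	...
 *	ioctl(fd, DRM_IOCTL_DROP_MASTER, 0);
 */
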
/*
 * DRM Minors
 * A DRM device can provide several char-dev interfaces on the DRM-Major. Each
 * of them is represented by a drm_minor object. Depending on the capabilities
 * of the device driver, different interfaces are registered.
 *
 * Minors can be accessed via dev->$minor_name. This pointer is either
 * NULL or a valid drm_minor pointer and stays valid as long as the device is
 * valid. This means that DRM minors have the same lifetime as the underlying
 * device. However, this does not mean that the minor is active. Minors are
 * registered and unregistered dynamically according to device state.
 */

static struct drm_minor **drm_minor_get_slot(struct drm_device *dev,
					     unsigned int type)
{
	switch (type) {
	case DRM_MINOR_LEGACY:
		return &dev->primary;
	case DRM_MINOR_RENDER:
		return &dev->render;
	case DRM_MINOR_CONTROL:
		return &dev->control;
	default:
		return NULL;
	}
}

static int drm_minor_alloc(struct drm_device *dev, unsigned int type)
{
	struct drm_minor *minor;

	minor = kzalloc(sizeof(*minor), GFP_KERNEL);
	if (!minor)
		return -ENOMEM;

	minor->type = type;
	minor->dev = dev;

	*drm_minor_get_slot(dev, type) = minor;
	return 0;
}

static void drm_minor_free(struct drm_device *dev, unsigned int type)
{
	struct drm_minor **slot;

	slot = drm_minor_get_slot(dev, type);
	if (*slot) {
		kfree(*slot);
		*slot = NULL;
	}
}

static int drm_minor_register(struct drm_device *dev, unsigned int type)
{
	struct drm_minor *new_minor;
	unsigned long flags;
	int ret;
	int minor_id;

	new_minor = *drm_minor_get_slot(dev, type);
	if (!new_minor)
		return 0;

	idr_preload(GFP_KERNEL);
	spin_lock_irqsave(&drm_minor_lock, flags);
	minor_id = idr_alloc(&drm_minors_idr,
			     NULL,
			     64 * type,
			     64 * (type + 1),
			     GFP_NOWAIT);
	spin_unlock_irqrestore(&drm_minor_lock, flags);
	idr_preload_end();

	if (minor_id < 0)
		return minor_id;

	new_minor->index = minor_id;

	ret = drm_debugfs_init(new_minor, minor_id, drm_debugfs_root);
	if (ret) {
		DRM_ERROR("DRM: Failed to initialize /sys/kernel/debug/dri.\n");
		goto err_id;
	}

	ret = drm_sysfs_device_add(new_minor);
	if (ret) {
		DRM_ERROR("DRM: Error sysfs_device_add.\n");
		goto err_debugfs;
	}

	/* replace NULL with @minor so lookups will succeed from now on */
	spin_lock_irqsave(&drm_minor_lock, flags);
	idr_replace(&drm_minors_idr, new_minor, new_minor->index);
	spin_unlock_irqrestore(&drm_minor_lock, flags);

	DRM_DEBUG("new minor assigned %d\n", minor_id);
	return 0;

err_debugfs:
	drm_debugfs_cleanup(new_minor);
err_id:
	spin_lock_irqsave(&drm_minor_lock, flags);
	idr_remove(&drm_minors_idr, minor_id);
	spin_unlock_irqrestore(&drm_minor_lock, flags);
	new_minor->index = 0;
	return ret;
}

static void drm_minor_unregister(struct drm_device *dev, unsigned int type)
{
	struct drm_minor *minor;
	unsigned long flags;

	minor = *drm_minor_get_slot(dev, type);
	if (!minor || !minor->kdev)
		return;

	spin_lock_irqsave(&drm_minor_lock, flags);
	idr_remove(&drm_minors_idr, minor->index);
	spin_unlock_irqrestore(&drm_minor_lock, flags);

	drm_debugfs_cleanup(minor);
	drm_sysfs_device_remove(minor);
}

/**
 * drm_minor_acquire - Acquire a DRM minor
 * @minor_id: Minor ID of the DRM-minor
 *
 * Looks up the given minor-ID and returns the respective DRM-minor object. The
 * reference count of the underlying device is increased so you must release
 * this object with drm_minor_release().
 *
 * As long as you hold this minor, it is guaranteed that the object and the
 * minor->dev pointer will stay valid! However, the device may get unplugged and
 * unregistered while you hold the minor.
 *
 * Returns:
 * Pointer to minor-object with increased device-refcount, or PTR_ERR on
 * failure.
 */
struct drm_minor *drm_minor_acquire(unsigned int minor_id)
{
	struct drm_minor *minor;
	unsigned long flags;

	spin_lock_irqsave(&drm_minor_lock, flags);
	minor = idr_find(&drm_minors_idr, minor_id);
	if (minor)
		drm_dev_ref(minor->dev);
	spin_unlock_irqrestore(&drm_minor_lock, flags);

	if (!minor) {
		return ERR_PTR(-ENODEV);
	} else if (drm_device_is_unplugged(minor->dev)) {
		drm_dev_unref(minor->dev);
		return ERR_PTR(-ENODEV);
	}

	return minor;
}

/**
 * drm_minor_release - Release DRM minor
 * @minor: Pointer to DRM minor object
 *
 * Release a minor that was previously acquired via drm_minor_acquire().
 */
void drm_minor_release(struct drm_minor *minor)
{
	drm_dev_unref(minor->dev);
}

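/*
 * Typical acquire/release pairing (sketch based on the documentation above):
 *
 *	struct drm_minor *minor;
 *
 *	minor = drm_minor_acquire(minor_id);
 *	if (IS_ERR(minor))
 *		return PTR_ERR(minor);
 *	... use minor->dev ...
 *	drm_minor_release(minor);
 */
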
/**
 * Called via drm_exit() at module unload time or when the PCI device is
 * unplugged.
 *
 * Cleans up a DRM device, calling drm_lastclose().
 */
void drm_put_dev(struct drm_device *dev)
{
	if (!dev) {
		DRM_ERROR("cleanup called no dev\n");
		return;
	}

	drm_dev_unregister(dev);
	drm_dev_unref(dev);
}
EXPORT_SYMBOL(drm_put_dev);

void drm_unplug_dev(struct drm_device *dev)
{
	/* for a USB device */
	drm_minor_unregister(dev, DRM_MINOR_LEGACY);
	drm_minor_unregister(dev, DRM_MINOR_RENDER);
	drm_minor_unregister(dev, DRM_MINOR_CONTROL);

	mutex_lock(&drm_global_mutex);

	drm_device_set_unplugged(dev);

	if (dev->open_count == 0)
		drm_put_dev(dev);

	mutex_unlock(&drm_global_mutex);
}
EXPORT_SYMBOL(drm_unplug_dev);

/*
 * DRM internal mount
 * We want to be able to allocate our own "struct address_space" to control
 * memory-mappings in VRAM (or stolen RAM, ...). However, core MM does not allow
 * stand-alone address_space objects, so we need an underlying inode. As there
 * is no way to allocate an independent inode easily, we need a fake internal
 * VFS mount-point.
 *
 * The drm_fs_inode_new() function allocates a new inode, drm_fs_inode_free()
 * frees it again. You are allowed to use iget() and iput() to get references to
 * the inode. But each drm_fs_inode_new() call must be paired with exactly one
 * drm_fs_inode_free() call (which does not have to be the last iput()).
 * We use drm_fs_inode_*() to manage our internal VFS mount-point and share it
 * between multiple inode-users. You could, technically, call
 * iget() + drm_fs_inode_free() directly after alloc and sometime later do an
 * iput(), but this way you'd end up with a new vfsmount for each inode.
 */

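/*
 * Pairing rule as a sketch (drawn from the comment above):
 *
 *	struct inode *inode = drm_fs_inode_new();
 *	if (IS_ERR(inode))
 *		return PTR_ERR(inode);
 *	... inode backs an address_space for mappings ...
 *	drm_fs_inode_free(inode);
 */
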
static int drm_fs_cnt;
static struct vfsmount *drm_fs_mnt;

static const struct dentry_operations drm_fs_dops = {
	.d_dname	= simple_dname,
};

static const struct super_operations drm_fs_sops = {
	.statfs		= simple_statfs,
};

static struct dentry *drm_fs_mount(struct file_system_type *fs_type, int flags,
				   const char *dev_name, void *data)
{
	return mount_pseudo(fs_type,
			    "drm:",
			    &drm_fs_sops,
			    &drm_fs_dops,
			    0x010203ff);
}

static struct file_system_type drm_fs_type = {
	.name		= "drm",
	.owner		= THIS_MODULE,
	.mount		= drm_fs_mount,
	.kill_sb	= kill_anon_super,
};

static struct inode *drm_fs_inode_new(void)
{
	struct inode *inode;
	int r;

	r = simple_pin_fs(&drm_fs_type, &drm_fs_mnt, &drm_fs_cnt);
	if (r < 0) {
		DRM_ERROR("Cannot mount pseudo fs: %d\n", r);
		return ERR_PTR(r);
	}

	inode = alloc_anon_inode(drm_fs_mnt->mnt_sb);
	if (IS_ERR(inode))
		simple_release_fs(&drm_fs_mnt, &drm_fs_cnt);

	return inode;
}

static void drm_fs_inode_free(struct inode *inode)
{
	if (inode) {
		iput(inode);
		simple_release_fs(&drm_fs_mnt, &drm_fs_cnt);
	}
}

/**
 * drm_dev_alloc - Allocate new drm device
 * @driver: DRM driver to allocate device for
 * @parent: Parent device object
 *
 * Allocate and initialize a new DRM device. No device registration is done.
 * Call drm_dev_register() to advertise the device to user space and register it
 * with other core subsystems.
 *
 * The initial ref-count of the object is 1. Use drm_dev_ref() and
 * drm_dev_unref() to take and drop further ref-counts.
 *
 * RETURNS:
 * Pointer to new DRM device, or NULL if out of memory.
 */
struct drm_device *drm_dev_alloc(struct drm_driver *driver,
				 struct device *parent)
{
	struct drm_device *dev;
	int ret;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return NULL;

	kref_init(&dev->ref);
	dev->dev = parent;
	dev->driver = driver;

	INIT_LIST_HEAD(&dev->filelist);
	INIT_LIST_HEAD(&dev->ctxlist);
	INIT_LIST_HEAD(&dev->vmalist);
	INIT_LIST_HEAD(&dev->maplist);
	INIT_LIST_HEAD(&dev->vblank_event_list);

	spin_lock_init(&dev->count_lock);
	spin_lock_init(&dev->event_lock);
	mutex_init(&dev->struct_mutex);
	mutex_init(&dev->ctxlist_mutex);
	mutex_init(&dev->master_mutex);

	dev->anon_inode = drm_fs_inode_new();
	if (IS_ERR(dev->anon_inode)) {
		ret = PTR_ERR(dev->anon_inode);
		DRM_ERROR("Cannot allocate anonymous inode: %d\n", ret);
		goto err_free;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		ret = drm_minor_alloc(dev, DRM_MINOR_CONTROL);
		if (ret)
			goto err_minors;
	}

	if (drm_core_check_feature(dev, DRIVER_RENDER) && drm_rnodes) {
		ret = drm_minor_alloc(dev, DRM_MINOR_RENDER);
		if (ret)
			goto err_minors;
	}

	ret = drm_minor_alloc(dev, DRM_MINOR_LEGACY);
	if (ret)
		goto err_minors;

	if (drm_ht_create(&dev->map_hash, 12))
		goto err_minors;

	ret = drm_ctxbitmap_init(dev);
	if (ret) {
		DRM_ERROR("Cannot allocate memory for context bitmap.\n");
		goto err_ht;
	}

	if (driver->driver_features & DRIVER_GEM) {
		ret = drm_gem_init(dev);
		if (ret) {
			DRM_ERROR("Cannot initialize graphics execution manager (GEM)\n");
			goto err_ctxbitmap;
		}
	}

	return dev;

err_ctxbitmap:
	drm_ctxbitmap_cleanup(dev);
err_ht:
	drm_ht_remove(&dev->map_hash);
err_minors:
	drm_minor_free(dev, DRM_MINOR_LEGACY);
	drm_minor_free(dev, DRM_MINOR_RENDER);
	drm_minor_free(dev, DRM_MINOR_CONTROL);
	drm_fs_inode_free(dev->anon_inode);
err_free:
	mutex_destroy(&dev->master_mutex);
	kfree(dev);
	return NULL;
}
EXPORT_SYMBOL(drm_dev_alloc);

static void drm_dev_release(struct kref *ref)
{
	struct drm_device *dev = container_of(ref, struct drm_device, ref);

	if (dev->driver->driver_features & DRIVER_GEM)
		drm_gem_destroy(dev);

	drm_ctxbitmap_cleanup(dev);
	drm_ht_remove(&dev->map_hash);
	drm_fs_inode_free(dev->anon_inode);

	drm_minor_free(dev, DRM_MINOR_LEGACY);
	drm_minor_free(dev, DRM_MINOR_RENDER);
	drm_minor_free(dev, DRM_MINOR_CONTROL);

	mutex_destroy(&dev->master_mutex);
	kfree(dev);
}

/**
 * drm_dev_ref - Take reference of a DRM device
 * @dev: device to take reference of or NULL
 *
 * This increases the ref-count of @dev by one. You *must* already own a
 * reference when calling this. Use drm_dev_unref() to drop this reference
 * again.
 *
 * This function never fails. However, this function does not provide *any*
 * guarantee whether the device is alive or running. It only provides a
 * reference to the object and the memory associated with it.
 */
void drm_dev_ref(struct drm_device *dev)
{
	if (dev)
		kref_get(&dev->ref);
}
EXPORT_SYMBOL(drm_dev_ref);

/**
 * drm_dev_unref - Drop reference of a DRM device
 * @dev: device to drop reference of or NULL
 *
 * This decreases the ref-count of @dev by one. The device is destroyed if the
 * ref-count drops to zero.
 */
void drm_dev_unref(struct drm_device *dev)
{
	if (dev)
		kref_put(&dev->ref, drm_dev_release);
}
EXPORT_SYMBOL(drm_dev_unref);

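/*
 * Ref-count pairing (illustrative): code that stores a struct drm_device
 * pointer long-term takes its own reference and drops it when done:
 *
 *	drm_dev_ref(dev);
 *	... dev and its memory stay valid, though it may get unplugged ...
 *	drm_dev_unref(dev);
 */
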
/**
 * drm_dev_register - Register DRM device
 * @dev: Device to register
 * @flags: Flags passed to the driver's ->load() callback
 *
 * Register the DRM device @dev with the system, advertise device to user-space
 * and start normal device operation. @dev must be allocated via drm_dev_alloc()
 * previously.
 *
 * Never call this twice on any device!
 *
 * RETURNS:
 * 0 on success, negative error code on failure.
 */
int drm_dev_register(struct drm_device *dev, unsigned long flags)
{
	int ret;

	mutex_lock(&drm_global_mutex);

	ret = drm_minor_register(dev, DRM_MINOR_CONTROL);
	if (ret)
		goto err_minors;

	ret = drm_minor_register(dev, DRM_MINOR_RENDER);
	if (ret)
		goto err_minors;

	ret = drm_minor_register(dev, DRM_MINOR_LEGACY);
	if (ret)
		goto err_minors;

	if (dev->driver->load) {
		ret = dev->driver->load(dev, flags);
		if (ret)
			goto err_minors;
	}

	/* setup grouping for legacy outputs */
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		ret = drm_mode_group_init_legacy_group(dev,
						       &dev->primary->mode_group);
		if (ret)
			goto err_unload;
	}

	ret = 0;
	goto out_unlock;

err_unload:
	if (dev->driver->unload)
		dev->driver->unload(dev);
err_minors:
	drm_minor_unregister(dev, DRM_MINOR_LEGACY);
	drm_minor_unregister(dev, DRM_MINOR_RENDER);
	drm_minor_unregister(dev, DRM_MINOR_CONTROL);
out_unlock:
	mutex_unlock(&drm_global_mutex);
	return ret;
}
EXPORT_SYMBOL(drm_dev_register);

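/*
 * Driver-side life-cycle (illustrative sketch; "my_driver" and "pdev" are
 * hypothetical, error handling abbreviated):
 *
 *	struct drm_device *ddev;
 *
 *	ddev = drm_dev_alloc(&my_driver, &pdev->dev);
 *	if (!ddev)
 *		return -ENOMEM;
 *
 *	ret = drm_dev_register(ddev, 0);
 *	if (ret)
 *		goto err_unref;
 *	...
 *	drm_dev_unregister(ddev);
 * err_unref:
 *	drm_dev_unref(ddev);
 */
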
/**
 * drm_dev_unregister - Unregister DRM device
 * @dev: Device to unregister
 *
 * Unregister the DRM device from the system. This does the reverse of
 * drm_dev_register() but does not deallocate the device. The caller must call
 * drm_dev_unref() to drop their final reference.
 */
void drm_dev_unregister(struct drm_device *dev)
{
	struct drm_map_list *r_list, *list_temp;

	drm_lastclose(dev);

	if (dev->driver->unload)
		dev->driver->unload(dev);

	if (dev->agp)
		drm_pci_agp_destroy(dev);

	drm_vblank_cleanup(dev);

	list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head)
		drm_rmmap(dev, r_list->map);

	drm_minor_unregister(dev, DRM_MINOR_LEGACY);
	drm_minor_unregister(dev, DRM_MINOR_RENDER);
	drm_minor_unregister(dev, DRM_MINOR_CONTROL);
}
EXPORT_SYMBOL(drm_dev_unregister);