/*
 * drmP.h -- Private header for Direct Rendering Manager -*- linux-c -*-
 * Created: Mon Jan 4 10:05:05 1999 by faith@precisioninsight.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * Copyright (c) 2009, Intel Corporation.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Rickard E. (Rik) Faith <faith@valinux.com>
 *    Gareth Hughes <gareth@valinux.com>
 */

/*
 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#ifndef	_DRMP_H
#define	_DRMP_H
#include <sys/sysmacros.h>
#include <sys/types.h>
#include <sys/modctl.h>
#include <sys/cmn_err.h>
#include <sys/varargs.h>
#include <sys/sunddi.h>
#include <sys/sunldi.h>
#include <sys/agpgart.h>
#include <sys/sysmacros.h>
#include "drm_atomic.h"
#include "drm_linux_list.h"
#define	__inline__	inline

#if !defined(__FUNCTION__)
#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)
#define	__FUNCTION__	__func__
#else
#define	__FUNCTION__	" "
#endif
#endif
#define	DRM_PAGE_SHIFT		PAGESHIFT
#define	DRM_PAGE_SIZE		(1 << DRM_PAGE_SHIFT)
#define	DRM_PAGE_OFFSET		(DRM_PAGE_SIZE - 1)
#define	DRM_PAGE_MASK		~(DRM_PAGE_SIZE - 1)
#define	DRM_MB2PAGES(x)		((x) << 8)
#define	DRM_PAGES2BYTES(x)	((x) << DRM_PAGE_SHIFT)
#define	DRM_BYTES2PAGES(x)	((x) >> DRM_PAGE_SHIFT)
#define	DRM_PAGES2KB(x)		((x) << 2)
#define	DRM_ALIGNED(offset)	(((offset) & DRM_PAGE_OFFSET) == 0)

#define	PAGE_SHIFT		DRM_PAGE_SHIFT
#define	PAGE_SIZE		DRM_PAGE_SIZE
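
/*
 * Illustrative note (not part of the original header): DRM_MB2PAGES() and
 * DRM_PAGES2KB() hard-code a 4KB page (1MB = 1 << 8 pages, 1 page = 1 << 2 KB),
 * matching DRM_PAGE_SHIFT == 12 on x86.  A hypothetical size check built on
 * these macros ("aperture_mb" is an assumed variable) might read:
 *
 *	pgcnt_t npages = DRM_MB2PAGES(aperture_mb);
 *	size_t nbytes = DRM_PAGES2BYTES(npages);
 *	ASSERT(DRM_ALIGNED(nbytes));
 */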
#define	DRM_MAX_INSTANCES	8
#define	DRM_DEVNODE		"drm"
#define	DRM_UNOPENED		0

#define	DRM_HASH_SIZE		16	/* Size of key hash table */
#define	DRM_KERNEL_CONTEXT	0	/* Change drm_resctx if changed */
#define	DRM_RESERVED_CONTEXTS	1	/* Change drm_resctx if changed */

#define	DRM_MEM_DMA		0
#define	DRM_MEM_SAREA		1
#define	DRM_MEM_DRIVER		2
#define	DRM_MEM_MAGIC		3
#define	DRM_MEM_IOCTLS		4
#define	DRM_MEM_MAPS		5
#define	DRM_MEM_BUFS		6
#define	DRM_MEM_SEGS		7
#define	DRM_MEM_PAGES		8
#define	DRM_MEM_FILES		9
#define	DRM_MEM_QUEUES		10
#define	DRM_MEM_CMDS		11
#define	DRM_MEM_MAPPINGS	12
#define	DRM_MEM_BUFLISTS	13
#define	DRM_MEM_DRMLISTS	14
#define	DRM_MEM_TOTALDRM	15
#define	DRM_MEM_BOUNDDRM	16
#define	DRM_MEM_CTXBITMAP	17
#define	DRM_MEM_STUB		18
#define	DRM_MEM_SGLISTS		19
#define	DRM_MEM_AGPLISTS	20
#define	DRM_MEM_CTXLIST		21
#define	DRM_MEM_MM		22
#define	DRM_MEM_HASHTAB		23
#define	DRM_MEM_OBJECTS		24

#define	DRM_MAX_CTXBITMAP	(PAGE_SIZE * 8)
#define	DRM_MAP_HASH_OFFSET	0x10000000
#define	DRM_MAP_HASH_ORDER	12
#define	DRM_OBJECT_HASH_ORDER	12
#define	DRM_FILE_PAGE_OFFSET_START	((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
#define	DRM_FILE_PAGE_OFFSET_SIZE	((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
#define	DRM_MM_INIT_MAX_PAGES	256
/* Internal types and structures */
#define	DRM_ARRAY_SIZE(x)	(sizeof (x) / sizeof (x[0]))
#define	DRM_MIN(a, b)		((a) < (b) ? (a) : (b))
#define	DRM_MAX(a, b)		((a) > (b) ? (a) : (b))

#define	DRM_IF_VERSION(maj, min)	((maj) << 16 | (min))

#define	__OS_HAS_AGP	1

#define	DRM_DEV_MOD	(S_IRUSR|S_IWUSR|S_IRGRP|S_IWGRP)
#define	DRM_DEV_UID	0
#define	DRM_DEV_GID	0
#define	DRM_CURRENTPID		ddi_get_pid()
#define	DRM_SPINLOCK(l)		mutex_enter(l)
#define	DRM_SPINUNLOCK(u)	mutex_exit(u)
#define	DRM_SPINLOCK_ASSERT(l)
#define	DRM_LOCK()		mutex_enter(&dev->dev_lock)
#define	DRM_UNLOCK()		mutex_exit(&dev->dev_lock)
#define	DRM_LOCK_OWNED()	ASSERT(mutex_owned(&dev->dev_lock))
#define	spin_lock_irqsave(l, flag)	mutex_enter(l)
#define	spin_unlock_irqrestore(u, flag)	mutex_exit(u)
#define	spin_lock(l)		mutex_enter(l)
#define	spin_unlock(u)		mutex_exit(u)

#define	DRM_UDELAY(sec)		ddi_msleep(sec)
#define	DRM_MEMORYBARRIER()
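
/*
 * Illustrative note (not part of the original header): the Linux-style
 * spin_lock_irqsave()/spin_unlock_irqrestore() shims above map onto plain
 * kmutex operations, so the "flag" argument is ignored.  Code ported from
 * Linux can therefore keep its calling convention, e.g.:
 *
 *	unsigned long flags;
 *	spin_lock_irqsave(&dev->irq_lock, flags);
 *	... touch state shared with the interrupt handler ...
 *	spin_unlock_irqrestore(&dev->irq_lock, flags);
 */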
typedef struct drm_file		drm_file_t;
typedef struct drm_device	drm_device_t;
typedef struct drm_driver_info	drm_driver_t;

#define	DRM_DEVICE	drm_device_t *dev = dev1
#define	DRM_IOCTL_ARGS	\
	drm_device_t *dev1, intptr_t data, drm_file_t *fpriv, int mode
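
/*
 * Illustrative sketch (not part of the original header): a driver ioctl
 * handler is declared with DRM_IOCTL_ARGS and typically opens with the
 * DRM_DEVICE macro to recover the device pointer from dev1.
 * "my_driver_noop_ioctl" is a hypothetical name.
 *
 *	int
 *	my_driver_noop_ioctl(DRM_IOCTL_ARGS)
 *	{
 *		DRM_DEVICE;
 *
 *		if (dev == NULL)
 *			return (EINVAL);
 *		return (0);
 *	}
 */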
#define	DRM_COPYFROM_WITH_RETURN(dest, src, size)	\
	if (ddi_copyin((src), (dest), (size), 0)) {	\
		DRM_ERROR("%s: copy from user failed", __func__);	\
		return (EFAULT);	\
	}

#define	DRM_COPYTO_WITH_RETURN(dest, src, size)	\
	if (ddi_copyout((src), (dest), (size), 0)) {	\
		DRM_ERROR("%s: copy to user failed", __func__);	\
		return (EFAULT);	\
	}

#define	DRM_COPY_FROM_USER(dest, src, size)	\
	ddi_copyin((src), (dest), (size), 0)	/* flag for src */

#define	DRM_COPY_TO_USER(dest, src, size)	\
	ddi_copyout((src), (dest), (size), 0)	/* flags for dest */

#define	DRM_COPY_FROM_USER_UNCHECKED(arg1, arg2, arg3)	\
	ddi_copyin((arg2), (arg1), (arg3), 0)

#define	DRM_COPY_TO_USER_UNCHECKED(arg1, arg2, arg3)	\
	ddi_copyout((arg2), (arg1), (arg3), 0)
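
/*
 * Illustrative sketch (not part of the original header): an ioctl usually
 * copies its argument block in and out around the real work.  "my_args_t"
 * and "my_get_param" are hypothetical names.
 *
 *	int
 *	my_getparam_ioctl(DRM_IOCTL_ARGS)
 *	{
 *		DRM_DEVICE;
 *		my_args_t args;
 *
 *		DRM_COPYFROM_WITH_RETURN(&args, (void *)data, sizeof (args));
 *		args.value = my_get_param(dev, args.param);
 *		DRM_COPYTO_WITH_RETURN((void *)data, &args, sizeof (args));
 *		return (0);
 *	}
 */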
#define	DRM_READ8(map, offset)	\
	*(volatile uint8_t *)((uintptr_t)((map)->dev_addr) + (offset))
#define	DRM_READ16(map, offset)	\
	*(volatile uint16_t *)((uintptr_t)((map)->dev_addr) + (offset))
#define	DRM_READ32(map, offset)	\
	*(volatile uint32_t *)((uintptr_t)((map)->dev_addr) + (offset))
#define	DRM_WRITE8(map, offset, val)	\
	*(volatile uint8_t *)((uintptr_t)((map)->dev_addr) + (offset)) = (val)
#define	DRM_WRITE16(map, offset, val)	\
	*(volatile uint16_t *)((uintptr_t)((map)->dev_addr) + (offset)) = (val)
#define	DRM_WRITE32(map, offset, val)	\
	*(volatile uint32_t *)((uintptr_t)((map)->dev_addr) + (offset)) = (val)
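
/*
 * Illustrative sketch (not part of the original header): the DRM_READnn and
 * DRM_WRITEnn macros access a mapped register aperture (drm_local_map_t)
 * through its dev_addr base.  "dev_priv->mmio_map" and "MY_STATUS_REG" are
 * hypothetical driver-private names.
 *
 *	uint32_t status = DRM_READ32(dev_priv->mmio_map, MY_STATUS_REG);
 *	DRM_WRITE32(dev_priv->mmio_map, MY_STATUS_REG, status | 0x1);
 */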
typedef struct drm_wait_queue {
	kcondvar_t	cv;
	kmutex_t	lock;
} wait_queue_head_t;

#define	DRM_INIT_WAITQUEUE(q, pri)	\
{ \
	mutex_init(&(q)->lock, NULL, MUTEX_DRIVER, pri);	\
	cv_init(&(q)->cv, NULL, CV_DRIVER, NULL);	\
}

#define	DRM_FINI_WAITQUEUE(q)	\
{ \
	mutex_destroy(&(q)->lock);	\
	cv_destroy(&(q)->cv);	\
}

#define	DRM_WAKEUP(q)	\
{ \
	mutex_enter(&(q)->lock);	\
	cv_broadcast(&(q)->cv);	\
	mutex_exit(&(q)->lock);	\
}
#define	jiffies	ddi_get_lbolt()

#define	DRM_WAIT_ON(ret, q, timeout, condition)	\
	mutex_enter(&(q)->lock);	\
	while (!(condition)) {	\
		ret = cv_reltimedwait_sig(&(q)->cv, &(q)->lock, timeout, \
		} else if (ret == 0) {	\
	mutex_exit(&(q)->lock);

#define	DRM_WAIT(ret, q, condition)	\
	mutex_enter(&(q)->lock);	\
	if (!(condition)) {	\
		ret = cv_timedwait_sig(&(q)->cv, &(q)->lock, \
		    jiffies + 30 * DRM_HZ);	\
		/* gfx may have hung */	\
	mutex_exit(&(q)->lock);
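
/*
 * Illustrative sketch (not part of the original header): a driver typically
 * pairs DRM_WAKEUP in its interrupt handler with DRM_WAIT_ON in the ioctl
 * path.  "dev_priv->irq_queue" and "dev_priv->irq_received" are hypothetical
 * driver-private state.
 *
 *	In the ioctl path:
 *		int ret = 0;
 *		DRM_WAIT_ON(ret, &dev_priv->irq_queue, 3 * DRM_HZ,
 *		    dev_priv->irq_received);
 *
 *	In the interrupt handler:
 *		dev_priv->irq_received = 1;
 *		DRM_WAKEUP(&dev_priv->irq_queue);
 */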
#define	DRM_GETSAREA()	\
{	\
	drm_local_map_t *map;	\
	DRM_SPINLOCK_ASSERT(&dev->dev_lock);	\
	TAILQ_FOREACH(map, &dev->maplist, link) {	\
		if (map->type == _DRM_SHM &&	\
		    map->flags & _DRM_CONTAINS_LOCK) {	\
			dev_priv->sarea = map;	\
			break;	\
		}	\
	}	\
}

#define	LOCK_TEST_WITH_RETURN(dev, fpriv)	\
	if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) ||	\
	    dev->lock.filp != fpriv) {	\
		DRM_DEBUG("%s called without lock held", __func__);	\
		return (EINVAL);	\
	}
#define	DRM_IRQ_ARGS	caddr_t arg
#define	IRQ_HANDLED	DDI_INTR_CLAIMED
#define	IRQ_NONE	DDI_INTR_UNCLAIMED

/* Capabilities taken from src/sys/dev/pci/pcireg.h. */
#define	PCIY_AGP	0x02
#define	PCIY_EXPRESS	0x10

#define	PAGE_ALIGN(addr)	(((addr) + DRM_PAGE_SIZE - 1) & DRM_PAGE_MASK)
#define	DRM_SUSER(p)	(crgetsgid(p) == 0 || crgetsuid(p) == 0)

#define	DRM_GEM_OBJIDR_HASHNODE	1024
#define	idr_list_for_each(entry, head) \
	for (int key = 0; key < DRM_GEM_OBJIDR_HASHNODE; key++) \
		list_for_each(entry, &(head)->next[key])
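
/*
 * Illustrative sketch (not part of the original header, and assuming the
 * idr_list layout used elsewhere in this file): walking every GEM object
 * handle owned by a drm_file.
 *
 *	struct idr_list *entry;
 *	idr_list_for_each(entry, &fpriv->object_idr) {
 *		struct drm_gem_object *obj = entry->obj;
 *		... inspect obj ...
 *	}
 */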
/*
 * Wait for 400 milliseconds.
 */
#define	DRM_HZ	drv_usectohz(400000)
typedef unsigned long dma_addr_t;
typedef uint64_t u64;
typedef uint32_t u32;
typedef uint16_t u16;
typedef uint_t irqreturn_t;

#define	DRM_SUPPORT	1
#define	DRM_UNSUPPORT	0

#define	__OS_HAS_AGP	1

typedef struct drm_pci_id_list {

#define	DRM_MASTER	0x2
#define	DRM_ROOT_ONLY	0x4
typedef int drm_ioctl_t(DRM_IOCTL_ARGS);
typedef struct drm_ioctl_desc {
	int (*func)(DRM_IOCTL_ARGS);

typedef struct drm_magic_entry {
	struct drm_file		*priv;
	struct drm_magic_entry	*next;

typedef struct drm_magic_head {
	struct drm_magic_entry	*head;
	struct drm_magic_entry	*tail;
typedef struct drm_buf {
	int		idx;		/* Index into master buflist */
	int		total;		/* Buffer size */
	int		order;		/* log-base-2(total) */
	int		used;		/* Amount of buffer in use (for DMA) */
	unsigned long	offset;		/* Byte offset (used internally) */
	void		*address;	/* Address of buffer */
	unsigned long	bus_address;	/* Bus address of buffer */
	struct drm_buf	*next;		/* Kernel-only: used for free list */
	volatile int	pending;	/* On hardware DMA queue */
	/* Uniq. identifier of holding process */
	int		context;	/* Kernel queue for this buffer */
	} list;				/* Which list we're on */

	int		dev_priv_size;	/* Size of buffer private storage */
	void		*dev_private;	/* Per-buffer private storage */
typedef struct drm_freelist {
	int		initialized;	/* Freelist in use */
	uint32_t	count;		/* Number of free buffers */
	drm_buf_t	*next;		/* End pointer */

	int		low_mark;	/* Low water mark */
	int		high_mark;	/* High water mark */
typedef struct drm_buf_entry {
	unsigned long	*seglist_bus;
	drm_freelist_t	freelist;

typedef TAILQ_HEAD(drm_file_list, drm_file) drm_file_list_t;
typedef struct drm_local_map {
	unsigned long	offset;		/* Physical address (0 for SAREA) */
	unsigned long	size;		/* Physical size (bytes) */
	drm_map_type_t	type;		/* Type of memory mapped */
	drm_map_flags_t	flags;		/* Flags */
	void		*handle;	/* User-space: "Handle" to pass to mmap */
					/* Kernel-space: kernel-virtual address */
	int		mtrr;		/* Boolean: MTRR used */
	int		rid;		/* PCI resource ID for bus_space */
	int		kernel_owned;	/* Boolean: 1= initmapped, 0= addmapped */
	caddr_t		dev_addr;	/* base device address */
	ddi_acc_handle_t dev_handle;	/* The data access handle */
	ddi_umem_cookie_t drm_umem_cookie; /* For SAREA alloc and free */
	TAILQ_ENTRY(drm_local_map) link;
/*
 * This structure defines the drm_mm memory object, which will be used by the
 * DRM for its buffer objects.
 */
struct drm_gem_object {
	/* Reference count of this object */

	/* Handle count of this object.  Each handle also holds a reference */
	atomic_t handlecount;

	/* Related drm device */
	struct drm_device *dev;

	/*
	 * Size of the object, in bytes.  Immutable over the object's
	 * lifetime.
	 */

	/*
	 * Global name for this object, starts at 1.  0 means unnamed.
	 * Access is covered by the object_name_lock in the related drm_device.
	 */

	/*
	 * Memory domains.  These monitor which caches contain read/write data
	 * related to the object.  When transitioning from one set of domains
	 * to another, the driver is called to ensure that caches are suitably
	 * flushed and invalidated.
	 */
	uint32_t read_domains;
	uint32_t write_domain;

	/*
	 * While validating an exec operation, the new read/write domain
	 * values are computed here.  They will be transferred to the above
	 * values at the point that any cache flushing occurs.
	 */
	uint32_t pending_read_domains;
	uint32_t pending_write_domain;

	void *driver_private;

	drm_local_map_t *map;
	ddi_dma_handle_t dma_hdl;
	ddi_acc_handle_t acc_hdl;
	size_t real_size;	/* real size of memory */
};

struct idr_list {
	struct idr_list *next, *prev;
	struct drm_gem_object *obj;
struct drm_file {
	TAILQ_ENTRY(drm_file) link;
	unsigned long ioctl_count;

	/* Mapping of mm object handles to object pointers. */
	struct idr_list object_idr;
	/* Lock for synchronization of access to object_idr. */
typedef struct drm_lock_data {
	drm_hw_lock_t	*hw_lock;	/* Hardware lock */
	/* Uniq. identifier of holding process */
	kcondvar_t	lock_cv;	/* lock queue - SOLARIS Specific */
	kmutex_t	lock_mutex;	/* lock - SOLARIS Specific */
	unsigned long	lock_time;	/* Time of last lock in clock ticks */
/*
 * This structure, in drm_device_t, is always initialized while the device
 * is open.  dev->dma_lock protects the incrementing of dev->buf_use, which
 * when set marks that no further bufs may be allocated until device teardown
 * occurs (when the last open of the device has closed).  The high/low
 * watermarks of bufs are only touched by the X Server, and thus not
 * concurrently accessed, so no locking is needed.
 */
typedef struct drm_device_dma {
	drm_buf_entry_t	bufs[DRM_MAX_ORDER + 1];
	drm_buf_t	**buflist;	/* Vector of pointers into bufs */
	unsigned long	*pagelist;
	unsigned long	byte_count;
		_DRM_DMA_USE_AGP = 0x01,
		_DRM_DMA_USE_SG = 0x02
typedef struct drm_agp_mem {
	unsigned long	bound;	/* address */
	struct drm_agp_mem	*prev;
	struct drm_agp_mem	*next;

typedef struct drm_agp_head {
	drm_agp_mem_t	*memory;
	int		cant_use_aperture;
	unsigned long	page_mask;
	ldi_ident_t	agpgart_li;
	ldi_handle_t	agpgart_lh;
typedef struct drm_dma_handle {
	ddi_dma_handle_t	dma_hdl;
	ddi_acc_handle_t	acc_hdl;
	ddi_dma_cookie_t	cookie;
	uintptr_t		vaddr;	/* virtual addr */
	uintptr_t		paddr;	/* physical addr */
	size_t			real_sz; /* real size of memory */
typedef struct drm_sg_mem {
	unsigned long		handle;
	ddi_umem_cookie_t	*umem_cookie;
	drm_dma_handle_t	*dmah_sg;
	drm_dma_handle_t	*dmah_gart;	/* Handle to PCI memory */
/*
 * Generic memory manager structs
 */
struct drm_mm_node {
	struct list_head fl_entry;
	struct list_head ml_entry;

struct drm_mm {
	struct list_head fl_entry;
	struct list_head ml_entry;

typedef TAILQ_HEAD(drm_map_list, drm_local_map) drm_map_list_t;

typedef TAILQ_HEAD(drm_vbl_sig_list, drm_vbl_sig) drm_vbl_sig_list_t;
typedef struct drm_vbl_sig {
	TAILQ_ENTRY(drm_vbl_sig) link;
	unsigned int	sequence;

/* used for clone device */
typedef TAILQ_HEAD(drm_cminor_list, drm_cminor) drm_cminor_list_t;
typedef struct drm_cminor {
	TAILQ_ENTRY(drm_cminor) link;
/* location of GART table */
#define	DRM_ATI_GART_MAIN	1
#define	DRM_ATI_GART_FB		2

typedef struct ati_pcigart_info {
	int gart_table_location;
	drm_local_map_t mapping;
} drm_ati_pcigart_info;
/* DRM device structure */
struct drm_driver_info {
	int (*load)(struct drm_device *, unsigned long);
	int (*firstopen)(struct drm_device *);
	int (*open)(struct drm_device *, drm_file_t *);
	void (*preclose)(struct drm_device *, drm_file_t *);
	void (*postclose)(struct drm_device *, drm_file_t *);
	void (*lastclose)(struct drm_device *);
	int (*unload)(struct drm_device *);
	void (*reclaim_buffers_locked)(struct drm_device *, drm_file_t *);
	int (*presetup)(struct drm_device *);
	int (*postsetup)(struct drm_device *);
	int (*open_helper)(struct drm_device *, drm_file_t *);
	void (*free_filp_priv)(struct drm_device *, drm_file_t *);
	void (*release)(struct drm_device *, void *);
	int (*dma_ioctl)(DRM_IOCTL_ARGS);
	void (*dma_ready)(struct drm_device *);
	int (*dma_quiescent)(struct drm_device *);
	int (*dma_flush_block_and_flush)(struct drm_device *, int,
	    drm_lock_flags_t);
	int (*dma_flush_unblock)(struct drm_device *, int,
	    drm_lock_flags_t);
	int (*context_ctor)(struct drm_device *, int);
	int (*context_dtor)(struct drm_device *, int);
	int (*kernel_context_switch)(struct drm_device *, int, int);
	int (*kernel_context_switch_unlock)(struct drm_device *);
	int (*device_is_agp)(struct drm_device *);
	int (*irq_preinstall)(struct drm_device *);
	void (*irq_postinstall)(struct drm_device *);
	void (*irq_uninstall)(struct drm_device *dev);
	uint_t (*irq_handler)(DRM_IRQ_ARGS);
	int (*vblank_wait)(struct drm_device *, unsigned int *);
	int (*vblank_wait2)(struct drm_device *, unsigned int *);
	/* added for intel minimized vblank */
	u32 (*get_vblank_counter)(struct drm_device *dev, int crtc);
	int (*enable_vblank)(struct drm_device *dev, int crtc);
	void (*disable_vblank)(struct drm_device *dev, int crtc);

	/*
	 * Driver-specific constructor for drm_gem_objects, to set up
	 * obj->driver_private.
	 *
	 * Returns 0 on success.
	 */
	int (*gem_init_object)(struct drm_gem_object *obj);
	void (*gem_free_object)(struct drm_gem_object *obj);

	drm_ioctl_desc_t *driver_ioctls;
	int max_driver_ioctl;

	int driver_patchlevel;
	const char *driver_name;	/* Simple driver name */
	const char *driver_desc;	/* Longer driver name */
	const char *driver_date;	/* Date of last major changes. */

	unsigned require_agp:1;
	unsigned use_pci_dma:1;
	unsigned use_dma_queue:1;
	unsigned use_vbl_irq:1;
	unsigned use_vbl_irq2:1;
	unsigned use_mtrr:1;
/*
 * Hardware-specific code needs to initialize mutexes which
 * can be used in interrupt context, so it needs to know
 * the interrupt priority.  The interrupt cookie in the drm_device
 * structure is the intr_block field.
 */
#define	DRM_INTR_PRI(dev) \
	DDI_INTR_PRI((dev)->intr_block)
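
/*
 * Illustrative sketch (not part of the original header): DRM_INTR_PRI() is
 * what a driver passes when initializing a mutex or wait queue that is also
 * used from its interrupt handler.  "dev_priv" and its members are
 * hypothetical driver-private state.
 *
 *	mutex_init(&dev_priv->user_irq_lock, NULL, MUTEX_DRIVER,
 *	    DRM_INTR_PRI(dev));
 *	DRM_INIT_WAITQUEUE(&dev_priv->irq_queue, DRM_INTR_PRI(dev));
 */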
struct drm_device {
	drm_driver_t	*driver;
	drm_cminor_list_t minordevs;
	const char	*desc;		/* current driver description */
	ddi_iblock_cookie_t intr_block;
	uint32_t	pci_device;	/* PCI device id */
	char		*unique;	/* Unique identifier: e.g., busid */
	int		unique_len;	/* Length of unique field */
	int		if_version;	/* Highest interface version set */
	int		flags;		/* Flags to open(2) */

	kmutex_t	vbl_lock;	/* protects vblank operations */
	kmutex_t	dma_lock;	/* protects dev->dma */
	kmutex_t	irq_lock;	/* protects irq condition checks */
	kmutex_t	dev_lock;	/* protects everything else */
	drm_lock_data_t	lock;		/* Information on hardware lock */
	kmutex_t	struct_mutex;	/* For others */

	int		open_count;	/* Outstanding files open */
	int		buf_use;	/* Buffers in use -- cannot alloc */

	/* Performance counters */
	unsigned long	counters;
	drm_stat_type_t	types[15];
	drm_file_list_t	files;
	drm_magic_head_t magiclist[DRM_HASH_SIZE];

	/* Linked list of mappable regions.  Protected by dev_lock */
	drm_map_list_t	maplist;

	drm_local_map_t	**context_sareas;

	/* DMA queues (contexts) */
	drm_device_dma_t *dma;		/* Optional pointer for DMA support */

	/* Context support */
	int		irq;		/* Interrupt used by board */
	int		irq_enabled;	/* True if the irq handler is enabled */

	atomic_t	context_flag;	/* Context swapping flag */
	int		last_context;	/* Last current context */

	/* Only used for Radeon */
	atomic_t	vbl_received;
	atomic_t	vbl_received2;

	drm_vbl_sig_list_t vbl_sig_list;
	drm_vbl_sig_list_t vbl_sig_list2;

	/*
	 * At load time, disabling the vblank interrupt won't be allowed since
	 * old clients may not call the modeset ioctl and therefore misbehave.
	 * Once the modeset ioctl *has* been called though, we can safely
	 * disable them when unused.
	 */
	int		vblank_disable_allowed;

	wait_queue_head_t vbl_queue;	/* vbl wait channel */
	/* vbl wait channel array */
	wait_queue_head_t *vbl_queues;

	/* number of VBLANK interrupts */
	/* (driver must alloc the right number of counters) */
	atomic_t	*_vblank_count;
	/* signal list to send on VBLANK */
	struct drm_vbl_sig_list *vbl_sigs;

	/* number of signals pending on all crtcs */
	atomic_t	vbl_signal_pending;
	/* number of users of vblank interrupts per crtc */
	atomic_t	*vblank_refcount;
	/* protected by dev->vbl_lock, used for wraparound handling */

	/* so we don't call enable more than once per disable */
	atomic_t	*vblank_enabled;
	/* Display driver is setting mode */
	int		*vblank_inmodeset;
	/* Don't wait while crtc is likely disabled */

	/* size of vblank counter register */
	u32		max_vblank_count;

	kmutex_t	tasklet_lock;
	void		(*locked_tasklet_func)(struct drm_device *dev);

	drm_sg_mem_t	*sg;		/* Scatter gather memory */
	uint32_t	*ctx_bitmap;

	unsigned int	agp_buffer_token;
	drm_local_map_t	*agp_buffer_map;

	kstat_t		*asoft_ksp;	/* kstat support */

	/* Drawable information */
	unsigned int	drw_bitfield_length;
	unsigned int	drw_info_length;
	drm_drawable_info_t **drw_info;

	/* GEM information */
	kmutex_t	object_name_lock;
	struct idr_list	object_name_idr;
	atomic_t	object_count;
	atomic_t	object_memory;

	uint32_t	invalidate_domains;	/* domains pending invalidation */
	uint32_t	flush_domains;		/* domains pending flush */
/* Memory management support (drm_memory.c) */
void drm_mem_init(void);
void drm_mem_uninit(void);
void *drm_alloc(size_t, int);
void *drm_calloc(size_t, size_t, int);
void *drm_realloc(void *, size_t, size_t, int);
void drm_free(void *, size_t, int);
int drm_ioremap(drm_device_t *, drm_local_map_t *);
void drm_ioremapfree(drm_local_map_t *);

void drm_core_ioremap(struct drm_local_map *, struct drm_device *);
void drm_core_ioremapfree(struct drm_local_map *, struct drm_device *);

void drm_pci_free(drm_device_t *, drm_dma_handle_t *);
void *drm_pci_alloc(drm_device_t *, size_t, size_t, dma_addr_t, int);

struct drm_local_map *drm_core_findmap(struct drm_device *, unsigned long);

int drm_context_switch(drm_device_t *, int, int);
int drm_context_switch_complete(drm_device_t *, int);
int drm_ctxbitmap_init(drm_device_t *);
void drm_ctxbitmap_cleanup(drm_device_t *);
void drm_ctxbitmap_free(drm_device_t *, int);
int drm_ctxbitmap_next(drm_device_t *);
/* Locking IOCTL support (drm_lock.c) */
int drm_lock_take(drm_lock_data_t *, unsigned int);
int drm_lock_transfer(drm_device_t *, drm_lock_data_t *, unsigned int);
int drm_lock_free(drm_device_t *, volatile unsigned int *, unsigned int);

/* Buffer management support (drm_bufs.c) */
unsigned long drm_get_resource_start(drm_device_t *, unsigned int);
unsigned long drm_get_resource_len(drm_device_t *, unsigned int);
int drm_initmap(drm_device_t *, unsigned long, unsigned long,
    unsigned int, int, int);
void drm_rmmap(drm_device_t *, drm_local_map_t *);
int drm_addmap(drm_device_t *, unsigned long, unsigned long,
    drm_map_type_t, drm_map_flags_t, drm_local_map_t **);
int drm_order(unsigned long);
/* DMA support (drm_dma.c) */
int drm_dma_setup(drm_device_t *);
void drm_dma_takedown(drm_device_t *);
void drm_free_buffer(drm_device_t *, drm_buf_t *);
void drm_reclaim_buffers(drm_device_t *, drm_file_t *);
#define	drm_core_reclaim_buffers	drm_reclaim_buffers
/* IRQ support (drm_irq.c) */
int drm_irq_install(drm_device_t *);
int drm_irq_uninstall(drm_device_t *);
uint_t drm_irq_handler(DRM_IRQ_ARGS);
void drm_driver_irq_preinstall(drm_device_t *);
void drm_driver_irq_postinstall(drm_device_t *);
void drm_driver_irq_uninstall(drm_device_t *);
int drm_vblank_wait(drm_device_t *, unsigned int *);
void drm_vbl_send_signals(drm_device_t *);
void drm_handle_vblank(struct drm_device *dev, int crtc);
u32 drm_vblank_count(struct drm_device *dev, int crtc);
int drm_vblank_get(struct drm_device *dev, int crtc);
void drm_vblank_put(struct drm_device *dev, int crtc);
int drm_vblank_init(struct drm_device *dev, int num_crtcs);
void drm_vblank_cleanup(struct drm_device *dev);
int drm_modeset_ctl(DRM_IOCTL_ARGS);
/* AGP/GART support (drm_agpsupport.c) */
int drm_device_is_agp(drm_device_t *);
int drm_device_is_pcie(drm_device_t *);
drm_agp_head_t *drm_agp_init(drm_device_t *);
void drm_agp_fini(drm_device_t *);
int drm_agp_do_release(drm_device_t *);
void *drm_agp_allocate_memory(size_t pages, uint32_t type, drm_device_t *dev);
int drm_agp_free_memory(agp_allocate_t *handle, drm_device_t *dev);
int drm_agp_bind_memory(unsigned int, uint32_t, drm_device_t *);
int drm_agp_unbind_memory(unsigned long, drm_device_t *);
int drm_agp_bind_pages(drm_device_t *dev,
    unsigned long num_pages,
    uint32_t gtt_offset);
int drm_agp_unbind_pages(drm_device_t *dev,
    unsigned long num_pages,
void drm_agp_chipset_flush(struct drm_device *dev);
void drm_agp_rebind(struct drm_device *dev);
/* kstat support (drm_kstats.c) */
int drm_init_kstats(drm_device_t *);
void drm_fini_kstats(drm_device_t *);

/* Scatter Gather Support (drm_scatter.c) */
void drm_sg_cleanup(drm_device_t *, drm_sg_mem_t *);

/* ATI PCIGART support (ati_pcigart.c) */
int drm_ati_pcigart_init(drm_device_t *, drm_ati_pcigart_info *);
int drm_ati_pcigart_cleanup(drm_device_t *, drm_ati_pcigart_info *);

/* Locking IOCTL support (drm_drv.c) */
int drm_lock(DRM_IOCTL_ARGS);
int drm_unlock(DRM_IOCTL_ARGS);
int drm_version(DRM_IOCTL_ARGS);
int drm_setversion(DRM_IOCTL_ARGS);
/* Cache management (drm_cache.c) */
void drm_clflush_pages(caddr_t *pages, unsigned long num_pages);
/* Misc. IOCTL support (drm_ioctl.c) */
int drm_irq_by_busid(DRM_IOCTL_ARGS);
int drm_getunique(DRM_IOCTL_ARGS);
int drm_setunique(DRM_IOCTL_ARGS);
int drm_getmap(DRM_IOCTL_ARGS);
int drm_getclient(DRM_IOCTL_ARGS);
int drm_getstats(DRM_IOCTL_ARGS);
int drm_noop(DRM_IOCTL_ARGS);

/* Context IOCTL support (drm_context.c) */
int drm_resctx(DRM_IOCTL_ARGS);
int drm_addctx(DRM_IOCTL_ARGS);
int drm_modctx(DRM_IOCTL_ARGS);
int drm_getctx(DRM_IOCTL_ARGS);
int drm_switchctx(DRM_IOCTL_ARGS);
int drm_newctx(DRM_IOCTL_ARGS);
int drm_rmctx(DRM_IOCTL_ARGS);
int drm_setsareactx(DRM_IOCTL_ARGS);
int drm_getsareactx(DRM_IOCTL_ARGS);

/* Drawable IOCTL support (drm_drawable.c) */
int drm_adddraw(DRM_IOCTL_ARGS);
int drm_rmdraw(DRM_IOCTL_ARGS);
int drm_update_draw(DRM_IOCTL_ARGS);

/* Authentication IOCTL support (drm_auth.c) */
int drm_getmagic(DRM_IOCTL_ARGS);
int drm_authmagic(DRM_IOCTL_ARGS);
int drm_remove_magic(drm_device_t *, drm_magic_t);
drm_file_t *drm_find_file(drm_device_t *, drm_magic_t);

/* Buffer management support (drm_bufs.c) */
int drm_addmap_ioctl(DRM_IOCTL_ARGS);
int drm_rmmap_ioctl(DRM_IOCTL_ARGS);
int drm_addbufs_ioctl(DRM_IOCTL_ARGS);
int drm_infobufs(DRM_IOCTL_ARGS);
int drm_markbufs(DRM_IOCTL_ARGS);
int drm_freebufs(DRM_IOCTL_ARGS);
int drm_mapbufs(DRM_IOCTL_ARGS);

/* DMA support (drm_dma.c) */
int drm_dma(DRM_IOCTL_ARGS);

/* IRQ support (drm_irq.c) */
int drm_control(DRM_IOCTL_ARGS);
int drm_wait_vblank(DRM_IOCTL_ARGS);

/* AGP/GART support (drm_agpsupport.c) */
int drm_agp_acquire(DRM_IOCTL_ARGS);
int drm_agp_release(DRM_IOCTL_ARGS);
int drm_agp_enable(DRM_IOCTL_ARGS);
int drm_agp_info(DRM_IOCTL_ARGS);
int drm_agp_alloc(DRM_IOCTL_ARGS);
int drm_agp_free(DRM_IOCTL_ARGS);
int drm_agp_unbind(DRM_IOCTL_ARGS);
int drm_agp_bind(DRM_IOCTL_ARGS);

/* Scatter Gather Support (drm_scatter.c) */
int drm_sg_alloc(DRM_IOCTL_ARGS);
int drm_sg_free(DRM_IOCTL_ARGS);
struct drm_mm_node *drm_mm_get_block(struct drm_mm_node *parent,
    unsigned long size, unsigned alignment);
struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
    unsigned long size, unsigned alignment, int best_match);
extern void drm_mm_clean_ml(const struct drm_mm *mm);

extern int drm_debug_flag;
/* Functions backing the DRM_DEBUG, DRM_ERROR and DRM_INFO macros */
extern void drm_debug(const char *fmt, ...);
extern void drm_error(const char *fmt, ...);
extern void drm_info(const char *fmt, ...);

#ifdef DEBUG
#define	DRM_DEBUG	if (drm_debug_flag >= 2) drm_debug
#define	DRM_INFO	if (drm_debug_flag >= 1) drm_info
#else
#define	DRM_DEBUG(...)
#define	DRM_INFO(...)
#endif
#define	DRM_ERROR	drm_error
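
/*
 * Illustrative note (not part of the original header): the logging macros
 * take printf-style arguments, e.g.
 *
 *	DRM_DEBUG("%s: mapped %lu bytes", __func__, (unsigned long)size);
 *	DRM_ERROR("timed out waiting for the ring to clear");
 *
 * In non-DEBUG builds DRM_DEBUG and DRM_INFO expand to nothing, so their
 * arguments must not rely on side effects.
 */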
#define	MAX_INSTNUMS	16

extern int drm_dev_to_instance(dev_t);
extern int drm_dev_to_minor(dev_t);
extern void *drm_supp_register(dev_info_t *, drm_device_t *);
extern int drm_supp_unregister(void *);

extern int drm_open(drm_device_t *, drm_cminor_t *, int, int, cred_t *);
extern int drm_close(drm_device_t *, int, int, int, cred_t *);
extern int drm_attach(drm_device_t *);
extern int drm_detach(drm_device_t *);
extern int drm_probe(drm_device_t *, drm_pci_id_list_t *);

extern int drm_pci_init(drm_device_t *);
extern void drm_pci_end(drm_device_t *);
extern int pci_get_info(drm_device_t *, int *, int *, int *);
extern int pci_get_irq(drm_device_t *);
extern int pci_get_vendor(drm_device_t *);
extern int pci_get_device(drm_device_t *);
extern struct drm_drawable_info *drm_get_drawable_info(drm_device_t *,
    drm_drawable_t);
/* File Operations helpers (drm_fops.c) */
extern drm_file_t *drm_find_file_by_proc(drm_device_t *, cred_t *);
extern drm_cminor_t *drm_find_file_by_minor(drm_device_t *, int);
extern int drm_open_helper(drm_device_t *, drm_cminor_t *, int, int,
    cred_t *);
/* Graphics Execution Manager library functions (drm_gem.c) */
int drm_gem_init(struct drm_device *dev);
void drm_gem_object_free(struct drm_gem_object *obj);
struct drm_gem_object *drm_gem_object_alloc(struct drm_device *dev,
    size_t size);
void drm_gem_object_handle_free(struct drm_gem_object *obj);
void drm_gem_object_reference(struct drm_gem_object *obj);
void drm_gem_object_unreference(struct drm_gem_object *obj);
int drm_gem_handle_create(struct drm_file *file_priv,
    struct drm_gem_object *obj, int *handlep);
void drm_gem_object_handle_reference(struct drm_gem_object *obj);
void drm_gem_object_handle_unreference(struct drm_gem_object *obj);
struct drm_gem_object *drm_gem_object_lookup(struct drm_file *filp,
    int handle);
int drm_gem_close_ioctl(DRM_IOCTL_ARGS);
int drm_gem_flink_ioctl(DRM_IOCTL_ARGS);
int drm_gem_open_ioctl(DRM_IOCTL_ARGS);
void drm_gem_open(struct drm_file *file_private);
void drm_gem_release(struct drm_device *dev, struct drm_file *file_private);

#endif	/* _DRMP_H */