/**
 * \file drm.h
 * Header for the Direct Rendering Manager
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 *
 * \par Acknowledgments:
 * Dec 1999, Richard Henderson <rth@twiddle.net>, move to generic \c cmpxchg.
 */

/*
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

/**
 * \mainpage
 *
 * The Direct Rendering Manager (DRM) is a device-independent kernel-level
 * device driver that provides support for the XFree86 Direct Rendering
 * Infrastructure (DRI).
 *
 * The DRM supports the Direct Rendering Infrastructure (DRI) in four major
 * ways:
 *     -# The DRM provides synchronized access to the graphics hardware via
 *        the use of an optimized two-tiered lock.
 *     -# The DRM enforces the DRI security policy for access to the graphics
 *        hardware by only allowing authenticated X11 clients access to
 *        restricted regions of memory.
 *     -# The DRM provides a generic DMA engine, complete with multiple
 *        queues and the ability to detect the need for an OpenGL context
 *        switch.
 *     -# The DRM is extensible via the use of small device-specific modules
 *        that rely extensively on the API exported by the DRM module.
 *
 */

#ifndef _DRM_H_
#define _DRM_H_

#ifndef __user
#define __user
#endif
#ifndef __iomem
#define __iomem
#endif

#ifdef __GNUC__
# define DEPRECATED  __attribute__ ((deprecated))
#else
# define DEPRECATED
# ifndef __FUNCTION__
#  define __FUNCTION__ __func__   /* C99 */
# endif
# ifndef __volatile__
#  define __volatile__ volatile
# endif
#endif

#if defined(__linux__)
#include <asm/ioctl.h>          /* For _IO* macros */
#define DRM_IOCTL_NR(n)         _IOC_NR(n)
#define DRM_IOC_VOID            _IOC_NONE
#define DRM_IOC_READ            _IOC_READ
#define DRM_IOC_WRITE           _IOC_WRITE
#define DRM_IOC_READWRITE       _IOC_READ|_IOC_WRITE
#define DRM_IOC(dir, group, nr, size) _IOC(dir, group, nr, size)
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__NetBSD__) || defined(__OpenBSD__) || defined(__DragonFly__)
#include <sys/ioccom.h>
#define DRM_IOCTL_NR(n)         ((n) & 0xff)
#define DRM_IOC_VOID            IOC_VOID
#define DRM_IOC_READ            IOC_OUT
#define DRM_IOC_WRITE           IOC_IN
#define DRM_IOC_READWRITE       IOC_INOUT
#define DRM_IOC(dir, group, nr, size) _IOC(dir, group, nr, size)
#endif
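
/*
 * Usage sketch (illustrative): on both OS families the macros above recover
 * the command number from a full ioctl code, e.g. for the DRM_IOCTL_VERSION
 * code defined later in this header.
 *
 * \code
 * unsigned int nr = DRM_IOCTL_NR(DRM_IOCTL_VERSION);   // == 0x00
 * \endcode
 */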

#ifdef __OpenBSD__
#define DRM_MAJOR 81
#endif
#if defined(__NetBSD__)
#define DRM_MAJOR 180
#endif
#if defined(__linux__)
#define DRM_MAJOR 226
#endif
#define DRM_MAX_MINOR   15

#define DRM_NAME        "drm"   /**< Name in kernel, /dev, and /proc */
#define DRM_MIN_ORDER   5       /**< At least 2^5 bytes = 32 bytes */
#define DRM_MAX_ORDER   22      /**< Up to 2^22 bytes = 4MB */
#define DRM_RAM_PERCENT 10      /**< How much system ram can we lock? */

#define _DRM_LOCK_HELD  0x80000000U /**< Hardware lock is held */
#define _DRM_LOCK_CONT  0x40000000U /**< Hardware lock is contended */
#define _DRM_LOCK_IS_HELD(lock)    ((lock) & _DRM_LOCK_HELD)
#define _DRM_LOCK_IS_CONT(lock)    ((lock) & _DRM_LOCK_CONT)
#define _DRM_LOCKING_CONTEXT(lock) ((lock) & ~(_DRM_LOCK_HELD|_DRM_LOCK_CONT))
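
/*
 * Usage sketch (illustrative): the lock word packs the owning context number
 * in the low bits and the HELD/CONT flags in the top two bits, so a value can
 * be composed and decomposed with the macros above.
 *
 * \code
 * unsigned int lock = 42 | _DRM_LOCK_HELD;      // context 42, lock held
 * if (_DRM_LOCK_IS_HELD(lock) && _DRM_LOCKING_CONTEXT(lock) == 42) {
 *         // this context currently owns the hardware lock
 * }
 * \endcode
 */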

#if defined(__linux__)
typedef unsigned int drm_handle_t;
#else
#include <sys/types.h>
typedef unsigned long drm_handle_t;     /**< To mapped regions */
#endif
typedef unsigned int drm_context_t;     /**< GLXContext handle */
typedef unsigned int drm_drawable_t;
typedef unsigned int drm_magic_t;       /**< Magic for authentication */

/**
 * Cliprect.
 *
 * \warning If you change this structure, make sure you change
 * XF86DRIClipRectRec in the server as well
 *
 * \note KW: Actually it's illegal to change either for
 * backwards-compatibility reasons.
 */
struct drm_clip_rect {
        unsigned short x1;
        unsigned short y1;
        unsigned short x2;
        unsigned short y2;
};

/**
 * Texture region.
 */
struct drm_tex_region {
        unsigned char next;
        unsigned char prev;
        unsigned char in_use;
        unsigned char padding;
        unsigned int age;
};

/**
 * Hardware lock.
 *
 * The lock structure is a simple cache-line aligned integer.  To avoid
 * processor bus contention on a multiprocessor system, there should not be any
 * other data stored in the same cache line.
 */
struct drm_hw_lock {
        __volatile__ unsigned int lock;         /**< lock variable */
        char padding[60];                       /**< Pad to cache line */
};

/* This is beyond ugly, and only works on GCC.  However, it allows me to use
 * drm.h in places (i.e., in the X-server) where I can't use size_t.  The real
 * fix is to use uint32_t instead of size_t, but that fix will break existing
 * LP64 (i.e., PowerPC64, SPARC64, IA-64, Alpha, etc.) systems.  That *will*
 * eventually happen, though.  I chose 'unsigned long' to be the fallback type
 * because that works on all the platforms I know about.  Hopefully, the
 * real fix will happen before that bites us.
 */

#ifdef __SIZE_TYPE__
# define DRM_SIZE_T __SIZE_TYPE__
#else
# warning "__SIZE_TYPE__ not defined.  Assuming sizeof(size_t) == sizeof(unsigned long)!"
# define DRM_SIZE_T unsigned long
#endif

/**
 * DRM_IOCTL_VERSION ioctl argument type.
 *
 * \sa drmGetVersion().
 */
struct drm_version {
        int version_major;        /**< Major version */
        int version_minor;        /**< Minor version */
        int version_patchlevel;   /**< Patch level */
        DRM_SIZE_T name_len;      /**< Length of name buffer */
        char __user *name;        /**< Name of driver */
        DRM_SIZE_T date_len;      /**< Length of date buffer */
        char __user *date;        /**< User-space buffer to hold date */
        DRM_SIZE_T desc_len;      /**< Length of desc buffer */
        char __user *desc;        /**< User-space buffer to hold desc */
};
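
/*
 * Usage sketch (illustrative; assumes an open DRM file descriptor fd and the
 * usual <sys/ioctl.h>, <string.h> and <stdlib.h> declarations): the ioctl is
 * typically issued twice -- once with zeroed lengths to learn the buffer
 * sizes, then again with user-allocated buffers -- which is how a
 * drmGetVersion()-style wrapper is commonly implemented.
 *
 * \code
 * struct drm_version v;
 * memset(&v, 0, sizeof(v));
 * ioctl(fd, DRM_IOCTL_VERSION, &v);             // first pass: sizes only
 * v.name = malloc(v.name_len + 1);
 * v.date = malloc(v.date_len + 1);
 * v.desc = malloc(v.desc_len + 1);
 * ioctl(fd, DRM_IOCTL_VERSION, &v);             // second pass: fill buffers
 * \endcode
 */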

/**
 * DRM_IOCTL_GET_UNIQUE ioctl argument type.
 *
 * \sa drmGetBusid() and drmSetBusId().
 */
struct drm_unique {
        DRM_SIZE_T unique_len;    /**< Length of unique */
        char __user *unique;      /**< Unique name for driver instantiation */
};

#undef DRM_SIZE_T

struct drm_list {
        int count;                /**< Length of user-space structures */
        struct drm_version __user *version;
};

struct drm_block {
        int unused;
};

/**
 * DRM_IOCTL_CONTROL ioctl argument type.
 *
 * \sa drmCtlInstHandler() and drmCtlUninstHandler().
 */
struct drm_control {
        enum {
                DRM_ADD_COMMAND,
                DRM_RM_COMMAND,
                DRM_INST_HANDLER,
                DRM_UNINST_HANDLER
        } func;
        int irq;
};

/**
 * Type of memory to map.
 */
enum drm_map_type {
        _DRM_FRAME_BUFFER = 0,    /**< WC (no caching), no core dump */
        _DRM_REGISTERS = 1,       /**< no caching, no core dump */
        _DRM_SHM = 2,             /**< shared, cached */
        _DRM_AGP = 3,             /**< AGP/GART */
        _DRM_SCATTER_GATHER = 4,  /**< Scatter/gather memory for PCI DMA */
        _DRM_CONSISTENT = 5,      /**< Consistent memory for PCI DMA */
        _DRM_GEM = 6,
        _DRM_TTM = 7,
};

/**
 * Memory mapping flags.
 */
enum drm_map_flags {
        _DRM_RESTRICTED = 0x01,      /**< Cannot be mapped to user-virtual */
        _DRM_READ_ONLY = 0x02,
        _DRM_LOCKED = 0x04,          /**< shared, cached, locked */
        _DRM_KERNEL = 0x08,          /**< kernel requires access */
        _DRM_WRITE_COMBINING = 0x10, /**< use write-combining if available */
        _DRM_CONTAINS_LOCK = 0x20,   /**< SHM page that contains lock */
        _DRM_REMOVABLE = 0x40,       /**< Removable mapping */
        _DRM_DRIVER = 0x80           /**< Managed by driver */
};

struct drm_ctx_priv_map {
        unsigned int ctx_id;      /**< Context requesting private mapping */
        void *handle;             /**< Handle of map */
};

/**
 * DRM_IOCTL_GET_MAP, DRM_IOCTL_ADD_MAP and DRM_IOCTL_RM_MAP ioctls
 * argument type.
 *
 * \sa drmAddMap().
 */
struct drm_map {
        unsigned long offset;     /**< Requested physical address (0 for SAREA)*/
        unsigned long size;       /**< Requested physical size (bytes) */
        enum drm_map_type type;   /**< Type of memory to map */
        enum drm_map_flags flags; /**< Flags */
        void *handle;             /**< User-space: "Handle" to pass to mmap() */
                                  /**< Kernel-space: kernel-virtual address */
        int mtrr;                 /**< MTRR slot used */
        /* Private data */
};

/**
 * DRM_IOCTL_GET_CLIENT ioctl argument type.
 */
struct drm_client {
        int idx;                /**< Which client desired? */
        int auth;               /**< Is client authenticated? */
        unsigned long pid;      /**< Process ID */
        unsigned long uid;      /**< User ID */
        unsigned long magic;    /**< Magic */
        unsigned long iocs;     /**< Ioctl count */
};

enum drm_stat_type {
        _DRM_STAT_LOCK,
        _DRM_STAT_OPENS,
        _DRM_STAT_CLOSES,
        _DRM_STAT_IOCTLS,
        _DRM_STAT_LOCKS,
        _DRM_STAT_UNLOCKS,
        _DRM_STAT_VALUE,        /**< Generic value */
        _DRM_STAT_BYTE,         /**< Generic byte counter (1024bytes/K) */
        _DRM_STAT_COUNT,        /**< Generic non-byte counter (1000/k) */

        _DRM_STAT_IRQ,          /**< IRQ */
        _DRM_STAT_PRIMARY,      /**< Primary DMA bytes */
        _DRM_STAT_SECONDARY,    /**< Secondary DMA bytes */
        _DRM_STAT_DMA,          /**< DMA */
        _DRM_STAT_SPECIAL,      /**< Special DMA (e.g., priority or polled) */
        _DRM_STAT_MISSED        /**< Missed DMA opportunity */
        /* Add to the *END* of the list */
};

/**
 * DRM_IOCTL_GET_STATS ioctl argument type.
 */
struct drm_stats {
        unsigned long count;
        struct {
                unsigned long value;
                enum drm_stat_type type;
        } data[15];
};

/**
 * Hardware locking flags.
 */
enum drm_lock_flags {
        _DRM_LOCK_READY = 0x01,      /**< Wait until hardware is ready for DMA */
        _DRM_LOCK_QUIESCENT = 0x02,  /**< Wait until hardware quiescent */
        _DRM_LOCK_FLUSH = 0x04,      /**< Flush this context's DMA queue first */
        _DRM_LOCK_FLUSH_ALL = 0x08,  /**< Flush all DMA queues first */
        /* These *HALT* flags aren't supported yet
           -- they will be used to support the
           full-screen DGA-like mode. */
        _DRM_HALT_ALL_QUEUES = 0x10, /**< Halt all current and future queues */
        _DRM_HALT_CUR_QUEUES = 0x20  /**< Halt all current queues */
};

/**
 * DRM_IOCTL_LOCK, DRM_IOCTL_UNLOCK and DRM_IOCTL_FINISH ioctl argument type.
 *
 * \sa drmGetLock() and drmUnlock().
 */
struct drm_lock {
        int context;
        enum drm_lock_flags flags;
};

/**
 * DMA flags
 *
 * \warning
 * These values \e must match xf86drm.h.
 *
 * \sa drm_dma.
 */
enum drm_dma_flags {
        /* Flags for DMA buffer dispatch */
        _DRM_DMA_BLOCK = 0x01,        /**<
                                       * Block until buffer dispatched.
                                       *
                                       * \note The buffer may not yet have
                                       * been processed by the hardware --
                                       * getting a hardware lock with the
                                       * hardware quiescent will ensure
                                       * that the buffer has been
                                       * processed.
                                       */
        _DRM_DMA_WHILE_LOCKED = 0x02, /**< Dispatch while lock held */
        _DRM_DMA_PRIORITY = 0x04,     /**< High priority dispatch */

        /* Flags for DMA buffer request */
        _DRM_DMA_WAIT = 0x10,         /**< Wait for free buffers */
        _DRM_DMA_SMALLER_OK = 0x20,   /**< Smaller-than-requested buffers OK */
        _DRM_DMA_LARGER_OK = 0x40     /**< Larger-than-requested buffers OK */
};

/**
 * DRM_IOCTL_ADD_BUFS and DRM_IOCTL_MARK_BUFS ioctl argument type.
 *
 * \sa drmAddBufs().
 */
struct drm_buf_desc {
        int count;               /**< Number of buffers of this size */
        int size;                /**< Size in bytes */
        int low_mark;            /**< Low water mark */
        int high_mark;           /**< High water mark */
        enum {
                _DRM_PAGE_ALIGN = 0x01,   /**< Align on page boundaries for DMA */
                _DRM_AGP_BUFFER = 0x02,   /**< Buffer is in AGP space */
                _DRM_SG_BUFFER = 0x04,    /**< Scatter/gather memory buffer */
                _DRM_FB_BUFFER = 0x08,    /**< Buffer is in frame buffer */
                _DRM_PCI_BUFFER_RO = 0x10 /**< Map PCI DMA buffer read-only */
        } flags;
        unsigned long agp_start; /**<
                                  * Start address of where the AGP buffers are
                                  * in the AGP aperture
                                  */
};

/**
 * DRM_IOCTL_INFO_BUFS ioctl argument type.
 */
struct drm_buf_info {
        int count;                /**< Number of buffers described in list */
        struct drm_buf_desc __user *list; /**< List of buffer descriptions */
};

/**
 * DRM_IOCTL_FREE_BUFS ioctl argument type.
 */
struct drm_buf_free {
        int count;
        int __user *list;
};

/**
 * Buffer information
 *
 * \sa drm_buf_map.
 */
struct drm_buf_pub {
        int idx;                  /**< Index into the master buffer list */
        int total;                /**< Buffer size */
        int used;                 /**< Amount of buffer in use (for DMA) */
        void __user *address;     /**< Address of buffer */
};

/**
 * DRM_IOCTL_MAP_BUFS ioctl argument type.
 */
struct drm_buf_map {
        int count;              /**< Length of the buffer list */
#if defined(__cplusplus)
        void __user *c_virtual;
#else
        void __user *virtual;   /**< Mmap'd area in user-virtual */
#endif
        struct drm_buf_pub __user *list;        /**< Buffer information */
};

/**
 * DRM_IOCTL_DMA ioctl argument type.
 *
 * Indices here refer to the offset into the buffer list in drm_buf_get.
 *
 * \sa drmDMA().
 */
struct drm_dma {
        int context;                    /**< Context handle */
        int send_count;                 /**< Number of buffers to send */
        int __user *send_indices;       /**< List of handles to buffers */
        int __user *send_sizes;         /**< Lengths of data to send */
        enum drm_dma_flags flags;       /**< Flags */
        int request_count;              /**< Number of buffers requested */
        int request_size;               /**< Desired size for buffers */
        int __user *request_indices;    /**< Buffer information */
        int __user *request_sizes;
        int granted_count;              /**< Number of buffers granted */
};

enum drm_ctx_flags {
        _DRM_CONTEXT_PRESERVED = 0x01,
        _DRM_CONTEXT_2DONLY = 0x02
};

/**
 * DRM_IOCTL_ADD_CTX ioctl argument type.
 *
 * \sa drmCreateContext() and drmDestroyContext().
 */
struct drm_ctx {
        drm_context_t handle;
        enum drm_ctx_flags flags;
};

/**
 * DRM_IOCTL_RES_CTX ioctl argument type.
 */
struct drm_ctx_res {
        int count;
        struct drm_ctx __user *contexts;
};

/**
 * DRM_IOCTL_ADD_DRAW and DRM_IOCTL_RM_DRAW ioctl argument type.
 */
struct drm_draw {
        drm_drawable_t handle;
};

/**
 * DRM_IOCTL_UPDATE_DRAW ioctl argument type.
 */
typedef enum {
        DRM_DRAWABLE_CLIPRECTS,
} drm_drawable_info_type_t;

struct drm_update_draw {
        drm_drawable_t handle;
        unsigned int type;
        unsigned int num;
        unsigned long long data;
};

/**
 * DRM_IOCTL_GET_MAGIC and DRM_IOCTL_AUTH_MAGIC ioctl argument type.
 */
struct drm_auth {
        drm_magic_t magic;
};

/**
 * DRM_IOCTL_IRQ_BUSID ioctl argument type.
 *
 * \sa drmGetInterruptFromBusID().
 */
struct drm_irq_busid {
        int irq;        /**< IRQ number */
        int busnum;     /**< bus number */
        int devnum;     /**< device number */
        int funcnum;    /**< function number */
};

enum drm_vblank_seq_type {
        _DRM_VBLANK_ABSOLUTE = 0x0,     /**< Wait for specific vblank sequence number */
        _DRM_VBLANK_RELATIVE = 0x1,     /**< Wait for given number of vblanks */
        _DRM_VBLANK_FLIP = 0x8000000,   /**< Scheduled buffer swap should flip */
        _DRM_VBLANK_NEXTONMISS = 0x10000000,    /**< If missed, wait for next vblank */
        _DRM_VBLANK_SECONDARY = 0x20000000,     /**< Secondary display controller */
        _DRM_VBLANK_SIGNAL = 0x40000000 /**< Send signal instead of blocking */
};

#define _DRM_VBLANK_TYPES_MASK (_DRM_VBLANK_ABSOLUTE | _DRM_VBLANK_RELATIVE)
#define _DRM_VBLANK_FLAGS_MASK (_DRM_VBLANK_SIGNAL | _DRM_VBLANK_SECONDARY | \
                                _DRM_VBLANK_NEXTONMISS)

struct drm_wait_vblank_request {
        enum drm_vblank_seq_type type;
        unsigned int sequence;
        unsigned long signal;
};

struct drm_wait_vblank_reply {
        enum drm_vblank_seq_type type;
        unsigned int sequence;
        long tval_sec;
        long tval_usec;
};

/**
 * DRM_IOCTL_WAIT_VBLANK ioctl argument type.
 *
 * \sa drmWaitVBlank().
 */
union drm_wait_vblank {
        struct drm_wait_vblank_request request;
        struct drm_wait_vblank_reply reply;
};
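
/*
 * Usage sketch (illustrative; assumes an open DRM file descriptor fd): wait
 * for the next vertical blank by requesting one vblank relative to the
 * current sequence number.
 *
 * \code
 * union drm_wait_vblank vbl;
 * memset(&vbl, 0, sizeof(vbl));
 * vbl.request.type = _DRM_VBLANK_RELATIVE;
 * vbl.request.sequence = 1;                     // one vblank from now
 * ioctl(fd, DRM_IOCTL_WAIT_VBLANK, &vbl);
 * // vbl.reply.sequence and vbl.reply.tval_* describe when the vblank occurred
 * \endcode
 */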

#define _DRM_PRE_MODESET 1
#define _DRM_POST_MODESET 2

/**
 * DRM_IOCTL_MODESET_CTL ioctl argument type
 *
 * \sa drmModesetCtl().
 */
struct drm_modeset_ctl {
        uint32_t crtc;
        uint32_t cmd;
};

/**
 * DRM_IOCTL_AGP_ENABLE ioctl argument type.
 *
 * \sa drmAgpEnable().
 */
struct drm_agp_mode {
        unsigned long mode;     /**< AGP mode */
};

/**
 * DRM_IOCTL_AGP_ALLOC and DRM_IOCTL_AGP_FREE ioctls argument type.
 *
 * \sa drmAgpAlloc() and drmAgpFree().
 */
struct drm_agp_buffer {
        unsigned long size;     /**< In bytes -- will round to page boundary */
        unsigned long handle;   /**< Used for binding / unbinding */
        unsigned long type;     /**< Type of memory to allocate */
        unsigned long physical; /**< Physical used by i810 */
};

/**
 * DRM_IOCTL_AGP_BIND and DRM_IOCTL_AGP_UNBIND ioctls argument type.
 *
 * \sa drmAgpBind() and drmAgpUnbind().
 */
struct drm_agp_binding {
        unsigned long handle;   /**< From drm_agp_buffer */
        unsigned long offset;   /**< In bytes -- will round to page boundary */
};

/**
 * DRM_IOCTL_AGP_INFO ioctl argument type.
 *
 * \sa drmAgpVersionMajor(), drmAgpVersionMinor(), drmAgpGetMode(),
 * drmAgpBase(), drmAgpSize(), drmAgpMemoryUsed(), drmAgpMemoryAvail(),
 * drmAgpVendorId() and drmAgpDeviceId().
 */
struct drm_agp_info {
        int agp_version_major;
        int agp_version_minor;
        unsigned long mode;
        unsigned long aperture_base;   /**< physical address */
        unsigned long aperture_size;   /**< bytes */
        unsigned long memory_allowed;  /**< bytes */
        unsigned long memory_used;

        /** \name PCI information */
        /*@{ */
        unsigned short id_vendor;
        unsigned short id_device;
        /*@} */
};

/**
 * DRM_IOCTL_SG_ALLOC ioctl argument type.
 */
struct drm_scatter_gather {
        unsigned long size;     /**< In bytes -- will round to page boundary */
        unsigned long handle;   /**< Used for mapping / unmapping */
};

/**
 * DRM_IOCTL_SET_VERSION ioctl argument type.
 */
struct drm_set_version {
        int drm_di_major;
        int drm_di_minor;
        int drm_dd_major;
        int drm_dd_minor;
};

#define DRM_FENCE_FLAG_EMIT                0x00000001
#define DRM_FENCE_FLAG_SHAREABLE           0x00000002
/**
 * On hardware with no interrupt events for operation completion,
 * indicates that the kernel should sleep while waiting for any blocking
 * operation to complete rather than spinning.
 *
 * Has no effect otherwise.
 */
#define DRM_FENCE_FLAG_WAIT_LAZY           0x00000004
#define DRM_FENCE_FLAG_NO_USER             0x00000010

/* Reserved for driver use */
#define DRM_FENCE_MASK_DRIVER              0xFF000000

#define DRM_FENCE_TYPE_EXE                 0x00000001

struct drm_fence_arg {
        unsigned int handle;
        unsigned int fence_class;
        unsigned int type;
        unsigned int flags;
        unsigned int signaled;
        unsigned int error;
        unsigned int sequence;
        unsigned int pad64;
        uint64_t expand_pad[2]; /* Future expansion */
};

/* Buffer permissions, referring to how the GPU uses the buffers.
 * These translate to fence types used for the buffers.
 * Typically a texture buffer is read, a destination buffer is write, and
 * a command (batch-) buffer is exe. Can be or-ed together.
 */

#define DRM_BO_FLAG_READ        (1ULL << 0)
#define DRM_BO_FLAG_WRITE       (1ULL << 1)
#define DRM_BO_FLAG_EXE         (1ULL << 2)

/*
 * All of the bits related to access mode
 */
#define DRM_BO_MASK_ACCESS      (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE | DRM_BO_FLAG_EXE)
/*
 * Status flags. Can be read to determine the actual state of a buffer.
 * Can also be set in the buffer mask before validation.
 */

/*
 * Mask: Never evict this buffer. Not even with force. This type of buffer is only
 * available to root and must be manually removed before buffer manager shutdown
 * or lock.
 * Flags: Acknowledge.
 */
#define DRM_BO_FLAG_NO_EVICT    (1ULL << 4)

/*
 * Mask: Require that the buffer is placed in mappable memory when validated.
 * If not set the buffer may or may not be in mappable memory when validated.
 * Flags: If set, the buffer is in mappable memory.
 */
#define DRM_BO_FLAG_MAPPABLE    (1ULL << 5)

/* Mask: The buffer should be shareable with other processes.
 * Flags: The buffer is shareable with other processes.
 */
#define DRM_BO_FLAG_SHAREABLE   (1ULL << 6)

/* Mask: If set, place the buffer in cache-coherent memory if available.
 * If clear, never place the buffer in cache-coherent memory if validated.
 * Flags: The buffer is currently in cache-coherent memory.
 */
#define DRM_BO_FLAG_CACHED      (1ULL << 7)

/* Mask: Make sure that every time this buffer is validated,
 * it ends up in the same location, provided that the memory mask is the same.
 * The buffer will also not be evicted when claiming space for
 * other buffers. Basically a pinned buffer, but it may be thrown out as
 * part of buffer manager shutdown or locking.
 * Flags: Acknowledge.
 */
#define DRM_BO_FLAG_NO_MOVE     (1ULL << 8)

/* Mask: Make sure the buffer is in cached memory when mapped.  In conjunction
 * with DRM_BO_FLAG_CACHED it also allows the buffer to be bound into the GART
 * with unsnooped PTEs instead of snooped, by using chipset-specific cache
 * flushing at bind time. A better name might be DRM_BO_FLAG_TT_UNSNOOPED,
 * as the eviction to local memory (TTM unbind) on map is just a side effect
 * to prevent aggressive cache prefetch from the GPU disturbing the cache
 * management that the DRM is doing.
 *
 * Flags: Acknowledge.
 * Buffers allocated with this flag should not be used for suballocators.
 * This type may have issues on CPUs with over-aggressive caching:
 * http://marc.info/?l=linux-kernel&m=102376926732464&w=2
 */
#define DRM_BO_FLAG_CACHED_MAPPED    (1ULL << 19)

/* Mask: Force the DRM_BO_FLAG_CACHED flag strictly, also if it is set.
 * Flags: Acknowledge.
 */
#define DRM_BO_FLAG_FORCE_CACHING  (1ULL << 13)

/*
 * Mask: Force the DRM_BO_FLAG_MAPPABLE flag strictly, also if it is clear.
 * Flags: Acknowledge.
 */
#define DRM_BO_FLAG_FORCE_MAPPABLE (1ULL << 14)
#define DRM_BO_FLAG_TILE           (1ULL << 15)

/*
 * Memory type flags that can be or'ed together in the mask, but only
 * one appears in flags.
 */

/* System memory */
#define DRM_BO_FLAG_MEM_LOCAL  (1ULL << 24)
/* Translation table memory */
#define DRM_BO_FLAG_MEM_TT     (1ULL << 25)
/* Vram memory */
#define DRM_BO_FLAG_MEM_VRAM   (1ULL << 26)
/* Up to the driver to define. */
#define DRM_BO_FLAG_MEM_PRIV0  (1ULL << 27)
#define DRM_BO_FLAG_MEM_PRIV1  (1ULL << 28)
#define DRM_BO_FLAG_MEM_PRIV2  (1ULL << 29)
#define DRM_BO_FLAG_MEM_PRIV3  (1ULL << 30)
#define DRM_BO_FLAG_MEM_PRIV4  (1ULL << 31)
/* We can add more of these now with a 64-bit flag type */

/*
 * This is a mask covering all of the memory type flags; easier to just
 * use a single constant than a bunch of | values. It covers
 * DRM_BO_FLAG_MEM_LOCAL through DRM_BO_FLAG_MEM_PRIV4
 */
#define DRM_BO_MASK_MEM         0x00000000FF000000ULL
/*
 * This adds all of the CPU-mapping options in with the memory
 * type to label all bits which change how the page gets mapped
 */
#define DRM_BO_MASK_MEMTYPE     (DRM_BO_MASK_MEM | \
                                 DRM_BO_FLAG_CACHED_MAPPED | \
                                 DRM_BO_FLAG_CACHED | \
                                 DRM_BO_FLAG_MAPPABLE)

/* Driver-private flags */
#define DRM_BO_MASK_DRIVER      0xFFFF000000000000ULL
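
/*
 * Usage sketch (illustrative): in the buffer-object interface below, "mask"
 * names every placement and access bit the caller will accept, while "flags"
 * reports the state actually chosen (only one DRM_BO_FLAG_MEM_* bit). For
 * example, a validate request (the variable name req is hypothetical) that
 * accepts either TT or VRAM placement for a read/write buffer could set:
 *
 * \code
 * req.mask = DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_MEM_VRAM |
 *            DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE;
 * // after validation, the reply's flags carry exactly one DRM_BO_FLAG_MEM_* bit
 * \endcode
 */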

/*
 * Don't block on validate and map. Instead, return EBUSY.
 */
#define DRM_BO_HINT_DONT_BLOCK  0x00000002
/*
 * Don't place this buffer on the unfenced list. This means
 * that the buffer will not end up having a fence associated
 * with it as a result of this operation.
 */
#define DRM_BO_HINT_DONT_FENCE  0x00000004
/*
 * On hardware with no interrupt events for operation completion,
 * indicates that the kernel should sleep while waiting for any blocking
 * operation to complete rather than spinning.
 *
 * Has no effect otherwise.
 */
#define DRM_BO_HINT_WAIT_LAZY   0x00000008
/*
 * The client has computed relocations referring to this buffer using the
 * offset in the presumed_offset field. If that offset ends up matching
 * where this buffer lands, the kernel is free to skip executing those
 * relocations.
 */
#define DRM_BO_HINT_PRESUMED_OFFSET 0x00000010

#define DRM_BO_INIT_MAGIC 0xfe769812
#define DRM_BO_INIT_MAJOR 1
#define DRM_BO_INIT_MINOR 0
#define DRM_BO_INIT_PATCH 0

struct drm_bo_info_req {
        uint64_t mask;
        uint64_t flags;
        unsigned int handle;
        unsigned int hint;
        unsigned int fence_class;
        unsigned int desired_tile_stride;
        unsigned int tile_info;
        unsigned int pad64;
        uint64_t presumed_offset;
};

struct drm_bo_create_req {
        uint64_t flags;
        uint64_t size;
        uint64_t buffer_start;
        unsigned int hint;
        unsigned int page_alignment;
};

/*
 * Reply flags
 */

#define DRM_BO_REP_BUSY 0x00000001

struct drm_bo_info_rep {
        uint64_t flags;
        uint64_t proposed_flags;
        uint64_t size;
        uint64_t offset;
        uint64_t arg_handle;
        uint64_t buffer_start;
        unsigned int handle;
        unsigned int fence_flags;
        unsigned int rep_flags;
        unsigned int page_alignment;
        unsigned int desired_tile_stride;
        unsigned int hw_tile_stride;
        unsigned int tile_info;
        unsigned int pad64;
        uint64_t expand_pad[4]; /* Future expansion */
};

struct drm_bo_arg_rep {
        struct drm_bo_info_rep bo_info;
        int ret;
        unsigned int pad64;
};

struct drm_bo_create_arg {
        union {
                struct drm_bo_create_req req;
                struct drm_bo_info_rep rep;
        } d;
};

struct drm_bo_handle_arg {
        unsigned int handle;
};

struct drm_bo_reference_info_arg {
        union {
                struct drm_bo_handle_arg req;
                struct drm_bo_info_rep rep;
        } d;
};

struct drm_bo_map_wait_idle_arg {
        union {
                struct drm_bo_info_req req;
                struct drm_bo_info_rep rep;
        } d;
};

struct drm_bo_op_req {
        enum {
                drm_bo_validate,
                drm_bo_fence,
                drm_bo_ref_fence,
        } op;
        unsigned int arg_handle;
        struct drm_bo_info_req bo_req;
};

struct drm_bo_op_arg {
        uint64_t next;
        union {
                struct drm_bo_op_req req;
                struct drm_bo_arg_rep rep;
        } d;
        int handled;
        unsigned int pad64;
};

#define DRM_BO_MEM_LOCAL 0
#define DRM_BO_MEM_TT 1
#define DRM_BO_MEM_VRAM 2
#define DRM_BO_MEM_PRIV0 3
#define DRM_BO_MEM_PRIV1 4
#define DRM_BO_MEM_PRIV2 5
#define DRM_BO_MEM_PRIV3 6
#define DRM_BO_MEM_PRIV4 7

#define DRM_BO_MEM_TYPES 8 /* For now. */

#define DRM_BO_LOCK_UNLOCK_BM       (1 << 0)
#define DRM_BO_LOCK_IGNORE_NO_EVICT (1 << 1)

struct drm_bo_version_arg {
        uint32_t major;
        uint32_t minor;
        uint32_t patchlevel;
};

struct drm_mm_type_arg {
        unsigned int mem_type;
        unsigned int lock_flags;
};

struct drm_mm_init_arg {
        unsigned int magic;
        unsigned int major;
        unsigned int minor;
        unsigned int mem_type;
        uint64_t p_offset;
        uint64_t p_size;
};

struct drm_mm_info_arg {
        unsigned int mem_type;
        uint64_t p_size;
};

struct drm_gem_close {
        /** Handle of the object to be closed. */
        uint32_t handle;
        uint32_t pad;
};

struct drm_gem_flink {
        /** Handle for the object being named */
        uint32_t handle;

        /** Returned global name */
        uint32_t name;
};

struct drm_gem_open {
        /** Name of object being opened */
        uint32_t name;

        /** Returned handle for the object */
        uint32_t handle;

        /** Returned size of the object */
        uint64_t size;
};
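
/*
 * Usage sketch (illustrative; assumes an open DRM file descriptor fd and a
 * global "flink" name previously obtained via DRM_IOCTL_GEM_FLINK): opening a
 * named GEM object returns a handle local to this fd plus the object's size.
 *
 * \code
 * struct drm_gem_open op;
 * memset(&op, 0, sizeof(op));
 * op.name = flink_name;                         // global name from another process
 * if (ioctl(fd, DRM_IOCTL_GEM_OPEN, &op) == 0) {
 *         // op.handle is valid for this fd; op.size is the object size
 * }
 * \endcode
 */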
997 #include "drm_mode.h"
1000 * \name Ioctls Definitions
1002 /*@{*/
1004 #define DRM_IOCTL_BASE 'd'
1005 #define DRM_IO(nr) _IO(DRM_IOCTL_BASE,nr)
1006 #define DRM_IOR(nr,type) _IOR(DRM_IOCTL_BASE,nr,type)
1007 #define DRM_IOW(nr,type) _IOW(DRM_IOCTL_BASE,nr,type)
1008 #define DRM_IOWR(nr,type) _IOWR(DRM_IOCTL_BASE,nr,type)

#define DRM_IOCTL_VERSION               DRM_IOWR(0x00, struct drm_version)
#define DRM_IOCTL_GET_UNIQUE            DRM_IOWR(0x01, struct drm_unique)
#define DRM_IOCTL_GET_MAGIC             DRM_IOR( 0x02, struct drm_auth)
#define DRM_IOCTL_IRQ_BUSID             DRM_IOWR(0x03, struct drm_irq_busid)
#define DRM_IOCTL_GET_MAP               DRM_IOWR(0x04, struct drm_map)
#define DRM_IOCTL_GET_CLIENT            DRM_IOWR(0x05, struct drm_client)
#define DRM_IOCTL_GET_STATS             DRM_IOR( 0x06, struct drm_stats)
#define DRM_IOCTL_SET_VERSION           DRM_IOWR(0x07, struct drm_set_version)
#define DRM_IOCTL_MODESET_CTL           DRM_IOW(0x08, struct drm_modeset_ctl)

#define DRM_IOCTL_GEM_CLOSE             DRM_IOW (0x09, struct drm_gem_close)
#define DRM_IOCTL_GEM_FLINK             DRM_IOWR(0x0a, struct drm_gem_flink)
#define DRM_IOCTL_GEM_OPEN              DRM_IOWR(0x0b, struct drm_gem_open)

#define DRM_IOCTL_SET_UNIQUE            DRM_IOW( 0x10, struct drm_unique)
#define DRM_IOCTL_AUTH_MAGIC            DRM_IOW( 0x11, struct drm_auth)
#define DRM_IOCTL_BLOCK                 DRM_IOWR(0x12, struct drm_block)
#define DRM_IOCTL_UNBLOCK               DRM_IOWR(0x13, struct drm_block)
#define DRM_IOCTL_CONTROL               DRM_IOW( 0x14, struct drm_control)
#define DRM_IOCTL_ADD_MAP               DRM_IOWR(0x15, struct drm_map)
#define DRM_IOCTL_ADD_BUFS              DRM_IOWR(0x16, struct drm_buf_desc)
#define DRM_IOCTL_MARK_BUFS             DRM_IOW( 0x17, struct drm_buf_desc)
#define DRM_IOCTL_INFO_BUFS             DRM_IOWR(0x18, struct drm_buf_info)
#define DRM_IOCTL_MAP_BUFS              DRM_IOWR(0x19, struct drm_buf_map)
#define DRM_IOCTL_FREE_BUFS             DRM_IOW( 0x1a, struct drm_buf_free)

#define DRM_IOCTL_RM_MAP                DRM_IOW( 0x1b, struct drm_map)

#define DRM_IOCTL_SET_SAREA_CTX         DRM_IOW( 0x1c, struct drm_ctx_priv_map)
#define DRM_IOCTL_GET_SAREA_CTX         DRM_IOWR(0x1d, struct drm_ctx_priv_map)

#define DRM_IOCTL_SET_MASTER            DRM_IO(0x1e)
#define DRM_IOCTL_DROP_MASTER           DRM_IO(0x1f)

#define DRM_IOCTL_ADD_CTX               DRM_IOWR(0x20, struct drm_ctx)
#define DRM_IOCTL_RM_CTX                DRM_IOWR(0x21, struct drm_ctx)
#define DRM_IOCTL_MOD_CTX               DRM_IOW( 0x22, struct drm_ctx)
#define DRM_IOCTL_GET_CTX               DRM_IOWR(0x23, struct drm_ctx)
#define DRM_IOCTL_SWITCH_CTX            DRM_IOW( 0x24, struct drm_ctx)
#define DRM_IOCTL_NEW_CTX               DRM_IOW( 0x25, struct drm_ctx)
#define DRM_IOCTL_RES_CTX               DRM_IOWR(0x26, struct drm_ctx_res)
#define DRM_IOCTL_ADD_DRAW              DRM_IOWR(0x27, struct drm_draw)
#define DRM_IOCTL_RM_DRAW               DRM_IOWR(0x28, struct drm_draw)
#define DRM_IOCTL_DMA                   DRM_IOWR(0x29, struct drm_dma)
#define DRM_IOCTL_LOCK                  DRM_IOW( 0x2a, struct drm_lock)
#define DRM_IOCTL_UNLOCK                DRM_IOW( 0x2b, struct drm_lock)
#define DRM_IOCTL_FINISH                DRM_IOW( 0x2c, struct drm_lock)

#define DRM_IOCTL_AGP_ACQUIRE           DRM_IO( 0x30)
#define DRM_IOCTL_AGP_RELEASE           DRM_IO( 0x31)
#define DRM_IOCTL_AGP_ENABLE            DRM_IOW( 0x32, struct drm_agp_mode)
#define DRM_IOCTL_AGP_INFO              DRM_IOR( 0x33, struct drm_agp_info)
#define DRM_IOCTL_AGP_ALLOC             DRM_IOWR(0x34, struct drm_agp_buffer)
#define DRM_IOCTL_AGP_FREE              DRM_IOW( 0x35, struct drm_agp_buffer)
#define DRM_IOCTL_AGP_BIND              DRM_IOW( 0x36, struct drm_agp_binding)
#define DRM_IOCTL_AGP_UNBIND            DRM_IOW( 0x37, struct drm_agp_binding)

#define DRM_IOCTL_SG_ALLOC              DRM_IOWR(0x38, struct drm_scatter_gather)
#define DRM_IOCTL_SG_FREE               DRM_IOW( 0x39, struct drm_scatter_gather)

#define DRM_IOCTL_WAIT_VBLANK           DRM_IOWR(0x3a, union drm_wait_vblank)

#define DRM_IOCTL_UPDATE_DRAW           DRM_IOW(0x3f, struct drm_update_draw)

#define DRM_IOCTL_MM_INIT               DRM_IOWR(0xc0, struct drm_mm_init_arg)
#define DRM_IOCTL_MM_TAKEDOWN           DRM_IOWR(0xc1, struct drm_mm_type_arg)
#define DRM_IOCTL_MM_LOCK               DRM_IOWR(0xc2, struct drm_mm_type_arg)
#define DRM_IOCTL_MM_UNLOCK             DRM_IOWR(0xc3, struct drm_mm_type_arg)

#define DRM_IOCTL_FENCE_CREATE          DRM_IOWR(0xc4, struct drm_fence_arg)
#define DRM_IOCTL_FENCE_REFERENCE       DRM_IOWR(0xc6, struct drm_fence_arg)
#define DRM_IOCTL_FENCE_UNREFERENCE     DRM_IOWR(0xc7, struct drm_fence_arg)
#define DRM_IOCTL_FENCE_SIGNALED        DRM_IOWR(0xc8, struct drm_fence_arg)
#define DRM_IOCTL_FENCE_FLUSH           DRM_IOWR(0xc9, struct drm_fence_arg)
#define DRM_IOCTL_FENCE_WAIT            DRM_IOWR(0xca, struct drm_fence_arg)
#define DRM_IOCTL_FENCE_EMIT            DRM_IOWR(0xcb, struct drm_fence_arg)
#define DRM_IOCTL_FENCE_BUFFERS         DRM_IOWR(0xcc, struct drm_fence_arg)

#define DRM_IOCTL_BO_CREATE             DRM_IOWR(0xcd, struct drm_bo_create_arg)
#define DRM_IOCTL_BO_MAP                DRM_IOWR(0xcf, struct drm_bo_map_wait_idle_arg)
#define DRM_IOCTL_BO_UNMAP              DRM_IOWR(0xd0, struct drm_bo_handle_arg)
#define DRM_IOCTL_BO_REFERENCE          DRM_IOWR(0xd1, struct drm_bo_reference_info_arg)
#define DRM_IOCTL_BO_UNREFERENCE        DRM_IOWR(0xd2, struct drm_bo_handle_arg)
#define DRM_IOCTL_BO_SETSTATUS          DRM_IOWR(0xd3, struct drm_bo_map_wait_idle_arg)
#define DRM_IOCTL_BO_INFO               DRM_IOWR(0xd4, struct drm_bo_reference_info_arg)
#define DRM_IOCTL_BO_WAIT_IDLE          DRM_IOWR(0xd5, struct drm_bo_map_wait_idle_arg)
#define DRM_IOCTL_BO_VERSION            DRM_IOR(0xd6, struct drm_bo_version_arg)
#define DRM_IOCTL_MM_INFO               DRM_IOWR(0xd7, struct drm_mm_info_arg)

#define DRM_IOCTL_MODE_GETRESOURCES     DRM_IOWR(0xA0, struct drm_mode_card_res)

#define DRM_IOCTL_MODE_GETCRTC          DRM_IOWR(0xA1, struct drm_mode_crtc)
#define DRM_IOCTL_MODE_SETCRTC          DRM_IOWR(0xA2, struct drm_mode_crtc)
#define DRM_IOCTL_MODE_CURSOR           DRM_IOWR(0xA3, struct drm_mode_cursor)
#define DRM_IOCTL_MODE_GETGAMMA         DRM_IOWR(0xA4, struct drm_mode_crtc_lut)
#define DRM_IOCTL_MODE_SETGAMMA         DRM_IOWR(0xA5, struct drm_mode_crtc_lut)

#define DRM_IOCTL_MODE_GETENCODER       DRM_IOWR(0xA6, struct drm_mode_get_encoder)

#define DRM_IOCTL_MODE_GETCONNECTOR     DRM_IOWR(0xA7, struct drm_mode_get_connector)
#define DRM_IOCTL_MODE_ATTACHMODE       DRM_IOWR(0xA8, struct drm_mode_mode_cmd)
#define DRM_IOCTL_MODE_DETACHMODE       DRM_IOWR(0xA9, struct drm_mode_mode_cmd)
#define DRM_IOCTL_MODE_GETPROPERTY      DRM_IOWR(0xAA, struct drm_mode_get_property)
#define DRM_IOCTL_MODE_SETPROPERTY      DRM_IOWR(0xAB, struct drm_mode_connector_set_property)
#define DRM_IOCTL_MODE_GETPROPBLOB      DRM_IOWR(0xAC, struct drm_mode_get_blob)

#define DRM_IOCTL_MODE_GETFB            DRM_IOWR(0xAD, struct drm_mode_fb_cmd)
#define DRM_IOCTL_MODE_ADDFB            DRM_IOWR(0xAE, struct drm_mode_fb_cmd)
#define DRM_IOCTL_MODE_RMFB             DRM_IOWR(0xAF, uint32_t)
#define DRM_IOCTL_MODE_REPLACEFB        DRM_IOWR(0xB0, struct drm_mode_fb_cmd)

/*@}*/

/**
 * Device-specific ioctls should only be in their respective headers.
 * The device-specific ioctl range is from 0x40 to 0x99.
 * Generic ioctls restart at 0xA0.
 *
 * \sa drmCommandNone(), drmCommandRead(), drmCommandWrite(), and
 * drmCommandReadWrite().
 */
#define DRM_COMMAND_BASE                0x40
#define DRM_COMMAND_END                 0xA0
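
/*
 * Usage sketch (illustrative; the MYDRV_* names are hypothetical): a
 * device-specific driver defines its ioctls in its own header by offsetting
 * into the [DRM_COMMAND_BASE, DRM_COMMAND_END) range.
 *
 * \code
 * #define MYDRV_GETPARAM        0x00
 * #define MYDRV_IOCTL_GETPARAM  DRM_IOWR(DRM_COMMAND_BASE + MYDRV_GETPARAM, \
 *                                        struct mydrv_getparam)
 * \endcode
 */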

/* typedef area */
#ifndef __KERNEL__
typedef struct drm_clip_rect drm_clip_rect_t;
typedef struct drm_tex_region drm_tex_region_t;
typedef struct drm_hw_lock drm_hw_lock_t;
typedef struct drm_version drm_version_t;
typedef struct drm_unique drm_unique_t;
typedef struct drm_list drm_list_t;
typedef struct drm_block drm_block_t;
typedef struct drm_control drm_control_t;
typedef enum drm_map_type drm_map_type_t;
typedef enum drm_map_flags drm_map_flags_t;
typedef struct drm_ctx_priv_map drm_ctx_priv_map_t;
typedef struct drm_map drm_map_t;
typedef struct drm_client drm_client_t;
typedef enum drm_stat_type drm_stat_type_t;
typedef struct drm_stats drm_stats_t;
typedef enum drm_lock_flags drm_lock_flags_t;
typedef struct drm_lock drm_lock_t;
typedef enum drm_dma_flags drm_dma_flags_t;
typedef struct drm_buf_desc drm_buf_desc_t;
typedef struct drm_buf_info drm_buf_info_t;
typedef struct drm_buf_free drm_buf_free_t;
typedef struct drm_buf_pub drm_buf_pub_t;
typedef struct drm_buf_map drm_buf_map_t;
typedef struct drm_dma drm_dma_t;
typedef union drm_wait_vblank drm_wait_vblank_t;
typedef struct drm_agp_mode drm_agp_mode_t;
typedef enum drm_ctx_flags drm_ctx_flags_t;
typedef struct drm_ctx drm_ctx_t;
typedef struct drm_ctx_res drm_ctx_res_t;
typedef struct drm_draw drm_draw_t;
typedef struct drm_update_draw drm_update_draw_t;
typedef struct drm_auth drm_auth_t;
typedef struct drm_irq_busid drm_irq_busid_t;
typedef enum drm_vblank_seq_type drm_vblank_seq_type_t;
typedef struct drm_agp_buffer drm_agp_buffer_t;
typedef struct drm_agp_binding drm_agp_binding_t;
typedef struct drm_agp_info drm_agp_info_t;
typedef struct drm_scatter_gather drm_scatter_gather_t;
typedef struct drm_set_version drm_set_version_t;

typedef struct drm_fence_arg drm_fence_arg_t;
typedef struct drm_mm_type_arg drm_mm_type_arg_t;
typedef struct drm_mm_init_arg drm_mm_init_arg_t;
typedef enum drm_bo_type drm_bo_type_t;
#endif

#endif