/**************************************************************************
 *
 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#ifndef __VMWGFX_DRM_H__
#define __VMWGFX_DRM_H__

#include "drm.h"

#if defined(__cplusplus)
extern "C" {
#endif

#define DRM_VMW_MAX_SURFACE_FACES 6
#define DRM_VMW_MAX_MIP_LEVELS 24

#define DRM_VMW_GET_PARAM            0
#define DRM_VMW_ALLOC_DMABUF         1
#define DRM_VMW_ALLOC_BO             1
#define DRM_VMW_UNREF_DMABUF         2
#define DRM_VMW_HANDLE_CLOSE         2
#define DRM_VMW_CURSOR_BYPASS        3
/* guarded by DRM_VMW_PARAM_NUM_STREAMS != 0 */
#define DRM_VMW_CONTROL_STREAM       4
#define DRM_VMW_CLAIM_STREAM         5
#define DRM_VMW_UNREF_STREAM         6
/* guarded by DRM_VMW_PARAM_3D == 1 */
#define DRM_VMW_CREATE_CONTEXT       7
#define DRM_VMW_UNREF_CONTEXT        8
#define DRM_VMW_CREATE_SURFACE       9
#define DRM_VMW_UNREF_SURFACE        10
#define DRM_VMW_REF_SURFACE          11
#define DRM_VMW_EXECBUF              12
#define DRM_VMW_GET_3D_CAP           13
#define DRM_VMW_FENCE_WAIT           14
#define DRM_VMW_FENCE_SIGNALED       15
#define DRM_VMW_FENCE_UNREF          16
#define DRM_VMW_FENCE_EVENT          17
#define DRM_VMW_PRESENT              18
#define DRM_VMW_PRESENT_READBACK     19
#define DRM_VMW_UPDATE_LAYOUT        20
#define DRM_VMW_CREATE_SHADER        21
#define DRM_VMW_UNREF_SHADER         22
#define DRM_VMW_GB_SURFACE_CREATE    23
#define DRM_VMW_GB_SURFACE_REF       24
#define DRM_VMW_SYNCCPU              25
#define DRM_VMW_CREATE_EXTENDED_CONTEXT 26
#define DRM_VMW_GB_SURFACE_CREATE_EXT   27
#define DRM_VMW_GB_SURFACE_REF_EXT      28
#define DRM_VMW_MSG                     29

/*************************************************************************/
/**
 * DRM_VMW_GET_PARAM - get device information.
 *
 * DRM_VMW_PARAM_FIFO_OFFSET:
 * Offset to use to map the first page of the FIFO read-only.
 * The fifo is mapped using the mmap() system call on the drm device.
 *
 * DRM_VMW_PARAM_OVERLAY_IOCTL:
 * Does the driver support the overlay ioctl.
 *
 * DRM_VMW_PARAM_SM4_1
 * SM4_1 support is enabled.
 */

#define DRM_VMW_PARAM_NUM_STREAMS      0
#define DRM_VMW_PARAM_NUM_FREE_STREAMS 1
#define DRM_VMW_PARAM_3D               2
#define DRM_VMW_PARAM_HW_CAPS          3
#define DRM_VMW_PARAM_FIFO_CAPS        4
#define DRM_VMW_PARAM_MAX_FB_SIZE      5
#define DRM_VMW_PARAM_FIFO_HW_VERSION  6
#define DRM_VMW_PARAM_MAX_SURF_MEMORY  7
#define DRM_VMW_PARAM_3D_CAPS_SIZE     8
#define DRM_VMW_PARAM_MAX_MOB_MEMORY   9
#define DRM_VMW_PARAM_MAX_MOB_SIZE     10
#define DRM_VMW_PARAM_SCREEN_TARGET    11
#define DRM_VMW_PARAM_DX               12
#define DRM_VMW_PARAM_HW_CAPS2         13
#define DRM_VMW_PARAM_SM4_1            14

/**
 * enum drm_vmw_handle_type - handle type for ref ioctls
 */
enum drm_vmw_handle_type {
        DRM_VMW_HANDLE_LEGACY = 0,
        DRM_VMW_HANDLE_PRIME = 1
};

/**
 * struct drm_vmw_getparam_arg
 *
 * @value: Returned value. //Out
 * @param: Parameter to query. //In.
 *
 * Argument to the DRM_VMW_GET_PARAM Ioctl.
 */
struct drm_vmw_getparam_arg {
        __u64 value;
        __u32 param;
        __u32 pad64;
};
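
/*
 * Example (illustrative sketch, not part of the UAPI): querying a parameter
 * from user-space with libdrm's drmCommandWriteRead() on an already opened
 * vmwgfx DRM file descriptor "fd". The helper name vmw_has_3d() and the
 * calling code are hypothetical.
 *
 *      #include <xf86drm.h>
 *
 *      static int vmw_has_3d(int fd)
 *      {
 *              struct drm_vmw_getparam_arg arg = {
 *                      .param = DRM_VMW_PARAM_3D,
 *              };
 *
 *              if (drmCommandWriteRead(fd, DRM_VMW_GET_PARAM,
 *                                      &arg, sizeof(arg)) != 0)
 *                      return 0;       // treat failure as "no 3D"
 *              return arg.value != 0;
 *      }
 */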

/*************************************************************************/
/**
 * DRM_VMW_CREATE_CONTEXT - Create a host context.
 *
 * Allocates a device unique context id, and queues a create context command
 * for the host. Does not wait for host completion.
 */

/**
 * struct drm_vmw_context_arg
 *
 * @cid: Device unique context ID.
 *
 * Output argument to the DRM_VMW_CREATE_CONTEXT Ioctl.
 * Input argument to the DRM_VMW_UNREF_CONTEXT Ioctl.
 */
struct drm_vmw_context_arg {
        __s32 cid;
        __u32 pad64;
};

/*************************************************************************/
/**
 * DRM_VMW_UNREF_CONTEXT - Free a host context.
 *
 * Frees a global context id, and queues a destroy host command for the host.
 * Does not wait for host completion. The context ID can be used directly
 * in the command stream and shows up as the same context ID on the host.
 */

/*************************************************************************/
/**
 * DRM_VMW_CREATE_SURFACE - Create a host surface.
 *
 * Allocates a device unique surface id, and queues a create surface command
 * for the host. Does not wait for host completion. The surface ID can be
 * used directly in the command stream and shows up as the same surface
 * ID on the host.
 */

/**
 * struct drm_vmw_surface_create_req
 *
 * @flags: Surface flags as understood by the host.
 * @format: Surface format as understood by the host.
 * @mip_levels: Number of mip levels for each face.
 * An unused face should have 0 encoded.
 * @size_addr: Address of a user-space array of struct drm_vmw_size
 * cast to an __u64 for 32-64 bit compatibility.
 * The size of the array should equal the total number of mipmap levels.
 * @shareable: Boolean whether other clients (as identified by file descriptors)
 * may reference this surface.
 * @scanout: Boolean whether the surface is intended to be used as a
 * scanout.
 *
 * Input data to the DRM_VMW_CREATE_SURFACE Ioctl.
 * Output data from the DRM_VMW_REF_SURFACE Ioctl.
 */
struct drm_vmw_surface_create_req {
        __u32 flags;
        __u32 format;
        __u32 mip_levels[DRM_VMW_MAX_SURFACE_FACES];
        __u64 size_addr;
        __s32 shareable;
        __s32 scanout;
};

/**
 * struct drm_vmw_surface_arg
 *
 * @sid: Surface id of created surface or surface to destroy or reference.
 * @handle_type: Handle type for DRM_VMW_REF_SURFACE Ioctl.
 *
 * Output data from the DRM_VMW_CREATE_SURFACE Ioctl.
 * Input argument to the DRM_VMW_UNREF_SURFACE Ioctl.
 * Input argument to the DRM_VMW_REF_SURFACE Ioctl.
 */
struct drm_vmw_surface_arg {
        __s32 sid;
        enum drm_vmw_handle_type handle_type;
};

/**
 * struct drm_vmw_size
 *
 * @width - mip level width
 * @height - mip level height
 * @depth - mip level depth
 *
 * Description of a mip level.
 * Input data to the DRM_VMW_CREATE_SURFACE Ioctl.
 */
struct drm_vmw_size {
        __u32 width;
        __u32 height;
        __u32 depth;
        __u32 pad64;
};

/**
 * union drm_vmw_surface_create_arg
 *
 * @rep: Output data as described above.
 * @req: Input data as described above.
 *
 * Argument to the DRM_VMW_CREATE_SURFACE Ioctl.
 */
union drm_vmw_surface_create_arg {
        struct drm_vmw_surface_arg rep;
        struct drm_vmw_surface_create_req req;
};
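
/*
 * Example (illustrative sketch, not part of the UAPI): creating a single-face,
 * single-mip-level legacy surface via libdrm's drmCommandWriteRead(). The
 * format member must carry an SVGA3D format code taken from the device
 * headers; the SVGA3D_FORMAT constant below stands in for one and is
 * hypothetical, as is the surrounding code.
 *
 *      struct drm_vmw_size size = { .width = 64, .height = 64, .depth = 1 };
 *      union drm_vmw_surface_create_arg arg;
 *
 *      memset(&arg, 0, sizeof(arg));
 *      arg.req.format = SVGA3D_FORMAT;         // device-specific format code
 *      arg.req.mip_levels[0] = 1;              // one mip level on face 0 only
 *      arg.req.size_addr = (__u64)(unsigned long)&size;
 *
 *      if (drmCommandWriteRead(fd, DRM_VMW_CREATE_SURFACE,
 *                              &arg, sizeof(arg)) == 0)
 *              sid = arg.rep.sid;      // usable directly in the command stream
 */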

/*************************************************************************/
/**
 * DRM_VMW_REF_SURFACE - Reference a host surface.
 *
 * Puts a reference on a host surface with a given sid, as previously
 * returned by the DRM_VMW_CREATE_SURFACE ioctl.
 * A reference will make sure the surface isn't destroyed while we hold
 * it and will allow the calling client to use the surface ID in the command
 * stream.
 *
 * On successful return, the Ioctl returns the surface information given
 * in the DRM_VMW_CREATE_SURFACE ioctl.
 */

/**
 * union drm_vmw_surface_reference_arg
 *
 * @rep: Output data as described above.
 * @req: Input data as described above.
 *
 * Argument to the DRM_VMW_REF_SURFACE Ioctl.
 */
union drm_vmw_surface_reference_arg {
        struct drm_vmw_surface_create_req rep;
        struct drm_vmw_surface_arg req;
};

/*************************************************************************/
/**
 * DRM_VMW_UNREF_SURFACE - Unreference a host surface.
 *
 * Clear a reference previously put on a host surface.
 * When all references are gone, including the one implicitly placed
 * on creation, a destroy surface command will be queued for the host.
 * Does not wait for completion.
 */

/*************************************************************************/
/**
 * DRM_VMW_EXECBUF
 *
 * Submit a command buffer for execution on the host, and return a
 * fence seqno that when signaled, indicates that the command buffer has
 * executed.
 */

/**
 * struct drm_vmw_execbuf_arg
 *
 * @commands: User-space address of a command buffer cast to an __u64.
 * @command_size: Size in bytes of the command buffer.
 * @throttle_us: Sleep until software is less than @throttle_us
 * microseconds ahead of hardware. The driver may round this value
 * to the nearest kernel tick.
 * @fence_rep: User-space address of a struct drm_vmw_fence_rep cast to an
 * __u64.
 * @version: Allows expanding the execbuf ioctl parameters without breaking
 * backwards compatibility, since user-space will always tell the kernel
 * which version it uses.
 * @flags: Execbuf flags.
 * @imported_fence_fd: FD for a fence imported from another device
 *
 * Argument to the DRM_VMW_EXECBUF Ioctl.
 */

#define DRM_VMW_EXECBUF_VERSION 2

#define DRM_VMW_EXECBUF_FLAG_IMPORT_FENCE_FD (1 << 0)
#define DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD (1 << 1)

struct drm_vmw_execbuf_arg {
        __u64 commands;
        __u32 command_size;
        __u32 throttle_us;
        __u64 fence_rep;
        __u32 version;
        __u32 flags;
        __u32 context_handle;
        __s32 imported_fence_fd;
};

/**
 * struct drm_vmw_fence_rep
 *
 * @handle: Fence object handle for fence associated with a command submission.
 * @mask: Fence flags relevant for this fence object.
 * @seqno: Fence sequence number in fifo. A fence object with a lower
 * seqno will signal the EXEC flag before a fence object with a higher
 * seqno. This can be used by user-space to avoid kernel calls to determine
 * whether a fence has signaled the EXEC flag. Note that @seqno will
 * wrap at 32-bit.
 * @passed_seqno: The highest seqno number processed by the hardware
 * so far. This can be used to mark user-space fence objects as signaled, and
 * to determine whether a fence seqno might be stale.
 * @fd: FD associated with the fence, -1 if not exported
 * @error: This member should've been set to -EFAULT on submission.
 * The following actions should be taken on completion:
 * error == -EFAULT: Fence communication failed. The host is synchronized.
 * Use the last fence id read from the FIFO fence register.
 * error != 0 && error != -EFAULT:
 * Fence submission failed. The host is synchronized. Use the fence_seq member.
 * error == 0: All is OK. The host may not be synchronized.
 * Use the fence_seq member.
 *
 * Input / Output data to the DRM_VMW_EXECBUF Ioctl.
 */
struct drm_vmw_fence_rep {
        __u32 handle;
        __u32 mask;
        __u32 seqno;
        __u32 passed_seqno;
        __s32 fd;
        __s32 error;
};
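
/*
 * Example (illustrative sketch, not part of the UAPI): submitting a command
 * buffer with libdrm's drmCommandWrite() and picking up the returned fence
 * handle. "cmd" and "cmd_size" are assumed to hold a complete, host-legal
 * SVGA command stream; the error handling shown is hypothetical.
 *
 *      struct drm_vmw_fence_rep fence_rep = { .error = -EFAULT };
 *      struct drm_vmw_execbuf_arg arg;
 *
 *      memset(&arg, 0, sizeof(arg));
 *      arg.commands = (__u64)(unsigned long)cmd;
 *      arg.command_size = cmd_size;
 *      arg.fence_rep = (__u64)(unsigned long)&fence_rep;
 *      arg.version = DRM_VMW_EXECBUF_VERSION;
 *
 *      if (drmCommandWrite(fd, DRM_VMW_EXECBUF, &arg, sizeof(arg)) == 0 &&
 *          fence_rep.error == 0)
 *              fence_handle = fence_rep.handle; // wait with DRM_VMW_FENCE_WAIT
 */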

/*************************************************************************/
/**
 * DRM_VMW_ALLOC_BO
 *
 * Allocate a buffer object that is visible also to the host.
 * NOTE: The buffer is
 * identified by a handle and an offset, which are private to the guest, but
 * usable in the command stream. The guest kernel may translate these
 * and patch up the command stream accordingly. In the future, the offset may
 * be zero at all times, or it may disappear from the interface before it is
 * fixed.
 *
 * The buffer object may stay user-space mapped in the guest at all times,
 * and is thus suitable for sub-allocation.
 *
 * Buffer objects are mapped using the mmap() syscall on the drm device.
 */

/**
 * struct drm_vmw_alloc_bo_req
 *
 * @size: Required minimum size of the buffer.
 *
 * Input data to the DRM_VMW_ALLOC_BO Ioctl.
 */
struct drm_vmw_alloc_bo_req {
        __u32 size;
        __u32 pad64;
};
#define drm_vmw_alloc_dmabuf_req drm_vmw_alloc_bo_req

/**
 * struct drm_vmw_bo_rep
 *
 * @map_handle: Offset to use in the mmap() call used to map the buffer.
 * @handle: Handle unique to this buffer. Used for unreferencing.
 * @cur_gmr_id: GMR id to use in the command stream when this buffer is
 * referenced. See note above.
 * @cur_gmr_offset: Offset to use in the command stream when this buffer is
 * referenced. See note above.
 *
 * Output data from the DRM_VMW_ALLOC_BO Ioctl.
 */
struct drm_vmw_bo_rep {
        __u64 map_handle;
        __u32 handle;
        __u32 cur_gmr_id;
        __u32 cur_gmr_offset;
        __u32 pad64;
};
#define drm_vmw_dmabuf_rep drm_vmw_bo_rep

/**
 * union drm_vmw_alloc_bo_arg
 *
 * @req: Input data as described above.
 * @rep: Output data as described above.
 *
 * Argument to the DRM_VMW_ALLOC_BO Ioctl.
 */
union drm_vmw_alloc_bo_arg {
        struct drm_vmw_alloc_bo_req req;
        struct drm_vmw_bo_rep rep;
};
#define drm_vmw_alloc_dmabuf_arg drm_vmw_alloc_bo_arg
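
/*
 * Example (illustrative sketch, not part of the UAPI): allocating a buffer
 * object and mapping it into the process with mmap() on the DRM fd, using
 * map_handle as the file offset. All names other than the UAPI ones are
 * hypothetical and error handling is omitted.
 *
 *      #include <sys/mman.h>
 *      #include <xf86drm.h>
 *
 *      union drm_vmw_alloc_bo_arg arg;
 *      void *ptr = NULL;
 *
 *      memset(&arg, 0, sizeof(arg));
 *      arg.req.size = 65536;
 *
 *      if (drmCommandWriteRead(fd, DRM_VMW_ALLOC_BO, &arg, sizeof(arg)) == 0)
 *              ptr = mmap(NULL, 65536, PROT_READ | PROT_WRITE, MAP_SHARED,
 *                         fd, arg.rep.map_handle);
 *      // arg.rep.handle identifies the buffer in later ioctls;
 *      // arg.rep.cur_gmr_id / cur_gmr_offset are what the command stream uses.
 */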

/*************************************************************************/
/**
 * DRM_VMW_CONTROL_STREAM - Control overlays, aka streams.
 *
 * This IOCTL controls the overlay units of the svga device.
 * The SVGA overlay units do not work like regular hardware units in
 * that they do not automatically read back the contents of the given dma
 * buffer. Instead they only read back for each call to this ioctl, and
 * at any point between this call being made and a following call that
 * either changes the buffer or disables the stream.
 */

/**
 * struct drm_vmw_rect
 *
 * Defines a rectangle. Used in the overlay ioctl to define
 * source and destination rectangle.
 */
struct drm_vmw_rect {
        __s32 x;
        __s32 y;
        __u32 w;
        __u32 h;
};

/**
 * struct drm_vmw_control_stream_arg
 *
 * @stream_id: Stream to control
 * @enabled: If false all following arguments are ignored.
 * @handle: Handle to buffer for getting data from.
 * @format: Format of the overlay as understood by the host.
 * @width: Width of the overlay.
 * @height: Height of the overlay.
 * @size: Size of the overlay in bytes.
 * @pitch: Array of pitches, the two last are only used for YUV12 formats.
 * @offset: Offset from start of dma buffer to overlay.
 * @src: Source rect, must be within the defined area above.
 * @dst: Destination rect, x and y may be negative.
 *
 * Argument to the DRM_VMW_CONTROL_STREAM Ioctl.
 */
struct drm_vmw_control_stream_arg {
        __u32 stream_id;
        __u32 enabled;

        __u32 flags;
        __u32 color_key;

        __u32 handle;
        __u32 offset;
        __s32 format;
        __u32 size;
        __u32 width;
        __u32 height;
        __u32 pitch[3];

        __u32 pad64;
        struct drm_vmw_rect src;
        struct drm_vmw_rect dst;
};

/*************************************************************************/
/**
 * DRM_VMW_CURSOR_BYPASS - Give extra information about cursor bypass.
 *
 */

#define DRM_VMW_CURSOR_BYPASS_ALL    (1 << 0)
#define DRM_VMW_CURSOR_BYPASS_FLAGS       (1)

/**
 * struct drm_vmw_cursor_bypass_arg
 *
 * @flags: Flags.
 * @crtc_id: Crtc id, only used if DRM_VMW_CURSOR_BYPASS_ALL isn't passed.
 * @xpos: X position of cursor.
 * @ypos: Y position of cursor.
 * @xhot: X hotspot.
 * @yhot: Y hotspot.
 *
 * Argument to the DRM_VMW_CURSOR_BYPASS Ioctl.
 */
struct drm_vmw_cursor_bypass_arg {
        __u32 flags;
        __u32 crtc_id;
        __s32 xpos;
        __s32 ypos;
        __s32 xhot;
        __s32 yhot;
};

/*************************************************************************/
/**
 * DRM_VMW_CLAIM_STREAM - Claim a single stream.
 */

/**
 * struct drm_vmw_stream_arg
 *
 * @stream_id: Device unique stream ID.
 *
 * Output argument to the DRM_VMW_CLAIM_STREAM Ioctl.
 * Input argument to the DRM_VMW_UNREF_STREAM Ioctl.
 */
struct drm_vmw_stream_arg {
        __u32 stream_id;
        __u32 pad64;
};

/*************************************************************************/
/**
 * DRM_VMW_UNREF_STREAM - Unclaim a stream.
 *
 * Return a single stream that was claimed by this process. Also makes
 * sure that the stream has been stopped.
 */

/*************************************************************************/
/**
 * DRM_VMW_GET_3D_CAP
 *
 * Read 3D capabilities from the FIFO
 */

/**
 * struct drm_vmw_get_3d_cap_arg
 *
 * @buffer: Pointer to a buffer for capability data, cast to an __u64
 * @max_size: Max size to copy
 *
 * Input argument to the DRM_VMW_GET_3D_CAP ioctl.
 */
struct drm_vmw_get_3d_cap_arg {
        __u64 buffer;
        __u32 max_size;
        __u32 pad64;
};
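
/*
 * Example (illustrative sketch, not part of the UAPI): reading the 3D
 * capability block. The required buffer size is queried first through
 * DRM_VMW_GET_PARAM / DRM_VMW_PARAM_3D_CAPS_SIZE; the calloc() buffer and
 * variable names are hypothetical, error handling is omitted.
 *
 *      struct drm_vmw_getparam_arg gp = { .param = DRM_VMW_PARAM_3D_CAPS_SIZE };
 *      struct drm_vmw_get_3d_cap_arg cap;
 *      void *caps;
 *
 *      drmCommandWriteRead(fd, DRM_VMW_GET_PARAM, &gp, sizeof(gp));
 *      caps = calloc(1, gp.value);
 *
 *      memset(&cap, 0, sizeof(cap));
 *      cap.buffer = (__u64)(unsigned long)caps;
 *      cap.max_size = gp.value;
 *      drmCommandWrite(fd, DRM_VMW_GET_3D_CAP, &cap, sizeof(cap));
 */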

/*************************************************************************/
/**
 * DRM_VMW_FENCE_WAIT
 *
 * Waits for a fence object to signal. The wait is interruptible, so that
 * signals may be delivered during the interrupt. The wait may timeout,
 * in which case the call returns -EBUSY. If the wait is restarted,
 * that is restarting without resetting @cookie_valid to zero,
 * the timeout is computed from the first call.
 *
 * The flags argument to the DRM_VMW_FENCE_WAIT ioctl indicates what to wait
 * on:
 * DRM_VMW_FENCE_FLAG_EXEC: All commands ahead of the fence in the command
 * stream have executed.
 * DRM_VMW_FENCE_FLAG_QUERY: All query results resulting from query finish
 * commands in the buffer given to the EXECBUF ioctl returning the fence
 * object handle are available to user-space.
 *
 * DRM_VMW_WAIT_OPTION_UNREF: If this wait option is given, and the
 * fence wait ioctl returns 0, the fence object has been unreferenced after
 * the wait.
 */

#define DRM_VMW_FENCE_FLAG_EXEC   (1 << 0)
#define DRM_VMW_FENCE_FLAG_QUERY  (1 << 1)

#define DRM_VMW_WAIT_OPTION_UNREF (1 << 0)

/**
 * struct drm_vmw_fence_wait_arg
 *
 * @handle: Fence object handle as returned by the DRM_VMW_EXECBUF ioctl.
 * @cookie_valid: Must be reset to 0 on first call. Left alone on restart.
 * @kernel_cookie: Set to 0 on first call. Left alone on restart.
 * @timeout_us: Wait timeout in microseconds. 0 for indefinite timeout.
 * @lazy: Set to 1 if timing is not critical. Allow more than a kernel tick
 * before returning.
 * @flags: Fence flags to wait on.
 * @wait_options: Options that control the behaviour of the wait ioctl.
 *
 * Input argument to the DRM_VMW_FENCE_WAIT ioctl.
 */
struct drm_vmw_fence_wait_arg {
        __u32 handle;
        __s32 cookie_valid;
        __u64 kernel_cookie;
        __u64 timeout_us;
        __s32 lazy;
        __s32 flags;
        __s32 wait_options;
        __s32 pad64;
};
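
/*
 * Example (illustrative sketch, not part of the UAPI): waiting for a fence
 * returned by DRM_VMW_EXECBUF to signal execution, and dropping the
 * reference in the same call via DRM_VMW_WAIT_OPTION_UNREF. "fence_handle"
 * comes from a prior struct drm_vmw_fence_rep; the surrounding code is
 * hypothetical.
 *
 *      struct drm_vmw_fence_wait_arg wait;
 *      int ret;
 *
 *      memset(&wait, 0, sizeof(wait));
 *      wait.handle = fence_handle;
 *      wait.timeout_us = 10 * 1000 * 1000;     // 10 seconds
 *      wait.lazy = 1;                          // timing not critical
 *      wait.flags = DRM_VMW_FENCE_FLAG_EXEC;
 *      wait.wait_options = DRM_VMW_WAIT_OPTION_UNREF;
 *
 *      ret = drmCommandWriteRead(fd, DRM_VMW_FENCE_WAIT, &wait, sizeof(wait));
 *      // 0: signaled and unreferenced; -EBUSY: timed out, handle still valid
 */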

/*************************************************************************/
/**
 * DRM_VMW_FENCE_SIGNALED
 *
 * Checks if a fence object is signaled.
 */

/**
 * struct drm_vmw_fence_signaled_arg
 *
 * @handle: Fence object handle as returned by the DRM_VMW_EXECBUF ioctl.
 * @flags: Fence object flags input to DRM_VMW_FENCE_SIGNALED ioctl
 * @signaled: Out: Flags signaled.
 * @passed_seqno: Out: Highest sequence passed so far. Can be used to signal
 * the EXEC flag of user-space fence objects.
 *
 * Input/Output argument to the DRM_VMW_FENCE_SIGNALED and DRM_VMW_FENCE_UNREF
 * ioctls.
 */
struct drm_vmw_fence_signaled_arg {
        __u32 handle;
        __u32 flags;
        __s32 signaled;
        __u32 passed_seqno;
        __u32 signaled_flags;
        __u32 pad64;
};

/*************************************************************************/
/**
 * DRM_VMW_FENCE_UNREF
 *
 * Unreferences a fence object, and causes it to be destroyed if there are no
 * other references to it.
 *
 */

/**
 * struct drm_vmw_fence_arg
 *
 * @handle: Fence object handle as returned by the DRM_VMW_EXECBUF ioctl.
 *
 * Input/Output argument to the DRM_VMW_FENCE_UNREF ioctl.
 */
struct drm_vmw_fence_arg {
        __u32 handle;
        __u32 pad64;
};

/*************************************************************************/
/**
 * DRM_VMW_FENCE_EVENT
 *
 * Queues an event on a fence to be delivered on the drm character device
 * when the fence has signaled the DRM_VMW_FENCE_FLAG_EXEC flag.
 * Optionally the approximate time when the fence signaled is
 * given by the event.
 */

/*
 * The event type
 */
#define DRM_VMW_EVENT_FENCE_SIGNALED 0x80000000

struct drm_vmw_event_fence {
        struct drm_event base;
        __u64 user_data;
        __u32 tv_sec;
        __u32 tv_usec;
};

/*
 * Flags that may be given to the command.
 */
/* Request fence signaled time on the event. */
#define DRM_VMW_FE_FLAG_REQ_TIME (1 << 0)

/**
 * struct drm_vmw_fence_event_arg
 *
 * @fence_rep: Pointer to fence_rep structure cast to __u64 or 0 if
 * the fence is not supposed to be referenced by user-space.
 * @user_data: Info to be delivered with the event.
 * @handle: Attach the event to this fence only.
 * @flags: A set of flags as defined above.
 */
struct drm_vmw_fence_event_arg {
        __u64 fence_rep;
        __u64 user_data;
        __u32 handle;
        __u32 flags;
};
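
/*
 * Example (illustrative sketch, not part of the UAPI): queuing a fence event
 * with a timestamp request and picking it up by read()ing the DRM fd. The
 * buffer size and surrounding code are hypothetical; a real client would
 * typically poll() the fd and may receive other event types interleaved.
 *
 *      struct drm_vmw_fence_event_arg ev;
 *      char buf[128];
 *      ssize_t len;
 *
 *      memset(&ev, 0, sizeof(ev));
 *      ev.handle = fence_handle;
 *      ev.user_data = 0x1234;                  // echoed back in the event
 *      ev.flags = DRM_VMW_FE_FLAG_REQ_TIME;
 *      drmCommandWrite(fd, DRM_VMW_FENCE_EVENT, &ev, sizeof(ev));
 *
 *      len = read(fd, buf, sizeof(buf));       // blocks until an event arrives
 *      if (len >= (ssize_t)sizeof(struct drm_vmw_event_fence)) {
 *              const struct drm_vmw_event_fence *fe = (const void *)buf;
 *              if (fe->base.type == DRM_VMW_EVENT_FENCE_SIGNALED)
 *                      ; // fe->user_data, fe->tv_sec / tv_usec are valid here
 *      }
 */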

/*************************************************************************/
/**
 * DRM_VMW_PRESENT
 *
 * Executes an SVGA present on a given fb for a given surface. The surface
 * is placed on the framebuffer. Cliprects are given relative to the given
 * point (the point designated by dest_{x|y}).
 *
 */

/**
 * struct drm_vmw_present_arg
 * @fb_id: framebuffer id to present / read back from.
 * @sid: Surface id to present from.
 * @dest_x: X placement coordinate for surface.
 * @dest_y: Y placement coordinate for surface.
 * @clips_ptr: Pointer to an array of clip rects cast to an __u64.
 * @num_clips: Number of cliprects given relative to the framebuffer origin,
 * in the same coordinate space as the frame buffer.
 * @pad64: Unused 64-bit padding.
 *
 * Input argument to the DRM_VMW_PRESENT ioctl.
 */
struct drm_vmw_present_arg {
        __u32 fb_id;
        __u32 sid;
        __s32 dest_x;
        __s32 dest_y;
        __u64 clips_ptr;
        __u32 num_clips;
        __u32 pad64;
};

/*************************************************************************/
/**
 * DRM_VMW_PRESENT_READBACK
 *
 * Executes an SVGA present readback from a given fb to the dma buffer
 * currently bound as the fb. If there is no dma buffer bound to the fb,
 * an error will be returned.
 *
 */

/**
 * struct drm_vmw_present_readback_arg
 * @fb_id: fb_id to present / read back from.
 * @num_clips: Number of cliprects.
 * @clips_ptr: Pointer to an array of clip rects cast to an __u64.
 * @fence_rep: Pointer to a struct drm_vmw_fence_rep, cast to an __u64.
 * If this member is NULL, then the ioctl should not return a fence.
 */
struct drm_vmw_present_readback_arg {
        __u32 fb_id;
        __u32 num_clips;
        __u64 clips_ptr;
        __u64 fence_rep;
};

/*************************************************************************/
/**
 * DRM_VMW_UPDATE_LAYOUT - Update layout
 *
 * Updates the preferred modes and connection status for connectors. The
 * command consists of one drm_vmw_update_layout_arg pointing to an array
 * of num_outputs drm_vmw_rect's.
 */

/**
 * struct drm_vmw_update_layout_arg
 *
 * @num_outputs: number of active connectors
 * @rects: pointer to array of drm_vmw_rect cast to an __u64
 *
 * Input argument to the DRM_VMW_UPDATE_LAYOUT Ioctl.
 */
struct drm_vmw_update_layout_arg {
        __u32 num_outputs;
        __u32 pad64;
        __u64 rects;
};
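
/*
 * Example (illustrative sketch, not part of the UAPI): requesting a
 * side-by-side dual-head layout. The geometry chosen here is arbitrary and
 * error handling is omitted.
 *
 *      struct drm_vmw_rect rects[2] = {
 *              { .x = 0,    .y = 0, .w = 1920, .h = 1080 },
 *              { .x = 1920, .y = 0, .w = 1920, .h = 1080 },
 *      };
 *      struct drm_vmw_update_layout_arg layout;
 *
 *      memset(&layout, 0, sizeof(layout));
 *      layout.num_outputs = 2;
 *      layout.rects = (__u64)(unsigned long)rects;
 *
 *      drmCommandWrite(fd, DRM_VMW_UPDATE_LAYOUT, &layout, sizeof(layout));
 */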

/*************************************************************************/
/**
 * DRM_VMW_CREATE_SHADER - Create shader
 *
 * Creates a shader and optionally binds it to a dma buffer containing
 * the shader byte-code.
 *
 */

/**
 * enum drm_vmw_shader_type - Shader types
 */
enum drm_vmw_shader_type {
        drm_vmw_shader_type_vs = 0,
        drm_vmw_shader_type_ps,
};

/**
 * struct drm_vmw_shader_create_arg
 *
 * @shader_type: Shader type of the shader to create.
 * @size: Size of the byte-code in bytes.
 * @buffer_handle: Buffer handle identifying the buffer containing the
 * shader byte-code
 * @shader_handle: On successful completion contains a handle that
 * can be used to subsequently identify the shader.
 * @offset: Offset in bytes into the buffer given by @buffer_handle,
 * where the shader byte-code starts.
 *
 * Input / Output argument to the DRM_VMW_CREATE_SHADER Ioctl.
 */
struct drm_vmw_shader_create_arg {
        enum drm_vmw_shader_type shader_type;
        __u32 size;
        __u32 buffer_handle;
        __u32 shader_handle;
        __u64 offset;
};

/*************************************************************************/
/**
 * DRM_VMW_UNREF_SHADER - Unreferences a shader
 *
 * Destroys a user-space reference to a shader, optionally destroying
 * it.
 */

/**
 * struct drm_vmw_shader_arg
 *
 * @handle: Handle identifying the shader to destroy.
 *
 * Input argument to the DRM_VMW_UNREF_SHADER ioctl.
 */
struct drm_vmw_shader_arg {
        __u32 handle;
        __u32 pad64;
};

/*************************************************************************/
/**
 * DRM_VMW_GB_SURFACE_CREATE - Create a host guest-backed surface.
 *
 * Allocates a surface handle and queues a create surface command
 * for the host on the first use of the surface. The surface ID can
 * be used as the surface ID in commands referencing the surface.
 */

/**
 * enum drm_vmw_surface_flags
 *
 * @drm_vmw_surface_flag_shareable: Whether the surface is shareable
 * @drm_vmw_surface_flag_scanout: Whether the surface is a scanout
 * surface.
 * @drm_vmw_surface_flag_create_buffer: Create a backup buffer if none is
 * given.
 * @drm_vmw_surface_flag_coherent: Back surface with coherent memory.
 */
enum drm_vmw_surface_flags {
        drm_vmw_surface_flag_shareable = (1 << 0),
        drm_vmw_surface_flag_scanout = (1 << 1),
        drm_vmw_surface_flag_create_buffer = (1 << 2),
        drm_vmw_surface_flag_coherent = (1 << 3),
};

/**
 * struct drm_vmw_gb_surface_create_req
 *
 * @svga3d_flags: SVGA3d surface flags for the device.
 * @format: SVGA3d format.
 * @mip_levels: Number of mip levels for all faces.
 * @drm_surface_flags: Flags as described above.
 * @multisample_count: Future use. Set to 0.
 * @autogen_filter: Future use. Set to 0.
 * @buffer_handle: Buffer handle of backup buffer. SVGA3D_INVALID_ID
 * if none.
 * @base_size: Size of the base mip level for all faces.
 * @array_size: Must be zero for non-DX hardware, and if non-zero
 * svga3d_flags must have proper bind flags setup.
 *
 * Input argument to the DRM_VMW_GB_SURFACE_CREATE Ioctl.
 * Part of output argument for the DRM_VMW_GB_SURFACE_REF Ioctl.
 */
struct drm_vmw_gb_surface_create_req {
        __u32 svga3d_flags;
        __u32 format;
        __u32 mip_levels;
        enum drm_vmw_surface_flags drm_surface_flags;
        __u32 multisample_count;
        __u32 autogen_filter;
        __u32 buffer_handle;
        __u32 array_size;
        struct drm_vmw_size base_size;
};

/**
 * struct drm_vmw_gb_surface_create_rep
 *
 * @handle: Surface handle.
 * @backup_size: Size of backup buffers for this surface.
 * @buffer_handle: Handle of backup buffer. SVGA3D_INVALID_ID if none.
 * @buffer_size: Actual size of the buffer identified by
 * @buffer_handle
 * @buffer_map_handle: Offset into device address space for the buffer
 * identified by @buffer_handle.
 *
 * Part of output argument for the DRM_VMW_GB_SURFACE_REF ioctl.
 * Output argument for the DRM_VMW_GB_SURFACE_CREATE ioctl.
 */
struct drm_vmw_gb_surface_create_rep {
        __u32 handle;
        __u32 backup_size;
        __u32 buffer_handle;
        __u32 buffer_size;
        __u64 buffer_map_handle;
};

/**
 * union drm_vmw_gb_surface_create_arg
 *
 * @req: Input argument as described above.
 * @rep: Output argument as described above.
 *
 * Argument to the DRM_VMW_GB_SURFACE_CREATE ioctl.
 */
union drm_vmw_gb_surface_create_arg {
        struct drm_vmw_gb_surface_create_rep rep;
        struct drm_vmw_gb_surface_create_req req;
};

/*************************************************************************/
/**
 * DRM_VMW_GB_SURFACE_REF - Reference a host surface.
 *
 * Puts a reference on a host surface with a given handle, as previously
 * returned by the DRM_VMW_GB_SURFACE_CREATE ioctl.
 * A reference will make sure the surface isn't destroyed while we hold
 * it and will allow the calling client to use the surface handle in
 * the command stream.
 *
 * On successful return, the Ioctl returns the surface information given
 * to and returned from the DRM_VMW_GB_SURFACE_CREATE ioctl.
 */

/**
 * struct drm_vmw_gb_surface_ref_rep
 *
 * @creq: The data used as input when the surface was created, as described
 * above at "struct drm_vmw_gb_surface_create_req"
 * @crep: Additional data output when the surface was created, as described
 * above at "struct drm_vmw_gb_surface_create_rep"
 *
 * Output Argument to the DRM_VMW_GB_SURFACE_REF ioctl.
 */
struct drm_vmw_gb_surface_ref_rep {
        struct drm_vmw_gb_surface_create_req creq;
        struct drm_vmw_gb_surface_create_rep crep;
};

/**
 * union drm_vmw_gb_surface_reference_arg
 *
 * @req: Input data as described above at "struct drm_vmw_surface_arg"
 * @rep: Output data as described above at "struct drm_vmw_gb_surface_ref_rep"
 *
 * Argument to the DRM_VMW_GB_SURFACE_REF Ioctl.
 */
union drm_vmw_gb_surface_reference_arg {
        struct drm_vmw_gb_surface_ref_rep rep;
        struct drm_vmw_surface_arg req;
};

/*************************************************************************/
/**
 * DRM_VMW_SYNCCPU - Sync a DMA buffer / MOB for CPU access.
 *
 * Idles any previously submitted GPU operations on the buffer and
 * by default blocks command submissions that reference the buffer.
 * If the file descriptor used to grab a blocking CPU sync is closed, the
 * cpu sync is released.
 * The flags argument indicates how the grab / release operation should be
 * performed:
 */

/**
 * enum drm_vmw_synccpu_flags - Synccpu flags:
 *
 * @drm_vmw_synccpu_read: Sync for read. If sync is done for read only, it's a
 * hint to the kernel to allow command submissions that reference the buffer
 * for read-only.
 * @drm_vmw_synccpu_write: Sync for write. Block all command submissions
 * referencing this buffer.
 * @drm_vmw_synccpu_dontblock: Don't wait for GPU idle, but rather return
 * -EBUSY should the buffer be busy.
 * @drm_vmw_synccpu_allow_cs: Allow command submission that touches the buffer
 * while the buffer is synced for CPU. This is similar to the GEM bo idle
 * behavior.
 */
enum drm_vmw_synccpu_flags {
        drm_vmw_synccpu_read = (1 << 0),
        drm_vmw_synccpu_write = (1 << 1),
        drm_vmw_synccpu_dontblock = (1 << 2),
        drm_vmw_synccpu_allow_cs = (1 << 3)
};

/**
 * enum drm_vmw_synccpu_op - Synccpu operations:
 *
 * @drm_vmw_synccpu_grab: Grab the buffer for CPU operations
 * @drm_vmw_synccpu_release: Release a previous grab.
 */
enum drm_vmw_synccpu_op {
        drm_vmw_synccpu_grab,
        drm_vmw_synccpu_release
};

/**
 * struct drm_vmw_synccpu_arg
 *
 * @op: The synccpu operation as described above.
 * @handle: Handle identifying the buffer object.
 * @flags: Flags as described above.
 */
struct drm_vmw_synccpu_arg {
        enum drm_vmw_synccpu_op op;
        enum drm_vmw_synccpu_flags flags;
        __u32 handle;
        __u32 pad64;
};
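
/*
 * Example (illustrative sketch, not part of the UAPI): bracketing a CPU write
 * to a previously mapped buffer object with a synccpu grab / release pair.
 * "bo_handle" is the handle from DRM_VMW_ALLOC_BO; the surrounding code is
 * hypothetical and error handling is omitted.
 *
 *      struct drm_vmw_synccpu_arg sync;
 *
 *      memset(&sync, 0, sizeof(sync));
 *      sync.op = drm_vmw_synccpu_grab;
 *      sync.flags = drm_vmw_synccpu_write;
 *      sync.handle = bo_handle;
 *      drmCommandWrite(fd, DRM_VMW_SYNCCPU, &sync, sizeof(sync));
 *
 *      // ... write to the mmap()ed buffer contents here ...
 *
 *      sync.op = drm_vmw_synccpu_release;
 *      drmCommandWrite(fd, DRM_VMW_SYNCCPU, &sync, sizeof(sync));
 */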

/*************************************************************************/
/**
 * DRM_VMW_CREATE_EXTENDED_CONTEXT - Create a host context.
 *
 * Allocates a device unique context id, and queues a create context command
 * for the host. Does not wait for host completion.
 */
enum drm_vmw_extended_context {
        drm_vmw_context_legacy,
        drm_vmw_context_dx
};

/**
 * union drm_vmw_extended_context_arg
 *
 * @req: Context type.
 * @rep: Context identifier.
 *
 * Argument to the DRM_VMW_CREATE_EXTENDED_CONTEXT Ioctl.
 */
union drm_vmw_extended_context_arg {
        enum drm_vmw_extended_context req;
        struct drm_vmw_context_arg rep;
};

/*************************************************************************/
/**
 * DRM_VMW_HANDLE_CLOSE - Close a user-space handle and release its
 * underlying resource.
 *
 * Note that this ioctl is overlaid on the deprecated DRM_VMW_UNREF_DMABUF
 * Ioctl.
 */

/**
 * struct drm_vmw_handle_close_arg
 *
 * @handle: Handle to close.
 *
 * Argument to the DRM_VMW_HANDLE_CLOSE Ioctl.
 */
struct drm_vmw_handle_close_arg {
        __u32 handle;
        __u32 pad64;
};
#define drm_vmw_unref_dmabuf_arg drm_vmw_handle_close_arg

/*************************************************************************/
/**
 * DRM_VMW_GB_SURFACE_CREATE_EXT - Create a host guest-backed surface.
 *
 * Allocates a surface handle and queues a create surface command
 * for the host on the first use of the surface. The surface ID can
 * be used as the surface ID in commands referencing the surface.
 *
 * This new command extends DRM_VMW_GB_SURFACE_CREATE by adding a version
 * parameter and a 64 bit svga flag.
 */

/**
 * enum drm_vmw_surface_version
 *
 * @drm_vmw_gb_surface_v1: Corresponds to current gb surface format with
 * svga3d surface flags split into 2, upper half and lower half.
 */
enum drm_vmw_surface_version {
        drm_vmw_gb_surface_v1
};

/**
 * struct drm_vmw_gb_surface_create_ext_req
 *
 * @base: Surface create parameters.
 * @version: Version of surface create ioctl.
 * @svga3d_flags_upper_32_bits: Upper 32 bits of svga3d flags.
 * @multisample_pattern: Multisampling pattern when msaa is supported.
 * @quality_level: Precision settings for each sample.
 * @must_be_zero: Reserved for future usage.
 *
 * Input argument to the DRM_VMW_GB_SURFACE_CREATE_EXT Ioctl.
 * Part of output argument for the DRM_VMW_GB_SURFACE_REF_EXT Ioctl.
 */
struct drm_vmw_gb_surface_create_ext_req {
        struct drm_vmw_gb_surface_create_req base;
        enum drm_vmw_surface_version version;
        uint32_t svga3d_flags_upper_32_bits;
        SVGA3dMSPattern multisample_pattern;
        SVGA3dMSQualityLevel quality_level;
        uint64_t must_be_zero;
};

/**
 * union drm_vmw_gb_surface_create_ext_arg
 *
 * @req: Input argument as described above.
 * @rep: Output argument as described above.
 *
 * Argument to the DRM_VMW_GB_SURFACE_CREATE_EXT ioctl.
 */
union drm_vmw_gb_surface_create_ext_arg {
        struct drm_vmw_gb_surface_create_rep rep;
        struct drm_vmw_gb_surface_create_ext_req req;
};

/*************************************************************************/
/**
 * DRM_VMW_GB_SURFACE_REF_EXT - Reference a host surface.
 *
 * Puts a reference on a host surface with a given handle, as previously
 * returned by the DRM_VMW_GB_SURFACE_CREATE_EXT ioctl.
 * A reference will make sure the surface isn't destroyed while we hold
 * it and will allow the calling client to use the surface handle in
 * the command stream.
 *
 * On successful return, the Ioctl returns the surface information given
 * to and returned from the DRM_VMW_GB_SURFACE_CREATE_EXT ioctl.
 */

/**
 * struct drm_vmw_gb_surface_ref_ext_rep
 *
 * @creq: The data used as input when the surface was created, as described
 * above at "struct drm_vmw_gb_surface_create_ext_req"
 * @crep: Additional data output when the surface was created, as described
 * above at "struct drm_vmw_gb_surface_create_rep"
 *
 * Output Argument to the DRM_VMW_GB_SURFACE_REF_EXT ioctl.
 */
struct drm_vmw_gb_surface_ref_ext_rep {
        struct drm_vmw_gb_surface_create_ext_req creq;
        struct drm_vmw_gb_surface_create_rep crep;
};

/**
 * union drm_vmw_gb_surface_reference_ext_arg
 *
 * @req: Input data as described above at "struct drm_vmw_surface_arg"
 * @rep: Output data as described above at
 * "struct drm_vmw_gb_surface_ref_ext_rep"
 *
 * Argument to the DRM_VMW_GB_SURFACE_REF_EXT Ioctl.
 */
union drm_vmw_gb_surface_reference_ext_arg {
        struct drm_vmw_gb_surface_ref_ext_rep rep;
        struct drm_vmw_surface_arg req;
};

/**
 * struct drm_vmw_msg_arg
 *
 * @send: Pointer to user-space msg string (null terminated).
 * @receive: Pointer to user-space receive buffer.
 * @send_only: Boolean whether this is only sending or receiving too.
 *
 * Argument to the DRM_VMW_MSG ioctl.
 */
struct drm_vmw_msg_arg {
        __u64 send;
        __u64 receive;
        __s32 send_only;
        __u32 receive_len;
};
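
/*
 * Example (illustrative sketch, not part of the UAPI): sending a message
 * through the guest/host message channel without expecting a reply. The
 * message text is arbitrary, error handling is omitted, and the surrounding
 * code is hypothetical.
 *
 *      const char *msg = "log Hello from the guest";
 *      struct drm_vmw_msg_arg arg;
 *
 *      memset(&arg, 0, sizeof(arg));
 *      arg.send = (__u64)(unsigned long)msg;
 *      arg.send_only = 1;
 *
 *      drmCommandWriteRead(fd, DRM_VMW_MSG, &arg, sizeof(arg));
 */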

#if defined(__cplusplus)
}
#endif

#endif