/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "drmP.h"
#include "vmwgfx_drv.h"

#include "ttm/ttm_placement.h"

#include "svga_overlay.h"
#include "svga_escape.h"

#define VMW_MAX_NUM_STREAMS 1
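
/*
 * Overlay (video stream) handling.
 *
 * Rough lifecycle, as a sketch of how the functions below fit together
 * (not a verbatim call sequence):
 *
 *   vmw_overlay_init(dev_priv);        - at driver load
 *   vmw_overlay_claim(dev_priv, &id);  - reserve a stream for a client
 *   ...control stream requests update or stop the stream...
 *   vmw_overlay_unref(dev_priv, id);   - stop and release the stream
 *   vmw_overlay_close(dev_priv);       - at driver unload
 *
 * vmw_overlay_pause_all()/vmw_overlay_resume_all() bracket operations,
 * such as mode sets, that may need to evict overlay buffers from vram.
 */
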
struct vmw_stream {
        struct vmw_dma_buffer *buf;
        bool claimed;
        bool paused;
        struct drm_vmw_control_stream_arg saved;
};

/**
 * Overlay control
 */
struct vmw_overlay {
        /*
         * Each stream is a single overlay. In Xv these are called ports.
         */
        struct mutex mutex;
        struct vmw_stream stream[VMW_MAX_NUM_STREAMS];
};

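/*
 * Convenience accessor: the overlay state hangs off the device private
 * and may be NULL if overlays are unavailable.
 */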
static inline struct vmw_overlay *vmw_overlay(struct drm_device *dev)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        return dev_priv ? dev_priv->overlay_priv : NULL;
}

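/*
 * Video commands are sent to the device as SVGA escape packets: a generic
 * escape header followed by a video-specific payload. The structs below
 * describe the header and the flush packet sent after register updates.
 */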
struct vmw_escape_header {
        uint32_t cmd;
        SVGAFifoCmdEscape body;
};

struct vmw_escape_video_flush {
        struct vmw_escape_header escape;
        SVGAEscapeVideoFlush flush;
};

static inline void fill_escape(struct vmw_escape_header *header,
                               uint32_t size)
{
        header->cmd = SVGA_CMD_ESCAPE;
        header->body.nsid = SVGA_ESCAPE_NSID_VMWARE;
        header->body.size = size;
}

static inline void fill_flush(struct vmw_escape_video_flush *cmd,
                              uint32_t stream_id)
{
        fill_escape(&cmd->escape, sizeof(cmd->flush));
        cmd->flush.cmdType = SVGA_ESCAPE_VMWARE_VIDEO_FLUSH;
        cmd->flush.streamId = stream_id;
}

/**
 * Pin or unpin a buffer in vram.
 *
 * @dev_priv:  Driver private.
 * @buf:  DMA buffer to pin or unpin.
 * @pin:  Pin buffer in vram if true.
 * @interruptible:  Use interruptible wait.
 *
 * Takes the current master's ttm lock in read.
 *
 * Returns
 * -ERESTARTSYS if interrupted by a signal.
 */
static int vmw_dmabuf_pin_in_vram(struct vmw_private *dev_priv,
                                  struct vmw_dma_buffer *buf,
                                  bool pin, bool interruptible)
{
        struct ttm_buffer_object *bo = &buf->base;
        struct ttm_placement *overlay_placement = &vmw_vram_placement;
        int ret;

        ret = ttm_read_lock(&dev_priv->active_master->lock, interruptible);
        if (unlikely(ret != 0))
                return ret;

        ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
        if (unlikely(ret != 0))
                goto err;

        /* Pinning uses the no-evict vram placement; unpinning allows eviction. */
        if (pin)
                overlay_placement = &vmw_vram_ne_placement;

        ret = ttm_bo_validate(bo, overlay_placement, interruptible, false, false);

        ttm_bo_unreserve(bo);

err:
        ttm_read_unlock(&dev_priv->active_master->lock);

        return ret;
}

/**
 * Send put command to hw.
 *
 * Returns
 * -ERESTARTSYS if interrupted by a signal.
 */
static int vmw_overlay_send_put(struct vmw_private *dev_priv,
                                struct vmw_dma_buffer *buf,
                                struct drm_vmw_control_stream_arg *arg,
                                bool interruptible)
{
        struct {
                struct vmw_escape_header escape;
                struct {
                        struct {
                                uint32_t cmdType;
                                uint32_t streamId;
                        } header;
                        struct {
                                uint32_t registerId;
                                uint32_t value;
                        } items[SVGA_VIDEO_PITCH_3 + 1];
                } body;
                struct vmw_escape_video_flush flush;
        } *cmds;
        uint32_t offset;
        int i, ret;

        /* Keep retrying the reservation, waiting for the fifo to drain. */
        for (;;) {
                cmds = vmw_fifo_reserve(dev_priv, sizeof(*cmds));
                if (cmds)
                        break;

                ret = vmw_fallback_wait(dev_priv, false, true, 0,
                                        interruptible, 3*HZ);
                if (interruptible && ret == -ERESTARTSYS)
                        return ret;
                else
                        BUG_ON(ret != 0);
        }

        fill_escape(&cmds->escape, sizeof(cmds->body));
        cmds->body.header.cmdType = SVGA_ESCAPE_VMWARE_VIDEO_SET_REGS;
        cmds->body.header.streamId = arg->stream_id;

        for (i = 0; i <= SVGA_VIDEO_PITCH_3; i++)
                cmds->body.items[i].registerId = i;

        offset = buf->base.offset + arg->offset;

        cmds->body.items[SVGA_VIDEO_ENABLED].value     = true;
        cmds->body.items[SVGA_VIDEO_FLAGS].value       = arg->flags;
        cmds->body.items[SVGA_VIDEO_DATA_OFFSET].value = offset;
        cmds->body.items[SVGA_VIDEO_FORMAT].value      = arg->format;
        cmds->body.items[SVGA_VIDEO_COLORKEY].value    = arg->color_key;
        cmds->body.items[SVGA_VIDEO_SIZE].value        = arg->size;
        cmds->body.items[SVGA_VIDEO_WIDTH].value       = arg->width;
        cmds->body.items[SVGA_VIDEO_HEIGHT].value      = arg->height;
        cmds->body.items[SVGA_VIDEO_SRC_X].value       = arg->src.x;
        cmds->body.items[SVGA_VIDEO_SRC_Y].value       = arg->src.y;
        cmds->body.items[SVGA_VIDEO_SRC_WIDTH].value   = arg->src.w;
        cmds->body.items[SVGA_VIDEO_SRC_HEIGHT].value  = arg->src.h;
        cmds->body.items[SVGA_VIDEO_DST_X].value       = arg->dst.x;
        cmds->body.items[SVGA_VIDEO_DST_Y].value       = arg->dst.y;
        cmds->body.items[SVGA_VIDEO_DST_WIDTH].value   = arg->dst.w;
        cmds->body.items[SVGA_VIDEO_DST_HEIGHT].value  = arg->dst.h;
        cmds->body.items[SVGA_VIDEO_PITCH_1].value     = arg->pitch[0];
        cmds->body.items[SVGA_VIDEO_PITCH_2].value     = arg->pitch[1];
        cmds->body.items[SVGA_VIDEO_PITCH_3].value     = arg->pitch[2];

        fill_flush(&cmds->flush, arg->stream_id);

        vmw_fifo_commit(dev_priv, sizeof(*cmds));

        return 0;
}

/**
 * Send stop command to hw.
 *
 * Returns
 * -ERESTARTSYS if interrupted by a signal.
 */
static int vmw_overlay_send_stop(struct vmw_private *dev_priv,
                                 uint32_t stream_id,
                                 bool interruptible)
{
        struct {
                struct vmw_escape_header escape;
                SVGAEscapeVideoSetRegs body;
                struct vmw_escape_video_flush flush;
        } *cmds;
        int ret;

        /* Keep retrying the reservation, waiting for the fifo to drain. */
        for (;;) {
                cmds = vmw_fifo_reserve(dev_priv, sizeof(*cmds));
                if (cmds)
                        break;

                ret = vmw_fallback_wait(dev_priv, false, true, 0,
                                        interruptible, 3*HZ);
                if (interruptible && ret == -ERESTARTSYS)
                        return ret;
                else
                        BUG_ON(ret != 0);
        }

        fill_escape(&cmds->escape, sizeof(cmds->body));
        cmds->body.header.cmdType = SVGA_ESCAPE_VMWARE_VIDEO_SET_REGS;
        cmds->body.header.streamId = stream_id;
        cmds->body.items[0].registerId = SVGA_VIDEO_ENABLED;
        cmds->body.items[0].value = false;
        fill_flush(&cmds->flush, stream_id);

        vmw_fifo_commit(dev_priv, sizeof(*cmds));

        return 0;
}

/**
 * Stop or pause a stream.
 *
 * If the stream is paused, the no-evict flag is removed from the buffer,
 * but the buffer is left in vram. This allows, for instance, mode_set to
 * evict it should it need to.
 *
 * The caller must hold the overlay lock.
 *
 * @stream_id which stream to stop/pause.
 * @pause true to pause, false to stop completely.
 */
static int vmw_overlay_stop(struct vmw_private *dev_priv,
                            uint32_t stream_id, bool pause,
                            bool interruptible)
{
        struct vmw_overlay *overlay = dev_priv->overlay_priv;
        struct vmw_stream *stream = &overlay->stream[stream_id];
        int ret;

        /* No buffer attached, the stream is completely stopped. */
        if (!stream->buf)
                return 0;

        /* If the stream is paused this is already done. */
        if (!stream->paused) {
                ret = vmw_overlay_send_stop(dev_priv, stream_id,
                                            interruptible);
                if (ret)
                        return ret;

                /* We just remove the NO_EVICT flag so no -ENOMEM. */
                ret = vmw_dmabuf_pin_in_vram(dev_priv, stream->buf, false,
                                             interruptible);
                if (interruptible && ret == -ERESTARTSYS)
                        return ret;
                else
                        BUG_ON(ret != 0);
        }

        if (!pause) {
                vmw_dmabuf_unreference(&stream->buf);
                stream->paused = false;
        } else {
                stream->paused = true;
        }

        return 0;
}

/**
 * Update a stream and send any put or stop fifo commands needed.
 *
 * The caller must hold the overlay lock.
 *
 * Returns
 * -ENOMEM if buffer doesn't fit in vram.
 * -ERESTARTSYS if interrupted.
 */
static int vmw_overlay_update_stream(struct vmw_private *dev_priv,
                                     struct vmw_dma_buffer *buf,
                                     struct drm_vmw_control_stream_arg *arg,
                                     bool interruptible)
{
        struct vmw_overlay *overlay = dev_priv->overlay_priv;
        struct vmw_stream *stream = &overlay->stream[arg->stream_id];
        int ret = 0;

        if (!buf)
                return -EINVAL;

        DRM_DEBUG(" %s: old %p, new %p, %spaused\n", __func__,
                  stream->buf, buf, stream->paused ? "" : "not ");

        if (stream->buf != buf) {
                ret = vmw_overlay_stop(dev_priv, arg->stream_id,
                                       false, interruptible);
                if (ret)
                        return ret;
        } else if (!stream->paused) {
                /* If the buffers match and the stream is not paused, just
                 * send the put command; no need to do anything else.
                 */
                ret = vmw_overlay_send_put(dev_priv, buf, arg, interruptible);
                if (ret == 0)
                        stream->saved = *arg;
                else
                        BUG_ON(!interruptible);

                return ret;
        }

        /* We don't start the old stream if we are interrupted.
         * Might return -ENOMEM if it can't fit the buffer in vram.
         */
        ret = vmw_dmabuf_pin_in_vram(dev_priv, buf, true, interruptible);
        if (ret)
                return ret;

        ret = vmw_overlay_send_put(dev_priv, buf, arg, interruptible);
        if (ret) {
                /* This one needs to happen no matter what. We only remove
                 * the NO_EVICT flag so this is safe from -ENOMEM.
                 */
                BUG_ON(vmw_dmabuf_pin_in_vram(dev_priv, buf, false, false) != 0);
                return ret;
        }

        if (stream->buf != buf)
                stream->buf = vmw_dmabuf_reference(buf);
        stream->saved = *arg;
        /* Stream is no longer stopped/paused. */
        stream->paused = false;

        return 0;
}

/**
 * Stop all streams.
 *
 * Used by the fb code when starting.
 *
 * Takes the overlay lock.
 */
int vmw_overlay_stop_all(struct vmw_private *dev_priv)
{
        struct vmw_overlay *overlay = dev_priv->overlay_priv;
        int i, ret;

        if (!overlay)
                return 0;

        mutex_lock(&overlay->mutex);

        for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
                struct vmw_stream *stream = &overlay->stream[i];
                if (!stream->buf)
                        continue;

                ret = vmw_overlay_stop(dev_priv, i, false, false);
                WARN_ON(ret != 0);
        }

        mutex_unlock(&overlay->mutex);

        return 0;
}

/**
 * Try to resume all paused streams.
 *
 * Used by the kms code after moving a new scanout buffer to vram.
 *
 * Takes the overlay lock.
 */
int vmw_overlay_resume_all(struct vmw_private *dev_priv)
{
        struct vmw_overlay *overlay = dev_priv->overlay_priv;
        int i, ret;

        if (!overlay)
                return 0;

        mutex_lock(&overlay->mutex);

        for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
                struct vmw_stream *stream = &overlay->stream[i];
                if (!stream->paused)
                        continue;

                ret = vmw_overlay_update_stream(dev_priv, stream->buf,
                                                &stream->saved, false);
                if (ret != 0)
                        DRM_INFO("%s: *warning* failed to resume stream %i\n",
                                 __func__, i);
        }

        mutex_unlock(&overlay->mutex);

        return 0;
}

/**
 * Pauses all active streams.
 *
 * Used by the kms code when moving a new scanout buffer to vram.
 *
 * Takes the overlay lock.
 */
int vmw_overlay_pause_all(struct vmw_private *dev_priv)
{
        struct vmw_overlay *overlay = dev_priv->overlay_priv;
        int i, ret;

        if (!overlay)
                return 0;

        mutex_lock(&overlay->mutex);

        for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
                if (overlay->stream[i].paused)
                        DRM_INFO("%s: *warning* stream %i already paused\n",
                                 __func__, i);
                ret = vmw_overlay_stop(dev_priv, i, true, false);
                WARN_ON(ret != 0);
        }

        mutex_unlock(&overlay->mutex);

        return 0;
}

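/**
 * vmw_overlay_ioctl - userspace entry point for controlling a stream.
 *
 * Looks up the stream and, unless the stream is being disabled, the backing
 * dma buffer, then updates or stops the stream accordingly. Takes the
 * overlay lock.
 */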
int vmw_overlay_ioctl(struct drm_device *dev, void *data,
                      struct drm_file *file_priv)
{
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_overlay *overlay = dev_priv->overlay_priv;
        struct drm_vmw_control_stream_arg *arg =
            (struct drm_vmw_control_stream_arg *)data;
        struct vmw_dma_buffer *buf;
        struct vmw_resource *res;
        int ret;

        if (!overlay)
                return -ENOSYS;

        ret = vmw_user_stream_lookup(dev_priv, tfile, &arg->stream_id, &res);
        if (ret)
                return ret;

        mutex_lock(&overlay->mutex);

        if (!arg->enabled) {
                ret = vmw_overlay_stop(dev_priv, arg->stream_id, false, true);
                goto out_unlock;
        }

        ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &buf);
        if (ret)
                goto out_unlock;

        ret = vmw_overlay_update_stream(dev_priv, buf, arg, true);

        vmw_dmabuf_unreference(&buf);

out_unlock:
        mutex_unlock(&overlay->mutex);
        vmw_resource_unreference(&res);

        return ret;
}

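/**
 * vmw_overlay_num_overlays - total number of overlay streams supported,
 * or zero if overlays are not available on this device.
 */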
int vmw_overlay_num_overlays(struct vmw_private *dev_priv)
{
        if (!dev_priv->overlay_priv)
                return 0;

        return VMW_MAX_NUM_STREAMS;
}

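/**
 * vmw_overlay_num_free_overlays - number of streams not yet claimed.
 *
 * Takes the overlay lock.
 */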
int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv)
{
        struct vmw_overlay *overlay = dev_priv->overlay_priv;
        int i, k;

        if (!overlay)
                return 0;

        mutex_lock(&overlay->mutex);

        for (i = 0, k = 0; i < VMW_MAX_NUM_STREAMS; i++)
                if (!overlay->stream[i].claimed)
                        k++;

        mutex_unlock(&overlay->mutex);

        return k;
}

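/**
 * vmw_overlay_claim - reserve a free stream and return its id in @out.
 *
 * Takes the overlay lock. Returns an error if overlays are unavailable or
 * no stream is free.
 */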
int vmw_overlay_claim(struct vmw_private *dev_priv, uint32_t *out)
{
        struct vmw_overlay *overlay = dev_priv->overlay_priv;
        int i;

        if (!overlay)
                return -ENOSYS;

        mutex_lock(&overlay->mutex);

        for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
                if (overlay->stream[i].claimed)
                        continue;

                overlay->stream[i].claimed = true;
                *out = i;
                mutex_unlock(&overlay->mutex);
                return 0;
        }

        mutex_unlock(&overlay->mutex);
        return -ESRCH;
}

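/**
 * vmw_overlay_unref - stop a stream and mark it as unclaimed again.
 *
 * Takes the overlay lock.
 */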
int vmw_overlay_unref(struct vmw_private *dev_priv, uint32_t stream_id)
{
        struct vmw_overlay *overlay = dev_priv->overlay_priv;

        BUG_ON(stream_id >= VMW_MAX_NUM_STREAMS);

        if (!overlay)
                return -ENOSYS;

        mutex_lock(&overlay->mutex);

        WARN_ON(!overlay->stream[stream_id].claimed);
        vmw_overlay_stop(dev_priv, stream_id, false, false);
        overlay->stream[stream_id].claimed = false;

        mutex_unlock(&overlay->mutex);
        return 0;
}

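/**
 * vmw_overlay_init - allocate and set up the per-device overlay state.
 *
 * Fails if overlay state already exists or if the device lacks the fifo
 * capabilities needed for video overlays.
 */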
int vmw_overlay_init(struct vmw_private *dev_priv)
{
        struct vmw_overlay *overlay;
        int i;

        if (dev_priv->overlay_priv)
                return -EINVAL;

        /* Overlays need both the video and escape fifo capabilities. */
        if (!(dev_priv->fifo.capabilities & SVGA_FIFO_CAP_VIDEO) ||
            !(dev_priv->fifo.capabilities & SVGA_FIFO_CAP_ESCAPE)) {
                DRM_INFO("hardware doesn't support overlays\n");
                return -ENOSYS;
        }

        overlay = kzalloc(sizeof(*overlay), GFP_KERNEL);
        if (!overlay)
                return -ENOMEM;

        mutex_init(&overlay->mutex);
        for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
                overlay->stream[i].buf = NULL;
                overlay->stream[i].paused = false;
                overlay->stream[i].claimed = false;
        }

        dev_priv->overlay_priv = overlay;

        return 0;
}

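/**
 * vmw_overlay_close - tear down the per-device overlay state.
 *
 * Stops any streams that still have a buffer attached (and warns, since
 * they should have been released already) before freeing the state.
 */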
int vmw_overlay_close(struct vmw_private *dev_priv)
{
        struct vmw_overlay *overlay = dev_priv->overlay_priv;
        bool forgotten_buffer = false;
        int i;

        if (!overlay)
                return -ENOSYS;

        for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
                if (overlay->stream[i].buf) {
                        forgotten_buffer = true;
                        vmw_overlay_stop(dev_priv, i, false, false);
                }
        }

        WARN_ON(forgotten_buffer);

        dev_priv->overlay_priv = NULL;
        kfree(overlay);

        return 0;
}