// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2014 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"

#include <drm/ttm/ttm_placement.h>

#include "device_include/svga_overlay.h"
#include "device_include/svga_escape.h"

#define VMW_MAX_NUM_STREAMS 1
#define VMW_OVERLAY_CAP_MASK (SVGA_FIFO_CAP_VIDEO | SVGA_FIFO_CAP_ESCAPE)

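/*
 * Per-stream state: the currently displayed buffer (if any), whether the
 * stream has been claimed by userspace, whether it is paused, and the last
 * control stream arguments so a paused stream can later be resumed.
 */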
struct vmw_stream {
        struct vmw_buffer_object *buf;
        bool claimed;
        bool paused;
        struct drm_vmw_control_stream_arg saved;
};

struct vmw_overlay {
        /*
         * Each stream is a single overlay. In Xv these are called ports.
         */
        struct mutex mutex;
        struct vmw_stream stream[VMW_MAX_NUM_STREAMS];
};

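/*
 * Return the overlay state for @dev, or NULL if the overlay code has not
 * been initialized for this device.
 */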
static inline struct vmw_overlay *vmw_overlay(struct drm_device *dev)
{
        struct vmw_private *dev_priv = vmw_priv(dev);

        return dev_priv ? dev_priv->overlay_priv : NULL;
}

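/*
 * Command layouts for the SVGA escape mechanism used to program the video
 * overlay registers and to flush a stream.
 */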
struct vmw_escape_header {
        uint32_t cmd;
        SVGAFifoCmdEscape body;
};

struct vmw_escape_video_flush {
        struct vmw_escape_header escape;
        SVGAEscapeVideoFlush flush;
};

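/* Initialize an escape command header carrying @size bytes of payload. */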
static inline void fill_escape(struct vmw_escape_header *header,
                               uint32_t size)
{
        header->cmd = SVGA_CMD_ESCAPE;
        header->body.nsid = SVGA_ESCAPE_NSID_VMWARE;
        header->body.size = size;
}

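/* Build the escape command that flushes video stream @stream_id. */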
static inline void fill_flush(struct vmw_escape_video_flush *cmd,
                              uint32_t stream_id)
{
        fill_escape(&cmd->escape, sizeof(cmd->flush));
        cmd->flush.cmdType = SVGA_ESCAPE_VMWARE_VIDEO_FLUSH;
        cmd->flush.streamId = stream_id;
}

/**
 * Send put command to hw.
 *
 * Returns
 * 0 on success.
 * -ERESTARTSYS if interrupted by a signal.
 */
static int vmw_overlay_send_put(struct vmw_private *dev_priv,
                                struct vmw_buffer_object *buf,
                                struct drm_vmw_control_stream_arg *arg,
                                bool interruptible)
{
        struct vmw_escape_video_flush *flush;
        size_t fifo_size;
        bool have_so = (dev_priv->active_display_unit == vmw_du_screen_object);
        int i, num_items;
        SVGAGuestPtr ptr;

        struct {
                struct vmw_escape_header escape;
                struct {
                        uint32_t cmdType;
                        uint32_t streamId;
                } header;
        } *cmds;
        struct {
                uint32_t registerId;
                uint32_t value;
        } *items;

        /* The SVGA_VIDEO_* defines are register indices, so the item count
         * is the highest index needed + 1.
         */
        if (have_so)
                num_items = SVGA_VIDEO_DST_SCREEN_ID + 1;
        else
                num_items = SVGA_VIDEO_PITCH_3 + 1;

        fifo_size = sizeof(*cmds) + sizeof(*flush) + sizeof(*items) * num_items;

        cmds = vmw_fifo_reserve(dev_priv, fifo_size);
        /* hardware has hung, can't do anything here */
        if (!cmds)
                return -ENOMEM;

        items = (typeof(items))&cmds[1];
        flush = (struct vmw_escape_video_flush *)&items[num_items];

        /* the size is header + number of items */
        fill_escape(&cmds->escape, sizeof(*items) * (num_items + 1));

        cmds->header.cmdType = SVGA_ESCAPE_VMWARE_VIDEO_SET_REGS;
        cmds->header.streamId = arg->stream_id;

        /* the IDs are neatly numbered */
        for (i = 0; i < num_items; i++)
                items[i].registerId = i;

        vmw_bo_get_guest_ptr(&buf->base, &ptr);
        ptr.offset += arg->offset;

        items[SVGA_VIDEO_ENABLED].value = true;
        items[SVGA_VIDEO_FLAGS].value = arg->flags;
        items[SVGA_VIDEO_DATA_OFFSET].value = ptr.offset;
        items[SVGA_VIDEO_FORMAT].value = arg->format;
        items[SVGA_VIDEO_COLORKEY].value = arg->color_key;
        items[SVGA_VIDEO_SIZE].value = arg->size;
        items[SVGA_VIDEO_WIDTH].value = arg->width;
        items[SVGA_VIDEO_HEIGHT].value = arg->height;
        items[SVGA_VIDEO_SRC_X].value = arg->src.x;
        items[SVGA_VIDEO_SRC_Y].value = arg->src.y;
        items[SVGA_VIDEO_SRC_WIDTH].value = arg->src.w;
        items[SVGA_VIDEO_SRC_HEIGHT].value = arg->src.h;
        items[SVGA_VIDEO_DST_X].value = arg->dst.x;
        items[SVGA_VIDEO_DST_Y].value = arg->dst.y;
        items[SVGA_VIDEO_DST_WIDTH].value = arg->dst.w;
        items[SVGA_VIDEO_DST_HEIGHT].value = arg->dst.h;
        items[SVGA_VIDEO_PITCH_1].value = arg->pitch[0];
        items[SVGA_VIDEO_PITCH_2].value = arg->pitch[1];
        items[SVGA_VIDEO_PITCH_3].value = arg->pitch[2];

        if (have_so) {
                items[SVGA_VIDEO_DATA_GMRID].value = ptr.gmrId;
                items[SVGA_VIDEO_DST_SCREEN_ID].value = SVGA_ID_INVALID;
        }

        fill_flush(flush, arg->stream_id);

        vmw_fifo_commit(dev_priv, fifo_size);

        return 0;
}

/**
 * Send stop command to hw.
 *
 * Returns
 * 0 on success.
 * -ERESTARTSYS if interrupted by a signal.
 */
static int vmw_overlay_send_stop(struct vmw_private *dev_priv,
                                 uint32_t stream_id,
                                 bool interruptible)
{
        struct {
                struct vmw_escape_header escape;
                SVGAEscapeVideoSetRegs body;
                struct vmw_escape_video_flush flush;
        } *cmds;
        int ret;

        for (;;) {
                cmds = vmw_fifo_reserve(dev_priv, sizeof(*cmds));
                if (cmds)
                        break;

                ret = vmw_fallback_wait(dev_priv, false, true, 0,
                                        interruptible, 3*HZ);
                if (interruptible && ret == -ERESTARTSYS)
                        return ret;
                else
                        BUG_ON(ret != 0);
        }

        fill_escape(&cmds->escape, sizeof(cmds->body));
        cmds->body.header.cmdType = SVGA_ESCAPE_VMWARE_VIDEO_SET_REGS;
        cmds->body.header.streamId = stream_id;
        cmds->body.items[0].registerId = SVGA_VIDEO_ENABLED;
        cmds->body.items[0].value = false;
        fill_flush(&cmds->flush, stream_id);

        vmw_fifo_commit(dev_priv, sizeof(*cmds));

        return 0;
}

/**
 * Move a buffer to vram or gmr if @pin is set, else unpin the buffer.
 *
 * With the introduction of screen objects buffers could now be
 * used with GMRs instead of being locked to vram.
 */
static int vmw_overlay_move_buffer(struct vmw_private *dev_priv,
                                   struct vmw_buffer_object *buf,
                                   bool pin, bool inter)
{
        if (!pin)
                return vmw_bo_unpin(dev_priv, buf, inter);

        if (dev_priv->active_display_unit == vmw_du_legacy)
                return vmw_bo_pin_in_vram(dev_priv, buf, inter);

        return vmw_bo_pin_in_vram_or_gmr(dev_priv, buf, inter);
}

/**
 * Stop or pause a stream.
 *
 * If the stream is paused, the NO_EVICT flag is removed from the buffer
 * but the buffer is left in vram. This allows, for instance, mode_set to
 * evict it and the overlay code to move it back into vram on resume.
 *
 * The caller must hold the overlay lock.
 *
 * @stream_id which stream to stop/pause.
 * @pause true to pause, false to stop completely.
 */
static int vmw_overlay_stop(struct vmw_private *dev_priv,
                            uint32_t stream_id, bool pause,
                            bool interruptible)
{
        struct vmw_overlay *overlay = dev_priv->overlay_priv;
        struct vmw_stream *stream = &overlay->stream[stream_id];
        int ret = 0;

        /* no buffer attached, the stream is completely stopped */
        if (!stream->buf)
                return 0;

        /* If the stream is paused this is already done */
        if (!stream->paused) {
                ret = vmw_overlay_send_stop(dev_priv, stream_id,
                                            interruptible);
                if (ret)
                        return ret;

                /* We just remove the NO_EVICT flag so no -ENOMEM */
                ret = vmw_overlay_move_buffer(dev_priv, stream->buf, false,
                                              interruptible);
                if (interruptible && ret == -ERESTARTSYS)
                        return ret;
                else
                        BUG_ON(ret != 0);
        }

        if (!pause) {
                vmw_bo_unreference(&stream->buf);
                stream->paused = false;
        } else {
                stream->paused = true;
        }

        return 0;
}

/**
 * Update a stream and send any put or stop fifo commands needed.
 *
 * The caller must hold the overlay lock.
 *
 * Returns
 * 0 on success.
 * -ENOMEM if the buffer doesn't fit in vram.
 * -ERESTARTSYS if interrupted.
 */
static int vmw_overlay_update_stream(struct vmw_private *dev_priv,
                                     struct vmw_buffer_object *buf,
                                     struct drm_vmw_control_stream_arg *arg,
                                     bool interruptible)
{
        struct vmw_overlay *overlay = dev_priv->overlay_priv;
        struct vmw_stream *stream = &overlay->stream[arg->stream_id];
        int ret = 0;

        if (!buf)
                return -EINVAL;

        DRM_DEBUG(" %s: old %p, new %p, %spaused\n", __func__,
                  stream->buf, buf, stream->paused ? "" : "not ");

        if (stream->buf != buf) {
                ret = vmw_overlay_stop(dev_priv, arg->stream_id,
                                       false, interruptible);
                if (ret)
                        return ret;
        } else if (!stream->paused) {
                /* If the buffers match and the stream is not paused, just
                 * send the put command; there is nothing else to do.
                 */
                ret = vmw_overlay_send_put(dev_priv, buf, arg, interruptible);
                if (ret == 0)
                        stream->saved = *arg;
                else
                        BUG_ON(!interruptible);

                return ret;
        }

        /* We don't start the old stream if we are interrupted.
         * Might return -ENOMEM if it can't fit the buffer in vram.
         */
        ret = vmw_overlay_move_buffer(dev_priv, buf, true, interruptible);
        if (ret)
                return ret;

        ret = vmw_overlay_send_put(dev_priv, buf, arg, interruptible);
        if (ret) {
                /* This one needs to happen no matter what. We only remove
                 * the NO_EVICT flag so this is safe from -ENOMEM.
                 */
                BUG_ON(vmw_overlay_move_buffer(dev_priv, buf, false, false)
                       != 0);
                return ret;
        }

        if (stream->buf != buf)
                stream->buf = vmw_bo_reference(buf);
        stream->saved = *arg;
        /* stream is no longer stopped/paused */
        stream->paused = false;

        return 0;
}

/**
 * Stop all streams.
 *
 * Used by the fb code when starting.
 *
 * Takes the overlay lock.
 */
int vmw_overlay_stop_all(struct vmw_private *dev_priv)
{
        struct vmw_overlay *overlay = dev_priv->overlay_priv;
        int i, ret;

        if (!overlay)
                return 0;

        mutex_lock(&overlay->mutex);

        for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
                struct vmw_stream *stream = &overlay->stream[i];

                if (!stream->buf)
                        continue;

                ret = vmw_overlay_stop(dev_priv, i, false, false);
                WARN_ON(ret != 0);
        }

        mutex_unlock(&overlay->mutex);

        return 0;
}

/**
 * Try to resume all paused streams.
 *
 * Used by the kms code after moving a new scanout buffer to vram.
 *
 * Takes the overlay lock.
 */
int vmw_overlay_resume_all(struct vmw_private *dev_priv)
{
        struct vmw_overlay *overlay = dev_priv->overlay_priv;
        int i, ret;

        if (!overlay)
                return 0;

        mutex_lock(&overlay->mutex);

        for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
                struct vmw_stream *stream = &overlay->stream[i];

                if (!stream->paused)
                        continue;

                ret = vmw_overlay_update_stream(dev_priv, stream->buf,
                                                &stream->saved, false);
                if (ret != 0)
                        DRM_INFO("%s: *warning* failed to resume stream %i\n",
                                 __func__, i);
        }

        mutex_unlock(&overlay->mutex);

        return 0;
}

/**
 * Pauses all active streams.
 *
 * Used by the kms code when moving a new scanout buffer to vram.
 *
 * Takes the overlay lock.
 */
int vmw_overlay_pause_all(struct vmw_private *dev_priv)
{
        struct vmw_overlay *overlay = dev_priv->overlay_priv;
        int i, ret;

        if (!overlay)
                return 0;

        mutex_lock(&overlay->mutex);

        for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
                if (overlay->stream[i].paused)
                        DRM_INFO("%s: *warning* stream %i already paused\n",
                                 __func__, i);

                ret = vmw_overlay_stop(dev_priv, i, true, false);
                WARN_ON(ret != 0);
        }

        mutex_unlock(&overlay->mutex);

        return 0;
}

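/*
 * Overlays are available only when the overlay state has been set up and
 * the device FIFO advertises both the video and escape capabilities.
 */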
static bool vmw_overlay_available(const struct vmw_private *dev_priv)
{
        return (dev_priv->overlay_priv != NULL &&
                ((dev_priv->fifo.capabilities & VMW_OVERLAY_CAP_MASK) ==
                 VMW_OVERLAY_CAP_MASK));
}

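/*
 * Ioctl handler for controlling an overlay stream: looks up the user
 * stream and then either stops it or updates it with the supplied buffer,
 * depending on the arguments.
 *
 * Takes the overlay lock.
 */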
int vmw_overlay_ioctl(struct drm_device *dev, void *data,
                      struct drm_file *file_priv)
{
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_overlay *overlay = dev_priv->overlay_priv;
        struct drm_vmw_control_stream_arg *arg =
            (struct drm_vmw_control_stream_arg *)data;
        struct vmw_buffer_object *buf;
        struct vmw_resource *res;
        int ret;

        if (!vmw_overlay_available(dev_priv))
                return -ENOSYS;

        ret = vmw_user_stream_lookup(dev_priv, tfile, &arg->stream_id, &res);
        if (ret)
                return ret;

        mutex_lock(&overlay->mutex);

        if (!arg->enabled) {
                ret = vmw_overlay_stop(dev_priv, arg->stream_id, false, true);
                goto out_unlock;
        }

        ret = vmw_user_bo_lookup(tfile, arg->handle, &buf, NULL);
        if (ret)
                goto out_unlock;

        ret = vmw_overlay_update_stream(dev_priv, buf, arg, true);

        vmw_bo_unreference(&buf);

out_unlock:
        mutex_unlock(&overlay->mutex);
        vmw_resource_unreference(&res);

        return ret;
}

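/* Number of overlay streams the device exposes, or 0 if unavailable. */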
int vmw_overlay_num_overlays(struct vmw_private *dev_priv)
{
        if (!vmw_overlay_available(dev_priv))
                return 0;

        return VMW_MAX_NUM_STREAMS;
}

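/* Number of overlay streams that have not yet been claimed. */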
int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv)
{
        struct vmw_overlay *overlay = dev_priv->overlay_priv;
        int i, k;

        if (!vmw_overlay_available(dev_priv))
                return 0;

        mutex_lock(&overlay->mutex);

        for (i = 0, k = 0; i < VMW_MAX_NUM_STREAMS; i++)
                if (!overlay->stream[i].claimed)
                        k++;

        mutex_unlock(&overlay->mutex);

        return k;
}

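/*
 * Claim a free overlay stream and return its id in @out.
 *
 * Takes the overlay lock.
 */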
int vmw_overlay_claim(struct vmw_private *dev_priv, uint32_t *out)
{
        struct vmw_overlay *overlay = dev_priv->overlay_priv;
        int i;

        if (!overlay)
                return -ENOSYS;

        mutex_lock(&overlay->mutex);

        for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
                if (overlay->stream[i].claimed)
                        continue;

                overlay->stream[i].claimed = true;
                *out = i;
                mutex_unlock(&overlay->mutex);
                return 0;
        }

        mutex_unlock(&overlay->mutex);
        return -ESRCH;
}

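/*
 * Stop the given stream and release the claim on it.
 *
 * Takes the overlay lock.
 */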
int vmw_overlay_unref(struct vmw_private *dev_priv, uint32_t stream_id)
{
        struct vmw_overlay *overlay = dev_priv->overlay_priv;

        BUG_ON(stream_id >= VMW_MAX_NUM_STREAMS);

        if (!overlay)
                return -ENOSYS;

        mutex_lock(&overlay->mutex);

        WARN_ON(!overlay->stream[stream_id].claimed);
        vmw_overlay_stop(dev_priv, stream_id, false, false);
        overlay->stream[stream_id].claimed = false;

        mutex_unlock(&overlay->mutex);
        return 0;
}

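/* Allocate and initialize the per-device overlay state. */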
int vmw_overlay_init(struct vmw_private *dev_priv)
{
        struct vmw_overlay *overlay;
        int i;

        if (dev_priv->overlay_priv)
                return -EINVAL;

        overlay = kzalloc(sizeof(*overlay), GFP_KERNEL);
        if (!overlay)
                return -ENOMEM;

        mutex_init(&overlay->mutex);
        for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
                overlay->stream[i].buf = NULL;
                overlay->stream[i].paused = false;
                overlay->stream[i].claimed = false;
        }

        dev_priv->overlay_priv = overlay;

        return 0;
}

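/*
 * Tear down the overlay state, stopping any streams that still have a
 * buffer attached.
 */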
int vmw_overlay_close(struct vmw_private *dev_priv)
{
        struct vmw_overlay *overlay = dev_priv->overlay_priv;
        bool forgotten_buffer = false;
        int i;

        if (!overlay)
                return -ENOSYS;

        for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
                if (overlay->stream[i].buf) {
                        forgotten_buffer = true;
                        vmw_overlay_stop(dev_priv, i, false, false);
                }
        }

        WARN_ON(forgotten_buffer);

        dev_priv->overlay_priv = NULL;
        kfree(overlay);

        return 0;
}