// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2014 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/ttm/ttm_placement.h>

#include "device_include/svga_overlay.h"
#include "device_include/svga_escape.h"

#include "vmwgfx_drv.h"

#define VMW_MAX_NUM_STREAMS 1
#define VMW_OVERLAY_CAP_MASK (SVGA_FIFO_CAP_VIDEO | SVGA_FIFO_CAP_ESCAPE)
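
/*
 * One overlay stream (an Xv port): the backing buffer currently attached,
 * its claimed/paused state, and the last control-stream argument saved so
 * a paused stream can be resumed later.
 */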
struct vmw_stream {
        struct vmw_buffer_object *buf;
        bool claimed;
        bool paused;
        struct drm_vmw_control_stream_arg saved;
};

/*
 * Overlay control: all streams share a single mutex.
 */
struct vmw_overlay {
        /*
         * Each stream is a single overlay. In Xv these are called ports.
         */
        struct mutex mutex;
        struct vmw_stream stream[VMW_MAX_NUM_STREAMS];
};
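
/* Return the per-device overlay state, or NULL if it is not available. */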
static inline struct vmw_overlay *vmw_overlay(struct drm_device *dev)
{
        struct vmw_private *dev_priv = vmw_priv(dev);

        return dev_priv ? dev_priv->overlay_priv : NULL;
}
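
/*
 * Overlay (video) commands are sent to the device as SVGA escape packets:
 * a generic escape header followed by a VMware-namespace payload. The
 * helpers below fill in those fixed parts.
 */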
struct vmw_escape_header {
        uint32_t cmd;
        SVGAFifoCmdEscape body;
};

struct vmw_escape_video_flush {
        struct vmw_escape_header escape;
        SVGAEscapeVideoFlush flush;
};
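
/* Fill in the generic SVGA escape header for a VMware escape of @size bytes. */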
static inline void fill_escape(struct vmw_escape_header *header,
                               uint32_t size)
{
        header->cmd = SVGA_CMD_ESCAPE;
        header->body.nsid = SVGA_ESCAPE_NSID_VMWARE;
        header->body.size = size;
}
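
/* Build an escape-wrapped VIDEO_FLUSH command for @stream_id. */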
static inline void fill_flush(struct vmw_escape_video_flush *cmd,
                              uint32_t stream_id)
{
        fill_escape(&cmd->escape, sizeof(cmd->flush));
        cmd->flush.cmdType = SVGA_ESCAPE_VMWARE_VIDEO_FLUSH;
        cmd->flush.streamId = stream_id;
}

/**
 * Send put command to hw.
 * Returns -ERESTARTSYS if interrupted by a signal.
 */
static int vmw_overlay_send_put(struct vmw_private *dev_priv,
                                struct vmw_buffer_object *buf,
                                struct drm_vmw_control_stream_arg *arg,
                                bool interruptible)
{
        struct vmw_escape_video_flush *flush;
        size_t fifo_size;
        bool have_so = (dev_priv->active_display_unit == vmw_du_screen_object);
        int i, num_items;
        SVGAGuestPtr ptr;

        /* FIFO layout: escape header + SET_REGS header, then the items. */
        struct {
                struct vmw_escape_header escape;
                struct {
                        uint32_t cmdType;
                        uint32_t streamId;
                } header;
        } *cmds;
        struct {
                uint32_t registerId;
                uint32_t value;
        } *items;

        /* The register defines are indices, so a count needs + 1. */
        if (have_so)
                num_items = SVGA_VIDEO_DST_SCREEN_ID + 1;
        else
                num_items = SVGA_VIDEO_PITCH_3 + 1;

        fifo_size = sizeof(*cmds) + sizeof(*flush) + sizeof(*items) * num_items;

        cmds = VMW_FIFO_RESERVE(dev_priv, fifo_size);
        /* hardware has hung, can't do anything here */
        if (!cmds)
                return -ENOMEM;

        items = (typeof(items))&cmds[1];
        flush = (struct vmw_escape_video_flush *)&items[num_items];

        /* the size is header + number of items */
        fill_escape(&cmds->escape, sizeof(*items) * (num_items + 1));

        cmds->header.cmdType = SVGA_ESCAPE_VMWARE_VIDEO_SET_REGS;
        cmds->header.streamId = arg->stream_id;

        /* the IDs are neatly numbered */
        for (i = 0; i < num_items; i++)
                items[i].registerId = i;

        vmw_bo_get_guest_ptr(&buf->base, &ptr);
        ptr.offset += arg->offset;

        items[SVGA_VIDEO_ENABLED].value     = true;
        items[SVGA_VIDEO_FLAGS].value       = arg->flags;
        items[SVGA_VIDEO_DATA_OFFSET].value = ptr.offset;
        items[SVGA_VIDEO_FORMAT].value      = arg->format;
        items[SVGA_VIDEO_COLORKEY].value    = arg->color_key;
        items[SVGA_VIDEO_SIZE].value        = arg->size;
        items[SVGA_VIDEO_WIDTH].value       = arg->width;
        items[SVGA_VIDEO_HEIGHT].value      = arg->height;
        items[SVGA_VIDEO_SRC_X].value       = arg->src.x;
        items[SVGA_VIDEO_SRC_Y].value       = arg->src.y;
        items[SVGA_VIDEO_SRC_WIDTH].value   = arg->src.w;
        items[SVGA_VIDEO_SRC_HEIGHT].value  = arg->src.h;
        items[SVGA_VIDEO_DST_X].value       = arg->dst.x;
        items[SVGA_VIDEO_DST_Y].value       = arg->dst.y;
        items[SVGA_VIDEO_DST_WIDTH].value   = arg->dst.w;
        items[SVGA_VIDEO_DST_HEIGHT].value  = arg->dst.h;
        items[SVGA_VIDEO_PITCH_1].value     = arg->pitch[0];
        items[SVGA_VIDEO_PITCH_2].value     = arg->pitch[1];
        items[SVGA_VIDEO_PITCH_3].value     = arg->pitch[2];
        if (have_so) {
                items[SVGA_VIDEO_DATA_GMRID].value    = ptr.gmrId;
                items[SVGA_VIDEO_DST_SCREEN_ID].value = SVGA_ID_INVALID;
        }

        fill_flush(flush, arg->stream_id);

        vmw_fifo_commit(dev_priv, fifo_size);

        return 0;
}

/**
 * Send stop command to hw.
 * Returns -ERESTARTSYS if interrupted by a signal.
 */
static int vmw_overlay_send_stop(struct vmw_private *dev_priv,
                                 uint32_t stream_id,
                                 bool interruptible)
{
        struct {
                struct vmw_escape_header escape;
                SVGAEscapeVideoSetRegs body;
                struct vmw_escape_video_flush flush;
        } *cmds;
        int ret;

        /* Retry the FIFO reservation until it succeeds or the wait fails. */
        for (;;) {
                cmds = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmds));
                if (cmds)
                        break;

                ret = vmw_fallback_wait(dev_priv, false, true, 0,
                                        interruptible, 3*HZ);
                if (interruptible && ret == -ERESTARTSYS)
                        return ret;
                else
                        BUG_ON(ret != 0);
        }

        fill_escape(&cmds->escape, sizeof(cmds->body));
        cmds->body.header.cmdType = SVGA_ESCAPE_VMWARE_VIDEO_SET_REGS;
        cmds->body.header.streamId = stream_id;
        cmds->body.items[0].registerId = SVGA_VIDEO_ENABLED;
        cmds->body.items[0].value = false;
        fill_flush(&cmds->flush, stream_id);

        vmw_fifo_commit(dev_priv, sizeof(*cmds));

        return 0;
}

/**
 * Move a buffer to vram or gmr if @pin is set, else unpin the buffer.
 * With the introduction of screen objects, buffers can now be
 * used with GMRs instead of being locked to vram.
 */
static int vmw_overlay_move_buffer(struct vmw_private *dev_priv,
                                   struct vmw_buffer_object *buf,
                                   bool pin, bool inter)
{
        if (!pin)
                return vmw_bo_unpin(dev_priv, buf, inter);

        if (dev_priv->active_display_unit == vmw_du_legacy)
                return vmw_bo_pin_in_vram(dev_priv, buf, inter);

        return vmw_bo_pin_in_vram_or_gmr(dev_priv, buf, inter);
}

/**
 * Stop or pause a stream.
 *
 * If the stream is paused the no evict flag is removed from the buffer
 * but it is left in vram. This allows, for instance, mode_set to evict it
 * should it need to.
 *
 * The caller must hold the overlay lock.
 * @stream_id which stream to stop/pause.
 * @pause true to pause, false to stop completely.
 */
static int vmw_overlay_stop(struct vmw_private *dev_priv,
                            uint32_t stream_id, bool pause,
                            bool interruptible)
{
        struct vmw_overlay *overlay = dev_priv->overlay_priv;
        struct vmw_stream *stream = &overlay->stream[stream_id];
        int ret;

        /* no buffer attached, the stream is completely stopped */
        if (!stream->buf)
                return 0;

        /* If the stream is paused this is already done */
        if (!stream->paused) {
                ret = vmw_overlay_send_stop(dev_priv, stream_id,
                                            interruptible);
                if (ret)
                        return ret;

                /* We just remove the NO_EVICT flag so no -ENOMEM */
                ret = vmw_overlay_move_buffer(dev_priv, stream->buf, false,
                                              interruptible);
                if (interruptible && ret == -ERESTARTSYS)
                        return ret;
                else
                        BUG_ON(ret != 0);
        }

        if (!pause) {
                vmw_bo_unreference(&stream->buf);
                stream->paused = false;
        } else {
                stream->paused = true;
        }

        return 0;
}

/**
 * Update a stream and send any put or stop fifo commands needed.
 *
 * The caller must hold the overlay lock.
 * Returns -ENOMEM if the buffer doesn't fit in vram and
 * -ERESTARTSYS if interrupted.
 */
static int vmw_overlay_update_stream(struct vmw_private *dev_priv,
                                     struct vmw_buffer_object *buf,
                                     struct drm_vmw_control_stream_arg *arg,
                                     bool interruptible)
{
        struct vmw_overlay *overlay = dev_priv->overlay_priv;
        struct vmw_stream *stream = &overlay->stream[arg->stream_id];
        int ret = 0;

        if (!buf)
                return -EINVAL;

        DRM_DEBUG(" %s: old %p, new %p, %spaused\n", __func__,
                  stream->buf, buf, stream->paused ? "" : "not ");

        if (stream->buf != buf) {
                /* New buffer, so stop the old stream first. */
                ret = vmw_overlay_stop(dev_priv, arg->stream_id,
                                       false, interruptible);
                if (ret)
                        return ret;
        } else if (!stream->paused) {
                /* If the buffers match and not paused then just send
                 * the put command, no need to do anything else.
                 */
                ret = vmw_overlay_send_put(dev_priv, buf, arg, interruptible);
                if (ret == 0)
                        stream->saved = *arg;
                else
                        BUG_ON(!interruptible);

                return ret;
        }

        /* We don't start the old stream if we are interrupted.
         * Might return -ENOMEM if it can't fit the buffer in vram.
         */
        ret = vmw_overlay_move_buffer(dev_priv, buf, true, interruptible);
        if (ret)
                return ret;

        ret = vmw_overlay_send_put(dev_priv, buf, arg, interruptible);
        if (ret) {
                /* This one needs to happen no matter what. We only remove
                 * the NO_EVICT flag so this is safe from -ENOMEM.
                 */
                BUG_ON(vmw_overlay_move_buffer(dev_priv, buf, false, false)
                       != 0);
                return ret;
        }

        if (stream->buf != buf)
                stream->buf = vmw_bo_reference(buf);
        stream->saved = *arg;
        /* stream is no longer stopped/paused */
        stream->paused = false;

        return 0;
}

/**
 * Try to resume all paused streams.
 * Used by the kms code after moving a new scanout buffer to vram.
 * Takes the overlay lock.
 */
int vmw_overlay_resume_all(struct vmw_private *dev_priv)
{
        struct vmw_overlay *overlay = dev_priv->overlay_priv;
        int i, ret;

        if (!overlay)
                return 0;

        mutex_lock(&overlay->mutex);

        for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
                struct vmw_stream *stream = &overlay->stream[i];

                if (!stream->paused)
                        continue;

                ret = vmw_overlay_update_stream(dev_priv, stream->buf,
                                                &stream->saved, false);
                if (ret != 0)
                        DRM_INFO("%s: *warning* failed to resume stream %i\n",
                                 __func__, i);
        }

        mutex_unlock(&overlay->mutex);

        return 0;
}

/**
 * Pauses all active streams.
 * Used by the kms code when moving a new scanout buffer to vram.
 * Takes the overlay lock.
 */
int vmw_overlay_pause_all(struct vmw_private *dev_priv)
{
        struct vmw_overlay *overlay = dev_priv->overlay_priv;
        int i, ret;

        if (!overlay)
                return 0;

        mutex_lock(&overlay->mutex);

        for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
                if (overlay->stream[i].paused)
                        DRM_INFO("%s: *warning* stream %i already paused\n",
                                 __func__, i);
                ret = vmw_overlay_stop(dev_priv, i, true, false);
                WARN_ON(ret != 0);
        }

        mutex_unlock(&overlay->mutex);

        return 0;
}
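
/*
 * Overlay support requires an initialized overlay_priv and both the VIDEO
 * and ESCAPE FIFO capabilities (VMW_OVERLAY_CAP_MASK).
 */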
static bool vmw_overlay_available(const struct vmw_private *dev_priv)
{
        return (dev_priv->overlay_priv != NULL &&
                ((dev_priv->fifo.capabilities & VMW_OVERLAY_CAP_MASK) ==
                 VMW_OVERLAY_CAP_MASK));
}
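
/*
 * DRM_VMW_CONTROL_STREAM ioctl: look up the user stream, then either stop
 * it or update it with the supplied buffer object and put parameters.
 */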
int vmw_overlay_ioctl(struct drm_device *dev, void *data,
                      struct drm_file *file_priv)
{
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_overlay *overlay = dev_priv->overlay_priv;
        struct drm_vmw_control_stream_arg *arg =
            (struct drm_vmw_control_stream_arg *)data;
        struct vmw_buffer_object *buf;
        struct vmw_resource *res;
        int ret;

        if (!vmw_overlay_available(dev_priv))
                return -ENOSYS;

        ret = vmw_user_stream_lookup(dev_priv, tfile, &arg->stream_id, &res);
        if (ret)
                return ret;

        mutex_lock(&overlay->mutex);

        if (!arg->enabled) {
                ret = vmw_overlay_stop(dev_priv, arg->stream_id, false, true);
                goto out_unlock;
        }

        ret = vmw_user_bo_lookup(tfile, arg->handle, &buf, NULL);
        if (ret)
                goto out_unlock;

        ret = vmw_overlay_update_stream(dev_priv, buf, arg, true);

        vmw_bo_unreference(&buf);

out_unlock:
        mutex_unlock(&overlay->mutex);
        vmw_resource_unreference(&res);

        return ret;
}
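
/* Report how many overlay streams the device exposes (0 if unsupported). */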
int vmw_overlay_num_overlays(struct vmw_private *dev_priv)
{
        if (!vmw_overlay_available(dev_priv))
                return 0;

        return VMW_MAX_NUM_STREAMS;
}
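
/* Count the streams that are not currently claimed by userspace. */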
int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv)
{
        struct vmw_overlay *overlay = dev_priv->overlay_priv;
        int i, k;

        if (!vmw_overlay_available(dev_priv))
                return 0;

        mutex_lock(&overlay->mutex);

        for (i = 0, k = 0; i < VMW_MAX_NUM_STREAMS; i++)
                if (!overlay->stream[i].claimed)
                        k++;

        mutex_unlock(&overlay->mutex);

        return k;
}
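
/* Claim the first unclaimed stream and return its id in @out. */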
int vmw_overlay_claim(struct vmw_private *dev_priv, uint32_t *out)
{
        struct vmw_overlay *overlay = dev_priv->overlay_priv;
        int i;

        if (!overlay)
                return -ENOSYS;

        mutex_lock(&overlay->mutex);

        for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
                if (overlay->stream[i].claimed)
                        continue;

                overlay->stream[i].claimed = true;
                *out = i;
                mutex_unlock(&overlay->mutex);
                return 0;
        }

        mutex_unlock(&overlay->mutex);
        return -ESRCH;
}
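
/* Release a previously claimed stream, stopping it if it is still running. */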
int vmw_overlay_unref(struct vmw_private *dev_priv, uint32_t stream_id)
{
        struct vmw_overlay *overlay = dev_priv->overlay_priv;

        BUG_ON(stream_id >= VMW_MAX_NUM_STREAMS);

        if (!overlay)
                return -ENOSYS;

        mutex_lock(&overlay->mutex);

        WARN_ON(!overlay->stream[stream_id].claimed);
        vmw_overlay_stop(dev_priv, stream_id, false, false);
        overlay->stream[stream_id].claimed = false;

        mutex_unlock(&overlay->mutex);
        return 0;
}
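
/* Allocate and initialize the per-device overlay state. */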
int vmw_overlay_init(struct vmw_private *dev_priv)
{
        struct vmw_overlay *overlay;
        int i;

        if (dev_priv->overlay_priv)
                return -EINVAL;

        overlay = kzalloc(sizeof(*overlay), GFP_KERNEL);
        if (!overlay)
                return -ENOMEM;

        mutex_init(&overlay->mutex);
        for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
                overlay->stream[i].buf = NULL;
                overlay->stream[i].paused = false;
                overlay->stream[i].claimed = false;
        }

        dev_priv->overlay_priv = overlay;

        return 0;
}
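
/* Tear down the overlay state; warn if any stream still holds a buffer. */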
int vmw_overlay_close(struct vmw_private *dev_priv)
{
        struct vmw_overlay *overlay = dev_priv->overlay_priv;
        bool forgotten_buffer = false;
        int i;

        if (!overlay)
                return -ENOSYS;

        for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
                if (overlay->stream[i].buf) {
                        forgotten_buffer = true;
                        vmw_overlay_stop(dev_priv, i, false, false);
                }
        }

        WARN_ON(forgotten_buffer);

        dev_priv->overlay_priv = NULL;
        kfree(overlay);

        return 0;
}