/**************************************************************************
 *
 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include "vmwgfx_kms.h"

/* Might need a hrtimer here? */
#define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1)
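/*
 * vmw_du_cleanup - Release the resources held by a display unit.
 * Drops any cursor surface or cursor buffer reference still held, then
 * tears down the unit's connector, crtc and encoder DRM objects.
 */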
void vmw_du_cleanup(struct vmw_display_unit *du)
{
	if (du->cursor_surface)
		vmw_surface_unreference(&du->cursor_surface);
	if (du->cursor_dmabuf)
		vmw_dmabuf_unreference(&du->cursor_dmabuf);
	drm_connector_unregister(&du->connector);
	drm_crtc_cleanup(&du->crtc);
	drm_encoder_cleanup(&du->encoder);
	drm_connector_cleanup(&du->connector);
}
/*
 * Display Unit Cursor functions
 */
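/*
 * Sends an SVGA_CMD_DEFINE_ALPHA_CURSOR command to the device with the
 * cursor image inlined right after the fixed-size command structure.
 * The image is 32 bits per pixel (presumably BGRA with alpha), which is
 * where the width * height * 4 size computation comes from.
 */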
int vmw_cursor_update_image(struct vmw_private *dev_priv,
			    u32 *image, u32 width, u32 height,
			    u32 hotspotX, u32 hotspotY)
{
	struct {
		u32 cmd;
		SVGAFifoCmdDefineAlphaCursor cursor;
	} *cmd;
	u32 image_size = width * height * 4;
	u32 cmd_size = sizeof(*cmd) + image_size;

	if (!image)
		return -EINVAL;

	cmd = vmw_fifo_reserve(dev_priv, cmd_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed.\n");
		return -ENOMEM;
	}

	memset(cmd, 0, sizeof(*cmd));

	memcpy(&cmd[1], image, image_size);

	cmd->cmd = SVGA_CMD_DEFINE_ALPHA_CURSOR;
	cmd->cursor.id = 0;
	cmd->cursor.width = width;
	cmd->cursor.height = height;
	cmd->cursor.hotspotX = hotspotX;
	cmd->cursor.hotspotY = hotspotY;

	vmw_fifo_commit_flush(dev_priv, cmd_size);

	return 0;
}
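/*
 * Maps the pages of @dmabuf that back the cursor into kernel space and
 * hands the mapped image to vmw_cursor_update_image(). The buffer object
 * stays reserved for the duration of the mapping so it cannot move.
 */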
int vmw_cursor_update_dmabuf(struct vmw_private *dev_priv,
			     struct vmw_dma_buffer *dmabuf,
			     u32 width, u32 height,
			     u32 hotspotX, u32 hotspotY)
{
	struct ttm_bo_kmap_obj map;
	unsigned long kmap_offset;
	unsigned long kmap_num;
	void *virtual;
	bool dummy;
	int ret;

	kmap_offset = 0;
	kmap_num = (width * height * 4 + PAGE_SIZE - 1) >> PAGE_SHIFT;

	ret = ttm_bo_reserve(&dmabuf->base, true, false, NULL);
	if (unlikely(ret != 0)) {
		DRM_ERROR("reserve failed\n");
		return -EINVAL;
	}

	ret = ttm_bo_kmap(&dmabuf->base, kmap_offset, kmap_num, &map);
	if (unlikely(ret != 0))
		goto err_unreserve;

	virtual = ttm_kmap_obj_virtual(&map, &dummy);
	ret = vmw_cursor_update_image(dev_priv, virtual, width, height,
				      hotspotX, hotspotY);

	ttm_bo_kunmap(&map);
err_unreserve:
	ttm_bo_unreserve(&dmabuf->base);

	return ret;
}
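/*
 * Updates the cursor-on/off and position registers in the FIFO. Bumping
 * SVGA_FIFO_CURSOR_COUNT signals the device that a new position is
 * available.
 */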
void vmw_cursor_update_position(struct vmw_private *dev_priv,
				bool show, int x, int y)
{
	u32 *fifo_mem = dev_priv->mmio_virt;
	uint32_t count;

	vmw_mmio_write(show ? 1 : 0, fifo_mem + SVGA_FIFO_CURSOR_ON);
	vmw_mmio_write(x, fifo_mem + SVGA_FIFO_CURSOR_X);
	vmw_mmio_write(y, fifo_mem + SVGA_FIFO_CURSOR_Y);
	count = vmw_mmio_read(fifo_mem + SVGA_FIFO_CURSOR_COUNT);
	vmw_mmio_write(++count, fifo_mem + SVGA_FIFO_CURSOR_COUNT);
}
/*
 * vmw_du_crtc_cursor_set2 - Driver cursor_set2 callback.
 */
int vmw_du_crtc_cursor_set2(struct drm_crtc *crtc, struct drm_file *file_priv,
			    uint32_t handle, uint32_t width, uint32_t height,
			    int32_t hot_x, int32_t hot_y)
{
	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
	struct vmw_surface *surface = NULL;
	struct vmw_dma_buffer *dmabuf = NULL;
	s32 hotspot_x, hotspot_y;
	int ret;

	/*
	 * FIXME: Unclear whether there's any global state touched by the
	 * cursor_set function, especially vmw_cursor_update_position looks
	 * suspicious. For now take the easy route and reacquire all locks. We
	 * can do this since the caller in the drm core doesn't check anything
	 * which is protected by any locks.
	 */
	drm_modeset_unlock_crtc(crtc);
	drm_modeset_lock_all(dev_priv->dev);
	hotspot_x = hot_x + du->hotspot_x;
	hotspot_y = hot_y + du->hotspot_y;

	/* A lot of the code assumes this */
	if (handle && (width != 64 || height != 64)) {
		ret = -EINVAL;
		goto out;
	}

	if (handle) {
		struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;

		ret = vmw_user_lookup_handle(dev_priv, tfile,
					     handle, &surface, &dmabuf);
		if (ret) {
			DRM_ERROR("failed to find surface or dmabuf: %i\n",
				  ret);
			ret = -EINVAL;
			goto out;
		}
	}

	/* need to do this before taking down old image */
	if (surface && !surface->snooper.image) {
		DRM_ERROR("surface not suitable for cursor\n");
		vmw_surface_unreference(&surface);
		ret = -EINVAL;
		goto out;
	}

	/* takedown old cursor */
	if (du->cursor_surface) {
		du->cursor_surface->snooper.crtc = NULL;
		vmw_surface_unreference(&du->cursor_surface);
	}
	if (du->cursor_dmabuf)
		vmw_dmabuf_unreference(&du->cursor_dmabuf);

	/* setup new image */
	ret = 0;
	if (surface) {
		/* vmw_user_surface_lookup takes one reference */
		du->cursor_surface = surface;

		du->cursor_surface->snooper.crtc = crtc;
		du->cursor_age = du->cursor_surface->snooper.age;
		ret = vmw_cursor_update_image(dev_priv, surface->snooper.image,
					      64, 64, hotspot_x, hotspot_y);
	} else if (dmabuf) {
		/* vmw_user_surface_lookup takes one reference */
		du->cursor_dmabuf = dmabuf;

		ret = vmw_cursor_update_dmabuf(dev_priv, dmabuf, width, height,
					       hotspot_x, hotspot_y);
	} else {
		vmw_cursor_update_position(dev_priv, false, 0, 0);
		goto out;
	}

	if (!ret) {
		vmw_cursor_update_position(dev_priv, true,
					   du->cursor_x + hotspot_x,
					   du->cursor_y + hotspot_y);
		du->core_hotspot_x = hot_x;
		du->core_hotspot_y = hot_y;
	}

out:
	drm_modeset_unlock_all(dev_priv->dev);
	drm_modeset_lock_crtc(crtc, crtc->cursor);

	return ret;
}
int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
{
	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
	bool shown = du->cursor_surface || du->cursor_dmabuf ? true : false;

	du->cursor_x = x + du->set_gui_x;
	du->cursor_y = y + du->set_gui_y;

	/*
	 * FIXME: Unclear whether there's any global state touched by the
	 * cursor_set function, especially vmw_cursor_update_position looks
	 * suspicious. For now take the easy route and reacquire all locks. We
	 * can do this since the caller in the drm core doesn't check anything
	 * which is protected by any locks.
	 */
	drm_modeset_unlock_crtc(crtc);
	drm_modeset_lock_all(dev_priv->dev);

	vmw_cursor_update_position(dev_priv, shown,
				   du->cursor_x + du->hotspot_x +
				   du->core_hotspot_x,
				   du->cursor_y + du->hotspot_y +
				   du->core_hotspot_y);

	drm_modeset_unlock_all(dev_priv->dev);
	drm_modeset_lock_crtc(crtc, crtc->cursor);

	return 0;
}
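/*
 * Snoops SurfaceDMA commands headed for a cursor surface so that the
 * kernel keeps an up-to-date copy of the 64x64 cursor image in
 * srf->snooper.image. Only the simple case of a single, page-aligned,
 * zero-offset copy box is handled; anything else is logged and skipped.
 */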
void vmw_kms_cursor_snoop(struct vmw_surface *srf,
			  struct ttm_object_file *tfile,
			  struct ttm_buffer_object *bo,
			  SVGA3dCmdHeader *header)
{
	struct ttm_bo_kmap_obj map;
	unsigned long kmap_offset;
	unsigned long kmap_num;
	SVGA3dCopyBox *box;
	unsigned box_count;
	void *virtual;
	bool dummy;
	struct vmw_dma_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceDMA dma;
	} *cmd;
	int i, ret;

	cmd = container_of(header, struct vmw_dma_cmd, header);

	/* No snooper installed */
	if (!srf->snooper.image)
		return;

	if (cmd->dma.host.face != 0 || cmd->dma.host.mipmap != 0) {
		DRM_ERROR("face and mipmap for cursors should never != 0\n");
		return;
	}

	if (cmd->header.size < 64) {
		DRM_ERROR("at least one full copy box must be given\n");
		return;
	}

	box = (SVGA3dCopyBox *)&cmd[1];
	box_count = (cmd->header.size - sizeof(SVGA3dCmdSurfaceDMA)) /
			sizeof(SVGA3dCopyBox);

	if (cmd->dma.guest.ptr.offset % PAGE_SIZE ||
	    box->x != 0    || box->y != 0    || box->z != 0    ||
	    box->srcx != 0 || box->srcy != 0 || box->srcz != 0 ||
	    box->d != 1    || box_count != 1) {
		/* TODO handle non-page-aligned offsets */
		/* TODO handle more dst & src != 0 */
		/* TODO handle more than one copy */
		DRM_ERROR("Can't snoop dma request for cursor!\n");
		DRM_ERROR("(%u, %u, %u) (%u, %u, %u) (%ux%ux%u) %u %u\n",
			  box->srcx, box->srcy, box->srcz,
			  box->x, box->y, box->z,
			  box->w, box->h, box->d, box_count,
			  cmd->dma.guest.ptr.offset);
		return;
	}

	kmap_offset = cmd->dma.guest.ptr.offset >> PAGE_SHIFT;
	kmap_num = (64*64*4) >> PAGE_SHIFT;

	ret = ttm_bo_reserve(bo, true, false, NULL);
	if (unlikely(ret != 0)) {
		DRM_ERROR("reserve failed\n");
		return;
	}

	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
	if (unlikely(ret != 0))
		goto err_unreserve;

	virtual = ttm_kmap_obj_virtual(&map, &dummy);

	if (box->w == 64 && cmd->dma.guest.pitch == 64*4) {
		memcpy(srf->snooper.image, virtual, 64*64*4);
	} else {
		/* Image is unsigned pointer. */
		for (i = 0; i < box->h; i++)
			memcpy(srf->snooper.image + i * 64,
			       virtual + i * cmd->dma.guest.pitch,
			       box->w * 4);
	}

	srf->snooper.age++;

	ttm_bo_kunmap(&map);
err_unreserve:
	ttm_bo_unreserve(bo);
}
/**
 * vmw_kms_legacy_hotspot_clear - Clear legacy hotspots
 *
 * @dev_priv: Pointer to the device private struct.
 *
 * Clears all legacy hotspots.
 */
void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct vmw_display_unit *du;
	struct drm_crtc *crtc;

	drm_modeset_lock_all(dev);
	drm_for_each_crtc(crtc, dev) {
		du = vmw_crtc_to_du(crtc);

		du->hotspot_x = 0;
		du->hotspot_y = 0;
	}
	drm_modeset_unlock_all(dev);
}
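/*
 * Called after command submission; re-emits the cursor image for every
 * display unit whose snooped cursor surface has aged since the last
 * update.
 */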
void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct vmw_display_unit *du;
	struct drm_crtc *crtc;

	mutex_lock(&dev->mode_config.mutex);

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		du = vmw_crtc_to_du(crtc);
		if (!du->cursor_surface ||
		    du->cursor_age == du->cursor_surface->snooper.age)
			continue;

		du->cursor_age = du->cursor_surface->snooper.age;
		vmw_cursor_update_image(dev_priv,
					du->cursor_surface->snooper.image,
					64, 64,
					du->hotspot_x + du->core_hotspot_x,
					du->hotspot_y + du->core_hotspot_y);
	}

	mutex_unlock(&dev->mode_config.mutex);
}
/*
 * Generic framebuffer code
 */

/*
 * Surface framebuffer code
 */
static void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
{
	struct vmw_framebuffer_surface *vfbs =
		vmw_framebuffer_to_vfbs(framebuffer);

	drm_framebuffer_cleanup(framebuffer);
	vmw_surface_unreference(&vfbs->surface);
	if (vfbs->base.user_obj)
		ttm_base_object_unref(&vfbs->base.user_obj);

	kfree(vfbs);
}
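/*
 * Driver callback for the dirty-fb ioctl on surface-backed framebuffers.
 * With no clip rects given, the whole framebuffer is flushed; with the
 * ANNOTATE_COPY flag, the clip array interleaves destination and source
 * rects, so only every other rect is used.
 */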
static int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
					 struct drm_file *file_priv,
					 unsigned flags, unsigned color,
					 struct drm_clip_rect *clips,
					 unsigned num_clips)
{
	struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
	struct vmw_framebuffer_surface *vfbs =
		vmw_framebuffer_to_vfbs(framebuffer);
	struct drm_clip_rect norect;
	int ret, inc = 1;

	/* Legacy Display Unit does not support 3D */
	if (dev_priv->active_display_unit == vmw_du_legacy)
		return -EINVAL;

	drm_modeset_lock_all(dev_priv->dev);

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0)) {
		drm_modeset_unlock_all(dev_priv->dev);
		return ret;
	}

	if (!num_clips) {
		num_clips = 1;
		clips = &norect;
		norect.x1 = norect.y1 = 0;
		norect.x2 = framebuffer->width;
		norect.y2 = framebuffer->height;
	} else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
		num_clips /= 2;
		inc = 2; /* skip source rects */
	}

	if (dev_priv->active_display_unit == vmw_du_screen_object)
		ret = vmw_kms_sou_do_surface_dirty(dev_priv, &vfbs->base,
						   clips, NULL, NULL, 0, 0,
						   num_clips, inc, NULL);
	else
		ret = vmw_kms_stdu_surface_dirty(dev_priv, &vfbs->base,
						 clips, NULL, NULL, 0, 0,
						 num_clips, inc, NULL);

	vmw_fifo_flush(dev_priv, false);
	ttm_read_unlock(&dev_priv->reservation_sem);

	drm_modeset_unlock_all(dev_priv->dev);

	return 0;
}
/**
 * vmw_kms_readback - Perform a readback from the screen system to
 * a dma-buffer backed framebuffer.
 *
 * @dev_priv: Pointer to the device private structure.
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * Must be set to NULL if @user_fence_rep is NULL.
 * @vfb: Pointer to the dma-buffer backed framebuffer.
 * @user_fence_rep: User-space provided structure for fence information.
 * Must be set to non-NULL if @file_priv is non-NULL.
 * @vclips: Array of clip rects.
 * @num_clips: Number of clip rects in @vclips.
 *
 * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
 * interrupted.
 */
int vmw_kms_readback(struct vmw_private *dev_priv,
		     struct drm_file *file_priv,
		     struct vmw_framebuffer *vfb,
		     struct drm_vmw_fence_rep __user *user_fence_rep,
		     struct drm_vmw_rect *vclips,
		     uint32_t num_clips)
{
	switch (dev_priv->active_display_unit) {
	case vmw_du_screen_object:
		return vmw_kms_sou_readback(dev_priv, file_priv, vfb,
					    user_fence_rep, vclips, num_clips);
	case vmw_du_screen_target:
		return vmw_kms_stdu_dma(dev_priv, file_priv, vfb,
					user_fence_rep, NULL, vclips, num_clips,
					1, false, true);
	default:
		WARN_ONCE(true,
			  "Readback called with invalid display system.\n");
	}

	return -ENOSYS;
}
static const struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = {
	.destroy = vmw_framebuffer_surface_destroy,
	.dirty = vmw_framebuffer_surface_dirty,
};
static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
					   struct vmw_surface *surface,
					   struct vmw_framebuffer **out,
					   const struct drm_mode_fb_cmd
					   *mode_cmd,
					   bool is_dmabuf_proxy)
{
	struct drm_device *dev = dev_priv->dev;
	struct vmw_framebuffer_surface *vfbs;
	enum SVGA3dSurfaceFormat format;
	int ret;

	/* 3D is only supported on HWv8 and newer hosts */
	if (dev_priv->active_display_unit == vmw_du_legacy)
		return -ENOSYS;

	/*
	 * Sanity checks.
	 */

	/* Surface must be marked as a scanout. */
	if (unlikely(!surface->scanout))
		return -EINVAL;

	if (unlikely(surface->mip_levels[0] != 1 ||
		     surface->num_sizes != 1 ||
		     surface->base_size.width < mode_cmd->width ||
		     surface->base_size.height < mode_cmd->height ||
		     surface->base_size.depth != 1)) {
		DRM_ERROR("Incompatible surface dimensions "
			  "for requested mode.\n");
		return -EINVAL;
	}

	switch (mode_cmd->depth) {
	case 32:
		format = SVGA3D_A8R8G8B8;
		break;
	case 24:
		format = SVGA3D_X8R8G8B8;
		break;
	case 16:
		format = SVGA3D_R5G6B5;
		break;
	case 15:
		format = SVGA3D_A1R5G5B5;
		break;
	default:
		DRM_ERROR("Invalid color depth: %d\n", mode_cmd->depth);
		return -EINVAL;
	}

	/*
	 * For DX, surface format validation is done when surface->scanout
	 * is set up at surface creation time.
	 */
	if (!dev_priv->has_dx && format != surface->format) {
		DRM_ERROR("Invalid surface format for requested mode.\n");
		return -EINVAL;
	}

	vfbs = kzalloc(sizeof(*vfbs), GFP_KERNEL);
	if (!vfbs) {
		ret = -ENOMEM;
		goto out_err1;
	}

	/* XXX get the first 3 from the surface info */
	vfbs->base.base.bits_per_pixel = mode_cmd->bpp;
	vfbs->base.base.pitches[0] = mode_cmd->pitch;
	vfbs->base.base.depth = mode_cmd->depth;
	vfbs->base.base.width = mode_cmd->width;
	vfbs->base.base.height = mode_cmd->height;
	vfbs->surface = vmw_surface_reference(surface);
	vfbs->base.user_handle = mode_cmd->handle;
	vfbs->is_dmabuf_proxy = is_dmabuf_proxy;

	*out = &vfbs->base;

	ret = drm_framebuffer_init(dev, &vfbs->base.base,
				   &vmw_framebuffer_surface_funcs);
	if (ret)
		goto out_err2;

	return 0;

out_err2:
	vmw_surface_unreference(&surface);
	kfree(vfbs);
out_err1:
	return ret;
}
/*
 * Dmabuf framebuffer code
 */

static void vmw_framebuffer_dmabuf_destroy(struct drm_framebuffer *framebuffer)
{
	struct vmw_framebuffer_dmabuf *vfbd =
		vmw_framebuffer_to_vfbd(framebuffer);

	drm_framebuffer_cleanup(framebuffer);
	vmw_dmabuf_unreference(&vfbd->buffer);
	if (vfbd->base.user_obj)
		ttm_base_object_unref(&vfbd->base.user_obj);

	kfree(vfbd);
}
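/*
 * Dirty-fb callback for dmabuf-backed framebuffers. Dispatches to the
 * dirty implementation of whichever display system is active; as in the
 * surface variant, an empty clip list means "flush everything".
 */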
static int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
					struct drm_file *file_priv,
					unsigned flags, unsigned color,
					struct drm_clip_rect *clips,
					unsigned num_clips)
{
	struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
	struct vmw_framebuffer_dmabuf *vfbd =
		vmw_framebuffer_to_vfbd(framebuffer);
	struct drm_clip_rect norect;
	int ret, increment = 1;

	drm_modeset_lock_all(dev_priv->dev);

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0)) {
		drm_modeset_unlock_all(dev_priv->dev);
		return ret;
	}

	if (!num_clips) {
		num_clips = 1;
		clips = &norect;
		norect.x1 = norect.y1 = 0;
		norect.x2 = framebuffer->width;
		norect.y2 = framebuffer->height;
	} else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
		num_clips /= 2;
		increment = 2;
	}

	switch (dev_priv->active_display_unit) {
	case vmw_du_screen_target:
		ret = vmw_kms_stdu_dma(dev_priv, NULL, &vfbd->base, NULL,
				       clips, NULL, num_clips, increment,
				       true, true);
		break;
	case vmw_du_screen_object:
		ret = vmw_kms_sou_do_dmabuf_dirty(dev_priv, &vfbd->base,
						  clips, NULL, num_clips,
						  increment, true, NULL);
		break;
	case vmw_du_legacy:
		ret = vmw_kms_ldu_do_dmabuf_dirty(dev_priv, &vfbd->base, 0, 0,
						  clips, num_clips, increment);
		break;
	default:
		ret = -EINVAL;
		WARN_ONCE(true, "Dirty called with invalid display system.\n");
		break;
	}

	vmw_fifo_flush(dev_priv, false);
	ttm_read_unlock(&dev_priv->reservation_sem);

	drm_modeset_unlock_all(dev_priv->dev);

	return ret;
}
static const struct drm_framebuffer_funcs vmw_framebuffer_dmabuf_funcs = {
	.destroy = vmw_framebuffer_dmabuf_destroy,
	.dirty = vmw_framebuffer_dmabuf_dirty,
};
/*
 * Pin the dmabuffer to the start of vram.
 */
static int vmw_framebuffer_pin(struct vmw_framebuffer *vfb)
{
	struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
	struct vmw_dma_buffer *buf;
	int ret;

	buf = vfb->dmabuf ?  vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
		vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.backup;

	if (!buf)
		return 0;

	switch (dev_priv->active_display_unit) {
	case vmw_du_legacy:
		vmw_overlay_pause_all(dev_priv);
		ret = vmw_dmabuf_pin_in_start_of_vram(dev_priv, buf, false);
		vmw_overlay_resume_all(dev_priv);
		break;
	case vmw_du_screen_object:
	case vmw_du_screen_target:
		if (vfb->dmabuf)
			return vmw_dmabuf_pin_in_vram_or_gmr(dev_priv, buf,
							     false);

		return vmw_dmabuf_pin_in_placement(dev_priv, buf,
						   &vmw_mob_placement, false);
	default:
		return -EINVAL;
	}

	return ret;
}
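/*
 * Counterpart of vmw_framebuffer_pin(); drops the pin on the buffer
 * backing the framebuffer.
 */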
static int vmw_framebuffer_unpin(struct vmw_framebuffer *vfb)
{
	struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
	struct vmw_dma_buffer *buf;

	buf = vfb->dmabuf ?  vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
		vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.backup;

	if (WARN_ON(!buf))
		return 0;

	return vmw_dmabuf_unpin(dev_priv, buf, false);
}
/**
 * vmw_create_dmabuf_proxy - create a proxy surface for the DMA buf
 *
 * @dev: DRM device
 * @mode_cmd: parameters for the new surface
 * @dmabuf_mob: MOB backing the DMA buf
 * @srf_out: newly created surface
 *
 * When the content FB is a DMA buf, we create a surface as a proxy to the
 * same buffer. This way we can do a surface copy rather than a surface DMA.
 * This is a more efficient approach.
 *
 * RETURNS:
 * 0 on success, error code otherwise
 */
static int vmw_create_dmabuf_proxy(struct drm_device *dev,
				   const struct drm_mode_fb_cmd *mode_cmd,
				   struct vmw_dma_buffer *dmabuf_mob,
				   struct vmw_surface **srf_out)
{
	uint32_t format;
	struct drm_vmw_size content_base_size;
	struct vmw_resource *res;
	unsigned int bytes_pp;
	int ret;

	switch (mode_cmd->depth) {
	case 32:
	case 24:
		format = SVGA3D_X8R8G8B8;
		bytes_pp = 4;
		break;

	case 16:
	case 15:
		format = SVGA3D_R5G6B5;
		bytes_pp = 2;
		break;

	case 8:
		format = SVGA3D_P8;
		bytes_pp = 1;
		break;

	default:
		DRM_ERROR("Invalid framebuffer format %d\n", mode_cmd->depth);
		return -EINVAL;
	}

	content_base_size.width  = mode_cmd->pitch / bytes_pp;
	content_base_size.height = mode_cmd->height;
	content_base_size.depth  = 1;

	ret = vmw_surface_gb_priv_define(dev,
			0, /* kernel visible only */
			0, /* flags */
			format,
			true, /* can be a scanout buffer */
			1, /* num of mip levels */
			0,
			0,
			content_base_size,
			srf_out);
	if (ret) {
		DRM_ERROR("Failed to allocate proxy content buffer\n");
		return ret;
	}

	res = &(*srf_out)->res;

	/* Reserve and switch the backing mob. */
	mutex_lock(&res->dev_priv->cmdbuf_mutex);
	(void) vmw_resource_reserve(res, false, true);
	vmw_dmabuf_unreference(&res->backup);
	res->backup = vmw_dmabuf_reference(dmabuf_mob);
	res->backup_offset = 0;
	vmw_resource_unreserve(res, false, NULL, 0);
	mutex_unlock(&res->dev_priv->cmdbuf_mutex);

	return 0;
}
static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
					  struct vmw_dma_buffer *dmabuf,
					  struct vmw_framebuffer **out,
					  const struct drm_mode_fb_cmd
					  *mode_cmd)
{
	struct drm_device *dev = dev_priv->dev;
	struct vmw_framebuffer_dmabuf *vfbd;
	unsigned int requested_size;
	int ret;

	requested_size = mode_cmd->height * mode_cmd->pitch;
	if (unlikely(requested_size > dmabuf->base.num_pages * PAGE_SIZE)) {
		DRM_ERROR("Screen buffer object size is too small "
			  "for requested mode.\n");
		return -EINVAL;
	}

	/* Limited framebuffer color depth support for screen objects */
	if (dev_priv->active_display_unit == vmw_du_screen_object) {
		switch (mode_cmd->depth) {
		case 32:
		case 24:
			/* Only support 32 bpp for 32 and 24 depth fbs */
			if (mode_cmd->bpp == 32)
				break;

			DRM_ERROR("Invalid color depth/bpp: %d %d\n",
				  mode_cmd->depth, mode_cmd->bpp);
			return -EINVAL;
		case 16:
		case 15:
			/* Only support 16 bpp for 16 and 15 depth fbs */
			if (mode_cmd->bpp == 16)
				break;

			DRM_ERROR("Invalid color depth/bpp: %d %d\n",
				  mode_cmd->depth, mode_cmd->bpp);
			return -EINVAL;
		default:
			DRM_ERROR("Invalid color depth: %d\n",
				  mode_cmd->depth);
			return -EINVAL;
		}
	}

	vfbd = kzalloc(sizeof(*vfbd), GFP_KERNEL);
	if (!vfbd) {
		ret = -ENOMEM;
		goto out_err1;
	}

	vfbd->base.base.bits_per_pixel = mode_cmd->bpp;
	vfbd->base.base.pitches[0] = mode_cmd->pitch;
	vfbd->base.base.depth = mode_cmd->depth;
	vfbd->base.base.width = mode_cmd->width;
	vfbd->base.base.height = mode_cmd->height;
	vfbd->base.dmabuf = true;
	vfbd->buffer = vmw_dmabuf_reference(dmabuf);
	vfbd->base.user_handle = mode_cmd->handle;
	*out = &vfbd->base;

	ret = drm_framebuffer_init(dev, &vfbd->base.base,
				   &vmw_framebuffer_dmabuf_funcs);
	if (ret)
		goto out_err2;

	return 0;

out_err2:
	vmw_dmabuf_unreference(&dmabuf);
	kfree(vfbd);
out_err1:
	return ret;
}
/**
 * vmw_kms_new_framebuffer - Create a new framebuffer.
 *
 * @dev_priv: Pointer to device private struct.
 * @dmabuf: Pointer to dma buffer to wrap the kms framebuffer around.
 * Either @dmabuf or @surface must be NULL.
 * @surface: Pointer to a surface to wrap the kms framebuffer around.
 * Either @dmabuf or @surface must be NULL.
 * @only_2d: No presents will occur to this dma buffer based framebuffer.
 * This helps the code to do some important optimizations.
 * @mode_cmd: Frame-buffer metadata.
 */
struct vmw_framebuffer *
vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
			struct vmw_dma_buffer *dmabuf,
			struct vmw_surface *surface,
			bool only_2d,
			const struct drm_mode_fb_cmd *mode_cmd)
{
	struct vmw_framebuffer *vfb = NULL;
	bool is_dmabuf_proxy = false;
	int ret;

	/*
	 * We cannot use the SurfaceDMA command in an non-accelerated VM,
	 * therefore, wrap the DMA buf in a surface so we can use the
	 * SurfaceCopy command.
	 */
	if (dmabuf && only_2d &&
	    dev_priv->active_display_unit == vmw_du_screen_target) {
		ret = vmw_create_dmabuf_proxy(dev_priv->dev, mode_cmd,
					      dmabuf, &surface);
		if (ret)
			return ERR_PTR(ret);

		is_dmabuf_proxy = true;
	}

	/* Create the new framebuffer depending on what we have */
	if (surface) {
		ret = vmw_kms_new_framebuffer_surface(dev_priv, surface, &vfb,
						      mode_cmd,
						      is_dmabuf_proxy);

		/*
		 * vmw_create_dmabuf_proxy() adds a reference that is no longer
		 * needed
		 */
		if (is_dmabuf_proxy)
			vmw_surface_unreference(&surface);
	} else if (dmabuf) {
		ret = vmw_kms_new_framebuffer_dmabuf(dev_priv, dmabuf, &vfb,
						     mode_cmd);
	} else {
		BUG();
	}

	if (ret)
		return ERR_PTR(ret);

	vfb->pin = vmw_framebuffer_pin;
	vfb->unpin = vmw_framebuffer_unpin;

	return vfb;
}
/*
 * Generic Kernel modesetting functions
 */

static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
						 struct drm_file *file_priv,
						 const struct drm_mode_fb_cmd2
						 *mode_cmd2)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_framebuffer *vfb = NULL;
	struct vmw_surface *surface = NULL;
	struct vmw_dma_buffer *bo = NULL;
	struct ttm_base_object *user_obj;
	struct drm_mode_fb_cmd mode_cmd;
	int ret;

	mode_cmd.width = mode_cmd2->width;
	mode_cmd.height = mode_cmd2->height;
	mode_cmd.pitch = mode_cmd2->pitches[0];
	mode_cmd.handle = mode_cmd2->handles[0];
	drm_fb_get_bpp_depth(mode_cmd2->pixel_format, &mode_cmd.depth,
			     &mode_cmd.bpp);

	/*
	 * This code should be conditioned on Screen Objects not being used.
	 * If screen objects are used, we can allocate a GMR to hold the
	 * requested framebuffer.
	 */

	if (!vmw_kms_validate_mode_vram(dev_priv,
					mode_cmd.pitch,
					mode_cmd.height)) {
		DRM_ERROR("Requested mode exceeds bounding box limit.\n");
		return ERR_PTR(-ENOMEM);
	}

	/*
	 * Take a reference on the user object of the resource
	 * backing the kms fb. This ensures that user-space handle
	 * lookups on that resource will always work as long as
	 * it's registered with a kms framebuffer. This is important,
	 * since vmw_execbuf_process identifies resources in the
	 * command stream using user-space handles.
	 */

	user_obj = ttm_base_object_lookup(tfile, mode_cmd.handle);
	if (unlikely(user_obj == NULL)) {
		DRM_ERROR("Could not locate requested kms frame buffer.\n");
		return ERR_PTR(-ENOENT);
	}

	/*
	 * End conditioned code.
	 */

	/* returns either a dmabuf or surface */
	ret = vmw_user_lookup_handle(dev_priv, tfile,
				     mode_cmd.handle,
				     &surface, &bo);
	if (ret)
		goto err_out;

	vfb = vmw_kms_new_framebuffer(dev_priv, bo, surface,
				      !(dev_priv->capabilities & SVGA_CAP_3D),
				      &mode_cmd);
	if (IS_ERR(vfb)) {
		ret = PTR_ERR(vfb);
		goto err_out;
	}

err_out:
	/* vmw_user_lookup_handle takes one ref so does new_fb */
	if (bo)
		vmw_dmabuf_unreference(&bo);
	if (surface)
		vmw_surface_unreference(&surface);

	if (ret) {
		DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
		ttm_base_object_unref(&user_obj);
		return ERR_PTR(ret);
	} else
		vfb->user_obj = user_obj;

	return &vfb->base;
}
static const struct drm_mode_config_funcs vmw_kms_funcs = {
	.fb_create = vmw_kms_fb_create,
};
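/*
 * Present helper for Screen Objects; a present here is just a
 * surface-dirty operation on the screen-object display, so the fence and
 * file arguments are not needed.
 */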
static int vmw_kms_generic_present(struct vmw_private *dev_priv,
				   struct drm_file *file_priv,
				   struct vmw_framebuffer *vfb,
				   struct vmw_surface *surface,
				   uint32_t sid,
				   int32_t destX, int32_t destY,
				   struct drm_vmw_rect *clips,
				   uint32_t num_clips)
{
	return vmw_kms_sou_do_surface_dirty(dev_priv, vfb, NULL, clips,
					    &surface->res, destX, destY,
					    num_clips, 1, NULL);
}
int vmw_kms_present(struct vmw_private *dev_priv,
		    struct drm_file *file_priv,
		    struct vmw_framebuffer *vfb,
		    struct vmw_surface *surface,
		    uint32_t sid,
		    int32_t destX, int32_t destY,
		    struct drm_vmw_rect *clips,
		    uint32_t num_clips)
{
	int ret;

	switch (dev_priv->active_display_unit) {
	case vmw_du_screen_target:
		ret = vmw_kms_stdu_surface_dirty(dev_priv, vfb, NULL, clips,
						 &surface->res, destX, destY,
						 num_clips, 1, NULL);
		break;
	case vmw_du_screen_object:
		ret = vmw_kms_generic_present(dev_priv, file_priv, vfb,
					      surface, sid, destX, destY,
					      clips, num_clips);
		break;
	default:
		WARN_ONCE(true,
			  "Present called with invalid display system.\n");
		ret = -ENOSYS;
		break;
	}

	if (ret)
		return ret;

	vmw_fifo_flush(dev_priv, false);

	return 0;
}
static void
vmw_kms_create_hotplug_mode_update_property(struct vmw_private *dev_priv)
{
	if (dev_priv->hotplug_mode_update_property)
		return;

	dev_priv->hotplug_mode_update_property =
		drm_property_create_range(dev_priv->dev,
					  DRM_MODE_PROP_IMMUTABLE,
					  "hotplug_mode_update", 0, 1);

	if (!dev_priv->hotplug_mode_update_property)
		return;
}
int vmw_kms_init(struct vmw_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	int ret;

	drm_mode_config_init(dev);
	dev->mode_config.funcs = &vmw_kms_funcs;
	dev->mode_config.min_width = 1;
	dev->mode_config.min_height = 1;
	dev->mode_config.max_width = dev_priv->texture_max_width;
	dev->mode_config.max_height = dev_priv->texture_max_height;

	drm_mode_create_suggested_offset_properties(dev);
	vmw_kms_create_hotplug_mode_update_property(dev_priv);

	ret = vmw_kms_stdu_init_display(dev_priv);
	if (ret) {
		ret = vmw_kms_sou_init_display(dev_priv);
		if (ret) /* Fallback */
			ret = vmw_kms_ldu_init_display(dev_priv);
	}

	return ret;
}
int vmw_kms_close(struct vmw_private *dev_priv)
{
	int ret;

	/*
	 * Docs say we should take the lock before calling this function
	 * but since it destroys encoders and our destructor calls
	 * drm_encoder_cleanup which takes the lock we deadlock.
	 */
	drm_mode_config_cleanup(dev_priv->dev);
	if (dev_priv->active_display_unit == vmw_du_screen_object)
		ret = vmw_kms_sou_close_display(dev_priv);
	else if (dev_priv->active_display_unit == vmw_du_screen_target)
		ret = vmw_kms_stdu_close_display(dev_priv);
	else
		ret = vmw_kms_ldu_close_display(dev_priv);

	return ret;
}
int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct drm_vmw_cursor_bypass_arg *arg = data;
	struct vmw_display_unit *du;
	struct drm_crtc *crtc;
	int ret = 0;

	mutex_lock(&dev->mode_config.mutex);
	if (arg->flags & DRM_VMW_CURSOR_BYPASS_ALL) {

		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
			du = vmw_crtc_to_du(crtc);
			du->hotspot_x = arg->xhot;
			du->hotspot_y = arg->yhot;
		}

		mutex_unlock(&dev->mode_config.mutex);
		return 0;
	}

	crtc = drm_crtc_find(dev, arg->crtc_id);
	if (!crtc) {
		ret = -ENOENT;
		goto out;
	}

	du = vmw_crtc_to_du(crtc);

	du->hotspot_x = arg->xhot;
	du->hotspot_y = arg->yhot;

out:
	mutex_unlock(&dev->mode_config.mutex);

	return ret;
}
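/*
 * Programs the device registers for a new display mode. Returns -EINVAL
 * if the host disagrees about the depth implied by @bpp.
 */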
int vmw_kms_write_svga(struct vmw_private *vmw_priv,
		       unsigned width, unsigned height, unsigned pitch,
		       unsigned bpp, unsigned depth)
{
	if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
		vmw_write(vmw_priv, SVGA_REG_PITCHLOCK, pitch);
	else if (vmw_fifo_have_pitchlock(vmw_priv))
		vmw_mmio_write(pitch, vmw_priv->mmio_virt +
			       SVGA_FIFO_PITCHLOCK);
	vmw_write(vmw_priv, SVGA_REG_WIDTH, width);
	vmw_write(vmw_priv, SVGA_REG_HEIGHT, height);
	vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, bpp);

	if (vmw_read(vmw_priv, SVGA_REG_DEPTH) != depth) {
		DRM_ERROR("Invalid depth %u for %u bpp, host expects %u\n",
			  depth, bpp, vmw_read(vmw_priv, SVGA_REG_DEPTH));
		return -EINVAL;
	}

	return 0;
}
int vmw_kms_save_vga(struct vmw_private *vmw_priv)
{
	struct vmw_vga_topology_state *save;
	uint32_t i;

	vmw_priv->vga_width = vmw_read(vmw_priv, SVGA_REG_WIDTH);
	vmw_priv->vga_height = vmw_read(vmw_priv, SVGA_REG_HEIGHT);
	vmw_priv->vga_bpp = vmw_read(vmw_priv, SVGA_REG_BITS_PER_PIXEL);
	if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
		vmw_priv->vga_pitchlock =
			vmw_read(vmw_priv, SVGA_REG_PITCHLOCK);
	else if (vmw_fifo_have_pitchlock(vmw_priv))
		vmw_priv->vga_pitchlock = vmw_mmio_read(vmw_priv->mmio_virt +
							SVGA_FIFO_PITCHLOCK);

	if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY))
		return 0;

	vmw_priv->num_displays = vmw_read(vmw_priv,
					  SVGA_REG_NUM_GUEST_DISPLAYS);

	if (vmw_priv->num_displays == 0)
		vmw_priv->num_displays = 1;

	for (i = 0; i < vmw_priv->num_displays; ++i) {
		save = &vmw_priv->vga_save[i];
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, i);
		save->primary = vmw_read(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY);
		save->pos_x = vmw_read(vmw_priv, SVGA_REG_DISPLAY_POSITION_X);
		save->pos_y = vmw_read(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y);
		save->width = vmw_read(vmw_priv, SVGA_REG_DISPLAY_WIDTH);
		save->height = vmw_read(vmw_priv, SVGA_REG_DISPLAY_HEIGHT);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
		if (i == 0 && vmw_priv->num_displays == 1 &&
		    save->width == 0 && save->height == 0) {

			/*
			 * It should be fairly safe to assume that these
			 * values are uninitialized.
			 */

			save->width = vmw_priv->vga_width - save->pos_x;
			save->height = vmw_priv->vga_height - save->pos_y;
		}
	}

	return 0;
}
int vmw_kms_restore_vga(struct vmw_private *vmw_priv)
{
	struct vmw_vga_topology_state *save;
	uint32_t i;

	vmw_write(vmw_priv, SVGA_REG_WIDTH, vmw_priv->vga_width);
	vmw_write(vmw_priv, SVGA_REG_HEIGHT, vmw_priv->vga_height);
	vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, vmw_priv->vga_bpp);
	if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
		vmw_write(vmw_priv, SVGA_REG_PITCHLOCK,
			  vmw_priv->vga_pitchlock);
	else if (vmw_fifo_have_pitchlock(vmw_priv))
		vmw_mmio_write(vmw_priv->vga_pitchlock,
			       vmw_priv->mmio_virt + SVGA_FIFO_PITCHLOCK);

	if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY))
		return 0;

	for (i = 0; i < vmw_priv->num_displays; ++i) {
		save = &vmw_priv->vga_save[i];
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, i);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, save->primary);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, save->pos_x);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, save->pos_y);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, save->width);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, save->height);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
	}

	return 0;
}
bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
				uint32_t pitch,
				uint32_t height)
{
	return ((u64) pitch * (u64) height) < (u64)
		((dev_priv->active_display_unit == vmw_du_screen_target) ?
		 dev_priv->prim_bb_mem : dev_priv->vram_size);
}
/*
 * Called by DRM code with vbl_lock held.
 */
u32 vmw_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
	return 0;
}

/*
 * Called by DRM code with vbl_lock held.
 */
int vmw_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
	return -ENOSYS;
}

/*
 * Called by DRM code with vbl_lock held.
 */
void vmw_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
}
/*
 * Small shared kms functions.
 */

static int vmw_du_update_layout(struct vmw_private *dev_priv, unsigned num,
				struct drm_vmw_rect *rects)
{
	struct drm_device *dev = dev_priv->dev;
	struct vmw_display_unit *du;
	struct drm_connector *con;

	mutex_lock(&dev->mode_config.mutex);

#if 0
	{
		unsigned int i;

		DRM_INFO("%s: new layout ", __func__);
		for (i = 0; i < num; i++)
			DRM_INFO("(%i, %i %ux%u) ", rects[i].x, rects[i].y,
				 rects[i].w, rects[i].h);
		DRM_INFO("\n");
	}
#endif

	list_for_each_entry(con, &dev->mode_config.connector_list, head) {
		du = vmw_connector_to_du(con);
		if (num > du->unit) {
			du->pref_width = rects[du->unit].w;
			du->pref_height = rects[du->unit].h;
			du->pref_active = true;
			du->gui_x = rects[du->unit].x;
			du->gui_y = rects[du->unit].y;
			drm_object_property_set_value
			  (&con->base, dev->mode_config.suggested_x_property,
			   du->gui_x);
			drm_object_property_set_value
			  (&con->base, dev->mode_config.suggested_y_property,
			   du->gui_y);
		} else {
			du->pref_width = 800;
			du->pref_height = 600;
			du->pref_active = false;
			drm_object_property_set_value
			  (&con->base, dev->mode_config.suggested_x_property,
			   0);
			drm_object_property_set_value
			  (&con->base, dev->mode_config.suggested_y_property,
			   0);
		}
		con->status = vmw_du_connector_detect(con, true);
	}

	mutex_unlock(&dev->mode_config.mutex);
	drm_sysfs_hotplug_event(dev);

	return 0;
}
int vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
			  u16 *r, u16 *g, u16 *b,
			  uint32_t size)
{
	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
	int i;

	for (i = 0; i < size; i++) {
		DRM_DEBUG("%d r/g/b = 0x%04x / 0x%04x / 0x%04x\n", i,
			  r[i], g[i], b[i]);
		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 0, r[i] >> 8);
		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 1, g[i] >> 8);
		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 2, b[i] >> 8);
	}

	return 0;
}
int vmw_du_connector_dpms(struct drm_connector *connector, int mode)
{
	return 0;
}
enum drm_connector_status
vmw_du_connector_detect(struct drm_connector *connector, bool force)
{
	uint32_t num_displays;
	struct drm_device *dev = connector->dev;
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_display_unit *du = vmw_connector_to_du(connector);

	num_displays = vmw_read(dev_priv, SVGA_REG_NUM_DISPLAYS);

	return ((vmw_connector_to_du(connector)->unit < num_displays &&
		 du->pref_active) ?
		connector_status_connected : connector_status_disconnected);
}
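/*
 * Fixed mode list offered to user-space in addition to the preferred
 * mode. The timings appear to be standard VESA-style entries; the
 * all-zero entry terminates the list.
 */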
static struct drm_display_mode vmw_kms_connector_builtin[] = {
	/* 640x480@60Hz */
	{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
		   752, 800, 0, 480, 489, 492, 525, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
	/* 800x600@60Hz */
	{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
		   968, 1056, 0, 600, 601, 605, 628, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1024x768@60Hz */
	{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
		   1184, 1344, 0, 768, 771, 777, 806, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
	/* 1152x864@75Hz */
	{ DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
		   1344, 1600, 0, 864, 865, 868, 900, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1280x768@60Hz */
	{ DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344,
		   1472, 1664, 0, 768, 771, 778, 798, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1280x800@60Hz */
	{ DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 83500, 1280, 1352,
		   1480, 1680, 0, 800, 803, 809, 831, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
	/* 1280x960@60Hz */
	{ DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1376,
		   1488, 1800, 0, 960, 961, 964, 1000, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1280x1024@60Hz */
	{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1328,
		   1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1360x768@60Hz */
	{ DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 85500, 1360, 1424,
		   1536, 1792, 0, 768, 771, 777, 795, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1400x1050@60Hz */
	{ DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 121750, 1400, 1488,
		   1632, 1864, 0, 1050, 1053, 1057, 1089, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1440x900@60Hz */
	{ DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 106500, 1440, 1520,
		   1672, 1904, 0, 900, 903, 909, 934, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1600x1200@60Hz */
	{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600, 1664,
		   1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1680x1050@60Hz */
	{ DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 146250, 1680, 1784,
		   1960, 2240, 0, 1050, 1053, 1059, 1089, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1792x1344@60Hz */
	{ DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 204750, 1792, 1920,
		   2120, 2448, 0, 1344, 1345, 1348, 1394, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1856x1392@60Hz */
	{ DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952,
		   2176, 2528, 0, 1392, 1393, 1396, 1439, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1920x1200@60Hz */
	{ DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 193250, 1920, 2056,
		   2256, 2592, 0, 1200, 1203, 1209, 1245, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1920x1440@60Hz */
	{ DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048,
		   2256, 2600, 0, 1440, 1441, 1444, 1500, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 2560x1600@60Hz */
	{ DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752,
		   3032, 3504, 0, 1600, 1603, 1609, 1658, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* Terminate */
	{ DRM_MODE("", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) },
};
/**
 * vmw_guess_mode_timing - Provide fake timings for a
 * 60Hz vrefresh mode.
 *
 * @mode: Pointer to a struct drm_display_mode with hdisplay and vdisplay
 * members filled in.
 */
void vmw_guess_mode_timing(struct drm_display_mode *mode)
{
	mode->hsync_start = mode->hdisplay + 50;
	mode->hsync_end = mode->hsync_start + 50;
	mode->htotal = mode->hsync_end + 50;

	mode->vsync_start = mode->vdisplay + 50;
	mode->vsync_end = mode->vsync_start + 50;
	mode->vtotal = mode->vsync_end + 50;

	mode->clock = (u32)mode->htotal * (u32)mode->vtotal / 100 * 6;
	mode->vrefresh = drm_mode_vrefresh(mode);
}
int vmw_du_connector_fill_modes(struct drm_connector *connector,
				uint32_t max_width, uint32_t max_height)
{
	struct vmw_display_unit *du = vmw_connector_to_du(connector);
	struct drm_device *dev = connector->dev;
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_display_mode *mode = NULL;
	struct drm_display_mode *bmode;
	struct drm_display_mode prefmode = { DRM_MODE("preferred",
		DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
	};
	int i;
	u32 assumed_bpp = 4;

	if (dev_priv->assume_16bpp)
		assumed_bpp = 2;

	if (dev_priv->active_display_unit == vmw_du_screen_target) {
		max_width  = min(max_width,  dev_priv->stdu_max_width);
		max_height = min(max_height, dev_priv->stdu_max_height);
	}

	/* Add preferred mode */
	mode = drm_mode_duplicate(dev, &prefmode);
	if (!mode)
		return 0;
	mode->hdisplay = du->pref_width;
	mode->vdisplay = du->pref_height;
	vmw_guess_mode_timing(mode);

	if (vmw_kms_validate_mode_vram(dev_priv,
				       mode->hdisplay * assumed_bpp,
				       mode->vdisplay)) {
		drm_mode_probed_add(connector, mode);
	} else {
		drm_mode_destroy(dev, mode);
		mode = NULL;
	}

	if (du->pref_mode) {
		list_del_init(&du->pref_mode->head);
		drm_mode_destroy(dev, du->pref_mode);
	}

	/* mode might be null here, this is intended */
	du->pref_mode = mode;

	for (i = 0; vmw_kms_connector_builtin[i].type != 0; i++) {
		bmode = &vmw_kms_connector_builtin[i];
		if (bmode->hdisplay > max_width ||
		    bmode->vdisplay > max_height)
			continue;

		if (!vmw_kms_validate_mode_vram(dev_priv,
						bmode->hdisplay * assumed_bpp,
						bmode->vdisplay))
			continue;

		mode = drm_mode_duplicate(dev, bmode);
		if (!mode)
			return 0;
		mode->vrefresh = drm_mode_vrefresh(mode);

		drm_mode_probed_add(connector, mode);
	}

	drm_mode_connector_list_update(connector);
	/* Move the preferred mode first, help apps pick the right mode. */
	drm_mode_sort(&connector->modes);

	return 1;
}
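/*
 * Connector property setter; only the implicit placement property is
 * acted upon here, mirroring its value into the display unit.
 */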
int vmw_du_connector_set_property(struct drm_connector *connector,
				  struct drm_property *property,
				  uint64_t val)
{
	struct vmw_display_unit *du = vmw_connector_to_du(connector);
	struct vmw_private *dev_priv = vmw_priv(connector->dev);

	if (property == dev_priv->implicit_placement_property)
		du->is_implicit = val;

	return 0;
}
int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_update_layout_arg *arg =
		(struct drm_vmw_update_layout_arg *)data;
	void __user *user_rects;
	struct drm_vmw_rect *rects;
	unsigned rects_size;
	int ret;
	int i;
	u64 total_pixels = 0;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct drm_vmw_rect bounding_box = {0};

	if (!arg->num_outputs) {
		struct drm_vmw_rect def_rect = {0, 0, 800, 600};
		vmw_du_update_layout(dev_priv, 1, &def_rect);
		return 0;
	}

	rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect);
	rects = kcalloc(arg->num_outputs, sizeof(struct drm_vmw_rect),
			GFP_KERNEL);
	if (unlikely(!rects))
		return -ENOMEM;

	user_rects = (void __user *)(unsigned long)arg->rects;
	ret = copy_from_user(rects, user_rects, rects_size);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to get rects.\n");
		ret = -EFAULT;
		goto out_free;
	}

	for (i = 0; i < arg->num_outputs; ++i) {
		if (rects[i].x < 0 ||
		    rects[i].y < 0 ||
		    rects[i].x + rects[i].w > mode_config->max_width ||
		    rects[i].y + rects[i].h > mode_config->max_height) {
			DRM_ERROR("Invalid GUI layout.\n");
			ret = -EINVAL;
			goto out_free;
		}

		/*
		 * bounding_box.w and bounding_box.h are used as
		 * lower-right coordinates
		 */
		if (rects[i].x + rects[i].w > bounding_box.w)
			bounding_box.w = rects[i].x + rects[i].w;

		if (rects[i].y + rects[i].h > bounding_box.h)
			bounding_box.h = rects[i].y + rects[i].h;

		total_pixels += (u64) rects[i].w * (u64) rects[i].h;
	}

	if (dev_priv->active_display_unit == vmw_du_screen_target) {
		/*
		 * For Screen Targets, the limits for a topology are:
		 *	1. Bounding box (assuming 32bpp) must be < prim_bb_mem
		 *	2. Total pixels (assuming 32bpp) must be < prim_bb_mem
		 */
		u64 bb_mem = bounding_box.w * bounding_box.h * 4;
		u64 pixel_mem = total_pixels * 4;

		if (bb_mem > dev_priv->prim_bb_mem) {
			DRM_ERROR("Topology is beyond supported limits.\n");
			ret = -EINVAL;
			goto out_free;
		}

		if (pixel_mem > dev_priv->prim_bb_mem) {
			DRM_ERROR("Combined output size too large\n");
			ret = -EINVAL;
			goto out_free;
		}
	}

	vmw_du_update_layout(dev_priv, arg->num_outputs, rects);

out_free:
	kfree(rects);
	return ret;
}
/**
 * vmw_kms_helper_dirty - Helper to build commands and perform actions based
 * on a set of cliprects and a set of display units.
 *
 * @dev_priv: Pointer to a device private structure.
 * @framebuffer: Pointer to the framebuffer on which to perform the actions.
 * @clips: A set of struct drm_clip_rect. Either this or @vclips must be NULL.
 * Cliprects are given in framebuffer coordinates.
 * @vclips: A set of struct drm_vmw_rect cliprects. Either this or @clips must
 * be NULL. Cliprects are given in source coordinates.
 * @dest_x: X coordinate offset for the crtc / destination clip rects.
 * @dest_y: Y coordinate offset for the crtc / destination clip rects.
 * @num_clips: Number of cliprects in the @clips or @vclips array.
 * @increment: Integer with which to increment the clip counter when looping.
 * Used to skip a predetermined number of clip rects.
 * @dirty: Closure structure. See the description of struct vmw_kms_dirty.
 */
int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
			 struct vmw_framebuffer *framebuffer,
			 const struct drm_clip_rect *clips,
			 const struct drm_vmw_rect *vclips,
			 s32 dest_x, s32 dest_y,
			 int num_clips,
			 int increment,
			 struct vmw_kms_dirty *dirty)
{
	struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
	struct drm_crtc *crtc;
	u32 num_units = 0;
	u32 i, k;

	dirty->dev_priv = dev_priv;

	list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list,
			    head) {
		if (crtc->primary->fb != &framebuffer->base)
			continue;
		units[num_units++] = vmw_crtc_to_du(crtc);
	}

	for (k = 0; k < num_units; k++) {
		struct vmw_display_unit *unit = units[k];
		s32 crtc_x = unit->crtc.x;
		s32 crtc_y = unit->crtc.y;
		s32 crtc_width = unit->crtc.mode.hdisplay;
		s32 crtc_height = unit->crtc.mode.vdisplay;
		const struct drm_clip_rect *clips_ptr = clips;
		const struct drm_vmw_rect *vclips_ptr = vclips;

		dirty->unit = unit;
		if (dirty->fifo_reserve_size > 0) {
			dirty->cmd = vmw_fifo_reserve(dev_priv,
						      dirty->fifo_reserve_size);
			if (!dirty->cmd) {
				DRM_ERROR("Couldn't reserve fifo space "
					  "for dirty blits.\n");
				return -ENOMEM;
			}
			memset(dirty->cmd, 0, dirty->fifo_reserve_size);
		}
		dirty->num_hits = 0;
		for (i = 0; i < num_clips; i++, clips_ptr += increment,
		       vclips_ptr += increment) {
			s32 clip_left;
			s32 clip_top;

			/*
			 * Select clip array type. Note that integer type
			 * in @clips is unsigned short, whereas in @vclips
			 * it's 32-bit.
			 */
			if (clips) {
				dirty->fb_x = (s32) clips_ptr->x1;
				dirty->fb_y = (s32) clips_ptr->y1;
				dirty->unit_x2 = (s32) clips_ptr->x2 + dest_x -
					crtc_x;
				dirty->unit_y2 = (s32) clips_ptr->y2 + dest_y -
					crtc_y;
			} else {
				dirty->fb_x = vclips_ptr->x;
				dirty->fb_y = vclips_ptr->y;
				dirty->unit_x2 = dirty->fb_x + vclips_ptr->w +
					dest_x - crtc_x;
				dirty->unit_y2 = dirty->fb_y + vclips_ptr->h +
					dest_y - crtc_y;
			}

			dirty->unit_x1 = dirty->fb_x + dest_x - crtc_x;
			dirty->unit_y1 = dirty->fb_y + dest_y - crtc_y;

			/* Skip this clip if it's outside the crtc region */
			if (dirty->unit_x1 >= crtc_width ||
			    dirty->unit_y1 >= crtc_height ||
			    dirty->unit_x2 <= 0 || dirty->unit_y2 <= 0)
				continue;

			/* Clip right and bottom to crtc limits */
			dirty->unit_x2 = min_t(s32, dirty->unit_x2,
					       crtc_width);
			dirty->unit_y2 = min_t(s32, dirty->unit_y2,
					       crtc_height);

			/* Clip left and top to crtc limits */
			clip_left = min_t(s32, dirty->unit_x1, 0);
			clip_top = min_t(s32, dirty->unit_y1, 0);
			dirty->unit_x1 -= clip_left;
			dirty->unit_y1 -= clip_top;
			dirty->fb_x -= clip_left;
			dirty->fb_y -= clip_top;

			dirty->clip(dirty);
		}

		dirty->fifo_commit(dirty);
	}

	return 0;
}
/**
 * vmw_kms_helper_buffer_prepare - Reserve and validate a buffer object before
 * command submission.
 *
 * @dev_priv: Pointer to a device private structure.
 * @buf: The buffer object.
 * @interruptible: Whether to perform waits as interruptible.
 * @validate_as_mob: Whether the buffer should be validated as a MOB. If false,
 * the buffer will be validated as a GMR. Already pinned buffers will not be
 * validated.
 *
 * Returns 0 on success, negative error code on failure, -ERESTARTSYS if
 * interrupted by a signal.
 */
int vmw_kms_helper_buffer_prepare(struct vmw_private *dev_priv,
				  struct vmw_dma_buffer *buf,
				  bool interruptible,
				  bool validate_as_mob)
{
	struct ttm_buffer_object *bo = &buf->base;
	int ret;

	ttm_bo_reserve(bo, false, false, NULL);
	ret = vmw_validate_single_buffer(dev_priv, bo, interruptible,
					 validate_as_mob);
	if (ret)
		ttm_bo_unreserve(bo);

	return ret;
}
/**
 * vmw_kms_helper_buffer_revert - Undo the actions of
 * vmw_kms_helper_buffer_prepare.
 *
 * @buf: Pointer to the buffer object.
 *
 * Helper to be used if an error forces the caller to undo the actions of
 * vmw_kms_helper_buffer_prepare.
 */
void vmw_kms_helper_buffer_revert(struct vmw_dma_buffer *buf)
{
	if (buf)
		ttm_bo_unreserve(&buf->base);
}
/**
 * vmw_kms_helper_buffer_finish - Unreserve and fence a buffer object after
 * kms command submission.
 *
 * @dev_priv: Pointer to a device private structure.
 * @file_priv: Pointer to a struct drm_file representing the caller's
 * connection. Must be set to NULL if @user_fence_rep is NULL, and conversely
 * if non-NULL, @user_fence_rep must be non-NULL.
 * @buf: The buffer object.
 * @out_fence: Optional pointer to a fence pointer. If non-NULL, a
 * ref-counted fence pointer is returned here.
 * @user_fence_rep: Optional pointer to a user-space provided struct
 * drm_vmw_fence_rep. If provided, @file_priv must also be provided and the
 * function copies fence data to user-space in a fail-safe manner.
 */
void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv,
				  struct drm_file *file_priv,
				  struct vmw_dma_buffer *buf,
				  struct vmw_fence_obj **out_fence,
				  struct drm_vmw_fence_rep __user *
				  user_fence_rep)
{
	struct vmw_fence_obj *fence;
	uint32_t handle;
	int ret;

	ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
					 file_priv ? &handle : NULL);
	if (buf)
		vmw_fence_single_bo(&buf->base, fence);
	if (file_priv)
		vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv),
					    ret, user_fence_rep, fence,
					    handle);
	if (out_fence)
		*out_fence = fence;
	else
		vmw_fence_obj_unreference(&fence);

	vmw_kms_helper_buffer_revert(buf);
}
/**
 * vmw_kms_helper_resource_revert - Undo the actions of
 * vmw_kms_helper_resource_prepare.
 *
 * @ctx: Pointer to the validation context. The context's resource is
 * typically a surface.
 *
 * Helper to be used if an error forces the caller to undo the actions of
 * vmw_kms_helper_resource_prepare.
 */
void vmw_kms_helper_resource_revert(struct vmw_validation_ctx *ctx)
{
	struct vmw_resource *res = ctx->res;

	vmw_kms_helper_buffer_revert(ctx->buf);
	vmw_dmabuf_unreference(&ctx->buf);
	vmw_resource_unreserve(res, false, NULL, 0);
	mutex_unlock(&res->dev_priv->cmdbuf_mutex);
}
/**
 * vmw_kms_helper_resource_prepare - Reserve and validate a resource before
 * command submission.
 *
 * @res: Pointer to the resource. Typically a surface.
 * @interruptible: Whether to perform waits as interruptible.
 * @ctx: Pointer to a validation context used to track the reservation.
 *
 * Reserves and validates also the backup buffer if a guest-backed resource.
 * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
 * interrupted by a signal.
 */
int vmw_kms_helper_resource_prepare(struct vmw_resource *res,
				    bool interruptible,
				    struct vmw_validation_ctx *ctx)
{
	int ret = 0;

	ctx->buf = NULL;
	ctx->res = res;

	if (interruptible)
		ret = mutex_lock_interruptible(&res->dev_priv->cmdbuf_mutex);
	else
		mutex_lock(&res->dev_priv->cmdbuf_mutex);

	if (unlikely(ret != 0))
		return -ERESTARTSYS;

	ret = vmw_resource_reserve(res, interruptible, false);
	if (ret)
		goto out_unlock;

	if (res->backup) {
		ret = vmw_kms_helper_buffer_prepare(res->dev_priv, res->backup,
						    interruptible,
						    res->dev_priv->has_mob);
		if (ret)
			goto out_unreserve;

		ctx->buf = vmw_dmabuf_reference(res->backup);
	}
	ret = vmw_resource_validate(res);
	if (ret)
		goto out_revert;
	return 0;

out_revert:
	vmw_kms_helper_buffer_revert(ctx->buf);
out_unreserve:
	vmw_resource_unreserve(res, false, NULL, 0);
out_unlock:
	mutex_unlock(&res->dev_priv->cmdbuf_mutex);
	return ret;
}
/**
 * vmw_kms_helper_resource_finish - Unreserve and fence a resource after
 * kms command submission.
 *
 * @ctx: Pointer to the validation context. The context's resource is
 * typically a surface.
 * @out_fence: Optional pointer to a fence pointer. If non-NULL, a
 * ref-counted fence pointer is returned here.
 */
void vmw_kms_helper_resource_finish(struct vmw_validation_ctx *ctx,
				    struct vmw_fence_obj **out_fence)
{
	struct vmw_resource *res = ctx->res;

	if (ctx->buf || out_fence)
		vmw_kms_helper_buffer_finish(res->dev_priv, NULL, ctx->buf,
					     out_fence, NULL);

	vmw_dmabuf_unreference(&ctx->buf);
	vmw_resource_unreserve(res, false, NULL, 0);
	mutex_unlock(&res->dev_priv->cmdbuf_mutex);
}
/**
 * vmw_kms_update_proxy - Helper function to update a proxy surface from
 * its backing MOB.
 *
 * @res: Pointer to the surface resource.
 * @clips: Clip rects in framebuffer (surface) space.
 * @num_clips: Number of clips in @clips.
 * @increment: Integer with which to increment the clip counter when looping.
 * Used to skip a predetermined number of clip rects.
 *
 * This function makes sure the proxy surface is updated from its backing MOB
 * using the region given by @clips. The surface resource @res and its backing
 * MOB need to be reserved and validated on call.
 */
int vmw_kms_update_proxy(struct vmw_resource *res,
			 const struct drm_clip_rect *clips,
			 unsigned num_clips,
			 int increment)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct drm_vmw_size *size = &vmw_res_to_srf(res)->base_size;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdUpdateGBImage body;
	} *cmd;
	SVGA3dBox *box;
	size_t copy_size = 0;
	int i;

	if (!clips)
		return 0;

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd) * num_clips);
	if (!cmd) {
		DRM_ERROR("Couldn't reserve fifo space for proxy surface "
			  "update.\n");
		return -ENOMEM;
	}

	for (i = 0; i < num_clips; ++i, clips += increment, ++cmd) {
		box = &cmd->body.box;

		cmd->header.id = SVGA_3D_CMD_UPDATE_GB_IMAGE;
		cmd->header.size = sizeof(cmd->body);
		cmd->body.image.sid = res->id;
		cmd->body.image.face = 0;
		cmd->body.image.mipmap = 0;

		if (clips->x1 > size->width || clips->x2 > size->width ||
		    clips->y1 > size->height || clips->y2 > size->height) {
			DRM_ERROR("Invalid clips outside of framebuffer.\n");
			return -EINVAL;
		}

		box->x = clips->x1;
		box->y = clips->y1;
		box->z = 0;
		box->w = clips->x2 - clips->x1;
		box->h = clips->y2 - clips->y1;
		box->d = 1;

		copy_size += sizeof(*cmd);
	}

	vmw_fifo_commit(dev_priv, copy_size);

	return 0;
}
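/*
 * Picks the connector, crtc and preferred display mode that fbdev
 * emulation should start out with for display unit @unit, filling in
 * modes first if the connector has none.
 */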
int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv,
			    unsigned unit,
			    u32 max_width,
			    u32 max_height,
			    struct drm_connector **p_con,
			    struct drm_crtc **p_crtc,
			    struct drm_display_mode **p_mode)
{
	struct drm_connector *con;
	struct vmw_display_unit *du;
	struct drm_display_mode *mode;
	int i = 0;

	list_for_each_entry(con, &dev_priv->dev->mode_config.connector_list,
			    head) {
		if (i == unit)
			break;

		++i;
	}

	if (i != unit) {
		DRM_ERROR("Could not find initial display unit.\n");
		return -EINVAL;
	}

	if (list_empty(&con->modes))
		(void) vmw_du_connector_fill_modes(con, max_width, max_height);

	if (list_empty(&con->modes)) {
		DRM_ERROR("Could not find initial display mode.\n");
		return -EINVAL;
	}

	du = vmw_connector_to_du(con);
	*p_con = con;
	*p_crtc = &du->crtc;

	list_for_each_entry(mode, &con->modes, head) {
		if (mode->type & DRM_MODE_TYPE_PREFERRED)
			break;
	}

	if (mode->type & DRM_MODE_TYPE_PREFERRED)
		*p_mode = mode;
	else {
		WARN_ONCE(true, "Could not find initial preferred mode.\n");
		*p_mode = list_first_entry(&con->modes,
					   struct drm_display_mode,
					   head);
	}

	return 0;
}
/**
 * vmw_kms_del_active - unregister a crtc binding to the implicit framebuffer
 *
 * @dev_priv: Pointer to a device private struct.
 * @du: The display unit of the crtc.
 */
void vmw_kms_del_active(struct vmw_private *dev_priv,
			struct vmw_display_unit *du)
{
	mutex_lock(&dev_priv->global_kms_state_mutex);
	if (du->active_implicit) {
		if (--(dev_priv->num_implicit) == 0)
			dev_priv->implicit_fb = NULL;
		du->active_implicit = false;
	}
	mutex_unlock(&dev_priv->global_kms_state_mutex);
}
/**
 * vmw_kms_add_active - register a crtc binding to an implicit framebuffer
 *
 * @dev_priv: Pointer to a device private struct.
 * @du: The display unit of the crtc.
 * @vfb: The implicit framebuffer
 *
 * Registers a binding to an implicit framebuffer.
 */
void vmw_kms_add_active(struct vmw_private *dev_priv,
			struct vmw_display_unit *du,
			struct vmw_framebuffer *vfb)
{
	mutex_lock(&dev_priv->global_kms_state_mutex);
	WARN_ON_ONCE(!dev_priv->num_implicit && dev_priv->implicit_fb);

	if (!du->active_implicit && du->is_implicit) {
		dev_priv->implicit_fb = vfb;
		du->active_implicit = true;
		dev_priv->num_implicit++;
	}
	mutex_unlock(&dev_priv->global_kms_state_mutex);
}
/**
 * vmw_kms_crtc_flippable - Check whether we can page-flip a crtc.
 *
 * @dev_priv: Pointer to device-private struct.
 * @crtc: The crtc we want to flip.
 *
 * Returns true or false depending on whether it's OK to flip this crtc
 * based on the criterion that we must not have more than one implicit
 * frame-buffer at any one time.
 */
bool vmw_kms_crtc_flippable(struct vmw_private *dev_priv,
			    struct drm_crtc *crtc)
{
	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
	bool ret;

	mutex_lock(&dev_priv->global_kms_state_mutex);
	ret = !du->is_implicit || dev_priv->num_implicit == 1;
	mutex_unlock(&dev_priv->global_kms_state_mutex);

	return ret;
}
/**
 * vmw_kms_update_implicit_fb - Update the implicit fb.
 *
 * @dev_priv: Pointer to device-private struct.
 * @crtc: The crtc the new implicit frame-buffer is bound to.
 */
void vmw_kms_update_implicit_fb(struct vmw_private *dev_priv,
				struct drm_crtc *crtc)
{
	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
	struct vmw_framebuffer *vfb;

	mutex_lock(&dev_priv->global_kms_state_mutex);

	if (!du->is_implicit)
		goto out_unlock;

	vfb = vmw_framebuffer_to_vfb(crtc->primary->fb);
	WARN_ON_ONCE(dev_priv->num_implicit != 1 &&
		     dev_priv->implicit_fb != vfb);

	dev_priv->implicit_fb = vfb;
out_unlock:
	mutex_unlock(&dev_priv->global_kms_state_mutex);
}
/**
 * vmw_kms_create_implicit_placement_property - Set up the implicit placement
 * property.
 *
 * @dev_priv: Pointer to a device private struct.
 * @immutable: Whether the property is immutable.
 *
 * Sets up the implicit placement property unless it's already set up.
 */
void
vmw_kms_create_implicit_placement_property(struct vmw_private *dev_priv,
					   bool immutable)
{
	if (dev_priv->implicit_placement_property)
		return;

	dev_priv->implicit_placement_property =
		drm_property_create_range(dev_priv->dev,
					  immutable ?
					  DRM_MODE_PROP_IMMUTABLE : 0,
					  "implicit_placement", 0, 1);
}