/**************************************************************************
 *
 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include "vmwgfx_kms.h"

/* Might need a hrtimer here? */
#define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1)
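/*
 * vmw_du_cleanup - Release display unit resources
 *
 * @du: The display unit to clean up.
 *
 * Drops any cursor surface or cursor dma-buffer reference still held by
 * the display unit and tears down its DRM connector, crtc and encoder.
 */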
void vmw_du_cleanup(struct vmw_display_unit *du)
{
	if (du->cursor_surface)
		vmw_surface_unreference(&du->cursor_surface);
	if (du->cursor_dmabuf)
		vmw_dmabuf_unreference(&du->cursor_dmabuf);
	drm_connector_unregister(&du->connector);
	drm_crtc_cleanup(&du->crtc);
	drm_encoder_cleanup(&du->encoder);
	drm_connector_cleanup(&du->connector);
}
/*
 * Display Unit Cursor functions
 */
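/*
 * Define an alpha cursor image in the device FIFO. The fixed-size
 * command is followed by the raw 32-bit per-pixel image data, so the
 * reservation below covers sizeof(*cmd) plus width * height * 4 bytes
 * of payload copied right after the command.
 */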
int vmw_cursor_update_image(struct vmw_private *dev_priv,
			    u32 *image, u32 width, u32 height,
			    u32 hotspotX, u32 hotspotY)
{
	struct {
		u32 cmd;
		SVGAFifoCmdDefineAlphaCursor cursor;
	} *cmd;
	u32 image_size = width * height * 4;
	u32 cmd_size = sizeof(*cmd) + image_size;

	if (!image)
		return -EINVAL;

	cmd = vmw_fifo_reserve(dev_priv, cmd_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed.\n");
		return -ENOMEM;
	}

	memset(cmd, 0, sizeof(*cmd));

	memcpy(&cmd[1], image, image_size);

	cmd->cmd = SVGA_CMD_DEFINE_ALPHA_CURSOR;
	cmd->cursor.id = 0;
	cmd->cursor.width = width;
	cmd->cursor.height = height;
	cmd->cursor.hotspotX = hotspotX;
	cmd->cursor.hotspotY = hotspotY;

	vmw_fifo_commit_flush(dev_priv, cmd_size);

	return 0;
}
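/*
 * Update the cursor image from the contents of a dma-buffer: reserve
 * and map the buffer through TTM, then hand the kernel virtual address
 * of the mapping to vmw_cursor_update_image().
 */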
int vmw_cursor_update_dmabuf(struct vmw_private *dev_priv,
			     struct vmw_dma_buffer *dmabuf,
			     u32 width, u32 height,
			     u32 hotspotX, u32 hotspotY)
{
	struct ttm_bo_kmap_obj map;
	unsigned long kmap_offset;
	unsigned long kmap_num;
	void *virtual;
	bool dummy;
	int ret;

	kmap_offset = 0;
	kmap_num = (width*height*4 + PAGE_SIZE - 1) >> PAGE_SHIFT;

	ret = ttm_bo_reserve(&dmabuf->base, true, false, NULL);
	if (unlikely(ret != 0)) {
		DRM_ERROR("reserve failed\n");
		return -EINVAL;
	}

	ret = ttm_bo_kmap(&dmabuf->base, kmap_offset, kmap_num, &map);
	if (unlikely(ret != 0))
		goto err_unreserve;

	virtual = ttm_kmap_obj_virtual(&map, &dummy);
	ret = vmw_cursor_update_image(dev_priv, virtual, width, height,
				      hotspotX, hotspotY);

	ttm_bo_kunmap(&map);
err_unreserve:
	ttm_bo_unreserve(&dmabuf->base);

	return ret;
}
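/*
 * Move or hide the cursor by writing the FIFO cursor registers; the
 * SVGA_FIFO_CURSOR_COUNT register is bumped to tell the device that
 * the cursor state has changed.
 */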
void vmw_cursor_update_position(struct vmw_private *dev_priv,
				bool show, int x, int y)
{
	u32 *fifo_mem = dev_priv->mmio_virt;
	uint32_t count;

	vmw_mmio_write(show ? 1 : 0, fifo_mem + SVGA_FIFO_CURSOR_ON);
	vmw_mmio_write(x, fifo_mem + SVGA_FIFO_CURSOR_X);
	vmw_mmio_write(y, fifo_mem + SVGA_FIFO_CURSOR_Y);
	count = vmw_mmio_read(fifo_mem + SVGA_FIFO_CURSOR_COUNT);
	vmw_mmio_write(++count, fifo_mem + SVGA_FIFO_CURSOR_COUNT);
}
/**
 * vmw_du_crtc_cursor_set2 - Driver cursor_set2 callback.
 */
int vmw_du_crtc_cursor_set2(struct drm_crtc *crtc, struct drm_file *file_priv,
			    uint32_t handle, uint32_t width, uint32_t height,
			    int32_t hot_x, int32_t hot_y)
{
	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
	struct vmw_surface *surface = NULL;
	struct vmw_dma_buffer *dmabuf = NULL;
	s32 hotspot_x, hotspot_y;
	int ret;

	/*
	 * FIXME: Unclear whether there's any global state touched by the
	 * cursor_set function, especially vmw_cursor_update_position looks
	 * suspicious. For now take the easy route and reacquire all locks. We
	 * can do this since the caller in the drm core doesn't check anything
	 * which is protected by any locks.
	 */
	drm_modeset_unlock_crtc(crtc);
	drm_modeset_lock_all(dev_priv->dev);
	hotspot_x = hot_x + du->hotspot_x;
	hotspot_y = hot_y + du->hotspot_y;

	/* A lot of the code assumes this */
	if (handle && (width != 64 || height != 64)) {
		ret = -EINVAL;
		goto out;
	}

	if (handle) {
		struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;

		ret = vmw_user_lookup_handle(dev_priv, tfile,
					     handle, &surface, &dmabuf);
		if (ret) {
			DRM_ERROR("failed to find surface or dmabuf: %i\n", ret);
			ret = -EINVAL;
			goto out;
		}
	}

	/* need to do this before taking down old image */
	if (surface && !surface->snooper.image) {
		DRM_ERROR("surface not suitable for cursor\n");
		vmw_surface_unreference(&surface);
		ret = -EINVAL;
		goto out;
	}

	/* takedown old cursor */
	if (du->cursor_surface) {
		du->cursor_surface->snooper.crtc = NULL;
		vmw_surface_unreference(&du->cursor_surface);
	}
	if (du->cursor_dmabuf)
		vmw_dmabuf_unreference(&du->cursor_dmabuf);

	/* setup new image */
	ret = 0;
	if (surface) {
		/* vmw_user_surface_lookup takes one reference */
		du->cursor_surface = surface;

		du->cursor_surface->snooper.crtc = crtc;
		du->cursor_age = du->cursor_surface->snooper.age;
		ret = vmw_cursor_update_image(dev_priv, surface->snooper.image,
					      64, 64, hotspot_x, hotspot_y);
	} else if (dmabuf) {
		/* vmw_user_surface_lookup takes one reference */
		du->cursor_dmabuf = dmabuf;

		ret = vmw_cursor_update_dmabuf(dev_priv, dmabuf, width, height,
					       hotspot_x, hotspot_y);
	} else {
		vmw_cursor_update_position(dev_priv, false, 0, 0);
		goto out;
	}

	if (!ret) {
		vmw_cursor_update_position(dev_priv, true,
					   du->cursor_x + hotspot_x,
					   du->cursor_y + hotspot_y);
		du->core_hotspot_x = hot_x;
		du->core_hotspot_y = hot_y;
	}

out:
	drm_modeset_unlock_all(dev_priv->dev);
	drm_modeset_lock_crtc(crtc, crtc->cursor);

	return ret;
}
int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
{
	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
	bool shown = du->cursor_surface || du->cursor_dmabuf ? true : false;

	du->cursor_x = x + du->set_gui_x;
	du->cursor_y = y + du->set_gui_y;

	/*
	 * FIXME: Unclear whether there's any global state touched by the
	 * cursor_set function, especially vmw_cursor_update_position looks
	 * suspicious. For now take the easy route and reacquire all locks. We
	 * can do this since the caller in the drm core doesn't check anything
	 * which is protected by any locks.
	 */
	drm_modeset_unlock_crtc(crtc);
	drm_modeset_lock_all(dev_priv->dev);

	vmw_cursor_update_position(dev_priv, shown,
				   du->cursor_x + du->hotspot_x +
				   du->core_hotspot_x,
				   du->cursor_y + du->hotspot_y +
				   du->core_hotspot_y);

	drm_modeset_unlock_all(dev_priv->dev);
	drm_modeset_lock_crtc(crtc, crtc->cursor);

	return 0;
}
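/*
 * Snoop SurfaceDMA commands aimed at a cursor surface so that a copy of
 * the cursor image is kept in srf->snooper.image. Only the simple case
 * of a single, page-aligned, full 64x64 copy box is handled; anything
 * else is rejected with an error (see the TODOs below).
 */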
void vmw_kms_cursor_snoop(struct vmw_surface *srf,
			  struct ttm_object_file *tfile,
			  struct ttm_buffer_object *bo,
			  SVGA3dCmdHeader *header)
{
	struct ttm_bo_kmap_obj map;
	unsigned long kmap_offset;
	unsigned long kmap_num;
	SVGA3dCopyBox *box;
	unsigned box_count;
	void *virtual;
	bool dummy;
	struct vmw_dma_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceDMA dma;
	} *cmd;
	int i, ret;

	cmd = container_of(header, struct vmw_dma_cmd, header);

	/* No snooper installed */
	if (!srf->snooper.image)
		return;

	if (cmd->dma.host.face != 0 || cmd->dma.host.mipmap != 0) {
		DRM_ERROR("face and mipmap for cursors should never != 0\n");
		return;
	}

	if (cmd->header.size < 64) {
		DRM_ERROR("at least one full copy box must be given\n");
		return;
	}

	box = (SVGA3dCopyBox *)&cmd[1];
	box_count = (cmd->header.size - sizeof(SVGA3dCmdSurfaceDMA)) /
			sizeof(SVGA3dCopyBox);

	if (cmd->dma.guest.ptr.offset % PAGE_SIZE ||
	    box->x != 0    || box->y != 0    || box->z != 0    ||
	    box->srcx != 0 || box->srcy != 0 || box->srcz != 0 ||
	    box->d != 1    || box_count != 1) {
		/* TODO handle non page aligned offsets */
		/* TODO handle more dst & src != 0 */
		/* TODO handle more than one copy */
		DRM_ERROR("Can't snoop dma request for cursor!\n");
		DRM_ERROR("(%u, %u, %u) (%u, %u, %u) (%ux%ux%u) %u %u\n",
			  box->srcx, box->srcy, box->srcz,
			  box->x, box->y, box->z,
			  box->w, box->h, box->d, box_count,
			  cmd->dma.guest.ptr.offset);
		return;
	}

	kmap_offset = cmd->dma.guest.ptr.offset >> PAGE_SHIFT;
	kmap_num = (64*64*4) >> PAGE_SHIFT;

	ret = ttm_bo_reserve(bo, true, false, NULL);
	if (unlikely(ret != 0)) {
		DRM_ERROR("reserve failed\n");
		return;
	}

	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
	if (unlikely(ret != 0))
		goto err_unreserve;

	virtual = ttm_kmap_obj_virtual(&map, &dummy);

	if (box->w == 64 && cmd->dma.guest.pitch == 64*4) {
		memcpy(srf->snooper.image, virtual, 64*64*4);
	} else {
		/* Image is unsigned pointer. */
		for (i = 0; i < box->h; i++)
			memcpy(srf->snooper.image + i * 64,
			       virtual + i * cmd->dma.guest.pitch,
			       box->w * 4);
	}

	srf->snooper.age++;

	ttm_bo_kunmap(&map);
err_unreserve:
	ttm_bo_unreserve(bo);
}
/**
 * vmw_kms_legacy_hotspot_clear - Clear legacy hotspots
 *
 * @dev_priv: Pointer to the device private struct.
 *
 * Clears all legacy hotspots.
 */
void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct vmw_display_unit *du;
	struct drm_crtc *crtc;

	drm_modeset_lock_all(dev);
	drm_for_each_crtc(crtc, dev) {
		du = vmw_crtc_to_du(crtc);

		du->hotspot_x = 0;
		du->hotspot_y = 0;
	}
	drm_modeset_unlock_all(dev);
}
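/*
 * After command submission, bring on-screen cursors back up to date:
 * for every crtc whose cursor surface was modified while being snooped
 * (detected by comparing the snooper age with the cached cursor age),
 * re-emit the cursor image.
 */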
void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct vmw_display_unit *du;
	struct drm_crtc *crtc;

	mutex_lock(&dev->mode_config.mutex);

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		du = vmw_crtc_to_du(crtc);
		if (!du->cursor_surface ||
		    du->cursor_age == du->cursor_surface->snooper.age)
			continue;

		du->cursor_age = du->cursor_surface->snooper.age;
		vmw_cursor_update_image(dev_priv,
					du->cursor_surface->snooper.image,
					64, 64,
					du->hotspot_x + du->core_hotspot_x,
					du->hotspot_y + du->core_hotspot_y);
	}

	mutex_unlock(&dev->mode_config.mutex);
}
/*
 * Generic framebuffer code
 */

/*
 * Surface framebuffer code
 */
static void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
{
	struct vmw_framebuffer_surface *vfbs =
		vmw_framebuffer_to_vfbs(framebuffer);

	drm_framebuffer_cleanup(framebuffer);
	vmw_surface_unreference(&vfbs->surface);
	if (vfbs->base.user_obj)
		ttm_base_object_unref(&vfbs->base.user_obj);

	kfree(vfbs);
}
static int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
					 struct drm_file *file_priv,
					 unsigned flags, unsigned color,
					 struct drm_clip_rect *clips,
					 unsigned num_clips)
{
	struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
	struct vmw_framebuffer_surface *vfbs =
		vmw_framebuffer_to_vfbs(framebuffer);
	struct drm_clip_rect norect;
	int ret, inc = 1;

	/* Legacy Display Unit does not support 3D */
	if (dev_priv->active_display_unit == vmw_du_legacy)
		return -EINVAL;

	drm_modeset_lock_all(dev_priv->dev);

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0)) {
		drm_modeset_unlock_all(dev_priv->dev);
		return ret;
	}

	if (!num_clips) {
		num_clips = 1;
		clips = &norect;
		norect.x1 = norect.y1 = 0;
		norect.x2 = framebuffer->width;
		norect.y2 = framebuffer->height;
	} else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
		num_clips /= 2;
		inc = 2; /* skip source rects */
	}

	if (dev_priv->active_display_unit == vmw_du_screen_object)
		ret = vmw_kms_sou_do_surface_dirty(dev_priv, &vfbs->base,
						   clips, NULL, NULL, 0, 0,
						   num_clips, inc, NULL);
	else
		ret = vmw_kms_stdu_surface_dirty(dev_priv, &vfbs->base,
						 clips, NULL, NULL, 0, 0,
						 num_clips, inc, NULL);

	vmw_fifo_flush(dev_priv, false);
	ttm_read_unlock(&dev_priv->reservation_sem);

	drm_modeset_unlock_all(dev_priv->dev);

	return 0;
}
/**
 * vmw_kms_readback - Perform a readback from the screen system to
 * a dma-buffer backed framebuffer.
 *
 * @dev_priv: Pointer to the device private structure.
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * Must be set to NULL if @user_fence_rep is NULL.
 * @vfb: Pointer to the dma-buffer backed framebuffer.
 * @user_fence_rep: User-space provided structure for fence information.
 * Must be set to non-NULL if @file_priv is non-NULL.
 * @vclips: Array of clip rects.
 * @num_clips: Number of clip rects in @vclips.
 *
 * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
 * interrupted.
 */
int vmw_kms_readback(struct vmw_private *dev_priv,
		     struct drm_file *file_priv,
		     struct vmw_framebuffer *vfb,
		     struct drm_vmw_fence_rep __user *user_fence_rep,
		     struct drm_vmw_rect *vclips,
		     uint32_t num_clips)
{
	switch (dev_priv->active_display_unit) {
	case vmw_du_screen_object:
		return vmw_kms_sou_readback(dev_priv, file_priv, vfb,
					    user_fence_rep, vclips, num_clips);
	case vmw_du_screen_target:
		return vmw_kms_stdu_dma(dev_priv, file_priv, vfb,
					user_fence_rep, NULL, vclips, num_clips,
					1, false, true);
	default:
		WARN_ONCE(true,
			  "Readback called with invalid display system.\n");
	}

	return -ENOSYS;
}
static const struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = {
	.destroy = vmw_framebuffer_surface_destroy,
	.dirty = vmw_framebuffer_surface_dirty,
};
static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
					   struct vmw_surface *surface,
					   struct vmw_framebuffer **out,
					   const struct drm_mode_fb_cmd
					   *mode_cmd,
					   bool is_dmabuf_proxy)
{
	struct drm_device *dev = dev_priv->dev;
	struct vmw_framebuffer_surface *vfbs;
	enum SVGA3dSurfaceFormat format;
	int ret;

	/* 3D is only supported on HWv8 and newer hosts */
	if (dev_priv->active_display_unit == vmw_du_legacy)
		return -ENOSYS;

	/*
	 * Sanity checks.
	 */

	/* Surface must be marked as a scanout. */
	if (unlikely(!surface->scanout))
		return -EINVAL;

	if (unlikely(surface->mip_levels[0] != 1 ||
		     surface->num_sizes != 1 ||
		     surface->base_size.width < mode_cmd->width ||
		     surface->base_size.height < mode_cmd->height ||
		     surface->base_size.depth != 1)) {
		DRM_ERROR("Incompatible surface dimensions "
			  "for requested mode.\n");
		return -EINVAL;
	}

	switch (mode_cmd->depth) {
	case 32:
		format = SVGA3D_A8R8G8B8;
		break;
	case 24:
		format = SVGA3D_X8R8G8B8;
		break;
	case 16:
		format = SVGA3D_R5G6B5;
		break;
	case 15:
		format = SVGA3D_A1R5G5B5;
		break;
	default:
		DRM_ERROR("Invalid color depth: %d\n", mode_cmd->depth);
		return -EINVAL;
	}

	/*
	 * For DX, surface format validation is done when surface->scanout
	 * is set.
	 */
	if (!dev_priv->has_dx && format != surface->format) {
		DRM_ERROR("Invalid surface format for requested mode.\n");
		return -EINVAL;
	}

	vfbs = kzalloc(sizeof(*vfbs), GFP_KERNEL);
	if (!vfbs) {
		ret = -ENOMEM;
		goto out_err1;
	}

	/* XXX get the first 3 from the surface info */
	vfbs->base.base.bits_per_pixel = mode_cmd->bpp;
	vfbs->base.base.pitches[0] = mode_cmd->pitch;
	vfbs->base.base.depth = mode_cmd->depth;
	vfbs->base.base.width = mode_cmd->width;
	vfbs->base.base.height = mode_cmd->height;
	vfbs->surface = vmw_surface_reference(surface);
	vfbs->base.user_handle = mode_cmd->handle;
	vfbs->is_dmabuf_proxy = is_dmabuf_proxy;

	*out = &vfbs->base;

	ret = drm_framebuffer_init(dev, &vfbs->base.base,
				   &vmw_framebuffer_surface_funcs);
	if (ret)
		goto out_err2;

	return 0;

out_err2:
	vmw_surface_unreference(&surface);
	kfree(vfbs);
out_err1:
	return ret;
}
/*
 * Dmabuf framebuffer code
 */
static void vmw_framebuffer_dmabuf_destroy(struct drm_framebuffer *framebuffer)
{
	struct vmw_framebuffer_dmabuf *vfbd =
		vmw_framebuffer_to_vfbd(framebuffer);

	drm_framebuffer_cleanup(framebuffer);
	vmw_dmabuf_unreference(&vfbd->buffer);
	if (vfbd->base.user_obj)
		ttm_base_object_unref(&vfbd->base.user_obj);

	kfree(vfbd);
}
static int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
					struct drm_file *file_priv,
					unsigned flags, unsigned color,
					struct drm_clip_rect *clips,
					unsigned num_clips)
{
	struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
	struct vmw_framebuffer_dmabuf *vfbd =
		vmw_framebuffer_to_vfbd(framebuffer);
	struct drm_clip_rect norect;
	int ret, increment = 1;

	drm_modeset_lock_all(dev_priv->dev);

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0)) {
		drm_modeset_unlock_all(dev_priv->dev);
		return ret;
	}

	if (!num_clips) {
		num_clips = 1;
		clips = &norect;
		norect.x1 = norect.y1 = 0;
		norect.x2 = framebuffer->width;
		norect.y2 = framebuffer->height;
	} else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
		num_clips /= 2;
		increment = 2;
	}

	switch (dev_priv->active_display_unit) {
	case vmw_du_screen_target:
		ret = vmw_kms_stdu_dma(dev_priv, NULL, &vfbd->base, NULL,
				       clips, NULL, num_clips, increment,
				       true, true);
		break;
	case vmw_du_screen_object:
		ret = vmw_kms_sou_do_dmabuf_dirty(dev_priv, &vfbd->base,
						  clips, NULL, num_clips,
						  increment, true, NULL);
		break;
	case vmw_du_legacy:
		ret = vmw_kms_ldu_do_dmabuf_dirty(dev_priv, &vfbd->base, 0, 0,
						  clips, num_clips, increment);
		break;
	default:
		ret = -EINVAL;
		WARN_ONCE(true, "Dirty called with invalid display system.\n");
		break;
	}

	vmw_fifo_flush(dev_priv, false);
	ttm_read_unlock(&dev_priv->reservation_sem);

	drm_modeset_unlock_all(dev_priv->dev);

	return ret;
}
static const struct drm_framebuffer_funcs vmw_framebuffer_dmabuf_funcs = {
	.destroy = vmw_framebuffer_dmabuf_destroy,
	.dirty = vmw_framebuffer_dmabuf_dirty,
};
/*
 * Pin the dmabuffer to the start of vram.
 */
static int vmw_framebuffer_pin(struct vmw_framebuffer *vfb)
{
	struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
	struct vmw_dma_buffer *buf;
	int ret;

	buf = vfb->dmabuf ? vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
		vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.backup;

	if (!buf)
		return 0;

	switch (dev_priv->active_display_unit) {
	case vmw_du_legacy:
		vmw_overlay_pause_all(dev_priv);
		ret = vmw_dmabuf_pin_in_start_of_vram(dev_priv, buf, false);
		vmw_overlay_resume_all(dev_priv);
		break;
	case vmw_du_screen_object:
	case vmw_du_screen_target:
		if (vfb->dmabuf)
			return vmw_dmabuf_pin_in_vram_or_gmr(dev_priv, buf,
							     false);

		return vmw_dmabuf_pin_in_placement(dev_priv, buf,
						   &vmw_mob_placement, false);
	default:
		return -EINVAL;
	}

	return ret;
}
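/*
 * Unpin is the inverse of vmw_framebuffer_pin() above and simply drops
 * the pin on the backing buffer, regardless of display unit type.
 */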
static int vmw_framebuffer_unpin(struct vmw_framebuffer *vfb)
{
	struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
	struct vmw_dma_buffer *buf;

	buf = vfb->dmabuf ? vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
		vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.backup;

	if (WARN_ON(!buf))
		return 0;

	return vmw_dmabuf_unpin(dev_priv, buf, false);
}
/**
 * vmw_create_dmabuf_proxy - create a proxy surface for the DMA buf
 *
 * @dev: DRM device
 * @mode_cmd: parameters for the new surface
 * @dmabuf_mob: MOB backing the DMA buf
 * @srf_out: newly created surface
 *
 * When the content FB is a DMA buf, we create a surface as a proxy to the
 * same buffer. This way we can do a surface copy rather than a surface DMA.
 * This is a more efficient approach.
 *
 * RETURNS:
 * 0 on success, error code otherwise
 */
static int vmw_create_dmabuf_proxy(struct drm_device *dev,
				   const struct drm_mode_fb_cmd *mode_cmd,
				   struct vmw_dma_buffer *dmabuf_mob,
				   struct vmw_surface **srf_out)
{
	uint32_t format;
	struct drm_vmw_size content_base_size;
	struct vmw_resource *res;
	unsigned int bytes_pp;
	int ret;

	switch (mode_cmd->depth) {
	case 32:
	case 24:
		format = SVGA3D_X8R8G8B8;
		bytes_pp = 4;
		break;

	case 16:
	case 15:
		format = SVGA3D_R5G6B5;
		bytes_pp = 2;
		break;

	case 8:
		format = SVGA3D_P8;
		bytes_pp = 1;
		break;

	default:
		DRM_ERROR("Invalid framebuffer format %d\n", mode_cmd->depth);
		return -EINVAL;
	}

	content_base_size.width  = mode_cmd->pitch / bytes_pp;
	content_base_size.height = mode_cmd->height;
	content_base_size.depth  = 1;

	ret = vmw_surface_gb_priv_define(dev,
			0, /* kernel visible only */
			0, /* flags */
			format,
			true, /* can be a scanout buffer */
			1, /* num of mip levels */
			0,
			0,
			content_base_size,
			srf_out);
	if (ret) {
		DRM_ERROR("Failed to allocate proxy content buffer\n");
		return ret;
	}

	res = &(*srf_out)->res;

	/* Reserve and switch the backing mob. */
	mutex_lock(&res->dev_priv->cmdbuf_mutex);
	(void) vmw_resource_reserve(res, false, true);
	vmw_dmabuf_unreference(&res->backup);
	res->backup = vmw_dmabuf_reference(dmabuf_mob);
	res->backup_offset = 0;
	vmw_resource_unreserve(res, false, NULL, 0);
	mutex_unlock(&res->dev_priv->cmdbuf_mutex);

	return 0;
}
static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
					  struct vmw_dma_buffer *dmabuf,
					  struct vmw_framebuffer **out,
					  const struct drm_mode_fb_cmd
					  *mode_cmd)
{
	struct drm_device *dev = dev_priv->dev;
	struct vmw_framebuffer_dmabuf *vfbd;
	unsigned int requested_size;
	int ret;

	requested_size = mode_cmd->height * mode_cmd->pitch;
	if (unlikely(requested_size > dmabuf->base.num_pages * PAGE_SIZE)) {
		DRM_ERROR("Screen buffer object size is too small "
			  "for requested mode.\n");
		return -EINVAL;
	}

	/* Limited framebuffer color depth support for screen objects */
	if (dev_priv->active_display_unit == vmw_du_screen_object) {
		switch (mode_cmd->depth) {
		case 32:
		case 24:
			/* Only support 32 bpp for 32 and 24 depth fbs */
			if (mode_cmd->bpp == 32)
				break;

			DRM_ERROR("Invalid color depth/bpp: %d %d\n",
				  mode_cmd->depth, mode_cmd->bpp);
			return -EINVAL;
		case 16:
		case 15:
			/* Only support 16 bpp for 16 and 15 depth fbs */
			if (mode_cmd->bpp == 16)
				break;

			DRM_ERROR("Invalid color depth/bpp: %d %d\n",
				  mode_cmd->depth, mode_cmd->bpp);
			return -EINVAL;
		default:
			DRM_ERROR("Invalid color depth: %d\n", mode_cmd->depth);
			return -EINVAL;
		}
	}

	vfbd = kzalloc(sizeof(*vfbd), GFP_KERNEL);
	if (!vfbd) {
		ret = -ENOMEM;
		goto out_err1;
	}

	vfbd->base.base.bits_per_pixel = mode_cmd->bpp;
	vfbd->base.base.pitches[0] = mode_cmd->pitch;
	vfbd->base.base.depth = mode_cmd->depth;
	vfbd->base.base.width = mode_cmd->width;
	vfbd->base.base.height = mode_cmd->height;
	vfbd->base.dmabuf = true;
	vfbd->buffer = vmw_dmabuf_reference(dmabuf);
	vfbd->base.user_handle = mode_cmd->handle;
	*out = &vfbd->base;

	ret = drm_framebuffer_init(dev, &vfbd->base.base,
				   &vmw_framebuffer_dmabuf_funcs);
	if (ret)
		goto out_err2;

	return 0;

out_err2:
	vmw_dmabuf_unreference(&dmabuf);
	kfree(vfbd);
out_err1:
	return ret;
}
/**
 * vmw_kms_new_framebuffer - Create a new framebuffer.
 *
 * @dev_priv: Pointer to device private struct.
 * @dmabuf: Pointer to dma buffer to wrap the kms framebuffer around.
 * Either @dmabuf or @surface must be NULL.
 * @surface: Pointer to a surface to wrap the kms framebuffer around.
 * Either @dmabuf or @surface must be NULL.
 * @only_2d: No presents will occur to this dma buffer based framebuffer. This
 * helps the code to do some important optimizations.
 * @mode_cmd: Frame-buffer metadata.
 */
struct vmw_framebuffer *
vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
			struct vmw_dma_buffer *dmabuf,
			struct vmw_surface *surface,
			bool only_2d,
			const struct drm_mode_fb_cmd *mode_cmd)
{
	struct vmw_framebuffer *vfb = NULL;
	bool is_dmabuf_proxy = false;
	int ret;

	/*
	 * We cannot use the SurfaceDMA command in a non-accelerated VM,
	 * therefore, wrap the DMA buf in a surface so we can use the
	 * SurfaceCopy command.
	 */
	if (dmabuf && only_2d &&
	    dev_priv->active_display_unit == vmw_du_screen_target) {
		ret = vmw_create_dmabuf_proxy(dev_priv->dev, mode_cmd,
					      dmabuf, &surface);
		if (ret)
			return ERR_PTR(ret);

		is_dmabuf_proxy = true;
	}

	/* Create the new framebuffer depending on what we have */
	if (surface) {
		ret = vmw_kms_new_framebuffer_surface(dev_priv, surface, &vfb,
						      mode_cmd,
						      is_dmabuf_proxy);

		/*
		 * vmw_create_dmabuf_proxy() adds a reference that is no longer
		 * needed
		 */
		if (is_dmabuf_proxy)
			vmw_surface_unreference(&surface);
	} else if (dmabuf) {
		ret = vmw_kms_new_framebuffer_dmabuf(dev_priv, dmabuf, &vfb,
						     mode_cmd);
	} else {
		BUG();
	}

	if (ret)
		return ERR_PTR(ret);

	vfb->pin = vmw_framebuffer_pin;
	vfb->unpin = vmw_framebuffer_unpin;

	return vfb;
}
/*
 * Generic Kernel modesetting functions
 */

static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
						 struct drm_file *file_priv,
						 const struct drm_mode_fb_cmd2 *mode_cmd2)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_framebuffer *vfb = NULL;
	struct vmw_surface *surface = NULL;
	struct vmw_dma_buffer *bo = NULL;
	struct ttm_base_object *user_obj;
	struct drm_mode_fb_cmd mode_cmd;
	int ret;

	mode_cmd.width = mode_cmd2->width;
	mode_cmd.height = mode_cmd2->height;
	mode_cmd.pitch = mode_cmd2->pitches[0];
	mode_cmd.handle = mode_cmd2->handles[0];
	drm_fb_get_bpp_depth(mode_cmd2->pixel_format, &mode_cmd.depth,
			     &mode_cmd.bpp);

	/**
	 * This code should be conditioned on Screen Objects not being used.
	 * If screen objects are used, we can allocate a GMR to hold the
	 * requested framebuffer.
	 */

	if (!vmw_kms_validate_mode_vram(dev_priv,
					mode_cmd.pitch,
					mode_cmd.height)) {
		DRM_ERROR("Requested mode exceeds bounding box limit.\n");
		return ERR_PTR(-ENOMEM);
	}

	/*
	 * Take a reference on the user object of the resource
	 * backing the kms fb. This ensures that user-space handle
	 * lookups on that resource will always work as long as
	 * it's registered with a kms framebuffer. This is important,
	 * since vmw_execbuf_process identifies resources in the
	 * command stream using user-space handles.
	 */

	user_obj = ttm_base_object_lookup(tfile, mode_cmd.handle);
	if (unlikely(user_obj == NULL)) {
		DRM_ERROR("Could not locate requested kms frame buffer.\n");
		return ERR_PTR(-ENOENT);
	}

	/**
	 * End conditioned code.
	 */

	/* returns either a dmabuf or surface */
	ret = vmw_user_lookup_handle(dev_priv, tfile,
				     mode_cmd.handle,
				     &surface, &bo);
	if (ret)
		goto err_out;

	vfb = vmw_kms_new_framebuffer(dev_priv, bo, surface,
				      !(dev_priv->capabilities & SVGA_CAP_3D),
				      &mode_cmd);
	if (IS_ERR(vfb)) {
		ret = PTR_ERR(vfb);
		goto err_out;
	}

err_out:
	/* vmw_user_lookup_handle takes one ref so does new_fb */
	if (bo)
		vmw_dmabuf_unreference(&bo);
	if (surface)
		vmw_surface_unreference(&surface);

	if (ret) {
		DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
		ttm_base_object_unref(&user_obj);
		return ERR_PTR(ret);
	} else
		vfb->user_obj = user_obj;

	return &vfb->base;
}
static const struct drm_mode_config_funcs vmw_kms_funcs = {
	.fb_create = vmw_kms_fb_create,
};
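/*
 * Generic present: on Screen Objects a present is just a surface dirty
 * of the destination region, so delegate to
 * vmw_kms_sou_do_surface_dirty().
 */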
static int vmw_kms_generic_present(struct vmw_private *dev_priv,
				   struct drm_file *file_priv,
				   struct vmw_framebuffer *vfb,
				   struct vmw_surface *surface,
				   uint32_t sid,
				   int32_t destX, int32_t destY,
				   struct drm_vmw_rect *clips,
				   uint32_t num_clips)
{
	return vmw_kms_sou_do_surface_dirty(dev_priv, vfb, NULL, clips,
					    &surface->res, destX, destY,
					    num_clips, 1, NULL);
}
int vmw_kms_present(struct vmw_private *dev_priv,
		    struct drm_file *file_priv,
		    struct vmw_framebuffer *vfb,
		    struct vmw_surface *surface,
		    uint32_t sid,
		    int32_t destX, int32_t destY,
		    struct drm_vmw_rect *clips,
		    uint32_t num_clips)
{
	int ret;

	switch (dev_priv->active_display_unit) {
	case vmw_du_screen_target:
		ret = vmw_kms_stdu_surface_dirty(dev_priv, vfb, NULL, clips,
						 &surface->res, destX, destY,
						 num_clips, 1, NULL);
		break;
	case vmw_du_screen_object:
		ret = vmw_kms_generic_present(dev_priv, file_priv, vfb, surface,
					      sid, destX, destY, clips,
					      num_clips);
		break;
	default:
		WARN_ONCE(true,
			  "Present called with invalid display system.\n");
		ret = -ENOSYS;
		break;
	}
	if (ret)
		return ret;

	vmw_fifo_flush(dev_priv, false);

	return 0;
}
static void
vmw_kms_create_hotplug_mode_update_property(struct vmw_private *dev_priv)
{
	if (dev_priv->hotplug_mode_update_property)
		return;

	dev_priv->hotplug_mode_update_property =
		drm_property_create_range(dev_priv->dev,
					  DRM_MODE_PROP_IMMUTABLE,
					  "hotplug_mode_update", 0, 1);

	if (!dev_priv->hotplug_mode_update_property)
		return;
}
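/*
 * Initialize kms. Display units are tried in order of preference:
 * Screen Targets first, then Screen Objects, with the Legacy Display
 * Unit as the final fallback.
 */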
int vmw_kms_init(struct vmw_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	int ret;

	drm_mode_config_init(dev);
	dev->mode_config.funcs = &vmw_kms_funcs;
	dev->mode_config.min_width = 1;
	dev->mode_config.min_height = 1;
	dev->mode_config.max_width = dev_priv->texture_max_width;
	dev->mode_config.max_height = dev_priv->texture_max_height;

	drm_mode_create_suggested_offset_properties(dev);
	vmw_kms_create_hotplug_mode_update_property(dev_priv);

	ret = vmw_kms_stdu_init_display(dev_priv);
	if (ret) {
		ret = vmw_kms_sou_init_display(dev_priv);
		if (ret) /* Fallback */
			ret = vmw_kms_ldu_init_display(dev_priv);
	}

	return ret;
}
int vmw_kms_close(struct vmw_private *dev_priv)
{
	int ret;

	/*
	 * Docs say we should take the lock before calling this function
	 * but since it destroys encoders and our destructor calls
	 * drm_encoder_cleanup which takes the lock we deadlock.
	 */
	drm_mode_config_cleanup(dev_priv->dev);
	if (dev_priv->active_display_unit == vmw_du_screen_object)
		ret = vmw_kms_sou_close_display(dev_priv);
	else if (dev_priv->active_display_unit == vmw_du_screen_target)
		ret = vmw_kms_stdu_close_display(dev_priv);
	else
		ret = vmw_kms_ldu_close_display(dev_priv);

	return ret;
}
int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct drm_vmw_cursor_bypass_arg *arg = data;
	struct vmw_display_unit *du;
	struct drm_crtc *crtc;
	int ret = 0;

	mutex_lock(&dev->mode_config.mutex);
	if (arg->flags & DRM_VMW_CURSOR_BYPASS_ALL) {

		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
			du = vmw_crtc_to_du(crtc);
			du->hotspot_x = arg->xhot;
			du->hotspot_y = arg->yhot;
		}

		mutex_unlock(&dev->mode_config.mutex);
		return 0;
	}

	crtc = drm_crtc_find(dev, arg->crtc_id);
	if (!crtc) {
		ret = -ENOENT;
		goto out;
	}

	du = vmw_crtc_to_du(crtc);

	du->hotspot_x = arg->xhot;
	du->hotspot_y = arg->yhot;

out:
	mutex_unlock(&dev->mode_config.mutex);

	return ret;
}
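/*
 * Program the device scanout registers. The pitch goes either through
 * the dedicated pitchlock register or through the FIFO, depending on
 * the device capabilities.
 */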
int vmw_kms_write_svga(struct vmw_private *vmw_priv,
		       unsigned width, unsigned height, unsigned pitch,
		       unsigned bpp, unsigned depth)
{
	if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
		vmw_write(vmw_priv, SVGA_REG_PITCHLOCK, pitch);
	else if (vmw_fifo_have_pitchlock(vmw_priv))
		vmw_mmio_write(pitch, vmw_priv->mmio_virt +
			       SVGA_FIFO_PITCHLOCK);
	vmw_write(vmw_priv, SVGA_REG_WIDTH, width);
	vmw_write(vmw_priv, SVGA_REG_HEIGHT, height);
	vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, bpp);

	if (vmw_read(vmw_priv, SVGA_REG_DEPTH) != depth) {
		DRM_ERROR("Invalid depth %u for %u bpp, host expects %u\n",
			  depth, bpp, vmw_read(vmw_priv, SVGA_REG_DEPTH));
		return -EINVAL;
	}

	return 0;
}
int vmw_kms_save_vga(struct vmw_private *vmw_priv)
{
	struct vmw_vga_topology_state *save;
	uint32_t i;

	vmw_priv->vga_width = vmw_read(vmw_priv, SVGA_REG_WIDTH);
	vmw_priv->vga_height = vmw_read(vmw_priv, SVGA_REG_HEIGHT);
	vmw_priv->vga_bpp = vmw_read(vmw_priv, SVGA_REG_BITS_PER_PIXEL);
	if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
		vmw_priv->vga_pitchlock =
			vmw_read(vmw_priv, SVGA_REG_PITCHLOCK);
	else if (vmw_fifo_have_pitchlock(vmw_priv))
		vmw_priv->vga_pitchlock = vmw_mmio_read(vmw_priv->mmio_virt +
							SVGA_FIFO_PITCHLOCK);

	if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY))
		return 0;

	vmw_priv->num_displays = vmw_read(vmw_priv,
					  SVGA_REG_NUM_GUEST_DISPLAYS);

	if (vmw_priv->num_displays == 0)
		vmw_priv->num_displays = 1;

	for (i = 0; i < vmw_priv->num_displays; ++i) {
		save = &vmw_priv->vga_save[i];
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, i);
		save->primary = vmw_read(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY);
		save->pos_x = vmw_read(vmw_priv, SVGA_REG_DISPLAY_POSITION_X);
		save->pos_y = vmw_read(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y);
		save->width = vmw_read(vmw_priv, SVGA_REG_DISPLAY_WIDTH);
		save->height = vmw_read(vmw_priv, SVGA_REG_DISPLAY_HEIGHT);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
		if (i == 0 && vmw_priv->num_displays == 1 &&
		    save->width == 0 && save->height == 0) {

			/*
			 * It should be fairly safe to assume that these
			 * values are uninitialized.
			 */

			save->width = vmw_priv->vga_width - save->pos_x;
			save->height = vmw_priv->vga_height - save->pos_y;
		}
	}

	return 0;
}
int vmw_kms_restore_vga(struct vmw_private *vmw_priv)
{
	struct vmw_vga_topology_state *save;
	uint32_t i;

	vmw_write(vmw_priv, SVGA_REG_WIDTH, vmw_priv->vga_width);
	vmw_write(vmw_priv, SVGA_REG_HEIGHT, vmw_priv->vga_height);
	vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, vmw_priv->vga_bpp);
	if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
		vmw_write(vmw_priv, SVGA_REG_PITCHLOCK,
			  vmw_priv->vga_pitchlock);
	else if (vmw_fifo_have_pitchlock(vmw_priv))
		vmw_mmio_write(vmw_priv->vga_pitchlock,
			       vmw_priv->mmio_virt + SVGA_FIFO_PITCHLOCK);

	if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY))
		return 0;

	for (i = 0; i < vmw_priv->num_displays; ++i) {
		save = &vmw_priv->vga_save[i];
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, i);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, save->primary);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, save->pos_x);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, save->pos_y);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, save->width);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, save->height);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
	}

	return 0;
}
bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
				uint32_t pitch,
				uint32_t height)
{
	return ((u64) pitch * (u64) height) < (u64)
		((dev_priv->active_display_unit == vmw_du_screen_target) ?
		 dev_priv->prim_bb_mem : dev_priv->vram_size);
}
/**
 * Function called by DRM code with vbl_lock held.
 */
u32 vmw_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
	return 0;
}

/**
 * Function called by DRM code with vbl_lock held.
 */
int vmw_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
	return -ENOSYS;
}

/**
 * Function called by DRM code with vbl_lock held.
 */
void vmw_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
}
/*
 * Small shared kms functions.
 */

static int vmw_du_update_layout(struct vmw_private *dev_priv, unsigned num,
				struct drm_vmw_rect *rects)
{
	struct drm_device *dev = dev_priv->dev;
	struct vmw_display_unit *du;
	struct drm_connector *con;

	mutex_lock(&dev->mode_config.mutex);

#if 0
	{
	unsigned int i;

	DRM_INFO("%s: new layout ", __func__);
	for (i = 0; i < num; i++)
		DRM_INFO("(%i, %i %ux%u) ", rects[i].x, rects[i].y,
			 rects[i].w, rects[i].h);
	DRM_INFO("\n");
	}
#endif

	list_for_each_entry(con, &dev->mode_config.connector_list, head) {
		du = vmw_connector_to_du(con);
		if (num > du->unit) {
			du->pref_width = rects[du->unit].w;
			du->pref_height = rects[du->unit].h;
			du->pref_active = true;
			du->gui_x = rects[du->unit].x;
			du->gui_y = rects[du->unit].y;
			drm_object_property_set_value
			  (&con->base, dev->mode_config.suggested_x_property,
			   du->gui_x);
			drm_object_property_set_value
			  (&con->base, dev->mode_config.suggested_y_property,
			   du->gui_y);
		} else {
			du->pref_width = 800;
			du->pref_height = 600;
			du->pref_active = false;
			drm_object_property_set_value
			  (&con->base, dev->mode_config.suggested_x_property,
			   0);
			drm_object_property_set_value
			  (&con->base, dev->mode_config.suggested_y_property,
			   0);
		}
		con->status = vmw_du_connector_detect(con, true);
	}

	mutex_unlock(&dev->mode_config.mutex);
	drm_sysfs_hotplug_event(dev);

	return 0;
}
int vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
			  u16 *r, u16 *g, u16 *b,
			  uint32_t size)
{
	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
	int i;

	for (i = 0; i < size; i++) {
		DRM_DEBUG("%d r/g/b = 0x%04x / 0x%04x / 0x%04x\n", i,
			  r[i], g[i], b[i]);
		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 0, r[i] >> 8);
		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 1, g[i] >> 8);
		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 2, b[i] >> 8);
	}

	return 0;
}
int vmw_du_connector_dpms(struct drm_connector *connector, int mode)
{
	return 0;
}
enum drm_connector_status
vmw_du_connector_detect(struct drm_connector *connector, bool force)
{
	uint32_t num_displays;
	struct drm_device *dev = connector->dev;
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_display_unit *du = vmw_connector_to_du(connector);

	num_displays = vmw_read(dev_priv, SVGA_REG_NUM_DISPLAYS);

	return ((vmw_connector_to_du(connector)->unit < num_displays &&
		 du->pref_active) ?
		connector_status_connected : connector_status_disconnected);
}
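/*
 * Static table of built-in display modes (standard DMT-style timings,
 * mostly 60Hz). The zero-filled entry terminates the list.
 */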
static struct drm_display_mode vmw_kms_connector_builtin[] = {
	/* 640x480@60Hz */
	{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
		   752, 800, 0, 480, 489, 492, 525, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
	/* 800x600@60Hz */
	{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
		   968, 1056, 0, 600, 601, 605, 628, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1024x768@60Hz */
	{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
		   1184, 1344, 0, 768, 771, 777, 806, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
	/* 1152x864@75Hz */
	{ DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
		   1344, 1600, 0, 864, 865, 868, 900, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1280x768@60Hz */
	{ DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344,
		   1472, 1664, 0, 768, 771, 778, 798, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1280x800@60Hz */
	{ DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 83500, 1280, 1352,
		   1480, 1680, 0, 800, 803, 809, 831, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
	/* 1280x960@60Hz */
	{ DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1376,
		   1488, 1800, 0, 960, 961, 964, 1000, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1280x1024@60Hz */
	{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1328,
		   1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1360x768@60Hz */
	{ DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 85500, 1360, 1424,
		   1536, 1792, 0, 768, 771, 777, 795, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1400x1050@60Hz */
	{ DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 121750, 1400, 1488,
		   1632, 1864, 0, 1050, 1053, 1057, 1089, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1440x900@60Hz */
	{ DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 106500, 1440, 1520,
		   1672, 1904, 0, 900, 903, 909, 934, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1600x1200@60Hz */
	{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600, 1664,
		   1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1680x1050@60Hz */
	{ DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 146250, 1680, 1784,
		   1960, 2240, 0, 1050, 1053, 1059, 1089, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1792x1344@60Hz */
	{ DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 204750, 1792, 1920,
		   2120, 2448, 0, 1344, 1345, 1348, 1394, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1856x1392@60Hz */
	{ DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952,
		   2176, 2528, 0, 1392, 1393, 1396, 1439, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1920x1200@60Hz */
	{ DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 193250, 1920, 2056,
		   2256, 2592, 0, 1200, 1203, 1209, 1245, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1920x1440@60Hz */
	{ DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048,
		   2256, 2600, 0, 1440, 1441, 1444, 1500, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 2560x1600@60Hz */
	{ DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752,
		   3032, 3504, 0, 1600, 1603, 1609, 1658, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* Terminate */
	{ DRM_MODE("", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) },
};
/**
 * vmw_guess_mode_timing - Provide fake timings for a
 * 60Hz vrefresh mode.
 *
 * @mode - Pointer to a struct drm_display_mode with hdisplay and vdisplay
 * members filled in.
 */
void vmw_guess_mode_timing(struct drm_display_mode *mode)
{
	mode->hsync_start = mode->hdisplay + 50;
	mode->hsync_end = mode->hsync_start + 50;
	mode->htotal = mode->hsync_end + 50;

	mode->vsync_start = mode->vdisplay + 50;
	mode->vsync_end = mode->vsync_start + 50;
	mode->vtotal = mode->vsync_end + 50;

	mode->clock = (u32)mode->htotal * (u32)mode->vtotal / 100 * 6;
	mode->vrefresh = drm_mode_vrefresh(mode);
}
int vmw_du_connector_fill_modes(struct drm_connector *connector,
				uint32_t max_width, uint32_t max_height)
{
	struct vmw_display_unit *du = vmw_connector_to_du(connector);
	struct drm_device *dev = connector->dev;
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_display_mode *mode = NULL;
	struct drm_display_mode *bmode;
	struct drm_display_mode prefmode = { DRM_MODE("preferred",
		DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
	};
	int i;
	u32 assumed_bpp = 4;

	if (dev_priv->assume_16bpp)
		assumed_bpp = 2;

	if (dev_priv->active_display_unit == vmw_du_screen_target) {
		max_width  = min(max_width,  dev_priv->stdu_max_width);
		max_height = min(max_height, dev_priv->stdu_max_height);
	}

	/* Add preferred mode */
	mode = drm_mode_duplicate(dev, &prefmode);
	if (!mode)
		return 0;
	mode->hdisplay = du->pref_width;
	mode->vdisplay = du->pref_height;
	vmw_guess_mode_timing(mode);

	if (vmw_kms_validate_mode_vram(dev_priv,
				       mode->hdisplay * assumed_bpp,
				       mode->vdisplay)) {
		drm_mode_probed_add(connector, mode);
	} else {
		drm_mode_destroy(dev, mode);
		mode = NULL;
	}

	if (du->pref_mode) {
		list_del_init(&du->pref_mode->head);
		drm_mode_destroy(dev, du->pref_mode);
	}

	/* mode might be null here, this is intended */
	du->pref_mode = mode;

	for (i = 0; vmw_kms_connector_builtin[i].type != 0; i++) {
		bmode = &vmw_kms_connector_builtin[i];
		if (bmode->hdisplay > max_width ||
		    bmode->vdisplay > max_height)
			continue;

		if (!vmw_kms_validate_mode_vram(dev_priv,
						bmode->hdisplay * assumed_bpp,
						bmode->vdisplay))
			continue;

		mode = drm_mode_duplicate(dev, bmode);
		if (!mode)
			return 0;
		mode->vrefresh = drm_mode_vrefresh(mode);

		drm_mode_probed_add(connector, mode);
	}

	drm_mode_connector_list_update(connector);
	/* Move the preferred mode first, help apps pick the right mode. */
	drm_mode_sort(&connector->modes);

	return 1;
}
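/*
 * Connector property setter. Only the implicit placement property is
 * handled here; it toggles whether the display unit takes part in
 * implicit framebuffer placement.
 */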
int vmw_du_connector_set_property(struct drm_connector *connector,
				  struct drm_property *property,
				  uint64_t val)
{
	struct vmw_display_unit *du = vmw_connector_to_du(connector);
	struct vmw_private *dev_priv = vmw_priv(connector->dev);

	if (property == dev_priv->implicit_placement_property)
		du->is_implicit = val;

	return 0;
}
int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_update_layout_arg *arg =
		(struct drm_vmw_update_layout_arg *)data;
	void __user *user_rects;
	struct drm_vmw_rect *rects;
	unsigned rects_size;
	int ret;
	int i;
	u64 total_pixels = 0;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct drm_vmw_rect bounding_box = {0};

	if (!arg->num_outputs) {
		struct drm_vmw_rect def_rect = {0, 0, 800, 600};
		vmw_du_update_layout(dev_priv, 1, &def_rect);
		return 0;
	}

	rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect);
	rects = kcalloc(arg->num_outputs, sizeof(struct drm_vmw_rect),
			GFP_KERNEL);
	if (unlikely(!rects))
		return -ENOMEM;

	user_rects = (void __user *)(unsigned long)arg->rects;
	ret = copy_from_user(rects, user_rects, rects_size);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to get rects.\n");
		ret = -EFAULT;
		goto out_free;
	}

	for (i = 0; i < arg->num_outputs; ++i) {
		if (rects[i].x < 0 ||
		    rects[i].y < 0 ||
		    rects[i].x + rects[i].w > mode_config->max_width ||
		    rects[i].y + rects[i].h > mode_config->max_height) {
			DRM_ERROR("Invalid GUI layout.\n");
			ret = -EINVAL;
			goto out_free;
		}

		/*
		 * bounding_box.w and bounding_box.h are used as
		 * lower-right coordinates
		 */
		if (rects[i].x + rects[i].w > bounding_box.w)
			bounding_box.w = rects[i].x + rects[i].w;

		if (rects[i].y + rects[i].h > bounding_box.h)
			bounding_box.h = rects[i].y + rects[i].h;

		total_pixels += (u64) rects[i].w * (u64) rects[i].h;
	}

	if (dev_priv->active_display_unit == vmw_du_screen_target) {
		/*
		 * For Screen Targets, the limits for a topology are:
		 *	1. Bounding box (assuming 32bpp) must be < prim_bb_mem
		 *	2. Total pixels (assuming 32bpp) must be < prim_bb_mem
		 */
		u64 bb_mem = bounding_box.w * bounding_box.h * 4;
		u64 pixel_mem = total_pixels * 4;

		if (bb_mem > dev_priv->prim_bb_mem) {
			DRM_ERROR("Topology is beyond supported limits.\n");
			ret = -EINVAL;
			goto out_free;
		}

		if (pixel_mem > dev_priv->prim_bb_mem) {
			DRM_ERROR("Combined output size too large\n");
			ret = -EINVAL;
			goto out_free;
		}
	}

	vmw_du_update_layout(dev_priv, arg->num_outputs, rects);

out_free:
	kfree(rects);
	return ret;
}
/**
 * vmw_kms_helper_dirty - Helper to build commands and perform actions based
 * on a set of cliprects and a set of display units.
 *
 * @dev_priv: Pointer to a device private structure.
 * @framebuffer: Pointer to the framebuffer on which to perform the actions.
 * @clips: A set of struct drm_clip_rect. Either this or @vclips must be NULL.
 * Cliprects are given in framebuffer coordinates.
 * @vclips: A set of struct drm_vmw_rect cliprects. Either this or @clips must
 * be NULL. Cliprects are given in source coordinates.
 * @dest_x: X coordinate offset for the crtc / destination clip rects.
 * @dest_y: Y coordinate offset for the crtc / destination clip rects.
 * @num_clips: Number of cliprects in the @clips or @vclips array.
 * @increment: Integer with which to increment the clip counter when looping.
 * Used to skip a predetermined number of clip rects.
 * @dirty: Closure structure. See the description of struct vmw_kms_dirty.
 */
int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
			 struct vmw_framebuffer *framebuffer,
			 const struct drm_clip_rect *clips,
			 const struct drm_vmw_rect *vclips,
			 s32 dest_x, s32 dest_y,
			 s32 num_clips,
			 int increment,
			 struct vmw_kms_dirty *dirty)
{
	struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
	struct drm_crtc *crtc;
	u32 num_units = 0;
	u32 i, k;

	dirty->dev_priv = dev_priv;

	list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list, head) {
		if (crtc->primary->fb != &framebuffer->base)
			continue;
		units[num_units++] = vmw_crtc_to_du(crtc);
	}

	for (k = 0; k < num_units; k++) {
		struct vmw_display_unit *unit = units[k];
		s32 crtc_x = unit->crtc.x;
		s32 crtc_y = unit->crtc.y;
		s32 crtc_width = unit->crtc.mode.hdisplay;
		s32 crtc_height = unit->crtc.mode.vdisplay;
		const struct drm_clip_rect *clips_ptr = clips;
		const struct drm_vmw_rect *vclips_ptr = vclips;

		dirty->unit = unit;
		if (dirty->fifo_reserve_size > 0) {
			dirty->cmd = vmw_fifo_reserve(dev_priv,
						      dirty->fifo_reserve_size);
			if (!dirty->cmd) {
				DRM_ERROR("Couldn't reserve fifo space "
					  "for dirty blits.\n");
				return -ENOMEM;
			}
			memset(dirty->cmd, 0, dirty->fifo_reserve_size);
		}
		dirty->num_hits = 0;
		for (i = 0; i < num_clips; i++, clips_ptr += increment,
		     vclips_ptr += increment) {
			s32 clip_left;
			s32 clip_top;

			/*
			 * Select clip array type. Note that integer type
			 * in @clips is unsigned short, whereas in @vclips
			 * it's 32-bit.
			 */
			if (clips) {
				dirty->fb_x = (s32) clips_ptr->x1;
				dirty->fb_y = (s32) clips_ptr->y1;
				dirty->unit_x2 = (s32) clips_ptr->x2 + dest_x -
					crtc_x;
				dirty->unit_y2 = (s32) clips_ptr->y2 + dest_y -
					crtc_y;
			} else {
				dirty->fb_x = vclips_ptr->x;
				dirty->fb_y = vclips_ptr->y;
				dirty->unit_x2 = dirty->fb_x + vclips_ptr->w +
					dest_x - crtc_x;
				dirty->unit_y2 = dirty->fb_y + vclips_ptr->h +
					dest_y - crtc_y;
			}

			dirty->unit_x1 = dirty->fb_x + dest_x - crtc_x;
			dirty->unit_y1 = dirty->fb_y + dest_y - crtc_y;

			/* Skip this clip if it's outside the crtc region */
			if (dirty->unit_x1 >= crtc_width ||
			    dirty->unit_y1 >= crtc_height ||
			    dirty->unit_x2 <= 0 || dirty->unit_y2 <= 0)
				continue;

			/* Clip right and bottom to crtc limits */
			dirty->unit_x2 = min_t(s32, dirty->unit_x2,
					       crtc_width);
			dirty->unit_y2 = min_t(s32, dirty->unit_y2,
					       crtc_height);

			/* Clip left and top to crtc limits */
			clip_left = min_t(s32, dirty->unit_x1, 0);
			clip_top = min_t(s32, dirty->unit_y1, 0);
			dirty->unit_x1 -= clip_left;
			dirty->unit_y1 -= clip_top;
			dirty->fb_x -= clip_left;
			dirty->fb_y -= clip_top;

			dirty->clip(dirty);
		}

		dirty->fifo_commit(dirty);
	}

	return 0;
}
/**
 * vmw_kms_helper_buffer_prepare - Reserve and validate a buffer object before
 * command submission.
 *
 * @dev_priv: Pointer to a device private structure.
 * @buf: The buffer object
 * @interruptible: Whether to perform waits as interruptible.
 * @validate_as_mob: Whether the buffer should be validated as a MOB. If false,
 * the buffer will be validated as a GMR. Already pinned buffers will not be
 * validated.
 *
 * Returns 0 on success, negative error code on failure, -ERESTARTSYS if
 * interrupted by a signal.
 */
int vmw_kms_helper_buffer_prepare(struct vmw_private *dev_priv,
				  struct vmw_dma_buffer *buf,
				  bool interruptible,
				  bool validate_as_mob)
{
	struct ttm_buffer_object *bo = &buf->base;
	int ret;

	ttm_bo_reserve(bo, false, false, NULL);
	ret = vmw_validate_single_buffer(dev_priv, bo, interruptible,
					 validate_as_mob);
	if (ret)
		ttm_bo_unreserve(bo);

	return ret;
}
/**
 * vmw_kms_helper_buffer_revert - Undo the actions of
 * vmw_kms_helper_buffer_prepare.
 *
 * @buf: Pointer to the buffer object.
 *
 * Helper to be used if an error forces the caller to undo the actions of
 * vmw_kms_helper_buffer_prepare.
 */
void vmw_kms_helper_buffer_revert(struct vmw_dma_buffer *buf)
{
	if (buf)
		ttm_bo_unreserve(&buf->base);
}
/**
 * vmw_kms_helper_buffer_finish - Unreserve and fence a buffer object after
 * kms command submission.
 *
 * @dev_priv: Pointer to a device private structure.
 * @file_priv: Pointer to a struct drm_file representing the caller's
 * connection. Must be set to NULL if @user_fence_rep is NULL, and conversely
 * if non-NULL, @user_fence_rep must be non-NULL.
 * @buf: The buffer object.
 * @out_fence: Optional pointer to a fence pointer. If non-NULL, a
 * ref-counted fence pointer is returned here.
 * @user_fence_rep: Optional pointer to a user-space provided struct
 * drm_vmw_fence_rep. If provided, @file_priv must also be provided and the
 * function copies fence data to user-space in a fail-safe manner.
 */
void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv,
				  struct drm_file *file_priv,
				  struct vmw_dma_buffer *buf,
				  struct vmw_fence_obj **out_fence,
				  struct drm_vmw_fence_rep __user *
				  user_fence_rep)
{
	struct vmw_fence_obj *fence;
	uint32_t handle;
	int ret;

	ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
					 file_priv ? &handle : NULL);
	if (buf)
		vmw_fence_single_bo(&buf->base, fence);
	if (file_priv)
		vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv),
					    ret, user_fence_rep, fence,
					    handle);
	if (out_fence)
		*out_fence = fence;
	else
		vmw_fence_obj_unreference(&fence);

	vmw_kms_helper_buffer_revert(buf);
}
/**
 * vmw_kms_helper_resource_revert - Undo the actions of
 * vmw_kms_helper_resource_prepare.
 *
 * @res: Pointer to the resource. Typically a surface.
 *
 * Helper to be used if an error forces the caller to undo the actions of
 * vmw_kms_helper_resource_prepare.
 */
void vmw_kms_helper_resource_revert(struct vmw_resource *res)
{
	vmw_kms_helper_buffer_revert(res->backup);
	vmw_resource_unreserve(res, false, NULL, 0);
	mutex_unlock(&res->dev_priv->cmdbuf_mutex);
}
/**
 * vmw_kms_helper_resource_prepare - Reserve and validate a resource before
 * command submission.
 *
 * @res: Pointer to the resource. Typically a surface.
 * @interruptible: Whether to perform waits as interruptible.
 *
 * Reserves and validates also the backup buffer if a guest-backed resource.
 * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
 * interrupted by a signal.
 */
int vmw_kms_helper_resource_prepare(struct vmw_resource *res,
				    bool interruptible)
{
	int ret = 0;

	if (interruptible)
		ret = mutex_lock_interruptible(&res->dev_priv->cmdbuf_mutex);
	else
		mutex_lock(&res->dev_priv->cmdbuf_mutex);

	if (unlikely(ret != 0))
		return -ERESTARTSYS;

	ret = vmw_resource_reserve(res, interruptible, false);
	if (ret)
		goto out_unlock;

	if (res->backup) {
		ret = vmw_kms_helper_buffer_prepare(res->dev_priv, res->backup,
						    interruptible,
						    res->dev_priv->has_mob);
		if (ret)
			goto out_unreserve;
	}
	ret = vmw_resource_validate(res);
	if (ret)
		goto out_revert;
	return 0;

out_revert:
	vmw_kms_helper_buffer_revert(res->backup);
out_unreserve:
	vmw_resource_unreserve(res, false, NULL, 0);
out_unlock:
	mutex_unlock(&res->dev_priv->cmdbuf_mutex);
	return ret;
}
/**
 * vmw_kms_helper_resource_finish - Unreserve and fence a resource after
 * kms command submission.
 *
 * @res: Pointer to the resource. Typically a surface.
 * @out_fence: Optional pointer to a fence pointer. If non-NULL, a
 * ref-counted fence pointer is returned here.
 */
void vmw_kms_helper_resource_finish(struct vmw_resource *res,
				    struct vmw_fence_obj **out_fence)
{
	if (res->backup || out_fence)
		vmw_kms_helper_buffer_finish(res->dev_priv, NULL, res->backup,
					     out_fence, NULL);

	vmw_resource_unreserve(res, false, NULL, 0);
	mutex_unlock(&res->dev_priv->cmdbuf_mutex);
}
/**
 * vmw_kms_update_proxy - Helper function to update a proxy surface from
 * its backing MOB.
 *
 * @res: Pointer to the surface resource
 * @clips: Clip rects in framebuffer (surface) space.
 * @num_clips: Number of clips in @clips.
 * @increment: Integer with which to increment the clip counter when looping.
 * Used to skip a predetermined number of clip rects.
 *
 * This function makes sure the proxy surface is updated from its backing MOB
 * using the region given by @clips. The surface resource @res and its backing
 * MOB needs to be reserved and validated on call.
 */
int vmw_kms_update_proxy(struct vmw_resource *res,
			 const struct drm_clip_rect *clips,
			 unsigned num_clips,
			 int increment)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct drm_vmw_size *size = &vmw_res_to_srf(res)->base_size;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdUpdateGBImage body;
	} *cmd;
	SVGA3dBox *box;
	size_t copy_size = 0;
	int i;

	if (!clips)
		return 0;

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd) * num_clips);
	if (!cmd) {
		DRM_ERROR("Couldn't reserve fifo space for proxy surface "
			  "update.\n");
		return -ENOMEM;
	}

	for (i = 0; i < num_clips; ++i, clips += increment, ++cmd) {
		box = &cmd->body.box;

		cmd->header.id = SVGA_3D_CMD_UPDATE_GB_IMAGE;
		cmd->header.size = sizeof(cmd->body);
		cmd->body.image.sid = res->id;
		cmd->body.image.face = 0;
		cmd->body.image.mipmap = 0;

		if (clips->x1 > size->width || clips->x2 > size->width ||
		    clips->y1 > size->height || clips->y2 > size->height) {
			DRM_ERROR("Invalid clips outside of framebuffer.\n");
			return -EINVAL;
		}

		box->x = clips->x1;
		box->y = clips->y1;
		box->z = 0;
		box->w = clips->x2 - clips->x1;
		box->h = clips->y2 - clips->y1;
		box->d = 1;

		copy_size += sizeof(*cmd);
	}

	vmw_fifo_commit(dev_priv, copy_size);

	return 0;
}
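/*
 * Look up the connector, crtc and preferred display mode that fbdev
 * should use for a given unit, filling the connector's mode list first
 * if it is still empty.
 */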
int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv,
			    unsigned unit,
			    u32 max_width,
			    u32 max_height,
			    struct drm_connector **p_con,
			    struct drm_crtc **p_crtc,
			    struct drm_display_mode **p_mode)
{
	struct drm_connector *con;
	struct vmw_display_unit *du;
	struct drm_display_mode *mode;
	int i = 0;

	list_for_each_entry(con, &dev_priv->dev->mode_config.connector_list,
			    head) {
		if (i == unit)
			break;

		++i;
	}

	if (i != unit) {
		DRM_ERROR("Could not find initial display unit.\n");
		return -EINVAL;
	}

	if (list_empty(&con->modes))
		(void) vmw_du_connector_fill_modes(con, max_width, max_height);

	if (list_empty(&con->modes)) {
		DRM_ERROR("Could not find initial display mode.\n");
		return -EINVAL;
	}

	du = vmw_connector_to_du(con);
	*p_con = con;
	*p_crtc = &du->crtc;

	list_for_each_entry(mode, &con->modes, head) {
		if (mode->type & DRM_MODE_TYPE_PREFERRED)
			break;
	}

	if (mode->type & DRM_MODE_TYPE_PREFERRED)
		*p_mode = mode;
	else {
		WARN_ONCE(true, "Could not find initial preferred mode.\n");
		*p_mode = list_first_entry(&con->modes,
					   struct drm_display_mode,
					   head);
	}

	return 0;
}
/**
 * vmw_kms_del_active - unregister a crtc binding to the implicit framebuffer
 *
 * @dev_priv: Pointer to a device private struct.
 * @du: The display unit of the crtc.
 */
void vmw_kms_del_active(struct vmw_private *dev_priv,
			struct vmw_display_unit *du)
{
	mutex_lock(&dev_priv->global_kms_state_mutex);
	if (du->active_implicit) {
		if (--(dev_priv->num_implicit) == 0)
			dev_priv->implicit_fb = NULL;
		du->active_implicit = false;
	}
	mutex_unlock(&dev_priv->global_kms_state_mutex);
}
/**
 * vmw_kms_add_active - register a crtc binding to an implicit framebuffer
 *
 * @dev_priv: Pointer to a device private struct.
 * @du: The display unit of the crtc.
 * @vfb: The implicit framebuffer
 *
 * Registers a binding to an implicit framebuffer.
 */
void vmw_kms_add_active(struct vmw_private *dev_priv,
			struct vmw_display_unit *du,
			struct vmw_framebuffer *vfb)
{
	mutex_lock(&dev_priv->global_kms_state_mutex);
	WARN_ON_ONCE(!dev_priv->num_implicit && dev_priv->implicit_fb);

	if (!du->active_implicit && du->is_implicit) {
		dev_priv->implicit_fb = vfb;
		du->active_implicit = true;
		dev_priv->num_implicit++;
	}
	mutex_unlock(&dev_priv->global_kms_state_mutex);
}
/**
 * vmw_kms_crtc_flippable - Check whether we can page-flip a crtc.
 *
 * @dev_priv: Pointer to device-private struct.
 * @crtc: The crtc we want to flip.
 *
 * Returns true or false depending whether it's OK to flip this crtc
 * based on the criterion that we must not have more than one implicit
 * frame-buffer at any one time.
 */
bool vmw_kms_crtc_flippable(struct vmw_private *dev_priv,
			    struct drm_crtc *crtc)
{
	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
	bool ret;

	mutex_lock(&dev_priv->global_kms_state_mutex);
	ret = !du->is_implicit || dev_priv->num_implicit == 1;
	mutex_unlock(&dev_priv->global_kms_state_mutex);

	return ret;
}
/**
 * vmw_kms_update_implicit_fb - Update the implicit fb.
 *
 * @dev_priv: Pointer to device-private struct.
 * @crtc: The crtc the new implicit frame-buffer is bound to.
 */
void vmw_kms_update_implicit_fb(struct vmw_private *dev_priv,
				struct drm_crtc *crtc)
{
	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
	struct vmw_framebuffer *vfb;

	mutex_lock(&dev_priv->global_kms_state_mutex);

	if (!du->is_implicit)
		goto out_unlock;

	vfb = vmw_framebuffer_to_vfb(crtc->primary->fb);
	WARN_ON_ONCE(dev_priv->num_implicit != 1 &&
		     dev_priv->implicit_fb != vfb);

	dev_priv->implicit_fb = vfb;
out_unlock:
	mutex_unlock(&dev_priv->global_kms_state_mutex);
}
/**
 * vmw_kms_create_implicit_placement_property - Set up the implicit placement
 * property.
 *
 * @dev_priv: Pointer to a device private struct.
 * @immutable: Whether the property is immutable.
 *
 * Sets up the implicit placement property unless it's already set up.
 */
void
vmw_kms_create_implicit_placement_property(struct vmw_private *dev_priv,
					   bool immutable)
{
	if (dev_priv->implicit_placement_property)
		return;

	dev_priv->implicit_placement_property =
		drm_property_create_range(dev_priv->dev,
					  immutable ?
					  DRM_MODE_PROP_IMMUTABLE : 0,
					  "implicit_placement", 0, 1);
}