/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include "vmwgfx_kms.h"
/* Might need a hrtimer here? */
#define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1)
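/*
 * Note: HZ / 60 is the ~60 Hz present interval expressed in jiffies
 * (e.g. HZ == 250 gives 4 jiffies, roughly 16 ms). The ternary guards
 * against configurations with HZ < 60, where the integer division
 * would yield 0 and the delayed work would be scheduled with no delay.
 */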
void vmw_display_unit_cleanup(struct vmw_display_unit *du)
{
	if (du->cursor_surface)
		vmw_surface_unreference(&du->cursor_surface);
	if (du->cursor_dmabuf)
		vmw_dmabuf_unreference(&du->cursor_dmabuf);
	drm_crtc_cleanup(&du->crtc);
	drm_encoder_cleanup(&du->encoder);
	drm_connector_cleanup(&du->connector);
}
/*
 * Display Unit Cursor functions
 */
int vmw_cursor_update_image(struct vmw_private *dev_priv,
			    u32 *image, u32 width, u32 height,
			    u32 hotspotX, u32 hotspotY)
{
	struct {
		u32 cmd;
		SVGAFifoCmdDefineAlphaCursor cursor;
	} *cmd;
	u32 image_size = width * height * 4;
	u32 cmd_size = sizeof(*cmd) + image_size;

	cmd = vmw_fifo_reserve(dev_priv, cmd_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed.\n");
		return -ENOMEM;
	}

	memset(cmd, 0, sizeof(*cmd));

	memcpy(&cmd[1], image, image_size);

	cmd->cmd = cpu_to_le32(SVGA_CMD_DEFINE_ALPHA_CURSOR);
	cmd->cursor.id = cpu_to_le32(0);
	cmd->cursor.width = cpu_to_le32(width);
	cmd->cursor.height = cpu_to_le32(height);
	cmd->cursor.hotspotX = cpu_to_le32(hotspotX);
	cmd->cursor.hotspotY = cpu_to_le32(hotspotY);

	vmw_fifo_commit(dev_priv, cmd_size);

	return 0;
}
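/*
 * FIFO layout produced above (derived from the code, not from the SVGA
 * spec): a 32-bit command id, the SVGAFifoCmdDefineAlphaCursor body,
 * then width * height 32-bit pixels copied in starting at &cmd[1].
 */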
void vmw_cursor_update_position(struct vmw_private *dev_priv,
				bool show, int x, int y)
{
	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
	uint32_t count;

	iowrite32(show ? 1 : 0, fifo_mem + SVGA_FIFO_CURSOR_ON);
	iowrite32(x, fifo_mem + SVGA_FIFO_CURSOR_X);
	iowrite32(y, fifo_mem + SVGA_FIFO_CURSOR_Y);
	count = ioread32(fifo_mem + SVGA_FIFO_CURSOR_COUNT);
	iowrite32(++count, fifo_mem + SVGA_FIFO_CURSOR_COUNT);
}
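/*
 * The ON/X/Y values are written first and SVGA_FIFO_CURSOR_COUNT is
 * bumped last; judging by the register names, the count appears to act
 * as the device's change notification for the cursor state.
 */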
int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
			   uint32_t handle, uint32_t width, uint32_t height)
{
	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
	struct vmw_surface *surface = NULL;
	struct vmw_dma_buffer *dmabuf = NULL;
	int ret;

	if (handle) {
		ret = vmw_user_surface_lookup_handle(dev_priv, tfile,
						     handle, &surface);
		if (!ret) {
			if (!surface->snooper.image) {
				DRM_ERROR("surface not suitable for cursor\n");
				return -EINVAL;
			}
		} else {
			ret = vmw_user_dmabuf_lookup(tfile,
						     handle, &dmabuf);
			if (ret) {
				DRM_ERROR("failed to find surface or dmabuf: %i\n", ret);
				return -EINVAL;
			}
		}
	}

	/* takedown old cursor */
	if (du->cursor_surface) {
		du->cursor_surface->snooper.crtc = NULL;
		vmw_surface_unreference(&du->cursor_surface);
	}
	if (du->cursor_dmabuf)
		vmw_dmabuf_unreference(&du->cursor_dmabuf);

	/* setup new image */
	if (surface) {
		/* vmw_user_surface_lookup takes one reference */
		du->cursor_surface = surface;

		du->cursor_surface->snooper.crtc = crtc;
		du->cursor_age = du->cursor_surface->snooper.age;
		vmw_cursor_update_image(dev_priv, surface->snooper.image,
					64, 64, du->hotspot_x, du->hotspot_y);
	} else if (dmabuf) {
		struct ttm_bo_kmap_obj map;
		unsigned long kmap_offset;
		unsigned long kmap_num;
		void *virtual;
		bool dummy;

		/* vmw_user_dmabuf_lookup takes one reference */
		du->cursor_dmabuf = dmabuf;

		kmap_offset = 0;
		kmap_num = (64*64*4) >> PAGE_SHIFT;

		ret = ttm_bo_reserve(&dmabuf->base, true, false, false, 0);
		if (unlikely(ret != 0)) {
			DRM_ERROR("reserve failed\n");
			return -EINVAL;
		}

		ret = ttm_bo_kmap(&dmabuf->base, kmap_offset, kmap_num, &map);
		if (unlikely(ret != 0))
			goto err_unreserve;

		virtual = ttm_kmap_obj_virtual(&map, &dummy);
		vmw_cursor_update_image(dev_priv, virtual, 64, 64,
					du->hotspot_x, du->hotspot_y);

		ttm_bo_kunmap(&map);
err_unreserve:
		ttm_bo_unreserve(&dmabuf->base);

	} else {
		vmw_cursor_update_position(dev_priv, false, 0, 0);
		return 0;
	}

	vmw_cursor_update_position(dev_priv, true, du->cursor_x, du->cursor_y);

	return 0;
}
int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
{
	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
	bool shown = du->cursor_surface || du->cursor_dmabuf ? true : false;

	du->cursor_x = x + crtc->x;
	du->cursor_y = y + crtc->y;

	vmw_cursor_update_position(dev_priv, shown,
				   du->cursor_x, du->cursor_y);

	return 0;
}
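/*
 * The CRTC offset is added above so that du->cursor_x/y hold
 * framebuffer-global coordinates rather than per-CRTC ones; those are
 * the values handed to vmw_cursor_update_position().
 */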
void vmw_kms_cursor_snoop(struct vmw_surface *srf,
			  struct ttm_object_file *tfile,
			  struct ttm_buffer_object *bo,
			  SVGA3dCmdHeader *header)
{
	struct ttm_bo_kmap_obj map;
	unsigned long kmap_offset;
	unsigned long kmap_num;
	SVGA3dCopyBox *box;
	unsigned box_count;
	void *virtual;
	bool dummy;
	int ret;
	struct vmw_dma_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceDMA dma;
	} *cmd;

	cmd = container_of(header, struct vmw_dma_cmd, header);

	/* No snooper installed */
	if (!srf->snooper.image)
		return;

	if (cmd->dma.host.face != 0 || cmd->dma.host.mipmap != 0) {
		DRM_ERROR("face and mipmap for cursors should never be != 0\n");
		return;
	}

	if (cmd->header.size < 64) {
		DRM_ERROR("at least one full copy box must be given\n");
		return;
	}

	box = (SVGA3dCopyBox *)&cmd[1];
	box_count = (cmd->header.size - sizeof(SVGA3dCmdSurfaceDMA)) /
			sizeof(SVGA3dCopyBox);

	if (cmd->dma.guest.pitch != (64 * 4) ||
	    cmd->dma.guest.ptr.offset % PAGE_SIZE ||
	    box->x != 0    || box->y != 0    || box->z != 0    ||
	    box->srcx != 0 || box->srcy != 0 || box->srcz != 0 ||
	    box->w != 64   || box->h != 64   || box->d != 1    ||
	    box_count != 1) {
		/* TODO handle non-page-aligned offsets */
		/* TODO handle partial uploads and pitch != 256 */
		/* TODO handle more than one copy (size != 64) */
		DRM_ERROR("lazy programmer, can't handle weird stuff\n");
		return;
	}

	kmap_offset = cmd->dma.guest.ptr.offset >> PAGE_SHIFT;
	kmap_num = (64*64*4) >> PAGE_SHIFT;

	ret = ttm_bo_reserve(bo, true, false, false, 0);
	if (unlikely(ret != 0)) {
		DRM_ERROR("reserve failed\n");
		return;
	}

	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
	if (unlikely(ret != 0))
		goto err_unreserve;

	virtual = ttm_kmap_obj_virtual(&map, &dummy);

	memcpy(srf->snooper.image, virtual, 64*64*4);
	srf->snooper.age++;

	/* we can't call vmw_cursor_update_image() from here, since execbuf
	 * has already reserved fifo space.
	 *
	 * if (srf->snooper.crtc)
	 *	vmw_ldu_crtc_cursor_update_image(dev_priv,
	 *					 srf->snooper.image, 64, 64,
	 *					 du->hotspot_x, du->hotspot_y);
	 */

	ttm_bo_kunmap(&map);
err_unreserve:
	ttm_bo_unreserve(bo);
}
void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct vmw_display_unit *du;
	struct drm_crtc *crtc;

	mutex_lock(&dev->mode_config.mutex);

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		du = vmw_crtc_to_du(crtc);
		if (!du->cursor_surface ||
		    du->cursor_age == du->cursor_surface->snooper.age)
			continue;

		du->cursor_age = du->cursor_surface->snooper.age;
		vmw_cursor_update_image(dev_priv,
					du->cursor_surface->snooper.image,
					64, 64, du->hotspot_x, du->hotspot_y);
	}

	mutex_unlock(&dev->mode_config.mutex);
}
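/*
 * Snooper age bookkeeping: the DMA snoop path (vmw_kms_cursor_snoop
 * above) copies the latest cursor contents and advances snooper.age,
 * so the comparison in vmw_kms_cursor_post_execbuf() re-uploads the
 * cursor image only for CRTCs whose snooped data actually changed.
 */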
/*
 * Generic framebuffer code
 */
int vmw_framebuffer_create_handle(struct drm_framebuffer *fb,
				  struct drm_file *file_priv,
				  unsigned int *handle)
{
	if (handle)
		*handle = 0;

	return 0;
}
/*
 * Surface framebuffer code
 */

#define vmw_framebuffer_to_vfbs(x) \
	container_of(x, struct vmw_framebuffer_surface, base.base)
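/*
 * container_of() walks back from the embedded drm_framebuffer
 * (base.base) to the enclosing vmw_framebuffer_surface, so a struct
 * drm_framebuffer pointer handed in by the DRM core can be downcast
 * to the driver's wrapper type.
 */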
struct vmw_framebuffer_surface {
	struct vmw_framebuffer base;
	struct vmw_surface *surface;
	struct delayed_work d_work;
	struct mutex work_lock;
	bool present_fs;
};
void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
{
	struct vmw_framebuffer_surface *vfb =
		vmw_framebuffer_to_vfbs(framebuffer);

	cancel_delayed_work_sync(&vfb->d_work);
	drm_framebuffer_cleanup(framebuffer);
	vmw_surface_unreference(&vfb->surface);

	kfree(vfb);
}
static void vmw_framebuffer_present_fs_callback(struct work_struct *work)
{
	struct delayed_work *d_work =
		container_of(work, struct delayed_work, work);
	struct vmw_framebuffer_surface *vfbs =
		container_of(d_work, struct vmw_framebuffer_surface, d_work);
	struct vmw_surface *surf = vfbs->surface;
	struct drm_framebuffer *framebuffer = &vfbs->base.base;
	struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdPresent body;
		SVGA3dCopyRect cr;
	} *cmd;

	mutex_lock(&vfbs->work_lock);
	if (!vfbs->present_fs)
		goto out;

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL))
		goto out_resched;

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_PRESENT);
	cmd->header.size = cpu_to_le32(sizeof(cmd->body) + sizeof(cmd->cr));
	cmd->body.sid = cpu_to_le32(surf->res.id);
	cmd->cr.x = cpu_to_le32(0);
	cmd->cr.y = cpu_to_le32(0);
	cmd->cr.srcx = cmd->cr.x;
	cmd->cr.srcy = cmd->cr.y;
	cmd->cr.w = cpu_to_le32(framebuffer->width);
	cmd->cr.h = cpu_to_le32(framebuffer->height);
	vfbs->present_fs = false;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));
out_resched:
	/*
	 * Will not re-add if already pending.
	 */
	schedule_delayed_work(&vfbs->d_work, VMWGFX_PRESENT_RATE);
out:
	mutex_unlock(&vfbs->work_lock);
}
int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
				  unsigned flags, unsigned color,
				  struct drm_clip_rect *clips,
				  unsigned num_clips)
{
	struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
	struct vmw_framebuffer_surface *vfbs =
		vmw_framebuffer_to_vfbs(framebuffer);
	struct vmw_surface *surf = vfbs->surface;
	struct drm_clip_rect norect;
	SVGA3dCopyRect *cr;
	int i, inc = 1;

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdPresent body;
		SVGA3dCopyRect cr;
	} *cmd;

	if (!num_clips ||
	    !(dev_priv->fifo.capabilities &
	      SVGA_FIFO_CAP_SCREEN_OBJECT)) {
		int ret;

		mutex_lock(&vfbs->work_lock);
		vfbs->present_fs = true;
		ret = schedule_delayed_work(&vfbs->d_work, VMWGFX_PRESENT_RATE);
		mutex_unlock(&vfbs->work_lock);
		if (ret) {
			/*
			 * No work pending; force an immediate present.
			 */
			vmw_framebuffer_present_fs_callback(&vfbs->d_work.work);
		}
		return 0;
	}

	if (!num_clips) {
		num_clips = 1;
		clips = &norect;
		norect.x1 = norect.y1 = 0;
		norect.x2 = framebuffer->width;
		norect.y2 = framebuffer->height;
	} else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
		num_clips /= 2;
		inc = 2; /* skip source rects */
	}

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd) + (num_clips - 1) * sizeof(cmd->cr));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed.\n");
		return -ENOMEM;
	}

	memset(cmd, 0, sizeof(*cmd));

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_PRESENT);
	cmd->header.size = cpu_to_le32(sizeof(cmd->body) + num_clips * sizeof(cmd->cr));
	cmd->body.sid = cpu_to_le32(surf->res.id);

	for (i = 0, cr = &cmd->cr; i < num_clips; i++, cr++, clips += inc) {
		cr->x = cpu_to_le16(clips->x1);
		cr->y = cpu_to_le16(clips->y1);
		cr->srcx = cr->x;
		cr->srcy = cr->y;
		cr->w = cpu_to_le16(clips->x2 - clips->x1);
		cr->h = cpu_to_le16(clips->y2 - clips->y1);
	}

	vmw_fifo_commit(dev_priv, sizeof(*cmd) + (num_clips - 1) * sizeof(cmd->cr));

	return 0;
}
static struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = {
	.destroy = vmw_framebuffer_surface_destroy,
	.dirty = vmw_framebuffer_surface_dirty,
	.create_handle = vmw_framebuffer_create_handle,
};
int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
				    struct vmw_surface *surface,
				    struct vmw_framebuffer **out,
				    unsigned width, unsigned height)
{
	struct drm_device *dev = dev_priv->dev;
	struct vmw_framebuffer_surface *vfbs;
	int ret;

	vfbs = kzalloc(sizeof(*vfbs), GFP_KERNEL);
	if (!vfbs) {
		ret = -ENOMEM;
		goto out_err1;
	}

	ret = drm_framebuffer_init(dev, &vfbs->base.base,
				   &vmw_framebuffer_surface_funcs);
	if (ret)
		goto out_err2;

	if (!vmw_surface_reference(surface)) {
		DRM_ERROR("failed to reference surface %p\n", surface);
		ret = -EINVAL;
		goto out_err3;
	}

	/* XXX get the first 3 from the surface info */
	vfbs->base.base.bits_per_pixel = 32;
	vfbs->base.base.pitch = width * 32 / 4;
	vfbs->base.base.depth = 24;
	vfbs->base.base.width = width;
	vfbs->base.base.height = height;
	vfbs->base.pin = NULL;
	vfbs->base.unpin = NULL;
	vfbs->surface = surface;
	mutex_init(&vfbs->work_lock);
	INIT_DELAYED_WORK(&vfbs->d_work, &vmw_framebuffer_present_fs_callback);
	*out = &vfbs->base;

	return 0;

out_err3:
	drm_framebuffer_cleanup(&vfbs->base.base);
out_err2:
	kfree(vfbs);
out_err1:
	return ret;
}
/*
 * Dmabuf framebuffer code
 */

#define vmw_framebuffer_to_vfbd(x) \
	container_of(x, struct vmw_framebuffer_dmabuf, base.base)
struct vmw_framebuffer_dmabuf {
	struct vmw_framebuffer base;
	struct vmw_dma_buffer *buffer;
};
void vmw_framebuffer_dmabuf_destroy(struct drm_framebuffer *framebuffer)
{
	struct vmw_framebuffer_dmabuf *vfbd =
		vmw_framebuffer_to_vfbd(framebuffer);

	drm_framebuffer_cleanup(framebuffer);
	vmw_dmabuf_unreference(&vfbd->buffer);

	kfree(vfbd);
}
int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
				 unsigned flags, unsigned color,
				 struct drm_clip_rect *clips,
				 unsigned num_clips)
{
	struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
	struct drm_clip_rect norect;
	struct {
		uint32_t header;
		SVGAFifoCmdUpdate body;
	} *cmd;
	int i, increment = 1;

	if (!num_clips) {
		num_clips = 1;
		clips = &norect;
		norect.x1 = norect.y1 = 0;
		norect.x2 = framebuffer->width;
		norect.y2 = framebuffer->height;
	} else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
		num_clips /= 2;
		increment = 2;
	}

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd) * num_clips);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed.\n");
		return -ENOMEM;
	}

	for (i = 0; i < num_clips; i++, clips += increment) {
		cmd[i].header = cpu_to_le32(SVGA_CMD_UPDATE);
		cmd[i].body.x = cpu_to_le32(clips->x1);
		cmd[i].body.y = cpu_to_le32(clips->y1);
		cmd[i].body.width = cpu_to_le32(clips->x2 - clips->x1);
		cmd[i].body.height = cpu_to_le32(clips->y2 - clips->y1);
	}

	vmw_fifo_commit(dev_priv, sizeof(*cmd) * num_clips);

	return 0;
}
static struct drm_framebuffer_funcs vmw_framebuffer_dmabuf_funcs = {
	.destroy = vmw_framebuffer_dmabuf_destroy,
	.dirty = vmw_framebuffer_dmabuf_dirty,
	.create_handle = vmw_framebuffer_create_handle,
};
static int vmw_framebuffer_dmabuf_pin(struct vmw_framebuffer *vfb)
{
	struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
	struct vmw_framebuffer_dmabuf *vfbd =
		vmw_framebuffer_to_vfbd(&vfb->base);
	int ret;

	vmw_overlay_pause_all(dev_priv);

	ret = vmw_dmabuf_to_start_of_vram(dev_priv, vfbd->buffer);

	if (dev_priv->capabilities & SVGA_CAP_MULTIMON) {
		vmw_write(dev_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1);
		vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, 0);
		vmw_write(dev_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true);
		vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_X, 0);
		vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_Y, 0);
		vmw_write(dev_priv, SVGA_REG_DISPLAY_WIDTH, 0);
		vmw_write(dev_priv, SVGA_REG_DISPLAY_HEIGHT, 0);
		vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);

		vmw_write(dev_priv, SVGA_REG_ENABLE, 1);
		vmw_write(dev_priv, SVGA_REG_WIDTH, vfb->base.width);
		vmw_write(dev_priv, SVGA_REG_HEIGHT, vfb->base.height);
		vmw_write(dev_priv, SVGA_REG_BITS_PER_PIXEL, vfb->base.bits_per_pixel);
		vmw_write(dev_priv, SVGA_REG_DEPTH, vfb->base.depth);
		vmw_write(dev_priv, SVGA_REG_RED_MASK, 0x00ff0000);
		vmw_write(dev_priv, SVGA_REG_GREEN_MASK, 0x0000ff00);
		vmw_write(dev_priv, SVGA_REG_BLUE_MASK, 0x000000ff);
	} else
		WARN_ON(true);

	vmw_overlay_resume_all(dev_priv);

	WARN_ON(ret != 0);

	return 0;
}
static int vmw_framebuffer_dmabuf_unpin(struct vmw_framebuffer *vfb)
{
	struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
	struct vmw_framebuffer_dmabuf *vfbd =
		vmw_framebuffer_to_vfbd(&vfb->base);

	if (!vfbd->buffer) {
		WARN_ON(!vfbd->buffer);
		return 0;
	}

	return vmw_dmabuf_from_vram(dev_priv, vfbd->buffer);
}
int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
				   struct vmw_dma_buffer *dmabuf,
				   struct vmw_framebuffer **out,
				   unsigned width, unsigned height)
{
	struct drm_device *dev = dev_priv->dev;
	struct vmw_framebuffer_dmabuf *vfbd;
	int ret;

	vfbd = kzalloc(sizeof(*vfbd), GFP_KERNEL);
	if (!vfbd) {
		ret = -ENOMEM;
		goto out_err1;
	}

	ret = drm_framebuffer_init(dev, &vfbd->base.base,
				   &vmw_framebuffer_dmabuf_funcs);
	if (ret)
		goto out_err2;

	if (!vmw_dmabuf_reference(dmabuf)) {
		DRM_ERROR("failed to reference dmabuf %p\n", dmabuf);
		ret = -EINVAL;
		goto out_err3;
	}

	/* XXX get the first 3 from the surface info */
	vfbd->base.base.bits_per_pixel = 32;
	vfbd->base.base.pitch = width * 32 / 4;
	vfbd->base.base.depth = 24;
	vfbd->base.base.width = width;
	vfbd->base.base.height = height;
	vfbd->base.pin = vmw_framebuffer_dmabuf_pin;
	vfbd->base.unpin = vmw_framebuffer_dmabuf_unpin;
	vfbd->buffer = dmabuf;
	*out = &vfbd->base;

	return 0;

out_err3:
	drm_framebuffer_cleanup(&vfbd->base.base);
out_err2:
	kfree(vfbd);
out_err1:
	return ret;
}
/*
 * Generic Kernel modesetting functions
 */
static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
						 struct drm_file *file_priv,
						 struct drm_mode_fb_cmd *mode_cmd)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_framebuffer *vfb = NULL;
	struct vmw_surface *surface = NULL;
	struct vmw_dma_buffer *bo = NULL;
	int ret;

	ret = vmw_user_surface_lookup_handle(dev_priv, tfile,
					     mode_cmd->handle, &surface);
	if (ret)
		goto try_dmabuf;

	ret = vmw_kms_new_framebuffer_surface(dev_priv, surface, &vfb,
					      mode_cmd->width, mode_cmd->height);

	/* vmw_user_surface_lookup takes one ref so does new_fb */
	vmw_surface_unreference(&surface);

	if (ret) {
		DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
		return NULL;
	}
	return &vfb->base;

try_dmabuf:
	DRM_INFO("%s: trying buffer\n", __func__);

	ret = vmw_user_dmabuf_lookup(tfile, mode_cmd->handle, &bo);
	if (ret) {
		DRM_ERROR("failed to find buffer: %i\n", ret);
		return NULL;
	}

	ret = vmw_kms_new_framebuffer_dmabuf(dev_priv, bo, &vfb,
					     mode_cmd->width, mode_cmd->height);

	/* vmw_user_dmabuf_lookup takes one ref so does new_fb */
	vmw_dmabuf_unreference(&bo);

	if (ret) {
		DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
		return NULL;
	}

	return &vfb->base;
}
static int vmw_kms_fb_changed(struct drm_device *dev)
{
	return 0;
}
static struct drm_mode_config_funcs vmw_kms_funcs = {
	.fb_create = vmw_kms_fb_create,
	.fb_changed = vmw_kms_fb_changed,
};
int vmw_kms_init(struct vmw_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	int ret;

	drm_mode_config_init(dev);
	dev->mode_config.funcs = &vmw_kms_funcs;
	dev->mode_config.min_width = 640;
	dev->mode_config.min_height = 480;
	dev->mode_config.max_width = 2048;
	dev->mode_config.max_height = 2048;

	ret = vmw_kms_init_legacy_display_system(dev_priv);

	return ret;
}
*dev_priv
)
775 * Docs says we should take the lock before calling this function
776 * but since it destroys encoders and our destructor calls
777 * drm_encoder_cleanup which takes the lock we deadlock.
779 drm_mode_config_cleanup(dev_priv
->dev
);
780 vmw_kms_close_legacy_display_system(dev_priv
);
int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct drm_vmw_cursor_bypass_arg *arg = data;
	struct vmw_display_unit *du;
	struct drm_mode_object *obj;
	struct drm_crtc *crtc;
	int ret = 0;

	mutex_lock(&dev->mode_config.mutex);
	if (arg->flags & DRM_VMW_CURSOR_BYPASS_ALL) {

		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
			du = vmw_crtc_to_du(crtc);
			du->hotspot_x = arg->xhot;
			du->hotspot_y = arg->yhot;
		}

		mutex_unlock(&dev->mode_config.mutex);
		return 0;
	}

	obj = drm_mode_object_find(dev, arg->crtc_id, DRM_MODE_OBJECT_CRTC);
	if (!obj) {
		ret = -EINVAL;
		goto out;
	}

	crtc = obj_to_crtc(obj);
	du = vmw_crtc_to_du(crtc);

	du->hotspot_x = arg->xhot;
	du->hotspot_y = arg->yhot;

out:
	mutex_unlock(&dev->mode_config.mutex);

	return ret;
}
int vmw_kms_save_vga(struct vmw_private *vmw_priv)
{
	/*
	 * Set up a single multimon monitor with a size of 0x0;
	 * this stops the UI from resizing when we change the
	 * framebuffer size.
	 */
	if (vmw_priv->capabilities & SVGA_CAP_MULTIMON) {
		vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, 0);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, 0);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, 0);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, 0);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, 0);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
	}

	vmw_priv->vga_width = vmw_read(vmw_priv, SVGA_REG_WIDTH);
	vmw_priv->vga_height = vmw_read(vmw_priv, SVGA_REG_HEIGHT);
	vmw_priv->vga_bpp = vmw_read(vmw_priv, SVGA_REG_BITS_PER_PIXEL);
	vmw_priv->vga_depth = vmw_read(vmw_priv, SVGA_REG_DEPTH);
	vmw_priv->vga_pseudo = vmw_read(vmw_priv, SVGA_REG_PSEUDOCOLOR);
	vmw_priv->vga_red_mask = vmw_read(vmw_priv, SVGA_REG_RED_MASK);
	vmw_priv->vga_green_mask = vmw_read(vmw_priv, SVGA_REG_GREEN_MASK);
	vmw_priv->vga_blue_mask = vmw_read(vmw_priv, SVGA_REG_BLUE_MASK);

	return 0;
}
int vmw_kms_restore_vga(struct vmw_private *vmw_priv)
{
	vmw_write(vmw_priv, SVGA_REG_WIDTH, vmw_priv->vga_width);
	vmw_write(vmw_priv, SVGA_REG_HEIGHT, vmw_priv->vga_height);
	vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, vmw_priv->vga_bpp);
	vmw_write(vmw_priv, SVGA_REG_DEPTH, vmw_priv->vga_depth);
	vmw_write(vmw_priv, SVGA_REG_PSEUDOCOLOR, vmw_priv->vga_pseudo);
	vmw_write(vmw_priv, SVGA_REG_RED_MASK, vmw_priv->vga_red_mask);
	vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, vmw_priv->vga_green_mask);
	vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, vmw_priv->vga_blue_mask);

	/* TODO check for multimon */
	vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 0);