/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include "vmwgfx_kms.h"

/* Might need a hrtimer here? */
#define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1)
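/*
 * Note: VMWGFX_PRESENT_RATE is a delay in jiffies, aiming for roughly one
 * present per 1/60 s. With HZ == 250 this evaluates to 250 / 60 == 4 jiffies
 * (~16 ms); the ternary guards against the integer division yielding 0 when
 * HZ < 60, which would effectively request an immediate re-run from
 * schedule_delayed_work().
 */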
static int vmw_surface_dmabuf_pin(struct vmw_framebuffer *vfb);
static int vmw_surface_dmabuf_unpin(struct vmw_framebuffer *vfb);
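
/*
 * vmw_display_unit_cleanup - Release a display unit's resources.
 *
 * Drops any cursor surface/dmabuf references still held by the unit and
 * cleans up the embedded DRM crtc, encoder and connector objects.
 */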
void vmw_display_unit_cleanup(struct vmw_display_unit *du)
{
	if (du->cursor_surface)
		vmw_surface_unreference(&du->cursor_surface);
	if (du->cursor_dmabuf)
		vmw_dmabuf_unreference(&du->cursor_dmabuf);
	drm_crtc_cleanup(&du->crtc);
	drm_encoder_cleanup(&du->encoder);
	drm_connector_cleanup(&du->connector);
}
/*
 * Display Unit Cursor functions
 */
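/*
 * vmw_cursor_update_image - Define the hardware cursor image.
 *
 * Reserves FIFO space for an SVGA_CMD_DEFINE_ALPHA_CURSOR command followed
 * by the 32-bit ARGB image payload (width * height * 4 bytes) and commits
 * it. Returns -EINVAL if no image is given and -ENOMEM if the FIFO
 * reservation fails.
 */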
int vmw_cursor_update_image(struct vmw_private *dev_priv,
			    u32 *image, u32 width, u32 height,
			    u32 hotspotX, u32 hotspotY)
{
	struct {
		u32 cmd;
		SVGAFifoCmdDefineAlphaCursor cursor;
	} *cmd;
	u32 image_size = width * height * 4;
	u32 cmd_size = sizeof(*cmd) + image_size;

	if (!image)
		return -EINVAL;

	cmd = vmw_fifo_reserve(dev_priv, cmd_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed.\n");
		return -ENOMEM;
	}

	memset(cmd, 0, sizeof(*cmd));

	memcpy(&cmd[1], image, image_size);

	cmd->cmd = cpu_to_le32(SVGA_CMD_DEFINE_ALPHA_CURSOR);
	cmd->cursor.id = cpu_to_le32(0);
	cmd->cursor.width = cpu_to_le32(width);
	cmd->cursor.height = cpu_to_le32(height);
	cmd->cursor.hotspotX = cpu_to_le32(hotspotX);
	cmd->cursor.hotspotY = cpu_to_le32(hotspotY);

	vmw_fifo_commit(dev_priv, cmd_size);

	return 0;
}
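
/*
 * vmw_cursor_update_position - Show/hide and move the hardware cursor.
 *
 * Writes the cursor state directly into the FIFO bounce registers and bumps
 * SVGA_FIFO_CURSOR_COUNT so that the device notices the update.
 */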
void vmw_cursor_update_position(struct vmw_private *dev_priv,
				bool show, int x, int y)
{
	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
	uint32_t count;

	iowrite32(show ? 1 : 0, fifo_mem + SVGA_FIFO_CURSOR_ON);
	iowrite32(x, fifo_mem + SVGA_FIFO_CURSOR_X);
	iowrite32(y, fifo_mem + SVGA_FIFO_CURSOR_Y);
	count = ioread32(fifo_mem + SVGA_FIFO_CURSOR_COUNT);
	iowrite32(++count, fifo_mem + SVGA_FIFO_CURSOR_COUNT);
}
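
/*
 * vmw_du_crtc_cursor_set - DRM cursor_set callback.
 *
 * The cursor image may be backed either by a surface (using the image
 * snooped by vmw_kms_cursor_snoop()) or by a dma buffer that is mapped and
 * read here. A zero handle just hides the cursor. Cursor dimensions are
 * fixed at 64x64 below.
 */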
int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
			   uint32_t handle, uint32_t width, uint32_t height)
{
	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
	struct vmw_surface *surface = NULL;
	struct vmw_dma_buffer *dmabuf = NULL;
	int ret;

	if (handle) {
		ret = vmw_user_surface_lookup_handle(dev_priv, tfile,
						     handle, &surface);
		if (!ret) {
			if (!surface->snooper.image) {
				DRM_ERROR("surface not suitable for cursor\n");
				return -EINVAL;
			}
		} else {
			ret = vmw_user_dmabuf_lookup(tfile,
						     handle, &dmabuf);
			if (ret) {
				DRM_ERROR("failed to find surface or dmabuf: %i\n", ret);
				return -EINVAL;
			}
		}
	}

	/* takedown old cursor */
	if (du->cursor_surface) {
		du->cursor_surface->snooper.crtc = NULL;
		vmw_surface_unreference(&du->cursor_surface);
	}
	if (du->cursor_dmabuf)
		vmw_dmabuf_unreference(&du->cursor_dmabuf);

	/* setup new image */
	if (surface) {
		/* vmw_user_surface_lookup takes one reference */
		du->cursor_surface = surface;

		du->cursor_surface->snooper.crtc = crtc;
		du->cursor_age = du->cursor_surface->snooper.age;
		vmw_cursor_update_image(dev_priv, surface->snooper.image,
					64, 64, du->hotspot_x, du->hotspot_y);
	} else if (dmabuf) {
		struct ttm_bo_kmap_obj map;
		unsigned long kmap_offset;
		unsigned long kmap_num;
		void *virtual;
		bool dummy;

		/* vmw_user_dmabuf_lookup takes one reference */
		du->cursor_dmabuf = dmabuf;

		kmap_offset = 0;
		kmap_num = (64*64*4) >> PAGE_SHIFT;

		ret = ttm_bo_reserve(&dmabuf->base, true, false, false, 0);
		if (unlikely(ret != 0)) {
			DRM_ERROR("reserve failed\n");
			return -EINVAL;
		}

		ret = ttm_bo_kmap(&dmabuf->base, kmap_offset, kmap_num, &map);
		if (unlikely(ret != 0))
			goto err_unreserve;

		virtual = ttm_kmap_obj_virtual(&map, &dummy);
		vmw_cursor_update_image(dev_priv, virtual, 64, 64,
					du->hotspot_x, du->hotspot_y);

		ttm_bo_kunmap(&map);
err_unreserve:
		ttm_bo_unreserve(&dmabuf->base);

	} else {
		vmw_cursor_update_position(dev_priv, false, 0, 0);
		return 0;
	}

	vmw_cursor_update_position(dev_priv, true,
				   du->cursor_x, du->cursor_y);

	return 0;
}
int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
{
	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
	bool shown = du->cursor_surface || du->cursor_dmabuf ? true : false;

	du->cursor_x = x + crtc->x;
	du->cursor_y = y + crtc->y;

	vmw_cursor_update_position(dev_priv, shown,
				   du->cursor_x, du->cursor_y);

	return 0;
}
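
/*
 * vmw_kms_cursor_snoop - Snoop a SURFACE_DMA upload to a cursor surface.
 *
 * Called from the execbuf path: copies the 64x64 ARGB image being DMA'd to
 * the surface into srf->snooper.image and bumps snooper.age, so that
 * vmw_kms_cursor_post_execbuf() can re-define the hardware cursor once the
 * fifo is available again.
 */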
void vmw_kms_cursor_snoop(struct vmw_surface *srf,
			  struct ttm_object_file *tfile,
			  struct ttm_buffer_object *bo,
			  SVGA3dCmdHeader *header)
{
	struct ttm_bo_kmap_obj map;
	unsigned long kmap_offset;
	unsigned long kmap_num;
	SVGA3dCopyBox *box;
	unsigned box_count;
	void *virtual;
	bool dummy;
	int ret;
	struct vmw_dma_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceDMA dma;
	} *cmd;

	cmd = container_of(header, struct vmw_dma_cmd, header);

	/* No snooper installed */
	if (!srf->snooper.image)
		return;

	if (cmd->dma.host.face != 0 || cmd->dma.host.mipmap != 0) {
		DRM_ERROR("face and mipmap for cursors should never != 0\n");
		return;
	}

	if (cmd->header.size < 64) {
		DRM_ERROR("at least one full copy box must be given\n");
		return;
	}

	box = (SVGA3dCopyBox *)&cmd[1];
	box_count = (cmd->header.size - sizeof(SVGA3dCmdSurfaceDMA)) /
			sizeof(SVGA3dCopyBox);

	if (cmd->dma.guest.pitch != (64 * 4) ||
	    cmd->dma.guest.ptr.offset % PAGE_SIZE ||
	    box->x != 0    || box->y != 0    || box->z != 0    ||
	    box->srcx != 0 || box->srcy != 0 || box->srcz != 0 ||
	    box->w != 64   || box->h != 64   || box->d != 1    ||
	    box_count != 1) {
		/* TODO handle non-page-aligned offsets */
		/* TODO handle partial uploads and pitch != 256 */
		/* TODO handle more than one copy box (size != 64) */
		DRM_ERROR("lazy programmer, can't handle weird stuff\n");
		return;
	}

	kmap_offset = cmd->dma.guest.ptr.offset >> PAGE_SHIFT;
	kmap_num = (64*64*4) >> PAGE_SHIFT;

	ret = ttm_bo_reserve(bo, true, false, false, 0);
	if (unlikely(ret != 0)) {
		DRM_ERROR("reserve failed\n");
		return;
	}

	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
	if (unlikely(ret != 0))
		goto err_unreserve;

	virtual = ttm_kmap_obj_virtual(&map, &dummy);

	memcpy(srf->snooper.image, virtual, 64*64*4);
	srf->snooper.age++;

	/* We can't update the cursor image from here, since execbuf has
	 * already reserved fifo space:
	 *
	 * if (srf->snooper.crtc)
	 *	vmw_ldu_crtc_cursor_update_image(dev_priv,
	 *					 srf->snooper.image, 64, 64,
	 *					 du->hotspot_x, du->hotspot_y);
	 */

	ttm_bo_kunmap(&map);
err_unreserve:
	ttm_bo_unreserve(bo);
}
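
/*
 * vmw_kms_cursor_post_execbuf - Propagate snooped cursor images.
 *
 * Walks all crtcs and re-defines the hardware cursor for every display unit
 * whose snooped image is newer than the one last pushed to the device.
 */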
void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct vmw_display_unit *du;
	struct drm_crtc *crtc;

	mutex_lock(&dev->mode_config.mutex);

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		du = vmw_crtc_to_du(crtc);
		if (!du->cursor_surface ||
		    du->cursor_age == du->cursor_surface->snooper.age)
			continue;

		du->cursor_age = du->cursor_surface->snooper.age;
		vmw_cursor_update_image(dev_priv,
					du->cursor_surface->snooper.image,
					64, 64, du->hotspot_x, du->hotspot_y);
	}

	mutex_unlock(&dev->mode_config.mutex);
}
/*
 * Generic framebuffer code
 */

int vmw_framebuffer_create_handle(struct drm_framebuffer *fb,
				  struct drm_file *file_priv,
				  unsigned int *handle)
{
	if (handle)
		*handle = 0;

	return 0;
}
/*
 * Surface framebuffer code
 */

#define vmw_framebuffer_to_vfbs(x) \
	container_of(x, struct vmw_framebuffer_surface, base.base)

struct vmw_framebuffer_surface {
	struct vmw_framebuffer base;
	struct vmw_surface *surface;
	struct vmw_dma_buffer *buffer;
	struct delayed_work d_work;
	struct mutex work_lock;
	bool present_fs;
	struct list_head head;
	struct drm_master *master;
};
/**
 * vmw_kms_idle_workqueues - Flush workqueues on this master
 *
 * @vmaster - Pointer identifying the master, for the surfaces of which
 * we idle the dirty work queues.
 *
 * This function should be called with the ttm lock held in exclusive mode
 * to idle all dirty work queues before the fifo is taken down.
 *
 * The work task may actually requeue itself, but after the flush returns we're
 * sure that there's nothing to present, since the ttm lock is held in
 * exclusive mode, so the fifo will never get used.
 */

void vmw_kms_idle_workqueues(struct vmw_master *vmaster)
{
	struct vmw_framebuffer_surface *entry;

	mutex_lock(&vmaster->fb_surf_mutex);
	list_for_each_entry(entry, &vmaster->fb_surf, head) {
		if (cancel_delayed_work_sync(&entry->d_work))
			(void) entry->d_work.work.func(&entry->d_work.work);

		(void) cancel_delayed_work_sync(&entry->d_work);
	}
	mutex_unlock(&vmaster->fb_surf_mutex);
}
void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
{
	struct vmw_framebuffer_surface *vfbs =
		vmw_framebuffer_to_vfbs(framebuffer);
	struct vmw_master *vmaster = vmw_master(vfbs->master);

	mutex_lock(&vmaster->fb_surf_mutex);
	list_del(&vfbs->head);
	mutex_unlock(&vmaster->fb_surf_mutex);

	cancel_delayed_work_sync(&vfbs->d_work);
	drm_master_put(&vfbs->master);
	drm_framebuffer_cleanup(framebuffer);
	vmw_surface_unreference(&vfbs->surface);

	kfree(vfbs);
}
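
/*
 * Presents for surface-backed framebuffers without screen objects are
 * coalesced through a delayed work item: the dirty callback only sets
 * vfbs->present_fs and (re)schedules the work, and the work item below
 * emits a single full-screen SVGA_3D_CMD_PRESENT roughly once per
 * VMWGFX_PRESENT_RATE.
 */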
static void vmw_framebuffer_present_fs_callback(struct work_struct *work)
{
	struct delayed_work *d_work =
		container_of(work, struct delayed_work, work);
	struct vmw_framebuffer_surface *vfbs =
		container_of(d_work, struct vmw_framebuffer_surface, d_work);
	struct vmw_surface *surf = vfbs->surface;
	struct drm_framebuffer *framebuffer = &vfbs->base.base;
	struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdPresent body;
		SVGA3dCopyRect cr;
	} *cmd;

	/*
	 * Strictly we should take the ttm_lock in read mode before accessing
	 * the fifo, to make sure the fifo is present and up. However,
	 * instead we flush all workqueues under the ttm lock in exclusive mode
	 * before taking down the fifo.
	 */
	mutex_lock(&vfbs->work_lock);
	if (!vfbs->present_fs)
		goto out_unlock;

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL))
		goto out_resched;

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_PRESENT);
	cmd->header.size = cpu_to_le32(sizeof(cmd->body) + sizeof(cmd->cr));
	cmd->body.sid = cpu_to_le32(surf->res.id);
	cmd->cr.x = cpu_to_le32(0);
	cmd->cr.y = cpu_to_le32(0);
	cmd->cr.srcx = cmd->cr.x;
	cmd->cr.srcy = cmd->cr.y;
	cmd->cr.w = cpu_to_le32(framebuffer->width);
	cmd->cr.h = cpu_to_le32(framebuffer->height);
	vfbs->present_fs = false;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));
out_resched:
	/*
	 * Will not re-add if already pending.
	 */
	schedule_delayed_work(&vfbs->d_work, VMWGFX_PRESENT_RATE);
out_unlock:
	mutex_unlock(&vfbs->work_lock);
}
int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
				  struct drm_file *file_priv,
				  unsigned flags, unsigned color,
				  struct drm_clip_rect *clips,
				  unsigned num_clips)
{
	struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	struct vmw_framebuffer_surface *vfbs =
		vmw_framebuffer_to_vfbs(framebuffer);
	struct vmw_surface *surf = vfbs->surface;
	struct drm_clip_rect norect;
	SVGA3dCopyRect *cr;
	int i, inc = 1;
	int ret;

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdPresent body;
		SVGA3dCopyRect cr;
	} *cmd;

	if (unlikely(vfbs->master != file_priv->master))
		return -EINVAL;

	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0))
		return ret;

	if (!num_clips ||
	    !(dev_priv->fifo.capabilities &
	      SVGA_FIFO_CAP_SCREEN_OBJECT)) {
		mutex_lock(&vfbs->work_lock);
		vfbs->present_fs = true;
		ret = schedule_delayed_work(&vfbs->d_work, VMWGFX_PRESENT_RATE);
		mutex_unlock(&vfbs->work_lock);
		if (ret) {
			/*
			 * No work was pending; force an immediate present.
			 */
			vmw_framebuffer_present_fs_callback(&vfbs->d_work.work);
		}
		ttm_read_unlock(&vmaster->lock);
		return 0;
	}

	if (!num_clips) {
		num_clips = 1;
		clips = &norect;
		norect.x1 = norect.y1 = 0;
		norect.x2 = framebuffer->width;
		norect.y2 = framebuffer->height;
	} else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
		num_clips /= 2;
		inc = 2; /* skip source rects */
	}

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd) +
			       (num_clips - 1) * sizeof(cmd->cr));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed.\n");
		ttm_read_unlock(&vmaster->lock);
		return -ENOMEM;
	}

	memset(cmd, 0, sizeof(*cmd));

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_PRESENT);
	cmd->header.size = cpu_to_le32(sizeof(cmd->body) +
				       num_clips * sizeof(cmd->cr));
	cmd->body.sid = cpu_to_le32(surf->res.id);

	for (i = 0, cr = &cmd->cr; i < num_clips; i++, cr++, clips += inc) {
		cr->x = cpu_to_le16(clips->x1);
		cr->y = cpu_to_le16(clips->y1);
		cr->srcx = cr->x;
		cr->srcy = cr->y;
		cr->w = cpu_to_le16(clips->x2 - clips->x1);
		cr->h = cpu_to_le16(clips->y2 - clips->y1);
	}

	vmw_fifo_commit(dev_priv, sizeof(*cmd) +
			(num_clips - 1) * sizeof(cmd->cr));
	ttm_read_unlock(&vmaster->lock);
	return 0;
}
static struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = {
	.destroy = vmw_framebuffer_surface_destroy,
	.dirty = vmw_framebuffer_surface_dirty,
	.create_handle = vmw_framebuffer_create_handle,
};
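
/*
 * vmw_kms_new_framebuffer_surface - Wrap a vmw_surface in a framebuffer.
 *
 * Validates that the surface has a single mip level, size and depth, and a
 * format matching the requested color depth, before initializing the DRM
 * framebuffer and registering it on the master's fb_surf list.
 */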
static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
					   struct drm_file *file_priv,
					   struct vmw_surface *surface,
					   struct vmw_framebuffer **out,
					   const struct drm_mode_fb_cmd
					   *mode_cmd)
{
	struct drm_device *dev = dev_priv->dev;
	struct vmw_framebuffer_surface *vfbs;
	enum SVGA3dSurfaceFormat format;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret;

	/*
	 * Sanity checks.
	 */

	if (unlikely(surface->mip_levels[0] != 1 ||
		     surface->num_sizes != 1 ||
		     surface->sizes[0].width < mode_cmd->width ||
		     surface->sizes[0].height < mode_cmd->height ||
		     surface->sizes[0].depth != 1)) {
		DRM_ERROR("Incompatible surface dimensions "
			  "for requested mode.\n");
		return -EINVAL;
	}

	switch (mode_cmd->depth) {
	case 32:
		format = SVGA3D_A8R8G8B8;
		break;
	case 24:
		format = SVGA3D_X8R8G8B8;
		break;
	case 16:
		format = SVGA3D_R5G6B5;
		break;
	case 15:
		format = SVGA3D_A1R5G5B5;
		break;
	default:
		DRM_ERROR("Invalid color depth: %d\n", mode_cmd->depth);
		return -EINVAL;
	}

	if (unlikely(format != surface->format)) {
		DRM_ERROR("Invalid surface format for requested mode.\n");
		return -EINVAL;
	}

	vfbs = kzalloc(sizeof(*vfbs), GFP_KERNEL);
	if (!vfbs) {
		ret = -ENOMEM;
		goto out_err1;
	}

	ret = drm_framebuffer_init(dev, &vfbs->base.base,
				   &vmw_framebuffer_surface_funcs);
	if (ret)
		goto out_err2;

	if (!vmw_surface_reference(surface)) {
		DRM_ERROR("failed to reference surface %p\n", surface);
		goto out_err3;
	}

	/* XXX get the first 3 from the surface info */
	vfbs->base.base.bits_per_pixel = mode_cmd->bpp;
	vfbs->base.base.pitch = mode_cmd->pitch;
	vfbs->base.base.depth = mode_cmd->depth;
	vfbs->base.base.width = mode_cmd->width;
	vfbs->base.base.height = mode_cmd->height;
	vfbs->base.pin = &vmw_surface_dmabuf_pin;
	vfbs->base.unpin = &vmw_surface_dmabuf_unpin;
	vfbs->surface = surface;
	vfbs->master = drm_master_get(file_priv->master);
	mutex_init(&vfbs->work_lock);

	mutex_lock(&vmaster->fb_surf_mutex);
	INIT_DELAYED_WORK(&vfbs->d_work, &vmw_framebuffer_present_fs_callback);
	list_add_tail(&vfbs->head, &vmaster->fb_surf);
	mutex_unlock(&vmaster->fb_surf_mutex);

	*out = &vfbs->base;

	return 0;

out_err3:
	ret = -EINVAL;
	drm_framebuffer_cleanup(&vfbs->base.base);
out_err2:
	kfree(vfbs);
out_err1:
	return ret;
}
/*
 * Dmabuf framebuffer code
 */

#define vmw_framebuffer_to_vfbd(x) \
	container_of(x, struct vmw_framebuffer_dmabuf, base.base)

struct vmw_framebuffer_dmabuf {
	struct vmw_framebuffer base;
	struct vmw_dma_buffer *buffer;
};

void vmw_framebuffer_dmabuf_destroy(struct drm_framebuffer *framebuffer)
{
	struct vmw_framebuffer_dmabuf *vfbd =
		vmw_framebuffer_to_vfbd(framebuffer);

	drm_framebuffer_cleanup(framebuffer);
	vmw_dmabuf_unreference(&vfbd->buffer);

	kfree(vfbd);
}
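
/*
 * vmw_framebuffer_dmabuf_dirty - Flush dirty regions of a dmabuf-backed fb.
 *
 * Emits one SVGA_CMD_UPDATE per clip rect, or a single full-framebuffer
 * update when no clips are passed. DIRTY_ANNOTATE_COPY passes
 * destination/source rect pairs, so only every other rect is used.
 */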
int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
				 struct drm_file *file_priv,
				 unsigned flags, unsigned color,
				 struct drm_clip_rect *clips,
				 unsigned num_clips)
{
	struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	struct drm_clip_rect norect;
	int ret;
	struct {
		uint32_t header;
		SVGAFifoCmdUpdate body;
	} *cmd;
	int i, increment = 1;

	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0))
		return ret;

	if (!num_clips) {
		num_clips = 1;
		clips = &norect;
		norect.x1 = norect.y1 = 0;
		norect.x2 = framebuffer->width;
		norect.y2 = framebuffer->height;
	} else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
		num_clips /= 2;
		increment = 2; /* skip source rects */
	}

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd) * num_clips);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed.\n");
		ttm_read_unlock(&vmaster->lock);
		return -ENOMEM;
	}

	for (i = 0; i < num_clips; i++, clips += increment) {
		cmd[i].header = cpu_to_le32(SVGA_CMD_UPDATE);
		cmd[i].body.x = cpu_to_le32(clips->x1);
		cmd[i].body.y = cpu_to_le32(clips->y1);
		cmd[i].body.width = cpu_to_le32(clips->x2 - clips->x1);
		cmd[i].body.height = cpu_to_le32(clips->y2 - clips->y1);
	}

	vmw_fifo_commit(dev_priv, sizeof(*cmd) * num_clips);
	ttm_read_unlock(&vmaster->lock);

	return 0;
}
static struct drm_framebuffer_funcs vmw_framebuffer_dmabuf_funcs = {
	.destroy = vmw_framebuffer_dmabuf_destroy,
	.dirty = vmw_framebuffer_dmabuf_dirty,
	.create_handle = vmw_framebuffer_create_handle,
};
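
/*
 * Pinning a surface-backed framebuffer allocates a backing dma buffer at a
 * fixed, non-evictable VRAM placement (vmw_vram_ne_placement) that the
 * legacy display unit can scan out from; overlays are paused around the
 * allocation since it may move VRAM contents.
 */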
static int vmw_surface_dmabuf_pin(struct vmw_framebuffer *vfb)
{
	struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
	struct vmw_framebuffer_surface *vfbs =
		vmw_framebuffer_to_vfbs(&vfb->base);
	unsigned long size = vfbs->base.base.pitch * vfbs->base.base.height;
	int ret;

	vfbs->buffer = kzalloc(sizeof(*vfbs->buffer), GFP_KERNEL);
	if (unlikely(vfbs->buffer == NULL))
		return -ENOMEM;

	vmw_overlay_pause_all(dev_priv);
	ret = vmw_dmabuf_init(dev_priv, vfbs->buffer, size,
			      &vmw_vram_ne_placement,
			      false, &vmw_dmabuf_bo_free);
	vmw_overlay_resume_all(dev_priv);
	if (unlikely(ret != 0))
		vfbs->buffer = NULL;

	return ret;
}
static int vmw_surface_dmabuf_unpin(struct vmw_framebuffer *vfb)
{
	struct ttm_buffer_object *bo;
	struct vmw_framebuffer_surface *vfbs =
		vmw_framebuffer_to_vfbs(&vfb->base);

	if (unlikely(vfbs->buffer == NULL))
		return 0;

	bo = &vfbs->buffer->base;
	ttm_bo_unref(&bo);
	vfbs->buffer = NULL;

	return 0;
}
static int vmw_framebuffer_dmabuf_pin(struct vmw_framebuffer *vfb)
{
	struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
	struct vmw_framebuffer_dmabuf *vfbd =
		vmw_framebuffer_to_vfbd(&vfb->base);
	int ret;

	vmw_overlay_pause_all(dev_priv);

	ret = vmw_dmabuf_to_start_of_vram(dev_priv, vfbd->buffer);

	vmw_overlay_resume_all(dev_priv);

	WARN_ON(ret != 0);

	return 0;
}
static int vmw_framebuffer_dmabuf_unpin(struct vmw_framebuffer *vfb)
{
	struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
	struct vmw_framebuffer_dmabuf *vfbd =
		vmw_framebuffer_to_vfbd(&vfb->base);

	if (!vfbd->buffer) {
		WARN_ON(!vfbd->buffer);
		return 0;
	}

	return vmw_dmabuf_from_vram(dev_priv, vfbd->buffer);
}
static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
					  struct vmw_dma_buffer *dmabuf,
					  struct vmw_framebuffer **out,
					  const struct drm_mode_fb_cmd
					  *mode_cmd)
{
	struct drm_device *dev = dev_priv->dev;
	struct vmw_framebuffer_dmabuf *vfbd;
	unsigned int requested_size;
	int ret;

	requested_size = mode_cmd->height * mode_cmd->pitch;
	if (unlikely(requested_size > dmabuf->base.num_pages * PAGE_SIZE)) {
		DRM_ERROR("Screen buffer object size is too small "
			  "for requested mode.\n");
		return -EINVAL;
	}

	vfbd = kzalloc(sizeof(*vfbd), GFP_KERNEL);
	if (!vfbd) {
		ret = -ENOMEM;
		goto out_err1;
	}

	ret = drm_framebuffer_init(dev, &vfbd->base.base,
				   &vmw_framebuffer_dmabuf_funcs);
	if (ret)
		goto out_err2;

	if (!vmw_dmabuf_reference(dmabuf)) {
		DRM_ERROR("failed to reference dmabuf %p\n", dmabuf);
		goto out_err3;
	}

	vfbd->base.base.bits_per_pixel = mode_cmd->bpp;
	vfbd->base.base.pitch = mode_cmd->pitch;
	vfbd->base.base.depth = mode_cmd->depth;
	vfbd->base.base.width = mode_cmd->width;
	vfbd->base.base.height = mode_cmd->height;
	vfbd->base.pin = vmw_framebuffer_dmabuf_pin;
	vfbd->base.unpin = vmw_framebuffer_dmabuf_unpin;
	vfbd->buffer = dmabuf;
	*out = &vfbd->base;

	return 0;

out_err3:
	ret = -EINVAL;
	drm_framebuffer_cleanup(&vfbd->base.base);
out_err2:
	kfree(vfbd);
out_err1:
	return ret;
}
/*
 * Generic Kernel modesetting functions
 */

static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
						 struct drm_file *file_priv,
						 struct drm_mode_fb_cmd *mode_cmd)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_framebuffer *vfb = NULL;
	struct vmw_surface *surface = NULL;
	struct vmw_dma_buffer *bo = NULL;
	u64 required_size;
	int ret;

	/*
	 * This code should be conditioned on Screen Objects not being used.
	 * If screen objects are used, we can allocate a GMR to hold the
	 * requested framebuffer.
	 */

	required_size = mode_cmd->pitch * mode_cmd->height;
	if (unlikely(required_size > (u64) dev_priv->vram_size)) {
		DRM_ERROR("VRAM size is too small for requested mode.\n");
		return ERR_PTR(-ENOMEM);
	}

	/*
	 * End conditioned code.
	 */

	ret = vmw_user_surface_lookup_handle(dev_priv, tfile,
					     mode_cmd->handle, &surface);
	if (ret)
		goto try_dmabuf;

	if (!surface->scanout)
		goto err_not_scanout;

	ret = vmw_kms_new_framebuffer_surface(dev_priv, file_priv, surface,
					      &vfb, mode_cmd);

	/* vmw_user_surface_lookup takes one ref so does new_fb */
	vmw_surface_unreference(&surface);

	if (ret) {
		DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
		return ERR_PTR(ret);
	}
	return &vfb->base;

try_dmabuf:
	DRM_INFO("%s: trying buffer\n", __func__);

	ret = vmw_user_dmabuf_lookup(tfile, mode_cmd->handle, &bo);
	if (ret) {
		DRM_ERROR("failed to find buffer: %i\n", ret);
		return ERR_PTR(-ENOENT);
	}

	ret = vmw_kms_new_framebuffer_dmabuf(dev_priv, bo, &vfb,
					     mode_cmd);

	/* vmw_user_dmabuf_lookup takes one ref so does new_fb */
	vmw_dmabuf_unreference(&bo);

	if (ret) {
		DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
		return ERR_PTR(ret);
	}

	return &vfb->base;

err_not_scanout:
	DRM_ERROR("surface not marked as scanout\n");
	/* vmw_user_surface_lookup takes one ref */
	vmw_surface_unreference(&surface);

	return ERR_PTR(-EINVAL);
}
static struct drm_mode_config_funcs vmw_kms_funcs = {
	.fb_create = vmw_kms_fb_create,
};
int vmw_kms_init(struct vmw_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	int ret;

	drm_mode_config_init(dev);
	dev->mode_config.funcs = &vmw_kms_funcs;
	dev->mode_config.min_width = 1;
	dev->mode_config.min_height = 1;
	/* assumed largest fb size */
	dev->mode_config.max_width = 8192;
	dev->mode_config.max_height = 8192;

	ret = vmw_kms_init_legacy_display_system(dev_priv);

	return ret;
}
int vmw_kms_close(struct vmw_private *dev_priv)
{
	/*
	 * The docs say we should take the lock before calling this function,
	 * but since it destroys encoders, and our destructor calls
	 * drm_encoder_cleanup which takes the lock, we would deadlock.
	 */
	drm_mode_config_cleanup(dev_priv->dev);
	vmw_kms_close_legacy_display_system(dev_priv);
	return 0;
}
int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct drm_vmw_cursor_bypass_arg *arg = data;
	struct vmw_display_unit *du;
	struct drm_mode_object *obj;
	struct drm_crtc *crtc;
	int ret = 0;

	mutex_lock(&dev->mode_config.mutex);
	if (arg->flags & DRM_VMW_CURSOR_BYPASS_ALL) {

		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
			du = vmw_crtc_to_du(crtc);
			du->hotspot_x = arg->xhot;
			du->hotspot_y = arg->yhot;
		}

		mutex_unlock(&dev->mode_config.mutex);
		return 0;
	}

	obj = drm_mode_object_find(dev, arg->crtc_id, DRM_MODE_OBJECT_CRTC);
	if (!obj) {
		ret = -EINVAL;
		goto out;
	}

	crtc = obj_to_crtc(obj);
	du = vmw_crtc_to_du(crtc);

	du->hotspot_x = arg->xhot;
	du->hotspot_y = arg->yhot;

out:
	mutex_unlock(&dev->mode_config.mutex);

	return ret;
}
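
/*
 * vmw_kms_write_svga - Program the device for a legacy (VGA-style) mode.
 *
 * Pitch is set through SVGA_REG_PITCHLOCK or the FIFO pitchlock bounce
 * register, whichever the device supports. Note that the RGB masks written
 * below correspond to an x8R8G8B8 pixel layout.
 */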
void vmw_kms_write_svga(struct vmw_private *vmw_priv,
			unsigned width, unsigned height, unsigned pitch,
			unsigned bpp, unsigned depth)
{
	if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
		vmw_write(vmw_priv, SVGA_REG_PITCHLOCK, pitch);
	else if (vmw_fifo_have_pitchlock(vmw_priv))
		iowrite32(pitch, vmw_priv->mmio_virt + SVGA_FIFO_PITCHLOCK);
	vmw_write(vmw_priv, SVGA_REG_WIDTH, width);
	vmw_write(vmw_priv, SVGA_REG_HEIGHT, height);
	vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, bpp);
	vmw_write(vmw_priv, SVGA_REG_DEPTH, depth);
	vmw_write(vmw_priv, SVGA_REG_RED_MASK, 0x00ff0000);
	vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, 0x0000ff00);
	vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, 0x000000ff);
}
int vmw_kms_save_vga(struct vmw_private *vmw_priv)
{
	struct vmw_vga_topology_state *save;
	uint32_t i;

	vmw_priv->vga_width = vmw_read(vmw_priv, SVGA_REG_WIDTH);
	vmw_priv->vga_height = vmw_read(vmw_priv, SVGA_REG_HEIGHT);
	vmw_priv->vga_depth = vmw_read(vmw_priv, SVGA_REG_DEPTH);
	vmw_priv->vga_bpp = vmw_read(vmw_priv, SVGA_REG_BITS_PER_PIXEL);
	vmw_priv->vga_pseudo = vmw_read(vmw_priv, SVGA_REG_PSEUDOCOLOR);
	vmw_priv->vga_red_mask = vmw_read(vmw_priv, SVGA_REG_RED_MASK);
	vmw_priv->vga_blue_mask = vmw_read(vmw_priv, SVGA_REG_BLUE_MASK);
	vmw_priv->vga_green_mask = vmw_read(vmw_priv, SVGA_REG_GREEN_MASK);
	if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
		vmw_priv->vga_pitchlock =
			vmw_read(vmw_priv, SVGA_REG_PITCHLOCK);
	else if (vmw_fifo_have_pitchlock(vmw_priv))
		vmw_priv->vga_pitchlock = ioread32(vmw_priv->mmio_virt +
						   SVGA_FIFO_PITCHLOCK);

	if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY))
		return 0;

	vmw_priv->num_displays = vmw_read(vmw_priv,
					  SVGA_REG_NUM_GUEST_DISPLAYS);

	if (vmw_priv->num_displays == 0)
		vmw_priv->num_displays = 1;

	for (i = 0; i < vmw_priv->num_displays; ++i) {
		save = &vmw_priv->vga_save[i];
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, i);
		save->primary = vmw_read(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY);
		save->pos_x = vmw_read(vmw_priv, SVGA_REG_DISPLAY_POSITION_X);
		save->pos_y = vmw_read(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y);
		save->width = vmw_read(vmw_priv, SVGA_REG_DISPLAY_WIDTH);
		save->height = vmw_read(vmw_priv, SVGA_REG_DISPLAY_HEIGHT);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
		if (i == 0 && vmw_priv->num_displays == 1 &&
		    save->width == 0 && save->height == 0) {

			/*
			 * It should be fairly safe to assume that these
			 * values are uninitialized.
			 */

			save->width = vmw_priv->vga_width - save->pos_x;
			save->height = vmw_priv->vga_height - save->pos_y;
		}
	}

	return 0;
}
int vmw_kms_restore_vga(struct vmw_private *vmw_priv)
{
	struct vmw_vga_topology_state *save;
	uint32_t i;

	vmw_write(vmw_priv, SVGA_REG_WIDTH, vmw_priv->vga_width);
	vmw_write(vmw_priv, SVGA_REG_HEIGHT, vmw_priv->vga_height);
	vmw_write(vmw_priv, SVGA_REG_DEPTH, vmw_priv->vga_depth);
	vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, vmw_priv->vga_bpp);
	vmw_write(vmw_priv, SVGA_REG_PSEUDOCOLOR, vmw_priv->vga_pseudo);
	vmw_write(vmw_priv, SVGA_REG_RED_MASK, vmw_priv->vga_red_mask);
	vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, vmw_priv->vga_green_mask);
	vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, vmw_priv->vga_blue_mask);
	if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
		vmw_write(vmw_priv, SVGA_REG_PITCHLOCK,
			  vmw_priv->vga_pitchlock);
	else if (vmw_fifo_have_pitchlock(vmw_priv))
		iowrite32(vmw_priv->vga_pitchlock,
			  vmw_priv->mmio_virt + SVGA_FIFO_PITCHLOCK);

	if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY))
		return 0;

	for (i = 0; i < vmw_priv->num_displays; ++i) {
		save = &vmw_priv->vga_save[i];
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, i);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, save->primary);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, save->pos_x);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, save->pos_y);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, save->width);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, save->height);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
	}

	return 0;
}
int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_update_layout_arg *arg =
		(struct drm_vmw_update_layout_arg *)data;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	void __user *user_rects;
	struct drm_vmw_rect *rects;
	unsigned rects_size;
	int ret;

	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0))
		return ret;

	if (!arg->num_outputs) {
		struct drm_vmw_rect def_rect = {0, 0, 800, 600};
		vmw_kms_ldu_update_layout(dev_priv, 1, &def_rect);
		goto out_unlock;
	}

	rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect);
	rects = kzalloc(rects_size, GFP_KERNEL);
	if (unlikely(!rects)) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	user_rects = (void __user *)(unsigned long)arg->rects;
	ret = copy_from_user(rects, user_rects, rects_size);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to get rects.\n");
		ret = -EFAULT;
		goto out_free;
	}

	vmw_kms_ldu_update_layout(dev_priv, arg->num_outputs, rects);

out_free:
	kfree(rects);
out_unlock:
	ttm_read_unlock(&vmaster->lock);
	return ret;
}
bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
				uint32_t pitch,
				uint32_t height)
{
	return ((u64) pitch * (u64) height) < (u64) dev_priv->vram_size;
}
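
/*
 * vmw_get_vblank_counter - DRM vblank counter hook.
 *
 * The virtual device exposes no real vblank interrupt or counter here, so
 * this reports a constant counter of zero.
 */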
u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc)
{
	return 0;
}