/**************************************************************************
 *
 * Copyright © 2007 David Airlie
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include "drmP.h"
#include "vmwgfx_drv.h"

#include "ttm/ttm_placement.h"

#define VMW_DIRTY_DELAY (HZ / 30)
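/*
 * Per-framebuffer private data. The framebuffer contents live in a
 * vmalloc'd shadow buffer (drawn into by fbcon and mmap writers) and
 * are copied into the VRAM-backed TTM buffer by the dirty-flush code
 * below; the dirty struct tracks the bounding box of pending updates
 * under its own spinlock.
 */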
struct vmw_fb_par {
	struct vmw_private *vmw_priv;

	void *vmalloc;

	struct vmw_dma_buffer *vmw_bo;
	struct ttm_bo_kmap_obj map;

	u32 pseudo_palette[17];

	unsigned depth;
	unsigned bpp;

	unsigned max_width;
	unsigned max_height;

	void *bo_ptr;
	unsigned bo_size;
	bool bo_iowrite;

	struct {
		spinlock_t lock;
		bool active;
		unsigned x1;
		unsigned y1;
		unsigned x2;
		unsigned y2;
	} dirty;
};
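/*
 * vmw_fb_setcolreg - store one truecolor pseudo-palette entry for
 * fbcon. Only regnos 0-15 are accepted.
 */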
static int vmw_fb_setcolreg(unsigned regno, unsigned red, unsigned green,
			    unsigned blue, unsigned transp,
			    struct fb_info *info)
{
	struct vmw_fb_par *par = info->par;
	u32 *pal = par->pseudo_palette;

	if (regno > 15) {
		DRM_ERROR("Bad regno %u.\n", regno);
		return 1;
	}

	switch (par->depth) {
	case 24:
	case 32:
		pal[regno] = ((red & 0xff00) << 8) |
			      (green & 0xff00) |
			     ((blue & 0xff00) >> 8);
		break;
	default:
		DRM_ERROR("Bad depth %u, bpp %u.\n", par->depth, par->bpp);
		return 1;
	}

	return 0;
}
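/*
 * vmw_fb_check_var - validate a requested mode. Only 32 bpp is
 * accepted (treated as depth 32 or 24 depending on whether an alpha
 * channel is requested); panning additionally requires the device to
 * expose SVGA_CAP_DISPLAY_TOPOLOGY.
 */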
static int vmw_fb_check_var(struct fb_var_screeninfo *var,
			    struct fb_info *info)
{
	int depth = var->bits_per_pixel;
	struct vmw_fb_par *par = info->par;
	struct vmw_private *vmw_priv = par->vmw_priv;

	switch (var->bits_per_pixel) {
	case 32:
		depth = (var->transp.length > 0) ? 32 : 24;
		break;
	default:
		DRM_ERROR("Bad bpp %u.\n", var->bits_per_pixel);
		return -EINVAL;
	}

	switch (depth) {
	case 24:
		var->red.offset = 16;
		var->green.offset = 8;
		var->blue.offset = 0;
		var->red.length = 8;
		var->green.length = 8;
		var->blue.length = 8;
		var->transp.length = 0;
		var->transp.offset = 0;
		break;
	case 32:
		var->red.offset = 16;
		var->green.offset = 8;
		var->blue.offset = 0;
		var->red.length = 8;
		var->green.length = 8;
		var->blue.length = 8;
		var->transp.length = 8;
		var->transp.offset = 24;
		break;
	default:
		DRM_ERROR("Bad depth %u.\n", depth);
		return -EINVAL;
	}

	if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
	    (var->xoffset != 0 || var->yoffset != 0)) {
		DRM_ERROR("Cannot handle panning without display topology\n");
		return -EINVAL;
	}

	if ((var->xoffset + var->xres) > par->max_width ||
	    (var->yoffset + var->yres) > par->max_height) {
		DRM_ERROR("Requested geometry cannot fit in framebuffer\n");
		return -EINVAL;
	}

	if (!vmw_kms_validate_mode_vram(vmw_priv,
					info->fix.line_length,
					var->yoffset + var->yres)) {
		DRM_ERROR("Requested geometry cannot fit in framebuffer\n");
		return -EINVAL;
	}

	return 0;
}
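/*
 * vmw_fb_set_par - program the device with the current mode. With
 * SVGA_CAP_DISPLAY_TOPOLOGY, the single guest display is resized and
 * repositioned to follow the panning offsets.
 */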
static int vmw_fb_set_par(struct fb_info *info)
{
	struct vmw_fb_par *par = info->par;
	struct vmw_private *vmw_priv = par->vmw_priv;

	vmw_kms_write_svga(vmw_priv, info->var.xres, info->var.yres,
			   info->fix.line_length,
			   par->bpp, par->depth);
	if (vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) {
		/* TODO: check if pitch and offset change */
		vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, 0);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, info->var.xoffset);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, info->var.yoffset);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, info->var.xres);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, info->var.yres);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
	}

	/* This check is really helpful: if it fails, the user can
	 * probably not see anything on the screen.
	 */
	WARN_ON(vmw_read(vmw_priv, SVGA_REG_FB_OFFSET) != 0);

	return 0;
}
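/*
 * Panning takes effect through vmw_fb_set_par (the display position
 * registers are written from var->xoffset/yoffset there), and there
 * is presumably nothing for the virtual hardware to do on blank, so
 * both handlers below are intentional no-ops.
 */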
static int vmw_fb_pan_display(struct fb_var_screeninfo *var,
			      struct fb_info *info)
{
	return 0;
}
static int vmw_fb_blank(int blank, struct fb_info *info)
{
	return 0;
}
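/*
 * Dirty code
 *
 * vmw_fb_dirty_flush copies the pending dirty region from the
 * vmalloc'd shadow buffer into the VRAM-backed bo, then queues an
 * SVGA_CMD_UPDATE on the FIFO so the host redraws that rectangle.
 * Note that the copy loop walks whole rows from the first dirty line
 * down to the end of the shadow buffer, restricted to the dirty
 * columns.
 */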
static void vmw_fb_dirty_flush(struct vmw_fb_par *par)
{
	struct vmw_private *vmw_priv = par->vmw_priv;
	struct fb_info *info = vmw_priv->fb_info;
	int stride = (info->fix.line_length / 4);
	int *src = (int *)info->screen_base;
	__le32 __iomem *vram_mem = par->bo_ptr;
	unsigned long flags;
	unsigned x, y, w, h;
	int i, k;
	struct {
		uint32_t header;
		SVGAFifoCmdUpdate body;
	} *cmd;

	if (vmw_priv->suspended)
		return;

	spin_lock_irqsave(&par->dirty.lock, flags);
	if (!par->dirty.active) {
		spin_unlock_irqrestore(&par->dirty.lock, flags);
		return;
	}
	x = par->dirty.x1;
	y = par->dirty.y1;
	w = min(par->dirty.x2, info->var.xres) - x;
	h = min(par->dirty.y2, info->var.yres) - y;
	par->dirty.x1 = par->dirty.x2 = 0;
	par->dirty.y1 = par->dirty.y2 = 0;
	spin_unlock_irqrestore(&par->dirty.lock, flags);

	for (i = y * stride; i < info->fix.smem_len / 4; i += stride) {
		for (k = i + x; k < i + x + w && k < info->fix.smem_len / 4; k++)
			iowrite32(src[k], vram_mem + k);
	}

#if 0
	DRM_INFO("%s, (%u, %u) (%ux%u)\n", __func__, x, y, w, h);
#endif

	cmd = vmw_fifo_reserve(vmw_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed.\n");
		return;
	}

	cmd->header = cpu_to_le32(SVGA_CMD_UPDATE);
	cmd->body.x = cpu_to_le32(x);
	cmd->body.y = cpu_to_le32(y);
	cmd->body.width = cpu_to_le32(w);
	cmd->body.height = cpu_to_le32(h);
	vmw_fifo_commit(vmw_priv, sizeof(*cmd));
}
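/*
 * vmw_fb_dirty_mark - extend the pending dirty rectangle to cover
 * (x1, y1)-(x1 + width, y1 + height). An empty rectangle
 * (x1 == x2) is replaced outright and the deferred work is kicked;
 * a non-empty one is grown to the union of the two rects.
 */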
static void vmw_fb_dirty_mark(struct vmw_fb_par *par,
			      unsigned x1, unsigned y1,
			      unsigned width, unsigned height)
{
	struct fb_info *info = par->vmw_priv->fb_info;
	unsigned long flags;
	unsigned x2 = x1 + width;
	unsigned y2 = y1 + height;

	spin_lock_irqsave(&par->dirty.lock, flags);
	if (par->dirty.x1 == par->dirty.x2) {
		par->dirty.x1 = x1;
		par->dirty.y1 = y1;
		par->dirty.x2 = x2;
		par->dirty.y2 = y2;
		/* if we are active, start the dirty work;
		 * we share the work with the defio system */
		if (par->dirty.active)
			schedule_delayed_work(&info->deferred_work, VMW_DIRTY_DELAY);
	} else {
		if (x1 < par->dirty.x1)
			par->dirty.x1 = x1;
		if (y1 < par->dirty.y1)
			par->dirty.y1 = y1;
		if (x2 > par->dirty.x2)
			par->dirty.x2 = x2;
		if (y2 > par->dirty.y2)
			par->dirty.y2 = y2;
	}
	spin_unlock_irqrestore(&par->dirty.lock, flags);
}
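/*
 * vmw_deferred_io - fb_deferred_io callback. Collapses the list of
 * touched pages into a span of scanlines, marks those lines dirty
 * across the full width of the framebuffer, and flushes.
 */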
static void vmw_deferred_io(struct fb_info *info,
			    struct list_head *pagelist)
{
	struct vmw_fb_par *par = info->par;
	unsigned long start, end, min, max;
	unsigned long flags;
	struct page *page;
	int y1, y2;

	min = ULONG_MAX;
	max = 0;
	list_for_each_entry(page, pagelist, lru) {
		start = page->index << PAGE_SHIFT;
		end = start + PAGE_SIZE - 1;
		min = min(min, start);
		max = max(max, end);
	}

	if (min < max) {
		y1 = min / info->fix.line_length;
		y2 = (max / info->fix.line_length) + 1;

		spin_lock_irqsave(&par->dirty.lock, flags);
		par->dirty.x1 = 0;
		par->dirty.y1 = y1;
		par->dirty.x2 = info->var.xres;
		par->dirty.y2 = y2;
		spin_unlock_irqrestore(&par->dirty.lock, flags);
	}

	vmw_fb_dirty_flush(par);
}
struct fb_deferred_io vmw_defio = {
	.delay		= VMW_DIRTY_DELAY,
	.deferred_io	= vmw_deferred_io,
};
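/*
 * Draw code
 *
 * Each wrapper lets the generic cfb_* helper draw into the
 * system-memory shadow and then marks the touched rectangle dirty so
 * it is flushed to VRAM by the deferred work.
 */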
static void vmw_fb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
	cfb_fillrect(info, rect);
	vmw_fb_dirty_mark(info->par, rect->dx, rect->dy,
			  rect->width, rect->height);
}

static void vmw_fb_copyarea(struct fb_info *info, const struct fb_copyarea *region)
{
	cfb_copyarea(info, region);
	vmw_fb_dirty_mark(info->par, region->dx, region->dy,
			  region->width, region->height);
}

static void vmw_fb_imageblit(struct fb_info *info, const struct fb_image *image)
{
	cfb_imageblit(info, image);
	vmw_fb_dirty_mark(info->par, image->dx, image->dy,
			  image->width, image->height);
}
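/*
 * Bring up code
 */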
static struct fb_ops vmw_fb_ops = {
	.owner = THIS_MODULE,
	.fb_check_var = vmw_fb_check_var,
	.fb_set_par = vmw_fb_set_par,
	.fb_setcolreg = vmw_fb_setcolreg,
	.fb_fillrect = vmw_fb_fillrect,
	.fb_copyarea = vmw_fb_copyarea,
	.fb_imageblit = vmw_fb_imageblit,
	.fb_pan_display = vmw_fb_pan_display,
	.fb_blank = vmw_fb_blank,
};
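/*
 * vmw_fb_create_bo - allocate the framebuffer bo with a no-evict
 * VRAM placement whose lpfn limit keeps it within the first size
 * bytes of VRAM. The fbdev master lock is held across the
 * allocation.
 */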
static int vmw_fb_create_bo(struct vmw_private *vmw_priv,
			    size_t size, struct vmw_dma_buffer **out)
{
	struct vmw_dma_buffer *vmw_bo;
	struct ttm_placement ne_placement = vmw_vram_ne_placement;
	int ret;

	ne_placement.lpfn = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;

	ret = ttm_write_lock(&vmw_priv->fbdev_master.lock, false);
	if (unlikely(ret != 0))
		return ret;

	vmw_bo = kmalloc(sizeof(*vmw_bo), GFP_KERNEL);
	if (!vmw_bo) {
		ret = -ENOMEM;
		goto err_unlock;
	}

	ret = vmw_dmabuf_init(vmw_priv, vmw_bo, size,
			      &ne_placement,
			      false,
			      &vmw_dmabuf_bo_free);
	if (unlikely(ret != 0))
		goto err_unlock; /* init frees the buffer on failure */

	*out = vmw_bo;

	ttm_write_unlock(&vmw_priv->fbdev_master.lock);

	return 0;

err_unlock:
	ttm_write_unlock(&vmw_priv->fbdev_master.lock);
	return ret;
}
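/*
 * vmw_fb_init - bring up fbdev emulation: allocate the vmalloc
 * shadow and the VRAM bo, fill in the fix/var screen info, wire up
 * deferred io and register the framebuffer.
 */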
int vmw_fb_init(struct vmw_private *vmw_priv)
{
	struct device *device = &vmw_priv->dev->pdev->dev;
	struct vmw_fb_par *par;
	struct fb_info *info;
	unsigned initial_width, initial_height;
	unsigned fb_width, fb_height;
	unsigned fb_bpp, fb_depth, fb_offset, fb_pitch, fb_size;
	int ret;

	/* XXX These shouldn't be hardcoded. */
	initial_width = 800;
	initial_height = 600;

	fb_bpp = 32;
	fb_depth = 24;

	/* XXX Neither should these. */
	fb_width = min(vmw_priv->fb_max_width, (unsigned)2048);
	fb_height = min(vmw_priv->fb_max_height, (unsigned)2048);

	initial_width = min(fb_width, initial_width);
	initial_height = min(fb_height, initial_height);

	fb_pitch = fb_width * fb_bpp / 8;
	fb_size = fb_pitch * fb_height;
	fb_offset = vmw_read(vmw_priv, SVGA_REG_FB_OFFSET);

	info = framebuffer_alloc(sizeof(*par), device);
	if (!info)
		return -ENOMEM;

	/*
	 * Par
	 */
	vmw_priv->fb_info = info;
	par = info->par;
	par->vmw_priv = vmw_priv;
	par->depth = fb_depth;
	par->bpp = fb_bpp;
	par->vmalloc = NULL;
	par->max_width = fb_width;
	par->max_height = fb_height;

	/*
	 * Create buffers and alloc memory
	 */
	par->vmalloc = vmalloc(fb_size);
	if (unlikely(par->vmalloc == NULL)) {
		ret = -ENOMEM;
		goto err_free;
	}

	ret = vmw_fb_create_bo(vmw_priv, fb_size, &par->vmw_bo);
	if (unlikely(ret != 0))
		goto err_free;

	ret = ttm_bo_kmap(&par->vmw_bo->base,
			  0,
			  par->vmw_bo->base.num_pages,
			  &par->map);
	if (unlikely(ret != 0))
		goto err_unref;
	par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &par->bo_iowrite);
	par->bo_size = fb_size;

	/*
	 * Fixed and var
	 */
	strcpy(info->fix.id, "svgadrmfb");
	info->fix.type = FB_TYPE_PACKED_PIXELS;
	info->fix.visual = FB_VISUAL_TRUECOLOR;
	info->fix.type_aux = 0;
	info->fix.xpanstep = 1; /* doing it in hw */
	info->fix.ypanstep = 1; /* doing it in hw */
	info->fix.ywrapstep = 0;
	info->fix.accel = FB_ACCEL_NONE;
	info->fix.line_length = fb_pitch;

	info->fix.smem_start = 0;
	info->fix.smem_len = fb_size;

	info->pseudo_palette = par->pseudo_palette;
	info->screen_base = par->vmalloc;
	info->screen_size = fb_size;

	info->flags = FBINFO_DEFAULT;
	info->fbops = &vmw_fb_ops;

	/* 24-bit depth by default */
	info->var.red.offset = 16;
	info->var.green.offset = 8;
	info->var.blue.offset = 0;
	info->var.red.length = 8;
	info->var.green.length = 8;
	info->var.blue.length = 8;
	info->var.transp.offset = 0;
	info->var.transp.length = 0;

	info->var.xres_virtual = fb_width;
	info->var.yres_virtual = fb_height;
	info->var.bits_per_pixel = par->bpp;
	info->var.xoffset = 0;
	info->var.yoffset = 0;
	info->var.activate = FB_ACTIVATE_NOW;
	info->var.height = -1;
	info->var.width = -1;

	info->var.xres = initial_width;
	info->var.yres = initial_height;

#if 0
	info->pixmap.size = 64*1024;
	info->pixmap.buf_align = 8;
	info->pixmap.access_align = 32;
	info->pixmap.flags = FB_PIXMAP_SYSTEM;
	info->pixmap.scan_align = 1;
#else
	info->pixmap.size = 0;
	info->pixmap.buf_align = 8;
	info->pixmap.access_align = 32;
	info->pixmap.flags = FB_PIXMAP_SYSTEM;
	info->pixmap.scan_align = 1;
#endif

	info->apertures = alloc_apertures(1);
	if (!info->apertures) {
		ret = -ENOMEM;
		goto err_aper;
	}
	info->apertures->ranges[0].base = vmw_priv->vram_start;
	info->apertures->ranges[0].size = vmw_priv->vram_size;

	/*
	 * Dirty & Deferred IO
	 */
	par->dirty.x1 = par->dirty.x2 = 0;
	par->dirty.y1 = par->dirty.y2 = 0;
	par->dirty.active = true;
	spin_lock_init(&par->dirty.lock);
	info->fbdefio = &vmw_defio;
	fb_deferred_io_init(info);

	ret = register_framebuffer(info);
	if (unlikely(ret != 0))
		goto err_defio;

	return 0;

err_defio:
	fb_deferred_io_cleanup(info);
err_aper:
	ttm_bo_kunmap(&par->map);
err_unref:
	ttm_bo_unref((struct ttm_buffer_object **)&par->vmw_bo);
err_free:
	vfree(par->vmalloc);
	framebuffer_release(info);
	vmw_priv->fb_info = NULL;

	return ret;
}
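/*
 * vmw_fb_close - unwind vmw_fb_init: stop deferred io, unregister
 * the framebuffer and release the bo, the shadow buffer and the
 * fb_info.
 */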
int vmw_fb_close(struct vmw_private *vmw_priv)
{
	struct fb_info *info;
	struct vmw_fb_par *par;
	struct ttm_buffer_object *bo;

	if (!vmw_priv->fb_info)
		return 0;

	info = vmw_priv->fb_info;
	par = info->par;
	bo = &par->vmw_bo->base;
	par->vmw_bo = NULL;

	fb_deferred_io_cleanup(info);
	unregister_framebuffer(info);

	ttm_bo_kunmap(&par->map);
	ttm_bo_unref(&bo);

	vfree(par->vmalloc);
	framebuffer_release(info);

	return 0;
}
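/*
 * vmw_dmabuf_from_vram - evict a buffer from VRAM by validating it
 * against the system-memory placement.
 */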
int vmw_dmabuf_from_vram(struct vmw_private *vmw_priv,
			 struct vmw_dma_buffer *vmw_bo)
{
	struct ttm_buffer_object *bo = &vmw_bo->base;
	int ret = 0;

	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_validate(bo, &vmw_sys_placement, false, false, false);
	ttm_bo_unreserve(bo);

	return ret;
}
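/*
 * vmw_dmabuf_to_start_of_vram - pin a buffer at offset 0 of VRAM.
 * A buffer that already sits in VRAM, but not at the start, is first
 * bounced to system memory so the no-evict validation below can
 * place it at offset 0.
 */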
int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
				struct vmw_dma_buffer *vmw_bo)
{
	struct ttm_buffer_object *bo = &vmw_bo->base;
	struct ttm_placement ne_placement = vmw_vram_ne_placement;
	int ret = 0;

	ne_placement.lpfn = bo->num_pages;

	ret = ttm_write_lock(&vmw_priv->active_master->lock, false);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (unlikely(ret != 0))
		goto err_unlock;

	if (bo->mem.mem_type == TTM_PL_VRAM &&
	    bo->mem.start < bo->num_pages &&
	    bo->mem.start > 0)
		(void) ttm_bo_validate(bo, &vmw_sys_placement, false,
				       false, false);

	ret = ttm_bo_validate(bo, &ne_placement, false, false, false);

	/* Could probably BUG_ON() here */
	WARN_ON(bo->offset != 0);

	ttm_bo_unreserve(bo);
err_unlock:
	ttm_write_unlock(&vmw_priv->active_master->lock);

	return ret;
}
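/*
 * vmw_fb_off - stop dirty flushing, wait for the deferred work to
 * finish, unmap the framebuffer bo and move it out of VRAM.
 */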
int vmw_fb_off(struct vmw_private *vmw_priv)
{
	struct fb_info *info;
	struct vmw_fb_par *par;
	unsigned long flags;

	if (!vmw_priv->fb_info)
		return -EINVAL;

	info = vmw_priv->fb_info;
	par = info->par;

	spin_lock_irqsave(&par->dirty.lock, flags);
	par->dirty.active = false;
	spin_unlock_irqrestore(&par->dirty.lock, flags);

	flush_delayed_work_sync(&info->deferred_work);

	par->bo_ptr = NULL;
	ttm_bo_kunmap(&par->map);

	vmw_dmabuf_from_vram(vmw_priv, par->vmw_bo);

	return 0;
}
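/*
 * vmw_fb_on - the reverse of vmw_fb_off: stop all overlays, pin the
 * framebuffer bo back at the start of VRAM, remap it, re-enable
 * dirty flushing and schedule a full-screen refresh.
 */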
int vmw_fb_on(struct vmw_private *vmw_priv)
{
	struct fb_info *info;
	struct vmw_fb_par *par;
	bool dummy;
	int ret;
	unsigned long flags;

	if (!vmw_priv->fb_info)
		return -EINVAL;

	info = vmw_priv->fb_info;
	par = info->par;

	/* we are already active */
	if (par->bo_ptr != NULL)
		return 0;

	/* Make sure that all overlays are stopped when we take over */
	vmw_overlay_stop_all(vmw_priv);

	ret = vmw_dmabuf_to_start_of_vram(vmw_priv, par->vmw_bo);
	if (unlikely(ret != 0)) {
		DRM_ERROR("could not move buffer to start of VRAM\n");
		goto err_no_buffer;
	}

	ret = ttm_bo_kmap(&par->vmw_bo->base,
			  0,
			  par->vmw_bo->base.num_pages,
			  &par->map);
	BUG_ON(ret != 0);
	par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &dummy);

	spin_lock_irqsave(&par->dirty.lock, flags);
	par->dirty.active = true;
	spin_unlock_irqrestore(&par->dirty.lock, flags);

err_no_buffer:
	vmw_fb_set_par(info);

	vmw_fb_dirty_mark(par, 0, 0, info->var.xres, info->var.yres);

	/* If something was already dirty we won't schedule new work,
	 * so do it now */
	schedule_delayed_work(&info->deferred_work, 0);

	return 0;
}