/**************************************************************************
 *
 * Copyright © 2007 David Airlie
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include <linux/export.h>

#include "vmwgfx_drv.h"

#include "ttm/ttm_placement.h"
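/*
 * Dirty-region flushes are batched through delayed work: VMW_DIRTY_DELAY
 * is the work interval in jiffies, so the shadow framebuffer is copied
 * out to VRAM at most about 30 times per second.
 */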
#define VMW_DIRTY_DELAY (HZ / 30)
struct vmw_fb_par {
        struct vmw_private *vmw_priv;

        void *vmalloc;

        struct vmw_dma_buffer *vmw_bo;
        struct ttm_bo_kmap_obj map;

        u32 pseudo_palette[17];

        unsigned depth;
        unsigned bpp;

        unsigned max_width;
        unsigned max_height;

        void *bo_ptr;
        unsigned bo_size;
        bool bo_iowrite;

        struct {
                spinlock_t lock;
                bool active;
                unsigned x1, y1;
                unsigned x2, y2;
        } dirty;
};
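/*
 * The console runs in truecolor here, so setcolreg only fills the
 * 16-entry pseudo palette consumed by the cfb_* drawing helpers;
 * nothing is programmed into the device.
 */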
static int vmw_fb_setcolreg(unsigned regno, unsigned red, unsigned green,
                            unsigned blue, unsigned transp,
                            struct fb_info *info)
{
        struct vmw_fb_par *par = info->par;
        u32 *pal = par->pseudo_palette;

        if (regno > 15) {
                DRM_ERROR("Bad regno %u.\n", regno);
                return 1;
        }

        switch (par->depth) {
        case 24:
        case 32:
                pal[regno] = ((red & 0xff00) << 8) |
                              (green & 0xff00) |
                             ((blue & 0xff00) >> 8);
                break;
        default:
                DRM_ERROR("Bad depth %u, bpp %u.\n", par->depth, par->bpp);
                return 1;
        }

        return 0;
}
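/*
 * check_var only validates and normalizes the requested mode; it must
 * not touch the hardware. The accepted state is committed later in
 * vmw_fb_set_par().
 */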
static int vmw_fb_check_var(struct fb_var_screeninfo *var,
                            struct fb_info *info)
{
        int depth = var->bits_per_pixel;
        struct vmw_fb_par *par = info->par;
        struct vmw_private *vmw_priv = par->vmw_priv;

        switch (var->bits_per_pixel) {
        case 32:
                depth = (var->transp.length > 0) ? 32 : 24;
                break;
        default:
                DRM_ERROR("Bad bpp %u.\n", var->bits_per_pixel);
                return -EINVAL;
        }

        switch (depth) {
        case 24:
                var->red.offset = 16;
                var->green.offset = 8;
                var->blue.offset = 0;
                var->red.length = 8;
                var->green.length = 8;
                var->blue.length = 8;
                var->transp.length = 0;
                var->transp.offset = 0;
                break;
        case 32:
                var->red.offset = 16;
                var->green.offset = 8;
                var->blue.offset = 0;
                var->red.length = 8;
                var->green.length = 8;
                var->blue.length = 8;
                var->transp.length = 8;
                var->transp.offset = 24;
                break;
        default:
                DRM_ERROR("Bad depth %u.\n", depth);
                return -EINVAL;
        }

        if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
            (var->xoffset != 0 || var->yoffset != 0)) {
                DRM_ERROR("Can not handle panning without display topology\n");
                return -EINVAL;
        }

        if ((var->xoffset + var->xres) > par->max_width ||
            (var->yoffset + var->yres) > par->max_height) {
                DRM_ERROR("Requested geom can not fit in framebuffer\n");
                return -EINVAL;
        }

        if (!vmw_kms_validate_mode_vram(vmw_priv,
                                        info->fix.line_length,
                                        var->yoffset + var->yres)) {
                DRM_ERROR("Requested geom can not fit in framebuffer\n");
                return -EINVAL;
        }

        return 0;
}
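/*
 * Commit the validated mode: program the SVGA device through
 * vmw_kms_write_svga() and, when the device supports a display
 * topology, describe a single primary display covering the mode.
 */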
static int vmw_fb_set_par(struct fb_info *info)
{
        struct vmw_fb_par *par = info->par;
        struct vmw_private *vmw_priv = par->vmw_priv;
        int ret;

        ret = vmw_kms_write_svga(vmw_priv, info->var.xres, info->var.yres,
                                 info->fix.line_length,
                                 par->bpp, par->depth);
        if (ret)
                return ret;

        if (vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) {
                /* TODO: check whether pitch and offset change */
                vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1);
                vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, 0);
                vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true);
                vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, info->var.xoffset);
                vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, info->var.yoffset);
                vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, info->var.xres);
                vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, info->var.yres);
                vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
        }

        /* This warning is really helpful: if the fb offset is nonzero
         * the user probably cannot see anything on the screen.
         */
        WARN_ON(vmw_read(vmw_priv, SVGA_REG_FB_OFFSET) != 0);

        return 0;
}
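/*
 * Panning was already range-checked in vmw_fb_check_var() and is applied
 * through the display position registers in vmw_fb_set_par(), so the
 * pan_display hook has nothing left to do. Blanking is likewise a no-op,
 * presumably because window visibility is under the host's control.
 */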
static int vmw_fb_pan_display(struct fb_var_screeninfo *var,
                              struct fb_info *info)
{
        return 0;
}
static int vmw_fb_blank(int blank, struct fb_info *info)
{
        return 0;
}
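/*
 * Dirty code
 *
 * Drawing goes to the vmalloc'ed shadow buffer; a dirty rectangle is
 * accumulated under dirty.lock and flushed later: the scanlines it
 * covers are copied into the VRAM-backed buffer object with iowrite32()
 * and an SVGA_CMD_UPDATE command tells the host which region changed.
 */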
static void vmw_fb_dirty_flush(struct vmw_fb_par *par)
{
        struct vmw_private *vmw_priv = par->vmw_priv;
        struct fb_info *info = vmw_priv->fb_info;
        int stride = (info->fix.line_length / 4);
        int *src = (int *)info->screen_base;
        __le32 __iomem *vram_mem = par->bo_ptr;
        unsigned long flags;
        unsigned x, y, w, h;
        int i, k;
        struct {
                uint32_t header;
                SVGAFifoCmdUpdate body;
        } *cmd;

        if (vmw_priv->suspended)
                return;

        spin_lock_irqsave(&par->dirty.lock, flags);
        if (!par->dirty.active) {
                spin_unlock_irqrestore(&par->dirty.lock, flags);
                return;
        }
        x = par->dirty.x1;
        y = par->dirty.y1;
        w = min(par->dirty.x2, info->var.xres) - x;
        h = min(par->dirty.y2, info->var.yres) - y;
        par->dirty.x1 = par->dirty.x2 = 0;
        par->dirty.y1 = par->dirty.y2 = 0;
        spin_unlock_irqrestore(&par->dirty.lock, flags);

        for (i = y * stride; i < info->fix.smem_len / 4; i += stride) {
                for (k = i + x; k < i + x + w && k < info->fix.smem_len / 4; k++)
                        iowrite32(src[k], vram_mem + k);
        }

#if 0
        DRM_INFO("%s, (%u, %u) (%ux%u)\n", __func__, x, y, w, h);
#endif

        cmd = vmw_fifo_reserve(vmw_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Fifo reserve failed.\n");
                return;
        }

        cmd->header = cpu_to_le32(SVGA_CMD_UPDATE);
        cmd->body.x = cpu_to_le32(x);
        cmd->body.y = cpu_to_le32(y);
        cmd->body.width = cpu_to_le32(w);
        cmd->body.height = cpu_to_le32(h);
        vmw_fifo_commit(vmw_priv, sizeof(*cmd));
}
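/*
 * Grow the pending dirty rectangle to cover (x1, y1)-(x2, y2). An empty
 * rectangle (x1 == x2) means nothing was pending, so the deferred work
 * is scheduled to flush it; otherwise the bounds are unioned in.
 */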
static void vmw_fb_dirty_mark(struct vmw_fb_par *par,
                              unsigned x1, unsigned y1,
                              unsigned width, unsigned height)
{
        struct fb_info *info = par->vmw_priv->fb_info;
        unsigned long flags;
        unsigned x2 = x1 + width;
        unsigned y2 = y1 + height;

        spin_lock_irqsave(&par->dirty.lock, flags);
        if (par->dirty.x1 == par->dirty.x2) {
                par->dirty.x1 = x1;
                par->dirty.y1 = y1;
                par->dirty.x2 = x2;
                par->dirty.y2 = y2;
                /* If we are active, start the dirty work;
                 * we share the work with the defio system. */
                if (par->dirty.active)
                        schedule_delayed_work(&info->deferred_work, VMW_DIRTY_DELAY);
        } else {
                if (x1 < par->dirty.x1)
                        par->dirty.x1 = x1;
                if (y1 < par->dirty.y1)
                        par->dirty.y1 = y1;
                if (x2 > par->dirty.x2)
                        par->dirty.x2 = x2;
                if (y2 > par->dirty.y2)
                        par->dirty.y2 = y2;
        }
        spin_unlock_irqrestore(&par->dirty.lock, flags);
}
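/*
 * Deferred I/O callback: the page list names the shadow-buffer pages
 * written since the last flush. Page offsets are converted back into a
 * scanline range, marked dirty for the full width, and flushed.
 */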
static void vmw_deferred_io(struct fb_info *info,
                            struct list_head *pagelist)
{
        struct vmw_fb_par *par = info->par;
        unsigned long start, end, min, max;
        unsigned long flags;
        struct page *page;
        int y1, y2;

        min = ULONG_MAX;
        max = 0;
        list_for_each_entry(page, pagelist, lru) {
                start = page->index << PAGE_SHIFT;
                end = start + PAGE_SIZE - 1;
                min = min(min, start);
                max = max(max, end);
        }

        if (min < max) {
                y1 = min / info->fix.line_length;
                y2 = (max / info->fix.line_length) + 1;

                spin_lock_irqsave(&par->dirty.lock, flags);
                par->dirty.x1 = 0;
                par->dirty.y1 = y1;
                par->dirty.x2 = info->var.xres;
                par->dirty.y2 = y2;
                spin_unlock_irqrestore(&par->dirty.lock, flags);
        }

        vmw_fb_dirty_flush(par);
}
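/*
 * Hooked up via info->fbdefio in vmw_fb_init(): the fb deferred I/O
 * core write-protects the shadow pages, collects the pages touched by
 * writers, and calls .deferred_io after .delay jiffies.
 */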
struct fb_deferred_io vmw_defio = {
        .delay		= VMW_DIRTY_DELAY,
        .deferred_io	= vmw_deferred_io,
};
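/*
 * Draw code
 *
 * Each drawing op renders into the shadow buffer with the generic cfb_*
 * helper and then marks the touched rectangle dirty.
 */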
static void vmw_fb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
        cfb_fillrect(info, rect);
        vmw_fb_dirty_mark(info->par, rect->dx, rect->dy,
                          rect->width, rect->height);
}

static void vmw_fb_copyarea(struct fb_info *info, const struct fb_copyarea *region)
{
        cfb_copyarea(info, region);
        vmw_fb_dirty_mark(info->par, region->dx, region->dy,
                          region->width, region->height);
}

static void vmw_fb_imageblit(struct fb_info *info, const struct fb_image *image)
{
        cfb_imageblit(info, image);
        vmw_fb_dirty_mark(info->par, image->dx, image->dy,
                          image->width, image->height);
}
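/* Framebuffer operations; vmw_fb_init() points info->fbops here. */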
static struct fb_ops vmw_fb_ops = {
        .owner = THIS_MODULE,
        .fb_check_var = vmw_fb_check_var,
        .fb_set_par = vmw_fb_set_par,
        .fb_setcolreg = vmw_fb_setcolreg,
        .fb_fillrect = vmw_fb_fillrect,
        .fb_copyarea = vmw_fb_copyarea,
        .fb_imageblit = vmw_fb_imageblit,
        .fb_pan_display = vmw_fb_pan_display,
        .fb_blank = vmw_fb_blank,
};
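/*
 * Allocate the VRAM-backed buffer object for the framebuffer. The
 * no-evict placement is clamped via lpfn so the buffer lands entirely
 * within the first 'size' bytes of VRAM.
 */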
static int vmw_fb_create_bo(struct vmw_private *vmw_priv,
                            size_t size, struct vmw_dma_buffer **out)
{
        struct vmw_dma_buffer *vmw_bo;
        struct ttm_placement ne_placement = vmw_vram_ne_placement;
        int ret;

        ne_placement.lpfn = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;

        /* interruptible? */
        ret = ttm_write_lock(&vmw_priv->fbdev_master.lock, false);
        if (unlikely(ret != 0))
                return ret;

        vmw_bo = kmalloc(sizeof(*vmw_bo), GFP_KERNEL);
        if (!vmw_bo) {
                ret = -ENOMEM;
                goto err_unlock;
        }

        ret = vmw_dmabuf_init(vmw_priv, vmw_bo, size,
                              &ne_placement,
                              false,
                              &vmw_dmabuf_bo_free);
        if (unlikely(ret != 0))
                goto err_unlock; /* init frees the buffer on failure */

        *out = vmw_bo;

        ttm_write_unlock(&vmw_priv->fbdev_master.lock);

        return 0;

err_unlock:
        ttm_write_unlock(&vmw_priv->fbdev_master.lock);
        return ret;
}
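/*
 * Bring up the fbdev: allocate fb_info plus the shadow and VRAM
 * buffers, fill in the fixed and variable screen info, set up deferred
 * I/O and register the framebuffer.
 */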
int vmw_fb_init(struct vmw_private *vmw_priv)
{
        struct device *device = &vmw_priv->dev->pdev->dev;
        struct vmw_fb_par *par;
        struct fb_info *info;
        unsigned initial_width, initial_height;
        unsigned fb_width, fb_height;
        unsigned fb_bpp, fb_depth, fb_offset, fb_pitch, fb_size;
        int ret;

        fb_bpp = 32;
        fb_depth = 24;

        /* XXX These maximums shouldn't be hardcoded. */
        fb_width = min(vmw_priv->fb_max_width, (unsigned)2048);
        fb_height = min(vmw_priv->fb_max_height, (unsigned)2048);

        initial_width = min(vmw_priv->initial_width, fb_width);
        initial_height = min(vmw_priv->initial_height, fb_height);

        fb_pitch = fb_width * fb_bpp / 8;
        fb_size = fb_pitch * fb_height;
        fb_offset = vmw_read(vmw_priv, SVGA_REG_FB_OFFSET);

        info = framebuffer_alloc(sizeof(*par), device);
        if (!info)
                return -ENOMEM;

        /*
         * Par
         */
        vmw_priv->fb_info = info;
        par = info->par;
        par->vmw_priv = vmw_priv;
        par->depth = fb_depth;
        par->bpp = fb_bpp;
        par->vmw_bo = NULL;
        par->max_width = fb_width;
        par->max_height = fb_height;

        /*
         * Create buffers and alloc memory
         */
        par->vmalloc = vmalloc(fb_size);
        if (unlikely(par->vmalloc == NULL)) {
                ret = -ENOMEM;
                goto err_free;
        }

        ret = vmw_fb_create_bo(vmw_priv, fb_size, &par->vmw_bo);
        if (unlikely(ret != 0))
                goto err_free;

        ret = ttm_bo_kmap(&par->vmw_bo->base,
                          0,
                          par->vmw_bo->base.num_pages,
                          &par->map);
        if (unlikely(ret != 0))
                goto err_unref;
        par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &par->bo_iowrite);
        par->bo_size = fb_size;

        /*
         * Fixed and var
         */
        strcpy(info->fix.id, "svgadrmfb");
        info->fix.type = FB_TYPE_PACKED_PIXELS;
        info->fix.visual = FB_VISUAL_TRUECOLOR;
        info->fix.type_aux = 0;
        info->fix.xpanstep = 1; /* doing it in hw */
        info->fix.ypanstep = 1; /* doing it in hw */
        info->fix.ywrapstep = 0;
        info->fix.accel = FB_ACCEL_NONE;
        info->fix.line_length = fb_pitch;

        info->fix.smem_start = 0;
        info->fix.smem_len = fb_size;

        info->pseudo_palette = par->pseudo_palette;
        info->screen_base = par->vmalloc;
        info->screen_size = fb_size;

        info->flags = FBINFO_DEFAULT;
        info->fbops = &vmw_fb_ops;

        /* 24-bit depth by default */
        info->var.red.offset = 16;
        info->var.green.offset = 8;
        info->var.blue.offset = 0;
        info->var.red.length = 8;
        info->var.green.length = 8;
        info->var.blue.length = 8;
        info->var.transp.offset = 0;
        info->var.transp.length = 0;

        info->var.xres_virtual = fb_width;
        info->var.yres_virtual = fb_height;
        info->var.bits_per_pixel = par->bpp;
        info->var.xoffset = 0;
        info->var.yoffset = 0;
        info->var.activate = FB_ACTIVATE_NOW;
        info->var.height = -1;
        info->var.width = -1;

        info->var.xres = initial_width;
        info->var.yres = initial_height;

        /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */

        info->apertures = alloc_apertures(1);
        if (!info->apertures) {
                ret = -ENOMEM;
                goto err_aper;
        }
        info->apertures->ranges[0].base = vmw_priv->vram_start;
        info->apertures->ranges[0].size = vmw_priv->vram_size;

        /*
         * Dirty & Deferred IO
         */
        par->dirty.x1 = par->dirty.x2 = 0;
        par->dirty.y1 = par->dirty.y2 = 0;
        par->dirty.active = true;
        spin_lock_init(&par->dirty.lock);
        info->fbdefio = &vmw_defio;
        fb_deferred_io_init(info);

        ret = register_framebuffer(info);
        if (unlikely(ret != 0))
                goto err_defio;

        return 0;

err_defio:
        fb_deferred_io_cleanup(info);
err_aper:
        ttm_bo_kunmap(&par->map);
err_unref:
        ttm_bo_unref((struct ttm_buffer_object **)&par->vmw_bo);
err_free:
        vfree(par->vmalloc);
        framebuffer_release(info);
        vmw_priv->fb_info = NULL;

        return ret;
}
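/* Tear down everything vmw_fb_init() set up, in roughly reverse order. */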
int vmw_fb_close(struct vmw_private *vmw_priv)
{
        struct fb_info *info;
        struct vmw_fb_par *par;
        struct ttm_buffer_object *bo;

        if (!vmw_priv->fb_info)
                return 0;

        info = vmw_priv->fb_info;
        par = info->par;
        bo = &par->vmw_bo->base;
        par->vmw_bo = NULL;

        /* ??? order */
        fb_deferred_io_cleanup(info);
        unregister_framebuffer(info);

        ttm_bo_kunmap(&par->map);
        ttm_bo_unref(&bo);

        vfree(par->vmalloc);
        framebuffer_release(info);

        return 0;
}
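/*
 * Stop dirty flushing and release the VRAM mapping so the rest of the
 * driver can take over the device, typically while a DRM master owns it.
 */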
int vmw_fb_off(struct vmw_private *vmw_priv)
{
        struct fb_info *info;
        struct vmw_fb_par *par;
        unsigned long flags;

        if (!vmw_priv->fb_info)
                return -EINVAL;

        info = vmw_priv->fb_info;
        par = info->par;

        spin_lock_irqsave(&par->dirty.lock, flags);
        par->dirty.active = false;
        spin_unlock_irqrestore(&par->dirty.lock, flags);

        flush_delayed_work_sync(&info->deferred_work);

        par->bo_ptr = NULL;
        ttm_bo_kunmap(&par->map);

        vmw_dmabuf_unpin(vmw_priv, par->vmw_bo, false);

        return 0;
}
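/*
 * Resume fbdev operation: pin the buffer back at the start of VRAM,
 * remap it, re-enable dirty tracking and force a full-screen refresh.
 */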
int vmw_fb_on(struct vmw_private *vmw_priv)
{
        struct fb_info *info;
        struct vmw_fb_par *par;
        unsigned long flags;
        bool dummy;
        int ret;

        if (!vmw_priv->fb_info)
                return -EINVAL;

        info = vmw_priv->fb_info;
        par = info->par;

        /* we are already active */
        if (par->bo_ptr != NULL)
                return 0;

        /* Make sure that all overlays are stopped when we take over */
        vmw_overlay_stop_all(vmw_priv);

        ret = vmw_dmabuf_to_start_of_vram(vmw_priv, par->vmw_bo, true, false);
        if (unlikely(ret != 0)) {
                DRM_ERROR("could not move buffer to start of VRAM\n");
                goto err_no_buffer;
        }

        ret = ttm_bo_kmap(&par->vmw_bo->base,
                          0,
                          par->vmw_bo->base.num_pages,
                          &par->map);
        BUG_ON(ret != 0);
        par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &dummy);

        spin_lock_irqsave(&par->dirty.lock, flags);
        par->dirty.active = true;
        spin_unlock_irqrestore(&par->dirty.lock, flags);

err_no_buffer:
        vmw_fb_set_par(info);

        vmw_fb_dirty_mark(par, 0, 0, info->var.xres, info->var.yres);

        /* If there already was stuff dirty we won't
         * schedule new work, so let's do it now */
        schedule_delayed_work(&info->deferred_work, 0);

        return 0;
}