/**************************************************************************
 *
 * Copyright © 2007 David Airlie
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
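
/*
 * fbdev support for the vmwgfx SVGA device.
 *
 * The framebuffer contents live in a vmalloc'd shadow buffer
 * (info->screen_base).  Dirty regions are accumulated and flushed from the
 * fbdev deferred-I/O worker: the touched data is copied into a TTM buffer
 * object pinned in VRAM and the host is notified with an SVGA_CMD_UPDATE
 * FIFO command.
 */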

#include "drmP.h"
#include "vmwgfx_drv.h"

#include "ttm/ttm_placement.h"

#define VMW_DIRTY_DELAY (HZ / 30)

struct vmw_fb_par {
	struct vmw_private *vmw_priv;

	void *vmalloc;

	struct vmw_dma_buffer *vmw_bo;
	struct ttm_bo_kmap_obj map;

	u32 pseudo_palette[17];

	unsigned depth;
	unsigned bpp;

	unsigned max_width;
	unsigned max_height;

	void *bo_ptr;
	unsigned bo_size;
	bool bo_iowrite;

	struct {
		spinlock_t lock;
		bool active;
		unsigned x1;
		unsigned y1;
		unsigned x2;
		unsigned y2;
	} dirty;
};

static int vmw_fb_setcolreg(unsigned regno, unsigned red, unsigned green,
			    unsigned blue, unsigned transp,
			    struct fb_info *info)
{
	struct vmw_fb_par *par = info->par;
	u32 *pal = par->pseudo_palette;

	if (regno > 15) {
		DRM_ERROR("Bad regno %u.\n", regno);
		return 1;
	}

	switch (par->depth) {
	case 24:
	case 32:
		pal[regno] = ((red & 0xff00) << 8) |
			      (green & 0xff00) |
			     ((blue & 0xff00) >> 8);
		break;
	default:
		DRM_ERROR("Bad depth %u, bpp %u.\n", par->depth, par->bpp);
		return 1;
	}

	return 0;
}

static int vmw_fb_check_var(struct fb_var_screeninfo *var,
			    struct fb_info *info)
{
	int depth = var->bits_per_pixel;
	struct vmw_fb_par *par = info->par;
	struct vmw_private *vmw_priv = par->vmw_priv;

	switch (var->bits_per_pixel) {
	case 32:
		depth = (var->transp.length > 0) ? 32 : 24;
		break;
	default:
		DRM_ERROR("Bad bpp %u.\n", var->bits_per_pixel);
		return -EINVAL;
	}

	switch (depth) {
	case 24:
		var->red.offset = 16;
		var->green.offset = 8;
		var->blue.offset = 0;
		var->red.length = 8;
		var->green.length = 8;
		var->blue.length = 8;
		var->transp.length = 0;
		var->transp.offset = 0;
		break;
	case 32:
		var->red.offset = 16;
		var->green.offset = 8;
		var->blue.offset = 0;
		var->red.length = 8;
		var->green.length = 8;
		var->blue.length = 8;
		var->transp.length = 8;
		var->transp.offset = 24;
		break;
	default:
		DRM_ERROR("Bad depth %u.\n", depth);
		return -EINVAL;
	}

	/* without multimon it's hard to resize */
	if (!(vmw_priv->capabilities & SVGA_CAP_MULTIMON) &&
	    (var->xres != par->max_width ||
	     var->yres != par->max_height)) {
		DRM_ERROR("Tried to resize, but we don't have multimon\n");
		return -EINVAL;
	}

	if (var->xres > par->max_width ||
	    var->yres > par->max_height) {
		DRM_ERROR("Requested geom can not fit in framebuffer\n");
		return -EINVAL;
	}

	return 0;
}

static int vmw_fb_set_par(struct fb_info *info)
{
	struct vmw_fb_par *par = info->par;
	struct vmw_private *vmw_priv = par->vmw_priv;

	if (vmw_priv->capabilities & SVGA_CAP_MULTIMON) {
		vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, 0);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, 0);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, 0);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, 0);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, 0);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);

		vmw_write(vmw_priv, SVGA_REG_ENABLE, 1);
		vmw_write(vmw_priv, SVGA_REG_WIDTH, par->max_width);
		vmw_write(vmw_priv, SVGA_REG_HEIGHT, par->max_height);
		vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, par->bpp);
		vmw_write(vmw_priv, SVGA_REG_DEPTH, par->depth);
		vmw_write(vmw_priv, SVGA_REG_RED_MASK, 0x00ff0000);
		vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, 0x0000ff00);
		vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, 0x000000ff);

		/* TODO check if pitch and offset changes */

		vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, 0);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, info->var.xoffset);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, info->var.yoffset);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, info->var.xres);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, info->var.yres);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
	} else {
		vmw_write(vmw_priv, SVGA_REG_WIDTH, info->var.xres);
		vmw_write(vmw_priv, SVGA_REG_HEIGHT, info->var.yres);

		/* TODO check if pitch and offset changes */
	}

	return 0;
}

static int vmw_fb_pan_display(struct fb_var_screeninfo *var,
			      struct fb_info *info)
{
	return 0;
}

static int vmw_fb_blank(int blank, struct fb_info *info)
{
	return 0;
}
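
/*
 * Dirty handling: vmw_fb_dirty_flush() copies the dirty scanlines from the
 * shadow buffer (info->screen_base) into the kmapped VRAM buffer object and
 * then asks the host to redraw the region with an SVGA_CMD_UPDATE FIFO
 * command.
 */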

static void vmw_fb_dirty_flush(struct vmw_fb_par *par)
{
	struct vmw_private *vmw_priv = par->vmw_priv;
	struct fb_info *info = vmw_priv->fb_info;
	int stride = (info->fix.line_length / 4);
	int *src = (int *)info->screen_base;
	__le32 __iomem *vram_mem = par->bo_ptr;
	unsigned long flags;
	unsigned x, y, w, h;
	int i, k;
	struct {
		uint32_t header;
		SVGAFifoCmdUpdate body;
	} *cmd;

	spin_lock_irqsave(&par->dirty.lock, flags);
	if (!par->dirty.active) {
		spin_unlock_irqrestore(&par->dirty.lock, flags);
		return;
	}
	x = par->dirty.x1;
	y = par->dirty.y1;
	w = min(par->dirty.x2, info->var.xres) - x;
	h = min(par->dirty.y2, info->var.yres) - y;
	par->dirty.x1 = par->dirty.x2 = 0;
	par->dirty.y1 = par->dirty.y2 = 0;
	spin_unlock_irqrestore(&par->dirty.lock, flags);

	for (i = y * stride; i < info->fix.smem_len / 4; i += stride) {
		for (k = i+x; k < i+x+w && k < info->fix.smem_len / 4; k++)
			iowrite32(src[k], vram_mem + k);
	}

	DRM_INFO("%s, (%u, %u) (%ux%u)\n", __func__, x, y, w, h);

	cmd = vmw_fifo_reserve(vmw_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed.\n");
		return;
	}

	cmd->header = cpu_to_le32(SVGA_CMD_UPDATE);
	cmd->body.x = cpu_to_le32(x);
	cmd->body.y = cpu_to_le32(y);
	cmd->body.width = cpu_to_le32(w);
	cmd->body.height = cpu_to_le32(h);
	vmw_fifo_commit(vmw_priv, sizeof(*cmd));
}
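
/*
 * vmw_fb_dirty_mark() grows the pending dirty rectangle under the dirty
 * spinlock.  When the rectangle was previously empty it also schedules the
 * deferred-I/O work that will eventually call vmw_fb_dirty_flush().
 */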

static void vmw_fb_dirty_mark(struct vmw_fb_par *par,
			      unsigned x1, unsigned y1,
			      unsigned width, unsigned height)
{
	struct fb_info *info = par->vmw_priv->fb_info;
	unsigned long flags;
	unsigned x2 = x1 + width;
	unsigned y2 = y1 + height;

	spin_lock_irqsave(&par->dirty.lock, flags);
	if (par->dirty.x1 == par->dirty.x2) {
		par->dirty.x1 = x1;
		par->dirty.y1 = y1;
		par->dirty.x2 = x2;
		par->dirty.y2 = y2;
		/* if we are active start the dirty work
		 * we share the work with the defio system */
		if (par->dirty.active)
			schedule_delayed_work(&info->deferred_work, VMW_DIRTY_DELAY);
	} else {
		if (x1 < par->dirty.x1)
			par->dirty.x1 = x1;
		if (y1 < par->dirty.y1)
			par->dirty.y1 = y1;
		if (x2 > par->dirty.x2)
			par->dirty.x2 = x2;
		if (y2 > par->dirty.y2)
			par->dirty.y2 = y2;
	}
	spin_unlock_irqrestore(&par->dirty.lock, flags);
}
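
/*
 * Deferred-I/O callback: fbdev hands us the list of pages that were written
 * through the mmapped shadow buffer; we convert that into a scanline range,
 * fold it into the dirty rectangle and flush immediately.
 */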

static void vmw_deferred_io(struct fb_info *info,
			    struct list_head *pagelist)
{
	struct vmw_fb_par *par = info->par;
	unsigned long start, end, min, max;
	unsigned long flags;
	struct page *page;
	int y1, y2;

	min = ULONG_MAX;
	max = 0;
	list_for_each_entry(page, pagelist, lru) {
		start = page->index << PAGE_SHIFT;
		end = start + PAGE_SIZE - 1;
		min = min(min, start);
		max = max(max, end);
	}

	if (min < max) {
		y1 = min / info->fix.line_length;
		y2 = (max / info->fix.line_length) + 1;

		spin_lock_irqsave(&par->dirty.lock, flags);
		par->dirty.x1 = 0;
		par->dirty.y1 = y1;
		par->dirty.x2 = info->var.xres;
		par->dirty.y2 = y2;
		spin_unlock_irqrestore(&par->dirty.lock, flags);
	}

	vmw_fb_dirty_flush(par);
}

struct fb_deferred_io vmw_defio = {
	.delay		= VMW_DIRTY_DELAY,
	.deferred_io	= vmw_deferred_io,
};
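
/*
 * The drawing entry points wrap the generic cfb_* helpers, which render into
 * the shadow buffer, and then mark the touched rectangle dirty so it gets
 * flushed to the device.
 */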

static void vmw_fb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
	cfb_fillrect(info, rect);
	vmw_fb_dirty_mark(info->par, rect->dx, rect->dy,
			  rect->width, rect->height);
}

static void vmw_fb_copyarea(struct fb_info *info, const struct fb_copyarea *region)
{
	cfb_copyarea(info, region);
	vmw_fb_dirty_mark(info->par, region->dx, region->dy,
			  region->width, region->height);
}

static void vmw_fb_imageblit(struct fb_info *info, const struct fb_image *image)
{
	cfb_imageblit(info, image);
	vmw_fb_dirty_mark(info->par, image->dx, image->dy,
			  image->width, image->height);
}

static struct fb_ops vmw_fb_ops = {
	.owner = THIS_MODULE,
	.fb_check_var = vmw_fb_check_var,
	.fb_set_par = vmw_fb_set_par,
	.fb_setcolreg = vmw_fb_setcolreg,
	.fb_fillrect = vmw_fb_fillrect,
	.fb_copyarea = vmw_fb_copyarea,
	.fb_imageblit = vmw_fb_imageblit,
	.fb_pan_display = vmw_fb_pan_display,
	.fb_blank = vmw_fb_blank,
};
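
/*
 * Allocate the fbdev buffer object.  The no-evict VRAM placement is
 * restricted to the first `size' bytes of VRAM via lpfn, and the fbdev
 * master lock is held across the allocation.
 */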

static int vmw_fb_create_bo(struct vmw_private *vmw_priv,
			    size_t size, struct vmw_dma_buffer **out)
{
	struct vmw_dma_buffer *vmw_bo;
	struct ttm_placement ne_placement = vmw_vram_ne_placement;
	int ret;

	ne_placement.lpfn = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;

	ret = ttm_write_lock(&vmw_priv->fbdev_master.lock, false);
	if (unlikely(ret != 0))
		return ret;

	vmw_bo = kmalloc(sizeof(*vmw_bo), GFP_KERNEL);
	if (unlikely(vmw_bo == NULL)) {
		ret = -ENOMEM;
		goto err_unlock;
	}

	ret = vmw_dmabuf_init(vmw_priv, vmw_bo, size,
			      &ne_placement,
			      false,
			      &vmw_dmabuf_bo_free);
	if (unlikely(ret != 0))
		goto err_unlock; /* init frees the buffer on failure */

	*out = vmw_bo;

	ttm_write_unlock(&vmw_priv->fbdev_master.lock);

	return 0;

err_unlock:
	ttm_write_unlock(&vmw_priv->fbdev_master.lock);
	return ret;
}
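
/*
 * vmw_fb_init() programs an initial SVGA mode, reads back the resulting
 * framebuffer size and pitch, allocates the shadow buffer and the VRAM
 * buffer object, and registers the fbdev with deferred I/O enabled.
 */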

int vmw_fb_init(struct vmw_private *vmw_priv)
{
	struct device *device = &vmw_priv->dev->pdev->dev;
	struct vmw_fb_par *par;
	struct fb_info *info;
	unsigned initial_width, initial_height;
	unsigned fb_width, fb_height;
	unsigned fb_bbp, fb_depth, fb_offset, fb_pitch, fb_size;
	int ret;

	initial_width = 800;
	initial_height = 600;

	fb_bbp = 32;
	fb_depth = 24;

	if (vmw_priv->capabilities & SVGA_CAP_MULTIMON) {
		fb_width = min(vmw_priv->fb_max_width, (unsigned)2048);
		fb_height = min(vmw_priv->fb_max_height, (unsigned)2048);
	} else {
		fb_width = min(vmw_priv->fb_max_width, initial_width);
		fb_height = min(vmw_priv->fb_max_height, initial_height);
	}

	initial_width = min(fb_width, initial_width);
	initial_height = min(fb_height, initial_height);

	vmw_write(vmw_priv, SVGA_REG_WIDTH, fb_width);
	vmw_write(vmw_priv, SVGA_REG_HEIGHT, fb_height);
	vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, fb_bbp);
	vmw_write(vmw_priv, SVGA_REG_DEPTH, fb_depth);
	vmw_write(vmw_priv, SVGA_REG_RED_MASK, 0x00ff0000);
	vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, 0x0000ff00);
	vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, 0x000000ff);

	fb_size = vmw_read(vmw_priv, SVGA_REG_FB_SIZE);
	fb_offset = vmw_read(vmw_priv, SVGA_REG_FB_OFFSET);
	fb_pitch = vmw_read(vmw_priv, SVGA_REG_BYTES_PER_LINE);

	DRM_DEBUG("width  %u\n", vmw_read(vmw_priv, SVGA_REG_MAX_WIDTH));
	DRM_DEBUG("height %u\n", vmw_read(vmw_priv, SVGA_REG_MAX_HEIGHT));
	DRM_DEBUG("width  %u\n", vmw_read(vmw_priv, SVGA_REG_WIDTH));
	DRM_DEBUG("height %u\n", vmw_read(vmw_priv, SVGA_REG_HEIGHT));
	DRM_DEBUG("bpp    %u\n", vmw_read(vmw_priv, SVGA_REG_BITS_PER_PIXEL));
	DRM_DEBUG("depth  %u\n", vmw_read(vmw_priv, SVGA_REG_DEPTH));
	DRM_DEBUG("bpl    %u\n", vmw_read(vmw_priv, SVGA_REG_BYTES_PER_LINE));
	DRM_DEBUG("r mask %08x\n", vmw_read(vmw_priv, SVGA_REG_RED_MASK));
	DRM_DEBUG("g mask %08x\n", vmw_read(vmw_priv, SVGA_REG_GREEN_MASK));
	DRM_DEBUG("b mask %08x\n", vmw_read(vmw_priv, SVGA_REG_BLUE_MASK));
	DRM_DEBUG("fb_offset 0x%08x\n", fb_offset);
	DRM_DEBUG("fb_pitch  %u\n", fb_pitch);
	DRM_DEBUG("fb_size   %u kiB\n", fb_size / 1024);

	info = framebuffer_alloc(sizeof(*par), device);
	if (!info)
		return -ENOMEM;

	/*
	 * Par
	 */
	vmw_priv->fb_info = info;
	par = info->par;
	par->vmw_priv = vmw_priv;
	par->depth = fb_depth;
	par->bpp = fb_bbp;
	par->vmw_bo = NULL;
	par->max_width = fb_width;
	par->max_height = fb_height;

	/*
	 * Create buffers and alloc memory
	 */
	par->vmalloc = vmalloc(fb_size);
	if (unlikely(par->vmalloc == NULL)) {
		ret = -ENOMEM;
		goto err_free;
	}

	ret = vmw_fb_create_bo(vmw_priv, fb_size, &par->vmw_bo);
	if (unlikely(ret != 0))
		goto err_free;

	ret = ttm_bo_kmap(&par->vmw_bo->base,
			  0,
			  par->vmw_bo->base.num_pages,
			  &par->map);
	if (unlikely(ret != 0))
		goto err_unref;
	par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &par->bo_iowrite);
	par->bo_size = fb_size;

	/*
	 * Fixed and var
	 */
	strcpy(info->fix.id, "svgadrmfb");
	info->fix.type = FB_TYPE_PACKED_PIXELS;
	info->fix.visual = FB_VISUAL_TRUECOLOR;
	info->fix.type_aux = 0;
	info->fix.xpanstep = 1; /* doing it in hw */
	info->fix.ypanstep = 1; /* doing it in hw */
	info->fix.ywrapstep = 0;
	info->fix.accel = FB_ACCEL_NONE;
	info->fix.line_length = fb_pitch;

	info->fix.smem_start = 0;
	info->fix.smem_len = fb_size;

	info->fix.mmio_start = 0;
	info->fix.mmio_len = 0;

	info->pseudo_palette = par->pseudo_palette;
	info->screen_base = par->vmalloc;
	info->screen_size = fb_size;

	info->flags = FBINFO_DEFAULT;
	info->fbops = &vmw_fb_ops;

	/* 24 depth per default */
	info->var.red.offset = 16;
	info->var.green.offset = 8;
	info->var.blue.offset = 0;
	info->var.red.length = 8;
	info->var.green.length = 8;
	info->var.blue.length = 8;
	info->var.transp.offset = 0;
	info->var.transp.length = 0;

	info->var.xres_virtual = fb_width;
	info->var.yres_virtual = fb_height;
	info->var.bits_per_pixel = par->bpp;
	info->var.xoffset = 0;
	info->var.yoffset = 0;
	info->var.activate = FB_ACTIVATE_NOW;
	info->var.height = -1;
	info->var.width = -1;

	info->var.xres = initial_width;
	info->var.yres = initial_height;

#if 0
	info->pixmap.size = 64*1024;
	info->pixmap.buf_align = 8;
	info->pixmap.access_align = 32;
	info->pixmap.flags = FB_PIXMAP_SYSTEM;
	info->pixmap.scan_align = 1;
#else
	info->pixmap.size = 0;
	info->pixmap.buf_align = 8;
	info->pixmap.access_align = 32;
	info->pixmap.flags = FB_PIXMAP_SYSTEM;
	info->pixmap.scan_align = 1;
#endif

	info->aperture_base = vmw_priv->vram_start;
	info->aperture_size = vmw_priv->vram_size;

	/*
	 * Dirty & Deferred IO
	 */
	par->dirty.x1 = par->dirty.x2 = 0;
	par->dirty.y1 = par->dirty.y2 = 0;
	par->dirty.active = true;
	spin_lock_init(&par->dirty.lock);
	info->fbdefio = &vmw_defio;
	fb_deferred_io_init(info);

	ret = register_framebuffer(info);
	if (unlikely(ret != 0))
		goto err_defio;

	return 0;

err_defio:
	fb_deferred_io_cleanup(info);
	ttm_bo_kunmap(&par->map);
err_unref:
	ttm_bo_unref((struct ttm_buffer_object **)&par->vmw_bo);
err_free:
	vfree(par->vmalloc);
	framebuffer_release(info);
	vmw_priv->fb_info = NULL;

	return ret;
}

int vmw_fb_close(struct vmw_private *vmw_priv)
{
	struct fb_info *info;
	struct vmw_fb_par *par;
	struct ttm_buffer_object *bo;

	if (!vmw_priv->fb_info)
		return 0;

	info = vmw_priv->fb_info;
	par = info->par;
	bo = &par->vmw_bo->base;

	fb_deferred_io_cleanup(info);
	unregister_framebuffer(info);

	ttm_bo_kunmap(&par->map);
	ttm_bo_unref(&bo);

	vfree(par->vmalloc);
	framebuffer_release(info);

	return 0;
}
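
/*
 * Helpers for moving the fbdev buffer object: vmw_dmabuf_from_vram() moves
 * it out to system memory, vmw_dmabuf_to_start_of_vram() validates it back
 * into the first pages of VRAM with a no-evict placement.
 */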

int vmw_dmabuf_from_vram(struct vmw_private *vmw_priv,
			 struct vmw_dma_buffer *vmw_bo)
{
	struct ttm_buffer_object *bo = &vmw_bo->base;
	int ret = 0;

	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_validate(bo, &vmw_sys_placement, false, false);
	ttm_bo_unreserve(bo);

	return ret;
}

int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
				struct vmw_dma_buffer *vmw_bo)
{
	struct ttm_buffer_object *bo = &vmw_bo->base;
	struct ttm_placement ne_placement = vmw_vram_ne_placement;
	int ret = 0;

	ne_placement.lpfn = bo->num_pages;

	ret = ttm_write_lock(&vmw_priv->active_master->lock, false);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (unlikely(ret != 0))
		goto err_unlock;

	ret = ttm_bo_validate(bo, &ne_placement, false, false);
	ttm_bo_unreserve(bo);
err_unlock:
	ttm_write_unlock(&vmw_priv->active_master->lock);

	return ret;
}
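
/*
 * vmw_fb_off() stops dirty tracking, unmaps the buffer object and lets it be
 * moved out of VRAM; vmw_fb_on() moves it back to the start of VRAM, remaps
 * it, restarts dirty tracking and forces a full refresh.
 */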

int vmw_fb_off(struct vmw_private *vmw_priv)
{
	struct fb_info *info;
	struct vmw_fb_par *par;
	unsigned long flags;

	if (!vmw_priv->fb_info)
		return -EINVAL;

	info = vmw_priv->fb_info;
	par = info->par;

	spin_lock_irqsave(&par->dirty.lock, flags);
	par->dirty.active = false;
	spin_unlock_irqrestore(&par->dirty.lock, flags);

	flush_scheduled_work();

	par->bo_ptr = NULL;
	ttm_bo_kunmap(&par->map);

	vmw_dmabuf_from_vram(vmw_priv, par->vmw_bo);

	return 0;
}

int vmw_fb_on(struct vmw_private *vmw_priv)
{
	struct fb_info *info;
	struct vmw_fb_par *par;
	unsigned long flags;
	bool dummy;
	int ret;

	if (!vmw_priv->fb_info)
		return -EINVAL;

	info = vmw_priv->fb_info;
	par = info->par;

	/* we are already active */
	if (par->bo_ptr != NULL)
		return 0;

	/* Make sure that all overlays are stopped when we take over */
	vmw_overlay_stop_all(vmw_priv);

	ret = vmw_dmabuf_to_start_of_vram(vmw_priv, par->vmw_bo);
	if (unlikely(ret != 0)) {
		DRM_ERROR("could not move buffer to start of VRAM\n");
		goto err_no_buffer;
	}

	ret = ttm_bo_kmap(&par->vmw_bo->base,
			  0,
			  par->vmw_bo->base.num_pages,
			  &par->map);
	BUG_ON(ret != 0);
	par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &dummy);

	spin_lock_irqsave(&par->dirty.lock, flags);
	par->dirty.active = true;
	spin_unlock_irqrestore(&par->dirty.lock, flags);

err_no_buffer:
	vmw_fb_set_par(info);

	vmw_fb_dirty_mark(par, 0, 0, info->var.xres, info->var.yres);

	/* If there already was stuff dirty we won't
	 * schedule a new work, so let's do it now */
	schedule_delayed_work(&info->deferred_work, 0);

	return 0;
}