/**************************************************************************
 *
 * Copyright © 2007 David Airlie
 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <linux/export.h>

#include "vmwgfx_drv.h"
#include "vmwgfx_kms.h"

#include <drm/ttm/ttm_placement.h>

#define VMW_DIRTY_DELAY (HZ / 30)
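
/*
 * Per-device fbdev state. The shadow framebuffer lives in a vmalloc()ed
 * system-memory buffer (@vmalloc); dirty regions are copied from it into
 * the mapped, pinned buffer object (@vmw_bo / @bo_ptr) by the delayed
 * dirty-flush work. @bo_mutex protects the buffer-object and KMS state,
 * while @dirty.lock protects the dirty rectangle itself.
 */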
struct vmw_fb_par {
	struct vmw_private *vmw_priv;

	void *vmalloc;

	struct mutex bo_mutex;
	struct vmw_dma_buffer *vmw_bo;
	struct ttm_bo_kmap_obj map;
	void *bo_ptr;
	unsigned bo_size;
	struct drm_framebuffer *set_fb;
	struct drm_display_mode *set_mode;
	u32 fb_x;
	u32 fb_y;
	bool bo_iowrite;

	u32 pseudo_palette[17];

	unsigned max_width;
	unsigned max_height;

	struct {
		spinlock_t lock;
		bool active;
		s32 x1;
		s32 y1;
		s32 x2;
		s32 y2;
	} dirty;

	struct drm_crtc *crtc;
	struct drm_connector *con;
	struct delayed_work local_work;
};
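
/*
 * fb_setcolreg: store a truecolor pseudo-palette entry, converting the
 * 16-bit fbdev color components into packed 8-bit XRGB.
 */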
static int vmw_fb_setcolreg(unsigned regno, unsigned red, unsigned green,
			    unsigned blue, unsigned transp,
			    struct fb_info *info)
{
	struct vmw_fb_par *par = info->par;
	u32 *pal = par->pseudo_palette;

	if (regno > 15) {
		DRM_ERROR("Bad regno %u.\n", regno);
		return 1;
	}

	switch (par->set_fb->format->depth) {
	case 24:
	case 32:
		pal[regno] = ((red & 0xff00) << 8) |
			      (green & 0xff00) |
			     ((blue & 0xff00) >> 8);
		break;
	default:
		DRM_ERROR("Bad depth %u, bpp %u.\n",
			  par->set_fb->format->depth,
			  par->set_fb->format->cpp[0] * 8);
		return 1;
	}

	return 0;
}
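
/*
 * fb_check_var: validate a requested video mode. Only 32 bpp is accepted
 * (with 24- or 32-bit depth depending on whether an alpha channel is
 * requested), and the geometry must fit both the fbdev limits and VRAM.
 */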
static int vmw_fb_check_var(struct fb_var_screeninfo *var,
			    struct fb_info *info)
{
	int depth = var->bits_per_pixel;
	struct vmw_fb_par *par = info->par;
	struct vmw_private *vmw_priv = par->vmw_priv;

	switch (var->bits_per_pixel) {
	case 32:
		depth = (var->transp.length > 0) ? 32 : 24;
		break;
	default:
		DRM_ERROR("Bad bpp %u.\n", var->bits_per_pixel);
		return -EINVAL;
	}

	switch (depth) {
	case 24:
		var->red.offset = 16;
		var->green.offset = 8;
		var->blue.offset = 0;
		var->red.length = 8;
		var->green.length = 8;
		var->blue.length = 8;
		var->transp.length = 0;
		var->transp.offset = 0;
		break;
	case 32:
		var->red.offset = 16;
		var->green.offset = 8;
		var->blue.offset = 0;
		var->red.length = 8;
		var->green.length = 8;
		var->blue.length = 8;
		var->transp.length = 8;
		var->transp.offset = 24;
		break;
	default:
		DRM_ERROR("Bad depth %u.\n", depth);
		return -EINVAL;
	}

	if ((var->xoffset + var->xres) > par->max_width ||
	    (var->yoffset + var->yres) > par->max_height) {
		DRM_ERROR("Requested geom can not fit in framebuffer\n");
		return -EINVAL;
	}

	if (!vmw_kms_validate_mode_vram(vmw_priv,
					var->xres * var->bits_per_pixel / 8,
					var->yoffset + var->yres)) {
		DRM_ERROR("Requested geom can not fit in framebuffer\n");
		return -EINVAL;
	}

	return 0;
}
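
/*
 * fb_blank: a no-op here; nothing is programmed for blanking on this
 * virtual device.
 */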
static int vmw_fb_blank(int blank, struct fb_info *info)
{
	return 0;
}
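
/*
 * Dirty handling: copy the accumulated dirty rectangle from the vmalloc
 * shadow buffer into the mapped buffer object, then report the damaged
 * region through the framebuffer's ->dirty() hook so the host is updated.
 */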
static void vmw_fb_dirty_flush(struct work_struct *work)
{
	struct vmw_fb_par *par = container_of(work, struct vmw_fb_par,
					      local_work.work);
	struct vmw_private *vmw_priv = par->vmw_priv;
	struct fb_info *info = vmw_priv->fb_info;
	unsigned long irq_flags;
	s32 dst_x1, dst_x2, dst_y1, dst_y2, w, h;
	u32 cpp, max_x, max_y;
	struct drm_clip_rect clip;
	struct drm_framebuffer *cur_fb;
	u8 *src_ptr, *dst_ptr;

	if (vmw_priv->suspended)
		return;

	mutex_lock(&par->bo_mutex);
	cur_fb = par->set_fb;
	if (!cur_fb)
		goto out_unlock;

	spin_lock_irqsave(&par->dirty.lock, irq_flags);
	if (!par->dirty.active) {
		spin_unlock_irqrestore(&par->dirty.lock, irq_flags);
		goto out_unlock;
	}

	/*
	 * Handle panning when copying from vmalloc to framebuffer.
	 * Clip dirty area to framebuffer.
	 */
	cpp = cur_fb->format->cpp[0];
	max_x = par->fb_x + cur_fb->width;
	max_y = par->fb_y + cur_fb->height;

	dst_x1 = par->dirty.x1 - par->fb_x;
	dst_y1 = par->dirty.y1 - par->fb_y;
	dst_x1 = max_t(s32, dst_x1, 0);
	dst_y1 = max_t(s32, dst_y1, 0);

	dst_x2 = par->dirty.x2 - par->fb_x;
	dst_y2 = par->dirty.y2 - par->fb_y;
	dst_x2 = min_t(s32, dst_x2, max_x);
	dst_y2 = min_t(s32, dst_y2, max_y);
	w = dst_x2 - dst_x1;
	h = dst_y2 - dst_y1;
	w = max_t(s32, 0, w);
	h = max_t(s32, 0, h);

	par->dirty.x1 = par->dirty.x2 = 0;
	par->dirty.y1 = par->dirty.y2 = 0;
	spin_unlock_irqrestore(&par->dirty.lock, irq_flags);

	if (w && h) {
		dst_ptr = (u8 *)par->bo_ptr +
			(dst_y1 * par->set_fb->pitches[0] + dst_x1 * cpp);
		src_ptr = (u8 *)par->vmalloc +
			((dst_y1 + par->fb_y) * info->fix.line_length +
			 (dst_x1 + par->fb_x) * cpp);

		while (h-- > 0) {
			memcpy(dst_ptr, src_ptr, w * cpp);
			dst_ptr += par->set_fb->pitches[0];
			src_ptr += info->fix.line_length;
		}

		clip.x1 = dst_x1;
		clip.x2 = dst_x2;
		clip.y1 = dst_y1;
		clip.y2 = dst_y2;

		WARN_ON_ONCE(par->set_fb->funcs->dirty(cur_fb, NULL, 0, 0,
						       &clip, 1));
		vmw_fifo_flush(vmw_priv, false);
	}
out_unlock:
	mutex_unlock(&par->bo_mutex);
}
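
/*
 * Grow the dirty rectangle to cover [x1, y1, width, height]. If the
 * rectangle was previously empty, also kick the delayed flush work.
 */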
static void vmw_fb_dirty_mark(struct vmw_fb_par *par,
			      unsigned x1, unsigned y1,
			      unsigned width, unsigned height)
{
	unsigned long flags;
	unsigned x2 = x1 + width;
	unsigned y2 = y1 + height;

	spin_lock_irqsave(&par->dirty.lock, flags);
	if (par->dirty.x1 == par->dirty.x2) {
		par->dirty.x1 = x1;
		par->dirty.y1 = y1;
		par->dirty.x2 = x2;
		par->dirty.y2 = y2;
		/* if we are active start the dirty work
		 * we share the work with the defio system */
		if (par->dirty.active)
			schedule_delayed_work(&par->local_work,
					      VMW_DIRTY_DELAY);
	} else {
		if (x1 < par->dirty.x1)
			par->dirty.x1 = x1;
		if (y1 < par->dirty.y1)
			par->dirty.y1 = y1;
		if (x2 > par->dirty.x2)
			par->dirty.x2 = x2;
		if (y2 > par->dirty.y2)
			par->dirty.y2 = y2;
	}
	spin_unlock_irqrestore(&par->dirty.lock, flags);
}
static int vmw_fb_pan_display(struct fb_var_screeninfo *var,
			      struct fb_info *info)
{
	struct vmw_fb_par *par = info->par;

	if ((var->xoffset + var->xres) > var->xres_virtual ||
	    (var->yoffset + var->yres) > var->yres_virtual) {
		DRM_ERROR("Requested panning can not fit in framebuffer\n");
		return -EINVAL;
	}

	mutex_lock(&par->bo_mutex);
	par->fb_x = var->xoffset;
	par->fb_y = var->yoffset;
	if (par->set_fb)
		vmw_fb_dirty_mark(par, par->fb_x, par->fb_y, par->set_fb->width,
				  par->set_fb->height);
	mutex_unlock(&par->bo_mutex);

	return 0;
}
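
/*
 * Deferred-IO callback: translate the list of touched pages into a span
 * of dirty scanlines, record it as the dirty rectangle, and reschedule
 * the flush work to run immediately.
 */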
static void vmw_deferred_io(struct fb_info *info,
			    struct list_head *pagelist)
{
	struct vmw_fb_par *par = info->par;
	unsigned long start, end, min, max;
	unsigned long flags;
	struct page *page;
	int y1, y2;

	min = ULONG_MAX;
	max = 0;
	list_for_each_entry(page, pagelist, lru) {
		start = page->index << PAGE_SHIFT;
		end = start + PAGE_SIZE - 1;
		min = min(min, start);
		max = max(max, end);
	}

	if (min < max) {
		y1 = min / info->fix.line_length;
		y2 = (max / info->fix.line_length) + 1;

		spin_lock_irqsave(&par->dirty.lock, flags);
		par->dirty.x1 = 0;
		par->dirty.y1 = y1;
		par->dirty.x2 = info->var.xres;
		par->dirty.y2 = y2;
		spin_unlock_irqrestore(&par->dirty.lock, flags);

		/*
		 * Since we've already waited on this work once, try to
		 * execute asap.
		 */
		cancel_delayed_work(&par->local_work);
		schedule_delayed_work(&par->local_work, 0);
	}
}

static struct fb_deferred_io vmw_defio = {
	.delay		= VMW_DIRTY_DELAY,
	.deferred_io	= vmw_deferred_io,
};
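
/*
 * Drawing ops: forward to the generic cfb_* implementations, which draw
 * into the vmalloc shadow buffer, then mark the affected area dirty.
 */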
static void vmw_fb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
	cfb_fillrect(info, rect);
	vmw_fb_dirty_mark(info->par, rect->dx, rect->dy,
			  rect->width, rect->height);
}

static void vmw_fb_copyarea(struct fb_info *info, const struct fb_copyarea *region)
{
	cfb_copyarea(info, region);
	vmw_fb_dirty_mark(info->par, region->dx, region->dy,
			  region->width, region->height);
}

static void vmw_fb_imageblit(struct fb_info *info, const struct fb_image *image)
{
	cfb_imageblit(info, image);
	vmw_fb_dirty_mark(info->par, image->dx, image->dy,
			  image->width, image->height);
}
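
/*
 * Allocate and initialize the buffer object backing the fbdev
 * framebuffer, under the TTM reservation write lock.
 */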
static int vmw_fb_create_bo(struct vmw_private *vmw_priv,
			    size_t size, struct vmw_dma_buffer **out)
{
	struct vmw_dma_buffer *vmw_bo;
	int ret;

	(void) ttm_write_lock(&vmw_priv->reservation_sem, false);

	vmw_bo = kmalloc(sizeof(*vmw_bo), GFP_KERNEL);
	if (!vmw_bo) {
		ret = -ENOMEM;
		goto err_unlock;
	}

	ret = vmw_dmabuf_init(vmw_priv, vmw_bo, size,
			      &vmw_sys_placement,
			      false,
			      &vmw_dmabuf_bo_free);
	if (unlikely(ret != 0))
		goto err_unlock; /* init frees the buffer on failure */

	*out = vmw_bo;
	ttm_write_unlock(&vmw_priv->reservation_sem);

	return 0;

err_unlock:
	ttm_write_unlock(&vmw_priv->reservation_sem);
	return ret;
}
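
/*
 * Map a bits-per-pixel value to a color depth, mirroring the policy in
 * vmw_fb_check_var(): 32 bpp with alpha is depth 32, otherwise depth 24.
 */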
static int vmw_fb_compute_depth(struct fb_var_screeninfo *var,
				int *depth)
{
	switch (var->bits_per_pixel) {
	case 32:
		*depth = (var->transp.length > 0) ? 32 : 24;
		break;
	default:
		DRM_ERROR("Bad bpp %u.\n", var->bits_per_pixel);
		return -EINVAL;
	}

	return 0;
}
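
/*
 * Wrapper around the CRTC's ->set_config that keeps the legacy
 * framebuffer refcounting consistent and retries on modeset deadlock.
 */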
static int vmwgfx_set_config_internal(struct drm_mode_set *set)
{
	struct drm_crtc *crtc = set->crtc;
	struct drm_framebuffer *fb;
	struct drm_crtc *tmp;
	struct drm_modeset_acquire_ctx *ctx;
	struct drm_device *dev = set->crtc->dev;
	int ret;

	ctx = dev->mode_config.acquire_ctx;

restart:
	/*
	 * NOTE: ->set_config can also disable other crtcs (if we steal all
	 * connectors from it), hence we need to refcount the fbs across all
	 * crtcs. Atomic modeset will have saner semantics ...
	 */
	drm_for_each_crtc(tmp, dev)
		tmp->primary->old_fb = tmp->primary->fb;

	fb = set->fb;

	ret = crtc->funcs->set_config(set, ctx);
	if (ret == 0) {
		crtc->primary->crtc = crtc;
		crtc->primary->fb = fb;
	}

	drm_for_each_crtc(tmp, dev) {
		if (tmp->primary->fb)
			drm_framebuffer_get(tmp->primary->fb);
		if (tmp->primary->old_fb)
			drm_framebuffer_put(tmp->primary->old_fb);
		tmp->primary->old_fb = NULL;
	}

	if (ret == -EDEADLK) {
		dev->mode_config.acquire_ctx = NULL;

retry_locking:
		drm_modeset_backoff(ctx);

		ret = drm_modeset_lock_all_ctx(dev, ctx);
		if (ret)
			goto retry_locking;

		dev->mode_config.acquire_ctx = ctx;

		goto restart;
	}

	return ret;
}
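
/*
 * Tear down the fbdev KMS state: unset the mode, drop the framebuffer
 * reference and, if requested, unmap and release or unpin the backing
 * buffer object.
 */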
static int vmw_fb_kms_detach(struct vmw_fb_par *par,
			     bool detach_bo,
			     bool unref_bo)
{
	struct drm_framebuffer *cur_fb = par->set_fb;
	int ret;

	/* Detach the KMS framebuffer from crtcs */
	if (par->set_mode) {
		struct drm_mode_set set;

		set.crtc = par->crtc;
		set.x = 0;
		set.y = 0;
		set.mode = NULL;
		set.fb = NULL;
		set.num_connectors = 0;
		set.connectors = &par->con;
		ret = vmwgfx_set_config_internal(&set);
		if (ret) {
			DRM_ERROR("Could not unset a mode.\n");
			return ret;
		}
		drm_mode_destroy(par->vmw_priv->dev, par->set_mode);
		par->set_mode = NULL;
	}

	if (cur_fb) {
		drm_framebuffer_unreference(cur_fb);
		par->set_fb = NULL;
	}

	if (par->vmw_bo && detach_bo) {
		struct vmw_private *vmw_priv = par->vmw_priv;

		if (par->bo_ptr) {
			ttm_bo_kunmap(&par->map);
			par->bo_ptr = NULL;
		}
		if (unref_bo)
			vmw_dmabuf_unreference(&par->vmw_bo);
		else if (vmw_priv->active_display_unit != vmw_du_legacy)
			vmw_dmabuf_unpin(par->vmw_priv, par->vmw_bo, false);
	}

	return 0;
}
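
/*
 * (Re)create the KMS framebuffer for the current fbdev mode, reusing
 * the existing framebuffer and buffer object when they still fit.
 */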
static int vmw_fb_kms_framebuffer(struct fb_info *info)
{
	struct drm_mode_fb_cmd2 mode_cmd;
	struct vmw_fb_par *par = info->par;
	struct fb_var_screeninfo *var = &info->var;
	struct drm_framebuffer *cur_fb;
	struct vmw_framebuffer *vfb;
	int ret = 0, depth;
	size_t new_bo_size;

	ret = vmw_fb_compute_depth(var, &depth);
	if (ret)
		return ret;

	mode_cmd.width = var->xres;
	mode_cmd.height = var->yres;
	mode_cmd.pitches[0] = ((var->bits_per_pixel + 7) / 8) * mode_cmd.width;
	mode_cmd.pixel_format =
		drm_mode_legacy_fb_format(var->bits_per_pixel, depth);

	cur_fb = par->set_fb;
	if (cur_fb && cur_fb->width == mode_cmd.width &&
	    cur_fb->height == mode_cmd.height &&
	    cur_fb->format->format == mode_cmd.pixel_format &&
	    cur_fb->pitches[0] == mode_cmd.pitches[0])
		return 0;

	/* Need new buffer object ? */
	new_bo_size = (size_t) mode_cmd.pitches[0] * (size_t) mode_cmd.height;
	ret = vmw_fb_kms_detach(par,
				par->bo_size < new_bo_size ||
				par->bo_size > 2*new_bo_size,
				true);
	if (ret)
		return ret;

	if (!par->vmw_bo) {
		ret = vmw_fb_create_bo(par->vmw_priv, new_bo_size,
				       &par->vmw_bo);
		if (ret) {
			DRM_ERROR("Failed creating a buffer object for "
				  "fbdev.\n");
			return ret;
		}
		par->bo_size = new_bo_size;
	}

	vfb = vmw_kms_new_framebuffer(par->vmw_priv, par->vmw_bo, NULL,
				      true, &mode_cmd);
	if (IS_ERR(vfb))
		return PTR_ERR(vfb);

	par->set_fb = &vfb->base;

	return 0;
}
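
/*
 * fb_set_par: build a display mode for the requested resolution, create
 * or reuse the KMS framebuffer, program the CRTC, and make sure the
 * backing buffer object is pinned and kmapped before flushing.
 */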
static int vmw_fb_set_par(struct fb_info *info)
{
	struct vmw_fb_par *par = info->par;
	struct vmw_private *vmw_priv = par->vmw_priv;
	struct drm_mode_set set;
	struct fb_var_screeninfo *var = &info->var;
	struct drm_display_mode new_mode = { DRM_MODE("fb_mode",
		DRM_MODE_TYPE_DRIVER,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
	};
	struct drm_display_mode *old_mode;
	struct drm_display_mode *mode;
	int ret;

	old_mode = par->set_mode;
	mode = drm_mode_duplicate(vmw_priv->dev, &new_mode);
	if (!mode) {
		DRM_ERROR("Could not create new fb mode.\n");
		return -ENOMEM;
	}

	mode->hdisplay = var->xres;
	mode->vdisplay = var->yres;
	vmw_guess_mode_timing(mode);

	if (old_mode && drm_mode_equal(old_mode, mode)) {
		drm_mode_destroy(vmw_priv->dev, mode);
		mode = old_mode;
		old_mode = NULL;
	} else if (!vmw_kms_validate_mode_vram(vmw_priv,
					mode->hdisplay *
					DIV_ROUND_UP(var->bits_per_pixel, 8),
					mode->vdisplay)) {
		drm_mode_destroy(vmw_priv->dev, mode);
		return -EINVAL;
	}

	mutex_lock(&par->bo_mutex);
	drm_modeset_lock_all(vmw_priv->dev);
	ret = vmw_fb_kms_framebuffer(info);
	if (ret)
		goto out_unlock;

	par->fb_x = var->xoffset;
	par->fb_y = var->yoffset;

	set.crtc = par->crtc;
	set.x = 0;
	set.y = 0;
	set.mode = mode;
	set.fb = par->set_fb;
	set.num_connectors = 1;
	set.connectors = &par->con;

	ret = vmwgfx_set_config_internal(&set);
	if (ret)
		goto out_unlock;

	if (!par->bo_ptr) {
		struct vmw_framebuffer *vfb = vmw_framebuffer_to_vfb(set.fb);

		/*
		 * Pin before mapping. Since we don't know in what placement
		 * to pin, call into KMS to do it for us. LDU doesn't require
		 * additional pinning because set_config() would've pinned
		 * it already.
		 */
		if (vmw_priv->active_display_unit != vmw_du_legacy) {
			ret = vfb->pin(vfb);
			if (ret) {
				DRM_ERROR("Could not pin the fbdev "
					  "framebuffer.\n");
				goto out_unlock;
			}
		}

		ret = ttm_bo_kmap(&par->vmw_bo->base, 0,
				  par->vmw_bo->base.num_pages, &par->map);
		if (ret) {
			if (vmw_priv->active_display_unit != vmw_du_legacy)
				vfb->unpin(vfb);
			DRM_ERROR("Could not map the fbdev framebuffer.\n");
			goto out_unlock;
		}

		par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &par->bo_iowrite);
	}

	vmw_fb_dirty_mark(par, par->fb_x, par->fb_y,
			  par->set_fb->width, par->set_fb->height);

	/* If there already was stuff dirty we won't
	 * schedule a new work, so let's do it now */
	schedule_delayed_work(&par->local_work, 0);

out_unlock:
	if (old_mode)
		drm_mode_destroy(vmw_priv->dev, old_mode);
	par->set_mode = mode;

	drm_modeset_unlock_all(vmw_priv->dev);
	mutex_unlock(&par->bo_mutex);

	return ret;
}
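
/*
 * fbdev entry points for the vmwgfx shadow framebuffer.
 */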
static struct fb_ops vmw_fb_ops = {
	.owner = THIS_MODULE,
	.fb_check_var = vmw_fb_check_var,
	.fb_set_par = vmw_fb_set_par,
	.fb_setcolreg = vmw_fb_setcolreg,
	.fb_fillrect = vmw_fb_fillrect,
	.fb_copyarea = vmw_fb_copyarea,
	.fb_imageblit = vmw_fb_imageblit,
	.fb_pan_display = vmw_fb_pan_display,
	.fb_blank = vmw_fb_blank,
};
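
/*
 * Bring up the fbdev emulation: allocate the fb_info and shadow buffer,
 * query an initial mode from KMS, fill in the fixed and variable screen
 * info, wire up deferred IO and register the framebuffer.
 */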
int vmw_fb_init(struct vmw_private *vmw_priv)
{
	struct device *device = &vmw_priv->dev->pdev->dev;
	struct vmw_fb_par *par;
	struct fb_info *info;
	unsigned fb_width, fb_height;
	unsigned fb_bpp, fb_depth, fb_offset, fb_pitch, fb_size;
	struct drm_display_mode *init_mode;
	int ret;

	fb_bpp = 32;
	fb_depth = 24;

	/* XXX As shouldn't these be as well. */
	fb_width = min(vmw_priv->fb_max_width, (unsigned)2048);
	fb_height = min(vmw_priv->fb_max_height, (unsigned)2048);

	fb_pitch = fb_width * fb_bpp / 8;
	fb_size = fb_pitch * fb_height;
	fb_offset = vmw_read(vmw_priv, SVGA_REG_FB_OFFSET);

	info = framebuffer_alloc(sizeof(*par), device);
	if (!info)
		return -ENOMEM;

	vmw_priv->fb_info = info;
	par = info->par;
	memset(par, 0, sizeof(*par));
	INIT_DELAYED_WORK(&par->local_work, &vmw_fb_dirty_flush);
	par->vmw_priv = vmw_priv;
	par->max_width = fb_width;
	par->max_height = fb_height;

	drm_modeset_lock_all(vmw_priv->dev);
	ret = vmw_kms_fbdev_init_data(vmw_priv, 0, par->max_width,
				      par->max_height, &par->con,
				      &par->crtc, &init_mode);
	if (ret) {
		drm_modeset_unlock_all(vmw_priv->dev);
		goto err_kms;
	}

	info->var.xres = init_mode->hdisplay;
	info->var.yres = init_mode->vdisplay;
	drm_modeset_unlock_all(vmw_priv->dev);

	/*
	 * Create buffers and alloc memory
	 */
	par->vmalloc = vzalloc(fb_size);
	if (unlikely(par->vmalloc == NULL)) {
		ret = -ENOMEM;
		goto err_free;
	}

	strcpy(info->fix.id, "svgadrmfb");
	info->fix.type = FB_TYPE_PACKED_PIXELS;
	info->fix.visual = FB_VISUAL_TRUECOLOR;
	info->fix.type_aux = 0;
	info->fix.xpanstep = 1; /* doing it in hw */
	info->fix.ypanstep = 1; /* doing it in hw */
	info->fix.ywrapstep = 0;
	info->fix.accel = FB_ACCEL_NONE;
	info->fix.line_length = fb_pitch;

	info->fix.smem_start = 0;
	info->fix.smem_len = fb_size;

	info->pseudo_palette = par->pseudo_palette;
	info->screen_base = (char __iomem *)par->vmalloc;
	info->screen_size = fb_size;

	info->fbops = &vmw_fb_ops;

	/* 24 depth per default */
	info->var.red.offset = 16;
	info->var.green.offset = 8;
	info->var.blue.offset = 0;
	info->var.red.length = 8;
	info->var.green.length = 8;
	info->var.blue.length = 8;
	info->var.transp.offset = 0;
	info->var.transp.length = 0;

	info->var.xres_virtual = fb_width;
	info->var.yres_virtual = fb_height;
	info->var.bits_per_pixel = fb_bpp;
	info->var.xoffset = 0;
	info->var.yoffset = 0;
	info->var.activate = FB_ACTIVATE_NOW;
	info->var.height = -1;
	info->var.width = -1;

	/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
	info->apertures = alloc_apertures(1);
	if (!info->apertures) {
		ret = -ENOMEM;
		goto err_aper;
	}
	info->apertures->ranges[0].base = vmw_priv->vram_start;
	info->apertures->ranges[0].size = vmw_priv->vram_size;

	/*
	 * Dirty & Deferred IO
	 */
	par->dirty.x1 = par->dirty.x2 = 0;
	par->dirty.y1 = par->dirty.y2 = 0;
	par->dirty.active = true;
	spin_lock_init(&par->dirty.lock);
	mutex_init(&par->bo_mutex);
	info->fbdefio = &vmw_defio;
	fb_deferred_io_init(info);

	ret = register_framebuffer(info);
	if (unlikely(ret != 0))
		goto err_defio;

	vmw_fb_set_par(info);

	return 0;

err_defio:
	fb_deferred_io_cleanup(info);
err_aper:
err_free:
	vfree(par->vmalloc);
err_kms:
	framebuffer_release(info);
	vmw_priv->fb_info = NULL;

	return ret;
}
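
/*
 * Tear down the fbdev emulation on driver unload.
 */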
int vmw_fb_close(struct vmw_private *vmw_priv)
{
	struct fb_info *info;
	struct vmw_fb_par *par;

	if (!vmw_priv->fb_info)
		return 0;

	info = vmw_priv->fb_info;
	par = info->par;

	fb_deferred_io_cleanup(info);
	cancel_delayed_work_sync(&par->local_work);
	unregister_framebuffer(info);

	(void) vmw_fb_kms_detach(par, true, true);

	vfree(par->vmalloc);
	framebuffer_release(info);

	return 0;
}
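
/*
 * Disable the fbdev output (e.g. around suspend): stop dirty tracking,
 * flush pending work and detach from KMS while keeping the buffer
 * object reference.
 */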
int vmw_fb_off(struct vmw_private *vmw_priv)
{
	struct fb_info *info;
	struct vmw_fb_par *par;
	unsigned long flags;

	if (!vmw_priv->fb_info)
		return -EINVAL;

	info = vmw_priv->fb_info;
	par = info->par;

	spin_lock_irqsave(&par->dirty.lock, flags);
	par->dirty.active = false;
	spin_unlock_irqrestore(&par->dirty.lock, flags);

	flush_delayed_work(&info->deferred_work);
	flush_delayed_work(&par->local_work);

	mutex_lock(&par->bo_mutex);
	drm_modeset_lock_all(vmw_priv->dev);
	(void) vmw_fb_kms_detach(par, true, false);
	drm_modeset_unlock_all(vmw_priv->dev);
	mutex_unlock(&par->bo_mutex);

	return 0;
}
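
/*
 * Re-enable the fbdev output: restore the mode via vmw_fb_set_par() and
 * turn dirty tracking back on.
 */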
int vmw_fb_on(struct vmw_private *vmw_priv)
{
	struct fb_info *info;
	struct vmw_fb_par *par;
	unsigned long flags;

	if (!vmw_priv->fb_info)
		return -EINVAL;

	info = vmw_priv->fb_info;
	par = info->par;

	vmw_fb_set_par(info);
	spin_lock_irqsave(&par->dirty.lock, flags);
	par->dirty.active = true;
	spin_unlock_irqrestore(&par->dirty.lock, flags);

	return 0;
}