drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
/**************************************************************************
 *
 * Copyright © 2007 David Airlie
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include <linux/export.h>

#include "drmP.h"
#include "vmwgfx_drv.h"

#include "ttm/ttm_placement.h"

#define VMW_DIRTY_DELAY (HZ / 30)
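/*
 * Per-framebuffer private data: the vmalloc'd system-memory shadow of the
 * framebuffer, the VRAM buffer object it is flushed into, and the dirty
 * rectangle accumulated between deferred-I/O flushes.
 */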
struct vmw_fb_par {
	struct vmw_private *vmw_priv;

	void *vmalloc;

	struct vmw_dma_buffer *vmw_bo;
	struct ttm_bo_kmap_obj map;

	u32 pseudo_palette[17];

	unsigned depth;
	unsigned bpp;

	unsigned max_width;
	unsigned max_height;

	void *bo_ptr;
	unsigned bo_size;
	bool bo_iowrite;

	struct {
		spinlock_t lock;
		bool active;
		unsigned x1;
		unsigned y1;
		unsigned x2;
		unsigned y2;
	} dirty;
};
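/*
 * vmw_fb_setcolreg - fill one entry of the truecolor pseudo palette.
 *
 * Only the 16 console palette entries are accepted, and only for the
 * 24/32 bit truecolor formats this driver exposes; fbcon reads the
 * packed pixel values back out of info->pseudo_palette.
 */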
static int vmw_fb_setcolreg(unsigned regno, unsigned red, unsigned green,
			    unsigned blue, unsigned transp,
			    struct fb_info *info)
{
	struct vmw_fb_par *par = info->par;
	u32 *pal = par->pseudo_palette;

	if (regno > 15) {
		DRM_ERROR("Bad regno %u.\n", regno);
		return 1;
	}

	switch (par->depth) {
	case 24:
	case 32:
		pal[regno] = ((red & 0xff00) << 8) |
			      (green & 0xff00) |
			     ((blue & 0xff00) >> 8);
		break;
	default:
		DRM_ERROR("Bad depth %u, bpp %u.\n", par->depth, par->bpp);
		return 1;
	}

	return 0;
}
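/*
 * vmw_fb_check_var - validate and constrain a requested mode.
 *
 * Only 32 bpp is accepted (reported as depth 24 or 32 depending on the
 * requested alpha length), panning requires SVGA_CAP_DISPLAY_TOPOLOGY,
 * and the resulting geometry must fit both the allocated framebuffer
 * and the device VRAM.
 */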
static int vmw_fb_check_var(struct fb_var_screeninfo *var,
			    struct fb_info *info)
{
	int depth = var->bits_per_pixel;
	struct vmw_fb_par *par = info->par;
	struct vmw_private *vmw_priv = par->vmw_priv;

	switch (var->bits_per_pixel) {
	case 32:
		depth = (var->transp.length > 0) ? 32 : 24;
		break;
	default:
		DRM_ERROR("Bad bpp %u.\n", var->bits_per_pixel);
		return -EINVAL;
	}

	switch (depth) {
	case 24:
		var->red.offset = 16;
		var->green.offset = 8;
		var->blue.offset = 0;
		var->red.length = 8;
		var->green.length = 8;
		var->blue.length = 8;
		var->transp.length = 0;
		var->transp.offset = 0;
		break;
	case 32:
		var->red.offset = 16;
		var->green.offset = 8;
		var->blue.offset = 0;
		var->red.length = 8;
		var->green.length = 8;
		var->blue.length = 8;
		var->transp.length = 8;
		var->transp.offset = 24;
		break;
	default:
		DRM_ERROR("Bad depth %u.\n", depth);
		return -EINVAL;
	}

	if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
	    (var->xoffset != 0 || var->yoffset != 0)) {
		DRM_ERROR("Can not handle panning without display topology\n");
		return -EINVAL;
	}

	if ((var->xoffset + var->xres) > par->max_width ||
	    (var->yoffset + var->yres) > par->max_height) {
		DRM_ERROR("Requested geom can not fit in framebuffer\n");
		return -EINVAL;
	}

	if (!vmw_kms_validate_mode_vram(vmw_priv,
					info->fix.line_length,
					var->yoffset + var->yres)) {
		DRM_ERROR("Requested geom can not fit in framebuffer\n");
		return -EINVAL;
	}

	return 0;
}
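/*
 * vmw_fb_set_par - program the device for the current mode.
 *
 * Writes the mode to the SVGA registers and, when display topology is
 * supported, (re)defines guest display 0 to match the new geometry.
 */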
static int vmw_fb_set_par(struct fb_info *info)
{
	struct vmw_fb_par *par = info->par;
	struct vmw_private *vmw_priv = par->vmw_priv;
	int ret;

	ret = vmw_kms_write_svga(vmw_priv, info->var.xres, info->var.yres,
				 info->fix.line_length,
				 par->bpp, par->depth);
	if (ret)
		return ret;

	if (vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) {
		/* TODO check if pitch and offset changes */
		vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, 0);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, info->var.xoffset);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, info->var.yoffset);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, info->var.xres);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, info->var.yres);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
	}

	/* This check is really helpful: if it fails, the user can
	 * probably not see anything on the screen.
	 */
	WARN_ON(vmw_read(vmw_priv, SVGA_REG_FB_OFFSET) != 0);

	return 0;
}
static int vmw_fb_pan_display(struct fb_var_screeninfo *var,
			      struct fb_info *info)
{
	return 0;
}

static int vmw_fb_blank(int blank, struct fb_info *info)
{
	return 0;
}

/*
 * Dirty code
 */
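/*
 * vmw_fb_dirty_flush - flush the accumulated dirty region to the device.
 *
 * Resets the dirty rectangle under the dirty lock, copies the dirty
 * span of each scanline from the system-memory shadow into VRAM, and
 * emits an SVGA_CMD_UPDATE so the host refreshes the screen.
 */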
static void vmw_fb_dirty_flush(struct vmw_fb_par *par)
{
	struct vmw_private *vmw_priv = par->vmw_priv;
	struct fb_info *info = vmw_priv->fb_info;
	int stride = (info->fix.line_length / 4);
	int *src = (int *)info->screen_base;
	__le32 __iomem *vram_mem = par->bo_ptr;
	unsigned long flags;
	unsigned x, y, w, h;
	int i, k;
	struct {
		uint32_t header;
		SVGAFifoCmdUpdate body;
	} *cmd;

	if (vmw_priv->suspended)
		return;

	spin_lock_irqsave(&par->dirty.lock, flags);
	if (!par->dirty.active) {
		spin_unlock_irqrestore(&par->dirty.lock, flags);
		return;
	}
	x = par->dirty.x1;
	y = par->dirty.y1;
	w = min(par->dirty.x2, info->var.xres) - x;
	h = min(par->dirty.y2, info->var.yres) - y;
	par->dirty.x1 = par->dirty.x2 = 0;
	par->dirty.y1 = par->dirty.y2 = 0;
	spin_unlock_irqrestore(&par->dirty.lock, flags);

	for (i = y * stride; i < info->fix.smem_len / 4; i += stride) {
		for (k = i+x; k < i+x+w && k < info->fix.smem_len / 4; k++)
			iowrite32(src[k], vram_mem + k);
	}

#if 0
	DRM_INFO("%s, (%u, %u) (%ux%u)\n", __func__, x, y, w, h);
#endif

	cmd = vmw_fifo_reserve(vmw_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed.\n");
		return;
	}

	cmd->header = cpu_to_le32(SVGA_CMD_UPDATE);
	cmd->body.x = cpu_to_le32(x);
	cmd->body.y = cpu_to_le32(y);
	cmd->body.width = cpu_to_le32(w);
	cmd->body.height = cpu_to_le32(h);
	vmw_fifo_commit(vmw_priv, sizeof(*cmd));
}
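/*
 * vmw_fb_dirty_mark - grow the pending dirty rectangle to cover the
 * given region. An empty rectangle (x1 == x2) starts a new one and
 * kicks the shared deferred-I/O work; otherwise the bounds are merged.
 */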
static void vmw_fb_dirty_mark(struct vmw_fb_par *par,
			      unsigned x1, unsigned y1,
			      unsigned width, unsigned height)
{
	struct fb_info *info = par->vmw_priv->fb_info;
	unsigned long flags;
	unsigned x2 = x1 + width;
	unsigned y2 = y1 + height;

	spin_lock_irqsave(&par->dirty.lock, flags);
	if (par->dirty.x1 == par->dirty.x2) {
		par->dirty.x1 = x1;
		par->dirty.y1 = y1;
		par->dirty.x2 = x2;
		par->dirty.y2 = y2;
		/* If we are active, start the dirty work;
		 * we share the work with the defio system. */
		if (par->dirty.active)
			schedule_delayed_work(&info->deferred_work, VMW_DIRTY_DELAY);
	} else {
		if (x1 < par->dirty.x1)
			par->dirty.x1 = x1;
		if (y1 < par->dirty.y1)
			par->dirty.y1 = y1;
		if (x2 > par->dirty.x2)
			par->dirty.x2 = x2;
		if (y2 > par->dirty.y2)
			par->dirty.y2 = y2;
	}
	spin_unlock_irqrestore(&par->dirty.lock, flags);
}
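/*
 * vmw_deferred_io - fbdev deferred I/O callback. Converts the list of
 * touched pages into a full-width span of dirty scanlines and flushes
 * it immediately.
 */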
static void vmw_deferred_io(struct fb_info *info,
			    struct list_head *pagelist)
{
	struct vmw_fb_par *par = info->par;
	unsigned long start, end, min, max;
	unsigned long flags;
	struct page *page;
	int y1, y2;

	min = ULONG_MAX;
	max = 0;
	list_for_each_entry(page, pagelist, lru) {
		start = page->index << PAGE_SHIFT;
		end = start + PAGE_SIZE - 1;
		min = min(min, start);
		max = max(max, end);
	}

	if (min < max) {
		y1 = min / info->fix.line_length;
		y2 = (max / info->fix.line_length) + 1;

		spin_lock_irqsave(&par->dirty.lock, flags);
		par->dirty.x1 = 0;
		par->dirty.y1 = y1;
		par->dirty.x2 = info->var.xres;
		par->dirty.y2 = y2;
		spin_unlock_irqrestore(&par->dirty.lock, flags);
	}

	vmw_fb_dirty_flush(par);
}

struct fb_deferred_io vmw_defio = {
	.delay		= VMW_DIRTY_DELAY,
	.deferred_io	= vmw_deferred_io,
};
/*
 * Draw code
 */

static void vmw_fb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
	cfb_fillrect(info, rect);
	vmw_fb_dirty_mark(info->par, rect->dx, rect->dy,
			  rect->width, rect->height);
}

static void vmw_fb_copyarea(struct fb_info *info, const struct fb_copyarea *region)
{
	cfb_copyarea(info, region);
	vmw_fb_dirty_mark(info->par, region->dx, region->dy,
			  region->width, region->height);
}

static void vmw_fb_imageblit(struct fb_info *info, const struct fb_image *image)
{
	cfb_imageblit(info, image);
	vmw_fb_dirty_mark(info->par, image->dx, image->dy,
			  image->width, image->height);
}
/*
 * Bring up code
 */

static struct fb_ops vmw_fb_ops = {
	.owner = THIS_MODULE,
	.fb_check_var = vmw_fb_check_var,
	.fb_set_par = vmw_fb_set_par,
	.fb_setcolreg = vmw_fb_setcolreg,
	.fb_fillrect = vmw_fb_fillrect,
	.fb_copyarea = vmw_fb_copyarea,
	.fb_imageblit = vmw_fb_imageblit,
	.fb_pan_display = vmw_fb_pan_display,
	.fb_blank = vmw_fb_blank,
};
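/*
 * vmw_fb_create_bo - allocate and initialize a vmw_dma_buffer with a
 * non-evictable VRAM placement sized for the framebuffer, holding the
 * fbdev master ttm write lock across the allocation.
 */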
static int vmw_fb_create_bo(struct vmw_private *vmw_priv,
			    size_t size, struct vmw_dma_buffer **out)
{
	struct vmw_dma_buffer *vmw_bo;
	struct ttm_placement ne_placement = vmw_vram_ne_placement;
	int ret;

	ne_placement.lpfn = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;

	/* interruptible? */
	ret = ttm_write_lock(&vmw_priv->fbdev_master.lock, false);
	if (unlikely(ret != 0))
		return ret;

	vmw_bo = kmalloc(sizeof(*vmw_bo), GFP_KERNEL);
	if (!vmw_bo) {
		ret = -ENOMEM;
		goto err_unlock;
	}

	ret = vmw_dmabuf_init(vmw_priv, vmw_bo, size,
			      &ne_placement,
			      false,
			      &vmw_dmabuf_bo_free);
	if (unlikely(ret != 0))
		goto err_unlock; /* init frees the buffer on failure */

	*out = vmw_bo;

	ttm_write_unlock(&vmw_priv->fbdev_master.lock);

	return 0;

err_unlock:
	ttm_write_unlock(&vmw_priv->fbdev_master.lock);
	return ret;
}
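/*
 * vmw_fb_init - create and register the fbdev interface: allocate the
 * system-memory shadow and the VRAM buffer object, fill in the fixed
 * and variable screen info, and wire up deferred I/O before calling
 * register_framebuffer().
 */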
int vmw_fb_init(struct vmw_private *vmw_priv)
{
	struct device *device = &vmw_priv->dev->pdev->dev;
	struct vmw_fb_par *par;
	struct fb_info *info;
	unsigned initial_width, initial_height;
	unsigned fb_width, fb_height;
	unsigned fb_bpp, fb_depth, fb_offset, fb_pitch, fb_size;
	int ret;

	/* XXX These shouldn't be hardcoded. */
	initial_width = 800;
	initial_height = 600;

	fb_bpp = 32;
	fb_depth = 24;

	/* XXX These shouldn't be hardcoded either. */
	fb_width = min(vmw_priv->fb_max_width, (unsigned)2048);
	fb_height = min(vmw_priv->fb_max_height, (unsigned)2048);

	initial_width = min(fb_width, initial_width);
	initial_height = min(fb_height, initial_height);

	fb_pitch = fb_width * fb_bpp / 8;
	fb_size = fb_pitch * fb_height;
	fb_offset = vmw_read(vmw_priv, SVGA_REG_FB_OFFSET);

	info = framebuffer_alloc(sizeof(*par), device);
	if (!info)
		return -ENOMEM;

	/*
	 * Par
	 */
	vmw_priv->fb_info = info;
	par = info->par;
	par->vmw_priv = vmw_priv;
	par->depth = fb_depth;
	par->bpp = fb_bpp;
	par->vmalloc = NULL;
	par->max_width = fb_width;
	par->max_height = fb_height;

	/*
	 * Create buffers and alloc memory
	 */
	par->vmalloc = vmalloc(fb_size);
	if (unlikely(par->vmalloc == NULL)) {
		ret = -ENOMEM;
		goto err_free;
	}

	ret = vmw_fb_create_bo(vmw_priv, fb_size, &par->vmw_bo);
	if (unlikely(ret != 0))
		goto err_free;

	ret = ttm_bo_kmap(&par->vmw_bo->base,
			  0,
			  par->vmw_bo->base.num_pages,
			  &par->map);
	if (unlikely(ret != 0))
		goto err_unref;
	par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &par->bo_iowrite);
	par->bo_size = fb_size;
	/*
	 * Fixed and var
	 */
	strcpy(info->fix.id, "svgadrmfb");
	info->fix.type = FB_TYPE_PACKED_PIXELS;
	info->fix.visual = FB_VISUAL_TRUECOLOR;
	info->fix.type_aux = 0;
	info->fix.xpanstep = 1; /* doing it in hw */
	info->fix.ypanstep = 1; /* doing it in hw */
	info->fix.ywrapstep = 0;
	info->fix.accel = FB_ACCEL_NONE;
	info->fix.line_length = fb_pitch;

	info->fix.smem_start = 0;
	info->fix.smem_len = fb_size;

	info->pseudo_palette = par->pseudo_palette;
	info->screen_base = par->vmalloc;
	info->screen_size = fb_size;

	info->flags = FBINFO_DEFAULT;
	info->fbops = &vmw_fb_ops;

	/* 24 bit depth by default */
	info->var.red.offset = 16;
	info->var.green.offset = 8;
	info->var.blue.offset = 0;
	info->var.red.length = 8;
	info->var.green.length = 8;
	info->var.blue.length = 8;
	info->var.transp.offset = 0;
	info->var.transp.length = 0;

	info->var.xres_virtual = fb_width;
	info->var.yres_virtual = fb_height;
	info->var.bits_per_pixel = par->bpp;
	info->var.xoffset = 0;
	info->var.yoffset = 0;
	info->var.activate = FB_ACTIVATE_NOW;
	info->var.height = -1;
	info->var.width = -1;

	info->var.xres = initial_width;
	info->var.yres = initial_height;
#if 0
	info->pixmap.size = 64*1024;
	info->pixmap.buf_align = 8;
	info->pixmap.access_align = 32;
	info->pixmap.flags = FB_PIXMAP_SYSTEM;
	info->pixmap.scan_align = 1;
#else
	info->pixmap.size = 0;
	info->pixmap.buf_align = 8;
	info->pixmap.access_align = 32;
	info->pixmap.flags = FB_PIXMAP_SYSTEM;
	info->pixmap.scan_align = 1;
#endif
	info->apertures = alloc_apertures(1);
	if (!info->apertures) {
		ret = -ENOMEM;
		goto err_aper;
	}
	info->apertures->ranges[0].base = vmw_priv->vram_start;
	info->apertures->ranges[0].size = vmw_priv->vram_size;

	/*
	 * Dirty & Deferred IO
	 */
	par->dirty.x1 = par->dirty.x2 = 0;
	par->dirty.y1 = par->dirty.y2 = 0;
	par->dirty.active = true;
	spin_lock_init(&par->dirty.lock);
	info->fbdefio = &vmw_defio;
	fb_deferred_io_init(info);

	ret = register_framebuffer(info);
	if (unlikely(ret != 0))
		goto err_defio;

	return 0;
err_defio:
	fb_deferred_io_cleanup(info);
err_aper:
	ttm_bo_kunmap(&par->map);
err_unref:
	ttm_bo_unref((struct ttm_buffer_object **)&par->vmw_bo);
err_free:
	vfree(par->vmalloc);
	framebuffer_release(info);
	vmw_priv->fb_info = NULL;

	return ret;
}
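/*
 * vmw_fb_close - tear down the fbdev interface and release the shadow
 * buffer and the VRAM buffer object.
 */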
int vmw_fb_close(struct vmw_private *vmw_priv)
{
	struct fb_info *info;
	struct vmw_fb_par *par;
	struct ttm_buffer_object *bo;

	if (!vmw_priv->fb_info)
		return 0;

	info = vmw_priv->fb_info;
	par = info->par;
	bo = &par->vmw_bo->base;
	par->vmw_bo = NULL;

	/* XXX: is this the right teardown order? */
	fb_deferred_io_cleanup(info);
	unregister_framebuffer(info);

	ttm_bo_kunmap(&par->map);
	ttm_bo_unref(&bo);

	vfree(par->vmalloc);
	framebuffer_release(info);

	return 0;
}
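/*
 * vmw_fb_off - deactivate dirty flushing, wait for pending deferred
 * work, then unmap the buffer object and unpin it from VRAM.
 */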
int vmw_fb_off(struct vmw_private *vmw_priv)
{
	struct fb_info *info;
	struct vmw_fb_par *par;
	unsigned long flags;

	if (!vmw_priv->fb_info)
		return -EINVAL;

	info = vmw_priv->fb_info;
	par = info->par;

	spin_lock_irqsave(&par->dirty.lock, flags);
	par->dirty.active = false;
	spin_unlock_irqrestore(&par->dirty.lock, flags);

	flush_delayed_work_sync(&info->deferred_work);

	par->bo_ptr = NULL;
	ttm_bo_kunmap(&par->map);

	vmw_dmabuf_unpin(vmw_priv, par->vmw_bo, false);

	return 0;
}
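/*
 * vmw_fb_on - counterpart to vmw_fb_off: stop all overlays, pin the
 * buffer object back at the start of VRAM, remap it, reactivate dirty
 * flushing, re-program the mode and schedule an immediate full-screen
 * refresh.
 */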
int vmw_fb_on(struct vmw_private *vmw_priv)
{
	struct fb_info *info;
	struct vmw_fb_par *par;
	unsigned long flags;
	bool dummy;
	int ret;

	if (!vmw_priv->fb_info)
		return -EINVAL;

	info = vmw_priv->fb_info;
	par = info->par;

	/* we are already active */
	if (par->bo_ptr != NULL)
		return 0;

	/* Make sure that all overlays are stopped when we take over */
	vmw_overlay_stop_all(vmw_priv);

	ret = vmw_dmabuf_to_start_of_vram(vmw_priv, par->vmw_bo, true, false);
	if (unlikely(ret != 0)) {
		DRM_ERROR("could not move buffer to start of VRAM\n");
		goto err_no_buffer;
	}

	ret = ttm_bo_kmap(&par->vmw_bo->base,
			  0,
			  par->vmw_bo->base.num_pages,
			  &par->map);
	BUG_ON(ret != 0);
	par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &dummy);

	spin_lock_irqsave(&par->dirty.lock, flags);
	par->dirty.active = true;
	spin_unlock_irqrestore(&par->dirty.lock, flags);

err_no_buffer:
	vmw_fb_set_par(info);

	vmw_fb_dirty_mark(par, 0, 0, info->var.xres, info->var.yres);

	/* If there already was stuff dirty we won't
	 * schedule a new work, so let's do it now */
	schedule_delayed_work(&info->deferred_work, 0);

	return 0;
}