/* drivers/gpu/drm/mgag200/mgag200_fb.c */
/*
 * Copyright 2010 Matt Turner.
 * Copyright 2012 Red Hat
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License version 2. See the file COPYING in the main
 * directory of this archive for more details.
 *
 * Authors: Matthew Garrett
 *          Matt Turner
 *          Dave Airlie
 */
13 #include <linux/module.h>
14 #include <drm/drmP.h>
15 #include <drm/drm_fb_helper.h>
16 #include <drm/drm_crtc_helper.h>
18 #include <linux/fb.h>
20 #include "mgag200_drv.h"
22 static void mga_dirty_update(struct mga_fbdev *mfbdev,
23 int x, int y, int width, int height)
25 int i;
26 struct drm_gem_object *obj;
27 struct mgag200_bo *bo;
28 int src_offset, dst_offset;
29 int bpp = (mfbdev->mfb.base.bits_per_pixel + 7)/8;
30 int ret = -EBUSY;
31 bool unmap = false;
32 bool store_for_later = false;
33 int x2, y2;
34 unsigned long flags;
36 obj = mfbdev->mfb.obj;
37 bo = gem_to_mga_bo(obj);
40 * try and reserve the BO, if we fail with busy
41 * then the BO is being moved and we should
42 * store up the damage until later.
44 if (drm_can_sleep())
45 ret = mgag200_bo_reserve(bo, true);
46 if (ret) {
47 if (ret != -EBUSY)
48 return;
50 store_for_later = true;
53 x2 = x + width - 1;
54 y2 = y + height - 1;
55 spin_lock_irqsave(&mfbdev->dirty_lock, flags);
57 if (mfbdev->y1 < y)
58 y = mfbdev->y1;
59 if (mfbdev->y2 > y2)
60 y2 = mfbdev->y2;
61 if (mfbdev->x1 < x)
62 x = mfbdev->x1;
63 if (mfbdev->x2 > x2)
64 x2 = mfbdev->x2;
66 if (store_for_later) {
67 mfbdev->x1 = x;
68 mfbdev->x2 = x2;
69 mfbdev->y1 = y;
70 mfbdev->y2 = y2;
71 spin_unlock_irqrestore(&mfbdev->dirty_lock, flags);
72 return;
75 mfbdev->x1 = mfbdev->y1 = INT_MAX;
76 mfbdev->x2 = mfbdev->y2 = 0;
77 spin_unlock_irqrestore(&mfbdev->dirty_lock, flags);
79 if (!bo->kmap.virtual) {
80 ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
81 if (ret) {
82 DRM_ERROR("failed to kmap fb updates\n");
83 mgag200_bo_unreserve(bo);
84 return;
86 unmap = true;
88 for (i = y; i <= y2; i++) {
89 /* assume equal stride for now */
90 src_offset = dst_offset = i * mfbdev->mfb.base.pitches[0] + (x * bpp);
91 memcpy_toio(bo->kmap.virtual + src_offset, mfbdev->sysram + src_offset, (x2 - x + 1) * bpp);
94 if (unmap)
95 ttm_bo_kunmap(&bo->kmap);
97 mgag200_bo_unreserve(bo);
100 static void mga_fillrect(struct fb_info *info,
101 const struct fb_fillrect *rect)
103 struct mga_fbdev *mfbdev = info->par;
104 drm_fb_helper_sys_fillrect(info, rect);
105 mga_dirty_update(mfbdev, rect->dx, rect->dy, rect->width,
106 rect->height);
109 static void mga_copyarea(struct fb_info *info,
110 const struct fb_copyarea *area)
112 struct mga_fbdev *mfbdev = info->par;
113 drm_fb_helper_sys_copyarea(info, area);
114 mga_dirty_update(mfbdev, area->dx, area->dy, area->width,
115 area->height);
118 static void mga_imageblit(struct fb_info *info,
119 const struct fb_image *image)
121 struct mga_fbdev *mfbdev = info->par;
122 drm_fb_helper_sys_imageblit(info, image);
123 mga_dirty_update(mfbdev, image->dx, image->dy, image->width,
124 image->height);
128 static struct fb_ops mgag200fb_ops = {
129 .owner = THIS_MODULE,
130 .fb_check_var = drm_fb_helper_check_var,
131 .fb_set_par = drm_fb_helper_set_par,
132 .fb_fillrect = mga_fillrect,
133 .fb_copyarea = mga_copyarea,
134 .fb_imageblit = mga_imageblit,
135 .fb_pan_display = drm_fb_helper_pan_display,
136 .fb_blank = drm_fb_helper_blank,
137 .fb_setcmap = drm_fb_helper_setcmap,
140 static int mgag200fb_create_object(struct mga_fbdev *afbdev,
141 struct drm_mode_fb_cmd2 *mode_cmd,
142 struct drm_gem_object **gobj_p)
144 struct drm_device *dev = afbdev->helper.dev;
145 u32 size;
146 struct drm_gem_object *gobj;
147 int ret = 0;
149 size = mode_cmd->pitches[0] * mode_cmd->height;
150 ret = mgag200_gem_create(dev, size, true, &gobj);
151 if (ret)
152 return ret;
154 *gobj_p = gobj;
155 return ret;
158 static int mgag200fb_create(struct drm_fb_helper *helper,
159 struct drm_fb_helper_surface_size *sizes)
161 struct mga_fbdev *mfbdev =
162 container_of(helper, struct mga_fbdev, helper);
163 struct drm_device *dev = mfbdev->helper.dev;
164 struct drm_mode_fb_cmd2 mode_cmd;
165 struct mga_device *mdev = dev->dev_private;
166 struct fb_info *info;
167 struct drm_framebuffer *fb;
168 struct drm_gem_object *gobj = NULL;
169 int ret;
170 void *sysram;
171 int size;
173 mode_cmd.width = sizes->surface_width;
174 mode_cmd.height = sizes->surface_height;
175 mode_cmd.pitches[0] = mode_cmd.width * ((sizes->surface_bpp + 7) / 8);
177 mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
178 sizes->surface_depth);
179 size = mode_cmd.pitches[0] * mode_cmd.height;
181 ret = mgag200fb_create_object(mfbdev, &mode_cmd, &gobj);
182 if (ret) {
183 DRM_ERROR("failed to create fbcon backing object %d\n", ret);
184 return ret;
187 sysram = vmalloc(size);
188 if (!sysram)
189 goto err_sysram;
191 info = drm_fb_helper_alloc_fbi(helper);
192 if (IS_ERR(info)) {
193 ret = PTR_ERR(info);
194 goto err_alloc_fbi;
197 info->par = mfbdev;
199 ret = mgag200_framebuffer_init(dev, &mfbdev->mfb, &mode_cmd, gobj);
200 if (ret)
201 goto err_framebuffer_init;
203 mfbdev->sysram = sysram;
204 mfbdev->size = size;
206 fb = &mfbdev->mfb.base;
208 /* setup helper */
209 mfbdev->helper.fb = fb;
211 strcpy(info->fix.id, "mgadrmfb");
213 info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
214 info->fbops = &mgag200fb_ops;
216 /* setup aperture base/size for vesafb takeover */
217 info->apertures->ranges[0].base = mdev->dev->mode_config.fb_base;
218 info->apertures->ranges[0].size = mdev->mc.vram_size;
220 drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
221 drm_fb_helper_fill_var(info, &mfbdev->helper, sizes->fb_width,
222 sizes->fb_height);
224 info->screen_base = sysram;
225 info->screen_size = size;
226 info->pixmap.flags = FB_PIXMAP_SYSTEM;
228 DRM_DEBUG_KMS("allocated %dx%d\n",
229 fb->width, fb->height);
231 return 0;
233 err_framebuffer_init:
234 drm_fb_helper_release_fbi(helper);
235 err_alloc_fbi:
236 vfree(sysram);
237 err_sysram:
238 drm_gem_object_unreference_unlocked(gobj);
240 return ret;
243 static int mga_fbdev_destroy(struct drm_device *dev,
244 struct mga_fbdev *mfbdev)
246 struct mga_framebuffer *mfb = &mfbdev->mfb;
248 drm_fb_helper_unregister_fbi(&mfbdev->helper);
249 drm_fb_helper_release_fbi(&mfbdev->helper);
251 if (mfb->obj) {
252 drm_gem_object_unreference_unlocked(mfb->obj);
253 mfb->obj = NULL;
255 drm_fb_helper_fini(&mfbdev->helper);
256 vfree(mfbdev->sysram);
257 drm_framebuffer_unregister_private(&mfb->base);
258 drm_framebuffer_cleanup(&mfb->base);
260 return 0;
263 static const struct drm_fb_helper_funcs mga_fb_helper_funcs = {
264 .gamma_set = mga_crtc_fb_gamma_set,
265 .gamma_get = mga_crtc_fb_gamma_get,
266 .fb_probe = mgag200fb_create,
269 int mgag200_fbdev_init(struct mga_device *mdev)
271 struct mga_fbdev *mfbdev;
272 int ret;
273 int bpp_sel = 32;
275 /* prefer 16bpp on low end gpus with limited VRAM */
276 if (IS_G200_SE(mdev) && mdev->mc.vram_size < (2048*1024))
277 bpp_sel = 16;
279 mfbdev = devm_kzalloc(mdev->dev->dev, sizeof(struct mga_fbdev), GFP_KERNEL);
280 if (!mfbdev)
281 return -ENOMEM;
283 mdev->mfbdev = mfbdev;
284 spin_lock_init(&mfbdev->dirty_lock);
286 drm_fb_helper_prepare(mdev->dev, &mfbdev->helper, &mga_fb_helper_funcs);
288 ret = drm_fb_helper_init(mdev->dev, &mfbdev->helper,
289 mdev->num_crtc, MGAG200FB_CONN_LIMIT);
290 if (ret)
291 goto err_fb_helper;
293 ret = drm_fb_helper_single_add_all_connectors(&mfbdev->helper);
294 if (ret)
295 goto err_fb_setup;
297 /* disable all the possible outputs/crtcs before entering KMS mode */
298 drm_helper_disable_unused_functions(mdev->dev);
300 ret = drm_fb_helper_initial_config(&mfbdev->helper, bpp_sel);
301 if (ret)
302 goto err_fb_setup;
304 return 0;
306 err_fb_setup:
307 drm_fb_helper_fini(&mfbdev->helper);
308 err_fb_helper:
309 mdev->mfbdev = NULL;
311 return ret;
314 void mgag200_fbdev_fini(struct mga_device *mdev)
316 if (!mdev->mfbdev)
317 return;
319 mga_fbdev_destroy(mdev->dev, mdev->mfbdev);