/*
 * From linux (fpc-iii.git): drivers/gpu/drm/exynos/exynos_drm_fbdev.c
 * blob e7c2f2d07f193b0393052be2e1bf32921b804da9
 */
/* exynos_drm_fbdev.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 * Authors:
 *	Inki Dae <inki.dae@samsung.com>
 *	Joonyoung Shim <jy0922.shim@samsung.com>
 *	Seung-Woo Kim <sw0312.kim@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
15 #include <drm/drmP.h>
16 #include <drm/drm_crtc.h>
17 #include <drm/drm_fb_helper.h>
18 #include <drm/drm_crtc_helper.h>
19 #include <drm/exynos_drm.h>
21 #include "exynos_drm_drv.h"
22 #include "exynos_drm_fb.h"
23 #include "exynos_drm_fbdev.h"
24 #include "exynos_drm_gem.h"
25 #include "exynos_drm_iommu.h"
27 #define MAX_CONNECTOR 4
28 #define PREFERRED_BPP 32
30 #define to_exynos_fbdev(x) container_of(x, struct exynos_drm_fbdev,\
31 drm_fb_helper)
33 struct exynos_drm_fbdev {
34 struct drm_fb_helper drm_fb_helper;
35 struct exynos_drm_gem_obj *exynos_gem_obj;
38 static int exynos_drm_fb_mmap(struct fb_info *info,
39 struct vm_area_struct *vma)
41 struct drm_fb_helper *helper = info->par;
42 struct exynos_drm_fbdev *exynos_fbd = to_exynos_fbdev(helper);
43 struct exynos_drm_gem_obj *exynos_gem_obj = exynos_fbd->exynos_gem_obj;
44 struct exynos_drm_gem_buf *buffer = exynos_gem_obj->buffer;
45 unsigned long vm_size;
46 int ret;
48 vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
50 vm_size = vma->vm_end - vma->vm_start;
52 if (vm_size > buffer->size)
53 return -EINVAL;
55 ret = dma_mmap_attrs(helper->dev->dev, vma, buffer->pages,
56 buffer->dma_addr, buffer->size, &buffer->dma_attrs);
57 if (ret < 0) {
58 DRM_ERROR("failed to mmap.\n");
59 return ret;
62 return 0;
65 static struct fb_ops exynos_drm_fb_ops = {
66 .owner = THIS_MODULE,
67 .fb_mmap = exynos_drm_fb_mmap,
68 .fb_fillrect = cfb_fillrect,
69 .fb_copyarea = cfb_copyarea,
70 .fb_imageblit = cfb_imageblit,
71 .fb_check_var = drm_fb_helper_check_var,
72 .fb_set_par = drm_fb_helper_set_par,
73 .fb_blank = drm_fb_helper_blank,
74 .fb_pan_display = drm_fb_helper_pan_display,
75 .fb_setcmap = drm_fb_helper_setcmap,
78 static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
79 struct drm_framebuffer *fb)
81 struct fb_info *fbi = helper->fbdev;
82 struct drm_device *dev = helper->dev;
83 struct exynos_drm_gem_buf *buffer;
84 unsigned int size = fb->width * fb->height * (fb->bits_per_pixel >> 3);
85 unsigned long offset;
87 drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth);
88 drm_fb_helper_fill_var(fbi, helper, fb->width, fb->height);
90 /* RGB formats use only one buffer */
91 buffer = exynos_drm_fb_buffer(fb, 0);
92 if (!buffer) {
93 DRM_LOG_KMS("buffer is null.\n");
94 return -EFAULT;
97 /* map pages with kernel virtual space. */
98 if (!buffer->kvaddr) {
99 if (is_drm_iommu_supported(dev)) {
100 unsigned int nr_pages = buffer->size >> PAGE_SHIFT;
102 buffer->kvaddr = (void __iomem *) vmap(buffer->pages,
103 nr_pages, VM_MAP,
104 pgprot_writecombine(PAGE_KERNEL));
105 } else {
106 phys_addr_t dma_addr = buffer->dma_addr;
107 if (dma_addr)
108 buffer->kvaddr = (void __iomem *)phys_to_virt(dma_addr);
109 else
110 buffer->kvaddr = (void __iomem *)NULL;
112 if (!buffer->kvaddr) {
113 DRM_ERROR("failed to map pages to kernel space.\n");
114 return -EIO;
118 /* buffer count to framebuffer always is 1 at booting time. */
119 exynos_drm_fb_set_buf_cnt(fb, 1);
121 offset = fbi->var.xoffset * (fb->bits_per_pixel >> 3);
122 offset += fbi->var.yoffset * fb->pitches[0];
124 dev->mode_config.fb_base = (resource_size_t)buffer->dma_addr;
125 fbi->screen_base = buffer->kvaddr + offset;
126 if (is_drm_iommu_supported(dev))
127 fbi->fix.smem_start = (unsigned long)
128 (page_to_phys(sg_page(buffer->sgt->sgl)) + offset);
129 else
130 fbi->fix.smem_start = (unsigned long)buffer->dma_addr;
132 fbi->screen_size = size;
133 fbi->fix.smem_len = size;
135 return 0;
138 static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
139 struct drm_fb_helper_surface_size *sizes)
141 struct exynos_drm_fbdev *exynos_fbdev = to_exynos_fbdev(helper);
142 struct exynos_drm_gem_obj *exynos_gem_obj;
143 struct drm_device *dev = helper->dev;
144 struct fb_info *fbi;
145 struct drm_mode_fb_cmd2 mode_cmd = { 0 };
146 struct platform_device *pdev = dev->platformdev;
147 unsigned long size;
148 int ret;
150 DRM_DEBUG_KMS("surface width(%d), height(%d) and bpp(%d\n",
151 sizes->surface_width, sizes->surface_height,
152 sizes->surface_bpp);
154 mode_cmd.width = sizes->surface_width;
155 mode_cmd.height = sizes->surface_height;
156 mode_cmd.pitches[0] = sizes->surface_width * (sizes->surface_bpp >> 3);
157 mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
158 sizes->surface_depth);
160 mutex_lock(&dev->struct_mutex);
162 fbi = framebuffer_alloc(0, &pdev->dev);
163 if (!fbi) {
164 DRM_ERROR("failed to allocate fb info.\n");
165 ret = -ENOMEM;
166 goto out;
169 size = mode_cmd.pitches[0] * mode_cmd.height;
171 exynos_gem_obj = exynos_drm_gem_create(dev, EXYNOS_BO_CONTIG, size);
173 * If physically contiguous memory allocation fails and if IOMMU is
174 * supported then try to get buffer from non physically contiguous
175 * memory area.
177 if (IS_ERR(exynos_gem_obj) && is_drm_iommu_supported(dev)) {
178 dev_warn(&pdev->dev, "contiguous FB allocation failed, falling back to non-contiguous\n");
179 exynos_gem_obj = exynos_drm_gem_create(dev, EXYNOS_BO_NONCONTIG,
180 size);
183 if (IS_ERR(exynos_gem_obj)) {
184 ret = PTR_ERR(exynos_gem_obj);
185 goto err_release_framebuffer;
188 exynos_fbdev->exynos_gem_obj = exynos_gem_obj;
190 helper->fb = exynos_drm_framebuffer_init(dev, &mode_cmd,
191 &exynos_gem_obj->base);
192 if (IS_ERR(helper->fb)) {
193 DRM_ERROR("failed to create drm framebuffer.\n");
194 ret = PTR_ERR(helper->fb);
195 goto err_destroy_gem;
198 helper->fbdev = fbi;
200 fbi->par = helper;
201 fbi->flags = FBINFO_FLAG_DEFAULT;
202 fbi->fbops = &exynos_drm_fb_ops;
204 ret = fb_alloc_cmap(&fbi->cmap, 256, 0);
205 if (ret) {
206 DRM_ERROR("failed to allocate cmap.\n");
207 goto err_destroy_framebuffer;
210 ret = exynos_drm_fbdev_update(helper, helper->fb);
211 if (ret < 0)
212 goto err_dealloc_cmap;
214 mutex_unlock(&dev->struct_mutex);
215 return ret;
217 err_dealloc_cmap:
218 fb_dealloc_cmap(&fbi->cmap);
219 err_destroy_framebuffer:
220 drm_framebuffer_cleanup(helper->fb);
221 err_destroy_gem:
222 exynos_drm_gem_destroy(exynos_gem_obj);
223 err_release_framebuffer:
224 framebuffer_release(fbi);
227 * if failed, all resources allocated above would be released by
228 * drm_mode_config_cleanup() when drm_load() had been called prior
229 * to any specific driver such as fimd or hdmi driver.
231 out:
232 mutex_unlock(&dev->struct_mutex);
233 return ret;
236 static struct drm_fb_helper_funcs exynos_drm_fb_helper_funcs = {
237 .fb_probe = exynos_drm_fbdev_create,
240 int exynos_drm_fbdev_init(struct drm_device *dev)
242 struct exynos_drm_fbdev *fbdev;
243 struct exynos_drm_private *private = dev->dev_private;
244 struct drm_fb_helper *helper;
245 unsigned int num_crtc;
246 int ret;
248 if (!dev->mode_config.num_crtc || !dev->mode_config.num_connector)
249 return 0;
251 fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL);
252 if (!fbdev)
253 return -ENOMEM;
255 private->fb_helper = helper = &fbdev->drm_fb_helper;
256 helper->funcs = &exynos_drm_fb_helper_funcs;
258 num_crtc = dev->mode_config.num_crtc;
260 ret = drm_fb_helper_init(dev, helper, num_crtc, MAX_CONNECTOR);
261 if (ret < 0) {
262 DRM_ERROR("failed to initialize drm fb helper.\n");
263 goto err_init;
266 ret = drm_fb_helper_single_add_all_connectors(helper);
267 if (ret < 0) {
268 DRM_ERROR("failed to register drm_fb_helper_connector.\n");
269 goto err_setup;
273 /* disable all the possible outputs/crtcs before entering KMS mode */
274 drm_helper_disable_unused_functions(dev);
276 ret = drm_fb_helper_initial_config(helper, PREFERRED_BPP);
277 if (ret < 0) {
278 DRM_ERROR("failed to set up hw configuration.\n");
279 goto err_setup;
282 return 0;
284 err_setup:
285 drm_fb_helper_fini(helper);
287 err_init:
288 private->fb_helper = NULL;
289 kfree(fbdev);
291 return ret;
294 static void exynos_drm_fbdev_destroy(struct drm_device *dev,
295 struct drm_fb_helper *fb_helper)
297 struct exynos_drm_fbdev *exynos_fbd = to_exynos_fbdev(fb_helper);
298 struct exynos_drm_gem_obj *exynos_gem_obj = exynos_fbd->exynos_gem_obj;
299 struct drm_framebuffer *fb;
301 if (is_drm_iommu_supported(dev) && exynos_gem_obj->buffer->kvaddr)
302 vunmap(exynos_gem_obj->buffer->kvaddr);
304 /* release drm framebuffer and real buffer */
305 if (fb_helper->fb && fb_helper->fb->funcs) {
306 fb = fb_helper->fb;
307 if (fb) {
308 drm_framebuffer_unregister_private(fb);
309 drm_framebuffer_remove(fb);
313 /* release linux framebuffer */
314 if (fb_helper->fbdev) {
315 struct fb_info *info;
316 int ret;
318 info = fb_helper->fbdev;
319 ret = unregister_framebuffer(info);
320 if (ret < 0)
321 DRM_DEBUG_KMS("failed unregister_framebuffer()\n");
323 if (info->cmap.len)
324 fb_dealloc_cmap(&info->cmap);
326 framebuffer_release(info);
329 drm_fb_helper_fini(fb_helper);
332 void exynos_drm_fbdev_fini(struct drm_device *dev)
334 struct exynos_drm_private *private = dev->dev_private;
335 struct exynos_drm_fbdev *fbdev;
337 if (!private || !private->fb_helper)
338 return;
340 fbdev = to_exynos_fbdev(private->fb_helper);
342 if (fbdev->exynos_gem_obj)
343 exynos_drm_gem_destroy(fbdev->exynos_gem_obj);
345 exynos_drm_fbdev_destroy(dev, private->fb_helper);
346 kfree(fbdev);
347 private->fb_helper = NULL;
350 void exynos_drm_fbdev_restore_mode(struct drm_device *dev)
352 struct exynos_drm_private *private = dev->dev_private;
354 if (!private || !private->fb_helper)
355 return;
357 drm_modeset_lock_all(dev);
358 drm_fb_helper_restore_fbdev_mode(private->fb_helper);
359 drm_modeset_unlock_all(dev);