/*
 * linux/drivers/video/mmp/hw/mmp_ctrl.c
 * Marvell MMP series Display Controller support
 *
 * Copyright (C) 2012 Marvell Technology Group Ltd.
 * Authors: Guoqing Li <ligq@marvell.com>
 *          Lisa Du <cldu@marvell.com>
 *          Zhou Zhu <zzhu3@marvell.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>
#include <linux/kthread.h>
#include <linux/io.h>

#include "mmp_ctrl.h"
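
/*
 * Display controller interrupt handler: acknowledge the asserted status
 * bits by writing ~isr back to SPU_IRQ_ISR (the controller is assumed to
 * clear the bits written as zero) and loop until no enabled source is
 * still pending.
 */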
static irqreturn_t ctrl_handle_irq(int irq, void *dev_id)
{
        struct mmphw_ctrl *ctrl = (struct mmphw_ctrl *)dev_id;
        u32 isr, imask, tmp;

        isr = readl_relaxed(ctrl->reg_base + SPU_IRQ_ISR);
        imask = readl_relaxed(ctrl->reg_base + SPU_IRQ_ENA);

        do {
                /* clear the asserted status bits only */
                tmp = readl_relaxed(ctrl->reg_base + SPU_IRQ_ISR);
                if (tmp & isr)
                        writel_relaxed(~isr, ctrl->reg_base + SPU_IRQ_ISR);
        } while ((isr = readl_relaxed(ctrl->reg_base + SPU_IRQ_ISR)) & imask);

        return IRQ_HANDLED;
}
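
/*
 * Translate an overlay pixel format into the DMA control register fields:
 * the first switch selects the RB/UV/YUV byte-swap flags, the second the
 * hardware format code, enabling colour space conversion for YUV formats.
 */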
static u32 fmt_to_reg(struct mmp_overlay *overlay, int pix_fmt)
{
        u32 rbswap = 0, uvswap = 0, yuvswap = 0,
                csc_en = 0, val = 0,
                vid = overlay_is_vid(overlay);

        switch (pix_fmt) {
        case PIXFMT_RGB565:
        case PIXFMT_RGB1555:
        case PIXFMT_RGB888PACK:
        case PIXFMT_RGB888UNPACK:
        case PIXFMT_RGBA888:
                rbswap = 1;
                break;
        case PIXFMT_VYUY:
        case PIXFMT_YVU422P:
        case PIXFMT_YVU420P:
                uvswap = 1;
                break;
        case PIXFMT_YUYV:
                yuvswap = 1;
                break;
        default:
                break;
        }

        switch (pix_fmt) {
        case PIXFMT_RGB565:
        case PIXFMT_BGR565:
                break;
        case PIXFMT_RGB1555:
        case PIXFMT_BGR1555:
                val = 0x1;
                break;
        case PIXFMT_RGB888PACK:
        case PIXFMT_BGR888PACK:
                val = 0x2;
                break;
        case PIXFMT_RGB888UNPACK:
        case PIXFMT_BGR888UNPACK:
                val = 0x3;
                break;
        case PIXFMT_RGBA888:
        case PIXFMT_BGRA888:
                val = 0x4;
                break;
        case PIXFMT_UYVY:
        case PIXFMT_VYUY:
        case PIXFMT_YUYV:
                val = 0x5;
                csc_en = 1;
                break;
        case PIXFMT_YUV422P:
        case PIXFMT_YVU422P:
                val = 0x6;
                csc_en = 1;
                break;
        case PIXFMT_YUV420P:
        case PIXFMT_YVU420P:
                val = 0x7;
                csc_en = 1;
                break;
        default:
                break;
        }

        return (dma_palette(0) | dma_fmt(vid, val) |
                dma_swaprb(vid, rbswap) | dma_swapuv(vid, uvswap) |
                dma_swapyuv(vid, yuvswap) | dma_csc(vid, csc_en));
}

static void dmafetch_set_fmt(struct mmp_overlay *overlay)
{
        u32 tmp;
        struct mmp_path *path = overlay->path;
        tmp = readl_relaxed(ctrl_regs(path) + dma_ctrl(0, path->id));
        tmp &= ~dma_mask(overlay_is_vid(overlay));
        tmp |= fmt_to_reg(overlay, overlay->win.pix_fmt);
        writel_relaxed(tmp, ctrl_regs(path) + dma_ctrl(0, path->id));
}

static void overlay_set_win(struct mmp_overlay *overlay, struct mmp_win *win)
{
        struct lcd_regs *regs = path_regs(overlay->path);

        /* assert win supported */
        memcpy(&overlay->win, win, sizeof(struct mmp_win));

        mutex_lock(&overlay->access_ok);

        if (overlay_is_vid(overlay)) {
                writel_relaxed(win->pitch[0], &regs->v_pitch_yc);
                writel_relaxed(win->pitch[2] << 16 |
                                win->pitch[1], &regs->v_pitch_uv);

                writel_relaxed((win->ysrc << 16) | win->xsrc, &regs->v_size);
                writel_relaxed((win->ydst << 16) | win->xdst, &regs->v_size_z);
                writel_relaxed(win->ypos << 16 | win->xpos, &regs->v_start);
        } else {
                writel_relaxed(win->pitch[0], &regs->g_pitch);

                writel_relaxed((win->ysrc << 16) | win->xsrc, &regs->g_size);
                writel_relaxed((win->ydst << 16) | win->xdst, &regs->g_size_z);
                writel_relaxed(win->ypos << 16 | win->xpos, &regs->g_start);
        }

        dmafetch_set_fmt(overlay);
        mutex_unlock(&overlay->access_ok);
}

static void dmafetch_onoff(struct mmp_overlay *overlay, int on)
{
        u32 mask = overlay_is_vid(overlay) ? CFG_DMA_ENA_MASK :
                   CFG_GRA_ENA_MASK;
        u32 enable = overlay_is_vid(overlay) ? CFG_DMA_ENA(1) : CFG_GRA_ENA(1);
        u32 tmp;
        struct mmp_path *path = overlay->path;

        mutex_lock(&overlay->access_ok);
        tmp = readl_relaxed(ctrl_regs(path) + dma_ctrl(0, path->id));
        tmp &= ~mask;
        tmp |= (on ? enable : 0);
        writel(tmp, ctrl_regs(path) + dma_ctrl(0, path->id));
        mutex_unlock(&overlay->access_ok);
}

static void path_enabledisable(struct mmp_path *path, int on)
{
        u32 tmp;
        mutex_lock(&path->access_ok);
        tmp = readl_relaxed(ctrl_regs(path) + LCD_SCLK(path));
        if (on)
                tmp &= ~SCLK_DISABLE;
        else
                tmp |= SCLK_DISABLE;
        writel_relaxed(tmp, ctrl_regs(path) + LCD_SCLK(path));
        mutex_unlock(&path->access_ok);
}
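
/*
 * Switch a whole path on or off: on the way up the path clock is ungated
 * before the attached panel is switched on; on the way down the panel is
 * switched off first and the clock gated afterwards.
 */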
static void path_onoff(struct mmp_path *path, int on)
{
        if (path->status == on) {
                dev_info(path->dev, "path %s is already %s\n",
                                path->name, stat_name(path->status));
                return;
        }

        if (on) {
                path_enabledisable(path, 1);

                if (path->panel && path->panel->set_onoff)
                        path->panel->set_onoff(path->panel, 1);
        } else {
                if (path->panel && path->panel->set_onoff)
                        path->panel->set_onoff(path->panel, 0);

                path_enabledisable(path, 0);
        }
        path->status = on;
}

static void overlay_set_onoff(struct mmp_overlay *overlay, int on)
{
        if (overlay->status == on) {
                dev_info(overlay_to_ctrl(overlay)->dev, "overlay %s is already %s\n",
                                overlay->path->name, stat_name(overlay->status));
                return;
        }
        overlay->status = on;
        dmafetch_onoff(overlay, on);
        if (overlay->path->ops.check_status(overlay->path)
                        != overlay->path->status)
                path_onoff(overlay->path, on);
}

static void overlay_set_fetch(struct mmp_overlay *overlay, int fetch_id)
{
        overlay->dmafetch_id = fetch_id;
}

static int overlay_set_addr(struct mmp_overlay *overlay, struct mmp_addr *addr)
{
        struct lcd_regs *regs = path_regs(overlay->path);

        /* FIXME: assert addr supported */
        memcpy(&overlay->addr, addr, sizeof(struct mmp_addr));

        if (overlay_is_vid(overlay)) {
                writel_relaxed(addr->phys[0], &regs->v_y0);
                writel_relaxed(addr->phys[1], &regs->v_u0);
                writel_relaxed(addr->phys[2], &regs->v_v0);
        } else
                writel_relaxed(addr->phys[0], &regs->g_0);

        return overlay->addr.phys[0];
}
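
/*
 * Program the display timing described by @mode: sync polarities, active
 * area, horizontal/vertical porches, total screen size, vsync control and
 * a pixel-clock divider derived from the controller clock rate.
 */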
static void path_set_mode(struct mmp_path *path, struct mmp_mode *mode)
{
        struct lcd_regs *regs = path_regs(path);
        u32 total_x, total_y, vsync_ctrl, tmp, sclk_src, sclk_div,
                link_config = path_to_path_plat(path)->link_config,
                dsi_rbswap = path_to_path_plat(path)->dsi_rbswap;

        /* FIXME: assert videomode supported */
        memcpy(&path->mode, mode, sizeof(struct mmp_mode));

        mutex_lock(&path->access_ok);

        /* polarity of timing signals */
        tmp = readl_relaxed(ctrl_regs(path) + intf_ctrl(path->id)) & 0x1;
        tmp |= mode->vsync_invert ? 0 : 0x8;
        tmp |= mode->hsync_invert ? 0 : 0x4;
        tmp |= link_config & CFG_DUMBMODE_MASK;
        tmp |= CFG_DUMB_ENA(1);
        writel_relaxed(tmp, ctrl_regs(path) + intf_ctrl(path->id));

        /* interface rb_swap setting */
        tmp = readl_relaxed(ctrl_regs(path) + intf_rbswap_ctrl(path->id)) &
                (~(CFG_INTFRBSWAP_MASK));
        tmp |= dsi_rbswap & CFG_INTFRBSWAP_MASK;
        writel_relaxed(tmp, ctrl_regs(path) + intf_rbswap_ctrl(path->id));

        writel_relaxed((mode->yres << 16) | mode->xres, &regs->screen_active);
        writel_relaxed((mode->left_margin << 16) | mode->right_margin,
                &regs->screen_h_porch);
        writel_relaxed((mode->upper_margin << 16) | mode->lower_margin,
                &regs->screen_v_porch);
        total_x = mode->xres + mode->left_margin + mode->right_margin +
                mode->hsync_len;
        total_y = mode->yres + mode->upper_margin + mode->lower_margin +
                mode->vsync_len;
        writel_relaxed((total_y << 16) | total_x, &regs->screen_size);

        /* vsync ctrl */
        if (path->output_type == PATH_OUT_DSI)
                vsync_ctrl = 0x01330133;
        else
                vsync_ctrl = ((mode->xres + mode->right_margin) << 16)
                        | (mode->xres + mode->right_margin);
        writel_relaxed(vsync_ctrl, &regs->vsync_ctrl);

        /*
         * set pixclock div: round the divider up so the resulting pixel
         * clock does not exceed the requested rate
         */
        sclk_src = clk_get_rate(path_to_ctrl(path)->clk);
        sclk_div = sclk_src / mode->pixclock_freq;
        if (sclk_div * mode->pixclock_freq < sclk_src)
                sclk_div++;

        dev_info(path->dev, "%s sclk_src %d sclk_div 0x%x pclk %d\n",
                        __func__, sclk_src, sclk_div, mode->pixclock_freq);

        tmp = readl_relaxed(ctrl_regs(path) + LCD_SCLK(path));
        tmp &= ~CLK_INT_DIV_MASK;
        tmp |= sclk_div;
        writel_relaxed(tmp, ctrl_regs(path) + LCD_SCLK(path));

        mutex_unlock(&path->access_ok);
}

static struct mmp_overlay_ops mmphw_overlay_ops = {
        .set_fetch = overlay_set_fetch,
        .set_onoff = overlay_set_onoff,
        .set_win = overlay_set_win,
        .set_addr = overlay_set_addr,
};

static void ctrl_set_default(struct mmphw_ctrl *ctrl)
{
        u32 tmp, irq_mask;

        /*
         * LCD Global control (LCD_TOP_CTRL) should be configured before
         * any other LCD register is read or written, or there may be issues.
         */
        tmp = readl_relaxed(ctrl->reg_base + LCD_TOP_CTRL);
        tmp |= 0xfff0;
        writel_relaxed(tmp, ctrl->reg_base + LCD_TOP_CTRL);

        /* disable all interrupts */
        irq_mask = path_imasks(0) | err_imask(0) |
                   path_imasks(1) | err_imask(1);
        tmp = readl_relaxed(ctrl->reg_base + SPU_IRQ_ENA);
        tmp &= ~irq_mask;
        writel_relaxed(tmp, ctrl->reg_base + SPU_IRQ_ENA);
}

static void path_set_default(struct mmp_path *path)
{
        struct lcd_regs *regs = path_regs(path);
        u32 dma_ctrl1, mask, tmp, path_config;

        path_config = path_to_path_plat(path)->path_config;

        /* Configure IOPAD: should be parallel only */
        if (PATH_OUT_PARALLEL == path->output_type) {
                mask = CFG_IOPADMODE_MASK | CFG_BURST_MASK | CFG_BOUNDARY_MASK;
                tmp = readl_relaxed(ctrl_regs(path) + SPU_IOPAD_CONTROL);
                tmp &= ~mask;
                tmp |= path_config;
                writel_relaxed(tmp, ctrl_regs(path) + SPU_IOPAD_CONTROL);
        }

        /* Select path clock source */
        tmp = readl_relaxed(ctrl_regs(path) + LCD_SCLK(path));
        tmp &= ~SCLK_SRC_SEL_MASK;
        tmp |= path_config;
        writel_relaxed(tmp, ctrl_regs(path) + LCD_SCLK(path));

        /*
         * Configure default bits: vsync triggers DMA,
         * power save enable, configure alpha registers to
         * display 100% graphics, and set pixel command.
         */
        dma_ctrl1 = 0x2032ff81;

        dma_ctrl1 |= CFG_VSYNC_INV_MASK;
        writel_relaxed(dma_ctrl1, ctrl_regs(path) + dma_ctrl(1, path->id));

        /* Configure default register values */
        writel_relaxed(0x00000000, &regs->blank_color);
        writel_relaxed(0x00000000, &regs->g_1);
        writel_relaxed(0x00000000, &regs->g_start);

        /*
         * 1. enable multiple burst request in the DMA AXI bus arbiter
         *    for faster reads, unless this is the TV path;
         * 2. enable the horizontal smooth filter.
         */
        mask = CFG_GRA_HSMOOTH_MASK | CFG_DMA_HSMOOTH_MASK | CFG_ARBFAST_ENA(1);
        tmp = readl_relaxed(ctrl_regs(path) + dma_ctrl(0, path->id));
        tmp |= mask;
        if (PATH_TV == path->id)
                tmp &= ~CFG_ARBFAST_ENA(1);
        writel_relaxed(tmp, ctrl_regs(path) + dma_ctrl(0, path->id));
}
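
/*
 * Set up one display path from the machine-level config: a temporary
 * mmp_path_info is filled in and handed to mmp_register_path(), then freed
 * once the path exists. Returns 1 on success and 0 on failure, which is
 * what mmphw_probe() checks.
 */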
static int path_init(struct mmphw_path_plat *path_plat,
                struct mmp_mach_path_config *config)
{
        struct mmphw_ctrl *ctrl = path_plat->ctrl;
        struct mmp_path_info *path_info;
        struct mmp_path *path = NULL;

        dev_info(ctrl->dev, "%s: %s\n", __func__, config->name);

        /* init driver data */
        path_info = kzalloc(sizeof(struct mmp_path_info), GFP_KERNEL);
        if (!path_info) {
                dev_err(ctrl->dev, "%s: unable to alloc path_info for %s\n",
                                __func__, config->name);
                return 0;
        }
        path_info->name = config->name;
        path_info->id = path_plat->id;
        path_info->dev = ctrl->dev;
        path_info->overlay_num = config->overlay_num;
        path_info->overlay_ops = &mmphw_overlay_ops;
        path_info->set_mode = path_set_mode;
        path_info->plat_data = path_plat;

        /* create/register platform device */
        path = mmp_register_path(path_info);
        if (!path) {
                kfree(path_info);
                return 0;
        }
        path_plat->path = path;
        path_plat->path_config = config->path_config;
        path_plat->link_config = config->link_config;
        path_plat->dsi_rbswap = config->dsi_rbswap;
        path_set_default(path);

        kfree(path_info);
        return 1;
}

static void path_deinit(struct mmphw_path_plat *path_plat)
{
        if (!path_plat)
                return;

        if (path_plat->path)
                mmp_unregister_path(path_plat->path);
}
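
/*
 * Probe: map the register resource, install the shared IRQ handler, enable
 * the controller clock, program the global defaults and register every path
 * described in the platform data. Most resources are devm-managed, so the
 * error path only unwinds path registration and the clock.
 */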
static int mmphw_probe(struct platform_device *pdev)
{
        struct mmp_mach_plat_info *mi;
        struct resource *res;
        int ret, i, size, irq;
        struct mmphw_path_plat *path_plat;
        struct mmphw_ctrl *ctrl = NULL;

        /* get resources from platform data */
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (res == NULL) {
                dev_err(&pdev->dev, "%s: no IO memory defined\n", __func__);
                ret = -ENOENT;
                goto failed;
        }

        irq = platform_get_irq(pdev, 0);
        if (irq < 0) {
                dev_err(&pdev->dev, "%s: no IRQ defined\n", __func__);
                ret = -ENOENT;
                goto failed;
        }

        /* get configs from platform data */
        mi = pdev->dev.platform_data;
        if (mi == NULL || !mi->path_num || !mi->paths) {
                dev_err(&pdev->dev, "%s: no platform data defined\n", __func__);
                ret = -EINVAL;
                goto failed;
        }

        /* allocate */
        size = sizeof(struct mmphw_ctrl) + sizeof(struct mmphw_path_plat) *
               mi->path_num;
        ctrl = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
        if (!ctrl) {
                ret = -ENOMEM;
                goto failed;
        }

        ctrl->name = mi->name;
        ctrl->path_num = mi->path_num;
        ctrl->dev = &pdev->dev;
        ctrl->irq = irq;
        platform_set_drvdata(pdev, ctrl);
        mutex_init(&ctrl->access_ok);

        /* map registers */
        if (!devm_request_mem_region(ctrl->dev, res->start,
                        resource_size(res), ctrl->name)) {
                dev_err(ctrl->dev,
                        "can't request region for resource %pR\n", res);
                ret = -EINVAL;
                goto failed;
        }

        ctrl->reg_base = devm_ioremap_nocache(ctrl->dev,
                        res->start, resource_size(res));
        if (ctrl->reg_base == NULL) {
                dev_err(ctrl->dev, "%s: res %x - %x map failed\n", __func__,
                        res->start, res->end);
                ret = -ENOMEM;
                goto failed;
        }

        /* request irq */
        ret = devm_request_irq(ctrl->dev, ctrl->irq, ctrl_handle_irq,
                IRQF_SHARED, "lcd_controller", ctrl);
        if (ret < 0) {
                dev_err(ctrl->dev, "%s unable to request IRQ %d\n",
                                __func__, ctrl->irq);
                ret = -ENXIO;
                goto failed;
        }

        /* get clock */
        ctrl->clk = devm_clk_get(ctrl->dev, mi->clk_name);
        if (IS_ERR(ctrl->clk)) {
                dev_err(ctrl->dev, "unable to get clk %s\n", mi->clk_name);
                ret = -ENOENT;
                goto failed;
        }
        clk_prepare_enable(ctrl->clk);

        /* init global regs */
        ctrl_set_default(ctrl);

        /* init paths from machine info and register them */
        for (i = 0; i < ctrl->path_num; i++) {
                /* get from config and machine info */
                path_plat = &ctrl->path_plats[i];
                path_plat->id = i;
                path_plat->ctrl = ctrl;

                /* path init */
                if (!path_init(path_plat, &mi->paths[i])) {
                        ret = -EINVAL;
                        goto failed_path_init;
                }
        }

#ifdef CONFIG_MMP_DISP_SPI
        ret = lcd_spi_register(ctrl);
        if (ret < 0)
                goto failed_path_init;
#endif

        dev_info(ctrl->dev, "device init done\n");

        return 0;

failed_path_init:
        for (i = 0; i < ctrl->path_num; i++) {
                path_plat = &ctrl->path_plats[i];
                path_deinit(path_plat);
        }

        clk_disable_unprepare(ctrl->clk);
failed:
        dev_err(&pdev->dev, "device init failed\n");

        return ret;
}

static struct platform_driver mmphw_driver = {
        .driver = {
                .name = "mmp-disp",
                .owner = THIS_MODULE,
        },
        .probe = mmphw_probe,
};

static int mmphw_init(void)
{
        return platform_driver_register(&mmphw_driver);
}
module_init(mmphw_init);

MODULE_AUTHOR("Li Guoqing<ligq@marvell.com>");
MODULE_DESCRIPTION("Framebuffer driver for mmp");
MODULE_LICENSE("GPL");