1 /* drivers/video/msm_fb/mdp.c
3 * MSM MDP Interface (used by framebuffer core)
5 * Copyright (C) 2007 QUALCOMM Incorporated
6 * Copyright (C) 2007 Google Incorporated
8 * This software is licensed under the terms of the GNU General Public
9 * License version 2, as published by the Free Software Foundation, and
10 * may be copied, distributed, and modified under those terms.
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
18 #include <linux/kernel.h>
20 #include <linux/msm_mdp.h>
21 #include <linux/interrupt.h>
22 #include <linux/wait.h>
23 #include <linux/clk.h>
24 #include <linux/file.h>
25 #include <linux/android_pmem.h>
26 #include <linux/major.h>
27 #include <linux/msm_hw3d.h>
29 #include <mach/msm_iomap.h>
30 #include <mach/msm_fb.h>
31 #include <linux/platform_device.h>
35 #include <asm/mach-types.h>
37 struct class *mdp_class
;
39 #define MDP_CMD_DEBUG_ACCESS_BASE (0x10000)
41 static DECLARE_WAIT_QUEUE_HEAD(mdp_ppp_waitqueue
);
42 static unsigned int mdp_irq_mask
;
43 struct clk
*mdp_clk_to_disable_later
= 0;
44 static struct mdp_blit_req
*timeout_req
;
/*
 * MDP4 overlay entry points and the MDDI overlay DMA-start hook.
 * Declared extern here and wired into mdp_dev in mdp_probe(); defined
 * elsewhere (overlay-capable hardware only).
 */
45 #ifdef CONFIG_FB_MSM_OVERLAY
46 extern int mdp4_overlay_get(struct mdp_device
*mdp_dev
, struct fb_info
*info
, struct mdp_overlay
*req
);
47 extern int mdp4_overlay_set(struct mdp_device
*mdp_dev
, struct fb_info
*info
, struct mdp_overlay
*req
);
48 extern int mdp4_overlay_unset(struct mdp_device
*mdp_dev
, struct fb_info
*info
, int ndx
);
49 extern int mdp4_overlay_play(struct mdp_device
*mdp_dev
, struct fb_info
*info
, struct msmfb_overlay_data
*req
,
50 struct file
**pp_src_file
);
51 extern void mdp4_mddi_overlay(void *priv
, uint32_t addr
, uint32_t stride
,
52 uint32_t width
, uint32_t height
, uint32_t x
,
/* NOTE(review): the tail of this prototype (y parameter and #endif) is
 * missing from this extract. */
/* Serializes blit (PPP) operations in mdp_blit(). */
56 DEFINE_MUTEX(mdp_mutex
);
/*
 * Enable the MDP interrupt source bits in @mask.
 * Caller must hold mdp->lock (see enable_mdp_irq / mdp_dma for callers).
 * Fails when any bit in @mask is already enabled in mdp_irq_mask.
 * NOTE(review): several interior lines (braces, returns, the clk/irq
 * enable path and the mdp_irq_mask update) are missing from this extract.
 */
58 static int locked_enable_mdp_irq(struct mdp_info
*mdp
, uint32_t mask
)
62 /* if the mask bits are already set return an error, this interrupt
63 * is already enabled */
64 if (mdp_irq_mask
& mask
) {
65 pr_err("mdp irq already on %x %x\n", mdp_irq_mask
, mask
);
68 /* if the mdp irq is not already enabled enable it */
/* raise the ebi1 (memory bus) clock rate while the MDP is active */
72 clk_set_rate(mdp
->ebi1_clk
, 128000000);
75 /* clear out any previous irqs for the requested mask*/
76 mdp_writel(mdp
, mask
, MDP_INTR_CLEAR
);
78 /* update the irq mask to reflect the fact that the interrupt is
81 mdp_writel(mdp
, mdp_irq_mask
, MDP_INTR_ENABLE
);
/*
 * Lock-taking wrapper: enables the interrupt sources in @mask under
 * mdp->lock with local IRQs disabled; returns locked_enable_mdp_irq()'s
 * result.
 */
85 static int enable_mdp_irq(struct mdp_info
*mdp
, uint32_t mask
)
90 spin_lock_irqsave(&mdp
->lock
, flags
);
91 ret
= locked_enable_mdp_irq(mdp
, mask
);
92 spin_unlock_irqrestore(&mdp
->lock
, flags
);
/*
 * Disable the MDP interrupt source bits in @mask; caller holds mdp->lock.
 * Errors when none of the requested bits are currently enabled.
 * When the visible path is taken with no remaining users, the IRQ line
 * is masked and the clocks are dropped to idle.
 */
96 static int locked_disable_mdp_irq(struct mdp_info
*mdp
, uint32_t mask
)
98 /* this interrupt is already disabled! */
99 if (!(mdp_irq_mask
& mask
)) {
100 printk(KERN_ERR
"mdp irq already off %x %x\n",
104 /* update the irq mask to reflect the fact that the interrupt is
106 mdp_irq_mask
&= ~(mask
);
107 mdp_writel(mdp
, mdp_irq_mask
, MDP_INTR_ENABLE
);
109 /* if no one is waiting on the interrupt, disable it */
111 disable_irq_nosync(mdp
->irq
);
/* stop the MDP core clock and let the ebi1 bus clock idle */
113 clk_disable(mdp
->clk
);
114 clk_set_rate(mdp
->ebi1_clk
, 0);
/*
 * Lock-taking wrapper around locked_disable_mdp_irq(): disables the
 * interrupt sources in @mask under mdp->lock.
 */
119 int disable_mdp_irq(struct mdp_info
*mdp
, uint32_t mask
)
121 unsigned long irq_flags
;
124 spin_lock_irqsave(&mdp
->lock
, irq_flags
);
125 ret
= locked_disable_mdp_irq(mdp
, mask
);
126 spin_unlock_irqrestore(&mdp
->lock
, irq_flags
);
/*
 * MDP interrupt handler (registered in mdp_probe with request_irq).
 * Reads and acknowledges the raw status, filters it through the software
 * mdp_irq_mask, fires per-interface DMA/irq callbacks exactly once
 * (callback pointers are cleared after use), wakes blit waiters on
 * DL0_ROI_DONE, and finally disables all sources that just fired.
 */
130 static irqreturn_t
mdp_isr(int irq
, void *data
)
133 unsigned long irq_flags
;
134 struct mdp_info
*mdp
= data
;
137 spin_lock_irqsave(&mdp
->lock
, irq_flags
);
/* read and ack the raw hardware status */
139 status
= mdp_readl(mdp
, MDP_INTR_STATUS
);
140 mdp_writel(mdp
, status
, MDP_INTR_CLEAR
);
/* board-specific quirk: drop bit 16 on HTC Leo */
143 #if defined(CONFIG_MACH_HTCLEO)
144 status
&= ~0x10000; // Cotulla
147 // pr_info("%s: status=%08x (irq_mask=%08x)\n", __func__, status,
/* only handle sources software actually enabled */
149 status
&= mdp_irq_mask
;
150 #ifdef CONFIG_MSM_MDP40
/* MDP4 MDDI/DMA switch: treat overlay0/dma_s done as one completion
 * when a PMDH DMA callback is pending */
151 if (mdp
->mdp_dev
.overrides
& MSM_MDP4_MDDI_DMA_SWITCH
) {
152 if(status
&& mdp
->out_if
[MSM_MDDI_PMDH_INTERFACE
].dma_cb
!= NULL
)
153 status
|= (INTR_OVERLAY0_DONE
| MDP_DMA_S_DONE
);
/* dispatch completions to each registered output interface */
156 for (i
= 0; i
< MSM_MDP_NUM_INTERFACES
; ++i
) {
157 struct mdp_out_interface
*out_if
= &mdp
->out_if
[i
];
158 if (status
& out_if
->dma_mask
) {
159 if (out_if
->dma_cb
) {
160 out_if
->dma_cb
->func(out_if
->dma_cb
);
161 out_if
->dma_cb
= NULL
;
163 wake_up(&out_if
->dma_waitqueue
);
165 if (status
& out_if
->irq_mask
) {
166 out_if
->irq_cb
->func(out_if
->irq_cb
);
167 out_if
->irq_cb
= NULL
;
/* PPP (blit) completion */
171 if (status
& DL0_ROI_DONE
)
172 wake_up(&mdp_ppp_waitqueue
);
/* every source that fired is one-shot: disable it again */
175 locked_disable_mdp_irq(mdp
, status
);
177 spin_unlock_irqrestore(&mdp
->lock
, irq_flags
);
/*
 * Return the subset of @mask still enabled in mdp_irq_mask, sampled
 * under mdp->lock.  Non-zero means the operation guarded by those bits
 * has not completed yet (used as the wait condition in mdp_wait).
 */
181 static uint32_t mdp_check_mask(struct mdp_info
*mdp
, uint32_t mask
)
184 unsigned long irq_flags
;
186 spin_lock_irqsave(&mdp
->lock
, irq_flags
);
187 ret
= mdp_irq_mask
& mask
;
188 spin_unlock_irqrestore(&mdp
->lock
, irq_flags
);
/*
 * Sleep on @wq until the interrupt sources in @mask have been disabled
 * by the ISR, or one HZ tick elapses.  On timeout the stale sources are
 * force-disabled, a warning is logged, and the last recorded blit
 * request is dumped for diagnosis.
 * NOTE(review): the return-value assignment lines are missing from this
 * extract; callers compare the result against -ETIMEDOUT.
 */
192 static int mdp_wait(struct mdp_info
*mdp
, uint32_t mask
, wait_queue_head_t
*wq
)
195 unsigned long irq_flags
;
197 // pr_info("%s: WAITING for 0x%x\n", __func__, mask);
198 wait_event_timeout(*wq
, !mdp_check_mask(mdp
, mask
), HZ
);
200 spin_lock_irqsave(&mdp
->lock
, irq_flags
);
/* mask bits still set => the ISR never fired: timed out */
201 if (mdp_irq_mask
& mask
) {
202 locked_disable_mdp_irq(mdp
, mask
);
203 pr_warning("%s: timeout waiting for mdp to complete 0x%x\n",
206 mdp_dump_blit(timeout_req
);
210 // pr_info("%s: SUCCESS waiting for 0x%x\n", __func__, mask);
212 spin_unlock_irqrestore(&mdp
->lock
, irq_flags
);
/*
 * Block until the DMA for @interface completes (via mdp_wait on that
 * interface's dma_waitqueue/dma_mask).  Consecutive timeouts are
 * counted; after MDP_MAX_TIMEOUTS the driver logs a hard error
 * (presumably followed by BUG — the tail lines are missing here).
 */
217 void mdp_dma_wait(struct mdp_device
*mdp_dev
, int interface
)
219 #define MDP_MAX_TIMEOUTS 20
220 static int timeout_count
;
221 struct mdp_info
*mdp
= container_of(mdp_dev
, struct mdp_info
, mdp_dev
);
222 unsigned int mask
= 0;
223 wait_queue_head_t
*wq
;
/* pick the wait queue and completion mask for the requested interface */
226 case MSM_MDDI_PMDH_INTERFACE
:
227 case MSM_MDDI_EMDH_INTERFACE
:
228 case MSM_LCDC_INTERFACE
:
229 case MSM_TV_INTERFACE
:
230 BUG_ON(!mdp
->out_if
[interface
].registered
);
231 mask
= mdp
->out_if
[interface
].dma_mask
;
232 wq
= &mdp
->out_if
[interface
].dma_waitqueue
;
235 pr_err("%s: Unknown interface %d\n", __func__
, interface
);
239 if (mdp_wait(mdp
, mask
, wq
) == -ETIMEDOUT
)
244 if (timeout_count
> MDP_MAX_TIMEOUTS
) {
245 printk(KERN_ERR
"mdp: dma failed %d times, somethings wrong!\n",
/* Wait for the PPP (blit) engine: DL0_ROI_DONE on the ppp wait queue. */
251 static int mdp_ppp_wait(struct mdp_info
*mdp
)
253 return mdp_wait(mdp
, DL0_ROI_DONE
, &mdp_ppp_waitqueue
);
/*
 * Kick one DMA_S transfer of a @width x @height region at @addr/@stride
 * to the panel at offset (@x,@y): builds the dma2 config word (packing,
 * input format, output select, dither, destination color depth and MDDI
 * video packet descriptor), programs the DMA_S registers, then writes
 * MDP_DMA_S_START.  Completion is signalled via the MDP interrupt.
 */
256 static void mdp_dmas_to_mddi(void *priv
, uint32_t addr
, uint32_t stride
,
257 uint32_t width
, uint32_t height
, uint32_t x
, uint32_t y
)
259 struct mdp_info
*mdp
= priv
;
261 uint32_t video_packet_parameter
;
262 uint16_t ld_param
= 1;
/* HTC Leo routes DMA_S to the LCDC block with RGB565 input */
265 if(machine_is_htcleo()) {
266 dma2_cfg
= DMA_PACK_ALIGN_MSB
|
267 DMA_PACK_PATTERN_RGB
;
269 dma2_cfg
|= mdp
->format
;
271 dma2_cfg
|= DMA_OUT_SEL_LCDC
;
273 dma2_cfg
|= DMA_IBUF_FORMAT_RGB565
;
/* everyone else: tightly packed input, output over MDDI */
276 dma2_cfg
= DMA_PACK_TIGHT
|
279 DMA_IBUF_NONCONTIGUOUS
;
281 dma2_cfg
|= mdp
->format
;
/* pack pattern depends on input pixel format on older MDP cores */
283 #if defined CONFIG_MSM_MDP22 || defined CONFIG_MSM_MDP30
284 if (mdp
->format
== DMA_IBUF_FORMAT_RGB888_OR_ARGB8888
)
286 if (mdp
->format
== DMA_IBUF_FORMAT_XRGB8888
)
288 dma2_cfg
|= DMA_PACK_PATTERN_BGR
;
290 dma2_cfg
|= DMA_PACK_PATTERN_RGB
;
292 dma2_cfg
|= DMA_OUT_SEL_MDDI
;
294 dma2_cfg
|= DMA_MDDI_DMAOUT_LCD_SEL_PRIMARY
;
296 dma2_cfg
|= DMA_DITHER_EN
;
/* destination color depth + matching MDDI video packet descriptor */
299 if (mdp
->mdp_dev
.color_format
== MSM_MDP_OUT_IF_FMT_RGB565
) {
300 dma2_cfg
|= DMA_DSTC0G_6BITS
| DMA_DSTC1B_5BITS
| DMA_DSTC2R_5BITS
;
301 video_packet_parameter
= MDDI_VDO_PACKET_DESC_RGB565
;
302 } else if (mdp
->mdp_dev
.color_format
== MSM_MDP_OUT_IF_FMT_RGB666
) {
303 dma2_cfg
|= DMA_DSTC0G_6BITS
| DMA_DSTC1B_6BITS
| DMA_DSTC2R_6BITS
;
304 video_packet_parameter
= MDDI_VDO_PACKET_DESC_RGB666
;
307 /* setup size, address, and stride */
308 mdp_writel(mdp
, (height
<< 16) | (width
), MDP_DMA_S_SIZE
);
309 mdp_writel(mdp
, addr
, MDP_DMA_S_IBUF_ADDR
);
310 mdp_writel(mdp
, stride
, MDP_DMA_S_IBUF_Y_STRIDE
);
312 /* set y & x offset and MDDI transaction parameters */
313 mdp_writel(mdp
, (y
<< 16) | (x
), MDP_DMA_S_OUT_XY
);
314 mdp_writel(mdp
, ld_param
, MDP_MDDI_PARAM_WR_SEL
);
/* 0xE3 presumably selects a dummy/ignored MDDI packet type when the
 * panel ignores pixel data — TODO confirm against the MDDI spec */
315 if (mdp
->mdp_dev
.overrides
& MSM_MDP_PANEL_IGNORE_PIXEL_DATA
) {
316 mdp_writel(mdp
, (video_packet_parameter
<< 16) | 0xE3,
320 mdp_writel(mdp
, (video_packet_parameter
<< 16) | MDDI_VDO_PACKET_PRIM
,
/* commit config and fire the transfer */
324 mdp_writel(mdp
, dma2_cfg
, MDP_DMA_S_CONFIG
);
325 mdp_writel(mdp
, 0, MDP_DMA_S_START
);
/*
 * Kick one DMA_P (primary) transfer to the MDDI panel.  Same config-word
 * construction as mdp_dmas_to_mddi, but the register layout to program
 * differs per MDP generation: raw writel offsets on MDP 3.0/3.02, the
 * command/debug aperture on MDP 2.2, and the named DMA_P registers
 * otherwise.
 */
328 static void mdp_dma_to_mddi(void *priv
, uint32_t addr
, uint32_t stride
,
329 uint32_t width
, uint32_t height
, uint32_t x
,
332 struct mdp_info
*mdp
= priv
;
333 uint32_t dma2_cfg
= 0;
334 uint32_t video_packet_parameter
= 0;
335 uint16_t ld_param
= 0; /* 0=PRIM, 1=SECD, 2=EXT */
337 #if !defined(CONFIG_MSM_MDP30)
338 dma2_cfg
= DMA_PACK_TIGHT
|
341 DMA_IBUF_NONCONTIGUOUS
;
344 dma2_cfg
|= mdp
->format
;
/* pack pattern depends on input pixel format on older MDP cores */
346 #if defined CONFIG_MSM_MDP22 || defined CONFIG_MSM_MDP30
347 if (mdp
->format
== DMA_IBUF_FORMAT_RGB888_OR_ARGB8888
)
349 if (mdp
->format
== DMA_IBUF_FORMAT_XRGB8888
)
351 dma2_cfg
|= DMA_PACK_PATTERN_BGR
;
353 dma2_cfg
|= DMA_PACK_PATTERN_RGB
;
355 dma2_cfg
|= DMA_OUT_SEL_MDDI
;
357 dma2_cfg
|= DMA_MDDI_DMAOUT_LCD_SEL_PRIMARY
;
359 #if !defined(CONFIG_MSM_MDP30)
360 dma2_cfg
|= DMA_DITHER_EN
;
/* destination color depth + matching MDDI video packet descriptor */
363 if (mdp
->mdp_dev
.color_format
== MSM_MDP_OUT_IF_FMT_RGB565
) {
364 dma2_cfg
|= DMA_DSTC0G_6BITS
| DMA_DSTC1B_5BITS
| DMA_DSTC2R_5BITS
;
365 video_packet_parameter
= MDDI_VDO_PACKET_DESC_RGB565
;
366 } else if (mdp
->mdp_dev
.color_format
== MSM_MDP_OUT_IF_FMT_RGB666
) {
367 dma2_cfg
|= DMA_DSTC0G_6BITS
| DMA_DSTC1B_6BITS
| DMA_DSTC2R_6BITS
;
368 video_packet_parameter
= MDDI_VDO_PACKET_DESC_RGB666
;
/* MDP 3.0/3.02: program DMA_P via raw offsets from mdp->base */
372 #if defined(CONFIG_MSM_MDP30) || defined(CONFIG_MSM_MDP302)
373 writel(height
<< 16 | width
, mdp
->base
+ 0x90004);
374 writel(addr
, mdp
->base
+ 0x90008);
375 writel(stride
, mdp
->base
+ 0x9000c);
377 /* set y & x offset and MDDI transaction parameters */
378 writel(y
<< 16 | x
, mdp
->base
+ 0x90010);
379 writel(ld_param
, mdp
->base
+ 0x00090);
380 writel((video_packet_parameter
<< 16) | MDDI_VDO_PACKET_PRIM
,
381 mdp
->base
+ 0x00094);
383 writel(dma2_cfg
, mdp
->base
+ 0x90000);
/* start the transfer */
386 writel(0, mdp
->base
+ 0x0044);
387 #elif defined(CONFIG_MSM_MDP22)
388 /* setup size, address, and stride */
389 mdp_writel(mdp
, (height
<< 16) | (width
),
390 MDP_CMD_DEBUG_ACCESS_BASE
+ 0x0184);
391 mdp_writel(mdp
, addr
, MDP_CMD_DEBUG_ACCESS_BASE
+ 0x0188);
392 mdp_writel(mdp
, stride
, MDP_CMD_DEBUG_ACCESS_BASE
+ 0x018C);
394 /* set y & x offset and MDDI transaction parameters */
395 mdp_writel(mdp
, (y
<< 16) | (x
), MDP_CMD_DEBUG_ACCESS_BASE
+ 0x0194);
396 mdp_writel(mdp
, ld_param
, MDP_CMD_DEBUG_ACCESS_BASE
+ 0x01a0);
397 mdp_writel(mdp
, (video_packet_parameter
<< 16) | MDDI_VDO_PACKET_PRIM
,
398 MDP_CMD_DEBUG_ACCESS_BASE
+ 0x01a4);
400 mdp_writel(mdp
, dma2_cfg
, MDP_CMD_DEBUG_ACCESS_BASE
+ 0x0180);
403 mdp_writel(mdp
, 0, MDP_CMD_DEBUG_ACCESS_BASE
+ 0x0044);
405 /* setup size, address, and stride */
406 mdp_writel(mdp
, (height
<< 16) | (width
), MDP_DMA_P_SIZE
);
407 mdp_writel(mdp
, addr
, MDP_DMA_P_IBUF_ADDR
);
408 mdp_writel(mdp
, stride
, MDP_DMA_P_IBUF_Y_STRIDE
);
410 /* set y & x offset and MDDI transaction parameters */
411 mdp_writel(mdp
, (y
<< 16) | (x
), MDP_DMA_P_OUT_XY
);
412 mdp_writel(mdp
, ld_param
, MDP_MDDI_PARAM_WR_SEL
);
413 mdp_writel(mdp
, (video_packet_parameter
<< 16) | MDDI_VDO_PACKET_PRIM
,
416 mdp_writel(mdp
, dma2_cfg
, MDP_DMA_P_CONFIG
);
417 mdp_writel(mdp
, 0, MDP_DMA_P_START
);
/*
 * Start an asynchronous DMA update on @interface.  Validates the
 * interface, enables its DMA-done interrupt (rejecting concurrent
 * starts with "busy"), records @callback to be fired from mdp_isr,
 * and invokes the interface's dma_start hook — all under mdp->lock.
 */
421 void mdp_dma(struct mdp_device
*mdp_dev
, uint32_t addr
, uint32_t stride
,
422 uint32_t width
, uint32_t height
, uint32_t x
, uint32_t y
,
423 struct msmfb_callback
*callback
, int interface
)
425 struct mdp_info
*mdp
= container_of(mdp_dev
, struct mdp_info
, mdp_dev
);
426 struct mdp_out_interface
*out_if
;
429 if (interface
< 0 || interface
>= MSM_MDP_NUM_INTERFACES
||
430 !mdp
->out_if
[interface
].registered
) {
431 pr_err("%s: Unknown interface: %d\n", __func__
, interface
);
434 out_if
= &mdp
->out_if
[interface
];
436 spin_lock_irqsave(&mdp
->lock
, flags
);
/* non-zero return means a DMA is already in flight on this mask */
437 if (locked_enable_mdp_irq(mdp
, out_if
->dma_mask
)) {
438 pr_err("%s: busy\n", __func__
);
442 out_if
->dma_cb
= callback
;
443 out_if
->dma_start(out_if
->priv
, addr
, stride
, width
, height
, x
, y
);
445 spin_unlock_irqrestore(&mdp
->lock
, flags
);
/*
 * Resolve a blit image's memory_id to a physical region:
 * tries pmem first, then msm_hw3d, then falls back to treating the fd
 * as the framebuffer device (using info->fix.smem_start/len).
 * Returns 0 with *start/*len filled on the visible paths; @filep
 * receives the pinned file for later put_img().
 */
448 static int get_img(struct mdp_img
*img
, struct fb_info
*info
,
449 unsigned long *start
, unsigned long *len
,
452 int put_needed
, ret
= 0;
454 unsigned long vstart
;
/* pmem buffer? (get_pmem_file pins it and returns 0 on success) */
456 if (!get_pmem_file(img
->memory_id
, start
, &vstart
, len
, filep
))
/* hw3d buffer? */
458 else if (!get_msm_hw3d_file(img
->memory_id
, &img
->offset
, start
, len
,
/* otherwise inspect the fd directly */
462 file
= fget_light(img
->memory_id
, &put_needed
);
/* the framebuffer device itself: use its fixed smem region */
466 if (MAJOR(file
->f_dentry
->d_inode
->i_rdev
) == FB_MAJOR
) {
467 *start
= info
->fix
.smem_start
;
468 *len
= info
->fix
.smem_len
;
472 fput_light(file
, put_needed
);
/*
 * Release a file pinned by get_img(), dispatching on its backing type
 * (pmem vs msm_hw3d).
 */
477 static void put_img(struct file
*file
)
480 if (is_pmem_file(file
))
482 else if (is_msm_hw3d_file(file
))
483 put_msm_hw3d_file(file
);
/*
 * Lazily push a pending output-format change into hardware: if
 * dma_config_dirty is set, rewrite the format and pack-pattern fields
 * of MDP_DMA_P_CONFIG from mdp->format / mdp->pack_pattern (set by
 * mdp_set_output_format) and clear the dirty flag.
 */
487 void mdp_configure_dma(struct mdp_device
*mdp_dev
)
489 struct mdp_info
*mdp
= container_of(mdp_dev
, struct mdp_info
, mdp_dev
);
492 if (!mdp
->dma_config_dirty
)
494 dma_cfg
= mdp_readl(mdp
, MDP_DMA_P_CONFIG
);
495 dma_cfg
&= ~DMA_IBUF_FORMAT_MASK
;
496 dma_cfg
&= ~DMA_PACK_PATTERN_MASK
;
497 dma_cfg
|= (mdp
->format
| mdp
->pack_pattern
);
498 mdp_writel(mdp
, dma_cfg
, MDP_DMA_P_CONFIG
);
499 mdp
->dma_config_dirty
= false;
/*
 * Validate whether @bpp is a supported output depth.
 * NOTE(review): the entire body is missing from this extract — only the
 * signature is visible; see mdp_set_output_format for the accepted depths.
 */
504 int mdp_check_output_format(struct mdp_device
*mdp_dev
, int bpp
)
/*
 * Select the DMA_P input format and pack pattern for a framebuffer
 * depth (@bpp): RGB565, RGB888/ARGB8888 (MDP 2.2/3.0) or
 * RGB888/XRGB8888 with BGR packing.  Changes are only staged — the
 * dirty flag makes mdp_configure_dma() write them to hardware later.
 */
517 int mdp_set_output_format(struct mdp_device
*mdp_dev
, int bpp
)
519 struct mdp_info
*mdp
= container_of(mdp_dev
, struct mdp_info
, mdp_dev
);
520 uint32_t format
, pack_pattern
= DMA_PACK_PATTERN_RGB
;
524 format
= DMA_IBUF_FORMAT_RGB565
;
525 pack_pattern
= DMA_PACK_PATTERN_RGB
;
527 #if defined CONFIG_MSM_MDP22 || defined CONFIG_MSM_MDP30
530 format
= DMA_IBUF_FORMAT_RGB888_OR_ARGB8888
;
534 format
= DMA_IBUF_FORMAT_RGB888
;
535 pack_pattern
= DMA_PACK_PATTERN_BGR
;
538 format
= DMA_IBUF_FORMAT_XRGB8888
;
539 pack_pattern
= DMA_PACK_PATTERN_BGR
;
/* only mark dirty when something actually changed */
545 if (format
!= mdp
->format
|| pack_pattern
!= mdp
->pack_pattern
) {
546 mdp
->format
= format
;
547 mdp
->pack_pattern
= pack_pattern
;
548 mdp
->dma_config_dirty
= true;
/*
 * Log every field of a blit request plus the resolved source and
 * destination physical regions — diagnostic aid used when a blit fails
 * or times out (see mdp_blit_and_wait).
 */
554 static void dump_req(struct mdp_blit_req
*req
,
555 unsigned long src_start
, unsigned long src_len
,
556 unsigned long dst_start
, unsigned long dst_len
)
558 pr_err("flags: 0x%x\n", req
->flags
);
559 pr_err("src_start: 0x%08lx\n", src_start
);
560 pr_err("src_len: 0x%08lx\n", src_len
);
561 pr_err("src.offset: 0x%x\n", req
->src
.offset
);
562 pr_err("src.format: 0x%x\n", req
->src
.format
);
563 pr_err("src.width: %d\n", req
->src
.width
);
564 pr_err("src.height: %d\n", req
->src
.height
);
565 pr_err("src_rect.x: %d\n", req
->src_rect
.x
);
566 pr_err("src_rect.y: %d\n", req
->src_rect
.y
);
567 pr_err("src_rect.w: %d\n", req
->src_rect
.w
);
568 pr_err("src_rect.h: %d\n", req
->src_rect
.h
);
570 pr_err("dst_start: 0x%08lx\n", dst_start
);
571 pr_err("dst_len: 0x%08lx\n", dst_len
);
572 pr_err("dst.offset: 0x%x\n", req
->dst
.offset
);
573 pr_err("dst.format: 0x%x\n", req
->dst
.format
);
574 pr_err("dst.width: %d\n", req
->dst
.width
);
575 pr_err("dst.height: %d\n", req
->dst
.height
);
576 pr_err("dst_rect.x: %d\n", req
->dst_rect
.x
);
577 pr_err("dst_rect.y: %d\n", req
->dst_rect
.y
);
578 pr_err("dst_rect.w: %d\n", req
->dst_rect
.w
);
579 pr_err("dst_rect.h: %d\n", req
->dst_rect
.h
);
/*
 * Run one PPP blit synchronously: enable DL0_ROI_DONE, submit via
 * mdp_ppp_blit, then wait for completion.  If the blit submission fails
 * the interrupt is disabled again; if the wait fails, both the original
 * request kept in mdp->req and the (possibly split/modified) request
 * actually submitted are dumped for diagnosis.
 */
582 int mdp_blit_and_wait(struct mdp_info
*mdp
, struct mdp_blit_req
*req
,
583 struct file
*src_file
, unsigned long src_start
, unsigned long src_len
,
584 struct file
*dst_file
, unsigned long dst_start
, unsigned long dst_len
)
587 enable_mdp_irq(mdp
, DL0_ROI_DONE
);
588 ret
= mdp_ppp_blit(mdp
, req
,
589 src_file
, src_start
, src_len
,
590 dst_file
, dst_start
, dst_len
);
/* submission failed: undo the irq enable */
592 disable_mdp_irq(mdp
, DL0_ROI_DONE
);
595 ret
= mdp_ppp_wait(mdp
);
597 printk(KERN_ERR
"%s: failed!\n", __func__
);
598 pr_err("original request:\n");
599 dump_req(mdp
->req
, src_start
, src_len
, dst_start
, dst_len
);
600 pr_err("dead request:\n");
601 dump_req(req
, src_start
, src_len
, dst_start
, dst_len
);
/*
 * Top-level blit entry (mdp_dev.blit).  Validates the request, resolves
 * src/dst images to physical memory via get_img, then performs the blit
 * under mdp_mutex, applying several hardware-bug workarounds:
 *  - MDP 3.1/3.02: reject degenerate 1-pixel scaling combinations;
 *  - pre-3.1: split alpha/rotated blits into 16-line tiles (BG tile
 *    fetch bug);
 *  - MDP 3.1: split by width or height for pathological dst sizes.
 */
608 int mdp_blit(struct mdp_device
*mdp_dev
, struct fb_info
*fb
,
609 struct mdp_blit_req
*req
)
612 unsigned long src_start
= 0, src_len
= 0, dst_start
= 0, dst_len
= 0;
613 struct mdp_info
*mdp
= container_of(mdp_dev
, struct mdp_info
, mdp_dev
);
614 struct file
*src_file
= 0, *dst_file
= 0;
/* reject scaling to/from a 1-pixel dimension the hardware mishandles */
616 #if defined(CONFIG_MSM_MDP31) || defined(CONFIG_MSM_MDP302)
617 if (req
->flags
& MDP_ROT_90
) {
618 if (unlikely(((req
->dst_rect
.h
== 1) &&
619 ((req
->src_rect
.w
!= 1) ||
620 (req
->dst_rect
.w
!= req
->src_rect
.h
))) ||
621 ((req
->dst_rect
.w
== 1) && ((req
->src_rect
.h
!= 1) ||
622 (req
->dst_rect
.h
!= req
->src_rect
.w
))))) {
623 pr_err("mpd_ppp: error scaling when size is 1!\n");
627 if (unlikely(((req
->dst_rect
.w
== 1) &&
628 ((req
->src_rect
.w
!= 1) ||
629 (req
->dst_rect
.h
!= req
->src_rect
.h
))) ||
/* NOTE(review): the second arm below compares dst_rect.h against
 * src_rect.h twice; the first comparison looks like it should involve
 * the width — confirm against upstream msm_fb sources. */
630 ((req
->dst_rect
.h
== 1) && ((req
->src_rect
.h
!= 1) ||
631 (req
->dst_rect
.h
!= req
->src_rect
.h
))))) {
632 pr_err("mpd_ppp: error scaling when size is 1!\n");
638 /* WORKAROUND FOR HARDWARE BUG IN BG TILE FETCH */
639 if (unlikely(req
->src_rect
.h
== 0 ||
640 req
->src_rect
.w
== 0)) {
641 printk(KERN_ERR
"mdp_ppp: src img of zero size!\n");
644 if (unlikely(req
->dst_rect
.h
== 0 ||
645 req
->dst_rect
.w
== 0))
648 /* do this first so that if this fails, the caller can always
649 * safely call put_img */
650 if (unlikely(get_img(&req
->src
, fb
, &src_start
, &src_len
, &src_file
))) {
651 printk(KERN_ERR
"mdp_ppp: could not retrieve src image from "
656 if (unlikely(get_img(&req
->dst
, fb
, &dst_start
, &dst_len
, &dst_file
))) {
657 printk(KERN_ERR
"mdp_ppp: could not retrieve dst image from "
/* serialize all PPP work */
662 mutex_lock(&mdp_mutex
);
665 /* transp_masking unimplemented */
666 req
->transp_mask
= MDP_TRANSP_NOP
;
/* pre-3.1 tiling workaround: blend+rotate on narrow destinations is
 * done 16 destination lines at a time */
668 #if !defined(CONFIG_MSM_MDP31) && !defined(CONFIG_MSM_MDP302)
669 if (unlikely((req
->transp_mask
!= MDP_TRANSP_NOP
||
670 req
->alpha
!= MDP_ALPHA_NOP
||
671 HAS_ALPHA(req
->src
.format
)) &&
672 (req
->flags
& MDP_ROT_90
&&
673 req
->dst_rect
.w
<= 16 && req
->dst_rect
.h
>= 16))) {
675 unsigned int tiles
= req
->dst_rect
.h
/ 16;
676 unsigned int remainder
= req
->dst_rect
.h
% 16;
677 req
->src_rect
.w
= 16*req
->src_rect
.w
/ req
->dst_rect
.h
;
678 req
->dst_rect
.h
= 16;
679 for (i
= 0; i
< tiles
; i
++) {
680 ret
= mdp_blit_and_wait(mdp
, req
,
681 src_file
, src_start
, src_len
,
682 dst_file
, dst_start
, dst_len
);
/* advance to the next tile */
685 req
->dst_rect
.y
+= 16;
686 req
->src_rect
.x
+= req
->src_rect
.w
;
/* final partial tile */
690 req
->src_rect
.w
= remainder
*req
->src_rect
.w
/ req
->dst_rect
.h
;
691 req
->dst_rect
.h
= remainder
;
694 /* Workarounds for MDP 3.1 hardware bugs */
695 if (unlikely((mdp_get_bytes_per_pixel(req
->dst
.format
) == 4) &&
696 (req
->dst_rect
.w
!= 1) &&
697 (((req
->dst_rect
.w
% 8) == 6) ||
698 ((req
->dst_rect
.w
% 32) == 3) ||
699 ((req
->dst_rect
.w
% 32) == 1)))) {
700 ret
= mdp_ppp_blit_split_width(mdp
, req
,
701 src_file
, src_start
, src_len
,
702 dst_file
, dst_start
, dst_len
);
704 } else if (unlikely((req
->dst_rect
.w
!= 1) && (req
->dst_rect
.h
!= 1) &&
705 ((req
->dst_rect
.h
% 32) == 3 ||
706 (req
->dst_rect
.h
% 32) == 1))) {
707 ret
= mdp_ppp_blit_split_height(mdp
, req
,
708 src_file
, src_start
, src_len
,
709 dst_file
, dst_start
, dst_len
);
/* common case: single synchronous blit */
713 ret
= mdp_blit_and_wait(mdp
, req
,
714 src_file
, src_start
, src_len
,
715 dst_file
, dst_start
, dst_len
);
719 mutex_unlock(&mdp_mutex
);
/*
 * Blit from one framebuffer to another using their fixed smem regions
 * directly (file handles passed as -1, so no pmem/hw3d lookup).
 * Enables DL0_ROI_DONE, submits the PPP blit, waits for completion,
 * and disables the interrupt on the way out.
 */
723 int mdp_fb_mirror(struct mdp_device
*mdp_dev
,
724 struct fb_info
*src_fb
, struct fb_info
*dst_fb
,
725 struct mdp_blit_req
*req
)
728 struct mdp_info
*mdp
= container_of(mdp_dev
, struct mdp_info
, mdp_dev
);
730 if (!src_fb
|| !dst_fb
)
733 enable_mdp_irq(mdp
, DL0_ROI_DONE
);
734 ret
= mdp_ppp_blit(mdp
, req
,
735 -1, src_fb
->fix
.smem_start
, src_fb
->fix
.smem_len
,
736 -1, dst_fb
->fix
.smem_start
, dst_fb
->fix
.smem_len
);
740 ret
= mdp_ppp_wait(mdp
);
742 pr_err("mdp_ppp_wait error\n");
743 goto err_wait_failed
;
748 disable_mdp_irq(mdp
, DL0_ROI_DONE
);
/*
 * Write a group-display id into the MDP_FULL_BYPASS_WORD43 register.
 */
754 void mdp_set_grp_disp(struct mdp_device
*mdp_dev
, unsigned disp_id
)
756 struct mdp_info
*mdp
= container_of(mdp_dev
, struct mdp_info
, mdp_dev
);
759 mdp_writel(mdp
, disp_id
, MDP_FULL_BYPASS_WORD43
);
762 /* used by output interface drivers like mddi and lcdc */
/*
 * Register an output interface slot: records the private data, the
 * DMA-done interrupt mask, and the dma_start hook, and initializes the
 * per-interface wait queue.  Fails for an out-of-range index or a slot
 * that is already registered.  All state updates happen under mdp->lock.
 */
763 int mdp_out_if_register(struct mdp_device
*mdp_dev
, int interface
,
764 void *private_data
, uint32_t dma_mask
,
765 mdp_dma_start_func_t dma_start
)
767 struct mdp_info
*mdp
= container_of(mdp_dev
, struct mdp_info
, mdp_dev
);
771 if (interface
< 0 || interface
>= MSM_MDP_NUM_INTERFACES
) {
772 pr_err("%s: invalid interface (%d)\n", __func__
, interface
);
776 spin_lock_irqsave(&mdp
->lock
, flags
);
778 if (mdp
->out_if
[interface
].registered
) {
779 pr_err("%s: interface (%d) already registered\n", __func__
,
785 init_waitqueue_head(&mdp
->out_if
[interface
].dma_waitqueue
);
786 mdp
->out_if
[interface
].registered
= 1;
787 mdp
->out_if
[interface
].priv
= private_data
;
788 mdp
->out_if
[interface
].dma_mask
= dma_mask
;
789 mdp
->out_if
[interface
].dma_start
= dma_start
;
790 mdp
->out_if
[interface
].dma_cb
= NULL
;
793 spin_unlock_irqrestore(&mdp
->lock
, flags
);
/*
 * Arm (or, on the visible alternate path, disarm) an auxiliary
 * interrupt for a registered output interface: enabling the sources in
 * @mask and recording @cb to be fired once from mdp_isr; the other
 * branch disables the sources and clears the stored mask/callback.
 * All under mdp->lock.
 */
797 int mdp_out_if_req_irq(struct mdp_device
*mdp_dev
, int interface
,
798 uint32_t mask
, struct msmfb_callback
*cb
)
800 struct mdp_info
*mdp
= container_of(mdp_dev
, struct mdp_info
, mdp_dev
);
804 if (interface
< 0 || interface
>= MSM_MDP_NUM_INTERFACES
) {
805 pr_err("%s: invalid interface (%d)\n", __func__
, interface
);
807 } else if (!mdp
->out_if
[interface
].registered
) {
808 pr_err("%s: interface (%d) not registered\n", __func__
,
813 spin_lock_irqsave(&mdp
->lock
, flags
);
816 ret
= locked_enable_mdp_irq(mdp
, mask
);
818 pr_err("%s: busy\n", __func__
);
821 mdp
->out_if
[interface
].irq_mask
= mask
;
822 mdp
->out_if
[interface
].irq_cb
= cb
;
/* disarm path: drop the sources and forget the callback */
824 locked_disable_mdp_irq(mdp
, mask
);
825 mdp
->out_if
[interface
].irq_mask
= 0;
826 mdp
->out_if
[interface
].irq_cb
= NULL
;
830 spin_unlock_irqrestore(&mdp
->lock
, flags
);
/*
 * Attach a class_interface to the msm_mdp class so the client is
 * notified of mdp devices; errors out when the class does not exist yet
 * (mdp_init has not run or failed).
 */
834 int register_mdp_client(struct class_interface
*cint
)
837 pr_err("mdp: no mdp_class when registering mdp client\n");
840 cint
->class = mdp_class
;
841 return class_interface_register(cint
);
/*
 * MDP4 variant of mdp_hw_init: only masks all interrupt sources here —
 * the rest of MDP4 bring-up is done by mdp4_hw_init (see mdp_probe).
 * The non-MDP4 variant below pulls in the CSC lookup tables instead.
 */
844 #ifdef CONFIG_MSM_MDP40
845 void mdp_hw_init(struct mdp_info
*mdp
)
848 mdp_writel(mdp
, 0, MDP_INTR_ENABLE
);
851 #include "mdp_csc_table.h"
/*
 * Enable the MDP tear-check block using the platform-provided sync
 * configuration, threshold, and start position (MDP 3.02; called from
 * mdp_probe when pdata->tearing_check is set).
 */
853 void mdp_check_tearing(struct mdp_info
*mdp
, struct msm_mdp_platform_data
*pdata
)
855 mdp_writel(mdp
, pdata
->sync_config
, MDP_SYNC_CONFIG_0
);
856 mdp_writel(mdp
, 1, MDP_TEAR_CHECK_EN
);
857 mdp_writel(mdp
, pdata
->sync_thresh
, MDP_SYNC_THRESH_0
);
858 mdp_writel(mdp
, pdata
->sync_start_pos
, MDP_PRIM_START_POS
);
/*
 * One-time hardware bring-up for non-MDP4 cores: mask all interrupts,
 * unlock debug register access, set EBI2 port mapping, temporarily stop
 * LCDC while loading the CSC color lookup and matrix tables, zero a set
 * of unused foreground/background registers, initialize the PPP scale
 * unit, and restore LCDC if it was running.
 */
860 void mdp_hw_init(struct mdp_info
*mdp
)
867 mdp_writel(mdp
, 0, MDP_INTR_ENABLE
);
869 /* debug interface write access */
870 mdp_writel(mdp
, 1, 0x60);
871 mdp_writel(mdp
, 1, MDP_EBI2_PORTMAP_MODE
);
/* remember LCDC state so it can be re-enabled after table loads */
873 #ifndef CONFIG_MSM_MDP22
874 lcdc_enabled
= mdp_readl(mdp
, MDP_LCDC_EN
);
876 mdp_writel(mdp
, 0, MDP_LCDC_EN
);
877 /* enable auto clock gating for all blocks by default */
878 mdp_writel(mdp
, 0xffffffff, MDP_CGC_EN
);
879 /* reset color/gamma correct parms */
880 mdp_writel(mdp
, 0, MDP_DMA_P_COLOR_CORRECT_CONFIG
);
883 mdp_writel(mdp
, 0, MDP_CMD_DEBUG_ACCESS_BASE
+ 0x01f8);
884 mdp_writel(mdp
, 0, MDP_CMD_DEBUG_ACCESS_BASE
+ 0x01fc);
885 mdp_writel(mdp
, 1, 0x60);
/* load the color space conversion LUT */
887 for (n
= 0; n
< ARRAY_SIZE(csc_color_lut
); n
++)
888 mdp_writel(mdp
, csc_color_lut
[n
].val
, csc_color_lut
[n
].reg
);
890 /* clear up unused fg/main registers */
891 /* comp.plane 2&3 ystride */
892 mdp_writel(mdp
, 0, MDP_CMD_DEBUG_ACCESS_BASE
+ 0x0120);
894 /* unpacked pattern */
895 mdp_writel(mdp
, 0, MDP_CMD_DEBUG_ACCESS_BASE
+ 0x012c);
896 mdp_writel(mdp
, 0, MDP_CMD_DEBUG_ACCESS_BASE
+ 0x0130);
897 mdp_writel(mdp
, 0, MDP_CMD_DEBUG_ACCESS_BASE
+ 0x0134);
898 mdp_writel(mdp
, 0, MDP_CMD_DEBUG_ACCESS_BASE
+ 0x0158);
899 mdp_writel(mdp
, 0, MDP_CMD_DEBUG_ACCESS_BASE
+ 0x015c);
900 mdp_writel(mdp
, 0, MDP_CMD_DEBUG_ACCESS_BASE
+ 0x0160);
901 mdp_writel(mdp
, 0, MDP_CMD_DEBUG_ACCESS_BASE
+ 0x0170);
902 mdp_writel(mdp
, 0, MDP_CMD_DEBUG_ACCESS_BASE
+ 0x0174);
903 mdp_writel(mdp
, 0, MDP_CMD_DEBUG_ACCESS_BASE
+ 0x017c);
905 /* comp.plane 2 & 3 */
906 mdp_writel(mdp
, 0, MDP_CMD_DEBUG_ACCESS_BASE
+ 0x0114);
907 mdp_writel(mdp
, 0, MDP_CMD_DEBUG_ACCESS_BASE
+ 0x0118);
909 /* clear unused bg registers */
910 mdp_writel(mdp
, 0, MDP_CMD_DEBUG_ACCESS_BASE
+ 0x01c8);
911 mdp_writel(mdp
, 0, MDP_CMD_DEBUG_ACCESS_BASE
+ 0x01d0);
912 mdp_writel(mdp
, 0, MDP_CMD_DEBUG_ACCESS_BASE
+ 0x01dc);
913 mdp_writel(mdp
, 0, MDP_CMD_DEBUG_ACCESS_BASE
+ 0x01e0);
914 mdp_writel(mdp
, 0, MDP_CMD_DEBUG_ACCESS_BASE
+ 0x01e4);
/* load the CSC matrix coefficients */
916 for (n
= 0; n
< ARRAY_SIZE(csc_matrix_config_table
); n
++)
917 mdp_writel(mdp
, csc_matrix_config_table
[n
].val
,
918 csc_matrix_config_table
[n
].reg
);
920 mdp_ppp_init_scale(mdp
);
922 #ifndef CONFIG_MSM_MDP31
923 mdp_writel(mdp
, 0x04000400, MDP_COMMAND_CONFIG
);
/* restore LCDC if we stopped it above */
925 #ifndef CONFIG_MSM_MDP22
927 mdp_writel(mdp
, 1, MDP_LCDC_EN
);
930 #endif //CONFIG_MSM_MDP40
/*
 * Platform-driver probe: allocates and initializes the mdp_info,
 * maps the register resource, wires the mdp_dev operation table,
 * applies platform-data overrides/color format, registers the primary
 * MDDI output interface on DMA_P or DMA_S (with the MDP4 overlay
 * switch variant), acquires and enables clocks, installs the ISR
 * (initially disabled), runs hardware init, and registers the device
 * under mdp_class.  Errors unwind via the labels at the bottom.
 */
932 int mdp_probe(struct platform_device
*pdev
)
934 struct resource
*resource
;
936 struct mdp_info
*mdp
;
937 struct msm_mdp_platform_data
*pdata
= pdev
->dev
.platform_data
;
939 resource
= platform_get_resource(pdev
, IORESOURCE_MEM
, 0);
941 pr_err("mdp: can not get mdp mem resource!\n");
945 mdp
= kzalloc(sizeof(struct mdp_info
), GFP_KERNEL
);
949 spin_lock_init(&mdp
->lock
);
951 mdp
->irq
= platform_get_irq(pdev
, 0);
953 pr_err("mdp: can not get mdp irq\n");
958 mdp
->base
= ioremap(resource
->start
,
959 resource
->end
- resource
->start
);
960 if (mdp
->base
== 0) {
961 printk(KERN_ERR
"msmfb: cannot allocate mdp regs!\n");
/* operation table exposed to the framebuffer core */
966 mdp
->mdp_dev
.dma
= mdp_dma
;
967 mdp
->mdp_dev
.dma_wait
= mdp_dma_wait
;
968 mdp
->mdp_dev
.blit
= mdp_blit
;
969 #ifdef CONFIG_FB_MSM_OVERLAY
970 mdp
->mdp_dev
.overlay_get
= mdp4_overlay_get
;
971 mdp
->mdp_dev
.overlay_set
= mdp4_overlay_set
;
972 mdp
->mdp_dev
.overlay_unset
= mdp4_overlay_unset
;
973 mdp
->mdp_dev
.overlay_play
= mdp4_overlay_play
;
975 mdp
->mdp_dev
.set_grp_disp
= mdp_set_grp_disp
;
976 mdp
->mdp_dev
.set_output_format
= mdp_set_output_format
;
977 mdp
->mdp_dev
.check_output_format
= mdp_check_output_format
;
978 mdp
->mdp_dev
.configure_dma
= mdp_configure_dma
;
/* board overrides and output color format from platform data */
980 if (pdata
== NULL
|| pdata
->overrides
== 0)
981 mdp
->mdp_dev
.overrides
= 0;
982 else if(pdata
->overrides
)
983 mdp
->mdp_dev
.overrides
= pdata
->overrides
;
985 if (pdata
== NULL
|| pdata
->color_format
== 0)
986 mdp
->mdp_dev
.color_format
= MSM_MDP_OUT_IF_FMT_RGB565
;
987 else if(pdata
->color_format
)
988 mdp
->mdp_dev
.color_format
= pdata
->color_format
;
/* register the primary MDDI interface on the configured DMA channel */
990 if (pdata
== NULL
|| pdata
->dma_channel
== MDP_DMA_P
) {
991 #ifdef CONFIG_MSM_MDP40
992 if (mdp
->mdp_dev
.overrides
& MSM_MDP4_MDDI_DMA_SWITCH
) {
993 ret
= mdp_out_if_register(&mdp
->mdp_dev
,
994 MSM_MDDI_PMDH_INTERFACE
, mdp
, INTR_OVERLAY0_DONE
995 | MDP_DMA_S_DONE
, mdp4_mddi_overlay
);
997 ret
= mdp_out_if_register(&mdp
->mdp_dev
,
998 MSM_MDDI_PMDH_INTERFACE
, mdp
, INTR_OVERLAY0_DONE
,
1002 ret
= mdp_out_if_register(&mdp
->mdp_dev
,
1003 MSM_MDDI_PMDH_INTERFACE
, mdp
, MDP_DMA_P_DONE
,
1006 } else if (pdata
->dma_channel
== MDP_DMA_S
) {
1007 ret
= mdp_out_if_register(&mdp
->mdp_dev
,
1008 MSM_MDDI_PMDH_INTERFACE
, mdp
, MDP_DMA_S_DONE
,
1013 goto error_mddi_pmdh_register
;
/* clocks: MDP core clock plus ebi1 bus clock */
1015 mdp
->clk
= clk_get(&pdev
->dev
, "mdp_clk");
1016 if (IS_ERR(mdp
->clk
)) {
1017 printk(KERN_INFO
"mdp: failed to get mdp clk");
1018 ret
= PTR_ERR(mdp
->clk
);
1019 goto error_get_mdp_clk
;
1022 mdp
->ebi1_clk
= clk_get(NULL
, "ebi1_clk");
1023 if (IS_ERR(mdp
->ebi1_clk
)) {
1024 pr_err("mdp: failed to get ebi1 clk\n");
1025 ret
= PTR_ERR(mdp
->ebi1_clk
);
1026 goto error_get_ebi1_clk
;
/* ISR installed but line kept disabled until a DMA is started */
1030 ret
= request_irq(mdp
->irq
, mdp_isr
, IRQF_DISABLED
, "msm_mdp", mdp
);
1032 goto error_request_irq
;
1033 disable_irq(mdp
->irq
);
/* keep the core clock on through boot; mdp_lateinit turns it off */
1035 clk_enable(mdp
->clk
);
1036 mdp_clk_to_disable_later
= mdp
->clk
;
1037 #ifdef CONFIG_MSM_MDP40
1039 if (mdp_readl(mdp
, 0xc0000))
1040 mdp_writel(mdp
, 0x8, 0x0038);
1042 mdp_writel(mdp
, 0xa, 0x0038); //mddi
1043 //FIXME: should select mddi or lcdc interface
1044 //mdp_writel(mdp, 0x8, 0x0038); //lcdc
1047 #ifdef CONFIG_MSM_MDP40
1048 extern void mdp4_hw_init(struct mdp_info
*mdp
);
1054 #if defined CONFIG_MSM_MDP302
1055 /* enable the tearing check in MDP */
1056 if(pdata
!= NULL
&& pdata
->tearing_check
)
1057 mdp_check_tearing(mdp
, pdata
);
1059 /* register mdp device */
1060 mdp
->mdp_dev
.dev
.parent
= &pdev
->dev
;
1061 mdp
->mdp_dev
.dev
.class = mdp_class
;
1062 dev_set_name(&mdp
->mdp_dev
.dev
, "mdp%d", pdev
->id
);
1064 /* if you can remove the platform device you'd have to implement
1066 mdp_dev.release = mdp_class; */
1068 ret
= device_register(&mdp
->mdp_dev
.dev
);
1070 goto error_device_register
;
1072 pr_info("%s: initialized\n", __func__
);
/* error unwind */
1076 error_device_register
:
1077 free_irq(mdp
->irq
, mdp
);
1079 clk_put(mdp
->ebi1_clk
);
1083 error_mddi_pmdh_register
:
/* Platform driver bound to the "msm_mdp" device (probe = mdp_probe). */
1091 static struct platform_driver msm_mdp_driver
= {
1093 .driver
= {.name
= "msm_mdp"},
/*
 * Late initcall: drop the MDP core clock that mdp_probe intentionally
 * left running through early boot.
 */
1096 static int __init
mdp_lateinit(void)
1098 if (mdp_clk_to_disable_later
)
1099 clk_disable(mdp_clk_to_disable_later
);
/*
 * Subsystem initcall: create the "msm_mdp" device class, then register
 * the platform driver.
 */
1103 static int __init
mdp_init(void)
1105 mdp_class
= class_create(THIS_MODULE
, "msm_mdp");
1106 if (IS_ERR(mdp_class
)) {
1107 printk(KERN_ERR
"Error creating mdp class\n");
1108 return PTR_ERR(mdp_class
);
1110 return platform_driver_register(&msm_mdp_driver
);
/* Early registration; mdp_lateinit runs after all drivers have probed. */
1113 subsys_initcall(mdp_init
);
1114 late_initcall(mdp_lateinit
);