// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016, The Linux Foundation. All rights reserved.
 */

#include <linux/irqdomain.h>
#include <linux/irq.h>

#include "msm_drv.h"
#include "mdp5_kms.h"
#define to_mdp5_mdss(x) container_of(x, struct mdp5_mdss, base)
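/*
 * Per-device MDSS wrapper state: register mappings, the vdd (GDSC) regulator,
 * interface clocks and the bookkeeping for the hw interrupt controller.
 */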
struct mdp5_mdss {
	struct msm_mdss base;

	void __iomem *mmio, *vbif;

	struct regulator *vdd;

	struct clk *ahb_clk;
	struct clk *axi_clk;
	struct clk *vsync_clk;

	struct {
		volatile unsigned long enabled_mask;
		struct irq_domain *domain;
	} irqcontroller;
};
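/* Read/write helpers for the MDSS wrapper's own register block */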
static inline void mdss_write(struct mdp5_mdss *mdp5_mdss, u32 reg, u32 data)
{
	msm_writel(data, mdp5_mdss->mmio + reg);
}
static inline u32 mdss_read(struct mdp5_mdss *mdp5_mdss, u32 reg)
{
	return msm_readl(mdp5_mdss->mmio + reg);
}
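/*
 * Top-level MDSS interrupt handler: reads HW_INTR_STATUS and forwards each
 * pending bit to the matching mapping in the sub-block irq domain.
 */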
static irqreturn_t mdss_irq(int irq, void *arg)
{
	struct mdp5_mdss *mdp5_mdss = arg;
	u32 intr;

	intr = mdss_read(mdp5_mdss, REG_MDSS_HW_INTR_STATUS);

	VERB("intr=%08x", intr);

	while (intr) {
		irq_hw_number_t hwirq = fls(intr) - 1;

		generic_handle_irq(irq_find_mapping(
				mdp5_mdss->irqcontroller.domain, hwirq));
		intr &= ~(1 << hwirq);
	}

	return IRQ_HANDLED;
}
/*
 * interrupt-controller implementation, so sub-blocks (MDP/HDMI/eDP/DSI/etc)
 * can register to get their irqs delivered
 */

#define VALID_IRQS  (MDSS_HW_INTR_STATUS_INTR_MDP | \
		MDSS_HW_INTR_STATUS_INTR_DSI0 | \
		MDSS_HW_INTR_STATUS_INTR_DSI1 | \
		MDSS_HW_INTR_STATUS_INTR_HDMI | \
		MDSS_HW_INTR_STATUS_INTR_EDP)
static void mdss_hw_mask_irq(struct irq_data *irqd)
{
	struct mdp5_mdss *mdp5_mdss = irq_data_get_irq_chip_data(irqd);

	smp_mb__before_atomic();
	clear_bit(irqd->hwirq, &mdp5_mdss->irqcontroller.enabled_mask);
	smp_mb__after_atomic();
}
static void mdss_hw_unmask_irq(struct irq_data *irqd)
{
	struct mdp5_mdss *mdp5_mdss = irq_data_get_irq_chip_data(irqd);

	smp_mb__before_atomic();
	set_bit(irqd->hwirq, &mdp5_mdss->irqcontroller.enabled_mask);
	smp_mb__after_atomic();
}
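/* mask/unmask only track the per-sub-block enable state in enabled_mask */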
static struct irq_chip mdss_hw_irq_chip = {
	.name		= "mdss",
	.irq_mask	= mdss_hw_mask_irq,
	.irq_unmask	= mdss_hw_unmask_irq,
};
static int mdss_hw_irqdomain_map(struct irq_domain *d, unsigned int irq,
				 irq_hw_number_t hwirq)
{
	struct mdp5_mdss *mdp5_mdss = d->host_data;

	/* only the sub-block interrupts listed in VALID_IRQS may be mapped */
	if (!(VALID_IRQS & (1 << hwirq)))
		return -EPERM;

	irq_set_chip_and_handler(irq, &mdss_hw_irq_chip, handle_level_irq);
	irq_set_chip_data(irq, mdp5_mdss);

	return 0;
}
static const struct irq_domain_ops mdss_hw_irqdomain_ops = {
	.map = mdss_hw_irqdomain_map,
	.xlate = irq_domain_xlate_onecell,
};
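/* Create a linear domain of 32 hwirqs backing the HW_INTR_STATUS bits */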
static int mdss_irq_domain_init(struct mdp5_mdss *mdp5_mdss)
{
	struct device *dev = mdp5_mdss->base.dev->dev;
	struct irq_domain *d;

	d = irq_domain_add_linear(dev->of_node, 32, &mdss_hw_irqdomain_ops,
				  mdp5_mdss);
	if (!d) {
		DRM_DEV_ERROR(dev, "mdss irq domain add failed\n");
		return -ENXIO;
	}

	mdp5_mdss->irqcontroller.enabled_mask = 0;
	mdp5_mdss->irqcontroller.domain = d;

	return 0;
}
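/* Enable/disable the MDSS interface clocks (ahb, plus axi/vsync when present) */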
static int mdp5_mdss_enable(struct msm_mdss *mdss)
{
	struct mdp5_mdss *mdp5_mdss = to_mdp5_mdss(mdss);

	clk_prepare_enable(mdp5_mdss->ahb_clk);
	if (mdp5_mdss->axi_clk)
		clk_prepare_enable(mdp5_mdss->axi_clk);
	if (mdp5_mdss->vsync_clk)
		clk_prepare_enable(mdp5_mdss->vsync_clk);

	return 0;
}
static int mdp5_mdss_disable(struct msm_mdss *mdss)
{
	struct mdp5_mdss *mdp5_mdss = to_mdp5_mdss(mdss);

	if (mdp5_mdss->vsync_clk)
		clk_disable_unprepare(mdp5_mdss->vsync_clk);
	if (mdp5_mdss->axi_clk)
		clk_disable_unprepare(mdp5_mdss->axi_clk);
	clk_disable_unprepare(mdp5_mdss->ahb_clk);

	return 0;
}
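/* Look up the "iface", "bus" and "vsync" clocks; missing ones are left NULL */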
static int msm_mdss_get_clocks(struct mdp5_mdss *mdp5_mdss)
{
	struct platform_device *pdev =
			to_platform_device(mdp5_mdss->base.dev->dev);

	mdp5_mdss->ahb_clk = msm_clk_get(pdev, "iface");
	if (IS_ERR(mdp5_mdss->ahb_clk))
		mdp5_mdss->ahb_clk = NULL;

	mdp5_mdss->axi_clk = msm_clk_get(pdev, "bus");
	if (IS_ERR(mdp5_mdss->axi_clk))
		mdp5_mdss->axi_clk = NULL;

	mdp5_mdss->vsync_clk = msm_clk_get(pdev, "vsync");
	if (IS_ERR(mdp5_mdss->vsync_clk))
		mdp5_mdss->vsync_clk = NULL;

	return 0;
}
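/* Tear down the irq domain, drop the vdd regulator and disable runtime PM */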
static void mdp5_mdss_destroy(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct mdp5_mdss *mdp5_mdss = to_mdp5_mdss(priv->mdss);

	if (!mdp5_mdss)
		return;

	irq_domain_remove(mdp5_mdss->irqcontroller.domain);
	mdp5_mdss->irqcontroller.domain = NULL;

	regulator_disable(mdp5_mdss->vdd);

	pm_runtime_disable(dev->dev);
}
static const struct msm_mdss_funcs mdss_funcs = {
	.enable	 = mdp5_mdss_enable,
	.disable = mdp5_mdss_disable,
	.destroy = mdp5_mdss_destroy,
};
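/*
 * Probe-time setup for the "qcom,mdss" wrapper: map the MDSS and VBIF
 * register regions, acquire clocks, enable the vdd regulator and install
 * the top-level irq handler plus the sub-block irq domain.
 */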
int mdp5_mdss_init(struct drm_device *dev)
{
	struct platform_device *pdev = to_platform_device(dev->dev);
	struct msm_drm_private *priv = dev->dev_private;
	struct mdp5_mdss *mdp5_mdss;
	int ret;

	if (!of_device_is_compatible(dev->dev->of_node, "qcom,mdss"))
		return 0;

	mdp5_mdss = devm_kzalloc(dev->dev, sizeof(*mdp5_mdss), GFP_KERNEL);
	if (!mdp5_mdss) {
		ret = -ENOMEM;
		goto fail;
	}

	mdp5_mdss->base.dev = dev;

	mdp5_mdss->mmio = msm_ioremap(pdev, "mdss_phys", "MDSS");
	if (IS_ERR(mdp5_mdss->mmio)) {
		ret = PTR_ERR(mdp5_mdss->mmio);
		goto fail;
	}

	mdp5_mdss->vbif = msm_ioremap(pdev, "vbif_phys", "VBIF");
	if (IS_ERR(mdp5_mdss->vbif)) {
		ret = PTR_ERR(mdp5_mdss->vbif);
		goto fail;
	}

	ret = msm_mdss_get_clocks(mdp5_mdss);
	if (ret) {
		DRM_DEV_ERROR(dev->dev, "failed to get clocks: %d\n", ret);
		goto fail;
	}

	/* Regulator to enable GDSCs in downstream kernels */
	mdp5_mdss->vdd = devm_regulator_get(dev->dev, "vdd");
	if (IS_ERR(mdp5_mdss->vdd)) {
		ret = PTR_ERR(mdp5_mdss->vdd);
		goto fail;
	}

	ret = regulator_enable(mdp5_mdss->vdd);
	if (ret) {
		DRM_DEV_ERROR(dev->dev, "failed to enable regulator vdd: %d\n",
			      ret);
		goto fail;
	}

	ret = devm_request_irq(dev->dev, platform_get_irq(pdev, 0),
			       mdss_irq, 0, "mdss_isr", mdp5_mdss);
	if (ret) {
		DRM_DEV_ERROR(dev->dev, "failed to init irq: %d\n", ret);
		goto fail_irq;
	}

	ret = mdss_irq_domain_init(mdp5_mdss);
	if (ret) {
		DRM_DEV_ERROR(dev->dev, "failed to init sub-block irqs: %d\n", ret);
		goto fail_irq;
	}

	mdp5_mdss->base.funcs = &mdss_funcs;
	priv->mdss = &mdp5_mdss->base;

	pm_runtime_enable(dev->dev);

	return 0;
fail_irq:
	regulator_disable(mdp5_mdss->vdd);
fail:
	return ret;
}