drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016, The Linux Foundation. All rights reserved.
 */

#include <linux/irqdomain.h>
#include <linux/irq.h>

#include "msm_drv.h"
#include "mdp5_kms.h"

#define to_mdp5_mdss(x) container_of(x, struct mdp5_mdss, base)
struct mdp5_mdss {
	struct msm_mdss base;

	void __iomem *mmio, *vbif;

	struct regulator *vdd;

	struct clk *ahb_clk;
	struct clk *axi_clk;
	struct clk *vsync_clk;

	struct {
		volatile unsigned long enabled_mask;
		struct irq_domain *domain;
	} irqcontroller;
};
static inline void mdss_write(struct mdp5_mdss *mdp5_mdss, u32 reg, u32 data)
{
	msm_writel(data, mdp5_mdss->mmio + reg);
}

static inline u32 mdss_read(struct mdp5_mdss *mdp5_mdss, u32 reg)
{
	return msm_readl(mdp5_mdss->mmio + reg);
}
static irqreturn_t mdss_irq(int irq, void *arg)
{
	struct mdp5_mdss *mdp5_mdss = arg;
	u32 intr;

	intr = mdss_read(mdp5_mdss, REG_MDSS_HW_INTR_STATUS);

	VERB("intr=%08x", intr);

	while (intr) {
		/* fls() is 1-based, so this picks the highest pending bit: */
		irq_hw_number_t hwirq = fls(intr) - 1;

		generic_handle_irq(irq_find_mapping(
				mdp5_mdss->irqcontroller.domain, hwirq));
		intr &= ~(1 << hwirq);
	}

	return IRQ_HANDLED;
}
/*
 * interrupt-controller implementation, so sub-blocks (MDP/HDMI/eDP/DSI/etc)
 * can register to get their irqs delivered
 */

#define VALID_IRQS  (MDSS_HW_INTR_STATUS_INTR_MDP | \
		MDSS_HW_INTR_STATUS_INTR_DSI0 | \
		MDSS_HW_INTR_STATUS_INTR_DSI1 | \
		MDSS_HW_INTR_STATUS_INTR_HDMI | \
		MDSS_HW_INTR_STATUS_INTR_EDP)
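/*
 * Illustrative sketch (not part of this driver): a sub-block behind this
 * controller receives its Linux irq number through the domain created in
 * mdss_irq_domain_init() below. With a hypothetical handler and cookie, the
 * consumer side would look roughly like:
 *
 *	// hwirq is the bit position in REG_MDSS_HW_INTR_STATUS
 *	unsigned int virq = irq_create_mapping(mdp5_mdss->irqcontroller.domain,
 *					       hwirq);
 *	ret = devm_request_irq(dev, virq, child_isr, 0, "mdp", child_priv);
 */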
static void mdss_hw_mask_irq(struct irq_data *irqd)
{
	struct mdp5_mdss *mdp5_mdss = irq_data_get_irq_chip_data(irqd);

	smp_mb__before_atomic();
	clear_bit(irqd->hwirq, &mdp5_mdss->irqcontroller.enabled_mask);
	smp_mb__after_atomic();
}

static void mdss_hw_unmask_irq(struct irq_data *irqd)
{
	struct mdp5_mdss *mdp5_mdss = irq_data_get_irq_chip_data(irqd);

	smp_mb__before_atomic();
	set_bit(irqd->hwirq, &mdp5_mdss->irqcontroller.enabled_mask);
	smp_mb__after_atomic();
}
static struct irq_chip mdss_hw_irq_chip = {
	.name = "mdss",
	.irq_mask = mdss_hw_mask_irq,
	.irq_unmask = mdss_hw_unmask_irq,
};
static int mdss_hw_irqdomain_map(struct irq_domain *d, unsigned int irq,
				 irq_hw_number_t hwirq)
{
	struct mdp5_mdss *mdp5_mdss = d->host_data;

	if (!(VALID_IRQS & (1 << hwirq)))
		return -EPERM;

	irq_set_chip_and_handler(irq, &mdss_hw_irq_chip, handle_level_irq);
	irq_set_chip_data(irq, mdp5_mdss);

	return 0;
}
static const struct irq_domain_ops mdss_hw_irqdomain_ops = {
	.map = mdss_hw_irqdomain_map,
	.xlate = irq_domain_xlate_onecell,
};
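/*
 * Note: irq_domain_xlate_onecell means a consumer describes its line with a
 * single DT cell holding the hwirq bit number, e.g. (illustrative fragment,
 * not taken from a real dts):
 *
 *	interrupt-parent = <&mdss>;
 *	interrupts = <0>;	// the MDP bit of HW_INTR_STATUS
 */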
static int mdss_irq_domain_init(struct mdp5_mdss *mdp5_mdss)
{
	struct device *dev = mdp5_mdss->base.dev->dev;
	struct irq_domain *d;

	/* one entry per bit of the 32-bit HW_INTR_STATUS register */
	d = irq_domain_add_linear(dev->of_node, 32, &mdss_hw_irqdomain_ops,
				  mdp5_mdss);
	if (!d) {
		DRM_DEV_ERROR(dev, "mdss irq domain add failed\n");
		return -ENXIO;
	}

	mdp5_mdss->irqcontroller.enabled_mask = 0;
	mdp5_mdss->irqcontroller.domain = d;

	return 0;
}
static int mdp5_mdss_enable(struct msm_mdss *mdss)
{
	struct mdp5_mdss *mdp5_mdss = to_mdp5_mdss(mdss);
	DBG("");

	clk_prepare_enable(mdp5_mdss->ahb_clk);
	if (mdp5_mdss->axi_clk)
		clk_prepare_enable(mdp5_mdss->axi_clk);
	if (mdp5_mdss->vsync_clk)
		clk_prepare_enable(mdp5_mdss->vsync_clk);

	return 0;
}

static int mdp5_mdss_disable(struct msm_mdss *mdss)
{
	struct mdp5_mdss *mdp5_mdss = to_mdp5_mdss(mdss);
	DBG("");

	if (mdp5_mdss->vsync_clk)
		clk_disable_unprepare(mdp5_mdss->vsync_clk);
	if (mdp5_mdss->axi_clk)
		clk_disable_unprepare(mdp5_mdss->axi_clk);
	clk_disable_unprepare(mdp5_mdss->ahb_clk);

	return 0;
}
static int msm_mdss_get_clocks(struct mdp5_mdss *mdp5_mdss)
{
	struct platform_device *pdev =
			to_platform_device(mdp5_mdss->base.dev->dev);

	mdp5_mdss->ahb_clk = msm_clk_get(pdev, "iface");
	if (IS_ERR(mdp5_mdss->ahb_clk))
		mdp5_mdss->ahb_clk = NULL;

	mdp5_mdss->axi_clk = msm_clk_get(pdev, "bus");
	if (IS_ERR(mdp5_mdss->axi_clk))
		mdp5_mdss->axi_clk = NULL;

	mdp5_mdss->vsync_clk = msm_clk_get(pdev, "vsync");
	if (IS_ERR(mdp5_mdss->vsync_clk))
		mdp5_mdss->vsync_clk = NULL;

	return 0;
}
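/*
 * Note: all three clocks are treated as optional above; any msm_clk_get()
 * error is collapsed to NULL so the enable/disable paths can skip a missing
 * clock. On newer kernels the same pattern is often written with the generic
 * helper (sketch only, not what this file does):
 *
 *	clk = devm_clk_get_optional(&pdev->dev, "bus");
 *	if (IS_ERR(clk))
 *		return PTR_ERR(clk);	// real errors (e.g. -EPROBE_DEFER)
 *					// propagate; a missing clock is NULL
 */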
static void mdp5_mdss_destroy(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct mdp5_mdss *mdp5_mdss = to_mdp5_mdss(priv->mdss);

	if (!mdp5_mdss)
		return;

	irq_domain_remove(mdp5_mdss->irqcontroller.domain);
	mdp5_mdss->irqcontroller.domain = NULL;

	regulator_disable(mdp5_mdss->vdd);

	pm_runtime_disable(dev->dev);
}
static const struct msm_mdss_funcs mdss_funcs = {
	.enable = mdp5_mdss_enable,
	.disable = mdp5_mdss_disable,
	.destroy = mdp5_mdss_destroy,
};
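/*
 * Illustrative call pattern (the real call sites live in the core msm
 * driver, e.g. its runtime-PM paths; sketch only):
 *
 *	struct msm_mdss *mdss = priv->mdss;
 *
 *	mdss->funcs->enable(mdss);	// power up: clocks on
 *	...
 *	mdss->funcs->disable(mdss);	// power down: clocks off
 */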
int mdp5_mdss_init(struct drm_device *dev)
{
	struct platform_device *pdev = to_platform_device(dev->dev);
	struct msm_drm_private *priv = dev->dev_private;
	struct mdp5_mdss *mdp5_mdss;
	int ret;

	DBG("");

	if (!of_device_is_compatible(dev->dev->of_node, "qcom,mdss"))
		return 0;

	mdp5_mdss = devm_kzalloc(dev->dev, sizeof(*mdp5_mdss), GFP_KERNEL);
	if (!mdp5_mdss) {
		ret = -ENOMEM;
		goto fail;
	}

	mdp5_mdss->base.dev = dev;

	mdp5_mdss->mmio = msm_ioremap(pdev, "mdss_phys", "MDSS");
	if (IS_ERR(mdp5_mdss->mmio)) {
		ret = PTR_ERR(mdp5_mdss->mmio);
		goto fail;
	}

	mdp5_mdss->vbif = msm_ioremap(pdev, "vbif_phys", "VBIF");
	if (IS_ERR(mdp5_mdss->vbif)) {
		ret = PTR_ERR(mdp5_mdss->vbif);
		goto fail;
	}

	ret = msm_mdss_get_clocks(mdp5_mdss);
	if (ret) {
		DRM_DEV_ERROR(dev->dev, "failed to get clocks: %d\n", ret);
		goto fail;
	}

	/* Regulator to enable GDSCs in downstream kernels */
	mdp5_mdss->vdd = devm_regulator_get(dev->dev, "vdd");
	if (IS_ERR(mdp5_mdss->vdd)) {
		ret = PTR_ERR(mdp5_mdss->vdd);
		goto fail;
	}

	ret = regulator_enable(mdp5_mdss->vdd);
	if (ret) {
		DRM_DEV_ERROR(dev->dev, "failed to enable regulator vdd: %d\n",
			ret);
		goto fail;
	}

	ret = devm_request_irq(dev->dev, platform_get_irq(pdev, 0),
			       mdss_irq, 0, "mdss_isr", mdp5_mdss);
	if (ret) {
		DRM_DEV_ERROR(dev->dev, "failed to init irq: %d\n", ret);
		goto fail_irq;
	}

	ret = mdss_irq_domain_init(mdp5_mdss);
	if (ret) {
		DRM_DEV_ERROR(dev->dev, "failed to init sub-block irqs: %d\n", ret);
		goto fail_irq;
	}

	mdp5_mdss->base.funcs = &mdss_funcs;
	priv->mdss = &mdp5_mdss->base;

	pm_runtime_enable(dev->dev);

	return 0;
fail_irq:
	regulator_disable(mdp5_mdss->vdd);
fail:
	return ret;
}
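/*
 * Sketch of the expected probe-time flow in the core driver (illustrative;
 * the real sequence lives in msm_drv.c):
 *
 *	ret = mdp5_mdss_init(ddev);	// no-op unless the node is
 *					// compatible with "qcom,mdss"
 *	if (ret)
 *		goto err;
 */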