/* drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c */
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/irqdomain.h>
#include <linux/irq.h>

#include "msm_drv.h"
#include "mdp5_kms.h"
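
/*
 * Program the aggregate MDP interrupt-enable mask. This is the
 * set_irqmask hook used by the shared mdp_kms irq helpers whenever the
 * set of requested interrupts changes.
 */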
void mdp5_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask)
{
        mdp5_write(to_mdp5_kms(mdp_kms), REG_MDP5_MDP_INTR_EN(0), irqmask);
}
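
/* Catch-all for the error interrupts registered in mdp5_irq_postinstall() */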
static void mdp5_irq_error_handler(struct mdp_irq *irq, uint32_t irqstatus)
{
        DRM_ERROR("errors: %08x\n", irqstatus);
}
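
/*
 * IRQ preinstall hook: ack anything pending and mask all MDP interrupts
 * before the handler is hooked up. mdp5_enable()/mdp5_disable() keep the
 * block clocked across the register writes.
 */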
void mdp5_irq_preinstall(struct msm_kms *kms)
{
        struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
        mdp5_enable(mdp5_kms);
        mdp5_write(mdp5_kms, REG_MDP5_MDP_INTR_CLEAR(0), 0xffffffff);
        mdp5_write(mdp5_kms, REG_MDP5_MDP_INTR_EN(0), 0x00000000);
        mdp5_disable(mdp5_kms);
}
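
/*
 * IRQ postinstall hook: register a catch-all error handler for underrun
 * interrupts on all four interfaces.
 */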
int mdp5_irq_postinstall(struct msm_kms *kms)
{
        struct mdp_kms *mdp_kms = to_mdp_kms(kms);
        struct mdp5_kms *mdp5_kms = to_mdp5_kms(mdp_kms);
        struct mdp_irq *error_handler = &mdp5_kms->error_handler;

        error_handler->irq = mdp5_irq_error_handler;
        error_handler->irqmask = MDP5_IRQ_INTF0_UNDER_RUN |
                        MDP5_IRQ_INTF1_UNDER_RUN |
                        MDP5_IRQ_INTF2_UNDER_RUN |
                        MDP5_IRQ_INTF3_UNDER_RUN;

        mdp_irq_register(mdp_kms, error_handler);

        return 0;
}
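
/* IRQ uninstall hook: mask everything again, with the block clocked */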
void mdp5_irq_uninstall(struct msm_kms *kms)
{
        struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
        mdp5_enable(mdp5_kms);
        mdp5_write(mdp5_kms, REG_MDP5_MDP_INTR_EN(0), 0x00000000);
        mdp5_disable(mdp5_kms);
}
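
/*
 * Handle the MDP-internal share of the top-level MDSS interrupt: read
 * and ack the status bits, dispatch them to registered mdp_irq handlers,
 * and fan the vblank bits out to the DRM core per CRTC.
 */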
static void mdp5_irq_mdp(struct mdp_kms *mdp_kms)
{
        struct mdp5_kms *mdp5_kms = to_mdp5_kms(mdp_kms);
        struct drm_device *dev = mdp5_kms->dev;
        struct msm_drm_private *priv = dev->dev_private;
        unsigned int id;
        uint32_t status;

        status = mdp5_read(mdp5_kms, REG_MDP5_MDP_INTR_STATUS(0));
        mdp5_write(mdp5_kms, REG_MDP5_MDP_INTR_CLEAR(0), status);

        VERB("status=%08x", status);

        mdp_dispatch_irqs(mdp_kms, status);

        for (id = 0; id < priv->num_crtcs; id++)
                if (status & mdp5_crtc_vblank(priv->crtcs[id]))
                        drm_handle_vblank(dev, id);
}
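
/*
 * Top-level MDSS interrupt handler. The MDP bit is serviced inline;
 * every other status bit belongs to a sub-block (HDMI/eDP/DSI) and is
 * forwarded through the irq domain set up below.
 */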
irqreturn_t mdp5_irq(struct msm_kms *kms)
{
        struct mdp_kms *mdp_kms = to_mdp_kms(kms);
        struct mdp5_kms *mdp5_kms = to_mdp5_kms(mdp_kms);
        uint32_t intr;

        intr = mdp5_read(mdp5_kms, REG_MDSS_HW_INTR_STATUS);

        VERB("intr=%08x", intr);

        if (intr & MDSS_HW_INTR_STATUS_INTR_MDP) {
                mdp5_irq_mdp(mdp_kms);
                intr &= ~MDSS_HW_INTR_STATUS_INTR_MDP;
        }

        while (intr) {
                irq_hw_number_t hwirq = fls(intr) - 1;
                generic_handle_irq(irq_find_mapping(
                                mdp5_kms->irqcontroller.domain, hwirq));
                intr &= ~(1 << hwirq);
        }

        return IRQ_HANDLED;
}
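
/*
 * Vblank on/off hooks (this function and mdp5_disable_vblank() below):
 * they just add or remove the CRTC's vblank bit in the shared interrupt
 * mask maintained by the mdp_kms helpers.
 */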
int mdp5_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
{
        mdp_update_vblank_mask(to_mdp_kms(kms),
                        mdp5_crtc_vblank(crtc), true);
        return 0;
}

void mdp5_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
{
        mdp_update_vblank_mask(to_mdp_kms(kms),
                        mdp5_crtc_vblank(crtc), false);
}

/*
 * interrupt-controller implementation, so sub-blocks (hdmi/eDP/dsi/etc)
 * can register to get their irqs delivered
 */

#define VALID_IRQS  (MDSS_HW_INTR_STATUS_INTR_DSI0 | \
                MDSS_HW_INTR_STATUS_INTR_DSI1 | \
                MDSS_HW_INTR_STATUS_INTR_HDMI | \
                MDSS_HW_INTR_STATUS_INTR_EDP)
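
/*
 * The mask/unmask callbacks only track enablement in the software
 * bitmask irqcontroller.enabled_mask (the hardware routing through the
 * single MDSS status register is fixed); the barriers presumably pair
 * with readers of that bitmask elsewhere in the driver.
 */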
static void mdp5_hw_mask_irq(struct irq_data *irqd)
{
        struct mdp5_kms *mdp5_kms = irq_data_get_irq_chip_data(irqd);
        smp_mb__before_atomic();
        clear_bit(irqd->hwirq, &mdp5_kms->irqcontroller.enabled_mask);
        smp_mb__after_atomic();
}

static void mdp5_hw_unmask_irq(struct irq_data *irqd)
{
        struct mdp5_kms *mdp5_kms = irq_data_get_irq_chip_data(irqd);
        smp_mb__before_atomic();
        set_bit(irqd->hwirq, &mdp5_kms->irqcontroller.enabled_mask);
        smp_mb__after_atomic();
}
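
/* irq_chip exposed to the sub-block drivers through the domain below */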
static struct irq_chip mdp5_hw_irq_chip = {
        .name           = "mdp5",
        .irq_mask       = mdp5_hw_mask_irq,
        .irq_unmask     = mdp5_hw_unmask_irq,
};
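
/*
 * Domain .map callback: refuse hwirqs that are not one of the known
 * sub-block status bits, and wire valid ones to the level-type flow
 * handler.
 */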
static int mdp5_hw_irqdomain_map(struct irq_domain *d,
                unsigned int irq, irq_hw_number_t hwirq)
{
        struct mdp5_kms *mdp5_kms = d->host_data;

        if (!(VALID_IRQS & (1 << hwirq)))
                return -EPERM;

        irq_set_chip_and_handler(irq, &mdp5_hw_irq_chip, handle_level_irq);
        irq_set_chip_data(irq, mdp5_kms);
        set_irq_flags(irq, IRQF_VALID);

        return 0;
}

static struct irq_domain_ops mdp5_hw_irqdomain_ops = {
        .map = mdp5_hw_irqdomain_map,
        .xlate = irq_domain_xlate_onecell,
};
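
/*
 * Create a 32-bit linear domain whose hwirq numbers are the bit
 * positions of MDSS_HW_INTR_STATUS. A sub-block would then typically
 * claim its interrupt via DT along these lines (an illustrative sketch,
 * not copied from a real dts; the node name, address, and cell value
 * are assumptions):
 *
 *	hdmi: hdmi@4a00000 {
 *		interrupt-parent = <&mdp>;
 *		interrupts = <8>;	// bit index of MDSS_HW_INTR_STATUS_INTR_HDMI
 *	};
 *
 * irq_domain_xlate_onecell() translates the single cell straight into
 * the hwirq checked against VALID_IRQS above.
 */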
int mdp5_irq_domain_init(struct mdp5_kms *mdp5_kms)
{
        struct device *dev = mdp5_kms->dev->dev;
        struct irq_domain *d;

        d = irq_domain_add_linear(dev->of_node, 32,
                        &mdp5_hw_irqdomain_ops, mdp5_kms);
        if (!d) {
                dev_err(dev, "mdp5 irq domain add failed\n");
                return -ENXIO;
        }

        mdp5_kms->irqcontroller.enabled_mask = 0;
        mdp5_kms->irqcontroller.domain = d;

        return 0;
}
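
/*
 * Tear down the domain created by mdp5_irq_domain_init(); safe to call
 * even if init failed or never ran, since the domain pointer is checked
 * first.
 */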
void mdp5_irq_domain_fini(struct mdp5_kms *mdp5_kms)
{
        if (mdp5_kms->irqcontroller.domain) {
                irq_domain_remove(mdp5_kms->irqcontroller.domain);
                mdp5_kms->irqcontroller.domain = NULL;
        }
}