drivers/gpu/drm/msm/mdp4/mdp4_irq.c
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "msm_drv.h"
#include "mdp4_kms.h"

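/* Bookkeeping for a synchronous wait on an interrupt (see
 * mdp4_irq_wait()): the embedded handler decrements count and wakes
 * the waiter.
 */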
struct mdp4_irq_wait {
	struct mdp4_irq irq;
	int count;
};

static DECLARE_WAIT_QUEUE_HEAD(wait_event);

static DEFINE_SPINLOCK(list_lock);

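/* Recompute the INTR_ENABLE mask from the current vblank mask plus the
 * irqmask of every registered handler.  Caller must hold list_lock.
 */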
static void update_irq(struct mdp4_kms *mdp4_kms)
{
	struct mdp4_irq *irq;
	uint32_t irqmask = mdp4_kms->vblank_mask;

	BUG_ON(!spin_is_locked(&list_lock));

	list_for_each_entry(irq, &mdp4_kms->irq_list, node)
		irqmask |= irq->irqmask;

	mdp4_write(mdp4_kms, REG_MDP4_INTR_ENABLE, irqmask);
}

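/* Like update_irq(), for callers that do not already hold list_lock. */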
static void update_irq_unlocked(struct mdp4_kms *mdp4_kms)
{
	unsigned long flags;
	spin_lock_irqsave(&list_lock, flags);
	update_irq(mdp4_kms);
	spin_unlock_irqrestore(&list_lock, flags);
}

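/* Always-registered handler that just logs error interrupts. */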
static void mdp4_irq_error_handler(struct mdp4_irq *irq, uint32_t irqstatus)
{
	DRM_ERROR("errors: %08x\n", irqstatus);
}

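/* Ack anything left pending before interrupt handling is installed. */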
void mdp4_irq_preinstall(struct msm_kms *kms)
{
	struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
	mdp4_write(mdp4_kms, REG_MDP4_INTR_CLEAR, 0xffffffff);
}

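/* Initialize the handler list and register the error handler for
 * primary/external interface underrun interrupts.
 */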
int mdp4_irq_postinstall(struct msm_kms *kms)
{
	struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
	struct mdp4_irq *error_handler = &mdp4_kms->error_handler;

	INIT_LIST_HEAD(&mdp4_kms->irq_list);

	error_handler->irq = mdp4_irq_error_handler;
	error_handler->irqmask = MDP4_IRQ_PRIMARY_INTF_UDERRUN |
			MDP4_IRQ_EXTERNAL_INTF_UDERRUN;

	mdp4_irq_register(mdp4_kms, error_handler);

	return 0;
}

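/* Mask off all MDP4 interrupts. */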
void mdp4_irq_uninstall(struct msm_kms *kms)
{
	struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
	mdp4_write(mdp4_kms, REG_MDP4_INTR_ENABLE, 0x00000000);
}

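/* Top-level interrupt handler: read and ack the status bits, hand
 * vblanks to the DRM core, then dispatch to registered handlers.
 * list_lock is dropped around each callback; in_irq tells a nested
 * register/unregister to skip reprogramming INTR_ENABLE, since it is
 * rewritten once here after the list walk.
 */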
irqreturn_t mdp4_irq(struct msm_kms *kms)
{
	struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
	struct drm_device *dev = mdp4_kms->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct mdp4_irq *handler, *n;
	unsigned long flags;
	unsigned int id;
	uint32_t status;

	status = mdp4_read(mdp4_kms, REG_MDP4_INTR_STATUS);
	mdp4_write(mdp4_kms, REG_MDP4_INTR_CLEAR, status);

	VERB("status=%08x", status);

	for (id = 0; id < priv->num_crtcs; id++)
		if (status & mdp4_crtc_vblank(priv->crtcs[id]))
			drm_handle_vblank(dev, id);

	spin_lock_irqsave(&list_lock, flags);
	mdp4_kms->in_irq = true;
	list_for_each_entry_safe(handler, n, &mdp4_kms->irq_list, node) {
		if (handler->irqmask & status) {
			spin_unlock_irqrestore(&list_lock, flags);
			handler->irq(handler, handler->irqmask & status);
			spin_lock_irqsave(&list_lock, flags);
		}
	}
	mdp4_kms->in_irq = false;
	update_irq(mdp4_kms);
	spin_unlock_irqrestore(&list_lock, flags);

	return IRQ_HANDLED;
}

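/* Add this CRTC's vblank bit to the enable mask. */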
int mdp4_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
{
	struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
	unsigned long flags;

	spin_lock_irqsave(&list_lock, flags);
	mdp4_kms->vblank_mask |= mdp4_crtc_vblank(crtc);
	update_irq(mdp4_kms);
	spin_unlock_irqrestore(&list_lock, flags);

	return 0;
}

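/* Remove this CRTC's vblank bit from the enable mask. */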
void mdp4_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
{
	struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
	unsigned long flags;

	spin_lock_irqsave(&list_lock, flags);
	mdp4_kms->vblank_mask &= ~mdp4_crtc_vblank(crtc);
	update_irq(mdp4_kms);
	spin_unlock_irqrestore(&list_lock, flags);
}

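/* Temporary handler installed by mdp4_irq_wait() to wake the waiter. */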
static void wait_irq(struct mdp4_irq *irq, uint32_t irqstatus)
{
	struct mdp4_irq_wait *wait =
			container_of(irq, struct mdp4_irq_wait, irq);
	wait->count--;
	wake_up_all(&wait_event);
}

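/* Block until the next interrupt matching irqmask fires. */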
void mdp4_irq_wait(struct mdp4_kms *mdp4_kms, uint32_t irqmask)
{
	struct mdp4_irq_wait wait = {
		.irq = {
			.irq = wait_irq,
			.irqmask = irqmask,
		},
		.count = 1,
	};
	mdp4_irq_register(mdp4_kms, &wait.irq);
	wait_event(wait_event, (wait.count <= 0));
	mdp4_irq_unregister(mdp4_kms, &wait.irq);
}

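/* Add a handler to the list and, unless called from within the irq
 * handler itself, reprogram the enable mask.
 */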
void mdp4_irq_register(struct mdp4_kms *mdp4_kms, struct mdp4_irq *irq)
{
	unsigned long flags;
	bool needs_update = false;

	spin_lock_irqsave(&list_lock, flags);

	if (!irq->registered) {
		irq->registered = true;
		list_add(&irq->node, &mdp4_kms->irq_list);
		needs_update = !mdp4_kms->in_irq;
	}

	spin_unlock_irqrestore(&list_lock, flags);

	if (needs_update)
		update_irq_unlocked(mdp4_kms);
}

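/* Remove a handler from the list and, unless called from within the
 * irq handler itself, reprogram the enable mask.
 */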
void mdp4_irq_unregister(struct mdp4_kms *mdp4_kms, struct mdp4_irq *irq)
{
	unsigned long flags;
	bool needs_update = false;

	spin_lock_irqsave(&list_lock, flags);

	if (irq->registered) {
		irq->registered = false;
		list_del(&irq->node);
		needs_update = !mdp4_kms->in_irq;
	}

	spin_unlock_irqrestore(&list_lock, flags);

	if (needs_update)
		update_irq_unlocked(mdp4_kms);
}