/*
 * Source: Linux kernel tree, drivers/gpu/drm/msm/mdp/mdp_kms.c
 * (gitweb blob 3be48f7c36beafa362a90f1c6c631848806779cf)
 */
1 /*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
19 #include "msm_drv.h"
20 #include "mdp_kms.h"
/* Context for a synchronous wait on MDP interrupt(s): the embedded
 * handler (wait_irq) decrements 'count' and wakes the waiter sleeping
 * in mdp_irq_wait().
 */
struct mdp_irq_wait {
	struct mdp_irq irq;	/* handler registered for the duration of the wait */
	int count;		/* remaining events before the waiter is released */
};
/* Waitqueue that mdp_irq_wait() sleeps on; woken from irq context by wait_irq(). */
static DECLARE_WAIT_QUEUE_HEAD(wait_event);

/* Protects mdp_kms->irq_list, ->vblank_mask and ->in_irq. */
static DEFINE_SPINLOCK(list_lock);
32 static void update_irq(struct mdp_kms *mdp_kms)
34 struct mdp_irq *irq;
35 uint32_t irqmask = mdp_kms->vblank_mask;
37 BUG_ON(!spin_is_locked(&list_lock));
39 list_for_each_entry(irq, &mdp_kms->irq_list, node)
40 irqmask |= irq->irqmask;
42 mdp_kms->funcs->set_irqmask(mdp_kms, irqmask);
45 static void update_irq_unlocked(struct mdp_kms *mdp_kms)
47 unsigned long flags;
48 spin_lock_irqsave(&list_lock, flags);
49 update_irq(mdp_kms);
50 spin_unlock_irqrestore(&list_lock, flags);
/* Dispatch the given irq status bits to every registered handler whose
 * irqmask overlaps them, then reprogram the hardware mask.  Called from
 * the driver's irq path with the raw status read from hardware.
 */
void mdp_dispatch_irqs(struct mdp_kms *mdp_kms, uint32_t status)
{
	struct mdp_irq *handler, *n;
	unsigned long flags;

	spin_lock_irqsave(&list_lock, flags);
	/* While in_irq is set, register/unregister defer the hardware mask
	 * update to the update_irq() call at the bottom of this function.
	 */
	mdp_kms->in_irq = true;
	list_for_each_entry_safe(handler, n, &mdp_kms->irq_list, node) {
		if (handler->irqmask & status) {
			/* Drop the lock across the callback so the handler
			 * may register/unregister irqs without deadlocking.
			 */
			spin_unlock_irqrestore(&list_lock, flags);
			handler->irq(handler, handler->irqmask & status);
			spin_lock_irqsave(&list_lock, flags);
			/* NOTE(review): if the callback unregisters the saved
			 * next entry 'n' while the lock is dropped, resuming
			 * the _safe iteration from 'n' walks a deleted node —
			 * confirm callers never unregister other handlers
			 * from irq context.
			 */
		}
	}
	mdp_kms->in_irq = false;
	update_irq(mdp_kms);
	spin_unlock_irqrestore(&list_lock, flags);
}
73 void mdp_update_vblank_mask(struct mdp_kms *mdp_kms, uint32_t mask, bool enable)
75 unsigned long flags;
77 spin_lock_irqsave(&list_lock, flags);
78 if (enable)
79 mdp_kms->vblank_mask |= mask;
80 else
81 mdp_kms->vblank_mask &= ~mask;
82 update_irq(mdp_kms);
83 spin_unlock_irqrestore(&list_lock, flags);
86 static void wait_irq(struct mdp_irq *irq, uint32_t irqstatus)
88 struct mdp_irq_wait *wait =
89 container_of(irq, struct mdp_irq_wait, irq);
90 wait->count--;
91 wake_up_all(&wait_event);
94 void mdp_irq_wait(struct mdp_kms *mdp_kms, uint32_t irqmask)
96 struct mdp_irq_wait wait = {
97 .irq = {
98 .irq = wait_irq,
99 .irqmask = irqmask,
101 .count = 1,
103 mdp_irq_register(mdp_kms, &wait.irq);
104 wait_event(wait_event, (wait.count <= 0));
105 mdp_irq_unregister(mdp_kms, &wait.irq);
108 void mdp_irq_register(struct mdp_kms *mdp_kms, struct mdp_irq *irq)
110 unsigned long flags;
111 bool needs_update = false;
113 spin_lock_irqsave(&list_lock, flags);
115 if (!irq->registered) {
116 irq->registered = true;
117 list_add(&irq->node, &mdp_kms->irq_list);
118 needs_update = !mdp_kms->in_irq;
121 spin_unlock_irqrestore(&list_lock, flags);
123 if (needs_update)
124 update_irq_unlocked(mdp_kms);
127 void mdp_irq_unregister(struct mdp_kms *mdp_kms, struct mdp_irq *irq)
129 unsigned long flags;
130 bool needs_update = false;
132 spin_lock_irqsave(&list_lock, flags);
134 if (irq->registered) {
135 irq->registered = false;
136 list_del(&irq->node);
137 needs_update = !mdp_kms->in_irq;
140 spin_unlock_irqrestore(&list_lock, flags);
142 if (needs_update)
143 update_irq_unlocked(mdp_kms);