Merge tag 'io_uring-5.11-2021-01-16' of git://git.kernel.dk/linux-block
[linux/fpc-iii.git] / drivers / gpu / drm / msm / disp / mdp_kms.c
blob: 3c35ccfc733154b6fff1b66969f98888e00df94a
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (C) 2013 Red Hat
4 * Author: Rob Clark <robdclark@gmail.com>
5 */
8 #include "msm_drv.h"
9 #include "mdp_kms.h"
/* Tracks one synchronous wait for an irq (see mdp_irq_wait()): the
 * embedded mdp_irq's callback decrements 'count' and wakes the waiter.
 */
struct mdp_irq_wait {
	struct mdp_irq irq;	/* callback registered for the awaited mask */
	int count;		/* remaining fires before the wait completes */
};

/* waitqueue that wait_irq() wakes whenever an awaited irq fires */
static DECLARE_WAIT_QUEUE_HEAD(wait_event);

/* protects mdp_kms->irq_list, vblank_mask, cur_irq_mask and in_irq */
static DEFINE_SPINLOCK(list_lock);
21 static void update_irq(struct mdp_kms *mdp_kms)
23 struct mdp_irq *irq;
24 uint32_t irqmask = mdp_kms->vblank_mask;
26 assert_spin_locked(&list_lock);
28 list_for_each_entry(irq, &mdp_kms->irq_list, node)
29 irqmask |= irq->irqmask;
31 mdp_kms->funcs->set_irqmask(mdp_kms, irqmask, mdp_kms->cur_irq_mask);
32 mdp_kms->cur_irq_mask = irqmask;
35 /* if an mdp_irq's irqmask has changed, such as when mdp5 crtc<->encoder
36 * link changes, this must be called to figure out the new global irqmask
38 void mdp_irq_update(struct mdp_kms *mdp_kms)
40 unsigned long flags;
41 spin_lock_irqsave(&list_lock, flags);
42 update_irq(mdp_kms);
43 spin_unlock_irqrestore(&list_lock, flags);
/* Dispatch a hw status word to every registered handler whose irqmask
 * overlaps it.  Called from the hw-specific top-level irq handler.
 */
void mdp_dispatch_irqs(struct mdp_kms *mdp_kms, uint32_t status)
{
	struct mdp_irq *handler, *n;
	unsigned long flags;

	spin_lock_irqsave(&list_lock, flags);
	mdp_kms->in_irq = true;
	/* _safe iteration: a callback may (un)register irqs while the
	 * lock is dropped below, mutating the list under us.
	 */
	list_for_each_entry_safe(handler, n, &mdp_kms->irq_list, node) {
		if (handler->irqmask & status) {
			/* drop the lock so the callback may call back into
			 * mdp_irq_register()/mdp_irq_unregister() safely
			 */
			spin_unlock_irqrestore(&list_lock, flags);
			handler->irq(handler, handler->irqmask & status);
			spin_lock_irqsave(&list_lock, flags);
		}
	}
	mdp_kms->in_irq = false;
	/* (un)registrations that happened during dispatch deferred their
	 * hw mask update to here (they saw in_irq == true)
	 */
	update_irq(mdp_kms);
	spin_unlock_irqrestore(&list_lock, flags);
}
66 void mdp_update_vblank_mask(struct mdp_kms *mdp_kms, uint32_t mask, bool enable)
68 unsigned long flags;
70 spin_lock_irqsave(&list_lock, flags);
71 if (enable)
72 mdp_kms->vblank_mask |= mask;
73 else
74 mdp_kms->vblank_mask &= ~mask;
75 update_irq(mdp_kms);
76 spin_unlock_irqrestore(&list_lock, flags);
79 static void wait_irq(struct mdp_irq *irq, uint32_t irqstatus)
81 struct mdp_irq_wait *wait =
82 container_of(irq, struct mdp_irq_wait, irq);
83 wait->count--;
84 wake_up_all(&wait_event);
87 void mdp_irq_wait(struct mdp_kms *mdp_kms, uint32_t irqmask)
89 struct mdp_irq_wait wait = {
90 .irq = {
91 .irq = wait_irq,
92 .irqmask = irqmask,
94 .count = 1,
96 mdp_irq_register(mdp_kms, &wait.irq);
97 wait_event_timeout(wait_event, (wait.count <= 0),
98 msecs_to_jiffies(100));
99 mdp_irq_unregister(mdp_kms, &wait.irq);
102 void mdp_irq_register(struct mdp_kms *mdp_kms, struct mdp_irq *irq)
104 unsigned long flags;
105 bool needs_update = false;
107 spin_lock_irqsave(&list_lock, flags);
109 if (!irq->registered) {
110 irq->registered = true;
111 list_add(&irq->node, &mdp_kms->irq_list);
112 needs_update = !mdp_kms->in_irq;
115 spin_unlock_irqrestore(&list_lock, flags);
117 if (needs_update)
118 mdp_irq_update(mdp_kms);
121 void mdp_irq_unregister(struct mdp_kms *mdp_kms, struct mdp_irq *irq)
123 unsigned long flags;
124 bool needs_update = false;
126 spin_lock_irqsave(&list_lock, flags);
128 if (irq->registered) {
129 irq->registered = false;
130 list_del(&irq->node);
131 needs_update = !mdp_kms->in_irq;
134 spin_unlock_irqrestore(&list_lock, flags);
136 if (needs_update)
137 mdp_irq_update(mdp_kms);