drivers/base/power/wakeirq.c
// SPDX-License-Identifier: GPL-2.0
/* Device wakeirq helper functions */
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/pm_wakeirq.h>

#include "power.h"

/**
 * dev_pm_attach_wake_irq - Attach device interrupt as a wake IRQ
 * @dev: Device entry
 * @wirq: Wake irq specific data
 *
 * Internal function to attach a dedicated wake-up interrupt as a wake IRQ.
 */
static int dev_pm_attach_wake_irq(struct device *dev, struct wake_irq *wirq)
{
        unsigned long flags;

        if (!dev || !wirq)
                return -EINVAL;

        spin_lock_irqsave(&dev->power.lock, flags);
        if (dev_WARN_ONCE(dev, dev->power.wakeirq,
                          "wake irq already initialized\n")) {
                spin_unlock_irqrestore(&dev->power.lock, flags);
                return -EEXIST;
        }

        dev->power.wakeirq = wirq;
        device_wakeup_attach_irq(dev, wirq);

        spin_unlock_irqrestore(&dev->power.lock, flags);
        return 0;
}

/**
 * dev_pm_set_wake_irq - Attach device IO interrupt as wake IRQ
 * @dev: Device entry
 * @irq: Device IO interrupt
 *
 * Attach a device IO interrupt as a wake IRQ. The wake IRQ gets
 * automatically configured for wake-up from suspend based
 * on the device specific sysfs wakeup entry. Typically called
 * during driver probe after calling device_init_wakeup().
 */
int dev_pm_set_wake_irq(struct device *dev, int irq)
{
        struct wake_irq *wirq;
        int err;

        if (irq < 0)
                return -EINVAL;

        wirq = kzalloc(sizeof(*wirq), GFP_KERNEL);
        if (!wirq)
                return -ENOMEM;

        wirq->dev = dev;
        wirq->irq = irq;

        err = dev_pm_attach_wake_irq(dev, wirq);
        if (err)
                kfree(wirq);

        return err;
}
EXPORT_SYMBOL_GPL(dev_pm_set_wake_irq);
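
/*
 * Illustrative usage sketch (not part of this file): a consumer driver would
 * typically attach its IO interrupt as a wake IRQ from probe, after enabling
 * wakeup on the device. The driver and function names below are hypothetical.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		int irq = platform_get_irq(pdev, 0);
 *
 *		if (irq < 0)
 *			return irq;
 *
 *		device_init_wakeup(&pdev->dev, true);
 *		return dev_pm_set_wake_irq(&pdev->dev, irq);
 *	}
 */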

/**
 * dev_pm_clear_wake_irq - Detach a device IO interrupt wake IRQ
 * @dev: Device entry
 *
 * Detach a device wake IRQ and free resources.
 *
 * Note that it's OK for drivers to call this without calling
 * dev_pm_set_wake_irq() as all the driver instances may not have
 * a wake IRQ configured. This avoids adding wake IRQ specific
 * checks into the drivers.
 */
void dev_pm_clear_wake_irq(struct device *dev)
{
        struct wake_irq *wirq = dev->power.wakeirq;
        unsigned long flags;

        if (!wirq)
                return;

        spin_lock_irqsave(&dev->power.lock, flags);
        device_wakeup_detach_irq(dev);
        dev->power.wakeirq = NULL;
        spin_unlock_irqrestore(&dev->power.lock, flags);

        if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED) {
                free_irq(wirq->irq, wirq);
                wirq->status &= ~WAKE_IRQ_DEDICATED_MASK;
        }
        kfree(wirq->name);
        kfree(wirq);
}
EXPORT_SYMBOL_GPL(dev_pm_clear_wake_irq);
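
/*
 * Illustrative teardown sketch (not part of this file): the matching remove
 * path simply clears the wake IRQ and disables wakeup. As noted above, it is
 * safe to call dev_pm_clear_wake_irq() even if no wake IRQ was ever set.
 * foo_remove() is a hypothetical driver callback.
 *
 *	static void foo_remove(struct platform_device *pdev)
 *	{
 *		dev_pm_clear_wake_irq(&pdev->dev);
 *		device_init_wakeup(&pdev->dev, false);
 *	}
 */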

/**
 * handle_threaded_wake_irq - Handler for dedicated wake-up interrupts
 * @irq: Device specific dedicated wake-up interrupt
 * @_wirq: Wake IRQ data
 *
 * Some devices have a separate wake-up interrupt in addition to the
 * device IO interrupt. The wake-up interrupt signals that a device
 * should be woken up from its idle state. This handler uses device
 * specific pm_runtime functions to wake the device, and then it's
 * up to the device to do whatever it needs to. Note that as the
 * device may need to restore context and start up regulators, we
 * use a threaded IRQ.
 *
 * Also note that we are not resending the lost device interrupts.
 * We assume that the wake-up interrupt just needs to wake up the
 * device, and then the device's pm_runtime_resume() can deal with the
 * situation.
 */
static irqreturn_t handle_threaded_wake_irq(int irq, void *_wirq)
{
        struct wake_irq *wirq = _wirq;
        int res;

        /* Maybe abort suspend? */
        if (irqd_is_wakeup_set(irq_get_irq_data(irq))) {
                pm_wakeup_event(wirq->dev, 0);

                return IRQ_HANDLED;
        }

        /* We don't want RPM_ASYNC or RPM_NOWAIT here */
        res = pm_runtime_resume(wirq->dev);
        if (res < 0)
                dev_warn(wirq->dev,
                         "wake IRQ with no resume: %i\n", res);

        return IRQ_HANDLED;
}

static int __dev_pm_set_dedicated_wake_irq(struct device *dev, int irq, unsigned int flag)
{
        struct wake_irq *wirq;
        int err;

        if (irq < 0)
                return -EINVAL;

        wirq = kzalloc(sizeof(*wirq), GFP_KERNEL);
        if (!wirq)
                return -ENOMEM;

        wirq->name = kasprintf(GFP_KERNEL, "%s:wakeup", dev_name(dev));
        if (!wirq->name) {
                err = -ENOMEM;
                goto err_free;
        }

        wirq->dev = dev;
        wirq->irq = irq;

        /* Prevent deferred spurious wakeirqs with disable_irq_nosync() */
        irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY);

        /*
         * Consumer device may need to power up and restore state
         * so we use a threaded irq.
         */
        err = request_threaded_irq(irq, NULL, handle_threaded_wake_irq,
                                   IRQF_ONESHOT | IRQF_NO_AUTOEN,
                                   wirq->name, wirq);
        if (err)
                goto err_free_name;

        err = dev_pm_attach_wake_irq(dev, wirq);
        if (err)
                goto err_free_irq;

        wirq->status = WAKE_IRQ_DEDICATED_ALLOCATED | flag;

        return err;

err_free_irq:
        free_irq(irq, wirq);
err_free_name:
        kfree(wirq->name);
err_free:
        kfree(wirq);

        return err;
}

/**
 * dev_pm_set_dedicated_wake_irq - Request a dedicated wake-up interrupt
 * @dev: Device entry
 * @irq: Device wake-up interrupt
 *
 * Unless your hardware has separate wake-up interrupts in addition
 * to the device IO interrupts, you don't need this.
 *
 * Sets up a threaded interrupt handler for a device that has
 * a dedicated wake-up interrupt in addition to the device IO
 * interrupt.
 */
int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq)
{
        return __dev_pm_set_dedicated_wake_irq(dev, irq, 0);
}
EXPORT_SYMBOL_GPL(dev_pm_set_dedicated_wake_irq);
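
/*
 * Illustrative usage sketch (not part of this file): a driver whose hardware
 * has a separate wake-up line, described for example as a named "wakeup"
 * interrupt in the device tree, could request it as a dedicated wake IRQ.
 * The driver, probe function and interrupt name are hypothetical.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		int wakeirq = platform_get_irq_byname_optional(pdev, "wakeup");
 *
 *		device_init_wakeup(&pdev->dev, true);
 *		if (wakeirq > 0)
 *			return dev_pm_set_dedicated_wake_irq(&pdev->dev, wakeirq);
 *
 *		return 0;
 *	}
 */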

/**
 * dev_pm_set_dedicated_wake_irq_reverse - Request a dedicated wake-up interrupt
 *					    with reverse enable ordering
 * @dev: Device entry
 * @irq: Device wake-up interrupt
 *
 * Unless your hardware has separate wake-up interrupts in addition
 * to the device IO interrupts, you don't need this.
 *
 * Sets up a threaded interrupt handler for a device that has a dedicated
 * wake-up interrupt in addition to the device IO interrupt. It sets
 * the status of WAKE_IRQ_DEDICATED_REVERSE to tell rpm_suspend()
 * to enable the dedicated wake-up interrupt after running the runtime suspend
 * callback for @dev.
 */
int dev_pm_set_dedicated_wake_irq_reverse(struct device *dev, int irq)
{
        return __dev_pm_set_dedicated_wake_irq(dev, irq, WAKE_IRQ_DEDICATED_REVERSE);
}
EXPORT_SYMBOL_GPL(dev_pm_set_dedicated_wake_irq_reverse);

/**
 * dev_pm_enable_wake_irq_check - Checks and enables wake-up interrupt
 * @dev: Device
 * @can_change_status: Can change wake-up interrupt status
 *
 * Enables wakeirq conditionally. We need to enable wake-up interrupt
 * lazily on the first rpm_suspend(). This is needed as the consumer device
 * starts in RPM_SUSPENDED state, and the first pm_runtime_get() would
 * otherwise try to disable already disabled wakeirq. The wake-up interrupt
 * starts disabled with IRQ_NOAUTOEN set.
 *
 * Should be only called from rpm_suspend() and rpm_resume() path.
 * Caller must hold &dev->power.lock to change wirq->status.
 */
void dev_pm_enable_wake_irq_check(struct device *dev,
                                  bool can_change_status)
{
        struct wake_irq *wirq = dev->power.wakeirq;

        if (!wirq || !(wirq->status & WAKE_IRQ_DEDICATED_MASK))
                return;

        if (likely(wirq->status & WAKE_IRQ_DEDICATED_MANAGED)) {
                goto enable;
        } else if (can_change_status) {
                wirq->status |= WAKE_IRQ_DEDICATED_MANAGED;
                goto enable;
        }

        return;

enable:
        if (!can_change_status || !(wirq->status & WAKE_IRQ_DEDICATED_REVERSE)) {
                enable_irq(wirq->irq);
                wirq->status |= WAKE_IRQ_DEDICATED_ENABLED;
        }
}

/**
 * dev_pm_disable_wake_irq_check - Checks and disables wake-up interrupt
 * @dev: Device
 * @cond_disable: if set, also check WAKE_IRQ_DEDICATED_REVERSE
 *
 * Disables wake-up interrupt conditionally based on status.
 * Should be only called from rpm_suspend() and rpm_resume() path.
 */
void dev_pm_disable_wake_irq_check(struct device *dev, bool cond_disable)
{
        struct wake_irq *wirq = dev->power.wakeirq;

        if (!wirq || !(wirq->status & WAKE_IRQ_DEDICATED_MASK))
                return;

        if (cond_disable && (wirq->status & WAKE_IRQ_DEDICATED_REVERSE))
                return;

        if (wirq->status & WAKE_IRQ_DEDICATED_MANAGED) {
                wirq->status &= ~WAKE_IRQ_DEDICATED_ENABLED;
                disable_irq_nosync(wirq->irq);
        }
}

/**
 * dev_pm_enable_wake_irq_complete - enable wake IRQ not enabled before
 * @dev: Device using the wake IRQ
 *
 * Enable wake IRQ conditionally based on status, mainly used if you want to
 * enable the wake IRQ after running ->runtime_suspend(), which depends on
 * WAKE_IRQ_DEDICATED_REVERSE.
 *
 * Should be only called from rpm_suspend() path.
 */
void dev_pm_enable_wake_irq_complete(struct device *dev)
{
        struct wake_irq *wirq = dev->power.wakeirq;

        if (!wirq || !(wirq->status & WAKE_IRQ_DEDICATED_MASK))
                return;

        if (wirq->status & WAKE_IRQ_DEDICATED_MANAGED &&
            wirq->status & WAKE_IRQ_DEDICATED_REVERSE) {
                enable_irq(wirq->irq);
                wirq->status |= WAKE_IRQ_DEDICATED_ENABLED;
        }
}
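
/*
 * Rough ordering sketch (an assumption based on the helpers above, not a
 * verbatim copy of rpm_suspend()): for a managed dedicated wake IRQ the PM
 * core brackets the driver's runtime suspend callback with these calls, and
 * WAKE_IRQ_DEDICATED_REVERSE defers the enable until after the callback has
 * run. driver_runtime_suspend() is a hypothetical callback name.
 *
 *	dev_pm_enable_wake_irq_check(dev, true);	// enables unless REVERSE
 *	ret = driver_runtime_suspend(dev);
 *	if (!ret)
 *		dev_pm_enable_wake_irq_complete(dev);	// enables if REVERSE
 *	else
 *		dev_pm_disable_wake_irq_check(dev, true);
 */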

/**
 * dev_pm_arm_wake_irq - Arm device wake-up
 * @wirq: Device wake-up interrupt
 *
 * Sets up the wake-up event conditionally based on
 * device_may_wakeup().
 */
void dev_pm_arm_wake_irq(struct wake_irq *wirq)
{
        if (!wirq)
                return;

        if (device_may_wakeup(wirq->dev)) {
                if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED &&
                    !(wirq->status & WAKE_IRQ_DEDICATED_ENABLED))
                        enable_irq(wirq->irq);

                enable_irq_wake(wirq->irq);
        }
}

/**
 * dev_pm_disarm_wake_irq - Disarm device wake-up
 * @wirq: Device wake-up interrupt
 *
 * Clears up the wake-up event conditionally based on
 * device_may_wakeup().
 */
void dev_pm_disarm_wake_irq(struct wake_irq *wirq)
{
        if (!wirq)
                return;

        if (device_may_wakeup(wirq->dev)) {
                disable_irq_wake(wirq->irq);

                if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED &&
                    !(wirq->status & WAKE_IRQ_DEDICATED_ENABLED))
                        disable_irq_nosync(wirq->irq);
        }
}