/*
 * Extraction provenance (was bare web-page text; kept as an inert comment):
 * treewide: remove redundant IS_ERR() before error code check
 * [linux/fpc-iii.git] drivers/gpu/drm/i915/intel_wakeref.h
 * blob 7d1e676b71ef7d4be0dffccebc13e7e074f44360
 */
1 /*
2 * SPDX-License-Identifier: MIT
4 * Copyright © 2019 Intel Corporation
5 */
7 #ifndef INTEL_WAKEREF_H
8 #define INTEL_WAKEREF_H
10 #include <linux/atomic.h>
11 #include <linux/bits.h>
12 #include <linux/lockdep.h>
13 #include <linux/mutex.h>
14 #include <linux/refcount.h>
15 #include <linux/stackdepot.h>
16 #include <linux/timer.h>
17 #include <linux/workqueue.h>
19 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
20 #define INTEL_WAKEREF_BUG_ON(expr) BUG_ON(expr)
21 #else
22 #define INTEL_WAKEREF_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr)
23 #endif
25 struct intel_runtime_pm;
26 struct intel_wakeref;
28 typedef depot_stack_handle_t intel_wakeref_t;
/*
 * Callbacks invoked on the first get and the last put, run underneath
 * the wakeref mutex. Either may fail by returning a negative error code.
 */
struct intel_wakeref_ops {
	int (*get)(struct intel_wakeref *wf);
	int (*put)(struct intel_wakeref *wf);
};
35 struct intel_wakeref {
36 atomic_t count;
37 struct mutex mutex;
39 intel_wakeref_t wakeref;
41 struct intel_runtime_pm *rpm;
42 const struct intel_wakeref_ops *ops;
44 struct work_struct work;
47 struct intel_wakeref_lockclass {
48 struct lock_class_key mutex;
49 struct lock_class_key work;
void __intel_wakeref_init(struct intel_wakeref *wf,
			  struct intel_runtime_pm *rpm,
			  const struct intel_wakeref_ops *ops,
			  struct intel_wakeref_lockclass *key);

/*
 * intel_wakeref_init - initialise @wf, giving each expansion site its own
 * static lockdep class so distinct wakerefs are not conflated by lockdep.
 */
#define intel_wakeref_init(wf, rpm, ops) do {				\
	static struct intel_wakeref_lockclass __key;			\
									\
	__intel_wakeref_init((wf), (rpm), (ops), &__key);		\
} while (0)
/* Slow paths for the 0 -> 1 and 1 -> 0 count transitions. */
int __intel_wakeref_get_first(struct intel_wakeref *wf);
void __intel_wakeref_put_last(struct intel_wakeref *wf, unsigned long flags);
65 /**
66 * intel_wakeref_get: Acquire the wakeref
67 * @wf: the wakeref
69 * Acquire a hold on the wakeref. The first user to do so, will acquire
70 * the runtime pm wakeref and then call the @fn underneath the wakeref
71 * mutex.
73 * Note that @fn is allowed to fail, in which case the runtime-pm wakeref
74 * will be released and the acquisition unwound, and an error reported.
76 * Returns: 0 if the wakeref was acquired successfully, or a negative error
77 * code otherwise.
79 static inline int
80 intel_wakeref_get(struct intel_wakeref *wf)
82 might_sleep();
83 if (unlikely(!atomic_inc_not_zero(&wf->count)))
84 return __intel_wakeref_get_first(wf);
86 return 0;
89 /**
90 * __intel_wakeref_get: Acquire the wakeref, again
91 * @wf: the wakeref
93 * Increment the wakeref counter, only valid if it is already held by
94 * the caller.
96 * See intel_wakeref_get().
98 static inline void
99 __intel_wakeref_get(struct intel_wakeref *wf)
101 INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count) <= 0);
102 atomic_inc(&wf->count);
106 * intel_wakeref_get_if_in_use: Acquire the wakeref
107 * @wf: the wakeref
109 * Acquire a hold on the wakeref, but only if the wakeref is already
110 * active.
112 * Returns: true if the wakeref was acquired, false otherwise.
114 static inline bool
115 intel_wakeref_get_if_active(struct intel_wakeref *wf)
117 return atomic_inc_not_zero(&wf->count);
121 * intel_wakeref_put_flags: Release the wakeref
122 * @wf: the wakeref
123 * @flags: control flags
125 * Release our hold on the wakeref. When there are no more users,
126 * the runtime pm wakeref will be released after the @fn callback is called
127 * underneath the wakeref mutex.
129 * Note that @fn is allowed to fail, in which case the runtime-pm wakeref
130 * is retained and an error reported.
132 * Returns: 0 if the wakeref was released successfully, or a negative error
133 * code otherwise.
135 static inline void
136 __intel_wakeref_put(struct intel_wakeref *wf, unsigned long flags)
137 #define INTEL_WAKEREF_PUT_ASYNC BIT(0)
139 INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count) <= 0);
140 if (unlikely(!atomic_add_unless(&wf->count, -1, 1)))
141 __intel_wakeref_put_last(wf, flags);
/* Synchronous release; may sleep when dropping the final reference. */
static inline void
intel_wakeref_put(struct intel_wakeref *wf)
{
	might_sleep();
	__intel_wakeref_put(wf, 0);
}
151 static inline void
152 intel_wakeref_put_async(struct intel_wakeref *wf)
154 __intel_wakeref_put(wf, INTEL_WAKEREF_PUT_ASYNC);
158 * intel_wakeref_lock: Lock the wakeref (mutex)
159 * @wf: the wakeref
161 * Locks the wakeref to prevent it being acquired or released. New users
162 * can still adjust the counter, but the wakeref itself (and callback)
163 * cannot be acquired or released.
165 static inline void
166 intel_wakeref_lock(struct intel_wakeref *wf)
167 __acquires(wf->mutex)
169 mutex_lock(&wf->mutex);
173 * intel_wakeref_unlock: Unlock the wakeref
174 * @wf: the wakeref
176 * Releases a previously acquired intel_wakeref_lock().
178 static inline void
179 intel_wakeref_unlock(struct intel_wakeref *wf)
180 __releases(wf->mutex)
182 mutex_unlock(&wf->mutex);
186 * intel_wakeref_unlock_wait: Wait until the active callback is complete
187 * @wf: the wakeref
189 * Waits for the active callback (under the @wf->mutex or another CPU) is
190 * complete.
192 static inline void
193 intel_wakeref_unlock_wait(struct intel_wakeref *wf)
195 mutex_lock(&wf->mutex);
196 mutex_unlock(&wf->mutex);
197 flush_work(&wf->work);
201 * intel_wakeref_is_active: Query whether the wakeref is currently held
202 * @wf: the wakeref
204 * Returns: true if the wakeref is currently held.
206 static inline bool
207 intel_wakeref_is_active(const struct intel_wakeref *wf)
209 return READ_ONCE(wf->wakeref);
213 * __intel_wakeref_defer_park: Defer the current park callback
214 * @wf: the wakeref
216 static inline void
217 __intel_wakeref_defer_park(struct intel_wakeref *wf)
219 lockdep_assert_held(&wf->mutex);
220 INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count));
221 atomic_set_release(&wf->count, 1);
/**
 * intel_wakeref_wait_for_idle: Wait until the wakeref is idle
 * @wf: the wakeref
 *
 * Wait for the earlier asynchronous release of the wakeref. Note
 * this will wait for any third party as well, so make sure you only wait
 * when you have control over the wakeref and trust no one else is acquiring
 * it.
 *
 * Return: 0 on success, error code if killed.
 */
int intel_wakeref_wait_for_idle(struct intel_wakeref *wf);
237 struct intel_wakeref_auto {
238 struct intel_runtime_pm *rpm;
239 struct timer_list timer;
240 intel_wakeref_t wakeref;
241 spinlock_t lock;
242 refcount_t count;
246 * intel_wakeref_auto: Delay the runtime-pm autosuspend
247 * @wf: the wakeref
248 * @timeout: relative timeout in jiffies
250 * The runtime-pm core uses a suspend delay after the last wakeref
251 * is released before triggering runtime suspend of the device. That
252 * delay is configurable via sysfs with little regard to the device
253 * characteristics. Instead, we want to tune the autosuspend based on our
254 * HW knowledge. intel_wakeref_auto() delays the sleep by the supplied
255 * timeout.
257 * Pass @timeout = 0 to cancel a previous autosuspend by executing the
258 * suspend immediately.
260 void intel_wakeref_auto(struct intel_wakeref_auto *wf, unsigned long timeout);
262 void intel_wakeref_auto_init(struct intel_wakeref_auto *wf,
263 struct intel_runtime_pm *rpm);
264 void intel_wakeref_auto_fini(struct intel_wakeref_auto *wf);
266 #endif /* INTEL_WAKEREF_H */