/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */
#ifndef INTEL_WAKEREF_H
#define INTEL_WAKEREF_H

#include <linux/atomic.h>
#include <linux/bits.h>
#include <linux/lockdep.h>
#include <linux/mutex.h>
#include <linux/refcount.h>
#include <linux/stackdepot.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
19 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
20 #define INTEL_WAKEREF_BUG_ON(expr) BUG_ON(expr)
22 #define INTEL_WAKEREF_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr)
25 struct intel_runtime_pm
;
28 typedef depot_stack_handle_t intel_wakeref_t
;
/*
 * Backend callbacks for the idle transitions: @get is invoked for the first
 * acquire and @put for the last release, underneath the wakeref mutex (see
 * intel_wakeref_get()/__intel_wakeref_put() below). Both may fail and return
 * a negative error code.
 */
struct intel_wakeref_ops {
	int (*get)(struct intel_wakeref *wf);
	int (*put)(struct intel_wakeref *wf);
};
35 struct intel_wakeref
{
39 intel_wakeref_t wakeref
;
41 struct intel_runtime_pm
*rpm
;
42 const struct intel_wakeref_ops
*ops
;
44 struct work_struct work
;
47 struct intel_wakeref_lockclass
{
48 struct lock_class_key mutex
;
49 struct lock_class_key work
;
void __intel_wakeref_init(struct intel_wakeref *wf,
			  struct intel_runtime_pm *rpm,
			  const struct intel_wakeref_ops *ops,
			  struct intel_wakeref_lockclass *key);

/*
 * Initialise @wf, binding it to @rpm and the @ops callbacks. A static
 * lockclass is instantiated per call-site so lockdep can tell different
 * wakeref users apart.
 */
#define intel_wakeref_init(wf, rpm, ops) do {				\
	static struct intel_wakeref_lockclass __key;			\
									\
	__intel_wakeref_init((wf), (rpm), (ops), &__key);		\
} while (0)
/* Slow paths: first acquisition and final release (takes wf->mutex). */
int __intel_wakeref_get_first(struct intel_wakeref *wf);
void __intel_wakeref_put_last(struct intel_wakeref *wf, unsigned long flags);
66 * intel_wakeref_get: Acquire the wakeref
69 * Acquire a hold on the wakeref. The first user to do so, will acquire
70 * the runtime pm wakeref and then call the @fn underneath the wakeref
73 * Note that @fn is allowed to fail, in which case the runtime-pm wakeref
74 * will be released and the acquisition unwound, and an error reported.
76 * Returns: 0 if the wakeref was acquired successfully, or a negative error
80 intel_wakeref_get(struct intel_wakeref
*wf
)
83 if (unlikely(!atomic_inc_not_zero(&wf
->count
)))
84 return __intel_wakeref_get_first(wf
);
90 * __intel_wakeref_get: Acquire the wakeref, again
93 * Increment the wakeref counter, only valid if it is already held by
96 * See intel_wakeref_get().
99 __intel_wakeref_get(struct intel_wakeref
*wf
)
101 INTEL_WAKEREF_BUG_ON(atomic_read(&wf
->count
) <= 0);
102 atomic_inc(&wf
->count
);
106 * intel_wakeref_get_if_in_use: Acquire the wakeref
109 * Acquire a hold on the wakeref, but only if the wakeref is already
112 * Returns: true if the wakeref was acquired, false otherwise.
115 intel_wakeref_get_if_active(struct intel_wakeref
*wf
)
117 return atomic_inc_not_zero(&wf
->count
);
121 * intel_wakeref_put_flags: Release the wakeref
123 * @flags: control flags
125 * Release our hold on the wakeref. When there are no more users,
126 * the runtime pm wakeref will be released after the @fn callback is called
127 * underneath the wakeref mutex.
129 * Note that @fn is allowed to fail, in which case the runtime-pm wakeref
130 * is retained and an error reported.
132 * Returns: 0 if the wakeref was released successfully, or a negative error
136 __intel_wakeref_put(struct intel_wakeref
*wf
, unsigned long flags
)
137 #define INTEL_WAKEREF_PUT_ASYNC BIT(0)
139 INTEL_WAKEREF_BUG_ON(atomic_read(&wf
->count
) <= 0);
140 if (unlikely(!atomic_add_unless(&wf
->count
, -1, 1)))
141 __intel_wakeref_put_last(wf
, flags
);
/* Release a hold, processing a final put synchronously (may sleep). */
static inline void
intel_wakeref_put(struct intel_wakeref *wf)
{
	might_sleep();
	__intel_wakeref_put(wf, 0);
}
152 intel_wakeref_put_async(struct intel_wakeref
*wf
)
154 __intel_wakeref_put(wf
, INTEL_WAKEREF_PUT_ASYNC
);
158 * intel_wakeref_lock: Lock the wakeref (mutex)
161 * Locks the wakeref to prevent it being acquired or released. New users
162 * can still adjust the counter, but the wakeref itself (and callback)
163 * cannot be acquired or released.
166 intel_wakeref_lock(struct intel_wakeref
*wf
)
167 __acquires(wf
->mutex
)
169 mutex_lock(&wf
->mutex
);
173 * intel_wakeref_unlock: Unlock the wakeref
176 * Releases a previously acquired intel_wakeref_lock().
179 intel_wakeref_unlock(struct intel_wakeref
*wf
)
180 __releases(wf
->mutex
)
182 mutex_unlock(&wf
->mutex
);
186 * intel_wakeref_unlock_wait: Wait until the active callback is complete
189 * Waits for the active callback (under the @wf->mutex or another CPU) is
193 intel_wakeref_unlock_wait(struct intel_wakeref
*wf
)
195 mutex_lock(&wf
->mutex
);
196 mutex_unlock(&wf
->mutex
);
197 flush_work(&wf
->work
);
201 * intel_wakeref_is_active: Query whether the wakeref is currently held
204 * Returns: true if the wakeref is currently held.
207 intel_wakeref_is_active(const struct intel_wakeref
*wf
)
209 return READ_ONCE(wf
->wakeref
);
213 * __intel_wakeref_defer_park: Defer the current park callback
217 __intel_wakeref_defer_park(struct intel_wakeref
*wf
)
219 lockdep_assert_held(&wf
->mutex
);
220 INTEL_WAKEREF_BUG_ON(atomic_read(&wf
->count
));
221 atomic_set_release(&wf
->count
, 1);
/**
 * intel_wakeref_wait_for_idle: Wait until the wakeref is idle
 * @wf: the wakeref
 *
 * Wait for the earlier asynchronous release of the wakeref. Note
 * this will wait for any third party as well, so make sure you only wait
 * when you have control over the wakeref and trust no one else is acquiring
 * it.
 *
 * Return: 0 on success, error code if killed.
 */
int intel_wakeref_wait_for_idle(struct intel_wakeref *wf);
237 struct intel_wakeref_auto
{
238 struct intel_runtime_pm
*rpm
;
239 struct timer_list timer
;
240 intel_wakeref_t wakeref
;
/**
 * intel_wakeref_auto: Delay the runtime-pm autosuspend
 * @wf: the wakeref
 * @timeout: relative timeout in jiffies
 *
 * The runtime-pm core uses a suspend delay after the last wakeref
 * is released before triggering runtime suspend of the device. That
 * delay is configurable via sysfs with little regard to the device
 * characteristics. Instead, we want to tune the autosuspend based on our
 * HW knowledge. intel_wakeref_auto() delays the sleep by the supplied
 * timeout.
 *
 * Pass @timeout = 0 to cancel a previous autosuspend by executing the
 * suspend immediately.
 */
void intel_wakeref_auto(struct intel_wakeref_auto *wf,
			unsigned long timeout);

void intel_wakeref_auto_init(struct intel_wakeref_auto *wf,
			     struct intel_runtime_pm *rpm);
void intel_wakeref_auto_fini(struct intel_wakeref_auto *wf);
#endif /* INTEL_WAKEREF_H */