/* SPDX-License-Identifier: GPL-2.0-only */

#ifndef WW_RT

#define MUTEX		mutex
#define MUTEX_WAITER	mutex_waiter

static inline struct mutex_waiter *
__ww_waiter_first(struct mutex *lock)
{
	struct mutex_waiter *w;

	w = list_first_entry(&lock->wait_list, struct mutex_waiter, list);
	if (list_entry_is_head(w, &lock->wait_list, list))
		return NULL;

	return w;
}

static inline struct mutex_waiter *
__ww_waiter_next(struct mutex *lock, struct mutex_waiter *w)
{
	w = list_next_entry(w, list);
	if (list_entry_is_head(w, &lock->wait_list, list))
		return NULL;

	return w;
}

static inline struct mutex_waiter *
__ww_waiter_prev(struct mutex *lock, struct mutex_waiter *w)
{
	w = list_prev_entry(w, list);
	if (list_entry_is_head(w, &lock->wait_list, list))
		return NULL;

	return w;
}

static inline struct mutex_waiter *
__ww_waiter_last(struct mutex *lock)
{
	struct mutex_waiter *w;

	w = list_last_entry(&lock->wait_list, struct mutex_waiter, list);
	if (list_entry_is_head(w, &lock->wait_list, list))
		return NULL;

	return w;
}

static inline void
__ww_waiter_add(struct mutex *lock, struct mutex_waiter *waiter, struct mutex_waiter *pos)
{
	struct list_head *p = &lock->wait_list;
	if (pos)
		p = &pos->list;
	__mutex_add_waiter(lock, waiter, p);
}

static inline struct task_struct *
__ww_mutex_owner(struct mutex *lock)
{
	return __mutex_owner(lock);
}

static inline bool
__ww_mutex_has_waiters(struct mutex *lock)
{
	return atomic_long_read(&lock->owner) & MUTEX_FLAG_WAITERS;
}

static inline void lock_wait_lock(struct mutex *lock, unsigned long *flags)
{
	raw_spin_lock_irqsave(&lock->wait_lock, *flags);
}

static inline void unlock_wait_lock(struct mutex *lock, unsigned long *flags)
{
	raw_spin_unlock_irqrestore(&lock->wait_lock, *flags);
}

static inline void lockdep_assert_wait_lock_held(struct mutex *lock)
{
	lockdep_assert_held(&lock->wait_lock);
}

#else /* WW_RT */

#define MUTEX		rt_mutex
#define MUTEX_WAITER	rt_mutex_waiter

static inline struct rt_mutex_waiter *
__ww_waiter_first(struct rt_mutex *lock)
{
	struct rb_node *n = rb_first(&lock->rtmutex.waiters.rb_root);
	if (!n)
		return NULL;
	return rb_entry(n, struct rt_mutex_waiter, tree.entry);
}

static inline struct rt_mutex_waiter *
__ww_waiter_next(struct rt_mutex *lock, struct rt_mutex_waiter *w)
{
	struct rb_node *n = rb_next(&w->tree.entry);
	if (!n)
		return NULL;
	return rb_entry(n, struct rt_mutex_waiter, tree.entry);
}

static inline struct rt_mutex_waiter *
__ww_waiter_prev(struct rt_mutex *lock, struct rt_mutex_waiter *w)
{
	struct rb_node *n = rb_prev(&w->tree.entry);
	if (!n)
		return NULL;
	return rb_entry(n, struct rt_mutex_waiter, tree.entry);
}

static inline struct rt_mutex_waiter *
__ww_waiter_last(struct rt_mutex *lock)
{
	struct rb_node *n = rb_last(&lock->rtmutex.waiters.rb_root);
	if (!n)
		return NULL;
	return rb_entry(n, struct rt_mutex_waiter, tree.entry);
}

static inline void
__ww_waiter_add(struct rt_mutex *lock, struct rt_mutex_waiter *waiter, struct rt_mutex_waiter *pos)
{
	/* RT unconditionally adds the waiter first and then removes it on error */
}

static inline struct task_struct *
__ww_mutex_owner(struct rt_mutex *lock)
{
	return rt_mutex_owner(&lock->rtmutex);
}

static inline bool
__ww_mutex_has_waiters(struct rt_mutex *lock)
{
	return rt_mutex_has_waiters(&lock->rtmutex);
}

static inline void lock_wait_lock(struct rt_mutex *lock, unsigned long *flags)
{
	raw_spin_lock_irqsave(&lock->rtmutex.wait_lock, *flags);
}

static inline void unlock_wait_lock(struct rt_mutex *lock, unsigned long *flags)
{
	raw_spin_unlock_irqrestore(&lock->rtmutex.wait_lock, *flags);
}

static inline void lockdep_assert_wait_lock_held(struct rt_mutex *lock)
{
	lockdep_assert_held(&lock->rtmutex.wait_lock);
}

#endif /* WW_RT */

/*
 * Wait-Die:
 *   The newer transactions are killed when:
 *     It (the new transaction) makes a request for a lock being held
 *     by an older transaction.
 *
 * Wound-Wait:
 *   The newer transactions are wounded when:
 *     An older transaction makes a request for a lock being held by
 *     the newer transaction.
 */
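
/*
 * Worked example (an illustrative addition, not from the original file):
 * let ctx A be the older transaction (smaller stamp) and ctx B the younger.
 *
 *   A holds the lock, B requests it:
 *     Wait-Die:   B is the newer requester, so B is killed: it backs off
 *                 with -EDEADLK, drops all its locks and retries with the
 *                 same stamp, eventually winning by seniority.
 *     Wound-Wait: B simply waits.
 *
 *   B holds the lock, A requests it:
 *     Wait-Die:   A is the older requester, so A waits.
 *     Wound-Wait: B is the newer holder, so B is wounded: it must back off
 *                 the next time it attempts to acquire a lock.
 */
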
/*
 * Associate the ww_mutex @ww with the context @ww_ctx under which we acquired
 * it.
 */
static __always_inline void
ww_mutex_lock_acquired(struct ww_mutex *ww, struct ww_acquire_ctx *ww_ctx)
{
#ifdef DEBUG_WW_MUTEXES
	/*
	 * If this WARN_ON triggers, you used ww_mutex_lock to acquire,
	 * but released with a normal mutex_unlock in this call.
	 *
	 * This should never happen, always use ww_mutex_unlock.
	 */
	DEBUG_LOCKS_WARN_ON(ww->ctx);

	/*
	 * Not quite done after calling ww_acquire_done() ?
	 */
	DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);

	if (ww_ctx->contending_lock) {
		/*
		 * After -EDEADLK you tried to
		 * acquire a different ww_mutex? Bad!
		 */
		DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);

		/*
		 * You called ww_mutex_lock after receiving -EDEADLK,
		 * but 'forgot' to unlock everything else first?
		 */
		DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
		ww_ctx->contending_lock = NULL;
	}

	/*
	 * Naughty, using a different class will lead to undefined behavior!
	 */
	DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
#endif
	ww_ctx->acquired++;
	ww->ctx = ww_ctx;
}

/*
 * Determine if @a is 'less' than @b. IOW, either @a is a lower priority task
 * or, when of equal priority, a younger transaction than @b.
 *
 * Depending on the algorithm, @a will either need to wait for @b, or die.
 */
static inline bool
__ww_ctx_less(struct ww_acquire_ctx *a, struct ww_acquire_ctx *b)
{
/*
 * Can only do the RT prio for WW_RT, because task->prio isn't stable due to PI,
 * so the wait_list ordering will go wobbly. rt_mutex re-queues the waiter and
 * isn't affected by this.
 */
#ifdef WW_RT
	/* kernel prio; less is more */
	int a_prio = a->task->prio;
	int b_prio = b->task->prio;

	if (rt_or_dl_prio(a_prio) || rt_or_dl_prio(b_prio)) {

		if (a_prio > b_prio)
			return true;

		if (a_prio < b_prio)
			return false;

		/* equal static prio */

		if (dl_prio(a_prio)) {
			if (dl_time_before(b->task->dl.deadline,
					   a->task->dl.deadline))
				return true;

			if (dl_time_before(a->task->dl.deadline,
					   b->task->dl.deadline))
				return false;
		}

		/* equal prio */
	}
#endif

	/* FIFO order tie break -- bigger is younger */
	return (signed long)(a->stamp - b->stamp) > 0;
}
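
/*
 * Illustrative note (an addition, not in the original file): the stamp
 * comparison above is the usual wraparound-safe sequence-number trick;
 * the difference wraps modulo the word size and is interpreted as signed.
 * E.g. with a stamp counter that just wrapped:
 *
 *   b->stamp = ULONG_MAX   (taken just before the wrap, older)
 *   a->stamp = 1           (taken just after the wrap, younger)
 *
 *   (signed long)(a->stamp - b->stamp) == 2 > 0
 *
 * so @a is correctly classified as younger even though its raw stamp is
 * numerically smaller.
 */
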
/*
 * Wait-Die; wake a lesser waiter context (when locks held) such that it can
 * die.
 *
 * Among waiters with context, only the first one can have other locks acquired
 * already (ctx->acquired > 0), because __ww_mutex_add_waiter() and
 * __ww_mutex_check_kill() wake any but the earliest context.
 */
static bool
__ww_mutex_die(struct MUTEX *lock, struct MUTEX_WAITER *waiter,
	       struct ww_acquire_ctx *ww_ctx, struct wake_q_head *wake_q)
{
	if (!ww_ctx->is_wait_die)
		return false;

	if (waiter->ww_ctx->acquired > 0 && __ww_ctx_less(waiter->ww_ctx, ww_ctx)) {
#ifndef WW_RT
		debug_mutex_wake_waiter(lock, waiter);
#endif
		wake_q_add(wake_q, waiter->task);
	}

	return true;
}

/*
 * Wound-Wait; wound a lesser @hold_ctx if it holds the lock.
 *
 * Wound the lock holder if there are waiters with more important transactions
 * than the lock holders. Even if multiple waiters may wound the lock holder,
 * it's sufficient that only one does.
 */
static bool __ww_mutex_wound(struct MUTEX *lock,
			     struct ww_acquire_ctx *ww_ctx,
			     struct ww_acquire_ctx *hold_ctx,
			     struct wake_q_head *wake_q)
{
	struct task_struct *owner = __ww_mutex_owner(lock);

	lockdep_assert_wait_lock_held(lock);

	/*
	 * Possible through __ww_mutex_add_waiter() when we race with
	 * ww_mutex_set_context_fastpath(). In that case we'll get here again
	 * through __ww_mutex_check_waiters().
	 */
	if (!hold_ctx)
		return false;

	/*
	 * Can have !owner because of __mutex_unlock_slowpath(), but if owner,
	 * it cannot go away because we'll have FLAG_WAITERS set and hold
	 * wait_lock.
	 */
	if (!owner)
		return false;

	if (ww_ctx->acquired > 0 && __ww_ctx_less(hold_ctx, ww_ctx)) {
		hold_ctx->wounded = 1;

		/*
		 * wake_up_process() paired with set_current_state()
		 * inserts sufficient barriers to make sure @owner either sees
		 * it's wounded in __ww_mutex_check_kill() or has a
		 * wakeup pending to re-read the wounded state.
		 */
		if (owner != current)
			wake_q_add(wake_q, owner);

		return true;
	}

	return false;
}

/*
 * We just acquired @lock under @ww_ctx, if there are more important contexts
 * waiting behind us on the wait-list, check if they need to die, or wound us.
 *
 * See __ww_mutex_add_waiter() for the list-order construction; basically the
 * list is ordered by stamp, smallest (oldest) first.
 *
 * This relies on never mixing wait-die/wound-wait on the same wait-list;
 * which is currently ensured by that being a ww_class property.
 *
 * The current task must not be on the wait list.
 */
static void
__ww_mutex_check_waiters(struct MUTEX *lock, struct ww_acquire_ctx *ww_ctx,
			 struct wake_q_head *wake_q)
{
	struct MUTEX_WAITER *cur;

	lockdep_assert_wait_lock_held(lock);

	for (cur = __ww_waiter_first(lock); cur;
	     cur = __ww_waiter_next(lock, cur)) {

		if (!cur->ww_ctx)
			continue;

		if (__ww_mutex_die(lock, cur, ww_ctx, wake_q) ||
		    __ww_mutex_wound(lock, cur->ww_ctx, ww_ctx, wake_q))
			break;
	}
}

/*
 * After acquiring lock with fastpath, where we do not hold wait_lock, set ctx
 * and wake up any waiters so they can recheck.
 */
static __always_inline void
ww_mutex_set_context_fastpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	DEFINE_WAKE_Q(wake_q);
	unsigned long flags;

	ww_mutex_lock_acquired(lock, ctx);

	/*
	 * The lock->ctx update should be visible on all cores before
	 * the WAITERS check is done, otherwise contended waiters might be
	 * missed. The contended waiters will either see ww_ctx == NULL
	 * and keep spinning, or it will acquire wait_lock, add itself
	 * to waiter list and sleep.
	 */
	smp_mb(); /* See comments above and below. */

	/*
	 * [W] ww->ctx = ctx	    [W] MUTEX_FLAG_WAITERS
	 *     MB		        MB
	 * [R] MUTEX_FLAG_WAITERS   [R] ww->ctx
	 *
	 * The memory barrier above pairs with the memory barrier in
	 * __ww_mutex_add_waiter() and makes sure we either observe ww->ctx
	 * and/or !empty list.
	 */
	if (likely(!__ww_mutex_has_waiters(&lock->base)))
		return;

	/*
	 * Uh oh, we raced in fastpath, check if any of the waiters need to
	 * die or wound us.
	 */
	lock_wait_lock(&lock->base, &flags);
	__ww_mutex_check_waiters(&lock->base, ctx, &wake_q);
	preempt_disable();
	unlock_wait_lock(&lock->base, &flags);
	wake_up_q(&wake_q);
	preempt_enable();
}

static __always_inline int
__ww_mutex_kill(struct MUTEX *lock, struct ww_acquire_ctx *ww_ctx)
{
	if (ww_ctx->acquired > 0) {
#ifdef DEBUG_WW_MUTEXES
		struct ww_mutex *ww;

		ww = container_of(lock, struct ww_mutex, base);
		DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock);
		ww_ctx->contending_lock = ww;
#endif
		return -EDEADLK;
	}

	return 0;
}

/*
 * Check the wound condition for the current lock acquire.
 *
 * Wound-Wait: If we're wounded, kill ourself.
 *
 * Wait-Die: If we're trying to acquire a lock already held by an older
 *           context, kill ourselves.
 *
 * Since __ww_mutex_add_waiter() orders the wait-list on stamp, we only have to
 * look at waiters before us in the wait-list.
 */
static inline int
__ww_mutex_check_kill(struct MUTEX *lock, struct MUTEX_WAITER *waiter,
		      struct ww_acquire_ctx *ctx)
{
	struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
	struct ww_acquire_ctx *hold_ctx = READ_ONCE(ww->ctx);
	struct MUTEX_WAITER *cur;

	if (ctx->acquired == 0)
		return 0;

	if (!ctx->is_wait_die) {
		if (ctx->wounded)
			return __ww_mutex_kill(lock, ctx);

		return 0;
	}

	if (hold_ctx && __ww_ctx_less(ctx, hold_ctx))
		return __ww_mutex_kill(lock, ctx);

	/*
	 * If there is a waiter in front of us that has a context, then its
	 * stamp is earlier than ours and we must kill ourself.
	 */
	for (cur = __ww_waiter_prev(lock, waiter); cur;
	     cur = __ww_waiter_prev(lock, cur)) {

		if (!cur->ww_ctx)
			continue;

		return __ww_mutex_kill(lock, ctx);
	}

	return 0;
}

/*
 * Add @waiter to the wait-list, keep the wait-list ordered by stamp, smallest
 * first. Such that older contexts are preferred to acquire the lock over
 * younger contexts.
 *
 * Waiters without context are interspersed in FIFO order.
 *
 * Furthermore, for Wait-Die kill ourself immediately when possible (there are
 * older contexts already waiting) to avoid unnecessary waiting and for
 * Wound-Wait ensure we wound the owning context when it is younger.
 */
static inline int
__ww_mutex_add_waiter(struct MUTEX_WAITER *waiter,
		      struct MUTEX *lock,
		      struct ww_acquire_ctx *ww_ctx,
		      struct wake_q_head *wake_q)
{
	struct MUTEX_WAITER *cur, *pos = NULL;
	bool is_wait_die;

	if (!ww_ctx) {
		__ww_waiter_add(lock, waiter, NULL);
		return 0;
	}

	is_wait_die = ww_ctx->is_wait_die;

	/*
	 * Add the waiter before the first waiter with a higher stamp.
	 * Waiters without a context are skipped to avoid starving
	 * them. Wait-Die waiters may die here. Wound-Wait waiters
	 * never die here, but they are sorted in stamp order and
	 * may wound the lock holder.
	 */
	for (cur = __ww_waiter_last(lock); cur;
	     cur = __ww_waiter_prev(lock, cur)) {

		if (!cur->ww_ctx)
			continue;

		if (__ww_ctx_less(ww_ctx, cur->ww_ctx)) {
			/*
			 * Wait-Die: if we find an older context waiting, there
			 * is no point in queueing behind it, as we'd have to
			 * die the moment it would acquire the lock.
			 */
			if (is_wait_die) {
				int ret = __ww_mutex_kill(lock, ww_ctx);

				if (ret)
					return ret;
			}

			break;
		}

		pos = cur;

		/* Wait-Die: ensure younger waiters die. */
		__ww_mutex_die(lock, cur, ww_ctx, wake_q);
	}

	__ww_waiter_add(lock, waiter, pos);

	/*
	 * Wound-Wait: if we're blocking on a mutex owned by a younger context,
	 * wound that such that we might proceed.
	 */
	if (!is_wait_die) {
		struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);

		/*
		 * See ww_mutex_set_context_fastpath(). Orders setting
		 * MUTEX_FLAG_WAITERS vs the ww->ctx load,
		 * such that either we or the fastpath will wound @ww->ctx.
		 */
		smp_mb();
		__ww_mutex_wound(lock, ww_ctx, ww->ctx, wake_q);
	}

	return 0;
}
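
/*
 * Worked example (an illustrative addition, not from the original file) of
 * the order __ww_mutex_add_waiter() above maintains: oldest (smallest stamp)
 * first, with context-less waiters left in place:
 *
 *   before: [ w1(no ctx), w2(stamp 3), w3(no ctx), w4(stamp 7) ]
 *   after adding w5(stamp 5), assuming it survives the Wait-Die check
 *   against the older stamp-3 context:
 *           [ w1(no ctx), w2(stamp 3), w3(no ctx), w5(stamp 5), w4(stamp 7) ]
 *
 * While scanning back from the tail, w5 also asks the younger stamp-7
 * context to die via __ww_mutex_die() if it already holds other locks.
 */
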
static inline void __ww_mutex_unlock(struct ww_mutex *lock)
{
	if (lock->ctx) {
#ifdef DEBUG_WW_MUTEXES
		DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
#endif
		if (lock->ctx->acquired > 0)
			lock->ctx->acquired--;
		lock->ctx = NULL;
	}
}
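
/*
 * Usage sketch (illustrative only; obj_a, obj_b and my_ww_class are made-up
 * names -- see Documentation/locking/ww-mutex-design.rst for the canonical
 * examples). The die/wound machinery above reaches callers as -EDEADLK,
 * which is handled with the standard backoff-and-retry pattern:
 *
 *	struct ww_acquire_ctx ctx;
 *
 *	ww_acquire_init(&ctx, &my_ww_class);
 *
 *	ww_mutex_lock(&obj_a->lock, &ctx);
 *	if (ww_mutex_lock(&obj_b->lock, &ctx) == -EDEADLK) {
 *		ww_mutex_unlock(&obj_a->lock);          back off: drop held locks
 *		ww_mutex_lock_slow(&obj_b->lock, &ctx); sleep until winner is done
 *		ww_mutex_lock(&obj_a->lock, &ctx);      re-acquire the rest (a real
 *							caller loops, as this can
 *							fail with -EDEADLK again)
 *	}
 *	ww_acquire_done(&ctx);
 *	... use obj_a and obj_b ...
 *	ww_mutex_unlock(&obj_b->lock);
 *	ww_mutex_unlock(&obj_a->lock);
 *	ww_acquire_fini(&ctx);
 */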