/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2021 Facebook
 */
#ifndef __MMAP_UNLOCK_WORK_H__
#define __MMAP_UNLOCK_WORK_H__
#include <linux/irq_work.h>

/* irq_work to run mmap_read_unlock() in irq_work */
struct mmap_unlock_irq_work {
	struct irq_work irq_work;
	struct mm_struct *mm;
};

DECLARE_PER_CPU(struct mmap_unlock_irq_work, mmap_unlock_work);
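
/*
 * Each per-CPU work must be wired to an irq_work callback before first
 * use. A minimal sketch of that setup, modeled on the init code in
 * kernel/bpf/task_iter.c (do_mmap_read_unlock is the callback sketched
 * near the end of this header):
 *
 *	struct mmap_unlock_irq_work *work;
 *	int cpu;
 *
 *	for_each_possible_cpu(cpu) {
 *		work = per_cpu_ptr(&mmap_unlock_work, cpu);
 *		init_irq_work(&work->irq_work, do_mmap_read_unlock);
 *	}
 */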

/*
 * We cannot do mmap_read_unlock() when the irq is disabled, because of
 * the risk of deadlock with rq_lock. To look up a vma while irqs are
 * disabled, we need to run mmap_read_unlock() in irq_work. We use a
 * percpu variable to do the irq_work. If the irq_work is already used
 * by another lookup, we fall back.
 */
static inline bool bpf_mmap_unlock_get_irq_work(struct mmap_unlock_irq_work **work_ptr)
{
	struct mmap_unlock_irq_work *work = NULL;
	bool irq_work_busy = false;

	if (irqs_disabled()) {
		if (!IS_ENABLED(CONFIG_PREEMPT_RT)) {
			work = this_cpu_ptr(&mmap_unlock_work);
			if (irq_work_is_busy(&work->irq_work)) {
				/* cannot queue more up_read, fallback */
				irq_work_busy = true;
			}
		} else {
			/*
			 * PREEMPT_RT does not allow trylocking the mmap sem
			 * in interrupt-disabled context. Force the fallback
			 * code.
			 */
			irq_work_busy = true;
		}
	}

	*work_ptr = work;
	return irq_work_busy;
}
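
/*
 * Expected caller pattern, a minimal sketch modeled on bpf_find_vma()
 * in kernel/bpf/task_iter.c ("mm" and "start" come from the caller;
 * the error handling is illustrative):
 *
 *	struct mmap_unlock_irq_work *work = NULL;
 *	struct vm_area_struct *vma;
 *	bool irq_work_busy;
 *
 *	irq_work_busy = bpf_mmap_unlock_get_irq_work(&work);
 *	if (irq_work_busy || !mmap_read_trylock(mm))
 *		return -EBUSY;
 *
 *	vma = find_vma(mm, start);
 *	...
 *	bpf_mmap_unlock_mm(work, mm);
 *
 * On the irqs-disabled path, work is non-NULL and the unlock is
 * deferred to irq_work; otherwise work is NULL and bpf_mmap_unlock_mm()
 * unlocks directly.
 */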
static inline void bpf_mmap_unlock_mm(struct mmap_unlock_irq_work *work, struct mm_struct *mm)
{
	if (!work) {
		mmap_read_unlock(mm);
	} else {
		work->mm = mm;

		/* The lock will be released once we're out of interrupt
		 * context. Tell lockdep that we've released it now so
		 * it doesn't complain that we forgot to release it.
		 */
		rwsem_release(&mm->mmap_lock.dep_map, _RET_IP_);
		irq_work_queue(&work->irq_work);
	}
}
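
/*
 * The queued irq_work must release the lock on behalf of the original
 * lock owner. A sketch of such a callback, modeled on the one in
 * kernel/bpf/task_iter.c (mmap_read_unlock_non_owner() is declared in
 * include/linux/mmap_lock.h):
 *
 *	static void do_mmap_read_unlock(struct irq_work *entry)
 *	{
 *		struct mmap_unlock_irq_work *work;
 *
 *		if (WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_RT)))
 *			return;
 *
 *		work = container_of(entry, struct mmap_unlock_irq_work, irq_work);
 *		mmap_read_unlock_non_owner(work->mm);
 *	}
 */
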
#endif /* __MMAP_UNLOCK_WORK_H__ */