// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra
 *
 * Provides a framework for enqueueing and running callbacks from hardirq
 * context. The enqueueing is NMI-safe.
 */
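/*
 * Illustrative usage sketch (not part of this file; my_irq_cb and my_work
 * are made-up names): a caller defines a callback, embeds a struct irq_work
 * and queues it from NMI/hardirq/atomic context.
 *
 *      static void my_irq_cb(struct irq_work *work)
 *      {
 *              pr_info("irq_work callback ran\n");
 *      }
 *
 *      static struct irq_work my_work = IRQ_WORK_INIT(my_irq_cb);
 *
 *      irq_work_queue(&my_work);       // NMI-safe enqueue on the local CPU
 */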
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/irq_work.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <linux/smpboot.h>
#include <asm/processor.h>
#include <linux/kasan.h>

#include <trace/events/ipi.h>

static DEFINE_PER_CPU(struct llist_head, raised_list);
static DEFINE_PER_CPU(struct llist_head, lazy_list);
static DEFINE_PER_CPU(struct task_struct *, irq_workd);
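/*
 * Per-CPU state: raised_list holds items run from the hardirq/IPI path,
 * lazy_list holds IRQ_WORK_LAZY items (and, on PREEMPT_RT, all items not
 * marked IRQ_WORK_HARD_IRQ), which are run from the tick or from the
 * per-CPU irq_work/%u thread tracked by irq_workd.
 */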
static void wake_irq_workd(void)
{
        struct task_struct *tsk = __this_cpu_read(irq_workd);

        if (!llist_empty(this_cpu_ptr(&lazy_list)) && tsk)
                wake_up_process(tsk);
}

#ifdef CONFIG_SMP
static void irq_work_wake(struct irq_work *entry)
{
        wake_irq_workd();
}

static DEFINE_PER_CPU(struct irq_work, irq_work_wakeup) =
        IRQ_WORK_INIT_HARD(irq_work_wake);
#endif

static int irq_workd_should_run(unsigned int cpu)
{
        return !llist_empty(this_cpu_ptr(&lazy_list));
}

/*
 * Claim the entry so that no one else will poke at it.
 */
static bool irq_work_claim(struct irq_work *work)
{
        int oflags;

        oflags = atomic_fetch_or(IRQ_WORK_CLAIMED | CSD_TYPE_IRQ_WORK, &work->node.a_flags);
        /*
         * If the work is already pending, no need to raise the IPI.
         * The pairing smp_mb() in irq_work_single() makes sure
         * everything we did before is visible.
         */
        if (oflags & IRQ_WORK_PENDING)
                return false;

        return true;
}

void __weak arch_irq_work_raise(void)
{
        /*
         * Lame architectures will get the timer tick callback
         */
}

static __always_inline void irq_work_raise(struct irq_work *work)
{
        if (trace_ipi_send_cpu_enabled() && arch_irq_work_has_interrupt())
                trace_ipi_send_cpu(smp_processor_id(), _RET_IP_, work->func);

        arch_irq_work_raise();
}

/* Enqueue on current CPU, work must already be claimed and preempt disabled */
static void __irq_work_queue_local(struct irq_work *work)
{
        struct llist_head *list;
        bool rt_lazy_work = false;
        bool lazy_work = false;
        int work_flags;

        work_flags = atomic_read(&work->node.a_flags);
        if (work_flags & IRQ_WORK_LAZY)
                lazy_work = true;
        else if (IS_ENABLED(CONFIG_PREEMPT_RT) &&
                 !(work_flags & IRQ_WORK_HARD_IRQ))
                rt_lazy_work = true;

        if (lazy_work || rt_lazy_work)
                list = this_cpu_ptr(&lazy_list);
        else
                list = this_cpu_ptr(&raised_list);

        if (!llist_add(&work->node.llist, list))
                return;

        /* If the work is "lazy", handle it from next tick if any */
        if (!lazy_work || tick_nohz_tick_stopped())
                irq_work_raise(work);
}
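/*
 * Routing summary for __irq_work_queue_local(), matching the logic above:
 *   IRQ_WORK_LAZY                    -> lazy_list, handled from the next tick
 *                                       (raised immediately only if the tick
 *                                       is stopped)
 *   PREEMPT_RT && !IRQ_WORK_HARD_IRQ -> lazy_list, the irq_work/%u thread
 *                                       runs it in preemptible context
 *   otherwise                        -> raised_list, the arch IPI is raised
 */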
/* Enqueue the irq work @work on the current CPU */
bool irq_work_queue(struct irq_work *work)
{
        /* Only queue if not already pending */
        if (!irq_work_claim(work))
                return false;

        /* Queue the entry and raise the IPI if needed. */
        preempt_disable();
        __irq_work_queue_local(work);
        preempt_enable();

        return true;
}
EXPORT_SYMBOL_GPL(irq_work_queue);
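/*
 * Illustrative sketch (stats_flush and stats_work are made-up names): a lazy
 * item is batched until the next timer tick on the queueing CPU.
 *
 *      static void stats_flush(struct irq_work *work)
 *      {
 *              // drain some per-CPU counters
 *      }
 *
 *      static DEFINE_PER_CPU(struct irq_work, stats_work) =
 *              IRQ_WORK_INIT_LAZY(stats_flush);
 *
 *      irq_work_queue(this_cpu_ptr(&stats_work));
 */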
/*
 * Enqueue the irq_work @work on @cpu unless it's already pending
 * somewhere.
 *
 * Can be re-enqueued while the callback is still in progress.
 */
bool irq_work_queue_on(struct irq_work *work, int cpu)
{
#ifndef CONFIG_SMP
        return irq_work_queue(work);

#else /* CONFIG_SMP: */
        /* All work should have been flushed before going offline */
        WARN_ON_ONCE(cpu_is_offline(cpu));

        /* Only queue if not already pending */
        if (!irq_work_claim(work))
                return false;

        kasan_record_aux_stack_noalloc(work);

        preempt_disable();
        if (cpu != smp_processor_id()) {
                /* Arch remote IPI send/receive backend aren't NMI safe */
                WARN_ON_ONCE(in_nmi());

                /*
                 * On PREEMPT_RT the items which are not marked as
                 * IRQ_WORK_HARD_IRQ are added to the lazy list and a HARD work
                 * item is used on the remote CPU to wake the thread.
                 */
                if (IS_ENABLED(CONFIG_PREEMPT_RT) &&
                    !(atomic_read(&work->node.a_flags) & IRQ_WORK_HARD_IRQ)) {

                        if (!llist_add(&work->node.llist, &per_cpu(lazy_list, cpu)))
                                goto out;

                        work = &per_cpu(irq_work_wakeup, cpu);
                        if (!irq_work_claim(work))
                                goto out;
                }

                __smp_call_single_queue(cpu, &work->node.llist);
        } else {
                __irq_work_queue_local(work);
        }
out:
        preempt_enable();

        return true;
#endif /* CONFIG_SMP */
}
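/*
 * Illustrative sketch (remote_poke, poke_work and target_cpu are made-up
 * names): run a callback on a specific CPU, e.g. the owner of some per-CPU
 * state.
 *
 *      static void remote_poke(struct irq_work *work)
 *      {
 *              // runs in hardirq context on the target CPU
 *      }
 *
 *      static struct irq_work poke_work = IRQ_WORK_INIT_HARD(remote_poke);
 *
 *      irq_work_queue_on(&poke_work, target_cpu);
 */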
bool irq_work_needs_cpu(void)
{
        struct llist_head *raised, *lazy;

        raised = this_cpu_ptr(&raised_list);
        lazy = this_cpu_ptr(&lazy_list);

        if (llist_empty(raised) || arch_irq_work_has_interrupt())
                if (llist_empty(lazy))
                        return false;

        /* All work should have been flushed before going offline */
        WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));

        return true;
}

void irq_work_single(void *arg)
{
        struct irq_work *work = arg;
        int flags;

        /*
         * Clear the PENDING bit, after this point the @work can be re-used.
         * The PENDING bit acts as a lock, and we own it, so we can clear it
         * without atomic ops.
         */
        flags = atomic_read(&work->node.a_flags);
        flags &= ~IRQ_WORK_PENDING;
        atomic_set(&work->node.a_flags, flags);

        /*
         * See irq_work_claim().
         */
        smp_mb();

        lockdep_irq_work_enter(flags);
        work->func(work);
        lockdep_irq_work_exit(flags);

        /*
         * Clear the BUSY bit, if set, and return to the free state if no-one
         * else claimed it meanwhile.
         */
        (void)atomic_cmpxchg(&work->node.a_flags, flags, flags & ~IRQ_WORK_BUSY);

        if ((IS_ENABLED(CONFIG_PREEMPT_RT) && !irq_work_is_hard(work)) ||
            !arch_irq_work_has_interrupt())
                rcuwait_wake_up(&work->irqwait);
}

static void irq_work_run_list(struct llist_head *list)
{
        struct irq_work *work, *tmp;
        struct llist_node *llnode;

        /*
         * On PREEMPT_RT IRQ-work which is not marked as HARD will be processed
         * in a per-CPU thread in preemptible context. Only the items which are
         * marked as IRQ_WORK_HARD_IRQ will be processed in hardirq context.
         */
        BUG_ON(!irqs_disabled() && !IS_ENABLED(CONFIG_PREEMPT_RT));

        if (llist_empty(list))
                return;

        llnode = llist_del_all(list);
        llist_for_each_entry_safe(work, tmp, llnode, node.llist)
                irq_work_single(work);
}

/*
 * hotplug calls this through:
 *  hotplug_cfd() -> flush_smp_call_function_queue()
 */
void irq_work_run(void)
{
        irq_work_run_list(this_cpu_ptr(&raised_list));
        if (!IS_ENABLED(CONFIG_PREEMPT_RT))
                irq_work_run_list(this_cpu_ptr(&lazy_list));
        else
                wake_irq_workd();
}
EXPORT_SYMBOL_GPL(irq_work_run);

void irq_work_tick(void)
{
        struct llist_head *raised = this_cpu_ptr(&raised_list);

        if (!llist_empty(raised) && !arch_irq_work_has_interrupt())
                irq_work_run_list(raised);

        if (!IS_ENABLED(CONFIG_PREEMPT_RT))
                irq_work_run_list(this_cpu_ptr(&lazy_list));
        else
                wake_irq_workd();
}

/*
 * Synchronize against the irq_work @work, ensures the entry is not
 * currently in use.
 */
void irq_work_sync(struct irq_work *work)
{
        lockdep_assert_irqs_enabled();
        might_sleep();

        if ((IS_ENABLED(CONFIG_PREEMPT_RT) && !irq_work_is_hard(work)) ||
            !arch_irq_work_has_interrupt()) {
                rcuwait_wait_event(&work->irqwait, !irq_work_is_busy(work),
                                   TASK_UNINTERRUPTIBLE);
                return;
        }

        while (irq_work_is_busy(work))
                cpu_relax();
}
EXPORT_SYMBOL_GPL(irq_work_sync);
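/*
 * Illustrative sketch (obj is a made-up name): wait for a possibly in-flight
 * callback to finish before freeing the object that embeds the irq_work.
 *
 *      irq_work_sync(&obj->work);
 *      kfree(obj);
 */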
static void run_irq_workd(unsigned int cpu)
{
        irq_work_run_list(this_cpu_ptr(&lazy_list));
}

static void irq_workd_setup(unsigned int cpu)
{
        sched_set_fifo_low(current);
}

static struct smp_hotplug_thread irqwork_threads = {
        .store                  = &irq_workd,
        .setup                  = irq_workd_setup,
        .thread_should_run      = irq_workd_should_run,
        .thread_fn              = run_irq_workd,
        .thread_comm            = "irq_work/%u",
};

static __init int irq_work_init_threads(void)
{
        if (IS_ENABLED(CONFIG_PREEMPT_RT))
                BUG_ON(smpboot_register_percpu_thread(&irqwork_threads));
        return 0;
}
early_initcall(irq_work_init_threads);