kernel/irq/cpuhotplug.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Generic cpu hotunplug interrupt migration code copied from the
 * arch/arm implementation
 *
 * Copyright (C) Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/interrupt.h>
#include <linux/ratelimit.h>
#include <linux/irq.h>
#include <linux/sched/isolation.h>

#include "internals.h"

/* For !GENERIC_IRQ_EFFECTIVE_AFF_MASK this looks at general affinity mask */
static inline bool irq_needs_fixup(struct irq_data *d)
{
        const struct cpumask *m = irq_data_get_effective_affinity_mask(d);
        unsigned int cpu = smp_processor_id();

#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
        /*
         * The cpumask_empty() check is a workaround for interrupt chips,
         * which do not implement effective affinity, but the architecture has
         * enabled the config switch. Use the general affinity mask instead.
         */
        if (cpumask_empty(m))
                m = irq_data_get_affinity_mask(d);

        /*
         * Sanity check. If the mask is not empty when excluding the outgoing
         * CPU then it must contain at least one online CPU. The outgoing CPU
         * has been removed from the online mask already.
         */
        if (cpumask_any_but(m, cpu) < nr_cpu_ids &&
            !cpumask_intersects(m, cpu_online_mask)) {
                /*
                 * If this happens then there was a missed IRQ fixup at some
                 * point. Warn about it and enforce fixup.
                 */
                pr_warn("Eff. affinity %*pbl of IRQ %u contains only offline CPUs after offlining CPU %u\n",
                        cpumask_pr_args(m), d->irq, cpu);
                return true;
        }
#endif
        return cpumask_test_cpu(cpu, m);
}
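
/*
 * migrate_one_irq - Move one interrupt away from the outgoing CPU
 *
 * Called with desc->lock held. Returns true if the interrupt's affinity
 * had to be broken, i.e. it was forced onto cpu_online_mask because its
 * configured affinity would have left it without an online target.
 */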
static bool migrate_one_irq(struct irq_desc *desc)
{
        struct irq_data *d = irq_desc_get_irq_data(desc);
        struct irq_chip *chip = irq_data_get_irq_chip(d);
        bool maskchip = !irq_can_move_pcntxt(d) && !irqd_irq_masked(d);
        const struct cpumask *affinity;
        bool brokeaff = false;
        int err;
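
        /*
         * maskchip is true when the chip must be masked across the affinity
         * update: the interrupt cannot be moved safely in process context
         * and it is not already masked.
         */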

        /*
         * IRQ chip might be already torn down, but the irq descriptor is
         * still in the radix tree. Also if the chip has no affinity setter,
         * nothing can be done here.
         */
        if (!chip || !chip->irq_set_affinity) {
                pr_debug("IRQ %u: Unable to migrate away\n", d->irq);
                return false;
        }

        /*
         * Complete an eventually pending irq move cleanup. If this
         * interrupt was moved in hard irq context, then the vectors need
         * to be cleaned up. It can't wait until this interrupt actually
         * happens and this CPU was involved.
         */
        irq_force_complete_move(desc);

        /*
         * No move required, if:
         * - Interrupt is per cpu
         * - Interrupt is not started
         * - Affinity mask does not include this CPU.
         *
         * Note: Do not check desc->action as this might be a chained
         * interrupt.
         */
        if (irqd_is_per_cpu(d) || !irqd_is_started(d) || !irq_needs_fixup(d)) {
                /*
                 * If an irq move is pending, abort it if the dying CPU is
                 * the sole target.
                 */
                irq_fixup_move_pending(desc, false);
                return false;
        }

        /*
         * If there is a setaffinity pending, then try to reuse the pending
         * mask, so the last change of the affinity does not get lost. If
         * there is no move pending or the pending mask does not contain
         * any online CPU, use the current affinity mask.
         */
        if (irq_fixup_move_pending(desc, true))
                affinity = irq_desc_get_pending_mask(desc);
        else
                affinity = irq_data_get_affinity_mask(d);

        /* Mask the chip for interrupts which cannot move in process context */
        if (maskchip && chip->irq_mask)
                chip->irq_mask(d);

        if (!cpumask_intersects(affinity, cpu_online_mask)) {
                /*
                 * If the interrupt is managed, then shut it down and leave
                 * the affinity untouched.
                 */
                if (irqd_affinity_is_managed(d)) {
                        irqd_set_managed_shutdown(d);
                        irq_shutdown_and_deactivate(desc);
                        return false;
                }
                affinity = cpu_online_mask;
                brokeaff = true;
        }

        /*
         * Do not set the force argument of irq_do_set_affinity() as this
         * disables the masking of offline CPUs from the supplied affinity
         * mask and therefore might keep/reassign the irq to the outgoing
         * CPU.
         */
        err = irq_do_set_affinity(d, affinity, false);

        /*
         * If there are online CPUs in the affinity mask, but they have no
         * vectors left to make the migration work, try to break the
         * affinity by migrating to any online CPU.
         */
        if (err == -ENOSPC && !irqd_affinity_is_managed(d) && affinity != cpu_online_mask) {
                pr_debug("IRQ%u: set affinity failed for %*pbl, re-try with online CPUs\n",
                         d->irq, cpumask_pr_args(affinity));

                affinity = cpu_online_mask;
                brokeaff = true;

                err = irq_do_set_affinity(d, affinity, false);
        }

        if (err) {
                pr_warn_ratelimited("IRQ%u: set affinity failed(%d).\n",
                                    d->irq, err);
                brokeaff = false;
        }

        if (maskchip && chip->irq_unmask)
                chip->irq_unmask(d);

        return brokeaff;
}
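
/*
 * irq_migrate_all_off_this_cpu() below is called from the architecture's
 * CPU offlining code (e.g. from __cpu_disable() on arm/arm64), with
 * interrupts disabled and the outgoing CPU already removed from
 * cpu_online_mask.
 */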

/**
 * irq_migrate_all_off_this_cpu - Migrate irqs away from offline cpu
 *
 * The current CPU has been marked offline. Migrate IRQs off this CPU.
 * If the affinity settings do not allow other CPUs, force them onto any
 * available CPU.
 *
 * Note: we must iterate over all IRQs, whether they have an attached
 * action structure or not, as we need to get chained interrupts too.
 */
void irq_migrate_all_off_this_cpu(void)
{
        struct irq_desc *desc;
        unsigned int irq;

        for_each_active_irq(irq) {
                bool affinity_broken;

                desc = irq_to_desc(irq);
                raw_spin_lock(&desc->lock);
                affinity_broken = migrate_one_irq(desc);
                raw_spin_unlock(&desc->lock);

                if (affinity_broken) {
                        pr_debug_ratelimited("IRQ %u: no longer affine to CPU%u\n",
                                             irq, smp_processor_id());
                }
        }
}
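
/*
 * hk_should_isolate - Whether a managed interrupt should be pulled over to
 * the upcoming housekeeping CPU @cpu: true only if managed-irq housekeeping
 * is enabled, the current effective affinity still contains isolated
 * (non-housekeeping) CPUs, and @cpu itself is a housekeeping CPU.
 */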
static bool hk_should_isolate(struct irq_data *data, unsigned int cpu)
{
        const struct cpumask *hk_mask;

        if (!housekeeping_enabled(HK_TYPE_MANAGED_IRQ))
                return false;

        hk_mask = housekeeping_cpumask(HK_TYPE_MANAGED_IRQ);
        if (cpumask_subset(irq_data_get_effective_affinity_mask(data), hk_mask))
                return false;

        return cpumask_test_cpu(cpu, hk_mask);
}
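
/*
 * Restore the affinity of one managed interrupt for the upcoming @cpu:
 * restart it if it was shut down because all of its target CPUs went
 * offline, and re-evaluate its affinity unless nothing can change (single
 * target and no housekeeping pull requested).
 */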
static void irq_restore_affinity_of_irq(struct irq_desc *desc, unsigned int cpu)
{
        struct irq_data *data = irq_desc_get_irq_data(desc);
        const struct cpumask *affinity = irq_data_get_affinity_mask(data);

        if (!irqd_affinity_is_managed(data) || !desc->action ||
            !irq_data_get_irq_chip(data) || !cpumask_test_cpu(cpu, affinity))
                return;

        /*
         * Don't restore suspended interrupts here when a system comes back
         * from S3. They are reenabled via resume_device_irqs().
         */
        if (desc->istate & IRQS_SUSPENDED)
                return;

        if (irqd_is_managed_and_shutdown(data))
                irq_startup(desc, IRQ_RESEND, IRQ_START_COND);

        /*
         * If the interrupt can only be directed to a single target
         * CPU then it is already assigned to a CPU in the affinity
         * mask. No point in trying to move it around unless the
         * isolation mechanism requests to move it to an upcoming
         * housekeeping CPU.
         */
        if (!irqd_is_single_target(data) || hk_should_isolate(data, cpu))
                irq_set_affinity_locked(data, affinity, false);
}

/**
 * irq_affinity_online_cpu - Restore affinity for managed interrupts
 * @cpu: Upcoming CPU for which interrupts should be restored
 */
int irq_affinity_online_cpu(unsigned int cpu)
{
        struct irq_desc *desc;
        unsigned int irq;

        irq_lock_sparse();
        for_each_active_irq(irq) {
                desc = irq_to_desc(irq);
                raw_spin_lock_irq(&desc->lock);
                irq_restore_affinity_of_irq(desc, cpu);
                raw_spin_unlock_irq(&desc->lock);
        }
        irq_unlock_sparse();

        return 0;
}
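
/*
 * Note: irq_affinity_online_cpu() is registered as the startup callback of
 * the CPUHP_AP_IRQ_AFFINITY_ONLINE hotplug state (see kernel/cpu.c), so it
 * runs on each upcoming CPU during its online sequence.
 */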