kernel/irq/migration.c
#include <linux/irq.h>

void set_pending_irq(unsigned int irq, cpumask_t mask)
{
	struct irq_desc *desc = irq_desc + irq;
	unsigned long flags;

	spin_lock_irqsave(&desc->lock, flags);
	desc->status |= IRQ_MOVE_PENDING;
	irq_desc[irq].pending_mask = mask;
	spin_unlock_irqrestore(&desc->lock, flags);
}
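
/*
 * Illustrative sketch, not part of the original file: on
 * architectures that select CONFIG_GENERIC_PENDING_IRQ, an affinity
 * request is deferred rather than applied immediately. The caller
 * parks the target mask via set_pending_irq(); the actual
 * reprogramming happens later, from interrupt context, in
 * move_native_irq()/move_masked_irq() below. The function name here
 * is hypothetical.
 */
static void example_defer_affinity(unsigned int irq, cpumask_t mask)
{
	/*
	 * Record the target mask and flag the irq as IRQ_MOVE_PENDING;
	 * the migration is picked up the next time the irq fires, when
	 * it is safe to touch the interrupt controller.
	 */
	set_pending_irq(irq, mask);
}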
void move_masked_irq(int irq)
{
	struct irq_desc *desc = irq_desc + irq;
	cpumask_t tmp;

	if (likely(!(desc->status & IRQ_MOVE_PENDING)))
		return;

	/*
	 * Paranoia: cpu-local interrupts shouldn't be calling in here anyway.
	 */
	if (CHECK_IRQ_PER_CPU(desc->status)) {
		WARN_ON(1);
		return;
	}

	desc->status &= ~IRQ_MOVE_PENDING;

	if (unlikely(cpus_empty(irq_desc[irq].pending_mask)))
		return;

	if (!desc->chip->set_affinity)
		return;

	assert_spin_locked(&desc->lock);

	cpus_and(tmp, irq_desc[irq].pending_mask, cpu_online_map);

	/*
	 * If there was a valid mask to work with, do the disable,
	 * re-program, enable sequence. This is *not* particularly
	 * important for level-triggered interrupts, but in an
	 * edge-triggered case we might be setting the RTE while an
	 * active trigger is coming in, which could cause some ioapics
	 * to malfunction. Being paranoid, I guess!
	 *
	 * For correct operation this depends on the caller masking
	 * the irqs.
	 */
	if (likely(!cpus_empty(tmp))) {
		desc->chip->set_affinity(irq, tmp);
	}
	cpus_clear(irq_desc[irq].pending_mask);
}
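
/*
 * Illustrative sketch, not part of the original file: roughly what
 * the desc->chip->set_affinity() hook invoked above amounts to for
 * an ioapic-style controller. first_cpu() is the real cpumask helper
 * of this era; example_write_rte() and the single-target policy are
 * hypothetical, shown only to make the hook's contract concrete.
 * Real implementations live under arch/.
 */
static void example_set_affinity(unsigned int irq, cpumask_t mask)
{
	/* Pick one CPU out of the already-validated online mask... */
	int cpu = first_cpu(mask);

	/*
	 * ...and rewrite the routing entry so future triggers of this
	 * irq are delivered there (hypothetical register write).
	 */
	example_write_rte(irq, cpu);
}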
void move_native_irq(int irq)
{
	struct irq_desc *desc = irq_desc + irq;

	if (likely(!(desc->status & IRQ_MOVE_PENDING)))
		return;

	if (unlikely(desc->status & IRQ_DISABLED))
		return;

	desc->chip->mask(irq);
	move_masked_irq(irq);
	desc->chip->unmask(irq);
}
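
/*
 * Illustrative sketch, not part of the original file: a typical
 * caller is an arch ack/flow path that runs move_native_irq() while
 * servicing the interrupt, i.e. at a point where the
 * mask/reprogram/unmask sequence above is safe. example_ack_irq()
 * and example_hw_ack() are hypothetical names.
 */
static void example_ack_irq(unsigned int irq)
{
	/*
	 * Apply any pending migration first; move_native_irq() masks
	 * the irq around the actual set_affinity() call and bails out
	 * if nothing is pending or the irq is disabled.
	 */
	move_native_irq(irq);
	example_hw_ack(irq);
}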