/*
 * Common interrupt code for 32 and 64 bit
 */
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/seq_file.h>
#include <linux/smp.h>
#include <linux/ftrace.h>
#include <linux/delay.h>
#include <linux/export.h>

#include <asm/apic.h>
#include <asm/io_apic.h>
#include <asm/irq.h>
#include <asm/idle.h>
#include <asm/mce.h>
#include <asm/hw_irq.h>

atomic_t irq_err_count;

/* Function pointer for generic interrupt vector handling */
void (*x86_platform_ipi_callback)(void) = NULL;
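
/*
 * Platform code installs a handler by assigning this pointer during
 * init; the name below is hypothetical:
 *
 *	x86_platform_ipi_callback = my_platform_ipi_handler;
 *
 * X86_PLATFORM_IPI_VECTOR interrupts then reach it through
 * smp_x86_platform_ipi().
 */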

/*
 * 'what should we do if we get a hw irq event on an illegal vector?'
 * Each architecture has to answer this itself.
 */
void ack_bad_irq(unsigned int irq)
{
	if (printk_ratelimit())
		pr_err("unexpected IRQ trap at vector %02x\n", irq);

	/*
	 * Currently unexpected vectors happen only on SMP and APIC.
	 * We _must_ ack these because every local APIC has only N
	 * irq slots per priority level, and a 'hanging, unacked' IRQ
	 * holds up an irq slot - in excessive cases (when multiple
	 * unexpected vectors occur) that might lock up the APIC
	 * completely.
	 * But only ack when the APIC is enabled -AK
	 */
	ack_APIC_irq();
}

#define irq_stats(x)		(&per_cpu(irq_stat, x))
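
/*
 * irq_stats(cpu) points at that cpu's per-cpu irq_cpustat_t, so e.g.
 * irq_stats(j)->__nmi_count is the NMI count CPU j has accumulated
 * (read without synchronization; the counters are only statistics).
 */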

/*
 * /proc/interrupts printing for arch specific interrupts
 */
int arch_show_interrupts(struct seq_file *p, int prec)
{
	int j;

	seq_printf(p, "%*s: ", prec, "NMI");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->__nmi_count);
	seq_printf(p, "  Non-maskable interrupts\n");
#ifdef CONFIG_X86_LOCAL_APIC
	seq_printf(p, "%*s: ", prec, "LOC");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->apic_timer_irqs);
	seq_printf(p, "  Local timer interrupts\n");

	seq_printf(p, "%*s: ", prec, "SPU");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_spurious_count);
	seq_printf(p, "  Spurious interrupts\n");
	seq_printf(p, "%*s: ", prec, "PMI");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->apic_perf_irqs);
	seq_printf(p, "  Performance monitoring interrupts\n");
	seq_printf(p, "%*s: ", prec, "IWI");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->apic_irq_work_irqs);
	seq_printf(p, "  IRQ work interrupts\n");
#endif
	if (x86_platform_ipi_callback) {
		seq_printf(p, "%*s: ", prec, "PLT");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", irq_stats(j)->x86_platform_ipis);
		seq_printf(p, "  Platform interrupts\n");
	}
#ifdef CONFIG_SMP
	seq_printf(p, "%*s: ", prec, "RES");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_resched_count);
	seq_printf(p, "  Rescheduling interrupts\n");
	seq_printf(p, "%*s: ", prec, "CAL");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_call_count);
	seq_printf(p, "  Function call interrupts\n");
	seq_printf(p, "%*s: ", prec, "TLB");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_tlb_count);
	seq_printf(p, "  TLB shootdowns\n");
#endif
#ifdef CONFIG_X86_THERMAL_VECTOR
	seq_printf(p, "%*s: ", prec, "TRM");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_thermal_count);
	seq_printf(p, "  Thermal event interrupts\n");
#endif
#ifdef CONFIG_X86_MCE_THRESHOLD
	seq_printf(p, "%*s: ", prec, "THR");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_threshold_count);
	seq_printf(p, "  Threshold APIC interrupts\n");
#endif
#ifdef CONFIG_X86_MCE
	seq_printf(p, "%*s: ", prec, "MCE");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(mce_exception_count, j));
	seq_printf(p, "  Machine check exceptions\n");
	seq_printf(p, "%*s: ", prec, "MCP");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
	seq_printf(p, "  Machine check polls\n");
#endif
	seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
#if defined(CONFIG_X86_IO_APIC)
	seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
#endif
	return 0;
}
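
/*
 * Each row prints a label right-aligned to 'prec' columns, one %10u
 * count per online CPU, then a description, e.g. (values made up):
 *
 *	NMI:          0          1   Non-maskable interrupts
 */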

/*
 * /proc/stat helpers.
 */
u64 arch_irq_stat_cpu(unsigned int cpu)
{
	u64 sum = irq_stats(cpu)->__nmi_count;

#ifdef CONFIG_X86_LOCAL_APIC
	sum += irq_stats(cpu)->apic_timer_irqs;
	sum += irq_stats(cpu)->irq_spurious_count;
	sum += irq_stats(cpu)->apic_perf_irqs;
	sum += irq_stats(cpu)->apic_irq_work_irqs;
#endif
	if (x86_platform_ipi_callback)
		sum += irq_stats(cpu)->x86_platform_ipis;
#ifdef CONFIG_SMP
	sum += irq_stats(cpu)->irq_resched_count;
	sum += irq_stats(cpu)->irq_call_count;
	sum += irq_stats(cpu)->irq_tlb_count;
#endif
#ifdef CONFIG_X86_THERMAL_VECTOR
	sum += irq_stats(cpu)->irq_thermal_count;
#endif
#ifdef CONFIG_X86_MCE_THRESHOLD
	sum += irq_stats(cpu)->irq_threshold_count;
#endif
#ifdef CONFIG_X86_MCE
	sum += per_cpu(mce_exception_count, cpu);
	sum += per_cpu(mce_poll_count, cpu);
#endif
	return sum;
}
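
/*
 * These totals back the "intr" line of /proc/stat: the generic code
 * adds arch_irq_stat_cpu() for each cpu plus the global arch_irq_stat()
 * on top of the kernel's own per-irq counters.
 */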

u64 arch_irq_stat(void)
{
	u64 sum = atomic_read(&irq_err_count);

#ifdef CONFIG_X86_IO_APIC
	sum += atomic_read(&irq_mis_count);
#endif
	return sum;
}

/*
 * do_IRQ handles all normal device IRQs (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	/* high bit used in ret_from_ code */
	unsigned vector = ~regs->orig_ax;
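	/*
	 * The entry stubs store the one's complement of the vector in
	 * orig_ax, so complementing it here recovers the vector: for
	 * vector 0x31 the stub stores ~0x31, and ~(~0x31) == 0x31. The
	 * high bit is guaranteed set, which the ret_from_ code relies on.
	 */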
	unsigned irq;

	irq_enter();
	exit_idle();

	irq = __this_cpu_read(vector_irq[vector]);

	if (!handle_irq(irq, regs)) {
		ack_APIC_irq();

		if (printk_ratelimit())
			pr_emerg("%s: %d.%d No irq handler for vector (irq %d)\n",
				__func__, smp_processor_id(), vector, irq);
	}

	irq_exit();

	set_irq_regs(old_regs);
	return 1;
}

/*
 * Handler for X86_PLATFORM_IPI_VECTOR.
 */
void smp_x86_platform_ipi(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	ack_APIC_irq();

	irq_enter();

	exit_idle();

	inc_irq_stat(x86_platform_ipis);

	if (x86_platform_ipi_callback)
		x86_platform_ipi_callback();

	irq_exit();

	set_irq_regs(old_regs);
}

EXPORT_SYMBOL_GPL(vector_used_by_percpu_irq);

#ifdef CONFIG_HOTPLUG_CPU
/* A cpu has been removed from cpu_online_mask.  Reset irq affinities. */
void fixup_irqs(void)
{
	unsigned int irq, vector;
	static int warned;
	struct irq_desc *desc;
	struct irq_data *data;
	struct irq_chip *chip;

	for_each_irq_desc(irq, desc) {
		int break_affinity = 0;
		int set_affinity = 1;
		const struct cpumask *affinity;

		if (!desc)
			continue;
		if (irq == 2)
			continue;

		/* interrupts are disabled at this point */
		raw_spin_lock(&desc->lock);

		data = irq_desc_get_irq_data(desc);
		affinity = data->affinity;
		if (!irq_has_action(irq) || irqd_is_per_cpu(data) ||
		    cpumask_subset(affinity, cpu_online_mask)) {
			raw_spin_unlock(&desc->lock);
			continue;
		}

		/*
		 * Complete the irq move. This cpu is going down and for
		 * non intr-remapping case, we can't wait till this interrupt
		 * arrives at this cpu before completing the irq move.
		 */
		irq_force_complete_move(irq);

		if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
			break_affinity = 1;
			affinity = cpu_all_mask;
		}

		chip = irq_data_get_irq_chip(data);
		if (!irqd_can_move_in_process_context(data) && chip->irq_mask)
			chip->irq_mask(data);

		if (chip->irq_set_affinity)
			chip->irq_set_affinity(data, affinity, true);
		else if (!(warned++))
			set_affinity = 0;

		if (!irqd_can_move_in_process_context(data) &&
		    !irqd_irq_disabled(data) && chip->irq_unmask)
			chip->irq_unmask(data);

		raw_spin_unlock(&desc->lock);

		if (break_affinity && set_affinity)
			printk("Broke affinity for irq %i\n", irq);
		else if (!set_affinity)
			printk("Cannot set affinity for irq %i\n", irq);
	}

	/*
	 * We can remove mdelay() and then send spurious interrupts to
	 * new cpu targets for all the irqs that were handled previously by
	 * this cpu. While it works, I have seen spurious interrupt messages
	 * (nothing wrong but still...).
	 *
	 * So for now, retain mdelay(1) and check the IRR and then send those
	 * interrupts to new targets as this cpu is already offlined...
	 */
	mdelay(1);

	for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
		unsigned int irr;

		if (__this_cpu_read(vector_irq[vector]) < 0)
			continue;

		irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
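		/*
		 * IRR is architecturally eight 32-bit registers spaced
		 * 0x10 apart, one bit per vector: vector v lives in
		 * register v / 32 at bit v % 32. E.g. vector 0x31 (49)
		 * is bit 17 of the register at APIC_IRR + 0x10.
		 */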
		if (irr & (1 << (vector % 32))) {
			irq = __this_cpu_read(vector_irq[vector]);

			desc = irq_to_desc(irq);
			data = irq_desc_get_irq_data(desc);
			chip = irq_data_get_irq_chip(data);
			raw_spin_lock(&desc->lock);
			if (chip->irq_retrigger)
				chip->irq_retrigger(data);
			raw_spin_unlock(&desc->lock);
		}
	}
}
#endif