/*
 * Common interrupt code for 32 and 64 bit
 */
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/of.h>
#include <linux/seq_file.h>
#include <linux/smp.h>
#include <linux/ftrace.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/irq.h>

#include <asm/apic.h>
#include <asm/io_apic.h>
#include <asm/irq.h>
#include <asm/mce.h>
#include <asm/hw_irq.h>
#include <asm/desc.h>
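
/*
 * Defining CREATE_TRACE_POINTS before including the trace header below
 * instantiates the tracepoints it declares; exactly one translation
 * unit may do this per trace header.
 */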
#define CREATE_TRACE_POINTS
#include <asm/trace/irq_vectors.h>

DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);
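
/*
 * Per-CPU pointer to the register state of the interrupt currently
 * being handled, so code running in interrupt context (e.g. the timer
 * tick) can inspect the interrupted context via get_irq_regs().
 */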
DEFINE_PER_CPU(struct pt_regs *, irq_regs);
EXPORT_PER_CPU_SYMBOL(irq_regs);
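
/*
 * Counts erroneous/unattributable interrupt events; reported as the
 * "ERR" line in /proc/interrupts below.
 */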
atomic_t irq_err_count;

/*
 * 'what should we do if we get a hw irq event on an illegal vector'.
 * each architecture has to answer this itself.
 */
void ack_bad_irq(unsigned int irq)
{
	if (printk_ratelimit())
		pr_err("unexpected IRQ trap at vector %02x\n", irq);

	/*
	 * Currently unexpected vectors happen only on SMP and APIC.
	 * We _must_ ack these because every local APIC has only N
	 * irq slots per priority level, and a 'hanging, unacked' IRQ
	 * holds up an irq slot - in excessive cases (when multiple
	 * unexpected vectors occur) that might lock up the APIC
	 * completely.
	 * But only ack when the APIC is enabled -AK
	 */
	ack_APIC_irq();
}
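
/* Convenience accessor for another CPU's interrupt statistics. */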
#define irq_stats(x)		(&per_cpu(irq_stat, x))
/*
 * /proc/interrupts printing for arch specific interrupts
 */
int arch_show_interrupts(struct seq_file *p, int prec)
{
	int j;

	seq_printf(p, "%*s: ", prec, "NMI");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->__nmi_count);
	seq_puts(p, "  Non-maskable interrupts\n");
#ifdef CONFIG_X86_LOCAL_APIC
	seq_printf(p, "%*s: ", prec, "LOC");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->apic_timer_irqs);
	seq_puts(p, "  Local timer interrupts\n");

	seq_printf(p, "%*s: ", prec, "SPU");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_spurious_count);
	seq_puts(p, "  Spurious interrupts\n");
	seq_printf(p, "%*s: ", prec, "PMI");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->apic_perf_irqs);
	seq_puts(p, "  Performance monitoring interrupts\n");
	seq_printf(p, "%*s: ", prec, "IWI");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->apic_irq_work_irqs);
	seq_puts(p, "  IRQ work interrupts\n");
	seq_printf(p, "%*s: ", prec, "RTR");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->icr_read_retry_count);
	seq_puts(p, "  APIC ICR read retries\n");
	if (x86_platform_ipi_callback) {
		seq_printf(p, "%*s: ", prec, "PLT");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", irq_stats(j)->x86_platform_ipis);
		seq_puts(p, "  Platform interrupts\n");
	}
#endif
#ifdef CONFIG_SMP
	seq_printf(p, "%*s: ", prec, "RES");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_resched_count);
	seq_puts(p, "  Rescheduling interrupts\n");
	seq_printf(p, "%*s: ", prec, "CAL");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_call_count);
	seq_puts(p, "  Function call interrupts\n");
	seq_printf(p, "%*s: ", prec, "TLB");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_tlb_count);
	seq_puts(p, "  TLB shootdowns\n");
#endif
#ifdef CONFIG_X86_THERMAL_VECTOR
	seq_printf(p, "%*s: ", prec, "TRM");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_thermal_count);
	seq_puts(p, "  Thermal event interrupts\n");
#endif
#ifdef CONFIG_X86_MCE_THRESHOLD
	seq_printf(p, "%*s: ", prec, "THR");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_threshold_count);
	seq_puts(p, "  Threshold APIC interrupts\n");
#endif
#ifdef CONFIG_X86_MCE_AMD
	seq_printf(p, "%*s: ", prec, "DFR");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_deferred_error_count);
	seq_puts(p, "  Deferred Error APIC interrupts\n");
#endif
#ifdef CONFIG_X86_MCE
	seq_printf(p, "%*s: ", prec, "MCE");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(mce_exception_count, j));
	seq_puts(p, "  Machine check exceptions\n");
	seq_printf(p, "%*s: ", prec, "MCP");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
	seq_puts(p, "  Machine check polls\n");
#endif
#if IS_ENABLED(CONFIG_HYPERV) || defined(CONFIG_XEN)
	if (test_bit(HYPERVISOR_CALLBACK_VECTOR, system_vectors)) {
		seq_printf(p, "%*s: ", prec, "HYP");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ",
				   irq_stats(j)->irq_hv_callback_count);
		seq_puts(p, "  Hypervisor callback interrupts\n");
	}
#endif
#if IS_ENABLED(CONFIG_HYPERV)
	if (test_bit(HYPERV_REENLIGHTENMENT_VECTOR, system_vectors)) {
		seq_printf(p, "%*s: ", prec, "HRE");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ",
				   irq_stats(j)->irq_hv_reenlightenment_count);
		seq_puts(p, "  Hyper-V reenlightenment interrupts\n");
	}
	if (test_bit(HYPERV_STIMER0_VECTOR, system_vectors)) {
		seq_printf(p, "%*s: ", prec, "HVS");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ",
				   irq_stats(j)->hyperv_stimer0_count);
		seq_puts(p, "  Hyper-V stimer0 interrupts\n");
	}
#endif
	seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
#if defined(CONFIG_X86_IO_APIC)
	seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
#endif
#ifdef CONFIG_HAVE_KVM
	seq_printf(p, "%*s: ", prec, "PIN");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->kvm_posted_intr_ipis);
	seq_puts(p, "  Posted-interrupt notification event\n");

	seq_printf(p, "%*s: ", prec, "NPI");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ",
			   irq_stats(j)->kvm_posted_intr_nested_ipis);
	seq_puts(p, "  Nested posted-interrupt event\n");

	seq_printf(p, "%*s: ", prec, "PIW");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ",
			   irq_stats(j)->kvm_posted_intr_wakeup_ipis);
	seq_puts(p, "  Posted-interrupt wakeup event\n");
#endif
	return 0;
}

/*
 * /proc/stat helpers: the core /proc/stat code folds the per-CPU and
 * global totals below into the "intr" line.
 */
u64 arch_irq_stat_cpu(unsigned int cpu)
{
	u64 sum = irq_stats(cpu)->__nmi_count;

#ifdef CONFIG_X86_LOCAL_APIC
	sum += irq_stats(cpu)->apic_timer_irqs;
	sum += irq_stats(cpu)->irq_spurious_count;
	sum += irq_stats(cpu)->apic_perf_irqs;
	sum += irq_stats(cpu)->apic_irq_work_irqs;
	sum += irq_stats(cpu)->icr_read_retry_count;
	if (x86_platform_ipi_callback)
		sum += irq_stats(cpu)->x86_platform_ipis;
#endif
#ifdef CONFIG_SMP
	sum += irq_stats(cpu)->irq_resched_count;
	sum += irq_stats(cpu)->irq_call_count;
#endif
#ifdef CONFIG_X86_THERMAL_VECTOR
	sum += irq_stats(cpu)->irq_thermal_count;
#endif
#ifdef CONFIG_X86_MCE_THRESHOLD
	sum += irq_stats(cpu)->irq_threshold_count;
#endif
#ifdef CONFIG_X86_MCE
	sum += per_cpu(mce_exception_count, cpu);
	sum += per_cpu(mce_poll_count, cpu);
#endif
	return sum;
}
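
/*
 * Global (not per-CPU) arch-specific count; currently only the error
 * interrupt counter, since everything else is accounted per CPU above.
 */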
u64 arch_irq_stat(void)
{
	u64 sum = atomic_read(&irq_err_count);

	return sum;
}

/*
 * do_IRQ handles all normal device IRQ's (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
__visible unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	struct irq_desc *desc;
	/* high bit used in ret_from_ code */
	unsigned vector = ~regs->orig_ax;

	entering_irq();

	/* entering_irq() tells RCU that we're not quiescent. Check it. */
	RCU_LOCKDEP_WARN(!rcu_is_watching(), "IRQ failed to wake up RCU");

	desc = __this_cpu_read(vector_irq[vector]);

	if (!handle_irq(desc, regs)) {
		ack_APIC_irq();

		if (desc != VECTOR_RETRIGGERED) {
			pr_emerg_ratelimited("%s: %d.%d No irq handler for vector\n",
					     __func__, smp_processor_id(),
					     vector);
		} else {
			__this_cpu_write(vector_irq[vector], VECTOR_UNUSED);
		}
	}

	exiting_irq();

	set_irq_regs(old_regs);
	return 1;
}

#ifdef CONFIG_X86_LOCAL_APIC
/* Function pointer for generic interrupt vector handling */
void (*x86_platform_ipi_callback)(void) = NULL;
/*
 * Handler for X86_PLATFORM_IPI_VECTOR.
 */
__visible void __irq_entry smp_x86_platform_ipi(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	entering_ack_irq();
	trace_x86_platform_ipi_entry(X86_PLATFORM_IPI_VECTOR);
	inc_irq_stat(x86_platform_ipis);
	if (x86_platform_ipi_callback)
		x86_platform_ipi_callback();
	trace_x86_platform_ipi_exit(X86_PLATFORM_IPI_VECTOR);
	exiting_irq();
	set_irq_regs(old_regs);
}
#endif

#ifdef CONFIG_HAVE_KVM
static void dummy_handler(void) {}
static void (*kvm_posted_intr_wakeup_handler)(void) = dummy_handler;

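/*
 * Lets KVM install or remove its wakeup handler; passing NULL restores
 * the no-op default so the vector stays safe to take at any time.
 */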
void kvm_set_posted_intr_wakeup_handler(void (*handler)(void))
{
	if (handler)
		kvm_posted_intr_wakeup_handler = handler;
	else
		kvm_posted_intr_wakeup_handler = dummy_handler;
}
EXPORT_SYMBOL_GPL(kvm_set_posted_intr_wakeup_handler);

/*
 * Handler for POSTED_INTERRUPT_VECTOR.
 */
__visible void smp_kvm_posted_intr_ipi(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	entering_ack_irq();
	inc_irq_stat(kvm_posted_intr_ipis);
	exiting_irq();
	set_irq_regs(old_regs);
}

/*
 * Handler for POSTED_INTERRUPT_WAKEUP_VECTOR.
 */
__visible void smp_kvm_posted_intr_wakeup_ipi(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	entering_ack_irq();
	inc_irq_stat(kvm_posted_intr_wakeup_ipis);
	kvm_posted_intr_wakeup_handler();
	exiting_irq();
	set_irq_regs(old_regs);
}

/*
 * Handler for POSTED_INTERRUPT_NESTED_VECTOR.
 */
__visible void smp_kvm_posted_intr_nested_ipi(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	entering_ack_irq();
	inc_irq_stat(kvm_posted_intr_nested_ipis);
	exiting_irq();
	set_irq_regs(old_regs);
}
#endif

#ifdef CONFIG_HOTPLUG_CPU
/* A cpu has been removed from cpu_online_mask. Reset irq affinities. */
void fixup_irqs(void)
{
	unsigned int irr, vector;
	struct irq_desc *desc;
	struct irq_data *data;
	struct irq_chip *chip;

	irq_migrate_all_off_this_cpu();
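
	/*
	 * The irq core has now moved every movable interrupt off this
	 * CPU; what remains below is catching interrupts that were
	 * already latched in this CPU's IRR before the migration.
	 */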

	/*
	 * We can remove mdelay() and then send spurious interrupts to
	 * new cpu targets for all the irqs that were handled previously by
	 * this cpu. While it works, I have seen spurious interrupt messages
	 * (nothing wrong but still...).
	 *
	 * So for now, retain mdelay(1) and check the IRR and then send those
	 * interrupts to new targets as this cpu is already offlined...
	 */
	mdelay(1);

	/*
	 * We can walk the vector array of this cpu without holding
	 * vector_lock because the cpu is already marked !online, so
	 * nothing else will touch it.
	 */
	for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
		if (IS_ERR_OR_NULL(__this_cpu_read(vector_irq[vector])))
			continue;

		irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
		if (irr & (1 << (vector % 32))) {
			desc = __this_cpu_read(vector_irq[vector]);

			raw_spin_lock(&desc->lock);
			data = irq_desc_get_irq_data(desc);
			chip = irq_data_get_irq_chip(data);
			if (chip->irq_retrigger) {
				chip->irq_retrigger(data);
				__this_cpu_write(vector_irq[vector], VECTOR_RETRIGGERED);
			}
			raw_spin_unlock(&desc->lock);
		}
		if (__this_cpu_read(vector_irq[vector]) != VECTOR_RETRIGGERED)
			__this_cpu_write(vector_irq[vector], VECTOR_UNUSED);
	}
}
#endif