// SPDX-License-Identifier: GPL-2.0
/*
 *    Copyright IBM Corp. 2004, 2011
 *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
 *		 Holger Smolinski <Holger.Smolinski@de.ibm.com>,
 *		 Thomas Spatzier <tspat@de.ibm.com>,
 *
 * This file contains interrupt related functions.
 */
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/profile.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/ftrace.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/irq.h>
#include <asm/irq_regs.h>
#include <asm/cputime.h>
#include <asm/lowcore.h>
#include <asm/irq.h>
#include <asm/hw_irq.h>
#include <asm/stacktrace.h>
DEFINE_PER_CPU_SHARED_ALIGNED(struct irq_stat, irq_stat);
EXPORT_PER_CPU_SYMBOL_GPL(irq_stat);

struct irq_class {
	int irq;
	char *name;
	char *desc;
};
/*
 * The list of "main" irq classes on s390. This is the list of interrupts
 * that appear both in /proc/stat ("intr" line) and /proc/interrupts.
 * Historically only external and I/O interrupts have been part of /proc/stat.
 * We can't add the split external and I/O sub classes since the first field
 * in the "intr" line in /proc/stat is supposed to be the sum of all other
 * fields.
 * Since the external and I/O interrupt fields are already sums we would end
 * up with having a sum which accounts each interrupt twice.
 */
static const struct irq_class irqclass_main_desc[NR_IRQS_BASE] = {
	{.irq = EXT_INTERRUPT,  .name = "EXT"},
	{.irq = IO_INTERRUPT,   .name = "I/O"},
	{.irq = THIN_INTERRUPT, .name = "AIO"},
};
/*
 * The list of split external and I/O interrupts that appear only in
 * /proc/interrupts.
 * In addition this list contains non external / I/O events like NMIs.
 */
static const struct irq_class irqclass_sub_desc[] = {
	{.irq = IRQEXT_CLK, .name = "CLK", .desc = "[EXT] Clock Comparator"},
	{.irq = IRQEXT_EXC, .name = "EXC", .desc = "[EXT] External Call"},
	{.irq = IRQEXT_EMS, .name = "EMS", .desc = "[EXT] Emergency Signal"},
	{.irq = IRQEXT_TMR, .name = "TMR", .desc = "[EXT] CPU Timer"},
	{.irq = IRQEXT_TLA, .name = "TAL", .desc = "[EXT] Timing Alert"},
	{.irq = IRQEXT_PFL, .name = "PFL", .desc = "[EXT] Pseudo Page Fault"},
	{.irq = IRQEXT_DSD, .name = "DSD", .desc = "[EXT] DASD Diag"},
	{.irq = IRQEXT_VRT, .name = "VRT", .desc = "[EXT] Virtio"},
	{.irq = IRQEXT_SCP, .name = "SCP", .desc = "[EXT] Service Call"},
	{.irq = IRQEXT_IUC, .name = "IUC", .desc = "[EXT] IUCV"},
	{.irq = IRQEXT_CMS, .name = "CMS", .desc = "[EXT] CPU-Measurement: Sampling"},
	{.irq = IRQEXT_CMC, .name = "CMC", .desc = "[EXT] CPU-Measurement: Counter"},
	{.irq = IRQEXT_FTP, .name = "FTP", .desc = "[EXT] HMC FTP Service"},
	{.irq = IRQIO_CIO,  .name = "CIO", .desc = "[I/O] Common I/O Layer Interrupt"},
	{.irq = IRQIO_DAS,  .name = "DAS", .desc = "[I/O] DASD"},
	{.irq = IRQIO_C15,  .name = "C15", .desc = "[I/O] 3215"},
	{.irq = IRQIO_C70,  .name = "C70", .desc = "[I/O] 3270"},
	{.irq = IRQIO_TAP,  .name = "TAP", .desc = "[I/O] Tape"},
	{.irq = IRQIO_VMR,  .name = "VMR", .desc = "[I/O] Unit Record Devices"},
	{.irq = IRQIO_LCS,  .name = "LCS", .desc = "[I/O] LCS"},
	{.irq = IRQIO_CTC,  .name = "CTC", .desc = "[I/O] CTC"},
	{.irq = IRQIO_ADM,  .name = "ADM", .desc = "[I/O] EADM Subchannel"},
	{.irq = IRQIO_CSC,  .name = "CSC", .desc = "[I/O] CHSC Subchannel"},
	{.irq = IRQIO_VIR,  .name = "VIR", .desc = "[I/O] Virtual I/O Devices"},
	{.irq = IRQIO_QAI,  .name = "QAI", .desc = "[AIO] QDIO Adapter Interrupt"},
	{.irq = IRQIO_APB,  .name = "APB", .desc = "[AIO] AP Bus"},
	{.irq = IRQIO_PCF,  .name = "PCF", .desc = "[AIO] PCI Floating Interrupt"},
	{.irq = IRQIO_PCD,  .name = "PCD", .desc = "[AIO] PCI Directed Interrupt"},
	{.irq = IRQIO_MSI,  .name = "MSI", .desc = "[AIO] MSI Interrupt"},
	{.irq = IRQIO_VAI,  .name = "VAI", .desc = "[AIO] Virtual I/O Devices AI"},
	{.irq = IRQIO_GAL,  .name = "GAL", .desc = "[AIO] GIB Alert"},
	{.irq = NMI_NMI,    .name = "NMI", .desc = "[NMI] Machine Check"},
	{.irq = CPU_RST,    .name = "RST", .desc = "[CPU] CPU Restart"},
};
void do_IRQ(struct pt_regs *regs, int irq)
{
	struct pt_regs *old_regs;

	old_regs = set_irq_regs(regs);
	irq_enter();
	if (tod_after_eq(S390_lowcore.int_clock,
			 S390_lowcore.clock_comparator))
		/* Serve timer interrupts first. */
		clock_comparator_work();
	generic_handle_irq(irq);
	irq_exit();
	set_irq_regs(old_regs);
}
static void show_msi_interrupt(struct seq_file *p, int irq)
{
	struct irq_desc *desc;
	unsigned long flags;
	int cpu;

	desc = irq_to_desc(irq);
	if (!desc)
		return;

	raw_spin_lock_irqsave(&desc->lock, flags);
	seq_printf(p, "%3d: ", irq);
	for_each_online_cpu(cpu)
		seq_printf(p, "%10u ", kstat_irqs_cpu(irq, cpu));

	if (desc->irq_data.chip)
		seq_printf(p, " %8s", desc->irq_data.chip->name);

	if (desc->action)
		seq_printf(p, " %s", desc->action->name);

	seq_putc(p, '\n');
	raw_spin_unlock_irqrestore(&desc->lock, flags);
}
/*
 * show_interrupts is needed by /proc/interrupts.
 */
int show_interrupts(struct seq_file *p, void *v)
{
	int index = *(loff_t *) v;
	int cpu, irq;

	get_online_cpus();
	if (index == 0) {
		seq_puts(p, "           ");
		for_each_online_cpu(cpu)
			seq_printf(p, "CPU%-8d", cpu);
		seq_putc(p, '\n');
	}
	if (index < NR_IRQS_BASE) {
		seq_printf(p, "%s: ", irqclass_main_desc[index].name);
		irq = irqclass_main_desc[index].irq;
		for_each_online_cpu(cpu)
			seq_printf(p, "%10u ", kstat_irqs_cpu(irq, cpu));
		seq_putc(p, '\n');
		goto out;
	}
	if (index < nr_irqs) {
		show_msi_interrupt(p, index);
		goto out;
	}
	for (index = 0; index < NR_ARCH_IRQS; index++) {
		seq_printf(p, "%s: ", irqclass_sub_desc[index].name);
		irq = irqclass_sub_desc[index].irq;
		for_each_online_cpu(cpu)
			seq_printf(p, "%10u ",
				   per_cpu(irq_stat, cpu).irqs[irq]);
		if (irqclass_sub_desc[index].desc)
			seq_printf(p, " %s", irqclass_sub_desc[index].desc);
		seq_putc(p, '\n');
	}
out:
	put_online_cpus();
	return 0;
}
unsigned int arch_dynirq_lower_bound(unsigned int from)
{
	return from < NR_IRQS_BASE ? NR_IRQS_BASE : from;
}
/*
 * Switch to the asynchronous interrupt stack for softirq execution.
 */
void do_softirq_own_stack(void)
{
	unsigned long old, new;

	old = current_stack_pointer();
	/* Check against async. stack address range. */
	new = S390_lowcore.async_stack;
	if (((new - old) >> (PAGE_SHIFT + THREAD_SIZE_ORDER)) != 0) {
		CALL_ON_STACK(__do_softirq, new, 0);
	} else {
		/* We are already on the async stack. */
		__do_softirq();
	}
}
/*
 * ext_int_hash[index] is the list head for all external interrupts that hash
 * to this index.
 */
static struct hlist_head ext_int_hash[32] ____cacheline_aligned;
struct ext_int_info {
	ext_int_handler_t handler;
	struct hlist_node entry;
	struct rcu_head rcu;
	u16 code;
};
/* ext_int_hash_lock protects the handler lists for external interrupts */
static DEFINE_SPINLOCK(ext_int_hash_lock);
static inline int ext_hash(u16 code)
{
	BUILD_BUG_ON(!is_power_of_2(ARRAY_SIZE(ext_int_hash)));

	return (code + (code >> 9)) & (ARRAY_SIZE(ext_int_hash) - 1);
}
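
/*
 * Worked example (illustrative): an external interruption code of 0x1004
 * (clock comparator) hashes to bucket
 * (0x1004 + (0x1004 >> 9)) & 31 = (4100 + 8) & 31 = 12.
 */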
int register_external_irq(u16 code, ext_int_handler_t handler)
{
	struct ext_int_info *p;
	unsigned long flags;
	int index;

	p = kmalloc(sizeof(*p), GFP_ATOMIC);
	if (!p)
		return -ENOMEM;
	p->code = code;
	p->handler = handler;
	index = ext_hash(code);

	spin_lock_irqsave(&ext_int_hash_lock, flags);
	hlist_add_head_rcu(&p->entry, &ext_int_hash[index]);
	spin_unlock_irqrestore(&ext_int_hash_lock, flags);
	return 0;
}
EXPORT_SYMBOL(register_external_irq);
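
/*
 * Usage sketch (hypothetical caller, not part of this file): a driver
 * supplies an ext_int_handler_t and registers it for its external
 * interruption code, for example the service-signal code:
 *
 *	static void my_ext_handler(struct ext_code ext_code,
 *				   unsigned int param32, unsigned long param64)
 *	{
 *		inc_irq_stat(IRQEXT_SCP);
 *		// runs in interrupt context under RCU, must not sleep
 *	}
 *
 *	rc = register_external_irq(EXT_IRQ_SERVICE_SIG, my_ext_handler);
 *
 * unregister_external_irq() with the same code/handler pair removes it again.
 */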
int unregister_external_irq(u16 code, ext_int_handler_t handler)
{
	struct ext_int_info *p;
	unsigned long flags;
	int index = ext_hash(code);

	spin_lock_irqsave(&ext_int_hash_lock, flags);
	hlist_for_each_entry_rcu(p, &ext_int_hash[index], entry) {
		if (p->code == code && p->handler == handler) {
			hlist_del_rcu(&p->entry);
			kfree_rcu(p, rcu);
		}
	}
	spin_unlock_irqrestore(&ext_int_hash_lock, flags);
	return 0;
}
EXPORT_SYMBOL(unregister_external_irq);
static irqreturn_t do_ext_interrupt(int irq, void *dummy)
{
	struct pt_regs *regs = get_irq_regs();
	struct ext_code ext_code;
	struct ext_int_info *p;
	int index;

	ext_code = *(struct ext_code *) &regs->int_code;
	if (ext_code.code != EXT_IRQ_CLK_COMP)
		set_cpu_flag(CIF_NOHZ_DELAY);

	index = ext_hash(ext_code.code);
	rcu_read_lock();
	hlist_for_each_entry_rcu(p, &ext_int_hash[index], entry) {
		if (unlikely(p->code != ext_code.code))
			continue;
		p->handler(ext_code, regs->int_parm, regs->int_parm_long);
	}
	rcu_read_unlock();
	return IRQ_HANDLED;
}
static void __init init_ext_interrupts(void)
{
	int idx;

	for (idx = 0; idx < ARRAY_SIZE(ext_int_hash); idx++)
		INIT_HLIST_HEAD(&ext_int_hash[idx]);

	irq_set_chip_and_handler(EXT_INTERRUPT,
				 &dummy_irq_chip, handle_percpu_irq);
	if (request_irq(EXT_INTERRUPT, do_ext_interrupt, 0, "EXT", NULL))
		panic("Failed to register EXT interrupt\n");
}
void __init init_IRQ(void)
{
	BUILD_BUG_ON(ARRAY_SIZE(irqclass_sub_desc) != NR_ARCH_IRQS);
	init_cio_interrupts();
	init_airq_interrupts();
	init_ext_interrupts();
}
static DEFINE_SPINLOCK(irq_subclass_lock);
static unsigned char irq_subclass_refcount[64];
void irq_subclass_register(enum irq_subclass subclass)
{
	spin_lock(&irq_subclass_lock);
	if (!irq_subclass_refcount[subclass])
		ctl_set_bit(0, subclass);
	irq_subclass_refcount[subclass]++;
	spin_unlock(&irq_subclass_lock);
}
EXPORT_SYMBOL(irq_subclass_register);
void irq_subclass_unregister(enum irq_subclass subclass)
{
	spin_lock(&irq_subclass_lock);
	irq_subclass_refcount[subclass]--;
	if (!irq_subclass_refcount[subclass])
		ctl_clear_bit(0, subclass);
	spin_unlock(&irq_subclass_lock);
}
EXPORT_SYMBOL(irq_subclass_unregister);
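
/*
 * Usage sketch (hypothetical caller): a driver that wants a given external
 * interruption subclass delivered keeps the corresponding control register 0
 * bit set for as long as it needs it, e.g.:
 *
 *	irq_subclass_register(IRQ_SUBCLASS_MEASUREMENT_ALERT);
 *	...
 *	irq_subclass_unregister(IRQ_SUBCLASS_MEASUREMENT_ALERT);
 *
 * The refcount keeps the CR0 bit set while at least one user is registered.
 */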