1 // SPDX-License-Identifier: GPL-2.0
2 #define KMSG_COMPONENT "zpci"
3 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
5 #include <linux/kernel.h>
7 #include <linux/kernel_stat.h>
10 #include <linux/smp.h>
/* IRQ delivery mode, chosen once during zpci_irq_init(). */
static enum {FLOATING, DIRECTED} irq_delivery;
/*
 * Operation controls passed to zpci_set_irq_ctrl()/__zpci_set_irq_ctrl().
 * The D_* variants apply to CPU directed interrupts; SET_CPU installs the
 * per-CPU directed interrupt vector (see cpu_enable_directed_irq()).
 */
#define SIC_IRQ_MODE_ALL 0
#define SIC_IRQ_MODE_SINGLE 1	/* re-arm with suppression after one IRQ */
#define SIC_IRQ_MODE_DIRECT 4
#define SIC_IRQ_MODE_D_ALL 16
#define SIC_IRQ_MODE_D_SINGLE 17
#define SIC_IRQ_MODE_SET_CPU 18
/*
 * summary bit vector
 * FLOATING - summary bit per function
 * DIRECTED - summary bit per cpu (only used in fallback path)
 */
static struct airq_iv *zpci_sbv;
/*
 * interrupt bit vectors
 * FLOATING - interrupt bit vector per function
 * DIRECTED - interrupt bit vector per cpu
 */
static struct airq_iv **zpci_ibv;
38 /* Modify PCI: Register adapter interruptions */
39 static int zpci_set_airq(struct zpci_dev
*zdev
)
41 u64 req
= ZPCI_CREATE_REQ(zdev
->fh
, 0, ZPCI_MOD_FC_REG_INT
);
42 struct zpci_fib fib
= {0};
45 fib
.fmt0
.isc
= PCI_ISC
;
46 fib
.fmt0
.sum
= 1; /* enable summary notifications */
47 fib
.fmt0
.noi
= airq_iv_end(zdev
->aibv
);
48 fib
.fmt0
.aibv
= (unsigned long) zdev
->aibv
->vector
;
49 fib
.fmt0
.aibvo
= 0; /* each zdev has its own interrupt vector */
50 fib
.fmt0
.aisb
= (unsigned long) zpci_sbv
->vector
+ (zdev
->aisb
/64)*8;
51 fib
.fmt0
.aisbo
= zdev
->aisb
& 63;
53 return zpci_mod_fc(req
, &fib
, &status
) ? -EIO
: 0;
56 /* Modify PCI: Unregister adapter interruptions */
57 static int zpci_clear_airq(struct zpci_dev
*zdev
)
59 u64 req
= ZPCI_CREATE_REQ(zdev
->fh
, 0, ZPCI_MOD_FC_DEREG_INT
);
60 struct zpci_fib fib
= {0};
63 cc
= zpci_mod_fc(req
, &fib
, &status
);
64 if (cc
== 3 || (cc
== 1 && status
== 24))
65 /* Function already gone or IRQs already deregistered. */
71 /* Modify PCI: Register CPU directed interruptions */
72 static int zpci_set_directed_irq(struct zpci_dev
*zdev
)
74 u64 req
= ZPCI_CREATE_REQ(zdev
->fh
, 0, ZPCI_MOD_FC_REG_INT_D
);
75 struct zpci_fib fib
= {0};
79 fib
.fmt1
.noi
= zdev
->msi_nr_irqs
;
80 fib
.fmt1
.dibvo
= zdev
->msi_first_bit
;
82 return zpci_mod_fc(req
, &fib
, &status
) ? -EIO
: 0;
85 /* Modify PCI: Unregister CPU directed interruptions */
86 static int zpci_clear_directed_irq(struct zpci_dev
*zdev
)
88 u64 req
= ZPCI_CREATE_REQ(zdev
->fh
, 0, ZPCI_MOD_FC_DEREG_INT_D
);
89 struct zpci_fib fib
= {0};
93 cc
= zpci_mod_fc(req
, &fib
, &status
);
94 if (cc
== 3 || (cc
== 1 && status
== 24))
95 /* Function already gone or IRQs already deregistered. */
101 static int zpci_set_irq_affinity(struct irq_data
*data
, const struct cpumask
*dest
,
104 struct msi_desc
*entry
= irq_get_msi_desc(data
->irq
);
105 struct msi_msg msg
= entry
->msg
;
107 msg
.address_lo
&= 0xff0000ff;
108 msg
.address_lo
|= (cpumask_first(dest
) << 8);
109 pci_write_msi_msg(data
->irq
, &msg
);
111 return IRQ_SET_MASK_OK
;
114 static struct irq_chip zpci_irq_chip
= {
116 .irq_unmask
= pci_msi_unmask_irq
,
117 .irq_mask
= pci_msi_mask_irq
,
118 .irq_set_affinity
= zpci_set_irq_affinity
,
121 static void zpci_handle_cpu_local_irq(bool rescan
)
123 struct airq_iv
*dibv
= zpci_ibv
[smp_processor_id()];
128 /* Scan the directed IRQ bit vector */
129 bit
= airq_iv_scan(dibv
, bit
, airq_iv_end(dibv
));
131 if (!rescan
|| irqs_on
++)
132 /* End of second scan with interrupts on. */
134 /* First scan complete, reenable interrupts. */
135 if (zpci_set_irq_ctrl(SIC_IRQ_MODE_D_SINGLE
, PCI_ISC
))
140 inc_irq_stat(IRQIO_MSI
);
141 generic_handle_irq(airq_iv_get_data(dibv
, bit
));
145 struct cpu_irq_data
{
146 call_single_data_t csd
;
149 static DEFINE_PER_CPU_SHARED_ALIGNED(struct cpu_irq_data
, irq_data
);
151 static void zpci_handle_remote_irq(void *data
)
153 atomic_t
*scheduled
= data
;
156 zpci_handle_cpu_local_irq(false);
157 } while (atomic_dec_return(scheduled
));
160 static void zpci_handle_fallback_irq(void)
162 struct cpu_irq_data
*cpu_data
;
167 cpu
= airq_iv_scan(zpci_sbv
, cpu
, airq_iv_end(zpci_sbv
));
170 /* End of second scan with interrupts on. */
172 /* First scan complete, reenable interrupts. */
173 if (zpci_set_irq_ctrl(SIC_IRQ_MODE_SINGLE
, PCI_ISC
))
178 cpu_data
= &per_cpu(irq_data
, cpu
);
179 if (atomic_inc_return(&cpu_data
->scheduled
) > 1)
182 cpu_data
->csd
.func
= zpci_handle_remote_irq
;
183 cpu_data
->csd
.info
= &cpu_data
->scheduled
;
184 cpu_data
->csd
.flags
= 0;
185 smp_call_function_single_async(cpu
, &cpu_data
->csd
);
189 static void zpci_directed_irq_handler(struct airq_struct
*airq
, bool floating
)
192 inc_irq_stat(IRQIO_PCF
);
193 zpci_handle_fallback_irq();
195 inc_irq_stat(IRQIO_PCD
);
196 zpci_handle_cpu_local_irq(true);
200 static void zpci_floating_irq_handler(struct airq_struct
*airq
, bool floating
)
202 unsigned long si
, ai
;
203 struct airq_iv
*aibv
;
206 inc_irq_stat(IRQIO_PCF
);
208 /* Scan adapter summary indicator bit vector */
209 si
= airq_iv_scan(zpci_sbv
, si
, airq_iv_end(zpci_sbv
));
212 /* End of second scan with interrupts on. */
214 /* First scan complete, reenable interrupts. */
215 if (zpci_set_irq_ctrl(SIC_IRQ_MODE_SINGLE
, PCI_ISC
))
221 /* Scan the adapter interrupt vector for this device. */
224 ai
= airq_iv_scan(aibv
, ai
, airq_iv_end(aibv
));
227 inc_irq_stat(IRQIO_MSI
);
228 airq_iv_lock(aibv
, ai
);
229 generic_handle_irq(airq_iv_get_data(aibv
, ai
));
230 airq_iv_unlock(aibv
, ai
);
235 int arch_setup_msi_irqs(struct pci_dev
*pdev
, int nvec
, int type
)
237 struct zpci_dev
*zdev
= to_zpci(pdev
);
238 unsigned int hwirq
, msi_vecs
, cpu
;
240 struct msi_desc
*msi
;
245 zdev
->msi_first_bit
= -1U;
246 if (type
== PCI_CAP_ID_MSI
&& nvec
> 1)
248 msi_vecs
= min_t(unsigned int, nvec
, zdev
->max_msi
);
250 if (irq_delivery
== DIRECTED
) {
251 /* Allocate cpu vector bits */
252 bit
= airq_iv_alloc(zpci_ibv
[0], msi_vecs
);
256 /* Allocate adapter summary indicator bit */
257 bit
= airq_iv_alloc_bit(zpci_sbv
);
262 /* Create adapter interrupt vector */
263 zdev
->aibv
= airq_iv_create(msi_vecs
, AIRQ_IV_DATA
| AIRQ_IV_BITLOCK
);
267 /* Wire up shortcut pointer */
268 zpci_ibv
[bit
] = zdev
->aibv
;
269 /* Each function has its own interrupt vector */
273 /* Request MSI interrupts */
275 for_each_pci_msi_entry(msi
, pdev
) {
277 if (hwirq
- bit
>= msi_vecs
)
279 irq
= __irq_alloc_descs(-1, 0, 1, 0, THIS_MODULE
, msi
->affinity
);
282 rc
= irq_set_msi_desc(irq
, msi
);
285 irq_set_chip_and_handler(irq
, &zpci_irq_chip
,
287 msg
.data
= hwirq
- bit
;
288 if (irq_delivery
== DIRECTED
) {
289 msg
.address_lo
= zdev
->msi_addr
& 0xff0000ff;
290 msg
.address_lo
|= msi
->affinity
?
291 (cpumask_first(&msi
->affinity
->mask
) << 8) : 0;
292 for_each_possible_cpu(cpu
) {
293 airq_iv_set_data(zpci_ibv
[cpu
], hwirq
, irq
);
296 msg
.address_lo
= zdev
->msi_addr
& 0xffffffff;
297 airq_iv_set_data(zdev
->aibv
, hwirq
, irq
);
299 msg
.address_hi
= zdev
->msi_addr
>> 32;
300 pci_write_msi_msg(irq
, &msg
);
304 zdev
->msi_first_bit
= bit
;
305 zdev
->msi_nr_irqs
= msi_vecs
;
307 if (irq_delivery
== DIRECTED
)
308 rc
= zpci_set_directed_irq(zdev
);
310 rc
= zpci_set_airq(zdev
);
314 return (msi_vecs
== nvec
) ? 0 : msi_vecs
;
317 void arch_teardown_msi_irqs(struct pci_dev
*pdev
)
319 struct zpci_dev
*zdev
= to_zpci(pdev
);
320 struct msi_desc
*msi
;
323 /* Disable interrupts */
324 if (irq_delivery
== DIRECTED
)
325 rc
= zpci_clear_directed_irq(zdev
);
327 rc
= zpci_clear_airq(zdev
);
331 /* Release MSI interrupts */
332 for_each_pci_msi_entry(msi
, pdev
) {
335 if (msi
->msi_attrib
.is_msix
)
336 __pci_msix_desc_mask_irq(msi
, 1);
338 __pci_msi_desc_mask_irq(msi
, 1, 1);
339 irq_set_msi_desc(msi
->irq
, NULL
);
340 irq_free_desc(msi
->irq
);
341 msi
->msg
.address_lo
= 0;
342 msi
->msg
.address_hi
= 0;
347 if (zdev
->aisb
!= -1UL) {
348 zpci_ibv
[zdev
->aisb
] = NULL
;
349 airq_iv_free_bit(zpci_sbv
, zdev
->aisb
);
353 airq_iv_release(zdev
->aibv
);
357 if ((irq_delivery
== DIRECTED
) && zdev
->msi_first_bit
!= -1U)
358 airq_iv_free(zpci_ibv
[0], zdev
->msi_first_bit
, zdev
->msi_nr_irqs
);
361 static struct airq_struct zpci_airq
= {
362 .handler
= zpci_floating_irq_handler
,
366 static void __init
cpu_enable_directed_irq(void *unused
)
368 union zpci_sic_iib iib
= {{0}};
370 iib
.cdiib
.dibv_addr
= (u64
) zpci_ibv
[smp_processor_id()]->vector
;
372 __zpci_set_irq_ctrl(SIC_IRQ_MODE_SET_CPU
, 0, &iib
);
373 zpci_set_irq_ctrl(SIC_IRQ_MODE_D_SINGLE
, PCI_ISC
);
376 static int __init
zpci_directed_irq_init(void)
378 union zpci_sic_iib iib
= {{0}};
381 zpci_sbv
= airq_iv_create(num_possible_cpus(), 0);
385 iib
.diib
.isc
= PCI_ISC
;
386 iib
.diib
.nr_cpus
= num_possible_cpus();
387 iib
.diib
.disb_addr
= (u64
) zpci_sbv
->vector
;
388 __zpci_set_irq_ctrl(SIC_IRQ_MODE_DIRECT
, 0, &iib
);
390 zpci_ibv
= kcalloc(num_possible_cpus(), sizeof(*zpci_ibv
),
395 for_each_possible_cpu(cpu
) {
397 * Per CPU IRQ vectors look the same but bit-allocation
398 * is only done on the first vector.
400 zpci_ibv
[cpu
] = airq_iv_create(cache_line_size() * BITS_PER_BYTE
,
403 (!cpu
? AIRQ_IV_ALLOC
: 0));
407 on_each_cpu(cpu_enable_directed_irq
, NULL
, 1);
409 zpci_irq_chip
.irq_set_affinity
= zpci_set_irq_affinity
;
414 static int __init
zpci_floating_irq_init(void)
416 zpci_ibv
= kcalloc(ZPCI_NR_DEVICES
, sizeof(*zpci_ibv
), GFP_KERNEL
);
420 zpci_sbv
= airq_iv_create(ZPCI_NR_DEVICES
, AIRQ_IV_ALLOC
);
431 int __init
zpci_irq_init(void)
435 irq_delivery
= sclp
.has_dirq
? DIRECTED
: FLOATING
;
436 if (s390_pci_force_floating
)
437 irq_delivery
= FLOATING
;
439 if (irq_delivery
== DIRECTED
)
440 zpci_airq
.handler
= zpci_directed_irq_handler
;
442 rc
= register_adapter_interrupt(&zpci_airq
);
445 /* Set summary to 1 to be called every time for the ISC. */
446 *zpci_airq
.lsi_ptr
= 1;
448 switch (irq_delivery
) {
450 rc
= zpci_floating_irq_init();
453 rc
= zpci_directed_irq_init();
461 * Enable floating IRQs (with suppression after one IRQ). When using
462 * directed IRQs this enables the fallback path.
464 zpci_set_irq_ctrl(SIC_IRQ_MODE_SINGLE
, PCI_ISC
);
468 unregister_adapter_interrupt(&zpci_airq
);
473 void __init
zpci_irq_exit(void)
477 if (irq_delivery
== DIRECTED
) {
478 for_each_possible_cpu(cpu
) {
479 airq_iv_release(zpci_ibv
[cpu
]);
484 airq_iv_release(zpci_sbv
);
485 unregister_adapter_interrupt(&zpci_airq
);