1 // SPDX-License-Identifier: GPL-2.0
2 #define KMSG_COMPONENT "zpci"
3 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
5 #include <linux/kernel.h>
7 #include <linux/kernel_stat.h>
10 #include <linux/smp.h>
/*
 * IRQ delivery mode, chosen at init time: FLOATING IRQs may be taken by
 * any CPU, DIRECTED IRQs are targeted at a specific CPU.
 */
static enum {FLOATING, DIRECTED} irq_delivery;
/* Operation modes for the Set Interruption Controls (SIC) instruction */
#define SIC_IRQ_MODE_ALL		0
#define SIC_IRQ_MODE_SINGLE		1
#define SIC_IRQ_MODE_DIRECT		4
#define SIC_IRQ_MODE_D_ALL		16
#define SIC_IRQ_MODE_D_SINGLE		17
#define SIC_IRQ_MODE_SET_CPU		18
/*
 * summary bit vector
 * FLOATING - summary bit per function
 * DIRECTED - summary bit per cpu (only used in fallback path)
 */
static struct airq_iv *zpci_sbv;
/*
 * interrupt bit vectors
 * FLOATING - interrupt bit vector per function
 * DIRECTED - interrupt bit vector per cpu
 */
static struct airq_iv **zpci_ibv;
38 /* Modify PCI: Register adapter interruptions */
39 static int zpci_set_airq(struct zpci_dev
*zdev
)
41 u64 req
= ZPCI_CREATE_REQ(zdev
->fh
, 0, ZPCI_MOD_FC_REG_INT
);
42 struct zpci_fib fib
= {0};
45 fib
.fmt0
.isc
= PCI_ISC
;
46 fib
.fmt0
.sum
= 1; /* enable summary notifications */
47 fib
.fmt0
.noi
= airq_iv_end(zdev
->aibv
);
48 fib
.fmt0
.aibv
= (unsigned long) zdev
->aibv
->vector
;
49 fib
.fmt0
.aibvo
= 0; /* each zdev has its own interrupt vector */
50 fib
.fmt0
.aisb
= (unsigned long) zpci_sbv
->vector
+ (zdev
->aisb
/64)*8;
51 fib
.fmt0
.aisbo
= zdev
->aisb
& 63;
53 return zpci_mod_fc(req
, &fib
, &status
) ? -EIO
: 0;
56 /* Modify PCI: Unregister adapter interruptions */
57 static int zpci_clear_airq(struct zpci_dev
*zdev
)
59 u64 req
= ZPCI_CREATE_REQ(zdev
->fh
, 0, ZPCI_MOD_FC_DEREG_INT
);
60 struct zpci_fib fib
= {0};
63 cc
= zpci_mod_fc(req
, &fib
, &status
);
64 if (cc
== 3 || (cc
== 1 && status
== 24))
65 /* Function already gone or IRQs already deregistered. */
71 /* Modify PCI: Register CPU directed interruptions */
72 static int zpci_set_directed_irq(struct zpci_dev
*zdev
)
74 u64 req
= ZPCI_CREATE_REQ(zdev
->fh
, 0, ZPCI_MOD_FC_REG_INT_D
);
75 struct zpci_fib fib
= {0};
79 fib
.fmt1
.noi
= zdev
->msi_nr_irqs
;
80 fib
.fmt1
.dibvo
= zdev
->msi_first_bit
;
82 return zpci_mod_fc(req
, &fib
, &status
) ? -EIO
: 0;
85 /* Modify PCI: Unregister CPU directed interruptions */
86 static int zpci_clear_directed_irq(struct zpci_dev
*zdev
)
88 u64 req
= ZPCI_CREATE_REQ(zdev
->fh
, 0, ZPCI_MOD_FC_DEREG_INT_D
);
89 struct zpci_fib fib
= {0};
93 cc
= zpci_mod_fc(req
, &fib
, &status
);
94 if (cc
== 3 || (cc
== 1 && status
== 24))
95 /* Function already gone or IRQs already deregistered. */
101 static int zpci_set_irq_affinity(struct irq_data
*data
, const struct cpumask
*dest
,
104 struct msi_desc
*entry
= irq_get_msi_desc(data
->irq
);
105 struct msi_msg msg
= entry
->msg
;
107 msg
.address_lo
&= 0xff0000ff;
108 msg
.address_lo
|= (cpumask_first(dest
) << 8);
109 pci_write_msi_msg(data
->irq
, &msg
);
111 return IRQ_SET_MASK_OK
;
114 static struct irq_chip zpci_irq_chip
= {
116 .irq_unmask
= pci_msi_unmask_irq
,
117 .irq_mask
= pci_msi_mask_irq
,
120 static void zpci_handle_cpu_local_irq(bool rescan
)
122 struct airq_iv
*dibv
= zpci_ibv
[smp_processor_id()];
127 /* Scan the directed IRQ bit vector */
128 bit
= airq_iv_scan(dibv
, bit
, airq_iv_end(dibv
));
130 if (!rescan
|| irqs_on
++)
131 /* End of second scan with interrupts on. */
133 /* First scan complete, reenable interrupts. */
134 if (zpci_set_irq_ctrl(SIC_IRQ_MODE_D_SINGLE
, PCI_ISC
))
139 inc_irq_stat(IRQIO_MSI
);
140 generic_handle_irq(airq_iv_get_data(dibv
, bit
));
144 struct cpu_irq_data
{
145 call_single_data_t csd
;
148 static DEFINE_PER_CPU_SHARED_ALIGNED(struct cpu_irq_data
, irq_data
);
150 static void zpci_handle_remote_irq(void *data
)
152 atomic_t
*scheduled
= data
;
155 zpci_handle_cpu_local_irq(false);
156 } while (atomic_dec_return(scheduled
));
159 static void zpci_handle_fallback_irq(void)
161 struct cpu_irq_data
*cpu_data
;
166 cpu
= airq_iv_scan(zpci_sbv
, cpu
, airq_iv_end(zpci_sbv
));
169 /* End of second scan with interrupts on. */
171 /* First scan complete, reenable interrupts. */
172 if (zpci_set_irq_ctrl(SIC_IRQ_MODE_SINGLE
, PCI_ISC
))
177 cpu_data
= &per_cpu(irq_data
, cpu
);
178 if (atomic_inc_return(&cpu_data
->scheduled
) > 1)
181 cpu_data
->csd
.func
= zpci_handle_remote_irq
;
182 cpu_data
->csd
.info
= &cpu_data
->scheduled
;
183 cpu_data
->csd
.flags
= 0;
184 smp_call_function_single_async(cpu
, &cpu_data
->csd
);
188 static void zpci_directed_irq_handler(struct airq_struct
*airq
, bool floating
)
191 inc_irq_stat(IRQIO_PCF
);
192 zpci_handle_fallback_irq();
194 inc_irq_stat(IRQIO_PCD
);
195 zpci_handle_cpu_local_irq(true);
199 static void zpci_floating_irq_handler(struct airq_struct
*airq
, bool floating
)
201 unsigned long si
, ai
;
202 struct airq_iv
*aibv
;
205 inc_irq_stat(IRQIO_PCF
);
207 /* Scan adapter summary indicator bit vector */
208 si
= airq_iv_scan(zpci_sbv
, si
, airq_iv_end(zpci_sbv
));
211 /* End of second scan with interrupts on. */
213 /* First scan complete, reenable interrupts. */
214 if (zpci_set_irq_ctrl(SIC_IRQ_MODE_SINGLE
, PCI_ISC
))
220 /* Scan the adapter interrupt vector for this device. */
223 ai
= airq_iv_scan(aibv
, ai
, airq_iv_end(aibv
));
226 inc_irq_stat(IRQIO_MSI
);
227 airq_iv_lock(aibv
, ai
);
228 generic_handle_irq(airq_iv_get_data(aibv
, ai
));
229 airq_iv_unlock(aibv
, ai
);
234 int arch_setup_msi_irqs(struct pci_dev
*pdev
, int nvec
, int type
)
236 struct zpci_dev
*zdev
= to_zpci(pdev
);
237 unsigned int hwirq
, msi_vecs
, cpu
;
239 struct msi_desc
*msi
;
244 zdev
->msi_first_bit
= -1U;
245 if (type
== PCI_CAP_ID_MSI
&& nvec
> 1)
247 msi_vecs
= min_t(unsigned int, nvec
, zdev
->max_msi
);
249 if (irq_delivery
== DIRECTED
) {
250 /* Allocate cpu vector bits */
251 bit
= airq_iv_alloc(zpci_ibv
[0], msi_vecs
);
255 /* Allocate adapter summary indicator bit */
256 bit
= airq_iv_alloc_bit(zpci_sbv
);
261 /* Create adapter interrupt vector */
262 zdev
->aibv
= airq_iv_create(msi_vecs
, AIRQ_IV_DATA
| AIRQ_IV_BITLOCK
);
266 /* Wire up shortcut pointer */
267 zpci_ibv
[bit
] = zdev
->aibv
;
268 /* Each function has its own interrupt vector */
272 /* Request MSI interrupts */
274 for_each_pci_msi_entry(msi
, pdev
) {
276 if (hwirq
- bit
>= msi_vecs
)
278 irq
= __irq_alloc_descs(-1, 0, 1, 0, THIS_MODULE
,
279 (irq_delivery
== DIRECTED
) ?
280 msi
->affinity
: NULL
);
283 rc
= irq_set_msi_desc(irq
, msi
);
286 irq_set_chip_and_handler(irq
, &zpci_irq_chip
,
288 msg
.data
= hwirq
- bit
;
289 if (irq_delivery
== DIRECTED
) {
290 msg
.address_lo
= zdev
->msi_addr
& 0xff0000ff;
291 msg
.address_lo
|= msi
->affinity
?
292 (cpumask_first(&msi
->affinity
->mask
) << 8) : 0;
293 for_each_possible_cpu(cpu
) {
294 airq_iv_set_data(zpci_ibv
[cpu
], hwirq
, irq
);
297 msg
.address_lo
= zdev
->msi_addr
& 0xffffffff;
298 airq_iv_set_data(zdev
->aibv
, hwirq
, irq
);
300 msg
.address_hi
= zdev
->msi_addr
>> 32;
301 pci_write_msi_msg(irq
, &msg
);
305 zdev
->msi_first_bit
= bit
;
306 zdev
->msi_nr_irqs
= msi_vecs
;
308 if (irq_delivery
== DIRECTED
)
309 rc
= zpci_set_directed_irq(zdev
);
311 rc
= zpci_set_airq(zdev
);
315 return (msi_vecs
== nvec
) ? 0 : msi_vecs
;
318 void arch_teardown_msi_irqs(struct pci_dev
*pdev
)
320 struct zpci_dev
*zdev
= to_zpci(pdev
);
321 struct msi_desc
*msi
;
324 /* Disable interrupts */
325 if (irq_delivery
== DIRECTED
)
326 rc
= zpci_clear_directed_irq(zdev
);
328 rc
= zpci_clear_airq(zdev
);
332 /* Release MSI interrupts */
333 for_each_pci_msi_entry(msi
, pdev
) {
336 if (msi
->msi_attrib
.is_msix
)
337 __pci_msix_desc_mask_irq(msi
, 1);
339 __pci_msi_desc_mask_irq(msi
, 1, 1);
340 irq_set_msi_desc(msi
->irq
, NULL
);
341 irq_free_desc(msi
->irq
);
342 msi
->msg
.address_lo
= 0;
343 msi
->msg
.address_hi
= 0;
348 if (zdev
->aisb
!= -1UL) {
349 zpci_ibv
[zdev
->aisb
] = NULL
;
350 airq_iv_free_bit(zpci_sbv
, zdev
->aisb
);
354 airq_iv_release(zdev
->aibv
);
358 if ((irq_delivery
== DIRECTED
) && zdev
->msi_first_bit
!= -1U)
359 airq_iv_free(zpci_ibv
[0], zdev
->msi_first_bit
, zdev
->msi_nr_irqs
);
362 static struct airq_struct zpci_airq
= {
363 .handler
= zpci_floating_irq_handler
,
367 static void __init
cpu_enable_directed_irq(void *unused
)
369 union zpci_sic_iib iib
= {{0}};
371 iib
.cdiib
.dibv_addr
= (u64
) zpci_ibv
[smp_processor_id()]->vector
;
373 __zpci_set_irq_ctrl(SIC_IRQ_MODE_SET_CPU
, 0, &iib
);
374 zpci_set_irq_ctrl(SIC_IRQ_MODE_D_SINGLE
, PCI_ISC
);
377 static int __init
zpci_directed_irq_init(void)
379 union zpci_sic_iib iib
= {{0}};
382 zpci_sbv
= airq_iv_create(num_possible_cpus(), 0);
386 iib
.diib
.isc
= PCI_ISC
;
387 iib
.diib
.nr_cpus
= num_possible_cpus();
388 iib
.diib
.disb_addr
= (u64
) zpci_sbv
->vector
;
389 __zpci_set_irq_ctrl(SIC_IRQ_MODE_DIRECT
, 0, &iib
);
391 zpci_ibv
= kcalloc(num_possible_cpus(), sizeof(*zpci_ibv
),
396 for_each_possible_cpu(cpu
) {
398 * Per CPU IRQ vectors look the same but bit-allocation
399 * is only done on the first vector.
401 zpci_ibv
[cpu
] = airq_iv_create(cache_line_size() * BITS_PER_BYTE
,
404 (!cpu
? AIRQ_IV_ALLOC
: 0));
408 on_each_cpu(cpu_enable_directed_irq
, NULL
, 1);
410 zpci_irq_chip
.irq_set_affinity
= zpci_set_irq_affinity
;
415 static int __init
zpci_floating_irq_init(void)
417 zpci_ibv
= kcalloc(ZPCI_NR_DEVICES
, sizeof(*zpci_ibv
), GFP_KERNEL
);
421 zpci_sbv
= airq_iv_create(ZPCI_NR_DEVICES
, AIRQ_IV_ALLOC
);
432 int __init
zpci_irq_init(void)
436 irq_delivery
= sclp
.has_dirq
? DIRECTED
: FLOATING
;
437 if (s390_pci_force_floating
)
438 irq_delivery
= FLOATING
;
440 if (irq_delivery
== DIRECTED
)
441 zpci_airq
.handler
= zpci_directed_irq_handler
;
443 rc
= register_adapter_interrupt(&zpci_airq
);
446 /* Set summary to 1 to be called every time for the ISC. */
447 *zpci_airq
.lsi_ptr
= 1;
449 switch (irq_delivery
) {
451 rc
= zpci_floating_irq_init();
454 rc
= zpci_directed_irq_init();
462 * Enable floating IRQs (with suppression after one IRQ). When using
463 * directed IRQs this enables the fallback path.
465 zpci_set_irq_ctrl(SIC_IRQ_MODE_SINGLE
, PCI_ISC
);
469 unregister_adapter_interrupt(&zpci_airq
);
474 void __init
zpci_irq_exit(void)
478 if (irq_delivery
== DIRECTED
) {
479 for_each_possible_cpu(cpu
) {
480 airq_iv_release(zpci_ibv
[cpu
]);
485 airq_iv_release(zpci_sbv
);
486 unregister_adapter_interrupt(&zpci_airq
);