1 // SPDX-License-Identifier: GPL-2.0
2 #define KMSG_COMPONENT "zpci"
3 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
5 #include <linux/kernel.h>
7 #include <linux/kernel_stat.h>
10 #include <linux/smp.h>
/* IRQ delivery mode, selected once at boot in zpci_irq_init(). */
static enum {FLOATING, DIRECTED} irq_delivery;
/* Operation controls for the Set Interruption Controls instruction. */
#define	SIC_IRQ_MODE_ALL		0
#define	SIC_IRQ_MODE_SINGLE		1
#define	SIC_IRQ_MODE_DIRECT		4
#define	SIC_IRQ_MODE_D_ALL		16
#define	SIC_IRQ_MODE_D_SINGLE		17
#define	SIC_IRQ_MODE_SET_CPU		18
/*
 * summary bit vector
 * FLOATING - summary bit per function
 * DIRECTED - summary bit per cpu (only used in fallback path)
 */
static struct airq_iv *zpci_sbv;
/*
 * interrupt bit vectors
 * FLOATING - interrupt bit vector per function
 * DIRECTED - interrupt bit vector per cpu
 */
static struct airq_iv **zpci_ibv;
38 /* Modify PCI: Register adapter interruptions */
39 static int zpci_set_airq(struct zpci_dev
*zdev
)
41 u64 req
= ZPCI_CREATE_REQ(zdev
->fh
, 0, ZPCI_MOD_FC_REG_INT
);
42 struct zpci_fib fib
= {0};
45 fib
.fmt0
.isc
= PCI_ISC
;
46 fib
.fmt0
.sum
= 1; /* enable summary notifications */
47 fib
.fmt0
.noi
= airq_iv_end(zdev
->aibv
);
48 fib
.fmt0
.aibv
= (unsigned long) zdev
->aibv
->vector
;
49 fib
.fmt0
.aibvo
= 0; /* each zdev has its own interrupt vector */
50 fib
.fmt0
.aisb
= (unsigned long) zpci_sbv
->vector
+ (zdev
->aisb
/64)*8;
51 fib
.fmt0
.aisbo
= zdev
->aisb
& 63;
53 return zpci_mod_fc(req
, &fib
, &status
) ? -EIO
: 0;
56 /* Modify PCI: Unregister adapter interruptions */
57 static int zpci_clear_airq(struct zpci_dev
*zdev
)
59 u64 req
= ZPCI_CREATE_REQ(zdev
->fh
, 0, ZPCI_MOD_FC_DEREG_INT
);
60 struct zpci_fib fib
= {0};
63 cc
= zpci_mod_fc(req
, &fib
, &status
);
64 if (cc
== 3 || (cc
== 1 && status
== 24))
65 /* Function already gone or IRQs already deregistered. */
71 /* Modify PCI: Register CPU directed interruptions */
72 static int zpci_set_directed_irq(struct zpci_dev
*zdev
)
74 u64 req
= ZPCI_CREATE_REQ(zdev
->fh
, 0, ZPCI_MOD_FC_REG_INT_D
);
75 struct zpci_fib fib
= {0};
79 fib
.fmt1
.noi
= zdev
->msi_nr_irqs
;
80 fib
.fmt1
.dibvo
= zdev
->msi_first_bit
;
82 return zpci_mod_fc(req
, &fib
, &status
) ? -EIO
: 0;
85 /* Modify PCI: Unregister CPU directed interruptions */
86 static int zpci_clear_directed_irq(struct zpci_dev
*zdev
)
88 u64 req
= ZPCI_CREATE_REQ(zdev
->fh
, 0, ZPCI_MOD_FC_DEREG_INT_D
);
89 struct zpci_fib fib
= {0};
93 cc
= zpci_mod_fc(req
, &fib
, &status
);
94 if (cc
== 3 || (cc
== 1 && status
== 24))
95 /* Function already gone or IRQs already deregistered. */
101 static int zpci_set_irq_affinity(struct irq_data
*data
, const struct cpumask
*dest
,
104 struct msi_desc
*entry
= irq_get_msi_desc(data
->irq
);
105 struct msi_msg msg
= entry
->msg
;
106 int cpu_addr
= smp_cpu_get_cpu_address(cpumask_first(dest
));
108 msg
.address_lo
&= 0xff0000ff;
109 msg
.address_lo
|= (cpu_addr
<< 8);
110 pci_write_msi_msg(data
->irq
, &msg
);
112 return IRQ_SET_MASK_OK
;
115 static struct irq_chip zpci_irq_chip
= {
117 .irq_unmask
= pci_msi_unmask_irq
,
118 .irq_mask
= pci_msi_mask_irq
,
121 static void zpci_handle_cpu_local_irq(bool rescan
)
123 struct airq_iv
*dibv
= zpci_ibv
[smp_processor_id()];
128 /* Scan the directed IRQ bit vector */
129 bit
= airq_iv_scan(dibv
, bit
, airq_iv_end(dibv
));
131 if (!rescan
|| irqs_on
++)
132 /* End of second scan with interrupts on. */
134 /* First scan complete, reenable interrupts. */
135 if (zpci_set_irq_ctrl(SIC_IRQ_MODE_D_SINGLE
, PCI_ISC
))
140 inc_irq_stat(IRQIO_MSI
);
141 generic_handle_irq(airq_iv_get_data(dibv
, bit
));
145 struct cpu_irq_data
{
146 call_single_data_t csd
;
149 static DEFINE_PER_CPU_SHARED_ALIGNED(struct cpu_irq_data
, irq_data
);
151 static void zpci_handle_remote_irq(void *data
)
153 atomic_t
*scheduled
= data
;
156 zpci_handle_cpu_local_irq(false);
157 } while (atomic_dec_return(scheduled
));
160 static void zpci_handle_fallback_irq(void)
162 struct cpu_irq_data
*cpu_data
;
167 cpu
= airq_iv_scan(zpci_sbv
, cpu
, airq_iv_end(zpci_sbv
));
170 /* End of second scan with interrupts on. */
172 /* First scan complete, reenable interrupts. */
173 if (zpci_set_irq_ctrl(SIC_IRQ_MODE_SINGLE
, PCI_ISC
))
178 cpu_data
= &per_cpu(irq_data
, cpu
);
179 if (atomic_inc_return(&cpu_data
->scheduled
) > 1)
182 INIT_CSD(&cpu_data
->csd
, zpci_handle_remote_irq
, &cpu_data
->scheduled
);
183 smp_call_function_single_async(cpu
, &cpu_data
->csd
);
187 static void zpci_directed_irq_handler(struct airq_struct
*airq
, bool floating
)
190 inc_irq_stat(IRQIO_PCF
);
191 zpci_handle_fallback_irq();
193 inc_irq_stat(IRQIO_PCD
);
194 zpci_handle_cpu_local_irq(true);
198 static void zpci_floating_irq_handler(struct airq_struct
*airq
, bool floating
)
200 unsigned long si
, ai
;
201 struct airq_iv
*aibv
;
204 inc_irq_stat(IRQIO_PCF
);
206 /* Scan adapter summary indicator bit vector */
207 si
= airq_iv_scan(zpci_sbv
, si
, airq_iv_end(zpci_sbv
));
210 /* End of second scan with interrupts on. */
212 /* First scan complete, reenable interrupts. */
213 if (zpci_set_irq_ctrl(SIC_IRQ_MODE_SINGLE
, PCI_ISC
))
219 /* Scan the adapter interrupt vector for this device. */
222 ai
= airq_iv_scan(aibv
, ai
, airq_iv_end(aibv
));
225 inc_irq_stat(IRQIO_MSI
);
226 airq_iv_lock(aibv
, ai
);
227 generic_handle_irq(airq_iv_get_data(aibv
, ai
));
228 airq_iv_unlock(aibv
, ai
);
233 int arch_setup_msi_irqs(struct pci_dev
*pdev
, int nvec
, int type
)
235 struct zpci_dev
*zdev
= to_zpci(pdev
);
236 unsigned int hwirq
, msi_vecs
, cpu
;
238 struct msi_desc
*msi
;
244 zdev
->msi_first_bit
= -1U;
245 if (type
== PCI_CAP_ID_MSI
&& nvec
> 1)
247 msi_vecs
= min_t(unsigned int, nvec
, zdev
->max_msi
);
249 if (irq_delivery
== DIRECTED
) {
250 /* Allocate cpu vector bits */
251 bit
= airq_iv_alloc(zpci_ibv
[0], msi_vecs
);
255 /* Allocate adapter summary indicator bit */
256 bit
= airq_iv_alloc_bit(zpci_sbv
);
261 /* Create adapter interrupt vector */
262 zdev
->aibv
= airq_iv_create(msi_vecs
, AIRQ_IV_DATA
| AIRQ_IV_BITLOCK
);
266 /* Wire up shortcut pointer */
267 zpci_ibv
[bit
] = zdev
->aibv
;
268 /* Each function has its own interrupt vector */
272 /* Request MSI interrupts */
274 for_each_pci_msi_entry(msi
, pdev
) {
276 if (hwirq
- bit
>= msi_vecs
)
278 irq
= __irq_alloc_descs(-1, 0, 1, 0, THIS_MODULE
,
279 (irq_delivery
== DIRECTED
) ?
280 msi
->affinity
: NULL
);
283 rc
= irq_set_msi_desc(irq
, msi
);
286 irq_set_chip_and_handler(irq
, &zpci_irq_chip
,
288 msg
.data
= hwirq
- bit
;
289 if (irq_delivery
== DIRECTED
) {
291 cpu
= cpumask_first(&msi
->affinity
->mask
);
294 cpu_addr
= smp_cpu_get_cpu_address(cpu
);
296 msg
.address_lo
= zdev
->msi_addr
& 0xff0000ff;
297 msg
.address_lo
|= (cpu_addr
<< 8);
299 for_each_possible_cpu(cpu
) {
300 airq_iv_set_data(zpci_ibv
[cpu
], hwirq
, irq
);
303 msg
.address_lo
= zdev
->msi_addr
& 0xffffffff;
304 airq_iv_set_data(zdev
->aibv
, hwirq
, irq
);
306 msg
.address_hi
= zdev
->msi_addr
>> 32;
307 pci_write_msi_msg(irq
, &msg
);
311 zdev
->msi_first_bit
= bit
;
312 zdev
->msi_nr_irqs
= msi_vecs
;
314 if (irq_delivery
== DIRECTED
)
315 rc
= zpci_set_directed_irq(zdev
);
317 rc
= zpci_set_airq(zdev
);
321 return (msi_vecs
== nvec
) ? 0 : msi_vecs
;
324 void arch_teardown_msi_irqs(struct pci_dev
*pdev
)
326 struct zpci_dev
*zdev
= to_zpci(pdev
);
327 struct msi_desc
*msi
;
330 /* Disable interrupts */
331 if (irq_delivery
== DIRECTED
)
332 rc
= zpci_clear_directed_irq(zdev
);
334 rc
= zpci_clear_airq(zdev
);
338 /* Release MSI interrupts */
339 for_each_pci_msi_entry(msi
, pdev
) {
342 if (msi
->msi_attrib
.is_msix
)
343 __pci_msix_desc_mask_irq(msi
, 1);
345 __pci_msi_desc_mask_irq(msi
, 1, 1);
346 irq_set_msi_desc(msi
->irq
, NULL
);
347 irq_free_desc(msi
->irq
);
348 msi
->msg
.address_lo
= 0;
349 msi
->msg
.address_hi
= 0;
354 if (zdev
->aisb
!= -1UL) {
355 zpci_ibv
[zdev
->aisb
] = NULL
;
356 airq_iv_free_bit(zpci_sbv
, zdev
->aisb
);
360 airq_iv_release(zdev
->aibv
);
364 if ((irq_delivery
== DIRECTED
) && zdev
->msi_first_bit
!= -1U)
365 airq_iv_free(zpci_ibv
[0], zdev
->msi_first_bit
, zdev
->msi_nr_irqs
);
368 static struct airq_struct zpci_airq
= {
369 .handler
= zpci_floating_irq_handler
,
373 static void __init
cpu_enable_directed_irq(void *unused
)
375 union zpci_sic_iib iib
= {{0}};
377 iib
.cdiib
.dibv_addr
= (u64
) zpci_ibv
[smp_processor_id()]->vector
;
379 __zpci_set_irq_ctrl(SIC_IRQ_MODE_SET_CPU
, 0, &iib
);
380 zpci_set_irq_ctrl(SIC_IRQ_MODE_D_SINGLE
, PCI_ISC
);
383 static int __init
zpci_directed_irq_init(void)
385 union zpci_sic_iib iib
= {{0}};
388 zpci_sbv
= airq_iv_create(num_possible_cpus(), 0);
392 iib
.diib
.isc
= PCI_ISC
;
393 iib
.diib
.nr_cpus
= num_possible_cpus();
394 iib
.diib
.disb_addr
= (u64
) zpci_sbv
->vector
;
395 __zpci_set_irq_ctrl(SIC_IRQ_MODE_DIRECT
, 0, &iib
);
397 zpci_ibv
= kcalloc(num_possible_cpus(), sizeof(*zpci_ibv
),
402 for_each_possible_cpu(cpu
) {
404 * Per CPU IRQ vectors look the same but bit-allocation
405 * is only done on the first vector.
407 zpci_ibv
[cpu
] = airq_iv_create(cache_line_size() * BITS_PER_BYTE
,
410 (!cpu
? AIRQ_IV_ALLOC
: 0));
414 on_each_cpu(cpu_enable_directed_irq
, NULL
, 1);
416 zpci_irq_chip
.irq_set_affinity
= zpci_set_irq_affinity
;
421 static int __init
zpci_floating_irq_init(void)
423 zpci_ibv
= kcalloc(ZPCI_NR_DEVICES
, sizeof(*zpci_ibv
), GFP_KERNEL
);
427 zpci_sbv
= airq_iv_create(ZPCI_NR_DEVICES
, AIRQ_IV_ALLOC
);
438 int __init
zpci_irq_init(void)
442 irq_delivery
= sclp
.has_dirq
? DIRECTED
: FLOATING
;
443 if (s390_pci_force_floating
)
444 irq_delivery
= FLOATING
;
446 if (irq_delivery
== DIRECTED
)
447 zpci_airq
.handler
= zpci_directed_irq_handler
;
449 rc
= register_adapter_interrupt(&zpci_airq
);
452 /* Set summary to 1 to be called every time for the ISC. */
453 *zpci_airq
.lsi_ptr
= 1;
455 switch (irq_delivery
) {
457 rc
= zpci_floating_irq_init();
460 rc
= zpci_directed_irq_init();
468 * Enable floating IRQs (with suppression after one IRQ). When using
469 * directed IRQs this enables the fallback path.
471 zpci_set_irq_ctrl(SIC_IRQ_MODE_SINGLE
, PCI_ISC
);
475 unregister_adapter_interrupt(&zpci_airq
);
480 void __init
zpci_irq_exit(void)
484 if (irq_delivery
== DIRECTED
) {
485 for_each_possible_cpu(cpu
) {
486 airq_iv_release(zpci_ibv
[cpu
]);
491 airq_iv_release(zpci_sbv
);
492 unregister_adapter_interrupt(&zpci_airq
);