/*
 * Platform dependent support for SGI SN
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 2000-2004 Silicon Graphics, Inc.  All Rights Reserved.
 */

#include <linux/irq.h>
#include <asm/sn/intr.h>
#include <asm/sn/addrs.h>
#include <asm/sn/arch.h>
#include "xtalk/xwidgetdev.h"
#include <asm/sn/pcibus_provider_defs.h>
#include <asm/sn/pcidev.h>
#include "pci/pcibr_provider.h"
#include <asm/sn/shub_mmr.h>
#include <asm/sn/sn_sal.h>

static void force_interrupt(int irq);
static void register_intr_pda(struct sn_irq_info *sn_irq_info);
static void unregister_intr_pda(struct sn_irq_info *sn_irq_info);

extern int sn_force_interrupt_flag;
extern int sn_ioif_inited;
struct sn_irq_info **sn_irq;

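/*
 * sn_intr_alloc() asks the PROM, via an uncached SAL call, to allocate
 * an interrupt on the given nasid/widget, targeted at req_nasid/req_slice,
 * and to fill in the sn_irq_info struct at the supplied physical address.
 * Returns the SAL status word (0 on success).
 */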
static inline uint64_t sn_intr_alloc(nasid_t local_nasid, int local_widget,
                                     u64 sn_irq_info,
                                     int req_irq, nasid_t req_nasid,
                                     int req_slice)
{
        struct ia64_sal_retval ret_stuff;
        ret_stuff.status = 0;
        ret_stuff.v0 = 0;

        SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_INTERRUPT,
                        (u64) SAL_INTR_ALLOC, (u64) local_nasid,
                        (u64) local_widget, (u64) sn_irq_info, (u64) req_irq,
                        (u64) req_nasid, (u64) req_slice);
        return ret_stuff.status;
}

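/*
 * sn_intr_free() returns an interrupt resource to the PROM, identified
 * by the irq number and the cookie the PROM handed back at allocation.
 */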
static inline void sn_intr_free(nasid_t local_nasid, int local_widget,
                                struct sn_irq_info *sn_irq_info)
{
        struct ia64_sal_retval ret_stuff;
        ret_stuff.status = 0;
        ret_stuff.v0 = 0;

        SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_INTERRUPT,
                        (u64) SAL_INTR_FREE, (u64) local_nasid,
                        (u64) local_widget, (u64) sn_irq_info->irq_irq,
                        (u64) sn_irq_info->irq_cookie, 0, 0);
}

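/*
 * SHub interrupts need no per-irq startup/shutdown or enable/disable
 * work, so the corresponding hw_interrupt_type hooks are stubs.
 */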
static unsigned int sn_startup_irq(unsigned int irq)
{
        return 0;
}

static void sn_shutdown_irq(unsigned int irq)
{
}

static void sn_disable_irq(unsigned int irq)
{
}

static void sn_enable_irq(unsigned int irq)
{
}

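/*
 * Acknowledge an interrupt: clear its bits in the SHub event occurred
 * register by writing the mask to the write-to-clear alias register,
 * then mark the vector in service in this cpu's pda.
 */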
static void sn_ack_irq(unsigned int irq)
{
        uint64_t event_occurred, mask = 0;
        int nasid;

        irq = irq & 0xff;
        nasid = get_nasid();
        event_occurred =
            HUB_L((uint64_t *) GLOBAL_MMR_ADDR(nasid, SH_EVENT_OCCURRED));
        mask = event_occurred & SH_ALL_INT_MASK;
        HUB_S((uint64_t *) GLOBAL_MMR_ADDR(nasid, SH_EVENT_OCCURRED_ALIAS),
              mask);
        __set_bit(irq, (volatile void *)pda->sn_in_service_ivecs);
}

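/*
 * End-of-interrupt processing: re-check the UART (see the comment in
 * the body), take the vector out of service, and optionally replay
 * any interrupt that may have been dropped while this one was being
 * handled.
 */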
static void sn_end_irq(unsigned int irq)
{
        int nasid;
        int ivec;
        uint64_t event_occurred;

        ivec = irq & 0xff;
        if (ivec == SGI_UART_VECTOR) {
                nasid = get_nasid();
                event_occurred = HUB_L((uint64_t *) GLOBAL_MMR_ADDR
                                       (nasid, SH_EVENT_OCCURRED));
                /* If the UART bit is set here, we may have received an
                 * interrupt from the UART that the driver missed.  To
                 * make sure, we IPI ourselves to force us to look again.
                 */
                if (event_occurred & SH_EVENT_OCCURRED_UART_INT_MASK) {
                        platform_send_ipi(smp_processor_id(), SGI_UART_VECTOR,
                                          IA64_IPI_DM_INT, 0);
                }
        }
        __clear_bit(ivec, (volatile void *)pda->sn_in_service_ivecs);
        if (sn_force_interrupt_flag)
                force_interrupt(irq);
}

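/*
 * Retarget an irq to a new cpu.  The PROM owns the interrupt resources,
 * so retargeting means freeing the old PROM allocation and allocating
 * a new one aimed at the target nasid/slice, then copying the new
 * cookie and xtalk address back into the kernel's sn_irq_info list.
 */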
static void sn_set_affinity_irq(unsigned int irq, cpumask_t mask)
{
        struct sn_irq_info *sn_irq_info = sn_irq[irq];
        struct sn_irq_info *tmp_sn_irq_info;
        int cpuid, cpuphys;
        nasid_t t_nasid;        /* nasid to target */
        int t_slice;            /* slice to target */

        /* allocate a temp sn_irq_info struct to get new target info */
        tmp_sn_irq_info = kmalloc(sizeof(*tmp_sn_irq_info), GFP_KERNEL);
        if (!tmp_sn_irq_info)
                return;

        cpuid = first_cpu(mask);
        cpuphys = cpu_physical_id(cpuid);
        t_nasid = cpuid_to_nasid(cpuid);
        t_slice = cpuid_to_slice(cpuid);

        while (sn_irq_info) {
                int status;
                int local_widget;
                uint64_t bridge = (uint64_t) sn_irq_info->irq_bridge;
                nasid_t local_nasid = NASID_GET(bridge);

                if (!bridge)
                        break;  /* irq is not a device interrupt */

                if (local_nasid & 1)    /* tio check */
                        local_widget = TIO_SWIN_WIDGETNUM(bridge);
                else
                        local_widget = SWIN_WIDGETNUM(bridge);

                /* Free the old PROM sn_irq_info structure */
                sn_intr_free(local_nasid, local_widget, sn_irq_info);

                /* allocate a new PROM sn_irq_info struct */
                status = sn_intr_alloc(local_nasid, local_widget,
                                       __pa(tmp_sn_irq_info), irq, t_nasid,
                                       t_slice);

                if (status == 0) {
                        /* Update the kernel's sn_irq_info with the new target info */
                        unregister_intr_pda(sn_irq_info);
                        sn_irq_info->irq_cpuid = cpuid;
                        sn_irq_info->irq_nasid = t_nasid;
                        sn_irq_info->irq_slice = t_slice;
                        sn_irq_info->irq_xtalkaddr =
                            tmp_sn_irq_info->irq_xtalkaddr;
                        sn_irq_info->irq_cookie = tmp_sn_irq_info->irq_cookie;
                        register_intr_pda(sn_irq_info);

                        if (IS_PCI_BRIDGE_ASIC(sn_irq_info->irq_bridge_type)) {
                                pcibr_change_devices_irq(sn_irq_info);
                        }

                        sn_irq_info = sn_irq_info->irq_next;

#ifdef CONFIG_SMP
                        set_irq_affinity_info((irq & 0xff), cpuphys, 0);
#endif
                } else {
                        break;  /* snp_affinity failed the intr_alloc */
                }
        }
        kfree(tmp_sn_irq_info);
}

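/*
 * Interrupt controller descriptor installed by sn_irq_init().  The
 * positional initializer fills the hw_interrupt_type slots (typename,
 * startup, shutdown, enable, disable, ack, end, set_affinity) with
 * the handlers defined above.
 */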
struct hw_interrupt_type irq_type_sn = {
        "SN hub",
        sn_startup_irq,
        sn_shutdown_irq,
        sn_enable_irq,
        sn_disable_irq,
        sn_ack_irq,
        sn_end_irq,
        sn_set_affinity_irq
};

unsigned int sn_local_vector_to_irq(u8 vector)
{
        return (CPU_VECTOR_TO_IRQ(smp_processor_id(), vector));
}

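/*
 * Claim every irq descriptor that no other controller has claimed yet,
 * so SN hub handling becomes the default for this platform.
 */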
void sn_irq_init(void)
{
        int i;
        irq_desc_t *base_desc = irq_desc;

        for (i = 0; i < NR_IRQS; i++) {
                if (base_desc[i].handler == &no_irq_type) {
                        base_desc[i].handler = &irq_type_sn;
                }
        }
}

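/*
 * Keep the per-cpu pda's [sn_first_irq, sn_last_irq] window up to
 * date; sn_lb_int_war_check() scans only that window when looking
 * for lost interrupts.
 */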
static void register_intr_pda(struct sn_irq_info *sn_irq_info)
{
        int irq = sn_irq_info->irq_irq;
        int cpu = sn_irq_info->irq_cpuid;

        if (pdacpu(cpu)->sn_last_irq < irq) {
                pdacpu(cpu)->sn_last_irq = irq;
        }

        if (pdacpu(cpu)->sn_first_irq == 0 || pdacpu(cpu)->sn_first_irq > irq) {
                pdacpu(cpu)->sn_first_irq = irq;
        }
}

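/*
 * Shrink the per-cpu irq window when the irq being removed was one of
 * its endpoints: scan inward for the nearest remaining irq that still
 * targets this cpu.
 */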
static void unregister_intr_pda(struct sn_irq_info *sn_irq_info)
{
        int irq = sn_irq_info->irq_irq;
        int cpu = sn_irq_info->irq_cpuid;
        struct sn_irq_info *tmp_irq_info;
        int i, foundmatch;

        if (pdacpu(cpu)->sn_last_irq == irq) {
                foundmatch = 0;
                for (i = pdacpu(cpu)->sn_last_irq - 1; i; i--) {
                        tmp_irq_info = sn_irq[i];
                        while (tmp_irq_info) {
                                if (tmp_irq_info->irq_cpuid == cpu) {
                                        foundmatch++;
                                        break;
                                }
                                tmp_irq_info = tmp_irq_info->irq_next;
                        }
                        if (foundmatch) {
                                break;
                        }
                }
                pdacpu(cpu)->sn_last_irq = i;
        }

        if (pdacpu(cpu)->sn_first_irq == irq) {
                foundmatch = 0;
                for (i = pdacpu(cpu)->sn_first_irq + 1; i < NR_IRQS; i++) {
                        tmp_irq_info = sn_irq[i];
                        while (tmp_irq_info) {
                                if (tmp_irq_info->irq_cpuid == cpu) {
                                        foundmatch++;
                                        break;
                                }
                                tmp_irq_info = tmp_irq_info->irq_next;
                        }
                        if (foundmatch) {
                                break;
                        }
                }
                pdacpu(cpu)->sn_first_irq = ((i == NR_IRQS) ? 0 : i);
        }
}

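/*
 * Allocate a zeroed sn_irq_info and have the PROM set it up; the PROM
 * writes the result through the physical address we pass in.
 */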
struct sn_irq_info *sn_irq_alloc(nasid_t local_nasid, int local_widget, int irq,
                                 nasid_t nasid, int slice)
{
        struct sn_irq_info *sn_irq_info;
        int status;

        sn_irq_info = kmalloc(sizeof(*sn_irq_info), GFP_KERNEL);
        if (sn_irq_info == NULL)
                return NULL;

        memset(sn_irq_info, 0x0, sizeof(*sn_irq_info));

        status =
            sn_intr_alloc(local_nasid, local_widget, __pa(sn_irq_info), irq,
                          nasid, slice);

        if (status) {
                kfree(sn_irq_info);
                return NULL;
        } else {
                return sn_irq_info;
        }
}

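/*
 * Release an interrupt: hand the PROM resources back, then free the
 * kernel structure.
 */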
void sn_irq_free(struct sn_irq_info *sn_irq_info)
{
        uint64_t bridge = (uint64_t) sn_irq_info->irq_bridge;
        nasid_t local_nasid = NASID_GET(bridge);
        int local_widget;

        if (local_nasid & 1)    /* tio check */
                local_widget = TIO_SWIN_WIDGETNUM(bridge);
        else
                local_widget = SWIN_WIDGETNUM(bridge);

        sn_intr_free(local_nasid, local_widget, sn_irq_info);

        kfree(sn_irq_info);
}

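/*
 * Bind a PROM-allocated interrupt to a PCI device: record the target
 * cpu and the device's pcidev_info, then link the entry onto the
 * per-irq list (several devices may share one irq).
 */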
void sn_irq_fixup(struct pci_dev *pci_dev, struct sn_irq_info *sn_irq_info)
{
        nasid_t nasid = sn_irq_info->irq_nasid;
        int slice = sn_irq_info->irq_slice;
        int cpu = nasid_slice_to_cpuid(nasid, slice);

        sn_irq_info->irq_cpuid = cpu;
        sn_irq_info->irq_pciioinfo = SN_PCIDEV_INFO(pci_dev);

        /* link it into the sn_irq[irq] list */
        sn_irq_info->irq_next = sn_irq[sn_irq_info->irq_irq];
        sn_irq[sn_irq_info->irq_irq] = sn_irq_info;

        register_intr_pda(sn_irq_info);
}

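/*
 * Replay a possibly dropped interrupt by poking every initialized PCI
 * bridge that feeds this irq.
 */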
static void force_interrupt(int irq)
{
        struct sn_irq_info *sn_irq_info;

        if (!sn_ioif_inited)
                return;
        sn_irq_info = sn_irq[irq];
        while (sn_irq_info) {
                if (IS_PCI_BRIDGE_ASIC(sn_irq_info->irq_bridge_type) &&
                    (sn_irq_info->irq_bridge != NULL)) {
                        pcibr_force_interrupt(sn_irq_info);
                }
                sn_irq_info = sn_irq_info->irq_next;
        }
}

/*
 * Check for lost interrupts.  If the PIC int_status reg. says that
 * an interrupt has been sent, but not handled, and the interrupt
 * is not pending in either the cpu irr regs or in the soft irr regs,
 * and the interrupt is not in service, then the interrupt may have
 * been lost.  Force an interrupt on that pin.  It is possible that
 * the interrupt is in flight, so we may generate a spurious interrupt,
 * but we should never miss a real lost interrupt.
 */
static void sn_check_intr(int irq, struct sn_irq_info *sn_irq_info)
{
        uint64_t regval;
        int irr_reg_num;
        int irr_bit;
        uint64_t irr_reg;
        struct pcidev_info *pcidev_info;
        struct pcibus_info *pcibus_info;

        pcidev_info = (struct pcidev_info *)sn_irq_info->irq_pciioinfo;
        if (!pcidev_info)
                return;

        pcibus_info =
            (struct pcibus_info *)pcidev_info->pdi_host_pcidev_info->
            pdi_pcibus_info;
        regval = pcireg_intr_status_get(pcibus_info);

        irr_reg_num = irq_to_vector(irq) / 64;
        irr_bit = irq_to_vector(irq) % 64;
        switch (irr_reg_num) {
        case 0:
                irr_reg = ia64_getreg(_IA64_REG_CR_IRR0);
                break;
        case 1:
                irr_reg = ia64_getreg(_IA64_REG_CR_IRR1);
                break;
        case 2:
                irr_reg = ia64_getreg(_IA64_REG_CR_IRR2);
                break;
        case 3:
                irr_reg = ia64_getreg(_IA64_REG_CR_IRR3);
                break;
        }
        if (!test_bit(irr_bit, &irr_reg)) {
                if (!test_bit(irq, pda->sn_soft_irr)) {
                        if (!test_bit(irq, pda->sn_in_service_ivecs)) {
                                regval &= 0xff;
                                if (sn_irq_info->irq_int_bit & regval &
                                    sn_irq_info->irq_last_intr) {
                                        regval &= ~(sn_irq_info->
                                                    irq_int_bit & regval);
                                        pcibr_force_interrupt(sn_irq_info);
                                }
                        }
                }
        }
        sn_irq_info->irq_last_intr = regval;
}

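/*
 * Lost-interrupt workaround ("int war") for the SHub local block:
 * walk this cpu's [sn_first_irq, sn_last_irq] window and let
 * sn_check_intr() force any interrupt that appears to have been lost.
 */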
void sn_lb_int_war_check(void)
{
        int i;

        if (!sn_ioif_inited || pda->sn_first_irq == 0)
                return;
        for (i = pda->sn_first_irq; i <= pda->sn_last_irq; i++) {
                struct sn_irq_info *sn_irq_info = sn_irq[i];
                while (sn_irq_info) {
                        /* Only call for PCI bridges that are fully initialized. */
                        if (IS_PCI_BRIDGE_ASIC(sn_irq_info->irq_bridge_type) &&
                            (sn_irq_info->irq_bridge != NULL)) {
                                sn_check_intr(i, sn_irq_info);
                        }
                        sn_irq_info = sn_irq_info->irq_next;
                }
        }
}