/*
 * linux/arch/alpha/kernel/sys_marvel.c
 *
 * Marvel / IO7 support
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/bitops.h>

#include <asm/ptrace.h>
#include <asm/system.h>
#include <asm/dma.h>
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/core_marvel.h>
#include <asm/hwrpb.h>
#include <asm/tlbflush.h>
#include <asm/vga.h>

#include "proto.h"
#include "err_impl.h"
#include "irq_impl.h"
#include "pci_impl.h"
#include "machvec_impl.h"

#if NR_IRQS < MARVEL_NR_IRQS
# error NR_IRQS < MARVEL_NR_IRQS !!!
#endif
static void
io7_device_interrupt(unsigned long vector)
{
	unsigned int pid;
	unsigned int irq;

	/*
	 * Vector is 0x800 + (interrupt)
	 *
	 * where (interrupt) is:
	 *
	 *	...16|15 14|13     4|3 0
	 *	-----+-----+--------+---
	 *	  PE |  0  |   irq  | 0
	 *
	 * where (irq) is
	 *
	 *	 0x0800 - 0x0ff0	- 0x0800 + (LSI id << 4)
	 *	 0x1000 - 0x2ff0	- 0x1000 + (MSI_DAT<8:0> << 4)
	 */
	pid = vector >> 16;
	irq = ((vector & 0xffff) - 0x800) >> 4;

	irq += 16;				/* offset for legacy */
	irq &= MARVEL_IRQ_VEC_IRQ_MASK;		/* not too many bits */
	irq |= pid << MARVEL_IRQ_VEC_PE_SHIFT;	/* merge the pid     */

	handle_irq(irq);
}
static volatile unsigned long *
io7_get_irq_ctl(unsigned int irq, struct io7 **pio7)
{
	volatile unsigned long *ctl;
	unsigned int pid;
	struct io7 *io7;

	pid = irq >> MARVEL_IRQ_VEC_PE_SHIFT;

	if (!(io7 = marvel_find_io7(pid))) {
		printk(KERN_ERR
		       "%s for nonexistent io7 -- vec %x, pid %d\n",
		       __FUNCTION__, irq, pid);
		return NULL;
	}

	irq &= MARVEL_IRQ_VEC_IRQ_MASK;	/* isolate the vector   */
	irq -= 16;			/* subtract legacy bias */

	if (irq >= 0x180) {
		printk(KERN_ERR
		       "%s for invalid irq -- pid %d adjusted irq %x\n",
		       __FUNCTION__, pid, irq);
		return NULL;
	}

	ctl = &io7->csrs->PO7_LSI_CTL[irq & 0xff].csr;	/* assume LSI */
	if (irq >= 0x80)				/* MSI        */
		ctl = &io7->csrs->PO7_MSI_CTL[((irq - 0x80) >> 5) & 0x0f].csr;

	if (pio7) *pio7 = io7;
	return ctl;
}
static void
io7_enable_irq(unsigned int irq)
{
	volatile unsigned long *ctl;
	struct io7 *io7;

	ctl = io7_get_irq_ctl(irq, &io7);
	if (!ctl || !io7) {
		printk(KERN_ERR "%s: get_ctl failed for irq %x\n",
		       __FUNCTION__, irq);
		return;
	}

	spin_lock(&io7->irq_lock);
	*ctl |= 1UL << 24;
	mb();
	*ctl;
	spin_unlock(&io7->irq_lock);
}
static void
io7_disable_irq(unsigned int irq)
{
	volatile unsigned long *ctl;
	struct io7 *io7;

	ctl = io7_get_irq_ctl(irq, &io7);
	if (!ctl || !io7) {
		printk(KERN_ERR "%s: get_ctl failed for irq %x\n",
		       __FUNCTION__, irq);
		return;
	}

	spin_lock(&io7->irq_lock);
	*ctl &= ~(1UL << 24);
	mb();
	*ctl;
	spin_unlock(&io7->irq_lock);
}
static unsigned int
io7_startup_irq(unsigned int irq)
{
	io7_enable_irq(irq);
	return 0;	/* never anything pending */
}
static void
io7_end_irq(unsigned int irq)
{
	if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
		io7_enable_irq(irq);
}
static void
marvel_irq_noop(unsigned int irq)
{
	return;
}

static unsigned int
marvel_irq_noop_return(unsigned int irq)
{
	return 0;
}
static struct hw_interrupt_type marvel_legacy_irq_type = {
	.typename	= "LEGACY",
	.startup	= marvel_irq_noop_return,
	.shutdown	= marvel_irq_noop,
	.enable		= marvel_irq_noop,
	.disable	= marvel_irq_noop,
	.ack		= marvel_irq_noop,
	.end		= marvel_irq_noop,
};
static struct hw_interrupt_type io7_lsi_irq_type = {
	.typename	= "LSI",
	.startup	= io7_startup_irq,
	.shutdown	= io7_disable_irq,
	.enable		= io7_enable_irq,
	.disable	= io7_disable_irq,
	.ack		= io7_disable_irq,
	.end		= io7_end_irq,
};
static struct hw_interrupt_type io7_msi_irq_type = {
	.typename	= "MSI",
	.startup	= io7_startup_irq,
	.shutdown	= io7_disable_irq,
	.enable		= io7_enable_irq,
	.disable	= io7_disable_irq,
	.ack		= marvel_irq_noop,
	.end		= io7_end_irq,
};
static void
io7_redirect_irq(struct io7 *io7,
		 volatile unsigned long *csr,
		 unsigned int where)
{
	unsigned long val;

	val = *csr;
	val &= ~(0x1ffUL << 24);		/* clear the target pid   */
	val |= ((unsigned long)where << 24);	/* set the new target pid */

	*csr = val;
	mb();
	*csr;
}
static void
io7_redirect_one_lsi(struct io7 *io7, unsigned int which, unsigned int where)
{
	unsigned long val;

	/*
	 * LSI_CTL has target PID @ 14
	 */
	val = io7->csrs->PO7_LSI_CTL[which].csr;
	val &= ~(0x1ffUL << 14);		/* clear the target pid   */
	val |= ((unsigned long)where << 14);	/* set the new target pid */

	io7->csrs->PO7_LSI_CTL[which].csr = val;
	mb();
	io7->csrs->PO7_LSI_CTL[which].csr;
}
static void
io7_redirect_one_msi(struct io7 *io7, unsigned int which, unsigned int where)
{
	unsigned long val;

	/*
	 * MSI_CTL has target PID @ 14
	 */
	val = io7->csrs->PO7_MSI_CTL[which].csr;
	val &= ~(0x1ffUL << 14);		/* clear the target pid   */
	val |= ((unsigned long)where << 14);	/* set the new target pid */

	io7->csrs->PO7_MSI_CTL[which].csr = val;
	mb();
	io7->csrs->PO7_MSI_CTL[which].csr;
}
static void __init
init_one_io7_lsi(struct io7 *io7, unsigned int which, unsigned int where)
{
	/*
	 * LSI_CTL has target PID @ 14
	 */
	io7->csrs->PO7_LSI_CTL[which].csr = ((unsigned long)where << 14);
	mb();
	io7->csrs->PO7_LSI_CTL[which].csr;
}
static void __init
init_one_io7_msi(struct io7 *io7, unsigned int which, unsigned int where)
{
	/*
	 * MSI_CTL has target PID @ 14
	 */
	io7->csrs->PO7_MSI_CTL[which].csr = ((unsigned long)where << 14);
	mb();
	io7->csrs->PO7_MSI_CTL[which].csr;
}
static void __init
init_io7_irqs(struct io7 *io7,
	      struct hw_interrupt_type *lsi_ops,
	      struct hw_interrupt_type *msi_ops)
{
	long base = (io7->pe << MARVEL_IRQ_VEC_PE_SHIFT) + 16;
	long i;

	printk("Initializing interrupts for IO7 at PE %u - base %lx\n",
	       io7->pe, base);

	/*
	 * Where should interrupts from this IO7 go?
	 *
	 * They really should be sent to the local CPU to avoid having to
	 * traverse the mesh, but if it's not an SMP kernel, they have to
	 * go to the boot CPU. Send them all to the boot CPU for now;
	 * as each secondary starts, it can redirect its local device
	 * interrupts.
	 */
	printk("  Interrupts reported to CPU at PE %u\n", boot_cpuid);

	spin_lock(&io7->irq_lock);

	/* set up the error irqs */
	io7_redirect_irq(io7, &io7->csrs->HLT_CTL.csr, boot_cpuid);
	io7_redirect_irq(io7, &io7->csrs->HPI_CTL.csr, boot_cpuid);
	io7_redirect_irq(io7, &io7->csrs->CRD_CTL.csr, boot_cpuid);
	io7_redirect_irq(io7, &io7->csrs->STV_CTL.csr, boot_cpuid);
	io7_redirect_irq(io7, &io7->csrs->HEI_CTL.csr, boot_cpuid);

	/* Set up the lsi irqs.  */
	for (i = 0; i < 128; ++i) {
		irq_desc[base + i].status = IRQ_DISABLED | IRQ_LEVEL;
		irq_desc[base + i].chip = lsi_ops;
	}

	/* Disable the implemented irqs in hardware.  */
	for (i = 0; i < 0x60; ++i)
		init_one_io7_lsi(io7, i, boot_cpuid);

	init_one_io7_lsi(io7, 0x74, boot_cpuid);
	init_one_io7_lsi(io7, 0x75, boot_cpuid);

	/* Set up the msi irqs.  */
	for (i = 128; i < (128 + 512); ++i) {
		irq_desc[base + i].status = IRQ_DISABLED | IRQ_LEVEL;
		irq_desc[base + i].chip = msi_ops;
	}

	for (i = 0; i < 16; ++i)
		init_one_io7_msi(io7, i, boot_cpuid);

	spin_unlock(&io7->irq_lock);
}
static void __init
marvel_init_irq(void)
{
	int i;
	struct io7 *io7 = NULL;

	/* Reserve the legacy irqs.  */
	for (i = 0; i < 16; ++i) {
		irq_desc[i].status = IRQ_DISABLED;
		irq_desc[i].chip = &marvel_legacy_irq_type;
	}

	/* Init the io7 irqs.  */
	for (io7 = NULL; (io7 = marvel_next_io7(io7)) != NULL; )
		init_io7_irqs(io7, &io7_lsi_irq_type, &io7_msi_irq_type);
}
static int
marvel_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
{
	struct pci_controller *hose = dev->sysdata;
	struct io7_port *io7_port = hose->sysdata;
	struct io7 *io7 = io7_port->io7;
	int msi_loc, msi_data_off;
	u16 msg_ctl;
	u16 msg_dat;
	u8 intline;
	int irq;

	pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &intline);
	irq = intline;

	msi_loc = pci_find_capability(dev, PCI_CAP_ID_MSI);
	msg_ctl = 0;
	if (msi_loc)
		pci_read_config_word(dev, msi_loc + PCI_MSI_FLAGS, &msg_ctl);

	if (msg_ctl & PCI_MSI_FLAGS_ENABLE) {
		msi_data_off = PCI_MSI_DATA_32;
		if (msg_ctl & PCI_MSI_FLAGS_64BIT)
			msi_data_off = PCI_MSI_DATA_64;
		pci_read_config_word(dev, msi_loc + msi_data_off, &msg_dat);

		irq = msg_dat & 0x1ff;		/* we use msg_data<8:0> */
		irq += 0x80;			/* offset for lsi       */

#if 1
		printk("PCI:%d:%d:%d (hose %d) is using MSI\n",
		       dev->bus->number,
		       PCI_SLOT(dev->devfn),
		       PCI_FUNC(dev->devfn),
		       hose->index);
		printk("  %d message(s) from 0x%04x\n",
		       1 << ((msg_ctl & PCI_MSI_FLAGS_QSIZE) >> 4),
		       msg_dat);
		printk("  reporting on %d IRQ(s) from %d (0x%x)\n",
		       1 << ((msg_ctl & PCI_MSI_FLAGS_QSIZE) >> 4),
		       (irq + 16) | (io7->pe << MARVEL_IRQ_VEC_PE_SHIFT),
		       (irq + 16) | (io7->pe << MARVEL_IRQ_VEC_PE_SHIFT));
#endif

#if 0
		pci_write_config_word(dev, msi_loc + PCI_MSI_FLAGS,
				      msg_ctl & ~PCI_MSI_FLAGS_ENABLE);
		pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &intline);
		irq = intline;

		printk("  forcing LSI interrupt on irq %d [0x%x]\n", irq, irq);
#endif
	}

	irq += 16;					/* offset for legacy */
	irq |= io7->pe << MARVEL_IRQ_VEC_PE_SHIFT;	/* merge the pid     */

	return irq;
}
static void __init
marvel_init_pci(void)
{
	struct io7 *io7;

	marvel_register_error_handlers();

	pci_probe_only = 1;
	common_init_pci();
	locate_and_init_vga(NULL);

	/* Clear any io7 errors.  */
	for (io7 = NULL; (io7 = marvel_next_io7(io7)) != NULL; )
		io7_clear_errors(io7);
}
static void
marvel_init_rtc(void)
{
	init_rtc_irq();
}
static void
marvel_smp_callin(void)
{
	int cpuid = hard_smp_processor_id();
	struct io7 *io7 = marvel_find_io7(cpuid);
	unsigned int i;

	if (!io7)
		return;

	/*
	 * There is a local IO7 - redirect all of its interrupts here.
	 */
	printk("Redirecting IO7 interrupts to local CPU at PE %u\n", cpuid);

	/* Redirect the error IRQS here.  */
	io7_redirect_irq(io7, &io7->csrs->HLT_CTL.csr, cpuid);
	io7_redirect_irq(io7, &io7->csrs->HPI_CTL.csr, cpuid);
	io7_redirect_irq(io7, &io7->csrs->CRD_CTL.csr, cpuid);
	io7_redirect_irq(io7, &io7->csrs->STV_CTL.csr, cpuid);
	io7_redirect_irq(io7, &io7->csrs->HEI_CTL.csr, cpuid);

	/* Redirect the implemented LSIs here.  */
	for (i = 0; i < 0x60; ++i)
		io7_redirect_one_lsi(io7, i, cpuid);

	io7_redirect_one_lsi(io7, 0x74, cpuid);
	io7_redirect_one_lsi(io7, 0x75, cpuid);

	/* Redirect the MSIs here.  */
	for (i = 0; i < 16; ++i)
		io7_redirect_one_msi(io7, i, cpuid);
}
struct alpha_machine_vector marvel_ev7_mv __initmv = {
	.vector_name		= "MARVEL/EV7",
	DO_EV7_MMU,
	DO_DEFAULT_RTC,
	DO_MARVEL_IO,
	.machine_check		= marvel_machine_check,
	.max_isa_dma_address	= ALPHA_MAX_ISA_DMA_ADDRESS,
	.min_io_address		= DEFAULT_IO_BASE,
	.min_mem_address	= DEFAULT_MEM_BASE,
	.pci_dac_offset		= IO7_DAC_OFFSET,

	.nr_irqs		= MARVEL_NR_IRQS,
	.device_interrupt	= io7_device_interrupt,

	.agp_info		= marvel_agp_info,

	.smp_callin		= marvel_smp_callin,
	.init_arch		= marvel_init_arch,
	.init_irq		= marvel_init_irq,
	.init_rtc		= marvel_init_rtc,
	.init_pci		= marvel_init_pci,
	.kill_arch		= marvel_kill_arch,
	.pci_map_irq		= marvel_map_irq,
	.pci_swizzle		= common_swizzle,

	.pa_to_nid		= marvel_pa_to_nid,
	.cpuid_to_nid		= marvel_cpuid_to_nid,
	.node_mem_start		= marvel_node_mem_start,
	.node_mem_size		= marvel_node_mem_size,
};
ALIAS_MV(marvel_ev7)