/*
 *      linux/arch/alpha/kernel/core_wildfire.c
 *
 *      Wildfire support.
 *
 *      Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 */
#define __EXTERN_INLINE inline
#include <asm/io.h>
#include <asm/core_wildfire.h>
#undef __EXTERN_INLINE

#include <linux/types.h>
#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/init.h>

#include <asm/ptrace.h>
#include <asm/smp.h>

#include "proto.h"
#include "pci_impl.h"
#define DEBUG_CONFIG 0
#define DEBUG_DUMP_REGS 0
#define DEBUG_DUMP_CONFIG 1

#if DEBUG_CONFIG
# define DBG_CFG(args) printk args
#else
# define DBG_CFG(args)
#endif

#if DEBUG_DUMP_REGS
static void wildfire_dump_pci_regs(int qbbno, int hoseno);
static void wildfire_dump_pca_regs(int qbbno, int pcano);
static void wildfire_dump_qsa_regs(int qbbno);
static void wildfire_dump_qsd_regs(int qbbno);
static void wildfire_dump_iop_regs(int qbbno);
static void wildfire_dump_gp_regs(int qbbno);
#endif

#if DEBUG_DUMP_CONFIG
static void wildfire_dump_hardware_config(void);
#endif
unsigned char wildfire_hard_qbb_map[WILDFIRE_MAX_QBB];
unsigned char wildfire_soft_qbb_map[WILDFIRE_MAX_QBB];
#define QBB_MAP_EMPTY 0xff

unsigned long wildfire_hard_qbb_mask;
unsigned long wildfire_soft_qbb_mask;
unsigned long wildfire_gp_mask;
unsigned long wildfire_hs_mask;
unsigned long wildfire_iop_mask;
unsigned long wildfire_ior_mask;
unsigned long wildfire_pca_mask;
unsigned long wildfire_cpu_mask;
unsigned long wildfire_mem_mask;
void __init
wildfire_init_hose(int qbbno, int hoseno)
{
        struct pci_controller *hose;
        wildfire_pci *pci;

        hose = alloc_pci_controller();
        hose->io_space = alloc_resource();
        hose->mem_space = alloc_resource();

        /* This is for userland consumption. */
        hose->sparse_mem_base = 0;
        hose->sparse_io_base = 0;
        hose->dense_mem_base = WILDFIRE_MEM(qbbno, hoseno);
        hose->dense_io_base = WILDFIRE_IO(qbbno, hoseno);

        hose->config_space_base = WILDFIRE_CONF(qbbno, hoseno);
        hose->index = (qbbno << 3) + hoseno;

        hose->io_space->start = WILDFIRE_IO(qbbno, hoseno) - WILDFIRE_IO_BIAS;
        hose->io_space->end = hose->io_space->start + WILDFIRE_IO_SPACE - 1;
        hose->io_space->name = pci_io_names[hoseno];
        hose->io_space->flags = IORESOURCE_IO;

        hose->mem_space->start = WILDFIRE_MEM(qbbno, hoseno) - WILDFIRE_MEM_BIAS;
        hose->mem_space->end = hose->mem_space->start + 0xffffffff;
        hose->mem_space->name = pci_mem_names[hoseno];
        hose->mem_space->flags = IORESOURCE_MEM;

        if (request_resource(&ioport_resource, hose->io_space) < 0)
                printk(KERN_ERR "Failed to request IO on qbb %d hose %d\n",
                       qbbno, hoseno);
        if (request_resource(&iomem_resource, hose->mem_space) < 0)
                printk(KERN_ERR "Failed to request MEM on qbb %d hose %d\n",
                       qbbno, hoseno);

#if DEBUG_DUMP_REGS
        wildfire_dump_pci_regs(qbbno, hoseno);
#endif

        /*
         * Set up the PCI to main memory translation windows.
         *
         * Note: Window 3 is scatter-gather only.
         *
         * Window 0 is scatter-gather 8MB at 8MB (for isa)
         * Window 1 is direct access 1GB at 1GB
         * Window 2 is direct access 1GB at 2GB
         * Window 3 is scatter-gather 128MB at 3GB
         * ??? We ought to scale window 3 memory.
         */
        hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000, 0);
        hose->sg_pci = iommu_arena_new(hose, 0xc0000000, 0x08000000, 0);

        pci = WILDFIRE_pci(qbbno, hoseno);

        pci->pci_window[0].wbase.csr = hose->sg_isa->dma_base | 3;
        pci->pci_window[0].wmask.csr = (hose->sg_isa->size - 1) & 0xfff00000;
        pci->pci_window[0].tbase.csr = virt_to_phys(hose->sg_isa->ptes);

        pci->pci_window[1].wbase.csr = 0x40000000 | 1;
        pci->pci_window[1].wmask.csr = (0x40000000 - 1) & 0xfff00000;
        pci->pci_window[1].tbase.csr = 0;

        pci->pci_window[2].wbase.csr = 0x80000000 | 1;
        pci->pci_window[2].wmask.csr = (0x40000000 - 1) & 0xfff00000;
        pci->pci_window[2].tbase.csr = 0x40000000;

        pci->pci_window[3].wbase.csr = hose->sg_pci->dma_base | 3;
        pci->pci_window[3].wmask.csr = (hose->sg_pci->size - 1) & 0xfff00000;
        pci->pci_window[3].tbase.csr = virt_to_phys(hose->sg_pci->ptes);

        wildfire_pci_tbi(hose, 0, 0); /* Flush TLB at the end. */
}
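/*
 * Illustrative sketch, not part of the original file: the arithmetic
 * implied by the two direct-access windows programmed above (window 1,
 * 1GB of PCI space at 1GB with tbase 0; window 2, 1GB at 2GB with
 * tbase 1GB).  The helper name is made up for illustration only; a bus
 * address inside a direct window maps to (bus_addr - wbase) + tbase.
 */
static inline unsigned long
wildfire_example_direct_map(unsigned long bus_addr)
{
        /* Window 1: PCI 0x40000000-0x7fffffff -> memory 0x00000000-0x3fffffff */
        if (bus_addr >= 0x40000000UL && bus_addr < 0x80000000UL)
                return bus_addr - 0x40000000UL;
        /* Window 2: PCI 0x80000000-0xbfffffff -> memory 0x40000000-0x7fffffff */
        if (bus_addr >= 0x80000000UL && bus_addr < 0xc0000000UL)
                return (bus_addr - 0x80000000UL) + 0x40000000UL;
        return ~0UL;    /* outside the direct-mapped range */
}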
void __init
wildfire_init_pca(int qbbno, int pcano)
{
        /* Test for PCA existence first. */
        if (!WILDFIRE_PCA_EXISTS(qbbno, pcano))
                return;

#if DEBUG_DUMP_REGS
        wildfire_dump_pca_regs(qbbno, pcano);
#endif

        /* Do both hoses of the PCA. */
        wildfire_init_hose(qbbno, (pcano << 1) + 0);
        wildfire_init_hose(qbbno, (pcano << 1) + 1);
}
void __init
wildfire_init_qbb(int qbbno)
{
        int pcano;

        /* Test for QBB existence first. */
        if (!WILDFIRE_QBB_EXISTS(qbbno))
                return;

#if DEBUG_DUMP_REGS
        wildfire_dump_qsa_regs(qbbno);
        wildfire_dump_qsd_regs(qbbno);
        wildfire_dump_iop_regs(qbbno);
        wildfire_dump_gp_regs(qbbno);
#endif

        /* Init all PCAs here. */
        for (pcano = 0; pcano < WILDFIRE_PCA_PER_QBB; pcano++) {
                wildfire_init_pca(qbbno, pcano);
        }
}
void __init
wildfire_hardware_probe(void)
{
        unsigned long temp;
        unsigned int hard_qbb, soft_qbb;
        wildfire_fast_qsd *fast = WILDFIRE_fast_qsd();
        wildfire_qsd *qsd;
        wildfire_qsa *qsa;
        wildfire_iop *iop;
        wildfire_gp *gp;
        wildfire_ne *ne;
        wildfire_fe *fe;
        int i;

        temp = fast->qsd_whami.csr;
#if 0
        printk(KERN_ERR "fast QSD_WHAMI at base %p is 0x%lx\n", fast, temp);
#endif

        hard_qbb = (temp >> 8) & 7;
        soft_qbb = (temp >> 4) & 7;

        /* Init the HW configuration variables. */
        wildfire_hard_qbb_mask = (1 << hard_qbb);
        wildfire_soft_qbb_mask = (1 << soft_qbb);

        wildfire_gp_mask = 0;
        wildfire_hs_mask = 0;
        wildfire_iop_mask = 0;
        wildfire_ior_mask = 0;
        wildfire_pca_mask = 0;

        wildfire_cpu_mask = 0;
        wildfire_mem_mask = 0;

        memset(wildfire_hard_qbb_map, QBB_MAP_EMPTY, WILDFIRE_MAX_QBB);
        memset(wildfire_soft_qbb_map, QBB_MAP_EMPTY, WILDFIRE_MAX_QBB);

        /* First, determine which QBBs are present. */
        qsa = WILDFIRE_qsa(soft_qbb);

        temp = qsa->qsa_qbb_id.csr;
#if 0
        printk(KERN_ERR "QSA_QBB_ID at base %p is 0x%lx\n", qsa, temp);
#endif

        if (temp & 0x40) /* Is there an HS? */
                wildfire_hs_mask = 1;

        if (temp & 0x20) { /* Is there a GP? */
                gp = WILDFIRE_gp(soft_qbb);
                temp = 0;
                for (i = 0; i < 4; i++) {
                        temp |= gp->gpa_qbb_map[i].csr << (i * 8);
#if 0
                        printk(KERN_ERR "GPA_QBB_MAP[%d] at base %p is 0x%lx\n",
                               i, gp, temp);
#endif
                }

                for (hard_qbb = 0; hard_qbb < WILDFIRE_MAX_QBB; hard_qbb++) {
                        if (temp & 8) { /* Is there a QBB? */
                                soft_qbb = temp & 7;
                                wildfire_hard_qbb_mask |= (1 << hard_qbb);
                                wildfire_soft_qbb_mask |= (1 << soft_qbb);
                        }
                        temp >>= 4;
                }
                wildfire_gp_mask = wildfire_soft_qbb_mask;
        }

        /* Next determine each QBBs resources. */
        for (soft_qbb = 0; soft_qbb < WILDFIRE_MAX_QBB; soft_qbb++) {
                if (WILDFIRE_QBB_EXISTS(soft_qbb)) {
                        qsd = WILDFIRE_qsd(soft_qbb);
                        temp = qsd->qsd_whami.csr;
#if 0
                        printk(KERN_ERR "QSD_WHAMI at base %p is 0x%lx\n",
                               qsd, temp);
#endif
                        hard_qbb = (temp >> 8) & 7;
                        wildfire_hard_qbb_map[hard_qbb] = soft_qbb;
                        wildfire_soft_qbb_map[soft_qbb] = hard_qbb;

                        qsa = WILDFIRE_qsa(soft_qbb);
                        temp = qsa->qsa_qbb_pop[0].csr;
#if 0
                        printk(KERN_ERR "QSA_QBB_POP_0 at base %p is 0x%lx\n",
                               qsa, temp);
#endif
                        wildfire_cpu_mask |= ((temp >> 0) & 0xf) << (soft_qbb << 2);
                        wildfire_mem_mask |= ((temp >> 4) & 0xf) << (soft_qbb << 2);

                        temp = qsa->qsa_qbb_pop[1].csr;
#if 0
                        printk(KERN_ERR "QSA_QBB_POP_1 at base %p is 0x%lx\n",
                               qsa, temp);
#endif
                        wildfire_iop_mask |= (1 << soft_qbb);
                        wildfire_ior_mask |= ((temp >> 4) & 0xf) << (soft_qbb << 2);

                        temp = qsa->qsa_qbb_id.csr;
#if 0
                        printk(KERN_ERR "QSA_QBB_ID at %p is 0x%lx\n", qsa, temp);
#endif
                        if (temp & 0x20) /* Is there a GP? */
                                wildfire_gp_mask |= (1 << soft_qbb);

                        /* Probe for PCA existence here. */
                        for (i = 0; i < WILDFIRE_PCA_PER_QBB; i++) {
                                iop = WILDFIRE_iop(soft_qbb);
                                ne = WILDFIRE_ne(soft_qbb, i);
                                fe = WILDFIRE_fe(soft_qbb, i);

                                if ((iop->iop_hose[i].init.csr & 1) == 1 &&
                                    ((ne->ne_what_am_i.csr & 0xf00000300UL) == 0x100000300UL) &&
                                    ((fe->fe_what_am_i.csr & 0xf00000300UL) == 0x100000200UL))
                                        wildfire_pca_mask |= 1 << ((soft_qbb << 2) + i);
                        }
                }
        }

#if DEBUG_DUMP_CONFIG
        wildfire_dump_hardware_config();
#endif
}
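/*
 * Worked example, not part of the original file: in the GPA_QBB_MAP
 * word assembled by wildfire_hardware_probe() above, each hard QBB
 * owns one nibble; bit 3 marks the QBB present and bits 2:0 hold its
 * soft id.  A nibble of 0xa (binary 1010) therefore means "present,
 * soft QBB 2", and the word is shifted right by 4 to reach the next
 * hard QBB.
 */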
void __init
wildfire_init_arch(void)
{
        int qbbno;

        /* With multiple PCI buses, we play with I/O as physical addrs. */
        ioport_resource.end = ~0UL;

        /* Probe the hardware for info about configuration. */
        wildfire_hardware_probe();

        /* Now init all the found QBBs. */
        for (qbbno = 0; qbbno < WILDFIRE_MAX_QBB; qbbno++) {
                wildfire_init_qbb(qbbno);
        }

        /* Normal direct PCI DMA mapping. */
        __direct_map_base = 0x40000000UL;
        __direct_map_size = 0x80000000UL;
}
void
wildfire_machine_check(unsigned long vector, unsigned long la_ptr,
                       struct pt_regs *regs)
{
        mb();
        mb();  /* magic */
        draina();
        /* FIXME: clear pci errors */
        wrmces(0x7);
        mb();

        process_mcheck_info(vector, la_ptr, regs, "WILDFIRE",
                            mcheck_expected(smp_processor_id()));
}
void
wildfire_kill_arch(int mode)
{
}
void
wildfire_pci_tbi(struct pci_controller *hose, dma_addr_t start, dma_addr_t end)
{
        int qbbno = hose->index >> 3;
        int hoseno = hose->index & 7;
        wildfire_pci *pci = WILDFIRE_pci(qbbno, hoseno);

        mb();
        pci->pci_flush_tlb.csr; /* reading does the trick */
}
static int
mk_conf_addr(struct pci_bus *pbus, unsigned int device_fn, int where,
             unsigned long *pci_addr, unsigned char *type1)
{
        struct pci_controller *hose = pbus->sysdata;
        unsigned long addr;
        u8 bus = pbus->number;

        DBG_CFG(("mk_conf_addr(bus=%d, device_fn=0x%x, where=0x%x, "
                 "pci_addr=0x%p, type1=0x%p)\n",
                 bus, device_fn, where, pci_addr, type1));

        if (!pbus->parent) /* No parent means peer PCI bus. */
                bus = 0;
        *type1 = (bus != 0);

        addr = (bus << 16) | (device_fn << 8) | where;
        addr |= hose->config_space_base;

        *pci_addr = addr;
        DBG_CFG(("mk_conf_addr: returning pci_addr 0x%lx\n", addr));
        return 0;
}
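/*
 * Illustrative sketch, not part of the original file: the offset that
 * mk_conf_addr() above packs before OR-ing in config_space_base.  The
 * helper name is hypothetical and exists only to spell out the
 * encoding.  For example, bus 1, devfn 0x20, where 0x10 gives
 * (1 << 16) | (0x20 << 8) | 0x10 = 0x12010.
 */
static inline unsigned long
wildfire_example_conf_offset(unsigned int bus, unsigned int devfn, int where)
{
        return ((unsigned long)bus << 16) | (devfn << 8) | where;
}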
static int
wildfire_read_config(struct pci_bus *bus, unsigned int devfn, int where,
                     int size, u32 *value)
{
        unsigned long addr;
        unsigned char type1;

        if (mk_conf_addr(bus, devfn, where, &addr, &type1))
                return PCIBIOS_DEVICE_NOT_FOUND;

        switch (size) {
        case 1:
                *value = __kernel_ldbu(*(vucp)addr);
                break;
        case 2:
                *value = __kernel_ldwu(*(vusp)addr);
                break;
        case 4:
                *value = *(vuip)addr;
                break;
        }

        return PCIBIOS_SUCCESSFUL;
}
static int
wildfire_write_config(struct pci_bus *bus, unsigned int devfn, int where,
                      int size, u32 value)
{
        unsigned long addr;
        unsigned char type1;

        if (mk_conf_addr(bus, devfn, where, &addr, &type1))
                return PCIBIOS_DEVICE_NOT_FOUND;

        switch (size) {
        case 1:
                __kernel_stb(value, *(vucp)addr);
                mb();
                __kernel_ldbu(*(vucp)addr);
                break;
        case 2:
                __kernel_stw(value, *(vusp)addr);
                mb();
                __kernel_ldwu(*(vusp)addr);
                break;
        case 4:
                *(vuip)addr = value;
                mb();
                *(vuip)addr;
                break;
        }

        return PCIBIOS_SUCCESSFUL;
}
struct pci_ops wildfire_pci_ops =
{
        .read =  wildfire_read_config,
        .write = wildfire_write_config,
};
/*
 * NUMA Support
 */
int wildfire_pa_to_nid(unsigned long pa)
{
        return pa >> 36;
}

int wildfire_cpuid_to_nid(int cpuid)
{
        /* assume 4 CPUs per node */
        return cpuid >> 2;
}

unsigned long wildfire_node_mem_start(int nid)
{
        /* 64GB per node */
        return (unsigned long)nid * (64UL * 1024 * 1024 * 1024);
}

unsigned long wildfire_node_mem_size(int nid)
{
        /* 64GB per node */
        return 64UL * 1024 * 1024 * 1024;
}
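/*
 * Worked example, not part of the original file, using the layout
 * assumed above (64GB per node, 4 CPUs per node): physical address
 * 0x1200000000 (72GB) lies on nid 0x1200000000 >> 36 = 1, whose memory
 * spans [0x1000000000, 0x2000000000); cpuid 5 maps to nid 5 >> 2 = 1.
 */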
#if DEBUG_DUMP_REGS

static void __init
wildfire_dump_pci_regs(int qbbno, int hoseno)
{
        wildfire_pci *pci = WILDFIRE_pci(qbbno, hoseno);
        int i;

        printk(KERN_ERR "PCI registers for QBB %d hose %d (%p)\n",
               qbbno, hoseno, pci);

        printk(KERN_ERR " PCI_IO_ADDR_EXT: 0x%16lx\n",
               pci->pci_io_addr_ext.csr);
        printk(KERN_ERR " PCI_CTRL: 0x%16lx\n", pci->pci_ctrl.csr);
        printk(KERN_ERR " PCI_ERR_SUM: 0x%16lx\n", pci->pci_err_sum.csr);
        printk(KERN_ERR " PCI_ERR_ADDR: 0x%16lx\n", pci->pci_err_addr.csr);
        printk(KERN_ERR " PCI_STALL_CNT: 0x%16lx\n", pci->pci_stall_cnt.csr);
        printk(KERN_ERR " PCI_PEND_INT: 0x%16lx\n", pci->pci_pend_int.csr);
        printk(KERN_ERR " PCI_SENT_INT: 0x%16lx\n", pci->pci_sent_int.csr);

        printk(KERN_ERR " DMA window registers for QBB %d hose %d (%p)\n",
               qbbno, hoseno, pci);
        for (i = 0; i < 4; i++) {
                printk(KERN_ERR " window %d: 0x%16lx 0x%16lx 0x%16lx\n", i,
                       pci->pci_window[i].wbase.csr,
                       pci->pci_window[i].wmask.csr,
                       pci->pci_window[i].tbase.csr);
        }
        printk(KERN_ERR "\n");
}
static void __init
wildfire_dump_pca_regs(int qbbno, int pcano)
{
        wildfire_pca *pca = WILDFIRE_pca(qbbno, pcano);
        int i;

        printk(KERN_ERR "PCA registers for QBB %d PCA %d (%p)\n",
               qbbno, pcano, pca);

        printk(KERN_ERR " PCA_WHAT_AM_I: 0x%16lx\n", pca->pca_what_am_i.csr);
        printk(KERN_ERR " PCA_ERR_SUM: 0x%16lx\n", pca->pca_err_sum.csr);
        printk(KERN_ERR " PCA_PEND_INT: 0x%16lx\n", pca->pca_pend_int.csr);
        printk(KERN_ERR " PCA_SENT_INT: 0x%16lx\n", pca->pca_sent_int.csr);
        printk(KERN_ERR " PCA_STDIO_EL: 0x%16lx\n",
               pca->pca_stdio_edge_level.csr);

        printk(KERN_ERR " PCA target registers for QBB %d PCA %d (%p)\n",
               qbbno, pcano, pca);
        for (i = 0; i < 4; i++) {
                printk(KERN_ERR " target %d: 0x%16lx 0x%16lx\n", i,
                       pca->pca_int[i].target.csr,
                       pca->pca_int[i].enable.csr);
        }

        printk(KERN_ERR "\n");
}
static void __init
wildfire_dump_qsa_regs(int qbbno)
{
        wildfire_qsa *qsa = WILDFIRE_qsa(qbbno);
        int i;

        printk(KERN_ERR "QSA registers for QBB %d (%p)\n", qbbno, qsa);

        printk(KERN_ERR " QSA_QBB_ID: 0x%16lx\n", qsa->qsa_qbb_id.csr);
        printk(KERN_ERR " QSA_PORT_ENA: 0x%16lx\n", qsa->qsa_port_ena.csr);
        printk(KERN_ERR " QSA_REF_INT: 0x%16lx\n", qsa->qsa_ref_int.csr);

        for (i = 0; i < 5; i++)
                printk(KERN_ERR " QSA_CONFIG_%d: 0x%16lx\n",
                       i, qsa->qsa_config[i].csr);

        for (i = 0; i < 2; i++)
                printk(KERN_ERR " QSA_QBB_POP_%d: 0x%16lx\n",
                       i, qsa->qsa_qbb_pop[i].csr);

        printk(KERN_ERR "\n");
}
static void __init
wildfire_dump_qsd_regs(int qbbno)
{
        wildfire_qsd *qsd = WILDFIRE_qsd(qbbno);

        printk(KERN_ERR "QSD registers for QBB %d (%p)\n", qbbno, qsd);

        printk(KERN_ERR " QSD_WHAMI: 0x%16lx\n", qsd->qsd_whami.csr);
        printk(KERN_ERR " QSD_REV: 0x%16lx\n", qsd->qsd_rev.csr);
        printk(KERN_ERR " QSD_PORT_PRESENT: 0x%16lx\n",
               qsd->qsd_port_present.csr);
        printk(KERN_ERR " QSD_PORT_ACTIVE: 0x%16lx\n",
               qsd->qsd_port_active.csr);
        printk(KERN_ERR " QSD_FAULT_ENA: 0x%16lx\n",
               qsd->qsd_fault_ena.csr);
        printk(KERN_ERR " QSD_CPU_INT_ENA: 0x%16lx\n",
               qsd->qsd_cpu_int_ena.csr);
        printk(KERN_ERR " QSD_MEM_CONFIG: 0x%16lx\n",
               qsd->qsd_mem_config.csr);
        printk(KERN_ERR " QSD_ERR_SUM: 0x%16lx\n",
               qsd->qsd_err_sum.csr);

        printk(KERN_ERR "\n");
}
static void __init
wildfire_dump_iop_regs(int qbbno)
{
        wildfire_iop *iop = WILDFIRE_iop(qbbno);
        int i;

        printk(KERN_ERR "IOP registers for QBB %d (%p)\n", qbbno, iop);

        printk(KERN_ERR " IOA_CONFIG: 0x%16lx\n", iop->ioa_config.csr);
        printk(KERN_ERR " IOD_CONFIG: 0x%16lx\n", iop->iod_config.csr);
        printk(KERN_ERR " IOP_SWITCH_CREDITS: 0x%16lx\n",
               iop->iop_switch_credits.csr);
        printk(KERN_ERR " IOP_HOSE_CREDITS: 0x%16lx\n",
               iop->iop_hose_credits.csr);

        for (i = 0; i < 4; i++)
                printk(KERN_ERR " IOP_HOSE_%d_INIT: 0x%16lx\n",
                       i, iop->iop_hose[i].init.csr);
        for (i = 0; i < 4; i++)
                printk(KERN_ERR " IOP_DEV_INT_TARGET_%d: 0x%16lx\n",
                       i, iop->iop_dev_int[i].target.csr);

        printk(KERN_ERR "\n");
}
static void __init
wildfire_dump_gp_regs(int qbbno)
{
        wildfire_gp *gp = WILDFIRE_gp(qbbno);
        int i;

        printk(KERN_ERR "GP registers for QBB %d (%p)\n", qbbno, gp);
        for (i = 0; i < 4; i++)
                printk(KERN_ERR " GPA_QBB_MAP_%d: 0x%16lx\n",
                       i, gp->gpa_qbb_map[i].csr);

        printk(KERN_ERR " GPA_MEM_POP_MAP: 0x%16lx\n",
               gp->gpa_mem_pop_map.csr);
        printk(KERN_ERR " GPA_SCRATCH: 0x%16lx\n", gp->gpa_scratch.csr);
        printk(KERN_ERR " GPA_DIAG: 0x%16lx\n", gp->gpa_diag.csr);
        printk(KERN_ERR " GPA_CONFIG_0: 0x%16lx\n", gp->gpa_config_0.csr);
        printk(KERN_ERR " GPA_INIT_ID: 0x%16lx\n", gp->gpa_init_id.csr);
        printk(KERN_ERR " GPA_CONFIG_2: 0x%16lx\n", gp->gpa_config_2.csr);

        printk(KERN_ERR "\n");
}

#endif /* DUMP_REGS */
#if DEBUG_DUMP_CONFIG
static void __init
wildfire_dump_hardware_config(void)
{
        int i;

        printk(KERN_ERR "Probed Hardware Configuration\n");

        printk(KERN_ERR " hard_qbb_mask: 0x%16lx\n", wildfire_hard_qbb_mask);
        printk(KERN_ERR " soft_qbb_mask: 0x%16lx\n", wildfire_soft_qbb_mask);

        printk(KERN_ERR " gp_mask: 0x%16lx\n", wildfire_gp_mask);
        printk(KERN_ERR " hs_mask: 0x%16lx\n", wildfire_hs_mask);
        printk(KERN_ERR " iop_mask: 0x%16lx\n", wildfire_iop_mask);
        printk(KERN_ERR " ior_mask: 0x%16lx\n", wildfire_ior_mask);
        printk(KERN_ERR " pca_mask: 0x%16lx\n", wildfire_pca_mask);

        printk(KERN_ERR " cpu_mask: 0x%16lx\n", wildfire_cpu_mask);
        printk(KERN_ERR " mem_mask: 0x%16lx\n", wildfire_mem_mask);

        printk(" hard_qbb_map: ");
        for (i = 0; i < WILDFIRE_MAX_QBB; i++)
                if (wildfire_hard_qbb_map[i] == QBB_MAP_EMPTY)
                        printk("--- ");
                else
                        printk("%3d ", wildfire_hard_qbb_map[i]);
        printk("\n");

        printk(" soft_qbb_map: ");
        for (i = 0; i < WILDFIRE_MAX_QBB; i++)
                if (wildfire_soft_qbb_map[i] == QBB_MAP_EMPTY)
                        printk("--- ");
                else
                        printk("%3d ", wildfire_soft_qbb_map[i]);
        printk("\n");
}
#endif /* DUMP_CONFIG */