2 * Copyright (c) 2010 Broadcom Corporation
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
11 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
13 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
14 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 #include <linux/delay.h>
18 #include <linux/kernel.h>
19 #include <linux/string.h>
22 #include <linux/netdevice.h>
25 #include <linux/module.h>
26 #include <linux/pci.h>
/*
 * True when running on a BCM47162 rev 0 whose current core is the MIPS74K.
 * NOTE(review): this function-like macro reads the local variables 'sih'
 * and 'sii' from its expansion site, so it can only be used inside
 * functions that declare both — a hidden coupling worth keeping in mind.
 */
34 #define BCM47162_DMP() ((sih->chip == BCM47162_CHIP_ID) && \
35 (sih->chiprev == 0) && \
36 (sii->coreid[sii->curidx] == MIPS74K_CORE_ID))
/*
 * Fetch the next enumeration-ROM entry matching (ent & mask) == match,
 * advancing *eromptr, while counting invalid ('inv') and non-matching
 * ('nom') entries for the verbose log below.
 * NOTE(review): this chunk is fragmentary — the surrounding loop, the
 * increments of inv/nom, and the return statement are elided from view.
 */
41 get_erom_ent(si_t
*sih
, u32
**eromptr
, u32 mask
, u32 match
)
44 uint inv
= 0, nom
= 0;
/* Read the current EROM word through the osl register accessor. */
47 ent
= R_REG(si_osh(sih
), *eromptr
);
/* Skip entries without the valid bit set. */
53 if ((ent
& ER_VALID
) == 0) {
/* A valid END marker terminates the search. */
58 if (ent
== (ER_END
| ER_VALID
))
/* Entry is valid; stop once it matches the caller's mask/match. */
61 if ((ent
& mask
) == match
)
67 SI_VMSG(("%s: Returning ent 0x%08x\n", __func__
, ent
));
69 SI_VMSG((" after %d invalid and %d non-matching entries\n",
/*
 * Read one Address Space Descriptor (ASD) from the EROM.
 * Accepts the descriptor only if it is an address entry (ER_ADD tag) for
 * slave port 'sp' with descriptor type 'st'; otherwise it is "pushed
 * back" so the caller can re-examine it. On success the 64-bit base goes
 * to *addrl/*addrh and the 64-bit size to *sizel/*sizeh (either an
 * explicit size descriptor, SD_SZ_MASK, or AD_SZ_BASE << encoded shift).
 * NOTE(review): fragmentary chunk — the return statements, the 'ad'
 * parameter's use, and some control-flow lines are elided from this view.
 */
76 get_asd(si_t
*sih
, u32
**eromptr
, uint sp
, uint ad
, uint st
,
77 u32
*addrl
, u32
*addrh
, u32
*sizel
, u32
*sizeh
)
81 asd
= get_erom_ent(sih
, eromptr
, ER_VALID
, ER_VALID
);
/* Reject entries that are not an address descriptor for (sp, st). */
82 if (((asd
& ER_TAG1
) != ER_ADD
) ||
83 (((asd
& AD_SP_MASK
) >> AD_SP_SHIFT
) != sp
) ||
84 ((asd
& AD_ST_MASK
) != st
)) {
85 /* This is not what we want, "push" it back */
/* Low 32 bits of the base address live in the descriptor itself. */
89 *addrl
= asd
& AD_ADDR_MASK
;
/* A following EROM word supplies the high 32 address bits. */
91 *addrh
= get_erom_ent(sih
, eromptr
, 0, 0);
95 sz
= asd
& AD_SZ_MASK
;
/* AD_SZ_SZD means the size is spelled out in extra descriptor words. */
96 if (sz
== AD_SZ_SZD
) {
97 szd
= get_erom_ent(sih
, eromptr
, 0, 0);
98 *sizel
= szd
& SD_SZ_MASK
;
100 *sizeh
= get_erom_ent(sih
, eromptr
, 0, 0);
/* Otherwise the size is AD_SZ_BASE scaled by the encoded exponent. */
102 *sizel
= AD_SZ_BASE
<< (sz
>> AD_SZ_SHIFT
);
104 SI_VMSG((" SP %d, ad %d: st = %d, 0x%08x_0x%08x @ 0x%08x_0x%08x\n",
105 sp
, ad
, st
, *sizeh
, *sizel
, *addrh
, *addrl
));
/*
 * Hardware fixups applied to the parsed core information.
 * NOTE(review): only the signature is visible in this chunk; the body is
 * elided — confirm against the full file before documenting behavior.
 */
110 static void ai_hwfixup(si_info_t
*sii
)
114 /* parse the enumeration rom to identify all cores */
/*
 * Walk the AXI enumeration ROM and populate 'sii' with, per discovered
 * core: core id (coreid), main register base/size (coresba/coresba_size),
 * optional second address space (coresba2/coresba2_size), and wrapper
 * base (wrapba). Maps the EROM according to the bus type (SI vs PCI).
 * NOTE(review): many lines are elided in this view — the declarations of
 * 'idx', 'i', 'j' and 'base', several break/continue paths, and the
 * closing of most control structures. Comments below annotate only what
 * is visible.
 */
115 void ai_scan(si_t
*sih
, void *regs
, uint devid
)
117 si_info_t
*sii
= SI_INFO(sih
);
118 chipcregs_t
*cc
= (chipcregs_t
*) regs
;
119 u32 erombase
, *eromptr
, *eromlim
;
/* Chipcommon tells us where the EROM lives. */
121 erombase
= R_REG(sii
->osh
, &cc
->eromptr
);
/* Map the EROM differently depending on how we are attached. */
123 switch (sih
->bustype
) {
125 eromptr
= (u32
*) REG_MAP(erombase
, SI_CORE_SIZE
);
129 /* Set wrappers address */
130 sii
->curwrap
= (void *)((unsigned long)regs
+ SI_CORE_SIZE
);
132 /* Now point the window at the erom */
133 pci_write_config_dword(sii
->osh
->pdev
, PCI_BAR0_WIN
, erombase
);
141 eromptr
= (u32
*)(unsigned long)erombase
;
145 SI_ERROR(("Don't know how to do AXI enumertion on bus %d\n",
/* The EROM proper ends where the remap-control registers begin. */
150 eromlim
= eromptr
+ (ER_REMAPCONTROL
/ sizeof(u32
));
152 SI_VMSG(("ai_scan: regs = 0x%p, erombase = 0x%08x, eromptr = 0x%p, eromlim = 0x%p\n", regs
, erombase
, eromptr
, eromlim
));
/* One iteration per EROM component. */
153 while (eromptr
< eromlim
) {
154 u32 cia
, cib
, cid
, mfg
, crev
, nmw
, nsw
, nmp
, nsp
;
155 u32 mpd
, asd
, addrl
, addrh
, sizel
, sizeh
;
162 /* Grok a component */
163 cia
= get_erom_ent(sih
, &eromptr
, ER_TAG
, ER_CI
);
164 if (cia
== (ER_END
| ER_VALID
)) {
165 SI_VMSG(("Found END of erom after %d cores\n",
/* CIA must be immediately followed by the second component word, CIB. */
171 cib
= get_erom_ent(sih
, &eromptr
, 0, 0);
173 if ((cib
& ER_TAG
) != ER_CI
) {
174 SI_ERROR(("CIA not followed by CIB\n"));
/* Decode id/manufacturer/revision and the port/wrapper counts. */
178 cid
= (cia
& CIA_CID_MASK
) >> CIA_CID_SHIFT
;
179 mfg
= (cia
& CIA_MFG_MASK
) >> CIA_MFG_SHIFT
;
180 crev
= (cib
& CIB_REV_MASK
) >> CIB_REV_SHIFT
;
181 nmw
= (cib
& CIB_NMW_MASK
) >> CIB_NMW_SHIFT
;
182 nsw
= (cib
& CIB_NSW_MASK
) >> CIB_NSW_SHIFT
;
183 nmp
= (cib
& CIB_NMP_MASK
) >> CIB_NMP_SHIFT
;
184 nsp
= (cib
& CIB_NSP_MASK
) >> CIB_NSP_SHIFT
;
186 SI_VMSG(("Found component 0x%04x/0x%04x rev %d at erom addr 0x%p, with nmw = %d, " "nsw = %d, nmp = %d & nsp = %d\n", mfg
, cid
, crev
, base
, nmw
, nsw
, nmp
, nsp
));
/* Skip the ARM default component and anything with no slave ports. */
188 if (((mfg
== MFGID_ARM
) && (cid
== DEF_AI_COMP
)) || (nsp
== 0))
190 if ((nmw
+ nsw
== 0)) {
191 /* A component which is not a core */
192 if (cid
== OOB_ROUTER_CORE_ID
) {
193 asd
= get_asd(sih
, &eromptr
, 0, 0, AD_ST_SLAVE
,
194 &addrl
, &addrh
, &sizel
, &sizeh
);
/* Remember the out-of-band router's base for later use. */
196 sii
->oob_router
= addrl
;
203 /* sii->eromptr[idx] = base; */
206 sii
->coreid
[idx
] = cid
;
/* Consume (and log) all master port descriptors. */
208 for (i
= 0; i
< nmp
; i
++) {
209 mpd
= get_erom_ent(sih
, &eromptr
, ER_VALID
, ER_VALID
);
210 if ((mpd
& ER_TAG
) != ER_MP
) {
211 SI_ERROR(("Not enough MP entries for component 0x%x\n", cid
));
214 SI_VMSG((" Master port %d, mp: %d id: %d\n", i
,
215 (mpd
& MPD_MP_MASK
) >> MPD_MP_SHIFT
,
216 (mpd
& MPD_MUI_MASK
) >> MPD_MUI_SHIFT
));
219 /* First Slave Address Descriptor should be port 0:
220 * the main register space for the core
223 get_asd(sih
, &eromptr
, 0, 0, AD_ST_SLAVE
, &addrl
, &addrh
,
226 /* Try again to see if it is a bridge */
228 get_asd(sih
, &eromptr
, 0, 0, AD_ST_BRIDGE
, &addrl
,
229 &addrh
, &sizel
, &sizeh
);
/* Core register spaces are expected to be exactly SI_CORE_SIZE (4KB). */
232 else if ((addrh
!= 0) || (sizeh
!= 0)
233 || (sizel
!= SI_CORE_SIZE
)) {
234 SI_ERROR(("First Slave ASD for core 0x%04x malformed " "(0x%08x)\n", cid
, asd
));
238 sii
->coresba
[idx
] = addrl
;
239 sii
->coresba_size
[idx
] = sizel
;
240 /* Get any more ASDs in port 0 */
244 get_asd(sih
, &eromptr
, 0, j
, AD_ST_SLAVE
, &addrl
,
245 &addrh
, &sizel
, &sizeh
);
/* Only a second 4KB space is recorded as the core's alternate space. */
246 if ((asd
!= 0) && (j
== 1) && (sizel
== SI_CORE_SIZE
)) {
247 sii
->coresba2
[idx
] = addrl
;
248 sii
->coresba2_size
[idx
] = sizel
;
253 /* Go through the ASDs for other slave ports */
254 for (i
= 1; i
< nsp
; i
++) {
258 get_asd(sih
, &eromptr
, i
, j
++, AD_ST_SLAVE
,
259 &addrl
, &addrh
, &sizel
, &sizeh
);
262 SI_ERROR((" SP %d has no address descriptors\n",
268 /* Now get master wrappers */
269 for (i
= 0; i
< nmw
; i
++) {
271 get_asd(sih
, &eromptr
, i
, 0, AD_ST_MWRAP
, &addrl
,
272 &addrh
, &sizel
, &sizeh
);
274 SI_ERROR(("Missing descriptor for MW %d\n", i
));
277 if ((sizeh
!= 0) || (sizel
!= SI_CORE_SIZE
)) {
278 SI_ERROR(("Master wrapper %d is not 4KB\n", i
));
/* First master wrapper doubles as the core's wrapper base. */
282 sii
->wrapba
[idx
] = addrl
;
285 /* And finally slave wrappers */
286 for (i
= 0; i
< nsw
; i
++) {
/* With a single slave port the wrapper descriptors start at port 0. */
287 uint fwp
= (nsp
== 1) ? 0 : 1;
289 get_asd(sih
, &eromptr
, fwp
+ i
, 0, AD_ST_SWRAP
,
290 &addrl
, &addrh
, &sizel
, &sizeh
);
292 SI_ERROR(("Missing descriptor for SW %d\n", i
));
295 if ((sizeh
!= 0) || (sizel
!= SI_CORE_SIZE
)) {
296 SI_ERROR(("Slave wrapper %d is not 4KB\n", i
));
/* No master wrappers: first slave wrapper becomes the wrapper base. */
299 if ((nmw
== 0) && (i
== 0))
300 sii
->wrapba
[idx
] = addrl
;
303 /* Don't record bridges */
311 SI_ERROR(("Reached end of erom without finding END"));
318 /* This function changes the logical "focus" to the indicated core.
319 * Return the current core's virtual address.
/*
 * Selects core 'coreidx': maps (and caches) its register and wrapper
 * spaces on SI bus, or slides the PCI BAR0 windows onto them on PCI bus,
 * then records the new current index in sii->curidx.
 * NOTE(review): fragmentary view — the 'regs' local declaration, the
 * out-of-range early return body, and the final return are elided.
 */
321 void *ai_setcoreidx(si_t
*sih
, uint coreidx
)
323 si_info_t
*sii
= SI_INFO(sih
);
324 u32 addr
= sii
->coresba
[coreidx
];
325 u32 wrap
= sii
->wrapba
[coreidx
];
/* Reject indices beyond the cores found by ai_scan. */
328 if (coreidx
>= sii
->numcores
)
332 * If the user has provided an interrupt mask enabled function,
333 * then assert interrupts are disabled before switching the core.
335 ASSERT((sii
->intrsenabled_fn
== NULL
)
336 || !(*(sii
)->intrsenabled_fn
) ((sii
)->intr_arg
));
338 switch (sih
->bustype
) {
/* SI bus: lazily map and cache per-core register/wrapper spaces. */
341 if (!sii
->regs
[coreidx
]) {
342 sii
->regs
[coreidx
] = REG_MAP(addr
, SI_CORE_SIZE
);
343 ASSERT(GOODREGS(sii
->regs
[coreidx
]));
345 sii
->curmap
= regs
= sii
->regs
[coreidx
];
346 if (!sii
->wrappers
[coreidx
]) {
347 sii
->wrappers
[coreidx
] = REG_MAP(wrap
, SI_CORE_SIZE
);
348 ASSERT(GOODREGS(sii
->wrappers
[coreidx
]));
350 sii
->curwrap
= sii
->wrappers
[coreidx
];
354 /* point bar0 window */
355 pci_write_config_dword(sii
->osh
->pdev
, PCI_BAR0_WIN
, addr
);
357 /* point bar0 2nd 4KB window */
358 pci_write_config_dword(sii
->osh
->pdev
, PCI_BAR0_WIN2
, wrap
);
/* Other buses: backplane addresses are directly usable as pointers. */
365 sii
->curmap
= regs
= (void *)(unsigned long)addr
;
366 sii
->curwrap
= (void *)(unsigned long)wrap
;
/* Commit the switch. */
376 sii
->curidx
= coreidx
;
381 /* Return the number of address spaces in current core */
/* NOTE(review): only the signature is visible here; the body (and the
 * constant it returns) is elided from this chunk. */
382 int ai_numaddrspaces(si_t
*sih
)
387 /* Return the address of the nth address space in the current core */
/*
 * Returns coresba[] for the first space and coresba2[] for the second;
 * higher indices are unsupported and only log an error.
 * NOTE(review): the declarations of 'sii'/'cidx' and the selecting
 * if/else chain are elided from this view.
 */
388 u32
ai_addrspace(si_t
*sih
, uint asidx
)
397 return sii
->coresba
[cidx
];
399 return sii
->coresba2
[cidx
];
401 SI_ERROR(("%s: Need to parse the erom again to find addr space %d\n", __func__
, asidx
));
406 /* Return the size of the nth address space in the current core */
/*
 * Companion to ai_addrspace(): coresba_size[] for the first space,
 * coresba2_size[] for the second; higher indices only log an error.
 * NOTE(review): 'sii'/'cidx' declarations and the selection logic are
 * elided from this view.
 */
407 u32
ai_addrspacesize(si_t
*sih
, uint asidx
)
416 return sii
->coresba_size
[cidx
];
418 return sii
->coresba2_size
[cidx
];
420 SI_ERROR(("%s: Need to parse the erom again to find addr space %d\n", __func__
, asidx
));
/*
 * Returns the current core's OOB select flag: the low 5 bits of the
 * wrapper's oobselouta30 register. Guarded: on BCM47162 rev 0 the MIPS
 * core's DMP registers cannot be read, so only an error is logged.
 * NOTE(review): the declarations of 'sii' and 'ai' (the wrapper regs
 * pointer) are elided from this view.
 */
425 uint
ai_flag(si_t
*sih
)
431 if (BCM47162_DMP()) {
432 SI_ERROR(("%s: Attempting to read MIPS DMP registers on 47162a0", __func__
));
437 return R_REG(sii
->osh
, &ai
->oobselouta30
) & 0x1f;
/* Set interrupt routing for 'siflag'.
 * NOTE(review): only the signature is visible; the body is elided from
 * this chunk (possibly intentionally empty — confirm in the full file). */
440 void ai_setint(si_t
*sih
, int siflag
)
444 void ai_write_wrap_reg(si_t
*sih
, u32 offset
, u32 val
)
446 si_info_t
*sii
= SI_INFO(sih
);
447 u32
*w
= (u32
*) sii
->curwrap
;
448 W_REG(sii
->osh
, w
+ (offset
/ 4), val
);
/*
 * Returns the manufacturer id of the current core, extracted from the
 * cached CIA word recorded for sii->curidx during the EROM scan.
 * NOTE(review): the declarations of 'sii' and 'cia' are elided here.
 */
452 uint
ai_corevendor(si_t
*sih
)
458 cia
= sii
->cia
[sii
->curidx
];
459 return (cia
& CIA_MFG_MASK
) >> CIA_MFG_SHIFT
;
/*
 * Returns the revision of the current core, extracted from the cached
 * CIB word recorded for sii->curidx during the EROM scan.
 * NOTE(review): the declarations of 'sii' and 'cib' are elided here.
 */
462 uint
ai_corerev(si_t
*sih
)
468 cib
= sii
->cib
[sii
->curidx
];
469 return (cib
& CIB_REV_MASK
) >> CIB_REV_SHIFT
;
/*
 * A core counts as "up" when its clock is enabled without force-gating
 * (ioctrl's SICF_FGC clear, SICF_CLOCK_EN set — the expected value is on
 * an elided line) and the wrapper's resetctrl does not hold AIRC_RESET.
 * NOTE(review): declarations of 'sii'/'ai' and the comparison constant
 * between the two register reads are elided from this view.
 */
472 bool ai_iscoreup(si_t
*sih
)
480 return (((R_REG(sii
->osh
, &ai
->ioctrl
) & (SICF_FGC
| SICF_CLOCK_EN
)) ==
482 && ((R_REG(sii
->osh
, &ai
->resetctrl
) & AIRC_RESET
) == 0));
/*
 * Read-modify-write one 32-bit register of core 'coreidx':
 * new = (old & ~mask) | val; returns the value read back.
 * Fast paths avoid a core switch where the register is already
 * addressable (SI bus, or the PCI window for chipc/pci core registers);
 * otherwise interrupts are masked, the core is switched, and the original
 * core is restored afterwards.
 * NOTE(review): fragmentary — declarations of 'r', 'w', 'origidx',
 * 'intr_val', several early returns, and the final return are elided.
 */
486 * Switch to 'coreidx', issue a single arbitrary 32bit register mask&set operation,
487 * switch back to the original core, and return the new value.
489 * When using the silicon backplane, no fiddling with interrupts or core switches is needed.
491 * Also, when using pci/pcie, we can optimize away the core switching for pci registers
492 * and (on newer pci cores) chipcommon registers.
494 uint
ai_corereg(si_t
*sih
, uint coreidx
, uint regoff
, uint mask
, uint val
)
505 ASSERT(GOODIDX(coreidx
));
506 ASSERT(regoff
< SI_CORE_SIZE
);
507 ASSERT((val
& ~mask
) == 0);
509 if (coreidx
>= SI_MAXCORES
)
512 if (sih
->bustype
== SI_BUS
) {
513 /* If internal bus, we can always get at everything */
515 /* map if does not exist */
516 if (!sii
->regs
[coreidx
]) {
517 sii
->regs
[coreidx
] = REG_MAP(sii
->coresba
[coreidx
],
519 ASSERT(GOODREGS(sii
->regs
[coreidx
]));
521 r
= (u32
*) ((unsigned char *) sii
->regs
[coreidx
] + regoff
);
522 } else if (sih
->bustype
== PCI_BUS
) {
523 /* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */
525 if ((sii
->coreid
[coreidx
] == CC_CORE_ID
) && SI_FAST(sii
)) {
526 /* Chipc registers are mapped at 12KB */
529 r
= (u32
*) ((char *)sii
->curmap
+
530 PCI_16KB0_CCREGS_OFFSET
+ regoff
);
531 } else if (sii
->pub
.buscoreidx
== coreidx
) {
532 /* pci registers are at either in the last 2KB of an 8KB window
533 * or, in pcie and pci rev 13 at 8KB
537 r
= (u32
*) ((char *)sii
->curmap
+
538 PCI_16KB0_PCIREGS_OFFSET
+
541 r
= (u32
*) ((char *)sii
->curmap
+
542 ((regoff
>= SBCONFIGOFF
) ?
543 PCI_BAR0_PCISBR_OFFSET
:
544 PCI_BAR0_PCIREGS_OFFSET
) +
/* Slow path: mask interrupts, switch cores, do the access, restore. */
550 INTR_OFF(sii
, intr_val
);
552 /* save current core index */
553 origidx
= si_coreidx(&sii
->pub
);
556 r
= (u32
*) ((unsigned char *) ai_setcoreidx(&sii
->pub
, coreidx
) +
/* mask&set: only bits inside 'mask' are replaced by 'val'. */
563 w
= (R_REG(sii
->osh
, r
) & ~mask
) | val
;
564 W_REG(sii
->osh
, r
, w
);
/* Read back the (possibly hardware-modified) value to return. */
568 w
= R_REG(sii
->osh
, r
);
571 /* restore core index */
572 if (origidx
!= coreidx
)
573 ai_setcoreidx(&sii
->pub
, origidx
);
575 INTR_RESTORE(sii
, intr_val
);
/*
 * Put the current core into reset: write the caller's 'bits' to the
 * wrapper ioctrl register, then assert AIRC_RESET in resetctrl. A no-op
 * when the core is already held in reset.
 * NOTE(review): declarations of 'sii'/'ai'/'dummy' and the delays between
 * steps are elided from this view. The throwaway read of ioctrl is
 * presumably there to post the preceding write before asserting reset —
 * confirm against the wrapper documentation.
 */
581 void ai_core_disable(si_t
*sih
, u32 bits
)
589 ASSERT(GOODREGS(sii
->curwrap
));
592 /* if core is already in reset, just return */
593 if (R_REG(sii
->osh
, &ai
->resetctrl
) & AIRC_RESET
)
596 W_REG(sii
->osh
, &ai
->ioctrl
, bits
);
597 dummy
= R_REG(sii
->osh
, &ai
->ioctrl
);
600 W_REG(sii
->osh
, &ai
->resetctrl
, AIRC_RESET
);
604 /* reset and re-enable a core
606 * bits - core specific bits that are set during and after reset sequence
607 * resetbits - core specific bits that are set only during reset sequence
/*
 * Sequence: disable the core with (bits | resetbits) asserted, enable the
 * clock with force-gating (SICF_FGC) while releasing resetctrl, then
 * clear SICF_FGC leaving (bits | SICF_CLOCK_EN) in ioctrl.
 * NOTE(review): declarations of 'sii'/'ai'/'dummy' and the udelay()s
 * between steps are elided from this view; throwaway ioctrl reads
 * presumably post the writes — confirm against wrapper documentation.
 */
609 void ai_core_reset(si_t
*sih
, u32 bits
, u32 resetbits
)
616 ASSERT(GOODREGS(sii
->curwrap
));
620 * Must do the disable sequence first to work for arbitrary current core state.
622 ai_core_disable(sih
, (bits
| resetbits
));
625 * Now do the initialization sequence.
627 W_REG(sii
->osh
, &ai
->ioctrl
, (bits
| SICF_FGC
| SICF_CLOCK_EN
));
628 dummy
= R_REG(sii
->osh
, &ai
->ioctrl
);
629 W_REG(sii
->osh
, &ai
->resetctrl
, 0);
632 W_REG(sii
->osh
, &ai
->ioctrl
, (bits
| SICF_CLOCK_EN
));
633 dummy
= R_REG(sii
->osh
, &ai
->ioctrl
);
/*
 * Write-only variant of ai_core_cflags(): mask&set the current core's
 * wrapper ioctrl register without returning the result. Guarded against
 * BCM47162 rev 0, whose MIPS DMP registers are inaccessible.
 * NOTE(review): declarations of 'sii'/'ai'/'w' and the guard's early
 * return are elided from this view.
 */
637 void ai_core_cflags_wo(si_t
*sih
, u32 mask
, u32 val
)
645 if (BCM47162_DMP()) {
646 SI_ERROR(("%s: Accessing MIPS DMP register (ioctrl) on 47162a0",
651 ASSERT(GOODREGS(sii
->curwrap
));
654 ASSERT((val
& ~mask
) == 0);
/* new ioctrl = (old & ~mask) | val */
657 w
= ((R_REG(sii
->osh
, &ai
->ioctrl
) & ~mask
) | val
);
658 W_REG(sii
->osh
, &ai
->ioctrl
, w
);
/*
 * Mask&set the current core's wrapper ioctrl register and return the
 * value read back. Guarded against BCM47162 rev 0, whose MIPS DMP
 * registers are inaccessible.
 * NOTE(review): declarations of 'sii'/'ai'/'w' and the guard's early
 * return are elided from this view.
 */
662 u32
ai_core_cflags(si_t
*sih
, u32 mask
, u32 val
)
669 if (BCM47162_DMP()) {
670 SI_ERROR(("%s: Accessing MIPS DMP register (ioctrl) on 47162a0",
675 ASSERT(GOODREGS(sii
->curwrap
));
678 ASSERT((val
& ~mask
) == 0);
/* new ioctrl = (old & ~mask) | val */
681 w
= ((R_REG(sii
->osh
, &ai
->ioctrl
) & ~mask
) | val
);
682 W_REG(sii
->osh
, &ai
->ioctrl
, w
);
/* Return what the hardware now reports, not the value written. */
685 return R_REG(sii
->osh
, &ai
->ioctrl
);
688 u32
ai_core_sflags(si_t
*sih
, u32 mask
, u32 val
)
695 if (BCM47162_DMP()) {
696 SI_ERROR(("%s: Accessing MIPS DMP register (iostatus) on 47162a0", __func__
));
700 ASSERT(GOODREGS(sii
->curwrap
));
703 ASSERT((val
& ~mask
) == 0);
704 ASSERT((mask
& ~SISF_CORE_BITS
) == 0);
707 w
= ((R_REG(sii
->osh
, &ai
->iostatus
) & ~mask
) | val
);
708 W_REG(sii
->osh
, &ai
->iostatus
, w
);
711 return R_REG(sii
->osh
, &ai
->iostatus
);