/*
 * Misc utility routines for accessing chip-specific features
 * of the SiliconBackplane-based Broadcom chips.
 *
 * Copyright (C) 2010, Broadcom Corporation. All Rights Reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 *
 * $Id: sbutils.c,v 1.687 2009-11-05 01:06:56 Exp $
 */
34 #include "siutils_priv.h"
37 /* local prototypes */
38 static uint
_sb_coreidx(si_info_t
*sii
, uint32 sba
);
39 static uint
_sb_scan(si_info_t
*sii
, uint32 sba
, void *regs
, uint bus
, uint32 sbba
,
41 static uint32
_sb_coresba(si_info_t
*sii
);
42 static void *_sb_setcoreidx(si_info_t
*sii
, uint coreidx
);
/* Read-modify-write of an sbconfig register: clear 'mask' bits then OR in 'val'. */
#define SET_SBREG(sii, r, mask, val) \
		W_SBREG((sii), (r), ((R_SBREG((sii), (r)) & ~(mask)) | (val)))
/* Map a core's register base to its sbconfig_t block at offset SBCONFIGOFF. */
#define REGS2SB(va)	(sbconfig_t*) ((int8*)(va) + SBCONFIGOFF)

/* Sonics interconnect revision codes extracted from sbidlow */
#define SONICS_2_2	(SBIDL_RV_2_2 >> SBIDL_RV_SHIFT)
#define SONICS_2_3	(SBIDL_RV_2_3 >> SBIDL_RV_SHIFT)

/* sbconfig register accessors; the functions handle the PCMCIA quirks below */
#define R_SBREG(sii, sbr)	sb_read_sbreg((sii), (sbr))
#define W_SBREG(sii, sbr, v)	sb_write_sbreg((sii), (sbr), (v))
#define AND_SBREG(sii, sbr, v)	W_SBREG((sii), (sbr), (R_SBREG((sii), (sbr)) & (v)))
#define OR_SBREG(sii, sbr, v)	W_SBREG((sii), (sbr), (R_SBREG((sii), (sbr)) | (v)))
58 sb_read_sbreg(si_info_t
*sii
, volatile uint32
*sbr
)
61 uint32 val
, intr_val
= 0;
65 * compact flash only has 11 bits address, while we needs 12 bits address.
66 * MEM_SEG will be OR'd with other 11 bits address in hardware,
67 * so we program MEM_SEG with 12th bit when necessary(access sb regsiters).
68 * For normal PCMCIA bus(CFTable_regwinsz > 2k), do nothing special
71 INTR_OFF(sii
, intr_val
);
73 OSL_PCMCIA_WRITE_ATTR(sii
->osh
, MEM_SEG
, &tmp
, 1);
74 sbr
= (volatile uint32
*)((uintptr
)sbr
& ~(1 << 11)); /* mask out bit 11 */
77 val
= R_REG(sii
->osh
, sbr
);
81 OSL_PCMCIA_WRITE_ATTR(sii
->osh
, MEM_SEG
, &tmp
, 1);
82 INTR_RESTORE(sii
, intr_val
);
89 sb_write_sbreg(si_info_t
*sii
, volatile uint32
*sbr
, uint32 v
)
92 volatile uint32 dummy
;
97 * compact flash only has 11 bits address, while we needs 12 bits address.
98 * MEM_SEG will be OR'd with other 11 bits address in hardware,
99 * so we program MEM_SEG with 12th bit when necessary(access sb regsiters).
100 * For normal PCMCIA bus(CFTable_regwinsz > 2k), do nothing special
103 INTR_OFF(sii
, intr_val
);
105 OSL_PCMCIA_WRITE_ATTR(sii
->osh
, MEM_SEG
, &tmp
, 1);
106 sbr
= (volatile uint32
*)((uintptr
)sbr
& ~(1 << 11)); /* mask out bit 11 */
109 if (BUSTYPE(sii
->pub
.bustype
) == PCMCIA_BUS
) {
111 dummy
= R_REG(sii
->osh
, sbr
);
112 W_REG(sii
->osh
, ((volatile uint16
*)sbr
+ 1), (uint16
)((v
>> 16) & 0xffff));
113 dummy
= R_REG(sii
->osh
, sbr
);
114 W_REG(sii
->osh
, (volatile uint16
*)sbr
, (uint16
)(v
& 0xffff));
116 dummy
= R_REG(sii
->osh
, sbr
);
117 W_REG(sii
->osh
, (volatile uint16
*)sbr
, (uint16
)(v
& 0xffff));
118 dummy
= R_REG(sii
->osh
, sbr
);
119 W_REG(sii
->osh
, ((volatile uint16
*)sbr
+ 1), (uint16
)((v
>> 16) & 0xffff));
120 #endif /* IL_BIGENDIAN */
122 W_REG(sii
->osh
, sbr
, v
);
126 OSL_PCMCIA_WRITE_ATTR(sii
->osh
, MEM_SEG
, &tmp
, 1);
127 INTR_RESTORE(sii
, intr_val
);
138 sb
= REGS2SB(sii
->curmap
);
140 return ((R_SBREG(sii
, &sb
->sbidhigh
) & SBIDH_CC_MASK
) >> SBIDH_CC_SHIFT
);
144 sb_intflag(si_t
*sih
)
149 uint origidx
, intflag
, intr_val
= 0;
153 INTR_OFF(sii
, intr_val
);
154 origidx
= si_coreidx(sih
);
155 corereg
= si_setcore(sih
, CC_CORE_ID
, 0);
156 ASSERT(corereg
!= NULL
);
157 sb
= REGS2SB(corereg
);
158 intflag
= R_SBREG(sii
, &sb
->sbflagst
);
159 sb_setcoreidx(sih
, origidx
);
160 INTR_RESTORE(sii
, intr_val
);
172 sb
= REGS2SB(sii
->curmap
);
174 return R_SBREG(sii
, &sb
->sbtpsflag
) & SBTPS_NUM0_MASK
;
178 sb_setint(si_t
*sih
, int siflag
)
185 sb
= REGS2SB(sii
->curmap
);
191 W_SBREG(sii
, &sb
->sbintvec
, vec
);
194 /* return core index of the core with address 'sba' */
196 BCMATTACHFN(_sb_coreidx
)(si_info_t
*sii
, uint32 sba
)
200 for (i
= 0; i
< sii
->numcores
; i
++)
201 if (sba
== sii
->coresba
[i
])
206 /* return core address of the current core */
208 BCMATTACHFN(_sb_coresba
)(si_info_t
*sii
)
213 switch (BUSTYPE(sii
->pub
.bustype
)) {
215 sbconfig_t
*sb
= REGS2SB(sii
->curmap
);
216 sbaddr
= sb_base(R_SBREG(sii
, &sb
->sbadmatch0
));
221 sbaddr
= OSL_PCI_READ_CONFIG(sii
->osh
, PCI_BAR0_WIN
, sizeof(uint32
));
226 OSL_PCMCIA_READ_ATTR(sii
->osh
, PCMCIA_ADDR0
, &tmp
, 1);
227 sbaddr
= (uint32
)tmp
<< 12;
228 OSL_PCMCIA_READ_ATTR(sii
->osh
, PCMCIA_ADDR1
, &tmp
, 1);
229 sbaddr
|= (uint32
)tmp
<< 16;
230 OSL_PCMCIA_READ_ATTR(sii
->osh
, PCMCIA_ADDR2
, &tmp
, 1);
231 sbaddr
|= (uint32
)tmp
<< 24;
238 sbaddr
= (uint32
)(uintptr
)sii
->curmap
;
243 sbaddr
= BADCOREADDR
;
251 sb_corevendor(si_t
*sih
)
257 sb
= REGS2SB(sii
->curmap
);
259 return ((R_SBREG(sii
, &sb
->sbidhigh
) & SBIDH_VC_MASK
) >> SBIDH_VC_SHIFT
);
263 sb_corerev(si_t
*sih
)
270 sb
= REGS2SB(sii
->curmap
);
271 sbidh
= R_SBREG(sii
, &sb
->sbidhigh
);
273 return (SBCOREREV(sbidh
));
276 /* set core-specific control flags */
278 sb_core_cflags_wo(si_t
*sih
, uint32 mask
, uint32 val
)
285 sb
= REGS2SB(sii
->curmap
);
287 ASSERT((val
& ~mask
) == 0);
290 w
= (R_SBREG(sii
, &sb
->sbtmstatelow
) & ~(mask
<< SBTML_SICF_SHIFT
)) |
291 (val
<< SBTML_SICF_SHIFT
);
292 W_SBREG(sii
, &sb
->sbtmstatelow
, w
);
295 /* set/clear core-specific control flags */
297 sb_core_cflags(si_t
*sih
, uint32 mask
, uint32 val
)
304 sb
= REGS2SB(sii
->curmap
);
306 ASSERT((val
& ~mask
) == 0);
310 w
= (R_SBREG(sii
, &sb
->sbtmstatelow
) & ~(mask
<< SBTML_SICF_SHIFT
)) |
311 (val
<< SBTML_SICF_SHIFT
);
312 W_SBREG(sii
, &sb
->sbtmstatelow
, w
);
315 /* return the new value
316 * for write operation, the following readback ensures the completion of write opration.
318 return (R_SBREG(sii
, &sb
->sbtmstatelow
) >> SBTML_SICF_SHIFT
);
321 /* set/clear core-specific status flags */
323 sb_core_sflags(si_t
*sih
, uint32 mask
, uint32 val
)
330 sb
= REGS2SB(sii
->curmap
);
332 ASSERT((val
& ~mask
) == 0);
333 ASSERT((mask
& ~SISF_CORE_BITS
) == 0);
337 w
= (R_SBREG(sii
, &sb
->sbtmstatehigh
) & ~(mask
<< SBTMH_SISF_SHIFT
)) |
338 (val
<< SBTMH_SISF_SHIFT
);
339 W_SBREG(sii
, &sb
->sbtmstatehigh
, w
);
342 /* return the new value */
343 return (R_SBREG(sii
, &sb
->sbtmstatehigh
) >> SBTMH_SISF_SHIFT
);
347 sb_iscoreup(si_t
*sih
)
353 sb
= REGS2SB(sii
->curmap
);
355 return ((R_SBREG(sii
, &sb
->sbtmstatelow
) &
356 (SBTML_RESET
| SBTML_REJ_MASK
| (SICF_CLOCK_EN
<< SBTML_SICF_SHIFT
))) ==
357 (SICF_CLOCK_EN
<< SBTML_SICF_SHIFT
));
361 * Switch to 'coreidx', issue a single arbitrary 32bit register mask&set operation,
362 * switch back to the original core, and return the new value.
364 * When using the silicon backplane, no fidleing with interrupts or core switches are needed.
366 * Also, when using pci/pcie, we can optimize away the core switching for pci registers
367 * and (on newer pci cores) chipcommon registers.
370 sb_corereg(si_t
*sih
, uint coreidx
, uint regoff
, uint mask
, uint val
)
381 ASSERT(GOODIDX(coreidx
));
382 ASSERT(regoff
< SI_CORE_SIZE
);
383 ASSERT((val
& ~mask
) == 0);
385 if (coreidx
>= SI_MAXCORES
)
388 if (BUSTYPE(sii
->pub
.bustype
) == SI_BUS
) {
389 /* If internal bus, we can always get at everything */
391 /* map if does not exist */
392 if (!sii
->regs
[coreidx
]) {
393 sii
->regs
[coreidx
] = REG_MAP(sii
->coresba
[coreidx
],
395 ASSERT(GOODREGS(sii
->regs
[coreidx
]));
397 r
= (uint32
*)((uchar
*)sii
->regs
[coreidx
] + regoff
);
398 } else if (BUSTYPE(sii
->pub
.bustype
) == PCI_BUS
) {
399 /* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */
401 if ((sii
->coreid
[coreidx
] == CC_CORE_ID
) && SI_FAST(sii
)) {
402 /* Chipc registers are mapped at 12KB */
405 r
= (uint32
*)((char *)sii
->curmap
+ PCI_16KB0_CCREGS_OFFSET
+ regoff
);
406 } else if (sii
->pub
.buscoreidx
== coreidx
) {
407 /* pci registers are at either in the last 2KB of an 8KB window
408 * or, in pcie and pci rev 13 at 8KB
412 r
= (uint32
*)((char *)sii
->curmap
+
413 PCI_16KB0_PCIREGS_OFFSET
+ regoff
);
415 r
= (uint32
*)((char *)sii
->curmap
+
416 ((regoff
>= SBCONFIGOFF
) ?
417 PCI_BAR0_PCISBR_OFFSET
: PCI_BAR0_PCIREGS_OFFSET
) +
423 INTR_OFF(sii
, intr_val
);
425 /* save current core index */
426 origidx
= si_coreidx(&sii
->pub
);
429 r
= (uint32
*) ((uchar
*)sb_setcoreidx(&sii
->pub
, coreidx
) + regoff
);
435 if (regoff
>= SBCONFIGOFF
) {
436 w
= (R_SBREG(sii
, r
) & ~mask
) | val
;
439 w
= (R_REG(sii
->osh
, r
) & ~mask
) | val
;
440 W_REG(sii
->osh
, r
, w
);
445 if (regoff
>= SBCONFIGOFF
)
448 if ((CHIPID(sii
->pub
.chip
) == BCM5354_CHIP_ID
) &&
449 (coreidx
== SI_CC_IDX
) &&
450 (regoff
== OFFSETOF(chipcregs_t
, watchdog
))) {
453 w
= R_REG(sii
->osh
, r
);
457 /* restore core index */
458 if (origidx
!= coreidx
)
459 sb_setcoreidx(&sii
->pub
, origidx
);
461 INTR_RESTORE(sii
, intr_val
);
467 /* Scan the enumeration space to find all cores starting from the given
468 * bus 'sbba'. Append coreid and other info to the lists in 'si'. 'sba'
469 * is the default core address at chip POR time and 'regs' is the virtual
470 * address that the default core is mapped at. 'ncores' is the number of
471 * cores expected on bus 'sbba'. It returns the total number of cores
472 * starting from bus 'sbba', inclusive.
474 #define SB_MAXBUSES 2
476 BCMATTACHFN(_sb_scan
)(si_info_t
*sii
, uint32 sba
, void *regs
, uint bus
, uint32 sbba
, uint numcores
)
482 if (bus
>= SB_MAXBUSES
) {
483 SI_ERROR(("_sb_scan: bus 0x%08x at level %d is too deep to scan\n", sbba
, bus
));
486 SI_MSG(("_sb_scan: scan bus 0x%08x assume %u cores\n", sbba
, numcores
));
488 /* Scan all cores on the bus starting from core 0.
489 * Core addresses must be contiguous on each bus.
491 for (i
= 0, next
= sii
->numcores
; i
< numcores
&& next
< SB_BUS_MAXCORES
; i
++, next
++) {
492 sii
->coresba
[next
] = sbba
+ (i
* SI_CORE_SIZE
);
494 /* keep and reuse the initial register mapping */
495 if ((BUSTYPE(sii
->pub
.bustype
) == SI_BUS
) && (sii
->coresba
[next
] == sba
)) {
496 SI_VMSG(("_sb_scan: reuse mapped regs %p for core %u\n", regs
, next
));
497 sii
->regs
[next
] = regs
;
500 /* change core to 'next' and read its coreid */
501 sii
->curmap
= _sb_setcoreidx(sii
, next
);
504 sii
->coreid
[next
] = sb_coreid(&sii
->pub
);
506 /* core specific processing... */
507 /* chipc provides # cores */
508 if (sii
->coreid
[next
] == CC_CORE_ID
) {
509 chipcregs_t
*cc
= (chipcregs_t
*)sii
->curmap
;
510 uint32 ccrev
= sb_corerev(&sii
->pub
);
512 /* determine numcores - this is the total # cores in the chip */
513 if (((ccrev
== 4) || (ccrev
>= 6)))
514 numcores
= (R_REG(sii
->osh
, &cc
->chipid
) & CID_CC_MASK
) >>
518 uint chip
= CHIPID(sii
->pub
.chip
);
520 if (chip
== BCM4306_CHIP_ID
) /* < 4306c0 */
522 else if (chip
== BCM4704_CHIP_ID
)
524 else if (chip
== BCM5365_CHIP_ID
)
527 SI_ERROR(("sb_chip2numcores: unsupported chip 0x%x\n",
533 SI_VMSG(("_sb_scan: there are %u cores in the chip %s\n", numcores
,
534 sii
->pub
.issim
? "QT" : ""));
536 /* scan bridged SB(s) and add results to the end of the list */
537 else if (sii
->coreid
[next
] == OCP_CORE_ID
) {
538 sbconfig_t
*sb
= REGS2SB(sii
->curmap
);
539 uint32 nsbba
= R_SBREG(sii
, &sb
->sbadmatch1
);
542 sii
->numcores
= next
+ 1;
544 if ((nsbba
& 0xfff00000) != SI_ENUM_BASE
)
547 if (_sb_coreidx(sii
, nsbba
) != BADIDX
)
550 nsbcc
= (R_SBREG(sii
, &sb
->sbtmstatehigh
) & 0x000f0000) >> 16;
551 nsbcc
= _sb_scan(sii
, sba
, regs
, bus
+ 1, nsbba
, nsbcc
);
552 if (sbba
== SI_ENUM_BASE
)
558 SI_MSG(("_sb_scan: found %u cores on bus 0x%08x\n", i
, sbba
));
560 sii
->numcores
= i
+ ncc
;
561 return sii
->numcores
;
564 /* scan the sb enumerated space to identify all cores */
566 BCMATTACHFN(sb_scan
)(si_t
*sih
, void *regs
, uint devid
)
573 sb
= REGS2SB(sii
->curmap
);
575 sii
->pub
.socirev
= (R_SBREG(sii
, &sb
->sbidlow
) & SBIDL_RV_MASK
) >> SBIDL_RV_SHIFT
;
577 /* Save the current core info and validate it later till we know
578 * for sure what is good and what is bad.
580 origsba
= _sb_coresba(sii
);
582 /* scan all SB(s) starting from SI_ENUM_BASE */
583 sii
->numcores
= _sb_scan(sii
, origsba
, regs
, 0, SI_ENUM_BASE
, 1);
587 * This function changes logical "focus" to the indicated core;
588 * must be called with interrupts off.
589 * Moreover, callers should keep interrupts off during switching out of and back to d11 core
592 sb_setcoreidx(si_t
*sih
, uint coreidx
)
598 if (coreidx
>= sii
->numcores
)
602 * If the user has provided an interrupt mask enabled function,
603 * then assert interrupts are disabled before switching the core.
605 ASSERT((sii
->intrsenabled_fn
== NULL
) || !(*(sii
)->intrsenabled_fn
)((sii
)->intr_arg
));
607 sii
->curmap
= _sb_setcoreidx(sii
, coreidx
);
608 sii
->curidx
= coreidx
;
610 return (sii
->curmap
);
613 /* This function changes the logical "focus" to the indicated core.
614 * Return the current core's virtual address.
617 _sb_setcoreidx(si_info_t
*sii
, uint coreidx
)
619 uint32 sbaddr
= sii
->coresba
[coreidx
];
622 switch (BUSTYPE(sii
->pub
.bustype
)) {
625 if (!sii
->regs
[coreidx
]) {
626 sii
->regs
[coreidx
] = REG_MAP(sbaddr
, SI_CORE_SIZE
);
627 ASSERT(GOODREGS(sii
->regs
[coreidx
]));
629 regs
= sii
->regs
[coreidx
];
633 /* point bar0 window */
634 OSL_PCI_WRITE_CONFIG(sii
->osh
, PCI_BAR0_WIN
, 4, sbaddr
);
639 uint8 tmp
= (sbaddr
>> 12) & 0x0f;
640 OSL_PCMCIA_WRITE_ATTR(sii
->osh
, PCMCIA_ADDR0
, &tmp
, 1);
641 tmp
= (sbaddr
>> 16) & 0xff;
642 OSL_PCMCIA_WRITE_ATTR(sii
->osh
, PCMCIA_ADDR1
, &tmp
, 1);
643 tmp
= (sbaddr
>> 24) & 0xff;
644 OSL_PCMCIA_WRITE_ATTR(sii
->osh
, PCMCIA_ADDR2
, &tmp
, 1);
652 if (!sii
->regs
[coreidx
]) {
653 sii
->regs
[coreidx
] = (void *)(uintptr
)sbaddr
;
654 ASSERT(GOODREGS(sii
->regs
[coreidx
]));
656 regs
= sii
->regs
[coreidx
];
669 /* Return the address of sbadmatch0/1/2/3 register */
670 static volatile uint32
*
671 sb_admatch(si_info_t
*sii
, uint asidx
)
674 volatile uint32
*addrm
;
676 sb
= REGS2SB(sii
->curmap
);
680 addrm
= &sb
->sbadmatch0
;
684 addrm
= &sb
->sbadmatch1
;
688 addrm
= &sb
->sbadmatch2
;
692 addrm
= &sb
->sbadmatch3
;
696 SI_ERROR(("%s: Address space index (%d) out of range\n", __FUNCTION__
, asidx
));
703 /* Return the number of address spaces in current core */
705 sb_numaddrspaces(si_t
*sih
)
711 sb
= REGS2SB(sii
->curmap
);
713 /* + 1 because of enumeration space */
714 return ((R_SBREG(sii
, &sb
->sbidlow
) & SBIDL_AR_MASK
) >> SBIDL_AR_SHIFT
) + 1;
717 /* Return the address of the nth address space in the current core */
719 sb_addrspace(si_t
*sih
, uint asidx
)
725 return (sb_base(R_SBREG(sii
, sb_admatch(sii
, asidx
))));
728 /* Return the size of the nth address space in the current core */
730 sb_addrspacesize(si_t
*sih
, uint asidx
)
736 return (sb_size(R_SBREG(sii
, sb_admatch(sii
, asidx
))));
#if defined(BCMDBG_ERR) || defined(BCMASSERT_SUPPORT) || defined(BCMDBG_DUMP)
/* traverse all cores to find and clear source of serror
 * NOTE(review): the closing #endif of this conditional was dropped in the
 * garbled source and is restored here — verify placement against upstream.
 */
static void
sb_serr_clear(si_info_t *sii)
{
	sbconfig_t *sb;
	uint origidx;
	uint i, intr_val = 0;
	void *corereg = NULL;

	INTR_OFF(sii, intr_val);
	origidx = si_coreidx(&sii->pub);

	for (i = 0; i < sii->numcores; i++) {
		corereg = sb_setcoreidx(&sii->pub, i);
		if (NULL != corereg) {
			sb = REGS2SB(corereg);
			if ((R_SBREG(sii, &sb->sbtmstatehigh)) & SBTMH_SERR) {
				AND_SBREG(sii, &sb->sbtmstatehigh, ~SBTMH_SERR);
				SI_ERROR(("sb_serr_clear: SError at core 0x%x\n",
				          sb_coreid(&sii->pub)));
			}
		}
	}

	sb_setcoreidx(&sii->pub, origidx);
	INTR_RESTORE(sii, intr_val);
}
#endif	/* BCMDBG_ERR || BCMASSERT_SUPPORT || BCMDBG_DUMP */
769 * Check if any inband, outband or timeout errors has happened and clear them.
770 * Must be called with chip clk on !
773 sb_taclear(si_t
*sih
, bool details
)
780 uint32 inband
= 0, serror
= 0, timeout
= 0;
781 void *corereg
= NULL
;
782 volatile uint32 imstate
, tmstate
;
784 bool printed
= FALSE
;
789 if (BUSTYPE(sii
->pub
.bustype
) == PCI_BUS
) {
790 volatile uint32 stcmd
;
792 /* inband error is Target abort for PCI */
793 stcmd
= OSL_PCI_READ_CONFIG(sii
->osh
, PCI_CFG_CMD
, sizeof(uint32
));
794 inband
= stcmd
& PCI_STAT_TA
;
798 SI_ERROR(("\ninband:\n"));
799 si_viewall((void*)sii
, FALSE
);
803 OSL_PCI_WRITE_CONFIG(sii
->osh
, PCI_CFG_CMD
, sizeof(uint32
), stcmd
);
807 stcmd
= OSL_PCI_READ_CONFIG(sii
->osh
, PCI_INT_STATUS
, sizeof(uint32
));
808 serror
= stcmd
& PCI_SBIM_STATUS_SERR
;
812 SI_ERROR(("\nserror:\n"));
814 si_viewall((void*)sii
, FALSE
);
819 OSL_PCI_WRITE_CONFIG(sii
->osh
, PCI_INT_STATUS
, sizeof(uint32
), stcmd
);
823 imstate
= sb_corereg(sih
, sii
->pub
.buscoreidx
,
824 SBCONFIGOFF
+ OFFSETOF(sbconfig_t
, sbimstate
), 0, 0);
825 if ((imstate
!= 0xffffffff) && (imstate
& (SBIM_IBE
| SBIM_TO
))) {
826 sb_corereg(sih
, sii
->pub
.buscoreidx
,
827 SBCONFIGOFF
+ OFFSETOF(sbconfig_t
, sbimstate
), ~0,
828 (imstate
& ~(SBIM_IBE
| SBIM_TO
)));
829 /* inband = imstate & SBIM_IBE; same as TA above */
830 timeout
= imstate
& SBIM_TO
;
834 SI_ERROR(("\ntimeout:\n"));
836 si_viewall((void*)sii
, FALSE
);
844 /* dump errlog for sonics >= 2.3 */
845 if (sii
->pub
.socirev
== SONICS_2_2
)
848 uint32 imerrlog
, imerrloga
;
849 imerrlog
= sb_corereg(sih
, sii
->pub
.buscoreidx
, SBIMERRLOG
, 0, 0);
850 if (imerrlog
& SBTMEL_EC
) {
851 imerrloga
= sb_corereg(sih
, sii
->pub
.buscoreidx
,
854 sb_corereg(sih
, sii
->pub
.buscoreidx
, SBIMERRLOG
, ~0, 0);
855 SI_ERROR(("sb_taclear: ImErrLog 0x%x, ImErrLogA 0x%x\n",
856 imerrlog
, imerrloga
));
862 } else if (BUSTYPE(sii
->pub
.bustype
) == PCMCIA_BUS
) {
864 INTR_OFF(sii
, intr_val
);
865 origidx
= si_coreidx(sih
);
867 corereg
= si_setcore(sih
, PCMCIA_CORE_ID
, 0);
868 if (NULL
!= corereg
) {
869 sb
= REGS2SB(corereg
);
871 imstate
= R_SBREG(sii
, &sb
->sbimstate
);
872 /* handle surprise removal */
873 if ((imstate
!= 0xffffffff) && (imstate
& (SBIM_IBE
| SBIM_TO
))) {
874 AND_SBREG(sii
, &sb
->sbimstate
, ~(SBIM_IBE
| SBIM_TO
));
875 inband
= imstate
& SBIM_IBE
;
876 timeout
= imstate
& SBIM_TO
;
878 tmstate
= R_SBREG(sii
, &sb
->sbtmstatehigh
);
879 if ((tmstate
!= 0xffffffff) && (tmstate
& SBTMH_INT_STATUS
)) {
884 OR_SBREG(sii
, &sb
->sbtmstatelow
, SBTML_INT_ACK
);
885 AND_SBREG(sii
, &sb
->sbtmstatelow
, ~SBTML_INT_ACK
);
888 sb_setcoreidx(sih
, origidx
);
889 INTR_RESTORE(sii
, intr_val
);
894 if (inband
| timeout
| serror
) {
896 SI_ERROR(("sb_taclear: inband 0x%x, serror 0x%x, timeout 0x%x!\n",
897 inband
, serror
, timeout
));
904 /* do buffered registers update */
914 origidx
= sii
->curidx
;
915 ASSERT(GOODIDX(origidx
));
917 INTR_OFF(sii
, intr_val
);
919 /* switch over to chipcommon core if there is one, else use pci */
920 if (sii
->pub
.ccrev
!= NOREV
) {
921 chipcregs_t
*ccregs
= (chipcregs_t
*)si_setcore(sih
, CC_CORE_ID
, 0);
923 /* do the buffer registers update */
924 W_REG(sii
->osh
, &ccregs
->broadcastaddress
, SB_COMMIT
);
925 W_REG(sii
->osh
, &ccregs
->broadcastdata
, 0x0);
926 } else if (PCI(sii
)) {
927 sbpciregs_t
*pciregs
= (sbpciregs_t
*)si_setcore(sih
, PCI_CORE_ID
, 0);
929 /* do the buffer registers update */
930 W_REG(sii
->osh
, &pciregs
->bcastaddr
, SB_COMMIT
);
931 W_REG(sii
->osh
, &pciregs
->bcastdata
, 0x0);
935 /* restore core index */
936 sb_setcoreidx(sih
, origidx
);
937 INTR_RESTORE(sii
, intr_val
);
941 sb_core_disable(si_t
*sih
, uint32 bits
)
944 volatile uint32 dummy
;
949 ASSERT(GOODREGS(sii
->curmap
));
950 sb
= REGS2SB(sii
->curmap
);
952 /* if core is already in reset, just return */
953 if (R_SBREG(sii
, &sb
->sbtmstatelow
) & SBTML_RESET
)
956 /* if clocks are not enabled, put into reset and return */
957 if ((R_SBREG(sii
, &sb
->sbtmstatelow
) & (SICF_CLOCK_EN
<< SBTML_SICF_SHIFT
)) == 0)
960 /* set target reject and spin until busy is clear (preserve core-specific bits) */
961 OR_SBREG(sii
, &sb
->sbtmstatelow
, SBTML_REJ
);
962 dummy
= R_SBREG(sii
, &sb
->sbtmstatelow
);
964 SPINWAIT((R_SBREG(sii
, &sb
->sbtmstatehigh
) & SBTMH_BUSY
), 100000);
965 if (R_SBREG(sii
, &sb
->sbtmstatehigh
) & SBTMH_BUSY
)
966 SI_ERROR(("%s: target state still busy\n", __FUNCTION__
));
968 if (R_SBREG(sii
, &sb
->sbidlow
) & SBIDL_INIT
) {
969 OR_SBREG(sii
, &sb
->sbimstate
, SBIM_RJ
);
970 dummy
= R_SBREG(sii
, &sb
->sbimstate
);
972 SPINWAIT((R_SBREG(sii
, &sb
->sbimstate
) & SBIM_BY
), 100000);
975 /* set reset and reject while enabling the clocks */
976 W_SBREG(sii
, &sb
->sbtmstatelow
,
977 (((bits
| SICF_FGC
| SICF_CLOCK_EN
) << SBTML_SICF_SHIFT
) |
978 SBTML_REJ
| SBTML_RESET
));
979 dummy
= R_SBREG(sii
, &sb
->sbtmstatelow
);
982 /* don't forget to clear the initiator reject bit */
983 if (R_SBREG(sii
, &sb
->sbidlow
) & SBIDL_INIT
)
984 AND_SBREG(sii
, &sb
->sbimstate
, ~SBIM_RJ
);
987 /* leave reset and reject asserted */
988 W_SBREG(sii
, &sb
->sbtmstatelow
, ((bits
<< SBTML_SICF_SHIFT
) | SBTML_REJ
| SBTML_RESET
));
992 /* reset and re-enable a core
994 * bits - core specific bits that are set during and after reset sequence
995 * resetbits - core specific bits that are set only during reset sequence
998 sb_core_reset(si_t
*sih
, uint32 bits
, uint32 resetbits
)
1002 volatile uint32 dummy
;
1005 ASSERT(GOODREGS(sii
->curmap
));
1006 sb
= REGS2SB(sii
->curmap
);
1009 * Must do the disable sequence first to work for arbitrary current core state.
1011 sb_core_disable(sih
, (bits
| resetbits
));
1014 * Now do the initialization sequence.
1017 /* set reset while enabling the clock and forcing them on throughout the core */
1018 W_SBREG(sii
, &sb
->sbtmstatelow
,
1019 (((bits
| resetbits
| SICF_FGC
| SICF_CLOCK_EN
) << SBTML_SICF_SHIFT
) |
1021 dummy
= R_SBREG(sii
, &sb
->sbtmstatelow
);
1024 if (R_SBREG(sii
, &sb
->sbtmstatehigh
) & SBTMH_SERR
) {
1025 W_SBREG(sii
, &sb
->sbtmstatehigh
, 0);
1027 if ((dummy
= R_SBREG(sii
, &sb
->sbimstate
)) & (SBIM_IBE
| SBIM_TO
)) {
1028 AND_SBREG(sii
, &sb
->sbimstate
, ~(SBIM_IBE
| SBIM_TO
));
1031 /* clear reset and allow it to propagate throughout the core */
1032 W_SBREG(sii
, &sb
->sbtmstatelow
,
1033 ((bits
| resetbits
| SICF_FGC
| SICF_CLOCK_EN
) << SBTML_SICF_SHIFT
));
1034 dummy
= R_SBREG(sii
, &sb
->sbtmstatelow
);
1037 /* leave clock enabled */
1038 W_SBREG(sii
, &sb
->sbtmstatelow
, ((bits
| SICF_CLOCK_EN
) << SBTML_SICF_SHIFT
));
1039 dummy
= R_SBREG(sii
, &sb
->sbtmstatelow
);
1044 * Set the initiator timeout for the "master core".
1045 * The master core is defined to be the core in control
1046 * of the chip and so it issues accesses to non-memory
1047 * locations (Because of dma *any* core can access memeory).
1049 * The routine uses the bus to decide who is the master:
1052 * PCI_BUS => pci or pcie
1053 * PCMCIA_BUS => pcmcia
1054 * SDIO_BUS => pcmcia
1056 * This routine exists so callers can disable initiator
1057 * timeouts so accesses to very slow devices like otp
1058 * won't cause an abort. The routine allows arbitrary
1059 * settings of the service and request timeouts, though.
1061 * Returns the timeout state before changing it or -1
1065 #define TO_MASK (SBIMCL_RTO_MASK | SBIMCL_STO_MASK)
1068 sb_set_initiator_to(si_t
*sih
, uint32 to
, uint idx
)
1073 uint32 tmp
, ret
= 0xffffffff;
1078 if ((to
& ~TO_MASK
) != 0)
1081 /* Figure out the master core */
1082 if (idx
== BADIDX
) {
1083 switch (BUSTYPE(sii
->pub
.bustype
)) {
1085 idx
= sii
->pub
.buscoreidx
;
1091 idx
= si_findcoreidx(sih
, PCMCIA_CORE_ID
, 0);
1094 idx
= si_findcoreidx(sih
, MIPS33_CORE_ID
, 0);
1103 INTR_OFF(sii
, intr_val
);
1104 origidx
= si_coreidx(sih
);
1106 sb
= REGS2SB(sb_setcoreidx(sih
, idx
));
1108 tmp
= R_SBREG(sii
, &sb
->sbimconfiglow
);
1109 ret
= tmp
& TO_MASK
;
1110 W_SBREG(sii
, &sb
->sbimconfiglow
, (tmp
& ~TO_MASK
) | to
);
1113 sb_setcoreidx(sih
, origidx
);
1114 INTR_RESTORE(sii
, intr_val
);
1119 sb_base(uint32 admatch
)
1124 type
= admatch
& SBAM_TYPE_MASK
;
1130 base
= admatch
& SBAM_BASE0_MASK
;
1131 } else if (type
== 1) {
1132 ASSERT(!(admatch
& SBAM_ADNEG
)); /* neg not supported */
1133 base
= admatch
& SBAM_BASE1_MASK
;
1134 } else if (type
== 2) {
1135 ASSERT(!(admatch
& SBAM_ADNEG
)); /* neg not supported */
1136 base
= admatch
& SBAM_BASE2_MASK
;
1143 sb_size(uint32 admatch
)
1148 type
= admatch
& SBAM_TYPE_MASK
;
1154 size
= 1 << (((admatch
& SBAM_ADINT0_MASK
) >> SBAM_ADINT0_SHIFT
) + 1);
1155 } else if (type
== 1) {
1156 ASSERT(!(admatch
& SBAM_ADNEG
)); /* neg not supported */
1157 size
= 1 << (((admatch
& SBAM_ADINT1_MASK
) >> SBAM_ADINT1_SHIFT
) + 1);
1158 } else if (type
== 2) {
1159 ASSERT(!(admatch
& SBAM_ADNEG
)); /* neg not supported */
1160 size
= 1 << (((admatch
& SBAM_ADINT2_MASK
) >> SBAM_ADINT2_SHIFT
) + 1);
#if defined(BCMDBG) || defined(BCMDBG_DUMP)
/* print interesting sbconfig registers */
void
sb_dumpregs(si_t *sih, struct bcmstrbuf *b)
{
	si_info_t *sii;
	sbconfig_t *sb;
	uint origidx, i, intr_val = 0;

	sii = SI_INFO(sih);
	origidx = sii->curidx;

	INTR_OFF(sii, intr_val);

	for (i = 0; i < sii->numcores; i++) {
		sb = REGS2SB(sb_setcoreidx(sih, i));

		bcm_bprintf(b, "core 0x%x: \n", sii->coreid[i]);

		/* sbimerrlog registers exist only on sonics > 2.2 */
		if (sii->pub.socirev > SONICS_2_2)
			bcm_bprintf(b, "sbimerrlog 0x%x sbimerrloga 0x%x\n",
			            sb_corereg(sih, si_coreidx(&sii->pub), SBIMERRLOG, 0, 0),
			            sb_corereg(sih, si_coreidx(&sii->pub), SBIMERRLOGA, 0, 0));

		bcm_bprintf(b, "sbtmstatelow 0x%x sbtmstatehigh 0x%x sbidhigh 0x%x "
		            "sbimstate 0x%x\n sbimconfiglow 0x%x sbimconfighigh 0x%x\n",
		            R_SBREG(sii, &sb->sbtmstatelow), R_SBREG(sii, &sb->sbtmstatehigh),
		            R_SBREG(sii, &sb->sbidhigh), R_SBREG(sii, &sb->sbimstate),
		            R_SBREG(sii, &sb->sbimconfiglow), R_SBREG(sii, &sb->sbimconfighigh));
	}

	sb_setcoreidx(sih, origidx);
	INTR_RESTORE(sii, intr_val);
}
#endif	/* BCMDBG || BCMDBG_DUMP */
1204 sb_view(si_t
*sih
, bool verbose
)
1210 sb
= REGS2SB(sii
->curmap
);
1212 SI_ERROR(("\nCore ID: 0x%x\n", sb_coreid(&sii
->pub
)));
1214 if (sii
->pub
.socirev
> SONICS_2_2
)
1215 SI_ERROR(("sbimerrlog 0x%x sbimerrloga 0x%x\n",
1216 sb_corereg(sih
, si_coreidx(&sii
->pub
), SBIMERRLOG
, 0, 0),
1217 sb_corereg(sih
, si_coreidx(&sii
->pub
), SBIMERRLOGA
, 0, 0)));
1219 /* Print important or helpful registers */
1220 SI_ERROR(("sbtmerrloga 0x%x sbtmerrlog 0x%x\n",
1221 R_SBREG(sii
, &sb
->sbtmerrloga
), R_SBREG(sii
, &sb
->sbtmerrlog
)));
1222 SI_ERROR(("sbimstate 0x%x sbtmstatelow 0x%x sbtmstatehigh 0x%x\n",
1223 R_SBREG(sii
, &sb
->sbimstate
),
1224 R_SBREG(sii
, &sb
->sbtmstatelow
), R_SBREG(sii
, &sb
->sbtmstatehigh
)));
1225 SI_ERROR(("sbimconfiglow 0x%x sbtmconfiglow 0x%x\nsbtmconfighigh 0x%x sbidhigh 0x%x\n",
1226 R_SBREG(sii
, &sb
->sbimconfiglow
), R_SBREG(sii
, &sb
->sbtmconfiglow
),
1227 R_SBREG(sii
, &sb
->sbtmconfighigh
), R_SBREG(sii
, &sb
->sbidhigh
)));
1229 /* Print more detailed registers that are otherwise not relevant */
1231 SI_ERROR(("sbipsflag 0x%x sbtpsflag 0x%x\n",
1232 R_SBREG(sii
, &sb
->sbipsflag
), R_SBREG(sii
, &sb
->sbtpsflag
)));
1233 SI_ERROR(("sbadmatch3 0x%x sbadmatch2 0x%x\nsbadmatch1 0x%x sbadmatch0 0x%x\n",
1234 R_SBREG(sii
, &sb
->sbadmatch3
), R_SBREG(sii
, &sb
->sbadmatch2
),
1235 R_SBREG(sii
, &sb
->sbadmatch1
), R_SBREG(sii
, &sb
->sbadmatch0
)));
1236 SI_ERROR(("sbintvec 0x%x sbbwa0 0x%x sbimconfighigh 0x%x\n",
1237 R_SBREG(sii
, &sb
->sbintvec
), R_SBREG(sii
, &sb
->sbbwa0
),
1238 R_SBREG(sii
, &sb
->sbimconfighigh
)));
1239 SI_ERROR(("sbbconfig 0x%x sbbstate 0x%x\n",
1240 R_SBREG(sii
, &sb
->sbbconfig
), R_SBREG(sii
, &sb
->sbbstate
)));
1241 SI_ERROR(("sbactcnfg 0x%x sbflagst 0x%x sbidlow 0x%x \n\n",
1242 R_SBREG(sii
, &sb
->sbactcnfg
), R_SBREG(sii
, &sb
->sbflagst
),
1243 R_SBREG(sii
, &sb
->sbidlow
)));