Merge branch 'gadget' into for-next
[zen-stable.git] / drivers / staging / brcm80211 / brcmsmac / aiutils.c
bloba61185f70a7cd950a078bfc26840ba1cc19540eb
1 /*
2 * Copyright (c) 2010 Broadcom Corporation
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
11 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
13 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
14 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 #include <linux/delay.h>
18 #include <linux/kernel.h>
19 #include <linux/string.h>
20 #include <bcmdefs.h>
21 #include <linux/module.h>
22 #include <linux/pci.h>
23 #include <bcmutils.h>
24 #include <aiutils.h>
25 #include <hndsoc.h>
26 #include <sbchipc.h>
27 #include <pcicfg.h>
28 #include <bcmdevs.h>
30 /* ********** from siutils.c *********** */
31 #include <pci_core.h>
32 #include <pcie_core.h>
33 #include <nicpci.h>
34 #include <bcmnvram.h>
35 #include <bcmsrom.h>
36 #include <wlc_pmu.h>
38 #define BCM47162_DMP() ((sih->chip == BCM47162_CHIP_ID) && \
39 (sih->chiprev == 0) && \
40 (sii->coreid[sii->curidx] == MIPS74K_CORE_ID))
42 /* EROM parsing */
/*
 * Fetch the next EROM entry whose bits selected by @mask equal @match,
 * advancing *eromptr past everything consumed.
 *
 * mask == 0 means "return the very next word unconditionally" (used to
 * pull secondary words such as 32-bit address/size extensions).
 * Invalid and non-matching entries are skipped and counted for the
 * verbose-message summary.  Returns the entry found (which may be the
 * END marker); *eromptr is left pointing one past it.
 */
static u32
get_erom_ent(si_t *sih, u32 **eromptr, u32 mask, u32 match)
{
	u32 ent;
	uint inv = 0, nom = 0;	/* invalid / non-matching entry counts */

	while (true) {
		ent = R_REG(*eromptr);
		(*eromptr)++;

		/* mask 0: caller wants the raw next word, no filtering */
		if (mask == 0)
			break;

		/* skip entries without the VALID bit */
		if ((ent & ER_VALID) == 0) {
			inv++;
			continue;
		}

		/* stop at the end-of-ROM marker */
		if (ent == (ER_END | ER_VALID))
			break;

		if ((ent & mask) == match)
			break;

		nom++;
	}

	SI_VMSG(("%s: Returning ent 0x%08x\n", __func__, ent));
	if (inv + nom)
		SI_VMSG((" after %d invalid and %d non-matching entries\n",
			 inv, nom));
	return ent;
}
/*
 * Parse one Address Space Descriptor (ASD) from the EROM.
 *
 * Accepts the descriptor only if it is an ADD entry for slave port @sp
 * of space type @st; otherwise the entry is "pushed back" (cursor
 * rewound) and 0 is returned so the caller can try a different type.
 * On success the low/high address and low/high size are written through
 * the out-pointers and the raw descriptor word is returned.
 * NOTE: on the push-back path the out-parameters are NOT written.
 */
static u32
get_asd(si_t *sih, u32 **eromptr, uint sp, uint ad, uint st,
	u32 *addrl, u32 *addrh, u32 *sizel, u32 *sizeh)
{
	u32 asd, sz, szd;

	asd = get_erom_ent(sih, eromptr, ER_VALID, ER_VALID);
	if (((asd & ER_TAG1) != ER_ADD) ||
	    (((asd & AD_SP_MASK) >> AD_SP_SHIFT) != sp) ||
	    ((asd & AD_ST_MASK) != st)) {
		/* This is not what we want, "push" it back */
		(*eromptr)--;
		return 0;
	}
	*addrl = asd & AD_ADDR_MASK;
	/* AG32 flag: a 32-bit address extension word follows */
	if (asd & AD_AG32)
		*addrh = get_erom_ent(sih, eromptr, 0, 0);
	else
		*addrh = 0;
	*sizeh = 0;
	sz = asd & AD_SZ_MASK;
	if (sz == AD_SZ_SZD) {
		/* explicit size descriptor word follows */
		szd = get_erom_ent(sih, eromptr, 0, 0);
		*sizel = szd & SD_SZ_MASK;
		if (szd & SD_SG32)
			*sizeh = get_erom_ent(sih, eromptr, 0, 0);
	} else
		/* size encoded in the descriptor itself */
		*sizel = AD_SZ_BASE << (sz >> AD_SZ_SHIFT);

	SI_VMSG((" SP %d, ad %d: st = %d, 0x%08x_0x%08x @ 0x%08x_0x%08x\n",
		 sp, ad, st, *sizeh, *sizel, *addrh, *addrl));

	return asd;
}
/* Hook for post-scan hardware fixups; intentionally empty for the
 * chips this driver supports (kept so ai_scan() has a single call site).
 */
static void ai_hwfixup(si_info_t *sii)
{
}
/*
 * parse the enumeration rom to identify all cores
 *
 * Walks the EROM pointed to by chipcommon, recording per-core id,
 * revision, base addresses and wrapper addresses into @sii's parallel
 * arrays and bumping sii->numcores for every real (non-bridge) core.
 * On any malformed entry the whole scan is abandoned: numcores is reset
 * to 0 so the caller treats the chip as having no cores.
 */
void ai_scan(si_t *sih, void *regs, uint devid)
{
	si_info_t *sii = SI_INFO(sih);
	chipcregs_t *cc = (chipcregs_t *) regs;
	u32 erombase, *eromptr, *eromlim;

	erombase = R_REG(&cc->eromptr);

	switch (sih->bustype) {
	case SI_BUS:
		eromptr = (u32 *) REG_MAP(erombase, SI_CORE_SIZE);
		break;

	case PCI_BUS:
		/* Set wrappers address */
		sii->curwrap = (void *)((unsigned long)regs + SI_CORE_SIZE);

		/* Now point the window at the erom */
		pci_write_config_dword(sii->pbus, PCI_BAR0_WIN, erombase);
		eromptr = regs;
		break;

	case SPI_BUS:
	case SDIO_BUS:
		eromptr = (u32 *)(unsigned long)erombase;
		break;

	default:
		SI_ERROR(("Don't know how to do AXI enumertion on bus %d\n",
			  sih->bustype));
		return;
	}
	/* hard upper bound on how far we will walk the ROM */
	eromlim = eromptr + (ER_REMAPCONTROL / sizeof(u32));

	SI_VMSG(("ai_scan: regs = 0x%p, erombase = 0x%08x, eromptr = 0x%p, eromlim = 0x%p\n", regs, erombase, eromptr, eromlim));
	while (eromptr < eromlim) {
		u32 cia, cib, cid, mfg, crev, nmw, nsw, nmp, nsp;
		u32 mpd, asd, addrl, addrh, sizel, sizeh;
		u32 *base;
		uint i, j, idx;
		bool br;

		br = false;

		/* Grok a component */
		cia = get_erom_ent(sih, &eromptr, ER_TAG, ER_CI);
		if (cia == (ER_END | ER_VALID)) {
			SI_VMSG(("Found END of erom after %d cores\n",
				 sii->numcores));
			ai_hwfixup(sii);
			return;
		}
		base = eromptr - 1;
		cib = get_erom_ent(sih, &eromptr, 0, 0);

		if ((cib & ER_TAG) != ER_CI) {
			SI_ERROR(("CIA not followed by CIB\n"));
			goto error;
		}

		/* decode core id/mfg/rev and port counts from CIA/CIB */
		cid = (cia & CIA_CID_MASK) >> CIA_CID_SHIFT;
		mfg = (cia & CIA_MFG_MASK) >> CIA_MFG_SHIFT;
		crev = (cib & CIB_REV_MASK) >> CIB_REV_SHIFT;
		nmw = (cib & CIB_NMW_MASK) >> CIB_NMW_SHIFT;
		nsw = (cib & CIB_NSW_MASK) >> CIB_NSW_SHIFT;
		nmp = (cib & CIB_NMP_MASK) >> CIB_NMP_SHIFT;
		nsp = (cib & CIB_NSP_MASK) >> CIB_NSP_SHIFT;

		SI_VMSG(("Found component 0x%04x/0x%04x rev %d at erom addr 0x%p, with nmw = %d, " "nsw = %d, nmp = %d & nsp = %d\n", mfg, cid, crev, base, nmw, nsw, nmp, nsp));

		/* skip the default ARM component and port-less entries */
		if (((mfg == MFGID_ARM) && (cid == DEF_AI_COMP)) || (nsp == 0))
			continue;
		if ((nmw + nsw == 0)) {
			/* A component which is not a core */
			if (cid == OOB_ROUTER_CORE_ID) {
				asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE,
					      &addrl, &addrh, &sizel, &sizeh);
				if (asd != 0)
					sii->oob_router = addrl;
			}
			continue;
		}

		idx = sii->numcores;
		/* sii->eromptr[idx] = base; */
		sii->cia[idx] = cia;
		sii->cib[idx] = cib;
		sii->coreid[idx] = cid;

		/* consume (and ignore) the master port descriptors */
		for (i = 0; i < nmp; i++) {
			mpd = get_erom_ent(sih, &eromptr, ER_VALID, ER_VALID);
			if ((mpd & ER_TAG) != ER_MP) {
				SI_ERROR(("Not enough MP entries for component 0x%x\n", cid));
				goto error;
			}
			SI_VMSG((" Master port %d, mp: %d id: %d\n", i,
				 (mpd & MPD_MP_MASK) >> MPD_MP_SHIFT,
				 (mpd & MPD_MUI_MASK) >> MPD_MUI_SHIFT));
		}

		/* First Slave Address Descriptor should be port 0:
		 * the main register space for the core
		 */
		asd =
		    get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE, &addrl, &addrh,
			    &sizel, &sizeh);
		if (asd == 0) {
			/* Try again to see if it is a bridge */
			asd =
			    get_asd(sih, &eromptr, 0, 0, AD_ST_BRIDGE, &addrl,
				    &addrh, &sizel, &sizeh);
			if (asd != 0)
				br = true;
			else if ((addrh != 0) || (sizeh != 0)
				 || (sizel != SI_CORE_SIZE)) {
				SI_ERROR(("First Slave ASD for core 0x%04x malformed " "(0x%08x)\n", cid, asd));
				goto error;
			}
		}
		sii->coresba[idx] = addrl;
		sii->coresba_size[idx] = sizel;
		/* Get any more ASDs in port 0 */
		j = 1;
		do {
			asd =
			    get_asd(sih, &eromptr, 0, j, AD_ST_SLAVE, &addrl,
				    &addrh, &sizel, &sizeh);
			/* only a full-size second space is remembered */
			if ((asd != 0) && (j == 1) && (sizel == SI_CORE_SIZE)) {
				sii->coresba2[idx] = addrl;
				sii->coresba2_size[idx] = sizel;
			}
			j++;
		} while (asd != 0);

		/* Go through the ASDs for other slave ports */
		for (i = 1; i < nsp; i++) {
			j = 0;
			do {
				asd =
				    get_asd(sih, &eromptr, i, j++, AD_ST_SLAVE,
					    &addrl, &addrh, &sizel, &sizeh);
			} while (asd != 0);
			if (j == 0) {
				SI_ERROR((" SP %d has no address descriptors\n",
					  i));
				goto error;
			}
		}

		/* Now get master wrappers */
		for (i = 0; i < nmw; i++) {
			asd =
			    get_asd(sih, &eromptr, i, 0, AD_ST_MWRAP, &addrl,
				    &addrh, &sizel, &sizeh);
			if (asd == 0) {
				SI_ERROR(("Missing descriptor for MW %d\n", i));
				goto error;
			}
			if ((sizeh != 0) || (sizel != SI_CORE_SIZE)) {
				SI_ERROR(("Master wrapper %d is not 4KB\n", i));
				goto error;
			}
			if (i == 0)
				sii->wrapba[idx] = addrl;
		}

		/* And finally slave wrappers */
		for (i = 0; i < nsw; i++) {
			/* wrapper ports start after the slave ports when
			 * the core has more than one slave port */
			uint fwp = (nsp == 1) ? 0 : 1;
			asd =
			    get_asd(sih, &eromptr, fwp + i, 0, AD_ST_SWRAP,
				    &addrl, &addrh, &sizel, &sizeh);
			if (asd == 0) {
				SI_ERROR(("Missing descriptor for SW %d\n", i));
				goto error;
			}
			if ((sizeh != 0) || (sizel != SI_CORE_SIZE)) {
				SI_ERROR(("Slave wrapper %d is not 4KB\n", i));
				goto error;
			}
			/* only the first wrapper is recorded, and only if
			 * no master wrapper claimed the slot above */
			if ((nmw == 0) && (i == 0))
				sii->wrapba[idx] = addrl;
		}

		/* Don't record bridges */
		if (br)
			continue;

		/* Done with core */
		sii->numcores++;
	}

	SI_ERROR(("Reached end of erom without finding END"));

error:
	sii->numcores = 0;
	return;
}
319 /* This function changes the logical "focus" to the indicated core.
320 * Return the current core's virtual address.
322 void *ai_setcoreidx(si_t *sih, uint coreidx)
324 si_info_t *sii = SI_INFO(sih);
325 u32 addr = sii->coresba[coreidx];
326 u32 wrap = sii->wrapba[coreidx];
327 void *regs;
329 if (coreidx >= sii->numcores)
330 return NULL;
332 switch (sih->bustype) {
333 case SI_BUS:
334 /* map new one */
335 if (!sii->regs[coreidx]) {
336 sii->regs[coreidx] = REG_MAP(addr, SI_CORE_SIZE);
338 sii->curmap = regs = sii->regs[coreidx];
339 if (!sii->wrappers[coreidx]) {
340 sii->wrappers[coreidx] = REG_MAP(wrap, SI_CORE_SIZE);
342 sii->curwrap = sii->wrappers[coreidx];
343 break;
345 case PCI_BUS:
346 /* point bar0 window */
347 pci_write_config_dword(sii->pbus, PCI_BAR0_WIN, addr);
348 regs = sii->curmap;
349 /* point bar0 2nd 4KB window */
350 pci_write_config_dword(sii->pbus, PCI_BAR0_WIN2, wrap);
351 break;
353 case SPI_BUS:
354 case SDIO_BUS:
355 sii->curmap = regs = (void *)(unsigned long)addr;
356 sii->curwrap = (void *)(unsigned long)wrap;
357 break;
359 default:
360 regs = NULL;
361 break;
364 sii->curmap = regs;
365 sii->curidx = coreidx;
367 return regs;
370 /* Return the number of address spaces in current core */
371 int ai_numaddrspaces(si_t *sih)
373 return 2;
376 /* Return the address of the nth address space in the current core */
377 u32 ai_addrspace(si_t *sih, uint asidx)
379 si_info_t *sii;
380 uint cidx;
382 sii = SI_INFO(sih);
383 cidx = sii->curidx;
385 if (asidx == 0)
386 return sii->coresba[cidx];
387 else if (asidx == 1)
388 return sii->coresba2[cidx];
389 else {
390 SI_ERROR(("%s: Need to parse the erom again to find addr space %d\n", __func__, asidx));
391 return 0;
395 /* Return the size of the nth address space in the current core */
396 u32 ai_addrspacesize(si_t *sih, uint asidx)
398 si_info_t *sii;
399 uint cidx;
401 sii = SI_INFO(sih);
402 cidx = sii->curidx;
404 if (asidx == 0)
405 return sii->coresba_size[cidx];
406 else if (asidx == 1)
407 return sii->coresba2_size[cidx];
408 else {
409 SI_ERROR(("%s: Need to parse the erom again to find addr space %d\n", __func__, asidx));
410 return 0;
/* Return the backplane flag number of the current core (low 5 bits of
 * the wrapper's oobselouta30 register).  On 47162a0 the MIPS core's DMP
 * registers cannot be read, so the core index is returned instead.
 */
uint ai_flag(si_t *sih)
{
	si_info_t *sii;
	aidmp_t *ai;

	sii = SI_INFO(sih);
	if (BCM47162_DMP()) {
		SI_ERROR(("%s: Attempting to read MIPS DMP registers on 47162a0", __func__));
		return sii->curidx;
	}
	ai = sii->curwrap;

	return R_REG(&ai->oobselouta30) & 0x1f;
}
/* Set interrupt flag for the current core; no-op on AI interconnects
 * (kept for interface compatibility with the SB backplane code).
 */
void ai_setint(si_t *sih, int siflag)
{
}
433 uint ai_corevendor(si_t *sih)
435 si_info_t *sii;
436 u32 cia;
438 sii = SI_INFO(sih);
439 cia = sii->cia[sii->curidx];
440 return (cia & CIA_MFG_MASK) >> CIA_MFG_SHIFT;
443 uint ai_corerev(si_t *sih)
445 si_info_t *sii;
446 u32 cib;
448 sii = SI_INFO(sih);
449 cib = sii->cib[sii->curidx];
450 return (cib & CIB_REV_MASK) >> CIB_REV_SHIFT;
/* Report whether the current core is up: clock enabled without the
 * force-gated-clocks bit, and not held in reset.
 */
bool ai_iscoreup(si_t *sih)
{
	si_info_t *sii;
	aidmp_t *ai;

	sii = SI_INFO(sih);
	ai = sii->curwrap;

	return (((R_REG(&ai->ioctrl) & (SICF_FGC | SICF_CLOCK_EN)) ==
		 SICF_CLOCK_EN)
		&& ((R_REG(&ai->resetctrl) & AIRC_RESET) == 0));
}
/* Write-only version of ai_core_cflags(): read-modify-write the current
 * core's ioctrl register without returning the result.  Refuses to touch
 * the (unreadable) MIPS DMP registers on 47162a0.
 */
void ai_core_cflags_wo(si_t *sih, u32 mask, u32 val)
{
	si_info_t *sii;
	aidmp_t *ai;
	u32 w;

	sii = SI_INFO(sih);

	if (BCM47162_DMP()) {
		SI_ERROR(("%s: Accessing MIPS DMP register (ioctrl) on 47162a0",
			  __func__));
		return;
	}

	ai = sii->curwrap;

	if (mask || val) {
		w = ((R_REG(&ai->ioctrl) & ~mask) | val);
		W_REG(&ai->ioctrl, w);
	}
}
488 u32 ai_core_cflags(si_t *sih, u32 mask, u32 val)
490 si_info_t *sii;
491 aidmp_t *ai;
492 u32 w;
494 sii = SI_INFO(sih);
495 if (BCM47162_DMP()) {
496 SI_ERROR(("%s: Accessing MIPS DMP register (ioctrl) on 47162a0",
497 __func__));
498 return 0;
501 ai = sii->curwrap;
503 if (mask || val) {
504 w = ((R_REG(&ai->ioctrl) & ~mask) | val);
505 W_REG(&ai->ioctrl, w);
508 return R_REG(&ai->ioctrl);
511 u32 ai_core_sflags(si_t *sih, u32 mask, u32 val)
513 si_info_t *sii;
514 aidmp_t *ai;
515 u32 w;
517 sii = SI_INFO(sih);
518 if (BCM47162_DMP()) {
519 SI_ERROR(("%s: Accessing MIPS DMP register (iostatus) on 47162a0", __func__));
520 return 0;
523 ai = sii->curwrap;
525 if (mask || val) {
526 w = ((R_REG(&ai->iostatus) & ~mask) | val);
527 W_REG(&ai->iostatus, w);
530 return R_REG(&ai->iostatus);
533 /* *************** from siutils.c ************** */
534 /* local prototypes */
535 static si_info_t *ai_doattach(si_info_t *sii, uint devid, void *regs,
536 uint bustype, void *sdh, char **vars,
537 uint *varsz);
538 static bool ai_buscore_prep(si_info_t *sii, uint bustype, uint devid,
539 void *sdh);
540 static bool ai_buscore_setup(si_info_t *sii, chipcregs_t *cc, uint bustype,
541 u32 savewin, uint *origidx, void *regs);
542 static void ai_nvram_process(si_info_t *sii, char *pvars);
544 /* dev path concatenation util */
545 static char *ai_devpathvar(si_t *sih, char *var, int len, const char *name);
546 static bool _ai_clkctl_cc(si_info_t *sii, uint mode);
547 static bool ai_ispcie(si_info_t *sii);
549 /* global variable to indicate reservation/release of gpio's */
550 static u32 ai_gpioreservation;
553 * Allocate a si handle.
554 * devid - pci device id (used to determine chip#)
555 * osh - opaque OS handle
556 * regs - virtual address of initial core registers
557 * bustype - pci/sb/sdio/etc
558 * vars - pointer to a pointer area for "environment" variables
559 * varsz - pointer to int to return the size of the vars
si_t *ai_attach(uint devid, void *regs, uint bustype,
		void *sdh, char **vars, uint *varsz)
{
	si_info_t *sii;

	/* alloc si_info_t */
	sii = kmalloc(sizeof(si_info_t), GFP_ATOMIC);
	if (sii == NULL) {
		SI_ERROR(("si_attach: malloc failed!\n"));
		return NULL;
	}

	/* ai_doattach zeroes sii and fills it in; on failure we own the
	 * allocation and must free it */
	if (ai_doattach(sii, devid, regs, bustype, sdh, vars, varsz) ==
	    NULL) {
		kfree(sii);
		return NULL;
	}
	/* cache the nvram blob handed back through vars/varsz */
	sii->vars = vars ? *vars : NULL;
	sii->varsz = varsz ? *varsz : 0;

	return (si_t *) sii;
}
584 /* global kernel resource */
585 static si_info_t ksii;
587 static bool ai_buscore_prep(si_info_t *sii, uint bustype, uint devid,
588 void *sdh)
590 /* kludge to enable the clock on the 4306 which lacks a slowclock */
591 if (bustype == PCI_BUS && !ai_ispcie(sii))
592 ai_clkctl_xtal(&sii->pub, XTAL | PLL, ON);
593 return true;
/* Post-scan setup: read chipcommon capabilities, locate the PCI/PCIe
 * bus core, remember the caller's original core index, and initialize
 * the pcicore layer when running over PCI.  Returns false on failure.
 */
static bool ai_buscore_setup(si_info_t *sii, chipcregs_t *cc, uint bustype,
			     u32 savewin, uint *origidx, void *regs)
{
	bool pci, pcie;
	uint i;
	uint pciidx, pcieidx, pcirev, pcierev;

	/* the passed-in cc is replaced by the freshly focused one */
	cc = ai_setcoreidx(&sii->pub, SI_CC_IDX);

	/* get chipcommon rev */
	sii->pub.ccrev = (int)ai_corerev(&sii->pub);

	/* get chipcommon chipstatus */
	if (sii->pub.ccrev >= 11)
		sii->pub.chipst = R_REG(&cc->chipstatus);

	/* get chipcommon capabilites */
	sii->pub.cccaps = R_REG(&cc->capabilities);
	/* get chipcommon extended capabilities */

	if (sii->pub.ccrev >= 35)
		sii->pub.cccaps_ext = R_REG(&cc->capabilities_ext);

	/* get pmu rev and caps */
	if (sii->pub.cccaps & CC_CAP_PMU) {
		sii->pub.pmucaps = R_REG(&cc->pmucapabilities);
		sii->pub.pmurev = sii->pub.pmucaps & PCAP_REV_MASK;
	}

	/* figure out bus/orignal core idx */
	sii->pub.buscoretype = NODEV_CORE_ID;
	sii->pub.buscorerev = NOREV;
	sii->pub.buscoreidx = BADIDX;

	pci = pcie = false;
	pcirev = pcierev = NOREV;
	pciidx = pcieidx = BADIDX;

	for (i = 0; i < sii->numcores; i++) {
		uint cid, crev;

		ai_setcoreidx(&sii->pub, i);
		cid = ai_coreid(&sii->pub);
		crev = ai_corerev(&sii->pub);

		/* Display cores found */
		SI_VMSG(("CORE[%d]: id 0x%x rev %d base 0x%x regs 0x%p\n",
			 i, cid, crev, sii->coresba[i], sii->regs[i]));

		if (bustype == PCI_BUS) {
			if (cid == PCI_CORE_ID) {
				pciidx = i;
				pcirev = crev;
				pci = true;
			} else if (cid == PCIE_CORE_ID) {
				pcieidx = i;
				pcierev = crev;
				pcie = true;
			}
		}

		/* find the core idx before entering this func. */
		if ((savewin && (savewin == sii->coresba[i])) ||
		    (regs == sii->regs[i]))
			*origidx = i;
	}

	/* a chip exposing both cores uses only one of them; ask the
	 * hardware which one is actually wired up */
	if (pci && pcie) {
		if (ai_ispcie(sii))
			pci = false;
		else
			pcie = false;
	}
	if (pci) {
		sii->pub.buscoretype = PCI_CORE_ID;
		sii->pub.buscorerev = pcirev;
		sii->pub.buscoreidx = pciidx;
	} else if (pcie) {
		sii->pub.buscoretype = PCIE_CORE_ID;
		sii->pub.buscorerev = pcierev;
		sii->pub.buscoreidx = pcieidx;
	}

	SI_VMSG(("Buscore id/type/rev %d/0x%x/%d\n", sii->pub.buscoreidx,
		 sii->pub.buscoretype, sii->pub.buscorerev));

	/* fixup necessary chip/core configurations */
	if (sii->pub.bustype == PCI_BUS) {
		if (SI_FAST(sii)) {
			if (!sii->pch) {
				sii->pch = (void *)pcicore_init(
					&sii->pub, sii->pbus,
					(void *)PCIEREGS(sii));
				if (sii->pch == NULL)
					return false;
			}
		}
		if (ai_pci_fixcfg(&sii->pub)) {
			SI_ERROR(("si_doattach: si_pci_fixcfg failed\n"));
			return false;
		}
	}

	/* return to the original core */
	ai_setcoreidx(&sii->pub, *origidx);

	return true;
}
/* Derive boardvendor/boardtype/boardflags from PCI config space and/or
 * nvram variables, depending on the bus type.
 */
static __used void ai_nvram_process(si_info_t *sii, char *pvars)
{
	uint w = 0;

	/* get boardtype and boardrev */
	switch (sii->pub.bustype) {
	case PCI_BUS:
		/* do a pci config read to get subsystem id and subvendor id */
		pci_read_config_dword(sii->pbus, PCI_SUBSYSTEM_VENDOR_ID, &w);
		/* Let nvram variables override subsystem Vend/ID */
		sii->pub.boardvendor = (u16)ai_getdevpathintvar(&sii->pub,
			"boardvendor");
		if (sii->pub.boardvendor == 0)
			sii->pub.boardvendor = w & 0xffff;
		else
			SI_ERROR(("Overriding boardvendor: 0x%x instead of "
				  "0x%x\n", sii->pub.boardvendor, w & 0xffff));
		sii->pub.boardtype = (u16)ai_getdevpathintvar(&sii->pub,
			"boardtype");
		if (sii->pub.boardtype == 0)
			sii->pub.boardtype = (w >> 16) & 0xffff;
		else
			SI_ERROR(("Overriding boardtype: 0x%x instead of 0x%x\n"
				  , sii->pub.boardtype, (w >> 16) & 0xffff));
		break;

	/* NOTE(review): this case label was lost in transit; upstream
	 * sources read SDIO_BUS here — confirm against the repository. */
	case SDIO_BUS:
		sii->pub.boardvendor = getintvar(pvars, "manfid");
		sii->pub.boardtype = getintvar(pvars, "prodid");
		break;

	case SI_BUS:
	case JTAG_BUS:
		sii->pub.boardvendor = PCI_VENDOR_ID_BROADCOM;
		sii->pub.boardtype = getintvar(pvars, "prodid");
		if (pvars == NULL || (sii->pub.boardtype == 0)) {
			sii->pub.boardtype = getintvar(NULL, "boardtype");
			if (sii->pub.boardtype == 0)
				sii->pub.boardtype = 0xffff;
		}
		break;
	}

	if (sii->pub.boardtype == 0) {
		SI_ERROR(("si_doattach: unknown board type\n"));
	}

	sii->pub.boardflags = getintvar(pvars, "boardflags");
}
/* Core attach worker: probes the chip, scans cores, sets up nvram/PMU
 * and applies per-chip workarounds.  Returns @sii on success, NULL on
 * failure (after releasing the pcicore handle on the error path).
 */
static si_info_t *ai_doattach(si_info_t *sii, uint devid,
			      void *regs, uint bustype, void *pbus,
			      char **vars, uint *varsz)
{
	struct si_pub *sih = &sii->pub;
	u32 w, savewin;
	chipcregs_t *cc;
	char *pvars = NULL;
	uint socitype;
	uint origidx;

	memset((unsigned char *) sii, 0, sizeof(si_info_t));

	savewin = 0;

	sih->buscoreidx = BADIDX;

	sii->curmap = regs;
	sii->pbus = pbus;

	/* check to see if we are a si core mimic'ing a pci core */
	if (bustype == PCI_BUS) {
		pci_read_config_dword(sii->pbus, PCI_SPROM_CONTROL, &w);
		if (w == 0xffffffff) {
			SI_ERROR(("%s: incoming bus is PCI but it's a lie, "
				  " switching to SI devid:0x%x\n",
				  __func__, devid));
			bustype = SI_BUS;
		}
	}

	/* find Chipcommon address */
	if (bustype == PCI_BUS) {
		pci_read_config_dword(sii->pbus, PCI_BAR0_WIN, &savewin);
		if (!GOODCOREADDR(savewin, SI_ENUM_BASE))
			savewin = SI_ENUM_BASE;
		pci_write_config_dword(sii->pbus, PCI_BAR0_WIN,
				       SI_ENUM_BASE);
		cc = (chipcregs_t *) regs;
	} else {
		cc = (chipcregs_t *) REG_MAP(SI_ENUM_BASE, SI_CORE_SIZE);
	}

	sih->bustype = bustype;

	/* bus/core/clk setup for register access */
	if (!ai_buscore_prep(sii, bustype, devid, pbus)) {
		SI_ERROR(("si_doattach: si_core_clk_prep failed %d\n",
			  bustype));
		return NULL;
	}

	/*
	 * ChipID recognition.
	 * We assume we can read chipid at offset 0 from the regs arg.
	 * If we add other chiptypes (or if we need to support old sdio
	 * hosts w/o chipcommon), some way of recognizing them needs to
	 * be added here.
	 */
	w = R_REG(&cc->chipid);
	socitype = (w & CID_TYPE_MASK) >> CID_TYPE_SHIFT;
	/* Might as wll fill in chip id rev & pkg */
	sih->chip = w & CID_ID_MASK;
	sih->chiprev = (w & CID_REV_MASK) >> CID_REV_SHIFT;
	sih->chippkg = (w & CID_PKG_MASK) >> CID_PKG_SHIFT;

	sih->issim = IS_SIM(sih->chippkg);

	/* scan for cores */
	if (socitype == SOCI_AI) {
		SI_MSG(("Found chip type AI (0x%08x)\n", w));
		/* pass chipc address instead of original core base */
		ai_scan(&sii->pub, (void *)cc, devid);
	} else {
		SI_ERROR(("Found chip of unknown type (0x%08x)\n", w));
		return NULL;
	}
	/* no cores found, bail out */
	if (sii->numcores == 0) {
		SI_ERROR(("si_doattach: could not find any cores\n"));
		return NULL;
	}
	/* bus/core/clk setup */
	origidx = SI_CC_IDX;
	if (!ai_buscore_setup(sii, cc, bustype, savewin, &origidx, regs)) {
		SI_ERROR(("si_doattach: si_buscore_setup failed\n"));
		goto exit;
	}

	/* assume current core is CC */
	/* 43236-family OTP clock workaround on early chip revisions */
	if ((sii->pub.ccrev == 0x25)
	    &&
	    ((sih->chip == BCM43236_CHIP_ID
	      || sih->chip == BCM43235_CHIP_ID
	      || sih->chip == BCM43238_CHIP_ID)
	     && (sii->pub.chiprev <= 2))) {

		if ((cc->chipstatus & CST43236_BP_CLK) != 0) {
			uint clkdiv;
			clkdiv = R_REG(&cc->clkdiv);
			/* otp_clk_div is even number, 120/14 < 9mhz */
			clkdiv = (clkdiv & ~CLKD_OTP) | (14 << CLKD_OTP_SHIFT);
			W_REG(&cc->clkdiv, clkdiv);
			SI_ERROR(("%s: set clkdiv to %x\n", __func__, clkdiv));
		}
		udelay(10);
	}

	/* Init nvram from flash if it exists */
	nvram_init();

	/* Init nvram from sprom/otp if they exist */
	if (srom_var_init
	    (&sii->pub, bustype, regs, vars, varsz)) {
		SI_ERROR(("si_doattach: srom_var_init failed: bad srom\n"));
		goto exit;
	}
	pvars = vars ? *vars : NULL;
	ai_nvram_process(sii, pvars);

	/* === NVRAM, clock is ready === */
	cc = (chipcregs_t *) ai_setcore(sih, CC_CORE_ID, 0);
	W_REG(&cc->gpiopullup, 0);
	W_REG(&cc->gpiopulldown, 0);
	ai_setcoreidx(sih, origidx);

	/* PMU specific initializations */
	if (PMUCTL_ENAB(sih)) {
		u32 xtalfreq;
		si_pmu_init(sih);
		si_pmu_chip_init(sih);
		xtalfreq = getintvar(pvars, "xtalfreq");
		/* If xtalfreq var not available, try to measure it */
		if (xtalfreq == 0)
			xtalfreq = si_pmu_measure_alpclk(sih);
		si_pmu_pll_init(sih, xtalfreq);
		si_pmu_res_init(sih);
		si_pmu_swreg_init(sih);
	}

	/* setup the GPIO based LED powersave register */
	w = getintvar(pvars, "leddc");
	if (w == 0)
		w = DEFAULT_GPIOTIMERVAL;
	ai_corereg(sih, SI_CC_IDX, offsetof(chipcregs_t, gpiotimerval), ~0, w);

	if (PCIE(sii)) {
		pcicore_attach(sii->pch, pvars, SI_DOATTACH);
	}

	if ((sih->chip == BCM43224_CHIP_ID) ||
	    (sih->chip == BCM43421_CHIP_ID)) {
		/*
		 * enable 12 mA drive strenth for 43224 and
		 * set chipControl register bit 15
		 */
		if (sih->chiprev == 0) {
			SI_MSG(("Applying 43224A0 WARs\n"));
			ai_corereg(sih, SI_CC_IDX,
				   offsetof(chipcregs_t, chipcontrol),
				   CCTRL43224_GPIO_TOGGLE,
				   CCTRL43224_GPIO_TOGGLE);
			si_pmu_chipcontrol(sih, 0, CCTRL_43224A0_12MA_LED_DRIVE,
					   CCTRL_43224A0_12MA_LED_DRIVE);
		}
		if (sih->chiprev >= 1) {
			SI_MSG(("Applying 43224B0+ WARs\n"));
			si_pmu_chipcontrol(sih, 0, CCTRL_43224B0_12MA_LED_DRIVE,
					   CCTRL_43224B0_12MA_LED_DRIVE);
		}
	}

	if (sih->chip == BCM4313_CHIP_ID) {
		/*
		 * enable 12 mA drive strenth for 4313 and
		 * set chipControl register bit 1
		 */
		SI_MSG(("Applying 4313 WARs\n"));
		si_pmu_chipcontrol(sih, 0, CCTRL_4313_12MA_LED_DRIVE,
				   CCTRL_4313_12MA_LED_DRIVE);
	}

	if (sih->chip == BCM4331_CHIP_ID) {
		/* Enable Ext PA lines depending on chip package option */
		ai_chipcontrl_epa4331(sih, true);
	}

	return sii;

exit:
	if (sih->bustype == PCI_BUS) {
		if (sii->pch)
			pcicore_deinit(sii->pch);
		sii->pch = NULL;
	}

	return NULL;
}
952 /* may be called with core in reset */
953 void ai_detach(si_t *sih)
955 si_info_t *sii;
956 uint idx;
958 struct si_pub *si_local = NULL;
959 bcopy(&sih, &si_local, sizeof(si_t **));
961 sii = SI_INFO(sih);
963 if (sii == NULL)
964 return;
966 if (sih->bustype == SI_BUS)
967 for (idx = 0; idx < SI_MAXCORES; idx++)
968 if (sii->regs[idx]) {
969 iounmap(sii->regs[idx]);
970 sii->regs[idx] = NULL;
973 nvram_exit(); /* free up nvram buffers */
975 if (sih->bustype == PCI_BUS) {
976 if (sii->pch)
977 pcicore_deinit(sii->pch);
978 sii->pch = NULL;
981 if (sii != &ksii)
982 kfree(sii);
/* register driver interrupt disabling and restoring callback functions */
void
ai_register_intr_callback(si_t *sih, void *intrsoff_fn, void *intrsrestore_fn,
			  void *intrsenabled_fn, void *intr_arg)
{
	si_info_t *sii;

	sii = SI_INFO(sih);
	sii->intr_arg = intr_arg;
	sii->intrsoff_fn = (si_intrsoff_t) intrsoff_fn;
	sii->intrsrestore_fn = (si_intrsrestore_t) intrsrestore_fn;
	sii->intrsenabled_fn = (si_intrsenabled_t) intrsenabled_fn;
	/* save current core id. when this function called, the current core
	 * must be the core which provides driver functions(il, et, wl, etc.)
	 */
	sii->dev_coreid = sii->coreid[sii->curidx];
}
1003 void ai_deregister_intr_callback(si_t *sih)
1005 si_info_t *sii;
1007 sii = SI_INFO(sih);
1008 sii->intrsoff_fn = NULL;
1011 uint ai_coreid(si_t *sih)
1013 si_info_t *sii;
1015 sii = SI_INFO(sih);
1016 return sii->coreid[sii->curidx];
1019 uint ai_coreidx(si_t *sih)
1021 si_info_t *sii;
1023 sii = SI_INFO(sih);
1024 return sii->curidx;
1027 bool ai_backplane64(si_t *sih)
1029 return (sih->cccaps & CC_CAP_BKPLN64) != 0;
1032 /* return index of coreid or BADIDX if not found */
1033 uint ai_findcoreidx(si_t *sih, uint coreid, uint coreunit)
1035 si_info_t *sii;
1036 uint found;
1037 uint i;
1039 sii = SI_INFO(sih);
1041 found = 0;
1043 for (i = 0; i < sii->numcores; i++)
1044 if (sii->coreid[i] == coreid) {
1045 if (found == coreunit)
1046 return i;
1047 found++;
1050 return BADIDX;
1054 * This function changes logical "focus" to the indicated core;
1055 * must be called with interrupts off.
1056 * Moreover, callers should keep interrupts off during switching
1057 * out of and back to d11 core.
1059 void *ai_setcore(si_t *sih, uint coreid, uint coreunit)
1061 uint idx;
1063 idx = ai_findcoreidx(sih, coreid, coreunit);
1064 if (!GOODIDX(idx))
1065 return NULL;
1067 return ai_setcoreidx(sih, idx);
/* Turn off interrupt as required by ai_setcore, before switch core */
void *ai_switch_core(si_t *sih, uint coreid, uint *origidx, uint *intr_val)
{
	void *cc;
	si_info_t *sii;

	sii = SI_INFO(sih);

	if (SI_FAST(sii)) {
		/* Overloading the origidx variable to remember the coreid,
		 * this works because the core ids cannot be confused with
		 * core indices.
		 */
		*origidx = coreid;
		/* fast path: chipc/bus cores are directly addressable
		 * without a window switch, so no interrupt fiddling */
		if (coreid == CC_CORE_ID)
			return (void *)CCREGS_FAST(sii);
		else if (coreid == sih->buscoretype)
			return (void *)PCIEREGS(sii);
	}
	INTR_OFF(sii, *intr_val);
	*origidx = sii->curidx;
	cc = ai_setcore(sih, coreid, 0);
	return cc;
}
/* restore coreidx and restore interrupt */
void ai_restore_core(si_t *sih, uint coreid, uint intr_val)
{
	si_info_t *sii;

	sii = SI_INFO(sih);
	/* fast-path cores were never switched away from (see
	 * ai_switch_core), so there is nothing to restore */
	if (SI_FAST(sii)
	    && ((coreid == CC_CORE_ID) || (coreid == sih->buscoretype)))
		return;

	/* here 'coreid' actually holds the saved core INDEX */
	ai_setcoreidx(sih, coreid);
	INTR_RESTORE(sii, intr_val);
}
1109 void ai_write_wrapperreg(si_t *sih, u32 offset, u32 val)
1111 si_info_t *sii = SI_INFO(sih);
1112 u32 *w = (u32 *) sii->curwrap;
1113 W_REG(w + (offset / 4), val);
1114 return;
1118 * Switch to 'coreidx', issue a single arbitrary 32bit register mask&set
1119 * operation, switch back to the original core, and return the new value.
1121 * When using the silicon backplane, no fiddling with interrupts or core
1122 * switches is needed.
1124 * Also, when using pci/pcie, we can optimize away the core switching for pci
1125 * registers and (on newer pci cores) chipcommon registers.
/* Switch to 'coreidx', issue a single arbitrary 32bit register mask&set
 * operation, switch back to the original core, and return the new value.
 *
 * When using the silicon backplane, no fiddling with interrupts or core
 * switches is needed.
 *
 * Also, when using pci/pcie, we can optimize away the core switching for
 * pci registers and (on newer pci cores) chipcommon registers.
 */
uint ai_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val)
{
	uint origidx = 0;
	u32 *r = NULL;
	uint w;
	uint intr_val = 0;
	bool fast = false;	/* true => no core switch needed */
	si_info_t *sii;

	sii = SI_INFO(sih);

	if (coreidx >= SI_MAXCORES)
		return 0;

	if (sih->bustype == SI_BUS) {
		/* If internal bus, we can always get at everything */
		fast = true;
		/* map if does not exist */
		if (!sii->regs[coreidx]) {
			sii->regs[coreidx] = REG_MAP(sii->coresba[coreidx],
						     SI_CORE_SIZE);
		}
		r = (u32 *) ((unsigned char *) sii->regs[coreidx] + regoff);
	} else if (sih->bustype == PCI_BUS) {
		/*
		 * If pci/pcie, we can get at pci/pcie regs
		 * and on newer cores to chipc
		 */
		if ((sii->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) {
			/* Chipc registers are mapped at 12KB */
			fast = true;
			r = (u32 *) ((char *)sii->curmap +
				     PCI_16KB0_CCREGS_OFFSET + regoff);
		} else if (sii->pub.buscoreidx == coreidx) {
			/*
			 * pci registers are at either in the last 2KB of
			 * an 8KB window or, in pcie and pci rev 13 at 8KB
			 */
			fast = true;
			if (SI_FAST(sii))
				r = (u32 *) ((char *)sii->curmap +
					     PCI_16KB0_PCIREGS_OFFSET +
					     regoff);
			else
				r = (u32 *) ((char *)sii->curmap +
					     ((regoff >= SBCONFIGOFF) ?
					      PCI_BAR0_PCISBR_OFFSET :
					      PCI_BAR0_PCIREGS_OFFSET) +
					     regoff);
		}
	}

	if (!fast) {
		INTR_OFF(sii, intr_val);

		/* save current core index */
		origidx = ai_coreidx(&sii->pub);

		/* switch core */
		r = (u32 *) ((unsigned char *) ai_setcoreidx(&sii->pub, coreidx)
			     + regoff);
	}

	/* mask and set */
	if (mask || val) {
		w = (R_REG(r) & ~mask) | val;
		W_REG(r, w);
	}

	/* readback */
	w = R_REG(r);

	if (!fast) {
		/* restore core index */
		if (origidx != coreidx)
			ai_setcoreidx(&sii->pub, origidx);

		INTR_RESTORE(sii, intr_val);
	}

	return w;
}
/* Put the current core into reset: write the caller's ioctrl bits,
 * flush, then assert the wrapper reset line.
 */
void ai_core_disable(si_t *sih, u32 bits)
{
	si_info_t *sii;
	u32 dummy;
	aidmp_t *ai;

	sii = SI_INFO(sih);

	ai = sii->curwrap;

	/* if core is already in reset, just return */
	if (R_REG(&ai->resetctrl) & AIRC_RESET)
		return;

	W_REG(&ai->ioctrl, bits);
	dummy = R_REG(&ai->ioctrl);	/* readback to post the write */
	udelay(10);

	W_REG(&ai->resetctrl, AIRC_RESET);
	udelay(1);
}
/* reset and re-enable a core
 * inputs:
 * bits - core specific bits that are set during and after reset sequence
 * resetbits - core specific bits that are set only during reset sequence
 */
void ai_core_reset(si_t *sih, u32 bits, u32 resetbits)
{
	si_info_t *sii;
	aidmp_t *ai;
	u32 dummy;

	sii = SI_INFO(sih);
	ai = sii->curwrap;

	/*
	 * Must do the disable sequence first to work
	 * for arbitrary current core state.
	 */
	ai_core_disable(sih, (bits | resetbits));

	/*
	 * Now do the initialization sequence.
	 */
	/* enable clocks with force-gated-clocks while still in reset */
	W_REG(&ai->ioctrl, (bits | SICF_FGC | SICF_CLOCK_EN));
	dummy = R_REG(&ai->ioctrl);	/* readback to post the write */
	W_REG(&ai->resetctrl, 0);	/* deassert reset */
	udelay(1);

	/* drop the force-gated-clocks bit, leave the clock running */
	W_REG(&ai->ioctrl, (bits | SICF_CLOCK_EN));
	dummy = R_REG(&ai->ioctrl);
	udelay(1);
}
/* return the slow clock source - LPO, XTAL, or PCI */
static uint ai_slowclk_src(si_info_t *sii)
{
	chipcregs_t *cc;
	u32 val;

	if (sii->pub.ccrev < 6) {
		/* pre-rev6 chipc: the source is encoded in a PCI GPIO bit */
		if (sii->pub.bustype == PCI_BUS) {
			pci_read_config_dword(sii->pbus, PCI_GPIO_OUT,
					      &val);
			if (val & PCI_CFG_GPIO_SCS)
				return SCC_SS_PCI;
		}
		return SCC_SS_XTAL;
	} else if (sii->pub.ccrev < 10) {
		/* rev 6..9: read the slow clock control register */
		cc = (chipcregs_t *) ai_setcoreidx(&sii->pub, sii->curidx);
		return R_REG(&cc->slow_clk_ctl) & SCC_SS_MASK;
	} else		/* Insta-clock */
		return SCC_SS_XTAL;
}
1288 * return the ILP (slowclock) min or max frequency
1289 * precondition: we've established the chip has dynamic clk control
1291 static uint ai_slowclk_freq(si_info_t *sii, bool max_freq, chipcregs_t *cc)
1293 u32 slowclk;
1294 uint div;
1296 slowclk = ai_slowclk_src(sii);
1297 if (sii->pub.ccrev < 6) {
1298 if (slowclk == SCC_SS_PCI)
1299 return max_freq ? (PCIMAXFREQ / 64)
1300 : (PCIMINFREQ / 64);
1301 else
1302 return max_freq ? (XTALMAXFREQ / 32)
1303 : (XTALMINFREQ / 32);
1304 } else if (sii->pub.ccrev < 10) {
1305 div = 4 *
1306 (((R_REG(&cc->slow_clk_ctl) & SCC_CD_MASK) >>
1307 SCC_CD_SHIFT) + 1);
1308 if (slowclk == SCC_SS_LPO)
1309 return max_freq ? LPOMAXFREQ : LPOMINFREQ;
1310 else if (slowclk == SCC_SS_XTAL)
1311 return max_freq ? (XTALMAXFREQ / div)
1312 : (XTALMINFREQ / div);
1313 else if (slowclk == SCC_SS_PCI)
1314 return max_freq ? (PCIMAXFREQ / div)
1315 : (PCIMINFREQ / div);
1316 } else {
1317 /* Chipc rev 10 is InstaClock */
1318 div = R_REG(&cc->system_clk_ctl) >> SYCC_CD_SHIFT;
1319 div = 4 * (div + 1);
1320 return max_freq ? XTALMAXFREQ : (XTALMINFREQ / div);
1322 return 0;
1325 static void ai_clkctl_setdelay(si_info_t *sii, void *chipcregs)
1327 chipcregs_t *cc = (chipcregs_t *) chipcregs;
1328 uint slowmaxfreq, pll_delay, slowclk;
1329 uint pll_on_delay, fref_sel_delay;
1331 pll_delay = PLL_DELAY;
1334 * If the slow clock is not sourced by the xtal then
1335 * add the xtal_on_delay since the xtal will also be
1336 * powered down by dynamic clk control logic.
1339 slowclk = ai_slowclk_src(sii);
1340 if (slowclk != SCC_SS_XTAL)
1341 pll_delay += XTAL_ON_DELAY;
1343 /* Starting with 4318 it is ILP that is used for the delays */
1344 slowmaxfreq =
1345 ai_slowclk_freq(sii, (sii->pub.ccrev >= 10) ? false : true, cc);
1347 pll_on_delay = ((slowmaxfreq * pll_delay) + 999999) / 1000000;
1348 fref_sel_delay = ((slowmaxfreq * FREF_DELAY) + 999999) / 1000000;
1350 W_REG(&cc->pll_on_delay, pll_on_delay);
1351 W_REG(&cc->fref_sel_delay, fref_sel_delay);
1354 /* initialize power control delay registers */
1355 void ai_clkctl_init(si_t *sih)
1357 si_info_t *sii;
1358 uint origidx = 0;
1359 chipcregs_t *cc;
1360 bool fast;
1362 if (!CCCTL_ENAB(sih))
1363 return;
1365 sii = SI_INFO(sih);
1366 fast = SI_FAST(sii);
1367 if (!fast) {
1368 origidx = sii->curidx;
1369 cc = (chipcregs_t *) ai_setcore(sih, CC_CORE_ID, 0);
1370 if (cc == NULL)
1371 return;
1372 } else {
1373 cc = (chipcregs_t *) CCREGS_FAST(sii);
1374 if (cc == NULL)
1375 return;
1378 /* set all Instaclk chip ILP to 1 MHz */
1379 if (sih->ccrev >= 10)
1380 SET_REG(&cc->system_clk_ctl, SYCC_CD_MASK,
1381 (ILP_DIV_1MHZ << SYCC_CD_SHIFT));
1383 ai_clkctl_setdelay(sii, (void *)cc);
1385 if (!fast)
1386 ai_setcoreidx(sih, origidx);
1390 * return the value suitable for writing to the
1391 * dot11 core FAST_PWRUP_DELAY register
1393 u16 ai_clkctl_fast_pwrup_delay(si_t *sih)
1395 si_info_t *sii;
1396 uint origidx = 0;
1397 chipcregs_t *cc;
1398 uint slowminfreq;
1399 u16 fpdelay;
1400 uint intr_val = 0;
1401 bool fast;
1403 sii = SI_INFO(sih);
1404 if (PMUCTL_ENAB(sih)) {
1405 INTR_OFF(sii, intr_val);
1406 fpdelay = si_pmu_fast_pwrup_delay(sih);
1407 INTR_RESTORE(sii, intr_val);
1408 return fpdelay;
1411 if (!CCCTL_ENAB(sih))
1412 return 0;
1414 fast = SI_FAST(sii);
1415 fpdelay = 0;
1416 if (!fast) {
1417 origidx = sii->curidx;
1418 INTR_OFF(sii, intr_val);
1419 cc = (chipcregs_t *) ai_setcore(sih, CC_CORE_ID, 0);
1420 if (cc == NULL)
1421 goto done;
1422 } else {
1423 cc = (chipcregs_t *) CCREGS_FAST(sii);
1424 if (cc == NULL)
1425 goto done;
1428 slowminfreq = ai_slowclk_freq(sii, false, cc);
1429 fpdelay = (((R_REG(&cc->pll_on_delay) + 2) * 1000000) +
1430 (slowminfreq - 1)) / slowminfreq;
1432 done:
1433 if (!fast) {
1434 ai_setcoreidx(sih, origidx);
1435 INTR_RESTORE(sii, intr_val);
1437 return fpdelay;
1440 /* turn primary xtal and/or pll off/on */
1441 int ai_clkctl_xtal(si_t *sih, uint what, bool on)
1443 si_info_t *sii;
1444 u32 in, out, outen;
1446 sii = SI_INFO(sih);
1448 switch (sih->bustype) {
1450 case PCI_BUS:
1451 /* pcie core doesn't have any mapping to control the xtal pu */
1452 if (PCIE(sii))
1453 return -1;
1455 pci_read_config_dword(sii->pbus, PCI_GPIO_IN, &in);
1456 pci_read_config_dword(sii->pbus, PCI_GPIO_OUT, &out);
1457 pci_read_config_dword(sii->pbus, PCI_GPIO_OUTEN, &outen);
1460 * Avoid glitching the clock if GPRS is already using it.
1461 * We can't actually read the state of the PLLPD so we infer it
1462 * by the value of XTAL_PU which *is* readable via gpioin.
1464 if (on && (in & PCI_CFG_GPIO_XTAL))
1465 return 0;
1467 if (what & XTAL)
1468 outen |= PCI_CFG_GPIO_XTAL;
1469 if (what & PLL)
1470 outen |= PCI_CFG_GPIO_PLL;
1472 if (on) {
1473 /* turn primary xtal on */
1474 if (what & XTAL) {
1475 out |= PCI_CFG_GPIO_XTAL;
1476 if (what & PLL)
1477 out |= PCI_CFG_GPIO_PLL;
1478 pci_write_config_dword(sii->pbus,
1479 PCI_GPIO_OUT, out);
1480 pci_write_config_dword(sii->pbus,
1481 PCI_GPIO_OUTEN, outen);
1482 udelay(XTAL_ON_DELAY);
1485 /* turn pll on */
1486 if (what & PLL) {
1487 out &= ~PCI_CFG_GPIO_PLL;
1488 pci_write_config_dword(sii->pbus,
1489 PCI_GPIO_OUT, out);
1490 mdelay(2);
1492 } else {
1493 if (what & XTAL)
1494 out &= ~PCI_CFG_GPIO_XTAL;
1495 if (what & PLL)
1496 out |= PCI_CFG_GPIO_PLL;
1497 pci_write_config_dword(sii->pbus,
1498 PCI_GPIO_OUT, out);
1499 pci_write_config_dword(sii->pbus,
1500 PCI_GPIO_OUTEN, outen);
1503 default:
1504 return -1;
1507 return 0;
1511 * clock control policy function throught chipcommon
1513 * set dynamic clk control mode (forceslow, forcefast, dynamic)
1514 * returns true if we are forcing fast clock
1515 * this is a wrapper over the next internal function
1516 * to allow flexible policy settings for outside caller
1518 bool ai_clkctl_cc(si_t *sih, uint mode)
1520 si_info_t *sii;
1522 sii = SI_INFO(sih);
1524 /* chipcommon cores prior to rev6 don't support dynamic clock control */
1525 if (sih->ccrev < 6)
1526 return false;
1528 if (PCI_FORCEHT(sii))
1529 return mode == CLK_FAST;
1531 return _ai_clkctl_cc(sii, mode);
1534 /* clk control mechanism through chipcommon, no policy checking */
1535 static bool _ai_clkctl_cc(si_info_t *sii, uint mode)
1537 uint origidx = 0;
1538 chipcregs_t *cc;
1539 u32 scc;
1540 uint intr_val = 0;
1541 bool fast = SI_FAST(sii);
1543 /* chipcommon cores prior to rev6 don't support dynamic clock control */
1544 if (sii->pub.ccrev < 6)
1545 return false;
1547 if (!fast) {
1548 INTR_OFF(sii, intr_val);
1549 origidx = sii->curidx;
1551 if ((sii->pub.bustype == SI_BUS) &&
1552 ai_setcore(&sii->pub, MIPS33_CORE_ID, 0) &&
1553 (ai_corerev(&sii->pub) <= 7) && (sii->pub.ccrev >= 10))
1554 goto done;
1556 cc = (chipcregs_t *) ai_setcore(&sii->pub, CC_CORE_ID, 0);
1557 } else {
1558 cc = (chipcregs_t *) CCREGS_FAST(sii);
1559 if (cc == NULL)
1560 goto done;
1563 if (!CCCTL_ENAB(&sii->pub) && (sii->pub.ccrev < 20))
1564 goto done;
1566 switch (mode) {
1567 case CLK_FAST: /* FORCEHT, fast (pll) clock */
1568 if (sii->pub.ccrev < 10) {
1570 * don't forget to force xtal back
1571 * on before we clear SCC_DYN_XTAL..
1573 ai_clkctl_xtal(&sii->pub, XTAL, ON);
1574 SET_REG(&cc->slow_clk_ctl,
1575 (SCC_XC | SCC_FS | SCC_IP), SCC_IP);
1576 } else if (sii->pub.ccrev < 20) {
1577 OR_REG(&cc->system_clk_ctl, SYCC_HR);
1578 } else {
1579 OR_REG(&cc->clk_ctl_st, CCS_FORCEHT);
1582 /* wait for the PLL */
1583 if (PMUCTL_ENAB(&sii->pub)) {
1584 u32 htavail = CCS_HTAVAIL;
1585 SPINWAIT(((R_REG(&cc->clk_ctl_st) & htavail)
1586 == 0), PMU_MAX_TRANSITION_DLY);
1587 } else {
1588 udelay(PLL_DELAY);
1590 break;
1592 case CLK_DYNAMIC: /* enable dynamic clock control */
1593 if (sii->pub.ccrev < 10) {
1594 scc = R_REG(&cc->slow_clk_ctl);
1595 scc &= ~(SCC_FS | SCC_IP | SCC_XC);
1596 if ((scc & SCC_SS_MASK) != SCC_SS_XTAL)
1597 scc |= SCC_XC;
1598 W_REG(&cc->slow_clk_ctl, scc);
1601 * for dynamic control, we have to
1602 * release our xtal_pu "force on"
1604 if (scc & SCC_XC)
1605 ai_clkctl_xtal(&sii->pub, XTAL, OFF);
1606 } else if (sii->pub.ccrev < 20) {
1607 /* Instaclock */
1608 AND_REG(&cc->system_clk_ctl, ~SYCC_HR);
1609 } else {
1610 AND_REG(&cc->clk_ctl_st, ~CCS_FORCEHT);
1612 break;
1614 default:
1615 break;
1618 done:
1619 if (!fast) {
1620 ai_setcoreidx(&sii->pub, origidx);
1621 INTR_RESTORE(sii, intr_val);
1623 return mode == CLK_FAST;
1626 /* Build device path. Support SI, PCI, and JTAG for now. */
1627 int ai_devpath(si_t *sih, char *path, int size)
1629 int slen;
1631 if (!path || size <= 0)
1632 return -1;
1634 switch (sih->bustype) {
1635 case SI_BUS:
1636 case JTAG_BUS:
1637 slen = snprintf(path, (size_t) size, "sb/%u/", ai_coreidx(sih));
1638 break;
1639 case PCI_BUS:
1640 slen = snprintf(path, (size_t) size, "pci/%u/%u/",
1641 ((struct pci_dev *)((SI_INFO(sih))->pbus))->bus->number,
1642 PCI_SLOT(
1643 ((struct pci_dev *)((SI_INFO(sih))->pbus))->devfn));
1644 break;
1646 default:
1647 slen = -1;
1648 break;
1651 if (slen < 0 || slen >= size) {
1652 path[0] = '\0';
1653 return -1;
1656 return 0;
1659 /* Get a variable, but only if it has a devpath prefix */
1660 char *ai_getdevpathvar(si_t *sih, const char *name)
1662 char varname[SI_DEVPATH_BUFSZ + 32];
1664 ai_devpathvar(sih, varname, sizeof(varname), name);
1666 return getvar(NULL, varname);
1669 /* Get a variable, but only if it has a devpath prefix */
1670 int ai_getdevpathintvar(si_t *sih, const char *name)
1672 #if defined(BCMBUSTYPE) && (BCMBUSTYPE == SI_BUS)
1673 return getintvar(NULL, name);
1674 #else
1675 char varname[SI_DEVPATH_BUFSZ + 32];
1677 ai_devpathvar(sih, varname, sizeof(varname), name);
1679 return getintvar(NULL, varname);
1680 #endif
1683 char *ai_getnvramflvar(si_t *sih, const char *name)
1685 return getvar(NULL, name);
1688 /* Concatenate the dev path with a varname into the given 'var' buffer
1689 * and return the 'var' pointer. Nothing is done to the arguments if
1690 * len == 0 or var is NULL, var is still returned. On overflow, the
1691 * first char will be set to '\0'.
1693 static char *ai_devpathvar(si_t *sih, char *var, int len, const char *name)
1695 uint path_len;
1697 if (!var || len <= 0)
1698 return var;
1700 if (ai_devpath(sih, var, len) == 0) {
1701 path_len = strlen(var);
1703 if (strlen(name) + 1 > (uint) (len - path_len))
1704 var[0] = '\0';
1705 else
1706 strncpy(var + path_len, name, len - path_len - 1);
1709 return var;
1712 /* return true if PCIE capability exists in the pci config space */
1713 static __used bool ai_ispcie(si_info_t *sii)
1715 u8 cap_ptr;
1717 if (sii->pub.bustype != PCI_BUS)
1718 return false;
1720 cap_ptr =
1721 pcicore_find_pci_capability(sii->pbus, PCI_CAP_ID_EXP, NULL,
1722 NULL);
1723 if (!cap_ptr)
1724 return false;
1726 return true;
1729 bool ai_pci_war16165(si_t *sih)
1731 si_info_t *sii;
1733 sii = SI_INFO(sih);
1735 return PCI(sii) && (sih->buscorerev <= 10);
1738 void ai_pci_up(si_t *sih)
1740 si_info_t *sii;
1742 sii = SI_INFO(sih);
1744 /* if not pci bus, we're done */
1745 if (sih->bustype != PCI_BUS)
1746 return;
1748 if (PCI_FORCEHT(sii))
1749 _ai_clkctl_cc(sii, CLK_FAST);
1751 if (PCIE(sii))
1752 pcicore_up(sii->pch, SI_PCIUP);
1756 /* Unconfigure and/or apply various WARs when system is going to sleep mode */
1757 void ai_pci_sleep(si_t *sih)
1759 si_info_t *sii;
1761 sii = SI_INFO(sih);
1763 pcicore_sleep(sii->pch);
1766 /* Unconfigure and/or apply various WARs when going down */
1767 void ai_pci_down(si_t *sih)
1769 si_info_t *sii;
1771 sii = SI_INFO(sih);
1773 /* if not pci bus, we're done */
1774 if (sih->bustype != PCI_BUS)
1775 return;
1777 /* release FORCEHT since chip is going to "down" state */
1778 if (PCI_FORCEHT(sii))
1779 _ai_clkctl_cc(sii, CLK_DYNAMIC);
1781 pcicore_down(sii->pch, SI_PCIDOWN);
1785 * Configure the pci core for pci client (NIC) action
1786 * coremask is the bitvec of cores by index to be enabled.
1788 void ai_pci_setup(si_t *sih, uint coremask)
1790 si_info_t *sii;
1791 struct sbpciregs *pciregs = NULL;
1792 u32 siflag = 0, w;
1793 uint idx = 0;
1795 sii = SI_INFO(sih);
1797 if (sii->pub.bustype != PCI_BUS)
1798 return;
1800 if (PCI(sii)) {
1801 /* get current core index */
1802 idx = sii->curidx;
1804 /* we interrupt on this backplane flag number */
1805 siflag = ai_flag(sih);
1807 /* switch over to pci core */
1808 pciregs = ai_setcoreidx(sih, sii->pub.buscoreidx);
1812 * Enable sb->pci interrupts. Assume
1813 * PCI rev 2.3 support was added in pci core rev 6 and things changed..
1815 if (PCIE(sii) || (PCI(sii) && ((sii->pub.buscorerev) >= 6))) {
1816 /* pci config write to set this core bit in PCIIntMask */
1817 pci_read_config_dword(sii->pbus, PCI_INT_MASK, &w);
1818 w |= (coremask << PCI_SBIM_SHIFT);
1819 pci_write_config_dword(sii->pbus, PCI_INT_MASK, w);
1820 } else {
1821 /* set sbintvec bit for our flag number */
1822 ai_setint(sih, siflag);
1825 if (PCI(sii)) {
1826 OR_REG(&pciregs->sbtopci2,
1827 (SBTOPCI_PREF | SBTOPCI_BURST));
1828 if (sii->pub.buscorerev >= 11) {
1829 OR_REG(&pciregs->sbtopci2,
1830 SBTOPCI_RC_READMULTI);
1831 w = R_REG(&pciregs->clkrun);
1832 W_REG(&pciregs->clkrun,
1833 (w | PCI_CLKRUN_DSBL));
1834 w = R_REG(&pciregs->clkrun);
1837 /* switch back to previous core */
1838 ai_setcoreidx(sih, idx);
1843 * Fixup SROMless PCI device's configuration.
1844 * The current core may be changed upon return.
1846 int ai_pci_fixcfg(si_t *sih)
1848 uint origidx, pciidx;
1849 struct sbpciregs *pciregs = NULL;
1850 sbpcieregs_t *pcieregs = NULL;
1851 void *regs = NULL;
1852 u16 val16, *reg16 = NULL;
1854 si_info_t *sii = SI_INFO(sih);
1856 /* Fixup PI in SROM shadow area to enable the correct PCI core access */
1857 /* save the current index */
1858 origidx = ai_coreidx(&sii->pub);
1860 /* check 'pi' is correct and fix it if not */
1861 if (sii->pub.buscoretype == PCIE_CORE_ID) {
1862 pcieregs = ai_setcore(&sii->pub, PCIE_CORE_ID, 0);
1863 regs = pcieregs;
1864 reg16 = &pcieregs->sprom[SRSH_PI_OFFSET];
1865 } else if (sii->pub.buscoretype == PCI_CORE_ID) {
1866 pciregs = ai_setcore(&sii->pub, PCI_CORE_ID, 0);
1867 regs = pciregs;
1868 reg16 = &pciregs->sprom[SRSH_PI_OFFSET];
1870 pciidx = ai_coreidx(&sii->pub);
1871 val16 = R_REG(reg16);
1872 if (((val16 & SRSH_PI_MASK) >> SRSH_PI_SHIFT) != (u16) pciidx) {
1873 val16 =
1874 (u16) (pciidx << SRSH_PI_SHIFT) | (val16 &
1875 ~SRSH_PI_MASK);
1876 W_REG(reg16, val16);
1879 /* restore the original index */
1880 ai_setcoreidx(&sii->pub, origidx);
1882 pcicore_hwup(sii->pch);
1883 return 0;
1886 /* mask&set gpiocontrol bits */
1887 u32 ai_gpiocontrol(si_t *sih, u32 mask, u32 val, u8 priority)
1889 uint regoff;
1891 regoff = 0;
1893 /* gpios could be shared on router platforms
1894 * ignore reservation if it's high priority (e.g., test apps)
1896 if ((priority != GPIO_HI_PRIORITY) &&
1897 (sih->bustype == SI_BUS) && (val || mask)) {
1898 mask = priority ? (ai_gpioreservation & mask) :
1899 ((ai_gpioreservation | mask) & ~(ai_gpioreservation));
1900 val &= mask;
1903 regoff = offsetof(chipcregs_t, gpiocontrol);
1904 return ai_corereg(sih, SI_CC_IDX, regoff, mask, val);
1907 void ai_chipcontrl_epa4331(si_t *sih, bool on)
1909 si_info_t *sii;
1910 chipcregs_t *cc;
1911 uint origidx;
1912 u32 val;
1914 sii = SI_INFO(sih);
1915 origidx = ai_coreidx(sih);
1917 cc = (chipcregs_t *) ai_setcore(sih, CC_CORE_ID, 0);
1919 val = R_REG(&cc->chipcontrol);
1921 if (on) {
1922 if (sih->chippkg == 9 || sih->chippkg == 0xb) {
1923 /* Ext PA Controls for 4331 12x9 Package */
1924 W_REG(&cc->chipcontrol, val |
1925 (CCTRL4331_EXTPA_EN |
1926 CCTRL4331_EXTPA_ON_GPIO2_5));
1927 } else {
1928 /* Ext PA Controls for 4331 12x12 Package */
1929 W_REG(&cc->chipcontrol,
1930 val | (CCTRL4331_EXTPA_EN));
1932 } else {
1933 val &= ~(CCTRL4331_EXTPA_EN | CCTRL4331_EXTPA_ON_GPIO2_5);
1934 W_REG(&cc->chipcontrol, val);
1937 ai_setcoreidx(sih, origidx);
1940 /* Enable BT-COEX & Ex-PA for 4313 */
1941 void ai_epa_4313war(si_t *sih)
1943 si_info_t *sii;
1944 chipcregs_t *cc;
1945 uint origidx;
1947 sii = SI_INFO(sih);
1948 origidx = ai_coreidx(sih);
1950 cc = (chipcregs_t *) ai_setcore(sih, CC_CORE_ID, 0);
1952 /* EPA Fix */
1953 W_REG(&cc->gpiocontrol,
1954 R_REG(&cc->gpiocontrol) | GPIO_CTRL_EPA_EN_MASK);
1956 ai_setcoreidx(sih, origidx);
1959 /* check if the device is removed */
1960 bool ai_deviceremoved(si_t *sih)
1962 u32 w;
1963 si_info_t *sii;
1965 sii = SI_INFO(sih);
1967 switch (sih->bustype) {
1968 case PCI_BUS:
1969 pci_read_config_dword(sii->pbus, PCI_VENDOR_ID, &w);
1970 if ((w & 0xFFFF) != PCI_VENDOR_ID_BROADCOM)
1971 return true;
1972 break;
1974 return false;
1977 bool ai_is_sprom_available(si_t *sih)
1979 if (sih->ccrev >= 31) {
1980 si_info_t *sii;
1981 uint origidx;
1982 chipcregs_t *cc;
1983 u32 sromctrl;
1985 if ((sih->cccaps & CC_CAP_SROM) == 0)
1986 return false;
1988 sii = SI_INFO(sih);
1989 origidx = sii->curidx;
1990 cc = ai_setcoreidx(sih, SI_CC_IDX);
1991 sromctrl = R_REG(&cc->sromcontrol);
1992 ai_setcoreidx(sih, origidx);
1993 return sromctrl & SRC_PRESENT;
1996 switch (sih->chip) {
1997 case BCM4329_CHIP_ID:
1998 return (sih->chipst & CST4329_SPROM_SEL) != 0;
1999 case BCM4319_CHIP_ID:
2000 return (sih->chipst & CST4319_SPROM_SEL) != 0;
2001 case BCM4336_CHIP_ID:
2002 return (sih->chipst & CST4336_SPROM_PRESENT) != 0;
2003 case BCM4330_CHIP_ID:
2004 return (sih->chipst & CST4330_SPROM_PRESENT) != 0;
2005 case BCM4313_CHIP_ID:
2006 return (sih->chipst & CST4313_SPROM_PRESENT) != 0;
2007 case BCM4331_CHIP_ID:
2008 return (sih->chipst & CST4331_SPROM_PRESENT) != 0;
2009 default:
2010 return true;
2014 bool ai_is_otp_disabled(si_t *sih)
2016 switch (sih->chip) {
2017 case BCM4329_CHIP_ID:
2018 return (sih->chipst & CST4329_SPROM_OTP_SEL_MASK) ==
2019 CST4329_OTP_PWRDN;
2020 case BCM4319_CHIP_ID:
2021 return (sih->chipst & CST4319_SPROM_OTP_SEL_MASK) ==
2022 CST4319_OTP_PWRDN;
2023 case BCM4336_CHIP_ID:
2024 return (sih->chipst & CST4336_OTP_PRESENT) == 0;
2025 case BCM4330_CHIP_ID:
2026 return (sih->chipst & CST4330_OTP_PRESENT) == 0;
2027 case BCM4313_CHIP_ID:
2028 return (sih->chipst & CST4313_OTP_PRESENT) == 0;
2029 /* These chips always have their OTP on */
2030 case BCM43224_CHIP_ID:
2031 case BCM43225_CHIP_ID:
2032 case BCM43421_CHIP_ID:
2033 case BCM43235_CHIP_ID:
2034 case BCM43236_CHIP_ID:
2035 case BCM43238_CHIP_ID:
2036 case BCM4331_CHIP_ID:
2037 default:
2038 return false;
2042 bool ai_is_otp_powered(si_t *sih)
2044 if (PMUCTL_ENAB(sih))
2045 return si_pmu_is_otp_powered(sih);
2046 return true;
2049 void ai_otp_power(si_t *sih, bool on)
2051 if (PMUCTL_ENAB(sih))
2052 si_pmu_otp_power(sih, on);
2053 udelay(1000);