/*
 * Copyright (c) 2010 Broadcom Corporation
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <bcmdefs.h>
#ifdef BRCM_FULLMAC
#include <linux/netdevice.h>
#endif
#include <osl.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <bcmutils.h>
#include <siutils.h>
#include <hndsoc.h>
#include <sbchipc.h>
#include <pcicfg.h>
#include <bcmdevs.h>

#define BCM47162_DMP() ((sih->chip == BCM47162_CHIP_ID) && \
			(sih->chiprev == 0) && \
			(sii->coreid[sii->curidx] == MIPS74K_CORE_ID))

/* EROM parsing */
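
/*
 * Read EROM entries at *eromptr, skipping invalid and non-matching ones,
 * until one satisfies (ent & mask) == match or the END marker is reached;
 * with mask == 0 the next word is returned unconditionally.  *eromptr is
 * left pointing just past the last entry read.
 */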
static u32
get_erom_ent(si_t *sih, u32 **eromptr, u32 mask, u32 match)
{
	u32 ent;
	uint inv = 0, nom = 0;

	while (true) {
		ent = R_REG(si_osh(sih), *eromptr);
		(*eromptr)++;

		if (mask == 0)
			break;

		if ((ent & ER_VALID) == 0) {
			inv++;
			continue;
		}

		if (ent == (ER_END | ER_VALID))
			break;

		if ((ent & mask) == match)
			break;

		nom++;
	}

	SI_VMSG(("%s: Returning ent 0x%08x\n", __func__, ent));
	if (inv + nom) {
		SI_VMSG((" after %d invalid and %d non-matching entries\n",
			 inv, nom));
	}
	return ent;
}
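
/*
 * Parse an address space descriptor (ASD) of type 'st' for slave port 'sp'.
 * Returns the raw descriptor and fills in the 64-bit base address and size
 * through addrl/addrh and sizel/sizeh; returns 0 (and pushes the EROM
 * pointer back) if the next entry is not the expected ASD.
 */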
static u32
get_asd(si_t *sih, u32 **eromptr, uint sp, uint ad, uint st,
	u32 *addrl, u32 *addrh, u32 *sizel, u32 *sizeh)
{
	u32 asd, sz, szd;

	asd = get_erom_ent(sih, eromptr, ER_VALID, ER_VALID);
	if (((asd & ER_TAG1) != ER_ADD) ||
	    (((asd & AD_SP_MASK) >> AD_SP_SHIFT) != sp) ||
	    ((asd & AD_ST_MASK) != st)) {
		/* This is not what we want, "push" it back */
		(*eromptr)--;
		return 0;
	}
	*addrl = asd & AD_ADDR_MASK;
	if (asd & AD_AG32)
		*addrh = get_erom_ent(sih, eromptr, 0, 0);
	else
		*addrh = 0;
	*sizeh = 0;
	sz = asd & AD_SZ_MASK;
	if (sz == AD_SZ_SZD) {
		szd = get_erom_ent(sih, eromptr, 0, 0);
		*sizel = szd & SD_SZ_MASK;
		if (szd & SD_SG32)
			*sizeh = get_erom_ent(sih, eromptr, 0, 0);
	} else
		*sizel = AD_SZ_BASE << (sz >> AD_SZ_SHIFT);

	SI_VMSG((" SP %d, ad %d: st = %d, 0x%08x_0x%08x @ 0x%08x_0x%08x\n",
		 sp, ad, st, *sizeh, *sizel, *addrh, *addrl));

	return asd;
}
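
/* Hardware workarounds would go here; currently nothing is needed. */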
static void ai_hwfixup(si_info_t *sii)
{
}

/* parse the enumeration rom to identify all cores */
void ai_scan(si_t *sih, void *regs, uint devid)
{
	si_info_t *sii = SI_INFO(sih);
	chipcregs_t *cc = (chipcregs_t *) regs;
	u32 erombase, *eromptr, *eromlim;

	erombase = R_REG(sii->osh, &cc->eromptr);

	switch (sih->bustype) {
	case SI_BUS:
		eromptr = (u32 *) REG_MAP(erombase, SI_CORE_SIZE);
		break;

	case PCI_BUS:
		/* Set wrappers address */
		sii->curwrap = (void *)((unsigned long)regs + SI_CORE_SIZE);

		/* Now point the window at the erom */
		pci_write_config_dword(sii->osh->pdev, PCI_BAR0_WIN, erombase);
		eromptr = regs;
		break;

#ifdef BCMSDIO
	case SPI_BUS:
	case SDIO_BUS:
#endif				/* BCMSDIO */
		eromptr = (u32 *)(unsigned long)erombase;
		break;

	default:
		SI_ERROR(("Don't know how to do AXI enumeration on bus %d\n",
			  sih->bustype));
		ASSERT(0);
		return;
	}
	eromlim = eromptr + (ER_REMAPCONTROL / sizeof(u32));

	SI_VMSG(("ai_scan: regs = 0x%p, erombase = 0x%08x, eromptr = 0x%p, eromlim = 0x%p\n",
		 regs, erombase, eromptr, eromlim));
	while (eromptr < eromlim) {
		u32 cia, cib, cid, mfg, crev, nmw, nsw, nmp, nsp;
		u32 mpd, asd, addrl, addrh, sizel, sizeh;
		u32 *base;
		uint i, j, idx;
		bool br;

		br = false;

		/* Grok a component */
		cia = get_erom_ent(sih, &eromptr, ER_TAG, ER_CI);
		if (cia == (ER_END | ER_VALID)) {
			SI_VMSG(("Found END of erom after %d cores\n",
				 sii->numcores));
			ai_hwfixup(sii);
			return;
		}
		base = eromptr - 1;
		cib = get_erom_ent(sih, &eromptr, 0, 0);

		if ((cib & ER_TAG) != ER_CI) {
			SI_ERROR(("CIA not followed by CIB\n"));
			goto error;
		}

		cid = (cia & CIA_CID_MASK) >> CIA_CID_SHIFT;
		mfg = (cia & CIA_MFG_MASK) >> CIA_MFG_SHIFT;
		crev = (cib & CIB_REV_MASK) >> CIB_REV_SHIFT;
		nmw = (cib & CIB_NMW_MASK) >> CIB_NMW_SHIFT;
		nsw = (cib & CIB_NSW_MASK) >> CIB_NSW_SHIFT;
		nmp = (cib & CIB_NMP_MASK) >> CIB_NMP_SHIFT;
		nsp = (cib & CIB_NSP_MASK) >> CIB_NSP_SHIFT;

		SI_VMSG(("Found component 0x%04x/0x%04x rev %d at erom addr 0x%p, "
			 "with nmw = %d, nsw = %d, nmp = %d & nsp = %d\n",
			 mfg, cid, crev, base, nmw, nsw, nmp, nsp));

		if (((mfg == MFGID_ARM) && (cid == DEF_AI_COMP)) || (nsp == 0))
			continue;
		if ((nmw + nsw) == 0) {
			/* A component which is not a core */
			if (cid == OOB_ROUTER_CORE_ID) {
				asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE,
					      &addrl, &addrh, &sizel, &sizeh);
				if (asd != 0)
					sii->oob_router = addrl;
			}
			continue;
		}

		idx = sii->numcores;
		/* sii->eromptr[idx] = base; */
		sii->cia[idx] = cia;
		sii->cib[idx] = cib;
		sii->coreid[idx] = cid;

		for (i = 0; i < nmp; i++) {
			mpd = get_erom_ent(sih, &eromptr, ER_VALID, ER_VALID);
			if ((mpd & ER_TAG) != ER_MP) {
				SI_ERROR(("Not enough MP entries for component 0x%x\n",
					  cid));
				goto error;
			}
			SI_VMSG((" Master port %d, mp: %d id: %d\n", i,
				 (mpd & MPD_MP_MASK) >> MPD_MP_SHIFT,
				 (mpd & MPD_MUI_MASK) >> MPD_MUI_SHIFT));
		}

		/* First Slave Address Descriptor should be port 0:
		 * the main register space for the core
		 */
		asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE, &addrl, &addrh,
			      &sizel, &sizeh);
		if (asd == 0) {
			/* Try again to see if it is a bridge */
			asd = get_asd(sih, &eromptr, 0, 0, AD_ST_BRIDGE, &addrl,
				      &addrh, &sizel, &sizeh);
			if (asd != 0)
				br = true;
			else if ((addrh != 0) || (sizeh != 0)
				 || (sizel != SI_CORE_SIZE)) {
				SI_ERROR(("First Slave ASD for core 0x%04x malformed "
					  "(0x%08x)\n", cid, asd));
				goto error;
			}
		}
		sii->coresba[idx] = addrl;
		sii->coresba_size[idx] = sizel;

		/* Get any more ASDs in port 0 */
		j = 1;
		do {
			asd = get_asd(sih, &eromptr, 0, j, AD_ST_SLAVE, &addrl,
				      &addrh, &sizel, &sizeh);
			if ((asd != 0) && (j == 1) && (sizel == SI_CORE_SIZE)) {
				sii->coresba2[idx] = addrl;
				sii->coresba2_size[idx] = sizel;
			}
			j++;
		} while (asd != 0);

		/* Go through the ASDs for other slave ports */
		for (i = 1; i < nsp; i++) {
			j = 0;
			do {
				asd = get_asd(sih, &eromptr, i, j++, AD_ST_SLAVE,
					      &addrl, &addrh, &sizel, &sizeh);
			} while (asd != 0);
			if (j == 0) {
				SI_ERROR((" SP %d has no address descriptors\n",
					  i));
				goto error;
			}
		}

		/* Now get master wrappers */
		for (i = 0; i < nmw; i++) {
			asd = get_asd(sih, &eromptr, i, 0, AD_ST_MWRAP, &addrl,
				      &addrh, &sizel, &sizeh);
			if (asd == 0) {
				SI_ERROR(("Missing descriptor for MW %d\n", i));
				goto error;
			}
			if ((sizeh != 0) || (sizel != SI_CORE_SIZE)) {
				SI_ERROR(("Master wrapper %d is not 4KB\n", i));
				goto error;
			}
			if (i == 0)
				sii->wrapba[idx] = addrl;
		}

		/* And finally slave wrappers */
		for (i = 0; i < nsw; i++) {
			uint fwp = (nsp == 1) ? 0 : 1;

			asd = get_asd(sih, &eromptr, fwp + i, 0, AD_ST_SWRAP,
				      &addrl, &addrh, &sizel, &sizeh);
			if (asd == 0) {
				SI_ERROR(("Missing descriptor for SW %d\n", i));
				goto error;
			}
			if ((sizeh != 0) || (sizel != SI_CORE_SIZE)) {
				SI_ERROR(("Slave wrapper %d is not 4KB\n", i));
				goto error;
			}
			if ((nmw == 0) && (i == 0))
				sii->wrapba[idx] = addrl;
		}

		/* Don't record bridges */
		if (br)
			continue;

		/* Done with core */
		sii->numcores++;
	}

	SI_ERROR(("Reached end of erom without finding END\n"));

 error:
	sii->numcores = 0;
	return;
}

/* This function changes the logical "focus" to the indicated core.
 * Return the current core's virtual address.
 */
void *ai_setcoreidx(si_t *sih, uint coreidx)
{
	si_info_t *sii = SI_INFO(sih);
	u32 addr = sii->coresba[coreidx];
	u32 wrap = sii->wrapba[coreidx];
	void *regs;

	if (coreidx >= sii->numcores)
		return NULL;

	/*
	 * If the user has provided an interrupt mask enabled function,
	 * then assert interrupts are disabled before switching the core.
	 */
	ASSERT((sii->intrsenabled_fn == NULL)
	       || !(*(sii)->intrsenabled_fn) ((sii)->intr_arg));

	switch (sih->bustype) {
	case SI_BUS:
		/* map new one */
		if (!sii->regs[coreidx]) {
			sii->regs[coreidx] = REG_MAP(addr, SI_CORE_SIZE);
			ASSERT(GOODREGS(sii->regs[coreidx]));
		}
		sii->curmap = regs = sii->regs[coreidx];
		if (!sii->wrappers[coreidx]) {
			sii->wrappers[coreidx] = REG_MAP(wrap, SI_CORE_SIZE);
			ASSERT(GOODREGS(sii->wrappers[coreidx]));
		}
		sii->curwrap = sii->wrappers[coreidx];
		break;

	case PCI_BUS:
		/* point bar0 window */
		pci_write_config_dword(sii->osh->pdev, PCI_BAR0_WIN, addr);
		regs = sii->curmap;
		/* point bar0 2nd 4KB window */
		pci_write_config_dword(sii->osh->pdev, PCI_BAR0_WIN2, wrap);
		break;

#ifdef BCMSDIO
	case SPI_BUS:
	case SDIO_BUS:
#endif				/* BCMSDIO */
		sii->curmap = regs = (void *)(unsigned long)addr;
		sii->curwrap = (void *)(unsigned long)wrap;
		break;

	default:
		ASSERT(0);
		regs = NULL;
		break;
	}

	sii->curmap = regs;
	sii->curidx = coreidx;

	return regs;
}
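
/*
 * Illustrative use of ai_setcoreidx(); the core index below is hypothetical:
 *
 *	void *regs = ai_setcoreidx(sih, 2);
 *
 * On success 'regs' maps the selected core's 4KB register space and the
 * wrapper mapping/window is updated as well; NULL is returned if the index
 * is out of range.
 */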

/* Return the number of address spaces in current core */
int ai_numaddrspaces(si_t *sih)
{
	return 2;
}

/* Return the address of the nth address space in the current core */
u32 ai_addrspace(si_t *sih, uint asidx)
{
	si_info_t *sii;
	uint cidx;

	sii = SI_INFO(sih);
	cidx = sii->curidx;

	if (asidx == 0)
		return sii->coresba[cidx];
	else if (asidx == 1)
		return sii->coresba2[cidx];
	else {
		SI_ERROR(("%s: Need to parse the erom again to find addr space %d\n",
			  __func__, asidx));
		return 0;
	}
}

/* Return the size of the nth address space in the current core */
u32 ai_addrspacesize(si_t *sih, uint asidx)
{
	si_info_t *sii;
	uint cidx;

	sii = SI_INFO(sih);
	cidx = sii->curidx;

	if (asidx == 0)
		return sii->coresba_size[cidx];
	else if (asidx == 1)
		return sii->coresba2_size[cidx];
	else {
		SI_ERROR(("%s: Need to parse the erom again to find addr space %d\n",
			  __func__, asidx));
		return 0;
	}
}
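
/*
 * Return the backplane flag for the current core: the low five bits of the
 * DMP wrapper's oobselouta30 register.  On a 47162a0 MIPS core the wrapper
 * cannot be read, so the core index is returned instead.
 */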
uint ai_flag(si_t *sih)
{
	si_info_t *sii;
	aidmp_t *ai;

	sii = SI_INFO(sih);
	if (BCM47162_DMP()) {
		SI_ERROR(("%s: Attempting to read MIPS DMP registers on 47162a0",
			  __func__));
		return sii->curidx;
	}
	ai = sii->curwrap;

	return R_REG(sii->osh, &ai->oobselouta30) & 0x1f;
}
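
/* Currently a no-op on AI chips */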
void ai_setint(si_t *sih, int siflag)
{
}
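
/* Write 'val' to the current core's DMP wrapper register at byte offset 'offset' */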
void ai_write_wrap_reg(si_t *sih, u32 offset, u32 val)
{
	si_info_t *sii = SI_INFO(sih);
	u32 *w = (u32 *) sii->curwrap;

	W_REG(sii->osh, w + (offset / 4), val);
	return;
}
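
/* Return the manufacturer id (CIA MFG field) of the current core */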
uint ai_corevendor(si_t *sih)
{
	si_info_t *sii;
	u32 cia;

	sii = SI_INFO(sih);
	cia = sii->cia[sii->curidx];
	return (cia & CIA_MFG_MASK) >> CIA_MFG_SHIFT;
}
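
/* Return the revision (CIB REV field) of the current core */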
uint ai_corerev(si_t *sih)
{
	si_info_t *sii;
	u32 cib;

	sii = SI_INFO(sih);
	cib = sii->cib[sii->curidx];
	return (cib & CIB_REV_MASK) >> CIB_REV_SHIFT;
}
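
/* Return true if the current core's clock is enabled and it is out of reset */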
bool ai_iscoreup(si_t *sih)
{
	si_info_t *sii;
	aidmp_t *ai;

	sii = SI_INFO(sih);
	ai = sii->curwrap;

	return (((R_REG(sii->osh, &ai->ioctrl) & (SICF_FGC | SICF_CLOCK_EN)) ==
		 SICF_CLOCK_EN)
		&& ((R_REG(sii->osh, &ai->resetctrl) & AIRC_RESET) == 0));
}

/*
 * Switch to 'coreidx', issue a single arbitrary 32bit register mask&set
 * operation, switch back to the original core, and return the new value.
 *
 * When using the silicon backplane, no fiddling with interrupts or core
 * switches is needed.
 *
 * Also, when using pci/pcie, we can optimize away the core switching for
 * pci registers and (on newer pci cores) chipcommon registers.
 */
uint ai_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val)
{
	uint origidx = 0;
	u32 *r = NULL;
	uint w;
	uint intr_val = 0;
	bool fast = false;
	si_info_t *sii;

	sii = SI_INFO(sih);

	ASSERT(GOODIDX(coreidx));
	ASSERT(regoff < SI_CORE_SIZE);
	ASSERT((val & ~mask) == 0);

	if (coreidx >= SI_MAXCORES)
		return 0;

	if (sih->bustype == SI_BUS) {
		/* If internal bus, we can always get at everything */
		fast = true;
		/* map if does not exist */
		if (!sii->regs[coreidx]) {
			sii->regs[coreidx] = REG_MAP(sii->coresba[coreidx],
						     SI_CORE_SIZE);
			ASSERT(GOODREGS(sii->regs[coreidx]));
		}
		r = (u32 *) ((unsigned char *) sii->regs[coreidx] + regoff);
	} else if (sih->bustype == PCI_BUS) {
		/* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */

		if ((sii->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) {
			/* Chipc registers are mapped at 12KB */

			fast = true;
			r = (u32 *) ((char *)sii->curmap +
				     PCI_16KB0_CCREGS_OFFSET + regoff);
		} else if (sii->pub.buscoreidx == coreidx) {
			/* pci registers are either in the last 2KB of an 8KB
			 * window or, in pcie and pci rev 13, at 8KB
			 */
			fast = true;
			if (SI_FAST(sii))
				r = (u32 *) ((char *)sii->curmap +
					     PCI_16KB0_PCIREGS_OFFSET +
					     regoff);
			else
				r = (u32 *) ((char *)sii->curmap +
					     ((regoff >= SBCONFIGOFF) ?
					      PCI_BAR0_PCISBR_OFFSET :
					      PCI_BAR0_PCIREGS_OFFSET) +
					     regoff);
		}
	}

	if (!fast) {
		INTR_OFF(sii, intr_val);

		/* save current core index */
		origidx = si_coreidx(&sii->pub);

		/* switch core */
		r = (u32 *) ((unsigned char *) ai_setcoreidx(&sii->pub, coreidx) +
			     regoff);
	}
	ASSERT(r != NULL);

	/* mask and set */
	if (mask || val) {
		w = (R_REG(sii->osh, r) & ~mask) | val;
		W_REG(sii->osh, r, w);
	}

	/* readback */
	w = R_REG(sii->osh, r);

	if (!fast) {
		/* restore core index */
		if (origidx != coreidx)
			ai_setcoreidx(&sii->pub, origidx);

		INTR_RESTORE(sii, intr_val);
	}

	return w;
}
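
/*
 * Illustrative use of ai_corereg(); the core index and register offset
 * below are hypothetical:
 *
 *	set bit 2 of the 32-bit register at offset 0x120 in core 0
 *		w = ai_corereg(sih, 0, 0x120, 0x4, 0x4);
 *	read the same register back without modifying it
 *		w = ai_corereg(sih, 0, 0x120, 0, 0);
 */

/*
 * Put the current core into reset.  'bits' are core-specific ioctrl bits
 * written before reset is asserted; if the core is already in reset the
 * function returns immediately.
 */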
void ai_core_disable(si_t *sih, u32 bits)
{
	si_info_t *sii;
	volatile u32 dummy;
	aidmp_t *ai;

	sii = SI_INFO(sih);

	ASSERT(GOODREGS(sii->curwrap));
	ai = sii->curwrap;

	/* if core is already in reset, just return */
	if (R_REG(sii->osh, &ai->resetctrl) & AIRC_RESET)
		return;

	W_REG(sii->osh, &ai->ioctrl, bits);
	dummy = R_REG(sii->osh, &ai->ioctrl);
	udelay(10);

	W_REG(sii->osh, &ai->resetctrl, AIRC_RESET);
	udelay(1);
}

/* reset and re-enable a core
 * inputs:
 * bits - core specific bits that are set during and after reset sequence
 * resetbits - core specific bits that are set only during reset sequence
 */
void ai_core_reset(si_t *sih, u32 bits, u32 resetbits)
{
	si_info_t *sii;
	aidmp_t *ai;
	volatile u32 dummy;

	sii = SI_INFO(sih);
	ASSERT(GOODREGS(sii->curwrap));
	ai = sii->curwrap;

	/*
	 * Must do the disable sequence first to work for arbitrary current
	 * core state.
	 */
	ai_core_disable(sih, (bits | resetbits));

	/*
	 * Now do the initialization sequence.
	 */
	W_REG(sii->osh, &ai->ioctrl, (bits | SICF_FGC | SICF_CLOCK_EN));
	dummy = R_REG(sii->osh, &ai->ioctrl);
	W_REG(sii->osh, &ai->resetctrl, 0);
	udelay(1);

	W_REG(sii->osh, &ai->ioctrl, (bits | SICF_CLOCK_EN));
	dummy = R_REG(sii->osh, &ai->ioctrl);
	udelay(1);
}
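
/* Write-only mask-and-set of the current core's control flags (ioctrl) */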
void ai_core_cflags_wo(si_t *sih, u32 mask, u32 val)
{
	si_info_t *sii;
	aidmp_t *ai;
	u32 w;

	sii = SI_INFO(sih);

	if (BCM47162_DMP()) {
		SI_ERROR(("%s: Accessing MIPS DMP register (ioctrl) on 47162a0",
			  __func__));
		return;
	}

	ASSERT(GOODREGS(sii->curwrap));
	ai = sii->curwrap;

	ASSERT((val & ~mask) == 0);

	if (mask || val) {
		w = ((R_REG(sii->osh, &ai->ioctrl) & ~mask) | val);
		W_REG(sii->osh, &ai->ioctrl, w);
	}
}
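
/* Mask-and-set the current core's control flags and return the new ioctrl value */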
u32 ai_core_cflags(si_t *sih, u32 mask, u32 val)
{
	si_info_t *sii;
	aidmp_t *ai;
	u32 w;

	sii = SI_INFO(sih);
	if (BCM47162_DMP()) {
		SI_ERROR(("%s: Accessing MIPS DMP register (ioctrl) on 47162a0",
			  __func__));
		return 0;
	}

	ASSERT(GOODREGS(sii->curwrap));
	ai = sii->curwrap;

	ASSERT((val & ~mask) == 0);

	if (mask || val) {
		w = ((R_REG(sii->osh, &ai->ioctrl) & ~mask) | val);
		W_REG(sii->osh, &ai->ioctrl, w);
	}

	return R_REG(sii->osh, &ai->ioctrl);
}
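
/* Mask-and-set the current core's status flags and return the new iostatus value */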
u32 ai_core_sflags(si_t *sih, u32 mask, u32 val)
{
	si_info_t *sii;
	aidmp_t *ai;
	u32 w;

	sii = SI_INFO(sih);
	if (BCM47162_DMP()) {
		SI_ERROR(("%s: Accessing MIPS DMP register (iostatus) on 47162a0",
			  __func__));
		return 0;
	}

	ASSERT(GOODREGS(sii->curwrap));
	ai = sii->curwrap;

	ASSERT((val & ~mask) == 0);
	ASSERT((mask & ~SISF_CORE_BITS) == 0);

	if (mask || val) {
		w = ((R_REG(sii->osh, &ai->iostatus) & ~mask) | val);
		W_REG(sii->osh, &ai->iostatus, w);
	}

	return R_REG(sii->osh, &ai->iostatus);
}