/*
 * Generic Broadcom Home Networking Division (HND) DMA module.
 * This supports the following chips: BCM42xx, 44xx, 47xx.
 *
 * Copyright 2007, Broadcom Corporation
 * All Rights Reserved.
 *
 * THIS SOFTWARE IS OFFERED "AS IS", AND BROADCOM GRANTS NO WARRANTIES OF ANY
 * KIND, EXPRESS OR IMPLIED, BY STATUTE, COMMUNICATION OR OTHERWISE. BROADCOM
 * SPECIFICALLY DISCLAIMS ANY IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS
 * FOR A SPECIFIC PURPOSE OR NONINFRINGEMENT CONCERNING THIS SOFTWARE.
 *
 * $Id$
 */
#include <typedefs.h>
#include <bcmdefs.h>
#include <osl.h>
#include "linux_osl.h"
#include <bcmendian.h>
#include <sbconfig.h>
#include "bcmutils.h"
#include <bcmdevs.h>
#include <sbutils.h>

#include "sbhnddma.h"
#include "hnddma.h"
/* debug/trace */
#ifdef BCMDBG
#define DMA_ERROR(args) if (!(*di->msg_level & 1)); else printf args
#define DMA_TRACE(args) if (!(*di->msg_level & 2)); else printf args
#else
#define DMA_ERROR(args)
#define DMA_TRACE(args)
#endif
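
/*
 * Note on the "if (!cond); else printf args" form above (commentary added
 * for clarity; this restates a common macro idiom, it is not documented in
 * the original source): because the macro expands to a complete if/else
 * statement, an invocation such as
 *
 *   if (error)
 *     DMA_ERROR (("%s: oops\n", di->name));
 *   else
 *     do_something ();
 *
 * keeps the caller's "else" bound to the caller's "if" rather than to the
 * hidden "if" inside the macro, and the call site still requires its
 * trailing semicolon like an ordinary statement.
 */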
/* default dma message level (if input msg_level pointer is null in dma_attach()) */
static uint dma_msg_level = 0;

#define MAXNAMEL 8		/* 8 char names */

#define DI_INFO(dmah) (dma_info_t *)dmah
typedef struct osl_dmainfo osldma_t;
/* dma engine software state */
typedef struct dma_info
{
  struct hnddma_pub hnddma;	/* exported structure, don't use hnddma_t,
				 * which could be const */
  uint *msg_level;		/* message level pointer */
  char name[MAXNAMEL];		/* caller's name for diag msgs */

  void *osh;			/* os handle */
  sb_t *sbh;			/* sb handle */

  bool dma64;			/* dma64 enabled */
  bool addrext;			/* this dma engine supports DmaExtendedAddrChanges */

  dma32regs_t *d32txregs;	/* 32 bits dma tx engine registers */
  dma32regs_t *d32rxregs;	/* 32 bits dma rx engine registers */
  dma64regs_t *d64txregs;	/* 64 bits dma tx engine registers */
  dma64regs_t *d64rxregs;	/* 64 bits dma rx engine registers */

  uint32 dma64align;		/* either 8k or 4k depends on number of dd */
  dma32dd_t *txd32;		/* pointer to dma32 tx descriptor ring */
  dma64dd_t *txd64;		/* pointer to dma64 tx descriptor ring */
  uint ntxd;			/* # tx descriptors tunable */
  uint txin;			/* index of next descriptor to reclaim */
  uint txout;			/* index of next descriptor to post */
  void **txp;			/* pointer to parallel array of pointers to packets */
  osldma_t *tx_dmah;		/* DMA TX descriptor ring handle */
  osldma_t **txp_dmah;		/* DMA TX packet data handle */
  ulong txdpa;			/* physical address of descriptor ring */
  uint txdalign;		/* #bytes added to alloc'd mem to align txd */
  uint txdalloc;		/* #bytes allocated for the ring */

  dma32dd_t *rxd32;		/* pointer to dma32 rx descriptor ring */
  dma64dd_t *rxd64;		/* pointer to dma64 rx descriptor ring */
  uint nrxd;			/* # rx descriptors tunable */
  uint rxin;			/* index of next descriptor to reclaim */
  uint rxout;			/* index of next descriptor to post */
  void **rxp;			/* pointer to parallel array of pointers to packets */
  osldma_t *rx_dmah;		/* DMA RX descriptor ring handle */
  osldma_t **rxp_dmah;		/* DMA RX packet data handle */
  ulong rxdpa;			/* physical address of descriptor ring */
  uint rxdalign;		/* #bytes added to alloc'd mem to align rxd */
  uint rxdalloc;		/* #bytes allocated for the ring */

  /* tunables */
  uint rxbufsize;		/* rx buffer size in bytes,
				 * not including the extra headroom */
  uint nrxpost;			/* # rx buffers to keep posted */
  uint rxoffset;		/* rxcontrol offset */
  uint ddoffsetlow;		/* add to get dma address of descriptor ring, low 32 bits */
  uint ddoffsethigh;		/*   high 32 bits */
  uint dataoffsetlow;		/* add to get dma address of data buffer, low 32 bits */
  uint dataoffsethigh;		/*   high 32 bits */
} dma_info_t;
#ifdef BCMDMA64
#define DMA64_ENAB(di) ((di)->dma64)
#define DMA64_CAP TRUE
#else
#define DMA64_ENAB(di) (0)
#define DMA64_CAP FALSE
#endif
/* descriptor bumping macros */
#define XXD(x, n) ((x) & ((n) - 1))	/* faster than %, but n must be power of 2 */
#define TXD(x) XXD((x), di->ntxd)
#define RXD(x) XXD((x), di->nrxd)
#define NEXTTXD(i) TXD(i + 1)
#define PREVTXD(i) TXD(i - 1)
#define NEXTRXD(i) RXD(i + 1)
#define NTXDACTIVE(h, t) TXD(t - h)
#define NRXDACTIVE(h, t) RXD(t - h)

/* macros to convert between byte offsets and indexes */
#define B2I(bytes, type) ((bytes) / sizeof(type))
#define I2B(index, type) ((index) * sizeof(type))

#define PCI32ADDR_HIGH		0xc0000000	/* address[31:30] */
#define PCI32ADDR_HIGH_SHIFT	30		/* address[31:30] */
/* common prototypes */
static bool _dma_isaddrext (dma_info_t * di);
static bool _dma_alloc (dma_info_t * di, uint direction);
static void _dma_detach (dma_info_t * di);
static void _dma_ddtable_init (dma_info_t * di, uint direction, ulong pa);
static void _dma_rxinit (dma_info_t * di);
static void *_dma_rx (dma_info_t * di);
static void _dma_rxfill (dma_info_t * di);
static void _dma_rxreclaim (dma_info_t * di);
static void _dma_rxenable (dma_info_t * di);
static void *_dma_getnextrxp (dma_info_t * di, bool forceall);

static void _dma_txblock (dma_info_t * di);
static void _dma_txunblock (dma_info_t * di);
static uint _dma_txactive (dma_info_t * di);

static void *_dma_peeknexttxp (dma_info_t * di);
static uintptr _dma_getvar (dma_info_t * di, const char *name);
static void _dma_counterreset (dma_info_t * di);
static void _dma_fifoloopbackenable (dma_info_t * di);
/* ** 32 bit DMA prototypes */
static bool dma32_alloc (dma_info_t * di, uint direction);
static bool dma32_txreset (dma_info_t * di);
static bool dma32_rxreset (dma_info_t * di);
static bool dma32_txsuspendedidle (dma_info_t * di);
static int dma32_txfast (dma_info_t * di, void *p0, bool commit);
static void *dma32_getnexttxp (dma_info_t * di, bool forceall);
static void *dma32_getnextrxp (dma_info_t * di, bool forceall);
static void dma32_txrotate (dma_info_t * di);
static bool dma32_rxidle (dma_info_t * di);
static void dma32_txinit (dma_info_t * di);
static bool dma32_txenabled (dma_info_t * di);
static void dma32_txsuspend (dma_info_t * di);
static void dma32_txresume (dma_info_t * di);
static bool dma32_txsuspended (dma_info_t * di);
static void dma32_txreclaim (dma_info_t * di, bool forceall);
static bool dma32_txstopped (dma_info_t * di);
static bool dma32_rxstopped (dma_info_t * di);
static bool dma32_rxenabled (dma_info_t * di);
static bool _dma32_addrext (osl_t * osh, dma32regs_t * dma32regs);
/* ** 64 bit DMA prototypes and stubs */
#ifdef BCMDMA64
static bool dma64_alloc (dma_info_t * di, uint direction);
static bool dma64_txreset (dma_info_t * di);
static bool dma64_rxreset (dma_info_t * di);
static bool dma64_txsuspendedidle (dma_info_t * di);
static int dma64_txfast (dma_info_t * di, void *p0, bool commit);
static void *dma64_getnexttxp (dma_info_t * di, bool forceall);
static void *dma64_getnextrxp (dma_info_t * di, bool forceall);
static void dma64_txrotate (dma_info_t * di);

static bool dma64_rxidle (dma_info_t * di);
static void dma64_txinit (dma_info_t * di);
static bool dma64_txenabled (dma_info_t * di);
static void dma64_txsuspend (dma_info_t * di);
static void dma64_txresume (dma_info_t * di);
static bool dma64_txsuspended (dma_info_t * di);
static void dma64_txreclaim (dma_info_t * di, bool forceall);
static bool dma64_txstopped (dma_info_t * di);
static bool dma64_rxstopped (dma_info_t * di);
static bool dma64_rxenabled (dma_info_t * di);
static bool _dma64_addrext (osl_t * osh, dma64regs_t * dma64regs);
#else
static bool
dma64_alloc (dma_info_t * di, uint direction)
{
  return FALSE;
}

static bool
dma64_txreset (dma_info_t * di)
{
  return FALSE;
}

static bool
dma64_rxreset (dma_info_t * di)
{
  return FALSE;
}

static bool
dma64_txsuspendedidle (dma_info_t * di)
{
  return FALSE;
}

static int
dma64_txfast (dma_info_t * di, void *p0, bool commit)
{
  return 0;
}

static void *
dma64_getnexttxp (dma_info_t * di, bool forceall)
{
  return NULL;
}

static void *
dma64_getnextrxp (dma_info_t * di, bool forceall)
{
  return NULL;
}

static void
dma64_txrotate (dma_info_t * di)
{
  return;
}

static bool
dma64_rxidle (dma_info_t * di)
{
  return FALSE;
}

static void
dma64_txinit (dma_info_t * di)
{
  return;
}

static bool
dma64_txenabled (dma_info_t * di)
{
  return FALSE;
}

static void
dma64_txsuspend (dma_info_t * di)
{
  return;
}

static void
dma64_txresume (dma_info_t * di)
{
  return;
}

static bool
dma64_txsuspended (dma_info_t * di)
{
  return FALSE;
}

static void
dma64_txreclaim (dma_info_t * di, bool forceall)
{
  return;
}

static bool
dma64_txstopped (dma_info_t * di)
{
  return FALSE;
}

static bool
dma64_rxstopped (dma_info_t * di)
{
  return FALSE;
}

static bool
dma64_rxenabled (dma_info_t * di)
{
  return FALSE;
}

static bool
_dma64_addrext (osl_t * osh, dma64regs_t * dma64regs)
{
  return FALSE;
}
#endif /* BCMDMA64 */
#ifdef BCMDBG
static void dma32_dumpring (dma_info_t * di, struct bcmstrbuf *b,
			    dma32dd_t * ring, uint start, uint end,
			    uint max_num);
static void dma32_dump (dma_info_t * di, struct bcmstrbuf *b, bool dumpring);
static void dma32_dumptx (dma_info_t * di, struct bcmstrbuf *b,
			  bool dumpring);
static void dma32_dumprx (dma_info_t * di, struct bcmstrbuf *b,
			  bool dumpring);

static void dma64_dumpring (dma_info_t * di, struct bcmstrbuf *b,
			    dma64dd_t * ring, uint start, uint end,
			    uint max_num);
static void dma64_dump (dma_info_t * di, struct bcmstrbuf *b, bool dumpring);
static void dma64_dumptx (dma_info_t * di, struct bcmstrbuf *b,
			  bool dumpring);
static void dma64_dumprx (dma_info_t * di, struct bcmstrbuf *b,
			  bool dumpring);
#endif
static di_fcn_t dma64proc = {
  (di_detach_t) _dma_detach,
  (di_txinit_t) dma64_txinit,
  (di_txreset_t) dma64_txreset,
  (di_txenabled_t) dma64_txenabled,
  (di_txsuspend_t) dma64_txsuspend,
  (di_txresume_t) dma64_txresume,
  (di_txsuspended_t) dma64_txsuspended,
  (di_txsuspendedidle_t) dma64_txsuspendedidle,
  (di_txfast_t) dma64_txfast,
  (di_txstopped_t) dma64_txstopped,
  (di_txreclaim_t) dma64_txreclaim,
  (di_getnexttxp_t) dma64_getnexttxp,
  (di_peeknexttxp_t) _dma_peeknexttxp,
  (di_txblock_t) _dma_txblock,
  (di_txunblock_t) _dma_txunblock,
  (di_txactive_t) _dma_txactive,
  (di_txrotate_t) dma64_txrotate,

  (di_rxinit_t) _dma_rxinit,
  (di_rxreset_t) dma64_rxreset,
  (di_rxidle_t) dma64_rxidle,
  (di_rxstopped_t) dma64_rxstopped,
  (di_rxenable_t) _dma_rxenable,
  (di_rxenabled_t) dma64_rxenabled,
  (di_rx_t) _dma_rx,
  (di_rxfill_t) _dma_rxfill,
  (di_rxreclaim_t) _dma_rxreclaim,
  (di_getnextrxp_t) _dma_getnextrxp,

  (di_fifoloopbackenable_t) _dma_fifoloopbackenable,
  (di_getvar_t) _dma_getvar,
  (di_counterreset_t) _dma_counterreset,

#ifdef BCMDBG
  (di_dump_t) dma64_dump,
  (di_dumptx_t) dma64_dumptx,
  (di_dumprx_t) dma64_dumprx,
#else
  NULL,
  NULL,
  NULL,
#endif
};
static di_fcn_t dma32proc = {
  (di_detach_t) _dma_detach,
  (di_txinit_t) dma32_txinit,
  (di_txreset_t) dma32_txreset,
  (di_txenabled_t) dma32_txenabled,
  (di_txsuspend_t) dma32_txsuspend,
  (di_txresume_t) dma32_txresume,
  (di_txsuspended_t) dma32_txsuspended,
  (di_txsuspendedidle_t) dma32_txsuspendedidle,
  (di_txfast_t) dma32_txfast,
  (di_txstopped_t) dma32_txstopped,
  (di_txreclaim_t) dma32_txreclaim,
  (di_getnexttxp_t) dma32_getnexttxp,
  (di_peeknexttxp_t) _dma_peeknexttxp,
  (di_txblock_t) _dma_txblock,
  (di_txunblock_t) _dma_txunblock,
  (di_txactive_t) _dma_txactive,
  (di_txrotate_t) dma32_txrotate,

  (di_rxinit_t) _dma_rxinit,
  (di_rxreset_t) dma32_rxreset,
  (di_rxidle_t) dma32_rxidle,
  (di_rxstopped_t) dma32_rxstopped,
  (di_rxenable_t) _dma_rxenable,
  (di_rxenabled_t) dma32_rxenabled,
  (di_rx_t) _dma_rx,
  (di_rxfill_t) _dma_rxfill,
  (di_rxreclaim_t) _dma_rxreclaim,
  (di_getnextrxp_t) _dma_getnextrxp,

  (di_fifoloopbackenable_t) _dma_fifoloopbackenable,
  (di_getvar_t) _dma_getvar,
  (di_counterreset_t) _dma_counterreset,

#ifdef BCMDBG
  (di_dump_t) dma32_dump,
  (di_dumptx_t) dma32_dumptx,
  (di_dumprx_t) dma32_dumprx,
#else
  NULL,
  NULL,
  NULL,
#endif
};
hnddma_t *
dma_attach (osl_t * osh, char *name, sb_t * sbh, void *dmaregstx,
	    void *dmaregsrx, uint ntxd, uint nrxd, uint rxbufsize,
	    uint nrxpost, uint rxoffset, uint * msg_level)
{
  dma_info_t *di;
  uint size;

  /* allocate private info structure */
  if ((di = MALLOC (osh, sizeof (dma_info_t))) == NULL)
    {
#ifdef BCMDBG
      printf ("dma_attach: out of memory, malloced %d bytes\n",
	      MALLOCED (osh));
#endif
      return (NULL);
    }
  bzero ((char *) di, sizeof (dma_info_t));

  di->msg_level = msg_level ? msg_level : &dma_msg_level;

  /* old chips w/o sb are no longer supported */
  ASSERT (sbh != NULL);

  di->dma64 = ((sb_coreflagshi (sbh, 0, 0) & SBTMH_DMA64) == SBTMH_DMA64);

#ifndef BCMDMA64
  if (di->dma64)
    {
      DMA_ERROR (("dma_attach: driver doesn't have the capability to support "
		  "64 bits DMA\n"));
      goto fail;
    }
#endif

  /* check arguments */
  ASSERT (ISPOWEROF2 (ntxd));
  ASSERT (ISPOWEROF2 (nrxd));
  if (nrxd == 0)
    ASSERT (dmaregsrx == NULL);
  if (ntxd == 0)
    ASSERT (dmaregstx == NULL);

  /* init dma reg pointer */
  if (di->dma64)
    {
      ASSERT (ntxd <= D64MAXDD);
      ASSERT (nrxd <= D64MAXDD);
      di->d64txregs = (dma64regs_t *) dmaregstx;
      di->d64rxregs = (dma64regs_t *) dmaregsrx;

      di->dma64align = D64RINGALIGN;
      if ((ntxd < D64MAXDD / 2) && (nrxd < D64MAXDD / 2))
	{
	  /* for a smaller dd table, HW relaxes the alignment requirement */
	  di->dma64align = D64RINGALIGN / 2;
	}
    }
  else
    {
      ASSERT (ntxd <= D32MAXDD);
      ASSERT (nrxd <= D32MAXDD);
      di->d32txregs = (dma32regs_t *) dmaregstx;
      di->d32rxregs = (dma32regs_t *) dmaregsrx;
    }

  DMA_TRACE (("%s: dma_attach: %s osh %p ntxd %d nrxd %d rxbufsize %d nrxpost %d "
	      "rxoffset %d dmaregstx %p dmaregsrx %p\n",
	      name, (di->dma64 ? "DMA64" : "DMA32"), osh, ntxd, nrxd,
	      rxbufsize, nrxpost, rxoffset, dmaregstx, dmaregsrx));

  /* make a private copy of our caller's name */
  strncpy (di->name, name, MAXNAMEL);
  di->name[MAXNAMEL - 1] = '\0';

  di->osh = osh;
  di->sbh = sbh;

  /* save tunables */
  di->ntxd = ntxd;
  di->nrxd = nrxd;

  /* the actual dma size doesn't include the extra headroom */
  if (rxbufsize > BCMEXTRAHDROOM)
    di->rxbufsize = rxbufsize - BCMEXTRAHDROOM;
  else
    di->rxbufsize = rxbufsize;

  di->nrxpost = nrxpost;
  di->rxoffset = rxoffset;

  /*
   * figure out the DMA physical address offset for dd and data
   *   for old chips w/o sb, use zero
   *   for new chips w sb,
   *     PCI/PCIE: they map silicon backplane address to zero-based memory, need offset
   *     Other bus: use zero
   *     SB_BUS BIGENDIAN kludge: use sdram swapped region for data buffer, not descriptor
   */
  di->ddoffsetlow = 0;
  di->dataoffsetlow = 0;
  /* for pci bus, add offset */
  if (sbh->bustype == PCI_BUS)
    {
      if ((sbh->buscoretype == SB_PCIE) && di->dma64)
	{
	  /* pcie with DMA64 */
	  di->ddoffsetlow = 0;
	  di->ddoffsethigh = SB_PCIE_DMA_H32;
	}
      else
	{
	  /* pci(DMA32/DMA64) or pcie with DMA32 */
	  di->ddoffsetlow = SB_PCI_DMA;
	  di->ddoffsethigh = 0;
	}
      di->dataoffsetlow = di->ddoffsetlow;
      di->dataoffsethigh = di->ddoffsethigh;
    }

#if defined(__mips__) && defined(IL_BIGENDIAN)
  di->dataoffsetlow = di->dataoffsetlow + SB_SDRAM_SWAPPED;
#endif

  di->addrext = _dma_isaddrext (di);

  /* allocate tx packet pointer vector */
  if (ntxd)
    {
      size = ntxd * sizeof (void *);
      if ((di->txp = MALLOC (osh, size)) == NULL)
	{
	  DMA_ERROR (("%s: dma_attach: out of tx memory, malloced %d bytes\n",
		      di->name, MALLOCED (osh)));
	  goto fail;
	}
      bzero ((char *) di->txp, size);
    }

  /* allocate rx packet pointer vector */
  if (nrxd)
    {
      size = nrxd * sizeof (void *);
      if ((di->rxp = MALLOC (osh, size)) == NULL)
	{
	  DMA_ERROR (("%s: dma_attach: out of rx memory, malloced %d bytes\n",
		      di->name, MALLOCED (osh)));
	  goto fail;
	}
      bzero ((char *) di->rxp, size);
    }

  /* allocate transmit descriptor ring, only need ntxd descriptors but it must be aligned */
  if (ntxd)
    {
      if (!_dma_alloc (di, DMA_TX))
	goto fail;
    }

  /* allocate receive descriptor ring, only need nrxd descriptors but it must be aligned */
  if (nrxd)
    {
      if (!_dma_alloc (di, DMA_RX))
	goto fail;
    }

  if ((di->ddoffsetlow == SB_PCI_DMA) && (di->txdpa > SB_PCI_DMA_SZ)
      && !di->addrext)
    {
      DMA_ERROR (("%s: dma_attach: txdpa 0x%lx: addrext not supported\n",
		  di->name, di->txdpa));
      goto fail;
    }
  if ((di->ddoffsetlow == SB_PCI_DMA) && (di->rxdpa > SB_PCI_DMA_SZ)
      && !di->addrext)
    {
      DMA_ERROR (("%s: dma_attach: rxdpa 0x%lx: addrext not supported\n",
		  di->name, di->rxdpa));
      goto fail;
    }

  DMA_TRACE (("ddoffsetlow 0x%x ddoffsethigh 0x%x dataoffsetlow 0x%x dataoffsethigh "
	      "0x%x addrext %d\n", di->ddoffsetlow, di->ddoffsethigh,
	      di->dataoffsetlow, di->dataoffsethigh, di->addrext));

  /* allocate tx packet pointer vector and DMA mapping vectors */
  if (ntxd)
    {
      size = ntxd * sizeof (osldma_t **);
      if ((di->txp_dmah = (osldma_t **) MALLOC (osh, size)) == NULL)
	goto fail;
      bzero ((char *) di->txp_dmah, size);
    }
  else
    di->txp_dmah = NULL;

  /* allocate rx packet pointer vector and DMA mapping vectors */
  if (nrxd)
    {
      size = nrxd * sizeof (osldma_t **);
      if ((di->rxp_dmah = (osldma_t **) MALLOC (osh, size)) == NULL)
	goto fail;
      bzero ((char *) di->rxp_dmah, size);
    }
  else
    di->rxp_dmah = NULL;

  /* initialize opsvec of function pointers */
  di->hnddma.di_fn = DMA64_ENAB (di) ? dma64proc : dma32proc;

  return ((hnddma_t *) di);

fail:
  _dma_detach (di);
  return (NULL);
}
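
#if 0
/*
 * Illustrative only, not part of the driver: a minimal sketch of how a
 * caller might attach a channel with this module.  The "wl0" name, the
 * ring sizes and the register-block pointers are assumptions for the
 * example; real callers live in the wl/et drivers and dispatch further
 * operations through the opsvec selected in dma_attach() above.
 */
static hnddma_t *
example_attach (osl_t * osh, sb_t * sbh, void *txregs, void *rxregs)
{
  /* 256 tx / 256 rx descriptors (powers of 2, as the ASSERTs require),
   * 2048-byte rx buffers, keep 32 rx buffers posted, no rx status offset,
   * default message level */
  return dma_attach (osh, "wl0", sbh, txregs, rxregs,
		     256, 256, 2048, 32, 0, NULL);
}
#endif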
/* init the tx or rx descriptor */
static INLINE void
dma32_dd_upd (dma_info_t * di, dma32dd_t * ddring, ulong pa, uint outidx,
	      uint32 * flags, uint32 bufcount)
{
  /* dma32 uses 32 bits control to fit both flags and bufcounter */
  *flags = *flags | (bufcount & CTRL_BC_MASK);

  if ((di->dataoffsetlow != SB_PCI_DMA) || !(pa & PCI32ADDR_HIGH))
    {
      W_SM (&ddring[outidx].addr, BUS_SWAP32 (pa + di->dataoffsetlow));
      W_SM (&ddring[outidx].ctrl, BUS_SWAP32 (*flags));
    }
  else
    {
      /* address extension */
      uint32 ae;
      ASSERT (di->addrext);
      ae = (pa & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT;
      pa &= ~PCI32ADDR_HIGH;

      *flags |= (ae << CTRL_AE_SHIFT);
      W_SM (&ddring[outidx].addr, BUS_SWAP32 (pa + di->dataoffsetlow));
      W_SM (&ddring[outidx].ctrl, BUS_SWAP32 (*flags));
    }
}
static INLINE void
dma64_dd_upd (dma_info_t * di, dma64dd_t * ddring, ulong pa, uint outidx,
	      uint32 * flags, uint32 bufcount)
{
  uint32 ctrl2 = bufcount & D64_CTRL2_BC_MASK;

  /* PCI bus with big(>1G) physical address, use address extension */
  if ((di->dataoffsetlow != SB_PCI_DMA) || !(pa & PCI32ADDR_HIGH))
    {
      W_SM (&ddring[outidx].addrlow, BUS_SWAP32 (pa + di->dataoffsetlow));
      W_SM (&ddring[outidx].addrhigh, BUS_SWAP32 (0 + di->dataoffsethigh));
      W_SM (&ddring[outidx].ctrl1, BUS_SWAP32 (*flags));
      W_SM (&ddring[outidx].ctrl2, BUS_SWAP32 (ctrl2));
    }
  else
    {
      /* address extension */
      uint32 ae;
      ASSERT (di->addrext);

      ae = (pa & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT;
      pa &= ~PCI32ADDR_HIGH;

      ctrl2 |= (ae << D64_CTRL2_AE_SHIFT) & D64_CTRL2_AE;
      W_SM (&ddring[outidx].addrlow, BUS_SWAP32 (pa + di->dataoffsetlow));
      W_SM (&ddring[outidx].addrhigh, BUS_SWAP32 (0 + di->dataoffsethigh));
      W_SM (&ddring[outidx].ctrl1, BUS_SWAP32 (*flags));
      W_SM (&ddring[outidx].ctrl2, BUS_SWAP32 (ctrl2));
    }
}
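
/*
 * Worked example of the address extension used in both dd_upd routines
 * above (commentary added for clarity; the value is illustrative): for a
 * physical address pa = 0xC0001000, bits [31:30] are both set, so
 *
 *   ae = (0xC0001000 & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT == 3
 *   pa &= ~PCI32ADDR_HIGH                ==>  pa == 0x00001000
 *
 * The two high bits travel in the descriptor's AE control field and the
 * engine recombines them with the remaining 30-bit address, which is how
 * buffers above the SB_PCI_DMA window can still be reached.
 */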
static bool
_dma32_addrext (osl_t * osh, dma32regs_t * dma32regs)
{
  uint32 w;

  OR_REG (osh, &dma32regs->control, XC_AE);
  w = R_REG (osh, &dma32regs->control);
  AND_REG (osh, &dma32regs->control, ~XC_AE);
  return ((w & XC_AE) == XC_AE);
}
static bool
_dma_alloc (dma_info_t * di, uint direction)
{
  if (DMA64_ENAB (di))
    {
      return dma64_alloc (di, direction);
    }
  else
    {
      return dma32_alloc (di, direction);
    }
}
/* !! may be called with core in reset */
static void
_dma_detach (dma_info_t * di)
{
  if (di == NULL)
    return;

  DMA_TRACE (("%s: dma_detach\n", di->name));

  /* shouldn't be here if descriptors are unreclaimed */
  ASSERT (di->txin == di->txout);
  ASSERT (di->rxin == di->rxout);

  /* free dma descriptor rings */
  if (DMA64_ENAB (di))
    {
      if (di->txd64)
	DMA_FREE_CONSISTENT (di->osh,
			     ((int8 *) (uintptr) di->txd64 - di->txdalign),
			     di->txdalloc, (di->txdpa - di->txdalign),
			     &di->tx_dmah);
      if (di->rxd64)
	DMA_FREE_CONSISTENT (di->osh,
			     ((int8 *) (uintptr) di->rxd64 - di->rxdalign),
			     di->rxdalloc, (di->rxdpa - di->rxdalign),
			     &di->rx_dmah);
    }
  else
    {
      if (di->txd32)
	DMA_FREE_CONSISTENT (di->osh,
			     ((int8 *) (uintptr) di->txd32 - di->txdalign),
			     di->txdalloc, (di->txdpa - di->txdalign),
			     &di->tx_dmah);
      if (di->rxd32)
	DMA_FREE_CONSISTENT (di->osh,
			     ((int8 *) (uintptr) di->rxd32 - di->rxdalign),
			     di->rxdalloc, (di->rxdpa - di->rxdalign),
			     &di->rx_dmah);
    }

  /* free packet pointer vectors */
  if (di->txp)
    MFREE (di->osh, (void *) di->txp, (di->ntxd * sizeof (void *)));
  if (di->rxp)
    MFREE (di->osh, (void *) di->rxp, (di->nrxd * sizeof (void *)));

  /* free tx packet DMA handles */
  if (di->txp_dmah)
    MFREE (di->osh, (void *) di->txp_dmah, di->ntxd * sizeof (osldma_t **));

  /* free rx packet DMA handles */
  if (di->rxp_dmah)
    MFREE (di->osh, (void *) di->rxp_dmah, di->nrxd * sizeof (osldma_t **));

  /* free our private info structure */
  MFREE (di->osh, (void *) di, sizeof (dma_info_t));
}
/* return TRUE if this dma engine supports DmaExtendedAddrChanges, otherwise FALSE */
static bool
_dma_isaddrext (dma_info_t * di)
{
  if (DMA64_ENAB (di))
    {
      /* DMA64 supports full 32 bits or 64 bits. AE is always valid */

      /* not all tx or rx channels are available */
      if (di->d64txregs != NULL)
	{
	  if (!_dma64_addrext (di->osh, di->d64txregs))
	    {
	      DMA_ERROR (("%s: _dma_isaddrext: DMA64 tx doesn't have AE set\n",
			  di->name));
	      ASSERT (0);
	    }
	  return TRUE;
	}
      else if (di->d64rxregs != NULL)
	{
	  if (!_dma64_addrext (di->osh, di->d64rxregs))
	    {
	      DMA_ERROR (("%s: _dma_isaddrext: DMA64 rx doesn't have AE set\n",
			  di->name));
	      ASSERT (0);
	    }
	  return TRUE;
	}
      return FALSE;
    }
  else if (di->d32txregs)
    return (_dma32_addrext (di->osh, di->d32txregs));
  else if (di->d32rxregs)
    return (_dma32_addrext (di->osh, di->d32rxregs));
  return FALSE;
}
/* initialize descriptor table base address */
static void
_dma_ddtable_init (dma_info_t * di, uint direction, ulong pa)
{
  if (DMA64_ENAB (di))
    {
      if ((di->ddoffsetlow != SB_PCI_DMA) || !(pa & PCI32ADDR_HIGH))
	{
	  if (direction == DMA_TX)
	    {
	      W_REG (di->osh, &di->d64txregs->addrlow,
		     (pa + di->ddoffsetlow));
	      W_REG (di->osh, &di->d64txregs->addrhigh, di->ddoffsethigh);
	    }
	  else
	    {
	      W_REG (di->osh, &di->d64rxregs->addrlow,
		     (pa + di->ddoffsetlow));
	      W_REG (di->osh, &di->d64rxregs->addrhigh, di->ddoffsethigh);
	    }
	}
      else
	{
	  /* DMA64 32bits address extension */
	  uint32 ae;
	  ASSERT (di->addrext);

	  /* shift the high bit(s) from pa to ae */
	  ae = (pa & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT;
	  pa &= ~PCI32ADDR_HIGH;

	  if (direction == DMA_TX)
	    {
	      W_REG (di->osh, &di->d64txregs->addrlow,
		     (pa + di->ddoffsetlow));
	      W_REG (di->osh, &di->d64txregs->addrhigh, di->ddoffsethigh);
	      SET_REG (di->osh, &di->d64txregs->control, D64_XC_AE,
		       (ae << D64_XC_AE_SHIFT));
	    }
	  else
	    {
	      W_REG (di->osh, &di->d64rxregs->addrlow,
		     (pa + di->ddoffsetlow));
	      W_REG (di->osh, &di->d64rxregs->addrhigh, di->ddoffsethigh);
	      SET_REG (di->osh, &di->d64rxregs->control, D64_RC_AE,
		       (ae << D64_RC_AE_SHIFT));
	    }
	}
    }
  else
    {
      if ((di->ddoffsetlow != SB_PCI_DMA) || !(pa & PCI32ADDR_HIGH))
	{
	  if (direction == DMA_TX)
	    W_REG (di->osh, &di->d32txregs->addr, (pa + di->ddoffsetlow));
	  else
	    W_REG (di->osh, &di->d32rxregs->addr, (pa + di->ddoffsetlow));
	}
      else
	{
	  /* dma32 address extension */
	  uint32 ae;
	  ASSERT (di->addrext);

	  /* shift the high bit(s) from pa to ae */
	  ae = (pa & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT;
	  pa &= ~PCI32ADDR_HIGH;

	  if (direction == DMA_TX)
	    {
	      W_REG (di->osh, &di->d32txregs->addr, (pa + di->ddoffsetlow));
	      SET_REG (di->osh, &di->d32txregs->control, XC_AE,
		       ae << XC_AE_SHIFT);
	    }
	  else
	    {
	      W_REG (di->osh, &di->d32rxregs->addr, (pa + di->ddoffsetlow));
	      SET_REG (di->osh, &di->d32rxregs->control, RC_AE,
		       ae << RC_AE_SHIFT);
	    }
	}
    }
}
static void
_dma_fifoloopbackenable (dma_info_t * di)
{
  DMA_TRACE (("%s: dma_fifoloopbackenable\n", di->name));
  if (DMA64_ENAB (di))
    OR_REG (di->osh, &di->d64txregs->control, D64_XC_LE);
  else
    OR_REG (di->osh, &di->d32txregs->control, XC_LE);
}
static void
_dma_rxinit (dma_info_t * di)
{
  DMA_TRACE (("%s: dma_rxinit\n", di->name));

  if (di->nrxd == 0)
    return;

  di->rxin = di->rxout = 0;

  /* clear rx descriptor ring */
  if (DMA64_ENAB (di))
    BZERO_SM ((void *) (uintptr) di->rxd64, (di->nrxd * sizeof (dma64dd_t)));
  else
    BZERO_SM ((void *) (uintptr) di->rxd32, (di->nrxd * sizeof (dma32dd_t)));

  _dma_rxenable (di);
  _dma_ddtable_init (di, DMA_RX, di->rxdpa);
}
static void
_dma_rxenable (dma_info_t * di)
{
  DMA_TRACE (("%s: dma_rxenable\n", di->name));

  if (DMA64_ENAB (di))
    W_REG (di->osh, &di->d64rxregs->control,
	   ((di->rxoffset << D64_RC_RO_SHIFT) | D64_RC_RE));
  else
    W_REG (di->osh, &di->d32rxregs->control,
	   ((di->rxoffset << RC_RO_SHIFT) | RC_RE));
}
/* !! rx entry routine, returns a pointer to the next frame received,
 * or NULL if there are no more
 */
static void *
_dma_rx (dma_info_t * di)
{
  void *p;
  uint len;
  int skiplen = 0;

  while ((p = _dma_getnextrxp (di, FALSE)))
    {
      /* skip giant packets which span multiple rx descriptors */
      if (skiplen > 0)
	{
	  skiplen -= di->rxbufsize;
	  if (skiplen < 0)
	    skiplen = 0;
	  PKTFREE (di->osh, p, FALSE);
	  continue;
	}

      len = ltoh16 (*(uint16 *) (PKTDATA (di->osh, p)));
      DMA_TRACE (("%s: dma_rx len %d\n", di->name, len));

      /* bad frame length check */
      if (len > (di->rxbufsize - di->rxoffset))
	{
	  DMA_ERROR (("%s: dma_rx: bad frame length (%d)\n", di->name, len));
	  if (len > 0)
	    skiplen = len - (di->rxbufsize - di->rxoffset);
	  PKTFREE (di->osh, p, FALSE);
	  di->hnddma.rxgiants++;
	  continue;
	}

      /* set actual length */
      PKTSETLEN (di->osh, p, (di->rxoffset + len));

      break;
    }

  return (p);
}
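
/*
 * Layout of a received buffer, for reference (reconstructed from the code
 * above, not from a datasheet): the chip writes a 16-bit frame length in
 * the first two bytes, followed by rxoffset bytes of per-chip receive
 * status, followed by the frame itself; hence the PKTSETLEN() to
 * (rxoffset + len) and the "len > rxbufsize - rxoffset" giant-frame check.
 *
 *   byte 0..1         frame length (little endian)
 *   byte 2..rxoffset  chip rx status (opaque here)
 *   byte rxoffset..   frame data
 */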
/* post receive buffers */
static void
_dma_rxfill (dma_info_t * di)
{
  void *p;
  uint rxin, rxout;
  uint32 flags = 0;
  uint n;
  uint i;
  uint32 pa;
  uint extra_offset = 0;

  /*
   * Determine how many receive buffers we're lacking
   * from the full complement, allocate, initialize,
   * and post them, then update the chip rx lastdscr.
   */

  rxin = di->rxin;
  rxout = di->rxout;

  n = di->nrxpost - NRXDACTIVE (rxin, rxout);

  DMA_TRACE (("%s: dma_rxfill: post %d\n", di->name, n));

  if (di->rxbufsize > BCMEXTRAHDROOM)
    extra_offset = BCMEXTRAHDROOM;

  for (i = 0; i < n; i++)
    {
      /* di->rxbufsize doesn't include the extra headroom, so we need to
       * add it to the size to be allocated
       */
      if ((p = PKTGET (di->osh, di->rxbufsize + extra_offset, FALSE)) == NULL)
	{
	  DMA_ERROR (("%s: dma_rxfill: out of rxbufs\n", di->name));
	  di->hnddma.rxnobuf++;
	  break;
	}
      /* reserve the extra headroom, if applicable */
      if (extra_offset)
	PKTPULL (di->osh, p, extra_offset);

      /* Do a cached write instead of uncached write since DMA_MAP
       * will flush the cache.
       */
      *(uint32 *) (PKTDATA (di->osh, p)) = 0;

      pa = (uint32) DMA_MAP (di->osh, PKTDATA (di->osh, p),
			     di->rxbufsize, DMA_RX, p, &di->rxp_dmah[rxout]);

      ASSERT (ISALIGNED (pa, 4));

      /* save the free packet pointer */
      ASSERT (di->rxp[rxout] == NULL);
      di->rxp[rxout] = p;

      /* reset flags for each descriptor */
      flags = 0;
      if (DMA64_ENAB (di))
	{
	  if (rxout == (di->nrxd - 1))
	    flags = D64_CTRL1_EOT;

	  dma64_dd_upd (di, di->rxd64, pa, rxout, &flags, di->rxbufsize);
	}
      else
	{
	  if (rxout == (di->nrxd - 1))
	    flags = CTRL_EOT;

	  dma32_dd_upd (di, di->rxd32, pa, rxout, &flags, di->rxbufsize);
	}
      rxout = NEXTRXD (rxout);
    }

  di->rxout = rxout;

  /* update the chip lastdscr pointer */
  if (DMA64_ENAB (di))
    {
      W_REG (di->osh, &di->d64rxregs->ptr, I2B (rxout, dma64dd_t));
    }
  else
    {
      W_REG (di->osh, &di->d32rxregs->ptr, I2B (rxout, dma32dd_t));
    }
}
/* like getnexttxp but no reclaim */
static void *
_dma_peeknexttxp (dma_info_t * di)
{
  uint end, i;

  if (di->ntxd == 0)
    return (NULL);

  if (DMA64_ENAB (di))
    {
      end =
	B2I (R_REG (di->osh, &di->d64txregs->status0) & D64_XS0_CD_MASK,
	     dma64dd_t);
    }
  else
    {
      end =
	B2I (R_REG (di->osh, &di->d32txregs->status) & XS_CD_MASK, dma32dd_t);
    }

  for (i = di->txin; i != end; i = NEXTTXD (i))
    if (di->txp[i])
      return (di->txp[i]);

  return (NULL);
}
static void
_dma_rxreclaim (dma_info_t * di)
{
  void *p;

  /* "unused local" warning suppression for OSLs that
   * define PKTFREE() without using the di->osh arg
   */
  di = di;

  DMA_TRACE (("%s: dma_rxreclaim\n", di->name));

  while ((p = _dma_getnextrxp (di, TRUE)))
    PKTFREE (di->osh, p, FALSE);
}
static void *
_dma_getnextrxp (dma_info_t * di, bool forceall)
{
  if (di->nrxd == 0)
    return (NULL);

  if (DMA64_ENAB (di))
    {
      return dma64_getnextrxp (di, forceall);
    }
  else
    {
      return dma32_getnextrxp (di, forceall);
    }
}
static void
_dma_txblock (dma_info_t * di)
{
  di->hnddma.txavail = 0;
}

static void
_dma_txunblock (dma_info_t * di)
{
  di->hnddma.txavail = di->ntxd - NTXDACTIVE (di->txin, di->txout) - 1;
}

static uint
_dma_txactive (dma_info_t * di)
{
  return (NTXDACTIVE (di->txin, di->txout));
}

static void
_dma_counterreset (dma_info_t * di)
{
  /* reset all software counters */
  di->hnddma.rxgiants = 0;
  di->hnddma.rxnobuf = 0;
  di->hnddma.txnobuf = 0;
}

/* get the address of the var in order to change later */
static uintptr
_dma_getvar (dma_info_t * di, const char *name)
{
  if (!strcmp (name, "&txavail"))
    return ((uintptr) & (di->hnddma.txavail));
  else
    {
      ASSERT (0);
    }
  return (0);
}
void
dma_txpioloopback (osl_t * osh, dma32regs_t * regs)
{
  OR_REG (osh, &regs->control, XC_LE);
}
#ifdef BCMDBG
static void
dma32_dumpring (dma_info_t * di, struct bcmstrbuf *b, dma32dd_t * ring,
		uint start, uint end, uint max_num)
{
  uint i;

  for (i = start; i != end; i = XXD ((i + 1), max_num))
    {
      /* in the format of high->low 8 bytes */
      bcm_bprintf (b, "ring index %d: 0x%x %x\n", i, ring[i].addr,
		   ring[i].ctrl);
    }
}

static void
dma32_dumptx (dma_info_t * di, struct bcmstrbuf *b, bool dumpring)
{
  if (di->ntxd == 0)
    return;

  bcm_bprintf (b, "DMA32: txd32 %p txdpa 0x%lx txp %p txin %d txout %d "
	       "txavail %d\n", di->txd32, di->txdpa, di->txp, di->txin,
	       di->txout, di->hnddma.txavail);

  bcm_bprintf (b, "xmtcontrol 0x%x xmtaddr 0x%x xmtptr 0x%x xmtstatus 0x%x\n",
	       R_REG (di->osh, &di->d32txregs->control),
	       R_REG (di->osh, &di->d32txregs->addr),
	       R_REG (di->osh, &di->d32txregs->ptr),
	       R_REG (di->osh, &di->d32txregs->status));

  if (dumpring && di->txd32)
    dma32_dumpring (di, b, di->txd32, di->txin, di->txout, di->ntxd);
}

static void
dma32_dumprx (dma_info_t * di, struct bcmstrbuf *b, bool dumpring)
{
  if (di->nrxd == 0)
    return;

  bcm_bprintf (b, "DMA32: rxd32 %p rxdpa 0x%lx rxp %p rxin %d rxout %d\n",
	       di->rxd32, di->rxdpa, di->rxp, di->rxin, di->rxout);

  bcm_bprintf (b, "rcvcontrol 0x%x rcvaddr 0x%x rcvptr 0x%x rcvstatus 0x%x\n",
	       R_REG (di->osh, &di->d32rxregs->control),
	       R_REG (di->osh, &di->d32rxregs->addr),
	       R_REG (di->osh, &di->d32rxregs->ptr),
	       R_REG (di->osh, &di->d32rxregs->status));
  if (di->rxd32 && dumpring)
    dma32_dumpring (di, b, di->rxd32, di->rxin, di->rxout, di->nrxd);
}

static void
dma32_dump (dma_info_t * di, struct bcmstrbuf *b, bool dumpring)
{
  dma32_dumptx (di, b, dumpring);
  dma32_dumprx (di, b, dumpring);
}

static void
dma64_dumpring (dma_info_t * di, struct bcmstrbuf *b, dma64dd_t * ring,
		uint start, uint end, uint max_num)
{
  uint i;

  for (i = start; i != end; i = XXD ((i + 1), max_num))
    {
      /* in the format of high->low 16 bytes */
      bcm_bprintf (b, "ring index %d: 0x%x %x %x %x\n",
		   i, ring[i].addrhigh, ring[i].addrlow, ring[i].ctrl2,
		   ring[i].ctrl1);
    }
}

static void
dma64_dumptx (dma_info_t * di, struct bcmstrbuf *b, bool dumpring)
{
  if (di->ntxd == 0)
    return;

  bcm_bprintf (b, "DMA64: txd64 %p txdpa 0x%lx txp %p txin %d txout %d "
	       "txavail %d\n", di->txd64, di->txdpa, di->txp, di->txin,
	       di->txout, di->hnddma.txavail);

  bcm_bprintf (b, "xmtcontrol 0x%x xmtaddrlow 0x%x xmtaddrhigh 0x%x "
	       "xmtptr 0x%x xmtstatus0 0x%x xmtstatus1 0x%x\n",
	       R_REG (di->osh, &di->d64txregs->control),
	       R_REG (di->osh, &di->d64txregs->addrlow),
	       R_REG (di->osh, &di->d64txregs->addrhigh),
	       R_REG (di->osh, &di->d64txregs->ptr),
	       R_REG (di->osh, &di->d64txregs->status0),
	       R_REG (di->osh, &di->d64txregs->status1));

  if (dumpring && di->txd64)
    {
      dma64_dumpring (di, b, di->txd64, di->txin, di->txout, di->ntxd);
    }
}

static void
dma64_dumprx (dma_info_t * di, struct bcmstrbuf *b, bool dumpring)
{
  if (di->nrxd == 0)
    return;

  bcm_bprintf (b, "DMA64: rxd64 %p rxdpa 0x%lx rxp %p rxin %d rxout %d\n",
	       di->rxd64, di->rxdpa, di->rxp, di->rxin, di->rxout);

  bcm_bprintf (b, "rcvcontrol 0x%x rcvaddrlow 0x%x rcvaddrhigh 0x%x rcvptr "
	       "0x%x rcvstatus0 0x%x rcvstatus1 0x%x\n",
	       R_REG (di->osh, &di->d64rxregs->control),
	       R_REG (di->osh, &di->d64rxregs->addrlow),
	       R_REG (di->osh, &di->d64rxregs->addrhigh),
	       R_REG (di->osh, &di->d64rxregs->ptr),
	       R_REG (di->osh, &di->d64rxregs->status0),
	       R_REG (di->osh, &di->d64rxregs->status1));
  if (di->rxd64 && dumpring)
    {
      dma64_dumpring (di, b, di->rxd64, di->rxin, di->rxout, di->nrxd);
    }
}

static void
dma64_dump (dma_info_t * di, struct bcmstrbuf *b, bool dumpring)
{
  dma64_dumptx (di, b, dumpring);
  dma64_dumprx (di, b, dumpring);
}

#endif /* BCMDBG */
/* 32 bits DMA functions */
static void
dma32_txinit (dma_info_t * di)
{
  DMA_TRACE (("%s: dma_txinit\n", di->name));

  if (di->ntxd == 0)
    return;

  di->txin = di->txout = 0;
  di->hnddma.txavail = di->ntxd - 1;

  /* clear tx descriptor ring */
  BZERO_SM ((void *) (uintptr) di->txd32, (di->ntxd * sizeof (dma32dd_t)));
  W_REG (di->osh, &di->d32txregs->control, XC_XE);
  _dma_ddtable_init (di, DMA_TX, di->txdpa);
}

static bool
dma32_txenabled (dma_info_t * di)
{
  uint32 xc;

  /* If the chip is dead, it is not enabled :-) */
  xc = R_REG (di->osh, &di->d32txregs->control);
  return ((xc != 0xffffffff) && (xc & XC_XE));
}

static void
dma32_txsuspend (dma_info_t * di)
{
  DMA_TRACE (("%s: dma_txsuspend\n", di->name));

  if (di->ntxd == 0)
    return;

  OR_REG (di->osh, &di->d32txregs->control, XC_SE);
}

static void
dma32_txresume (dma_info_t * di)
{
  DMA_TRACE (("%s: dma_txresume\n", di->name));

  if (di->ntxd == 0)
    return;

  AND_REG (di->osh, &di->d32txregs->control, ~XC_SE);
}

static bool
dma32_txsuspended (dma_info_t * di)
{
  return (di->ntxd == 0)
    || ((R_REG (di->osh, &di->d32txregs->control) & XC_SE) == XC_SE);
}

static void
dma32_txreclaim (dma_info_t * di, bool forceall)
{
  void *p;

  DMA_TRACE (("%s: dma_txreclaim %s\n", di->name, forceall ? "all" : ""));

  while ((p = dma32_getnexttxp (di, forceall)))
    PKTFREE (di->osh, p, TRUE);
}

static bool
dma32_txstopped (dma_info_t * di)
{
  return ((R_REG (di->osh, &di->d32txregs->status) & XS_XS_MASK) ==
	  XS_XS_STOPPED);
}

static bool
dma32_rxstopped (dma_info_t * di)
{
  return ((R_REG (di->osh, &di->d32rxregs->status) & RS_RS_MASK) ==
	  RS_RS_STOPPED);
}
static bool
dma32_alloc (dma_info_t * di, uint direction)
{
  uint size;
  uint ddlen;
  void *va;

  ddlen = sizeof (dma32dd_t);

  size = (direction == DMA_TX) ? (di->ntxd * ddlen) : (di->nrxd * ddlen);

  if (!ISALIGNED (DMA_CONSISTENT_ALIGN, D32RINGALIGN))
    size += D32RINGALIGN;

  if (direction == DMA_TX)
    {
      if ((va =
	   DMA_ALLOC_CONSISTENT (di->osh, size, &di->txdpa,
				 &di->tx_dmah)) == NULL)
	{
	  DMA_ERROR (("%s: dma_attach: DMA_ALLOC_CONSISTENT(ntxd) failed\n",
		      di->name));
	  return FALSE;
	}

      di->txd32 = (dma32dd_t *) ROUNDUP ((uintptr) va, D32RINGALIGN);
      di->txdalign = (uint) ((int8 *) (uintptr) di->txd32 - (int8 *) va);
      di->txdpa += di->txdalign;
      di->txdalloc = size;
      ASSERT (ISALIGNED ((uintptr) di->txd32, D32RINGALIGN));
    }
  else
    {
      if ((va =
	   DMA_ALLOC_CONSISTENT (di->osh, size, &di->rxdpa,
				 &di->rx_dmah)) == NULL)
	{
	  DMA_ERROR (("%s: dma_attach: DMA_ALLOC_CONSISTENT(nrxd) failed\n",
		      di->name));
	  return FALSE;
	}
      di->rxd32 = (dma32dd_t *) ROUNDUP ((uintptr) va, D32RINGALIGN);
      di->rxdalign = (uint) ((int8 *) (uintptr) di->rxd32 - (int8 *) va);
      di->rxdpa += di->rxdalign;
      di->rxdalloc = size;
      ASSERT (ISALIGNED ((uintptr) di->rxd32, D32RINGALIGN));
    }

  return TRUE;
}
static bool
dma32_txreset (dma_info_t * di)
{
  uint32 status;

  if (di->ntxd == 0)
    return TRUE;

  /* suspend tx DMA first */
  W_REG (di->osh, &di->d32txregs->control, XC_SE);
  SPINWAIT (((status = (R_REG (di->osh, &di->d32txregs->status) & XS_XS_MASK))
	     != XS_XS_DISABLED) &&
	    (status != XS_XS_IDLE) && (status != XS_XS_STOPPED), (10000));

  W_REG (di->osh, &di->d32txregs->control, 0);
  SPINWAIT (((status = (R_REG (di->osh,
			       &di->d32txregs->status) & XS_XS_MASK)) !=
	     XS_XS_DISABLED), 10000);

  /* wait for the last transaction to complete */
  OSL_DELAY (300);

  return (status == XS_XS_DISABLED);
}

static bool
dma32_rxidle (dma_info_t * di)
{
  DMA_TRACE (("%s: dma_rxidle\n", di->name));

  if (di->nrxd == 0)
    return TRUE;

  return ((R_REG (di->osh, &di->d32rxregs->status) & RS_CD_MASK) ==
	  R_REG (di->osh, &di->d32rxregs->ptr));
}

static bool
dma32_rxreset (dma_info_t * di)
{
  uint32 status;

  if (di->nrxd == 0)
    return TRUE;

  W_REG (di->osh, &di->d32rxregs->control, 0);
  SPINWAIT (((status = (R_REG (di->osh,
			       &di->d32rxregs->status) & RS_RS_MASK)) !=
	     RS_RS_DISABLED), 10000);

  return (status == RS_RS_DISABLED);
}

static bool
dma32_rxenabled (dma_info_t * di)
{
  uint32 rc;

  rc = R_REG (di->osh, &di->d32rxregs->control);
  return ((rc != 0xffffffff) && (rc & RC_RE));
}

static bool
dma32_txsuspendedidle (dma_info_t * di)
{
  if (di->ntxd == 0)
    return TRUE;

  if (!(R_REG (di->osh, &di->d32txregs->control) & XC_SE))
    return 0;

  if ((R_REG (di->osh, &di->d32txregs->status) & XS_XS_MASK) != XS_XS_IDLE)
    return 0;

  OSL_DELAY (2);
  return ((R_REG (di->osh, &di->d32txregs->status) & XS_XS_MASK) ==
	  XS_XS_IDLE);
}
/* !! tx entry routine
 * supports full 32bit dma engine buffer addressing so
 * dma buffers can cross 4 Kbyte page boundaries.
 */
static int
dma32_txfast (dma_info_t * di, void *p0, bool commit)
{
  void *p, *next;
  uchar *data;
  uint len;
  uint txout;
  uint32 flags = 0;
  uint32 pa;

  DMA_TRACE (("%s: dma_txfast\n", di->name));

  txout = di->txout;

  /*
   * Walk the chain of packet buffers
   * allocating and initializing transmit descriptor entries.
   */
  for (p = p0; p; p = next)
    {
      data = PKTDATA (di->osh, p);
      len = PKTLEN (di->osh, p);
      next = PKTNEXT (di->osh, p);

      /* return nonzero if out of tx descriptors */
      if (NEXTTXD (txout) == di->txin)
	goto outoftxd;

      if (len == 0)
	continue;

      /* get physical address of buffer start */
      pa =
	(uint32) DMA_MAP (di->osh, data, len, DMA_TX, p,
			  &di->txp_dmah[txout]);

      flags = 0;
      if (p == p0)
	flags |= CTRL_SOF;
      if (next == NULL)
	flags |= (CTRL_IOC | CTRL_EOF);
      if (txout == (di->ntxd - 1))
	flags |= CTRL_EOT;

      dma32_dd_upd (di, di->txd32, pa, txout, &flags, len);
      ASSERT (di->txp[txout] == NULL);

      txout = NEXTTXD (txout);
    }

  /* if last txd eof not set, fix it */
  if (!(flags & CTRL_EOF))
    W_SM (&di->txd32[PREVTXD (txout)].ctrl,
	  BUS_SWAP32 (flags | CTRL_IOC | CTRL_EOF));

  /* save the packet */
  di->txp[PREVTXD (txout)] = p0;

  /* bump the tx descriptor index */
  di->txout = txout;

  /* kick the chip */
  if (commit)
    W_REG (di->osh, &di->d32txregs->ptr, I2B (txout, dma32dd_t));

  /* tx flow control */
  di->hnddma.txavail = di->ntxd - NTXDACTIVE (di->txin, di->txout) - 1;

  return (0);

outoftxd:
  DMA_ERROR (("%s: dma_txfast: out of txds\n", di->name));
  PKTFREE (di->osh, p0, TRUE);
  di->hnddma.txavail = 0;
  di->hnddma.txnobuf++;
  return (-1);
}
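
#if 0
/*
 * Illustrative only, not part of the driver: how a caller might post a
 * packet chain with dma32_txfast() and later reclaim completions.  "di"
 * and "p" are assumed to come from the caller's context; real users go
 * through the opsvec rather than calling these statics directly.
 */
if (dma32_txfast (di, p, TRUE) < 0)	/* commit == TRUE kicks the chip */
  errors++;				/* p was already freed on failure */

/* later, e.g. from the tx-complete interrupt: */
while ((p = dma32_getnexttxp (di, FALSE)))
  PKTFREE (di->osh, p, TRUE);
#endif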
/*
 * Reclaim next completed txd (txds if using chained buffers) and
 * return associated packet.
 * If 'forceall' is true, reclaim txd(s) and return associated packet
 * regardless of the value of the hardware "curr" pointer.
 */
static void *
dma32_getnexttxp (dma_info_t * di, bool forceall)
{
  uint start, end, i;
  void *txp;

  DMA_TRACE (("%s: dma_getnexttxp %s\n", di->name, forceall ? "all" : ""));

  if (di->ntxd == 0)
    return (NULL);

  txp = NULL;

  start = di->txin;
  if (forceall)
    end = di->txout;
  else
    end =
      B2I (R_REG (di->osh, &di->d32txregs->status) & XS_CD_MASK, dma32dd_t);

  if ((start == 0) && (end > di->txout))
    goto bogus;

  for (i = start; i != end && !txp; i = NEXTTXD (i))
    {
      DMA_UNMAP (di->osh,
		 (BUS_SWAP32 (R_SM (&di->txd32[i].addr)) - di->dataoffsetlow),
		 (BUS_SWAP32 (R_SM (&di->txd32[i].ctrl)) & CTRL_BC_MASK),
		 DMA_TX, di->txp[i], &di->txp_dmah[i]);

      W_SM (&di->txd32[i].addr, 0xdeadbeef);
      txp = di->txp[i];
      di->txp[i] = NULL;
    }

  di->txin = i;

  /* tx flow control */
  di->hnddma.txavail = di->ntxd - NTXDACTIVE (di->txin, di->txout) - 1;

  return (txp);

bogus:
  DMA_ERROR (("dma_getnexttxp: bogus curr: start %d end %d txout %d force %d\n",
	      start, end, di->txout, forceall));
  return (NULL);
}
static void *
dma32_getnextrxp (dma_info_t * di, bool forceall)
{
  uint i;
  void *rxp;

  /* if forcing, dma engine must be disabled */
  ASSERT (!forceall || !dma32_rxenabled (di));

  i = di->rxin;

  /* return if no packets posted */
  if (i == di->rxout)
    return (NULL);

  /* ignore curr if forceall */
  if (!forceall
      && (i ==
	  B2I (R_REG (di->osh, &di->d32rxregs->status) & RS_CD_MASK,
	       dma32dd_t)))
    return (NULL);

  /* get the packet pointer that corresponds to the rx descriptor */
  rxp = di->rxp[i];
  ASSERT (rxp);
  di->rxp[i] = NULL;

  /* clear this packet from the descriptor ring */
  DMA_UNMAP (di->osh,
	     (BUS_SWAP32 (R_SM (&di->rxd32[i].addr)) - di->dataoffsetlow),
	     di->rxbufsize, DMA_RX, rxp, &di->rxp_dmah[i]);

  W_SM (&di->rxd32[i].addr, 0xdeadbeef);

  di->rxin = NEXTRXD (i);

  return (rxp);
}
/*
 * Rotate all active tx dma ring entries "forward" by (ActiveDescriptor - txin).
 */
static void
dma32_txrotate (dma_info_t * di)
{
  uint ad;
  uint nactive;
  uint rot;
  uint old, new;
  uint32 w;
  uint first, last;

  ASSERT (dma32_txsuspendedidle (di));

  nactive = _dma_txactive (di);
  ad =
    B2I (((R_REG (di->osh, &di->d32txregs->status) & XS_AD_MASK) >>
	  XS_AD_SHIFT), dma32dd_t);
  rot = TXD (ad - di->txin);

  ASSERT (rot < di->ntxd);

  /* full-ring case is a lot harder - don't worry about this */
  if (rot >= (di->ntxd - nactive))
    {
      DMA_ERROR (("%s: dma_txrotate: ring full - punt\n", di->name));
      return;
    }

  first = di->txin;
  last = PREVTXD (di->txout);

  /* move entries starting at last and moving backwards to first */
  for (old = last; old != PREVTXD (first); old = PREVTXD (old))
    {
      new = TXD (old + rot);

      /*
       * Move the tx dma descriptor.
       * EOT is set only in the last entry in the ring.
       */
      w = BUS_SWAP32 (R_SM (&di->txd32[old].ctrl)) & ~CTRL_EOT;
      if (new == (di->ntxd - 1))
	w |= CTRL_EOT;
      W_SM (&di->txd32[new].ctrl, BUS_SWAP32 (w));
      W_SM (&di->txd32[new].addr, R_SM (&di->txd32[old].addr));

      /* zap the old tx dma descriptor address field */
      W_SM (&di->txd32[old].addr, BUS_SWAP32 (0xdeadbeef));

      /* move the corresponding txp[] entry */
      ASSERT (di->txp[new] == NULL);
      di->txp[new] = di->txp[old];
      di->txp[old] = NULL;
    }

  /* update txin and txout */
  di->txin = ad;
  di->txout = TXD (di->txout + rot);
  di->hnddma.txavail = di->ntxd - NTXDACTIVE (di->txin, di->txout) - 1;

  /* kick the chip */
  W_REG (di->osh, &di->d32txregs->ptr, I2B (di->txout, dma32dd_t));
}
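
/*
 * Worked example of the rotation (commentary added for clarity; the
 * numbers are illustrative): with ntxd = 8, txin = 2, txout = 5 and the
 * chip's active descriptor at ad = 0, rot = TXD(0 - 2) = 6, so the three
 * posted descriptors at 2,3,4 are copied to (old + rot) & 7 = 0,1,2, EOT
 * is re-placed on the physical end of the ring, and txin/txout become
 * 0 and 3.  The engine can then be restarted at the ring head without
 * re-posting any packets.
 */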
/* 64 bits DMA functions */

#ifdef BCMDMA64
static void
dma64_txinit (dma_info_t * di)
{
  DMA_TRACE (("%s: dma_txinit\n", di->name));

  if (di->ntxd == 0)
    return;

  di->txin = di->txout = 0;
  di->hnddma.txavail = di->ntxd - 1;

  /* clear tx descriptor ring */
  BZERO_SM ((void *) (uintptr) di->txd64, (di->ntxd * sizeof (dma64dd_t)));
  W_REG (di->osh, &di->d64txregs->control, D64_XC_XE);
  _dma_ddtable_init (di, DMA_TX, di->txdpa);
}

static bool
dma64_txenabled (dma_info_t * di)
{
  uint32 xc;

  /* If the chip is dead, it is not enabled :-) */
  xc = R_REG (di->osh, &di->d64txregs->control);
  return ((xc != 0xffffffff) && (xc & D64_XC_XE));
}

static void
dma64_txsuspend (dma_info_t * di)
{
  DMA_TRACE (("%s: dma_txsuspend\n", di->name));

  if (di->ntxd == 0)
    return;

  OR_REG (di->osh, &di->d64txregs->control, D64_XC_SE);
}

static void
dma64_txresume (dma_info_t * di)
{
  DMA_TRACE (("%s: dma_txresume\n", di->name));

  if (di->ntxd == 0)
    return;

  AND_REG (di->osh, &di->d64txregs->control, ~D64_XC_SE);
}

static bool
dma64_txsuspended (dma_info_t * di)
{
  return (di->ntxd == 0)
    || ((R_REG (di->osh, &di->d64txregs->control) & D64_XC_SE) == D64_XC_SE);
}
static void
dma64_txreclaim (dma_info_t * di, bool forceall)
{
  void *p;

  DMA_TRACE (("%s: dma_txreclaim %s\n", di->name, forceall ? "all" : ""));

  while ((p = dma64_getnexttxp (di, forceall)))
    PKTFREE (di->osh, p, TRUE);
}

static bool
dma64_txstopped (dma_info_t * di)
{
  return ((R_REG (di->osh, &di->d64txregs->status0) & D64_XS0_XS_MASK) ==
	  D64_XS0_XS_STOPPED);
}

static bool
dma64_rxstopped (dma_info_t * di)
{
  return ((R_REG (di->osh, &di->d64rxregs->status0) & D64_RS0_RS_MASK) ==
	  D64_RS0_RS_STOPPED);
}
static bool
dma64_alloc (dma_info_t * di, uint direction)
{
  uint size;
  uint ddlen;
  uint32 alignbytes;
  void *va;

  ddlen = sizeof (dma64dd_t);

  size = (direction == DMA_TX) ? (di->ntxd * ddlen) : (di->nrxd * ddlen);

  alignbytes = di->dma64align;

  if (!ISALIGNED (DMA_CONSISTENT_ALIGN, alignbytes))
    size += alignbytes;

  if (direction == DMA_TX)
    {
      if ((va =
	   DMA_ALLOC_CONSISTENT (di->osh, size, &di->txdpa,
				 &di->tx_dmah)) == NULL)
	{
	  DMA_ERROR (("%s: dma_attach: DMA_ALLOC_CONSISTENT(ntxd) failed\n",
		      di->name));
	  return FALSE;
	}

      di->txd64 = (dma64dd_t *) ROUNDUP ((uintptr) va, alignbytes);
      di->txdalign = (uint) ((int8 *) (uintptr) di->txd64 - (int8 *) va);
      di->txdpa += di->txdalign;
      di->txdalloc = size;
      ASSERT (ISALIGNED ((uintptr) di->txd64, alignbytes));
    }
  else
    {
      if ((va =
	   DMA_ALLOC_CONSISTENT (di->osh, size, &di->rxdpa,
				 &di->rx_dmah)) == NULL)
	{
	  DMA_ERROR (("%s: dma_attach: DMA_ALLOC_CONSISTENT(nrxd) failed\n",
		      di->name));
	  return FALSE;
	}
      di->rxd64 = (dma64dd_t *) ROUNDUP ((uintptr) va, alignbytes);
      di->rxdalign = (uint) ((int8 *) (uintptr) di->rxd64 - (int8 *) va);
      di->rxdpa += di->rxdalign;
      di->rxdalloc = size;
      ASSERT (ISALIGNED ((uintptr) di->rxd64, alignbytes));
    }

  return TRUE;
}
static bool
dma64_txreset (dma_info_t * di)
{
  uint32 status;

  if (di->ntxd == 0)
    return TRUE;

  /* suspend tx DMA first */
  W_REG (di->osh, &di->d64txregs->control, D64_XC_SE);
  SPINWAIT (((status =
	      (R_REG (di->osh, &di->d64txregs->status0) & D64_XS0_XS_MASK)) !=
	     D64_XS0_XS_DISABLED) && (status != D64_XS0_XS_IDLE)
	    && (status != D64_XS0_XS_STOPPED), 10000);

  W_REG (di->osh, &di->d64txregs->control, 0);
  SPINWAIT (((status =
	      (R_REG (di->osh, &di->d64txregs->status0) & D64_XS0_XS_MASK)) !=
	     D64_XS0_XS_DISABLED), 10000);

  /* wait for the last transaction to complete */
  OSL_DELAY (300);

  return (status == D64_XS0_XS_DISABLED);
}

static bool
dma64_rxidle (dma_info_t * di)
{
  DMA_TRACE (("%s: dma_rxidle\n", di->name));

  if (di->nrxd == 0)
    return TRUE;

  return ((R_REG (di->osh, &di->d64rxregs->status0) & D64_RS0_CD_MASK) ==
	  R_REG (di->osh, &di->d64rxregs->ptr));
}

static bool
dma64_rxreset (dma_info_t * di)
{
  uint32 status;

  if (di->nrxd == 0)
    return TRUE;

  W_REG (di->osh, &di->d64rxregs->control, 0);
  SPINWAIT (((status =
	      (R_REG (di->osh, &di->d64rxregs->status0) & D64_RS0_RS_MASK)) !=
	     D64_RS0_RS_DISABLED), 10000);

  return (status == D64_RS0_RS_DISABLED);
}

static bool
dma64_rxenabled (dma_info_t * di)
{
  uint32 rc;

  rc = R_REG (di->osh, &di->d64rxregs->control);
  return ((rc != 0xffffffff) && (rc & D64_RC_RE));
}

static bool
dma64_txsuspendedidle (dma_info_t * di)
{
  if (di->ntxd == 0)
    return TRUE;

  if (!(R_REG (di->osh, &di->d64txregs->control) & D64_XC_SE))
    return 0;

  if ((R_REG (di->osh, &di->d64txregs->status0) & D64_XS0_XS_MASK) ==
      D64_XS0_XS_IDLE)
    return 1;

  return 0;
}
/* !! tx entry routine */
static int
dma64_txfast (dma_info_t * di, void *p0, bool commit)
{
  void *p, *next;
  uchar *data;
  uint len;
  uint txout;
  uint32 flags = 0;
  uint32 pa;

  DMA_TRACE (("%s: dma_txfast\n", di->name));

  txout = di->txout;

  /*
   * Walk the chain of packet buffers
   * allocating and initializing transmit descriptor entries.
   */
  for (p = p0; p; p = next)
    {
      data = PKTDATA (di->osh, p);
      len = PKTLEN (di->osh, p);
      next = PKTNEXT (di->osh, p);

      /* return nonzero if out of tx descriptors */
      if (NEXTTXD (txout) == di->txin)
	goto outoftxd;

      if (len == 0)
	continue;

      /* get physical address of buffer start */
      pa =
	(uint32) DMA_MAP (di->osh, data, len, DMA_TX, p,
			  &di->txp_dmah[txout]);

      flags = 0;
      if (p == p0)
	flags |= D64_CTRL1_SOF;
      if (next == NULL)
	flags |= (D64_CTRL1_IOC | D64_CTRL1_EOF);
      if (txout == (di->ntxd - 1))
	flags |= D64_CTRL1_EOT;

      dma64_dd_upd (di, di->txd64, pa, txout, &flags, len);
      ASSERT (di->txp[txout] == NULL);

      txout = NEXTTXD (txout);
    }

  /* if last txd eof not set, fix it */
  if (!(flags & D64_CTRL1_EOF))
    W_SM (&di->txd64[PREVTXD (txout)].ctrl1,
	  BUS_SWAP32 (flags | D64_CTRL1_IOC | D64_CTRL1_EOF));

  /* save the packet */
  di->txp[PREVTXD (txout)] = p0;

  /* bump the tx descriptor index */
  di->txout = txout;

  /* kick the chip */
  if (commit)
    W_REG (di->osh, &di->d64txregs->ptr, I2B (txout, dma64dd_t));

  /* tx flow control */
  di->hnddma.txavail = di->ntxd - NTXDACTIVE (di->txin, di->txout) - 1;

  return (0);

outoftxd:
  DMA_ERROR (("%s: dma_txfast: out of txds\n", di->name));
  PKTFREE (di->osh, p0, TRUE);
  di->hnddma.txavail = 0;
  di->hnddma.txnobuf++;
  return (-1);
}
/*
 * Reclaim next completed txd (txds if using chained buffers) and
 * return associated packet.
 * If 'forceall' is true, reclaim txd(s) and return associated packet
 * regardless of the value of the hardware "curr" pointer.
 */
static void *
dma64_getnexttxp (dma_info_t * di, bool forceall)
{
  uint start, end, i;
  void *txp;

  DMA_TRACE (("%s: dma_getnexttxp %s\n", di->name, forceall ? "all" : ""));

  if (di->ntxd == 0)
    return (NULL);

  txp = NULL;

  start = di->txin;
  if (forceall)
    end = di->txout;
  else
    end =
      B2I (R_REG (di->osh, &di->d64txregs->status0) & D64_XS0_CD_MASK,
	   dma64dd_t);

  if ((start == 0) && (end > di->txout))
    goto bogus;

  for (i = start; i != end && !txp; i = NEXTTXD (i))
    {
      DMA_UNMAP (di->osh,
		 (BUS_SWAP32 (R_SM (&di->txd64[i].addrlow)) -
		  di->dataoffsetlow),
		 (BUS_SWAP32 (R_SM (&di->txd64[i].ctrl2)) &
		  D64_CTRL2_BC_MASK), DMA_TX, di->txp[i], &di->txp_dmah[i]);

      W_SM (&di->txd64[i].addrlow, 0xdeadbeef);
      W_SM (&di->txd64[i].addrhigh, 0xdeadbeef);

      txp = di->txp[i];
      di->txp[i] = NULL;
    }

  di->txin = i;

  /* tx flow control */
  di->hnddma.txavail = di->ntxd - NTXDACTIVE (di->txin, di->txout) - 1;

  return (txp);

bogus:
  DMA_ERROR (("dma_getnexttxp: bogus curr: start %d end %d txout %d force %d\n",
	      start, end, di->txout, forceall));
  return (NULL);
}
static void *
dma64_getnextrxp (dma_info_t * di, bool forceall)
{
  uint i;
  void *rxp;

  /* if forcing, dma engine must be disabled */
  ASSERT (!forceall || !dma64_rxenabled (di));

  i = di->rxin;

  /* return if no packets posted */
  if (i == di->rxout)
    return (NULL);

  /* ignore curr if forceall */
  if (!forceall &&
      (i ==
       B2I (R_REG (di->osh, &di->d64rxregs->status0) & D64_RS0_CD_MASK,
	    dma64dd_t)))
    return (NULL);

  /* get the packet pointer that corresponds to the rx descriptor */
  rxp = di->rxp[i];
  ASSERT (rxp);
  di->rxp[i] = NULL;

  /* clear this packet from the descriptor ring */
  DMA_UNMAP (di->osh,
	     (BUS_SWAP32 (R_SM (&di->rxd64[i].addrlow)) - di->dataoffsetlow),
	     di->rxbufsize, DMA_RX, rxp, &di->rxp_dmah[i]);

  W_SM (&di->rxd64[i].addrlow, 0xdeadbeef);
  W_SM (&di->rxd64[i].addrhigh, 0xdeadbeef);

  di->rxin = NEXTRXD (i);

  return (rxp);
}
static bool
_dma64_addrext (osl_t * osh, dma64regs_t * dma64regs)
{
  uint32 w;

  OR_REG (osh, &dma64regs->control, D64_XC_AE);
  w = R_REG (osh, &dma64regs->control);
  AND_REG (osh, &dma64regs->control, ~D64_XC_AE);
  return ((w & D64_XC_AE) == D64_XC_AE);
}
/*
 * Rotate all active tx dma ring entries "forward" by (ActiveDescriptor - txin).
 */
static void
dma64_txrotate (dma_info_t * di)
{
  uint ad;
  uint nactive;
  uint rot;
  uint old, new;
  uint32 w;
  uint first, last;

  ASSERT (dma64_txsuspendedidle (di));

  nactive = _dma_txactive (di);
  ad =
    B2I ((R_REG (di->osh, &di->d64txregs->status1) & D64_XS1_AD_MASK),
	 dma64dd_t);
  rot = TXD (ad - di->txin);

  ASSERT (rot < di->ntxd);

  /* full-ring case is a lot harder - don't worry about this */
  if (rot >= (di->ntxd - nactive))
    {
      DMA_ERROR (("%s: dma_txrotate: ring full - punt\n", di->name));
      return;
    }

  first = di->txin;
  last = PREVTXD (di->txout);

  /* move entries starting at last and moving backwards to first */
  for (old = last; old != PREVTXD (first); old = PREVTXD (old))
    {
      new = TXD (old + rot);

      /*
       * Move the tx dma descriptor.
       * EOT is set only in the last entry in the ring.
       */
      w = BUS_SWAP32 (R_SM (&di->txd64[old].ctrl1)) & ~D64_CTRL1_EOT;
      if (new == (di->ntxd - 1))
	w |= D64_CTRL1_EOT;
      W_SM (&di->txd64[new].ctrl1, BUS_SWAP32 (w));

      w = BUS_SWAP32 (R_SM (&di->txd64[old].ctrl2));
      W_SM (&di->txd64[new].ctrl2, BUS_SWAP32 (w));

      W_SM (&di->txd64[new].addrlow, R_SM (&di->txd64[old].addrlow));
      W_SM (&di->txd64[new].addrhigh, R_SM (&di->txd64[old].addrhigh));

      /* zap the old tx dma descriptor address fields */
      W_SM (&di->txd64[old].addrlow, BUS_SWAP32 (0xdeadbeef));
      W_SM (&di->txd64[old].addrhigh, BUS_SWAP32 (0xdeadbeef));

      /* move the corresponding txp[] entry */
      ASSERT (di->txp[new] == NULL);
      di->txp[new] = di->txp[old];
      di->txp[old] = NULL;
    }

  /* update txin and txout */
  di->txin = ad;
  di->txout = TXD (di->txout + rot);
  di->hnddma.txavail = di->ntxd - NTXDACTIVE (di->txin, di->txout) - 1;

  /* kick the chip */
  W_REG (di->osh, &di->d64txregs->ptr, I2B (di->txout, dma64dd_t));
}

#endif /* BCMDMA64 */
uint
dma_addrwidth (sb_t * sbh, void *dmaregs)
{
  dma32regs_t *dma32regs;
  osl_t *osh;

  osh = sb_osh (sbh);

  if (DMA64_CAP)
    {
      /* DMA engine is 64-bit capable */
      if (((sb_coreflagshi (sbh, 0, 0) & SBTMH_DMA64) == SBTMH_DMA64))
	{
	  /* backplane is 64-bit capable */
	  if (sb_backplane64 (sbh))
	    /* If bus is System Backplane or PCIE then we can access 64-bits */
	    if ((BUSTYPE (sbh->bustype) == SB_BUS) ||
		((BUSTYPE (sbh->bustype) == PCI_BUS) &&
		 sbh->buscoretype == SB_PCIE))
	      return (DMADDRWIDTH_64);

	  /* DMA64 is always 32 bits capable, AE is always TRUE */
#ifdef BCMDMA64
	  ASSERT (_dma64_addrext (osh, (dma64regs_t *) dmaregs));
#endif
	  return (DMADDRWIDTH_32);
	}
    }

  /* Start checking for 32-bit / 30-bit addressing */
  dma32regs = (dma32regs_t *) dmaregs;

  /* For System Backplane, PCIE bus or addrext feature, 32-bits ok */
  if ((BUSTYPE (sbh->bustype) == SB_BUS) ||
      ((BUSTYPE (sbh->bustype) == PCI_BUS) && sbh->buscoretype == SB_PCIE) ||
      (_dma32_addrext (osh, dma32regs)))
    return (DMADDRWIDTH_32);

  /* Fallthru */
  return (DMADDRWIDTH_30);
}
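
/*
 * Summary of the dma_addrwidth() decision above (commentary restating the
 * code, no new behavior):
 *
 *   DMA64 core + 64-bit backplane + (SB or PCIE bus)  -> DMADDRWIDTH_64
 *   DMA64 core, any other configuration               -> DMADDRWIDTH_32
 *   DMA32 core on SB/PCIE, or with addrext            -> DMADDRWIDTH_32
 *   DMA32 core otherwise                              -> DMADDRWIDTH_30
 */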