/*

  Broadcom B43legacy wireless driver

  DMA ringbuffer and descriptor allocation/management

  Copyright (c) 2005, 2006 Michael Buesch <mb@bu3sch.de>

  Some code in this file is derived from the b44.c driver
  Copyright (C) 2002 David S. Miller
  Copyright (C) Pekka Pietikainen

  This program is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; either version 2 of the License, or
  (at your option) any later version.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; see the file COPYING.  If not, write to
  the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
  Boston, MA 02110-1301, USA.

*/

#include "b43legacy.h"
#include "dma.h"
#include "main.h"
#include "debugfs.h"
#include "xmit.h"

#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/skbuff.h>
#include <net/dst.h>

/* 32bit DMA ops. */
static
struct b43legacy_dmadesc_generic *op32_idx2desc(
					struct b43legacy_dmaring *ring,
					int slot,
					struct b43legacy_dmadesc_meta **meta)
{
	struct b43legacy_dmadesc32 *desc;

	*meta = &(ring->meta[slot]);
	desc = ring->descbase;
	desc = &(desc[slot]);

	return (struct b43legacy_dmadesc_generic *)desc;
}

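/* Program one 32-bit hardware descriptor. The bus address is split into
 * the descriptor's address word and the ADDREXT bits of the control word,
 * with the SSB routing bits from ssb_dma_translation() OR'd in. */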
static void op32_fill_descriptor(struct b43legacy_dmaring *ring,
				 struct b43legacy_dmadesc_generic *desc,
				 dma_addr_t dmaaddr, u16 bufsize,
				 int start, int end, int irq)
{
	struct b43legacy_dmadesc32 *descbase = ring->descbase;
	int slot;
	u32 ctl;
	u32 addr;
	u32 addrext;

	slot = (int)(&(desc->dma32) - descbase);
	B43legacy_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));

	addr = (u32)(dmaaddr & ~SSB_DMA_TRANSLATION_MASK);
	addrext = (u32)(dmaaddr & SSB_DMA_TRANSLATION_MASK)
		  >> SSB_DMA_TRANSLATION_SHIFT;
	addr |= ssb_dma_translation(ring->dev->dev);
	ctl = (bufsize - ring->frameoffset)
	      & B43legacy_DMA32_DCTL_BYTECNT;
	if (slot == ring->nr_slots - 1)
		ctl |= B43legacy_DMA32_DCTL_DTABLEEND;
	if (start)
		ctl |= B43legacy_DMA32_DCTL_FRAMESTART;
	if (end)
		ctl |= B43legacy_DMA32_DCTL_FRAMEEND;
	if (irq)
		ctl |= B43legacy_DMA32_DCTL_IRQ;
	ctl |= (addrext << B43legacy_DMA32_DCTL_ADDREXT_SHIFT)
	       & B43legacy_DMA32_DCTL_ADDREXT_MASK;

	desc->dma32.control = cpu_to_le32(ctl);
	desc->dma32.address = cpu_to_le32(addr);
}

static void op32_poke_tx(struct b43legacy_dmaring *ring, int slot)
{
	b43legacy_dma_write(ring, B43legacy_DMA32_TXINDEX,
			    (u32)(slot * sizeof(struct b43legacy_dmadesc32)));
}

static void op32_tx_suspend(struct b43legacy_dmaring *ring)
{
	b43legacy_dma_write(ring, B43legacy_DMA32_TXCTL,
			    b43legacy_dma_read(ring, B43legacy_DMA32_TXCTL)
			    | B43legacy_DMA32_TXSUSPEND);
}

static void op32_tx_resume(struct b43legacy_dmaring *ring)
{
	b43legacy_dma_write(ring, B43legacy_DMA32_TXCTL,
			    b43legacy_dma_read(ring, B43legacy_DMA32_TXCTL)
			    & ~B43legacy_DMA32_TXSUSPEND);
}

static int op32_get_current_rxslot(struct b43legacy_dmaring *ring)
{
	u32 val;

	val = b43legacy_dma_read(ring, B43legacy_DMA32_RXSTATUS);
	val &= B43legacy_DMA32_RXDPTR;

	return (val / sizeof(struct b43legacy_dmadesc32));
}

static void op32_set_current_rxslot(struct b43legacy_dmaring *ring,
				    int slot)
{
	b43legacy_dma_write(ring, B43legacy_DMA32_RXINDEX,
			    (u32)(slot * sizeof(struct b43legacy_dmadesc32)));
}

static const struct b43legacy_dma_ops dma32_ops = {
	.idx2desc		= op32_idx2desc,
	.fill_descriptor	= op32_fill_descriptor,
	.poke_tx		= op32_poke_tx,
	.tx_suspend		= op32_tx_suspend,
	.tx_resume		= op32_tx_resume,
	.get_current_rxslot	= op32_get_current_rxslot,
	.set_current_rxslot	= op32_set_current_rxslot,
};

/* 64bit DMA ops. */
static
struct b43legacy_dmadesc_generic *op64_idx2desc(
					struct b43legacy_dmaring *ring,
					int slot,
					struct b43legacy_dmadesc_meta **meta)
{
	struct b43legacy_dmadesc64 *desc;

	*meta = &(ring->meta[slot]);
	desc = ring->descbase;
	desc = &(desc[slot]);

	return (struct b43legacy_dmadesc_generic *)desc;
}

static void op64_fill_descriptor(struct b43legacy_dmaring *ring,
				 struct b43legacy_dmadesc_generic *desc,
				 dma_addr_t dmaaddr, u16 bufsize,
				 int start, int end, int irq)
{
	struct b43legacy_dmadesc64 *descbase = ring->descbase;
	int slot;
	u32 ctl0 = 0;
	u32 ctl1 = 0;
	u32 addrlo;
	u32 addrhi;
	u32 addrext;

	slot = (int)(&(desc->dma64) - descbase);
	B43legacy_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));

	addrlo = (u32)(dmaaddr & 0xFFFFFFFF);
	addrhi = (((u64)dmaaddr >> 32) & ~SSB_DMA_TRANSLATION_MASK);
	addrext = (((u64)dmaaddr >> 32) & SSB_DMA_TRANSLATION_MASK)
		  >> SSB_DMA_TRANSLATION_SHIFT;
	addrhi |= ssb_dma_translation(ring->dev->dev);
	if (slot == ring->nr_slots - 1)
		ctl0 |= B43legacy_DMA64_DCTL0_DTABLEEND;
	if (start)
		ctl0 |= B43legacy_DMA64_DCTL0_FRAMESTART;
	if (end)
		ctl0 |= B43legacy_DMA64_DCTL0_FRAMEEND;
	if (irq)
		ctl0 |= B43legacy_DMA64_DCTL0_IRQ;
	ctl1 |= (bufsize - ring->frameoffset)
		& B43legacy_DMA64_DCTL1_BYTECNT;
	ctl1 |= (addrext << B43legacy_DMA64_DCTL1_ADDREXT_SHIFT)
		& B43legacy_DMA64_DCTL1_ADDREXT_MASK;

	desc->dma64.control0 = cpu_to_le32(ctl0);
	desc->dma64.control1 = cpu_to_le32(ctl1);
	desc->dma64.address_low = cpu_to_le32(addrlo);
	desc->dma64.address_high = cpu_to_le32(addrhi);
}

static void op64_poke_tx(struct b43legacy_dmaring *ring, int slot)
{
	b43legacy_dma_write(ring, B43legacy_DMA64_TXINDEX,
			    (u32)(slot * sizeof(struct b43legacy_dmadesc64)));
}

static void op64_tx_suspend(struct b43legacy_dmaring *ring)
{
	b43legacy_dma_write(ring, B43legacy_DMA64_TXCTL,
			    b43legacy_dma_read(ring, B43legacy_DMA64_TXCTL)
			    | B43legacy_DMA64_TXSUSPEND);
}

static void op64_tx_resume(struct b43legacy_dmaring *ring)
{
	b43legacy_dma_write(ring, B43legacy_DMA64_TXCTL,
			    b43legacy_dma_read(ring, B43legacy_DMA64_TXCTL)
			    & ~B43legacy_DMA64_TXSUSPEND);
}

static int op64_get_current_rxslot(struct b43legacy_dmaring *ring)
{
	u32 val;

	val = b43legacy_dma_read(ring, B43legacy_DMA64_RXSTATUS);
	val &= B43legacy_DMA64_RXSTATDPTR;

	return (val / sizeof(struct b43legacy_dmadesc64));
}

static void op64_set_current_rxslot(struct b43legacy_dmaring *ring,
				    int slot)
{
	b43legacy_dma_write(ring, B43legacy_DMA64_RXINDEX,
			    (u32)(slot * sizeof(struct b43legacy_dmadesc64)));
}

static const struct b43legacy_dma_ops dma64_ops = {
	.idx2desc		= op64_idx2desc,
	.fill_descriptor	= op64_fill_descriptor,
	.poke_tx		= op64_poke_tx,
	.tx_suspend		= op64_tx_suspend,
	.tx_resume		= op64_tx_resume,
	.get_current_rxslot	= op64_get_current_rxslot,
	.set_current_rxslot	= op64_set_current_rxslot,
};

static inline int free_slots(struct b43legacy_dmaring *ring)
{
	return (ring->nr_slots - ring->used_slots);
}

static inline int next_slot(struct b43legacy_dmaring *ring, int slot)
{
	B43legacy_WARN_ON(!(slot >= -1 && slot <= ring->nr_slots - 1));
	if (slot == ring->nr_slots - 1)
		return 0;
	return slot + 1;
}

static inline int prev_slot(struct b43legacy_dmaring *ring, int slot)
{
	B43legacy_WARN_ON(!(slot >= 0 && slot <= ring->nr_slots - 1));
	if (slot == 0)
		return ring->nr_slots - 1;
	return slot - 1;
}

#ifdef CONFIG_B43LEGACY_DEBUG
static void update_max_used_slots(struct b43legacy_dmaring *ring,
				  int current_used_slots)
{
	if (current_used_slots <= ring->max_used_slots)
		return;
	ring->max_used_slots = current_used_slots;
	if (b43legacy_debug(ring->dev, B43legacy_DBG_DMAVERBOSE))
		b43legacydbg(ring->dev->wl,
			     "max_used_slots increased to %d on %s ring %d\n",
			     ring->max_used_slots,
			     ring->tx ? "TX" : "RX",
			     ring->index);
}
#else
static inline
void update_max_used_slots(struct b43legacy_dmaring *ring,
			   int current_used_slots)
{
}
#endif /* DEBUG */

/* Request a slot for usage. */
static inline
int request_slot(struct b43legacy_dmaring *ring)
{
	int slot;

	B43legacy_WARN_ON(!ring->tx);
	B43legacy_WARN_ON(ring->stopped);
	B43legacy_WARN_ON(free_slots(ring) == 0);

	slot = next_slot(ring, ring->current_slot);
	ring->current_slot = slot;
	ring->used_slots++;

	update_max_used_slots(ring, ring->used_slots);

	return slot;
}

/* Mac80211-queue to b43legacy-ring mapping */
static struct b43legacy_dmaring *priority_to_txring(
					struct b43legacy_wldev *dev,
					int queue_priority)
{
	struct b43legacy_dmaring *ring;

/*FIXME: For now we always run on TX-ring-1 */
	return dev->dma.tx_ring1;

	/* 0 = highest priority */
	switch (queue_priority) {
	default:
		B43legacy_WARN_ON(1);
		/* fallthrough */
	case 0:
		ring = dev->dma.tx_ring3;
		break;
	case 1:
		ring = dev->dma.tx_ring2;
		break;
	case 2:
		ring = dev->dma.tx_ring1;
		break;
	case 3:
		ring = dev->dma.tx_ring0;
		break;
	case 4:
		ring = dev->dma.tx_ring4;
		break;
	case 5:
		ring = dev->dma.tx_ring5;
		break;
	}

	return ring;
}

/* Bcm4301-ring to mac80211-queue mapping */
static inline int txring_to_priority(struct b43legacy_dmaring *ring)
{
	static const u8 idx_to_prio[] =
		{ 3, 2, 1, 0, 4, 5, };

/*FIXME: have only one queue, for now */
	return 0;

	return idx_to_prio[ring->index];
}

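/* Look up the MMIO base address of DMA controller `controller_idx`,
 * for either the 64-bit or the 32-bit register layout. */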
u16 b43legacy_dmacontroller_base(int dma64bit, int controller_idx)
{
	static const u16 map64[] = {
		B43legacy_MMIO_DMA64_BASE0,
		B43legacy_MMIO_DMA64_BASE1,
		B43legacy_MMIO_DMA64_BASE2,
		B43legacy_MMIO_DMA64_BASE3,
		B43legacy_MMIO_DMA64_BASE4,
		B43legacy_MMIO_DMA64_BASE5,
	};
	static const u16 map32[] = {
		B43legacy_MMIO_DMA32_BASE0,
		B43legacy_MMIO_DMA32_BASE1,
		B43legacy_MMIO_DMA32_BASE2,
		B43legacy_MMIO_DMA32_BASE3,
		B43legacy_MMIO_DMA32_BASE4,
		B43legacy_MMIO_DMA32_BASE5,
	};

	if (dma64bit) {
		B43legacy_WARN_ON(!(controller_idx >= 0 &&
				    controller_idx < ARRAY_SIZE(map64)));
		return map64[controller_idx];
	}
	B43legacy_WARN_ON(!(controller_idx >= 0 &&
			    controller_idx < ARRAY_SIZE(map32)));
	return map32[controller_idx];
}

static inline
dma_addr_t map_descbuffer(struct b43legacy_dmaring *ring,
			  unsigned char *buf,
			  size_t len,
			  int tx)
{
	dma_addr_t dmaaddr;

	if (tx)
		dmaaddr = dma_map_single(ring->dev->dev->dev,
					 buf, len,
					 DMA_TO_DEVICE);
	else
		dmaaddr = dma_map_single(ring->dev->dev->dev,
					 buf, len,
					 DMA_FROM_DEVICE);

	return dmaaddr;
}

static inline
void unmap_descbuffer(struct b43legacy_dmaring *ring,
		      dma_addr_t addr,
		      size_t len,
		      int tx)
{
	if (tx)
		dma_unmap_single(ring->dev->dev->dev,
				 addr, len,
				 DMA_TO_DEVICE);
	else
		dma_unmap_single(ring->dev->dev->dev,
				 addr, len,
				 DMA_FROM_DEVICE);
}

static inline
void sync_descbuffer_for_cpu(struct b43legacy_dmaring *ring,
			     dma_addr_t addr,
			     size_t len)
{
	B43legacy_WARN_ON(ring->tx);

	dma_sync_single_for_cpu(ring->dev->dev->dev,
				addr, len, DMA_FROM_DEVICE);
}

static inline
void sync_descbuffer_for_device(struct b43legacy_dmaring *ring,
				dma_addr_t addr,
				size_t len)
{
	B43legacy_WARN_ON(ring->tx);

	dma_sync_single_for_device(ring->dev->dev->dev,
				   addr, len, DMA_FROM_DEVICE);
}

static inline
void free_descriptor_buffer(struct b43legacy_dmaring *ring,
			    struct b43legacy_dmadesc_meta *meta,
			    int irq_context)
{
	if (meta->skb) {
		if (irq_context)
			dev_kfree_skb_irq(meta->skb);
		else
			dev_kfree_skb(meta->skb);
		meta->skb = NULL;
	}
}

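/* Allocate/free the coherent DMA memory that holds the descriptor ring
 * itself; the per-slot buffers are mapped separately, streaming-style. */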
static int alloc_ringmemory(struct b43legacy_dmaring *ring)
{
	struct device *dev = ring->dev->dev->dev;

	ring->descbase = dma_alloc_coherent(dev, B43legacy_DMA_RINGMEMSIZE,
					    &(ring->dmabase), GFP_KERNEL);
	if (!ring->descbase) {
		b43legacyerr(ring->dev->wl, "DMA ringmemory allocation"
			     " failed\n");
		return -ENOMEM;
	}
	memset(ring->descbase, 0, B43legacy_DMA_RINGMEMSIZE);

	return 0;
}

static void free_ringmemory(struct b43legacy_dmaring *ring)
{
	struct device *dev = ring->dev->dev->dev;

	dma_free_coherent(dev, B43legacy_DMA_RINGMEMSIZE,
			  ring->descbase, ring->dmabase);
}

/* Reset the RX DMA channel */
int b43legacy_dmacontroller_rx_reset(struct b43legacy_wldev *dev,
				     u16 mmio_base, int dma64)
{
	int i;
	u32 value;
	u16 offset;

	might_sleep();

	offset = dma64 ? B43legacy_DMA64_RXCTL : B43legacy_DMA32_RXCTL;
	b43legacy_write32(dev, mmio_base + offset, 0);
	for (i = 0; i < 10; i++) {
		offset = dma64 ? B43legacy_DMA64_RXSTATUS :
				 B43legacy_DMA32_RXSTATUS;
		value = b43legacy_read32(dev, mmio_base + offset);
		if (dma64) {
			value &= B43legacy_DMA64_RXSTAT;
			if (value == B43legacy_DMA64_RXSTAT_DISABLED) {
				i = -1;
				break;
			}
		} else {
			value &= B43legacy_DMA32_RXSTATE;
			if (value == B43legacy_DMA32_RXSTAT_DISABLED) {
				i = -1;
				break;
			}
		}
		msleep(1);
	}
	if (i != -1) {
		b43legacyerr(dev->wl, "DMA RX reset timed out\n");
		return -ENODEV;
	}

	return 0;
}

/* Reset the TX DMA channel */
int b43legacy_dmacontroller_tx_reset(struct b43legacy_wldev *dev,
				     u16 mmio_base, int dma64)
{
	int i;
	u32 value;
	u16 offset;

	might_sleep();

	for (i = 0; i < 10; i++) {
		offset = dma64 ? B43legacy_DMA64_TXSTATUS :
				 B43legacy_DMA32_TXSTATUS;
		value = b43legacy_read32(dev, mmio_base + offset);
		if (dma64) {
			value &= B43legacy_DMA64_TXSTAT;
			if (value == B43legacy_DMA64_TXSTAT_DISABLED ||
			    value == B43legacy_DMA64_TXSTAT_IDLEWAIT ||
			    value == B43legacy_DMA64_TXSTAT_STOPPED)
				break;
		} else {
			value &= B43legacy_DMA32_TXSTATE;
			if (value == B43legacy_DMA32_TXSTAT_DISABLED ||
			    value == B43legacy_DMA32_TXSTAT_IDLEWAIT ||
			    value == B43legacy_DMA32_TXSTAT_STOPPED)
				break;
		}
		msleep(1);
	}
	offset = dma64 ? B43legacy_DMA64_TXCTL : B43legacy_DMA32_TXCTL;
	b43legacy_write32(dev, mmio_base + offset, 0);
	for (i = 0; i < 10; i++) {
		offset = dma64 ? B43legacy_DMA64_TXSTATUS :
				 B43legacy_DMA32_TXSTATUS;
		value = b43legacy_read32(dev, mmio_base + offset);
		if (dma64) {
			value &= B43legacy_DMA64_TXSTAT;
			if (value == B43legacy_DMA64_TXSTAT_DISABLED) {
				i = -1;
				break;
			}
		} else {
			value &= B43legacy_DMA32_TXSTATE;
			if (value == B43legacy_DMA32_TXSTAT_DISABLED) {
				i = -1;
				break;
			}
		}
		msleep(1);
	}
	if (i != -1) {
		b43legacyerr(dev->wl, "DMA TX reset timed out\n");
		return -ENODEV;
	}
	/* ensure the reset is completed. */
	msleep(1);

	return 0;
}

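/* Attach a freshly allocated skb to an RX slot: map it for DMA (retrying
 * once from ZONE_DMA if the first mapping fails), write the descriptor,
 * and clear the header fields that the RX path later polls on. */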
static int setup_rx_descbuffer(struct b43legacy_dmaring *ring,
			       struct b43legacy_dmadesc_generic *desc,
			       struct b43legacy_dmadesc_meta *meta,
			       gfp_t gfp_flags)
{
	struct b43legacy_rxhdr_fw3 *rxhdr;
	struct b43legacy_hwtxstatus *txstat;
	dma_addr_t dmaaddr;
	struct sk_buff *skb;

	B43legacy_WARN_ON(ring->tx);

	skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
	if (unlikely(!skb))
		return -ENOMEM;
	dmaaddr = map_descbuffer(ring, skb->data,
				 ring->rx_buffersize, 0);
	if (dma_mapping_error(dmaaddr)) {
		/* ugh. try to realloc in zone_dma */
		gfp_flags |= GFP_DMA;

		dev_kfree_skb_any(skb);

		skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
		if (unlikely(!skb))
			return -ENOMEM;
		dmaaddr = map_descbuffer(ring, skb->data,
					 ring->rx_buffersize, 0);
	}

	if (dma_mapping_error(dmaaddr)) {
		dev_kfree_skb_any(skb);
		return -EIO;
	}

	meta->skb = skb;
	meta->dmaaddr = dmaaddr;
	ring->ops->fill_descriptor(ring, desc, dmaaddr,
				   ring->rx_buffersize, 0, 0, 0);

	rxhdr = (struct b43legacy_rxhdr_fw3 *)(skb->data);
	rxhdr->frame_len = 0;
	txstat = (struct b43legacy_hwtxstatus *)(skb->data);
	txstat->cookie = 0;

	return 0;
}

/* Allocate the initial descbuffers.
 * This is used for an RX ring only.
 */
static int alloc_initial_descbuffers(struct b43legacy_dmaring *ring)
{
	int i;
	int err = -ENOMEM;
	struct b43legacy_dmadesc_generic *desc;
	struct b43legacy_dmadesc_meta *meta;

	for (i = 0; i < ring->nr_slots; i++) {
		desc = ring->ops->idx2desc(ring, i, &meta);

		err = setup_rx_descbuffer(ring, desc, meta, GFP_KERNEL);
		if (err) {
			b43legacyerr(ring->dev->wl,
			       "Failed to allocate initial descbuffers\n");
			goto err_unwind;
		}
	}
	mb(); /* all descbuffer setup before next line */
	ring->used_slots = ring->nr_slots;
	err = 0;
out:
	return err;

err_unwind:
	for (i--; i >= 0; i--) {
		desc = ring->ops->idx2desc(ring, i, &meta);

		unmap_descbuffer(ring, meta->dmaaddr, ring->rx_buffersize, 0);
		dev_kfree_skb(meta->skb);
	}
	goto out;
}

/* Do initial setup of the DMA controller.
 * Reset the controller, write the ring busaddress
 * and switch the "enable" bit on.
 */
static int dmacontroller_setup(struct b43legacy_dmaring *ring)
{
	int err = 0;
	u32 value;
	u32 addrext;
	u32 trans = ssb_dma_translation(ring->dev->dev);

	if (ring->tx) {
		if (ring->dma64) {
			u64 ringbase = (u64)(ring->dmabase);

			addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK)
				  >> SSB_DMA_TRANSLATION_SHIFT;
			value = B43legacy_DMA64_TXENABLE;
			value |= (addrext << B43legacy_DMA64_TXADDREXT_SHIFT)
				 & B43legacy_DMA64_TXADDREXT_MASK;
			b43legacy_dma_write(ring, B43legacy_DMA64_TXCTL,
					    value);
			b43legacy_dma_write(ring, B43legacy_DMA64_TXRINGLO,
					    (ringbase & 0xFFFFFFFF));
			b43legacy_dma_write(ring, B43legacy_DMA64_TXRINGHI,
					    ((ringbase >> 32)
					    & ~SSB_DMA_TRANSLATION_MASK)
					    | trans);
		} else {
			u32 ringbase = (u32)(ring->dmabase);

			addrext = (ringbase & SSB_DMA_TRANSLATION_MASK)
				  >> SSB_DMA_TRANSLATION_SHIFT;
			value = B43legacy_DMA32_TXENABLE;
			value |= (addrext << B43legacy_DMA32_TXADDREXT_SHIFT)
				 & B43legacy_DMA32_TXADDREXT_MASK;
			b43legacy_dma_write(ring, B43legacy_DMA32_TXCTL,
					    value);
			b43legacy_dma_write(ring, B43legacy_DMA32_TXRING,
					    (ringbase &
					    ~SSB_DMA_TRANSLATION_MASK)
					    | trans);
		}
	} else {
		err = alloc_initial_descbuffers(ring);
		if (err)
			goto out;
		if (ring->dma64) {
			u64 ringbase = (u64)(ring->dmabase);

			addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK)
				  >> SSB_DMA_TRANSLATION_SHIFT;
			value = (ring->frameoffset <<
				 B43legacy_DMA64_RXFROFF_SHIFT);
			value |= B43legacy_DMA64_RXENABLE;
			value |= (addrext << B43legacy_DMA64_RXADDREXT_SHIFT)
				 & B43legacy_DMA64_RXADDREXT_MASK;
			b43legacy_dma_write(ring, B43legacy_DMA64_RXCTL,
					    value);
			b43legacy_dma_write(ring, B43legacy_DMA64_RXRINGLO,
					    (ringbase & 0xFFFFFFFF));
			b43legacy_dma_write(ring, B43legacy_DMA64_RXRINGHI,
					    ((ringbase >> 32) &
					    ~SSB_DMA_TRANSLATION_MASK) |
					    trans);
			b43legacy_dma_write(ring, B43legacy_DMA64_RXINDEX,
					    200);
		} else {
			u32 ringbase = (u32)(ring->dmabase);

			addrext = (ringbase & SSB_DMA_TRANSLATION_MASK)
				  >> SSB_DMA_TRANSLATION_SHIFT;
			value = (ring->frameoffset <<
				 B43legacy_DMA32_RXFROFF_SHIFT);
			value |= B43legacy_DMA32_RXENABLE;
			value |= (addrext <<
				 B43legacy_DMA32_RXADDREXT_SHIFT)
				 & B43legacy_DMA32_RXADDREXT_MASK;
			b43legacy_dma_write(ring, B43legacy_DMA32_RXCTL,
					    value);
			b43legacy_dma_write(ring, B43legacy_DMA32_RXRING,
					    (ringbase &
					    ~SSB_DMA_TRANSLATION_MASK)
					    | trans);
			b43legacy_dma_write(ring, B43legacy_DMA32_RXINDEX,
					    200);
		}
	}

out:
	return err;
}

/* Shutdown the DMA controller. */
static void dmacontroller_cleanup(struct b43legacy_dmaring *ring)
{
	if (ring->tx) {
		b43legacy_dmacontroller_tx_reset(ring->dev, ring->mmio_base,
						 ring->dma64);
		if (ring->dma64) {
			b43legacy_dma_write(ring, B43legacy_DMA64_TXRINGLO, 0);
			b43legacy_dma_write(ring, B43legacy_DMA64_TXRINGHI, 0);
		} else
			b43legacy_dma_write(ring, B43legacy_DMA32_TXRING, 0);
	} else {
		b43legacy_dmacontroller_rx_reset(ring->dev, ring->mmio_base,
						 ring->dma64);
		if (ring->dma64) {
			b43legacy_dma_write(ring, B43legacy_DMA64_RXRINGLO, 0);
			b43legacy_dma_write(ring, B43legacy_DMA64_RXRINGHI, 0);
		} else
			b43legacy_dma_write(ring, B43legacy_DMA32_RXRING, 0);
	}
}

static void free_all_descbuffers(struct b43legacy_dmaring *ring)
{
	struct b43legacy_dmadesc_generic *desc;
	struct b43legacy_dmadesc_meta *meta;
	int i;

	if (!ring->used_slots)
		return;
	for (i = 0; i < ring->nr_slots; i++) {
		desc = ring->ops->idx2desc(ring, i, &meta);

		if (!meta->skb) {
			B43legacy_WARN_ON(!ring->tx);
			continue;
		}
		if (ring->tx)
			unmap_descbuffer(ring, meta->dmaaddr,
					 meta->skb->len, 1);
		else
			unmap_descbuffer(ring, meta->dmaaddr,
					 ring->rx_buffersize, 0);
		free_descriptor_buffer(ring, meta, 0);
	}
}

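/* Probe the widest DMA mask the core supports: the SSB TMSHIGH flag
 * indicates a 64-bit engine; otherwise writing the ADDREXT mask to TXCTL
 * and reading it back distinguishes 32-bit from 30-bit addressing. */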
static u64 supported_dma_mask(struct b43legacy_wldev *dev)
{
	u32 tmp;
	u16 mmio_base;

	tmp = b43legacy_read32(dev, SSB_TMSHIGH);
	if (tmp & SSB_TMSHIGH_DMA64)
		return DMA_64BIT_MASK;
	mmio_base = b43legacy_dmacontroller_base(0, 0);
	b43legacy_write32(dev,
			  mmio_base + B43legacy_DMA32_TXCTL,
			  B43legacy_DMA32_TXADDREXT_MASK);
	tmp = b43legacy_read32(dev, mmio_base +
			       B43legacy_DMA32_TXCTL);
	if (tmp & B43legacy_DMA32_TXADDREXT_MASK)
		return DMA_32BIT_MASK;

	return DMA_30BIT_MASK;
}

/* Main initialization function. */
static
struct b43legacy_dmaring *b43legacy_setup_dmaring(
					struct b43legacy_wldev *dev,
					int controller_index,
					int for_tx,
					int dma64)
{
	struct b43legacy_dmaring *ring;
	int err;
	int nr_slots;
	dma_addr_t dma_test;

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		goto out;

	nr_slots = B43legacy_RXRING_SLOTS;
	if (for_tx)
		nr_slots = B43legacy_TXRING_SLOTS;

	ring->meta = kcalloc(nr_slots, sizeof(struct b43legacy_dmadesc_meta),
			     GFP_KERNEL);
	if (!ring->meta)
		goto err_kfree_ring;
	if (for_tx) {
		ring->txhdr_cache = kcalloc(nr_slots,
					sizeof(struct b43legacy_txhdr_fw3),
					GFP_KERNEL);
		if (!ring->txhdr_cache)
			goto err_kfree_meta;

		/* test for ability to dma to txhdr_cache */
		dma_test = dma_map_single(dev->dev->dev,
					  ring->txhdr_cache,
					  sizeof(struct b43legacy_txhdr_fw3),
					  DMA_TO_DEVICE);

		if (dma_mapping_error(dma_test)) {
			/* ugh realloc */
			kfree(ring->txhdr_cache);
			ring->txhdr_cache = kcalloc(nr_slots,
					sizeof(struct b43legacy_txhdr_fw3),
					GFP_KERNEL | GFP_DMA);
			if (!ring->txhdr_cache)
				goto err_kfree_meta;

			dma_test = dma_map_single(dev->dev->dev,
					sizeof(struct b43legacy_txhdr_fw3),
					DMA_TO_DEVICE);

			if (dma_mapping_error(dma_test))
				goto err_kfree_txhdr_cache;
		}

		dma_unmap_single(dev->dev->dev,
				 dma_test, sizeof(struct b43legacy_txhdr_fw3),
				 DMA_TO_DEVICE);
	}

	ring->dev = dev;
	ring->nr_slots = nr_slots;
	ring->mmio_base = b43legacy_dmacontroller_base(dma64,
						       controller_index);
	ring->index = controller_index;
	ring->dma64 = !!dma64;
	if (dma64)
		ring->ops = &dma64_ops;
	else
		ring->ops = &dma32_ops;
	if (for_tx) {
		ring->tx = 1;
		ring->current_slot = -1;
	} else {
		if (ring->index == 0) {
			ring->rx_buffersize = B43legacy_DMA0_RX_BUFFERSIZE;
			ring->frameoffset = B43legacy_DMA0_RX_FRAMEOFFSET;
		} else if (ring->index == 3) {
			ring->rx_buffersize = B43legacy_DMA3_RX_BUFFERSIZE;
			ring->frameoffset = B43legacy_DMA3_RX_FRAMEOFFSET;
		} else
			B43legacy_WARN_ON(1);
	}
	spin_lock_init(&ring->lock);
#ifdef CONFIG_B43LEGACY_DEBUG
	ring->last_injected_overflow = jiffies;
#endif

	err = alloc_ringmemory(ring);
	if (err)
		goto err_kfree_txhdr_cache;
	err = dmacontroller_setup(ring);
	if (err)
		goto err_free_ringmemory;

out:
	return ring;

err_free_ringmemory:
	free_ringmemory(ring);
err_kfree_txhdr_cache:
	kfree(ring->txhdr_cache);
err_kfree_meta:
	kfree(ring->meta);
err_kfree_ring:
	kfree(ring);
	ring = NULL;
	goto out;
}

/* Main cleanup function. */
static void b43legacy_destroy_dmaring(struct b43legacy_dmaring *ring)
{
	if (!ring)
		return;

	b43legacydbg(ring->dev->wl, "DMA-%s 0x%04X (%s) max used slots:"
		     " %d/%d\n", (ring->dma64) ? "64" : "32", ring->mmio_base,
		     (ring->tx) ? "TX" : "RX",
		     ring->max_used_slots, ring->nr_slots);
	/* Device IRQs are disabled prior entering this function,
	 * so no need to take care of concurrency with rx handler stuff.
	 */
	dmacontroller_cleanup(ring);
	free_all_descbuffers(ring);
	free_ringmemory(ring);

	kfree(ring->txhdr_cache);
	kfree(ring->meta);
	kfree(ring);
}

void b43legacy_dma_free(struct b43legacy_wldev *dev)
{
	struct b43legacy_dma *dma;

	if (b43legacy_using_pio(dev))
		return;
	dma = &dev->dma;

	b43legacy_destroy_dmaring(dma->rx_ring3);
	dma->rx_ring3 = NULL;
	b43legacy_destroy_dmaring(dma->rx_ring0);
	dma->rx_ring0 = NULL;

	b43legacy_destroy_dmaring(dma->tx_ring5);
	dma->tx_ring5 = NULL;
	b43legacy_destroy_dmaring(dma->tx_ring4);
	dma->tx_ring4 = NULL;
	b43legacy_destroy_dmaring(dma->tx_ring3);
	dma->tx_ring3 = NULL;
	b43legacy_destroy_dmaring(dma->tx_ring2);
	dma->tx_ring2 = NULL;
	b43legacy_destroy_dmaring(dma->tx_ring1);
	dma->tx_ring1 = NULL;
	b43legacy_destroy_dmaring(dma->tx_ring0);
	dma->tx_ring0 = NULL;
}

int b43legacy_dma_init(struct b43legacy_wldev *dev)
{
	struct b43legacy_dma *dma = &dev->dma;
	struct b43legacy_dmaring *ring;
	int err;
	u64 dmamask;
	int dma64 = 0;

	dmamask = supported_dma_mask(dev);
	if (dmamask == DMA_64BIT_MASK)
		dma64 = 1;

	err = ssb_dma_set_mask(dev->dev, dmamask);
	if (err) {
#ifdef CONFIG_B43LEGACY_PIO
		b43legacywarn(dev->wl, "DMA for this device not supported. "
			      "Falling back to PIO\n");
		dev->__using_pio = 1;
		return -EAGAIN;
#else
		b43legacyerr(dev->wl, "DMA for this device not supported and "
			     "no PIO support compiled in\n");
		return -EOPNOTSUPP;
#endif
	}

	err = -ENOMEM;
	/* setup TX DMA channels. */
	ring = b43legacy_setup_dmaring(dev, 0, 1, dma64);
	if (!ring)
		goto out;
	dma->tx_ring0 = ring;

	ring = b43legacy_setup_dmaring(dev, 1, 1, dma64);
	if (!ring)
		goto err_destroy_tx0;
	dma->tx_ring1 = ring;

	ring = b43legacy_setup_dmaring(dev, 2, 1, dma64);
	if (!ring)
		goto err_destroy_tx1;
	dma->tx_ring2 = ring;

	ring = b43legacy_setup_dmaring(dev, 3, 1, dma64);
	if (!ring)
		goto err_destroy_tx2;
	dma->tx_ring3 = ring;

	ring = b43legacy_setup_dmaring(dev, 4, 1, dma64);
	if (!ring)
		goto err_destroy_tx3;
	dma->tx_ring4 = ring;

	ring = b43legacy_setup_dmaring(dev, 5, 1, dma64);
	if (!ring)
		goto err_destroy_tx4;
	dma->tx_ring5 = ring;

	/* setup RX DMA channels. */
	ring = b43legacy_setup_dmaring(dev, 0, 0, dma64);
	if (!ring)
		goto err_destroy_tx5;
	dma->rx_ring0 = ring;

	if (dev->dev->id.revision < 5) {
		ring = b43legacy_setup_dmaring(dev, 3, 0, dma64);
		if (!ring)
			goto err_destroy_rx0;
		dma->rx_ring3 = ring;
	}

	b43legacydbg(dev->wl, "%d-bit DMA initialized\n",
		     (dmamask == DMA_64BIT_MASK) ? 64 :
		     (dmamask == DMA_32BIT_MASK) ? 32 : 30);
	err = 0;
out:
	return err;

err_destroy_rx0:
	b43legacy_destroy_dmaring(dma->rx_ring0);
	dma->rx_ring0 = NULL;
err_destroy_tx5:
	b43legacy_destroy_dmaring(dma->tx_ring5);
	dma->tx_ring5 = NULL;
err_destroy_tx4:
	b43legacy_destroy_dmaring(dma->tx_ring4);
	dma->tx_ring4 = NULL;
err_destroy_tx3:
	b43legacy_destroy_dmaring(dma->tx_ring3);
	dma->tx_ring3 = NULL;
err_destroy_tx2:
	b43legacy_destroy_dmaring(dma->tx_ring2);
	dma->tx_ring2 = NULL;
err_destroy_tx1:
	b43legacy_destroy_dmaring(dma->tx_ring1);
	dma->tx_ring1 = NULL;
err_destroy_tx0:
	b43legacy_destroy_dmaring(dma->tx_ring0);
	dma->tx_ring0 = NULL;
	goto out;
}

/* Generate a cookie for the TX header. */
static u16 generate_cookie(struct b43legacy_dmaring *ring,
			   int slot)
{
	u16 cookie = 0x1000;

	/* Use the upper 4 bits of the cookie as
	 * DMA controller ID and store the slot number
	 * in the lower 12 bits.
	 * Note that the cookie must never be 0, as this
	 * is a special value used in RX path.
	 */
	switch (ring->index) {
	case 0:
		cookie = 0xA000;
		break;
	case 1:
		cookie = 0xB000;
		break;
	case 2:
		cookie = 0xC000;
		break;
	case 3:
		cookie = 0xD000;
		break;
	case 4:
		cookie = 0xE000;
		break;
	case 5:
		cookie = 0xF000;
		break;
	}
	B43legacy_WARN_ON(!(((u16)slot & 0xF000) == 0x0000));
	cookie |= (u16)slot;

	return cookie;
}

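/* Example: a frame queued on controller 2, slot 5, gets cookie 0xC005;
 * parse_cookie() below inverts this mapping. */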
/* Inspect a cookie and find out to which controller/slot it belongs. */
static
struct b43legacy_dmaring *parse_cookie(struct b43legacy_wldev *dev,
				       u16 cookie, int *slot)
{
	struct b43legacy_dma *dma = &dev->dma;
	struct b43legacy_dmaring *ring = NULL;

	switch (cookie & 0xF000) {
	case 0xA000:
		ring = dma->tx_ring0;
		break;
	case 0xB000:
		ring = dma->tx_ring1;
		break;
	case 0xC000:
		ring = dma->tx_ring2;
		break;
	case 0xD000:
		ring = dma->tx_ring3;
		break;
	case 0xE000:
		ring = dma->tx_ring4;
		break;
	case 0xF000:
		ring = dma->tx_ring5;
		break;
	default:
		B43legacy_WARN_ON(1);
	}
	*slot = (cookie & 0x0FFF);
	B43legacy_WARN_ON(!(ring && *slot >= 0 && *slot < ring->nr_slots));

	return ring;
}

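/* Build the two-descriptor chain for one frame: the first slot carries
 * the device TX header, the second the payload. If the payload cannot
 * be mapped, it is copied into a GFP_DMA bounce buffer and mapped again. */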
static int dma_tx_fragment(struct b43legacy_dmaring *ring,
			   struct sk_buff *skb,
			   struct ieee80211_tx_control *ctl)
{
	const struct b43legacy_dma_ops *ops = ring->ops;
	u8 *header;
	int slot;
	int err;
	struct b43legacy_dmadesc_generic *desc;
	struct b43legacy_dmadesc_meta *meta;
	struct b43legacy_dmadesc_meta *meta_hdr;
	struct sk_buff *bounce_skb;

#define SLOTS_PER_PACKET  2
	B43legacy_WARN_ON(skb_shinfo(skb)->nr_frags != 0);

	/* Get a slot for the header. */
	slot = request_slot(ring);
	desc = ops->idx2desc(ring, slot, &meta_hdr);
	memset(meta_hdr, 0, sizeof(*meta_hdr));

	header = &(ring->txhdr_cache[slot * sizeof(
					struct b43legacy_txhdr_fw3)]);
	b43legacy_generate_txhdr(ring->dev, header,
				 skb->data, skb->len, ctl,
				 generate_cookie(ring, slot));

	meta_hdr->dmaaddr = map_descbuffer(ring, (unsigned char *)header,
					sizeof(struct b43legacy_txhdr_fw3), 1);
	if (dma_mapping_error(meta_hdr->dmaaddr))
		return -EIO;
	ops->fill_descriptor(ring, desc, meta_hdr->dmaaddr,
			     sizeof(struct b43legacy_txhdr_fw3), 1, 0, 0);

	/* Get a slot for the payload. */
	slot = request_slot(ring);
	desc = ops->idx2desc(ring, slot, &meta);
	memset(meta, 0, sizeof(*meta));

	memcpy(&meta->txstat.control, ctl, sizeof(*ctl));
	meta->skb = skb;
	meta->is_last_fragment = 1;

	meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
	/* create a bounce buffer in zone_dma on mapping failure. */
	if (dma_mapping_error(meta->dmaaddr)) {
		bounce_skb = __dev_alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
		if (!bounce_skb) {
			err = -ENOMEM;
			goto out_unmap_hdr;
		}

		memcpy(skb_put(bounce_skb, skb->len), skb->data, skb->len);
		dev_kfree_skb_any(skb);
		skb = bounce_skb;
		meta->skb = skb;
		meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
		if (dma_mapping_error(meta->dmaaddr)) {
			err = -EIO;
			goto out_free_bounce;
		}
	}

	ops->fill_descriptor(ring, desc, meta->dmaaddr,
			     skb->len, 0, 1, 1);

	wmb();	/* previous stuff MUST be done */
	/* Now transfer the whole frame. */
	ops->poke_tx(ring, next_slot(ring, slot));
	return 0;

out_free_bounce:
	dev_kfree_skb_any(skb);
out_unmap_hdr:
	unmap_descbuffer(ring, meta_hdr->dmaaddr,
			 sizeof(struct b43legacy_txhdr_fw3), 1);
	return err;
}

static inline
int should_inject_overflow(struct b43legacy_dmaring *ring)
{
#ifdef CONFIG_B43LEGACY_DEBUG
	if (unlikely(b43legacy_debug(ring->dev,
				     B43legacy_DBG_DMAOVERFLOW))) {
		/* Check if we should inject another ringbuffer overflow
		 * to test handling of this situation in the stack. */
		unsigned long next_overflow;

		next_overflow = ring->last_injected_overflow + HZ;
		if (time_after(jiffies, next_overflow)) {
			ring->last_injected_overflow = jiffies;
			b43legacydbg(ring->dev->wl,
				     "Injecting TX ring overflow on "
				     "DMA controller %d\n", ring->index);
			return 1;
		}
	}
#endif /* CONFIG_B43LEGACY_DEBUG */
	return 0;
}

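/* mac80211 TX entry point. Each packet consumes SLOTS_PER_PACKET (2)
 * slots, so the queue is stopped as soon as fewer than that remain. */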
int b43legacy_dma_tx(struct b43legacy_wldev *dev,
		     struct sk_buff *skb,
		     struct ieee80211_tx_control *ctl)
{
	struct b43legacy_dmaring *ring;
	int err = 0;
	unsigned long flags;

	ring = priority_to_txring(dev, ctl->queue);
	spin_lock_irqsave(&ring->lock, flags);
	B43legacy_WARN_ON(!ring->tx);
	if (unlikely(free_slots(ring) < SLOTS_PER_PACKET)) {
		b43legacywarn(dev->wl, "DMA queue overflow\n");
		err = -ENOSPC;
		goto out_unlock;
	}
	/* Check if the queue was stopped in mac80211,
	 * but we got called nevertheless.
	 * That would be a mac80211 bug. */
	B43legacy_BUG_ON(ring->stopped);

	err = dma_tx_fragment(ring, skb, ctl);
	if (unlikely(err)) {
		b43legacyerr(dev->wl, "DMA tx mapping failure\n");
		goto out_unlock;
	}
	ring->nr_tx_packets++;
	if ((free_slots(ring) < SLOTS_PER_PACKET) ||
	    should_inject_overflow(ring)) {
		/* This TX ring is full. */
		ieee80211_stop_queue(dev->wl->hw, txring_to_priority(ring));
		ring->stopped = 1;
		if (b43legacy_debug(dev, B43legacy_DBG_DMAVERBOSE))
			b43legacydbg(dev->wl, "Stopped TX ring %d\n",
				     ring->index);
	}
out_unlock:
	spin_unlock_irqrestore(&ring->lock, flags);

	return err;
}

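/* TX status interrupt handler: walk the slots of the completed frame,
 * unmap header and payload buffers, report the result to mac80211, and
 * wake the queue if it had been stopped for lack of slots. */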
void b43legacy_dma_handle_txstatus(struct b43legacy_wldev *dev,
				   const struct b43legacy_txstatus *status)
{
	const struct b43legacy_dma_ops *ops;
	struct b43legacy_dmaring *ring;
	struct b43legacy_dmadesc_generic *desc;
	struct b43legacy_dmadesc_meta *meta;
	int slot;

	ring = parse_cookie(dev, status->cookie, &slot);
	if (unlikely(!ring))
		return;
	B43legacy_WARN_ON(!irqs_disabled());
	spin_lock(&ring->lock);

	B43legacy_WARN_ON(!ring->tx);
	ops = ring->ops;
	while (1) {
		B43legacy_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));
		desc = ops->idx2desc(ring, slot, &meta);

		if (meta->skb)
			unmap_descbuffer(ring, meta->dmaaddr,
					 meta->skb->len, 1);
		else
			unmap_descbuffer(ring, meta->dmaaddr,
					 sizeof(struct b43legacy_txhdr_fw3),
					 1);

		if (meta->is_last_fragment) {
			B43legacy_WARN_ON(!meta->skb);
			/* Call back to inform the ieee80211 subsystem about the
			 * status of the transmission.
			 * Some fields of txstat are already filled in dma_tx().
			 */
			if (status->acked) {
				meta->txstat.flags |= IEEE80211_TX_STATUS_ACK;
			} else {
				if (!(meta->txstat.control.flags
				      & IEEE80211_TXCTL_NO_ACK))
					meta->txstat.excessive_retries = 1;
			}
			if (status->frame_count == 0) {
				/* The frame was not transmitted at all. */
				meta->txstat.retry_count = 0;
			} else
				meta->txstat.retry_count = status->frame_count
							   - 1;
			ieee80211_tx_status_irqsafe(dev->wl->hw, meta->skb,
						    &(meta->txstat));
			/* skb is freed by ieee80211_tx_status_irqsafe() */
			meta->skb = NULL;
		} else {
			/* No need to call free_descriptor_buffer here, as
			 * this is only the txhdr, which is not allocated.
			 */
			B43legacy_WARN_ON(meta->skb != NULL);
		}

		/* Everything unmapped and free'd. So it's not used anymore. */
		ring->used_slots--;

		if (meta->is_last_fragment)
			break;
		slot = next_slot(ring, slot);
	}
	dev->stats.last_tx = jiffies;
	if (ring->stopped) {
		B43legacy_WARN_ON(free_slots(ring) < SLOTS_PER_PACKET);
		ieee80211_wake_queue(dev->wl->hw, txring_to_priority(ring));
		ring->stopped = 0;
		if (b43legacy_debug(dev, B43legacy_DBG_DMAVERBOSE))
			b43legacydbg(dev->wl, "Woke up TX ring %d\n",
				     ring->index);
	}
	spin_unlock(&ring->lock);
}

void b43legacy_dma_get_tx_stats(struct b43legacy_wldev *dev,
				struct ieee80211_tx_queue_stats *stats)
{
	const int nr_queues = dev->wl->hw->queues;
	struct b43legacy_dmaring *ring;
	struct ieee80211_tx_queue_stats_data *data;
	unsigned long flags;
	int i;

	for (i = 0; i < nr_queues; i++) {
		data = &(stats->data[i]);
		ring = priority_to_txring(dev, i);

		spin_lock_irqsave(&ring->lock, flags);
		data->len = ring->used_slots / SLOTS_PER_PACKET;
		data->limit = ring->nr_slots / SLOTS_PER_PACKET;
		data->count = ring->nr_tx_packets;
		spin_unlock_irqrestore(&ring->lock, flags);
	}
}

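/* Handle one received frame (or, on ring 3, one hardware TX status blob).
 * The skb is passed up only after a replacement buffer has been attached
 * to the slot; on any error the old buffer is recycled instead. */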
static void dma_rx(struct b43legacy_dmaring *ring,
		   int *slot)
{
	const struct b43legacy_dma_ops *ops = ring->ops;
	struct b43legacy_dmadesc_generic *desc;
	struct b43legacy_dmadesc_meta *meta;
	struct b43legacy_rxhdr_fw3 *rxhdr;
	struct sk_buff *skb;
	u16 len;
	int err;
	dma_addr_t dmaaddr;

	desc = ops->idx2desc(ring, *slot, &meta);

	sync_descbuffer_for_cpu(ring, meta->dmaaddr, ring->rx_buffersize);
	skb = meta->skb;

	if (ring->index == 3) {
		/* We received an xmit status. */
		struct b43legacy_hwtxstatus *hw =
				(struct b43legacy_hwtxstatus *)skb->data;
		int i = 0;

		while (hw->cookie == 0) {
			if (i > 100)
				break;
			i++;
			udelay(2);
			barrier();
		}
		b43legacy_handle_hwtxstatus(ring->dev, hw);
		/* recycle the descriptor buffer. */
		sync_descbuffer_for_device(ring, meta->dmaaddr,
					   ring->rx_buffersize);

		return;
	}
	rxhdr = (struct b43legacy_rxhdr_fw3 *)skb->data;
	len = le16_to_cpu(rxhdr->frame_len);
	if (len == 0) {
		int i = 0;

		do {
			udelay(2);
			barrier();
			len = le16_to_cpu(rxhdr->frame_len);
		} while (len == 0 && i++ < 5);
		if (unlikely(len == 0)) {
			/* recycle the descriptor buffer. */
			sync_descbuffer_for_device(ring, meta->dmaaddr,
						   ring->rx_buffersize);
			goto drop;
		}
	}
	if (unlikely(len > ring->rx_buffersize)) {
		/* The data did not fit into one descriptor buffer
		 * and is split over multiple buffers.
		 * This should never happen, as we try to allocate buffers
		 * big enough. So simply ignore this packet.
		 */
		int cnt = 0;
		s32 tmp = len;

		while (1) {
			desc = ops->idx2desc(ring, *slot, &meta);
			/* recycle the descriptor buffer. */
			sync_descbuffer_for_device(ring, meta->dmaaddr,
						   ring->rx_buffersize);
			*slot = next_slot(ring, *slot);
			cnt++;
			tmp -= ring->rx_buffersize;
			if (tmp <= 0)
				break;
		}
		b43legacyerr(ring->dev->wl, "DMA RX buffer too small "
			     "(len: %u, buffer: %u, nr-dropped: %d)\n",
			     len, ring->rx_buffersize, cnt);
		goto drop;
	}

	dmaaddr = meta->dmaaddr;
	err = setup_rx_descbuffer(ring, desc, meta, GFP_ATOMIC);
	if (unlikely(err)) {
		b43legacydbg(ring->dev->wl, "DMA RX: setup_rx_descbuffer()"
			     " failed\n");
		sync_descbuffer_for_device(ring, dmaaddr,
					   ring->rx_buffersize);
		goto drop;
	}

	unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0);
	skb_put(skb, len + ring->frameoffset);
	skb_pull(skb, ring->frameoffset);

	b43legacy_rx(ring->dev, skb, rxhdr);
drop:
	return;
}

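/* Drain the RX ring: process every filled slot between our shadow slot
 * pointer and the hardware's current RX slot, then hand the new read
 * index back to the hardware. */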
void b43legacy_dma_rx(struct b43legacy_dmaring *ring)
{
	const struct b43legacy_dma_ops *ops = ring->ops;
	int slot;
	int current_slot;
	int used_slots = 0;

	B43legacy_WARN_ON(ring->tx);
	current_slot = ops->get_current_rxslot(ring);
	B43legacy_WARN_ON(!(current_slot >= 0 && current_slot <
			    ring->nr_slots));

	slot = ring->current_slot;
	for (; slot != current_slot; slot = next_slot(ring, slot)) {
		dma_rx(ring, &slot);
		update_max_used_slots(ring, ++used_slots);
	}
	ops->set_current_rxslot(ring, slot);
	ring->current_slot = slot;
}

static void b43legacy_dma_tx_suspend_ring(struct b43legacy_dmaring *ring)
{
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	B43legacy_WARN_ON(!ring->tx);
	ring->ops->tx_suspend(ring);
	spin_unlock_irqrestore(&ring->lock, flags);
}

static void b43legacy_dma_tx_resume_ring(struct b43legacy_dmaring *ring)
{
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	B43legacy_WARN_ON(!ring->tx);
	ring->ops->tx_resume(ring);
	spin_unlock_irqrestore(&ring->lock, flags);
}

void b43legacy_dma_tx_suspend(struct b43legacy_wldev *dev)
{
	b43legacy_power_saving_ctl_bits(dev, -1, 1);
	b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring0);
	b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring1);
	b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring2);
	b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring3);
	b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring4);
	b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring5);
}

void b43legacy_dma_tx_resume(struct b43legacy_wldev *dev)
{
	b43legacy_dma_tx_resume_ring(dev->dma.tx_ring5);
	b43legacy_dma_tx_resume_ring(dev->dma.tx_ring4);
	b43legacy_dma_tx_resume_ring(dev->dma.tx_ring3);
	b43legacy_dma_tx_resume_ring(dev->dma.tx_ring2);
	b43legacy_dma_tx_resume_ring(dev->dma.tx_ring1);
	b43legacy_dma_tx_resume_ring(dev->dma.tx_ring0);
	b43legacy_power_saving_ctl_bits(dev, -1, -1);
}