drivers/net/wireless/b43legacy/dma.c
/*

  Broadcom B43legacy wireless driver

  DMA ringbuffer and descriptor allocation/management

  Copyright (c) 2005, 2006 Michael Buesch <m@bues.ch>

  Some code in this file is derived from the b44.c driver
  Copyright (C) 2002 David S. Miller
  Copyright (C) Pekka Pietikainen

  This program is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; either version 2 of the License, or
  (at your option) any later version.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; see the file COPYING.  If not, write to
  the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
  Boston, MA 02110-1301, USA.

*/
#include "b43legacy.h"
#include "dma.h"
#include "main.h"
#include "debugfs.h"
#include "xmit.h"

#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <net/dst.h>

/* 32bit DMA ops. */
static
struct b43legacy_dmadesc32 *op32_idx2desc(struct b43legacy_dmaring *ring,
					  int slot,
					  struct b43legacy_dmadesc_meta **meta)
{
	struct b43legacy_dmadesc32 *desc;

	*meta = &(ring->meta[slot]);
	desc = ring->descbase;
	desc = &(desc[slot]);

	return desc;
}

static void op32_fill_descriptor(struct b43legacy_dmaring *ring,
				 struct b43legacy_dmadesc32 *desc,
				 dma_addr_t dmaaddr, u16 bufsize,
				 int start, int end, int irq)
{
	struct b43legacy_dmadesc32 *descbase = ring->descbase;
	int slot;
	u32 ctl;
	u32 addr;
	u32 addrext;

	slot = (int)(desc - descbase);
	B43legacy_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));

	addr = (u32)(dmaaddr & ~SSB_DMA_TRANSLATION_MASK);
	addrext = (u32)(dmaaddr & SSB_DMA_TRANSLATION_MASK)
		  >> SSB_DMA_TRANSLATION_SHIFT;
	addr |= ring->dev->dma.translation;
	ctl = (bufsize - ring->frameoffset)
	      & B43legacy_DMA32_DCTL_BYTECNT;
	if (slot == ring->nr_slots - 1)
		ctl |= B43legacy_DMA32_DCTL_DTABLEEND;
	if (start)
		ctl |= B43legacy_DMA32_DCTL_FRAMESTART;
	if (end)
		ctl |= B43legacy_DMA32_DCTL_FRAMEEND;
	if (irq)
		ctl |= B43legacy_DMA32_DCTL_IRQ;
	ctl |= (addrext << B43legacy_DMA32_DCTL_ADDREXT_SHIFT)
	       & B43legacy_DMA32_DCTL_ADDREXT_MASK;

	desc->control = cpu_to_le32(ctl);
	desc->address = cpu_to_le32(addr);
}
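
/*
 * Editorial note (summary added for clarity, not from the original source):
 * each 32-bit descriptor carries the low address bits in 'address' (with
 * the SSB translation bits OR'ed in), while the bits covered by
 * SSB_DMA_TRANSLATION_MASK are stored separately in the ADDREXT field of
 * 'control', together with the byte count and the
 * FRAMESTART/FRAMEEND/IRQ/DTABLEEND flags.
 */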
static void op32_poke_tx(struct b43legacy_dmaring *ring, int slot)
{
	b43legacy_dma_write(ring, B43legacy_DMA32_TXINDEX,
			    (u32)(slot * sizeof(struct b43legacy_dmadesc32)));
}

static void op32_tx_suspend(struct b43legacy_dmaring *ring)
{
	b43legacy_dma_write(ring, B43legacy_DMA32_TXCTL,
			    b43legacy_dma_read(ring, B43legacy_DMA32_TXCTL)
			    | B43legacy_DMA32_TXSUSPEND);
}

static void op32_tx_resume(struct b43legacy_dmaring *ring)
{
	b43legacy_dma_write(ring, B43legacy_DMA32_TXCTL,
			    b43legacy_dma_read(ring, B43legacy_DMA32_TXCTL)
			    & ~B43legacy_DMA32_TXSUSPEND);
}

static int op32_get_current_rxslot(struct b43legacy_dmaring *ring)
{
	u32 val;

	val = b43legacy_dma_read(ring, B43legacy_DMA32_RXSTATUS);
	val &= B43legacy_DMA32_RXDPTR;

	return (val / sizeof(struct b43legacy_dmadesc32));
}

static void op32_set_current_rxslot(struct b43legacy_dmaring *ring,
				    int slot)
{
	b43legacy_dma_write(ring, B43legacy_DMA32_RXINDEX,
			    (u32)(slot * sizeof(struct b43legacy_dmadesc32)));
}

static inline int free_slots(struct b43legacy_dmaring *ring)
{
	return (ring->nr_slots - ring->used_slots);
}

static inline int next_slot(struct b43legacy_dmaring *ring, int slot)
{
	B43legacy_WARN_ON(!(slot >= -1 && slot <= ring->nr_slots - 1));
	if (slot == ring->nr_slots - 1)
		return 0;
	return slot + 1;
}

static inline int prev_slot(struct b43legacy_dmaring *ring, int slot)
{
	B43legacy_WARN_ON(!(slot >= 0 && slot <= ring->nr_slots - 1));
	if (slot == 0)
		return ring->nr_slots - 1;
	return slot - 1;
}

#ifdef CONFIG_B43LEGACY_DEBUG
static void update_max_used_slots(struct b43legacy_dmaring *ring,
				  int current_used_slots)
{
	if (current_used_slots <= ring->max_used_slots)
		return;
	ring->max_used_slots = current_used_slots;
	if (b43legacy_debug(ring->dev, B43legacy_DBG_DMAVERBOSE))
		b43legacydbg(ring->dev->wl,
			     "max_used_slots increased to %d on %s ring %d\n",
			     ring->max_used_slots,
			     ring->tx ? "TX" : "RX",
			     ring->index);
}
#else
static inline
void update_max_used_slots(struct b43legacy_dmaring *ring,
			   int current_used_slots)
{ }
#endif /* DEBUG */

/* Request a slot for usage. */
static inline
int request_slot(struct b43legacy_dmaring *ring)
{
	int slot;

	B43legacy_WARN_ON(!ring->tx);
	B43legacy_WARN_ON(ring->stopped);
	B43legacy_WARN_ON(free_slots(ring) == 0);

	slot = next_slot(ring, ring->current_slot);
	ring->current_slot = slot;
	ring->used_slots++;

	update_max_used_slots(ring, ring->used_slots);

	return slot;
}

/* Mac80211-queue to b43legacy-ring mapping */
static struct b43legacy_dmaring *priority_to_txring(
					struct b43legacy_wldev *dev,
					int queue_priority)
{
	struct b43legacy_dmaring *ring;

/*FIXME: For now we always run on TX-ring-1 */
	return dev->dma.tx_ring1;

	/* 0 = highest priority */
	switch (queue_priority) {
	default:
		B43legacy_WARN_ON(1);
		/* fallthrough */
	case 0:
		ring = dev->dma.tx_ring3;
		break;
	case 1:
		ring = dev->dma.tx_ring2;
		break;
	case 2:
		ring = dev->dma.tx_ring1;
		break;
	case 3:
		ring = dev->dma.tx_ring0;
		break;
	case 4:
		ring = dev->dma.tx_ring4;
		break;
	case 5:
		ring = dev->dma.tx_ring5;
		break;
	}

	return ring;
}

/* Bcm4301-ring to mac80211-queue mapping */
static inline int txring_to_priority(struct b43legacy_dmaring *ring)
{
	static const u8 idx_to_prio[] =
		{ 3, 2, 1, 0, 4, 5, };

/*FIXME: have only one queue, for now */
	return 0;

	return idx_to_prio[ring->index];
}

static u16 b43legacy_dmacontroller_base(enum b43legacy_dmatype type,
					int controller_idx)
{
	static const u16 map32[] = {
		B43legacy_MMIO_DMA32_BASE0,
		B43legacy_MMIO_DMA32_BASE1,
		B43legacy_MMIO_DMA32_BASE2,
		B43legacy_MMIO_DMA32_BASE3,
		B43legacy_MMIO_DMA32_BASE4,
		B43legacy_MMIO_DMA32_BASE5,
	};

	B43legacy_WARN_ON(!(controller_idx >= 0 &&
			    controller_idx < ARRAY_SIZE(map32)));
	return map32[controller_idx];
}
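
/*
 * Editorial note (not from the original source): only the 32-bit register
 * layout is mapped here. The 'type' argument (30- vs. 32-bit engine) does
 * not change the MMIO base on this legacy hardware, so it is accepted but
 * unused.
 */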
static inline
dma_addr_t map_descbuffer(struct b43legacy_dmaring *ring,
			  unsigned char *buf,
			  size_t len,
			  int tx)
{
	dma_addr_t dmaaddr;

	if (tx)
		dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
					 buf, len,
					 DMA_TO_DEVICE);
	else
		dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
					 buf, len,
					 DMA_FROM_DEVICE);

	return dmaaddr;
}

static inline
void unmap_descbuffer(struct b43legacy_dmaring *ring,
		      dma_addr_t addr,
		      size_t len,
		      int tx)
{
	if (tx)
		dma_unmap_single(ring->dev->dev->dma_dev,
				 addr, len,
				 DMA_TO_DEVICE);
	else
		dma_unmap_single(ring->dev->dev->dma_dev,
				 addr, len,
				 DMA_FROM_DEVICE);
}

static inline
void sync_descbuffer_for_cpu(struct b43legacy_dmaring *ring,
			     dma_addr_t addr,
			     size_t len)
{
	B43legacy_WARN_ON(ring->tx);

	dma_sync_single_for_cpu(ring->dev->dev->dma_dev,
				addr, len, DMA_FROM_DEVICE);
}

static inline
void sync_descbuffer_for_device(struct b43legacy_dmaring *ring,
				dma_addr_t addr,
				size_t len)
{
	B43legacy_WARN_ON(ring->tx);

	dma_sync_single_for_device(ring->dev->dev->dma_dev,
				   addr, len, DMA_FROM_DEVICE);
}

static inline
void free_descriptor_buffer(struct b43legacy_dmaring *ring,
			    struct b43legacy_dmadesc_meta *meta,
			    int irq_context)
{
	if (meta->skb) {
		if (irq_context)
			dev_kfree_skb_irq(meta->skb);
		else
			dev_kfree_skb(meta->skb);
		meta->skb = NULL;
	}
}

static int alloc_ringmemory(struct b43legacy_dmaring *ring)
{
	/* GFP flags must match the flags in free_ringmemory()! */
	ring->descbase = dma_zalloc_coherent(ring->dev->dev->dma_dev,
					     B43legacy_DMA_RINGMEMSIZE,
					     &(ring->dmabase), GFP_KERNEL);
	if (!ring->descbase)
		return -ENOMEM;

	return 0;
}
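
/*
 * Editorial note (not from the original source): the descriptor ring
 * itself lives in coherent DMA memory allocated above, so it never needs
 * explicit sync calls. Only the packet buffers, which use streaming
 * dma_map_single() mappings, need the sync_descbuffer_for_{cpu,device}
 * helpers defined earlier.
 */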
static void free_ringmemory(struct b43legacy_dmaring *ring)
{
	dma_free_coherent(ring->dev->dev->dma_dev, B43legacy_DMA_RINGMEMSIZE,
			  ring->descbase, ring->dmabase);
}

/* Reset the RX DMA channel */
static int b43legacy_dmacontroller_rx_reset(struct b43legacy_wldev *dev,
					    u16 mmio_base,
					    enum b43legacy_dmatype type)
{
	int i;
	u32 value;
	u16 offset;

	might_sleep();

	offset = B43legacy_DMA32_RXCTL;
	b43legacy_write32(dev, mmio_base + offset, 0);
	for (i = 0; i < 10; i++) {
		offset = B43legacy_DMA32_RXSTATUS;
		value = b43legacy_read32(dev, mmio_base + offset);
		value &= B43legacy_DMA32_RXSTATE;
		if (value == B43legacy_DMA32_RXSTAT_DISABLED) {
			i = -1;
			break;
		}
		msleep(1);
	}
	if (i != -1) {
		b43legacyerr(dev->wl, "DMA RX reset timed out\n");
		return -ENODEV;
	}

	return 0;
}
/* Reset the TX DMA channel */
static int b43legacy_dmacontroller_tx_reset(struct b43legacy_wldev *dev,
					    u16 mmio_base,
					    enum b43legacy_dmatype type)
{
	int i;
	u32 value;
	u16 offset;

	might_sleep();

	for (i = 0; i < 10; i++) {
		offset = B43legacy_DMA32_TXSTATUS;
		value = b43legacy_read32(dev, mmio_base + offset);
		value &= B43legacy_DMA32_TXSTATE;
		if (value == B43legacy_DMA32_TXSTAT_DISABLED ||
		    value == B43legacy_DMA32_TXSTAT_IDLEWAIT ||
		    value == B43legacy_DMA32_TXSTAT_STOPPED)
			break;
		msleep(1);
	}
	offset = B43legacy_DMA32_TXCTL;
	b43legacy_write32(dev, mmio_base + offset, 0);
	for (i = 0; i < 10; i++) {
		offset = B43legacy_DMA32_TXSTATUS;
		value = b43legacy_read32(dev, mmio_base + offset);
		value &= B43legacy_DMA32_TXSTATE;
		if (value == B43legacy_DMA32_TXSTAT_DISABLED) {
			i = -1;
			break;
		}
		msleep(1);
	}
	if (i != -1) {
		b43legacyerr(dev->wl, "DMA TX reset timed out\n");
		return -ENODEV;
	}
	/* ensure the reset is completed. */
	msleep(1);

	return 0;
}
/* Check if a DMA mapping address is invalid. */
static bool b43legacy_dma_mapping_error(struct b43legacy_dmaring *ring,
					dma_addr_t addr,
					size_t buffersize,
					bool dma_to_device)
{
	if (unlikely(dma_mapping_error(ring->dev->dev->dma_dev, addr)))
		return 1;

	switch (ring->type) {
	case B43legacy_DMA_30BIT:
		if ((u64)addr + buffersize > (1ULL << 30))
			goto address_error;
		break;
	case B43legacy_DMA_32BIT:
		if ((u64)addr + buffersize > (1ULL << 32))
			goto address_error;
		break;
	}

	/* The address is OK. */
	return 0;

address_error:
	/* We can't support this address. Unmap it again. */
	unmap_descbuffer(ring, addr, buffersize, dma_to_device);

	return 1;
}
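
/*
 * Editorial note (not from the original source): the check above uses
 * addr + buffersize rather than just addr, because a buffer that starts
 * below the engine's addressing limit can still cross it. For example, a
 * 2 KiB buffer mapped at (1 << 30) - 1024 is unreachable for a 30-bit
 * engine even though its start address fits.
 */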
static int setup_rx_descbuffer(struct b43legacy_dmaring *ring,
			       struct b43legacy_dmadesc32 *desc,
			       struct b43legacy_dmadesc_meta *meta,
			       gfp_t gfp_flags)
{
	struct b43legacy_rxhdr_fw3 *rxhdr;
	struct b43legacy_hwtxstatus *txstat;
	dma_addr_t dmaaddr;
	struct sk_buff *skb;

	B43legacy_WARN_ON(ring->tx);

	skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
	if (unlikely(!skb))
		return -ENOMEM;
	dmaaddr = map_descbuffer(ring, skb->data,
				 ring->rx_buffersize, 0);
	if (b43legacy_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
		/* ugh. try to realloc in zone_dma */
		gfp_flags |= GFP_DMA;

		dev_kfree_skb_any(skb);

		skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
		if (unlikely(!skb))
			return -ENOMEM;
		dmaaddr = map_descbuffer(ring, skb->data,
					 ring->rx_buffersize, 0);
	}

	if (b43legacy_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
		dev_kfree_skb_any(skb);
		return -EIO;
	}

	meta->skb = skb;
	meta->dmaaddr = dmaaddr;
	op32_fill_descriptor(ring, desc, dmaaddr, ring->rx_buffersize, 0, 0, 0);

	rxhdr = (struct b43legacy_rxhdr_fw3 *)(skb->data);
	rxhdr->frame_len = 0;
	txstat = (struct b43legacy_hwtxstatus *)(skb->data);
	txstat->cookie = 0;

	return 0;
}

/* Allocate the initial descbuffers.
 * This is used for an RX ring only.
 */
static int alloc_initial_descbuffers(struct b43legacy_dmaring *ring)
{
	int i;
	int err = -ENOMEM;
	struct b43legacy_dmadesc32 *desc;
	struct b43legacy_dmadesc_meta *meta;

	for (i = 0; i < ring->nr_slots; i++) {
		desc = op32_idx2desc(ring, i, &meta);

		err = setup_rx_descbuffer(ring, desc, meta, GFP_KERNEL);
		if (err) {
			b43legacyerr(ring->dev->wl,
				     "Failed to allocate initial descbuffers\n");
			goto err_unwind;
		}
	}
	mb(); /* all descbuffer setup before next line */
	ring->used_slots = ring->nr_slots;
	err = 0;
out:
	return err;

err_unwind:
	for (i--; i >= 0; i--) {
		desc = op32_idx2desc(ring, i, &meta);

		unmap_descbuffer(ring, meta->dmaaddr, ring->rx_buffersize, 0);
		dev_kfree_skb(meta->skb);
	}
	goto out;
}

/* Do initial setup of the DMA controller.
 * Reset the controller, write the ring busaddress
 * and switch the "enable" bit on.
 */
static int dmacontroller_setup(struct b43legacy_dmaring *ring)
{
	int err = 0;
	u32 value;
	u32 addrext;
	u32 trans = ring->dev->dma.translation;
	u32 ringbase = (u32)(ring->dmabase);

	if (ring->tx) {
		addrext = (ringbase & SSB_DMA_TRANSLATION_MASK)
			  >> SSB_DMA_TRANSLATION_SHIFT;
		value = B43legacy_DMA32_TXENABLE;
		value |= (addrext << B43legacy_DMA32_TXADDREXT_SHIFT)
			 & B43legacy_DMA32_TXADDREXT_MASK;
		b43legacy_dma_write(ring, B43legacy_DMA32_TXCTL, value);
		b43legacy_dma_write(ring, B43legacy_DMA32_TXRING,
				    (ringbase & ~SSB_DMA_TRANSLATION_MASK)
				    | trans);
	} else {
		err = alloc_initial_descbuffers(ring);
		if (err)
			goto out;

		addrext = (ringbase & SSB_DMA_TRANSLATION_MASK)
			  >> SSB_DMA_TRANSLATION_SHIFT;
		value = (ring->frameoffset <<
			 B43legacy_DMA32_RXFROFF_SHIFT);
		value |= B43legacy_DMA32_RXENABLE;
		value |= (addrext << B43legacy_DMA32_RXADDREXT_SHIFT)
			 & B43legacy_DMA32_RXADDREXT_MASK;
		b43legacy_dma_write(ring, B43legacy_DMA32_RXCTL, value);
		b43legacy_dma_write(ring, B43legacy_DMA32_RXRING,
				    (ringbase & ~SSB_DMA_TRANSLATION_MASK)
				    | trans);
		b43legacy_dma_write(ring, B43legacy_DMA32_RXINDEX, 200);
	}

out:
	return err;
}

/* Shutdown the DMA controller. */
static void dmacontroller_cleanup(struct b43legacy_dmaring *ring)
{
	if (ring->tx) {
		b43legacy_dmacontroller_tx_reset(ring->dev, ring->mmio_base,
						 ring->type);
		b43legacy_dma_write(ring, B43legacy_DMA32_TXRING, 0);
	} else {
		b43legacy_dmacontroller_rx_reset(ring->dev, ring->mmio_base,
						 ring->type);
		b43legacy_dma_write(ring, B43legacy_DMA32_RXRING, 0);
	}
}

static void free_all_descbuffers(struct b43legacy_dmaring *ring)
{
	struct b43legacy_dmadesc_meta *meta;
	int i;

	if (!ring->used_slots)
		return;
	for (i = 0; i < ring->nr_slots; i++) {
		op32_idx2desc(ring, i, &meta);

		if (!meta->skb) {
			B43legacy_WARN_ON(!ring->tx);
			continue;
		}
		if (ring->tx)
			unmap_descbuffer(ring, meta->dmaaddr,
					 meta->skb->len, 1);
		else
			unmap_descbuffer(ring, meta->dmaaddr,
					 ring->rx_buffersize, 0);
		free_descriptor_buffer(ring, meta, 0);
	}
}

static u64 supported_dma_mask(struct b43legacy_wldev *dev)
{
	u32 tmp;
	u16 mmio_base;

	mmio_base = b43legacy_dmacontroller_base(0, 0);
	b43legacy_write32(dev,
			  mmio_base + B43legacy_DMA32_TXCTL,
			  B43legacy_DMA32_TXADDREXT_MASK);
	tmp = b43legacy_read32(dev, mmio_base +
			       B43legacy_DMA32_TXCTL);
	if (tmp & B43legacy_DMA32_TXADDREXT_MASK)
		return DMA_BIT_MASK(32);

	return DMA_BIT_MASK(30);
}
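
/*
 * Editorial note (not from the original source): the probe above writes
 * the ADDREXT mask into TXCTL and reads it back. Engines with 32-bit
 * addressing implement the ADDREXT bits and read back non-zero; on older
 * 30-bit engines the bits are hardwired to zero, which identifies the
 * smaller DMA mask.
 */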
static enum b43legacy_dmatype dma_mask_to_engine_type(u64 dmamask)
{
	if (dmamask == DMA_BIT_MASK(30))
		return B43legacy_DMA_30BIT;
	if (dmamask == DMA_BIT_MASK(32))
		return B43legacy_DMA_32BIT;
	B43legacy_WARN_ON(1);
	return B43legacy_DMA_30BIT;
}

/* Main initialization function. */
static
struct b43legacy_dmaring *b43legacy_setup_dmaring(struct b43legacy_wldev *dev,
						  int controller_index,
						  int for_tx,
						  enum b43legacy_dmatype type)
{
	struct b43legacy_dmaring *ring;
	int err;
	int nr_slots;
	dma_addr_t dma_test;

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		goto out;
	ring->type = type;
	ring->dev = dev;

	nr_slots = B43legacy_RXRING_SLOTS;
	if (for_tx)
		nr_slots = B43legacy_TXRING_SLOTS;

	ring->meta = kcalloc(nr_slots, sizeof(struct b43legacy_dmadesc_meta),
			     GFP_KERNEL);
	if (!ring->meta)
		goto err_kfree_ring;
	if (for_tx) {
		ring->txhdr_cache = kcalloc(nr_slots,
					    sizeof(struct b43legacy_txhdr_fw3),
					    GFP_KERNEL);
		if (!ring->txhdr_cache)
			goto err_kfree_meta;

		/* test for ability to dma to txhdr_cache */
		dma_test = dma_map_single(dev->dev->dma_dev, ring->txhdr_cache,
					  sizeof(struct b43legacy_txhdr_fw3),
					  DMA_TO_DEVICE);

		if (b43legacy_dma_mapping_error(ring, dma_test,
					sizeof(struct b43legacy_txhdr_fw3), 1)) {
			/* ugh realloc */
			kfree(ring->txhdr_cache);
			ring->txhdr_cache = kcalloc(nr_slots,
					sizeof(struct b43legacy_txhdr_fw3),
					GFP_KERNEL | GFP_DMA);
			if (!ring->txhdr_cache)
				goto err_kfree_meta;

			dma_test = dma_map_single(dev->dev->dma_dev,
					ring->txhdr_cache,
					sizeof(struct b43legacy_txhdr_fw3),
					DMA_TO_DEVICE);

			if (b43legacy_dma_mapping_error(ring, dma_test,
					sizeof(struct b43legacy_txhdr_fw3), 1))
				goto err_kfree_txhdr_cache;
		}

		dma_unmap_single(dev->dev->dma_dev, dma_test,
				 sizeof(struct b43legacy_txhdr_fw3),
				 DMA_TO_DEVICE);
	}

	ring->nr_slots = nr_slots;
	ring->mmio_base = b43legacy_dmacontroller_base(type, controller_index);
	ring->index = controller_index;
	if (for_tx) {
		ring->tx = true;
		ring->current_slot = -1;
	} else {
		if (ring->index == 0) {
			ring->rx_buffersize = B43legacy_DMA0_RX_BUFFERSIZE;
			ring->frameoffset = B43legacy_DMA0_RX_FRAMEOFFSET;
		} else if (ring->index == 3) {
			ring->rx_buffersize = B43legacy_DMA3_RX_BUFFERSIZE;
			ring->frameoffset = B43legacy_DMA3_RX_FRAMEOFFSET;
		} else
			B43legacy_WARN_ON(1);
	}
#ifdef CONFIG_B43LEGACY_DEBUG
	ring->last_injected_overflow = jiffies;
#endif

	err = alloc_ringmemory(ring);
	if (err)
		goto err_kfree_txhdr_cache;
	err = dmacontroller_setup(ring);
	if (err)
		goto err_free_ringmemory;

out:
	return ring;

err_free_ringmemory:
	free_ringmemory(ring);
err_kfree_txhdr_cache:
	kfree(ring->txhdr_cache);
err_kfree_meta:
	kfree(ring->meta);
err_kfree_ring:
	kfree(ring);
	ring = NULL;
	goto out;
}

/* Main cleanup function. */
static void b43legacy_destroy_dmaring(struct b43legacy_dmaring *ring)
{
	if (!ring)
		return;

	b43legacydbg(ring->dev->wl, "DMA-%u 0x%04X (%s) max used slots:"
		     " %d/%d\n", (unsigned int)(ring->type), ring->mmio_base,
		     (ring->tx) ? "TX" : "RX", ring->max_used_slots,
		     ring->nr_slots);
	/* Device IRQs are disabled prior entering this function,
	 * so no need to take care of concurrency with rx handler stuff.
	 */
	dmacontroller_cleanup(ring);
	free_all_descbuffers(ring);
	free_ringmemory(ring);

	kfree(ring->txhdr_cache);
	kfree(ring->meta);
	kfree(ring);
}

void b43legacy_dma_free(struct b43legacy_wldev *dev)
{
	struct b43legacy_dma *dma;

	if (b43legacy_using_pio(dev))
		return;
	dma = &dev->dma;

	b43legacy_destroy_dmaring(dma->rx_ring3);
	dma->rx_ring3 = NULL;
	b43legacy_destroy_dmaring(dma->rx_ring0);
	dma->rx_ring0 = NULL;

	b43legacy_destroy_dmaring(dma->tx_ring5);
	dma->tx_ring5 = NULL;
	b43legacy_destroy_dmaring(dma->tx_ring4);
	dma->tx_ring4 = NULL;
	b43legacy_destroy_dmaring(dma->tx_ring3);
	dma->tx_ring3 = NULL;
	b43legacy_destroy_dmaring(dma->tx_ring2);
	dma->tx_ring2 = NULL;
	b43legacy_destroy_dmaring(dma->tx_ring1);
	dma->tx_ring1 = NULL;
	b43legacy_destroy_dmaring(dma->tx_ring0);
	dma->tx_ring0 = NULL;
}

static int b43legacy_dma_set_mask(struct b43legacy_wldev *dev, u64 mask)
{
	u64 orig_mask = mask;
	bool fallback = false;
	int err;

	/* Try to set the DMA mask. If it fails, try falling back to a
	 * lower mask, as we can always also support a lower one. */
	while (1) {
		err = dma_set_mask(dev->dev->dma_dev, mask);
		if (!err) {
			err = dma_set_coherent_mask(dev->dev->dma_dev, mask);
			if (!err)
				break;
		}
		if (mask == DMA_BIT_MASK(64)) {
			mask = DMA_BIT_MASK(32);
			fallback = true;
			continue;
		}
		if (mask == DMA_BIT_MASK(32)) {
			mask = DMA_BIT_MASK(30);
			fallback = true;
			continue;
		}
		b43legacyerr(dev->wl, "The machine/kernel does not support "
			     "the required %u-bit DMA mask\n",
			     (unsigned int)dma_mask_to_engine_type(orig_mask));
		return -EOPNOTSUPP;
	}
	if (fallback) {
		b43legacyinfo(dev->wl, "DMA mask fallback from %u-bit to %u-"
			      "bit\n",
			      (unsigned int)dma_mask_to_engine_type(orig_mask),
			      (unsigned int)dma_mask_to_engine_type(mask));
	}

	return 0;
}
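
/*
 * Editorial note (not from the original source): both the streaming mask
 * (dma_set_mask) and the coherent mask (dma_set_coherent_mask) must
 * succeed for a given width before it is accepted; otherwise the loop
 * above steps down 64 -> 32 -> 30 bits and gives up below 30 bits, the
 * narrowest engine this driver supports.
 */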
int b43legacy_dma_init(struct b43legacy_wldev *dev)
{
	struct b43legacy_dma *dma = &dev->dma;
	struct b43legacy_dmaring *ring;
	int err;
	u64 dmamask;
	enum b43legacy_dmatype type;

	dmamask = supported_dma_mask(dev);
	type = dma_mask_to_engine_type(dmamask);
	err = b43legacy_dma_set_mask(dev, dmamask);
	if (err) {
#ifdef CONFIG_B43LEGACY_PIO
		b43legacywarn(dev->wl, "DMA for this device not supported. "
			      "Falling back to PIO\n");
		dev->__using_pio = true;
		return -EAGAIN;
#else
		b43legacyerr(dev->wl, "DMA for this device not supported and "
			     "no PIO support compiled in\n");
		return -EOPNOTSUPP;
#endif
	}
	dma->translation = ssb_dma_translation(dev->dev);

	err = -ENOMEM;
	/* setup TX DMA channels. */
	ring = b43legacy_setup_dmaring(dev, 0, 1, type);
	if (!ring)
		goto out;
	dma->tx_ring0 = ring;

	ring = b43legacy_setup_dmaring(dev, 1, 1, type);
	if (!ring)
		goto err_destroy_tx0;
	dma->tx_ring1 = ring;

	ring = b43legacy_setup_dmaring(dev, 2, 1, type);
	if (!ring)
		goto err_destroy_tx1;
	dma->tx_ring2 = ring;

	ring = b43legacy_setup_dmaring(dev, 3, 1, type);
	if (!ring)
		goto err_destroy_tx2;
	dma->tx_ring3 = ring;

	ring = b43legacy_setup_dmaring(dev, 4, 1, type);
	if (!ring)
		goto err_destroy_tx3;
	dma->tx_ring4 = ring;

	ring = b43legacy_setup_dmaring(dev, 5, 1, type);
	if (!ring)
		goto err_destroy_tx4;
	dma->tx_ring5 = ring;

	/* setup RX DMA channels. */
	ring = b43legacy_setup_dmaring(dev, 0, 0, type);
	if (!ring)
		goto err_destroy_tx5;
	dma->rx_ring0 = ring;

	if (dev->dev->id.revision < 5) {
		ring = b43legacy_setup_dmaring(dev, 3, 0, type);
		if (!ring)
			goto err_destroy_rx0;
		dma->rx_ring3 = ring;
	}

	b43legacydbg(dev->wl, "%u-bit DMA initialized\n", (unsigned int)type);
	err = 0;
out:
	return err;

err_destroy_rx0:
	b43legacy_destroy_dmaring(dma->rx_ring0);
	dma->rx_ring0 = NULL;
err_destroy_tx5:
	b43legacy_destroy_dmaring(dma->tx_ring5);
	dma->tx_ring5 = NULL;
err_destroy_tx4:
	b43legacy_destroy_dmaring(dma->tx_ring4);
	dma->tx_ring4 = NULL;
err_destroy_tx3:
	b43legacy_destroy_dmaring(dma->tx_ring3);
	dma->tx_ring3 = NULL;
err_destroy_tx2:
	b43legacy_destroy_dmaring(dma->tx_ring2);
	dma->tx_ring2 = NULL;
err_destroy_tx1:
	b43legacy_destroy_dmaring(dma->tx_ring1);
	dma->tx_ring1 = NULL;
err_destroy_tx0:
	b43legacy_destroy_dmaring(dma->tx_ring0);
	dma->tx_ring0 = NULL;
	goto out;
}
/* Generate a cookie for the TX header. */
static u16 generate_cookie(struct b43legacy_dmaring *ring,
			   int slot)
{
	u16 cookie = 0x1000;

	/* Use the upper 4 bits of the cookie as
	 * DMA controller ID and store the slot number
	 * in the lower 12 bits.
	 * Note that the cookie must never be 0, as this
	 * is a special value used in RX path.
	 */
	switch (ring->index) {
	case 0:
		cookie = 0xA000;
		break;
	case 1:
		cookie = 0xB000;
		break;
	case 2:
		cookie = 0xC000;
		break;
	case 3:
		cookie = 0xD000;
		break;
	case 4:
		cookie = 0xE000;
		break;
	case 5:
		cookie = 0xF000;
		break;
	}
	B43legacy_WARN_ON(!(((u16)slot & 0xF000) == 0x0000));
	cookie |= (u16)slot;

	return cookie;
}
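
/*
 * Editorial example (not from the original source): a frame queued in
 * slot 0x023 of TX ring 2 gets the cookie 0xC023 -- ring ID 0xC in the
 * upper nibble, slot number in the lower 12 bits. parse_cookie() below
 * reverses exactly this encoding.
 */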
/* Inspect a cookie and find out to which controller/slot it belongs. */
static
struct b43legacy_dmaring *parse_cookie(struct b43legacy_wldev *dev,
				       u16 cookie, int *slot)
{
	struct b43legacy_dma *dma = &dev->dma;
	struct b43legacy_dmaring *ring = NULL;

	switch (cookie & 0xF000) {
	case 0xA000:
		ring = dma->tx_ring0;
		break;
	case 0xB000:
		ring = dma->tx_ring1;
		break;
	case 0xC000:
		ring = dma->tx_ring2;
		break;
	case 0xD000:
		ring = dma->tx_ring3;
		break;
	case 0xE000:
		ring = dma->tx_ring4;
		break;
	case 0xF000:
		ring = dma->tx_ring5;
		break;
	default:
		B43legacy_WARN_ON(1);
	}
	*slot = (cookie & 0x0FFF);
	B43legacy_WARN_ON(!(ring && *slot >= 0 && *slot < ring->nr_slots));

	return ring;
}
static int dma_tx_fragment(struct b43legacy_dmaring *ring,
			   struct sk_buff **in_skb)
{
	struct sk_buff *skb = *in_skb;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	u8 *header;
	int slot, old_top_slot, old_used_slots;
	int err;
	struct b43legacy_dmadesc32 *desc;
	struct b43legacy_dmadesc_meta *meta;
	struct b43legacy_dmadesc_meta *meta_hdr;
	struct sk_buff *bounce_skb;

#define SLOTS_PER_PACKET  2
	B43legacy_WARN_ON(skb_shinfo(skb)->nr_frags != 0);

	old_top_slot = ring->current_slot;
	old_used_slots = ring->used_slots;

	/* Get a slot for the header. */
	slot = request_slot(ring);
	desc = op32_idx2desc(ring, slot, &meta_hdr);
	memset(meta_hdr, 0, sizeof(*meta_hdr));

	header = &(ring->txhdr_cache[slot * sizeof(
			       struct b43legacy_txhdr_fw3)]);
	err = b43legacy_generate_txhdr(ring->dev, header,
				       skb->data, skb->len, info,
				       generate_cookie(ring, slot));
	if (unlikely(err)) {
		ring->current_slot = old_top_slot;
		ring->used_slots = old_used_slots;
		return err;
	}

	meta_hdr->dmaaddr = map_descbuffer(ring, (unsigned char *)header,
				       sizeof(struct b43legacy_txhdr_fw3), 1);
	if (b43legacy_dma_mapping_error(ring, meta_hdr->dmaaddr,
					sizeof(struct b43legacy_txhdr_fw3), 1)) {
		ring->current_slot = old_top_slot;
		ring->used_slots = old_used_slots;
		return -EIO;
	}
	op32_fill_descriptor(ring, desc, meta_hdr->dmaaddr,
			     sizeof(struct b43legacy_txhdr_fw3), 1, 0, 0);

	/* Get a slot for the payload. */
	slot = request_slot(ring);
	desc = op32_idx2desc(ring, slot, &meta);
	memset(meta, 0, sizeof(*meta));

	meta->skb = skb;
	meta->is_last_fragment = true;

	meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
	/* create a bounce buffer in zone_dma on mapping failure. */
	if (b43legacy_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
		bounce_skb = alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
		if (!bounce_skb) {
			ring->current_slot = old_top_slot;
			ring->used_slots = old_used_slots;
			err = -ENOMEM;
			goto out_unmap_hdr;
		}

		memcpy(skb_put(bounce_skb, skb->len), skb->data, skb->len);
		memcpy(bounce_skb->cb, skb->cb, sizeof(skb->cb));
		bounce_skb->dev = skb->dev;
		skb_set_queue_mapping(bounce_skb, skb_get_queue_mapping(skb));
		info = IEEE80211_SKB_CB(bounce_skb);

		dev_kfree_skb_any(skb);
		skb = bounce_skb;
		*in_skb = bounce_skb;
		meta->skb = skb;
		meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
		if (b43legacy_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
			ring->current_slot = old_top_slot;
			ring->used_slots = old_used_slots;
			err = -EIO;
			goto out_free_bounce;
		}
	}

	op32_fill_descriptor(ring, desc, meta->dmaaddr,
			     skb->len, 0, 1, 1);

	wmb();	/* previous stuff MUST be done */
	/* Now transfer the whole frame. */
	op32_poke_tx(ring, next_slot(ring, slot));
	return 0;

out_free_bounce:
	dev_kfree_skb_any(skb);
out_unmap_hdr:
	unmap_descbuffer(ring, meta_hdr->dmaaddr,
			 sizeof(struct b43legacy_txhdr_fw3), 1);
	return err;
}
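
/*
 * Editorial note (not from the original source): each frame therefore
 * occupies SLOTS_PER_PACKET (2) ring slots -- one FRAMESTART descriptor
 * pointing at the cached TX header and one FRAMEEND/IRQ descriptor
 * pointing at the skb payload. On any mapping failure the slot counters
 * are rolled back, so the software ring state matches the hardware,
 * which was never poked.
 */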
static inline
int should_inject_overflow(struct b43legacy_dmaring *ring)
{
#ifdef CONFIG_B43LEGACY_DEBUG
	if (unlikely(b43legacy_debug(ring->dev,
				     B43legacy_DBG_DMAOVERFLOW))) {
		/* Check if we should inject another ringbuffer overflow
		 * to test handling of this situation in the stack. */
		unsigned long next_overflow;

		next_overflow = ring->last_injected_overflow + HZ;
		if (time_after(jiffies, next_overflow)) {
			ring->last_injected_overflow = jiffies;
			b43legacydbg(ring->dev->wl,
				     "Injecting TX ring overflow on "
				     "DMA controller %d\n", ring->index);
			return 1;
		}
	}
#endif /* CONFIG_B43LEGACY_DEBUG */
	return 0;
}

int b43legacy_dma_tx(struct b43legacy_wldev *dev,
		     struct sk_buff *skb)
{
	struct b43legacy_dmaring *ring;
	int err = 0;

	ring = priority_to_txring(dev, skb_get_queue_mapping(skb));
	B43legacy_WARN_ON(!ring->tx);

	if (unlikely(ring->stopped)) {
		/* We get here only because of a bug in mac80211.
		 * Because of a race, one packet may be queued after
		 * the queue is stopped, thus we got called when we shouldn't.
		 * For now, just refuse the transmit. */
		if (b43legacy_debug(dev, B43legacy_DBG_DMAVERBOSE))
			b43legacyerr(dev->wl, "Packet after queue stopped\n");
		return -ENOSPC;
	}

	if (unlikely(WARN_ON(free_slots(ring) < SLOTS_PER_PACKET))) {
		/* If we get here, we have a real error with the queue
		 * full, but queues not stopped. */
		b43legacyerr(dev->wl, "DMA queue overflow\n");
		return -ENOSPC;
	}

	/* dma_tx_fragment might reallocate the skb, so invalidate pointers pointing
	 * into the skb data or cb now. */
	err = dma_tx_fragment(ring, &skb);
	if (unlikely(err == -ENOKEY)) {
		/* Drop this packet, as we don't have the encryption key
		 * anymore and must not transmit it unencrypted. */
		dev_kfree_skb_any(skb);
		return 0;
	}
	if (unlikely(err)) {
		b43legacyerr(dev->wl, "DMA tx mapping failure\n");
		return err;
	}
	if ((free_slots(ring) < SLOTS_PER_PACKET) ||
	    should_inject_overflow(ring)) {
		/* This TX ring is full. */
		unsigned int skb_mapping = skb_get_queue_mapping(skb);
		ieee80211_stop_queue(dev->wl->hw, skb_mapping);
		dev->wl->tx_queue_stopped[skb_mapping] = 1;
		ring->stopped = true;
		if (b43legacy_debug(dev, B43legacy_DBG_DMAVERBOSE))
			b43legacydbg(dev->wl, "Stopped TX ring %d\n",
				     ring->index);
	}
	return err;
}
void b43legacy_dma_handle_txstatus(struct b43legacy_wldev *dev,
				   const struct b43legacy_txstatus *status)
{
	struct b43legacy_dmaring *ring;
	struct b43legacy_dmadesc_meta *meta;
	int retry_limit;
	int slot;
	int firstused;

	ring = parse_cookie(dev, status->cookie, &slot);
	if (unlikely(!ring))
		return;
	B43legacy_WARN_ON(!ring->tx);

	/* Sanity check: TX packets are processed in-order on one ring.
	 * Check if the slot deduced from the cookie really is the first
	 * used slot. */
	firstused = ring->current_slot - ring->used_slots + 1;
	if (firstused < 0)
		firstused = ring->nr_slots + firstused;
	if (unlikely(slot != firstused)) {
		/* This possibly is a firmware bug and will result in
		 * malfunction, memory leaks and/or stall of DMA functionality.
		 */
		b43legacydbg(dev->wl, "Out of order TX status report on DMA "
			     "ring %d. Expected %d, but got %d\n",
			     ring->index, firstused, slot);
		return;
	}

	while (1) {
		B43legacy_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));
		op32_idx2desc(ring, slot, &meta);

		if (meta->skb)
			unmap_descbuffer(ring, meta->dmaaddr,
					 meta->skb->len, 1);
		else
			unmap_descbuffer(ring, meta->dmaaddr,
					 sizeof(struct b43legacy_txhdr_fw3),
					 1);

		if (meta->is_last_fragment) {
			struct ieee80211_tx_info *info;
			BUG_ON(!meta->skb);
			info = IEEE80211_SKB_CB(meta->skb);

			/* preserve the configured retry limit before clearing the status
			 * The xmit function has overwritten the rc's value with the actual
			 * retry limit done by the hardware */
			retry_limit = info->status.rates[0].count;
			ieee80211_tx_info_clear_status(info);

			if (status->acked)
				info->flags |= IEEE80211_TX_STAT_ACK;

			if (status->rts_count > dev->wl->hw->conf.short_frame_max_tx_count) {
				/*
				 * If the short retries (RTS, not data frame) have exceeded
				 * the limit, the hw will not have tried the selected rate,
				 * but will have used the fallback rate instead.
				 * Don't let the rate control count attempts for the selected
				 * rate in this case, otherwise the statistics will be off.
				 */
				info->status.rates[0].count = 0;
				info->status.rates[1].count = status->frame_count;
			} else {
				if (status->frame_count > retry_limit) {
					info->status.rates[0].count = retry_limit;
					info->status.rates[1].count = status->frame_count -
								      retry_limit;
				} else {
					info->status.rates[0].count = status->frame_count;
					info->status.rates[1].idx = -1;
				}
			}

			/* Call back to inform the ieee80211 subsystem about the
			 * status of the transmission.
			 * Some fields of txstat are already filled in dma_tx().
			 */
			ieee80211_tx_status_irqsafe(dev->wl->hw, meta->skb);
			/* skb is freed by ieee80211_tx_status_irqsafe() */
			meta->skb = NULL;
		} else {
			/* No need to call free_descriptor_buffer here, as
			 * this is only the txhdr, which is not allocated.
			 */
			B43legacy_WARN_ON(meta->skb != NULL);
		}

		/* Everything unmapped and free'd. So it's not used anymore. */
		ring->used_slots--;

		if (meta->is_last_fragment)
			break;
		slot = next_slot(ring, slot);
	}
	dev->stats.last_tx = jiffies;
	if (ring->stopped) {
		B43legacy_WARN_ON(free_slots(ring) < SLOTS_PER_PACKET);
		ring->stopped = false;
	}

	if (dev->wl->tx_queue_stopped[ring->queue_prio]) {
		dev->wl->tx_queue_stopped[ring->queue_prio] = 0;
	} else {
		/* If the driver queue is running wake the corresponding
		 * mac80211 queue. */
		ieee80211_wake_queue(dev->wl->hw, ring->queue_prio);
		if (b43legacy_debug(dev, B43legacy_DBG_DMAVERBOSE))
			b43legacydbg(dev->wl, "Woke up TX ring %d\n",
				     ring->index);
	}
	/* Add work to the queue. */
	ieee80211_queue_work(dev->wl->hw, &dev->wl->tx_work);
}
static void dma_rx(struct b43legacy_dmaring *ring,
		   int *slot)
{
	struct b43legacy_dmadesc32 *desc;
	struct b43legacy_dmadesc_meta *meta;
	struct b43legacy_rxhdr_fw3 *rxhdr;
	struct sk_buff *skb;
	u16 len;
	int err;
	dma_addr_t dmaaddr;

	desc = op32_idx2desc(ring, *slot, &meta);

	sync_descbuffer_for_cpu(ring, meta->dmaaddr, ring->rx_buffersize);
	skb = meta->skb;

	if (ring->index == 3) {
		/* We received an xmit status. */
		struct b43legacy_hwtxstatus *hw =
				(struct b43legacy_hwtxstatus *)skb->data;
		int i = 0;

		while (hw->cookie == 0) {
			if (i > 100)
				break;
			i++;
			udelay(2);
			barrier();
		}
		b43legacy_handle_hwtxstatus(ring->dev, hw);
		/* recycle the descriptor buffer. */
		sync_descbuffer_for_device(ring, meta->dmaaddr,
					   ring->rx_buffersize);

		return;
	}
	rxhdr = (struct b43legacy_rxhdr_fw3 *)skb->data;
	len = le16_to_cpu(rxhdr->frame_len);
	if (len == 0) {
		int i = 0;

		do {
			udelay(2);
			barrier();
			len = le16_to_cpu(rxhdr->frame_len);
		} while (len == 0 && i++ < 5);
		if (unlikely(len == 0)) {
			/* recycle the descriptor buffer. */
			sync_descbuffer_for_device(ring, meta->dmaaddr,
						   ring->rx_buffersize);
			goto drop;
		}
	}
	if (unlikely(len > ring->rx_buffersize)) {
		/* The data did not fit into one descriptor buffer
		 * and is split over multiple buffers.
		 * This should never happen, as we try to allocate buffers
		 * big enough. So simply ignore this packet.
		 */
		int cnt = 0;
		s32 tmp = len;

		while (1) {
			desc = op32_idx2desc(ring, *slot, &meta);
			/* recycle the descriptor buffer. */
			sync_descbuffer_for_device(ring, meta->dmaaddr,
						   ring->rx_buffersize);
			*slot = next_slot(ring, *slot);
			cnt++;
			tmp -= ring->rx_buffersize;
			if (tmp <= 0)
				break;
		}
		b43legacyerr(ring->dev->wl, "DMA RX buffer too small "
			     "(len: %u, buffer: %u, nr-dropped: %d)\n",
			     len, ring->rx_buffersize, cnt);
		goto drop;
	}

	dmaaddr = meta->dmaaddr;
	err = setup_rx_descbuffer(ring, desc, meta, GFP_ATOMIC);
	if (unlikely(err)) {
		b43legacydbg(ring->dev->wl, "DMA RX: setup_rx_descbuffer()"
			     " failed\n");
		sync_descbuffer_for_device(ring, dmaaddr,
					   ring->rx_buffersize);
		goto drop;
	}

	unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0);
	skb_put(skb, len + ring->frameoffset);
	skb_pull(skb, ring->frameoffset);

	b43legacy_rx(ring->dev, skb, rxhdr);
drop:
	return;
}

void b43legacy_dma_rx(struct b43legacy_dmaring *ring)
{
	int slot;
	int current_slot;
	int used_slots = 0;

	B43legacy_WARN_ON(ring->tx);
	current_slot = op32_get_current_rxslot(ring);
	B43legacy_WARN_ON(!(current_slot >= 0 && current_slot <
			    ring->nr_slots));

	slot = ring->current_slot;
	for (; slot != current_slot; slot = next_slot(ring, slot)) {
		dma_rx(ring, &slot);
		update_max_used_slots(ring, ++used_slots);
	}
	op32_set_current_rxslot(ring, slot);
	ring->current_slot = slot;
}

static void b43legacy_dma_tx_suspend_ring(struct b43legacy_dmaring *ring)
{
	B43legacy_WARN_ON(!ring->tx);
	op32_tx_suspend(ring);
}

static void b43legacy_dma_tx_resume_ring(struct b43legacy_dmaring *ring)
{
	B43legacy_WARN_ON(!ring->tx);
	op32_tx_resume(ring);
}

void b43legacy_dma_tx_suspend(struct b43legacy_wldev *dev)
{
	b43legacy_power_saving_ctl_bits(dev, -1, 1);
	b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring0);
	b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring1);
	b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring2);
	b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring3);
	b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring4);
	b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring5);
}

void b43legacy_dma_tx_resume(struct b43legacy_wldev *dev)
{
	b43legacy_dma_tx_resume_ring(dev->dma.tx_ring5);
	b43legacy_dma_tx_resume_ring(dev->dma.tx_ring4);
	b43legacy_dma_tx_resume_ring(dev->dma.tx_ring3);
	b43legacy_dma_tx_resume_ring(dev->dma.tx_ring2);
	b43legacy_dma_tx_resume_ring(dev->dma.tx_ring1);
	b43legacy_dma_tx_resume_ring(dev->dma.tx_ring0);
	b43legacy_power_saving_ctl_bits(dev, -1, -1);
}