/*

  Broadcom B43 wireless driver

  DMA ringbuffer and descriptor allocation/management

  Copyright (c) 2005, 2006 Michael Buesch <mb@bu3sch.de>

  Some code in this file is derived from the b44.c driver
  Copyright (C) 2002 David S. Miller
  Copyright (C) Pekka Pietikainen

  This program is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; either version 2 of the License, or
  (at your option) any later version.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; see the file COPYING.  If not, write to
  the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
  Boston, MA 02110-1301, USA.

*/

#include "b43.h"
#include "dma.h"
#include "main.h"
#include "debugfs.h"
#include "xmit.h"

#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <asm/div64.h>

/* Required number of TX DMA slots per TX frame.
 * This currently is 2, because we put the header and the ieee80211 frame
 * into separate slots. */
#define TX_SLOTS_PER_FRAME	2

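/* Illustration: with TX_SLOTS_PER_FRAME == 2, a queued frame occupies two
 * consecutive ring slots:
 *
 *	slot N:   hardware TX header   (FRAMESTART, no IRQ)
 *	slot N+1: ieee80211 frame body (FRAMEEND and IRQ)
 *
 * See the two fill_descriptor() calls in dma_tx_fragment() below, which
 * set up exactly this layout.
 */
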
/* 32bit DMA ops. */
static
struct b43_dmadesc_generic *op32_idx2desc(struct b43_dmaring *ring,
					  int slot,
					  struct b43_dmadesc_meta **meta)
{
	struct b43_dmadesc32 *desc;

	*meta = &(ring->meta[slot]);
	desc = ring->descbase;
	desc = &(desc[slot]);

	return (struct b43_dmadesc_generic *)desc;
}

static void op32_fill_descriptor(struct b43_dmaring *ring,
				 struct b43_dmadesc_generic *desc,
				 dma_addr_t dmaaddr, u16 bufsize,
				 int start, int end, int irq)
{
	struct b43_dmadesc32 *descbase = ring->descbase;
	int slot;
	u32 ctl;
	u32 addr;
	u32 addrext;

	slot = (int)(&(desc->dma32) - descbase);
	B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));

	addr = (u32) (dmaaddr & ~SSB_DMA_TRANSLATION_MASK);
	addrext = (u32) (dmaaddr & SSB_DMA_TRANSLATION_MASK)
	    >> SSB_DMA_TRANSLATION_SHIFT;
	addr |= ssb_dma_translation(ring->dev->dev);
	ctl = bufsize & B43_DMA32_DCTL_BYTECNT;
	if (slot == ring->nr_slots - 1)
		ctl |= B43_DMA32_DCTL_DTABLEEND;
	if (start)
		ctl |= B43_DMA32_DCTL_FRAMESTART;
	if (end)
		ctl |= B43_DMA32_DCTL_FRAMEEND;
	if (irq)
		ctl |= B43_DMA32_DCTL_IRQ;
	ctl |= (addrext << B43_DMA32_DCTL_ADDREXT_SHIFT)
	    & B43_DMA32_DCTL_ADDREXT_MASK;

	desc->dma32.control = cpu_to_le32(ctl);
	desc->dma32.address = cpu_to_le32(addr);
}

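/* Worked example for the address split above (assuming the usual values
 * SSB_DMA_TRANSLATION_MASK == 0xC0000000 and SSB_DMA_TRANSLATION_SHIFT == 30
 * from <linux/ssb/ssb.h>): a dmaaddr of 0x8E001000 yields a descriptor
 * address of 0x0E001000 (plus the core's translation bits) and an addrext
 * of 0x2 in the ADDREXT field of the control word. The device recombines
 * the two to form the full bus address.
 */
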
static void op32_poke_tx(struct b43_dmaring *ring, int slot)
{
	b43_dma_write(ring, B43_DMA32_TXINDEX,
		      (u32) (slot * sizeof(struct b43_dmadesc32)));
}

static void op32_tx_suspend(struct b43_dmaring *ring)
{
	b43_dma_write(ring, B43_DMA32_TXCTL, b43_dma_read(ring, B43_DMA32_TXCTL)
		      | B43_DMA32_TXSUSPEND);
}

static void op32_tx_resume(struct b43_dmaring *ring)
{
	b43_dma_write(ring, B43_DMA32_TXCTL, b43_dma_read(ring, B43_DMA32_TXCTL)
		      & ~B43_DMA32_TXSUSPEND);
}

static int op32_get_current_rxslot(struct b43_dmaring *ring)
{
	u32 val;

	val = b43_dma_read(ring, B43_DMA32_RXSTATUS);
	val &= B43_DMA32_RXDPTR;

	return (val / sizeof(struct b43_dmadesc32));
}

static void op32_set_current_rxslot(struct b43_dmaring *ring, int slot)
{
	b43_dma_write(ring, B43_DMA32_RXINDEX,
		      (u32) (slot * sizeof(struct b43_dmadesc32)));
}

static const struct b43_dma_ops dma32_ops = {
	.idx2desc = op32_idx2desc,
	.fill_descriptor = op32_fill_descriptor,
	.poke_tx = op32_poke_tx,
	.tx_suspend = op32_tx_suspend,
	.tx_resume = op32_tx_resume,
	.get_current_rxslot = op32_get_current_rxslot,
	.set_current_rxslot = op32_set_current_rxslot,
};

/* 64bit DMA ops. */
static
struct b43_dmadesc_generic *op64_idx2desc(struct b43_dmaring *ring,
					  int slot,
					  struct b43_dmadesc_meta **meta)
{
	struct b43_dmadesc64 *desc;

	*meta = &(ring->meta[slot]);
	desc = ring->descbase;
	desc = &(desc[slot]);

	return (struct b43_dmadesc_generic *)desc;
}

static void op64_fill_descriptor(struct b43_dmaring *ring,
				 struct b43_dmadesc_generic *desc,
				 dma_addr_t dmaaddr, u16 bufsize,
				 int start, int end, int irq)
{
	struct b43_dmadesc64 *descbase = ring->descbase;
	int slot;
	u32 ctl0 = 0, ctl1 = 0;
	u32 addrlo, addrhi;
	u32 addrext;

	slot = (int)(&(desc->dma64) - descbase);
	B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));

	addrlo = (u32) (dmaaddr & 0xFFFFFFFF);
	addrhi = (((u64) dmaaddr >> 32) & ~SSB_DMA_TRANSLATION_MASK);
	addrext = (((u64) dmaaddr >> 32) & SSB_DMA_TRANSLATION_MASK)
	    >> SSB_DMA_TRANSLATION_SHIFT;
	addrhi |= (ssb_dma_translation(ring->dev->dev) << 1);
	if (slot == ring->nr_slots - 1)
		ctl0 |= B43_DMA64_DCTL0_DTABLEEND;
	if (start)
		ctl0 |= B43_DMA64_DCTL0_FRAMESTART;
	if (end)
		ctl0 |= B43_DMA64_DCTL0_FRAMEEND;
	if (irq)
		ctl0 |= B43_DMA64_DCTL0_IRQ;
	ctl1 |= bufsize & B43_DMA64_DCTL1_BYTECNT;
	ctl1 |= (addrext << B43_DMA64_DCTL1_ADDREXT_SHIFT)
	    & B43_DMA64_DCTL1_ADDREXT_MASK;

	desc->dma64.control0 = cpu_to_le32(ctl0);
	desc->dma64.control1 = cpu_to_le32(ctl1);
	desc->dma64.address_low = cpu_to_le32(addrlo);
	desc->dma64.address_high = cpu_to_le32(addrhi);
}

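/* Note the difference to the 32-bit variant: here the byte count and the
 * address extension live in control word 1, and the SSB address translation
 * is folded into the high 32 bits of the buffer address (shifted left by
 * one, as done above).
 */
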
static void op64_poke_tx(struct b43_dmaring *ring, int slot)
{
	b43_dma_write(ring, B43_DMA64_TXINDEX,
		      (u32) (slot * sizeof(struct b43_dmadesc64)));
}

static void op64_tx_suspend(struct b43_dmaring *ring)
{
	b43_dma_write(ring, B43_DMA64_TXCTL, b43_dma_read(ring, B43_DMA64_TXCTL)
		      | B43_DMA64_TXSUSPEND);
}

static void op64_tx_resume(struct b43_dmaring *ring)
{
	b43_dma_write(ring, B43_DMA64_TXCTL, b43_dma_read(ring, B43_DMA64_TXCTL)
		      & ~B43_DMA64_TXSUSPEND);
}

static int op64_get_current_rxslot(struct b43_dmaring *ring)
{
	u32 val;

	val = b43_dma_read(ring, B43_DMA64_RXSTATUS);
	val &= B43_DMA64_RXSTATDPTR;

	return (val / sizeof(struct b43_dmadesc64));
}

static void op64_set_current_rxslot(struct b43_dmaring *ring, int slot)
{
	b43_dma_write(ring, B43_DMA64_RXINDEX,
		      (u32) (slot * sizeof(struct b43_dmadesc64)));
}

static const struct b43_dma_ops dma64_ops = {
	.idx2desc = op64_idx2desc,
	.fill_descriptor = op64_fill_descriptor,
	.poke_tx = op64_poke_tx,
	.tx_suspend = op64_tx_suspend,
	.tx_resume = op64_tx_resume,
	.get_current_rxslot = op64_get_current_rxslot,
	.set_current_rxslot = op64_set_current_rxslot,
};

static inline int free_slots(struct b43_dmaring *ring)
{
	return (ring->nr_slots - ring->used_slots);
}

static inline int next_slot(struct b43_dmaring *ring, int slot)
{
	B43_WARN_ON(!(slot >= -1 && slot <= ring->nr_slots - 1));
	if (slot == ring->nr_slots - 1)
		return 0;
	return slot + 1;
}

static inline int prev_slot(struct b43_dmaring *ring, int slot)
{
	B43_WARN_ON(!(slot >= 0 && slot <= ring->nr_slots - 1));
	if (slot == 0)
		return ring->nr_slots - 1;
	return slot - 1;
}

#ifdef CONFIG_B43_DEBUG
static void update_max_used_slots(struct b43_dmaring *ring,
				  int current_used_slots)
{
	if (current_used_slots <= ring->max_used_slots)
		return;
	ring->max_used_slots = current_used_slots;
	if (b43_debug(ring->dev, B43_DBG_DMAVERBOSE)) {
		b43dbg(ring->dev->wl,
		       "max_used_slots increased to %d on %s ring %d\n",
		       ring->max_used_slots,
		       ring->tx ? "TX" : "RX", ring->index);
	}
}
#else
static inline
void update_max_used_slots(struct b43_dmaring *ring, int current_used_slots)
{
}
#endif /* DEBUG */

/* Request a slot for usage. */
static inline int request_slot(struct b43_dmaring *ring)
{
	int slot;

	B43_WARN_ON(!ring->tx);
	B43_WARN_ON(ring->stopped);
	B43_WARN_ON(free_slots(ring) == 0);

	slot = next_slot(ring, ring->current_slot);
	ring->current_slot = slot;
	ring->used_slots++;

	update_max_used_slots(ring, ring->used_slots);

	return slot;
}

static u16 b43_dmacontroller_base(enum b43_dmatype type, int controller_idx)
{
	static const u16 map64[] = {
		B43_MMIO_DMA64_BASE0,
		B43_MMIO_DMA64_BASE1,
		B43_MMIO_DMA64_BASE2,
		B43_MMIO_DMA64_BASE3,
		B43_MMIO_DMA64_BASE4,
		B43_MMIO_DMA64_BASE5,
	};
	static const u16 map32[] = {
		B43_MMIO_DMA32_BASE0,
		B43_MMIO_DMA32_BASE1,
		B43_MMIO_DMA32_BASE2,
		B43_MMIO_DMA32_BASE3,
		B43_MMIO_DMA32_BASE4,
		B43_MMIO_DMA32_BASE5,
	};

	if (type == B43_DMA_64BIT) {
		B43_WARN_ON(!(controller_idx >= 0 &&
			      controller_idx < ARRAY_SIZE(map64)));
		return map64[controller_idx];
	}
	B43_WARN_ON(!(controller_idx >= 0 &&
		      controller_idx < ARRAY_SIZE(map32)));
	return map32[controller_idx];
}

static inline
dma_addr_t map_descbuffer(struct b43_dmaring *ring,
			  unsigned char *buf, size_t len, int tx)
{
	dma_addr_t dmaaddr;

	if (tx) {
		dmaaddr = ssb_dma_map_single(ring->dev->dev,
					     buf, len, DMA_TO_DEVICE);
	} else {
		dmaaddr = ssb_dma_map_single(ring->dev->dev,
					     buf, len, DMA_FROM_DEVICE);
	}

	return dmaaddr;
}

static inline
void unmap_descbuffer(struct b43_dmaring *ring,
		      dma_addr_t addr, size_t len, int tx)
{
	if (tx) {
		ssb_dma_unmap_single(ring->dev->dev,
				     addr, len, DMA_TO_DEVICE);
	} else {
		ssb_dma_unmap_single(ring->dev->dev,
				     addr, len, DMA_FROM_DEVICE);
	}
}

static inline
void sync_descbuffer_for_cpu(struct b43_dmaring *ring,
			     dma_addr_t addr, size_t len)
{
	B43_WARN_ON(ring->tx);
	ssb_dma_sync_single_for_cpu(ring->dev->dev,
				    addr, len, DMA_FROM_DEVICE);
}

static inline
void sync_descbuffer_for_device(struct b43_dmaring *ring,
				dma_addr_t addr, size_t len)
{
	B43_WARN_ON(ring->tx);
	ssb_dma_sync_single_for_device(ring->dev->dev,
				       addr, len, DMA_FROM_DEVICE);
}

static inline
void free_descriptor_buffer(struct b43_dmaring *ring,
			    struct b43_dmadesc_meta *meta)
{
	if (meta->skb) {
		dev_kfree_skb_any(meta->skb);
		meta->skb = NULL;
	}
}

static int alloc_ringmemory(struct b43_dmaring *ring)
{
	gfp_t flags = GFP_KERNEL;

	/* The specs call for 4K buffers for 30- and 32-bit DMA with 4K
	 * alignment and 8K buffers for 64-bit DMA with 8K alignment. Testing
	 * has shown that 4K is sufficient for the latter as long as the buffer
	 * does not cross an 8K boundary.
	 *
	 * For unknown reasons - possibly a hardware error - the BCM4311 rev
	 * 02, which uses 64-bit DMA, needs the ring buffer in very low memory,
	 * which accounts for the GFP_DMA flag below.
	 *
	 * The flags here must match the flags in free_ringmemory below!
	 */
	if (ring->type == B43_DMA_64BIT)
		flags |= GFP_DMA;
	ring->descbase = ssb_dma_alloc_consistent(ring->dev->dev,
						  B43_DMA_RINGMEMSIZE,
						  &(ring->dmabase), flags);
	if (!ring->descbase) {
		b43err(ring->dev->wl, "DMA ringmemory allocation failed\n");
		return -ENOMEM;
	}
	memset(ring->descbase, 0, B43_DMA_RINGMEMSIZE);

	return 0;
}

static void free_ringmemory(struct b43_dmaring *ring)
{
	gfp_t flags = GFP_KERNEL;

	if (ring->type == B43_DMA_64BIT)
		flags |= GFP_DMA;

	ssb_dma_free_consistent(ring->dev->dev, B43_DMA_RINGMEMSIZE,
				ring->descbase, ring->dmabase, flags);
}

/* Reset the RX DMA channel */
static int b43_dmacontroller_rx_reset(struct b43_wldev *dev, u16 mmio_base,
				      enum b43_dmatype type)
{
	int i;
	u32 value;
	u16 offset;

	might_sleep();

	offset = (type == B43_DMA_64BIT) ? B43_DMA64_RXCTL : B43_DMA32_RXCTL;
	b43_write32(dev, mmio_base + offset, 0);
	for (i = 0; i < 10; i++) {
		offset = (type == B43_DMA_64BIT) ? B43_DMA64_RXSTATUS :
						   B43_DMA32_RXSTATUS;
		value = b43_read32(dev, mmio_base + offset);
		if (type == B43_DMA_64BIT) {
			value &= B43_DMA64_RXSTAT;
			if (value == B43_DMA64_RXSTAT_DISABLED) {
				i = -1;
				break;
			}
		} else {
			value &= B43_DMA32_RXSTATE;
			if (value == B43_DMA32_RXSTAT_DISABLED) {
				i = -1;
				break;
			}
		}
		msleep(1);
	}
	if (i != -1) {
		b43err(dev->wl, "DMA RX reset timed out\n");
		return -ENODEV;
	}

	return 0;
}

/* Reset the TX DMA channel */
static int b43_dmacontroller_tx_reset(struct b43_wldev *dev, u16 mmio_base,
				      enum b43_dmatype type)
{
	int i;
	u32 value;
	u16 offset;

	might_sleep();

	for (i = 0; i < 10; i++) {
		offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXSTATUS :
						   B43_DMA32_TXSTATUS;
		value = b43_read32(dev, mmio_base + offset);
		if (type == B43_DMA_64BIT) {
			value &= B43_DMA64_TXSTAT;
			if (value == B43_DMA64_TXSTAT_DISABLED ||
			    value == B43_DMA64_TXSTAT_IDLEWAIT ||
			    value == B43_DMA64_TXSTAT_STOPPED)
				break;
		} else {
			value &= B43_DMA32_TXSTATE;
			if (value == B43_DMA32_TXSTAT_DISABLED ||
			    value == B43_DMA32_TXSTAT_IDLEWAIT ||
			    value == B43_DMA32_TXSTAT_STOPPED)
				break;
		}
		msleep(1);
	}
	offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXCTL : B43_DMA32_TXCTL;
	b43_write32(dev, mmio_base + offset, 0);
	for (i = 0; i < 10; i++) {
		offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXSTATUS :
						   B43_DMA32_TXSTATUS;
		value = b43_read32(dev, mmio_base + offset);
		if (type == B43_DMA_64BIT) {
			value &= B43_DMA64_TXSTAT;
			if (value == B43_DMA64_TXSTAT_DISABLED) {
				i = -1;
				break;
			}
		} else {
			value &= B43_DMA32_TXSTATE;
			if (value == B43_DMA32_TXSTAT_DISABLED) {
				i = -1;
				break;
			}
		}
		msleep(1);
	}
	if (i != -1) {
		b43err(dev->wl, "DMA TX reset timed out\n");
		return -ENODEV;
	}
	/* ensure the reset is completed. */
	msleep(1);

	return 0;
}

/* Check if a DMA mapping address is invalid. */
static bool b43_dma_mapping_error(struct b43_dmaring *ring,
				  dma_addr_t addr,
				  size_t buffersize, bool dma_to_device)
{
	if (unlikely(ssb_dma_mapping_error(ring->dev->dev, addr)))
		return 1;

	switch (ring->type) {
	case B43_DMA_30BIT:
		if ((u64)addr + buffersize > (1ULL << 30))
			goto address_error;
		break;
	case B43_DMA_32BIT:
		if ((u64)addr + buffersize > (1ULL << 32))
			goto address_error;
		break;
	case B43_DMA_64BIT:
		/* Currently we can't have addresses beyond
		 * 64bit in the kernel. */
		break;
	}

	/* The address is OK. */
	return 0;

address_error:
	/* We can't support this address. Unmap it again. */
	unmap_descbuffer(ring, addr, buffersize, dma_to_device);

	return 1;
}

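/* Example for the range checks above: on a 30-bit engine, a buffer mapped
 * at 0x3FFFF000 with length 0x2000 ends at 0x40001000, past the 1GB
 * (1 << 30) window, so the mapping is rejected and unmapped again. The
 * callers then typically retry the allocation with GFP_DMA.
 */
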
static bool b43_rx_buffer_is_poisoned(struct b43_dmaring *ring, struct sk_buff *skb)
{
	unsigned char *f = skb->data + ring->frameoffset;

	return ((f[0] & f[1] & f[2] & f[3] & f[4] & f[5] & f[6] & f[7]) == 0xFF);
}

static void b43_poison_rx_buffer(struct b43_dmaring *ring, struct sk_buff *skb)
{
	struct b43_rxhdr_fw4 *rxhdr;
	unsigned char *frame;

	/* This poisons the RX buffer to detect DMA failures. */

	rxhdr = (struct b43_rxhdr_fw4 *)(skb->data);
	rxhdr->frame_len = 0;

	B43_WARN_ON(ring->rx_buffersize < ring->frameoffset + sizeof(struct b43_plcp_hdr6) + 2);
	frame = skb->data + ring->frameoffset;
	memset(frame, 0xFF, sizeof(struct b43_plcp_hdr6) + 2 /* padding */);
}

static int setup_rx_descbuffer(struct b43_dmaring *ring,
			       struct b43_dmadesc_generic *desc,
			       struct b43_dmadesc_meta *meta, gfp_t gfp_flags)
{
	dma_addr_t dmaaddr;
	struct sk_buff *skb;

	B43_WARN_ON(ring->tx);

	skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
	if (unlikely(!skb))
		return -ENOMEM;
	b43_poison_rx_buffer(ring, skb);
	dmaaddr = map_descbuffer(ring, skb->data, ring->rx_buffersize, 0);
	if (b43_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
		/* ugh. try to realloc in zone_dma */
		gfp_flags |= GFP_DMA;

		dev_kfree_skb_any(skb);

		skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
		if (unlikely(!skb))
			return -ENOMEM;
		b43_poison_rx_buffer(ring, skb);
		dmaaddr = map_descbuffer(ring, skb->data,
					 ring->rx_buffersize, 0);
		if (b43_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
			b43err(ring->dev->wl, "RX DMA buffer allocation failed\n");
			dev_kfree_skb_any(skb);
			return -EIO;
		}
	}

	meta->skb = skb;
	meta->dmaaddr = dmaaddr;
	ring->ops->fill_descriptor(ring, desc, dmaaddr,
				   ring->rx_buffersize, 0, 0, 0);

	return 0;
}

/* Allocate the initial descbuffers.
 * This is used for an RX ring only.
 */
static int alloc_initial_descbuffers(struct b43_dmaring *ring)
{
	int i, err = -ENOMEM;
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;

	for (i = 0; i < ring->nr_slots; i++) {
		desc = ring->ops->idx2desc(ring, i, &meta);

		err = setup_rx_descbuffer(ring, desc, meta, GFP_KERNEL);
		if (err) {
			b43err(ring->dev->wl,
			       "Failed to allocate initial descbuffers\n");
			goto err_unwind;
		}
	}
	mb();
	ring->used_slots = ring->nr_slots;
	err = 0;
out:
	return err;

err_unwind:
	for (i--; i >= 0; i--) {
		desc = ring->ops->idx2desc(ring, i, &meta);

		unmap_descbuffer(ring, meta->dmaaddr, ring->rx_buffersize, 0);
		dev_kfree_skb(meta->skb);
	}
	goto out;
}

/* Do initial setup of the DMA controller.
 * Reset the controller, write the ring busaddress
 * and switch the "enable" bit on.
 */
static int dmacontroller_setup(struct b43_dmaring *ring)
{
	int err = 0;
	u32 value;
	u32 addrext;
	u32 trans = ssb_dma_translation(ring->dev->dev);

	if (ring->tx) {
		if (ring->type == B43_DMA_64BIT) {
			u64 ringbase = (u64) (ring->dmabase);

			addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK)
			    >> SSB_DMA_TRANSLATION_SHIFT;
			value = B43_DMA64_TXENABLE;
			value |= (addrext << B43_DMA64_TXADDREXT_SHIFT)
			    & B43_DMA64_TXADDREXT_MASK;
			b43_dma_write(ring, B43_DMA64_TXCTL, value);
			b43_dma_write(ring, B43_DMA64_TXRINGLO,
				      (ringbase & 0xFFFFFFFF));
			b43_dma_write(ring, B43_DMA64_TXRINGHI,
				      ((ringbase >> 32) &
				       ~SSB_DMA_TRANSLATION_MASK)
				      | (trans << 1));
		} else {
			u32 ringbase = (u32) (ring->dmabase);

			addrext = (ringbase & SSB_DMA_TRANSLATION_MASK)
			    >> SSB_DMA_TRANSLATION_SHIFT;
			value = B43_DMA32_TXENABLE;
			value |= (addrext << B43_DMA32_TXADDREXT_SHIFT)
			    & B43_DMA32_TXADDREXT_MASK;
			b43_dma_write(ring, B43_DMA32_TXCTL, value);
			b43_dma_write(ring, B43_DMA32_TXRING,
				      (ringbase & ~SSB_DMA_TRANSLATION_MASK)
				      | trans);
		}
	} else {
		err = alloc_initial_descbuffers(ring);
		if (err)
			goto out;
		if (ring->type == B43_DMA_64BIT) {
			u64 ringbase = (u64) (ring->dmabase);

			addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK)
			    >> SSB_DMA_TRANSLATION_SHIFT;
			value = (ring->frameoffset << B43_DMA64_RXFROFF_SHIFT);
			value |= B43_DMA64_RXENABLE;
			value |= (addrext << B43_DMA64_RXADDREXT_SHIFT)
			    & B43_DMA64_RXADDREXT_MASK;
			b43_dma_write(ring, B43_DMA64_RXCTL, value);
			b43_dma_write(ring, B43_DMA64_RXRINGLO,
				      (ringbase & 0xFFFFFFFF));
			b43_dma_write(ring, B43_DMA64_RXRINGHI,
				      ((ringbase >> 32) &
				       ~SSB_DMA_TRANSLATION_MASK)
				      | (trans << 1));
			b43_dma_write(ring, B43_DMA64_RXINDEX, ring->nr_slots *
				      sizeof(struct b43_dmadesc64));
		} else {
			u32 ringbase = (u32) (ring->dmabase);

			addrext = (ringbase & SSB_DMA_TRANSLATION_MASK)
			    >> SSB_DMA_TRANSLATION_SHIFT;
			value = (ring->frameoffset << B43_DMA32_RXFROFF_SHIFT);
			value |= B43_DMA32_RXENABLE;
			value |= (addrext << B43_DMA32_RXADDREXT_SHIFT)
			    & B43_DMA32_RXADDREXT_MASK;
			b43_dma_write(ring, B43_DMA32_RXCTL, value);
			b43_dma_write(ring, B43_DMA32_RXRING,
				      (ringbase & ~SSB_DMA_TRANSLATION_MASK)
				      | trans);
			b43_dma_write(ring, B43_DMA32_RXINDEX, ring->nr_slots *
				      sizeof(struct b43_dmadesc32));
		}
	}

out:
	return err;
}

/* Shutdown the DMA controller. */
static void dmacontroller_cleanup(struct b43_dmaring *ring)
{
	if (ring->tx) {
		b43_dmacontroller_tx_reset(ring->dev, ring->mmio_base,
					   ring->type);
		if (ring->type == B43_DMA_64BIT) {
			b43_dma_write(ring, B43_DMA64_TXRINGLO, 0);
			b43_dma_write(ring, B43_DMA64_TXRINGHI, 0);
		} else
			b43_dma_write(ring, B43_DMA32_TXRING, 0);
	} else {
		b43_dmacontroller_rx_reset(ring->dev, ring->mmio_base,
					   ring->type);
		if (ring->type == B43_DMA_64BIT) {
			b43_dma_write(ring, B43_DMA64_RXRINGLO, 0);
			b43_dma_write(ring, B43_DMA64_RXRINGHI, 0);
		} else
			b43_dma_write(ring, B43_DMA32_RXRING, 0);
	}
}

static void free_all_descbuffers(struct b43_dmaring *ring)
{
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;
	int i;

	if (!ring->used_slots)
		return;
	for (i = 0; i < ring->nr_slots; i++) {
		desc = ring->ops->idx2desc(ring, i, &meta);

		if (!meta->skb) {
			B43_WARN_ON(!ring->tx);
			continue;
		}
		if (ring->tx) {
			unmap_descbuffer(ring, meta->dmaaddr,
					 meta->skb->len, 1);
		} else {
			unmap_descbuffer(ring, meta->dmaaddr,
					 ring->rx_buffersize, 0);
		}
		free_descriptor_buffer(ring, meta);
	}
}

static u64 supported_dma_mask(struct b43_wldev *dev)
{
	u32 tmp;
	u16 mmio_base;

	tmp = b43_read32(dev, SSB_TMSHIGH);
	if (tmp & SSB_TMSHIGH_DMA64)
		return DMA_BIT_MASK(64);
	mmio_base = b43_dmacontroller_base(0, 0);
	b43_write32(dev, mmio_base + B43_DMA32_TXCTL, B43_DMA32_TXADDREXT_MASK);
	tmp = b43_read32(dev, mmio_base + B43_DMA32_TXCTL);
	if (tmp & B43_DMA32_TXADDREXT_MASK)
		return DMA_BIT_MASK(32);

	return DMA_BIT_MASK(30);
}

static enum b43_dmatype dma_mask_to_engine_type(u64 dmamask)
{
	if (dmamask == DMA_BIT_MASK(30))
		return B43_DMA_30BIT;
	if (dmamask == DMA_BIT_MASK(32))
		return B43_DMA_32BIT;
	if (dmamask == DMA_BIT_MASK(64))
		return B43_DMA_64BIT;
	B43_WARN_ON(1);
	return B43_DMA_30BIT;
}

/* Main initialization function. */
static
struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
				      int controller_index,
				      int for_tx,
				      enum b43_dmatype type)
{
	struct b43_dmaring *ring;
	int err;
	dma_addr_t dma_test;

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		goto out;

	ring->nr_slots = B43_RXRING_SLOTS;
	if (for_tx)
		ring->nr_slots = B43_TXRING_SLOTS;

	ring->meta = kcalloc(ring->nr_slots, sizeof(struct b43_dmadesc_meta),
			     GFP_KERNEL);
	if (!ring->meta)
		goto err_kfree_ring;

	ring->type = type;
	ring->dev = dev;
	ring->mmio_base = b43_dmacontroller_base(type, controller_index);
	ring->index = controller_index;
	if (type == B43_DMA_64BIT)
		ring->ops = &dma64_ops;
	else
		ring->ops = &dma32_ops;
	if (for_tx) {
		ring->tx = 1;
		ring->current_slot = -1;
	} else {
		if (ring->index == 0) {
			ring->rx_buffersize = B43_DMA0_RX_BUFFERSIZE;
			ring->frameoffset = B43_DMA0_RX_FRAMEOFFSET;
		} else
			B43_WARN_ON(1);
	}
#ifdef CONFIG_B43_DEBUG
	ring->last_injected_overflow = jiffies;
#endif

	if (for_tx) {
		/* Assumption: B43_TXRING_SLOTS can be divided by TX_SLOTS_PER_FRAME */
		BUILD_BUG_ON(B43_TXRING_SLOTS % TX_SLOTS_PER_FRAME != 0);

		ring->txhdr_cache = kcalloc(ring->nr_slots / TX_SLOTS_PER_FRAME,
					    b43_txhdr_size(dev),
					    GFP_KERNEL);
		if (!ring->txhdr_cache)
			goto err_kfree_meta;

		/* test for ability to dma to txhdr_cache */
		dma_test = ssb_dma_map_single(dev->dev,
					      ring->txhdr_cache,
					      b43_txhdr_size(dev),
					      DMA_TO_DEVICE);

		if (b43_dma_mapping_error(ring, dma_test,
					  b43_txhdr_size(dev), 1)) {
			/* ugh realloc */
			kfree(ring->txhdr_cache);
			ring->txhdr_cache = kcalloc(ring->nr_slots / TX_SLOTS_PER_FRAME,
						    b43_txhdr_size(dev),
						    GFP_KERNEL | GFP_DMA);
			if (!ring->txhdr_cache)
				goto err_kfree_meta;

			dma_test = ssb_dma_map_single(dev->dev,
						      ring->txhdr_cache,
						      b43_txhdr_size(dev),
						      DMA_TO_DEVICE);

			if (b43_dma_mapping_error(ring, dma_test,
						  b43_txhdr_size(dev), 1)) {
				b43err(dev->wl,
				       "TXHDR DMA allocation failed\n");
				goto err_kfree_txhdr_cache;
			}
		}

		ssb_dma_unmap_single(dev->dev,
				     dma_test, b43_txhdr_size(dev),
				     DMA_TO_DEVICE);
	}

	err = alloc_ringmemory(ring);
	if (err)
		goto err_kfree_txhdr_cache;
	err = dmacontroller_setup(ring);
	if (err)
		goto err_free_ringmemory;

out:
	return ring;

err_free_ringmemory:
	free_ringmemory(ring);
err_kfree_txhdr_cache:
	kfree(ring->txhdr_cache);
err_kfree_meta:
	kfree(ring->meta);
err_kfree_ring:
	kfree(ring);
	ring = NULL;
	goto out;
}

#define divide(a, b)	({	\
	typeof(a) __a = a;	\
	do_div(__a, b);		\
	__a;			\
  })

#define modulo(a, b)	({	\
	typeof(a) __a = a;	\
	do_div(__a, b);		\
  })

/* Main cleanup function. */
static void b43_destroy_dmaring(struct b43_dmaring *ring,
				const char *ringname)
{
	if (!ring)
		return;

#ifdef CONFIG_B43_DEBUG
	{
		/* Print some statistics. */
		u64 failed_packets = ring->nr_failed_tx_packets;
		u64 succeed_packets = ring->nr_succeed_tx_packets;
		u64 nr_packets = failed_packets + succeed_packets;
		u64 permille_failed = 0, average_tries = 0;

		if (nr_packets)
			permille_failed = divide(failed_packets * 1000, nr_packets);
		if (nr_packets)
			average_tries = divide(ring->nr_total_packet_tries * 100, nr_packets);

		b43dbg(ring->dev->wl, "DMA-%u %s: "
		       "Used slots %d/%d, Failed frames %llu/%llu = %llu.%01llu%%, "
		       "Average tries %llu.%02llu\n",
		       (unsigned int)(ring->type), ringname,
		       ring->max_used_slots,
		       ring->nr_slots,
		       (unsigned long long)failed_packets,
		       (unsigned long long)nr_packets,
		       (unsigned long long)divide(permille_failed, 10),
		       (unsigned long long)modulo(permille_failed, 10),
		       (unsigned long long)divide(average_tries, 100),
		       (unsigned long long)modulo(average_tries, 100));
	}
#endif /* DEBUG */

	/* Device IRQs are disabled prior entering this function,
	 * so no need to take care of concurrency with rx handler stuff.
	 */
	dmacontroller_cleanup(ring);
	free_all_descbuffers(ring);
	free_ringmemory(ring);

	kfree(ring->txhdr_cache);
	kfree(ring->meta);
	kfree(ring);
}

#define destroy_ring(dma, ring) do {				\
	b43_destroy_dmaring((dma)->ring, __stringify(ring));	\
	(dma)->ring = NULL;					\
    } while (0)

void b43_dma_free(struct b43_wldev *dev)
{
	struct b43_dma *dma;

	if (b43_using_pio_transfers(dev))
		return;
	dma = &dev->dma;

	destroy_ring(dma, rx_ring);
	destroy_ring(dma, tx_ring_AC_BK);
	destroy_ring(dma, tx_ring_AC_BE);
	destroy_ring(dma, tx_ring_AC_VI);
	destroy_ring(dma, tx_ring_AC_VO);
	destroy_ring(dma, tx_ring_mcast);
}

static int b43_dma_set_mask(struct b43_wldev *dev, u64 mask)
{
	u64 orig_mask = mask;
	bool fallback = 0;
	int err;

	/* Try to set the DMA mask. If it fails, try falling back to a
	 * lower mask, as we can always also support a lower one. */
	while (1) {
		err = ssb_dma_set_mask(dev->dev, mask);
		if (!err)
			break;
		if (mask == DMA_BIT_MASK(64)) {
			mask = DMA_BIT_MASK(32);
			fallback = 1;
			continue;
		}
		if (mask == DMA_BIT_MASK(32)) {
			mask = DMA_BIT_MASK(30);
			fallback = 1;
			continue;
		}
		b43err(dev->wl, "The machine/kernel does not support "
		       "the required %u-bit DMA mask\n",
		       (unsigned int)dma_mask_to_engine_type(orig_mask));
		return -EOPNOTSUPP;
	}
	if (fallback) {
		b43info(dev->wl, "DMA mask fallback from %u-bit to %u-bit\n",
			(unsigned int)dma_mask_to_engine_type(orig_mask),
			(unsigned int)dma_mask_to_engine_type(mask));
	}

	return 0;
}

int b43_dma_init(struct b43_wldev *dev)
{
	struct b43_dma *dma = &dev->dma;
	int err;
	u64 dmamask;
	enum b43_dmatype type;

	dmamask = supported_dma_mask(dev);
	type = dma_mask_to_engine_type(dmamask);
	err = b43_dma_set_mask(dev, dmamask);
	if (err)
		return err;

	err = -ENOMEM;
	/* setup TX DMA channels. */
	dma->tx_ring_AC_BK = b43_setup_dmaring(dev, 0, 1, type);
	if (!dma->tx_ring_AC_BK)
		goto out;

	dma->tx_ring_AC_BE = b43_setup_dmaring(dev, 1, 1, type);
	if (!dma->tx_ring_AC_BE)
		goto err_destroy_bk;

	dma->tx_ring_AC_VI = b43_setup_dmaring(dev, 2, 1, type);
	if (!dma->tx_ring_AC_VI)
		goto err_destroy_be;

	dma->tx_ring_AC_VO = b43_setup_dmaring(dev, 3, 1, type);
	if (!dma->tx_ring_AC_VO)
		goto err_destroy_vi;

	dma->tx_ring_mcast = b43_setup_dmaring(dev, 4, 1, type);
	if (!dma->tx_ring_mcast)
		goto err_destroy_vo;

	/* setup RX DMA channel. */
	dma->rx_ring = b43_setup_dmaring(dev, 0, 0, type);
	if (!dma->rx_ring)
		goto err_destroy_mcast;

	/* No support for the TX status DMA ring. */
	B43_WARN_ON(dev->dev->id.revision < 5);

	b43dbg(dev->wl, "%u-bit DMA initialized\n",
	       (unsigned int)type);
	err = 0;
out:
	return err;

err_destroy_mcast:
	destroy_ring(dma, tx_ring_mcast);
err_destroy_vo:
	destroy_ring(dma, tx_ring_AC_VO);
err_destroy_vi:
	destroy_ring(dma, tx_ring_AC_VI);
err_destroy_be:
	destroy_ring(dma, tx_ring_AC_BE);
err_destroy_bk:
	destroy_ring(dma, tx_ring_AC_BK);
	goto out;
}

/* Generate a cookie for the TX header. */
static u16 generate_cookie(struct b43_dmaring *ring, int slot)
{
	u16 cookie;

	/* Use the upper 4 bits of the cookie as
	 * DMA controller ID and store the slot number
	 * in the lower 12 bits.
	 * Note that the cookie must never be 0, as this
	 * is a special value used in RX path.
	 * It can also not be 0xFFFF because that is special
	 * for multicast frames.
	 */
	cookie = (((u16)ring->index + 1) << 12);
	B43_WARN_ON(slot & ~0x0FFF);
	cookie |= (u16)slot;

	return cookie;
}

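/* Example: slot 0x01A on the ring with index 2 yields cookie 0x301A:
 * controller ID 3 (index + 1) in the upper four bits, the slot number in
 * the lower twelve. parse_cookie() below reverses this mapping.
 */
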
/* Inspect a cookie and find out to which controller/slot it belongs. */
static
struct b43_dmaring *parse_cookie(struct b43_wldev *dev, u16 cookie, int *slot)
{
	struct b43_dma *dma = &dev->dma;
	struct b43_dmaring *ring = NULL;

	switch (cookie & 0xF000) {
	case 0x1000:
		ring = dma->tx_ring_AC_BK;
		break;
	case 0x2000:
		ring = dma->tx_ring_AC_BE;
		break;
	case 0x3000:
		ring = dma->tx_ring_AC_VI;
		break;
	case 0x4000:
		ring = dma->tx_ring_AC_VO;
		break;
	case 0x5000:
		ring = dma->tx_ring_mcast;
		break;
	}
	*slot = (cookie & 0x0FFF);
	B43_WARN_ON(!(ring && *slot >= 0 && *slot < ring->nr_slots));

	return ring;
}

static int dma_tx_fragment(struct b43_dmaring *ring,
			   struct sk_buff *skb)
{
	const struct b43_dma_ops *ops = ring->ops;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	u8 *header;
	int slot, old_top_slot, old_used_slots;
	int err;
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;
	struct b43_dmadesc_meta *meta_hdr;
	struct sk_buff *bounce_skb;
	u16 cookie;
	size_t hdrsize = b43_txhdr_size(ring->dev);

	/* Important note: If the number of used DMA slots per TX frame
	 * is changed here, the TX_SLOTS_PER_FRAME definition at the top of
	 * the file has to be updated, too!
	 */

	old_top_slot = ring->current_slot;
	old_used_slots = ring->used_slots;

	/* Get a slot for the header. */
	slot = request_slot(ring);
	desc = ops->idx2desc(ring, slot, &meta_hdr);
	memset(meta_hdr, 0, sizeof(*meta_hdr));

	header = &(ring->txhdr_cache[(slot / TX_SLOTS_PER_FRAME) * hdrsize]);
	cookie = generate_cookie(ring, slot);
	err = b43_generate_txhdr(ring->dev, header,
				 skb->data, skb->len, info, cookie);
	if (unlikely(err)) {
		ring->current_slot = old_top_slot;
		ring->used_slots = old_used_slots;
		return err;
	}

	meta_hdr->dmaaddr = map_descbuffer(ring, (unsigned char *)header,
					   hdrsize, 1);
	if (b43_dma_mapping_error(ring, meta_hdr->dmaaddr, hdrsize, 1)) {
		ring->current_slot = old_top_slot;
		ring->used_slots = old_used_slots;
		return -EIO;
	}
	ops->fill_descriptor(ring, desc, meta_hdr->dmaaddr,
			     hdrsize, 1, 0, 0);

	/* Get a slot for the payload. */
	slot = request_slot(ring);
	desc = ops->idx2desc(ring, slot, &meta);
	memset(meta, 0, sizeof(*meta));

	meta->skb = skb;
	meta->is_last_fragment = 1;

	meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
	/* create a bounce buffer in zone_dma on mapping failure. */
	if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
		bounce_skb = __dev_alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
		if (!bounce_skb) {
			ring->current_slot = old_top_slot;
			ring->used_slots = old_used_slots;
			err = -ENOMEM;
			goto out_unmap_hdr;
		}

		memcpy(skb_put(bounce_skb, skb->len), skb->data, skb->len);
		dev_kfree_skb_any(skb);
		skb = bounce_skb;
		meta->skb = skb;
		meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
		if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
			ring->current_slot = old_top_slot;
			ring->used_slots = old_used_slots;
			err = -EIO;
			goto out_free_bounce;
		}
	}

	ops->fill_descriptor(ring, desc, meta->dmaaddr, skb->len, 0, 1, 1);

	if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
		/* Tell the firmware about the cookie of the last
		 * mcast frame, so it can clear the more-data bit in it. */
		b43_shm_write16(ring->dev, B43_SHM_SHARED,
				B43_SHM_SH_MCASTCOOKIE, cookie);
	}
	/* Now transfer the whole frame. */
	wmb();
	ops->poke_tx(ring, next_slot(ring, slot));
	return 0;

out_free_bounce:
	dev_kfree_skb_any(skb);
out_unmap_hdr:
	unmap_descbuffer(ring, meta_hdr->dmaaddr,
			 hdrsize, 1);
	return err;
}

static inline int should_inject_overflow(struct b43_dmaring *ring)
{
#ifdef CONFIG_B43_DEBUG
	if (unlikely(b43_debug(ring->dev, B43_DBG_DMAOVERFLOW))) {
		/* Check if we should inject another ringbuffer overflow
		 * to test handling of this situation in the stack. */
		unsigned long next_overflow;

		next_overflow = ring->last_injected_overflow + HZ;
		if (time_after(jiffies, next_overflow)) {
			ring->last_injected_overflow = jiffies;
			b43dbg(ring->dev->wl,
			       "Injecting TX ring overflow on "
			       "DMA controller %d\n", ring->index);
			return 1;
		}
	}
#endif /* CONFIG_B43_DEBUG */
	return 0;
}

/* Static mapping of mac80211's queues (priorities) to b43 DMA rings. */
static struct b43_dmaring *select_ring_by_priority(struct b43_wldev *dev,
						   u8 queue_prio)
{
	struct b43_dmaring *ring;

	if (dev->qos_enabled) {
		/* 0 = highest priority */
		switch (queue_prio) {
		default:
			B43_WARN_ON(1);
			/* fallthrough */
		case 0:
			ring = dev->dma.tx_ring_AC_VO;
			break;
		case 1:
			ring = dev->dma.tx_ring_AC_VI;
			break;
		case 2:
			ring = dev->dma.tx_ring_AC_BE;
			break;
		case 3:
			ring = dev->dma.tx_ring_AC_BK;
			break;
		}
	} else
		ring = dev->dma.tx_ring_AC_BE;

	return ring;
}

int b43_dma_tx(struct b43_wldev *dev, struct sk_buff *skb)
{
	struct b43_dmaring *ring;
	struct ieee80211_hdr *hdr;
	int err = 0;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	hdr = (struct ieee80211_hdr *)skb->data;
	if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
		/* The multicast ring will be sent after the DTIM */
		ring = dev->dma.tx_ring_mcast;
		/* Set the more-data bit. Ucode will clear it on
		 * the last frame for us. */
		hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
	} else {
		/* Decide by priority where to put this frame. */
		ring = select_ring_by_priority(
			dev, skb_get_queue_mapping(skb));
	}

	B43_WARN_ON(!ring->tx);

	if (unlikely(ring->stopped)) {
		/* We get here only because of a bug in mac80211.
		 * Because of a race, one packet may be queued after
		 * the queue is stopped, thus we got called when we shouldn't.
		 * For now, just refuse the transmit. */
		if (b43_debug(dev, B43_DBG_DMAVERBOSE))
			b43err(dev->wl, "Packet after queue stopped\n");
		err = -ENOSPC;
		goto out;
	}

	if (unlikely(WARN_ON(free_slots(ring) < TX_SLOTS_PER_FRAME))) {
		/* If we get here, we have a real error with the queue
		 * full, but queues not stopped. */
		b43err(dev->wl, "DMA queue overflow\n");
		err = -ENOSPC;
		goto out;
	}

	/* Assign the queue number to the ring (if not already done before)
	 * so TX status handling can use it. The queue to ring mapping is
	 * static, so we don't need to store it per frame. */
	ring->queue_prio = skb_get_queue_mapping(skb);

	err = dma_tx_fragment(ring, skb);
	if (unlikely(err == -ENOKEY)) {
		/* Drop this packet, as we don't have the encryption key
		 * anymore and must not transmit it unencrypted. */
		dev_kfree_skb_any(skb);
		err = 0;
		goto out;
	}
	if (unlikely(err)) {
		b43err(dev->wl, "DMA tx mapping failure\n");
		goto out;
	}
	ring->nr_tx_packets++;
	if ((free_slots(ring) < TX_SLOTS_PER_FRAME) ||
	    should_inject_overflow(ring)) {
		/* This TX ring is full. */
		ieee80211_stop_queue(dev->wl->hw, skb_get_queue_mapping(skb));
		ring->stopped = 1;
		if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
			b43dbg(dev->wl, "Stopped TX ring %d\n", ring->index);
		}
	}
out:
	return err;
}

void b43_dma_handle_txstatus(struct b43_wldev *dev,
			     const struct b43_txstatus *status)
{
	const struct b43_dma_ops *ops;
	struct b43_dmaring *ring;
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;
	int slot;
	bool frame_succeed;

	ring = parse_cookie(dev, status->cookie, &slot);
	if (unlikely(!ring))
		return;

	B43_WARN_ON(!ring->tx);
	ops = ring->ops;
	while (1) {
		B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));
		desc = ops->idx2desc(ring, slot, &meta);

		if (meta->skb)
			unmap_descbuffer(ring, meta->dmaaddr, meta->skb->len,
					 1);
		else
			unmap_descbuffer(ring, meta->dmaaddr,
					 b43_txhdr_size(dev), 1);

		if (meta->is_last_fragment) {
			struct ieee80211_tx_info *info;

			BUG_ON(!meta->skb);

			info = IEEE80211_SKB_CB(meta->skb);

			/*
			 * Call back to inform the ieee80211 subsystem about
			 * the status of the transmission.
			 */
			frame_succeed = b43_fill_txstatus_report(dev, info, status);
#ifdef CONFIG_B43_DEBUG
			if (frame_succeed)
				ring->nr_succeed_tx_packets++;
			else
				ring->nr_failed_tx_packets++;
			ring->nr_total_packet_tries += status->frame_count;
#endif /* DEBUG */
			ieee80211_tx_status(dev->wl->hw, meta->skb);

			/* skb is freed by ieee80211_tx_status() */
			meta->skb = NULL;
		} else {
			/* No need to call free_descriptor_buffer here, as
			 * this is only the txhdr, which is not allocated.
			 */
			B43_WARN_ON(meta->skb);
		}

		/* Everything unmapped and free'd. So it's not used anymore. */
		ring->used_slots--;

		if (meta->is_last_fragment)
			break;
		slot = next_slot(ring, slot);
	}
	if (ring->stopped) {
		B43_WARN_ON(free_slots(ring) < TX_SLOTS_PER_FRAME);
		ieee80211_wake_queue(dev->wl->hw, ring->queue_prio);
		ring->stopped = 0;
		if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
			b43dbg(dev->wl, "Woke up TX ring %d\n", ring->index);
		}
	}
}

void b43_dma_get_tx_stats(struct b43_wldev *dev,
			  struct ieee80211_tx_queue_stats *stats)
{
	const int nr_queues = dev->wl->hw->queues;
	struct b43_dmaring *ring;
	int i;

	for (i = 0; i < nr_queues; i++) {
		ring = select_ring_by_priority(dev, i);

		stats[i].len = ring->used_slots / TX_SLOTS_PER_FRAME;
		stats[i].limit = ring->nr_slots / TX_SLOTS_PER_FRAME;
		stats[i].count = ring->nr_tx_packets;
	}
}

static void dma_rx(struct b43_dmaring *ring, int *slot)
{
	const struct b43_dma_ops *ops = ring->ops;
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;
	struct b43_rxhdr_fw4 *rxhdr;
	struct sk_buff *skb;
	u16 len;
	int err;
	dma_addr_t dmaaddr;

	desc = ops->idx2desc(ring, *slot, &meta);

	sync_descbuffer_for_cpu(ring, meta->dmaaddr, ring->rx_buffersize);
	skb = meta->skb;

	rxhdr = (struct b43_rxhdr_fw4 *)skb->data;
	len = le16_to_cpu(rxhdr->frame_len);
	if (len == 0) {
		int i = 0;

		do {
			udelay(2);
			barrier();
			len = le16_to_cpu(rxhdr->frame_len);
		} while (len == 0 && i++ < 5);
		if (unlikely(len == 0)) {
			dmaaddr = meta->dmaaddr;
			goto drop_recycle_buffer;
		}
	}
	if (unlikely(b43_rx_buffer_is_poisoned(ring, skb))) {
		/* Something went wrong with the DMA.
		 * The device did not touch the buffer and did not overwrite the poison. */
		b43dbg(ring->dev->wl, "DMA RX: Dropping poisoned buffer.\n");
		dmaaddr = meta->dmaaddr;
		goto drop_recycle_buffer;
	}
	if (unlikely(len > ring->rx_buffersize)) {
		/* The data did not fit into one descriptor buffer
		 * and is split over multiple buffers.
		 * This should never happen, as we try to allocate buffers
		 * big enough. So simply ignore this packet.
		 */
		int cnt = 0;
		s32 tmp = len;

		while (1) {
			desc = ops->idx2desc(ring, *slot, &meta);
			/* recycle the descriptor buffer. */
			b43_poison_rx_buffer(ring, meta->skb);
			sync_descbuffer_for_device(ring, meta->dmaaddr,
						   ring->rx_buffersize);
			*slot = next_slot(ring, *slot);
			cnt++;
			tmp -= ring->rx_buffersize;
			if (tmp <= 0)
				break;
		}
		b43err(ring->dev->wl, "DMA RX buffer too small "
		       "(len: %u, buffer: %u, nr-dropped: %d)\n",
		       len, ring->rx_buffersize, cnt);
		goto drop;
	}

	dmaaddr = meta->dmaaddr;
	err = setup_rx_descbuffer(ring, desc, meta, GFP_ATOMIC);
	if (unlikely(err)) {
		b43dbg(ring->dev->wl, "DMA RX: setup_rx_descbuffer() failed\n");
		goto drop_recycle_buffer;
	}

	unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0);
	skb_put(skb, len + ring->frameoffset);
	skb_pull(skb, ring->frameoffset);

	b43_rx(ring->dev, skb, rxhdr);
drop:
	return;

drop_recycle_buffer:
	/* Poison and recycle the RX buffer. */
	b43_poison_rx_buffer(ring, skb);
	sync_descbuffer_for_device(ring, dmaaddr, ring->rx_buffersize);
}

void b43_dma_rx(struct b43_dmaring *ring)
{
	const struct b43_dma_ops *ops = ring->ops;
	int slot, current_slot;
	int used_slots = 0;

	B43_WARN_ON(ring->tx);
	current_slot = ops->get_current_rxslot(ring);
	B43_WARN_ON(!(current_slot >= 0 && current_slot < ring->nr_slots));

	slot = ring->current_slot;
	for (; slot != current_slot; slot = next_slot(ring, slot)) {
		dma_rx(ring, &slot);
		update_max_used_slots(ring, ++used_slots);
	}
	ops->set_current_rxslot(ring, slot);
	ring->current_slot = slot;
}

static void b43_dma_tx_suspend_ring(struct b43_dmaring *ring)
{
	B43_WARN_ON(!ring->tx);
	ring->ops->tx_suspend(ring);
}

static void b43_dma_tx_resume_ring(struct b43_dmaring *ring)
{
	B43_WARN_ON(!ring->tx);
	ring->ops->tx_resume(ring);
}

void b43_dma_tx_suspend(struct b43_wldev *dev)
{
	b43_power_saving_ctl_bits(dev, B43_PS_AWAKE);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_BK);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_BE);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_VI);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_VO);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_mcast);
}

void b43_dma_tx_resume(struct b43_wldev *dev)
{
	b43_dma_tx_resume_ring(dev->dma.tx_ring_mcast);
	b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_VO);
	b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_VI);
	b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_BE);
	b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_BK);
	b43_power_saving_ctl_bits(dev, 0);
}

#ifdef CONFIG_B43_PIO
static void direct_fifo_rx(struct b43_wldev *dev, enum b43_dmatype type,
			   u16 mmio_base, bool enable)
{
	u32 ctl;

	if (type == B43_DMA_64BIT) {
		ctl = b43_read32(dev, mmio_base + B43_DMA64_RXCTL);
		ctl &= ~B43_DMA64_RXDIRECTFIFO;
		if (enable)
			ctl |= B43_DMA64_RXDIRECTFIFO;
		b43_write32(dev, mmio_base + B43_DMA64_RXCTL, ctl);
	} else {
		ctl = b43_read32(dev, mmio_base + B43_DMA32_RXCTL);
		ctl &= ~B43_DMA32_RXDIRECTFIFO;
		if (enable)
			ctl |= B43_DMA32_RXDIRECTFIFO;
		b43_write32(dev, mmio_base + B43_DMA32_RXCTL, ctl);
	}
}

/* Enable/Disable Direct FIFO Receive Mode (PIO) on a RX engine.
 * This is called from PIO code, so DMA structures are not available. */
void b43_dma_direct_fifo_rx(struct b43_wldev *dev,
			    unsigned int engine_index, bool enable)
{
	enum b43_dmatype type;
	u16 mmio_base;

	type = dma_mask_to_engine_type(supported_dma_mask(dev));

	mmio_base = b43_dmacontroller_base(type, engine_index);
	direct_fifo_rx(dev, type, mmio_base, enable);
}
#endif /* CONFIG_B43_PIO */