/*

  Broadcom BCM43xx wireless driver

  DMA ringbuffer and descriptor allocation/management

  Copyright (c) 2005, 2006 Michael Buesch <mbuesch@freenet.de>

  Some code in this file is derived from the b44.c driver
  Copyright (C) 2002 David S. Miller
  Copyright (C) Pekka Pietikainen

  This program is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; either version 2 of the License, or
  (at your option) any later version.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; see the file COPYING.  If not, write to
  the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
  Boston, MA 02110-1301, USA.

*/
#include "bcm43xx.h"
#include "bcm43xx_dma.h"
#include "bcm43xx_main.h"
#include "bcm43xx_debugfs.h"
#include "bcm43xx_power.h"
#include "bcm43xx_xmit.h"

#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/skbuff.h>
static inline int free_slots(struct bcm43xx_dmaring *ring)
{
        return (ring->nr_slots - ring->used_slots);
}
static inline int next_slot(struct bcm43xx_dmaring *ring, int slot)
{
        assert(slot >= -1 && slot <= ring->nr_slots - 1);
        if (slot == ring->nr_slots - 1)
                return 0;
        return slot + 1;
}
static inline int prev_slot(struct bcm43xx_dmaring *ring, int slot)
{
        assert(slot >= 0 && slot <= ring->nr_slots - 1);
        if (slot == 0)
                return ring->nr_slots - 1;
        return slot - 1;
}
/* Request a slot for usage. */
static inline int request_slot(struct bcm43xx_dmaring *ring)
{
        int slot;

        assert(!ring->suspended);
        assert(free_slots(ring) != 0);

        slot = next_slot(ring, ring->current_slot);
        ring->current_slot = slot;
        ring->used_slots++;

        /* Check the number of available slots and suspend TX,
         * if we are running low on free slots. */
        if (unlikely(free_slots(ring) < ring->suspend_mark)) {
                netif_stop_queue(ring->bcm->net_dev);
                ring->suspended = 1;
        }
#ifdef CONFIG_BCM43XX_DEBUG
        if (ring->used_slots > ring->max_used_slots)
                ring->max_used_slots = ring->used_slots;
#endif /* CONFIG_BCM43XX_DEBUG */

        return slot;
}
/* Return a slot to the free slots. */
static inline void return_slot(struct bcm43xx_dmaring *ring, int slot)
{
        ring->used_slots--;

        /* Check if TX is suspended and check if we have
         * enough free slots to resume it again. */
        if (unlikely(ring->suspended)) {
                if (free_slots(ring) >= ring->resume_mark) {
                        ring->suspended = 0;
                        netif_wake_queue(ring->bcm->net_dev);
                }
        }
}
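/* Note on the watermarks used above: request_slot() stops the net queue once
 * free_slots() drops below suspend_mark, and return_slot() only wakes the
 * queue again once free_slots() has risen back to resume_mark.  Keeping the
 * two marks apart (bcm43xx_setup_dmaring() asserts suspend_mark <
 * resume_mark) gives the queue some hysteresis instead of toggling it on
 * every single freed slot. */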
u16 bcm43xx_dmacontroller_base(int dma64bit, int controller_idx)
{
        static const u16 map64[] = {
                BCM43xx_MMIO_DMA64_BASE0,
                BCM43xx_MMIO_DMA64_BASE1,
                BCM43xx_MMIO_DMA64_BASE2,
                BCM43xx_MMIO_DMA64_BASE3,
                BCM43xx_MMIO_DMA64_BASE4,
                BCM43xx_MMIO_DMA64_BASE5,
        };
        static const u16 map32[] = {
                BCM43xx_MMIO_DMA32_BASE0,
                BCM43xx_MMIO_DMA32_BASE1,
                BCM43xx_MMIO_DMA32_BASE2,
                BCM43xx_MMIO_DMA32_BASE3,
                BCM43xx_MMIO_DMA32_BASE4,
                BCM43xx_MMIO_DMA32_BASE5,
        };

        if (dma64bit) {
                assert(controller_idx >= 0 &&
                       controller_idx < ARRAY_SIZE(map64));
                return map64[controller_idx];
        }
        assert(controller_idx >= 0 &&
               controller_idx < ARRAY_SIZE(map32));
        return map32[controller_idx];
}
static dma_addr_t map_descbuffer(struct bcm43xx_dmaring *ring,
                                 unsigned char *buf,
                                 size_t len,
                                 int tx)
{
        dma_addr_t dmaaddr;
        int direction = PCI_DMA_FROMDEVICE;

        if (tx)
                direction = PCI_DMA_TODEVICE;

        dmaaddr = pci_map_single(ring->bcm->pci_dev,
                                 buf, len, direction);

        return dmaaddr;
}
static void unmap_descbuffer(struct bcm43xx_dmaring *ring,
                             dma_addr_t addr,
                             size_t len,
                             int tx)
{
        if (tx) {
                pci_unmap_single(ring->bcm->pci_dev,
                                 addr, len,
                                 PCI_DMA_TODEVICE);
        } else {
                pci_unmap_single(ring->bcm->pci_dev,
                                 addr, len,
                                 PCI_DMA_FROMDEVICE);
        }
}
static void sync_descbuffer_for_cpu(struct bcm43xx_dmaring *ring,
                                    dma_addr_t addr,
                                    size_t len)
{
        pci_dma_sync_single_for_cpu(ring->bcm->pci_dev,
                                    addr, len, PCI_DMA_FROMDEVICE);
}
static void sync_descbuffer_for_device(struct bcm43xx_dmaring *ring,
                                       dma_addr_t addr,
                                       size_t len)
{
        /* These buffers are RX descriptor buffers, mapped FROMDEVICE,
         * so the sync must use the same direction. */
        pci_dma_sync_single_for_device(ring->bcm->pci_dev,
                                       addr, len, PCI_DMA_FROMDEVICE);
}
/* Unmap and free a descriptor buffer. */
static void free_descriptor_buffer(struct bcm43xx_dmaring *ring,
                                   struct bcm43xx_dmadesc_meta *meta,
                                   int irq_context)
{
        if (irq_context)
                dev_kfree_skb_irq(meta->skb);
        else
                dev_kfree_skb(meta->skb);
        meta->skb = NULL;
}
static int alloc_ringmemory(struct bcm43xx_dmaring *ring)
{
        ring->descbase = pci_alloc_consistent(ring->bcm->pci_dev, BCM43xx_DMA_RINGMEMSIZE,
                                              &(ring->dmabase));
        if (!ring->descbase) {
                /* Allocation may have failed due to pci_alloc_consistent
                   insisting on use of GFP_DMA, which is more restrictive
                   than necessary. */
                struct dma_desc *rx_ring;
                dma_addr_t rx_ring_dma;

                rx_ring = kzalloc(BCM43xx_DMA_RINGMEMSIZE, GFP_KERNEL);
                if (!rx_ring)
                        goto out_err;

                rx_ring_dma = pci_map_single(ring->bcm->pci_dev, rx_ring,
                                             BCM43xx_DMA_RINGMEMSIZE,
                                             PCI_DMA_BIDIRECTIONAL);

                if (pci_dma_mapping_error(rx_ring_dma) ||
                    rx_ring_dma + BCM43xx_DMA_RINGMEMSIZE > ring->bcm->dma_mask) {
                        /* Unmap and try mapping again, once. */
                        if (!pci_dma_mapping_error(rx_ring_dma))
                                pci_unmap_single(ring->bcm->pci_dev,
                                                 rx_ring_dma, BCM43xx_DMA_RINGMEMSIZE,
                                                 PCI_DMA_BIDIRECTIONAL);
                        rx_ring_dma = pci_map_single(ring->bcm->pci_dev,
                                                     rx_ring, BCM43xx_DMA_RINGMEMSIZE,
                                                     PCI_DMA_BIDIRECTIONAL);
                        if (pci_dma_mapping_error(rx_ring_dma) ||
                            rx_ring_dma + BCM43xx_DMA_RINGMEMSIZE > ring->bcm->dma_mask) {
                                if (!pci_dma_mapping_error(rx_ring_dma))
                                        pci_unmap_single(ring->bcm->pci_dev,
                                                         rx_ring_dma, BCM43xx_DMA_RINGMEMSIZE,
                                                         PCI_DMA_BIDIRECTIONAL);
                                kfree(rx_ring);
                                goto out_err;
                        }
                }

                ring->descbase = rx_ring;
                ring->dmabase = rx_ring_dma;
        }
        memset(ring->descbase, 0, BCM43xx_DMA_RINGMEMSIZE);

        return 0;

out_err:
        printk(KERN_ERR PFX "DMA ringmemory allocation failed\n");
        return -ENOMEM;
}
static void free_ringmemory(struct bcm43xx_dmaring *ring)
{
        struct device *dev = &(ring->bcm->pci_dev->dev);

        dma_free_coherent(dev, BCM43xx_DMA_RINGMEMSIZE,
                          ring->descbase, ring->dmabase);
}
/* Reset the RX DMA channel */
int bcm43xx_dmacontroller_rx_reset(struct bcm43xx_private *bcm,
                                   u16 mmio_base, int dma64)
{
        int i;
        u32 value;
        u16 offset;

        offset = dma64 ? BCM43xx_DMA64_RXCTL : BCM43xx_DMA32_RXCTL;
        bcm43xx_write32(bcm, mmio_base + offset, 0);
        for (i = 0; i < 1000; i++) {
                offset = dma64 ? BCM43xx_DMA64_RXSTATUS : BCM43xx_DMA32_RXSTATUS;
                value = bcm43xx_read32(bcm, mmio_base + offset);
                if (dma64) {
                        value &= BCM43xx_DMA64_RXSTAT;
                        if (value == BCM43xx_DMA64_RXSTAT_DISABLED) {
                                i = -1;
                                break;
                        }
                } else {
                        value &= BCM43xx_DMA32_RXSTATE;
                        if (value == BCM43xx_DMA32_RXSTAT_DISABLED) {
                                i = -1;
                                break;
                        }
                }
                udelay(10);
        }
        if (i != -1) {
                printk(KERN_ERR PFX "Error: Wait on DMA RX status timed out.\n");
                return -ENODEV;
        }

        return 0;
}
/* Reset the TX DMA channel */
int bcm43xx_dmacontroller_tx_reset(struct bcm43xx_private *bcm,
                                   u16 mmio_base, int dma64)
{
        int i;
        u32 value;
        u16 offset;

        for (i = 0; i < 1000; i++) {
                offset = dma64 ? BCM43xx_DMA64_TXSTATUS : BCM43xx_DMA32_TXSTATUS;
                value = bcm43xx_read32(bcm, mmio_base + offset);
                if (dma64) {
                        value &= BCM43xx_DMA64_TXSTAT;
                        if (value == BCM43xx_DMA64_TXSTAT_DISABLED ||
                            value == BCM43xx_DMA64_TXSTAT_IDLEWAIT ||
                            value == BCM43xx_DMA64_TXSTAT_STOPPED)
                                break;
                } else {
                        value &= BCM43xx_DMA32_TXSTATE;
                        if (value == BCM43xx_DMA32_TXSTAT_DISABLED ||
                            value == BCM43xx_DMA32_TXSTAT_IDLEWAIT ||
                            value == BCM43xx_DMA32_TXSTAT_STOPPED)
                                break;
                }
                udelay(10);
        }
        offset = dma64 ? BCM43xx_DMA64_TXCTL : BCM43xx_DMA32_TXCTL;
        bcm43xx_write32(bcm, mmio_base + offset, 0);
        for (i = 0; i < 1000; i++) {
                offset = dma64 ? BCM43xx_DMA64_TXSTATUS : BCM43xx_DMA32_TXSTATUS;
                value = bcm43xx_read32(bcm, mmio_base + offset);
                if (dma64) {
                        value &= BCM43xx_DMA64_TXSTAT;
                        if (value == BCM43xx_DMA64_TXSTAT_DISABLED) {
                                i = -1;
                                break;
                        }
                } else {
                        value &= BCM43xx_DMA32_TXSTATE;
                        if (value == BCM43xx_DMA32_TXSTAT_DISABLED) {
                                i = -1;
                                break;
                        }
                }
                udelay(10);
        }
        if (i != -1) {
                printk(KERN_ERR PFX "Error: Wait on DMA TX status timed out.\n");
                return -ENODEV;
        }
        /* ensure the reset is completed. */
        udelay(300);

        return 0;
}
static void fill_descriptor(struct bcm43xx_dmaring *ring,
                            struct bcm43xx_dmadesc_generic *desc,
                            dma_addr_t dmaaddr, u16 bufsize,
                            int start, int end, int irq)
{
        int slot;

        slot = bcm43xx_dma_desc2idx(ring, desc);
        assert(slot >= 0 && slot < ring->nr_slots);

        if (ring->dma64) {
                u32 ctl0 = 0, ctl1 = 0;
                u32 addrlo, addrhi;
                u32 addrext;

                addrlo = (u32)(dmaaddr & 0xFFFFFFFF);
                addrhi = (((u64)dmaaddr >> 32) & ~BCM43xx_DMA64_ROUTING);
                addrext = (((u64)dmaaddr >> 32) >> BCM43xx_DMA64_ROUTING_SHIFT);
                addrhi |= ring->routing;
                if (slot == ring->nr_slots - 1)
                        ctl0 |= BCM43xx_DMA64_DCTL0_DTABLEEND;
                if (start)
                        ctl0 |= BCM43xx_DMA64_DCTL0_FRAMESTART;
                if (end)
                        ctl0 |= BCM43xx_DMA64_DCTL0_FRAMEEND;
                if (irq)
                        ctl0 |= BCM43xx_DMA64_DCTL0_IRQ;
                ctl1 |= (bufsize - ring->frameoffset)
                        & BCM43xx_DMA64_DCTL1_BYTECNT;
                ctl1 |= (addrext << BCM43xx_DMA64_DCTL1_ADDREXT_SHIFT)
                        & BCM43xx_DMA64_DCTL1_ADDREXT_MASK;

                desc->dma64.control0 = cpu_to_le32(ctl0);
                desc->dma64.control1 = cpu_to_le32(ctl1);
                desc->dma64.address_low = cpu_to_le32(addrlo);
                desc->dma64.address_high = cpu_to_le32(addrhi);
        } else {
                u32 ctl;
                u32 addr;
                u32 addrext;

                addr = (u32)(dmaaddr & ~BCM43xx_DMA32_ROUTING);
                addrext = (u32)(dmaaddr & BCM43xx_DMA32_ROUTING)
                          >> BCM43xx_DMA32_ROUTING_SHIFT;
                addr |= ring->routing;
                ctl = (bufsize - ring->frameoffset)
                      & BCM43xx_DMA32_DCTL_BYTECNT;
                if (slot == ring->nr_slots - 1)
                        ctl |= BCM43xx_DMA32_DCTL_DTABLEEND;
                if (start)
                        ctl |= BCM43xx_DMA32_DCTL_FRAMESTART;
                if (end)
                        ctl |= BCM43xx_DMA32_DCTL_FRAMEEND;
                if (irq)
                        ctl |= BCM43xx_DMA32_DCTL_IRQ;
                ctl |= (addrext << BCM43xx_DMA32_DCTL_ADDREXT_SHIFT)
                       & BCM43xx_DMA32_DCTL_ADDREXT_MASK;

                desc->dma32.control = cpu_to_le32(ctl);
                desc->dma32.address = cpu_to_le32(addr);
        }
}
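/* Note on fill_descriptor() above: in both descriptor layouts the "routing"
 * bits of the bus address are not real address bits.  They are masked out of
 * the address word, shifted down into the separate address-extension
 * (addrext) control field, and the ring's routing value (client translation)
 * is ORed into the address instead. */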
static int setup_rx_descbuffer(struct bcm43xx_dmaring *ring,
                               struct bcm43xx_dmadesc_generic *desc,
                               struct bcm43xx_dmadesc_meta *meta,
                               gfp_t gfp_flags)
{
        struct bcm43xx_rxhdr *rxhdr;
        struct bcm43xx_hwxmitstatus *xmitstat;
        dma_addr_t dmaaddr;
        struct sk_buff *skb;

        skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
        if (unlikely(!skb))
                return -ENOMEM;
        dmaaddr = map_descbuffer(ring, skb->data, ring->rx_buffersize, 0);
        /* This hardware bug work-around adapted from the b44 driver.
           The chip may be unable to do PCI DMA to/from anything above 1GB. */
        if (pci_dma_mapping_error(dmaaddr) ||
            dmaaddr + ring->rx_buffersize > ring->bcm->dma_mask) {
                /* This one has 30-bit addressing... */
                if (!pci_dma_mapping_error(dmaaddr))
                        pci_unmap_single(ring->bcm->pci_dev,
                                         dmaaddr, ring->rx_buffersize,
                                         PCI_DMA_FROMDEVICE);
                dev_kfree_skb_any(skb);
                skb = __dev_alloc_skb(ring->rx_buffersize, GFP_DMA);
                if (!skb)
                        return -ENOMEM;
                dmaaddr = pci_map_single(ring->bcm->pci_dev,
                                         skb->data, ring->rx_buffersize,
                                         PCI_DMA_FROMDEVICE);
                if (pci_dma_mapping_error(dmaaddr) ||
                    dmaaddr + ring->rx_buffersize > ring->bcm->dma_mask) {
                        dev_kfree_skb_any(skb);
                        return -ENOMEM;
                }
        }

        meta->skb = skb;
        meta->dmaaddr = dmaaddr;
        skb->dev = ring->bcm->net_dev;

        fill_descriptor(ring, desc, dmaaddr,
                        ring->rx_buffersize, 0, 0, 0);

        rxhdr = (struct bcm43xx_rxhdr *)(skb->data);
        rxhdr->frame_length = 0;
        xmitstat = (struct bcm43xx_hwxmitstatus *)(skb->data);
        xmitstat->cookie = 0;

        return 0;
}
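/* Note on setup_rx_descbuffer() above: frame_length and cookie are cleared
 * so that the RX path in dma_rx() can detect, by polling for a non-zero
 * value, whether the device has already written its RX header or xmit
 * status into this freshly mapped buffer. */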
/* Allocate the initial descbuffers.
 * This is used for an RX ring only. */
static int alloc_initial_descbuffers(struct bcm43xx_dmaring *ring)
{
        int i, err = -ENOMEM;
        struct bcm43xx_dmadesc_generic *desc;
        struct bcm43xx_dmadesc_meta *meta;

        for (i = 0; i < ring->nr_slots; i++) {
                desc = bcm43xx_dma_idx2desc(ring, i, &meta);

                err = setup_rx_descbuffer(ring, desc, meta, GFP_KERNEL);
                if (err)
                        goto err_unwind;
        }
        ring->used_slots = ring->nr_slots;
        err = 0;
out:
        return err;

err_unwind:
        for (i--; i >= 0; i--) {
                desc = bcm43xx_dma_idx2desc(ring, i, &meta);

                unmap_descbuffer(ring, meta->dmaaddr, ring->rx_buffersize, 0);
                dev_kfree_skb(meta->skb);
        }
        goto out;
}
/* Do initial setup of the DMA controller.
 * Reset the controller, write the ring busaddress
 * and switch the "enable" bit on. */
static int dmacontroller_setup(struct bcm43xx_dmaring *ring)
{
        int err = 0;
        u32 value;
        u32 addrext;

        if (ring->tx) {
                if (ring->dma64) {
                        u64 ringbase = (u64)(ring->dmabase);

                        addrext = ((ringbase >> 32) >> BCM43xx_DMA64_ROUTING_SHIFT);
                        value = BCM43xx_DMA64_TXENABLE;
                        value |= (addrext << BCM43xx_DMA64_TXADDREXT_SHIFT)
                                 & BCM43xx_DMA64_TXADDREXT_MASK;
                        bcm43xx_dma_write(ring, BCM43xx_DMA64_TXCTL, value);
                        bcm43xx_dma_write(ring, BCM43xx_DMA64_TXRINGLO,
                                          (ringbase & 0xFFFFFFFF));
                        bcm43xx_dma_write(ring, BCM43xx_DMA64_TXRINGHI,
                                          ((ringbase >> 32) & ~BCM43xx_DMA64_ROUTING)
                                          | ring->routing);
                } else {
                        u32 ringbase = (u32)(ring->dmabase);

                        addrext = (ringbase >> BCM43xx_DMA32_ROUTING_SHIFT);
                        value = BCM43xx_DMA32_TXENABLE;
                        value |= (addrext << BCM43xx_DMA32_TXADDREXT_SHIFT)
                                 & BCM43xx_DMA32_TXADDREXT_MASK;
                        bcm43xx_dma_write(ring, BCM43xx_DMA32_TXCTL, value);
                        bcm43xx_dma_write(ring, BCM43xx_DMA32_TXRING,
                                          (ringbase & ~BCM43xx_DMA32_ROUTING)
                                          | ring->routing);
                }
        } else {
                err = alloc_initial_descbuffers(ring);
                if (err)
                        goto out;
                if (ring->dma64) {
                        u64 ringbase = (u64)(ring->dmabase);

                        addrext = ((ringbase >> 32) >> BCM43xx_DMA64_ROUTING_SHIFT);
                        value = (ring->frameoffset << BCM43xx_DMA64_RXFROFF_SHIFT);
                        value |= BCM43xx_DMA64_RXENABLE;
                        value |= (addrext << BCM43xx_DMA64_RXADDREXT_SHIFT)
                                 & BCM43xx_DMA64_RXADDREXT_MASK;
                        bcm43xx_dma_write(ring, BCM43xx_DMA64_RXCTL, value);
                        bcm43xx_dma_write(ring, BCM43xx_DMA64_RXRINGLO,
                                          (ringbase & 0xFFFFFFFF));
                        bcm43xx_dma_write(ring, BCM43xx_DMA64_RXRINGHI,
                                          ((ringbase >> 32) & ~BCM43xx_DMA64_ROUTING)
                                          | ring->routing);
                        bcm43xx_dma_write(ring, BCM43xx_DMA64_RXINDEX, 200);
                } else {
                        u32 ringbase = (u32)(ring->dmabase);

                        addrext = (ringbase >> BCM43xx_DMA32_ROUTING_SHIFT);
                        value = (ring->frameoffset << BCM43xx_DMA32_RXFROFF_SHIFT);
                        value |= BCM43xx_DMA32_RXENABLE;
                        value |= (addrext << BCM43xx_DMA32_RXADDREXT_SHIFT)
                                 & BCM43xx_DMA32_RXADDREXT_MASK;
                        bcm43xx_dma_write(ring, BCM43xx_DMA32_RXCTL, value);
                        bcm43xx_dma_write(ring, BCM43xx_DMA32_RXRING,
                                          (ringbase & ~BCM43xx_DMA32_ROUTING)
                                          | ring->routing);
                        bcm43xx_dma_write(ring, BCM43xx_DMA32_RXINDEX, 200);
                }
        }

out:
        return err;
}
/* Shutdown the DMA controller. */
static void dmacontroller_cleanup(struct bcm43xx_dmaring *ring)
{
        if (ring->tx) {
                bcm43xx_dmacontroller_tx_reset(ring->bcm, ring->mmio_base, ring->dma64);
                if (ring->dma64) {
                        bcm43xx_dma_write(ring, BCM43xx_DMA64_TXRINGLO, 0);
                        bcm43xx_dma_write(ring, BCM43xx_DMA64_TXRINGHI, 0);
                } else
                        bcm43xx_dma_write(ring, BCM43xx_DMA32_TXRING, 0);
        } else {
                bcm43xx_dmacontroller_rx_reset(ring->bcm, ring->mmio_base, ring->dma64);
                if (ring->dma64) {
                        bcm43xx_dma_write(ring, BCM43xx_DMA64_RXRINGLO, 0);
                        bcm43xx_dma_write(ring, BCM43xx_DMA64_RXRINGHI, 0);
                } else
                        bcm43xx_dma_write(ring, BCM43xx_DMA32_RXRING, 0);
        }
}
static void free_all_descbuffers(struct bcm43xx_dmaring *ring)
{
        struct bcm43xx_dmadesc_generic *desc;
        struct bcm43xx_dmadesc_meta *meta;
        int i;

        if (!ring->used_slots)
                return;
        for (i = 0; i < ring->nr_slots; i++) {
                desc = bcm43xx_dma_idx2desc(ring, i, &meta);

                if (!meta->skb) {
                        /* Nothing is mapped for this slot. */
                        continue;
                }
                if (ring->tx) {
                        unmap_descbuffer(ring, meta->dmaaddr,
                                         meta->skb->len, 1);
                } else {
                        unmap_descbuffer(ring, meta->dmaaddr,
                                         ring->rx_buffersize, 0);
                }
                free_descriptor_buffer(ring, meta, 0);
        }
}
/* Main initialization function. */
static
struct bcm43xx_dmaring * bcm43xx_setup_dmaring(struct bcm43xx_private *bcm,
                                               int controller_index,
                                               int for_tx,
                                               int dma64)
{
        struct bcm43xx_dmaring *ring;
        int err;
        int nr_slots;

        ring = kzalloc(sizeof(*ring), GFP_KERNEL);
        if (!ring)
                goto out;

        nr_slots = BCM43xx_RXRING_SLOTS;
        if (for_tx)
                nr_slots = BCM43xx_TXRING_SLOTS;

        ring->meta = kcalloc(nr_slots, sizeof(struct bcm43xx_dmadesc_meta),
                             GFP_KERNEL);
        if (!ring->meta)
                goto err_kfree_ring;

        ring->routing = BCM43xx_DMA32_CLIENTTRANS;
        if (dma64)
                ring->routing = BCM43xx_DMA64_CLIENTTRANS;

        ring->bcm = bcm;
        ring->nr_slots = nr_slots;
        ring->suspend_mark = ring->nr_slots * BCM43xx_TXSUSPEND_PERCENT / 100;
        ring->resume_mark = ring->nr_slots * BCM43xx_TXRESUME_PERCENT / 100;
        assert(ring->suspend_mark < ring->resume_mark);
        ring->mmio_base = bcm43xx_dmacontroller_base(dma64, controller_index);
        ring->index = controller_index;
        ring->dma64 = !!dma64;
        if (for_tx) {
                ring->tx = 1;
                ring->current_slot = -1;
        } else {
                if (ring->index == 0) {
                        ring->rx_buffersize = BCM43xx_DMA0_RX_BUFFERSIZE;
                        ring->frameoffset = BCM43xx_DMA0_RX_FRAMEOFFSET;
                } else if (ring->index == 3) {
                        ring->rx_buffersize = BCM43xx_DMA3_RX_BUFFERSIZE;
                        ring->frameoffset = BCM43xx_DMA3_RX_FRAMEOFFSET;
                } else
                        assert(0);
        }

        err = alloc_ringmemory(ring);
        if (err)
                goto err_kfree_meta;
        err = dmacontroller_setup(ring);
        if (err)
                goto err_free_ringmemory;

out:
        return ring;

err_free_ringmemory:
        printk(KERN_ERR PFX "Error in bcm43xx_setup_dmaring\n");
        free_ringmemory(ring);
err_kfree_meta:
        kfree(ring->meta);
err_kfree_ring:
        kfree(ring);
        ring = NULL;
        goto out;
}
/* Main cleanup function. */
static void bcm43xx_destroy_dmaring(struct bcm43xx_dmaring *ring)
{
        if (!ring)
                return;

        dprintk(KERN_INFO PFX "DMA-%s 0x%04X (%s) max used slots: %d/%d\n",
                (ring->dma64) ? "64" : "32",
                ring->mmio_base,
                (ring->tx) ? "TX" : "RX",
                ring->max_used_slots, ring->nr_slots);
        /* Device IRQs are disabled prior to entering this function,
         * so no need to take care of concurrency with rx handler stuff. */
        dmacontroller_cleanup(ring);
        free_all_descbuffers(ring);
        free_ringmemory(ring);

        kfree(ring->meta);
        kfree(ring);
}
void bcm43xx_dma_free(struct bcm43xx_private *bcm)
{
        struct bcm43xx_dma *dma;

        if (bcm43xx_using_pio(bcm))
                return;
        dma = bcm43xx_current_dma(bcm);

        bcm43xx_destroy_dmaring(dma->rx_ring3);
        dma->rx_ring3 = NULL;
        bcm43xx_destroy_dmaring(dma->rx_ring0);
        dma->rx_ring0 = NULL;

        bcm43xx_destroy_dmaring(dma->tx_ring5);
        dma->tx_ring5 = NULL;
        bcm43xx_destroy_dmaring(dma->tx_ring4);
        dma->tx_ring4 = NULL;
        bcm43xx_destroy_dmaring(dma->tx_ring3);
        dma->tx_ring3 = NULL;
        bcm43xx_destroy_dmaring(dma->tx_ring2);
        dma->tx_ring2 = NULL;
        bcm43xx_destroy_dmaring(dma->tx_ring1);
        dma->tx_ring1 = NULL;
        bcm43xx_destroy_dmaring(dma->tx_ring0);
        dma->tx_ring0 = NULL;
}
int bcm43xx_dma_init(struct bcm43xx_private *bcm)
{
        struct bcm43xx_dma *dma = bcm43xx_current_dma(bcm);
        struct bcm43xx_dmaring *ring;
        int err;
        int dma64 = 0;

        bcm->dma_mask = bcm43xx_get_supported_dma_mask(bcm);
        if (bcm->dma_mask == DMA_64BIT_MASK)
                dma64 = 1;
        err = pci_set_dma_mask(bcm->pci_dev, bcm->dma_mask);
        if (err)
                goto no_dma;
        err = pci_set_consistent_dma_mask(bcm->pci_dev, bcm->dma_mask);
        if (err)
                goto no_dma;

        /* setup TX DMA channels. */
        err = -ENOMEM;
        ring = bcm43xx_setup_dmaring(bcm, 0, 1, dma64);
        if (!ring)
                goto out;
        dma->tx_ring0 = ring;

        ring = bcm43xx_setup_dmaring(bcm, 1, 1, dma64);
        if (!ring)
                goto err_destroy_tx0;
        dma->tx_ring1 = ring;

        ring = bcm43xx_setup_dmaring(bcm, 2, 1, dma64);
        if (!ring)
                goto err_destroy_tx1;
        dma->tx_ring2 = ring;

        ring = bcm43xx_setup_dmaring(bcm, 3, 1, dma64);
        if (!ring)
                goto err_destroy_tx2;
        dma->tx_ring3 = ring;

        ring = bcm43xx_setup_dmaring(bcm, 4, 1, dma64);
        if (!ring)
                goto err_destroy_tx3;
        dma->tx_ring4 = ring;

        ring = bcm43xx_setup_dmaring(bcm, 5, 1, dma64);
        if (!ring)
                goto err_destroy_tx4;
        dma->tx_ring5 = ring;

        /* setup RX DMA channels. */
        ring = bcm43xx_setup_dmaring(bcm, 0, 0, dma64);
        if (!ring)
                goto err_destroy_tx5;
        dma->rx_ring0 = ring;

        if (bcm->current_core->rev < 5) {
                ring = bcm43xx_setup_dmaring(bcm, 3, 0, dma64);
                if (!ring)
                        goto err_destroy_rx0;
                dma->rx_ring3 = ring;
        }

        dprintk(KERN_INFO PFX "%d-bit DMA initialized\n",
                (bcm->dma_mask == DMA_64BIT_MASK) ? 64 :
                (bcm->dma_mask == DMA_32BIT_MASK) ? 32 : 30);
        err = 0;
out:
        return err;

err_destroy_rx0:
        bcm43xx_destroy_dmaring(dma->rx_ring0);
        dma->rx_ring0 = NULL;
err_destroy_tx5:
        bcm43xx_destroy_dmaring(dma->tx_ring5);
        dma->tx_ring5 = NULL;
err_destroy_tx4:
        bcm43xx_destroy_dmaring(dma->tx_ring4);
        dma->tx_ring4 = NULL;
err_destroy_tx3:
        bcm43xx_destroy_dmaring(dma->tx_ring3);
        dma->tx_ring3 = NULL;
err_destroy_tx2:
        bcm43xx_destroy_dmaring(dma->tx_ring2);
        dma->tx_ring2 = NULL;
err_destroy_tx1:
        bcm43xx_destroy_dmaring(dma->tx_ring1);
        dma->tx_ring1 = NULL;
err_destroy_tx0:
        bcm43xx_destroy_dmaring(dma->tx_ring0);
        dma->tx_ring0 = NULL;
        goto out;

no_dma:
#ifdef CONFIG_BCM43XX_PIO
        printk(KERN_WARNING PFX "DMA not supported on this device."
                                " Falling back to PIO.\n");
        bcm->__using_pio = 1;
        return -EAGAIN;
#else
        printk(KERN_ERR PFX "FATAL: DMA not supported and PIO not configured. "
                            "Please recompile the driver with PIO support.\n");
        return -ENODEV;
#endif /* CONFIG_BCM43XX_PIO */
}
/* Generate a cookie for the TX header. */
static u16 generate_cookie(struct bcm43xx_dmaring *ring,
                           int slot)
{
        u16 cookie = 0x1000;

        /* Use the upper 4 bits of the cookie as
         * DMA controller ID and store the slot number
         * in the lower 12 bits.
         * Note that the cookie must never be 0, as this
         * is a special value used in RX path. */
        switch (ring->index) {
        case 0:
                cookie = 0xA000;
                break;
        case 1:
                cookie = 0xB000;
                break;
        case 2:
                cookie = 0xC000;
                break;
        case 3:
                cookie = 0xD000;
                break;
        case 4:
                cookie = 0xE000;
                break;
        case 5:
                cookie = 0xF000;
                break;
        }
        assert(((u16)slot & 0xF000) == 0x0000);
        cookie |= (u16)slot;

        return cookie;
}
/* Inspect a cookie and find out to which controller/slot it belongs. */
static
struct bcm43xx_dmaring * parse_cookie(struct bcm43xx_private *bcm,
                                      u16 cookie, int *slot)
{
        struct bcm43xx_dma *dma = bcm43xx_current_dma(bcm);
        struct bcm43xx_dmaring *ring = NULL;

        switch (cookie & 0xF000) {
        case 0xA000:
                ring = dma->tx_ring0;
                break;
        case 0xB000:
                ring = dma->tx_ring1;
                break;
        case 0xC000:
                ring = dma->tx_ring2;
                break;
        case 0xD000:
                ring = dma->tx_ring3;
                break;
        case 0xE000:
                ring = dma->tx_ring4;
                break;
        case 0xF000:
                ring = dma->tx_ring5;
                break;
        default:
                assert(0);
        }
        *slot = (cookie & 0x0FFF);
        assert(*slot >= 0 && *slot < ring->nr_slots);

        return ring;
}
static void dmacontroller_poke_tx(struct bcm43xx_dmaring *ring,
                                  int slot)
{
        u16 offset;
        int descsize;

        /* Everything is ready to start. Buffers are DMA mapped and
         * associated with slots.
         * "slot" is the last slot of the new frame we want to transmit.
         * Close your seat belts now, please. */
        wmb();
        slot = next_slot(ring, slot);
        offset = (ring->dma64) ? BCM43xx_DMA64_TXINDEX : BCM43xx_DMA32_TXINDEX;
        descsize = (ring->dma64) ? sizeof(struct bcm43xx_dmadesc64)
                                 : sizeof(struct bcm43xx_dmadesc32);
        bcm43xx_dma_write(ring, offset,
                          (u32)(slot * descsize));
}
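/* Note on dmacontroller_poke_tx() above: the TX index register takes a byte
 * offset into the descriptor table, not a slot number, hence the
 * multiplication by the descriptor size.  Writing the offset of the slot
 * *after* the frame's last descriptor tells the controller to process
 * everything up to and including that frame. */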
static void dma_tx_fragment(struct bcm43xx_dmaring *ring,
                            struct sk_buff *skb,
                            u8 cur_frag)
{
        int slot;
        struct bcm43xx_dmadesc_generic *desc;
        struct bcm43xx_dmadesc_meta *meta;
        dma_addr_t dmaaddr;
        struct sk_buff *bounce_skb;

        assert(skb_shinfo(skb)->nr_frags == 0);

        slot = request_slot(ring);
        desc = bcm43xx_dma_idx2desc(ring, slot, &meta);

        /* Add a device specific TX header. */
        assert(skb_headroom(skb) >= sizeof(struct bcm43xx_txhdr));
        /* Reserve enough headroom for the device tx header. */
        __skb_push(skb, sizeof(struct bcm43xx_txhdr));
        /* Now calculate and add the tx header.
         * The tx header includes the PLCP header. */
        bcm43xx_generate_txhdr(ring->bcm,
                               (struct bcm43xx_txhdr *)skb->data,
                               skb->data + sizeof(struct bcm43xx_txhdr),
                               skb->len - sizeof(struct bcm43xx_txhdr),
                               (cur_frag == 0),
                               generate_cookie(ring, slot));

        dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
        if (dma_mapping_error(dmaaddr) || dmaaddr + skb->len > ring->bcm->dma_mask) {
                /* chip cannot handle DMA to/from > 1GB, use bounce buffer
                 * (copied from b44 driver) */
                if (!dma_mapping_error(dmaaddr))
                        unmap_descbuffer(ring, dmaaddr, skb->len, 1);
                bounce_skb = __dev_alloc_skb(skb->len, GFP_ATOMIC|GFP_DMA);
                if (!bounce_skb)
                        return;
                dmaaddr = map_descbuffer(ring, bounce_skb->data, skb->len, 1);
                if (dma_mapping_error(dmaaddr) || dmaaddr + skb->len > ring->bcm->dma_mask) {
                        if (!dma_mapping_error(dmaaddr))
                                unmap_descbuffer(ring, dmaaddr, skb->len, 1);
                        dev_kfree_skb_any(bounce_skb);
                        return;
                }

                skb_copy_from_linear_data(skb, skb_put(bounce_skb, skb->len),
                                          skb->len);
                dev_kfree_skb_any(skb);
                skb = bounce_skb;
        }

        meta->skb = skb;
        meta->dmaaddr = dmaaddr;

        fill_descriptor(ring, desc, dmaaddr,
                        skb->len, 1, 1, 1);

        /* Now transfer the whole frame. */
        dmacontroller_poke_tx(ring, slot);
}
int bcm43xx_dma_tx(struct bcm43xx_private *bcm,
                   struct ieee80211_txb *txb)
{
        /* We just received a packet from the kernel network subsystem.
         * Add headers and DMA map the memory. Poke
         * the device to send the stuff.
         * Note that this is called from atomic context. */
        struct bcm43xx_dmaring *ring = bcm43xx_current_dma(bcm)->tx_ring1;
        u8 i;
        struct sk_buff *skb;

        if (unlikely(free_slots(ring) < txb->nr_frags)) {
                /* The queue should be stopped,
                 * if we are low on free slots.
                 * If this ever triggers, we have to lower the suspend_mark. */
                dprintkl(KERN_ERR PFX "Out of DMA descriptor slots!\n");
                return -ENOMEM;
        }

        for (i = 0; i < txb->nr_frags; i++) {
                skb = txb->fragments[i];
                /* Take skb from ieee80211_txb_free */
                txb->fragments[i] = NULL;
                dma_tx_fragment(ring, skb, i);
        }
        ieee80211_txb_free(txb);

        return 0;
}
void bcm43xx_dma_handle_xmitstatus(struct bcm43xx_private *bcm,
                                   struct bcm43xx_xmitstatus *status)
{
        struct bcm43xx_dmaring *ring;
        struct bcm43xx_dmadesc_generic *desc;
        struct bcm43xx_dmadesc_meta *meta;
        int is_last_fragment;
        int slot;
        u32 tmp;

        ring = parse_cookie(bcm, status->cookie, &slot);
        assert(ring);
        assert(ring->tx);
        while (1) {
                assert(slot >= 0 && slot < ring->nr_slots);
                desc = bcm43xx_dma_idx2desc(ring, slot, &meta);

                if (ring->dma64) {
                        tmp = le32_to_cpu(desc->dma64.control0);
                        is_last_fragment = !!(tmp & BCM43xx_DMA64_DCTL0_FRAMEEND);
                } else {
                        tmp = le32_to_cpu(desc->dma32.control);
                        is_last_fragment = !!(tmp & BCM43xx_DMA32_DCTL_FRAMEEND);
                }
                unmap_descbuffer(ring, meta->dmaaddr, meta->skb->len, 1);
                free_descriptor_buffer(ring, meta, 1);
                /* Everything belonging to the slot is unmapped
                 * and freed, so we can return it. */
                return_slot(ring, slot);

                if (is_last_fragment)
                        break;
                slot = next_slot(ring, slot);
        }
        bcm->stats.last_tx = jiffies;
}
static void dma_rx(struct bcm43xx_dmaring *ring,
                   int *slot)
{
        struct bcm43xx_dmadesc_generic *desc;
        struct bcm43xx_dmadesc_meta *meta;
        struct bcm43xx_rxhdr *rxhdr;
        struct sk_buff *skb;
        u16 len;
        int err;
        dma_addr_t dmaaddr;

        desc = bcm43xx_dma_idx2desc(ring, *slot, &meta);

        sync_descbuffer_for_cpu(ring, meta->dmaaddr, ring->rx_buffersize);
        skb = meta->skb;

        if (ring->index == 3) {
                /* We received an xmit status. */
                struct bcm43xx_hwxmitstatus *hw = (struct bcm43xx_hwxmitstatus *)skb->data;
                struct bcm43xx_xmitstatus stat;
                int i = 0;

                stat.cookie = le16_to_cpu(hw->cookie);
                while (stat.cookie == 0) {
                        /* The device has not finished writing the status yet. */
                        if (unlikely(++i >= 10000)) {
                                assert(0);
                                break;
                        }
                        udelay(2);
                        barrier();
                        stat.cookie = le16_to_cpu(hw->cookie);
                }
                stat.flags = hw->flags;
                stat.cnt1 = hw->cnt1;
                stat.cnt2 = hw->cnt2;
                stat.seq = le16_to_cpu(hw->seq);
                stat.unknown = le16_to_cpu(hw->unknown);

                bcm43xx_debugfs_log_txstat(ring->bcm, &stat);
                bcm43xx_dma_handle_xmitstatus(ring->bcm, &stat);
                /* recycle the descriptor buffer. */
                sync_descbuffer_for_device(ring, meta->dmaaddr, ring->rx_buffersize);

                return;
        }
        rxhdr = (struct bcm43xx_rxhdr *)skb->data;
        len = le16_to_cpu(rxhdr->frame_length);
        if (len == 0) {
                int i = 0;

                do {
                        udelay(2);
                        barrier();
                        len = le16_to_cpu(rxhdr->frame_length);
                } while (len == 0 && i++ < 5);
                if (unlikely(len == 0)) {
                        /* recycle the descriptor buffer. */
                        sync_descbuffer_for_device(ring, meta->dmaaddr,
                                                   ring->rx_buffersize);
                        goto drop;
                }
        }
        if (unlikely(len > ring->rx_buffersize)) {
                /* The data did not fit into one descriptor buffer
                 * and is split over multiple buffers.
                 * This should never happen, as we try to allocate buffers
                 * big enough. So simply ignore this packet. */
                int cnt = 0;
                s32 tmp = len;

                while (1) {
                        desc = bcm43xx_dma_idx2desc(ring, *slot, &meta);
                        /* recycle the descriptor buffer. */
                        sync_descbuffer_for_device(ring, meta->dmaaddr,
                                                   ring->rx_buffersize);
                        *slot = next_slot(ring, *slot);
                        cnt++;
                        tmp -= ring->rx_buffersize;
                        if (tmp <= 0)
                                break;
                }
                printkl(KERN_ERR PFX "DMA RX buffer too small "
                                     "(len: %u, buffer: %u, nr-dropped: %d)\n",
                        len, ring->rx_buffersize, cnt);
                goto drop;
        }
        len -= IEEE80211_FCS_LEN;

        dmaaddr = meta->dmaaddr;
        err = setup_rx_descbuffer(ring, desc, meta, GFP_ATOMIC);
        if (unlikely(err)) {
                dprintkl(KERN_ERR PFX "DMA RX: setup_rx_descbuffer() failed\n");
                sync_descbuffer_for_device(ring, dmaaddr,
                                           ring->rx_buffersize);
                goto drop;
        }

        unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0);
        skb_put(skb, len + ring->frameoffset);
        skb_pull(skb, ring->frameoffset);

        err = bcm43xx_rx(ring->bcm, skb, rxhdr);
        if (err)
                dev_kfree_skb_irq(skb);
drop:
        return;
}
void bcm43xx_dma_rx(struct bcm43xx_dmaring *ring)
{
        u32 status;
        u16 descptr;
        int slot, current_slot;
#ifdef CONFIG_BCM43XX_DEBUG
        int used_slots = 0;
#endif

        if (ring->dma64) {
                status = bcm43xx_dma_read(ring, BCM43xx_DMA64_RXSTATUS);
                descptr = (status & BCM43xx_DMA64_RXSTATDPTR);
                current_slot = descptr / sizeof(struct bcm43xx_dmadesc64);
        } else {
                status = bcm43xx_dma_read(ring, BCM43xx_DMA32_RXSTATUS);
                descptr = (status & BCM43xx_DMA32_RXDPTR);
                current_slot = descptr / sizeof(struct bcm43xx_dmadesc32);
        }
        assert(current_slot >= 0 && current_slot < ring->nr_slots);

        slot = ring->current_slot;
        for ( ; slot != current_slot; slot = next_slot(ring, slot)) {
                dma_rx(ring, &slot);
#ifdef CONFIG_BCM43XX_DEBUG
                if (++used_slots > ring->max_used_slots)
                        ring->max_used_slots = used_slots;
#endif
        }
        if (ring->dma64) {
                bcm43xx_dma_write(ring, BCM43xx_DMA64_RXINDEX,
                                  (u32)(slot * sizeof(struct bcm43xx_dmadesc64)));
        } else {
                bcm43xx_dma_write(ring, BCM43xx_DMA32_RXINDEX,
                                  (u32)(slot * sizeof(struct bcm43xx_dmadesc32)));
        }
        ring->current_slot = slot;
}
void bcm43xx_dma_tx_suspend(struct bcm43xx_dmaring *ring)
{
        bcm43xx_power_saving_ctl_bits(ring->bcm, -1, 1);
        if (ring->dma64) {
                bcm43xx_dma_write(ring, BCM43xx_DMA64_TXCTL,
                                  bcm43xx_dma_read(ring, BCM43xx_DMA64_TXCTL)
                                  | BCM43xx_DMA64_TXSUSPEND);
        } else {
                bcm43xx_dma_write(ring, BCM43xx_DMA32_TXCTL,
                                  bcm43xx_dma_read(ring, BCM43xx_DMA32_TXCTL)
                                  | BCM43xx_DMA32_TXSUSPEND);
        }
}
void bcm43xx_dma_tx_resume(struct bcm43xx_dmaring *ring)
{
        if (ring->dma64) {
                bcm43xx_dma_write(ring, BCM43xx_DMA64_TXCTL,
                                  bcm43xx_dma_read(ring, BCM43xx_DMA64_TXCTL)
                                  & ~BCM43xx_DMA64_TXSUSPEND);
        } else {
                bcm43xx_dma_write(ring, BCM43xx_DMA32_TXCTL,
                                  bcm43xx_dma_read(ring, BCM43xx_DMA32_TXCTL)
                                  & ~BCM43xx_DMA32_TXSUSPEND);
        }
        bcm43xx_power_saving_ctl_bits(ring->bcm, -1, -1);
}