/*

  Broadcom BCM43xx wireless driver

  DMA ringbuffer and descriptor allocation/management

  Copyright (c) 2005, 2006 Michael Buesch <mbuesch@freenet.de>

  Some code in this file is derived from the b44.c driver
  Copyright (C) 2002 David S. Miller
  Copyright (C) Pekka Pietikainen

  This program is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; either version 2 of the License, or
  (at your option) any later version.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; see the file COPYING.  If not, write to
  the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
  Boston, MA 02110-1301, USA.

*/
#include "bcm43xx.h"
#include "bcm43xx_dma.h"
#include "bcm43xx_main.h"
#include "bcm43xx_debugfs.h"
#include "bcm43xx_power.h"
#include "bcm43xx_xmit.h"

#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/skbuff.h>
static inline int free_slots(struct bcm43xx_dmaring *ring)
{
	return (ring->nr_slots - ring->used_slots);
}
static inline int next_slot(struct bcm43xx_dmaring *ring, int slot)
{
	assert(slot >= -1 && slot <= ring->nr_slots - 1);
	if (slot == ring->nr_slots - 1)
		return 0;
	return slot + 1;
}
static inline int prev_slot(struct bcm43xx_dmaring *ring, int slot)
{
	assert(slot >= 0 && slot <= ring->nr_slots - 1);
	if (slot == 0)
		return ring->nr_slots - 1;
	return slot - 1;
}
/* Request a slot for usage. */
static inline
int request_slot(struct bcm43xx_dmaring *ring)
{
	int slot;

	assert(ring->tx);
	assert(!ring->suspended);
	assert(free_slots(ring) != 0);

	slot = next_slot(ring, ring->current_slot);
	ring->current_slot = slot;
	ring->used_slots++;

	/* Check the number of available slots and suspend TX,
	 * if we are running low on free slots.
	 */
	if (unlikely(free_slots(ring) < ring->suspend_mark)) {
		netif_stop_queue(ring->bcm->net_dev);
		ring->suspended = 1;
	}
#ifdef CONFIG_BCM43XX_DEBUG
	if (ring->used_slots > ring->max_used_slots)
		ring->max_used_slots = ring->used_slots;
#endif /* CONFIG_BCM43XX_DEBUG */

	return slot;
}
/* Return a slot to the free slots. */
static inline
void return_slot(struct bcm43xx_dmaring *ring, int slot)
{
	assert(ring->tx);

	ring->used_slots--;

	/* Check if TX is suspended and check if we have
	 * enough free slots to resume it again.
	 */
	if (unlikely(ring->suspended)) {
		if (free_slots(ring) >= ring->resume_mark) {
			ring->suspended = 0;
			netif_wake_queue(ring->bcm->net_dev);
		}
	}
}
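
/* Note on the flow control scheme implemented by request_slot() and
 * return_slot() above: the net queue is stopped once free slots drop below
 * suspend_mark and only woken again once they climb back above resume_mark.
 * Keeping the two marks apart provides hysteresis, so a ring hovering
 * around a single threshold is not stopped and woken on every packet.
 */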
u16 bcm43xx_dmacontroller_base(int dma64bit, int controller_idx)
{
	static const u16 map64[] = {
		BCM43xx_MMIO_DMA64_BASE0,
		BCM43xx_MMIO_DMA64_BASE1,
		BCM43xx_MMIO_DMA64_BASE2,
		BCM43xx_MMIO_DMA64_BASE3,
		BCM43xx_MMIO_DMA64_BASE4,
		BCM43xx_MMIO_DMA64_BASE5,
	};
	static const u16 map32[] = {
		BCM43xx_MMIO_DMA32_BASE0,
		BCM43xx_MMIO_DMA32_BASE1,
		BCM43xx_MMIO_DMA32_BASE2,
		BCM43xx_MMIO_DMA32_BASE3,
		BCM43xx_MMIO_DMA32_BASE4,
		BCM43xx_MMIO_DMA32_BASE5,
	};

	if (dma64bit) {
		assert(controller_idx >= 0 &&
		       controller_idx < ARRAY_SIZE(map64));
		return map64[controller_idx];
	}
	assert(controller_idx >= 0 &&
	       controller_idx < ARRAY_SIZE(map32));
	return map32[controller_idx];
}
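
/* For example, bcm43xx_dmacontroller_base(1, 0) yields
 * BCM43xx_MMIO_DMA64_BASE0, the MMIO offset of the register block of the
 * first 64-bit DMA controller. The per-ring register accesses made through
 * bcm43xx_dma_read()/bcm43xx_dma_write() below are relative to this base.
 */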
static inline
dma_addr_t map_descbuffer(struct bcm43xx_dmaring *ring,
			  unsigned char *buf,
			  size_t len,
			  int tx)
{
	dma_addr_t dmaaddr;

	if (tx) {
		dmaaddr = dma_map_single(&ring->bcm->pci_dev->dev,
					 buf, len,
					 DMA_TO_DEVICE);
	} else {
		dmaaddr = dma_map_single(&ring->bcm->pci_dev->dev,
					 buf, len,
					 DMA_FROM_DEVICE);
	}

	return dmaaddr;
}

static inline
void unmap_descbuffer(struct bcm43xx_dmaring *ring,
		      dma_addr_t addr,
		      size_t len,
		      int tx)
{
	if (tx) {
		dma_unmap_single(&ring->bcm->pci_dev->dev,
				 addr, len,
				 DMA_TO_DEVICE);
	} else {
		dma_unmap_single(&ring->bcm->pci_dev->dev,
				 addr, len,
				 DMA_FROM_DEVICE);
	}
}
static inline
void sync_descbuffer_for_cpu(struct bcm43xx_dmaring *ring,
			     dma_addr_t addr,
			     size_t len)
{
	assert(!ring->tx);

	dma_sync_single_for_cpu(&ring->bcm->pci_dev->dev,
				addr, len, DMA_FROM_DEVICE);
}

static inline
void sync_descbuffer_for_device(struct bcm43xx_dmaring *ring,
				dma_addr_t addr,
				size_t len)
{
	assert(!ring->tx);

	dma_sync_single_for_device(&ring->bcm->pci_dev->dev,
				   addr, len, DMA_FROM_DEVICE);
}
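
/* The two sync helpers above implement the usual streaming-DMA ownership
 * handoff for RX buffers: sync_descbuffer_for_cpu() before the CPU reads a
 * buffer the device has written, sync_descbuffer_for_device() before a
 * buffer is recycled and handed back to the hardware. Both are RX-only,
 * hence the fixed DMA_FROM_DEVICE direction and the !ring->tx assertion.
 */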
/* Unmap and free a descriptor buffer. */
static inline
void free_descriptor_buffer(struct bcm43xx_dmaring *ring,
			    struct bcm43xx_dmadesc_meta *meta,
			    int irq_context)
{
	assert(meta->skb);
	if (irq_context)
		dev_kfree_skb_irq(meta->skb);
	else
		dev_kfree_skb(meta->skb);
	meta->skb = NULL;
}
static int alloc_ringmemory(struct bcm43xx_dmaring *ring)
{
	struct device *dev = &(ring->bcm->pci_dev->dev);

	ring->descbase = dma_alloc_coherent(dev, BCM43xx_DMA_RINGMEMSIZE,
					    &(ring->dmabase), GFP_KERNEL);
	if (!ring->descbase) {
		printk(KERN_ERR PFX "DMA ringmemory allocation failed\n");
		return -ENOMEM;
	}
	memset(ring->descbase, 0, BCM43xx_DMA_RINGMEMSIZE);

	return 0;
}
static void free_ringmemory(struct bcm43xx_dmaring *ring)
{
	struct device *dev = &(ring->bcm->pci_dev->dev);

	dma_free_coherent(dev, BCM43xx_DMA_RINGMEMSIZE,
			  ring->descbase, ring->dmabase);
}
/* Reset the RX DMA channel */
int bcm43xx_dmacontroller_rx_reset(struct bcm43xx_private *bcm,
				   u16 mmio_base, int dma64)
{
	int i;
	u32 value;
	u16 offset;

	might_sleep();

	offset = dma64 ? BCM43xx_DMA64_RXCTL : BCM43xx_DMA32_RXCTL;
	bcm43xx_write32(bcm, mmio_base + offset, 0);
	for (i = 0; i < 1000; i++) {
		offset = dma64 ? BCM43xx_DMA64_RXSTATUS : BCM43xx_DMA32_RXSTATUS;
		value = bcm43xx_read32(bcm, mmio_base + offset);
		if (dma64) {
			value &= BCM43xx_DMA64_RXSTAT;
			if (value == BCM43xx_DMA64_RXSTAT_DISABLED) {
				i = -1;
				break;
			}
		} else {
			value &= BCM43xx_DMA32_RXSTATE;
			if (value == BCM43xx_DMA32_RXSTAT_DISABLED) {
				i = -1;
				break;
			}
		}
		msleep(1);
	}
	if (i != -1) {
		printk(KERN_ERR PFX "Error: Wait on DMA RX status timed out.\n");
		return -ENODEV;
	}

	return 0;
}
/* Reset the TX DMA channel */
int bcm43xx_dmacontroller_tx_reset(struct bcm43xx_private *bcm,
				   u16 mmio_base, int dma64)
{
	int i;
	u32 value;
	u16 offset;

	might_sleep();

	for (i = 0; i < 1000; i++) {
		offset = dma64 ? BCM43xx_DMA64_TXSTATUS : BCM43xx_DMA32_TXSTATUS;
		value = bcm43xx_read32(bcm, mmio_base + offset);
		if (dma64) {
			value &= BCM43xx_DMA64_TXSTAT;
			if (value == BCM43xx_DMA64_TXSTAT_DISABLED ||
			    value == BCM43xx_DMA64_TXSTAT_IDLEWAIT ||
			    value == BCM43xx_DMA64_TXSTAT_STOPPED)
				break;
		} else {
			value &= BCM43xx_DMA32_TXSTATE;
			if (value == BCM43xx_DMA32_TXSTAT_DISABLED ||
			    value == BCM43xx_DMA32_TXSTAT_IDLEWAIT ||
			    value == BCM43xx_DMA32_TXSTAT_STOPPED)
				break;
		}
		msleep(1);
	}
	offset = dma64 ? BCM43xx_DMA64_TXCTL : BCM43xx_DMA32_TXCTL;
	bcm43xx_write32(bcm, mmio_base + offset, 0);
	for (i = 0; i < 1000; i++) {
		offset = dma64 ? BCM43xx_DMA64_TXSTATUS : BCM43xx_DMA32_TXSTATUS;
		value = bcm43xx_read32(bcm, mmio_base + offset);
		if (dma64) {
			value &= BCM43xx_DMA64_TXSTAT;
			if (value == BCM43xx_DMA64_TXSTAT_DISABLED) {
				i = -1;
				break;
			}
		} else {
			value &= BCM43xx_DMA32_TXSTATE;
			if (value == BCM43xx_DMA32_TXSTAT_DISABLED) {
				i = -1;
				break;
			}
		}
		msleep(1);
	}
	if (i != -1) {
		printk(KERN_ERR PFX "Error: Wait on DMA TX status timed out.\n");
		return -ENODEV;
	}
	/* ensure the reset is completed. */
	msleep(1);

	return 0;
}
static void fill_descriptor(struct bcm43xx_dmaring *ring,
			    struct bcm43xx_dmadesc_generic *desc,
			    dma_addr_t dmaaddr,
			    u16 bufsize,
			    int start, int end, int irq)
{
	int slot;

	slot = bcm43xx_dma_desc2idx(ring, desc);
	assert(slot >= 0 && slot < ring->nr_slots);

	if (ring->dma64) {
		u32 ctl0 = 0, ctl1 = 0;
		u32 addrlo, addrhi;
		u32 addrext;

		addrlo = (u32)(dmaaddr & 0xFFFFFFFF);
		addrhi = (((u64)dmaaddr >> 32) & ~BCM43xx_DMA64_ROUTING);
		addrext = (((u64)dmaaddr >> 32) >> BCM43xx_DMA64_ROUTING_SHIFT);
		addrhi |= ring->routing;
		if (slot == ring->nr_slots - 1)
			ctl0 |= BCM43xx_DMA64_DCTL0_DTABLEEND;
		if (start)
			ctl0 |= BCM43xx_DMA64_DCTL0_FRAMESTART;
		if (end)
			ctl0 |= BCM43xx_DMA64_DCTL0_FRAMEEND;
		if (irq)
			ctl0 |= BCM43xx_DMA64_DCTL0_IRQ;
		ctl1 |= (bufsize - ring->frameoffset)
			& BCM43xx_DMA64_DCTL1_BYTECNT;
		ctl1 |= (addrext << BCM43xx_DMA64_DCTL1_ADDREXT_SHIFT)
			& BCM43xx_DMA64_DCTL1_ADDREXT_MASK;

		desc->dma64.control0 = cpu_to_le32(ctl0);
		desc->dma64.control1 = cpu_to_le32(ctl1);
		desc->dma64.address_low = cpu_to_le32(addrlo);
		desc->dma64.address_high = cpu_to_le32(addrhi);
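		/* Note on the address encoding above: the topmost bits of the
		 * high address word form a routing field that tells the
		 * backplane how to translate the access (ring->routing is
		 * OR'ed into it), while the original address bits displaced
		 * by that field travel separately as "addrext" in control
		 * word 1. The 32-bit branch below uses the same scheme on
		 * the single address word.
		 */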
	} else {
		u32 ctl;
		u32 addr;
		u32 addrext;

		addr = (u32)(dmaaddr & ~BCM43xx_DMA32_ROUTING);
		addrext = (u32)(dmaaddr & BCM43xx_DMA32_ROUTING)
			   >> BCM43xx_DMA32_ROUTING_SHIFT;
		addr |= ring->routing;
		ctl = (bufsize - ring->frameoffset)
		      & BCM43xx_DMA32_DCTL_BYTECNT;
		if (slot == ring->nr_slots - 1)
			ctl |= BCM43xx_DMA32_DCTL_DTABLEEND;
		if (start)
			ctl |= BCM43xx_DMA32_DCTL_FRAMESTART;
		if (end)
			ctl |= BCM43xx_DMA32_DCTL_FRAMEEND;
		if (irq)
			ctl |= BCM43xx_DMA32_DCTL_IRQ;
		ctl |= (addrext << BCM43xx_DMA32_DCTL_ADDREXT_SHIFT)
		       & BCM43xx_DMA32_DCTL_ADDREXT_MASK;

		desc->dma32.control = cpu_to_le32(ctl);
		desc->dma32.address = cpu_to_le32(addr);
	}
}
static int setup_rx_descbuffer(struct bcm43xx_dmaring *ring,
			       struct bcm43xx_dmadesc_generic *desc,
			       struct bcm43xx_dmadesc_meta *meta,
			       gfp_t gfp_flags)
{
	struct bcm43xx_rxhdr *rxhdr;
	struct bcm43xx_hwxmitstatus *xmitstat;
	dma_addr_t dmaaddr;
	struct sk_buff *skb;

	assert(!ring->tx);

	skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
	if (unlikely(!skb))
		return -ENOMEM;
	dmaaddr = map_descbuffer(ring, skb->data, ring->rx_buffersize, 0);
	meta->skb = skb;
	meta->dmaaddr = dmaaddr;
	skb->dev = ring->bcm->net_dev;

	fill_descriptor(ring, desc, dmaaddr,
			ring->rx_buffersize, 0, 0, 0);

	rxhdr = (struct bcm43xx_rxhdr *)(skb->data);
	rxhdr->frame_length = 0;
	xmitstat = (struct bcm43xx_hwxmitstatus *)(skb->data);
	xmitstat->cookie = 0;

	return 0;
}
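
/* The two header fields zeroed above are what the RX path polls to detect
 * completion: dma_rx() spins until rxhdr->frame_length (or, on the status
 * ring, xmitstat->cookie) turns non-zero. Clearing them marks the freshly
 * mapped buffer as not yet filled by the device.
 */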
/* Allocate the initial descbuffers.
 * This is used for an RX ring only.
 */
static int alloc_initial_descbuffers(struct bcm43xx_dmaring *ring)
{
	int i, err = -ENOMEM;
	struct bcm43xx_dmadesc_generic *desc;
	struct bcm43xx_dmadesc_meta *meta;

	for (i = 0; i < ring->nr_slots; i++) {
		desc = bcm43xx_dma_idx2desc(ring, i, &meta);

		err = setup_rx_descbuffer(ring, desc, meta, GFP_KERNEL);
		if (err)
			goto err_unwind;
	}
	mb();
	ring->used_slots = ring->nr_slots;
	err = 0;
out:
	return err;

err_unwind:
	for (i--; i >= 0; i--) {
		desc = bcm43xx_dma_idx2desc(ring, i, &meta);

		unmap_descbuffer(ring, meta->dmaaddr, ring->rx_buffersize, 0);
		dev_kfree_skb(meta->skb);
	}
	goto out;
}
/* Do initial setup of the DMA controller.
 * Reset the controller, write the ring busaddress
 * and switch the "enable" bit on.
 */
static int dmacontroller_setup(struct bcm43xx_dmaring *ring)
{
	int err = 0;
	u32 value;
	u32 addrext;

	if (ring->tx) {
		if (ring->dma64) {
			u64 ringbase = (u64)(ring->dmabase);

			addrext = ((ringbase >> 32) >> BCM43xx_DMA64_ROUTING_SHIFT);
			value = BCM43xx_DMA64_TXENABLE;
			value |= (addrext << BCM43xx_DMA64_TXADDREXT_SHIFT)
				 & BCM43xx_DMA64_TXADDREXT_MASK;
			bcm43xx_dma_write(ring, BCM43xx_DMA64_TXCTL, value);
			bcm43xx_dma_write(ring, BCM43xx_DMA64_TXRINGLO,
					  (ringbase & 0xFFFFFFFF));
			bcm43xx_dma_write(ring, BCM43xx_DMA64_TXRINGHI,
					  ((ringbase >> 32) & ~BCM43xx_DMA64_ROUTING)
					  | ring->routing);
		} else {
			u32 ringbase = (u32)(ring->dmabase);

			addrext = (ringbase >> BCM43xx_DMA32_ROUTING_SHIFT);
			value = BCM43xx_DMA32_TXENABLE;
			value |= (addrext << BCM43xx_DMA32_TXADDREXT_SHIFT)
				 & BCM43xx_DMA32_TXADDREXT_MASK;
			bcm43xx_dma_write(ring, BCM43xx_DMA32_TXCTL, value);
			bcm43xx_dma_write(ring, BCM43xx_DMA32_TXRING,
					  (ringbase & ~BCM43xx_DMA32_ROUTING)
					  | ring->routing);
		}
	} else {
		err = alloc_initial_descbuffers(ring);
		if (err)
			goto out;
		if (ring->dma64) {
			u64 ringbase = (u64)(ring->dmabase);

			addrext = ((ringbase >> 32) >> BCM43xx_DMA64_ROUTING_SHIFT);
			value = (ring->frameoffset << BCM43xx_DMA64_RXFROFF_SHIFT);
			value |= BCM43xx_DMA64_RXENABLE;
			value |= (addrext << BCM43xx_DMA64_RXADDREXT_SHIFT)
				 & BCM43xx_DMA64_RXADDREXT_MASK;
			bcm43xx_dma_write(ring, BCM43xx_DMA64_RXCTL, value);
			bcm43xx_dma_write(ring, BCM43xx_DMA64_RXRINGLO,
					  (ringbase & 0xFFFFFFFF));
			bcm43xx_dma_write(ring, BCM43xx_DMA64_RXRINGHI,
					  ((ringbase >> 32) & ~BCM43xx_DMA64_ROUTING)
					  | ring->routing);
			bcm43xx_dma_write(ring, BCM43xx_DMA64_RXINDEX, 200);
		} else {
			u32 ringbase = (u32)(ring->dmabase);

			addrext = (ringbase >> BCM43xx_DMA32_ROUTING_SHIFT);
			value = (ring->frameoffset << BCM43xx_DMA32_RXFROFF_SHIFT);
			value |= BCM43xx_DMA32_RXENABLE;
			value |= (addrext << BCM43xx_DMA32_RXADDREXT_SHIFT)
				 & BCM43xx_DMA32_RXADDREXT_MASK;
			bcm43xx_dma_write(ring, BCM43xx_DMA32_RXCTL, value);
			bcm43xx_dma_write(ring, BCM43xx_DMA32_RXRING,
					  (ringbase & ~BCM43xx_DMA32_ROUTING)
					  | ring->routing);
			bcm43xx_dma_write(ring, BCM43xx_DMA32_RXINDEX, 200);
		}
	}

out:
	return err;
}
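
/* A note on the RXINDEX writes above: the RX descriptor index register
 * appears to hold the byte offset just past the last descriptor the
 * hardware may fill, so initializing it to 200 opens up the first
 * descriptors for the device. bcm43xx_dma_rx() advances the same register
 * as received buffers are processed and recycled.
 */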
/* Shutdown the DMA controller. */
static void dmacontroller_cleanup(struct bcm43xx_dmaring *ring)
{
	if (ring->tx) {
		bcm43xx_dmacontroller_tx_reset(ring->bcm, ring->mmio_base, ring->dma64);
		if (ring->dma64) {
			bcm43xx_dma_write(ring, BCM43xx_DMA64_TXRINGLO, 0);
			bcm43xx_dma_write(ring, BCM43xx_DMA64_TXRINGHI, 0);
		} else
			bcm43xx_dma_write(ring, BCM43xx_DMA32_TXRING, 0);
	} else {
		bcm43xx_dmacontroller_rx_reset(ring->bcm, ring->mmio_base, ring->dma64);
		if (ring->dma64) {
			bcm43xx_dma_write(ring, BCM43xx_DMA64_RXRINGLO, 0);
			bcm43xx_dma_write(ring, BCM43xx_DMA64_RXRINGHI, 0);
		} else
			bcm43xx_dma_write(ring, BCM43xx_DMA32_RXRING, 0);
	}
}
static void free_all_descbuffers(struct bcm43xx_dmaring *ring)
{
	struct bcm43xx_dmadesc_generic *desc;
	struct bcm43xx_dmadesc_meta *meta;
	int i;

	if (!ring->used_slots)
		return;
	for (i = 0; i < ring->nr_slots; i++) {
		desc = bcm43xx_dma_idx2desc(ring, i, &meta);

		if (!meta->skb) {
			assert(ring->tx);
			continue;
		}
		if (ring->tx) {
			unmap_descbuffer(ring, meta->dmaaddr,
					 meta->skb->len, 1);
		} else {
			unmap_descbuffer(ring, meta->dmaaddr,
					 ring->rx_buffersize, 0);
		}
		free_descriptor_buffer(ring, meta, 0);
	}
}
/* Main initialization function. */
static
struct bcm43xx_dmaring * bcm43xx_setup_dmaring(struct bcm43xx_private *bcm,
					       int controller_index,
					       int for_tx,
					       int dma64)
{
	struct bcm43xx_dmaring *ring;
	int err;
	int nr_slots;

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		goto out;

	nr_slots = BCM43xx_RXRING_SLOTS;
	if (for_tx)
		nr_slots = BCM43xx_TXRING_SLOTS;

	ring->meta = kcalloc(nr_slots, sizeof(struct bcm43xx_dmadesc_meta),
			     GFP_KERNEL);
	if (!ring->meta)
		goto err_kfree_ring;

	ring->routing = BCM43xx_DMA32_CLIENTTRANS;
	if (dma64)
		ring->routing = BCM43xx_DMA64_CLIENTTRANS;
#ifdef CONFIG_BCM947XX
	if (bcm->pci_dev->bus->number == 0)
		ring->routing = dma64 ? BCM43xx_DMA64_NOTRANS : BCM43xx_DMA32_NOTRANS;
#endif

	ring->bcm = bcm;
	ring->nr_slots = nr_slots;
	ring->suspend_mark = ring->nr_slots * BCM43xx_TXSUSPEND_PERCENT / 100;
	ring->resume_mark = ring->nr_slots * BCM43xx_TXRESUME_PERCENT / 100;
	assert(ring->suspend_mark < ring->resume_mark);
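	/* Hypothetical example: with 512 TX slots, a suspend percentage of 25
	 * and a resume percentage of 50, the queue would be stopped once
	 * fewer than 128 slots remain free and woken again once at least 256
	 * are free. The real percentages are set by BCM43xx_TXSUSPEND_PERCENT
	 * and BCM43xx_TXRESUME_PERCENT.
	 */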
	ring->mmio_base = bcm43xx_dmacontroller_base(dma64, controller_index);
	ring->index = controller_index;
	ring->dma64 = !!dma64;
	if (for_tx) {
		ring->tx = 1;
		ring->current_slot = -1;
	} else {
		if (ring->index == 0) {
			ring->rx_buffersize = BCM43xx_DMA0_RX_BUFFERSIZE;
			ring->frameoffset = BCM43xx_DMA0_RX_FRAMEOFFSET;
		} else if (ring->index == 3) {
			ring->rx_buffersize = BCM43xx_DMA3_RX_BUFFERSIZE;
			ring->frameoffset = BCM43xx_DMA3_RX_FRAMEOFFSET;
		} else
			assert(0);
	}

	err = alloc_ringmemory(ring);
	if (err)
		goto err_kfree_meta;
	err = dmacontroller_setup(ring);
	if (err)
		goto err_free_ringmemory;

out:
	return ring;

err_free_ringmemory:
	free_ringmemory(ring);
err_kfree_meta:
	kfree(ring->meta);
err_kfree_ring:
	kfree(ring);
	ring = NULL;
	goto out;
}
/* Main cleanup function. */
static void bcm43xx_destroy_dmaring(struct bcm43xx_dmaring *ring)
{
	if (!ring)
		return;

	dprintk(KERN_INFO PFX "DMA-%s 0x%04X (%s) max used slots: %d/%d\n",
		(ring->dma64) ? "64" : "32",
		ring->mmio_base,
		(ring->tx) ? "TX" : "RX",
		ring->max_used_slots, ring->nr_slots);
	/* Device IRQs are disabled prior to entering this function,
	 * so no need to take care of concurrency with the RX handler.
	 */
	dmacontroller_cleanup(ring);
	free_all_descbuffers(ring);
	free_ringmemory(ring);

	kfree(ring->meta);
	kfree(ring);
}
void bcm43xx_dma_free(struct bcm43xx_private *bcm)
{
	struct bcm43xx_dma *dma;

	if (bcm43xx_using_pio(bcm))
		return;
	dma = bcm43xx_current_dma(bcm);

	bcm43xx_destroy_dmaring(dma->rx_ring3);
	dma->rx_ring3 = NULL;
	bcm43xx_destroy_dmaring(dma->rx_ring0);
	dma->rx_ring0 = NULL;
	bcm43xx_destroy_dmaring(dma->tx_ring5);
	dma->tx_ring5 = NULL;
	bcm43xx_destroy_dmaring(dma->tx_ring4);
	dma->tx_ring4 = NULL;
	bcm43xx_destroy_dmaring(dma->tx_ring3);
	dma->tx_ring3 = NULL;
	bcm43xx_destroy_dmaring(dma->tx_ring2);
	dma->tx_ring2 = NULL;
	bcm43xx_destroy_dmaring(dma->tx_ring1);
	dma->tx_ring1 = NULL;
	bcm43xx_destroy_dmaring(dma->tx_ring0);
	dma->tx_ring0 = NULL;
}
int bcm43xx_dma_init(struct bcm43xx_private *bcm)
{
	struct bcm43xx_dma *dma = bcm43xx_current_dma(bcm);
	struct bcm43xx_dmaring *ring;
	int err = -ENOMEM;
	int dma64 = 0;
	u64 mask = bcm43xx_get_supported_dma_mask(bcm);
	int nobits;

	if (mask == DMA_64BIT_MASK) {
		dma64 = 1;
		nobits = 64;
	} else if (mask == DMA_32BIT_MASK)
		nobits = 32;
	else
		nobits = 30;
	err = pci_set_dma_mask(bcm->pci_dev, mask);
	err |= pci_set_consistent_dma_mask(bcm->pci_dev, mask);
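	/* Both the streaming and the coherent DMA mask must be set; the two
	 * error codes are OR'ed together and checked once below, since the
	 * recovery (fall back to PIO) is the same for either failure.
	 */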
	if (err) {
#ifdef CONFIG_BCM43XX_PIO
		printk(KERN_WARNING PFX "DMA not supported on this device."
		       " Falling back to PIO.\n");
		bcm->__using_pio = 1;
		return -ENOSYS;
#else
		printk(KERN_ERR PFX "FATAL: DMA not supported and PIO not configured. "
		       "Please recompile the driver with PIO support.\n");
		return -ENODEV;
#endif /* CONFIG_BCM43XX_PIO */
	}

	/* setup TX DMA channels. */
	ring = bcm43xx_setup_dmaring(bcm, 0, 1, dma64);
	if (!ring)
		goto out;
	dma->tx_ring0 = ring;

	ring = bcm43xx_setup_dmaring(bcm, 1, 1, dma64);
	if (!ring)
		goto err_destroy_tx0;
	dma->tx_ring1 = ring;

	ring = bcm43xx_setup_dmaring(bcm, 2, 1, dma64);
	if (!ring)
		goto err_destroy_tx1;
	dma->tx_ring2 = ring;

	ring = bcm43xx_setup_dmaring(bcm, 3, 1, dma64);
	if (!ring)
		goto err_destroy_tx2;
	dma->tx_ring3 = ring;

	ring = bcm43xx_setup_dmaring(bcm, 4, 1, dma64);
	if (!ring)
		goto err_destroy_tx3;
	dma->tx_ring4 = ring;

	ring = bcm43xx_setup_dmaring(bcm, 5, 1, dma64);
	if (!ring)
		goto err_destroy_tx4;
	dma->tx_ring5 = ring;

	/* setup RX DMA channels. */
	ring = bcm43xx_setup_dmaring(bcm, 0, 0, dma64);
	if (!ring)
		goto err_destroy_tx5;
	dma->rx_ring0 = ring;

	if (bcm->current_core->rev < 5) {
		ring = bcm43xx_setup_dmaring(bcm, 3, 0, dma64);
		if (!ring)
			goto err_destroy_rx0;
		dma->rx_ring3 = ring;
	}

	dprintk(KERN_INFO PFX "%d-bit DMA initialized\n", nobits);
	err = 0;
out:
	return err;

err_destroy_rx0:
	bcm43xx_destroy_dmaring(dma->rx_ring0);
	dma->rx_ring0 = NULL;
err_destroy_tx5:
	bcm43xx_destroy_dmaring(dma->tx_ring5);
	dma->tx_ring5 = NULL;
err_destroy_tx4:
	bcm43xx_destroy_dmaring(dma->tx_ring4);
	dma->tx_ring4 = NULL;
err_destroy_tx3:
	bcm43xx_destroy_dmaring(dma->tx_ring3);
	dma->tx_ring3 = NULL;
err_destroy_tx2:
	bcm43xx_destroy_dmaring(dma->tx_ring2);
	dma->tx_ring2 = NULL;
err_destroy_tx1:
	bcm43xx_destroy_dmaring(dma->tx_ring1);
	dma->tx_ring1 = NULL;
err_destroy_tx0:
	bcm43xx_destroy_dmaring(dma->tx_ring0);
	dma->tx_ring0 = NULL;
	goto out;
}
/* Generate a cookie for the TX header. */
static u16 generate_cookie(struct bcm43xx_dmaring *ring,
			   int slot)
{
	u16 cookie = 0x1000;

	/* Use the upper 4 bits of the cookie as
	 * DMA controller ID and store the slot number
	 * in the lower 12 bits.
	 * Note that the cookie must never be 0, as this
	 * is a special value used in RX path.
	 */
	switch (ring->index) {
	case 0:
		cookie = 0xA000;
		break;
	case 1:
		cookie = 0xB000;
		break;
	case 2:
		cookie = 0xC000;
		break;
	case 3:
		cookie = 0xD000;
		break;
	case 4:
		cookie = 0xE000;
		break;
	case 5:
		cookie = 0xF000;
		break;
	}
	assert(((u16)slot & 0xF000) == 0x0000);
	cookie |= (u16)slot;

	return cookie;
}
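
/* Worked example of the encoding above: a frame queued on tx_ring2
 * (controller index 2) in slot 0x01A gets the cookie 0xC01A. parse_cookie()
 * below reverses this mapping when the hardware reports transmit status.
 */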
/* Inspect a cookie and find out to which controller/slot it belongs. */
static
struct bcm43xx_dmaring * parse_cookie(struct bcm43xx_private *bcm,
				      u16 cookie, int *slot)
{
	struct bcm43xx_dma *dma = bcm43xx_current_dma(bcm);
	struct bcm43xx_dmaring *ring = NULL;

	switch (cookie & 0xF000) {
	case 0xA000:
		ring = dma->tx_ring0;
		break;
	case 0xB000:
		ring = dma->tx_ring1;
		break;
	case 0xC000:
		ring = dma->tx_ring2;
		break;
	case 0xD000:
		ring = dma->tx_ring3;
		break;
	case 0xE000:
		ring = dma->tx_ring4;
		break;
	case 0xF000:
		ring = dma->tx_ring5;
		break;
	default:
		assert(0);
	}
	*slot = (cookie & 0x0FFF);
	assert(*slot >= 0 && *slot < ring->nr_slots);

	return ring;
}
static void dmacontroller_poke_tx(struct bcm43xx_dmaring *ring,
				  int slot)
{
	u16 offset;
	int descsize;

	/* Everything is ready to start. Buffers are DMA mapped and
	 * associated with slots.
	 * "slot" is the last slot of the new frame we want to transmit.
	 * Close your seat belts now, please.
	 */
	wmb();
	slot = next_slot(ring, slot);
	offset = (ring->dma64) ? BCM43xx_DMA64_TXINDEX : BCM43xx_DMA32_TXINDEX;
	descsize = (ring->dma64) ? sizeof(struct bcm43xx_dmadesc64)
		: sizeof(struct bcm43xx_dmadesc32);
	bcm43xx_dma_write(ring, offset,
			  (u32)(slot * descsize));
}
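
/* The TX index register takes the byte offset of the first descriptor the
 * hardware should not process yet, which is why "slot" is advanced by one
 * above before being scaled by the descriptor size. For example, if a frame
 * ends in slot 5 of a 64-bit ring (16-byte descriptors), the value written
 * is 6 * 16 = 96.
 */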
static void dma_tx_fragment(struct bcm43xx_dmaring *ring,
			    struct sk_buff *skb,
			    u8 cur_frag)
{
	int slot;
	struct bcm43xx_dmadesc_generic *desc;
	struct bcm43xx_dmadesc_meta *meta;
	dma_addr_t dmaaddr;

	assert(skb_shinfo(skb)->nr_frags == 0);

	slot = request_slot(ring);
	desc = bcm43xx_dma_idx2desc(ring, slot, &meta);

	/* Add a device specific TX header. */
	assert(skb_headroom(skb) >= sizeof(struct bcm43xx_txhdr));
	/* Reserve enough headroom for the device tx header. */
	__skb_push(skb, sizeof(struct bcm43xx_txhdr));
	/* Now calculate and add the tx header.
	 * The tx header includes the PLCP header.
	 */
	bcm43xx_generate_txhdr(ring->bcm,
			       (struct bcm43xx_txhdr *)skb->data,
			       skb->data + sizeof(struct bcm43xx_txhdr),
			       skb->len - sizeof(struct bcm43xx_txhdr),
			       (cur_frag == 0),
			       generate_cookie(ring, slot));

	meta->skb = skb;
	dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
	meta->dmaaddr = dmaaddr;

	fill_descriptor(ring, desc, dmaaddr,
			skb->len, 1, 1, 1);

	/* Now transfer the whole frame. */
	dmacontroller_poke_tx(ring, slot);
}
int bcm43xx_dma_tx(struct bcm43xx_private *bcm,
		   struct ieee80211_txb *txb)
{
	/* We just received a packet from the kernel network subsystem.
	 * Add headers and DMA map the memory. Poke
	 * the device to send the stuff.
	 * Note that this is called from atomic context.
	 */
	struct bcm43xx_dmaring *ring = bcm43xx_current_dma(bcm)->tx_ring1;
	u8 i;
	struct sk_buff *skb;

	assert(ring->tx);
	if (unlikely(free_slots(ring) < txb->nr_frags)) {
		/* The queue should be stopped,
		 * if we are low on free slots.
		 * If this ever triggers, we have to lower the suspend_mark.
		 */
		dprintkl(KERN_ERR PFX "Out of DMA descriptor slots!\n");
		return -ENOMEM;
	}

	for (i = 0; i < txb->nr_frags; i++) {
		skb = txb->fragments[i];
		/* Take skb from ieee80211_txb_free */
		txb->fragments[i] = NULL;
		dma_tx_fragment(ring, skb, i);
	}
	ieee80211_txb_free(txb);

	return 0;
}
*bcm
,
973 struct bcm43xx_xmitstatus
*status
)
975 struct bcm43xx_dmaring
*ring
;
976 struct bcm43xx_dmadesc_generic
*desc
;
977 struct bcm43xx_dmadesc_meta
*meta
;
978 int is_last_fragment
;
982 ring
= parse_cookie(bcm
, status
->cookie
, &slot
);
986 assert(slot
>= 0 && slot
< ring
->nr_slots
);
987 desc
= bcm43xx_dma_idx2desc(ring
, slot
, &meta
);
990 tmp
= le32_to_cpu(desc
->dma64
.control0
);
991 is_last_fragment
= !!(tmp
& BCM43xx_DMA64_DCTL0_FRAMEEND
);
993 tmp
= le32_to_cpu(desc
->dma32
.control
);
994 is_last_fragment
= !!(tmp
& BCM43xx_DMA32_DCTL_FRAMEEND
);
996 unmap_descbuffer(ring
, meta
->dmaaddr
, meta
->skb
->len
, 1);
997 free_descriptor_buffer(ring
, meta
, 1);
998 /* Everything belonging to the slot is unmapped
999 * and freed, so we can return it.
1001 return_slot(ring
, slot
);
1003 if (is_last_fragment
)
1005 slot
= next_slot(ring
, slot
);
1007 bcm
->stats
.last_tx
= jiffies
;
static void dma_rx(struct bcm43xx_dmaring *ring,
		   int *slot)
{
	struct bcm43xx_dmadesc_generic *desc;
	struct bcm43xx_dmadesc_meta *meta;
	struct bcm43xx_rxhdr *rxhdr;
	struct sk_buff *skb;
	u16 len;
	int err;
	dma_addr_t dmaaddr;

	desc = bcm43xx_dma_idx2desc(ring, *slot, &meta);

	sync_descbuffer_for_cpu(ring, meta->dmaaddr, ring->rx_buffersize);
	skb = meta->skb;

	if (ring->index == 3) {
		/* We received an xmit status. */
		struct bcm43xx_hwxmitstatus *hw = (struct bcm43xx_hwxmitstatus *)skb->data;
		struct bcm43xx_xmitstatus stat;
		int i = 0;

		stat.cookie = le16_to_cpu(hw->cookie);
		while (stat.cookie == 0) {
			/* The hardware did not yet write the cookie.
			 * Wait a little while for it. */
			if (unlikely(++i >= 10000)) {
				assert(0);
				break;
			}
			udelay(2);
			barrier();
			stat.cookie = le16_to_cpu(hw->cookie);
		}
		stat.flags = hw->flags;
		stat.cnt1 = hw->cnt1;
		stat.cnt2 = hw->cnt2;
		stat.seq = le16_to_cpu(hw->seq);
		stat.unknown = le16_to_cpu(hw->unknown);

		bcm43xx_debugfs_log_txstat(ring->bcm, &stat);
		bcm43xx_dma_handle_xmitstatus(ring->bcm, &stat);
		/* recycle the descriptor buffer. */
		sync_descbuffer_for_device(ring, meta->dmaaddr, ring->rx_buffersize);

		return;
	}
	rxhdr = (struct bcm43xx_rxhdr *)skb->data;
	len = le16_to_cpu(rxhdr->frame_length);
	if (len == 0) {
		int i = 0;

		do {
			udelay(2);
			barrier();
			len = le16_to_cpu(rxhdr->frame_length);
		} while (len == 0 && i++ < 5);
		if (unlikely(len == 0)) {
			/* recycle the descriptor buffer. */
			sync_descbuffer_for_device(ring, meta->dmaaddr,
						   ring->rx_buffersize);
			goto drop;
		}
	}
	if (unlikely(len > ring->rx_buffersize)) {
		/* The data did not fit into one descriptor buffer
		 * and is split over multiple buffers.
		 * This should never happen, as we try to allocate buffers
		 * big enough. So simply ignore this packet.
		 */
		int cnt = 0;
		s32 tmp = len;

		while (1) {
			desc = bcm43xx_dma_idx2desc(ring, *slot, &meta);
			/* recycle the descriptor buffer. */
			sync_descbuffer_for_device(ring, meta->dmaaddr,
						   ring->rx_buffersize);
			*slot = next_slot(ring, *slot);
			cnt++;
			tmp -= ring->rx_buffersize;
			if (tmp <= 0)
				break;
		}
		printkl(KERN_ERR PFX "DMA RX buffer too small "
			"(len: %u, buffer: %u, nr-dropped: %d)\n",
			len, ring->rx_buffersize, cnt);
		goto drop;
	}
	len -= IEEE80211_FCS_LEN;

	dmaaddr = meta->dmaaddr;
	err = setup_rx_descbuffer(ring, desc, meta, GFP_ATOMIC);
	if (unlikely(err)) {
		dprintkl(KERN_ERR PFX "DMA RX: setup_rx_descbuffer() failed\n");
		sync_descbuffer_for_device(ring, dmaaddr,
					   ring->rx_buffersize);
		goto drop;
	}

	unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0);
	skb_put(skb, len + ring->frameoffset);
	skb_pull(skb, ring->frameoffset);

	err = bcm43xx_rx(ring->bcm, skb, rxhdr);
	if (err) {
		dev_kfree_skb_irq(skb);
		goto drop;
	}

drop:
	return;
}
void bcm43xx_dma_rx(struct bcm43xx_dmaring *ring)
{
	u32 status;
	u16 descptr;
	int slot, current_slot;
#ifdef CONFIG_BCM43XX_DEBUG
	int used_slots = 0;
#endif

	assert(!ring->tx);
	if (ring->dma64) {
		status = bcm43xx_dma_read(ring, BCM43xx_DMA64_RXSTATUS);
		descptr = (status & BCM43xx_DMA64_RXSTATDPTR);
		current_slot = descptr / sizeof(struct bcm43xx_dmadesc64);
	} else {
		status = bcm43xx_dma_read(ring, BCM43xx_DMA32_RXSTATUS);
		descptr = (status & BCM43xx_DMA32_RXDPTR);
		current_slot = descptr / sizeof(struct bcm43xx_dmadesc32);
	}
	assert(current_slot >= 0 && current_slot < ring->nr_slots);

	slot = ring->current_slot;
	for ( ; slot != current_slot; slot = next_slot(ring, slot)) {
		dma_rx(ring, &slot);
#ifdef CONFIG_BCM43XX_DEBUG
		if (++used_slots > ring->max_used_slots)
			ring->max_used_slots = used_slots;
#endif
	}
	if (ring->dma64) {
		bcm43xx_dma_write(ring, BCM43xx_DMA64_RXINDEX,
				  (u32)(slot * sizeof(struct bcm43xx_dmadesc64)));
	} else {
		bcm43xx_dma_write(ring, BCM43xx_DMA32_RXINDEX,
				  (u32)(slot * sizeof(struct bcm43xx_dmadesc32)));
	}
	ring->current_slot = slot;
}
void bcm43xx_dma_tx_suspend(struct bcm43xx_dmaring *ring)
{
	bcm43xx_power_saving_ctl_bits(ring->bcm, -1, 1);
	if (ring->dma64) {
		bcm43xx_dma_write(ring, BCM43xx_DMA64_TXCTL,
				  bcm43xx_dma_read(ring, BCM43xx_DMA64_TXCTL)
				  | BCM43xx_DMA64_TXSUSPEND);
	} else {
		bcm43xx_dma_write(ring, BCM43xx_DMA32_TXCTL,
				  bcm43xx_dma_read(ring, BCM43xx_DMA32_TXCTL)
				  | BCM43xx_DMA32_TXSUSPEND);
	}
}
void bcm43xx_dma_tx_resume(struct bcm43xx_dmaring *ring)
{
	if (ring->dma64) {
		bcm43xx_dma_write(ring, BCM43xx_DMA64_TXCTL,
				  bcm43xx_dma_read(ring, BCM43xx_DMA64_TXCTL)
				  & ~BCM43xx_DMA64_TXSUSPEND);
	} else {
		bcm43xx_dma_write(ring, BCM43xx_DMA32_TXCTL,
				  bcm43xx_dma_read(ring, BCM43xx_DMA32_TXCTL)
				  & ~BCM43xx_DMA32_TXSUSPEND);
	}
	bcm43xx_power_saving_ctl_bits(ring->bcm, -1, -1);
}