/*

  Broadcom B43legacy wireless driver

  Copyright (c) 2005 Michael Buesch <m@bues.ch>

  This program is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; either version 2 of the License, or
  (at your option) any later version.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; see the file COPYING.  If not, write to
  the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
  Boston, MA 02110-1301, USA.

*/
#include "b43legacy.h"
#include "pio.h"
#include "main.h"
#include "xmit.h"

#include <linux/delay.h>
#include <linux/slab.h>
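/*
 * Low-level PIO TX helpers. A frame (firmware TX header plus payload) is
 * pushed into the device FIFO 16 bits at a time through the TXDATA
 * register, while TXCTL selects which byte lanes get written. Cores that
 * need the PIO workarounds (queue->need_workarounds) require a different
 * write ordering.
 */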
static void tx_start(struct b43legacy_pioqueue *queue)
{
	b43legacy_pio_write(queue, B43legacy_PIO_TXCTL,
			    B43legacy_PIO_TXCTL_INIT);
}
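/* Write a single trailing byte. On workaround cores the data register is
 * filled before the control word is issued; otherwise the control word
 * comes first. */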
static void tx_octet(struct b43legacy_pioqueue *queue,
		     u8 octet)
{
	if (queue->need_workarounds) {
		b43legacy_pio_write(queue, B43legacy_PIO_TXDATA, octet);
		b43legacy_pio_write(queue, B43legacy_PIO_TXCTL,
				    B43legacy_PIO_TXCTL_WRITELO);
	} else {
		b43legacy_pio_write(queue, B43legacy_PIO_TXCTL,
				    B43legacy_PIO_TXCTL_WRITELO);
		b43legacy_pio_write(queue, B43legacy_PIO_TXDATA, octet);
	}
}
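/* Fetch the next 16-bit word of the frame: words are taken from the TX
 * header first, then from the packet payload once *pos has passed
 * txhdr_size. */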
static u16 tx_get_next_word(const u8 *txhdr,
			    const u8 *packet,
			    size_t txhdr_size,
			    unsigned int *pos)
{
	const u8 *source;
	unsigned int i = *pos;
	u16 ret;

	if (i < txhdr_size)
		source = txhdr;
	else {
		source = packet;
		i -= txhdr_size;
	}
	ret = le16_to_cpu(*((__le16 *)(source + i)));
	*pos += 2;

	return ret;
}
static void tx_data(struct b43legacy_pioqueue *queue,
		    u8 *txhdr,
		    const u8 *packet,
		    unsigned int octets)
{
	u16 data;
	unsigned int i = 0;

	if (queue->need_workarounds) {
		data = tx_get_next_word(txhdr, packet,
					sizeof(struct b43legacy_txhdr_fw3), &i);
		b43legacy_pio_write(queue, B43legacy_PIO_TXDATA, data);
	}
	b43legacy_pio_write(queue, B43legacy_PIO_TXCTL,
			    B43legacy_PIO_TXCTL_WRITELO |
			    B43legacy_PIO_TXCTL_WRITEHI);
	while (i < octets - 1) {
		data = tx_get_next_word(txhdr, packet,
					sizeof(struct b43legacy_txhdr_fw3), &i);
		b43legacy_pio_write(queue, B43legacy_PIO_TXDATA, data);
	}
	if (octets % 2)
		tx_octet(queue, packet[octets -
			 sizeof(struct b43legacy_txhdr_fw3) - 1]);
}
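/* Finish the transfer: workaround cores flush the last payload byte
 * explicitly before signalling completion through TXCTL. */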
static void tx_complete(struct b43legacy_pioqueue *queue,
			struct sk_buff *skb)
{
	if (queue->need_workarounds) {
		b43legacy_pio_write(queue, B43legacy_PIO_TXDATA,
				    skb->data[skb->len - 1]);
		b43legacy_pio_write(queue, B43legacy_PIO_TXCTL,
				    B43legacy_PIO_TXCTL_WRITELO |
				    B43legacy_PIO_TXCTL_COMPLETE);
	} else
		b43legacy_pio_write(queue, B43legacy_PIO_TXCTL,
				    B43legacy_PIO_TXCTL_COMPLETE);
}
static u16 generate_cookie(struct b43legacy_pioqueue *queue,
			   struct b43legacy_pio_txpacket *packet)
{
	u16 cookie = 0x0000;
	int packetindex;

	/* We use the upper 4 bits for the PIO
	 * controller ID and the lower 12 bits
	 * for the packet index (in the cache).
	 */
	switch (queue->mmio_base) {
	case B43legacy_MMIO_PIO1_BASE:
		break;
	case B43legacy_MMIO_PIO2_BASE:
		cookie = 0x1000;
		break;
	case B43legacy_MMIO_PIO3_BASE:
		cookie = 0x2000;
		break;
	case B43legacy_MMIO_PIO4_BASE:
		cookie = 0x3000;
		break;
	default:
		B43legacy_WARN_ON(1);
	}
	packetindex = pio_txpacket_getindex(packet);
	B43legacy_WARN_ON(!(((u16)packetindex & 0xF000) == 0x0000));
	cookie |= (u16)packetindex;

	return cookie;
}
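/* Inverse of generate_cookie(): the upper nibble selects the PIO queue
 * and the lower 12 bits index into its packet cache. */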
static
struct b43legacy_pioqueue *parse_cookie(struct b43legacy_wldev *dev,
					u16 cookie,
					struct b43legacy_pio_txpacket **packet)
{
	struct b43legacy_pio *pio = &dev->pio;
	struct b43legacy_pioqueue *queue = NULL;
	int packetindex;

	switch (cookie & 0xF000) {
	case 0x0000:
		queue = pio->queue0;
		break;
	case 0x1000:
		queue = pio->queue1;
		break;
	case 0x2000:
		queue = pio->queue2;
		break;
	case 0x3000:
		queue = pio->queue3;
		break;
	default:
		B43legacy_WARN_ON(1);
	}
	packetindex = (cookie & 0x0FFF);
	B43legacy_WARN_ON(!(packetindex >= 0 && packetindex
			  < B43legacy_PIO_MAXTXPACKETS));
	*packet = &(queue->tx_packets_cache[packetindex]);

	return queue;
}
union txhdr_union {
	struct b43legacy_txhdr_fw3 txhdr_fw3;
};
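/* Write one complete frame to the device: build the firmware TX header,
 * start the transfer, stream header and payload, and mark it complete. */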
static int pio_tx_write_fragment(struct b43legacy_pioqueue *queue,
				 struct sk_buff *skb,
				 struct b43legacy_pio_txpacket *packet,
				 size_t txhdr_size)
{
	union txhdr_union txhdr_data;
	u8 *txhdr = NULL;
	unsigned int octets;
	int err;

	txhdr = (u8 *)(&txhdr_data.txhdr_fw3);

	B43legacy_WARN_ON(skb_shinfo(skb)->nr_frags != 0);
	err = b43legacy_generate_txhdr(queue->dev,
				       txhdr, skb->data, skb->len,
				       IEEE80211_SKB_CB(skb),
				       generate_cookie(queue, packet));
	if (err)
		return err;

	tx_start(queue);
	octets = skb->len + txhdr_size;
	if (queue->need_workarounds)
		octets++;
	tx_data(queue, txhdr, (u8 *)skb->data, octets);
	tx_complete(queue, skb);

	return 0;
}
static void free_txpacket(struct b43legacy_pio_txpacket *packet,
			  int irq_context)
{
	struct b43legacy_pioqueue *queue = packet->queue;

	if (packet->skb) {
		if (irq_context)
			dev_kfree_skb_irq(packet->skb);
		else
			dev_kfree_skb(packet->skb);
	}
	list_move(&packet->list, &queue->txfree);
	queue->nr_txfree++;
}
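/* Feed one queued packet to the hardware. If the device queue has no
 * room, the packet stays on the txqueue so it can be retried later. */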
static int pio_tx_packet(struct b43legacy_pio_txpacket *packet)
{
	struct b43legacy_pioqueue *queue = packet->queue;
	struct sk_buff *skb = packet->skb;
	u16 octets;
	int err;

	octets = (u16)skb->len + sizeof(struct b43legacy_txhdr_fw3);
	if (queue->tx_devq_size < octets) {
		b43legacywarn(queue->dev->wl, "PIO queue too small. "
			      "Dropping packet.\n");
		/* Drop it silently (return success) */
		free_txpacket(packet, 1);
		return 0;
	}
	B43legacy_WARN_ON(queue->tx_devq_packets >
			  B43legacy_PIO_MAXTXDEVQPACKETS);
	B43legacy_WARN_ON(queue->tx_devq_used > queue->tx_devq_size);
	/* Check if there is sufficient free space on the device
	 * TX queue. If not, return and let the TX tasklet
	 * retry later.
	 */
	if (queue->tx_devq_packets == B43legacy_PIO_MAXTXDEVQPACKETS)
		return -EBUSY;
	if (queue->tx_devq_used + octets > queue->tx_devq_size)
		return -EBUSY;
	/* Now poke the device. */
	err = pio_tx_write_fragment(queue, skb, packet,
				    sizeof(struct b43legacy_txhdr_fw3));
	if (unlikely(err == -ENOKEY)) {
		/* Drop this packet, as we don't have the encryption key
		 * anymore and must not transmit it unencrypted. */
		free_txpacket(packet, 1);
		return 0;
	}

	/* Account for the packet size.
	 * (We must not overflow the device TX queue)
	 */
	queue->tx_devq_packets++;
	queue->tx_devq_used += octets;

	/* Transmission started, everything ok, move the
	 * packet to the txrunning list.
	 */
	list_move_tail(&packet->list, &queue->txrunning);

	return 0;
}
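/* The TX tasklet drains queue->txqueue under the IRQ lock. It stops
 * early if the queue is frozen or the hardware has TX suspended. */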
static void tx_tasklet(unsigned long d)
{
	struct b43legacy_pioqueue *queue = (struct b43legacy_pioqueue *)d;
	struct b43legacy_wldev *dev = queue->dev;
	unsigned long flags;
	struct b43legacy_pio_txpacket *packet, *tmp_packet;
	int err;
	u16 txctl;

	spin_lock_irqsave(&dev->wl->irq_lock, flags);
	if (queue->tx_frozen)
		goto out_unlock;
	txctl = b43legacy_pio_read(queue, B43legacy_PIO_TXCTL);
	if (txctl & B43legacy_PIO_TXCTL_SUSPEND)
		goto out_unlock;

	list_for_each_entry_safe(packet, tmp_packet, &queue->txqueue, list) {
		/* Try to transmit the packet. This can fail, if
		 * the device queue is full. In case of failure, the
		 * packet is left in the txqueue.
		 * If transmission succeeds, the packet is moved to txrunning.
		 * If it is impossible to transmit the packet, it
		 * is dropped.
		 */
		err = pio_tx_packet(packet);
		if (err)
			break;
	}
out_unlock:
	spin_unlock_irqrestore(&dev->wl->irq_lock, flags);
}
static void setup_txqueues(struct b43legacy_pioqueue *queue)
{
	struct b43legacy_pio_txpacket *packet;
	int i;

	queue->nr_txfree = B43legacy_PIO_MAXTXPACKETS;
	for (i = 0; i < B43legacy_PIO_MAXTXPACKETS; i++) {
		packet = &(queue->tx_packets_cache[i]);

		packet->queue = queue;
		INIT_LIST_HEAD(&packet->list);

		list_add(&packet->list, &queue->txfree);
	}
}
static
struct b43legacy_pioqueue *b43legacy_setup_pioqueue(struct b43legacy_wldev *dev,
						    u16 pio_mmio_base)
{
	struct b43legacy_pioqueue *queue;
	u32 value;
	u16 qsize;

	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
	if (!queue)
		goto out;

	queue->dev = dev;
	queue->mmio_base = pio_mmio_base;
	queue->need_workarounds = (dev->dev->id.revision < 3);

	INIT_LIST_HEAD(&queue->txfree);
	INIT_LIST_HEAD(&queue->txqueue);
	INIT_LIST_HEAD(&queue->txrunning);
	tasklet_init(&queue->txtask, tx_tasklet,
		     (unsigned long)queue);

	value = b43legacy_read32(dev, B43legacy_MMIO_MACCTL);
	value &= ~B43legacy_MACCTL_BE;
	b43legacy_write32(dev, B43legacy_MMIO_MACCTL, value);

	qsize = b43legacy_read16(dev, queue->mmio_base
				 + B43legacy_PIO_TXQBUFSIZE);
	if (qsize == 0) {
		b43legacyerr(dev->wl, "This card does not support PIO "
			     "operation mode. Please use DMA mode "
			     "(module parameter pio=0).\n");
		goto err_freequeue;
	}
	if (qsize <= B43legacy_PIO_TXQADJUST) {
		b43legacyerr(dev->wl, "PIO tx device-queue too small (%u)\n",
			     qsize);
		goto err_freequeue;
	}
	qsize -= B43legacy_PIO_TXQADJUST;
	queue->tx_devq_size = qsize;

	setup_txqueues(queue);

out:
	return queue;

err_freequeue:
	kfree(queue);
	queue = NULL;
	goto out;
}
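/* Drop every packet still sitting on the txrunning and txqueue lists;
 * used while tearing a queue down. */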
static void cancel_transfers(struct b43legacy_pioqueue *queue)
{
	struct b43legacy_pio_txpacket *packet, *tmp_packet;

	tasklet_disable(&queue->txtask);

	list_for_each_entry_safe(packet, tmp_packet, &queue->txrunning, list)
		free_txpacket(packet, 0);
	list_for_each_entry_safe(packet, tmp_packet, &queue->txqueue, list)
		free_txpacket(packet, 0);
}
static void b43legacy_destroy_pioqueue(struct b43legacy_pioqueue *queue)
{
	if (!queue)
		return;

	cancel_transfers(queue);
	kfree(queue);
}
void b43legacy_pio_free(struct b43legacy_wldev *dev)
{
	struct b43legacy_pio *pio;

	if (!b43legacy_using_pio(dev))
		return;
	pio = &dev->pio;

	b43legacy_destroy_pioqueue(pio->queue3);
	pio->queue3 = NULL;
	b43legacy_destroy_pioqueue(pio->queue2);
	pio->queue2 = NULL;
	b43legacy_destroy_pioqueue(pio->queue1);
	pio->queue1 = NULL;
	b43legacy_destroy_pioqueue(pio->queue0);
	pio->queue0 = NULL;
}
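/* Allocate the four PIO queues (one per device FIFO). Old core revisions
 * additionally need the PIO workaround interrupt enabled. */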
int b43legacy_pio_init(struct b43legacy_wldev *dev)
{
	struct b43legacy_pio *pio = &dev->pio;
	struct b43legacy_pioqueue *queue;
	int err = -ENOMEM;

	queue = b43legacy_setup_pioqueue(dev, B43legacy_MMIO_PIO1_BASE);
	if (!queue)
		goto out;
	pio->queue0 = queue;

	queue = b43legacy_setup_pioqueue(dev, B43legacy_MMIO_PIO2_BASE);
	if (!queue)
		goto err_destroy0;
	pio->queue1 = queue;

	queue = b43legacy_setup_pioqueue(dev, B43legacy_MMIO_PIO3_BASE);
	if (!queue)
		goto err_destroy1;
	pio->queue2 = queue;

	queue = b43legacy_setup_pioqueue(dev, B43legacy_MMIO_PIO4_BASE);
	if (!queue)
		goto err_destroy2;
	pio->queue3 = queue;

	if (dev->dev->id.revision < 3)
		dev->irq_mask |= B43legacy_IRQ_PIO_WORKAROUND;

	b43legacydbg(dev->wl, "PIO initialized\n");
	err = 0;
out:
	return err;

err_destroy2:
	b43legacy_destroy_pioqueue(pio->queue2);
	pio->queue2 = NULL;
err_destroy1:
	b43legacy_destroy_pioqueue(pio->queue1);
	pio->queue1 = NULL;
err_destroy0:
	b43legacy_destroy_pioqueue(pio->queue0);
	pio->queue0 = NULL;
	goto out;
}
int b43legacy_pio_tx(struct b43legacy_wldev *dev,
		     struct sk_buff *skb)
{
	struct b43legacy_pioqueue *queue = dev->pio.queue1;
	struct b43legacy_pio_txpacket *packet;

	B43legacy_WARN_ON(queue->tx_suspended);
	B43legacy_WARN_ON(list_empty(&queue->txfree));

	packet = list_entry(queue->txfree.next, struct b43legacy_pio_txpacket,
			    list);
	packet->skb = skb;

	list_move_tail(&packet->list, &queue->txqueue);
	queue->nr_txfree--;
	B43legacy_WARN_ON(queue->nr_txfree >= B43legacy_PIO_MAXTXPACKETS);

	tasklet_schedule(&queue->txtask);

	return 0;
}
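/* Handle a TX status report: reclaim the device-queue space used by the
 * completed packet, translate the hardware counters into mac80211
 * rate/retry information and hand the skb back to the stack. */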
void b43legacy_pio_handle_txstatus(struct b43legacy_wldev *dev,
				   const struct b43legacy_txstatus *status)
{
	struct b43legacy_pioqueue *queue;
	struct b43legacy_pio_txpacket *packet;
	struct ieee80211_tx_info *info;
	int retry_limit;

	queue = parse_cookie(dev, status->cookie, &packet);
	B43legacy_WARN_ON(!queue);

	if (!packet->skb)
		return;

	queue->tx_devq_packets--;
	queue->tx_devq_used -= (packet->skb->len +
				sizeof(struct b43legacy_txhdr_fw3));

	info = IEEE80211_SKB_CB(packet->skb);

	/* preserve the configured retry limit before clearing the status
	 * The xmit function has overwritten the rc's value with the actual
	 * retry limit done by the hardware */
	retry_limit = info->status.rates[0].count;
	ieee80211_tx_info_clear_status(info);

	if (status->acked)
		info->flags |= IEEE80211_TX_STAT_ACK;

	if (status->rts_count > dev->wl->hw->conf.short_frame_max_tx_count) {
		/*
		 * If the short retries (RTS, not data frame) have exceeded
		 * the limit, the hw will not have tried the selected rate,
		 * but will have used the fallback rate instead.
		 * Don't let the rate control count attempts for the selected
		 * rate in this case, otherwise the statistics will be off.
		 */
		info->status.rates[0].count = 0;
		info->status.rates[1].count = status->frame_count;
	} else {
		if (status->frame_count > retry_limit) {
			info->status.rates[0].count = retry_limit;
			info->status.rates[1].count = status->frame_count -
						      retry_limit;
		} else {
			info->status.rates[0].count = status->frame_count;
			info->status.rates[1].idx = -1;
		}
	}
	ieee80211_tx_status_irqsafe(dev->wl->hw, packet->skb);
	packet->skb = NULL;

	free_txpacket(packet, 1);
	/* If there are packets on the txqueue, poke the tasklet
	 * to transmit them.
	 */
	if (!list_empty(&queue->txqueue))
		tasklet_schedule(&queue->txtask);
}
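/* Report an RX problem and re-arm the RX engine; optionally drain stale
 * words from the RX FIFO of the data queue. */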
static void pio_rx_error(struct b43legacy_pioqueue *queue,
			 int clear_buffers,
			 const char *error)
{
	int i;

	b43legacyerr(queue->dev->wl, "PIO RX error: %s\n", error);
	b43legacy_pio_write(queue, B43legacy_PIO_RXCTL,
			    B43legacy_PIO_RXCTL_READY);
	if (clear_buffers) {
		B43legacy_WARN_ON(queue->mmio_base != B43legacy_MMIO_PIO1_BASE);
		for (i = 0; i < 15; i++) {
			/* Dummy read. */
			b43legacy_pio_read(queue, B43legacy_PIO_RXDATA);
		}
	}
}
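/* RX path: acknowledge the data-available condition, wait for READY,
 * read the frame length and the RX-header preamble, then either forward
 * a hardware TX status report (PIO4 queue) or copy the frame into an skb
 * and pass it up via b43legacy_rx(). */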
void b43legacy_pio_rx(struct b43legacy_pioqueue *queue)
{
	__le16 preamble[21] = { 0 };
	struct b43legacy_rxhdr_fw3 *rxhdr;
	u16 tmp;
	u16 len;
	u16 macstat;
	int i;
	int preamble_readwords;
	struct sk_buff *skb;

	tmp = b43legacy_pio_read(queue, B43legacy_PIO_RXCTL);
	if (!(tmp & B43legacy_PIO_RXCTL_DATAAVAILABLE))
		return;
	b43legacy_pio_write(queue, B43legacy_PIO_RXCTL,
			    B43legacy_PIO_RXCTL_DATAAVAILABLE);

	for (i = 0; i < 10; i++) {
		tmp = b43legacy_pio_read(queue, B43legacy_PIO_RXCTL);
		if (tmp & B43legacy_PIO_RXCTL_READY)
			goto data_ready;
		udelay(10);
	}
	b43legacydbg(queue->dev->wl, "PIO RX timed out\n");
	return;
data_ready:

	len = b43legacy_pio_read(queue, B43legacy_PIO_RXDATA);
	if (unlikely(len > 0x700)) {
		pio_rx_error(queue, 0, "len > 0x700");
		return;
	}
	if (unlikely(len == 0 && queue->mmio_base !=
		     B43legacy_MMIO_PIO4_BASE)) {
		pio_rx_error(queue, 0, "len == 0");
		return;
	}
	preamble[0] = cpu_to_le16(len);
	if (queue->mmio_base == B43legacy_MMIO_PIO4_BASE)
		preamble_readwords = 14 / sizeof(u16);
	else
		preamble_readwords = 18 / sizeof(u16);
	for (i = 0; i < preamble_readwords; i++) {
		tmp = b43legacy_pio_read(queue, B43legacy_PIO_RXDATA);
		preamble[i + 1] = cpu_to_le16(tmp);
	}
	rxhdr = (struct b43legacy_rxhdr_fw3 *)preamble;
	macstat = le16_to_cpu(rxhdr->mac_status);
	if (macstat & B43legacy_RX_MAC_FCSERR) {
		pio_rx_error(queue,
			     (queue->mmio_base == B43legacy_MMIO_PIO1_BASE),
			     "Frame FCS error");
		return;
	}
	if (queue->mmio_base == B43legacy_MMIO_PIO4_BASE) {
		/* We received an xmit status. */
		struct b43legacy_hwtxstatus *hw;

		hw = (struct b43legacy_hwtxstatus *)(preamble + 1);
		b43legacy_handle_hwtxstatus(queue->dev, hw);

		return;
	}

	skb = dev_alloc_skb(len);
	if (unlikely(!skb)) {
		pio_rx_error(queue, 1, "OOM");
		return;
	}
	skb_put(skb, len);
	for (i = 0; i < len - 1; i += 2) {
		tmp = b43legacy_pio_read(queue, B43legacy_PIO_RXDATA);
		*((__le16 *)(skb->data + i)) = cpu_to_le16(tmp);
	}
	if (len % 2) {
		tmp = b43legacy_pio_read(queue, B43legacy_PIO_RXDATA);
		skb->data[len - 1] = (tmp & 0x00FF);
	}
	b43legacy_rx(queue->dev, skb, rxhdr);
}
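/* TX suspend/resume toggle the SUSPEND bit in TXCTL and adjust the power
 * saving state; resuming also kicks the tasklet to flush anything that
 * was queued in the meantime. */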
void b43legacy_pio_tx_suspend(struct b43legacy_pioqueue *queue)
{
	b43legacy_power_saving_ctl_bits(queue->dev, -1, 1);
	b43legacy_pio_write(queue, B43legacy_PIO_TXCTL,
			    b43legacy_pio_read(queue, B43legacy_PIO_TXCTL)
			    | B43legacy_PIO_TXCTL_SUSPEND);
}
void b43legacy_pio_tx_resume(struct b43legacy_pioqueue *queue)
{
	b43legacy_pio_write(queue, B43legacy_PIO_TXCTL,
			    b43legacy_pio_read(queue, B43legacy_PIO_TXCTL)
			    & ~B43legacy_PIO_TXCTL_SUSPEND);
	b43legacy_power_saving_ctl_bits(queue->dev, -1, -1);
	tasklet_schedule(&queue->txtask);
}
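/* Freezing only stops the TX tasklet from feeding the device; thawing
 * clears the flag again and reschedules any queue that still has
 * packets pending. */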
void b43legacy_pio_freeze_txqueues(struct b43legacy_wldev *dev)
{
	struct b43legacy_pio *pio;

	B43legacy_WARN_ON(!b43legacy_using_pio(dev));
	pio = &dev->pio;

	pio->queue0->tx_frozen = 1;
	pio->queue1->tx_frozen = 1;
	pio->queue2->tx_frozen = 1;
	pio->queue3->tx_frozen = 1;
}
void b43legacy_pio_thaw_txqueues(struct b43legacy_wldev *dev)
{
	struct b43legacy_pio *pio;

	B43legacy_WARN_ON(!b43legacy_using_pio(dev));
	pio = &dev->pio;

	pio->queue0->tx_frozen = 0;
	pio->queue1->tx_frozen = 0;
	pio->queue2->tx_frozen = 0;
	pio->queue3->tx_frozen = 0;
	if (!list_empty(&pio->queue0->txqueue))
		tasklet_schedule(&pio->queue0->txtask);
	if (!list_empty(&pio->queue1->txqueue))
		tasklet_schedule(&pio->queue1->txtask);
	if (!list_empty(&pio->queue2->txqueue))
		tasklet_schedule(&pio->queue2->txtask);
	if (!list_empty(&pio->queue3->txqueue))
		tasklet_schedule(&pio->queue3->txtask);
}