// SPDX-License-Identifier: GPL-2.0-or-later
/* Copyright (C) 2009 - 2010 Ivo van Doorn <IvDoorn@gmail.com>
 * Copyright (C) 2009 Alban Browaeys <prahal@yahoo.com>
 * Copyright (C) 2009 Felix Fietkau <nbd@openwrt.org>
 * Copyright (C) 2009 Luis Correia <luis.f.correia@gmail.com>
 * Copyright (C) 2009 Mattias Nissler <mattias.nissler@gmx.de>
 * Copyright (C) 2009 Mark Asselstine <asselsm@gmail.com>
 * Copyright (C) 2009 Xose Vazquez Perez <xose.vazquez@gmail.com>
 * Copyright (C) 2009 Bart Zolnierkiewicz <bzolnier@gmail.com>
 * <http://rt2x00.serialmonkey.com>
 *
 * Module: rt2800mmio
 * Abstract: rt2800 MMIO device routines.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/export.h>

#include "rt2x00.h"
#include "rt2x00mmio.h"
#include "rt2800.h"
#include "rt2800lib.h"
#include "rt2800mmio.h"
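
/*
 * rt2800mmio_get_dma_done() below reports how far the DMA engine has
 * progressed on a queue: TX queues expose this through the TX_DTX_IDX
 * registers (index 5 for the management queue), while for RX the index
 * tracked by rt2x00lib at Q_INDEX_DMA_DONE is used.
 */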
unsigned int rt2800mmio_get_dma_done(struct data_queue *queue)
{
        struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
        struct queue_entry *entry;
        int idx, qid;

        switch (queue->qid) {
        case QID_AC_VO:
        case QID_AC_VI:
        case QID_AC_BE:
        case QID_AC_BK:
                qid = queue->qid;
                idx = rt2x00mmio_register_read(rt2x00dev, TX_DTX_IDX(qid));
                break;
        case QID_MGMT:
                idx = rt2x00mmio_register_read(rt2x00dev, TX_DTX_IDX(5));
                break;
        case QID_RX:
                entry = rt2x00queue_get_entry(queue, Q_INDEX_DMA_DONE);
                idx = entry->entry_idx;
                break;
        default:
                WARN_ON_ONCE(1);
                idx = 0;
                break;
        }

        return idx;
}
EXPORT_SYMBOL_GPL(rt2800mmio_get_dma_done);
/*
 * TX descriptor initialization
 */
__le32 *rt2800mmio_get_txwi(struct queue_entry *entry)
{
        return (__le32 *) entry->skb->data;
}
EXPORT_SYMBOL_GPL(rt2800mmio_get_txwi);
void rt2800mmio_write_tx_desc(struct queue_entry *entry,
                              struct txentry_desc *txdesc)
{
        struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
        struct queue_entry_priv_mmio *entry_priv = entry->priv_data;
        __le32 *txd = entry_priv->desc;
        u32 word;
        const unsigned int txwi_size = entry->queue->winfo_size;

        /*
         * The buffers pointed to by SD_PTR0/SD_LEN0 and SD_PTR1/SD_LEN1
         * must contain a TXWI structure + 802.11 header + padding + 802.11
         * data. We choose to have SD_PTR0/SD_LEN0 contain only the TXWI and
         * SD_PTR1/SD_LEN1 contain the 802.11 header + padding + 802.11
         * data. This means that LAST_SEC0 is always 0.
         */

        /*
         * Initialize TX descriptor
         */
        word = 0;
        rt2x00_set_field32(&word, TXD_W0_SD_PTR0, skbdesc->skb_dma);
        rt2x00_desc_write(txd, 0, word);

        word = 0;
        rt2x00_set_field32(&word, TXD_W1_SD_LEN1, entry->skb->len);
        rt2x00_set_field32(&word, TXD_W1_LAST_SEC1,
                           !test_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags));
        rt2x00_set_field32(&word, TXD_W1_BURST,
                           test_bit(ENTRY_TXD_BURST, &txdesc->flags));
        rt2x00_set_field32(&word, TXD_W1_SD_LEN0, txwi_size);
        rt2x00_set_field32(&word, TXD_W1_LAST_SEC0, 0);
        rt2x00_set_field32(&word, TXD_W1_DMA_DONE, 0);
        rt2x00_desc_write(txd, 1, word);

        word = 0;
        rt2x00_set_field32(&word, TXD_W2_SD_PTR1,
                           skbdesc->skb_dma + txwi_size);
        rt2x00_desc_write(txd, 2, word);

        word = 0;
        rt2x00_set_field32(&word, TXD_W3_WIV,
                           !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc->flags));
        rt2x00_set_field32(&word, TXD_W3_QSEL, 2);
        rt2x00_desc_write(txd, 3, word);

        /*
         * Register descriptor details in skb frame descriptor.
         */
        skbdesc->desc = txd;
        skbdesc->desc_len = TXD_DESC_SIZE;
}
EXPORT_SYMBOL_GPL(rt2800mmio_write_tx_desc);
/*
 * RX control handlers
 */
void rt2800mmio_fill_rxdone(struct queue_entry *entry,
                            struct rxdone_entry_desc *rxdesc)
{
        struct queue_entry_priv_mmio *entry_priv = entry->priv_data;
        __le32 *rxd = entry_priv->desc;
        u32 word;

        word = rt2x00_desc_read(rxd, 3);

        if (rt2x00_get_field32(word, RXD_W3_CRC_ERROR))
                rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC;

        /*
         * Unfortunately we don't know the cipher type used during
         * decryption. This prevents us from providing correct
         * statistics through debugfs.
         */
        rxdesc->cipher_status = rt2x00_get_field32(word, RXD_W3_CIPHER_ERROR);

        if (rt2x00_get_field32(word, RXD_W3_DECRYPTED)) {
                /*
                 * Hardware has stripped IV/EIV data from 802.11 frame during
                 * decryption. Unfortunately the descriptor doesn't contain
                 * any fields with the EIV/IV data either, so they can't
                 * be restored by rt2x00lib.
                 */
                rxdesc->flags |= RX_FLAG_IV_STRIPPED;

                /*
                 * The hardware has already checked the Michael Mic and has
                 * stripped it from the frame. Signal this to mac80211.
                 */
                rxdesc->flags |= RX_FLAG_MMIC_STRIPPED;

                if (rxdesc->cipher_status == RX_CRYPTO_SUCCESS) {
                        rxdesc->flags |= RX_FLAG_DECRYPTED;
                } else if (rxdesc->cipher_status == RX_CRYPTO_FAIL_MIC) {
                        /*
                         * In order to check the Michael Mic, the packet must have
                         * been decrypted. mac80211 doesn't check the MMIC failure
                         * flag to initiate MMIC countermeasures if the decrypted
                         * flag is not set.
                         */
                        rxdesc->flags |= RX_FLAG_DECRYPTED;

                        rxdesc->flags |= RX_FLAG_MMIC_ERROR;
                }
        }

        if (rt2x00_get_field32(word, RXD_W3_MY_BSS))
                rxdesc->dev_flags |= RXDONE_MY_BSS;

        if (rt2x00_get_field32(word, RXD_W3_L2PAD))
                rxdesc->dev_flags |= RXDONE_L2PAD;

        /*
         * Process the RXWI structure that is at the start of the buffer.
         */
        rt2800_process_rxwi(entry, rxdesc);
}
EXPORT_SYMBOL_GPL(rt2800mmio_fill_rxdone);
/*
 * Interrupt functions.
 */
static void rt2800mmio_wakeup(struct rt2x00_dev *rt2x00dev)
{
        struct ieee80211_conf conf = { .flags = 0 };
        struct rt2x00lib_conf libconf = { .conf = &conf };

        rt2800_config(rt2x00dev, &libconf, IEEE80211_CONF_CHANGE_PS);
}
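
/*
 * rt2800mmio_wakeup() forces a PS reconfiguration with an all-zero
 * ieee80211_conf (IEEE80211_CONF_PS cleared), which takes the device out
 * of powersave after an AUTO_WAKEUP interrupt.
 */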
static inline void rt2800mmio_enable_interrupt(struct rt2x00_dev *rt2x00dev,
                                               struct rt2x00_field32 irq_field)
{
        u32 reg;

        /*
         * Enable a single interrupt. The interrupt mask register
         * access needs locking.
         */
        spin_lock_irq(&rt2x00dev->irqmask_lock);
        reg = rt2x00mmio_register_read(rt2x00dev, INT_MASK_CSR);
        rt2x00_set_field32(&reg, irq_field, 1);
        rt2x00mmio_register_write(rt2x00dev, INT_MASK_CSR, reg);
        spin_unlock_irq(&rt2x00dev->irqmask_lock);
}
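
/*
 * Each tasklet below re-enables only its own interrupt source, and only
 * while the radio is still enabled; rt2800mmio_interrupt() masks the
 * source before scheduling the tasklet.
 */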
void rt2800mmio_pretbtt_tasklet(struct tasklet_struct *t)
{
        struct rt2x00_dev *rt2x00dev = from_tasklet(rt2x00dev, t,
                                                    pretbtt_tasklet);
        rt2x00lib_pretbtt(rt2x00dev);
        if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
                rt2800mmio_enable_interrupt(rt2x00dev, INT_MASK_CSR_PRE_TBTT);
}
EXPORT_SYMBOL_GPL(rt2800mmio_pretbtt_tasklet);
void rt2800mmio_tbtt_tasklet(struct tasklet_struct *t)
{
        struct rt2x00_dev *rt2x00dev = from_tasklet(rt2x00dev, t, tbtt_tasklet);
        struct rt2800_drv_data *drv_data = rt2x00dev->drv_data;
        u32 reg;

        rt2x00lib_beacondone(rt2x00dev);

        if (rt2x00dev->intf_ap_count) {
                /*
                 * The rt2800pci hardware tbtt timer is off by 1us per tbtt
                 * causing beacon skew and as a result causing problems with
                 * some powersaving clients over time. Shorten the beacon
                 * interval every 64 beacons by 64us to mitigate this effect.
                 */
                if (drv_data->tbtt_tick == (BCN_TBTT_OFFSET - 2)) {
                        reg = rt2x00mmio_register_read(rt2x00dev, BCN_TIME_CFG);
                        rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_INTERVAL,
                                           (rt2x00dev->beacon_int * 16) - 1);
                        rt2x00mmio_register_write(rt2x00dev, BCN_TIME_CFG, reg);
                } else if (drv_data->tbtt_tick == (BCN_TBTT_OFFSET - 1)) {
                        reg = rt2x00mmio_register_read(rt2x00dev, BCN_TIME_CFG);
                        rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_INTERVAL,
                                           (rt2x00dev->beacon_int * 16));
                        rt2x00mmio_register_write(rt2x00dev, BCN_TIME_CFG, reg);
                }
                drv_data->tbtt_tick++;
                drv_data->tbtt_tick %= BCN_TBTT_OFFSET;
        }

        if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
                rt2800mmio_enable_interrupt(rt2x00dev, INT_MASK_CSR_TBTT);
}
EXPORT_SYMBOL_GPL(rt2800mmio_tbtt_tasklet);
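
/*
 * BCN_TIME_CFG_BEACON_INTERVAL is written as beacon_int * 16, i.e. in
 * units of 1/16 TU (64 us). Dropping the value by one for a single beacon
 * period out of every BCN_TBTT_OFFSET therefore absorbs the roughly
 * 1 us per-tbtt drift described in the tasklet above.
 */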
void rt2800mmio_rxdone_tasklet(struct tasklet_struct *t)
{
        struct rt2x00_dev *rt2x00dev = from_tasklet(rt2x00dev, t,
                                                    rxdone_tasklet);
        if (rt2x00mmio_rxdone(rt2x00dev))
                tasklet_schedule(&rt2x00dev->rxdone_tasklet);
        else if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
                rt2800mmio_enable_interrupt(rt2x00dev, INT_MASK_CSR_RX_DONE);
}
EXPORT_SYMBOL_GPL(rt2800mmio_rxdone_tasklet);
void rt2800mmio_autowake_tasklet(struct tasklet_struct *t)
{
        struct rt2x00_dev *rt2x00dev = from_tasklet(rt2x00dev, t,
                                                    autowake_tasklet);
        rt2800mmio_wakeup(rt2x00dev);
        if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
                rt2800mmio_enable_interrupt(rt2x00dev,
                                            INT_MASK_CSR_AUTO_WAKEUP);
}
EXPORT_SYMBOL_GPL(rt2800mmio_autowake_tasklet);
static void rt2800mmio_fetch_txstatus(struct rt2x00_dev *rt2x00dev)
{
        u32 status;
        unsigned long flags;

        /*
         * The TX_FIFO_STATUS interrupt needs special care. We should
         * read TX_STA_FIFO but we should do it immediately as otherwise
         * the register can overflow and we would lose status reports.
         *
         * Hence, read the TX_STA_FIFO register and copy all tx status
         * reports into a kernel FIFO which is handled in the txstatus
         * tasklet. We use a tasklet to process the tx status reports
         * because we can schedule the tasklet multiple times (when the
         * interrupt fires again during tx status processing).
         *
         * We also read statuses from the tx status timeout timer, so use
         * the lock to prevent concurrent writes to the fifo.
         */

        spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);

        while (!kfifo_is_full(&rt2x00dev->txstatus_fifo)) {
                status = rt2x00mmio_register_read(rt2x00dev, TX_STA_FIFO);
                if (!rt2x00_get_field32(status, TX_STA_FIFO_VALID))
                        break;

                kfifo_put(&rt2x00dev->txstatus_fifo, status);
        }

        spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
}
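
/*
 * The tasklet below processes at most 16 status reports per run and
 * reschedules itself while the kfifo is not yet drained, so a large burst
 * of reports cannot monopolize a single softirq invocation.
 */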
void rt2800mmio_txstatus_tasklet(struct tasklet_struct *t)
{
        struct rt2x00_dev *rt2x00dev = from_tasklet(rt2x00dev, t,
                                                    txstatus_tasklet);

        rt2800_txdone(rt2x00dev, 16);

        if (!kfifo_is_empty(&rt2x00dev->txstatus_fifo))
                tasklet_schedule(&rt2x00dev->txstatus_tasklet);
}
EXPORT_SYMBOL_GPL(rt2800mmio_txstatus_tasklet);
irqreturn_t rt2800mmio_interrupt(int irq, void *dev_instance)
{
        struct rt2x00_dev *rt2x00dev = dev_instance;
        u32 reg, mask;

        /* Read status and ACK all interrupts */
        reg = rt2x00mmio_register_read(rt2x00dev, INT_SOURCE_CSR);
        rt2x00mmio_register_write(rt2x00dev, INT_SOURCE_CSR, reg);

        if (!reg)
                return IRQ_NONE;

        if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
                return IRQ_HANDLED;

        /*
         * Since INT_MASK_CSR and INT_SOURCE_CSR use the same bits
         * for interrupts and interrupt masks we can just use the value of
         * INT_SOURCE_CSR to create the interrupt mask.
         */
        mask = ~reg;

        if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TX_FIFO_STATUS)) {
                rt2x00_set_field32(&mask, INT_MASK_CSR_TX_FIFO_STATUS, 1);
                rt2800mmio_fetch_txstatus(rt2x00dev);
                if (!kfifo_is_empty(&rt2x00dev->txstatus_fifo))
                        tasklet_schedule(&rt2x00dev->txstatus_tasklet);
        }

        if (rt2x00_get_field32(reg, INT_SOURCE_CSR_PRE_TBTT))
                tasklet_hi_schedule(&rt2x00dev->pretbtt_tasklet);

        if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TBTT))
                tasklet_hi_schedule(&rt2x00dev->tbtt_tasklet);

        if (rt2x00_get_field32(reg, INT_SOURCE_CSR_RX_DONE))
                tasklet_schedule(&rt2x00dev->rxdone_tasklet);

        if (rt2x00_get_field32(reg, INT_SOURCE_CSR_AUTO_WAKEUP))
                tasklet_schedule(&rt2x00dev->autowake_tasklet);

        /*
         * Disable all interrupts for which a tasklet was scheduled right now,
         * the tasklet will reenable the appropriate interrupts.
         */
        spin_lock(&rt2x00dev->irqmask_lock);
        reg = rt2x00mmio_register_read(rt2x00dev, INT_MASK_CSR);
        reg &= mask;
        rt2x00mmio_register_write(rt2x00dev, INT_MASK_CSR, reg);
        spin_unlock(&rt2x00dev->irqmask_lock);

        return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(rt2800mmio_interrupt);
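
/*
 * Note on locking: rt2800mmio_interrupt() takes irqmask_lock with a plain
 * spin_lock() since it already runs in hardirq context, while
 * rt2800mmio_enable_interrupt() uses spin_lock_irq() from tasklet context
 * and rt2800mmio_fetch_txstatus() uses the irqsave variant so it can be
 * shared between the interrupt handler and the tx status timeout timer.
 */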
void rt2800mmio_toggle_irq(struct rt2x00_dev *rt2x00dev,
                           enum dev_state state)
{
        u32 reg;
        unsigned long flags;

        /*
         * When interrupts are being enabled, the interrupt source register
         * should be cleared first to assure a clean state.
         */
        if (state == STATE_RADIO_IRQ_ON) {
                reg = rt2x00mmio_register_read(rt2x00dev, INT_SOURCE_CSR);
                rt2x00mmio_register_write(rt2x00dev, INT_SOURCE_CSR, reg);
        }

        spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
        reg = 0;
        if (state == STATE_RADIO_IRQ_ON) {
                rt2x00_set_field32(&reg, INT_MASK_CSR_RX_DONE, 1);
                rt2x00_set_field32(&reg, INT_MASK_CSR_TBTT, 1);
                rt2x00_set_field32(&reg, INT_MASK_CSR_PRE_TBTT, 1);
                rt2x00_set_field32(&reg, INT_MASK_CSR_TX_FIFO_STATUS, 1);
                rt2x00_set_field32(&reg, INT_MASK_CSR_AUTO_WAKEUP, 1);
        }
        rt2x00mmio_register_write(rt2x00dev, INT_MASK_CSR, reg);
        spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);

        if (state == STATE_RADIO_IRQ_OFF) {
                /*
                 * Wait for possibly running tasklets to finish.
                 */
                tasklet_kill(&rt2x00dev->txstatus_tasklet);
                tasklet_kill(&rt2x00dev->rxdone_tasklet);
                tasklet_kill(&rt2x00dev->autowake_tasklet);
                tasklet_kill(&rt2x00dev->tbtt_tasklet);
                tasklet_kill(&rt2x00dev->pretbtt_tasklet);
        }
}
EXPORT_SYMBOL_GPL(rt2800mmio_toggle_irq);
/*
 * Queue handlers.
 */
void rt2800mmio_start_queue(struct data_queue *queue)
{
        struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
        u32 reg;

        switch (queue->qid) {
        case QID_RX:
                reg = rt2x00mmio_register_read(rt2x00dev, MAC_SYS_CTRL);
                rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 1);
                rt2x00mmio_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
                break;
        case QID_BEACON:
                reg = rt2x00mmio_register_read(rt2x00dev, BCN_TIME_CFG);
                rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 1);
                rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 1);
                rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 1);
                rt2x00mmio_register_write(rt2x00dev, BCN_TIME_CFG, reg);

                reg = rt2x00mmio_register_read(rt2x00dev, INT_TIMER_EN);
                rt2x00_set_field32(&reg, INT_TIMER_EN_PRE_TBTT_TIMER, 1);
                rt2x00mmio_register_write(rt2x00dev, INT_TIMER_EN, reg);
                break;
        default:
                break;
        }
}
EXPORT_SYMBOL_GPL(rt2800mmio_start_queue);
#define TXSTATUS_TIMEOUT 200000000
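
/*
 * TXSTATUS_TIMEOUT is handed to hrtimer_start() as a relative ktime_t,
 * so the value is in nanoseconds: 200000000 ns == 200 ms between forced
 * tx status polls.
 */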
void rt2800mmio_kick_queue(struct data_queue *queue)
{
        struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
        struct queue_entry *entry;

        switch (queue->qid) {
        case QID_AC_VO:
        case QID_AC_VI:
        case QID_AC_BE:
        case QID_AC_BK:
                WARN_ON_ONCE(rt2x00queue_empty(queue));
                entry = rt2x00queue_get_entry(queue, Q_INDEX);
                rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX(queue->qid),
                                          entry->entry_idx);
                hrtimer_start(&rt2x00dev->txstatus_timer,
                              TXSTATUS_TIMEOUT, HRTIMER_MODE_REL);
                break;
        case QID_MGMT:
                entry = rt2x00queue_get_entry(queue, Q_INDEX);
                rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX(5),
                                          entry->entry_idx);
                break;
        default:
                break;
        }
}
EXPORT_SYMBOL_GPL(rt2800mmio_kick_queue);
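
/*
 * Writing the Q_INDEX entry to TX_CTX_IDX hands every descriptor up to
 * that index to the DMA engine. The tx status timer armed above makes
 * sure queued frames are eventually reaped through
 * rt2800mmio_tx_sta_fifo_timeout() even if a TX_FIFO_STATUS interrupt is
 * missed.
 */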
void rt2800mmio_flush_queue(struct data_queue *queue, bool drop)
{
        struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
        bool tx_queue = false;
        unsigned int i;

        switch (queue->qid) {
        case QID_AC_VO:
        case QID_AC_VI:
        case QID_AC_BE:
        case QID_AC_BK:
                tx_queue = true;
                break;
        case QID_RX:
                break;
        default:
                return;
        }

        for (i = 0; i < 5; i++) {
                /*
                 * Check if the driver is already done, otherwise we
                 * have to sleep a little while to give the driver/hw
                 * the opportunity to complete the interrupt processing itself.
                 */
                if (rt2x00queue_empty(queue))
                        break;

                /*
                 * For TX queues schedule the completion tasklet to catch
                 * tx status timeouts, otherwise just wait.
                 */
                if (tx_queue)
                        queue_work(rt2x00dev->workqueue, &rt2x00dev->txdone_work);

                /*
                 * Wait for a little while to give the driver
                 * the opportunity to recover itself.
                 */
                msleep(50);
        }
}
EXPORT_SYMBOL_GPL(rt2800mmio_flush_queue);
void rt2800mmio_stop_queue(struct data_queue *queue)
{
        struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
        u32 reg;

        switch (queue->qid) {
        case QID_RX:
                reg = rt2x00mmio_register_read(rt2x00dev, MAC_SYS_CTRL);
                rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 0);
                rt2x00mmio_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
                break;
        case QID_BEACON:
                reg = rt2x00mmio_register_read(rt2x00dev, BCN_TIME_CFG);
                rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 0);
                rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 0);
                rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 0);
                rt2x00mmio_register_write(rt2x00dev, BCN_TIME_CFG, reg);

                reg = rt2x00mmio_register_read(rt2x00dev, INT_TIMER_EN);
                rt2x00_set_field32(&reg, INT_TIMER_EN_PRE_TBTT_TIMER, 0);
                rt2x00mmio_register_write(rt2x00dev, INT_TIMER_EN, reg);

                /*
                 * Wait for current invocation to finish. The tasklet
                 * won't be scheduled anymore afterwards since we disabled
                 * the TBTT and PRE TBTT timer.
                 */
                tasklet_kill(&rt2x00dev->tbtt_tasklet);
                tasklet_kill(&rt2x00dev->pretbtt_tasklet);
                break;
        default:
                break;
        }
}
EXPORT_SYMBOL_GPL(rt2800mmio_stop_queue);
void rt2800mmio_queue_init(struct data_queue *queue)
{
        struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
        unsigned short txwi_size, rxwi_size;

        rt2800_get_txwi_rxwi_size(rt2x00dev, &txwi_size, &rxwi_size);

        switch (queue->qid) {
        case QID_RX:
                queue->limit = 128;
                queue->data_size = AGGREGATION_SIZE;
                queue->desc_size = RXD_DESC_SIZE;
                queue->winfo_size = rxwi_size;
                queue->priv_size = sizeof(struct queue_entry_priv_mmio);
                break;

        case QID_AC_VO:
        case QID_AC_VI:
        case QID_AC_BE:
        case QID_AC_BK:
                queue->limit = 64;
                queue->data_size = AGGREGATION_SIZE;
                queue->desc_size = TXD_DESC_SIZE;
                queue->winfo_size = txwi_size;
                queue->priv_size = sizeof(struct queue_entry_priv_mmio);
                break;

        case QID_BEACON:
                queue->limit = 8;
                queue->data_size = 0; /* No DMA required for beacons */
                queue->desc_size = TXD_DESC_SIZE;
                queue->winfo_size = txwi_size;
                queue->priv_size = sizeof(struct queue_entry_priv_mmio);
                break;

        case QID_ATIM:
        default:
                BUG();
                break;
        }
}
EXPORT_SYMBOL_GPL(rt2800mmio_queue_init);
/*
 * Initialization functions.
 */
bool rt2800mmio_get_entry_state(struct queue_entry *entry)
{
        struct queue_entry_priv_mmio *entry_priv = entry->priv_data;
        u32 word;

        if (entry->queue->qid == QID_RX) {
                word = rt2x00_desc_read(entry_priv->desc, 1);

                return (!rt2x00_get_field32(word, RXD_W1_DMA_DONE));
        } else {
                word = rt2x00_desc_read(entry_priv->desc, 1);

                return (!rt2x00_get_field32(word, TXD_W1_DMA_DONE));
        }
}
EXPORT_SYMBOL_GPL(rt2800mmio_get_entry_state);
void rt2800mmio_clear_entry(struct queue_entry *entry)
{
        struct queue_entry_priv_mmio *entry_priv = entry->priv_data;
        struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
        struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
        u32 word;

        if (entry->queue->qid == QID_RX) {
                word = rt2x00_desc_read(entry_priv->desc, 0);
                rt2x00_set_field32(&word, RXD_W0_SDP0, skbdesc->skb_dma);
                rt2x00_desc_write(entry_priv->desc, 0, word);

                word = rt2x00_desc_read(entry_priv->desc, 1);
                rt2x00_set_field32(&word, RXD_W1_DMA_DONE, 0);
                rt2x00_desc_write(entry_priv->desc, 1, word);

                /*
                 * Set RX IDX in register to inform hardware that we have
                 * handled this entry and it is available for reuse again.
                 */
                rt2x00mmio_register_write(rt2x00dev, RX_CRX_IDX,
                                          entry->entry_idx);
        } else {
                word = rt2x00_desc_read(entry_priv->desc, 1);
                rt2x00_set_field32(&word, TXD_W1_DMA_DONE, 1);
                rt2x00_desc_write(entry_priv->desc, 1, word);

                /* If last entry stop txstatus timer */
                if (entry->queue->length == 1)
                        hrtimer_cancel(&rt2x00dev->txstatus_timer);
        }
}
EXPORT_SYMBOL_GPL(rt2800mmio_clear_entry);
int rt2800mmio_init_queues(struct rt2x00_dev *rt2x00dev)
{
        struct queue_entry_priv_mmio *entry_priv;

        /*
         * Initialize registers.
         */
        entry_priv = rt2x00dev->tx[0].entries[0].priv_data;
        rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR0,
                                  entry_priv->desc_dma);
        rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT0,
                                  rt2x00dev->tx[0].limit);
        rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX0, 0);
        rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX0, 0);

        entry_priv = rt2x00dev->tx[1].entries[0].priv_data;
        rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR1,
                                  entry_priv->desc_dma);
        rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT1,
                                  rt2x00dev->tx[1].limit);
        rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX1, 0);
        rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX1, 0);

        entry_priv = rt2x00dev->tx[2].entries[0].priv_data;
        rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR2,
                                  entry_priv->desc_dma);
        rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT2,
                                  rt2x00dev->tx[2].limit);
        rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX2, 0);
        rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX2, 0);

        entry_priv = rt2x00dev->tx[3].entries[0].priv_data;
        rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR3,
                                  entry_priv->desc_dma);
        rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT3,
                                  rt2x00dev->tx[3].limit);
        rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX3, 0);
        rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX3, 0);

        rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR4, 0);
        rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT4, 0);
        rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX4, 0);
        rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX4, 0);

        rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR5, 0);
        rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT5, 0);
        rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX5, 0);
        rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX5, 0);

        entry_priv = rt2x00dev->rx->entries[0].priv_data;
        rt2x00mmio_register_write(rt2x00dev, RX_BASE_PTR,
                                  entry_priv->desc_dma);
        rt2x00mmio_register_write(rt2x00dev, RX_MAX_CNT,
                                  rt2x00dev->rx[0].limit);
        rt2x00mmio_register_write(rt2x00dev, RX_CRX_IDX,
                                  rt2x00dev->rx[0].limit - 1);
        rt2x00mmio_register_write(rt2x00dev, RX_DRX_IDX, 0);

        rt2800_disable_wpdma(rt2x00dev);

        rt2x00mmio_register_write(rt2x00dev, DELAY_INT_CFG, 0);

        return 0;
}
EXPORT_SYMBOL_GPL(rt2800mmio_init_queues);
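
/*
 * RX_CRX_IDX is initialized to the last descriptor so that, with
 * RX_DRX_IDX starting at 0, the hardware sees the whole RX ring as
 * available for reception.
 */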
int rt2800mmio_init_registers(struct rt2x00_dev *rt2x00dev)
{
        u32 reg;

        /*
         * Reset DMA indexes
         */
        reg = rt2x00mmio_register_read(rt2x00dev, WPDMA_RST_IDX);
        rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX0, 1);
        rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX1, 1);
        rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX2, 1);
        rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX3, 1);
        rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX4, 1);
        rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX5, 1);
        rt2x00_set_field32(&reg, WPDMA_RST_IDX_DRX_IDX0, 1);
        rt2x00mmio_register_write(rt2x00dev, WPDMA_RST_IDX, reg);

        rt2x00mmio_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e1f);
        rt2x00mmio_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e00);

        if (rt2x00_is_pcie(rt2x00dev) &&
            (rt2x00_rt(rt2x00dev, RT3090) ||
             rt2x00_rt(rt2x00dev, RT3390) ||
             rt2x00_rt(rt2x00dev, RT3572) ||
             rt2x00_rt(rt2x00dev, RT3593) ||
             rt2x00_rt(rt2x00dev, RT5390) ||
             rt2x00_rt(rt2x00dev, RT5392) ||
             rt2x00_rt(rt2x00dev, RT5592))) {
                reg = rt2x00mmio_register_read(rt2x00dev, AUX_CTRL);
                rt2x00_set_field32(&reg, AUX_CTRL_FORCE_PCIE_CLK, 1);
                rt2x00_set_field32(&reg, AUX_CTRL_WAKE_PCIE_EN, 1);
                rt2x00mmio_register_write(rt2x00dev, AUX_CTRL, reg);
        }

        rt2x00mmio_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000003);

        reg = 0;
        rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_CSR, 1);
        rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_BBP, 1);
        rt2x00mmio_register_write(rt2x00dev, MAC_SYS_CTRL, reg);

        rt2x00mmio_register_write(rt2x00dev, MAC_SYS_CTRL, 0x00000000);

        return 0;
}
EXPORT_SYMBOL_GPL(rt2800mmio_init_registers);
/*
 * Device state switch handlers.
 */
int rt2800mmio_enable_radio(struct rt2x00_dev *rt2x00dev)
{
        /* Wait for DMA, ignore error until we initialize queues. */
        rt2800_wait_wpdma_ready(rt2x00dev);

        if (unlikely(rt2800mmio_init_queues(rt2x00dev)))
                return -EIO;

        return rt2800_enable_radio(rt2x00dev);
}
EXPORT_SYMBOL_GPL(rt2800mmio_enable_radio);
static void rt2800mmio_work_txdone(struct work_struct *work)
{
        struct rt2x00_dev *rt2x00dev =
                container_of(work, struct rt2x00_dev, txdone_work);

        if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
                return;

        while (!kfifo_is_empty(&rt2x00dev->txstatus_fifo) ||
               rt2800_txstatus_timeout(rt2x00dev)) {

                tasklet_disable(&rt2x00dev->txstatus_tasklet);
                rt2800_txdone(rt2x00dev, UINT_MAX);
                rt2800_txdone_nostatus(rt2x00dev);
                tasklet_enable(&rt2x00dev->txstatus_tasklet);
        }

        if (rt2800_txstatus_pending(rt2x00dev))
                hrtimer_start(&rt2x00dev->txstatus_timer,
                              TXSTATUS_TIMEOUT, HRTIMER_MODE_REL);
}
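
/*
 * The tasklet_disable()/tasklet_enable() pair above keeps the txstatus
 * tasklet from consuming the same status reports concurrently with this
 * work item while it drains the fifo and expires timed-out entries.
 */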
static enum hrtimer_restart rt2800mmio_tx_sta_fifo_timeout(struct hrtimer *timer)
{
        struct rt2x00_dev *rt2x00dev =
                container_of(timer, struct rt2x00_dev, txstatus_timer);

        if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
                goto out;

        if (!rt2800_txstatus_pending(rt2x00dev))
                goto out;

        rt2800mmio_fetch_txstatus(rt2x00dev);
        if (!kfifo_is_empty(&rt2x00dev->txstatus_fifo))
                tasklet_schedule(&rt2x00dev->txstatus_tasklet);

        queue_work(rt2x00dev->workqueue, &rt2x00dev->txdone_work);
out:
        return HRTIMER_NORESTART;
}
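
/*
 * The timer never restarts itself (HRTIMER_NORESTART); it is re-armed
 * from rt2800mmio_kick_queue() and rt2800mmio_work_txdone() for as long
 * as tx status reports are still expected.
 */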
int rt2800mmio_probe_hw(struct rt2x00_dev *rt2x00dev)
{
        int retval;

        retval = rt2800_probe_hw(rt2x00dev);
        if (retval)
                return retval;

        /*
         * Set txstatus timer function.
         */
        rt2x00dev->txstatus_timer.function = rt2800mmio_tx_sta_fifo_timeout;

        /*
         * Overwrite TX done handler
         */
        INIT_WORK(&rt2x00dev->txdone_work, rt2800mmio_work_txdone);

        return 0;
}
EXPORT_SYMBOL_GPL(rt2800mmio_probe_hw);
MODULE_AUTHOR(DRV_PROJECT);
MODULE_VERSION(DRV_VERSION);
MODULE_DESCRIPTION("rt2800 MMIO library");
MODULE_LICENSE("GPL");