/* drivers/net/wireless/ralink/rt2x00/rt2800mmio.c */
/* Copyright (C) 2009 - 2010 Ivo van Doorn <IvDoorn@gmail.com>
 * Copyright (C) 2009 Alban Browaeys <prahal@yahoo.com>
 * Copyright (C) 2009 Felix Fietkau <nbd@openwrt.org>
 * Copyright (C) 2009 Luis Correia <luis.f.correia@gmail.com>
 * Copyright (C) 2009 Mattias Nissler <mattias.nissler@gmx.de>
 * Copyright (C) 2009 Mark Asselstine <asselsm@gmail.com>
 * Copyright (C) 2009 Xose Vazquez Perez <xose.vazquez@gmail.com>
 * Copyright (C) 2009 Bart Zolnierkiewicz <bzolnier@gmail.com>
 * <http://rt2x00.serialmonkey.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
/* Module: rt2800mmio
 * Abstract: rt2800 MMIO device routines.
 */
29 #include <linux/kernel.h>
30 #include <linux/module.h>
31 #include <linux/export.h>
33 #include "rt2x00.h"
34 #include "rt2x00mmio.h"
35 #include "rt2800.h"
36 #include "rt2800lib.h"
37 #include "rt2800mmio.h"
/*
 * TX descriptor initialization
 */
42 __le32 *rt2800mmio_get_txwi(struct queue_entry *entry)
44 return (__le32 *) entry->skb->data;
46 EXPORT_SYMBOL_GPL(rt2800mmio_get_txwi);
48 void rt2800mmio_write_tx_desc(struct queue_entry *entry,
49 struct txentry_desc *txdesc)
51 struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
52 struct queue_entry_priv_mmio *entry_priv = entry->priv_data;
53 __le32 *txd = entry_priv->desc;
54 u32 word;
55 const unsigned int txwi_size = entry->queue->winfo_size;
58 * The buffers pointed by SD_PTR0/SD_LEN0 and SD_PTR1/SD_LEN1
59 * must contains a TXWI structure + 802.11 header + padding + 802.11
60 * data. We choose to have SD_PTR0/SD_LEN0 only contains TXWI and
61 * SD_PTR1/SD_LEN1 contains 802.11 header + padding + 802.11
62 * data. It means that LAST_SEC0 is always 0.
66 * Initialize TX descriptor
68 word = 0;
69 rt2x00_set_field32(&word, TXD_W0_SD_PTR0, skbdesc->skb_dma);
70 rt2x00_desc_write(txd, 0, word);
72 word = 0;
73 rt2x00_set_field32(&word, TXD_W1_SD_LEN1, entry->skb->len);
74 rt2x00_set_field32(&word, TXD_W1_LAST_SEC1,
75 !test_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags));
76 rt2x00_set_field32(&word, TXD_W1_BURST,
77 test_bit(ENTRY_TXD_BURST, &txdesc->flags));
78 rt2x00_set_field32(&word, TXD_W1_SD_LEN0, txwi_size);
79 rt2x00_set_field32(&word, TXD_W1_LAST_SEC0, 0);
80 rt2x00_set_field32(&word, TXD_W1_DMA_DONE, 0);
81 rt2x00_desc_write(txd, 1, word);
83 word = 0;
84 rt2x00_set_field32(&word, TXD_W2_SD_PTR1,
85 skbdesc->skb_dma + txwi_size);
86 rt2x00_desc_write(txd, 2, word);
88 word = 0;
89 rt2x00_set_field32(&word, TXD_W3_WIV,
90 !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc->flags));
91 rt2x00_set_field32(&word, TXD_W3_QSEL, 2);
92 rt2x00_desc_write(txd, 3, word);
95 * Register descriptor details in skb frame descriptor.
97 skbdesc->desc = txd;
98 skbdesc->desc_len = TXD_DESC_SIZE;
100 EXPORT_SYMBOL_GPL(rt2800mmio_write_tx_desc);
/*
 * RX control handlers
 */
105 void rt2800mmio_fill_rxdone(struct queue_entry *entry,
106 struct rxdone_entry_desc *rxdesc)
108 struct queue_entry_priv_mmio *entry_priv = entry->priv_data;
109 __le32 *rxd = entry_priv->desc;
110 u32 word;
112 word = rt2x00_desc_read(rxd, 3);
114 if (rt2x00_get_field32(word, RXD_W3_CRC_ERROR))
115 rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC;
118 * Unfortunately we don't know the cipher type used during
119 * decryption. This prevents us from correct providing
120 * correct statistics through debugfs.
122 rxdesc->cipher_status = rt2x00_get_field32(word, RXD_W3_CIPHER_ERROR);
124 if (rt2x00_get_field32(word, RXD_W3_DECRYPTED)) {
126 * Hardware has stripped IV/EIV data from 802.11 frame during
127 * decryption. Unfortunately the descriptor doesn't contain
128 * any fields with the EIV/IV data either, so they can't
129 * be restored by rt2x00lib.
131 rxdesc->flags |= RX_FLAG_IV_STRIPPED;
134 * The hardware has already checked the Michael Mic and has
135 * stripped it from the frame. Signal this to mac80211.
137 rxdesc->flags |= RX_FLAG_MMIC_STRIPPED;
139 if (rxdesc->cipher_status == RX_CRYPTO_SUCCESS) {
140 rxdesc->flags |= RX_FLAG_DECRYPTED;
141 } else if (rxdesc->cipher_status == RX_CRYPTO_FAIL_MIC) {
143 * In order to check the Michael Mic, the packet must have
144 * been decrypted. Mac80211 doesnt check the MMIC failure
145 * flag to initiate MMIC countermeasures if the decoded flag
146 * has not been set.
148 rxdesc->flags |= RX_FLAG_DECRYPTED;
150 rxdesc->flags |= RX_FLAG_MMIC_ERROR;
154 if (rt2x00_get_field32(word, RXD_W3_MY_BSS))
155 rxdesc->dev_flags |= RXDONE_MY_BSS;
157 if (rt2x00_get_field32(word, RXD_W3_L2PAD))
158 rxdesc->dev_flags |= RXDONE_L2PAD;
161 * Process the RXWI structure that is at the start of the buffer.
163 rt2800_process_rxwi(entry, rxdesc);
165 EXPORT_SYMBOL_GPL(rt2800mmio_fill_rxdone);
/*
 * Interrupt functions.
 */
170 static void rt2800mmio_wakeup(struct rt2x00_dev *rt2x00dev)
172 struct ieee80211_conf conf = { .flags = 0 };
173 struct rt2x00lib_conf libconf = { .conf = &conf };
175 rt2800_config(rt2x00dev, &libconf, IEEE80211_CONF_CHANGE_PS);
178 static inline void rt2800mmio_enable_interrupt(struct rt2x00_dev *rt2x00dev,
179 struct rt2x00_field32 irq_field)
181 u32 reg;
184 * Enable a single interrupt. The interrupt mask register
185 * access needs locking.
187 spin_lock_irq(&rt2x00dev->irqmask_lock);
188 reg = rt2x00mmio_register_read(rt2x00dev, INT_MASK_CSR);
189 rt2x00_set_field32(&reg, irq_field, 1);
190 rt2x00mmio_register_write(rt2x00dev, INT_MASK_CSR, reg);
191 spin_unlock_irq(&rt2x00dev->irqmask_lock);
194 void rt2800mmio_pretbtt_tasklet(unsigned long data)
196 struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
197 rt2x00lib_pretbtt(rt2x00dev);
198 if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
199 rt2800mmio_enable_interrupt(rt2x00dev, INT_MASK_CSR_PRE_TBTT);
201 EXPORT_SYMBOL_GPL(rt2800mmio_pretbtt_tasklet);
203 void rt2800mmio_tbtt_tasklet(unsigned long data)
205 struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
206 struct rt2800_drv_data *drv_data = rt2x00dev->drv_data;
207 u32 reg;
209 rt2x00lib_beacondone(rt2x00dev);
211 if (rt2x00dev->intf_ap_count) {
213 * The rt2800pci hardware tbtt timer is off by 1us per tbtt
214 * causing beacon skew and as a result causing problems with
215 * some powersaving clients over time. Shorten the beacon
216 * interval every 64 beacons by 64us to mitigate this effect.
218 if (drv_data->tbtt_tick == (BCN_TBTT_OFFSET - 2)) {
219 reg = rt2x00mmio_register_read(rt2x00dev, BCN_TIME_CFG);
220 rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_INTERVAL,
221 (rt2x00dev->beacon_int * 16) - 1);
222 rt2x00mmio_register_write(rt2x00dev, BCN_TIME_CFG, reg);
223 } else if (drv_data->tbtt_tick == (BCN_TBTT_OFFSET - 1)) {
224 reg = rt2x00mmio_register_read(rt2x00dev, BCN_TIME_CFG);
225 rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_INTERVAL,
226 (rt2x00dev->beacon_int * 16));
227 rt2x00mmio_register_write(rt2x00dev, BCN_TIME_CFG, reg);
229 drv_data->tbtt_tick++;
230 drv_data->tbtt_tick %= BCN_TBTT_OFFSET;
233 if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
234 rt2800mmio_enable_interrupt(rt2x00dev, INT_MASK_CSR_TBTT);
236 EXPORT_SYMBOL_GPL(rt2800mmio_tbtt_tasklet);
238 void rt2800mmio_rxdone_tasklet(unsigned long data)
240 struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
241 if (rt2x00mmio_rxdone(rt2x00dev))
242 tasklet_schedule(&rt2x00dev->rxdone_tasklet);
243 else if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
244 rt2800mmio_enable_interrupt(rt2x00dev, INT_MASK_CSR_RX_DONE);
246 EXPORT_SYMBOL_GPL(rt2800mmio_rxdone_tasklet);
248 void rt2800mmio_autowake_tasklet(unsigned long data)
250 struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
251 rt2800mmio_wakeup(rt2x00dev);
252 if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
253 rt2800mmio_enable_interrupt(rt2x00dev,
254 INT_MASK_CSR_AUTO_WAKEUP);
256 EXPORT_SYMBOL_GPL(rt2800mmio_autowake_tasklet);
258 static void rt2800mmio_fetch_txstatus(struct rt2x00_dev *rt2x00dev)
260 u32 status;
261 unsigned long flags;
264 * The TX_FIFO_STATUS interrupt needs special care. We should
265 * read TX_STA_FIFO but we should do it immediately as otherwise
266 * the register can overflow and we would lose status reports.
268 * Hence, read the TX_STA_FIFO register and copy all tx status
269 * reports into a kernel FIFO which is handled in the txstatus
270 * tasklet. We use a tasklet to process the tx status reports
271 * because we can schedule the tasklet multiple times (when the
272 * interrupt fires again during tx status processing).
274 * We also read statuses from tx status timeout timer, use
275 * lock to prevent concurent writes to fifo.
278 spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
280 while (!kfifo_is_full(&rt2x00dev->txstatus_fifo)) {
281 status = rt2x00mmio_register_read(rt2x00dev, TX_STA_FIFO);
282 if (!rt2x00_get_field32(status, TX_STA_FIFO_VALID))
283 break;
285 kfifo_put(&rt2x00dev->txstatus_fifo, status);
288 spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
291 void rt2800mmio_txstatus_tasklet(unsigned long data)
293 struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
295 rt2800_txdone(rt2x00dev, 16);
297 if (!kfifo_is_empty(&rt2x00dev->txstatus_fifo))
298 tasklet_schedule(&rt2x00dev->txstatus_tasklet);
301 EXPORT_SYMBOL_GPL(rt2800mmio_txstatus_tasklet);
303 irqreturn_t rt2800mmio_interrupt(int irq, void *dev_instance)
305 struct rt2x00_dev *rt2x00dev = dev_instance;
306 u32 reg, mask;
308 /* Read status and ACK all interrupts */
309 reg = rt2x00mmio_register_read(rt2x00dev, INT_SOURCE_CSR);
310 rt2x00mmio_register_write(rt2x00dev, INT_SOURCE_CSR, reg);
312 if (!reg)
313 return IRQ_NONE;
315 if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
316 return IRQ_HANDLED;
319 * Since INT_MASK_CSR and INT_SOURCE_CSR use the same bits
320 * for interrupts and interrupt masks we can just use the value of
321 * INT_SOURCE_CSR to create the interrupt mask.
323 mask = ~reg;
325 if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TX_FIFO_STATUS)) {
326 rt2x00_set_field32(&mask, INT_MASK_CSR_TX_FIFO_STATUS, 1);
327 rt2800mmio_fetch_txstatus(rt2x00dev);
328 if (!kfifo_is_empty(&rt2x00dev->txstatus_fifo))
329 tasklet_schedule(&rt2x00dev->txstatus_tasklet);
332 if (rt2x00_get_field32(reg, INT_SOURCE_CSR_PRE_TBTT))
333 tasklet_hi_schedule(&rt2x00dev->pretbtt_tasklet);
335 if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TBTT))
336 tasklet_hi_schedule(&rt2x00dev->tbtt_tasklet);
338 if (rt2x00_get_field32(reg, INT_SOURCE_CSR_RX_DONE))
339 tasklet_schedule(&rt2x00dev->rxdone_tasklet);
341 if (rt2x00_get_field32(reg, INT_SOURCE_CSR_AUTO_WAKEUP))
342 tasklet_schedule(&rt2x00dev->autowake_tasklet);
345 * Disable all interrupts for which a tasklet was scheduled right now,
346 * the tasklet will reenable the appropriate interrupts.
348 spin_lock(&rt2x00dev->irqmask_lock);
349 reg = rt2x00mmio_register_read(rt2x00dev, INT_MASK_CSR);
350 reg &= mask;
351 rt2x00mmio_register_write(rt2x00dev, INT_MASK_CSR, reg);
352 spin_unlock(&rt2x00dev->irqmask_lock);
354 return IRQ_HANDLED;
356 EXPORT_SYMBOL_GPL(rt2800mmio_interrupt);
358 void rt2800mmio_toggle_irq(struct rt2x00_dev *rt2x00dev,
359 enum dev_state state)
361 u32 reg;
362 unsigned long flags;
365 * When interrupts are being enabled, the interrupt registers
366 * should clear the register to assure a clean state.
368 if (state == STATE_RADIO_IRQ_ON) {
369 reg = rt2x00mmio_register_read(rt2x00dev, INT_SOURCE_CSR);
370 rt2x00mmio_register_write(rt2x00dev, INT_SOURCE_CSR, reg);
373 spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
374 reg = 0;
375 if (state == STATE_RADIO_IRQ_ON) {
376 rt2x00_set_field32(&reg, INT_MASK_CSR_RX_DONE, 1);
377 rt2x00_set_field32(&reg, INT_MASK_CSR_TBTT, 1);
378 rt2x00_set_field32(&reg, INT_MASK_CSR_PRE_TBTT, 1);
379 rt2x00_set_field32(&reg, INT_MASK_CSR_TX_FIFO_STATUS, 1);
380 rt2x00_set_field32(&reg, INT_MASK_CSR_AUTO_WAKEUP, 1);
382 rt2x00mmio_register_write(rt2x00dev, INT_MASK_CSR, reg);
383 spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
385 if (state == STATE_RADIO_IRQ_OFF) {
387 * Wait for possibly running tasklets to finish.
389 tasklet_kill(&rt2x00dev->txstatus_tasklet);
390 tasklet_kill(&rt2x00dev->rxdone_tasklet);
391 tasklet_kill(&rt2x00dev->autowake_tasklet);
392 tasklet_kill(&rt2x00dev->tbtt_tasklet);
393 tasklet_kill(&rt2x00dev->pretbtt_tasklet);
396 EXPORT_SYMBOL_GPL(rt2800mmio_toggle_irq);
/*
 * Queue handlers.
 */
401 void rt2800mmio_start_queue(struct data_queue *queue)
403 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
404 u32 reg;
406 switch (queue->qid) {
407 case QID_RX:
408 reg = rt2x00mmio_register_read(rt2x00dev, MAC_SYS_CTRL);
409 rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 1);
410 rt2x00mmio_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
411 break;
412 case QID_BEACON:
413 reg = rt2x00mmio_register_read(rt2x00dev, BCN_TIME_CFG);
414 rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 1);
415 rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 1);
416 rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 1);
417 rt2x00mmio_register_write(rt2x00dev, BCN_TIME_CFG, reg);
419 reg = rt2x00mmio_register_read(rt2x00dev, INT_TIMER_EN);
420 rt2x00_set_field32(&reg, INT_TIMER_EN_PRE_TBTT_TIMER, 1);
421 rt2x00mmio_register_write(rt2x00dev, INT_TIMER_EN, reg);
422 break;
423 default:
424 break;
427 EXPORT_SYMBOL_GPL(rt2800mmio_start_queue);
429 /* 200 ms */
430 #define TXSTATUS_TIMEOUT 200000000
432 void rt2800mmio_kick_queue(struct data_queue *queue)
434 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
435 struct queue_entry *entry;
437 switch (queue->qid) {
438 case QID_AC_VO:
439 case QID_AC_VI:
440 case QID_AC_BE:
441 case QID_AC_BK:
442 WARN_ON_ONCE(rt2x00queue_empty(queue));
443 entry = rt2x00queue_get_entry(queue, Q_INDEX);
444 rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX(queue->qid),
445 entry->entry_idx);
446 hrtimer_start(&rt2x00dev->txstatus_timer,
447 TXSTATUS_TIMEOUT, HRTIMER_MODE_REL);
448 break;
449 case QID_MGMT:
450 entry = rt2x00queue_get_entry(queue, Q_INDEX);
451 rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX(5),
452 entry->entry_idx);
453 break;
454 default:
455 break;
458 EXPORT_SYMBOL_GPL(rt2800mmio_kick_queue);
460 void rt2800mmio_flush_queue(struct data_queue *queue, bool drop)
462 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
463 bool tx_queue = false;
464 unsigned int i;
466 switch (queue->qid) {
467 case QID_AC_VO:
468 case QID_AC_VI:
469 case QID_AC_BE:
470 case QID_AC_BK:
471 tx_queue = true;
472 break;
473 case QID_RX:
474 break;
475 default:
476 return;
479 for (i = 0; i < 5; i++) {
481 * Check if the driver is already done, otherwise we
482 * have to sleep a little while to give the driver/hw
483 * the oppurtunity to complete interrupt process itself.
485 if (rt2x00queue_empty(queue))
486 break;
489 * For TX queues schedule completion tasklet to catch
490 * tx status timeouts, othewise just wait.
492 if (tx_queue)
493 queue_work(rt2x00dev->workqueue, &rt2x00dev->txdone_work);
496 * Wait for a little while to give the driver
497 * the oppurtunity to recover itself.
499 msleep(50);
502 EXPORT_SYMBOL_GPL(rt2800mmio_flush_queue);
504 void rt2800mmio_stop_queue(struct data_queue *queue)
506 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
507 u32 reg;
509 switch (queue->qid) {
510 case QID_RX:
511 reg = rt2x00mmio_register_read(rt2x00dev, MAC_SYS_CTRL);
512 rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 0);
513 rt2x00mmio_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
514 break;
515 case QID_BEACON:
516 reg = rt2x00mmio_register_read(rt2x00dev, BCN_TIME_CFG);
517 rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 0);
518 rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 0);
519 rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 0);
520 rt2x00mmio_register_write(rt2x00dev, BCN_TIME_CFG, reg);
522 reg = rt2x00mmio_register_read(rt2x00dev, INT_TIMER_EN);
523 rt2x00_set_field32(&reg, INT_TIMER_EN_PRE_TBTT_TIMER, 0);
524 rt2x00mmio_register_write(rt2x00dev, INT_TIMER_EN, reg);
527 * Wait for current invocation to finish. The tasklet
528 * won't be scheduled anymore afterwards since we disabled
529 * the TBTT and PRE TBTT timer.
531 tasklet_kill(&rt2x00dev->tbtt_tasklet);
532 tasklet_kill(&rt2x00dev->pretbtt_tasklet);
534 break;
535 default:
536 break;
539 EXPORT_SYMBOL_GPL(rt2800mmio_stop_queue);
541 void rt2800mmio_queue_init(struct data_queue *queue)
543 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
544 unsigned short txwi_size, rxwi_size;
546 rt2800_get_txwi_rxwi_size(rt2x00dev, &txwi_size, &rxwi_size);
548 switch (queue->qid) {
549 case QID_RX:
550 queue->limit = 128;
551 queue->data_size = AGGREGATION_SIZE;
552 queue->desc_size = RXD_DESC_SIZE;
553 queue->winfo_size = rxwi_size;
554 queue->priv_size = sizeof(struct queue_entry_priv_mmio);
555 break;
557 case QID_AC_VO:
558 case QID_AC_VI:
559 case QID_AC_BE:
560 case QID_AC_BK:
561 queue->limit = 64;
562 queue->data_size = AGGREGATION_SIZE;
563 queue->desc_size = TXD_DESC_SIZE;
564 queue->winfo_size = txwi_size;
565 queue->priv_size = sizeof(struct queue_entry_priv_mmio);
566 break;
568 case QID_BEACON:
569 queue->limit = 8;
570 queue->data_size = 0; /* No DMA required for beacons */
571 queue->desc_size = TXD_DESC_SIZE;
572 queue->winfo_size = txwi_size;
573 queue->priv_size = sizeof(struct queue_entry_priv_mmio);
574 break;
576 case QID_ATIM:
577 /* fallthrough */
578 default:
579 BUG();
580 break;
583 EXPORT_SYMBOL_GPL(rt2800mmio_queue_init);
/*
 * Initialization functions.
 */
588 bool rt2800mmio_get_entry_state(struct queue_entry *entry)
590 struct queue_entry_priv_mmio *entry_priv = entry->priv_data;
591 u32 word;
593 if (entry->queue->qid == QID_RX) {
594 word = rt2x00_desc_read(entry_priv->desc, 1);
596 return (!rt2x00_get_field32(word, RXD_W1_DMA_DONE));
597 } else {
598 word = rt2x00_desc_read(entry_priv->desc, 1);
600 return (!rt2x00_get_field32(word, TXD_W1_DMA_DONE));
603 EXPORT_SYMBOL_GPL(rt2800mmio_get_entry_state);
605 void rt2800mmio_clear_entry(struct queue_entry *entry)
607 struct queue_entry_priv_mmio *entry_priv = entry->priv_data;
608 struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
609 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
610 u32 word;
612 if (entry->queue->qid == QID_RX) {
613 word = rt2x00_desc_read(entry_priv->desc, 0);
614 rt2x00_set_field32(&word, RXD_W0_SDP0, skbdesc->skb_dma);
615 rt2x00_desc_write(entry_priv->desc, 0, word);
617 word = rt2x00_desc_read(entry_priv->desc, 1);
618 rt2x00_set_field32(&word, RXD_W1_DMA_DONE, 0);
619 rt2x00_desc_write(entry_priv->desc, 1, word);
622 * Set RX IDX in register to inform hardware that we have
623 * handled this entry and it is available for reuse again.
625 rt2x00mmio_register_write(rt2x00dev, RX_CRX_IDX,
626 entry->entry_idx);
627 } else {
628 word = rt2x00_desc_read(entry_priv->desc, 1);
629 rt2x00_set_field32(&word, TXD_W1_DMA_DONE, 1);
630 rt2x00_desc_write(entry_priv->desc, 1, word);
632 /* If last entry stop txstatus timer */
633 if (entry->queue->length == 1)
634 hrtimer_cancel(&rt2x00dev->txstatus_timer);
637 EXPORT_SYMBOL_GPL(rt2800mmio_clear_entry);
639 int rt2800mmio_init_queues(struct rt2x00_dev *rt2x00dev)
641 struct queue_entry_priv_mmio *entry_priv;
644 * Initialize registers.
646 entry_priv = rt2x00dev->tx[0].entries[0].priv_data;
647 rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR0,
648 entry_priv->desc_dma);
649 rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT0,
650 rt2x00dev->tx[0].limit);
651 rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX0, 0);
652 rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX0, 0);
654 entry_priv = rt2x00dev->tx[1].entries[0].priv_data;
655 rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR1,
656 entry_priv->desc_dma);
657 rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT1,
658 rt2x00dev->tx[1].limit);
659 rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX1, 0);
660 rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX1, 0);
662 entry_priv = rt2x00dev->tx[2].entries[0].priv_data;
663 rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR2,
664 entry_priv->desc_dma);
665 rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT2,
666 rt2x00dev->tx[2].limit);
667 rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX2, 0);
668 rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX2, 0);
670 entry_priv = rt2x00dev->tx[3].entries[0].priv_data;
671 rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR3,
672 entry_priv->desc_dma);
673 rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT3,
674 rt2x00dev->tx[3].limit);
675 rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX3, 0);
676 rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX3, 0);
678 rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR4, 0);
679 rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT4, 0);
680 rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX4, 0);
681 rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX4, 0);
683 rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR5, 0);
684 rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT5, 0);
685 rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX5, 0);
686 rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX5, 0);
688 entry_priv = rt2x00dev->rx->entries[0].priv_data;
689 rt2x00mmio_register_write(rt2x00dev, RX_BASE_PTR,
690 entry_priv->desc_dma);
691 rt2x00mmio_register_write(rt2x00dev, RX_MAX_CNT,
692 rt2x00dev->rx[0].limit);
693 rt2x00mmio_register_write(rt2x00dev, RX_CRX_IDX,
694 rt2x00dev->rx[0].limit - 1);
695 rt2x00mmio_register_write(rt2x00dev, RX_DRX_IDX, 0);
697 rt2800_disable_wpdma(rt2x00dev);
699 rt2x00mmio_register_write(rt2x00dev, DELAY_INT_CFG, 0);
701 return 0;
703 EXPORT_SYMBOL_GPL(rt2800mmio_init_queues);
705 int rt2800mmio_init_registers(struct rt2x00_dev *rt2x00dev)
707 u32 reg;
710 * Reset DMA indexes
712 reg = rt2x00mmio_register_read(rt2x00dev, WPDMA_RST_IDX);
713 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX0, 1);
714 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX1, 1);
715 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX2, 1);
716 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX3, 1);
717 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX4, 1);
718 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX5, 1);
719 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DRX_IDX0, 1);
720 rt2x00mmio_register_write(rt2x00dev, WPDMA_RST_IDX, reg);
722 rt2x00mmio_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e1f);
723 rt2x00mmio_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e00);
725 if (rt2x00_is_pcie(rt2x00dev) &&
726 (rt2x00_rt(rt2x00dev, RT3090) ||
727 rt2x00_rt(rt2x00dev, RT3390) ||
728 rt2x00_rt(rt2x00dev, RT3572) ||
729 rt2x00_rt(rt2x00dev, RT3593) ||
730 rt2x00_rt(rt2x00dev, RT5390) ||
731 rt2x00_rt(rt2x00dev, RT5392) ||
732 rt2x00_rt(rt2x00dev, RT5592))) {
733 reg = rt2x00mmio_register_read(rt2x00dev, AUX_CTRL);
734 rt2x00_set_field32(&reg, AUX_CTRL_FORCE_PCIE_CLK, 1);
735 rt2x00_set_field32(&reg, AUX_CTRL_WAKE_PCIE_EN, 1);
736 rt2x00mmio_register_write(rt2x00dev, AUX_CTRL, reg);
739 rt2x00mmio_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000003);
741 reg = 0;
742 rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_CSR, 1);
743 rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_BBP, 1);
744 rt2x00mmio_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
746 rt2x00mmio_register_write(rt2x00dev, MAC_SYS_CTRL, 0x00000000);
748 return 0;
750 EXPORT_SYMBOL_GPL(rt2800mmio_init_registers);
/*
 * Device state switch handlers.
 */
755 int rt2800mmio_enable_radio(struct rt2x00_dev *rt2x00dev)
757 /* Wait for DMA, ignore error until we initialize queues. */
758 rt2800_wait_wpdma_ready(rt2x00dev);
760 if (unlikely(rt2800mmio_init_queues(rt2x00dev)))
761 return -EIO;
763 return rt2800_enable_radio(rt2x00dev);
765 EXPORT_SYMBOL_GPL(rt2800mmio_enable_radio);
767 static void rt2800mmio_work_txdone(struct work_struct *work)
769 struct rt2x00_dev *rt2x00dev =
770 container_of(work, struct rt2x00_dev, txdone_work);
772 if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
773 return;
775 while (!kfifo_is_empty(&rt2x00dev->txstatus_fifo) ||
776 rt2800_txstatus_timeout(rt2x00dev)) {
778 tasklet_disable(&rt2x00dev->txstatus_tasklet);
779 rt2800_txdone(rt2x00dev, UINT_MAX);
780 rt2800_txdone_nostatus(rt2x00dev);
781 tasklet_enable(&rt2x00dev->txstatus_tasklet);
784 if (rt2800_txstatus_pending(rt2x00dev))
785 hrtimer_start(&rt2x00dev->txstatus_timer,
786 TXSTATUS_TIMEOUT, HRTIMER_MODE_REL);
789 static enum hrtimer_restart rt2800mmio_tx_sta_fifo_timeout(struct hrtimer *timer)
791 struct rt2x00_dev *rt2x00dev =
792 container_of(timer, struct rt2x00_dev, txstatus_timer);
794 if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
795 goto out;
797 if (!rt2800_txstatus_pending(rt2x00dev))
798 goto out;
800 rt2800mmio_fetch_txstatus(rt2x00dev);
801 if (!kfifo_is_empty(&rt2x00dev->txstatus_fifo))
802 tasklet_schedule(&rt2x00dev->txstatus_tasklet);
803 else
804 queue_work(rt2x00dev->workqueue, &rt2x00dev->txdone_work);
805 out:
806 return HRTIMER_NORESTART;
809 int rt2800mmio_probe_hw(struct rt2x00_dev *rt2x00dev)
811 int retval;
813 retval = rt2800_probe_hw(rt2x00dev);
814 if (retval)
815 return retval;
818 * Set txstatus timer function.
820 rt2x00dev->txstatus_timer.function = rt2800mmio_tx_sta_fifo_timeout;
823 * Overwrite TX done handler
825 INIT_WORK(&rt2x00dev->txdone_work, rt2800mmio_work_txdone);
827 return 0;
829 EXPORT_SYMBOL_GPL(rt2800mmio_probe_hw);
831 MODULE_AUTHOR(DRV_PROJECT);
832 MODULE_VERSION(DRV_VERSION);
833 MODULE_DESCRIPTION("rt2800 MMIO library");
834 MODULE_LICENSE("GPL");