dt-bindings: mtd: ingenic: Use standard ecc-engine property
[linux/fpc-iii.git] / drivers / net / wireless / ralink / rt2x00 / rt2x00mmio.c
blob4956a54151cbc5ac7fa5141a227456cdb7552ff4
1 /*
2 Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
3 <http://rt2x00.serialmonkey.com>
5 This program is free software; you can redistribute it and/or modify
6 it under the terms of the GNU General Public License as published by
7 the Free Software Foundation; either version 2 of the License, or
8 (at your option) any later version.
10 This program is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 GNU General Public License for more details.
15 You should have received a copy of the GNU General Public License
16 along with this program; if not, see <http://www.gnu.org/licenses/>.
20 Module: rt2x00mmio
21 Abstract: rt2x00 generic mmio device routines.
24 #include <linux/dma-mapping.h>
25 #include <linux/kernel.h>
26 #include <linux/module.h>
27 #include <linux/slab.h>
29 #include "rt2x00.h"
30 #include "rt2x00mmio.h"
33 * Register access.
35 int rt2x00mmio_regbusy_read(struct rt2x00_dev *rt2x00dev,
36 const unsigned int offset,
37 const struct rt2x00_field32 field,
38 u32 *reg)
40 unsigned int i;
42 if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
43 return 0;
45 for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
46 *reg = rt2x00mmio_register_read(rt2x00dev, offset);
47 if (!rt2x00_get_field32(*reg, field))
48 return 1;
49 udelay(REGISTER_BUSY_DELAY);
52 printk_once(KERN_ERR "%s() Indirect register access failed: "
53 "offset=0x%.08x, value=0x%.08x\n", __func__, offset, *reg);
54 *reg = ~0;
56 return 0;
58 EXPORT_SYMBOL_GPL(rt2x00mmio_regbusy_read);
60 bool rt2x00mmio_rxdone(struct rt2x00_dev *rt2x00dev)
62 struct data_queue *queue = rt2x00dev->rx;
63 struct queue_entry *entry;
64 struct queue_entry_priv_mmio *entry_priv;
65 struct skb_frame_desc *skbdesc;
66 int max_rx = 16;
68 while (--max_rx) {
69 entry = rt2x00queue_get_entry(queue, Q_INDEX);
70 entry_priv = entry->priv_data;
72 if (rt2x00dev->ops->lib->get_entry_state(entry))
73 break;
76 * Fill in desc fields of the skb descriptor
78 skbdesc = get_skb_frame_desc(entry->skb);
79 skbdesc->desc = entry_priv->desc;
80 skbdesc->desc_len = entry->queue->desc_size;
83 * DMA is already done, notify rt2x00lib that
84 * it finished successfully.
86 rt2x00lib_dmastart(entry);
87 rt2x00lib_dmadone(entry);
90 * Send the frame to rt2x00lib for further processing.
92 rt2x00lib_rxdone(entry, GFP_ATOMIC);
95 return !max_rx;
97 EXPORT_SYMBOL_GPL(rt2x00mmio_rxdone);
99 void rt2x00mmio_flush_queue(struct data_queue *queue, bool drop)
101 unsigned int i;
103 for (i = 0; !rt2x00queue_empty(queue) && i < 10; i++)
104 msleep(50);
106 EXPORT_SYMBOL_GPL(rt2x00mmio_flush_queue);
109 * Device initialization handlers.
111 static int rt2x00mmio_alloc_queue_dma(struct rt2x00_dev *rt2x00dev,
112 struct data_queue *queue)
114 struct queue_entry_priv_mmio *entry_priv;
115 void *addr;
116 dma_addr_t dma;
117 unsigned int i;
120 * Allocate DMA memory for descriptor and buffer.
122 addr = dma_alloc_coherent(rt2x00dev->dev,
123 queue->limit * queue->desc_size, &dma,
124 GFP_KERNEL);
125 if (!addr)
126 return -ENOMEM;
129 * Initialize all queue entries to contain valid addresses.
131 for (i = 0; i < queue->limit; i++) {
132 entry_priv = queue->entries[i].priv_data;
133 entry_priv->desc = addr + i * queue->desc_size;
134 entry_priv->desc_dma = dma + i * queue->desc_size;
137 return 0;
140 static void rt2x00mmio_free_queue_dma(struct rt2x00_dev *rt2x00dev,
141 struct data_queue *queue)
143 struct queue_entry_priv_mmio *entry_priv =
144 queue->entries[0].priv_data;
146 if (entry_priv->desc)
147 dma_free_coherent(rt2x00dev->dev,
148 queue->limit * queue->desc_size,
149 entry_priv->desc, entry_priv->desc_dma);
150 entry_priv->desc = NULL;
153 int rt2x00mmio_initialize(struct rt2x00_dev *rt2x00dev)
155 struct data_queue *queue;
156 int status;
159 * Allocate DMA
161 queue_for_each(rt2x00dev, queue) {
162 status = rt2x00mmio_alloc_queue_dma(rt2x00dev, queue);
163 if (status)
164 goto exit;
168 * Register interrupt handler.
170 status = request_irq(rt2x00dev->irq,
171 rt2x00dev->ops->lib->irq_handler,
172 IRQF_SHARED, rt2x00dev->name, rt2x00dev);
173 if (status) {
174 rt2x00_err(rt2x00dev, "IRQ %d allocation failed (error %d)\n",
175 rt2x00dev->irq, status);
176 goto exit;
179 return 0;
181 exit:
182 queue_for_each(rt2x00dev, queue)
183 rt2x00mmio_free_queue_dma(rt2x00dev, queue);
185 return status;
187 EXPORT_SYMBOL_GPL(rt2x00mmio_initialize);
189 void rt2x00mmio_uninitialize(struct rt2x00_dev *rt2x00dev)
191 struct data_queue *queue;
194 * Free irq line.
196 free_irq(rt2x00dev->irq, rt2x00dev);
199 * Free DMA
201 queue_for_each(rt2x00dev, queue)
202 rt2x00mmio_free_queue_dma(rt2x00dev, queue);
204 EXPORT_SYMBOL_GPL(rt2x00mmio_uninitialize);
/*
 * rt2x00mmio module information.
 */
MODULE_AUTHOR(DRV_PROJECT);
MODULE_VERSION(DRV_VERSION);
MODULE_DESCRIPTION("rt2x00 mmio library");
MODULE_LICENSE("GPL");