/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include "mt76.h"
#include "dma.h"

#define DMA_DUMMY_TXWI ((void *) ~0)
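
/*
 * Allocate the descriptor ring and software entry array for a queue and
 * program the ring base, size and index registers. Descriptors start out
 * marked DMA_DONE so the hardware leaves them alone until they are filled.
 */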
static int
mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
        int size;
        int i;

        spin_lock_init(&q->lock);
        INIT_LIST_HEAD(&q->swq);

        size = q->ndesc * sizeof(struct mt76_desc);
        q->desc = dmam_alloc_coherent(dev->dev, size, &q->desc_dma, GFP_KERNEL);
        if (!q->desc)
                return -ENOMEM;

        size = q->ndesc * sizeof(*q->entry);
        q->entry = devm_kzalloc(dev->dev, size, GFP_KERNEL);
        if (!q->entry)
                return -ENOMEM;

        /* clear descriptors */
        for (i = 0; i < q->ndesc; i++)
                q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);

        iowrite32(q->desc_dma, &q->regs->desc_base);
        iowrite32(0, &q->regs->cpu_idx);
        iowrite32(0, &q->regs->dma_idx);
        iowrite32(q->ndesc, &q->regs->ring_size);

        return 0;
}
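
/*
 * Fill hardware descriptors with up to two buffers each, mark the last
 * segment(s) and advance the ring head. Returns the index of the last
 * descriptor written; the caller is responsible for kicking the queue.
 */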
static int
mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
                 struct mt76_queue_buf *buf, int nbufs, u32 info,
                 struct sk_buff *skb, void *txwi)
{
        struct mt76_desc *desc;
        u32 ctrl;
        int i, idx = -1;

        if (txwi)
                q->entry[q->head].txwi = DMA_DUMMY_TXWI;

        for (i = 0; i < nbufs; i += 2, buf += 2) {
                u32 buf0 = buf[0].addr, buf1 = 0;

                ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len);
                if (i < nbufs - 1) {
                        buf1 = buf[1].addr;
                        ctrl |= FIELD_PREP(MT_DMA_CTL_SD_LEN1, buf[1].len);
                }

                if (i == nbufs - 1)
                        ctrl |= MT_DMA_CTL_LAST_SEC0;
                else if (i == nbufs - 2)
                        ctrl |= MT_DMA_CTL_LAST_SEC1;

                idx = q->head;
                q->head = (q->head + 1) % q->ndesc;

                desc = &q->desc[idx];

                WRITE_ONCE(desc->buf0, cpu_to_le32(buf0));
                WRITE_ONCE(desc->buf1, cpu_to_le32(buf1));
                WRITE_ONCE(desc->info, cpu_to_le32(info));
                WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));

                q->queued++;
        }

        q->entry[idx].txwi = txwi;
        q->entry[idx].skb = skb;

        return idx;
}
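
/*
 * Unmap the DMA buffers of a completed TX descriptor and hand the
 * associated queue entry back to the caller before clearing it.
 */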
static void
mt76_dma_tx_cleanup_idx(struct mt76_dev *dev, struct mt76_queue *q, int idx,
                        struct mt76_queue_entry *prev_e)
{
        struct mt76_queue_entry *e = &q->entry[idx];
        __le32 __ctrl = READ_ONCE(q->desc[idx].ctrl);
        u32 ctrl = le32_to_cpu(__ctrl);

        if (!e->txwi || !e->skb) {
                __le32 addr = READ_ONCE(q->desc[idx].buf0);
                u32 len = FIELD_GET(MT_DMA_CTL_SD_LEN0, ctrl);

                dma_unmap_single(dev->dev, le32_to_cpu(addr), len,
                                 DMA_TO_DEVICE);
        }

        if (!(ctrl & MT_DMA_CTL_LAST_SEC0)) {
                __le32 addr = READ_ONCE(q->desc[idx].buf1);
                u32 len = FIELD_GET(MT_DMA_CTL_SD_LEN1, ctrl);

                dma_unmap_single(dev->dev, le32_to_cpu(addr), len,
                                 DMA_TO_DEVICE);
        }

        if (e->txwi == DMA_DUMMY_TXWI)
                e->txwi = NULL;

        *prev_e = *e;
        memset(e, 0, sizeof(*e));
}

static void
mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q)
{
        q->head = ioread32(&q->regs->dma_idx);
        q->tail = q->head;
        iowrite32(q->head, &q->regs->cpu_idx);
}
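
/*
 * Reap completed TX descriptors up to the hardware DMA index (all of them
 * when flushing), complete their skbs, recycle txwi buffers and wake the
 * mac80211 queue once enough ring space is available again.
 */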
static void
mt76_dma_tx_cleanup(struct mt76_dev *dev, enum mt76_txq_id qid, bool flush)
{
        struct mt76_queue *q = &dev->q_tx[qid];
        struct mt76_queue_entry entry;
        bool wake = false;
        int last;

        if (!q->ndesc)
                return;

        spin_lock_bh(&q->lock);
        if (flush)
                last = -1;
        else
                last = ioread32(&q->regs->dma_idx);

        while (q->queued && q->tail != last) {
                mt76_dma_tx_cleanup_idx(dev, q, q->tail, &entry);
                if (entry.schedule)
                        q->swq_queued--;

                if (entry.skb)
                        dev->drv->tx_complete_skb(dev, q, &entry, flush);

                if (entry.txwi) {
                        mt76_put_txwi(dev, entry.txwi);
                        wake = true;
                }

                q->tail = (q->tail + 1) % q->ndesc;
                q->queued--;

                if (!flush && q->tail == last)
                        last = ioread32(&q->regs->dma_idx);
        }

        if (!flush)
                mt76_txq_schedule(dev, q);
        else
                mt76_dma_sync_idx(dev, q);

        wake = wake && qid < IEEE80211_NUM_ACS && q->queued < q->ndesc - 8;

        if (!q->queued)
                wake_up(&dev->tx_wait);

        spin_unlock_bh(&q->lock);

        if (wake)
                ieee80211_wake_queue(dev->hw, qid);
}
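
/*
 * Detach the data buffer from an RX descriptor: report its length, info
 * word and whether more segments follow, then unmap the buffer so the CPU
 * can access it.
 */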
static void *
mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
                 int *len, u32 *info, bool *more)
{
        struct mt76_queue_entry *e = &q->entry[idx];
        struct mt76_desc *desc = &q->desc[idx];
        dma_addr_t buf_addr;
        void *buf = e->buf;
        int buf_len = SKB_WITH_OVERHEAD(q->buf_size);

        buf_addr = le32_to_cpu(READ_ONCE(desc->buf0));
        if (len) {
                u32 ctl = le32_to_cpu(READ_ONCE(desc->ctrl));
                *len = FIELD_GET(MT_DMA_CTL_SD_LEN0, ctl);
                *more = !(ctl & MT_DMA_CTL_LAST_SEC0);
        }

        if (info)
                *info = le32_to_cpu(desc->info);

        dma_unmap_single(dev->dev, buf_addr, buf_len, DMA_FROM_DEVICE);
        e->buf = NULL;

        return buf;
}

static void *
mt76_dma_dequeue(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
                 int *len, u32 *info, bool *more)
{
        int idx = q->tail;

        *more = false;
        if (!q->queued)
                return NULL;

        if (!flush && !(q->desc[idx].ctrl & cpu_to_le32(MT_DMA_CTL_DMA_DONE)))
                return NULL;

        q->tail = (q->tail + 1) % q->ndesc;
        q->queued--;

        return mt76_dma_get_buf(dev, q, idx, len, info, more);
}

static void
mt76_dma_kick_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
        iowrite32(q->head, &q->regs->cpu_idx);
}
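
/*
 * Refill the RX ring with freshly allocated page fragments, map them for
 * DMA, and kick the queue if any descriptors were added.
 */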
static int
mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q, bool napi)
{
        dma_addr_t addr;
        void *buf;
        int frames = 0;
        int len = SKB_WITH_OVERHEAD(q->buf_size);
        int offset = q->buf_offset;
        int idx;
        void *(*alloc)(unsigned int fragsz);

        if (napi)
                alloc = napi_alloc_frag;
        else
                alloc = netdev_alloc_frag;

        spin_lock_bh(&q->lock);

        while (q->queued < q->ndesc - 1) {
                struct mt76_queue_buf qbuf;

                buf = alloc(q->buf_size);
                if (!buf)
                        break;

                addr = dma_map_single(dev->dev, buf, len, DMA_FROM_DEVICE);
                if (dma_mapping_error(dev->dev, addr)) {
                        skb_free_frag(buf);
                        break;
                }

                qbuf.addr = addr + offset;
                qbuf.len = len - offset;
                idx = mt76_dma_add_buf(dev, q, &qbuf, 1, 0, buf, NULL);
                frames++;
        }

        if (frames)
                mt76_dma_kick_queue(dev, q);

        spin_unlock_bh(&q->lock);

        return frames;
}

static void
mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
{
        void *buf;
        bool more;

        spin_lock_bh(&q->lock);
        do {
                buf = mt76_dma_dequeue(dev, q, true, NULL, NULL, &more);
                if (!buf)
                        break;

                skb_free_frag(buf);
        } while (1);
        spin_unlock_bh(&q->lock);
}

static void
mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid)
{
        struct mt76_queue *q = &dev->q_rx[qid];
        int i;

        for (i = 0; i < q->ndesc; i++)
                q->desc[i].ctrl &= ~cpu_to_le32(MT_DMA_CTL_DMA_DONE);

        mt76_dma_rx_cleanup(dev, q);
        mt76_dma_sync_idx(dev, q);
        mt76_dma_rx_fill(dev, q, false);
}
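
/*
 * Append an RX buffer as a page fragment to the skb currently being
 * assembled; hand the skb to the driver once the last segment arrives.
 */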
static void
mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
                  int len, bool more)
{
        struct page *page = virt_to_head_page(data);
        int offset = data - page_address(page);
        struct sk_buff *skb = q->rx_head;

        offset += q->buf_offset;
        skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset, len,
                        q->buf_size);

        if (more)
                return;

        q->rx_head = NULL;
        dev->drv->rx_skb(dev, q - dev->q_rx, skb);
}
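
/*
 * RX processing: dequeue completed buffers, build skbs (handling
 * multi-segment frames via q->rx_head), pass them to the driver and
 * refill the ring. Returns the number of frames processed.
 */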
static int
mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
{
        struct sk_buff *skb;
        unsigned char *data;
        int len;
        int done = 0;
        bool more;

        while (done < budget) {
                u32 info;

                data = mt76_dma_dequeue(dev, q, false, &len, &info, &more);
                if (!data)
                        break;

                if (q->rx_head) {
                        mt76_add_fragment(dev, q, data, len, more);
                        continue;
                }

                skb = build_skb(data, q->buf_size);
                if (!skb) {
                        skb_free_frag(data);
                        continue;
                }

                skb_reserve(skb, q->buf_offset);
                if (skb->tail + len > skb->end) {
                        dev_kfree_skb(skb);
                        continue;
                }

                if (q == &dev->q_rx[MT_RXQ_MCU]) {
                        u32 *rxfce = (u32 *) skb->cb;
                        *rxfce = info;
                }

                __skb_put(skb, len);
                done++;

                if (more) {
                        q->rx_head = skb;
                        continue;
                }

                dev->drv->rx_skb(dev, q - dev->q_rx, skb);
        }

        mt76_dma_rx_fill(dev, q, true);
        return done;
}
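
/*
 * NAPI poll callback: process the RX queue until the budget is exhausted
 * or no more frames are pending, then complete NAPI and notify the driver
 * through its rx_poll_complete hook.
 */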
static int
mt76_dma_rx_poll(struct napi_struct *napi, int budget)
{
        struct mt76_dev *dev;
        int qid, done = 0, cur;

        dev = container_of(napi->dev, struct mt76_dev, napi_dev);
        qid = napi - dev->napi;

        rcu_read_lock();

        do {
                cur = mt76_dma_rx_process(dev, &dev->q_rx[qid], budget - done);
                mt76_rx_poll_complete(dev, qid);
                done += cur;
        } while (cur && done < budget);

        rcu_read_unlock();

        if (done < budget) {
                napi_complete(napi);
                dev->drv->rx_poll_complete(dev, qid);
        }

        return done;
}

static int
mt76_dma_init(struct mt76_dev *dev)
{
        int i;

        init_dummy_netdev(&dev->napi_dev);

        for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++) {
                netif_napi_add(&dev->napi_dev, &dev->napi[i], mt76_dma_rx_poll,
                               64);
                mt76_dma_rx_fill(dev, &dev->q_rx[i], false);
                skb_queue_head_init(&dev->rx_skb[i]);
                napi_enable(&dev->napi[i]);
        }

        return 0;
}

static const struct mt76_queue_ops mt76_dma_ops = {
        .init = mt76_dma_init,
        .alloc = mt76_dma_alloc_queue,
        .add_buf = mt76_dma_add_buf,
        .tx_cleanup = mt76_dma_tx_cleanup,
        .rx_reset = mt76_dma_rx_reset,
        .kick = mt76_dma_kick_queue,
};

int mt76_dma_attach(struct mt76_dev *dev)
{
        dev->queue_ops = &mt76_dma_ops;
        return 0;
}
EXPORT_SYMBOL_GPL(mt76_dma_attach);

void mt76_dma_cleanup(struct mt76_dev *dev)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(dev->q_tx); i++)
                mt76_dma_tx_cleanup(dev, i, true);

        for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++) {
                netif_napi_del(&dev->napi[i]);
                mt76_dma_rx_cleanup(dev, &dev->q_rx[i]);
        }
}
EXPORT_SYMBOL_GPL(mt76_dma_cleanup);