drivers/net/wireless/mediatek/mt7601u/dma.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
 */

#include "mt7601u.h"
#include "dma.h"
#include "usb.h"
#include "trace.h"

static int mt7601u_submit_rx_buf(struct mt7601u_dev *dev,
                                 struct mt7601u_dma_buf_rx *e, gfp_t gfp);
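
/* Local helper: return the length of the 802.11 header at the start of
 * @data, or 0 if the buffer is too short to hold a valid header.
 */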
static unsigned int ieee80211_get_hdrlen_from_buf(const u8 *data, unsigned len)
{
        const struct ieee80211_hdr *hdr = (const struct ieee80211_hdr *)data;
        unsigned int hdrlen;

        if (unlikely(len < 10))
                return 0;
        hdrlen = ieee80211_hdrlen(hdr->frame_control);
        if (unlikely(hdrlen > len))
                return 0;
        return hdrlen;
}
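
/* Build an skb for one RX segment. The 802.11 header (or, for small frames,
 * the whole payload) is copied into a freshly allocated skb; with paged RX
 * the rest of the frame stays in the source page and is attached as a frag.
 */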
static struct sk_buff *
mt7601u_rx_skb_from_seg(struct mt7601u_dev *dev, struct mt7601u_rxwi *rxwi,
                        void *data, u32 seg_len, u32 truesize, struct page *p)
{
        struct sk_buff *skb;
        u32 true_len, hdr_len = 0, copy, frag;

        skb = alloc_skb(p ? 128 : seg_len, GFP_ATOMIC);
        if (!skb)
                return NULL;

        true_len = mt76_mac_process_rx(dev, skb, data, rxwi);
        if (!true_len || true_len > seg_len)
                goto bad_frame;

        hdr_len = ieee80211_get_hdrlen_from_buf(data, true_len);
        if (!hdr_len)
                goto bad_frame;

        if (rxwi->rxinfo & cpu_to_le32(MT_RXINFO_L2PAD)) {
                skb_put_data(skb, data, hdr_len);

                data += hdr_len + 2;
                true_len -= hdr_len;
                hdr_len = 0;
        }

        /* If not doing paged RX, the allocated skb will always have enough
         * space for the whole frame.
         */
        copy = (true_len <= skb_tailroom(skb)) ? true_len : hdr_len + 8;
        frag = true_len - copy;

        skb_put_data(skb, data, copy);
        data += copy;

        if (frag) {
                skb_add_rx_frag(skb, 0, p, data - page_address(p),
                                frag, truesize);
                get_page(p);
        }

        return skb;

bad_frame:
        dev_err_ratelimited(dev->dev, "Error: incorrect frame len:%u hdr:%u\n",
                            true_len, hdr_len);
        dev_kfree_skb(skb);
        return NULL;
}
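
/* Process a single RX segment: strip the DMA header and RXWI, sanity-check
 * the FCE info word read from the end of the segment, then pass the frame to
 * mac80211 under mac_lock.
 */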
static void mt7601u_rx_process_seg(struct mt7601u_dev *dev, u8 *data,
                                   u32 seg_len, struct page *p)
{
        struct sk_buff *skb;
        struct mt7601u_rxwi *rxwi;
        u32 fce_info, truesize = seg_len;

        /* DMA_INFO field at the beginning of the segment contains only some of
         * the information, we need to read the FCE descriptor from the end.
         */
        fce_info = get_unaligned_le32(data + seg_len - MT_FCE_INFO_LEN);
        seg_len -= MT_FCE_INFO_LEN;

        data += MT_DMA_HDR_LEN;
        seg_len -= MT_DMA_HDR_LEN;

        rxwi = (struct mt7601u_rxwi *) data;
        data += sizeof(struct mt7601u_rxwi);
        seg_len -= sizeof(struct mt7601u_rxwi);

        if (unlikely(rxwi->zero[0] || rxwi->zero[1] || rxwi->zero[2]))
                dev_err_once(dev->dev, "Error: RXWI zero fields are set\n");
        if (unlikely(FIELD_GET(MT_RXD_INFO_TYPE, fce_info)))
                dev_err_once(dev->dev, "Error: RX path seen a non-pkt urb\n");

        trace_mt_rx(dev, rxwi, fce_info);

        skb = mt7601u_rx_skb_from_seg(dev, rxwi, data, seg_len, truesize, p);
        if (!skb)
                return;

        spin_lock(&dev->mac_lock);
        ieee80211_rx(dev->hw, skb);
        spin_unlock(&dev->mac_lock);
}
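
/* Peek at the DMA length field of the next segment in an aggregate RX
 * buffer. Returns the full segment length including headers, or 0 when no
 * further sane segment is present.
 */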
static u16 mt7601u_rx_next_seg_len(u8 *data, u32 data_len)
{
        u32 min_seg_len = MT_DMA_HDR_LEN + MT_RX_INFO_LEN +
                          sizeof(struct mt7601u_rxwi) + MT_FCE_INFO_LEN;
        u16 dma_len = get_unaligned_le16(data);

        if (data_len < min_seg_len ||
            WARN_ON_ONCE(!dma_len) ||
            WARN_ON_ONCE(dma_len + MT_DMA_HDRS > data_len) ||
            WARN_ON_ONCE(dma_len & 0x3))
                return 0;

        return MT_DMA_HDRS + dma_len;
}
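
/* Process one completed RX URB, which may carry several aggregated segments.
 * When the buffer holds enough data a replacement page is allocated so the
 * old one can be handed up as skb fragments instead of being copied.
 */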
static void
mt7601u_rx_process_entry(struct mt7601u_dev *dev, struct mt7601u_dma_buf_rx *e)
{
        u32 seg_len, data_len = e->urb->actual_length;
        u8 *data = page_address(e->p);
        struct page *new_p = NULL;
        int cnt = 0;

        if (!test_bit(MT7601U_STATE_INITIALIZED, &dev->state))
                return;

        /* Copy if there is very little data in the buffer. */
        if (data_len > 512)
                new_p = dev_alloc_pages(MT_RX_ORDER);

        while ((seg_len = mt7601u_rx_next_seg_len(data, data_len))) {
                mt7601u_rx_process_seg(dev, data, seg_len,
                                       new_p ? e->p : NULL);

                data_len -= seg_len;
                data += seg_len;
                cnt++;
        }

        if (cnt > 1)
                trace_mt_rx_dma_aggr(dev, cnt, !!new_p);

        if (new_p) {
                /* we have one extra ref from the allocator */
                __free_pages(e->p, MT_RX_ORDER);

                e->p = new_p;
        }
}
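
/* Pop the oldest completed RX buffer off the ring, or return NULL if nothing
 * is pending. Called from the RX tasklet.
 */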
static struct mt7601u_dma_buf_rx *
mt7601u_rx_get_pending_entry(struct mt7601u_dev *dev)
{
        struct mt7601u_rx_queue *q = &dev->rx_q;
        struct mt7601u_dma_buf_rx *buf = NULL;
        unsigned long flags;

        spin_lock_irqsave(&dev->rx_lock, flags);

        if (!q->pending)
                goto out;

        buf = &q->e[q->start];
        q->pending--;
        q->start = (q->start + 1) % q->entries;
out:
        spin_unlock_irqrestore(&dev->rx_lock, flags);

        return buf;
}
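
/* URB completion callback for RX: on success mark the ring entry as pending
 * and schedule the RX tasklet; unlink/disconnect statuses are ignored.
 */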
static void mt7601u_complete_rx(struct urb *urb)
{
        struct mt7601u_dev *dev = urb->context;
        struct mt7601u_rx_queue *q = &dev->rx_q;
        unsigned long flags;

        /* do not schedule rx tasklet if urb has been unlinked
         * or the device has been removed
         */
        switch (urb->status) {
        case -ECONNRESET:
        case -ESHUTDOWN:
        case -ENOENT:
                return;
        default:
                dev_err_ratelimited(dev->dev, "rx urb failed: %d\n",
                                    urb->status);
                fallthrough;
        case 0:
                break;
        }

        spin_lock_irqsave(&dev->rx_lock, flags);
        if (WARN_ONCE(q->e[q->end].urb != urb, "RX urb mismatch"))
                goto out;

        q->end = (q->end + 1) % q->entries;
        q->pending++;
        tasklet_schedule(&dev->rx_tasklet);
out:
        spin_unlock_irqrestore(&dev->rx_lock, flags);
}
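
/* RX bottom half: drain all pending buffers, process the ones that completed
 * successfully and resubmit their URBs.
 */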
static void mt7601u_rx_tasklet(struct tasklet_struct *t)
{
        struct mt7601u_dev *dev = from_tasklet(dev, t, rx_tasklet);
        struct mt7601u_dma_buf_rx *e;

        while ((e = mt7601u_rx_get_pending_entry(dev))) {
                if (e->urb->status)
                        continue;

                mt7601u_rx_process_entry(dev, e);
                mt7601u_submit_rx_buf(dev, e, GFP_ATOMIC);
        }
}
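
/* URB completion callback for TX: queue the skb for status reporting in the
 * TX tasklet and wake the corresponding mac80211 queue once enough ring
 * entries have been freed.
 */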
static void mt7601u_complete_tx(struct urb *urb)
{
        struct mt7601u_tx_queue *q = urb->context;
        struct mt7601u_dev *dev = q->dev;
        struct sk_buff *skb;
        unsigned long flags;

        switch (urb->status) {
        case -ECONNRESET:
        case -ESHUTDOWN:
        case -ENOENT:
                return;
        default:
                dev_err_ratelimited(dev->dev, "tx urb failed: %d\n",
                                    urb->status);
                fallthrough;
        case 0:
                break;
        }

        spin_lock_irqsave(&dev->tx_lock, flags);
        if (WARN_ONCE(q->e[q->start].urb != urb, "TX urb mismatch"))
                goto out;

        skb = q->e[q->start].skb;
        q->e[q->start].skb = NULL;
        trace_mt_tx_dma_done(dev, skb);

        __skb_queue_tail(&dev->tx_skb_done, skb);
        tasklet_schedule(&dev->tx_tasklet);

        if (q->used == q->entries - q->entries / 8)
                ieee80211_wake_queue(dev->hw, skb_get_queue_mapping(skb));

        q->start = (q->start + 1) % q->entries;
        q->used--;
out:
        spin_unlock_irqrestore(&dev->tx_lock, flags);
}
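
/* TX bottom half: schedule the delayed TX-status read and report completion
 * for every skb queued on tx_skb_done.
 */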
static void mt7601u_tx_tasklet(struct tasklet_struct *t)
{
        struct mt7601u_dev *dev = from_tasklet(dev, t, tx_tasklet);
        struct sk_buff_head skbs;
        unsigned long flags;

        __skb_queue_head_init(&skbs);

        spin_lock_irqsave(&dev->tx_lock, flags);

        set_bit(MT7601U_STATE_MORE_STATS, &dev->state);
        if (!test_and_set_bit(MT7601U_STATE_READING_STATS, &dev->state))
                queue_delayed_work(dev->stat_wq, &dev->stat_work,
                                   msecs_to_jiffies(10));

        skb_queue_splice_init(&dev->tx_skb_done, &skbs);

        spin_unlock_irqrestore(&dev->tx_lock, flags);

        while (!skb_queue_empty(&skbs)) {
                struct sk_buff *skb = __skb_dequeue(&skbs);

                mt7601u_tx_status(dev, skb);
        }
}
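
/* Fill a bulk URB for the skb and submit it on the endpoint's TX ring,
 * stopping the mac80211 queue when the ring becomes full.
 */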
static int mt7601u_dma_submit_tx(struct mt7601u_dev *dev,
                                 struct sk_buff *skb, u8 ep)
{
        struct usb_device *usb_dev = mt7601u_to_usb_dev(dev);
        unsigned snd_pipe = usb_sndbulkpipe(usb_dev, dev->out_eps[ep]);
        struct mt7601u_dma_buf_tx *e;
        struct mt7601u_tx_queue *q = &dev->tx_q[ep];
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&dev->tx_lock, flags);

        if (WARN_ON(q->entries <= q->used)) {
                ret = -ENOSPC;
                goto out;
        }

        e = &q->e[q->end];
        e->skb = skb;
        usb_fill_bulk_urb(e->urb, usb_dev, snd_pipe, skb->data, skb->len,
                          mt7601u_complete_tx, q);
        ret = usb_submit_urb(e->urb, GFP_ATOMIC);
        if (ret) {
                /* Special-handle ENODEV from TX urb submission because it will
                 * often be the first ENODEV we see after device is removed.
                 */
                if (ret == -ENODEV)
                        set_bit(MT7601U_STATE_REMOVED, &dev->state);
                else
                        dev_err(dev->dev, "Error: TX urb submit failed:%d\n",
                                ret);
                goto out;
        }

        q->end = (q->end + 1) % q->entries;
        q->used++;

        if (q->used >= q->entries)
                ieee80211_stop_queue(dev->hw, skb_get_queue_mapping(skb));
out:
        spin_unlock_irqrestore(&dev->tx_lock, flags);

        return ret;
}

/* Map hardware Q to USB endpoint number */
static u8 q2ep(u8 qid)
{
        /* TODO: take management packets to queue 5 */
        return qid + 1;
}

/* Map USB endpoint number to Q id in the DMA engine */
static enum mt76_qsel ep2dmaq(u8 ep)
{
        if (ep == 5)
                return MT_QSEL_MGMT;
        return MT_QSEL_EDCA;
}
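
/* Wrap the skb in the DMA descriptor the device expects and hand it to the
 * per-endpoint TX ring; the skb is freed if submission fails.
 */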
int mt7601u_dma_enqueue_tx(struct mt7601u_dev *dev, struct sk_buff *skb,
                           struct mt76_wcid *wcid, int hw_q)
{
        u8 ep = q2ep(hw_q);
        u32 dma_flags;
        int ret;

        dma_flags = MT_TXD_PKT_INFO_80211;
        if (wcid->hw_key_idx == 0xff)
                dma_flags |= MT_TXD_PKT_INFO_WIV;

        ret = mt7601u_dma_skb_wrap_pkt(skb, ep2dmaq(ep), dma_flags);
        if (ret)
                return ret;

        ret = mt7601u_dma_submit_tx(dev, skb, ep);
        if (ret) {
                ieee80211_free_txskb(dev->hw, skb);
                return ret;
        }

        return 0;
}
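
/* Poison all RX URBs so no further completions are delivered; called from
 * mt7601u_dma_cleanup() before the RX ring is freed.
 */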
static void mt7601u_kill_rx(struct mt7601u_dev *dev)
{
        int i;

        for (i = 0; i < dev->rx_q.entries; i++)
                usb_poison_urb(dev->rx_q.e[i].urb);
}

static int mt7601u_submit_rx_buf(struct mt7601u_dev *dev,
                                 struct mt7601u_dma_buf_rx *e, gfp_t gfp)
{
        struct usb_device *usb_dev = mt7601u_to_usb_dev(dev);
        u8 *buf = page_address(e->p);
        unsigned pipe;
        int ret;

        pipe = usb_rcvbulkpipe(usb_dev, dev->in_eps[MT_EP_IN_PKT_RX]);

        usb_fill_bulk_urb(e->urb, usb_dev, pipe, buf, MT_RX_URB_SIZE,
                          mt7601u_complete_rx, dev);

        trace_mt_submit_urb(dev, e->urb);
        ret = usb_submit_urb(e->urb, gfp);
        if (ret)
                dev_err(dev->dev, "Error: submit RX URB failed:%d\n", ret);

        return ret;
}

static int mt7601u_submit_rx(struct mt7601u_dev *dev)
{
        int i, ret;

        for (i = 0; i < dev->rx_q.entries; i++) {
                ret = mt7601u_submit_rx_buf(dev, &dev->rx_q.e[i], GFP_KERNEL);
                if (ret)
                        return ret;
        }

        return 0;
}

static void mt7601u_free_rx(struct mt7601u_dev *dev)
{
        int i;

        for (i = 0; i < dev->rx_q.entries; i++) {
                __free_pages(dev->rx_q.e[i].p, MT_RX_ORDER);
                usb_free_urb(dev->rx_q.e[i].urb);
        }
}

static int mt7601u_alloc_rx(struct mt7601u_dev *dev)
{
        int i;

        memset(&dev->rx_q, 0, sizeof(dev->rx_q));
        dev->rx_q.dev = dev;
        dev->rx_q.entries = N_RX_ENTRIES;

        for (i = 0; i < N_RX_ENTRIES; i++) {
                dev->rx_q.e[i].urb = usb_alloc_urb(0, GFP_KERNEL);
                dev->rx_q.e[i].p = dev_alloc_pages(MT_RX_ORDER);

                if (!dev->rx_q.e[i].urb || !dev->rx_q.e[i].p)
                        return -ENOMEM;
        }

        return 0;
}

static void mt7601u_free_tx_queue(struct mt7601u_tx_queue *q)
{
        int i;

        for (i = 0; i < q->entries; i++) {
                usb_poison_urb(q->e[i].urb);
                if (q->e[i].skb)
                        mt7601u_tx_status(q->dev, q->e[i].skb);
                usb_free_urb(q->e[i].urb);
        }
}

static void mt7601u_free_tx(struct mt7601u_dev *dev)
{
        int i;

        if (!dev->tx_q)
                return;

        for (i = 0; i < __MT_EP_OUT_MAX; i++)
                mt7601u_free_tx_queue(&dev->tx_q[i]);
}

static int mt7601u_alloc_tx_queue(struct mt7601u_dev *dev,
                                  struct mt7601u_tx_queue *q)
{
        int i;

        q->dev = dev;
        q->entries = N_TX_ENTRIES;

        for (i = 0; i < N_TX_ENTRIES; i++) {
                q->e[i].urb = usb_alloc_urb(0, GFP_KERNEL);
                if (!q->e[i].urb)
                        return -ENOMEM;
        }

        return 0;
}

static int mt7601u_alloc_tx(struct mt7601u_dev *dev)
{
        int i;

        dev->tx_q = devm_kcalloc(dev->dev, __MT_EP_OUT_MAX,
                                 sizeof(*dev->tx_q), GFP_KERNEL);
        if (!dev->tx_q)
                return -ENOMEM;

        for (i = 0; i < __MT_EP_OUT_MAX; i++)
                if (mt7601u_alloc_tx_queue(dev, &dev->tx_q[i]))
                        return -ENOMEM;

        return 0;
}
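
/* Set up the TX/RX tasklets and rings and start RX; on failure the partially
 * initialized state is torn down via mt7601u_dma_cleanup().
 */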
int mt7601u_dma_init(struct mt7601u_dev *dev)
{
        int ret = -ENOMEM;

        tasklet_setup(&dev->tx_tasklet, mt7601u_tx_tasklet);
        tasklet_setup(&dev->rx_tasklet, mt7601u_rx_tasklet);

        ret = mt7601u_alloc_tx(dev);
        if (ret)
                goto err;
        ret = mt7601u_alloc_rx(dev);
        if (ret)
                goto err;

        ret = mt7601u_submit_rx(dev);
        if (ret)
                goto err;

        return 0;
err:
        mt7601u_dma_cleanup(dev);
        return ret;
}

void mt7601u_dma_cleanup(struct mt7601u_dev *dev)
{
        mt7601u_kill_rx(dev);

        tasklet_kill(&dev->rx_tasklet);

        mt7601u_free_rx(dev);
        mt7601u_free_tx(dev);

        tasklet_kill(&dev->tx_tasklet);
}