/*
 * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include "mt7601u.h"
#include "dma.h"
#include "usb.h"
#include "trace.h"
static int mt7601u_submit_rx_buf(struct mt7601u_dev *dev,
                                 struct mt7601u_dma_buf_rx *e, gfp_t gfp);
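
/* Buffer-based counterpart to ieee80211_get_hdrlen_from_skb(): returns the
 * 802.11 header length, or 0 if the buffer is shorter than the smallest
 * valid header (10 bytes, i.e. an ACK/CTS frame) or shorter than the header
 * it claims to carry.
 */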
static unsigned int ieee80211_get_hdrlen_from_buf(const u8 *data, unsigned len)
{
        const struct ieee80211_hdr *hdr = (const struct ieee80211_hdr *)data;
        unsigned int hdrlen;

        if (unlikely(len < 10))
                return 0;
        hdrlen = ieee80211_hdrlen(hdr->frame_control);
        if (unlikely(hdrlen > len))
                return 0;
        return hdrlen;
}
static struct sk_buff *
mt7601u_rx_skb_from_seg(struct mt7601u_dev *dev, struct mt7601u_rxwi *rxwi,
                        void *data, u32 seg_len, u32 truesize, struct page *p)
{
        struct sk_buff *skb;
        u32 true_len, hdr_len = 0, copy, frag;

        skb = alloc_skb(p ? 128 : seg_len, GFP_ATOMIC);
        if (!skb)
                return NULL;

        true_len = mt76_mac_process_rx(dev, skb, data, rxwi);
        if (!true_len || true_len > seg_len)
                goto bad_frame;

        hdr_len = ieee80211_get_hdrlen_from_buf(data, true_len);
        if (!hdr_len)
                goto bad_frame;

        if (rxwi->rxinfo & cpu_to_le32(MT_RXINFO_L2PAD)) {
                skb_put_data(skb, data, hdr_len);

                data += hdr_len + 2;
                true_len -= hdr_len;
                hdr_len = 0;
        }

        /* If not doing paged RX, the allocated skb always has enough space */
        copy = (true_len <= skb_tailroom(skb)) ? true_len : hdr_len + 8;
        frag = true_len - copy;

        skb_put_data(skb, data, copy);
        data += copy;

        if (frag) {
                skb_add_rx_frag(skb, 0, p, data - page_address(p),
                                frag, truesize);
                get_page(p);
        }

        return skb;

bad_frame:
        dev_err_ratelimited(dev->dev, "Error: incorrect frame len:%u hdr:%u\n",
                            true_len, hdr_len);
        dev_kfree_skb(skb);
        return NULL;
}
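
/* Each segment of an RX bulk transfer is laid out, as parsed below, as:
 * a DMA header (MT_DMA_HDR_LEN; its first LE16 word is the payload length),
 * a struct mt7601u_rxwi descriptor, the 802.11 frame itself (optionally
 * L2-padded), and an FCE info word (MT_FCE_INFO_LEN) at the very end.
 */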
static void mt7601u_rx_process_seg(struct mt7601u_dev *dev, u8 *data,
                                   u32 seg_len, struct page *p)
{
        struct sk_buff *skb;
        struct mt7601u_rxwi *rxwi;
        u32 fce_info, truesize = seg_len;

        /* DMA_INFO field at the beginning of the segment contains only some of
         * the information; we need to read the FCE descriptor from the end.
         */
        fce_info = get_unaligned_le32(data + seg_len - MT_FCE_INFO_LEN);
        seg_len -= MT_FCE_INFO_LEN;

        data += MT_DMA_HDR_LEN;
        seg_len -= MT_DMA_HDR_LEN;

        rxwi = (struct mt7601u_rxwi *) data;
        data += sizeof(struct mt7601u_rxwi);
        seg_len -= sizeof(struct mt7601u_rxwi);

        if (unlikely(rxwi->zero[0] || rxwi->zero[1] || rxwi->zero[2]))
                dev_err_once(dev->dev, "Error: RXWI zero fields are set\n");
        if (unlikely(FIELD_GET(MT_RXD_INFO_TYPE, fce_info)))
                dev_err_once(dev->dev, "Error: RX path seen a non-pkt urb\n");

        trace_mt_rx(dev, rxwi, fce_info);

        skb = mt7601u_rx_skb_from_seg(dev, rxwi, data, seg_len, truesize, p);
        if (!skb)
                return;

        spin_lock(&dev->mac_lock);
        ieee80211_rx(dev->hw, skb);
        spin_unlock(&dev->mac_lock);
}
static u16 mt7601u_rx_next_seg_len(u8 *data, u32 data_len)
{
        u32 min_seg_len = MT_DMA_HDR_LEN + MT_RX_INFO_LEN +
                sizeof(struct mt7601u_rxwi) + MT_FCE_INFO_LEN;
        u16 dma_len = get_unaligned_le16(data);

        if (data_len < min_seg_len ||
            WARN_ON(!dma_len) ||
            WARN_ON(dma_len + MT_DMA_HDRS > data_len) ||
            WARN_ON(dma_len & 0x3))
                return 0;

        return MT_DMA_HDRS + dma_len;
}
static void
mt7601u_rx_process_entry(struct mt7601u_dev *dev, struct mt7601u_dma_buf_rx *e)
{
        u32 seg_len, data_len = e->urb->actual_length;
        u8 *data = page_address(e->p);
        struct page *new_p = NULL;
        int cnt = 0;

        if (!test_bit(MT7601U_STATE_INITIALIZED, &dev->state))
                return;

        /* Copy if there is very little data in the buffer. */
        if (data_len > 512)
                new_p = dev_alloc_pages(MT_RX_ORDER);

        while ((seg_len = mt7601u_rx_next_seg_len(data, data_len))) {
                mt7601u_rx_process_seg(dev, data, seg_len, new_p ? e->p : NULL);

                data_len -= seg_len;
                data += seg_len;
                cnt++;
        }

        if (cnt > 1)
                trace_mt_rx_dma_aggr(dev, cnt, !!new_p);

        if (new_p) {
                /* we have one extra ref from the allocator */
                __free_pages(e->p, MT_RX_ORDER);

                e->p = new_p;
        }
}
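
/* The RX queue is a ring: the URB completion handler produces entries by
 * advancing q->end, and the RX tasklet consumes them from q->start through
 * this helper. Both ends are serialized by dev->rx_lock.
 */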
static struct mt7601u_dma_buf_rx *
mt7601u_rx_get_pending_entry(struct mt7601u_dev *dev)
{
        struct mt7601u_rx_queue *q = &dev->rx_q;
        struct mt7601u_dma_buf_rx *buf = NULL;
        unsigned long flags;

        spin_lock_irqsave(&dev->rx_lock, flags);

        if (!q->pending)
                goto out;

        buf = &q->e[q->start];
        q->pending--;
        q->start = (q->start + 1) % q->entries;
out:
        spin_unlock_irqrestore(&dev->rx_lock, flags);

        return buf;
}
static void mt7601u_complete_rx(struct urb *urb)
{
        struct mt7601u_dev *dev = urb->context;
        struct mt7601u_rx_queue *q = &dev->rx_q;
        unsigned long flags;

        /* do not schedule rx tasklet if urb has been unlinked
         * or the device has been removed
         */
        switch (urb->status) {
        case -ECONNRESET:
        case -ESHUTDOWN:
        case -ENOENT:
                return;
        default:
                dev_err_ratelimited(dev->dev, "rx urb failed: %d\n",
                                    urb->status);
                fallthrough;
        case 0:
                break;
        }

        spin_lock_irqsave(&dev->rx_lock, flags);
        if (WARN_ONCE(q->e[q->end].urb != urb, "RX urb mismatch"))
                goto out;

        q->end = (q->end + 1) % q->entries;
        q->pending++;
        tasklet_schedule(&dev->rx_tasklet);
out:
        spin_unlock_irqrestore(&dev->rx_lock, flags);
}
static void mt7601u_rx_tasklet(unsigned long data)
{
        struct mt7601u_dev *dev = (struct mt7601u_dev *) data;
        struct mt7601u_dma_buf_rx *e;

        while ((e = mt7601u_rx_get_pending_entry(dev))) {
                if (e->urb->status)
                        continue;

                mt7601u_rx_process_entry(dev, e);
                mt7601u_submit_rx_buf(dev, e, GFP_ATOMIC);
        }
}
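
/* TX completions run in interrupt context, so the handler only detaches the
 * skb from the ring and queues it on dev->tx_skb_done; status reporting is
 * deferred to mt7601u_tx_tasklet().
 */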
static void mt7601u_complete_tx(struct urb *urb)
{
        struct mt7601u_tx_queue *q = urb->context;
        struct mt7601u_dev *dev = q->dev;
        struct sk_buff *skb;
        unsigned long flags;

        switch (urb->status) {
        case -ECONNRESET:
        case -ESHUTDOWN:
        case -ENOENT:
                return;
        default:
                dev_err_ratelimited(dev->dev, "tx urb failed: %d\n",
                                    urb->status);
                fallthrough;
        case 0:
                break;
        }

        spin_lock_irqsave(&dev->tx_lock, flags);
        if (WARN_ONCE(q->e[q->start].urb != urb, "TX urb mismatch"))
                goto out;

        skb = q->e[q->start].skb;
        q->e[q->start].skb = NULL;
        trace_mt_tx_dma_done(dev, skb);

        __skb_queue_tail(&dev->tx_skb_done, skb);
        tasklet_schedule(&dev->tx_tasklet);

        if (q->used == q->entries - q->entries / 8)
                ieee80211_wake_queue(dev->hw, skb_get_queue_mapping(skb));

        q->start = (q->start + 1) % q->entries;
        q->used--;
out:
        spin_unlock_irqrestore(&dev->tx_lock, flags);
}
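
/* Besides reporting TX status, the tasklet marks that fresh stats are
 * pending (MT7601U_STATE_MORE_STATS) and schedules the delayed stats work
 * unless a read is already in flight.
 */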
static void mt7601u_tx_tasklet(unsigned long data)
{
        struct mt7601u_dev *dev = (struct mt7601u_dev *) data;
        struct sk_buff_head skbs;
        unsigned long flags;

        __skb_queue_head_init(&skbs);

        spin_lock_irqsave(&dev->tx_lock, flags);

        set_bit(MT7601U_STATE_MORE_STATS, &dev->state);
        if (!test_and_set_bit(MT7601U_STATE_READING_STATS, &dev->state))
                queue_delayed_work(dev->stat_wq, &dev->stat_work,
                                   msecs_to_jiffies(10));

        skb_queue_splice_init(&dev->tx_skb_done, &skbs);

        spin_unlock_irqrestore(&dev->tx_lock, flags);

        while (!skb_queue_empty(&skbs)) {
                struct sk_buff *skb = __skb_dequeue(&skbs);

                mt7601u_tx_status(dev, skb);
        }
}
static int mt7601u_dma_submit_tx(struct mt7601u_dev *dev,
                                 struct sk_buff *skb, u8 ep)
{
        struct usb_device *usb_dev = mt7601u_to_usb_dev(dev);
        unsigned snd_pipe = usb_sndbulkpipe(usb_dev, dev->out_eps[ep]);
        struct mt7601u_dma_buf_tx *e;
        struct mt7601u_tx_queue *q = &dev->tx_q[ep];
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&dev->tx_lock, flags);

        if (WARN_ON(q->entries <= q->used)) {
                ret = -ENOSPC;
                goto out;
        }

        e = &q->e[q->end];
        e->skb = skb;
        usb_fill_bulk_urb(e->urb, usb_dev, snd_pipe, skb->data, skb->len,
                          mt7601u_complete_tx, q);
        ret = usb_submit_urb(e->urb, GFP_ATOMIC);
        if (ret) {
                /* Special-handle ENODEV from TX urb submission because it will
                 * often be the first ENODEV we see after device is removed.
                 */
                if (ret == -ENODEV)
                        set_bit(MT7601U_STATE_REMOVED, &dev->state);
                else
                        dev_err(dev->dev, "Error: TX urb submit failed:%d\n",
                                ret);
                goto out;
        }

        q->end = (q->end + 1) % q->entries;
        q->used++;

        if (q->used >= q->entries)
                ieee80211_stop_queue(dev->hw, skb_get_queue_mapping(skb));
out:
        spin_unlock_irqrestore(&dev->tx_lock, flags);

        return ret;
}
/* Map hardware Q to USB endpoint number */
static u8 q2ep(u8 qid)
{
        /* TODO: take management packets to queue 5 */
        return qid + 1;
}

/* Map USB endpoint number to Q id in the DMA engine */
static enum mt76_qsel ep2dmaq(u8 ep)
{
        if (ep == 5)
                return MT_QSEL_MGMT;
        return MT_QSEL_EDCA;
}
int mt7601u_dma_enqueue_tx(struct mt7601u_dev *dev, struct sk_buff *skb,
                           struct mt76_wcid *wcid, int hw_q)
{
        u8 ep = q2ep(hw_q);
        u32 dma_flags;
        int ret;

        dma_flags = MT_TXD_PKT_INFO_80211;
        if (wcid->hw_key_idx == 0xff)
                dma_flags |= MT_TXD_PKT_INFO_WIV;

        ret = mt7601u_dma_skb_wrap_pkt(skb, ep2dmaq(ep), dma_flags);
        if (ret)
                return ret;

        ret = mt7601u_dma_submit_tx(dev, skb, ep);
        if (ret) {
                ieee80211_free_txskb(dev->hw, skb);
                return ret;
        }

        return 0;
}
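
/* usb_poison_urb() both cancels an in-flight URB and makes any further
 * usb_submit_urb() on it fail, so the RX tasklet cannot resubmit a buffer
 * once the queue has been killed.
 */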
static void mt7601u_kill_rx(struct mt7601u_dev *dev)
{
        int i;

        for (i = 0; i < dev->rx_q.entries; i++)
                usb_poison_urb(dev->rx_q.e[i].urb);
}
static int mt7601u_submit_rx_buf(struct mt7601u_dev *dev,
                                 struct mt7601u_dma_buf_rx *e, gfp_t gfp)
{
        struct usb_device *usb_dev = mt7601u_to_usb_dev(dev);
        u8 *buf = page_address(e->p);
        unsigned pipe;
        int ret;

        pipe = usb_rcvbulkpipe(usb_dev, dev->in_eps[MT_EP_IN_PKT_RX]);

        usb_fill_bulk_urb(e->urb, usb_dev, pipe, buf, MT_RX_URB_SIZE,
                          mt7601u_complete_rx, dev);

        trace_mt_submit_urb(dev, e->urb);
        ret = usb_submit_urb(e->urb, gfp);
        if (ret)
                dev_err(dev->dev, "Error: submit RX URB failed:%d\n", ret);

        return ret;
}
static int mt7601u_submit_rx(struct mt7601u_dev *dev)
{
        int i, ret;

        for (i = 0; i < dev->rx_q.entries; i++) {
                ret = mt7601u_submit_rx_buf(dev, &dev->rx_q.e[i], GFP_KERNEL);
                if (ret)
                        return ret;
        }

        return 0;
}
static void mt7601u_free_rx(struct mt7601u_dev *dev)
{
        int i;

        for (i = 0; i < dev->rx_q.entries; i++) {
                __free_pages(dev->rx_q.e[i].p, MT_RX_ORDER);
                usb_free_urb(dev->rx_q.e[i].urb);
        }
}
static int mt7601u_alloc_rx(struct mt7601u_dev *dev)
{
        int i;

        memset(&dev->rx_q, 0, sizeof(dev->rx_q));
        dev->rx_q.dev = dev;
        dev->rx_q.entries = N_RX_ENTRIES;

        for (i = 0; i < N_RX_ENTRIES; i++) {
                dev->rx_q.e[i].urb = usb_alloc_urb(0, GFP_KERNEL);
                dev->rx_q.e[i].p = dev_alloc_pages(MT_RX_ORDER);

                if (!dev->rx_q.e[i].urb || !dev->rx_q.e[i].p)
                        return -ENOMEM;
        }

        return 0;
}
static void mt7601u_free_tx_queue(struct mt7601u_tx_queue *q)
{
        int i;

        for (i = 0; i < q->entries; i++) {
                usb_poison_urb(q->e[i].urb);
                if (q->e[i].skb)
                        mt7601u_tx_status(q->dev, q->e[i].skb);
                usb_free_urb(q->e[i].urb);
        }
}
static void mt7601u_free_tx(struct mt7601u_dev *dev)
{
        int i;

        if (!dev->tx_q)
                return;

        for (i = 0; i < __MT_EP_OUT_MAX; i++)
                mt7601u_free_tx_queue(&dev->tx_q[i]);
}
static int mt7601u_alloc_tx_queue(struct mt7601u_dev *dev,
                                  struct mt7601u_tx_queue *q)
{
        int i;

        q->dev = dev;
        q->entries = N_TX_ENTRIES;

        for (i = 0; i < N_TX_ENTRIES; i++) {
                q->e[i].urb = usb_alloc_urb(0, GFP_KERNEL);
                if (!q->e[i].urb)
                        return -ENOMEM;
        }

        return 0;
}
static int mt7601u_alloc_tx(struct mt7601u_dev *dev)
{
        int i;

        dev->tx_q = devm_kcalloc(dev->dev, __MT_EP_OUT_MAX,
                                 sizeof(*dev->tx_q), GFP_KERNEL);
        if (!dev->tx_q)
                return -ENOMEM;

        for (i = 0; i < __MT_EP_OUT_MAX; i++)
                if (mt7601u_alloc_tx_queue(dev, &dev->tx_q[i]))
                        return -ENOMEM;

        return 0;
}
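
/* Bring-up and teardown order matters: tasklets are initialized before any
 * RX URB is submitted, and cleanup poisons the RX URBs first so no new
 * completion can schedule the RX tasklet before it is killed.
 */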
int mt7601u_dma_init(struct mt7601u_dev *dev)
{
        int ret = -ENOMEM;

        tasklet_init(&dev->tx_tasklet, mt7601u_tx_tasklet, (unsigned long) dev);
        tasklet_init(&dev->rx_tasklet, mt7601u_rx_tasklet, (unsigned long) dev);

        ret = mt7601u_alloc_tx(dev);
        if (ret)
                goto err;
        ret = mt7601u_alloc_rx(dev);
        if (ret)
                goto err;

        ret = mt7601u_submit_rx(dev);
        if (ret)
                goto err;

        return 0;
err:
        mt7601u_dma_cleanup(dev);
        return ret;
}
void mt7601u_dma_cleanup(struct mt7601u_dev *dev)
{
        mt7601u_kill_rx(dev);

        tasklet_kill(&dev->rx_tasklet);

        mt7601u_free_rx(dev);
        mt7601u_free_tx(dev);

        tasklet_kill(&dev->tx_tasklet);
}