/*
 * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include "mt7601u.h"
#include "dma.h"
#include "usb.h"
#include "trace.h"
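/* Overview (descriptive, inferred from the code below): this file implements
 * the USB bulk-transfer ("DMA") path of the driver.  Bulk-in URBs deliver
 * aggregates of RX segments that are split up and fed to mac80211, while TX
 * skbs are wrapped with a DMA header and submitted as bulk-out URBs.  Rings
 * of pre-allocated URBs are managed under dev->rx_lock / dev->tx_lock.
 */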
static int mt7601u_submit_rx_buf(struct mt7601u_dev *dev,
				 struct mt7601u_dma_buf_rx *e, gfp_t gfp);
static unsigned int ieee80211_get_hdrlen_from_buf(const u8 *data, unsigned len)
{
	const struct ieee80211_hdr *hdr = (const struct ieee80211_hdr *)data;
	unsigned int hdrlen;

	if (unlikely(len < 10))
		return 0;
	hdrlen = ieee80211_hdrlen(hdr->frame_control);
	if (unlikely(hdrlen > len))
		return 0;
	return hdrlen;
}

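/* Build an skb for one RX segment.  When paged RX is in use (p != NULL) only
 * a small linear headroom is allocated and the bulk of the frame is attached
 * as a page fragment below; otherwise the whole segment is copied.
 */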
static struct sk_buff *
mt7601u_rx_skb_from_seg(struct mt7601u_dev *dev, struct mt7601u_rxwi *rxwi,
			void *data, u32 seg_len, u32 truesize, struct page *p)
{
	struct sk_buff *skb;
	u32 true_len, hdr_len = 0, copy, frag;

	skb = alloc_skb(p ? 128 : seg_len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	true_len = mt76_mac_process_rx(dev, skb, data, rxwi);
	if (!true_len || true_len > seg_len)
		goto bad_frame;

	hdr_len = ieee80211_get_hdrlen_from_buf(data, true_len);
	if (!hdr_len)
		goto bad_frame;

	if (rxwi->rxinfo & cpu_to_le32(MT_RXINFO_L2PAD)) {
		skb_put_data(skb, data, hdr_len);

		data += hdr_len + 2;
		true_len -= hdr_len;
		hdr_len = 0;
	}

	/* If not doing paged RX allocated skb will always have enough space */
	copy = (true_len <= skb_tailroom(skb)) ? true_len : hdr_len + 8;
	frag = true_len - copy;

	skb_put_data(skb, data, copy);
	data += copy;

	if (frag) {
		skb_add_rx_frag(skb, 0, p, data - page_address(p),
				frag, truesize);
		get_page(p);
	}

	return skb;

bad_frame:
	dev_err_ratelimited(dev->dev, "Error: incorrect frame len:%u hdr:%u\n",
			    true_len, hdr_len);
	dev_kfree_skb(skb);
	return NULL;
}

static void mt7601u_rx_process_seg(struct mt7601u_dev *dev, u8 *data,
				   u32 seg_len, struct page *p)
{
	struct sk_buff *skb;
	struct mt7601u_rxwi *rxwi;
	u32 fce_info, truesize = seg_len;

	/* DMA_INFO field at the beginning of the segment contains only some of
	 * the information, we need to read the FCE descriptor from the end.
	 */
	fce_info = get_unaligned_le32(data + seg_len - MT_FCE_INFO_LEN);
	seg_len -= MT_FCE_INFO_LEN;

	data += MT_DMA_HDR_LEN;
	seg_len -= MT_DMA_HDR_LEN;

	rxwi = (struct mt7601u_rxwi *) data;
	data += sizeof(struct mt7601u_rxwi);
	seg_len -= sizeof(struct mt7601u_rxwi);

	if (unlikely(rxwi->zero[0] || rxwi->zero[1] || rxwi->zero[2]))
		dev_err_once(dev->dev, "Error: RXWI zero fields are set\n");
	if (unlikely(FIELD_GET(MT_RXD_INFO_TYPE, fce_info)))
		dev_err_once(dev->dev, "Error: RX path seen a non-pkt urb\n");

	trace_mt_rx(dev, rxwi, fce_info);

	skb = mt7601u_rx_skb_from_seg(dev, rxwi, data, seg_len, truesize, p);
	if (!skb)
		return;

	spin_lock(&dev->mac_lock);
	ieee80211_rx(dev->hw, skb);
	spin_unlock(&dev->mac_lock);
}

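/* A single bulk-in URB may aggregate several segments.  Each segment starts
 * with a 16-bit little-endian DMA length, which must be 4-byte aligned and,
 * together with the descriptors, fit within the remaining buffer; a zero
 * return terminates the walk in mt7601u_rx_process_entry().
 */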
static u16 mt7601u_rx_next_seg_len(u8 *data, u32 data_len)
{
	u32 min_seg_len = MT_DMA_HDR_LEN + MT_RX_INFO_LEN +
		sizeof(struct mt7601u_rxwi) + MT_FCE_INFO_LEN;
	u16 dma_len = get_unaligned_le16(data);

	if (data_len < min_seg_len ||
	    WARN_ON(!dma_len) ||
	    WARN_ON(dma_len + MT_DMA_HDRS > data_len) ||
	    WARN_ON(dma_len & 0x3))
		return 0;

	return MT_DMA_HDRS + dma_len;
}

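/* Process all segments of one completed RX URB.  If the buffer holds enough
 * data, a replacement page is allocated up front so the segments can be
 * handed to the stack as fragments of the old page instead of being copied.
 */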
static void
mt7601u_rx_process_entry(struct mt7601u_dev *dev, struct mt7601u_dma_buf_rx *e)
{
	u32 seg_len, data_len = e->urb->actual_length;
	u8 *data = page_address(e->p);
	struct page *new_p = NULL;
	int cnt = 0;

	if (!test_bit(MT7601U_STATE_INITIALIZED, &dev->state))
		return;

	/* Copy if there is very little data in the buffer. */
	if (data_len > 512)
		new_p = dev_alloc_pages(MT_RX_ORDER);

	while ((seg_len = mt7601u_rx_next_seg_len(data, data_len))) {
		mt7601u_rx_process_seg(dev, data, seg_len, new_p ? e->p : NULL);

		data_len -= seg_len;
		data += seg_len;
		cnt++;
	}

	if (cnt > 1)
		trace_mt_rx_dma_aggr(dev, cnt, !!new_p);

	if (new_p) {
		/* we have one extra ref from the allocator */
		__free_pages(e->p, MT_RX_ORDER);

		e->p = new_p;
	}
}

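/* RX ring bookkeeping (under rx_lock): the URB completion handler advances
 * 'end' and bumps 'pending', while the RX tasklet pops entries from 'start'
 * via this helper and resubmits them once processed.
 */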
static struct mt7601u_dma_buf_rx *
mt7601u_rx_get_pending_entry(struct mt7601u_dev *dev)
{
	struct mt7601u_rx_queue *q = &dev->rx_q;
	struct mt7601u_dma_buf_rx *buf = NULL;
	unsigned long flags;

	spin_lock_irqsave(&dev->rx_lock, flags);

	if (!q->pending)
		goto out;

	buf = &q->e[q->start];
	q->pending--;
	q->start = (q->start + 1) % q->entries;
out:
	spin_unlock_irqrestore(&dev->rx_lock, flags);

	return buf;
}

static void mt7601u_complete_rx(struct urb *urb)
{
	struct mt7601u_dev *dev = urb->context;
	struct mt7601u_rx_queue *q = &dev->rx_q;
	unsigned long flags;

	spin_lock_irqsave(&dev->rx_lock, flags);

	if (mt7601u_urb_has_error(urb))
		dev_err(dev->dev, "Error: RX urb failed:%d\n", urb->status);
	if (WARN_ONCE(q->e[q->end].urb != urb, "RX urb mismatch"))
		goto out;

	q->end = (q->end + 1) % q->entries;
	q->pending++;
	tasklet_schedule(&dev->rx_tasklet);
out:
	spin_unlock_irqrestore(&dev->rx_lock, flags);
}

static void mt7601u_rx_tasklet(unsigned long data)
{
	struct mt7601u_dev *dev = (struct mt7601u_dev *) data;
	struct mt7601u_dma_buf_rx *e;

	while ((e = mt7601u_rx_get_pending_entry(dev))) {
		if (e->urb->status)
			continue;

		mt7601u_rx_process_entry(dev, e);
		mt7601u_submit_rx_buf(dev, e, GFP_ATOMIC);
	}
}

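/* TX completion runs in interrupt context: the skb is only moved to
 * dev->tx_skb_done here, and the actual status reporting is deferred to
 * mt7601u_tx_tasklet() / mt7601u_tx_status().
 */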
static void mt7601u_complete_tx(struct urb *urb)
{
	struct mt7601u_tx_queue *q = urb->context;
	struct mt7601u_dev *dev = q->dev;
	struct sk_buff *skb;
	unsigned long flags;

	spin_lock_irqsave(&dev->tx_lock, flags);

	if (mt7601u_urb_has_error(urb))
		dev_err(dev->dev, "Error: TX urb failed:%d\n", urb->status);
	if (WARN_ONCE(q->e[q->start].urb != urb, "TX urb mismatch"))
		goto out;

	skb = q->e[q->start].skb;
	trace_mt_tx_dma_done(dev, skb);

	__skb_queue_tail(&dev->tx_skb_done, skb);
	tasklet_schedule(&dev->tx_tasklet);

	if (q->used == q->entries - q->entries / 8)
		ieee80211_wake_queue(dev->hw, skb_get_queue_mapping(skb));

	q->start = (q->start + 1) % q->entries;
	q->used--;
out:
	spin_unlock_irqrestore(&dev->tx_lock, flags);
}

static void mt7601u_tx_tasklet(unsigned long data)
{
	struct mt7601u_dev *dev = (struct mt7601u_dev *) data;
	struct sk_buff_head skbs;
	unsigned long flags;

	__skb_queue_head_init(&skbs);

	spin_lock_irqsave(&dev->tx_lock, flags);

	set_bit(MT7601U_STATE_MORE_STATS, &dev->state);
	if (!test_and_set_bit(MT7601U_STATE_READING_STATS, &dev->state))
		queue_delayed_work(dev->stat_wq, &dev->stat_work,
				   msecs_to_jiffies(10));

	skb_queue_splice_init(&dev->tx_skb_done, &skbs);

	spin_unlock_irqrestore(&dev->tx_lock, flags);

	while (!skb_queue_empty(&skbs)) {
		struct sk_buff *skb = __skb_dequeue(&skbs);

		mt7601u_tx_status(dev, skb);
	}
}

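/* Submit one wrapped skb on bulk-out endpoint 'ep'.  Queue occupancy is
 * tracked with 'used'/'entries'; when the ring fills up the corresponding
 * mac80211 queue is stopped and later woken from the completion path.
 */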
static int mt7601u_dma_submit_tx(struct mt7601u_dev *dev,
				 struct sk_buff *skb, u8 ep)
{
	struct usb_device *usb_dev = mt7601u_to_usb_dev(dev);
	unsigned snd_pipe = usb_sndbulkpipe(usb_dev, dev->out_eps[ep]);
	struct mt7601u_dma_buf_tx *e;
	struct mt7601u_tx_queue *q = &dev->tx_q[ep];
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dev->tx_lock, flags);

	if (WARN_ON(q->entries <= q->used)) {
		ret = -ENOSPC;
		goto out;
	}

	e = &q->e[q->end];
	e->skb = skb;
	usb_fill_bulk_urb(e->urb, usb_dev, snd_pipe, skb->data, skb->len,
			  mt7601u_complete_tx, q);
	ret = usb_submit_urb(e->urb, GFP_ATOMIC);
	if (ret) {
		/* Special-handle ENODEV from TX urb submission because it will
		 * often be the first ENODEV we see after device is removed.
		 */
		if (ret == -ENODEV)
			set_bit(MT7601U_STATE_REMOVED, &dev->state);
		else
			dev_err(dev->dev, "Error: TX urb submit failed:%d\n",
				ret);
		goto out;
	}

	q->end = (q->end + 1) % q->entries;
	q->used++;

	if (q->used >= q->entries)
		ieee80211_stop_queue(dev->hw, skb_get_queue_mapping(skb));

out:
	spin_unlock_irqrestore(&dev->tx_lock, flags);

	return ret;
}

/* Map hardware Q to USB endpoint number */
static u8 q2ep(u8 qid)
{
	/* TODO: take management packets to queue 5 */
	return qid + 1;
}

/* Map USB endpoint number to Q id in the DMA engine */
static enum mt76_qsel ep2dmaq(u8 ep)
{
	if (ep == 5)
		return MT_QSEL_MGMT;
	return MT_QSEL_EDCA;
}

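/* TX entry point from mac80211: pick the endpoint for the hardware queue,
 * prepend the DMA/packet-info header via mt7601u_dma_skb_wrap_pkt(), and
 * hand the frame to mt7601u_dma_submit_tx().
 */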
int mt7601u_dma_enqueue_tx(struct mt7601u_dev *dev, struct sk_buff *skb,
			   struct mt76_wcid *wcid, int hw_q)
{
	u8 ep = q2ep(hw_q);
	u32 dma_flags;
	int ret;

	dma_flags = MT_TXD_PKT_INFO_80211;
	if (wcid->hw_key_idx == 0xff)
		dma_flags |= MT_TXD_PKT_INFO_WIV;

	ret = mt7601u_dma_skb_wrap_pkt(skb, ep2dmaq(ep), dma_flags);
	if (ret)
		return ret;

	ret = mt7601u_dma_submit_tx(dev, skb, ep);
	if (ret) {
		ieee80211_free_txskb(dev->hw, skb);
		return ret;
	}

	return 0;
}

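/* Poison every RX URB so no further completions are delivered.  rx_lock is
 * dropped around usb_poison_urb(), presumably because it may sleep while
 * waiting for an in-flight URB to complete.
 */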
static void mt7601u_kill_rx(struct mt7601u_dev *dev)
{
	int i;
	unsigned long flags;

	spin_lock_irqsave(&dev->rx_lock, flags);

	for (i = 0; i < dev->rx_q.entries; i++) {
		int next = dev->rx_q.end;

		spin_unlock_irqrestore(&dev->rx_lock, flags);
		usb_poison_urb(dev->rx_q.e[next].urb);
		spin_lock_irqsave(&dev->rx_lock, flags);
	}

	spin_unlock_irqrestore(&dev->rx_lock, flags);
}

static int mt7601u_submit_rx_buf(struct mt7601u_dev *dev,
				 struct mt7601u_dma_buf_rx *e, gfp_t gfp)
{
	struct usb_device *usb_dev = mt7601u_to_usb_dev(dev);
	u8 *buf = page_address(e->p);
	unsigned pipe;
	int ret;

	pipe = usb_rcvbulkpipe(usb_dev, dev->in_eps[MT_EP_IN_PKT_RX]);

	usb_fill_bulk_urb(e->urb, usb_dev, pipe, buf, MT_RX_URB_SIZE,
			  mt7601u_complete_rx, dev);

	trace_mt_submit_urb(dev, e->urb);
	ret = usb_submit_urb(e->urb, gfp);
	if (ret)
		dev_err(dev->dev, "Error: submit RX URB failed:%d\n", ret);

	return ret;
}

static int mt7601u_submit_rx(struct mt7601u_dev *dev)
{
	int i, ret;

	for (i = 0; i < dev->rx_q.entries; i++) {
		ret = mt7601u_submit_rx_buf(dev, &dev->rx_q.e[i], GFP_KERNEL);
		if (ret)
			return ret;
	}

	return 0;
}

static void mt7601u_free_rx(struct mt7601u_dev *dev)
{
	int i;

	for (i = 0; i < dev->rx_q.entries; i++) {
		__free_pages(dev->rx_q.e[i].p, MT_RX_ORDER);
		usb_free_urb(dev->rx_q.e[i].urb);
	}
}

static int mt7601u_alloc_rx(struct mt7601u_dev *dev)
{
	int i;

	memset(&dev->rx_q, 0, sizeof(dev->rx_q));
	dev->rx_q.dev = dev;
	dev->rx_q.entries = N_RX_ENTRIES;

	for (i = 0; i < N_RX_ENTRIES; i++) {
		dev->rx_q.e[i].urb = usb_alloc_urb(0, GFP_KERNEL);
		dev->rx_q.e[i].p = dev_alloc_pages(MT_RX_ORDER);

		if (!dev->rx_q.e[i].urb || !dev->rx_q.e[i].p)
			return -ENOMEM;
	}

	return 0;
}

static void mt7601u_free_tx_queue(struct mt7601u_tx_queue *q)
{
	int i;

	for (i = 0; i < q->entries; i++) {
		usb_poison_urb(q->e[i].urb);
		usb_free_urb(q->e[i].urb);
	}
}

static void mt7601u_free_tx(struct mt7601u_dev *dev)
{
	int i;

	if (!dev->tx_q)
		return;

	for (i = 0; i < __MT_EP_OUT_MAX; i++)
		mt7601u_free_tx_queue(&dev->tx_q[i]);
}

static int mt7601u_alloc_tx_queue(struct mt7601u_dev *dev,
				  struct mt7601u_tx_queue *q)
{
	int i;

	q->dev = dev;
	q->entries = N_TX_ENTRIES;

	for (i = 0; i < N_TX_ENTRIES; i++) {
		q->e[i].urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!q->e[i].urb)
			return -ENOMEM;
	}

	return 0;
}

static int mt7601u_alloc_tx(struct mt7601u_dev *dev)
{
	int i;

	dev->tx_q = devm_kcalloc(dev->dev, __MT_EP_OUT_MAX,
				 sizeof(*dev->tx_q), GFP_KERNEL);
	if (!dev->tx_q)
		return -ENOMEM;

	for (i = 0; i < __MT_EP_OUT_MAX; i++)
		if (mt7601u_alloc_tx_queue(dev, &dev->tx_q[i]))
			return -ENOMEM;

	return 0;
}

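/* Bring-up order: init the tasklets first, then allocate the TX and RX rings
 * and prime the RX endpoint with URBs.  Any failure unwinds through
 * mt7601u_dma_cleanup().
 */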
int mt7601u_dma_init(struct mt7601u_dev *dev)
{
	int ret = -ENOMEM;

	tasklet_init(&dev->tx_tasklet, mt7601u_tx_tasklet, (unsigned long) dev);
	tasklet_init(&dev->rx_tasklet, mt7601u_rx_tasklet, (unsigned long) dev);

	ret = mt7601u_alloc_tx(dev);
	if (ret)
		goto err;
	ret = mt7601u_alloc_rx(dev);
	if (ret)
		goto err;

	ret = mt7601u_submit_rx(dev);
	if (ret)
		goto err;

	return 0;
err:
	mt7601u_dma_cleanup(dev);
	return ret;
}

void mt7601u_dma_cleanup(struct mt7601u_dev *dev)
{
	mt7601u_kill_rx(dev);

	tasklet_kill(&dev->rx_tasklet);

	mt7601u_free_rx(dev);
	mt7601u_free_tx(dev);

	tasklet_kill(&dev->tx_tasklet);
}