// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
 */

static int mt7601u_submit_rx_buf(struct mt7601u_dev *dev,
				 struct mt7601u_dma_buf_rx *e, gfp_t gfp);
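
/* Helper used by the RX path below: derive the 802.11 header length from
 * the raw frame buffer, bailing out when the buffer is too short or the
 * computed header length exceeds the available data.
 */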
static unsigned int ieee80211_get_hdrlen_from_buf(const u8 *data, unsigned len)
{
	const struct ieee80211_hdr *hdr = (const struct ieee80211_hdr *)data;
	unsigned int hdrlen;

	if (unlikely(len < 10))
		return 0;
	hdrlen = ieee80211_hdrlen(hdr->frame_control);
	if (unlikely(hdrlen > len))
		return 0;
	return hdrlen;
}
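
/* Build an skb for one received frame segment.  With paged RX (p != NULL)
 * only a small 128-byte skb is allocated and the tail of the frame is
 * attached as a page fragment; otherwise the whole segment is copied.
 */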
static struct sk_buff *
mt7601u_rx_skb_from_seg(struct mt7601u_dev *dev, struct mt7601u_rxwi *rxwi,
			void *data, u32 seg_len, u32 truesize, struct page *p)

	u32 true_len, hdr_len = 0, copy, frag;

	skb = alloc_skb(p ? 128 : seg_len, GFP_ATOMIC);

	true_len = mt76_mac_process_rx(dev, skb, data, rxwi);
	if (!true_len || true_len > seg_len)

	hdr_len = ieee80211_get_hdrlen_from_buf(data, true_len);

	if (rxwi->rxinfo & cpu_to_le32(MT_RXINFO_L2PAD)) {
		skb_put_data(skb, data, hdr_len);

	/* If not doing paged RX, the allocated skb will always have enough space */
	copy = (true_len <= skb_tailroom(skb)) ? true_len : hdr_len + 8;
	frag = true_len - copy;

	skb_put_data(skb, data, copy);

		skb_add_rx_frag(skb, 0, p, data - page_address(p),

	dev_err_ratelimited(dev->dev, "Error: incorrect frame len:%u hdr:%u\n",
static void mt7601u_rx_process_seg(struct mt7601u_dev *dev, u8 *data,
				   u32 seg_len, struct page *p)

	struct mt7601u_rxwi *rxwi;
	u32 fce_info, truesize = seg_len;

	/* DMA_INFO field at the beginning of the segment contains only some
	 * of the information; we need to read the FCE descriptor from the end.
	 */
	fce_info = get_unaligned_le32(data + seg_len - MT_FCE_INFO_LEN);
	seg_len -= MT_FCE_INFO_LEN;

	data += MT_DMA_HDR_LEN;
	seg_len -= MT_DMA_HDR_LEN;

	rxwi = (struct mt7601u_rxwi *) data;
	data += sizeof(struct mt7601u_rxwi);
	seg_len -= sizeof(struct mt7601u_rxwi);

	if (unlikely(rxwi->zero[0] || rxwi->zero[1] || rxwi->zero[2]))
		dev_err_once(dev->dev, "Error: RXWI zero fields are set\n");
	if (unlikely(FIELD_GET(MT_RXD_INFO_TYPE, fce_info)))
		dev_err_once(dev->dev, "Error: RX path seen a non-pkt urb\n");

	trace_mt_rx(dev, rxwi, fce_info);

	skb = mt7601u_rx_skb_from_seg(dev, rxwi, data, seg_len, truesize, p);

	spin_lock(&dev->mac_lock);
	ieee80211_rx(dev->hw, skb);
	spin_unlock(&dev->mac_lock);
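
/* Return the length of the next segment in an aggregated RX buffer:
 * MT_DMA_HDRS plus the DMA length read from the segment header, after
 * checking that the length is non-zero, 4-byte aligned and fits within
 * the remaining data.
 */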
static u16 mt7601u_rx_next_seg_len(u8 *data, u32 data_len)

	u32 min_seg_len = MT_DMA_HDR_LEN + MT_RX_INFO_LEN +
			  sizeof(struct mt7601u_rxwi) + MT_FCE_INFO_LEN;
	u16 dma_len = get_unaligned_le16(data);

	if (data_len < min_seg_len ||
	    WARN_ON_ONCE(!dma_len) ||
	    WARN_ON_ONCE(dma_len + MT_DMA_HDRS > data_len) ||
	    WARN_ON_ONCE(dma_len & 0x3))

	return MT_DMA_HDRS + dma_len;
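
/* Walk all segments of a completed RX URB and process them one by one.
 * Nothing is processed until the device is marked initialized; a fresh
 * page may be allocated so the current one can be handed out as paged skb
 * fragments (new_p selects paged vs. copied RX in the loop below).
 */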
static void
mt7601u_rx_process_entry(struct mt7601u_dev *dev, struct mt7601u_dma_buf_rx *e)

	u32 seg_len, data_len = e->urb->actual_length;
	u8 *data = page_address(e->p);
	struct page *new_p = NULL;

	if (!test_bit(MT7601U_STATE_INITIALIZED, &dev->state))

	/* Copy if there is very little data in the buffer. */

		new_p = dev_alloc_pages(MT_RX_ORDER);

	while ((seg_len = mt7601u_rx_next_seg_len(data, data_len))) {
		mt7601u_rx_process_seg(dev, data, seg_len, new_p ? e->p : NULL);

		trace_mt_rx_dma_aggr(dev, cnt, !!new_p);

		/* we have one extra ref from the allocator */
		__free_pages(e->p, MT_RX_ORDER);
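
/* Pop the next pending RX entry from the ring under rx_lock, advancing the
 * start index; NULL is returned when there is nothing to process.
 */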
static struct mt7601u_dma_buf_rx *
mt7601u_rx_get_pending_entry(struct mt7601u_dev *dev)

	struct mt7601u_rx_queue *q = &dev->rx_q;
	struct mt7601u_dma_buf_rx *buf = NULL;

	spin_lock_irqsave(&dev->rx_lock, flags);

	buf = &q->e[q->start];

	q->start = (q->start + 1) % q->entries;

	spin_unlock_irqrestore(&dev->rx_lock, flags);
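
/* RX URB completion callback: check that the completed URB matches the
 * ring slot at q->end, advance the end index and kick the RX tasklet.
 */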
static void mt7601u_complete_rx(struct urb *urb)

	struct mt7601u_dev *dev = urb->context;
	struct mt7601u_rx_queue *q = &dev->rx_q;

	/* do not schedule rx tasklet if urb has been unlinked
	 * or the device has been removed
	 */
	switch (urb->status) {

		dev_err_ratelimited(dev->dev, "rx urb failed: %d\n",

	spin_lock_irqsave(&dev->rx_lock, flags);
	if (WARN_ONCE(q->e[q->end].urb != urb, "RX urb mismatch"))

	q->end = (q->end + 1) % q->entries;
	tasklet_schedule(&dev->rx_tasklet);

	spin_unlock_irqrestore(&dev->rx_lock, flags);
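
/* RX tasklet: drain the pending entries, process each one and resubmit its
 * buffer with GFP_ATOMIC.
 */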
static void mt7601u_rx_tasklet(struct tasklet_struct *t)

	struct mt7601u_dev *dev = from_tasklet(dev, t, rx_tasklet);
	struct mt7601u_dma_buf_rx *e;

	while ((e = mt7601u_rx_get_pending_entry(dev))) {

		mt7601u_rx_process_entry(dev, e);
		mt7601u_submit_rx_buf(dev, e, GFP_ATOMIC);
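
/* TX URB completion: detach the skb from its ring slot, queue it on
 * tx_skb_done for the TX tasklet to report status, and wake the mac80211
 * queue when q->used hits the entries - entries / 8 watermark.
 */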
static void mt7601u_complete_tx(struct urb *urb)

	struct mt7601u_tx_queue *q = urb->context;
	struct mt7601u_dev *dev = q->dev;

	switch (urb->status) {

		dev_err_ratelimited(dev->dev, "tx urb failed: %d\n",

	spin_lock_irqsave(&dev->tx_lock, flags);
	if (WARN_ONCE(q->e[q->start].urb != urb, "TX urb mismatch"))

	skb = q->e[q->start].skb;
	q->e[q->start].skb = NULL;
	trace_mt_tx_dma_done(dev, skb);

	__skb_queue_tail(&dev->tx_skb_done, skb);
	tasklet_schedule(&dev->tx_tasklet);

	if (q->used == q->entries - q->entries / 8)
		ieee80211_wake_queue(dev->hw, skb_get_queue_mapping(skb));

	q->start = (q->start + 1) % q->entries;

	spin_unlock_irqrestore(&dev->tx_lock, flags);
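
/* TX tasklet: kick off a delayed statistics read (10 ms) unless one is
 * already pending, then splice tx_skb_done off under tx_lock and report
 * TX status for each skb outside the lock.
 */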
static void mt7601u_tx_tasklet(struct tasklet_struct *t)

	struct mt7601u_dev *dev = from_tasklet(dev, t, tx_tasklet);
	struct sk_buff_head skbs;

	__skb_queue_head_init(&skbs);

	spin_lock_irqsave(&dev->tx_lock, flags);

	set_bit(MT7601U_STATE_MORE_STATS, &dev->state);
	if (!test_and_set_bit(MT7601U_STATE_READING_STATS, &dev->state))
		queue_delayed_work(dev->stat_wq, &dev->stat_work,
				   msecs_to_jiffies(10));

	skb_queue_splice_init(&dev->tx_skb_done, &skbs);

	spin_unlock_irqrestore(&dev->tx_lock, flags);

	while (!skb_queue_empty(&skbs)) {
		struct sk_buff *skb = __skb_dequeue(&skbs);

		mt7601u_tx_status(dev, skb);
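
/* Submit one TX skb on the bulk OUT endpoint 'ep'.  The ring slot is
 * filled under tx_lock; -ENODEV from usb_submit_urb marks the device as
 * removed and the mac80211 queue is stopped once the ring is full.
 */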
static int mt7601u_dma_submit_tx(struct mt7601u_dev *dev,
				 struct sk_buff *skb, u8 ep)

	struct usb_device *usb_dev = mt7601u_to_usb_dev(dev);
	unsigned snd_pipe = usb_sndbulkpipe(usb_dev, dev->out_eps[ep]);
	struct mt7601u_dma_buf_tx *e;
	struct mt7601u_tx_queue *q = &dev->tx_q[ep];

	spin_lock_irqsave(&dev->tx_lock, flags);

	if (WARN_ON(q->entries <= q->used)) {

	usb_fill_bulk_urb(e->urb, usb_dev, snd_pipe, skb->data, skb->len,
			  mt7601u_complete_tx, q);
	ret = usb_submit_urb(e->urb, GFP_ATOMIC);

		/* Special-handle ENODEV from TX urb submission because it will
		 * often be the first ENODEV we see after device is removed.
		 */

			set_bit(MT7601U_STATE_REMOVED, &dev->state);

			dev_err(dev->dev, "Error: TX urb submit failed:%d\n",

	q->end = (q->end + 1) % q->entries;

	if (q->used >= q->entries)
		ieee80211_stop_queue(dev->hw, skb_get_queue_mapping(skb));

	spin_unlock_irqrestore(&dev->tx_lock, flags);

/* Map hardware Q to USB endpoint number */
static u8 q2ep(u8 qid)

	/* TODO: take management packets to queue 5 */

/* Map USB endpoint number to Q id in the DMA engine */
static enum mt76_qsel ep2dmaq(u8 ep)
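
/* Wrap the skb in the DMA packet descriptor and hand it to the USB TX
 * path; the WIV flag is added for stations without a hardware key index.
 */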
int mt7601u_dma_enqueue_tx(struct mt7601u_dev *dev, struct sk_buff *skb,
			   struct mt76_wcid *wcid, int hw_q)

	dma_flags = MT_TXD_PKT_INFO_80211;
	if (wcid->hw_key_idx == 0xff)
		dma_flags |= MT_TXD_PKT_INFO_WIV;

	ret = mt7601u_dma_skb_wrap_pkt(skb, ep2dmaq(ep), dma_flags);

	ret = mt7601u_dma_submit_tx(dev, skb, ep);

		ieee80211_free_txskb(dev->hw, skb);

static void mt7601u_kill_rx(struct mt7601u_dev *dev)

	for (i = 0; i < dev->rx_q.entries; i++)
		usb_poison_urb(dev->rx_q.e[i].urb);
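
/* (Re)fill and submit one RX bulk URB on the packet RX IN endpoint; the
 * transfer buffer is the entry's page and completions are handled by
 * mt7601u_complete_rx().
 */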
static int mt7601u_submit_rx_buf(struct mt7601u_dev *dev,
				 struct mt7601u_dma_buf_rx *e, gfp_t gfp)

	struct usb_device *usb_dev = mt7601u_to_usb_dev(dev);
	u8 *buf = page_address(e->p);

	pipe = usb_rcvbulkpipe(usb_dev, dev->in_eps[MT_EP_IN_PKT_RX]);

	usb_fill_bulk_urb(e->urb, usb_dev, pipe, buf, MT_RX_URB_SIZE,
			  mt7601u_complete_rx, dev);

	trace_mt_submit_urb(dev, e->urb);
	ret = usb_submit_urb(e->urb, gfp);

		dev_err(dev->dev, "Error: submit RX URB failed:%d\n", ret);

static int mt7601u_submit_rx(struct mt7601u_dev *dev)

	for (i = 0; i < dev->rx_q.entries; i++) {
		ret = mt7601u_submit_rx_buf(dev, &dev->rx_q.e[i], GFP_KERNEL);

static void mt7601u_free_rx(struct mt7601u_dev *dev)

	for (i = 0; i < dev->rx_q.entries; i++) {
		__free_pages(dev->rx_q.e[i].p, MT_RX_ORDER);
		usb_free_urb(dev->rx_q.e[i].urb);

static int mt7601u_alloc_rx(struct mt7601u_dev *dev)

	memset(&dev->rx_q, 0, sizeof(dev->rx_q));

	dev->rx_q.entries = N_RX_ENTRIES;

	for (i = 0; i < N_RX_ENTRIES; i++) {
		dev->rx_q.e[i].urb = usb_alloc_urb(0, GFP_KERNEL);
		dev->rx_q.e[i].p = dev_alloc_pages(MT_RX_ORDER);

		if (!dev->rx_q.e[i].urb || !dev->rx_q.e[i].p)

static void mt7601u_free_tx_queue(struct mt7601u_tx_queue *q)

	for (i = 0; i < q->entries; i++) {
		usb_poison_urb(q->e[i].urb);

			mt7601u_tx_status(q->dev, q->e[i].skb);
		usb_free_urb(q->e[i].urb);

static void mt7601u_free_tx(struct mt7601u_dev *dev)

	for (i = 0; i < __MT_EP_OUT_MAX; i++)
		mt7601u_free_tx_queue(&dev->tx_q[i]);

static int mt7601u_alloc_tx_queue(struct mt7601u_dev *dev,
				  struct mt7601u_tx_queue *q)

	q->entries = N_TX_ENTRIES;

	for (i = 0; i < N_TX_ENTRIES; i++) {
		q->e[i].urb = usb_alloc_urb(0, GFP_KERNEL);

static int mt7601u_alloc_tx(struct mt7601u_dev *dev)

	dev->tx_q = devm_kcalloc(dev->dev, __MT_EP_OUT_MAX,
				 sizeof(*dev->tx_q), GFP_KERNEL);

	for (i = 0; i < __MT_EP_OUT_MAX; i++)
		if (mt7601u_alloc_tx_queue(dev, &dev->tx_q[i]))
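
/* Set up the USB "DMA" paths: initialize the TX/RX tasklets, allocate both
 * queues and submit the initial RX URBs.  mt7601u_dma_cleanup() below is
 * the common error/teardown path.
 */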
int mt7601u_dma_init(struct mt7601u_dev *dev)

	tasklet_setup(&dev->tx_tasklet, mt7601u_tx_tasklet);
	tasklet_setup(&dev->rx_tasklet, mt7601u_rx_tasklet);

	ret = mt7601u_alloc_tx(dev);

	ret = mt7601u_alloc_rx(dev);

	ret = mt7601u_submit_rx(dev);

	mt7601u_dma_cleanup(dev);

void mt7601u_dma_cleanup(struct mt7601u_dev *dev)

	mt7601u_kill_rx(dev);

	tasklet_kill(&dev->rx_tasklet);

	mt7601u_free_rx(dev);
	mt7601u_free_tx(dev);

	tasklet_kill(&dev->tx_tasklet);