/*
 * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <linux/module.h>

#include "mt76.h"
#include "usb_trace.h"
#include "dma.h"

#define MT_VEND_REQ_MAX_RETRY	10
#define MT_VEND_REQ_TOUT_MS	300
static bool disable_usb_sg;
module_param_named(disable_usb_sg, disable_usb_sg, bool, 0644);
MODULE_PARM_DESC(disable_usb_sg, "Disable usb scatter-gather support");
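
/* All register/EEPROM accesses go through USB vendor control requests;
 * the helpers below serialize them with usb_ctrl_mtx and retry transient
 * failures up to MT_VEND_REQ_MAX_RETRY times.
 */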
/* should be called with usb_ctrl_mtx locked */
static int __mt76u_vendor_request(struct mt76_dev *dev, u8 req,
				  u8 req_type, u16 val, u16 offset,
				  void *buf, size_t len)
{
	struct usb_device *udev = to_usb_device(dev->dev);
	unsigned int pipe;
	int i, ret;

	pipe = (req_type & USB_DIR_IN) ? usb_rcvctrlpipe(udev, 0)
				       : usb_sndctrlpipe(udev, 0);
	for (i = 0; i < MT_VEND_REQ_MAX_RETRY; i++) {
		if (test_bit(MT76_REMOVED, &dev->state))
			return -EIO;

		ret = usb_control_msg(udev, pipe, req, req_type, val,
				      offset, buf, len, MT_VEND_REQ_TOUT_MS);
		if (ret == -ENODEV)
			set_bit(MT76_REMOVED, &dev->state);
		if (ret >= 0 || ret == -ENODEV)
			return ret;
		usleep_range(5000, 10000);
	}

	dev_err(dev->dev, "vendor request req:%02x off:%04x failed:%d\n",
		req, offset, ret);
	return ret;
}
int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
			 u8 req_type, u16 val, u16 offset,
			 void *buf, size_t len)
{
	int ret;

	mutex_lock(&dev->usb.usb_ctrl_mtx);
	ret = __mt76u_vendor_request(dev, req, req_type,
				     val, offset, buf, len);
	trace_usb_reg_wr(dev, offset, val);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);

	return ret;
}
EXPORT_SYMBOL_GPL(mt76u_vendor_request);
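
/* MMIO-style register accessors: the address selects the vendor request
 * type (EEPROM/CFG/multi read-write) and values are shuttled through the
 * 32-bit usb->data bounce buffer.
 */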
/* should be called with usb_ctrl_mtx locked */
static u32 __mt76u_rr(struct mt76_dev *dev, u32 addr)
{
	struct mt76_usb *usb = &dev->usb;
	u32 data = ~0;
	u16 offset;
	int ret;
	u8 req;

	switch (addr & MT_VEND_TYPE_MASK) {
	case MT_VEND_TYPE_EEPROM:
		req = MT_VEND_READ_EEPROM;
		break;
	case MT_VEND_TYPE_CFG:
		req = MT_VEND_READ_CFG;
		break;
	default:
		req = MT_VEND_MULTI_READ;
		break;
	}
	offset = addr & ~MT_VEND_TYPE_MASK;

	ret = __mt76u_vendor_request(dev, req,
				     USB_DIR_IN | USB_TYPE_VENDOR,
				     0, offset, usb->data, sizeof(__le32));
	if (ret == sizeof(__le32))
		data = get_unaligned_le32(usb->data);
	trace_usb_reg_rr(dev, addr, data);

	return data;
}
static u32 mt76u_rr(struct mt76_dev *dev, u32 addr)
{
	u32 ret;

	mutex_lock(&dev->usb.usb_ctrl_mtx);
	ret = __mt76u_rr(dev, addr);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);

	return ret;
}
/* should be called with usb_ctrl_mtx locked */
static void __mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
{
	struct mt76_usb *usb = &dev->usb;
	u16 offset;
	u8 req;

	switch (addr & MT_VEND_TYPE_MASK) {
	case MT_VEND_TYPE_CFG:
		req = MT_VEND_WRITE_CFG;
		break;
	default:
		req = MT_VEND_MULTI_WRITE;
		break;
	}
	offset = addr & ~MT_VEND_TYPE_MASK;

	put_unaligned_le32(val, usb->data);
	__mt76u_vendor_request(dev, req,
			       USB_DIR_OUT | USB_TYPE_VENDOR, 0,
			       offset, usb->data, sizeof(__le32));
	trace_usb_reg_wr(dev, addr, val);
}
static void mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
{
	mutex_lock(&dev->usb.usb_ctrl_mtx);
	__mt76u_wr(dev, addr, val);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);
}
static u32 mt76u_rmw(struct mt76_dev *dev, u32 addr,
		     u32 mask, u32 val)
{
	mutex_lock(&dev->usb.usb_ctrl_mtx);
	val |= __mt76u_rr(dev, addr) & ~mask;
	__mt76u_wr(dev, addr, val);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);

	return val;
}
static void mt76u_copy(struct mt76_dev *dev, u32 offset,
		       const void *data, int len)
{
	struct mt76_usb *usb = &dev->usb;
	const u32 *val = data;
	int i, ret;

	mutex_lock(&usb->usb_ctrl_mtx);
	for (i = 0; i < (len / 4); i++) {
		put_unaligned_le32(val[i], usb->data);
		ret = __mt76u_vendor_request(dev, MT_VEND_MULTI_WRITE,
					     USB_DIR_OUT | USB_TYPE_VENDOR,
					     0, offset + i * 4, usb->data,
					     sizeof(__le32));
		if (ret < 0)
			break;
	}
	mutex_unlock(&usb->usb_ctrl_mtx);
}
void mt76u_single_wr(struct mt76_dev *dev, const u8 req,
		     const u16 offset, const u32 val)
{
	mutex_lock(&dev->usb.usb_ctrl_mtx);
	__mt76u_vendor_request(dev, req,
			       USB_DIR_OUT | USB_TYPE_VENDOR,
			       val & 0xffff, offset, NULL, 0);
	__mt76u_vendor_request(dev, req,
			       USB_DIR_OUT | USB_TYPE_VENDOR,
			       val >> 16, offset + 2, NULL, 0);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);
}
EXPORT_SYMBOL_GPL(mt76u_single_wr);
static int
mt76u_req_wr_rp(struct mt76_dev *dev, u32 base,
		const struct mt76_reg_pair *data, int len)
{
	struct mt76_usb *usb = &dev->usb;

	mutex_lock(&usb->usb_ctrl_mtx);
	while (len > 0) {
		__mt76u_wr(dev, base + data->reg, data->value);
		len--;
		data++;
	}
	mutex_unlock(&usb->usb_ctrl_mtx);

	return 0;
}

static int
mt76u_wr_rp(struct mt76_dev *dev, u32 base,
	    const struct mt76_reg_pair *data, int n)
{
	if (test_bit(MT76_STATE_MCU_RUNNING, &dev->state))
		return dev->mcu_ops->mcu_wr_rp(dev, base, data, n);
	else
		return mt76u_req_wr_rp(dev, base, data, n);
}
static int
mt76u_req_rd_rp(struct mt76_dev *dev, u32 base, struct mt76_reg_pair *data,
		int len)
{
	struct mt76_usb *usb = &dev->usb;

	mutex_lock(&usb->usb_ctrl_mtx);
	while (len > 0) {
		data->value = __mt76u_rr(dev, base + data->reg);
		len--;
		data++;
	}
	mutex_unlock(&usb->usb_ctrl_mtx);

	return 0;
}

static int
mt76u_rd_rp(struct mt76_dev *dev, u32 base,
	    struct mt76_reg_pair *data, int n)
{
	if (test_bit(MT76_STATE_MCU_RUNNING, &dev->state))
		return dev->mcu_ops->mcu_rd_rp(dev, base, data, n);
	else
		return mt76u_req_rd_rp(dev, base, data, n);
}
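
/* Scatter-gather is used only if the module parameter allows it, the host
 * controller supports SG and there is no SG size constraint (or the link
 * is wireless USB).
 */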
static bool mt76u_check_sg(struct mt76_dev *dev)
{
	struct usb_device *udev = to_usb_device(dev->dev);

	return (!disable_usb_sg && udev->bus->sg_tablesize > 0 &&
		(udev->bus->no_sg_constraint ||
		 udev->speed == USB_SPEED_WIRELESS));
}
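
/* Walk the current altsetting and record every bulk IN/OUT endpoint; all
 * expected endpoints must be present.
 */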
static int
mt76u_set_endpoints(struct usb_interface *intf,
		    struct mt76_usb *usb)
{
	struct usb_host_interface *intf_desc = intf->cur_altsetting;
	struct usb_endpoint_descriptor *ep_desc;
	int i, in_ep = 0, out_ep = 0;

	for (i = 0; i < intf_desc->desc.bNumEndpoints; i++) {
		ep_desc = &intf_desc->endpoint[i].desc;

		if (usb_endpoint_is_bulk_in(ep_desc) &&
		    in_ep < __MT_EP_IN_MAX) {
			usb->in_ep[in_ep] = usb_endpoint_num(ep_desc);
			usb->in_max_packet = usb_endpoint_maxp(ep_desc);
			in_ep++;
		} else if (usb_endpoint_is_bulk_out(ep_desc) &&
			   out_ep < __MT_EP_OUT_MAX) {
			usb->out_ep[out_ep] = usb_endpoint_num(ep_desc);
			usb->out_max_packet = usb_endpoint_maxp(ep_desc);
			out_ep++;
		}
	}

	if (in_ep != __MT_EP_IN_MAX || out_ep != __MT_EP_OUT_MAX)
		return -EINVAL;

	return 0;
}
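
/* Fill an RX urb scatterlist with page fragments from the per-queue
 * page_frag cache; on partial allocation the unused fragments are freed
 * and the scatterlist is shrunk accordingly.
 */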
static int
mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76_queue *q, struct urb *urb,
		 int nsgs, gfp_t gfp)
{
	int i;

	for (i = 0; i < nsgs; i++) {
		struct page *page;
		void *data;
		int offset;

		data = page_frag_alloc(&q->rx_page, q->buf_size, gfp);
		if (!data)
			break;

		page = virt_to_head_page(data);
		offset = data - page_address(page);
		sg_set_page(&urb->sg[i], page, q->buf_size, offset);
	}

	if (i < nsgs) {
		int j;

		for (j = nsgs; j < urb->num_sgs; j++)
			skb_free_frag(sg_virt(&urb->sg[j]));
		urb->num_sgs = i;
	}

	urb->num_sgs = max_t(int, i, urb->num_sgs);
	urb->transfer_buffer_length = urb->num_sgs * q->buf_size;
	sg_init_marker(urb->sg, urb->num_sgs);

	return i ? : -ENOMEM;
}
static int
mt76u_refill_rx(struct mt76_dev *dev, struct urb *urb, int nsgs, gfp_t gfp)
{
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];

	if (dev->usb.sg_en) {
		return mt76u_fill_rx_sg(dev, q, urb, nsgs, gfp);
	} else {
		urb->transfer_buffer_length = q->buf_size;
		urb->transfer_buffer = page_frag_alloc(&q->rx_page,
						       q->buf_size, gfp);
		return urb->transfer_buffer ? 0 : -ENOMEM;
	}
}
static int
mt76u_urb_alloc(struct mt76_dev *dev, struct mt76_queue_entry *e)
{
	unsigned int size = sizeof(struct urb);

	if (dev->usb.sg_en)
		size += MT_SG_MAX_SIZE * sizeof(struct scatterlist);

	e->urb = kzalloc(size, GFP_KERNEL);
	if (!e->urb)
		return -ENOMEM;

	usb_init_urb(e->urb);

	if (dev->usb.sg_en)
		e->urb->sg = (struct scatterlist *)(e->urb + 1);

	return 0;
}

static int
mt76u_rx_urb_alloc(struct mt76_dev *dev, struct mt76_queue_entry *e)
{
	int err;

	err = mt76u_urb_alloc(dev, e);
	if (err)
		return err;

	return mt76u_refill_rx(dev, e->urb, MT_SG_MAX_SIZE, GFP_KERNEL);
}
static void mt76u_urb_free(struct urb *urb)
{
	int i;

	for (i = 0; i < urb->num_sgs; i++)
		skb_free_frag(sg_virt(&urb->sg[i]));

	if (urb->transfer_buffer)
		skb_free_frag(urb->transfer_buffer);

	usb_free_urb(urb);
}
static void
mt76u_fill_bulk_urb(struct mt76_dev *dev, int dir, int index,
		    struct urb *urb, usb_complete_t complete_fn,
		    void *context)
{
	struct usb_device *udev = to_usb_device(dev->dev);
	unsigned int pipe;

	if (dir == USB_DIR_IN)
		pipe = usb_rcvbulkpipe(udev, dev->usb.in_ep[index]);
	else
		pipe = usb_sndbulkpipe(udev, dev->usb.out_ep[index]);

	urb->dev = udev;
	urb->pipe = pipe;
	urb->complete = complete_fn;
	urb->context = context;
}
static inline struct urb *
mt76u_get_next_rx_entry(struct mt76_dev *dev)
{
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	struct urb *urb = NULL;
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	if (q->queued > 0) {
		urb = q->entry[q->head].urb;
		q->head = (q->head + 1) % q->ndesc;
		q->queued--;
	}
	spin_unlock_irqrestore(&q->lock, flags);

	return urb;
}
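
/* Validate the DMA header prepended by the hardware and return the
 * payload length, or a negative value if it is inconsistent with the
 * amount of data actually received.
 */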
static int mt76u_get_rx_entry_len(u8 *data, u32 data_len)
{
	u16 dma_len, min_len;

	dma_len = get_unaligned_le16(data);
	min_len = MT_DMA_HDR_LEN + MT_RX_RXWI_LEN +
		  MT_FCE_INFO_LEN;

	if (data_len < min_len || !dma_len ||
	    dma_len + MT_DMA_HDR_LEN > data_len ||
	    (dma_len & 0x3))
		return -EINVAL;
	return dma_len;
}

static int
mt76u_process_rx_entry(struct mt76_dev *dev, struct urb *urb)
{
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	u8 *data = urb->num_sgs ? sg_virt(&urb->sg[0]) : urb->transfer_buffer;
	int data_len = urb->num_sgs ? urb->sg[0].length : urb->actual_length;
	int len, nsgs = 1;
	struct sk_buff *skb;

	if (!test_bit(MT76_STATE_INITIALIZED, &dev->state))
		return 0;

	len = mt76u_get_rx_entry_len(data, urb->actual_length);
	if (len < 0)
		return 0;

	data_len = min_t(int, len, data_len - MT_DMA_HDR_LEN);
	if (MT_DMA_HDR_LEN + data_len > SKB_WITH_OVERHEAD(q->buf_size)) {
		dev_err_ratelimited(dev->dev, "rx data too big %d\n", data_len);
		return 0;
	}

	skb = build_skb(data, q->buf_size);
	if (!skb)
		return 0;

	skb_reserve(skb, MT_DMA_HDR_LEN);
	__skb_put(skb, data_len);
	len -= data_len;

	while (len > 0 && nsgs < urb->num_sgs) {
		data_len = min_t(int, len, urb->sg[nsgs].length);
		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
				sg_page(&urb->sg[nsgs]),
				urb->sg[nsgs].offset,
				data_len, q->buf_size);
		len -= data_len;
		nsgs++;
	}
	dev->drv->rx_skb(dev, MT_RXQ_MAIN, skb);

	return nsgs;
}
static void mt76u_complete_rx(struct urb *urb)
{
	struct mt76_dev *dev = urb->context;
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	unsigned long flags;

	trace_rx_urb(dev, urb);

	switch (urb->status) {
	case -ECONNRESET:
	case -ESHUTDOWN:
	case -ENOENT:
		return;
	default:
		dev_err_ratelimited(dev->dev, "rx urb failed: %d\n",
				    urb->status);
		/* fall through */
	case 0:
		break;
	}

	spin_lock_irqsave(&q->lock, flags);
	if (WARN_ONCE(q->entry[q->tail].urb != urb, "rx urb mismatch"))
		goto out;

	q->tail = (q->tail + 1) % q->ndesc;
	q->queued++;
	tasklet_schedule(&dev->usb.rx_tasklet);
out:
	spin_unlock_irqrestore(&q->lock, flags);
}
static int
mt76u_submit_rx_buf(struct mt76_dev *dev, struct urb *urb)
{
	mt76u_fill_bulk_urb(dev, USB_DIR_IN, MT_EP_IN_PKT_RX, urb,
			    mt76u_complete_rx, dev);
	trace_submit_urb(dev, urb);

	return usb_submit_urb(urb, GFP_ATOMIC);
}
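
/* RX bottom half: drain completed urbs, refill and resubmit their buffers,
 * then let mac80211 process the accumulated frames.
 */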
static void mt76u_rx_tasklet(unsigned long data)
{
	struct mt76_dev *dev = (struct mt76_dev *)data;
	struct urb *urb;
	int err, count;

	rcu_read_lock();

	while (true) {
		urb = mt76u_get_next_rx_entry(dev);
		if (!urb)
			break;

		count = mt76u_process_rx_entry(dev, urb);
		if (count > 0) {
			err = mt76u_refill_rx(dev, urb, count, GFP_ATOMIC);
			if (err < 0)
				break;
		}
		mt76u_submit_rx_buf(dev, urb);
	}
	mt76_rx_poll_complete(dev, MT_RXQ_MAIN, NULL);

	rcu_read_unlock();
}
static int mt76u_submit_rx_buffers(struct mt76_dev *dev)
{
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	unsigned long flags;
	int i, err = 0;

	spin_lock_irqsave(&q->lock, flags);
	for (i = 0; i < q->ndesc; i++) {
		err = mt76u_submit_rx_buf(dev, q->entry[i].urb);
		if (err < 0)
			break;
	}
	q->head = q->tail = 0;
	q->queued = 0;
	spin_unlock_irqrestore(&q->lock, flags);

	return err;
}
static int mt76u_alloc_rx(struct mt76_dev *dev)
{
	struct mt76_usb *usb = &dev->usb;
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	int i, err;

	usb->mcu.data = devm_kmalloc(dev->dev, MCU_RESP_URB_SIZE, GFP_KERNEL);
	if (!usb->mcu.data)
		return -ENOMEM;

	spin_lock_init(&q->lock);
	q->entry = devm_kcalloc(dev->dev,
				MT_NUM_RX_ENTRIES, sizeof(*q->entry),
				GFP_KERNEL);
	if (!q->entry)
		return -ENOMEM;

	q->buf_size = dev->usb.sg_en ? MT_RX_BUF_SIZE : PAGE_SIZE;
	q->ndesc = MT_NUM_RX_ENTRIES;
	for (i = 0; i < q->ndesc; i++) {
		err = mt76u_rx_urb_alloc(dev, &q->entry[i]);
		if (err < 0)
			return err;
	}

	return mt76u_submit_rx_buffers(dev);
}
static void mt76u_free_rx(struct mt76_dev *dev)
{
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	struct page *page;
	int i;

	for (i = 0; i < q->ndesc; i++)
		mt76u_urb_free(q->entry[i].urb);

	if (!q->rx_page.va)
		return;

	page = virt_to_page(q->rx_page.va);
	__page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
	memset(&q->rx_page, 0, sizeof(q->rx_page));
}
void mt76u_stop_rx(struct mt76_dev *dev)
{
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	int i;

	for (i = 0; i < q->ndesc; i++)
		usb_poison_urb(q->entry[i].urb);

	tasklet_kill(&dev->usb.rx_tasklet);
}
EXPORT_SYMBOL_GPL(mt76u_stop_rx);
int mt76u_resume_rx(struct mt76_dev *dev)
{
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	int i;

	for (i = 0; i < q->ndesc; i++)
		usb_unpoison_urb(q->entry[i].urb);

	return mt76u_submit_rx_buffers(dev);
}
EXPORT_SYMBOL_GPL(mt76u_resume_rx);
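
/* TX bottom half: reap completed entries for each AC queue, report them to
 * the driver, restart the software queues and wake mac80211 queues stopped
 * because of back-pressure.
 */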
static void mt76u_tx_tasklet(unsigned long data)
{
	struct mt76_dev *dev = (struct mt76_dev *)data;
	struct mt76_queue_entry entry;
	struct mt76_sw_queue *sq;
	struct mt76_queue *q;
	bool wake;
	int i;

	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
		u32 n_dequeued = 0, n_sw_dequeued = 0;

		sq = &dev->q_tx[i];
		q = sq->q;

		while (q->queued > n_dequeued) {
			if (!q->entry[q->head].done)
				break;

			if (q->entry[q->head].schedule) {
				q->entry[q->head].schedule = false;
				n_sw_dequeued++;
			}

			entry = q->entry[q->head];
			q->entry[q->head].done = false;
			q->head = (q->head + 1) % q->ndesc;
			n_dequeued++;

			dev->drv->tx_complete_skb(dev, i, &entry);
		}

		spin_lock_bh(&q->lock);

		sq->swq_queued -= n_sw_dequeued;
		q->queued -= n_dequeued;

		wake = q->stopped && q->queued < q->ndesc - 8;
		if (wake)
			q->stopped = false;

		if (!q->queued)
			wake_up(&dev->tx_wait);

		spin_unlock_bh(&q->lock);

		mt76_txq_schedule(dev, i);

		if (!test_and_set_bit(MT76_READING_STATS, &dev->state))
			ieee80211_queue_delayed_work(dev->hw,
						     &dev->usb.stat_work,
						     msecs_to_jiffies(10));

		if (wake)
			ieee80211_wake_queue(dev->hw, i);
	}
}
static void mt76u_tx_status_data(struct work_struct *work)
{
	struct mt76_usb *usb;
	struct mt76_dev *dev;
	u8 update = 1;
	u16 count = 0;

	usb = container_of(work, struct mt76_usb, stat_work.work);
	dev = container_of(usb, struct mt76_dev, usb);

	while (true) {
		if (test_bit(MT76_REMOVED, &dev->state))
			break;

		if (!dev->drv->tx_status_data(dev, &update))
			break;
		count++;
	}

	if (count && test_bit(MT76_STATE_RUNNING, &dev->state))
		ieee80211_queue_delayed_work(dev->hw, &usb->stat_work,
					     msecs_to_jiffies(10));
	else
		clear_bit(MT76_READING_STATS, &dev->state);
}
static void mt76u_complete_tx(struct urb *urb)
{
	struct mt76_dev *dev = dev_get_drvdata(&urb->dev->dev);
	struct mt76_queue_entry *e = urb->context;

	if (mt76u_urb_error(urb))
		dev_err(dev->dev, "tx urb failed: %d\n", urb->status);
	e->done = true;

	tasklet_schedule(&dev->tx_tasklet);
}
static int
mt76u_tx_setup_buffers(struct mt76_dev *dev, struct sk_buff *skb,
		       struct urb *urb)
{
	urb->transfer_buffer_length = skb->len;

	if (!dev->usb.sg_en) {
		urb->transfer_buffer = skb->data;
		return 0;
	} else {
		sg_init_table(urb->sg, MT_SG_MAX_SIZE);
		urb->num_sgs = skb_to_sgvec(skb, urb->sg, 0, skb->len);
		if (urb->num_sgs == 0)
			return -ENOMEM;
		return urb->num_sgs;
	}
}
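
/* Queue a frame for transmission: let the driver build the TX descriptor,
 * map the skb into the entry urb and advance the queue tail. The urb is
 * submitted later from the kick() hook.
 */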
static int
mt76u_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid,
		   struct sk_buff *skb, struct mt76_wcid *wcid,
		   struct ieee80211_sta *sta)
{
	struct mt76_queue *q = dev->q_tx[qid].q;
	struct mt76_tx_info tx_info = {
		.skb = skb,
	};
	u16 idx = q->tail;
	int err;

	if (q->queued == q->ndesc)
		return -ENOSPC;

	skb->prev = skb->next = NULL;
	err = dev->drv->tx_prepare_skb(dev, NULL, qid, wcid, sta, &tx_info);
	if (err < 0)
		return err;

	err = mt76u_tx_setup_buffers(dev, tx_info.skb, q->entry[idx].urb);
	if (err < 0)
		return err;

	mt76u_fill_bulk_urb(dev, USB_DIR_OUT, q2ep(q->hw_idx),
			    q->entry[idx].urb, mt76u_complete_tx,
			    &q->entry[idx]);

	q->tail = (q->tail + 1) % q->ndesc;
	q->entry[idx].skb = tx_info.skb;
	q->queued++;

	return idx;
}
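
/* Submit every urb queued between q->first and q->tail; on -ENODEV mark
 * the device as removed so further submissions are skipped.
 */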
static void mt76u_tx_kick(struct mt76_dev *dev, struct mt76_queue *q)
{
	struct urb *urb;
	int err;

	while (q->first != q->tail) {
		urb = q->entry[q->first].urb;

		trace_submit_urb(dev, urb);
		err = usb_submit_urb(urb, GFP_ATOMIC);
		if (err < 0) {
			if (err == -ENODEV)
				set_bit(MT76_REMOVED, &dev->state);
			else
				dev_err(dev->dev, "tx urb submit failed:%d\n",
					err);
			break;
		}
		q->first = (q->first + 1) % q->ndesc;
	}
}
static int mt76u_alloc_tx(struct mt76_dev *dev)
{
	struct mt76_queue *q;
	int i, j, err;

	for (i = 0; i <= MT_TXQ_PSD; i++) {
		INIT_LIST_HEAD(&dev->q_tx[i].swq);

		if (i >= IEEE80211_NUM_ACS) {
			dev->q_tx[i].q = dev->q_tx[0].q;
			continue;
		}

		q = devm_kzalloc(dev->dev, sizeof(*q), GFP_KERNEL);
		if (!q)
			return -ENOMEM;

		spin_lock_init(&q->lock);
		q->hw_idx = mt76_ac_to_hwq(i);
		dev->q_tx[i].q = q;

		q->entry = devm_kcalloc(dev->dev,
					MT_NUM_TX_ENTRIES, sizeof(*q->entry),
					GFP_KERNEL);
		if (!q->entry)
			return -ENOMEM;

		q->ndesc = MT_NUM_TX_ENTRIES;
		for (j = 0; j < q->ndesc; j++) {
			err = mt76u_urb_alloc(dev, &q->entry[j]);
			if (err < 0)
				return err;
		}
	}

	return 0;
}
static void mt76u_free_tx(struct mt76_dev *dev)
{
	struct mt76_queue *q;
	int i, j;

	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
		q = dev->q_tx[i].q;
		for (j = 0; j < q->ndesc; j++)
			usb_free_urb(q->entry[j].urb);
	}
}
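
/* Stop TX: wait for pending frames, kill outstanding urbs and the tx
 * tasklet, then complete any skbs that could no longer be submitted.
 */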
void mt76u_stop_tx(struct mt76_dev *dev)
{
	struct mt76_queue_entry entry;
	struct mt76_queue *q;
	int i, j, ret;

	ret = wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(dev), HZ / 5);
	if (!ret) {
		dev_err(dev->dev, "timed out waiting for pending tx\n");

		for (i = 0; i < IEEE80211_NUM_ACS; i++) {
			q = dev->q_tx[i].q;
			for (j = 0; j < q->ndesc; j++)
				usb_kill_urb(q->entry[j].urb);
		}

		tasklet_kill(&dev->tx_tasklet);

		/* On device removal we might queue skb's, but mt76u_tx_kick()
		 * will fail to submit urb, clean up those skb's manually.
		 */
		for (i = 0; i < IEEE80211_NUM_ACS; i++) {
			q = dev->q_tx[i].q;

			/* Assure we are in sync with killed tasklet. */
			spin_lock_bh(&q->lock);
			while (q->queued) {
				entry = q->entry[q->head];
				q->head = (q->head + 1) % q->ndesc;
				q->queued--;

				dev->drv->tx_complete_skb(dev, i, &entry);
			}
			spin_unlock_bh(&q->lock);
		}
	}

	cancel_delayed_work_sync(&dev->usb.stat_work);
	clear_bit(MT76_READING_STATS, &dev->state);

	mt76_tx_status_check(dev, NULL, true);
}
EXPORT_SYMBOL_GPL(mt76u_stop_tx);
void mt76u_queues_deinit(struct mt76_dev *dev)
{
	mt76u_stop_rx(dev);
	mt76u_stop_tx(dev);

	mt76u_free_rx(dev);
	mt76u_free_tx(dev);
}
EXPORT_SYMBOL_GPL(mt76u_queues_deinit);
int mt76u_alloc_queues(struct mt76_dev *dev)
{
	int err;

	err = mt76u_alloc_rx(dev);
	if (err < 0)
		return err;

	return mt76u_alloc_tx(dev);
}
EXPORT_SYMBOL_GPL(mt76u_alloc_queues);
static const struct mt76_queue_ops usb_queue_ops = {
	.tx_queue_skb = mt76u_tx_queue_skb,
	.kick = mt76u_tx_kick,
};
int mt76u_init(struct mt76_dev *dev,
	       struct usb_interface *intf)
{
	static const struct mt76_bus_ops mt76u_ops = {
		.rr = mt76u_rr,
		.wr = mt76u_wr,
		.rmw = mt76u_rmw,
		.copy = mt76u_copy,
		.wr_rp = mt76u_wr_rp,
		.rd_rp = mt76u_rd_rp,
		.type = MT76_BUS_USB,
	};
	struct mt76_usb *usb = &dev->usb;

	tasklet_init(&usb->rx_tasklet, mt76u_rx_tasklet, (unsigned long)dev);
	tasklet_init(&dev->tx_tasklet, mt76u_tx_tasklet, (unsigned long)dev);
	INIT_DELAYED_WORK(&usb->stat_work, mt76u_tx_status_data);
	skb_queue_head_init(&dev->rx_skb[MT_RXQ_MAIN]);

	mutex_init(&usb->mcu.mutex);

	mutex_init(&usb->usb_ctrl_mtx);
	dev->bus = &mt76u_ops;
	dev->queue_ops = &usb_queue_ops;

	usb->sg_en = mt76u_check_sg(dev);

	return mt76u_set_endpoints(intf, usb);
}
EXPORT_SYMBOL_GPL(mt76u_init);
MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>");
MODULE_LICENSE("Dual BSD/GPL");