// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
 */

#include <linux/module.h>
#include "mt76.h"
#include "usb_trace.h"
#include "dma.h"

#define MT_VEND_REQ_MAX_RETRY	10
#define MT_VEND_REQ_TOUT_MS	300

static bool disable_usb_sg;
module_param_named(disable_usb_sg, disable_usb_sg, bool, 0644);
MODULE_PARM_DESC(disable_usb_sg, "Disable usb scatter-gather support");
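/* Low-level vendor control transfer helper: the caller must hold
 * usb_ctrl_mtx. The request is retried up to MT_VEND_REQ_MAX_RETRY times
 * and MT76_REMOVED is set once the device disappears (-ENODEV).
 */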
static int __mt76u_vendor_request(struct mt76_dev *dev, u8 req,
				  u8 req_type, u16 val, u16 offset,
				  void *buf, size_t len)
{
	struct usb_interface *uintf = to_usb_interface(dev->dev);
	struct usb_device *udev = interface_to_usbdev(uintf);
	unsigned int pipe;
	int i, ret;

	lockdep_assert_held(&dev->usb.usb_ctrl_mtx);

	pipe = (req_type & USB_DIR_IN) ? usb_rcvctrlpipe(udev, 0)
				       : usb_sndctrlpipe(udev, 0);
	for (i = 0; i < MT_VEND_REQ_MAX_RETRY; i++) {
		if (test_bit(MT76_REMOVED, &dev->phy.state))
			return -EIO;

		ret = usb_control_msg(udev, pipe, req, req_type, val,
				      offset, buf, len, MT_VEND_REQ_TOUT_MS);
		if (ret == -ENODEV)
			set_bit(MT76_REMOVED, &dev->phy.state);
		if (ret >= 0 || ret == -ENODEV)
			return ret;
		usleep_range(5000, 10000);
	}

	dev_err(dev->dev, "vendor request req:%02x off:%04x failed:%d\n",
		req, offset, ret);
	return ret;
}
int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
			 u8 req_type, u16 val, u16 offset,
			 void *buf, size_t len)
{
	int ret;

	mutex_lock(&dev->usb.usb_ctrl_mtx);
	ret = __mt76u_vendor_request(dev, req, req_type,
				     val, offset, buf, len);
	trace_usb_reg_wr(dev, offset, val);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);

	return ret;
}
EXPORT_SYMBOL_GPL(mt76u_vendor_request);
static u32 ___mt76u_rr(struct mt76_dev *dev, u8 req, u32 addr)
{
	struct mt76_usb *usb = &dev->usb;
	u32 data = ~0;
	int ret;

	ret = __mt76u_vendor_request(dev, req,
				     USB_DIR_IN | USB_TYPE_VENDOR,
				     addr >> 16, addr, usb->data,
				     sizeof(__le32));
	if (ret == sizeof(__le32))
		data = get_unaligned_le32(usb->data);
	trace_usb_reg_rr(dev, addr, data);

	return data;
}

static u32 __mt76u_rr(struct mt76_dev *dev, u32 addr)
{
	u8 req;

	switch (addr & MT_VEND_TYPE_MASK) {
	case MT_VEND_TYPE_EEPROM:
		req = MT_VEND_READ_EEPROM;
		break;
	case MT_VEND_TYPE_CFG:
		req = MT_VEND_READ_CFG;
		break;
	default:
		req = MT_VEND_MULTI_READ;
		break;
	}

	return ___mt76u_rr(dev, req, addr & ~MT_VEND_TYPE_MASK);
}
static u32 mt76u_rr(struct mt76_dev *dev, u32 addr)
{
	u32 ret;

	mutex_lock(&dev->usb.usb_ctrl_mtx);
	ret = __mt76u_rr(dev, addr);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);

	return ret;
}

static u32 mt76u_rr_ext(struct mt76_dev *dev, u32 addr)
{
	u32 ret;

	mutex_lock(&dev->usb.usb_ctrl_mtx);
	ret = ___mt76u_rr(dev, MT_VEND_READ_EXT, addr);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);

	return ret;
}
static void ___mt76u_wr(struct mt76_dev *dev, u8 req,
			u32 addr, u32 val)
{
	struct mt76_usb *usb = &dev->usb;

	put_unaligned_le32(val, usb->data);
	__mt76u_vendor_request(dev, req,
			       USB_DIR_OUT | USB_TYPE_VENDOR,
			       addr >> 16, addr, usb->data,
			       sizeof(__le32));
	trace_usb_reg_wr(dev, addr, val);
}

static void __mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
{
	u8 req;

	switch (addr & MT_VEND_TYPE_MASK) {
	case MT_VEND_TYPE_CFG:
		req = MT_VEND_WRITE_CFG;
		break;
	default:
		req = MT_VEND_MULTI_WRITE;
		break;
	}

	___mt76u_wr(dev, req, addr & ~MT_VEND_TYPE_MASK, val);
}
static void mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
{
	mutex_lock(&dev->usb.usb_ctrl_mtx);
	__mt76u_wr(dev, addr, val);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);
}

static void mt76u_wr_ext(struct mt76_dev *dev, u32 addr, u32 val)
{
	mutex_lock(&dev->usb.usb_ctrl_mtx);
	___mt76u_wr(dev, MT_VEND_WRITE_EXT, addr, val);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);
}
static u32 mt76u_rmw(struct mt76_dev *dev, u32 addr,
		     u32 mask, u32 val)
{
	mutex_lock(&dev->usb.usb_ctrl_mtx);
	val |= __mt76u_rr(dev, addr) & ~mask;
	__mt76u_wr(dev, addr, val);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);

	return val;
}

static u32 mt76u_rmw_ext(struct mt76_dev *dev, u32 addr,
			 u32 mask, u32 val)
{
	mutex_lock(&dev->usb.usb_ctrl_mtx);
	val |= ___mt76u_rr(dev, MT_VEND_READ_EXT, addr) & ~mask;
	___mt76u_wr(dev, MT_VEND_WRITE_EXT, addr, val);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);

	return val;
}
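/* Bulk register/memory copy helpers: data is staged through the
 * pre-allocated usb->data bounce buffer in chunks of at most
 * usb->data_len bytes per vendor request.
 */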
static void mt76u_copy(struct mt76_dev *dev, u32 offset,
		       const void *data, int len)
{
	struct mt76_usb *usb = &dev->usb;
	const u8 *val = data;
	int ret;
	int current_batch_size;
	int i = 0;

	/* Assure that always a multiple of 4 bytes are copied,
	 * otherwise beacons can be corrupted.
	 * See: "mt76: round up length on mt76_wr_copy"
	 * Commit 850e8f6fbd5d0003b0
	 */
	len = round_up(len, 4);

	mutex_lock(&usb->usb_ctrl_mtx);
	while (i < len) {
		current_batch_size = min_t(int, usb->data_len, len - i);
		memcpy(usb->data, val + i, current_batch_size);
		ret = __mt76u_vendor_request(dev, MT_VEND_MULTI_WRITE,
					     USB_DIR_OUT | USB_TYPE_VENDOR,
					     0, offset + i, usb->data,
					     current_batch_size);
		if (ret < 0)
			break;

		i += current_batch_size;
	}
	mutex_unlock(&usb->usb_ctrl_mtx);
}
static void mt76u_copy_ext(struct mt76_dev *dev, u32 offset,
			   const void *data, int len)
{
	struct mt76_usb *usb = &dev->usb;
	int ret, i = 0, batch_len;
	const u8 *val = data;

	len = round_up(len, 4);
	mutex_lock(&usb->usb_ctrl_mtx);
	while (i < len) {
		batch_len = min_t(int, usb->data_len, len - i);
		memcpy(usb->data, val + i, batch_len);
		ret = __mt76u_vendor_request(dev, MT_VEND_WRITE_EXT,
					     USB_DIR_OUT | USB_TYPE_VENDOR,
					     (offset + i) >> 16, offset + i,
					     usb->data, batch_len);
		if (ret < 0)
			break;

		i += batch_len;
	}
	mutex_unlock(&usb->usb_ctrl_mtx);
}
static void
mt76u_read_copy_ext(struct mt76_dev *dev, u32 offset,
		    void *data, int len)
{
	struct mt76_usb *usb = &dev->usb;
	int i = 0, batch_len, ret;
	u8 *val = data;

	len = round_up(len, 4);
	mutex_lock(&usb->usb_ctrl_mtx);
	while (i < len) {
		batch_len = min_t(int, usb->data_len, len - i);
		ret = __mt76u_vendor_request(dev, MT_VEND_READ_EXT,
					     USB_DIR_IN | USB_TYPE_VENDOR,
					     (offset + i) >> 16, offset + i,
					     usb->data, batch_len);
		if (ret < 0)
			break;

		memcpy(val + i, usb->data, batch_len);
		i += batch_len;
	}
	mutex_unlock(&usb->usb_ctrl_mtx);
}
void mt76u_single_wr(struct mt76_dev *dev, const u8 req,
		     const u16 offset, const u32 val)
{
	mutex_lock(&dev->usb.usb_ctrl_mtx);
	__mt76u_vendor_request(dev, req,
			       USB_DIR_OUT | USB_TYPE_VENDOR,
			       val & 0xffff, offset, NULL, 0);
	__mt76u_vendor_request(dev, req,
			       USB_DIR_OUT | USB_TYPE_VENDOR,
			       val >> 16, offset + 2, NULL, 0);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);
}
EXPORT_SYMBOL_GPL(mt76u_single_wr);
static int
mt76u_req_wr_rp(struct mt76_dev *dev, u32 base,
		const struct mt76_reg_pair *data, int len)
{
	struct mt76_usb *usb = &dev->usb;

	mutex_lock(&usb->usb_ctrl_mtx);
	while (len > 0) {
		__mt76u_wr(dev, base + data->reg, data->value);
		len--;
		data++;
	}
	mutex_unlock(&usb->usb_ctrl_mtx);

	return 0;
}

static int
mt76u_wr_rp(struct mt76_dev *dev, u32 base,
	    const struct mt76_reg_pair *data, int n)
{
	if (test_bit(MT76_STATE_MCU_RUNNING, &dev->phy.state))
		return dev->mcu_ops->mcu_wr_rp(dev, base, data, n);
	else
		return mt76u_req_wr_rp(dev, base, data, n);
}
static int
mt76u_req_rd_rp(struct mt76_dev *dev, u32 base, struct mt76_reg_pair *data,
		int len)
{
	struct mt76_usb *usb = &dev->usb;

	mutex_lock(&usb->usb_ctrl_mtx);
	while (len > 0) {
		data->value = __mt76u_rr(dev, base + data->reg);
		len--;
		data++;
	}
	mutex_unlock(&usb->usb_ctrl_mtx);

	return 0;
}

static int
mt76u_rd_rp(struct mt76_dev *dev, u32 base,
	    struct mt76_reg_pair *data, int n)
{
	if (test_bit(MT76_STATE_MCU_RUNNING, &dev->phy.state))
		return dev->mcu_ops->mcu_rd_rp(dev, base, data, n);
	else
		return mt76u_req_rd_rp(dev, base, data, n);
}
static bool mt76u_check_sg(struct mt76_dev *dev)
{
	struct usb_interface *uintf = to_usb_interface(dev->dev);
	struct usb_device *udev = interface_to_usbdev(uintf);

	return (!disable_usb_sg && udev->bus->sg_tablesize > 0 &&
		(udev->bus->no_sg_constraint ||
		 udev->speed == USB_SPEED_WIRELESS));
}
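/* Walk the current altsetting and record every bulk IN/OUT endpoint
 * number; the interface is expected to expose exactly __MT_EP_IN_MAX
 * bulk IN and __MT_EP_OUT_MAX bulk OUT endpoints, otherwise -EINVAL is
 * returned and device setup fails.
 */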
static int
mt76u_set_endpoints(struct usb_interface *intf,
		    struct mt76_usb *usb)
{
	struct usb_host_interface *intf_desc = intf->cur_altsetting;
	struct usb_endpoint_descriptor *ep_desc;
	int i, in_ep = 0, out_ep = 0;

	for (i = 0; i < intf_desc->desc.bNumEndpoints; i++) {
		ep_desc = &intf_desc->endpoint[i].desc;

		if (usb_endpoint_is_bulk_in(ep_desc) &&
		    in_ep < __MT_EP_IN_MAX) {
			usb->in_ep[in_ep] = usb_endpoint_num(ep_desc);
			in_ep++;
		} else if (usb_endpoint_is_bulk_out(ep_desc) &&
			   out_ep < __MT_EP_OUT_MAX) {
			usb->out_ep[out_ep] = usb_endpoint_num(ep_desc);
			out_ep++;
		}
	}

	if (in_ep != __MT_EP_IN_MAX || out_ep != __MT_EP_OUT_MAX)
		return -EINVAL;

	return 0;
}
static int
mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76_queue *q, struct urb *urb,
		 int nsgs, gfp_t gfp)
{
	int i;

	for (i = 0; i < nsgs; i++) {
		struct page *page;
		void *data;
		int offset;

		data = page_frag_alloc(&q->rx_page, q->buf_size, gfp);
		if (!data)
			break;

		page = virt_to_head_page(data);
		offset = data - page_address(page);
		sg_set_page(&urb->sg[i], page, q->buf_size, offset);
	}

	if (i < nsgs) {
		int j;

		for (j = nsgs; j < urb->num_sgs; j++)
			skb_free_frag(sg_virt(&urb->sg[j]));
		urb->num_sgs = i;
	}

	urb->num_sgs = max_t(int, i, urb->num_sgs);
	urb->transfer_buffer_length = urb->num_sgs * q->buf_size;
	sg_init_marker(urb->sg, urb->num_sgs);

	return i ? : -ENOMEM;
}
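/* Refill an rx urb: the main data queue can use scatter-gather buffers
 * when sg_en is set, while other queues (e.g. the MCU response queue)
 * always use a single linear page fragment.
 */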
static int
mt76u_refill_rx(struct mt76_dev *dev, struct mt76_queue *q,
		struct urb *urb, int nsgs, gfp_t gfp)
{
	enum mt76_rxq_id qid = q - &dev->q_rx[MT_RXQ_MAIN];

	if (qid == MT_RXQ_MAIN && dev->usb.sg_en)
		return mt76u_fill_rx_sg(dev, q, urb, nsgs, gfp);

	urb->transfer_buffer_length = q->buf_size;
	urb->transfer_buffer = page_frag_alloc(&q->rx_page, q->buf_size, gfp);

	return urb->transfer_buffer ? 0 : -ENOMEM;
}
static int
mt76u_urb_alloc(struct mt76_dev *dev, struct mt76_queue_entry *e,
		int sg_max_size)
{
	unsigned int size = sizeof(struct urb);

	if (dev->usb.sg_en)
		size += sg_max_size * sizeof(struct scatterlist);

	e->urb = kzalloc(size, GFP_KERNEL);
	if (!e->urb)
		return -ENOMEM;

	usb_init_urb(e->urb);

	if (dev->usb.sg_en && sg_max_size > 0)
		e->urb->sg = (struct scatterlist *)(e->urb + 1);

	return 0;
}

static int
mt76u_rx_urb_alloc(struct mt76_dev *dev, struct mt76_queue *q,
		   struct mt76_queue_entry *e)
{
	enum mt76_rxq_id qid = q - &dev->q_rx[MT_RXQ_MAIN];
	int err, sg_size;

	sg_size = qid == MT_RXQ_MAIN ? MT_RX_SG_MAX_SIZE : 0;
	err = mt76u_urb_alloc(dev, e, sg_size);
	if (err)
		return err;

	return mt76u_refill_rx(dev, q, e->urb, sg_size, GFP_KERNEL);
}
static void mt76u_urb_free(struct urb *urb)
{
	int i;

	for (i = 0; i < urb->num_sgs; i++)
		skb_free_frag(sg_virt(&urb->sg[i]));

	if (urb->transfer_buffer)
		skb_free_frag(urb->transfer_buffer);

	usb_free_urb(urb);
}
static void
mt76u_fill_bulk_urb(struct mt76_dev *dev, int dir, int index,
		    struct urb *urb, usb_complete_t complete_fn,
		    void *context)
{
	struct usb_interface *uintf = to_usb_interface(dev->dev);
	struct usb_device *udev = interface_to_usbdev(uintf);
	unsigned int pipe;

	if (dir == USB_DIR_IN)
		pipe = usb_rcvbulkpipe(udev, dev->usb.in_ep[index]);
	else
		pipe = usb_sndbulkpipe(udev, dev->usb.out_ep[index]);

	urb->dev = udev;
	urb->pipe = pipe;
	urb->complete = complete_fn;
	urb->context = context;
}
static struct urb *
mt76u_get_next_rx_entry(struct mt76_queue *q)
{
	struct urb *urb = NULL;
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	if (q->queued > 0) {
		urb = q->entry[q->tail].urb;
		q->tail = (q->tail + 1) % q->ndesc;
		q->queued--;
	}
	spin_unlock_irqrestore(&q->lock, flags);

	return urb;
}
static int
mt76u_get_rx_entry_len(struct mt76_dev *dev, u8 *data,
		       u32 data_len)
{
	u16 dma_len, min_len;

	dma_len = get_unaligned_le16(data);
	if (dev->drv->drv_flags & MT_DRV_RX_DMA_HDR)
		return dma_len;

	min_len = MT_DMA_HDR_LEN + MT_RX_RXWI_LEN + MT_FCE_INFO_LEN;
	if (data_len < min_len || !dma_len ||
	    dma_len + MT_DMA_HDR_LEN > data_len ||
	    (dma_len & 0x3))
		return -EINVAL;

	return dma_len;
}
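/* Build an skb around the received buffer: use build_skb() when the
 * payload plus skb_shared_info fits within buf_size, otherwise fall
 * back to copying MT_SKB_HEAD_LEN bytes and attaching the remainder as
 * a page fragment.
 */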
static struct sk_buff *
mt76u_build_rx_skb(struct mt76_dev *dev, void *data,
		   int len, int buf_size)
{
	int head_room, drv_flags = dev->drv->drv_flags;
	struct sk_buff *skb;

	head_room = drv_flags & MT_DRV_RX_DMA_HDR ? 0 : MT_DMA_HDR_LEN;
	if (SKB_WITH_OVERHEAD(buf_size) < head_room + len) {
		struct page *page;

		/* slow path, not enough space for data and
		 * skb_shared_info
		 */
		skb = alloc_skb(MT_SKB_HEAD_LEN, GFP_ATOMIC);
		if (!skb)
			return NULL;

		skb_put_data(skb, data + head_room, MT_SKB_HEAD_LEN);
		data += head_room + MT_SKB_HEAD_LEN;
		page = virt_to_head_page(data);
		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
				page, data - page_address(page),
				len - MT_SKB_HEAD_LEN, buf_size);

		return skb;
	}

	/* fast path */
	skb = build_skb(data, buf_size);
	if (!skb)
		return NULL;

	skb_reserve(skb, head_room);
	__skb_put(skb, len);

	return skb;
}
static int
mt76u_process_rx_entry(struct mt76_dev *dev, struct urb *urb,
		       int buf_size)
{
	u8 *data = urb->num_sgs ? sg_virt(&urb->sg[0]) : urb->transfer_buffer;
	int data_len = urb->num_sgs ? urb->sg[0].length : urb->actual_length;
	int len, nsgs = 1, head_room, drv_flags = dev->drv->drv_flags;
	struct sk_buff *skb;

	if (!test_bit(MT76_STATE_INITIALIZED, &dev->phy.state))
		return 0;

	len = mt76u_get_rx_entry_len(dev, data, urb->actual_length);
	if (len < 0)
		return 0;

	head_room = drv_flags & MT_DRV_RX_DMA_HDR ? 0 : MT_DMA_HDR_LEN;
	data_len = min_t(int, len, data_len - head_room);
	skb = mt76u_build_rx_skb(dev, data, data_len, buf_size);
	if (!skb)
		return 0;

	len -= data_len;
	while (len > 0 && nsgs < urb->num_sgs) {
		data_len = min_t(int, len, urb->sg[nsgs].length);
		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
				sg_page(&urb->sg[nsgs]),
				urb->sg[nsgs].offset, data_len,
				buf_size);
		len -= data_len;
		nsgs++;
	}
	dev->drv->rx_skb(dev, MT_RXQ_MAIN, skb);

	return nsgs;
}
static void mt76u_complete_rx(struct urb *urb)
{
	struct mt76_dev *dev = dev_get_drvdata(&urb->dev->dev);
	struct mt76_queue *q = urb->context;
	unsigned long flags;

	trace_rx_urb(dev, urb);

	switch (urb->status) {
	case -ECONNRESET:
	case -ESHUTDOWN:
	case -ENOENT:
		return;
	default:
		dev_err_ratelimited(dev->dev, "rx urb failed: %d\n",
				    urb->status);
		fallthrough;
	case 0:
		break;
	}

	spin_lock_irqsave(&q->lock, flags);
	if (WARN_ONCE(q->entry[q->head].urb != urb, "rx urb mismatch"))
		goto out;

	q->head = (q->head + 1) % q->ndesc;
	q->queued++;
	mt76_worker_schedule(&dev->usb.rx_worker);
out:
	spin_unlock_irqrestore(&q->lock, flags);
}
static int
mt76u_submit_rx_buf(struct mt76_dev *dev, enum mt76_rxq_id qid,
		    struct urb *urb)
{
	int ep = qid == MT_RXQ_MAIN ? MT_EP_IN_PKT_RX : MT_EP_IN_CMD_RESP;

	mt76u_fill_bulk_urb(dev, USB_DIR_IN, ep, urb,
			    mt76u_complete_rx, &dev->q_rx[qid]);
	trace_submit_urb(dev, urb);

	return usb_submit_urb(urb, GFP_ATOMIC);
}
static void
mt76u_process_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
	int qid = q - &dev->q_rx[MT_RXQ_MAIN];
	struct urb *urb;
	int err, count;

	while (true) {
		urb = mt76u_get_next_rx_entry(q);
		if (!urb)
			break;

		count = mt76u_process_rx_entry(dev, urb, q->buf_size);
		if (count > 0) {
			err = mt76u_refill_rx(dev, q, urb, count, GFP_ATOMIC);
			if (err < 0)
				break;
		}
		mt76u_submit_rx_buf(dev, qid, urb);
	}
	if (qid == MT_RXQ_MAIN) {
		rcu_read_lock();
		mt76_rx_poll_complete(dev, MT_RXQ_MAIN, NULL);
		rcu_read_unlock();
	}
}
static void mt76u_rx_worker(struct mt76_worker *w)
{
	struct mt76_usb *usb = container_of(w, struct mt76_usb, rx_worker);
	struct mt76_dev *dev = container_of(usb, struct mt76_dev, usb);
	int i;

	rcu_read_lock();
	mt76_for_each_q_rx(dev, i)
		mt76u_process_rx_queue(dev, &dev->q_rx[i]);
	rcu_read_unlock();
}
static int
mt76u_submit_rx_buffers(struct mt76_dev *dev, enum mt76_rxq_id qid)
{
	struct mt76_queue *q = &dev->q_rx[qid];
	unsigned long flags;
	int i, err = 0;

	spin_lock_irqsave(&q->lock, flags);
	for (i = 0; i < q->ndesc; i++) {
		err = mt76u_submit_rx_buf(dev, qid, q->entry[i].urb);
		if (err < 0)
			break;
	}
	q->head = q->tail = 0;
	q->queued = 0;
	spin_unlock_irqrestore(&q->lock, flags);

	return err;
}
static int
mt76u_alloc_rx_queue(struct mt76_dev *dev, enum mt76_rxq_id qid)
{
	struct mt76_queue *q = &dev->q_rx[qid];
	int i, err;

	spin_lock_init(&q->lock);
	q->entry = devm_kcalloc(dev->dev,
				MT_NUM_RX_ENTRIES, sizeof(*q->entry),
				GFP_KERNEL);
	if (!q->entry)
		return -ENOMEM;

	q->ndesc = MT_NUM_RX_ENTRIES;
	q->buf_size = PAGE_SIZE;

	for (i = 0; i < q->ndesc; i++) {
		err = mt76u_rx_urb_alloc(dev, q, &q->entry[i]);
		if (err < 0)
			return err;
	}

	return mt76u_submit_rx_buffers(dev, qid);
}

int mt76u_alloc_mcu_queue(struct mt76_dev *dev)
{
	return mt76u_alloc_rx_queue(dev, MT_RXQ_MCU);
}
EXPORT_SYMBOL_GPL(mt76u_alloc_mcu_queue);
static void
mt76u_free_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
	struct page *page;
	int i;

	for (i = 0; i < q->ndesc; i++) {
		if (!q->entry[i].urb)
			continue;

		mt76u_urb_free(q->entry[i].urb);
		q->entry[i].urb = NULL;
	}

	if (!q->rx_page.va)
		return;

	page = virt_to_page(q->rx_page.va);
	__page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
	memset(&q->rx_page, 0, sizeof(q->rx_page));
}

static void mt76u_free_rx(struct mt76_dev *dev)
{
	int i;

	mt76_worker_teardown(&dev->usb.rx_worker);

	mt76_for_each_q_rx(dev, i)
		mt76u_free_rx_queue(dev, &dev->q_rx[i]);
}
void mt76u_stop_rx(struct mt76_dev *dev)
{
	int i;

	mt76_worker_disable(&dev->usb.rx_worker);

	mt76_for_each_q_rx(dev, i) {
		struct mt76_queue *q = &dev->q_rx[i];
		int j;

		for (j = 0; j < q->ndesc; j++)
			usb_poison_urb(q->entry[j].urb);
	}
}
EXPORT_SYMBOL_GPL(mt76u_stop_rx);

int mt76u_resume_rx(struct mt76_dev *dev)
{
	int i;

	mt76_for_each_q_rx(dev, i) {
		struct mt76_queue *q = &dev->q_rx[i];
		int err, j;

		for (j = 0; j < q->ndesc; j++)
			usb_unpoison_urb(q->entry[j].urb);

		err = mt76u_submit_rx_buffers(dev, i);
		if (err < 0)
			return err;
	}

	mt76_worker_enable(&dev->usb.rx_worker);

	return 0;
}
EXPORT_SYMBOL_GPL(mt76u_resume_rx);
static void mt76u_status_worker(struct mt76_worker *w)
{
	struct mt76_usb *usb = container_of(w, struct mt76_usb, status_worker);
	struct mt76_dev *dev = container_of(usb, struct mt76_dev, usb);
	struct mt76_queue_entry entry;
	struct mt76_queue *q;
	int i;

	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
		q = dev->phy.q_tx[i];
		if (!q)
			continue;

		while (q->queued > 0) {
			if (!q->entry[q->tail].done)
				break;

			entry = q->entry[q->tail];
			q->entry[q->tail].done = false;

			mt76_queue_tx_complete(dev, q, &entry);
		}

		if (!q->queued)
			wake_up(&dev->tx_wait);

		mt76_worker_schedule(&dev->tx_worker);

		if (dev->drv->tx_status_data &&
		    !test_and_set_bit(MT76_READING_STATS, &dev->phy.state))
			queue_work(dev->wq, &dev->usb.stat_work);
	}
}
static void mt76u_tx_status_data(struct work_struct *work)
{
	struct mt76_usb *usb;
	struct mt76_dev *dev;
	u8 update = 1;
	u16 count = 0;

	usb = container_of(work, struct mt76_usb, stat_work);
	dev = container_of(usb, struct mt76_dev, usb);

	while (true) {
		if (test_bit(MT76_REMOVED, &dev->phy.state))
			break;

		if (!dev->drv->tx_status_data(dev, &update))
			break;
		count++;
	}

	if (count && test_bit(MT76_STATE_RUNNING, &dev->phy.state))
		queue_work(dev->wq, &usb->stat_work);
	else
		clear_bit(MT76_READING_STATS, &dev->phy.state);
}
static void mt76u_complete_tx(struct urb *urb)
{
	struct mt76_dev *dev = dev_get_drvdata(&urb->dev->dev);
	struct mt76_queue_entry *e = urb->context;

	if (mt76u_urb_error(urb))
		dev_err(dev->dev, "tx urb failed: %d\n", urb->status);
	e->done = true;

	mt76_worker_schedule(&dev->usb.status_worker);
}
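/* Map an skb for USB tx: without scatter-gather support the frame is
 * sent directly from skb->data, otherwise it is converted into a
 * scatterlist with skb_to_sgvec().
 */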
static int
mt76u_tx_setup_buffers(struct mt76_dev *dev, struct sk_buff *skb,
		       struct urb *urb)
{
	urb->transfer_buffer_length = skb->len;

	if (!dev->usb.sg_en) {
		urb->transfer_buffer = skb->data;
		return 0;
	}

	sg_init_table(urb->sg, MT_TX_SG_MAX_SIZE);
	urb->num_sgs = skb_to_sgvec(skb, urb->sg, 0, skb->len);
	if (!urb->num_sgs)
		return -ENOMEM;

	return urb->num_sgs;
}
static int
mt76u_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
		   struct sk_buff *skb, struct mt76_wcid *wcid,
		   struct ieee80211_sta *sta)
{
	struct mt76_tx_info tx_info = {
		.skb = skb,
	};
	u16 idx = q->head;
	int err;

	if (q->queued == q->ndesc)
		return -ENOSPC;

	skb->prev = skb->next = NULL;
	err = dev->drv->tx_prepare_skb(dev, NULL, q->qid, wcid, sta, &tx_info);
	if (err < 0)
		return err;

	err = mt76u_tx_setup_buffers(dev, tx_info.skb, q->entry[idx].urb);
	if (err < 0)
		return err;

	mt76u_fill_bulk_urb(dev, USB_DIR_OUT, q2ep(q->hw_idx),
			    q->entry[idx].urb, mt76u_complete_tx,
			    &q->entry[idx]);

	q->head = (q->head + 1) % q->ndesc;
	q->entry[idx].skb = tx_info.skb;
	q->queued++;

	return idx;
}
static void mt76u_tx_kick(struct mt76_dev *dev, struct mt76_queue *q)
{
	struct urb *urb;
	int err;

	while (q->first != q->head) {
		urb = q->entry[q->first].urb;

		trace_submit_urb(dev, urb);
		err = usb_submit_urb(urb, GFP_ATOMIC);
		if (err < 0) {
			if (err == -ENODEV)
				set_bit(MT76_REMOVED, &dev->phy.state);
			else
				dev_err(dev->dev, "tx urb submit failed:%d\n",
					err);
			break;
		}
		q->first = (q->first + 1) % q->ndesc;
	}
}
static u8 mt76u_ac_to_hwq(struct mt76_dev *dev, u8 ac)
{
	if (mt76_chip(dev) == 0x7663) {
		static const u8 lmac_queue_map[] = {
			/* ac to lmac mapping */
			[IEEE80211_AC_BK] = 0,
			[IEEE80211_AC_BE] = 1,
			[IEEE80211_AC_VI] = 2,
			[IEEE80211_AC_VO] = 4,
		};

		if (WARN_ON(ac >= ARRAY_SIZE(lmac_queue_map)))
			return 1; /* BE */

		return lmac_queue_map[ac];
	}

	return mt76_ac_to_hwq(ac);
}
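/* Allocate one tx queue per AC and reuse the first queue for the
 * remaining hardware queues up to MT_TXQ_PSD; every entry gets a
 * pre-allocated urb sized for MT_TX_SG_MAX_SIZE scatterlist entries.
 */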
static int mt76u_alloc_tx(struct mt76_dev *dev)
{
	struct mt76_queue *q;
	int i, j, err;

	for (i = 0; i <= MT_TXQ_PSD; i++) {
		if (i >= IEEE80211_NUM_ACS) {
			dev->phy.q_tx[i] = dev->phy.q_tx[0];
			continue;
		}

		q = devm_kzalloc(dev->dev, sizeof(*q), GFP_KERNEL);
		if (!q)
			return -ENOMEM;

		spin_lock_init(&q->lock);
		q->hw_idx = mt76u_ac_to_hwq(dev, i);
		q->qid = i;

		dev->phy.q_tx[i] = q;

		q->entry = devm_kcalloc(dev->dev,
					MT_NUM_TX_ENTRIES, sizeof(*q->entry),
					GFP_KERNEL);
		if (!q->entry)
			return -ENOMEM;

		q->ndesc = MT_NUM_TX_ENTRIES;
		for (j = 0; j < q->ndesc; j++) {
			err = mt76u_urb_alloc(dev, &q->entry[j],
					      MT_TX_SG_MAX_SIZE);
			if (err < 0)
				return err;
		}
	}

	return 0;
}
static void mt76u_free_tx(struct mt76_dev *dev)
{
	int i;

	mt76_worker_teardown(&dev->usb.status_worker);

	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
		struct mt76_queue *q;
		int j;

		q = dev->phy.q_tx[i];
		if (!q)
			continue;

		for (j = 0; j < q->ndesc; j++) {
			usb_free_urb(q->entry[j].urb);
			q->entry[j].urb = NULL;
		}
	}
}
void mt76u_stop_tx(struct mt76_dev *dev)
{
	int ret;

	mt76_worker_disable(&dev->usb.status_worker);

	ret = wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(&dev->phy),
				 HZ / 5);
	if (!ret) {
		struct mt76_queue_entry entry;
		struct mt76_queue *q;
		int i, j;

		dev_err(dev->dev, "timed out waiting for pending tx\n");

		for (i = 0; i < IEEE80211_NUM_ACS; i++) {
			q = dev->phy.q_tx[i];
			if (!q)
				continue;

			for (j = 0; j < q->ndesc; j++)
				usb_kill_urb(q->entry[j].urb);
		}

		mt76_worker_disable(&dev->tx_worker);

		/* On device removal we might queue skb's, but mt76u_tx_kick()
		 * will fail to submit urb, cleanup those skb's manually.
		 */
		for (i = 0; i < IEEE80211_NUM_ACS; i++) {
			q = dev->phy.q_tx[i];
			if (!q)
				continue;

			while (q->queued > 0) {
				entry = q->entry[q->tail];
				q->entry[q->tail].done = false;
				mt76_queue_tx_complete(dev, q, &entry);
			}
		}

		mt76_worker_enable(&dev->tx_worker);
	}

	cancel_work_sync(&dev->usb.stat_work);
	clear_bit(MT76_READING_STATS, &dev->phy.state);

	mt76_worker_enable(&dev->usb.status_worker);

	mt76_tx_status_check(dev, NULL, true);
}
EXPORT_SYMBOL_GPL(mt76u_stop_tx);
void mt76u_queues_deinit(struct mt76_dev *dev)
{
	mt76u_stop_rx(dev);
	mt76u_stop_tx(dev);

	mt76u_free_rx(dev);
	mt76u_free_tx(dev);
}
EXPORT_SYMBOL_GPL(mt76u_queues_deinit);

int mt76u_alloc_queues(struct mt76_dev *dev)
{
	int err;

	err = mt76u_alloc_rx_queue(dev, MT_RXQ_MAIN);
	if (err < 0)
		return err;

	return mt76u_alloc_tx(dev);
}
EXPORT_SYMBOL_GPL(mt76u_alloc_queues);

static const struct mt76_queue_ops usb_queue_ops = {
	.tx_queue_skb = mt76u_tx_queue_skb,
	.kick = mt76u_tx_kick,
};
int mt76u_init(struct mt76_dev *dev,
	       struct usb_interface *intf, bool ext)
{
	static struct mt76_bus_ops mt76u_ops = {
		.read_copy = mt76u_read_copy_ext,
		.wr_rp = mt76u_wr_rp,
		.rd_rp = mt76u_rd_rp,
		.type = MT76_BUS_USB,
	};
	struct usb_device *udev = interface_to_usbdev(intf);
	struct mt76_usb *usb = &dev->usb;
	int err;

	mt76u_ops.rr = ext ? mt76u_rr_ext : mt76u_rr;
	mt76u_ops.wr = ext ? mt76u_wr_ext : mt76u_wr;
	mt76u_ops.rmw = ext ? mt76u_rmw_ext : mt76u_rmw;
	mt76u_ops.write_copy = ext ? mt76u_copy_ext : mt76u_copy;

	INIT_WORK(&usb->stat_work, mt76u_tx_status_data);

	usb->data_len = usb_maxpacket(udev, usb_sndctrlpipe(udev, 0), 1);
	if (usb->data_len < 32)
		usb->data_len = 32;

	usb->data = devm_kmalloc(dev->dev, usb->data_len, GFP_KERNEL);
	if (!usb->data)
		return -ENOMEM;

	mutex_init(&usb->usb_ctrl_mtx);
	dev->bus = &mt76u_ops;
	dev->queue_ops = &usb_queue_ops;

	dev_set_drvdata(&udev->dev, dev);

	usb->sg_en = mt76u_check_sg(dev);

	err = mt76u_set_endpoints(intf, usb);
	if (err < 0)
		return err;

	err = mt76_worker_setup(dev->hw, &usb->rx_worker, mt76u_rx_worker,
				"usb-rx");
	if (err)
		return err;

	err = mt76_worker_setup(dev->hw, &usb->status_worker,
				mt76u_status_worker, "usb-status");
	if (err)
		return err;

	sched_set_fifo_low(usb->rx_worker.task);
	sched_set_fifo_low(usb->status_worker.task);

	return 0;
}
EXPORT_SYMBOL_GPL(mt76u_init);

MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>");
MODULE_LICENSE("Dual BSD/GPL");