// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
 */

#include <linux/module.h>
#include "mt76.h"
#include "usb_trace.h"
#include "dma.h"

#define MT_VEND_REQ_MAX_RETRY	10
#define MT_VEND_REQ_TOUT_MS	300
static bool disable_usb_sg;
module_param_named(disable_usb_sg, disable_usb_sg, bool, 0644);
MODULE_PARM_DESC(disable_usb_sg, "Disable usb scatter-gather support");
static int __mt76u_vendor_request(struct mt76_dev *dev, u8 req,
				  u8 req_type, u16 val, u16 offset,
				  void *buf, size_t len)
{
	struct usb_interface *uintf = to_usb_interface(dev->dev);
	struct usb_device *udev = interface_to_usbdev(uintf);
	unsigned int pipe;
	int i, ret;

	lockdep_assert_held(&dev->usb.usb_ctrl_mtx);

	pipe = (req_type & USB_DIR_IN) ? usb_rcvctrlpipe(udev, 0)
				       : usb_sndctrlpipe(udev, 0);
	for (i = 0; i < MT_VEND_REQ_MAX_RETRY; i++) {
		if (test_bit(MT76_REMOVED, &dev->phy.state))
			return -EIO;

		ret = usb_control_msg(udev, pipe, req, req_type, val,
				      offset, buf, len, MT_VEND_REQ_TOUT_MS);
		if (ret == -ENODEV)
			set_bit(MT76_REMOVED, &dev->phy.state);
		if (ret >= 0 || ret == -ENODEV)
			return ret;
		usleep_range(5000, 10000);
	}

	dev_err(dev->dev, "vendor request req:%02x off:%04x failed:%d\n",
		req, offset, ret);
	return ret;
}

int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
			 u8 req_type, u16 val, u16 offset,
			 void *buf, size_t len)
{
	int ret;

	mutex_lock(&dev->usb.usb_ctrl_mtx);
	ret = __mt76u_vendor_request(dev, req, req_type,
				     val, offset, buf, len);
	trace_usb_reg_wr(dev, offset, val);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);

	return ret;
}
EXPORT_SYMBOL_GPL(mt76u_vendor_request);

static u32 ___mt76u_rr(struct mt76_dev *dev, u8 req, u32 addr)
{
	struct mt76_usb *usb = &dev->usb;
	u32 data = ~0;
	int ret;

	ret = __mt76u_vendor_request(dev, req,
				     USB_DIR_IN | USB_TYPE_VENDOR,
				     addr >> 16, addr, usb->data,
				     sizeof(__le32));
	if (ret == sizeof(__le32))
		data = get_unaligned_le32(usb->data);
	trace_usb_reg_rr(dev, addr, data);

	return data;
}

static u32 __mt76u_rr(struct mt76_dev *dev, u32 addr)
{
	u8 req;

	switch (addr & MT_VEND_TYPE_MASK) {
	case MT_VEND_TYPE_EEPROM:
		req = MT_VEND_READ_EEPROM;
		break;
	case MT_VEND_TYPE_CFG:
		req = MT_VEND_READ_CFG;
		break;
	default:
		req = MT_VEND_MULTI_READ;
		break;
	}

	return ___mt76u_rr(dev, req, addr & ~MT_VEND_TYPE_MASK);
}

static u32 mt76u_rr(struct mt76_dev *dev, u32 addr)
{
	u32 ret;

	mutex_lock(&dev->usb.usb_ctrl_mtx);
	ret = __mt76u_rr(dev, addr);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);

	return ret;
}

static u32 mt76u_rr_ext(struct mt76_dev *dev, u32 addr)
{
	u32 ret;

	mutex_lock(&dev->usb.usb_ctrl_mtx);
	ret = ___mt76u_rr(dev, MT_VEND_READ_EXT, addr);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);

	return ret;
}

static void ___mt76u_wr(struct mt76_dev *dev, u8 req,
			u32 addr, u32 val)
{
	struct mt76_usb *usb = &dev->usb;

	put_unaligned_le32(val, usb->data);
	__mt76u_vendor_request(dev, req,
			       USB_DIR_OUT | USB_TYPE_VENDOR,
			       addr >> 16, addr, usb->data,
			       sizeof(__le32));
	trace_usb_reg_wr(dev, addr, val);
}

static void __mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
{
	u8 req;

	switch (addr & MT_VEND_TYPE_MASK) {
	case MT_VEND_TYPE_CFG:
		req = MT_VEND_WRITE_CFG;
		break;
	default:
		req = MT_VEND_MULTI_WRITE;
		break;
	}
	___mt76u_wr(dev, req, addr & ~MT_VEND_TYPE_MASK, val);
}

static void mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
{
	mutex_lock(&dev->usb.usb_ctrl_mtx);
	__mt76u_wr(dev, addr, val);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);
}

static void mt76u_wr_ext(struct mt76_dev *dev, u32 addr, u32 val)
{
	mutex_lock(&dev->usb.usb_ctrl_mtx);
	___mt76u_wr(dev, MT_VEND_WRITE_EXT, addr, val);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);
}

static u32 mt76u_rmw(struct mt76_dev *dev, u32 addr,
		     u32 mask, u32 val)
{
	mutex_lock(&dev->usb.usb_ctrl_mtx);
	val |= __mt76u_rr(dev, addr) & ~mask;
	__mt76u_wr(dev, addr, val);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);

	return val;
}

static u32 mt76u_rmw_ext(struct mt76_dev *dev, u32 addr,
			 u32 mask, u32 val)
{
	mutex_lock(&dev->usb.usb_ctrl_mtx);
	val |= ___mt76u_rr(dev, MT_VEND_READ_EXT, addr) & ~mask;
	___mt76u_wr(dev, MT_VEND_WRITE_EXT, addr, val);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);

	return val;
}

static void mt76u_copy(struct mt76_dev *dev, u32 offset,
		       const void *data, int len)
{
	struct mt76_usb *usb = &dev->usb;
	const u8 *val = data;
	int ret;
	int current_batch_size;
	int i = 0;

	/* Assure that always a multiple of 4 bytes are copied,
	 * otherwise beacons can be corrupted.
	 * See: "mt76: round up length on mt76_wr_copy"
	 * Commit 850e8f6fbd5d0003b0
	 */
	len = round_up(len, 4);

	mutex_lock(&usb->usb_ctrl_mtx);
	while (i < len) {
		current_batch_size = min_t(int, usb->data_len, len - i);
		memcpy(usb->data, val + i, current_batch_size);
		ret = __mt76u_vendor_request(dev, MT_VEND_MULTI_WRITE,
					     USB_DIR_OUT | USB_TYPE_VENDOR,
					     0, offset + i, usb->data,
					     current_batch_size);
		if (ret < 0)
			break;

		i += current_batch_size;
	}
	mutex_unlock(&usb->usb_ctrl_mtx);
}

static void mt76u_copy_ext(struct mt76_dev *dev, u32 offset,
			   const void *data, int len)
{
	struct mt76_usb *usb = &dev->usb;
	int ret, i = 0, batch_len;
	const u8 *val = data;

	len = round_up(len, 4);
	mutex_lock(&usb->usb_ctrl_mtx);
	while (i < len) {
		batch_len = min_t(int, usb->data_len, len - i);
		memcpy(usb->data, val + i, batch_len);
		ret = __mt76u_vendor_request(dev, MT_VEND_WRITE_EXT,
					     USB_DIR_OUT | USB_TYPE_VENDOR,
					     (offset + i) >> 16, offset + i,
					     usb->data, batch_len);
		if (ret < 0)
			break;

		i += batch_len;
	}
	mutex_unlock(&usb->usb_ctrl_mtx);
}

static void
mt76u_read_copy_ext(struct mt76_dev *dev, u32 offset,
		    void *data, int len)
{
	struct mt76_usb *usb = &dev->usb;
	int i = 0, batch_len, ret;
	u8 *val = data;

	len = round_up(len, 4);
	mutex_lock(&usb->usb_ctrl_mtx);
	while (i < len) {
		batch_len = min_t(int, usb->data_len, len - i);
		ret = __mt76u_vendor_request(dev, MT_VEND_READ_EXT,
					     USB_DIR_IN | USB_TYPE_VENDOR,
					     (offset + i) >> 16, offset + i,
					     usb->data, batch_len);
		if (ret < 0)
			break;

		memcpy(val + i, usb->data, batch_len);
		i += batch_len;
	}
	mutex_unlock(&usb->usb_ctrl_mtx);
}

void mt76u_single_wr(struct mt76_dev *dev, const u8 req,
		     const u16 offset, const u32 val)
{
	mutex_lock(&dev->usb.usb_ctrl_mtx);
	__mt76u_vendor_request(dev, req,
			       USB_DIR_OUT | USB_TYPE_VENDOR,
			       val & 0xffff, offset, NULL, 0);
	__mt76u_vendor_request(dev, req,
			       USB_DIR_OUT | USB_TYPE_VENDOR,
			       val >> 16, offset + 2, NULL, 0);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);
}
EXPORT_SYMBOL_GPL(mt76u_single_wr);

static int
mt76u_req_wr_rp(struct mt76_dev *dev, u32 base,
		const struct mt76_reg_pair *data, int len)
{
	struct mt76_usb *usb = &dev->usb;

	mutex_lock(&usb->usb_ctrl_mtx);
	while (len > 0) {
		__mt76u_wr(dev, base + data->reg, data->value);
		len--;
		data++;
	}
	mutex_unlock(&usb->usb_ctrl_mtx);

	return 0;
}

static int
mt76u_wr_rp(struct mt76_dev *dev, u32 base,
	    const struct mt76_reg_pair *data, int n)
{
	if (test_bit(MT76_STATE_MCU_RUNNING, &dev->phy.state))
		return dev->mcu_ops->mcu_wr_rp(dev, base, data, n);
	else
		return mt76u_req_wr_rp(dev, base, data, n);
}

static int
mt76u_req_rd_rp(struct mt76_dev *dev, u32 base, struct mt76_reg_pair *data,
		int len)
{
	struct mt76_usb *usb = &dev->usb;

	mutex_lock(&usb->usb_ctrl_mtx);
	while (len > 0) {
		data->value = __mt76u_rr(dev, base + data->reg);
		len--;
		data++;
	}
	mutex_unlock(&usb->usb_ctrl_mtx);

	return 0;
}

static int
mt76u_rd_rp(struct mt76_dev *dev, u32 base,
	    struct mt76_reg_pair *data, int n)
{
	if (test_bit(MT76_STATE_MCU_RUNNING, &dev->phy.state))
		return dev->mcu_ops->mcu_rd_rp(dev, base, data, n);
	else
		return mt76u_req_rd_rp(dev, base, data, n);
}
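
/* Scatter-gather rx is usable only when it is not disabled via the
 * disable_usb_sg module parameter, the host controller supports sg
 * transfers, and it either has no sg size constraint or the device runs
 * at USB wireless speed.
 */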
static bool mt76u_check_sg(struct mt76_dev *dev)
{
	struct usb_interface *uintf = to_usb_interface(dev->dev);
	struct usb_device *udev = interface_to_usbdev(uintf);

	return (!disable_usb_sg && udev->bus->sg_tablesize > 0 &&
		(udev->bus->no_sg_constraint ||
		 udev->speed == USB_SPEED_WIRELESS));
}

static int
mt76u_set_endpoints(struct usb_interface *intf,
		    struct mt76_usb *usb)
{
	struct usb_host_interface *intf_desc = intf->cur_altsetting;
	struct usb_endpoint_descriptor *ep_desc;
	int i, in_ep = 0, out_ep = 0;

	for (i = 0; i < intf_desc->desc.bNumEndpoints; i++) {
		ep_desc = &intf_desc->endpoint[i].desc;

		if (usb_endpoint_is_bulk_in(ep_desc) &&
		    in_ep < __MT_EP_IN_MAX) {
			usb->in_ep[in_ep] = usb_endpoint_num(ep_desc);
			in_ep++;
		} else if (usb_endpoint_is_bulk_out(ep_desc) &&
			   out_ep < __MT_EP_OUT_MAX) {
			usb->out_ep[out_ep] = usb_endpoint_num(ep_desc);
			out_ep++;
		}
	}

	if (in_ep != __MT_EP_IN_MAX || out_ep != __MT_EP_OUT_MAX)
		return -EINVAL;
	return 0;
}
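
/* Fill the rx urb scatterlist with buffers taken from the queue page_frag
 * cache. Returns the number of entries set up, or -ENOMEM if none could
 * be allocated.
 */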
static int
mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76_queue *q, struct urb *urb,
		 int nsgs, gfp_t gfp)
{
	int i;

	for (i = 0; i < nsgs; i++) {
		struct page *page;
		void *data;
		int offset;

		data = page_frag_alloc(&q->rx_page, q->buf_size, gfp);
		if (!data)
			break;

		page = virt_to_head_page(data);
		offset = data - page_address(page);
		sg_set_page(&urb->sg[i], page, q->buf_size, offset);
	}

	if (i < nsgs) {
		int j;

		for (j = nsgs; j < urb->num_sgs; j++)
			skb_free_frag(sg_virt(&urb->sg[j]));
		urb->num_sgs = i;
	}

	urb->num_sgs = max_t(int, i, urb->num_sgs);
	urb->transfer_buffer_length = urb->num_sgs * q->buf_size;
	sg_init_marker(urb->sg, urb->num_sgs);

	return i ? : -ENOMEM;
}

static int
mt76u_refill_rx(struct mt76_dev *dev, struct mt76_queue *q,
		struct urb *urb, int nsgs, gfp_t gfp)
{
	enum mt76_rxq_id qid = q - &dev->q_rx[MT_RXQ_MAIN];

	if (qid == MT_RXQ_MAIN && dev->usb.sg_en)
		return mt76u_fill_rx_sg(dev, q, urb, nsgs, gfp);

	urb->transfer_buffer_length = q->buf_size;
	urb->transfer_buffer = page_frag_alloc(&q->rx_page, q->buf_size, gfp);

	return urb->transfer_buffer ? 0 : -ENOMEM;
}

static int
mt76u_urb_alloc(struct mt76_dev *dev, struct mt76_queue_entry *e,
		int sg_max_size)
{
	unsigned int size = sizeof(struct urb);

	if (dev->usb.sg_en && sg_max_size > 0)
		size += sg_max_size * sizeof(struct scatterlist);

	e->urb = kzalloc(size, GFP_KERNEL);
	if (!e->urb)
		return -ENOMEM;

	usb_init_urb(e->urb);

	if (dev->usb.sg_en && sg_max_size > 0)
		e->urb->sg = (struct scatterlist *)(e->urb + 1);

	return 0;
}

static int
mt76u_rx_urb_alloc(struct mt76_dev *dev, struct mt76_queue *q,
		   struct mt76_queue_entry *e)
{
	enum mt76_rxq_id qid = q - &dev->q_rx[MT_RXQ_MAIN];
	int err, sg_size;

	sg_size = qid == MT_RXQ_MAIN ? MT_RX_SG_MAX_SIZE : 0;
	err = mt76u_urb_alloc(dev, e, sg_size);
	if (err)
		return err;

	return mt76u_refill_rx(dev, q, e->urb, sg_size, GFP_KERNEL);
}

static void mt76u_urb_free(struct urb *urb)
{
	int i;

	for (i = 0; i < urb->num_sgs; i++)
		skb_free_frag(sg_virt(&urb->sg[i]));

	if (urb->transfer_buffer)
		skb_free_frag(urb->transfer_buffer);

	usb_free_urb(urb);
}

static void
mt76u_fill_bulk_urb(struct mt76_dev *dev, int dir, int index,
		    struct urb *urb, usb_complete_t complete_fn,
		    void *context)
{
	struct usb_interface *uintf = to_usb_interface(dev->dev);
	struct usb_device *udev = interface_to_usbdev(uintf);
	unsigned int pipe;

	if (dir == USB_DIR_IN)
		pipe = usb_rcvbulkpipe(udev, dev->usb.in_ep[index]);
	else
		pipe = usb_sndbulkpipe(udev, dev->usb.out_ep[index]);

	urb->dev = udev;
	urb->pipe = pipe;
	urb->complete = complete_fn;
	urb->context = context;
}

static struct urb *
mt76u_get_next_rx_entry(struct mt76_queue *q)
{
	struct urb *urb = NULL;
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	if (q->queued > 0) {
		urb = q->entry[q->head].urb;
		q->head = (q->head + 1) % q->ndesc;
		q->queued--;
	}
	spin_unlock_irqrestore(&q->lock, flags);

	return urb;
}

static int
mt76u_get_rx_entry_len(struct mt76_dev *dev, u8 *data,
		       u32 data_len)
{
	u16 dma_len, min_len;

	dma_len = get_unaligned_le16(data);
	if (dev->drv->drv_flags & MT_DRV_RX_DMA_HDR)
		return dma_len;

	min_len = MT_DMA_HDR_LEN + MT_RX_RXWI_LEN + MT_FCE_INFO_LEN;
	if (data_len < min_len || !dma_len ||
	    dma_len + MT_DMA_HDR_LEN > data_len ||
	    (dma_len & 0x3))
		return -EINVAL;
	return dma_len;
}

static struct sk_buff *
mt76u_build_rx_skb(struct mt76_dev *dev, void *data,
		   int len, int buf_size)
{
	int head_room, drv_flags = dev->drv->drv_flags;
	struct sk_buff *skb;

	head_room = drv_flags & MT_DRV_RX_DMA_HDR ? 0 : MT_DMA_HDR_LEN;
	if (SKB_WITH_OVERHEAD(buf_size) < head_room + len) {
		struct page *page;

		/* slow path, not enough space for data and
		 * skb_shared_info
		 */
		skb = alloc_skb(MT_SKB_HEAD_LEN, GFP_ATOMIC);
		if (!skb)
			return NULL;

		skb_put_data(skb, data + head_room, MT_SKB_HEAD_LEN);
		data += head_room + MT_SKB_HEAD_LEN;
		page = virt_to_head_page(data);
		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
				page, data - page_address(page),
				len - MT_SKB_HEAD_LEN, buf_size);

		return skb;
	}

	/* fast path */
	skb = build_skb(data, buf_size);
	if (!skb)
		return NULL;

	skb_reserve(skb, head_room);
	__skb_put(skb, len);

	return skb;
}
static int
mt76u_process_rx_entry(struct mt76_dev *dev, struct urb *urb,
		       int buf_size)
{
	u8 *data = urb->num_sgs ? sg_virt(&urb->sg[0]) : urb->transfer_buffer;
	int data_len = urb->num_sgs ? urb->sg[0].length : urb->actual_length;
	int len, nsgs = 1, head_room, drv_flags = dev->drv->drv_flags;
	struct sk_buff *skb;

	if (!test_bit(MT76_STATE_INITIALIZED, &dev->phy.state))
		return 0;

	len = mt76u_get_rx_entry_len(dev, data, urb->actual_length);
	if (len < 0)
		return 0;

	head_room = drv_flags & MT_DRV_RX_DMA_HDR ? 0 : MT_DMA_HDR_LEN;
	data_len = min_t(int, len, data_len - head_room);
	skb = mt76u_build_rx_skb(dev, data, data_len, buf_size);
	if (!skb)
		return 0;

	len -= data_len;
	while (len > 0 && nsgs < urb->num_sgs) {
		data_len = min_t(int, len, urb->sg[nsgs].length);
		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
				sg_page(&urb->sg[nsgs]),
				urb->sg[nsgs].offset, data_len,
				buf_size);
		len -= data_len;
		nsgs++;
	}
	dev->drv->rx_skb(dev, MT_RXQ_MAIN, skb);

	return nsgs;
}

static void mt76u_complete_rx(struct urb *urb)
{
	struct mt76_dev *dev = dev_get_drvdata(&urb->dev->dev);
	struct mt76_queue *q = urb->context;
	unsigned long flags;

	trace_rx_urb(dev, urb);

	switch (urb->status) {
	case -ECONNRESET:
	case -ESHUTDOWN:
	case -ENOENT:
		return;
	default:
		dev_err_ratelimited(dev->dev, "rx urb failed: %d\n",
				    urb->status);
		/* fall through */
	case 0:
		break;
	}

	spin_lock_irqsave(&q->lock, flags);
	if (WARN_ONCE(q->entry[q->tail].urb != urb, "rx urb mismatch"))
		goto out;

	q->tail = (q->tail + 1) % q->ndesc;
	q->queued++;
	tasklet_schedule(&dev->usb.rx_tasklet);
out:
	spin_unlock_irqrestore(&q->lock, flags);
}

static int
mt76u_submit_rx_buf(struct mt76_dev *dev, enum mt76_rxq_id qid,
		    struct urb *urb)
{
	int ep = qid == MT_RXQ_MAIN ? MT_EP_IN_PKT_RX : MT_EP_IN_CMD_RESP;

	mt76u_fill_bulk_urb(dev, USB_DIR_IN, ep, urb,
			    mt76u_complete_rx, &dev->q_rx[qid]);
	trace_submit_urb(dev, urb);

	return usb_submit_urb(urb, GFP_ATOMIC);
}

static void
mt76u_process_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
	int qid = q - &dev->q_rx[MT_RXQ_MAIN];
	struct urb *urb;
	int err, count;

	while (true) {
		urb = mt76u_get_next_rx_entry(q);
		if (!urb)
			break;

		count = mt76u_process_rx_entry(dev, urb, q->buf_size);
		if (count > 0) {
			err = mt76u_refill_rx(dev, q, urb, count, GFP_ATOMIC);
			if (err < 0)
				break;
		}
		mt76u_submit_rx_buf(dev, qid, urb);
	}
	if (qid == MT_RXQ_MAIN)
		mt76_rx_poll_complete(dev, MT_RXQ_MAIN, NULL);
}

static void mt76u_rx_tasklet(unsigned long data)
{
	struct mt76_dev *dev = (struct mt76_dev *)data;
	struct mt76_queue *q;
	int i;

	rcu_read_lock();
	for (i = 0; i < __MT_RXQ_MAX; i++) {
		q = &dev->q_rx[i];
		if (!q->ndesc)
			continue;

		mt76u_process_rx_queue(dev, q);
	}
	rcu_read_unlock();
}

static int
mt76u_submit_rx_buffers(struct mt76_dev *dev, enum mt76_rxq_id qid)
{
	struct mt76_queue *q = &dev->q_rx[qid];
	unsigned long flags;
	int i, err = 0;

	spin_lock_irqsave(&q->lock, flags);
	for (i = 0; i < q->ndesc; i++) {
		err = mt76u_submit_rx_buf(dev, qid, q->entry[i].urb);
		if (err < 0)
			break;
	}
	q->head = q->tail = 0;
	q->queued = 0;
	spin_unlock_irqrestore(&q->lock, flags);

	return err;
}

static int
mt76u_alloc_rx_queue(struct mt76_dev *dev, enum mt76_rxq_id qid)
{
	struct mt76_queue *q = &dev->q_rx[qid];
	int i, err;

	spin_lock_init(&q->lock);
	q->entry = devm_kcalloc(dev->dev,
				MT_NUM_RX_ENTRIES, sizeof(*q->entry),
				GFP_KERNEL);
	if (!q->entry)
		return -ENOMEM;

	q->ndesc = MT_NUM_RX_ENTRIES;
	q->buf_size = PAGE_SIZE;

	for (i = 0; i < q->ndesc; i++) {
		err = mt76u_rx_urb_alloc(dev, q, &q->entry[i]);
		if (err < 0)
			return err;
	}

	return mt76u_submit_rx_buffers(dev, qid);
}

int mt76u_alloc_mcu_queue(struct mt76_dev *dev)
{
	return mt76u_alloc_rx_queue(dev, MT_RXQ_MCU);
}
EXPORT_SYMBOL_GPL(mt76u_alloc_mcu_queue);

static void
mt76u_free_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
	struct page *page;
	int i;

	for (i = 0; i < q->ndesc; i++)
		mt76u_urb_free(q->entry[i].urb);

	if (!q->rx_page.va)
		return;

	page = virt_to_page(q->rx_page.va);
	__page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
	memset(&q->rx_page, 0, sizeof(q->rx_page));
}

static void mt76u_free_rx(struct mt76_dev *dev)
{
	struct mt76_queue *q;
	int i;

	for (i = 0; i < __MT_RXQ_MAX; i++) {
		q = &dev->q_rx[i];
		if (!q->ndesc)
			continue;

		mt76u_free_rx_queue(dev, q);
	}
}

void mt76u_stop_rx(struct mt76_dev *dev)
{
	struct mt76_queue *q;
	int i, j;

	for (i = 0; i < __MT_RXQ_MAX; i++) {
		q = &dev->q_rx[i];
		if (!q->ndesc)
			continue;

		for (j = 0; j < q->ndesc; j++)
			usb_poison_urb(q->entry[j].urb);
	}

	tasklet_kill(&dev->usb.rx_tasklet);
}
EXPORT_SYMBOL_GPL(mt76u_stop_rx);

int mt76u_resume_rx(struct mt76_dev *dev)
{
	struct mt76_queue *q;
	int i, j, err;

	for (i = 0; i < __MT_RXQ_MAX; i++) {
		q = &dev->q_rx[i];

		if (!q->ndesc)
			continue;

		for (j = 0; j < q->ndesc; j++)
			usb_unpoison_urb(q->entry[j].urb);

		err = mt76u_submit_rx_buffers(dev, i);
		if (err < 0)
			return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mt76u_resume_rx);
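
/* Per-AC tx completion: reap urbs marked done by mt76u_complete_tx(),
 * report them via tx_complete_skb(), restart the queue if it was stopped
 * and kick the tx status work when the driver supports it.
 */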
static void mt76u_tx_tasklet(unsigned long data)
{
	struct mt76_dev *dev = (struct mt76_dev *)data;
	struct mt76_queue_entry entry;
	struct mt76_sw_queue *sq;
	struct mt76_queue *q;
	bool wake;
	int i;

	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
		u32 n_dequeued = 0, n_sw_dequeued = 0;

		sq = &dev->q_tx[i];
		q = sq->q;

		while (q->queued > n_dequeued) {
			if (!q->entry[q->head].done)
				break;

			if (q->entry[q->head].schedule) {
				q->entry[q->head].schedule = false;
				n_sw_dequeued++;
			}

			entry = q->entry[q->head];
			q->entry[q->head].done = false;
			q->head = (q->head + 1) % q->ndesc;
			n_dequeued++;

			dev->drv->tx_complete_skb(dev, i, &entry);
		}

		spin_lock_bh(&q->lock);

		sq->swq_queued -= n_sw_dequeued;
		q->queued -= n_dequeued;

		wake = q->stopped && q->queued < q->ndesc - 8;
		if (wake)
			q->stopped = false;

		if (!q->queued)
			wake_up(&dev->tx_wait);

		spin_unlock_bh(&q->lock);

		mt76_txq_schedule(&dev->phy, i);

		if (dev->drv->tx_status_data &&
		    !test_and_set_bit(MT76_READING_STATS, &dev->phy.state))
			queue_work(dev->usb.wq, &dev->usb.stat_work);
		if (wake)
			ieee80211_wake_queue(dev->hw, i);
	}
}

static void mt76u_tx_status_data(struct work_struct *work)
{
	struct mt76_usb *usb;
	struct mt76_dev *dev;
	u8 update = 1;
	u16 count = 0;

	usb = container_of(work, struct mt76_usb, stat_work);
	dev = container_of(usb, struct mt76_dev, usb);

	while (true) {
		if (test_bit(MT76_REMOVED, &dev->phy.state))
			break;

		if (!dev->drv->tx_status_data(dev, &update))
			break;
		count++;
	}

	if (count && test_bit(MT76_STATE_RUNNING, &dev->phy.state))
		queue_work(usb->wq, &usb->stat_work);
	else
		clear_bit(MT76_READING_STATS, &dev->phy.state);
}

static void mt76u_complete_tx(struct urb *urb)
{
	struct mt76_dev *dev = dev_get_drvdata(&urb->dev->dev);
	struct mt76_queue_entry *e = urb->context;

	if (mt76u_urb_error(urb))
		dev_err(dev->dev, "tx urb failed: %d\n", urb->status);
	e->done = true;

	tasklet_schedule(&dev->tx_tasklet);
}

static int
mt76u_tx_setup_buffers(struct mt76_dev *dev, struct sk_buff *skb,
		       struct urb *urb)
{
	urb->transfer_buffer_length = skb->len;

	if (!dev->usb.sg_en) {
		urb->transfer_buffer = skb->data;
		return 0;
	}

	sg_init_table(urb->sg, MT_TX_SG_MAX_SIZE);
	urb->num_sgs = skb_to_sgvec(skb, urb->sg, 0, skb->len);
	if (!urb->num_sgs)
		return -ENOMEM;

	return urb->num_sgs;
}
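
/* Prepend the 32-bit USB DMA info field and pad the frame so its length
 * becomes a multiple of 4 bytes plus 4 trailing zero bytes, as the
 * hardware expects.
 */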
int mt76u_skb_dma_info(struct sk_buff *skb, u32 info)
{
	struct sk_buff *iter, *last = skb;
	u32 pad;

	put_unaligned_le32(info, skb_push(skb, sizeof(info)));
	/* Add zero pad of 4 - 7 bytes */
	pad = round_up(skb->len, 4) + 4 - skb->len;

	/* First packet of a A-MSDU burst keeps track of the whole burst
	 * length, need to update length of it and the last packet.
	 */
	skb_walk_frags(skb, iter) {
		last = iter;
		if (!iter->next) {
			skb->data_len += pad;
			skb->len += pad;
			break;
		}
	}

	if (skb_pad(last, pad))
		return -ENOMEM;
	__skb_put(last, pad);

	return 0;
}
EXPORT_SYMBOL_GPL(mt76u_skb_dma_info);

static int
mt76u_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid,
		   struct sk_buff *skb, struct mt76_wcid *wcid,
		   struct ieee80211_sta *sta)
{
	struct mt76_queue *q = dev->q_tx[qid].q;
	struct mt76_tx_info tx_info = {
		.skb = skb,
	};
	u16 idx = q->tail;
	int err;

	if (q->queued == q->ndesc)
		return -ENOSPC;

	skb->prev = skb->next = NULL;
	err = dev->drv->tx_prepare_skb(dev, NULL, qid, wcid, sta, &tx_info);
	if (err < 0)
		return err;

	err = mt76u_tx_setup_buffers(dev, tx_info.skb, q->entry[idx].urb);
	if (err < 0)
		return err;

	mt76u_fill_bulk_urb(dev, USB_DIR_OUT, q2ep(q->hw_idx),
			    q->entry[idx].urb, mt76u_complete_tx,
			    &q->entry[idx]);

	q->tail = (q->tail + 1) % q->ndesc;
	q->entry[idx].skb = tx_info.skb;
	q->queued++;

	return idx;
}

static void mt76u_tx_kick(struct mt76_dev *dev, struct mt76_queue *q)
{
	struct urb *urb;
	int err;

	while (q->first != q->tail) {
		urb = q->entry[q->first].urb;

		trace_submit_urb(dev, urb);
		err = usb_submit_urb(urb, GFP_ATOMIC);
		if (err < 0) {
			if (err == -ENODEV)
				set_bit(MT76_REMOVED, &dev->phy.state);
			else
				dev_err(dev->dev, "tx urb submit failed:%d\n",
					err);
			break;
		}
		q->first = (q->first + 1) % q->ndesc;
	}
}

static u8 mt76u_ac_to_hwq(struct mt76_dev *dev, u8 ac)
{
	if (mt76_chip(dev) == 0x7663)
		return ac ^ 0x3;

	return mt76_ac_to_hwq(ac);
}

static int mt76u_alloc_tx(struct mt76_dev *dev)
{
	struct mt76_queue *q;
	int i, j, err;

	for (i = 0; i <= MT_TXQ_PSD; i++) {
		INIT_LIST_HEAD(&dev->q_tx[i].swq);

		if (i >= IEEE80211_NUM_ACS) {
			dev->q_tx[i].q = dev->q_tx[0].q;
			continue;
		}

		q = devm_kzalloc(dev->dev, sizeof(*q), GFP_KERNEL);
		if (!q)
			return -ENOMEM;

		spin_lock_init(&q->lock);
		q->hw_idx = mt76u_ac_to_hwq(dev, i);
		dev->q_tx[i].q = q;

		q->entry = devm_kcalloc(dev->dev,
					MT_NUM_TX_ENTRIES, sizeof(*q->entry),
					GFP_KERNEL);
		if (!q->entry)
			return -ENOMEM;

		q->ndesc = MT_NUM_TX_ENTRIES;
		for (j = 0; j < q->ndesc; j++) {
			err = mt76u_urb_alloc(dev, &q->entry[j],
					      MT_TX_SG_MAX_SIZE);
			if (err < 0)
				return err;
		}
	}
	return 0;
}

static void mt76u_free_tx(struct mt76_dev *dev)
{
	struct mt76_queue *q;
	int i, j;

	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
		q = dev->q_tx[i].q;
		for (j = 0; j < q->ndesc; j++)
			usb_free_urb(q->entry[j].urb);
	}
}

void mt76u_stop_tx(struct mt76_dev *dev)
{
	struct mt76_queue_entry entry;
	struct mt76_queue *q;
	int i, j, ret;

	ret = wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(&dev->phy),
				 HZ / 5);
	if (!ret) {
		dev_err(dev->dev, "timed out waiting for pending tx\n");

		for (i = 0; i < IEEE80211_NUM_ACS; i++) {
			q = dev->q_tx[i].q;
			for (j = 0; j < q->ndesc; j++)
				usb_kill_urb(q->entry[j].urb);
		}

		tasklet_kill(&dev->tx_tasklet);

		/* On device removal we might queue skb's, but mt76u_tx_kick()
		 * will fail to submit urb, cleanup those skb's manually.
		 */
		for (i = 0; i < IEEE80211_NUM_ACS; i++) {
			q = dev->q_tx[i].q;

			/* Assure we are in sync with killed tasklet. */
			spin_lock_bh(&q->lock);
			while (q->queued) {
				entry = q->entry[q->head];
				q->head = (q->head + 1) % q->ndesc;
				q->queued--;

				dev->drv->tx_complete_skb(dev, i, &entry);
			}
			spin_unlock_bh(&q->lock);
		}
	}

	cancel_work_sync(&dev->usb.stat_work);
	clear_bit(MT76_READING_STATS, &dev->phy.state);

	mt76_tx_status_check(dev, NULL, true);
}
EXPORT_SYMBOL_GPL(mt76u_stop_tx);

void mt76u_queues_deinit(struct mt76_dev *dev)
{
	mt76u_stop_rx(dev);
	mt76u_stop_tx(dev);

	mt76u_free_rx(dev);
	mt76u_free_tx(dev);
}
EXPORT_SYMBOL_GPL(mt76u_queues_deinit);

int mt76u_alloc_queues(struct mt76_dev *dev)
{
	int err;

	err = mt76u_alloc_rx_queue(dev, MT_RXQ_MAIN);
	if (err < 0)
		return err;

	return mt76u_alloc_tx(dev);
}
EXPORT_SYMBOL_GPL(mt76u_alloc_queues);

static const struct mt76_queue_ops usb_queue_ops = {
	.tx_queue_skb = mt76u_tx_queue_skb,
	.kick = mt76u_tx_kick,
};

void mt76u_deinit(struct mt76_dev *dev)
{
	if (dev->usb.wq) {
		destroy_workqueue(dev->usb.wq);
		dev->usb.wq = NULL;
	}
}
EXPORT_SYMBOL_GPL(mt76u_deinit);

int mt76u_init(struct mt76_dev *dev,
	       struct usb_interface *intf, bool ext)
{
	static struct mt76_bus_ops mt76u_ops = {
		.read_copy = mt76u_read_copy_ext,
		.wr_rp = mt76u_wr_rp,
		.rd_rp = mt76u_rd_rp,
		.type = MT76_BUS_USB,
	};
	struct usb_device *udev = interface_to_usbdev(intf);
	struct mt76_usb *usb = &dev->usb;
	int err = -ENOMEM;

	mt76u_ops.rr = ext ? mt76u_rr_ext : mt76u_rr;
	mt76u_ops.wr = ext ? mt76u_wr_ext : mt76u_wr;
	mt76u_ops.rmw = ext ? mt76u_rmw_ext : mt76u_rmw;
	mt76u_ops.write_copy = ext ? mt76u_copy_ext : mt76u_copy;

	tasklet_init(&usb->rx_tasklet, mt76u_rx_tasklet, (unsigned long)dev);
	tasklet_init(&dev->tx_tasklet, mt76u_tx_tasklet, (unsigned long)dev);
	INIT_WORK(&usb->stat_work, mt76u_tx_status_data);

	usb->wq = alloc_workqueue("mt76u", WQ_UNBOUND, 0);
	if (!usb->wq)
		return -ENOMEM;

	usb->data_len = usb_maxpacket(udev, usb_sndctrlpipe(udev, 0), 1);
	if (usb->data_len < 32)
		usb->data_len = 32;

	usb->data = devm_kmalloc(dev->dev, usb->data_len, GFP_KERNEL);
	if (!usb->data)
		goto error;

	mutex_init(&usb->usb_ctrl_mtx);
	dev->bus = &mt76u_ops;
	dev->queue_ops = &usb_queue_ops;

	dev_set_drvdata(&udev->dev, dev);

	usb->sg_en = mt76u_check_sg(dev);

	err = mt76u_set_endpoints(intf, usb);
	if (err < 0)
		goto error;

	return 0;

error:
	mt76u_deinit(dev);
	return err;
}
EXPORT_SYMBOL_GPL(mt76u_init);

MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>");
MODULE_LICENSE("Dual BSD/GPL");