// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 */

#include "mt76.h"

static enum mt76_txq_id
mt76_txq_get_qid(struct ieee80211_txq *txq)
{
        if (!txq->sta)
                return MT_TXQ_BE;

        return txq->ac;
}
void
mt76_tx_check_agg_ssn(struct ieee80211_sta *sta, struct sk_buff *skb)
{
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
        struct ieee80211_txq *txq;
        struct mt76_txq *mtxq;
        u8 tid;

        if (!sta || !ieee80211_is_data_qos(hdr->frame_control) ||
            !ieee80211_is_data_present(hdr->frame_control))
                return;

        tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
        txq = sta->txq[tid];
        mtxq = (struct mt76_txq *)txq->drv_priv;
        if (!mtxq->aggr)
                return;

        mtxq->agg_ssn = le16_to_cpu(hdr->seq_ctrl) + 0x10;
}
EXPORT_SYMBOL_GPL(mt76_tx_check_agg_ssn);
void
mt76_tx_status_lock(struct mt76_dev *dev, struct sk_buff_head *list)
                    __acquires(&dev->status_lock)
{
        __skb_queue_head_init(list);
        spin_lock_bh(&dev->status_lock);
}
EXPORT_SYMBOL_GPL(mt76_tx_status_lock);
void
mt76_tx_status_unlock(struct mt76_dev *dev, struct sk_buff_head *list)
                      __releases(&dev->status_lock)
{
        struct ieee80211_hw *hw;
        struct sk_buff *skb;

        spin_unlock_bh(&dev->status_lock);

        rcu_read_lock();
        while ((skb = __skb_dequeue(list)) != NULL) {
                struct ieee80211_tx_status status = {
                        .skb = skb,
                        .info = IEEE80211_SKB_CB(skb),
                };
                struct ieee80211_rate_status rs = {};
                struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
                struct mt76_wcid *wcid;

                wcid = rcu_dereference(dev->wcid[cb->wcid]);
                if (wcid) {
                        status.sta = wcid_to_sta(wcid);
                        if (status.sta && (wcid->rate.flags || wcid->rate.legacy)) {
                                rs.rate_idx = wcid->rate;
                                status.rates = &rs;
                                status.n_rates = 1;
                        } else {
                                status.n_rates = 0;
                        }
                }

                hw = mt76_tx_status_get_hw(dev, skb);
                spin_lock_bh(&dev->rx_lock);
                ieee80211_tx_status_ext(hw, &status);
                spin_unlock_bh(&dev->rx_lock);
        }
        rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(mt76_tx_status_unlock);
static void
__mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb, u8 flags,
                          struct sk_buff_head *list)
{
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
        u8 done = MT_TX_CB_DMA_DONE | MT_TX_CB_TXS_DONE;

        flags |= cb->flags;
        cb->flags = flags;

        if ((flags & done) != done)
                return;

        /* Tx status can be unreliable. If it fails, mark the frame as ACKed */
        if (flags & MT_TX_CB_TXS_FAILED) {
                info->status.rates[0].count = 0;
                info->status.rates[0].idx = -1;
                info->flags |= IEEE80211_TX_STAT_ACK;
        }

        __skb_queue_tail(list, skb);
}
void
mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb,
                        struct sk_buff_head *list)
{
        __mt76_tx_status_skb_done(dev, skb, MT_TX_CB_TXS_DONE, list);
}
EXPORT_SYMBOL_GPL(mt76_tx_status_skb_done);
int
mt76_tx_status_skb_add(struct mt76_dev *dev, struct mt76_wcid *wcid,
                       struct sk_buff *skb)
{
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
        int pid;

        memset(cb, 0, sizeof(*cb));

        if (!wcid || !rcu_access_pointer(dev->wcid[wcid->idx]))
                return MT_PACKET_ID_NO_ACK;

        if (info->flags & IEEE80211_TX_CTL_NO_ACK)
                return MT_PACKET_ID_NO_ACK;

        if (!(info->flags & (IEEE80211_TX_CTL_REQ_TX_STATUS |
                             IEEE80211_TX_CTL_RATE_CTRL_PROBE))) {
                if (mtk_wed_device_active(&dev->mmio.wed) &&
                    ((info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) ||
                     ieee80211_is_data(hdr->frame_control)))
                        return MT_PACKET_ID_WED;

                return MT_PACKET_ID_NO_SKB;
        }

        spin_lock_bh(&dev->status_lock);

        pid = idr_alloc(&wcid->pktid, skb, MT_PACKET_ID_FIRST,
                        MT_PACKET_ID_MASK, GFP_ATOMIC);
        if (pid < 0) {
                pid = MT_PACKET_ID_NO_SKB;
                goto out;
        }

        cb->wcid = wcid->idx;
        cb->pktid = pid;

        if (list_empty(&wcid->list))
                list_add_tail(&wcid->list, &dev->wcid_list);

out:
        spin_unlock_bh(&dev->status_lock);

        return pid;
}
EXPORT_SYMBOL_GPL(mt76_tx_status_skb_add);
struct sk_buff *
mt76_tx_status_skb_get(struct mt76_dev *dev, struct mt76_wcid *wcid, int pktid,
                       struct sk_buff_head *list)
{
        struct sk_buff *skb;
        int id;

        lockdep_assert_held(&dev->status_lock);

        skb = idr_remove(&wcid->pktid, pktid);
        if (skb)
                goto out;

        /* look for stale entries in the wcid idr queue */
        idr_for_each_entry(&wcid->pktid, skb, id) {
                struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);

                if (pktid >= 0) {
                        if (!(cb->flags & MT_TX_CB_DMA_DONE))
                                continue;

                        if (time_is_after_jiffies(cb->jiffies +
                                                  MT_TX_STATUS_SKB_TIMEOUT))
                                continue;
                }

                /* It has been too long since DMA_DONE, time out this packet
                 * and stop waiting for TXS callback.
                 */
                idr_remove(&wcid->pktid, cb->pktid);
                __mt76_tx_status_skb_done(dev, skb, MT_TX_CB_TXS_FAILED |
                                                    MT_TX_CB_TXS_DONE, list);
        }

out:
        if (idr_is_empty(&wcid->pktid))
                list_del_init(&wcid->list);

        return skb;
}
EXPORT_SYMBOL_GPL(mt76_tx_status_skb_get);
void
mt76_tx_status_check(struct mt76_dev *dev, bool flush)
{
        struct mt76_wcid *wcid, *tmp;
        struct sk_buff_head list;

        mt76_tx_status_lock(dev, &list);
        list_for_each_entry_safe(wcid, tmp, &dev->wcid_list, list)
                mt76_tx_status_skb_get(dev, wcid, flush ? -1 : 0, &list);
        mt76_tx_status_unlock(dev, &list);
}
EXPORT_SYMBOL_GPL(mt76_tx_status_check);
static void
mt76_tx_check_non_aql(struct mt76_dev *dev, struct mt76_wcid *wcid,
                      struct sk_buff *skb)
{
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        int pending;

        if (!wcid || info->tx_time_est)
                return;

        pending = atomic_dec_return(&wcid->non_aql_packets);
        if (pending < 0)
                atomic_cmpxchg(&wcid->non_aql_packets, pending, 0);
}
void __mt76_tx_complete_skb(struct mt76_dev *dev, u16 wcid_idx, struct sk_buff *skb,
                            struct list_head *free_list)
{
        struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
        struct ieee80211_tx_status status = {
                .skb = skb,
                .free_list = free_list,
        };
        struct mt76_wcid *wcid = NULL;
        struct ieee80211_hw *hw;
        struct sk_buff_head list;

        rcu_read_lock();

        if (wcid_idx < ARRAY_SIZE(dev->wcid))
                wcid = rcu_dereference(dev->wcid[wcid_idx]);

        mt76_tx_check_non_aql(dev, wcid, skb);

#ifdef CONFIG_NL80211_TESTMODE
        if (mt76_is_testmode_skb(dev, skb, &hw)) {
                struct mt76_phy *phy = hw->priv;

                if (skb == phy->test.tx_skb)
                        phy->test.tx_done++;
                if (phy->test.tx_queued == phy->test.tx_done)
                        wake_up(&dev->tx_wait);

                dev_kfree_skb_any(skb);
                goto out;
        }
#endif

        if (cb->pktid < MT_PACKET_ID_FIRST) {
                struct ieee80211_rate_status rs = {};

                hw = mt76_tx_status_get_hw(dev, skb);
                status.sta = wcid_to_sta(wcid);
                if (status.sta && (wcid->rate.flags || wcid->rate.legacy)) {
                        rs.rate_idx = wcid->rate;
                        status.rates = &rs;
                        status.n_rates = 1;
                }

                spin_lock_bh(&dev->rx_lock);
                ieee80211_tx_status_ext(hw, &status);
                spin_unlock_bh(&dev->rx_lock);
                goto out;
        }

        mt76_tx_status_lock(dev, &list);
        cb->jiffies = jiffies;
        __mt76_tx_status_skb_done(dev, skb, MT_TX_CB_DMA_DONE, &list);
        mt76_tx_status_unlock(dev, &list);

out:
        rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(__mt76_tx_complete_skb);
static int
__mt76_tx_queue_skb(struct mt76_phy *phy, int qid, struct sk_buff *skb,
                    struct mt76_wcid *wcid, struct ieee80211_sta *sta,
                    bool *stop)
{
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        struct mt76_queue *q = phy->q_tx[qid];
        struct mt76_dev *dev = phy->dev;
        bool non_aql;
        int pending;
        int idx;

        non_aql = !info->tx_time_est;
        idx = dev->queue_ops->tx_queue_skb(phy, q, qid, skb, wcid, sta);
        if (idx < 0 || !sta)
                return idx;

        wcid = (struct mt76_wcid *)sta->drv_priv;
        q->entry[idx].wcid = wcid->idx;

        if (!non_aql)
                return idx;

        pending = atomic_inc_return(&wcid->non_aql_packets);
        if (stop && pending >= MT_MAX_NON_AQL_PKT)
                *stop = true;

        return idx;
}
void
mt76_tx(struct mt76_phy *phy, struct ieee80211_sta *sta,
        struct mt76_wcid *wcid, struct sk_buff *skb)
{
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        struct sk_buff_head *head;

        if (mt76_testmode_enabled(phy)) {
                ieee80211_free_txskb(phy->hw, skb);
                return;
        }

        if (WARN_ON(skb_get_queue_mapping(skb) >= MT_TXQ_PSD))
                skb_set_queue_mapping(skb, MT_TXQ_BE);

        if (wcid && !(wcid->tx_info & MT_WCID_TX_INFO_SET))
                ieee80211_get_tx_rates(info->control.vif, sta, skb,
                                       info->control.rates, 1);

        info->hw_queue |= FIELD_PREP(MT_TX_HW_QUEUE_PHY, phy->band_idx);

        if ((info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) ||
            (info->control.flags & IEEE80211_TX_CTRL_DONT_USE_RATE_MASK))
                head = &wcid->tx_offchannel;
        else
                head = &wcid->tx_pending;

        spin_lock_bh(&head->lock);
        __skb_queue_tail(head, skb);
        spin_unlock_bh(&head->lock);

        spin_lock_bh(&phy->tx_lock);
        if (list_empty(&wcid->tx_list))
                list_add_tail(&wcid->tx_list, &phy->tx_list);
        spin_unlock_bh(&phy->tx_lock);

        mt76_worker_schedule(&phy->dev->tx_worker);
}
EXPORT_SYMBOL_GPL(mt76_tx);
static struct sk_buff *
mt76_txq_dequeue(struct mt76_phy *phy, struct mt76_txq *mtxq)
{
        struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
        struct ieee80211_tx_info *info;
        struct sk_buff *skb;

        skb = ieee80211_tx_dequeue(phy->hw, txq);
        if (!skb)
                return NULL;

        info = IEEE80211_SKB_CB(skb);
        info->hw_queue |= FIELD_PREP(MT_TX_HW_QUEUE_PHY, phy->band_idx);

        return skb;
}
static void
mt76_queue_ps_skb(struct mt76_phy *phy, struct ieee80211_sta *sta,
                  struct sk_buff *skb, bool last)
{
        struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

        info->control.flags |= IEEE80211_TX_CTRL_PS_RESPONSE;
        if (last)
                info->flags |= IEEE80211_TX_STATUS_EOSP |
                               IEEE80211_TX_CTL_REQ_TX_STATUS;

        mt76_skb_set_moredata(skb, !last);
        __mt76_tx_queue_skb(phy, MT_TXQ_PSD, skb, wcid, sta, NULL);
}
void
mt76_release_buffered_frames(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
                             u16 tids, int nframes,
                             enum ieee80211_frame_release_type reason,
                             bool more_data)
{
        struct mt76_phy *phy = hw->priv;
        struct mt76_dev *dev = phy->dev;
        struct sk_buff *last_skb = NULL;
        struct mt76_queue *hwq = phy->q_tx[MT_TXQ_PSD];
        int i;

        spin_lock_bh(&hwq->lock);
        for (i = 0; tids && nframes; i++, tids >>= 1) {
                struct ieee80211_txq *txq = sta->txq[i];
                struct mt76_txq *mtxq = (struct mt76_txq *)txq->drv_priv;

                if (!(tids & 1))
                        continue;

                do {
                        struct sk_buff *skb;

                        skb = mt76_txq_dequeue(phy, mtxq);
                        if (!skb)
                                break;

                        nframes--;
                        if (last_skb)
                                mt76_queue_ps_skb(phy, sta, last_skb, false);

                        last_skb = skb;
                } while (nframes);
        }

        if (last_skb) {
                mt76_queue_ps_skb(phy, sta, last_skb, true);
                dev->queue_ops->kick(dev, hwq);
        } else {
                ieee80211_sta_eosp(sta);
        }

        spin_unlock_bh(&hwq->lock);
}
EXPORT_SYMBOL_GPL(mt76_release_buffered_frames);
static bool
mt76_txq_stopped(struct mt76_queue *q)
{
        return q->stopped || q->blocked ||
               q->queued + MT_TXQ_FREE_THR >= q->ndesc;
}
static int
mt76_txq_send_burst(struct mt76_phy *phy, struct mt76_queue *q,
                    struct mt76_txq *mtxq, struct mt76_wcid *wcid)
{
        struct mt76_dev *dev = phy->dev;
        struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
        enum mt76_txq_id qid = mt76_txq_get_qid(txq);
        struct ieee80211_tx_info *info;
        struct sk_buff *skb;
        int n_frames = 1;
        bool stop = false;
        int idx;

        if (test_bit(MT_WCID_FLAG_PS, &wcid->flags))
                return 0;

        if (atomic_read(&wcid->non_aql_packets) >= MT_MAX_NON_AQL_PKT)
                return 0;

        skb = mt76_txq_dequeue(phy, mtxq);
        if (!skb)
                return 0;

        info = IEEE80211_SKB_CB(skb);
        if (!(wcid->tx_info & MT_WCID_TX_INFO_SET))
                ieee80211_get_tx_rates(txq->vif, txq->sta, skb,
                                       info->control.rates, 1);

        spin_lock(&q->lock);
        idx = __mt76_tx_queue_skb(phy, qid, skb, wcid, txq->sta, &stop);
        spin_unlock(&q->lock);
        if (idx < 0)
                return idx;

        do {
                if (test_bit(MT76_RESET, &phy->state) || phy->offchannel)
                        return -EBUSY;

                if (stop || mt76_txq_stopped(q))
                        break;

                skb = mt76_txq_dequeue(phy, mtxq);
                if (!skb)
                        break;

                info = IEEE80211_SKB_CB(skb);
                if (!(wcid->tx_info & MT_WCID_TX_INFO_SET))
                        ieee80211_get_tx_rates(txq->vif, txq->sta, skb,
                                               info->control.rates, 1);

                spin_lock(&q->lock);
                idx = __mt76_tx_queue_skb(phy, qid, skb, wcid, txq->sta, &stop);
                spin_unlock(&q->lock);
                if (idx < 0)
                        break;

                n_frames++;
        } while (1);

        spin_lock(&q->lock);
        dev->queue_ops->kick(dev, q);
        spin_unlock(&q->lock);

        return n_frames;
}
static int
mt76_txq_schedule_list(struct mt76_phy *phy, enum mt76_txq_id qid)
{
        struct mt76_queue *q = phy->q_tx[qid];
        struct mt76_dev *dev = phy->dev;
        struct ieee80211_txq *txq;
        struct mt76_txq *mtxq;
        struct mt76_wcid *wcid;
        int ret = 0;

        while (1) {
                int n_frames = 0;

                if (test_bit(MT76_RESET, &phy->state) || phy->offchannel)
                        return -EBUSY;

                if (dev->queue_ops->tx_cleanup &&
                    q->queued + 2 * MT_TXQ_FREE_THR >= q->ndesc) {
                        dev->queue_ops->tx_cleanup(dev, q, false);
                }

                txq = ieee80211_next_txq(phy->hw, qid);
                if (!txq)
                        break;

                mtxq = (struct mt76_txq *)txq->drv_priv;
                wcid = rcu_dereference(dev->wcid[mtxq->wcid]);
                if (!wcid || test_bit(MT_WCID_FLAG_PS, &wcid->flags))
                        continue;

                if (mtxq->send_bar && mtxq->aggr) {
                        struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
                        struct ieee80211_sta *sta = txq->sta;
                        struct ieee80211_vif *vif = txq->vif;
                        u16 agg_ssn = mtxq->agg_ssn;
                        u8 tid = txq->tid;

                        mtxq->send_bar = false;
                        ieee80211_send_bar(vif, sta->addr, tid, agg_ssn);
                }

                if (!mt76_txq_stopped(q))
                        n_frames = mt76_txq_send_burst(phy, q, mtxq, wcid);

                ieee80211_return_txq(phy->hw, txq, false);

                if (unlikely(n_frames < 0))
                        return n_frames;

                ret += n_frames;
        }

        return ret;
}
void mt76_txq_schedule(struct mt76_phy *phy, enum mt76_txq_id qid)
{
        int len;

        if (qid >= 4 || phy->offchannel)
                return;

        local_bh_disable();
        rcu_read_lock();

        do {
                ieee80211_txq_schedule_start(phy->hw, qid);
                len = mt76_txq_schedule_list(phy, qid);
                ieee80211_txq_schedule_end(phy->hw, qid);
        } while (len > 0);

        rcu_read_unlock();
        local_bh_enable();
}
EXPORT_SYMBOL_GPL(mt76_txq_schedule);
static int
mt76_txq_schedule_pending_wcid(struct mt76_phy *phy, struct mt76_wcid *wcid,
                               struct sk_buff_head *head)
{
        struct mt76_dev *dev = phy->dev;
        struct ieee80211_sta *sta;
        struct mt76_queue *q;
        struct sk_buff *skb;
        int ret = 0;

        spin_lock(&head->lock);
        while ((skb = skb_peek(head)) != NULL) {
                struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
                struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
                int qid = skb_get_queue_mapping(skb);

                if ((dev->drv->drv_flags & MT_DRV_HW_MGMT_TXQ) &&
                    !(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) &&
                    !ieee80211_is_data(hdr->frame_control) &&
                    !ieee80211_is_bufferable_mmpdu(skb))
                        qid = MT_TXQ_PSD;

                q = phy->q_tx[qid];
                if (mt76_txq_stopped(q) || test_bit(MT76_RESET, &phy->state)) {
                        ret = -1;
                        break;
                }

                __skb_unlink(skb, head);
                spin_unlock(&head->lock);

                sta = wcid_to_sta(wcid);
                spin_lock(&q->lock);
                __mt76_tx_queue_skb(phy, qid, skb, wcid, sta, NULL);
                dev->queue_ops->kick(dev, q);
                spin_unlock(&q->lock);

                spin_lock(&head->lock);
        }
        spin_unlock(&head->lock);

        return ret;
}
static void mt76_txq_schedule_pending(struct mt76_phy *phy)
{
        LIST_HEAD(tx_list);

        if (list_empty(&phy->tx_list))
                return;

        local_bh_disable();
        rcu_read_lock();

        spin_lock(&phy->tx_lock);
        list_splice_init(&phy->tx_list, &tx_list);
        while (!list_empty(&tx_list)) {
                struct mt76_wcid *wcid;
                int ret;

                wcid = list_first_entry(&tx_list, struct mt76_wcid, tx_list);
                list_del_init(&wcid->tx_list);

                spin_unlock(&phy->tx_lock);
                ret = mt76_txq_schedule_pending_wcid(phy, wcid, &wcid->tx_offchannel);
                if (ret >= 0 && !phy->offchannel)
                        ret = mt76_txq_schedule_pending_wcid(phy, wcid, &wcid->tx_pending);
                spin_lock(&phy->tx_lock);

                if (!skb_queue_empty(&wcid->tx_pending) &&
                    !skb_queue_empty(&wcid->tx_offchannel) &&
                    list_empty(&wcid->tx_list))
                        list_add_tail(&wcid->tx_list, &phy->tx_list);

                if (ret < 0)
                        break;
        }
        spin_unlock(&phy->tx_lock);

        rcu_read_unlock();
        local_bh_enable();
}
void mt76_txq_schedule_all(struct mt76_phy *phy)
{
        int i;

        mt76_txq_schedule_pending(phy);
        for (i = 0; i <= MT_TXQ_BK; i++)
                mt76_txq_schedule(phy, i);
}
EXPORT_SYMBOL_GPL(mt76_txq_schedule_all);
void mt76_tx_worker_run(struct mt76_dev *dev)
{
        struct mt76_phy *phy;
        int i;

        for (i = 0; i < ARRAY_SIZE(dev->phys); i++) {
                phy = dev->phys[i];
                if (!phy)
                        continue;

                mt76_txq_schedule_all(phy);
        }

#ifdef CONFIG_NL80211_TESTMODE
        for (i = 0; i < ARRAY_SIZE(dev->phys); i++) {
                phy = dev->phys[i];
                if (!phy || !phy->test.tx_pending)
                        continue;

                mt76_testmode_tx_pending(phy);
        }
#endif
}
EXPORT_SYMBOL_GPL(mt76_tx_worker_run);
void mt76_tx_worker(struct mt76_worker *w)
{
        struct mt76_dev *dev = container_of(w, struct mt76_dev, tx_worker);

        mt76_tx_worker_run(dev);
}
void mt76_stop_tx_queues(struct mt76_phy *phy, struct ieee80211_sta *sta,
                         bool send_bar)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
                struct ieee80211_txq *txq = sta->txq[i];
                struct mt76_queue *hwq;
                struct mt76_txq *mtxq;

                if (!txq)
                        continue;

                hwq = phy->q_tx[mt76_txq_get_qid(txq)];
                mtxq = (struct mt76_txq *)txq->drv_priv;

                spin_lock_bh(&hwq->lock);
                mtxq->send_bar = mtxq->aggr && send_bar;
                spin_unlock_bh(&hwq->lock);
        }
}
EXPORT_SYMBOL_GPL(mt76_stop_tx_queues);
void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
{
        struct mt76_phy *phy = hw->priv;
        struct mt76_dev *dev = phy->dev;

        if (!test_bit(MT76_STATE_RUNNING, &phy->state))
                return;

        mt76_worker_schedule(&dev->tx_worker);
}
EXPORT_SYMBOL_GPL(mt76_wake_tx_queue);
u8 mt76_ac_to_hwq(u8 ac)
{
        static const u8 wmm_queue_map[] = {
                [IEEE80211_AC_BE] = 0,
                [IEEE80211_AC_BK] = 1,
                [IEEE80211_AC_VI] = 2,
                [IEEE80211_AC_VO] = 3,
        };

        if (WARN_ON(ac >= IEEE80211_NUM_ACS))
                return 0;

        return wmm_queue_map[ac];
}
EXPORT_SYMBOL_GPL(mt76_ac_to_hwq);
int mt76_skb_adjust_pad(struct sk_buff *skb, int pad)
{
        struct sk_buff *iter, *last = skb;

        /* First packet of a A-MSDU burst keeps track of the whole burst
         * length, need to update length of it and the last packet.
         */
        skb_walk_frags(skb, iter) {
                last = iter;
                if (!iter->next) {
                        skb->data_len += pad;
                        skb->len += pad;
                        break;
                }
        }

        if (skb_pad(last, pad))
                return -ENOMEM;

        __skb_put(last, pad);

        return 0;
}
EXPORT_SYMBOL_GPL(mt76_skb_adjust_pad);
void mt76_queue_tx_complete(struct mt76_dev *dev, struct mt76_queue *q,
                            struct mt76_queue_entry *e)
{
        if (e->skb)
                dev->drv->tx_complete_skb(dev, e);

        spin_lock_bh(&q->lock);
        q->tail = (q->tail + 1) % q->ndesc;
        q->queued--;
        spin_unlock_bh(&q->lock);
}
EXPORT_SYMBOL_GPL(mt76_queue_tx_complete);
void __mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked)
{
        struct mt76_phy *phy = &dev->phy;
        struct mt76_queue *q = phy->q_tx[0];

        if (blocked == q->blocked)
                return;

        q->blocked = blocked;

        phy = dev->phys[MT_BAND1];
        if (phy) {
                q = phy->q_tx[0];
                q->blocked = blocked;
        }
        phy = dev->phys[MT_BAND2];
        if (phy) {
                q = phy->q_tx[0];
                q->blocked = blocked;
        }

        if (!blocked)
                mt76_worker_schedule(&dev->tx_worker);
}
EXPORT_SYMBOL_GPL(__mt76_set_tx_blocked);
int mt76_token_consume(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi)
{
        int token;

        spin_lock_bh(&dev->token_lock);

        token = idr_alloc(&dev->token, *ptxwi, 0, dev->token_size, GFP_ATOMIC);
        if (token >= 0)
                dev->token_count++;

#ifdef CONFIG_NET_MEDIATEK_SOC_WED
        if (mtk_wed_device_active(&dev->mmio.wed) &&
            token >= dev->mmio.wed.wlan.token_start)
                dev->wed_token_count++;
#endif

        if (dev->token_count >= dev->token_size - MT76_TOKEN_FREE_THR)
                __mt76_set_tx_blocked(dev, true);

        spin_unlock_bh(&dev->token_lock);

        return token;
}
EXPORT_SYMBOL_GPL(mt76_token_consume);
int mt76_rx_token_consume(struct mt76_dev *dev, void *ptr,
                          struct mt76_txwi_cache *t, dma_addr_t phys)
{
        int token;

        spin_lock_bh(&dev->rx_token_lock);
        token = idr_alloc(&dev->rx_token, t, 0, dev->rx_token_size,
                          GFP_ATOMIC);
        if (token >= 0) {
                t->ptr = ptr;
                t->dma_addr = phys;
        }
        spin_unlock_bh(&dev->rx_token_lock);

        return token;
}
EXPORT_SYMBOL_GPL(mt76_rx_token_consume);
struct mt76_txwi_cache *
mt76_token_release(struct mt76_dev *dev, int token, bool *wake)
{
        struct mt76_txwi_cache *txwi;

        spin_lock_bh(&dev->token_lock);

        txwi = idr_remove(&dev->token, token);
        if (txwi) {
                dev->token_count--;

#ifdef CONFIG_NET_MEDIATEK_SOC_WED
                if (mtk_wed_device_active(&dev->mmio.wed) &&
                    token >= dev->mmio.wed.wlan.token_start &&
                    --dev->wed_token_count == 0)
                        wake_up(&dev->tx_wait);
#endif
        }

        if (dev->token_count < dev->token_size - MT76_TOKEN_FREE_THR &&
            dev->phy.q_tx[0]->blocked)
                *wake = true;

        spin_unlock_bh(&dev->token_lock);

        return txwi;
}
EXPORT_SYMBOL_GPL(mt76_token_release);
struct mt76_txwi_cache *
mt76_rx_token_release(struct mt76_dev *dev, int token)
{
        struct mt76_txwi_cache *t;

        spin_lock_bh(&dev->rx_token_lock);
        t = idr_remove(&dev->rx_token, token);
        spin_unlock_bh(&dev->rx_token_lock);

        return t;
}
EXPORT_SYMBOL_GPL(mt76_rx_token_release);