// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 */

#include "mt76.h"

static enum mt76_txq_id
mt76_txq_get_qid(struct ieee80211_txq *txq)
{
        if (!txq->sta)
                return MT_TXQ_BE;

        return txq->ac;
}

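/* Track the next expected A-MPDU starting sequence number for a station's
 * TID, so that a BlockAckReq with the correct SSN can be sent later (see
 * the send_bar handling in mt76_txq_schedule_list() below).
 */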
void
mt76_tx_check_agg_ssn(struct ieee80211_sta *sta, struct sk_buff *skb)
{
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
        struct ieee80211_txq *txq;
        struct mt76_txq *mtxq;
        u8 tid;

        if (!sta || !ieee80211_is_data_qos(hdr->frame_control) ||
            !ieee80211_is_data_present(hdr->frame_control))
                return;

        tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
        txq = sta->txq[tid];
        mtxq = (struct mt76_txq *)txq->drv_priv;
        if (!mtxq->aggr)
                return;

        mtxq->agg_ssn = le16_to_cpu(hdr->seq_ctrl) + 0x10;
}
EXPORT_SYMBOL_GPL(mt76_tx_check_agg_ssn);

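/* Frames that need a TX status report sit on dev->status_list until both
 * the DMA completion and the TX status event have been seen.  The helpers
 * below take the list lock, collect completed frames on a caller-provided
 * list and only hand them to mac80211 after the lock has been dropped.
 */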
void
mt76_tx_status_lock(struct mt76_dev *dev, struct sk_buff_head *list)
        __acquires(&dev->status_list.lock)
{
        __skb_queue_head_init(list);
        spin_lock_bh(&dev->status_list.lock);
}
EXPORT_SYMBOL_GPL(mt76_tx_status_lock);

void
mt76_tx_status_unlock(struct mt76_dev *dev, struct sk_buff_head *list)
        __releases(&dev->status_list.lock)
{
        struct ieee80211_hw *hw;
        struct sk_buff *skb;

        spin_unlock_bh(&dev->status_list.lock);

        while ((skb = __skb_dequeue(list)) != NULL) {
                hw = mt76_tx_status_get_hw(dev, skb);
                ieee80211_tx_status(hw, skb);
        }
}
EXPORT_SYMBOL_GPL(mt76_tx_status_unlock);

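/* Merge a completion flag into the skb's tx_cb and, once both
 * MT_TX_CB_DMA_DONE and MT_TX_CB_TXS_DONE are set, move the frame from
 * dev->status_list onto the caller's list for reporting.
 */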
static void
__mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb, u8 flags,
                          struct sk_buff_head *list)
{
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
        u8 done = MT_TX_CB_DMA_DONE | MT_TX_CB_TXS_DONE;

        flags |= cb->flags;
        cb->flags = flags;

        if ((flags & done) != done)
                return;

        __skb_unlink(skb, &dev->status_list);

        /* Tx status can be unreliable. if it fails, mark the frame as ACKed */
        if (flags & MT_TX_CB_TXS_FAILED) {
                ieee80211_tx_info_clear_status(info);
                info->status.rates[0].idx = -1;
                info->flags |= IEEE80211_TX_STAT_ACK;
        }

        __skb_queue_tail(list, skb);
}

void
mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb,
                        struct sk_buff_head *list)
{
        __mt76_tx_status_skb_done(dev, skb, MT_TX_CB_TXS_DONE, list);
}
EXPORT_SYMBOL_GPL(mt76_tx_status_skb_done);

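/* Allocate a packet ID for status tracking and queue the frame on
 * dev->status_list.  Returns MT_PACKET_ID_NO_ACK or MT_PACKET_ID_NO_SKB
 * when no per-frame status report is needed.
 */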
int
mt76_tx_status_skb_add(struct mt76_dev *dev, struct mt76_wcid *wcid,
                       struct sk_buff *skb)
{
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
        int pid;

        if (!wcid)
                return MT_PACKET_ID_NO_ACK;

        if (info->flags & IEEE80211_TX_CTL_NO_ACK)
                return MT_PACKET_ID_NO_ACK;

        if (!(info->flags & (IEEE80211_TX_CTL_REQ_TX_STATUS |
                             IEEE80211_TX_CTL_RATE_CTRL_PROBE)))
                return MT_PACKET_ID_NO_SKB;

        spin_lock_bh(&dev->status_list.lock);

        memset(cb, 0, sizeof(*cb));
        wcid->packet_id = (wcid->packet_id + 1) & MT_PACKET_ID_MASK;
        if (wcid->packet_id == MT_PACKET_ID_NO_ACK ||
            wcid->packet_id == MT_PACKET_ID_NO_SKB)
                wcid->packet_id = MT_PACKET_ID_FIRST;

        pid = wcid->packet_id;
        cb->wcid = wcid->idx;
        cb->pktid = pid;
        cb->jiffies = jiffies;

        __skb_queue_tail(&dev->status_list, skb);
        spin_unlock_bh(&dev->status_list.lock);

        return pid;
}
EXPORT_SYMBOL_GPL(mt76_tx_status_skb_add);

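/* Look up a queued frame by wcid/packet ID; entries older than
 * MT_TX_STATUS_SKB_TIMEOUT are flushed as failed.  A driver's TX status
 * event path would typically pair this with the helpers above roughly as
 * follows (illustrative sketch only, not taken from a specific driver;
 * "mdev", "wcid" and "pid" are placeholder names):
 *
 *      mt76_tx_status_lock(mdev, &list);
 *      skb = mt76_tx_status_skb_get(mdev, wcid, pid, &list);
 *      if (skb)
 *              mt76_tx_status_skb_done(mdev, skb, &list);
 *      mt76_tx_status_unlock(mdev, &list);
 */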
struct sk_buff *
mt76_tx_status_skb_get(struct mt76_dev *dev, struct mt76_wcid *wcid, int pktid,
                       struct sk_buff_head *list)
{
        struct sk_buff *skb, *tmp;

        skb_queue_walk_safe(&dev->status_list, skb, tmp) {
                struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);

                if (wcid && cb->wcid != wcid->idx)
                        continue;

                if (cb->pktid == pktid)
                        return skb;

                if (pktid >= 0 && !time_after(jiffies, cb->jiffies +
                                              MT_TX_STATUS_SKB_TIMEOUT))
                        continue;

                __mt76_tx_status_skb_done(dev, skb, MT_TX_CB_TXS_FAILED |
                                                    MT_TX_CB_TXS_DONE, list);
        }

        return NULL;
}
EXPORT_SYMBOL_GPL(mt76_tx_status_skb_get);

void
mt76_tx_status_check(struct mt76_dev *dev, struct mt76_wcid *wcid, bool flush)
{
        struct sk_buff_head list;

        mt76_tx_status_lock(dev, &list);
        mt76_tx_status_skb_get(dev, wcid, flush ? -1 : 0, &list);
        mt76_tx_status_unlock(dev, &list);
}
EXPORT_SYMBOL_GPL(mt76_tx_status_check);

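/* For frames that carry no airtime estimate (non-AQL), drop the per-wcid
 * pending counter that __mt76_tx_queue_skb() bumped, clamping it at zero.
 */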
static void
mt76_tx_check_non_aql(struct mt76_dev *dev, u16 wcid_idx, struct sk_buff *skb)
{
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        struct mt76_wcid *wcid;
        int pending;

        if (info->tx_time_est)
                return;

        if (wcid_idx >= ARRAY_SIZE(dev->wcid))
                return;

        rcu_read_lock();

        wcid = rcu_dereference(dev->wcid[wcid_idx]);
        if (wcid) {
                pending = atomic_dec_return(&wcid->non_aql_packets);
                if (pending < 0)
                        atomic_cmpxchg(&wcid->non_aql_packets, pending, 0);
        }

        rcu_read_unlock();
}

void mt76_tx_complete_skb(struct mt76_dev *dev, u16 wcid_idx, struct sk_buff *skb)
{
        struct ieee80211_hw *hw;
        struct sk_buff_head list;

#ifdef CONFIG_NL80211_TESTMODE
        if (skb == dev->test.tx_skb) {
                dev->test.tx_done++;
                if (dev->test.tx_queued == dev->test.tx_done)
                        wake_up(&dev->tx_wait);
        }
#endif

        mt76_tx_check_non_aql(dev, wcid_idx, skb);

        if (!skb->prev) {
                hw = mt76_tx_status_get_hw(dev, skb);
                ieee80211_free_txskb(hw, skb);
                return;
        }

        mt76_tx_status_lock(dev, &list);
        __mt76_tx_status_skb_done(dev, skb, MT_TX_CB_DMA_DONE, &list);
        mt76_tx_status_unlock(dev, &list);
}
EXPORT_SYMBOL_GPL(mt76_tx_complete_skb);

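/* Hand one frame to the queue backend.  For non-AQL frames the per-wcid
 * pending counter is increased and *stop is set once MT_MAX_NON_AQL_PKT
 * is reached, so callers can stop pulling frames from that txq.
 */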
static int
__mt76_tx_queue_skb(struct mt76_phy *phy, int qid, struct sk_buff *skb,
                    struct mt76_wcid *wcid, struct ieee80211_sta *sta,
                    bool *stop)
{
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        struct mt76_queue *q = phy->q_tx[qid];
        struct mt76_dev *dev = phy->dev;
        bool non_aql;
        int pending;
        int idx;

        non_aql = !info->tx_time_est;
        idx = dev->queue_ops->tx_queue_skb(dev, q, skb, wcid, sta);
        if (idx < 0 || !sta || !non_aql)
                return idx;

        wcid = (struct mt76_wcid *)sta->drv_priv;
        q->entry[idx].wcid = wcid->idx;
        pending = atomic_inc_return(&wcid->non_aql_packets);
        if (stop && pending >= MT_MAX_NON_AQL_PKT)
                *stop = true;

        return idx;
}

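/* Transmit path for frames handed to the driver directly through the
 * mac80211 .tx op, rather than dequeued from an intermediate txq.
 */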
void
mt76_tx(struct mt76_phy *phy, struct ieee80211_sta *sta,
        struct mt76_wcid *wcid, struct sk_buff *skb)
{
        struct mt76_dev *dev = phy->dev;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
        struct mt76_queue *q;
        int qid = skb_get_queue_mapping(skb);
        bool ext_phy = phy != &dev->phy;

        if (mt76_testmode_enabled(dev)) {
                ieee80211_free_txskb(phy->hw, skb);
                return;
        }

        if (WARN_ON(qid >= MT_TXQ_PSD)) {
                qid = MT_TXQ_BE;
                skb_set_queue_mapping(skb, qid);
        }

        if ((dev->drv->drv_flags & MT_DRV_HW_MGMT_TXQ) &&
            !(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) &&
            !ieee80211_is_data(hdr->frame_control) &&
            !ieee80211_is_bufferable_mmpdu(hdr->frame_control)) {
                qid = MT_TXQ_PSD;
                skb_set_queue_mapping(skb, qid);
        }

        if (!(wcid->tx_info & MT_WCID_TX_INFO_SET))
                ieee80211_get_tx_rates(info->control.vif, sta, skb,
                                       info->control.rates, 1);

        if (ext_phy)
                info->hw_queue |= MT_TX_HW_QUEUE_EXT_PHY;

        q = phy->q_tx[qid];

        spin_lock_bh(&q->lock);
        __mt76_tx_queue_skb(phy, qid, skb, wcid, sta, NULL);
        dev->queue_ops->kick(dev, q);
        spin_unlock_bh(&q->lock);
}
EXPORT_SYMBOL_GPL(mt76_tx);

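/* Pull the next frame for this txq from mac80211 and, on the second phy,
 * tag it with MT_TX_HW_QUEUE_EXT_PHY so it lands on the right hardware
 * queue.
 */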
static struct sk_buff *
mt76_txq_dequeue(struct mt76_phy *phy, struct mt76_txq *mtxq)
{
        struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
        struct ieee80211_tx_info *info;
        bool ext_phy = phy != &phy->dev->phy;
        struct sk_buff *skb;

        skb = ieee80211_tx_dequeue(phy->hw, txq);
        if (!skb)
                return NULL;

        info = IEEE80211_SKB_CB(skb);
        if (ext_phy)
                info->hw_queue |= MT_TX_HW_QUEUE_EXT_PHY;

        return skb;
}

static void
mt76_queue_ps_skb(struct mt76_phy *phy, struct ieee80211_sta *sta,
                  struct sk_buff *skb, bool last)
{
        struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

        info->control.flags |= IEEE80211_TX_CTRL_PS_RESPONSE;
        if (last)
                info->flags |= IEEE80211_TX_STATUS_EOSP |
                               IEEE80211_TX_CTL_REQ_TX_STATUS;

        mt76_skb_set_moredata(skb, !last);
        __mt76_tx_queue_skb(phy, MT_TXQ_PSD, skb, wcid, sta, NULL);
}

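/* mac80211 release_buffered_frames callback: push up to @nframes
 * powersave-buffered frames to the PSD queue, marking all but the last
 * with the "more data" bit and the last one with EOSP.
 */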
void
mt76_release_buffered_frames(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
                             u16 tids, int nframes,
                             enum ieee80211_frame_release_type reason,
                             bool more_data)
{
        struct mt76_phy *phy = hw->priv;
        struct mt76_dev *dev = phy->dev;
        struct sk_buff *last_skb = NULL;
        struct mt76_queue *hwq = phy->q_tx[MT_TXQ_PSD];
        int i;

        spin_lock_bh(&hwq->lock);
        for (i = 0; tids && nframes; i++, tids >>= 1) {
                struct ieee80211_txq *txq = sta->txq[i];
                struct mt76_txq *mtxq = (struct mt76_txq *)txq->drv_priv;

                if (!(tids & 1))
                        continue;

                do {
                        struct sk_buff *skb;

                        skb = mt76_txq_dequeue(phy, mtxq);
                        if (!skb)
                                break;

                        nframes--;
                        if (last_skb)
                                mt76_queue_ps_skb(phy, sta, last_skb, false);

                        last_skb = skb;
                } while (nframes);
        }

        if (last_skb) {
                mt76_queue_ps_skb(phy, sta, last_skb, true);
                dev->queue_ops->kick(dev, hwq);
        } else {
                ieee80211_sta_eosp(sta);
        }

        spin_unlock_bh(&hwq->lock);
}
EXPORT_SYMBOL_GPL(mt76_release_buffered_frames);

static bool
mt76_txq_stopped(struct mt76_queue *q)
{
        return q->stopped || q->blocked ||
               q->queued + MT_TXQ_FREE_THR >= q->ndesc;
}

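/* Pull as many frames as possible from one txq and queue them to the
 * hardware, stopping when the queue fills up, the non-AQL limit is hit,
 * or the PHY enters PM/reset.  Returns the number of queued frames.
 */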
static int
mt76_txq_send_burst(struct mt76_phy *phy, struct mt76_queue *q,
                    struct mt76_txq *mtxq)
{
        struct mt76_dev *dev = phy->dev;
        struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
        enum mt76_txq_id qid = mt76_txq_get_qid(txq);
        struct mt76_wcid *wcid = mtxq->wcid;
        struct ieee80211_tx_info *info;
        struct sk_buff *skb;
        int n_frames = 1;
        bool stop = false;
        int idx;

        if (test_bit(MT_WCID_FLAG_PS, &wcid->flags))
                return 0;

        if (atomic_read(&wcid->non_aql_packets) >= MT_MAX_NON_AQL_PKT)
                return 0;

        skb = mt76_txq_dequeue(phy, mtxq);
        if (!skb)
                return 0;

        info = IEEE80211_SKB_CB(skb);
        if (!(wcid->tx_info & MT_WCID_TX_INFO_SET))
                ieee80211_get_tx_rates(txq->vif, txq->sta, skb,
                                       info->control.rates, 1);

        idx = __mt76_tx_queue_skb(phy, qid, skb, wcid, txq->sta, &stop);
        if (idx < 0)
                return idx;

        do {
                if (test_bit(MT76_STATE_PM, &phy->state) ||
                    test_bit(MT76_RESET, &phy->state))
                        return -EBUSY;

                if (stop || mt76_txq_stopped(q))
                        break;

                skb = mt76_txq_dequeue(phy, mtxq);
                if (!skb)
                        break;

                info = IEEE80211_SKB_CB(skb);
                if (!(wcid->tx_info & MT_WCID_TX_INFO_SET))
                        ieee80211_get_tx_rates(txq->vif, txq->sta, skb,
                                               info->control.rates, 1);

                idx = __mt76_tx_queue_skb(phy, qid, skb, wcid, txq->sta, &stop);
                if (idx < 0)
                        break;

                n_frames++;
        } while (1);

        dev->queue_ops->kick(dev, q);

        return n_frames;
}

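/* Round-robin over mac80211's txqs for one AC: clean up the hardware
 * queue when it runs low on descriptors, send a delayed BlockAckReq if
 * one is pending, and burst frames from each eligible txq.
 */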
static int
mt76_txq_schedule_list(struct mt76_phy *phy, enum mt76_txq_id qid)
{
        struct mt76_queue *q = phy->q_tx[qid];
        struct mt76_dev *dev = phy->dev;
        struct ieee80211_txq *txq;
        struct mt76_txq *mtxq;
        struct mt76_wcid *wcid;
        int ret = 0;

        spin_lock_bh(&q->lock);
        while (1) {
                if (test_bit(MT76_STATE_PM, &phy->state) ||
                    test_bit(MT76_RESET, &phy->state)) {
                        ret = -EBUSY;
                        break;
                }

                if (dev->queue_ops->tx_cleanup &&
                    q->queued + 2 * MT_TXQ_FREE_THR >= q->ndesc) {
                        spin_unlock_bh(&q->lock);
                        dev->queue_ops->tx_cleanup(dev, q, false);
                        spin_lock_bh(&q->lock);
                }

                if (mt76_txq_stopped(q))
                        break;

                txq = ieee80211_next_txq(phy->hw, qid);
                if (!txq)
                        break;

                mtxq = (struct mt76_txq *)txq->drv_priv;
                wcid = mtxq->wcid;
                if (wcid && test_bit(MT_WCID_FLAG_PS, &wcid->flags))
                        continue;

                if (mtxq->send_bar && mtxq->aggr) {
                        struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
                        struct ieee80211_sta *sta = txq->sta;
                        struct ieee80211_vif *vif = txq->vif;
                        u16 agg_ssn = mtxq->agg_ssn;
                        u8 tid = txq->tid;

                        mtxq->send_bar = false;
                        spin_unlock_bh(&q->lock);
                        ieee80211_send_bar(vif, sta->addr, tid, agg_ssn);
                        spin_lock_bh(&q->lock);
                }

                ret += mt76_txq_send_burst(phy, q, mtxq);
                ieee80211_return_txq(phy->hw, txq, false);
        }
        spin_unlock_bh(&q->lock);

        return ret;
}

void mt76_txq_schedule(struct mt76_phy *phy, enum mt76_txq_id qid)
{
        int len;

        if (qid >= 4)
                return;

        rcu_read_lock();

        do {
                ieee80211_txq_schedule_start(phy->hw, qid);
                len = mt76_txq_schedule_list(phy, qid);
                ieee80211_txq_schedule_end(phy->hw, qid);
        } while (len > 0);

        rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(mt76_txq_schedule);

void mt76_txq_schedule_all(struct mt76_phy *phy)
{
        int i;

        for (i = 0; i <= MT_TXQ_BK; i++)
                mt76_txq_schedule(phy, i);
}
EXPORT_SYMBOL_GPL(mt76_txq_schedule_all);

void mt76_tx_worker(struct mt76_worker *w)
{
        struct mt76_dev *dev = container_of(w, struct mt76_dev, tx_worker);

        mt76_txq_schedule_all(&dev->phy);
        if (dev->phy2)
                mt76_txq_schedule_all(dev->phy2);

#ifdef CONFIG_NL80211_TESTMODE
        if (dev->test.tx_pending)
                mt76_testmode_tx_pending(dev);
#endif
}

void mt76_stop_tx_queues(struct mt76_phy *phy, struct ieee80211_sta *sta,
                         bool send_bar)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
                struct ieee80211_txq *txq = sta->txq[i];
                struct mt76_queue *hwq;
                struct mt76_txq *mtxq;

                if (!txq)
                        continue;

                hwq = phy->q_tx[mt76_txq_get_qid(txq)];
                mtxq = (struct mt76_txq *)txq->drv_priv;

                spin_lock_bh(&hwq->lock);
                mtxq->send_bar = mtxq->aggr && send_bar;
                spin_unlock_bh(&hwq->lock);
        }
}
EXPORT_SYMBOL_GPL(mt76_stop_tx_queues);

void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
{
        struct mt76_phy *phy = hw->priv;
        struct mt76_dev *dev = phy->dev;

        if (!test_bit(MT76_STATE_RUNNING, &phy->state))
                return;

        mt76_worker_schedule(&dev->tx_worker);
}
EXPORT_SYMBOL_GPL(mt76_wake_tx_queue);

u8 mt76_ac_to_hwq(u8 ac)
{
        static const u8 wmm_queue_map[] = {
                [IEEE80211_AC_BE] = 0,
                [IEEE80211_AC_BK] = 1,
                [IEEE80211_AC_VI] = 2,
                [IEEE80211_AC_VO] = 3,
        };

        if (WARN_ON(ac >= IEEE80211_NUM_ACS))
                return 0;

        return wmm_queue_map[ac];
}
EXPORT_SYMBOL_GPL(mt76_ac_to_hwq);

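/* Append @pad bytes of padding to the last fragment of the skb chain and
 * fix up the head skb's total length accordingly.
 */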
int mt76_skb_adjust_pad(struct sk_buff *skb, int pad)
{
        struct sk_buff *iter, *last = skb;

        /* First packet of a A-MSDU burst keeps track of the whole burst
         * length, need to update length of it and the last packet.
         */
        skb_walk_frags(skb, iter) {
                last = iter;
                if (!iter->next) {
                        skb->data_len += pad;
                        skb->len += pad;
                        break;
                }
        }

        if (skb_pad(last, pad))
                return -ENOMEM;

        __skb_put(last, pad);

        return 0;
}
EXPORT_SYMBOL_GPL(mt76_skb_adjust_pad);

void mt76_queue_tx_complete(struct mt76_dev *dev, struct mt76_queue *q,
                            struct mt76_queue_entry *e)
{
        if (e->skb)
                dev->drv->tx_complete_skb(dev, e);

        spin_lock_bh(&q->lock);
        q->tail = (q->tail + 1) % q->ndesc;
        q->queued--;
        spin_unlock_bh(&q->lock);
}
EXPORT_SYMBOL_GPL(mt76_queue_tx_complete);