// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 */

#include "mt76.h"
static struct mt76_txwi_cache *
mt76_alloc_txwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t;
	dma_addr_t addr;
	u8 *txwi;
	int size;

	size = L1_CACHE_ALIGN(dev->drv->txwi_size + sizeof(*t));
	txwi = devm_kzalloc(dev->dev, size, GFP_ATOMIC);
	if (!txwi)
		return NULL;

	addr = dma_map_single(dev->dev, txwi, dev->drv->txwi_size,
			      DMA_TO_DEVICE);
	t = (struct mt76_txwi_cache *)(txwi + dev->drv->txwi_size);
	t->dma_addr = addr;

	return t;
}
static struct mt76_txwi_cache *
__mt76_get_txwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t = NULL;

	spin_lock_bh(&dev->lock);
	if (!list_empty(&dev->txwi_cache)) {
		t = list_first_entry(&dev->txwi_cache, struct mt76_txwi_cache,
				     list);
		list_del(&t->list);
	}
	spin_unlock_bh(&dev->lock);

	return t;
}
struct mt76_txwi_cache *
mt76_get_txwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t = __mt76_get_txwi(dev);

	if (t)
		return t;

	return mt76_alloc_txwi(dev);
}
void
mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
{
	if (!t)
		return;

	spin_lock_bh(&dev->lock);
	list_add(&t->list, &dev->txwi_cache);
	spin_unlock_bh(&dev->lock);
}
EXPORT_SYMBOL_GPL(mt76_put_txwi);
void mt76_tx_free(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t;

	while ((t = __mt76_get_txwi(dev)) != NULL)
		dma_unmap_single(dev->dev, t->dma_addr, dev->drv->txwi_size,
				 DMA_TO_DEVICE);
}
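/*
 * Note on the txwi cache above: each entry is a DMA-mapped buffer of
 * dev->drv->txwi_size bytes, with the struct mt76_txwi_cache bookkeeping
 * stored directly behind it. A typical caller (sketch, not taken from this
 * file) fetches a descriptor with mt76_get_txwi(), fills in the hardware
 * TXWI, and hands it back with mt76_put_txwi() once the DMA has completed,
 * so the mapping is recycled rather than recreated for every frame.
 */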
static enum mt76_txq_id
mt76_txq_get_qid(struct ieee80211_txq *txq)
{
	if (!txq->sta)
		return MT_TXQ_BE;

	return txq->ac;
}
static void
mt76_check_agg_ssn(struct mt76_txq *mtxq, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;

	if (!ieee80211_is_data_qos(hdr->frame_control) ||
	    !ieee80211_is_data_present(hdr->frame_control))
		return;

	/* seq_ctrl keeps the sequence number in its upper 12 bits, so +0x10
	 * records the SSN immediately following this frame
	 */
	mtxq->agg_ssn = le16_to_cpu(hdr->seq_ctrl) + 0x10;
}
void
mt76_tx_status_lock(struct mt76_dev *dev, struct sk_buff_head *list)
		   __acquires(&dev->status_list.lock)
{
	__skb_queue_head_init(list);
	spin_lock_bh(&dev->status_list.lock);
	__acquire(&dev->status_list.lock);
}
EXPORT_SYMBOL_GPL(mt76_tx_status_lock);
void
mt76_tx_status_unlock(struct mt76_dev *dev, struct sk_buff_head *list)
		      __releases(&dev->status_list.unlock)
{
	struct sk_buff *skb;

	spin_unlock_bh(&dev->status_list.lock);
	__release(&dev->status_list.unlock);

	while ((skb = __skb_dequeue(list)) != NULL)
		ieee80211_tx_status(dev->hw, skb);
}
EXPORT_SYMBOL_GPL(mt76_tx_status_unlock);
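/*
 * Usage sketch: callers bracket status processing with the two helpers
 * above so that completed frames are first collected on a local list under
 * status_list.lock and only handed to ieee80211_tx_status() once the lock
 * has been dropped (mt76_tx_status_check() below follows the same pattern):
 *
 *	struct sk_buff_head list;
 *
 *	mt76_tx_status_lock(dev, &list);
 *	... mt76_tx_status_skb_get() / mt76_tx_status_skb_done() ...
 *	mt76_tx_status_unlock(dev, &list);
 */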
static void
__mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb, u8 flags,
			  struct sk_buff_head *list)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
	u8 done = MT_TX_CB_DMA_DONE | MT_TX_CB_TXS_DONE;

	flags |= cb->flags;
	cb->flags = flags;

	if ((flags & done) != done)
		return;

	__skb_unlink(skb, &dev->status_list);

	/* Tx status can be unreliable. If it fails, mark the frame as ACKed */
	if (flags & MT_TX_CB_TXS_FAILED) {
		ieee80211_tx_info_clear_status(info);
		info->status.rates[0].idx = -1;
		info->flags |= IEEE80211_TX_STAT_ACK;
	}

	__skb_queue_tail(list, skb);
}
void
mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb,
			struct sk_buff_head *list)
{
	__mt76_tx_status_skb_done(dev, skb, MT_TX_CB_TXS_DONE, list);
}
EXPORT_SYMBOL_GPL(mt76_tx_status_skb_done);
int
mt76_tx_status_skb_add(struct mt76_dev *dev, struct mt76_wcid *wcid,
		       struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
	int pid;

	if (!wcid)
		return MT_PACKET_ID_NO_ACK;

	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
		return MT_PACKET_ID_NO_ACK;

	if (!(info->flags & (IEEE80211_TX_CTL_REQ_TX_STATUS |
			     IEEE80211_TX_CTL_RATE_CTRL_PROBE)))
		return MT_PACKET_ID_NO_SKB;

	spin_lock_bh(&dev->status_list.lock);

	memset(cb, 0, sizeof(*cb));
	wcid->packet_id = (wcid->packet_id + 1) & MT_PACKET_ID_MASK;
	if (wcid->packet_id == MT_PACKET_ID_NO_ACK ||
	    wcid->packet_id == MT_PACKET_ID_NO_SKB)
		wcid->packet_id = MT_PACKET_ID_FIRST;

	pid = wcid->packet_id;
	cb->wcid = wcid->idx;
	cb->pktid = pid;
	cb->jiffies = jiffies;

	__skb_queue_tail(&dev->status_list, skb);
	spin_unlock_bh(&dev->status_list.lock);

	return pid;
}
EXPORT_SYMBOL_GPL(mt76_tx_status_skb_add);
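/*
 * Sketch of the intended pairing (driver specifics vary and are not shown
 * here): the TX path stores the id returned by mt76_tx_status_skb_add() in
 * the hardware descriptor, and the TX status handler later passes the
 * reported id to mt76_tx_status_skb_get() under mt76_tx_status_lock() to
 * find the matching skb. Ids wrap within MT_PACKET_ID_MASK and skip the
 * reserved MT_PACKET_ID_NO_ACK/MT_PACKET_ID_NO_SKB values.
 */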
struct sk_buff *
mt76_tx_status_skb_get(struct mt76_dev *dev, struct mt76_wcid *wcid, int pktid,
		       struct sk_buff_head *list)
{
	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(&dev->status_list, skb, tmp) {
		struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);

		if (wcid && cb->wcid != wcid->idx)
			continue;

		if (cb->pktid == pktid)
			return skb;

		if (pktid >= 0 && !time_after(jiffies, cb->jiffies +
					      MT_TX_STATUS_SKB_TIMEOUT))
			continue;

		__mt76_tx_status_skb_done(dev, skb, MT_TX_CB_TXS_FAILED |
						    MT_TX_CB_TXS_DONE, list);
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(mt76_tx_status_skb_get);
void
mt76_tx_status_check(struct mt76_dev *dev, struct mt76_wcid *wcid, bool flush)
{
	struct sk_buff_head list;

	mt76_tx_status_lock(dev, &list);
	mt76_tx_status_skb_get(dev, wcid, flush ? -1 : 0, &list);
	mt76_tx_status_unlock(dev, &list);
}
EXPORT_SYMBOL_GPL(mt76_tx_status_check);
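/*
 * Note: with flush set, the pktid of -1 above can never match an entry, so
 * every queued status skb falls through to the timeout branch in
 * mt76_tx_status_skb_get() and is completed as TXS_FAILED; without flush,
 * only entries older than MT_TX_STATUS_SKB_TIMEOUT are reaped.
 */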
void mt76_tx_complete_skb(struct mt76_dev *dev, struct sk_buff *skb)
{
	struct sk_buff_head list;

	/* never added to status_list, so there is no TX status to wait for */
	if (!skb->prev) {
		ieee80211_free_txskb(dev->hw, skb);
		return;
	}

	mt76_tx_status_lock(dev, &list);
	__mt76_tx_status_skb_done(dev, skb, MT_TX_CB_DMA_DONE, &list);
	mt76_tx_status_unlock(dev, &list);
}
EXPORT_SYMBOL_GPL(mt76_tx_complete_skb);
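/*
 * Note: completion of a tracked frame is split into two events,
 * MT_TX_CB_DMA_DONE (reported here once the DMA engine is finished with
 * the frame) and MT_TX_CB_TXS_DONE (reported via the status helpers above);
 * __mt76_tx_status_skb_done() only releases the skb to mac80211 after both
 * flags have been accumulated.
 */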
void
mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta,
	struct mt76_wcid *wcid, struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct mt76_queue *q;
	int qid = skb_get_queue_mapping(skb);

	if (WARN_ON(qid >= MT_TXQ_PSD)) {
		qid = MT_TXQ_BE;
		skb_set_queue_mapping(skb, qid);
	}

	if (!(wcid->tx_info & MT_WCID_TX_INFO_SET))
		ieee80211_get_tx_rates(info->control.vif, sta, skb,
				       info->control.rates, 1);

	if (sta && ieee80211_is_data_qos(hdr->frame_control)) {
		struct ieee80211_txq *txq;
		struct mt76_txq *mtxq;
		u8 tid;

		tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
		txq = sta->txq[tid];
		mtxq = (struct mt76_txq *)txq->drv_priv;

		if (mtxq->aggr)
			mt76_check_agg_ssn(mtxq, skb);
	}

	q = dev->q_tx[qid].q;

	spin_lock_bh(&q->lock);
	dev->queue_ops->tx_queue_skb(dev, qid, skb, wcid, sta);
	dev->queue_ops->kick(dev, q);

	if (q->queued > q->ndesc - 8 && !q->stopped) {
		ieee80211_stop_queue(dev->hw, skb_get_queue_mapping(skb));
		q->stopped = true;
	}

	spin_unlock_bh(&q->lock);
}
EXPORT_SYMBOL_GPL(mt76_tx);
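/*
 * Note: the queue is stopped once fewer than 8 descriptors remain free
 * (q->queued > q->ndesc - 8), leaving headroom for frames already being
 * queued; the matching ieee80211_wake_queue() is expected to come from the
 * TX completion path outside this file.
 */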
static struct sk_buff *
mt76_txq_dequeue(struct mt76_dev *dev, struct mt76_txq *mtxq, bool ps)
{
	struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
	struct sk_buff *skb;

	skb = skb_dequeue(&mtxq->retry_q);
	if (skb) {
		u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;

		if (ps && skb_queue_empty(&mtxq->retry_q))
			ieee80211_sta_set_buffered(txq->sta, tid, false);

		return skb;
	}

	skb = ieee80211_tx_dequeue(dev->hw, txq);
	if (!skb)
		return NULL;

	return skb;
}
static void
mt76_queue_ps_skb(struct mt76_dev *dev, struct ieee80211_sta *sta,
		  struct sk_buff *skb, bool last)
{
	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	info->control.flags |= IEEE80211_TX_CTRL_PS_RESPONSE;
	if (last)
		info->flags |= IEEE80211_TX_STATUS_EOSP |
			       IEEE80211_TX_CTL_REQ_TX_STATUS;

	mt76_skb_set_moredata(skb, !last);
	dev->queue_ops->tx_queue_skb(dev, MT_TXQ_PSD, skb, wcid, sta);
}
void
mt76_release_buffered_frames(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
			     u16 tids, int nframes,
			     enum ieee80211_frame_release_type reason,
			     bool more_data)
{
	struct mt76_dev *dev = hw->priv;
	struct sk_buff *last_skb = NULL;
	struct mt76_queue *hwq = dev->q_tx[MT_TXQ_PSD].q;
	int i;

	spin_lock_bh(&hwq->lock);
	for (i = 0; tids && nframes; i++, tids >>= 1) {
		struct ieee80211_txq *txq = sta->txq[i];
		struct mt76_txq *mtxq = (struct mt76_txq *)txq->drv_priv;
		struct sk_buff *skb;

		if (!(tids & 1))
			continue;

		do {
			skb = mt76_txq_dequeue(dev, mtxq, true);
			if (!skb)
				break;

			if (mtxq->aggr)
				mt76_check_agg_ssn(mtxq, skb);

			nframes--;
			if (last_skb)
				mt76_queue_ps_skb(dev, sta, last_skb, false);

			last_skb = skb;
		} while (nframes);
	}

	if (last_skb) {
		mt76_queue_ps_skb(dev, sta, last_skb, true);
		dev->queue_ops->kick(dev, hwq);
	} else {
		ieee80211_sta_eosp(sta);
	}

	spin_unlock_bh(&hwq->lock);
}
EXPORT_SYMBOL_GPL(mt76_release_buffered_frames);
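/*
 * Note on the release order above: every buffered frame is queued with the
 * more-data bit set except the final one, which is flagged as EOSP via
 * mt76_queue_ps_skb(..., last = true); if nothing could be dequeued at all,
 * ieee80211_sta_eosp() closes the service period immediately.
 */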
static int
mt76_txq_send_burst(struct mt76_dev *dev, struct mt76_sw_queue *sq,
		    struct mt76_txq *mtxq)
{
	struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
	enum mt76_txq_id qid = mt76_txq_get_qid(txq);
	struct mt76_wcid *wcid = mtxq->wcid;
	struct mt76_queue *hwq = sq->q;
	struct ieee80211_tx_info *info;
	struct sk_buff *skb;
	int n_frames = 1, limit;
	struct ieee80211_tx_rate tx_rate;
	bool ampdu;
	bool probe;
	int idx;

	if (test_bit(MT_WCID_FLAG_PS, &wcid->flags))
		return 0;

	skb = mt76_txq_dequeue(dev, mtxq, false);
	if (!skb)
		return 0;

	info = IEEE80211_SKB_CB(skb);
	if (!(wcid->tx_info & MT_WCID_TX_INFO_SET))
		ieee80211_get_tx_rates(txq->vif, txq->sta, skb,
				       info->control.rates, 1);
	tx_rate = info->control.rates[0];

	probe = (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
	ampdu = IEEE80211_SKB_CB(skb)->flags & IEEE80211_TX_CTL_AMPDU;
	limit = ampdu ? 16 : 3;

	if (ampdu)
		mt76_check_agg_ssn(mtxq, skb);

	idx = dev->queue_ops->tx_queue_skb(dev, qid, skb, wcid, txq->sta);
	if (idx < 0)
		return idx;

	do {
		bool cur_ampdu;

		if (probe)
			break;

		if (test_bit(MT76_RESET, &dev->state))
			return -EBUSY;

		skb = mt76_txq_dequeue(dev, mtxq, false);
		if (!skb)
			break;

		info = IEEE80211_SKB_CB(skb);
		cur_ampdu = info->flags & IEEE80211_TX_CTL_AMPDU;

		if (ampdu != cur_ampdu ||
		    (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)) {
			skb_queue_tail(&mtxq->retry_q, skb);
			break;
		}

		info->control.rates[0] = tx_rate;

		if (cur_ampdu)
			mt76_check_agg_ssn(mtxq, skb);

		idx = dev->queue_ops->tx_queue_skb(dev, qid, skb, wcid,
						   txq->sta);
		if (idx < 0)
			return idx;

		n_frames++;
	} while (n_frames < limit);

	if (!probe) {
		hwq->entry[idx].qid = sq - dev->q_tx;
		hwq->entry[idx].schedule = true;
		sq->swq_queued++;
	}

	dev->queue_ops->kick(dev, hwq);

	return n_frames;
}
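/*
 * Note: a burst is limited to 16 frames when the first frame belongs to an
 * A-MPDU session and to 3 frames otherwise, and it ends early when the
 * aggregation state changes or a rate-probing frame shows up, so the
 * tx_rate sampled from the first frame stays valid for the whole burst.
 */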
static int
mt76_txq_schedule_list(struct mt76_dev *dev, enum mt76_txq_id qid)
{
	struct mt76_sw_queue *sq = &dev->q_tx[qid];
	struct mt76_queue *hwq = sq->q;
	struct ieee80211_txq *txq;
	struct mt76_txq *mtxq;
	struct mt76_wcid *wcid;
	int ret = 0;

	spin_lock_bh(&hwq->lock);
	while (1) {
		if (sq->swq_queued >= 4)
			break;

		if (test_bit(MT76_RESET, &dev->state)) {
			ret = -EBUSY;
			break;
		}

		txq = ieee80211_next_txq(dev->hw, qid);
		if (!txq)
			break;

		mtxq = (struct mt76_txq *)txq->drv_priv;
		wcid = mtxq->wcid;
		if (wcid && test_bit(MT_WCID_FLAG_PS, &wcid->flags))
			continue;

		if (mtxq->send_bar && mtxq->aggr) {
			struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
			struct ieee80211_sta *sta = txq->sta;
			struct ieee80211_vif *vif = txq->vif;
			u16 agg_ssn = mtxq->agg_ssn;
			u8 tid = txq->tid;

			mtxq->send_bar = false;
			spin_unlock_bh(&hwq->lock);
			ieee80211_send_bar(vif, sta->addr, tid, agg_ssn);
			spin_lock_bh(&hwq->lock);
		}

		ret += mt76_txq_send_burst(dev, sq, mtxq);
		ieee80211_return_txq(dev->hw, txq,
				     !skb_queue_empty(&mtxq->retry_q));
	}
	spin_unlock_bh(&hwq->lock);

	return ret;
}
void mt76_txq_schedule(struct mt76_dev *dev, enum mt76_txq_id qid)
{
	struct mt76_sw_queue *sq = &dev->q_tx[qid];
	int len;

	if (qid >= 4)
		return;

	if (sq->swq_queued >= 4)
		return;

	rcu_read_lock();

	do {
		ieee80211_txq_schedule_start(dev->hw, qid);
		len = mt76_txq_schedule_list(dev, qid);
		ieee80211_txq_schedule_end(dev->hw, qid);
	} while (len > 0);

	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(mt76_txq_schedule);
void mt76_txq_schedule_all(struct mt76_dev *dev)
{
	int i;

	/* data queues only (up to MT_TXQ_BK) */
	for (i = 0; i <= MT_TXQ_BK; i++)
		mt76_txq_schedule(dev, i);
}
EXPORT_SYMBOL_GPL(mt76_txq_schedule_all);
void mt76_tx_tasklet(unsigned long data)
{
	struct mt76_dev *dev = (struct mt76_dev *)data;

	mt76_txq_schedule_all(dev);
}
void mt76_stop_tx_queues(struct mt76_dev *dev, struct ieee80211_sta *sta,
			 bool send_bar)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
		struct ieee80211_txq *txq = sta->txq[i];
		struct mt76_queue *hwq;
		struct mt76_txq *mtxq;

		if (!txq)
			continue;

		mtxq = (struct mt76_txq *)txq->drv_priv;
		hwq = mtxq->swq->q;

		spin_lock_bh(&hwq->lock);
		mtxq->send_bar = mtxq->aggr && send_bar;
		spin_unlock_bh(&hwq->lock);
	}
}
EXPORT_SYMBOL_GPL(mt76_stop_tx_queues);
void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
{
	struct mt76_dev *dev = hw->priv;

	if (!test_bit(MT76_STATE_RUNNING, &dev->state))
		return;

	tasklet_schedule(&dev->tx_tasklet);
}
EXPORT_SYMBOL_GPL(mt76_wake_tx_queue);
void mt76_txq_remove(struct mt76_dev *dev, struct ieee80211_txq *txq)
{
	struct mt76_txq *mtxq;
	struct sk_buff *skb;

	if (!txq)
		return;

	mtxq = (struct mt76_txq *)txq->drv_priv;

	while ((skb = skb_dequeue(&mtxq->retry_q)) != NULL)
		ieee80211_free_txskb(dev->hw, skb);
}
EXPORT_SYMBOL_GPL(mt76_txq_remove);
void mt76_txq_init(struct mt76_dev *dev, struct ieee80211_txq *txq)
{
	struct mt76_txq *mtxq = (struct mt76_txq *)txq->drv_priv;

	skb_queue_head_init(&mtxq->retry_q);

	mtxq->swq = &dev->q_tx[mt76_txq_get_qid(txq)];
}
EXPORT_SYMBOL_GPL(mt76_txq_init);
u8 mt76_ac_to_hwq(u8 ac)
{
	static const u8 wmm_queue_map[] = {
		[IEEE80211_AC_BE] = 0,
		[IEEE80211_AC_BK] = 1,
		[IEEE80211_AC_VI] = 2,
		[IEEE80211_AC_VO] = 3,
	};

	if (WARN_ON(ac >= IEEE80211_NUM_ACS))
		return 0;

	return wmm_queue_map[ac];
}
EXPORT_SYMBOL_GPL(mt76_ac_to_hwq);
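/*
 * Note: mac80211 orders its ACs by priority (VO = 0 ... BK = 3) while the
 * hardware WMM queues follow the table above (BE = 0, BK = 1, VI = 2,
 * VO = 3); mt76_ac_to_hwq() translates between the two and falls back to
 * queue 0 for out-of-range input.
 */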