/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
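
#include "mt76.h"

/* Allocate a TXWI (TX descriptor) cache entry, rounded up to a full
 * cache line, and DMA-map its txwi field for device access.
 */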
static struct mt76_txwi_cache *
mt76_alloc_txwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t;
	dma_addr_t addr;
	int size;

	size = (sizeof(*t) + L1_CACHE_BYTES - 1) & ~(L1_CACHE_BYTES - 1);
	t = devm_kzalloc(dev->dev, size, GFP_ATOMIC);
	if (!t)
		return NULL;

	addr = dma_map_single(dev->dev, &t->txwi, sizeof(t->txwi),
			      DMA_TO_DEVICE);
	t->dma_addr = addr;

	return t;
}
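
/* Pop a TXWI entry from the per-device free list, if one is available. */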
static struct mt76_txwi_cache *
__mt76_get_txwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t = NULL;

	spin_lock_bh(&dev->lock);
	if (!list_empty(&dev->txwi_cache)) {
		t = list_first_entry(&dev->txwi_cache, struct mt76_txwi_cache,
				     list);
		list_del(&t->list);
	}
	spin_unlock_bh(&dev->lock);

	return t;
}
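
/* Get a TXWI entry, reusing a cached one when possible and falling back
 * to a fresh allocation otherwise.
 */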
static struct mt76_txwi_cache *
mt76_get_txwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t = __mt76_get_txwi(dev);

	if (t)
		return t;

	return mt76_alloc_txwi(dev);
}
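
/* Return a TXWI entry to the free list so it can be reused. */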
static void
mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
{
	if (!t)
		return;

	spin_lock_bh(&dev->lock);
	list_add(&t->list, &dev->txwi_cache);
	spin_unlock_bh(&dev->lock);
}
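
/* Unmap every cached TXWI entry; the memory itself is devm-managed and
 * freed together with the device.
 */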
void mt76_tx_free(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t;

	while ((t = __mt76_get_txwi(dev)) != NULL)
		dma_unmap_single(dev->dev, t->dma_addr, sizeof(t->txwi),
				 DMA_TO_DEVICE);
}
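
/* Map a mac80211 txq to the hardware TX queue index used by dev->q_tx. */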
static int
mt76_txq_get_qid(struct ieee80211_txq *txq)
{
	if (!txq->sta)
		return MT_TXQ_BE;

	return txq->ac;
}
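
/* Build the DMA scatter list for a frame (TXWI descriptor, linear skb
 * data, then any paged fragments) and hand it to the queue backend.
 */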
int mt76_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
		      struct sk_buff *skb, struct mt76_wcid *wcid,
		      struct ieee80211_sta *sta)
{
	struct mt76_queue_entry e;
	struct mt76_txwi_cache *t;
	struct mt76_queue_buf buf[32];
	struct sk_buff *iter;
	dma_addr_t addr;
	int len;
	u32 tx_info = 0;
	int n, ret;

	t = mt76_get_txwi(dev);
	if (!t) {
		ieee80211_free_txskb(dev->hw, skb);
		return -ENOMEM;
	}

	dma_sync_single_for_cpu(dev->dev, t->dma_addr, sizeof(t->txwi),
				DMA_TO_DEVICE);
	ret = dev->drv->tx_prepare_skb(dev, &t->txwi, skb, q, wcid, sta,
				       &tx_info);
	dma_sync_single_for_device(dev->dev, t->dma_addr, sizeof(t->txwi),
				   DMA_TO_DEVICE);
	if (ret < 0)
		goto free;

	len = skb->len - skb->data_len;
	addr = dma_map_single(dev->dev, skb->data, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev->dev, addr)) {
		ret = -ENOMEM;
		goto free;
	}

	n = 0;
	buf[n].addr = t->dma_addr;
	buf[n++].len = dev->drv->txwi_size;
	buf[n].addr = addr;
	buf[n++].len = len;

	skb_walk_frags(skb, iter) {
		if (n == ARRAY_SIZE(buf))
			goto unmap;

		addr = dma_map_single(dev->dev, iter->data, iter->len,
				      DMA_TO_DEVICE);
		if (dma_mapping_error(dev->dev, addr))
			goto unmap;

		buf[n].addr = addr;
		buf[n++].len = iter->len;
	}

	if (q->queued + (n + 1) / 2 >= q->ndesc - 1)
		goto unmap;

	return dev->queue_ops->add_buf(dev, q, buf, n, tx_info, skb, t);

unmap:
	ret = -ENOMEM;
	for (n--; n > 0; n--)
		dma_unmap_single(dev->dev, buf[n].addr, buf[n].len,
				 DMA_TO_DEVICE);

free:
	e.skb = skb;
	e.txwi = t;
	dev->drv->tx_complete_skb(dev, q, &e, true);
	mt76_put_txwi(dev, t);

	return ret;
}
EXPORT_SYMBOL_GPL(mt76_tx_queue_skb);
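
/* Generic transmit path: pick the hardware queue from the skb queue
 * mapping, queue the frame, kick the hardware and stop the mac80211
 * queue when the ring is almost full.
 */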
void
mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta,
	struct mt76_wcid *wcid, struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct mt76_queue *q;
	int qid = skb_get_queue_mapping(skb);

	if (WARN_ON(qid >= MT_TXQ_PSD)) {
		qid = MT_TXQ_BE;
		skb_set_queue_mapping(skb, qid);
	}

	if (!wcid->tx_rate_set)
		ieee80211_get_tx_rates(info->control.vif, sta, skb,
				       info->control.rates, 1);

	q = &dev->q_tx[qid];

	spin_lock_bh(&q->lock);
	mt76_tx_queue_skb(dev, q, skb, wcid, sta);
	dev->queue_ops->kick(dev, q);

	if (q->queued > q->ndesc - 8)
		ieee80211_stop_queue(dev->hw, skb_get_queue_mapping(skb));
	spin_unlock_bh(&q->lock);
}
EXPORT_SYMBOL_GPL(mt76_tx);
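
/* Dequeue the next frame for a txq, preferring the local retry queue
 * over mac80211's txq.
 */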
static struct sk_buff *
mt76_txq_dequeue(struct mt76_dev *dev, struct mt76_txq *mtxq, bool ps)
{
	struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
	struct sk_buff *skb;

	skb = skb_dequeue(&mtxq->retry_q);
	if (skb) {
		u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;

		if (ps && skb_queue_empty(&mtxq->retry_q))
			ieee80211_sta_set_buffered(txq->sta, tid, false);

		return skb;
	}

	skb = ieee80211_tx_dequeue(dev->hw, txq);
	if (!skb)
		return NULL;

	return skb;
}
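
/* Track the expected aggregation starting sequence number for QoS data
 * frames, used later when sending a BlockAckReq.
 */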
static void
mt76_check_agg_ssn(struct mt76_txq *mtxq, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;

	if (!ieee80211_is_data_qos(hdr->frame_control))
		return;

	mtxq->agg_ssn = le16_to_cpu(hdr->seq_ctrl) + 0x10;
}
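
/* Queue one powersave response frame on the PSD queue, flagging the
 * last frame of the service period.
 */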
static void
mt76_queue_ps_skb(struct mt76_dev *dev, struct ieee80211_sta *sta,
		  struct sk_buff *skb, bool last)
{
	struct mt76_wcid *wcid = (struct mt76_wcid *) sta->drv_priv;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct mt76_queue *hwq = &dev->q_tx[MT_TXQ_PSD];

	info->control.flags |= IEEE80211_TX_CTRL_PS_RESPONSE;
	if (last)
		info->flags |= IEEE80211_TX_STATUS_EOSP;

	mt76_skb_set_moredata(skb, !last);
	mt76_tx_queue_skb(dev, hwq, skb, wcid, sta);
}
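
/* mac80211 release_buffered_frames callback: flush buffered frames for
 * the given TIDs out through the PSD queue as a powersave service
 * period.
 */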
void
mt76_release_buffered_frames(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
			     u16 tids, int nframes,
			     enum ieee80211_frame_release_type reason,
			     bool more_data)
{
	struct mt76_dev *dev = hw->priv;
	struct sk_buff *last_skb = NULL;
	struct mt76_queue *hwq = &dev->q_tx[MT_TXQ_PSD];
	int i;

	spin_lock_bh(&hwq->lock);
	for (i = 0; tids && nframes; i++, tids >>= 1) {
		struct ieee80211_txq *txq = sta->txq[i];
		struct mt76_txq *mtxq = (struct mt76_txq *) txq->drv_priv;
		struct sk_buff *skb;

		if (!(tids & 1))
			continue;

		do {
			skb = mt76_txq_dequeue(dev, mtxq, true);
			if (!skb)
				break;

			if (mtxq->aggr)
				mt76_check_agg_ssn(mtxq, skb);

			nframes--;
			if (last_skb)
				mt76_queue_ps_skb(dev, sta, last_skb, false);

			last_skb = skb;
		} while (nframes);
	}

	if (last_skb) {
		mt76_queue_ps_skb(dev, sta, last_skb, true);
		dev->queue_ops->kick(dev, hwq);
	}
	spin_unlock_bh(&hwq->lock);
}
EXPORT_SYMBOL_GPL(mt76_release_buffered_frames);
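
/* Transmit a burst of frames from one txq, reusing the rate of the
 * first frame for the rest of the A-MPDU batch.
 */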
static int
mt76_txq_send_burst(struct mt76_dev *dev, struct mt76_queue *hwq,
		    struct mt76_txq *mtxq, bool *empty)
{
	struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
	struct ieee80211_tx_info *info;
	struct mt76_wcid *wcid = mtxq->wcid;
	struct sk_buff *skb;
	int n_frames = 1, limit;
	struct ieee80211_tx_rate tx_rate;
	bool ampdu;
	bool probe;
	int idx;

	skb = mt76_txq_dequeue(dev, mtxq, false);
	if (!skb) {
		*empty = true;
		return 0;
	}

	info = IEEE80211_SKB_CB(skb);
	if (!wcid->tx_rate_set)
		ieee80211_get_tx_rates(txq->vif, txq->sta, skb,
				       info->control.rates, 1);
	tx_rate = info->control.rates[0];

	probe = (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
	ampdu = IEEE80211_SKB_CB(skb)->flags & IEEE80211_TX_CTL_AMPDU;
	limit = ampdu ? 16 : 3;

	if (ampdu)
		mt76_check_agg_ssn(mtxq, skb);

	idx = mt76_tx_queue_skb(dev, hwq, skb, wcid, txq->sta);
	if (idx < 0)
		return idx;

	do {
		bool cur_ampdu;

		if (probe)
			break;

		if (test_bit(MT76_SCANNING, &dev->state) ||
		    test_bit(MT76_RESET, &dev->state))
			return -EBUSY;

		skb = mt76_txq_dequeue(dev, mtxq, false);
		if (!skb) {
			*empty = true;
			break;
		}

		info = IEEE80211_SKB_CB(skb);
		cur_ampdu = info->flags & IEEE80211_TX_CTL_AMPDU;

		if (ampdu != cur_ampdu ||
		    (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)) {
			skb_queue_tail(&mtxq->retry_q, skb);
			break;
		}

		info->control.rates[0] = tx_rate;

		if (cur_ampdu)
			mt76_check_agg_ssn(mtxq, skb);

		idx = mt76_tx_queue_skb(dev, hwq, skb, wcid, txq->sta);
		if (idx < 0)
			return idx;

		n_frames++;
	} while (n_frames < limit);

	if (!probe) {
		hwq->swq_queued++;
		hwq->entry[idx].schedule = true;
	}

	dev->queue_ops->kick(dev, hwq);

	return n_frames;
}
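
/* Round-robin over the software queues of one hardware queue, sending
 * a pending BlockAckReq first when required.
 */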
static int
mt76_txq_schedule_list(struct mt76_dev *dev, struct mt76_queue *hwq)
{
	struct mt76_txq *mtxq, *mtxq_last;
	int len = 0;

restart:
	mtxq_last = list_last_entry(&hwq->swq, struct mt76_txq, list);
	while (!list_empty(&hwq->swq)) {
		bool empty = false;
		int cur;

		mtxq = list_first_entry(&hwq->swq, struct mt76_txq, list);
		if (mtxq->send_bar && mtxq->aggr) {
			struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
			struct ieee80211_sta *sta = txq->sta;
			struct ieee80211_vif *vif = txq->vif;
			u16 agg_ssn = mtxq->agg_ssn;
			u8 tid = txq->tid;

			mtxq->send_bar = false;
			spin_unlock_bh(&hwq->lock);
			ieee80211_send_bar(vif, sta->addr, tid, agg_ssn);
			spin_lock_bh(&hwq->lock);
			goto restart;
		}

		list_del_init(&mtxq->list);

		cur = mt76_txq_send_burst(dev, hwq, mtxq, &empty);
		if (!empty)
			list_add_tail(&mtxq->list, &hwq->swq);

		if (cur < 0)
			return cur;

		len += cur;

		if (mtxq == mtxq_last)
			break;
	}

	return len;
}
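
/* Schedule pending software queues until enough bursts are in flight. */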
void mt76_txq_schedule(struct mt76_dev *dev, struct mt76_queue *hwq)
{
	int len;

	do {
		if (hwq->swq_queued >= 4 || list_empty(&hwq->swq))
			break;

		len = mt76_txq_schedule_list(dev, hwq);
	} while (len > 0);
}
EXPORT_SYMBOL_GPL(mt76_txq_schedule);
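
/* Schedule every access-category queue (indices 0 through MT_TXQ_BK). */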
void mt76_txq_schedule_all(struct mt76_dev *dev)
{
	int i;

	for (i = 0; i <= MT_TXQ_BK; i++) {
		struct mt76_queue *q = &dev->q_tx[i];

		spin_lock_bh(&q->lock);
		mt76_txq_schedule(dev, q);
		spin_unlock_bh(&q->lock);
	}
}
EXPORT_SYMBOL_GPL(mt76_txq_schedule_all);
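
/* Detach a station's txqs from scheduling; optionally mark aggregation
 * sessions so a BlockAckReq is sent when the queue is scheduled again.
 */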
void mt76_stop_tx_queues(struct mt76_dev *dev, struct ieee80211_sta *sta,
			 bool send_bar)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
		struct ieee80211_txq *txq = sta->txq[i];
		struct mt76_txq *mtxq = (struct mt76_txq *) txq->drv_priv;

		spin_lock_bh(&mtxq->hwq->lock);
		mtxq->send_bar = mtxq->aggr && send_bar;
		if (!list_empty(&mtxq->list))
			list_del_init(&mtxq->list);
		spin_unlock_bh(&mtxq->hwq->lock);
	}
}
EXPORT_SYMBOL_GPL(mt76_stop_tx_queues);
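
/* mac80211 wake_tx_queue callback: add the txq to its hardware queue's
 * software list and run the scheduler.
 */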
void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
{
	struct mt76_dev *dev = hw->priv;
	struct mt76_txq *mtxq = (struct mt76_txq *) txq->drv_priv;
	struct mt76_queue *hwq = mtxq->hwq;

	spin_lock_bh(&hwq->lock);
	if (list_empty(&mtxq->list))
		list_add_tail(&mtxq->list, &hwq->swq);
	mt76_txq_schedule(dev, hwq);
	spin_unlock_bh(&hwq->lock);
}
EXPORT_SYMBOL_GPL(mt76_wake_tx_queue);
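
/* Remove a txq from scheduling and drop any frames still sitting on its
 * retry queue.
 */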
void mt76_txq_remove(struct mt76_dev *dev, struct ieee80211_txq *txq)
{
	struct mt76_txq *mtxq;
	struct mt76_queue *hwq;
	struct sk_buff *skb;

	if (!txq)
		return;

	mtxq = (struct mt76_txq *) txq->drv_priv;
	hwq = mtxq->hwq;

	spin_lock_bh(&hwq->lock);
	if (!list_empty(&mtxq->list))
		list_del(&mtxq->list);
	spin_unlock_bh(&hwq->lock);

	while ((skb = skb_dequeue(&mtxq->retry_q)) != NULL)
		ieee80211_free_txskb(dev->hw, skb);
}
EXPORT_SYMBOL_GPL(mt76_txq_remove);
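
/* Initialize per-txq driver state and bind it to a hardware queue. */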
void mt76_txq_init(struct mt76_dev *dev, struct ieee80211_txq *txq)
{
	struct mt76_txq *mtxq = (struct mt76_txq *) txq->drv_priv;

	INIT_LIST_HEAD(&mtxq->list);
	skb_queue_head_init(&mtxq->retry_q);

	mtxq->hwq = &dev->q_tx[mt76_txq_get_qid(txq)];
}
EXPORT_SYMBOL_GPL(mt76_txq_init);