drivers/net/wireless/realtek/rtw88/tx.c

// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2018-2019 Realtek Corporation
 */

#include "main.h"
#include "tx.h"
#include "fw.h"
#include "ps.h"

static
void rtw_tx_stats(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
		  struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;
	struct rtw_vif *rtwvif;

	hdr = (struct ieee80211_hdr *)skb->data;

	if (!ieee80211_is_data(hdr->frame_control))
		return;

	if (!is_broadcast_ether_addr(hdr->addr1) &&
	    !is_multicast_ether_addr(hdr->addr1)) {
		rtwdev->stats.tx_unicast += skb->len;
		rtwdev->stats.tx_cnt++;
		if (vif) {
			rtwvif = (struct rtw_vif *)vif->drv_priv;
			rtwvif->stats.tx_unicast += skb->len;
			rtwvif->stats.tx_cnt++;
		}
	}
}

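/* Program the hardware TX descriptor at the head of the skb from the
 * fields collected in pkt_info.  A sketch of how an HCI backend is
 * expected to use this helper (illustrative only, not part of this file):
 *
 *	pkt_desc = skb_push(skb, chip->tx_pkt_desc_sz);
 *	memset(pkt_desc, 0, chip->tx_pkt_desc_sz);
 *	rtw_tx_fill_tx_desc(pkt_info, skb);
 */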
void rtw_tx_fill_tx_desc(struct rtw_tx_pkt_info *pkt_info, struct sk_buff *skb)
{
	__le32 *txdesc = (__le32 *)skb->data;

	SET_TX_DESC_TXPKTSIZE(txdesc, pkt_info->tx_pkt_size);
	SET_TX_DESC_OFFSET(txdesc, pkt_info->offset);
	SET_TX_DESC_PKT_OFFSET(txdesc, pkt_info->pkt_offset);
	SET_TX_DESC_QSEL(txdesc, pkt_info->qsel);
	SET_TX_DESC_BMC(txdesc, pkt_info->bmc);
	SET_TX_DESC_RATE_ID(txdesc, pkt_info->rate_id);
	SET_TX_DESC_DATARATE(txdesc, pkt_info->rate);
	SET_TX_DESC_DISDATAFB(txdesc, pkt_info->dis_rate_fallback);
	SET_TX_DESC_USE_RATE(txdesc, pkt_info->use_rate);
	SET_TX_DESC_SEC_TYPE(txdesc, pkt_info->sec_type);
	SET_TX_DESC_DATA_BW(txdesc, pkt_info->bw);
	SET_TX_DESC_SW_SEQ(txdesc, pkt_info->seq);
	SET_TX_DESC_MAX_AGG_NUM(txdesc, pkt_info->ampdu_factor);
	SET_TX_DESC_AMPDU_DENSITY(txdesc, pkt_info->ampdu_density);
	SET_TX_DESC_DATA_STBC(txdesc, pkt_info->stbc);
	SET_TX_DESC_DATA_LDPC(txdesc, pkt_info->ldpc);
	SET_TX_DESC_AGG_EN(txdesc, pkt_info->ampdu_en);
	SET_TX_DESC_LS(txdesc, pkt_info->ls);
	SET_TX_DESC_DATA_SHORT(txdesc, pkt_info->short_gi);
	SET_TX_DESC_SPE_RPT(txdesc, pkt_info->report);
	SET_TX_DESC_SW_DEFINE(txdesc, pkt_info->sn);
	SET_TX_DESC_USE_RTS(txdesc, pkt_info->rts);
}
EXPORT_SYMBOL(rtw_tx_fill_tx_desc);

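/* Translate the HT A-MPDU factor exponent into the descriptor's max
 * aggregation number.  For example, exp = 0 (8K A-MPDU) gives
 * (4 << 0) - 1 = 3, and exp = 3 (64K A-MPDU) gives (4 << 3) - 1 = 31.
 */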
static u8 get_tx_ampdu_factor(struct ieee80211_sta *sta)
{
	u8 exp = sta->ht_cap.ampdu_factor;

	/* the least ampdu factor is 8K, and the value in the tx desc is the
	 * max aggregation num, which represents val * 2 packets can be
	 * aggregated in an AMPDU, so here we should use 8/2=4 as the base
	 */
	return (BIT(2) << exp) - 1;
}

static u8 get_tx_ampdu_density(struct ieee80211_sta *sta)
{
	return sta->ht_cap.ampdu_density;
}

static u8 get_highest_ht_tx_rate(struct rtw_dev *rtwdev,
				 struct ieee80211_sta *sta)
{
	u8 rate;

	if (rtwdev->hal.rf_type == RF_2T2R && sta->ht_cap.mcs.rx_mask[1] != 0)
		rate = DESC_RATEMCS15;
	else
		rate = DESC_RATEMCS7;

	return rate;
}

static u8 get_highest_vht_tx_rate(struct rtw_dev *rtwdev,
				  struct ieee80211_sta *sta)
{
	struct rtw_efuse *efuse = &rtwdev->efuse;
	u8 rate;
	u16 tx_mcs_map;

	tx_mcs_map = le16_to_cpu(sta->vht_cap.vht_mcs.tx_mcs_map);
	if (efuse->hw_cap.nss == 1) {
		switch (tx_mcs_map & 0x3) {
		case IEEE80211_VHT_MCS_SUPPORT_0_7:
			rate = DESC_RATEVHT1SS_MCS7;
			break;
		case IEEE80211_VHT_MCS_SUPPORT_0_8:
			rate = DESC_RATEVHT1SS_MCS8;
			break;
		default:
		case IEEE80211_VHT_MCS_SUPPORT_0_9:
			rate = DESC_RATEVHT1SS_MCS9;
			break;
		}
	} else if (efuse->hw_cap.nss >= 2) {
		switch ((tx_mcs_map & 0xc) >> 2) {
		case IEEE80211_VHT_MCS_SUPPORT_0_7:
			rate = DESC_RATEVHT2SS_MCS7;
			break;
		case IEEE80211_VHT_MCS_SUPPORT_0_8:
			rate = DESC_RATEVHT2SS_MCS8;
			break;
		default:
		case IEEE80211_VHT_MCS_SUPPORT_0_9:
			rate = DESC_RATEVHT2SS_MCS9;
			break;
		}
	} else {
		rate = DESC_RATEVHT1SS_MCS9;
	}

	return rate;
}

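/* Ask the firmware for a per-frame TX (CCX) report: tag the frame with a
 * 6-bit driver sequence number carried in bits [7:2] of pkt_info->sn, as
 * laid out below.  The firmware echoes this number in its C2H report,
 * which rtw_tx_report_handle() uses to find the matching skb.
 */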
static void rtw_tx_report_enable(struct rtw_dev *rtwdev,
				 struct rtw_tx_pkt_info *pkt_info)
{
	struct rtw_tx_report *tx_report = &rtwdev->tx_report;

	/* [11:8], reserved, fills with zero
	 * [7:2], tx report sequence number
	 * [1:0], firmware use, fills with zero
	 */
	pkt_info->sn = (atomic_inc_return(&tx_report->sn) << 2) & 0xfc;
	pkt_info->report = true;
}

void rtw_tx_report_purge_timer(struct timer_list *t)
{
	struct rtw_dev *rtwdev = from_timer(rtwdev, t, tx_report.purge_timer);
	struct rtw_tx_report *tx_report = &rtwdev->tx_report;
	unsigned long flags;

	if (skb_queue_len(&tx_report->queue) == 0)
		return;

	WARN(1, "purge skb(s) not reported by firmware\n");

	spin_lock_irqsave(&tx_report->q_lock, flags);
	skb_queue_purge(&tx_report->queue);
	spin_unlock_irqrestore(&tx_report->q_lock, flags);
}

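/* Queue an skb that is waiting for a firmware TX report.  The sequence
 * number is stashed in the skb's status driver data so the report handler
 * can match it later; the purge timer reclaims frames the firmware never
 * reports on.  A caller is expected to do something like (illustrative
 * only, not taken from this file):
 *
 *	if (pkt_info->report)
 *		rtw_tx_report_enqueue(rtwdev, skb, pkt_info->sn);
 */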
void rtw_tx_report_enqueue(struct rtw_dev *rtwdev, struct sk_buff *skb, u8 sn)
{
	struct rtw_tx_report *tx_report = &rtwdev->tx_report;
	unsigned long flags;
	u8 *drv_data;

	/* pass sn to tx report handler through driver data */
	drv_data = (u8 *)IEEE80211_SKB_CB(skb)->status.status_driver_data;
	*drv_data = sn;

	spin_lock_irqsave(&tx_report->q_lock, flags);
	__skb_queue_tail(&tx_report->queue, skb);
	spin_unlock_irqrestore(&tx_report->q_lock, flags);

	mod_timer(&tx_report->purge_timer, jiffies + RTW_TX_PROBE_TIMEOUT);
}
EXPORT_SYMBOL(rtw_tx_report_enqueue);

static void rtw_tx_report_tx_status(struct rtw_dev *rtwdev,
				    struct sk_buff *skb, bool acked)
{
	struct ieee80211_tx_info *info;

	info = IEEE80211_SKB_CB(skb);
	ieee80211_tx_info_clear_status(info);
	if (acked)
		info->flags |= IEEE80211_TX_STAT_ACK;
	else
		info->flags &= ~IEEE80211_TX_STAT_ACK;

	ieee80211_tx_status_irqsafe(rtwdev->hw, skb);
}

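/* Handle a CCX TX report C2H event from the firmware: extract the sequence
 * number and status, unlink the matching skb from the report queue and
 * complete its TX status (a status of 0 means the frame was acked).
 */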
void rtw_tx_report_handle(struct rtw_dev *rtwdev, struct sk_buff *skb)
{
	struct rtw_tx_report *tx_report = &rtwdev->tx_report;
	struct rtw_c2h_cmd *c2h;
	struct sk_buff *cur, *tmp;
	unsigned long flags;
	u8 sn, st;
	u8 *n;

	c2h = get_c2h_from_skb(skb);

	sn = GET_CCX_REPORT_SEQNUM(c2h->payload);
	st = GET_CCX_REPORT_STATUS(c2h->payload);

	spin_lock_irqsave(&tx_report->q_lock, flags);
	skb_queue_walk_safe(&tx_report->queue, cur, tmp) {
		n = (u8 *)IEEE80211_SKB_CB(cur)->status.status_driver_data;
		if (*n == sn) {
			__skb_unlink(cur, &tx_report->queue);
			rtw_tx_report_tx_status(rtwdev, cur, st == 0);
			break;
		}
	}
	spin_unlock_irqrestore(&tx_report->q_lock, flags);
}

static void rtw_tx_mgmt_pkt_info_update(struct rtw_dev *rtwdev,
					struct rtw_tx_pkt_info *pkt_info,
					struct ieee80211_tx_control *control,
					struct sk_buff *skb)
{
	pkt_info->use_rate = true;
	pkt_info->rate_id = 6;
	pkt_info->dis_rate_fallback = true;
}

static void rtw_tx_data_pkt_info_update(struct rtw_dev *rtwdev,
					struct rtw_tx_pkt_info *pkt_info,
					struct ieee80211_tx_control *control,
					struct sk_buff *skb)
{
	struct ieee80211_sta *sta = control->sta;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct rtw_sta_info *si;
	u16 seq;
	u8 ampdu_factor = 0;
	u8 ampdu_density = 0;
	bool ampdu_en = false;
	u8 rate = DESC_RATE6M;
	u8 rate_id = 6;
	u8 bw = RTW_CHANNEL_WIDTH_20;
	bool stbc = false;
	bool ldpc = false;

	seq = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4;

	/* for broadcast/multicast, use default values */
	if (!sta)
		goto out;

	if (info->flags & IEEE80211_TX_CTL_AMPDU) {
		ampdu_en = true;
		ampdu_factor = get_tx_ampdu_factor(sta);
		ampdu_density = get_tx_ampdu_density(sta);
	}

	if (info->control.use_rts)
		pkt_info->rts = true;

	if (sta->vht_cap.vht_supported)
		rate = get_highest_vht_tx_rate(rtwdev, sta);
	else if (sta->ht_cap.ht_supported)
		rate = get_highest_ht_tx_rate(rtwdev, sta);
	else if (sta->supp_rates[0] <= 0xf)
		rate = DESC_RATE11M;
	else
		rate = DESC_RATE54M;

	si = (struct rtw_sta_info *)sta->drv_priv;

	bw = si->bw_mode;
	rate_id = si->rate_id;
	stbc = si->stbc_en;
	ldpc = si->ldpc_en;

out:
	pkt_info->seq = seq;
	pkt_info->ampdu_factor = ampdu_factor;
	pkt_info->ampdu_density = ampdu_density;
	pkt_info->ampdu_en = ampdu_en;
	pkt_info->rate = rate;
	pkt_info->rate_id = rate_id;
	pkt_info->bw = bw;
	pkt_info->stbc = stbc;
	pkt_info->ldpc = ldpc;
}

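/* Build the complete rtw_tx_pkt_info for an outgoing frame: rate hints for
 * management vs. data frames, hardware cipher type, broadcast/multicast
 * flag, an optional TX status report request, and the common size, offset
 * and queue-selection fields consumed by rtw_tx_fill_tx_desc().
 */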
void rtw_tx_pkt_info_update(struct rtw_dev *rtwdev,
			    struct rtw_tx_pkt_info *pkt_info,
			    struct ieee80211_tx_control *control,
			    struct sk_buff *skb)
{
	struct rtw_chip_info *chip = rtwdev->chip;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct rtw_sta_info *si;
	struct ieee80211_vif *vif = NULL;
	__le16 fc = hdr->frame_control;
	u8 sec_type = 0;
	bool bmc;

	if (control->sta) {
		si = (struct rtw_sta_info *)control->sta->drv_priv;
		vif = si->vif;
	}

	if (ieee80211_is_mgmt(fc) || ieee80211_is_nullfunc(fc))
		rtw_tx_mgmt_pkt_info_update(rtwdev, pkt_info, control, skb);
	else if (ieee80211_is_data(fc))
		rtw_tx_data_pkt_info_update(rtwdev, pkt_info, control, skb);

	if (info->control.hw_key) {
		struct ieee80211_key_conf *key = info->control.hw_key;

		switch (key->cipher) {
		case WLAN_CIPHER_SUITE_WEP40:
		case WLAN_CIPHER_SUITE_WEP104:
		case WLAN_CIPHER_SUITE_TKIP:
			sec_type = 0x01;
			break;
		case WLAN_CIPHER_SUITE_CCMP:
			sec_type = 0x03;
			break;
		default:
			break;
		}
	}

	bmc = is_broadcast_ether_addr(hdr->addr1) ||
	      is_multicast_ether_addr(hdr->addr1);

	if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS)
		rtw_tx_report_enable(rtwdev, pkt_info);

	pkt_info->bmc = bmc;
	pkt_info->sec_type = sec_type;
	pkt_info->tx_pkt_size = skb->len;
	pkt_info->offset = chip->tx_pkt_desc_sz;
	pkt_info->qsel = skb->priority;
	pkt_info->ls = true;

	/* maybe merge with tx status ? */
	rtw_tx_stats(rtwdev, vif, skb);
}

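/* Packet info for reserved-page frames downloaded to the firmware: a fixed
 * rate with rate fallback disabled, and the management queue selection.
 */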
void rtw_rsvd_page_pkt_info_update(struct rtw_dev *rtwdev,
				   struct rtw_tx_pkt_info *pkt_info,
				   struct sk_buff *skb)
{
	struct rtw_chip_info *chip = rtwdev->chip;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	bool bmc;

	bmc = is_broadcast_ether_addr(hdr->addr1) ||
	      is_multicast_ether_addr(hdr->addr1);
	pkt_info->use_rate = true;
	pkt_info->rate_id = 6;
	pkt_info->dis_rate_fallback = true;
	pkt_info->bmc = bmc;
	pkt_info->tx_pkt_size = skb->len;
	pkt_info->offset = chip->tx_pkt_desc_sz;
	pkt_info->qsel = TX_DESC_QSEL_MGMT;
	pkt_info->ls = true;
}

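/* Transmit a single frame: fill the packet info and hand the skb to the
 * HCI backend via rtw_hci_tx().  If the backend rejects it, the skb is
 * freed back to mac80211.
 */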
void rtw_tx(struct rtw_dev *rtwdev,
	    struct ieee80211_tx_control *control,
	    struct sk_buff *skb)
{
	struct rtw_tx_pkt_info pkt_info = {0};

	rtw_tx_pkt_info_update(rtwdev, &pkt_info, control, skb);
	if (rtw_hci_tx(rtwdev, &pkt_info, skb))
		goto out;

	return;

out:
	ieee80211_free_txskb(rtwdev->hw, skb);
}

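/* Decide whether this frame may be aggregated.  Frames on a TXQ with a
 * running A-MPDU session are marked IEEE80211_TX_CTL_AMPDU; otherwise, for
 * eligible data frames (not VO, not EAPOL, BA not blocked, and a station
 * is present), record the TID and schedule ba_work to try to start a
 * block-ack session.
 */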
static void rtw_txq_check_agg(struct rtw_dev *rtwdev,
			      struct rtw_txq *rtwtxq,
			      struct sk_buff *skb)
{
	struct ieee80211_txq *txq = rtwtxq_to_txq(rtwtxq);
	struct ieee80211_tx_info *info;
	struct rtw_sta_info *si;

	if (test_bit(RTW_TXQ_AMPDU, &rtwtxq->flags)) {
		info = IEEE80211_SKB_CB(skb);
		info->flags |= IEEE80211_TX_CTL_AMPDU;
		return;
	}

	if (skb_get_queue_mapping(skb) == IEEE80211_AC_VO)
		return;

	if (test_bit(RTW_TXQ_BLOCK_BA, &rtwtxq->flags))
		return;

	if (unlikely(skb->protocol == cpu_to_be16(ETH_P_PAE)))
		return;

	if (!txq->sta)
		return;

	si = (struct rtw_sta_info *)txq->sta->drv_priv;
	set_bit(txq->tid, si->tid_ba);

	ieee80211_queue_work(rtwdev->hw, &rtwdev->ba_work);
}

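/* Pull one frame from the mac80211 TXQ and transmit it; returns false once
 * the queue is empty.
 */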
static bool rtw_txq_dequeue(struct rtw_dev *rtwdev,
			    struct rtw_txq *rtwtxq)
{
	struct ieee80211_txq *txq = rtwtxq_to_txq(rtwtxq);
	struct ieee80211_tx_control control;
	struct sk_buff *skb;

	skb = ieee80211_tx_dequeue(rtwdev->hw, txq);
	if (!skb)
		return false;

	rtw_txq_check_agg(rtwdev, rtwtxq, skb);

	control.sta = txq->sta;
	rtw_tx(rtwdev, &control, skb);
	rtwtxq->last_push = jiffies;

	return true;
}

static void rtw_txq_push(struct rtw_dev *rtwdev,
			 struct rtw_txq *rtwtxq,
			 unsigned long frames)
{
	int i;

	rcu_read_lock();

	for (i = 0; i < frames; i++)
		if (!rtw_txq_dequeue(rtwdev, rtwtxq))
			break;

	rcu_read_unlock();
}

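/* TX tasklet: walk the list of pending rtw_txqs, push up to the number of
 * frames mac80211 currently reports queued for each, then remove the txq
 * from the list.
 */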
void rtw_tx_tasklet(unsigned long data)
{
	struct rtw_dev *rtwdev = (void *)data;
	struct rtw_txq *rtwtxq, *tmp;

	spin_lock_bh(&rtwdev->txq_lock);

	list_for_each_entry_safe(rtwtxq, tmp, &rtwdev->txqs, list) {
		struct ieee80211_txq *txq = rtwtxq_to_txq(rtwtxq);
		unsigned long frame_cnt;
		unsigned long byte_cnt;

		ieee80211_txq_get_depth(txq, &frame_cnt, &byte_cnt);
		rtw_txq_push(rtwdev, rtwtxq, frame_cnt);

		list_del_init(&rtwtxq->list);
	}

	spin_unlock_bh(&rtwdev->txq_lock);
}

void rtw_txq_init(struct rtw_dev *rtwdev, struct ieee80211_txq *txq)
{
	struct rtw_txq *rtwtxq;

	if (!txq)
		return;

	rtwtxq = (struct rtw_txq *)txq->drv_priv;
	INIT_LIST_HEAD(&rtwtxq->list);
}

void rtw_txq_cleanup(struct rtw_dev *rtwdev, struct ieee80211_txq *txq)
{
	struct rtw_txq *rtwtxq;

	if (!txq)
		return;

	rtwtxq = (struct rtw_txq *)txq->drv_priv;
	spin_lock_bh(&rtwdev->txq_lock);
	if (!list_empty(&rtwtxq->list))
		list_del_init(&rtwtxq->list);
	spin_unlock_bh(&rtwdev->txq_lock);
}