// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
 */

#include "mt76x02.h"
#include "mt76x02_trace.h"

void mt76x02_mac_reset_counters(struct mt76x02_dev *dev)
{
	int i;

	mt76_rr(dev, MT_RX_STAT_0);
	mt76_rr(dev, MT_RX_STAT_1);
	mt76_rr(dev, MT_RX_STAT_2);
	mt76_rr(dev, MT_TX_STA_0);
	mt76_rr(dev, MT_TX_STA_1);
	mt76_rr(dev, MT_TX_STA_2);

	for (i = 0; i < 16; i++)
		mt76_rr(dev, MT_TX_AGG_CNT(i));

	for (i = 0; i < 16; i++)
		mt76_rr(dev, MT_TX_STAT_FIFO);

	memset(dev->mt76.aggr_stats, 0, sizeof(dev->mt76.aggr_stats));
}
EXPORT_SYMBOL_GPL(mt76x02_mac_reset_counters);

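/* Map a mac80211 key configuration to the hardware cipher type and copy
 * the key material into a fixed 32-byte buffer; MT_CIPHER_NONE is
 * returned for a missing key or an unsupported cipher suite.
 */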
static enum mt76x02_cipher_type
mt76x02_mac_get_key_info(struct ieee80211_key_conf *key, u8 *key_data)
{
	memset(key_data, 0, 32);
	if (!key)
		return MT_CIPHER_NONE;

	if (key->keylen > 32)
		return MT_CIPHER_NONE;

	memcpy(key_data, key->key, key->keylen);

	switch (key->cipher) {
	case WLAN_CIPHER_SUITE_WEP40:
		return MT_CIPHER_WEP40;
	case WLAN_CIPHER_SUITE_WEP104:
		return MT_CIPHER_WEP104;
	case WLAN_CIPHER_SUITE_TKIP:
		return MT_CIPHER_TKIP;
	case WLAN_CIPHER_SUITE_CCMP:
		return MT_CIPHER_AES_CCMP;
	default:
		return MT_CIPHER_NONE;
	}
}

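/* Program a per-VIF shared (group) key: set the cipher for this key slot
 * in MT_SKEY_MODE and write the key material into the MT_SKEY region.
 */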
int mt76x02_mac_shared_key_setup(struct mt76x02_dev *dev, u8 vif_idx,
				 u8 key_idx, struct ieee80211_key_conf *key)
{
	enum mt76x02_cipher_type cipher;
	u8 key_data[32];
	u32 val;

	cipher = mt76x02_mac_get_key_info(key, key_data);
	if (cipher == MT_CIPHER_NONE && key)
		return -EOPNOTSUPP;

	val = mt76_rr(dev, MT_SKEY_MODE(vif_idx));
	val &= ~(MT_SKEY_MODE_MASK << MT_SKEY_MODE_SHIFT(vif_idx, key_idx));
	val |= cipher << MT_SKEY_MODE_SHIFT(vif_idx, key_idx);
	mt76_wr(dev, MT_SKEY_MODE(vif_idx), val);

	mt76_wr_copy(dev, MT_SKEY(vif_idx, key_idx), key_data,
		     sizeof(key_data));

	return 0;
}
EXPORT_SYMBOL_GPL(mt76x02_mac_shared_key_setup);

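/* Read back the IV/EIV words the hardware keeps for this WCID and
 * rebuild the current packet number (TKIP TSC or CCMP PN), so mac80211's
 * tx_pn counter stays in sync across a device restart.
 */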
void mt76x02_mac_wcid_sync_pn(struct mt76x02_dev *dev, u8 idx,
			      struct ieee80211_key_conf *key)
{
	enum mt76x02_cipher_type cipher;
	u8 key_data[32];
	u32 iv, eiv;
	u64 pn;

	cipher = mt76x02_mac_get_key_info(key, key_data);
	iv = mt76_rr(dev, MT_WCID_IV(idx));
	eiv = mt76_rr(dev, MT_WCID_IV(idx) + 4);

	pn = (u64)eiv << 16;
	if (cipher == MT_CIPHER_TKIP) {
		pn |= (iv >> 16) & 0xff;
		pn |= (iv & 0xff) << 8;
	} else if (cipher >= MT_CIPHER_AES_CCMP) {
		pn |= iv & 0xffff;
	} else {
		return;
	}

	atomic64_set(&key->tx_pn, pn);
}

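/* Install a per-station key for a WCID entry: write the key material and
 * cipher mode, then seed the on-chip IV/EIV from the key's tx_pn so
 * hardware PN generation continues where software left off.
 */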
int mt76x02_mac_wcid_set_key(struct mt76x02_dev *dev, u8 idx,
			     struct ieee80211_key_conf *key)
{
	enum mt76x02_cipher_type cipher;
	u8 key_data[32];
	u8 iv_data[8];
	u64 pn;

	cipher = mt76x02_mac_get_key_info(key, key_data);
	if (cipher == MT_CIPHER_NONE && key)
		return -EOPNOTSUPP;

	mt76_wr_copy(dev, MT_WCID_KEY(idx), key_data, sizeof(key_data));
	mt76_rmw_field(dev, MT_WCID_ATTR(idx), MT_WCID_ATTR_PKEY_MODE, cipher);

	memset(iv_data, 0, sizeof(iv_data));
	if (key) {
		mt76_rmw_field(dev, MT_WCID_ATTR(idx), MT_WCID_ATTR_PAIRWISE,
			       !!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE));

		pn = atomic64_read(&key->tx_pn);

		iv_data[3] = key->keyidx << 6;
		if (cipher >= MT_CIPHER_TKIP) {
			iv_data[3] |= 0x20;
			put_unaligned_le32(pn >> 16, &iv_data[4]);
		}

		if (cipher == MT_CIPHER_TKIP) {
			iv_data[0] = (pn >> 8) & 0xff;
			iv_data[1] = (iv_data[0] | 0x20) & 0x7f;
			iv_data[2] = pn & 0xff;
		} else if (cipher >= MT_CIPHER_AES_CCMP) {
			put_unaligned_le16((pn & 0xffff), &iv_data[0]);
		}
	}

	mt76_wr_copy(dev, MT_WCID_IV(idx), iv_data, sizeof(iv_data));

	return 0;
}

void mt76x02_mac_wcid_setup(struct mt76x02_dev *dev, u8 idx,
			    u8 vif_idx, u8 *mac)
{
	struct mt76_wcid_addr addr = {};
	u32 attr;

	attr = FIELD_PREP(MT_WCID_ATTR_BSS_IDX, vif_idx & 7) |
	       FIELD_PREP(MT_WCID_ATTR_BSS_IDX_EXT, !!(vif_idx & 8));

	mt76_wr(dev, MT_WCID_ATTR(idx), attr);

	if (idx >= 128)
		return;

	if (mac)
		memcpy(addr.macaddr, mac, ETH_ALEN);

	mt76_wr_copy(dev, MT_WCID_ADDR(idx), &addr, sizeof(addr));
}
EXPORT_SYMBOL_GPL(mt76x02_mac_wcid_setup);

void mt76x02_mac_wcid_set_drop(struct mt76x02_dev *dev, u8 idx, bool drop)
{
	u32 val = mt76_rr(dev, MT_WCID_DROP(idx));
	u32 bit = MT_WCID_DROP_MASK(idx);

	/* prevent unnecessary writes */
	if ((val & bit) != (bit * drop))
		mt76_wr(dev, MT_WCID_DROP(idx), (val & ~bit) | (bit * drop));
}

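/* Encode an ieee80211_tx_rate into the 16-bit hardware rate word shared
 * by the TXWI and the WCID rate registers: rate index, PHY type,
 * bandwidth and short-GI flag. The spatial stream count is returned
 * through nss_val.
 */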
static __le16
mt76x02_mac_tx_rate_val(struct mt76x02_dev *dev,
			const struct ieee80211_tx_rate *rate, u8 *nss_val)
{
	u8 phy, rate_idx, nss, bw = 0;
	u16 rateval;

	if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
		rate_idx = rate->idx;
		nss = 1 + (rate->idx >> 4);
		phy = MT_PHY_TYPE_VHT;
		if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH)
			bw = 2;
		else if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
			bw = 1;
	} else if (rate->flags & IEEE80211_TX_RC_MCS) {
		rate_idx = rate->idx;
		nss = 1 + (rate->idx >> 3);
		phy = MT_PHY_TYPE_HT;
		if (rate->flags & IEEE80211_TX_RC_GREEN_FIELD)
			phy = MT_PHY_TYPE_HT_GF;
		if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
			bw = 1;
	} else {
		const struct ieee80211_rate *r;
		int band = dev->mt76.chandef.chan->band;
		u16 val;

		r = &dev->mt76.hw->wiphy->bands[band]->bitrates[rate->idx];
		if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
			val = r->hw_value_short;
		else
			val = r->hw_value;

		phy = val >> 8;
		rate_idx = val & 0xff;
		nss = 1;
	}

	rateval = FIELD_PREP(MT_RXWI_RATE_INDEX, rate_idx);
	rateval |= FIELD_PREP(MT_RXWI_RATE_PHY, phy);
	rateval |= FIELD_PREP(MT_RXWI_RATE_BW, bw);
	if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
		rateval |= MT_RXWI_RATE_SGI;

	*nss_val = nss;
	return cpu_to_le16(rateval);
}

void mt76x02_mac_wcid_set_rate(struct mt76x02_dev *dev, struct mt76_wcid *wcid,
			       const struct ieee80211_tx_rate *rate)
{
	s8 max_txpwr_adj = mt76x02_tx_get_max_txpwr_adj(dev, rate);
	__le16 rateval;
	u32 tx_info;
	u8 nss;

	rateval = mt76x02_mac_tx_rate_val(dev, rate, &nss);
	tx_info = FIELD_PREP(MT_WCID_TX_INFO_RATE, rateval) |
		  FIELD_PREP(MT_WCID_TX_INFO_NSS, nss) |
		  FIELD_PREP(MT_WCID_TX_INFO_TXPWR_ADJ, max_txpwr_adj) |
		  MT_WCID_TX_INFO_SET;
	wcid->tx_info = tx_info;
}

void mt76x02_mac_set_short_preamble(struct mt76x02_dev *dev, bool enable)
{
	if (enable)
		mt76_set(dev, MT_AUTO_RSP_CFG, MT_AUTO_RSP_PREAMB_SHORT);
	else
		mt76_clear(dev, MT_AUTO_RSP_CFG, MT_AUTO_RSP_PREAMB_SHORT);
}

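/* Pop one entry from the TX status FIFO. The extended status word is
 * fetched first, since reading MT_TX_STAT_FIFO advances the FIFO.
 */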
bool mt76x02_mac_load_tx_status(struct mt76x02_dev *dev,
				struct mt76x02_tx_status *stat)
{
	u32 stat1, stat2;

	stat2 = mt76_rr(dev, MT_TX_STAT_FIFO_EXT);
	stat1 = mt76_rr(dev, MT_TX_STAT_FIFO);

	stat->valid = !!(stat1 & MT_TX_STAT_FIFO_VALID);
	if (!stat->valid)
		return false;

	stat->success = !!(stat1 & MT_TX_STAT_FIFO_SUCCESS);
	stat->aggr = !!(stat1 & MT_TX_STAT_FIFO_AGGR);
	stat->ack_req = !!(stat1 & MT_TX_STAT_FIFO_ACKREQ);
	stat->wcid = FIELD_GET(MT_TX_STAT_FIFO_WCID, stat1);
	stat->rate = FIELD_GET(MT_TX_STAT_FIFO_RATE, stat1);

	stat->retry = FIELD_GET(MT_TX_STAT_FIFO_EXT_RETRY, stat2);
	stat->pktid = FIELD_GET(MT_TX_STAT_FIFO_EXT_PKTID, stat2);

	trace_mac_txstat_fetch(dev, stat);

	return true;
}

static int
mt76x02_mac_process_tx_rate(struct ieee80211_tx_rate *txrate, u16 rate,
			    enum nl80211_band band)
{
	u8 idx = FIELD_GET(MT_RXWI_RATE_INDEX, rate);

	txrate->idx = 0;
	txrate->flags = 0;
	txrate->count = 1;

	switch (FIELD_GET(MT_RXWI_RATE_PHY, rate)) {
	case MT_PHY_TYPE_OFDM:
		if (band == NL80211_BAND_2GHZ)
			idx += 4;

		txrate->idx = idx;
		return 0;
	case MT_PHY_TYPE_CCK:
		if (idx >= 8)
			idx -= 8;

		txrate->idx = idx;
		return 0;
	case MT_PHY_TYPE_HT_GF:
		txrate->flags |= IEEE80211_TX_RC_GREEN_FIELD;
		/* fall through */
	case MT_PHY_TYPE_HT:
		txrate->flags |= IEEE80211_TX_RC_MCS;
		txrate->idx = idx;
		break;
	case MT_PHY_TYPE_VHT:
		txrate->flags |= IEEE80211_TX_RC_VHT_MCS;
		txrate->idx = idx;
		break;
	default:
		return -EINVAL;
	}

	switch (FIELD_GET(MT_RXWI_RATE_BW, rate)) {
	case MT_PHY_BW_20:
		break;
	case MT_PHY_BW_40:
		txrate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
		break;
	case MT_PHY_BW_80:
		txrate->flags |= IEEE80211_TX_RC_80_MHZ_WIDTH;
		break;
	default:
		return -EINVAL;
	}

	if (rate & MT_RXWI_RATE_SGI)
		txrate->flags |= IEEE80211_TX_RC_SHORT_GI;

	return 0;
}

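/* Build the TXWI descriptor for a frame: rate and TX power selection,
 * software IV generation when needed, per-revision spatial stream setup
 * and A-MPDU parameters taken from the peer's HT capabilities.
 */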
void mt76x02_mac_write_txwi(struct mt76x02_dev *dev, struct mt76x02_txwi *txwi,
			    struct sk_buff *skb, struct mt76_wcid *wcid,
			    struct ieee80211_sta *sta, int len)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_tx_rate *rate = &info->control.rates[0];
	struct ieee80211_key_conf *key = info->control.hw_key;
	u32 wcid_tx_info;
	u16 rate_ht_mask = FIELD_PREP(MT_RXWI_RATE_PHY, BIT(1) | BIT(2));
	u16 txwi_flags = 0;
	u8 nss;
	s8 txpwr_adj, max_txpwr_adj;
	u8 ccmp_pn[8], nstreams = dev->mt76.chainmask & 0xf;

	memset(txwi, 0, sizeof(*txwi));

	if (!info->control.hw_key && wcid && wcid->hw_key_idx != 0xff &&
	    ieee80211_has_protected(hdr->frame_control)) {
		wcid = NULL;
		ieee80211_get_tx_rates(info->control.vif, sta, skb,
				       info->control.rates, 1);
	}

	if (wcid)
		txwi->wcid = wcid->idx;
	else
		txwi->wcid = 0xff;

	if (wcid && wcid->sw_iv && key) {
		u64 pn = atomic64_inc_return(&key->tx_pn);

		ccmp_pn[0] = pn;
		ccmp_pn[1] = pn >> 8;
		ccmp_pn[2] = 0;
		ccmp_pn[3] = 0x20 | (key->keyidx << 6);
		ccmp_pn[4] = pn >> 16;
		ccmp_pn[5] = pn >> 24;
		ccmp_pn[6] = pn >> 32;
		ccmp_pn[7] = pn >> 40;
		txwi->iv = *((__le32 *)&ccmp_pn[0]);
		txwi->eiv = *((__le32 *)&ccmp_pn[4]);
	}

	if (wcid && (rate->idx < 0 || !rate->count)) {
		wcid_tx_info = wcid->tx_info;
		txwi->rate = FIELD_GET(MT_WCID_TX_INFO_RATE, wcid_tx_info);
		max_txpwr_adj = FIELD_GET(MT_WCID_TX_INFO_TXPWR_ADJ,
					  wcid_tx_info);
		nss = FIELD_GET(MT_WCID_TX_INFO_NSS, wcid_tx_info);
	} else {
		txwi->rate = mt76x02_mac_tx_rate_val(dev, rate, &nss);
		max_txpwr_adj = mt76x02_tx_get_max_txpwr_adj(dev, rate);
	}

	txpwr_adj = mt76x02_tx_get_txpwr_adj(dev, dev->mt76.txpower_conf,
					     max_txpwr_adj);
	txwi->ctl2 = FIELD_PREP(MT_TX_PWR_ADJ, txpwr_adj);

	if (nstreams > 1 && mt76_rev(&dev->mt76) >= MT76XX_REV_E4)
		txwi->txstream = 0x13;
	else if (nstreams > 1 && mt76_rev(&dev->mt76) >= MT76XX_REV_E3 &&
		 !(txwi->rate & cpu_to_le16(rate_ht_mask)))
		txwi->txstream = 0x93;

	if (is_mt76x2(dev) && (info->flags & IEEE80211_TX_CTL_LDPC))
		txwi->rate |= cpu_to_le16(MT_RXWI_RATE_LDPC);
	if ((info->flags & IEEE80211_TX_CTL_STBC) && nss == 1)
		txwi->rate |= cpu_to_le16(MT_RXWI_RATE_STBC);
	if (nss > 1 && sta && sta->smps_mode == IEEE80211_SMPS_DYNAMIC)
		txwi_flags |= MT_TXWI_FLAGS_MMPS;
	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
		txwi->ack_ctl |= MT_TXWI_ACK_CTL_REQ;
	if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
		txwi->ack_ctl |= MT_TXWI_ACK_CTL_NSEQ;
	if ((info->flags & IEEE80211_TX_CTL_AMPDU) && sta) {
		u8 ba_size = IEEE80211_MIN_AMPDU_BUF;

		ba_size <<= sta->ht_cap.ampdu_factor;
		ba_size = min_t(int, 63, ba_size - 1);
		if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
			ba_size = 0;
		txwi->ack_ctl |= FIELD_PREP(MT_TXWI_ACK_CTL_BA_WINDOW, ba_size);

		txwi_flags |= MT_TXWI_FLAGS_AMPDU |
			      FIELD_PREP(MT_TXWI_FLAGS_MPDU_DENSITY,
					 sta->ht_cap.ampdu_density);
	}

	if (ieee80211_is_probe_resp(hdr->frame_control) ||
	    ieee80211_is_beacon(hdr->frame_control))
		txwi_flags |= MT_TXWI_FLAGS_TS;

	txwi->flags |= cpu_to_le16(txwi_flags);
	txwi->len_ctl = cpu_to_le16(len);
}
EXPORT_SYMBOL_GPL(mt76x02_mac_write_txwi);

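/* Derive the next entry of the status rate table from the previous one,
 * mirroring the hardware rate-down behavior: VHT lowers the MCS first
 * and only then drops NSS, HT MCS 8 falls back to MCS 0, and other rates
 * simply step down one index.
 */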
static void
mt76x02_tx_rate_fallback(struct ieee80211_tx_rate *rates, int idx, int phy)
{
	u8 mcs, nss;

	if (!idx)
		return;

	rates += idx - 1;
	rates[1] = rates[0];
	switch (phy) {
	case MT_PHY_TYPE_VHT:
		mcs = ieee80211_rate_get_vht_mcs(rates);
		nss = ieee80211_rate_get_vht_nss(rates);

		if (mcs == 0)
			nss = max_t(int, nss - 1, 1);
		else
			mcs--;

		ieee80211_rate_set_vht(rates + 1, mcs, nss);
		break;
	case MT_PHY_TYPE_HT_GF:
	case MT_PHY_TYPE_HT:
		/* MCS 8 falls back to MCS 0 */
		if (rates[0].idx == 8) {
			rates[1].idx = 0;
			break;
		}
		/* fall through */
	default:
		rates[1].idx = max_t(int, rates[0].idx - 1, 0);
		break;
	}
}

static void
mt76x02_mac_fill_tx_status(struct mt76x02_dev *dev, struct mt76x02_sta *msta,
			   struct ieee80211_tx_info *info,
			   struct mt76x02_tx_status *st, int n_frames)
{
	struct ieee80211_tx_rate *rate = info->status.rates;
	struct ieee80211_tx_rate last_rate;
	u16 first_rate;
	int retry = st->retry;
	int phy;
	int i;

	if (!n_frames)
		return;

	phy = FIELD_GET(MT_RXWI_RATE_PHY, st->rate);

	if (st->pktid & MT_PACKET_ID_HAS_RATE) {
		first_rate = st->rate & ~MT_PKTID_RATE;
		first_rate |= st->pktid & MT_PKTID_RATE;

		mt76x02_mac_process_tx_rate(&rate[0], first_rate,
					    dev->mt76.chandef.chan->band);
	} else if (rate[0].idx < 0) {
		if (!msta)
			return;

		mt76x02_mac_process_tx_rate(&rate[0], msta->wcid.tx_info,
					    dev->mt76.chandef.chan->band);
	}

	mt76x02_mac_process_tx_rate(&last_rate, st->rate,
				    dev->mt76.chandef.chan->band);

	for (i = 0; i < ARRAY_SIZE(info->status.rates); i++) {
		retry--;
		if (i + 1 == ARRAY_SIZE(info->status.rates)) {
			info->status.rates[i] = last_rate;
			info->status.rates[i].count = max_t(int, retry, 1);
			break;
		}

		mt76x02_tx_rate_fallback(info->status.rates, i, phy);
		if (info->status.rates[i].idx == last_rate.idx)
			break;
	}

	if (i + 1 < ARRAY_SIZE(info->status.rates)) {
		info->status.rates[i + 1].idx = -1;
		info->status.rates[i + 1].count = 0;
	}

	info->status.ampdu_len = n_frames;
	info->status.ampdu_ack_len = st->success ? n_frames : 0;

	if (st->aggr)
		info->flags |= IEEE80211_TX_CTL_AMPDU |
			       IEEE80211_TX_STAT_AMPDU;

	if (!st->ack_req)
		info->flags |= IEEE80211_TX_CTL_NO_ACK;
	else if (st->success)
		info->flags |= IEEE80211_TX_STAT_ACK;
}

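/* Convert one TX status FIFO entry into a mac80211 status report.
 * Consecutive entries for the same station and rate are batched (up to
 * 32 frames) before being flushed, and the estimated airtime of the
 * transmission is charged to the station for airtime fairness.
 */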
void mt76x02_send_tx_status(struct mt76x02_dev *dev,
			    struct mt76x02_tx_status *stat, u8 *update)
{
	struct ieee80211_tx_info info = {};
	struct ieee80211_tx_status status = {
		.info = &info
	};
	static const u8 ac_to_tid[4] = {
		[IEEE80211_AC_BE] = 0,
		[IEEE80211_AC_BK] = 1,
		[IEEE80211_AC_VI] = 4,
		[IEEE80211_AC_VO] = 6
	};
	struct mt76_wcid *wcid = NULL;
	struct mt76x02_sta *msta = NULL;
	struct mt76_dev *mdev = &dev->mt76;
	struct sk_buff_head list;
	u32 duration = 0;
	u8 cur_pktid;
	u32 ac = 0;
	int len = 0;

	if (stat->pktid == MT_PACKET_ID_NO_ACK)
		return;

	rcu_read_lock();

	if (stat->wcid < ARRAY_SIZE(dev->mt76.wcid))
		wcid = rcu_dereference(dev->mt76.wcid[stat->wcid]);

	if (wcid && wcid->sta) {
		void *priv;

		priv = msta = container_of(wcid, struct mt76x02_sta, wcid);
		status.sta = container_of(priv, struct ieee80211_sta,
					  drv_priv);
	}

	mt76_tx_status_lock(mdev, &list);

	if (wcid) {
		if (mt76_is_skb_pktid(stat->pktid))
			status.skb = mt76_tx_status_skb_get(mdev, wcid,
							    stat->pktid, &list);
		if (status.skb)
			status.info = IEEE80211_SKB_CB(status.skb);
	}

	if (!status.skb && !(stat->pktid & MT_PACKET_ID_HAS_RATE)) {
		mt76_tx_status_unlock(mdev, &list);
		goto out;
	}

	if (msta && stat->aggr && !status.skb) {
		u32 stat_val, stat_cache;

		stat_val = stat->rate;
		stat_val |= ((u32)stat->retry) << 16;
		stat_cache = msta->status.rate;
		stat_cache |= ((u32)msta->status.retry) << 16;

		if (*update == 0 && stat_val == stat_cache &&
		    stat->wcid == msta->status.wcid && msta->n_frames < 32) {
			msta->n_frames++;
			mt76_tx_status_unlock(mdev, &list);
			goto out;
		}

		cur_pktid = msta->status.pktid;
		mt76x02_mac_fill_tx_status(dev, msta, status.info,
					   &msta->status, msta->n_frames);

		msta->status = *stat;
		msta->n_frames = 1;
		*update = 0;
	} else {
		cur_pktid = stat->pktid;
		mt76x02_mac_fill_tx_status(dev, msta, status.info, stat, 1);
		*update = 1;
	}

	if (status.skb) {
		info = *status.info;
		len = status.skb->len;
		ac = skb_get_queue_mapping(status.skb);
		mt76_tx_status_skb_done(mdev, status.skb, &list);
	} else if (msta) {
		len = status.info->status.ampdu_len * ewma_pktlen_read(&msta->pktlen);
		ac = FIELD_GET(MT_PKTID_AC, cur_pktid);
	}

	mt76_tx_status_unlock(mdev, &list);

	if (!status.skb)
		ieee80211_tx_status_ext(mt76_hw(dev), &status);

	if (!len)
		goto out;

	duration = mt76_calc_tx_airtime(&dev->mt76, &info, len);

	spin_lock_bh(&dev->mt76.cc_lock);
	dev->tx_airtime += duration;
	spin_unlock_bh(&dev->mt76.cc_lock);

	if (msta)
		ieee80211_sta_register_airtime(status.sta, ac_to_tid[ac], duration, 0);

out:
	rcu_read_unlock();
}

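/* Decode the RXWI rate field into mt76_rx_status: legacy rate index
 * fixups per band, HT/VHT encoding, LDPC/SGI/STBC flags and bandwidth.
 */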
static int
mt76x02_mac_process_rate(struct mt76x02_dev *dev,
			 struct mt76_rx_status *status,
			 u16 rate)
{
	u8 idx = FIELD_GET(MT_RXWI_RATE_INDEX, rate);

	switch (FIELD_GET(MT_RXWI_RATE_PHY, rate)) {
	case MT_PHY_TYPE_OFDM:
		if (idx >= 8)
			idx = 0;

		if (status->band == NL80211_BAND_2GHZ)
			idx += 4;

		status->rate_idx = idx;
		return 0;
	case MT_PHY_TYPE_CCK:
		if (idx >= 8) {
			idx -= 8;
			status->enc_flags |= RX_ENC_FLAG_SHORTPRE;
		}

		if (idx >= 4)
			idx = 0;

		status->rate_idx = idx;
		return 0;
	case MT_PHY_TYPE_HT_GF:
		status->enc_flags |= RX_ENC_FLAG_HT_GF;
		/* fall through */
	case MT_PHY_TYPE_HT:
		status->encoding = RX_ENC_HT;
		status->rate_idx = idx;
		break;
	case MT_PHY_TYPE_VHT: {
		u8 n_rxstream = dev->mt76.chainmask & 0xf;

		status->encoding = RX_ENC_VHT;
		status->rate_idx = FIELD_GET(MT_RATE_INDEX_VHT_IDX, idx);
		status->nss = min_t(u8, n_rxstream,
				    FIELD_GET(MT_RATE_INDEX_VHT_NSS, idx) + 1);
		break;
	}
	default:
		return -EINVAL;
	}

	if (rate & MT_RXWI_RATE_LDPC)
		status->enc_flags |= RX_ENC_FLAG_LDPC;

	if (rate & MT_RXWI_RATE_SGI)
		status->enc_flags |= RX_ENC_FLAG_SHORT_GI;

	if (rate & MT_RXWI_RATE_STBC)
		status->enc_flags |= 1 << RX_ENC_FLAG_STBC_SHIFT;

	switch (FIELD_GET(MT_RXWI_RATE_BW, rate)) {
	case MT_PHY_BW_20:
		break;
	case MT_PHY_BW_40:
		status->bw = RATE_INFO_BW_40;
		break;
	case MT_PHY_BW_80:
		status->bw = RATE_INFO_BW_80;
		break;
	default:
		break;
	}

	return 0;
}

void mt76x02_mac_setaddr(struct mt76x02_dev *dev, const u8 *addr)
{
	static const u8 null_addr[ETH_ALEN] = {};
	int i;

	ether_addr_copy(dev->mt76.macaddr, addr);

	if (!is_valid_ether_addr(dev->mt76.macaddr)) {
		eth_random_addr(dev->mt76.macaddr);
		dev_info(dev->mt76.dev,
			 "Invalid MAC address, using random address %pM\n",
			 dev->mt76.macaddr);
	}

	mt76_wr(dev, MT_MAC_ADDR_DW0, get_unaligned_le32(dev->mt76.macaddr));
	mt76_wr(dev, MT_MAC_ADDR_DW1,
		get_unaligned_le16(dev->mt76.macaddr + 4) |
		FIELD_PREP(MT_MAC_ADDR_DW1_U2ME_MASK, 0xff));

	mt76_wr(dev, MT_MAC_BSSID_DW0,
		get_unaligned_le32(dev->mt76.macaddr));
	mt76_wr(dev, MT_MAC_BSSID_DW1,
		get_unaligned_le16(dev->mt76.macaddr + 4) |
		FIELD_PREP(MT_MAC_BSSID_DW1_MBSS_MODE, 3) | /* 8 APs + 8 STAs */
		MT_MAC_BSSID_DW1_MBSS_LOCAL_BIT);

	for (i = 0; i < 16; i++)
		mt76x02_mac_set_bssid(dev, i, null_addr);
}
EXPORT_SYMBOL_GPL(mt76x02_mac_setaddr);

static int
mt76x02_mac_get_rssi(struct mt76x02_dev *dev, s8 rssi, int chain)
{
	struct mt76x02_rx_freq_cal *cal = &dev->cal.rx;

	rssi += cal->rssi_offset[chain];
	rssi -= cal->lna_gain;

	return rssi;
}

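/* Parse the RXWI preceding a received frame: strip header padding and
 * the hardware-consumed IV/PN, tag A-MPDU subframes with a shared
 * reference, and fill in per-chain signal, frequency, TID and sequence
 * number before rate decoding.
 */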
int mt76x02_mac_process_rx(struct mt76x02_dev *dev, struct sk_buff *skb,
			   void *rxi)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76x02_rxwi *rxwi = rxi;
	struct mt76x02_sta *sta;
	u32 rxinfo = le32_to_cpu(rxwi->rxinfo);
	u32 ctl = le32_to_cpu(rxwi->ctl);
	u16 rate = le16_to_cpu(rxwi->rate);
	u16 tid_sn = le16_to_cpu(rxwi->tid_sn);
	bool unicast = rxwi->rxinfo & cpu_to_le32(MT_RXINFO_UNICAST);
	int pad_len = 0, nstreams = dev->mt76.chainmask & 0xf;
	s8 signal;
	u8 pn_len;
	u8 wcid;
	int len;

	if (!test_bit(MT76_STATE_RUNNING, &dev->mt76.state))
		return -EINVAL;

	if (rxinfo & MT_RXINFO_L2PAD)
		pad_len += 2;

	if (rxinfo & MT_RXINFO_DECRYPT) {
		status->flag |= RX_FLAG_DECRYPTED;
		status->flag |= RX_FLAG_MMIC_STRIPPED;
		status->flag |= RX_FLAG_MIC_STRIPPED;
		status->flag |= RX_FLAG_IV_STRIPPED;
	}

	wcid = FIELD_GET(MT_RXWI_CTL_WCID, ctl);
	sta = mt76x02_rx_get_sta(&dev->mt76, wcid);
	status->wcid = mt76x02_rx_get_sta_wcid(sta, unicast);

	len = FIELD_GET(MT_RXWI_CTL_MPDU_LEN, ctl);
	pn_len = FIELD_GET(MT_RXINFO_PN_LEN, rxinfo);
	if (pn_len) {
		int offset = ieee80211_get_hdrlen_from_skb(skb) + pad_len;
		u8 *data = skb->data + offset;

		status->iv[0] = data[7];
		status->iv[1] = data[6];
		status->iv[2] = data[5];
		status->iv[3] = data[4];
		status->iv[4] = data[1];
		status->iv[5] = data[0];

		/*
		 * Driver CCMP validation can't deal with fragments.
		 * Let mac80211 take care of it.
		 */
		if (rxinfo & MT_RXINFO_FRAG) {
			status->flag &= ~RX_FLAG_IV_STRIPPED;
		} else {
			pad_len += pn_len << 2;
			len -= pn_len << 2;
		}
	}

	mt76x02_remove_hdr_pad(skb, pad_len);

	if ((rxinfo & MT_RXINFO_BA) && !(rxinfo & MT_RXINFO_NULL))
		status->aggr = true;

	if (rxinfo & MT_RXINFO_AMPDU) {
		status->flag |= RX_FLAG_AMPDU_DETAILS;
		status->ampdu_ref = dev->mt76.ampdu_ref;

		/*
		 * When receiving an A-MPDU subframe and RSSI info is not valid,
		 * we can assume that more subframes belonging to the same A-MPDU
		 * are coming. The last one will have valid RSSI info.
		 */
		if (rxinfo & MT_RXINFO_RSSI) {
			if (!++dev->mt76.ampdu_ref)
				dev->mt76.ampdu_ref++;
		}
	}

	if (WARN_ON_ONCE(len > skb->len))
		return -EINVAL;

	pskb_trim(skb, len);

	status->chains = BIT(0);
	signal = mt76x02_mac_get_rssi(dev, rxwi->rssi[0], 0);
	status->chain_signal[0] = signal;
	if (nstreams > 1) {
		status->chains |= BIT(1);
		status->chain_signal[1] = mt76x02_mac_get_rssi(dev,
							       rxwi->rssi[1],
							       1);
		signal = max_t(s8, signal, status->chain_signal[1]);
	}
	status->signal = signal;
	status->freq = dev->mt76.chandef.chan->center_freq;
	status->band = dev->mt76.chandef.chan->band;

	status->tid = FIELD_GET(MT_RXWI_TID, tid_sn);
	status->seqno = FIELD_GET(MT_RXWI_SN, tid_sn);

	return mt76x02_mac_process_rate(dev, status, rate);
}

void mt76x02_mac_poll_tx_status(struct mt76x02_dev *dev, bool irq)
{
	struct mt76x02_tx_status stat = {};
	u8 update = 1;
	bool ret;

	if (!test_bit(MT76_STATE_RUNNING, &dev->mt76.state))
		return;

	trace_mac_txstat_poll(dev);

	while (!irq || !kfifo_is_full(&dev->txstatus_fifo)) {
		if (!spin_trylock(&dev->txstatus_fifo_lock))
			break;

		ret = mt76x02_mac_load_tx_status(dev, &stat);
		spin_unlock(&dev->txstatus_fifo_lock);

		if (!ret)
			break;

		if (!irq) {
			mt76x02_send_tx_status(dev, &stat, &update);
			continue;
		}

		kfifo_put(&dev->txstatus_fifo, stat);
	}
}

void mt76x02_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
			     struct mt76_queue_entry *e)
{
	struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
	struct mt76x02_txwi *txwi;
	u8 *txwi_ptr;

	if (!e->txwi) {
		dev_kfree_skb_any(e->skb);
		return;
	}

	mt76x02_mac_poll_tx_status(dev, false);

	txwi_ptr = mt76_get_txwi_ptr(mdev, e->txwi);
	txwi = (struct mt76x02_txwi *)txwi_ptr;
	trace_mac_txdone_add(dev, txwi->wcid, txwi->pktid);

	mt76_tx_complete_skb(mdev, e->skb);
}
EXPORT_SYMBOL_GPL(mt76x02_tx_complete_skb);

void mt76x02_mac_set_rts_thresh(struct mt76x02_dev *dev, u32 val)
{
	u32 data = 0;

	if (val != ~0)
		data = FIELD_PREP(MT_PROT_CFG_CTRL, 1) |
		       MT_PROT_CFG_RTS_THRESH;

	mt76_rmw_field(dev, MT_TX_RTS_CFG, MT_TX_RTS_CFG_THRESH, val);

	mt76_rmw(dev, MT_CCK_PROT_CFG,
		 MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
	mt76_rmw(dev, MT_OFDM_PROT_CFG,
		 MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
}

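/* Program legacy and HT/VHT protection from the HT operation mode: pick
 * RTS/CTS or CTS-to-self and a protection rate for the CCK/OFDM PROT_CFG
 * registers and the three VHT entries starting at MT_TX_PROT_CFG6,
 * honoring the RTS threshold and the non-greenfield STA flag.
 */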
void mt76x02_mac_set_tx_protection(struct mt76x02_dev *dev, bool legacy_prot,
				   int ht_mode)
{
	int mode = ht_mode & IEEE80211_HT_OP_MODE_PROTECTION;
	bool non_gf = !!(ht_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
	u32 prot[6];
	u32 vht_prot[3];
	int i;
	u16 rts_thr;

	for (i = 0; i < ARRAY_SIZE(prot); i++) {
		prot[i] = mt76_rr(dev, MT_CCK_PROT_CFG + i * 4);
		prot[i] &= ~MT_PROT_CFG_CTRL;
		if (i >= 2)
			prot[i] &= ~MT_PROT_CFG_RATE;
	}

	for (i = 0; i < ARRAY_SIZE(vht_prot); i++) {
		vht_prot[i] = mt76_rr(dev, MT_TX_PROT_CFG6 + i * 4);
		vht_prot[i] &= ~(MT_PROT_CFG_CTRL | MT_PROT_CFG_RATE);
	}

	rts_thr = mt76_get_field(dev, MT_TX_RTS_CFG, MT_TX_RTS_CFG_THRESH);

	if (rts_thr != 0xffff)
		prot[0] |= MT_PROT_CTRL_RTS_CTS;

	if (legacy_prot) {
		prot[1] |= MT_PROT_CTRL_CTS2SELF;

		prot[2] |= MT_PROT_RATE_CCK_11;
		prot[3] |= MT_PROT_RATE_CCK_11;
		prot[4] |= MT_PROT_RATE_CCK_11;
		prot[5] |= MT_PROT_RATE_CCK_11;

		vht_prot[0] |= MT_PROT_RATE_CCK_11;
		vht_prot[1] |= MT_PROT_RATE_CCK_11;
		vht_prot[2] |= MT_PROT_RATE_CCK_11;
	} else {
		if (rts_thr != 0xffff)
			prot[1] |= MT_PROT_CTRL_RTS_CTS;

		prot[2] |= MT_PROT_RATE_OFDM_24;
		prot[3] |= MT_PROT_RATE_DUP_OFDM_24;
		prot[4] |= MT_PROT_RATE_OFDM_24;
		prot[5] |= MT_PROT_RATE_DUP_OFDM_24;

		vht_prot[0] |= MT_PROT_RATE_OFDM_24;
		vht_prot[1] |= MT_PROT_RATE_DUP_OFDM_24;
		vht_prot[2] |= MT_PROT_RATE_SGI_OFDM_24;
	}

	switch (mode) {
	case IEEE80211_HT_OP_MODE_PROTECTION_NONMEMBER:
	case IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED:
		prot[2] |= MT_PROT_CTRL_RTS_CTS;
		prot[3] |= MT_PROT_CTRL_RTS_CTS;
		prot[4] |= MT_PROT_CTRL_RTS_CTS;
		prot[5] |= MT_PROT_CTRL_RTS_CTS;
		vht_prot[0] |= MT_PROT_CTRL_RTS_CTS;
		vht_prot[1] |= MT_PROT_CTRL_RTS_CTS;
		vht_prot[2] |= MT_PROT_CTRL_RTS_CTS;
		break;
	case IEEE80211_HT_OP_MODE_PROTECTION_20MHZ:
		prot[3] |= MT_PROT_CTRL_RTS_CTS;
		prot[5] |= MT_PROT_CTRL_RTS_CTS;
		vht_prot[1] |= MT_PROT_CTRL_RTS_CTS;
		vht_prot[2] |= MT_PROT_CTRL_RTS_CTS;
		break;
	}

	if (non_gf) {
		prot[4] |= MT_PROT_CTRL_RTS_CTS;
		prot[5] |= MT_PROT_CTRL_RTS_CTS;
	}

	for (i = 0; i < ARRAY_SIZE(prot); i++)
		mt76_wr(dev, MT_CCK_PROT_CFG + i * 4, prot[i]);

	for (i = 0; i < ARRAY_SIZE(vht_prot); i++)
		mt76_wr(dev, MT_TX_PROT_CFG6 + i * 4, vht_prot[i]);
}

void mt76x02_update_channel(struct mt76_dev *mdev)
{
	struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
	struct mt76_channel_state *state;

	state = mdev->chan_state;
	state->cc_busy += mt76_rr(dev, MT_CH_BUSY);

	spin_lock_bh(&dev->mt76.cc_lock);
	state->cc_tx += dev->tx_airtime;
	dev->tx_airtime = 0;
	spin_unlock_bh(&dev->mt76.cc_lock);
}
EXPORT_SYMBOL_GPL(mt76x02_update_channel);

static void mt76x02_check_mac_err(struct mt76x02_dev *dev)
{
	u32 val = mt76_rr(dev, 0x10f4);

	if (!(val & BIT(29)) || !(val & (BIT(7) | BIT(5))))
		return;

	dev_err(dev->mt76.dev, "mac specific condition occurred\n");

	mt76_set(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_RESET_CSR);
	udelay(10);
	mt76_wr(dev, MT_MAC_SYS_CTRL,
		MT_MAC_SYS_CTRL_ENABLE_TX | MT_MAC_SYS_CTRL_ENABLE_RX);
}

static void
mt76x02_edcca_tx_enable(struct mt76x02_dev *dev, bool enable)
{
	if (enable) {
		u32 data;

		mt76_set(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_TX);
		mt76_set(dev, MT_AUTO_RSP_CFG, MT_AUTO_RSP_EN);
		/* enable pa-lna */
		data = mt76_rr(dev, MT_TX_PIN_CFG);
		data |= MT_TX_PIN_CFG_TXANT |
			MT_TX_PIN_CFG_RXANT |
			MT_TX_PIN_RFTR_EN |
			MT_TX_PIN_TRSW_EN;
		mt76_wr(dev, MT_TX_PIN_CFG, data);
	} else {
		mt76_clear(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_TX);
		mt76_clear(dev, MT_AUTO_RSP_CFG, MT_AUTO_RSP_EN);
		/* disable pa-lna */
		mt76_clear(dev, MT_TX_PIN_CFG, MT_TX_PIN_CFG_TXANT);
		mt76_clear(dev, MT_TX_PIN_CFG, MT_TX_PIN_CFG_RXANT);
	}
	dev->ed_tx_blocked = !enable;
}

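/* (Re)arm energy-detect CCA: when monitoring is required, program the
 * per-band ED threshold and TXOP blocking, otherwise restore normal CCA
 * operation; in both cases re-enable TX and reset the CCA timer
 * baseline.
 */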
void mt76x02_edcca_init(struct mt76x02_dev *dev)
{
	dev->ed_trigger = 0;
	dev->ed_silent = 0;

	if (dev->ed_monitor) {
		struct ieee80211_channel *chan = dev->mt76.chandef.chan;
		u8 ed_th = chan->band == NL80211_BAND_5GHZ ? 0x0e : 0x20;

		mt76_clear(dev, MT_TX_LINK_CFG, MT_TX_CFACK_EN);
		mt76_set(dev, MT_TXOP_CTRL_CFG, MT_TXOP_ED_CCA_EN);
		mt76_rmw(dev, MT_BBP(AGC, 2), GENMASK(15, 0),
			 ed_th << 8 | ed_th);
		mt76_set(dev, MT_TXOP_HLDR_ET, MT_TXOP_HLDR_TX40M_BLK_EN);
	} else {
		mt76_set(dev, MT_TX_LINK_CFG, MT_TX_CFACK_EN);
		mt76_clear(dev, MT_TXOP_CTRL_CFG, MT_TXOP_ED_CCA_EN);
		if (is_mt76x2(dev)) {
			mt76_wr(dev, MT_BBP(AGC, 2), 0x00007070);
			mt76_set(dev, MT_TXOP_HLDR_ET,
				 MT_TXOP_HLDR_TX40M_BLK_EN);
		} else {
			mt76_wr(dev, MT_BBP(AGC, 2), 0x003a6464);
			mt76_clear(dev, MT_TXOP_HLDR_ET,
				   MT_TXOP_HLDR_TX40M_BLK_EN);
		}
	}
	mt76x02_edcca_tx_enable(dev, true);
	dev->ed_monitor_learning = true;

	/* clear previous CCA timer value */
	mt76_rr(dev, MT_ED_CCA_TIMER);
	dev->ed_time = ktime_get_boottime();
}
EXPORT_SYMBOL_GPL(mt76x02_edcca_init);

#define MT_EDCCA_TH		92
#define MT_EDCCA_BLOCK_TH	2
#define MT_EDCCA_LEARN_TH	50
#define MT_EDCCA_LEARN_CCA	180
#define MT_EDCCA_LEARN_TIMEOUT	(20 * HZ)

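/* Periodic ED-CCA check: compare the busy fraction of the last polling
 * interval against MT_EDCCA_TH and block or re-enable TX after enough
 * consecutive busy/silent polls, with a learning phase to suppress false
 * positives at the lowest AGC gain.
 */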
static void mt76x02_edcca_check(struct mt76x02_dev *dev)
{
	ktime_t cur_time;
	u32 active, val, busy;

	cur_time = ktime_get_boottime();
	val = mt76_rr(dev, MT_ED_CCA_TIMER);

	active = ktime_to_us(ktime_sub(cur_time, dev->ed_time));
	dev->ed_time = cur_time;

	busy = (val * 100) / active;
	busy = min_t(u32, busy, 100);

	if (busy > MT_EDCCA_TH) {
		dev->ed_trigger++;
		dev->ed_silent = 0;
	} else {
		dev->ed_silent++;
		dev->ed_trigger = 0;
	}

	if (dev->cal.agc_lowest_gain &&
	    dev->cal.false_cca > MT_EDCCA_LEARN_CCA &&
	    dev->ed_trigger > MT_EDCCA_LEARN_TH) {
		dev->ed_monitor_learning = false;
		dev->ed_trigger_timeout = jiffies + 20 * HZ;
	} else if (!dev->ed_monitor_learning &&
		   time_is_after_jiffies(dev->ed_trigger_timeout)) {
		dev->ed_monitor_learning = true;
		mt76x02_edcca_tx_enable(dev, true);
	}

	if (dev->ed_monitor_learning)
		return;

	if (dev->ed_trigger > MT_EDCCA_BLOCK_TH && !dev->ed_tx_blocked)
		mt76x02_edcca_tx_enable(dev, false);
	else if (dev->ed_silent > MT_EDCCA_BLOCK_TH && dev->ed_tx_blocked)
		mt76x02_edcca_tx_enable(dev, true);
}

void mt76x02_mac_work(struct work_struct *work)
{
	struct mt76x02_dev *dev = container_of(work, struct mt76x02_dev,
					       mt76.mac_work.work);
	int i, idx;

	mutex_lock(&dev->mt76.mutex);

	mt76_update_survey(&dev->mt76);
	for (i = 0, idx = 0; i < 16; i++) {
		u32 val = mt76_rr(dev, MT_TX_AGG_CNT(i));

		dev->mt76.aggr_stats[idx++] += val & 0xffff;
		dev->mt76.aggr_stats[idx++] += val >> 16;
	}

	if (!dev->mt76.beacon_mask)
		mt76x02_check_mac_err(dev);

	if (dev->ed_monitor)
		mt76x02_edcca_check(dev);

	mutex_unlock(&dev->mt76.mutex);

	mt76_tx_status_check(&dev->mt76, NULL, false);

	ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mt76.mac_work,
				     MT_MAC_WORK_INTERVAL);
}

void mt76x02_mac_cc_reset(struct mt76x02_dev *dev)
{
	dev->mt76.survey_time = ktime_get_boottime();

	mt76_wr(dev, MT_CH_TIME_CFG,
		MT_CH_TIME_CFG_TIMER_EN |
		MT_CH_TIME_CFG_TX_AS_BUSY |
		MT_CH_TIME_CFG_RX_AS_BUSY |
		MT_CH_TIME_CFG_NAV_AS_BUSY |
		MT_CH_TIME_CFG_EIFS_AS_BUSY |
		MT_CH_CCA_RC_EN |
		FIELD_PREP(MT_CH_TIME_CFG_CH_TIMER_CLR, 1));

	/* channel cycle counters read-and-clear */
	mt76_rr(dev, MT_CH_BUSY);
	mt76_rr(dev, MT_CH_IDLE);
}
EXPORT_SYMBOL_GPL(mt76x02_mac_cc_reset);

void mt76x02_mac_set_bssid(struct mt76x02_dev *dev, u8 idx, const u8 *addr)
{
	idx &= 7;
	mt76_wr(dev, MT_MAC_APC_BSSID_L(idx), get_unaligned_le32(addr));
	mt76_rmw_field(dev, MT_MAC_APC_BSSID_H(idx), MT_MAC_APC_BSSID_H_ADDR,
		       get_unaligned_le16(addr + 4));
}