// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
 */

#include "mt76x02.h"
#include "mt76x02_trace.h"

void mt76x02_mac_reset_counters(struct mt76x02_dev *dev)
{
	int i;

	mt76_rr(dev, MT_RX_STAT_0);
	mt76_rr(dev, MT_RX_STAT_1);
	mt76_rr(dev, MT_RX_STAT_2);
	mt76_rr(dev, MT_TX_STA_0);
	mt76_rr(dev, MT_TX_STA_1);
	mt76_rr(dev, MT_TX_STA_2);

	for (i = 0; i < 16; i++)
		mt76_rr(dev, MT_TX_AGG_CNT(i));

	for (i = 0; i < 16; i++)
		mt76_rr(dev, MT_TX_STAT_FIFO);

	memset(dev->mt76.aggr_stats, 0, sizeof(dev->mt76.aggr_stats));
}
EXPORT_SYMBOL_GPL(mt76x02_mac_reset_counters);
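
/*
 * Map a mac80211 cipher suite to the on-chip cipher type and copy the
 * key material into a zero-padded buffer sized for the 32-byte WCID
 * key slot. Anything outside WEP/TKIP/CCMP maps to MT_CIPHER_NONE.
 */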
static enum mt76x02_cipher_type
mt76x02_mac_get_key_info(struct ieee80211_key_conf *key, u8 *key_data)
{
	memset(key_data, 0, 32);
	if (!key)
		return MT_CIPHER_NONE;

	if (key->keylen > 32)
		return MT_CIPHER_NONE;

	memcpy(key_data, key->key, key->keylen);

	switch (key->cipher) {
	case WLAN_CIPHER_SUITE_WEP40:
		return MT_CIPHER_WEP40;
	case WLAN_CIPHER_SUITE_WEP104:
		return MT_CIPHER_WEP104;
	case WLAN_CIPHER_SUITE_TKIP:
		return MT_CIPHER_TKIP;
	case WLAN_CIPHER_SUITE_CCMP:
		return MT_CIPHER_AES_CCMP;
	default:
		return MT_CIPHER_NONE;
	}
}
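
/*
 * Program a per-VIF shared key slot. The cipher for each (vif, key)
 * pair lives in a packed field of the MT_SKEY_MODE register, so the
 * old field is masked out before the new cipher value is inserted.
 */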
int mt76x02_mac_shared_key_setup(struct mt76x02_dev *dev, u8 vif_idx,
				 u8 key_idx, struct ieee80211_key_conf *key)
{
	enum mt76x02_cipher_type cipher;
	u8 key_data[32];
	u32 val;

	cipher = mt76x02_mac_get_key_info(key, key_data);
	if (cipher == MT_CIPHER_NONE && key)
		return -EOPNOTSUPP;

	val = mt76_rr(dev, MT_SKEY_MODE(vif_idx));
	val &= ~(MT_SKEY_MODE_MASK << MT_SKEY_MODE_SHIFT(vif_idx, key_idx));
	val |= cipher << MT_SKEY_MODE_SHIFT(vif_idx, key_idx);
	mt76_wr(dev, MT_SKEY_MODE(vif_idx), val);

	mt76_wr_copy(dev, MT_SKEY(vif_idx, key_idx), key_data,
		     sizeof(key_data));

	return 0;
}
EXPORT_SYMBOL_GPL(mt76x02_mac_shared_key_setup);
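
/*
 * Resynchronize the software packet number from the hardware IV/EIV
 * registers: the EIV holds PN bits 16..47, while TKIP and CCMP pack
 * the low 16 bits into the IV word in different byte orders.
 */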
void mt76x02_mac_wcid_sync_pn(struct mt76x02_dev *dev, u8 idx,
			      struct ieee80211_key_conf *key)
{
	enum mt76x02_cipher_type cipher;
	u8 key_data[32];
	u32 iv, eiv;
	u64 pn;

	cipher = mt76x02_mac_get_key_info(key, key_data);
	iv = mt76_rr(dev, MT_WCID_IV(idx));
	eiv = mt76_rr(dev, MT_WCID_IV(idx) + 4);

	pn = (u64)eiv << 16;
	if (cipher == MT_CIPHER_TKIP) {
		pn |= (iv >> 16) & 0xff;
		pn |= (iv & 0xff) << 8;
	} else if (cipher >= MT_CIPHER_AES_CCMP) {
		pn |= iv & 0xffff;
	} else {
		return;
	}

	atomic64_set(&key->tx_pn, pn);
}
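
/*
 * The inverse of mt76x02_mac_wcid_sync_pn(): install the key material
 * for a WCID entry and seed the hardware IV/EIV from the key's current
 * tx_pn so the PN sequence continues where software left off.
 */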
int mt76x02_mac_wcid_set_key(struct mt76x02_dev *dev, u8 idx,
			     struct ieee80211_key_conf *key)
{
	enum mt76x02_cipher_type cipher;
	u8 key_data[32];
	u8 iv_data[8];
	u64 pn;

	cipher = mt76x02_mac_get_key_info(key, key_data);
	if (cipher == MT_CIPHER_NONE && key)
		return -EOPNOTSUPP;

	mt76_wr_copy(dev, MT_WCID_KEY(idx), key_data, sizeof(key_data));
	mt76_rmw_field(dev, MT_WCID_ATTR(idx), MT_WCID_ATTR_PKEY_MODE, cipher);

	memset(iv_data, 0, sizeof(iv_data));
	if (key) {
		mt76_rmw_field(dev, MT_WCID_ATTR(idx), MT_WCID_ATTR_PAIRWISE,
			       !!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE));

		pn = atomic64_read(&key->tx_pn);

		iv_data[3] = key->keyidx << 6;
		if (cipher >= MT_CIPHER_TKIP) {
			iv_data[3] |= 0x20;
			put_unaligned_le32(pn >> 16, &iv_data[4]);
		}

		if (cipher == MT_CIPHER_TKIP) {
			iv_data[0] = (pn >> 8) & 0xff;
			iv_data[1] = (iv_data[0] | 0x20) & 0x7f;
			iv_data[2] = pn & 0xff;
		} else if (cipher >= MT_CIPHER_AES_CCMP) {
			put_unaligned_le16((pn & 0xffff), &iv_data[0]);
		}
	}

	mt76_wr_copy(dev, MT_WCID_IV(idx), iv_data, sizeof(iv_data));

	return 0;
}
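
/*
 * Bind a WCID entry to a VIF and, for entries below 128, a peer MAC
 * address. The BSS index is split into a 3-bit field plus an extension
 * bit, covering up to 16 interfaces.
 */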
void mt76x02_mac_wcid_setup(struct mt76x02_dev *dev, u8 idx,
			    u8 vif_idx, u8 *mac)
{
	struct mt76_wcid_addr addr = {};
	u32 attr;

	attr = FIELD_PREP(MT_WCID_ATTR_BSS_IDX, vif_idx & 7) |
	       FIELD_PREP(MT_WCID_ATTR_BSS_IDX_EXT, !!(vif_idx & 8));

	mt76_wr(dev, MT_WCID_ATTR(idx), attr);

	if (idx >= 128)
		return;

	if (mac)
		memcpy(addr.macaddr, mac, ETH_ALEN);

	mt76_wr_copy(dev, MT_WCID_ADDR(idx), &addr, sizeof(addr));
}
EXPORT_SYMBOL_GPL(mt76x02_mac_wcid_setup);

void mt76x02_mac_wcid_set_drop(struct mt76x02_dev *dev, u8 idx, bool drop)
{
	u32 val = mt76_rr(dev, MT_WCID_DROP(idx));
	u32 bit = MT_WCID_DROP_MASK(idx);

	/* prevent unnecessary writes */
	if ((val & bit) != (bit * drop))
		mt76_wr(dev, MT_WCID_DROP(idx), (val & ~bit) | (bit * drop));
}
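
/*
 * Pack a mac80211 tx rate into the 16-bit rate value shared by TXWI
 * and RXWI: rate index, PHY type (CCK/OFDM/HT/VHT), bandwidth and
 * short-GI. Legacy rates are looked up via the band's hw_value.
 */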
static __le16
mt76x02_mac_tx_rate_val(struct mt76x02_dev *dev,
			const struct ieee80211_tx_rate *rate, u8 *nss_val)
{
	u8 phy, rate_idx, nss, bw = 0;
	u16 rateval;

	if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
		rate_idx = rate->idx;
		nss = 1 + (rate->idx >> 4);
		phy = MT_PHY_TYPE_VHT;
		if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH)
			bw = 2;
		else if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
			bw = 1;
	} else if (rate->flags & IEEE80211_TX_RC_MCS) {
		rate_idx = rate->idx;
		nss = 1 + (rate->idx >> 3);
		phy = MT_PHY_TYPE_HT;
		if (rate->flags & IEEE80211_TX_RC_GREEN_FIELD)
			phy = MT_PHY_TYPE_HT_GF;
		if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
			bw = 1;
	} else {
		const struct ieee80211_rate *r;
		int band = dev->mphy.chandef.chan->band;
		u16 val;

		r = &dev->mt76.hw->wiphy->bands[band]->bitrates[rate->idx];
		if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
			val = r->hw_value_short;
		else
			val = r->hw_value;

		phy = val >> 8;
		rate_idx = val & 0xff;
		nss = 1;
	}

	rateval = FIELD_PREP(MT_RXWI_RATE_INDEX, rate_idx);
	rateval |= FIELD_PREP(MT_RXWI_RATE_PHY, phy);
	rateval |= FIELD_PREP(MT_RXWI_RATE_BW, bw);
	if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
		rateval |= MT_RXWI_RATE_SGI;

	*nss_val = nss;
	return cpu_to_le16(rateval);
}

void mt76x02_mac_wcid_set_rate(struct mt76x02_dev *dev, struct mt76_wcid *wcid,
			       const struct ieee80211_tx_rate *rate)
{
	s8 max_txpwr_adj = mt76x02_tx_get_max_txpwr_adj(dev, rate);
	__le16 rateval;
	u32 tx_info;
	s8 nss;

	rateval = mt76x02_mac_tx_rate_val(dev, rate, &nss);
	tx_info = FIELD_PREP(MT_WCID_TX_INFO_RATE, rateval) |
		  FIELD_PREP(MT_WCID_TX_INFO_NSS, nss) |
		  FIELD_PREP(MT_WCID_TX_INFO_TXPWR_ADJ, max_txpwr_adj) |
		  MT_WCID_TX_INFO_SET;
	wcid->tx_info = tx_info;
}

void mt76x02_mac_set_short_preamble(struct mt76x02_dev *dev, bool enable)
{
	if (enable)
		mt76_set(dev, MT_AUTO_RSP_CFG, MT_AUTO_RSP_PREAMB_SHORT);
	else
		mt76_clear(dev, MT_AUTO_RSP_CFG, MT_AUTO_RSP_PREAMB_SHORT);
}
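
/*
 * Pop one entry from the TX status FIFO. The extension word is read
 * first; reading MT_TX_STAT_FIFO itself appears to advance the FIFO
 * (assumption based on the read ordering below).
 */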
bool mt76x02_mac_load_tx_status(struct mt76x02_dev *dev,
				struct mt76x02_tx_status *stat)
{
	u32 stat1, stat2;

	stat2 = mt76_rr(dev, MT_TX_STAT_FIFO_EXT);
	stat1 = mt76_rr(dev, MT_TX_STAT_FIFO);

	stat->valid = !!(stat1 & MT_TX_STAT_FIFO_VALID);
	if (!stat->valid)
		return false;

	stat->success = !!(stat1 & MT_TX_STAT_FIFO_SUCCESS);
	stat->aggr = !!(stat1 & MT_TX_STAT_FIFO_AGGR);
	stat->ack_req = !!(stat1 & MT_TX_STAT_FIFO_ACKREQ);
	stat->wcid = FIELD_GET(MT_TX_STAT_FIFO_WCID, stat1);
	stat->rate = FIELD_GET(MT_TX_STAT_FIFO_RATE, stat1);

	stat->retry = FIELD_GET(MT_TX_STAT_FIFO_EXT_RETRY, stat2);
	stat->pktid = FIELD_GET(MT_TX_STAT_FIFO_EXT_PKTID, stat2);

	trace_mac_txstat_fetch(dev, stat);

	return true;
}
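
/*
 * Decode a hardware rate value from a TX status report back into a
 * mac80211 ieee80211_tx_rate, restoring PHY type, bandwidth and SGI.
 */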
static int
mt76x02_mac_process_tx_rate(struct ieee80211_tx_rate *txrate, u16 rate,
			    enum nl80211_band band)
{
	u8 idx = FIELD_GET(MT_RXWI_RATE_INDEX, rate);

	txrate->idx = 0;
	txrate->flags = 0;
	txrate->count = 1;

	switch (FIELD_GET(MT_RXWI_RATE_PHY, rate)) {
	case MT_PHY_TYPE_OFDM:
		if (band == NL80211_BAND_2GHZ)
			idx += 4;

		txrate->idx = idx;
		return 0;
	case MT_PHY_TYPE_CCK:
		if (idx >= 8)
			idx -= 8;

		txrate->idx = idx;
		return 0;
	case MT_PHY_TYPE_HT_GF:
		txrate->flags |= IEEE80211_TX_RC_GREEN_FIELD;
		/* fall through */
	case MT_PHY_TYPE_HT:
		txrate->flags |= IEEE80211_TX_RC_MCS;
		txrate->idx = idx;
		break;
	case MT_PHY_TYPE_VHT:
		txrate->flags |= IEEE80211_TX_RC_VHT_MCS;
		txrate->idx = idx;
		break;
	default:
		return -EINVAL;
	}

	switch (FIELD_GET(MT_RXWI_RATE_BW, rate)) {
	case MT_PHY_BW_20:
		break;
	case MT_PHY_BW_40:
		txrate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
		break;
	case MT_PHY_BW_80:
		txrate->flags |= IEEE80211_TX_RC_80_MHZ_WIDTH;
		break;
	default:
		return -EINVAL;
	}

	if (rate & MT_RXWI_RATE_SGI)
		txrate->flags |= IEEE80211_TX_RC_SHORT_GI;

	return 0;
}

void mt76x02_mac_write_txwi(struct mt76x02_dev *dev, struct mt76x02_txwi *txwi,
			    struct sk_buff *skb, struct mt76_wcid *wcid,
			    struct ieee80211_sta *sta, int len)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_tx_rate *rate = &info->control.rates[0];
	struct ieee80211_key_conf *key = info->control.hw_key;
	u32 wcid_tx_info;
	u16 rate_ht_mask = FIELD_PREP(MT_RXWI_RATE_PHY, BIT(1) | BIT(2));
	u16 txwi_flags = 0;
	u8 nss;
	s8 txpwr_adj, max_txpwr_adj;
	u8 ccmp_pn[8], nstreams = dev->chainmask & 0xf;

	memset(txwi, 0, sizeof(*txwi));

	if (!info->control.hw_key && wcid && wcid->hw_key_idx != 0xff &&
	    ieee80211_has_protected(hdr->frame_control)) {
		wcid = NULL;
		ieee80211_get_tx_rates(info->control.vif, sta, skb,
				       info->control.rates, 1);
	}

	if (wcid)
		txwi->wcid = wcid->idx;
	else
		txwi->wcid = 0xff;

	if (wcid && wcid->sw_iv && key) {
		u64 pn = atomic64_inc_return(&key->tx_pn);

		ccmp_pn[0] = pn;
		ccmp_pn[1] = pn >> 8;
		ccmp_pn[2] = 0;
		ccmp_pn[3] = 0x20 | (key->keyidx << 6);
		ccmp_pn[4] = pn >> 16;
		ccmp_pn[5] = pn >> 24;
		ccmp_pn[6] = pn >> 32;
		ccmp_pn[7] = pn >> 40;
		txwi->iv = *((__le32 *)&ccmp_pn[0]);
		txwi->eiv = *((__le32 *)&ccmp_pn[4]);
	}

	if (wcid && (rate->idx < 0 || !rate->count)) {
		wcid_tx_info = wcid->tx_info;
		txwi->rate = FIELD_GET(MT_WCID_TX_INFO_RATE, wcid_tx_info);
		max_txpwr_adj = FIELD_GET(MT_WCID_TX_INFO_TXPWR_ADJ,
					  wcid_tx_info);
		nss = FIELD_GET(MT_WCID_TX_INFO_NSS, wcid_tx_info);
	} else {
		txwi->rate = mt76x02_mac_tx_rate_val(dev, rate, &nss);
		max_txpwr_adj = mt76x02_tx_get_max_txpwr_adj(dev, rate);
	}

	txpwr_adj = mt76x02_tx_get_txpwr_adj(dev, dev->txpower_conf,
					     max_txpwr_adj);
	txwi->ctl2 = FIELD_PREP(MT_TX_PWR_ADJ, txpwr_adj);

	if (nstreams > 1 && mt76_rev(&dev->mt76) >= MT76XX_REV_E4)
		txwi->txstream = 0x13;
	else if (nstreams > 1 && mt76_rev(&dev->mt76) >= MT76XX_REV_E3 &&
		 !(txwi->rate & cpu_to_le16(rate_ht_mask)))
		txwi->txstream = 0x93;

	if (is_mt76x2(dev) && (info->flags & IEEE80211_TX_CTL_LDPC))
		txwi->rate |= cpu_to_le16(MT_RXWI_RATE_LDPC);
	if ((info->flags & IEEE80211_TX_CTL_STBC) && nss == 1)
		txwi->rate |= cpu_to_le16(MT_RXWI_RATE_STBC);
	if (nss > 1 && sta && sta->smps_mode == IEEE80211_SMPS_DYNAMIC)
		txwi_flags |= MT_TXWI_FLAGS_MMPS;
	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
		txwi->ack_ctl |= MT_TXWI_ACK_CTL_REQ;
	if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
		txwi->ack_ctl |= MT_TXWI_ACK_CTL_NSEQ;
	if ((info->flags & IEEE80211_TX_CTL_AMPDU) && sta) {
		u8 ba_size = IEEE80211_MIN_AMPDU_BUF;

		ba_size <<= sta->ht_cap.ampdu_factor;
		ba_size = min_t(int, 63, ba_size - 1);
		if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
			ba_size = 0;
		txwi->ack_ctl |= FIELD_PREP(MT_TXWI_ACK_CTL_BA_WINDOW, ba_size);

		txwi_flags |= MT_TXWI_FLAGS_AMPDU |
			      FIELD_PREP(MT_TXWI_FLAGS_MPDU_DENSITY,
					 sta->ht_cap.ampdu_density);
	}

	if (ieee80211_is_probe_resp(hdr->frame_control) ||
	    ieee80211_is_beacon(hdr->frame_control))
		txwi_flags |= MT_TXWI_FLAGS_TS;

	txwi->flags |= cpu_to_le16(txwi_flags);
	txwi->len_ctl = cpu_to_le16(len);
}
EXPORT_SYMBOL_GPL(mt76x02_mac_write_txwi);
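
/*
 * Build the fallback entry following rates[idx]: VHT steps down one
 * MCS (or one spatial stream at MCS 0), HT handles the MCS 8 -> 0
 * quirk noted below, and legacy rates simply step down one index.
 */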
static void
mt76x02_tx_rate_fallback(struct ieee80211_tx_rate *rates, int idx, int phy)
{
	u8 mcs, nss;

	if (!idx)
		return;

	rates += idx - 1;
	rates[1] = rates[0];
	switch (phy) {
	case MT_PHY_TYPE_VHT:
		mcs = ieee80211_rate_get_vht_mcs(rates);
		nss = ieee80211_rate_get_vht_nss(rates);

		if (mcs == 0)
			nss = max_t(int, nss - 1, 1);
		else
			mcs--;

		ieee80211_rate_set_vht(rates + 1, mcs, nss);
		break;
	case MT_PHY_TYPE_HT_GF:
	case MT_PHY_TYPE_HT:
		/* MCS 8 falls back to MCS 0 */
		if (rates[0].idx == 8) {
			rates[1].idx = 0;
			break;
		}
		/* fall through */
	default:
		rates[1].idx = max_t(int, rates[0].idx - 1, 0);
		break;
	}
}
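
/*
 * Expand a single hardware status report into the mac80211 rate table:
 * the first entry comes from the report (or the cached WCID rate), and
 * the remaining slots are synthesized with mt76x02_tx_rate_fallback()
 * until the final reported rate is reached.
 */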
static void
mt76x02_mac_fill_tx_status(struct mt76x02_dev *dev, struct mt76x02_sta *msta,
			   struct ieee80211_tx_info *info,
			   struct mt76x02_tx_status *st, int n_frames)
{
	struct ieee80211_tx_rate *rate = info->status.rates;
	struct ieee80211_tx_rate last_rate;
	u16 first_rate;
	int retry = st->retry;
	int phy;
	int i;

	if (!n_frames)
		return;

	phy = FIELD_GET(MT_RXWI_RATE_PHY, st->rate);

	if (st->pktid & MT_PACKET_ID_HAS_RATE) {
		first_rate = st->rate & ~MT_PKTID_RATE;
		first_rate |= st->pktid & MT_PKTID_RATE;

		mt76x02_mac_process_tx_rate(&rate[0], first_rate,
					    dev->mphy.chandef.chan->band);
	} else if (rate[0].idx < 0) {
		if (!msta)
			return;

		mt76x02_mac_process_tx_rate(&rate[0], msta->wcid.tx_info,
					    dev->mphy.chandef.chan->band);
	}

	mt76x02_mac_process_tx_rate(&last_rate, st->rate,
				    dev->mphy.chandef.chan->band);

	for (i = 0; i < ARRAY_SIZE(info->status.rates); i++) {
		retry--;
		if (i + 1 == ARRAY_SIZE(info->status.rates)) {
			info->status.rates[i] = last_rate;
			info->status.rates[i].count = max_t(int, retry, 1);
			break;
		}

		mt76x02_tx_rate_fallback(info->status.rates, i, phy);
		if (info->status.rates[i].idx == last_rate.idx)
			break;
	}

	if (i + 1 < ARRAY_SIZE(info->status.rates)) {
		info->status.rates[i + 1].idx = -1;
		info->status.rates[i + 1].count = 0;
	}

	info->status.ampdu_len = n_frames;
	info->status.ampdu_ack_len = st->success ? n_frames : 0;

	if (st->aggr)
		info->flags |= IEEE80211_TX_CTL_AMPDU |
			       IEEE80211_TX_STAT_AMPDU;

	if (!st->ack_req)
		info->flags |= IEEE80211_TX_CTL_NO_ACK;
	else if (st->success)
		info->flags |= IEEE80211_TX_STAT_ACK;
}
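
/*
 * Deliver one FIFO status report to mac80211. Identical consecutive
 * reports for the same station are batched in msta->status (up to 32
 * frames) and only flushed when the rate or WCID changes, which keeps
 * per-frame status overhead low on aggregated traffic.
 */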
void mt76x02_send_tx_status(struct mt76x02_dev *dev,
			    struct mt76x02_tx_status *stat, u8 *update)
{
	struct ieee80211_tx_info info = {};
	struct ieee80211_tx_status status = {
		.info = &info
	};
	static const u8 ac_to_tid[4] = {
		[IEEE80211_AC_BE] = 0,
		[IEEE80211_AC_BK] = 1,
		[IEEE80211_AC_VI] = 4,
		[IEEE80211_AC_VO] = 6
	};
	struct mt76_wcid *wcid = NULL;
	struct mt76x02_sta *msta = NULL;
	struct mt76_dev *mdev = &dev->mt76;
	struct sk_buff_head list;
	u32 duration = 0;
	u8 cur_pktid;
	u32 ac = 0;
	int len = 0;

	if (stat->pktid == MT_PACKET_ID_NO_ACK)
		return;

	rcu_read_lock();

	if (stat->wcid < ARRAY_SIZE(dev->mt76.wcid))
		wcid = rcu_dereference(dev->mt76.wcid[stat->wcid]);

	if (wcid && wcid->sta) {
		void *priv;

		priv = msta = container_of(wcid, struct mt76x02_sta, wcid);
		status.sta = container_of(priv, struct ieee80211_sta,
					  drv_priv);
	}

	mt76_tx_status_lock(mdev, &list);

	if (wcid) {
		if (mt76_is_skb_pktid(stat->pktid))
			status.skb = mt76_tx_status_skb_get(mdev, wcid,
							    stat->pktid, &list);
		if (status.skb)
			status.info = IEEE80211_SKB_CB(status.skb);
	}

	if (!status.skb && !(stat->pktid & MT_PACKET_ID_HAS_RATE)) {
		mt76_tx_status_unlock(mdev, &list);
		goto out;
	}

	if (msta && stat->aggr && !status.skb) {
		u32 stat_val, stat_cache;

		stat_val = stat->rate;
		stat_val |= ((u32)stat->retry) << 16;
		stat_cache = msta->status.rate;
		stat_cache |= ((u32)msta->status.retry) << 16;

		if (*update == 0 && stat_val == stat_cache &&
		    stat->wcid == msta->status.wcid && msta->n_frames < 32) {
			msta->n_frames++;
			mt76_tx_status_unlock(mdev, &list);
			goto out;
		}

		cur_pktid = msta->status.pktid;
		mt76x02_mac_fill_tx_status(dev, msta, status.info,
					   &msta->status, msta->n_frames);

		msta->status = *stat;
		msta->n_frames = 1;
		*update = 0;
	} else {
		cur_pktid = stat->pktid;
		mt76x02_mac_fill_tx_status(dev, msta, status.info, stat, 1);
		*update = 1;
	}

	if (status.skb) {
		info = *status.info;
		len = status.skb->len;
		ac = skb_get_queue_mapping(status.skb);
		mt76_tx_status_skb_done(mdev, status.skb, &list);
	} else if (msta) {
		len = status.info->status.ampdu_len * ewma_pktlen_read(&msta->pktlen);
		ac = FIELD_GET(MT_PKTID_AC, cur_pktid);
	}

	mt76_tx_status_unlock(mdev, &list);

	if (!status.skb)
		ieee80211_tx_status_ext(mt76_hw(dev), &status);

	if (!len)
		goto out;

	duration = ieee80211_calc_tx_airtime(mt76_hw(dev), &info, len);

	spin_lock_bh(&dev->mt76.cc_lock);
	dev->tx_airtime += duration;
	spin_unlock_bh(&dev->mt76.cc_lock);

	if (msta)
		ieee80211_sta_register_airtime(status.sta, ac_to_tid[ac], duration, 0);

out:
	rcu_read_unlock();
}
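
/*
 * Translate the RXWI rate field into mt76_rx_status encoding, rate
 * index, NSS and bandwidth for mac80211.
 */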
static int
mt76x02_mac_process_rate(struct mt76x02_dev *dev,
			 struct mt76_rx_status *status,
			 u16 rate)
{
	u8 idx = FIELD_GET(MT_RXWI_RATE_INDEX, rate);

	switch (FIELD_GET(MT_RXWI_RATE_PHY, rate)) {
	case MT_PHY_TYPE_OFDM:
		if (idx >= 8)
			idx = 0;

		if (status->band == NL80211_BAND_2GHZ)
			idx += 4;

		status->rate_idx = idx;
		return 0;
	case MT_PHY_TYPE_CCK:
		if (idx >= 8) {
			idx -= 8;
			status->enc_flags |= RX_ENC_FLAG_SHORTPRE;
		}

		if (idx >= 4)
			idx = 0;

		status->rate_idx = idx;
		return 0;
	case MT_PHY_TYPE_HT_GF:
		status->enc_flags |= RX_ENC_FLAG_HT_GF;
		/* fall through */
	case MT_PHY_TYPE_HT:
		status->encoding = RX_ENC_HT;
		status->rate_idx = idx;
		break;
	case MT_PHY_TYPE_VHT: {
		u8 n_rxstream = dev->chainmask & 0xf;

		status->encoding = RX_ENC_VHT;
		status->rate_idx = FIELD_GET(MT_RATE_INDEX_VHT_IDX, idx);
		status->nss = min_t(u8, n_rxstream,
				    FIELD_GET(MT_RATE_INDEX_VHT_NSS, idx) + 1);
		break;
	}
	default:
		return -EINVAL;
	}

	if (rate & MT_RXWI_RATE_LDPC)
		status->enc_flags |= RX_ENC_FLAG_LDPC;

	if (rate & MT_RXWI_RATE_SGI)
		status->enc_flags |= RX_ENC_FLAG_SHORT_GI;

	if (rate & MT_RXWI_RATE_STBC)
		status->enc_flags |= 1 << RX_ENC_FLAG_STBC_SHIFT;

	switch (FIELD_GET(MT_RXWI_RATE_BW, rate)) {
	case MT_PHY_BW_20:
		break;
	case MT_PHY_BW_40:
		status->bw = RATE_INFO_BW_40;
		break;
	case MT_PHY_BW_80:
		status->bw = RATE_INFO_BW_80;
		break;
	default:
		break;
	}

	return 0;
}

void mt76x02_mac_setaddr(struct mt76x02_dev *dev, const u8 *addr)
{
	static const u8 null_addr[ETH_ALEN] = {};
	int i;

	ether_addr_copy(dev->mt76.macaddr, addr);

	if (!is_valid_ether_addr(dev->mt76.macaddr)) {
		eth_random_addr(dev->mt76.macaddr);
		dev_info(dev->mt76.dev,
			 "Invalid MAC address, using random address %pM\n",
			 dev->mt76.macaddr);
	}

	mt76_wr(dev, MT_MAC_ADDR_DW0, get_unaligned_le32(dev->mt76.macaddr));
	mt76_wr(dev, MT_MAC_ADDR_DW1,
		get_unaligned_le16(dev->mt76.macaddr + 4) |
		FIELD_PREP(MT_MAC_ADDR_DW1_U2ME_MASK, 0xff));

	mt76_wr(dev, MT_MAC_BSSID_DW0,
		get_unaligned_le32(dev->mt76.macaddr));
	mt76_wr(dev, MT_MAC_BSSID_DW1,
		get_unaligned_le16(dev->mt76.macaddr + 4) |
		FIELD_PREP(MT_MAC_BSSID_DW1_MBSS_MODE, 3) | /* 8 APs + 8 STAs */
		MT_MAC_BSSID_DW1_MBSS_LOCAL_BIT);
	/* enable 7 additional beacon slots and control them with bypass mask */
	mt76_rmw_field(dev, MT_MAC_BSSID_DW1, MT_MAC_BSSID_DW1_MBEACON_N, 7);

	for (i = 0; i < 16; i++)
		mt76x02_mac_set_bssid(dev, i, null_addr);
}
EXPORT_SYMBOL_GPL(mt76x02_mac_setaddr);

static int
mt76x02_mac_get_rssi(struct mt76x02_dev *dev, s8 rssi, int chain)
{
	struct mt76x02_rx_freq_cal *cal = &dev->cal.rx;

	rssi += cal->rssi_offset[chain];
	rssi -= cal->lna_gain;

	return rssi;
}
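
/*
 * Parse one received frame's RXWI descriptor: strip the L2 pad and
 * security IV, fill mt76_rx_status (signal, A-MPDU reference, rate
 * info) and trim the skb to the MPDU length reported by hardware.
 */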
int mt76x02_mac_process_rx(struct mt76x02_dev *dev, struct sk_buff *skb,
			   void *rxi)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76x02_rxwi *rxwi = rxi;
	struct mt76x02_sta *sta;
	u32 rxinfo = le32_to_cpu(rxwi->rxinfo);
	u32 ctl = le32_to_cpu(rxwi->ctl);
	u16 rate = le16_to_cpu(rxwi->rate);
	u16 tid_sn = le16_to_cpu(rxwi->tid_sn);
	bool unicast = rxwi->rxinfo & cpu_to_le32(MT_RXINFO_UNICAST);
	int pad_len = 0, nstreams = dev->chainmask & 0xf;
	s8 signal;
	u8 pn_len;
	u8 wcid;
	int len;

	if (!test_bit(MT76_STATE_RUNNING, &dev->mphy.state))
		return -EINVAL;

	if (rxinfo & MT_RXINFO_L2PAD)
		pad_len += 2;

	if (rxinfo & MT_RXINFO_DECRYPT) {
		status->flag |= RX_FLAG_DECRYPTED;
		status->flag |= RX_FLAG_MMIC_STRIPPED;
		status->flag |= RX_FLAG_MIC_STRIPPED;
		status->flag |= RX_FLAG_IV_STRIPPED;
	}

	wcid = FIELD_GET(MT_RXWI_CTL_WCID, ctl);
	sta = mt76x02_rx_get_sta(&dev->mt76, wcid);
	status->wcid = mt76x02_rx_get_sta_wcid(sta, unicast);

	len = FIELD_GET(MT_RXWI_CTL_MPDU_LEN, ctl);
	pn_len = FIELD_GET(MT_RXINFO_PN_LEN, rxinfo);
	if (pn_len) {
		int offset = ieee80211_get_hdrlen_from_skb(skb) + pad_len;
		u8 *data = skb->data + offset;

		status->iv[0] = data[7];
		status->iv[1] = data[6];
		status->iv[2] = data[5];
		status->iv[3] = data[4];
		status->iv[4] = data[1];
		status->iv[5] = data[0];

		/*
		 * Driver CCMP validation can't deal with fragments.
		 * Let mac80211 take care of it.
		 */
		if (rxinfo & MT_RXINFO_FRAG) {
			status->flag &= ~RX_FLAG_IV_STRIPPED;
		} else {
			pad_len += pn_len << 2;
			len -= pn_len << 2;
		}
	}

	mt76x02_remove_hdr_pad(skb, pad_len);

	if ((rxinfo & MT_RXINFO_BA) && !(rxinfo & MT_RXINFO_NULL))
		status->aggr = true;

	if (rxinfo & MT_RXINFO_AMPDU) {
		status->flag |= RX_FLAG_AMPDU_DETAILS;
		status->ampdu_ref = dev->ampdu_ref;

		/*
		 * When receiving an A-MPDU subframe and RSSI info is not valid,
		 * we can assume that more subframes belonging to the same A-MPDU
		 * are coming. The last one will have valid RSSI info
		 */
		if (rxinfo & MT_RXINFO_RSSI) {
			if (!++dev->ampdu_ref)
				dev->ampdu_ref++;
		}
	}

	if (WARN_ON_ONCE(len > skb->len))
		return -EINVAL;

	pskb_trim(skb, len);

	status->chains = BIT(0);
	signal = mt76x02_mac_get_rssi(dev, rxwi->rssi[0], 0);
	status->chain_signal[0] = signal;
	if (nstreams > 1) {
		status->chains |= BIT(1);
		status->chain_signal[1] = mt76x02_mac_get_rssi(dev,
							       rxwi->rssi[1],
							       1);
		signal = max_t(s8, signal, status->chain_signal[1]);
	}
	status->signal = signal;
	status->freq = dev->mphy.chandef.chan->center_freq;
	status->band = dev->mphy.chandef.chan->band;

	status->tid = FIELD_GET(MT_RXWI_TID, tid_sn);
	status->seqno = FIELD_GET(MT_RXWI_SN, tid_sn);

	return mt76x02_mac_process_rate(dev, status, rate);
}

void mt76x02_mac_poll_tx_status(struct mt76x02_dev *dev, bool irq)
{
	struct mt76x02_tx_status stat = {};
	u8 update = 1;
	bool ret;

	if (!test_bit(MT76_STATE_RUNNING, &dev->mphy.state))
		return;

	trace_mac_txstat_poll(dev);

	while (!irq || !kfifo_is_full(&dev->txstatus_fifo)) {
		if (!spin_trylock(&dev->txstatus_fifo_lock))
			break;

		ret = mt76x02_mac_load_tx_status(dev, &stat);
		spin_unlock(&dev->txstatus_fifo_lock);

		if (!ret)
			break;

		if (!irq) {
			mt76x02_send_tx_status(dev, &stat, &update);
			continue;
		}

		kfifo_put(&dev->txstatus_fifo, stat);
	}
}
*mdev
, enum mt76_txq_id qid
,
899 struct mt76_queue_entry
*e
)
901 struct mt76x02_dev
*dev
= container_of(mdev
, struct mt76x02_dev
, mt76
);
902 struct mt76x02_txwi
*txwi
;
906 dev_kfree_skb_any(e
->skb
);
910 mt76x02_mac_poll_tx_status(dev
, false);
912 txwi_ptr
= mt76_get_txwi_ptr(mdev
, e
->txwi
);
913 txwi
= (struct mt76x02_txwi
*)txwi_ptr
;
914 trace_mac_txdone(mdev
, txwi
->wcid
, txwi
->pktid
);
916 mt76_tx_complete_skb(mdev
, e
->skb
);
918 EXPORT_SYMBOL_GPL(mt76x02_tx_complete_skb
);

void mt76x02_mac_set_rts_thresh(struct mt76x02_dev *dev, u32 val)
{
	u32 data = 0;

	if (val != ~0)
		data = FIELD_PREP(MT_PROT_CFG_CTRL, 1) |
		       MT_PROT_CFG_RTS_THRESH;

	mt76_rmw_field(dev, MT_TX_RTS_CFG, MT_TX_RTS_CFG_THRESH, val);

	mt76_rmw(dev, MT_CCK_PROT_CFG,
		 MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
	mt76_rmw(dev, MT_OFDM_PROT_CFG,
		 MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
}
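
/*
 * prot[0..5] shadow the CCK, OFDM, MM20, MM40, GF20 and GF40
 * protection registers (assumed layout: MT_CCK_PROT_CFG + i * 4);
 * vht_prot[0..2] cover the VHT variants at MT_TX_PROT_CFG6/7/8. Which
 * entries get RTS/CTS depends on the HT operation mode.
 */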
void mt76x02_mac_set_tx_protection(struct mt76x02_dev *dev, bool legacy_prot,
				   int ht_mode)
{
	int mode = ht_mode & IEEE80211_HT_OP_MODE_PROTECTION;
	bool non_gf = !!(ht_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
	u32 prot[6];
	u32 vht_prot[3];
	int i;
	u16 rts_thr;

	for (i = 0; i < ARRAY_SIZE(prot); i++) {
		prot[i] = mt76_rr(dev, MT_CCK_PROT_CFG + i * 4);
		prot[i] &= ~MT_PROT_CFG_CTRL;
		if (i >= 2)
			prot[i] &= ~MT_PROT_CFG_RATE;
	}

	for (i = 0; i < ARRAY_SIZE(vht_prot); i++) {
		vht_prot[i] = mt76_rr(dev, MT_TX_PROT_CFG6 + i * 4);
		vht_prot[i] &= ~(MT_PROT_CFG_CTRL | MT_PROT_CFG_RATE);
	}

	rts_thr = mt76_get_field(dev, MT_TX_RTS_CFG, MT_TX_RTS_CFG_THRESH);

	if (rts_thr != 0xffff)
		prot[0] |= MT_PROT_CTRL_RTS_CTS;

	if (legacy_prot) {
		prot[1] |= MT_PROT_CTRL_CTS2SELF;

		prot[2] |= MT_PROT_RATE_CCK_11;
		prot[3] |= MT_PROT_RATE_CCK_11;
		prot[4] |= MT_PROT_RATE_CCK_11;
		prot[5] |= MT_PROT_RATE_CCK_11;

		vht_prot[0] |= MT_PROT_RATE_CCK_11;
		vht_prot[1] |= MT_PROT_RATE_CCK_11;
		vht_prot[2] |= MT_PROT_RATE_CCK_11;
	} else {
		if (rts_thr != 0xffff)
			prot[1] |= MT_PROT_CTRL_RTS_CTS;

		prot[2] |= MT_PROT_RATE_OFDM_24;
		prot[3] |= MT_PROT_RATE_DUP_OFDM_24;
		prot[4] |= MT_PROT_RATE_OFDM_24;
		prot[5] |= MT_PROT_RATE_DUP_OFDM_24;

		vht_prot[0] |= MT_PROT_RATE_OFDM_24;
		vht_prot[1] |= MT_PROT_RATE_DUP_OFDM_24;
		vht_prot[2] |= MT_PROT_RATE_SGI_OFDM_24;
	}

	switch (mode) {
	case IEEE80211_HT_OP_MODE_PROTECTION_NONMEMBER:
	case IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED:
		prot[2] |= MT_PROT_CTRL_RTS_CTS;
		prot[3] |= MT_PROT_CTRL_RTS_CTS;
		prot[4] |= MT_PROT_CTRL_RTS_CTS;
		prot[5] |= MT_PROT_CTRL_RTS_CTS;
		vht_prot[0] |= MT_PROT_CTRL_RTS_CTS;
		vht_prot[1] |= MT_PROT_CTRL_RTS_CTS;
		vht_prot[2] |= MT_PROT_CTRL_RTS_CTS;
		break;
	case IEEE80211_HT_OP_MODE_PROTECTION_20MHZ:
		prot[3] |= MT_PROT_CTRL_RTS_CTS;
		prot[5] |= MT_PROT_CTRL_RTS_CTS;
		vht_prot[1] |= MT_PROT_CTRL_RTS_CTS;
		vht_prot[2] |= MT_PROT_CTRL_RTS_CTS;
		break;
	}

	if (non_gf) {
		prot[4] |= MT_PROT_CTRL_RTS_CTS;
		prot[5] |= MT_PROT_CTRL_RTS_CTS;
	}

	for (i = 0; i < ARRAY_SIZE(prot); i++)
		mt76_wr(dev, MT_CCK_PROT_CFG + i * 4, prot[i]);

	for (i = 0; i < ARRAY_SIZE(vht_prot); i++)
		mt76_wr(dev, MT_TX_PROT_CFG6 + i * 4, vht_prot[i]);
}

void mt76x02_update_channel(struct mt76_dev *mdev)
{
	struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
	struct mt76_channel_state *state;

	state = mdev->phy.chan_state;
	state->cc_busy += mt76_rr(dev, MT_CH_BUSY);

	spin_lock_bh(&dev->mt76.cc_lock);
	state->cc_tx += dev->tx_airtime;
	dev->tx_airtime = 0;
	spin_unlock_bh(&dev->mt76.cc_lock);
}
EXPORT_SYMBOL_GPL(mt76x02_update_channel);

static void mt76x02_check_mac_err(struct mt76x02_dev *dev)
{
	u32 val = mt76_rr(dev, 0x10f4);

	if (!(val & BIT(29)) || !(val & (BIT(7) | BIT(5))))
		return;

	dev_err(dev->mt76.dev, "mac specific condition occurred\n");

	mt76_set(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_RESET_CSR);
	udelay(10);
	mt76_wr(dev, MT_MAC_SYS_CTRL,
		MT_MAC_SYS_CTRL_ENABLE_TX | MT_MAC_SYS_CTRL_ENABLE_RX);
}

static void
mt76x02_edcca_tx_enable(struct mt76x02_dev *dev, bool enable)
{
	if (enable) {
		u32 data;

		mt76_set(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_TX);
		mt76_set(dev, MT_AUTO_RSP_CFG, MT_AUTO_RSP_EN);
		/* enable pa-lna */
		data = mt76_rr(dev, MT_TX_PIN_CFG);
		data |= MT_TX_PIN_CFG_TXANT |
			MT_TX_PIN_CFG_RXANT |
			MT_TX_PIN_RFTR_EN |
			MT_TX_PIN_TRSW_EN;
		mt76_wr(dev, MT_TX_PIN_CFG, data);
	} else {
		mt76_clear(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_TX);
		mt76_clear(dev, MT_AUTO_RSP_CFG, MT_AUTO_RSP_EN);
		/* disable pa-lna */
		mt76_clear(dev, MT_TX_PIN_CFG, MT_TX_PIN_CFG_TXANT);
		mt76_clear(dev, MT_TX_PIN_CFG, MT_TX_PIN_CFG_RXANT);
	}
	dev->ed_tx_blocked = !enable;
}

void mt76x02_edcca_init(struct mt76x02_dev *dev)
{
	dev->ed_trigger = 0;
	dev->ed_silent = 0;

	if (dev->ed_monitor) {
		struct ieee80211_channel *chan = dev->mphy.chandef.chan;
		u8 ed_th = chan->band == NL80211_BAND_5GHZ ? 0x0e : 0x20;

		mt76_clear(dev, MT_TX_LINK_CFG, MT_TX_CFACK_EN);
		mt76_set(dev, MT_TXOP_CTRL_CFG, MT_TXOP_ED_CCA_EN);
		mt76_rmw(dev, MT_BBP(AGC, 2), GENMASK(15, 0),
			 ed_th << 8 | ed_th);
		mt76_set(dev, MT_TXOP_HLDR_ET, MT_TXOP_HLDR_TX40M_BLK_EN);
	} else {
		mt76_set(dev, MT_TX_LINK_CFG, MT_TX_CFACK_EN);
		mt76_clear(dev, MT_TXOP_CTRL_CFG, MT_TXOP_ED_CCA_EN);
		if (is_mt76x2(dev)) {
			mt76_wr(dev, MT_BBP(AGC, 2), 0x00007070);
			mt76_set(dev, MT_TXOP_HLDR_ET,
				 MT_TXOP_HLDR_TX40M_BLK_EN);
		} else {
			mt76_wr(dev, MT_BBP(AGC, 2), 0x003a6464);
			mt76_clear(dev, MT_TXOP_HLDR_ET,
				   MT_TXOP_HLDR_TX40M_BLK_EN);
		}
	}
	mt76x02_edcca_tx_enable(dev, true);
	dev->ed_monitor_learning = true;

	/* clear previous CCA timer value */
	mt76_rr(dev, MT_ED_CCA_TIMER);
	dev->ed_time = ktime_get_boottime();
}
EXPORT_SYMBOL_GPL(mt76x02_edcca_init);

#define MT_EDCCA_TH		92
#define MT_EDCCA_BLOCK_TH	2
#define MT_EDCCA_LEARN_TH	50
#define MT_EDCCA_LEARN_CCA	180
#define MT_EDCCA_LEARN_TIMEOUT	(20 * HZ)
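
/*
 * EDCCA heuristics: a sampling interval whose busy percentage exceeds
 * MT_EDCCA_TH counts as a trigger. MT_EDCCA_BLOCK_TH consecutive
 * triggers (or silent intervals) toggle TX blocking, and sustained
 * triggers combined with a high false-CCA count end the learning
 * phase.
 */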
static void mt76x02_edcca_check(struct mt76x02_dev *dev)
{
	ktime_t cur_time;
	u32 active, val, busy;

	cur_time = ktime_get_boottime();
	val = mt76_rr(dev, MT_ED_CCA_TIMER);

	active = ktime_to_us(ktime_sub(cur_time, dev->ed_time));
	dev->ed_time = cur_time;

	busy = (val * 100) / active;
	busy = min_t(u32, busy, 100);

	if (busy > MT_EDCCA_TH) {
		dev->ed_trigger++;
		dev->ed_silent = 0;
	} else {
		dev->ed_silent++;
		dev->ed_trigger = 0;
	}

	if (dev->cal.agc_lowest_gain &&
	    dev->cal.false_cca > MT_EDCCA_LEARN_CCA &&
	    dev->ed_trigger > MT_EDCCA_LEARN_TH) {
		dev->ed_monitor_learning = false;
		dev->ed_trigger_timeout = jiffies + 20 * HZ;
	} else if (!dev->ed_monitor_learning &&
		   time_is_after_jiffies(dev->ed_trigger_timeout)) {
		dev->ed_monitor_learning = true;
		mt76x02_edcca_tx_enable(dev, true);
	}

	if (dev->ed_monitor_learning)
		return;

	if (dev->ed_trigger > MT_EDCCA_BLOCK_TH && !dev->ed_tx_blocked)
		mt76x02_edcca_tx_enable(dev, false);
	else if (dev->ed_silent > MT_EDCCA_BLOCK_TH && dev->ed_tx_blocked)
		mt76x02_edcca_tx_enable(dev, true);
}

void mt76x02_mac_work(struct work_struct *work)
{
	struct mt76x02_dev *dev = container_of(work, struct mt76x02_dev,
					       mt76.mac_work.work);
	int i, idx;

	mutex_lock(&dev->mt76.mutex);

	mt76_update_survey(&dev->mt76);
	for (i = 0, idx = 0; i < 16; i++) {
		u32 val = mt76_rr(dev, MT_TX_AGG_CNT(i));

		dev->mt76.aggr_stats[idx++] += val & 0xffff;
		dev->mt76.aggr_stats[idx++] += val >> 16;
	}

	if (!dev->mt76.beacon_mask)
		mt76x02_check_mac_err(dev);

	if (dev->ed_monitor)
		mt76x02_edcca_check(dev);

	mutex_unlock(&dev->mt76.mutex);

	mt76_tx_status_check(&dev->mt76, NULL, false);

	ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mt76.mac_work,
				     MT_MAC_WORK_INTERVAL);
}

void mt76x02_mac_cc_reset(struct mt76x02_dev *dev)
{
	dev->mphy.survey_time = ktime_get_boottime();

	mt76_wr(dev, MT_CH_TIME_CFG,
		MT_CH_TIME_CFG_TIMER_EN |
		MT_CH_TIME_CFG_TX_AS_BUSY |
		MT_CH_TIME_CFG_RX_AS_BUSY |
		MT_CH_TIME_CFG_NAV_AS_BUSY |
		MT_CH_TIME_CFG_EIFS_AS_BUSY |
		MT_CH_CCA_RC_EN |
		FIELD_PREP(MT_CH_TIME_CFG_CH_TIMER_CLR, 1));

	/* channel cycle counters read-and-clear */
	mt76_rr(dev, MT_CH_BUSY);
	mt76_rr(dev, MT_CH_IDLE);
}
EXPORT_SYMBOL_GPL(mt76x02_mac_cc_reset);

void mt76x02_mac_set_bssid(struct mt76x02_dev *dev, u8 idx, const u8 *addr)
{
	idx &= 7;
	mt76_wr(dev, MT_MAC_APC_BSSID_L(idx), get_unaligned_le32(addr));
	mt76_rmw_field(dev, MT_MAC_APC_BSSID_H(idx), MT_MAC_APC_BSSID_H_ADDR,
		       get_unaligned_le16(addr + 4));
}