// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
 */

#include "mt76x02.h"
#include "mt76x02_trace.h"

void mt76x02_mac_reset_counters(struct mt76x02_dev *dev)
{
	int i;

	mt76_rr(dev, MT_RX_STAT_0);
	mt76_rr(dev, MT_RX_STAT_1);
	mt76_rr(dev, MT_RX_STAT_2);
	mt76_rr(dev, MT_TX_STA_0);
	mt76_rr(dev, MT_TX_STA_1);
	mt76_rr(dev, MT_TX_STA_2);

	for (i = 0; i < 16; i++)
		mt76_rr(dev, MT_TX_AGG_CNT(i));

	for (i = 0; i < 16; i++)
		mt76_rr(dev, MT_TX_STAT_FIFO);

	memset(dev->mphy.aggr_stats, 0, sizeof(dev->mphy.aggr_stats));
}
EXPORT_SYMBOL_GPL(mt76x02_mac_reset_counters);
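/*
 * Map a mac80211 key to the on-chip cipher type and copy the key
 * material into a 32-byte buffer matching the hardware key slot
 * layout. MT76X02_CIPHER_NONE for a non-NULL key means the cipher
 * cannot be offloaded; callers return -EOPNOTSUPP in that case so
 * mac80211 falls back to software encryption.
 */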
static enum mt76x02_cipher_type
mt76x02_mac_get_key_info(struct ieee80211_key_conf *key, u8 *key_data)
{
	memset(key_data, 0, 32);
	if (!key)
		return MT76X02_CIPHER_NONE;

	if (key->keylen > 32)
		return MT76X02_CIPHER_NONE;

	memcpy(key_data, key->key, key->keylen);

	switch (key->cipher) {
	case WLAN_CIPHER_SUITE_WEP40:
		return MT76X02_CIPHER_WEP40;
	case WLAN_CIPHER_SUITE_WEP104:
		return MT76X02_CIPHER_WEP104;
	case WLAN_CIPHER_SUITE_TKIP:
		return MT76X02_CIPHER_TKIP;
	case WLAN_CIPHER_SUITE_CCMP:
		return MT76X02_CIPHER_AES_CCMP;
	default:
		return MT76X02_CIPHER_NONE;
	}
}
int mt76x02_mac_shared_key_setup(struct mt76x02_dev *dev, u8 vif_idx,
				 u8 key_idx, struct ieee80211_key_conf *key)
{
	enum mt76x02_cipher_type cipher;
	u8 key_data[32];
	u32 val;

	cipher = mt76x02_mac_get_key_info(key, key_data);
	if (cipher == MT76X02_CIPHER_NONE && key)
		return -EOPNOTSUPP;

	val = mt76_rr(dev, MT_SKEY_MODE(vif_idx));
	val &= ~(MT_SKEY_MODE_MASK << MT_SKEY_MODE_SHIFT(vif_idx, key_idx));
	val |= cipher << MT_SKEY_MODE_SHIFT(vif_idx, key_idx);
	mt76_wr(dev, MT_SKEY_MODE(vif_idx), val);

	mt76_wr_copy(dev, MT_SKEY(vif_idx, key_idx), key_data,
		     sizeof(key_data));

	return 0;
}
EXPORT_SYMBOL_GPL(mt76x02_mac_shared_key_setup);
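/*
 * Resync mac80211's tx_pn counter with the IV/EIV words the hardware
 * maintains per WCID: the extended IV carries packet number bits
 * 16-47, while the low 16 bits sit in different IV bytes for TKIP
 * and CCMP (hence the two cases below).
 */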
void mt76x02_mac_wcid_sync_pn(struct mt76x02_dev *dev, u8 idx,
			      struct ieee80211_key_conf *key)
{
	enum mt76x02_cipher_type cipher;
	u8 key_data[32];
	u32 iv, eiv;
	u64 pn;

	cipher = mt76x02_mac_get_key_info(key, key_data);
	iv = mt76_rr(dev, MT_WCID_IV(idx));
	eiv = mt76_rr(dev, MT_WCID_IV(idx) + 4);

	pn = (u64)eiv << 16;
	if (cipher == MT76X02_CIPHER_TKIP) {
		pn |= (iv >> 16) & 0xff;
		pn |= (iv & 0xff) << 8;
	} else if (cipher >= MT76X02_CIPHER_AES_CCMP) {
		pn |= iv & 0xffff;
	} else {
		return;
	}

	atomic64_set(&key->tx_pn, pn);
}
int mt76x02_mac_wcid_set_key(struct mt76x02_dev *dev, u8 idx,
			     struct ieee80211_key_conf *key)
{
	enum mt76x02_cipher_type cipher;
	u8 key_data[32];
	u8 iv_data[8];
	u64 pn;

	cipher = mt76x02_mac_get_key_info(key, key_data);
	if (cipher == MT76X02_CIPHER_NONE && key)
		return -EOPNOTSUPP;

	mt76_wr_copy(dev, MT_WCID_KEY(idx), key_data, sizeof(key_data));
	mt76_rmw_field(dev, MT_WCID_ATTR(idx), MT_WCID_ATTR_PKEY_MODE, cipher);

	memset(iv_data, 0, sizeof(iv_data));
	if (key) {
		mt76_rmw_field(dev, MT_WCID_ATTR(idx), MT_WCID_ATTR_PAIRWISE,
			       !!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE));

		pn = atomic64_read(&key->tx_pn);

		iv_data[3] = key->keyidx << 6;
		if (cipher >= MT76X02_CIPHER_TKIP) {
			iv_data[3] |= 0x20;
			put_unaligned_le32(pn >> 16, &iv_data[4]);
		}

		if (cipher == MT76X02_CIPHER_TKIP) {
			iv_data[0] = (pn >> 8) & 0xff;
			iv_data[1] = (iv_data[0] | 0x20) & 0x7f;
			iv_data[2] = pn & 0xff;
		} else if (cipher >= MT76X02_CIPHER_AES_CCMP) {
			put_unaligned_le16((pn & 0xffff), &iv_data[0]);
		}
	}

	mt76_wr_copy(dev, MT_WCID_IV(idx), iv_data, sizeof(iv_data));

	return 0;
}
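/*
 * Initialize a WCID (wireless client ID) table entry: the BSS index
 * attribute (with the extension bit for vif indices 8-15) and, for
 * the first 128 entries only, the peer MAC address the hardware uses
 * to match received frames.
 */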
void mt76x02_mac_wcid_setup(struct mt76x02_dev *dev, u8 idx,
			    u8 vif_idx, u8 *mac)
{
	struct mt76_wcid_addr addr = {};
	u32 attr;

	attr = FIELD_PREP(MT_WCID_ATTR_BSS_IDX, vif_idx & 7) |
	       FIELD_PREP(MT_WCID_ATTR_BSS_IDX_EXT, !!(vif_idx & 8));

	mt76_wr(dev, MT_WCID_ATTR(idx), attr);

	if (idx >= 128)
		return;

	if (mac)
		memcpy(addr.macaddr, mac, ETH_ALEN);

	mt76_wr_copy(dev, MT_WCID_ADDR(idx), &addr, sizeof(addr));
}
EXPORT_SYMBOL_GPL(mt76x02_mac_wcid_setup);
void mt76x02_mac_wcid_set_drop(struct mt76x02_dev *dev, u8 idx, bool drop)
{
	u32 val = mt76_rr(dev, MT_WCID_DROP(idx));
	u32 bit = MT_WCID_DROP_MASK(idx);

	/* prevent unnecessary writes */
	if ((val & bit) != (bit * drop))
		mt76_wr(dev, MT_WCID_DROP(idx), (val & ~bit) | (bit * drop));
}
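/*
 * Build the 16-bit hardware rate word (used in both the TXWI and the
 * per-WCID rate table) from a mac80211 tx_rate: rate index, PHY type,
 * bandwidth and short-GI flag. Legacy entries rely on the rate table
 * encoding hw_value as (phy_type << 8) | rate_idx, unpacked below.
 */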
static u16
mt76x02_mac_tx_rate_val(struct mt76x02_dev *dev,
			const struct ieee80211_tx_rate *rate, u8 *nss_val)
{
	u8 phy, rate_idx, nss, bw = 0;
	u16 rateval;

	if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
		rate_idx = rate->idx;
		nss = 1 + (rate->idx >> 4);
		phy = MT_PHY_TYPE_VHT;
		if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH)
			bw = 2;
		else if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
			bw = 1;
	} else if (rate->flags & IEEE80211_TX_RC_MCS) {
		rate_idx = rate->idx;
		nss = 1 + (rate->idx >> 3);
		phy = MT_PHY_TYPE_HT;
		if (rate->flags & IEEE80211_TX_RC_GREEN_FIELD)
			phy = MT_PHY_TYPE_HT_GF;
		if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
			bw = 1;
	} else {
		const struct ieee80211_rate *r;
		int band = dev->mphy.chandef.chan->band;
		u16 val;

		r = &dev->mt76.hw->wiphy->bands[band]->bitrates[rate->idx];
		if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
			val = r->hw_value_short;
		else
			val = r->hw_value;

		phy = val >> 8;
		rate_idx = val & 0xff;
		nss = 1;
	}

	rateval = FIELD_PREP(MT_RXWI_RATE_INDEX, rate_idx);
	rateval |= FIELD_PREP(MT_RXWI_RATE_PHY, phy);
	rateval |= FIELD_PREP(MT_RXWI_RATE_BW, bw);
	if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
		rateval |= MT_RXWI_RATE_SGI;

	*nss_val = nss;
	return rateval;
}
void mt76x02_mac_wcid_set_rate(struct mt76x02_dev *dev, struct mt76_wcid *wcid,
			       const struct ieee80211_tx_rate *rate)
{
	s8 max_txpwr_adj = mt76x02_tx_get_max_txpwr_adj(dev, rate);
	u16 rateval;
	u32 tx_info;
	u8 nss;

	rateval = mt76x02_mac_tx_rate_val(dev, rate, &nss);
	tx_info = FIELD_PREP(MT_WCID_TX_INFO_RATE, rateval) |
		  FIELD_PREP(MT_WCID_TX_INFO_NSS, nss) |
		  FIELD_PREP(MT_WCID_TX_INFO_TXPWR_ADJ, max_txpwr_adj) |
		  MT_WCID_TX_INFO_SET;
	wcid->tx_info = tx_info;
}
void mt76x02_mac_set_short_preamble(struct mt76x02_dev *dev, bool enable)
{
	if (enable)
		mt76_set(dev, MT_AUTO_RSP_CFG, MT_AUTO_RSP_PREAMB_SHORT);
	else
		mt76_clear(dev, MT_AUTO_RSP_CFG, MT_AUTO_RSP_PREAMB_SHORT);
}
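/*
 * Fetch one entry from the hardware TX status FIFO. Note the read
 * order: MT_TX_STAT_FIFO_EXT is sampled first, since (as the ordering
 * here suggests) it is the read of MT_TX_STAT_FIFO that advances the
 * FIFO to the next entry.
 */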
bool mt76x02_mac_load_tx_status(struct mt76x02_dev *dev,
				struct mt76x02_tx_status *stat)
{
	u32 stat1, stat2;

	stat2 = mt76_rr(dev, MT_TX_STAT_FIFO_EXT);
	stat1 = mt76_rr(dev, MT_TX_STAT_FIFO);

	stat->valid = !!(stat1 & MT_TX_STAT_FIFO_VALID);
	if (!stat->valid)
		return false;

	stat->success = !!(stat1 & MT_TX_STAT_FIFO_SUCCESS);
	stat->aggr = !!(stat1 & MT_TX_STAT_FIFO_AGGR);
	stat->ack_req = !!(stat1 & MT_TX_STAT_FIFO_ACKREQ);
	stat->wcid = FIELD_GET(MT_TX_STAT_FIFO_WCID, stat1);
	stat->rate = FIELD_GET(MT_TX_STAT_FIFO_RATE, stat1);

	stat->retry = FIELD_GET(MT_TX_STAT_FIFO_EXT_RETRY, stat2);
	stat->pktid = FIELD_GET(MT_TX_STAT_FIFO_EXT_PKTID, stat2);

	trace_mac_txstat_fetch(dev, stat);

	return true;
}
static int
mt76x02_mac_process_tx_rate(struct ieee80211_tx_rate *txrate, u16 rate,
			    enum nl80211_band band)
{
	u8 idx = FIELD_GET(MT_RXWI_RATE_INDEX, rate);

	txrate->idx = 0;
	txrate->flags = 0;
	txrate->count = 1;

	switch (FIELD_GET(MT_RXWI_RATE_PHY, rate)) {
	case MT_PHY_TYPE_OFDM:
		if (band == NL80211_BAND_2GHZ)
			idx += 4;

		txrate->idx = idx;
		return 0;
	case MT_PHY_TYPE_CCK:
		if (idx >= 8)
			idx -= 8;

		txrate->idx = idx;
		return 0;
	case MT_PHY_TYPE_HT_GF:
		txrate->flags |= IEEE80211_TX_RC_GREEN_FIELD;
		fallthrough;
	case MT_PHY_TYPE_HT:
		txrate->flags |= IEEE80211_TX_RC_MCS;
		txrate->idx = idx;
		break;
	case MT_PHY_TYPE_VHT:
		txrate->flags |= IEEE80211_TX_RC_VHT_MCS;
		txrate->idx = idx;
		break;
	default:
		return -EINVAL;
	}

	switch (FIELD_GET(MT_RXWI_RATE_BW, rate)) {
	case MT_PHY_BW_20:
		break;
	case MT_PHY_BW_40:
		txrate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
		break;
	case MT_PHY_BW_80:
		txrate->flags |= IEEE80211_TX_RC_80_MHZ_WIDTH;
		break;
	default:
		return -EINVAL;
	}

	if (rate & MT_RXWI_RATE_SGI)
		txrate->flags |= IEEE80211_TX_RC_SHORT_GI;

	return 0;
}
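/*
 * Build the TXWI (TX descriptor) for a frame: IV/EIV for keys with
 * software-generated IVs, rate selection (per-WCID fixed rate or the
 * first mac80211 rate table entry), TX power adjustment, spatial
 * stream configuration and A-MPDU parameters.
 */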
void mt76x02_mac_write_txwi(struct mt76x02_dev *dev, struct mt76x02_txwi *txwi,
			    struct sk_buff *skb, struct mt76_wcid *wcid,
			    struct ieee80211_sta *sta, int len)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_tx_rate *rate = &info->control.rates[0];
	struct ieee80211_key_conf *key = info->control.hw_key;
	u32 wcid_tx_info;
	u16 rate_ht_mask = FIELD_PREP(MT_RXWI_RATE_PHY, BIT(1) | BIT(2));
	u16 txwi_flags = 0, rateval;
	u8 nss;
	s8 txpwr_adj, max_txpwr_adj;
	u8 ccmp_pn[8], nstreams = dev->mphy.chainmask & 0xf;

	memset(txwi, 0, sizeof(*txwi));

	mt76_tx_check_agg_ssn(sta, skb);

	if (!info->control.hw_key && wcid && wcid->hw_key_idx != 0xff &&
	    ieee80211_has_protected(hdr->frame_control)) {
		wcid = NULL;
		ieee80211_get_tx_rates(info->control.vif, sta, skb,
				       info->control.rates, 1);
	}

	if (wcid)
		txwi->wcid = wcid->idx;
	else
		txwi->wcid = 0xff;

	if (wcid && wcid->sw_iv && key) {
		u64 pn = atomic64_inc_return(&key->tx_pn);

		ccmp_pn[0] = pn;
		ccmp_pn[1] = pn >> 8;
		ccmp_pn[2] = 0;
		ccmp_pn[3] = 0x20 | (key->keyidx << 6);
		ccmp_pn[4] = pn >> 16;
		ccmp_pn[5] = pn >> 24;
		ccmp_pn[6] = pn >> 32;
		ccmp_pn[7] = pn >> 40;
		txwi->iv = *((__le32 *)&ccmp_pn[0]);
		txwi->eiv = *((__le32 *)&ccmp_pn[4]);
	}

	if (wcid && (rate->idx < 0 || !rate->count)) {
		wcid_tx_info = wcid->tx_info;
		rateval = FIELD_GET(MT_WCID_TX_INFO_RATE, wcid_tx_info);
		max_txpwr_adj = FIELD_GET(MT_WCID_TX_INFO_TXPWR_ADJ,
					  wcid_tx_info);
		nss = FIELD_GET(MT_WCID_TX_INFO_NSS, wcid_tx_info);
	} else {
		rateval = mt76x02_mac_tx_rate_val(dev, rate, &nss);
		max_txpwr_adj = mt76x02_tx_get_max_txpwr_adj(dev, rate);
	}
	txwi->rate = cpu_to_le16(rateval);

	txpwr_adj = mt76x02_tx_get_txpwr_adj(dev, dev->txpower_conf,
					     max_txpwr_adj);
	txwi->ctl2 = FIELD_PREP(MT_TX_PWR_ADJ, txpwr_adj);

	if (nstreams > 1 && mt76_rev(&dev->mt76) >= MT76XX_REV_E4)
		txwi->txstream = 0x13;
	else if (nstreams > 1 && mt76_rev(&dev->mt76) >= MT76XX_REV_E3 &&
		 !(txwi->rate & cpu_to_le16(rate_ht_mask)))
		txwi->txstream = 0x93;

	if (is_mt76x2(dev) && (info->flags & IEEE80211_TX_CTL_LDPC))
		txwi->rate |= cpu_to_le16(MT_RXWI_RATE_LDPC);
	if ((info->flags & IEEE80211_TX_CTL_STBC) && nss == 1)
		txwi->rate |= cpu_to_le16(MT_RXWI_RATE_STBC);
	if (nss > 1 && sta && sta->deflink.smps_mode == IEEE80211_SMPS_DYNAMIC)
		txwi_flags |= MT_TXWI_FLAGS_MMPS;
	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
		txwi->ack_ctl |= MT_TXWI_ACK_CTL_REQ;
	if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
		txwi->ack_ctl |= MT_TXWI_ACK_CTL_NSEQ;
	if ((info->flags & IEEE80211_TX_CTL_AMPDU) && sta) {
		u8 ba_size = IEEE80211_MIN_AMPDU_BUF;
		u8 ampdu_density = sta->deflink.ht_cap.ampdu_density;

		ba_size <<= sta->deflink.ht_cap.ampdu_factor;
		ba_size = min_t(int, 63, ba_size - 1);
		if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
			ba_size = 0;
		txwi->ack_ctl |= FIELD_PREP(MT_TXWI_ACK_CTL_BA_WINDOW, ba_size);

		if (ampdu_density < IEEE80211_HT_MPDU_DENSITY_4)
			ampdu_density = IEEE80211_HT_MPDU_DENSITY_4;

		txwi_flags |= MT_TXWI_FLAGS_AMPDU |
			 FIELD_PREP(MT_TXWI_FLAGS_MPDU_DENSITY, ampdu_density);
	}

	if (ieee80211_is_probe_resp(hdr->frame_control) ||
	    ieee80211_is_beacon(hdr->frame_control))
		txwi_flags |= MT_TXWI_FLAGS_TS;

	txwi->flags |= cpu_to_le16(txwi_flags);
	txwi->len_ctl = cpu_to_le16(len);
}
EXPORT_SYMBOL_GPL(mt76x02_mac_write_txwi);
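/*
 * Compute the fallback rate written to entry idx + 1 of the status
 * rate table. The hardware only reports the final rate and a retry
 * count, so the intermediate retry rates are reconstructed here
 * (VHT drops MCS first, then NSS; HT MCS 8 falls back to MCS 0).
 */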
static void
mt76x02_tx_rate_fallback(struct ieee80211_tx_rate *rates, int idx, int phy)
{
	u8 mcs, nss;

	if (!idx)
		return;

	rates += idx - 1;
	rates[1] = rates[0];
	switch (phy) {
	case MT_PHY_TYPE_VHT:
		mcs = ieee80211_rate_get_vht_mcs(rates);
		nss = ieee80211_rate_get_vht_nss(rates);

		if (mcs == 0)
			nss = max_t(int, nss - 1, 1);
		else
			mcs--;

		ieee80211_rate_set_vht(rates + 1, mcs, nss);
		break;
	case MT_PHY_TYPE_HT_GF:
	case MT_PHY_TYPE_HT:
		/* MCS 8 falls back to MCS 0 */
		if (rates[0].idx == 8) {
			rates[1].idx = 0;
			break;
		}
		fallthrough;
	default:
		rates[1].idx = max_t(int, rates[0].idx - 1, 0);
		break;
	}
}
static void
mt76x02_mac_fill_tx_status(struct mt76x02_dev *dev, struct mt76x02_sta *msta,
			   struct ieee80211_tx_info *info,
			   struct mt76x02_tx_status *st, int n_frames)
{
	struct ieee80211_tx_rate *rate = info->status.rates;
	struct ieee80211_tx_rate last_rate;
	u16 first_rate;
	int retry = st->retry;
	int phy;
	int i;

	if (!n_frames)
		return;

	phy = FIELD_GET(MT_RXWI_RATE_PHY, st->rate);

	if (st->pktid & MT_PACKET_ID_HAS_RATE) {
		first_rate = st->rate & ~MT_PKTID_RATE;
		first_rate |= st->pktid & MT_PKTID_RATE;

		mt76x02_mac_process_tx_rate(&rate[0], first_rate,
					    dev->mphy.chandef.chan->band);
	} else if (rate[0].idx < 0) {
		if (!msta)
			return;

		mt76x02_mac_process_tx_rate(&rate[0], msta->wcid.tx_info,
					    dev->mphy.chandef.chan->band);
	}

	mt76x02_mac_process_tx_rate(&last_rate, st->rate,
				    dev->mphy.chandef.chan->band);

	for (i = 0; i < ARRAY_SIZE(info->status.rates); i++) {
		retry--;
		if (i + 1 == ARRAY_SIZE(info->status.rates)) {
			info->status.rates[i] = last_rate;
			info->status.rates[i].count = max_t(int, retry, 1);
			break;
		}

		mt76x02_tx_rate_fallback(info->status.rates, i, phy);
		if (info->status.rates[i].idx == last_rate.idx)
			break;
	}

	if (i + 1 < ARRAY_SIZE(info->status.rates)) {
		info->status.rates[i + 1].idx = -1;
		info->status.rates[i + 1].count = 0;
	}

	info->status.ampdu_len = n_frames;
	info->status.ampdu_ack_len = st->success ? n_frames : 0;

	if (st->aggr)
		info->flags |= IEEE80211_TX_CTL_AMPDU |
			       IEEE80211_TX_STAT_AMPDU;

	if (!st->ack_req)
		info->flags |= IEEE80211_TX_CTL_NO_ACK;
	else if (st->success)
		info->flags |= IEEE80211_TX_STAT_ACK;
}
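/*
 * Translate a TX status FIFO entry into a mac80211 status report.
 * Entries for aggregated frames without an attached skb are batched
 * per station (up to 32 frames with identical rate/retry/wcid) and
 * flushed as a single report, keeping the status path cheap at high
 * throughput. Airtime is accounted from the reported frame length.
 */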
void mt76x02_send_tx_status(struct mt76x02_dev *dev,
			    struct mt76x02_tx_status *stat, u8 *update)
{
	struct ieee80211_tx_info info = {};
	struct ieee80211_tx_status status = {
		.info = &info
	};
	static const u8 ac_to_tid[4] = {
		[IEEE80211_AC_BE] = 0,
		[IEEE80211_AC_BK] = 1,
		[IEEE80211_AC_VI] = 4,
		[IEEE80211_AC_VO] = 6
	};
	struct mt76_wcid *wcid = NULL;
	struct mt76x02_sta *msta = NULL;
	struct mt76_dev *mdev = &dev->mt76;
	struct sk_buff_head list;
	u32 duration = 0;
	u8 cur_pktid;
	u32 ac = 0;
	int len = 0;

	if (stat->pktid == MT_PACKET_ID_NO_ACK)
		return;

	rcu_read_lock();

	if (stat->wcid < MT76x02_N_WCIDS)
		wcid = rcu_dereference(dev->mt76.wcid[stat->wcid]);

	if (wcid && wcid->sta) {
		void *priv;

		priv = msta = container_of(wcid, struct mt76x02_sta, wcid);
		status.sta = container_of(priv, struct ieee80211_sta,
					  drv_priv);
	}

	mt76_tx_status_lock(mdev, &list);

	if (wcid) {
		if (mt76_is_skb_pktid(stat->pktid))
			status.skb = mt76_tx_status_skb_get(mdev, wcid,
							    stat->pktid, &list);
		if (status.skb)
			status.info = IEEE80211_SKB_CB(status.skb);
	}

	if (!status.skb && !(stat->pktid & MT_PACKET_ID_HAS_RATE)) {
		mt76_tx_status_unlock(mdev, &list);
		goto out;
	}

	if (msta && stat->aggr && !status.skb) {
		u32 stat_val, stat_cache;

		stat_val = stat->rate;
		stat_val |= ((u32)stat->retry) << 16;
		stat_cache = msta->status.rate;
		stat_cache |= ((u32)msta->status.retry) << 16;

		if (*update == 0 && stat_val == stat_cache &&
		    stat->wcid == msta->status.wcid && msta->n_frames < 32) {
			msta->n_frames++;
			mt76_tx_status_unlock(mdev, &list);
			goto out;
		}

		cur_pktid = msta->status.pktid;
		mt76x02_mac_fill_tx_status(dev, msta, status.info,
					   &msta->status, msta->n_frames);

		msta->status = *stat;
		msta->n_frames = 1;
		*update = 0;
	} else {
		cur_pktid = stat->pktid;
		mt76x02_mac_fill_tx_status(dev, msta, status.info, stat, 1);
		*update = 1;
	}

	if (status.skb) {
		len = status.skb->len;
		ac = skb_get_queue_mapping(status.skb);
		mt76_tx_status_skb_done(mdev, status.skb, &list);
	} else if (msta) {
		len = status.info->status.ampdu_len * ewma_pktlen_read(&msta->pktlen);
		ac = FIELD_GET(MT_PKTID_AC, cur_pktid);
	}

	mt76_tx_status_unlock(mdev, &list);

	if (!status.skb) {
		spin_lock_bh(&dev->mt76.rx_lock);
		ieee80211_tx_status_ext(mt76_hw(dev), &status);
		spin_unlock_bh(&dev->mt76.rx_lock);
	}

	if (!len)
		goto out;

	duration = ieee80211_calc_tx_airtime(mt76_hw(dev), &info, len);

	spin_lock_bh(&dev->mt76.cc_lock);
	dev->tx_airtime += duration;
	spin_unlock_bh(&dev->mt76.cc_lock);

	if (msta)
		ieee80211_sta_register_airtime(status.sta, ac_to_tid[ac], duration, 0);

out:
	rcu_read_unlock();
}
static int
mt76x02_mac_process_rate(struct mt76x02_dev *dev,
			 struct mt76_rx_status *status,
			 u16 rate)
{
	u8 idx = FIELD_GET(MT_RXWI_RATE_INDEX, rate);

	switch (FIELD_GET(MT_RXWI_RATE_PHY, rate)) {
	case MT_PHY_TYPE_OFDM:
		if (idx >= 8)
			idx = 0;

		if (status->band == NL80211_BAND_2GHZ)
			idx += 4;

		status->rate_idx = idx;
		return 0;
	case MT_PHY_TYPE_CCK:
		if (idx >= 8) {
			idx -= 8;
			status->enc_flags |= RX_ENC_FLAG_SHORTPRE;
		}

		if (idx >= 4)
			idx = 0;

		status->rate_idx = idx;
		return 0;
	case MT_PHY_TYPE_HT_GF:
		status->enc_flags |= RX_ENC_FLAG_HT_GF;
		fallthrough;
	case MT_PHY_TYPE_HT:
		status->encoding = RX_ENC_HT;
		status->rate_idx = idx;
		break;
	case MT_PHY_TYPE_VHT: {
		u8 n_rxstream = dev->mphy.chainmask & 0xf;

		status->encoding = RX_ENC_VHT;
		status->rate_idx = FIELD_GET(MT_RATE_INDEX_VHT_IDX, idx);
		status->nss = min_t(u8, n_rxstream,
				    FIELD_GET(MT_RATE_INDEX_VHT_NSS, idx) + 1);
		break;
	}
	default:
		return -EINVAL;
	}

	if (rate & MT_RXWI_RATE_LDPC)
		status->enc_flags |= RX_ENC_FLAG_LDPC;

	if (rate & MT_RXWI_RATE_SGI)
		status->enc_flags |= RX_ENC_FLAG_SHORT_GI;

	if (rate & MT_RXWI_RATE_STBC)
		status->enc_flags |= 1 << RX_ENC_FLAG_STBC_SHIFT;

	switch (FIELD_GET(MT_RXWI_RATE_BW, rate)) {
	case MT_PHY_BW_20:
		break;
	case MT_PHY_BW_40:
		status->bw = RATE_INFO_BW_40;
		break;
	case MT_PHY_BW_80:
		status->bw = RATE_INFO_BW_80;
		break;
	default:
		break;
	}

	return 0;
}
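/*
 * Program the device MAC address and derive the BSSID registers from
 * it. MBSS mode 3 means "8 APs + 8 STAs" (see the inline comment),
 * seven extra beacon slots are enabled but gated by the bypass mask,
 * and all 16 per-slot BSSIDs start out cleared.
 */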
void mt76x02_mac_setaddr(struct mt76x02_dev *dev, const u8 *addr)
{
	static const u8 null_addr[ETH_ALEN] = {};
	int i;

	ether_addr_copy(dev->mphy.macaddr, addr);

	if (!is_valid_ether_addr(dev->mphy.macaddr)) {
		eth_random_addr(dev->mphy.macaddr);
		dev_info(dev->mt76.dev,
			 "Invalid MAC address, using random address %pM\n",
			 dev->mphy.macaddr);
	}

	mt76_wr(dev, MT_MAC_ADDR_DW0, get_unaligned_le32(dev->mphy.macaddr));
	mt76_wr(dev, MT_MAC_ADDR_DW1,
		get_unaligned_le16(dev->mphy.macaddr + 4) |
		FIELD_PREP(MT_MAC_ADDR_DW1_U2ME_MASK, 0xff));

	mt76_wr(dev, MT_MAC_BSSID_DW0,
		get_unaligned_le32(dev->mphy.macaddr));
	mt76_wr(dev, MT_MAC_BSSID_DW1,
		get_unaligned_le16(dev->mphy.macaddr + 4) |
		FIELD_PREP(MT_MAC_BSSID_DW1_MBSS_MODE, 3) | /* 8 APs + 8 STAs */
		MT_MAC_BSSID_DW1_MBSS_LOCAL_BIT);
	/* enable 7 additional beacon slots and control them with bypass mask */
	mt76_rmw_field(dev, MT_MAC_BSSID_DW1, MT_MAC_BSSID_DW1_MBEACON_N, 7);

	for (i = 0; i < 16; i++)
		mt76x02_mac_set_bssid(dev, i, null_addr);
}
EXPORT_SYMBOL_GPL(mt76x02_mac_setaddr);
static int
mt76x02_mac_get_rssi(struct mt76x02_dev *dev, s8 rssi, int chain)
{
	struct mt76x02_rx_freq_cal *cal = &dev->cal.rx;

	rssi += cal->rssi_offset[chain];
	rssi -= cal->lna_gain;

	return rssi;
}
int mt76x02_mac_process_rx(struct mt76x02_dev *dev, struct sk_buff *skb,
			   void *rxi)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct ieee80211_hdr *hdr;
	struct mt76x02_rxwi *rxwi = rxi;
	struct mt76x02_sta *sta;
	u32 rxinfo = le32_to_cpu(rxwi->rxinfo);
	u32 ctl = le32_to_cpu(rxwi->ctl);
	u16 rate = le16_to_cpu(rxwi->rate);
	u16 tid_sn = le16_to_cpu(rxwi->tid_sn);
	bool unicast = rxwi->rxinfo & cpu_to_le32(MT_RXINFO_UNICAST);
	int pad_len = 0, nstreams = dev->mphy.chainmask & 0xf;
	s8 signal;
	u8 pn_len;
	u8 wcid;
	int len;

	if (!test_bit(MT76_STATE_RUNNING, &dev->mphy.state))
		return -EINVAL;

	if (rxinfo & MT_RXINFO_L2PAD)
		pad_len += 2;

	if (rxinfo & MT_RXINFO_DECRYPT) {
		status->flag |= RX_FLAG_DECRYPTED;
		status->flag |= RX_FLAG_MMIC_STRIPPED;
		status->flag |= RX_FLAG_MIC_STRIPPED;
		status->flag |= RX_FLAG_IV_STRIPPED;
	}

	wcid = FIELD_GET(MT_RXWI_CTL_WCID, ctl);
	sta = mt76x02_rx_get_sta(&dev->mt76, wcid);
	status->wcid = mt76x02_rx_get_sta_wcid(sta, unicast);

	len = FIELD_GET(MT_RXWI_CTL_MPDU_LEN, ctl);
	pn_len = FIELD_GET(MT_RXINFO_PN_LEN, rxinfo);
	if (pn_len) {
		int offset = ieee80211_get_hdrlen_from_skb(skb) + pad_len;
		u8 *data = skb->data + offset;

		status->iv[0] = data[7];
		status->iv[1] = data[6];
		status->iv[2] = data[5];
		status->iv[3] = data[4];
		status->iv[4] = data[1];
		status->iv[5] = data[0];

		/*
		 * Driver CCMP validation can't deal with fragments.
		 * Let mac80211 take care of it.
		 */
		if (rxinfo & MT_RXINFO_FRAG) {
			status->flag &= ~RX_FLAG_IV_STRIPPED;
		} else {
			pad_len += pn_len << 2;
			len -= pn_len << 2;
		}
	}

	mt76x02_remove_hdr_pad(skb, pad_len);

	if ((rxinfo & MT_RXINFO_BA) && !(rxinfo & MT_RXINFO_NULL))
		status->aggr = true;

	if (rxinfo & MT_RXINFO_AMPDU) {
		status->flag |= RX_FLAG_AMPDU_DETAILS;
		status->ampdu_ref = dev->ampdu_ref;

		/*
		 * When receiving an A-MPDU subframe and RSSI info is not valid,
		 * we can assume that more subframes belonging to the same A-MPDU
		 * are coming. The last one will have valid RSSI info
		 */
		if (rxinfo & MT_RXINFO_RSSI) {
			if (!++dev->ampdu_ref)
				dev->ampdu_ref++;
		}
	}

	if (WARN_ON_ONCE(len > skb->len))
		return -EINVAL;

	if (pskb_trim(skb, len))
		return -EINVAL;

	status->chains = BIT(0);
	signal = mt76x02_mac_get_rssi(dev, rxwi->rssi[0], 0);
	status->chain_signal[0] = signal;
	if (nstreams > 1) {
		status->chains |= BIT(1);
		status->chain_signal[1] = mt76x02_mac_get_rssi(dev,
							       rxwi->rssi[1],
							       1);
	}
	status->freq = dev->mphy.chandef.chan->center_freq;
	status->band = dev->mphy.chandef.chan->band;

	hdr = (struct ieee80211_hdr *)skb->data;
	status->qos_ctl = *ieee80211_get_qos_ctl(hdr);
	status->seqno = FIELD_GET(MT_RXWI_SN, tid_sn);

	return mt76x02_mac_process_rate(dev, status, rate);
}
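/*
 * Drain the TX status FIFO. When called from IRQ context the entries
 * are only pushed into the software kfifo (to be reported later,
 * outside the hot path); otherwise each entry is reported to mac80211
 * directly.
 */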
void mt76x02_mac_poll_tx_status(struct mt76x02_dev *dev, bool irq)
{
	struct mt76x02_tx_status stat = {};
	u8 update = 1;
	bool ret;

	if (!test_bit(MT76_STATE_RUNNING, &dev->mphy.state))
		return;

	trace_mac_txstat_poll(dev);

	while (!irq || !kfifo_is_full(&dev->txstatus_fifo)) {
		if (!spin_trylock(&dev->txstatus_fifo_lock))
			break;

		ret = mt76x02_mac_load_tx_status(dev, &stat);
		spin_unlock(&dev->txstatus_fifo_lock);

		if (!ret)
			break;

		if (!irq) {
			mt76x02_send_tx_status(dev, &stat, &update);
			continue;
		}

		kfifo_put(&dev->txstatus_fifo, stat);
	}
}
void mt76x02_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e)
{
	struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
	struct mt76x02_txwi *txwi;
	u8 *txwi_ptr;

	if (!e->txwi) {
		dev_kfree_skb_any(e->skb);
		return;
	}

	mt76x02_mac_poll_tx_status(dev, false);

	txwi_ptr = mt76_get_txwi_ptr(mdev, e->txwi);
	txwi = (struct mt76x02_txwi *)txwi_ptr;
	trace_mac_txdone(mdev, txwi->wcid, txwi->pktid);

	mt76_tx_complete_skb(mdev, e->wcid, e->skb);
}
EXPORT_SYMBOL_GPL(mt76x02_tx_complete_skb);
void mt76x02_mac_set_rts_thresh(struct mt76x02_dev *dev, u32 val)
{
	u32 data = 0;

	if (val != ~0)
		data = FIELD_PREP(MT_PROT_CFG_CTRL, 1) |
		       MT_PROT_CFG_RTS_THRESH;

	mt76_rmw_field(dev, MT_TX_RTS_CFG, MT_TX_RTS_CFG_THRESH, val);

	mt76_rmw(dev, MT_CCK_PROT_CFG,
		 MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
	mt76_rmw(dev, MT_OFDM_PROT_CFG,
		 MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
}
void mt76x02_mac_set_tx_protection(struct mt76x02_dev *dev, bool legacy_prot,
				   int ht_mode)
{
	int mode = ht_mode & IEEE80211_HT_OP_MODE_PROTECTION;
	bool non_gf = !!(ht_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
	u32 prot[6];
	u32 vht_prot[3];
	int i;
	u16 rts_thr;

	for (i = 0; i < ARRAY_SIZE(prot); i++) {
		prot[i] = mt76_rr(dev, MT_CCK_PROT_CFG + i * 4);
		prot[i] &= ~MT_PROT_CFG_CTRL;
		if (i >= 2)
			prot[i] &= ~MT_PROT_CFG_RATE;
	}

	for (i = 0; i < ARRAY_SIZE(vht_prot); i++) {
		vht_prot[i] = mt76_rr(dev, MT_TX_PROT_CFG6 + i * 4);
		vht_prot[i] &= ~(MT_PROT_CFG_CTRL | MT_PROT_CFG_RATE);
	}

	rts_thr = mt76_get_field(dev, MT_TX_RTS_CFG, MT_TX_RTS_CFG_THRESH);

	if (rts_thr != 0xffff)
		prot[0] |= MT_PROT_CTRL_RTS_CTS;

	if (legacy_prot) {
		prot[1] |= MT_PROT_CTRL_CTS2SELF;

		prot[2] |= MT_PROT_RATE_CCK_11;
		prot[3] |= MT_PROT_RATE_CCK_11;
		prot[4] |= MT_PROT_RATE_CCK_11;
		prot[5] |= MT_PROT_RATE_CCK_11;

		vht_prot[0] |= MT_PROT_RATE_CCK_11;
		vht_prot[1] |= MT_PROT_RATE_CCK_11;
		vht_prot[2] |= MT_PROT_RATE_CCK_11;
	} else {
		if (rts_thr != 0xffff)
			prot[1] |= MT_PROT_CTRL_RTS_CTS;

		prot[2] |= MT_PROT_RATE_OFDM_24;
		prot[3] |= MT_PROT_RATE_DUP_OFDM_24;
		prot[4] |= MT_PROT_RATE_OFDM_24;
		prot[5] |= MT_PROT_RATE_DUP_OFDM_24;

		vht_prot[0] |= MT_PROT_RATE_OFDM_24;
		vht_prot[1] |= MT_PROT_RATE_DUP_OFDM_24;
		vht_prot[2] |= MT_PROT_RATE_SGI_OFDM_24;
	}

	switch (mode) {
	case IEEE80211_HT_OP_MODE_PROTECTION_NONMEMBER:
	case IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED:
		prot[2] |= MT_PROT_CTRL_RTS_CTS;
		prot[3] |= MT_PROT_CTRL_RTS_CTS;
		prot[4] |= MT_PROT_CTRL_RTS_CTS;
		prot[5] |= MT_PROT_CTRL_RTS_CTS;
		vht_prot[0] |= MT_PROT_CTRL_RTS_CTS;
		vht_prot[1] |= MT_PROT_CTRL_RTS_CTS;
		vht_prot[2] |= MT_PROT_CTRL_RTS_CTS;
		break;
	case IEEE80211_HT_OP_MODE_PROTECTION_20MHZ:
		prot[3] |= MT_PROT_CTRL_RTS_CTS;
		prot[5] |= MT_PROT_CTRL_RTS_CTS;
		vht_prot[1] |= MT_PROT_CTRL_RTS_CTS;
		vht_prot[2] |= MT_PROT_CTRL_RTS_CTS;
		break;
	}

	if (non_gf) {
		prot[4] |= MT_PROT_CTRL_RTS_CTS;
		prot[5] |= MT_PROT_CTRL_RTS_CTS;
	}

	for (i = 0; i < ARRAY_SIZE(prot); i++)
		mt76_wr(dev, MT_CCK_PROT_CFG + i * 4, prot[i]);

	for (i = 0; i < ARRAY_SIZE(vht_prot); i++)
		mt76_wr(dev, MT_TX_PROT_CFG6 + i * 4, vht_prot[i]);
}
void mt76x02_update_channel(struct mt76_phy *mphy)
{
	struct mt76x02_dev *dev = container_of(mphy->dev, struct mt76x02_dev, mt76);
	struct mt76_channel_state *state;

	state = mphy->chan_state;
	state->cc_busy += mt76_rr(dev, MT_CH_BUSY);

	spin_lock_bh(&dev->mt76.cc_lock);
	state->cc_tx += dev->tx_airtime;
	dev->tx_airtime = 0;
	spin_unlock_bh(&dev->mt76.cc_lock);
}
EXPORT_SYMBOL_GPL(mt76x02_update_channel);
static void mt76x02_check_mac_err(struct mt76x02_dev *dev)
{
	if (dev->mt76.beacon_mask) {
		if (mt76_rr(dev, MT_TX_STA_0) & MT_TX_STA_0_BEACONS) {
			dev->beacon_hang_check = 0;
			return;
		}

		if (++dev->beacon_hang_check < 10)
			return;

	} else {
		u32 val = mt76_rr(dev, 0x10f4);

		if (!(val & BIT(29)) || !(val & (BIT(7) | BIT(5))))
			return;
	}

	dev_err(dev->mt76.dev, "MAC error detected\n");

	mt76_wr(dev, MT_MAC_SYS_CTRL, 0);
	if (!mt76x02_wait_for_txrx_idle(&dev->mt76)) {
		dev_err(dev->mt76.dev, "MAC stop failed\n");
		goto out;
	}

	dev->beacon_hang_check = 0;
	mt76_set(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_RESET_CSR);
	udelay(10);

out:
	mt76_wr(dev, MT_MAC_SYS_CTRL,
		MT_MAC_SYS_CTRL_ENABLE_TX | MT_MAC_SYS_CTRL_ENABLE_RX);
}
static void
mt76x02_edcca_tx_enable(struct mt76x02_dev *dev, bool enable)
{
	if (enable) {
		u32 data;

		mt76_set(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_TX);
		mt76_set(dev, MT_AUTO_RSP_CFG, MT_AUTO_RSP_EN);
		/* enable pa-lna */
		data = mt76_rr(dev, MT_TX_PIN_CFG);
		data |= MT_TX_PIN_CFG_TXANT |
			MT_TX_PIN_CFG_RXANT |
			MT_TX_PIN_RFTR_EN |
			MT_TX_PIN_TRSW_EN;
		mt76_wr(dev, MT_TX_PIN_CFG, data);
	} else {
		mt76_clear(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_TX);
		mt76_clear(dev, MT_AUTO_RSP_CFG, MT_AUTO_RSP_EN);
		/* disable pa-lna */
		mt76_clear(dev, MT_TX_PIN_CFG, MT_TX_PIN_CFG_TXANT);
		mt76_clear(dev, MT_TX_PIN_CFG, MT_TX_PIN_CFG_RXANT);
	}
	dev->ed_tx_blocked = !enable;
}
void mt76x02_edcca_init(struct mt76x02_dev *dev)
{
	dev->ed_trigger = 0;
	dev->ed_silent = 0;

	if (dev->ed_monitor) {
		struct ieee80211_channel *chan = dev->mphy.chandef.chan;
		u8 ed_th = chan->band == NL80211_BAND_5GHZ ? 0x0e : 0x20;

		mt76_clear(dev, MT_TX_LINK_CFG, MT_TX_CFACK_EN);
		mt76_set(dev, MT_TXOP_CTRL_CFG, MT_TXOP_ED_CCA_EN);
		mt76_rmw(dev, MT_BBP(AGC, 2), GENMASK(15, 0),
			 ed_th << 8 | ed_th);
		mt76_set(dev, MT_TXOP_HLDR_ET, MT_TXOP_HLDR_TX40M_BLK_EN);
	} else {
		mt76_set(dev, MT_TX_LINK_CFG, MT_TX_CFACK_EN);
		mt76_clear(dev, MT_TXOP_CTRL_CFG, MT_TXOP_ED_CCA_EN);
		if (is_mt76x2(dev)) {
			mt76_wr(dev, MT_BBP(AGC, 2), 0x00007070);
			mt76_set(dev, MT_TXOP_HLDR_ET,
				 MT_TXOP_HLDR_TX40M_BLK_EN);
		} else {
			mt76_wr(dev, MT_BBP(AGC, 2), 0x003a6464);
			mt76_clear(dev, MT_TXOP_HLDR_ET,
				   MT_TXOP_HLDR_TX40M_BLK_EN);
		}
	}
	mt76x02_edcca_tx_enable(dev, true);
	dev->ed_monitor_learning = true;

	/* clear previous CCA timer value */
	mt76_rr(dev, MT_ED_CCA_TIMER);
	dev->ed_time = ktime_get_boottime();
}
EXPORT_SYMBOL_GPL(mt76x02_edcca_init);
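/*
 * EDCCA monitoring: each check interval is classified as "trigger"
 * (energy-detect busy time above MT_EDCCA_TH percent) or "silent",
 * and TX is blocked or unblocked after more than MT_EDCCA_BLOCK_TH
 * consecutive intervals of one kind. The learning state below guards
 * against false triggers caused by high false-CCA counts at the
 * lowest AGC gain.
 */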
#define MT_EDCCA_TH		92
#define MT_EDCCA_BLOCK_TH	2
#define MT_EDCCA_LEARN_TH	50
#define MT_EDCCA_LEARN_CCA	180
#define MT_EDCCA_LEARN_TIMEOUT	(20 * HZ)

static void mt76x02_edcca_check(struct mt76x02_dev *dev)
{
	ktime_t cur_time;
	u32 active, val, busy;

	cur_time = ktime_get_boottime();
	val = mt76_rr(dev, MT_ED_CCA_TIMER);

	active = ktime_to_us(ktime_sub(cur_time, dev->ed_time));
	dev->ed_time = cur_time;

	busy = (val * 100) / active;
	busy = min_t(u32, busy, 100);

	if (busy > MT_EDCCA_TH) {
		dev->ed_trigger++;
		dev->ed_silent = 0;
	} else {
		dev->ed_silent++;
		dev->ed_trigger = 0;
	}

	if (dev->cal.agc_lowest_gain &&
	    dev->cal.false_cca > MT_EDCCA_LEARN_CCA &&
	    dev->ed_trigger > MT_EDCCA_LEARN_TH) {
		dev->ed_monitor_learning = false;
		dev->ed_trigger_timeout = jiffies + 20 * HZ;
	} else if (!dev->ed_monitor_learning &&
		   time_is_after_jiffies(dev->ed_trigger_timeout)) {
		dev->ed_monitor_learning = true;
		mt76x02_edcca_tx_enable(dev, true);
	}

	if (dev->ed_monitor_learning)
		return;

	if (dev->ed_trigger > MT_EDCCA_BLOCK_TH && !dev->ed_tx_blocked)
		mt76x02_edcca_tx_enable(dev, false);
	else if (dev->ed_silent > MT_EDCCA_BLOCK_TH && dev->ed_tx_blocked)
		mt76x02_edcca_tx_enable(dev, true);
}
void mt76x02_mac_work(struct work_struct *work)
{
	struct mt76x02_dev *dev = container_of(work, struct mt76x02_dev,
					       mphy.mac_work.work);
	int i, idx;

	mutex_lock(&dev->mt76.mutex);

	mt76_update_survey(&dev->mphy);
	for (i = 0, idx = 0; i < 16; i++) {
		u32 val = mt76_rr(dev, MT_TX_AGG_CNT(i));

		dev->mphy.aggr_stats[idx++] += val & 0xffff;
		dev->mphy.aggr_stats[idx++] += val >> 16;
	}

	mt76x02_check_mac_err(dev);

	if (dev->ed_monitor)
		mt76x02_edcca_check(dev);

	mutex_unlock(&dev->mt76.mutex);

	mt76_tx_status_check(&dev->mt76, false);

	ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mphy.mac_work,
				     MT_MAC_WORK_INTERVAL);
}
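/*
 * Restart channel-cycle (survey) accounting: reset the timestamp,
 * count TX/RX/NAV/EIFS time as busy, clear the channel timers, and
 * flush the read-and-clear busy/idle counters.
 */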
void mt76x02_mac_cc_reset(struct mt76x02_dev *dev)
{
	dev->mphy.survey_time = ktime_get_boottime();

	mt76_wr(dev, MT_CH_TIME_CFG,
		MT_CH_TIME_CFG_TIMER_EN |
		MT_CH_TIME_CFG_TX_AS_BUSY |
		MT_CH_TIME_CFG_RX_AS_BUSY |
		MT_CH_TIME_CFG_NAV_AS_BUSY |
		MT_CH_TIME_CFG_EIFS_AS_BUSY |
		MT_CH_CCA_RC_EN |
		FIELD_PREP(MT_CH_TIME_CFG_CH_TIMER_CLR, 1));

	/* channel cycle counters read-and-clear */
	mt76_rr(dev, MT_CH_BUSY);
	mt76_rr(dev, MT_CH_IDLE);
}
EXPORT_SYMBOL_GPL(mt76x02_mac_cc_reset);
void mt76x02_mac_set_bssid(struct mt76x02_dev *dev, u8 idx, const u8 *addr)
{
	idx &= 7;
	mt76_wr(dev, MT_MAC_APC_BSSID_L(idx), get_unaligned_le32(addr));
	mt76_rmw_field(dev, MT_MAC_APC_BSSID_H(idx), MT_MAC_APC_BSSID_H_ADDR,
		       get_unaligned_le16(addr + 4));
}