/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include "mt76x02.h"
#include "mt76x02_trace.h"
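/*
 * Map a mac80211 cipher suite to the on-chip cipher type and copy the key
 * material into key_data, which must point to a 32-byte buffer (the size of
 * a hardware key slot, as written out below with mt76_wr_copy). Unsupported
 * ciphers fall back to MT_CIPHER_NONE so callers can refuse hw offload.
 */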
static enum mt76x02_cipher_type
mt76x02_mac_get_key_info(struct ieee80211_key_conf *key, u8 *key_data)
{
	memset(key_data, 0, 32);
	if (!key)
		return MT_CIPHER_NONE;

	if (key->keylen > 32)
		return MT_CIPHER_NONE;

	memcpy(key_data, key->key, key->keylen);

	switch (key->cipher) {
	case WLAN_CIPHER_SUITE_WEP40:
		return MT_CIPHER_WEP40;
	case WLAN_CIPHER_SUITE_WEP104:
		return MT_CIPHER_WEP104;
	case WLAN_CIPHER_SUITE_TKIP:
		return MT_CIPHER_TKIP;
	case WLAN_CIPHER_SUITE_CCMP:
		return MT_CIPHER_AES_CCMP;
	default:
		return MT_CIPHER_NONE;
	}
}
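/*
 * Program a shared (group) key into one of the per-VIF shared key slots.
 * The cipher mode for each slot is packed into the MT_SKEY_MODE register;
 * the key material itself lives at MT_SKEY(vif_idx, key_idx).
 */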
int mt76x02_mac_shared_key_setup(struct mt76x02_dev *dev, u8 vif_idx,
				 u8 key_idx, struct ieee80211_key_conf *key)
{
	enum mt76x02_cipher_type cipher;
	u8 key_data[32];
	u32 val;

	cipher = mt76x02_mac_get_key_info(key, key_data);
	if (cipher == MT_CIPHER_NONE && key)
		return -EOPNOTSUPP;

	val = mt76_rr(dev, MT_SKEY_MODE(vif_idx));
	val &= ~(MT_SKEY_MODE_MASK << MT_SKEY_MODE_SHIFT(vif_idx, key_idx));
	val |= cipher << MT_SKEY_MODE_SHIFT(vif_idx, key_idx);
	mt76_wr(dev, MT_SKEY_MODE(vif_idx), val);

	mt76_wr_copy(dev, MT_SKEY(vif_idx, key_idx), key_data,
		     sizeof(key_data));

	return 0;
}
EXPORT_SYMBOL_GPL(mt76x02_mac_shared_key_setup);
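/*
 * Read back the IV/EIV words the hardware kept for this WCID and rebuild
 * the 48-bit packet number from them, so that mac80211's tx_pn counter
 * stays in sync with what was actually transmitted; TKIP and CCMP pack
 * the low PN bytes into the IV word differently.
 */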
void mt76x02_mac_wcid_sync_pn(struct mt76x02_dev *dev, u8 idx,
			      struct ieee80211_key_conf *key)
{
	enum mt76x02_cipher_type cipher;
	u8 key_data[32];
	u32 iv, eiv;
	u64 pn;

	cipher = mt76x02_mac_get_key_info(key, key_data);
	iv = mt76_rr(dev, MT_WCID_IV(idx));
	eiv = mt76_rr(dev, MT_WCID_IV(idx) + 4);

	pn = (u64)eiv << 16;
	if (cipher == MT_CIPHER_TKIP) {
		pn |= (iv >> 16) & 0xff;
		pn |= (iv & 0xff) << 8;
	} else if (cipher >= MT_CIPHER_AES_CCMP) {
		pn |= iv & 0xffff;
	} else {
		return;
	}

	atomic64_set(&key->tx_pn, pn);
}
int mt76x02_mac_wcid_set_key(struct mt76x02_dev *dev, u8 idx,
			     struct ieee80211_key_conf *key)
{
	enum mt76x02_cipher_type cipher;
	u8 key_data[32];
	u8 iv_data[8];
	u64 pn;

	cipher = mt76x02_mac_get_key_info(key, key_data);
	if (cipher == MT_CIPHER_NONE && key)
		return -EOPNOTSUPP;

	mt76_wr_copy(dev, MT_WCID_KEY(idx), key_data, sizeof(key_data));
	mt76_rmw_field(dev, MT_WCID_ATTR(idx), MT_WCID_ATTR_PKEY_MODE, cipher);

	memset(iv_data, 0, sizeof(iv_data));
	if (key) {
		mt76_rmw_field(dev, MT_WCID_ATTR(idx), MT_WCID_ATTR_PAIRWISE,
			       !!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE));

		pn = atomic64_read(&key->tx_pn);
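		/*
		 * Seed the per-WCID IV/EIV registers from the current PN.
		 * Byte 3 carries the key index (bits 6-7) and, for TKIP and
		 * CCMP, the "extended IV present" flag (0x20).
		 */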
		iv_data[3] = key->keyidx << 6;
		if (cipher >= MT_CIPHER_TKIP) {
			iv_data[3] |= 0x20;
			put_unaligned_le32(pn >> 16, &iv_data[4]);
		}

		if (cipher == MT_CIPHER_TKIP) {
			iv_data[0] = (pn >> 8) & 0xff;
			iv_data[1] = (iv_data[0] | 0x20) & 0x7f;
			iv_data[2] = pn & 0xff;
		} else if (cipher >= MT_CIPHER_AES_CCMP) {
			put_unaligned_le16((pn & 0xffff), &iv_data[0]);
		}
	}

	mt76_wr_copy(dev, MT_WCID_IV(idx), iv_data, sizeof(iv_data));

	return 0;
}
void mt76x02_mac_wcid_setup(struct mt76x02_dev *dev, u8 idx,
			    u8 vif_idx, u8 *mac)
{
	struct mt76_wcid_addr addr = {};
	u32 attr;

	attr = FIELD_PREP(MT_WCID_ATTR_BSS_IDX, vif_idx & 7) |
	       FIELD_PREP(MT_WCID_ATTR_BSS_IDX_EXT, !!(vif_idx & 8));

	mt76_wr(dev, MT_WCID_ATTR(idx), attr);

	if (idx >= 128)
		return;

	if (mac)
		memcpy(addr.macaddr, mac, ETH_ALEN);

	mt76_wr_copy(dev, MT_WCID_ADDR(idx), &addr, sizeof(addr));
}
EXPORT_SYMBOL_GPL(mt76x02_mac_wcid_setup);
void mt76x02_mac_wcid_set_drop(struct mt76x02_dev *dev, u8 idx, bool drop)
{
	u32 val = mt76_rr(dev, MT_WCID_DROP(idx));
	u32 bit = MT_WCID_DROP_MASK(idx);

	/* prevent unnecessary writes */
	if ((val & bit) != (bit * drop))
		mt76_wr(dev, MT_WCID_DROP(idx), (val & ~bit) | (bit * drop));
}
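/*
 * Pack an ieee80211_tx_rate into the 16-bit hardware rate word shared by
 * the TXWI and the WCID rate table: rate index, PHY mode (CCK/OFDM/HT/
 * HT-GF/VHT), bandwidth and the short-GI flag, returning the stream count
 * to the caller through nss_val.
 */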
static __le16
mt76x02_mac_tx_rate_val(struct mt76x02_dev *dev,
			const struct ieee80211_tx_rate *rate, u8 *nss_val)
{
	u8 phy, rate_idx, nss, bw = 0;
	u16 rateval;

	if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
		rate_idx = rate->idx;
		nss = 1 + (rate->idx >> 4);
		phy = MT_PHY_TYPE_VHT;
		if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH)
			bw = 2;
		else if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
			bw = 1;
	} else if (rate->flags & IEEE80211_TX_RC_MCS) {
		rate_idx = rate->idx;
		nss = 1 + (rate->idx >> 3);
		phy = MT_PHY_TYPE_HT;
		if (rate->flags & IEEE80211_TX_RC_GREEN_FIELD)
			phy = MT_PHY_TYPE_HT_GF;
		if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
			bw = 1;
	} else {
		const struct ieee80211_rate *r;
		int band = dev->mt76.chandef.chan->band;
		u16 val;

		r = &dev->mt76.hw->wiphy->bands[band]->bitrates[rate->idx];
		if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
			val = r->hw_value_short;
		else
			val = r->hw_value;

		phy = val >> 8;
		rate_idx = val & 0xff;
		nss = 1;
	}

	rateval = FIELD_PREP(MT_RXWI_RATE_INDEX, rate_idx);
	rateval |= FIELD_PREP(MT_RXWI_RATE_PHY, phy);
	rateval |= FIELD_PREP(MT_RXWI_RATE_BW, bw);
	if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
		rateval |= MT_RXWI_RATE_SGI;

	*nss_val = nss;
	return cpu_to_le16(rateval);
}
void mt76x02_mac_wcid_set_rate(struct mt76x02_dev *dev, struct mt76_wcid *wcid,
			       const struct ieee80211_tx_rate *rate)
{
	s8 max_txpwr_adj = mt76x02_tx_get_max_txpwr_adj(dev, rate);
	__le16 rateval;
	u32 tx_info;
	u8 nss;

	rateval = mt76x02_mac_tx_rate_val(dev, rate, &nss);
	tx_info = FIELD_PREP(MT_WCID_TX_INFO_RATE, rateval) |
		  FIELD_PREP(MT_WCID_TX_INFO_NSS, nss) |
		  FIELD_PREP(MT_WCID_TX_INFO_TXPWR_ADJ, max_txpwr_adj) |
		  MT_WCID_TX_INFO_SET;
	wcid->tx_info = tx_info;
}
void mt76x02_mac_set_short_preamble(struct mt76x02_dev *dev, bool enable)
{
	if (enable)
		mt76_set(dev, MT_AUTO_RSP_CFG, MT_AUTO_RSP_PREAMB_SHORT);
	else
		mt76_clear(dev, MT_AUTO_RSP_CFG, MT_AUTO_RSP_PREAMB_SHORT);
}
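/*
 * Pop one entry from the hardware TX status FIFO. The extension word is
 * sampled before MT_TX_STAT_FIFO; reading the latter is what appears to
 * advance the FIFO, so reversing the order would pair words belonging to
 * different entries.
 */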
bool mt76x02_mac_load_tx_status(struct mt76x02_dev *dev,
				struct mt76x02_tx_status *stat)
{
	u32 stat1, stat2;

	stat2 = mt76_rr(dev, MT_TX_STAT_FIFO_EXT);
	stat1 = mt76_rr(dev, MT_TX_STAT_FIFO);

	stat->valid = !!(stat1 & MT_TX_STAT_FIFO_VALID);
	if (!stat->valid)
		return false;

	stat->success = !!(stat1 & MT_TX_STAT_FIFO_SUCCESS);
	stat->aggr = !!(stat1 & MT_TX_STAT_FIFO_AGGR);
	stat->ack_req = !!(stat1 & MT_TX_STAT_FIFO_ACKREQ);
	stat->wcid = FIELD_GET(MT_TX_STAT_FIFO_WCID, stat1);
	stat->rate = FIELD_GET(MT_TX_STAT_FIFO_RATE, stat1);

	stat->retry = FIELD_GET(MT_TX_STAT_FIFO_EXT_RETRY, stat2);
	stat->pktid = FIELD_GET(MT_TX_STAT_FIFO_EXT_PKTID, stat2);

	trace_mac_txstat_fetch(dev, stat);

	return true;
}
static int
mt76x02_mac_process_tx_rate(struct ieee80211_tx_rate *txrate, u16 rate,
			    enum nl80211_band band)
{
	u8 idx = FIELD_GET(MT_RXWI_RATE_INDEX, rate);

	txrate->idx = 0;
	txrate->flags = 0;
	txrate->count = 1;

	switch (FIELD_GET(MT_RXWI_RATE_PHY, rate)) {
	case MT_PHY_TYPE_OFDM:
		if (band == NL80211_BAND_2GHZ)
			idx += 4;

		txrate->idx = idx;
		return 0;
	case MT_PHY_TYPE_CCK:
		if (idx >= 8)
			idx -= 8;

		txrate->idx = idx;
		return 0;
	case MT_PHY_TYPE_HT_GF:
		txrate->flags |= IEEE80211_TX_RC_GREEN_FIELD;
		/* fall through */
	case MT_PHY_TYPE_HT:
		txrate->flags |= IEEE80211_TX_RC_MCS;
		txrate->idx = idx;
		break;
	case MT_PHY_TYPE_VHT:
		txrate->flags |= IEEE80211_TX_RC_VHT_MCS;
		txrate->idx = idx;
		break;
	default:
		return -EINVAL;
	}

	switch (FIELD_GET(MT_RXWI_RATE_BW, rate)) {
	case MT_PHY_BW_20:
		break;
	case MT_PHY_BW_40:
		txrate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
		break;
	case MT_PHY_BW_80:
		txrate->flags |= IEEE80211_TX_RC_80_MHZ_WIDTH;
		break;
	default:
		return -EINVAL;
	}

	if (rate & MT_RXWI_RATE_SGI)
		txrate->flags |= IEEE80211_TX_RC_SHORT_GI;

	return 0;
}
void mt76x02_mac_write_txwi(struct mt76x02_dev *dev, struct mt76x02_txwi *txwi,
			    struct sk_buff *skb, struct mt76_wcid *wcid,
			    struct ieee80211_sta *sta, int len)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_tx_rate *rate = &info->control.rates[0];
	struct ieee80211_key_conf *key = info->control.hw_key;
	u32 wcid_tx_info;
	u16 rate_ht_mask = FIELD_PREP(MT_RXWI_RATE_PHY, BIT(1) | BIT(2));
	u16 txwi_flags = 0;
	u8 nss;
	s8 txpwr_adj, max_txpwr_adj;
	u8 ccmp_pn[8], nstreams = dev->mt76.chainmask & 0xf;

	memset(txwi, 0, sizeof(*txwi));

	if (!info->control.hw_key && wcid && wcid->hw_key_idx != 0xff &&
	    ieee80211_has_protected(hdr->frame_control)) {
		wcid = NULL;
		ieee80211_get_tx_rates(info->control.vif, sta, skb,
				       info->control.rates, 1);
	}

	if (wcid)
		txwi->wcid = wcid->idx;
	else
		txwi->wcid = 0xff;
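	/*
	 * If mac80211 asked for software IV generation (wcid->sw_iv), build
	 * the CCMP header here: PN0/PN1, a reserved byte, the key index plus
	 * the Ext IV bit, then PN2-PN5 in the extended IV word.
	 */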
	if (wcid && wcid->sw_iv && key) {
		u64 pn = atomic64_inc_return(&key->tx_pn);

		ccmp_pn[0] = pn;
		ccmp_pn[1] = pn >> 8;
		ccmp_pn[2] = 0;
		ccmp_pn[3] = 0x20 | (key->keyidx << 6);
		ccmp_pn[4] = pn >> 16;
		ccmp_pn[5] = pn >> 24;
		ccmp_pn[6] = pn >> 32;
		ccmp_pn[7] = pn >> 40;
		txwi->iv = *((__le32 *)&ccmp_pn[0]);
		txwi->eiv = *((__le32 *)&ccmp_pn[4]);
	}
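	/*
	 * With no explicit rate from mac80211 (invalid index or zero count),
	 * fall back to the cached per-station rate programmed into the WCID
	 * table by mt76x02_mac_wcid_set_rate().
	 */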
	if (wcid && (rate->idx < 0 || !rate->count)) {
		wcid_tx_info = wcid->tx_info;
		txwi->rate = FIELD_GET(MT_WCID_TX_INFO_RATE, wcid_tx_info);
		max_txpwr_adj = FIELD_GET(MT_WCID_TX_INFO_TXPWR_ADJ,
					  wcid_tx_info);
		nss = FIELD_GET(MT_WCID_TX_INFO_NSS, wcid_tx_info);
	} else {
		txwi->rate = mt76x02_mac_tx_rate_val(dev, rate, &nss);
		max_txpwr_adj = mt76x02_tx_get_max_txpwr_adj(dev, rate);
	}

	txpwr_adj = mt76x02_tx_get_txpwr_adj(dev, dev->mt76.txpower_conf,
					     max_txpwr_adj);
	txwi->ctl2 = FIELD_PREP(MT_TX_PWR_ADJ, txpwr_adj);
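	/*
	 * Per-revision TX stream quirk: 0x13 and 0x93 look like opaque
	 * stream configuration values inherited from the vendor driver
	 * (assumption); E3 silicon only takes the override for legacy,
	 * non-HT/VHT rates.
	 */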
	if (nstreams > 1 && mt76_rev(&dev->mt76) >= MT76XX_REV_E4)
		txwi->txstream = 0x13;
	else if (nstreams > 1 && mt76_rev(&dev->mt76) >= MT76XX_REV_E3 &&
		 !(txwi->rate & cpu_to_le16(rate_ht_mask)))
		txwi->txstream = 0x93;

	if (is_mt76x2(dev) && (info->flags & IEEE80211_TX_CTL_LDPC))
		txwi->rate |= cpu_to_le16(MT_RXWI_RATE_LDPC);
	if ((info->flags & IEEE80211_TX_CTL_STBC) && nss == 1)
		txwi->rate |= cpu_to_le16(MT_RXWI_RATE_STBC);
	if (nss > 1 && sta && sta->smps_mode == IEEE80211_SMPS_DYNAMIC)
		txwi_flags |= MT_TXWI_FLAGS_MMPS;
	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
		txwi->ack_ctl |= MT_TXWI_ACK_CTL_REQ;
	if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
		txwi->ack_ctl |= MT_TXWI_ACK_CTL_NSEQ;
	if ((info->flags & IEEE80211_TX_CTL_AMPDU) && sta) {
		u8 ba_size = IEEE80211_MIN_AMPDU_BUF;

		ba_size <<= sta->ht_cap.ampdu_factor;
		ba_size = min_t(int, 63, ba_size - 1);
		if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
			ba_size = 0;
		txwi->ack_ctl |= FIELD_PREP(MT_TXWI_ACK_CTL_BA_WINDOW, ba_size);

		txwi_flags |= MT_TXWI_FLAGS_AMPDU |
			      FIELD_PREP(MT_TXWI_FLAGS_MPDU_DENSITY,
					 sta->ht_cap.ampdu_density);
	}

	if (ieee80211_is_probe_resp(hdr->frame_control) ||
	    ieee80211_is_beacon(hdr->frame_control))
		txwi_flags |= MT_TXWI_FLAGS_TS;

	txwi->flags |= cpu_to_le16(txwi_flags);
	txwi->len_ctl = cpu_to_le16(len);
}
EXPORT_SYMBOL_GPL(mt76x02_mac_write_txwi);
static void
mt76x02_mac_fill_tx_status(struct mt76x02_dev *dev,
			   struct ieee80211_tx_info *info,
			   struct mt76x02_tx_status *st, int n_frames)
{
	struct ieee80211_tx_rate *rate = info->status.rates;
	int cur_idx, last_rate;
	int i;

	if (!n_frames)
		return;

	last_rate = min_t(int, st->retry, IEEE80211_TX_MAX_RATES - 1);
	mt76x02_mac_process_tx_rate(&rate[last_rate], st->rate,
				    dev->mt76.chandef.chan->band);
	if (last_rate < IEEE80211_TX_MAX_RATES - 1)
		rate[last_rate + 1].idx = -1;

	cur_idx = rate[last_rate].idx + last_rate;
	for (i = 0; i <= last_rate; i++) {
		rate[i].flags = rate[last_rate].flags;
		rate[i].idx = max_t(int, 0, cur_idx - i);
		rate[i].count = 1;
	}
	rate[last_rate].count = st->retry + 1 - last_rate;

	info->status.ampdu_len = n_frames;
	info->status.ampdu_ack_len = st->success ? n_frames : 0;

	if (st->aggr)
		info->flags |= IEEE80211_TX_CTL_AMPDU |
			       IEEE80211_TX_STAT_AMPDU;

	if (!st->ack_req)
		info->flags |= IEEE80211_TX_CTL_NO_ACK;
	else if (st->success)
		info->flags |= IEEE80211_TX_STAT_ACK;
}
void mt76x02_send_tx_status(struct mt76x02_dev *dev,
			    struct mt76x02_tx_status *stat, u8 *update)
{
	struct ieee80211_tx_info info = {};
	struct ieee80211_tx_status status = {
		.info = &info
	};
	struct mt76_wcid *wcid = NULL;
	struct mt76x02_sta *msta = NULL;
	struct mt76_dev *mdev = &dev->mt76;
	struct sk_buff_head list;

	if (stat->pktid == MT_PACKET_ID_NO_ACK)
		return;

	rcu_read_lock();

	if (stat->wcid < ARRAY_SIZE(dev->mt76.wcid))
		wcid = rcu_dereference(dev->mt76.wcid[stat->wcid]);

	if (wcid && wcid->sta) {
		void *priv;

		priv = msta = container_of(wcid, struct mt76x02_sta, wcid);
		status.sta = container_of(priv, struct ieee80211_sta,
					  drv_priv);
	}

	mt76_tx_status_lock(mdev, &list);

	if (wcid) {
		if (stat->pktid >= MT_PACKET_ID_FIRST)
			status.skb = mt76_tx_status_skb_get(mdev, wcid,
							    stat->pktid, &list);
		if (status.skb)
			status.info = IEEE80211_SKB_CB(status.skb);
	}
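	/*
	 * For aggregated frames without a tracked skb, batch identical
	 * status reports: as long as rate/retry/wcid match the cached
	 * values, just bump the frame counter and report everything in
	 * one go later (up to 32 frames per report).
	 */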
	if (msta && stat->aggr && !status.skb) {
		u32 stat_val, stat_cache;

		stat_val = stat->rate;
		stat_val |= ((u32) stat->retry) << 16;
		stat_cache = msta->status.rate;
		stat_cache |= ((u32) msta->status.retry) << 16;

		if (*update == 0 && stat_val == stat_cache &&
		    stat->wcid == msta->status.wcid && msta->n_frames < 32) {
			msta->n_frames++;
			mt76_tx_status_unlock(mdev, &list);
			goto out;
		}

		mt76x02_mac_fill_tx_status(dev, status.info, &msta->status,
					   msta->n_frames);

		msta->status = *stat;
		msta->n_frames = 1;
		*update = 0;
	} else {
		mt76x02_mac_fill_tx_status(dev, status.info, stat, 1);
		*update = 1;
	}

	if (status.skb)
		mt76_tx_status_skb_done(mdev, status.skb, &list);
	mt76_tx_status_unlock(mdev, &list);

	if (!status.skb)
		ieee80211_tx_status_ext(mt76_hw(dev), &status);

out:
	rcu_read_unlock();
}
static int
mt76x02_mac_process_rate(struct mt76x02_dev *dev,
			 struct mt76_rx_status *status,
			 u16 rate)
{
	u8 idx = FIELD_GET(MT_RXWI_RATE_INDEX, rate);

	switch (FIELD_GET(MT_RXWI_RATE_PHY, rate)) {
	case MT_PHY_TYPE_OFDM:
		if (idx >= 8)
			idx = 0;

		if (status->band == NL80211_BAND_2GHZ)
			idx += 4;

		status->rate_idx = idx;
		return 0;
	case MT_PHY_TYPE_CCK:
		if (idx >= 8) {
			idx -= 8;
			status->enc_flags |= RX_ENC_FLAG_SHORTPRE;
		}

		if (idx >= 4)
			idx = 0;

		status->rate_idx = idx;
		return 0;
	case MT_PHY_TYPE_HT_GF:
		status->enc_flags |= RX_ENC_FLAG_HT_GF;
		/* fall through */
	case MT_PHY_TYPE_HT:
		status->encoding = RX_ENC_HT;
		status->rate_idx = idx;
		break;
	case MT_PHY_TYPE_VHT: {
		u8 n_rxstream = dev->mt76.chainmask & 0xf;

		status->encoding = RX_ENC_VHT;
		status->rate_idx = FIELD_GET(MT_RATE_INDEX_VHT_IDX, idx);
		status->nss = min_t(u8, n_rxstream,
				    FIELD_GET(MT_RATE_INDEX_VHT_NSS, idx) + 1);
		break;
	}
	default:
		return -EINVAL;
	}

	if (rate & MT_RXWI_RATE_LDPC)
		status->enc_flags |= RX_ENC_FLAG_LDPC;

	if (rate & MT_RXWI_RATE_SGI)
		status->enc_flags |= RX_ENC_FLAG_SHORT_GI;

	if (rate & MT_RXWI_RATE_STBC)
		status->enc_flags |= 1 << RX_ENC_FLAG_STBC_SHIFT;

	switch (FIELD_GET(MT_RXWI_RATE_BW, rate)) {
	case MT_PHY_BW_20:
		break;
	case MT_PHY_BW_40:
		status->bw = RATE_INFO_BW_40;
		break;
	case MT_PHY_BW_80:
		status->bw = RATE_INFO_BW_80;
		break;
	default:
		break;
	}

	return 0;
}
void mt76x02_mac_setaddr(struct mt76x02_dev *dev, const u8 *addr)
{
	static const u8 null_addr[ETH_ALEN] = {};
	int i;

	ether_addr_copy(dev->mt76.macaddr, addr);

	if (!is_valid_ether_addr(dev->mt76.macaddr)) {
		eth_random_addr(dev->mt76.macaddr);
		dev_info(dev->mt76.dev,
			 "Invalid MAC address, using random address %pM\n",
			 dev->mt76.macaddr);
	}

	mt76_wr(dev, MT_MAC_ADDR_DW0, get_unaligned_le32(dev->mt76.macaddr));
	mt76_wr(dev, MT_MAC_ADDR_DW1,
		get_unaligned_le16(dev->mt76.macaddr + 4) |
		FIELD_PREP(MT_MAC_ADDR_DW1_U2ME_MASK, 0xff));

	mt76_wr(dev, MT_MAC_BSSID_DW0,
		get_unaligned_le32(dev->mt76.macaddr));
	mt76_wr(dev, MT_MAC_BSSID_DW1,
		get_unaligned_le16(dev->mt76.macaddr + 4) |
		FIELD_PREP(MT_MAC_BSSID_DW1_MBSS_MODE, 3) | /* 8 APs + 8 STAs */
		MT_MAC_BSSID_DW1_MBSS_LOCAL_BIT);

	for (i = 0; i < 16; i++)
		mt76x02_mac_set_bssid(dev, i, null_addr);
}
EXPORT_SYMBOL_GPL(mt76x02_mac_setaddr);
static int
mt76x02_mac_get_rssi(struct mt76x02_dev *dev, s8 rssi, int chain)
{
	struct mt76x02_rx_freq_cal *cal = &dev->cal.rx;

	rssi += cal->rssi_offset[chain];
	rssi -= cal->lna_gain;

	return rssi;
}
int mt76x02_mac_process_rx(struct mt76x02_dev *dev, struct sk_buff *skb,
			   void *rxi)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *) skb->cb;
	struct mt76x02_rxwi *rxwi = rxi;
	struct mt76x02_sta *sta;
	u32 rxinfo = le32_to_cpu(rxwi->rxinfo);
	u32 ctl = le32_to_cpu(rxwi->ctl);
	u16 rate = le16_to_cpu(rxwi->rate);
	u16 tid_sn = le16_to_cpu(rxwi->tid_sn);
	bool unicast = rxwi->rxinfo & cpu_to_le32(MT_RXINFO_UNICAST);
	int pad_len = 0, nstreams = dev->mt76.chainmask & 0xf;
	s8 signal;
	u8 pn_len;
	u8 wcid;
	int len;

	if (!test_bit(MT76_STATE_RUNNING, &dev->mt76.state))
		return -EINVAL;

	if (rxinfo & MT_RXINFO_L2PAD)
		pad_len += 2;

	if (rxinfo & MT_RXINFO_DECRYPT) {
		status->flag |= RX_FLAG_DECRYPTED;
		status->flag |= RX_FLAG_MMIC_STRIPPED;
		status->flag |= RX_FLAG_MIC_STRIPPED;
		status->flag |= RX_FLAG_IV_STRIPPED;
	}

	wcid = FIELD_GET(MT_RXWI_CTL_WCID, ctl);
	sta = mt76x02_rx_get_sta(&dev->mt76, wcid);
	status->wcid = mt76x02_rx_get_sta_wcid(sta, unicast);

	len = FIELD_GET(MT_RXWI_CTL_MPDU_LEN, ctl);
	pn_len = FIELD_GET(MT_RXINFO_PN_LEN, rxinfo);
	if (pn_len) {
		int offset = ieee80211_get_hdrlen_from_skb(skb) + pad_len;
		u8 *data = skb->data + offset;
->iv
[0] = data
[7];
691 status
->iv
[1] = data
[6];
692 status
->iv
[2] = data
[5];
693 status
->iv
[3] = data
[4];
694 status
->iv
[4] = data
[1];
695 status
->iv
[5] = data
[0];
698 * Driver CCMP validation can't deal with fragments.
699 * Let mac80211 take care of it.
701 if (rxinfo
& MT_RXINFO_FRAG
) {
702 status
->flag
&= ~RX_FLAG_IV_STRIPPED
;
704 pad_len
+= pn_len
<< 2;
709 mt76x02_remove_hdr_pad(skb
, pad_len
);
711 if ((rxinfo
& MT_RXINFO_BA
) && !(rxinfo
& MT_RXINFO_NULL
))
714 if (WARN_ON_ONCE(len
> skb
->len
))
719 status
->chains
= BIT(0);
720 signal
= mt76x02_mac_get_rssi(dev
, rxwi
->rssi
[0], 0);
721 status
->chain_signal
[0] = signal
;
723 status
->chains
|= BIT(1);
724 status
->chain_signal
[1] = mt76x02_mac_get_rssi(dev
,
727 signal
= max_t(s8
, signal
, status
->chain_signal
[1]);
729 status
->signal
= signal
;
730 status
->freq
= dev
->mt76
.chandef
.chan
->center_freq
;
731 status
->band
= dev
->mt76
.chandef
.chan
->band
;
733 status
->tid
= FIELD_GET(MT_RXWI_TID
, tid_sn
);
734 status
->seqno
= FIELD_GET(MT_RXWI_SN
, tid_sn
);
736 return mt76x02_mac_process_rate(dev
, status
, rate
);
void mt76x02_mac_poll_tx_status(struct mt76x02_dev *dev, bool irq)
{
	struct mt76x02_tx_status stat = {};
	u8 update = 1;
	bool ret;

	if (!test_bit(MT76_STATE_RUNNING, &dev->mt76.state))
		return;

	trace_mac_txstat_poll(dev);

	while (!irq || !kfifo_is_full(&dev->txstatus_fifo)) {
		if (!spin_trylock(&dev->txstatus_fifo_lock))
			break;

		ret = mt76x02_mac_load_tx_status(dev, &stat);
		spin_unlock(&dev->txstatus_fifo_lock);

		if (!ret)
			break;

		if (!irq) {
			mt76x02_send_tx_status(dev, &stat, &update);
			continue;
		}

		kfifo_put(&dev->txstatus_fifo, stat);
	}
}
void mt76x02_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
			     struct mt76_queue_entry *e)
{
	struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
	struct mt76x02_txwi *txwi;
	u8 *txwi_ptr;

	if (!e->txwi) {
		dev_kfree_skb_any(e->skb);
		return;
	}

	mt76x02_mac_poll_tx_status(dev, false);

	txwi_ptr = mt76_get_txwi_ptr(mdev, e->txwi);
	txwi = (struct mt76x02_txwi *)txwi_ptr;
	trace_mac_txdone_add(dev, txwi->wcid, txwi->pktid);

	mt76_tx_complete_skb(mdev, e->skb);
}
EXPORT_SYMBOL_GPL(mt76x02_tx_complete_skb);
void mt76x02_mac_set_rts_thresh(struct mt76x02_dev *dev, u32 val)
{
	u32 data = 0;

	if (val != ~0)
		data = FIELD_PREP(MT_PROT_CFG_CTRL, 1) |
		       MT_PROT_CFG_RTS_THRESH;

	mt76_rmw_field(dev, MT_TX_RTS_CFG, MT_TX_RTS_CFG_THRESH, val);

	mt76_rmw(dev, MT_CCK_PROT_CFG,
		 MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
	mt76_rmw(dev, MT_OFDM_PROT_CFG,
		 MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
}
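/*
 * The six prot[] words mirror the consecutive protection config registers
 * starting at MT_CCK_PROT_CFG (CCK, OFDM, MM20, MM40, GF20, GF40), and
 * vht_prot[] the three VHT registers starting at MT_TX_PROT_CFG6; the
 * indices used below rely on that register layout.
 */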
void mt76x02_mac_set_tx_protection(struct mt76x02_dev *dev, bool legacy_prot,
				   int ht_mode)
{
	int mode = ht_mode & IEEE80211_HT_OP_MODE_PROTECTION;
	bool non_gf = !!(ht_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
	u32 prot[6];
	u32 vht_prot[3];
	int i;
	u16 rts_thr;

	for (i = 0; i < ARRAY_SIZE(prot); i++) {
		prot[i] = mt76_rr(dev, MT_CCK_PROT_CFG + i * 4);
		prot[i] &= ~MT_PROT_CFG_CTRL;
		if (i >= 2)
			prot[i] &= ~MT_PROT_CFG_RATE;
	}

	for (i = 0; i < ARRAY_SIZE(vht_prot); i++) {
		vht_prot[i] = mt76_rr(dev, MT_TX_PROT_CFG6 + i * 4);
		vht_prot[i] &= ~(MT_PROT_CFG_CTRL | MT_PROT_CFG_RATE);
	}

	rts_thr = mt76_get_field(dev, MT_TX_RTS_CFG, MT_TX_RTS_CFG_THRESH);

	if (rts_thr != 0xffff)
		prot[0] |= MT_PROT_CTRL_RTS_CTS;

	if (legacy_prot) {
		prot[1] |= MT_PROT_CTRL_CTS2SELF;

		prot[2] |= MT_PROT_RATE_CCK_11;
		prot[3] |= MT_PROT_RATE_CCK_11;
		prot[4] |= MT_PROT_RATE_CCK_11;
		prot[5] |= MT_PROT_RATE_CCK_11;

		vht_prot[0] |= MT_PROT_RATE_CCK_11;
		vht_prot[1] |= MT_PROT_RATE_CCK_11;
		vht_prot[2] |= MT_PROT_RATE_CCK_11;
	} else {
		if (rts_thr != 0xffff)
			prot[1] |= MT_PROT_CTRL_RTS_CTS;

		prot[2] |= MT_PROT_RATE_OFDM_24;
		prot[3] |= MT_PROT_RATE_DUP_OFDM_24;
		prot[4] |= MT_PROT_RATE_OFDM_24;
		prot[5] |= MT_PROT_RATE_DUP_OFDM_24;

		vht_prot[0] |= MT_PROT_RATE_OFDM_24;
		vht_prot[1] |= MT_PROT_RATE_DUP_OFDM_24;
		vht_prot[2] |= MT_PROT_RATE_SGI_OFDM_24;
	}

	switch (mode) {
	case IEEE80211_HT_OP_MODE_PROTECTION_NONMEMBER:
	case IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED:
		prot[2] |= MT_PROT_CTRL_RTS_CTS;
		prot[3] |= MT_PROT_CTRL_RTS_CTS;
		prot[4] |= MT_PROT_CTRL_RTS_CTS;
		prot[5] |= MT_PROT_CTRL_RTS_CTS;
		vht_prot[0] |= MT_PROT_CTRL_RTS_CTS;
		vht_prot[1] |= MT_PROT_CTRL_RTS_CTS;
		vht_prot[2] |= MT_PROT_CTRL_RTS_CTS;
		break;
	case IEEE80211_HT_OP_MODE_PROTECTION_20MHZ:
		prot[3] |= MT_PROT_CTRL_RTS_CTS;
		prot[5] |= MT_PROT_CTRL_RTS_CTS;
		vht_prot[1] |= MT_PROT_CTRL_RTS_CTS;
		vht_prot[2] |= MT_PROT_CTRL_RTS_CTS;
		break;
	}

	if (non_gf) {
		prot[4] |= MT_PROT_CTRL_RTS_CTS;
		prot[5] |= MT_PROT_CTRL_RTS_CTS;
	}

	for (i = 0; i < ARRAY_SIZE(prot); i++)
		mt76_wr(dev, MT_CCK_PROT_CFG + i * 4, prot[i]);

	for (i = 0; i < ARRAY_SIZE(vht_prot); i++)
		mt76_wr(dev, MT_TX_PROT_CFG6 + i * 4, vht_prot[i]);
}
void mt76x02_update_channel(struct mt76_dev *mdev)
{
	struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
	struct mt76_channel_state *state;
	u32 active, busy;

	state = mt76_channel_state(&dev->mt76, dev->mt76.chandef.chan);

	busy = mt76_rr(dev, MT_CH_BUSY);
	active = busy + mt76_rr(dev, MT_CH_IDLE);

	spin_lock_bh(&dev->mt76.cc_lock);
	state->cc_busy += busy;
	state->cc_active += active;
	spin_unlock_bh(&dev->mt76.cc_lock);
}
EXPORT_SYMBOL_GPL(mt76x02_update_channel);
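/*
 * 0x10f4 is an undocumented MAC status register; the bit pattern below
 * presumably mirrors the hang check from the vendor driver. On a match
 * the MAC is reset via the CSR bit and TX/RX are re-enabled.
 */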
static void mt76x02_check_mac_err(struct mt76x02_dev *dev)
{
	u32 val = mt76_rr(dev, 0x10f4);

	if (!(val & BIT(29)) || !(val & (BIT(7) | BIT(5))))
		return;

	dev_err(dev->mt76.dev, "mac specific condition occurred\n");

	mt76_set(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_RESET_CSR);
	udelay(10);
	mt76_wr(dev, MT_MAC_SYS_CTRL,
		MT_MAC_SYS_CTRL_ENABLE_TX | MT_MAC_SYS_CTRL_ENABLE_RX);
}
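/*
 * Block or unblock all transmissions while ED/CCA (energy detection)
 * deems the channel busy: MAC TX, hardware auto-responses and the TX/RX
 * antenna switch are toggled together.
 */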
static void
mt76x02_edcca_tx_enable(struct mt76x02_dev *dev, bool enable)
{
	if (enable) {
		u32 data;

		mt76_set(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_TX);
		mt76_set(dev, MT_AUTO_RSP_CFG, MT_AUTO_RSP_EN);

		data = mt76_rr(dev, MT_TX_PIN_CFG);
		data |= MT_TX_PIN_CFG_TXANT |
			MT_TX_PIN_CFG_RXANT |
			MT_TX_PIN_RFTR_EN |
			MT_TX_PIN_TRSW_EN;
		mt76_wr(dev, MT_TX_PIN_CFG, data);
	} else {
		mt76_clear(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_TX);
		mt76_clear(dev, MT_AUTO_RSP_CFG, MT_AUTO_RSP_EN);

		mt76_clear(dev, MT_TX_PIN_CFG, MT_TX_PIN_CFG_TXANT);
		mt76_clear(dev, MT_TX_PIN_CFG, MT_TX_PIN_CFG_RXANT);
	}
	dev->ed_tx_blocked = !enable;
}
void mt76x02_edcca_init(struct mt76x02_dev *dev, bool enable)
{
	dev->ed_trigger = 0;
	dev->ed_silent = 0;

	if (dev->ed_monitor && enable) {
		struct ieee80211_channel *chan = dev->mt76.chandef.chan;
		u8 ed_th = chan->band == NL80211_BAND_5GHZ ? 0x0e : 0x20;

		mt76_clear(dev, MT_TX_LINK_CFG, MT_TX_CFACK_EN);
		mt76_set(dev, MT_TXOP_CTRL_CFG, MT_TXOP_ED_CCA_EN);
		mt76_rmw(dev, MT_BBP(AGC, 2), GENMASK(15, 0),
			 ed_th << 8 | ed_th);
		mt76_set(dev, MT_TXOP_HLDR_ET, MT_TXOP_HLDR_TX40M_BLK_EN);
	} else {
		mt76_set(dev, MT_TX_LINK_CFG, MT_TX_CFACK_EN);
		mt76_clear(dev, MT_TXOP_CTRL_CFG, MT_TXOP_ED_CCA_EN);
		if (is_mt76x2(dev)) {
			mt76_wr(dev, MT_BBP(AGC, 2), 0x00007070);
			mt76_set(dev, MT_TXOP_HLDR_ET,
				 MT_TXOP_HLDR_TX40M_BLK_EN);
		} else {
			mt76_wr(dev, MT_BBP(AGC, 2), 0x003a6464);
			mt76_clear(dev, MT_TXOP_HLDR_ET,
				   MT_TXOP_HLDR_TX40M_BLK_EN);
		}
	}
	mt76x02_edcca_tx_enable(dev, true);
	dev->ed_monitor_learning = true;

	/* clear previous CCA timer value */
	mt76_rr(dev, MT_ED_CCA_TIMER);
	dev->ed_time = ktime_get_boottime();
}
EXPORT_SYMBOL_GPL(mt76x02_edcca_init);
#define MT_EDCCA_TH		92
#define MT_EDCCA_BLOCK_TH	2
#define MT_EDCCA_LEARN_TH	50
#define MT_EDCCA_LEARN_CCA	180
#define MT_EDCCA_LEARN_TIMEOUT	(20 * HZ)
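/*
 * busy > MT_EDCCA_TH counts a measurement period as "triggered"; more
 * than MT_EDCCA_BLOCK_TH triggered (silent) periods in a row block
 * (unblock) TX. The LEARN_* thresholds decide when to leave the initial
 * learning state, during which TX is never blocked.
 */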
static void mt76x02_edcca_check(struct mt76x02_dev *dev)
{
	ktime_t cur_time;
	u32 active, val, busy;

	cur_time = ktime_get_boottime();
	val = mt76_rr(dev, MT_ED_CCA_TIMER);

	active = ktime_to_us(ktime_sub(cur_time, dev->ed_time));
	dev->ed_time = cur_time;

	busy = (val * 100) / active;
	busy = min_t(u32, busy, 100);

	if (busy > MT_EDCCA_TH) {
		dev->ed_trigger++;
		dev->ed_silent = 0;
	} else {
		dev->ed_silent++;
		dev->ed_trigger = 0;
	}

	if (dev->cal.agc_lowest_gain &&
	    dev->cal.false_cca > MT_EDCCA_LEARN_CCA &&
	    dev->ed_trigger > MT_EDCCA_LEARN_TH) {
		dev->ed_monitor_learning = false;
		dev->ed_trigger_timeout = jiffies + 20 * HZ;
	} else if (!dev->ed_monitor_learning &&
		   time_is_after_jiffies(dev->ed_trigger_timeout)) {
		dev->ed_monitor_learning = true;
		mt76x02_edcca_tx_enable(dev, true);
	}

	if (dev->ed_monitor_learning)
		return;

	if (dev->ed_trigger > MT_EDCCA_BLOCK_TH && !dev->ed_tx_blocked)
		mt76x02_edcca_tx_enable(dev, false);
	else if (dev->ed_silent > MT_EDCCA_BLOCK_TH && dev->ed_tx_blocked)
		mt76x02_edcca_tx_enable(dev, true);
}
void mt76x02_mac_work(struct work_struct *work)
{
	struct mt76x02_dev *dev = container_of(work, struct mt76x02_dev,
					       mt76.mac_work.work);
	int i, idx;

	mutex_lock(&dev->mt76.mutex);

	mt76x02_update_channel(&dev->mt76);
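	/* each MT_TX_AGG_CNT register packs two 16-bit A-MPDU size counters */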
	for (i = 0, idx = 0; i < 16; i++) {
		u32 val = mt76_rr(dev, MT_TX_AGG_CNT(i));

		dev->aggr_stats[idx++] += val & 0xffff;
		dev->aggr_stats[idx++] += val >> 16;
	}

	if (!dev->mt76.beacon_mask)
		mt76x02_check_mac_err(dev);

	if (dev->ed_monitor)
		mt76x02_edcca_check(dev);

	mutex_unlock(&dev->mt76.mutex);

	mt76_tx_status_check(&dev->mt76, NULL, false);

	ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mt76.mac_work,
				     MT_MAC_WORK_INTERVAL);
}
void mt76x02_mac_set_bssid(struct mt76x02_dev *dev, u8 idx, const u8 *addr)
{
	idx &= 7;
	mt76_wr(dev, MT_MAC_APC_BSSID_L(idx), get_unaligned_le32(addr));
	mt76_rmw_field(dev, MT_MAC_APC_BSSID_H(idx), MT_MAC_APC_BSSID_H_ADDR,
		       get_unaligned_le16(addr + 4));
}