// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2022 MediaTek Inc.
 */

#include <linux/etherdevice.h>
#include <linux/timekeeping.h>

#define to_rssi(field, rcpi)	((FIELD_GET(field, rcpi) - 220) / 2)

static const struct mt7996_dfs_radar_spec etsi_radar_specs = {
	.pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 },
	.radar_pattern = {
		[5] =  { 1, 0,  6, 32, 28, 0,  990, 5010, 17, 1,  1 },
		[6] =  { 1, 0,  9, 32, 28, 0,  615, 5010, 27, 1,  1 },
		[7] =  { 1, 0, 15, 32, 28, 0,  240,  445, 27, 1,  1 },
		[8] =  { 1, 0, 12, 32, 28, 0,  240,  510, 42, 1,  1 },
		[9] =  { 1, 1,  0,  0,  0, 0, 2490, 3343, 14, 0, 0, 12, 32, 28, { }, 126 },
		[10] = { 1, 1,  0,  0,  0, 0, 2490, 3343, 14, 0, 0, 15, 32, 24, { }, 126 },
		[11] = { 1, 1,  0,  0,  0, 0,  823, 2510, 14, 0, 0, 18, 32, 28, { }, 54 },
		[12] = { 1, 1,  0,  0,  0, 0,  823, 2510, 14, 0, 0, 27, 32, 24, { }, 54 },
	},
};

static const struct mt7996_dfs_radar_spec fcc_radar_specs = {
	.pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 },
	.radar_pattern = {
		[0] = { 1, 0,  8,  32, 28, 0, 508, 3076, 13, 1,  1 },
		[1] = { 1, 0, 12,  32, 28, 0, 140,  240, 17, 1,  1 },
		[2] = { 1, 0,  8,  32, 28, 0, 190,  510, 22, 1,  1 },
		[3] = { 1, 0,  6,  32, 28, 0, 190,  510, 32, 1,  1 },
		[4] = { 1, 0,  9, 255, 28, 0, 323,  343, 13, 1, 32 },
	},
};

static const struct mt7996_dfs_radar_spec jp_radar_specs = {
	.pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 },
	.radar_pattern = {
		[0] =  { 1, 0,  8,  32, 28, 0,  508, 3076,  13, 1,  1 },
		[1] =  { 1, 0, 12,  32, 28, 0,  140,  240,  17, 1,  1 },
		[2] =  { 1, 0,  8,  32, 28, 0,  190,  510,  22, 1,  1 },
		[3] =  { 1, 0,  6,  32, 28, 0,  190,  510,  32, 1,  1 },
		[4] =  { 1, 0,  9, 255, 28, 0,  323,  343,  13, 1, 32 },
		[13] = { 1, 0,  7,  32, 28, 0, 3836, 3856,  14, 1,  1 },
		[14] = { 1, 0,  6,  32, 28, 0,  615, 5010, 110, 1,  1 },
		[15] = { 1, 1,  0,   0,  0, 0,   15, 5010, 110, 0,  0, 12, 32, 28 },
	},
};

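/* Map a WLAN index from the RXD to its mt76_wcid. Unicast frames use the
 * indexed WTBL entry directly; group-addressed frames are attributed to
 * the vif's own station entry instead.
 */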
static struct mt76_wcid *mt7996_rx_get_wcid(struct mt7996_dev *dev,
					    u16 idx, bool unicast)
{
	struct mt7996_sta *sta;
	struct mt76_wcid *wcid;

	if (idx >= ARRAY_SIZE(dev->mt76.wcid))
		return NULL;

	wcid = rcu_dereference(dev->mt76.wcid[idx]);
	if (unicast || !wcid)
		return wcid;

	if (!wcid->sta)
		return NULL;

	sta = container_of(wcid, struct mt7996_sta, wcid);
	if (!sta->vif)
		return NULL;

	return &sta->vif->sta.wcid;
}

bool mt7996_mac_wtbl_update(struct mt7996_dev *dev, int idx, u32 mask)
{
	mt76_rmw(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_WLAN_IDX,
		 FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, idx) | mask);

	return mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY,
			 0, 5000);
}

u32 mt7996_mac_wtbl_lmac_addr(struct mt7996_dev *dev, u16 wcid, u8 dw)
{
	mt76_wr(dev, MT_WTBLON_TOP_WDUCR,
		FIELD_PREP(MT_WTBLON_TOP_WDUCR_GROUP, (wcid >> 7)));

	return MT_WTBL_LMAC_OFFS(wcid, dw);
}

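/* Poll airtime counters for every station queued on sta_poll_list: WTBL
 * DW20 onwards holds the per-AC TX/RX airtime (BIT(30) marks a counter
 * close to wrapping, which triggers an admission-counter clear), and
 * WTBL DW34 carries the per-antenna RCPI of response frames.
 */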
static void mt7996_mac_sta_poll(struct mt7996_dev *dev)
{
	static const u8 ac_to_tid[] = {
		[IEEE80211_AC_BE] = 0,
		[IEEE80211_AC_BK] = 1,
		[IEEE80211_AC_VI] = 4,
		[IEEE80211_AC_VO] = 6
	};
	struct ieee80211_sta *sta;
	struct mt7996_sta *msta;
	u32 tx_time[IEEE80211_NUM_ACS], rx_time[IEEE80211_NUM_ACS];
	LIST_HEAD(sta_poll_list);
	int i;

	spin_lock_bh(&dev->mt76.sta_poll_lock);
	list_splice_init(&dev->mt76.sta_poll_list, &sta_poll_list);
	spin_unlock_bh(&dev->mt76.sta_poll_lock);

	rcu_read_lock();

	while (true) {
		bool clear = false;
		u32 addr, val;
		u16 idx;
		s8 rssi[4];

		spin_lock_bh(&dev->mt76.sta_poll_lock);
		if (list_empty(&sta_poll_list)) {
			spin_unlock_bh(&dev->mt76.sta_poll_lock);
			break;
		}
		msta = list_first_entry(&sta_poll_list,
					struct mt7996_sta, wcid.poll_list);
		list_del_init(&msta->wcid.poll_list);
		spin_unlock_bh(&dev->mt76.sta_poll_lock);

		idx = msta->wcid.idx;

		/* refresh peer's airtime reporting */
		addr = mt7996_mac_wtbl_lmac_addr(dev, idx, 20);

		for (i = 0; i < IEEE80211_NUM_ACS; i++) {
			u32 tx_last = msta->airtime_ac[i];
			u32 rx_last = msta->airtime_ac[i + 4];

			msta->airtime_ac[i] = mt76_rr(dev, addr);
			msta->airtime_ac[i + 4] = mt76_rr(dev, addr + 4);

			tx_time[i] = msta->airtime_ac[i] - tx_last;
			rx_time[i] = msta->airtime_ac[i + 4] - rx_last;

			if ((tx_last | rx_last) & BIT(30))
				clear = true;

			addr += 8;
		}

		if (clear) {
			mt7996_mac_wtbl_update(dev, idx,
					       MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
			memset(msta->airtime_ac, 0, sizeof(msta->airtime_ac));
		}

		if (!msta->wcid.sta)
			continue;

		sta = container_of((void *)msta, struct ieee80211_sta,
				   drv_priv);
		for (i = 0; i < IEEE80211_NUM_ACS; i++) {
			u8 q = mt76_connac_lmac_mapping(i);
			u32 tx_cur = tx_time[q];
			u32 rx_cur = rx_time[q];
			u8 tid = ac_to_tid[i];

			if (!tx_cur && !rx_cur)
				continue;

			ieee80211_sta_register_airtime(sta, tid, tx_cur, rx_cur);
		}

		/* get signal strength of resp frames (CTS/BA/ACK) */
		addr = mt7996_mac_wtbl_lmac_addr(dev, idx, 34);
		val = mt76_rr(dev, addr);

		rssi[0] = to_rssi(GENMASK(7, 0), val);
		rssi[1] = to_rssi(GENMASK(15, 8), val);
		rssi[2] = to_rssi(GENMASK(23, 16), val);
		rssi[3] = to_rssi(GENMASK(31, 24), val);

		msta->ack_signal =
			mt76_rx_signal(msta->vif->phy->mt76->antenna_mask, rssi);

		ewma_avg_signal_add(&msta->avg_ack_signal, -msta->ack_signal);
	}

	rcu_read_unlock();
}

void mt7996_mac_enable_rtscts(struct mt7996_dev *dev,
			      struct ieee80211_vif *vif, bool enable)
{
	struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
	u32 addr;

	addr = mt7996_mac_wtbl_lmac_addr(dev, mvif->sta.wcid.idx, 5);
	if (enable)
		mt76_set(dev, addr, BIT(5));
	else
		mt76_clear(dev, addr, BIT(5));
}

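/* For fragmented mesh frames the original 802.11 header has to be
 * rebuilt from the RXD fields and the translated ethernet header, so
 * the frame can be handed back to mac80211 in 802.11 form.
 */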
/* The HW does not translate the mac header to 802.3 for mesh point */
static int mt7996_reverse_frag0_hdr_trans(struct sk_buff *skb, u16 hdr_gap)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct ethhdr *eth_hdr = (struct ethhdr *)(skb->data + hdr_gap);
	struct mt7996_sta *msta = (struct mt7996_sta *)status->wcid;
	__le32 *rxd = (__le32 *)skb->data;
	struct ieee80211_sta *sta;
	struct ieee80211_vif *vif;
	struct ieee80211_hdr hdr;
	u16 frame_control;

	if (le32_get_bits(rxd[3], MT_RXD3_NORMAL_ADDR_TYPE) !=
	    MT_RXD3_NORMAL_U2M)
		return -EINVAL;

	if (!(le32_to_cpu(rxd[1]) & MT_RXD1_NORMAL_GROUP_4))
		return -EINVAL;

	if (!msta || !msta->vif)
		return -EINVAL;

	sta = container_of((void *)msta, struct ieee80211_sta, drv_priv);
	vif = container_of((void *)msta->vif, struct ieee80211_vif, drv_priv);

	/* store the info from RXD and ethhdr to avoid being overridden */
	frame_control = le32_get_bits(rxd[8], MT_RXD8_FRAME_CONTROL);
	hdr.frame_control = cpu_to_le16(frame_control);
	hdr.seq_ctrl = cpu_to_le16(le32_get_bits(rxd[10], MT_RXD10_SEQ_CTRL));
	hdr.duration_id = 0;

	ether_addr_copy(hdr.addr1, vif->addr);
	ether_addr_copy(hdr.addr2, sta->addr);
	switch (frame_control & (IEEE80211_FCTL_TODS |
				 IEEE80211_FCTL_FROMDS)) {
	case 0:
		ether_addr_copy(hdr.addr3, vif->bss_conf.bssid);
		break;
	case IEEE80211_FCTL_FROMDS:
		ether_addr_copy(hdr.addr3, eth_hdr->h_source);
		break;
	case IEEE80211_FCTL_TODS:
		ether_addr_copy(hdr.addr3, eth_hdr->h_dest);
		break;
	case IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS:
		ether_addr_copy(hdr.addr3, eth_hdr->h_dest);
		ether_addr_copy(hdr.addr4, eth_hdr->h_source);
		break;
	default:
		break;
	}

	skb_pull(skb, hdr_gap + sizeof(struct ethhdr) - 2);
	if (eth_hdr->h_proto == cpu_to_be16(ETH_P_AARP) ||
	    eth_hdr->h_proto == cpu_to_be16(ETH_P_IPX))
		ether_addr_copy(skb_push(skb, ETH_ALEN), bridge_tunnel_header);
	else if (be16_to_cpu(eth_hdr->h_proto) >= ETH_P_802_3_MIN)
		ether_addr_copy(skb_push(skb, ETH_ALEN), rfc1042_header);
	else
		skb_pull(skb, 2);

	if (ieee80211_has_order(hdr.frame_control))
		memcpy(skb_push(skb, IEEE80211_HT_CTL_LEN), &rxd[11],
		       IEEE80211_HT_CTL_LEN);
	if (ieee80211_is_data_qos(hdr.frame_control)) {
		__le16 qos_ctrl;

		qos_ctrl = cpu_to_le16(le32_get_bits(rxd[10], MT_RXD10_QOS_CTL));
		memcpy(skb_push(skb, IEEE80211_QOS_CTL_LEN), &qos_ctrl,
		       IEEE80211_QOS_CTL_LEN);
	}

	if (ieee80211_has_a4(hdr.frame_control))
		memcpy(skb_push(skb, sizeof(hdr)), &hdr, sizeof(hdr));
	else
		memcpy(skb_push(skb, sizeof(hdr) - 6), &hdr, sizeof(hdr) - 6);

	return 0;
}

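/* Decode the P-RXV rate words (rxv[0]/rxv[2]) into mac80211 rate info:
 * the TX mode field selects CCK/OFDM/HT/VHT/HE/EHT handling, while the
 * frame mode field maps to the RX bandwidth reported in mt76_rx_status.
 */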
static int
mt7996_mac_fill_rx_rate(struct mt7996_dev *dev,
			struct mt76_rx_status *status,
			struct ieee80211_supported_band *sband,
			__le32 *rxv, u8 *mode)
{
	u32 v0, v2;
	u8 stbc, gi, bw, dcm, nss;
	int i, idx;
	bool cck = false;

	v0 = le32_to_cpu(rxv[0]);
	v2 = le32_to_cpu(rxv[2]);

	idx = FIELD_GET(MT_PRXV_TX_RATE, v0);
	i = idx;
	nss = FIELD_GET(MT_PRXV_NSTS, v0) + 1;

	stbc = FIELD_GET(MT_PRXV_HT_STBC, v2);
	gi = FIELD_GET(MT_PRXV_HT_SHORT_GI, v2);
	*mode = FIELD_GET(MT_PRXV_TX_MODE, v2);
	dcm = FIELD_GET(MT_PRXV_DCM, v2);
	bw = FIELD_GET(MT_PRXV_FRAME_MODE, v2);

	switch (*mode) {
	case MT_PHY_TYPE_CCK:
		cck = true;
		fallthrough;
	case MT_PHY_TYPE_OFDM:
		i = mt76_get_rate(&dev->mt76, sband, i, cck);
		break;
	case MT_PHY_TYPE_HT_GF:
	case MT_PHY_TYPE_HT:
		status->encoding = RX_ENC_HT;
		if (gi)
			status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
		if (i > 31)
			return -EINVAL;
		break;
	case MT_PHY_TYPE_VHT:
		status->nss = nss;
		status->encoding = RX_ENC_VHT;
		if (gi)
			status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
		if (i > 11)
			return -EINVAL;
		break;
	case MT_PHY_TYPE_HE_MU:
	case MT_PHY_TYPE_HE_SU:
	case MT_PHY_TYPE_HE_EXT_SU:
	case MT_PHY_TYPE_HE_TB:
		status->nss = nss;
		status->encoding = RX_ENC_HE;
		i &= GENMASK(3, 0);

		if (gi <= NL80211_RATE_INFO_HE_GI_3_2)
			status->he_gi = gi;

		status->he_dcm = dcm;
		break;
	case MT_PHY_TYPE_EHT_SU:
	case MT_PHY_TYPE_EHT_TRIG:
	case MT_PHY_TYPE_EHT_MU:
		status->nss = nss;
		status->encoding = RX_ENC_EHT;
		i &= GENMASK(3, 0);

		if (gi <= NL80211_RATE_INFO_EHT_GI_3_2)
			status->eht.gi = gi;
		break;
	default:
		return -EINVAL;
	}
	status->rate_idx = i;

	switch (bw) {
	case IEEE80211_STA_RX_BW_20:
		break;
	case IEEE80211_STA_RX_BW_40:
		if (*mode & MT_PHY_TYPE_HE_EXT_SU &&
		    (idx & MT_PRXV_TX_ER_SU_106T)) {
			status->bw = RATE_INFO_BW_HE_RU;
			status->he_ru =
				NL80211_RATE_INFO_HE_RU_ALLOC_106;
		} else {
			status->bw = RATE_INFO_BW_40;
		}
		break;
	case IEEE80211_STA_RX_BW_80:
		status->bw = RATE_INFO_BW_80;
		break;
	case IEEE80211_STA_RX_BW_160:
		status->bw = RATE_INFO_BW_160;
		break;
	/* rxv reports bw 320-1 and 320-2 separately */
	case IEEE80211_STA_RX_BW_320:
	case IEEE80211_STA_RX_BW_320 + 1:
		status->bw = RATE_INFO_BW_320;
		break;
	default:
		return -EINVAL;
	}

	status->enc_flags |= RX_ENC_FLAG_STBC_MASK * stbc;
	if (*mode < MT_PHY_TYPE_HE_SU && gi)
		status->enc_flags |= RX_ENC_FLAG_SHORT_GI;

	return 0;
}

static void
mt7996_wed_check_ppe(struct mt7996_dev *dev, struct mt76_queue *q,
		     struct mt7996_sta *msta, struct sk_buff *skb,
		     u32 info)
{
	struct ieee80211_vif *vif;
	struct wireless_dev *wdev;

	if (!msta || !msta->vif)
		return;

	if (!mt76_queue_is_wed_rx(q))
		return;

	if (!(info & MT_DMA_INFO_PPE_VLD))
		return;

	vif = container_of((void *)msta->vif, struct ieee80211_vif,
			   drv_priv);
	wdev = ieee80211_vif_to_wdev(vif);
	skb->dev = wdev->netdev;

	mtk_wed_device_ppe_check(&dev->mt76.mmio.wed, skb,
				 FIELD_GET(MT_DMA_PPE_CPU_REASON, info),
				 FIELD_GET(MT_DMA_PPE_ENTRY, info));
}

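/* Parse a normal RX descriptor: the fixed RXD words come first, followed
 * by optional groups (GROUP_4 header info, GROUP_1 security IV, GROUP_2
 * timestamp, GROUP_3 P-RXV and GROUP_5 C-RXV) whose presence is flagged
 * in RXD1. Header, checksum and decryption state are translated into
 * mt76_rx_status flags before the frame is handed to mac80211.
 */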
static int
mt7996_mac_fill_rx(struct mt7996_dev *dev, enum mt76_rxq_id q,
		   struct sk_buff *skb, u32 *info)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_phy *mphy = &dev->mt76.phy;
	struct mt7996_phy *phy = &dev->phy;
	struct ieee80211_supported_band *sband;
	__le32 *rxd = (__le32 *)skb->data;
	__le32 *rxv = NULL;
	u32 rxd0 = le32_to_cpu(rxd[0]);
	u32 rxd1 = le32_to_cpu(rxd[1]);
	u32 rxd2 = le32_to_cpu(rxd[2]);
	u32 rxd3 = le32_to_cpu(rxd[3]);
	u32 rxd4 = le32_to_cpu(rxd[4]);
	u32 csum_mask = MT_RXD3_NORMAL_IP_SUM | MT_RXD3_NORMAL_UDP_TCP_SUM;
	u32 csum_status = *(u32 *)skb->cb;
	u32 mesh_mask = MT_RXD0_MESH | MT_RXD0_MHCP;
	bool is_mesh = (rxd0 & mesh_mask) == mesh_mask;
	bool unicast, insert_ccmp_hdr = false;
	u8 remove_pad, amsdu_info, band_idx;
	u8 mode = 0, qos_ctl = 0;
	bool hdr_trans;
	u16 hdr_gap;
	u16 seq_ctrl = 0;
	__le16 fc = 0;
	int idx;
	u8 hw_aggr = false;
	struct mt7996_sta *msta = NULL;

	hw_aggr = status->aggr;
	memset(status, 0, sizeof(*status));

	band_idx = FIELD_GET(MT_RXD1_NORMAL_BAND_IDX, rxd1);
	mphy = dev->mt76.phys[band_idx];
	phy = mphy->priv;
	status->phy_idx = mphy->band_idx;

	if (!test_bit(MT76_STATE_RUNNING, &mphy->state))
		return -EINVAL;

	if (rxd2 & MT_RXD2_NORMAL_AMSDU_ERR)
		return -EINVAL;

	hdr_trans = rxd2 & MT_RXD2_NORMAL_HDR_TRANS;
	if (hdr_trans && (rxd1 & MT_RXD1_NORMAL_CM))
		return -EINVAL;

	/* ICV error or CCMP/BIP/WPI MIC error */
	if (rxd1 & MT_RXD1_NORMAL_ICV_ERR)
		status->flag |= RX_FLAG_ONLY_MONITOR;

	unicast = FIELD_GET(MT_RXD3_NORMAL_ADDR_TYPE, rxd3) == MT_RXD3_NORMAL_U2M;
	idx = FIELD_GET(MT_RXD1_NORMAL_WLAN_IDX, rxd1);
	status->wcid = mt7996_rx_get_wcid(dev, idx, unicast);

	if (status->wcid) {
		msta = container_of(status->wcid, struct mt7996_sta, wcid);
		spin_lock_bh(&dev->mt76.sta_poll_lock);
		if (list_empty(&msta->wcid.poll_list))
			list_add_tail(&msta->wcid.poll_list,
				      &dev->mt76.sta_poll_list);
		spin_unlock_bh(&dev->mt76.sta_poll_lock);
	}

	status->freq = mphy->chandef.chan->center_freq;
	status->band = mphy->chandef.chan->band;
	if (status->band == NL80211_BAND_5GHZ)
		sband = &mphy->sband_5g.sband;
	else if (status->band == NL80211_BAND_6GHZ)
		sband = &mphy->sband_6g.sband;
	else
		sband = &mphy->sband_2g.sband;

	if (!sband->channels)
		return -EINVAL;

	if ((rxd3 & csum_mask) == csum_mask &&
	    !(csum_status & (BIT(0) | BIT(2) | BIT(3))))
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (rxd3 & MT_RXD3_NORMAL_FCS_ERR)
		status->flag |= RX_FLAG_FAILED_FCS_CRC;

	if (rxd1 & MT_RXD1_NORMAL_TKIP_MIC_ERR)
		status->flag |= RX_FLAG_MMIC_ERROR;

	if (FIELD_GET(MT_RXD2_NORMAL_SEC_MODE, rxd2) != 0 &&
	    !(rxd1 & (MT_RXD1_NORMAL_CLM | MT_RXD1_NORMAL_CM))) {
		status->flag |= RX_FLAG_DECRYPTED;
		status->flag |= RX_FLAG_IV_STRIPPED;
		status->flag |= RX_FLAG_MMIC_STRIPPED | RX_FLAG_MIC_STRIPPED;
	}

	remove_pad = FIELD_GET(MT_RXD2_NORMAL_HDR_OFFSET, rxd2);

	if (rxd2 & MT_RXD2_NORMAL_MAX_LEN_ERROR)
		return -EINVAL;

	rxd += 8;
	if (rxd1 & MT_RXD1_NORMAL_GROUP_4) {
		u32 v0 = le32_to_cpu(rxd[0]);
		u32 v2 = le32_to_cpu(rxd[2]);

		fc = cpu_to_le16(FIELD_GET(MT_RXD8_FRAME_CONTROL, v0));
		qos_ctl = FIELD_GET(MT_RXD10_QOS_CTL, v2);
		seq_ctrl = FIELD_GET(MT_RXD10_SEQ_CTRL, v2);

		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}

	if (rxd1 & MT_RXD1_NORMAL_GROUP_1) {
		u8 *data = (u8 *)rxd;

		if (status->flag & RX_FLAG_DECRYPTED) {
			switch (FIELD_GET(MT_RXD2_NORMAL_SEC_MODE, rxd2)) {
			case MT_CIPHER_AES_CCMP:
			case MT_CIPHER_CCMP_CCX:
			case MT_CIPHER_CCMP_256:
				insert_ccmp_hdr =
					FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2);
				fallthrough;
			case MT_CIPHER_TKIP:
			case MT_CIPHER_TKIP_NO_MIC:
			case MT_CIPHER_GCMP:
			case MT_CIPHER_GCMP_256:
				status->iv[0] = data[5];
				status->iv[1] = data[4];
				status->iv[2] = data[3];
				status->iv[3] = data[2];
				status->iv[4] = data[1];
				status->iv[5] = data[0];
				break;
			default:
				break;
			}
		}
		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}

	if (rxd1 & MT_RXD1_NORMAL_GROUP_2) {
		status->timestamp = le32_to_cpu(rxd[0]);
		status->flag |= RX_FLAG_MACTIME_START;

		if (!(rxd2 & MT_RXD2_NORMAL_NON_AMPDU)) {
			status->flag |= RX_FLAG_AMPDU_DETAILS;

			/* all subframes of an A-MPDU have the same timestamp */
			if (phy->rx_ampdu_ts != status->timestamp) {
				if (!++phy->ampdu_ref)
					phy->ampdu_ref++;
			}
			phy->rx_ampdu_ts = status->timestamp;

			status->ampdu_ref = phy->ampdu_ref;
		}

		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}

	/* RXD Group 3 - P-RXV */
	if (rxd1 & MT_RXD1_NORMAL_GROUP_3) {
		u32 v3;
		int ret;

		rxv = rxd;
		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;

		v3 = le32_to_cpu(rxv[3]);

		status->chains = mphy->antenna_mask;
		status->chain_signal[0] = to_rssi(MT_PRXV_RCPI0, v3);
		status->chain_signal[1] = to_rssi(MT_PRXV_RCPI1, v3);
		status->chain_signal[2] = to_rssi(MT_PRXV_RCPI2, v3);
		status->chain_signal[3] = to_rssi(MT_PRXV_RCPI3, v3);

		/* RXD Group 5 - C-RXV */
		if (rxd1 & MT_RXD1_NORMAL_GROUP_5) {
			rxd += 24;
			if ((u8 *)rxd - skb->data >= skb->len)
				return -EINVAL;
		}

		ret = mt7996_mac_fill_rx_rate(dev, status, sband, rxv, &mode);
		if (ret < 0)
			return ret;
	}

	amsdu_info = FIELD_GET(MT_RXD4_NORMAL_PAYLOAD_FORMAT, rxd4);
	status->amsdu = !!amsdu_info;
	if (status->amsdu) {
		status->first_amsdu = amsdu_info == MT_RXD4_FIRST_AMSDU_FRAME;
		status->last_amsdu = amsdu_info == MT_RXD4_LAST_AMSDU_FRAME;
	}

	hdr_gap = (u8 *)rxd - skb->data + 2 * remove_pad;
	if (hdr_trans && ieee80211_has_morefrags(fc)) {
		if (mt7996_reverse_frag0_hdr_trans(skb, hdr_gap))
			return -EINVAL;
		hdr_trans = false;
	} else {
		int pad_start = 0;

		skb_pull(skb, hdr_gap);
		if (!hdr_trans && status->amsdu && !(ieee80211_has_a4(fc) && is_mesh)) {
			pad_start = ieee80211_get_hdrlen_from_skb(skb);
		} else if (hdr_trans && (rxd2 & MT_RXD2_NORMAL_HDR_TRANS_ERROR)) {
			/* When header translation failure is indicated,
			 * the hardware will insert an extra 2-byte field
			 * containing the data length after the protocol
			 * type field. This happens either when the LLC-SNAP
			 * pattern did not match, or if a VLAN header was
			 * detected.
			 */
			pad_start = 12;
			if (get_unaligned_be16(skb->data + pad_start) == ETH_P_8021Q)
				pad_start += 4;
			else
				pad_start = 0;
		}

		if (pad_start) {
			memmove(skb->data + 2, skb->data, pad_start);
			skb_pull(skb, 2);
		}
	}

	if (!hdr_trans) {
		struct ieee80211_hdr *hdr;

		if (insert_ccmp_hdr) {
			u8 key_id = FIELD_GET(MT_RXD1_NORMAL_KEY_ID, rxd1);

			mt76_insert_ccmp_hdr(skb, key_id);
		}

		hdr = mt76_skb_get_hdr(skb);
		fc = hdr->frame_control;
		if (ieee80211_is_data_qos(fc)) {
			u8 *qos = ieee80211_get_qos_ctl(hdr);

			seq_ctrl = le16_to_cpu(hdr->seq_ctrl);
			qos_ctl = *qos;

			/* Mesh DA/SA/Length will be stripped after hardware
			 * de-amsdu, so the amsdu present bit needs to be
			 * cleared here to mark it as a normal mesh frame.
			 */
			if (ieee80211_has_a4(fc) && is_mesh && status->amsdu)
				*qos &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
		}
	} else {
		status->flag |= RX_FLAG_8023;
		mt7996_wed_check_ppe(dev, &dev->mt76.q_rx[q], msta, skb,
				     *info);
	}

	if (rxv && mode >= MT_PHY_TYPE_HE_SU && !(status->flag & RX_FLAG_8023))
		mt76_connac3_mac_decode_he_radiotap(skb, rxv, mode);

	if (!status->wcid || !ieee80211_is_data_qos(fc) || hw_aggr)
		return 0;

	status->aggr = unicast &&
		       !ieee80211_is_qos_nullfunc(fc);
	status->qos_ctl = qos_ctl;
	status->seqno = IEEE80211_SEQ_TO_SN(seq_ctrl);

	return 0;
}

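/* Fill the TXD fields specific to 802.3 (hardware header translation)
 * frames: header format, TID, ethertype class and data frame type.
 */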
static void
mt7996_mac_write_txwi_8023(struct mt7996_dev *dev, __le32 *txwi,
			   struct sk_buff *skb, struct mt76_wcid *wcid)
{
	u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
	u8 fc_type, fc_stype;
	u16 ethertype;
	bool wmm = false;
	u32 val;

	if (wcid->sta) {
		struct ieee80211_sta *sta;

		sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv);
		wmm = sta->wme;
	}

	val = FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_3) |
	      FIELD_PREP(MT_TXD1_TID, tid);

	ethertype = get_unaligned_be16(&skb->data[12]);
	if (ethertype >= ETH_P_802_3_MIN)
		val |= MT_TXD1_ETH_802_3;

	txwi[1] |= cpu_to_le32(val);

	fc_type = IEEE80211_FTYPE_DATA >> 2;
	fc_stype = wmm ? IEEE80211_STYPE_QOS_DATA >> 4 : 0;

	val = FIELD_PREP(MT_TXD2_FRAME_TYPE, fc_type) |
	      FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype);

	txwi[2] |= cpu_to_le32(val);

	if (wcid->amsdu)
		txwi[3] |= cpu_to_le32(MT_TXD3_HW_AMSDU);
}

static void
mt7996_mac_write_txwi_80211(struct mt7996_dev *dev, __le32 *txwi,
			    struct sk_buff *skb, struct ieee80211_key_conf *key)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	bool multicast = is_multicast_ether_addr(hdr->addr1);
	u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
	__le16 fc = hdr->frame_control, sc = hdr->seq_ctrl;
	u8 fc_type, fc_stype;
	u32 val;

	if (ieee80211_is_action(fc) &&
	    mgmt->u.action.category == WLAN_CATEGORY_BACK &&
	    mgmt->u.action.u.addba_req.action_code == WLAN_ACTION_ADDBA_REQ)
		tid = MT_TX_ADDBA;
	else if (ieee80211_is_mgmt(hdr->frame_control))
		tid = MT_TX_NORMAL;

	val = FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_11) |
	      FIELD_PREP(MT_TXD1_HDR_INFO,
			 ieee80211_get_hdrlen_from_skb(skb) / 2) |
	      FIELD_PREP(MT_TXD1_TID, tid);

	if (!ieee80211_is_data(fc) || multicast ||
	    info->flags & IEEE80211_TX_CTL_USE_MINRATE)
		val |= MT_TXD1_FIXED_RATE;

	if (key && multicast && ieee80211_is_robust_mgmt_frame(skb) &&
	    key->cipher == WLAN_CIPHER_SUITE_AES_CMAC) {
		val |= MT_TXD1_BIP;
		txwi[3] &= ~cpu_to_le32(MT_TXD3_PROTECT_FRAME);
	}

	txwi[1] |= cpu_to_le32(val);

	fc_type = (le16_to_cpu(fc) & IEEE80211_FCTL_FTYPE) >> 2;
	fc_stype = (le16_to_cpu(fc) & IEEE80211_FCTL_STYPE) >> 4;

	val = FIELD_PREP(MT_TXD2_FRAME_TYPE, fc_type) |
	      FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype);

	if (ieee80211_has_morefrags(fc) && ieee80211_is_first_frag(sc))
		val |= FIELD_PREP(MT_TXD2_FRAG, MT_TX_FRAG_FIRST);
	else if (ieee80211_has_morefrags(fc) && !ieee80211_is_first_frag(sc))
		val |= FIELD_PREP(MT_TXD2_FRAG, MT_TX_FRAG_MID);
	else if (!ieee80211_has_morefrags(fc) && !ieee80211_is_first_frag(sc))
		val |= FIELD_PREP(MT_TXD2_FRAG, MT_TX_FRAG_LAST);
	else
		val |= FIELD_PREP(MT_TXD2_FRAG, MT_TX_FRAG_NONE);

	txwi[2] |= cpu_to_le32(val);

	txwi[3] |= cpu_to_le32(FIELD_PREP(MT_TXD3_BCM, multicast));
	if (ieee80211_is_beacon(fc)) {
		txwi[3] &= ~cpu_to_le32(MT_TXD3_SW_POWER_MGMT);
		txwi[3] |= cpu_to_le32(MT_TXD3_REM_TX_COUNT);
	}

	if (info->flags & IEEE80211_TX_CTL_INJECTED) {
		u16 seqno = le16_to_cpu(sc);

		if (ieee80211_is_back_req(hdr->frame_control)) {
			struct ieee80211_bar *bar;

			bar = (struct ieee80211_bar *)skb->data;
			seqno = le16_to_cpu(bar->start_seq_num);
		}

		val = MT_TXD3_SN_VALID |
		      FIELD_PREP(MT_TXD3_SEQ, IEEE80211_SEQ_TO_SN(seqno));
		txwi[3] |= cpu_to_le32(val);
		txwi[3] &= ~cpu_to_le32(MT_TXD3_HW_AMSDU);
	}
}

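/* Compose the common TXD words: TXD0 carries length, packet format and
 * queue index, TXD1 the WLAN index and own-MAC, TXD3 protection and
 * retry settings, TXD5 the packet ID used for TX status reporting, and
 * TXD6 the rate override applied to fixed-rate frames.
 */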
void mt7996_mac_write_txwi(struct mt7996_dev *dev, __le32 *txwi,
			   struct sk_buff *skb, struct mt76_wcid *wcid,
			   struct ieee80211_key_conf *key, int pid,
			   enum mt76_txq_id qid, u32 changed)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_vif *vif = info->control.vif;
	u8 band_idx = (info->hw_queue & MT_TX_HW_QUEUE_PHY) >> 2;
	u8 p_fmt, q_idx, omac_idx = 0, wmm_idx = 0;
	bool is_8023 = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP;
	struct mt76_vif *mvif;
	u16 tx_count = 15;
	u32 val;
	bool inband_disc = !!(changed & (BSS_CHANGED_UNSOL_BCAST_PROBE_RESP |
					 BSS_CHANGED_FILS_DISCOVERY));
	bool beacon = !!(changed & (BSS_CHANGED_BEACON |
				    BSS_CHANGED_BEACON_ENABLED)) && (!inband_disc);

	mvif = vif ? (struct mt76_vif *)vif->drv_priv : NULL;
	if (mvif) {
		omac_idx = mvif->omac_idx;
		wmm_idx = mvif->wmm_idx;
		band_idx = mvif->band_idx;
	}

	if (inband_disc) {
		p_fmt = MT_TX_TYPE_FW;
		q_idx = MT_LMAC_ALTX0;
	} else if (beacon) {
		p_fmt = MT_TX_TYPE_FW;
		q_idx = MT_LMAC_BCN0;
	} else if (qid >= MT_TXQ_PSD) {
		p_fmt = MT_TX_TYPE_CT;
		q_idx = MT_LMAC_ALTX0;
	} else {
		p_fmt = MT_TX_TYPE_CT;
		q_idx = wmm_idx * MT7996_MAX_WMM_SETS +
			mt76_connac_lmac_mapping(skb_get_queue_mapping(skb));
	}

	val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len + MT_TXD_SIZE) |
	      FIELD_PREP(MT_TXD0_PKT_FMT, p_fmt) |
	      FIELD_PREP(MT_TXD0_Q_IDX, q_idx);
	txwi[0] = cpu_to_le32(val);

	val = FIELD_PREP(MT_TXD1_WLAN_IDX, wcid->idx) |
	      FIELD_PREP(MT_TXD1_OWN_MAC, omac_idx);

	if (band_idx)
		val |= FIELD_PREP(MT_TXD1_TGID, band_idx);

	txwi[1] = cpu_to_le32(val);
	txwi[2] = 0;

	val = MT_TXD3_SW_POWER_MGMT |
	      FIELD_PREP(MT_TXD3_REM_TX_COUNT, tx_count);
	if (key)
		val |= MT_TXD3_PROTECT_FRAME;
	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
		val |= MT_TXD3_NO_ACK;

	txwi[3] = cpu_to_le32(val);
	txwi[4] = 0;

	val = FIELD_PREP(MT_TXD5_PID, pid);
	if (pid >= MT_PACKET_ID_FIRST)
		val |= MT_TXD5_TX_STATUS_HOST;
	txwi[5] = cpu_to_le32(val);

	val = MT_TXD6_DIS_MAT | MT_TXD6_DAS;
	if (is_mt7996(&dev->mt76))
		val |= FIELD_PREP(MT_TXD6_MSDU_CNT, 1);
	else
		val |= FIELD_PREP(MT_TXD6_MSDU_CNT_V2, 1);
	txwi[6] = cpu_to_le32(val);
	txwi[7] = 0;

	if (is_8023)
		mt7996_mac_write_txwi_8023(dev, txwi, skb, wcid);
	else
		mt7996_mac_write_txwi_80211(dev, txwi, skb, key);

	if (txwi[1] & cpu_to_le32(MT_TXD1_FIXED_RATE)) {
		struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
		bool mcast = ieee80211_is_data(hdr->frame_control) &&
			     is_multicast_ether_addr(hdr->addr1);
		u8 idx = MT7996_BASIC_RATES_TBL;

		if (mvif) {
			if (mcast && mvif->mcast_rates_idx)
				idx = mvif->mcast_rates_idx;
			else if (beacon && mvif->beacon_rates_idx)
				idx = mvif->beacon_rates_idx;
			else
				idx = mvif->basic_rates_idx;
		}

		val = FIELD_PREP(MT_TXD6_TX_RATE, idx) | MT_TXD6_FIXED_BW;
		txwi[6] |= cpu_to_le32(val);
		txwi[3] |= cpu_to_le32(MT_TXD3_BA_DISABLE);
	}
}

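/* Prepare a frame for DMA: reserve a token for the txwi cache, write the
 * TXD, then build the connac firmware TXP with the scatter buffer list;
 * only a partial skb header (MT_CT_PARSE_LEN) is passed to the firmware.
 */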
int mt7996_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
			  enum mt76_txq_id qid, struct mt76_wcid *wcid,
			  struct ieee80211_sta *sta,
			  struct mt76_tx_info *tx_info)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx_info->skb->data;
	struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
	struct ieee80211_key_conf *key = info->control.hw_key;
	struct ieee80211_vif *vif = info->control.vif;
	struct mt76_connac_txp_common *txp;
	struct mt76_txwi_cache *t;
	int id, i, pid, nbuf = tx_info->nbuf - 1;
	bool is_8023 = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP;
	u8 *txwi = (u8 *)txwi_ptr;

	if (unlikely(tx_info->skb->len <= ETH_HLEN))
		return -EINVAL;

	if (!wcid)
		wcid = &dev->mt76.global_wcid;

	t = (struct mt76_txwi_cache *)(txwi + mdev->drv->txwi_size);
	t->skb = tx_info->skb;

	id = mt76_token_consume(mdev, &t);
	if (id < 0)
		return id;

	pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
	mt7996_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, key,
			      pid, qid, 0);

	txp = (struct mt76_connac_txp_common *)(txwi + MT_TXD_SIZE);
	for (i = 0; i < nbuf; i++) {
		u16 len;

		len = FIELD_PREP(MT_TXP_BUF_LEN, tx_info->buf[i + 1].len);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		len |= FIELD_PREP(MT_TXP_DMA_ADDR_H,
				  tx_info->buf[i + 1].addr >> 32);
#endif

		txp->fw.buf[i] = cpu_to_le32(tx_info->buf[i + 1].addr);
		txp->fw.len[i] = cpu_to_le16(len);
	}
	txp->fw.nbuf = nbuf;

	txp->fw.flags =
		cpu_to_le16(MT_CT_INFO_FROM_HOST | MT_CT_INFO_APPLY_TXD);

	if (!key)
		txp->fw.flags |= cpu_to_le16(MT_CT_INFO_NONE_CIPHER_FRAME);

	if (!is_8023 && ieee80211_is_mgmt(hdr->frame_control))
		txp->fw.flags |= cpu_to_le16(MT_CT_INFO_MGMT_FRAME);

	if (vif) {
		struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;

		txp->fw.bss_idx = mvif->mt76.idx;
	}

	txp->fw.token = cpu_to_le16(id);
	txp->fw.rept_wds_wcid = cpu_to_le16(sta ? wcid->idx : 0xfff);

	tx_info->skb = NULL;

	/* pass partial skb header to fw */
	tx_info->buf[1].len = MT_CT_PARSE_LEN;
	tx_info->buf[1].skip_unmap = true;
	tx_info->nbuf = MT_CT_DMA_BUF_NUM;

	return 0;
}

u32 mt7996_wed_init_buf(void *ptr, dma_addr_t phys, int token_id)
{
	struct mt76_connac_fw_txp *txp = ptr + MT_TXD_SIZE;
	__le32 *txwi = ptr;
	u32 val;

	memset(ptr, 0, MT_TXD_SIZE + sizeof(*txp));

	val = FIELD_PREP(MT_TXD0_TX_BYTES, MT_TXD_SIZE) |
	      FIELD_PREP(MT_TXD0_PKT_FMT, MT_TX_TYPE_CT);
	txwi[0] = cpu_to_le32(val);

	val = BIT(31) |
	      FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_3);
	txwi[1] = cpu_to_le32(val);

	txp->token = cpu_to_le16(token_id);
	txp->nbuf = 1;
	txp->buf[0] = cpu_to_le32(phys + MT_TXD_SIZE + sizeof(*txp));

	return MT_TXD_SIZE + sizeof(*txp);
}

static void
mt7996_tx_check_aggr(struct ieee80211_sta *sta, struct sk_buff *skb)
{
	struct mt7996_sta *msta;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	bool is_8023 = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP;
	u16 fc, tid;

	if (!sta || !(sta->deflink.ht_cap.ht_supported || sta->deflink.he_cap.has_he))
		return;

	tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
	if (tid >= 6) /* skip VO queue */
		return;

	if (is_8023)
		fc = IEEE80211_FTYPE_DATA |
		     (sta->wme ? IEEE80211_STYPE_QOS_DATA : IEEE80211_STYPE_DATA);
	else {
		/* No need to get precise TID for Action/Management Frame,
		 * since it will not meet the following Frame Control
		 * condition anyway.
		 */

		struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;

		fc = le16_to_cpu(hdr->frame_control) &
		     (IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE);
	}

	if (unlikely(fc != (IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_DATA)))
		return;

	msta = (struct mt7996_sta *)sta->drv_priv;
	if (!test_and_set_bit(tid, &msta->wcid.ampdu_state))
		ieee80211_start_tx_ba_session(sta, tid, 0);
}

static void
mt7996_txwi_free(struct mt7996_dev *dev, struct mt76_txwi_cache *t,
		 struct ieee80211_sta *sta, struct list_head *free_list)
{
	struct mt76_dev *mdev = &dev->mt76;
	struct mt76_wcid *wcid;
	__le32 *txwi;
	u16 wcid_idx;

	mt76_connac_txp_skb_unmap(mdev, t);
	if (!t->skb)
		goto out;

	txwi = (__le32 *)mt76_get_txwi_ptr(mdev, t);
	if (sta) {
		wcid = (struct mt76_wcid *)sta->drv_priv;
		wcid_idx = wcid->idx;

		if (likely(t->skb->protocol != cpu_to_be16(ETH_P_PAE)))
			mt7996_tx_check_aggr(sta, t->skb);
	} else {
		wcid_idx = le32_get_bits(txwi[9], MT_TXD9_WLAN_IDX);
	}

	__mt76_tx_complete_skb(mdev, wcid_idx, t->skb, free_list);

out:
	t->skb = NULL;
	mt76_put_txwi(mdev, t);
}

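/* Process a TXRX_NOTIFY (tx-free) event: entries either start a new wcid
 * pair, report per-wcid retry/fail statistics, or carry up to two MSDU
 * token IDs whose txwi entries are released back to the pool.
 */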
static void
mt7996_mac_tx_free(struct mt7996_dev *dev, void *data, int len)
{
	__le32 *tx_free = (__le32 *)data, *cur_info;
	struct mt76_dev *mdev = &dev->mt76;
	struct mt76_phy *phy2 = mdev->phys[MT_BAND1];
	struct mt76_phy *phy3 = mdev->phys[MT_BAND2];
	struct mt76_txwi_cache *txwi;
	struct ieee80211_sta *sta = NULL;
	struct mt76_wcid *wcid = NULL;
	LIST_HEAD(free_list);
	struct sk_buff *skb, *tmp;
	void *end = data + len;
	bool wake = false;
	u16 total, count = 0;

	/* clean DMA queues and unmap buffers first */
	mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_PSD], false);
	mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_BE], false);
	if (phy2) {
		mt76_queue_tx_cleanup(dev, phy2->q_tx[MT_TXQ_PSD], false);
		mt76_queue_tx_cleanup(dev, phy2->q_tx[MT_TXQ_BE], false);
	}
	if (phy3) {
		mt76_queue_tx_cleanup(dev, phy3->q_tx[MT_TXQ_PSD], false);
		mt76_queue_tx_cleanup(dev, phy3->q_tx[MT_TXQ_BE], false);
	}

	if (WARN_ON_ONCE(le32_get_bits(tx_free[1], MT_TXFREE1_VER) < 5))
		return;

	total = le32_get_bits(tx_free[0], MT_TXFREE0_MSDU_CNT);
	for (cur_info = &tx_free[2]; count < total; cur_info++) {
		u32 msdu, info;
		u8 i;

		if (WARN_ON_ONCE((void *)cur_info >= end))
			return;

		/* 1'b1: new wcid pair.
		 * 1'b0: msdu_id with the same 'wcid pair' as above.
		 */
		info = le32_to_cpu(*cur_info);
		if (info & MT_TXFREE_INFO_PAIR) {
			struct mt7996_sta *msta;
			u16 idx;

			idx = FIELD_GET(MT_TXFREE_INFO_WLAN_ID, info);
			wcid = rcu_dereference(dev->mt76.wcid[idx]);
			sta = wcid_to_sta(wcid);
			if (!sta)
				continue;

			msta = container_of(wcid, struct mt7996_sta, wcid);
			spin_lock_bh(&mdev->sta_poll_lock);
			if (list_empty(&msta->wcid.poll_list))
				list_add_tail(&msta->wcid.poll_list,
					      &mdev->sta_poll_list);
			spin_unlock_bh(&mdev->sta_poll_lock);
			continue;
		} else if (info & MT_TXFREE_INFO_HEADER) {
			u32 tx_retries = 0, tx_failed = 0;

			if (!wcid)
				continue;

			tx_retries =
				FIELD_GET(MT_TXFREE_INFO_COUNT, info) - 1;
			tx_failed = tx_retries +
				!!FIELD_GET(MT_TXFREE_INFO_STAT, info);

			wcid->stats.tx_retries += tx_retries;
			wcid->stats.tx_failed += tx_failed;
			continue;
		}

		for (i = 0; i < 2; i++) {
			msdu = (info >> (15 * i)) & MT_TXFREE_INFO_MSDU_ID;
			if (msdu == MT_TXFREE_INFO_MSDU_ID)
				continue;

			count++;
			txwi = mt76_token_release(mdev, msdu, &wake);
			if (!txwi)
				continue;

			mt7996_txwi_free(dev, txwi, sta, &free_list);
		}
	}

	mt7996_mac_sta_poll(dev);

	if (wake)
		mt76_set_tx_blocked(&dev->mt76, false);

	mt76_worker_schedule(&dev->mt76.tx_worker);

	list_for_each_entry_safe(skb, tmp, &free_list, list) {
		skb_list_del_init(skb);
		napi_consume_skb(skb, 1);
	}
}

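/* Translate a TXS (tx-status) event into mac80211 rate info and ACK
 * status for the matching status skb, and accumulate the per-wcid
 * NSS/MCS/bandwidth counters.
 */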
static void
mt7996_mac_add_txs_skb(struct mt7996_dev *dev, struct mt76_wcid *wcid,
		       int pid, __le32 *txs_data)
{
	struct mt76_sta_stats *stats = &wcid->stats;
	struct ieee80211_supported_band *sband;
	struct mt76_dev *mdev = &dev->mt76;
	struct mt76_phy *mphy;
	struct ieee80211_tx_info *info;
	struct sk_buff_head list;
	struct rate_info rate = {};
	struct sk_buff *skb = NULL;
	bool cck = false;
	u32 txrate, txs, mode, stbc;

	txs = le32_to_cpu(txs_data[0]);

	mt76_tx_status_lock(mdev, &list);

	/* only report MPDU TXS */
	if (le32_get_bits(txs_data[0], MT_TXS0_TXS_FORMAT) == 0) {
		skb = mt76_tx_status_skb_get(mdev, wcid, pid, &list);
		if (skb) {
			info = IEEE80211_SKB_CB(skb);
			if (!(txs & MT_TXS0_ACK_ERROR_MASK))
				info->flags |= IEEE80211_TX_STAT_ACK;

			info->status.ampdu_len = 1;
			info->status.ampdu_ack_len =
				!!(info->flags & IEEE80211_TX_STAT_ACK);

			info->status.rates[0].idx = -1;
		}
	}

	if (mtk_wed_device_active(&dev->mt76.mmio.wed) && wcid->sta) {
		struct ieee80211_sta *sta;
		u8 tid;

		sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv);
		tid = FIELD_GET(MT_TXS0_TID, txs);
		ieee80211_refresh_tx_agg_session_timer(sta, tid);
	}

	txrate = FIELD_GET(MT_TXS0_TX_RATE, txs);

	rate.mcs = FIELD_GET(MT_TX_RATE_IDX, txrate);
	rate.nss = FIELD_GET(MT_TX_RATE_NSS, txrate) + 1;
	stbc = le32_get_bits(txs_data[3], MT_TXS3_RATE_STBC);

	if (stbc && rate.nss > 1)
		rate.nss >>= 1;

	if (rate.nss - 1 < ARRAY_SIZE(stats->tx_nss))
		stats->tx_nss[rate.nss - 1]++;
	if (rate.mcs < ARRAY_SIZE(stats->tx_mcs))
		stats->tx_mcs[rate.mcs]++;

	mode = FIELD_GET(MT_TX_RATE_MODE, txrate);
	switch (mode) {
	case MT_PHY_TYPE_CCK:
		cck = true;
		fallthrough;
	case MT_PHY_TYPE_OFDM:
		mphy = mt76_dev_phy(mdev, wcid->phy_idx);

		if (mphy->chandef.chan->band == NL80211_BAND_5GHZ)
			sband = &mphy->sband_5g.sband;
		else if (mphy->chandef.chan->band == NL80211_BAND_6GHZ)
			sband = &mphy->sband_6g.sband;
		else
			sband = &mphy->sband_2g.sband;

		rate.mcs = mt76_get_rate(mphy->dev, sband, rate.mcs, cck);
		rate.legacy = sband->bitrates[rate.mcs].bitrate;
		break;
	case MT_PHY_TYPE_HT:
	case MT_PHY_TYPE_HT_GF:
		if (rate.mcs > 31)
			goto out;

		rate.flags = RATE_INFO_FLAGS_MCS;
		if (wcid->rate.flags & RATE_INFO_FLAGS_SHORT_GI)
			rate.flags |= RATE_INFO_FLAGS_SHORT_GI;
		break;
	case MT_PHY_TYPE_VHT:
		if (rate.mcs > 9)
			goto out;

		rate.flags = RATE_INFO_FLAGS_VHT_MCS;
		if (wcid->rate.flags & RATE_INFO_FLAGS_SHORT_GI)
			rate.flags |= RATE_INFO_FLAGS_SHORT_GI;
		break;
	case MT_PHY_TYPE_HE_SU:
	case MT_PHY_TYPE_HE_EXT_SU:
	case MT_PHY_TYPE_HE_TB:
	case MT_PHY_TYPE_HE_MU:
		if (rate.mcs > 11)
			goto out;

		rate.he_gi = wcid->rate.he_gi;
		rate.he_dcm = FIELD_GET(MT_TX_RATE_DCM, txrate);
		rate.flags = RATE_INFO_FLAGS_HE_MCS;
		break;
	case MT_PHY_TYPE_EHT_SU:
	case MT_PHY_TYPE_EHT_TRIG:
	case MT_PHY_TYPE_EHT_MU:
		if (rate.mcs > 13)
			goto out;

		rate.eht_gi = wcid->rate.eht_gi;
		rate.flags = RATE_INFO_FLAGS_EHT_MCS;
		break;
	default:
		goto out;
	}

	stats->tx_mode[mode]++;

	switch (FIELD_GET(MT_TXS0_BW, txs)) {
	case IEEE80211_STA_RX_BW_320:
		rate.bw = RATE_INFO_BW_320;
		stats->tx_bw[4]++;
		break;
	case IEEE80211_STA_RX_BW_160:
		rate.bw = RATE_INFO_BW_160;
		stats->tx_bw[3]++;
		break;
	case IEEE80211_STA_RX_BW_80:
		rate.bw = RATE_INFO_BW_80;
		stats->tx_bw[2]++;
		break;
	case IEEE80211_STA_RX_BW_40:
		rate.bw = RATE_INFO_BW_40;
		stats->tx_bw[1]++;
		break;
	default:
		rate.bw = RATE_INFO_BW_20;
		stats->tx_bw[0]++;
		break;
	}
	wcid->rate = rate;

out:
	if (skb)
		mt76_tx_status_skb_done(mdev, skb, &list);
	mt76_tx_status_unlock(mdev, &list);
}

static void mt7996_mac_add_txs(struct mt7996_dev *dev, void *data)
{
	struct mt7996_sta *msta = NULL;
	struct mt76_wcid *wcid;
	__le32 *txs_data = data;
	u16 wcidx;
	u8 pid;

	wcidx = le32_get_bits(txs_data[2], MT_TXS2_WCID);
	pid = le32_get_bits(txs_data[3], MT_TXS3_PID);

	if (pid < MT_PACKET_ID_NO_SKB)
		return;

	if (wcidx >= mt7996_wtbl_size(dev))
		return;

	rcu_read_lock();

	wcid = rcu_dereference(dev->mt76.wcid[wcidx]);
	if (!wcid)
		goto out;

	msta = container_of(wcid, struct mt7996_sta, wcid);

	mt7996_mac_add_txs_skb(dev, wcid, pid, txs_data);

	if (!wcid->sta)
		goto out;

	spin_lock_bh(&dev->mt76.sta_poll_lock);
	if (list_empty(&msta->wcid.poll_list))
		list_add_tail(&msta->wcid.poll_list, &dev->mt76.sta_poll_list);
	spin_unlock_bh(&dev->mt76.sta_poll_lock);

out:
	rcu_read_unlock();
}

bool mt7996_rx_check(struct mt76_dev *mdev, void *data, int len)
{
	struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
	__le32 *rxd = (__le32 *)data;
	__le32 *end = (__le32 *)&rxd[len / 4];
	enum rx_pkt_type type;

	type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);
	if (type != PKT_TYPE_NORMAL) {
		u32 sw_type = le32_get_bits(rxd[0], MT_RXD0_SW_PKT_TYPE_MASK);

		if (unlikely((sw_type & MT_RXD0_SW_PKT_TYPE_MAP) ==
			     MT_RXD0_SW_PKT_TYPE_FRAME))
			return true;
	}

	switch (type) {
	case PKT_TYPE_TXRX_NOTIFY:
		mt7996_mac_tx_free(dev, data, len);
		return false;
	case PKT_TYPE_TXS:
		for (rxd += 4; rxd + 8 <= end; rxd += 8)
			mt7996_mac_add_txs(dev, rxd);
		return false;
	case PKT_TYPE_RX_FW_MONITOR:
		mt7996_debugfs_rx_fw_monitor(dev, data, len);
		return false;
	default:
		return true;
	}
}

void mt7996_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
			 struct sk_buff *skb, u32 *info)
{
	struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
	__le32 *rxd = (__le32 *)skb->data;
	__le32 *end = (__le32 *)&skb->data[skb->len];
	enum rx_pkt_type type;

	type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);
	if (type != PKT_TYPE_NORMAL) {
		u32 sw_type = le32_get_bits(rxd[0], MT_RXD0_SW_PKT_TYPE_MASK);

		if (unlikely((sw_type & MT_RXD0_SW_PKT_TYPE_MAP) ==
			     MT_RXD0_SW_PKT_TYPE_FRAME))
			type = PKT_TYPE_NORMAL;
	}

	switch (type) {
	case PKT_TYPE_TXRX_NOTIFY:
		if (mtk_wed_device_active(&dev->mt76.mmio.wed_hif2) &&
		    q == MT_RXQ_TXFREE_BAND2) {
			dev_kfree_skb(skb);
			break;
		}

		mt7996_mac_tx_free(dev, skb->data, skb->len);
		napi_consume_skb(skb, 1);
		break;
	case PKT_TYPE_RX_EVENT:
		mt7996_mcu_rx_event(dev, skb);
		break;
	case PKT_TYPE_TXS:
		for (rxd += 4; rxd + 8 <= end; rxd += 8)
			mt7996_mac_add_txs(dev, rxd);
		dev_kfree_skb(skb);
		break;
	case PKT_TYPE_RX_FW_MONITOR:
		mt7996_debugfs_rx_fw_monitor(dev, skb->data, skb->len);
		dev_kfree_skb(skb);
		break;
	case PKT_TYPE_NORMAL:
		if (!mt7996_mac_fill_rx(dev, q, skb, info)) {
			mt76_rx(&dev->mt76, q, skb);
			return;
		}
		fallthrough;
	default:
		dev_kfree_skb(skb);
		break;
	}
}

void mt7996_mac_cca_stats_reset(struct mt7996_phy *phy)
{
	struct mt7996_dev *dev = phy->dev;
	u32 reg = MT_WF_PHYRX_BAND_RX_CTRL1(phy->mt76->band_idx);

	mt76_clear(dev, reg, MT_WF_PHYRX_BAND_RX_CTRL1_STSCNT_EN);
	mt76_set(dev, reg, BIT(11) | BIT(9));
}

void mt7996_mac_reset_counters(struct mt7996_phy *phy)
{
	struct mt7996_dev *dev = phy->dev;
	u8 band_idx = phy->mt76->band_idx;
	int i;

	for (i = 0; i < 16; i++)
		mt76_rr(dev, MT_TX_AGG_CNT(band_idx, i));

	phy->mt76->survey_time = ktime_get_boottime();

	memset(phy->mt76->aggr_stats, 0, sizeof(phy->mt76->aggr_stats));

	/* reset airtime counters */
	mt76_set(dev, MT_WF_RMAC_MIB_AIRTIME0(band_idx),
		 MT_WF_RMAC_MIB_RXTIME_CLR);

	mt7996_mcu_get_chan_mib_info(phy, true);
}

void mt7996_mac_set_coverage_class(struct mt7996_phy *phy)
{
	s16 coverage_class = phy->coverage_class;
	struct mt7996_dev *dev = phy->dev;
	struct mt7996_phy *phy2 = mt7996_phy2(dev);
	struct mt7996_phy *phy3 = mt7996_phy3(dev);
	u32 reg_offset;
	u32 cck = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 231) |
		  FIELD_PREP(MT_TIMEOUT_VAL_CCA, 48);
	u32 ofdm = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 60) |
		   FIELD_PREP(MT_TIMEOUT_VAL_CCA, 28);
	u8 band_idx = phy->mt76->band_idx;
	int offset;

	if (!test_bit(MT76_STATE_RUNNING, &phy->mt76->state))
		return;

	if (phy2)
		coverage_class = max_t(s16, dev->phy.coverage_class,
				       phy2->coverage_class);

	if (phy3)
		coverage_class = max_t(s16, coverage_class,
				       phy3->coverage_class);

	offset = 3 * coverage_class;
	reg_offset = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, offset) |
		     FIELD_PREP(MT_TIMEOUT_VAL_CCA, offset);

	mt76_wr(dev, MT_TMAC_CDTR(band_idx), cck + reg_offset);
	mt76_wr(dev, MT_TMAC_ODTR(band_idx), ofdm + reg_offset);
}

void mt7996_mac_enable_nf(struct mt7996_dev *dev, u8 band)
{
	mt76_set(dev, MT_WF_PHYRX_CSD_BAND_RXTD12(band),
		 MT_WF_PHYRX_CSD_BAND_RXTD12_IRPI_SW_CLR_ONLY |
		 MT_WF_PHYRX_CSD_BAND_RXTD12_IRPI_SW_CLR);

	mt76_set(dev, MT_WF_PHYRX_BAND_RX_CTRL1(band),
		 FIELD_PREP(MT_WF_PHYRX_BAND_RX_CTRL1_IPI_EN, 0x5));
}

static u8
mt7996_phy_get_nf(struct mt7996_phy *phy, u8 band_idx)
{
	static const u8 nf_power[] = { 92, 89, 86, 83, 80, 75, 70, 65, 60, 55, 52 };
	struct mt7996_dev *dev = phy->dev;
	u32 val, sum = 0, n = 0;
	int ant, i;

	for (ant = 0; ant < hweight8(phy->mt76->antenna_mask); ant++) {
		u32 reg = MT_WF_PHYRX_CSD_IRPI(band_idx, ant);

		for (i = 0; i < ARRAY_SIZE(nf_power); i++, reg += 4) {
			val = mt76_rr(dev, reg);
			sum += val * nf_power[i];
			n += val;
		}
	}

	return n ? sum / n : 0;
}

void mt7996_update_channel(struct mt76_phy *mphy)
{
	struct mt7996_phy *phy = mphy->priv;
	struct mt76_channel_state *state = mphy->chan_state;
	int nf;

	mt7996_mcu_get_chan_mib_info(phy, false);

	nf = mt7996_phy_get_nf(phy, mphy->band_idx);
	if (!phy->noise)
		phy->noise = nf << 4;
	else if (nf)
		phy->noise += nf - (phy->noise >> 4);

	state->noise = -(phy->noise >> 4);
}

static bool
mt7996_wait_reset_state(struct mt7996_dev *dev, u32 state)
{
	bool ret;

	ret = wait_event_timeout(dev->reset_wait,
				 (READ_ONCE(dev->recovery.state) & state),
				 MT7996_RESET_TIMEOUT);

	WARN(!ret, "Timeout waiting for MCU reset state %x\n", state);
	return ret;
}

static void
mt7996_update_vif_beacon(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
	struct ieee80211_hw *hw = priv;

	switch (vif->type) {
	case NL80211_IFTYPE_MESH_POINT:
	case NL80211_IFTYPE_ADHOC:
	case NL80211_IFTYPE_AP:
		mt7996_mcu_add_beacon(hw, vif, vif->bss_conf.enable_beacon);
		break;
	default:
		break;
	}
}

static void
mt7996_update_beacons(struct mt7996_dev *dev)
{
	struct mt76_phy *phy2, *phy3;

	ieee80211_iterate_active_interfaces(dev->mt76.hw,
					    IEEE80211_IFACE_ITER_RESUME_ALL,
					    mt7996_update_vif_beacon, dev->mt76.hw);

	phy2 = dev->mt76.phys[MT_BAND1];
	if (!phy2)
		return;

	ieee80211_iterate_active_interfaces(phy2->hw,
					    IEEE80211_IFACE_ITER_RESUME_ALL,
					    mt7996_update_vif_beacon, phy2->hw);

	phy3 = dev->mt76.phys[MT_BAND2];
	if (!phy3)
		return;

	ieee80211_iterate_active_interfaces(phy3->hw,
					    IEEE80211_IFACE_ITER_RESUME_ALL,
					    mt7996_update_vif_beacon, phy3->hw);
}

void mt7996_tx_token_put(struct mt7996_dev *dev)
{
	struct mt76_txwi_cache *txwi;
	int id;

	spin_lock_bh(&dev->mt76.token_lock);
	idr_for_each_entry(&dev->mt76.token, txwi, id) {
		mt7996_txwi_free(dev, txwi, NULL, NULL);
		dev->mt76.token_count--;
	}
	spin_unlock_bh(&dev->mt76.token_lock);
	idr_destroy(&dev->mt76.token);
}

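/* Full-chip restart path: mask interrupts, quiesce all TX/RX (napi,
 * workers, token table), reset the DMA engine, then reload firmware,
 * EEPROM and txpower state before re-running each active band.
 */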
static int
mt7996_mac_restart(struct mt7996_dev *dev)
{
	struct mt7996_phy *phy2, *phy3;
	struct mt76_dev *mdev = &dev->mt76;
	int i, ret;

	phy2 = mt7996_phy2(dev);
	phy3 = mt7996_phy3(dev);

	if (dev->hif2) {
		mt76_wr(dev, MT_INT1_MASK_CSR, 0x0);
		mt76_wr(dev, MT_INT1_SOURCE_CSR, ~0);
	}

	if (dev_is_pci(mdev->dev)) {
		mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0x0);
		if (dev->hif2)
			mt76_wr(dev, MT_PCIE1_MAC_INT_ENABLE, 0x0);
	}

	set_bit(MT76_RESET, &dev->mphy.state);
	set_bit(MT76_MCU_RESET, &dev->mphy.state);
	wake_up(&dev->mt76.mcu.wait);
	if (phy2)
		set_bit(MT76_RESET, &phy2->mt76->state);
	if (phy3)
		set_bit(MT76_RESET, &phy3->mt76->state);

	/* lock/unlock all queues to ensure that no tx is pending */
	mt76_txq_schedule_all(&dev->mphy);
	if (phy2)
		mt76_txq_schedule_all(phy2->mt76);
	if (phy3)
		mt76_txq_schedule_all(phy3->mt76);

	/* disable all tx/rx napi */
	mt76_worker_disable(&dev->mt76.tx_worker);
	mt76_for_each_q_rx(mdev, i) {
		if (mtk_wed_device_active(&dev->mt76.mmio.wed) &&
		    mt76_queue_is_wed_rro(&mdev->q_rx[i]))
			continue;

		if (mdev->q_rx[i].ndesc)
			napi_disable(&dev->mt76.napi[i]);
	}
	napi_disable(&dev->mt76.tx_napi);

	/* token reinit */
	mt7996_tx_token_put(dev);
	idr_init(&dev->mt76.token);

	mt7996_dma_reset(dev, true);

	local_bh_disable();
	mt76_for_each_q_rx(mdev, i) {
		if (mtk_wed_device_active(&dev->mt76.mmio.wed) &&
		    mt76_queue_is_wed_rro(&mdev->q_rx[i]))
			continue;

		if (mdev->q_rx[i].ndesc) {
			napi_enable(&dev->mt76.napi[i]);
			napi_schedule(&dev->mt76.napi[i]);
		}
	}
	local_bh_enable();
	clear_bit(MT76_MCU_RESET, &dev->mphy.state);
	clear_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state);

	mt76_wr(dev, MT_INT_MASK_CSR, dev->mt76.mmio.irqmask);
	mt76_wr(dev, MT_INT_SOURCE_CSR, ~0);
	if (dev->hif2) {
		mt76_wr(dev, MT_INT1_MASK_CSR, dev->mt76.mmio.irqmask);
		mt76_wr(dev, MT_INT1_SOURCE_CSR, ~0);
	}
	if (dev_is_pci(mdev->dev)) {
		mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff);
		if (dev->hif2)
			mt76_wr(dev, MT_PCIE1_MAC_INT_ENABLE, 0xff);
	}

	/* load firmware */
	ret = mt7996_mcu_init_firmware(dev);
	if (ret)
		goto out;

	/* set the necessary init items */
	ret = mt7996_mcu_set_eeprom(dev);
	if (ret)
		goto out;

	mt7996_mac_init(dev);
	mt7996_init_txpower(&dev->phy);
	mt7996_init_txpower(phy2);
	mt7996_init_txpower(phy3);
	ret = mt7996_txbf_init(dev);

	if (test_bit(MT76_STATE_RUNNING, &dev->mphy.state)) {
		ret = mt7996_run(dev->mphy.hw);
		if (ret)
			goto out;
	}

	if (phy2 && test_bit(MT76_STATE_RUNNING, &phy2->mt76->state)) {
		ret = mt7996_run(phy2->mt76->hw);
		if (ret)
			goto out;
	}

	if (phy3 && test_bit(MT76_STATE_RUNNING, &phy3->mt76->state)) {
		ret = mt7996_run(phy3->mt76->hw);
		if (ret)
			goto out;
	}

out:
	/* reset done */
	clear_bit(MT76_RESET, &dev->mphy.state);
	if (phy2)
		clear_bit(MT76_RESET, &phy2->mt76->state);
	if (phy3)
		clear_bit(MT76_RESET, &phy3->mt76->state);

	local_bh_disable();
	napi_enable(&dev->mt76.tx_napi);
	napi_schedule(&dev->mt76.tx_napi);
	local_bh_enable();

	mt76_worker_enable(&dev->mt76.tx_worker);
	return ret;
}

static void
mt7996_mac_full_reset(struct mt7996_dev *dev)
{
	struct mt7996_phy *phy2, *phy3;
	int i;

	phy2 = mt7996_phy2(dev);
	phy3 = mt7996_phy3(dev);
	dev->recovery.hw_full_reset = true;

	wake_up(&dev->mt76.mcu.wait);
	ieee80211_stop_queues(mt76_hw(dev));
	if (phy2)
		ieee80211_stop_queues(phy2->mt76->hw);
	if (phy3)
		ieee80211_stop_queues(phy3->mt76->hw);

	cancel_work_sync(&dev->wed_rro.work);
	cancel_delayed_work_sync(&dev->mphy.mac_work);
	if (phy2)
		cancel_delayed_work_sync(&phy2->mt76->mac_work);
	if (phy3)
		cancel_delayed_work_sync(&phy3->mt76->mac_work);

	mutex_lock(&dev->mt76.mutex);
	for (i = 0; i < 10; i++) {
		if (!mt7996_mac_restart(dev))
			break;
	}
	mutex_unlock(&dev->mt76.mutex);

	if (i == 10)
		dev_err(dev->mt76.dev, "chip full reset failed\n");

	ieee80211_restart_hw(mt76_hw(dev));
	if (phy2)
		ieee80211_restart_hw(phy2->mt76->hw);
	if (phy3)
		ieee80211_restart_hw(phy3->mt76->hw);

	ieee80211_wake_queues(mt76_hw(dev));
	if (phy2)
		ieee80211_wake_queues(phy2->mt76->hw);
	if (phy3)
		ieee80211_wake_queues(phy3->mt76->hw);

	dev->recovery.hw_full_reset = false;
	ieee80211_queue_delayed_work(mt76_hw(dev),
				     &dev->mphy.mac_work,
				     MT7996_WATCHDOG_TIME);
	if (phy2)
		ieee80211_queue_delayed_work(phy2->mt76->hw,
					     &phy2->mt76->mac_work,
					     MT7996_WATCHDOG_TIME);
	if (phy3)
		ieee80211_queue_delayed_work(phy3->mt76->hw,
					     &phy3->mt76->mac_work,
					     MT7996_WATCHDOG_TIME);
}

void mt7996_mac_reset_work(struct work_struct *work)
{
	struct mt7996_phy *phy2, *phy3;
	struct mt7996_dev *dev;
	int i;

	dev = container_of(work, struct mt7996_dev, reset_work);
	phy2 = mt7996_phy2(dev);
	phy3 = mt7996_phy3(dev);

	/* chip full reset */
	if (dev->recovery.restart) {
		/* disable WA/WM WDT */
		mt76_clear(dev, MT_WFDMA0_MCU_HOST_INT_ENA,
			   MT_MCU_CMD_WDT_MASK);

		if (READ_ONCE(dev->recovery.state) & MT_MCU_CMD_WA_WDT)
			dev->recovery.wa_reset_count++;
		else
			dev->recovery.wm_reset_count++;

		mt7996_mac_full_reset(dev);

		/* enable mcu irq */
		mt7996_irq_enable(dev, MT_INT_MCU_CMD);
		mt7996_irq_disable(dev, 0);

		/* enable WA/WM WDT */
		mt76_set(dev, MT_WFDMA0_MCU_HOST_INT_ENA, MT_MCU_CMD_WDT_MASK);

		dev->recovery.state = MT_MCU_CMD_NORMAL_STATE;
		dev->recovery.restart = false;
		return;
	}

	if (!(READ_ONCE(dev->recovery.state) & MT_MCU_CMD_STOP_DMA))
		return;

	dev_info(dev->mt76.dev, "\n%s L1 SER recovery start.",
		 wiphy_name(dev->mt76.hw->wiphy));

	if (mtk_wed_device_active(&dev->mt76.mmio.wed_hif2))
		mtk_wed_device_stop(&dev->mt76.mmio.wed_hif2);

	if (mtk_wed_device_active(&dev->mt76.mmio.wed))
		mtk_wed_device_stop(&dev->mt76.mmio.wed);

	ieee80211_stop_queues(mt76_hw(dev));
	if (phy2)
		ieee80211_stop_queues(phy2->mt76->hw);
	if (phy3)
		ieee80211_stop_queues(phy3->mt76->hw);

	set_bit(MT76_RESET, &dev->mphy.state);
	set_bit(MT76_MCU_RESET, &dev->mphy.state);
	wake_up(&dev->mt76.mcu.wait);

	cancel_work_sync(&dev->wed_rro.work);
	cancel_delayed_work_sync(&dev->mphy.mac_work);
	if (phy2) {
		set_bit(MT76_RESET, &phy2->mt76->state);
		cancel_delayed_work_sync(&phy2->mt76->mac_work);
	}
	if (phy3) {
		set_bit(MT76_RESET, &phy3->mt76->state);
		cancel_delayed_work_sync(&phy3->mt76->mac_work);
	}
	mt76_worker_disable(&dev->mt76.tx_worker);
	mt76_for_each_q_rx(&dev->mt76, i) {
		if (mtk_wed_device_active(&dev->mt76.mmio.wed) &&
		    mt76_queue_is_wed_rro(&dev->mt76.q_rx[i]))
			continue;

		napi_disable(&dev->mt76.napi[i]);
	}
	napi_disable(&dev->mt76.tx_napi);

	mutex_lock(&dev->mt76.mutex);

	mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_STOPPED);

	if (mt7996_wait_reset_state(dev, MT_MCU_CMD_RESET_DONE)) {
		mt7996_dma_reset(dev, false);

		mt7996_tx_token_put(dev);
		idr_init(&dev->mt76.token);

		mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_INIT);
		mt7996_wait_reset_state(dev, MT_MCU_CMD_RECOVERY_DONE);
	}

	mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_RESET_DONE);
	mt7996_wait_reset_state(dev, MT_MCU_CMD_NORMAL_STATE);

	/* enable DMA Tx/Rx and interrupt */
	mt7996_dma_start(dev, false, false);

	if (mtk_wed_device_active(&dev->mt76.mmio.wed)) {
		u32 wed_irq_mask = MT_INT_RRO_RX_DONE | MT_INT_TX_DONE_BAND2 |
				   dev->mt76.mmio.irqmask;

		if (mtk_wed_get_rx_capa(&dev->mt76.mmio.wed))
			wed_irq_mask &= ~MT_INT_RX_DONE_RRO_IND;

		mt76_wr(dev, MT_INT_MASK_CSR, wed_irq_mask);

		mtk_wed_device_start_hw_rro(&dev->mt76.mmio.wed, wed_irq_mask,
					    true);
		mt7996_irq_enable(dev, wed_irq_mask);
		mt7996_irq_disable(dev, 0);
	}

	if (mtk_wed_device_active(&dev->mt76.mmio.wed_hif2)) {
		mt76_wr(dev, MT_INT_PCIE1_MASK_CSR, MT_INT_TX_RX_DONE_EXT);
		mtk_wed_device_start(&dev->mt76.mmio.wed_hif2,
				     MT_INT_TX_RX_DONE_EXT);
	}

	clear_bit(MT76_MCU_RESET, &dev->mphy.state);
	clear_bit(MT76_RESET, &dev->mphy.state);
	if (phy2)
		clear_bit(MT76_RESET, &phy2->mt76->state);
	if (phy3)
		clear_bit(MT76_RESET, &phy3->mt76->state);

	local_bh_disable();
	mt76_for_each_q_rx(&dev->mt76, i) {
		if (mtk_wed_device_active(&dev->mt76.mmio.wed) &&
		    mt76_queue_is_wed_rro(&dev->mt76.q_rx[i]))
			continue;

		napi_enable(&dev->mt76.napi[i]);
		napi_schedule(&dev->mt76.napi[i]);
	}
	local_bh_enable();

	tasklet_schedule(&dev->mt76.irq_tasklet);

	mt76_worker_enable(&dev->mt76.tx_worker);

	local_bh_disable();
	napi_enable(&dev->mt76.tx_napi);
	napi_schedule(&dev->mt76.tx_napi);
	local_bh_enable();

	ieee80211_wake_queues(mt76_hw(dev));
	if (phy2)
		ieee80211_wake_queues(phy2->mt76->hw);
	if (phy3)
		ieee80211_wake_queues(phy3->mt76->hw);

	mutex_unlock(&dev->mt76.mutex);

	mt7996_update_beacons(dev);

	ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mphy.mac_work,
				     MT7996_WATCHDOG_TIME);
	if (phy2)
		ieee80211_queue_delayed_work(phy2->mt76->hw,
					     &phy2->mt76->mac_work,
					     MT7996_WATCHDOG_TIME);
	if (phy3)
		ieee80211_queue_delayed_work(phy3->mt76->hw,
					     &phy3->mt76->mac_work,
					     MT7996_WATCHDOG_TIME);
	dev_info(dev->mt76.dev, "\n%s L1 SER recovery completed.",
		 wiphy_name(dev->mt76.hw->wiphy));
}

/* firmware coredump */
void mt7996_mac_dump_work(struct work_struct *work)
{
	const struct mt7996_mem_region *mem_region;
	struct mt7996_crash_data *crash_data;
	struct mt7996_dev *dev;
	struct mt7996_mem_hdr *hdr;
	size_t buf_len;
	int i;
	u32 num;
	u8 *buf;

	dev = container_of(work, struct mt7996_dev, dump_work);

	mutex_lock(&dev->dump_mutex);

	crash_data = mt7996_coredump_new(dev);
	if (!crash_data) {
		mutex_unlock(&dev->dump_mutex);
		goto skip_coredump;
	}

	mem_region = mt7996_coredump_get_mem_layout(dev, &num);
	if (!mem_region || !crash_data->memdump_buf_len) {
		mutex_unlock(&dev->dump_mutex);
		goto skip_memdump;
	}

	buf = crash_data->memdump_buf;
	buf_len = crash_data->memdump_buf_len;

	/* dumping memory content... */
	memset(buf, 0, buf_len);
	for (i = 0; i < num; i++) {
		if (mem_region->len > buf_len) {
			dev_warn(dev->mt76.dev, "%s len %zu is too large\n",
				 mem_region->name, mem_region->len);
			break;
		}

		/* reserve space for the header */
		hdr = (void *)buf;
		buf += sizeof(*hdr);
		buf_len -= sizeof(*hdr);

		mt7996_memcpy_fromio(dev, buf, mem_region->start,
				     mem_region->len);

		hdr->start = mem_region->start;
		hdr->len = mem_region->len;

		if (!mem_region->len)
			/* note: the header remains, just with zero length */
			break;

		buf += mem_region->len;
		buf_len -= mem_region->len;

		mem_region++;
	}

	mutex_unlock(&dev->dump_mutex);

skip_memdump:
	mt7996_coredump_submit(dev);
skip_coredump:
	queue_work(dev->mt76.wq, &dev->reset_work);
}

void mt7996_reset(struct mt7996_dev *dev)
{
	if (!dev->recovery.hw_init_done)
		return;

	if (dev->recovery.hw_full_reset)
		return;

	/* wm/wa exception: do full recovery */
	if (READ_ONCE(dev->recovery.state) & MT_MCU_CMD_WDT_MASK) {
		dev->recovery.restart = true;
		dev_info(dev->mt76.dev,
			 "%s indicated firmware crash, attempting recovery\n",
			 wiphy_name(dev->mt76.hw->wiphy));

		mt7996_irq_disable(dev, MT_INT_MCU_CMD);
		queue_work(dev->mt76.wq, &dev->dump_work);
		return;
	}

	queue_work(dev->mt76.wq, &dev->reset_work);
	wake_up(&dev->reset_wait);
}

2097 void mt7996_mac_update_stats(struct mt7996_phy
*phy
)
2099 struct mt76_mib_stats
*mib
= &phy
->mib
;
2100 struct mt7996_dev
*dev
= phy
->dev
;
2101 u8 band_idx
= phy
->mt76
->band_idx
;
2105 cnt
= mt76_rr(dev
, MT_MIB_RSCR1(band_idx
));
	mib->fcs_err_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR33(band_idx));
	mib->rx_fifo_full_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR31(band_idx));
	mib->rx_mpdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_SDR6(band_idx));
	mib->channel_idle_cnt += FIELD_GET(MT_MIB_SDR6_CHANNEL_IDL_CNT_MASK, cnt);

	cnt = mt76_rr(dev, MT_MIB_RVSR0(band_idx));
	mib->rx_vector_mismatch_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR35(band_idx));
	mib->rx_delimiter_fail_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR36(band_idx));
	mib->rx_len_mismatch_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR0(band_idx));
	mib->tx_ampdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR2(band_idx));
	mib->tx_stop_q_empty_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR3(band_idx));
	mib->tx_mpdu_attempts_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR4(band_idx));
	mib->tx_mpdu_success_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR27(band_idx));
	mib->rx_ampdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR28(band_idx));
	mib->rx_ampdu_bytes_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR29(band_idx));
	mib->rx_ampdu_valid_subframe_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR30(band_idx));
	mib->rx_ampdu_valid_subframe_bytes_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_SDR27(band_idx));
	mib->tx_rwp_fail_cnt += FIELD_GET(MT_MIB_SDR27_TX_RWP_FAIL_CNT, cnt);

	cnt = mt76_rr(dev, MT_MIB_SDR28(band_idx));
	mib->tx_rwp_need_cnt += FIELD_GET(MT_MIB_SDR28_TX_RWP_NEED_CNT, cnt);

	cnt = mt76_rr(dev, MT_UMIB_RPDCR(band_idx));
	mib->rx_pfdrop_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RVSR1(band_idx));
	mib->rx_vec_queue_overflow_drop_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR1(band_idx));
	mib->rx_ba_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR0(band_idx));
	mib->tx_bf_ebf_ppdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR1(band_idx));
	mib->tx_bf_ibf_ppdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR2(band_idx));
	mib->tx_mu_bf_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR5(band_idx));
	mib->tx_mu_mpdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR6(band_idx));
	mib->tx_mu_acked_mpdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR7(band_idx));
	mib->tx_su_acked_mpdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR3(band_idx));
	mib->tx_bf_rx_fb_ht_cnt += cnt;
	mib->tx_bf_rx_fb_all_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR4(band_idx));
	mib->tx_bf_rx_fb_vht_cnt += cnt;
	mib->tx_bf_rx_fb_all_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR5(band_idx));
	mib->tx_bf_rx_fb_he_cnt += cnt;
	mib->tx_bf_rx_fb_all_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR6(band_idx));
	mib->tx_bf_rx_fb_eht_cnt += cnt;
	mib->tx_bf_rx_fb_all_cnt += cnt;

	cnt = mt76_rr(dev, MT_ETBF_RX_FB_CONT(band_idx));
	mib->tx_bf_rx_fb_bw = FIELD_GET(MT_ETBF_RX_FB_BW, cnt);
	mib->tx_bf_rx_fb_nc_cnt += FIELD_GET(MT_ETBF_RX_FB_NC, cnt);
	mib->tx_bf_rx_fb_nr_cnt += FIELD_GET(MT_ETBF_RX_FB_NR, cnt);

	cnt = mt76_rr(dev, MT_MIB_BSCR7(band_idx));
	mib->tx_bf_fb_trig_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR17(band_idx));
	mib->tx_bf_fb_cpl_cnt += cnt;

	for (i = 0; i < ARRAY_SIZE(mib->tx_amsdu); i++) {
		cnt = mt76_rr(dev, MT_PLE_AMSDU_PACK_MSDU_CNT(i));
		mib->tx_amsdu[i] += cnt;
		mib->tx_amsdu_cnt += cnt;
	}

	/* rts count */
	cnt = mt76_rr(dev, MT_MIB_BTSCR5(band_idx));
	mib->rts_cnt += cnt;

	/* rts retry count */
	cnt = mt76_rr(dev, MT_MIB_BTSCR6(band_idx));
	mib->rts_retries_cnt += cnt;

	/* ba miss count */
	cnt = mt76_rr(dev, MT_MIB_BTSCR0(band_idx));
	mib->ba_miss_cnt += cnt;

	/* ack fail count */
	cnt = mt76_rr(dev, MT_MIB_BFTFCR(band_idx));
	mib->ack_fail_cnt += cnt;

	for (i = 0; i < 16; i++) {
		cnt = mt76_rr(dev, MT_TX_AGG_CNT(band_idx, i));
		phy->mt76->aggr_stats[i] += cnt;
	}
}
void mt7996_mac_sta_rc_work(struct work_struct *work)
{
	struct mt7996_dev *dev = container_of(work, struct mt7996_dev, rc_work);
	struct ieee80211_sta *sta;
	struct ieee80211_vif *vif;
	struct mt7996_sta *msta;
	u32 changed;
	LIST_HEAD(list);

	spin_lock_bh(&dev->mt76.sta_poll_lock);
	list_splice_init(&dev->sta_rc_list, &list);

	while (!list_empty(&list)) {
		msta = list_first_entry(&list, struct mt7996_sta, rc_list);
		list_del_init(&msta->rc_list);
		changed = msta->changed;
		msta->changed = 0;
		spin_unlock_bh(&dev->mt76.sta_poll_lock);

		sta = container_of((void *)msta, struct ieee80211_sta, drv_priv);
		vif = container_of((void *)msta->vif, struct ieee80211_vif,
				   drv_priv);

		if (changed & (IEEE80211_RC_SUPP_RATES_CHANGED |
			       IEEE80211_RC_NSS_CHANGED |
			       IEEE80211_RC_BW_CHANGED))
			mt7996_mcu_add_rate_ctrl(dev, vif, sta, true);

		if (changed & IEEE80211_RC_SMPS_CHANGED)
			mt7996_mcu_set_fixed_field(dev, vif, sta, NULL,
						   RATE_PARAM_MMPS_UPDATE);

		spin_lock_bh(&dev->mt76.sta_poll_lock);
	}

	spin_unlock_bh(&dev->mt76.sta_poll_lock);
}
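/*
 * Periodic MAC housekeeping: channel survey data is refreshed on every
 * run, while the heavier MIB and per-station queries below fire only on
 * every fifth pass, i.e. once per 5 * MT7996_WATCHDOG_TIME.
 */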
void mt7996_mac_work(struct work_struct *work)
{
	struct mt7996_phy *phy;
	struct mt76_phy *mphy;

	mphy = (struct mt76_phy *)container_of(work, struct mt76_phy,
					       mac_work.work);
	phy = mphy->priv;

	mutex_lock(&mphy->dev->mutex);

	mt76_update_survey(mphy);
	if (++mphy->mac_work_count == 5) {
		mphy->mac_work_count = 0;

		mt7996_mac_update_stats(phy);

		mt7996_mcu_get_all_sta_info(phy, UNI_ALL_STA_TXRX_RATE);
		if (mtk_wed_device_active(&phy->dev->mt76.mmio.wed)) {
			mt7996_mcu_get_all_sta_info(phy, UNI_ALL_STA_TXRX_ADM_STAT);
			mt7996_mcu_get_all_sta_info(phy, UNI_ALL_STA_TXRX_MSDU_COUNT);
		}
	}

	mutex_unlock(&mphy->dev->mutex);

	mt76_tx_status_check(mphy->dev, false);

	ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work,
				     MT7996_WATCHDOG_TIME);
}
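/*
 * rdd_state keeps one bit per detector chain, so only chains that were
 * actually started receive an RDD_STOP command here.
 */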
static void mt7996_dfs_stop_radar_detector(struct mt7996_phy *phy)
{
	struct mt7996_dev *dev = phy->dev;

	if (phy->rdd_state & BIT(0))
		mt7996_mcu_rdd_cmd(dev, RDD_STOP, 0,
				   MT_RX_SEL0, 0);
	if (phy->rdd_state & BIT(1))
		mt7996_mcu_rdd_cmd(dev, RDD_STOP, 1,
				   MT_RX_SEL0, 0);
}
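/*
 * Note: the numeric region values below follow the RDD firmware
 * convention used elsewhere in mt76 (0 = ETSI, 1 = FCC, 2 = JP); they
 * are distinct from the nl80211 DFS region enum they are derived from.
 */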
static int mt7996_dfs_start_rdd(struct mt7996_dev *dev, int chain)
{
	int err, region;

	switch (dev->mt76.region) {
	case NL80211_DFS_ETSI:
		region = 0;
		break;
	case NL80211_DFS_JP:
		region = 2;
		break;
	case NL80211_DFS_FCC:
	default:
		region = 1;
		break;
	}

	err = mt7996_mcu_rdd_cmd(dev, RDD_START, chain,
				 MT_RX_SEL0, region);
	if (err < 0)
		return err;

	return mt7996_mcu_rdd_cmd(dev, RDD_DET_MODE, chain,
				  MT_RX_SEL0, 1);
}
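/*
 * Start CAC on the phy's own band and bring up its detector chain; for
 * 160 MHz and 80+80 channels a second chain (BIT(1)) covers the extended
 * frequency segment.
 */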
static int mt7996_dfs_start_radar_detector(struct mt7996_phy *phy)
{
	struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
	struct mt7996_dev *dev = phy->dev;
	u8 band_idx = phy->mt76->band_idx;
	int err;

	/* start CAC */
	err = mt7996_mcu_rdd_cmd(dev, RDD_CAC_START, band_idx,
				 MT_RX_SEL0, 0);
	if (err < 0)
		return err;

	err = mt7996_dfs_start_rdd(dev, band_idx);
	if (err < 0)
		return err;

	phy->rdd_state |= BIT(band_idx);

	if (chandef->width == NL80211_CHAN_WIDTH_160 ||
	    chandef->width == NL80211_CHAN_WIDTH_80P80) {
		err = mt7996_dfs_start_rdd(dev, 1);
		if (err < 0)
			return err;

		phy->rdd_state |= BIT(1);
	}

	return 0;
}
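/*
 * Push the per-region radar signatures to firmware: select the matching
 * spec table, program every pulse pattern, then the shared pulse
 * thresholds. FCC additionally configures the long-pulse detection
 * parameter via mt7996_mcu_set_fcc5_lpn().
 */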
static int
mt7996_dfs_init_radar_specs(struct mt7996_phy *phy)
{
	const struct mt7996_dfs_radar_spec *radar_specs;
	struct mt7996_dev *dev = phy->dev;
	int err, i;

	switch (dev->mt76.region) {
	case NL80211_DFS_FCC:
		radar_specs = &fcc_radar_specs;
		err = mt7996_mcu_set_fcc5_lpn(dev, 8);
		if (err < 0)
			return err;
		break;
	case NL80211_DFS_ETSI:
		radar_specs = &etsi_radar_specs;
		break;
	case NL80211_DFS_JP:
		radar_specs = &jp_radar_specs;
		break;
	default:
		return -EINVAL;
	}

	for (i = 0; i < ARRAY_SIZE(radar_specs->radar_pattern); i++) {
		err = mt7996_mcu_set_radar_th(dev, i,
					      &radar_specs->radar_pattern[i]);
		if (err < 0)
			return err;
	}

	return mt7996_mcu_set_pulse_th(dev, &radar_specs->pulse_th);
}
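/*
 * DFS state machine: UNKNOWN/DISABLED -> CAC (radar specs programmed and
 * detector started) -> ACTIVE (RDD_CAC_END sent once CAC completes).
 * Disabling goes through the stop path, which resumes normal rx via
 * RDD_NORMAL_START before tearing the detector down.
 */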
int mt7996_dfs_init_radar_detector(struct mt7996_phy *phy)
{
	struct mt7996_dev *dev = phy->dev;
	enum mt76_dfs_state dfs_state, prev_state;
	int err;

	prev_state = phy->mt76->dfs_state;
	dfs_state = mt76_phy_dfs_state(phy->mt76);

	if (prev_state == dfs_state)
		return 0;

	if (prev_state == MT_DFS_STATE_UNKNOWN)
		mt7996_dfs_stop_radar_detector(phy);

	if (dfs_state == MT_DFS_STATE_DISABLED)
		goto stop;

	if (prev_state <= MT_DFS_STATE_DISABLED) {
		err = mt7996_dfs_init_radar_specs(phy);
		if (err < 0)
			return err;

		err = mt7996_dfs_start_radar_detector(phy);
		if (err < 0)
			return err;

		phy->mt76->dfs_state = MT_DFS_STATE_CAC;
	}

	if (dfs_state == MT_DFS_STATE_CAC)
		return 0;

	err = mt7996_mcu_rdd_cmd(dev, RDD_CAC_END,
				 phy->mt76->band_idx, MT_RX_SEL0, 0);
	if (err < 0) {
		phy->mt76->dfs_state = MT_DFS_STATE_UNKNOWN;
		return err;
	}

	phy->mt76->dfs_state = MT_DFS_STATE_ACTIVE;
	return 0;

stop:
	err = mt7996_mcu_rdd_cmd(dev, RDD_NORMAL_START,
				 phy->mt76->band_idx, MT_RX_SEL0, 0);
	if (err < 0)
		return err;

	mt7996_dfs_stop_radar_detector(phy);
	phy->mt76->dfs_state = MT_DFS_STATE_DISABLED;

	return 0;
}
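/*
 * TWT wake durations are negotiated in units of 256 us; shifting left by
 * 8 converts a unit count into microseconds (TSF ticks), e.g. a duration
 * of 10 maps to 10 * 256 = 2560 us.
 */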
static u64
mt7996_mac_twt_duration_align(int duration)
{
	return duration << 8;
}
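/*
 * Insert a flow into the TSF-ordered TWT schedule at the first gap
 * between existing service periods that is wide enough for its duration;
 * the returned TSF becomes the flow's start offset.
 */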
static u64
mt7996_mac_twt_sched_list_add(struct mt7996_dev *dev,
			      struct mt7996_twt_flow *flow)
{
	struct mt7996_twt_flow *iter, *iter_next;
	u32 duration = flow->duration << 8;
	u64 start_tsf;

	iter = list_first_entry_or_null(&dev->twt_list,
					struct mt7996_twt_flow, list);
	if (!iter || !iter->sched || iter->start_tsf > duration) {
		/* add flow as first entry in the list */
		list_add(&flow->list, &dev->twt_list);
		return 0;
	}

	list_for_each_entry_safe(iter, iter_next, &dev->twt_list, list) {
		start_tsf = iter->start_tsf +
			    mt7996_mac_twt_duration_align(iter->duration);
		if (list_is_last(&iter->list, &dev->twt_list))
			break;

		if (!iter_next->sched ||
		    iter_next->start_tsf > start_tsf + duration) {
			list_add(&flow->list, &iter->list);
			goto out;
		}
	}

	/* add flow as last entry in the list */
	list_add_tail(&flow->list, &dev->twt_list);
out:
	return start_tsf;
}
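/*
 * Validate a TWT setup request against what the hardware supports:
 * individual (non-broadcast) implicit agreements with 256 us wake
 * duration units, and a wake interval (mantissa << exp) at least as long
 * as the requested wake duration.
 */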
static int mt7996_mac_check_twt_req(struct ieee80211_twt_setup *twt)
{
	struct ieee80211_twt_params *twt_agrt;
	u64 interval, duration;
	u16 mantissa;
	u8 exp;

	/* only individual agreement supported */
	if (twt->control & IEEE80211_TWT_CONTROL_NEG_TYPE_BROADCAST)
		return -EOPNOTSUPP;

	/* only 256us unit supported */
	if (twt->control & IEEE80211_TWT_CONTROL_WAKE_DUR_UNIT)
		return -EOPNOTSUPP;

	twt_agrt = (struct ieee80211_twt_params *)twt->params;

	/* explicit agreement not supported */
	if (!(twt_agrt->req_type & cpu_to_le16(IEEE80211_TWT_REQTYPE_IMPLICIT)))
		return -EOPNOTSUPP;

	exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP,
			le16_to_cpu(twt_agrt->req_type));
	mantissa = le16_to_cpu(twt_agrt->mantissa);
	duration = twt_agrt->min_twt_dur << 8;

	interval = (u64)mantissa << exp;
	if (interval < duration)
		return -EOPNOTSUPP;

	return 0;
}
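/*
 * Check whether the station already holds a flow with identical duration,
 * mantissa, exponent and protection/flowtype/trigger flags; duplicates
 * are rejected rather than consuming another flow slot.
 */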
static bool
mt7996_mac_twt_param_equal(struct mt7996_sta *msta,
			   struct ieee80211_twt_params *twt_agrt)
{
	u16 type = le16_to_cpu(twt_agrt->req_type);
	u8 exp;
	int i;

	exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP, type);
	for (i = 0; i < MT7996_MAX_STA_TWT_AGRT; i++) {
		struct mt7996_twt_flow *f;

		if (!(msta->twt.flowid_mask & BIT(i)))
			continue;

		f = &msta->twt.flow[i];
		if (f->duration == twt_agrt->min_twt_dur &&
		    f->mantissa == twt_agrt->mantissa &&
		    f->exp == exp &&
		    f->protection == !!(type & IEEE80211_TWT_REQTYPE_PROTECTION) &&
		    f->flowtype == !!(type & IEEE80211_TWT_REQTYPE_FLOWTYPE) &&
		    f->trigger == !!(type & IEEE80211_TWT_REQTYPE_TRIGGER))
			return true;
	}

	return false;
}
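/*
 * Negotiate a TWT agreement: validate the request, allocate flow and
 * table ids and fill in the flow descriptor. For REQUEST/SUGGEST the
 * target wake time reported back is realigned to the next interval
 * boundary past the current TSF, i.e.
 * curr_tsf + interval - ((curr_tsf - start_tsf) % interval),
 * before the agreement is pushed to the MCU.
 */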
void mt7996_mac_add_twt_setup(struct ieee80211_hw *hw,
			      struct ieee80211_sta *sta,
			      struct ieee80211_twt_setup *twt)
{
	enum ieee80211_twt_setup_cmd setup_cmd = TWT_SETUP_CMD_REJECT;
	struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
	struct ieee80211_twt_params *twt_agrt = (void *)twt->params;
	u16 req_type = le16_to_cpu(twt_agrt->req_type);
	enum ieee80211_twt_setup_cmd sta_setup_cmd;
	struct mt7996_dev *dev = mt7996_hw_dev(hw);
	struct mt7996_twt_flow *flow;
	u8 flowid, table_id, exp;

	if (mt7996_mac_check_twt_req(twt))
		goto out;

	mutex_lock(&dev->mt76.mutex);

	if (dev->twt.n_agrt == MT7996_MAX_TWT_AGRT)
		goto unlock;

	if (hweight8(msta->twt.flowid_mask) == ARRAY_SIZE(msta->twt.flow))
		goto unlock;

	if (twt_agrt->min_twt_dur < MT7996_MIN_TWT_DUR) {
		setup_cmd = TWT_SETUP_CMD_DICTATE;
		twt_agrt->min_twt_dur = MT7996_MIN_TWT_DUR;
		goto unlock;
	}

	if (mt7996_mac_twt_param_equal(msta, twt_agrt))
		goto unlock;

	flowid = ffs(~msta->twt.flowid_mask) - 1;
	twt_agrt->req_type &= ~cpu_to_le16(IEEE80211_TWT_REQTYPE_FLOWID);
	twt_agrt->req_type |= le16_encode_bits(flowid,
					       IEEE80211_TWT_REQTYPE_FLOWID);

	table_id = ffs(~dev->twt.table_mask) - 1;
	exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP, req_type);
	sta_setup_cmd = FIELD_GET(IEEE80211_TWT_REQTYPE_SETUP_CMD, req_type);

	flow = &msta->twt.flow[flowid];
	memset(flow, 0, sizeof(*flow));
	INIT_LIST_HEAD(&flow->list);
	flow->wcid = msta->wcid.idx;
	flow->table_id = table_id;
	flow->id = flowid;
	flow->duration = twt_agrt->min_twt_dur;
	flow->mantissa = twt_agrt->mantissa;
	flow->exp = exp;
	flow->protection = !!(req_type & IEEE80211_TWT_REQTYPE_PROTECTION);
	flow->flowtype = !!(req_type & IEEE80211_TWT_REQTYPE_FLOWTYPE);
	flow->trigger = !!(req_type & IEEE80211_TWT_REQTYPE_TRIGGER);

	if (sta_setup_cmd == TWT_SETUP_CMD_REQUEST ||
	    sta_setup_cmd == TWT_SETUP_CMD_SUGGEST) {
		u64 interval = (u64)le16_to_cpu(twt_agrt->mantissa) << exp;
		u64 flow_tsf, curr_tsf;
		u32 rem;

		flow->sched = true;
		flow->start_tsf = mt7996_mac_twt_sched_list_add(dev, flow);
		curr_tsf = __mt7996_get_tsf(hw, msta->vif);
		div_u64_rem(curr_tsf - flow->start_tsf, interval, &rem);
		flow_tsf = curr_tsf + interval - rem;
		twt_agrt->twt = cpu_to_le64(flow_tsf);
	} else {
		list_add_tail(&flow->list, &dev->twt_list);
		flow->tsf = le64_to_cpu(twt_agrt->twt);
	}

	if (mt7996_mcu_twt_agrt_update(dev, msta->vif, flow, MCU_TWT_AGRT_ADD))
		goto unlock;

	setup_cmd = TWT_SETUP_CMD_ACCEPT;
	dev->twt.table_mask |= BIT(table_id);
	msta->twt.flowid_mask |= BIT(flowid);
	dev->twt.n_agrt++;

unlock:
	mutex_unlock(&dev->mt76.mutex);
out:
	twt_agrt->req_type &= ~cpu_to_le16(IEEE80211_TWT_REQTYPE_SETUP_CMD);
	twt_agrt->req_type |=
		le16_encode_bits(setup_cmd, IEEE80211_TWT_REQTYPE_SETUP_CMD);
	twt->control = twt->control & IEEE80211_TWT_CONTROL_RX_DISABLED;
}
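/*
 * Tear down a single TWT flow: the MCU is asked to delete the agreement
 * first, and the flowid/table bookkeeping is only released once firmware
 * accepts the deletion. Caller must hold dev->mt76.mutex.
 */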
void mt7996_mac_twt_teardown_flow(struct mt7996_dev *dev,
				  struct mt7996_sta *msta,
				  u8 flowid)
{
	struct mt7996_twt_flow *flow;

	lockdep_assert_held(&dev->mt76.mutex);

	if (flowid >= ARRAY_SIZE(msta->twt.flow))
		return;

	if (!(msta->twt.flowid_mask & BIT(flowid)))
		return;

	flow = &msta->twt.flow[flowid];
	if (mt7996_mcu_twt_agrt_update(dev, msta->vif, flow,
				       MCU_TWT_AGRT_DELETE))
		return;

	list_del_init(&flow->list);
	msta->twt.flowid_mask &= ~BIT(flowid);
	dev->twt.table_mask &= ~BIT(flow->table_id);