// SPDX-License-Identifier: ISC
/* Copyright (C) 2020 MediaTek Inc. */

#include <linux/etherdevice.h>
#include <linux/timekeeping.h>
#include "coredump.h"
#include "mt7915.h"
#include "../dma.h"
#include "mac.h"
#include "mcu.h"

#define to_rssi(field, rcpi)	((FIELD_GET(field, rcpi) - 220) / 2)
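/*
 * Note: the RCPI fields reported by the hardware encode signal level in
 * 0.5 dB steps with a fixed +220 offset, so to_rssi() above recovers a
 * signed dBm value from an 8-bit RCPI field as (rcpi - 220) / 2.
 */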
static const struct mt7915_dfs_radar_spec etsi_radar_specs = {
	.pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 },
	.radar_pattern = {
		[5] =  { 1, 0,  6, 32, 28, 0,  990, 5010, 17, 1, 1 },
		[6] =  { 1, 0,  9, 32, 28, 0,  615, 5010, 27, 1, 1 },
		[7] =  { 1, 0, 15, 32, 28, 0,  240,  445, 27, 1, 1 },
		[8] =  { 1, 0, 12, 32, 28, 0,  240,  510, 42, 1, 1 },
		[9] =  { 1, 1, 0, 0, 0, 0, 2490, 3343, 14, 0, 0, 12, 32, 28, { }, 126 },
		[10] = { 1, 1, 0, 0, 0, 0, 2490, 3343, 14, 0, 0, 15, 32, 24, { }, 126 },
		[11] = { 1, 1, 0, 0, 0, 0,  823, 2510, 14, 0, 0, 18, 32, 28, { }, 54 },
		[12] = { 1, 1, 0, 0, 0, 0,  823, 2510, 14, 0, 0, 27, 32, 24, { }, 54 },
	},
};
static const struct mt7915_dfs_radar_spec fcc_radar_specs = {
	.pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 },
	.radar_pattern = {
		[0] = { 1, 0,  8,  32, 28, 0, 508, 3076, 13, 1,  1 },
		[1] = { 1, 0, 12,  32, 28, 0, 140,  240, 17, 1,  1 },
		[2] = { 1, 0,  8,  32, 28, 0, 190,  510, 22, 1,  1 },
		[3] = { 1, 0,  6,  32, 28, 0, 190,  510, 32, 1,  1 },
		[4] = { 1, 0,  9, 255, 28, 0, 323,  343, 13, 1, 32 },
	},
};
static const struct mt7915_dfs_radar_spec jp_radar_specs = {
	.pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 },
	.radar_pattern = {
		[0] =  { 1, 0,  8,  32, 28, 0,  508, 3076,  13, 1,  1 },
		[1] =  { 1, 0, 12,  32, 28, 0,  140,  240,  17, 1,  1 },
		[2] =  { 1, 0,  8,  32, 28, 0,  190,  510,  22, 1,  1 },
		[3] =  { 1, 0,  6,  32, 28, 0,  190,  510,  32, 1,  1 },
		[4] =  { 1, 0,  9, 255, 28, 0,  323,  343,  13, 1, 32 },
		[13] = { 1, 0,  7,  32, 28, 0, 3836, 3856,  14, 1,  1 },
		[14] = { 1, 0,  6,  32, 28, 0,  615, 5010, 110, 1,  1 },
		[15] = { 1, 1,  0,   0,  0, 0,   15, 5010, 110, 0,  0, 12, 32, 28 },
	},
};
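/*
 * The three tables above hold the per-regulatory-domain (ETSI/FCC/JP)
 * radar pulse and pattern detection thresholds. They are not interpreted
 * on the host: mt7915_dfs_init_radar_specs() below pushes each
 * radar_pattern entry via mt7915_mcu_set_radar_th() and the shared
 * pulse_th block via mt7915_mcu_set_pulse_th() to the firmware detector.
 */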
static struct mt76_wcid *mt7915_rx_get_wcid(struct mt7915_dev *dev,
					    u16 idx, bool unicast)
{
	struct mt7915_sta *sta;
	struct mt76_wcid *wcid;

	if (idx >= ARRAY_SIZE(dev->mt76.wcid))
		return NULL;

	wcid = rcu_dereference(dev->mt76.wcid[idx]);
	if (unicast || !wcid)
		return wcid;

	if (!wcid->sta)
		return NULL;

	sta = container_of(wcid, struct mt7915_sta, wcid);
	if (!sta->vif)
		return NULL;

	return &sta->vif->sta.wcid;
}
bool mt7915_mac_wtbl_update(struct mt7915_dev *dev, int idx, u32 mask)
{
	mt76_rmw(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_WLAN_IDX,
		 FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, idx) | mask);

	return mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY,
			 0, 5000);
}
u32 mt7915_mac_wtbl_lmac_addr(struct mt7915_dev *dev, u16 wcid, u8 dw)
{
	mt76_wr(dev, MT_WTBLON_TOP_WDUCR,
		FIELD_PREP(MT_WTBLON_TOP_WDUCR_GROUP, (wcid >> 7)));

	return MT_WTBL_LMAC_OFFS(wcid, dw);
}
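/*
 * WTBL entries are addressed in groups of 128: the WDUCR write above
 * selects the group (wcid >> 7), and MT_WTBL_LMAC_OFFS() then yields the
 * MMIO offset of DWORD 'dw' inside the selected peer's LMAC WTBL entry.
 */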
static void mt7915_mac_sta_poll(struct mt7915_dev *dev)
{
	static const u8 ac_to_tid[] = {
		[IEEE80211_AC_BE] = 0,
		[IEEE80211_AC_BK] = 1,
		[IEEE80211_AC_VI] = 4,
		[IEEE80211_AC_VO] = 6
	};
	struct ieee80211_sta *sta;
	struct mt7915_sta *msta;
	struct rate_info *rate;
	u32 tx_time[IEEE80211_NUM_ACS], rx_time[IEEE80211_NUM_ACS];
	LIST_HEAD(sta_poll_list);
	int i;

	spin_lock_bh(&dev->mt76.sta_poll_lock);
	list_splice_init(&dev->mt76.sta_poll_list, &sta_poll_list);
	spin_unlock_bh(&dev->mt76.sta_poll_lock);

	rcu_read_lock();

	while (true) {
		bool clear = false;
		u32 addr, val;
		u16 idx;
		s8 rssi[4];
		u8 bw;

		spin_lock_bh(&dev->mt76.sta_poll_lock);
		if (list_empty(&sta_poll_list)) {
			spin_unlock_bh(&dev->mt76.sta_poll_lock);
			break;
		}
		msta = list_first_entry(&sta_poll_list,
					struct mt7915_sta, wcid.poll_list);
		list_del_init(&msta->wcid.poll_list);
		spin_unlock_bh(&dev->mt76.sta_poll_lock);

		idx = msta->wcid.idx;

		/* refresh peer's airtime reporting */
		addr = mt7915_mac_wtbl_lmac_addr(dev, idx, 20);

		for (i = 0; i < IEEE80211_NUM_ACS; i++) {
			u32 tx_last = msta->airtime_ac[i];
			u32 rx_last = msta->airtime_ac[i + 4];

			msta->airtime_ac[i] = mt76_rr(dev, addr);
			msta->airtime_ac[i + 4] = mt76_rr(dev, addr + 4);

			if (msta->airtime_ac[i] <= tx_last)
				tx_time[i] = 0;
			else
				tx_time[i] = msta->airtime_ac[i] - tx_last;

			if (msta->airtime_ac[i + 4] <= rx_last)
				rx_time[i] = 0;
			else
				rx_time[i] = msta->airtime_ac[i + 4] - rx_last;

			if ((tx_last | rx_last) & BIT(30))
				clear = true;

			addr += 8;
		}

		if (clear) {
			mt7915_mac_wtbl_update(dev, idx,
					       MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
			memset(msta->airtime_ac, 0, sizeof(msta->airtime_ac));
		}

		if (!msta->wcid.sta)
			continue;

		sta = container_of((void *)msta, struct ieee80211_sta,
				   drv_priv);
		for (i = 0; i < IEEE80211_NUM_ACS; i++) {
			u8 queue = mt76_connac_lmac_mapping(i);
			u32 tx_cur = tx_time[queue];
			u32 rx_cur = rx_time[queue];
			u8 tid = ac_to_tid[i];

			if (!tx_cur && !rx_cur)
				continue;

			ieee80211_sta_register_airtime(sta, tid, tx_cur,
						       rx_cur);
		}

		/*
		 * We don't support reading GI info from txs packets.
		 * For accurate tx status reporting and AQL improvement,
		 * we need to make sure that flags match, so we poll GI
		 * from the per-sta counters directly.
		 */
		rate = &msta->wcid.rate;
		addr = mt7915_mac_wtbl_lmac_addr(dev, idx, 7);
		val = mt76_rr(dev, addr);

		switch (rate->bw) {
		case RATE_INFO_BW_160:
			bw = IEEE80211_STA_RX_BW_160;
			break;
		case RATE_INFO_BW_80:
			bw = IEEE80211_STA_RX_BW_80;
			break;
		case RATE_INFO_BW_40:
			bw = IEEE80211_STA_RX_BW_40;
			break;
		default:
			bw = IEEE80211_STA_RX_BW_20;
			break;
		}

		if (rate->flags & RATE_INFO_FLAGS_HE_MCS) {
			u8 offs = 24 + 2 * bw;

			rate->he_gi = (val & (0x3 << offs)) >> offs;
		} else if (rate->flags &
			   (RATE_INFO_FLAGS_VHT_MCS | RATE_INFO_FLAGS_MCS)) {
			if (val & BIT(12 + bw))
				rate->flags |= RATE_INFO_FLAGS_SHORT_GI;
			else
				rate->flags &= ~RATE_INFO_FLAGS_SHORT_GI;
		}

		/* get signal strength of resp frames (CTS/BA/ACK) */
		addr = mt7915_mac_wtbl_lmac_addr(dev, idx, 30);
		val = mt76_rr(dev, addr);

		rssi[0] = to_rssi(GENMASK(7, 0), val);
		rssi[1] = to_rssi(GENMASK(15, 8), val);
		rssi[2] = to_rssi(GENMASK(23, 16), val);
		rssi[3] = to_rssi(GENMASK(31, 24), val);

		msta->ack_signal =
			mt76_rx_signal(msta->vif->phy->mt76->antenna_mask, rssi);

		ewma_avg_signal_add(&msta->avg_ack_signal, -msta->ack_signal);
	}

	rcu_read_unlock();
}
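/*
 * mt7915_mac_enable_rtscts() below flips BIT(5) of WTBL DW5 in the vif's
 * own entry, which (per the function name) gates RTS/CTS protection for
 * frames sent through this interface.
 */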
void mt7915_mac_enable_rtscts(struct mt7915_dev *dev,
			      struct ieee80211_vif *vif, bool enable)
{
	struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
	u32 addr;

	addr = mt7915_mac_wtbl_lmac_addr(dev, mvif->sta.wcid.idx, 5);
	if (enable)
		mt76_set(dev, addr, BIT(5));
	else
		mt76_clear(dev, addr, BIT(5));
}
static void
mt7915_wed_check_ppe(struct mt7915_dev *dev, struct mt76_queue *q,
		     struct mt7915_sta *msta, struct sk_buff *skb,
		     u32 info)
{
	struct ieee80211_vif *vif;
	struct wireless_dev *wdev;

	if (!msta || !msta->vif)
		return;

	if (!mt76_queue_is_wed_rx(q))
		return;

	if (!(info & MT_DMA_INFO_PPE_VLD))
		return;

	vif = container_of((void *)msta->vif, struct ieee80211_vif,
			   drv_priv);
	wdev = ieee80211_vif_to_wdev(vif);
	skb->dev = wdev->netdev;

	mtk_wed_device_ppe_check(&dev->mt76.mmio.wed, skb,
				 FIELD_GET(MT_DMA_PPE_CPU_REASON, info),
				 FIELD_GET(MT_DMA_PPE_ENTRY, info));
}
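/*
 * The check above only hands frames to the WED PPE when they arrived on a
 * WED-backed RX queue and the DMA info word flags a valid PPE entry; the
 * CPU reason and PPE entry index extracted from the same word let the WED
 * driver update its hardware flow table for this skb.
 */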
static int
mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb,
		   enum mt76_rxq_id q, u32 *info)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_phy *mphy = &dev->mt76.phy;
	struct mt7915_phy *phy = &dev->phy;
	struct ieee80211_supported_band *sband;
	__le32 *rxd = (__le32 *)skb->data;
	__le32 *rxv = NULL;
	u32 rxd0 = le32_to_cpu(rxd[0]);
	u32 rxd1 = le32_to_cpu(rxd[1]);
	u32 rxd2 = le32_to_cpu(rxd[2]);
	u32 rxd3 = le32_to_cpu(rxd[3]);
	u32 rxd4 = le32_to_cpu(rxd[4]);
	u32 csum_mask = MT_RXD0_NORMAL_IP_SUM | MT_RXD0_NORMAL_UDP_TCP_SUM;
	bool unicast, insert_ccmp_hdr = false;
	u8 remove_pad, amsdu_info;
	u8 mode = 0, qos_ctl = 0;
	struct mt7915_sta *msta = NULL;
	u32 csum_status = *(u32 *)skb->cb;
	bool hdr_trans;
	u16 hdr_gap;
	u16 seq_ctrl = 0;
	__le16 fc = 0;
	int idx;

	memset(status, 0, sizeof(*status));

	if ((rxd1 & MT_RXD1_NORMAL_BAND_IDX) && !phy->mt76->band_idx) {
		mphy = dev->mt76.phys[MT_BAND1];
		if (!mphy)
			return -EINVAL;

		phy = mphy->priv;
		status->phy_idx = 1;
	}

	if (!test_bit(MT76_STATE_RUNNING, &mphy->state))
		return -EINVAL;

	if (rxd2 & MT_RXD2_NORMAL_AMSDU_ERR)
		return -EINVAL;

	hdr_trans = rxd2 & MT_RXD2_NORMAL_HDR_TRANS;
	if (hdr_trans && (rxd1 & MT_RXD1_NORMAL_CM))
		return -EINVAL;

	/* ICV error or CCMP/BIP/WPI MIC error */
	if (rxd1 & MT_RXD1_NORMAL_ICV_ERR)
		status->flag |= RX_FLAG_ONLY_MONITOR;

	unicast = FIELD_GET(MT_RXD3_NORMAL_ADDR_TYPE, rxd3) == MT_RXD3_NORMAL_U2M;
	idx = FIELD_GET(MT_RXD1_NORMAL_WLAN_IDX, rxd1);
	status->wcid = mt7915_rx_get_wcid(dev, idx, unicast);

	if (status->wcid) {
		msta = container_of(status->wcid, struct mt7915_sta, wcid);
		spin_lock_bh(&dev->mt76.sta_poll_lock);
		if (list_empty(&msta->wcid.poll_list))
			list_add_tail(&msta->wcid.poll_list,
				      &dev->mt76.sta_poll_list);
		spin_unlock_bh(&dev->mt76.sta_poll_lock);
	}

	status->freq = mphy->chandef.chan->center_freq;
	status->band = mphy->chandef.chan->band;
	if (status->band == NL80211_BAND_5GHZ)
		sband = &mphy->sband_5g.sband;
	else if (status->band == NL80211_BAND_6GHZ)
		sband = &mphy->sband_6g.sband;
	else
		sband = &mphy->sband_2g.sband;

	if (!sband->channels)
		return -EINVAL;

	if ((rxd0 & csum_mask) == csum_mask &&
	    !(csum_status & (BIT(0) | BIT(2) | BIT(3))))
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (rxd1 & MT_RXD1_NORMAL_FCS_ERR)
		status->flag |= RX_FLAG_FAILED_FCS_CRC;

	if (rxd1 & MT_RXD1_NORMAL_TKIP_MIC_ERR)
		status->flag |= RX_FLAG_MMIC_ERROR;

	if (FIELD_GET(MT_RXD1_NORMAL_SEC_MODE, rxd1) != 0 &&
	    !(rxd1 & (MT_RXD1_NORMAL_CLM | MT_RXD1_NORMAL_CM))) {
		status->flag |= RX_FLAG_DECRYPTED;
		status->flag |= RX_FLAG_IV_STRIPPED;
		status->flag |= RX_FLAG_MMIC_STRIPPED | RX_FLAG_MIC_STRIPPED;
	}

	remove_pad = FIELD_GET(MT_RXD2_NORMAL_HDR_OFFSET, rxd2);

	if (rxd2 & MT_RXD2_NORMAL_MAX_LEN_ERROR)
		return -EINVAL;

	rxd += 6;
	if (rxd1 & MT_RXD1_NORMAL_GROUP_4) {
		u32 v0 = le32_to_cpu(rxd[0]);
		u32 v2 = le32_to_cpu(rxd[2]);

		fc = cpu_to_le16(FIELD_GET(MT_RXD6_FRAME_CONTROL, v0));
		qos_ctl = FIELD_GET(MT_RXD8_QOS_CTL, v2);
		seq_ctrl = FIELD_GET(MT_RXD8_SEQ_CTRL, v2);

		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}

	if (rxd1 & MT_RXD1_NORMAL_GROUP_1) {
		u8 *data = (u8 *)rxd;

		if (status->flag & RX_FLAG_DECRYPTED) {
			switch (FIELD_GET(MT_RXD1_NORMAL_SEC_MODE, rxd1)) {
			case MT_CIPHER_AES_CCMP:
			case MT_CIPHER_CCMP_CCX:
			case MT_CIPHER_CCMP_256:
				insert_ccmp_hdr =
					FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2);
				fallthrough;
			case MT_CIPHER_TKIP:
			case MT_CIPHER_TKIP_NO_MIC:
			case MT_CIPHER_GCMP:
			case MT_CIPHER_GCMP_256:
				status->iv[0] = data[5];
				status->iv[1] = data[4];
				status->iv[2] = data[3];
				status->iv[3] = data[2];
				status->iv[4] = data[1];
				status->iv[5] = data[0];
				break;
			default:
				break;
			}
		}
		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}

	if (rxd1 & MT_RXD1_NORMAL_GROUP_2) {
		status->timestamp = le32_to_cpu(rxd[0]);
		status->flag |= RX_FLAG_MACTIME_START;

		if (!(rxd2 & MT_RXD2_NORMAL_NON_AMPDU)) {
			status->flag |= RX_FLAG_AMPDU_DETAILS;

			/* all subframes of an A-MPDU have the same timestamp */
			if (phy->rx_ampdu_ts != status->timestamp) {
				if (!++phy->ampdu_ref)
					phy->ampdu_ref++;
			}
			phy->rx_ampdu_ts = status->timestamp;

			status->ampdu_ref = phy->ampdu_ref;
		}

		rxd += 2;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}

	/* RXD Group 3 - P-RXV */
	if (rxd1 & MT_RXD1_NORMAL_GROUP_3) {
		u32 v0, v1;
		int ret;

		rxv = rxd;
		rxd += 2;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;

		v0 = le32_to_cpu(rxv[0]);
		v1 = le32_to_cpu(rxv[1]);

		if (v0 & MT_PRXV_HT_AD_CODE)
			status->enc_flags |= RX_ENC_FLAG_LDPC;

		status->chains = mphy->antenna_mask;
		status->chain_signal[0] = to_rssi(MT_PRXV_RCPI0, v1);
		status->chain_signal[1] = to_rssi(MT_PRXV_RCPI1, v1);
		status->chain_signal[2] = to_rssi(MT_PRXV_RCPI2, v1);
		status->chain_signal[3] = to_rssi(MT_PRXV_RCPI3, v1);

		/* RXD Group 5 - C-RXV */
		if (rxd1 & MT_RXD1_NORMAL_GROUP_5) {
			rxd += 18;
			if ((u8 *)rxd - skb->data >= skb->len)
				return -EINVAL;
		}

		if (!is_mt7915(&dev->mt76) || (rxd1 & MT_RXD1_NORMAL_GROUP_5)) {
			ret = mt76_connac2_mac_fill_rx_rate(&dev->mt76, status,
							    sband, rxv, &mode);
			if (ret < 0)
				return ret;
		}
	}

	amsdu_info = FIELD_GET(MT_RXD4_NORMAL_PAYLOAD_FORMAT, rxd4);
	status->amsdu = !!amsdu_info;
	if (status->amsdu) {
		status->first_amsdu = amsdu_info == MT_RXD4_FIRST_AMSDU_FRAME;
		status->last_amsdu = amsdu_info == MT_RXD4_LAST_AMSDU_FRAME;
	}

	hdr_gap = (u8 *)rxd - skb->data + 2 * remove_pad;
	if (hdr_trans && ieee80211_has_morefrags(fc)) {
		struct ieee80211_vif *vif;
		int err;

		if (!msta || !msta->vif)
			return -EINVAL;

		vif = container_of((void *)msta->vif, struct ieee80211_vif,
				   drv_priv);
		err = mt76_connac2_reverse_frag0_hdr_trans(vif, skb, hdr_gap);
		if (err)
			return err;

		hdr_trans = false;
	} else {
		int pad_start = 0;

		skb_pull(skb, hdr_gap);
		if (!hdr_trans && status->amsdu) {
			pad_start = ieee80211_get_hdrlen_from_skb(skb);
		} else if (hdr_trans && (rxd2 & MT_RXD2_NORMAL_HDR_TRANS_ERROR)) {
			/*
			 * When header translation failure is indicated,
			 * the hardware will insert an extra 2-byte field
			 * containing the data length after the protocol
			 * type field. This happens either when the LLC-SNAP
			 * pattern did not match, or if a VLAN header was
			 * detected.
			 */
			pad_start = 12;
			if (get_unaligned_be16(skb->data + pad_start) == ETH_P_8021Q)
				pad_start += 4;
			else
				pad_start = 0;
		}

		if (pad_start) {
			memmove(skb->data + 2, skb->data, pad_start);
			skb_pull(skb, 2);
		}
	}

	if (!hdr_trans) {
		struct ieee80211_hdr *hdr;

		if (insert_ccmp_hdr) {
			u8 key_id = FIELD_GET(MT_RXD1_NORMAL_KEY_ID, rxd1);

			mt76_insert_ccmp_hdr(skb, key_id);
		}

		hdr = mt76_skb_get_hdr(skb);
		fc = hdr->frame_control;
		if (ieee80211_is_data_qos(fc)) {
			seq_ctrl = le16_to_cpu(hdr->seq_ctrl);
			qos_ctl = *ieee80211_get_qos_ctl(hdr);
		}
	} else {
		status->flag |= RX_FLAG_8023;
		mt7915_wed_check_ppe(dev, &dev->mt76.q_rx[q], msta, skb,
				     *info);
	}

	if (rxv && mode >= MT_PHY_TYPE_HE_SU && !(status->flag & RX_FLAG_8023))
		mt76_connac2_mac_decode_he_radiotap(&dev->mt76, skb, rxv, mode);

	if (!status->wcid || !ieee80211_is_data_qos(fc))
		return 0;

	status->aggr = unicast &&
		       !ieee80211_is_qos_nullfunc(fc);
	status->qos_ctl = qos_ctl;
	status->seqno = IEEE80211_SEQ_TO_SN(seq_ctrl);

	return 0;
}
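/*
 * The RXD parsed above is variable-length: optional groups (4, 1, 2,
 * 3/P-RXV and 5/C-RXV) follow the six base DWORDs in a fixed order, and
 * each consumed group advances rxd with a bounds check against skb->len
 * before the 802.11 or 802.3 payload is finally exposed via skb_pull().
 */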
static void
mt7915_mac_fill_rx_vector(struct mt7915_dev *dev, struct sk_buff *skb)
{
#ifdef CONFIG_NL80211_TESTMODE
	struct mt7915_phy *phy = &dev->phy;
	__le32 *rxd = (__le32 *)skb->data;
	__le32 *rxv_hdr = rxd + 2;
	__le32 *rxv = rxd + 4;
	u32 rcpi, ib_rssi, wb_rssi, v20, v21;
	u8 band_idx;
	s32 foe;
	u8 snr;
	int i;

	band_idx = le32_get_bits(rxv_hdr[1], MT_RXV_HDR_BAND_IDX);
	if (band_idx && !phy->mt76->band_idx) {
		phy = mt7915_ext_phy(dev);
		if (!phy)
			goto out;
	}

	rcpi = le32_to_cpu(rxv[6]);
	ib_rssi = le32_to_cpu(rxv[7]);
	wb_rssi = le32_to_cpu(rxv[8]) >> 5;

	for (i = 0; i < 4; i++, rcpi >>= 8, ib_rssi >>= 8, wb_rssi >>= 9) {
		if (i == 3)
			wb_rssi = le32_to_cpu(rxv[9]);

		phy->test.last_rcpi[i] = rcpi & 0xff;
		phy->test.last_ib_rssi[i] = ib_rssi & 0xff;
		phy->test.last_wb_rssi[i] = wb_rssi & 0xff;
	}

	v20 = le32_to_cpu(rxv[20]);
	v21 = le32_to_cpu(rxv[21]);

	foe = FIELD_GET(MT_CRXV_FOE_LO, v20) |
	      (FIELD_GET(MT_CRXV_FOE_HI, v21) << MT_CRXV_FOE_SHIFT);

	snr = FIELD_GET(MT_CRXV_SNR, v20) - 16;

	phy->test.last_freq_offset = foe;
	phy->test.last_snr = snr;
out:
#endif
	dev_kfree_skb(skb);
}
static void
mt7915_mac_write_txwi_tm(struct mt7915_phy *phy, __le32 *txwi,
			 struct sk_buff *skb)
{
#ifdef CONFIG_NL80211_TESTMODE
	struct mt76_testmode_data *td = &phy->mt76->test;
	const struct ieee80211_rate *r;
	u8 bw, mode, nss = td->tx_rate_nss;
	u8 rate_idx = td->tx_rate_idx;
	u16 rateval = 0;
	u32 val;
	bool cck = false;
	int band;

	if (skb != phy->mt76->test.tx_skb)
		return;

	switch (td->tx_rate_mode) {
	case MT76_TM_TX_MODE_HT:
		nss = 1 + (rate_idx >> 3);
		mode = MT_PHY_TYPE_HT;
		break;
	case MT76_TM_TX_MODE_VHT:
		mode = MT_PHY_TYPE_VHT;
		break;
	case MT76_TM_TX_MODE_HE_SU:
		mode = MT_PHY_TYPE_HE_SU;
		break;
	case MT76_TM_TX_MODE_HE_EXT_SU:
		mode = MT_PHY_TYPE_HE_EXT_SU;
		break;
	case MT76_TM_TX_MODE_HE_TB:
		mode = MT_PHY_TYPE_HE_TB;
		break;
	case MT76_TM_TX_MODE_HE_MU:
		mode = MT_PHY_TYPE_HE_MU;
		break;
	case MT76_TM_TX_MODE_CCK:
		cck = true;
		fallthrough;
	case MT76_TM_TX_MODE_OFDM:
		band = phy->mt76->chandef.chan->band;
		if (band == NL80211_BAND_2GHZ && !cck)
			rate_idx += 4;

		r = &phy->mt76->hw->wiphy->bands[band]->bitrates[rate_idx];
		val = cck ? r->hw_value_short : r->hw_value;

		mode = val >> 8;
		rate_idx = val & 0xff;
		break;
	default:
		mode = MT_PHY_TYPE_OFDM;
		break;
	}

	switch (phy->mt76->chandef.width) {
	case NL80211_CHAN_WIDTH_40:
		bw = 1;
		break;
	case NL80211_CHAN_WIDTH_80:
		bw = 2;
		break;
	case NL80211_CHAN_WIDTH_80P80:
	case NL80211_CHAN_WIDTH_160:
		bw = 3;
		break;
	default:
		bw = 0;
		break;
	}

	if (td->tx_rate_stbc && nss == 1) {
		nss++;
		rateval |= MT_TX_RATE_STBC;
	}

	rateval |= FIELD_PREP(MT_TX_RATE_IDX, rate_idx) |
		   FIELD_PREP(MT_TX_RATE_MODE, mode) |
		   FIELD_PREP(MT_TX_RATE_NSS, nss - 1);

	txwi[2] |= cpu_to_le32(MT_TXD2_FIX_RATE);

	le32p_replace_bits(&txwi[3], 1, MT_TXD3_REM_TX_COUNT);
	if (td->tx_rate_mode < MT76_TM_TX_MODE_HT)
		txwi[3] |= cpu_to_le32(MT_TXD3_BA_DISABLE);

	val = MT_TXD6_FIXED_BW |
	      FIELD_PREP(MT_TXD6_BW, bw) |
	      FIELD_PREP(MT_TXD6_TX_RATE, rateval) |
	      FIELD_PREP(MT_TXD6_SGI, td->tx_rate_sgi);

	/* for HE_SU/HE_EXT_SU PPDU
	 * - 1x, 2x, 4x LTF + 0.8us GI
	 * - 2x LTF + 1.6us GI, 4x LTF + 3.2us GI
	 * for HE_MU PPDU
	 * - 2x, 4x LTF + 0.8us GI
	 * - 2x LTF + 1.6us GI, 4x LTF + 3.2us GI
	 * for HE_TB PPDU
	 * - 1x, 2x LTF + 1.6us GI
	 * - 4x LTF + 3.2us GI
	 */
	if (mode >= MT_PHY_TYPE_HE_SU)
		val |= FIELD_PREP(MT_TXD6_HELTF, td->tx_ltf);

	if (td->tx_rate_ldpc || (bw > 0 && mode >= MT_PHY_TYPE_HE_SU))
		val |= MT_TXD6_LDPC;

	txwi[3] &= ~cpu_to_le32(MT_TXD3_SN_VALID);
	txwi[6] |= cpu_to_le32(val);
	txwi[7] |= cpu_to_le32(FIELD_PREP(MT_TXD7_SPE_IDX,
					  phy->test.spe_idx));
#endif
}
void mt7915_mac_write_txwi(struct mt76_dev *dev, __le32 *txwi,
			   struct sk_buff *skb, struct mt76_wcid *wcid, int pid,
			   struct ieee80211_key_conf *key,
			   enum mt76_txq_id qid, u32 changed)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	u8 phy_idx = (info->hw_queue & MT_TX_HW_QUEUE_PHY) >> 2;
	struct mt76_phy *mphy = &dev->phy;

	if (phy_idx && dev->phys[MT_BAND1])
		mphy = dev->phys[MT_BAND1];

	mt76_connac2_mac_write_txwi(dev, txwi, skb, wcid, key, pid, qid, changed);

	if (mt76_testmode_enabled(mphy))
		mt7915_mac_write_txwi_tm(mphy->priv, txwi, skb);
}
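/*
 * The shared connac2 helper fills the generic TXWI fields; the testmode
 * hook above then overrides rate, bandwidth and LTF/GI settings whenever
 * the target phy is running in NL80211 testmode.
 */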
int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
			  enum mt76_txq_id qid, struct mt76_wcid *wcid,
			  struct ieee80211_sta *sta,
			  struct mt76_tx_info *tx_info)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx_info->skb->data;
	struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
	struct ieee80211_key_conf *key = info->control.hw_key;
	struct ieee80211_vif *vif = info->control.vif;
	struct mt76_connac_fw_txp *txp;
	struct mt76_txwi_cache *t;
	int id, i, nbuf = tx_info->nbuf - 1;
	u8 *txwi = (u8 *)txwi_ptr;
	int pid;

	if (unlikely(tx_info->skb->len <= ETH_HLEN))
		return -EINVAL;

	if (!wcid)
		wcid = &dev->mt76.global_wcid;

	if (sta) {
		struct mt7915_sta *msta;

		msta = (struct mt7915_sta *)sta->drv_priv;

		if (time_after(jiffies, msta->jiffies + HZ / 4)) {
			info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
			msta->jiffies = jiffies;
		}
	}

	t = (struct mt76_txwi_cache *)(txwi + mdev->drv->txwi_size);
	t->skb = tx_info->skb;

	id = mt76_token_consume(mdev, &t);
	if (id < 0)
		return id;

	pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
	mt7915_mac_write_txwi(mdev, txwi_ptr, tx_info->skb, wcid, pid, key,
			      qid, 0);

	txp = (struct mt76_connac_fw_txp *)(txwi + MT_TXD_SIZE);
	for (i = 0; i < nbuf; i++) {
		txp->buf[i] = cpu_to_le32(tx_info->buf[i + 1].addr);
		txp->len[i] = cpu_to_le16(tx_info->buf[i + 1].len);
	}
	txp->nbuf = nbuf;

	txp->flags = cpu_to_le16(MT_CT_INFO_APPLY_TXD | MT_CT_INFO_FROM_HOST);

	if (!key)
		txp->flags |= cpu_to_le16(MT_CT_INFO_NONE_CIPHER_FRAME);

	if (!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) &&
	    ieee80211_is_mgmt(hdr->frame_control))
		txp->flags |= cpu_to_le16(MT_CT_INFO_MGMT_FRAME);

	if (vif) {
		struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;

		txp->bss_idx = mvif->mt76.idx;
	}

	txp->token = cpu_to_le16(id);
	if (test_bit(MT_WCID_FLAG_4ADDR, &wcid->flags))
		txp->rept_wds_wcid = cpu_to_le16(wcid->idx);
	else
		txp->rept_wds_wcid = cpu_to_le16(0x3ff);
	tx_info->skb = NULL;

	/* pass partial skb header to fw */
	tx_info->buf[1].len = MT_CT_PARSE_LEN;
	tx_info->buf[1].skip_unmap = true;
	tx_info->nbuf = MT_CT_DMA_BUF_NUM;

	return 0;
}
u32 mt7915_wed_init_buf(void *ptr, dma_addr_t phys, int token_id)
{
	struct mt76_connac_fw_txp *txp = ptr + MT_TXD_SIZE;
	__le32 *txwi = ptr;
	u32 val;

	memset(ptr, 0, MT_TXD_SIZE + sizeof(*txp));

	val = FIELD_PREP(MT_TXD0_TX_BYTES, MT_TXD_SIZE) |
	      FIELD_PREP(MT_TXD0_PKT_FMT, MT_TX_TYPE_CT);
	txwi[0] = cpu_to_le32(val);

	val = MT_TXD1_LONG_FORMAT |
	      FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_3);
	txwi[1] = cpu_to_le32(val);

	txp->token = cpu_to_le16(token_id);
	txp->nbuf = 1;
	txp->buf[0] = cpu_to_le32(phys + MT_TXD_SIZE + sizeof(*txp));

	return MT_TXD_SIZE + sizeof(*txp);
}
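/*
 * mt7915_wed_init_buf() pre-builds the TXD + TXP pair for a WED-owned TX
 * buffer: only the token and the single fragment pointer differ per
 * buffer, and the payload is expected to sit in the same DMA buffer right
 * after the descriptors (phys + MT_TXD_SIZE + sizeof(*txp)).
 */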
static void
mt7915_mac_tx_free_prepare(struct mt7915_dev *dev)
{
	struct mt76_dev *mdev = &dev->mt76;
	struct mt76_phy *mphy_ext = mdev->phys[MT_BAND1];

	/* clean DMA queues and unmap buffers first */
	mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_PSD], false);
	mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_BE], false);
	if (mphy_ext) {
		mt76_queue_tx_cleanup(dev, mphy_ext->q_tx[MT_TXQ_PSD], false);
		mt76_queue_tx_cleanup(dev, mphy_ext->q_tx[MT_TXQ_BE], false);
	}
}
static void
mt7915_mac_tx_free_done(struct mt7915_dev *dev,
			struct list_head *free_list, bool wake)
{
	struct sk_buff *skb, *tmp;

	mt7915_mac_sta_poll(dev);

	if (wake)
		mt76_set_tx_blocked(&dev->mt76, false);

	mt76_worker_schedule(&dev->mt76.tx_worker);

	list_for_each_entry_safe(skb, tmp, free_list, list) {
		skb_list_del_init(skb);
		napi_consume_skb(skb, 1);
	}
}
static void
mt7915_mac_tx_free(struct mt7915_dev *dev, void *data, int len)
{
	struct mt76_connac_tx_free *free = data;
	__le32 *tx_info = (__le32 *)(data + sizeof(*free));
	struct mt76_dev *mdev = &dev->mt76;
	struct mt76_txwi_cache *txwi;
	struct ieee80211_sta *sta = NULL;
	struct mt76_wcid *wcid = NULL;
	LIST_HEAD(free_list);
	void *end = data + len;
	bool v3, wake = false;
	u16 total, count = 0;
	u32 txd = le32_to_cpu(free->txd);
	__le32 *cur_info;

	mt7915_mac_tx_free_prepare(dev);

	total = le16_get_bits(free->ctrl, MT_TX_FREE_MSDU_CNT);
	v3 = (FIELD_GET(MT_TX_FREE_VER, txd) == 0x4);

	for (cur_info = tx_info; count < total; cur_info++) {
		u32 msdu, info;
		u8 i;

		if (WARN_ON_ONCE((void *)cur_info >= end))
			return;

		/*
		 * 1'b1: new wcid pair.
		 * 1'b0: msdu_id with the same 'wcid pair' as above.
		 */
		info = le32_to_cpu(*cur_info);
		if (info & MT_TX_FREE_PAIR) {
			struct mt7915_sta *msta;
			u16 idx;

			idx = FIELD_GET(MT_TX_FREE_WLAN_ID, info);
			wcid = rcu_dereference(dev->mt76.wcid[idx]);
			sta = wcid_to_sta(wcid);
			if (!sta)
				continue;

			msta = container_of(wcid, struct mt7915_sta, wcid);
			spin_lock_bh(&mdev->sta_poll_lock);
			if (list_empty(&msta->wcid.poll_list))
				list_add_tail(&msta->wcid.poll_list,
					      &mdev->sta_poll_list);
			spin_unlock_bh(&mdev->sta_poll_lock);
			continue;
		}

		if (!mtk_wed_device_active(&mdev->mmio.wed) && wcid) {
			u32 tx_retries = 0, tx_failed = 0;

			if (v3 && (info & MT_TX_FREE_MPDU_HEADER_V3)) {
				tx_retries =
					FIELD_GET(MT_TX_FREE_COUNT_V3, info) - 1;
				tx_failed = tx_retries +
					!!FIELD_GET(MT_TX_FREE_STAT_V3, info);
			} else if (!v3 && (info & MT_TX_FREE_MPDU_HEADER)) {
				tx_retries =
					FIELD_GET(MT_TX_FREE_COUNT, info) - 1;
				tx_failed = tx_retries +
					!!FIELD_GET(MT_TX_FREE_STAT, info);
			}
			wcid->stats.tx_retries += tx_retries;
			wcid->stats.tx_failed += tx_failed;
		}

		if (v3 && (info & MT_TX_FREE_MPDU_HEADER_V3))
			continue;

		for (i = 0; i < 1 + v3; i++) {
			if (v3) {
				msdu = (info >> (15 * i)) & MT_TX_FREE_MSDU_ID_V3;
				if (msdu == MT_TX_FREE_MSDU_ID_V3)
					continue;
			} else {
				msdu = FIELD_GET(MT_TX_FREE_MSDU_ID, info);
			}
			count++;
			txwi = mt76_token_release(mdev, msdu, &wake);
			if (!txwi)
				continue;

			mt76_connac2_txwi_free(mdev, txwi, sta, &free_list);
		}
	}

	mt7915_mac_tx_free_done(dev, &free_list, wake);
}
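/*
 * TX-free notify events interleave "wcid pair" words with msdu-id words.
 * The v4 ("v3") format packs two 15-bit msdu ids per word and reports
 * per-MPDU retry/failure statistics in dedicated header words, which is
 * why the loop above releases up to (1 + v3) tokens per info word.
 */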
static void
mt7915_mac_tx_free_v0(struct mt7915_dev *dev, void *data, int len)
{
	struct mt76_connac_tx_free *free = data;
	__le16 *info = (__le16 *)(data + sizeof(*free));
	struct mt76_dev *mdev = &dev->mt76;
	void *end = data + len;
	LIST_HEAD(free_list);
	bool wake = false;
	u8 i, count;

	mt7915_mac_tx_free_prepare(dev);

	count = FIELD_GET(MT_TX_FREE_MSDU_CNT_V0, le16_to_cpu(free->ctrl));
	if (WARN_ON_ONCE((void *)&info[count] > end))
		return;

	for (i = 0; i < count; i++) {
		struct mt76_txwi_cache *txwi;
		u16 msdu = le16_to_cpu(info[i]);

		txwi = mt76_token_release(mdev, msdu, &wake);
		if (!txwi)
			continue;

		mt76_connac2_txwi_free(mdev, txwi, NULL, &free_list);
	}

	mt7915_mac_tx_free_done(dev, &free_list, wake);
}
static void mt7915_mac_add_txs(struct mt7915_dev *dev, void *data)
{
	struct mt7915_sta *msta = NULL;
	struct mt76_wcid *wcid;
	__le32 *txs_data = data;
	u16 wcidx;
	u8 pid;

	wcidx = le32_get_bits(txs_data[2], MT_TXS2_WCID);
	pid = le32_get_bits(txs_data[3], MT_TXS3_PID);

	if (pid < MT_PACKET_ID_WED)
		return;

	if (wcidx >= mt7915_wtbl_size(dev))
		return;

	rcu_read_lock();

	wcid = rcu_dereference(dev->mt76.wcid[wcidx]);
	if (!wcid)
		goto out;

	msta = container_of(wcid, struct mt7915_sta, wcid);

	if (pid == MT_PACKET_ID_WED)
		mt76_connac2_mac_fill_txs(&dev->mt76, wcid, txs_data);
	else
		mt76_connac2_mac_add_txs_skb(&dev->mt76, wcid, pid, txs_data);

	if (!wcid->sta)
		goto out;

	spin_lock_bh(&dev->mt76.sta_poll_lock);
	if (list_empty(&msta->wcid.poll_list))
		list_add_tail(&msta->wcid.poll_list, &dev->mt76.sta_poll_list);
	spin_unlock_bh(&dev->mt76.sta_poll_lock);

out:
	rcu_read_unlock();
}
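/*
 * TXS records serve two purposes above: WED packet ids only update rate
 * and counter accounting, while regular pids complete a matched status
 * skb; in both cases the owning station is queued for the next poll round.
 */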
bool mt7915_rx_check(struct mt76_dev *mdev, void *data, int len)
{
	struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
	__le32 *rxd = (__le32 *)data;
	__le32 *end = (__le32 *)&rxd[len / 4];
	enum rx_pkt_type type;

	type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);

	switch (type) {
	case PKT_TYPE_TXRX_NOTIFY:
		mt7915_mac_tx_free(dev, data, len);
		return false;
	case PKT_TYPE_TXRX_NOTIFY_V0:
		mt7915_mac_tx_free_v0(dev, data, len);
		return false;
	case PKT_TYPE_TXS:
		for (rxd += 2; rxd + 8 <= end; rxd += 8)
			mt7915_mac_add_txs(dev, rxd);
		return false;
	case PKT_TYPE_RX_FW_MONITOR:
		mt7915_debugfs_rx_fw_monitor(dev, data, len);
		return false;
	default:
		return true;
	}
}
void mt7915_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
			 struct sk_buff *skb, u32 *info)
{
	struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
	__le32 *rxd = (__le32 *)skb->data;
	__le32 *end = (__le32 *)&skb->data[skb->len];
	enum rx_pkt_type type;

	type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);

	switch (type) {
	case PKT_TYPE_TXRX_NOTIFY:
		mt7915_mac_tx_free(dev, skb->data, skb->len);
		napi_consume_skb(skb, 1);
		break;
	case PKT_TYPE_TXRX_NOTIFY_V0:
		mt7915_mac_tx_free_v0(dev, skb->data, skb->len);
		napi_consume_skb(skb, 1);
		break;
	case PKT_TYPE_RX_EVENT:
		mt7915_mcu_rx_event(dev, skb);
		break;
	case PKT_TYPE_TXRXV:
		mt7915_mac_fill_rx_vector(dev, skb);
		break;
	case PKT_TYPE_TXS:
		for (rxd += 2; rxd + 8 <= end; rxd += 8)
			mt7915_mac_add_txs(dev, rxd);
		dev_kfree_skb(skb);
		break;
	case PKT_TYPE_RX_FW_MONITOR:
		mt7915_debugfs_rx_fw_monitor(dev, skb->data, skb->len);
		dev_kfree_skb(skb);
		break;
	case PKT_TYPE_NORMAL:
		if (!mt7915_mac_fill_rx(dev, skb, q, info)) {
			mt76_rx(&dev->mt76, q, skb);
			return;
		}
		fallthrough;
	default:
		dev_kfree_skb(skb);
		break;
	}
}
void mt7915_mac_cca_stats_reset(struct mt7915_phy *phy)
{
	struct mt7915_dev *dev = phy->dev;
	u32 reg = MT_WF_PHY_RX_CTRL1(phy->mt76->band_idx);

	mt76_clear(dev, reg, MT_WF_PHY_RX_CTRL1_STSCNT_EN);
	mt76_set(dev, reg, BIT(11) | BIT(9));
}
void mt7915_mac_reset_counters(struct mt7915_phy *phy)
{
	struct mt7915_dev *dev = phy->dev;
	int i;

	for (i = 0; i < 4; i++) {
		mt76_rr(dev, MT_TX_AGG_CNT(phy->mt76->band_idx, i));
		mt76_rr(dev, MT_TX_AGG_CNT2(phy->mt76->band_idx, i));
	}

	phy->mt76->survey_time = ktime_get_boottime();
	memset(phy->mt76->aggr_stats, 0, sizeof(phy->mt76->aggr_stats));

	/* reset airtime counters */
	mt76_set(dev, MT_WF_RMAC_MIB_AIRTIME0(phy->mt76->band_idx),
		 MT_WF_RMAC_MIB_RXTIME_CLR);

	mt7915_mcu_get_chan_mib_info(phy, true);
}
void mt7915_mac_set_timing(struct mt7915_phy *phy)
{
	s16 coverage_class = phy->coverage_class;
	struct mt7915_dev *dev = phy->dev;
	struct mt7915_phy *ext_phy = mt7915_ext_phy(dev);
	u32 val, reg_offset;
	u32 cck = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 231) |
		  FIELD_PREP(MT_TIMEOUT_VAL_CCA, 48);
	u32 ofdm = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 60) |
		   FIELD_PREP(MT_TIMEOUT_VAL_CCA, 28);
	u8 band = phy->mt76->band_idx;
	int eifs_ofdm = 360, sifs = 10, offset;
	bool a_band = !(phy->mt76->chandef.chan->band == NL80211_BAND_2GHZ);

	if (!test_bit(MT76_STATE_RUNNING, &phy->mt76->state))
		return;

	if (ext_phy)
		coverage_class = max_t(s16, dev->phy.coverage_class,
				       ext_phy->coverage_class);

	mt76_set(dev, MT_ARB_SCR(band),
		 MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);

	offset = 3 * coverage_class;
	reg_offset = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, offset) |
		     FIELD_PREP(MT_TIMEOUT_VAL_CCA, offset);

	if (!is_mt7915(&dev->mt76)) {
		if (!a_band) {
			mt76_wr(dev, MT_TMAC_ICR1(band),
				FIELD_PREP(MT_IFS_EIFS_CCK, 314));
			eifs_ofdm = 78;
		} else {
			eifs_ofdm = 84;
		}
	} else if (a_band) {
		sifs = 16;
	}

	mt76_wr(dev, MT_TMAC_CDTR(band), cck + reg_offset);
	mt76_wr(dev, MT_TMAC_ODTR(band), ofdm + reg_offset);
	mt76_wr(dev, MT_TMAC_ICR0(band),
		FIELD_PREP(MT_IFS_EIFS_OFDM, eifs_ofdm) |
		FIELD_PREP(MT_IFS_RIFS, 2) |
		FIELD_PREP(MT_IFS_SIFS, sifs) |
		FIELD_PREP(MT_IFS_SLOT, phy->slottime));

	if (phy->slottime < 20 || a_band)
		val = MT7915_CFEND_RATE_DEFAULT;
	else
		val = MT7915_CFEND_RATE_11B;

	mt76_rmw_field(dev, MT_AGG_ACR0(band), MT_AGG_ACR_CFEND_RATE, val);
	mt76_clear(dev, MT_ARB_SCR(band),
		   MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
}
void mt7915_mac_enable_nf(struct mt7915_dev *dev, bool band)
{
	u32 reg;

	reg = is_mt7915(&dev->mt76) ? MT_WF_PHY_RXTD12(band) :
				      MT_WF_PHY_RXTD12_MT7916(band);
	mt76_set(dev, reg,
		 MT_WF_PHY_RXTD12_IRPI_SW_CLR_ONLY |
		 MT_WF_PHY_RXTD12_IRPI_SW_CLR);

	reg = is_mt7915(&dev->mt76) ? MT_WF_PHY_RX_CTRL1(band) :
				      MT_WF_PHY_RX_CTRL1_MT7916(band);
	mt76_set(dev, reg, FIELD_PREP(MT_WF_PHY_RX_CTRL1_IPI_EN, 0x5));
}
static u8
mt7915_phy_get_nf(struct mt7915_phy *phy, int idx)
{
	static const u8 nf_power[] = { 92, 89, 86, 83, 80, 75, 70, 65, 60, 55, 52 };
	struct mt7915_dev *dev = phy->dev;
	u32 val, sum = 0, n = 0;
	int nss, i;

	for (nss = 0; nss < hweight8(phy->mt76->chainmask); nss++) {
		u32 reg = is_mt7915(&dev->mt76) ?
			MT_WF_IRPI_NSS(0, nss + (idx << dev->dbdc_support)) :
			MT_WF_IRPI_NSS_MT7916(idx, nss);

		for (i = 0; i < ARRAY_SIZE(nf_power); i++, reg += 4) {
			val = mt76_rr(dev, reg);
			sum += val * nf_power[i];
			n += val;
		}
	}

	if (!n)
		return 0;

	return sum / n;
}
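/*
 * The IRPI registers read above form a histogram of idle-power samples
 * per chain; nf_power[] appears to map each bin to its nominal noise
 * level (in -dBm), so the weighted average sum / n computed above yields
 * an estimate of the noise floor.
 */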
void mt7915_update_channel(struct mt76_phy *mphy)
{
	struct mt7915_phy *phy = mphy->priv;
	struct mt76_channel_state *state = mphy->chan_state;
	int nf;

	mt7915_mcu_get_chan_mib_info(phy, false);

	nf = mt7915_phy_get_nf(phy, phy->mt76->band_idx);
	if (!phy->noise)
		phy->noise = nf << 4;
	else if (nf)
		phy->noise += nf - (phy->noise >> 4);

	state->noise = -(phy->noise >> 4);
}
static bool
mt7915_wait_reset_state(struct mt7915_dev *dev, u32 state)
{
	bool ret;

	ret = wait_event_timeout(dev->reset_wait,
				 (READ_ONCE(dev->recovery.state) & state),
				 MT7915_RESET_TIMEOUT);

	WARN(!ret, "Timeout waiting for MCU reset state %x\n", state);
	return ret;
}
static void
mt7915_update_vif_beacon(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
	struct ieee80211_hw *hw = priv;

	switch (vif->type) {
	case NL80211_IFTYPE_MESH_POINT:
	case NL80211_IFTYPE_ADHOC:
	case NL80211_IFTYPE_AP:
		mt7915_mcu_add_beacon(hw, vif, vif->bss_conf.enable_beacon,
				      BSS_CHANGED_BEACON_ENABLED);
		break;
	default:
		break;
	}
}
static void
mt7915_update_beacons(struct mt7915_dev *dev)
{
	struct mt76_phy *mphy_ext = dev->mt76.phys[MT_BAND1];

	ieee80211_iterate_active_interfaces(dev->mt76.hw,
					    IEEE80211_IFACE_ITER_RESUME_ALL,
					    mt7915_update_vif_beacon, dev->mt76.hw);

	if (!mphy_ext)
		return;

	ieee80211_iterate_active_interfaces(mphy_ext->hw,
					    IEEE80211_IFACE_ITER_RESUME_ALL,
					    mt7915_update_vif_beacon, mphy_ext->hw);
}
static int
mt7915_mac_restart(struct mt7915_dev *dev)
{
	struct mt7915_phy *phy2;
	struct mt76_phy *ext_phy;
	struct mt76_dev *mdev = &dev->mt76;
	int i, ret;

	ext_phy = dev->mt76.phys[MT_BAND1];
	phy2 = ext_phy ? ext_phy->priv : NULL;

	if (dev->hif2) {
		mt76_wr(dev, MT_INT1_MASK_CSR, 0x0);
		mt76_wr(dev, MT_INT1_SOURCE_CSR, ~0);
	}

	if (dev_is_pci(mdev->dev)) {
		mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0x0);
		if (dev->hif2) {
			if (is_mt7915(mdev))
				mt76_wr(dev, MT_PCIE1_MAC_INT_ENABLE, 0x0);
			else
				mt76_wr(dev, MT_PCIE1_MAC_INT_ENABLE_MT7916, 0x0);
		}
	}

	set_bit(MT76_RESET, &dev->mphy.state);
	set_bit(MT76_MCU_RESET, &dev->mphy.state);
	wake_up(&dev->mt76.mcu.wait);
	if (ext_phy)
		set_bit(MT76_RESET, &ext_phy->state);

	/* lock/unlock all queues to ensure that no tx is pending */
	mt76_txq_schedule_all(&dev->mphy);
	if (ext_phy)
		mt76_txq_schedule_all(ext_phy);

	/* disable all tx/rx napi */
	mt76_worker_disable(&dev->mt76.tx_worker);
	mt76_for_each_q_rx(mdev, i) {
		if (mdev->q_rx[i].ndesc)
			napi_disable(&dev->mt76.napi[i]);
	}
	napi_disable(&dev->mt76.tx_napi);

	/* token reinit */
	mt76_connac2_tx_token_put(&dev->mt76);
	idr_init(&dev->mt76.token);

	mt7915_dma_reset(dev, true);

	local_bh_disable();
	mt76_for_each_q_rx(mdev, i) {
		if (mdev->q_rx[i].ndesc) {
			napi_enable(&dev->mt76.napi[i]);
			napi_schedule(&dev->mt76.napi[i]);
		}
	}
	local_bh_enable();
	clear_bit(MT76_MCU_RESET, &dev->mphy.state);
	clear_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state);

	mt76_wr(dev, MT_INT_MASK_CSR, dev->mt76.mmio.irqmask);
	mt76_wr(dev, MT_INT_SOURCE_CSR, ~0);
	if (dev->hif2) {
		mt76_wr(dev, MT_INT1_MASK_CSR, dev->mt76.mmio.irqmask);
		mt76_wr(dev, MT_INT1_SOURCE_CSR, ~0);
	}
	if (dev_is_pci(mdev->dev)) {
		mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff);
		if (dev->hif2) {
			if (is_mt7915(mdev))
				mt76_wr(dev, MT_PCIE1_MAC_INT_ENABLE, 0xff);
			else
				mt76_wr(dev, MT_PCIE1_MAC_INT_ENABLE_MT7916, 0xff);
		}
	}

	/* load firmware */
	ret = mt7915_mcu_init_firmware(dev);
	if (ret)
		goto out;

	/* set the necessary init items */
	ret = mt7915_mcu_set_eeprom(dev);
	if (ret)
		goto out;

	mt7915_mac_init(dev);
	mt7915_init_txpower(&dev->phy);
	mt7915_init_txpower(phy2);
	ret = mt7915_txbf_init(dev);

	if (test_bit(MT76_STATE_RUNNING, &dev->mphy.state)) {
		ret = mt7915_run(dev->mphy.hw);
		if (ret)
			goto out;
	}

	if (ext_phy && test_bit(MT76_STATE_RUNNING, &ext_phy->state)) {
		ret = mt7915_run(ext_phy->hw);
		if (ret)
			goto out;
	}

out:
	/* reset done */
	clear_bit(MT76_RESET, &dev->mphy.state);
	if (phy2)
		clear_bit(MT76_RESET, &phy2->mt76->state);

	local_bh_disable();
	napi_enable(&dev->mt76.tx_napi);
	napi_schedule(&dev->mt76.tx_napi);
	local_bh_enable();

	mt76_worker_enable(&dev->mt76.tx_worker);

	return ret;
}
static void
mt7915_mac_full_reset(struct mt7915_dev *dev)
{
	struct mt76_phy *ext_phy;
	int i;

	ext_phy = dev->mt76.phys[MT_BAND1];

	dev->recovery.hw_full_reset = true;

	set_bit(MT76_MCU_RESET, &dev->mphy.state);
	wake_up(&dev->mt76.mcu.wait);
	ieee80211_stop_queues(mt76_hw(dev));
	if (ext_phy)
		ieee80211_stop_queues(ext_phy->hw);

	cancel_delayed_work_sync(&dev->mphy.mac_work);
	if (ext_phy)
		cancel_delayed_work_sync(&ext_phy->mac_work);

	mutex_lock(&dev->mt76.mutex);
	for (i = 0; i < 10; i++) {
		if (!mt7915_mac_restart(dev))
			break;
	}

	if (i == 10)
		dev_err(dev->mt76.dev, "chip full reset failed\n");

	spin_lock_bh(&dev->mt76.sta_poll_lock);
	while (!list_empty(&dev->mt76.sta_poll_list))
		list_del_init(dev->mt76.sta_poll_list.next);
	spin_unlock_bh(&dev->mt76.sta_poll_lock);

	memset(dev->mt76.wcid_mask, 0, sizeof(dev->mt76.wcid_mask));
	dev->mt76.vif_mask = 0;

	i = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7915_WTBL_STA);
	dev->mt76.global_wcid.idx = i;
	dev->recovery.hw_full_reset = false;

	mutex_unlock(&dev->mt76.mutex);

	ieee80211_restart_hw(mt76_hw(dev));
	if (ext_phy)
		ieee80211_restart_hw(ext_phy->hw);
}
/* system error recovery */
void mt7915_mac_reset_work(struct work_struct *work)
{
	struct mt7915_phy *phy2;
	struct mt76_phy *ext_phy;
	struct mt7915_dev *dev;
	int i;

	dev = container_of(work, struct mt7915_dev, reset_work);
	ext_phy = dev->mt76.phys[MT_BAND1];
	phy2 = ext_phy ? ext_phy->priv : NULL;

	/* chip full reset */
	if (dev->recovery.restart) {
		/* disable WA/WM WDT */
		mt76_clear(dev, MT_WFDMA0_MCU_HOST_INT_ENA,
			   MT_MCU_CMD_WDT_MASK);

		if (READ_ONCE(dev->recovery.state) & MT_MCU_CMD_WA_WDT)
			dev->recovery.wa_reset_count++;
		else
			dev->recovery.wm_reset_count++;

		mt7915_mac_full_reset(dev);

		/* enable mcu irq */
		mt7915_irq_enable(dev, MT_INT_MCU_CMD);
		mt7915_irq_disable(dev, 0);

		/* enable WA/WM WDT */
		mt76_set(dev, MT_WFDMA0_MCU_HOST_INT_ENA, MT_MCU_CMD_WDT_MASK);

		dev->recovery.state = MT_MCU_CMD_NORMAL_STATE;
		dev->recovery.restart = false;
		return;
	}

	/* chip partial reset */
	if (!(READ_ONCE(dev->recovery.state) & MT_MCU_CMD_STOP_DMA))
		return;

	ieee80211_stop_queues(mt76_hw(dev));
	if (ext_phy)
		ieee80211_stop_queues(ext_phy->hw);

	set_bit(MT76_RESET, &dev->mphy.state);
	set_bit(MT76_MCU_RESET, &dev->mphy.state);
	wake_up(&dev->mt76.mcu.wait);
	cancel_delayed_work_sync(&dev->mphy.mac_work);
	if (phy2) {
		set_bit(MT76_RESET, &phy2->mt76->state);
		cancel_delayed_work_sync(&phy2->mt76->mac_work);
	}

	mutex_lock(&dev->mt76.mutex);

	mt76_worker_disable(&dev->mt76.tx_worker);
	mt76_for_each_q_rx(&dev->mt76, i)
		napi_disable(&dev->mt76.napi[i]);
	napi_disable(&dev->mt76.tx_napi);

	if (mtk_wed_device_active(&dev->mt76.mmio.wed))
		mtk_wed_device_stop(&dev->mt76.mmio.wed);

	mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_STOPPED);

	if (mt7915_wait_reset_state(dev, MT_MCU_CMD_RESET_DONE)) {
		mt7915_dma_reset(dev, false);

		mt76_connac2_tx_token_put(&dev->mt76);
		idr_init(&dev->mt76.token);

		mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_INIT);
		mt7915_wait_reset_state(dev, MT_MCU_CMD_RECOVERY_DONE);
	}

	mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_RESET_DONE);
	mt7915_wait_reset_state(dev, MT_MCU_CMD_NORMAL_STATE);

	/* enable DMA Tx/Rx and interrupt */
	mt7915_dma_start(dev, false, false);

	clear_bit(MT76_MCU_RESET, &dev->mphy.state);
	clear_bit(MT76_RESET, &dev->mphy.state);
	if (phy2)
		clear_bit(MT76_RESET, &phy2->mt76->state);

	local_bh_disable();
	mt76_for_each_q_rx(&dev->mt76, i) {
		napi_enable(&dev->mt76.napi[i]);
		napi_schedule(&dev->mt76.napi[i]);
	}
	local_bh_enable();

	tasklet_schedule(&dev->mt76.irq_tasklet);

	mt76_worker_enable(&dev->mt76.tx_worker);

	local_bh_disable();
	napi_enable(&dev->mt76.tx_napi);
	napi_schedule(&dev->mt76.tx_napi);
	local_bh_enable();

	ieee80211_wake_queues(mt76_hw(dev));
	if (ext_phy)
		ieee80211_wake_queues(ext_phy->hw);

	mutex_unlock(&dev->mt76.mutex);

	mt7915_update_beacons(dev);

	ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mphy.mac_work,
				     MT7915_WATCHDOG_TIME);
	if (phy2)
		ieee80211_queue_delayed_work(ext_phy->hw,
					     &phy2->mt76->mac_work,
					     MT7915_WATCHDOG_TIME);
}
/* firmware coredump */
void mt7915_mac_dump_work(struct work_struct *work)
{
	const struct mt7915_mem_region *mem_region;
	struct mt7915_crash_data *crash_data;
	struct mt7915_dev *dev;
	struct mt7915_mem_hdr *hdr;
	size_t buf_len;
	int i;
	u32 num;
	u8 *buf;

	dev = container_of(work, struct mt7915_dev, dump_work);

	mutex_lock(&dev->dump_mutex);

	crash_data = mt7915_coredump_new(dev);
	if (!crash_data) {
		mutex_unlock(&dev->dump_mutex);
		goto skip_coredump;
	}

	mem_region = mt7915_coredump_get_mem_layout(dev, &num);
	if (!mem_region || !crash_data->memdump_buf_len) {
		mutex_unlock(&dev->dump_mutex);
		goto skip_memdump;
	}

	buf = crash_data->memdump_buf;
	buf_len = crash_data->memdump_buf_len;

	/* dumping memory content... */
	memset(buf, 0, buf_len);
	for (i = 0; i < num; i++) {
		if (mem_region->len > buf_len) {
			dev_warn(dev->mt76.dev, "%s len %lu is too large\n",
				 mem_region->name,
				 (unsigned long)mem_region->len);
			break;
		}

		/* reserve space for the header */
		hdr = (void *)buf;
		buf += sizeof(*hdr);
		buf_len -= sizeof(*hdr);

		mt7915_memcpy_fromio(dev, buf, mem_region->start,
				     mem_region->len);

		hdr->start = mem_region->start;
		hdr->len = mem_region->len;

		if (!mem_region->len)
			/* note: the header remains, just with zero length */
			break;

		buf += mem_region->len;
		buf_len -= mem_region->len;

		mem_region++;
	}

	mutex_unlock(&dev->dump_mutex);

skip_memdump:
	mt7915_coredump_submit(dev);
skip_coredump:
	queue_work(dev->mt76.wq, &dev->reset_work);
}
void mt7915_reset(struct mt7915_dev *dev)
{
	if (!dev->recovery.hw_init_done)
		return;

	if (dev->recovery.hw_full_reset)
		return;

	/* wm/wa exception: do full recovery */
	if (READ_ONCE(dev->recovery.state) & MT_MCU_CMD_WDT_MASK) {
		dev->recovery.restart = true;
		dev_info(dev->mt76.dev,
			 "%s indicated firmware crash, attempting recovery\n",
			 wiphy_name(dev->mt76.hw->wiphy));

		mt7915_irq_disable(dev, MT_INT_MCU_CMD);
		queue_work(dev->mt76.wq, &dev->dump_work);
		return;
	}

	if ((READ_ONCE(dev->recovery.state) & MT_MCU_CMD_STOP_DMA)) {
		set_bit(MT76_MCU_RESET, &dev->mphy.state);
		wake_up(&dev->mt76.mcu.wait);
	}

	queue_work(dev->mt76.wq, &dev->reset_work);
	wake_up(&dev->reset_wait);
}
void mt7915_mac_update_stats(struct mt7915_phy *phy)
{
	struct mt76_mib_stats *mib = &phy->mib;
	struct mt7915_dev *dev = phy->dev;
	int i, aggr0 = 0, aggr1, cnt;
	u8 band = phy->mt76->band_idx;
	u32 val;

	cnt = mt76_rr(dev, MT_MIB_SDR3(band));
	mib->fcs_err_cnt += is_mt7915(&dev->mt76) ?
		FIELD_GET(MT_MIB_SDR3_FCS_ERR_MASK, cnt) :
		FIELD_GET(MT_MIB_SDR3_FCS_ERR_MASK_MT7916, cnt);

	cnt = mt76_rr(dev, MT_MIB_SDR4(band));
	mib->rx_fifo_full_cnt += FIELD_GET(MT_MIB_SDR4_RX_FIFO_FULL_MASK, cnt);

	cnt = mt76_rr(dev, MT_MIB_SDR5(band));
	mib->rx_mpdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_SDR6(band));
	mib->channel_idle_cnt += FIELD_GET(MT_MIB_SDR6_CHANNEL_IDL_CNT_MASK, cnt);

	cnt = mt76_rr(dev, MT_MIB_SDR7(band));
	mib->rx_vector_mismatch_cnt +=
		FIELD_GET(MT_MIB_SDR7_RX_VECTOR_MISMATCH_CNT_MASK, cnt);

	cnt = mt76_rr(dev, MT_MIB_SDR8(band));
	mib->rx_delimiter_fail_cnt +=
		FIELD_GET(MT_MIB_SDR8_RX_DELIMITER_FAIL_CNT_MASK, cnt);

	cnt = mt76_rr(dev, MT_MIB_SDR10(band));
	mib->rx_mrdy_cnt += is_mt7915(&dev->mt76) ?
		FIELD_GET(MT_MIB_SDR10_MRDY_COUNT_MASK, cnt) :
		FIELD_GET(MT_MIB_SDR10_MRDY_COUNT_MASK_MT7916, cnt);

	cnt = mt76_rr(dev, MT_MIB_SDR11(band));
	mib->rx_len_mismatch_cnt +=
		FIELD_GET(MT_MIB_SDR11_RX_LEN_MISMATCH_CNT_MASK, cnt);

	cnt = mt76_rr(dev, MT_MIB_SDR12(band));
	mib->tx_ampdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_SDR13(band));
	mib->tx_stop_q_empty_cnt +=
		FIELD_GET(MT_MIB_SDR13_TX_STOP_Q_EMPTY_CNT_MASK, cnt);

	cnt = mt76_rr(dev, MT_MIB_SDR14(band));
	mib->tx_mpdu_attempts_cnt += is_mt7915(&dev->mt76) ?
		FIELD_GET(MT_MIB_SDR14_TX_MPDU_ATTEMPTS_CNT_MASK, cnt) :
		FIELD_GET(MT_MIB_SDR14_TX_MPDU_ATTEMPTS_CNT_MASK_MT7916, cnt);

	cnt = mt76_rr(dev, MT_MIB_SDR15(band));
	mib->tx_mpdu_success_cnt += is_mt7915(&dev->mt76) ?
		FIELD_GET(MT_MIB_SDR15_TX_MPDU_SUCCESS_CNT_MASK, cnt) :
		FIELD_GET(MT_MIB_SDR15_TX_MPDU_SUCCESS_CNT_MASK_MT7916, cnt);

	cnt = mt76_rr(dev, MT_MIB_SDR16(band));
	mib->primary_cca_busy_time +=
		FIELD_GET(MT_MIB_SDR16_PRIMARY_CCA_BUSY_TIME_MASK, cnt);

	cnt = mt76_rr(dev, MT_MIB_SDR17(band));
	mib->secondary_cca_busy_time +=
		FIELD_GET(MT_MIB_SDR17_SECONDARY_CCA_BUSY_TIME_MASK, cnt);

	cnt = mt76_rr(dev, MT_MIB_SDR18(band));
	mib->primary_energy_detect_time +=
		FIELD_GET(MT_MIB_SDR18_PRIMARY_ENERGY_DETECT_TIME_MASK, cnt);

	cnt = mt76_rr(dev, MT_MIB_SDR19(band));
	mib->cck_mdrdy_time += FIELD_GET(MT_MIB_SDR19_CCK_MDRDY_TIME_MASK, cnt);

	cnt = mt76_rr(dev, MT_MIB_SDR20(band));
	mib->ofdm_mdrdy_time +=
		FIELD_GET(MT_MIB_SDR20_OFDM_VHT_MDRDY_TIME_MASK, cnt);

	cnt = mt76_rr(dev, MT_MIB_SDR21(band));
	mib->green_mdrdy_time +=
		FIELD_GET(MT_MIB_SDR21_GREEN_MDRDY_TIME_MASK, cnt);

	cnt = mt76_rr(dev, MT_MIB_SDR22(band));
	mib->rx_ampdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_SDR23(band));
	mib->rx_ampdu_bytes_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_SDR24(band));
	mib->rx_ampdu_valid_subframe_cnt += is_mt7915(&dev->mt76) ?
		FIELD_GET(MT_MIB_SDR24_RX_AMPDU_SF_CNT_MASK, cnt) :
		FIELD_GET(MT_MIB_SDR24_RX_AMPDU_SF_CNT_MASK_MT7916, cnt);

	cnt = mt76_rr(dev, MT_MIB_SDR25(band));
	mib->rx_ampdu_valid_subframe_bytes_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_SDR27(band));
	mib->tx_rwp_fail_cnt +=
		FIELD_GET(MT_MIB_SDR27_TX_RWP_FAIL_CNT_MASK, cnt);

	cnt = mt76_rr(dev, MT_MIB_SDR28(band));
	mib->tx_rwp_need_cnt +=
		FIELD_GET(MT_MIB_SDR28_TX_RWP_NEED_CNT_MASK, cnt);

	cnt = mt76_rr(dev, MT_MIB_SDR29(band));
	mib->rx_pfdrop_cnt += is_mt7915(&dev->mt76) ?
		FIELD_GET(MT_MIB_SDR29_RX_PFDROP_CNT_MASK, cnt) :
		FIELD_GET(MT_MIB_SDR29_RX_PFDROP_CNT_MASK_MT7916, cnt);

	cnt = mt76_rr(dev, MT_MIB_SDRVEC(band));
	mib->rx_vec_queue_overflow_drop_cnt += is_mt7915(&dev->mt76) ?
		FIELD_GET(MT_MIB_SDR30_RX_VEC_QUEUE_OVERFLOW_DROP_CNT_MASK, cnt) :
		FIELD_GET(MT_MIB_SDR30_RX_VEC_QUEUE_OVERFLOW_DROP_CNT_MASK_MT7916, cnt);

	cnt = mt76_rr(dev, MT_MIB_SDR31(band));
	mib->rx_ba_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_SDRMUBF(band));
	mib->tx_bf_cnt += FIELD_GET(MT_MIB_MU_BF_TX_CNT, cnt);

	cnt = mt76_rr(dev, MT_MIB_DR8(band));
	mib->tx_mu_mpdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_DR9(band));
	mib->tx_mu_acked_mpdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_DR11(band));
	mib->tx_su_acked_mpdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_ETBF_PAR_RPT0(band));
	mib->tx_bf_rx_fb_bw = FIELD_GET(MT_ETBF_PAR_RPT0_FB_BW, cnt);
	mib->tx_bf_rx_fb_nc_cnt += FIELD_GET(MT_ETBF_PAR_RPT0_FB_NC, cnt);
	mib->tx_bf_rx_fb_nr_cnt += FIELD_GET(MT_ETBF_PAR_RPT0_FB_NR, cnt);

	for (i = 0; i < ARRAY_SIZE(mib->tx_amsdu); i++) {
		cnt = mt76_rr(dev, MT_PLE_AMSDU_PACK_MSDU_CNT(i));
		mib->tx_amsdu[i] += cnt;
		mib->tx_amsdu_cnt += cnt;
	}

	if (is_mt7915(&dev->mt76)) {
		for (i = 0, aggr1 = aggr0 + 8; i < 4; i++) {
			val = mt76_rr(dev, MT_MIB_MB_SDR1(band, (i << 4)));
			mib->ba_miss_cnt +=
				FIELD_GET(MT_MIB_BA_MISS_COUNT_MASK, val);
			mib->ack_fail_cnt +=
				FIELD_GET(MT_MIB_ACK_FAIL_COUNT_MASK, val);

			val = mt76_rr(dev, MT_MIB_MB_SDR0(band, (i << 4)));
			mib->rts_cnt += FIELD_GET(MT_MIB_RTS_COUNT_MASK, val);
			mib->rts_retries_cnt +=
				FIELD_GET(MT_MIB_RTS_RETRIES_COUNT_MASK, val);

			val = mt76_rr(dev, MT_TX_AGG_CNT(band, i));
			phy->mt76->aggr_stats[aggr0++] += val & 0xffff;
			phy->mt76->aggr_stats[aggr0++] += val >> 16;

			val = mt76_rr(dev, MT_TX_AGG_CNT2(band, i));
			phy->mt76->aggr_stats[aggr1++] += val & 0xffff;
			phy->mt76->aggr_stats[aggr1++] += val >> 16;
		}

		cnt = mt76_rr(dev, MT_MIB_SDR32(band));
		mib->tx_pkt_ebf_cnt += FIELD_GET(MT_MIB_SDR32_TX_PKT_EBF_CNT, cnt);

		cnt = mt76_rr(dev, MT_MIB_SDR33(band));
		mib->tx_pkt_ibf_cnt += FIELD_GET(MT_MIB_SDR33_TX_PKT_IBF_CNT, cnt);

		cnt = mt76_rr(dev, MT_ETBF_TX_APP_CNT(band));
		mib->tx_bf_ibf_ppdu_cnt += FIELD_GET(MT_ETBF_TX_IBF_CNT, cnt);
		mib->tx_bf_ebf_ppdu_cnt += FIELD_GET(MT_ETBF_TX_EBF_CNT, cnt);

		cnt = mt76_rr(dev, MT_ETBF_TX_NDP_BFRP(band));
		mib->tx_bf_fb_cpl_cnt += FIELD_GET(MT_ETBF_TX_FB_CPL, cnt);
		mib->tx_bf_fb_trig_cnt += FIELD_GET(MT_ETBF_TX_FB_TRI, cnt);

		cnt = mt76_rr(dev, MT_ETBF_RX_FB_CNT(band));
		mib->tx_bf_rx_fb_all_cnt += FIELD_GET(MT_ETBF_RX_FB_ALL, cnt);
		mib->tx_bf_rx_fb_he_cnt += FIELD_GET(MT_ETBF_RX_FB_HE, cnt);
		mib->tx_bf_rx_fb_vht_cnt += FIELD_GET(MT_ETBF_RX_FB_VHT, cnt);
		mib->tx_bf_rx_fb_ht_cnt += FIELD_GET(MT_ETBF_RX_FB_HT, cnt);
	} else {
		for (i = 0; i < 2; i++) {
			/* rts count */
			val = mt76_rr(dev, MT_MIB_MB_SDR0(band, (i << 2)));
			mib->rts_cnt += FIELD_GET(GENMASK(15, 0), val);
			mib->rts_cnt += FIELD_GET(GENMASK(31, 16), val);

			/* rts retry count */
			val = mt76_rr(dev, MT_MIB_MB_SDR1(band, (i << 2)));
			mib->rts_retries_cnt += FIELD_GET(GENMASK(15, 0), val);
			mib->rts_retries_cnt += FIELD_GET(GENMASK(31, 16), val);

			/* ba miss count */
			val = mt76_rr(dev, MT_MIB_MB_SDR2(band, (i << 2)));
			mib->ba_miss_cnt += FIELD_GET(GENMASK(15, 0), val);
			mib->ba_miss_cnt += FIELD_GET(GENMASK(31, 16), val);

			/* ack fail count */
			val = mt76_rr(dev, MT_MIB_MB_BFTF(band, (i << 2)));
			mib->ack_fail_cnt += FIELD_GET(GENMASK(15, 0), val);
			mib->ack_fail_cnt += FIELD_GET(GENMASK(31, 16), val);
		}

		for (i = 0; i < 8; i++) {
			val = mt76_rr(dev, MT_TX_AGG_CNT(band, i));
			phy->mt76->aggr_stats[aggr0++] += FIELD_GET(GENMASK(15, 0), val);
			phy->mt76->aggr_stats[aggr0++] += FIELD_GET(GENMASK(31, 16), val);
		}

		cnt = mt76_rr(dev, MT_MIB_SDR32(band));
		mib->tx_pkt_ibf_cnt += FIELD_GET(MT_MIB_SDR32_TX_PKT_IBF_CNT, cnt);
		mib->tx_bf_ibf_ppdu_cnt += FIELD_GET(MT_MIB_SDR32_TX_PKT_IBF_CNT, cnt);
		mib->tx_pkt_ebf_cnt += FIELD_GET(MT_MIB_SDR32_TX_PKT_EBF_CNT, cnt);
		mib->tx_bf_ebf_ppdu_cnt += FIELD_GET(MT_MIB_SDR32_TX_PKT_EBF_CNT, cnt);

		cnt = mt76_rr(dev, MT_MIB_BFCR7(band));
		mib->tx_bf_fb_cpl_cnt += FIELD_GET(MT_MIB_BFCR7_BFEE_TX_FB_CPL, cnt);

		cnt = mt76_rr(dev, MT_MIB_BFCR2(band));
		mib->tx_bf_fb_trig_cnt += FIELD_GET(MT_MIB_BFCR2_BFEE_TX_FB_TRIG, cnt);

		cnt = mt76_rr(dev, MT_MIB_BFCR0(band));
		mib->tx_bf_rx_fb_vht_cnt += FIELD_GET(MT_MIB_BFCR0_RX_FB_VHT, cnt);
		mib->tx_bf_rx_fb_all_cnt += FIELD_GET(MT_MIB_BFCR0_RX_FB_VHT, cnt);
		mib->tx_bf_rx_fb_ht_cnt += FIELD_GET(MT_MIB_BFCR0_RX_FB_HT, cnt);
		mib->tx_bf_rx_fb_all_cnt += FIELD_GET(MT_MIB_BFCR0_RX_FB_HT, cnt);

		cnt = mt76_rr(dev, MT_MIB_BFCR1(band));
		mib->tx_bf_rx_fb_he_cnt += FIELD_GET(MT_MIB_BFCR1_RX_FB_HE, cnt);
		mib->tx_bf_rx_fb_all_cnt += FIELD_GET(MT_MIB_BFCR1_RX_FB_HE, cnt);
	}
}
static void mt7915_mac_severe_check(struct mt7915_phy *phy)
{
	struct mt7915_dev *dev = phy->dev;
	u32 trb;

	if (!phy->omac_mask)
		return;

	/* In rare cases, TRB pointers might get out of sync and lead to RMAC
	 * stopping Rx, so check the status periodically to see if the TRB
	 * hardware requires minimal recovery.
	 */
	trb = mt76_rr(dev, MT_TRB_RXPSR0(phy->mt76->band_idx));

	if ((FIELD_GET(MT_TRB_RXPSR0_RX_RMAC_PTR, trb) !=
	     FIELD_GET(MT_TRB_RXPSR0_RX_WTBL_PTR, trb)) &&
	    (FIELD_GET(MT_TRB_RXPSR0_RX_RMAC_PTR, phy->trb_ts) !=
	     FIELD_GET(MT_TRB_RXPSR0_RX_WTBL_PTR, phy->trb_ts)) &&
	    trb == phy->trb_ts)
		mt7915_mcu_set_ser(dev, SER_RECOVER, SER_SET_RECOVER_L3_RX_ABORT,
				   phy->mt76->band_idx);

	phy->trb_ts = trb;
}
void mt7915_mac_sta_rc_work(struct work_struct *work)
{
	struct mt7915_dev *dev = container_of(work, struct mt7915_dev, rc_work);
	struct ieee80211_sta *sta;
	struct ieee80211_vif *vif;
	struct mt7915_sta *msta;
	u32 changed;
	LIST_HEAD(list);

	spin_lock_bh(&dev->mt76.sta_poll_lock);
	list_splice_init(&dev->sta_rc_list, &list);

	while (!list_empty(&list)) {
		msta = list_first_entry(&list, struct mt7915_sta, rc_list);
		list_del_init(&msta->rc_list);
		changed = msta->changed;
		msta->changed = 0;
		spin_unlock_bh(&dev->mt76.sta_poll_lock);

		sta = container_of((void *)msta, struct ieee80211_sta, drv_priv);
		vif = container_of((void *)msta->vif, struct ieee80211_vif, drv_priv);

		if (changed & (IEEE80211_RC_SUPP_RATES_CHANGED |
			       IEEE80211_RC_NSS_CHANGED |
			       IEEE80211_RC_BW_CHANGED))
			mt7915_mcu_add_rate_ctrl(dev, vif, sta, true);

		if (changed & IEEE80211_RC_SMPS_CHANGED)
			mt7915_mcu_add_smps(dev, vif, sta);

		spin_lock_bh(&dev->mt76.sta_poll_lock);
	}

	spin_unlock_bh(&dev->mt76.sta_poll_lock);
}
void mt7915_mac_work(struct work_struct *work)
{
	struct mt7915_phy *phy;
	struct mt76_phy *mphy;

	mphy = (struct mt76_phy *)container_of(work, struct mt76_phy,
					       mac_work.work);
	phy = mphy->priv;

	mutex_lock(&mphy->dev->mutex);

	mt76_update_survey(mphy);
	if (++mphy->mac_work_count == 5) {
		mphy->mac_work_count = 0;

		mt7915_mac_update_stats(phy);
		mt7915_mac_severe_check(phy);

		if (phy->dev->muru_debug)
			mt7915_mcu_muru_debug_get(phy);
	}

	mutex_unlock(&mphy->dev->mutex);

	mt76_tx_status_check(mphy->dev, false);

	ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work,
				     MT7915_WATCHDOG_TIME);
}
static void mt7915_dfs_stop_radar_detector(struct mt7915_phy *phy)
{
	struct mt7915_dev *dev = phy->dev;

	if (phy->rdd_state & BIT(0))
		mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_STOP, 0,
					MT_RX_SEL0, 0);
	if (phy->rdd_state & BIT(1))
		mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_STOP, 1,
					MT_RX_SEL0, 0);
}
static int mt7915_dfs_start_rdd(struct mt7915_dev *dev, int chain)
{
	int err, region;

	switch (dev->mt76.region) {
	case NL80211_DFS_ETSI:
		region = 0;
		break;
	case NL80211_DFS_JP:
		region = 2;
		break;
	case NL80211_DFS_FCC:
	default:
		region = 1;
		break;
	}

	err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_START, chain,
				      MT_RX_SEL0, region);
	if (err < 0)
		return err;

	if (is_mt7915(&dev->mt76)) {
		err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_SET_WF_ANT, chain,
					      0, dev->dbdc_support ? 2 : 0);
		if (err < 0)
			return err;
	}

	return mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_DET_MODE, chain,
				       MT_RX_SEL0, 1);
}
static int mt7915_dfs_start_radar_detector(struct mt7915_phy *phy)
{
	struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
	struct mt7915_dev *dev = phy->dev;
	int err;

	/* start CAC */
	err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_CAC_START,
				      phy->mt76->band_idx, MT_RX_SEL0, 0);
	if (err < 0)
		return err;

	err = mt7915_dfs_start_rdd(dev, phy->mt76->band_idx);
	if (err < 0)
		return err;

	phy->rdd_state |= BIT(phy->mt76->band_idx);

	if (!is_mt7915(&dev->mt76))
		return 0;

	if (chandef->width == NL80211_CHAN_WIDTH_160 ||
	    chandef->width == NL80211_CHAN_WIDTH_80P80) {
		err = mt7915_dfs_start_rdd(dev, 1);
		if (err < 0)
			return err;

		phy->rdd_state |= BIT(1);
	}

	return 0;
}
static int
mt7915_dfs_init_radar_specs(struct mt7915_phy *phy)
{
	const struct mt7915_dfs_radar_spec *radar_specs;
	struct mt7915_dev *dev = phy->dev;
	int err, i;

	switch (dev->mt76.region) {
	case NL80211_DFS_FCC:
		radar_specs = &fcc_radar_specs;
		err = mt7915_mcu_set_fcc5_lpn(dev, 8);
		if (err < 0)
			return err;
		break;
	case NL80211_DFS_ETSI:
		radar_specs = &etsi_radar_specs;
		break;
	case NL80211_DFS_JP:
		radar_specs = &jp_radar_specs;
		break;
	default:
		return -EINVAL;
	}

	for (i = 0; i < ARRAY_SIZE(radar_specs->radar_pattern); i++) {
		err = mt7915_mcu_set_radar_th(dev, i,
					      &radar_specs->radar_pattern[i]);
		if (err < 0)
			return err;
	}

	return mt7915_mcu_set_pulse_th(dev, &radar_specs->pulse_th);
}
int mt7915_dfs_init_radar_detector(struct mt7915_phy *phy)
{
	struct mt7915_dev *dev = phy->dev;
	enum mt76_dfs_state dfs_state, prev_state;
	int err;

	prev_state = phy->mt76->dfs_state;
	dfs_state = mt76_phy_dfs_state(phy->mt76);

	if (prev_state == dfs_state)
		return 0;

	if (prev_state == MT_DFS_STATE_UNKNOWN)
		mt7915_dfs_stop_radar_detector(phy);

	if (dfs_state == MT_DFS_STATE_DISABLED)
		goto stop;

	if (prev_state <= MT_DFS_STATE_DISABLED) {
		err = mt7915_dfs_init_radar_specs(phy);
		if (err < 0)
			return err;

		err = mt7915_dfs_start_radar_detector(phy);
		if (err < 0)
			return err;

		phy->mt76->dfs_state = MT_DFS_STATE_CAC;
	}

	if (dfs_state == MT_DFS_STATE_CAC)
		return 0;

	err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_CAC_END,
				      phy->mt76->band_idx, MT_RX_SEL0, 0);
	if (err < 0) {
		phy->mt76->dfs_state = MT_DFS_STATE_UNKNOWN;
		return err;
	}

	phy->mt76->dfs_state = MT_DFS_STATE_ACTIVE;
	return 0;

stop:
	err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_NORMAL_START,
				      phy->mt76->band_idx, MT_RX_SEL0, 0);
	if (err < 0)
		return err;

	if (is_mt7915(&dev->mt76)) {
		err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_SET_WF_ANT,
					      phy->mt76->band_idx, 0,
					      dev->dbdc_support ? 2 : 0);
		if (err < 0)
			return err;
	}

	mt7915_dfs_stop_radar_detector(phy);
	phy->mt76->dfs_state = MT_DFS_STATE_DISABLED;

	return 0;
}
static u32
mt7915_mac_twt_duration_align(int duration)
{
	return duration << 8;
}
static u64
mt7915_mac_twt_sched_list_add(struct mt7915_dev *dev,
			      struct mt7915_twt_flow *flow)
{
	struct mt7915_twt_flow *iter, *iter_next;
	u32 duration = flow->duration << 8;
	u64 start_tsf;

	iter = list_first_entry_or_null(&dev->twt_list,
					struct mt7915_twt_flow, list);
	if (!iter || !iter->sched || iter->start_tsf > duration) {
		/* add flow as first entry in the list */
		list_add(&flow->list, &dev->twt_list);
		return 0;
	}

	list_for_each_entry_safe(iter, iter_next, &dev->twt_list, list) {
		start_tsf = iter->start_tsf +
			    mt7915_mac_twt_duration_align(iter->duration);
		if (list_is_last(&iter->list, &dev->twt_list))
			break;

		if (!iter_next->sched ||
		    iter_next->start_tsf > start_tsf + duration) {
			list_add(&flow->list, &iter->list);
			goto out;
		}
	}

	/* add flow as last entry in the list */
	list_add_tail(&flow->list, &dev->twt_list);
out:
	return start_tsf;
}
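/*
 * The scheduler above packs TWT service periods along the TSF timeline:
 * a new flow is placed into the first gap between already-scheduled flows
 * that is at least 'duration' wide (in units of 256us, hence the << 8),
 * and the start of the chosen slot is returned as its start_tsf.
 */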
static int mt7915_mac_check_twt_req(struct ieee80211_twt_setup *twt)
{
	struct ieee80211_twt_params *twt_agrt;
	u64 interval, duration;
	u16 mantissa;
	u8 exp;

	/* only individual agreement supported */
	if (twt->control & IEEE80211_TWT_CONTROL_NEG_TYPE_BROADCAST)
		return -EOPNOTSUPP;

	/* only 256us unit supported */
	if (twt->control & IEEE80211_TWT_CONTROL_WAKE_DUR_UNIT)
		return -EOPNOTSUPP;

	twt_agrt = (struct ieee80211_twt_params *)twt->params;

	/* explicit agreement not supported */
	if (!(twt_agrt->req_type & cpu_to_le16(IEEE80211_TWT_REQTYPE_IMPLICIT)))
		return -EOPNOTSUPP;

	exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP,
			le16_to_cpu(twt_agrt->req_type));
	mantissa = le16_to_cpu(twt_agrt->mantissa);
	duration = twt_agrt->min_twt_dur << 8;

	interval = (u64)mantissa << exp;
	if (interval < duration)
		return -EOPNOTSUPP;

	return 0;
}
static bool
mt7915_mac_twt_param_equal(struct mt7915_sta *msta,
			   struct ieee80211_twt_params *twt_agrt)
{
	u16 type = le16_to_cpu(twt_agrt->req_type);
	u8 exp;
	int i;

	exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP, type);
	for (i = 0; i < MT7915_MAX_STA_TWT_AGRT; i++) {
		struct mt7915_twt_flow *f;

		if (!(msta->twt.flowid_mask & BIT(i)))
			continue;

		f = &msta->twt.flow[i];
		if (f->duration == twt_agrt->min_twt_dur &&
		    f->mantissa == twt_agrt->mantissa &&
		    f->exp == exp &&
		    f->protection == !!(type & IEEE80211_TWT_REQTYPE_PROTECTION) &&
		    f->flowtype == !!(type & IEEE80211_TWT_REQTYPE_FLOWTYPE) &&
		    f->trigger == !!(type & IEEE80211_TWT_REQTYPE_TRIGGER))
			return true;
	}

	return false;
}
void mt7915_mac_add_twt_setup(struct ieee80211_hw *hw,
			      struct ieee80211_sta *sta,
			      struct ieee80211_twt_setup *twt)
{
	enum ieee80211_twt_setup_cmd setup_cmd = TWT_SETUP_CMD_REJECT;
	struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
	struct ieee80211_twt_params *twt_agrt = (void *)twt->params;
	u16 req_type = le16_to_cpu(twt_agrt->req_type);
	enum ieee80211_twt_setup_cmd sta_setup_cmd;
	struct mt7915_dev *dev = mt7915_hw_dev(hw);
	struct mt7915_twt_flow *flow;
	int flowid, table_id;
	u8 exp;

	if (mt7915_mac_check_twt_req(twt))
		goto out;

	mutex_lock(&dev->mt76.mutex);

	if (dev->twt.n_agrt == MT7915_MAX_TWT_AGRT)
		goto unlock;

	if (hweight8(msta->twt.flowid_mask) == ARRAY_SIZE(msta->twt.flow))
		goto unlock;

	if (twt_agrt->min_twt_dur < MT7915_MIN_TWT_DUR) {
		setup_cmd = TWT_SETUP_CMD_DICTATE;
		twt_agrt->min_twt_dur = MT7915_MIN_TWT_DUR;
		goto unlock;
	}

	flowid = ffs(~msta->twt.flowid_mask) - 1;
	twt_agrt->req_type &= ~cpu_to_le16(IEEE80211_TWT_REQTYPE_FLOWID);
	twt_agrt->req_type |= le16_encode_bits(flowid,
					       IEEE80211_TWT_REQTYPE_FLOWID);

	table_id = ffs(~dev->twt.table_mask) - 1;
	exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP, req_type);
	sta_setup_cmd = FIELD_GET(IEEE80211_TWT_REQTYPE_SETUP_CMD, req_type);

	if (mt7915_mac_twt_param_equal(msta, twt_agrt))
		goto unlock;

	flow = &msta->twt.flow[flowid];
	memset(flow, 0, sizeof(*flow));
	INIT_LIST_HEAD(&flow->list);
	flow->wcid = msta->wcid.idx;
	flow->table_id = table_id;
	flow->id = flowid;
	flow->duration = twt_agrt->min_twt_dur;
	flow->mantissa = twt_agrt->mantissa;
	flow->exp = exp;
	flow->protection = !!(req_type & IEEE80211_TWT_REQTYPE_PROTECTION);
	flow->flowtype = !!(req_type & IEEE80211_TWT_REQTYPE_FLOWTYPE);
	flow->trigger = !!(req_type & IEEE80211_TWT_REQTYPE_TRIGGER);

	if (sta_setup_cmd == TWT_SETUP_CMD_REQUEST ||
	    sta_setup_cmd == TWT_SETUP_CMD_SUGGEST) {
		u64 interval = (u64)le16_to_cpu(twt_agrt->mantissa) << exp;
		u64 flow_tsf, curr_tsf;
		u32 rem;

		flow->sched = true;
		flow->start_tsf = mt7915_mac_twt_sched_list_add(dev, flow);
		curr_tsf = __mt7915_get_tsf(hw, msta->vif);
		div_u64_rem(curr_tsf - flow->start_tsf, interval, &rem);
		flow_tsf = curr_tsf + interval - rem;
		twt_agrt->twt = cpu_to_le64(flow_tsf);
	} else {
		list_add_tail(&flow->list, &dev->twt_list);
		flow->tsf = le64_to_cpu(twt_agrt->twt);
	}

	if (mt7915_mcu_twt_agrt_update(dev, msta->vif, flow, MCU_TWT_AGRT_ADD))
		goto unlock;

	setup_cmd = TWT_SETUP_CMD_ACCEPT;
	dev->twt.table_mask |= BIT(table_id);
	msta->twt.flowid_mask |= BIT(flowid);
	dev->twt.n_agrt++;

unlock:
	mutex_unlock(&dev->mt76.mutex);
out:
	twt_agrt->req_type &= ~cpu_to_le16(IEEE80211_TWT_REQTYPE_SETUP_CMD);
	twt_agrt->req_type |=
		le16_encode_bits(setup_cmd, IEEE80211_TWT_REQTYPE_SETUP_CMD);
	twt->control = (twt->control & IEEE80211_TWT_CONTROL_WAKE_DUR_UNIT) |
		       (twt->control & IEEE80211_TWT_CONTROL_RX_DISABLED);
}
void mt7915_mac_twt_teardown_flow(struct mt7915_dev *dev,
				  struct mt7915_sta *msta,
				  u8 flowid)
{
	struct mt7915_twt_flow *flow;

	lockdep_assert_held(&dev->mt76.mutex);

	if (flowid >= ARRAY_SIZE(msta->twt.flow))
		return;

	if (!(msta->twt.flowid_mask & BIT(flowid)))
		return;

	flow = &msta->twt.flow[flowid];
	if (mt7915_mcu_twt_agrt_update(dev, msta->vif, flow,
				       MCU_TWT_AGRT_DELETE))
		return;

	list_del_init(&flow->list);
	msta->twt.flowid_mask &= ~BIT(flowid);
	dev->twt.table_mask &= ~BIT(flow->table_id);
	dev->twt.n_agrt--;
}