1 // SPDX-License-Identifier: ISC
2 /* Copyright (C) 2023 MediaTek Inc. */
4 #include <linux/devcoredump.h>
5 #include <linux/etherdevice.h>
6 #include <linux/timekeeping.h>
12 bool mt7925_mac_wtbl_update(struct mt792x_dev
*dev
, int idx
, u32 mask
)
14 mt76_rmw(dev
, MT_WTBL_UPDATE
, MT_WTBL_UPDATE_WLAN_IDX
,
15 FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX
, idx
) | mask
);
17 return mt76_poll(dev
, MT_WTBL_UPDATE
, MT_WTBL_UPDATE_BUSY
,
21 static void mt7925_mac_sta_poll(struct mt792x_dev
*dev
)
23 static const u8 ac_to_tid
[] = {
24 [IEEE80211_AC_BE
] = 0,
25 [IEEE80211_AC_BK
] = 1,
26 [IEEE80211_AC_VI
] = 4,
29 struct ieee80211_sta
*sta
;
30 struct mt792x_sta
*msta
;
31 struct mt792x_link_sta
*mlink
;
32 u32 tx_time
[IEEE80211_NUM_ACS
], rx_time
[IEEE80211_NUM_ACS
];
33 LIST_HEAD(sta_poll_list
);
34 struct rate_info
*rate
;
38 spin_lock_bh(&dev
->mt76
.sta_poll_lock
);
39 list_splice_init(&dev
->mt76
.sta_poll_list
, &sta_poll_list
);
40 spin_unlock_bh(&dev
->mt76
.sta_poll_lock
);
48 if (list_empty(&sta_poll_list
))
50 mlink
= list_first_entry(&sta_poll_list
,
51 struct mt792x_link_sta
, wcid
.poll_list
);
52 msta
= container_of(mlink
, struct mt792x_sta
, deflink
);
53 spin_lock_bh(&dev
->mt76
.sta_poll_lock
);
54 list_del_init(&mlink
->wcid
.poll_list
);
55 spin_unlock_bh(&dev
->mt76
.sta_poll_lock
);
57 idx
= mlink
->wcid
.idx
;
58 addr
= mt7925_mac_wtbl_lmac_addr(dev
, idx
, MT_WTBL_AC0_CTT_OFFSET
);
60 for (i
= 0; i
< IEEE80211_NUM_ACS
; i
++) {
61 u32 tx_last
= mlink
->airtime_ac
[i
];
62 u32 rx_last
= mlink
->airtime_ac
[i
+ 4];
64 mlink
->airtime_ac
[i
] = mt76_rr(dev
, addr
);
65 mlink
->airtime_ac
[i
+ 4] = mt76_rr(dev
, addr
+ 4);
67 tx_time
[i
] = mlink
->airtime_ac
[i
] - tx_last
;
68 rx_time
[i
] = mlink
->airtime_ac
[i
+ 4] - rx_last
;
70 if ((tx_last
| rx_last
) & BIT(30))
77 mt7925_mac_wtbl_update(dev
, idx
,
78 MT_WTBL_UPDATE_ADM_COUNT_CLEAR
);
79 memset(mlink
->airtime_ac
, 0, sizeof(mlink
->airtime_ac
));
85 sta
= container_of((void *)msta
, struct ieee80211_sta
,
87 for (i
= 0; i
< IEEE80211_NUM_ACS
; i
++) {
88 u8 q
= mt76_connac_lmac_mapping(i
);
89 u32 tx_cur
= tx_time
[q
];
90 u32 rx_cur
= rx_time
[q
];
91 u8 tid
= ac_to_tid
[i
];
93 if (!tx_cur
&& !rx_cur
)
96 ieee80211_sta_register_airtime(sta
, tid
, tx_cur
,
100 /* We don't support reading GI info from txs packets.
101 * For accurate tx status reporting and AQL improvement,
102 * we need to make sure that flags match so polling GI
103 * from per-sta counters directly.
105 rate
= &mlink
->wcid
.rate
;
108 case RATE_INFO_BW_160
:
109 bw
= IEEE80211_STA_RX_BW_160
;
111 case RATE_INFO_BW_80
:
112 bw
= IEEE80211_STA_RX_BW_80
;
114 case RATE_INFO_BW_40
:
115 bw
= IEEE80211_STA_RX_BW_40
;
118 bw
= IEEE80211_STA_RX_BW_20
;
122 addr
= mt7925_mac_wtbl_lmac_addr(dev
, idx
, 6);
123 val
= mt76_rr(dev
, addr
);
124 if (rate
->flags
& RATE_INFO_FLAGS_EHT_MCS
) {
125 addr
= mt7925_mac_wtbl_lmac_addr(dev
, idx
, 5);
126 val
= mt76_rr(dev
, addr
);
127 rate
->eht_gi
= FIELD_GET(GENMASK(25, 24), val
);
128 } else if (rate
->flags
& RATE_INFO_FLAGS_HE_MCS
) {
129 u8 offs
= MT_WTBL_TXRX_RATE_G2_HE
+ 2 * bw
;
131 rate
->he_gi
= (val
& (0x3 << offs
)) >> offs
;
132 } else if (rate
->flags
&
133 (RATE_INFO_FLAGS_VHT_MCS
| RATE_INFO_FLAGS_MCS
)) {
134 if (val
& BIT(MT_WTBL_TXRX_RATE_G2
+ bw
))
135 rate
->flags
|= RATE_INFO_FLAGS_SHORT_GI
;
137 rate
->flags
&= ~RATE_INFO_FLAGS_SHORT_GI
;
140 /* get signal strength of resp frames (CTS/BA/ACK) */
141 addr
= mt7925_mac_wtbl_lmac_addr(dev
, idx
, 34);
142 val
= mt76_rr(dev
, addr
);
144 rssi
[0] = to_rssi(GENMASK(7, 0), val
);
145 rssi
[1] = to_rssi(GENMASK(15, 8), val
);
146 rssi
[2] = to_rssi(GENMASK(23, 16), val
);
147 rssi
[3] = to_rssi(GENMASK(31, 14), val
);
150 mt76_rx_signal(msta
->vif
->phy
->mt76
->antenna_mask
, rssi
);
152 ewma_avg_signal_add(&mlink
->avg_ack_signal
, -mlink
->ack_signal
);
156 void mt7925_mac_set_fixed_rate_table(struct mt792x_dev
*dev
,
157 u8 tbl_idx
, u16 rate_idx
)
159 u32 ctrl
= MT_WTBL_ITCR_WR
| MT_WTBL_ITCR_EXEC
| tbl_idx
;
161 mt76_wr(dev
, MT_WTBL_ITDR0
, rate_idx
);
162 /* use wtbl spe idx */
163 mt76_wr(dev
, MT_WTBL_ITDR1
, MT_WTBL_SPE_IDX_SEL
);
164 mt76_wr(dev
, MT_WTBL_ITCR
, ctrl
);
167 /* The HW does not translate the mac header to 802.3 for mesh point */
168 static int mt7925_reverse_frag0_hdr_trans(struct sk_buff
*skb
, u16 hdr_gap
)
170 struct mt76_rx_status
*status
= (struct mt76_rx_status
*)skb
->cb
;
171 struct ethhdr
*eth_hdr
= (struct ethhdr
*)(skb
->data
+ hdr_gap
);
172 struct mt792x_sta
*msta
= (struct mt792x_sta
*)status
->wcid
;
173 __le32
*rxd
= (__le32
*)skb
->data
;
174 struct ieee80211_sta
*sta
;
175 struct ieee80211_vif
*vif
;
176 struct ieee80211_hdr hdr
;
179 if (le32_get_bits(rxd
[3], MT_RXD3_NORMAL_ADDR_TYPE
) !=
183 if (!(le32_to_cpu(rxd
[1]) & MT_RXD1_NORMAL_GROUP_4
))
186 if (!msta
|| !msta
->vif
)
189 sta
= container_of((void *)msta
, struct ieee80211_sta
, drv_priv
);
190 vif
= container_of((void *)msta
->vif
, struct ieee80211_vif
, drv_priv
);
192 /* store the info from RXD and ethhdr to avoid being overridden */
193 frame_control
= le32_get_bits(rxd
[8], MT_RXD8_FRAME_CONTROL
);
194 hdr
.frame_control
= cpu_to_le16(frame_control
);
195 hdr
.seq_ctrl
= cpu_to_le16(le32_get_bits(rxd
[10], MT_RXD10_SEQ_CTRL
));
198 ether_addr_copy(hdr
.addr1
, vif
->addr
);
199 ether_addr_copy(hdr
.addr2
, sta
->addr
);
200 switch (frame_control
& (IEEE80211_FCTL_TODS
|
201 IEEE80211_FCTL_FROMDS
)) {
203 ether_addr_copy(hdr
.addr3
, vif
->bss_conf
.bssid
);
205 case IEEE80211_FCTL_FROMDS
:
206 ether_addr_copy(hdr
.addr3
, eth_hdr
->h_source
);
208 case IEEE80211_FCTL_TODS
:
209 ether_addr_copy(hdr
.addr3
, eth_hdr
->h_dest
);
211 case IEEE80211_FCTL_TODS
| IEEE80211_FCTL_FROMDS
:
212 ether_addr_copy(hdr
.addr3
, eth_hdr
->h_dest
);
213 ether_addr_copy(hdr
.addr4
, eth_hdr
->h_source
);
219 skb_pull(skb
, hdr_gap
+ sizeof(struct ethhdr
) - 2);
220 if (eth_hdr
->h_proto
== cpu_to_be16(ETH_P_AARP
) ||
221 eth_hdr
->h_proto
== cpu_to_be16(ETH_P_IPX
))
222 ether_addr_copy(skb_push(skb
, ETH_ALEN
), bridge_tunnel_header
);
223 else if (be16_to_cpu(eth_hdr
->h_proto
) >= ETH_P_802_3_MIN
)
224 ether_addr_copy(skb_push(skb
, ETH_ALEN
), rfc1042_header
);
228 if (ieee80211_has_order(hdr
.frame_control
))
229 memcpy(skb_push(skb
, IEEE80211_HT_CTL_LEN
), &rxd
[11],
230 IEEE80211_HT_CTL_LEN
);
231 if (ieee80211_is_data_qos(hdr
.frame_control
)) {
234 qos_ctrl
= cpu_to_le16(le32_get_bits(rxd
[10], MT_RXD10_QOS_CTL
));
235 memcpy(skb_push(skb
, IEEE80211_QOS_CTL_LEN
), &qos_ctrl
,
236 IEEE80211_QOS_CTL_LEN
);
239 if (ieee80211_has_a4(hdr
.frame_control
))
240 memcpy(skb_push(skb
, sizeof(hdr
)), &hdr
, sizeof(hdr
));
242 memcpy(skb_push(skb
, sizeof(hdr
) - 6), &hdr
, sizeof(hdr
) - 6);
248 mt7925_mac_fill_rx_rate(struct mt792x_dev
*dev
,
249 struct mt76_rx_status
*status
,
250 struct ieee80211_supported_band
*sband
,
251 __le32
*rxv
, u8
*mode
)
254 u8 stbc
, gi
, bw
, dcm
, nss
;
258 v0
= le32_to_cpu(rxv
[0]);
259 v2
= le32_to_cpu(rxv
[2]);
261 idx
= FIELD_GET(MT_PRXV_TX_RATE
, v0
);
263 nss
= FIELD_GET(MT_PRXV_NSTS
, v0
) + 1;
265 stbc
= FIELD_GET(MT_PRXV_HT_STBC
, v2
);
266 gi
= FIELD_GET(MT_PRXV_HT_SHORT_GI
, v2
);
267 *mode
= FIELD_GET(MT_PRXV_TX_MODE
, v2
);
268 dcm
= FIELD_GET(MT_PRXV_DCM
, v2
);
269 bw
= FIELD_GET(MT_PRXV_FRAME_MODE
, v2
);
272 case MT_PHY_TYPE_CCK
:
275 case MT_PHY_TYPE_OFDM
:
276 i
= mt76_get_rate(&dev
->mt76
, sband
, i
, cck
);
278 case MT_PHY_TYPE_HT_GF
:
280 status
->encoding
= RX_ENC_HT
;
282 status
->enc_flags
|= RX_ENC_FLAG_SHORT_GI
;
286 case MT_PHY_TYPE_VHT
:
288 status
->encoding
= RX_ENC_VHT
;
290 status
->enc_flags
|= RX_ENC_FLAG_SHORT_GI
;
294 case MT_PHY_TYPE_HE_MU
:
295 case MT_PHY_TYPE_HE_SU
:
296 case MT_PHY_TYPE_HE_EXT_SU
:
297 case MT_PHY_TYPE_HE_TB
:
299 status
->encoding
= RX_ENC_HE
;
302 if (gi
<= NL80211_RATE_INFO_HE_GI_3_2
)
305 status
->he_dcm
= dcm
;
307 case MT_PHY_TYPE_EHT_SU
:
308 case MT_PHY_TYPE_EHT_TRIG
:
309 case MT_PHY_TYPE_EHT_MU
:
311 status
->encoding
= RX_ENC_EHT
;
314 if (gi
<= NL80211_RATE_INFO_EHT_GI_3_2
)
320 status
->rate_idx
= i
;
323 case IEEE80211_STA_RX_BW_20
:
325 case IEEE80211_STA_RX_BW_40
:
326 if (*mode
& MT_PHY_TYPE_HE_EXT_SU
&&
327 (idx
& MT_PRXV_TX_ER_SU_106T
)) {
328 status
->bw
= RATE_INFO_BW_HE_RU
;
330 NL80211_RATE_INFO_HE_RU_ALLOC_106
;
332 status
->bw
= RATE_INFO_BW_40
;
335 case IEEE80211_STA_RX_BW_80
:
336 status
->bw
= RATE_INFO_BW_80
;
338 case IEEE80211_STA_RX_BW_160
:
339 status
->bw
= RATE_INFO_BW_160
;
345 status
->enc_flags
|= RX_ENC_FLAG_STBC_MASK
* stbc
;
346 if (*mode
< MT_PHY_TYPE_HE_SU
&& gi
)
347 status
->enc_flags
|= RX_ENC_FLAG_SHORT_GI
;
353 mt7925_mac_fill_rx(struct mt792x_dev
*dev
, struct sk_buff
*skb
)
355 u32 csum_mask
= MT_RXD3_NORMAL_IP_SUM
| MT_RXD3_NORMAL_UDP_TCP_SUM
;
356 struct mt76_rx_status
*status
= (struct mt76_rx_status
*)skb
->cb
;
357 bool hdr_trans
, unicast
, insert_ccmp_hdr
= false;
358 u8 chfreq
, qos_ctl
= 0, remove_pad
, amsdu_info
;
360 __le32
*rxv
= NULL
, *rxd
= (__le32
*)skb
->data
;
361 struct mt76_phy
*mphy
= &dev
->mt76
.phy
;
362 struct mt792x_phy
*phy
= &dev
->phy
;
363 struct ieee80211_supported_band
*sband
;
364 u32 csum_status
= *(u32
*)skb
->cb
;
365 u32 rxd1
= le32_to_cpu(rxd
[1]);
366 u32 rxd2
= le32_to_cpu(rxd
[2]);
367 u32 rxd3
= le32_to_cpu(rxd
[3]);
368 u32 rxd4
= le32_to_cpu(rxd
[4]);
369 struct mt792x_link_sta
*mlink
;
370 u8 mode
= 0; /* , band_idx; */
375 memset(status
, 0, sizeof(*status
));
377 if (!test_bit(MT76_STATE_RUNNING
, &mphy
->state
))
380 if (rxd2
& MT_RXD2_NORMAL_AMSDU_ERR
)
383 hdr_trans
= rxd2
& MT_RXD2_NORMAL_HDR_TRANS
;
384 if (hdr_trans
&& (rxd1
& MT_RXD1_NORMAL_CM
))
387 /* ICV error or CCMP/BIP/WPI MIC error */
388 if (rxd1
& MT_RXD1_NORMAL_ICV_ERR
)
389 status
->flag
|= RX_FLAG_ONLY_MONITOR
;
391 chfreq
= FIELD_GET(MT_RXD3_NORMAL_CH_FREQ
, rxd3
);
392 unicast
= FIELD_GET(MT_RXD3_NORMAL_ADDR_TYPE
, rxd3
) == MT_RXD3_NORMAL_U2M
;
393 idx
= FIELD_GET(MT_RXD1_NORMAL_WLAN_IDX
, rxd1
);
394 status
->wcid
= mt792x_rx_get_wcid(dev
, idx
, unicast
);
397 mlink
= container_of(status
->wcid
, struct mt792x_link_sta
, wcid
);
398 spin_lock_bh(&dev
->mt76
.sta_poll_lock
);
399 if (list_empty(&mlink
->wcid
.poll_list
))
400 list_add_tail(&mlink
->wcid
.poll_list
,
401 &dev
->mt76
.sta_poll_list
);
402 spin_unlock_bh(&dev
->mt76
.sta_poll_lock
);
405 mt792x_get_status_freq_info(status
, chfreq
);
407 switch (status
->band
) {
408 case NL80211_BAND_5GHZ
:
409 sband
= &mphy
->sband_5g
.sband
;
411 case NL80211_BAND_6GHZ
:
412 sband
= &mphy
->sband_6g
.sband
;
415 sband
= &mphy
->sband_2g
.sband
;
419 if (!sband
->channels
)
422 if (mt76_is_mmio(&dev
->mt76
) && (rxd3
& csum_mask
) == csum_mask
&&
423 !(csum_status
& (BIT(0) | BIT(2) | BIT(3))))
424 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
426 if (rxd3
& MT_RXD3_NORMAL_FCS_ERR
)
427 status
->flag
|= RX_FLAG_FAILED_FCS_CRC
;
429 if (rxd1
& MT_RXD1_NORMAL_TKIP_MIC_ERR
)
430 status
->flag
|= RX_FLAG_MMIC_ERROR
;
432 if (FIELD_GET(MT_RXD2_NORMAL_SEC_MODE
, rxd2
) != 0 &&
433 !(rxd1
& (MT_RXD1_NORMAL_CLM
| MT_RXD1_NORMAL_CM
))) {
434 status
->flag
|= RX_FLAG_DECRYPTED
;
435 status
->flag
|= RX_FLAG_IV_STRIPPED
;
436 status
->flag
|= RX_FLAG_MMIC_STRIPPED
| RX_FLAG_MIC_STRIPPED
;
439 remove_pad
= FIELD_GET(MT_RXD2_NORMAL_HDR_OFFSET
, rxd2
);
441 if (rxd2
& MT_RXD2_NORMAL_MAX_LEN_ERROR
)
445 if (rxd1
& MT_RXD1_NORMAL_GROUP_4
) {
446 u32 v0
= le32_to_cpu(rxd
[0]);
447 u32 v2
= le32_to_cpu(rxd
[2]);
449 /* TODO: need to map rxd address */
450 fc
= cpu_to_le16(FIELD_GET(MT_RXD8_FRAME_CONTROL
, v0
));
451 seq_ctrl
= FIELD_GET(MT_RXD10_SEQ_CTRL
, v2
);
452 qos_ctl
= FIELD_GET(MT_RXD10_QOS_CTL
, v2
);
455 if ((u8
*)rxd
- skb
->data
>= skb
->len
)
459 if (rxd1
& MT_RXD1_NORMAL_GROUP_1
) {
460 u8
*data
= (u8
*)rxd
;
462 if (status
->flag
& RX_FLAG_DECRYPTED
) {
463 switch (FIELD_GET(MT_RXD2_NORMAL_SEC_MODE
, rxd2
)) {
464 case MT_CIPHER_AES_CCMP
:
465 case MT_CIPHER_CCMP_CCX
:
466 case MT_CIPHER_CCMP_256
:
468 FIELD_GET(MT_RXD2_NORMAL_FRAG
, rxd2
);
471 case MT_CIPHER_TKIP_NO_MIC
:
473 case MT_CIPHER_GCMP_256
:
474 status
->iv
[0] = data
[5];
475 status
->iv
[1] = data
[4];
476 status
->iv
[2] = data
[3];
477 status
->iv
[3] = data
[2];
478 status
->iv
[4] = data
[1];
479 status
->iv
[5] = data
[0];
486 if ((u8
*)rxd
- skb
->data
>= skb
->len
)
490 if (rxd1
& MT_RXD1_NORMAL_GROUP_2
) {
491 status
->timestamp
= le32_to_cpu(rxd
[0]);
492 status
->flag
|= RX_FLAG_MACTIME_START
;
494 if (!(rxd2
& MT_RXD2_NORMAL_NON_AMPDU
)) {
495 status
->flag
|= RX_FLAG_AMPDU_DETAILS
;
497 /* all subframes of an A-MPDU have the same timestamp */
498 if (phy
->rx_ampdu_ts
!= status
->timestamp
) {
499 if (!++phy
->ampdu_ref
)
502 phy
->rx_ampdu_ts
= status
->timestamp
;
504 status
->ampdu_ref
= phy
->ampdu_ref
;
508 if ((u8
*)rxd
- skb
->data
>= skb
->len
)
512 /* RXD Group 3 - P-RXV */
513 if (rxd1
& MT_RXD1_NORMAL_GROUP_3
) {
519 if ((u8
*)rxd
- skb
->data
>= skb
->len
)
522 v3
= le32_to_cpu(rxv
[3]);
524 status
->chains
= mphy
->antenna_mask
;
525 status
->chain_signal
[0] = to_rssi(MT_PRXV_RCPI0
, v3
);
526 status
->chain_signal
[1] = to_rssi(MT_PRXV_RCPI1
, v3
);
527 status
->chain_signal
[2] = to_rssi(MT_PRXV_RCPI2
, v3
);
528 status
->chain_signal
[3] = to_rssi(MT_PRXV_RCPI3
, v3
);
530 /* RXD Group 5 - C-RXV */
531 if (rxd1
& MT_RXD1_NORMAL_GROUP_5
) {
533 if ((u8
*)rxd
- skb
->data
>= skb
->len
)
537 ret
= mt7925_mac_fill_rx_rate(dev
, status
, sband
, rxv
, &mode
);
542 amsdu_info
= FIELD_GET(MT_RXD4_NORMAL_PAYLOAD_FORMAT
, rxd4
);
543 status
->amsdu
= !!amsdu_info
;
545 status
->first_amsdu
= amsdu_info
== MT_RXD4_FIRST_AMSDU_FRAME
;
546 status
->last_amsdu
= amsdu_info
== MT_RXD4_LAST_AMSDU_FRAME
;
549 hdr_gap
= (u8
*)rxd
- skb
->data
+ 2 * remove_pad
;
550 if (hdr_trans
&& ieee80211_has_morefrags(fc
)) {
551 if (mt7925_reverse_frag0_hdr_trans(skb
, hdr_gap
))
557 skb_pull(skb
, hdr_gap
);
558 if (!hdr_trans
&& status
->amsdu
) {
559 pad_start
= ieee80211_get_hdrlen_from_skb(skb
);
560 } else if (hdr_trans
&& (rxd2
& MT_RXD2_NORMAL_HDR_TRANS_ERROR
)) {
561 /* When header translation failure is indicated,
562 * the hardware will insert an extra 2-byte field
563 * containing the data length after the protocol
567 if (get_unaligned_be16(skb
->data
+ pad_start
) == ETH_P_8021Q
)
574 memmove(skb
->data
+ 2, skb
->data
, pad_start
);
580 struct ieee80211_hdr
*hdr
;
582 if (insert_ccmp_hdr
) {
583 u8 key_id
= FIELD_GET(MT_RXD1_NORMAL_KEY_ID
, rxd1
);
585 mt76_insert_ccmp_hdr(skb
, key_id
);
588 hdr
= mt76_skb_get_hdr(skb
);
589 fc
= hdr
->frame_control
;
590 if (ieee80211_is_data_qos(fc
)) {
591 seq_ctrl
= le16_to_cpu(hdr
->seq_ctrl
);
592 qos_ctl
= *ieee80211_get_qos_ctl(hdr
);
594 skb_set_mac_header(skb
, (unsigned char *)hdr
- skb
->data
);
596 status
->flag
|= RX_FLAG_8023
;
599 mt792x_mac_assoc_rssi(dev
, skb
);
601 if (rxv
&& !(status
->flag
& RX_FLAG_8023
)) {
602 switch (status
->encoding
) {
604 mt76_connac3_mac_decode_eht_radiotap(skb
, rxv
, mode
);
607 mt76_connac3_mac_decode_he_radiotap(skb
, rxv
, mode
);
614 if (!status
->wcid
|| !ieee80211_is_data_qos(fc
))
617 status
->aggr
= unicast
&& !ieee80211_is_qos_nullfunc(fc
);
618 status
->seqno
= IEEE80211_SEQ_TO_SN(seq_ctrl
);
619 status
->qos_ctl
= qos_ctl
;
625 mt7925_mac_write_txwi_8023(__le32
*txwi
, struct sk_buff
*skb
,
626 struct mt76_wcid
*wcid
)
628 u8 tid
= skb
->priority
& IEEE80211_QOS_CTL_TID_MASK
;
629 u8 fc_type
, fc_stype
;
635 struct ieee80211_sta
*sta
;
637 sta
= container_of((void *)wcid
, struct ieee80211_sta
, drv_priv
);
641 val
= FIELD_PREP(MT_TXD1_HDR_FORMAT
, MT_HDR_FORMAT_802_3
) |
642 FIELD_PREP(MT_TXD1_TID
, tid
);
644 ethertype
= get_unaligned_be16(&skb
->data
[12]);
645 if (ethertype
>= ETH_P_802_3_MIN
)
646 val
|= MT_TXD1_ETH_802_3
;
648 txwi
[1] |= cpu_to_le32(val
);
650 fc_type
= IEEE80211_FTYPE_DATA
>> 2;
651 fc_stype
= wmm
? IEEE80211_STYPE_QOS_DATA
>> 4 : 0;
653 val
= FIELD_PREP(MT_TXD2_FRAME_TYPE
, fc_type
) |
654 FIELD_PREP(MT_TXD2_SUB_TYPE
, fc_stype
);
656 txwi
[2] |= cpu_to_le32(val
);
660 mt7925_mac_write_txwi_80211(struct mt76_dev
*dev
, __le32
*txwi
,
662 struct ieee80211_key_conf
*key
)
664 struct ieee80211_hdr
*hdr
= (struct ieee80211_hdr
*)skb
->data
;
665 struct ieee80211_mgmt
*mgmt
= (struct ieee80211_mgmt
*)skb
->data
;
666 struct ieee80211_tx_info
*info
= IEEE80211_SKB_CB(skb
);
667 bool multicast
= is_multicast_ether_addr(hdr
->addr1
);
668 u8 tid
= skb
->priority
& IEEE80211_QOS_CTL_TID_MASK
;
669 __le16 fc
= hdr
->frame_control
;
670 u8 fc_type
, fc_stype
;
673 if (ieee80211_is_action(fc
) &&
674 mgmt
->u
.action
.category
== WLAN_CATEGORY_BACK
&&
675 mgmt
->u
.action
.u
.addba_req
.action_code
== WLAN_ACTION_ADDBA_REQ
)
677 else if (ieee80211_is_mgmt(hdr
->frame_control
))
680 val
= FIELD_PREP(MT_TXD1_HDR_FORMAT
, MT_HDR_FORMAT_802_11
) |
681 FIELD_PREP(MT_TXD1_HDR_INFO
,
682 ieee80211_get_hdrlen_from_skb(skb
) / 2) |
683 FIELD_PREP(MT_TXD1_TID
, tid
);
685 if (!ieee80211_is_data(fc
) || multicast
||
686 info
->flags
& IEEE80211_TX_CTL_USE_MINRATE
)
687 val
|= MT_TXD1_FIXED_RATE
;
689 if (key
&& multicast
&& ieee80211_is_robust_mgmt_frame(skb
) &&
690 key
->cipher
== WLAN_CIPHER_SUITE_AES_CMAC
) {
692 txwi
[3] &= ~cpu_to_le32(MT_TXD3_PROTECT_FRAME
);
695 txwi
[1] |= cpu_to_le32(val
);
697 fc_type
= (le16_to_cpu(fc
) & IEEE80211_FCTL_FTYPE
) >> 2;
698 fc_stype
= (le16_to_cpu(fc
) & IEEE80211_FCTL_STYPE
) >> 4;
700 val
= FIELD_PREP(MT_TXD2_FRAME_TYPE
, fc_type
) |
701 FIELD_PREP(MT_TXD2_SUB_TYPE
, fc_stype
);
703 txwi
[2] |= cpu_to_le32(val
);
705 txwi
[3] |= cpu_to_le32(FIELD_PREP(MT_TXD3_BCM
, multicast
));
706 if (ieee80211_is_beacon(fc
))
707 txwi
[3] |= cpu_to_le32(MT_TXD3_REM_TX_COUNT
);
709 if (info
->flags
& IEEE80211_TX_CTL_INJECTED
) {
710 u16 seqno
= le16_to_cpu(hdr
->seq_ctrl
);
712 if (ieee80211_is_back_req(hdr
->frame_control
)) {
713 struct ieee80211_bar
*bar
;
715 bar
= (struct ieee80211_bar
*)skb
->data
;
716 seqno
= le16_to_cpu(bar
->start_seq_num
);
719 val
= MT_TXD3_SN_VALID
|
720 FIELD_PREP(MT_TXD3_SEQ
, IEEE80211_SEQ_TO_SN(seqno
));
721 txwi
[3] |= cpu_to_le32(val
);
722 txwi
[3] &= ~cpu_to_le32(MT_TXD3_HW_AMSDU
);
727 mt7925_mac_write_txwi(struct mt76_dev
*dev
, __le32
*txwi
,
728 struct sk_buff
*skb
, struct mt76_wcid
*wcid
,
729 struct ieee80211_key_conf
*key
, int pid
,
730 enum mt76_txq_id qid
, u32 changed
)
732 struct ieee80211_tx_info
*info
= IEEE80211_SKB_CB(skb
);
733 struct ieee80211_vif
*vif
= info
->control
.vif
;
734 u8 p_fmt
, q_idx
, omac_idx
= 0, wmm_idx
= 0, band_idx
= 0;
735 u32 val
, sz_txd
= mt76_is_mmio(dev
) ? MT_TXD_SIZE
: MT_SDIO_TXD_SIZE
;
736 bool is_8023
= info
->flags
& IEEE80211_TX_CTL_HW_80211_ENCAP
;
737 struct mt76_vif
*mvif
;
738 bool beacon
= !!(changed
& (BSS_CHANGED_BEACON
|
739 BSS_CHANGED_BEACON_ENABLED
));
740 bool inband_disc
= !!(changed
& (BSS_CHANGED_UNSOL_BCAST_PROBE_RESP
|
741 BSS_CHANGED_FILS_DISCOVERY
));
742 struct mt792x_bss_conf
*mconf
;
744 mconf
= vif
? mt792x_vif_to_link((struct mt792x_vif
*)vif
->drv_priv
,
745 wcid
->link_id
) : NULL
;
746 mvif
= mconf
? (struct mt76_vif
*)&mconf
->mt76
: NULL
;
749 omac_idx
= mvif
->omac_idx
;
750 wmm_idx
= mvif
->wmm_idx
;
751 band_idx
= mvif
->band_idx
;
755 p_fmt
= MT_TX_TYPE_FW
;
756 q_idx
= MT_LMAC_ALTX0
;
758 p_fmt
= MT_TX_TYPE_FW
;
759 q_idx
= MT_LMAC_BCN0
;
760 } else if (qid
>= MT_TXQ_PSD
) {
761 p_fmt
= mt76_is_mmio(dev
) ? MT_TX_TYPE_CT
: MT_TX_TYPE_SF
;
762 q_idx
= MT_LMAC_ALTX0
;
764 p_fmt
= mt76_is_mmio(dev
) ? MT_TX_TYPE_CT
: MT_TX_TYPE_SF
;
765 q_idx
= wmm_idx
* MT76_CONNAC_MAX_WMM_SETS
+
766 mt76_connac_lmac_mapping(skb_get_queue_mapping(skb
));
768 /* counting non-offloading skbs */
769 wcid
->stats
.tx_bytes
+= skb
->len
;
770 wcid
->stats
.tx_packets
++;
773 val
= FIELD_PREP(MT_TXD0_TX_BYTES
, skb
->len
+ sz_txd
) |
774 FIELD_PREP(MT_TXD0_PKT_FMT
, p_fmt
) |
775 FIELD_PREP(MT_TXD0_Q_IDX
, q_idx
);
776 txwi
[0] = cpu_to_le32(val
);
778 val
= FIELD_PREP(MT_TXD1_WLAN_IDX
, wcid
->idx
) |
779 FIELD_PREP(MT_TXD1_OWN_MAC
, omac_idx
);
782 val
|= FIELD_PREP(MT_TXD1_TGID
, band_idx
);
784 txwi
[1] = cpu_to_le32(val
);
787 val
= FIELD_PREP(MT_TXD3_REM_TX_COUNT
, 15);
790 val
|= MT_TXD3_PROTECT_FRAME
;
791 if (info
->flags
& IEEE80211_TX_CTL_NO_ACK
)
792 val
|= MT_TXD3_NO_ACK
;
794 val
|= MT_TXD3_HW_AMSDU
;
796 txwi
[3] = cpu_to_le32(val
);
799 val
= FIELD_PREP(MT_TXD5_PID
, pid
);
800 if (pid
>= MT_PACKET_ID_FIRST
) {
801 val
|= MT_TXD5_TX_STATUS_HOST
;
802 txwi
[3] |= cpu_to_le32(MT_TXD3_BA_DISABLE
);
803 txwi
[3] &= ~cpu_to_le32(MT_TXD3_HW_AMSDU
);
806 txwi
[5] = cpu_to_le32(val
);
808 val
= MT_TXD6_DAS
| FIELD_PREP(MT_TXD6_MSDU_CNT
, 1);
809 if (!ieee80211_vif_is_mld(vif
) ||
810 (q_idx
>= MT_LMAC_ALTX0
&& q_idx
<= MT_LMAC_BCN0
))
811 val
|= MT_TXD6_DIS_MAT
;
812 txwi
[6] = cpu_to_le32(val
);
816 mt7925_mac_write_txwi_8023(txwi
, skb
, wcid
);
818 mt7925_mac_write_txwi_80211(dev
, txwi
, skb
, key
);
820 if (txwi
[1] & cpu_to_le32(MT_TXD1_FIXED_RATE
)) {
821 struct ieee80211_hdr
*hdr
= (struct ieee80211_hdr
*)skb
->data
;
822 bool mcast
= ieee80211_is_data(hdr
->frame_control
) &&
823 is_multicast_ether_addr(hdr
->addr1
);
824 u8 idx
= MT792x_BASIC_RATES_TBL
;
827 if (mcast
&& mvif
->mcast_rates_idx
)
828 idx
= mvif
->mcast_rates_idx
;
829 else if (beacon
&& mvif
->beacon_rates_idx
)
830 idx
= mvif
->beacon_rates_idx
;
832 idx
= mvif
->basic_rates_idx
;
835 txwi
[6] |= cpu_to_le32(FIELD_PREP(MT_TXD6_TX_RATE
, idx
));
836 txwi
[3] |= cpu_to_le32(MT_TXD3_BA_DISABLE
);
839 EXPORT_SYMBOL_GPL(mt7925_mac_write_txwi
);
841 static void mt7925_tx_check_aggr(struct ieee80211_sta
*sta
, struct sk_buff
*skb
,
842 struct mt76_wcid
*wcid
)
844 struct ieee80211_tx_info
*info
= IEEE80211_SKB_CB(skb
);
845 struct ieee80211_link_sta
*link_sta
;
846 struct mt792x_link_sta
*mlink
;
847 struct mt792x_sta
*msta
;
851 link_sta
= rcu_dereference(sta
->link
[wcid
->link_id
]);
855 if (!sta
|| !(link_sta
->ht_cap
.ht_supported
|| link_sta
->he_cap
.has_he
))
858 tid
= skb
->priority
& IEEE80211_QOS_CTL_TID_MASK
;
859 is_8023
= info
->flags
& IEEE80211_TX_CTL_HW_80211_ENCAP
;
862 fc
= IEEE80211_FTYPE_DATA
|
863 (sta
->wme
? IEEE80211_STYPE_QOS_DATA
:
864 IEEE80211_STYPE_DATA
);
866 /* No need to get precise TID for Action/Management Frame,
867 * since it will not meet the following Frame Control
871 struct ieee80211_hdr
*hdr
= (struct ieee80211_hdr
*)skb
->data
;
873 fc
= le16_to_cpu(hdr
->frame_control
) &
874 (IEEE80211_FCTL_FTYPE
| IEEE80211_FCTL_STYPE
);
877 if (unlikely(fc
!= (IEEE80211_FTYPE_DATA
| IEEE80211_STYPE_QOS_DATA
)))
880 msta
= (struct mt792x_sta
*)sta
->drv_priv
;
882 if (sta
->mlo
&& msta
->deflink_id
!= IEEE80211_LINK_UNSPECIFIED
)
883 mlink
= rcu_dereference(msta
->link
[msta
->deflink_id
]);
885 mlink
= &msta
->deflink
;
887 if (!test_and_set_bit(tid
, &mlink
->wcid
.ampdu_state
))
888 ieee80211_start_tx_ba_session(sta
, tid
, 0);
892 mt7925_mac_add_txs_skb(struct mt792x_dev
*dev
, struct mt76_wcid
*wcid
,
893 int pid
, __le32
*txs_data
)
895 struct mt76_sta_stats
*stats
= &wcid
->stats
;
896 struct ieee80211_supported_band
*sband
;
897 struct mt76_dev
*mdev
= &dev
->mt76
;
898 struct mt76_phy
*mphy
;
899 struct ieee80211_tx_info
*info
;
900 struct sk_buff_head list
;
901 struct rate_info rate
= {};
904 u32 txrate
, txs
, mode
, stbc
;
906 mt76_tx_status_lock(mdev
, &list
);
907 skb
= mt76_tx_status_skb_get(mdev
, wcid
, pid
, &list
);
911 txs
= le32_to_cpu(txs_data
[0]);
913 info
= IEEE80211_SKB_CB(skb
);
914 if (!(txs
& MT_TXS0_ACK_ERROR_MASK
))
915 info
->flags
|= IEEE80211_TX_STAT_ACK
;
917 info
->status
.ampdu_len
= 1;
918 info
->status
.ampdu_ack_len
= !!(info
->flags
&
919 IEEE80211_TX_STAT_ACK
);
921 info
->status
.rates
[0].idx
= -1;
923 txrate
= FIELD_GET(MT_TXS0_TX_RATE
, txs
);
925 rate
.mcs
= FIELD_GET(MT_TX_RATE_IDX
, txrate
);
926 rate
.nss
= FIELD_GET(MT_TX_RATE_NSS
, txrate
) + 1;
927 stbc
= le32_get_bits(txs_data
[3], MT_TXS3_RATE_STBC
);
929 if (stbc
&& rate
.nss
> 1)
932 if (rate
.nss
- 1 < ARRAY_SIZE(stats
->tx_nss
))
933 stats
->tx_nss
[rate
.nss
- 1]++;
934 if (rate
.mcs
< ARRAY_SIZE(stats
->tx_mcs
))
935 stats
->tx_mcs
[rate
.mcs
]++;
937 mode
= FIELD_GET(MT_TX_RATE_MODE
, txrate
);
939 case MT_PHY_TYPE_CCK
:
942 case MT_PHY_TYPE_OFDM
:
943 mphy
= mt76_dev_phy(mdev
, wcid
->phy_idx
);
945 if (mphy
->chandef
.chan
->band
== NL80211_BAND_5GHZ
)
946 sband
= &mphy
->sband_5g
.sband
;
947 else if (mphy
->chandef
.chan
->band
== NL80211_BAND_6GHZ
)
948 sband
= &mphy
->sband_6g
.sband
;
950 sband
= &mphy
->sband_2g
.sband
;
952 rate
.mcs
= mt76_get_rate(mphy
->dev
, sband
, rate
.mcs
, cck
);
953 rate
.legacy
= sband
->bitrates
[rate
.mcs
].bitrate
;
956 case MT_PHY_TYPE_HT_GF
:
960 rate
.flags
= RATE_INFO_FLAGS_MCS
;
961 if (wcid
->rate
.flags
& RATE_INFO_FLAGS_SHORT_GI
)
962 rate
.flags
|= RATE_INFO_FLAGS_SHORT_GI
;
964 case MT_PHY_TYPE_VHT
:
968 rate
.flags
= RATE_INFO_FLAGS_VHT_MCS
;
970 case MT_PHY_TYPE_HE_SU
:
971 case MT_PHY_TYPE_HE_EXT_SU
:
972 case MT_PHY_TYPE_HE_TB
:
973 case MT_PHY_TYPE_HE_MU
:
977 rate
.he_gi
= wcid
->rate
.he_gi
;
978 rate
.he_dcm
= FIELD_GET(MT_TX_RATE_DCM
, txrate
);
979 rate
.flags
= RATE_INFO_FLAGS_HE_MCS
;
981 case MT_PHY_TYPE_EHT_SU
:
982 case MT_PHY_TYPE_EHT_TRIG
:
983 case MT_PHY_TYPE_EHT_MU
:
987 rate
.eht_gi
= wcid
->rate
.eht_gi
;
988 rate
.flags
= RATE_INFO_FLAGS_EHT_MCS
;
994 stats
->tx_mode
[mode
]++;
996 switch (FIELD_GET(MT_TXS0_BW
, txs
)) {
997 case IEEE80211_STA_RX_BW_160
:
998 rate
.bw
= RATE_INFO_BW_160
;
1001 case IEEE80211_STA_RX_BW_80
:
1002 rate
.bw
= RATE_INFO_BW_80
;
1005 case IEEE80211_STA_RX_BW_40
:
1006 rate
.bw
= RATE_INFO_BW_40
;
1010 rate
.bw
= RATE_INFO_BW_20
;
1017 mt76_tx_status_skb_done(mdev
, skb
, &list
);
1020 mt76_tx_status_unlock(mdev
, &list
);
1025 void mt7925_mac_add_txs(struct mt792x_dev
*dev
, void *data
)
1027 struct mt792x_link_sta
*mlink
= NULL
;
1028 struct mt76_wcid
*wcid
;
1029 __le32
*txs_data
= data
;
1033 if (le32_get_bits(txs_data
[0], MT_TXS0_TXS_FORMAT
) > 1)
1036 wcidx
= le32_get_bits(txs_data
[2], MT_TXS2_WCID
);
1037 pid
= le32_get_bits(txs_data
[3], MT_TXS3_PID
);
1039 if (pid
< MT_PACKET_ID_FIRST
)
1042 if (wcidx
>= MT792x_WTBL_SIZE
)
1047 wcid
= rcu_dereference(dev
->mt76
.wcid
[wcidx
]);
1051 mlink
= container_of(wcid
, struct mt792x_link_sta
, wcid
);
1053 mt7925_mac_add_txs_skb(dev
, wcid
, pid
, txs_data
);
1057 spin_lock_bh(&dev
->mt76
.sta_poll_lock
);
1058 if (list_empty(&mlink
->wcid
.poll_list
))
1059 list_add_tail(&mlink
->wcid
.poll_list
, &dev
->mt76
.sta_poll_list
);
1060 spin_unlock_bh(&dev
->mt76
.sta_poll_lock
);
1066 void mt7925_txwi_free(struct mt792x_dev
*dev
, struct mt76_txwi_cache
*t
,
1067 struct ieee80211_sta
*sta
, struct mt76_wcid
*wcid
,
1068 struct list_head
*free_list
)
1070 struct mt76_dev
*mdev
= &dev
->mt76
;
1074 mt76_connac_txp_skb_unmap(mdev
, t
);
1078 txwi
= (__le32
*)mt76_get_txwi_ptr(mdev
, t
);
1080 if (likely(t
->skb
->protocol
!= cpu_to_be16(ETH_P_PAE
)))
1081 mt7925_tx_check_aggr(sta
, t
->skb
, wcid
);
1083 wcid_idx
= wcid
->idx
;
1085 wcid_idx
= le32_get_bits(txwi
[1], MT_TXD1_WLAN_IDX
);
1088 __mt76_tx_complete_skb(mdev
, wcid_idx
, t
->skb
, free_list
);
1091 mt76_put_txwi(mdev
, t
);
1093 EXPORT_SYMBOL_GPL(mt7925_txwi_free
);
1096 mt7925_mac_tx_free(struct mt792x_dev
*dev
, void *data
, int len
)
1098 __le32
*tx_free
= (__le32
*)data
, *cur_info
;
1099 struct mt76_dev
*mdev
= &dev
->mt76
;
1100 struct mt76_txwi_cache
*txwi
;
1101 struct ieee80211_sta
*sta
= NULL
;
1102 struct mt76_wcid
*wcid
= NULL
;
1103 LIST_HEAD(free_list
);
1104 struct sk_buff
*skb
, *tmp
;
1105 void *end
= data
+ len
;
1107 u16 total
, count
= 0;
1109 /* clean DMA queues and unmap buffers first */
1110 mt76_queue_tx_cleanup(dev
, dev
->mphy
.q_tx
[MT_TXQ_PSD
], false);
1111 mt76_queue_tx_cleanup(dev
, dev
->mphy
.q_tx
[MT_TXQ_BE
], false);
1113 if (WARN_ON_ONCE(le32_get_bits(tx_free
[1], MT_TXFREE1_VER
) < 4))
1116 total
= le32_get_bits(tx_free
[0], MT_TXFREE0_MSDU_CNT
);
1117 for (cur_info
= &tx_free
[2]; count
< total
; cur_info
++) {
1121 if (WARN_ON_ONCE((void *)cur_info
>= end
))
1123 /* 1'b1: new wcid pair.
1124 * 1'b0: msdu_id with the same 'wcid pair' as above.
1126 info
= le32_to_cpu(*cur_info
);
1127 if (info
& MT_TXFREE_INFO_PAIR
) {
1128 struct mt792x_link_sta
*mlink
;
1131 idx
= FIELD_GET(MT_TXFREE_INFO_WLAN_ID
, info
);
1132 wcid
= rcu_dereference(dev
->mt76
.wcid
[idx
]);
1133 sta
= wcid_to_sta(wcid
);
1137 mlink
= container_of(wcid
, struct mt792x_link_sta
, wcid
);
1138 spin_lock_bh(&mdev
->sta_poll_lock
);
1139 if (list_empty(&mlink
->wcid
.poll_list
))
1140 list_add_tail(&mlink
->wcid
.poll_list
,
1141 &mdev
->sta_poll_list
);
1142 spin_unlock_bh(&mdev
->sta_poll_lock
);
1146 if (info
& MT_TXFREE_INFO_HEADER
) {
1148 wcid
->stats
.tx_retries
+=
1149 FIELD_GET(MT_TXFREE_INFO_COUNT
, info
) - 1;
1150 wcid
->stats
.tx_failed
+=
1151 !!FIELD_GET(MT_TXFREE_INFO_STAT
, info
);
1156 for (i
= 0; i
< 2; i
++) {
1157 msdu
= (info
>> (15 * i
)) & MT_TXFREE_INFO_MSDU_ID
;
1158 if (msdu
== MT_TXFREE_INFO_MSDU_ID
)
1162 txwi
= mt76_token_release(mdev
, msdu
, &wake
);
1166 mt7925_txwi_free(dev
, txwi
, sta
, wcid
, &free_list
);
1170 mt7925_mac_sta_poll(dev
);
1173 mt76_set_tx_blocked(&dev
->mt76
, false);
1175 mt76_worker_schedule(&dev
->mt76
.tx_worker
);
1177 list_for_each_entry_safe(skb
, tmp
, &free_list
, list
) {
1178 skb_list_del_init(skb
);
1179 napi_consume_skb(skb
, 1);
1183 bool mt7925_rx_check(struct mt76_dev
*mdev
, void *data
, int len
)
1185 struct mt792x_dev
*dev
= container_of(mdev
, struct mt792x_dev
, mt76
);
1186 __le32
*rxd
= (__le32
*)data
;
1187 __le32
*end
= (__le32
*)&rxd
[len
/ 4];
1188 enum rx_pkt_type type
;
1190 type
= le32_get_bits(rxd
[0], MT_RXD0_PKT_TYPE
);
1191 if (type
!= PKT_TYPE_NORMAL
) {
1192 u32 sw_type
= le32_get_bits(rxd
[0], MT_RXD0_SW_PKT_TYPE_MASK
);
1194 if (unlikely((sw_type
& MT_RXD0_SW_PKT_TYPE_MAP
) ==
1195 MT_RXD0_SW_PKT_TYPE_FRAME
))
1200 case PKT_TYPE_TXRX_NOTIFY
:
1201 /* PKT_TYPE_TXRX_NOTIFY can be received only by mmio devices */
1202 mt7925_mac_tx_free(dev
, data
, len
); /* mmio */
1205 for (rxd
+= 4; rxd
+ 12 <= end
; rxd
+= 12)
1206 mt7925_mac_add_txs(dev
, rxd
);
1212 EXPORT_SYMBOL_GPL(mt7925_rx_check
);
1214 void mt7925_queue_rx_skb(struct mt76_dev
*mdev
, enum mt76_rxq_id q
,
1215 struct sk_buff
*skb
, u32
*info
)
1217 struct mt792x_dev
*dev
= container_of(mdev
, struct mt792x_dev
, mt76
);
1218 __le32
*rxd
= (__le32
*)skb
->data
;
1219 __le32
*end
= (__le32
*)&skb
->data
[skb
->len
];
1220 enum rx_pkt_type type
;
1223 type
= le32_get_bits(rxd
[0], MT_RXD0_PKT_TYPE
);
1224 flag
= le32_get_bits(rxd
[0], MT_RXD0_PKT_FLAG
);
1225 if (type
!= PKT_TYPE_NORMAL
) {
1226 u32 sw_type
= le32_get_bits(rxd
[0], MT_RXD0_SW_PKT_TYPE_MASK
);
1228 if (unlikely((sw_type
& MT_RXD0_SW_PKT_TYPE_MAP
) ==
1229 MT_RXD0_SW_PKT_TYPE_FRAME
))
1230 type
= PKT_TYPE_NORMAL
;
1233 if (type
== PKT_TYPE_RX_EVENT
&& flag
== 0x1)
1234 type
= PKT_TYPE_NORMAL_MCU
;
1237 case PKT_TYPE_TXRX_NOTIFY
:
1238 /* PKT_TYPE_TXRX_NOTIFY can be received only by mmio devices */
1239 mt7925_mac_tx_free(dev
, skb
->data
, skb
->len
);
1240 napi_consume_skb(skb
, 1);
1242 case PKT_TYPE_RX_EVENT
:
1243 mt7925_mcu_rx_event(dev
, skb
);
1246 for (rxd
+= 2; rxd
+ 8 <= end
; rxd
+= 8)
1247 mt7925_mac_add_txs(dev
, rxd
);
1250 case PKT_TYPE_NORMAL_MCU
:
1251 case PKT_TYPE_NORMAL
:
1252 if (!mt7925_mac_fill_rx(dev
, skb
)) {
1253 mt76_rx(&dev
->mt76
, q
, skb
);
1262 EXPORT_SYMBOL_GPL(mt7925_queue_rx_skb
);
/*
 * mt7925_vif_connect_iter() - per-interface restore callback run after a
 * full chip reset (see mt7925_mac_reset_work).  Forces station vifs to
 * disconnect, then re-programs the firmware per valid link: device/BSS
 * records, TX settings, and for AP vifs also the BSS, station entry and
 * beacon offload.
 * NOTE(review): the return-type line and several interior lines (braces,
 * some mt76_connac_mcu_uni_add_bss() arguments) are missing from this
 * extraction.
 */
1265 mt7925_vif_connect_iter(void *priv
, u8
*mac
,
1266 struct ieee80211_vif
*vif
)
1268 struct mt792x_vif
*mvif
= (struct mt792x_vif
*)vif
->drv_priv
;
/* MLD vifs iterate all valid links; non-MLD vifs just link 0. */
1269 unsigned long valid
= ieee80211_vif_is_mld(vif
) ?
1270 mvif
->valid_links
: BIT(0);
1271 struct mt792x_dev
*dev
= mvif
->phy
->dev
;
1272 struct ieee80211_hw
*hw
= mt76_hw(dev
);
1273 struct ieee80211_bss_conf
*bss_conf
;
/* Firmware state was lost in the reset; force stations to reassociate. */
1276 if (vif
->type
== NL80211_IFTYPE_STATION
)
1277 ieee80211_disconnect(vif
, true);
/* Re-create the per-link device context and TX settings in firmware. */
1279 for_each_set_bit(i
, &valid
, IEEE80211_MLD_MAX_NUM_LINKS
) {
1280 bss_conf
= mt792x_vif_to_bss_conf(vif
, i
);
1282 mt76_connac_mcu_uni_add_dev(&dev
->mphy
, bss_conf
,
1283 &mvif
->sta
.deflink
.wcid
, true);
1284 mt7925_mcu_set_tx(dev
, bss_conf
);
/* AP mode additionally needs BSS, station record and beacon offload. */
1287 if (vif
->type
== NL80211_IFTYPE_AP
) {
1288 mt76_connac_mcu_uni_add_bss(dev
->phy
.mt76
, vif
, &mvif
->sta
.deflink
.wcid
,
1290 mt7925_mcu_sta_update(dev
, NULL
, vif
, true,
1291 MT76_STA_INFO_STATE_NONE
);
1292 mt7925_mcu_uni_add_beacon_offload(dev
, hw
, vif
, true);
1296 /* system error recovery */
/*
 * mt7925_mac_reset_work() - full-chip reset worker.  Quiesces mac80211
 * queues and pending work, retries the low-level device reset up to 10
 * times under the mt76 mutex, aborts any in-flight scan, then restores
 * state and reconnects every active interface.
 * NOTE(review): several interior lines (retry-loop exit condition, the
 * aborted-scan info initializer, closing braces) are missing from this
 * extraction.
 */
1297 void mt7925_mac_reset_work(struct work_struct
*work
)
1299 struct mt792x_dev
*dev
= container_of(work
, struct mt792x_dev
,
1301 struct ieee80211_hw
*hw
= mt76_hw(dev
);
1302 struct mt76_connac_pm
*pm
= &dev
->pm
;
1305 dev_dbg(dev
->mt76
.dev
, "chip reset\n");
/* Flag the full reset so other paths back off until recovery is done. */
1306 dev
->hw_full_reset
= true;
1307 ieee80211_stop_queues(hw
);
/* Stop periodic MAC work and power-save machinery before resetting. */
1309 cancel_delayed_work_sync(&dev
->mphy
.mac_work
);
1310 cancel_delayed_work_sync(&pm
->ps_work
);
1311 cancel_work_sync(&pm
->wake_work
);
/* Retry the device reset a bounded number of times (10). */
1313 for (i
= 0; i
< 10; i
++) {
1314 mutex_lock(&dev
->mt76
.mutex
);
1315 ret
= mt792x_dev_reset(dev
);
1316 mutex_unlock(&dev
->mt76
.mutex
);
1323 dev_err(dev
->mt76
.dev
, "chip reset failed\n");
/* A scan in progress cannot survive the reset; report it aborted. */
1325 if (test_and_clear_bit(MT76_HW_SCANNING
, &dev
->mphy
.state
)) {
1326 struct cfg80211_scan_info info
= {
1330 ieee80211_scan_completed(dev
->mphy
.hw
, &info
);
/* Reset complete: clear flags, restart queues and reconnect vifs. */
1333 dev
->hw_full_reset
= false;
1334 pm
->suspended
= false;
1335 ieee80211_wake_queues(hw
);
1336 ieee80211_iterate_active_interfaces(hw
,
1337 IEEE80211_IFACE_ITER_RESUME_ALL
,
1338 mt7925_vif_connect_iter
, NULL
);
1339 mt76_connac_power_save_sched(&dev
->mt76
.phy
, pm
);
/*
 * mt7925_coredump_work() - assemble a firmware coredump from queued MCU
 * message skbs and hand it to the devcoredump framework, then reset the
 * device.  While dump messages are still arriving (recent last_activity)
 * the work reschedules itself instead of finalizing.
 * NOTE(review): this extraction drops interior lines (the `data` cursor
 * declaration/advance, the dequeue loop header, loop exits and braces);
 * check the full source before relying on the exact flow.
 */
1342 void mt7925_coredump_work(struct work_struct
*work
)
1344 struct mt792x_dev
*dev
;
1347 dev
= (struct mt792x_dev
*)container_of(work
, struct mt792x_dev
,
1348 coredump
.work
.work
);
/* Still receiving dump chunks? Re-arm and try again later. */
1350 if (time_is_after_jiffies(dev
->coredump
.last_activity
+
1351 4 * MT76_CONNAC_COREDUMP_TIMEOUT
)) {
1352 queue_delayed_work(dev
->mt76
.wq
, &dev
->coredump
.work
,
1353 MT76_CONNAC_COREDUMP_TIMEOUT
);
/* Fixed-size, zeroed buffer that will hold the flattened dump. */
1357 dump
= vzalloc(MT76_CONNAC_COREDUMP_SZ
);
1361 struct sk_buff
*skb
;
/* Drain the coredump message list under the mt76 lock. */
1363 spin_lock_bh(&dev
->mt76
.lock
);
1364 skb
= __skb_dequeue(&dev
->coredump
.msg_list
);
1365 spin_unlock_bh(&dev
->mt76
.lock
);
/* Strip the MCU RX descriptor (+8 bytes) to reach the dump payload. */
1370 skb_pull(skb
, sizeof(struct mt7925_mcu_rxd
) + 8);
/* Bounds check: skip copy if no buffer or the chunk would overflow it. */
1371 if (!dump
|| data
+ skb
->len
- dump
> MT76_CONNAC_COREDUMP_SZ
) {
1376 memcpy(data
, skb
->data
, skb
->len
);
/* devcoredump takes ownership of the vzalloc'ed buffer. */
1383 dev_coredumpv(dev
->mt76
.dev
, dump
, MT76_CONNAC_COREDUMP_SZ
,
/* Kick a device reset after capturing the dump. */
1386 mt792x_reset(&dev
->mt76
);
/*
 * mt7925_usb_sdio_write_txwi() - build the TX descriptor (TXWI) in the
 * skb headroom for USB/SDIO transports and pull it into the frame.
 * The MT_SDIO_TXD_SIZE bytes immediately before skb->data are zeroed,
 * filled by mt7925_mac_write_txwi(), then exposed via skb_push().
 * NOTE(review): the return-type/storage-class line is missing from this
 * extraction.
 */
1391 mt7925_usb_sdio_write_txwi(struct mt792x_dev
*dev
, struct mt76_wcid
*wcid
,
1392 enum mt76_txq_id qid
, struct ieee80211_sta
*sta
,
1393 struct ieee80211_key_conf
*key
, int pid
,
1394 struct sk_buff
*skb
)
/* TXWI lives in the headroom just before the current payload. */
1396 __le32
*txwi
= (__le32
*)(skb
->data
- MT_SDIO_TXD_SIZE
);
1398 memset(txwi
, 0, MT_SDIO_TXD_SIZE
);
1399 mt7925_mac_write_txwi(&dev
->mt76
, txwi
, skb
, wcid
, key
, pid
, qid
, 0);
/* Make the descriptor part of the transmitted frame. */
1400 skb_push(skb
, MT_SDIO_TXD_SIZE
);
/*
 * mt7925_usb_sdio_tx_prepare_skb() - mt76 tx_prepare_skb hook for USB/SDIO.
 * Rejects undersized frames, allocates a TX-status packet id, writes the
 * TXWI, prepends the bus header and pads the frame to the required
 * alignment.  On padding failure the packet id is released again.
 * NOTE(review): interior lines (early-return body, the `if (sta)` header,
 * USB-specific pad adjustment, error-path braces, final return) are
 * missing from this extraction.
 */
1403 int mt7925_usb_sdio_tx_prepare_skb(struct mt76_dev
*mdev
, void *txwi_ptr
,
1404 enum mt76_txq_id qid
, struct mt76_wcid
*wcid
,
1405 struct ieee80211_sta
*sta
,
1406 struct mt76_tx_info
*tx_info
)
1408 struct mt792x_dev
*dev
= container_of(mdev
, struct mt792x_dev
, mt76
);
1409 struct ieee80211_tx_info
*info
= IEEE80211_SKB_CB(tx_info
->skb
);
1410 struct ieee80211_key_conf
*key
= info
->control
.hw_key
;
1411 struct sk_buff
*skb
= tx_info
->skb
;
1412 int err
, pad
, pktid
;
/* Frames no larger than an Ethernet header are invalid. */
1414 if (unlikely(tx_info
->skb
->len
<= ETH_HLEN
))
/* No per-station context: fall back to the global wcid. */
1418 wcid
= &dev
->mt76
.global_wcid
;
1421 struct mt792x_sta
*msta
= (struct mt792x_sta
*)sta
->drv_priv
;
/* Request a TX status report at most every HZ/4 per station. */
1423 if (time_after(jiffies
, msta
->deflink
.last_txs
+ HZ
/ 4)) {
1424 info
->flags
|= IEEE80211_TX_CTL_REQ_TX_STATUS
;
1425 msta
->deflink
.last_txs
= jiffies
;
/* Reserve a packet id so firmware TXS can be matched back to this skb. */
1429 pktid
= mt76_tx_status_skb_add(&dev
->mt76
, wcid
, skb
);
1430 mt7925_usb_sdio_write_txwi(dev
, wcid
, qid
, sta
, key
, pktid
, skb
);
/* Prepend the USB/SDIO bus header, then pad to 4-byte alignment. */
1432 mt792x_skb_add_usb_sdio_hdr(dev
, skb
, 0);
1433 pad
= round_up(skb
->len
, 4) - skb
->len
;
1434 if (mt76_is_usb(mdev
))
1437 err
= mt76_skb_adjust_pad(skb
, pad
);
1439 /* Release pktid in case of error. */
1440 idr_remove(&wcid
->pktid
, pktid
);
1444 EXPORT_SYMBOL_GPL(mt7925_usb_sdio_tx_prepare_skb
);
/*
 * mt7925_usb_sdio_tx_complete_skb() - TX completion hook for USB/SDIO.
 * Recovers the TXWI from behind the bus header, looks up the station by
 * the WLAN index stored in TXD1, updates aggregation state for non-EAPOL
 * frames, strips the driver headers and completes the skb to mt76.
 * NOTE(review): the `u16 idx;` declaration (used below) is missing from
 * this extraction.
 */
1446 void mt7925_usb_sdio_tx_complete_skb(struct mt76_dev
*mdev
,
1447 struct mt76_queue_entry
*e
)
/* TXWI sits right after the MT_SDIO_HDR_SIZE bus header. */
1449 __le32
*txwi
= (__le32
*)(e
->skb
->data
+ MT_SDIO_HDR_SIZE
);
1450 unsigned int headroom
= MT_SDIO_TXD_SIZE
+ MT_SDIO_HDR_SIZE
;
1451 struct ieee80211_sta
*sta
;
1452 struct mt76_wcid
*wcid
;
/* Resolve the station from the WLAN index recorded in the descriptor. */
1455 idx
= le32_get_bits(txwi
[1], MT_TXD1_WLAN_IDX
);
1456 wcid
= rcu_dereference(mdev
->wcid
[idx
]);
1457 sta
= wcid_to_sta(wcid
);
/* EAPOL frames are excluded from aggregation bookkeeping. */
1459 if (sta
&& likely(e
->skb
->protocol
!= cpu_to_be16(ETH_P_PAE
)))
1460 mt76_connac2_tx_check_aggr(sta
, txwi
);
/* Drop bus header + TXWI before handing the skb back to mt76. */
1462 skb_pull(e
->skb
, headroom
);
1463 mt76_tx_complete_skb(mdev
, e
->wcid
, e
->skb
);
1465 EXPORT_SYMBOL_GPL(mt7925_usb_sdio_tx_complete_skb
);
/*
 * mt7925_usb_sdio_tx_status_data() - mt76 tx_status_data hook for
 * USB/SDIO: polls per-station WTBL counters under the device mutex.
 * NOTE(review): the return statement is missing from this extraction;
 * the visible body only performs the poll.
 */
1467 bool mt7925_usb_sdio_tx_status_data(struct mt76_dev
*mdev
, u8
*update
)
1469 struct mt792x_dev
*dev
= container_of(mdev
, struct mt792x_dev
, mt76
);
/* Serialize against other MCU/register access while polling. */
1471 mt792x_mutex_acquire(dev
);
1472 mt7925_mac_sta_poll(dev
);
1473 mt792x_mutex_release(dev
);
1477 EXPORT_SYMBOL_GPL(mt7925_usb_sdio_tx_status_data
);
1479 #if IS_ENABLED(CONFIG_IPV6)
1480 void mt7925_set_ipv6_ns_work(struct work_struct
*work
)
1482 struct mt792x_dev
*dev
= container_of(work
, struct mt792x_dev
,
1484 struct sk_buff
*skb
;
1488 skb
= skb_dequeue(&dev
->ipv6_ns_list
);
1493 mt792x_mutex_acquire(dev
);
1494 ret
= mt76_mcu_skb_send_msg(&dev
->mt76
, skb
,
1495 MCU_UNI_CMD(OFFLOAD
), true);
1496 mt792x_mutex_release(dev
);
1501 skb_queue_purge(&dev
->ipv6_ns_list
);