// SPDX-License-Identifier: ISC

#include <linux/etherdevice.h>
#include <linux/timekeeping.h>
#include "mt7603.h"
#include "mac.h"

#define MT_PSE_PAGE_SIZE	128

static u32
mt7603_ac_queue_mask0(u32 mask)
{
	u32 ret = 0;

	ret |= GENMASK(3, 0) * !!(mask & BIT(0));
	ret |= GENMASK(8, 5) * !!(mask & BIT(1));
	ret |= GENMASK(13, 10) * !!(mask & BIT(2));
	ret |= GENMASK(19, 16) * !!(mask & BIT(3));
	return ret;
}

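/*
 * Each access category bit in the mask above expands to a four-bit group in
 * the WF_ARB TX start/stop registers (bits 3:0, 8:5, 13:10 and 19:16), e.g.
 * mt7603_ac_queue_mask0(BIT(2)) == GENMASK(13, 10). The "* !!(...)" idiom
 * multiplies each group mask by 0 or 1 instead of branching per bit.
 */
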
static void
mt76_stop_tx_ac(struct mt7603_dev *dev, u32 mask)
{
	mt76_set(dev, MT_WF_ARB_TX_STOP_0, mt7603_ac_queue_mask0(mask));
}

static void
mt76_start_tx_ac(struct mt7603_dev *dev, u32 mask)
{
	mt76_set(dev, MT_WF_ARB_TX_START_0, mt7603_ac_queue_mask0(mask));
}

void mt7603_mac_reset_counters(struct mt7603_dev *dev)
{
	int i;

	for (i = 0; i < 2; i++)
		mt76_rr(dev, MT_TX_AGG_CNT(i));

	memset(dev->mt76.aggr_stats, 0, sizeof(dev->mt76.aggr_stats));
}

void mt7603_mac_set_timing(struct mt7603_dev *dev)
{
	u32 cck = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 231) |
		  FIELD_PREP(MT_TIMEOUT_VAL_CCA, 48);
	u32 ofdm = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 60) |
		   FIELD_PREP(MT_TIMEOUT_VAL_CCA, 24);
	int offset = 3 * dev->coverage_class;
	u32 reg_offset = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, offset) |
			 FIELD_PREP(MT_TIMEOUT_VAL_CCA, offset);
	int sifs;
	u32 val;

	if (dev->mt76.chandef.chan->band == NL80211_BAND_5GHZ)
		sifs = 16;
	else
		sifs = 10;

	mt76_set(dev, MT_ARB_SCR,
		 MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
	udelay(1);

	mt76_wr(dev, MT_TIMEOUT_CCK, cck + reg_offset);
	mt76_wr(dev, MT_TIMEOUT_OFDM, ofdm + reg_offset);
	mt76_wr(dev, MT_IFS,
		FIELD_PREP(MT_IFS_EIFS, 360) |
		FIELD_PREP(MT_IFS_RIFS, 2) |
		FIELD_PREP(MT_IFS_SIFS, sifs) |
		FIELD_PREP(MT_IFS_SLOT, dev->slottime));

	if (dev->slottime < 20)
		val = MT7603_CFEND_RATE_DEFAULT;
	else
		val = MT7603_CFEND_RATE_11B;

	mt76_rmw_field(dev, MT_AGG_CONTROL, MT_AGG_CONTROL_CFEND_RATE, val);

	mt76_clear(dev, MT_ARB_SCR,
		   MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
}

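/*
 * The coverage class offset above follows the 802.11 coverage class
 * convention of 3 us of extra air-propagation time per step; it is added to
 * both the CCK and OFDM PLCP/CCA timeout values.
 */
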
void mt7603_wtbl_update(struct mt7603_dev *dev, int idx, u32 mask)
{
	mt76_rmw(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_WLAN_IDX,
		 FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, idx) | mask);

	mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000);
}

static u32
mt7603_wtbl1_addr(int idx)
{
	return MT_WTBL1_BASE + idx * MT_WTBL1_SIZE;
}

static u32
mt7603_wtbl2_addr(int idx)
{
	/* Mapped to WTBL2 */
	return MT_PCIE_REMAP_BASE_1 + idx * MT_WTBL2_SIZE;
}

static u32
mt7603_wtbl3_addr(int idx)
{
	u32 base = mt7603_wtbl2_addr(MT7603_WTBL_SIZE);

	return base + idx * MT_WTBL3_SIZE;
}

static u32
mt7603_wtbl4_addr(int idx)
{
	u32 base = mt7603_wtbl3_addr(MT7603_WTBL_SIZE);

	return base + idx * MT_WTBL4_SIZE;
}

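/*
 * WTBL segment layout, as implied by the helpers above: WTBL1 sits at a
 * fixed register base, WTBL2 is reached through the MT_PCIE_REMAP_BASE_1
 * window, and WTBL3/WTBL4 appear to be stacked directly behind the previous
 * segment's last entry (index MT7603_WTBL_SIZE).
 */
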
void mt7603_wtbl_init(struct mt7603_dev *dev, int idx, int vif,
		      const u8 *mac_addr)
{
	const void *_mac = mac_addr;
	u32 addr = mt7603_wtbl1_addr(idx);
	u32 w0 = 0, w1 = 0;
	int i;

	if (_mac) {
		w0 = FIELD_PREP(MT_WTBL1_W0_ADDR_HI,
				get_unaligned_le16(_mac + 4));
		w1 = FIELD_PREP(MT_WTBL1_W1_ADDR_LO,
				get_unaligned_le32(_mac));
	}

	if (vif < 0)
		vif = 0;
	else
		w0 |= MT_WTBL1_W0_RX_CHECK_A1;
	w0 |= FIELD_PREP(MT_WTBL1_W0_MUAR_IDX, vif);

	mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000);

	mt76_set(dev, addr + 0 * 4, w0);
	mt76_set(dev, addr + 1 * 4, w1);
	mt76_set(dev, addr + 2 * 4, MT_WTBL1_W2_ADMISSION_CONTROL);

	mt76_stop_tx_ac(dev, GENMASK(3, 0));
	addr = mt7603_wtbl2_addr(idx);
	for (i = 0; i < MT_WTBL2_SIZE; i += 4)
		mt76_wr(dev, addr + i, 0);
	mt7603_wtbl_update(dev, idx, MT_WTBL_UPDATE_WTBL2);
	mt76_start_tx_ac(dev, GENMASK(3, 0));

	addr = mt7603_wtbl3_addr(idx);
	for (i = 0; i < MT_WTBL3_SIZE; i += 4)
		mt76_wr(dev, addr + i, 0);

	addr = mt7603_wtbl4_addr(idx);
	for (i = 0; i < MT_WTBL4_SIZE; i += 4)
		mt76_wr(dev, addr + i, 0);

	mt7603_wtbl_update(dev, idx, MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
}

static void
mt7603_wtbl_set_skip_tx(struct mt7603_dev *dev, int idx, bool enabled)
{
	u32 addr = mt7603_wtbl1_addr(idx);
	u32 val = mt76_rr(dev, addr + 3 * 4);

	val &= ~MT_WTBL1_W3_SKIP_TX;
	val |= enabled * MT_WTBL1_W3_SKIP_TX;

	mt76_wr(dev, addr + 3 * 4, val);
}

void mt7603_filter_tx(struct mt7603_dev *dev, int idx, bool abort)
{
	u32 port, queue;
	int i;

	if (abort) {
		port = 3; /* PSE */
		queue = 8; /* free queue */
	} else {
		port = 0; /* HIF */
		queue = 1; /* MCU queue */
	}

	mt7603_wtbl_set_skip_tx(dev, idx, true);

	mt76_wr(dev, MT_TX_ABORT, MT_TX_ABORT_EN |
			FIELD_PREP(MT_TX_ABORT_WCID, idx));

	for (i = 0; i < 4; i++) {
		mt76_wr(dev, MT_DMA_FQCR0, MT_DMA_FQCR0_BUSY |
			FIELD_PREP(MT_DMA_FQCR0_TARGET_WCID, idx) |
			FIELD_PREP(MT_DMA_FQCR0_TARGET_QID, i) |
			FIELD_PREP(MT_DMA_FQCR0_DEST_PORT_ID, port) |
			FIELD_PREP(MT_DMA_FQCR0_DEST_QUEUE_ID, queue));

		WARN_ON_ONCE(!mt76_poll(dev, MT_DMA_FQCR0, MT_DMA_FQCR0_BUSY,
					0, 15000));
	}

	mt76_wr(dev, MT_TX_ABORT, 0);

	mt7603_wtbl_set_skip_tx(dev, idx, false);
}

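/*
 * mt7603_filter_tx() drains all pending frames of one station: while the
 * WTBL "skip TX" bit and MT_TX_ABORT keep new transmissions from starting,
 * the per-AC loop above asks the DMA scheduler (MT_DMA_FQCR0) to forward
 * every queued frame for the WCID either to the PSE free queue (abort) or,
 * presumably for later requeueing, to the MCU queue.
 */
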
void mt7603_wtbl_set_smps(struct mt7603_dev *dev, struct mt7603_sta *sta,
			  bool enabled)
{
	u32 addr = mt7603_wtbl1_addr(sta->wcid.idx);

	if (sta->smps == enabled)
		return;

	mt76_rmw_field(dev, addr + 2 * 4, MT_WTBL1_W2_SMPS, enabled);
	sta->smps = enabled;
}

void mt7603_wtbl_set_ps(struct mt7603_dev *dev, struct mt7603_sta *sta,
			bool enabled)
{
	int idx = sta->wcid.idx;
	u32 addr;

	spin_lock_bh(&dev->ps_lock);

	if (sta->ps == enabled)
		goto out;

	mt76_wr(dev, MT_PSE_RTA,
		FIELD_PREP(MT_PSE_RTA_TAG_ID, idx) |
		FIELD_PREP(MT_PSE_RTA_PORT_ID, 0) |
		FIELD_PREP(MT_PSE_RTA_QUEUE_ID, 1) |
		FIELD_PREP(MT_PSE_RTA_REDIRECT_EN, enabled) |
		MT_PSE_RTA_WRITE | MT_PSE_RTA_BUSY);

	mt76_poll(dev, MT_PSE_RTA, MT_PSE_RTA_BUSY, 0, 5000);

	if (enabled)
		mt7603_filter_tx(dev, idx, false);

	addr = mt7603_wtbl1_addr(idx);
	mt76_set(dev, MT_WTBL1_OR, MT_WTBL1_OR_PSM_WRITE);
	mt76_rmw(dev, addr + 3 * 4, MT_WTBL1_W3_POWER_SAVE,
		 enabled * MT_WTBL1_W3_POWER_SAVE);
	mt76_clear(dev, MT_WTBL1_OR, MT_WTBL1_OR_PSM_WRITE);
	sta->ps = enabled;

out:
	spin_unlock_bh(&dev->ps_lock);
}

void mt7603_wtbl_clear(struct mt7603_dev *dev, int idx)
{
	int wtbl2_frame_size = MT_PSE_PAGE_SIZE / MT_WTBL2_SIZE;
	int wtbl2_frame = idx / wtbl2_frame_size;
	int wtbl2_entry = idx % wtbl2_frame_size;

	int wtbl3_base_frame = MT_WTBL3_OFFSET / MT_PSE_PAGE_SIZE;
	int wtbl3_frame_size = MT_PSE_PAGE_SIZE / MT_WTBL3_SIZE;
	int wtbl3_frame = wtbl3_base_frame + idx / wtbl3_frame_size;
	int wtbl3_entry = (idx % wtbl3_frame_size) * 2;

	int wtbl4_base_frame = MT_WTBL4_OFFSET / MT_PSE_PAGE_SIZE;
	int wtbl4_frame_size = MT_PSE_PAGE_SIZE / MT_WTBL4_SIZE;
	int wtbl4_frame = wtbl4_base_frame + idx / wtbl4_frame_size;
	int wtbl4_entry = idx % wtbl4_frame_size;

	u32 addr = MT_WTBL1_BASE + idx * MT_WTBL1_SIZE;
	int i;

	mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000);

	mt76_wr(dev, addr + 0 * 4,
		MT_WTBL1_W0_RX_CHECK_A1 |
		MT_WTBL1_W0_RX_CHECK_A2 |
		MT_WTBL1_W0_RX_VALID);
	mt76_wr(dev, addr + 1 * 4, 0);
	mt76_wr(dev, addr + 2 * 4, 0);

	mt76_set(dev, MT_WTBL1_OR, MT_WTBL1_OR_PSM_WRITE);

	mt76_wr(dev, addr + 3 * 4,
		FIELD_PREP(MT_WTBL1_W3_WTBL2_FRAME_ID, wtbl2_frame) |
		FIELD_PREP(MT_WTBL1_W3_WTBL2_ENTRY_ID, wtbl2_entry) |
		FIELD_PREP(MT_WTBL1_W3_WTBL4_FRAME_ID, wtbl4_frame) |
		MT_WTBL1_W3_I_PSM | MT_WTBL1_W3_KEEP_I_PSM);
	mt76_wr(dev, addr + 4 * 4,
		FIELD_PREP(MT_WTBL1_W4_WTBL3_FRAME_ID, wtbl3_frame) |
		FIELD_PREP(MT_WTBL1_W4_WTBL3_ENTRY_ID, wtbl3_entry) |
		FIELD_PREP(MT_WTBL1_W4_WTBL4_ENTRY_ID, wtbl4_entry));

	mt76_clear(dev, MT_WTBL1_OR, MT_WTBL1_OR_PSM_WRITE);

	addr = mt7603_wtbl2_addr(idx);

	/* Clear BA information */
	mt76_wr(dev, addr + (15 * 4), 0);

	mt76_stop_tx_ac(dev, GENMASK(3, 0));
	for (i = 2; i <= 4; i++)
		mt76_wr(dev, addr + (i * 4), 0);
	mt7603_wtbl_update(dev, idx, MT_WTBL_UPDATE_WTBL2);
	mt76_start_tx_ac(dev, GENMASK(3, 0));

	mt7603_wtbl_update(dev, idx, MT_WTBL_UPDATE_RX_COUNT_CLEAR);
	mt7603_wtbl_update(dev, idx, MT_WTBL_UPDATE_TX_COUNT_CLEAR);
	mt7603_wtbl_update(dev, idx, MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
}

void mt7603_wtbl_update_cap(struct mt7603_dev *dev, struct ieee80211_sta *sta)
{
	struct mt7603_sta *msta = (struct mt7603_sta *)sta->drv_priv;
	int idx = msta->wcid.idx;
	u32 addr;
	u32 val;

	addr = mt7603_wtbl1_addr(idx);

	val = mt76_rr(dev, addr + 2 * 4);
	val &= MT_WTBL1_W2_KEY_TYPE | MT_WTBL1_W2_ADMISSION_CONTROL;
	val |= FIELD_PREP(MT_WTBL1_W2_AMPDU_FACTOR, sta->ht_cap.ampdu_factor) |
	       FIELD_PREP(MT_WTBL1_W2_MPDU_DENSITY, sta->ht_cap.ampdu_density) |
	       MT_WTBL1_W2_TXS_BAF_REPORT;

	if (sta->ht_cap.cap)
		val |= MT_WTBL1_W2_HT;
	if (sta->vht_cap.cap)
		val |= MT_WTBL1_W2_VHT;

	mt76_wr(dev, addr + 2 * 4, val);

	addr = mt7603_wtbl2_addr(idx);
	val = mt76_rr(dev, addr + 9 * 4);
	val &= ~(MT_WTBL2_W9_SHORT_GI_20 | MT_WTBL2_W9_SHORT_GI_40 |
		 MT_WTBL2_W9_SHORT_GI_80);
	if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20)
		val |= MT_WTBL2_W9_SHORT_GI_20;
	if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40)
		val |= MT_WTBL2_W9_SHORT_GI_40;
	mt76_wr(dev, addr + 9 * 4, val);
}

void mt7603_mac_rx_ba_reset(struct mt7603_dev *dev, void *addr, u8 tid)
{
	mt76_wr(dev, MT_BA_CONTROL_0, get_unaligned_le32(addr));
	mt76_wr(dev, MT_BA_CONTROL_1,
		(get_unaligned_le16(addr + 4) |
		 FIELD_PREP(MT_BA_CONTROL_1_TID, tid) |
		 MT_BA_CONTROL_1_RESET));
}

void mt7603_mac_tx_ba_reset(struct mt7603_dev *dev, int wcid, int tid,
			    int ba_size)
{
	u32 addr = mt7603_wtbl2_addr(wcid);
	u32 tid_mask = FIELD_PREP(MT_WTBL2_W15_BA_EN_TIDS, BIT(tid)) |
		       (MT_WTBL2_W15_BA_WIN_SIZE <<
			(tid * MT_WTBL2_W15_BA_WIN_SIZE_SHIFT));
	u32 tid_val;
	int i;

	if (ba_size < 0) {
		/* disable */
		mt76_clear(dev, addr + (15 * 4), tid_mask);
		return;
	}

	for (i = 7; i > 0; i--) {
		if (ba_size >= MT_AGG_SIZE_LIMIT(i))
			break;
	}

	tid_val = FIELD_PREP(MT_WTBL2_W15_BA_EN_TIDS, BIT(tid)) |
		  i << (tid * MT_WTBL2_W15_BA_WIN_SIZE_SHIFT);

	mt76_rmw(dev, addr + (15 * 4), tid_mask, tid_val);
}

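/*
 * W15 of WTBL2 packs one BA-enable bit plus a per-TID aggregation window
 * field; the loop above picks the largest index i whose MT_AGG_SIZE_LIMIT(i)
 * still fits the requested ba_size, so the hardware window is a quantized
 * version of the negotiated one.
 */
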
void mt7603_mac_sta_poll(struct mt7603_dev *dev)
{
	static const u8 ac_to_tid[4] = {
		[IEEE80211_AC_BE] = 0,
		[IEEE80211_AC_BK] = 1,
		[IEEE80211_AC_VI] = 4,
		[IEEE80211_AC_VO] = 6
	};
	struct ieee80211_sta *sta;
	struct mt7603_sta *msta;
	u32 total_airtime = 0;
	u32 airtime[4];
	u32 addr;
	int i;

	rcu_read_lock();

	while (1) {
		bool clear = false;

		spin_lock_bh(&dev->sta_poll_lock);
		if (list_empty(&dev->sta_poll_list)) {
			spin_unlock_bh(&dev->sta_poll_lock);
			break;
		}

		msta = list_first_entry(&dev->sta_poll_list, struct mt7603_sta,
					poll_list);
		list_del_init(&msta->poll_list);
		spin_unlock_bh(&dev->sta_poll_lock);

		addr = mt7603_wtbl4_addr(msta->wcid.idx);
		for (i = 0; i < 4; i++) {
			u32 airtime_last = msta->tx_airtime_ac[i];

			msta->tx_airtime_ac[i] = mt76_rr(dev, addr + i * 8);
			airtime[i] = msta->tx_airtime_ac[i] - airtime_last;
			airtime[i] *= 32;
			total_airtime += airtime[i];

			if (msta->tx_airtime_ac[i] & BIT(22))
				clear = true;
		}

		if (clear) {
			mt7603_wtbl_update(dev, msta->wcid.idx,
					   MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
			memset(msta->tx_airtime_ac, 0,
			       sizeof(msta->tx_airtime_ac));
		}

		if (!msta->wcid.sta)
			continue;

		sta = container_of((void *)msta, struct ieee80211_sta, drv_priv);
		for (i = 0; i < 4; i++) {
			struct mt76_queue *q = dev->mt76.q_tx[i].q;
			u8 qidx = q->hw_idx;
			u8 tid = ac_to_tid[i];
			u32 txtime = airtime[qidx];

			if (!txtime)
				continue;

			ieee80211_sta_register_airtime(sta, tid, txtime, 0);
		}
	}

	rcu_read_unlock();

	if (!total_airtime)
		return;

	spin_lock_bh(&dev->mt76.cc_lock);
	dev->mt76.chan_state->cc_tx += total_airtime;
	spin_unlock_bh(&dev->mt76.cc_lock);
}

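/*
 * The "* 32" scaling in the loop above suggests the WTBL4 counters report
 * TX airtime in 32 us hardware units; a counter reaching BIT(22) is used as
 * the trigger for clearing the admission counters before they can wrap.
 */
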
static struct mt76_wcid *
mt7603_rx_get_wcid(struct mt7603_dev *dev, u8 idx, bool unicast)
{
	struct mt7603_sta *sta;
	struct mt76_wcid *wcid;

	if (idx >= ARRAY_SIZE(dev->mt76.wcid))
		return NULL;

	wcid = rcu_dereference(dev->mt76.wcid[idx]);
	if (unicast || !wcid)
		return wcid;

	if (!wcid->sta)
		return NULL;

	sta = container_of(wcid, struct mt7603_sta, wcid);
	if (!sta->vif)
		return NULL;

	return &sta->vif->sta.wcid;
}

static int
mt7603_mac_fill_rx(struct mt7603_dev *dev, struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct ieee80211_supported_band *sband;
	struct ieee80211_hdr *hdr;
	__le32 *rxd = (__le32 *)skb->data;
	u32 rxd0 = le32_to_cpu(rxd[0]);
	u32 rxd1 = le32_to_cpu(rxd[1]);
	u32 rxd2 = le32_to_cpu(rxd[2]);
	bool unicast = rxd1 & MT_RXD1_NORMAL_U2M;
	bool insert_ccmp_hdr = false;
	bool remove_pad;
	int idx;
	int i;

	memset(status, 0, sizeof(*status));

	i = FIELD_GET(MT_RXD1_NORMAL_CH_FREQ, rxd1);
	sband = (i & 1) ? &dev->mt76.sband_5g.sband : &dev->mt76.sband_2g.sband;
	i >>= 1;

	idx = FIELD_GET(MT_RXD2_NORMAL_WLAN_IDX, rxd2);
	status->wcid = mt7603_rx_get_wcid(dev, idx, unicast);

	status->band = sband->band;
	if (i < sband->n_channels)
		status->freq = sband->channels[i].center_freq;

	if (rxd2 & MT_RXD2_NORMAL_FCS_ERR)
		status->flag |= RX_FLAG_FAILED_FCS_CRC;

	if (rxd2 & MT_RXD2_NORMAL_TKIP_MIC_ERR)
		status->flag |= RX_FLAG_MMIC_ERROR;

	if (FIELD_GET(MT_RXD2_NORMAL_SEC_MODE, rxd2) != 0 &&
	    !(rxd2 & (MT_RXD2_NORMAL_CLM | MT_RXD2_NORMAL_CM))) {
		status->flag |= RX_FLAG_DECRYPTED;
		status->flag |= RX_FLAG_IV_STRIPPED;
		status->flag |= RX_FLAG_MMIC_STRIPPED | RX_FLAG_MIC_STRIPPED;
	}

	if (!(rxd2 & (MT_RXD2_NORMAL_NON_AMPDU_SUB |
		      MT_RXD2_NORMAL_NON_AMPDU))) {
		status->flag |= RX_FLAG_AMPDU_DETAILS;

		/* all subframes of an A-MPDU have the same timestamp */
		if (dev->rx_ampdu_ts != rxd[12]) {
			if (!++dev->mt76.ampdu_ref)
				dev->mt76.ampdu_ref++;
		}
		dev->rx_ampdu_ts = rxd[12];

		status->ampdu_ref = dev->mt76.ampdu_ref;
	}

	remove_pad = rxd1 & MT_RXD1_NORMAL_HDR_OFFSET;

	if (rxd2 & MT_RXD2_NORMAL_MAX_LEN_ERROR)
		return -EINVAL;

	if (!sband->channels)
		return -EINVAL;

	rxd += 4;
	if (rxd0 & MT_RXD0_NORMAL_GROUP_4) {
		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}
	if (rxd0 & MT_RXD0_NORMAL_GROUP_1) {
		u8 *data = (u8 *)rxd;

		if (status->flag & RX_FLAG_DECRYPTED) {
			status->iv[0] = data[5];
			status->iv[1] = data[4];
			status->iv[2] = data[3];
			status->iv[3] = data[2];
			status->iv[4] = data[1];
			status->iv[5] = data[0];

			insert_ccmp_hdr = FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2);
		}

		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}
	if (rxd0 & MT_RXD0_NORMAL_GROUP_2) {
		rxd += 2;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}
	if (rxd0 & MT_RXD0_NORMAL_GROUP_3) {
		u32 rxdg0 = le32_to_cpu(rxd[0]);
		u32 rxdg3 = le32_to_cpu(rxd[3]);
		bool cck = false;

		i = FIELD_GET(MT_RXV1_TX_RATE, rxdg0);
		switch (FIELD_GET(MT_RXV1_TX_MODE, rxdg0)) {
		case MT_PHY_TYPE_CCK:
			cck = true;
			/* fall through */
		case MT_PHY_TYPE_OFDM:
			i = mt76_get_rate(&dev->mt76, sband, i, cck);
			break;
		case MT_PHY_TYPE_HT_GF:
		case MT_PHY_TYPE_HT:
			status->encoding = RX_ENC_HT;
			if (i > 15)
				return -EINVAL;
			break;
		default:
			return -EINVAL;
		}

		if (rxdg0 & MT_RXV1_HT_SHORT_GI)
			status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
		if (rxdg0 & MT_RXV1_HT_AD_CODE)
			status->enc_flags |= RX_ENC_FLAG_LDPC;

		status->enc_flags |= RX_ENC_FLAG_STBC_MASK *
				     FIELD_GET(MT_RXV1_HT_STBC, rxdg0);

		status->rate_idx = i;

		status->chains = dev->mt76.antenna_mask;
		status->chain_signal[0] = FIELD_GET(MT_RXV4_IB_RSSI0, rxdg3) +
					  dev->rssi_offset[0];
		status->chain_signal[1] = FIELD_GET(MT_RXV4_IB_RSSI1, rxdg3) +
					  dev->rssi_offset[1];

		status->signal = status->chain_signal[0];
		if (status->chains & BIT(1))
			status->signal = max(status->signal,
					     status->chain_signal[1]);

		if (FIELD_GET(MT_RXV1_FRAME_MODE, rxdg0) == 1)
			status->bw = RATE_INFO_BW_40;

		rxd += 6;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	} else {
		return -EINVAL;
	}

	skb_pull(skb, (u8 *)rxd - skb->data + 2 * remove_pad);

	if (insert_ccmp_hdr) {
		u8 key_id = FIELD_GET(MT_RXD1_NORMAL_KEY_ID, rxd1);

		mt76_insert_ccmp_hdr(skb, key_id);
	}

	hdr = (struct ieee80211_hdr *)skb->data;
	if (!status->wcid || !ieee80211_is_data_qos(hdr->frame_control))
		return 0;

	status->aggr = unicast &&
		       !ieee80211_is_qos_nullfunc(hdr->frame_control);
	status->tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
	status->seqno = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));

	return 0;
}

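/*
 * The RX descriptor is variable length: after the four mandatory words,
 * optional groups 4/1/2/3 each extend the header, which is why rxd is
 * advanced and bounds-checked against skb->len after every group before the
 * final skb_pull() strips the whole descriptor (plus alignment padding).
 */
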
static u16
mt7603_mac_tx_rate_val(struct mt7603_dev *dev,
		       const struct ieee80211_tx_rate *rate, bool stbc, u8 *bw)
{
	u8 phy, nss, rate_idx;
	u16 rateval;

	*bw = 0;
	if (rate->flags & IEEE80211_TX_RC_MCS) {
		rate_idx = rate->idx;
		nss = 1 + (rate->idx >> 3);
		phy = MT_PHY_TYPE_HT;
		if (rate->flags & IEEE80211_TX_RC_GREEN_FIELD)
			phy = MT_PHY_TYPE_HT_GF;
		if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
			*bw = 1;
	} else {
		const struct ieee80211_rate *r;
		int band = dev->mt76.chandef.chan->band;
		u16 val;

		nss = 1;
		r = &mt76_hw(dev)->wiphy->bands[band]->bitrates[rate->idx];
		if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
			val = r->hw_value_short;
		else
			val = r->hw_value;

		phy = val >> 8;
		rate_idx = val & 0xff;
	}

	rateval = (FIELD_PREP(MT_TX_RATE_IDX, rate_idx) |
		   FIELD_PREP(MT_TX_RATE_MODE, phy));

	if (stbc && nss == 1)
		rateval |= MT_TX_RATE_STBC;

	return rateval;
}

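/*
 * For legacy rates the hw_value looked up above appears to encode the PHY
 * mode in its high byte and the hardware rate index in its low byte, which
 * is why it is split with "val >> 8" and "val & 0xff" before being repacked
 * into MT_TX_RATE_MODE / MT_TX_RATE_IDX.
 */
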
void mt7603_wtbl_set_rates(struct mt7603_dev *dev, struct mt7603_sta *sta,
			   struct ieee80211_tx_rate *probe_rate,
			   struct ieee80211_tx_rate *rates)
{
	struct ieee80211_tx_rate *ref;
	int wcid = sta->wcid.idx;
	u32 addr = mt7603_wtbl2_addr(wcid);
	bool stbc = false;
	int n_rates = sta->n_rates;
	u8 bw, bw_prev, bw_idx = 0;
	u16 val[4];
	u16 probe_val;
	u32 w9 = mt76_rr(dev, addr + 9 * 4);
	bool rateset;
	int i, k;

	if (!mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000))
		return;

	for (i = n_rates; i < 4; i++)
		rates[i] = rates[n_rates - 1];

	rateset = !(sta->rate_set_tsf & BIT(0));
	memcpy(sta->rateset[rateset].rates, rates,
	       sizeof(sta->rateset[rateset].rates));
	if (probe_rate) {
		sta->rateset[rateset].probe_rate = *probe_rate;
		ref = &sta->rateset[rateset].probe_rate;
	} else {
		sta->rateset[rateset].probe_rate.idx = -1;
		ref = &sta->rateset[rateset].rates[0];
	}

	rates = sta->rateset[rateset].rates;
	for (i = 0; i < ARRAY_SIZE(sta->rateset[rateset].rates); i++) {
		/*
		 * We don't support switching between short and long GI
		 * within the rate set. For accurate tx status reporting, we
		 * need to make sure that flags match.
		 * For improved performance, avoid duplicate entries by
		 * decrementing the MCS index if necessary
		 */
		if ((ref->flags ^ rates[i].flags) & IEEE80211_TX_RC_SHORT_GI)
			rates[i].flags ^= IEEE80211_TX_RC_SHORT_GI;

		for (k = 0; k < i; k++) {
			if (rates[i].idx != rates[k].idx)
				continue;
			if ((rates[i].flags ^ rates[k].flags) &
			    IEEE80211_TX_RC_40_MHZ_WIDTH)
				continue;

			if (!rates[i].idx)
				continue;

			rates[i].idx--;
		}
	}

	w9 &= MT_WTBL2_W9_SHORT_GI_20 | MT_WTBL2_W9_SHORT_GI_40 |
	      MT_WTBL2_W9_SHORT_GI_80;

	val[0] = mt7603_mac_tx_rate_val(dev, &rates[0], stbc, &bw);
	bw_prev = bw;

	if (probe_rate) {
		probe_val = mt7603_mac_tx_rate_val(dev, probe_rate, stbc, &bw);
		if (bw)
			bw_idx = 1;
		else
			bw_prev = 0;
	} else {
		probe_val = val[0];
	}

	w9 |= FIELD_PREP(MT_WTBL2_W9_CC_BW_SEL, bw);
	w9 |= FIELD_PREP(MT_WTBL2_W9_BW_CAP, bw);

	val[1] = mt7603_mac_tx_rate_val(dev, &rates[1], stbc, &bw);
	if (bw_prev) {
		bw_idx = 3;
		bw_prev = bw;
	}

	val[2] = mt7603_mac_tx_rate_val(dev, &rates[2], stbc, &bw);
	if (bw_prev) {
		bw_idx = 5;
		bw_prev = bw;
	}

	val[3] = mt7603_mac_tx_rate_val(dev, &rates[3], stbc, &bw);
	if (bw_prev)
		bw_idx = 7;

	w9 |= FIELD_PREP(MT_WTBL2_W9_CHANGE_BW_RATE,
			 bw_idx ? bw_idx - 1 : 7);

	mt76_wr(dev, MT_WTBL_RIUCR0, w9);

	mt76_wr(dev, MT_WTBL_RIUCR1,
		FIELD_PREP(MT_WTBL_RIUCR1_RATE0, probe_val) |
		FIELD_PREP(MT_WTBL_RIUCR1_RATE1, val[0]) |
		FIELD_PREP(MT_WTBL_RIUCR1_RATE2_LO, val[1]));

	mt76_wr(dev, MT_WTBL_RIUCR2,
		FIELD_PREP(MT_WTBL_RIUCR2_RATE2_HI, val[1] >> 8) |
		FIELD_PREP(MT_WTBL_RIUCR2_RATE3, val[1]) |
		FIELD_PREP(MT_WTBL_RIUCR2_RATE4, val[2]) |
		FIELD_PREP(MT_WTBL_RIUCR2_RATE5_LO, val[2]));

	mt76_wr(dev, MT_WTBL_RIUCR3,
		FIELD_PREP(MT_WTBL_RIUCR3_RATE5_HI, val[2] >> 4) |
		FIELD_PREP(MT_WTBL_RIUCR3_RATE6, val[3]) |
		FIELD_PREP(MT_WTBL_RIUCR3_RATE7, val[3]));

	mt76_set(dev, MT_LPON_T0CR, MT_LPON_T0CR_MODE); /* TSF read */
	sta->rate_set_tsf = (mt76_rr(dev, MT_LPON_UTTR0) & ~BIT(0)) | rateset;

	mt76_wr(dev, MT_WTBL_UPDATE,
		FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, wcid) |
		MT_WTBL_UPDATE_RATE_UPDATE |
		MT_WTBL_UPDATE_TX_COUNT_CLEAR);

	if (!(sta->wcid.tx_info & MT_WCID_TX_INFO_SET))
		mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000);

	sta->rate_count = 2 * MT7603_RATE_RETRY * n_rates;
	sta->wcid.tx_info |= MT_WCID_TX_INFO_SET;
}

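/*
 * Two alternating rate sets are kept per station; the currently unused one
 * is rewritten above and its index is stored in bit 0 of rate_set_tsf
 * together with the TSF timestamp of the switch. TX status handling can
 * then match a report against the rate set that was live when the frame
 * was queued.
 */
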
static enum mt7603_cipher_type
mt7603_mac_get_key_info(struct ieee80211_key_conf *key, u8 *key_data)
{
	memset(key_data, 0, 32);
	if (!key)
		return MT_CIPHER_NONE;

	if (key->keylen > 32)
		return MT_CIPHER_NONE;

	memcpy(key_data, key->key, key->keylen);

	switch (key->cipher) {
	case WLAN_CIPHER_SUITE_WEP40:
		return MT_CIPHER_WEP40;
	case WLAN_CIPHER_SUITE_WEP104:
		return MT_CIPHER_WEP104;
	case WLAN_CIPHER_SUITE_TKIP:
		/* Rx/Tx MIC keys are swapped */
		memcpy(key_data + 16, key->key + 24, 8);
		memcpy(key_data + 24, key->key + 16, 8);
		return MT_CIPHER_TKIP;
	case WLAN_CIPHER_SUITE_CCMP:
		return MT_CIPHER_AES_CCMP;
	default:
		return MT_CIPHER_NONE;
	}
}

int mt7603_wtbl_set_key(struct mt7603_dev *dev, int wcid,
			struct ieee80211_key_conf *key)
{
	enum mt7603_cipher_type cipher;
	u32 addr = mt7603_wtbl3_addr(wcid);
	u8 key_data[32];
	int key_len = sizeof(key_data);

	cipher = mt7603_mac_get_key_info(key, key_data);
	if (cipher == MT_CIPHER_NONE && key)
		return -EOPNOTSUPP;

	if (key && (cipher == MT_CIPHER_WEP40 || cipher == MT_CIPHER_WEP104)) {
		addr += key->keyidx * 16;
		key_len = 16;
	}

	mt76_wr_copy(dev, addr, key_data, key_len);

	addr = mt7603_wtbl1_addr(wcid);
	mt76_rmw_field(dev, addr + 2 * 4, MT_WTBL1_W2_KEY_TYPE, cipher);
	if (key)
		mt76_rmw_field(dev, addr, MT_WTBL1_W0_KEY_IDX, key->keyidx);
	mt76_rmw_field(dev, addr, MT_WTBL1_W0_RX_KEY_VALID, !!key);

	return 0;
}

static int
mt7603_mac_write_txwi(struct mt7603_dev *dev, __le32 *txwi,
		      struct sk_buff *skb, enum mt76_txq_id qid,
		      struct mt76_wcid *wcid, struct ieee80211_sta *sta,
		      int pid, struct ieee80211_key_conf *key)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_tx_rate *rate = &info->control.rates[0];
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data;
	struct ieee80211_vif *vif = info->control.vif;
	struct mt76_queue *q = dev->mt76.q_tx[qid].q;
	struct mt7603_vif *mvif;
	int wlan_idx;
	int hdr_len = ieee80211_get_hdrlen_from_skb(skb);
	int tx_count = 8;
	u8 frame_type, frame_subtype;
	u16 fc = le16_to_cpu(hdr->frame_control);
	u16 seqno = 0;
	u8 vif_idx = 0;
	u32 val;
	u8 bw;

	if (vif) {
		mvif = (struct mt7603_vif *)vif->drv_priv;
		vif_idx = mvif->idx;
		if (vif_idx && qid >= MT_TXQ_BEACON)
			vif_idx += 0x10;
	}

	if (sta) {
		struct mt7603_sta *msta = (struct mt7603_sta *)sta->drv_priv;

		tx_count = msta->rate_count;
	}

	if (wcid)
		wlan_idx = wcid->idx;
	else
		wlan_idx = MT7603_WTBL_RESERVED;

	frame_type = (fc & IEEE80211_FCTL_FTYPE) >> 2;
	frame_subtype = (fc & IEEE80211_FCTL_STYPE) >> 4;

	val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len + MT_TXD_SIZE) |
	      FIELD_PREP(MT_TXD0_Q_IDX, q->hw_idx);
	txwi[0] = cpu_to_le32(val);

	val = MT_TXD1_LONG_FORMAT |
	      FIELD_PREP(MT_TXD1_OWN_MAC, vif_idx) |
	      FIELD_PREP(MT_TXD1_TID,
			 skb->priority & IEEE80211_QOS_CTL_TID_MASK) |
	      FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_11) |
	      FIELD_PREP(MT_TXD1_HDR_INFO, hdr_len / 2) |
	      FIELD_PREP(MT_TXD1_WLAN_IDX, wlan_idx) |
	      FIELD_PREP(MT_TXD1_PROTECTED, !!key);
	txwi[1] = cpu_to_le32(val);

	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
		txwi[1] |= cpu_to_le32(MT_TXD1_NO_ACK);

	val = FIELD_PREP(MT_TXD2_FRAME_TYPE, frame_type) |
	      FIELD_PREP(MT_TXD2_SUB_TYPE, frame_subtype) |
	      FIELD_PREP(MT_TXD2_MULTICAST,
			 is_multicast_ether_addr(hdr->addr1));
	txwi[2] = cpu_to_le32(val);

	if (!(info->flags & IEEE80211_TX_CTL_AMPDU))
		txwi[2] |= cpu_to_le32(MT_TXD2_BA_DISABLE);

	txwi[4] = 0;

	val = MT_TXD5_TX_STATUS_HOST | MT_TXD5_SW_POWER_MGMT |
	      FIELD_PREP(MT_TXD5_PID, pid);
	txwi[5] = cpu_to_le32(val);

	txwi[6] = 0;

	if (rate->idx >= 0 && rate->count &&
	    !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)) {
		bool stbc = info->flags & IEEE80211_TX_CTL_STBC;
		u16 rateval = mt7603_mac_tx_rate_val(dev, rate, stbc, &bw);

		txwi[2] |= cpu_to_le32(MT_TXD2_FIX_RATE);

		val = MT_TXD6_FIXED_BW |
		      FIELD_PREP(MT_TXD6_BW, bw) |
		      FIELD_PREP(MT_TXD6_TX_RATE, rateval);
		txwi[6] |= cpu_to_le32(val);

		if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
			txwi[6] |= cpu_to_le32(MT_TXD6_SGI);

		if (!(rate->flags & IEEE80211_TX_RC_MCS))
			txwi[2] |= cpu_to_le32(MT_TXD2_BA_DISABLE);

		tx_count = rate->count;
	}

	/* use maximum tx count for beacons and buffered multicast */
	if (qid >= MT_TXQ_BEACON)
		tx_count = 0x1f;

	val = FIELD_PREP(MT_TXD3_REM_TX_COUNT, tx_count) |
	      MT_TXD3_SN_VALID;

	if (ieee80211_is_data_qos(hdr->frame_control))
		seqno = le16_to_cpu(hdr->seq_ctrl);
	else if (ieee80211_is_back_req(hdr->frame_control))
		seqno = le16_to_cpu(bar->start_seq_num);
	else
		val &= ~MT_TXD3_SN_VALID;

	val |= FIELD_PREP(MT_TXD3_SEQ, seqno >> 4);

	txwi[3] = cpu_to_le32(val);

	if (key) {
		u64 pn = atomic64_inc_return(&key->tx_pn);

		txwi[3] |= cpu_to_le32(MT_TXD3_PN_VALID);
		txwi[4] = cpu_to_le32(pn & GENMASK(31, 0));
		txwi[5] |= cpu_to_le32(FIELD_PREP(MT_TXD5_PN_HIGH, pn >> 32));
	}

	txwi[7] = 0;

	return 0;
}

int mt7603_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
			  enum mt76_txq_id qid, struct mt76_wcid *wcid,
			  struct ieee80211_sta *sta,
			  struct mt76_tx_info *tx_info)
{
	struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
	struct mt7603_sta *msta = container_of(wcid, struct mt7603_sta, wcid);
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
	struct ieee80211_key_conf *key = info->control.hw_key;
	int pid;

	if (!wcid)
		wcid = &dev->global_sta.wcid;

	if (sta) {
		msta = (struct mt7603_sta *)sta->drv_priv;

		if ((info->flags & (IEEE80211_TX_CTL_NO_PS_BUFFER |
				    IEEE80211_TX_CTL_CLEAR_PS_FILT)) ||
		    (info->control.flags & IEEE80211_TX_CTRL_PS_RESPONSE))
			mt7603_wtbl_set_ps(dev, msta, false);
	}

	pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);

	if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) {
		spin_lock_bh(&dev->mt76.lock);
		mt7603_wtbl_set_rates(dev, msta, &info->control.rates[0],
				      msta->rates);
		msta->rate_probe = true;
		spin_unlock_bh(&dev->mt76.lock);
	}

	mt7603_mac_write_txwi(dev, txwi_ptr, tx_info->skb, qid, wcid,
			      sta, pid, key);

	return 0;
}

static bool
mt7603_fill_txs(struct mt7603_dev *dev, struct mt7603_sta *sta,
		struct ieee80211_tx_info *info, __le32 *txs_data)
{
	struct ieee80211_supported_band *sband;
	struct mt7603_rate_set *rs;
	int first_idx = 0, last_idx;
	u32 rate_set_tsf;
	u32 final_rate;
	u32 final_rate_flags;
	bool rs_idx;
	bool ack_timeout;
	bool fixed_rate;
	bool probe;
	bool ampdu;
	bool cck = false;
	int count;
	u32 txs;
	int idx;
	int i;

	fixed_rate = info->status.rates[0].count;
	probe = !!(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);

	txs = le32_to_cpu(txs_data[4]);
	ampdu = !fixed_rate && (txs & MT_TXS4_AMPDU);
	count = FIELD_GET(MT_TXS4_TX_COUNT, txs);
	last_idx = FIELD_GET(MT_TXS4_LAST_TX_RATE, txs);

	txs = le32_to_cpu(txs_data[0]);
	final_rate = FIELD_GET(MT_TXS0_TX_RATE, txs);
	ack_timeout = txs & MT_TXS0_ACK_TIMEOUT;

	if (!ampdu && (txs & MT_TXS0_RTS_TIMEOUT))
		return false;

	if (txs & MT_TXS0_QUEUE_TIMEOUT)
		return false;

	if (!ack_timeout)
		info->flags |= IEEE80211_TX_STAT_ACK;

	info->status.ampdu_len = 1;
	info->status.ampdu_ack_len = !!(info->flags &
					IEEE80211_TX_STAT_ACK);

	if (ampdu || (info->flags & IEEE80211_TX_CTL_AMPDU))
		info->flags |= IEEE80211_TX_STAT_AMPDU | IEEE80211_TX_CTL_AMPDU;

	first_idx = max_t(int, 0, last_idx - (count + 1) / MT7603_RATE_RETRY);

	if (fixed_rate && !probe) {
		info->status.rates[0].count = count;
		goto out;
	}

	rate_set_tsf = READ_ONCE(sta->rate_set_tsf);
	rs_idx = !((u32)(FIELD_GET(MT_TXS1_F0_TIMESTAMP, le32_to_cpu(txs_data[1])) -
			 rate_set_tsf) < 1000000);
	rs_idx ^= rate_set_tsf & BIT(0);
	rs = &sta->rateset[rs_idx];

	if (!first_idx && rs->probe_rate.idx >= 0) {
		info->status.rates[0] = rs->probe_rate;

		spin_lock_bh(&dev->mt76.lock);
		if (sta->rate_probe) {
			mt7603_wtbl_set_rates(dev, sta, NULL,
					      sta->rates);
			sta->rate_probe = false;
		}
		spin_unlock_bh(&dev->mt76.lock);
	} else {
		info->status.rates[0] = rs->rates[first_idx / 2];
	}
	info->status.rates[0].count = 0;

	for (i = 0, idx = first_idx; count && idx <= last_idx; idx++) {
		struct ieee80211_tx_rate *cur_rate;
		int cur_count;

		cur_rate = &rs->rates[idx / 2];
		cur_count = min_t(int, MT7603_RATE_RETRY, count);
		count -= cur_count;

		if (idx && (cur_rate->idx != info->status.rates[i].idx ||
			    cur_rate->flags != info->status.rates[i].flags)) {
			i++;
			if (i == ARRAY_SIZE(info->status.rates)) {
				i--;
				break;
			}

			info->status.rates[i] = *cur_rate;
			info->status.rates[i].count = 0;
		}

		info->status.rates[i].count += cur_count;
	}

out:
	final_rate_flags = info->status.rates[i].flags;

	switch (FIELD_GET(MT_TX_RATE_MODE, final_rate)) {
	case MT_PHY_TYPE_CCK:
		cck = true;
		/* fall through */
	case MT_PHY_TYPE_OFDM:
		if (dev->mt76.chandef.chan->band == NL80211_BAND_5GHZ)
			sband = &dev->mt76.sband_5g.sband;
		else
			sband = &dev->mt76.sband_2g.sband;
		final_rate &= GENMASK(5, 0);
		final_rate = mt76_get_rate(&dev->mt76, sband, final_rate,
					   cck);
		final_rate_flags = 0;
		break;
	case MT_PHY_TYPE_HT_GF:
	case MT_PHY_TYPE_HT:
		final_rate_flags |= IEEE80211_TX_RC_MCS;
		final_rate &= GENMASK(5, 0);
		if (final_rate > 15)
			return false;
		break;
	default:
		return false;
	}

	info->status.rates[i].idx = final_rate;
	info->status.rates[i].flags = final_rate_flags;

	return true;
}

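/*
 * Rate-set lookup above relies on the TSF snapshot stored by
 * mt7603_wtbl_set_rates(): a status timestamp within ~1s of rate_set_tsf
 * selects the newly written set, otherwise the previous one. Retry slots
 * map to rate entries via idx / 2 because each of the four configured rates
 * occupies MT7603_RATE_RETRY consecutive slots.
 */
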
static bool
mt7603_mac_add_txs_skb(struct mt7603_dev *dev, struct mt7603_sta *sta, int pid,
		       __le32 *txs_data)
{
	struct mt76_dev *mdev = &dev->mt76;
	struct sk_buff_head list;
	struct sk_buff *skb;

	if (pid < MT_PACKET_ID_FIRST)
		return false;

	mt76_tx_status_lock(mdev, &list);
	skb = mt76_tx_status_skb_get(mdev, &sta->wcid, pid, &list);
	if (skb) {
		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

		if (!mt7603_fill_txs(dev, sta, info, txs_data)) {
			ieee80211_tx_info_clear_status(info);
			info->status.rates[0].idx = -1;
		}

		mt76_tx_status_skb_done(mdev, skb, &list);
	}
	mt76_tx_status_unlock(mdev, &list);

	return !!skb;
}

void mt7603_mac_add_txs(struct mt7603_dev *dev, void *data)
{
	struct ieee80211_tx_info info = {};
	struct ieee80211_sta *sta = NULL;
	struct mt7603_sta *msta = NULL;
	struct mt76_wcid *wcid;
	__le32 *txs_data = data;
	u32 txs;
	u8 wcidx;
	u8 pid;

	txs = le32_to_cpu(txs_data[4]);
	pid = FIELD_GET(MT_TXS4_PID, txs);
	txs = le32_to_cpu(txs_data[3]);
	wcidx = FIELD_GET(MT_TXS3_WCID, txs);

	if (pid == MT_PACKET_ID_NO_ACK)
		return;

	if (wcidx >= ARRAY_SIZE(dev->mt76.wcid))
		return;

	rcu_read_lock();

	wcid = rcu_dereference(dev->mt76.wcid[wcidx]);
	if (!wcid)
		goto out;

	msta = container_of(wcid, struct mt7603_sta, wcid);
	sta = wcid_to_sta(wcid);

	if (list_empty(&msta->poll_list)) {
		spin_lock_bh(&dev->sta_poll_lock);
		list_add_tail(&msta->poll_list, &dev->sta_poll_list);
		spin_unlock_bh(&dev->sta_poll_lock);
	}

	if (mt7603_mac_add_txs_skb(dev, msta, pid, txs_data))
		goto out;

	if (wcidx >= MT7603_WTBL_STA || !sta)
		goto out;

	if (mt7603_fill_txs(dev, msta, &info, txs_data))
		ieee80211_tx_status_noskb(mt76_hw(dev), sta, &info);

out:
	rcu_read_unlock();
}

void mt7603_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
			    struct mt76_queue_entry *e)
{
	struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
	struct sk_buff *skb = e->skb;

	if (!e->txwi) {
		dev_kfree_skb_any(skb);
		return;
	}

	if (qid < 4)
		dev->tx_hang_check = 0;

	mt76_tx_complete_skb(mdev, skb);
}

static bool
wait_for_wpdma(struct mt7603_dev *dev)
{
	return mt76_poll(dev, MT_WPDMA_GLO_CFG,
			 MT_WPDMA_GLO_CFG_TX_DMA_BUSY |
			 MT_WPDMA_GLO_CFG_RX_DMA_BUSY,
			 0, 1000);
}

static void mt7603_pse_reset(struct mt7603_dev *dev)
{
	/* Clear previous reset result */
	if (!dev->reset_cause[RESET_CAUSE_RESET_FAILED])
		mt76_clear(dev, MT_MCU_DEBUG_RESET, MT_MCU_DEBUG_RESET_PSE_S);

	mt76_set(dev, MT_MCU_DEBUG_RESET, MT_MCU_DEBUG_RESET_PSE);

	if (!mt76_poll_msec(dev, MT_MCU_DEBUG_RESET,
			    MT_MCU_DEBUG_RESET_PSE_S,
			    MT_MCU_DEBUG_RESET_PSE_S, 500)) {
		dev->reset_cause[RESET_CAUSE_RESET_FAILED]++;
		mt76_clear(dev, MT_MCU_DEBUG_RESET, MT_MCU_DEBUG_RESET_PSE);
	} else {
		dev->reset_cause[RESET_CAUSE_RESET_FAILED] = 0;
		mt76_clear(dev, MT_MCU_DEBUG_RESET, MT_MCU_DEBUG_RESET_QUEUES);
	}

	if (dev->reset_cause[RESET_CAUSE_RESET_FAILED] >= 3)
		dev->reset_cause[RESET_CAUSE_RESET_FAILED] = 0;
}

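/*
 * A failed PSE reset is remembered in reset_cause[RESET_CAUSE_RESET_FAILED];
 * while it is nonzero the watchdog skips the DMA reinit path and retries,
 * and after three consecutive failures the counter is cleared so a full
 * reset is attempted again.
 */
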
void mt7603_mac_dma_start(struct mt7603_dev *dev)
{
	mt7603_mac_start(dev);

	wait_for_wpdma(dev);
	usleep_range(50, 100);

	mt76_set(dev, MT_WPDMA_GLO_CFG,
		 (MT_WPDMA_GLO_CFG_TX_DMA_EN |
		  MT_WPDMA_GLO_CFG_RX_DMA_EN |
		  FIELD_PREP(MT_WPDMA_GLO_CFG_DMA_BURST_SIZE, 3) |
		  MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE));

	mt7603_irq_enable(dev, MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_ALL);
}

void mt7603_mac_start(struct mt7603_dev *dev)
{
	mt76_clear(dev, MT_ARB_SCR,
		   MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
	mt76_wr(dev, MT_WF_ARB_TX_START_0, ~0);
	mt76_set(dev, MT_WF_ARB_RQCR, MT_WF_ARB_RQCR_RX_START);
}

void mt7603_mac_stop(struct mt7603_dev *dev)
{
	mt76_set(dev, MT_ARB_SCR,
		 MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
	mt76_wr(dev, MT_WF_ARB_TX_START_0, 0);
	mt76_clear(dev, MT_WF_ARB_RQCR, MT_WF_ARB_RQCR_RX_START);
}

void mt7603_pse_client_reset(struct mt7603_dev *dev)
{
	u32 addr;

	addr = mt7603_reg_map(dev, MT_CLIENT_BASE_PHYS_ADDR +
				   MT_CLIENT_RESET_TX);

	/* Clear previous reset state */
	mt76_clear(dev, addr,
		   MT_CLIENT_RESET_TX_R_E_1 |
		   MT_CLIENT_RESET_TX_R_E_2 |
		   MT_CLIENT_RESET_TX_R_E_1_S |
		   MT_CLIENT_RESET_TX_R_E_2_S);

	/* Start PSE client TX abort */
	mt76_set(dev, addr, MT_CLIENT_RESET_TX_R_E_1);
	mt76_poll_msec(dev, addr, MT_CLIENT_RESET_TX_R_E_1_S,
		       MT_CLIENT_RESET_TX_R_E_1_S, 500);

	mt76_set(dev, addr, MT_CLIENT_RESET_TX_R_E_2);
	mt76_set(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_SW_RESET);

	/* Wait for PSE client to clear TX FIFO */
	mt76_poll_msec(dev, addr, MT_CLIENT_RESET_TX_R_E_2_S,
		       MT_CLIENT_RESET_TX_R_E_2_S, 500);

	/* Clear PSE client TX abort state */
	mt76_clear(dev, addr,
		   MT_CLIENT_RESET_TX_R_E_1 |
		   MT_CLIENT_RESET_TX_R_E_2);
}

static void mt7603_dma_sched_reset(struct mt7603_dev *dev)
{
	if (!is_mt7628(dev))
		return;

	mt76_set(dev, MT_SCH_4, MT_SCH_4_RESET);
	mt76_clear(dev, MT_SCH_4, MT_SCH_4_RESET);
}

static void mt7603_mac_watchdog_reset(struct mt7603_dev *dev)
{
	int beacon_int = dev->mt76.beacon_int;
	u32 mask = dev->mt76.mmio.irqmask;
	int i;

	ieee80211_stop_queues(dev->mt76.hw);
	set_bit(MT76_RESET, &dev->mt76.state);

	/* lock/unlock all queues to ensure that no tx is pending */
	mt76_txq_schedule_all(&dev->mt76);

	tasklet_disable(&dev->mt76.tx_tasklet);
	tasklet_disable(&dev->mt76.pre_tbtt_tasklet);
	napi_disable(&dev->mt76.napi[0]);
	napi_disable(&dev->mt76.napi[1]);
	napi_disable(&dev->mt76.tx_napi);

	mutex_lock(&dev->mt76.mutex);

	mt7603_beacon_set_timer(dev, -1, 0);

	if (dev->reset_cause[RESET_CAUSE_RESET_FAILED] ||
	    dev->cur_reset_cause == RESET_CAUSE_RX_PSE_BUSY ||
	    dev->cur_reset_cause == RESET_CAUSE_BEACON_STUCK ||
	    dev->cur_reset_cause == RESET_CAUSE_TX_HANG)
		mt7603_pse_reset(dev);

	if (dev->reset_cause[RESET_CAUSE_RESET_FAILED])
		goto skip_dma_reset;

	mt7603_mac_stop(dev);

	mt76_clear(dev, MT_WPDMA_GLO_CFG,
		   MT_WPDMA_GLO_CFG_RX_DMA_EN | MT_WPDMA_GLO_CFG_TX_DMA_EN |
		   MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE);
	usleep_range(1000, 2000);

	mt7603_irq_disable(dev, mask);

	mt76_set(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_FORCE_TX_EOF);

	mt7603_pse_client_reset(dev);

	for (i = 0; i < ARRAY_SIZE(dev->mt76.q_tx); i++)
		mt76_queue_tx_cleanup(dev, i, true);

	for (i = 0; i < ARRAY_SIZE(dev->mt76.q_rx); i++)
		mt76_queue_rx_reset(dev, i);

	mt7603_dma_sched_reset(dev);

	mt7603_mac_dma_start(dev);

	mt7603_irq_enable(dev, mask);

skip_dma_reset:
	clear_bit(MT76_RESET, &dev->mt76.state);
	mutex_unlock(&dev->mt76.mutex);

	tasklet_enable(&dev->mt76.tx_tasklet);
	napi_enable(&dev->mt76.tx_napi);
	napi_schedule(&dev->mt76.tx_napi);

	tasklet_enable(&dev->mt76.pre_tbtt_tasklet);
	mt7603_beacon_set_timer(dev, -1, beacon_int);

	napi_enable(&dev->mt76.napi[0]);
	napi_schedule(&dev->mt76.napi[0]);

	napi_enable(&dev->mt76.napi[1]);
	napi_schedule(&dev->mt76.napi[1]);

	ieee80211_wake_queues(dev->mt76.hw);
	mt76_txq_schedule_all(&dev->mt76);
}

static u32 mt7603_dma_debug(struct mt7603_dev *dev, u8 index)
{
	u32 val;

	mt76_wr(dev, MT_WPDMA_DEBUG,
		FIELD_PREP(MT_WPDMA_DEBUG_IDX, index) |
		MT_WPDMA_DEBUG_SEL);

	val = mt76_rr(dev, MT_WPDMA_DEBUG);
	return FIELD_GET(MT_WPDMA_DEBUG_VALUE, val);
}

static bool mt7603_rx_fifo_busy(struct mt7603_dev *dev)
{
	if (is_mt7628(dev))
		return mt7603_dma_debug(dev, 9) & BIT(9);

	return mt7603_dma_debug(dev, 2) & BIT(8);
}

static bool mt7603_rx_dma_busy(struct mt7603_dev *dev)
{
	if (!(mt76_rr(dev, MT_WPDMA_GLO_CFG) & MT_WPDMA_GLO_CFG_RX_DMA_BUSY))
		return false;

	return mt7603_rx_fifo_busy(dev);
}

static bool mt7603_tx_dma_busy(struct mt7603_dev *dev)
{
	u32 val;

	if (!(mt76_rr(dev, MT_WPDMA_GLO_CFG) & MT_WPDMA_GLO_CFG_TX_DMA_BUSY))
		return false;

	val = mt7603_dma_debug(dev, 9);
	return (val & BIT(8)) && (val & 0xf) != 0xf;
}

static bool mt7603_tx_hang(struct mt7603_dev *dev)
{
	struct mt76_queue *q;
	u32 dma_idx, prev_dma_idx;
	int i;

	for (i = 0; i < 4; i++) {
		q = dev->mt76.q_tx[i].q;

		if (!q->queued)
			continue;

		prev_dma_idx = dev->tx_dma_idx[i];
		dma_idx = readl(&q->regs->dma_idx);
		dev->tx_dma_idx[i] = dma_idx;

		if (dma_idx == prev_dma_idx &&
		    dma_idx != readl(&q->regs->cpu_idx))
			break;
	}

	return i < 4;
}

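/*
 * Hang heuristic: a TX ring counts as stuck when its DMA index did not move
 * since the last poll while the CPU index shows frames still queued. The
 * loop exits early on the first stuck ring, so "i < 4" doubles as the
 * return value.
 */
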
static bool mt7603_rx_pse_busy(struct mt7603_dev *dev)
{
	u32 addr, val;

	if (mt76_rr(dev, MT_MCU_DEBUG_RESET) & MT_MCU_DEBUG_RESET_QUEUES)
		return true;

	if (mt7603_rx_fifo_busy(dev))
		return false;

	addr = mt7603_reg_map(dev, MT_CLIENT_BASE_PHYS_ADDR + MT_CLIENT_STATUS);
	mt76_wr(dev, addr, 3);
	val = mt76_rr(dev, addr) >> 16;

	if (is_mt7628(dev) && (val & 0x4001) == 0x4001)
		return true;

	return (val & 0x8001) == 0x8001 || (val & 0xe001) == 0xe001;
}

static bool
mt7603_watchdog_check(struct mt7603_dev *dev, u8 *counter,
		      enum mt7603_reset_cause cause,
		      bool (*check)(struct mt7603_dev *dev))
{
	if (dev->reset_test == cause + 1) {
		dev->reset_test = 0;
		goto trigger;
	}

	if (check) {
		if (!check(dev) && *counter < MT7603_WATCHDOG_TIMEOUT) {
			*counter = 0;
			return false;
		}

		(*counter)++;
	}

	if (*counter < MT7603_WATCHDOG_TIMEOUT)
		return false;
trigger:
	dev->cur_reset_cause = cause;
	dev->reset_cause[cause]++;
	return true;
}

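/*
 * dev->reset_test appears to be a debug hook for forcing a specific reset
 * cause (value is cause + 1), bypassing the counter logic; otherwise the
 * counter only keeps advancing while check() reports a busy/stuck condition
 * and a reset fires once it reaches MT7603_WATCHDOG_TIMEOUT.
 */
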
void mt7603_update_channel(struct mt76_dev *mdev)
{
	struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
	struct mt76_channel_state *state;

	state = mdev->chan_state;
	state->cc_busy += mt76_rr(dev, MT_MIB_STAT_CCA);
}

void
mt7603_edcca_set_strict(struct mt7603_dev *dev, bool val)
{
	u32 rxtd_6 = 0xd7c80000;

	if (val == dev->ed_strict_mode)
		return;

	dev->ed_strict_mode = val;

	/* Ensure that ED/CCA does not trigger if disabled */
	if (!dev->ed_monitor)
		rxtd_6 |= FIELD_PREP(MT_RXTD_6_CCAED_TH, 0x34);
	else
		rxtd_6 |= FIELD_PREP(MT_RXTD_6_CCAED_TH, 0x7d);

	if (dev->ed_monitor && !dev->ed_strict_mode)
		rxtd_6 |= FIELD_PREP(MT_RXTD_6_ACI_TH, 0x0f);
	else
		rxtd_6 |= FIELD_PREP(MT_RXTD_6_ACI_TH, 0x10);

	mt76_wr(dev, MT_RXTD(6), rxtd_6);

	mt76_rmw_field(dev, MT_RXTD(13), MT_RXTD_13_ACI_TH_EN,
		       dev->ed_monitor && !dev->ed_strict_mode);
}

static void
mt7603_edcca_check(struct mt7603_dev *dev)
{
	u32 val = mt76_rr(dev, MT_AGC(41));
	ktime_t cur_time;
	int rssi0, rssi1;
	u32 active;
	u32 ed_busy;

	if (!dev->ed_monitor)
		return;

	rssi0 = FIELD_GET(MT_AGC_41_RSSI_0, val);
	if (rssi0 > 128)
		rssi0 -= 256;

	rssi1 = FIELD_GET(MT_AGC_41_RSSI_1, val);
	if (rssi1 > 128)
		rssi1 -= 256;

	if (max(rssi0, rssi1) >= -40 &&
	    dev->ed_strong_signal < MT7603_EDCCA_BLOCK_TH)
		dev->ed_strong_signal++;
	else if (dev->ed_strong_signal > 0)
		dev->ed_strong_signal--;

	cur_time = ktime_get_boottime();
	ed_busy = mt76_rr(dev, MT_MIB_STAT_ED) & MT_MIB_STAT_ED_MASK;

	active = ktime_to_us(ktime_sub(cur_time, dev->ed_time));
	dev->ed_time = cur_time;

	if (!active)
		return;

	if (100 * ed_busy / active > 90) {
		if (dev->ed_trigger < 0)
			dev->ed_trigger = 0;
		dev->ed_trigger++;
	} else {
		if (dev->ed_trigger > 0)
			dev->ed_trigger = 0;
		dev->ed_trigger--;
	}

	if (dev->ed_trigger > MT7603_EDCCA_BLOCK_TH ||
	    dev->ed_strong_signal < MT7603_EDCCA_BLOCK_TH / 2) {
		mt7603_edcca_set_strict(dev, true);
	} else if (dev->ed_trigger < -MT7603_EDCCA_BLOCK_TH) {
		mt7603_edcca_set_strict(dev, false);
	}

	if (dev->ed_trigger > MT7603_EDCCA_BLOCK_TH)
		dev->ed_trigger = MT7603_EDCCA_BLOCK_TH;
	else if (dev->ed_trigger < -MT7603_EDCCA_BLOCK_TH)
		dev->ed_trigger = -MT7603_EDCCA_BLOCK_TH;
}

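/*
 * ED/CCA adaptation: ed_trigger integrates whether the energy-detect busy
 * time exceeded 90% of the sampling interval, while ed_strong_signal tracks
 * nearby (>= -40 dBm) stations. Strict thresholds are enabled when the
 * medium looks persistently blocked or strong interferers are absent, and
 * both counters are clamped to +/- MT7603_EDCCA_BLOCK_TH.
 */
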
void mt7603_cca_stats_reset(struct mt7603_dev *dev)
{
	mt76_set(dev, MT_PHYCTRL(2), MT_PHYCTRL_2_STATUS_RESET);
	mt76_clear(dev, MT_PHYCTRL(2), MT_PHYCTRL_2_STATUS_RESET);
	mt76_set(dev, MT_PHYCTRL(2), MT_PHYCTRL_2_STATUS_EN);
}

static void
mt7603_adjust_sensitivity(struct mt7603_dev *dev)
{
	u32 agc0 = dev->agc0, agc3 = dev->agc3;
	u32 adj;

	if (!dev->sensitivity || dev->sensitivity < -100) {
		dev->sensitivity = 0;
	} else if (dev->sensitivity <= -84) {
		adj = 7 + (dev->sensitivity + 92) / 2;

		/* chip-specific AGC tuning constants */
		agc0 = 0x56f0076f;
		agc0 |= adj << 12;
		agc0 |= adj << 16;
		agc3 = 0x81d0d5e3;
	} else if (dev->sensitivity <= -72) {
		adj = 7 + (dev->sensitivity + 80) / 2;

		agc0 = 0x6af0006f;
		agc0 |= adj << 8;
		agc0 |= adj << 12;
		agc0 |= adj << 16;

		agc3 = 0x8181d5e3;
	} else {
		if (dev->sensitivity > -54)
			dev->sensitivity = -54;

		adj = 7 + (dev->sensitivity + 80) / 2;

		agc0 = 0x7ff0000f;
		agc0 |= adj << 4;
		agc0 |= adj << 8;
		agc0 |= adj << 12;
		agc0 |= adj << 16;

		agc3 = 0x818181e3;
	}

	mt76_wr(dev, MT_AGC(0), agc0);
	mt76_wr(dev, MT_AGC1(0), agc0);

	mt76_wr(dev, MT_AGC(3), agc3);
	mt76_wr(dev, MT_AGC1(3), agc3);
}

static void
mt7603_false_cca_check(struct mt7603_dev *dev)
{
	int pd_cck, pd_ofdm, mdrdy_cck, mdrdy_ofdm;
	int false_cca;
	int min_signal;
	u32 val;

	val = mt76_rr(dev, MT_PHYCTRL_STAT_PD);
	pd_cck = FIELD_GET(MT_PHYCTRL_STAT_PD_CCK, val);
	pd_ofdm = FIELD_GET(MT_PHYCTRL_STAT_PD_OFDM, val);

	val = mt76_rr(dev, MT_PHYCTRL_STAT_MDRDY);
	mdrdy_cck = FIELD_GET(MT_PHYCTRL_STAT_MDRDY_CCK, val);
	mdrdy_ofdm = FIELD_GET(MT_PHYCTRL_STAT_MDRDY_OFDM, val);

	dev->false_cca_ofdm = pd_ofdm - mdrdy_ofdm;
	dev->false_cca_cck = pd_cck - mdrdy_cck;

	mt7603_cca_stats_reset(dev);

	min_signal = mt76_get_min_avg_rssi(&dev->mt76);
	if (!min_signal) {
		dev->sensitivity = 0;
		dev->last_cca_adj = jiffies;
		goto out;
	}

	min_signal -= 15;

	false_cca = dev->false_cca_ofdm + dev->false_cca_cck;
	if (false_cca > 600) {
		if (!dev->sensitivity)
			dev->sensitivity = -92;
		else
			dev->sensitivity += 2;
		dev->last_cca_adj = jiffies;
	} else if (false_cca < 100 ||
		   time_after(jiffies, dev->last_cca_adj + 10 * HZ)) {
		dev->last_cca_adj = jiffies;
		if (!dev->sensitivity)
			goto out;

		dev->sensitivity -= 2;
	}

	if (dev->sensitivity && dev->sensitivity > min_signal) {
		dev->sensitivity = min_signal;
		dev->last_cca_adj = jiffies;
	}

out:
	mt7603_adjust_sensitivity(dev);
}

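/*
 * Sensitivity control loop: more than 600 false CCA events per interval
 * raise the threshold (less sensitive) by 2 dB, fewer than 100 (or 10s
 * without any adjustment) lower it again, and the result is capped 15 dB
 * below the weakest average RSSI among connected stations.
 */
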
void mt7603_mac_work(struct work_struct *work)
{
	struct mt7603_dev *dev = container_of(work, struct mt7603_dev,
					      mt76.mac_work.work);
	bool reset = false;
	int i, idx;

	mt76_tx_status_check(&dev->mt76, NULL, false);

	mutex_lock(&dev->mt76.mutex);

	dev->mac_work_count++;
	mt76_update_survey(&dev->mt76);
	mt7603_edcca_check(dev);

	for (i = 0, idx = 0; i < 2; i++) {
		u32 val = mt76_rr(dev, MT_TX_AGG_CNT(i));

		dev->mt76.aggr_stats[idx++] += val & 0xffff;
		dev->mt76.aggr_stats[idx++] += val >> 16;
	}

	if (dev->mac_work_count == 10)
		mt7603_false_cca_check(dev);

	if (mt7603_watchdog_check(dev, &dev->rx_pse_check,
				  RESET_CAUSE_RX_PSE_BUSY,
				  mt7603_rx_pse_busy) ||
	    mt7603_watchdog_check(dev, &dev->beacon_check,
				  RESET_CAUSE_BEACON_STUCK,
				  NULL) ||
	    mt7603_watchdog_check(dev, &dev->tx_hang_check,
				  RESET_CAUSE_TX_HANG,
				  mt7603_tx_hang) ||
	    mt7603_watchdog_check(dev, &dev->tx_dma_check,
				  RESET_CAUSE_TX_BUSY,
				  mt7603_tx_dma_busy) ||
	    mt7603_watchdog_check(dev, &dev->rx_dma_check,
				  RESET_CAUSE_RX_BUSY,
				  mt7603_rx_dma_busy) ||
	    mt7603_watchdog_check(dev, &dev->mcu_hang,
				  RESET_CAUSE_MCU_HANG,
				  NULL) ||
	    dev->reset_cause[RESET_CAUSE_RESET_FAILED]) {
		dev->beacon_check = 0;
		dev->tx_dma_check = 0;
		dev->tx_hang_check = 0;
		dev->rx_dma_check = 0;
		dev->rx_pse_check = 0;
		dev->mcu_hang = 0;
		dev->rx_dma_idx = ~0;
		memset(dev->tx_dma_idx, 0xff, sizeof(dev->tx_dma_idx));
		reset = true;
		dev->mac_work_count = 0;
	}

	if (dev->mac_work_count >= 10)
		dev->mac_work_count = 0;

	mutex_unlock(&dev->mt76.mutex);

	if (reset)
		mt7603_mac_watchdog_reset(dev);

	ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mt76.mac_work,
				     msecs_to_jiffies(MT7603_WATCHDOG_TIME));
}