// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2022 MediaTek Inc.
 */

#include "mt7996.h"
#include "../dma.h"
#include "mac.h"
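
/* Set up a per-band data TX ring. When a WED device is active, the ring
 * base and WED queue flags are adjusted before handing off to the common
 * mt76_connac helper.
 */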
int mt7996_init_tx_queues(struct mt7996_phy *phy, int idx, int n_desc,
			  int ring_base, struct mtk_wed_device *wed)
{
	struct mt7996_dev *dev = phy->dev;
	u32 flags = 0;

	if (mtk_wed_device_active(wed)) {
		ring_base += MT_TXQ_ID(0) * MT_RING_SIZE;

		if (phy->mt76->band_idx == MT_BAND2)
			flags = MT_WED_Q_TX(0);
		else
			flags = MT_WED_Q_TX(idx);
	}

	return mt76_connac_init_tx_queues(phy->mt76, idx, n_desc,
					  ring_base, wed, flags);
}
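
/* NAPI poll handler for MCU TX completions. */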
static int mt7996_poll_tx(struct napi_struct *napi, int budget)
{
	struct mt7996_dev *dev;

	dev = container_of(napi, struct mt7996_dev, mt76.tx_napi);

	mt76_connac_tx_cleanup(&dev->mt76);
	if (napi_complete_done(napi, 0))
		mt7996_irq_enable(dev, MT_INT_TX_DONE_MCU);

	return 0;
}
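
/* Map logical MCU/TX/RX queues to WFDMA ring ids and interrupt bits. The
 * layout differs between mt7996 and mt7992, and additional rings are
 * configured when hardware RRO is used.
 */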
static void mt7996_dma_config(struct mt7996_dev *dev)
{
#define Q_CONFIG(q, wfdma, int, id) do {		\
	if (wfdma)					\
		dev->q_wfdma_mask |= (1 << (q));	\
	dev->q_int_mask[(q)] = int;			\
	dev->q_id[(q)] = id;				\
} while (0)

#define MCUQ_CONFIG(q, wfdma, int, id)	Q_CONFIG(q, (wfdma), (int), (id))
#define RXQ_CONFIG(q, wfdma, int, id)	Q_CONFIG(__RXQ(q), (wfdma), (int), (id))
#define TXQ_CONFIG(q, wfdma, int, id)	Q_CONFIG(__TXQ(q), (wfdma), (int), (id))

	RXQ_CONFIG(MT_RXQ_MCU, WFDMA0, MT_INT_RX_DONE_WM, MT7996_RXQ_MCU_WM);
	RXQ_CONFIG(MT_RXQ_MCU_WA, WFDMA0, MT_INT_RX_DONE_WA, MT7996_RXQ_MCU_WA);

	/* mt7996: band0 and band1, mt7992: band0 */
	RXQ_CONFIG(MT_RXQ_MAIN, WFDMA0, MT_INT_RX_DONE_BAND0, MT7996_RXQ_BAND0);
	RXQ_CONFIG(MT_RXQ_MAIN_WA, WFDMA0, MT_INT_RX_DONE_WA_MAIN, MT7996_RXQ_MCU_WA_MAIN);

	if (is_mt7996(&dev->mt76)) {
		RXQ_CONFIG(MT_RXQ_BAND2, WFDMA0, MT_INT_RX_DONE_BAND2, MT7996_RXQ_BAND2);
		RXQ_CONFIG(MT_RXQ_BAND2_WA, WFDMA0, MT_INT_RX_DONE_WA_TRI, MT7996_RXQ_MCU_WA_TRI);
	} else {
		RXQ_CONFIG(MT_RXQ_BAND1, WFDMA0, MT_INT_RX_DONE_BAND1, MT7996_RXQ_BAND1);
		RXQ_CONFIG(MT_RXQ_BAND1_WA, WFDMA0, MT_INT_RX_DONE_WA_EXT, MT7996_RXQ_MCU_WA_EXT);
	}

	if (dev->has_rro) {
		RXQ_CONFIG(MT_RXQ_RRO_BAND0, WFDMA0, MT_INT_RX_DONE_RRO_BAND0,
			   MT7996_RXQ_RRO_BAND0);
		RXQ_CONFIG(MT_RXQ_MSDU_PAGE_BAND0, WFDMA0, MT_INT_RX_DONE_MSDU_PG_BAND0,
			   MT7996_RXQ_MSDU_PG_BAND0);
		RXQ_CONFIG(MT_RXQ_TXFREE_BAND0, WFDMA0, MT_INT_RX_TXFREE_MAIN,
			   MT7996_RXQ_TXFREE0);
		RXQ_CONFIG(MT_RXQ_MSDU_PAGE_BAND1, WFDMA0, MT_INT_RX_DONE_MSDU_PG_BAND1,
			   MT7996_RXQ_MSDU_PG_BAND1);
		RXQ_CONFIG(MT_RXQ_RRO_BAND2, WFDMA0, MT_INT_RX_DONE_RRO_BAND2,
			   MT7996_RXQ_RRO_BAND2);
		RXQ_CONFIG(MT_RXQ_MSDU_PAGE_BAND2, WFDMA0, MT_INT_RX_DONE_MSDU_PG_BAND2,
			   MT7996_RXQ_MSDU_PG_BAND2);
		RXQ_CONFIG(MT_RXQ_TXFREE_BAND2, WFDMA0, MT_INT_RX_TXFREE_TRI,
			   MT7996_RXQ_TXFREE2);

		RXQ_CONFIG(MT_RXQ_RRO_IND, WFDMA0, MT_INT_RX_DONE_RRO_IND,
			   MT7996_RXQ_RRO_IND);
	}

	TXQ_CONFIG(0, WFDMA0, MT_INT_TX_DONE_BAND0, MT7996_TXQ_BAND0);
	if (is_mt7996(&dev->mt76)) {
		TXQ_CONFIG(1, WFDMA0, MT_INT_TX_DONE_BAND1, MT7996_TXQ_BAND1);
		TXQ_CONFIG(2, WFDMA0, MT_INT_TX_DONE_BAND2, MT7996_TXQ_BAND2);
	} else {
		TXQ_CONFIG(1, WFDMA0, MT_INT_TX_DONE_BAND1, MT7996_TXQ_BAND1);
	}

	MCUQ_CONFIG(MT_MCUQ_WM, WFDMA0, MT_INT_TX_DONE_MCU_WM, MT7996_TXQ_MCU_WM);
	MCUQ_CONFIG(MT_MCUQ_WA, WFDMA0, MT_INT_TX_DONE_MCU_WA, MT7996_TXQ_MCU_WA);
	MCUQ_CONFIG(MT_MCUQ_FWDL, WFDMA0, MT_INT_TX_DONE_FWDL, MT7996_TXQ_FWDL);
}
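
/* Pack the current prefetch base and the ring depth into one register
 * value and advance the base for the next ring.
 */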
static u32 __mt7996_dma_prefetch_base(u16 *base, u8 depth)
{
	u32 ret = *base << 16 | depth;

	*base = *base + (depth << 4);

	return ret;
}
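
/* Program the SRAM prefetch depth for each MCU/TX/RX ring; ofs selects
 * the WFDMA instance (0 for PCIe0, the PCIe1 register offset otherwise).
 */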
static void __mt7996_dma_prefetch(struct mt7996_dev *dev, u32 ofs)
{
	u16 base = 0;
	u8 queue;

#define PREFETCH(_depth)	(__mt7996_dma_prefetch_base(&base, (_depth)))
	/* prefetch SRAM wrapping boundary for tx/rx ring. */
	mt76_wr(dev, MT_MCUQ_EXT_CTRL(MT_MCUQ_FWDL) + ofs, PREFETCH(0x2));
	mt76_wr(dev, MT_MCUQ_EXT_CTRL(MT_MCUQ_WM) + ofs, PREFETCH(0x2));
	mt76_wr(dev, MT_TXQ_EXT_CTRL(0) + ofs, PREFETCH(0x8));
	mt76_wr(dev, MT_TXQ_EXT_CTRL(1) + ofs, PREFETCH(0x8));
	mt76_wr(dev, MT_MCUQ_EXT_CTRL(MT_MCUQ_WA) + ofs, PREFETCH(0x2));
	mt76_wr(dev, MT_TXQ_EXT_CTRL(2) + ofs, PREFETCH(0x8));
	mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MCU) + ofs, PREFETCH(0x2));
	mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MCU_WA) + ofs, PREFETCH(0x2));
	mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MAIN_WA) + ofs, PREFETCH(0x2));

	queue = is_mt7996(&dev->mt76) ? MT_RXQ_BAND2_WA : MT_RXQ_BAND1_WA;
	mt76_wr(dev, MT_RXQ_BAND1_CTRL(queue) + ofs, PREFETCH(0x2));

	mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MAIN) + ofs, PREFETCH(0x10));

	queue = is_mt7996(&dev->mt76) ? MT_RXQ_BAND2 : MT_RXQ_BAND1;
	mt76_wr(dev, MT_RXQ_BAND1_CTRL(queue) + ofs, PREFETCH(0x10));

	if (dev->has_rro) {
		mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_RRO_BAND0) + ofs,
			PREFETCH(0x10));
		mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_RRO_BAND2) + ofs,
			PREFETCH(0x10));
		mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MSDU_PAGE_BAND0) + ofs,
			PREFETCH(0x4));
		mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MSDU_PAGE_BAND1) + ofs,
			PREFETCH(0x4));
		mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MSDU_PAGE_BAND2) + ofs,
			PREFETCH(0x4));
		mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_TXFREE_BAND0) + ofs,
			PREFETCH(0x4));
		mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_TXFREE_BAND2) + ofs,
			PREFETCH(0x4));
	}
#undef PREFETCH

	mt76_set(dev, WF_WFDMA0_GLO_CFG_EXT1 + ofs, WF_WFDMA0_GLO_CFG_EXT1_CALC_MODE);
}
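
/* Apply the prefetch configuration to WFDMA0 and, on dual-HIF devices,
 * to the second PCIe instance as well.
 */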
void mt7996_dma_prefetch(struct mt7996_dev *dev)
{
	__mt7996_dma_prefetch(dev, 0);

	if (dev->hif2)
		__mt7996_dma_prefetch(dev, MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0));
}
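
/* Stop WFDMA: optionally pulse the DMASHDL/logic reset, then clear the
 * TX/RX DMA enable bits on both HIF instances.
 */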
static void mt7996_dma_disable(struct mt7996_dev *dev, bool reset)
{
	u32 hif1_ofs = 0;

	if (dev->hif2)
		hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);

	if (reset) {
		mt76_clear(dev, MT_WFDMA0_RST,
			   MT_WFDMA0_RST_DMASHDL_ALL_RST |
			   MT_WFDMA0_RST_LOGIC_RST);

		mt76_set(dev, MT_WFDMA0_RST,
			 MT_WFDMA0_RST_DMASHDL_ALL_RST |
			 MT_WFDMA0_RST_LOGIC_RST);

		if (dev->hif2) {
			mt76_clear(dev, MT_WFDMA0_RST + hif1_ofs,
				   MT_WFDMA0_RST_DMASHDL_ALL_RST |
				   MT_WFDMA0_RST_LOGIC_RST);

			mt76_set(dev, MT_WFDMA0_RST + hif1_ofs,
				 MT_WFDMA0_RST_DMASHDL_ALL_RST |
				 MT_WFDMA0_RST_LOGIC_RST);
		}
	}

	mt76_clear(dev, MT_WFDMA0_GLO_CFG,
		   MT_WFDMA0_GLO_CFG_TX_DMA_EN |
		   MT_WFDMA0_GLO_CFG_RX_DMA_EN |
		   MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
		   MT_WFDMA0_GLO_CFG_OMIT_RX_INFO |
		   MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);

	if (dev->hif2)
		mt76_clear(dev, MT_WFDMA0_GLO_CFG + hif1_ofs,
			   MT_WFDMA0_GLO_CFG_TX_DMA_EN |
			   MT_WFDMA0_GLO_CFG_RX_DMA_EN |
			   MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
			   MT_WFDMA0_GLO_CFG_OMIT_RX_INFO |
			   MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);
}
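
/* Re-enable WFDMA TX/RX and unmask the MCU and per-band RX interrupts;
 * when WED is active, the interrupt mask is also handed to the WED device.
 */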
void mt7996_dma_start(struct mt7996_dev *dev, bool reset, bool wed_reset)
{
	struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
	u32 hif1_ofs = 0;
	u32 irq_mask;

	if (dev->hif2)
		hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);

	/* enable WFDMA Tx/Rx */
	if (!reset) {
		if (mtk_wed_device_active(wed) && mtk_wed_get_rx_capa(wed))
			mt76_set(dev, MT_WFDMA0_GLO_CFG,
				 MT_WFDMA0_GLO_CFG_TX_DMA_EN |
				 MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
				 MT_WFDMA0_GLO_CFG_EXT_EN);
		else
			mt76_set(dev, MT_WFDMA0_GLO_CFG,
				 MT_WFDMA0_GLO_CFG_TX_DMA_EN |
				 MT_WFDMA0_GLO_CFG_RX_DMA_EN |
				 MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
				 MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2 |
				 MT_WFDMA0_GLO_CFG_EXT_EN);

		if (dev->hif2)
			mt76_set(dev, MT_WFDMA0_GLO_CFG + hif1_ofs,
				 MT_WFDMA0_GLO_CFG_TX_DMA_EN |
				 MT_WFDMA0_GLO_CFG_RX_DMA_EN |
				 MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
				 MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2 |
				 MT_WFDMA0_GLO_CFG_EXT_EN);
	}

	/* enable interrupts for TX/RX rings */
	irq_mask = MT_INT_MCU_CMD | MT_INT_RX_DONE_MCU | MT_INT_TX_DONE_MCU;

	if (mt7996_band_valid(dev, MT_BAND0))
		irq_mask |= MT_INT_BAND0_RX_DONE;

	if (mt7996_band_valid(dev, MT_BAND1))
		irq_mask |= MT_INT_BAND1_RX_DONE;

	if (mt7996_band_valid(dev, MT_BAND2))
		irq_mask |= MT_INT_BAND2_RX_DONE;

	if (mtk_wed_device_active(wed) && wed_reset) {
		u32 wed_irq_mask = irq_mask;

		wed_irq_mask |= MT_INT_TX_DONE_BAND0 | MT_INT_TX_DONE_BAND1;
		mt76_wr(dev, MT_INT_MASK_CSR, wed_irq_mask);
		mtk_wed_device_start(wed, wed_irq_mask);
	}

	irq_mask = reset ? MT_INT_MCU_CMD : irq_mask;

	mt7996_irq_enable(dev, irq_mask);
	mt7996_irq_disable(dev, 0);
}
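
/* Reset DMA indices, program prefetch, busy-enable bits and RX
 * thresholds, then start DMA via mt7996_dma_start().
 */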
static void mt7996_dma_enable(struct mt7996_dev *dev, bool reset)
{
	u32 hif1_ofs = 0;

	if (dev->hif2)
		hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);

	mt76_wr(dev, MT_WFDMA0_RST_DTX_PTR, ~0);
	if (dev->hif2)
		mt76_wr(dev, MT_WFDMA0_RST_DTX_PTR + hif1_ofs, ~0);

	/* configure delay interrupt off */
	mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG0, 0);
	mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG1, 0);
	mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG2, 0);

	if (dev->hif2) {
		mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG0 + hif1_ofs, 0);
		mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG1 + hif1_ofs, 0);
		mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG2 + hif1_ofs, 0);
	}

	/* configure prefetch settings */
	mt7996_dma_prefetch(dev);

	/* hif wait WFDMA idle */
	mt76_set(dev, MT_WFDMA0_BUSY_ENA,
		 MT_WFDMA0_BUSY_ENA_TX_FIFO0 |
		 MT_WFDMA0_BUSY_ENA_TX_FIFO1 |
		 MT_WFDMA0_BUSY_ENA_RX_FIFO);

	if (dev->hif2)
		mt76_set(dev, MT_WFDMA0_BUSY_ENA + hif1_ofs,
			 MT_WFDMA0_PCIE1_BUSY_ENA_TX_FIFO0 |
			 MT_WFDMA0_PCIE1_BUSY_ENA_TX_FIFO1 |
			 MT_WFDMA0_PCIE1_BUSY_ENA_RX_FIFO);

	mt76_poll(dev, MT_WFDMA_EXT_CSR_HIF_MISC,
		  MT_WFDMA_EXT_CSR_HIF_MISC_BUSY, 0, 1000);

	mt76_set(dev, WF_WFDMA0_GLO_CFG_EXT0,
		 WF_WFDMA0_GLO_CFG_EXT0_RX_WB_RXD |
		 WF_WFDMA0_GLO_CFG_EXT0_WED_MERGE_MODE);

	mt76_set(dev, WF_WFDMA0_GLO_CFG_EXT1,
		 WF_WFDMA0_GLO_CFG_EXT1_TX_FCTRL_MODE);

	/* WFDMA rx threshold */
	mt76_wr(dev, MT_WFDMA0_PAUSE_RX_Q_45_TH, 0xc000c);
	mt76_wr(dev, MT_WFDMA0_PAUSE_RX_Q_67_TH, 0x10008);
	mt76_wr(dev, MT_WFDMA0_PAUSE_RX_Q_89_TH, 0x10008);
	mt76_wr(dev, MT_WFDMA0_PAUSE_RX_Q_RRO_TH, 0x20);

	if (dev->hif2) {
		mt76_set(dev, WF_WFDMA0_GLO_CFG_EXT0 + hif1_ofs,
			 WF_WFDMA0_GLO_CFG_EXT0_RX_WB_RXD |
			 WF_WFDMA0_GLO_CFG_EXT0_WED_MERGE_MODE);

		mt76_set(dev, WF_WFDMA0_GLO_CFG_EXT1 + hif1_ofs,
			 WF_WFDMA0_GLO_CFG_EXT1_TX_FCTRL_MODE);

		mt76_set(dev, MT_WFDMA_HOST_CONFIG,
			 MT_WFDMA_HOST_CONFIG_PDMA_BAND |
			 MT_WFDMA_HOST_CONFIG_BAND2_PCIE1);

		/* AXI read outstanding number */
		mt76_rmw(dev, MT_WFDMA_AXI_R2A_CTRL,
			 MT_WFDMA_AXI_R2A_CTRL_OUTSTAND_MASK, 0x14);

		/* WFDMA rx threshold */
		mt76_wr(dev, MT_WFDMA0_PAUSE_RX_Q_45_TH + hif1_ofs, 0xc000c);
		mt76_wr(dev, MT_WFDMA0_PAUSE_RX_Q_67_TH + hif1_ofs, 0x10008);
		mt76_wr(dev, MT_WFDMA0_PAUSE_RX_Q_89_TH + hif1_ofs, 0x10008);
		mt76_wr(dev, MT_WFDMA0_PAUSE_RX_Q_RRO_TH + hif1_ofs, 0x20);

		/* fix hardware limitation, pcie1's rx ring3 is not available
		 * so, redirect pcie0 rx ring3 interrupt to pcie1
		 */
		if (mtk_wed_device_active(&dev->mt76.mmio.wed) &&
		    dev->has_rro)
			mt76_set(dev, MT_WFDMA0_RX_INT_PCIE_SEL + hif1_ofs,
				 MT_WFDMA0_RX_INT_SEL_RING6);
		else
			mt76_set(dev, MT_WFDMA0_RX_INT_PCIE_SEL,
				 MT_WFDMA0_RX_INT_SEL_RING3);
	}

	mt7996_dma_start(dev, reset, true);
}

#ifdef CONFIG_NET_MEDIATEK_SOC_WED
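/* Allocate the WED hardware-RRO rings (indication command queue and the
 * per-band MSDU page queues) and start hardware RRO.
 */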
int mt7996_dma_rro_init(struct mt7996_dev *dev)
{
	struct mt76_dev *mdev = &dev->mt76;
	u32 irq_mask;
	int ret;

	mdev->q_rx[MT_RXQ_RRO_IND].flags = MT_WED_RRO_Q_IND;
	mdev->q_rx[MT_RXQ_RRO_IND].wed = &mdev->mmio.wed;
	ret = mt76_queue_alloc(dev, &mdev->q_rx[MT_RXQ_RRO_IND],
			       MT_RXQ_ID(MT_RXQ_RRO_IND),
			       MT7996_RX_RING_SIZE,
			       0, MT_RXQ_RRO_IND_RING_BASE);
	if (ret)
		return ret;

	/* rx msdu page queue for band0 */
	mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND0].flags =
		MT_WED_RRO_Q_MSDU_PG(0) | MT_QFLAG_WED_RRO_EN;
	mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND0].wed = &mdev->mmio.wed;
	ret = mt76_queue_alloc(dev, &mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND0],
			       MT_RXQ_ID(MT_RXQ_MSDU_PAGE_BAND0),
			       MT7996_RX_RING_SIZE,
			       MT7996_RX_MSDU_PAGE_SIZE,
			       MT_RXQ_RING_BASE(MT_RXQ_MSDU_PAGE_BAND0));
	if (ret)
		return ret;

	if (mt7996_band_valid(dev, MT_BAND1)) {
		/* rx msdu page queue for band1 */
		mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND1].flags =
			MT_WED_RRO_Q_MSDU_PG(1) | MT_QFLAG_WED_RRO_EN;
		mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND1].wed = &mdev->mmio.wed;
		ret = mt76_queue_alloc(dev, &mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND1],
				       MT_RXQ_ID(MT_RXQ_MSDU_PAGE_BAND1),
				       MT7996_RX_RING_SIZE,
				       MT7996_RX_MSDU_PAGE_SIZE,
				       MT_RXQ_RING_BASE(MT_RXQ_MSDU_PAGE_BAND1));
		if (ret)
			return ret;
	}

	if (mt7996_band_valid(dev, MT_BAND2)) {
		/* rx msdu page queue for band2 */
		mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND2].flags =
			MT_WED_RRO_Q_MSDU_PG(2) | MT_QFLAG_WED_RRO_EN;
		mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND2].wed = &mdev->mmio.wed;
		ret = mt76_queue_alloc(dev, &mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND2],
				       MT_RXQ_ID(MT_RXQ_MSDU_PAGE_BAND2),
				       MT7996_RX_RING_SIZE,
				       MT7996_RX_MSDU_PAGE_SIZE,
				       MT_RXQ_RING_BASE(MT_RXQ_MSDU_PAGE_BAND2));
		if (ret)
			return ret;
	}

	irq_mask = mdev->mmio.irqmask | MT_INT_RRO_RX_DONE |
		   MT_INT_TX_DONE_BAND2;
	mt76_wr(dev, MT_INT_MASK_CSR, irq_mask);
	mtk_wed_device_start_hw_rro(&mdev->mmio.wed, irq_mask, false);
	mt7996_irq_enable(dev, irq_mask);

	return 0;
}
#endif /* CONFIG_NET_MEDIATEK_SOC_WED */
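
/* Allocate all TX/MCU/RX rings used by the host, attach WED where
 * available, register the TX NAPI handler and enable DMA.
 */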
int mt7996_dma_init(struct mt7996_dev *dev)
{
	struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
	struct mtk_wed_device *wed_hif2 = &dev->mt76.mmio.wed_hif2;
	u32 rx_base;
	u32 hif1_ofs = 0;
	int ret;

	mt7996_dma_config(dev);

	mt76_dma_attach(&dev->mt76);

	if (dev->hif2)
		hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);

	mt7996_dma_disable(dev, true);

	ret = mt7996_init_tx_queues(&dev->phy,
				    MT_TXQ_ID(dev->mphy.band_idx),
				    MT7996_TX_RING_SIZE,
				    MT_TXQ_RING_BASE(0),
				    wed);
	if (ret < 0)
		return ret;

	ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_WM,
				  MT_MCUQ_ID(MT_MCUQ_WM),
				  MT7996_TX_MCU_RING_SIZE,
				  MT_MCUQ_RING_BASE(MT_MCUQ_WM));
	if (ret)
		return ret;

	ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_WA,
				  MT_MCUQ_ID(MT_MCUQ_WA),
				  MT7996_TX_MCU_RING_SIZE,
				  MT_MCUQ_RING_BASE(MT_MCUQ_WA));
	if (ret)
		return ret;

	/* firmware download */
	ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_FWDL,
				  MT_MCUQ_ID(MT_MCUQ_FWDL),
				  MT7996_TX_FWDL_RING_SIZE,
				  MT_MCUQ_RING_BASE(MT_MCUQ_FWDL));
	if (ret)
		return ret;

	ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU],
			       MT_RXQ_ID(MT_RXQ_MCU),
			       MT7996_RX_MCU_RING_SIZE,
			       MT_RX_BUF_SIZE,
			       MT_RXQ_RING_BASE(MT_RXQ_MCU));
	if (ret)
		return ret;

	ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU_WA],
			       MT_RXQ_ID(MT_RXQ_MCU_WA),
			       MT7996_RX_MCU_RING_SIZE_WA,
			       MT_RX_BUF_SIZE,
			       MT_RXQ_RING_BASE(MT_RXQ_MCU_WA));
	if (ret)
		return ret;

	/* rx data queue for band0 and mt7996 band1 */
	if (mtk_wed_device_active(wed) && mtk_wed_get_rx_capa(wed)) {
		dev->mt76.q_rx[MT_RXQ_MAIN].flags = MT_WED_Q_RX(0);
		dev->mt76.q_rx[MT_RXQ_MAIN].wed = wed;
	}

	ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN],
			       MT_RXQ_ID(MT_RXQ_MAIN),
			       MT7996_RX_RING_SIZE,
			       MT_RX_BUF_SIZE,
			       MT_RXQ_RING_BASE(MT_RXQ_MAIN));
	if (ret)
		return ret;

	/* tx free notify event from WA for band0 */
	if (mtk_wed_device_active(wed) && !dev->has_rro) {
		dev->mt76.q_rx[MT_RXQ_MAIN_WA].flags = MT_WED_Q_TXFREE;
		dev->mt76.q_rx[MT_RXQ_MAIN_WA].wed = wed;
	}

	ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN_WA],
			       MT_RXQ_ID(MT_RXQ_MAIN_WA),
			       MT7996_RX_MCU_RING_SIZE,
			       MT_RX_BUF_SIZE,
			       MT_RXQ_RING_BASE(MT_RXQ_MAIN_WA));
	if (ret)
		return ret;

	if (mt7996_band_valid(dev, MT_BAND2)) {
		/* rx data queue for mt7996 band2 */
		rx_base = MT_RXQ_RING_BASE(MT_RXQ_BAND2) + hif1_ofs;
		ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_BAND2],
				       MT_RXQ_ID(MT_RXQ_BAND2),
				       MT7996_RX_RING_SIZE,
				       MT_RX_BUF_SIZE,
				       rx_base);
		if (ret)
			return ret;

		/* tx free notify event from WA for mt7996 band2
		 * use pcie0's rx ring3, but, redirect pcie0 rx ring3 interrupt to pcie1
		 */
		if (mtk_wed_device_active(wed_hif2) && !dev->has_rro) {
			dev->mt76.q_rx[MT_RXQ_BAND2_WA].flags = MT_WED_Q_TXFREE;
			dev->mt76.q_rx[MT_RXQ_BAND2_WA].wed = wed_hif2;
		}

		ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_BAND2_WA],
				       MT_RXQ_ID(MT_RXQ_BAND2_WA),
				       MT7996_RX_MCU_RING_SIZE,
				       MT_RX_BUF_SIZE,
				       MT_RXQ_RING_BASE(MT_RXQ_BAND2_WA));
		if (ret)
			return ret;
	} else if (mt7996_band_valid(dev, MT_BAND1)) {
		/* rx data queue for mt7992 band1 */
		rx_base = MT_RXQ_RING_BASE(MT_RXQ_BAND1) + hif1_ofs;
		ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_BAND1],
				       MT_RXQ_ID(MT_RXQ_BAND1),
				       MT7996_RX_RING_SIZE,
				       MT_RX_BUF_SIZE,
				       rx_base);
		if (ret)
			return ret;

		/* tx free notify event from WA for mt7992 band1 */
		rx_base = MT_RXQ_RING_BASE(MT_RXQ_BAND1_WA) + hif1_ofs;
		ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_BAND1_WA],
				       MT_RXQ_ID(MT_RXQ_BAND1_WA),
				       MT7996_RX_MCU_RING_SIZE,
				       MT_RX_BUF_SIZE,
				       rx_base);
		if (ret)
			return ret;
	}

	if (mtk_wed_device_active(wed) && mtk_wed_get_rx_capa(wed) &&
	    dev->has_rro) {
		/* rx rro data queue for band0 */
		dev->mt76.q_rx[MT_RXQ_RRO_BAND0].flags =
			MT_WED_RRO_Q_DATA(0) | MT_QFLAG_WED_RRO_EN;
		dev->mt76.q_rx[MT_RXQ_RRO_BAND0].wed = wed;
		ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_RRO_BAND0],
				       MT_RXQ_ID(MT_RXQ_RRO_BAND0),
				       MT7996_RX_RING_SIZE,
				       MT7996_RX_BUF_SIZE,
				       MT_RXQ_RING_BASE(MT_RXQ_RRO_BAND0));
		if (ret)
			return ret;

		/* tx free notify event from WA for band0 */
		dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0].flags = MT_WED_Q_TXFREE;
		dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0].wed = wed;

		ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0],
				       MT_RXQ_ID(MT_RXQ_TXFREE_BAND0),
				       MT7996_RX_MCU_RING_SIZE,
				       MT7996_RX_BUF_SIZE,
				       MT_RXQ_RING_BASE(MT_RXQ_TXFREE_BAND0));
		if (ret)
			return ret;

		if (mt7996_band_valid(dev, MT_BAND2)) {
			/* rx rro data queue for band2 */
			dev->mt76.q_rx[MT_RXQ_RRO_BAND2].flags =
				MT_WED_RRO_Q_DATA(1) | MT_QFLAG_WED_RRO_EN;
			dev->mt76.q_rx[MT_RXQ_RRO_BAND2].wed = wed;
			ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_RRO_BAND2],
					       MT_RXQ_ID(MT_RXQ_RRO_BAND2),
					       MT7996_RX_RING_SIZE,
					       MT7996_RX_BUF_SIZE,
					       MT_RXQ_RING_BASE(MT_RXQ_RRO_BAND2) + hif1_ofs);
			if (ret)
				return ret;

			/* tx free notify event from MAC for band2 */
			if (mtk_wed_device_active(wed_hif2)) {
				dev->mt76.q_rx[MT_RXQ_TXFREE_BAND2].flags = MT_WED_Q_TXFREE;
				dev->mt76.q_rx[MT_RXQ_TXFREE_BAND2].wed = wed_hif2;
			}

			ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_TXFREE_BAND2],
					       MT_RXQ_ID(MT_RXQ_TXFREE_BAND2),
					       MT7996_RX_MCU_RING_SIZE,
					       MT7996_RX_BUF_SIZE,
					       MT_RXQ_RING_BASE(MT_RXQ_TXFREE_BAND2) + hif1_ofs);
			if (ret)
				return ret;
		}
	}

	ret = mt76_init_queues(dev, mt76_dma_rx_poll);
	if (ret < 0)
		return ret;

	netif_napi_add_tx(dev->mt76.tx_napi_dev, &dev->mt76.tx_napi,
			  mt7996_poll_tx);
	napi_enable(&dev->mt76.tx_napi);

	mt7996_dma_enable(dev, false);

	return 0;
}
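
/* Tear down and reinitialize all DMA rings after a WFDMA or chip reset;
 * "force" additionally triggers a WF subsystem reset.
 */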
void mt7996_dma_reset(struct mt7996_dev *dev, bool force)
{
	struct mt76_phy *phy2 = dev->mt76.phys[MT_BAND1];
	struct mt76_phy *phy3 = dev->mt76.phys[MT_BAND2];
	u32 hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);
	int i;

	mt76_clear(dev, MT_WFDMA0_GLO_CFG,
		   MT_WFDMA0_GLO_CFG_TX_DMA_EN |
		   MT_WFDMA0_GLO_CFG_RX_DMA_EN);

	if (dev->hif2)
		mt76_clear(dev, MT_WFDMA0_GLO_CFG + hif1_ofs,
			   MT_WFDMA0_GLO_CFG_TX_DMA_EN |
			   MT_WFDMA0_GLO_CFG_RX_DMA_EN);

	usleep_range(1000, 2000);

	for (i = 0; i < __MT_TXQ_MAX; i++) {
		mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], true);
		if (phy2)
			mt76_queue_tx_cleanup(dev, phy2->q_tx[i], true);
		if (phy3)
			mt76_queue_tx_cleanup(dev, phy3->q_tx[i], true);
	}

	for (i = 0; i < __MT_MCUQ_MAX; i++)
		mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[i], true);

	mt76_for_each_q_rx(&dev->mt76, i)
		mt76_queue_rx_cleanup(dev, &dev->mt76.q_rx[i]);

	mt76_tx_status_check(&dev->mt76, true);

	if (force)
		mt7996_wfsys_reset(dev);

	if (dev->hif2 && mtk_wed_device_active(&dev->mt76.mmio.wed_hif2))
		mtk_wed_device_dma_reset(&dev->mt76.mmio.wed_hif2);

	if (mtk_wed_device_active(&dev->mt76.mmio.wed))
		mtk_wed_device_dma_reset(&dev->mt76.mmio.wed);

	mt7996_dma_disable(dev, force);
	mt76_wed_dma_reset(&dev->mt76);

	/* reset hw queues */
	for (i = 0; i < __MT_TXQ_MAX; i++) {
		mt76_dma_reset_tx_queue(&dev->mt76, dev->mphy.q_tx[i]);
		if (phy2)
			mt76_dma_reset_tx_queue(&dev->mt76, phy2->q_tx[i]);
		if (phy3)
			mt76_dma_reset_tx_queue(&dev->mt76, phy3->q_tx[i]);
	}

	for (i = 0; i < __MT_MCUQ_MAX; i++)
		mt76_queue_reset(dev, dev->mt76.q_mcu[i]);

	mt76_for_each_q_rx(&dev->mt76, i) {
		if (mtk_wed_device_active(&dev->mt76.mmio.wed))
			if (mt76_queue_is_wed_rro(&dev->mt76.q_rx[i]) ||
			    mt76_queue_is_wed_tx_free(&dev->mt76.q_rx[i]))
				continue;

		mt76_queue_reset(dev, &dev->mt76.q_rx[i]);
	}

	mt76_tx_status_check(&dev->mt76, true);

	mt76_for_each_q_rx(&dev->mt76, i)
		mt76_queue_rx_reset(dev, i);

	mt7996_dma_enable(dev, !force);
}
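
/* Disable WFDMA and free all DMA rings on driver teardown. */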
void mt7996_dma_cleanup(struct mt7996_dev *dev)
{
	mt7996_dma_disable(dev, true);

	mt76_dma_cleanup(&dev->mt76);
}