// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
 */

#include <linux/kernel.h>
#include <linux/irq.h>

#include "mt76x02.h"
#include "mt76x02_mcu.h"
#include "mt76x02_trace.h"

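/*
 * Pre-TBTT tasklet: runs shortly before each beacon interval to resync
 * the beacon timer, rebuild the beacon templates of all active
 * interfaces, and push buffered broadcast/multicast frames onto the PSD
 * queue. It bails out while off-channel and skips the buffered traffic
 * once a channel switch announcement has completed.
 */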
static void mt76x02_pre_tbtt_tasklet(unsigned long arg)
{
        struct mt76x02_dev *dev = (struct mt76x02_dev *)arg;
        struct mt76_queue *q = dev->mt76.q_tx[MT_TXQ_PSD].q;
        struct beacon_bc_data data = {};
        struct sk_buff *skb;
        int i;

        if (mt76_hw(dev)->conf.flags & IEEE80211_CONF_OFFCHANNEL)
                return;

        mt76x02_resync_beacon_timer(dev);

        ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev),
                IEEE80211_IFACE_ITER_RESUME_ALL,
                mt76x02_update_beacon_iter, dev);

        mt76_csa_check(&dev->mt76);

        if (dev->mt76.csa_complete)
                return;

        mt76x02_enqueue_buffered_bc(dev, &data, 8);

        if (!skb_queue_len(&data.q))
                return;

        for (i = 0; i < ARRAY_SIZE(data.tail); i++) {
                if (!data.tail[i])
                        continue;

                mt76_skb_set_moredata(data.tail[i], false);
        }

        spin_lock_bh(&q->lock);
        while ((skb = __skb_dequeue(&data.q)) != NULL) {
                struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
                struct ieee80211_vif *vif = info->control.vif;
                struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;

                mt76_tx_queue_skb(dev, MT_TXQ_PSD, skb, &mvif->group_wcid,
                                  NULL);
        }
        spin_unlock_bh(&q->lock);
}

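/*
 * Beacon hooks for the MMIO flavour of the driver: beaconing is driven
 * by the pre-TBTT/TBTT timer interrupts, so enabling or disabling
 * beacons amounts to gating the tasklet and those interrupt sources.
 */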
static void mt76x02e_pre_tbtt_enable(struct mt76x02_dev *dev, bool en)
{
        if (en)
                tasklet_enable(&dev->mt76.pre_tbtt_tasklet);
        else
                tasklet_disable(&dev->mt76.pre_tbtt_tasklet);
}

static void mt76x02e_beacon_enable(struct mt76x02_dev *dev, bool en)
{
        mt76_rmw_field(dev, MT_INT_TIMER_EN, MT_INT_TIMER_EN_PRE_TBTT_EN, en);
        if (en)
                mt76x02_irq_enable(dev, MT_INT_PRE_TBTT | MT_INT_TBTT);
        else
                mt76x02_irq_disable(dev, MT_INT_PRE_TBTT | MT_INT_TBTT);
}

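/*
 * Register the MMIO beacon ops and program the beacon timers. The GP
 * timer configured alongside appears to double as the DFS polling
 * interval (MT_DFS_GP_INTERVAL), and the pre-TBTT lead time gives the
 * tasklet room to rebuild templates before the hardware sends a beacon.
 */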
void mt76x02e_init_beacon_config(struct mt76x02_dev *dev)
{
        static const struct mt76x02_beacon_ops beacon_ops = {
                .nslots = 8,
                .slot_size = 1024,
                .pre_tbtt_enable = mt76x02e_pre_tbtt_enable,
                .beacon_enable = mt76x02e_beacon_enable,
        };

        dev->beacon_ops = &beacon_ops;

        /* Fire a pre-TBTT interrupt 8 ms before TBTT */
        mt76_rmw_field(dev, MT_INT_TIMER_CFG, MT_INT_TIMER_CFG_PRE_TBTT,
                       8 << 4);
        mt76_rmw_field(dev, MT_INT_TIMER_CFG, MT_INT_TIMER_CFG_GP_TIMER,
                       MT_DFS_GP_INTERVAL);
        mt76_wr(dev, MT_INT_TIMER_EN, 0);

        mt76x02_init_beacon_config(dev);
}
EXPORT_SYMBOL_GPL(mt76x02e_init_beacon_config);

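/*
 * Ring setup helpers: every TX queue gets a freshly allocated hardware
 * ring plus its completion interrupt; RX queues additionally take the
 * receive buffer size.
 */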
static int
mt76x02_init_tx_queue(struct mt76x02_dev *dev, struct mt76_sw_queue *q,
                      int idx, int n_desc)
{
        struct mt76_queue *hwq;
        int err;

        hwq = devm_kzalloc(dev->mt76.dev, sizeof(*hwq), GFP_KERNEL);
        if (!hwq)
                return -ENOMEM;

        err = mt76_queue_alloc(dev, hwq, idx, n_desc, 0, MT_TX_RING_BASE);
        if (err < 0)
                return err;

        INIT_LIST_HEAD(&q->swq);
        q->q = hwq;

        mt76x02_irq_enable(dev, MT_INT_TX_DONE(idx));

        return 0;
}

static int
mt76x02_init_rx_queue(struct mt76x02_dev *dev, struct mt76_queue *q,
                      int idx, int n_desc, int bufsize)
{
        int err;

        err = mt76_queue_alloc(dev, q, idx, n_desc, bufsize,
                               MT_RX_RING_BASE);
        if (err < 0)
                return err;

        mt76x02_irq_enable(dev, MT_INT_RX_DONE(idx));

        return 0;
}

static void mt76x02_process_tx_status_fifo(struct mt76x02_dev *dev)
{
        struct mt76x02_tx_status stat;
        u8 update = 1;

        while (kfifo_get(&dev->txstatus_fifo, &stat))
                mt76x02_send_tx_status(dev, &stat, &update);
}

static void mt76x02_tx_tasklet(unsigned long data)
{
        struct mt76x02_dev *dev = (struct mt76x02_dev *)data;

        mt76x02_mac_poll_tx_status(dev, false);
        mt76x02_process_tx_status_fifo(dev);

        mt76_txq_schedule_all(&dev->mt76);
}

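/*
 * TX NAPI poll. The cleanup loop deliberately runs a second time after
 * napi_complete_done() has re-armed MT_INT_TX_DONE_ALL, presumably to
 * pick up frames that completed in the window before the interrupt was
 * re-enabled.
 */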
static int mt76x02_poll_tx(struct napi_struct *napi, int budget)
{
        struct mt76x02_dev *dev = container_of(napi, struct mt76x02_dev,
                                               mt76.tx_napi);
        int i;

        mt76x02_mac_poll_tx_status(dev, false);

        for (i = MT_TXQ_MCU; i >= 0; i--)
                mt76_queue_tx_cleanup(dev, i, false);

        if (napi_complete_done(napi, 0))
                mt76x02_irq_enable(dev, MT_INT_TX_DONE_ALL);

        for (i = MT_TXQ_MCU; i >= 0; i--)
                mt76_queue_tx_cleanup(dev, i, false);

        tasklet_schedule(&dev->mt76.tx_tasklet);

        return 0;
}

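/*
 * One-time DMA bring-up: allocate the TX status FIFO, register both
 * tasklets, reset the WPDMA indices and create all TX/RX rings before
 * enabling TX NAPI.
 */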
int mt76x02_dma_init(struct mt76x02_dev *dev)
{
        struct mt76_txwi_cache __maybe_unused *t;
        int i, ret, fifo_size;
        struct mt76_queue *q;
        void *status_fifo;

        BUILD_BUG_ON(sizeof(struct mt76x02_rxwi) > MT_RX_HEADROOM);

        fifo_size = roundup_pow_of_two(32 * sizeof(struct mt76x02_tx_status));
        status_fifo = devm_kzalloc(dev->mt76.dev, fifo_size, GFP_KERNEL);
        if (!status_fifo)
                return -ENOMEM;

        tasklet_init(&dev->mt76.tx_tasklet, mt76x02_tx_tasklet,
                     (unsigned long)dev);
        tasklet_init(&dev->mt76.pre_tbtt_tasklet, mt76x02_pre_tbtt_tasklet,
                     (unsigned long)dev);

        spin_lock_init(&dev->txstatus_fifo_lock);
        kfifo_init(&dev->txstatus_fifo, status_fifo, fifo_size);

        mt76_dma_attach(&dev->mt76);

        mt76_wr(dev, MT_WPDMA_RST_IDX, ~0);

        for (i = 0; i < IEEE80211_NUM_ACS; i++) {
                ret = mt76x02_init_tx_queue(dev, &dev->mt76.q_tx[i],
                                            mt76_ac_to_hwq(i),
                                            MT_TX_RING_SIZE);
                if (ret)
                        return ret;
        }

        ret = mt76x02_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_PSD],
                                    MT_TX_HW_QUEUE_MGMT, MT_TX_RING_SIZE);
        if (ret)
                return ret;

        ret = mt76x02_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_MCU],
                                    MT_TX_HW_QUEUE_MCU, MT_MCU_RING_SIZE);
        if (ret)
                return ret;

        ret = mt76x02_init_rx_queue(dev, &dev->mt76.q_rx[MT_RXQ_MCU], 1,
                                    MT_MCU_RING_SIZE, MT_RX_BUF_SIZE);
        if (ret)
                return ret;

        q = &dev->mt76.q_rx[MT_RXQ_MAIN];
        q->buf_offset = MT_RX_HEADROOM - sizeof(struct mt76x02_rxwi);
        ret = mt76x02_init_rx_queue(dev, q, 0, MT76X02_RX_RING_SIZE,
                                    MT_RX_BUF_SIZE);
        if (ret)
                return ret;

        ret = mt76_init_queues(dev);
        if (ret)
                return ret;

        netif_tx_napi_add(&dev->mt76.napi_dev, &dev->mt76.tx_napi,
                          mt76x02_poll_tx, NAPI_POLL_WEIGHT);
        napi_enable(&dev->mt76.tx_napi);

        return 0;
}
EXPORT_SYMBOL_GPL(mt76x02_dma_init);

void mt76x02_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q)
{
        struct mt76x02_dev *dev;

        dev = container_of(mdev, struct mt76x02_dev, mt76);
        mt76x02_irq_enable(dev, MT_INT_RX_DONE(q));
}
EXPORT_SYMBOL_GPL(mt76x02_rx_poll_complete);

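/*
 * Top-level interrupt handler. Sources are acknowledged up front, then
 * dispatched: RX and TX completions are masked and deferred to NAPI,
 * pre-TBTT/TBTT feed the beacon machinery, and the GP timer kicks the
 * DFS tasklet.
 */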
irqreturn_t mt76x02_irq_handler(int irq, void *dev_instance)
{
        struct mt76x02_dev *dev = dev_instance;
        u32 intr;

        intr = mt76_rr(dev, MT_INT_SOURCE_CSR);
        mt76_wr(dev, MT_INT_SOURCE_CSR, intr);

        if (!test_bit(MT76_STATE_INITIALIZED, &dev->mt76.state))
                return IRQ_NONE;

        trace_dev_irq(dev, intr, dev->mt76.mmio.irqmask);

        intr &= dev->mt76.mmio.irqmask;

        if (intr & MT_INT_RX_DONE(0)) {
                mt76x02_irq_disable(dev, MT_INT_RX_DONE(0));
                napi_schedule(&dev->mt76.napi[0]);
        }

        if (intr & MT_INT_RX_DONE(1)) {
                mt76x02_irq_disable(dev, MT_INT_RX_DONE(1));
                napi_schedule(&dev->mt76.napi[1]);
        }

        if (intr & MT_INT_PRE_TBTT)
                tasklet_schedule(&dev->mt76.pre_tbtt_tasklet);

        /* send buffered multicast frames now */
        if (intr & MT_INT_TBTT) {
                if (dev->mt76.csa_complete)
                        mt76_csa_finish(&dev->mt76);
                else
                        mt76_queue_kick(dev, dev->mt76.q_tx[MT_TXQ_PSD].q);
        }

        if (intr & MT_INT_TX_STAT)
                mt76x02_mac_poll_tx_status(dev, true);

        if (intr & (MT_INT_TX_STAT | MT_INT_TX_DONE_ALL)) {
                mt76x02_irq_disable(dev, MT_INT_TX_DONE_ALL);
                napi_schedule(&dev->mt76.tx_napi);
        }

        if (intr & MT_INT_GPTIMER) {
                mt76x02_irq_disable(dev, MT_INT_GPTIMER);
                tasklet_schedule(&dev->dfs_pd.dfs_tasklet);
        }

        return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(mt76x02_irq_handler);

static void mt76x02_dma_enable(struct mt76x02_dev *dev)
{
        u32 val;

        mt76_wr(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_TX);
        mt76x02_wait_for_wpdma(&dev->mt76, 1000);
        usleep_range(50, 100);

        val = FIELD_PREP(MT_WPDMA_GLO_CFG_DMA_BURST_SIZE, 3) |
              MT_WPDMA_GLO_CFG_TX_DMA_EN |
              MT_WPDMA_GLO_CFG_RX_DMA_EN;
        mt76_set(dev, MT_WPDMA_GLO_CFG, val);
        mt76_clear(dev, MT_WPDMA_GLO_CFG,
                   MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE);
}

void mt76x02_dma_cleanup(struct mt76x02_dev *dev)
{
        tasklet_kill(&dev->mt76.tx_tasklet);
        mt76_dma_cleanup(&dev->mt76);
}
EXPORT_SYMBOL_GPL(mt76x02_dma_cleanup);

void mt76x02_dma_disable(struct mt76x02_dev *dev)
{
        u32 val = mt76_rr(dev, MT_WPDMA_GLO_CFG);

        val &= MT_WPDMA_GLO_CFG_DMA_BURST_SIZE |
               MT_WPDMA_GLO_CFG_BIG_ENDIAN |
               MT_WPDMA_GLO_CFG_HDR_SEG_LEN;
        val |= MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE;
        mt76_wr(dev, MT_WPDMA_GLO_CFG, val);
}
EXPORT_SYMBOL_GPL(mt76x02_dma_disable);

void mt76x02_mac_start(struct mt76x02_dev *dev)
{
        mt76x02_mac_reset_counters(dev);
        mt76x02_dma_enable(dev);
        mt76_wr(dev, MT_RX_FILTR_CFG, dev->mt76.rxfilter);
        mt76_wr(dev, MT_MAC_SYS_CTRL,
                MT_MAC_SYS_CTRL_ENABLE_TX |
                MT_MAC_SYS_CTRL_ENABLE_RX);
        mt76x02_irq_enable(dev,
                           MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_ALL |
                           MT_INT_TX_STAT);
}
EXPORT_SYMBOL_GPL(mt76x02_mac_start);

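/*
 * Hang heuristic: report a stuck TX path when a busy hardware queue's
 * DMA index has not advanced since the previous watchdog pass.
 */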
static bool mt76x02_tx_hang(struct mt76x02_dev *dev)
{
        u32 dma_idx, prev_dma_idx;
        struct mt76_queue *q;
        int i;

        for (i = 0; i < 4; i++) {
                q = dev->mt76.q_tx[i].q;

                if (!q->queued)
                        continue;

                prev_dma_idx = dev->mt76.tx_dma_idx[i];
                dma_idx = readl(&q->regs->dma_idx);
                dev->mt76.tx_dma_idx[i] = dma_idx;

                if (prev_dma_idx == dma_idx)
                        break;
        }

        return i < 4;
}

static void mt76x02_key_sync(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
                             struct ieee80211_sta *sta,
                             struct ieee80211_key_conf *key, void *data)
{
        struct mt76x02_dev *dev = hw->priv;
        struct mt76_wcid *wcid;

        if (!sta)
                return;

        wcid = (struct mt76_wcid *)sta->drv_priv;

        if (wcid->hw_key_idx != key->keyidx || wcid->sw_iv)
                return;

        mt76x02_mac_wcid_sync_pn(dev, wcid->idx, key);
}

static void mt76x02_reset_state(struct mt76x02_dev *dev)
{
        int i;

        lockdep_assert_held(&dev->mt76.mutex);

        clear_bit(MT76_STATE_RUNNING, &dev->mt76.state);

        rcu_read_lock();
        ieee80211_iter_keys_rcu(dev->mt76.hw, NULL, mt76x02_key_sync, NULL);
        rcu_read_unlock();

        for (i = 0; i < ARRAY_SIZE(dev->mt76.wcid); i++) {
                struct ieee80211_sta *sta;
                struct ieee80211_vif *vif;
                struct mt76x02_sta *msta;
                struct mt76_wcid *wcid;
                void *priv;

                wcid = rcu_dereference_protected(dev->mt76.wcid[i],
                                        lockdep_is_held(&dev->mt76.mutex));
                if (!wcid)
                        continue;

                priv = msta = container_of(wcid, struct mt76x02_sta, wcid);
                sta = container_of(priv, struct ieee80211_sta, drv_priv);

                priv = msta->vif;
                vif = container_of(priv, struct ieee80211_vif, drv_priv);

                __mt76_sta_remove(&dev->mt76, vif, sta);
                memset(msta, 0, sizeof(*msta));
        }

        dev->vif_mask = 0;
        dev->mt76.beacon_mask = 0;
}

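/*
 * Full watchdog recovery: quiesce mac80211, NAPI and the tasklets,
 * reset the MAC and DMA engines (restarting the MCU firmware when the
 * bus provides a restart hook), then bring everything back up. With an
 * MCU restart the stack is bounced through ieee80211_restart_hw()
 * instead of simply waking the queues.
 */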
static void mt76x02_watchdog_reset(struct mt76x02_dev *dev)
{
        u32 mask = dev->mt76.mmio.irqmask;
        bool restart = dev->mt76.mcu_ops->mcu_restart;
        int i;

        ieee80211_stop_queues(dev->mt76.hw);
        set_bit(MT76_RESET, &dev->mt76.state);

        tasklet_disable(&dev->mt76.pre_tbtt_tasklet);
        tasklet_disable(&dev->mt76.tx_tasklet);
        napi_disable(&dev->mt76.tx_napi);

        for (i = 0; i < ARRAY_SIZE(dev->mt76.napi); i++)
                napi_disable(&dev->mt76.napi[i]);

        mutex_lock(&dev->mt76.mutex);

        if (restart)
                mt76x02_reset_state(dev);

        if (dev->mt76.beacon_mask)
                mt76_clear(dev, MT_BEACON_TIME_CFG,
                           MT_BEACON_TIME_CFG_BEACON_TX |
                           MT_BEACON_TIME_CFG_TBTT_EN);

        mt76x02_irq_disable(dev, mask);

        /* perform device reset */
        mt76_clear(dev, MT_TXOP_CTRL_CFG, MT_TXOP_ED_CCA_EN);
        mt76_wr(dev, MT_MAC_SYS_CTRL, 0);
        mt76_clear(dev, MT_WPDMA_GLO_CFG,
                   MT_WPDMA_GLO_CFG_TX_DMA_EN | MT_WPDMA_GLO_CFG_RX_DMA_EN);
        usleep_range(5000, 10000);
        mt76_wr(dev, MT_INT_SOURCE_CSR, 0xffffffff);

        /* let fw reset DMA */
        mt76_set(dev, 0x734, 0x3);

        if (restart)
                mt76_mcu_restart(dev);

        for (i = 0; i < ARRAY_SIZE(dev->mt76.q_tx); i++)
                mt76_queue_tx_cleanup(dev, i, true);

        for (i = 0; i < ARRAY_SIZE(dev->mt76.q_rx); i++)
                mt76_queue_rx_reset(dev, i);

        mt76x02_mac_start(dev);

        if (dev->ed_monitor)
                mt76_set(dev, MT_TXOP_CTRL_CFG, MT_TXOP_ED_CCA_EN);

        if (dev->mt76.beacon_mask && !restart)
                mt76_set(dev, MT_BEACON_TIME_CFG,
                         MT_BEACON_TIME_CFG_BEACON_TX |
                         MT_BEACON_TIME_CFG_TBTT_EN);

        mt76x02_irq_enable(dev, mask);

        mutex_unlock(&dev->mt76.mutex);

        clear_bit(MT76_RESET, &dev->mt76.state);

        tasklet_enable(&dev->mt76.tx_tasklet);
        napi_enable(&dev->mt76.tx_napi);
        napi_schedule(&dev->mt76.tx_napi);

        tasklet_enable(&dev->mt76.pre_tbtt_tasklet);

        for (i = 0; i < ARRAY_SIZE(dev->mt76.napi); i++) {
                napi_enable(&dev->mt76.napi[i]);
                napi_schedule(&dev->mt76.napi[i]);
        }

        if (restart) {
                mt76x02_mcu_function_select(dev, Q_SELECT, 1);
                ieee80211_restart_hw(dev->mt76.hw);
        } else {
                ieee80211_wake_queues(dev->mt76.hw);
                mt76_txq_schedule_all(&dev->mt76);
        }
}

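/*
 * Only reset after MT_TX_HANG_TH consecutive hang detections, or
 * immediately on an MCU timeout; the saved DMA indices are invalidated
 * afterwards so the next pass starts from a clean snapshot.
 */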
static void mt76x02_check_tx_hang(struct mt76x02_dev *dev)
{
        if (mt76x02_tx_hang(dev)) {
                if (++dev->tx_hang_check >= MT_TX_HANG_TH)
                        goto restart;
        } else {
                dev->tx_hang_check = 0;
        }

        if (dev->mcu_timeout)
                goto restart;

        return;

restart:
        mt76x02_watchdog_reset(dev);

        mutex_lock(&dev->mt76.mmio.mcu.mutex);
        dev->mcu_timeout = 0;
        mutex_unlock(&dev->mt76.mmio.mcu.mutex);

        dev->tx_hang_reset++;
        dev->tx_hang_check = 0;
        memset(dev->mt76.tx_dma_idx, 0xff,
               sizeof(dev->mt76.tx_dma_idx));
}

void mt76x02_wdt_work(struct work_struct *work)
{
        struct mt76x02_dev *dev = container_of(work, struct mt76x02_dev,
                                               wdt_work.work);

        mt76x02_check_tx_hang(dev);

        ieee80211_queue_delayed_work(mt76_hw(dev), &dev->wdt_work,
                                     MT_WATCHDOG_TIME);
}
,