// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
 */

#include <linux/kernel.h>
#include <linux/irq.h>

#include "mt76x02.h"
#include "mt76x02_mcu.h"

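/* Pre-TBTT tasklet: refresh the beacon templates and queue any buffered
 * broadcast/multicast frames on the PSD queue shortly before the next TBTT.
 */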
static void mt76x02_pre_tbtt_tasklet(unsigned long arg)
{
        struct mt76x02_dev *dev = (struct mt76x02_dev *)arg;
        struct mt76_queue *q = dev->mt76.q_tx[MT_TXQ_PSD].q;
        struct beacon_bc_data data = {};
        struct sk_buff *skb;
        int i;

        if (mt76_hw(dev)->conf.flags & IEEE80211_CONF_OFFCHANNEL)
                return;

        mt76x02_resync_beacon_timer(dev);

        /* Prevent corrupt transmissions during update */
        mt76_set(dev, MT_BCN_BYPASS_MASK, 0xffff);
        dev->beacon_data_count = 0;

        ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev),
                IEEE80211_IFACE_ITER_RESUME_ALL,
                mt76x02_update_beacon_iter, dev);

        mt76_wr(dev, MT_BCN_BYPASS_MASK,
                0xff00 | ~(0xff00 >> dev->beacon_data_count));

        mt76_csa_check(&dev->mt76);

        if (dev->mt76.csa_complete)
                return;

        mt76x02_enqueue_buffered_bc(dev, &data, 8);

        if (!skb_queue_len(&data.q))
                return;

        for (i = 0; i < ARRAY_SIZE(data.tail); i++) {
                if (!data.tail[i])
                        continue;

                mt76_skb_set_moredata(data.tail[i], false);
        }

        spin_lock_bh(&q->lock);
        while ((skb = __skb_dequeue(&data.q)) != NULL) {
                struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
                struct ieee80211_vif *vif = info->control.vif;
                struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;

                mt76_tx_queue_skb(dev, MT_TXQ_PSD, skb, &mvif->group_wcid,
                                  NULL);
        }
        spin_unlock_bh(&q->lock);
}

static void mt76x02e_pre_tbtt_enable(struct mt76x02_dev *dev, bool en)
{
        if (en)
                tasklet_enable(&dev->mt76.pre_tbtt_tasklet);
        else
                tasklet_disable(&dev->mt76.pre_tbtt_tasklet);
}

static void mt76x02e_beacon_enable(struct mt76x02_dev *dev, bool en)
{
        mt76_rmw_field(dev, MT_INT_TIMER_EN, MT_INT_TIMER_EN_PRE_TBTT_EN, en);
        if (en)
                mt76x02_irq_enable(dev, MT_INT_PRE_TBTT | MT_INT_TBTT);
        else
                mt76x02_irq_disable(dev, MT_INT_PRE_TBTT | MT_INT_TBTT);
}

void mt76x02e_init_beacon_config(struct mt76x02_dev *dev)
{
        static const struct mt76x02_beacon_ops beacon_ops = {
                .pre_tbtt_enable = mt76x02e_pre_tbtt_enable,
                .beacon_enable = mt76x02e_beacon_enable,
        };

        dev->beacon_ops = &beacon_ops;

        /* Fire a pre-TBTT interrupt 8 ms before TBTT */
        mt76_rmw_field(dev, MT_INT_TIMER_CFG, MT_INT_TIMER_CFG_PRE_TBTT,
                       8 << 4);
        mt76_rmw_field(dev, MT_INT_TIMER_CFG, MT_INT_TIMER_CFG_GP_TIMER,
                       MT_DFS_GP_INTERVAL);
        mt76_wr(dev, MT_INT_TIMER_EN, 0);

        mt76x02_init_beacon_config(dev);
}
EXPORT_SYMBOL_GPL(mt76x02e_init_beacon_config);

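/* Allocate a hardware TX ring for the given index, hook it up to the
 * software queue and enable its TX-done interrupt.
 */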
static int
mt76x02_init_tx_queue(struct mt76x02_dev *dev, struct mt76_sw_queue *q,
                      int idx, int n_desc)
{
        struct mt76_queue *hwq;
        int err;

        hwq = devm_kzalloc(dev->mt76.dev, sizeof(*hwq), GFP_KERNEL);
        if (!hwq)
                return -ENOMEM;

        err = mt76_queue_alloc(dev, hwq, idx, n_desc, 0, MT_TX_RING_BASE);
        if (err < 0)
                return err;

        INIT_LIST_HEAD(&q->swq);
        q->q = hwq;

        mt76x02_irq_enable(dev, MT_INT_TX_DONE(idx));

        return 0;
}

static int
mt76x02_init_rx_queue(struct mt76x02_dev *dev, struct mt76_queue *q,
                      int idx, int n_desc, int bufsize)
{
        int err;

        err = mt76_queue_alloc(dev, q, idx, n_desc, bufsize,
                               MT_RX_RING_BASE);
        if (err < 0)
                return err;

        mt76x02_irq_enable(dev, MT_INT_RX_DONE(idx));

        return 0;
}

static void mt76x02_process_tx_status_fifo(struct mt76x02_dev *dev)
{
        struct mt76x02_tx_status stat;
        u8 update = 1;

        while (kfifo_get(&dev->txstatus_fifo, &stat))
                mt76x02_send_tx_status(dev, &stat, &update);
}

static void mt76x02_tx_tasklet(unsigned long data)
{
        struct mt76x02_dev *dev = (struct mt76x02_dev *)data;

        mt76x02_mac_poll_tx_status(dev, false);
        mt76x02_process_tx_status_fifo(dev);

        mt76_txq_schedule_all(&dev->mphy);
}

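/* TX NAPI poll: reap completed frames from the TX rings and re-enable the
 * TX-done interrupt once cleanup is finished.
 */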
static int mt76x02_poll_tx(struct napi_struct *napi, int budget)
{
        struct mt76x02_dev *dev = container_of(napi, struct mt76x02_dev,
                                               mt76.tx_napi);
        int i;

        mt76x02_mac_poll_tx_status(dev, false);

        for (i = MT_TXQ_MCU; i >= 0; i--)
                mt76_queue_tx_cleanup(dev, i, false);

        if (napi_complete_done(napi, 0))
                mt76x02_irq_enable(dev, MT_INT_TX_DONE_ALL);

        for (i = MT_TXQ_MCU; i >= 0; i--)
                mt76_queue_tx_cleanup(dev, i, false);

        tasklet_schedule(&dev->mt76.tx_tasklet);

        return 0;
}

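/* Set up all TX/RX DMA rings, the TX status FIFO, the deferred-work tasklets
 * and the TX NAPI context.
 */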
int mt76x02_dma_init(struct mt76x02_dev *dev)
{
        struct mt76_txwi_cache __maybe_unused *t;
        int i, ret, fifo_size;
        struct mt76_queue *q;
        void *status_fifo;

        BUILD_BUG_ON(sizeof(struct mt76x02_rxwi) > MT_RX_HEADROOM);

        fifo_size = roundup_pow_of_two(32 * sizeof(struct mt76x02_tx_status));
        status_fifo = devm_kzalloc(dev->mt76.dev, fifo_size, GFP_KERNEL);
        if (!status_fifo)
                return -ENOMEM;

        tasklet_init(&dev->mt76.tx_tasklet, mt76x02_tx_tasklet,
                     (unsigned long)dev);
        tasklet_init(&dev->mt76.pre_tbtt_tasklet, mt76x02_pre_tbtt_tasklet,
                     (unsigned long)dev);

        spin_lock_init(&dev->txstatus_fifo_lock);
        kfifo_init(&dev->txstatus_fifo, status_fifo, fifo_size);

        mt76_dma_attach(&dev->mt76);

        mt76_wr(dev, MT_WPDMA_RST_IDX, ~0);

        for (i = 0; i < IEEE80211_NUM_ACS; i++) {
                ret = mt76x02_init_tx_queue(dev, &dev->mt76.q_tx[i],
                                            mt76_ac_to_hwq(i),
                                            MT_TX_RING_SIZE);
                if (ret)
                        return ret;
        }

        ret = mt76x02_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_PSD],
                                    MT_TX_HW_QUEUE_MGMT, MT_TX_RING_SIZE);
        if (ret)
                return ret;

        ret = mt76x02_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_MCU],
                                    MT_TX_HW_QUEUE_MCU, MT_MCU_RING_SIZE);
        if (ret)
                return ret;

        ret = mt76x02_init_rx_queue(dev, &dev->mt76.q_rx[MT_RXQ_MCU], 1,
                                    MT_MCU_RING_SIZE, MT_RX_BUF_SIZE);
        if (ret)
                return ret;

        q = &dev->mt76.q_rx[MT_RXQ_MAIN];
        q->buf_offset = MT_RX_HEADROOM - sizeof(struct mt76x02_rxwi);
        ret = mt76x02_init_rx_queue(dev, q, 0, MT76X02_RX_RING_SIZE,
                                    MT_RX_BUF_SIZE);
        if (ret)
                return ret;

        ret = mt76_init_queues(dev);
        if (ret)
                return ret;

        netif_tx_napi_add(&dev->mt76.napi_dev, &dev->mt76.tx_napi,
                          mt76x02_poll_tx, NAPI_POLL_WEIGHT);
        napi_enable(&dev->mt76.tx_napi);

        return 0;
}
EXPORT_SYMBOL_GPL(mt76x02_dma_init);

void mt76x02_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q)
{
        struct mt76x02_dev *dev;

        dev = container_of(mdev, struct mt76x02_dev, mt76);
        mt76x02_irq_enable(dev, MT_INT_RX_DONE(q));
}
EXPORT_SYMBOL_GPL(mt76x02_rx_poll_complete);

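/* Top-half interrupt handler: acknowledge the interrupt source register and
 * defer the actual work to the NAPI contexts and tasklets.
 */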
irqreturn_t mt76x02_irq_handler(int irq, void *dev_instance)
{
        struct mt76x02_dev *dev = dev_instance;
        u32 intr;

        intr = mt76_rr(dev, MT_INT_SOURCE_CSR);
        mt76_wr(dev, MT_INT_SOURCE_CSR, intr);

        if (!test_bit(MT76_STATE_INITIALIZED, &dev->mphy.state))
                return IRQ_NONE;

        trace_dev_irq(&dev->mt76, intr, dev->mt76.mmio.irqmask);

        intr &= dev->mt76.mmio.irqmask;

        if (intr & MT_INT_RX_DONE(0)) {
                mt76x02_irq_disable(dev, MT_INT_RX_DONE(0));
                napi_schedule(&dev->mt76.napi[0]);
        }

        if (intr & MT_INT_RX_DONE(1)) {
                mt76x02_irq_disable(dev, MT_INT_RX_DONE(1));
                napi_schedule(&dev->mt76.napi[1]);
        }

        if (intr & MT_INT_PRE_TBTT)
                tasklet_schedule(&dev->mt76.pre_tbtt_tasklet);

        /* send buffered multicast frames now */
        if (intr & MT_INT_TBTT) {
                if (dev->mt76.csa_complete)
                        mt76_csa_finish(&dev->mt76);
                else
                        mt76_queue_kick(dev, dev->mt76.q_tx[MT_TXQ_PSD].q);
        }

        if (intr & MT_INT_TX_STAT)
                mt76x02_mac_poll_tx_status(dev, true);

        if (intr & (MT_INT_TX_STAT | MT_INT_TX_DONE_ALL)) {
                mt76x02_irq_disable(dev, MT_INT_TX_DONE_ALL);
                napi_schedule(&dev->mt76.tx_napi);
        }

        if (intr & MT_INT_GPTIMER) {
                mt76x02_irq_disable(dev, MT_INT_GPTIMER);
                tasklet_schedule(&dev->dfs_pd.dfs_tasklet);
        }

        return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(mt76x02_irq_handler);

static void mt76x02_dma_enable(struct mt76x02_dev *dev)
{
        u32 val;

        mt76_wr(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_TX);
        mt76x02_wait_for_wpdma(&dev->mt76, 1000);
        usleep_range(50, 100);

        val = FIELD_PREP(MT_WPDMA_GLO_CFG_DMA_BURST_SIZE, 3) |
              MT_WPDMA_GLO_CFG_TX_DMA_EN |
              MT_WPDMA_GLO_CFG_RX_DMA_EN;
        mt76_set(dev, MT_WPDMA_GLO_CFG, val);
        mt76_clear(dev, MT_WPDMA_GLO_CFG,
                   MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE);
}

void mt76x02_dma_cleanup(struct mt76x02_dev *dev)
{
        tasklet_kill(&dev->mt76.tx_tasklet);
        mt76_dma_cleanup(&dev->mt76);
}
EXPORT_SYMBOL_GPL(mt76x02_dma_cleanup);

void mt76x02_dma_disable(struct mt76x02_dev *dev)
{
        u32 val = mt76_rr(dev, MT_WPDMA_GLO_CFG);

        val &= MT_WPDMA_GLO_CFG_DMA_BURST_SIZE |
               MT_WPDMA_GLO_CFG_BIG_ENDIAN |
               MT_WPDMA_GLO_CFG_HDR_SEG_LEN;
        val |= MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE;
        mt76_wr(dev, MT_WPDMA_GLO_CFG, val);
}
EXPORT_SYMBOL_GPL(mt76x02_dma_disable);

void mt76x02_mac_start(struct mt76x02_dev *dev)
{
        mt76x02_mac_reset_counters(dev);
        mt76x02_dma_enable(dev);
        mt76_wr(dev, MT_RX_FILTR_CFG, dev->mt76.rxfilter);
        mt76_wr(dev, MT_MAC_SYS_CTRL,
                MT_MAC_SYS_CTRL_ENABLE_TX |
                MT_MAC_SYS_CTRL_ENABLE_RX);
        mt76x02_irq_enable(dev,
                           MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_ALL |
                           MT_INT_TX_STAT);
}
EXPORT_SYMBOL_GPL(mt76x02_mac_start);

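/* Detect a stuck TX DMA ring: returns true if any non-empty queue made no
 * progress since the previous watchdog run.
 */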
static bool mt76x02_tx_hang(struct mt76x02_dev *dev)
{
        u32 dma_idx, prev_dma_idx;
        struct mt76_queue *q;
        int i;

        for (i = 0; i < 4; i++) {
                q = dev->mt76.q_tx[i].q;

                if (!q->queued)
                        continue;

                prev_dma_idx = dev->mt76.tx_dma_idx[i];
                dma_idx = readl(&q->regs->dma_idx);
                dev->mt76.tx_dma_idx[i] = dma_idx;

                if (prev_dma_idx == dma_idx)
                        break;
        }

        return i < 4;
}

static void mt76x02_key_sync(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
                             struct ieee80211_sta *sta,
                             struct ieee80211_key_conf *key, void *data)
{
        struct mt76x02_dev *dev = hw->priv;
        struct mt76_wcid *wcid;

        if (!sta)
                return;

        wcid = (struct mt76_wcid *)sta->drv_priv;

        if (wcid->hw_key_idx != key->keyidx || wcid->sw_iv)
                return;

        mt76x02_mac_wcid_sync_pn(dev, wcid->idx, key);
}

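/* Called before a firmware restart: sync per-key PN state back from the
 * hardware and remove all stations, so mac80211 can re-add them cleanly
 * after ieee80211_restart_hw().
 */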
static void mt76x02_reset_state(struct mt76x02_dev *dev)
{
        int i;

        lockdep_assert_held(&dev->mt76.mutex);

        clear_bit(MT76_STATE_RUNNING, &dev->mphy.state);

        rcu_read_lock();
        ieee80211_iter_keys_rcu(dev->mt76.hw, NULL, mt76x02_key_sync, NULL);
        rcu_read_unlock();

        for (i = 0; i < ARRAY_SIZE(dev->mt76.wcid); i++) {
                struct ieee80211_sta *sta;
                struct ieee80211_vif *vif;
                struct mt76x02_sta *msta;
                struct mt76_wcid *wcid;
                void *priv;

                wcid = rcu_dereference_protected(dev->mt76.wcid[i],
                                        lockdep_is_held(&dev->mt76.mutex));
                if (!wcid)
                        continue;

                rcu_assign_pointer(dev->mt76.wcid[i], NULL);

                priv = msta = container_of(wcid, struct mt76x02_sta, wcid);
                sta = container_of(priv, struct ieee80211_sta, drv_priv);

                priv = msta->vif;
                vif = container_of(priv, struct ieee80211_vif, drv_priv);

                __mt76_sta_remove(&dev->mt76, vif, sta);
                memset(msta, 0, sizeof(*msta));
        }

        dev->mt76.beacon_mask = 0;
}

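/* Full watchdog recovery: quiesce TX/RX, reset the MAC and DMA engines,
 * optionally restart the MCU firmware, then bring everything back up.
 */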
static void mt76x02_watchdog_reset(struct mt76x02_dev *dev)
{
        u32 mask = dev->mt76.mmio.irqmask;
        bool restart = dev->mt76.mcu_ops->mcu_restart;
        int i;

        ieee80211_stop_queues(dev->mt76.hw);
        set_bit(MT76_RESET, &dev->mphy.state);

        tasklet_disable(&dev->mt76.pre_tbtt_tasklet);
        tasklet_disable(&dev->mt76.tx_tasklet);
        napi_disable(&dev->mt76.tx_napi);

        for (i = 0; i < ARRAY_SIZE(dev->mt76.napi); i++)
                napi_disable(&dev->mt76.napi[i]);

        mutex_lock(&dev->mt76.mutex);

        dev->mcu_timeout = 0;
        if (restart)
                mt76x02_reset_state(dev);

        if (dev->mt76.beacon_mask)
                mt76_clear(dev, MT_BEACON_TIME_CFG,
                           MT_BEACON_TIME_CFG_BEACON_TX |
                           MT_BEACON_TIME_CFG_TBTT_EN);

        mt76x02_irq_disable(dev, mask);

        /* perform device reset */
        mt76_clear(dev, MT_TXOP_CTRL_CFG, MT_TXOP_ED_CCA_EN);
        mt76_wr(dev, MT_MAC_SYS_CTRL, 0);
        mt76_clear(dev, MT_WPDMA_GLO_CFG,
                   MT_WPDMA_GLO_CFG_TX_DMA_EN | MT_WPDMA_GLO_CFG_RX_DMA_EN);
        usleep_range(5000, 10000);
        mt76_wr(dev, MT_INT_SOURCE_CSR, 0xffffffff);

        /* let fw reset DMA */
        mt76_set(dev, 0x734, 0x3);

        if (restart)
                mt76_mcu_restart(dev);

        for (i = 0; i < __MT_TXQ_MAX; i++)
                mt76_queue_tx_cleanup(dev, i, true);

        for (i = 0; i < ARRAY_SIZE(dev->mt76.q_rx); i++)
                mt76_queue_rx_reset(dev, i);

        mt76x02_mac_start(dev);

        if (dev->ed_monitor)
                mt76_set(dev, MT_TXOP_CTRL_CFG, MT_TXOP_ED_CCA_EN);

        if (dev->mt76.beacon_mask && !restart)
                mt76_set(dev, MT_BEACON_TIME_CFG,
                         MT_BEACON_TIME_CFG_BEACON_TX |
                         MT_BEACON_TIME_CFG_TBTT_EN);

        mt76x02_irq_enable(dev, mask);

        mutex_unlock(&dev->mt76.mutex);

        clear_bit(MT76_RESET, &dev->mphy.state);

        tasklet_enable(&dev->mt76.tx_tasklet);
        napi_enable(&dev->mt76.tx_napi);
        napi_schedule(&dev->mt76.tx_napi);

        tasklet_enable(&dev->mt76.pre_tbtt_tasklet);

        for (i = 0; i < ARRAY_SIZE(dev->mt76.napi); i++) {
                napi_enable(&dev->mt76.napi[i]);
                napi_schedule(&dev->mt76.napi[i]);
        }

        if (restart) {
                mt76x02_mcu_function_select(dev, Q_SELECT, 1);
                ieee80211_restart_hw(dev->mt76.hw);
        } else {
                ieee80211_wake_queues(dev->mt76.hw);
                mt76_txq_schedule_all(&dev->mphy);
        }
}

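/* Trigger a watchdog reset after MT_TX_HANG_TH consecutive stuck checks or
 * on an MCU command timeout.
 */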
static void mt76x02_check_tx_hang(struct mt76x02_dev *dev)
{
        if (mt76x02_tx_hang(dev)) {
                if (++dev->tx_hang_check >= MT_TX_HANG_TH)
                        goto restart;
        } else {
                dev->tx_hang_check = 0;
        }

        if (dev->mcu_timeout)
                goto restart;

        return;

restart:
        mt76x02_watchdog_reset(dev);

        dev->tx_hang_reset++;
        dev->tx_hang_check = 0;
        memset(dev->mt76.tx_dma_idx, 0xff,
               sizeof(dev->mt76.tx_dma_idx));
}

void mt76x02_wdt_work(struct work_struct *work)
{
        struct mt76x02_dev *dev = container_of(work, struct mt76x02_dev,
                                               wdt_work.work);

        mt76x02_check_tx_hang(dev);

        ieee80211_queue_delayed_work(mt76_hw(dev), &dev->wdt_work,
                                     MT_WATCHDOG_TIME);
}