/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <linux/kernel.h>
#include <linux/irq.h>

#include "mt76x02.h"
#include "mt76x02_mcu.h"
#include "mt76x02_trace.h"
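
/*
 * Pre-TBTT tasklet: runs shortly before each beacon interval to refresh
 * the beacon template and queue buffered broadcast/multicast frames on
 * the PSD queue. Skipped while the device is off-channel.
 */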
static void mt76x02_pre_tbtt_tasklet(unsigned long arg)
{
	struct mt76x02_dev *dev = (struct mt76x02_dev *)arg;
	struct mt76_queue *q = dev->mt76.q_tx[MT_TXQ_PSD].q;
	struct beacon_bc_data data = {};
	struct sk_buff *skb;
	int i;

	if (mt76_hw(dev)->conf.flags & IEEE80211_CONF_OFFCHANNEL)
		return;

	mt76x02_resync_beacon_timer(dev);

	ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev),
		IEEE80211_IFACE_ITER_RESUME_ALL,
		mt76x02_update_beacon_iter, dev);

	mt76_csa_check(&dev->mt76);

	if (dev->mt76.csa_complete)
		return;

	mt76x02_enqueue_buffered_bc(dev, &data, 8);

	if (!skb_queue_len(&data.q))
		return;

	for (i = 0; i < ARRAY_SIZE(data.tail); i++) {
		if (!data.tail[i])
			continue;

		mt76_skb_set_moredata(data.tail[i], false);
	}

	spin_lock_bh(&q->lock);
	while ((skb = __skb_dequeue(&data.q)) != NULL) {
		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
		struct ieee80211_vif *vif = info->control.vif;
		struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;

		mt76_tx_queue_skb(dev, MT_TXQ_PSD, skb, &mvif->group_wcid,
				  NULL);
	}
	spin_unlock_bh(&q->lock);
}
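
/* Hardware beacon control hooks plugged into the shared mt76x02 beacon code */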
static void mt76x02e_pre_tbtt_enable(struct mt76x02_dev *dev, bool en)
{
	if (en)
		tasklet_enable(&dev->mt76.pre_tbtt_tasklet);
	else
		tasklet_disable(&dev->mt76.pre_tbtt_tasklet);
}
static void mt76x02e_beacon_enable(struct mt76x02_dev *dev, bool en)
{
	mt76_rmw_field(dev, MT_INT_TIMER_EN, MT_INT_TIMER_EN_PRE_TBTT_EN, en);
	if (en)
		mt76x02_irq_enable(dev, MT_INT_PRE_TBTT | MT_INT_TBTT);
	else
		mt76x02_irq_disable(dev, MT_INT_PRE_TBTT | MT_INT_TBTT);
}
void mt76x02e_init_beacon_config(struct mt76x02_dev *dev)
{
	static const struct mt76x02_beacon_ops beacon_ops = {
		.nslots = 8,
		.slot_size = 1024,
		.pre_tbtt_enable = mt76x02e_pre_tbtt_enable,
		.beacon_enable = mt76x02e_beacon_enable,
	};

	dev->beacon_ops = &beacon_ops;

	/* Fire a pre-TBTT interrupt 8 ms before TBTT */
	mt76_rmw_field(dev, MT_INT_TIMER_CFG, MT_INT_TIMER_CFG_PRE_TBTT,
		       8 << 4);
	mt76_rmw_field(dev, MT_INT_TIMER_CFG, MT_INT_TIMER_CFG_GP_TIMER,
		       MT_DFS_GP_INTERVAL);
	mt76_wr(dev, MT_INT_TIMER_EN, 0);

	mt76x02_init_beacon_config(dev);
}
EXPORT_SYMBOL_GPL(mt76x02e_init_beacon_config);
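
/*
 * Allocate a hardware TX ring for the given queue index and enable its
 * TX-done interrupt.
 */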
static int
mt76x02_init_tx_queue(struct mt76x02_dev *dev, struct mt76_sw_queue *q,
		      int idx, int n_desc)
{
	struct mt76_queue *hwq;
	int err;

	hwq = devm_kzalloc(dev->mt76.dev, sizeof(*hwq), GFP_KERNEL);
	if (!hwq)
		return -ENOMEM;

	err = mt76_queue_alloc(dev, hwq, idx, n_desc, 0, MT_TX_RING_BASE);
	if (err < 0)
		return err;

	INIT_LIST_HEAD(&q->swq);
	q->q = hwq;

	mt76x02_irq_enable(dev, MT_INT_TX_DONE(idx));

	return 0;
}
static int
mt76x02_init_rx_queue(struct mt76x02_dev *dev, struct mt76_queue *q,
		      int idx, int n_desc, int bufsize)
{
	int err;

	err = mt76_queue_alloc(dev, q, idx, n_desc, bufsize,
			       MT_RX_RING_BASE);
	if (err < 0)
		return err;

	mt76x02_irq_enable(dev, MT_INT_RX_DONE(idx));

	return 0;
}
static void mt76x02_process_tx_status_fifo(struct mt76x02_dev *dev)
{
	struct mt76x02_tx_status stat;
	u8 update = 1;

	while (kfifo_get(&dev->txstatus_fifo, &stat))
		mt76x02_send_tx_status(dev, &stat, &update);
}
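
/* Bottom half for TX completion: drain the status FIFO, then reschedule queues */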
static void mt76x02_tx_tasklet(unsigned long data)
{
	struct mt76x02_dev *dev = (struct mt76x02_dev *)data;

	mt76x02_mac_poll_tx_status(dev, false);
	mt76x02_process_tx_status_fifo(dev);

	mt76_txq_schedule_all(&dev->mt76);
}
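
/*
 * NAPI TX poll: clean up all TX queues, then clean up again after
 * re-arming the interrupt so that completions racing with
 * napi_complete_done() are not lost.
 */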
static int mt76x02_poll_tx(struct napi_struct *napi, int budget)
{
	struct mt76x02_dev *dev = container_of(napi, struct mt76x02_dev,
					       tx_napi);
	int i;

	mt76x02_mac_poll_tx_status(dev, false);

	for (i = MT_TXQ_MCU; i >= 0; i--)
		mt76_queue_tx_cleanup(dev, i, false);

	if (napi_complete_done(napi, 0))
		mt76x02_irq_enable(dev, MT_INT_TX_DONE_ALL);

	for (i = MT_TXQ_MCU; i >= 0; i--)
		mt76_queue_tx_cleanup(dev, i, false);

	tasklet_schedule(&dev->mt76.tx_tasklet);

	return 0;
}
int mt76x02_dma_init(struct mt76x02_dev *dev)
{
	struct mt76_txwi_cache __maybe_unused *t;
	int i, ret, fifo_size;
	struct mt76_queue *q;
	void *status_fifo;

	BUILD_BUG_ON(sizeof(struct mt76x02_rxwi) > MT_RX_HEADROOM);

	fifo_size = roundup_pow_of_two(32 * sizeof(struct mt76x02_tx_status));
	status_fifo = devm_kzalloc(dev->mt76.dev, fifo_size, GFP_KERNEL);
	if (!status_fifo)
		return -ENOMEM;

	tasklet_init(&dev->mt76.tx_tasklet, mt76x02_tx_tasklet,
		     (unsigned long) dev);
	tasklet_init(&dev->mt76.pre_tbtt_tasklet, mt76x02_pre_tbtt_tasklet,
		     (unsigned long) dev);

	spin_lock_init(&dev->txstatus_fifo_lock);
	kfifo_init(&dev->txstatus_fifo, status_fifo, fifo_size);

	mt76_dma_attach(&dev->mt76);

	mt76_wr(dev, MT_WPDMA_RST_IDX, ~0);

	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
		ret = mt76x02_init_tx_queue(dev, &dev->mt76.q_tx[i],
					    mt76_ac_to_hwq(i),
					    MT_TX_RING_SIZE);
		if (ret)
			return ret;
	}

	ret = mt76x02_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_PSD],
				    MT_TX_HW_QUEUE_MGMT, MT_TX_RING_SIZE);
	if (ret)
		return ret;

	ret = mt76x02_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_MCU],
				    MT_TX_HW_QUEUE_MCU, MT_MCU_RING_SIZE);
	if (ret)
		return ret;

	ret = mt76x02_init_rx_queue(dev, &dev->mt76.q_rx[MT_RXQ_MCU], 1,
				    MT_MCU_RING_SIZE, MT_RX_BUF_SIZE);
	if (ret)
		return ret;

	q = &dev->mt76.q_rx[MT_RXQ_MAIN];
	q->buf_offset = MT_RX_HEADROOM - sizeof(struct mt76x02_rxwi);
	ret = mt76x02_init_rx_queue(dev, q, 0, MT76X02_RX_RING_SIZE,
				    MT_RX_BUF_SIZE);
	if (ret)
		return ret;

	ret = mt76_init_queues(dev);
	if (ret)
		return ret;

	netif_tx_napi_add(&dev->mt76.napi_dev, &dev->tx_napi, mt76x02_poll_tx,
			  NAPI_POLL_WEIGHT);
	napi_enable(&dev->tx_napi);

	return 0;
}
EXPORT_SYMBOL_GPL(mt76x02_dma_init);
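
/* Re-arm the RX-done interrupt once the RX NAPI poll has completed */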
void mt76x02_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q)
{
	struct mt76x02_dev *dev;

	dev = container_of(mdev, struct mt76x02_dev, mt76);
	mt76x02_irq_enable(dev, MT_INT_RX_DONE(q));
}
EXPORT_SYMBOL_GPL(mt76x02_rx_poll_complete);
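
/*
 * Top-half interrupt handler: acknowledge all pending sources, then defer
 * the actual work to NAPI contexts and tasklets.
 */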
irqreturn_t mt76x02_irq_handler(int irq, void *dev_instance)
{
	struct mt76x02_dev *dev = dev_instance;
	u32 intr;

	intr = mt76_rr(dev, MT_INT_SOURCE_CSR);
	mt76_wr(dev, MT_INT_SOURCE_CSR, intr);

	if (!test_bit(MT76_STATE_INITIALIZED, &dev->mt76.state))
		return IRQ_NONE;

	trace_dev_irq(dev, intr, dev->mt76.mmio.irqmask);

	intr &= dev->mt76.mmio.irqmask;

	if (intr & MT_INT_RX_DONE(0)) {
		mt76x02_irq_disable(dev, MT_INT_RX_DONE(0));
		napi_schedule(&dev->mt76.napi[0]);
	}

	if (intr & MT_INT_RX_DONE(1)) {
		mt76x02_irq_disable(dev, MT_INT_RX_DONE(1));
		napi_schedule(&dev->mt76.napi[1]);
	}

	if (intr & MT_INT_PRE_TBTT)
		tasklet_schedule(&dev->mt76.pre_tbtt_tasklet);

	/* send buffered multicast frames now */
	if (intr & MT_INT_TBTT) {
		if (dev->mt76.csa_complete)
			mt76_csa_finish(&dev->mt76);
		else
			mt76_queue_kick(dev, dev->mt76.q_tx[MT_TXQ_PSD].q);
	}

	if (intr & MT_INT_TX_STAT)
		mt76x02_mac_poll_tx_status(dev, true);

	if (intr & (MT_INT_TX_STAT | MT_INT_TX_DONE_ALL)) {
		mt76x02_irq_disable(dev, MT_INT_TX_DONE_ALL);
		napi_schedule(&dev->tx_napi);
	}

	if (intr & MT_INT_GPTIMER) {
		mt76x02_irq_disable(dev, MT_INT_GPTIMER);
		tasklet_schedule(&dev->dfs_pd.dfs_tasklet);
	}

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(mt76x02_irq_handler);
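
/* Bring up WPDMA: enable TX MAC, wait for DMA idle, then enable both engines */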
static void mt76x02_dma_enable(struct mt76x02_dev *dev)
{
	u32 val;

	mt76_wr(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_TX);
	mt76x02_wait_for_wpdma(&dev->mt76, 1000);
	usleep_range(50, 100);

	val = FIELD_PREP(MT_WPDMA_GLO_CFG_DMA_BURST_SIZE, 3) |
	      MT_WPDMA_GLO_CFG_TX_DMA_EN |
	      MT_WPDMA_GLO_CFG_RX_DMA_EN;
	mt76_set(dev, MT_WPDMA_GLO_CFG, val);
	mt76_clear(dev, MT_WPDMA_GLO_CFG,
		   MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE);
}
void mt76x02_dma_cleanup(struct mt76x02_dev *dev)
{
	tasklet_kill(&dev->mt76.tx_tasklet);
	netif_napi_del(&dev->tx_napi);
	mt76_dma_cleanup(&dev->mt76);
}
EXPORT_SYMBOL_GPL(mt76x02_dma_cleanup);
void mt76x02_dma_disable(struct mt76x02_dev *dev)
{
	u32 val = mt76_rr(dev, MT_WPDMA_GLO_CFG);

	val &= MT_WPDMA_GLO_CFG_DMA_BURST_SIZE |
	       MT_WPDMA_GLO_CFG_BIG_ENDIAN |
	       MT_WPDMA_GLO_CFG_HDR_SEG_LEN;
	val |= MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE;
	mt76_wr(dev, MT_WPDMA_GLO_CFG, val);
}
EXPORT_SYMBOL_GPL(mt76x02_dma_disable);
void mt76x02_mac_start(struct mt76x02_dev *dev)
{
	mt76x02_dma_enable(dev);
	mt76_wr(dev, MT_RX_FILTR_CFG, dev->mt76.rxfilter);
	mt76_wr(dev, MT_MAC_SYS_CTRL,
		MT_MAC_SYS_CTRL_ENABLE_TX |
		MT_MAC_SYS_CTRL_ENABLE_RX);
	mt76x02_irq_enable(dev,
			   MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_ALL |
			   MT_INT_TX_STAT);
}
EXPORT_SYMBOL_GPL(mt76x02_mac_start);
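
/*
 * A queue is considered hung when it still has frames queued but its DMA
 * index has not advanced since the last watchdog pass.
 */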
static bool mt76x02_tx_hang(struct mt76x02_dev *dev)
{
	u32 dma_idx, prev_dma_idx;
	struct mt76_queue *q;
	int i;

	for (i = 0; i < 4; i++) {
		q = dev->mt76.q_tx[i].q;

		if (!q->queued)
			continue;

		prev_dma_idx = dev->mt76.tx_dma_idx[i];
		dma_idx = readl(&q->regs->dma_idx);
		dev->mt76.tx_dma_idx[i] = dma_idx;

		if (prev_dma_idx == dma_idx)
			break;
	}

	return i < 4;
}
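
/*
 * Re-sync the PN/IV of hardware-offloaded keys after a chip reset so that
 * encrypted traffic does not resume with a stale counter.
 */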
static void mt76x02_key_sync(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta,
			     struct ieee80211_key_conf *key, void *data)
{
	struct mt76x02_dev *dev = hw->priv;
	struct mt76_wcid *wcid;

	if (!sta)
		return;

	wcid = (struct mt76_wcid *)sta->drv_priv;

	if (wcid->hw_key_idx != key->keyidx || wcid->sw_iv)
		return;

	mt76x02_mac_wcid_sync_pn(dev, wcid->idx, key);
}
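
/* Drop all software state (keys, stations, beacons) ahead of a full restart */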
static void mt76x02_reset_state(struct mt76x02_dev *dev)
{
	int i;

	lockdep_assert_held(&dev->mt76.mutex);

	clear_bit(MT76_STATE_RUNNING, &dev->mt76.state);

	rcu_read_lock();
	ieee80211_iter_keys_rcu(dev->mt76.hw, NULL, mt76x02_key_sync, NULL);
	rcu_read_unlock();

	for (i = 0; i < ARRAY_SIZE(dev->mt76.wcid); i++) {
		struct ieee80211_sta *sta;
		struct ieee80211_vif *vif;
		struct mt76x02_sta *msta;
		struct mt76_wcid *wcid;
		void *priv;

		wcid = rcu_dereference_protected(dev->mt76.wcid[i],
					lockdep_is_held(&dev->mt76.mutex));
		if (!wcid)
			continue;

		priv = msta = container_of(wcid, struct mt76x02_sta, wcid);
		sta = container_of(priv, struct ieee80211_sta, drv_priv);

		priv = msta->vif;
		vif = container_of(priv, struct ieee80211_vif, drv_priv);

		__mt76_sta_remove(&dev->mt76, vif, sta);
		memset(msta, 0, sizeof(*msta));
	}

	dev->mt76.beacon_mask = 0;
}
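
/*
 * Full recovery path: quiesce all deferred work, reset MAC/DMA (optionally
 * restarting the MCU firmware), then bring the queues back up.
 */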
static void mt76x02_watchdog_reset(struct mt76x02_dev *dev)
{
	u32 mask = dev->mt76.mmio.irqmask;
	bool restart = dev->mt76.mcu_ops->mcu_restart;
	int i;

	ieee80211_stop_queues(dev->mt76.hw);
	set_bit(MT76_RESET, &dev->mt76.state);

	tasklet_disable(&dev->mt76.pre_tbtt_tasklet);
	tasklet_disable(&dev->mt76.tx_tasklet);
	napi_disable(&dev->tx_napi);

	for (i = 0; i < ARRAY_SIZE(dev->mt76.napi); i++)
		napi_disable(&dev->mt76.napi[i]);

	mutex_lock(&dev->mt76.mutex);

	if (restart)
		mt76x02_reset_state(dev);

	if (dev->mt76.beacon_mask)
		mt76_clear(dev, MT_BEACON_TIME_CFG,
			   MT_BEACON_TIME_CFG_BEACON_TX |
			   MT_BEACON_TIME_CFG_TBTT_EN);

	mt76x02_irq_disable(dev, mask);

	/* perform device reset */
	mt76_clear(dev, MT_TXOP_CTRL_CFG, MT_TXOP_ED_CCA_EN);
	mt76_wr(dev, MT_MAC_SYS_CTRL, 0);
	mt76_clear(dev, MT_WPDMA_GLO_CFG,
		   MT_WPDMA_GLO_CFG_TX_DMA_EN | MT_WPDMA_GLO_CFG_RX_DMA_EN);
	usleep_range(5000, 10000);
	mt76_wr(dev, MT_INT_SOURCE_CSR, 0xffffffff);

	/* let fw reset DMA */
	mt76_set(dev, 0x734, 0x3);

	if (restart)
		mt76_mcu_restart(dev);

	for (i = 0; i < ARRAY_SIZE(dev->mt76.q_tx); i++)
		mt76_queue_tx_cleanup(dev, i, true);

	for (i = 0; i < ARRAY_SIZE(dev->mt76.q_rx); i++)
		mt76_queue_rx_reset(dev, i);

	mt76x02_mac_start(dev);

	if (dev->ed_monitor)
		mt76_set(dev, MT_TXOP_CTRL_CFG, MT_TXOP_ED_CCA_EN);

	if (dev->mt76.beacon_mask && !restart)
		mt76_set(dev, MT_BEACON_TIME_CFG,
			 MT_BEACON_TIME_CFG_BEACON_TX |
			 MT_BEACON_TIME_CFG_TBTT_EN);

	mt76x02_irq_enable(dev, mask);

	mutex_unlock(&dev->mt76.mutex);

	clear_bit(MT76_RESET, &dev->mt76.state);

	tasklet_enable(&dev->mt76.tx_tasklet);
	napi_enable(&dev->tx_napi);
	napi_schedule(&dev->tx_napi);

	tasklet_enable(&dev->mt76.pre_tbtt_tasklet);

	for (i = 0; i < ARRAY_SIZE(dev->mt76.napi); i++) {
		napi_enable(&dev->mt76.napi[i]);
		napi_schedule(&dev->mt76.napi[i]);
	}

	if (restart) {
		mt76x02_mcu_function_select(dev, Q_SELECT, 1);
		ieee80211_restart_hw(dev->mt76.hw);
	} else {
		ieee80211_wake_queues(dev->mt76.hw);
		mt76_txq_schedule_all(&dev->mt76);
	}
}
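
/*
 * Watchdog check: trigger a full reset after MT_TX_HANG_TH consecutive
 * hung passes, or immediately on an MCU command timeout.
 */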
static void mt76x02_check_tx_hang(struct mt76x02_dev *dev)
{
	if (mt76x02_tx_hang(dev)) {
		if (++dev->tx_hang_check >= MT_TX_HANG_TH)
			goto restart;
	} else {
		dev->tx_hang_check = 0;
	}

	if (dev->mcu_timeout)
		goto restart;

	return;

restart:
	mt76x02_watchdog_reset(dev);

	mutex_lock(&dev->mt76.mmio.mcu.mutex);
	dev->mcu_timeout = 0;
	mutex_unlock(&dev->mt76.mmio.mcu.mutex);

	dev->tx_hang_reset++;
	dev->tx_hang_check = 0;
	memset(dev->mt76.tx_dma_idx, 0xff,
	       sizeof(dev->mt76.tx_dma_idx));
}
void mt76x02_wdt_work(struct work_struct *work)
{
	struct mt76x02_dev *dev = container_of(work, struct mt76x02_dev,
					       wdt_work.work);

	mt76x02_check_tx_hang(dev);

	ieee80211_queue_delayed_work(mt76_hw(dev), &dev->wdt_work,
				     MT_WATCHDOG_TIME);
}