// SPDX-License-Identifier: ISC
/* Copyright (C) 2020 MediaTek Inc.
 *
 * Author: Felix Fietkau <nbd@nbd.name>
 *         Lorenzo Bianconi <lorenzo@kernel.org>
 *         Sean Wang <sean.wang@mediatek.com>
 */

#include <linux/kernel.h>
#include <linux/iopoll.h>
#include <linux/module.h>

#include <linux/mmc/host.h>
#include <linux/mmc/sdio_ids.h>
#include <linux/mmc/sdio_func.h>

#include "trace.h"
#include "sdio.h"
#include "mt76.h"
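
/* Parse the per-AC TX queue counters (WTQCR words) reported by the device
 * and top up the PSE/PLE page quota tracked in struct mt76_sdio. Returns
 * the total amount of quota refilled, or 0 if nothing changed.
 */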
static int mt76s_refill_sched_quota(struct mt76_dev *dev, u32 *data)
{
        u32 ple_ac_data_quota[] = {
                FIELD_GET(TXQ_CNT_L, data[4]), /* VO */
                FIELD_GET(TXQ_CNT_H, data[3]), /* VI */
                FIELD_GET(TXQ_CNT_L, data[3]), /* BE */
                FIELD_GET(TXQ_CNT_H, data[2]), /* BK */
        };
        u32 pse_ac_data_quota[] = {
                FIELD_GET(TXQ_CNT_H, data[1]), /* VO */
                FIELD_GET(TXQ_CNT_L, data[1]), /* VI */
                FIELD_GET(TXQ_CNT_H, data[0]), /* BE */
                FIELD_GET(TXQ_CNT_L, data[0]), /* BK */
        };
        u32 pse_mcu_quota = FIELD_GET(TXQ_CNT_L, data[2]);
        u32 pse_data_quota = 0, ple_data_quota = 0;
        struct mt76_sdio *sdio = &dev->sdio;
        int i;

        for (i = 0; i < ARRAY_SIZE(pse_ac_data_quota); i++) {
                pse_data_quota += pse_ac_data_quota[i];
                ple_data_quota += ple_ac_data_quota[i];
        }

        if (!pse_data_quota && !ple_data_quota && !pse_mcu_quota)
                return 0;

        sdio->sched.pse_mcu_quota += pse_mcu_quota;
        sdio->sched.pse_data_quota += pse_data_quota;
        sdio->sched.ple_data_quota += ple_data_quota;

        return pse_data_quota + ple_data_quota + pse_mcu_quota;
}
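
/* Build an RX skb from a frame in the bounce buffer: copy up to
 * MT_SKB_HEAD_LEN bytes into the skb head and attach the remainder as a
 * page fragment to avoid copying large payloads.
 */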
static struct sk_buff *
mt76s_build_rx_skb(void *data, int data_len, int buf_len)
{
        int len = min_t(int, data_len, MT_SKB_HEAD_LEN);
        struct sk_buff *skb;

        skb = alloc_skb(len, GFP_KERNEL);
        if (!skb)
                return NULL;

        skb_put_data(skb, data, len);
        if (data_len > len) {
                struct page *page;

                data += len;
                page = virt_to_head_page(data);
                skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
                                page, data - page_address(page),
                                data_len - len, buf_len);
                get_page(page);
        }

        return skb;
}
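
/* Read all pending frames for one RX queue in a single sdio_readsb()
 * burst, then split the bounce buffer into per-frame skbs queued on
 * dev->q_rx[qid]. Returns the number of frames consumed or a negative
 * error code.
 */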
static int
mt76s_rx_run_queue(struct mt76_dev *dev, enum mt76_rxq_id qid,
                   struct mt76s_intr *intr)
{
        struct mt76_queue *q = &dev->q_rx[qid];
        struct mt76_sdio *sdio = &dev->sdio;
        int len = 0, err, i;
        struct page *page;
        u8 *buf, *end;

        for (i = 0; i < intr->rx.num[qid]; i++)
                len += round_up(intr->rx.len[qid][i] + 4, 4);

        if (!len)
                return 0;

        if (len > sdio->func->cur_blksize)
                len = roundup(len, sdio->func->cur_blksize);

        page = __dev_alloc_pages(GFP_KERNEL, get_order(len));
        if (!page)
                return -ENOMEM;

        buf = page_address(page);

        sdio_claim_host(sdio->func);
        err = sdio_readsb(sdio->func, buf, MCR_WRDR(qid), len);
        sdio_release_host(sdio->func);

        if (err < 0) {
                dev_err(dev->dev, "sdio read data failed:%d\n", err);
                put_page(page);
                return err;
        }

        end = buf + len;
        i = 0;

        while (i < intr->rx.num[qid] && buf < end) {
                int index = (q->head + i) % q->ndesc;
                struct mt76_queue_entry *e = &q->entry[index];
                __le32 *rxd = (__le32 *)buf;

                /* parse rxd to get the actual packet length */
                len = le32_get_bits(rxd[0], GENMASK(15, 0));

                /* Optimized path for TXS */
                if (!dev->drv->rx_check || dev->drv->rx_check(dev, buf, len)) {
                        e->skb = mt76s_build_rx_skb(buf, len,
                                                    round_up(len + 4, 4));
                        if (!e->skb)
                                break;

                        if (q->queued + i + 1 == q->ndesc)
                                break;
                        i++;
                }
                buf += round_up(len + 4, 4);
        }
        put_page(page);

        spin_lock_bh(&q->lock);
        q->head = (q->head + i) % q->ndesc;
        q->queued += i;
        spin_unlock_bh(&q->lock);

        return i;
}
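
/* Top-level RX path: fetch the interrupt status from the device, drain
 * both RX rings and refill the TX scheduler quota. Returns a count of
 * the work done so the caller knows whether to keep polling.
 */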
static int mt76s_rx_handler(struct mt76_dev *dev)
{
        struct mt76_sdio *sdio = &dev->sdio;
        struct mt76s_intr intr;
        int nframes = 0, ret;

        ret = sdio->parse_irq(dev, &intr);
        if (ret)
                return ret;

        trace_dev_irq(dev, intr.isr, 0);

        if (intr.isr & WHIER_RX0_DONE_INT_EN) {
                ret = mt76s_rx_run_queue(dev, 0, &intr);
                if (ret > 0) {
                        mt76_worker_schedule(&sdio->net_worker);
                        nframes += ret;
                }
        }

        if (intr.isr & WHIER_RX1_DONE_INT_EN) {
                ret = mt76s_rx_run_queue(dev, 1, &intr);
                if (ret > 0) {
                        mt76_worker_schedule(&sdio->net_worker);
                        nframes += ret;
                }
        }

        nframes += !!mt76s_refill_sched_quota(dev, intr.tx.wtqcr);

        return nframes;
}
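
/* Check whether enough PSE/PLE quota remains to push one more frame of
 * buf_sz bytes and, if so, account for it in *pse_size/*ple_size.
 * Returns -EBUSY when the device has no room left for the frame.
 */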
static int
mt76s_tx_pick_quota(struct mt76_sdio *sdio, bool mcu, int buf_sz,
                    int *pse_size, int *ple_size)
{
        int pse_sz;

        pse_sz = DIV_ROUND_UP(buf_sz + sdio->sched.deficit,
                              sdio->sched.pse_page_size);

        if (mcu && sdio->hw_ver == MT76_CONNAC2_SDIO)
                pse_sz = 1;

        if (mcu) {
                if (sdio->sched.pse_mcu_quota < *pse_size + pse_sz)
                        return -EBUSY;
        } else {
                if (sdio->sched.pse_data_quota < *pse_size + pse_sz ||
                    sdio->sched.ple_data_quota < *ple_size + 1)
                        return -EBUSY;

                *ple_size = *ple_size + 1;
        }
        *pse_size = *pse_size + pse_sz;

        return 0;
}
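
/* Charge the PSE/PLE pages consumed by a completed burst against the
 * quota that mt76s_refill_sched_quota() keeps topped up.
 */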
static void
mt76s_tx_update_quota(struct mt76_sdio *sdio, bool mcu, int pse_size,
                      int ple_size)
{
        if (mcu) {
                sdio->sched.pse_mcu_quota -= pse_size;
        } else {
                sdio->sched.pse_data_quota -= pse_size;
                sdio->sched.ple_data_quota -= ple_size;
        }
}
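
/* Push a raw buffer to the device TX FIFO, rounding the transfer up to
 * the SDIO block size when it exceeds one block.
 */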
static int __mt76s_xmit_queue(struct mt76_dev *dev, u8 *data, int len)
{
        struct mt76_sdio *sdio = &dev->sdio;
        int err;

        if (len > sdio->func->cur_blksize)
                len = roundup(len, sdio->func->cur_blksize);

        sdio_claim_host(sdio->func);
        err = sdio_writesb(sdio->func, MCR_WTDR1, data, len);
        sdio_release_host(sdio->func);

        if (err)
                dev_err(dev->dev, "sdio write failed: %d\n", err);

        return err;
}
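
/* Aggregate as many queued frames as the quota and xmit_buf allow into a
 * single SDIO write. Frames queued before the MCU firmware is running are
 * sent one by one instead. Returns the number of frames transmitted.
 */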
static int mt76s_tx_run_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
        int err, nframes = 0, len = 0, pse_sz = 0, ple_sz = 0;
        bool mcu = q == dev->q_mcu[MT_MCUQ_WM];
        struct mt76_sdio *sdio = &dev->sdio;
        u8 pad;

        while (q->first != q->head) {
                struct mt76_queue_entry *e = &q->entry[q->first];
                struct sk_buff *iter;

                smp_rmb();

                if (test_bit(MT76_MCU_RESET, &dev->phy.state))
                        goto next;

                if (!test_bit(MT76_STATE_MCU_RUNNING, &dev->phy.state)) {
                        __skb_put_zero(e->skb, 4);
                        err = __skb_grow(e->skb, roundup(e->skb->len,
                                                         sdio->func->cur_blksize));
                        if (err)
                                return err;
                        err = __mt76s_xmit_queue(dev, e->skb->data,
                                                 e->skb->len);
                        if (err)
                                return err;

                        goto next;
                }

                pad = roundup(e->skb->len, 4) - e->skb->len;
                if (len + e->skb->len + pad + 4 > dev->sdio.xmit_buf_sz)
                        break;

                if (mt76s_tx_pick_quota(sdio, mcu, e->buf_sz, &pse_sz,
                                        &ple_sz))
                        break;

                memcpy(sdio->xmit_buf + len, e->skb->data, skb_headlen(e->skb));
                len += skb_headlen(e->skb);
                nframes++;

                skb_walk_frags(e->skb, iter) {
                        memcpy(sdio->xmit_buf + len, iter->data, iter->len);
                        len += iter->len;
                        nframes++;
                }

                if (unlikely(pad)) {
                        memset(sdio->xmit_buf + len, 0, pad);
                        len += pad;
                }
next:
                q->first = (q->first + 1) % q->ndesc;
                e->done = true;
        }

        if (nframes) {
                memset(sdio->xmit_buf + len, 0, 4);
                err = __mt76s_xmit_queue(dev, sdio->xmit_buf, len + 4);
                if (err)
                        return err;
        }
        mt76s_tx_update_quota(sdio, mcu, pse_sz, ple_sz);

        mt76_worker_schedule(&sdio->status_worker);

        return nframes;
}
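
/* Main TX/RX worker: with the device interrupt masked, keep running the
 * TX queues and the RX handler until no more work is pending, then
 * re-enable the interrupt.
 */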
void mt76s_txrx_worker(struct mt76_sdio *sdio)
{
        struct mt76_dev *dev = container_of(sdio, struct mt76_dev, sdio);
        int i, nframes, ret;

        /* disable interrupt */
        sdio_claim_host(sdio->func);
        sdio_writel(sdio->func, WHLPCR_INT_EN_CLR, MCR_WHLPCR, NULL);
        sdio_release_host(sdio->func);

        do {
                nframes = 0;

                /* tx */
                for (i = 0; i <= MT_TXQ_PSD; i++) {
                        ret = mt76s_tx_run_queue(dev, dev->phy.q_tx[i]);
                        if (ret > 0)
                                nframes += ret;
                }
                ret = mt76s_tx_run_queue(dev, dev->q_mcu[MT_MCUQ_WM]);
                if (ret > 0)
                        nframes += ret;

                /* rx */
                ret = mt76s_rx_handler(dev);
                if (ret > 0)
                        nframes += ret;

                if (test_bit(MT76_MCU_RESET, &dev->phy.state) ||
                    test_bit(MT76_STATE_SUSPEND, &dev->phy.state)) {
                        if (!mt76s_txqs_empty(dev))
                                continue;
                        else
                                wake_up(&sdio->wait);
                }
        } while (nframes > 0);

        /* enable interrupt */
        sdio_claim_host(sdio->func);
        sdio_writel(sdio->func, WHLPCR_INT_EN_SET, MCR_WHLPCR, NULL);
        sdio_release_host(sdio->func);
}
EXPORT_SYMBOL_GPL(mt76s_txrx_worker);
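
/* SDIO interrupt handler: mask further interrupts and defer the actual
 * work to the txrx worker.
 */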
void mt76s_sdio_irq(struct sdio_func *func)
{
        struct mt76_dev *dev = sdio_get_drvdata(func);
        struct mt76_sdio *sdio = &dev->sdio;

        if (!test_bit(MT76_STATE_INITIALIZED, &dev->phy.state) ||
            test_bit(MT76_MCU_RESET, &dev->phy.state))
                return;

        sdio_writel(sdio->func, WHLPCR_INT_EN_CLR, MCR_WHLPCR, NULL);
        mt76_worker_schedule(&sdio->txrx_worker);
}
EXPORT_SYMBOL_GPL(mt76s_sdio_irq);
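
/* Return true if every data TX queue and the MCU WM queue have been
 * fully drained.
 */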
bool mt76s_txqs_empty(struct mt76_dev *dev)
{
        struct mt76_queue *q;
        int i;

        for (i = 0; i <= MT_TXQ_PSD + 1; i++) {
                if (i <= MT_TXQ_PSD)
                        q = dev->phy.q_tx[i];
                else
                        q = dev->q_mcu[MT_MCUQ_WM];

                if (q->first != q->head)
                        return false;
        }

        return true;
}
EXPORT_SYMBOL_GPL(mt76s_txqs_empty);