/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <linux/dma-mapping.h>
#include "mt76.h"
#include "dma.h"

#define DMA_DUMMY_TXWI	((void *) ~0)
static int
mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
	int size;
	int i;

	spin_lock_init(&q->lock);
	INIT_LIST_HEAD(&q->swq);

	size = q->ndesc * sizeof(struct mt76_desc);
	q->desc = dmam_alloc_coherent(dev->dev, size, &q->desc_dma, GFP_KERNEL);
	if (!q->desc)
		return -ENOMEM;

	size = q->ndesc * sizeof(*q->entry);
	q->entry = devm_kzalloc(dev->dev, size, GFP_KERNEL);
	if (!q->entry)
		return -ENOMEM;

	/* clear descriptors */
	for (i = 0; i < q->ndesc; i++)
		q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);

	iowrite32(q->desc_dma, &q->regs->desc_base);
	iowrite32(0, &q->regs->cpu_idx);
	iowrite32(0, &q->regs->dma_idx);
	iowrite32(q->ndesc, &q->regs->ring_size);

	return 0;
}

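/*
 * Queue up to two data buffers per hardware descriptor. The segment lengths
 * are encoded in the ctrl word, and the LAST_SEC0/LAST_SEC1 bits flag the
 * final segment of a frame.
 */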
static int
mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
		 struct mt76_queue_buf *buf, int nbufs, u32 info,
		 struct sk_buff *skb, void *txwi)
{
	struct mt76_desc *desc;
	u32 ctrl;
	int i, idx = -1;

	if (txwi)
		q->entry[q->head].txwi = DMA_DUMMY_TXWI;

	for (i = 0; i < nbufs; i += 2, buf += 2) {
		u32 buf0 = buf[0].addr, buf1 = 0;

		ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len);
		if (i < nbufs - 1) {
			buf1 = buf[1].addr;
			ctrl |= FIELD_PREP(MT_DMA_CTL_SD_LEN1, buf[1].len);
		}

		if (i == nbufs - 1)
			ctrl |= MT_DMA_CTL_LAST_SEC0;
		else if (i == nbufs - 2)
			ctrl |= MT_DMA_CTL_LAST_SEC1;

		idx = q->head;
		q->head = (q->head + 1) % q->ndesc;

		desc = &q->desc[idx];

		WRITE_ONCE(desc->buf0, cpu_to_le32(buf0));
		WRITE_ONCE(desc->buf1, cpu_to_le32(buf1));
		WRITE_ONCE(desc->info, cpu_to_le32(info));
		WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));

		q->queued++;
	}

	q->entry[idx].txwi = txwi;
	q->entry[idx].skb = skb;

	return idx;
}

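/*
 * Release the DMA mappings of a completed descriptor and hand its queue
 * entry state back to the caller before clearing it.
 */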
static void
mt76_dma_tx_cleanup_idx(struct mt76_dev *dev, struct mt76_queue *q, int idx,
			struct mt76_queue_entry *prev_e)
{
	struct mt76_queue_entry *e = &q->entry[idx];
	__le32 __ctrl = READ_ONCE(q->desc[idx].ctrl);
	u32 ctrl = le32_to_cpu(__ctrl);

	if (!e->txwi || !e->skb) {
		__le32 addr = READ_ONCE(q->desc[idx].buf0);
		u32 len = FIELD_GET(MT_DMA_CTL_SD_LEN0, ctrl);

		dma_unmap_single(dev->dev, le32_to_cpu(addr), len,
				 DMA_TO_DEVICE);
	}

	if (!(ctrl & MT_DMA_CTL_LAST_SEC0)) {
		__le32 addr = READ_ONCE(q->desc[idx].buf1);
		u32 len = FIELD_GET(MT_DMA_CTL_SD_LEN1, ctrl);

		dma_unmap_single(dev->dev, le32_to_cpu(addr), len,
				 DMA_TO_DEVICE);
	}

	if (e->txwi == DMA_DUMMY_TXWI)
		e->txwi = NULL;

	*prev_e = *e;
	memset(e, 0, sizeof(*e));
}

static void
mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q)
{
	q->head = ioread32(&q->regs->dma_idx);
	q->tail = q->head;
	iowrite32(q->head, &q->regs->cpu_idx);
}

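/*
 * Reclaim completed TX descriptors up to the index last reported by the
 * hardware, completing their skbs and waking the mac80211 queue once
 * enough ring space has been freed.
 */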
static void
mt76_dma_tx_cleanup(struct mt76_dev *dev, enum mt76_txq_id qid, bool flush)
{
	struct mt76_queue *q = &dev->q_tx[qid];
	struct mt76_queue_entry entry;
	bool wake = false;
	int last;

	if (!q->ndesc)
		return;

	spin_lock_bh(&q->lock);
	if (flush)
		last = -1;
	else
		last = ioread32(&q->regs->dma_idx);

	while (q->queued && q->tail != last) {
		mt76_dma_tx_cleanup_idx(dev, q, q->tail, &entry);
		if (entry.schedule)
			q->swq_queued--;

		if (entry.skb)
			dev->drv->tx_complete_skb(dev, q, &entry, flush);

		if (entry.txwi) {
			mt76_put_txwi(dev, entry.txwi);
			wake = true;
		}

		q->tail = (q->tail + 1) % q->ndesc;
		q->queued--;

		if (!flush && q->tail == last)
			last = ioread32(&q->regs->dma_idx);
	}

	if (!flush)
		mt76_txq_schedule(dev, q);
	else
		mt76_dma_sync_idx(dev, q);

	wake = wake && qid < IEEE80211_NUM_ACS && q->queued < q->ndesc - 8;

	if (!q->queued)
		wake_up(&dev->tx_wait);

	spin_unlock_bh(&q->lock);

	if (wake)
		ieee80211_wake_queue(dev->hw, qid);
}

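/* Detach and unmap the RX buffer attached to a completed descriptor. */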
static void *
mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
		 int *len, u32 *info, bool *more)
{
	struct mt76_queue_entry *e = &q->entry[idx];
	struct mt76_desc *desc = &q->desc[idx];
	dma_addr_t buf_addr;
	void *buf = e->buf;
	int buf_len = SKB_WITH_OVERHEAD(q->buf_size);

	buf_addr = le32_to_cpu(READ_ONCE(desc->buf0));
	if (len) {
		u32 ctl = le32_to_cpu(READ_ONCE(desc->ctrl));
		*len = FIELD_GET(MT_DMA_CTL_SD_LEN0, ctl);
		*more = !(ctl & MT_DMA_CTL_LAST_SEC0);
	}

	if (info)
		*info = le32_to_cpu(desc->info);

	dma_unmap_single(dev->dev, buf_addr, buf_len, DMA_FROM_DEVICE);
	e->buf = NULL;

	return buf;
}

static void *
mt76_dma_dequeue(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
		 int *len, u32 *info, bool *more)
{
	int idx = q->tail;

	*more = false;
	if (!q->queued)
		return NULL;

	if (!flush && !(q->desc[idx].ctrl & cpu_to_le32(MT_DMA_CTL_DMA_DONE)))
		return NULL;

	q->tail = (q->tail + 1) % q->ndesc;
	q->queued--;

	return mt76_dma_get_buf(dev, q, idx, len, info, more);
}

static void
mt76_dma_kick_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
	iowrite32(q->head, &q->regs->cpu_idx);
}

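/*
 * Refill the RX ring with page fragment buffers, leaving one descriptor
 * unused so that a full ring can be told apart from an empty one.
 */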
static int
mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q, bool napi)
{
	dma_addr_t addr;
	void *buf;
	int frames = 0;
	int len = SKB_WITH_OVERHEAD(q->buf_size);
	int offset = q->buf_offset;
	int idx;
	void *(*alloc)(unsigned int fragsz);

	if (napi)
		alloc = napi_alloc_frag;
	else
		alloc = netdev_alloc_frag;

	spin_lock_bh(&q->lock);

	while (q->queued < q->ndesc - 1) {
		struct mt76_queue_buf qbuf;

		buf = alloc(q->buf_size);
		if (!buf)
			break;

		addr = dma_map_single(dev->dev, buf, len, DMA_FROM_DEVICE);
		if (dma_mapping_error(dev->dev, addr)) {
			skb_free_frag(buf);
			break;
		}

		qbuf.addr = addr + offset;
		qbuf.len = len - offset;
		idx = mt76_dma_add_buf(dev, q, &qbuf, 1, 0, buf, NULL);
		frames++;
	}

	if (frames)
		mt76_dma_kick_queue(dev, q);

	spin_unlock_bh(&q->lock);

	return frames;
}

static void
mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
{
	void *buf;
	bool more;

	spin_lock_bh(&q->lock);
	do {
		buf = mt76_dma_dequeue(dev, q, true, NULL, NULL, &more);
		if (!buf)
			break;

		skb_free_frag(buf);
	} while (1);
	spin_unlock_bh(&q->lock);
}

static void
mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid)
{
	struct mt76_queue *q = &dev->q_rx[qid];
	int i;

	for (i = 0; i < q->ndesc; i++)
		q->desc[i].ctrl &= ~cpu_to_le32(MT_DMA_CTL_DMA_DONE);

	mt76_dma_rx_cleanup(dev, q);
	mt76_dma_sync_idx(dev, q);
	mt76_dma_rx_fill(dev, q, false);
}

static void
mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
		  int len, bool more)
{
	struct page *page = virt_to_head_page(data);
	int offset = data - page_address(page);
	struct sk_buff *skb = q->rx_head;

	offset += q->buf_offset;
	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset, len,
			q->buf_size);

	if (more)
		return;

	q->rx_head = NULL;
	dev->drv->rx_skb(dev, q - dev->q_rx, skb);
}

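/*
 * Process up to 'budget' received frames. Multi-buffer frames are
 * reassembled via q->rx_head; completed skbs are passed to the driver's
 * rx_skb hook.
 */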
static int
mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
{
	struct sk_buff *skb;
	unsigned char *data;
	int len;
	int done = 0;
	bool more;

	while (done < budget) {
		u32 info;

		data = mt76_dma_dequeue(dev, q, false, &len, &info, &more);
		if (!data)
			break;

		if (q->rx_head) {
			mt76_add_fragment(dev, q, data, len, more);
			continue;
		}

		skb = build_skb(data, q->buf_size);
		if (!skb) {
			skb_free_frag(data);
			continue;
		}

		skb_reserve(skb, q->buf_offset);
		if (skb->tail + len > skb->end) {
			dev_kfree_skb(skb);
			continue;
		}

		if (q == &dev->q_rx[MT_RXQ_MCU]) {
			u32 *rxfce = (u32 *) skb->cb;
			*rxfce = info;
		}

		__skb_put(skb, len);
		done++;

		if (more) {
			q->rx_head = skb;
			continue;
		}

		dev->drv->rx_skb(dev, q - dev->q_rx, skb);
	}

	mt76_dma_rx_fill(dev, q, true);
	return done;
}

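/*
 * NAPI poll handler: drain the RX queue, then complete NAPI and let the
 * driver re-enable RX interrupts via its rx_poll_complete hook.
 */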
static int
mt76_dma_rx_poll(struct napi_struct *napi, int budget)
{
	struct mt76_dev *dev;
	int qid, done = 0, cur;

	dev = container_of(napi->dev, struct mt76_dev, napi_dev);
	qid = napi - dev->napi;

	rcu_read_lock();

	do {
		cur = mt76_dma_rx_process(dev, &dev->q_rx[qid], budget - done);
		mt76_rx_poll_complete(dev, qid);
		done += cur;
	} while (cur && done < budget);

	rcu_read_unlock();

	if (done < budget) {
		napi_complete(napi);
		dev->drv->rx_poll_complete(dev, qid);
	}

	return done;
}

static int
mt76_dma_init(struct mt76_dev *dev)
{
	int i;

	init_dummy_netdev(&dev->napi_dev);

	for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++) {
		netif_napi_add(&dev->napi_dev, &dev->napi[i], mt76_dma_rx_poll,
			       64);
		mt76_dma_rx_fill(dev, &dev->q_rx[i], false);
		skb_queue_head_init(&dev->rx_skb[i]);
		napi_enable(&dev->napi[i]);
	}

	return 0;
}

static const struct mt76_queue_ops mt76_dma_ops = {
	.init = mt76_dma_init,
	.alloc = mt76_dma_alloc_queue,
	.add_buf = mt76_dma_add_buf,
	.tx_cleanup = mt76_dma_tx_cleanup,
	.rx_reset = mt76_dma_rx_reset,
	.kick = mt76_dma_kick_queue,
};

int mt76_dma_attach(struct mt76_dev *dev)
{
	dev->queue_ops = &mt76_dma_ops;
	return 0;
}
EXPORT_SYMBOL_GPL(mt76_dma_attach);

void mt76_dma_cleanup(struct mt76_dev *dev)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(dev->q_tx); i++)
		mt76_dma_tx_cleanup(dev, i, true);

	for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++) {
		netif_napi_del(&dev->napi[i]);
		mt76_dma_rx_cleanup(dev, &dev->q_rx[i]);
	}
}
EXPORT_SYMBOL_GPL(mt76_dma_cleanup);