drivers/dma/hisi_dma.c

// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2019 HiSilicon Limited. */
#include <linux/bitfield.h>
#include <linux/dmaengine.h>
#include <linux/init.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include "virt-dma.h"
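
/*
 * Per-queue register map. Every hardware queue exposes the same layout,
 * replicated at a stride of HISI_DMA_OFFSET (0x100) bytes from the BAR2
 * base, so a register for queue N lives at base + reg + N * 0x100 (see
 * hisi_dma_chan_write() below).
 */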
#define HISI_DMA_SQ_BASE_L		0x0
#define HISI_DMA_SQ_BASE_H		0x4
#define HISI_DMA_SQ_DEPTH		0x8
#define HISI_DMA_SQ_TAIL_PTR		0xc
#define HISI_DMA_CQ_BASE_L		0x10
#define HISI_DMA_CQ_BASE_H		0x14
#define HISI_DMA_CQ_DEPTH		0x18
#define HISI_DMA_CQ_HEAD_PTR		0x1c
#define HISI_DMA_CTRL0			0x20
#define HISI_DMA_CTRL0_QUEUE_EN_S	0
#define HISI_DMA_CTRL0_QUEUE_PAUSE_S	4
#define HISI_DMA_CTRL1			0x24
#define HISI_DMA_CTRL1_QUEUE_RESET_S	0
#define HISI_DMA_Q_FSM_STS		0x30
#define HISI_DMA_FSM_STS_MASK		GENMASK(3, 0)
#define HISI_DMA_INT_STS		0x40
#define HISI_DMA_INT_STS_MASK		GENMASK(12, 0)
#define HISI_DMA_INT_MSK		0x44
#define HISI_DMA_MODE			0x217c
#define HISI_DMA_OFFSET			0x100

#define HISI_DMA_MSI_NUM		30
#define HISI_DMA_CHAN_NUM		30
#define HISI_DMA_Q_DEPTH_VAL		1024

#define PCI_BAR_2			2

enum hisi_dma_mode {
	EP = 0,
	RC,
};

enum hisi_dma_chan_status {
	DISABLE = -1,
	IDLE = 0,
	RUN,
	CPL,
	PAUSE,
	HALT,
	ABORT,
	WAIT,
	BUFFCLR,
};
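
/*
 * Hardware descriptor layouts. A submission queue entry (SQE) carries the
 * opcode, source/destination addresses and length for one copy task; a
 * completion queue entry (CQE) reports per-task status. Both are
 * little-endian as seen by the device, hence the __le* fields.
 */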
struct hisi_dma_sqe {
	__le32 dw0;
#define OPCODE_MASK		GENMASK(3, 0)
#define OPCODE_SMALL_PACKAGE	0x1
#define OPCODE_M2M		0x4
#define LOCAL_IRQ_EN		BIT(8)
#define ATTR_SRC_MASK		GENMASK(14, 12)
	__le32 dw1;
	__le32 dw2;
#define ATTR_DST_MASK		GENMASK(26, 24)
	__le32 length;
	__le64 src_addr;
	__le64 dst_addr;
};

struct hisi_dma_cqe {
	__le32 rsv0;
	__le32 rsv1;
	__le16 sq_head;
	__le16 rsv2;
	__le16 rsv3;
	__le16 w0;
#define STATUS_MASK		GENMASK(15, 1)
#define STATUS_SUCC		0x0
#define VALID_BIT		BIT(0)
};

struct hisi_dma_desc {
	struct virt_dma_desc vd;
	struct hisi_dma_sqe sqe;
};

struct hisi_dma_chan {
	struct virt_dma_chan vc;
	struct hisi_dma_dev *hdma_dev;
	struct hisi_dma_sqe *sq;
	struct hisi_dma_cqe *cq;
	dma_addr_t sq_dma;
	dma_addr_t cq_dma;
	u32 sq_tail;
	u32 cq_head;
	u32 qp_num;
	enum hisi_dma_chan_status status;
	struct hisi_dma_desc *desc;
};

struct hisi_dma_dev {
	struct pci_dev *pdev;
	void __iomem *base;
	struct dma_device dma_dev;
	u32 chan_num;
	u32 chan_depth;
	struct hisi_dma_chan chan[];
};

static inline struct hisi_dma_chan *to_hisi_dma_chan(struct dma_chan *c)
{
	return container_of(c, struct hisi_dma_chan, vc.chan);
}

static inline struct hisi_dma_desc *to_hisi_dma_desc(struct virt_dma_desc *vd)
{
	return container_of(vd, struct hisi_dma_desc, vd);
}
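
/*
 * MMIO helpers. Relaxed accessors are used throughout; ordering against
 * DMA memory is only enforced where it matters, via the explicit wmb()
 * before ringing the SQ tail doorbell in hisi_dma_start_transfer().
 */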
static inline void hisi_dma_chan_write(void __iomem *base, u32 reg, u32 index,
				       u32 val)
{
	writel_relaxed(val, base + reg + index * HISI_DMA_OFFSET);
}

static inline void hisi_dma_update_bit(void __iomem *addr, u32 pos, bool val)
{
	u32 tmp;

	tmp = readl_relaxed(addr);
	tmp = val ? tmp | BIT(pos) : tmp & ~BIT(pos);
	writel_relaxed(tmp, addr);
}

static void hisi_dma_free_irq_vectors(void *data)
{
	pci_free_irq_vectors(data);
}

static void hisi_dma_pause_dma(struct hisi_dma_dev *hdma_dev, u32 index,
			       bool pause)
{
	void __iomem *addr = hdma_dev->base + HISI_DMA_CTRL0 + index *
			     HISI_DMA_OFFSET;

	hisi_dma_update_bit(addr, HISI_DMA_CTRL0_QUEUE_PAUSE_S, pause);
}

static void hisi_dma_enable_dma(struct hisi_dma_dev *hdma_dev, u32 index,
				bool enable)
{
	void __iomem *addr = hdma_dev->base + HISI_DMA_CTRL0 + index *
			     HISI_DMA_OFFSET;

	hisi_dma_update_bit(addr, HISI_DMA_CTRL0_QUEUE_EN_S, enable);
}

static void hisi_dma_mask_irq(struct hisi_dma_dev *hdma_dev, u32 qp_index)
{
	hisi_dma_chan_write(hdma_dev->base, HISI_DMA_INT_MSK, qp_index,
			    HISI_DMA_INT_STS_MASK);
}

static void hisi_dma_unmask_irq(struct hisi_dma_dev *hdma_dev, u32 qp_index)
{
	void __iomem *base = hdma_dev->base;

	hisi_dma_chan_write(base, HISI_DMA_INT_STS, qp_index,
			    HISI_DMA_INT_STS_MASK);
	hisi_dma_chan_write(base, HISI_DMA_INT_MSK, qp_index, 0);
}

static void hisi_dma_do_reset(struct hisi_dma_dev *hdma_dev, u32 index)
{
	void __iomem *addr = hdma_dev->base + HISI_DMA_CTRL1 + index *
			     HISI_DMA_OFFSET;

	hisi_dma_update_bit(addr, HISI_DMA_CTRL1_QUEUE_RESET_S, 1);
}

static void hisi_dma_reset_qp_point(struct hisi_dma_dev *hdma_dev, u32 index)
{
	hisi_dma_chan_write(hdma_dev->base, HISI_DMA_SQ_TAIL_PTR, index, 0);
	hisi_dma_chan_write(hdma_dev->base, HISI_DMA_CQ_HEAD_PTR, index, 0);
}
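
/*
 * Quiesce and reset one hardware queue: pause and disable it, wait for
 * the queue FSM to leave RUN, reset it, rewind both ring pointers, then
 * re-enable it and wait for the FSM to report IDLE. Each poll checks
 * every 10 us and times out after 1 ms.
 */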
static void hisi_dma_reset_hw_chan(struct hisi_dma_chan *chan)
{
	struct hisi_dma_dev *hdma_dev = chan->hdma_dev;
	u32 index = chan->qp_num, tmp;
	int ret;

	hisi_dma_pause_dma(hdma_dev, index, true);
	hisi_dma_enable_dma(hdma_dev, index, false);
	hisi_dma_mask_irq(hdma_dev, index);

	ret = readl_relaxed_poll_timeout(hdma_dev->base +
		HISI_DMA_Q_FSM_STS + index * HISI_DMA_OFFSET, tmp,
		FIELD_GET(HISI_DMA_FSM_STS_MASK, tmp) != RUN, 10, 1000);
	if (ret) {
		dev_err(&hdma_dev->pdev->dev, "disable channel timeout!\n");
		WARN_ON(1);
	}

	hisi_dma_do_reset(hdma_dev, index);
	hisi_dma_reset_qp_point(hdma_dev, index);
	hisi_dma_pause_dma(hdma_dev, index, false);
	hisi_dma_enable_dma(hdma_dev, index, true);
	hisi_dma_unmask_irq(hdma_dev, index);

	ret = readl_relaxed_poll_timeout(hdma_dev->base +
		HISI_DMA_Q_FSM_STS + index * HISI_DMA_OFFSET, tmp,
		FIELD_GET(HISI_DMA_FSM_STS_MASK, tmp) == IDLE, 10, 1000);
	if (ret) {
		dev_err(&hdma_dev->pdev->dev, "reset channel timeout!\n");
		WARN_ON(1);
	}
}

static void hisi_dma_free_chan_resources(struct dma_chan *c)
{
	struct hisi_dma_chan *chan = to_hisi_dma_chan(c);
	struct hisi_dma_dev *hdma_dev = chan->hdma_dev;

	hisi_dma_reset_hw_chan(chan);
	vchan_free_chan_resources(&chan->vc);

	memset(chan->sq, 0, sizeof(struct hisi_dma_sqe) * hdma_dev->chan_depth);
	memset(chan->cq, 0, sizeof(struct hisi_dma_cqe) * hdma_dev->chan_depth);
	chan->sq_tail = 0;
	chan->cq_head = 0;
	chan->status = DISABLE;
}

static void hisi_dma_desc_free(struct virt_dma_desc *vd)
{
	kfree(to_hisi_dma_desc(vd));
}
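
/*
 * Build a memcpy descriptor. Only the length and addresses are filled in
 * here; the opcode and interrupt-enable bits are set later, when the
 * descriptor is copied into the hardware SQ by hisi_dma_start_transfer().
 */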
static struct dma_async_tx_descriptor *
hisi_dma_prep_dma_memcpy(struct dma_chan *c, dma_addr_t dst, dma_addr_t src,
			 size_t len, unsigned long flags)
{
	struct hisi_dma_chan *chan = to_hisi_dma_chan(c);
	struct hisi_dma_desc *desc;

	desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
	if (!desc)
		return NULL;

	desc->sqe.length = cpu_to_le32(len);
	desc->sqe.src_addr = cpu_to_le64(src);
	desc->sqe.dst_addr = cpu_to_le64(dst);

	return vchan_tx_prep(&chan->vc, &desc->vd, flags);
}

static enum dma_status
hisi_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie,
		   struct dma_tx_state *txstate)
{
	return dma_cookie_status(c, cookie, txstate);
}
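
/*
 * Pop the next issued descriptor and hand it to the hardware. Called with
 * chan->vc.lock held. The SQ is a ring of chan_depth entries; writing the
 * new tail index to the SQ_TAIL_PTR doorbell starts the transfer.
 */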
static void hisi_dma_start_transfer(struct hisi_dma_chan *chan)
{
	struct hisi_dma_sqe *sqe = chan->sq + chan->sq_tail;
	struct hisi_dma_dev *hdma_dev = chan->hdma_dev;
	struct hisi_dma_desc *desc;
	struct virt_dma_desc *vd;

	vd = vchan_next_desc(&chan->vc);
	if (!vd) {
		dev_err(&hdma_dev->pdev->dev, "no issued task!\n");
		chan->desc = NULL;
		return;
	}
	list_del(&vd->node);
	desc = to_hisi_dma_desc(vd);
	chan->desc = desc;

	memcpy(sqe, &desc->sqe, sizeof(struct hisi_dma_sqe));

	/* update the remaining fields in the sqe */
	sqe->dw0 = cpu_to_le32(FIELD_PREP(OPCODE_MASK, OPCODE_M2M));
	sqe->dw0 |= cpu_to_le32(LOCAL_IRQ_EN);

	/* make sure data has been updated in the sqe */
	wmb();

	/* update sq tail to point at the next free sqe position */
	chan->sq_tail = (chan->sq_tail + 1) % hdma_dev->chan_depth;

	/* write sq_tail to the doorbell to trigger the new task */
	hisi_dma_chan_write(hdma_dev->base, HISI_DMA_SQ_TAIL_PTR, chan->qp_num,
			    chan->sq_tail);
}

static void hisi_dma_issue_pending(struct dma_chan *c)
{
	struct hisi_dma_chan *chan = to_hisi_dma_chan(c);
	unsigned long flags;

	spin_lock_irqsave(&chan->vc.lock, flags);

	if (vchan_issue_pending(&chan->vc))
		hisi_dma_start_transfer(chan);

	spin_unlock_irqrestore(&chan->vc.lock, flags);
}
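
/*
 * Abort all outstanding work on a channel: pause the queue, drop the
 * in-flight descriptor and collect every queued one under the lock, then
 * free the collected descriptors after the lock is dropped and unpause.
 */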
static int hisi_dma_terminate_all(struct dma_chan *c)
{
	struct hisi_dma_chan *chan = to_hisi_dma_chan(c);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&chan->vc.lock, flags);

	hisi_dma_pause_dma(chan->hdma_dev, chan->qp_num, true);
	if (chan->desc) {
		vchan_terminate_vdesc(&chan->desc->vd);
		chan->desc = NULL;
	}

	vchan_get_all_descriptors(&chan->vc, &head);

	spin_unlock_irqrestore(&chan->vc.lock, flags);

	vchan_dma_desc_free_list(&chan->vc, &head);
	hisi_dma_pause_dma(chan->hdma_dev, chan->qp_num, false);

	return 0;
}

static void hisi_dma_synchronize(struct dma_chan *c)
{
	struct hisi_dma_chan *chan = to_hisi_dma_chan(c);

	vchan_synchronize(&chan->vc);
}
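
/*
 * Allocate the SQ/CQ rings for every channel from coherent DMA memory.
 * The dmam_* allocations are device-managed, so there is no explicit
 * free path; they are released automatically on driver detach.
 */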
static int hisi_dma_alloc_qps_mem(struct hisi_dma_dev *hdma_dev)
{
	size_t sq_size = sizeof(struct hisi_dma_sqe) * hdma_dev->chan_depth;
	size_t cq_size = sizeof(struct hisi_dma_cqe) * hdma_dev->chan_depth;
	struct device *dev = &hdma_dev->pdev->dev;
	struct hisi_dma_chan *chan;
	int i;

	for (i = 0; i < hdma_dev->chan_num; i++) {
		chan = &hdma_dev->chan[i];
		chan->sq = dmam_alloc_coherent(dev, sq_size, &chan->sq_dma,
					       GFP_KERNEL);
		if (!chan->sq)
			return -ENOMEM;

		chan->cq = dmam_alloc_coherent(dev, cq_size, &chan->cq_dma,
					       GFP_KERNEL);
		if (!chan->cq)
			return -ENOMEM;
	}

	return 0;
}
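
/*
 * Program one queue's ring addresses and depth into the hardware. The
 * depth registers are written with chan_depth - 1, presumably the highest
 * valid ring index, and both ring pointers start at zero.
 */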
static void hisi_dma_init_hw_qp(struct hisi_dma_dev *hdma_dev, u32 index)
{
	struct hisi_dma_chan *chan = &hdma_dev->chan[index];
	u32 hw_depth = hdma_dev->chan_depth - 1;
	void __iomem *base = hdma_dev->base;

	/* set sq, cq base */
	hisi_dma_chan_write(base, HISI_DMA_SQ_BASE_L, index,
			    lower_32_bits(chan->sq_dma));
	hisi_dma_chan_write(base, HISI_DMA_SQ_BASE_H, index,
			    upper_32_bits(chan->sq_dma));
	hisi_dma_chan_write(base, HISI_DMA_CQ_BASE_L, index,
			    lower_32_bits(chan->cq_dma));
	hisi_dma_chan_write(base, HISI_DMA_CQ_BASE_H, index,
			    upper_32_bits(chan->cq_dma));

	/* set sq, cq depth */
	hisi_dma_chan_write(base, HISI_DMA_SQ_DEPTH, index, hw_depth);
	hisi_dma_chan_write(base, HISI_DMA_CQ_DEPTH, index, hw_depth);

	/* init sq tail and cq head */
	hisi_dma_chan_write(base, HISI_DMA_SQ_TAIL_PTR, index, 0);
	hisi_dma_chan_write(base, HISI_DMA_CQ_HEAD_PTR, index, 0);
}

static void hisi_dma_enable_qp(struct hisi_dma_dev *hdma_dev, u32 qp_index)
{
	hisi_dma_init_hw_qp(hdma_dev, qp_index);
	hisi_dma_unmask_irq(hdma_dev, qp_index);
	hisi_dma_enable_dma(hdma_dev, qp_index, true);
}

static void hisi_dma_disable_qp(struct hisi_dma_dev *hdma_dev, u32 qp_index)
{
	hisi_dma_reset_hw_chan(&hdma_dev->chan[qp_index]);
}

static void hisi_dma_enable_qps(struct hisi_dma_dev *hdma_dev)
{
	int i;

	for (i = 0; i < hdma_dev->chan_num; i++) {
		hdma_dev->chan[i].qp_num = i;
		hdma_dev->chan[i].hdma_dev = hdma_dev;
		hdma_dev->chan[i].vc.desc_free = hisi_dma_desc_free;
		vchan_init(&hdma_dev->chan[i].vc, &hdma_dev->dma_dev);
		hisi_dma_enable_qp(hdma_dev, i);
	}
}

static void hisi_dma_disable_qps(struct hisi_dma_dev *hdma_dev)
{
	int i;

	for (i = 0; i < hdma_dev->chan_num; i++) {
		hisi_dma_disable_qp(hdma_dev, i);
		tasklet_kill(&hdma_dev->chan[i].vc.task);
	}
}
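
/*
 * Per-queue completion interrupt. Each channel has its own MSI vector, so
 * the handler only inspects the CQE at this channel's cq_head: on success
 * it advances the head, acknowledges it via the CQ_HEAD_PTR doorbell and
 * completes the cookie; on error it only logs and drops the descriptor.
 */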
static irqreturn_t hisi_dma_irq(int irq, void *data)
{
	struct hisi_dma_chan *chan = data;
	struct hisi_dma_dev *hdma_dev = chan->hdma_dev;
	struct hisi_dma_desc *desc;
	struct hisi_dma_cqe *cqe;
	unsigned long flags;

	spin_lock_irqsave(&chan->vc.lock, flags);

	desc = chan->desc;
	cqe = chan->cq + chan->cq_head;
	if (desc) {
		if (FIELD_GET(STATUS_MASK, cqe->w0) == STATUS_SUCC) {
			chan->cq_head = (chan->cq_head + 1) %
					hdma_dev->chan_depth;
			hisi_dma_chan_write(hdma_dev->base,
					    HISI_DMA_CQ_HEAD_PTR, chan->qp_num,
					    chan->cq_head);
			vchan_cookie_complete(&desc->vd);
		} else {
			dev_err(&hdma_dev->pdev->dev, "task error!\n");
		}

		chan->desc = NULL;
	}

	spin_unlock_irqrestore(&chan->vc.lock, flags);

	return IRQ_HANDLED;
}

static int hisi_dma_request_qps_irq(struct hisi_dma_dev *hdma_dev)
{
	struct pci_dev *pdev = hdma_dev->pdev;
	int i, ret;

	for (i = 0; i < hdma_dev->chan_num; i++) {
		ret = devm_request_irq(&pdev->dev, pci_irq_vector(pdev, i),
				       hisi_dma_irq, IRQF_SHARED, "hisi_dma",
				       &hdma_dev->chan[i]);
		if (ret)
			return ret;
	}

	return 0;
}

/* This function enables all hw channels in a device */
static int hisi_dma_enable_hw_channels(struct hisi_dma_dev *hdma_dev)
{
	int ret;

	ret = hisi_dma_alloc_qps_mem(hdma_dev);
	if (ret) {
		dev_err(&hdma_dev->pdev->dev, "fail to allocate qp memory!\n");
		return ret;
	}

	ret = hisi_dma_request_qps_irq(hdma_dev);
	if (ret) {
		dev_err(&hdma_dev->pdev->dev, "fail to request qp irq!\n");
		return ret;
	}

	hisi_dma_enable_qps(hdma_dev);

	return 0;
}

static void hisi_dma_disable_hw_channels(void *data)
{
	hisi_dma_disable_qps(data);
}

static void hisi_dma_set_mode(struct hisi_dma_dev *hdma_dev,
			      enum hisi_dma_mode mode)
{
	writel_relaxed(mode == RC ? 1 : 0, hdma_dev->base + HISI_DMA_MODE);
}
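
/*
 * Probe: map BAR2, set 64-bit DMA masks, allocate one MSI vector per
 * channel, register the dmaengine device and bring up all the queues.
 * Everything is devres-managed, so the driver needs no remove callback.
 */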
static int hisi_dma_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct device *dev = &pdev->dev;
	struct hisi_dma_dev *hdma_dev;
	struct dma_device *dma_dev;
	size_t dev_size;
	int ret;

	ret = pcim_enable_device(pdev);
	if (ret) {
		dev_err(dev, "failed to enable device mem!\n");
		return ret;
	}

	ret = pcim_iomap_regions(pdev, 1 << PCI_BAR_2, pci_name(pdev));
	if (ret) {
		dev_err(dev, "failed to remap I/O region!\n");
		return ret;
	}

	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (ret)
		return ret;

	ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	if (ret)
		return ret;

	dev_size = sizeof(struct hisi_dma_chan) * HISI_DMA_CHAN_NUM +
		   sizeof(*hdma_dev);
	hdma_dev = devm_kzalloc(dev, dev_size, GFP_KERNEL);
	if (!hdma_dev)
		return -ENOMEM;

	hdma_dev->base = pcim_iomap_table(pdev)[PCI_BAR_2];
	hdma_dev->pdev = pdev;
	hdma_dev->chan_num = HISI_DMA_CHAN_NUM;
	hdma_dev->chan_depth = HISI_DMA_Q_DEPTH_VAL;

	pci_set_drvdata(pdev, hdma_dev);
	pci_set_master(pdev);

	ret = pci_alloc_irq_vectors(pdev, HISI_DMA_MSI_NUM, HISI_DMA_MSI_NUM,
				    PCI_IRQ_MSI);
	if (ret < 0) {
		dev_err(dev, "Failed to allocate MSI vectors!\n");
		return ret;
	}

	ret = devm_add_action_or_reset(dev, hisi_dma_free_irq_vectors, pdev);
	if (ret)
		return ret;

	dma_dev = &hdma_dev->dma_dev;
	dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
	dma_dev->device_free_chan_resources = hisi_dma_free_chan_resources;
	dma_dev->device_prep_dma_memcpy = hisi_dma_prep_dma_memcpy;
	dma_dev->device_tx_status = hisi_dma_tx_status;
	dma_dev->device_issue_pending = hisi_dma_issue_pending;
	dma_dev->device_terminate_all = hisi_dma_terminate_all;
	dma_dev->device_synchronize = hisi_dma_synchronize;
	dma_dev->directions = BIT(DMA_MEM_TO_MEM);
	dma_dev->dev = dev;
	INIT_LIST_HEAD(&dma_dev->channels);

	hisi_dma_set_mode(hdma_dev, RC);

	ret = hisi_dma_enable_hw_channels(hdma_dev);
	if (ret < 0) {
		dev_err(dev, "failed to enable hw channel!\n");
		return ret;
	}

	ret = devm_add_action_or_reset(dev, hisi_dma_disable_hw_channels,
				       hdma_dev);
	if (ret)
		return ret;

	ret = dmaenginem_async_device_register(dma_dev);
	if (ret < 0)
		dev_err(dev, "failed to register device!\n");

	return ret;
}

static const struct pci_device_id hisi_dma_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, 0xa122) },
	{ 0, }
};

static struct pci_driver hisi_dma_pci_driver = {
	.name		= "hisi_dma",
	.id_table	= hisi_dma_pci_tbl,
	.probe		= hisi_dma_probe,
};

module_pci_driver(hisi_dma_pci_driver);

MODULE_AUTHOR("Zhou Wang <wangzhou1@hisilicon.com>");
MODULE_AUTHOR("Zhenfa Qiu <qiuzhenfa@hisilicon.com>");
MODULE_DESCRIPTION("HiSilicon Kunpeng DMA controller driver");
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(pci, hisi_dma_pci_tbl);