drivers/dma/idxd/dma.c

// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/dmaengine.h>
#include <uapi/linux/idxd.h>
#include "../dmaengine.h"
#include "registers.h"
#include "idxd.h"

static inline struct idxd_wq *to_idxd_wq(struct dma_chan *c)
{
        return container_of(c, struct idxd_wq, dma_chan);
}
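
/*
 * Translate the DSA completion record status into a struct dmaengine_result
 * and, for a completed descriptor that was assigned a cookie, complete the
 * cookie and invoke the client's callback.
 */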
void idxd_dma_complete_txd(struct idxd_desc *desc,
                           enum idxd_complete_type comp_type)
{
        struct dma_async_tx_descriptor *tx;
        struct dmaengine_result res;
        int complete = 1;

        if (desc->completion->status == DSA_COMP_SUCCESS)
                res.result = DMA_TRANS_NOERROR;
        else if (desc->completion->status)
                res.result = DMA_TRANS_WRITE_FAILED;
        else if (comp_type == IDXD_COMPLETE_ABORT)
                res.result = DMA_TRANS_ABORTED;
        else
                complete = 0;

        tx = &desc->txd;
        if (complete && tx->cookie) {
                dma_cookie_complete(tx);
                dma_descriptor_unmap(tx);
                dmaengine_desc_get_callback_invoke(tx, &res);
                tx->callback = NULL;
                tx->callback_result = NULL;
        }
}
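
/*
 * Always request a completion record (CRAV: completion record address valid,
 * RCR: request completion record); request a completion interrupt (RCI) only
 * when the client asked for DMA_PREP_INTERRUPT.
 */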
static void op_flag_setup(unsigned long flags, u32 *desc_flags)
{
        *desc_flags = IDXD_OP_FLAG_CRAV | IDXD_OP_FLAG_RCR;
        if (flags & DMA_PREP_INTERRUPT)
                *desc_flags |= IDXD_OP_FLAG_RCI;
}

static inline void set_completion_address(struct idxd_desc *desc,
                                          u64 *compl_addr)
{
        *compl_addr = desc->compl_dma;
}
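
/*
 * Fill in the fields of the DSA hardware descriptor that are common to all
 * operations. The priv bit is set for kernel work queues so the descriptor
 * executes with supervisor privilege.
 */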
static inline void idxd_prep_desc_common(struct idxd_wq *wq,
                                         struct dsa_hw_desc *hw, char opcode,
                                         u64 addr_f1, u64 addr_f2, u64 len,
                                         u64 compl, u32 flags)
{
        hw->flags = flags;
        hw->opcode = opcode;
        hw->src_addr = addr_f1;
        hw->dst_addr = addr_f2;
        hw->xfer_size = len;
        hw->priv = !!(wq->type == IDXD_WQT_KERNEL);
        hw->completion_addr = compl;
}
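
/*
 * device_prep_dma_memcpy callback: check that the work queue is enabled and
 * the length fits the device's transfer limit, then allocate a descriptor
 * and program a DSA_OPCODE_MEMMOVE operation.
 */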
static struct dma_async_tx_descriptor *
idxd_dma_submit_memcpy(struct dma_chan *c, dma_addr_t dma_dest,
                       dma_addr_t dma_src, size_t len, unsigned long flags)
{
        struct idxd_wq *wq = to_idxd_wq(c);
        u32 desc_flags;
        struct idxd_device *idxd = wq->idxd;
        struct idxd_desc *desc;

        if (wq->state != IDXD_WQ_ENABLED)
                return NULL;

        if (len > idxd->max_xfer_bytes)
                return NULL;

        op_flag_setup(flags, &desc_flags);
        desc = idxd_alloc_desc(wq, IDXD_OP_BLOCK);
        if (IS_ERR(desc))
                return NULL;

        idxd_prep_desc_common(wq, desc->hw, DSA_OPCODE_MEMMOVE,
                              dma_src, dma_dest, len, desc->compl_dma,
                              desc_flags);

        desc->txd.flags = flags;

        return &desc->txd;
}
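
/*
 * The channel resource hooks only track how many clients hold the work
 * queue; descriptor storage itself is managed per work queue elsewhere in
 * the driver.
 */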
static int idxd_dma_alloc_chan_resources(struct dma_chan *chan)
{
        struct idxd_wq *wq = to_idxd_wq(chan);
        struct device *dev = &wq->idxd->pdev->dev;

        idxd_wq_get(wq);
        dev_dbg(dev, "%s: client_count: %d\n", __func__,
                idxd_wq_refcount(wq));
        return 0;
}

static void idxd_dma_free_chan_resources(struct dma_chan *chan)
{
        struct idxd_wq *wq = to_idxd_wq(chan);
        struct device *dev = &wq->idxd->pdev->dev;

        idxd_wq_put(wq);
        dev_dbg(dev, "%s: client_count: %d\n", __func__,
                idxd_wq_refcount(wq));
}
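
/*
 * Completions can arrive out of order, so cookie-based progress cannot be
 * reported; clients must rely on the per-descriptor callback instead. This
 * matches the DMA_COMPLETION_NO_ORDER capability set at registration.
 */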
static enum dma_status idxd_dma_tx_status(struct dma_chan *dma_chan,
                                          dma_cookie_t cookie,
                                          struct dma_tx_state *txstate)
{
        return DMA_OUT_OF_ORDER;
}

/*
 * issue_pending() does not need to do anything since tx_submit() does the job
 * already.
 */
static void idxd_dma_issue_pending(struct dma_chan *dma_chan)
{
}
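
/*
 * tx_submit: assign a cookie and hand the descriptor to the work queue
 * immediately; on failure, free the descriptor and return the error code in
 * place of a cookie.
 */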
dma_cookie_t idxd_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
        struct dma_chan *c = tx->chan;
        struct idxd_wq *wq = to_idxd_wq(c);
        dma_cookie_t cookie;
        int rc;
        struct idxd_desc *desc = container_of(tx, struct idxd_desc, txd);

        cookie = dma_cookie_assign(tx);

        rc = idxd_submit_desc(wq, desc);
        if (rc < 0) {
                idxd_free_desc(wq, desc);
                return rc;
        }

        return cookie;
}
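
/*
 * The dma_device is embedded in struct idxd_device and lives as long as its
 * parent device, so there is nothing extra to free here.
 */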
static void idxd_dma_release(struct dma_device *device)
{
}
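
/*
 * Register the device-level dmaengine instance. DMA_COMPLETION_NO_ORDER is
 * always advertised; DMA_MEMCPY is advertised only when the operation
 * capability register reports memory-move support.
 */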
int idxd_register_dma_device(struct idxd_device *idxd)
{
        struct dma_device *dma = &idxd->dma_dev;

        INIT_LIST_HEAD(&dma->channels);
        dma->dev = &idxd->pdev->dev;

        dma_cap_set(DMA_COMPLETION_NO_ORDER, dma->cap_mask);
        dma->device_release = idxd_dma_release;

        if (idxd->hw.opcap.bits[0] & IDXD_OPCAP_MEMMOVE) {
                dma_cap_set(DMA_MEMCPY, dma->cap_mask);
                dma->device_prep_dma_memcpy = idxd_dma_submit_memcpy;
        }

        dma->device_tx_status = idxd_dma_tx_status;
        dma->device_issue_pending = idxd_dma_issue_pending;
        dma->device_alloc_chan_resources = idxd_dma_alloc_chan_resources;
        dma->device_free_chan_resources = idxd_dma_free_chan_resources;

        return dma_async_device_register(&idxd->dma_dev);
}

void idxd_unregister_dma_device(struct idxd_device *idxd)
{
        dma_async_device_unregister(&idxd->dma_dev);
}
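
/*
 * Each work queue exposes one DMA channel. The embedded dma_chan is zeroed
 * and linked into the device's channel list before being registered with the
 * dmaengine core.
 */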
int idxd_register_dma_channel(struct idxd_wq *wq)
{
        struct idxd_device *idxd = wq->idxd;
        struct dma_device *dma = &idxd->dma_dev;
        struct dma_chan *chan = &wq->dma_chan;
        int rc;

        memset(&wq->dma_chan, 0, sizeof(struct dma_chan));
        chan->device = dma;
        list_add_tail(&chan->device_node, &dma->channels);
        rc = dma_async_device_channel_register(dma, chan);
        if (rc < 0)
                return rc;

        return 0;
}

void idxd_unregister_dma_channel(struct idxd_wq *wq)
{
        dma_async_device_channel_unregister(&wq->idxd->dma_dev, &wq->dma_chan);
}
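
/*
 * Minimal sketch of how a kernel client could drive one of these channels
 * through the generic dmaengine API. This is an illustration under the usual
 * dmaengine client conventions, not code from this driver; dst_dma, src_dma,
 * len, and my_memcpy_done are hypothetical.
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_chan_by_mask(&mask);
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);
 *
 *	tx = dmaengine_prep_dma_memcpy(chan, dst_dma, src_dma, len,
 *				       DMA_PREP_INTERRUPT);
 *	if (tx) {
 *		tx->callback_result = my_memcpy_done;	// out-of-order device:
 *							// rely on the callback,
 *							// not on tx_status()
 *		cookie = dmaengine_submit(tx);		// -> idxd_dma_tx_submit()
 *		dma_async_issue_pending(chan);		// a no-op for idxd
 *	}
 *	dma_release_channel(chan);
 */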