// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/dmaengine.h>
#include <uapi/linux/idxd.h>
#include "../dmaengine.h"
#include "registers.h"
#include "idxd.h"

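/*
 * struct dma_chan is embedded in struct idxd_dma_chan, so any dmaengine
 * channel pointer can be mapped back to its owning wq via container_of().
 */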
static inline struct idxd_wq *to_idxd_wq(struct dma_chan *c)
{
	struct idxd_dma_chan *idxd_chan;

	idxd_chan = container_of(c, struct idxd_dma_chan, chan);
	return idxd_chan->wq;
}

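/*
 * Translate the DSA completion record status into a dmaengine result and
 * run the client callback. A DSA_COMP_INT_HANDLE_INVAL error is retried
 * via interrupt-handle resubmission when the device uses requested
 * interrupt handles; any other nonzero status is a write failure.
 */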
void idxd_dma_complete_txd(struct idxd_desc *desc,
			   enum idxd_complete_type comp_type,
			   bool free_desc, void *ctx, u32 *status)
{
	struct idxd_device *idxd = desc->wq->idxd;
	struct dma_async_tx_descriptor *tx;
	struct dmaengine_result res;
	int complete = 1;

	if (desc->completion->status == DSA_COMP_SUCCESS) {
		res.result = DMA_TRANS_NOERROR;
	} else if (desc->completion->status) {
		if (idxd->request_int_handles && comp_type != IDXD_COMPLETE_ABORT &&
		    desc->completion->status == DSA_COMP_INT_HANDLE_INVAL &&
		    idxd_queue_int_handle_resubmit(desc))
			return;
		res.result = DMA_TRANS_WRITE_FAILED;
	} else if (comp_type == IDXD_COMPLETE_ABORT) {
		res.result = DMA_TRANS_ABORTED;
	} else {
		complete = 0;
	}

	tx = &desc->txd;
	if (complete && tx->cookie) {
		dma_cookie_complete(tx);
		dma_descriptor_unmap(tx);
		dmaengine_desc_get_callback_invoke(tx, &res);
		tx->callback = NULL;
		tx->callback_result = NULL;
	}

	if (free_desc)
		idxd_free_desc(desc->wq, desc);
}

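/*
 * Every kernel descriptor asks the device for a completion record
 * (IDXD_OP_FLAG_RCR) at a valid completion address (IDXD_OP_FLAG_CRAV);
 * a completion interrupt (IDXD_OP_FLAG_RCI) is added only when the
 * client passed DMA_PREP_INTERRUPT.
 */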
static void op_flag_setup(unsigned long flags, u32 *desc_flags)
{
	*desc_flags = IDXD_OP_FLAG_CRAV | IDXD_OP_FLAG_RCR;
	if (flags & DMA_PREP_INTERRUPT)
		*desc_flags |= IDXD_OP_FLAG_RCI;
}

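/*
 * Populate the fields common to every DSA hardware descriptor issued by
 * this driver; opcode-specific fields are left to the callers.
 */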
static inline void idxd_prep_desc_common(struct idxd_wq *wq,
					 struct dsa_hw_desc *hw, char opcode,
					 u64 addr_f1, u64 addr_f2, u64 len,
					 u64 compl, u32 flags)
{
	hw->flags = flags;
	hw->opcode = opcode;
	hw->src_addr = addr_f1;
	hw->dst_addr = addr_f2;
	hw->xfer_size = len;
	/*
	 * For dedicated WQ, this field is ignored and HW will use the WQCFG.priv
	 * field instead. This field should be set to 0 for kernel descriptors
	 * since kernel DMA on VT-d supports "user" privilege only.
	 */
	hw->priv = 0;
	hw->completion_addr = compl;
}

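/*
 * DMA_INTERRUPT is implemented as a zero-length NOOP descriptor whose only
 * observable effect is the completion interrupt it requests.
 */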
static struct dma_async_tx_descriptor *
idxd_dma_prep_interrupt(struct dma_chan *c, unsigned long flags)
{
	struct idxd_wq *wq = to_idxd_wq(c);
	u32 desc_flags;
	struct idxd_desc *desc;

	if (wq->state != IDXD_WQ_ENABLED)
		return NULL;

	op_flag_setup(flags, &desc_flags);
	desc = idxd_alloc_desc(wq, IDXD_OP_BLOCK);
	if (IS_ERR(desc))
		return NULL;

	idxd_prep_desc_common(wq, desc->hw, DSA_OPCODE_NOOP,
			      0, 0, 0, desc->compl_dma, desc_flags);
	desc->txd.flags = flags;
	return &desc->txd;
}

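/*
 * Memcpy maps to the DSA MEMMOVE opcode. Transfers larger than the
 * device's advertised max_xfer_bytes are rejected rather than split.
 */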
static struct dma_async_tx_descriptor *
idxd_dma_submit_memcpy(struct dma_chan *c, dma_addr_t dma_dest,
		       dma_addr_t dma_src, size_t len, unsigned long flags)
{
	struct idxd_wq *wq = to_idxd_wq(c);
	u32 desc_flags;
	struct idxd_device *idxd = wq->idxd;
	struct idxd_desc *desc;

	if (wq->state != IDXD_WQ_ENABLED)
		return NULL;

	if (len > idxd->max_xfer_bytes)
		return NULL;

	op_flag_setup(flags, &desc_flags);
	desc = idxd_alloc_desc(wq, IDXD_OP_BLOCK);
	if (IS_ERR(desc))
		return NULL;

	idxd_prep_desc_common(wq, desc->hw, DSA_OPCODE_MEMMOVE,
			      dma_src, dma_dest, len, desc->compl_dma,
			      desc_flags);

	desc->txd.flags = flags;

	return &desc->txd;
}

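/*
 * There are no per-channel resources to allocate here; descriptors are
 * preallocated with the wq and wired up in idxd_register_dma_channel(), so
 * this only pins the wq with a reference and logs the client count.
 */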
static int idxd_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct idxd_wq *wq = to_idxd_wq(chan);
	struct device *dev = &wq->idxd->pdev->dev;

	idxd_wq_get(wq);
	dev_dbg(dev, "%s: client_count: %d\n", __func__,
		idxd_wq_refcount(wq));
	return 0;
}

static void idxd_dma_free_chan_resources(struct dma_chan *chan)
{
	struct idxd_wq *wq = to_idxd_wq(chan);
	struct device *dev = &wq->idxd->pdev->dev;

	idxd_wq_put(wq);
	dev_dbg(dev, "%s: client_count: %d\n", __func__,
		idxd_wq_refcount(wq));
}

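/*
 * Completions can arrive out of order, so per-cookie progress cannot be
 * reported; DMA_COMPLETION_NO_ORDER is set on the device and clients must
 * rely on the per-descriptor callbacks instead.
 */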
static enum dma_status idxd_dma_tx_status(struct dma_chan *dma_chan,
					  dma_cookie_t cookie,
					  struct dma_tx_state *txstate)
{
	return DMA_OUT_OF_ORDER;
}

/*
 * issue_pending() does not need to do anything since tx_submit() does the job
 * already.
 */
static void idxd_dma_issue_pending(struct dma_chan *dma_chan)
{
}

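/*
 * Submission happens here: the descriptor is handed to the device via
 * idxd_submit_desc() as soon as the client calls tx_submit(), which is why
 * issue_pending() above has no pending queue to flush.
 */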
static dma_cookie_t idxd_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct dma_chan *c = tx->chan;
	struct idxd_wq *wq = to_idxd_wq(c);
	dma_cookie_t cookie;
	int rc;
	struct idxd_desc *desc = container_of(tx, struct idxd_desc, txd);

	cookie = dma_cookie_assign(tx);

	rc = idxd_submit_desc(wq, desc);
	if (rc < 0) {
		idxd_free_desc(wq, desc);
		return rc;
	}

	return cookie;
}

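/*
 * Invoked by the dmaengine core once the last reference on the dma_device
 * is dropped; only then can the containing idxd_dma_dev be freed.
 */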
static void idxd_dma_release(struct dma_device *device)
{
	struct idxd_dma_dev *idxd_dma = container_of(device, struct idxd_dma_dev, dma);

	kfree(idxd_dma);
}

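/*
 * One dma_device is registered per idxd device; channels are attached
 * later, one per wq, as the dmaengine wq sub-driver binds.
 */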
int idxd_register_dma_device(struct idxd_device *idxd)
{
	struct idxd_dma_dev *idxd_dma;
	struct dma_device *dma;
	struct device *dev = &idxd->pdev->dev;
	int rc;

	idxd_dma = kzalloc_node(sizeof(*idxd_dma), GFP_KERNEL, dev_to_node(dev));
	if (!idxd_dma)
		return -ENOMEM;

	dma = &idxd_dma->dma;
	INIT_LIST_HEAD(&dma->channels);
	dma->dev = dev;

	dma_cap_set(DMA_INTERRUPT, dma->cap_mask);
	dma_cap_set(DMA_PRIVATE, dma->cap_mask);
	dma_cap_set(DMA_COMPLETION_NO_ORDER, dma->cap_mask);
	dma->device_release = idxd_dma_release;

	dma->device_prep_dma_interrupt = idxd_dma_prep_interrupt;
	if (idxd->hw.opcap.bits[0] & IDXD_OPCAP_MEMMOVE) {
		dma_cap_set(DMA_MEMCPY, dma->cap_mask);
		dma->device_prep_dma_memcpy = idxd_dma_submit_memcpy;
	}

	dma->device_tx_status = idxd_dma_tx_status;
	dma->device_issue_pending = idxd_dma_issue_pending;
	dma->device_alloc_chan_resources = idxd_dma_alloc_chan_resources;
	dma->device_free_chan_resources = idxd_dma_free_chan_resources;

	rc = dma_async_device_register(dma);
	if (rc < 0) {
		kfree(idxd_dma);
		return rc;
	}

	idxd_dma->idxd = idxd;
	/*
	 * This pointer is protected by the refs taken by the dma_chan. It will remain valid
	 * as long as there are outstanding channels.
	 */
	idxd->idxd_dma = idxd_dma;
	return 0;
}

void idxd_unregister_dma_device(struct idxd_device *idxd)
{
	dma_async_device_unregister(&idxd->idxd_dma->dma);
}

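/*
 * Expose a wq as a DMA channel: hook tx_submit into every preallocated
 * descriptor, register the channel, and take a reference on the wq's conf
 * device so the parent idxd_dma stays valid while the channel exists.
 */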
static int idxd_register_dma_channel(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct dma_device *dma = &idxd->idxd_dma->dma;
	struct device *dev = &idxd->pdev->dev;
	struct idxd_dma_chan *idxd_chan;
	struct dma_chan *chan;
	int rc, i;

	idxd_chan = kzalloc_node(sizeof(*idxd_chan), GFP_KERNEL, dev_to_node(dev));
	if (!idxd_chan)
		return -ENOMEM;

	chan = &idxd_chan->chan;
	chan->device = dma;
	list_add_tail(&chan->device_node, &dma->channels);

	for (i = 0; i < wq->num_descs; i++) {
		struct idxd_desc *desc = wq->descs[i];

		dma_async_tx_descriptor_init(&desc->txd, chan);
		desc->txd.tx_submit = idxd_dma_tx_submit;
	}

	rc = dma_async_device_channel_register(dma, chan, NULL);
	if (rc < 0) {
		kfree(idxd_chan);
		return rc;
	}

	wq->idxd_chan = idxd_chan;
	idxd_chan->wq = wq;
	get_device(wq_confdev(wq));

	return 0;
}

static void idxd_unregister_dma_channel(struct idxd_wq *wq)
{
	struct idxd_dma_chan *idxd_chan = wq->idxd_chan;
	struct dma_chan *chan = &idxd_chan->chan;
	struct idxd_dma_dev *idxd_dma = wq->idxd->idxd_dma;

	dma_async_device_channel_unregister(&idxd_dma->dma, chan);
	list_del(&chan->device_node);
	kfree(wq->idxd_chan);
	wq->idxd_chan = NULL;
	put_device(wq_confdev(wq));
}

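/*
 * Probe for the wq "dmaengine" sub-driver: claim the wq for kernel use,
 * enable it, then register it as a DMA channel. Every error path restores
 * wq->type under the wq lock.
 */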
static int idxd_dmaengine_drv_probe(struct idxd_dev *idxd_dev)
{
	struct device *dev = &idxd_dev->conf_dev;
	struct idxd_wq *wq = idxd_dev_to_wq(idxd_dev);
	struct idxd_device *idxd = wq->idxd;
	int rc;

	if (idxd->state != IDXD_DEV_ENABLED)
		return -ENXIO;

	mutex_lock(&wq->wq_lock);
	if (!idxd_wq_driver_name_match(wq, dev)) {
		idxd->cmd_status = IDXD_SCMD_WQ_NO_DRV_NAME;
		rc = -ENODEV;
		goto err;
	}

	wq->type = IDXD_WQT_KERNEL;

	rc = idxd_drv_enable_wq(wq);
	if (rc < 0) {
		dev_dbg(dev, "Enable wq %d failed: %d\n", wq->id, rc);
		rc = -ENXIO;
		goto err;
	}

	rc = idxd_register_dma_channel(wq);
	if (rc < 0) {
		idxd->cmd_status = IDXD_SCMD_DMA_CHAN_ERR;
		dev_dbg(dev, "Failed to register dma channel\n");
		goto err_dma;
	}

	idxd->cmd_status = 0;
	mutex_unlock(&wq->wq_lock);
	return 0;

err_dma:
	idxd_drv_disable_wq(wq);
err:
	wq->type = IDXD_WQT_NONE;
	mutex_unlock(&wq->wq_lock);
	return rc;
}

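/*
 * Remove mirrors probe in reverse; the wq is quiesced first so no
 * in-flight completions race with channel unregistration.
 */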
static void idxd_dmaengine_drv_remove(struct idxd_dev *idxd_dev)
{
	struct idxd_wq *wq = idxd_dev_to_wq(idxd_dev);

	mutex_lock(&wq->wq_lock);
	__idxd_wq_quiesce(wq);
	idxd_unregister_dma_channel(wq);
	idxd_drv_disable_wq(wq);
	mutex_unlock(&wq->wq_lock);
}

static enum idxd_dev_type dev_types[] = {
	IDXD_DEV_WQ,
	IDXD_DEV_NONE,
};

struct idxd_device_driver idxd_dmaengine_drv = {
	.probe = idxd_dmaengine_drv_probe,
	.remove = idxd_dmaengine_drv_remove,
	.desc_complete = idxd_dma_complete_txd,
	.name = "dmaengine",
	.type = dev_types,
};
EXPORT_SYMBOL_GPL(idxd_dmaengine_drv);