// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/dmaengine.h>
#include <uapi/linux/idxd.h>
#include "../dmaengine.h"
#include "registers.h"
#include "idxd.h"
static inline struct idxd_wq *to_idxd_wq(struct dma_chan *c)
{
	return container_of(c, struct idxd_wq, dma_chan);
}
void idxd_dma_complete_txd(struct idxd_desc *desc,
			   enum idxd_complete_type comp_type)
{
	struct dma_async_tx_descriptor *tx;
	struct dmaengine_result res;
	int complete = 1;

	/* Translate the DSA completion record status into a dmaengine result. */
	if (desc->completion->status == DSA_COMP_SUCCESS)
		res.result = DMA_TRANS_NOERROR;
	else if (desc->completion->status)
		res.result = DMA_TRANS_WRITE_FAILED;
	else if (comp_type == IDXD_COMPLETE_ABORT)
		res.result = DMA_TRANS_ABORTED;
	else
		complete = 0;

	tx = &desc->txd;
	if (complete && tx->cookie) {
		dma_cookie_complete(tx);
		dma_descriptor_unmap(tx);
		dmaengine_desc_get_callback_invoke(tx, &res);
		tx->callback = NULL;
		tx->callback_result = NULL;
	}
}
static void op_flag_setup(unsigned long flags, u32 *desc_flags)
{
	*desc_flags = IDXD_OP_FLAG_CRAV | IDXD_OP_FLAG_RCR;
	if (flags & DMA_PREP_INTERRUPT)
		*desc_flags |= IDXD_OP_FLAG_RCI;
}
static inline void set_completion_address(struct idxd_desc *desc,
					  u64 *compl_addr)
{
	*compl_addr = desc->compl_dma;
}
static inline void idxd_prep_desc_common(struct idxd_wq *wq,
					 struct dsa_hw_desc *hw, char opcode,
					 u64 addr_f1, u64 addr_f2, u64 len,
					 u64 compl, u32 flags)
{
	struct idxd_device *idxd = wq->idxd;

	hw->flags = flags;
	hw->opcode = opcode;
	hw->src_addr = addr_f1;
	hw->dst_addr = addr_f2;
	hw->xfer_size = len;
	hw->priv = !!(wq->type == IDXD_WQT_KERNEL);
	hw->completion_addr = compl;

	/*
	 * Descriptor completion vectors are 1-8 for MSIX. We will round
	 * robin through the 8 vectors.
	 */
	wq->vec_ptr = (wq->vec_ptr % idxd->num_wq_irqs) + 1;
	hw->int_handle = wq->vec_ptr;
}
static struct dma_async_tx_descriptor *
idxd_dma_submit_memcpy(struct dma_chan *c, dma_addr_t dma_dest,
		       dma_addr_t dma_src, size_t len, unsigned long flags)
{
	struct idxd_wq *wq = to_idxd_wq(c);
	u32 desc_flags;
	struct idxd_device *idxd = wq->idxd;
	struct idxd_desc *desc;

	if (wq->state != IDXD_WQ_ENABLED)
		return NULL;

	if (len > idxd->max_xfer_bytes)
		return NULL;

	op_flag_setup(flags, &desc_flags);
	desc = idxd_alloc_desc(wq, IDXD_OP_BLOCK);
	if (IS_ERR(desc))
		return NULL;

	idxd_prep_desc_common(wq, desc->hw, DSA_OPCODE_MEMMOVE,
			      dma_src, dma_dest, len, desc->compl_dma,
			      desc_flags);

	desc->txd.flags = flags;

	return &desc->txd;
}
static int idxd_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct idxd_wq *wq = to_idxd_wq(chan);
	struct device *dev = &wq->idxd->pdev->dev;

	idxd_wq_get(wq);
	dev_dbg(dev, "%s: client_count: %d\n", __func__,
		idxd_wq_refcount(wq));
	return 0;
}
static void idxd_dma_free_chan_resources(struct dma_chan *chan)
{
	struct idxd_wq *wq = to_idxd_wq(chan);
	struct device *dev = &wq->idxd->pdev->dev;

	idxd_wq_put(wq);
	dev_dbg(dev, "%s: client_count: %d\n", __func__,
		idxd_wq_refcount(wq));
}
static enum dma_status idxd_dma_tx_status(struct dma_chan *dma_chan,
					  dma_cookie_t cookie,
					  struct dma_tx_state *txstate)
{
	return dma_cookie_status(dma_chan, cookie, txstate);
}
/*
 * issue_pending() does not need to do anything since tx_submit() does the job
 * already.
 */
static void idxd_dma_issue_pending(struct dma_chan *dma_chan)
{
}
dma_cookie_t idxd_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct dma_chan *c = tx->chan;
	struct idxd_wq *wq = to_idxd_wq(c);
	dma_cookie_t cookie;
	int rc;
	struct idxd_desc *desc = container_of(tx, struct idxd_desc, txd);

	cookie = dma_cookie_assign(tx);

	rc = idxd_submit_desc(wq, desc);
	if (rc < 0) {
		idxd_free_desc(wq, desc);
		return rc;
	}

	return cookie;
}
static void idxd_dma_release(struct dma_device *device)
{
}
int idxd_register_dma_device(struct idxd_device *idxd)
{
	struct dma_device *dma = &idxd->dma_dev;

	INIT_LIST_HEAD(&dma->channels);
	dma->dev = &idxd->pdev->dev;

	dma->device_release = idxd_dma_release;

	if (idxd->hw.opcap.bits[0] & IDXD_OPCAP_MEMMOVE) {
		dma_cap_set(DMA_MEMCPY, dma->cap_mask);
		dma->device_prep_dma_memcpy = idxd_dma_submit_memcpy;
	}

	dma->device_tx_status = idxd_dma_tx_status;
	dma->device_issue_pending = idxd_dma_issue_pending;
	dma->device_alloc_chan_resources = idxd_dma_alloc_chan_resources;
	dma->device_free_chan_resources = idxd_dma_free_chan_resources;

	return dma_async_device_register(&idxd->dma_dev);
}
void idxd_unregister_dma_device(struct idxd_device *idxd)
{
	dma_async_device_unregister(&idxd->dma_dev);
}
int idxd_register_dma_channel(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct dma_device *dma = &idxd->dma_dev;
	struct dma_chan *chan = &wq->dma_chan;
	int rc;

	memset(&wq->dma_chan, 0, sizeof(struct dma_chan));
	chan->device = dma;
	list_add_tail(&chan->device_node, &dma->channels);
	rc = dma_async_device_channel_register(dma, chan);
	if (rc < 0)
		return rc;

	return 0;
}
void idxd_unregister_dma_channel(struct idxd_wq *wq)
{
	dma_async_device_channel_unregister(&wq->idxd->dma_dev, &wq->dma_chan);
}
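
/*
 * Illustrative sketch only, not part of this driver: how a generic dmaengine
 * client might exercise a memcpy-capable channel such as the ones registered
 * above. The function name and buffers are placeholders; only the standard
 * dmaengine client API is used.
 */
static int __maybe_unused example_idxd_memcpy(void *dst, void *src, size_t len)
{
	dma_cap_mask_t mask;
	struct dma_chan *chan;
	struct dma_async_tx_descriptor *tx;
	struct device *dev;
	dma_addr_t dma_src, dma_dst;
	dma_cookie_t cookie;
	int rc = 0;

	/* Ask the dmaengine core for any channel advertising DMA_MEMCPY. */
	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);
	chan = dma_request_chan_by_mask(&mask);
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	dev = chan->device->dev;
	dma_src = dma_map_single(dev, src, len, DMA_TO_DEVICE);
	dma_dst = dma_map_single(dev, dst, len, DMA_FROM_DEVICE);

	/* Resolves to idxd_dma_submit_memcpy() when an idxd channel is picked. */
	tx = dmaengine_prep_dma_memcpy(chan, dma_dst, dma_src, len,
				       DMA_PREP_INTERRUPT);
	if (!tx) {
		rc = -ENOMEM;
		goto unmap;
	}

	cookie = dmaengine_submit(tx);		/* idxd_dma_tx_submit() */
	dma_async_issue_pending(chan);		/* no-op for idxd */

	if (dma_sync_wait(chan, cookie) != DMA_COMPLETE)
		rc = -EIO;
unmap:
	dma_unmap_single(dev, dma_dst, len, DMA_FROM_DEVICE);
	dma_unmap_single(dev, dma_src, len, DMA_TO_DEVICE);
	dma_release_channel(chan);
	return rc;
}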