// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/dmaengine.h>
#include <uapi/linux/idxd.h>
#include "../dmaengine.h"
#include "registers.h"
#include "idxd.h"
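
/* Map a generic dmaengine channel back to the idxd work queue that embeds it. */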
static inline struct idxd_wq *to_idxd_wq(struct dma_chan *c)
{
	return container_of(c, struct idxd_wq, dma_chan);
}
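
/*
 * Translate the DSA completion record into a struct dmaengine_result and,
 * if the descriptor was submitted with a valid cookie, complete the cookie,
 * unmap the transfer, and invoke the client's callback.
 */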
void idxd_dma_complete_txd(struct idxd_desc *desc,
			   enum idxd_complete_type comp_type)
{
	struct dma_async_tx_descriptor *tx;
	struct dmaengine_result res;
	int complete = 1;

	if (desc->completion->status == DSA_COMP_SUCCESS)
		res.result = DMA_TRANS_NOERROR;
	else if (desc->completion->status)
		res.result = DMA_TRANS_WRITE_FAILED;
	else if (comp_type == IDXD_COMPLETE_ABORT)
		res.result = DMA_TRANS_ABORTED;
	else
		/* No status and not an abort: descriptor still in flight. */
		complete = 0;

	tx = &desc->txd;
	if (complete && tx->cookie) {
		dma_cookie_complete(tx);
		dma_descriptor_unmap(tx);
		dmaengine_desc_get_callback_invoke(tx, &res);
		tx->callback = NULL;
		tx->callback_result = NULL;
	}
}
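
/*
 * Build the DSA descriptor operation flags: always request a completion
 * record at a valid address (RCR | CRAV), and additionally request a
 * completion interrupt (RCI) when the client asked for DMA_PREP_INTERRUPT.
 */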
static void op_flag_setup(unsigned long flags, u32 *desc_flags)
{
	*desc_flags = IDXD_OP_FLAG_CRAV | IDXD_OP_FLAG_RCR;
	if (flags & DMA_PREP_INTERRUPT)
		*desc_flags |= IDXD_OP_FLAG_RCI;
}

static inline void set_completion_address(struct idxd_desc *desc,
					  u64 *compl_addr)
{
	*compl_addr = desc->compl_dma;
}
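
/*
 * Fill in the hardware descriptor fields common to all DSA operations.
 * The operation flags come from op_flag_setup() and the completion record
 * address from the descriptor's compl_dma.
 */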
static inline void idxd_prep_desc_common(struct idxd_wq *wq,
					 struct dsa_hw_desc *hw, char opcode,
					 u64 addr_f1, u64 addr_f2, u64 len,
					 u64 compl, u32 flags)
{
	hw->flags = flags;
	hw->opcode = opcode;
	hw->src_addr = addr_f1;
	hw->dst_addr = addr_f2;
	hw->xfer_size = len;
	/* Descriptors on a kernel-owned wq execute privileged. */
	hw->priv = !!(wq->type == IDXD_WQT_KERNEL);
	hw->completion_addr = compl;
}
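
/*
 * ->device_prep_dma_memcpy(): allocate a descriptor on the wq and encode a
 * DSA MEMMOVE operation. Returns NULL if the wq is not enabled, the length
 * exceeds the device limit, or no descriptor is available.
 */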
static struct dma_async_tx_descriptor *
idxd_dma_submit_memcpy(struct dma_chan *c, dma_addr_t dma_dest,
		       dma_addr_t dma_src, size_t len, unsigned long flags)
{
	struct idxd_wq *wq = to_idxd_wq(c);
	u32 desc_flags;
	struct idxd_device *idxd = wq->idxd;
	struct idxd_desc *desc;

	if (wq->state != IDXD_WQ_ENABLED)
		return NULL;

	if (len > idxd->max_xfer_bytes)
		return NULL;

	op_flag_setup(flags, &desc_flags);
	desc = idxd_alloc_desc(wq, IDXD_OP_BLOCK);
	if (IS_ERR(desc))
		return NULL;

	idxd_prep_desc_common(wq, desc->hw, DSA_OPCODE_MEMMOVE,
			      dma_src, dma_dest, len, desc->compl_dma,
			      desc_flags);
	desc->txd.flags = flags;
	return &desc->txd;
}
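
/* Take a reference on the wq for each client that grabs the channel. */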
static int idxd_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct idxd_wq *wq = to_idxd_wq(chan);
	struct device *dev = &wq->idxd->pdev->dev;

	idxd_wq_get(wq);
	dev_dbg(dev, "%s: client_count: %d\n", __func__,
		idxd_wq_refcount(wq));
	return 0;
}
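
/* Drop the wq reference taken in idxd_dma_alloc_chan_resources(). */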
static void idxd_dma_free_chan_resources(struct dma_chan *chan)
{
	struct idxd_wq *wq = to_idxd_wq(chan);
	struct device *dev = &wq->idxd->pdev->dev;

	idxd_wq_put(wq);
	dev_dbg(dev, "%s: client_count: %d\n", __func__,
		idxd_wq_refcount(wq));
}
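
/*
 * The device completes descriptors out of order, so per-cookie progress
 * cannot be reported; clients must rely on the DMA_COMPLETION_NO_ORDER
 * capability and per-descriptor callbacks instead.
 */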
static enum dma_status idxd_dma_tx_status(struct dma_chan *dma_chan,
					  dma_cookie_t cookie,
					  struct dma_tx_state *txstate)
{
	return DMA_OUT_OF_ORDER;
}

/*
 * issue_pending() does not need to do anything since tx_submit() does the job
 * already.
 */
static void idxd_dma_issue_pending(struct dma_chan *dma_chan)
{
}
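
/*
 * Assign the cookie and hand the descriptor straight to the hardware; on
 * submission failure the descriptor is freed and the error returned.
 */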
dma_cookie_t idxd_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct dma_chan *c = tx->chan;
	struct idxd_wq *wq = to_idxd_wq(c);
	dma_cookie_t cookie;
	int rc;
	struct idxd_desc *desc = container_of(tx, struct idxd_desc, txd);

	cookie = dma_cookie_assign(tx);

	rc = idxd_submit_desc(wq, desc);
	if (rc < 0) {
		idxd_free_desc(wq, desc);
		return rc;
	}

	return cookie;
}
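
/*
 * The dma_device is embedded in the idxd_device, so there is nothing to
 * free when the dmaengine core drops its last reference.
 */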
static void idxd_dma_release(struct dma_device *device)
{
}
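
/*
 * Register the idxd device as a dmaengine provider. Capabilities are
 * advertised from the operation capability bits read from hardware;
 * memcpy is only exposed when the device supports MEMMOVE.
 */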
int idxd_register_dma_device(struct idxd_device *idxd)
{
	struct dma_device *dma = &idxd->dma_dev;

	INIT_LIST_HEAD(&dma->channels);
	dma->dev = &idxd->pdev->dev;

	dma_cap_set(DMA_COMPLETION_NO_ORDER, dma->cap_mask);
	dma->device_release = idxd_dma_release;

	if (idxd->hw.opcap.bits[0] & IDXD_OPCAP_MEMMOVE) {
		dma_cap_set(DMA_MEMCPY, dma->cap_mask);
		dma->device_prep_dma_memcpy = idxd_dma_submit_memcpy;
	}

	dma->device_tx_status = idxd_dma_tx_status;
	dma->device_issue_pending = idxd_dma_issue_pending;
	dma->device_alloc_chan_resources = idxd_dma_alloc_chan_resources;
	dma->device_free_chan_resources = idxd_dma_free_chan_resources;

	return dma_async_device_register(&idxd->dma_dev);
}

void idxd_unregister_dma_device(struct idxd_device *idxd)
{
	dma_async_device_unregister(&idxd->dma_dev);
}
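
/*
 * Expose one wq as a DMA channel on the idxd dma_device. The embedded
 * dma_chan is zeroed before registration to clear any state left over
 * from a previous use of the wq.
 */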
int idxd_register_dma_channel(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct dma_device *dma = &idxd->dma_dev;
	struct dma_chan *chan = &wq->dma_chan;
	int rc;

	memset(&wq->dma_chan, 0, sizeof(struct dma_chan));
	chan->device = dma;
	list_add_tail(&chan->device_node, &dma->channels);
	rc = dma_async_device_channel_register(dma, chan);
	if (rc < 0)
		return rc;

	return 0;
}

void idxd_unregister_dma_channel(struct idxd_wq *wq)
{
	dma_async_device_channel_unregister(&wq->idxd->dma_dev, &wq->dma_chan);
}
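
/*
 * Sketch of a kernel dmaengine client driving this provider; the names
 * my_done, ctx, dst_dma, src_dma, and len are illustrative only. Because
 * this device advertises DMA_COMPLETION_NO_ORDER, the client should opt
 * in to out-of-order completion in its capability mask and track
 * completion through the callback rather than by polling the cookie:
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	dma_cap_set(DMA_COMPLETION_NO_ORDER, mask);
 *	chan = dma_request_channel(mask, NULL, NULL);
 *	if (!chan)
 *		return -ENODEV;
 *	tx = dmaengine_prep_dma_memcpy(chan, dst_dma, src_dma, len,
 *				       DMA_PREP_INTERRUPT);
 *	if (!tx)
 *		return -ENOMEM;
 *	tx->callback = my_done;
 *	tx->callback_param = ctx;
 *	cookie = dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);	(a no-op here; submit did the work)
 */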