// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMD Passthrough DMA device driver
 * -- Based on the CCP driver
 *
 * Copyright (C) 2016,2021 Advanced Micro Devices, Inc.
 *
 * Author: Sanjay R Mehta <sanju.mehta@amd.com>
 * Author: Gary R Hook <gary.hook@amd.com>
 */

#include "ptdma.h"
#include "../dmaengine.h"
#include "../virt-dma.h"
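/*
 * Helpers to map the generic dmaengine/virt-dma handles back to the
 * driver-private channel and descriptor structures they are embedded in.
 */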
static inline struct pt_dma_chan *to_pt_chan(struct dma_chan *dma_chan)
{
	return container_of(dma_chan, struct pt_dma_chan, vc.chan);
}
static inline struct pt_dma_desc *to_pt_desc(struct virt_dma_desc *vd)
{
	return container_of(vd, struct pt_dma_desc, vd);
}
static void pt_free_chan_resources(struct dma_chan *dma_chan)
{
	struct pt_dma_chan *chan = to_pt_chan(dma_chan);

	vchan_free_chan_resources(&chan->vc);
}
static void pt_synchronize(struct dma_chan *dma_chan)
{
	struct pt_dma_chan *chan = to_pt_chan(dma_chan);

	vchan_synchronize(&chan->vc);
}
static void pt_do_cleanup(struct virt_dma_desc *vd)
{
	struct pt_dma_desc *desc = to_pt_desc(vd);
	struct pt_device *pt = desc->pt;

	kmem_cache_free(pt->dma_desc_cache, desc);
}
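/*
 * Mark a descriptor as issued to the hardware and hand its passthrough
 * command to the engine via pt_core_perform_passthru().
 */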
static int pt_dma_start_desc(struct pt_dma_desc *desc)
{
	struct pt_passthru_engine *pt_engine;
	struct pt_device *pt;
	struct pt_cmd *pt_cmd;
	struct pt_cmd_queue *cmd_q;

	desc->issued_to_hw = 1;

	pt_cmd = &desc->pt_cmd;
	pt = pt_cmd->pt;
	cmd_q = &pt->cmd_q;
	pt_engine = &pt_cmd->passthru;

	pt->tdata.cmd = pt_cmd;

	/* Execute the command */
	pt_cmd->ret = pt_core_perform_passthru(cmd_q, pt_engine);

	return 0;
}
static struct pt_dma_desc *pt_next_dma_desc(struct pt_dma_chan *chan)
{
	/* Get the next DMA descriptor on the active list */
	struct virt_dma_desc *vd = vchan_next_desc(&chan->vc);

	return vd ? to_pt_desc(vd) : NULL;
}
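/*
 * Walk the active list: complete the current descriptor (cookie, unmap,
 * callback) unless it still has to be issued, then return the next
 * descriptor that needs submitting, or NULL once the list is drained.
 */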
static struct pt_dma_desc *pt_handle_active_desc(struct pt_dma_chan *chan,
						 struct pt_dma_desc *desc)
{
	struct dma_async_tx_descriptor *tx_desc;
	struct virt_dma_desc *vd;
	unsigned long flags;

	/* Loop over descriptors until one is found with commands */
	do {
		if (desc) {
			if (!desc->issued_to_hw) {
				/* No errors, keep going */
				if (desc->status != DMA_ERROR)
					return desc;
			}

			tx_desc = &desc->vd.tx;
			vd = &desc->vd;
		} else {
			tx_desc = NULL;
		}

		spin_lock_irqsave(&chan->vc.lock, flags);

		if (desc) {
			if (desc->status != DMA_COMPLETE) {
				if (desc->status != DMA_ERROR)
					desc->status = DMA_COMPLETE;

				dma_cookie_complete(tx_desc);
				dma_descriptor_unmap(tx_desc);
				list_del(&desc->vd.node);
			} else {
				/* Don't handle it twice */
				tx_desc = NULL;
			}
		}

		desc = pt_next_dma_desc(chan);

		spin_unlock_irqrestore(&chan->vc.lock, flags);

		if (tx_desc) {
			dmaengine_desc_get_callback_invoke(tx_desc, NULL);
			dma_run_dependencies(tx_desc);
			vchan_vdesc_fini(vd);
		}
	} while (tx_desc);

	return NULL;
}
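/*
 * Command completion callback. Retires the finished descriptor and submits
 * the next active one; if submission fails, that descriptor is marked
 * DMA_ERROR and the loop moves on to the next entry.
 */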
static void pt_cmd_callback(void *data, int err)
{
	struct pt_dma_desc *desc = data;
	struct dma_chan *dma_chan;
	struct pt_dma_chan *chan;
	int ret;

	if (err == -EINPROGRESS)
		return;

	dma_chan = desc->vd.tx.chan;
	chan = to_pt_chan(dma_chan);

	if (err)
		desc->status = DMA_ERROR;

	while (true) {
		/* Check for DMA descriptor completion */
		desc = pt_handle_active_desc(chan, desc);

		/* Don't submit cmd if no descriptor or DMA is paused */
		if (!desc)
			break;

		ret = pt_dma_start_desc(desc);
		if (!ret)
			break;

		desc->status = DMA_ERROR;
	}
}
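/*
 * Allocate a descriptor from the per-device slab cache and prepare it for
 * the virt-dma framework.
 */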
static struct pt_dma_desc *pt_alloc_dma_desc(struct pt_dma_chan *chan,
					     unsigned long flags)
{
	struct pt_dma_desc *desc;

	desc = kmem_cache_zalloc(chan->pt->dma_desc_cache, GFP_NOWAIT);
	if (!desc)
		return NULL;

	vchan_tx_prep(&chan->vc, &desc->vd, flags);

	desc->pt = chan->pt;
	desc->pt->cmd_q.int_en = !!(flags & DMA_PREP_INTERRUPT);
	desc->issued_to_hw = 0;
	desc->status = DMA_IN_PROGRESS;

	return desc;
}
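/*
 * Build a passthrough (memcpy) command descriptor for the given source and
 * destination DMA addresses and length.
 */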
static struct pt_dma_desc *pt_create_desc(struct dma_chan *dma_chan,
					  dma_addr_t dst,
					  dma_addr_t src,
					  unsigned int len,
					  unsigned long flags)
{
	struct pt_dma_chan *chan = to_pt_chan(dma_chan);
	struct pt_passthru_engine *pt_engine;
	struct pt_dma_desc *desc;
	struct pt_cmd *pt_cmd;

	desc = pt_alloc_dma_desc(chan, flags);
	if (!desc)
		return NULL;

	pt_cmd = &desc->pt_cmd;
	pt_cmd->pt = chan->pt;
	pt_engine = &pt_cmd->passthru;
	pt_cmd->engine = PT_ENGINE_PASSTHRU;
	pt_engine->src_dma = src;
	pt_engine->dst_dma = dst;
	pt_engine->src_len = len;
	pt_cmd->pt_cmd_callback = pt_cmd_callback;
	pt_cmd->data = desc;

	return desc;
}
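/* dmaengine device_prep_dma_memcpy() and device_prep_dma_interrupt() hooks */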
static struct dma_async_tx_descriptor *
pt_prep_dma_memcpy(struct dma_chan *dma_chan, dma_addr_t dst,
		   dma_addr_t src, size_t len, unsigned long flags)
{
	struct pt_dma_desc *desc;

	desc = pt_create_desc(dma_chan, dst, src, len, flags);
	if (!desc)
		return NULL;

	return &desc->vd.tx;
}
static struct dma_async_tx_descriptor *
pt_prep_dma_interrupt(struct dma_chan *dma_chan, unsigned long flags)
{
	struct pt_dma_chan *chan = to_pt_chan(dma_chan);
	struct pt_dma_desc *desc;

	desc = pt_alloc_dma_desc(chan, flags);
	if (!desc)
		return NULL;

	return &desc->vd.tx;
}
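/*
 * Move submitted descriptors onto the active list and, if the engine was
 * idle, kick off processing of the first one.
 */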
static void pt_issue_pending(struct dma_chan *dma_chan)
{
	struct pt_dma_chan *chan = to_pt_chan(dma_chan);
	struct pt_dma_desc *desc;
	unsigned long flags;
	bool engine_is_idle = true;

	spin_lock_irqsave(&chan->vc.lock, flags);

	desc = pt_next_dma_desc(chan);
	if (desc)
		engine_is_idle = false;

	vchan_issue_pending(&chan->vc);

	desc = pt_next_dma_desc(chan);

	spin_unlock_irqrestore(&chan->vc.lock, flags);

	/* If there was nothing active, start processing */
	if (engine_is_idle && desc)
		pt_cmd_callback(desc, 0);
}
static enum dma_status
pt_tx_status(struct dma_chan *c, dma_cookie_t cookie,
	     struct dma_tx_state *txstate)
{
	struct pt_device *pt = to_pt_chan(c)->pt;
	struct pt_cmd_queue *cmd_q = &pt->cmd_q;

	pt_check_status_trans(pt, cmd_q);
	return dma_cookie_status(c, cookie, txstate);
}
static int pt_pause(struct dma_chan *dma_chan)
{
	struct pt_dma_chan *chan = to_pt_chan(dma_chan);
	unsigned long flags;

	spin_lock_irqsave(&chan->vc.lock, flags);
	pt_stop_queue(&chan->pt->cmd_q);
	spin_unlock_irqrestore(&chan->vc.lock, flags);

	return 0;
}
static int pt_resume(struct dma_chan *dma_chan)
{
	struct pt_dma_chan *chan = to_pt_chan(dma_chan);
	struct pt_dma_desc *desc = NULL;
	unsigned long flags;

	spin_lock_irqsave(&chan->vc.lock, flags);
	pt_start_queue(&chan->pt->cmd_q);
	desc = pt_next_dma_desc(chan);
	spin_unlock_irqrestore(&chan->vc.lock, flags);

	/* If there was something active, re-start */
	if (desc)
		pt_cmd_callback(desc, 0);

	return 0;
}
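/*
 * Abort all outstanding work on the channel and release every descriptor
 * it still holds.
 */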
static int pt_terminate_all(struct dma_chan *dma_chan)
{
	struct pt_dma_chan *chan = to_pt_chan(dma_chan);
	unsigned long flags;
	struct pt_cmd_queue *cmd_q = &chan->pt->cmd_q;
	LIST_HEAD(head);

	iowrite32(SUPPORTED_INTERRUPTS, cmd_q->reg_control + 0x0010);
	spin_lock_irqsave(&chan->vc.lock, flags);
	vchan_get_all_descriptors(&chan->vc, &head);
	spin_unlock_irqrestore(&chan->vc.lock, flags);

	vchan_dma_desc_free_list(&chan->vc, &head);
	vchan_free_chan_resources(&chan->vc);

	return 0;
}
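/*
 * Register the passthrough engine with the dmaengine core: allocate the
 * channel and the command/descriptor slab caches, describe the device
 * capabilities (private mem-to-mem memcpy), and wire up the channel ops.
 */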
int pt_dmaengine_register(struct pt_device *pt)
{
	struct pt_dma_chan *chan;
	struct dma_device *dma_dev = &pt->dma_dev;
	char *cmd_cache_name;
	char *desc_cache_name;
	int ret;

	pt->pt_dma_chan = devm_kzalloc(pt->dev, sizeof(*pt->pt_dma_chan),
				       GFP_KERNEL);
	if (!pt->pt_dma_chan)
		return -ENOMEM;

	cmd_cache_name = devm_kasprintf(pt->dev, GFP_KERNEL,
					"%s-dmaengine-cmd-cache",
					dev_name(pt->dev));
	if (!cmd_cache_name)
		return -ENOMEM;

	/* Slab cache for pt_cmd structures (torn down via err_cache below) */
	pt->dma_cmd_cache = kmem_cache_create(cmd_cache_name,
					      sizeof(struct pt_cmd),
					      sizeof(void *),
					      SLAB_HWCACHE_ALIGN, NULL);
	if (!pt->dma_cmd_cache)
		return -ENOMEM;

	desc_cache_name = devm_kasprintf(pt->dev, GFP_KERNEL,
					 "%s-dmaengine-desc-cache",
					 dev_name(pt->dev));
	if (!desc_cache_name) {
		ret = -ENOMEM;
		goto err_cache;
	}

	pt->dma_desc_cache = kmem_cache_create(desc_cache_name,
					       sizeof(struct pt_dma_desc), 0,
					       SLAB_HWCACHE_ALIGN, NULL);
	if (!pt->dma_desc_cache) {
		ret = -ENOMEM;
		goto err_cache;
	}

	dma_dev->dev = pt->dev;
	dma_dev->src_addr_widths = DMA_SLAVE_BUSWIDTH_64_BYTES;
	dma_dev->dst_addr_widths = DMA_SLAVE_BUSWIDTH_64_BYTES;
	dma_dev->directions = DMA_MEM_TO_MEM;
	dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
	dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
	dma_cap_set(DMA_INTERRUPT, dma_dev->cap_mask);

	/*
	 * PTDMA is intended to be used with the AMD NTB devices, hence
	 * marking it as DMA_PRIVATE.
	 */
	dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);

	INIT_LIST_HEAD(&dma_dev->channels);

	chan = pt->pt_dma_chan;
	chan->pt = pt;

	/* Set base and prep routines */
	dma_dev->device_free_chan_resources = pt_free_chan_resources;
	dma_dev->device_prep_dma_memcpy = pt_prep_dma_memcpy;
	dma_dev->device_prep_dma_interrupt = pt_prep_dma_interrupt;
	dma_dev->device_issue_pending = pt_issue_pending;
	dma_dev->device_tx_status = pt_tx_status;
	dma_dev->device_pause = pt_pause;
	dma_dev->device_resume = pt_resume;
	dma_dev->device_terminate_all = pt_terminate_all;
	dma_dev->device_synchronize = pt_synchronize;

	chan->vc.desc_free = pt_do_cleanup;
	vchan_init(&chan->vc, dma_dev);

	ret = dma_async_device_register(dma_dev);
	if (ret)
		goto err_reg;

	return 0;

err_reg:
	kmem_cache_destroy(pt->dma_desc_cache);

err_cache:
	kmem_cache_destroy(pt->dma_cmd_cache);

	return ret;
}
void pt_dmaengine_unregister(struct pt_device *pt)
{
	struct dma_device *dma_dev = &pt->dma_dev;

	dma_async_device_unregister(dma_dev);

	kmem_cache_destroy(pt->dma_desc_cache);
	kmem_cache_destroy(pt->dma_cmd_cache);
}