// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMD Passthru DMA device driver
 * -- Based on the CCP driver
 *
 * Copyright (C) 2016,2021 Advanced Micro Devices, Inc.
 *
 * Author: Sanjay R Mehta <sanju.mehta@amd.com>
 * Author: Gary R Hook <gary.hook@amd.com>
 */

#include <linux/bitfield.h>
#include <linux/dma-mapping.h>
#include <linux/debugfs.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "ptdma.h"

/* Human-readable error strings */
static char *pt_error_codes[] = {
	"",
	"ERR 01: ILLEGAL_ENGINE",
	"ERR 03: ILLEGAL_FUNCTION_TYPE",
	"ERR 04: ILLEGAL_FUNCTION_MODE",
	"ERR 06: ILLEGAL_FUNCTION_SIZE",
	"ERR 08: ILLEGAL_FUNCTION_RSVD",
	"ERR 09: ILLEGAL_BUFFER_LENGTH",
	"ERR 10: VLSB_FAULT",
	"ERR 11: ILLEGAL_MEM_ADDR",
	"ERR 12: ILLEGAL_MEM_SEL",
	"ERR 13: ILLEGAL_CONTEXT_ID",
	"ERR 15: 0xF Reserved",
	"ERR 18: CMD_TIMEOUT",
	"ERR 19: IDMA0_AXI_SLVERR",
	"ERR 20: IDMA0_AXI_DECERR",
	"ERR 21: 0x15 Reserved",
	"ERR 22: IDMA1_AXI_SLAVE_FAULT",
	"ERR 23: IDMA1_AIXI_DECERR",
	"ERR 24: 0x18 Reserved",
	"ERR 27: 0x1B Reserved",
	"ERR 38: ODMA0_AXI_SLVERR",
	"ERR 39: ODMA0_AXI_DECERR",
	"ERR 40: 0x28 Reserved",
	"ERR 41: ODMA1_AXI_SLVERR",
	"ERR 42: ODMA1_AXI_DECERR",
	"ERR 43: LSB_PARITY_ERR",
};

static void pt_log_error(struct pt_device *d, int e)
{
	dev_err(d->dev, "PTDMA error: %s (0x%x)\n", pt_error_codes[e], e);
}

void pt_start_queue(struct pt_cmd_queue *cmd_q)
{
	/* Turn on the run bit */
	iowrite32(cmd_q->qcontrol | CMD_Q_RUN, cmd_q->reg_control);
}

void pt_stop_queue(struct pt_cmd_queue *cmd_q)
{
	/* Turn off the run bit */
	iowrite32(cmd_q->qcontrol & ~CMD_Q_RUN, cmd_q->reg_control);
}
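
/*
 * Fixed offsets from cmd_q->reg_control used throughout this file
 * (inferred from how the code below reads and writes them; the symbolic
 * register names live in ptdma.h):
 *   +0x0004  queue tail, the next descriptor slot for the engine to fetch
 *   +0x0008  queue head, rewritten on error to flush past a bad descriptor
 *   +0x0010  interrupt status, written back to acknowledge
 *   +0x0100  queue status, error code extracted with CMD_Q_ERROR()
 *   +0x0104  queue interrupt status
 */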

static int pt_core_execute_cmd(struct ptdma_desc *desc, struct pt_cmd_queue *cmd_q)
{
	bool soc = FIELD_GET(DWORD0_SOC, desc->dw0);
	u8 *q_desc = (u8 *)&cmd_q->qbase[cmd_q->qidx];
	u32 tail;
	unsigned long flags;

	if (soc) {
		desc->dw0 |= FIELD_PREP(DWORD0_IOC, desc->dw0);
		desc->dw0 &= ~DWORD0_SOC;
	}

	spin_lock_irqsave(&cmd_q->q_lock, flags);

	/* Copy 32-byte command descriptor to hw queue. */
	memcpy(q_desc, desc, 32);
	cmd_q->qidx = (cmd_q->qidx + 1) % CMD_Q_LEN;

	/* The data used by this command must be flushed to memory */
	wmb();

	/* Write the new tail address back to the queue register */
	tail = lower_32_bits(cmd_q->qdma_tail + cmd_q->qidx * Q_DESC_SIZE);
	iowrite32(tail, cmd_q->reg_control + 0x0004);

	/* Turn the queue back on using our cached control register */
	pt_start_queue(cmd_q);
	spin_unlock_irqrestore(&cmd_q->q_lock, flags);

	return 0;
}
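
/*
 * Ring bookkeeping sketch: qidx advances modulo CMD_Q_LEN after each
 * 32-byte descriptor is copied in, so once the last slot has been written
 * the tail programmed at reg_control + 0x0004 wraps back to qbase_dma and
 * the engine keeps consuming descriptors in FIFO order. (CMD_Q_LEN and
 * Q_DESC_SIZE are defined in ptdma.h.)
 */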

int pt_core_perform_passthru(struct pt_cmd_queue *cmd_q,
			     struct pt_passthru_engine *pt_engine)
{
	struct ptdma_desc desc;
	struct pt_device *pt = container_of(cmd_q, struct pt_device, cmd_q);

	cmd_q->cmd_error = 0;
	cmd_q->total_pt_ops++;
	memset(&desc, 0, sizeof(desc));
	desc.dw0 = CMD_DESC_DW0_VAL;
	desc.length = pt_engine->src_len;
	desc.src_lo = lower_32_bits(pt_engine->src_dma);
	desc.dw3.src_hi = upper_32_bits(pt_engine->src_dma);
	desc.dst_lo = lower_32_bits(pt_engine->dst_dma);
	desc.dw5.dst_hi = upper_32_bits(pt_engine->dst_dma);

	if (cmd_q->int_en)
		pt_core_enable_queue_interrupts(pt);
	else
		pt_core_disable_queue_interrupts(pt);

	return pt_core_execute_cmd(&desc, cmd_q);
}
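
/*
 * Caller sketch (illustrative only, not a call path in this file; dev,
 * src, dst and len are hypothetical). The fields consumed above are
 * src_len plus DMA addresses obtained through the regular DMA API, e.g.:
 *
 *	struct pt_passthru_engine pt_engine = { 0 };
 *
 *	pt_engine.src_dma = dma_map_single(dev, src, len, DMA_TO_DEVICE);
 *	pt_engine.dst_dma = dma_map_single(dev, dst, len, DMA_FROM_DEVICE);
 *	pt_engine.src_len = len;
 *	ret = pt_core_perform_passthru(&pt->cmd_q, &pt_engine);
 */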

static void pt_do_cmd_complete(unsigned long data)
{
	struct pt_tasklet_data *tdata = (struct pt_tasklet_data *)data;
	struct pt_cmd *cmd = tdata->cmd;
	struct pt_cmd_queue *cmd_q = &cmd->pt->cmd_q;
	u32 tail;

	if (cmd_q->cmd_error) {
		/*
		 * Log the error and flush the queue by
		 * moving the head pointer
		 */
		tail = lower_32_bits(cmd_q->qdma_tail + cmd_q->qidx * Q_DESC_SIZE);
		pt_log_error(cmd_q->pt, cmd_q->cmd_error);
		iowrite32(tail, cmd_q->reg_control + 0x0008);
	}

	cmd->pt_cmd_callback(cmd->data, cmd->ret);
}

void pt_check_status_trans(struct pt_device *pt, struct pt_cmd_queue *cmd_q)
{
	u32 status;

	status = ioread32(cmd_q->reg_control + 0x0010);
	if (status) {
		cmd_q->int_status = status;
		cmd_q->q_status = ioread32(cmd_q->reg_control + 0x0100);
		cmd_q->q_int_status = ioread32(cmd_q->reg_control + 0x0104);

		/* On error, only save the first error value */
		if ((status & INT_ERROR) && !cmd_q->cmd_error)
			cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status);

		/* Acknowledge the completion */
		iowrite32(status, cmd_q->reg_control + 0x0010);
		pt_do_cmd_complete((ulong)&pt->tdata);
	}
}
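
/*
 * The handler below masks queue interrupts for the duration of status
 * processing and unmasks them on the way out, so status handling for the
 * single command queue is not re-entered by a fresh queue interrupt while
 * pt_check_status_trans() is running.
 */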

static irqreturn_t pt_core_irq_handler(int irq, void *data)
{
	struct pt_device *pt = data;
	struct pt_cmd_queue *cmd_q = &pt->cmd_q;

	pt_core_disable_queue_interrupts(pt);
	pt->total_interrupts++;
	pt_check_status_trans(pt, cmd_q);
	pt_core_enable_queue_interrupts(pt);

	return IRQ_HANDLED;
}

int pt_core_init(struct pt_device *pt)
{
	char dma_pool_name[MAX_DMAPOOL_NAME_LEN];
	struct pt_cmd_queue *cmd_q = &pt->cmd_q;
	u32 dma_addr_lo, dma_addr_hi;
	struct device *dev = pt->dev;
	struct dma_pool *dma_pool;
	int ret;

	/* Allocate a dma pool for the queue */
	snprintf(dma_pool_name, sizeof(dma_pool_name), "%s_q", dev_name(pt->dev));

	dma_pool = dma_pool_create(dma_pool_name, dev,
				   PT_DMAPOOL_MAX_SIZE,
				   PT_DMAPOOL_ALIGN, 0);
	if (!dma_pool)
		return -ENOMEM;

	/* ptdma core initialisation */
	iowrite32(CMD_CONFIG_VHB_EN, pt->io_regs + CMD_CONFIG_OFFSET);
	iowrite32(CMD_QUEUE_PRIO, pt->io_regs + CMD_QUEUE_PRIO_OFFSET);
	iowrite32(CMD_TIMEOUT_DISABLE, pt->io_regs + CMD_TIMEOUT_OFFSET);
	iowrite32(CMD_CLK_GATE_CONFIG, pt->io_regs + CMD_CLK_GATE_CTL_OFFSET);
	iowrite32(CMD_CONFIG_REQID, pt->io_regs + CMD_REQID_CONFIG_OFFSET);

	cmd_q->pt = pt;
	cmd_q->dma_pool = dma_pool;
	spin_lock_init(&cmd_q->q_lock);

	/* Page alignment satisfies our needs for N <= 128 */
	cmd_q->qsize = Q_SIZE(Q_DESC_SIZE);
	cmd_q->qbase = dma_alloc_coherent(dev, cmd_q->qsize,
					  &cmd_q->qbase_dma,
					  GFP_KERNEL);
	if (!cmd_q->qbase) {
		dev_err(dev, "unable to allocate command queue\n");
		ret = -ENOMEM;
		goto e_destroy_pool;
	}

	cmd_q->qidx = 0;

	/* Preset some register values */
	cmd_q->reg_control = pt->io_regs + CMD_Q_STATUS_INCR;

	/* Turn off the queues and disable interrupts until ready */
	pt_core_disable_queue_interrupts(pt);

	cmd_q->qcontrol = 0; /* Start with nothing */
	iowrite32(cmd_q->qcontrol, cmd_q->reg_control);

	ioread32(cmd_q->reg_control + 0x0104);
	ioread32(cmd_q->reg_control + 0x0100);

	/* Clear the interrupt status */
	iowrite32(SUPPORTED_INTERRUPTS, cmd_q->reg_control + 0x0010);

	/* Request an irq */
	ret = request_irq(pt->pt_irq, pt_core_irq_handler, 0, dev_name(pt->dev), pt);
	if (ret) {
		dev_err(dev, "unable to allocate an IRQ\n");
		goto e_free_dma;
	}

	/* Update the device registers with queue information. */
	cmd_q->qcontrol &= ~CMD_Q_SIZE;
	cmd_q->qcontrol |= FIELD_PREP(CMD_Q_SIZE, QUEUE_SIZE_VAL);

	cmd_q->qdma_tail = cmd_q->qbase_dma;
	dma_addr_lo = lower_32_bits(cmd_q->qdma_tail);
	iowrite32((u32)dma_addr_lo, cmd_q->reg_control + 0x0004);
	iowrite32((u32)dma_addr_lo, cmd_q->reg_control + 0x0008);

	dma_addr_hi = upper_32_bits(cmd_q->qdma_tail);
	cmd_q->qcontrol |= (dma_addr_hi << 16);
	iowrite32(cmd_q->qcontrol, cmd_q->reg_control);

	pt_core_enable_queue_interrupts(pt);

	/* Register the DMA engine support */
	ret = pt_dmaengine_register(pt);
	if (ret)
		goto e_free_irq;

	/* Set up debugfs entries */
	ptdma_debugfs_setup(pt);

	return 0;

e_free_irq:
	free_irq(pt->pt_irq, pt);

e_free_dma:
	dma_free_coherent(dev, cmd_q->qsize, cmd_q->qbase, cmd_q->qbase_dma);

e_destroy_pool:
	dma_pool_destroy(pt->cmd_q.dma_pool);

	return ret;
}
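
/*
 * Teardown mirrors pt_core_init() in reverse: unregister the dmaengine
 * first so no new work arrives, quiesce the queue and its interrupts,
 * release the IRQ and queue memory, then fail any commands still on the
 * software list with -ENODEV.
 */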

void pt_core_destroy(struct pt_device *pt)
{
	struct device *dev = pt->dev;
	struct pt_cmd_queue *cmd_q = &pt->cmd_q;
	struct pt_cmd *cmd;

	/* Unregister the DMA engine */
	pt_dmaengine_unregister(pt);

	/* Disable and clear interrupts */
	pt_core_disable_queue_interrupts(pt);

	/* Turn off the run bit */
	pt_stop_queue(cmd_q);

	/* Clear the interrupt status */
	iowrite32(SUPPORTED_INTERRUPTS, cmd_q->reg_control + 0x0010);
	ioread32(cmd_q->reg_control + 0x0104);
	ioread32(cmd_q->reg_control + 0x0100);

	free_irq(pt->pt_irq, pt);

	dma_free_coherent(dev, cmd_q->qsize, cmd_q->qbase,
			  cmd_q->qbase_dma);

	/* Flush the cmd queue */
	while (!list_empty(&pt->cmd)) {
		/* Invoke the callback directly with an error code */
		cmd = list_first_entry(&pt->cmd, struct pt_cmd, entry);
		list_del(&cmd->entry);
		cmd->pt_cmd_callback(cmd->data, -ENODEV);
	}
}