drivers/dma/xgene-dma.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Applied Micro X-Gene SoC DMA engine Driver
5 * Copyright (c) 2015, Applied Micro Circuits Corporation
6 * Authors: Rameshwar Prasad Sahu <rsahu@apm.com>
7 * Loc Ho <lho@apm.com>
9 * NOTE: PM support is currently not available.
12 #include <linux/acpi.h>
13 #include <linux/clk.h>
14 #include <linux/delay.h>
15 #include <linux/dma-mapping.h>
16 #include <linux/dmaengine.h>
17 #include <linux/dmapool.h>
18 #include <linux/interrupt.h>
19 #include <linux/io.h>
20 #include <linux/irq.h>
21 #include <linux/module.h>
22 #include <linux/of_device.h>
24 #include "dmaengine.h"
26 /* X-Gene DMA ring csr registers and bit definitions */
27 #define XGENE_DMA_RING_CONFIG 0x04
28 #define XGENE_DMA_RING_ENABLE BIT(31)
29 #define XGENE_DMA_RING_ID 0x08
30 #define XGENE_DMA_RING_ID_SETUP(v) ((v) | BIT(31))
31 #define XGENE_DMA_RING_ID_BUF 0x0C
32 #define XGENE_DMA_RING_ID_BUF_SETUP(v) (((v) << 9) | BIT(21))
33 #define XGENE_DMA_RING_THRESLD0_SET1 0x30
34 #define XGENE_DMA_RING_THRESLD0_SET1_VAL 0x64
35 #define XGENE_DMA_RING_THRESLD1_SET1 0x34
36 #define XGENE_DMA_RING_THRESLD1_SET1_VAL 0xC8
37 #define XGENE_DMA_RING_HYSTERESIS 0x68
38 #define XGENE_DMA_RING_HYSTERESIS_VAL 0xFFFFFFFF
39 #define XGENE_DMA_RING_STATE 0x6C
40 #define XGENE_DMA_RING_STATE_WR_BASE 0x70
41 #define XGENE_DMA_RING_NE_INT_MODE 0x017C
42 #define XGENE_DMA_RING_NE_INT_MODE_SET(m, v) \
43 ((m) = ((m) & ~BIT(31 - (v))) | BIT(31 - (v)))
44 #define XGENE_DMA_RING_NE_INT_MODE_RESET(m, v) \
45 ((m) &= (~BIT(31 - (v))))
46 #define XGENE_DMA_RING_CLKEN 0xC208
47 #define XGENE_DMA_RING_SRST 0xC200
48 #define XGENE_DMA_RING_MEM_RAM_SHUTDOWN 0xD070
49 #define XGENE_DMA_RING_BLK_MEM_RDY 0xD074
50 #define XGENE_DMA_RING_BLK_MEM_RDY_VAL 0xFFFFFFFF
51 #define XGENE_DMA_RING_ID_GET(owner, num) (((owner) << 6) | (num))
52 #define XGENE_DMA_RING_DST_ID(v) ((1 << 10) | (v))
53 #define XGENE_DMA_RING_CMD_OFFSET 0x2C
54 #define XGENE_DMA_RING_CMD_BASE_OFFSET(v) ((v) << 6)
55 #define XGENE_DMA_RING_COHERENT_SET(m) \
56 (((u32 *)(m))[2] |= BIT(4))
57 #define XGENE_DMA_RING_ADDRL_SET(m, v) \
58 (((u32 *)(m))[2] |= (((v) >> 8) << 5))
59 #define XGENE_DMA_RING_ADDRH_SET(m, v) \
60 (((u32 *)(m))[3] |= ((v) >> 35))
61 #define XGENE_DMA_RING_ACCEPTLERR_SET(m) \
62 (((u32 *)(m))[3] |= BIT(19))
63 #define XGENE_DMA_RING_SIZE_SET(m, v) \
64 (((u32 *)(m))[3] |= ((v) << 23))
65 #define XGENE_DMA_RING_RECOMBBUF_SET(m) \
66 (((u32 *)(m))[3] |= BIT(27))
67 #define XGENE_DMA_RING_RECOMTIMEOUTL_SET(m) \
68 (((u32 *)(m))[3] |= (0x7 << 28))
69 #define XGENE_DMA_RING_RECOMTIMEOUTH_SET(m) \
70 (((u32 *)(m))[4] |= 0x3)
71 #define XGENE_DMA_RING_SELTHRSH_SET(m) \
72 (((u32 *)(m))[4] |= BIT(3))
73 #define XGENE_DMA_RING_TYPE_SET(m, v) \
74 (((u32 *)(m))[4] |= ((v) << 19))
76 /* X-Gene DMA device csr registers and bit definitions */
77 #define XGENE_DMA_IPBRR 0x0
78 #define XGENE_DMA_DEV_ID_RD(v) ((v) & 0x00000FFF)
79 #define XGENE_DMA_BUS_ID_RD(v) (((v) >> 12) & 3)
80 #define XGENE_DMA_REV_NO_RD(v) (((v) >> 14) & 3)
81 #define XGENE_DMA_GCR 0x10
82 #define XGENE_DMA_CH_SETUP(v) \
83 ((v) = ((v) & ~0x000FFFFF) | 0x000AAFFF)
84 #define XGENE_DMA_ENABLE(v) ((v) |= BIT(31))
85 #define XGENE_DMA_DISABLE(v) ((v) &= ~BIT(31))
86 #define XGENE_DMA_RAID6_CONT 0x14
87 #define XGENE_DMA_RAID6_MULTI_CTRL(v) ((v) << 24)
88 #define XGENE_DMA_INT 0x70
89 #define XGENE_DMA_INT_MASK 0x74
90 #define XGENE_DMA_INT_ALL_MASK 0xFFFFFFFF
91 #define XGENE_DMA_INT_ALL_UNMASK 0x0
92 #define XGENE_DMA_INT_MASK_SHIFT 0x14
93 #define XGENE_DMA_RING_INT0_MASK 0x90A0
94 #define XGENE_DMA_RING_INT1_MASK 0x90A8
95 #define XGENE_DMA_RING_INT2_MASK 0x90B0
96 #define XGENE_DMA_RING_INT3_MASK 0x90B8
97 #define XGENE_DMA_RING_INT4_MASK 0x90C0
98 #define XGENE_DMA_CFG_RING_WQ_ASSOC 0x90E0
99 #define XGENE_DMA_ASSOC_RING_MNGR1 0xFFFFFFFF
100 #define XGENE_DMA_MEM_RAM_SHUTDOWN 0xD070
101 #define XGENE_DMA_BLK_MEM_RDY 0xD074
102 #define XGENE_DMA_BLK_MEM_RDY_VAL 0xFFFFFFFF
103 #define XGENE_DMA_RING_CMD_SM_OFFSET 0x8000
105 /* X-Gene SoC EFUSE csr register and bit definition */
106 #define XGENE_SOC_JTAG1_SHADOW 0x18
107 #define XGENE_DMA_PQ_DISABLE_MASK BIT(13)
109 /* X-Gene DMA Descriptor format */
110 #define XGENE_DMA_DESC_NV_BIT BIT_ULL(50)
111 #define XGENE_DMA_DESC_IN_BIT BIT_ULL(55)
112 #define XGENE_DMA_DESC_C_BIT BIT_ULL(63)
113 #define XGENE_DMA_DESC_DR_BIT BIT_ULL(61)
114 #define XGENE_DMA_DESC_ELERR_POS 46
115 #define XGENE_DMA_DESC_RTYPE_POS 56
116 #define XGENE_DMA_DESC_LERR_POS 60
117 #define XGENE_DMA_DESC_BUFLEN_POS 48
118 #define XGENE_DMA_DESC_HOENQ_NUM_POS 48
119 #define XGENE_DMA_DESC_ELERR_RD(m) \
120 (((m) >> XGENE_DMA_DESC_ELERR_POS) & 0x3)
121 #define XGENE_DMA_DESC_LERR_RD(m) \
122 (((m) >> XGENE_DMA_DESC_LERR_POS) & 0x7)
123 #define XGENE_DMA_DESC_STATUS(elerr, lerr) \
124 (((elerr) << 4) | (lerr))
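/*
 * Note: a status of zero means the transfer completed without error; a
 * non-zero value combines the EL_ERR and LERR fields read from the rx
 * descriptor and is used in the cleanup path to index xgene_dma_desc_err[].
 */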
126 /* X-Gene DMA descriptor empty s/w signature */
127 #define XGENE_DMA_DESC_EMPTY_SIGNATURE ~0ULL
129 /* X-Gene DMA configurable parameters defines */
130 #define XGENE_DMA_RING_NUM 512
131 #define XGENE_DMA_BUFNUM 0x0
132 #define XGENE_DMA_CPU_BUFNUM 0x18
133 #define XGENE_DMA_RING_OWNER_DMA 0x03
134 #define XGENE_DMA_RING_OWNER_CPU 0x0F
135 #define XGENE_DMA_RING_TYPE_REGULAR 0x01
136 #define XGENE_DMA_RING_WQ_DESC_SIZE 32 /* 32 Bytes */
137 #define XGENE_DMA_RING_NUM_CONFIG 5
138 #define XGENE_DMA_MAX_CHANNEL 4
139 #define XGENE_DMA_XOR_CHANNEL 0
140 #define XGENE_DMA_PQ_CHANNEL 1
141 #define XGENE_DMA_MAX_BYTE_CNT 0x4000 /* 16 KB */
142 #define XGENE_DMA_MAX_64B_DESC_BYTE_CNT 0x14000 /* 80 KB */
143 #define XGENE_DMA_MAX_XOR_SRC 5
144 #define XGENE_DMA_16K_BUFFER_LEN_CODE 0x0
145 #define XGENE_DMA_INVALID_LEN_CODE 0x7800000000000000ULL
147 /* X-Gene DMA descriptor error codes */
148 #define ERR_DESC_AXI 0x01
149 #define ERR_BAD_DESC 0x02
150 #define ERR_READ_DATA_AXI 0x03
151 #define ERR_WRITE_DATA_AXI 0x04
152 #define ERR_FBP_TIMEOUT 0x05
153 #define ERR_ECC 0x06
154 #define ERR_DIFF_SIZE 0x08
155 #define ERR_SCT_GAT_LEN 0x09
156 #define ERR_CRC_ERR 0x11
157 #define ERR_CHKSUM 0x12
158 #define ERR_DIF 0x13
160 /* X-Gene DMA error interrupt codes */
161 #define ERR_DIF_SIZE_INT 0x0
162 #define ERR_GS_ERR_INT 0x1
163 #define ERR_FPB_TIMEO_INT 0x2
164 #define ERR_WFIFO_OVF_INT 0x3
165 #define ERR_RFIFO_OVF_INT 0x4
166 #define ERR_WR_TIMEO_INT 0x5
167 #define ERR_RD_TIMEO_INT 0x6
168 #define ERR_WR_ERR_INT 0x7
169 #define ERR_RD_ERR_INT 0x8
170 #define ERR_BAD_DESC_INT 0x9
171 #define ERR_DESC_DST_INT 0xA
172 #define ERR_DESC_SRC_INT 0xB
174 /* X-Gene DMA flyby operation code */
175 #define FLYBY_2SRC_XOR 0x80
176 #define FLYBY_3SRC_XOR 0x90
177 #define FLYBY_4SRC_XOR 0xA0
178 #define FLYBY_5SRC_XOR 0xB0
180 /* X-Gene DMA SW descriptor flags */
181 #define XGENE_DMA_FLAG_64B_DESC BIT(0)
183 /* Define to dump X-Gene DMA descriptor */
184 #define XGENE_DMA_DESC_DUMP(desc, m) \
185 print_hex_dump(KERN_ERR, (m), \
186 DUMP_PREFIX_ADDRESS, 16, 8, (desc), 32, 0)
188 #define to_dma_desc_sw(tx) \
189 container_of(tx, struct xgene_dma_desc_sw, tx)
190 #define to_dma_chan(dchan) \
191 container_of(dchan, struct xgene_dma_chan, dma_chan)
193 #define chan_dbg(chan, fmt, arg...) \
194 dev_dbg(chan->dev, "%s: " fmt, chan->name, ##arg)
195 #define chan_err(chan, fmt, arg...) \
196 dev_err(chan->dev, "%s: " fmt, chan->name, ##arg)
198 struct xgene_dma_desc_hw {
199 __le64 m0;
200 __le64 m1;
201 __le64 m2;
202 __le64 m3;
205 enum xgene_dma_ring_cfgsize {
206 XGENE_DMA_RING_CFG_SIZE_512B,
207 XGENE_DMA_RING_CFG_SIZE_2KB,
208 XGENE_DMA_RING_CFG_SIZE_16KB,
209 XGENE_DMA_RING_CFG_SIZE_64KB,
210 XGENE_DMA_RING_CFG_SIZE_512KB,
211 XGENE_DMA_RING_CFG_SIZE_INVALID
214 struct xgene_dma_ring {
215 struct xgene_dma *pdma;
216 u8 buf_num;
217 u16 id;
218 u16 num;
219 u16 head;
220 u16 owner;
221 u16 slots;
222 u16 dst_ring_num;
223 u32 size;
224 void __iomem *cmd;
225 void __iomem *cmd_base;
226 dma_addr_t desc_paddr;
227 u32 state[XGENE_DMA_RING_NUM_CONFIG];
228 enum xgene_dma_ring_cfgsize cfgsize;
229 union {
230 void *desc_vaddr;
231 struct xgene_dma_desc_hw *desc_hw;
235 struct xgene_dma_desc_sw {
236 struct xgene_dma_desc_hw desc1;
237 struct xgene_dma_desc_hw desc2;
238 u32 flags;
239 struct list_head node;
240 struct list_head tx_list;
241 struct dma_async_tx_descriptor tx;
245 * struct xgene_dma_chan - internal representation of an X-Gene DMA channel
246 * @dma_chan: dmaengine channel object member
247 * @pdma: X-Gene DMA device structure reference
248 * @dev: struct device reference for dma mapping api
249 * @id: raw id of this channel
250 * @rx_irq: channel IRQ
251 * @name: name of X-Gene DMA channel
252 * @lock: serializes enqueue/dequeue operations to the descriptor pool
253 * @pending: number of transaction requests pushed to the DMA controller for
254 * execution, but still waiting for completion
255 * @max_outstanding: max number of outstanding requests we can push to the channel
256 * @ld_pending: descriptors which are queued to run, but have not yet been
257 * submitted to the hardware for execution
258 * @ld_running: descriptors which are currently being executed by the hardware
259 * @ld_completed: descriptors which have finished execution by the hardware.
260 * These descriptors have already had their cleanup actions run. They
261 * are waiting for the ACK bit to be set by the async tx API.
262 * @desc_pool: descriptor pool for DMA operations
263 * @tasklet: bottom half where all completed descriptors are cleaned up
264 * @tx_ring: transmit ring descriptor that we use to prepare actual
265 * descriptors for further execution
266 * @rx_ring: receive ring descriptor that we use to get completed DMA
267 * descriptors during cleanup time
269 struct xgene_dma_chan {
270 struct dma_chan dma_chan;
271 struct xgene_dma *pdma;
272 struct device *dev;
273 int id;
274 int rx_irq;
275 char name[10];
276 spinlock_t lock;
277 int pending;
278 int max_outstanding;
279 struct list_head ld_pending;
280 struct list_head ld_running;
281 struct list_head ld_completed;
282 struct dma_pool *desc_pool;
283 struct tasklet_struct tasklet;
284 struct xgene_dma_ring tx_ring;
285 struct xgene_dma_ring rx_ring;
289 * struct xgene_dma - internal representation of an X-Gene DMA device
290 * @dev: reference to this device's struct device
291 * @clk: reference to this device's clock
292 * @err_irq: DMA error irq number
293 * @ring_num: start id number for DMA ring
294 * @csr_dma: base for DMA register access
295 * @csr_ring: base for DMA ring register access
296 * @csr_ring_cmd: base for DMA ring command register access
297 * @csr_efuse: base for efuse register access
298 * @dma_dev: embedded struct dma_device
299 * @chan: reference to X-Gene DMA channels
301 struct xgene_dma {
302 struct device *dev;
303 struct clk *clk;
304 int err_irq;
305 int ring_num;
306 void __iomem *csr_dma;
307 void __iomem *csr_ring;
308 void __iomem *csr_ring_cmd;
309 void __iomem *csr_efuse;
310 struct dma_device dma_dev[XGENE_DMA_MAX_CHANNEL];
311 struct xgene_dma_chan chan[XGENE_DMA_MAX_CHANNEL];
314 static const char * const xgene_dma_desc_err[] = {
315 [ERR_DESC_AXI] = "AXI error when reading src/dst link list",
316 [ERR_BAD_DESC] = "ERR or El_ERR fields not set to zero in desc",
317 [ERR_READ_DATA_AXI] = "AXI error when reading data",
318 [ERR_WRITE_DATA_AXI] = "AXI error when writing data",
319 [ERR_FBP_TIMEOUT] = "Timeout on bufpool fetch",
320 [ERR_ECC] = "ECC double bit error",
321 [ERR_DIFF_SIZE] = "Bufpool too small to hold all the DIF result",
322 [ERR_SCT_GAT_LEN] = "Gather and scatter data length not same",
323 [ERR_CRC_ERR] = "CRC error",
324 [ERR_CHKSUM] = "Checksum error",
325 [ERR_DIF] = "DIF error",
328 static const char * const xgene_dma_err[] = {
329 [ERR_DIF_SIZE_INT] = "DIF size error",
330 [ERR_GS_ERR_INT] = "Gather scatter not same size error",
331 [ERR_FPB_TIMEO_INT] = "Free pool time out error",
332 [ERR_WFIFO_OVF_INT] = "Write FIFO over flow error",
333 [ERR_RFIFO_OVF_INT] = "Read FIFO over flow error",
334 [ERR_WR_TIMEO_INT] = "Write time out error",
335 [ERR_RD_TIMEO_INT] = "Read time out error",
336 [ERR_WR_ERR_INT] = "HBF bus write error",
337 [ERR_RD_ERR_INT] = "HBF bus read error",
338 [ERR_BAD_DESC_INT] = "Ring descriptor HE0 not set error",
339 [ERR_DESC_DST_INT] = "HFB reading dst link address error",
340 [ERR_DESC_SRC_INT] = "HFB reading src link address error",
343 static bool is_pq_enabled(struct xgene_dma *pdma)
345 u32 val;
347 val = ioread32(pdma->csr_efuse + XGENE_SOC_JTAG1_SHADOW);
348 return !(val & XGENE_DMA_PQ_DISABLE_MASK);
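/*
 * Buffer lengths below XGENE_DMA_MAX_BYTE_CNT (16 KB) are encoded directly
 * into the upper bits of the descriptor word; a full 16 KB buffer is
 * represented by the special code XGENE_DMA_16K_BUFFER_LEN_CODE (0x0).
 */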
351 static u64 xgene_dma_encode_len(size_t len)
353 return (len < XGENE_DMA_MAX_BYTE_CNT) ?
354 ((u64)len << XGENE_DMA_DESC_BUFLEN_POS) :
355 XGENE_DMA_16K_BUFFER_LEN_CODE;
358 static u8 xgene_dma_encode_xor_flyby(u32 src_cnt)
360 static u8 flyby_type[] = {
361 FLYBY_2SRC_XOR, /* Dummy */
362 FLYBY_2SRC_XOR, /* Dummy */
363 FLYBY_2SRC_XOR,
364 FLYBY_3SRC_XOR,
365 FLYBY_4SRC_XOR,
366 FLYBY_5SRC_XOR
369 return flyby_type[src_cnt];
372 static void xgene_dma_set_src_buffer(__le64 *ext8, size_t *len,
373 dma_addr_t *paddr)
375 size_t nbytes = (*len < XGENE_DMA_MAX_BYTE_CNT) ?
376 *len : XGENE_DMA_MAX_BYTE_CNT;
378 *ext8 |= cpu_to_le64(*paddr);
379 *ext8 |= cpu_to_le64(xgene_dma_encode_len(nbytes));
380 *len -= nbytes;
381 *paddr += nbytes;
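/*
 * The 2nd through 5th source buffers of an XOR/PQ operation live in the
 * second 32-byte hw descriptor; its 64-bit words are addressed in the
 * swapped order m1, m0, m3, m2, which is what the lookup below encodes.
 */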
384 static __le64 *xgene_dma_lookup_ext8(struct xgene_dma_desc_hw *desc, int idx)
386 switch (idx) {
387 case 0:
388 return &desc->m1;
389 case 1:
390 return &desc->m0;
391 case 2:
392 return &desc->m3;
393 case 3:
394 return &desc->m2;
395 default:
396 pr_err("Invalid dma descriptor index\n");
399 return NULL;
402 static void xgene_dma_init_desc(struct xgene_dma_desc_hw *desc,
403 u16 dst_ring_num)
405 desc->m0 |= cpu_to_le64(XGENE_DMA_DESC_IN_BIT);
406 desc->m0 |= cpu_to_le64((u64)XGENE_DMA_RING_OWNER_DMA <<
407 XGENE_DMA_DESC_RTYPE_POS);
408 desc->m1 |= cpu_to_le64(XGENE_DMA_DESC_C_BIT);
409 desc->m3 |= cpu_to_le64((u64)dst_ring_num <<
410 XGENE_DMA_DESC_HOENQ_NUM_POS);
413 static void xgene_dma_prep_xor_desc(struct xgene_dma_chan *chan,
414 struct xgene_dma_desc_sw *desc_sw,
415 dma_addr_t *dst, dma_addr_t *src,
416 u32 src_cnt, size_t *nbytes,
417 const u8 *scf)
419 struct xgene_dma_desc_hw *desc1, *desc2;
420 size_t len = *nbytes;
421 int i;
423 desc1 = &desc_sw->desc1;
424 desc2 = &desc_sw->desc2;
426 /* Initialize DMA descriptor */
427 xgene_dma_init_desc(desc1, chan->tx_ring.dst_ring_num);
429 /* Set destination address */
430 desc1->m2 |= cpu_to_le64(XGENE_DMA_DESC_DR_BIT);
431 desc1->m3 |= cpu_to_le64(*dst);
433 /* We have multiple source addresses, so we need to set the NV bit */
434 desc1->m0 |= cpu_to_le64(XGENE_DMA_DESC_NV_BIT);
436 /* Set flyby opcode */
437 desc1->m2 |= cpu_to_le64(xgene_dma_encode_xor_flyby(src_cnt));
439 /* Set 1st to 5th source addresses */
440 for (i = 0; i < src_cnt; i++) {
441 len = *nbytes;
442 xgene_dma_set_src_buffer((i == 0) ? &desc1->m1 :
443 xgene_dma_lookup_ext8(desc2, i - 1),
444 &len, &src[i]);
445 desc1->m2 |= cpu_to_le64((scf[i] << ((i + 1) * 8)));
448 /* Update meta data */
449 *nbytes = len;
450 *dst += XGENE_DMA_MAX_BYTE_CNT;
452 /* We always need a 64B descriptor to perform xor or pq operations */
453 desc_sw->flags |= XGENE_DMA_FLAG_64B_DESC;
456 static dma_cookie_t xgene_dma_tx_submit(struct dma_async_tx_descriptor *tx)
458 struct xgene_dma_desc_sw *desc;
459 struct xgene_dma_chan *chan;
460 dma_cookie_t cookie;
462 if (unlikely(!tx))
463 return -EINVAL;
465 chan = to_dma_chan(tx->chan);
466 desc = to_dma_desc_sw(tx);
468 spin_lock_bh(&chan->lock);
470 cookie = dma_cookie_assign(tx);
472 /* Add this transaction list onto the tail of the pending queue */
473 list_splice_tail_init(&desc->tx_list, &chan->ld_pending);
475 spin_unlock_bh(&chan->lock);
477 return cookie;
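/*
 * Note that tx_submit only queues the transaction on ld_pending; nothing is
 * written to the hw ring until xgene_dma_issue_pending() or the cleanup path
 * calls xgene_chan_xfer_ld_pending().
 */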
480 static void xgene_dma_clean_descriptor(struct xgene_dma_chan *chan,
481 struct xgene_dma_desc_sw *desc)
483 list_del(&desc->node);
484 chan_dbg(chan, "LD %p free\n", desc);
485 dma_pool_free(chan->desc_pool, desc, desc->tx.phys);
488 static struct xgene_dma_desc_sw *xgene_dma_alloc_descriptor(
489 struct xgene_dma_chan *chan)
491 struct xgene_dma_desc_sw *desc;
492 dma_addr_t phys;
494 desc = dma_pool_zalloc(chan->desc_pool, GFP_NOWAIT, &phys);
495 if (!desc) {
496 chan_err(chan, "Failed to allocate LDs\n");
497 return NULL;
500 INIT_LIST_HEAD(&desc->tx_list);
501 desc->tx.phys = phys;
502 desc->tx.tx_submit = xgene_dma_tx_submit;
503 dma_async_tx_descriptor_init(&desc->tx, &chan->dma_chan);
505 chan_dbg(chan, "LD %p allocated\n", desc);
507 return desc;
511 * xgene_dma_clean_completed_descriptor - free all descriptors which
512 * have been completed and acked
513 * @chan: X-Gene DMA channel
515 * This function is used on all completed and acked descriptors.
517 static void xgene_dma_clean_completed_descriptor(struct xgene_dma_chan *chan)
519 struct xgene_dma_desc_sw *desc, *_desc;
521 /* Run the callback for each descriptor, in order */
522 list_for_each_entry_safe(desc, _desc, &chan->ld_completed, node) {
523 if (async_tx_test_ack(&desc->tx))
524 xgene_dma_clean_descriptor(chan, desc);
529 * xgene_dma_run_tx_complete_actions - cleanup a single link descriptor
530 * @chan: X-Gene DMA channel
531 * @desc: descriptor to cleanup and free
533 * This function is used on a descriptor which has been executed by the DMA
534 * controller. It will run any callbacks, submit any dependencies.
536 static void xgene_dma_run_tx_complete_actions(struct xgene_dma_chan *chan,
537 struct xgene_dma_desc_sw *desc)
539 struct dma_async_tx_descriptor *tx = &desc->tx;
542 * If this is not the last transaction in the group,
543 * there is no need to complete the cookie or run any callback, as
544 * this is not the tx descriptor that was handed back to the caller
545 * of this DMA request
548 if (tx->cookie == 0)
549 return;
551 dma_cookie_complete(tx);
552 dma_descriptor_unmap(tx);
554 /* Run the link descriptor callback function */
555 dmaengine_desc_get_callback_invoke(tx, NULL);
557 /* Run any dependencies */
558 dma_run_dependencies(tx);
562 * xgene_dma_clean_running_descriptor - move the completed descriptor from
563 * ld_running to ld_completed
564 * @chan: X-Gene DMA channel
565 * @desc: the descriptor which is completed
567 * Free the descriptor directly if it has been acked by the async_tx API,
568 * otherwise move it to the ld_completed queue.
570 static void xgene_dma_clean_running_descriptor(struct xgene_dma_chan *chan,
571 struct xgene_dma_desc_sw *desc)
573 /* Remove from the list of running transactions */
574 list_del(&desc->node);
577 * the client is allowed to attach dependent operations
578 * until 'ack' is set
580 if (!async_tx_test_ack(&desc->tx)) {
582 * Move this descriptor to the list of descriptors which are
583 * completed, but still awaiting the 'ack' bit to be set.
585 list_add_tail(&desc->node, &chan->ld_completed);
586 return;
589 chan_dbg(chan, "LD %p free\n", desc);
590 dma_pool_free(chan->desc_pool, desc, desc->tx.phys);
593 static void xgene_chan_xfer_request(struct xgene_dma_chan *chan,
594 struct xgene_dma_desc_sw *desc_sw)
596 struct xgene_dma_ring *ring = &chan->tx_ring;
597 struct xgene_dma_desc_hw *desc_hw;
599 /* Get hw descriptor from DMA tx ring */
600 desc_hw = &ring->desc_hw[ring->head];
603 * Increment the head index to point to the next
604 * descriptor for next time
606 if (++ring->head == ring->slots)
607 ring->head = 0;
609 /* Copy prepared sw descriptor data to hw descriptor */
610 memcpy(desc_hw, &desc_sw->desc1, sizeof(*desc_hw));
613 * Check if we have prepared a 64B descriptor;
614 * in this case we need one more hw descriptor
616 if (desc_sw->flags & XGENE_DMA_FLAG_64B_DESC) {
617 desc_hw = &ring->desc_hw[ring->head];
619 if (++ring->head == ring->slots)
620 ring->head = 0;
622 memcpy(desc_hw, &desc_sw->desc2, sizeof(*desc_hw));
625 /* Increment the pending transaction count */
626 chan->pending += ((desc_sw->flags &
627 XGENE_DMA_FLAG_64B_DESC) ? 2 : 1);
629 /* Notify the hw that we have descriptor ready for execution */
630 iowrite32((desc_sw->flags & XGENE_DMA_FLAG_64B_DESC) ?
631 2 : 1, ring->cmd);
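/*
 * The value written to the ring command register is the number of new hw
 * descriptors (1, or 2 for a 64B request); chan->pending is incremented by
 * the same amount above and decremented again in the cleanup path.
 */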
635 * xgene_chan_xfer_ld_pending - push any pending transactions to hw
636 * @chan : X-Gene DMA channel
638 * LOCKING: must hold chan->lock
640 static void xgene_chan_xfer_ld_pending(struct xgene_dma_chan *chan)
642 struct xgene_dma_desc_sw *desc_sw, *_desc_sw;
645 * If the list of pending descriptors is empty, then we
646 * don't need to do any work at all
648 if (list_empty(&chan->ld_pending)) {
649 chan_dbg(chan, "No pending LDs\n");
650 return;
654 * Move elements from the queue of pending transactions onto the list
655 * of running transactions and push them to the hw for execution
657 list_for_each_entry_safe(desc_sw, _desc_sw, &chan->ld_pending, node) {
659 * Check if we have already pushed the maximum number of
660 * transactions the hw can accept; if so, stop here and push the
661 * remaining elements from the pending ld queue after some of the
662 * descriptors we have already pushed have completed
664 if (chan->pending >= chan->max_outstanding)
665 return;
667 xgene_chan_xfer_request(chan, desc_sw);
670 * Delete this element from ld pending queue and append it to
671 * ld running queue
673 list_move_tail(&desc_sw->node, &chan->ld_running);
678 * xgene_dma_cleanup_descriptors - cleanup link descriptors which are completed
679 * and move them to ld_completed, where they are freed once the 'ack' flag is set
680 * @chan: X-Gene DMA channel
682 * This function is used on descriptors which have been executed by the DMA
683 * controller. It will run any callbacks, submit any dependencies, then
684 * free these descriptors if the 'ack' flag is set.
686 static void xgene_dma_cleanup_descriptors(struct xgene_dma_chan *chan)
688 struct xgene_dma_ring *ring = &chan->rx_ring;
689 struct xgene_dma_desc_sw *desc_sw, *_desc_sw;
690 struct xgene_dma_desc_hw *desc_hw;
691 struct list_head ld_completed;
692 u8 status;
694 INIT_LIST_HEAD(&ld_completed);
696 spin_lock(&chan->lock);
698 /* Clean already completed and acked descriptors */
699 xgene_dma_clean_completed_descriptor(chan);
701 /* Move all completed descriptors to ld completed queue, in order */
702 list_for_each_entry_safe(desc_sw, _desc_sw, &chan->ld_running, node) {
703 /* Get subsequent hw descriptor from DMA rx ring */
704 desc_hw = &ring->desc_hw[ring->head];
706 /* Check if this descriptor has been completed */
707 if (unlikely(le64_to_cpu(desc_hw->m0) ==
708 XGENE_DMA_DESC_EMPTY_SIGNATURE))
709 break;
711 if (++ring->head == ring->slots)
712 ring->head = 0;
714 /* Check if we have any error with DMA transactions */
715 status = XGENE_DMA_DESC_STATUS(
716 XGENE_DMA_DESC_ELERR_RD(le64_to_cpu(
717 desc_hw->m0)),
718 XGENE_DMA_DESC_LERR_RD(le64_to_cpu(
719 desc_hw->m0)));
720 if (status) {
721 /* Print the DMA error type */
722 chan_err(chan, "%s\n", xgene_dma_desc_err[status]);
725 * We have a DMA transaction error here. Dump the DMA Tx
726 * and Rx descriptors for this request */
727 XGENE_DMA_DESC_DUMP(&desc_sw->desc1,
728 "X-Gene DMA TX DESC1: ");
730 if (desc_sw->flags & XGENE_DMA_FLAG_64B_DESC)
731 XGENE_DMA_DESC_DUMP(&desc_sw->desc2,
732 "X-Gene DMA TX DESC2: ");
734 XGENE_DMA_DESC_DUMP(desc_hw,
735 "X-Gene DMA RX ERR DESC: ");
738 /* Notify the hw about this completed descriptor */
739 iowrite32(-1, ring->cmd);
741 /* Mark this hw descriptor as processed */
742 desc_hw->m0 = cpu_to_le64(XGENE_DMA_DESC_EMPTY_SIGNATURE);
745 * Decrement the pending transaction count
746 * as we have processed one
748 chan->pending -= ((desc_sw->flags &
749 XGENE_DMA_FLAG_64B_DESC) ? 2 : 1);
752 * Delete this node from ld running queue and append it to
753 * ld completed queue for further processing
755 list_move_tail(&desc_sw->node, &ld_completed);
759 * Start any pending transactions automatically
760 * In the ideal case, we keep the DMA controller busy while we go
761 * ahead and free the descriptors below.
763 xgene_chan_xfer_ld_pending(chan);
765 spin_unlock(&chan->lock);
767 /* Run the callback for each descriptor, in order */
768 list_for_each_entry_safe(desc_sw, _desc_sw, &ld_completed, node) {
769 xgene_dma_run_tx_complete_actions(chan, desc_sw);
770 xgene_dma_clean_running_descriptor(chan, desc_sw);
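/*
 * Note that the completion callbacks above run only after chan->lock has
 * been dropped, which also lets a callback safely submit new descriptors
 * on this channel.
 */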
774 static int xgene_dma_alloc_chan_resources(struct dma_chan *dchan)
776 struct xgene_dma_chan *chan = to_dma_chan(dchan);
778 /* Has this channel already been allocated? */
779 if (chan->desc_pool)
780 return 1;
782 chan->desc_pool = dma_pool_create(chan->name, chan->dev,
783 sizeof(struct xgene_dma_desc_sw),
784 0, 0);
785 if (!chan->desc_pool) {
786 chan_err(chan, "Failed to allocate descriptor pool\n");
787 return -ENOMEM;
790 chan_dbg(chan, "Allocate descriptor pool\n");
792 return 1;
796 * xgene_dma_free_desc_list - Free all descriptors in a queue
797 * @chan: X-Gene DMA channel
798 * @list: the list to free
800 * LOCKING: must hold chan->lock
802 static void xgene_dma_free_desc_list(struct xgene_dma_chan *chan,
803 struct list_head *list)
805 struct xgene_dma_desc_sw *desc, *_desc;
807 list_for_each_entry_safe(desc, _desc, list, node)
808 xgene_dma_clean_descriptor(chan, desc);
811 static void xgene_dma_free_chan_resources(struct dma_chan *dchan)
813 struct xgene_dma_chan *chan = to_dma_chan(dchan);
815 chan_dbg(chan, "Free all resources\n");
817 if (!chan->desc_pool)
818 return;
820 /* Process all running descriptors */
821 xgene_dma_cleanup_descriptors(chan);
823 spin_lock_bh(&chan->lock);
825 /* Clean all link descriptor queues */
826 xgene_dma_free_desc_list(chan, &chan->ld_pending);
827 xgene_dma_free_desc_list(chan, &chan->ld_running);
828 xgene_dma_free_desc_list(chan, &chan->ld_completed);
830 spin_unlock_bh(&chan->lock);
832 /* Delete this channel DMA pool */
833 dma_pool_destroy(chan->desc_pool);
834 chan->desc_pool = NULL;
837 static struct dma_async_tx_descriptor *xgene_dma_prep_xor(
838 struct dma_chan *dchan, dma_addr_t dst, dma_addr_t *src,
839 u32 src_cnt, size_t len, unsigned long flags)
841 struct xgene_dma_desc_sw *first = NULL, *new;
842 struct xgene_dma_chan *chan;
843 static u8 multi[XGENE_DMA_MAX_XOR_SRC] = {
844 0x01, 0x01, 0x01, 0x01, 0x01};
846 if (unlikely(!dchan || !len))
847 return NULL;
849 chan = to_dma_chan(dchan);
851 do {
852 /* Allocate the link descriptor from DMA pool */
853 new = xgene_dma_alloc_descriptor(chan);
854 if (!new)
855 goto fail;
857 /* Prepare xor DMA descriptor */
858 xgene_dma_prep_xor_desc(chan, new, &dst, src,
859 src_cnt, &len, multi);
861 if (!first)
862 first = new;
864 new->tx.cookie = 0;
865 async_tx_ack(&new->tx);
867 /* Insert the link descriptor to the LD ring */
868 list_add_tail(&new->node, &first->tx_list);
869 } while (len);
871 new->tx.flags = flags; /* client is in control of this ack */
872 new->tx.cookie = -EBUSY;
873 list_splice(&first->tx_list, &new->tx_list);
875 return &new->tx;
877 fail:
878 if (!first)
879 return NULL;
881 xgene_dma_free_desc_list(chan, &first->tx_list);
882 return NULL;
885 static struct dma_async_tx_descriptor *xgene_dma_prep_pq(
886 struct dma_chan *dchan, dma_addr_t *dst, dma_addr_t *src,
887 u32 src_cnt, const u8 *scf, size_t len, unsigned long flags)
889 struct xgene_dma_desc_sw *first = NULL, *new;
890 struct xgene_dma_chan *chan;
891 size_t _len = len;
892 dma_addr_t _src[XGENE_DMA_MAX_XOR_SRC];
893 static u8 multi[XGENE_DMA_MAX_XOR_SRC] = {0x01, 0x01, 0x01, 0x01, 0x01};
895 if (unlikely(!dchan || !len))
896 return NULL;
898 chan = to_dma_chan(dchan);
901 * Save the source addresses in a local variable; we may have to
902 * prepare two descriptors to generate P and Q if both are enabled
903 * in the flags by the client
905 memcpy(_src, src, sizeof(*src) * src_cnt);
907 if (flags & DMA_PREP_PQ_DISABLE_P)
908 len = 0;
910 if (flags & DMA_PREP_PQ_DISABLE_Q)
911 _len = 0;
913 do {
914 /* Allocate the link descriptor from DMA pool */
915 new = xgene_dma_alloc_descriptor(chan);
916 if (!new)
917 goto fail;
919 if (!first)
920 first = new;
922 new->tx.cookie = 0;
923 async_tx_ack(&new->tx);
925 /* Insert the link descriptor to the LD ring */
926 list_add_tail(&new->node, &first->tx_list);
929 * Prepare DMA descriptor to generate P,
930 * if DMA_PREP_PQ_DISABLE_P flag is not set
932 if (len) {
933 xgene_dma_prep_xor_desc(chan, new, &dst[0], src,
934 src_cnt, &len, multi);
935 continue;
939 * Prepare DMA descriptor to generate Q,
940 * if DMA_PREP_PQ_DISABLE_Q flag is not set
942 if (_len) {
943 xgene_dma_prep_xor_desc(chan, new, &dst[1], _src,
944 src_cnt, &_len, scf);
946 } while (len || _len);
948 new->tx.flags = flags; /* client is in control of this ack */
949 new->tx.cookie = -EBUSY;
950 list_splice(&first->tx_list, &new->tx_list);
952 return &new->tx;
954 fail:
955 if (!first)
956 return NULL;
958 xgene_dma_free_desc_list(chan, &first->tx_list);
959 return NULL;
962 static void xgene_dma_issue_pending(struct dma_chan *dchan)
964 struct xgene_dma_chan *chan = to_dma_chan(dchan);
966 spin_lock_bh(&chan->lock);
967 xgene_chan_xfer_ld_pending(chan);
968 spin_unlock_bh(&chan->lock);
971 static enum dma_status xgene_dma_tx_status(struct dma_chan *dchan,
972 dma_cookie_t cookie,
973 struct dma_tx_state *txstate)
975 return dma_cookie_status(dchan, cookie, txstate);
978 static void xgene_dma_tasklet_cb(struct tasklet_struct *t)
980 struct xgene_dma_chan *chan = from_tasklet(chan, t, tasklet);
982 /* Run all cleanup for descriptors which have been completed */
983 xgene_dma_cleanup_descriptors(chan);
985 /* Re-enable DMA channel IRQ */
986 enable_irq(chan->rx_irq);
989 static irqreturn_t xgene_dma_chan_ring_isr(int irq, void *id)
991 struct xgene_dma_chan *chan = (struct xgene_dma_chan *)id;
993 BUG_ON(!chan);
996 * Disable DMA channel IRQ until we process completed
997 * descriptors
999 disable_irq_nosync(chan->rx_irq);
1002 * Schedule the tasklet to handle all cleanup of the current
1003 * transaction. It will start a new transaction if there is
1004 * one pending.
1006 tasklet_schedule(&chan->tasklet);
1008 return IRQ_HANDLED;
1011 static irqreturn_t xgene_dma_err_isr(int irq, void *id)
1013 struct xgene_dma *pdma = (struct xgene_dma *)id;
1014 unsigned long int_mask;
1015 u32 val, i;
1017 val = ioread32(pdma->csr_dma + XGENE_DMA_INT);
1019 /* Clear DMA interrupts */
1020 iowrite32(val, pdma->csr_dma + XGENE_DMA_INT);
1022 /* Print DMA error info */
1023 int_mask = val >> XGENE_DMA_INT_MASK_SHIFT;
1024 for_each_set_bit(i, &int_mask, ARRAY_SIZE(xgene_dma_err))
1025 dev_err(pdma->dev,
1026 "Interrupt status 0x%08X %s\n", val, xgene_dma_err[i]);
1028 return IRQ_HANDLED;
1031 static void xgene_dma_wr_ring_state(struct xgene_dma_ring *ring)
1033 int i;
1035 iowrite32(ring->num, ring->pdma->csr_ring + XGENE_DMA_RING_STATE);
1037 for (i = 0; i < XGENE_DMA_RING_NUM_CONFIG; i++)
1038 iowrite32(ring->state[i], ring->pdma->csr_ring +
1039 XGENE_DMA_RING_STATE_WR_BASE + (i * 4));
1042 static void xgene_dma_clr_ring_state(struct xgene_dma_ring *ring)
1044 memset(ring->state, 0, sizeof(u32) * XGENE_DMA_RING_NUM_CONFIG);
1045 xgene_dma_wr_ring_state(ring);
1048 static void xgene_dma_setup_ring(struct xgene_dma_ring *ring)
1050 void *ring_cfg = ring->state;
1051 u64 addr = ring->desc_paddr;
1052 u32 i, val;
1054 ring->slots = ring->size / XGENE_DMA_RING_WQ_DESC_SIZE;
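/*
 * Example: with the 64KB rings created by xgene_dma_create_chan_rings()
 * below, this gives 0x10000 / 32 = 2048 work-queue descriptor slots.
 */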
1056 /* Clear DMA ring state */
1057 xgene_dma_clr_ring_state(ring);
1059 /* Set DMA ring type */
1060 XGENE_DMA_RING_TYPE_SET(ring_cfg, XGENE_DMA_RING_TYPE_REGULAR);
1062 if (ring->owner == XGENE_DMA_RING_OWNER_DMA) {
1063 /* Set recombination buffer and timeout */
1064 XGENE_DMA_RING_RECOMBBUF_SET(ring_cfg);
1065 XGENE_DMA_RING_RECOMTIMEOUTL_SET(ring_cfg);
1066 XGENE_DMA_RING_RECOMTIMEOUTH_SET(ring_cfg);
1069 /* Initialize DMA ring state */
1070 XGENE_DMA_RING_SELTHRSH_SET(ring_cfg);
1071 XGENE_DMA_RING_ACCEPTLERR_SET(ring_cfg);
1072 XGENE_DMA_RING_COHERENT_SET(ring_cfg);
1073 XGENE_DMA_RING_ADDRL_SET(ring_cfg, addr);
1074 XGENE_DMA_RING_ADDRH_SET(ring_cfg, addr);
1075 XGENE_DMA_RING_SIZE_SET(ring_cfg, ring->cfgsize);
1077 /* Write DMA ring configurations */
1078 xgene_dma_wr_ring_state(ring);
1080 /* Set DMA ring id */
1081 iowrite32(XGENE_DMA_RING_ID_SETUP(ring->id),
1082 ring->pdma->csr_ring + XGENE_DMA_RING_ID);
1084 /* Set DMA ring buffer */
1085 iowrite32(XGENE_DMA_RING_ID_BUF_SETUP(ring->num),
1086 ring->pdma->csr_ring + XGENE_DMA_RING_ID_BUF);
1088 if (ring->owner != XGENE_DMA_RING_OWNER_CPU)
1089 return;
1091 /* Set empty signature to DMA Rx ring descriptors */
1092 for (i = 0; i < ring->slots; i++) {
1093 struct xgene_dma_desc_hw *desc;
1095 desc = &ring->desc_hw[i];
1096 desc->m0 = cpu_to_le64(XGENE_DMA_DESC_EMPTY_SIGNATURE);
1099 /* Enable DMA Rx ring interrupt */
1100 val = ioread32(ring->pdma->csr_ring + XGENE_DMA_RING_NE_INT_MODE);
1101 XGENE_DMA_RING_NE_INT_MODE_SET(val, ring->buf_num);
1102 iowrite32(val, ring->pdma->csr_ring + XGENE_DMA_RING_NE_INT_MODE);
1105 static void xgene_dma_clear_ring(struct xgene_dma_ring *ring)
1107 u32 ring_id, val;
1109 if (ring->owner == XGENE_DMA_RING_OWNER_CPU) {
1110 /* Disable DMA Rx ring interrupt */
1111 val = ioread32(ring->pdma->csr_ring +
1112 XGENE_DMA_RING_NE_INT_MODE);
1113 XGENE_DMA_RING_NE_INT_MODE_RESET(val, ring->buf_num);
1114 iowrite32(val, ring->pdma->csr_ring +
1115 XGENE_DMA_RING_NE_INT_MODE);
1118 /* Clear DMA ring state */
1119 ring_id = XGENE_DMA_RING_ID_SETUP(ring->id);
1120 iowrite32(ring_id, ring->pdma->csr_ring + XGENE_DMA_RING_ID);
1122 iowrite32(0, ring->pdma->csr_ring + XGENE_DMA_RING_ID_BUF);
1123 xgene_dma_clr_ring_state(ring);
1126 static void xgene_dma_set_ring_cmd(struct xgene_dma_ring *ring)
1128 ring->cmd_base = ring->pdma->csr_ring_cmd +
1129 XGENE_DMA_RING_CMD_BASE_OFFSET((ring->num -
1130 XGENE_DMA_RING_NUM));
1132 ring->cmd = ring->cmd_base + XGENE_DMA_RING_CMD_OFFSET;
1135 static int xgene_dma_get_ring_size(struct xgene_dma_chan *chan,
1136 enum xgene_dma_ring_cfgsize cfgsize)
1138 int size;
1140 switch (cfgsize) {
1141 case XGENE_DMA_RING_CFG_SIZE_512B:
1142 size = 0x200;
1143 break;
1144 case XGENE_DMA_RING_CFG_SIZE_2KB:
1145 size = 0x800;
1146 break;
1147 case XGENE_DMA_RING_CFG_SIZE_16KB:
1148 size = 0x4000;
1149 break;
1150 case XGENE_DMA_RING_CFG_SIZE_64KB:
1151 size = 0x10000;
1152 break;
1153 case XGENE_DMA_RING_CFG_SIZE_512KB:
1154 size = 0x80000;
1155 break;
1156 default:
1157 chan_err(chan, "Unsupported cfg ring size %d\n", cfgsize);
1158 return -EINVAL;
1161 return size;
1164 static void xgene_dma_delete_ring_one(struct xgene_dma_ring *ring)
1166 /* Clear DMA ring configurations */
1167 xgene_dma_clear_ring(ring);
1169 /* De-allocate DMA ring descriptor */
1170 if (ring->desc_vaddr) {
1171 dma_free_coherent(ring->pdma->dev, ring->size,
1172 ring->desc_vaddr, ring->desc_paddr);
1173 ring->desc_vaddr = NULL;
1177 static void xgene_dma_delete_chan_rings(struct xgene_dma_chan *chan)
1179 xgene_dma_delete_ring_one(&chan->rx_ring);
1180 xgene_dma_delete_ring_one(&chan->tx_ring);
1183 static int xgene_dma_create_ring_one(struct xgene_dma_chan *chan,
1184 struct xgene_dma_ring *ring,
1185 enum xgene_dma_ring_cfgsize cfgsize)
1187 int ret;
1189 /* Setup DMA ring descriptor variables */
1190 ring->pdma = chan->pdma;
1191 ring->cfgsize = cfgsize;
1192 ring->num = chan->pdma->ring_num++;
1193 ring->id = XGENE_DMA_RING_ID_GET(ring->owner, ring->buf_num);
1195 ret = xgene_dma_get_ring_size(chan, cfgsize);
1196 if (ret <= 0)
1197 return ret;
1198 ring->size = ret;
1200 /* Allocate memory for DMA ring descriptor */
1201 ring->desc_vaddr = dma_alloc_coherent(chan->dev, ring->size,
1202 &ring->desc_paddr, GFP_KERNEL);
1203 if (!ring->desc_vaddr) {
1204 chan_err(chan, "Failed to allocate ring desc\n");
1205 return -ENOMEM;
1208 /* Configure and enable DMA ring */
1209 xgene_dma_set_ring_cmd(ring);
1210 xgene_dma_setup_ring(ring);
1212 return 0;
1215 static int xgene_dma_create_chan_rings(struct xgene_dma_chan *chan)
1217 struct xgene_dma_ring *rx_ring = &chan->rx_ring;
1218 struct xgene_dma_ring *tx_ring = &chan->tx_ring;
1219 int ret;
1221 /* Create DMA Rx ring descriptor */
1222 rx_ring->owner = XGENE_DMA_RING_OWNER_CPU;
1223 rx_ring->buf_num = XGENE_DMA_CPU_BUFNUM + chan->id;
1225 ret = xgene_dma_create_ring_one(chan, rx_ring,
1226 XGENE_DMA_RING_CFG_SIZE_64KB);
1227 if (ret)
1228 return ret;
1230 chan_dbg(chan, "Rx ring id 0x%X num %d desc 0x%p\n",
1231 rx_ring->id, rx_ring->num, rx_ring->desc_vaddr);
1233 /* Create DMA Tx ring descriptor */
1234 tx_ring->owner = XGENE_DMA_RING_OWNER_DMA;
1235 tx_ring->buf_num = XGENE_DMA_BUFNUM + chan->id;
1237 ret = xgene_dma_create_ring_one(chan, tx_ring,
1238 XGENE_DMA_RING_CFG_SIZE_64KB);
1239 if (ret) {
1240 xgene_dma_delete_ring_one(rx_ring);
1241 return ret;
1244 tx_ring->dst_ring_num = XGENE_DMA_RING_DST_ID(rx_ring->num);
1246 chan_dbg(chan,
1247 "Tx ring id 0x%X num %d desc 0x%p\n",
1248 tx_ring->id, tx_ring->num, tx_ring->desc_vaddr);
1250 /* Set the max number of outstanding requests possible for this channel */
1251 chan->max_outstanding = tx_ring->slots;
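/*
 * max_outstanding is expressed in 32B hw descriptor slots; a 64B XOR/PQ
 * request therefore counts as two against chan->pending.
 */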
1253 return ret;
1256 static int xgene_dma_init_rings(struct xgene_dma *pdma)
1258 int ret, i, j;
1260 for (i = 0; i < XGENE_DMA_MAX_CHANNEL; i++) {
1261 ret = xgene_dma_create_chan_rings(&pdma->chan[i]);
1262 if (ret) {
1263 for (j = 0; j < i; j++)
1264 xgene_dma_delete_chan_rings(&pdma->chan[j]);
1265 return ret;
1269 return ret;
1272 static void xgene_dma_enable(struct xgene_dma *pdma)
1274 u32 val;
1276 /* Configure and enable DMA engine */
1277 val = ioread32(pdma->csr_dma + XGENE_DMA_GCR);
1278 XGENE_DMA_CH_SETUP(val);
1279 XGENE_DMA_ENABLE(val);
1280 iowrite32(val, pdma->csr_dma + XGENE_DMA_GCR);
1283 static void xgene_dma_disable(struct xgene_dma *pdma)
1285 u32 val;
1287 val = ioread32(pdma->csr_dma + XGENE_DMA_GCR);
1288 XGENE_DMA_DISABLE(val);
1289 iowrite32(val, pdma->csr_dma + XGENE_DMA_GCR);
1292 static void xgene_dma_mask_interrupts(struct xgene_dma *pdma)
1295 * Mask DMA ring overflow, underflow and
1296 * AXI write/read error interrupts
1298 iowrite32(XGENE_DMA_INT_ALL_MASK,
1299 pdma->csr_dma + XGENE_DMA_RING_INT0_MASK);
1300 iowrite32(XGENE_DMA_INT_ALL_MASK,
1301 pdma->csr_dma + XGENE_DMA_RING_INT1_MASK);
1302 iowrite32(XGENE_DMA_INT_ALL_MASK,
1303 pdma->csr_dma + XGENE_DMA_RING_INT2_MASK);
1304 iowrite32(XGENE_DMA_INT_ALL_MASK,
1305 pdma->csr_dma + XGENE_DMA_RING_INT3_MASK);
1306 iowrite32(XGENE_DMA_INT_ALL_MASK,
1307 pdma->csr_dma + XGENE_DMA_RING_INT4_MASK);
1309 /* Mask DMA error interrupts */
1310 iowrite32(XGENE_DMA_INT_ALL_MASK, pdma->csr_dma + XGENE_DMA_INT_MASK);
1313 static void xgene_dma_unmask_interrupts(struct xgene_dma *pdma)
1316 * Unmask DMA ring overflow, underflow and
1317 * AXI write/read error interrupts
1319 iowrite32(XGENE_DMA_INT_ALL_UNMASK,
1320 pdma->csr_dma + XGENE_DMA_RING_INT0_MASK);
1321 iowrite32(XGENE_DMA_INT_ALL_UNMASK,
1322 pdma->csr_dma + XGENE_DMA_RING_INT1_MASK);
1323 iowrite32(XGENE_DMA_INT_ALL_UNMASK,
1324 pdma->csr_dma + XGENE_DMA_RING_INT2_MASK);
1325 iowrite32(XGENE_DMA_INT_ALL_UNMASK,
1326 pdma->csr_dma + XGENE_DMA_RING_INT3_MASK);
1327 iowrite32(XGENE_DMA_INT_ALL_UNMASK,
1328 pdma->csr_dma + XGENE_DMA_RING_INT4_MASK);
1330 /* Unmask DMA error interrupts */
1331 iowrite32(XGENE_DMA_INT_ALL_UNMASK,
1332 pdma->csr_dma + XGENE_DMA_INT_MASK);
1335 static void xgene_dma_init_hw(struct xgene_dma *pdma)
1337 u32 val;
1339 /* Associate DMA ring to corresponding ring HW */
1340 iowrite32(XGENE_DMA_ASSOC_RING_MNGR1,
1341 pdma->csr_dma + XGENE_DMA_CFG_RING_WQ_ASSOC);
1343 /* Configure RAID6 polynomial control setting */
1344 if (is_pq_enabled(pdma))
1345 iowrite32(XGENE_DMA_RAID6_MULTI_CTRL(0x1D),
1346 pdma->csr_dma + XGENE_DMA_RAID6_CONT);
1347 else
1348 dev_info(pdma->dev, "PQ is disabled in HW\n");
1350 xgene_dma_enable(pdma);
1351 xgene_dma_unmask_interrupts(pdma);
1353 /* Get DMA id and version info */
1354 val = ioread32(pdma->csr_dma + XGENE_DMA_IPBRR);
1356 /* DMA device info */
1357 dev_info(pdma->dev,
1358 "X-Gene DMA v%d.%02d.%02d driver registered %d channels",
1359 XGENE_DMA_REV_NO_RD(val), XGENE_DMA_BUS_ID_RD(val),
1360 XGENE_DMA_DEV_ID_RD(val), XGENE_DMA_MAX_CHANNEL);
1363 static int xgene_dma_init_ring_mngr(struct xgene_dma *pdma)
1365 if (ioread32(pdma->csr_ring + XGENE_DMA_RING_CLKEN) &&
1366 (!ioread32(pdma->csr_ring + XGENE_DMA_RING_SRST)))
1367 return 0;
1369 iowrite32(0x3, pdma->csr_ring + XGENE_DMA_RING_CLKEN);
1370 iowrite32(0x0, pdma->csr_ring + XGENE_DMA_RING_SRST);
1372 /* Bring up memory */
1373 iowrite32(0x0, pdma->csr_ring + XGENE_DMA_RING_MEM_RAM_SHUTDOWN);
1375 /* Force a barrier */
1376 ioread32(pdma->csr_ring + XGENE_DMA_RING_MEM_RAM_SHUTDOWN);
1378 /* reset may take up to 1ms */
1379 usleep_range(1000, 1100);
1381 if (ioread32(pdma->csr_ring + XGENE_DMA_RING_BLK_MEM_RDY)
1382 != XGENE_DMA_RING_BLK_MEM_RDY_VAL) {
1383 dev_err(pdma->dev,
1384 "Failed to release ring mngr memory from shutdown\n");
1385 return -ENODEV;
1388 /* program threshold set 1 and all hysteresis */
1389 iowrite32(XGENE_DMA_RING_THRESLD0_SET1_VAL,
1390 pdma->csr_ring + XGENE_DMA_RING_THRESLD0_SET1);
1391 iowrite32(XGENE_DMA_RING_THRESLD1_SET1_VAL,
1392 pdma->csr_ring + XGENE_DMA_RING_THRESLD1_SET1);
1393 iowrite32(XGENE_DMA_RING_HYSTERESIS_VAL,
1394 pdma->csr_ring + XGENE_DMA_RING_HYSTERESIS);
1396 /* Enable QPcore and assign error queue */
1397 iowrite32(XGENE_DMA_RING_ENABLE,
1398 pdma->csr_ring + XGENE_DMA_RING_CONFIG);
1400 return 0;
1403 static int xgene_dma_init_mem(struct xgene_dma *pdma)
1405 int ret;
1407 ret = xgene_dma_init_ring_mngr(pdma);
1408 if (ret)
1409 return ret;
1411 /* Bring up memory */
1412 iowrite32(0x0, pdma->csr_dma + XGENE_DMA_MEM_RAM_SHUTDOWN);
1414 /* Force a barrier */
1415 ioread32(pdma->csr_dma + XGENE_DMA_MEM_RAM_SHUTDOWN);
1417 /* reset may take up to 1ms */
1418 usleep_range(1000, 1100);
1420 if (ioread32(pdma->csr_dma + XGENE_DMA_BLK_MEM_RDY)
1421 != XGENE_DMA_BLK_MEM_RDY_VAL) {
1422 dev_err(pdma->dev,
1423 "Failed to release DMA memory from shutdown\n");
1424 return -ENODEV;
1427 return 0;
1430 static int xgene_dma_request_irqs(struct xgene_dma *pdma)
1432 struct xgene_dma_chan *chan;
1433 int ret, i, j;
1435 /* Register DMA error irq */
1436 ret = devm_request_irq(pdma->dev, pdma->err_irq, xgene_dma_err_isr,
1437 0, "dma_error", pdma);
1438 if (ret) {
1439 dev_err(pdma->dev,
1440 "Failed to register error IRQ %d\n", pdma->err_irq);
1441 return ret;
1444 /* Register DMA channel rx irq */
1445 for (i = 0; i < XGENE_DMA_MAX_CHANNEL; i++) {
1446 chan = &pdma->chan[i];
1447 irq_set_status_flags(chan->rx_irq, IRQ_DISABLE_UNLAZY);
1448 ret = devm_request_irq(chan->dev, chan->rx_irq,
1449 xgene_dma_chan_ring_isr,
1450 0, chan->name, chan);
1451 if (ret) {
1452 chan_err(chan, "Failed to register Rx IRQ %d\n",
1453 chan->rx_irq);
1454 devm_free_irq(pdma->dev, pdma->err_irq, pdma);
1456 for (j = 0; j < i; j++) {
1457 chan = &pdma->chan[i];
1458 irq_clear_status_flags(chan->rx_irq, IRQ_DISABLE_UNLAZY);
1459 devm_free_irq(chan->dev, chan->rx_irq, chan);
1462 return ret;
1466 return 0;
1469 static void xgene_dma_free_irqs(struct xgene_dma *pdma)
1471 struct xgene_dma_chan *chan;
1472 int i;
1474 /* Free DMA device error irq */
1475 devm_free_irq(pdma->dev, pdma->err_irq, pdma);
1477 for (i = 0; i < XGENE_DMA_MAX_CHANNEL; i++) {
1478 chan = &pdma->chan[i];
1479 irq_clear_status_flags(chan->rx_irq, IRQ_DISABLE_UNLAZY);
1480 devm_free_irq(chan->dev, chan->rx_irq, chan);
1484 static void xgene_dma_set_caps(struct xgene_dma_chan *chan,
1485 struct dma_device *dma_dev)
1487 /* Initialize DMA device capability mask */
1488 dma_cap_zero(dma_dev->cap_mask);
1490 /* Set DMA device capability */
1492 /* On the X-Gene SoC DMA engine, channel 0 supports XOR and channel 1
1493 * supports both XOR and PQ. The hw provides a mechanism to
1494 * enable/disable PQ/XOR support on channel 1, which we can check by
1495 * reading the SoC efuse register.
1496 * There is also a hw erratum: if channel 0 and channel 1 execute XOR
1497 * and PQ requests simultaneously, the DMA engine may hang. So we
1498 * enable XOR on channel 0 only if XOR and PQ support on channel 1 is
1499 * disabled.
1501 if ((chan->id == XGENE_DMA_PQ_CHANNEL) &&
1502 is_pq_enabled(chan->pdma)) {
1503 dma_cap_set(DMA_PQ, dma_dev->cap_mask);
1504 dma_cap_set(DMA_XOR, dma_dev->cap_mask);
1505 } else if ((chan->id == XGENE_DMA_XOR_CHANNEL) &&
1506 !is_pq_enabled(chan->pdma)) {
1507 dma_cap_set(DMA_XOR, dma_dev->cap_mask);
1510 /* Set base and prep routines */
1511 dma_dev->dev = chan->dev;
1512 dma_dev->device_alloc_chan_resources = xgene_dma_alloc_chan_resources;
1513 dma_dev->device_free_chan_resources = xgene_dma_free_chan_resources;
1514 dma_dev->device_issue_pending = xgene_dma_issue_pending;
1515 dma_dev->device_tx_status = xgene_dma_tx_status;
1517 if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
1518 dma_dev->device_prep_dma_xor = xgene_dma_prep_xor;
1519 dma_dev->max_xor = XGENE_DMA_MAX_XOR_SRC;
1520 dma_dev->xor_align = DMAENGINE_ALIGN_64_BYTES;
1523 if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) {
1524 dma_dev->device_prep_dma_pq = xgene_dma_prep_pq;
1525 dma_dev->max_pq = XGENE_DMA_MAX_XOR_SRC;
1526 dma_dev->pq_align = DMAENGINE_ALIGN_64_BYTES;
1530 static int xgene_dma_async_register(struct xgene_dma *pdma, int id)
1532 struct xgene_dma_chan *chan = &pdma->chan[id];
1533 struct dma_device *dma_dev = &pdma->dma_dev[id];
1534 int ret;
1536 chan->dma_chan.device = dma_dev;
1538 spin_lock_init(&chan->lock);
1539 INIT_LIST_HEAD(&chan->ld_pending);
1540 INIT_LIST_HEAD(&chan->ld_running);
1541 INIT_LIST_HEAD(&chan->ld_completed);
1542 tasklet_setup(&chan->tasklet, xgene_dma_tasklet_cb);
1544 chan->pending = 0;
1545 chan->desc_pool = NULL;
1546 dma_cookie_init(&chan->dma_chan);
1548 /* Setup dma device capabilities and prep routines */
1549 xgene_dma_set_caps(chan, dma_dev);
1551 /* Initialize DMA device list head */
1552 INIT_LIST_HEAD(&dma_dev->channels);
1553 list_add_tail(&chan->dma_chan.device_node, &dma_dev->channels);
1555 /* Register with the Linux async DMA framework */
1556 ret = dma_async_device_register(dma_dev);
1557 if (ret) {
1558 chan_err(chan, "Failed to register async device %d", ret);
1559 tasklet_kill(&chan->tasklet);
1561 return ret;
1564 /* DMA capability info */
1565 dev_info(pdma->dev,
1566 "%s: CAPABILITY ( %s%s)\n", dma_chan_name(&chan->dma_chan),
1567 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "XOR " : "",
1568 dma_has_cap(DMA_PQ, dma_dev->cap_mask) ? "PQ " : "");
1570 return 0;
1573 static int xgene_dma_init_async(struct xgene_dma *pdma)
1575 int ret, i, j;
1577 for (i = 0; i < XGENE_DMA_MAX_CHANNEL ; i++) {
1578 ret = xgene_dma_async_register(pdma, i);
1579 if (ret) {
1580 for (j = 0; j < i; j++) {
1581 dma_async_device_unregister(&pdma->dma_dev[j]);
1582 tasklet_kill(&pdma->chan[j].tasklet);
1585 return ret;
1589 return ret;
1592 static void xgene_dma_async_unregister(struct xgene_dma *pdma)
1594 int i;
1596 for (i = 0; i < XGENE_DMA_MAX_CHANNEL; i++)
1597 dma_async_device_unregister(&pdma->dma_dev[i]);
1600 static void xgene_dma_init_channels(struct xgene_dma *pdma)
1602 struct xgene_dma_chan *chan;
1603 int i;
1605 pdma->ring_num = XGENE_DMA_RING_NUM;
1607 for (i = 0; i < XGENE_DMA_MAX_CHANNEL; i++) {
1608 chan = &pdma->chan[i];
1609 chan->dev = pdma->dev;
1610 chan->pdma = pdma;
1611 chan->id = i;
1612 snprintf(chan->name, sizeof(chan->name), "dmachan%d", chan->id);
1616 static int xgene_dma_get_resources(struct platform_device *pdev,
1617 struct xgene_dma *pdma)
1619 struct resource *res;
1620 int irq, i;
1622 /* Get DMA csr region */
1623 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1624 if (!res) {
1625 dev_err(&pdev->dev, "Failed to get csr region\n");
1626 return -ENXIO;
1629 pdma->csr_dma = devm_ioremap(&pdev->dev, res->start,
1630 resource_size(res));
1631 if (!pdma->csr_dma) {
1632 dev_err(&pdev->dev, "Failed to ioremap csr region");
1633 return -ENOMEM;
1636 /* Get DMA ring csr region */
1637 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1638 if (!res) {
1639 dev_err(&pdev->dev, "Failed to get ring csr region\n");
1640 return -ENXIO;
1643 pdma->csr_ring = devm_ioremap(&pdev->dev, res->start,
1644 resource_size(res));
1645 if (!pdma->csr_ring) {
1646 dev_err(&pdev->dev, "Failed to ioremap ring csr region");
1647 return -ENOMEM;
1650 /* Get DMA ring cmd csr region */
1651 res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
1652 if (!res) {
1653 dev_err(&pdev->dev, "Failed to get ring cmd csr region\n");
1654 return -ENXIO;
1657 pdma->csr_ring_cmd = devm_ioremap(&pdev->dev, res->start,
1658 resource_size(res));
1659 if (!pdma->csr_ring_cmd) {
1660 dev_err(&pdev->dev, "Failed to ioremap ring cmd csr region");
1661 return -ENOMEM;
1664 pdma->csr_ring_cmd += XGENE_DMA_RING_CMD_SM_OFFSET;
1666 /* Get efuse csr region */
1667 res = platform_get_resource(pdev, IORESOURCE_MEM, 3);
1668 if (!res) {
1669 dev_err(&pdev->dev, "Failed to get efuse csr region\n");
1670 return -ENXIO;
1673 pdma->csr_efuse = devm_ioremap(&pdev->dev, res->start,
1674 resource_size(res));
1675 if (!pdma->csr_efuse) {
1676 dev_err(&pdev->dev, "Failed to ioremap efuse csr region");
1677 return -ENOMEM;
1680 /* Get DMA error interrupt */
1681 irq = platform_get_irq(pdev, 0);
1682 if (irq <= 0)
1683 return -ENXIO;
1685 pdma->err_irq = irq;
1687 /* Get DMA Rx ring descriptor interrupts for all DMA channels */
1688 for (i = 1; i <= XGENE_DMA_MAX_CHANNEL; i++) {
1689 irq = platform_get_irq(pdev, i);
1690 if (irq <= 0)
1691 return -ENXIO;
1693 pdma->chan[i - 1].rx_irq = irq;
1696 return 0;
1699 static int xgene_dma_probe(struct platform_device *pdev)
1701 struct xgene_dma *pdma;
1702 int ret, i;
1704 pdma = devm_kzalloc(&pdev->dev, sizeof(*pdma), GFP_KERNEL);
1705 if (!pdma)
1706 return -ENOMEM;
1708 pdma->dev = &pdev->dev;
1709 platform_set_drvdata(pdev, pdma);
1711 ret = xgene_dma_get_resources(pdev, pdma);
1712 if (ret)
1713 return ret;
1715 pdma->clk = devm_clk_get(&pdev->dev, NULL);
1716 if (IS_ERR(pdma->clk) && !ACPI_COMPANION(&pdev->dev)) {
1717 dev_err(&pdev->dev, "Failed to get clk\n");
1718 return PTR_ERR(pdma->clk);
1721 /* Enable clk before accessing registers */
1722 if (!IS_ERR(pdma->clk)) {
1723 ret = clk_prepare_enable(pdma->clk);
1724 if (ret) {
1725 dev_err(&pdev->dev, "Failed to enable clk %d\n", ret);
1726 return ret;
1730 /* Bring DMA RAM out of shutdown */
1731 ret = xgene_dma_init_mem(pdma);
1732 if (ret)
1733 goto err_clk_enable;
1735 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(42));
1736 if (ret) {
1737 dev_err(&pdev->dev, "No usable DMA configuration\n");
1738 goto err_dma_mask;
1741 /* Initialize DMA channels software state */
1742 xgene_dma_init_channels(pdma);
1744 /* Configure DMA rings */
1745 ret = xgene_dma_init_rings(pdma);
1746 if (ret)
1747 goto err_clk_enable;
1749 ret = xgene_dma_request_irqs(pdma);
1750 if (ret)
1751 goto err_request_irq;
1753 /* Configure and enable DMA engine */
1754 xgene_dma_init_hw(pdma);
1756 /* Register DMA device with linux async framework */
1757 ret = xgene_dma_init_async(pdma);
1758 if (ret)
1759 goto err_async_init;
1761 return 0;
1763 err_async_init:
1764 xgene_dma_free_irqs(pdma);
1766 err_request_irq:
1767 for (i = 0; i < XGENE_DMA_MAX_CHANNEL; i++)
1768 xgene_dma_delete_chan_rings(&pdma->chan[i]);
1770 err_dma_mask:
1771 err_clk_enable:
1772 if (!IS_ERR(pdma->clk))
1773 clk_disable_unprepare(pdma->clk);
1775 return ret;
1778 static int xgene_dma_remove(struct platform_device *pdev)
1780 struct xgene_dma *pdma = platform_get_drvdata(pdev);
1781 struct xgene_dma_chan *chan;
1782 int i;
1784 xgene_dma_async_unregister(pdma);
1786 /* Mask interrupts and disable DMA engine */
1787 xgene_dma_mask_interrupts(pdma);
1788 xgene_dma_disable(pdma);
1789 xgene_dma_free_irqs(pdma);
1791 for (i = 0; i < XGENE_DMA_MAX_CHANNEL; i++) {
1792 chan = &pdma->chan[i];
1793 tasklet_kill(&chan->tasklet);
1794 xgene_dma_delete_chan_rings(chan);
1797 if (!IS_ERR(pdma->clk))
1798 clk_disable_unprepare(pdma->clk);
1800 return 0;
1803 #ifdef CONFIG_ACPI
1804 static const struct acpi_device_id xgene_dma_acpi_match_ptr[] = {
1805 {"APMC0D43", 0},
1808 MODULE_DEVICE_TABLE(acpi, xgene_dma_acpi_match_ptr);
1809 #endif
1811 static const struct of_device_id xgene_dma_of_match_ptr[] = {
1812 {.compatible = "apm,xgene-storm-dma",},
1815 MODULE_DEVICE_TABLE(of, xgene_dma_of_match_ptr);
1817 static struct platform_driver xgene_dma_driver = {
1818 .probe = xgene_dma_probe,
1819 .remove = xgene_dma_remove,
1820 .driver = {
1821 .name = "X-Gene-DMA",
1822 .of_match_table = xgene_dma_of_match_ptr,
1823 .acpi_match_table = ACPI_PTR(xgene_dma_acpi_match_ptr),
1827 module_platform_driver(xgene_dma_driver);
1829 MODULE_DESCRIPTION("APM X-Gene SoC DMA driver");
1830 MODULE_AUTHOR("Rameshwar Prasad Sahu <rsahu@apm.com>");
1831 MODULE_AUTHOR("Loc Ho <lho@apm.com>");
1832 MODULE_LICENSE("GPL");
1833 MODULE_VERSION("1.0");