/*
 * Applied Micro X-Gene SoC DMA engine Driver
 *
 * Copyright (c) 2015, Applied Micro Circuits Corporation
 * Authors: Rameshwar Prasad Sahu <rsahu@apm.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * NOTE: PM support is currently not available.
 */
#include <linux/acpi.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/of_device.h>

#include "dmaengine.h"
/* X-Gene DMA ring csr registers and bit definitions */
#define XGENE_DMA_RING_CONFIG			0x04
#define XGENE_DMA_RING_ENABLE			BIT(31)
#define XGENE_DMA_RING_ID			0x08
#define XGENE_DMA_RING_ID_SETUP(v)		((v) | BIT(31))
#define XGENE_DMA_RING_ID_BUF			0x0C
#define XGENE_DMA_RING_ID_BUF_SETUP(v)		(((v) << 9) | BIT(21))
#define XGENE_DMA_RING_THRESLD0_SET1		0x30
#define XGENE_DMA_RING_THRESLD0_SET1_VAL	0x64
#define XGENE_DMA_RING_THRESLD1_SET1		0x34
#define XGENE_DMA_RING_THRESLD1_SET1_VAL	0xC8
#define XGENE_DMA_RING_HYSTERESIS		0x68
#define XGENE_DMA_RING_HYSTERESIS_VAL		0xFFFFFFFF
#define XGENE_DMA_RING_STATE			0x6C
#define XGENE_DMA_RING_STATE_WR_BASE		0x70
#define XGENE_DMA_RING_NE_INT_MODE		0x017C
#define XGENE_DMA_RING_NE_INT_MODE_SET(m, v)	\
	((m) = ((m) & ~BIT(31 - (v))) | BIT(31 - (v)))
#define XGENE_DMA_RING_NE_INT_MODE_RESET(m, v)	\
	((m) &= (~BIT(31 - (v))))
#define XGENE_DMA_RING_CLKEN			0xC208
#define XGENE_DMA_RING_SRST			0xC200
#define XGENE_DMA_RING_MEM_RAM_SHUTDOWN		0xD070
#define XGENE_DMA_RING_BLK_MEM_RDY		0xD074
#define XGENE_DMA_RING_BLK_MEM_RDY_VAL		0xFFFFFFFF
#define XGENE_DMA_RING_ID_GET(owner, num)	(((owner) << 6) | (num))
#define XGENE_DMA_RING_DST_ID(v)		((1 << 10) | (v))
#define XGENE_DMA_RING_CMD_OFFSET		0x2C
#define XGENE_DMA_RING_CMD_BASE_OFFSET(v)	((v) << 6)
#define XGENE_DMA_RING_COHERENT_SET(m)		\
	(((u32 *)(m))[2] |= BIT(4))
#define XGENE_DMA_RING_ADDRL_SET(m, v)		\
	(((u32 *)(m))[2] |= (((v) >> 8) << 5))
#define XGENE_DMA_RING_ADDRH_SET(m, v)		\
	(((u32 *)(m))[3] |= ((v) >> 35))
#define XGENE_DMA_RING_ACCEPTLERR_SET(m)	\
	(((u32 *)(m))[3] |= BIT(19))
#define XGENE_DMA_RING_SIZE_SET(m, v)		\
	(((u32 *)(m))[3] |= ((v) << 23))
#define XGENE_DMA_RING_RECOMBBUF_SET(m)		\
	(((u32 *)(m))[3] |= BIT(27))
#define XGENE_DMA_RING_RECOMTIMEOUTL_SET(m)	\
	(((u32 *)(m))[3] |= (0x7 << 28))
#define XGENE_DMA_RING_RECOMTIMEOUTH_SET(m)	\
	(((u32 *)(m))[4] |= 0x3)
#define XGENE_DMA_RING_SELTHRSH_SET(m)		\
	(((u32 *)(m))[4] |= BIT(3))
#define XGENE_DMA_RING_TYPE_SET(m, v)		\
	(((u32 *)(m))[4] |= ((v) << 19))
/* X-Gene DMA device csr registers and bit definitions */
#define XGENE_DMA_IPBRR				0x0
#define XGENE_DMA_DEV_ID_RD(v)			((v) & 0x00000FFF)
#define XGENE_DMA_BUS_ID_RD(v)			(((v) >> 12) & 3)
#define XGENE_DMA_REV_NO_RD(v)			(((v) >> 14) & 3)
#define XGENE_DMA_GCR				0x10
#define XGENE_DMA_CH_SETUP(v)			\
	((v) = ((v) & ~0x000FFFFF) | 0x000AAFFF)
#define XGENE_DMA_ENABLE(v)			((v) |= BIT(31))
#define XGENE_DMA_DISABLE(v)			((v) &= ~BIT(31))
#define XGENE_DMA_RAID6_CONT			0x14
#define XGENE_DMA_RAID6_MULTI_CTRL(v)		((v) << 24)
#define XGENE_DMA_INT				0x70
#define XGENE_DMA_INT_MASK			0x74
#define XGENE_DMA_INT_ALL_MASK			0xFFFFFFFF
#define XGENE_DMA_INT_ALL_UNMASK		0x0
#define XGENE_DMA_INT_MASK_SHIFT		0x14
#define XGENE_DMA_RING_INT0_MASK		0x90A0
#define XGENE_DMA_RING_INT1_MASK		0x90A8
#define XGENE_DMA_RING_INT2_MASK		0x90B0
#define XGENE_DMA_RING_INT3_MASK		0x90B8
#define XGENE_DMA_RING_INT4_MASK		0x90C0
#define XGENE_DMA_CFG_RING_WQ_ASSOC		0x90E0
#define XGENE_DMA_ASSOC_RING_MNGR1		0xFFFFFFFF
#define XGENE_DMA_MEM_RAM_SHUTDOWN		0xD070
#define XGENE_DMA_BLK_MEM_RDY			0xD074
#define XGENE_DMA_BLK_MEM_RDY_VAL		0xFFFFFFFF
#define XGENE_DMA_RING_CMD_SM_OFFSET		0x8000
/* X-Gene SoC EFUSE csr register and bit definition */
#define XGENE_SOC_JTAG1_SHADOW			0x18
#define XGENE_DMA_PQ_DISABLE_MASK		BIT(13)
/* X-Gene DMA Descriptor format */
#define XGENE_DMA_DESC_NV_BIT			BIT_ULL(50)
#define XGENE_DMA_DESC_IN_BIT			BIT_ULL(55)
#define XGENE_DMA_DESC_C_BIT			BIT_ULL(63)
#define XGENE_DMA_DESC_DR_BIT			BIT_ULL(61)
#define XGENE_DMA_DESC_ELERR_POS		46
#define XGENE_DMA_DESC_RTYPE_POS		56
#define XGENE_DMA_DESC_LERR_POS			60
#define XGENE_DMA_DESC_BUFLEN_POS		48
#define XGENE_DMA_DESC_HOENQ_NUM_POS		48
#define XGENE_DMA_DESC_ELERR_RD(m)		\
	(((m) >> XGENE_DMA_DESC_ELERR_POS) & 0x3)
#define XGENE_DMA_DESC_LERR_RD(m)		\
	(((m) >> XGENE_DMA_DESC_LERR_POS) & 0x7)
#define XGENE_DMA_DESC_STATUS(elerr, lerr)	\
	(((elerr) << 4) | (lerr))
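/*
 * Worked example of the status encoding above: a completion with ELERR = 0x0
 * and LERR = 0x3 gives XGENE_DMA_DESC_STATUS(0x0, 0x3) = (0x0 << 4) | 0x3 =
 * 0x03, i.e. ERR_READ_DATA_AXI, which is then used to index
 * xgene_dma_desc_err[] when reporting the error.
 */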
/* X-Gene DMA descriptor empty s/w signature */
#define XGENE_DMA_DESC_EMPTY_SIGNATURE		~0ULL
/* X-Gene DMA configurable parameters defines */
#define XGENE_DMA_RING_NUM		512
#define XGENE_DMA_BUFNUM		0x0
#define XGENE_DMA_CPU_BUFNUM		0x18
#define XGENE_DMA_RING_OWNER_DMA	0x03
#define XGENE_DMA_RING_OWNER_CPU	0x0F
#define XGENE_DMA_RING_TYPE_REGULAR	0x01
#define XGENE_DMA_RING_WQ_DESC_SIZE	32	/* 32 Bytes */
#define XGENE_DMA_RING_NUM_CONFIG	5
#define XGENE_DMA_MAX_CHANNEL		4
#define XGENE_DMA_XOR_CHANNEL		0
#define XGENE_DMA_PQ_CHANNEL		1
#define XGENE_DMA_MAX_BYTE_CNT		0x4000	/* 16 KB */
#define XGENE_DMA_MAX_64B_DESC_BYTE_CNT	0x14000	/* 80 KB */
#define XGENE_DMA_MAX_XOR_SRC		5
#define XGENE_DMA_16K_BUFFER_LEN_CODE	0x0
#define XGENE_DMA_INVALID_LEN_CODE	0x7800000000000000ULL
/* X-Gene DMA descriptor error codes */
#define ERR_DESC_AXI		0x01
#define ERR_BAD_DESC		0x02
#define ERR_READ_DATA_AXI	0x03
#define ERR_WRITE_DATA_AXI	0x04
#define ERR_FBP_TIMEOUT		0x05
#define ERR_ECC			0x06
#define ERR_DIFF_SIZE		0x08
#define ERR_SCT_GAT_LEN		0x09
#define ERR_CRC_ERR		0x11
#define ERR_CHKSUM		0x12
#define ERR_DIF			0x13
/* X-Gene DMA error interrupt codes */
#define ERR_DIF_SIZE_INT	0x0
#define ERR_GS_ERR_INT		0x1
#define ERR_FPB_TIMEO_INT	0x2
#define ERR_WFIFO_OVF_INT	0x3
#define ERR_RFIFO_OVF_INT	0x4
#define ERR_WR_TIMEO_INT	0x5
#define ERR_RD_TIMEO_INT	0x6
#define ERR_WR_ERR_INT		0x7
#define ERR_RD_ERR_INT		0x8
#define ERR_BAD_DESC_INT	0x9
#define ERR_DESC_DST_INT	0xA
#define ERR_DESC_SRC_INT	0xB
/* X-Gene DMA flyby operation code */
#define FLYBY_2SRC_XOR		0x80
#define FLYBY_3SRC_XOR		0x90
#define FLYBY_4SRC_XOR		0xA0
#define FLYBY_5SRC_XOR		0xB0
/* X-Gene DMA SW descriptor flags */
#define XGENE_DMA_FLAG_64B_DESC		BIT(0)

/* Define to dump X-Gene DMA descriptor */
#define XGENE_DMA_DESC_DUMP(desc, m)	\
	print_hex_dump(KERN_ERR, (m),	\
			DUMP_PREFIX_ADDRESS, 16, 8, (desc), 32, 0)

#define to_dma_desc_sw(tx)		\
	container_of(tx, struct xgene_dma_desc_sw, tx)
#define to_dma_chan(dchan)		\
	container_of(dchan, struct xgene_dma_chan, dma_chan)

#define chan_dbg(chan, fmt, arg...)	\
	dev_dbg(chan->dev, "%s: " fmt, chan->name, ##arg)
#define chan_err(chan, fmt, arg...)	\
	dev_err(chan->dev, "%s: " fmt, chan->name, ##arg)
struct xgene_dma_desc_hw {
	__le64 m0;
	__le64 m1;
	__le64 m2;
	__le64 m3;
};
enum xgene_dma_ring_cfgsize {
	XGENE_DMA_RING_CFG_SIZE_512B,
	XGENE_DMA_RING_CFG_SIZE_2KB,
	XGENE_DMA_RING_CFG_SIZE_16KB,
	XGENE_DMA_RING_CFG_SIZE_64KB,
	XGENE_DMA_RING_CFG_SIZE_512KB,
	XGENE_DMA_RING_CFG_SIZE_INVALID
};
struct xgene_dma_ring {
	struct xgene_dma *pdma;
	u8 buf_num;
	u16 id;
	u16 num;
	u16 head;
	u16 owner;
	u16 slots;
	u16 dst_ring_num;
	u32 size;
	void __iomem *cmd;
	void __iomem *cmd_base;
	dma_addr_t desc_paddr;
	u32 state[XGENE_DMA_RING_NUM_CONFIG];
	enum xgene_dma_ring_cfgsize cfgsize;
	union {
		void *desc_vaddr;
		struct xgene_dma_desc_hw *desc_hw;
	};
};
struct xgene_dma_desc_sw {
	struct xgene_dma_desc_hw desc1;
	struct xgene_dma_desc_hw desc2;
	u32 flags;
	struct list_head node;
	struct list_head tx_list;
	struct dma_async_tx_descriptor tx;
};
/**
 * struct xgene_dma_chan - internal representation of an X-Gene DMA channel
 * @dma_chan: dmaengine channel object member
 * @pdma: X-Gene DMA device structure reference
 * @dev: struct device reference for dma mapping api
 * @id: raw id of this channel
 * @rx_irq: channel IRQ
 * @name: name of X-Gene DMA channel
 * @lock: serializes enqueue/dequeue operations to the descriptor pool
 * @pending: number of transaction requests pushed to the DMA controller for
 *	execution, but still awaiting completion
 * @max_outstanding: max number of outstanding requests we can push to channel
 * @ld_pending: descriptors which are queued to run, but have not yet been
 *	submitted to the hardware for execution
 * @ld_running: descriptors which are currently being executed by the hardware
 * @ld_completed: descriptors which have finished execution by the hardware.
 *	These descriptors have already had their cleanup actions run. They
 *	are waiting for the ACK bit to be set by the async tx API.
 * @desc_pool: descriptor pool for DMA operations
 * @tasklet: bottom half where completed descriptors are cleaned up
 * @tx_ring: transmit ring descriptor that we use to prepare actual
 *	descriptors for further executions
 * @rx_ring: receive ring descriptor that we use to get completed DMA
 *	descriptors during cleanup time
 */
struct xgene_dma_chan {
	struct dma_chan dma_chan;
	struct xgene_dma *pdma;
	struct device *dev;
	int id;
	int rx_irq;
	char name[10];
	spinlock_t lock;
	int pending;
	int max_outstanding;
	struct list_head ld_pending;
	struct list_head ld_running;
	struct list_head ld_completed;
	struct dma_pool *desc_pool;
	struct tasklet_struct tasklet;
	struct xgene_dma_ring tx_ring;
	struct xgene_dma_ring rx_ring;
};
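/*
 * Descriptor lifecycle, as implemented by the routines below: tx_submit()
 * splices new descriptors onto ld_pending; issue_pending() (or the completion
 * tasklet) writes them into the Tx ring and moves them to ld_running; the
 * Rx-ring cleanup path moves finished descriptors to ld_completed, where they
 * wait for the async_tx 'ack' bit before being returned to the dma_pool.
 */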
/**
 * struct xgene_dma - internal representation of an X-Gene DMA device
 * @dev: reference to this device's struct device
 * @clk: reference to this device's clock
 * @err_irq: DMA error irq number
 * @ring_num: start id number for DMA ring
 * @csr_dma: base for DMA register access
 * @csr_ring: base for DMA ring register access
 * @csr_ring_cmd: base for DMA ring command register access
 * @csr_efuse: base for efuse register access
 * @dma_dev: embedded struct dma_device
 * @chan: reference to X-Gene DMA channels
 */
struct xgene_dma {
	struct device *dev;
	struct clk *clk;
	int err_irq;
	int ring_num;
	void __iomem *csr_dma;
	void __iomem *csr_ring;
	void __iomem *csr_ring_cmd;
	void __iomem *csr_efuse;
	struct dma_device dma_dev[XGENE_DMA_MAX_CHANNEL];
	struct xgene_dma_chan chan[XGENE_DMA_MAX_CHANNEL];
};
static const char * const xgene_dma_desc_err[] = {
	[ERR_DESC_AXI] = "AXI error when reading src/dst link list",
	[ERR_BAD_DESC] = "ERR or El_ERR fields not set to zero in desc",
	[ERR_READ_DATA_AXI] = "AXI error when reading data",
	[ERR_WRITE_DATA_AXI] = "AXI error when writing data",
	[ERR_FBP_TIMEOUT] = "Timeout on bufpool fetch",
	[ERR_ECC] = "ECC double bit error",
	[ERR_DIFF_SIZE] = "Bufpool too small to hold all the DIF result",
	[ERR_SCT_GAT_LEN] = "Gather and scatter data length not same",
	[ERR_CRC_ERR] = "CRC error",
	[ERR_CHKSUM] = "Checksum error",
	[ERR_DIF] = "DIF error",
};
static const char * const xgene_dma_err[] = {
	[ERR_DIF_SIZE_INT] = "DIF size error",
	[ERR_GS_ERR_INT] = "Gather scatter not same size error",
	[ERR_FPB_TIMEO_INT] = "Free pool time out error",
	[ERR_WFIFO_OVF_INT] = "Write FIFO over flow error",
	[ERR_RFIFO_OVF_INT] = "Read FIFO over flow error",
	[ERR_WR_TIMEO_INT] = "Write time out error",
	[ERR_RD_TIMEO_INT] = "Read time out error",
	[ERR_WR_ERR_INT] = "HBF bus write error",
	[ERR_RD_ERR_INT] = "HBF bus read error",
	[ERR_BAD_DESC_INT] = "Ring descriptor HE0 not set error",
	[ERR_DESC_DST_INT] = "HFB reading dst link address error",
	[ERR_DESC_SRC_INT] = "HFB reading src link address error",
};
static bool is_pq_enabled(struct xgene_dma *pdma)
{
	u32 val;

	val = ioread32(pdma->csr_efuse + XGENE_SOC_JTAG1_SHADOW);
	return !(val & XGENE_DMA_PQ_DISABLE_MASK);
}
static u64 xgene_dma_encode_len(size_t len)
{
	return (len < XGENE_DMA_MAX_BYTE_CNT) ?
		((u64)len << XGENE_DMA_DESC_BUFLEN_POS) :
		XGENE_DMA_16K_BUFFER_LEN_CODE;
}
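/*
 * Illustrative encoding: a 4 KB (0x1000 byte) buffer is placed in the
 * descriptor as 0x1000 << XGENE_DMA_DESC_BUFLEN_POS, while a full 16 KB
 * segment (XGENE_DMA_MAX_BYTE_CNT) is represented by the special
 * XGENE_DMA_16K_BUFFER_LEN_CODE value instead of an explicit length.
 */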
static u8 xgene_dma_encode_xor_flyby(u32 src_cnt)
{
	static u8 flyby_type[] = {
		FLYBY_2SRC_XOR, /* Dummy */
		FLYBY_2SRC_XOR, /* Dummy */
		FLYBY_2SRC_XOR,
		FLYBY_3SRC_XOR,
		FLYBY_4SRC_XOR,
		FLYBY_5SRC_XOR
	};

	return flyby_type[src_cnt];
}
static void xgene_dma_set_src_buffer(__le64 *ext8, size_t *len,
				     dma_addr_t *paddr)
{
	size_t nbytes = (*len < XGENE_DMA_MAX_BYTE_CNT) ?
			*len : XGENE_DMA_MAX_BYTE_CNT;

	*ext8 |= cpu_to_le64(*paddr);
	*ext8 |= cpu_to_le64(xgene_dma_encode_len(nbytes));
	*len -= nbytes;
	*paddr += nbytes;
}
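/*
 * Each source slot filled here covers at most XGENE_DMA_MAX_BYTE_CNT (16 KB),
 * so one 64B descriptor with five source slots can move up to
 * 5 * 16 KB = 80 KB, matching XGENE_DMA_MAX_64B_DESC_BYTE_CNT above.
 */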
static __le64 *xgene_dma_lookup_ext8(struct xgene_dma_desc_hw *desc, int idx)
{
	/* Map source index to the extended 8-byte slot of the 64B descriptor */
	switch (idx) {
	case 0: return &desc->m1;
	case 1: return &desc->m0;
	case 2: return &desc->m3;
	case 3: return &desc->m2;
	default:
		pr_err("Invalid dma descriptor index\n");
		return NULL;
	}
}
static void xgene_dma_init_desc(struct xgene_dma_desc_hw *desc,
				u16 dst_ring_num)
{
	desc->m0 |= cpu_to_le64(XGENE_DMA_DESC_IN_BIT);
	desc->m0 |= cpu_to_le64((u64)XGENE_DMA_RING_OWNER_DMA <<
				XGENE_DMA_DESC_RTYPE_POS);
	desc->m1 |= cpu_to_le64(XGENE_DMA_DESC_C_BIT);
	desc->m3 |= cpu_to_le64((u64)dst_ring_num <<
				XGENE_DMA_DESC_HOENQ_NUM_POS);
}
static void xgene_dma_prep_xor_desc(struct xgene_dma_chan *chan,
				    struct xgene_dma_desc_sw *desc_sw,
				    dma_addr_t *dst, dma_addr_t *src,
				    u32 src_cnt, size_t *nbytes,
				    const u8 *scf)
{
	struct xgene_dma_desc_hw *desc1, *desc2;
	size_t len = *nbytes;
	int i;

	desc1 = &desc_sw->desc1;
	desc2 = &desc_sw->desc2;

	/* Initialize DMA descriptor */
	xgene_dma_init_desc(desc1, chan->tx_ring.dst_ring_num);

	/* Set destination address */
	desc1->m2 |= cpu_to_le64(XGENE_DMA_DESC_DR_BIT);
	desc1->m3 |= cpu_to_le64(*dst);

	/* We have multiple source addresses, so need to set NV bit */
	desc1->m0 |= cpu_to_le64(XGENE_DMA_DESC_NV_BIT);

	/* Set flyby opcode */
	desc1->m2 |= cpu_to_le64(xgene_dma_encode_xor_flyby(src_cnt));

	/* Set 1st to 5th source addresses */
	for (i = 0; i < src_cnt; i++) {
		len = *nbytes;
		xgene_dma_set_src_buffer((i == 0) ? &desc1->m1 :
					 xgene_dma_lookup_ext8(desc2, i - 1),
					 &len, &src[i]);
		desc1->m2 |= cpu_to_le64((scf[i] << ((i + 1) * 8)));
	}

	/* Update meta data */
	*nbytes = len;
	*dst += XGENE_DMA_MAX_BYTE_CNT;

	/* We need always 64B descriptor to perform xor or pq operations */
	desc_sw->flags |= XGENE_DMA_FLAG_64B_DESC;
}
static dma_cookie_t xgene_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct xgene_dma_desc_sw *desc;
	struct xgene_dma_chan *chan;
	dma_cookie_t cookie;

	if (unlikely(!tx))
		return -EINVAL;

	chan = to_dma_chan(tx->chan);
	desc = to_dma_desc_sw(tx);

	spin_lock_bh(&chan->lock);

	cookie = dma_cookie_assign(tx);

	/* Add this transaction list onto the tail of the pending queue */
	list_splice_tail_init(&desc->tx_list, &chan->ld_pending);

	spin_unlock_bh(&chan->lock);

	return cookie;
}
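/*
 * Illustrative client-side flow (not part of this driver); a dmaengine user
 * would typically do something like:
 *
 *	tx = chan->device->device_prep_dma_xor(chan, dst, srcs, src_cnt,
 *						len, DMA_PREP_INTERRUPT);
 *	cookie = dmaengine_submit(tx);		// lands in xgene_dma_tx_submit()
 *	dma_async_issue_pending(chan);		// kicks xgene_dma_issue_pending()
 *
 * Submission therefore only queues onto ld_pending; nothing reaches the
 * hardware until issue_pending (or the completion tasklet) runs.
 */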
static void xgene_dma_clean_descriptor(struct xgene_dma_chan *chan,
				       struct xgene_dma_desc_sw *desc)
{
	list_del(&desc->node);
	chan_dbg(chan, "LD %p free\n", desc);
	dma_pool_free(chan->desc_pool, desc, desc->tx.phys);
}
static struct xgene_dma_desc_sw *xgene_dma_alloc_descriptor(
				 struct xgene_dma_chan *chan)
{
	struct xgene_dma_desc_sw *desc;
	dma_addr_t phys;

	desc = dma_pool_zalloc(chan->desc_pool, GFP_NOWAIT, &phys);
	if (!desc) {
		chan_err(chan, "Failed to allocate LDs\n");
		return NULL;
	}

	INIT_LIST_HEAD(&desc->tx_list);
	desc->tx.phys = phys;
	desc->tx.tx_submit = xgene_dma_tx_submit;
	dma_async_tx_descriptor_init(&desc->tx, &chan->dma_chan);

	chan_dbg(chan, "LD %p allocated\n", desc);

	return desc;
}
/**
 * xgene_dma_clean_completed_descriptor - free all descriptors which
 * have been completed and acked
 * @chan: X-Gene DMA channel
 *
 * This function is used on all completed and acked descriptors.
 */
static void xgene_dma_clean_completed_descriptor(struct xgene_dma_chan *chan)
{
	struct xgene_dma_desc_sw *desc, *_desc;

	/* Run the callback for each descriptor, in order */
	list_for_each_entry_safe(desc, _desc, &chan->ld_completed, node) {
		if (async_tx_test_ack(&desc->tx))
			xgene_dma_clean_descriptor(chan, desc);
	}
}
/**
 * xgene_dma_run_tx_complete_actions - cleanup a single link descriptor
 * @chan: X-Gene DMA channel
 * @desc: descriptor to cleanup and free
 *
 * This function is used on a descriptor which has been executed by the DMA
 * controller. It will run any callbacks, submit any dependencies.
 */
static void xgene_dma_run_tx_complete_actions(struct xgene_dma_chan *chan,
					      struct xgene_dma_desc_sw *desc)
{
	struct dma_async_tx_descriptor *tx = &desc->tx;

	/*
	 * If this is not the last transaction in the group,
	 * then no need to complete cookie and run any callback as
	 * this is not the tx_descriptor which had been sent to caller
	 * of this DMA request
	 */
	if (tx->cookie == 0)
		return;

	dma_cookie_complete(tx);
	dma_descriptor_unmap(tx);

	/* Run the link descriptor callback function */
	dmaengine_desc_get_callback_invoke(tx, NULL);

	/* Run any dependencies */
	dma_run_dependencies(tx);
}
/**
 * xgene_dma_clean_running_descriptor - move the completed descriptor from
 * ld_running to ld_completed
 * @chan: X-Gene DMA channel
 * @desc: the descriptor which is completed
 *
 * Free the descriptor directly if acked by async_tx api,
 * else move it to queue ld_completed.
 */
static void xgene_dma_clean_running_descriptor(struct xgene_dma_chan *chan,
					       struct xgene_dma_desc_sw *desc)
{
	/* Remove from the list of running transactions */
	list_del(&desc->node);

	/*
	 * The client is allowed to attach dependent operations
	 * until 'ack' is set.
	 */
	if (!async_tx_test_ack(&desc->tx)) {
		/*
		 * Move this descriptor to the list of descriptors which are
		 * completed, but still awaiting the 'ack' bit to be set.
		 */
		list_add_tail(&desc->node, &chan->ld_completed);
		return;
	}

	chan_dbg(chan, "LD %p free\n", desc);
	dma_pool_free(chan->desc_pool, desc, desc->tx.phys);
}
static void xgene_chan_xfer_request(struct xgene_dma_chan *chan,
				    struct xgene_dma_desc_sw *desc_sw)
{
	struct xgene_dma_ring *ring = &chan->tx_ring;
	struct xgene_dma_desc_hw *desc_hw;

	/* Get hw descriptor from DMA tx ring */
	desc_hw = &ring->desc_hw[ring->head];

	/*
	 * Increment the head count to point next
	 * descriptor for next time
	 */
	if (++ring->head == ring->slots)
		ring->head = 0;

	/* Copy prepared sw descriptor data to hw descriptor */
	memcpy(desc_hw, &desc_sw->desc1, sizeof(*desc_hw));

	/*
	 * Check if we have prepared 64B descriptor,
	 * in this case we need one more hw descriptor
	 */
	if (desc_sw->flags & XGENE_DMA_FLAG_64B_DESC) {
		desc_hw = &ring->desc_hw[ring->head];

		if (++ring->head == ring->slots)
			ring->head = 0;

		memcpy(desc_hw, &desc_sw->desc2, sizeof(*desc_hw));
	}

	/* Increment the pending transaction count */
	chan->pending += ((desc_sw->flags &
			  XGENE_DMA_FLAG_64B_DESC) ? 2 : 1);

	/* Notify the hw that we have descriptor ready for execution */
	iowrite32((desc_sw->flags & XGENE_DMA_FLAG_64B_DESC) ?
		  2 : 1, ring->cmd);
}
/**
 * xgene_chan_xfer_ld_pending - push any pending transactions to hw
 * @chan : X-Gene DMA channel
 *
 * LOCKING: must hold chan->lock
 */
static void xgene_chan_xfer_ld_pending(struct xgene_dma_chan *chan)
{
	struct xgene_dma_desc_sw *desc_sw, *_desc_sw;

	/*
	 * If the list of pending descriptors is empty, then we
	 * don't need to do any work at all
	 */
	if (list_empty(&chan->ld_pending)) {
		chan_dbg(chan, "No pending LDs\n");
		return;
	}

	/*
	 * Move elements from the queue of pending transactions onto the list
	 * of running transactions and push them to hw for further execution
	 */
	list_for_each_entry_safe(desc_sw, _desc_sw, &chan->ld_pending, node) {
		/*
		 * If we have already pushed as many transactions to the hw
		 * as it can take, stop here; the remaining elements on the
		 * pending ld queue are pushed once some of the descriptors
		 * we have already submitted have completed
		 */
		if (chan->pending >= chan->max_outstanding)
			return;

		xgene_chan_xfer_request(chan, desc_sw);

		/*
		 * Delete this element from ld pending queue and append it to
		 * ld running queue
		 */
		list_move_tail(&desc_sw->node, &chan->ld_running);
	}
}
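/*
 * Note: max_outstanding is initialised to tx_ring->slots when the channel
 * rings are created, so the check above effectively throttles submissions to
 * what the Tx ring can hold; any overflow simply stays on ld_pending until
 * the cleanup path calls back into this function.
 */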
/**
 * xgene_dma_cleanup_descriptors - cleanup link descriptors which are completed
 * and move them to ld_completed to free until flag 'ack' is set
 * @chan: X-Gene DMA channel
 *
 * This function is used on descriptors which have been executed by the DMA
 * controller. It will run any callbacks, submit any dependencies, then
 * free these descriptors if flag 'ack' is set.
 */
static void xgene_dma_cleanup_descriptors(struct xgene_dma_chan *chan)
{
	struct xgene_dma_ring *ring = &chan->rx_ring;
	struct xgene_dma_desc_sw *desc_sw, *_desc_sw;
	struct xgene_dma_desc_hw *desc_hw;
	struct list_head ld_completed;
	u8 status;

	INIT_LIST_HEAD(&ld_completed);

	spin_lock_bh(&chan->lock);

	/* Clean already completed and acked descriptors */
	xgene_dma_clean_completed_descriptor(chan);

	/* Move all completed descriptors to ld completed queue, in order */
	list_for_each_entry_safe(desc_sw, _desc_sw, &chan->ld_running, node) {
		/* Get subsequent hw descriptor from DMA rx ring */
		desc_hw = &ring->desc_hw[ring->head];

		/* Check if this descriptor has been completed */
		if (unlikely(le64_to_cpu(desc_hw->m0) ==
			     XGENE_DMA_DESC_EMPTY_SIGNATURE))
			break;

		if (++ring->head == ring->slots)
			ring->head = 0;

		/* Check if we have any error with DMA transactions */
		status = XGENE_DMA_DESC_STATUS(
				XGENE_DMA_DESC_ELERR_RD(le64_to_cpu(
							desc_hw->m0)),
				XGENE_DMA_DESC_LERR_RD(le64_to_cpu(
						       desc_hw->m0)));
		if (status) {
			/* Print the DMA error type */
			chan_err(chan, "%s\n", xgene_dma_desc_err[status]);

			/*
			 * We have a DMA transaction error here. Dump the DMA
			 * Tx and Rx descriptors for this request.
			 */
			XGENE_DMA_DESC_DUMP(&desc_sw->desc1,
					    "X-Gene DMA TX DESC1: ");

			if (desc_sw->flags & XGENE_DMA_FLAG_64B_DESC)
				XGENE_DMA_DESC_DUMP(&desc_sw->desc2,
						    "X-Gene DMA TX DESC2: ");

			XGENE_DMA_DESC_DUMP(desc_hw,
					    "X-Gene DMA RX ERR DESC: ");
		}

		/* Notify the hw about this completed descriptor */
		iowrite32(-1, ring->cmd);

		/* Mark this hw descriptor as processed */
		desc_hw->m0 = cpu_to_le64(XGENE_DMA_DESC_EMPTY_SIGNATURE);

		/*
		 * Decrement the pending transaction count
		 * as we have processed one
		 */
		chan->pending -= ((desc_sw->flags &
				  XGENE_DMA_FLAG_64B_DESC) ? 2 : 1);

		/*
		 * Delete this node from ld running queue and append it to
		 * ld completed queue for further processing
		 */
		list_move_tail(&desc_sw->node, &ld_completed);
	}

	/*
	 * Start any pending transactions automatically
	 * In the ideal case, we keep the DMA controller busy while we go
	 * ahead and free the descriptors below.
	 */
	xgene_chan_xfer_ld_pending(chan);

	spin_unlock_bh(&chan->lock);

	/* Run the callback for each descriptor, in order */
	list_for_each_entry_safe(desc_sw, _desc_sw, &ld_completed, node) {
		xgene_dma_run_tx_complete_actions(chan, desc_sw);
		xgene_dma_clean_running_descriptor(chan, desc_sw);
	}
}
static int xgene_dma_alloc_chan_resources(struct dma_chan *dchan)
{
	struct xgene_dma_chan *chan = to_dma_chan(dchan);

	/* Has this channel already been allocated? */
	if (chan->desc_pool)
		return 1;

	chan->desc_pool = dma_pool_create(chan->name, chan->dev,
					  sizeof(struct xgene_dma_desc_sw),
					  0, 0);
	if (!chan->desc_pool) {
		chan_err(chan, "Failed to allocate descriptor pool\n");
		return -ENOMEM;
	}

	chan_dbg(chan, "Allocated descriptor pool\n");

	return 1;
}
/**
 * xgene_dma_free_desc_list - Free all descriptors in a queue
 * @chan: X-Gene DMA channel
 * @list: the list to free
 *
 * LOCKING: must hold chan->lock
 */
static void xgene_dma_free_desc_list(struct xgene_dma_chan *chan,
				     struct list_head *list)
{
	struct xgene_dma_desc_sw *desc, *_desc;

	list_for_each_entry_safe(desc, _desc, list, node)
		xgene_dma_clean_descriptor(chan, desc);
}
static void xgene_dma_free_chan_resources(struct dma_chan *dchan)
{
	struct xgene_dma_chan *chan = to_dma_chan(dchan);

	chan_dbg(chan, "Free all resources\n");

	if (!chan->desc_pool)
		return;

	/* Process all running descriptors */
	xgene_dma_cleanup_descriptors(chan);

	spin_lock_bh(&chan->lock);

	/* Clean all link descriptor queues */
	xgene_dma_free_desc_list(chan, &chan->ld_pending);
	xgene_dma_free_desc_list(chan, &chan->ld_running);
	xgene_dma_free_desc_list(chan, &chan->ld_completed);

	spin_unlock_bh(&chan->lock);

	/* Delete this channel DMA pool */
	dma_pool_destroy(chan->desc_pool);
	chan->desc_pool = NULL;
}
static struct dma_async_tx_descriptor *xgene_dma_prep_xor(
	struct dma_chan *dchan, dma_addr_t dst, dma_addr_t *src,
	u32 src_cnt, size_t len, unsigned long flags)
{
	struct xgene_dma_desc_sw *first = NULL, *new;
	struct xgene_dma_chan *chan;
	static u8 multi[XGENE_DMA_MAX_XOR_SRC] = {
				0x01, 0x01, 0x01, 0x01, 0x01};

	if (unlikely(!dchan || !len))
		return NULL;

	chan = to_dma_chan(dchan);

	do {
		/* Allocate the link descriptor from DMA pool */
		new = xgene_dma_alloc_descriptor(chan);
		if (!new)
			goto fail;

		/* Prepare xor DMA descriptor */
		xgene_dma_prep_xor_desc(chan, new, &dst, src,
					src_cnt, &len, multi);

		if (!first)
			first = new;

		new->tx.cookie = 0;
		async_tx_ack(&new->tx);

		/* Insert the link descriptor to the LD ring */
		list_add_tail(&new->node, &first->tx_list);
	} while (len);

	new->tx.flags = flags; /* client is in control of this ack */
	new->tx.cookie = -EBUSY;
	list_splice(&first->tx_list, &new->tx_list);

	return &new->tx;

fail:
	if (!first)
		return NULL;

	xgene_dma_free_desc_list(chan, &first->tx_list);
	return NULL;
}
static struct dma_async_tx_descriptor *xgene_dma_prep_pq(
	struct dma_chan *dchan, dma_addr_t *dst, dma_addr_t *src,
	u32 src_cnt, const u8 *scf, size_t len, unsigned long flags)
{
	struct xgene_dma_desc_sw *first = NULL, *new;
	struct xgene_dma_chan *chan;
	size_t _len = len;
	dma_addr_t _src[XGENE_DMA_MAX_XOR_SRC];
	static u8 multi[XGENE_DMA_MAX_XOR_SRC] = {0x01, 0x01, 0x01, 0x01, 0x01};

	if (unlikely(!dchan || !len))
		return NULL;

	chan = to_dma_chan(dchan);

	/*
	 * Save the source addresses in a local variable; we may have to
	 * prepare two descriptors to generate P and Q if both are enabled
	 * in the flags by the client
	 */
	memcpy(_src, src, sizeof(*src) * src_cnt);

	if (flags & DMA_PREP_PQ_DISABLE_P)
		len = 0;

	if (flags & DMA_PREP_PQ_DISABLE_Q)
		_len = 0;

	do {
		/* Allocate the link descriptor from DMA pool */
		new = xgene_dma_alloc_descriptor(chan);
		if (!new)
			goto fail;

		if (!first)
			first = new;

		new->tx.cookie = 0;
		async_tx_ack(&new->tx);

		/* Insert the link descriptor to the LD ring */
		list_add_tail(&new->node, &first->tx_list);

		/*
		 * Prepare DMA descriptor to generate P,
		 * if DMA_PREP_PQ_DISABLE_P flag is not set
		 */
		if (len) {
			xgene_dma_prep_xor_desc(chan, new, &dst[0], src,
						src_cnt, &len, multi);
			continue;
		}

		/*
		 * Prepare DMA descriptor to generate Q,
		 * if DMA_PREP_PQ_DISABLE_Q flag is not set
		 */
		if (_len) {
			xgene_dma_prep_xor_desc(chan, new, &dst[1], _src,
						src_cnt, &_len, scf);
		}
	} while (len || _len);

	new->tx.flags = flags; /* client is in control of this ack */
	new->tx.cookie = -EBUSY;
	list_splice(&first->tx_list, &new->tx_list);

	return &new->tx;

fail:
	if (!first)
		return NULL;

	xgene_dma_free_desc_list(chan, &first->tx_list);
	return NULL;
}
static void xgene_dma_issue_pending(struct dma_chan *dchan)
{
	struct xgene_dma_chan *chan = to_dma_chan(dchan);

	spin_lock_bh(&chan->lock);
	xgene_chan_xfer_ld_pending(chan);
	spin_unlock_bh(&chan->lock);
}
static enum dma_status xgene_dma_tx_status(struct dma_chan *dchan,
					   dma_cookie_t cookie,
					   struct dma_tx_state *txstate)
{
	return dma_cookie_status(dchan, cookie, txstate);
}
static void xgene_dma_tasklet_cb(unsigned long data)
{
	struct xgene_dma_chan *chan = (struct xgene_dma_chan *)data;

	/* Run all cleanup for descriptors which have been completed */
	xgene_dma_cleanup_descriptors(chan);

	/* Re-enable DMA channel IRQ */
	enable_irq(chan->rx_irq);
}
static irqreturn_t xgene_dma_chan_ring_isr(int irq, void *id)
{
	struct xgene_dma_chan *chan = (struct xgene_dma_chan *)id;

	BUG_ON(!chan);

	/*
	 * Disable DMA channel IRQ until we process completed
	 * descriptors
	 */
	disable_irq_nosync(chan->rx_irq);

	/*
	 * Schedule the tasklet to handle all cleanup of the current
	 * transaction. It will start a new transaction if there is
	 * one pending
	 */
	tasklet_schedule(&chan->tasklet);

	return IRQ_HANDLED;
}
static irqreturn_t xgene_dma_err_isr(int irq, void *id)
{
	struct xgene_dma *pdma = (struct xgene_dma *)id;
	unsigned long int_mask;
	u32 val, i;

	val = ioread32(pdma->csr_dma + XGENE_DMA_INT);

	/* Clear DMA interrupts */
	iowrite32(val, pdma->csr_dma + XGENE_DMA_INT);

	/* Print DMA error info */
	int_mask = val >> XGENE_DMA_INT_MASK_SHIFT;
	for_each_set_bit(i, &int_mask, ARRAY_SIZE(xgene_dma_err))
		dev_err(pdma->dev,
			"Interrupt status 0x%08X %s\n", val, xgene_dma_err[i]);

	return IRQ_HANDLED;
}
static void xgene_dma_wr_ring_state(struct xgene_dma_ring *ring)
{
	int i;

	iowrite32(ring->num, ring->pdma->csr_ring + XGENE_DMA_RING_STATE);

	for (i = 0; i < XGENE_DMA_RING_NUM_CONFIG; i++)
		iowrite32(ring->state[i], ring->pdma->csr_ring +
			  XGENE_DMA_RING_STATE_WR_BASE + (i * 4));
}
static void xgene_dma_clr_ring_state(struct xgene_dma_ring *ring)
{
	memset(ring->state, 0, sizeof(u32) * XGENE_DMA_RING_NUM_CONFIG);
	xgene_dma_wr_ring_state(ring);
}
static void xgene_dma_setup_ring(struct xgene_dma_ring *ring)
{
	void *ring_cfg = ring->state;
	u64 addr = ring->desc_paddr;
	u32 i, val;

	ring->slots = ring->size / XGENE_DMA_RING_WQ_DESC_SIZE;

	/* Clear DMA ring state */
	xgene_dma_clr_ring_state(ring);

	/* Set DMA ring type */
	XGENE_DMA_RING_TYPE_SET(ring_cfg, XGENE_DMA_RING_TYPE_REGULAR);

	if (ring->owner == XGENE_DMA_RING_OWNER_DMA) {
		/* Set recombination buffer and timeout */
		XGENE_DMA_RING_RECOMBBUF_SET(ring_cfg);
		XGENE_DMA_RING_RECOMTIMEOUTL_SET(ring_cfg);
		XGENE_DMA_RING_RECOMTIMEOUTH_SET(ring_cfg);
	}

	/* Initialize DMA ring state */
	XGENE_DMA_RING_SELTHRSH_SET(ring_cfg);
	XGENE_DMA_RING_ACCEPTLERR_SET(ring_cfg);
	XGENE_DMA_RING_COHERENT_SET(ring_cfg);
	XGENE_DMA_RING_ADDRL_SET(ring_cfg, addr);
	XGENE_DMA_RING_ADDRH_SET(ring_cfg, addr);
	XGENE_DMA_RING_SIZE_SET(ring_cfg, ring->cfgsize);

	/* Write DMA ring configurations */
	xgene_dma_wr_ring_state(ring);

	/* Set DMA ring id */
	iowrite32(XGENE_DMA_RING_ID_SETUP(ring->id),
		  ring->pdma->csr_ring + XGENE_DMA_RING_ID);

	/* Set DMA ring buffer */
	iowrite32(XGENE_DMA_RING_ID_BUF_SETUP(ring->num),
		  ring->pdma->csr_ring + XGENE_DMA_RING_ID_BUF);

	if (ring->owner != XGENE_DMA_RING_OWNER_CPU)
		return;

	/* Set empty signature to DMA Rx ring descriptors */
	for (i = 0; i < ring->slots; i++) {
		struct xgene_dma_desc_hw *desc;

		desc = &ring->desc_hw[i];
		desc->m0 = cpu_to_le64(XGENE_DMA_DESC_EMPTY_SIGNATURE);
	}

	/* Enable DMA Rx ring interrupt */
	val = ioread32(ring->pdma->csr_ring + XGENE_DMA_RING_NE_INT_MODE);
	XGENE_DMA_RING_NE_INT_MODE_SET(val, ring->buf_num);
	iowrite32(val, ring->pdma->csr_ring + XGENE_DMA_RING_NE_INT_MODE);
}
static void xgene_dma_clear_ring(struct xgene_dma_ring *ring)
{
	u32 ring_id, val;

	if (ring->owner == XGENE_DMA_RING_OWNER_CPU) {
		/* Disable DMA Rx ring interrupt */
		val = ioread32(ring->pdma->csr_ring +
			       XGENE_DMA_RING_NE_INT_MODE);
		XGENE_DMA_RING_NE_INT_MODE_RESET(val, ring->buf_num);
		iowrite32(val, ring->pdma->csr_ring +
			  XGENE_DMA_RING_NE_INT_MODE);
	}

	/* Clear DMA ring state */
	ring_id = XGENE_DMA_RING_ID_SETUP(ring->id);
	iowrite32(ring_id, ring->pdma->csr_ring + XGENE_DMA_RING_ID);

	iowrite32(0, ring->pdma->csr_ring + XGENE_DMA_RING_ID_BUF);
	xgene_dma_clr_ring_state(ring);
}
static void xgene_dma_set_ring_cmd(struct xgene_dma_ring *ring)
{
	ring->cmd_base = ring->pdma->csr_ring_cmd +
				XGENE_DMA_RING_CMD_BASE_OFFSET((ring->num -
							XGENE_DMA_RING_NUM));

	ring->cmd = ring->cmd_base + XGENE_DMA_RING_CMD_OFFSET;
}
static int xgene_dma_get_ring_size(struct xgene_dma_chan *chan,
				   enum xgene_dma_ring_cfgsize cfgsize)
{
	int size;

	switch (cfgsize) {
	case XGENE_DMA_RING_CFG_SIZE_512B:
		size = 0x2000;
		break;
	case XGENE_DMA_RING_CFG_SIZE_2KB:
		size = 0x8000;
		break;
	case XGENE_DMA_RING_CFG_SIZE_16KB:
		size = 0x40000;
		break;
	case XGENE_DMA_RING_CFG_SIZE_64KB:
		size = 0x100000;
		break;
	case XGENE_DMA_RING_CFG_SIZE_512KB:
		size = 0x800000;
		break;
	default:
		chan_err(chan, "Unsupported cfg ring size %d\n", cfgsize);
		return -EINVAL;
	}

	return size;
}
static void xgene_dma_delete_ring_one(struct xgene_dma_ring *ring)
{
	/* Clear DMA ring configurations */
	xgene_dma_clear_ring(ring);

	/* De-allocate DMA ring descriptor */
	if (ring->desc_vaddr) {
		dma_free_coherent(ring->pdma->dev, ring->size,
				  ring->desc_vaddr, ring->desc_paddr);
		ring->desc_vaddr = NULL;
	}
}
static void xgene_dma_delete_chan_rings(struct xgene_dma_chan *chan)
{
	xgene_dma_delete_ring_one(&chan->rx_ring);
	xgene_dma_delete_ring_one(&chan->tx_ring);
}
static int xgene_dma_create_ring_one(struct xgene_dma_chan *chan,
				     struct xgene_dma_ring *ring,
				     enum xgene_dma_ring_cfgsize cfgsize)
{
	int ret;

	/* Setup DMA ring descriptor variables */
	ring->pdma = chan->pdma;
	ring->cfgsize = cfgsize;
	ring->num = chan->pdma->ring_num++;
	ring->id = XGENE_DMA_RING_ID_GET(ring->owner, ring->buf_num);

	ret = xgene_dma_get_ring_size(chan, cfgsize);
	if (ret <= 0)
		return ret;
	ring->size = ret;

	/* Allocate memory for DMA ring descriptor */
	ring->desc_vaddr = dma_zalloc_coherent(chan->dev, ring->size,
					       &ring->desc_paddr, GFP_KERNEL);
	if (!ring->desc_vaddr) {
		chan_err(chan, "Failed to allocate ring desc\n");
		return -ENOMEM;
	}

	/* Configure and enable DMA ring */
	xgene_dma_set_ring_cmd(ring);
	xgene_dma_setup_ring(ring);

	return 0;
}
static int xgene_dma_create_chan_rings(struct xgene_dma_chan *chan)
{
	struct xgene_dma_ring *rx_ring = &chan->rx_ring;
	struct xgene_dma_ring *tx_ring = &chan->tx_ring;
	int ret;

	/* Create DMA Rx ring descriptor */
	rx_ring->owner = XGENE_DMA_RING_OWNER_CPU;
	rx_ring->buf_num = XGENE_DMA_CPU_BUFNUM + chan->id;

	ret = xgene_dma_create_ring_one(chan, rx_ring,
					XGENE_DMA_RING_CFG_SIZE_64KB);
	if (ret)
		return ret;

	chan_dbg(chan, "Rx ring id 0x%X num %d desc 0x%p\n",
		 rx_ring->id, rx_ring->num, rx_ring->desc_vaddr);

	/* Create DMA Tx ring descriptor */
	tx_ring->owner = XGENE_DMA_RING_OWNER_DMA;
	tx_ring->buf_num = XGENE_DMA_BUFNUM + chan->id;

	ret = xgene_dma_create_ring_one(chan, tx_ring,
					XGENE_DMA_RING_CFG_SIZE_64KB);
	if (ret) {
		xgene_dma_delete_ring_one(rx_ring);
		return ret;
	}

	tx_ring->dst_ring_num = XGENE_DMA_RING_DST_ID(rx_ring->num);

	chan_dbg(chan,
		 "Tx ring id 0x%X num %d desc 0x%p\n",
		 tx_ring->id, tx_ring->num, tx_ring->desc_vaddr);

	/* Set the max outstanding request possible to this channel */
	chan->max_outstanding = tx_ring->slots;

	return ret;
}
static int xgene_dma_init_rings(struct xgene_dma *pdma)
{
	int ret, i, j;

	for (i = 0; i < XGENE_DMA_MAX_CHANNEL; i++) {
		ret = xgene_dma_create_chan_rings(&pdma->chan[i]);
		if (ret) {
			for (j = 0; j < i; j++)
				xgene_dma_delete_chan_rings(&pdma->chan[j]);
			return ret;
		}
	}

	return ret;
}
static void xgene_dma_enable(struct xgene_dma *pdma)
{
	u32 val;

	/* Configure and enable DMA engine */
	val = ioread32(pdma->csr_dma + XGENE_DMA_GCR);
	XGENE_DMA_CH_SETUP(val);
	XGENE_DMA_ENABLE(val);
	iowrite32(val, pdma->csr_dma + XGENE_DMA_GCR);
}
static void xgene_dma_disable(struct xgene_dma *pdma)
{
	u32 val;

	val = ioread32(pdma->csr_dma + XGENE_DMA_GCR);
	XGENE_DMA_DISABLE(val);
	iowrite32(val, pdma->csr_dma + XGENE_DMA_GCR);
}
static void xgene_dma_mask_interrupts(struct xgene_dma *pdma)
{
	/*
	 * Mask DMA ring overflow, underflow and
	 * AXI write/read error interrupts
	 */
	iowrite32(XGENE_DMA_INT_ALL_MASK,
		  pdma->csr_dma + XGENE_DMA_RING_INT0_MASK);
	iowrite32(XGENE_DMA_INT_ALL_MASK,
		  pdma->csr_dma + XGENE_DMA_RING_INT1_MASK);
	iowrite32(XGENE_DMA_INT_ALL_MASK,
		  pdma->csr_dma + XGENE_DMA_RING_INT2_MASK);
	iowrite32(XGENE_DMA_INT_ALL_MASK,
		  pdma->csr_dma + XGENE_DMA_RING_INT3_MASK);
	iowrite32(XGENE_DMA_INT_ALL_MASK,
		  pdma->csr_dma + XGENE_DMA_RING_INT4_MASK);

	/* Mask DMA error interrupts */
	iowrite32(XGENE_DMA_INT_ALL_MASK, pdma->csr_dma + XGENE_DMA_INT_MASK);
}
static void xgene_dma_unmask_interrupts(struct xgene_dma *pdma)
{
	/*
	 * Unmask DMA ring overflow, underflow and
	 * AXI write/read error interrupts
	 */
	iowrite32(XGENE_DMA_INT_ALL_UNMASK,
		  pdma->csr_dma + XGENE_DMA_RING_INT0_MASK);
	iowrite32(XGENE_DMA_INT_ALL_UNMASK,
		  pdma->csr_dma + XGENE_DMA_RING_INT1_MASK);
	iowrite32(XGENE_DMA_INT_ALL_UNMASK,
		  pdma->csr_dma + XGENE_DMA_RING_INT2_MASK);
	iowrite32(XGENE_DMA_INT_ALL_UNMASK,
		  pdma->csr_dma + XGENE_DMA_RING_INT3_MASK);
	iowrite32(XGENE_DMA_INT_ALL_UNMASK,
		  pdma->csr_dma + XGENE_DMA_RING_INT4_MASK);

	/* Unmask DMA error interrupts */
	iowrite32(XGENE_DMA_INT_ALL_UNMASK,
		  pdma->csr_dma + XGENE_DMA_INT_MASK);
}
static void xgene_dma_init_hw(struct xgene_dma *pdma)
{
	u32 val;

	/* Associate DMA ring to corresponding ring HW */
	iowrite32(XGENE_DMA_ASSOC_RING_MNGR1,
		  pdma->csr_dma + XGENE_DMA_CFG_RING_WQ_ASSOC);

	/* Configure RAID6 polynomial control setting */
	if (is_pq_enabled(pdma))
		iowrite32(XGENE_DMA_RAID6_MULTI_CTRL(0x1D),
			  pdma->csr_dma + XGENE_DMA_RAID6_CONT);
	else
		dev_info(pdma->dev, "PQ is disabled in HW\n");

	xgene_dma_enable(pdma);
	xgene_dma_unmask_interrupts(pdma);

	/* Get DMA id and version info */
	val = ioread32(pdma->csr_dma + XGENE_DMA_IPBRR);

	/* DMA device info */
	dev_info(pdma->dev,
		 "X-Gene DMA v%d.%02d.%02d driver registered %d channels",
		 XGENE_DMA_REV_NO_RD(val), XGENE_DMA_BUS_ID_RD(val),
		 XGENE_DMA_DEV_ID_RD(val), XGENE_DMA_MAX_CHANNEL);
}
static int xgene_dma_init_ring_mngr(struct xgene_dma *pdma)
{
	if (ioread32(pdma->csr_ring + XGENE_DMA_RING_CLKEN) &&
	    (!ioread32(pdma->csr_ring + XGENE_DMA_RING_SRST)))
		return 0;

	iowrite32(0x3, pdma->csr_ring + XGENE_DMA_RING_CLKEN);
	iowrite32(0x0, pdma->csr_ring + XGENE_DMA_RING_SRST);

	/* Bring up memory */
	iowrite32(0x0, pdma->csr_ring + XGENE_DMA_RING_MEM_RAM_SHUTDOWN);

	/* Force a barrier */
	ioread32(pdma->csr_ring + XGENE_DMA_RING_MEM_RAM_SHUTDOWN);

	/* reset may take up to 1ms */
	usleep_range(1000, 1100);

	if (ioread32(pdma->csr_ring + XGENE_DMA_RING_BLK_MEM_RDY)
		!= XGENE_DMA_RING_BLK_MEM_RDY_VAL) {
		dev_err(pdma->dev,
			"Failed to release ring mngr memory from shutdown\n");
		return -ENODEV;
	}

	/* program threshold set 1 and all hysteresis */
	iowrite32(XGENE_DMA_RING_THRESLD0_SET1_VAL,
		  pdma->csr_ring + XGENE_DMA_RING_THRESLD0_SET1);
	iowrite32(XGENE_DMA_RING_THRESLD1_SET1_VAL,
		  pdma->csr_ring + XGENE_DMA_RING_THRESLD1_SET1);
	iowrite32(XGENE_DMA_RING_HYSTERESIS_VAL,
		  pdma->csr_ring + XGENE_DMA_RING_HYSTERESIS);

	/* Enable QPcore and assign error queue */
	iowrite32(XGENE_DMA_RING_ENABLE,
		  pdma->csr_ring + XGENE_DMA_RING_CONFIG);

	return 0;
}
static int xgene_dma_init_mem(struct xgene_dma *pdma)
{
	int ret;

	ret = xgene_dma_init_ring_mngr(pdma);
	if (ret)
		return ret;

	/* Bring up memory */
	iowrite32(0x0, pdma->csr_dma + XGENE_DMA_MEM_RAM_SHUTDOWN);

	/* Force a barrier */
	ioread32(pdma->csr_dma + XGENE_DMA_MEM_RAM_SHUTDOWN);

	/* reset may take up to 1ms */
	usleep_range(1000, 1100);

	if (ioread32(pdma->csr_dma + XGENE_DMA_BLK_MEM_RDY)
		!= XGENE_DMA_BLK_MEM_RDY_VAL) {
		dev_err(pdma->dev,
			"Failed to release DMA memory from shutdown\n");
		return -ENODEV;
	}

	return 0;
}
static int xgene_dma_request_irqs(struct xgene_dma *pdma)
{
	struct xgene_dma_chan *chan;
	int ret, i, j;

	/* Register DMA error irq */
	ret = devm_request_irq(pdma->dev, pdma->err_irq, xgene_dma_err_isr,
			       0, "dma_error", pdma);
	if (ret) {
		dev_err(pdma->dev,
			"Failed to register error IRQ %d\n", pdma->err_irq);
		return ret;
	}

	/* Register DMA channel rx irq */
	for (i = 0; i < XGENE_DMA_MAX_CHANNEL; i++) {
		chan = &pdma->chan[i];
		irq_set_status_flags(chan->rx_irq, IRQ_DISABLE_UNLAZY);
		ret = devm_request_irq(chan->dev, chan->rx_irq,
				       xgene_dma_chan_ring_isr,
				       0, chan->name, chan);
		if (ret) {
			chan_err(chan, "Failed to register Rx IRQ %d\n",
				 chan->rx_irq);
			devm_free_irq(pdma->dev, pdma->err_irq, pdma);

			for (j = 0; j < i; j++) {
				chan = &pdma->chan[j];
				irq_clear_status_flags(chan->rx_irq,
						       IRQ_DISABLE_UNLAZY);
				devm_free_irq(chan->dev, chan->rx_irq, chan);
			}

			return ret;
		}
	}

	return 0;
}
static void xgene_dma_free_irqs(struct xgene_dma *pdma)
{
	struct xgene_dma_chan *chan;
	int i;

	/* Free DMA device error irq */
	devm_free_irq(pdma->dev, pdma->err_irq, pdma);

	for (i = 0; i < XGENE_DMA_MAX_CHANNEL; i++) {
		chan = &pdma->chan[i];
		irq_clear_status_flags(chan->rx_irq, IRQ_DISABLE_UNLAZY);
		devm_free_irq(chan->dev, chan->rx_irq, chan);
	}
}
static void xgene_dma_set_caps(struct xgene_dma_chan *chan,
			       struct dma_device *dma_dev)
{
	/* Initialize DMA device capability mask */
	dma_cap_zero(dma_dev->cap_mask);

	/* Set DMA device capability */

	/*
	 * The X-Gene SoC DMA engine channel 0 supports XOR, and channel 1
	 * supports both XOR and PQ. First, the hardware provides a mechanism
	 * to enable/disable PQ/XOR support on channel 1, which we check by
	 * reading the SoC efuse register. Second, there is a hardware
	 * erratum: running XOR on channel 0 and PQ on channel 1
	 * simultaneously can hang the DMA engine. So we enable XOR on
	 * channel 0 only if XOR and PQ support on channel 1 is disabled.
	 */
	if ((chan->id == XGENE_DMA_PQ_CHANNEL) &&
	    is_pq_enabled(chan->pdma)) {
		dma_cap_set(DMA_PQ, dma_dev->cap_mask);
		dma_cap_set(DMA_XOR, dma_dev->cap_mask);
	} else if ((chan->id == XGENE_DMA_XOR_CHANNEL) &&
		   !is_pq_enabled(chan->pdma)) {
		dma_cap_set(DMA_XOR, dma_dev->cap_mask);
	}

	/* Set base and prep routines */
	dma_dev->dev = chan->dev;
	dma_dev->device_alloc_chan_resources = xgene_dma_alloc_chan_resources;
	dma_dev->device_free_chan_resources = xgene_dma_free_chan_resources;
	dma_dev->device_issue_pending = xgene_dma_issue_pending;
	dma_dev->device_tx_status = xgene_dma_tx_status;

	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		dma_dev->device_prep_dma_xor = xgene_dma_prep_xor;
		dma_dev->max_xor = XGENE_DMA_MAX_XOR_SRC;
		dma_dev->xor_align = DMAENGINE_ALIGN_64_BYTES;
	}

	if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) {
		dma_dev->device_prep_dma_pq = xgene_dma_prep_pq;
		dma_dev->max_pq = XGENE_DMA_MAX_XOR_SRC;
		dma_dev->pq_align = DMAENGINE_ALIGN_64_BYTES;
	}
}
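/*
 * Illustrative consumer view (not part of this driver): with DMA_XOR/DMA_PQ
 * advertised here, a generic dmaengine client could request one of these
 * channels along the lines of:
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_XOR, mask);
 *	chan = dma_request_channel(mask, NULL, NULL);
 *
 * subject to the 64-byte alignment declared via xor_align/pq_align.
 */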
static int xgene_dma_async_register(struct xgene_dma *pdma, int id)
{
	struct xgene_dma_chan *chan = &pdma->chan[id];
	struct dma_device *dma_dev = &pdma->dma_dev[id];
	int ret;

	chan->dma_chan.device = dma_dev;

	spin_lock_init(&chan->lock);
	INIT_LIST_HEAD(&chan->ld_pending);
	INIT_LIST_HEAD(&chan->ld_running);
	INIT_LIST_HEAD(&chan->ld_completed);
	tasklet_init(&chan->tasklet, xgene_dma_tasklet_cb,
		     (unsigned long)chan);

	chan->pending = 0;
	chan->desc_pool = NULL;
	dma_cookie_init(&chan->dma_chan);

	/* Setup dma device capabilities and prep routines */
	xgene_dma_set_caps(chan, dma_dev);

	/* Initialize DMA device list head */
	INIT_LIST_HEAD(&dma_dev->channels);
	list_add_tail(&chan->dma_chan.device_node, &dma_dev->channels);

	/* Register with Linux async DMA framework */
	ret = dma_async_device_register(dma_dev);
	if (ret) {
		chan_err(chan, "Failed to register async device %d", ret);
		tasklet_kill(&chan->tasklet);

		return ret;
	}

	/* DMA capability info */
	dev_info(pdma->dev,
		 "%s: CAPABILITY ( %s%s)\n", dma_chan_name(&chan->dma_chan),
		 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "XOR " : "",
		 dma_has_cap(DMA_PQ, dma_dev->cap_mask) ? "PQ " : "");

	return 0;
}
static int xgene_dma_init_async(struct xgene_dma *pdma)
{
	int ret, i, j;

	for (i = 0; i < XGENE_DMA_MAX_CHANNEL; i++) {
		ret = xgene_dma_async_register(pdma, i);
		if (ret) {
			for (j = 0; j < i; j++) {
				dma_async_device_unregister(&pdma->dma_dev[j]);
				tasklet_kill(&pdma->chan[j].tasklet);
			}

			return ret;
		}
	}

	return ret;
}
static void xgene_dma_async_unregister(struct xgene_dma *pdma)
{
	int i;

	for (i = 0; i < XGENE_DMA_MAX_CHANNEL; i++)
		dma_async_device_unregister(&pdma->dma_dev[i]);
}
static void xgene_dma_init_channels(struct xgene_dma *pdma)
{
	struct xgene_dma_chan *chan;
	int i;

	pdma->ring_num = XGENE_DMA_RING_NUM;

	for (i = 0; i < XGENE_DMA_MAX_CHANNEL; i++) {
		chan = &pdma->chan[i];
		chan->dev = pdma->dev;
		chan->pdma = pdma;
		chan->id = i;
		snprintf(chan->name, sizeof(chan->name), "dmachan%d", chan->id);
	}
}
static int xgene_dma_get_resources(struct platform_device *pdev,
				   struct xgene_dma *pdma)
{
	struct resource *res;
	int irq, i;

	/* Get DMA csr region */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "Failed to get csr region\n");
		return -ENXIO;
	}

	pdma->csr_dma = devm_ioremap(&pdev->dev, res->start,
				     resource_size(res));
	if (!pdma->csr_dma) {
		dev_err(&pdev->dev, "Failed to ioremap csr region");
		return -ENOMEM;
	}

	/* Get DMA ring csr region */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!res) {
		dev_err(&pdev->dev, "Failed to get ring csr region\n");
		return -ENXIO;
	}

	pdma->csr_ring = devm_ioremap(&pdev->dev, res->start,
				      resource_size(res));
	if (!pdma->csr_ring) {
		dev_err(&pdev->dev, "Failed to ioremap ring csr region");
		return -ENOMEM;
	}

	/* Get DMA ring cmd csr region */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
	if (!res) {
		dev_err(&pdev->dev, "Failed to get ring cmd csr region\n");
		return -ENXIO;
	}

	pdma->csr_ring_cmd = devm_ioremap(&pdev->dev, res->start,
					  resource_size(res));
	if (!pdma->csr_ring_cmd) {
		dev_err(&pdev->dev, "Failed to ioremap ring cmd csr region");
		return -ENOMEM;
	}

	pdma->csr_ring_cmd += XGENE_DMA_RING_CMD_SM_OFFSET;

	/* Get efuse csr region */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 3);
	if (!res) {
		dev_err(&pdev->dev, "Failed to get efuse csr region\n");
		return -ENXIO;
	}

	pdma->csr_efuse = devm_ioremap(&pdev->dev, res->start,
				       resource_size(res));
	if (!pdma->csr_efuse) {
		dev_err(&pdev->dev, "Failed to ioremap efuse csr region");
		return -ENOMEM;
	}

	/* Get DMA error interrupt */
	irq = platform_get_irq(pdev, 0);
	if (irq <= 0) {
		dev_err(&pdev->dev, "Failed to get Error IRQ\n");
		return -ENXIO;
	}

	pdma->err_irq = irq;

	/* Get DMA Rx ring descriptor interrupts for all DMA channels */
	for (i = 1; i <= XGENE_DMA_MAX_CHANNEL; i++) {
		irq = platform_get_irq(pdev, i);
		if (irq <= 0) {
			dev_err(&pdev->dev, "Failed to get Rx IRQ\n");
			return -ENXIO;
		}

		pdma->chan[i - 1].rx_irq = irq;
	}

	return 0;
}
static int xgene_dma_probe(struct platform_device *pdev)
{
	struct xgene_dma *pdma;
	int ret, i;

	pdma = devm_kzalloc(&pdev->dev, sizeof(*pdma), GFP_KERNEL);
	if (!pdma)
		return -ENOMEM;

	pdma->dev = &pdev->dev;
	platform_set_drvdata(pdev, pdma);

	ret = xgene_dma_get_resources(pdev, pdma);
	if (ret)
		return ret;

	pdma->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(pdma->clk) && !ACPI_COMPANION(&pdev->dev)) {
		dev_err(&pdev->dev, "Failed to get clk\n");
		return PTR_ERR(pdma->clk);
	}

	/* Enable clk before accessing registers */
	if (!IS_ERR(pdma->clk)) {
		ret = clk_prepare_enable(pdma->clk);
		if (ret) {
			dev_err(&pdev->dev, "Failed to enable clk %d\n", ret);
			return ret;
		}
	}

	/* Remove DMA RAM out of shutdown */
	ret = xgene_dma_init_mem(pdma);
	if (ret)
		goto err_clk_enable;

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(42));
	if (ret) {
		dev_err(&pdev->dev, "No usable DMA configuration\n");
		goto err_clk_enable;
	}

	/* Initialize DMA channels software state */
	xgene_dma_init_channels(pdma);

	/* Configure DMA rings */
	ret = xgene_dma_init_rings(pdma);
	if (ret)
		goto err_clk_enable;

	ret = xgene_dma_request_irqs(pdma);
	if (ret)
		goto err_request_irq;

	/* Configure and enable DMA engine */
	xgene_dma_init_hw(pdma);

	/* Register DMA device with linux async framework */
	ret = xgene_dma_init_async(pdma);
	if (ret)
		goto err_async_init;

	return 0;

err_async_init:
	xgene_dma_free_irqs(pdma);

err_request_irq:
	for (i = 0; i < XGENE_DMA_MAX_CHANNEL; i++)
		xgene_dma_delete_chan_rings(&pdma->chan[i]);

err_clk_enable:
	if (!IS_ERR(pdma->clk))
		clk_disable_unprepare(pdma->clk);

	return ret;
}
static int xgene_dma_remove(struct platform_device *pdev)
{
	struct xgene_dma *pdma = platform_get_drvdata(pdev);
	struct xgene_dma_chan *chan;
	int i;

	xgene_dma_async_unregister(pdma);

	/* Mask interrupts and disable DMA engine */
	xgene_dma_mask_interrupts(pdma);
	xgene_dma_disable(pdma);
	xgene_dma_free_irqs(pdma);

	for (i = 0; i < XGENE_DMA_MAX_CHANNEL; i++) {
		chan = &pdma->chan[i];
		tasklet_kill(&chan->tasklet);
		xgene_dma_delete_chan_rings(chan);
	}

	if (!IS_ERR(pdma->clk))
		clk_disable_unprepare(pdma->clk);

	return 0;
}
static const struct acpi_device_id xgene_dma_acpi_match_ptr[] = {
	{"APMC0D43", 0},
	{},
};
MODULE_DEVICE_TABLE(acpi, xgene_dma_acpi_match_ptr);
static const struct of_device_id xgene_dma_of_match_ptr[] = {
	{.compatible = "apm,xgene-storm-dma",},
	{},
};
MODULE_DEVICE_TABLE(of, xgene_dma_of_match_ptr);
static struct platform_driver xgene_dma_driver = {
	.probe = xgene_dma_probe,
	.remove = xgene_dma_remove,
	.driver = {
		.name = "X-Gene-DMA",
		.of_match_table = xgene_dma_of_match_ptr,
		.acpi_match_table = ACPI_PTR(xgene_dma_acpi_match_ptr),
	},
};

module_platform_driver(xgene_dma_driver);
MODULE_DESCRIPTION("APM X-Gene SoC DMA driver");
MODULE_AUTHOR("Rameshwar Prasad Sahu <rsahu@apm.com>");
MODULE_AUTHOR("Loc Ho <lho@apm.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION("1.0");