/*
 * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

/*
 * QCOM BAM DMA engine driver
 *
 * QCOM BAM DMA blocks are distributed amongst a number of the on-chip
 * peripherals on the MSM 8x74. The configuration of the channels is dependent
 * on the way they are hard wired to that specific peripheral. The peripheral
 * device tree entries specify the configuration of each channel.
 *
 * The DMA controller requires the use of external memory for storage of the
 * hardware descriptors for each channel. The descriptor FIFO is accessed as a
 * circular buffer and operations are managed according to the offset within
 * the FIFO. After pipe/channel reset, all of the pipe registers and internal
 * state are back to defaults.
 *
 * During DMA operations, we write descriptors to the FIFO, being careful to
 * handle wrapping, and then write the last FIFO offset to that channel's
 * P_EVNT_REG register to kick off the transaction. The P_SW_OFSTS register
 * indicates the current FIFO offset that is being processed, so there is some
 * indication of where the hardware is currently working.
 */
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_dma.h>
#include <linux/clk.h>
#include <linux/dmaengine.h>

#include "dmaengine.h"
#include "virt-dma.h"
struct bam_desc_hw {
	u32 addr;		/* Buffer physical address */
	u16 size;		/* Buffer size in bytes */
	u16 flags;
};
#define DESC_FLAG_INT	BIT(15)
#define DESC_FLAG_EOT	BIT(14)
#define DESC_FLAG_EOB	BIT(13)
#define DESC_FLAG_NWD	BIT(12)
struct bam_async_desc {
	struct virt_dma_desc vd;

	u32 num_desc;
	u32 xfer_len;

	/* transaction flags, EOT|EOB|NWD */
	u16 flags;

	struct bam_desc_hw *curr_desc;

	enum dma_transfer_direction dir;
	size_t length;
	struct bam_desc_hw desc[0];
};
enum bam_reg {
	BAM_CTRL,
	BAM_REVISION,
	BAM_NUM_PIPES,
	BAM_DESC_CNT_TRSHLD,
	BAM_IRQ_SRCS,
	BAM_IRQ_SRCS_MSK,
	BAM_IRQ_SRCS_UNMASKED,
	BAM_IRQ_STTS,
	BAM_IRQ_CLR,
	BAM_IRQ_EN,
	BAM_CNFG_BITS,
	BAM_IRQ_SRCS_EE,
	BAM_IRQ_SRCS_MSK_EE,
	BAM_P_CTRL,
	BAM_P_RST,
	BAM_P_HALT,
	BAM_P_IRQ_STTS,
	BAM_P_IRQ_CLR,
	BAM_P_IRQ_EN,
	BAM_P_EVNT_DEST_ADDR,
	BAM_P_EVNT_REG,
	BAM_P_SW_OFSTS,
	BAM_P_DATA_FIFO_ADDR,
	BAM_P_DESC_FIFO_ADDR,
	BAM_P_EVNT_GEN_TRSHLD,
	BAM_P_FIFO_SIZES,
};
struct reg_offset_data {
	u32 base_offset;
	unsigned int pipe_mult, evnt_mult, ee_mult;
};
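/*
 * A register's address is computed as base_offset plus the pipe number
 * scaled by pipe_mult and evnt_mult, plus the execution environment scaled
 * by ee_mult (see bam_addr() below). For example, with the v1.4 layout,
 * BAM_P_CTRL for pipe 3 sits at 0x1000 + 3 * 0x1000 = 0x4000 from the
 * register base.
 */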
static const struct reg_offset_data bam_v1_3_reg_info[] = {
	[BAM_CTRL]		= { 0x0F80, 0x00, 0x00, 0x00 },
	[BAM_REVISION]		= { 0x0F84, 0x00, 0x00, 0x00 },
	[BAM_NUM_PIPES]		= { 0x0FBC, 0x00, 0x00, 0x00 },
	[BAM_DESC_CNT_TRSHLD]	= { 0x0F88, 0x00, 0x00, 0x00 },
	[BAM_IRQ_SRCS]		= { 0x0F8C, 0x00, 0x00, 0x00 },
	[BAM_IRQ_SRCS_MSK]	= { 0x0F90, 0x00, 0x00, 0x00 },
	[BAM_IRQ_SRCS_UNMASKED]	= { 0x0FB0, 0x00, 0x00, 0x00 },
	[BAM_IRQ_STTS]		= { 0x0F94, 0x00, 0x00, 0x00 },
	[BAM_IRQ_CLR]		= { 0x0F98, 0x00, 0x00, 0x00 },
	[BAM_IRQ_EN]		= { 0x0F9C, 0x00, 0x00, 0x00 },
	[BAM_CNFG_BITS]		= { 0x0FFC, 0x00, 0x00, 0x00 },
	[BAM_IRQ_SRCS_EE]	= { 0x1800, 0x00, 0x00, 0x80 },
	[BAM_IRQ_SRCS_MSK_EE]	= { 0x1804, 0x00, 0x00, 0x80 },
	[BAM_P_CTRL]		= { 0x0000, 0x80, 0x00, 0x00 },
	[BAM_P_RST]		= { 0x0004, 0x80, 0x00, 0x00 },
	[BAM_P_HALT]		= { 0x0008, 0x80, 0x00, 0x00 },
	[BAM_P_IRQ_STTS]	= { 0x0010, 0x80, 0x00, 0x00 },
	[BAM_P_IRQ_CLR]		= { 0x0014, 0x80, 0x00, 0x00 },
	[BAM_P_IRQ_EN]		= { 0x0018, 0x80, 0x00, 0x00 },
	[BAM_P_EVNT_DEST_ADDR]	= { 0x102C, 0x00, 0x40, 0x00 },
	[BAM_P_EVNT_REG]	= { 0x1018, 0x00, 0x40, 0x00 },
	[BAM_P_SW_OFSTS]	= { 0x1000, 0x00, 0x40, 0x00 },
	[BAM_P_DATA_FIFO_ADDR]	= { 0x1024, 0x00, 0x40, 0x00 },
	[BAM_P_DESC_FIFO_ADDR]	= { 0x101C, 0x00, 0x40, 0x00 },
	[BAM_P_EVNT_GEN_TRSHLD]	= { 0x1028, 0x00, 0x40, 0x00 },
	[BAM_P_FIFO_SIZES]	= { 0x1020, 0x00, 0x40, 0x00 },
};
static const struct reg_offset_data bam_v1_4_reg_info[] = {
	[BAM_CTRL]		= { 0x0000, 0x00, 0x00, 0x00 },
	[BAM_REVISION]		= { 0x0004, 0x00, 0x00, 0x00 },
	[BAM_NUM_PIPES]		= { 0x003C, 0x00, 0x00, 0x00 },
	[BAM_DESC_CNT_TRSHLD]	= { 0x0008, 0x00, 0x00, 0x00 },
	[BAM_IRQ_SRCS]		= { 0x000C, 0x00, 0x00, 0x00 },
	[BAM_IRQ_SRCS_MSK]	= { 0x0010, 0x00, 0x00, 0x00 },
	[BAM_IRQ_SRCS_UNMASKED]	= { 0x0030, 0x00, 0x00, 0x00 },
	[BAM_IRQ_STTS]		= { 0x0014, 0x00, 0x00, 0x00 },
	[BAM_IRQ_CLR]		= { 0x0018, 0x00, 0x00, 0x00 },
	[BAM_IRQ_EN]		= { 0x001C, 0x00, 0x00, 0x00 },
	[BAM_CNFG_BITS]		= { 0x007C, 0x00, 0x00, 0x00 },
	[BAM_IRQ_SRCS_EE]	= { 0x0800, 0x00, 0x00, 0x80 },
	[BAM_IRQ_SRCS_MSK_EE]	= { 0x0804, 0x00, 0x00, 0x80 },
	[BAM_P_CTRL]		= { 0x1000, 0x1000, 0x00, 0x00 },
	[BAM_P_RST]		= { 0x1004, 0x1000, 0x00, 0x00 },
	[BAM_P_HALT]		= { 0x1008, 0x1000, 0x00, 0x00 },
	[BAM_P_IRQ_STTS]	= { 0x1010, 0x1000, 0x00, 0x00 },
	[BAM_P_IRQ_CLR]		= { 0x1014, 0x1000, 0x00, 0x00 },
	[BAM_P_IRQ_EN]		= { 0x1018, 0x1000, 0x00, 0x00 },
	[BAM_P_EVNT_DEST_ADDR]	= { 0x182C, 0x00, 0x1000, 0x00 },
	[BAM_P_EVNT_REG]	= { 0x1818, 0x00, 0x1000, 0x00 },
	[BAM_P_SW_OFSTS]	= { 0x1800, 0x00, 0x1000, 0x00 },
	[BAM_P_DATA_FIFO_ADDR]	= { 0x1824, 0x00, 0x1000, 0x00 },
	[BAM_P_DESC_FIFO_ADDR]	= { 0x181C, 0x00, 0x1000, 0x00 },
	[BAM_P_EVNT_GEN_TRSHLD]	= { 0x1828, 0x00, 0x1000, 0x00 },
	[BAM_P_FIFO_SIZES]	= { 0x1820, 0x00, 0x1000, 0x00 },
};
/* BAM CTRL */
#define BAM_SW_RST			BIT(0)
#define BAM_EN				BIT(1)
#define BAM_EN_ACCUM			BIT(4)
#define BAM_TESTBUS_SEL_SHIFT		5
#define BAM_TESTBUS_SEL_MASK		0x3F
#define BAM_DESC_CACHE_SEL_SHIFT	13
#define BAM_DESC_CACHE_SEL_MASK		0x3
#define BAM_CACHED_DESC_STORE		BIT(15)
#define IBC_DISABLE			BIT(16)
/* BAM_REVISION */
#define REVISION_SHIFT		0
#define REVISION_MASK		0xFF
#define NUM_EES_SHIFT		8
#define NUM_EES_MASK		0xF
#define CE_BUFFER_SIZE		BIT(13)
#define AXI_ACTIVE		BIT(14)
#define USE_VMIDMT		BIT(15)
#define SECURED			BIT(16)
#define BAM_HAS_NO_BYPASS	BIT(17)
#define HIGH_FREQUENCY_BAM	BIT(18)
#define INACTIV_TMRS_EXST	BIT(19)
#define NUM_INACTIV_TMRS	BIT(20)
#define DESC_CACHE_DEPTH_SHIFT	21
#define DESC_CACHE_DEPTH_1	(0 << DESC_CACHE_DEPTH_SHIFT)
#define DESC_CACHE_DEPTH_2	(1 << DESC_CACHE_DEPTH_SHIFT)
#define DESC_CACHE_DEPTH_3	(2 << DESC_CACHE_DEPTH_SHIFT)
#define DESC_CACHE_DEPTH_4	(3 << DESC_CACHE_DEPTH_SHIFT)
#define CMD_DESC_EN		BIT(23)
#define INACTIV_TMR_BASE_SHIFT	24
#define INACTIV_TMR_BASE_MASK	0xFF
/* BAM_NUM_PIPES */
#define BAM_NUM_PIPES_SHIFT		0
#define BAM_NUM_PIPES_MASK		0xFF
#define PERIPH_NON_PIPE_GRP_SHIFT	16
#define PERIPH_NON_PIPE_GRP_MASK	0xFF
#define BAM_NON_PIPE_GRP_SHIFT		24
#define BAM_NON_PIPE_GRP_MASK		0xFF
/* BAM_CNFG_BITS */
#define BAM_PIPE_CNFG		BIT(2)
#define BAM_FULL_PIPE		BIT(11)
#define BAM_NO_EXT_P_RST	BIT(12)
#define BAM_IBC_DISABLE		BIT(13)
#define BAM_SB_CLK_REQ		BIT(14)
#define BAM_PSM_CSW_REQ		BIT(15)
#define BAM_PSM_P_RES		BIT(16)
#define BAM_AU_P_RES		BIT(17)
#define BAM_SI_P_RES		BIT(18)
#define BAM_WB_P_RES		BIT(19)
#define BAM_WB_BLK_CSW		BIT(20)
#define BAM_WB_CSW_ACK_IDL	BIT(21)
#define BAM_WB_RETR_SVPNT	BIT(22)
#define BAM_WB_DSC_AVL_P_RST	BIT(23)
#define BAM_REG_P_EN		BIT(24)
#define BAM_PSM_P_HD_DATA	BIT(25)
#define BAM_AU_ACCUMED		BIT(26)
#define BAM_CMD_ENABLE		BIT(27)
#define BAM_CNFG_BITS_DEFAULT	(BAM_PIPE_CNFG |	\
				 BAM_NO_EXT_P_RST |	\
				 BAM_IBC_DISABLE |	\
				 BAM_SB_CLK_REQ |	\
				 BAM_PSM_CSW_REQ |	\
				 BAM_PSM_P_RES |	\
				 BAM_AU_P_RES |		\
				 BAM_SI_P_RES |		\
				 BAM_WB_P_RES |		\
				 BAM_WB_BLK_CSW |	\
				 BAM_WB_CSW_ACK_IDL |	\
				 BAM_WB_RETR_SVPNT |	\
				 BAM_WB_DSC_AVL_P_RST |	\
				 BAM_REG_P_EN |		\
				 BAM_PSM_P_HD_DATA |	\
				 BAM_AU_ACCUMED |	\
				 BAM_CMD_ENABLE)
/* P_CTRL */
#define P_EN			BIT(1)
#define P_DIRECTION		BIT(3)
#define P_SYS_STRM		BIT(4)
#define P_SYS_MODE		BIT(5)
#define P_AUTO_EOB		BIT(6)
#define P_AUTO_EOB_SEL_SHIFT	7
#define P_AUTO_EOB_SEL_512	(0 << P_AUTO_EOB_SEL_SHIFT)
#define P_AUTO_EOB_SEL_256	(1 << P_AUTO_EOB_SEL_SHIFT)
#define P_AUTO_EOB_SEL_128	(2 << P_AUTO_EOB_SEL_SHIFT)
#define P_AUTO_EOB_SEL_64	(3 << P_AUTO_EOB_SEL_SHIFT)
#define P_PREFETCH_LIMIT_SHIFT	9
#define P_PREFETCH_LIMIT_32	(0 << P_PREFETCH_LIMIT_SHIFT)
#define P_PREFETCH_LIMIT_16	(1 << P_PREFETCH_LIMIT_SHIFT)
#define P_PREFETCH_LIMIT_4	(2 << P_PREFETCH_LIMIT_SHIFT)
#define P_WRITE_NWD		BIT(11)
#define P_LOCK_GROUP_SHIFT	16
#define P_LOCK_GROUP_MASK	0x1F
/* BAM_DESC_CNT_TRSHLD */
#define CNT_TRSHLD		0xffff
#define DEFAULT_CNT_THRSHLD	0x4
/* BAM_IRQ_SRCS */
#define BAM_IRQ			BIT(31)
#define P_IRQ			0x7fffffff

/* BAM_IRQ_SRCS_MSK */
#define BAM_IRQ_MSK		BAM_IRQ
#define P_IRQ_MSK		P_IRQ
/* BAM_IRQ_STTS */
#define BAM_TIMER_IRQ		BIT(4)
#define BAM_EMPTY_IRQ		BIT(3)
#define BAM_ERROR_IRQ		BIT(2)
#define BAM_HRESP_ERR_IRQ	BIT(1)
/* BAM_IRQ_CLR */
#define BAM_TIMER_CLR		BIT(4)
#define BAM_EMPTY_CLR		BIT(3)
#define BAM_ERROR_CLR		BIT(2)
#define BAM_HRESP_ERR_CLR	BIT(1)
/* BAM_IRQ_EN */
#define BAM_TIMER_EN		BIT(4)
#define BAM_EMPTY_EN		BIT(3)
#define BAM_ERROR_EN		BIT(2)
#define BAM_HRESP_ERR_EN	BIT(1)
/* BAM_P_IRQ_EN */
#define P_PRCSD_DESC_EN		BIT(0)
#define P_TIMER_EN		BIT(1)
#define P_WAKE_EN		BIT(2)
#define P_OUT_OF_DESC_EN	BIT(3)
#define P_ERR_EN		BIT(4)
#define P_TRNSFR_END_EN		BIT(5)
#define P_DEFAULT_IRQS_EN	(P_PRCSD_DESC_EN | P_ERR_EN | P_TRNSFR_END_EN)
/* BAM_P_SW_OFSTS */
#define P_SW_OFSTS_MASK		0xffff

#define BAM_DESC_FIFO_SIZE	SZ_32K
#define MAX_DESCRIPTORS (BAM_DESC_FIFO_SIZE / sizeof(struct bam_desc_hw) - 1)
#define BAM_MAX_DATA_SIZE	(SZ_32K - 8)
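/*
 * One descriptor's worth of space is held back so the FIFO base handed to
 * the hardware can be aligned up to an 8-byte boundary (see the ALIGN() in
 * bam_chan_init_hw()), hence the "- 1" in MAX_DESCRIPTORS above.
 */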
struct bam_chan {
	struct virt_dma_chan vc;

	struct bam_device *bdev;

	/* configuration from device tree */
	u32 id;

	struct bam_async_desc *curr_txd;	/* current running dma */

	/* runtime configuration */
	struct dma_slave_config slave;

	/* fifo storage */
	struct bam_desc_hw *fifo_virt;
	dma_addr_t fifo_phys;

	/* fifo markers */
	unsigned short head;		/* start of active descriptor entries */
	unsigned short tail;		/* end of active descriptor entries */

	unsigned int initialized;	/* is the channel hw initialized? */
	unsigned int paused;		/* is the channel paused? */
	unsigned int reconfigure;	/* new slave config? */

	struct list_head node;
};
static inline struct bam_chan *to_bam_chan(struct dma_chan *common)
{
	return container_of(common, struct bam_chan, vc.chan);
}
struct bam_device {
	void __iomem *regs;
	struct device *dev;
	struct dma_device common;
	struct device_dma_parameters dma_parms;
	struct bam_chan *channels;
	u32 num_channels;

	/* execution environment ID, from DT */
	u32 ee;

	const struct reg_offset_data *layout;

	struct clk *bamclk;
	int irq;

	/* dma start transaction tasklet */
	struct tasklet_struct task;
};
/**
 * bam_addr - returns BAM register address
 * @bdev: bam device
 * @pipe: pipe instance (ignored when register doesn't have multiple instances)
 * @reg: register enum
 */
static inline void __iomem *bam_addr(struct bam_device *bdev, u32 pipe,
		enum bam_reg reg)
{
	const struct reg_offset_data r = bdev->layout[reg];

	return bdev->regs + r.base_offset +
		r.pipe_mult * pipe +
		r.evnt_mult * pipe +
		r.ee_mult * bdev->ee;
}
/**
 * bam_reset_channel - Reset individual BAM DMA channel
 * @bchan: bam channel
 *
 * This function resets a specific BAM channel
 */
static void bam_reset_channel(struct bam_chan *bchan)
{
	struct bam_device *bdev = bchan->bdev;

	lockdep_assert_held(&bchan->vc.lock);

	writel_relaxed(1, bam_addr(bdev, bchan->id, BAM_P_RST));
	writel_relaxed(0, bam_addr(bdev, bchan->id, BAM_P_RST));

	/* don't allow cpu to reorder BAM register accesses done after this */
	wmb();

	/* make sure hw is initialized when channel is used the first time */
	bchan->initialized = 0;
}
/**
 * bam_chan_init_hw - Initialize channel hardware
 * @bchan: bam channel
 * @dir: DMA transfer direction
 *
 * This function resets and initializes the BAM channel
 */
static void bam_chan_init_hw(struct bam_chan *bchan,
	enum dma_transfer_direction dir)
{
	struct bam_device *bdev = bchan->bdev;
	u32 val;

	/* Reset the channel to clear internal state of the FIFO */
	bam_reset_channel(bchan);

	/*
	 * write out 8 byte aligned address. We have enough space for this
	 * because we allocated 1 more descriptor (8 bytes) than we can use
	 */
	writel_relaxed(ALIGN(bchan->fifo_phys, sizeof(struct bam_desc_hw)),
			bam_addr(bdev, bchan->id, BAM_P_DESC_FIFO_ADDR));
	writel_relaxed(BAM_DESC_FIFO_SIZE,
			bam_addr(bdev, bchan->id, BAM_P_FIFO_SIZES));

	/* enable the per pipe interrupts, enable EOT, ERR, and INT irqs */
	writel_relaxed(P_DEFAULT_IRQS_EN,
			bam_addr(bdev, bchan->id, BAM_P_IRQ_EN));

	/* unmask the specific pipe and EE combo */
	val = readl_relaxed(bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE));
	val |= BIT(bchan->id);
	writel_relaxed(val, bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE));

	/* don't allow cpu to reorder the channel enable done below */
	wmb();

	/* set fixed direction and mode, then enable channel */
	val = P_EN | P_SYS_MODE;
	if (dir == DMA_DEV_TO_MEM)
		val |= P_DIRECTION;

	writel_relaxed(val, bam_addr(bdev, bchan->id, BAM_P_CTRL));

	bchan->initialized = 1;

	/* init FIFO pointers */
	bchan->head = 0;
	bchan->tail = 0;
}
/**
 * bam_alloc_chan - Allocate channel resources for DMA channel.
 * @chan: specified channel
 *
 * This function allocates the FIFO descriptor memory
 */
static int bam_alloc_chan(struct dma_chan *chan)
{
	struct bam_chan *bchan = to_bam_chan(chan);
	struct bam_device *bdev = bchan->bdev;

	if (bchan->fifo_virt)
		return 0;

	/* allocate FIFO descriptor space, but only if necessary */
	bchan->fifo_virt = dma_alloc_writecombine(bdev->dev, BAM_DESC_FIFO_SIZE,
				&bchan->fifo_phys, GFP_KERNEL);
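	/*
	 * The FIFO is mapped write-combined, presumably because the BAM
	 * fetches descriptors directly from this memory and a cached mapping
	 * would require explicit cache maintenance on every kick.
	 */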
	if (!bchan->fifo_virt) {
		dev_err(bdev->dev, "Failed to allocate desc fifo\n");
		return -ENOMEM;
	}

	return 0;
}
/**
 * bam_free_chan - Frees dma resources associated with specific channel
 * @chan: specified channel
 *
 * Free the allocated fifo descriptor memory and channel resources
 */
static void bam_free_chan(struct dma_chan *chan)
{
	struct bam_chan *bchan = to_bam_chan(chan);
	struct bam_device *bdev = bchan->bdev;
	u32 val;
	unsigned long flags;

	vchan_free_chan_resources(to_virt_chan(chan));

	if (bchan->curr_txd) {
		dev_err(bchan->bdev->dev, "Cannot free busy channel\n");
		return;
	}

	spin_lock_irqsave(&bchan->vc.lock, flags);
	bam_reset_channel(bchan);
	spin_unlock_irqrestore(&bchan->vc.lock, flags);

	dma_free_writecombine(bdev->dev, BAM_DESC_FIFO_SIZE, bchan->fifo_virt,
				bchan->fifo_phys);
	bchan->fifo_virt = NULL;

	/* mask irq for pipe/channel */
	val = readl_relaxed(bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE));
	val &= ~BIT(bchan->id);
	writel_relaxed(val, bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE));

	writel_relaxed(0, bam_addr(bdev, bchan->id, BAM_P_IRQ_EN));
}
/**
 * bam_slave_config - set slave configuration for channel
 * @chan: dma channel
 * @cfg: slave configuration
 *
 * Sets slave configuration for channel
 */
static int bam_slave_config(struct dma_chan *chan,
		struct dma_slave_config *cfg)
{
	struct bam_chan *bchan = to_bam_chan(chan);
	unsigned long flag;

	spin_lock_irqsave(&bchan->vc.lock, flag);
	memcpy(&bchan->slave, cfg, sizeof(*cfg));
	bchan->reconfigure = 1;
	spin_unlock_irqrestore(&bchan->vc.lock, flag);

	return 0;
}
/**
 * bam_prep_slave_sg - Prep slave sg transaction
 * @chan: dma channel
 * @sgl: scatter gather list
 * @sg_len: length of sg
 * @direction: DMA transfer direction
 * @flags: DMA flags
 * @context: transfer context (unused)
 */
static struct dma_async_tx_descriptor *bam_prep_slave_sg(struct dma_chan *chan,
	struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction direction, unsigned long flags,
	void *context)
{
	struct bam_chan *bchan = to_bam_chan(chan);
	struct bam_device *bdev = bchan->bdev;
	struct bam_async_desc *async_desc;
	struct scatterlist *sg;
	u32 i;
	struct bam_desc_hw *desc;
	unsigned int num_alloc = 0;

	if (!is_slave_direction(direction)) {
		dev_err(bdev->dev, "invalid dma direction\n");
		return NULL;
	}

	/* calculate number of required entries */
	for_each_sg(sgl, sg, sg_len, i)
		num_alloc += DIV_ROUND_UP(sg_dma_len(sg), BAM_MAX_DATA_SIZE);

	/* allocate enough room to accommodate the number of entries */
	async_desc = kzalloc(sizeof(*async_desc) +
			(num_alloc * sizeof(struct bam_desc_hw)), GFP_NOWAIT);
	if (!async_desc)
		return NULL;

	if (flags & DMA_PREP_FENCE)
		async_desc->flags |= DESC_FLAG_NWD;

	if (flags & DMA_PREP_INTERRUPT)
		async_desc->flags |= DESC_FLAG_EOT;
	else
		async_desc->flags |= DESC_FLAG_INT;

	async_desc->num_desc = num_alloc;
	async_desc->curr_desc = async_desc->desc;
	async_desc->dir = direction;
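	/*
	 * Any scatterlist entry longer than BAM_MAX_DATA_SIZE is split across
	 * multiple hardware descriptors below, matching the DIV_ROUND_UP()
	 * sizing done above.
	 */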
	/* fill in temporary descriptors */
	desc = async_desc->desc;
	for_each_sg(sgl, sg, sg_len, i) {
		unsigned int remainder = sg_dma_len(sg);
		unsigned int curr_offset = 0;

		do {
			desc->addr = sg_dma_address(sg) + curr_offset;

			if (remainder > BAM_MAX_DATA_SIZE) {
				desc->size = BAM_MAX_DATA_SIZE;
				remainder -= BAM_MAX_DATA_SIZE;
				curr_offset += BAM_MAX_DATA_SIZE;
			} else {
				desc->size = remainder;
				remainder = 0;
			}

			async_desc->length += desc->size;
			desc++;
		} while (remainder > 0);
	}

	return vchan_tx_prep(&bchan->vc, &async_desc->vd, flags);
}
/**
 * bam_dma_terminate_all - terminate all transactions on a channel
 * @chan: dma channel
 *
 * Dequeues and frees all transactions
 * No callbacks are done
 */
static int bam_dma_terminate_all(struct dma_chan *chan)
{
	struct bam_chan *bchan = to_bam_chan(chan);
	unsigned long flag;
	LIST_HEAD(head);

	/* remove all transactions, including active transaction */
	spin_lock_irqsave(&bchan->vc.lock, flag);
	if (bchan->curr_txd) {
		list_add(&bchan->curr_txd->vd.node, &bchan->vc.desc_issued);
		bchan->curr_txd = NULL;
	}
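	/*
	 * Re-queueing the in-flight descriptor above lets
	 * vchan_get_all_descriptors() below collect it together with the
	 * pending ones, so everything is freed in one pass.
	 */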
	vchan_get_all_descriptors(&bchan->vc, &head);
	spin_unlock_irqrestore(&bchan->vc.lock, flag);

	vchan_dma_desc_free_list(&bchan->vc, &head);

	return 0;
}
/**
 * bam_pause - Pause DMA channel
 * @chan: dma channel
 */
static int bam_pause(struct dma_chan *chan)
{
	struct bam_chan *bchan = to_bam_chan(chan);
	struct bam_device *bdev = bchan->bdev;
	unsigned long flag;

	spin_lock_irqsave(&bchan->vc.lock, flag);
	writel_relaxed(1, bam_addr(bdev, bchan->id, BAM_P_HALT));
	bchan->paused = 1;
	spin_unlock_irqrestore(&bchan->vc.lock, flag);

	return 0;
}
/**
 * bam_resume - Resume DMA channel operations
 * @chan: dma channel
 */
static int bam_resume(struct dma_chan *chan)
{
	struct bam_chan *bchan = to_bam_chan(chan);
	struct bam_device *bdev = bchan->bdev;
	unsigned long flag;

	spin_lock_irqsave(&bchan->vc.lock, flag);
	writel_relaxed(0, bam_addr(bdev, bchan->id, BAM_P_HALT));
	bchan->paused = 0;
	spin_unlock_irqrestore(&bchan->vc.lock, flag);

	return 0;
}
/**
 * process_channel_irqs - processes the channel interrupts
 * @bdev: bam controller
 *
 * This function processes the channel interrupts
 */
static u32 process_channel_irqs(struct bam_device *bdev)
{
	u32 i, srcs, pipe_stts;
	unsigned long flags;
	struct bam_async_desc *async_desc;

	srcs = readl_relaxed(bam_addr(bdev, 0, BAM_IRQ_SRCS_EE));

	/* return early if no pipe/channel interrupts are present */
	if (!(srcs & P_IRQ))
		return srcs;

	for (i = 0; i < bdev->num_channels; i++) {
		struct bam_chan *bchan = &bdev->channels[i];

		if (!(srcs & BIT(i)))
			continue;

		pipe_stts = readl_relaxed(bam_addr(bdev, i, BAM_P_IRQ_STTS));

		writel_relaxed(pipe_stts, bam_addr(bdev, i, BAM_P_IRQ_CLR));

		spin_lock_irqsave(&bchan->vc.lock, flags);
		async_desc = bchan->curr_txd;

		if (async_desc) {
			async_desc->num_desc -= async_desc->xfer_len;
			async_desc->curr_desc += async_desc->xfer_len;
			bchan->curr_txd = NULL;

			/* manage FIFO */
			bchan->head += async_desc->xfer_len;
			bchan->head %= MAX_DESCRIPTORS;
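			/*
			 * head now points at the first FIFO slot still owned
			 * by the hardware; the consumed slots are free for
			 * reuse by bam_start_dma().
			 */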
			/*
			 * if complete, process cookie. Otherwise
			 * push back to front of desc_issued so that
			 * it gets restarted by the tasklet
			 */
			if (!async_desc->num_desc)
				vchan_cookie_complete(&async_desc->vd);
			else
				list_add(&async_desc->vd.node,
					&bchan->vc.desc_issued);
		}

		spin_unlock_irqrestore(&bchan->vc.lock, flags);
	}

	return srcs;
}
/**
 * bam_dma_irq - irq handler for bam controller
 * @irq: IRQ of interrupt
 * @data: callback data
 *
 * IRQ handler for the bam controller
 */
static irqreturn_t bam_dma_irq(int irq, void *data)
{
	struct bam_device *bdev = data;
	u32 clr_mask = 0, srcs = 0;

	srcs |= process_channel_irqs(bdev);

	/* kick off tasklet to start next dma transfer */
	if (srcs & P_IRQ)
		tasklet_schedule(&bdev->task);

	if (srcs & BAM_IRQ)
		clr_mask = readl_relaxed(bam_addr(bdev, 0, BAM_IRQ_STTS));

	/* don't allow reorder of the various accesses to the BAM registers */
	mb();

	writel_relaxed(clr_mask, bam_addr(bdev, 0, BAM_IRQ_CLR));

	return IRQ_HANDLED;
}
/**
 * bam_tx_status - returns status of transaction
 * @chan: dma channel
 * @cookie: transaction cookie
 * @txstate: DMA transaction state
 *
 * Return status of dma transaction
 */
static enum dma_status bam_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
		struct dma_tx_state *txstate)
{
	struct bam_chan *bchan = to_bam_chan(chan);
	struct virt_dma_desc *vd;
	int ret;
	size_t residue = 0;
	unsigned int i;
	unsigned long flags;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	if (!txstate)
		return bchan->paused ? DMA_PAUSED : ret;
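	/*
	 * Residue is reported at segment granularity: a descriptor still on
	 * the issued list counts in full, while for the transaction in
	 * flight only the not-yet-processed hardware descriptors are summed.
	 */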
	spin_lock_irqsave(&bchan->vc.lock, flags);
	vd = vchan_find_desc(&bchan->vc, cookie);
	if (vd)
		residue = container_of(vd, struct bam_async_desc, vd)->length;
	else if (bchan->curr_txd && bchan->curr_txd->vd.tx.cookie == cookie)
		for (i = 0; i < bchan->curr_txd->num_desc; i++)
			residue += bchan->curr_txd->curr_desc[i].size;

	spin_unlock_irqrestore(&bchan->vc.lock, flags);

	dma_set_residue(txstate, residue);

	if (ret == DMA_IN_PROGRESS && bchan->paused)
		ret = DMA_PAUSED;

	return ret;
}
/**
 * bam_apply_new_config - apply a new slave configuration to the channel
 * @bchan: bam dma channel
 * @dir: DMA direction
 */
static void bam_apply_new_config(struct bam_chan *bchan,
	enum dma_transfer_direction dir)
{
	struct bam_device *bdev = bchan->bdev;
	u32 maxburst;

	if (dir == DMA_DEV_TO_MEM)
		maxburst = bchan->slave.src_maxburst;
	else
		maxburst = bchan->slave.dst_maxburst;

	writel_relaxed(maxburst, bam_addr(bdev, 0, BAM_DESC_CNT_TRSHLD));

	bchan->reconfigure = 0;
}
/**
 * bam_start_dma - start next transaction
 * @bchan: bam dma channel
 */
static void bam_start_dma(struct bam_chan *bchan)
{
	struct virt_dma_desc *vd = vchan_next_desc(&bchan->vc);
	struct bam_device *bdev = bchan->bdev;
	struct bam_async_desc *async_desc;
	struct bam_desc_hw *desc;
	struct bam_desc_hw *fifo = PTR_ALIGN(bchan->fifo_virt,
					sizeof(struct bam_desc_hw));

	lockdep_assert_held(&bchan->vc.lock);

	if (!vd)
		return;

	list_del(&vd->node);

	async_desc = container_of(vd, struct bam_async_desc, vd);
	bchan->curr_txd = async_desc;

	/* on first use, initialize the channel hardware */
	if (!bchan->initialized)
		bam_chan_init_hw(bchan, async_desc->dir);

	/* apply new slave config changes, if necessary */
	if (bchan->reconfigure)
		bam_apply_new_config(bchan, async_desc->dir);

	desc = bchan->curr_txd->curr_desc;

	if (async_desc->num_desc > MAX_DESCRIPTORS)
		async_desc->xfer_len = MAX_DESCRIPTORS;
	else
		async_desc->xfer_len = async_desc->num_desc;

	/* set any special flags on the last descriptor */
	if (async_desc->num_desc == async_desc->xfer_len)
		desc[async_desc->xfer_len - 1].flags = async_desc->flags;
	else
		desc[async_desc->xfer_len - 1].flags |= DESC_FLAG_INT;

	if (bchan->tail + async_desc->xfer_len > MAX_DESCRIPTORS) {
		u32 partial = MAX_DESCRIPTORS - bchan->tail;

		memcpy(&fifo[bchan->tail], desc,
				partial * sizeof(struct bam_desc_hw));
		memcpy(fifo, &desc[partial], (async_desc->xfer_len - partial) *
				sizeof(struct bam_desc_hw));
	} else {
		memcpy(&fifo[bchan->tail], desc,
				async_desc->xfer_len * sizeof(struct bam_desc_hw));
	}

	bchan->tail += async_desc->xfer_len;
	bchan->tail %= MAX_DESCRIPTORS;
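	/*
	 * Kicking the transfer below follows the scheme from the header
	 * comment: the tail descriptor index is converted to a byte offset
	 * within the FIFO and written to this channel's P_EVNT_REG.
	 */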
	/* ensure descriptor writes and dma start not reordered */
	wmb();
	writel_relaxed(bchan->tail * sizeof(struct bam_desc_hw),
			bam_addr(bdev, bchan->id, BAM_P_EVNT_REG));
}
/**
 * dma_tasklet - DMA IRQ tasklet
 * @data: tasklet argument (bam controller structure)
 *
 * Sets up next DMA operation and then processes all completed transactions
 */
static void dma_tasklet(unsigned long data)
{
	struct bam_device *bdev = (struct bam_device *)data;
	struct bam_chan *bchan;
	unsigned long flags;
	unsigned int i;

	/* go through the channels and kick off transactions */
	for (i = 0; i < bdev->num_channels; i++) {
		bchan = &bdev->channels[i];
		spin_lock_irqsave(&bchan->vc.lock, flags);

		if (!list_empty(&bchan->vc.desc_issued) && !bchan->curr_txd)
			bam_start_dma(bchan);
		spin_unlock_irqrestore(&bchan->vc.lock, flags);
	}
}
/**
 * bam_issue_pending - starts pending transactions
 * @chan: dma channel
 *
 * Starts a pending transaction directly if the channel is idle
 */
static void bam_issue_pending(struct dma_chan *chan)
{
	struct bam_chan *bchan = to_bam_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&bchan->vc.lock, flags);

	/* if work pending and idle, start a transaction */
	if (vchan_issue_pending(&bchan->vc) && !bchan->curr_txd)
		bam_start_dma(bchan);

	spin_unlock_irqrestore(&bchan->vc.lock, flags);
}
/**
 * bam_dma_free_desc - free descriptor memory
 * @vd: virtual descriptor
 */
static void bam_dma_free_desc(struct virt_dma_desc *vd)
{
	struct bam_async_desc *async_desc = container_of(vd,
			struct bam_async_desc, vd);

	kfree(async_desc);
}
static struct dma_chan *bam_dma_xlate(struct of_phandle_args *dma_spec,
		struct of_dma *of)
{
	struct bam_device *bdev = container_of(of->of_dma_data,
					struct bam_device, common);
	unsigned int request;

	if (dma_spec->args_count != 1)
		return NULL;

	request = dma_spec->args[0];
	if (request >= bdev->num_channels)
		return NULL;

	return dma_get_slave_channel(&(bdev->channels[request].vc.chan));
}
/**
 * bam_init
 * @bdev: bam device
 *
 * Initialization helper for global bam registers
 */
static int bam_init(struct bam_device *bdev)
{
	u32 val;

	/* read revision and configuration information */
	val = readl_relaxed(bam_addr(bdev, 0, BAM_REVISION)) >> NUM_EES_SHIFT;
	val &= NUM_EES_MASK;

	/* check that configured EE is within range */
	if (bdev->ee >= val)
		return -EINVAL;

	val = readl_relaxed(bam_addr(bdev, 0, BAM_NUM_PIPES));
	bdev->num_channels = val & BAM_NUM_PIPES_MASK;

	/* after reset all pipes are disabled and idle */
	val = readl_relaxed(bam_addr(bdev, 0, BAM_CTRL));
	val |= BAM_SW_RST;
	writel_relaxed(val, bam_addr(bdev, 0, BAM_CTRL));
	val &= ~BAM_SW_RST;
	writel_relaxed(val, bam_addr(bdev, 0, BAM_CTRL));
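	/*
	 * The reset is a pulse: BAM_SW_RST is asserted by the first write
	 * above and deasserted by the second, leaving every pipe disabled
	 * and idle.
	 */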
	/* make sure previous stores are visible before enabling BAM */
	wmb();

	val |= BAM_EN;
	writel_relaxed(val, bam_addr(bdev, 0, BAM_CTRL));

	/* set descriptor threshold, start with 4 bytes */
	writel_relaxed(DEFAULT_CNT_THRSHLD,
			bam_addr(bdev, 0, BAM_DESC_CNT_TRSHLD));

	/* Enable default set of h/w workarounds, ie all except BAM_FULL_PIPE */
	writel_relaxed(BAM_CNFG_BITS_DEFAULT, bam_addr(bdev, 0, BAM_CNFG_BITS));

	/* enable irqs for errors */
	writel_relaxed(BAM_ERROR_EN | BAM_HRESP_ERR_EN,
			bam_addr(bdev, 0, BAM_IRQ_EN));

	/* unmask global bam interrupt */
	writel_relaxed(BAM_IRQ_MSK, bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE));

	return 0;
}
static void bam_channel_init(struct bam_device *bdev, struct bam_chan *bchan,
	u32 index)
{
	bchan->id = index;
	bchan->bdev = bdev;

	vchan_init(&bchan->vc, &bdev->common);
	bchan->vc.desc_free = bam_dma_free_desc;
}
static const struct of_device_id bam_of_match[] = {
	{ .compatible = "qcom,bam-v1.3.0", .data = &bam_v1_3_reg_info },
	{ .compatible = "qcom,bam-v1.4.0", .data = &bam_v1_4_reg_info },
	{}
};

MODULE_DEVICE_TABLE(of, bam_of_match);
static int bam_dma_probe(struct platform_device *pdev)
{
	struct bam_device *bdev;
	const struct of_device_id *match;
	struct resource *iores;
	int ret, i;

	bdev = devm_kzalloc(&pdev->dev, sizeof(*bdev), GFP_KERNEL);
	if (!bdev)
		return -ENOMEM;

	bdev->dev = &pdev->dev;

	match = of_match_node(bam_of_match, pdev->dev.of_node);
	if (!match) {
		dev_err(&pdev->dev, "Unsupported BAM module\n");
		return -ENODEV;
	}

	bdev->layout = match->data;

	iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	bdev->regs = devm_ioremap_resource(&pdev->dev, iores);
	if (IS_ERR(bdev->regs))
		return PTR_ERR(bdev->regs);

	bdev->irq = platform_get_irq(pdev, 0);
	if (bdev->irq < 0)
		return bdev->irq;

	ret = of_property_read_u32(pdev->dev.of_node, "qcom,ee", &bdev->ee);
	if (ret) {
		dev_err(bdev->dev, "Execution environment unspecified\n");
		return ret;
	}

	bdev->bamclk = devm_clk_get(bdev->dev, "bam_clk");
	if (IS_ERR(bdev->bamclk))
		return PTR_ERR(bdev->bamclk);

	ret = clk_prepare_enable(bdev->bamclk);
	if (ret) {
		dev_err(bdev->dev, "failed to prepare/enable clock\n");
		return ret;
	}

	ret = bam_init(bdev);
	if (ret)
		goto err_disable_clk;

	tasklet_init(&bdev->task, dma_tasklet, (unsigned long)bdev);

	bdev->channels = devm_kcalloc(bdev->dev, bdev->num_channels,
				sizeof(*bdev->channels), GFP_KERNEL);

	if (!bdev->channels) {
		ret = -ENOMEM;
		goto err_disable_clk;
	}

	/* allocate and initialize channels */
	INIT_LIST_HEAD(&bdev->common.channels);

	for (i = 0; i < bdev->num_channels; i++)
		bam_channel_init(bdev, &bdev->channels[i], i);

	ret = devm_request_irq(bdev->dev, bdev->irq, bam_dma_irq,
			IRQF_TRIGGER_HIGH, "bam_dma", bdev);
	if (ret)
		goto err_disable_clk;

	/* set max dma segment size */
	bdev->common.dev = bdev->dev;
	bdev->common.dev->dma_parms = &bdev->dma_parms;
	ret = dma_set_max_seg_size(bdev->common.dev, BAM_MAX_DATA_SIZE);
	if (ret) {
		dev_err(bdev->dev, "cannot set maximum segment size\n");
		goto err_disable_clk;
	}

	platform_set_drvdata(pdev, bdev);

	/* set capabilities */
	dma_cap_zero(bdev->common.cap_mask);
	dma_cap_set(DMA_SLAVE, bdev->common.cap_mask);
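	/*
	 * Only slave transfers are advertised: there is no memcpy
	 * capability, and a given channel's direction is fixed by the way
	 * the pipe is wired to its peripheral (see the header comment).
	 */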
	/* initialize dmaengine apis */
	bdev->common.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	bdev->common.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
	bdev->common.src_addr_widths = DMA_SLAVE_BUSWIDTH_4_BYTES;
	bdev->common.dst_addr_widths = DMA_SLAVE_BUSWIDTH_4_BYTES;
	bdev->common.device_alloc_chan_resources = bam_alloc_chan;
	bdev->common.device_free_chan_resources = bam_free_chan;
	bdev->common.device_prep_slave_sg = bam_prep_slave_sg;
	bdev->common.device_config = bam_slave_config;
	bdev->common.device_pause = bam_pause;
	bdev->common.device_resume = bam_resume;
	bdev->common.device_terminate_all = bam_dma_terminate_all;
	bdev->common.device_issue_pending = bam_issue_pending;
	bdev->common.device_tx_status = bam_tx_status;
	bdev->common.dev = bdev->dev;

	ret = dma_async_device_register(&bdev->common);
	if (ret) {
		dev_err(bdev->dev, "failed to register dma async device\n");
		goto err_disable_clk;
	}

	ret = of_dma_controller_register(pdev->dev.of_node, bam_dma_xlate,
					&bdev->common);
	if (ret)
		goto err_unregister_dma;

	return 0;

err_unregister_dma:
	dma_async_device_unregister(&bdev->common);
err_disable_clk:
	clk_disable_unprepare(bdev->bamclk);
	return ret;
}
static int bam_dma_remove(struct platform_device *pdev)
{
	struct bam_device *bdev = platform_get_drvdata(pdev);
	u32 i;

	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&bdev->common);

	/* mask all interrupts for this execution environment */
	writel_relaxed(0, bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE));

	devm_free_irq(bdev->dev, bdev->irq, bdev);

	for (i = 0; i < bdev->num_channels; i++) {
		bam_dma_terminate_all(&bdev->channels[i].vc.chan);
		tasklet_kill(&bdev->channels[i].vc.task);

		dma_free_writecombine(bdev->dev, BAM_DESC_FIFO_SIZE,
			bdev->channels[i].fifo_virt,
			bdev->channels[i].fifo_phys);
	}

	tasklet_kill(&bdev->task);

	clk_disable_unprepare(bdev->bamclk);

	return 0;
}
static struct platform_driver bam_dma_driver = {
	.probe = bam_dma_probe,
	.remove = bam_dma_remove,
	.driver = {
		.name = "bam-dma-engine",
		.of_match_table = bam_of_match,
	},
};

module_platform_driver(bam_dma_driver);
MODULE_AUTHOR("Andy Gross <agross@codeaurora.org>");
MODULE_DESCRIPTION("QCOM BAM DMA engine driver");
MODULE_LICENSE("GPL v2");