/*
 * skl-sst-cldma.c - Code Loader DMA handler
 *
 * Copyright (C) 2015, Intel Corporation.
 * Author: Subhransu S. Prusty <subhransu.s.prusty@intel.com>
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */

#include <linux/device.h>
#include <linux/mm.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include "../common/sst-dsp.h"
#include "../common/sst-dsp-priv.h"
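
/*
 * Enable/disable the code loader DMA interrupt in the ADSP interrupt
 * control (ADSPIC) register.
 */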
static void skl_cldma_int_enable(struct sst_dsp *ctx)
{
	sst_dsp_shim_update_bits_unlocked(ctx, SKL_ADSP_REG_ADSPIC,
				SKL_ADSPIC_CL_DMA, SKL_ADSPIC_CL_DMA);
}

void skl_cldma_int_disable(struct sst_dsp *ctx)
{
	sst_dsp_shim_update_bits_unlocked(ctx,
			SKL_ADSP_REG_ADSPIC, SKL_ADSPIC_CL_DMA, 0);
}
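
/*
 * Set or clear the stream Run bit and poll until the hardware reports the
 * requested state, with a bounded retry count.
 */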
static void skl_cldma_stream_run(struct sst_dsp *ctx, bool enable)
{
	unsigned char val;
	int timeout;

	sst_dsp_shim_update_bits_unlocked(ctx,
			SKL_ADSP_REG_CL_SD_CTL,
			CL_SD_CTL_RUN_MASK, CL_SD_CTL_RUN(enable));

	udelay(3);
	timeout = 300;
	do {
		/* waiting for hardware to report that the stream Run bit is set */
		val = sst_dsp_shim_read(ctx, SKL_ADSP_REG_CL_SD_CTL) &
			CL_SD_CTL_RUN_MASK;
		if (enable && val)
			break;
		else if (!enable && !val)
			break;
		udelay(3);
	} while (--timeout);

	if (timeout == 0)
		dev_err(ctx->dev,
			"Failed to set Run bit=%d enable=%d\n", val, enable);
}
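
/*
 * Stop the stream and reset the code loader stream descriptor: clear the
 * interrupt enables, stream number, BDL address, buffer length and LVI.
 */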
static void skl_cldma_stream_clear(struct sst_dsp *ctx)
{
	/* make sure the Run bit is cleared before setting stream registers */
	skl_cldma_stream_run(ctx, 0);

	sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_CL_SD_CTL,
				CL_SD_CTL_IOCE_MASK, CL_SD_CTL_IOCE(0));
	sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_CL_SD_CTL,
				CL_SD_CTL_FEIE_MASK, CL_SD_CTL_FEIE(0));
	sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_CL_SD_CTL,
				CL_SD_CTL_DEIE_MASK, CL_SD_CTL_DEIE(0));
	sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_CL_SD_CTL,
				CL_SD_CTL_STRM_MASK, CL_SD_CTL_STRM(0));

	sst_dsp_shim_write(ctx, SKL_ADSP_REG_CL_SD_BDLPL, CL_SD_BDLPLBA(0));
	sst_dsp_shim_write(ctx, SKL_ADSP_REG_CL_SD_BDLPU, 0);

	sst_dsp_shim_write(ctx, SKL_ADSP_REG_CL_SD_CBL, 0);
	sst_dsp_shim_write(ctx, SKL_ADSP_REG_CL_SD_LVI, 0);
}

/* Code loader helper APIs */
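
/*
 * Build the buffer descriptor list (BDL) for the code loader stream. Each
 * BDL entry is four 32-bit words: buffer address low, buffer address high,
 * buffer size, and an interrupt-on-completion flag that is set only for the
 * last fragment when with_ioc is requested.
 */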
static void skl_cldma_setup_bdle(struct sst_dsp *ctx,
		struct snd_dma_buffer *dmab_data,
		u32 **bdlp, int size, int with_ioc)
{
	u32 *bdl = *bdlp;

	ctx->cl_dev.frags = 0;
	while (size > 0) {
		phys_addr_t addr = virt_to_phys(dmab_data->area +
				(ctx->cl_dev.frags * ctx->cl_dev.bufsize));

		bdl[0] = cpu_to_le32(lower_32_bits(addr));
		bdl[1] = cpu_to_le32(upper_32_bits(addr));

		bdl[2] = cpu_to_le32(ctx->cl_dev.bufsize);

		size -= ctx->cl_dev.bufsize;
		bdl[3] = (size || !with_ioc) ? 0 : cpu_to_le32(0x01);

		bdl += 4;
		ctx->cl_dev.frags++;
	}
}

/*
 * Setup controller
 * Configure the registers to update the DMA buffer address and
 * enable interrupts.
 * Note: Uses channel 1 for the transfer.
 */
static void skl_cldma_setup_controller(struct sst_dsp *ctx,
		struct snd_dma_buffer *dmab_bdl, unsigned int max_size,
		u32 count)
{
	skl_cldma_stream_clear(ctx);
	sst_dsp_shim_write(ctx, SKL_ADSP_REG_CL_SD_BDLPL,
			CL_SD_BDLPLBA(dmab_bdl->addr));
	sst_dsp_shim_write(ctx, SKL_ADSP_REG_CL_SD_BDLPU,
			CL_SD_BDLPUBA(dmab_bdl->addr));

	sst_dsp_shim_write(ctx, SKL_ADSP_REG_CL_SD_CBL, max_size);
	sst_dsp_shim_write(ctx, SKL_ADSP_REG_CL_SD_LVI, count - 1);
	sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_CL_SD_CTL,
			CL_SD_CTL_IOCE_MASK, CL_SD_CTL_IOCE(1));
	sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_CL_SD_CTL,
			CL_SD_CTL_FEIE_MASK, CL_SD_CTL_FEIE(1));
	sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_CL_SD_CTL,
			CL_SD_CTL_DEIE_MASK, CL_SD_CTL_DEIE(1));
	sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_CL_SD_CTL,
			CL_SD_CTL_STRM_MASK, CL_SD_CTL_STRM(FW_CL_STREAM_NUMBER));
}
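
/*
 * Program the software position in buffer (SPIB): optionally enable the
 * SPIB mechanism and write the current position so the DMA stops at the
 * software write pointer.
 */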
static void skl_cldma_setup_spb(struct sst_dsp *ctx,
		unsigned int size, bool enable)
{
	if (enable)
		sst_dsp_shim_update_bits_unlocked(ctx,
				SKL_ADSP_REG_CL_SPBFIFO_SPBFCCTL,
				CL_SPBFIFO_SPBFCCTL_SPIBE_MASK,
				CL_SPBFIFO_SPBFCCTL_SPIBE(1));

	sst_dsp_shim_write_unlocked(ctx, SKL_ADSP_REG_CL_SPBFIFO_SPIB, size);
}

static void skl_cldma_cleanup_spb(struct sst_dsp *ctx)
{
	sst_dsp_shim_update_bits_unlocked(ctx,
			SKL_ADSP_REG_CL_SPBFIFO_SPBFCCTL,
			CL_SPBFIFO_SPBFCCTL_SPIBE_MASK,
			CL_SPBFIFO_SPBFCCTL_SPIBE(0));

	sst_dsp_shim_write_unlocked(ctx, SKL_ADSP_REG_CL_SPBFIFO_SPIB, 0);
}
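
/*
 * Tear down the code loader DMA: disable SPIB, clear the stream registers
 * and free the data and BDL DMA buffers.
 */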
static void skl_cldma_cleanup(struct sst_dsp *ctx)
{
	skl_cldma_cleanup_spb(ctx);
	skl_cldma_stream_clear(ctx);

	ctx->dsp_ops.free_dma_buf(ctx->dev, &ctx->cl_dev.dmab_data);
	ctx->dsp_ops.free_dma_buf(ctx->dev, &ctx->cl_dev.dmab_bdl);
}
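
/*
 * Wait for the interrupt handler to signal buffer completion; return -EIO
 * on timeout or on a DMA error, and reset the wake status afterwards.
 */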
static int skl_cldma_wait_interruptible(struct sst_dsp *ctx)
{
	int ret = 0;

	if (!wait_event_timeout(ctx->cl_dev.wait_queue,
				ctx->cl_dev.wait_condition,
				msecs_to_jiffies(SKL_WAIT_TIMEOUT))) {
		dev_err(ctx->dev, "%s: Wait timeout\n", __func__);
		ret = -EIO;
		goto cleanup;
	}

	dev_dbg(ctx->dev, "%s: Event wake\n", __func__);
	if (ctx->cl_dev.wake_status != SKL_CL_DMA_BUF_COMPLETE) {
		dev_err(ctx->dev, "%s: DMA Error\n", __func__);
		ret = -EIO;
	}

cleanup:
	ctx->cl_dev.wake_status = SKL_CL_DMA_STATUS_NONE;
	return ret;
}

static void skl_cldma_stop(struct sst_dsp *ctx)
{
	skl_cldma_stream_run(ctx, false);
}
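
/*
 * Copy one chunk of the firmware image into the ring buffer, wrapping at
 * the buffer boundary, then update the buffer offset, program SPIB and
 * optionally enable the interrupt and trigger the stream.
 */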
static void skl_cldma_fill_buffer(struct sst_dsp *ctx, unsigned int size,
		const void *curr_pos, bool intr_enable, bool trigger)
{
	dev_dbg(ctx->dev, "Size: %x, intr_enable: %d\n", size, intr_enable);
	dev_dbg(ctx->dev, "buf_pos_index:%d, trigger:%d\n",
			ctx->cl_dev.dma_buffer_offset, trigger);
	dev_dbg(ctx->dev, "spib position: %d\n", ctx->cl_dev.curr_spib_pos);

	/*
	 * Check if the size exceeds the buffer boundary. If it does, copy up
	 * to the end of the buffer and then copy the remainder from the
	 * start of the ring buffer.
	 */
	if (ctx->cl_dev.dma_buffer_offset + size > ctx->cl_dev.bufsize) {
		unsigned int size_b = ctx->cl_dev.bufsize -
					ctx->cl_dev.dma_buffer_offset;
		memcpy(ctx->cl_dev.dmab_data.area + ctx->cl_dev.dma_buffer_offset,
			curr_pos, size_b);
		size -= size_b;
		curr_pos += size_b;
		ctx->cl_dev.dma_buffer_offset = 0;
	}

	memcpy(ctx->cl_dev.dmab_data.area + ctx->cl_dev.dma_buffer_offset,
			curr_pos, size);

	if (ctx->cl_dev.curr_spib_pos == ctx->cl_dev.bufsize)
		ctx->cl_dev.dma_buffer_offset = 0;
	else
		ctx->cl_dev.dma_buffer_offset = ctx->cl_dev.curr_spib_pos;

	ctx->cl_dev.wait_condition = false;

	if (intr_enable)
		skl_cldma_int_enable(ctx);

	ctx->cl_dev.ops.cl_setup_spb(ctx, ctx->cl_dev.curr_spib_pos, trigger);
	if (trigger)
		ctx->cl_dev.ops.cl_trigger(ctx, true);
}

/*
 * The CL DMA doesn't have any way to update the transfer status until a BDL
 * buffer is fully transferred.
 *
 * So copying is divided into two parts:
 * 1. Interrupt on buffer done, used when the size to be transferred is more
 *    than the ring buffer size.
 * 2. Polling on the FW register when the data left to transfer doesn't fill
 *    the ring buffer. The caller takes care of polling the required status
 *    register to identify the transfer status.
 */
static int
skl_cldma_copy_to_buf(struct sst_dsp *ctx, const void *bin, u32 total_size)
{
	int ret = 0;
	bool start = true;
	unsigned int excess_bytes;
	u32 size;
	unsigned int bytes_left = total_size;
	const void *curr_pos = bin;

	if (total_size == 0)
		return -EINVAL;

	dev_dbg(ctx->dev, "%s: Total binary size: %u\n", __func__, bytes_left);

	while (bytes_left) {
		if (bytes_left > ctx->cl_dev.bufsize) {

			/*
			 * DMA transfers only up to the write pointer as
			 * updated in SPIB
			 */
			if (ctx->cl_dev.curr_spib_pos == 0)
				ctx->cl_dev.curr_spib_pos = ctx->cl_dev.bufsize;

			size = ctx->cl_dev.bufsize;
			skl_cldma_fill_buffer(ctx, size, curr_pos, true, start);

			start = false;
			ret = skl_cldma_wait_interruptible(ctx);
			if (ret < 0) {
				skl_cldma_stop(ctx);
				return ret;
			}
		} else {
			skl_cldma_int_disable(ctx);

			if ((ctx->cl_dev.curr_spib_pos + bytes_left)
						<= ctx->cl_dev.bufsize) {
				ctx->cl_dev.curr_spib_pos += bytes_left;
			} else {
				excess_bytes = bytes_left -
					(ctx->cl_dev.bufsize -
					ctx->cl_dev.curr_spib_pos);
				ctx->cl_dev.curr_spib_pos = excess_bytes;
			}

			size = bytes_left;
			skl_cldma_fill_buffer(ctx, size,
					curr_pos, false, start);
		}
		bytes_left -= size;
		curr_pos = curr_pos + size;
	}

	return ret;
}
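
/*
 * Interrupt handler for the code loader stream: record whether the buffer
 * completed or a DMA error was reported, then wake up the waiter.
 */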
void skl_cldma_process_intr(struct sst_dsp *ctx)
{
	u8 cl_dma_intr_status;

	cl_dma_intr_status =
		sst_dsp_shim_read_unlocked(ctx, SKL_ADSP_REG_CL_SD_STS);

	if (!(cl_dma_intr_status & SKL_CL_DMA_SD_INT_COMPLETE))
		ctx->cl_dev.wake_status = SKL_CL_DMA_ERR;
	else
		ctx->cl_dev.wake_status = SKL_CL_DMA_BUF_COMPLETE;

	ctx->cl_dev.wait_condition = true;
	wake_up(&ctx->cl_dev.wait_queue);
}
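
/*
 * Prepare the code loader DMA: populate the cl_dev ops, allocate the data
 * ring buffer and the BDL, and program the stream controller.
 */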
int skl_cldma_prepare(struct sst_dsp *ctx)
{
	int ret;
	u32 *bdl;

	ctx->cl_dev.bufsize = SKL_MAX_BUFFER_SIZE;

	/* Allocate cl ops */
	ctx->cl_dev.ops.cl_setup_bdle = skl_cldma_setup_bdle;
	ctx->cl_dev.ops.cl_setup_controller = skl_cldma_setup_controller;
	ctx->cl_dev.ops.cl_setup_spb = skl_cldma_setup_spb;
	ctx->cl_dev.ops.cl_cleanup_spb = skl_cldma_cleanup_spb;
	ctx->cl_dev.ops.cl_trigger = skl_cldma_stream_run;
	ctx->cl_dev.ops.cl_cleanup_controller = skl_cldma_cleanup;
	ctx->cl_dev.ops.cl_copy_to_dmabuf = skl_cldma_copy_to_buf;
	ctx->cl_dev.ops.cl_stop_dma = skl_cldma_stop;

	/* Allocate buffer for the base firmware */
	ret = ctx->dsp_ops.alloc_dma_buf(ctx->dev,
			&ctx->cl_dev.dmab_data, ctx->cl_dev.bufsize);
	if (ret < 0) {
		dev_err(ctx->dev, "Alloc buffer for base fw failed: %x\n", ret);
		return ret;
	}

	/* Setup Code loader BDL */
	ret = ctx->dsp_ops.alloc_dma_buf(ctx->dev,
			&ctx->cl_dev.dmab_bdl, PAGE_SIZE);
	if (ret < 0) {
		dev_err(ctx->dev, "Alloc buffer for bdl failed: %x\n", ret);
		ctx->dsp_ops.free_dma_buf(ctx->dev, &ctx->cl_dev.dmab_data);
		return ret;
	}
	bdl = (u32 *)ctx->cl_dev.dmab_bdl.area;

	/* Allocate BDLs */
	ctx->cl_dev.ops.cl_setup_bdle(ctx, &ctx->cl_dev.dmab_data,
			&bdl, ctx->cl_dev.bufsize, 1);
	ctx->cl_dev.ops.cl_setup_controller(ctx, &ctx->cl_dev.dmab_bdl,
			ctx->cl_dev.bufsize, ctx->cl_dev.frags);

	ctx->cl_dev.curr_spib_pos = 0;
	ctx->cl_dev.dma_buffer_offset = 0;
	init_waitqueue_head(&ctx->cl_dev.wait_queue);

	return ret;
}
);