/*
 * skl-sst-cldma.c - Code Loader DMA handler
 *
 * Copyright (C) 2015, Intel Corporation.
 * Author: Subhransu S. Prusty <subhransu.s.prusty@intel.com>
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */

#include <linux/device.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include "../common/sst-dsp.h"
#include "../common/sst-dsp-priv.h"

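/*
 * Enable/disable the code loader DMA interrupt by toggling the
 * SKL_ADSPIC_CL_DMA bit in the ADSP interrupt control register, so that
 * buffer-completion interrupts are only delivered while a transfer is
 * in flight.
 */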
static void skl_cldma_int_enable(struct sst_dsp *ctx)
{
	sst_dsp_shim_update_bits_unlocked(ctx, SKL_ADSP_REG_ADSPIC,
			SKL_ADSPIC_CL_DMA, SKL_ADSPIC_CL_DMA);
}

void skl_cldma_int_disable(struct sst_dsp *ctx)
{
	sst_dsp_shim_update_bits_unlocked(ctx,
			SKL_ADSP_REG_ADSPIC, SKL_ADSPIC_CL_DMA, 0);
}

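/*
 * Set or clear the stream Run bit in the code loader stream descriptor
 * control register, then poll briefly until the hardware reflects the
 * requested state; log an error if it never does.
 */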
static void skl_cldma_stream_run(struct sst_dsp *ctx, bool enable)
{
	unsigned char val;
	int timeout;

	sst_dsp_shim_update_bits_unlocked(ctx,
			SKL_ADSP_REG_CL_SD_CTL,
			CL_SD_CTL_RUN_MASK, CL_SD_CTL_RUN(enable));

	udelay(3);
	timeout = 300;
	while (timeout-- > 0) {
		/* wait for hardware to report the new state of the Run bit */
		val = sst_dsp_shim_read(ctx, SKL_ADSP_REG_CL_SD_CTL) &
			CL_SD_CTL_RUN_MASK;
		if (enable && val)
			break;
		else if (!enable && !val)
			break;
	}

	if (timeout == 0)
		dev_err(ctx->dev, "Failed to set Run bit=%d enable=%d\n", val, enable);
}

static void skl_cldma_stream_clear(struct sst_dsp *ctx)
{
	/* make sure Run bit is cleared before setting stream register */
	skl_cldma_stream_run(ctx, 0);

	sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_CL_SD_CTL,
				CL_SD_CTL_IOCE_MASK, CL_SD_CTL_IOCE(0));
	sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_CL_SD_CTL,
				CL_SD_CTL_FEIE_MASK, CL_SD_CTL_FEIE(0));
	sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_CL_SD_CTL,
				CL_SD_CTL_DEIE_MASK, CL_SD_CTL_DEIE(0));
	sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_CL_SD_CTL,
				CL_SD_CTL_STRM_MASK, CL_SD_CTL_STRM(0));

	sst_dsp_shim_write(ctx, SKL_ADSP_REG_CL_SD_BDLPL, CL_SD_BDLPLBA(0));
	sst_dsp_shim_write(ctx, SKL_ADSP_REG_CL_SD_BDLPU, 0);

	sst_dsp_shim_write(ctx, SKL_ADSP_REG_CL_SD_CBL, 0);
	sst_dsp_shim_write(ctx, SKL_ADSP_REG_CL_SD_LVI, 0);
}

/* Code loader helper APIs */
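/*
 * Build the buffer descriptor list (BDL) for the code loader stream: each
 * four-word entry carries the 64-bit physical address of one ring-buffer
 * fragment, its length, and an interrupt-on-completion flag that is set
 * only on the last entry when requested. ctx->cl_dev.frags counts the
 * entries written.
 */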
static void skl_cldma_setup_bdle(struct sst_dsp *ctx,
		struct snd_dma_buffer *dmab_data,
		u32 **bdlp, int size, int with_ioc)
{
	u32 *bdl = *bdlp;

	ctx->cl_dev.frags = 0;
	while (size > 0) {
		phys_addr_t addr = virt_to_phys(dmab_data->area +
				(ctx->cl_dev.frags * ctx->cl_dev.bufsize));

		bdl[0] = cpu_to_le32(lower_32_bits(addr));
		bdl[1] = cpu_to_le32(upper_32_bits(addr));

		bdl[2] = cpu_to_le32(ctx->cl_dev.bufsize);

		size -= ctx->cl_dev.bufsize;
		bdl[3] = (size || !with_ioc) ? 0 : cpu_to_le32(0x01);

		bdl += 4;
		ctx->cl_dev.frags++;
	}
}

/*
 * Setup the code loader DMA controller:
 * configure the registers with the DMA buffer address, the interrupt
 * behaviour and the total buffer size to be transferred.
 *
 * Note: the transfer uses channel 1 (FW_CL_STREAM_NUMBER).
 */
static void skl_cldma_setup_controller(struct sst_dsp *ctx,
		struct snd_dma_buffer *dmab_bdl, unsigned int max_size,
		u32 count)
{
	skl_cldma_stream_clear(ctx);
	sst_dsp_shim_write(ctx, SKL_ADSP_REG_CL_SD_BDLPL,
			CL_SD_BDLPLBA(dmab_bdl->addr));
	sst_dsp_shim_write(ctx, SKL_ADSP_REG_CL_SD_BDLPU,
			CL_SD_BDLPUBA(dmab_bdl->addr));

	sst_dsp_shim_write(ctx, SKL_ADSP_REG_CL_SD_CBL, max_size);
	sst_dsp_shim_write(ctx, SKL_ADSP_REG_CL_SD_LVI, count - 1);
	sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_CL_SD_CTL,
			CL_SD_CTL_IOCE_MASK, CL_SD_CTL_IOCE(1));
	sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_CL_SD_CTL,
			CL_SD_CTL_FEIE_MASK, CL_SD_CTL_FEIE(1));
	sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_CL_SD_CTL,
			CL_SD_CTL_DEIE_MASK, CL_SD_CTL_DEIE(1));
	sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_CL_SD_CTL,
			CL_SD_CTL_STRM_MASK, CL_SD_CTL_STRM(FW_CL_STREAM_NUMBER));
}

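/*
 * Program the software position in buffer (SPIB): optionally enable SPIB
 * mode and write the position up to which the DMA engine may consume data
 * from the ring buffer.
 */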
static void skl_cldma_setup_spb(struct sst_dsp *ctx,
		unsigned int size, bool enable)
{
	if (enable)
		sst_dsp_shim_update_bits_unlocked(ctx,
				SKL_ADSP_REG_CL_SPBFIFO_SPBFCCTL,
				CL_SPBFIFO_SPBFCCTL_SPIBE_MASK,
				CL_SPBFIFO_SPBFCCTL_SPIBE(1));

	sst_dsp_shim_write_unlocked(ctx, SKL_ADSP_REG_CL_SPBFIFO_SPIB, size);
}

static void skl_cldma_cleanup_spb(struct sst_dsp *ctx)
{
	sst_dsp_shim_update_bits_unlocked(ctx,
			SKL_ADSP_REG_CL_SPBFIFO_SPBFCCTL,
			CL_SPBFIFO_SPBFCCTL_SPIBE_MASK,
			CL_SPBFIFO_SPBFCCTL_SPIBE(0));

	sst_dsp_shim_write_unlocked(ctx, SKL_ADSP_REG_CL_SPBFIFO_SPIB, 0);
}

static void skl_cldma_cleanup(struct sst_dsp *ctx)
{
	skl_cldma_cleanup_spb(ctx);
	skl_cldma_stream_clear(ctx);

	ctx->dsp_ops.free_dma_buf(ctx->dev, &ctx->cl_dev.dmab_data);
	ctx->dsp_ops.free_dma_buf(ctx->dev, &ctx->cl_dev.dmab_bdl);
}

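/*
 * Sleep until the interrupt handler reports a wake status, bounded by
 * SKL_WAIT_TIMEOUT. Anything other than SKL_CL_DMA_BUF_COMPLETE is treated
 * as an error; the wake status is reset before returning.
 */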
static int skl_cldma_wait_interruptible(struct sst_dsp *ctx)
{
	int ret = 0;

	if (!wait_event_timeout(ctx->cl_dev.wait_queue,
				ctx->cl_dev.wait_condition,
				msecs_to_jiffies(SKL_WAIT_TIMEOUT))) {
		dev_err(ctx->dev, "%s: Wait timeout\n", __func__);
		ret = -EIO;
		goto cleanup;
	}

	dev_dbg(ctx->dev, "%s: Event wake\n", __func__);
	if (ctx->cl_dev.wake_status != SKL_CL_DMA_BUF_COMPLETE) {
		dev_err(ctx->dev, "%s: DMA Error\n", __func__);
		ret = -EIO;
	}

cleanup:
	ctx->cl_dev.wake_status = SKL_CL_DMA_STATUS_NONE;
	return ret;
}

static void skl_cldma_stop(struct sst_dsp *ctx)
{
	skl_cldma_stream_run(ctx, false);
}

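/*
 * Copy one chunk of the binary into the ring buffer, wrapping around the
 * end of the buffer when needed, then update SPIB and, if requested,
 * enable the completion interrupt and trigger the stream.
 */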
static void skl_cldma_fill_buffer(struct sst_dsp *ctx, unsigned int size,
		const void *curr_pos, bool intr_enable, bool trigger)
{
	dev_dbg(ctx->dev, "Size: %x, intr_enable: %d\n", size, intr_enable);
	dev_dbg(ctx->dev, "buf_pos_index:%d, trigger:%d\n",
			ctx->cl_dev.dma_buffer_offset, trigger);
	dev_dbg(ctx->dev, "spib position: %d\n", ctx->cl_dev.curr_spib_pos);

	/*
	 * Check if the size exceeds the buffer boundary. If it does, copy up
	 * to the end of the ring buffer first and copy the remainder from
	 * the start of the ring buffer.
	 */
	if (ctx->cl_dev.dma_buffer_offset + size > ctx->cl_dev.bufsize) {
		unsigned int size_b = ctx->cl_dev.bufsize -
					ctx->cl_dev.dma_buffer_offset;

		memcpy(ctx->cl_dev.dmab_data.area + ctx->cl_dev.dma_buffer_offset,
			curr_pos, size_b);
		size -= size_b;
		curr_pos += size_b;
		ctx->cl_dev.dma_buffer_offset = 0;
	}

	memcpy(ctx->cl_dev.dmab_data.area + ctx->cl_dev.dma_buffer_offset,
			curr_pos, size);

	if (ctx->cl_dev.curr_spib_pos == ctx->cl_dev.bufsize)
		ctx->cl_dev.dma_buffer_offset = 0;
	else
		ctx->cl_dev.dma_buffer_offset = ctx->cl_dev.curr_spib_pos;

	ctx->cl_dev.wait_condition = false;

	if (intr_enable)
		skl_cldma_int_enable(ctx);

	ctx->cl_dev.ops.cl_setup_spb(ctx, ctx->cl_dev.curr_spib_pos, trigger);
	if (trigger)
		ctx->cl_dev.ops.cl_trigger(ctx, true);
}

/*
 * The CL DMA has no way to report the transfer status until a BDL buffer
 * has been fully transferred.
 *
 * Copying is therefore split into two parts:
 * 1. Interrupt on buffer done, used while the data left to transfer is
 *    larger than the ring buffer.
 * 2. Polling on a firmware register, used once the remaining data no longer
 *    fills the ring buffer. The caller takes care of polling the required
 *    status register to identify the transfer status.
 */
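/*
 * For example, with a binary several times larger than the ring buffer,
 * each full-buffer chunk below is queued with an interrupt on completion
 * and the thread sleeps in skl_cldma_wait_interruptible(); the final
 * partial chunk is queued with interrupts disabled and the caller polls
 * the firmware status register instead.
 */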
static int
skl_cldma_copy_to_buf(struct sst_dsp *ctx, const void *bin, u32 total_size)
{
	int ret = 0;
	bool start = true;
	unsigned int excess_bytes;
	u32 size;
	unsigned int bytes_left = total_size;
	const void *curr_pos = bin;

	if (total_size <= 0)
		return -EINVAL;

	dev_dbg(ctx->dev, "%s: Total binary size: %u\n", __func__, bytes_left);

	while (bytes_left) {
		if (bytes_left > ctx->cl_dev.bufsize) {

			/*
			 * dma transfers only up to the write pointer as
			 * programmed in spib
			 */
			if (ctx->cl_dev.curr_spib_pos == 0)
				ctx->cl_dev.curr_spib_pos = ctx->cl_dev.bufsize;

			size = ctx->cl_dev.bufsize;
			skl_cldma_fill_buffer(ctx, size, curr_pos, true, start);

			start = false;
			ret = skl_cldma_wait_interruptible(ctx);
			if (ret < 0) {
				skl_cldma_stop(ctx);
				return ret;
			}
		} else {
			skl_cldma_int_disable(ctx);

			if ((ctx->cl_dev.curr_spib_pos + bytes_left)
					<= ctx->cl_dev.bufsize) {
				ctx->cl_dev.curr_spib_pos += bytes_left;
			} else {
				excess_bytes = bytes_left -
					(ctx->cl_dev.bufsize -
					ctx->cl_dev.curr_spib_pos);
				ctx->cl_dev.curr_spib_pos = excess_bytes;
			}

			size = bytes_left;
			skl_cldma_fill_buffer(ctx, size,
					curr_pos, false, start);
		}
		bytes_left -= size;
		curr_pos = curr_pos + size;
	}

	return ret;
}

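/*
 * Code loader DMA interrupt handling: read the stream status, record
 * whether the buffer completed or an error occurred, and wake the thread
 * sleeping in skl_cldma_wait_interruptible().
 */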
void skl_cldma_process_intr(struct sst_dsp *ctx)
{
	u8 cl_dma_intr_status;

	cl_dma_intr_status =
		sst_dsp_shim_read_unlocked(ctx, SKL_ADSP_REG_CL_SD_STS);

	if (!(cl_dma_intr_status & SKL_CL_DMA_SD_INT_COMPLETE))
		ctx->cl_dev.wake_status = SKL_CL_DMA_ERR;
	else
		ctx->cl_dev.wake_status = SKL_CL_DMA_BUF_COMPLETE;

	ctx->cl_dev.wait_condition = true;
	wake_up(&ctx->cl_dev.wait_queue);
}

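/*
 * One-time setup of the code loader DMA: populate the cl ops table,
 * allocate the data and BDL DMA buffers, build the descriptor list,
 * program the controller and initialise the bookkeeping used by the
 * copy path.
 */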
int skl_cldma_prepare(struct sst_dsp *ctx)
{
	int ret;
	u32 *bdl;

	ctx->cl_dev.bufsize = SKL_MAX_BUFFER_SIZE;

	/* Set up the code loader ops */
	ctx->cl_dev.ops.cl_setup_bdle = skl_cldma_setup_bdle;
	ctx->cl_dev.ops.cl_setup_controller = skl_cldma_setup_controller;
	ctx->cl_dev.ops.cl_setup_spb = skl_cldma_setup_spb;
	ctx->cl_dev.ops.cl_cleanup_spb = skl_cldma_cleanup_spb;
	ctx->cl_dev.ops.cl_trigger = skl_cldma_stream_run;
	ctx->cl_dev.ops.cl_cleanup_controller = skl_cldma_cleanup;
	ctx->cl_dev.ops.cl_copy_to_dmabuf = skl_cldma_copy_to_buf;
	ctx->cl_dev.ops.cl_stop_dma = skl_cldma_stop;

	/* Allocate the DMA buffer holding the firmware data */
	ret = ctx->dsp_ops.alloc_dma_buf(ctx->dev,
			&ctx->cl_dev.dmab_data, ctx->cl_dev.bufsize);
	if (ret < 0) {
		dev_err(ctx->dev, "Alloc buffer for base fw failed: %x\n", ret);
		return ret;
	}

	/* Setup Code loader BDL */
	ret = ctx->dsp_ops.alloc_dma_buf(ctx->dev,
			&ctx->cl_dev.dmab_bdl, PAGE_SIZE);
	if (ret < 0) {
		dev_err(ctx->dev, "Alloc buffer for bdl failed: %x\n", ret);
		ctx->dsp_ops.free_dma_buf(ctx->dev, &ctx->cl_dev.dmab_data);
		return ret;
	}
	bdl = (u32 *)ctx->cl_dev.dmab_bdl.area;

	/* Build the BDL and program the controller */
	ctx->cl_dev.ops.cl_setup_bdle(ctx, &ctx->cl_dev.dmab_data,
			&bdl, ctx->cl_dev.bufsize, 1);
	ctx->cl_dev.ops.cl_setup_controller(ctx, &ctx->cl_dev.dmab_bdl,
			ctx->cl_dev.bufsize, ctx->cl_dev.frags);

	ctx->cl_dev.curr_spib_pos = 0;
	ctx->cl_dev.dma_buffer_offset = 0;
	init_waitqueue_head(&ctx->cl_dev.wait_queue);

	return ret;
}