/*
 * Intel SST Firmware Loader
 *
 * Copyright (C) 2013, Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
17 #include <linux/kernel.h>
18 #include <linux/slab.h>
19 #include <linux/sched.h>
20 #include <linux/firmware.h>
21 #include <linux/export.h>
22 #include <linux/platform_device.h>
23 #include <linux/dma-mapping.h>
24 #include <linux/dmaengine.h>
25 #include <linux/pci.h>
28 #include <asm/pgtable.h>
31 #include "sst-dsp-priv.h"
33 static void block_module_remove(struct sst_module
*module
);
35 static void sst_memcpy32(volatile void __iomem
*dest
, void *src
, u32 bytes
)
39 /* copy one 32 bit word at a time as 64 bit access is not supported */
40 for (i
= 0; i
< bytes
; i
+= 4)
41 memcpy_toio(dest
+ i
, src
+ i
, 4);
44 /* create new generic firmware object */
45 struct sst_fw
*sst_fw_new(struct sst_dsp
*dsp
,
46 const struct firmware
*fw
, void *private)
48 struct sst_fw
*sst_fw
;
51 if (!dsp
->ops
->parse_fw
)
54 sst_fw
= kzalloc(sizeof(*sst_fw
), GFP_KERNEL
);
59 sst_fw
->private = private;
60 sst_fw
->size
= fw
->size
;
62 /* allocate DMA buffer to store FW data */
63 sst_fw
->dma_buf
= dma_alloc_coherent(dsp
->dma_dev
, sst_fw
->size
,
64 &sst_fw
->dmable_fw_paddr
, GFP_DMA
| GFP_KERNEL
);
65 if (!sst_fw
->dma_buf
) {
66 dev_err(dsp
->dev
, "error: DMA alloc failed\n");
71 /* copy FW data to DMA-able memory */
72 memcpy((void *)sst_fw
->dma_buf
, (void *)fw
->data
, fw
->size
);
74 /* call core specific FW paser to load FW data into DSP */
75 err
= dsp
->ops
->parse_fw(sst_fw
);
77 dev_err(dsp
->dev
, "error: parse fw failed %d\n", err
);
81 mutex_lock(&dsp
->mutex
);
82 list_add(&sst_fw
->list
, &dsp
->fw_list
);
83 mutex_unlock(&dsp
->mutex
);
88 dma_free_coherent(dsp
->dev
, sst_fw
->size
,
90 sst_fw
->dmable_fw_paddr
);
94 EXPORT_SYMBOL_GPL(sst_fw_new
);
96 int sst_fw_reload(struct sst_fw
*sst_fw
)
98 struct sst_dsp
*dsp
= sst_fw
->dsp
;
101 dev_dbg(dsp
->dev
, "reloading firmware\n");
103 /* call core specific FW paser to load FW data into DSP */
104 ret
= dsp
->ops
->parse_fw(sst_fw
);
106 dev_err(dsp
->dev
, "error: parse fw failed %d\n", ret
);
110 EXPORT_SYMBOL_GPL(sst_fw_reload
);
112 void sst_fw_unload(struct sst_fw
*sst_fw
)
114 struct sst_dsp
*dsp
= sst_fw
->dsp
;
115 struct sst_module
*module
, *tmp
;
117 dev_dbg(dsp
->dev
, "unloading firmware\n");
119 mutex_lock(&dsp
->mutex
);
120 list_for_each_entry_safe(module
, tmp
, &dsp
->module_list
, list
) {
121 if (module
->sst_fw
== sst_fw
) {
122 block_module_remove(module
);
123 list_del(&module
->list
);
128 mutex_unlock(&dsp
->mutex
);
130 EXPORT_SYMBOL_GPL(sst_fw_unload
);
132 /* free single firmware object */
133 void sst_fw_free(struct sst_fw
*sst_fw
)
135 struct sst_dsp
*dsp
= sst_fw
->dsp
;
137 mutex_lock(&dsp
->mutex
);
138 list_del(&sst_fw
->list
);
139 mutex_unlock(&dsp
->mutex
);
141 dma_free_coherent(dsp
->dma_dev
, sst_fw
->size
, sst_fw
->dma_buf
,
142 sst_fw
->dmable_fw_paddr
);
145 EXPORT_SYMBOL_GPL(sst_fw_free
);
147 /* free all firmware objects */
148 void sst_fw_free_all(struct sst_dsp
*dsp
)
150 struct sst_fw
*sst_fw
, *t
;
152 mutex_lock(&dsp
->mutex
);
153 list_for_each_entry_safe(sst_fw
, t
, &dsp
->fw_list
, list
) {
155 list_del(&sst_fw
->list
);
156 dma_free_coherent(dsp
->dev
, sst_fw
->size
, sst_fw
->dma_buf
,
157 sst_fw
->dmable_fw_paddr
);
160 mutex_unlock(&dsp
->mutex
);
162 EXPORT_SYMBOL_GPL(sst_fw_free_all
);
164 /* create a new SST generic module from FW template */
165 struct sst_module
*sst_module_new(struct sst_fw
*sst_fw
,
166 struct sst_module_template
*template, void *private)
168 struct sst_dsp
*dsp
= sst_fw
->dsp
;
169 struct sst_module
*sst_module
;
171 sst_module
= kzalloc(sizeof(*sst_module
), GFP_KERNEL
);
172 if (sst_module
== NULL
)
175 sst_module
->id
= template->id
;
176 sst_module
->dsp
= dsp
;
177 sst_module
->sst_fw
= sst_fw
;
179 memcpy(&sst_module
->s
, &template->s
, sizeof(struct sst_module_data
));
180 memcpy(&sst_module
->p
, &template->p
, sizeof(struct sst_module_data
));
182 INIT_LIST_HEAD(&sst_module
->block_list
);
184 mutex_lock(&dsp
->mutex
);
185 list_add(&sst_module
->list
, &dsp
->module_list
);
186 mutex_unlock(&dsp
->mutex
);
190 EXPORT_SYMBOL_GPL(sst_module_new
);
192 /* free firmware module and remove from available list */
193 void sst_module_free(struct sst_module
*sst_module
)
195 struct sst_dsp
*dsp
= sst_module
->dsp
;
197 mutex_lock(&dsp
->mutex
);
198 list_del(&sst_module
->list
);
199 mutex_unlock(&dsp
->mutex
);
203 EXPORT_SYMBOL_GPL(sst_module_free
);
205 static struct sst_mem_block
*find_block(struct sst_dsp
*dsp
, int type
,
208 struct sst_mem_block
*block
;
210 list_for_each_entry(block
, &dsp
->free_block_list
, list
) {
211 if (block
->type
== type
&& block
->offset
== offset
)
218 static int block_alloc_contiguous(struct sst_module
*module
,
219 struct sst_module_data
*data
, u32 offset
, int size
)
221 struct list_head tmp
= LIST_HEAD_INIT(tmp
);
222 struct sst_dsp
*dsp
= module
->dsp
;
223 struct sst_mem_block
*block
;
226 block
= find_block(dsp
, data
->type
, offset
);
228 list_splice(&tmp
, &dsp
->free_block_list
);
232 list_move_tail(&block
->list
, &tmp
);
233 offset
+= block
->size
;
237 list_for_each_entry(block
, &tmp
, list
)
238 list_add(&block
->module_list
, &module
->block_list
);
240 list_splice(&tmp
, &dsp
->used_block_list
);
244 /* allocate free DSP blocks for module data - callers hold locks */
245 static int block_alloc(struct sst_module
*module
,
246 struct sst_module_data
*data
)
248 struct sst_dsp
*dsp
= module
->dsp
;
249 struct sst_mem_block
*block
, *tmp
;
255 /* find first free whole blocks that can hold module */
256 list_for_each_entry_safe(block
, tmp
, &dsp
->free_block_list
, list
) {
258 /* ignore blocks with wrong type */
259 if (block
->type
!= data
->type
)
262 if (data
->size
> block
->size
)
265 data
->offset
= block
->offset
;
266 block
->data_type
= data
->data_type
;
267 block
->bytes_used
= data
->size
% block
->size
;
268 list_add(&block
->module_list
, &module
->block_list
);
269 list_move(&block
->list
, &dsp
->used_block_list
);
270 dev_dbg(dsp
->dev
, " *module %d added block %d:%d\n",
271 module
->id
, block
->type
, block
->index
);
275 /* then find free multiple blocks that can hold module */
276 list_for_each_entry_safe(block
, tmp
, &dsp
->free_block_list
, list
) {
278 /* ignore blocks with wrong type */
279 if (block
->type
!= data
->type
)
282 /* do we span > 1 blocks */
283 if (data
->size
> block
->size
) {
284 ret
= block_alloc_contiguous(module
, data
,
285 block
->offset
, data
->size
);
291 /* not enough free block space */
295 /* remove module from memory - callers hold locks */
296 static void block_module_remove(struct sst_module
*module
)
298 struct sst_mem_block
*block
, *tmp
;
299 struct sst_dsp
*dsp
= module
->dsp
;
302 /* disable each block */
303 list_for_each_entry(block
, &module
->block_list
, module_list
) {
305 if (block
->ops
&& block
->ops
->disable
) {
306 err
= block
->ops
->disable(block
);
309 "error: cant disable block %d:%d\n",
310 block
->type
, block
->index
);
314 /* mark each block as free */
315 list_for_each_entry_safe(block
, tmp
, &module
->block_list
, module_list
) {
316 list_del(&block
->module_list
);
317 list_move(&block
->list
, &dsp
->free_block_list
);
321 /* prepare the memory block to receive data from host - callers hold locks */
322 static int block_module_prepare(struct sst_module
*module
)
324 struct sst_mem_block
*block
;
327 /* enable each block so that's it'e ready for module P/S data */
328 list_for_each_entry(block
, &module
->block_list
, module_list
) {
330 if (block
->ops
&& block
->ops
->enable
) {
331 ret
= block
->ops
->enable(block
);
333 dev_err(module
->dsp
->dev
,
334 "error: cant disable block %d:%d\n",
335 block
->type
, block
->index
);
343 list_for_each_entry(block
, &module
->block_list
, module_list
) {
344 if (block
->ops
&& block
->ops
->disable
)
345 block
->ops
->disable(block
);
350 /* allocate memory blocks for static module addresses - callers hold locks */
351 static int block_alloc_fixed(struct sst_module
*module
,
352 struct sst_module_data
*data
)
354 struct sst_dsp
*dsp
= module
->dsp
;
355 struct sst_mem_block
*block
, *tmp
;
356 u32 end
= data
->offset
+ data
->size
, block_end
;
359 /* only IRAM/DRAM blocks are managed */
360 if (data
->type
!= SST_MEM_IRAM
&& data
->type
!= SST_MEM_DRAM
)
363 /* are blocks already attached to this module */
364 list_for_each_entry_safe(block
, tmp
, &module
->block_list
, module_list
) {
366 /* force compacting mem blocks of the same data_type */
367 if (block
->data_type
!= data
->data_type
)
370 block_end
= block
->offset
+ block
->size
;
372 /* find block that holds section */
373 if (data
->offset
>= block
->offset
&& end
< block_end
)
376 /* does block span more than 1 section */
377 if (data
->offset
>= block
->offset
&& data
->offset
< block_end
) {
379 err
= block_alloc_contiguous(module
, data
,
380 block
->offset
+ block
->size
,
381 data
->size
- block
->size
);
385 /* module already owns blocks */
390 /* find first free blocks that can hold section in free list */
391 list_for_each_entry_safe(block
, tmp
, &dsp
->free_block_list
, list
) {
392 block_end
= block
->offset
+ block
->size
;
394 /* find block that holds section */
395 if (data
->offset
>= block
->offset
&& end
< block_end
) {
398 block
->data_type
= data
->data_type
;
399 list_move(&block
->list
, &dsp
->used_block_list
);
400 list_add(&block
->module_list
, &module
->block_list
);
404 /* does block span more than 1 section */
405 if (data
->offset
>= block
->offset
&& data
->offset
< block_end
) {
407 err
= block_alloc_contiguous(module
, data
,
408 block
->offset
, data
->size
);
420 /* Load fixed module data into DSP memory blocks */
421 int sst_module_insert_fixed_block(struct sst_module
*module
,
422 struct sst_module_data
*data
)
424 struct sst_dsp
*dsp
= module
->dsp
;
427 mutex_lock(&dsp
->mutex
);
429 /* alloc blocks that includes this section */
430 ret
= block_alloc_fixed(module
, data
);
433 "error: no free blocks for section at offset 0x%x size 0x%x\n",
434 data
->offset
, data
->size
);
435 mutex_unlock(&dsp
->mutex
);
439 /* prepare DSP blocks for module copy */
440 ret
= block_module_prepare(module
);
442 dev_err(dsp
->dev
, "error: fw module prepare failed\n");
446 /* copy partial module data to blocks */
447 sst_memcpy32(dsp
->addr
.lpe
+ data
->offset
, data
->data
, data
->size
);
449 mutex_unlock(&dsp
->mutex
);
453 block_module_remove(module
);
454 mutex_unlock(&dsp
->mutex
);
457 EXPORT_SYMBOL_GPL(sst_module_insert_fixed_block
);
459 /* Unload entire module from DSP memory */
460 int sst_block_module_remove(struct sst_module
*module
)
462 struct sst_dsp
*dsp
= module
->dsp
;
464 mutex_lock(&dsp
->mutex
);
465 block_module_remove(module
);
466 mutex_unlock(&dsp
->mutex
);
469 EXPORT_SYMBOL_GPL(sst_block_module_remove
);
471 /* register a DSP memory block for use with FW based modules */
472 struct sst_mem_block
*sst_mem_block_register(struct sst_dsp
*dsp
, u32 offset
,
473 u32 size
, enum sst_mem_type type
, struct sst_block_ops
*ops
, u32 index
,
476 struct sst_mem_block
*block
;
478 block
= kzalloc(sizeof(*block
), GFP_KERNEL
);
482 block
->offset
= offset
;
484 block
->index
= index
;
487 block
->private = private;
490 mutex_lock(&dsp
->mutex
);
491 list_add(&block
->list
, &dsp
->free_block_list
);
492 mutex_unlock(&dsp
->mutex
);
496 EXPORT_SYMBOL_GPL(sst_mem_block_register
);
498 /* unregister all DSP memory blocks */
499 void sst_mem_block_unregister_all(struct sst_dsp
*dsp
)
501 struct sst_mem_block
*block
, *tmp
;
503 mutex_lock(&dsp
->mutex
);
505 /* unregister used blocks */
506 list_for_each_entry_safe(block
, tmp
, &dsp
->used_block_list
, list
) {
507 list_del(&block
->list
);
511 /* unregister free blocks */
512 list_for_each_entry_safe(block
, tmp
, &dsp
->free_block_list
, list
) {
513 list_del(&block
->list
);
517 mutex_unlock(&dsp
->mutex
);
519 EXPORT_SYMBOL_GPL(sst_mem_block_unregister_all
);
521 /* allocate scratch buffer blocks */
522 struct sst_module
*sst_mem_block_alloc_scratch(struct sst_dsp
*dsp
)
524 struct sst_module
*sst_module
, *scratch
;
525 struct sst_mem_block
*block
, *tmp
;
529 scratch
= kzalloc(sizeof(struct sst_module
), GFP_KERNEL
);
533 mutex_lock(&dsp
->mutex
);
535 /* calculate required scratch size */
536 list_for_each_entry(sst_module
, &dsp
->module_list
, list
) {
537 if (scratch
->s
.size
< sst_module
->s
.size
)
538 scratch
->s
.size
= sst_module
->s
.size
;
541 dev_dbg(dsp
->dev
, "scratch buffer required is %d bytes\n",
544 /* init scratch module */
546 scratch
->s
.type
= SST_MEM_DRAM
;
547 scratch
->s
.data_type
= SST_DATA_S
;
548 INIT_LIST_HEAD(&scratch
->block_list
);
550 /* check free blocks before looking at used blocks for space */
551 if (!list_empty(&dsp
->free_block_list
))
552 block
= list_first_entry(&dsp
->free_block_list
,
553 struct sst_mem_block
, list
);
555 block
= list_first_entry(&dsp
->used_block_list
,
556 struct sst_mem_block
, list
);
557 block_size
= block
->size
;
559 /* allocate blocks for module scratch buffers */
560 dev_dbg(dsp
->dev
, "allocating scratch blocks\n");
561 ret
= block_alloc(scratch
, &scratch
->s
);
563 dev_err(dsp
->dev
, "error: can't alloc scratch blocks\n");
567 /* assign the same offset of scratch to each module */
568 list_for_each_entry(sst_module
, &dsp
->module_list
, list
)
569 sst_module
->s
.offset
= scratch
->s
.offset
;
571 mutex_unlock(&dsp
->mutex
);
575 list_for_each_entry_safe(block
, tmp
, &scratch
->block_list
, module_list
)
576 list_del(&block
->module_list
);
577 mutex_unlock(&dsp
->mutex
);
580 EXPORT_SYMBOL_GPL(sst_mem_block_alloc_scratch
);
582 /* free all scratch blocks */
583 void sst_mem_block_free_scratch(struct sst_dsp
*dsp
,
584 struct sst_module
*scratch
)
586 struct sst_mem_block
*block
, *tmp
;
588 mutex_lock(&dsp
->mutex
);
590 list_for_each_entry_safe(block
, tmp
, &scratch
->block_list
, module_list
)
591 list_del(&block
->module_list
);
593 mutex_unlock(&dsp
->mutex
);
595 EXPORT_SYMBOL_GPL(sst_mem_block_free_scratch
);
597 /* get a module from it's unique ID */
598 struct sst_module
*sst_module_get_from_id(struct sst_dsp
*dsp
, u32 id
)
600 struct sst_module
*module
;
602 mutex_lock(&dsp
->mutex
);
604 list_for_each_entry(module
, &dsp
->module_list
, list
) {
605 if (module
->id
== id
) {
606 mutex_unlock(&dsp
->mutex
);
611 mutex_unlock(&dsp
->mutex
);
614 EXPORT_SYMBOL_GPL(sst_module_get_from_id
);