/*
 * Intel SST Firmware Loader
 *
 * Copyright (C) 2013, Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/firmware.h>
#include <linux/export.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/pci.h>
#include <linux/acpi.h>

/* supported DMA engine drivers */
#include <linux/dma/dw.h>

#include <asm/pgtable.h>

#include "sst-dsp.h"
#include "sst-dsp-priv.h"
#define SST_DMA_RESOURCES	2
#define SST_DSP_DMA_MAX_BURST	0x3
#define SST_HSW_BLOCK_ANY	0xffffffff

#define SST_HSW_MASK_DMA_ADDR_DSP 0xfff00000
struct sst_dma {
	struct sst_dsp *sst;

	struct dw_dma_chip *chip;

	struct dma_async_tx_descriptor *desc;
	struct dma_chan *ch;
};
static inline void sst_memcpy32(volatile void __iomem *dest, void *src,
	u32 bytes)
{
	/* __iowrite32_copy() uses a 32-bit word count, so divide bytes by 4 */
	__iowrite32_copy((void *)dest, src, bytes / 4);
}
static void sst_dma_transfer_complete(void *arg)
{
	struct sst_dsp *sst = (struct sst_dsp *)arg;

	dev_dbg(sst->dev, "DMA: callback\n");
}
static int sst_dsp_dma_copy(struct sst_dsp *sst, dma_addr_t dest_addr,
	dma_addr_t src_addr, size_t size)
{
	struct dma_async_tx_descriptor *desc;
	struct sst_dma *dma = sst->dma;

	if (dma->ch == NULL) {
		dev_err(sst->dev, "error: no DMA channel\n");
		return -ENODEV;
	}

	dev_dbg(sst->dev, "DMA: src: 0x%lx dest 0x%lx size %zu\n",
		(unsigned long)src_addr, (unsigned long)dest_addr, size);

	desc = dma->ch->device->device_prep_dma_memcpy(dma->ch, dest_addr,
		src_addr, size, DMA_CTRL_ACK);
	if (!desc) {
		dev_err(sst->dev, "error: dma prep memcpy failed\n");
		return -EINVAL;
	}

	desc->callback = sst_dma_transfer_complete;
	desc->callback_param = sst;

	desc->tx_submit(desc);
	dma_wait_for_async_tx(desc);

	return 0;
}
/* copy to DSP */
int sst_dsp_dma_copyto(struct sst_dsp *sst, dma_addr_t dest_addr,
	dma_addr_t src_addr, size_t size)
{
	return sst_dsp_dma_copy(sst, dest_addr | SST_HSW_MASK_DMA_ADDR_DSP,
			src_addr, size);
}
EXPORT_SYMBOL_GPL(sst_dsp_dma_copyto);

/* copy from DSP */
int sst_dsp_dma_copyfrom(struct sst_dsp *sst, dma_addr_t dest_addr,
	dma_addr_t src_addr, size_t size)
{
	return sst_dsp_dma_copy(sst, dest_addr,
			src_addr | SST_HSW_MASK_DMA_ADDR_DSP, size);
}
EXPORT_SYMBOL_GPL(sst_dsp_dma_copyfrom);
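
/*
 * Usage sketch (illustrative only, not called by this driver): a typical
 * host-to-DSP copy with the helpers above. The destination is expressed as
 * an LPE-relative address, as in sst_module_alloc_blocks() below;
 * sst_dsp_dma_copyto() ORs in SST_HSW_MASK_DMA_ADDR_DSP so the transfer
 * targets the DSP side. The buffer and offset names here are hypothetical.
 */
static int __maybe_unused sst_dma_copy_example(struct sst_dsp *sst,
	dma_addr_t host_buf, u32 lpe_offset, size_t size)
{
	int ret;

	/* a DMA channel must be held around the copy */
	ret = sst_dsp_dma_get_channel(sst, 0);
	if (ret < 0)
		return ret;

	ret = sst_dsp_dma_copyto(sst, sst->addr.lpe_base + lpe_offset,
		host_buf, size);

	sst_dsp_dma_put_channel(sst);
	return ret;
}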
/* remove module from memory - callers hold locks */
static void block_list_remove(struct sst_dsp *dsp,
	struct list_head *block_list)
{
	struct sst_mem_block *block, *tmp;
	int err;

	/* disable each block */
	list_for_each_entry(block, block_list, module_list) {

		if (block->ops && block->ops->disable) {
			err = block->ops->disable(block);
			if (err < 0)
				dev_err(dsp->dev,
					"error: can't disable block %d:%d\n",
					block->type, block->index);
		}
	}

	/* mark each block as free */
	list_for_each_entry_safe(block, tmp, block_list, module_list) {
		list_del(&block->module_list);
		list_move(&block->list, &dsp->free_block_list);
		dev_dbg(dsp->dev, "block freed %d:%d at offset 0x%x\n",
			block->type, block->index, block->offset);
	}
}
/* prepare the memory block to receive data from host - callers hold locks */
static int block_list_prepare(struct sst_dsp *dsp,
	struct list_head *block_list)
{
	struct sst_mem_block *block;
	int ret = 0;

	/* enable each block so that it's ready for data */
	list_for_each_entry(block, block_list, module_list) {

		if (block->ops && block->ops->enable && !block->users) {
			ret = block->ops->enable(block);
			if (ret < 0) {
				dev_err(dsp->dev,
					"error: can't enable block %d:%d\n",
					block->type, block->index);
				goto err;
			}
		}
	}
	return ret;

err:
	/* disable blocks that were enabled before the failure */
	list_for_each_entry(block, block_list, module_list) {
		if (block->ops && block->ops->disable)
			block->ops->disable(block);
	}
	return ret;
}
static struct dw_dma_chip *dw_probe(struct device *dev, struct resource *mem,
	int irq)
{
	struct dw_dma_chip *chip;
	int err;

	chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return ERR_PTR(-ENOMEM);

	chip->irq = irq;
	chip->regs = devm_ioremap_resource(dev, mem);
	if (IS_ERR(chip->regs))
		return ERR_CAST(chip->regs);

	err = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(31));
	if (err)
		return ERR_PTR(err);

	chip->dev = dev;

	err = dw_dma_probe(chip, NULL);
	if (err)
		return ERR_PTR(err);

	return chip;
}

static void dw_remove(struct dw_dma_chip *chip)
{
	dw_dma_remove(chip);
}
static bool dma_chan_filter(struct dma_chan *chan, void *param)
{
	struct sst_dsp *dsp = (struct sst_dsp *)param;

	return chan->device->dev == dsp->dma_dev;
}
int sst_dsp_dma_get_channel(struct sst_dsp *dsp, int chan_id)
{
	struct sst_dma *dma = dsp->dma;
	struct dma_slave_config slave;
	dma_cap_mask_t mask;
	int ret;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	dma_cap_set(DMA_MEMCPY, mask);

	dma->ch = dma_request_channel(mask, dma_chan_filter, dsp);
	if (dma->ch == NULL) {
		dev_err(dsp->dev, "error: DMA request channel failed\n");
		return -EIO;
	}

	memset(&slave, 0, sizeof(slave));
	slave.direction = DMA_MEM_TO_DEV;
	slave.src_addr_width =
		slave.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	slave.src_maxburst = slave.dst_maxburst = SST_DSP_DMA_MAX_BURST;

	ret = dmaengine_slave_config(dma->ch, &slave);
	if (ret) {
		dev_err(dsp->dev, "error: unable to set DMA slave config %d\n",
			ret);
		dma_release_channel(dma->ch);
		dma->ch = NULL;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(sst_dsp_dma_get_channel);
void sst_dsp_dma_put_channel(struct sst_dsp *dsp)
{
	struct sst_dma *dma = dsp->dma;

	if (!dma->ch)
		return;

	dma_release_channel(dma->ch);
	dma->ch = NULL;
}
EXPORT_SYMBOL_GPL(sst_dsp_dma_put_channel);
int sst_dma_new(struct sst_dsp *sst)
{
	struct sst_pdata *sst_pdata = sst->pdata;
	struct sst_dma *dma;
	struct resource mem;
	const char *dma_dev_name;
	int ret = 0;

	if (sst->pdata->resindex_dma_base == -1)
		/* DMA is not used, return and squelch error messages */
		return 0;

	/* configure the correct platform data for whatever DMA engine
	 * is attached to the ADSP IP. */
	switch (sst->pdata->dma_engine) {
	case SST_DMA_TYPE_DW:
		dma_dev_name = "dw_dmac";
		break;
	default:
		dev_err(sst->dev, "error: invalid DMA engine %d\n",
			sst->pdata->dma_engine);
		return -EINVAL;
	}

	dma = devm_kzalloc(sst->dev, sizeof(struct sst_dma), GFP_KERNEL);
	if (!dma)
		return -ENOMEM;

	dma->sst = sst;

	memset(&mem, 0, sizeof(mem));

	mem.start = sst->addr.lpe_base + sst_pdata->dma_base;
	mem.end = sst->addr.lpe_base + sst_pdata->dma_base +
		sst_pdata->dma_size - 1;
	mem.flags = IORESOURCE_MEM;

	/* now register DMA engine device */
	dma->chip = dw_probe(sst->dma_dev, &mem, sst_pdata->irq);
	if (IS_ERR(dma->chip)) {
		dev_err(sst->dev, "error: DMA device register failed\n");
		ret = PTR_ERR(dma->chip);
		goto err_dma_dev;
	}

	sst->dma = dma;
	sst->fw_use_dma = true;
	return 0;

err_dma_dev:
	devm_kfree(sst->dev, dma);
	return ret;
}
EXPORT_SYMBOL(sst_dma_new);
void sst_dma_free(struct sst_dma *dma)
{
	if (dma == NULL)
		return;

	if (dma->ch)
		dma_release_channel(dma->ch);

	if (dma->chip)
		dw_remove(dma->chip);
}
EXPORT_SYMBOL(sst_dma_free);
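
/*
 * Usage sketch (illustrative only): the expected lifecycle of the DMA setup
 * above. A core driver calls sst_dma_new() once at init, takes and releases
 * channels around each transfer, and tears everything down with
 * sst_dma_free(). Error handling is abbreviated.
 */
static int __maybe_unused sst_dma_lifecycle_example(struct sst_dsp *sst)
{
	int ret;

	/* registers the DesignWare DMA engine described by the pdata */
	ret = sst_dma_new(sst);
	if (ret < 0)
		return ret;

	/* ... firmware loading and module copies happen here ... */

	sst_dma_free(sst->dma);
	return 0;
}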
/* create new generic firmware object */
struct sst_fw *sst_fw_new(struct sst_dsp *dsp,
	const struct firmware *fw, void *private)
{
	struct sst_fw *sst_fw;
	int err;

	if (!dsp->ops->parse_fw)
		return NULL;

	sst_fw = kzalloc(sizeof(*sst_fw), GFP_KERNEL);
	if (sst_fw == NULL)
		return NULL;

	sst_fw->dsp = dsp;
	sst_fw->private = private;
	sst_fw->size = fw->size;

	/* allocate DMA buffer to store FW data */
	sst_fw->dma_buf = dma_alloc_coherent(dsp->dma_dev, sst_fw->size,
				&sst_fw->dmable_fw_paddr, GFP_DMA | GFP_KERNEL);
	if (!sst_fw->dma_buf) {
		dev_err(dsp->dev, "error: DMA alloc failed\n");
		kfree(sst_fw);
		return NULL;
	}

	/* copy FW data to DMA-able memory */
	memcpy((void *)sst_fw->dma_buf, (void *)fw->data, fw->size);

	if (dsp->fw_use_dma) {
		err = sst_dsp_dma_get_channel(dsp, 0);
		if (err < 0)
			goto chan_err;
	}

	/* call core specific FW parser to load FW data into DSP */
	err = dsp->ops->parse_fw(sst_fw);
	if (err < 0) {
		dev_err(dsp->dev, "error: parse fw failed %d\n", err);
		goto parse_err;
	}

	if (dsp->fw_use_dma)
		sst_dsp_dma_put_channel(dsp);

	mutex_lock(&dsp->mutex);
	list_add(&sst_fw->list, &dsp->fw_list);
	mutex_unlock(&dsp->mutex);

	return sst_fw;

parse_err:
	if (dsp->fw_use_dma)
		sst_dsp_dma_put_channel(dsp);
chan_err:
	dma_free_coherent(dsp->dma_dev, sst_fw->size,
			sst_fw->dma_buf,
			sst_fw->dmable_fw_paddr);
	sst_fw->dma_buf = NULL;
	kfree(sst_fw);
	return NULL;
}
EXPORT_SYMBOL_GPL(sst_fw_new);
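
/*
 * Usage sketch (illustrative only): loading a firmware image through
 * sst_fw_new(). The firmware file name is hypothetical; the real name and
 * the parse_fw() implementation come from the core specific driver.
 */
static int __maybe_unused sst_fw_load_example(struct sst_dsp *dsp)
{
	const struct firmware *fw;
	struct sst_fw *sst_fw;
	int ret;

	ret = request_firmware(&fw, "intel/fw_sst.bin", dsp->dev);
	if (ret < 0)
		return ret;

	/* copies the image into DMA memory and runs dsp->ops->parse_fw() */
	sst_fw = sst_fw_new(dsp, fw, NULL);
	release_firmware(fw);

	return sst_fw ? 0 : -ENOMEM;
}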
int sst_fw_reload(struct sst_fw *sst_fw)
{
	struct sst_dsp *dsp = sst_fw->dsp;
	int ret;

	dev_dbg(dsp->dev, "reloading firmware\n");

	/* call core specific FW parser to load FW data into DSP */
	ret = dsp->ops->parse_fw(sst_fw);
	if (ret < 0)
		dev_err(dsp->dev, "error: parse fw failed %d\n", ret);

	return ret;
}
EXPORT_SYMBOL_GPL(sst_fw_reload);
void sst_fw_unload(struct sst_fw *sst_fw)
{
	struct sst_dsp *dsp = sst_fw->dsp;
	struct sst_module *module, *mtmp;
	struct sst_module_runtime *runtime, *rtmp;

	dev_dbg(dsp->dev, "unloading firmware\n");

	mutex_lock(&dsp->mutex);

	/* check module by module */
	list_for_each_entry_safe(module, mtmp, &dsp->module_list, list) {
		if (module->sst_fw == sst_fw) {

			/* remove runtime modules */
			list_for_each_entry_safe(runtime, rtmp,
					&module->runtime_list, list) {

				block_list_remove(dsp, &runtime->block_list);
				list_del(&runtime->list);
				kfree(runtime);
			}

			/* now remove the module */
			block_list_remove(dsp, &module->block_list);
			list_del(&module->list);
			kfree(module);
		}
	}

	/* remove all scratch blocks */
	block_list_remove(dsp, &dsp->scratch_block_list);

	mutex_unlock(&dsp->mutex);
}
EXPORT_SYMBOL_GPL(sst_fw_unload);
/* free single firmware object */
void sst_fw_free(struct sst_fw *sst_fw)
{
	struct sst_dsp *dsp = sst_fw->dsp;

	mutex_lock(&dsp->mutex);
	list_del(&sst_fw->list);
	mutex_unlock(&dsp->mutex);

	if (sst_fw->dma_buf)
		dma_free_coherent(dsp->dma_dev, sst_fw->size, sst_fw->dma_buf,
			sst_fw->dmable_fw_paddr);
	kfree(sst_fw);
}
EXPORT_SYMBOL_GPL(sst_fw_free);
/* free all firmware objects */
void sst_fw_free_all(struct sst_dsp *dsp)
{
	struct sst_fw *sst_fw, *t;

	mutex_lock(&dsp->mutex);
	list_for_each_entry_safe(sst_fw, t, &dsp->fw_list, list) {

		list_del(&sst_fw->list);
		/* free with the same device the buffer was allocated from */
		dma_free_coherent(dsp->dma_dev, sst_fw->size, sst_fw->dma_buf,
			sst_fw->dmable_fw_paddr);
		kfree(sst_fw);
	}
	mutex_unlock(&dsp->mutex);
}
EXPORT_SYMBOL_GPL(sst_fw_free_all);
/* create a new SST generic module from FW template */
struct sst_module *sst_module_new(struct sst_fw *sst_fw,
	struct sst_module_template *template, void *private)
{
	struct sst_dsp *dsp = sst_fw->dsp;
	struct sst_module *sst_module;

	sst_module = kzalloc(sizeof(*sst_module), GFP_KERNEL);
	if (sst_module == NULL)
		return NULL;

	sst_module->id = template->id;
	sst_module->dsp = dsp;
	sst_module->sst_fw = sst_fw;
	sst_module->scratch_size = template->scratch_size;
	sst_module->persistent_size = template->persistent_size;
	sst_module->entry = template->entry;
	sst_module->state = SST_MODULE_STATE_UNLOADED;

	INIT_LIST_HEAD(&sst_module->block_list);
	INIT_LIST_HEAD(&sst_module->runtime_list);

	mutex_lock(&dsp->mutex);
	list_add(&sst_module->list, &dsp->module_list);
	mutex_unlock(&dsp->mutex);

	return sst_module;
}
EXPORT_SYMBOL_GPL(sst_module_new);
/* free firmware module and remove from available list */
void sst_module_free(struct sst_module *sst_module)
{
	struct sst_dsp *dsp = sst_module->dsp;

	mutex_lock(&dsp->mutex);
	list_del(&sst_module->list);
	mutex_unlock(&dsp->mutex);

	kfree(sst_module);
}
EXPORT_SYMBOL_GPL(sst_module_free);
struct sst_module_runtime *sst_module_runtime_new(struct sst_module *module,
	int id, void *private)
{
	struct sst_dsp *dsp = module->dsp;
	struct sst_module_runtime *runtime;

	runtime = kzalloc(sizeof(*runtime), GFP_KERNEL);
	if (runtime == NULL)
		return NULL;

	runtime->id = id;
	runtime->dsp = dsp;
	runtime->module = module;
	INIT_LIST_HEAD(&runtime->block_list);

	mutex_lock(&dsp->mutex);
	list_add(&runtime->list, &module->runtime_list);
	mutex_unlock(&dsp->mutex);

	return runtime;
}
EXPORT_SYMBOL_GPL(sst_module_runtime_new);
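
/*
 * Usage sketch (illustrative only): a core driver's parse_fw() callback
 * would typically create a module per firmware section and a runtime
 * instance per stream. All template values below are hypothetical.
 */
static int __maybe_unused sst_module_create_example(struct sst_fw *sst_fw)
{
	struct sst_module_template template;
	struct sst_module *module;
	struct sst_module_runtime *runtime;

	memset(&template, 0, sizeof(template));
	template.id = 1;			/* hypothetical module ID */
	template.entry = 0x800;			/* hypothetical entry point */
	template.persistent_size = 0x1000;
	template.scratch_size = 0x800;

	module = sst_module_new(sst_fw, &template, NULL);
	if (module == NULL)
		return -ENOMEM;

	runtime = sst_module_runtime_new(module, 1, NULL);
	if (runtime == NULL) {
		sst_module_free(module);
		return -ENOMEM;
	}

	return 0;
}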
void sst_module_runtime_free(struct sst_module_runtime *runtime)
{
	struct sst_dsp *dsp = runtime->dsp;

	mutex_lock(&dsp->mutex);
	list_del(&runtime->list);
	mutex_unlock(&dsp->mutex);

	kfree(runtime);
}
EXPORT_SYMBOL_GPL(sst_module_runtime_free);
static struct sst_mem_block *find_block(struct sst_dsp *dsp,
	struct sst_block_allocator *ba)
{
	struct sst_mem_block *block;

	list_for_each_entry(block, &dsp->free_block_list, list) {
		if (block->type == ba->type && block->offset == ba->offset)
			return block;
	}

	return NULL;
}
/* Block allocator must be on block boundary */
static int block_alloc_contiguous(struct sst_dsp *dsp,
	struct sst_block_allocator *ba, struct list_head *block_list)
{
	struct list_head tmp = LIST_HEAD_INIT(tmp);
	struct sst_mem_block *block;
	u32 block_start = SST_HSW_BLOCK_ANY;
	int size = ba->size, offset = ba->offset;

	while (ba->size > 0) {

		block = find_block(dsp, ba);
		if (!block) {
			/* not contiguous - give the blocks back and fail */
			list_splice(&tmp, &dsp->free_block_list);

			ba->size = size;
			ba->offset = offset;
			return -ENOMEM;
		}

		list_move_tail(&block->list, &tmp);
		ba->offset += block->size;
		ba->size -= block->size;
	}
	ba->size = size;
	ba->offset = offset;

	list_for_each_entry(block, &tmp, list) {

		/* remember the lowest block offset */
		if (block->offset < block_start)
			block_start = block->offset;

		list_add(&block->module_list, block_list);

		dev_dbg(dsp->dev, "block allocated %d:%d at offset 0x%x\n",
			block->type, block->index, block->offset);
	}

	list_splice(&tmp, &dsp->used_block_list);
	return 0;
}
611 static int block_alloc(struct sst_dsp
*dsp
, struct sst_block_allocator
*ba
,
612 struct list_head
*block_list
)
614 struct sst_mem_block
*block
, *tmp
;
620 /* find first free whole blocks that can hold module */
621 list_for_each_entry_safe(block
, tmp
, &dsp
->free_block_list
, list
) {
623 /* ignore blocks with wrong type */
624 if (block
->type
!= ba
->type
)
627 if (ba
->size
> block
->size
)
630 ba
->offset
= block
->offset
;
631 block
->bytes_used
= ba
->size
% block
->size
;
632 list_add(&block
->module_list
, block_list
);
633 list_move(&block
->list
, &dsp
->used_block_list
);
634 dev_dbg(dsp
->dev
, "block allocated %d:%d at offset 0x%x\n",
635 block
->type
, block
->index
, block
->offset
);
639 /* then find free multiple blocks that can hold module */
640 list_for_each_entry_safe(block
, tmp
, &dsp
->free_block_list
, list
) {
642 /* ignore blocks with wrong type */
643 if (block
->type
!= ba
->type
)
646 /* do we span > 1 blocks */
647 if (ba
->size
> block
->size
) {
649 /* align ba to block boundary */
650 ba
->offset
= block
->offset
;
652 ret
= block_alloc_contiguous(dsp
, ba
, block_list
);
659 /* not enough free block space */
int sst_alloc_blocks(struct sst_dsp *dsp, struct sst_block_allocator *ba,
	struct list_head *block_list)
{
	int ret;

	dev_dbg(dsp->dev, "block request 0x%x bytes at offset 0x%x type %d\n",
		ba->size, ba->offset, ba->type);

	mutex_lock(&dsp->mutex);

	ret = block_alloc(dsp, ba, block_list);
	if (ret < 0) {
		dev_err(dsp->dev, "error: can't alloc blocks %d\n", ret);
		goto out;
	}

	/* prepare DSP blocks for module usage */
	ret = block_list_prepare(dsp, block_list);
	if (ret < 0)
		dev_err(dsp->dev, "error: prepare failed\n");

out:
	mutex_unlock(&dsp->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(sst_alloc_blocks);
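
/*
 * Usage sketch (illustrative only): allocating and freeing DSP blocks for a
 * caller-owned list. The size and type values here are hypothetical.
 */
static int __maybe_unused sst_block_alloc_example(struct sst_dsp *dsp)
{
	struct sst_block_allocator ba;
	LIST_HEAD(block_list);
	int ret;

	memset(&ba, 0, sizeof(ba));
	ba.size = 0x1000;		/* bytes required */
	ba.type = SST_MEM_DRAM;		/* block type to search */

	/* takes dsp->mutex, picks free blocks and enables them */
	ret = sst_alloc_blocks(dsp, &ba, &block_list);
	if (ret < 0)
		return ret;

	/* ... use the memory; ba.offset now holds the allocated offset ... */

	return sst_free_blocks(dsp, &block_list);
}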
int sst_free_blocks(struct sst_dsp *dsp, struct list_head *block_list)
{
	mutex_lock(&dsp->mutex);
	block_list_remove(dsp, block_list);
	mutex_unlock(&dsp->mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(sst_free_blocks);
/* allocate memory blocks for static module addresses - callers hold locks */
static int block_alloc_fixed(struct sst_dsp *dsp, struct sst_block_allocator *ba,
	struct list_head *block_list)
{
	struct sst_mem_block *block, *tmp;
	struct sst_block_allocator ba_tmp = *ba;
	u32 end = ba->offset + ba->size, block_end;
	int err;

	/* only IRAM/DRAM blocks are managed */
	if (ba->type != SST_MEM_IRAM && ba->type != SST_MEM_DRAM)
		return 0;

	/* are blocks already attached to this module */
	list_for_each_entry_safe(block, tmp, block_list, module_list) {

		/* ignore blocks with wrong type */
		if (block->type != ba->type)
			continue;

		block_end = block->offset + block->size;

		/* find block that holds section */
		if (ba->offset >= block->offset && end <= block_end)
			return 0;

		/* does block span more than 1 section */
		if (ba->offset >= block->offset && ba->offset < block_end) {

			/* align ba to block boundary */
			ba_tmp.size -= block_end - ba->offset;
			ba_tmp.offset = block_end;
			err = block_alloc_contiguous(dsp, &ba_tmp, block_list);
			if (err < 0)
				return -ENOMEM;

			/* module already owns blocks */
			return 0;
		}
	}

	/* find first free blocks that can hold section in free list */
	list_for_each_entry_safe(block, tmp, &dsp->free_block_list, list) {
		block_end = block->offset + block->size;

		/* ignore blocks with wrong type */
		if (block->type != ba->type)
			continue;

		/* find block that holds section */
		if (ba->offset >= block->offset && end <= block_end) {

			/* add block */
			list_move(&block->list, &dsp->used_block_list);
			list_add(&block->module_list, block_list);
			dev_dbg(dsp->dev, "block allocated %d:%d at offset 0x%x\n",
				block->type, block->index, block->offset);
			return 0;
		}

		/* does block span more than 1 section */
		if (ba->offset >= block->offset && ba->offset < block_end) {

			/* add block */
			list_move(&block->list, &dsp->used_block_list);
			list_add(&block->module_list, block_list);
			/* align ba to block boundary */
			ba_tmp.size -= block_end - ba->offset;
			ba_tmp.offset = block_end;

			err = block_alloc_contiguous(dsp, &ba_tmp, block_list);
			if (err < 0)
				return -ENOMEM;

			return 0;
		}
	}

	return -ENOMEM;
}
781 int sst_module_alloc_blocks(struct sst_module
*module
)
783 struct sst_dsp
*dsp
= module
->dsp
;
784 struct sst_fw
*sst_fw
= module
->sst_fw
;
785 struct sst_block_allocator ba
;
788 memset(&ba
, 0, sizeof(ba
));
789 ba
.size
= module
->size
;
790 ba
.type
= module
->type
;
791 ba
.offset
= module
->offset
;
793 dev_dbg(dsp
->dev
, "block request 0x%x bytes at offset 0x%x type %d\n",
794 ba
.size
, ba
.offset
, ba
.type
);
796 mutex_lock(&dsp
->mutex
);
798 /* alloc blocks that includes this section */
799 ret
= block_alloc_fixed(dsp
, &ba
, &module
->block_list
);
802 "error: no free blocks for section at offset 0x%x size 0x%x\n",
803 module
->offset
, module
->size
);
804 mutex_unlock(&dsp
->mutex
);
808 /* prepare DSP blocks for module copy */
809 ret
= block_list_prepare(dsp
, &module
->block_list
);
811 dev_err(dsp
->dev
, "error: fw module prepare failed\n");
815 /* copy partial module data to blocks */
816 if (dsp
->fw_use_dma
) {
817 ret
= sst_dsp_dma_copyto(dsp
,
818 dsp
->addr
.lpe_base
+ module
->offset
,
819 sst_fw
->dmable_fw_paddr
+ module
->data_offset
,
822 dev_err(dsp
->dev
, "error: module copy failed\n");
826 sst_memcpy32(dsp
->addr
.lpe
+ module
->offset
, module
->data
,
829 mutex_unlock(&dsp
->mutex
);
833 block_list_remove(dsp
, &module
->block_list
);
834 mutex_unlock(&dsp
->mutex
);
837 EXPORT_SYMBOL_GPL(sst_module_alloc_blocks
);
/* Unload entire module from DSP memory */
int sst_module_free_blocks(struct sst_module *module)
{
	struct sst_dsp *dsp = module->dsp;

	mutex_lock(&dsp->mutex);
	block_list_remove(dsp, &module->block_list);
	mutex_unlock(&dsp->mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(sst_module_free_blocks);
int sst_module_runtime_alloc_blocks(struct sst_module_runtime *runtime,
	int offset)
{
	struct sst_dsp *dsp = runtime->dsp;
	struct sst_module *module = runtime->module;
	struct sst_block_allocator ba;
	int ret;

	if (module->persistent_size == 0)
		return 0;

	memset(&ba, 0, sizeof(ba));
	ba.size = module->persistent_size;
	ba.type = SST_MEM_DRAM;

	mutex_lock(&dsp->mutex);

	/* do we need to allocate at a fixed address ? */
	if (offset != 0) {

		ba.offset = offset;

		dev_dbg(dsp->dev, "persistent fixed block request 0x%x bytes type %d offset 0x%x\n",
			ba.size, ba.type, ba.offset);

		/* alloc blocks that includes this section */
		ret = block_alloc_fixed(dsp, &ba, &runtime->block_list);

	} else {
		dev_dbg(dsp->dev, "persistent block request 0x%x bytes type %d\n",
			ba.size, ba.type);

		/* alloc blocks that includes this section */
		ret = block_alloc(dsp, &ba, &runtime->block_list);
	}
	if (ret < 0) {
		dev_err(dsp->dev,
			"error: no free blocks for runtime module size 0x%x\n",
			module->persistent_size);
		mutex_unlock(&dsp->mutex);
		return -ENOMEM;
	}

	runtime->persistent_offset = ba.offset;

	/* prepare DSP blocks for module copy */
	ret = block_list_prepare(dsp, &runtime->block_list);
	if (ret < 0) {
		dev_err(dsp->dev, "error: runtime block prepare failed\n");
		goto err;
	}

	mutex_unlock(&dsp->mutex);
	return ret;

err:
	/* free the runtime's blocks, not the parent module's */
	block_list_remove(dsp, &runtime->block_list);
	mutex_unlock(&dsp->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(sst_module_runtime_alloc_blocks);
int sst_module_runtime_free_blocks(struct sst_module_runtime *runtime)
{
	struct sst_dsp *dsp = runtime->dsp;

	mutex_lock(&dsp->mutex);
	block_list_remove(dsp, &runtime->block_list);
	mutex_unlock(&dsp->mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(sst_module_runtime_free_blocks);
int sst_module_runtime_save(struct sst_module_runtime *runtime,
	struct sst_module_runtime_context *context)
{
	struct sst_dsp *dsp = runtime->dsp;
	struct sst_module *module = runtime->module;
	int ret = 0;

	dev_dbg(dsp->dev, "saving runtime %d memory at 0x%x size 0x%x\n",
		runtime->id, runtime->persistent_offset,
		module->persistent_size);

	context->buffer = dma_alloc_coherent(dsp->dma_dev,
		module->persistent_size,
		&context->dma_buffer, GFP_DMA | GFP_KERNEL);
	if (!context->buffer) {
		dev_err(dsp->dev, "error: DMA context alloc failed\n");
		return -ENOMEM;
	}

	mutex_lock(&dsp->mutex);

	if (dsp->fw_use_dma) {

		ret = sst_dsp_dma_get_channel(dsp, 0);
		if (ret < 0)
			goto err;

		ret = sst_dsp_dma_copyfrom(dsp, context->dma_buffer,
			dsp->addr.lpe_base + runtime->persistent_offset,
			module->persistent_size);
		sst_dsp_dma_put_channel(dsp);
		if (ret < 0) {
			dev_err(dsp->dev, "error: context copy failed\n");
			goto err;
		}
	} else
		sst_memcpy32(context->buffer, dsp->addr.lpe +
			runtime->persistent_offset,
			module->persistent_size);

err:
	mutex_unlock(&dsp->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(sst_module_runtime_save);
int sst_module_runtime_restore(struct sst_module_runtime *runtime,
	struct sst_module_runtime_context *context)
{
	struct sst_dsp *dsp = runtime->dsp;
	struct sst_module *module = runtime->module;
	int ret = 0;

	dev_dbg(dsp->dev, "restoring runtime %d memory at 0x%x size 0x%x\n",
		runtime->id, runtime->persistent_offset,
		module->persistent_size);

	mutex_lock(&dsp->mutex);

	if (!context->buffer) {
		dev_info(dsp->dev, "no context buffer to restore!\n");
		goto err;
	}

	if (dsp->fw_use_dma) {

		ret = sst_dsp_dma_get_channel(dsp, 0);
		if (ret < 0)
			goto err;

		ret = sst_dsp_dma_copyto(dsp,
			dsp->addr.lpe_base + runtime->persistent_offset,
			context->dma_buffer, module->persistent_size);
		sst_dsp_dma_put_channel(dsp);
		if (ret < 0) {
			dev_err(dsp->dev, "error: module copy failed\n");
			goto err;
		}
	} else
		sst_memcpy32(dsp->addr.lpe + runtime->persistent_offset,
			context->buffer, module->persistent_size);

	dma_free_coherent(dsp->dma_dev, module->persistent_size,
			context->buffer, context->dma_buffer);
	context->buffer = NULL;

err:
	mutex_unlock(&dsp->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(sst_module_runtime_restore);
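
/*
 * Usage sketch (illustrative only): pairing save and restore around a DSP
 * power transition. The context storage shown here is hypothetical; a real
 * driver keeps it in its own runtime state.
 */
static int __maybe_unused sst_runtime_suspend_example(
	struct sst_module_runtime *runtime,
	struct sst_module_runtime_context *context)
{
	int ret;

	/* copy persistent memory out of the DSP before powering it down */
	ret = sst_module_runtime_save(runtime, context);
	if (ret < 0)
		return ret;

	/* ... power the DSP down and back up ... */

	/* copy persistent memory back and release the context buffer */
	return sst_module_runtime_restore(runtime, context);
}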
/* register a DSP memory block for use with FW based modules */
struct sst_mem_block *sst_mem_block_register(struct sst_dsp *dsp, u32 offset,
	u32 size, enum sst_mem_type type, struct sst_block_ops *ops, u32 index,
	void *private)
{
	struct sst_mem_block *block;

	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (block == NULL)
		return NULL;

	block->offset = offset;
	block->size = size;
	block->index = index;
	block->type = type;
	block->dsp = dsp;
	block->private = private;
	block->ops = ops;

	mutex_lock(&dsp->mutex);
	list_add(&block->list, &dsp->free_block_list);
	mutex_unlock(&dsp->mutex);

	return block;
}
EXPORT_SYMBOL_GPL(sst_mem_block_register);
/* unregister all DSP memory blocks */
void sst_mem_block_unregister_all(struct sst_dsp *dsp)
{
	struct sst_mem_block *block, *tmp;

	mutex_lock(&dsp->mutex);

	/* unregister used blocks */
	list_for_each_entry_safe(block, tmp, &dsp->used_block_list, list) {
		list_del(&block->list);
		kfree(block);
	}

	/* unregister free blocks */
	list_for_each_entry_safe(block, tmp, &dsp->free_block_list, list) {
		list_del(&block->list);
		kfree(block);
	}

	mutex_unlock(&dsp->mutex);
}
EXPORT_SYMBOL_GPL(sst_mem_block_unregister_all);
/* allocate scratch buffer blocks */
int sst_block_alloc_scratch(struct sst_dsp *dsp)
{
	struct sst_module *module;
	struct sst_block_allocator ba;
	int ret;

	mutex_lock(&dsp->mutex);

	/* calculate required scratch size */
	dsp->scratch_size = 0;
	list_for_each_entry(module, &dsp->module_list, list) {
		dev_dbg(dsp->dev, "module %d scratch req 0x%x bytes\n",
			module->id, module->scratch_size);
		if (dsp->scratch_size < module->scratch_size)
			dsp->scratch_size = module->scratch_size;
	}

	dev_dbg(dsp->dev, "scratch buffer required is 0x%x bytes\n",
		dsp->scratch_size);

	if (dsp->scratch_size == 0) {
		dev_info(dsp->dev, "no modules need scratch buffer\n");
		mutex_unlock(&dsp->mutex);
		return 0;
	}

	/* allocate blocks for module scratch buffers */
	dev_dbg(dsp->dev, "allocating scratch blocks\n");

	ba.size = dsp->scratch_size;
	ba.type = SST_MEM_DRAM;

	/* do we need to allocate at fixed offset */
	if (dsp->scratch_offset != 0) {

		ba.offset = dsp->scratch_offset;

		dev_dbg(dsp->dev, "block request 0x%x bytes type %d at 0x%x\n",
			ba.size, ba.type, ba.offset);

		/* alloc blocks that includes this section */
		ret = block_alloc_fixed(dsp, &ba, &dsp->scratch_block_list);

	} else {
		dev_dbg(dsp->dev, "block request 0x%x bytes type %d\n",
			ba.size, ba.type);

		ba.offset = 0;
		ret = block_alloc(dsp, &ba, &dsp->scratch_block_list);
	}
	if (ret < 0) {
		dev_err(dsp->dev, "error: can't alloc scratch blocks\n");
		mutex_unlock(&dsp->mutex);
		return ret;
	}

	ret = block_list_prepare(dsp, &dsp->scratch_block_list);
	if (ret < 0) {
		dev_err(dsp->dev, "error: scratch block prepare failed\n");
		mutex_unlock(&dsp->mutex);
		return ret;
	}

	/* assign the same offset of scratch to each module */
	dsp->scratch_offset = ba.offset;
	mutex_unlock(&dsp->mutex);
	return dsp->scratch_size;
}
EXPORT_SYMBOL_GPL(sst_block_alloc_scratch);
/* free all scratch blocks */
void sst_block_free_scratch(struct sst_dsp *dsp)
{
	mutex_lock(&dsp->mutex);
	block_list_remove(dsp, &dsp->scratch_block_list);
	mutex_unlock(&dsp->mutex);
}
EXPORT_SYMBOL_GPL(sst_block_free_scratch);
/* get a module from its unique ID */
struct sst_module *sst_module_get_from_id(struct sst_dsp *dsp, u32 id)
{
	struct sst_module *module;

	mutex_lock(&dsp->mutex);

	list_for_each_entry(module, &dsp->module_list, list) {
		if (module->id == id) {
			mutex_unlock(&dsp->mutex);
			return module;
		}
	}

	mutex_unlock(&dsp->mutex);
	return NULL;
}
EXPORT_SYMBOL_GPL(sst_module_get_from_id);
struct sst_module_runtime *sst_module_runtime_get_from_id(
	struct sst_module *module, u32 id)
{
	struct sst_module_runtime *runtime;
	struct sst_dsp *dsp = module->dsp;

	mutex_lock(&dsp->mutex);

	list_for_each_entry(runtime, &module->runtime_list, list) {
		if (runtime->id == id) {
			mutex_unlock(&dsp->mutex);
			return runtime;
		}
	}

	mutex_unlock(&dsp->mutex);
	return NULL;
}
EXPORT_SYMBOL_GPL(sst_module_runtime_get_from_id);
/* returns block address in DSP address space */
u32 sst_dsp_get_offset(struct sst_dsp *dsp, u32 offset,
	enum sst_mem_type type)
{
	switch (type) {
	case SST_MEM_IRAM:
		return offset - dsp->addr.iram_offset +
			dsp->addr.dsp_iram_offset;
	case SST_MEM_DRAM:
		return offset - dsp->addr.dram_offset +
			dsp->addr.dsp_dram_offset;
	default:
		return 0;
	}
}
EXPORT_SYMBOL_GPL(sst_dsp_get_offset);
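
/*
 * Usage sketch (illustrative only): translating a host-visible offset into
 * the DSP's own address space, e.g. for an IPC message that must carry a
 * DSP-side address. The offset value below is hypothetical.
 */
static u32 __maybe_unused sst_dsp_offset_example(struct sst_dsp *dsp)
{
	u32 host_iram_offset = 0x800;	/* hypothetical IRAM section offset */

	return sst_dsp_get_offset(dsp, host_iram_offset, SST_MEM_IRAM);
}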