/*
 * Intel Haswell SST DSP driver
 *
 * Copyright (C) 2013, Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/sched.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/pm_runtime.h>

#include "sst-dsp.h"
#include "sst-dsp-priv.h"
#include "sst-haswell-ipc.h"

#include <trace/events/hswadsp.h>
#define SST_HSW_FW_SIGNATURE_SIZE	4
#define SST_HSW_FW_SIGN			"$SST"
#define SST_HSW_FW_LIB_SIGN		"$LIB"

#define SST_WPT_SHIM_OFFSET	0xFB000
#define SST_LP_SHIM_OFFSET	0xE7000
#define SST_WPT_IRAM_OFFSET	0xA0000
#define SST_LP_IRAM_OFFSET	0x80000
#define SST_WPT_DSP_DRAM_OFFSET	0x400000
#define SST_WPT_DSP_IRAM_OFFSET	0x00000
#define SST_LPT_DSP_DRAM_OFFSET	0x400000
#define SST_LPT_DSP_IRAM_OFFSET	0x00000

#define SST_SHIM_PM_REG		0x84

#define SST_HSW_IRAM	1
#define SST_HSW_DRAM	2
#define SST_HSW_REGS	3
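
/*
 * Firmware image layout as parsed below (derived from the parsing code in
 * this file): a single struct fw_header is followed by header->modules
 * entries of struct fw_module_header, and each module header is immediately
 * followed by module->blocks DMA block descriptors (struct dma_block_info),
 * each carrying block->size bytes of payload.
 */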
struct dma_block_info {
	__le32 type;		/* IRAM/DRAM */
	__le32 size;		/* Bytes */
	__le32 ram_offset;	/* Offset in I/DRAM */
	__le32 rsvd;		/* Reserved field */
} __attribute__((packed));
struct fw_module_info {
	__le32 persistent_size;
	__le32 scratch_size;
} __attribute__((packed));
struct fw_header {
	unsigned char signature[SST_HSW_FW_SIGNATURE_SIZE]; /* FW signature */
	__le32 file_size;	/* size of fw minus this header */
	__le32 modules;		/* # of modules */
	__le32 file_format;	/* version of header format */
	__le32 reserved[4];
} __attribute__((packed));
struct fw_module_header {
	unsigned char signature[SST_HSW_FW_SIGNATURE_SIZE]; /* module signature */
	__le32 mod_size;	/* size of module */
	__le32 blocks;		/* # of blocks */
	__le16 padding;
	__le16 type;		/* codec type, pp lib */
	__le32 entry_point;
	struct fw_module_info info;
} __attribute__((packed));
static void hsw_free(struct sst_dsp *sst);
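
/*
 * Parse one module header from the firmware image: create an sst_module
 * from the module header fields, then walk its DMA block descriptors and
 * allocate DSP memory blocks for each IRAM/DRAM block.
 */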
static int hsw_parse_module(struct sst_dsp *dsp, struct sst_fw *fw,
	struct fw_module_header *module)
{
	struct dma_block_info *block;
	struct sst_module *mod;
	struct sst_module_template template;
	int count, ret;
	void __iomem *ram;

	/* TODO: allowed module types need to be configurable */
	if (module->type != SST_HSW_MODULE_BASE_FW
		&& module->type != SST_HSW_MODULE_PCM_SYSTEM
		&& module->type != SST_HSW_MODULE_PCM
		&& module->type != SST_HSW_MODULE_PCM_REFERENCE
		&& module->type != SST_HSW_MODULE_PCM_CAPTURE
		&& module->type != SST_HSW_MODULE_LPAL)
		return 0;

	dev_dbg(dsp->dev, "new module sign 0x%s size 0x%x blocks 0x%x type 0x%x\n",
		module->signature, module->mod_size,
		module->blocks, module->type);
	dev_dbg(dsp->dev, " entrypoint 0x%x\n", module->entry_point);
	dev_dbg(dsp->dev, " persistent 0x%x scratch 0x%x\n",
		module->info.persistent_size, module->info.scratch_size);

	memset(&template, 0, sizeof(template));
	template.id = module->type;
	template.entry = module->entry_point - 4;
	template.persistent_size = module->info.persistent_size;
	template.scratch_size = module->info.scratch_size;

	mod = sst_module_new(fw, &template, NULL);
	if (mod == NULL)
		return -ENOMEM;

	block = (void *)module + sizeof(*module);

	for (count = 0; count < module->blocks; count++) {

		if (block->size <= 0) {
			dev_err(dsp->dev,
				"error: block %d size invalid\n", count);
			sst_module_free(mod);
			return -EINVAL;
		}

		switch (block->type) {
		case SST_HSW_IRAM:
			ram = dsp->addr.lpe;
			mod->offset =
				block->ram_offset + dsp->addr.iram_offset;
			mod->type = SST_MEM_IRAM;
			break;
		case SST_HSW_DRAM:
			ram = dsp->addr.lpe;
			mod->offset = block->ram_offset;
			mod->type = SST_MEM_DRAM;
			break;
		default:
			dev_err(dsp->dev, "error: bad type 0x%x for block 0x%x\n",
				block->type, count);
			sst_module_free(mod);
			return -EINVAL;
		}

		mod->size = block->size;
		mod->data = (void *)block + sizeof(*block);
		mod->data_offset = mod->data - fw->dma_buf;

		dev_dbg(dsp->dev, "module block %d type 0x%x "
			"size 0x%x ==> ram %p offset 0x%x\n",
			count, mod->type, block->size, ram,
			block->ram_offset);

		ret = sst_module_alloc_blocks(mod);
		if (ret < 0) {
			dev_err(dsp->dev, "error: could not allocate blocks for module %d\n",
				count);
			sst_module_free(mod);
			return ret;
		}

		block = (void *)block + sizeof(*block) + block->size;
	}

	return 0;
}
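
/*
 * Top-level firmware image parser: validate the "$SST" signature and the
 * file size, hand each module header to hsw_parse_module() and finally
 * allocate the scratch memory regions shared by all modules.
 */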
static int hsw_parse_fw_image(struct sst_fw *sst_fw)
{
	struct fw_header *header;
	struct fw_module_header *module;
	struct sst_dsp *dsp = sst_fw->dsp;
	int ret, count;

	/* Read the header information from the data pointer */
	header = (struct fw_header *)sst_fw->dma_buf;

	/* verify FW */
	if ((strncmp(header->signature, SST_HSW_FW_SIGN, 4) != 0) ||
		(sst_fw->size != header->file_size + sizeof(*header))) {
		dev_err(dsp->dev, "error: invalid fw sign/filesize mismatch\n");
		return -EINVAL;
	}

	dev_dbg(dsp->dev, "header size=0x%x modules=0x%x fmt=0x%x size=%zu\n",
		header->file_size, header->modules,
		header->file_format, sizeof(*header));

	/* parse each module */
	module = (void *)sst_fw->dma_buf + sizeof(*header);
	for (count = 0; count < header->modules; count++) {

		ret = hsw_parse_module(dsp, sst_fw, module);
		if (ret < 0) {
			dev_err(dsp->dev, "error: invalid module %d\n", count);
			return ret;
		}
		module = (void *)module + sizeof(*module) + module->mod_size;
	}

	/* allocate scratch mem regions */
	sst_block_alloc_scratch(dsp);

	return 0;
}
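
/*
 * IRQ top half: check ISRX for Done/Busy sources, mask the corresponding
 * IMRX bit so the line does not re-fire, and return IRQ_WAKE_THREAD so the
 * threaded handler (presumably installed by the IPC layer) can process the
 * message.
 */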
static irqreturn_t hsw_irq(int irq, void *context)
{
	struct sst_dsp *sst = (struct sst_dsp *) context;
	u32 isr;
	int ret = IRQ_NONE;

	spin_lock(&sst->spinlock);

	/* Interrupt arrived, check src */
	isr = sst_dsp_shim_read_unlocked(sst, SST_ISRX);
	if (isr & SST_ISRX_DONE) {
		trace_sst_irq_done(isr,
			sst_dsp_shim_read_unlocked(sst, SST_IMRX));

		/* Mask Done interrupt before return */
		sst_dsp_shim_update_bits_unlocked(sst, SST_IMRX,
			SST_IMRX_DONE, SST_IMRX_DONE);
		ret = IRQ_WAKE_THREAD;
	}

	if (isr & SST_ISRX_BUSY) {
		trace_sst_irq_busy(isr,
			sst_dsp_shim_read_unlocked(sst, SST_IMRX));

		/* Mask Busy interrupt before return */
		sst_dsp_shim_update_bits_unlocked(sst, SST_IMRX,
			SST_IMRX_BUSY, SST_IMRX_BUSY);
		ret = IRQ_WAKE_THREAD;
	}

	spin_unlock(&sst->spinlock);

	return ret;
}
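
/*
 * Power the ADSP down to D3: with core clock gating temporarily disabled,
 * power-gate the IRAM/DRAM banks, switch off the audio PLL and MCLK, write
 * the D3 state into PMCS, then re-enable clock gating.
 */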
static void hsw_set_dsp_D3(struct sst_dsp *sst)
{
	u32 val;
	u32 reg;

	/* Disable core clock gating (VDRTCTL2.DCLCGE = 0) */
	reg = readl(sst->addr.pci_cfg + SST_VDRTCTL2);
	reg &= ~(SST_VDRTCL2_DCLCGE | SST_VDRTCL2_DTCGE);
	writel(reg, sst->addr.pci_cfg + SST_VDRTCTL2);

	/* enable power gating and switch off DRAM & IRAM blocks */
	val = readl(sst->addr.pci_cfg + SST_VDRTCTL0);
	val |= SST_VDRTCL0_DSRAMPGE_MASK |
		SST_VDRTCL0_ISRAMPGE_MASK;
	val &= ~(SST_VDRTCL0_D3PGD | SST_VDRTCL0_D3SRAMPGD);
	writel(val, sst->addr.pci_cfg + SST_VDRTCTL0);

	/* switch off audio PLL */
	val = readl(sst->addr.pci_cfg + SST_VDRTCTL2);
	val |= SST_VDRTCL2_APLLSE_MASK;
	writel(val, sst->addr.pci_cfg + SST_VDRTCTL2);

	/* disable MCLK (clkctl.smos = 0) */
	sst_dsp_shim_update_bits_unlocked(sst, SST_CLKCTL,
		SST_CLKCTL_MASK, 0);

	/* Set D3 state, delay 50 us */
	val = readl(sst->addr.pci_cfg + SST_PMCS);
	val |= SST_PMCS_PS_MASK;
	writel(val, sst->addr.pci_cfg + SST_PMCS);
	udelay(50);

	/* Enable core clock gating (VDRTCTL2.DCLCGE = 1), delay 50 us */
	reg = readl(sst->addr.pci_cfg + SST_VDRTCTL2);
	reg |= SST_VDRTCL2_DCLCGE | SST_VDRTCL2_DTCGE;
	writel(reg, sst->addr.pci_cfg + SST_VDRTCTL2);

	udelay(50);
}
static void hsw_reset(struct sst_dsp *sst)
{
	/* put DSP into reset and stall */
	sst_dsp_shim_update_bits_unlocked(sst, SST_CSR,
		SST_CSR_RST | SST_CSR_STALL,
		SST_CSR_RST | SST_CSR_STALL);

	/* keep in reset for 10ms */
	mdelay(10);

	/* take DSP out of reset and keep stalled for FW loading */
	sst_dsp_shim_update_bits_unlocked(sst, SST_CSR,
		SST_CSR_RST | SST_CSR_STALL, SST_CSR_STALL);
}
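
/*
 * Power the ADSP up to D0: ungate clocks, clear the PMCS power state and
 * poll until the shim responds, then program the base clocks, reset/stall
 * the core for firmware load, re-enable the PLL, power-gate all SRAM blocks
 * by default and unmask the IPC interrupts.
 */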
static int hsw_set_dsp_D0(struct sst_dsp *sst)
{
	int tries = 10;
	u32 reg;

	/* Disable core clock gating (VDRTCTL2.DCLCGE = 0) */
	reg = readl(sst->addr.pci_cfg + SST_VDRTCTL2);
	reg &= ~(SST_VDRTCL2_DCLCGE | SST_VDRTCL2_DTCGE);
	writel(reg, sst->addr.pci_cfg + SST_VDRTCTL2);

	/* Disable D3PG (VDRTCTL0.D3PGD = 1) */
	reg = readl(sst->addr.pci_cfg + SST_VDRTCTL0);
	reg |= SST_VDRTCL0_D3PGD;
	writel(reg, sst->addr.pci_cfg + SST_VDRTCTL0);

	/* Set D0 state */
	reg = readl(sst->addr.pci_cfg + SST_PMCS);
	reg &= ~SST_PMCS_PS_MASK;
	writel(reg, sst->addr.pci_cfg + SST_PMCS);

	/* check that ADSP shim is enabled */
	while (tries--) {
		reg = readl(sst->addr.pci_cfg + SST_PMCS) & SST_PMCS_PS_MASK;
		if (reg == 0)
			goto finish;

		msleep(1);
	}

	return -ENODEV;

finish:
	/* select SSP1 19.2MHz base clock, SSP clock 0, turn off Low Power Clock */
	sst_dsp_shim_update_bits_unlocked(sst, SST_CSR,
		SST_CSR_S1IOCS | SST_CSR_SBCS1 | SST_CSR_LPCS, 0x0);

	/* stall DSP core, set clk to 192/96Mhz */
	sst_dsp_shim_update_bits_unlocked(sst,
		SST_CSR, SST_CSR_STALL | SST_CSR_DCS_MASK,
		SST_CSR_STALL | SST_CSR_DCS(4));

	/* Set 24MHz MCLK, prevent local clock gating, enable SSP0 clock */
	sst_dsp_shim_update_bits_unlocked(sst, SST_CLKCTL,
		SST_CLKCTL_MASK | SST_CLKCTL_DCPLCG | SST_CLKCTL_SCOE0,
		SST_CLKCTL_MASK | SST_CLKCTL_DCPLCG | SST_CLKCTL_SCOE0);

	/* Stall and reset core, set CSR */
	hsw_reset(sst);

	/* Enable core clock gating (VDRTCTL2.DCLCGE = 1), delay 50 us */
	reg = readl(sst->addr.pci_cfg + SST_VDRTCTL2);
	reg |= SST_VDRTCL2_DCLCGE | SST_VDRTCL2_DTCGE;
	writel(reg, sst->addr.pci_cfg + SST_VDRTCTL2);

	udelay(50);

	/* switch on audio PLL */
	reg = readl(sst->addr.pci_cfg + SST_VDRTCTL2);
	reg &= ~SST_VDRTCL2_APLLSE_MASK;
	writel(reg, sst->addr.pci_cfg + SST_VDRTCTL2);

	/*
	 * Set default power gating control: power gate all SRAM blocks, i.e.
	 * they cannot be accessed until each block is enabled individually.
	 */
	reg = readl(sst->addr.pci_cfg + SST_VDRTCTL0);
	reg |= SST_VDRTCL0_DSRAMPGE_MASK | SST_VDRTCL0_ISRAMPGE_MASK;
	writel(reg, sst->addr.pci_cfg + SST_VDRTCTL0);

	/* disable DMA finish function for SSP0 & SSP1 */
	sst_dsp_shim_update_bits_unlocked(sst, SST_CSR2, SST_CSR2_SDFD_SSP1,
		SST_CSR2_SDFD_SSP1);

	/* set on-demand mode on engine 0,1 for all channels */
	sst_dsp_shim_update_bits(sst, SST_HMDC,
			SST_HMDC_HDDA_E0_ALLCH | SST_HMDC_HDDA_E1_ALLCH,
			SST_HMDC_HDDA_E0_ALLCH | SST_HMDC_HDDA_E1_ALLCH);

	/* Enable Interrupt from both sides */
	sst_dsp_shim_update_bits(sst, SST_IMRX, (SST_IMRX_BUSY | SST_IMRX_DONE),
				 0x0);
	sst_dsp_shim_update_bits(sst, SST_IMRD, (SST_IMRD_DONE | SST_IMRD_BUSY |
				SST_IMRD_SSP0 | SST_IMRD_DMAC), 0x0);

	/* clear IPC registers */
	sst_dsp_shim_write(sst, SST_IPCX, 0x0);
	sst_dsp_shim_write(sst, SST_IPCD, 0x0);
	sst_dsp_shim_write(sst, 0x80, 0x6);
	sst_dsp_shim_write(sst, 0xe0, 0x300a);

	return 0;
}
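
/*
 * Run-time control callbacks: hsw_boot() starts the stalled core, while
 * hsw_stall(), hsw_sleep() and hsw_wake() implement core stall and runtime
 * suspend/resume. They are wired into haswell_ops at the end of this file.
 */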
static void hsw_boot(struct sst_dsp *sst)
{
	/* set opportunistic mode on engine 0,1 for all channels */
	sst_dsp_shim_update_bits(sst, SST_HMDC,
			SST_HMDC_HDDA_E0_ALLCH | SST_HMDC_HDDA_E1_ALLCH, 0);

	/* set DSP to RUN */
	sst_dsp_shim_update_bits_unlocked(sst, SST_CSR, SST_CSR_STALL, 0x0);
}
static void hsw_stall(struct sst_dsp *sst)
{
	sst_dsp_shim_update_bits(sst, SST_CSR,
		SST_CSR_24MHZ_LPCS | SST_CSR_STALL,
		SST_CSR_STALL | SST_CSR_24MHZ_LPCS);
}
static void hsw_sleep(struct sst_dsp *sst)
{
	dev_dbg(sst->dev, "HSW_PM dsp runtime suspend\n");

	/* put DSP into reset and stall */
	sst_dsp_shim_update_bits(sst, SST_CSR,
		SST_CSR_24MHZ_LPCS | SST_CSR_RST | SST_CSR_STALL,
		SST_CSR_RST | SST_CSR_STALL | SST_CSR_24MHZ_LPCS);

	hsw_set_dsp_D3(sst);
	dev_dbg(sst->dev, "HSW_PM dsp runtime suspend exit\n");
}
static int hsw_wake(struct sst_dsp *sst)
{
	int ret;

	dev_dbg(sst->dev, "HSW_PM dsp runtime resume\n");

	ret = hsw_set_dsp_D0(sst);
	if (ret < 0)
		return ret;

	dev_dbg(sst->dev, "HSW_PM dsp runtime resume exit\n");

	return 0;
}
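
/*
 * The ADSP SRAM is managed as 32kB blocks: each region below is split into
 * region->blocks equal blocks by hsw_init(), e.g. LP D-SRAM0 spans
 * 0x00000-0x40000 in 8 blocks of 0x8000 (32kB) each.
 */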
struct sst_adsp_memregion {
	u32 start;
	u32 end;
	int blocks;
	enum sst_mem_type type;
};
/* lynx point ADSP mem regions */
static const struct sst_adsp_memregion lp_region[] = {
	{0x00000, 0x40000, 8, SST_MEM_DRAM},	/* D-SRAM0 - 8 * 32kB */
	{0x40000, 0x80000, 8, SST_MEM_DRAM},	/* D-SRAM1 - 8 * 32kB */
	{0x80000, 0xE0000, 12, SST_MEM_IRAM},	/* I-SRAM - 12 * 32kB */
};

/* wild cat point ADSP mem regions */
static const struct sst_adsp_memregion wpt_region[] = {
	{0x00000, 0xA0000, 20, SST_MEM_DRAM},	/* D-SRAM0,D-SRAM1,D-SRAM2 - 20 * 32kB */
	{0xA0000, 0xF0000, 10, SST_MEM_IRAM},	/* I-SRAM - 10 * 32kB */
};
static int hsw_acpi_resource_map(struct sst_dsp *sst, struct sst_pdata *pdata)
{
	/* ADSP DRAM & IRAM */
	sst->addr.lpe_base = pdata->lpe_base;
	sst->addr.lpe = ioremap(pdata->lpe_base, pdata->lpe_size);
	if (!sst->addr.lpe)
		return -ENODEV;

	/* ADSP PCI MMIO config space */
	sst->addr.pci_cfg = ioremap(pdata->pcicfg_base, pdata->pcicfg_size);
	if (!sst->addr.pci_cfg) {
		iounmap(sst->addr.lpe);
		return -ENODEV;
	}

	/* SST Shim */
	sst->addr.shim = sst->addr.lpe + sst->addr.shim_offset;
	return 0;
}
struct sst_sram_shift {
	u32 dev_id;		/* SST Device IDs */
	u32 iram_shift;
	u32 dram_shift;
};

static const struct sst_sram_shift sram_shift[] = {
	{SST_DEV_ID_LYNX_POINT, 6, 16},		/* lp */
	{SST_DEV_ID_WILDCAT_POINT, 2, 12},	/* wpt */
};
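
/*
 * Each 32kB SRAM block has a power-gate enable bit in VDRTCTL0. The bit
 * position is the block index plus a per-device, per-memory-type shift from
 * sram_shift[] above, e.g. Wildcat Point DRAM block 0 maps to bit 12.
 */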
static u32 hsw_block_get_bit(struct sst_mem_block *block)
{
	u32 bit = 0, shift = 0, index;
	struct sst_dsp *sst = block->dsp;

	for (index = 0; index < ARRAY_SIZE(sram_shift); index++) {
		if (sram_shift[index].dev_id == sst->id)
			break;
	}

	if (index < ARRAY_SIZE(sram_shift)) {
		switch (block->type) {
		case SST_MEM_DRAM:
			shift = sram_shift[index].dram_shift;
			break;
		case SST_MEM_IRAM:
			shift = sram_shift[index].iram_shift;
			break;
		default:
			shift = 0;
		}
	} else
		shift = 0;

	bit = 1 << (block->index + shift);

	return bit;
}
/* dummy read a SRAM block */
static void sst_mem_block_dummy_read(struct sst_mem_block *block)
{
	u32 size;
	u8 tmp_buf[4];
	struct sst_dsp *sst = block->dsp;

	size = block->size > 4 ? 4 : block->size;
	memcpy_fromio(tmp_buf, sst->addr.lpe + block->offset, size);
}
/* enable 32kB memory block - locks held by caller */
static int hsw_block_enable(struct sst_mem_block *block)
{
	struct sst_dsp *sst = block->dsp;
	u32 bit, val;

	if (block->users++ > 0)
		return 0;

	dev_dbg(block->dsp->dev, " enabled block %d:%d at offset 0x%x\n",
		block->type, block->index, block->offset);

	/* Disable core clock gating (VDRTCTL2.DCLCGE = 0) */
	val = readl(sst->addr.pci_cfg + SST_VDRTCTL2);
	val &= ~SST_VDRTCL2_DCLCGE;
	writel(val, sst->addr.pci_cfg + SST_VDRTCTL2);

	val = readl(sst->addr.pci_cfg + SST_VDRTCTL0);
	bit = hsw_block_get_bit(block);
	writel(val & ~bit, sst->addr.pci_cfg + SST_VDRTCTL0);

	/* wait 18 DSP clock ticks */
	udelay(10);

	/* Enable core clock gating (VDRTCTL2.DCLCGE = 1), delay 50 us */
	val = readl(sst->addr.pci_cfg + SST_VDRTCTL2);
	val |= SST_VDRTCL2_DCLCGE;
	writel(val, sst->addr.pci_cfg + SST_VDRTCTL2);

	udelay(50);

	/*
	 * Do a dummy read before the SRAM block is written; otherwise the
	 * write may occasionally drop bytes.
	 */
	sst_mem_block_dummy_read(block);
	return 0;
}
/* disable 32kB memory block - locks held by caller */
static int hsw_block_disable(struct sst_mem_block *block)
{
	struct sst_dsp *sst = block->dsp;
	u32 bit, val;

	if (--block->users > 0)
		return 0;

	dev_dbg(block->dsp->dev, " disabled block %d:%d at offset 0x%x\n",
		block->type, block->index, block->offset);

	/* Disable core clock gating (VDRTCTL2.DCLCGE = 0) */
	val = readl(sst->addr.pci_cfg + SST_VDRTCTL2);
	val &= ~SST_VDRTCL2_DCLCGE;
	writel(val, sst->addr.pci_cfg + SST_VDRTCTL2);

	val = readl(sst->addr.pci_cfg + SST_VDRTCTL0);
	bit = hsw_block_get_bit(block);
	writel(val | bit, sst->addr.pci_cfg + SST_VDRTCTL0);

	/* wait 18 DSP clock ticks */
	udelay(10);

	/* Enable core clock gating (VDRTCTL2.DCLCGE = 1), delay 50 us */
	val = readl(sst->addr.pci_cfg + SST_VDRTCTL2);
	val |= SST_VDRTCL2_DCLCGE;
	writel(val, sst->addr.pci_cfg + SST_VDRTCTL2);

	udelay(50);

	return 0;
}
static struct sst_block_ops sst_hsw_ops = {
	.enable = hsw_block_enable,
	.disable = hsw_block_disable,
};
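
/*
 * One-time DSP init: pick the per-device memory map and offsets, map the
 * ACPI/PCI resources, bring the shim to D0, then register every 32kB SRAM
 * block with the block allocator and leave all blocks power-gated.
 */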
static int hsw_init(struct sst_dsp *sst, struct sst_pdata *pdata)
{
	const struct sst_adsp_memregion *region;
	struct device *dev;
	int ret = -ENODEV, i, j, region_count;
	u32 offset, size;

	dev = sst->dma_dev;

	switch (sst->id) {
	case SST_DEV_ID_LYNX_POINT:
		region = lp_region;
		region_count = ARRAY_SIZE(lp_region);
		sst->addr.iram_offset = SST_LP_IRAM_OFFSET;
		sst->addr.dsp_iram_offset = SST_LPT_DSP_IRAM_OFFSET;
		sst->addr.dsp_dram_offset = SST_LPT_DSP_DRAM_OFFSET;
		sst->addr.shim_offset = SST_LP_SHIM_OFFSET;
		break;
	case SST_DEV_ID_WILDCAT_POINT:
		region = wpt_region;
		region_count = ARRAY_SIZE(wpt_region);
		sst->addr.iram_offset = SST_WPT_IRAM_OFFSET;
		sst->addr.dsp_iram_offset = SST_WPT_DSP_IRAM_OFFSET;
		sst->addr.dsp_dram_offset = SST_WPT_DSP_DRAM_OFFSET;
		sst->addr.shim_offset = SST_WPT_SHIM_OFFSET;
		break;
	default:
		dev_err(dev, "error: failed to get mem resources\n");
		return -ENODEV;
	}

	ret = hsw_acpi_resource_map(sst, pdata);
	if (ret < 0) {
		dev_err(dev, "error: failed to map resources\n");
		return ret;
	}

	/* enable the DSP SHIM */
	ret = hsw_set_dsp_D0(sst);
	if (ret < 0) {
		dev_err(dev, "error: failed to set DSP D0 and reset SHIM\n");
		return ret;
	}

	ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(31));
	if (ret)
		return ret;

	/* register DSP memory blocks - ideally we should get this from ACPI */
	for (i = 0; i < region_count; i++) {
		offset = region[i].start;
		size = (region[i].end - region[i].start) / region[i].blocks;

		/* register individual memory blocks */
		for (j = 0; j < region[i].blocks; j++) {
			sst_mem_block_register(sst, offset, size,
				region[i].type, &sst_hsw_ops, j, sst);
			offset += size;
		}
	}

	/*
	 * Set default power gating control: power gate all SRAM blocks, i.e.
	 * they cannot be accessed until each block is enabled individually.
	 */
	writel(0xffffffff, sst->addr.pci_cfg + SST_VDRTCTL0);

	return 0;
}
static void hsw_free(struct sst_dsp *sst)
{
	sst_mem_block_unregister_all(sst);
	iounmap(sst->addr.lpe);
	iounmap(sst->addr.pci_cfg);
}
struct sst_ops haswell_ops = {
	.reset = hsw_reset,
	.boot = hsw_boot,
	.stall = hsw_stall,
	.wake = hsw_wake,
	.sleep = hsw_sleep,
	.write = sst_shim32_write,
	.read = sst_shim32_read,
	.write64 = sst_shim32_write64,
	.read64 = sst_shim32_read64,
	.ram_read = sst_memcpy_fromio_32,
	.ram_write = sst_memcpy_toio_32,
	.irq_handler = hsw_irq,
	.init = hsw_init,
	.free = hsw_free,
	.parse_fw = hsw_parse_fw_image,
};
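
/*
 * Usage sketch (an assumption, not part of this file): the Haswell IPC
 * driver is expected to reference this ops table from its struct
 * sst_dsp_device and hand it to the generic SST core, roughly:
 *
 *	static struct sst_dsp_device hsw_dev = {
 *		.thread	= hsw_irq_thread,
 *		.ops	= &haswell_ops,
 *	};
 *
 *	hsw->dsp = sst_dsp_new(dev, &hsw_dev, pdata);
 *
 * sst_dsp_new() would then call .init (hsw_init) and install .irq_handler
 * (hsw_irq) together with the threaded IPC handler.
 */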