// SPDX-License-Identifier: GPL-2.0-only
//
// Copyright(c) 2020 Intel Corporation. All rights reserved.
//
// Author: Cezary Rojewski <cezary.rojewski@intel.com>
//

#include <linux/devcoredump.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/pci.h>
#include <linux/pxa2xx_ssp.h>
#include "core.h"
#include "messages.h"
#include "registers.h"

static bool catpt_dma_filter(struct dma_chan *chan, void *param)
{
	return param == chan->device->dev;
}

/*
 * Either engine 0 or 1 can be used for image loading.
 * Align with the Windows driver equivalent and stick to engine 1.
 */
#define CATPT_DMA_DEVID		1
#define CATPT_DMA_DSP_ADDR_MASK	GENMASK(31, 20)

struct dma_chan *catpt_dma_request_config_chan(struct catpt_dev *cdev)
{
	struct dma_slave_config config;
	struct dma_chan *chan;
	dma_cap_mask_t mask;
	int ret;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	chan = dma_request_channel(mask, catpt_dma_filter, cdev->dev);
	if (!chan) {
		dev_err(cdev->dev, "request channel failed\n");
		return ERR_PTR(-ENODEV);
	}

	memset(&config, 0, sizeof(config));
	config.direction = DMA_MEM_TO_DEV;
	config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	config.src_maxburst = 16;
	config.dst_maxburst = 16;

	ret = dmaengine_slave_config(chan, &config);
	if (ret) {
		dev_err(cdev->dev, "slave config failed: %d\n", ret);
		dma_release_channel(chan);
		return ERR_PTR(ret);
	}

	return chan;
}

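/*
 * Demand-mode gating: host memory access (HDDA) is enabled for the selected
 * channel only for the duration of a single transfer and is cleared again
 * afterwards regardless of the transfer status.
 */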
static int catpt_dma_memcpy(struct catpt_dev *cdev, struct dma_chan *chan,
			    dma_addr_t dst_addr, dma_addr_t src_addr,
			    size_t size)
{
	struct dma_async_tx_descriptor *desc;
	enum dma_status status;

	desc = dmaengine_prep_dma_memcpy(chan, dst_addr, src_addr, size,
					 DMA_CTRL_ACK);
	if (!desc) {
		dev_err(cdev->dev, "prep dma memcpy failed\n");
		return -EIO;
	}

	/* enable demand mode for dma channel */
	catpt_updatel_shim(cdev, HMDC,
			   CATPT_HMDC_HDDA(CATPT_DMA_DEVID, chan->chan_id),
			   CATPT_HMDC_HDDA(CATPT_DMA_DEVID, chan->chan_id));
	dmaengine_submit(desc);
	status = dma_wait_for_async_tx(desc);
	/* regardless of status, disable access to HOST memory in demand mode */
	catpt_updatel_shim(cdev, HMDC,
			   CATPT_HMDC_HDDA(CATPT_DMA_DEVID, chan->chan_id), 0);

	return (status == DMA_COMPLETE) ? 0 : -EPROTO;
}

int catpt_dma_memcpy_todsp(struct catpt_dev *cdev, struct dma_chan *chan,
			   dma_addr_t dst_addr, dma_addr_t src_addr,
			   size_t size)
{
	return catpt_dma_memcpy(cdev, chan, dst_addr | CATPT_DMA_DSP_ADDR_MASK,
				src_addr, size);
}

int catpt_dma_memcpy_fromdsp(struct catpt_dev *cdev, struct dma_chan *chan,
			     dma_addr_t dst_addr, dma_addr_t src_addr,
			     size_t size)
{
	return catpt_dma_memcpy(cdev, chan, dst_addr,
				src_addr | CATPT_DMA_DSP_ADDR_MASK, size);
}

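/*
 * The DesignWare DMA controller used for the transfers above lives within
 * the LPE BAR; its registers sit at host_dma_offset[CATPT_DMA_DEVID] relative
 * to lpe_ba, so no separate resource needs to be mapped here.
 */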
int catpt_dmac_probe(struct catpt_dev *cdev)
{
	struct dw_dma_chip *dmac;
	int ret;

	dmac = devm_kzalloc(cdev->dev, sizeof(*dmac), GFP_KERNEL);
	if (!dmac)
		return -ENOMEM;

	dmac->regs = cdev->lpe_ba + cdev->spec->host_dma_offset[CATPT_DMA_DEVID];
	dmac->dev = cdev->dev;
	dmac->irq = cdev->irq;

	ret = dma_coerce_mask_and_coherent(cdev->dev, DMA_BIT_MASK(31));
	if (ret)
		return ret;
	/*
	 * Caller is responsible for putting device in D0 to allow
	 * for I/O and memory access before probing DW.
	 */
	ret = dw_dma_probe(dmac);
	if (ret)
		return ret;

	cdev->dmac = dmac;
	return 0;
}

void catpt_dmac_remove(struct catpt_dev *cdev)
{
	/*
	 * As do_dma_remove() juggles with pm_runtime_get_xxx() and
	 * pm_runtime_put_xxx() while both ADSP and DW 'devices' are part of
	 * the same module, caller makes sure pm_runtime_disable() is invoked
	 * before removing DW to prevent postmortem resume and suspend.
	 */
	dw_dma_remove(cdev->dmac);
}

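/*
 * SRAMPGE handling: each bit of @mask in VDRTCTL0 power-gates one
 * CATPT_MEMBLOCK_SIZE block of the given SRAM resource, with a cleared bit
 * meaning the block is powered on. Freshly enabled blocks get a dummy read
 * so that subsequent accesses do not lose bytes.
 */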
static void catpt_dsp_set_srampge(struct catpt_dev *cdev, struct resource *sram,
				  unsigned long mask, unsigned long new)
{
	unsigned long old, b = __ffs(mask);
	u32 off = sram->start;

	old = catpt_readl_pci(cdev, VDRTCTL0) & mask;
	dev_dbg(cdev->dev, "SRAMPGE [0x%08lx] 0x%08lx -> 0x%08lx",
		mask, old, new);

	if (old == new)
		return;

	catpt_updatel_pci(cdev, VDRTCTL0, mask, new);
	/* wait for SRAM power gating to propagate */
	udelay(60);

	/*
	 * Dummy read as the very first access after block enable
	 * to prevent byte loss in future operations.
	 */
	for_each_clear_bit_from(b, &new, fls_long(mask)) {
		u8 buf[4];

		/* newly enabled: new bit=0 while old bit=1 */
		if (test_bit(b, &old)) {
			dev_dbg(cdev->dev, "sanitize block %ld: off 0x%08x\n",
				b - __ffs(mask), off);
			memcpy_fromio(buf, cdev->lpe_ba + off, sizeof(buf));
		}

		off += CATPT_MEMBLOCK_SIZE;
	}
}

void catpt_dsp_update_srampge(struct catpt_dev *cdev, struct resource *sram,
			      unsigned long mask)
{
	struct resource *res;
	unsigned long new = 0;

	/* flag all busy blocks */
	for (res = sram->child; res; res = res->sibling) {
		u32 h, l;

		h = (res->end - sram->start) / CATPT_MEMBLOCK_SIZE;
		l = (res->start - sram->start) / CATPT_MEMBLOCK_SIZE;
		new |= GENMASK(h, l);
	}

	/* shift to the mask's start bit and invert, as ON=b0 */
	new = ~(new << __ffs(mask)) & mask;

	/* disable core clock gating */
	catpt_updatel_pci(cdev, VDRTCTL2, CATPT_VDRTCTL2_DCLCGE, 0);

	catpt_dsp_set_srampge(cdev, sram, mask, new);

	/* enable core clock gating */
	catpt_updatel_pci(cdev, VDRTCTL2, CATPT_VDRTCTL2_DCLCGE,
			  CATPT_VDRTCTL2_DCLCGE);
}

int catpt_dsp_stall(struct catpt_dev *cdev, bool stall)
{
	u32 reg, val;

	val = stall ? CATPT_CS_STALL : 0;
	catpt_updatel_shim(cdev, CS1, CATPT_CS_STALL, val);

	return catpt_readl_poll_shim(cdev, CS1,
				     reg, (reg & CATPT_CS_STALL) == val,
				     500, 10000);
}

static int catpt_dsp_reset(struct catpt_dev *cdev, bool reset)
{
	u32 reg, val;

	val = reset ? CATPT_CS_RST : 0;
	catpt_updatel_shim(cdev, CS1, CATPT_CS_RST, val);

	return catpt_readl_poll_shim(cdev, CS1,
				     reg, (reg & CATPT_CS_RST) == val,
				     500, 10000);
}

void lpt_dsp_pll_shutdown(struct catpt_dev *cdev, bool enable)
{
	u32 val;

	val = enable ? LPT_VDRTCTL0_APLLSE : 0;
	catpt_updatel_pci(cdev, VDRTCTL0, LPT_VDRTCTL0_APLLSE, val);
}

void wpt_dsp_pll_shutdown(struct catpt_dev *cdev, bool enable)
{
	u32 val;

	val = enable ? WPT_VDRTCTL2_APLLSE : 0;
	catpt_updatel_pci(cdev, VDRTCTL2, WPT_VDRTCTL2_APLLSE, val);
}

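/*
 * Clock selection is serialized with clk_mutex. With @waiti set, the routine
 * first waits for the DSP to signal the WAIT state via ISD:DCPWM; if that
 * never arrives, only switching back to the high clock is permitted.
 */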
static int catpt_dsp_select_lpclock(struct catpt_dev *cdev, bool lp, bool waiti)
{
	u32 mask, reg, val;
	int ret;

	mutex_lock(&cdev->clk_mutex);

	val = lp ? CATPT_CS_LPCS : 0;
	reg = catpt_readl_shim(cdev, CS1) & CATPT_CS_LPCS;
	dev_dbg(cdev->dev, "LPCS [0x%08lx] 0x%08x -> 0x%08x",
		CATPT_CS_LPCS, reg, val);

	if (reg == val) {
		mutex_unlock(&cdev->clk_mutex);
		return 0;
	}

	if (waiti) {
		/* wait for DSP to signal WAIT state */
		ret = catpt_readl_poll_shim(cdev, ISD,
					    reg, (reg & CATPT_ISD_DCPWM),
					    500, 10000);
		if (ret) {
			dev_warn(cdev->dev, "await WAITI timeout\n");
			/* no signal - only high clock selection allowed */
			if (lp) {
				mutex_unlock(&cdev->clk_mutex);
				return 0;
			}
		}
	}

	ret = catpt_readl_poll_shim(cdev, CLKCTL,
				    reg, !(reg & CATPT_CLKCTL_CFCIP),
				    500, 10000);
	if (ret)
		dev_warn(cdev->dev, "clock change still in progress\n");

	/* default to DSP core & audio fabric high clock */
	val |= CATPT_CS_DCS_HIGH;
	mask = CATPT_CS_LPCS | CATPT_CS_DCS;
	catpt_updatel_shim(cdev, CS1, mask, val);

	ret = catpt_readl_poll_shim(cdev, CLKCTL,
				    reg, !(reg & CATPT_CLKCTL_CFCIP),
				    500, 10000);
	if (ret)
		dev_warn(cdev->dev, "clock change still in progress\n");

	/* update PLL accordingly */
	cdev->spec->pll_shutdown(cdev, lp);

	mutex_unlock(&cdev->clk_mutex);
	return 0;
}

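/* Select the low-power clock only when no stream is currently prepared. */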
int catpt_dsp_update_lpclock(struct catpt_dev *cdev)
{
	struct catpt_stream_runtime *stream;

	list_for_each_entry(stream, &cdev->stream_list, node)
		if (stream->prepared)
			return catpt_dsp_select_lpclock(cdev, false, true);

	return catpt_dsp_select_lpclock(cdev, true, true);
}

/* bring registers to their defaults as HW won't reset itself */
static void catpt_dsp_set_regs_defaults(struct catpt_dev *cdev)
{
	int i;

	catpt_writel_shim(cdev, CS1, CATPT_CS_DEFAULT);
	catpt_writel_shim(cdev, ISC, CATPT_ISC_DEFAULT);
	catpt_writel_shim(cdev, ISD, CATPT_ISD_DEFAULT);
	catpt_writel_shim(cdev, IMC, CATPT_IMC_DEFAULT);
	catpt_writel_shim(cdev, IMD, CATPT_IMD_DEFAULT);
	catpt_writel_shim(cdev, IPCC, CATPT_IPCC_DEFAULT);
	catpt_writel_shim(cdev, IPCD, CATPT_IPCD_DEFAULT);
	catpt_writel_shim(cdev, CLKCTL, CATPT_CLKCTL_DEFAULT);
	catpt_writel_shim(cdev, CS2, CATPT_CS2_DEFAULT);
	catpt_writel_shim(cdev, LTRC, CATPT_LTRC_DEFAULT);
	catpt_writel_shim(cdev, HMDC, CATPT_HMDC_DEFAULT);

	for (i = 0; i < CATPT_SSP_COUNT; i++) {
		catpt_writel_ssp(cdev, i, SSCR0, CATPT_SSC0_DEFAULT);
		catpt_writel_ssp(cdev, i, SSCR1, CATPT_SSC1_DEFAULT);
		catpt_writel_ssp(cdev, i, SSSR, CATPT_SSS_DEFAULT);
		catpt_writel_ssp(cdev, i, SSITR, CATPT_SSIT_DEFAULT);
		catpt_writel_ssp(cdev, i, SSDR, CATPT_SSD_DEFAULT);
		catpt_writel_ssp(cdev, i, SSTO, CATPT_SSTO_DEFAULT);
		catpt_writel_ssp(cdev, i, SSPSP, CATPT_SSPSP_DEFAULT);
		catpt_writel_ssp(cdev, i, SSTSA, CATPT_SSTSA_DEFAULT);
		catpt_writel_ssp(cdev, i, SSRSA, CATPT_SSRSA_DEFAULT);
		catpt_writel_ssp(cdev, i, SSTSS, CATPT_SSTSS_DEFAULT);
		catpt_writel_ssp(cdev, i, SSCR2, CATPT_SSCR2_DEFAULT);
		catpt_writel_ssp(cdev, i, SSPSP2, CATPT_SSPSP2_DEFAULT);
	}
}

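/*
 * Power-down sequence: with core clock gating temporarily disabled, the DSP
 * is held in reset, clocks are switched to low power, registers are restored
 * to their defaults, all SRAM blocks are power-gated and the device is
 * finally moved to D3hot.
 */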
int catpt_dsp_power_down(struct catpt_dev *cdev)
{
	u32 mask, val;

	/* disable core clock gating */
	catpt_updatel_pci(cdev, VDRTCTL2, CATPT_VDRTCTL2_DCLCGE, 0);

	catpt_dsp_reset(cdev, true);
	/* set 24 MHz clock for both SSPs */
	catpt_updatel_shim(cdev, CS1, CATPT_CS_SBCS(0) | CATPT_CS_SBCS(1),
			   CATPT_CS_SBCS(0) | CATPT_CS_SBCS(1));
	catpt_dsp_select_lpclock(cdev, true, false);
	/* disable MCLK */
	catpt_updatel_shim(cdev, CLKCTL, CATPT_CLKCTL_SMOS, 0);

	catpt_dsp_set_regs_defaults(cdev);

	/* switch clock gating */
	mask = CATPT_VDRTCTL2_CGEALL & (~CATPT_VDRTCTL2_DCLCGE);
	val = mask & (~CATPT_VDRTCTL2_DTCGE);
	catpt_updatel_pci(cdev, VDRTCTL2, mask, val);
	/* enable DTCGE separately */
	catpt_updatel_pci(cdev, VDRTCTL2, CATPT_VDRTCTL2_DTCGE,
			  CATPT_VDRTCTL2_DTCGE);

	/* SRAM power gating all */
	catpt_dsp_set_srampge(cdev, &cdev->dram, cdev->spec->dram_mask,
			      cdev->spec->dram_mask);
	catpt_dsp_set_srampge(cdev, &cdev->iram, cdev->spec->iram_mask,
			      cdev->spec->iram_mask);
	mask = cdev->spec->d3srampgd_bit | cdev->spec->d3pgd_bit;
	catpt_updatel_pci(cdev, VDRTCTL0, mask, cdev->spec->d3pgd_bit);

	catpt_updatel_pci(cdev, PMCS, PCI_PM_CTRL_STATE_MASK, PCI_D3hot);
	/* give hw time to drop off */
	udelay(50);

	/* enable core clock gating */
	catpt_updatel_pci(cdev, VDRTCTL2, CATPT_VDRTCTL2_DCLCGE,
			  CATPT_VDRTCTL2_DCLCGE);
	udelay(50);

	return 0;
}

int catpt_dsp_power_up(struct catpt_dev *cdev)
{
	u32 mask, val;

	/* disable core clock gating */
	catpt_updatel_pci(cdev, VDRTCTL2, CATPT_VDRTCTL2_DCLCGE, 0);

	/* switch clock gating */
	mask = CATPT_VDRTCTL2_CGEALL & (~CATPT_VDRTCTL2_DCLCGE);
	val = mask & (~CATPT_VDRTCTL2_DTCGE);
	catpt_updatel_pci(cdev, VDRTCTL2, mask, val);

	catpt_updatel_pci(cdev, PMCS, PCI_PM_CTRL_STATE_MASK, PCI_D0);

	/* SRAM power gating none */
	mask = cdev->spec->d3srampgd_bit | cdev->spec->d3pgd_bit;
	catpt_updatel_pci(cdev, VDRTCTL0, mask, mask);
	catpt_dsp_set_srampge(cdev, &cdev->dram, cdev->spec->dram_mask, 0);
	catpt_dsp_set_srampge(cdev, &cdev->iram, cdev->spec->iram_mask, 0);

	catpt_dsp_set_regs_defaults(cdev);

	/* restore MCLK */
	catpt_updatel_shim(cdev, CLKCTL, CATPT_CLKCTL_SMOS, CATPT_CLKCTL_SMOS);
	catpt_dsp_select_lpclock(cdev, false, false);
	/* set 24 MHz clock for both SSPs */
	catpt_updatel_shim(cdev, CS1, CATPT_CS_SBCS(0) | CATPT_CS_SBCS(1),
			   CATPT_CS_SBCS(0) | CATPT_CS_SBCS(1));
	catpt_dsp_reset(cdev, false);

	/* enable core clock gating */
	catpt_updatel_pci(cdev, VDRTCTL2, CATPT_VDRTCTL2_DCLCGE,
			  CATPT_VDRTCTL2_DCLCGE);

	/* generate int deassert msg to fix inverted int logic */
	catpt_updatel_shim(cdev, IMC, CATPT_IMC_IPCDB | CATPT_IMC_IPCCD, 0);

	return 0;
}

#define CATPT_DUMP_MAGIC		0xcd42
#define CATPT_DUMP_SECTION_ID_FILE	0x00
#define CATPT_DUMP_SECTION_ID_IRAM	0x01
#define CATPT_DUMP_SECTION_ID_DRAM	0x02
#define CATPT_DUMP_SECTION_ID_REGS	0x03
#define CATPT_DUMP_HASH_SIZE		20

struct catpt_dump_section_hdr {
	u16 magic;
	u8 core_id;
	u8 section_id;
	u32 size;
};

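/*
 * Coredump layout: a FILE header followed by the firmware hash chunk, then
 * IRAM, DRAM and REGS sections, each preceded by its own section header.
 * The REGS section holds the SHIM block followed by per-SSP and per-DMA
 * register spaces.
 */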
int catpt_coredump(struct catpt_dev *cdev)
{
	struct catpt_dump_section_hdr *hdr;
	size_t dump_size, regs_size;
	u8 *dump, *pos;
	char *info, *eof;
	int i;

	regs_size = CATPT_SHIM_REGS_SIZE;
	regs_size += CATPT_DMA_COUNT * CATPT_DMA_REGS_SIZE;
	regs_size += CATPT_SSP_COUNT * CATPT_SSP_REGS_SIZE;
	dump_size = resource_size(&cdev->dram);
	dump_size += resource_size(&cdev->iram);
	dump_size += regs_size;
	/* account for header of each section and hash chunk */
	dump_size += 4 * sizeof(*hdr) + CATPT_DUMP_HASH_SIZE;

	dump = vzalloc(dump_size);
	if (!dump)
		return -ENOMEM;
	pos = dump;

	hdr = (struct catpt_dump_section_hdr *)pos;
	hdr->magic = CATPT_DUMP_MAGIC;
	hdr->core_id = cdev->spec->core_id;
	hdr->section_id = CATPT_DUMP_SECTION_ID_FILE;
	hdr->size = dump_size - sizeof(*hdr);
	pos += sizeof(*hdr);

	info = cdev->ipc.config.fw_info;
	eof = info + FW_INFO_SIZE_MAX;
	/* navigate to fifth info segment (fw hash) */
	for (i = 0; i < 4 && info < eof; i++, info++) {
		/* info segments are separated by space each */
		info = strnchr(info, eof - info, ' ');
		if (!info)
			break;
	}

	if (i == 4 && info)
		memcpy(pos, info, min_t(u32, eof - info, CATPT_DUMP_HASH_SIZE));
	pos += CATPT_DUMP_HASH_SIZE;

	hdr = (struct catpt_dump_section_hdr *)pos;
	hdr->magic = CATPT_DUMP_MAGIC;
	hdr->core_id = cdev->spec->core_id;
	hdr->section_id = CATPT_DUMP_SECTION_ID_IRAM;
	hdr->size = resource_size(&cdev->iram);
	pos += sizeof(*hdr);

	memcpy_fromio(pos, cdev->lpe_ba + cdev->iram.start, hdr->size);
	pos += hdr->size;

	hdr = (struct catpt_dump_section_hdr *)pos;
	hdr->magic = CATPT_DUMP_MAGIC;
	hdr->core_id = cdev->spec->core_id;
	hdr->section_id = CATPT_DUMP_SECTION_ID_DRAM;
	hdr->size = resource_size(&cdev->dram);
	pos += sizeof(*hdr);

	memcpy_fromio(pos, cdev->lpe_ba + cdev->dram.start, hdr->size);
	pos += hdr->size;

	hdr = (struct catpt_dump_section_hdr *)pos;
	hdr->magic = CATPT_DUMP_MAGIC;
	hdr->core_id = cdev->spec->core_id;
	hdr->section_id = CATPT_DUMP_SECTION_ID_REGS;
	hdr->size = regs_size;
	pos += sizeof(*hdr);

	memcpy_fromio(pos, catpt_shim_addr(cdev), CATPT_SHIM_REGS_SIZE);
	pos += CATPT_SHIM_REGS_SIZE;

	for (i = 0; i < CATPT_SSP_COUNT; i++) {
		memcpy_fromio(pos, catpt_ssp_addr(cdev, i),
			      CATPT_SSP_REGS_SIZE);
		pos += CATPT_SSP_REGS_SIZE;
	}
	for (i = 0; i < CATPT_DMA_COUNT; i++) {
		memcpy_fromio(pos, catpt_dma_addr(cdev, i),
			      CATPT_DMA_REGS_SIZE);
		pos += CATPT_DMA_REGS_SIZE;
	}

	dev_coredumpv(cdev->dev, dump, dump_size, GFP_KERNEL);

	return 0;
}