/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/irqdomain.h>
#include <linux/pm_domain.h>
#include <linux/platform_device.h>
#include <sound/designware_i2s.h>
#include <sound/pcm.h>

#include "amdgpu.h"
#include "amdgpu_acp.h"

#include "acp_gfx_if.h"

#define ACP_TILE_ON_MASK			0x03
#define ACP_TILE_OFF_MASK			0x02
#define ACP_TILE_ON_RETAIN_REG_MASK		0x1f
#define ACP_TILE_OFF_RETAIN_REG_MASK		0x20

#define ACP_TILE_P1_MASK			0x3e
#define ACP_TILE_P2_MASK			0x3d
#define ACP_TILE_DSP0_MASK			0x3b
#define ACP_TILE_DSP1_MASK			0x37
#define ACP_TILE_DSP2_MASK			0x2f

#define ACP_DMA_REGS_END			0x146c0
#define ACP_I2S_PLAY_REGS_START			0x14840
#define ACP_I2S_PLAY_REGS_END			0x148b4
#define ACP_I2S_CAP_REGS_START			0x148b8
#define ACP_I2S_CAP_REGS_END			0x1496c

#define ACP_I2S_COMP1_CAP_REG_OFFSET		0xac
#define ACP_I2S_COMP2_CAP_REG_OFFSET		0xa8
#define ACP_I2S_COMP1_PLAY_REG_OFFSET		0x6c
#define ACP_I2S_COMP2_PLAY_REG_OFFSET		0x68
#define ACP_BT_PLAY_REGS_START			0x14970
#define ACP_BT_PLAY_REGS_END			0x14a24
#define ACP_BT_COMP1_REG_OFFSET			0xac
#define ACP_BT_COMP2_REG_OFFSET			0xa8
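
/*
 * Power-gating state machine (PGFSM) registers: acp_suspend_tile() and
 * acp_resume_tile() write a request to the CONFIG register, poll the
 * per-tile state through READ_REG_0 + tile, and manage state retention
 * through the RETAIN register.
 */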
#define mmACP_PGFSM_RETAIN_REG			0x51c9
#define mmACP_PGFSM_CONFIG_REG			0x51ca
#define mmACP_PGFSM_READ_REG_0			0x51cc

#define mmACP_MEM_SHUT_DOWN_REQ_LO		0x51f8
#define mmACP_MEM_SHUT_DOWN_REQ_HI		0x51f9
#define mmACP_MEM_SHUT_DOWN_STS_LO		0x51fa
#define mmACP_MEM_SHUT_DOWN_STS_HI		0x51fb

#define mmACP_CONTROL				0x5131
#define mmACP_STATUS				0x5133
#define mmACP_SOFT_RESET			0x5134
#define ACP_CONTROL__ClkEn_MASK			0x1
#define ACP_SOFT_RESET__SoftResetAud_MASK	0x100
#define ACP_SOFT_RESET__SoftResetAudDone_MASK	0x1000000
#define ACP_CLOCK_EN_TIME_OUT_VALUE		0x000000FF
#define ACP_SOFT_RESET_DONE_TIME_OUT_VALUE	0x000000FF

#define ACP_TIMEOUT_LOOP			0x000000FF
#define ACP_DEVS				4
#define ACP_SRC_ID				162

/* Tile indices line up with the per-tile PGFSM register offsets */
enum {
	ACP_TILE_P1 = 0,
	ACP_TILE_P2,
	ACP_TILE_DSP0,
	ACP_TILE_DSP1,
	ACP_TILE_DSP2,
};

static int acp_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->acp.parent = adev->dev;

	adev->acp.cgs_device =
		amdgpu_cgs_create_device(adev);
	if (!adev->acp.cgs_device)
		return -EINVAL;

	return 0;
}

static int acp_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->acp.cgs_device)
		amdgpu_cgs_destroy_device(adev->acp.cgs_device);

	return 0;
}

/* power off a tile/block within ACP */
static int acp_suspend_tile(void *cgs_dev, int tile)
{
	u32 val = 0;
	u32 count = 0;

	if ((tile < ACP_TILE_P1) || (tile > ACP_TILE_DSP2)) {
		pr_err("Invalid ACP tile : %d to suspend\n", tile);
		return -1;
	}

	val = cgs_read_register(cgs_dev, mmACP_PGFSM_READ_REG_0 + tile);
	val &= ACP_TILE_ON_MASK;
	if (val == 0x0) {
		/* tile reads back as powered on; request power-down */
		val = cgs_read_register(cgs_dev, mmACP_PGFSM_RETAIN_REG);
		val = val | (1 << tile);
		cgs_write_register(cgs_dev, mmACP_PGFSM_RETAIN_REG, val);
		cgs_write_register(cgs_dev, mmACP_PGFSM_CONFIG_REG,
					0x500 + tile);

		count = ACP_TIMEOUT_LOOP;
		while (true) {
			val = cgs_read_register(cgs_dev, mmACP_PGFSM_READ_REG_0
								+ tile);
			val = val & ACP_TILE_ON_MASK;
			if (val == ACP_TILE_OFF_MASK)
				break;
			if (--count == 0) {
				pr_err("Timeout reading ACP PGFSM status\n");
				return -ETIMEDOUT;
			}
			udelay(100);
		}

		val = cgs_read_register(cgs_dev, mmACP_PGFSM_RETAIN_REG);
		val |= ACP_TILE_OFF_RETAIN_REG_MASK;
		cgs_write_register(cgs_dev, mmACP_PGFSM_RETAIN_REG, val);
	}
	return 0;
}

/* power on a tile/block within ACP */
static int acp_resume_tile(void *cgs_dev, int tile)
{
	u32 val = 0;
	u32 count = 0;

	if ((tile < ACP_TILE_P1) || (tile > ACP_TILE_DSP2)) {
		pr_err("Invalid ACP tile to resume\n");
		return -1;
	}

	val = cgs_read_register(cgs_dev, mmACP_PGFSM_READ_REG_0 + tile);
	val = val & ACP_TILE_ON_MASK;
	if (val != 0x0) {
		/* tile reads back as gated; request power-up and wait */
		cgs_write_register(cgs_dev, mmACP_PGFSM_CONFIG_REG,
					0x600 + tile);
		count = ACP_TIMEOUT_LOOP;
		while (true) {
			val = cgs_read_register(cgs_dev, mmACP_PGFSM_READ_REG_0
							+ tile);
			val = val & ACP_TILE_ON_MASK;
			if (val == 0x0)
				break;
			if (--count == 0) {
				pr_err("Timeout reading ACP PGFSM status\n");
				return -ETIMEDOUT;
			}
			udelay(100);
		}
		/* Disable power gating of the P1/P2 tiles once they are up */
		val = cgs_read_register(cgs_dev, mmACP_PGFSM_RETAIN_REG);
		if (tile == ACP_TILE_P1)
			val = val & (ACP_TILE_P1_MASK);
		else if (tile == ACP_TILE_P2)
			val = val & (ACP_TILE_P2_MASK);

		cgs_write_register(cgs_dev, mmACP_PGFSM_RETAIN_REG, val);
	}
	return 0;
}
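
/*
 * Generic PM domain wrapper for the ACP block: it carries the cgs device
 * handle so the genpd power_on/power_off callbacks below can drive the
 * per-tile helpers above.
 */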
struct acp_pm_domain {
	void *cgs_dev;
	struct generic_pm_domain gpd;
};

static int acp_poweroff(struct generic_pm_domain *genpd)
{
	int i, ret;
	struct acp_pm_domain *apd;

	apd = container_of(genpd, struct acp_pm_domain, gpd);
	if (apd != NULL) {
		/* Do not return abruptly if a power tile fails to suspend;
		 * log it and continue powering off the other tiles.
		 */
		for (i = 4; i >= 0; i--) {
			ret = acp_suspend_tile(apd->cgs_dev, ACP_TILE_P1 + i);
			if (ret)
				pr_err("ACP tile %d suspend failed\n", i);
		}
	}
	return 0;
}

static int acp_poweron(struct generic_pm_domain *genpd)
{
	int i, ret;
	struct acp_pm_domain *apd;

	apd = container_of(genpd, struct acp_pm_domain, gpd);
	if (apd != NULL) {
		/* Power up the P1 and P2 tiles */
		for (i = 0; i < 2; i++) {
			ret = acp_resume_tile(apd->cgs_dev, ACP_TILE_P1 + i);
			if (ret) {
				pr_err("ACP tile %d resume failed\n", i);
				break;
			}
		}

		/* Disable DSPs which are not going to be used */
		for (i = 0; i < 3; i++) {
			ret = acp_suspend_tile(apd->cgs_dev, ACP_TILE_DSP0 + i);
			/* Continue suspending other DSPs, even if one fails */
			if (ret)
				pr_err("ACP DSP %d suspend failed\n", i);
		}
	}
	return 0;
}
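
/*
 * Cells registered through mfd_add_hotplug_devices() probe as auto-id
 * platform devices named "<cell-name>.<index>.auto"; look the device up on
 * the platform bus so it can be attached to (or removed from) the ACP PM
 * domain.
 */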
static struct device *get_mfd_cell_dev(const char *device_name, int r)
{
	char auto_dev_name[25];
	struct device *dev;

	snprintf(auto_dev_name, sizeof(auto_dev_name),
		 "%s.%d.auto", device_name, r);
	dev = bus_find_device_by_name(&platform_bus_type, NULL, auto_dev_name);
	dev_info(dev, "device %s added to pm domain\n", auto_dev_name);

	return dev;
}

/**
 * acp_hw_init - start and test ACP block
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 */
static int acp_hw_init(void *handle)
{
	int r, i;
	uint64_t acp_base;
	u32 val = 0;
	u32 count = 0;
	struct device *dev;
	struct i2s_platform_data *i2s_pdata;

	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	const struct amdgpu_ip_block *ip_block =
		amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_ACP);

	if (!ip_block)
		return -EINVAL;

	r = amd_acp_hw_init(adev->acp.cgs_device,
			    ip_block->version->major, ip_block->version->minor);
	/* -ENODEV means board uses AZ rather than ACP */
	if (r == -ENODEV)
		return 0;
	else if (r)
		return r;

	if (adev->rmmio_size == 0 || adev->rmmio_size < 0x5289)
		return -EINVAL;

	acp_base = adev->rmmio_base;

	/* No PM domain is set up for Stoney */
	if (adev->asic_type != CHIP_STONEY) {
		adev->acp.acp_genpd = kzalloc(sizeof(struct acp_pm_domain), GFP_KERNEL);
		if (adev->acp.acp_genpd == NULL)
			return -ENOMEM;

		adev->acp.acp_genpd->gpd.name = "ACP_AUDIO";
		adev->acp.acp_genpd->gpd.power_off = acp_poweroff;
		adev->acp.acp_genpd->gpd.power_on = acp_poweron;

		adev->acp.acp_genpd->cgs_dev = adev->acp.cgs_device;

		pm_genpd_init(&adev->acp.acp_genpd->gpd, NULL, false);
	}

	adev->acp.acp_cell = kcalloc(ACP_DEVS, sizeof(struct mfd_cell),
							GFP_KERNEL);
	if (adev->acp.acp_cell == NULL)
		return -ENOMEM;

	adev->acp.acp_res = kcalloc(5, sizeof(struct resource), GFP_KERNEL);
	if (adev->acp.acp_res == NULL) {
		kfree(adev->acp.acp_cell);
		return -ENOMEM;
	}

	i2s_pdata = kcalloc(3, sizeof(struct i2s_platform_data), GFP_KERNEL);
	if (i2s_pdata == NULL) {
		kfree(adev->acp.acp_res);
		kfree(adev->acp.acp_cell);
		return -ENOMEM;
	}
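
	/*
	 * i2s_pdata[0] describes the playback controller, [1] the capture
	 * controller and [2] the BT I2S controller (playback + capture).
	 * Stoney additionally needs the 16-bit index override quirk.
	 */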
	switch (adev->asic_type) {
	case CHIP_STONEY:
		i2s_pdata[0].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET |
			DW_I2S_QUIRK_16BIT_IDX_OVERRIDE;
		break;
	default:
		i2s_pdata[0].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET;
	}
	i2s_pdata[0].cap = DWC_I2S_PLAY;
	i2s_pdata[0].snd_rates = SNDRV_PCM_RATE_8000_96000;
	i2s_pdata[0].i2s_reg_comp1 = ACP_I2S_COMP1_PLAY_REG_OFFSET;
	i2s_pdata[0].i2s_reg_comp2 = ACP_I2S_COMP2_PLAY_REG_OFFSET;

	switch (adev->asic_type) {
	case CHIP_STONEY:
		i2s_pdata[1].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET |
			DW_I2S_QUIRK_COMP_PARAM1 |
			DW_I2S_QUIRK_16BIT_IDX_OVERRIDE;
		break;
	default:
		i2s_pdata[1].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET |
			DW_I2S_QUIRK_COMP_PARAM1;
	}
	i2s_pdata[1].cap = DWC_I2S_RECORD;
	i2s_pdata[1].snd_rates = SNDRV_PCM_RATE_8000_96000;
	i2s_pdata[1].i2s_reg_comp1 = ACP_I2S_COMP1_CAP_REG_OFFSET;
	i2s_pdata[1].i2s_reg_comp2 = ACP_I2S_COMP2_CAP_REG_OFFSET;

	i2s_pdata[2].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET;
	switch (adev->asic_type) {
	case CHIP_STONEY:
		i2s_pdata[2].quirks |= DW_I2S_QUIRK_16BIT_IDX_OVERRIDE;
		break;
	default:
		break;
	}
	i2s_pdata[2].cap = DWC_I2S_PLAY | DWC_I2S_RECORD;
	i2s_pdata[2].snd_rates = SNDRV_PCM_RATE_8000_96000;
	i2s_pdata[2].i2s_reg_comp1 = ACP_BT_COMP1_REG_OFFSET;
	i2s_pdata[2].i2s_reg_comp2 = ACP_BT_COMP2_REG_OFFSET;
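
	/*
	 * Five resources: MMIO ranges for the DMA engine and the playback,
	 * capture and BT I2S controllers (all relative to the ACP MMIO base),
	 * plus the ACP DMA interrupt.
	 */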
	adev->acp.acp_res[0].name = "acp2x_dma";
	adev->acp.acp_res[0].flags = IORESOURCE_MEM;
	adev->acp.acp_res[0].start = acp_base;
	adev->acp.acp_res[0].end = acp_base + ACP_DMA_REGS_END;

	adev->acp.acp_res[1].name = "acp2x_dw_i2s_play";
	adev->acp.acp_res[1].flags = IORESOURCE_MEM;
	adev->acp.acp_res[1].start = acp_base + ACP_I2S_PLAY_REGS_START;
	adev->acp.acp_res[1].end = acp_base + ACP_I2S_PLAY_REGS_END;

	adev->acp.acp_res[2].name = "acp2x_dw_i2s_cap";
	adev->acp.acp_res[2].flags = IORESOURCE_MEM;
	adev->acp.acp_res[2].start = acp_base + ACP_I2S_CAP_REGS_START;
	adev->acp.acp_res[2].end = acp_base + ACP_I2S_CAP_REGS_END;

	adev->acp.acp_res[3].name = "acp2x_dw_bt_i2s_play_cap";
	adev->acp.acp_res[3].flags = IORESOURCE_MEM;
	adev->acp.acp_res[3].start = acp_base + ACP_BT_PLAY_REGS_START;
	adev->acp.acp_res[3].end = acp_base + ACP_BT_PLAY_REGS_END;

	adev->acp.acp_res[4].name = "acp2x_dma_irq";
	adev->acp.acp_res[4].flags = IORESOURCE_IRQ;
	adev->acp.acp_res[4].start = amdgpu_irq_create_mapping(adev, ACP_SRC_ID);
	adev->acp.acp_res[4].end = adev->acp.acp_res[4].start;
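
	/*
	 * Cell 0 is the ACP DMA engine (it gets all five resources and the
	 * ASIC type as platform data); cells 1-3 are designware-i2s instances
	 * bound to the playback, capture and BT MMIO ranges respectively.
	 */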
	adev->acp.acp_cell[0].name = "acp_audio_dma";
	adev->acp.acp_cell[0].num_resources = 5;
	adev->acp.acp_cell[0].resources = &adev->acp.acp_res[0];
	adev->acp.acp_cell[0].platform_data = &adev->asic_type;
	adev->acp.acp_cell[0].pdata_size = sizeof(adev->asic_type);

	adev->acp.acp_cell[1].name = "designware-i2s";
	adev->acp.acp_cell[1].num_resources = 1;
	adev->acp.acp_cell[1].resources = &adev->acp.acp_res[1];
	adev->acp.acp_cell[1].platform_data = &i2s_pdata[0];
	adev->acp.acp_cell[1].pdata_size = sizeof(struct i2s_platform_data);

	adev->acp.acp_cell[2].name = "designware-i2s";
	adev->acp.acp_cell[2].num_resources = 1;
	adev->acp.acp_cell[2].resources = &adev->acp.acp_res[2];
	adev->acp.acp_cell[2].platform_data = &i2s_pdata[1];
	adev->acp.acp_cell[2].pdata_size = sizeof(struct i2s_platform_data);

	adev->acp.acp_cell[3].name = "designware-i2s";
	adev->acp.acp_cell[3].num_resources = 1;
	adev->acp.acp_cell[3].resources = &adev->acp.acp_res[3];
	adev->acp.acp_cell[3].platform_data = &i2s_pdata[2];
	adev->acp.acp_cell[3].pdata_size = sizeof(struct i2s_platform_data);

	r = mfd_add_hotplug_devices(adev->acp.parent, adev->acp.acp_cell,
								ACP_DEVS);
	if (r)
		return r;

	if (adev->asic_type != CHIP_STONEY) {
		for (i = 0; i < ACP_DEVS; i++) {
			dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i);
			r = pm_genpd_add_device(&adev->acp.acp_genpd->gpd, dev);
			if (r) {
				dev_err(dev, "Failed to add dev to genpd\n");
				return r;
			}
		}
	}

	/* Assert soft reset of ACP */
	val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
	val |= ACP_SOFT_RESET__SoftResetAud_MASK;
	cgs_write_register(adev->acp.cgs_device, mmACP_SOFT_RESET, val);

	count = ACP_SOFT_RESET_DONE_TIME_OUT_VALUE;
	while (true) {
		val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
		if (ACP_SOFT_RESET__SoftResetAudDone_MASK ==
		    (val & ACP_SOFT_RESET__SoftResetAudDone_MASK))
			break;
		if (--count == 0) {
			dev_err(&adev->pdev->dev, "Failed to reset ACP\n");
			return -ETIMEDOUT;
		}
		udelay(100);
	}

	/* Enable clock to ACP and wait until the clock is enabled */
	val = cgs_read_register(adev->acp.cgs_device, mmACP_CONTROL);
	val = val | ACP_CONTROL__ClkEn_MASK;
	cgs_write_register(adev->acp.cgs_device, mmACP_CONTROL, val);

	count = ACP_CLOCK_EN_TIME_OUT_VALUE;
	while (true) {
		val = cgs_read_register(adev->acp.cgs_device, mmACP_STATUS);
		if (val & (u32) 0x1)
			break;
		if (--count == 0) {
			dev_err(&adev->pdev->dev, "Failed to enable ACP clock\n");
			return -ETIMEDOUT;
		}
		udelay(100);
	}

	/* Deassert the SOFT RESET flags */
	val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
	val &= ~ACP_SOFT_RESET__SoftResetAud_MASK;
	cgs_write_register(adev->acp.cgs_device, mmACP_SOFT_RESET, val);

	return 0;
}

/**
 * acp_hw_fini - stop the hardware block
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 */
static int acp_hw_fini(void *handle)
{
	int i, ret;
	u32 val = 0;
	u32 count = 0;
	struct device *dev;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* return early if no ACP */
	if (!adev->acp.acp_cell)
		return 0;

	/* Assert soft reset of ACP */
	val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
	val |= ACP_SOFT_RESET__SoftResetAud_MASK;
	cgs_write_register(adev->acp.cgs_device, mmACP_SOFT_RESET, val);

	count = ACP_SOFT_RESET_DONE_TIME_OUT_VALUE;
	while (true) {
		val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
		if (ACP_SOFT_RESET__SoftResetAudDone_MASK ==
		    (val & ACP_SOFT_RESET__SoftResetAudDone_MASK))
			break;
		if (--count == 0) {
			dev_err(&adev->pdev->dev, "Failed to reset ACP\n");
			return -ETIMEDOUT;
		}
		udelay(100);
	}

	/* Disable ACP clock and wait until the clock status bit clears */
	val = cgs_read_register(adev->acp.cgs_device, mmACP_CONTROL);
	val &= ~ACP_CONTROL__ClkEn_MASK;
	cgs_write_register(adev->acp.cgs_device, mmACP_CONTROL, val);

	count = ACP_CLOCK_EN_TIME_OUT_VALUE;
	while (true) {
		val = cgs_read_register(adev->acp.cgs_device, mmACP_STATUS);
		if (!(val & (u32) 0x1))
			break;
		if (--count == 0) {
			dev_err(&adev->pdev->dev, "Failed to disable ACP clock\n");
			return -ETIMEDOUT;
		}
		udelay(100);
	}

	if (adev->acp.acp_genpd) {
		for (i = 0; i < ACP_DEVS; i++) {
			dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i);
			ret = pm_genpd_remove_device(dev);
			/* If removal fails, don't give up and try the rest */
			if (ret)
				dev_err(dev, "remove dev from genpd failed\n");
		}
		kfree(adev->acp.acp_genpd);
	}

	mfd_remove_devices(adev->acp.parent);
	kfree(adev->acp.acp_res);
	kfree(adev->acp.acp_cell);

	return 0;
}
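
/* The remaining amd_ip_funcs callbacks are no-ops for the ACP block. */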
static int acp_suspend(void *handle)
{
	return 0;
}

static int acp_resume(void *handle)
{
	return 0;
}

static int acp_early_init(void *handle)
{
	return 0;
}

static bool acp_is_idle(void *handle)
{
	return true;
}

static int acp_wait_for_idle(void *handle)
{
	return 0;
}

static int acp_soft_reset(void *handle)
{
	return 0;
}

static int acp_set_clockgating_state(void *handle,
				     enum amd_clockgating_state state)
{
	return 0;
}

static int acp_set_powergating_state(void *handle,
				     enum amd_powergating_state state)
{
	return 0;
}

static const struct amd_ip_funcs acp_ip_funcs = {
	.name = "acp_ip",
	.early_init = acp_early_init,
	.late_init = NULL,
	.sw_init = acp_sw_init,
	.sw_fini = acp_sw_fini,
	.hw_init = acp_hw_init,
	.hw_fini = acp_hw_fini,
	.suspend = acp_suspend,
	.resume = acp_resume,
	.is_idle = acp_is_idle,
	.wait_for_idle = acp_wait_for_idle,
	.soft_reset = acp_soft_reset,
	.set_clockgating_state = acp_set_clockgating_state,
	.set_powergating_state = acp_set_powergating_state,
};

const struct amdgpu_ip_block_version acp_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_ACP,
	.major = 2,
	.minor = 2,
	.rev = 0,
	.funcs = &acp_ip_funcs,
};