/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/module.h>
#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_ih.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "amdgpu_ucode.h"
#include "amdgpu_psp.h"

#include "uvd/uvd_7_0_offset.h"
#include "gc/gc_9_0_offset.h"
#include "gc/gc_9_0_sh_mask.h"
#include "sdma0/sdma0_4_0_offset.h"
#include "sdma1/sdma1_4_0_offset.h"
#include "hdp/hdp_4_0_offset.h"
#include "hdp/hdp_4_0_sh_mask.h"
#include "mp/mp_9_0_offset.h"
#include "mp/mp_9_0_sh_mask.h"
#include "smuio/smuio_9_0_offset.h"
#include "smuio/smuio_9_0_sh_mask.h"

#include "soc15.h"
#include "soc15_common.h"
#include "gfxhub_v1_0.h"
#include "mmhub_v1_0.h"
#include "vega10_ih.h"
#include "sdma_v4_0.h"
#include "uvd_v7_0.h"
#include "vce_v4_0.h"
#include "vcn_v1_0.h"
#include "amdgpu_powerplay.h"
#include "dce_virtual.h"
#include "mxgpu_ai.h"

#define mmFabricConfigAccessControl			0x0410
#define mmFabricConfigAccessControl_BASE_IDX		0
#define mmFabricConfigAccessControl_DEFAULT		0x00000000
//FabricConfigAccessControl
#define FabricConfigAccessControl__CfgRegInstAccEn__SHIFT	0x0
#define FabricConfigAccessControl__CfgRegInstAccRegLock__SHIFT	0x1
#define FabricConfigAccessControl__CfgRegInstID__SHIFT		0x10
#define FabricConfigAccessControl__CfgRegInstAccEn_MASK		0x00000001L
#define FabricConfigAccessControl__CfgRegInstAccRegLock_MASK	0x00000002L
#define FabricConfigAccessControl__CfgRegInstID_MASK		0x00FF0000L

#define mmDF_PIE_AON0_DfGlobalClkGater		0x00fc
#define mmDF_PIE_AON0_DfGlobalClkGater_BASE_IDX	0
//DF_PIE_AON0_DfGlobalClkGater
#define DF_PIE_AON0_DfGlobalClkGater__MGCGMode__SHIFT	0x0
#define DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK	0x0000000FL

enum {
	DF_MGCG_DISABLE = 0,
	DF_MGCG_ENABLE_00_CYCLE_DELAY = 1,
	DF_MGCG_ENABLE_01_CYCLE_DELAY = 2,
	DF_MGCG_ENABLE_15_CYCLE_DELAY = 13,
	DF_MGCG_ENABLE_31_CYCLE_DELAY = 14,
	DF_MGCG_ENABLE_63_CYCLE_DELAY = 15
};
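/*
 * Note: these values are programmed into the MGCGMode field of
 * DF_PIE_AON0_DfGlobalClkGater; they select both whether DF medium-grain
 * clock gating is active and the hysteresis delay (in cycles) before the
 * clock is gated. DF_MGCG_ENABLE_15_CYCLE_DELAY is the value the driver
 * uses below when DF MGCG is enabled.
 */
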
#define mmMP0_MISC_CGTT_CTRL0			0x01b9
#define mmMP0_MISC_CGTT_CTRL0_BASE_IDX		0
#define mmMP0_MISC_LIGHT_SLEEP_CTRL		0x01ba
#define mmMP0_MISC_LIGHT_SLEEP_CTRL_BASE_IDX	0

/*
 * Indirect registers accessor
 */
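/*
 * Each accessor below goes through an index/data register pair: the
 * register offset is written to the index register, then the value is
 * read from or written to the data register, all under the matching
 * spinlock so concurrent accessors cannot interleave the two steps.
 */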
static u32 soc15_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags, address, data;
	u32 r;

	address = adev->nbio_funcs->get_pcie_index_offset(adev);
	data = adev->nbio_funcs->get_pcie_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(address, reg);
	(void)RREG32(address);
	r = RREG32(data);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
	return r;
}

static void soc15_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags, address, data;

	address = adev->nbio_funcs->get_pcie_index_offset(adev);
	data = adev->nbio_funcs->get_pcie_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(address, reg);
	(void)RREG32(address);
	WREG32(data, v);
	(void)RREG32(data);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

static u32 soc15_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags, address, data;
	u32 r;

	address = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_INDEX);
	data = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_DATA);

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(address, ((reg) & 0x1ff));
	r = RREG32(data);
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
	return r;
}

static void soc15_uvd_ctx_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags, address, data;

	address = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_INDEX);
	data = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_DATA);

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(address, ((reg) & 0x1ff));
	WREG32(data, (v));
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
}

static u32 soc15_didt_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags, address, data;
	u32 r;

	address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
	data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(address, (reg));
	r = RREG32(data);
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
	return r;
}

static void soc15_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags, address, data;

	address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
	data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(address, (reg));
	WREG32(data, (v));
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
}

static u32 soc15_gc_cac_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
	WREG32_SOC15(GC, 0, mmGC_CAC_IND_INDEX, (reg));
	r = RREG32_SOC15(GC, 0, mmGC_CAC_IND_DATA);
	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
	return r;
}

static void soc15_gc_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
	WREG32_SOC15(GC, 0, mmGC_CAC_IND_INDEX, (reg));
	WREG32_SOC15(GC, 0, mmGC_CAC_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
}

static u32 soc15_se_cac_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->se_cac_idx_lock, flags);
	WREG32_SOC15(GC, 0, mmSE_CAC_IND_INDEX, (reg));
	r = RREG32_SOC15(GC, 0, mmSE_CAC_IND_DATA);
	spin_unlock_irqrestore(&adev->se_cac_idx_lock, flags);
	return r;
}

static void soc15_se_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->se_cac_idx_lock, flags);
	WREG32_SOC15(GC, 0, mmSE_CAC_IND_INDEX, (reg));
	WREG32_SOC15(GC, 0, mmSE_CAC_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->se_cac_idx_lock, flags);
}

static u32 soc15_get_config_memsize(struct amdgpu_device *adev)
{
	return adev->nbio_funcs->get_memsize(adev);
}

static u32 soc15_get_xclk(struct amdgpu_device *adev)
{
	return adev->clock.spll.reference_freq;
}

void soc15_grbm_select(struct amdgpu_device *adev,
		       u32 me, u32 pipe, u32 queue, u32 vmid)
{
	u32 grbm_gfx_cntl = 0;

	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, PIPEID, pipe);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, MEID, me);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, VMID, vmid);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, QUEUEID, queue);

	WREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_CNTL), grbm_gfx_cntl);
}
static void soc15_vga_set_state(struct amdgpu_device *adev, bool state)
{
	/* todo */
}

static bool soc15_read_disabled_bios(struct amdgpu_device *adev)
{
	/* todo */
	return false;
}

static bool soc15_read_bios_from_rom(struct amdgpu_device *adev,
				     u8 *bios, u32 length_bytes)
{
	u32 *dw_ptr;
	u32 i, length_dw;

	if (bios == NULL)
		return false;
	if (length_bytes == 0)
		return false;
	/* APU vbios image is part of sbios image */
	if (adev->flags & AMD_IS_APU)
		return false;

	dw_ptr = (u32 *)bios;
	length_dw = ALIGN(length_bytes, 4) / 4;

	/* set rom index to 0 */
	WREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX), 0);
	/* read out the rom data */
	for (i = 0; i < length_dw; i++)
		dw_ptr[i] = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA));

	return true;
}
struct soc15_allowed_register_entry {
	uint32_t hwip;
	uint32_t inst;
	uint32_t seg;
	uint32_t reg_offset;
	bool grbm_indexed;
};

static struct soc15_allowed_register_entry soc15_allowed_read_registers[] = {
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS2)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE0)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE1)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE2)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE3)},
	{ SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_STATUS_REG)},
	{ SOC15_REG_ENTRY(SDMA1, 0, mmSDMA1_STATUS_REG)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT2)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT3)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_BUSY_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmGB_ADDR_CONFIG)},
};

static uint32_t soc15_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
					    u32 sh_num, u32 reg_offset)
{
	uint32_t val;

	mutex_lock(&adev->grbm_idx_mutex);
	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);

	val = RREG32(reg_offset);

	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);
	return val;
}

static uint32_t soc15_get_register_value(struct amdgpu_device *adev,
					 bool indexed, u32 se_num,
					 u32 sh_num, u32 reg_offset)
{
	if (indexed) {
		return soc15_read_indexed_register(adev, se_num, sh_num, reg_offset);
	} else {
		if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG))
			return adev->gfx.config.gb_addr_config;
		return RREG32(reg_offset);
	}
}

static int soc15_read_register(struct amdgpu_device *adev, u32 se_num,
			       u32 sh_num, u32 reg_offset, u32 *value)
{
	uint32_t i;
	struct soc15_allowed_register_entry *en;

	*value = 0;
	for (i = 0; i < ARRAY_SIZE(soc15_allowed_read_registers); i++) {
		en = &soc15_allowed_read_registers[i];
		if (reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg]
					+ en->reg_offset))
			continue;

		*value = soc15_get_register_value(adev,
						  soc15_allowed_read_registers[i].grbm_indexed,
						  se_num, sh_num, reg_offset);
		return 0;
	}
	return -EINVAL;
}

/**
 * soc15_program_register_sequence - program an array of registers.
 *
 * @adev: amdgpu_device pointer
 * @regs: pointer to the register array
 * @array_size: size of the register array
 *
 * Programs an array of registers with AND and OR masks.
 * This is a helper for setting golden registers.
 */
void soc15_program_register_sequence(struct amdgpu_device *adev,
				     const struct soc15_reg_golden *regs,
				     const u32 array_size)
{
	const struct soc15_reg_golden *entry;
	u32 tmp, reg;
	int i;

	for (i = 0; i < array_size; ++i) {
		entry = &regs[i];
		reg = adev->reg_offset[entry->hwip][entry->instance][entry->segment] + entry->reg;

		if (entry->and_mask == 0xffffffff) {
			tmp = entry->or_mask;
		} else {
			tmp = RREG32(reg);
			tmp &= ~(entry->and_mask);
			tmp |= entry->or_mask;
		}

		WREG32(reg, tmp);
	}
}
static int soc15_asic_reset(struct amdgpu_device *adev)
{
	u32 i;

	amdgpu_atombios_scratch_regs_engine_hung(adev, true);

	dev_info(adev->dev, "GPU reset\n");

	/* disable BM */
	pci_clear_master(adev->pdev);

	pci_save_state(adev->pdev);

	for (i = 0; i < AMDGPU_MAX_IP_NUM; i++) {
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
			adev->ip_blocks[i].version->funcs->soft_reset((void *)adev);
			break;
		}
	}

	pci_restore_state(adev->pdev);

	/* wait for asic to come out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		u32 memsize = adev->nbio_funcs->get_memsize(adev);

		if (memsize != 0xffffffff)
			break;
		udelay(1);
	}

	amdgpu_atombios_scratch_regs_engine_hung(adev, false);

	return 0;
}
/*static int soc15_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
			u32 cntl_reg, u32 status_reg)
{
	return 0;
}*/

static int soc15_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
{
	/*int r;

	r = soc15_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS);
	if (r)
		return r;

	r = soc15_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS);
	*/
	return 0;
}

static int soc15_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
{
	/* todo */

	return 0;
}

static void soc15_pcie_gen3_enable(struct amdgpu_device *adev)
{
	if (pci_is_root_bus(adev->pdev->bus))
		return;

	if (amdgpu_pcie_gen2 == 0)
		return;

	if (adev->flags & AMD_IS_APU)
		return;

	if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
					CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
		return;

	/* todo */
}

static void soc15_program_aspm(struct amdgpu_device *adev)
{
	if (amdgpu_aspm == 0)
		return;

	/* todo */
}

static void soc15_enable_doorbell_aperture(struct amdgpu_device *adev,
					   bool enable)
{
	adev->nbio_funcs->enable_doorbell_aperture(adev, enable);
	adev->nbio_funcs->enable_doorbell_selfring_aperture(adev, enable);
}

static const struct amdgpu_ip_block_version vega10_common_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_COMMON,
	.major = 2,
	.minor = 0,
	.rev = 0,
	.funcs = &soc15_common_ip_funcs,
};

int soc15_set_ip_blocks(struct amdgpu_device *adev)
{
	/* Set IP register base before any HW register access */
	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_RAVEN:
		vega10_reg_base_init(adev);
		break;
	default:
		return -EINVAL;
	}

	if (adev->flags & AMD_IS_APU)
		adev->nbio_funcs = &nbio_v7_0_funcs;
	else
		adev->nbio_funcs = &nbio_v6_1_funcs;

	adev->nbio_funcs->detect_hw_virt(adev);

	if (amdgpu_sriov_vf(adev))
		adev->virt.ops = &xgpu_ai_virt_ops;

	switch (adev->asic_type) {
	case CHIP_VEGA10:
		amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
		amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
		if (amdgpu_fw_load_type == 2 || amdgpu_fw_load_type == -1)
			amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
		if (!amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#else
#	warning "Enable CONFIG_DRM_AMD_DC for display support on SOC15."
#endif
		amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
		amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block);
		amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block);
		break;
	case CHIP_RAVEN:
		amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
		amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &psp_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#else
#	warning "Enable CONFIG_DRM_AMD_DC for display support on SOC15."
#endif
		amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
		amdgpu_device_ip_block_add(adev, &vcn_v1_0_ip_block);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static uint32_t soc15_get_rev_id(struct amdgpu_device *adev)
{
	return adev->nbio_funcs->get_rev_id(adev);
}

static const struct amdgpu_asic_funcs soc15_asic_funcs =
{
	.read_disabled_bios = &soc15_read_disabled_bios,
	.read_bios_from_rom = &soc15_read_bios_from_rom,
	.read_register = &soc15_read_register,
	.reset = &soc15_asic_reset,
	.set_vga_state = &soc15_vga_set_state,
	.get_xclk = &soc15_get_xclk,
	.set_uvd_clocks = &soc15_set_uvd_clocks,
	.set_vce_clocks = &soc15_set_vce_clocks,
	.get_config_memsize = &soc15_get_config_memsize,
};

static int soc15_common_early_init(void *handle)
{
	bool psp_enabled = false;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->smc_rreg = NULL;
	adev->smc_wreg = NULL;
	adev->pcie_rreg = &soc15_pcie_rreg;
	adev->pcie_wreg = &soc15_pcie_wreg;
	adev->uvd_ctx_rreg = &soc15_uvd_ctx_rreg;
	adev->uvd_ctx_wreg = &soc15_uvd_ctx_wreg;
	adev->didt_rreg = &soc15_didt_rreg;
	adev->didt_wreg = &soc15_didt_wreg;
	adev->gc_cac_rreg = &soc15_gc_cac_rreg;
	adev->gc_cac_wreg = &soc15_gc_cac_wreg;
	adev->se_cac_rreg = &soc15_se_cac_rreg;
	adev->se_cac_wreg = &soc15_se_cac_wreg;

	adev->asic_funcs = &soc15_asic_funcs;

	if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP) &&
	    (amdgpu_ip_block_mask & (1 << AMD_IP_BLOCK_TYPE_PSP)))
		psp_enabled = true;

	adev->rev_id = soc15_get_rev_id(adev);
	adev->external_rev_id = 0xFF;
	switch (adev->asic_type) {
	case CHIP_VEGA10:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_DRM_MGCG |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_DF_MGCG |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS;
		adev->pg_flags = 0;
		adev->external_rev_id = 0x1;
		break;
	case CHIP_RAVEN:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_DRM_MGCG |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS;
		adev->pg_flags = AMD_PG_SUPPORT_SDMA;

		adev->external_rev_id = 0x1;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	if (amdgpu_sriov_vf(adev)) {
		amdgpu_virt_init_setting(adev);
		xgpu_ai_mailbox_set_irq_funcs(adev);
	}

	adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);

	amdgpu_device_get_pcie_info(adev);

	return 0;
}

static int soc15_common_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_ai_mailbox_get_irq(adev);

	return 0;
}

static int soc15_common_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_ai_mailbox_add_irq_id(adev);

	return 0;
}

static int soc15_common_sw_fini(void *handle)
{
	return 0;
}

static int soc15_common_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* enable pcie gen2/3 link */
	soc15_pcie_gen3_enable(adev);
	/* enable aspm */
	soc15_program_aspm(adev);
	/* setup nbio registers */
	adev->nbio_funcs->init_registers(adev);
	/* enable the doorbell aperture */
	soc15_enable_doorbell_aperture(adev, true);

	return 0;
}

static int soc15_common_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* disable the doorbell aperture */
	soc15_enable_doorbell_aperture(adev, false);
	if (amdgpu_sriov_vf(adev))
		xgpu_ai_mailbox_put_irq(adev);

	return 0;
}

static int soc15_common_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return soc15_common_hw_fini(adev);
}

static int soc15_common_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return soc15_common_hw_init(adev);
}

static bool soc15_common_is_idle(void *handle)
{
	return true;
}

static int soc15_common_wait_for_idle(void *handle)
{
	return 0;
}

static int soc15_common_soft_reset(void *handle)
{
	return 0;
}

static void soc15_update_hdp_light_sleep(struct amdgpu_device *adev, bool enable)
{
	uint32_t def, data;

	def = data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS));

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
		data |= HDP_MEM_POWER_LS__LS_ENABLE_MASK;
	else
		data &= ~HDP_MEM_POWER_LS__LS_ENABLE_MASK;

	if (def != data)
		WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS), data);
}

static void soc15_update_drm_clock_gating(struct amdgpu_device *adev, bool enable)
{
	uint32_t def, data;

	def = data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0));

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_MGCG))
		data &= ~(0x01000000 |
			  0x02000000 |
			  0x04000000 |
			  0x08000000 |
			  0x10000000 |
			  0x20000000 |
			  0x40000000 |
			  0x80000000);
	else
		data |= (0x01000000 |
			 0x02000000 |
			 0x04000000 |
			 0x08000000 |
			 0x10000000 |
			 0x20000000 |
			 0x40000000 |
			 0x80000000);

	if (def != data)
		WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0), data);
}

static void soc15_update_drm_light_sleep(struct amdgpu_device *adev, bool enable)
{
	uint32_t def, data;

	def = data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL));

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS))
		data |= 1;
	else
		data &= ~1;

	if (def != data)
		WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL), data);
}

static void soc15_update_rom_medium_grain_clock_gating(struct amdgpu_device *adev,
						       bool enable)
{
	uint32_t def, data;

	def = data = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0));

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG))
		data &= ~(CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
			  CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK);
	else
		data |= CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
			CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK;

	if (def != data)
		WREG32(SOC15_REG_OFFSET(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0), data);
}

static void soc15_update_df_medium_grain_clock_gating(struct amdgpu_device *adev,
						      bool enable)
{
	uint32_t data;

	/* Put DF on broadcast mode */
	data = RREG32(SOC15_REG_OFFSET(DF, 0, mmFabricConfigAccessControl));
	data &= ~FabricConfigAccessControl__CfgRegInstAccEn_MASK;
	WREG32(SOC15_REG_OFFSET(DF, 0, mmFabricConfigAccessControl), data);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DF_MGCG)) {
		data = RREG32(SOC15_REG_OFFSET(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater));
		data &= ~DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK;
		data |= DF_MGCG_ENABLE_15_CYCLE_DELAY;
		WREG32(SOC15_REG_OFFSET(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater), data);
	} else {
		data = RREG32(SOC15_REG_OFFSET(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater));
		data &= ~DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK;
		data |= DF_MGCG_DISABLE;
		WREG32(SOC15_REG_OFFSET(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater), data);
	}

	WREG32(SOC15_REG_OFFSET(DF, 0, mmFabricConfigAccessControl),
	       mmFabricConfigAccessControl_DEFAULT);
}
static int soc15_common_set_clockgating_state(void *handle,
					      enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		return 0;

	switch (adev->asic_type) {
	case CHIP_VEGA10:
		adev->nbio_funcs->update_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		adev->nbio_funcs->update_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		soc15_update_hdp_light_sleep(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		soc15_update_drm_clock_gating(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		soc15_update_drm_light_sleep(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		soc15_update_rom_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		soc15_update_df_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		break;
	case CHIP_RAVEN:
		adev->nbio_funcs->update_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		adev->nbio_funcs->update_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		soc15_update_hdp_light_sleep(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		soc15_update_drm_clock_gating(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		soc15_update_drm_light_sleep(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		soc15_update_rom_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		break;
	default:
		break;
	}
	return 0;
}

static void soc15_common_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	adev->nbio_funcs->get_clockgating_state(adev, flags);

	/* AMD_CG_SUPPORT_HDP_LS */
	data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS));
	if (data & HDP_MEM_POWER_LS__LS_ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_HDP_LS;

	/* AMD_CG_SUPPORT_DRM_MGCG */
	data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0));
	if (!(data & 0x01000000))
		*flags |= AMD_CG_SUPPORT_DRM_MGCG;

	/* AMD_CG_SUPPORT_DRM_LS */
	data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL));
	if (data & 0x1)
		*flags |= AMD_CG_SUPPORT_DRM_LS;

	/* AMD_CG_SUPPORT_ROM_MGCG */
	data = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0));
	if (!(data & CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK))
		*flags |= AMD_CG_SUPPORT_ROM_MGCG;

	/* AMD_CG_SUPPORT_DF_MGCG */
	data = RREG32(SOC15_REG_OFFSET(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater));
	if (data & DF_MGCG_ENABLE_15_CYCLE_DELAY)
		*flags |= AMD_CG_SUPPORT_DF_MGCG;
}

static int soc15_common_set_powergating_state(void *handle,
					      enum amd_powergating_state state)
{
	/* todo */
	return 0;
}

const struct amd_ip_funcs soc15_common_ip_funcs = {
	.name = "soc15_common",
	.early_init = soc15_common_early_init,
	.late_init = soc15_common_late_init,
	.sw_init = soc15_common_sw_init,
	.sw_fini = soc15_common_sw_fini,
	.hw_init = soc15_common_hw_init,
	.hw_fini = soc15_common_hw_fini,
	.suspend = soc15_common_suspend,
	.resume = soc15_common_resume,
	.is_idle = soc15_common_is_idle,
	.wait_for_idle = soc15_common_wait_for_idle,
	.soft_reset = soc15_common_soft_reset,
	.set_clockgating_state = soc15_common_set_clockgating_state,
	.set_powergating_state = soc15_common_set_powergating_state,
	.get_clockgating_state = soc15_common_get_clockgating_state,
};