/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
23 #include <linux/firmware.h>
24 #include <linux/slab.h>
25 #include <linux/module.h>
26 #include <linux/pci.h>
29 #include "amdgpu_atombios.h"
30 #include "amdgpu_ih.h"
31 #include "amdgpu_uvd.h"
32 #include "amdgpu_vce.h"
33 #include "amdgpu_ucode.h"
34 #include "amdgpu_psp.h"
38 #include "uvd/uvd_7_0_offset.h"
39 #include "gc/gc_9_0_offset.h"
40 #include "gc/gc_9_0_sh_mask.h"
41 #include "sdma0/sdma0_4_0_offset.h"
42 #include "sdma1/sdma1_4_0_offset.h"
43 #include "hdp/hdp_4_0_offset.h"
44 #include "hdp/hdp_4_0_sh_mask.h"
45 #include "smuio/smuio_9_0_offset.h"
46 #include "smuio/smuio_9_0_sh_mask.h"
47 #include "nbio/nbio_7_0_default.h"
48 #include "nbio/nbio_7_0_offset.h"
49 #include "nbio/nbio_7_0_sh_mask.h"
50 #include "nbio/nbio_7_0_smn.h"
51 #include "mp/mp_9_0_offset.h"
54 #include "soc15_common.h"
57 #include "gfxhub_v1_0.h"
58 #include "mmhub_v1_0.h"
61 #include "nbio_v6_1.h"
62 #include "nbio_v7_0.h"
63 #include "nbio_v7_4.h"
64 #include "vega10_ih.h"
65 #include "sdma_v4_0.h"
70 #include "jpeg_v2_0.h"
72 #include "jpeg_v2_5.h"
73 #include "dce_virtual.h"
75 #include "amdgpu_smu.h"
76 #include "amdgpu_ras.h"
77 #include "amdgpu_xgmi.h"
78 #include <uapi/linux/kfd_ioctl.h>
/* MP0 clock-gating control registers not exported by the generated headers. */
#define mmMP0_MISC_CGTT_CTRL0			0x01b9
#define mmMP0_MISC_CGTT_CTRL0_BASE_IDX		0
#define mmMP0_MISC_LIGHT_SLEEP_CTRL		0x01ba
#define mmMP0_MISC_LIGHT_SLEEP_CTRL_BASE_IDX	0

/* for Vega20 register name change */
#define mmHDP_MEM_POWER_CTRL	0x00d4
#define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK	0x00000001L
#define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK	0x00000002L
#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK	0x00010000L
#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK	0x00020000L
#define mmHDP_MEM_POWER_CTRL_BASE_IDX	0
93 * Indirect registers accessor
95 static u32
soc15_pcie_rreg(struct amdgpu_device
*adev
, u32 reg
)
97 unsigned long flags
, address
, data
;
99 address
= adev
->nbio
.funcs
->get_pcie_index_offset(adev
);
100 data
= adev
->nbio
.funcs
->get_pcie_data_offset(adev
);
102 spin_lock_irqsave(&adev
->pcie_idx_lock
, flags
);
103 WREG32(address
, reg
);
104 (void)RREG32(address
);
106 spin_unlock_irqrestore(&adev
->pcie_idx_lock
, flags
);
110 static void soc15_pcie_wreg(struct amdgpu_device
*adev
, u32 reg
, u32 v
)
112 unsigned long flags
, address
, data
;
114 address
= adev
->nbio
.funcs
->get_pcie_index_offset(adev
);
115 data
= adev
->nbio
.funcs
->get_pcie_data_offset(adev
);
117 spin_lock_irqsave(&adev
->pcie_idx_lock
, flags
);
118 WREG32(address
, reg
);
119 (void)RREG32(address
);
122 spin_unlock_irqrestore(&adev
->pcie_idx_lock
, flags
);
125 static u64
soc15_pcie_rreg64(struct amdgpu_device
*adev
, u32 reg
)
127 unsigned long flags
, address
, data
;
129 address
= adev
->nbio
.funcs
->get_pcie_index_offset(adev
);
130 data
= adev
->nbio
.funcs
->get_pcie_data_offset(adev
);
132 spin_lock_irqsave(&adev
->pcie_idx_lock
, flags
);
133 /* read low 32 bit */
134 WREG32(address
, reg
);
135 (void)RREG32(address
);
138 /* read high 32 bit*/
139 WREG32(address
, reg
+ 4);
140 (void)RREG32(address
);
141 r
|= ((u64
)RREG32(data
) << 32);
142 spin_unlock_irqrestore(&adev
->pcie_idx_lock
, flags
);
146 static void soc15_pcie_wreg64(struct amdgpu_device
*adev
, u32 reg
, u64 v
)
148 unsigned long flags
, address
, data
;
150 address
= adev
->nbio
.funcs
->get_pcie_index_offset(adev
);
151 data
= adev
->nbio
.funcs
->get_pcie_data_offset(adev
);
153 spin_lock_irqsave(&adev
->pcie_idx_lock
, flags
);
154 /* write low 32 bit */
155 WREG32(address
, reg
);
156 (void)RREG32(address
);
157 WREG32(data
, (u32
)(v
& 0xffffffffULL
));
160 /* write high 32 bit */
161 WREG32(address
, reg
+ 4);
162 (void)RREG32(address
);
163 WREG32(data
, (u32
)(v
>> 32));
165 spin_unlock_irqrestore(&adev
->pcie_idx_lock
, flags
);
168 static u32
soc15_uvd_ctx_rreg(struct amdgpu_device
*adev
, u32 reg
)
170 unsigned long flags
, address
, data
;
173 address
= SOC15_REG_OFFSET(UVD
, 0, mmUVD_CTX_INDEX
);
174 data
= SOC15_REG_OFFSET(UVD
, 0, mmUVD_CTX_DATA
);
176 spin_lock_irqsave(&adev
->uvd_ctx_idx_lock
, flags
);
177 WREG32(address
, ((reg
) & 0x1ff));
179 spin_unlock_irqrestore(&adev
->uvd_ctx_idx_lock
, flags
);
183 static void soc15_uvd_ctx_wreg(struct amdgpu_device
*adev
, u32 reg
, u32 v
)
185 unsigned long flags
, address
, data
;
187 address
= SOC15_REG_OFFSET(UVD
, 0, mmUVD_CTX_INDEX
);
188 data
= SOC15_REG_OFFSET(UVD
, 0, mmUVD_CTX_DATA
);
190 spin_lock_irqsave(&adev
->uvd_ctx_idx_lock
, flags
);
191 WREG32(address
, ((reg
) & 0x1ff));
193 spin_unlock_irqrestore(&adev
->uvd_ctx_idx_lock
, flags
);
196 static u32
soc15_didt_rreg(struct amdgpu_device
*adev
, u32 reg
)
198 unsigned long flags
, address
, data
;
201 address
= SOC15_REG_OFFSET(GC
, 0, mmDIDT_IND_INDEX
);
202 data
= SOC15_REG_OFFSET(GC
, 0, mmDIDT_IND_DATA
);
204 spin_lock_irqsave(&adev
->didt_idx_lock
, flags
);
205 WREG32(address
, (reg
));
207 spin_unlock_irqrestore(&adev
->didt_idx_lock
, flags
);
211 static void soc15_didt_wreg(struct amdgpu_device
*adev
, u32 reg
, u32 v
)
213 unsigned long flags
, address
, data
;
215 address
= SOC15_REG_OFFSET(GC
, 0, mmDIDT_IND_INDEX
);
216 data
= SOC15_REG_OFFSET(GC
, 0, mmDIDT_IND_DATA
);
218 spin_lock_irqsave(&adev
->didt_idx_lock
, flags
);
219 WREG32(address
, (reg
));
221 spin_unlock_irqrestore(&adev
->didt_idx_lock
, flags
);
224 static u32
soc15_gc_cac_rreg(struct amdgpu_device
*adev
, u32 reg
)
229 spin_lock_irqsave(&adev
->gc_cac_idx_lock
, flags
);
230 WREG32_SOC15(GC
, 0, mmGC_CAC_IND_INDEX
, (reg
));
231 r
= RREG32_SOC15(GC
, 0, mmGC_CAC_IND_DATA
);
232 spin_unlock_irqrestore(&adev
->gc_cac_idx_lock
, flags
);
236 static void soc15_gc_cac_wreg(struct amdgpu_device
*adev
, u32 reg
, u32 v
)
240 spin_lock_irqsave(&adev
->gc_cac_idx_lock
, flags
);
241 WREG32_SOC15(GC
, 0, mmGC_CAC_IND_INDEX
, (reg
));
242 WREG32_SOC15(GC
, 0, mmGC_CAC_IND_DATA
, (v
));
243 spin_unlock_irqrestore(&adev
->gc_cac_idx_lock
, flags
);
246 static u32
soc15_se_cac_rreg(struct amdgpu_device
*adev
, u32 reg
)
251 spin_lock_irqsave(&adev
->se_cac_idx_lock
, flags
);
252 WREG32_SOC15(GC
, 0, mmSE_CAC_IND_INDEX
, (reg
));
253 r
= RREG32_SOC15(GC
, 0, mmSE_CAC_IND_DATA
);
254 spin_unlock_irqrestore(&adev
->se_cac_idx_lock
, flags
);
258 static void soc15_se_cac_wreg(struct amdgpu_device
*adev
, u32 reg
, u32 v
)
262 spin_lock_irqsave(&adev
->se_cac_idx_lock
, flags
);
263 WREG32_SOC15(GC
, 0, mmSE_CAC_IND_INDEX
, (reg
));
264 WREG32_SOC15(GC
, 0, mmSE_CAC_IND_DATA
, (v
));
265 spin_unlock_irqrestore(&adev
->se_cac_idx_lock
, flags
);
268 static u32
soc15_get_config_memsize(struct amdgpu_device
*adev
)
270 return adev
->nbio
.funcs
->get_memsize(adev
);
273 static u32
soc15_get_xclk(struct amdgpu_device
*adev
)
275 return adev
->clock
.spll
.reference_freq
;
279 void soc15_grbm_select(struct amdgpu_device
*adev
,
280 u32 me
, u32 pipe
, u32 queue
, u32 vmid
)
282 u32 grbm_gfx_cntl
= 0;
283 grbm_gfx_cntl
= REG_SET_FIELD(grbm_gfx_cntl
, GRBM_GFX_CNTL
, PIPEID
, pipe
);
284 grbm_gfx_cntl
= REG_SET_FIELD(grbm_gfx_cntl
, GRBM_GFX_CNTL
, MEID
, me
);
285 grbm_gfx_cntl
= REG_SET_FIELD(grbm_gfx_cntl
, GRBM_GFX_CNTL
, VMID
, vmid
);
286 grbm_gfx_cntl
= REG_SET_FIELD(grbm_gfx_cntl
, GRBM_GFX_CNTL
, QUEUEID
, queue
);
288 WREG32_SOC15_RLC_SHADOW(GC
, 0, mmGRBM_GFX_CNTL
, grbm_gfx_cntl
);
291 static void soc15_vga_set_state(struct amdgpu_device
*adev
, bool state
)
296 static bool soc15_read_disabled_bios(struct amdgpu_device
*adev
)
302 static bool soc15_read_bios_from_rom(struct amdgpu_device
*adev
,
303 u8
*bios
, u32 length_bytes
)
310 if (length_bytes
== 0)
312 /* APU vbios image is part of sbios image */
313 if (adev
->flags
& AMD_IS_APU
)
316 dw_ptr
= (u32
*)bios
;
317 length_dw
= ALIGN(length_bytes
, 4) / 4;
319 /* set rom index to 0 */
320 WREG32(SOC15_REG_OFFSET(SMUIO
, 0, mmROM_INDEX
), 0);
321 /* read out the rom data */
322 for (i
= 0; i
< length_dw
; i
++)
323 dw_ptr
[i
] = RREG32(SOC15_REG_OFFSET(SMUIO
, 0, mmROM_DATA
));
328 static struct soc15_allowed_register_entry soc15_allowed_read_registers
[] = {
329 { SOC15_REG_ENTRY(GC
, 0, mmGRBM_STATUS
)},
330 { SOC15_REG_ENTRY(GC
, 0, mmGRBM_STATUS2
)},
331 { SOC15_REG_ENTRY(GC
, 0, mmGRBM_STATUS_SE0
)},
332 { SOC15_REG_ENTRY(GC
, 0, mmGRBM_STATUS_SE1
)},
333 { SOC15_REG_ENTRY(GC
, 0, mmGRBM_STATUS_SE2
)},
334 { SOC15_REG_ENTRY(GC
, 0, mmGRBM_STATUS_SE3
)},
335 { SOC15_REG_ENTRY(SDMA0
, 0, mmSDMA0_STATUS_REG
)},
336 { SOC15_REG_ENTRY(SDMA1
, 0, mmSDMA1_STATUS_REG
)},
337 { SOC15_REG_ENTRY(GC
, 0, mmCP_STAT
)},
338 { SOC15_REG_ENTRY(GC
, 0, mmCP_STALLED_STAT1
)},
339 { SOC15_REG_ENTRY(GC
, 0, mmCP_STALLED_STAT2
)},
340 { SOC15_REG_ENTRY(GC
, 0, mmCP_STALLED_STAT3
)},
341 { SOC15_REG_ENTRY(GC
, 0, mmCP_CPF_BUSY_STAT
)},
342 { SOC15_REG_ENTRY(GC
, 0, mmCP_CPF_STALLED_STAT1
)},
343 { SOC15_REG_ENTRY(GC
, 0, mmCP_CPF_STATUS
)},
344 { SOC15_REG_ENTRY(GC
, 0, mmCP_CPC_BUSY_STAT
)},
345 { SOC15_REG_ENTRY(GC
, 0, mmCP_CPC_STALLED_STAT1
)},
346 { SOC15_REG_ENTRY(GC
, 0, mmCP_CPC_STATUS
)},
347 { SOC15_REG_ENTRY(GC
, 0, mmGB_ADDR_CONFIG
)},
348 { SOC15_REG_ENTRY(GC
, 0, mmDB_DEBUG2
)},
351 static uint32_t soc15_read_indexed_register(struct amdgpu_device
*adev
, u32 se_num
,
352 u32 sh_num
, u32 reg_offset
)
356 mutex_lock(&adev
->grbm_idx_mutex
);
357 if (se_num
!= 0xffffffff || sh_num
!= 0xffffffff)
358 amdgpu_gfx_select_se_sh(adev
, se_num
, sh_num
, 0xffffffff);
360 val
= RREG32(reg_offset
);
362 if (se_num
!= 0xffffffff || sh_num
!= 0xffffffff)
363 amdgpu_gfx_select_se_sh(adev
, 0xffffffff, 0xffffffff, 0xffffffff);
364 mutex_unlock(&adev
->grbm_idx_mutex
);
368 static uint32_t soc15_get_register_value(struct amdgpu_device
*adev
,
369 bool indexed
, u32 se_num
,
370 u32 sh_num
, u32 reg_offset
)
373 return soc15_read_indexed_register(adev
, se_num
, sh_num
, reg_offset
);
375 if (reg_offset
== SOC15_REG_OFFSET(GC
, 0, mmGB_ADDR_CONFIG
))
376 return adev
->gfx
.config
.gb_addr_config
;
377 else if (reg_offset
== SOC15_REG_OFFSET(GC
, 0, mmDB_DEBUG2
))
378 return adev
->gfx
.config
.db_debug2
;
379 return RREG32(reg_offset
);
383 static int soc15_read_register(struct amdgpu_device
*adev
, u32 se_num
,
384 u32 sh_num
, u32 reg_offset
, u32
*value
)
387 struct soc15_allowed_register_entry
*en
;
390 for (i
= 0; i
< ARRAY_SIZE(soc15_allowed_read_registers
); i
++) {
391 en
= &soc15_allowed_read_registers
[i
];
392 if (reg_offset
!= (adev
->reg_offset
[en
->hwip
][en
->inst
][en
->seg
]
396 *value
= soc15_get_register_value(adev
,
397 soc15_allowed_read_registers
[i
].grbm_indexed
,
398 se_num
, sh_num
, reg_offset
);
406 * soc15_program_register_sequence - program an array of registers.
408 * @adev: amdgpu_device pointer
409 * @regs: pointer to the register array
410 * @array_size: size of the register array
412 * Programs an array or registers with and and or masks.
413 * This is a helper for setting golden registers.
416 void soc15_program_register_sequence(struct amdgpu_device
*adev
,
417 const struct soc15_reg_golden
*regs
,
418 const u32 array_size
)
420 const struct soc15_reg_golden
*entry
;
424 for (i
= 0; i
< array_size
; ++i
) {
426 reg
= adev
->reg_offset
[entry
->hwip
][entry
->instance
][entry
->segment
] + entry
->reg
;
428 if (entry
->and_mask
== 0xffffffff) {
429 tmp
= entry
->or_mask
;
432 tmp
&= ~(entry
->and_mask
);
433 tmp
|= (entry
->or_mask
& entry
->and_mask
);
436 if (reg
== SOC15_REG_OFFSET(GC
, 0, mmPA_SC_BINNER_EVENT_CNTL_3
) ||
437 reg
== SOC15_REG_OFFSET(GC
, 0, mmPA_SC_ENHANCE
) ||
438 reg
== SOC15_REG_OFFSET(GC
, 0, mmPA_SC_ENHANCE_1
) ||
439 reg
== SOC15_REG_OFFSET(GC
, 0, mmSH_MEM_CONFIG
))
440 WREG32_RLC(reg
, tmp
);
448 static int soc15_asic_mode1_reset(struct amdgpu_device
*adev
)
453 amdgpu_atombios_scratch_regs_engine_hung(adev
, true);
455 dev_info(adev
->dev
, "GPU mode1 reset\n");
458 pci_clear_master(adev
->pdev
);
460 pci_save_state(adev
->pdev
);
462 ret
= psp_gpu_reset(adev
);
464 dev_err(adev
->dev
, "GPU mode1 reset failed\n");
466 pci_restore_state(adev
->pdev
);
468 /* wait for asic to come out of reset */
469 for (i
= 0; i
< adev
->usec_timeout
; i
++) {
470 u32 memsize
= adev
->nbio
.funcs
->get_memsize(adev
);
472 if (memsize
!= 0xffffffff)
477 amdgpu_atombios_scratch_regs_engine_hung(adev
, false);
482 static int soc15_asic_baco_reset(struct amdgpu_device
*adev
)
484 struct amdgpu_ras
*ras
= amdgpu_ras_get_context(adev
);
487 /* avoid NBIF got stuck when do RAS recovery in BACO reset */
488 if (ras
&& ras
->supported
)
489 adev
->nbio
.funcs
->enable_doorbell_interrupt(adev
, false);
491 ret
= amdgpu_dpm_baco_reset(adev
);
495 /* re-enable doorbell interrupt after BACO exit */
496 if (ras
&& ras
->supported
)
497 adev
->nbio
.funcs
->enable_doorbell_interrupt(adev
, true);
502 static enum amd_reset_method
503 soc15_asic_reset_method(struct amdgpu_device
*adev
)
505 bool baco_reset
= false;
506 struct amdgpu_ras
*ras
= amdgpu_ras_get_context(adev
);
508 switch (adev
->asic_type
) {
511 return AMD_RESET_METHOD_MODE2
;
515 baco_reset
= amdgpu_dpm_is_baco_supported(adev
);
518 if (adev
->psp
.sos_fw_version
>= 0x80067)
519 baco_reset
= amdgpu_dpm_is_baco_supported(adev
);
522 * 1. PMFW version > 0x284300: all cases use baco
523 * 2. PMFW version <= 0x284300: only sGPU w/o RAS use baco
525 if ((ras
&& ras
->supported
) && adev
->pm
.fw_version
<= 0x283400)
533 return AMD_RESET_METHOD_BACO
;
535 return AMD_RESET_METHOD_MODE1
;
538 static int soc15_asic_reset(struct amdgpu_device
*adev
)
540 switch (soc15_asic_reset_method(adev
)) {
541 case AMD_RESET_METHOD_BACO
:
542 if (!adev
->in_suspend
)
543 amdgpu_inc_vram_lost(adev
);
544 return soc15_asic_baco_reset(adev
);
545 case AMD_RESET_METHOD_MODE2
:
546 return amdgpu_dpm_mode2_reset(adev
);
548 if (!adev
->in_suspend
)
549 amdgpu_inc_vram_lost(adev
);
550 return soc15_asic_mode1_reset(adev
);
554 static bool soc15_supports_baco(struct amdgpu_device
*adev
)
556 switch (adev
->asic_type
) {
560 return amdgpu_dpm_is_baco_supported(adev
);
562 if (adev
->psp
.sos_fw_version
>= 0x80067)
563 return amdgpu_dpm_is_baco_supported(adev
);
570 /*static int soc15_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
571 u32 cntl_reg, u32 status_reg)
576 static int soc15_set_uvd_clocks(struct amdgpu_device
*adev
, u32 vclk
, u32 dclk
)
580 r = soc15_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS);
584 r = soc15_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS);
589 static int soc15_set_vce_clocks(struct amdgpu_device
*adev
, u32 evclk
, u32 ecclk
)
596 static void soc15_pcie_gen3_enable(struct amdgpu_device
*adev
)
598 if (pci_is_root_bus(adev
->pdev
->bus
))
601 if (amdgpu_pcie_gen2
== 0)
604 if (adev
->flags
& AMD_IS_APU
)
607 if (!(adev
->pm
.pcie_gen_mask
& (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2
|
608 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3
)))
614 static void soc15_program_aspm(struct amdgpu_device
*adev
)
617 if (amdgpu_aspm
== 0)
623 static void soc15_enable_doorbell_aperture(struct amdgpu_device
*adev
,
626 adev
->nbio
.funcs
->enable_doorbell_aperture(adev
, enable
);
627 adev
->nbio
.funcs
->enable_doorbell_selfring_aperture(adev
, enable
);
630 static const struct amdgpu_ip_block_version vega10_common_ip_block
=
632 .type
= AMD_IP_BLOCK_TYPE_COMMON
,
636 .funcs
= &soc15_common_ip_funcs
,
639 static uint32_t soc15_get_rev_id(struct amdgpu_device
*adev
)
641 return adev
->nbio
.funcs
->get_rev_id(adev
);
644 int soc15_set_ip_blocks(struct amdgpu_device
*adev
)
646 /* Set IP register base before any HW register access */
647 switch (adev
->asic_type
) {
652 vega10_reg_base_init(adev
);
655 vega20_reg_base_init(adev
);
658 arct_reg_base_init(adev
);
664 if (adev
->asic_type
== CHIP_VEGA20
|| adev
->asic_type
== CHIP_ARCTURUS
)
665 adev
->gmc
.xgmi
.supported
= true;
667 if (adev
->flags
& AMD_IS_APU
) {
668 adev
->nbio
.funcs
= &nbio_v7_0_funcs
;
669 adev
->nbio
.hdp_flush_reg
= &nbio_v7_0_hdp_flush_reg
;
670 } else if (adev
->asic_type
== CHIP_VEGA20
||
671 adev
->asic_type
== CHIP_ARCTURUS
) {
672 adev
->nbio
.funcs
= &nbio_v7_4_funcs
;
673 adev
->nbio
.hdp_flush_reg
= &nbio_v7_4_hdp_flush_reg
;
675 adev
->nbio
.funcs
= &nbio_v6_1_funcs
;
676 adev
->nbio
.hdp_flush_reg
= &nbio_v6_1_hdp_flush_reg
;
679 if (adev
->asic_type
== CHIP_VEGA20
|| adev
->asic_type
== CHIP_ARCTURUS
)
680 adev
->df
.funcs
= &df_v3_6_funcs
;
682 adev
->df
.funcs
= &df_v1_7_funcs
;
684 adev
->rev_id
= soc15_get_rev_id(adev
);
685 adev
->nbio
.funcs
->detect_hw_virt(adev
);
687 if (amdgpu_sriov_vf(adev
))
688 adev
->virt
.ops
= &xgpu_ai_virt_ops
;
690 switch (adev
->asic_type
) {
694 amdgpu_device_ip_block_add(adev
, &vega10_common_ip_block
);
695 amdgpu_device_ip_block_add(adev
, &gmc_v9_0_ip_block
);
697 /* For Vega10 SR-IOV, PSP need to be initialized before IH */
698 if (amdgpu_sriov_vf(adev
)) {
699 if (likely(adev
->firmware
.load_type
== AMDGPU_FW_LOAD_PSP
)) {
700 if (adev
->asic_type
== CHIP_VEGA20
)
701 amdgpu_device_ip_block_add(adev
, &psp_v11_0_ip_block
);
703 amdgpu_device_ip_block_add(adev
, &psp_v3_1_ip_block
);
705 amdgpu_device_ip_block_add(adev
, &vega10_ih_ip_block
);
707 amdgpu_device_ip_block_add(adev
, &vega10_ih_ip_block
);
708 if (likely(adev
->firmware
.load_type
== AMDGPU_FW_LOAD_PSP
)) {
709 if (adev
->asic_type
== CHIP_VEGA20
)
710 amdgpu_device_ip_block_add(adev
, &psp_v11_0_ip_block
);
712 amdgpu_device_ip_block_add(adev
, &psp_v3_1_ip_block
);
715 amdgpu_device_ip_block_add(adev
, &gfx_v9_0_ip_block
);
716 amdgpu_device_ip_block_add(adev
, &sdma_v4_0_ip_block
);
717 if (is_support_sw_smu(adev
)) {
718 if (!amdgpu_sriov_vf(adev
))
719 amdgpu_device_ip_block_add(adev
, &smu_v11_0_ip_block
);
721 amdgpu_device_ip_block_add(adev
, &pp_smu_ip_block
);
723 if (adev
->enable_virtual_display
|| amdgpu_sriov_vf(adev
))
724 amdgpu_device_ip_block_add(adev
, &dce_virtual_ip_block
);
725 #if defined(CONFIG_DRM_AMD_DC)
726 else if (amdgpu_device_has_dc_support(adev
))
727 amdgpu_device_ip_block_add(adev
, &dm_ip_block
);
729 if (!(adev
->asic_type
== CHIP_VEGA20
&& amdgpu_sriov_vf(adev
))) {
730 amdgpu_device_ip_block_add(adev
, &uvd_v7_0_ip_block
);
731 amdgpu_device_ip_block_add(adev
, &vce_v4_0_ip_block
);
735 amdgpu_device_ip_block_add(adev
, &vega10_common_ip_block
);
736 amdgpu_device_ip_block_add(adev
, &gmc_v9_0_ip_block
);
737 amdgpu_device_ip_block_add(adev
, &vega10_ih_ip_block
);
738 if (likely(adev
->firmware
.load_type
== AMDGPU_FW_LOAD_PSP
))
739 amdgpu_device_ip_block_add(adev
, &psp_v10_0_ip_block
);
740 amdgpu_device_ip_block_add(adev
, &gfx_v9_0_ip_block
);
741 amdgpu_device_ip_block_add(adev
, &sdma_v4_0_ip_block
);
742 amdgpu_device_ip_block_add(adev
, &pp_smu_ip_block
);
743 if (adev
->enable_virtual_display
|| amdgpu_sriov_vf(adev
))
744 amdgpu_device_ip_block_add(adev
, &dce_virtual_ip_block
);
745 #if defined(CONFIG_DRM_AMD_DC)
746 else if (amdgpu_device_has_dc_support(adev
))
747 amdgpu_device_ip_block_add(adev
, &dm_ip_block
);
749 amdgpu_device_ip_block_add(adev
, &vcn_v1_0_ip_block
);
752 amdgpu_device_ip_block_add(adev
, &vega10_common_ip_block
);
753 amdgpu_device_ip_block_add(adev
, &gmc_v9_0_ip_block
);
755 if (amdgpu_sriov_vf(adev
)) {
756 if (likely(adev
->firmware
.load_type
== AMDGPU_FW_LOAD_PSP
))
757 amdgpu_device_ip_block_add(adev
, &psp_v11_0_ip_block
);
758 amdgpu_device_ip_block_add(adev
, &vega10_ih_ip_block
);
760 amdgpu_device_ip_block_add(adev
, &vega10_ih_ip_block
);
761 if (likely(adev
->firmware
.load_type
== AMDGPU_FW_LOAD_PSP
))
762 amdgpu_device_ip_block_add(adev
, &psp_v11_0_ip_block
);
765 if (adev
->enable_virtual_display
|| amdgpu_sriov_vf(adev
))
766 amdgpu_device_ip_block_add(adev
, &dce_virtual_ip_block
);
767 amdgpu_device_ip_block_add(adev
, &gfx_v9_0_ip_block
);
768 amdgpu_device_ip_block_add(adev
, &sdma_v4_0_ip_block
);
769 amdgpu_device_ip_block_add(adev
, &smu_v11_0_ip_block
);
771 if (amdgpu_sriov_vf(adev
)) {
772 if (likely(adev
->firmware
.load_type
== AMDGPU_FW_LOAD_PSP
))
773 amdgpu_device_ip_block_add(adev
, &vcn_v2_5_ip_block
);
775 amdgpu_device_ip_block_add(adev
, &vcn_v2_5_ip_block
);
777 if (!amdgpu_sriov_vf(adev
))
778 amdgpu_device_ip_block_add(adev
, &jpeg_v2_5_ip_block
);
781 amdgpu_device_ip_block_add(adev
, &vega10_common_ip_block
);
782 amdgpu_device_ip_block_add(adev
, &gmc_v9_0_ip_block
);
783 amdgpu_device_ip_block_add(adev
, &vega10_ih_ip_block
);
784 if (likely(adev
->firmware
.load_type
== AMDGPU_FW_LOAD_PSP
))
785 amdgpu_device_ip_block_add(adev
, &psp_v12_0_ip_block
);
786 amdgpu_device_ip_block_add(adev
, &smu_v12_0_ip_block
);
787 amdgpu_device_ip_block_add(adev
, &gfx_v9_0_ip_block
);
788 amdgpu_device_ip_block_add(adev
, &sdma_v4_0_ip_block
);
789 if (adev
->enable_virtual_display
|| amdgpu_sriov_vf(adev
))
790 amdgpu_device_ip_block_add(adev
, &dce_virtual_ip_block
);
791 #if defined(CONFIG_DRM_AMD_DC)
792 else if (amdgpu_device_has_dc_support(adev
))
793 amdgpu_device_ip_block_add(adev
, &dm_ip_block
);
795 amdgpu_device_ip_block_add(adev
, &vcn_v2_0_ip_block
);
796 amdgpu_device_ip_block_add(adev
, &jpeg_v2_0_ip_block
);
805 static void soc15_flush_hdp(struct amdgpu_device
*adev
, struct amdgpu_ring
*ring
)
807 adev
->nbio
.funcs
->hdp_flush(adev
, ring
);
810 static void soc15_invalidate_hdp(struct amdgpu_device
*adev
,
811 struct amdgpu_ring
*ring
)
813 if (!ring
|| !ring
->funcs
->emit_wreg
)
814 WREG32_SOC15_NO_KIQ(HDP
, 0, mmHDP_READ_CACHE_INVALIDATE
, 1);
816 amdgpu_ring_emit_wreg(ring
, SOC15_REG_OFFSET(
817 HDP
, 0, mmHDP_READ_CACHE_INVALIDATE
), 1);
820 static bool soc15_need_full_reset(struct amdgpu_device
*adev
)
822 /* change this when we implement soft reset */
825 static void soc15_get_pcie_usage(struct amdgpu_device
*adev
, uint64_t *count0
,
828 uint32_t perfctr
= 0;
829 uint64_t cnt0_of
, cnt1_of
;
832 /* This reports 0 on APUs, so return to avoid writing/reading registers
833 * that may or may not be different from their GPU counterparts
835 if (adev
->flags
& AMD_IS_APU
)
838 /* Set the 2 events that we wish to watch, defined above */
839 /* Reg 40 is # received msgs */
840 /* Reg 104 is # of posted requests sent */
841 perfctr
= REG_SET_FIELD(perfctr
, PCIE_PERF_CNTL_TXCLK
, EVENT0_SEL
, 40);
842 perfctr
= REG_SET_FIELD(perfctr
, PCIE_PERF_CNTL_TXCLK
, EVENT1_SEL
, 104);
844 /* Write to enable desired perf counters */
845 WREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK
, perfctr
);
846 /* Zero out and enable the perf counters
848 * Bit 0 = Start all counters(1)
849 * Bit 2 = Global counter reset enable(1)
851 WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL
, 0x00000005);
855 /* Load the shadow and disable the perf counters
857 * Bit 0 = Stop counters(0)
858 * Bit 1 = Load the shadow counters(1)
860 WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL
, 0x00000002);
862 /* Read register values to get any >32bit overflow */
863 tmp
= RREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK
);
864 cnt0_of
= REG_GET_FIELD(tmp
, PCIE_PERF_CNTL_TXCLK
, COUNTER0_UPPER
);
865 cnt1_of
= REG_GET_FIELD(tmp
, PCIE_PERF_CNTL_TXCLK
, COUNTER1_UPPER
);
867 /* Get the values and add the overflow */
868 *count0
= RREG32_PCIE(smnPCIE_PERF_COUNT0_TXCLK
) | (cnt0_of
<< 32);
869 *count1
= RREG32_PCIE(smnPCIE_PERF_COUNT1_TXCLK
) | (cnt1_of
<< 32);
872 static void vega20_get_pcie_usage(struct amdgpu_device
*adev
, uint64_t *count0
,
875 uint32_t perfctr
= 0;
876 uint64_t cnt0_of
, cnt1_of
;
879 /* This reports 0 on APUs, so return to avoid writing/reading registers
880 * that may or may not be different from their GPU counterparts
882 if (adev
->flags
& AMD_IS_APU
)
885 /* Set the 2 events that we wish to watch, defined above */
886 /* Reg 40 is # received msgs */
887 /* Reg 108 is # of posted requests sent on VG20 */
888 perfctr
= REG_SET_FIELD(perfctr
, PCIE_PERF_CNTL_TXCLK3
,
890 perfctr
= REG_SET_FIELD(perfctr
, PCIE_PERF_CNTL_TXCLK3
,
893 /* Write to enable desired perf counters */
894 WREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK3
, perfctr
);
895 /* Zero out and enable the perf counters
897 * Bit 0 = Start all counters(1)
898 * Bit 2 = Global counter reset enable(1)
900 WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL
, 0x00000005);
904 /* Load the shadow and disable the perf counters
906 * Bit 0 = Stop counters(0)
907 * Bit 1 = Load the shadow counters(1)
909 WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL
, 0x00000002);
911 /* Read register values to get any >32bit overflow */
912 tmp
= RREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK3
);
913 cnt0_of
= REG_GET_FIELD(tmp
, PCIE_PERF_CNTL_TXCLK3
, COUNTER0_UPPER
);
914 cnt1_of
= REG_GET_FIELD(tmp
, PCIE_PERF_CNTL_TXCLK3
, COUNTER1_UPPER
);
916 /* Get the values and add the overflow */
917 *count0
= RREG32_PCIE(smnPCIE_PERF_COUNT0_TXCLK3
) | (cnt0_of
<< 32);
918 *count1
= RREG32_PCIE(smnPCIE_PERF_COUNT1_TXCLK3
) | (cnt1_of
<< 32);
921 static bool soc15_need_reset_on_init(struct amdgpu_device
*adev
)
925 /* Just return false for soc15 GPUs. Reset does not seem to
928 if (!amdgpu_passthrough(adev
))
931 if (adev
->flags
& AMD_IS_APU
)
934 /* Check sOS sign of life register to confirm sys driver and sOS
935 * are already been loaded.
937 sol_reg
= RREG32_SOC15(MP0
, 0, mmMP0_SMN_C2PMSG_81
);
944 static uint64_t soc15_get_pcie_replay_count(struct amdgpu_device
*adev
)
946 uint64_t nak_r
, nak_g
;
948 /* Get the number of NAKs received and generated */
949 nak_r
= RREG32_PCIE(smnPCIE_RX_NUM_NAK
);
950 nak_g
= RREG32_PCIE(smnPCIE_RX_NUM_NAK_GENERATED
);
952 /* Add the total number of NAKs, i.e the number of replays */
953 return (nak_r
+ nak_g
);
956 static const struct amdgpu_asic_funcs soc15_asic_funcs
=
958 .read_disabled_bios
= &soc15_read_disabled_bios
,
959 .read_bios_from_rom
= &soc15_read_bios_from_rom
,
960 .read_register
= &soc15_read_register
,
961 .reset
= &soc15_asic_reset
,
962 .reset_method
= &soc15_asic_reset_method
,
963 .set_vga_state
= &soc15_vga_set_state
,
964 .get_xclk
= &soc15_get_xclk
,
965 .set_uvd_clocks
= &soc15_set_uvd_clocks
,
966 .set_vce_clocks
= &soc15_set_vce_clocks
,
967 .get_config_memsize
= &soc15_get_config_memsize
,
968 .flush_hdp
= &soc15_flush_hdp
,
969 .invalidate_hdp
= &soc15_invalidate_hdp
,
970 .need_full_reset
= &soc15_need_full_reset
,
971 .init_doorbell_index
= &vega10_doorbell_index_init
,
972 .get_pcie_usage
= &soc15_get_pcie_usage
,
973 .need_reset_on_init
= &soc15_need_reset_on_init
,
974 .get_pcie_replay_count
= &soc15_get_pcie_replay_count
,
975 .supports_baco
= &soc15_supports_baco
,
978 static const struct amdgpu_asic_funcs vega20_asic_funcs
=
980 .read_disabled_bios
= &soc15_read_disabled_bios
,
981 .read_bios_from_rom
= &soc15_read_bios_from_rom
,
982 .read_register
= &soc15_read_register
,
983 .reset
= &soc15_asic_reset
,
984 .reset_method
= &soc15_asic_reset_method
,
985 .set_vga_state
= &soc15_vga_set_state
,
986 .get_xclk
= &soc15_get_xclk
,
987 .set_uvd_clocks
= &soc15_set_uvd_clocks
,
988 .set_vce_clocks
= &soc15_set_vce_clocks
,
989 .get_config_memsize
= &soc15_get_config_memsize
,
990 .flush_hdp
= &soc15_flush_hdp
,
991 .invalidate_hdp
= &soc15_invalidate_hdp
,
992 .need_full_reset
= &soc15_need_full_reset
,
993 .init_doorbell_index
= &vega20_doorbell_index_init
,
994 .get_pcie_usage
= &vega20_get_pcie_usage
,
995 .need_reset_on_init
= &soc15_need_reset_on_init
,
996 .get_pcie_replay_count
= &soc15_get_pcie_replay_count
,
997 .supports_baco
= &soc15_supports_baco
,
1000 static int soc15_common_early_init(void *handle
)
1002 #define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE)
1003 struct amdgpu_device
*adev
= (struct amdgpu_device
*)handle
;
1005 adev
->rmmio_remap
.reg_offset
= MMIO_REG_HOLE_OFFSET
;
1006 adev
->rmmio_remap
.bus_addr
= adev
->rmmio_base
+ MMIO_REG_HOLE_OFFSET
;
1007 adev
->smc_rreg
= NULL
;
1008 adev
->smc_wreg
= NULL
;
1009 adev
->pcie_rreg
= &soc15_pcie_rreg
;
1010 adev
->pcie_wreg
= &soc15_pcie_wreg
;
1011 adev
->pcie_rreg64
= &soc15_pcie_rreg64
;
1012 adev
->pcie_wreg64
= &soc15_pcie_wreg64
;
1013 adev
->uvd_ctx_rreg
= &soc15_uvd_ctx_rreg
;
1014 adev
->uvd_ctx_wreg
= &soc15_uvd_ctx_wreg
;
1015 adev
->didt_rreg
= &soc15_didt_rreg
;
1016 adev
->didt_wreg
= &soc15_didt_wreg
;
1017 adev
->gc_cac_rreg
= &soc15_gc_cac_rreg
;
1018 adev
->gc_cac_wreg
= &soc15_gc_cac_wreg
;
1019 adev
->se_cac_rreg
= &soc15_se_cac_rreg
;
1020 adev
->se_cac_wreg
= &soc15_se_cac_wreg
;
1023 adev
->external_rev_id
= 0xFF;
1024 switch (adev
->asic_type
) {
1026 adev
->asic_funcs
= &soc15_asic_funcs
;
1027 adev
->cg_flags
= AMD_CG_SUPPORT_GFX_MGCG
|
1028 AMD_CG_SUPPORT_GFX_MGLS
|
1029 AMD_CG_SUPPORT_GFX_RLC_LS
|
1030 AMD_CG_SUPPORT_GFX_CP_LS
|
1031 AMD_CG_SUPPORT_GFX_3D_CGCG
|
1032 AMD_CG_SUPPORT_GFX_3D_CGLS
|
1033 AMD_CG_SUPPORT_GFX_CGCG
|
1034 AMD_CG_SUPPORT_GFX_CGLS
|
1035 AMD_CG_SUPPORT_BIF_MGCG
|
1036 AMD_CG_SUPPORT_BIF_LS
|
1037 AMD_CG_SUPPORT_HDP_LS
|
1038 AMD_CG_SUPPORT_DRM_MGCG
|
1039 AMD_CG_SUPPORT_DRM_LS
|
1040 AMD_CG_SUPPORT_ROM_MGCG
|
1041 AMD_CG_SUPPORT_DF_MGCG
|
1042 AMD_CG_SUPPORT_SDMA_MGCG
|
1043 AMD_CG_SUPPORT_SDMA_LS
|
1044 AMD_CG_SUPPORT_MC_MGCG
|
1045 AMD_CG_SUPPORT_MC_LS
;
1047 adev
->external_rev_id
= 0x1;
1050 adev
->asic_funcs
= &soc15_asic_funcs
;
1051 adev
->cg_flags
= AMD_CG_SUPPORT_GFX_MGCG
|
1052 AMD_CG_SUPPORT_GFX_MGLS
|
1053 AMD_CG_SUPPORT_GFX_CGCG
|
1054 AMD_CG_SUPPORT_GFX_CGLS
|
1055 AMD_CG_SUPPORT_GFX_3D_CGCG
|
1056 AMD_CG_SUPPORT_GFX_3D_CGLS
|
1057 AMD_CG_SUPPORT_GFX_CP_LS
|
1058 AMD_CG_SUPPORT_MC_LS
|
1059 AMD_CG_SUPPORT_MC_MGCG
|
1060 AMD_CG_SUPPORT_SDMA_MGCG
|
1061 AMD_CG_SUPPORT_SDMA_LS
|
1062 AMD_CG_SUPPORT_BIF_MGCG
|
1063 AMD_CG_SUPPORT_BIF_LS
|
1064 AMD_CG_SUPPORT_HDP_MGCG
|
1065 AMD_CG_SUPPORT_HDP_LS
|
1066 AMD_CG_SUPPORT_ROM_MGCG
|
1067 AMD_CG_SUPPORT_VCE_MGCG
|
1068 AMD_CG_SUPPORT_UVD_MGCG
;
1070 adev
->external_rev_id
= adev
->rev_id
+ 0x14;
1073 adev
->asic_funcs
= &vega20_asic_funcs
;
1074 adev
->cg_flags
= AMD_CG_SUPPORT_GFX_MGCG
|
1075 AMD_CG_SUPPORT_GFX_MGLS
|
1076 AMD_CG_SUPPORT_GFX_CGCG
|
1077 AMD_CG_SUPPORT_GFX_CGLS
|
1078 AMD_CG_SUPPORT_GFX_3D_CGCG
|
1079 AMD_CG_SUPPORT_GFX_3D_CGLS
|
1080 AMD_CG_SUPPORT_GFX_CP_LS
|
1081 AMD_CG_SUPPORT_MC_LS
|
1082 AMD_CG_SUPPORT_MC_MGCG
|
1083 AMD_CG_SUPPORT_SDMA_MGCG
|
1084 AMD_CG_SUPPORT_SDMA_LS
|
1085 AMD_CG_SUPPORT_BIF_MGCG
|
1086 AMD_CG_SUPPORT_BIF_LS
|
1087 AMD_CG_SUPPORT_HDP_MGCG
|
1088 AMD_CG_SUPPORT_HDP_LS
|
1089 AMD_CG_SUPPORT_ROM_MGCG
|
1090 AMD_CG_SUPPORT_VCE_MGCG
|
1091 AMD_CG_SUPPORT_UVD_MGCG
;
1093 adev
->external_rev_id
= adev
->rev_id
+ 0x28;
1096 adev
->asic_funcs
= &soc15_asic_funcs
;
1097 if (adev
->rev_id
>= 0x8)
1098 adev
->external_rev_id
= adev
->rev_id
+ 0x79;
1099 else if (adev
->pdev
->device
== 0x15d8)
1100 adev
->external_rev_id
= adev
->rev_id
+ 0x41;
1101 else if (adev
->rev_id
== 1)
1102 adev
->external_rev_id
= adev
->rev_id
+ 0x20;
1104 adev
->external_rev_id
= adev
->rev_id
+ 0x01;
1106 if (adev
->rev_id
>= 0x8) {
1107 adev
->cg_flags
= AMD_CG_SUPPORT_GFX_MGCG
|
1108 AMD_CG_SUPPORT_GFX_MGLS
|
1109 AMD_CG_SUPPORT_GFX_CP_LS
|
1110 AMD_CG_SUPPORT_GFX_3D_CGCG
|
1111 AMD_CG_SUPPORT_GFX_3D_CGLS
|
1112 AMD_CG_SUPPORT_GFX_CGCG
|
1113 AMD_CG_SUPPORT_GFX_CGLS
|
1114 AMD_CG_SUPPORT_BIF_LS
|
1115 AMD_CG_SUPPORT_HDP_LS
|
1116 AMD_CG_SUPPORT_ROM_MGCG
|
1117 AMD_CG_SUPPORT_MC_MGCG
|
1118 AMD_CG_SUPPORT_MC_LS
|
1119 AMD_CG_SUPPORT_SDMA_MGCG
|
1120 AMD_CG_SUPPORT_SDMA_LS
|
1121 AMD_CG_SUPPORT_VCN_MGCG
;
1123 adev
->pg_flags
= AMD_PG_SUPPORT_SDMA
| AMD_PG_SUPPORT_VCN
;
1124 } else if (adev
->pdev
->device
== 0x15d8) {
1125 adev
->cg_flags
= AMD_CG_SUPPORT_GFX_MGCG
|
1126 AMD_CG_SUPPORT_GFX_MGLS
|
1127 AMD_CG_SUPPORT_GFX_CP_LS
|
1128 AMD_CG_SUPPORT_GFX_3D_CGCG
|
1129 AMD_CG_SUPPORT_GFX_3D_CGLS
|
1130 AMD_CG_SUPPORT_GFX_CGCG
|
1131 AMD_CG_SUPPORT_GFX_CGLS
|
1132 AMD_CG_SUPPORT_BIF_LS
|
1133 AMD_CG_SUPPORT_HDP_LS
|
1134 AMD_CG_SUPPORT_ROM_MGCG
|
1135 AMD_CG_SUPPORT_MC_MGCG
|
1136 AMD_CG_SUPPORT_MC_LS
|
1137 AMD_CG_SUPPORT_SDMA_MGCG
|
1138 AMD_CG_SUPPORT_SDMA_LS
;
1140 adev
->pg_flags
= AMD_PG_SUPPORT_SDMA
|
1141 AMD_PG_SUPPORT_MMHUB
|
1142 AMD_PG_SUPPORT_VCN
|
1143 AMD_PG_SUPPORT_VCN_DPG
;
1145 adev
->cg_flags
= AMD_CG_SUPPORT_GFX_MGCG
|
1146 AMD_CG_SUPPORT_GFX_MGLS
|
1147 AMD_CG_SUPPORT_GFX_RLC_LS
|
1148 AMD_CG_SUPPORT_GFX_CP_LS
|
1149 AMD_CG_SUPPORT_GFX_3D_CGCG
|
1150 AMD_CG_SUPPORT_GFX_3D_CGLS
|
1151 AMD_CG_SUPPORT_GFX_CGCG
|
1152 AMD_CG_SUPPORT_GFX_CGLS
|
1153 AMD_CG_SUPPORT_BIF_MGCG
|
1154 AMD_CG_SUPPORT_BIF_LS
|
1155 AMD_CG_SUPPORT_HDP_MGCG
|
1156 AMD_CG_SUPPORT_HDP_LS
|
1157 AMD_CG_SUPPORT_DRM_MGCG
|
1158 AMD_CG_SUPPORT_DRM_LS
|
1159 AMD_CG_SUPPORT_ROM_MGCG
|
1160 AMD_CG_SUPPORT_MC_MGCG
|
1161 AMD_CG_SUPPORT_MC_LS
|
1162 AMD_CG_SUPPORT_SDMA_MGCG
|
1163 AMD_CG_SUPPORT_SDMA_LS
|
1164 AMD_CG_SUPPORT_VCN_MGCG
;
1166 adev
->pg_flags
= AMD_PG_SUPPORT_SDMA
| AMD_PG_SUPPORT_VCN
;
1170 adev
->asic_funcs
= &vega20_asic_funcs
;
1171 adev
->cg_flags
= AMD_CG_SUPPORT_GFX_MGCG
|
1172 AMD_CG_SUPPORT_GFX_MGLS
|
1173 AMD_CG_SUPPORT_GFX_CGCG
|
1174 AMD_CG_SUPPORT_GFX_CGLS
|
1175 AMD_CG_SUPPORT_GFX_CP_LS
|
1176 AMD_CG_SUPPORT_HDP_MGCG
|
1177 AMD_CG_SUPPORT_HDP_LS
|
1178 AMD_CG_SUPPORT_SDMA_MGCG
|
1179 AMD_CG_SUPPORT_SDMA_LS
|
1180 AMD_CG_SUPPORT_MC_MGCG
|
1181 AMD_CG_SUPPORT_MC_LS
|
1182 AMD_CG_SUPPORT_IH_CG
|
1183 AMD_CG_SUPPORT_VCN_MGCG
|
1184 AMD_CG_SUPPORT_JPEG_MGCG
;
1186 adev
->external_rev_id
= adev
->rev_id
+ 0x32;
1189 adev
->asic_funcs
= &soc15_asic_funcs
;
1190 adev
->cg_flags
= AMD_CG_SUPPORT_GFX_MGCG
|
1191 AMD_CG_SUPPORT_GFX_MGLS
|
1192 AMD_CG_SUPPORT_GFX_3D_CGCG
|
1193 AMD_CG_SUPPORT_GFX_3D_CGLS
|
1194 AMD_CG_SUPPORT_GFX_CGCG
|
1195 AMD_CG_SUPPORT_GFX_CGLS
|
1196 AMD_CG_SUPPORT_GFX_CP_LS
|
1197 AMD_CG_SUPPORT_MC_MGCG
|
1198 AMD_CG_SUPPORT_MC_LS
|
1199 AMD_CG_SUPPORT_SDMA_MGCG
|
1200 AMD_CG_SUPPORT_SDMA_LS
|
1201 AMD_CG_SUPPORT_BIF_LS
|
1202 AMD_CG_SUPPORT_HDP_LS
|
1203 AMD_CG_SUPPORT_ROM_MGCG
|
1204 AMD_CG_SUPPORT_VCN_MGCG
|
1205 AMD_CG_SUPPORT_JPEG_MGCG
|
1206 AMD_CG_SUPPORT_IH_CG
|
1207 AMD_CG_SUPPORT_ATHUB_LS
|
1208 AMD_CG_SUPPORT_ATHUB_MGCG
|
1209 AMD_CG_SUPPORT_DF_MGCG
;
1210 adev
->pg_flags
= AMD_PG_SUPPORT_SDMA
|
1211 AMD_PG_SUPPORT_VCN
|
1212 AMD_PG_SUPPORT_JPEG
|
1213 AMD_PG_SUPPORT_VCN_DPG
;
1214 adev
->external_rev_id
= adev
->rev_id
+ 0x91;
1217 /* FIXME: not supported yet */
1221 if (amdgpu_sriov_vf(adev
)) {
1222 amdgpu_virt_init_setting(adev
);
1223 xgpu_ai_mailbox_set_irq_funcs(adev
);
/*
 * soc15_common_late_init - IP-block .late_init callback.
 *
 * Hooks up the SR-IOV host mailbox interrupt when running as a VF, and
 * runs the optional nbio RAS late-init hook when the nbio version
 * provides one.
 *
 * Returns 0 on success or the nbio ras_late_init error code.
 */
static int soc15_common_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r = 0;

	/* Under SR-IOV the host signals the guest through the xgpu mailbox. */
	if (amdgpu_sriov_vf(adev))
		xgpu_ai_mailbox_get_irq(adev);

	/* ras_late_init is optional per nbio version; only call if present. */
	if (adev->nbio.funcs->ras_late_init)
		r = adev->nbio.funcs->ras_late_init(adev);

	return r;
}
/*
 * soc15_common_sw_init - IP-block .sw_init callback.
 *
 * Registers the SR-IOV mailbox IRQ source (VF only) and runs the data
 * fabric (DF) software init hook.  Always returns 0.
 */
static int soc15_common_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_ai_mailbox_add_irq_id(adev);

	adev->df.funcs->sw_init(adev);

	return 0;
}
/*
 * soc15_common_sw_fini - IP-block .sw_fini callback.
 *
 * Tears down nbio RAS state and runs the data fabric sw_fini hook,
 * mirroring soc15_common_sw_init.  Always returns 0.
 */
static int soc15_common_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_nbio_ras_fini(adev);
	adev->df.funcs->sw_fini(adev);
	return 0;
}
/*
 * soc15_doorbell_range_init - program SDMA and IH doorbell apertures.
 *
 * Tells the nbio block which doorbell offsets belong to each SDMA ring
 * and to the interrupt handler, so doorbell writes are routed to the
 * right engine.  Skipped entirely on SR-IOV VFs, where the hypervisor
 * owns this programming.
 */
static void soc15_doorbell_range_init(struct amdgpu_device *adev)
{
	int i;
	struct amdgpu_ring *ring;

	/* sdma/ih doorbell range are programed by hypervisor */
	if (!amdgpu_sriov_vf(adev)) {
		for (i = 0; i < adev->sdma.num_instances; i++) {
			ring = &adev->sdma.instance[i].ring;
			adev->nbio.funcs->sdma_doorbell_range(adev, i,
				ring->use_doorbell, ring->doorbell_index,
				adev->doorbell_index.sdma_doorbell_range);
		}

		adev->nbio.funcs->ih_doorbell_range(adev, adev->irq.ih.use_doorbell,
						adev->irq.ih.doorbell_index);
	}
}
/*
 * soc15_common_hw_init - IP-block .hw_init callback.
 *
 * Brings up SOC-level hardware state shared by all IP blocks: PCIe link
 * speed, ASPM, nbio registers, the HDP register remap, and the doorbell
 * aperture/ranges.  The ordering below matters - doorbell ranges must be
 * programmed before CP block init and ring tests (see comment inline).
 * Always returns 0.
 */
static int soc15_common_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* enable pcie gen2/3 link */
	soc15_pcie_gen3_enable(adev);
	/* enable aspm */
	soc15_program_aspm(adev);
	/* setup nbio registers */
	adev->nbio.funcs->init_registers(adev);
	/* remap HDP registers to a hole in mmio space,
	 * for the purpose of expose those registers
	 * to process space
	 */
	if (adev->nbio.funcs->remap_hdp_registers)
		adev->nbio.funcs->remap_hdp_registers(adev);

	/* enable the doorbell aperture */
	soc15_enable_doorbell_aperture(adev, true);
	/* HW doorbell routing policy: doorbell writing not
	 * in SDMA/IH/MM/ACV range will be routed to CP. So
	 * we need to init SDMA/IH/MM/ACV doorbell range prior
	 * to CP ip block init and ring test.
	 */
	soc15_doorbell_range_init(adev);

	return 0;
}
/*
 * soc15_common_hw_fini - IP-block .hw_fini callback.
 *
 * Reverses soc15_common_hw_init: disables the doorbell aperture,
 * releases the SR-IOV mailbox IRQ (VF only), and drops the nbio RAS
 * interrupt references if RAS is supported on the nbio block.
 * Always returns 0.
 */
static int soc15_common_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* disable the doorbell aperture */
	soc15_enable_doorbell_aperture(adev, false);
	if (amdgpu_sriov_vf(adev))
		xgpu_ai_mailbox_put_irq(adev);

	if (adev->nbio.ras_if &&
	    amdgpu_ras_is_supported(adev, adev->nbio.ras_if->block)) {
		/* Only drop IRQ refs the matching init hooks could have taken. */
		if (adev->nbio.funcs->init_ras_controller_interrupt)
			amdgpu_irq_put(adev, &adev->nbio.ras_controller_irq, 0);
		if (adev->nbio.funcs->init_ras_err_event_athub_interrupt)
			amdgpu_irq_put(adev, &adev->nbio.ras_err_event_athub_irq, 0);
	}

	return 0;
}
/*
 * soc15_common_suspend - IP-block .suspend callback.
 *
 * Suspend for the common block is simply a full hw teardown; forward
 * the opaque handle straight to soc15_common_hw_fini, which performs
 * the cast itself.
 */
static int soc15_common_suspend(void *handle)
{
	return soc15_common_hw_fini(handle);
}
/*
 * soc15_common_resume - IP-block .resume callback.
 *
 * Resume re-runs the common hw bring-up; forward the opaque handle
 * straight to soc15_common_hw_init, which performs the cast itself.
 */
static int soc15_common_resume(void *handle)
{
	return soc15_common_hw_init(handle);
}
/*
 * soc15_common_is_idle - IP-block .is_idle callback.
 *
 * The common block tracks no busy state of its own, so it always
 * reports idle.
 */
static bool soc15_common_is_idle(void *handle)
{
	return true;
}
/*
 * soc15_common_wait_for_idle - IP-block .wait_for_idle callback.
 *
 * Nothing to wait on for the common block; succeed immediately.
 */
static int soc15_common_wait_for_idle(void *handle)
{
	return 0;
}
/*
 * soc15_common_soft_reset - IP-block .soft_reset callback.
 *
 * The common block implements no soft reset; report success.
 */
static int soc15_common_soft_reset(void *handle)
{
	return 0;
}
/*
 * soc15_update_hdp_light_sleep - toggle HDP memory light sleep.
 *
 * @adev:   amdgpu device
 * @enable: true to request light sleep (honored only when the asic
 *          advertises AMD_CG_SUPPORT_HDP_LS), false to force it off
 *
 * VEGA20 and ARCTURUS use the newer HDP_MEM_POWER_CTRL register with
 * separate IPH/RC enable + LS bits; earlier SOC15 parts use the single
 * LS_ENABLE bit in HDP_MEM_POWER_LS.  The register write is skipped when
 * the value is already correct (read-modify-write with def/data compare).
 */
static void soc15_update_hdp_light_sleep(struct amdgpu_device *adev, bool enable)
{
	uint32_t def, data;

	if (adev->asic_type == CHIP_VEGA20 ||
	    adev->asic_type == CHIP_ARCTURUS) {
		def = data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_CTRL));

		if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
			data |= HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK |
				HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK |
				HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK |
				HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK;
		else
			data &= ~(HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK |
				  HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK |
				  HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK |
				  HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK);

		/* Avoid a redundant MMIO write when nothing changed. */
		if (def != data)
			WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_CTRL), data);
	} else {
		def = data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS));

		if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
			data |= HDP_MEM_POWER_LS__LS_ENABLE_MASK;
		else
			data &= ~HDP_MEM_POWER_LS__LS_ENABLE_MASK;

		if (def != data)
			WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS), data);
	}
}
/*
 * soc15_update_drm_clock_gating - toggle DRM (MP0 misc) clock gating.
 *
 * @adev:   amdgpu device
 * @enable: true to enable gating when AMD_CG_SUPPORT_DRM_MGCG is set
 *
 * Clearing the soft-override bits in MP0_MISC_CGTT_CTRL0 enables the
 * hardware clock gate; setting them forces clocks on.  The bit fields
 * have no published mask macros, hence the raw 0x01000000..0x80000000
 * constants (upper byte of the register).  Write skipped when unchanged.
 *
 * NOTE(review): the mask list between the visible first and last terms
 * was elided in this extraction and is reconstructed - confirm against
 * the upstream file.
 */
static void soc15_update_drm_clock_gating(struct amdgpu_device *adev, bool enable)
{
	uint32_t def, data;

	def = data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0));

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_MGCG))
		data &= ~(0x01000000 |
			  0x02000000 |
			  0x04000000 |
			  0x08000000 |
			  0x10000000 |
			  0x20000000 |
			  0x40000000 |
			  0x80000000);
	else
		data |= (0x01000000 |
			 0x02000000 |
			 0x04000000 |
			 0x08000000 |
			 0x10000000 |
			 0x20000000 |
			 0x40000000 |
			 0x80000000);

	if (def != data)
		WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0), data);
}
/*
 * soc15_update_drm_light_sleep - toggle DRM (MP0) memory light sleep.
 *
 * @adev:   amdgpu device
 * @enable: true to enable LS when AMD_CG_SUPPORT_DRM_LS is set
 *
 * Bit 0 of MP0_MISC_LIGHT_SLEEP_CTRL is the LS enable; no mask macro is
 * published for it.  Write skipped when the value is unchanged.
 *
 * NOTE(review): the set/clear branch bodies were elided in this
 * extraction and are reconstructed - confirm against the upstream file.
 */
static void soc15_update_drm_light_sleep(struct amdgpu_device *adev, bool enable)
{
	uint32_t def, data;

	def = data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL));

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS))
		data |= 1;
	else
		data &= ~1;

	if (def != data)
		WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL), data);
}
/*
 * soc15_update_rom_medium_grain_clock_gating - toggle ROM MGCG.
 *
 * @adev:   amdgpu device
 * @enable: true to enable gating when AMD_CG_SUPPORT_ROM_MGCG is set
 *
 * Clearing the SOFT_OVERRIDE bits in CGTT_ROM_CLK_CTRL0 lets the ROM
 * clock gate; setting them forces the clock on.  Write skipped when
 * the register value is unchanged.
 */
static void soc15_update_rom_medium_grain_clock_gating(struct amdgpu_device *adev,
						       bool enable)
{
	uint32_t def, data;

	def = data = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0));

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG))
		data &= ~(CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
			CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK);
	else
		data |= CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
			CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK;

	if (def != data)
		WREG32(SOC15_REG_OFFSET(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0), data);
}
1457 static int soc15_common_set_clockgating_state(void *handle
,
1458 enum amd_clockgating_state state
)
1460 struct amdgpu_device
*adev
= (struct amdgpu_device
*)handle
;
1462 if (amdgpu_sriov_vf(adev
))
1465 switch (adev
->asic_type
) {
1469 adev
->nbio
.funcs
->update_medium_grain_clock_gating(adev
,
1470 state
== AMD_CG_STATE_GATE
? true : false);
1471 adev
->nbio
.funcs
->update_medium_grain_light_sleep(adev
,
1472 state
== AMD_CG_STATE_GATE
? true : false);
1473 soc15_update_hdp_light_sleep(adev
,
1474 state
== AMD_CG_STATE_GATE
? true : false);
1475 soc15_update_drm_clock_gating(adev
,
1476 state
== AMD_CG_STATE_GATE
? true : false);
1477 soc15_update_drm_light_sleep(adev
,
1478 state
== AMD_CG_STATE_GATE
? true : false);
1479 soc15_update_rom_medium_grain_clock_gating(adev
,
1480 state
== AMD_CG_STATE_GATE
? true : false);
1481 adev
->df
.funcs
->update_medium_grain_clock_gating(adev
,
1482 state
== AMD_CG_STATE_GATE
? true : false);
1486 adev
->nbio
.funcs
->update_medium_grain_clock_gating(adev
,
1487 state
== AMD_CG_STATE_GATE
? true : false);
1488 adev
->nbio
.funcs
->update_medium_grain_light_sleep(adev
,
1489 state
== AMD_CG_STATE_GATE
? true : false);
1490 soc15_update_hdp_light_sleep(adev
,
1491 state
== AMD_CG_STATE_GATE
? true : false);
1492 soc15_update_drm_clock_gating(adev
,
1493 state
== AMD_CG_STATE_GATE
? true : false);
1494 soc15_update_drm_light_sleep(adev
,
1495 state
== AMD_CG_STATE_GATE
? true : false);
1496 soc15_update_rom_medium_grain_clock_gating(adev
,
1497 state
== AMD_CG_STATE_GATE
? true : false);
1500 soc15_update_hdp_light_sleep(adev
,
1501 state
== AMD_CG_STATE_GATE
? true : false);
/*
 * soc15_common_get_clockgating_state - IP-block .get_clockgating_state.
 *
 * Reads back the hardware registers programmed by the set_clockgating
 * path and ORs the corresponding AMD_CG_SUPPORT_* bits into @flags.
 * Under SR-IOV the flags are cleared first since gating is host-owned.
 */
static void soc15_common_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	adev->nbio.funcs->get_clockgating_state(adev, flags);

	/* AMD_CG_SUPPORT_HDP_LS */
	data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS));
	if (data & HDP_MEM_POWER_LS__LS_ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_HDP_LS;

	/* AMD_CG_SUPPORT_DRM_MGCG - gated when the soft-override bit is clear */
	data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0));
	if (!(data & 0x01000000))
		*flags |= AMD_CG_SUPPORT_DRM_MGCG;

	/* AMD_CG_SUPPORT_DRM_LS - bit 0 is the LS enable */
	data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL));
	if (data & 0x1)
		*flags |= AMD_CG_SUPPORT_DRM_LS;

	/* AMD_CG_SUPPORT_ROM_MGCG - gated when the soft-override bit is clear */
	data = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0));
	if (!(data & CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK))
		*flags |= AMD_CG_SUPPORT_ROM_MGCG;

	adev->df.funcs->get_clockgating_state(adev, flags);
}
/*
 * soc15_common_set_powergating_state - IP-block .set_powergating_state.
 *
 * Power gating for SOC15 is handled per-IP elsewhere; the common block
 * has nothing to do here, so simply report success.
 */
static int soc15_common_set_powergating_state(void *handle,
					    enum amd_powergating_state state)
{
	/* todo */
	return 0;
}
1549 const struct amd_ip_funcs soc15_common_ip_funcs
= {
1550 .name
= "soc15_common",
1551 .early_init
= soc15_common_early_init
,
1552 .late_init
= soc15_common_late_init
,
1553 .sw_init
= soc15_common_sw_init
,
1554 .sw_fini
= soc15_common_sw_fini
,
1555 .hw_init
= soc15_common_hw_init
,
1556 .hw_fini
= soc15_common_hw_fini
,
1557 .suspend
= soc15_common_suspend
,
1558 .resume
= soc15_common_resume
,
1559 .is_idle
= soc15_common_is_idle
,
1560 .wait_for_idle
= soc15_common_wait_for_idle
,
1561 .soft_reset
= soc15_common_soft_reset
,
1562 .set_clockgating_state
= soc15_common_set_clockgating_state
,
1563 .set_powergating_state
= soc15_common_set_powergating_state
,
1564 .get_clockgating_state
= soc15_common_get_clockgating_state
,