/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/slab.h>

#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_ih.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amd_pcie.h"

#include "gmc/gmc_8_1_d.h"
#include "gmc/gmc_8_1_sh_mask.h"

#include "oss/oss_3_0_d.h"
#include "oss/oss_3_0_sh_mask.h"

#include "bif/bif_5_0_d.h"
#include "bif/bif_5_0_sh_mask.h"

#include "gca/gfx_8_0_d.h"
#include "gca/gfx_8_0_sh_mask.h"

#include "smu/smu_7_1_1_d.h"
#include "smu/smu_7_1_1_sh_mask.h"

#include "uvd/uvd_5_0_d.h"
#include "uvd/uvd_5_0_sh_mask.h"

#include "vce/vce_3_0_d.h"
#include "vce/vce_3_0_sh_mask.h"

#include "dce/dce_10_0_d.h"
#include "dce/dce_10_0_sh_mask.h"

#include "vid.h"
#include "vi.h"
#include "gmc_v8_0.h"
#include "gmc_v7_0.h"
#include "gfx_v8_0.h"
#include "sdma_v2_4.h"
#include "sdma_v3_0.h"
#include "dce_v10_0.h"
#include "dce_v11_0.h"
#include "iceland_ih.h"
#include "tonga_ih.h"
#include "cz_ih.h"
#include "uvd_v5_0.h"
#include "uvd_v6_0.h"
#include "vce_v3_0.h"
#if defined(CONFIG_DRM_AMD_ACP)
#include "amdgpu_acp.h"
#endif
#include "dce_virtual.h"
#include "mxgpu_vi.h"
#include "amdgpu_dm.h"

/*
 * Indirect registers accessor
 */
static u32 vi_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32_NO_KIQ(mmPCIE_INDEX, reg);
	(void)RREG32_NO_KIQ(mmPCIE_INDEX);
	r = RREG32_NO_KIQ(mmPCIE_DATA);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
	return r;
}

static void vi_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32_NO_KIQ(mmPCIE_INDEX, reg);
	(void)RREG32_NO_KIQ(mmPCIE_INDEX);
	WREG32_NO_KIQ(mmPCIE_DATA, v);
	(void)RREG32_NO_KIQ(mmPCIE_DATA);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

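/*
 * All of these accessors follow the same pattern: the register file sits
 * behind an INDEX/DATA pair, so the spinlock serializes the two-step
 * access, and the discarded read-back of the INDEX register posts the
 * index write before the DATA register is touched.
 */
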
static u32 vi_smc_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32_NO_KIQ(mmSMC_IND_INDEX_11, reg);
	r = RREG32_NO_KIQ(mmSMC_IND_DATA_11);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
	return r;
}

static void vi_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32_NO_KIQ(mmSMC_IND_INDEX_11, reg);
	WREG32_NO_KIQ(mmSMC_IND_DATA_11, v);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
}

#define mmMP0PUB_IND_INDEX	0x180
#define mmMP0PUB_IND_DATA	0x181

static u32 cz_smc_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmMP0PUB_IND_INDEX, reg);
	r = RREG32(mmMP0PUB_IND_DATA);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
	return r;
}

static void cz_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmMP0PUB_IND_INDEX, reg);
	WREG32(mmMP0PUB_IND_DATA, v);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
}

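/*
 * On APUs the SMC register file is reached through the MP0PUB_IND pair
 * defined above rather than SMC_IND_INDEX_11/DATA_11;
 * vi_common_early_init() installs cz_smc_rreg/cz_smc_wreg as
 * adev->smc_rreg/smc_wreg whenever AMD_IS_APU is set, so callers never
 * pick an accessor by hand.
 */
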
static u32 vi_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(mmUVD_CTX_INDEX, reg & 0x1ff);
	r = RREG32(mmUVD_CTX_DATA);
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
	return r;
}

static void vi_uvd_ctx_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(mmUVD_CTX_INDEX, reg & 0x1ff);
	WREG32(mmUVD_CTX_DATA, v);
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
}

static u32 vi_didt_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(mmDIDT_IND_INDEX, reg);
	r = RREG32(mmDIDT_IND_DATA);
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
	return r;
}

static void vi_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(mmDIDT_IND_INDEX, reg);
	WREG32(mmDIDT_IND_DATA, v);
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
}

static u32 vi_gc_cac_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
	WREG32(mmGC_CAC_IND_INDEX, reg);
	r = RREG32(mmGC_CAC_IND_DATA);
	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
	return r;
}

static void vi_gc_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
	WREG32(mmGC_CAC_IND_INDEX, reg);
	WREG32(mmGC_CAC_IND_DATA, v);
	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
}

static const u32 tonga_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 fiji_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 iceland_mgcg_cgcg_init[] =
{
	mmPCIE_INDEX, 0xffffffff, ixPCIE_CNTL2,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, ixCGTT_ROM_CLK_CTRL0,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 cz_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 stoney_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xffffffff, 0x00000104,
	mmHDP_HOST_PATH_CNTL, 0xffffffff, 0x0f000027,
};

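/*
 * Each table above is a flat sequence of {register, AND mask, OR value}
 * triplets consumed by amdgpu_device_program_register_sequence(): the
 * register is read, the mask bits are cleared, the value is ORed in and
 * the result written back (a 0xffffffff mask writes the value directly).
 */
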
static void vi_init_golden_registers(struct amdgpu_device *adev)
{
	/* Some of the registers might be dependent on GRBM_GFX_INDEX */
	mutex_lock(&adev->grbm_idx_mutex);

	if (amdgpu_sriov_vf(adev)) {
		xgpu_vi_init_golden_registers(adev);
		mutex_unlock(&adev->grbm_idx_mutex);
		return;
	}

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		amdgpu_device_program_register_sequence(adev,
							iceland_mgcg_cgcg_init,
							ARRAY_SIZE(iceland_mgcg_cgcg_init));
		break;
	case CHIP_FIJI:
		amdgpu_device_program_register_sequence(adev,
							fiji_mgcg_cgcg_init,
							ARRAY_SIZE(fiji_mgcg_cgcg_init));
		break;
	case CHIP_TONGA:
		amdgpu_device_program_register_sequence(adev,
							tonga_mgcg_cgcg_init,
							ARRAY_SIZE(tonga_mgcg_cgcg_init));
		break;
	case CHIP_CARRIZO:
		amdgpu_device_program_register_sequence(adev,
							cz_mgcg_cgcg_init,
							ARRAY_SIZE(cz_mgcg_cgcg_init));
		break;
	case CHIP_STONEY:
		amdgpu_device_program_register_sequence(adev,
							stoney_mgcg_cgcg_init,
							ARRAY_SIZE(stoney_mgcg_cgcg_init));
		break;
	default:
		break;
	}
	mutex_unlock(&adev->grbm_idx_mutex);
}

/**
 * vi_get_xclk - get the xclk
 *
 * @adev: amdgpu_device pointer
 *
 * Returns the reference clock used by the gfx engine
 * (VI).
 */
static u32 vi_get_xclk(struct amdgpu_device *adev)
{
	u32 reference_clock = adev->clock.spll.reference_freq;
	u32 tmp;

	if (adev->flags & AMD_IS_APU)
		return reference_clock;

	tmp = RREG32_SMC(ixCG_CLKPIN_CNTL_2);
	if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL_2, MUX_TCLK_TO_XCLK))
		return 1000;

	tmp = RREG32_SMC(ixCG_CLKPIN_CNTL);
	if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL, XTALIN_DIVIDE))
		return reference_clock / 4;

	return reference_clock;
}

/**
 * vi_srbm_select - select specific register instances
 *
 * @adev: amdgpu_device pointer
 * @me: selected ME (micro engine)
 * @pipe: pipe
 * @queue: queue
 * @vmid: VMID
 *
 * Switches the currently active register instances.  Some
 * registers are instanced per VMID, others are instanced per
 * me/pipe/queue combination.
 */
void vi_srbm_select(struct amdgpu_device *adev,
		    u32 me, u32 pipe, u32 queue, u32 vmid)
{
	u32 srbm_gfx_cntl = 0;
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, PIPEID, pipe);
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, MEID, me);
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, VMID, vmid);
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, QUEUEID, queue);
	WREG32(mmSRBM_GFX_CNTL, srbm_gfx_cntl);
}

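/*
 * A typical calling pattern (a sketch of how users of this helper are
 * expected to behave, not code from this file): bracket instanced
 * accesses with a select/restore pair under adev->srbm_mutex, e.g.
 *
 *	mutex_lock(&adev->srbm_mutex);
 *	vi_srbm_select(adev, me, pipe, queue, vmid);
 *	...access VMID/me/pipe/queue-instanced registers...
 *	vi_srbm_select(adev, 0, 0, 0, 0);
 *	mutex_unlock(&adev->srbm_mutex);
 */
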
static void vi_vga_set_state(struct amdgpu_device *adev, bool state)
{
	/* todo */
}

static bool vi_read_disabled_bios(struct amdgpu_device *adev)
{
	u32 bus_cntl;
	u32 d1vga_control = 0;
	u32 d2vga_control = 0;
	u32 vga_render_control = 0;
	u32 rom_cntl;
	bool r;

	bus_cntl = RREG32(mmBUS_CNTL);
	if (adev->mode_info.num_crtc) {
		d1vga_control = RREG32(mmD1VGA_CONTROL);
		d2vga_control = RREG32(mmD2VGA_CONTROL);
		vga_render_control = RREG32(mmVGA_RENDER_CONTROL);
	}
	rom_cntl = RREG32_SMC(ixROM_CNTL);

	/* enable the rom */
	WREG32(mmBUS_CNTL, (bus_cntl & ~BUS_CNTL__BIOS_ROM_DIS_MASK));
	if (adev->mode_info.num_crtc) {
		/* Disable VGA mode */
		WREG32(mmD1VGA_CONTROL,
		       (d1vga_control & ~(D1VGA_CONTROL__D1VGA_MODE_ENABLE_MASK |
					  D1VGA_CONTROL__D1VGA_TIMING_SELECT_MASK)));
		WREG32(mmD2VGA_CONTROL,
		       (d2vga_control & ~(D2VGA_CONTROL__D2VGA_MODE_ENABLE_MASK |
					  D2VGA_CONTROL__D2VGA_TIMING_SELECT_MASK)));
		WREG32(mmVGA_RENDER_CONTROL,
		       (vga_render_control & ~VGA_RENDER_CONTROL__VGA_VSTATUS_CNTL_MASK));
	}
	WREG32_SMC(ixROM_CNTL, rom_cntl | ROM_CNTL__SCK_OVERWRITE_MASK);

	r = amdgpu_read_bios(adev);

	/* restore regs */
	WREG32(mmBUS_CNTL, bus_cntl);
	if (adev->mode_info.num_crtc) {
		WREG32(mmD1VGA_CONTROL, d1vga_control);
		WREG32(mmD2VGA_CONTROL, d2vga_control);
		WREG32(mmVGA_RENDER_CONTROL, vga_render_control);
	}
	WREG32_SMC(ixROM_CNTL, rom_cntl);
	return r;
}

static bool vi_read_bios_from_rom(struct amdgpu_device *adev,
				  u8 *bios, u32 length_bytes)
{
	u32 *dw_ptr;
	unsigned long flags;
	u32 i, length_dw;

	if (bios == NULL)
		return false;
	if (length_bytes == 0)
		return false;
	/* APU vbios image is part of sbios image */
	if (adev->flags & AMD_IS_APU)
		return false;

	dw_ptr = (u32 *)bios;
	length_dw = ALIGN(length_bytes, 4) / 4;
	/* take the smc lock since we are using the smc index */
	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	/* set rom index to 0 */
	WREG32(mmSMC_IND_INDEX_11, ixROM_INDEX);
	WREG32(mmSMC_IND_DATA_11, 0);
	/* set index to data for continuous read */
	WREG32(mmSMC_IND_INDEX_11, ixROM_DATA);
	for (i = 0; i < length_dw; i++)
		dw_ptr[i] = RREG32(mmSMC_IND_DATA_11);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);

	return true;
}

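/*
 * Note that the loop above always transfers whole dwords: a length_bytes
 * that is not a multiple of 4 is rounded up by the ALIGN(), so the
 * caller's buffer is assumed to be padded to a dword boundary.
 */
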
static void vi_detect_hw_virtualization(struct amdgpu_device *adev)
{
	uint32_t reg = 0;

	if (adev->asic_type == CHIP_TONGA ||
	    adev->asic_type == CHIP_FIJI) {
		reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER);
		/* bit0: 0 means pf and 1 means vf */
		if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, FUNC_IDENTIFIER))
			adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF;
		/* bit31: 0 means disable IOV and 1 means enable */
		if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, IOV_ENABLE))
			adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;
	}

	if (reg == 0) {
		if (is_virtual_machine()) /* passthrough mode excludes sr-iov mode */
			adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
	}
}

static const struct amdgpu_allowed_register_entry vi_allowed_read_registers[] = {
	{mmGRBM_STATUS},
	{mmGRBM_STATUS2},
	{mmGRBM_STATUS_SE0},
	{mmGRBM_STATUS_SE1},
	{mmGRBM_STATUS_SE2},
	{mmGRBM_STATUS_SE3},
	{mmSRBM_STATUS},
	{mmSRBM_STATUS2},
	{mmSRBM_STATUS3},
	{mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET},
	{mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET},
	{mmCP_STAT},
	{mmCP_STALLED_STAT1},
	{mmCP_STALLED_STAT2},
	{mmCP_STALLED_STAT3},
	{mmCP_CPF_BUSY_STAT},
	{mmCP_CPF_STALLED_STAT1},
	{mmCP_CPF_STATUS},
	{mmCP_CPC_BUSY_STAT},
	{mmCP_CPC_STALLED_STAT1},
	{mmCP_CPC_STATUS},
	{mmGB_ADDR_CONFIG},
	{mmMC_ARB_RAMCFG},
	{mmGB_TILE_MODE0},
	{mmGB_TILE_MODE1},
	{mmGB_TILE_MODE2},
	{mmGB_TILE_MODE3},
	{mmGB_TILE_MODE4},
	{mmGB_TILE_MODE5},
	{mmGB_TILE_MODE6},
	{mmGB_TILE_MODE7},
	{mmGB_TILE_MODE8},
	{mmGB_TILE_MODE9},
	{mmGB_TILE_MODE10},
	{mmGB_TILE_MODE11},
	{mmGB_TILE_MODE12},
	{mmGB_TILE_MODE13},
	{mmGB_TILE_MODE14},
	{mmGB_TILE_MODE15},
	{mmGB_TILE_MODE16},
	{mmGB_TILE_MODE17},
	{mmGB_TILE_MODE18},
	{mmGB_TILE_MODE19},
	{mmGB_TILE_MODE20},
	{mmGB_TILE_MODE21},
	{mmGB_TILE_MODE22},
	{mmGB_TILE_MODE23},
	{mmGB_TILE_MODE24},
	{mmGB_TILE_MODE25},
	{mmGB_TILE_MODE26},
	{mmGB_TILE_MODE27},
	{mmGB_TILE_MODE28},
	{mmGB_TILE_MODE29},
	{mmGB_TILE_MODE30},
	{mmGB_TILE_MODE31},
	{mmGB_MACROTILE_MODE0},
	{mmGB_MACROTILE_MODE1},
	{mmGB_MACROTILE_MODE2},
	{mmGB_MACROTILE_MODE3},
	{mmGB_MACROTILE_MODE4},
	{mmGB_MACROTILE_MODE5},
	{mmGB_MACROTILE_MODE6},
	{mmGB_MACROTILE_MODE7},
	{mmGB_MACROTILE_MODE8},
	{mmGB_MACROTILE_MODE9},
	{mmGB_MACROTILE_MODE10},
	{mmGB_MACROTILE_MODE11},
	{mmGB_MACROTILE_MODE12},
	{mmGB_MACROTILE_MODE13},
	{mmGB_MACROTILE_MODE14},
	{mmGB_MACROTILE_MODE15},
	{mmCC_RB_BACKEND_DISABLE, true},
	{mmGC_USER_RB_BACKEND_DISABLE, true},
	{mmGB_BACKEND_MAP, false},
	{mmPA_SC_RASTER_CONFIG, true},
	{mmPA_SC_RASTER_CONFIG_1, true},
};

static uint32_t vi_get_register_value(struct amdgpu_device *adev,
				      bool indexed, u32 se_num,
				      u32 sh_num, u32 reg_offset)
{
	if (indexed) {
		u32 val;
		unsigned se_idx = (se_num == 0xffffffff) ? 0 : se_num;
		unsigned sh_idx = (sh_num == 0xffffffff) ? 0 : sh_num;

		switch (reg_offset) {
		case mmCC_RB_BACKEND_DISABLE:
			return adev->gfx.config.rb_config[se_idx][sh_idx].rb_backend_disable;
		case mmGC_USER_RB_BACKEND_DISABLE:
			return adev->gfx.config.rb_config[se_idx][sh_idx].user_rb_backend_disable;
		case mmPA_SC_RASTER_CONFIG:
			return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config;
		case mmPA_SC_RASTER_CONFIG_1:
			return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config_1;
		}

		mutex_lock(&adev->grbm_idx_mutex);
		if (se_num != 0xffffffff || sh_num != 0xffffffff)
			amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);

		val = RREG32(reg_offset);

		if (se_num != 0xffffffff || sh_num != 0xffffffff)
			amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
		mutex_unlock(&adev->grbm_idx_mutex);
		return val;
	} else {
		unsigned idx;

		switch (reg_offset) {
		case mmGB_ADDR_CONFIG:
			return adev->gfx.config.gb_addr_config;
		case mmMC_ARB_RAMCFG:
			return adev->gfx.config.mc_arb_ramcfg;
		case mmGB_TILE_MODE0:
		case mmGB_TILE_MODE1:
		case mmGB_TILE_MODE2:
		case mmGB_TILE_MODE3:
		case mmGB_TILE_MODE4:
		case mmGB_TILE_MODE5:
		case mmGB_TILE_MODE6:
		case mmGB_TILE_MODE7:
		case mmGB_TILE_MODE8:
		case mmGB_TILE_MODE9:
		case mmGB_TILE_MODE10:
		case mmGB_TILE_MODE11:
		case mmGB_TILE_MODE12:
		case mmGB_TILE_MODE13:
		case mmGB_TILE_MODE14:
		case mmGB_TILE_MODE15:
		case mmGB_TILE_MODE16:
		case mmGB_TILE_MODE17:
		case mmGB_TILE_MODE18:
		case mmGB_TILE_MODE19:
		case mmGB_TILE_MODE20:
		case mmGB_TILE_MODE21:
		case mmGB_TILE_MODE22:
		case mmGB_TILE_MODE23:
		case mmGB_TILE_MODE24:
		case mmGB_TILE_MODE25:
		case mmGB_TILE_MODE26:
		case mmGB_TILE_MODE27:
		case mmGB_TILE_MODE28:
		case mmGB_TILE_MODE29:
		case mmGB_TILE_MODE30:
		case mmGB_TILE_MODE31:
			idx = (reg_offset - mmGB_TILE_MODE0);
			return adev->gfx.config.tile_mode_array[idx];
		case mmGB_MACROTILE_MODE0:
		case mmGB_MACROTILE_MODE1:
		case mmGB_MACROTILE_MODE2:
		case mmGB_MACROTILE_MODE3:
		case mmGB_MACROTILE_MODE4:
		case mmGB_MACROTILE_MODE5:
		case mmGB_MACROTILE_MODE6:
		case mmGB_MACROTILE_MODE7:
		case mmGB_MACROTILE_MODE8:
		case mmGB_MACROTILE_MODE9:
		case mmGB_MACROTILE_MODE10:
		case mmGB_MACROTILE_MODE11:
		case mmGB_MACROTILE_MODE12:
		case mmGB_MACROTILE_MODE13:
		case mmGB_MACROTILE_MODE14:
		case mmGB_MACROTILE_MODE15:
			idx = (reg_offset - mmGB_MACROTILE_MODE0);
			return adev->gfx.config.macrotile_mode_array[idx];
		default:
			return RREG32(reg_offset);
		}
	}
}

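/*
 * Rationale for the switch statements above: everything that lives in
 * adev->gfx.config was captured once at init time, so register queries
 * from userspace can be answered out of the cached copy without touching
 * the hardware (or re-selecting a GRBM instance); only offsets outside
 * the cache fall through to a real RREG32().
 */
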
static int vi_read_register(struct amdgpu_device *adev, u32 se_num,
			    u32 sh_num, u32 reg_offset, u32 *value)
{
	uint32_t i;

	*value = 0;
	for (i = 0; i < ARRAY_SIZE(vi_allowed_read_registers); i++) {
		bool indexed = vi_allowed_read_registers[i].grbm_indexed;

		if (reg_offset != vi_allowed_read_registers[i].reg_offset)
			continue;

		*value = vi_get_register_value(adev, indexed, se_num, sh_num,
					       reg_offset);
		return 0;
	}
	return -EINVAL;
}

static int vi_gpu_pci_config_reset(struct amdgpu_device *adev)
{
	u32 i;

	dev_info(adev->dev, "GPU pci config reset\n");

	/* disable BM */
	pci_clear_master(adev->pdev);
	/* reset */
	amdgpu_device_pci_config_reset(adev);

	udelay(100);

	/* wait for asic to come out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff) {
			/* enable BM */
			pci_set_master(adev->pdev);
			adev->has_hw_reset = true;
			return 0;
		}
		udelay(1);
	}
	return -EINVAL;
}

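/*
 * mmCONFIG_MEMSIZE reads back as all ones while the ASIC is still held
 * in reset, so the poll above doubles as the "reset complete" handshake
 * before bus mastering is turned back on.
 */
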
/**
 * vi_asic_pci_config_reset - soft reset GPU
 *
 * @adev: amdgpu_device pointer
 *
 * Use PCI Config method to reset the GPU.
 *
 * Returns 0 for success.
 */
static int vi_asic_pci_config_reset(struct amdgpu_device *adev)
{
	int r;

	amdgpu_atombios_scratch_regs_engine_hung(adev, true);

	r = vi_gpu_pci_config_reset(adev);

	amdgpu_atombios_scratch_regs_engine_hung(adev, false);

	return r;
}

static bool vi_asic_supports_baco(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_FIJI:
	case CHIP_TONGA:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_TOPAZ:
		return amdgpu_dpm_is_baco_supported(adev);
	default:
		return false;
	}
}

static enum amd_reset_method
vi_asic_reset_method(struct amdgpu_device *adev)
{
	bool baco_reset;

	switch (adev->asic_type) {
	case CHIP_FIJI:
	case CHIP_TONGA:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_TOPAZ:
		baco_reset = amdgpu_dpm_is_baco_supported(adev);
		break;
	default:
		baco_reset = false;
		break;
	}

	if (baco_reset)
		return AMD_RESET_METHOD_BACO;
	else
		return AMD_RESET_METHOD_LEGACY;
}

/**
 * vi_asic_reset - soft reset GPU
 *
 * @adev: amdgpu_device pointer
 *
 * Look up which blocks are hung and attempt
 * to reset them.
 * Returns 0 for success.
 */
static int vi_asic_reset(struct amdgpu_device *adev)
{
	int r;

	if (vi_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
		if (!adev->in_suspend)
			amdgpu_inc_vram_lost(adev);
		r = amdgpu_dpm_baco_reset(adev);
	} else {
		r = vi_asic_pci_config_reset(adev);
	}

	return r;
}

static u32 vi_get_config_memsize(struct amdgpu_device *adev)
{
	return RREG32(mmCONFIG_MEMSIZE);
}

static int vi_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
			    u32 cntl_reg, u32 status_reg)
{
	int r, i;
	struct atom_clock_dividers dividers;
	uint32_t tmp;

	r = amdgpu_atombios_get_clock_dividers(adev,
					       COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
					       clock, false, &dividers);
	if (r)
		return r;

	tmp = RREG32_SMC(cntl_reg);

	if (adev->flags & AMD_IS_APU)
		tmp &= ~CG_DCLK_CNTL__DCLK_DIVIDER_MASK;
	else
		tmp &= ~(CG_DCLK_CNTL__DCLK_DIR_CNTL_EN_MASK |
				CG_DCLK_CNTL__DCLK_DIVIDER_MASK);
	tmp |= dividers.post_divider;
	WREG32_SMC(cntl_reg, tmp);

	for (i = 0; i < 100; i++) {
		tmp = RREG32_SMC(status_reg);
		if (adev->flags & AMD_IS_APU) {
			if (tmp & 0x10000)
				break;
		} else {
			if (tmp & CG_DCLK_STATUS__DCLK_STATUS_MASK)
				break;
		}
		mdelay(10);
	}
	if (i == 100)
		return -ETIMEDOUT;
	return 0;
}

#define ixGNB_CLK1_DFS_CNTL 0xD82200F0
#define ixGNB_CLK1_STATUS   0xD822010C
#define ixGNB_CLK2_DFS_CNTL 0xD8220110
#define ixGNB_CLK2_STATUS   0xD822012C
#define ixGNB_CLK3_DFS_CNTL 0xD8220130
#define ixGNB_CLK3_STATUS   0xD822014C

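/*
 * The ixGNB_* offsets above are the APU-side (graphics north bridge)
 * equivalents of the dGPU CG_*CLK control/status registers;
 * vi_set_uvd_clocks() and vi_set_vce_clocks() below select between the
 * two sets based on AMD_IS_APU.
 */
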
static int vi_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
{
	int r;

	if (adev->flags & AMD_IS_APU) {
		r = vi_set_uvd_clock(adev, vclk, ixGNB_CLK2_DFS_CNTL, ixGNB_CLK2_STATUS);
		if (r)
			return r;

		r = vi_set_uvd_clock(adev, dclk, ixGNB_CLK1_DFS_CNTL, ixGNB_CLK1_STATUS);
		if (r)
			return r;
	} else {
		r = vi_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS);
		if (r)
			return r;

		r = vi_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS);
		if (r)
			return r;
	}

	return 0;
}

static int vi_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
{
	int r, i;
	struct atom_clock_dividers dividers;
	u32 tmp;
	u32 reg_ctrl;
	u32 reg_status;
	u32 status_mask;
	u32 reg_mask;

	if (adev->flags & AMD_IS_APU) {
		reg_ctrl = ixGNB_CLK3_DFS_CNTL;
		reg_status = ixGNB_CLK3_STATUS;
		status_mask = 0x00010000;
		reg_mask = CG_ECLK_CNTL__ECLK_DIVIDER_MASK;
	} else {
		reg_ctrl = ixCG_ECLK_CNTL;
		reg_status = ixCG_ECLK_STATUS;
		status_mask = CG_ECLK_STATUS__ECLK_STATUS_MASK;
		reg_mask = CG_ECLK_CNTL__ECLK_DIR_CNTL_EN_MASK | CG_ECLK_CNTL__ECLK_DIVIDER_MASK;
	}

	r = amdgpu_atombios_get_clock_dividers(adev,
					       COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
					       ecclk, false, &dividers);
	if (r)
		return r;

	for (i = 0; i < 100; i++) {
		if (RREG32_SMC(reg_status) & status_mask)
			break;
		mdelay(10);
	}

	if (i == 100)
		return -ETIMEDOUT;

	tmp = RREG32_SMC(reg_ctrl);
	tmp &= ~reg_mask;
	tmp |= dividers.post_divider;
	WREG32_SMC(reg_ctrl, tmp);

	for (i = 0; i < 100; i++) {
		if (RREG32_SMC(reg_status) & status_mask)
			break;
		mdelay(10);
	}

	if (i == 100)
		return -ETIMEDOUT;

	return 0;
}

static void vi_pcie_gen3_enable(struct amdgpu_device *adev)
{
	if (pci_is_root_bus(adev->pdev->bus))
		return;

	if (amdgpu_pcie_gen2 == 0)
		return;

	if (adev->flags & AMD_IS_APU)
		return;

	if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
					CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
		return;

	/* todo */
}

static void vi_program_aspm(struct amdgpu_device *adev)
{
	if (amdgpu_aspm == 0)
		return;

	/* todo */
}

static void vi_enable_doorbell_aperture(struct amdgpu_device *adev,
					bool enable)
{
	u32 tmp;

	/* not necessary on CZ */
	if (adev->flags & AMD_IS_APU)
		return;

	tmp = RREG32(mmBIF_DOORBELL_APER_EN);
	if (enable)
		tmp = REG_SET_FIELD(tmp, BIF_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, 1);
	else
		tmp = REG_SET_FIELD(tmp, BIF_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, 0);

	WREG32(mmBIF_DOORBELL_APER_EN, tmp);
}

#define ATI_REV_ID_FUSE_MACRO__ADDRESS      0xC0014044
#define ATI_REV_ID_FUSE_MACRO__SHIFT        9
#define ATI_REV_ID_FUSE_MACRO__MASK         0x00001E00

static uint32_t vi_get_rev_id(struct amdgpu_device *adev)
{
	if (adev->flags & AMD_IS_APU)
		return (RREG32_SMC(ATI_REV_ID_FUSE_MACRO__ADDRESS) & ATI_REV_ID_FUSE_MACRO__MASK)
			>> ATI_REV_ID_FUSE_MACRO__SHIFT;
	else
		return (RREG32(mmPCIE_EFUSE4) & PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID_MASK)
			>> PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID__SHIFT;
}

static void vi_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
{
	if (!ring || !ring->funcs->emit_wreg) {
		WREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 1);
		RREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL);
	} else {
		amdgpu_ring_emit_wreg(ring, mmHDP_MEM_COHERENCY_FLUSH_CNTL, 1);
	}
}

static void vi_invalidate_hdp(struct amdgpu_device *adev,
			      struct amdgpu_ring *ring)
{
	if (!ring || !ring->funcs->emit_wreg) {
		WREG32(mmHDP_DEBUG0, 1);
		RREG32(mmHDP_DEBUG0);
	} else {
		amdgpu_ring_emit_wreg(ring, mmHDP_DEBUG0, 1);
	}
}

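/*
 * Both HDP helpers above use the same rule: if a ring that can emit
 * register writes is supplied, the write is queued on the ring so it is
 * ordered against the surrounding GPU work; otherwise the register is
 * written directly over MMIO, with a read-back to post the write.
 */
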
static bool vi_need_full_reset(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		/* CZ has hang issues with full reset at the moment */
		return false;
	case CHIP_FIJI:
	case CHIP_TONGA:
		/* XXX: soft reset should work on fiji and tonga */
		return true;
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_TOPAZ:
	default:
		/* change this when we support soft reset */
		return true;
	}
}

static void vi_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0,
			      uint64_t *count1)
{
	uint32_t perfctr = 0;
	uint64_t cnt0_of, cnt1_of;
	int tmp;

	/* This reports 0 on APUs, so return to avoid writing/reading registers
	 * that may or may not be different from their GPU counterparts
	 */
	if (adev->flags & AMD_IS_APU)
		return;

	/* Set the 2 events that we wish to watch, defined above */
	/* Reg 40 is # received msgs, Reg 104 is # of posted requests sent */
	perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT0_SEL, 40);
	perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT1_SEL, 104);

	/* Write to enable desired perf counters */
	WREG32_PCIE(ixPCIE_PERF_CNTL_TXCLK, perfctr);
	/* Zero out and enable the perf counters
	 * Write 0x5:
	 * Bit 0 = Start all counters(1)
	 * Bit 2 = Global counter reset enable(1)
	 */
	WREG32_PCIE(ixPCIE_PERF_COUNT_CNTL, 0x00000005);

	msleep(1000);

	/* Load the shadow and disable the perf counters
	 * Write 0x2:
	 * Bit 0 = Stop counters(0)
	 * Bit 1 = Load the shadow counters(1)
	 */
	WREG32_PCIE(ixPCIE_PERF_COUNT_CNTL, 0x00000002);

	/* Read register values to get any >32bit overflow */
	tmp = RREG32_PCIE(ixPCIE_PERF_CNTL_TXCLK);
	cnt0_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER0_UPPER);
	cnt1_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER1_UPPER);

	/* Get the values and add the overflow */
	*count0 = RREG32_PCIE(ixPCIE_PERF_COUNT0_TXCLK) | (cnt0_of << 32);
	*count1 = RREG32_PCIE(ixPCIE_PERF_COUNT1_TXCLK) | (cnt1_of << 32);
}

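/*
 * The PCIe perf counters themselves are only 32 bits wide; the
 * COUNTER0/1_UPPER fields captured above hold the overflow bits, which
 * is why each final value is assembled as (upper << 32) | lower count.
 */
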
static uint64_t vi_get_pcie_replay_count(struct amdgpu_device *adev)
{
	uint64_t nak_r, nak_g;

	/* Get the number of NAKs received and generated */
	nak_r = RREG32_PCIE(ixPCIE_RX_NUM_NAK);
	nak_g = RREG32_PCIE(ixPCIE_RX_NUM_NAK_GENERATED);

	/* Add the total number of NAKs, i.e the number of replays */
	return (nak_r + nak_g);
}

static bool vi_need_reset_on_init(struct amdgpu_device *adev)
{
	u32 clock_cntl, pc;

	if (adev->flags & AMD_IS_APU)
		return false;

	/* check if the SMC is already running */
	clock_cntl = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
	pc = RREG32_SMC(ixSMC_PC_C);
	if ((0 == REG_GET_FIELD(clock_cntl, SMC_SYSCON_CLOCK_CNTL_0, ck_disable)) &&
	    (0x20100 <= pc))
		return true;

	return false;
}

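/*
 * Heuristic (our reading of the check above): an SMC whose clock is
 * enabled and whose program counter has advanced past 0x20100 is
 * assumed to be running firmware left over from a previous driver
 * instance, in which case a reset on init is requested.
 */
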
static const struct amdgpu_asic_funcs vi_asic_funcs =
{
	.read_disabled_bios = &vi_read_disabled_bios,
	.read_bios_from_rom = &vi_read_bios_from_rom,
	.read_register = &vi_read_register,
	.reset = &vi_asic_reset,
	.reset_method = &vi_asic_reset_method,
	.set_vga_state = &vi_vga_set_state,
	.get_xclk = &vi_get_xclk,
	.set_uvd_clocks = &vi_set_uvd_clocks,
	.set_vce_clocks = &vi_set_vce_clocks,
	.get_config_memsize = &vi_get_config_memsize,
	.flush_hdp = &vi_flush_hdp,
	.invalidate_hdp = &vi_invalidate_hdp,
	.need_full_reset = &vi_need_full_reset,
	.init_doorbell_index = &legacy_doorbell_index_init,
	.get_pcie_usage = &vi_get_pcie_usage,
	.need_reset_on_init = &vi_need_reset_on_init,
	.get_pcie_replay_count = &vi_get_pcie_replay_count,
	.supports_baco = &vi_asic_supports_baco,
};

#define CZ_REV_BRISTOL(rev)	 \
	((rev >= 0xC8 && rev <= 0xCE) || (rev >= 0xE1 && rev <= 0xE6))

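/*
 * Bristol Ridge shares the Carrizo ASIC id and is distinguishable only
 * by PCI revision; the two ranges above cover the Bristol parts.
 * vi_common_early_init() uses this to enable powergating on everything
 * except rev0 Carrizo, which needs workarounds to support PG.
 */
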
static int vi_common_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->flags & AMD_IS_APU) {
		adev->smc_rreg = &cz_smc_rreg;
		adev->smc_wreg = &cz_smc_wreg;
	} else {
		adev->smc_rreg = &vi_smc_rreg;
		adev->smc_wreg = &vi_smc_wreg;
	}
	adev->pcie_rreg = &vi_pcie_rreg;
	adev->pcie_wreg = &vi_pcie_wreg;
	adev->uvd_ctx_rreg = &vi_uvd_ctx_rreg;
	adev->uvd_ctx_wreg = &vi_uvd_ctx_wreg;
	adev->didt_rreg = &vi_didt_rreg;
	adev->didt_wreg = &vi_didt_wreg;
	adev->gc_cac_rreg = &vi_gc_cac_rreg;
	adev->gc_cac_wreg = &vi_gc_cac_wreg;

	adev->asic_funcs = &vi_asic_funcs;

	adev->rev_id = vi_get_rev_id(adev);
	adev->external_rev_id = 0xFF;
	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		adev->cg_flags = 0;
		adev->pg_flags = 0;
		adev->external_rev_id = 0x1;
		break;
	case CHIP_FIJI:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGTS |
			AMD_CG_SUPPORT_GFX_CGTS_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_UVD_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x3c;
		break;
	case CHIP_TONGA:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x14;
		break;
	case CHIP_POLARIS11:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x5A;
		break;
	case CHIP_POLARIS10:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x50;
		break;
	case CHIP_POLARIS12:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x64;
		break;
	case CHIP_VEGAM:
		adev->cg_flags = 0;
		/*AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG;*/
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x6E;
		break;
	case CHIP_CARRIZO:
		adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGTS |
			AMD_CG_SUPPORT_GFX_CGTS_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_VCE_MGCG;
		/* rev0 hardware requires workarounds to support PG */
		adev->pg_flags = 0;
		if (adev->rev_id != 0x00 || CZ_REV_BRISTOL(adev->pdev->revision)) {
			adev->pg_flags |= AMD_PG_SUPPORT_GFX_SMG |
				AMD_PG_SUPPORT_GFX_PIPELINE |
				AMD_PG_SUPPORT_CP |
				AMD_PG_SUPPORT_UVD |
				AMD_PG_SUPPORT_VCE;
		}
		adev->external_rev_id = adev->rev_id + 0x1;
		break;
	case CHIP_STONEY:
		adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGTS |
			AMD_CG_SUPPORT_GFX_CGTS_LS |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_VCE_MGCG;
		adev->pg_flags = AMD_PG_SUPPORT_GFX_PG |
			AMD_PG_SUPPORT_GFX_SMG |
			AMD_PG_SUPPORT_GFX_PIPELINE |
			AMD_PG_SUPPORT_CP |
			AMD_PG_SUPPORT_UVD |
			AMD_PG_SUPPORT_VCE;
		adev->external_rev_id = adev->rev_id + 0x61;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	if (amdgpu_sriov_vf(adev)) {
		amdgpu_virt_init_setting(adev);
		xgpu_vi_mailbox_set_irq_funcs(adev);
	}

	return 0;
}

static int vi_common_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_vi_mailbox_get_irq(adev);

	return 0;
}

static int vi_common_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_vi_mailbox_add_irq_id(adev);

	return 0;
}

static int vi_common_sw_fini(void *handle)
{
	return 0;
}

static int vi_common_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* move the golden regs per IP block */
	vi_init_golden_registers(adev);
	/* enable pcie gen2/3 link */
	vi_pcie_gen3_enable(adev);
	/* enable aspm */
	vi_program_aspm(adev);
	/* enable the doorbell aperture */
	vi_enable_doorbell_aperture(adev, true);

	return 0;
}

static int vi_common_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* disable the doorbell aperture */
	vi_enable_doorbell_aperture(adev, false);

	if (amdgpu_sriov_vf(adev))
		xgpu_vi_mailbox_put_irq(adev);

	return 0;
}

static int vi_common_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return vi_common_hw_fini(adev);
}

static int vi_common_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return vi_common_hw_init(adev);
}

static bool vi_common_is_idle(void *handle)
{
	return true;
}

static int vi_common_wait_for_idle(void *handle)
{
	return 0;
}

static int vi_common_soft_reset(void *handle)
{
	return 0;
}

static void vi_update_bif_medium_grain_light_sleep(struct amdgpu_device *adev,
						   bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32_PCIE(ixPCIE_CNTL2);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS))
		data |= PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
				PCIE_CNTL2__MST_MEM_LS_EN_MASK |
				PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK;
	else
		data &= ~(PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
				PCIE_CNTL2__MST_MEM_LS_EN_MASK |
				PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK);

	if (temp != data)
		WREG32_PCIE(ixPCIE_CNTL2, data);
}

static void vi_update_hdp_medium_grain_clock_gating(struct amdgpu_device *adev,
						    bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32(mmHDP_HOST_PATH_CNTL);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
		data &= ~HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK;
	else
		data |= HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK;

	if (temp != data)
		WREG32(mmHDP_HOST_PATH_CNTL, data);
}

static void vi_update_hdp_light_sleep(struct amdgpu_device *adev,
				      bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32(mmHDP_MEM_POWER_LS);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
		data |= HDP_MEM_POWER_LS__LS_ENABLE_MASK;
	else
		data &= ~HDP_MEM_POWER_LS__LS_ENABLE_MASK;

	if (temp != data)
		WREG32(mmHDP_MEM_POWER_LS, data);
}

static void vi_update_drm_light_sleep(struct amdgpu_device *adev,
				      bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32(0x157a);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS))
		data |= 0x1;
	else
		data &= ~0x1;

	if (temp != data)
		WREG32(0x157a, data);
}

static void vi_update_rom_medium_grain_clock_gating(struct amdgpu_device *adev,
						    bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32_SMC(ixCGTT_ROM_CLK_CTRL0);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG))
		data &= ~(CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
				CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK);
	else
		data |= CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
				CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK;

	if (temp != data)
		WREG32_SMC(ixCGTT_ROM_CLK_CTRL0, data);
}

static int vi_common_set_clockgating_state_by_smu(void *handle,
					   enum amd_clockgating_state state)
{
	uint32_t msg_id, pp_state = 0;
	uint32_t pp_support_state = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->cg_flags & (AMD_CG_SUPPORT_MC_LS | AMD_CG_SUPPORT_MC_MGCG)) {
		if (adev->cg_flags & AMD_CG_SUPPORT_MC_LS) {
			pp_support_state = PP_STATE_SUPPORT_LS;
			pp_state = PP_STATE_LS;
		}
		if (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG) {
			pp_support_state |= PP_STATE_SUPPORT_CG;
			pp_state |= PP_STATE_CG;
		}
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
			       PP_BLOCK_SYS_MC,
			       pp_support_state,
			       pp_state);
		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}
	if (adev->cg_flags & (AMD_CG_SUPPORT_SDMA_LS | AMD_CG_SUPPORT_SDMA_MGCG)) {
		if (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS) {
			pp_support_state = PP_STATE_SUPPORT_LS;
			pp_state = PP_STATE_LS;
		}
		if (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG) {
			pp_support_state |= PP_STATE_SUPPORT_CG;
			pp_state |= PP_STATE_CG;
		}
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
			       PP_BLOCK_SYS_SDMA,
			       pp_support_state,
			       pp_state);
		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}
	if (adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_HDP_MGCG)) {
		if (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS) {
			pp_support_state = PP_STATE_SUPPORT_LS;
			pp_state = PP_STATE_LS;
		}
		if (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG) {
			pp_support_state |= PP_STATE_SUPPORT_CG;
			pp_state |= PP_STATE_CG;
		}
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
			       PP_BLOCK_SYS_HDP,
			       pp_support_state,
			       pp_state);
		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}
	if (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS) {
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		else
			pp_state = PP_STATE_LS;

		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
			       PP_BLOCK_SYS_BIF,
			       PP_STATE_SUPPORT_LS,
			       pp_state);
		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}
	if (adev->cg_flags & AMD_CG_SUPPORT_BIF_MGCG) {
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		else
			pp_state = PP_STATE_CG;

		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
			       PP_BLOCK_SYS_BIF,
			       PP_STATE_SUPPORT_CG,
			       pp_state);
		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}
	if (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS) {
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		else
			pp_state = PP_STATE_LS;

		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
			       PP_BLOCK_SYS_DRM,
			       PP_STATE_SUPPORT_LS,
			       pp_state);
		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}
	if (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG) {
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		else
			pp_state = PP_STATE_CG;

		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
			       PP_BLOCK_SYS_ROM,
			       PP_STATE_SUPPORT_CG,
			       pp_state);
		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}
	return 0;
}

static int vi_common_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		return 0;

	switch (adev->asic_type) {
	case CHIP_FIJI:
		vi_update_bif_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		vi_update_hdp_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		vi_update_hdp_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		vi_update_rom_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		break;
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		vi_update_bif_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		vi_update_hdp_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		vi_update_hdp_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		vi_update_drm_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		break;
	case CHIP_TONGA:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
		vi_common_set_clockgating_state_by_smu(adev, state);
		break;
	default:
		break;
	}
	return 0;
}

static int vi_common_set_powergating_state(void *handle,
					    enum amd_powergating_state state)
{
	return 0;
}

static void vi_common_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	/* AMD_CG_SUPPORT_BIF_LS */
	data = RREG32_PCIE(ixPCIE_CNTL2);
	if (data & PCIE_CNTL2__SLV_MEM_LS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_BIF_LS;

	/* AMD_CG_SUPPORT_HDP_LS */
	data = RREG32(mmHDP_MEM_POWER_LS);
	if (data & HDP_MEM_POWER_LS__LS_ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_HDP_LS;

	/* AMD_CG_SUPPORT_HDP_MGCG */
	data = RREG32(mmHDP_HOST_PATH_CNTL);
	if (!(data & HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK))
		*flags |= AMD_CG_SUPPORT_HDP_MGCG;

	/* AMD_CG_SUPPORT_ROM_MGCG */
	data = RREG32_SMC(ixCGTT_ROM_CLK_CTRL0);
	if (!(data & CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK))
		*flags |= AMD_CG_SUPPORT_ROM_MGCG;
}

static const struct amd_ip_funcs vi_common_ip_funcs = {
	.name = "vi_common",
	.early_init = vi_common_early_init,
	.late_init = vi_common_late_init,
	.sw_init = vi_common_sw_init,
	.sw_fini = vi_common_sw_fini,
	.hw_init = vi_common_hw_init,
	.hw_fini = vi_common_hw_fini,
	.suspend = vi_common_suspend,
	.resume = vi_common_resume,
	.is_idle = vi_common_is_idle,
	.wait_for_idle = vi_common_wait_for_idle,
	.soft_reset = vi_common_soft_reset,
	.set_clockgating_state = vi_common_set_clockgating_state,
	.set_powergating_state = vi_common_set_powergating_state,
	.get_clockgating_state = vi_common_get_clockgating_state,
};

static const struct amdgpu_ip_block_version vi_common_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_COMMON,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &vi_common_ip_funcs,
};

int vi_set_ip_blocks(struct amdgpu_device *adev)
{
	/* in early init stage, vbios code won't work */
	vi_detect_hw_virtualization(adev);

	if (amdgpu_sriov_vf(adev))
		adev->virt.ops = &xgpu_vi_virt_ops;

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		/* topaz has no DCE, UVD, VCE */
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v7_4_ip_block);
		amdgpu_device_ip_block_add(adev, &iceland_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v2_4_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
		break;
	case CHIP_FIJI:
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v8_5_ip_block);
		amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		else
			amdgpu_device_ip_block_add(adev, &dce_v10_1_ip_block);
		if (!amdgpu_sriov_vf(adev)) {
			amdgpu_device_ip_block_add(adev, &uvd_v6_0_ip_block);
			amdgpu_device_ip_block_add(adev, &vce_v3_0_ip_block);
		}
		break;
	case CHIP_TONGA:
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		else
			amdgpu_device_ip_block_add(adev, &dce_v10_0_ip_block);
		if (!amdgpu_sriov_vf(adev)) {
			amdgpu_device_ip_block_add(adev, &uvd_v5_0_ip_block);
			amdgpu_device_ip_block_add(adev, &vce_v3_0_ip_block);
		}
		break;
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v8_1_ip_block);
		amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v3_1_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		else
			amdgpu_device_ip_block_add(adev, &dce_v11_2_ip_block);
		amdgpu_device_ip_block_add(adev, &uvd_v6_3_ip_block);
		amdgpu_device_ip_block_add(adev, &vce_v3_4_ip_block);
		break;
	case CHIP_CARRIZO:
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &cz_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		else
			amdgpu_device_ip_block_add(adev, &dce_v11_0_ip_block);
		amdgpu_device_ip_block_add(adev, &uvd_v6_0_ip_block);
		amdgpu_device_ip_block_add(adev, &vce_v3_1_ip_block);
#if defined(CONFIG_DRM_AMD_ACP)
		amdgpu_device_ip_block_add(adev, &acp_ip_block);
#endif
		break;
	case CHIP_STONEY:
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &cz_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_1_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		else
			amdgpu_device_ip_block_add(adev, &dce_v11_0_ip_block);
		amdgpu_device_ip_block_add(adev, &uvd_v6_2_ip_block);
		amdgpu_device_ip_block_add(adev, &vce_v3_4_ip_block);
#if defined(CONFIG_DRM_AMD_ACP)
		amdgpu_device_ip_block_add(adev, &acp_ip_block);
#endif
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	return 0;
}

void legacy_doorbell_index_init(struct amdgpu_device *adev)
{
	adev->doorbell_index.kiq = AMDGPU_DOORBELL_KIQ;
	adev->doorbell_index.mec_ring0 = AMDGPU_DOORBELL_MEC_RING0;
	adev->doorbell_index.mec_ring1 = AMDGPU_DOORBELL_MEC_RING1;
	adev->doorbell_index.mec_ring2 = AMDGPU_DOORBELL_MEC_RING2;
	adev->doorbell_index.mec_ring3 = AMDGPU_DOORBELL_MEC_RING3;
	adev->doorbell_index.mec_ring4 = AMDGPU_DOORBELL_MEC_RING4;
	adev->doorbell_index.mec_ring5 = AMDGPU_DOORBELL_MEC_RING5;
	adev->doorbell_index.mec_ring6 = AMDGPU_DOORBELL_MEC_RING6;
	adev->doorbell_index.mec_ring7 = AMDGPU_DOORBELL_MEC_RING7;
	adev->doorbell_index.gfx_ring0 = AMDGPU_DOORBELL_GFX_RING0;
	adev->doorbell_index.sdma_engine[0] = AMDGPU_DOORBELL_sDMA_ENGINE0;
	adev->doorbell_index.sdma_engine[1] = AMDGPU_DOORBELL_sDMA_ENGINE1;
	adev->doorbell_index.ih = AMDGPU_DOORBELL_IH;
	adev->doorbell_index.max_assignment = AMDGPU_DOORBELL_MAX_ASSIGNMENT;
}