/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "amdgpu.h"
#include "mmhub_v1_0.h"

#include "vega10/soc15ip.h"
#include "vega10/MMHUB/mmhub_1_0_offset.h"
#include "vega10/MMHUB/mmhub_1_0_sh_mask.h"
#include "vega10/MMHUB/mmhub_1_0_default.h"
#include "vega10/ATHUB/athub_1_0_offset.h"
#include "vega10/ATHUB/athub_1_0_sh_mask.h"
#include "vega10/ATHUB/athub_1_0_default.h"
#include "vega10/vega10_enum.h"

#include "soc15_common.h"
37 u64
mmhub_v1_0_get_fb_location(struct amdgpu_device
*adev
)
39 u64 base
= RREG32(SOC15_REG_OFFSET(MMHUB
, 0, mmMC_VM_FB_LOCATION_BASE
));
41 base
&= MC_VM_FB_LOCATION_BASE__FB_BASE_MASK
;
47 int mmhub_v1_0_gart_enable(struct amdgpu_device
*adev
)
55 /* Update configuration */
56 DRM_INFO("%s -- in\n", __func__
);
57 WREG32(SOC15_REG_OFFSET(MMHUB
, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR
),
58 adev
->mc
.vram_start
>> 18);
59 WREG32(SOC15_REG_OFFSET(MMHUB
, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR
),
60 adev
->mc
.vram_end
>> 18);
61 value
= adev
->vram_scratch
.gpu_addr
- adev
->mc
.vram_start
+
62 adev
->vm_manager
.vram_base_offset
;
63 WREG32(SOC15_REG_OFFSET(MMHUB
, 0,
64 mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB
),
66 WREG32(SOC15_REG_OFFSET(MMHUB
, 0,
67 mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB
),
70 if (amdgpu_sriov_vf(adev
)) {
71 /* MC_VM_FB_LOCATION_BASE/TOP is NULL for VF, becuase they are VF copy registers so
72 vbios post doesn't program them, for SRIOV driver need to program them */
73 WREG32(SOC15_REG_OFFSET(MMHUB
, 0, mmMC_VM_FB_LOCATION_BASE
),
74 adev
->mc
.vram_start
>> 24);
75 WREG32(SOC15_REG_OFFSET(MMHUB
, 0, mmMC_VM_FB_LOCATION_TOP
),
76 adev
->mc
.vram_end
>> 24);
80 WREG32(SOC15_REG_OFFSET(MMHUB
, 0, mmMC_VM_AGP_BASE
), 0);
81 WREG32(SOC15_REG_OFFSET(MMHUB
, 0, mmMC_VM_AGP_TOP
), 0);
82 WREG32(SOC15_REG_OFFSET(MMHUB
, 0, mmMC_VM_AGP_BOT
), 0x00FFFFFF);
86 /* Setup TLB control */
87 tmp
= RREG32(SOC15_REG_OFFSET(MMHUB
, 0, mmMC_VM_MX_L1_TLB_CNTL
));
88 tmp
= REG_SET_FIELD(tmp
, MC_VM_MX_L1_TLB_CNTL
, ENABLE_L1_TLB
, 1);
89 tmp
= REG_SET_FIELD(tmp
,
93 tmp
= REG_SET_FIELD(tmp
,
95 ENABLE_ADVANCED_DRIVER_MODEL
,
97 tmp
= REG_SET_FIELD(tmp
,
99 SYSTEM_APERTURE_UNMAPPED_ACCESS
,
101 tmp
= REG_SET_FIELD(tmp
,
102 MC_VM_MX_L1_TLB_CNTL
,
105 tmp
= REG_SET_FIELD(tmp
,
106 MC_VM_MX_L1_TLB_CNTL
,
108 MTYPE_UC
);/* XXX for emulation. */
109 tmp
= REG_SET_FIELD(tmp
,
110 MC_VM_MX_L1_TLB_CNTL
,
113 WREG32(SOC15_REG_OFFSET(MMHUB
, 0, mmMC_VM_MX_L1_TLB_CNTL
), tmp
);
116 tmp
= RREG32(SOC15_REG_OFFSET(MMHUB
, 0, mmVM_L2_CNTL
));
117 tmp
= REG_SET_FIELD(tmp
, VM_L2_CNTL
, ENABLE_L2_CACHE
, 1);
118 tmp
= REG_SET_FIELD(tmp
,
120 ENABLE_L2_FRAGMENT_PROCESSING
,
122 tmp
= REG_SET_FIELD(tmp
,
124 L2_PDE0_CACHE_TAG_GENERATION_MODE
,
125 0);/* XXX for emulation, Refer to closed source code.*/
126 tmp
= REG_SET_FIELD(tmp
, VM_L2_CNTL
, PDE_FAULT_CLASSIFICATION
, 1);
127 tmp
= REG_SET_FIELD(tmp
,
129 CONTEXT1_IDENTITY_ACCESS_MODE
,
131 tmp
= REG_SET_FIELD(tmp
,
133 IDENTITY_MODE_FRAGMENT_SIZE
,
135 WREG32(SOC15_REG_OFFSET(MMHUB
, 0, mmVM_L2_CNTL
), tmp
);
137 tmp
= RREG32(SOC15_REG_OFFSET(MMHUB
, 0, mmVM_L2_CNTL2
));
138 tmp
= REG_SET_FIELD(tmp
, VM_L2_CNTL2
, INVALIDATE_ALL_L1_TLBS
, 1);
139 tmp
= REG_SET_FIELD(tmp
, VM_L2_CNTL2
, INVALIDATE_L2_CACHE
, 1);
140 WREG32(SOC15_REG_OFFSET(MMHUB
, 0, mmVM_L2_CNTL2
), tmp
);
142 tmp
= mmVM_L2_CNTL3_DEFAULT
;
143 WREG32(SOC15_REG_OFFSET(MMHUB
, 0, mmVM_L2_CNTL3
), tmp
);
145 tmp
= RREG32(SOC15_REG_OFFSET(MMHUB
, 0, mmVM_L2_CNTL4
));
146 tmp
= REG_SET_FIELD(tmp
,
148 VMC_TAP_PDE_REQUEST_PHYSICAL
,
150 tmp
= REG_SET_FIELD(tmp
,
152 VMC_TAP_PTE_REQUEST_PHYSICAL
,
154 WREG32(SOC15_REG_OFFSET(MMHUB
, 0, mmVM_L2_CNTL4
), tmp
);
157 WREG32(SOC15_REG_OFFSET(MMHUB
, 0,
158 mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32
),
159 (u32
)(adev
->mc
.gtt_start
>> 12));
160 WREG32(SOC15_REG_OFFSET(MMHUB
, 0,
161 mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32
),
162 (u32
)(adev
->mc
.gtt_start
>> 44));
164 WREG32(SOC15_REG_OFFSET(MMHUB
, 0,
165 mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32
),
166 (u32
)(adev
->mc
.gtt_end
>> 12));
167 WREG32(SOC15_REG_OFFSET(MMHUB
, 0,
168 mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32
),
169 (u32
)(adev
->mc
.gtt_end
>> 44));
171 BUG_ON(adev
->gart
.table_addr
& (~0x0000FFFFFFFFF000ULL
));
172 value
= adev
->gart
.table_addr
- adev
->mc
.vram_start
+
173 adev
->vm_manager
.vram_base_offset
;
174 value
&= 0x0000FFFFFFFFF000ULL
;
175 value
|= 0x1; /* valid bit */
177 WREG32(SOC15_REG_OFFSET(MMHUB
, 0,
178 mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32
),
180 WREG32(SOC15_REG_OFFSET(MMHUB
, 0,
181 mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32
),
184 WREG32(SOC15_REG_OFFSET(MMHUB
, 0,
185 mmVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32
),
186 (u32
)(adev
->dummy_page
.addr
>> 12));
187 WREG32(SOC15_REG_OFFSET(MMHUB
, 0,
188 mmVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32
),
189 (u32
)((u64
)adev
->dummy_page
.addr
>> 44));
191 tmp
= RREG32(SOC15_REG_OFFSET(MMHUB
, 0, mmVM_L2_PROTECTION_FAULT_CNTL2
));
192 tmp
= REG_SET_FIELD(tmp
, VM_L2_PROTECTION_FAULT_CNTL2
,
193 ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY
,
195 WREG32(SOC15_REG_OFFSET(MMHUB
, 0, mmVM_L2_PROTECTION_FAULT_CNTL2
), tmp
);
197 addr
= SOC15_REG_OFFSET(MMHUB
, 0, mmVM_CONTEXT0_CNTL
);
200 tmp
= REG_SET_FIELD(tmp
, VM_CONTEXT0_CNTL
, ENABLE_CONTEXT
, 1);
201 tmp
= REG_SET_FIELD(tmp
, VM_CONTEXT0_CNTL
, PAGE_TABLE_DEPTH
, 0);
202 WREG32(SOC15_REG_OFFSET(MMHUB
, 0, mmVM_CONTEXT0_CNTL
), tmp
);
206 /* Disable identity aperture.*/
207 WREG32(SOC15_REG_OFFSET(MMHUB
, 0,
208 mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32
), 0XFFFFFFFF);
209 WREG32(SOC15_REG_OFFSET(MMHUB
, 0,
210 mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32
), 0x0000000F);
212 WREG32(SOC15_REG_OFFSET(MMHUB
, 0,
213 mmVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32
), 0);
214 WREG32(SOC15_REG_OFFSET(MMHUB
, 0,
215 mmVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32
), 0);
217 WREG32(SOC15_REG_OFFSET(MMHUB
, 0,
218 mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32
), 0);
219 WREG32(SOC15_REG_OFFSET(MMHUB
, 0,
220 mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32
), 0);
222 for (i
= 0; i
<= 14; i
++) {
223 tmp
= RREG32(SOC15_REG_OFFSET(MMHUB
, 0, mmVM_CONTEXT1_CNTL
)
225 tmp
= REG_SET_FIELD(tmp
, VM_CONTEXT1_CNTL
,
227 tmp
= REG_SET_FIELD(tmp
, VM_CONTEXT1_CNTL
,
228 PAGE_TABLE_DEPTH
, adev
->vm_manager
.num_level
);
229 tmp
= REG_SET_FIELD(tmp
, VM_CONTEXT1_CNTL
,
230 RANGE_PROTECTION_FAULT_ENABLE_DEFAULT
, 1);
231 tmp
= REG_SET_FIELD(tmp
, VM_CONTEXT1_CNTL
,
232 DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT
, 1);
233 tmp
= REG_SET_FIELD(tmp
, VM_CONTEXT1_CNTL
,
234 PDE0_PROTECTION_FAULT_ENABLE_DEFAULT
, 1);
235 tmp
= REG_SET_FIELD(tmp
, VM_CONTEXT1_CNTL
,
236 VALID_PROTECTION_FAULT_ENABLE_DEFAULT
, 1);
237 tmp
= REG_SET_FIELD(tmp
, VM_CONTEXT1_CNTL
,
238 READ_PROTECTION_FAULT_ENABLE_DEFAULT
, 1);
239 tmp
= REG_SET_FIELD(tmp
, VM_CONTEXT1_CNTL
,
240 WRITE_PROTECTION_FAULT_ENABLE_DEFAULT
, 1);
241 tmp
= REG_SET_FIELD(tmp
, VM_CONTEXT1_CNTL
,
242 EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT
, 1);
243 tmp
= REG_SET_FIELD(tmp
, VM_CONTEXT1_CNTL
,
244 PAGE_TABLE_BLOCK_SIZE
,
245 adev
->vm_manager
.block_size
- 9);
246 WREG32(SOC15_REG_OFFSET(MMHUB
, 0, mmVM_CONTEXT1_CNTL
) + i
, tmp
);
247 WREG32(SOC15_REG_OFFSET(MMHUB
, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32
) + i
*2, 0);
248 WREG32(SOC15_REG_OFFSET(MMHUB
, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32
) + i
*2, 0);
249 WREG32(SOC15_REG_OFFSET(MMHUB
, 0, mmVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32
) + i
*2,
250 lower_32_bits(adev
->vm_manager
.max_pfn
- 1));
251 WREG32(SOC15_REG_OFFSET(MMHUB
, 0, mmVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32
) + i
*2,
252 upper_32_bits(adev
->vm_manager
.max_pfn
- 1));
258 void mmhub_v1_0_gart_disable(struct amdgpu_device
*adev
)
263 /* Disable all tables */
264 for (i
= 0; i
< 16; i
++)
265 WREG32(SOC15_REG_OFFSET(MMHUB
, 0, mmVM_CONTEXT0_CNTL
) + i
, 0);
267 /* Setup TLB control */
268 tmp
= RREG32(SOC15_REG_OFFSET(MMHUB
, 0, mmMC_VM_MX_L1_TLB_CNTL
));
269 tmp
= REG_SET_FIELD(tmp
, MC_VM_MX_L1_TLB_CNTL
, ENABLE_L1_TLB
, 0);
270 tmp
= REG_SET_FIELD(tmp
,
271 MC_VM_MX_L1_TLB_CNTL
,
272 ENABLE_ADVANCED_DRIVER_MODEL
,
274 WREG32(SOC15_REG_OFFSET(MMHUB
, 0, mmMC_VM_MX_L1_TLB_CNTL
), tmp
);
277 tmp
= RREG32(SOC15_REG_OFFSET(MMHUB
, 0, mmVM_L2_CNTL
));
278 tmp
= REG_SET_FIELD(tmp
, VM_L2_CNTL
, ENABLE_L2_CACHE
, 0);
279 WREG32(SOC15_REG_OFFSET(MMHUB
, 0, mmVM_L2_CNTL
), tmp
);
280 WREG32(SOC15_REG_OFFSET(MMHUB
, 0, mmVM_L2_CNTL3
), 0);
284 * mmhub_v1_0_set_fault_enable_default - update GART/VM fault handling
286 * @adev: amdgpu_device pointer
287 * @value: true redirects VM faults to the default page
289 void mmhub_v1_0_set_fault_enable_default(struct amdgpu_device
*adev
, bool value
)
292 tmp
= RREG32(SOC15_REG_OFFSET(MMHUB
, 0, mmVM_L2_PROTECTION_FAULT_CNTL
));
293 tmp
= REG_SET_FIELD(tmp
, VM_L2_PROTECTION_FAULT_CNTL
,
294 RANGE_PROTECTION_FAULT_ENABLE_DEFAULT
, value
);
295 tmp
= REG_SET_FIELD(tmp
, VM_L2_PROTECTION_FAULT_CNTL
,
296 PDE0_PROTECTION_FAULT_ENABLE_DEFAULT
, value
);
297 tmp
= REG_SET_FIELD(tmp
, VM_L2_PROTECTION_FAULT_CNTL
,
298 PDE1_PROTECTION_FAULT_ENABLE_DEFAULT
, value
);
299 tmp
= REG_SET_FIELD(tmp
, VM_L2_PROTECTION_FAULT_CNTL
,
300 PDE2_PROTECTION_FAULT_ENABLE_DEFAULT
, value
);
301 tmp
= REG_SET_FIELD(tmp
,
302 VM_L2_PROTECTION_FAULT_CNTL
,
303 TRANSLATE_FURTHER_PROTECTION_FAULT_ENABLE_DEFAULT
,
305 tmp
= REG_SET_FIELD(tmp
, VM_L2_PROTECTION_FAULT_CNTL
,
306 NACK_PROTECTION_FAULT_ENABLE_DEFAULT
, value
);
307 tmp
= REG_SET_FIELD(tmp
, VM_L2_PROTECTION_FAULT_CNTL
,
308 DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT
, value
);
309 tmp
= REG_SET_FIELD(tmp
, VM_L2_PROTECTION_FAULT_CNTL
,
310 VALID_PROTECTION_FAULT_ENABLE_DEFAULT
, value
);
311 tmp
= REG_SET_FIELD(tmp
, VM_L2_PROTECTION_FAULT_CNTL
,
312 READ_PROTECTION_FAULT_ENABLE_DEFAULT
, value
);
313 tmp
= REG_SET_FIELD(tmp
, VM_L2_PROTECTION_FAULT_CNTL
,
314 WRITE_PROTECTION_FAULT_ENABLE_DEFAULT
, value
);
315 tmp
= REG_SET_FIELD(tmp
, VM_L2_PROTECTION_FAULT_CNTL
,
316 EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT
, value
);
317 WREG32(SOC15_REG_OFFSET(MMHUB
, 0, mmVM_L2_PROTECTION_FAULT_CNTL
), tmp
);
/* IP-block early_init hook: nothing to do for MMHUB v1.0. */
static int mmhub_v1_0_early_init(void *handle)
{
	return 0;
}
/* IP-block late_init hook: nothing to do for MMHUB v1.0. */
static int mmhub_v1_0_late_init(void *handle)
{
	return 0;
}
330 static int mmhub_v1_0_sw_init(void *handle
)
332 struct amdgpu_device
*adev
= (struct amdgpu_device
*)handle
;
333 struct amdgpu_vmhub
*hub
= &adev
->vmhub
[AMDGPU_MMHUB
];
335 hub
->ctx0_ptb_addr_lo32
=
336 SOC15_REG_OFFSET(MMHUB
, 0,
337 mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32
);
338 hub
->ctx0_ptb_addr_hi32
=
339 SOC15_REG_OFFSET(MMHUB
, 0,
340 mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32
);
341 hub
->vm_inv_eng0_req
=
342 SOC15_REG_OFFSET(MMHUB
, 0, mmVM_INVALIDATE_ENG0_REQ
);
343 hub
->vm_inv_eng0_ack
=
344 SOC15_REG_OFFSET(MMHUB
, 0, mmVM_INVALIDATE_ENG0_ACK
);
345 hub
->vm_context0_cntl
=
346 SOC15_REG_OFFSET(MMHUB
, 0, mmVM_CONTEXT0_CNTL
);
347 hub
->vm_l2_pro_fault_status
=
348 SOC15_REG_OFFSET(MMHUB
, 0, mmVM_L2_PROTECTION_FAULT_STATUS
);
349 hub
->vm_l2_pro_fault_cntl
=
350 SOC15_REG_OFFSET(MMHUB
, 0, mmVM_L2_PROTECTION_FAULT_CNTL
);
/* IP-block sw_fini hook: nothing to tear down. */
static int mmhub_v1_0_sw_fini(void *handle)
{
	return 0;
}
360 static int mmhub_v1_0_hw_init(void *handle
)
362 struct amdgpu_device
*adev
= (struct amdgpu_device
*)handle
;
365 for (i
= 0; i
< 18; ++i
) {
366 WREG32(SOC15_REG_OFFSET(MMHUB
, 0,
367 mmVM_INVALIDATE_ENG0_ADDR_RANGE_LO32
) +
369 WREG32(SOC15_REG_OFFSET(MMHUB
, 0,
370 mmVM_INVALIDATE_ENG0_ADDR_RANGE_HI32
) +
/* IP-block hw_fini hook: nothing to do; GART teardown happens elsewhere. */
static int mmhub_v1_0_hw_fini(void *handle)
{
	return 0;
}
/* IP-block suspend hook: no MMHUB-specific state to save. */
static int mmhub_v1_0_suspend(void *handle)
{
	return 0;
}
/* IP-block resume hook: no MMHUB-specific state to restore. */
static int mmhub_v1_0_resume(void *handle)
{
	return 0;
}
/* IP-block is_idle hook: MMHUB exposes no busy status here, report idle. */
static bool mmhub_v1_0_is_idle(void *handle)
{
	return true;
}
/* IP-block wait_for_idle hook: always idle, so succeed immediately. */
static int mmhub_v1_0_wait_for_idle(void *handle)
{
	return 0;
}
/* IP-block soft_reset hook: no soft reset implemented for MMHUB v1.0. */
static int mmhub_v1_0_soft_reset(void *handle)
{
	return 0;
}
407 static void mmhub_v1_0_update_medium_grain_clock_gating(struct amdgpu_device
*adev
,
410 uint32_t def
, data
, def1
, data1
, def2
, data2
;
412 def
= data
= RREG32(SOC15_REG_OFFSET(MMHUB
, 0, mmATC_L2_MISC_CG
));
413 def1
= data1
= RREG32(SOC15_REG_OFFSET(MMHUB
, 0, mmDAGB0_CNTL_MISC2
));
414 def2
= data2
= RREG32(SOC15_REG_OFFSET(MMHUB
, 0, mmDAGB1_CNTL_MISC2
));
416 if (enable
&& (adev
->cg_flags
& AMD_CG_SUPPORT_MC_MGCG
)) {
417 data
|= ATC_L2_MISC_CG__ENABLE_MASK
;
419 data1
&= ~(DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK
|
420 DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK
|
421 DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK
|
422 DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK
|
423 DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK
|
424 DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK
);
426 data2
&= ~(DAGB1_CNTL_MISC2__DISABLE_WRREQ_CG_MASK
|
427 DAGB1_CNTL_MISC2__DISABLE_WRRET_CG_MASK
|
428 DAGB1_CNTL_MISC2__DISABLE_RDREQ_CG_MASK
|
429 DAGB1_CNTL_MISC2__DISABLE_RDRET_CG_MASK
|
430 DAGB1_CNTL_MISC2__DISABLE_TLBWR_CG_MASK
|
431 DAGB1_CNTL_MISC2__DISABLE_TLBRD_CG_MASK
);
433 data
&= ~ATC_L2_MISC_CG__ENABLE_MASK
;
435 data1
|= (DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK
|
436 DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK
|
437 DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK
|
438 DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK
|
439 DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK
|
440 DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK
);
442 data2
|= (DAGB1_CNTL_MISC2__DISABLE_WRREQ_CG_MASK
|
443 DAGB1_CNTL_MISC2__DISABLE_WRRET_CG_MASK
|
444 DAGB1_CNTL_MISC2__DISABLE_RDREQ_CG_MASK
|
445 DAGB1_CNTL_MISC2__DISABLE_RDRET_CG_MASK
|
446 DAGB1_CNTL_MISC2__DISABLE_TLBWR_CG_MASK
|
447 DAGB1_CNTL_MISC2__DISABLE_TLBRD_CG_MASK
);
451 WREG32(SOC15_REG_OFFSET(MMHUB
, 0, mmATC_L2_MISC_CG
), data
);
454 WREG32(SOC15_REG_OFFSET(MMHUB
, 0, mmDAGB0_CNTL_MISC2
), data1
);
457 WREG32(SOC15_REG_OFFSET(MMHUB
, 0, mmDAGB1_CNTL_MISC2
), data2
);
460 static void athub_update_medium_grain_clock_gating(struct amdgpu_device
*adev
,
465 def
= data
= RREG32(SOC15_REG_OFFSET(ATHUB
, 0, mmATHUB_MISC_CNTL
));
467 if (enable
&& (adev
->cg_flags
& AMD_CG_SUPPORT_MC_MGCG
))
468 data
|= ATHUB_MISC_CNTL__CG_ENABLE_MASK
;
470 data
&= ~ATHUB_MISC_CNTL__CG_ENABLE_MASK
;
473 WREG32(SOC15_REG_OFFSET(ATHUB
, 0, mmATHUB_MISC_CNTL
), data
);
476 static void mmhub_v1_0_update_medium_grain_light_sleep(struct amdgpu_device
*adev
,
481 def
= data
= RREG32(SOC15_REG_OFFSET(MMHUB
, 0, mmATC_L2_MISC_CG
));
483 if (enable
&& (adev
->cg_flags
& AMD_CG_SUPPORT_MC_LS
))
484 data
|= ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK
;
486 data
&= ~ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK
;
489 WREG32(SOC15_REG_OFFSET(MMHUB
, 0, mmATC_L2_MISC_CG
), data
);
492 static void athub_update_medium_grain_light_sleep(struct amdgpu_device
*adev
,
497 def
= data
= RREG32(SOC15_REG_OFFSET(ATHUB
, 0, mmATHUB_MISC_CNTL
));
499 if (enable
&& (adev
->cg_flags
& AMD_CG_SUPPORT_MC_LS
) &&
500 (adev
->cg_flags
& AMD_CG_SUPPORT_HDP_LS
))
501 data
|= ATHUB_MISC_CNTL__CG_MEM_LS_ENABLE_MASK
;
503 data
&= ~ATHUB_MISC_CNTL__CG_MEM_LS_ENABLE_MASK
;
506 WREG32(SOC15_REG_OFFSET(ATHUB
, 0, mmATHUB_MISC_CNTL
), data
);
509 static int mmhub_v1_0_set_clockgating_state(void *handle
,
510 enum amd_clockgating_state state
)
512 struct amdgpu_device
*adev
= (struct amdgpu_device
*)handle
;
514 if (amdgpu_sriov_vf(adev
))
517 switch (adev
->asic_type
) {
519 mmhub_v1_0_update_medium_grain_clock_gating(adev
,
520 state
== AMD_CG_STATE_GATE
? true : false);
521 athub_update_medium_grain_clock_gating(adev
,
522 state
== AMD_CG_STATE_GATE
? true : false);
523 mmhub_v1_0_update_medium_grain_light_sleep(adev
,
524 state
== AMD_CG_STATE_GATE
? true : false);
525 athub_update_medium_grain_light_sleep(adev
,
526 state
== AMD_CG_STATE_GATE
? true : false);
535 static void mmhub_v1_0_get_clockgating_state(void *handle
, u32
*flags
)
537 struct amdgpu_device
*adev
= (struct amdgpu_device
*)handle
;
540 if (amdgpu_sriov_vf(adev
))
543 /* AMD_CG_SUPPORT_MC_MGCG */
544 data
= RREG32(SOC15_REG_OFFSET(ATHUB
, 0, mmATHUB_MISC_CNTL
));
545 if (data
& ATHUB_MISC_CNTL__CG_ENABLE_MASK
)
546 *flags
|= AMD_CG_SUPPORT_MC_MGCG
;
548 /* AMD_CG_SUPPORT_MC_LS */
549 data
= RREG32(SOC15_REG_OFFSET(MMHUB
, 0, mmATC_L2_MISC_CG
));
550 if (data
& ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK
)
551 *flags
|= AMD_CG_SUPPORT_MC_LS
;
554 static int mmhub_v1_0_set_powergating_state(void *handle
,
555 enum amd_powergating_state state
)
560 const struct amd_ip_funcs mmhub_v1_0_ip_funcs
= {
561 .name
= "mmhub_v1_0",
562 .early_init
= mmhub_v1_0_early_init
,
563 .late_init
= mmhub_v1_0_late_init
,
564 .sw_init
= mmhub_v1_0_sw_init
,
565 .sw_fini
= mmhub_v1_0_sw_fini
,
566 .hw_init
= mmhub_v1_0_hw_init
,
567 .hw_fini
= mmhub_v1_0_hw_fini
,
568 .suspend
= mmhub_v1_0_suspend
,
569 .resume
= mmhub_v1_0_resume
,
570 .is_idle
= mmhub_v1_0_is_idle
,
571 .wait_for_idle
= mmhub_v1_0_wait_for_idle
,
572 .soft_reset
= mmhub_v1_0_soft_reset
,
573 .set_clockgating_state
= mmhub_v1_0_set_clockgating_state
,
574 .set_powergating_state
= mmhub_v1_0_set_powergating_state
,
575 .get_clockgating_state
= mmhub_v1_0_get_clockgating_state
,
578 const struct amdgpu_ip_block_version mmhub_v1_0_ip_block
=
580 .type
= AMD_IP_BLOCK_TYPE_MMHUB
,
584 .funcs
= &mmhub_v1_0_ip_funcs
,