/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include "drmP.h"
#include "amdgpu.h"
#include "gmc_v8_0.h"
#include "amdgpu_ucode.h"

#include "gmc/gmc_8_1_d.h"
#include "gmc/gmc_8_1_sh_mask.h"

#include "bif/bif_5_0_d.h"
#include "bif/bif_5_0_sh_mask.h"

#include "oss/oss_3_0_d.h"
#include "oss/oss_3_0_sh_mask.h"

#include "vid.h"
static void gmc_v8_0_set_gart_funcs(struct amdgpu_device *adev);
static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev);

MODULE_FIRMWARE("amdgpu/topaz_mc.bin");
MODULE_FIRMWARE("amdgpu/tonga_mc.bin");
static const u32 golden_settings_tonga_a11[] =
{
	mmMC_ARB_WTM_GRPWT_RD, 0x00000003, 0x00000000,
	mmMC_HUB_RDREQ_DMIF_LIMIT, 0x0000007f, 0x00000028,
	mmMC_HUB_WDP_UMC, 0x00007fb6, 0x00000991,
	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff,
};

static const u32 tonga_mgcg_cgcg_init[] =
{
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};

static const u32 golden_settings_iceland_a11[] =
{
	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff
};

static const u32 iceland_mgcg_cgcg_init[] =
{
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};

static const u32 cz_mgcg_cgcg_init[] =
{
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};
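/*
 * Note on the tables above: amdgpu_program_register_sequence() consumes
 * golden-register tables three entries at a time as
 * { register offset, and_mask, or_mask }, roughly:
 *
 *	tmp = RREG32(reg);
 *	tmp &= ~and_mask;
 *	tmp |= or_mask;
 *	WREG32(reg, tmp);
 *
 * An and_mask of 0xffffffff (as in the *_mgcg_cgcg_init tables) means the
 * register is simply overwritten with or_mask.
 */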
static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		amdgpu_program_register_sequence(adev,
						 iceland_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(iceland_mgcg_cgcg_init));
		amdgpu_program_register_sequence(adev,
						 golden_settings_iceland_a11,
						 (const u32)ARRAY_SIZE(golden_settings_iceland_a11));
		break;
	case CHIP_TONGA:
		amdgpu_program_register_sequence(adev,
						 tonga_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(tonga_mgcg_cgcg_init));
		amdgpu_program_register_sequence(adev,
						 golden_settings_tonga_a11,
						 (const u32)ARRAY_SIZE(golden_settings_tonga_a11));
		break;
	case CHIP_CARRIZO:
		amdgpu_program_register_sequence(adev,
						 cz_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(cz_mgcg_cgcg_init));
		break;
	default:
		break;
	}
}
/**
 * gmc_v8_0_mc_wait_for_idle - wait for MC idle callback.
 *
 * @adev: amdgpu_device pointer
 *
 * Wait for the MC (memory controller) to be idle.
 * Returns 0 if the MC is idle, -1 if not.
 */
int gmc_v8_0_mc_wait_for_idle(struct amdgpu_device *adev)
{
	unsigned i;
	u32 tmp;

	for (i = 0; i < adev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(mmSRBM_STATUS) & (SRBM_STATUS__VMC_BUSY_MASK |
					       SRBM_STATUS__MCB_BUSY_MASK |
					       SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
					       SRBM_STATUS__MCC_BUSY_MASK |
					       SRBM_STATUS__MCD_BUSY_MASK |
					       SRBM_STATUS__VMC1_BUSY_MASK);
		if (!tmp)
			return 0;
		udelay(1);
	}
	return -1;
}
void gmc_v8_0_mc_stop(struct amdgpu_device *adev,
		      struct amdgpu_mode_mc_save *save)
{
	u32 blackout;

	if (adev->mode_info.num_crtc)
		amdgpu_display_stop_mc_access(adev, save);

	amdgpu_asic_wait_for_mc_idle(adev);

	blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
	if (REG_GET_FIELD(blackout, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE) != 1) {
		/* Block CPU access */
		WREG32(mmBIF_FB_EN, 0);
		/* blackout the MC */
		blackout = REG_SET_FIELD(blackout,
					 MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 1);
		WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout);
	}
	/* wait for the MC to settle */
	udelay(100);
}
void gmc_v8_0_mc_resume(struct amdgpu_device *adev,
			struct amdgpu_mode_mc_save *save)
{
	u32 tmp;

	/* unblackout the MC */
	tmp = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0);
	WREG32(mmMC_SHARED_BLACKOUT_CNTL, tmp);
	/* allow CPU access */
	tmp = REG_SET_FIELD(0, BIF_FB_EN, FB_READ_EN, 1);
	tmp = REG_SET_FIELD(tmp, BIF_FB_EN, FB_WRITE_EN, 1);
	WREG32(mmBIF_FB_EN, tmp);

	if (adev->mode_info.num_crtc)
		amdgpu_display_resume_mc_access(adev, save);
}
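/*
 * gmc_v8_0_mc_stop()/gmc_v8_0_mc_resume() are used as a pair (see
 * gmc_v8_0_mc_program() and gmc_v8_0_soft_reset() below): display and CPU
 * framebuffer accesses are blacked out while the MC aperture and FB
 * location registers are reprogrammed, then re-enabled afterwards.
 */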
/**
 * gmc_v8_0_init_microcode - load ucode images from disk
 *
 * @adev: amdgpu_device pointer
 *
 * Use the firmware interface to load the ucode images into
 * the driver (not loaded into hw).
 * Returns 0 on success, error on failure.
 */
static int gmc_v8_0_init_microcode(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err;

	DRM_DEBUG("\n");

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		chip_name = "topaz";
		break;
	case CHIP_TONGA:
		chip_name = "tonga";
		break;
	case CHIP_CARRIZO:
		return 0;
	default: BUG();
	}

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name);
	err = request_firmware(&adev->mc.fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->mc.fw);

out:
	if (err) {
		printk(KERN_ERR
		       "mc: Failed to load firmware \"%s\"\n",
		       fw_name);
		release_firmware(adev->mc.fw);
		adev->mc.fw = NULL;
	}
	return err;
}
/**
 * gmc_v8_0_mc_load_microcode - load MC ucode into the hw
 *
 * @adev: amdgpu_device pointer
 *
 * Load the GDDR MC ucode into the hw (VI).
 * Returns 0 on success, error on failure.
 */
static int gmc_v8_0_mc_load_microcode(struct amdgpu_device *adev)
{
	const struct mc_firmware_header_v1_0 *hdr;
	const __le32 *fw_data = NULL;
	const __le32 *io_mc_regs = NULL;
	u32 running, blackout = 0;
	int i, ucode_size, regs_size;

	if (!adev->mc.fw)
		return -EINVAL;

	hdr = (const struct mc_firmware_header_v1_0 *)adev->mc.fw->data;
	amdgpu_ucode_print_mc_hdr(&hdr->header);

	adev->mc.fw_version = le32_to_cpu(hdr->header.ucode_version);
	regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
	io_mc_regs = (const __le32 *)
		(adev->mc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
	ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
	fw_data = (const __le32 *)
		(adev->mc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));

	running = REG_GET_FIELD(RREG32(mmMC_SEQ_SUP_CNTL), MC_SEQ_SUP_CNTL, RUN);

	if (running == 0) {
		if (running) {
			blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
			WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout | 1);
		}

		/* reset the engine and set to writable */
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010);

		/* load mc io regs */
		for (i = 0; i < regs_size; i++) {
			WREG32(mmMC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(io_mc_regs++));
			WREG32(mmMC_SEQ_IO_DEBUG_DATA, le32_to_cpup(io_mc_regs++));
		}
		/* load the MC ucode */
		for (i = 0; i < ucode_size; i++)
			WREG32(mmMC_SEQ_SUP_PGM, le32_to_cpup(fw_data++));

		/* put the engine back into the active state */
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000004);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000001);

		/* wait for training to complete */
		for (i = 0; i < adev->usec_timeout; i++) {
			if (REG_GET_FIELD(RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL),
					  MC_SEQ_TRAIN_WAKEUP_CNTL, TRAIN_DONE_D0))
				break;
			udelay(1);
		}
		for (i = 0; i < adev->usec_timeout; i++) {
			if (REG_GET_FIELD(RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL),
					  MC_SEQ_TRAIN_WAKEUP_CNTL, TRAIN_DONE_D1))
				break;
			udelay(1);
		}

		if (running)
			WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout);
	}

	return 0;
}
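/*
 * Layout of the MC firmware image, as consumed above: the
 * mc_firmware_header_v1_0 header is followed by an "io debug" array of
 * (index, data) register pairs (hence the division by 4 * 2 when computing
 * regs_size) and by the MC SEQ program itself, whose 32-bit words are
 * streamed into mmMC_SEQ_SUP_PGM.
 */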
static void gmc_v8_0_vram_gtt_location(struct amdgpu_device *adev,
				       struct amdgpu_mc *mc)
{
	if (mc->mc_vram_size > 0xFFC0000000ULL) {
		/* leave room for at least 1024M GTT */
		dev_warn(adev->dev, "limiting VRAM\n");
		mc->real_vram_size = 0xFFC0000000ULL;
		mc->mc_vram_size = 0xFFC0000000ULL;
	}
	amdgpu_vram_location(adev, &adev->mc, 0);
	adev->mc.gtt_base_align = 0;
	amdgpu_gtt_location(adev, mc);
}
/**
 * gmc_v8_0_mc_program - program the GPU memory controller
 *
 * @adev: amdgpu_device pointer
 *
 * Set the location of vram, gart, and AGP in the GPU's
 * physical address space (VI).
 */
static void gmc_v8_0_mc_program(struct amdgpu_device *adev)
{
	struct amdgpu_mode_mc_save save;
	u32 tmp;
	int i, j;

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x6) {
		WREG32((0xb05 + j), 0x00000000);
		WREG32((0xb06 + j), 0x00000000);
		WREG32((0xb07 + j), 0x00000000);
		WREG32((0xb08 + j), 0x00000000);
		WREG32((0xb09 + j), 0x00000000);
	}
	WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0);

	if (adev->mode_info.num_crtc)
		amdgpu_display_set_vga_render_state(adev, false);

	gmc_v8_0_mc_stop(adev, &save);
	if (amdgpu_asic_wait_for_mc_idle(adev)) {
		dev_warn(adev->dev, "Wait for MC idle timed out!\n");
	}
	/* Update configuration */
	WREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
	       adev->mc.vram_start >> 12);
	WREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
	       adev->mc.vram_end >> 12);
	WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
	       adev->vram_scratch.gpu_addr >> 12);
	tmp = ((adev->mc.vram_end >> 24) & 0xFFFF) << 16;
	tmp |= ((adev->mc.vram_start >> 24) & 0xFFFF);
	WREG32(mmMC_VM_FB_LOCATION, tmp);
	/* XXX double check these! */
	WREG32(mmHDP_NONSURFACE_BASE, (adev->mc.vram_start >> 8));
	WREG32(mmHDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
	WREG32(mmHDP_NONSURFACE_SIZE, 0x3FFFFFFF);
	WREG32(mmMC_VM_AGP_BASE, 0);
	WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF);
	WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF);
	if (amdgpu_asic_wait_for_mc_idle(adev)) {
		dev_warn(adev->dev, "Wait for MC idle timed out!\n");
	}
	gmc_v8_0_mc_resume(adev, &save);

	WREG32(mmBIF_FB_EN, BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK);

	tmp = RREG32(mmHDP_MISC_CNTL);
	tmp = REG_SET_FIELD(tmp, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 1);
	WREG32(mmHDP_MISC_CNTL, tmp);

	tmp = RREG32(mmHDP_HOST_PATH_CNTL);
	WREG32(mmHDP_HOST_PATH_CNTL, tmp);
}
/**
 * gmc_v8_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space (VI).
 * Returns 0 for success.
 */
static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
{
	u32 tmp;
	int chansize, numchan;

	/* Get VRAM information */
	tmp = RREG32(mmMC_ARB_RAMCFG);
	if (REG_GET_FIELD(tmp, MC_ARB_RAMCFG, CHANSIZE)) {
		chansize = 64;
	} else {
		chansize = 32;
	}
	tmp = RREG32(mmMC_SHARED_CHMAP);
	switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) {
	case 0:
	default:
		numchan = 1;
		break;
	case 1:
		numchan = 2;
		break;
	case 2:
		numchan = 4;
		break;
	case 3:
		numchan = 8;
		break;
	case 4:
		numchan = 3;
		break;
	case 5:
		numchan = 6;
		break;
	case 6:
		numchan = 10;
		break;
	case 7:
		numchan = 12;
		break;
	case 8:
		numchan = 16;
		break;
	}
	adev->mc.vram_width = numchan * chansize;
	/* Could aper size report 0 ? */
	adev->mc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->mc.aper_size = pci_resource_len(adev->pdev, 0);
	/* size in MB */
	adev->mc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
	adev->mc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
	adev->mc.visible_vram_size = adev->mc.aper_size;

	/* unless the user had overridden it, set the gart
	 * size equal to 1024MB or vram, whichever is larger.
	 */
	if (amdgpu_gart_size == -1)
		adev->mc.gtt_size = max((1024ULL << 20), adev->mc.mc_vram_size);
	else
		adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20;

	gmc_v8_0_vram_gtt_location(adev, &adev->mc);

	return 0;
}
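/*
 * Worked example of the sizing above: a board that reports 2048 MB in
 * mmCONFIG_MEMSIZE and has no amdgpu_gart_size override ends up with
 * mc_vram_size = 2 GB and gtt_size = max(1 GB, 2 GB) = 2 GB;
 * gmc_v8_0_vram_gtt_location() then places both ranges in the GPU's
 * physical address space via amdgpu_vram_location()/amdgpu_gtt_location().
 */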
/*
 * GART
 * VMID 0 is the physical GPU addresses as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */

/**
 * gmc_v8_0_gart_flush_gpu_tlb - gart tlb flush callback
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 *
 * Flush the TLB for the requested page table (VI).
 */
static void gmc_v8_0_gart_flush_gpu_tlb(struct amdgpu_device *adev,
					uint32_t vmid)
{
	/* flush hdp cache */
	WREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0);

	/* bits 0-15 are the VM contexts0-15 */
	WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
}
/**
 * gmc_v8_0_gart_set_pte_pde - update the page tables using MMIO
 *
 * @adev: amdgpu_device pointer
 * @cpu_pt_addr: cpu address of the page table
 * @gpu_page_idx: entry in the page table to update
 * @addr: dst addr to write into pte/pde
 * @flags: access flags
 *
 * Update the page tables using the CPU.
 */
static int gmc_v8_0_gart_set_pte_pde(struct amdgpu_device *adev,
				     void *cpu_pt_addr,
				     uint32_t gpu_page_idx,
				     uint64_t addr,
				     uint32_t flags)
{
	void __iomem *ptr = (void *)cpu_pt_addr;
	uint64_t value;

	/*
	 * PTE format on VI:
	 * 63:40 reserved
	 * 39:12 4k physical page base address
	 * 11:7 fragment
	 * 6 write
	 * 5 read
	 * 4 exe
	 * 3 reserved
	 * 2 snooped
	 * 1 system
	 * 0 valid
	 *
	 * PDE format on VI:
	 * 63:59 block fragment size
	 * 58:40 reserved
	 * 39:1 physical base address of PTE
	 * bits 5:1 must be 0.
	 * 0 valid
	 */
	value = addr & 0x000000FFFFFFF000ULL;
	value |= flags;
	writeq(value, ptr + (gpu_page_idx * 8));

	return 0;
}
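/*
 * Rough illustration of the entry written above (the flag values live in
 * amdgpu.h as the AMDGPU_PTE_* definitions): mapping a page at bus address
 * 0x123456000 with, say, AMDGPU_PTE_VALID | AMDGPU_PTE_SYSTEM |
 * AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE gives
 * value = 0x0000000123456000 | flags, stored as a single little-endian
 * 64-bit word at cpu_pt_addr + gpu_page_idx * 8.
 */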
/**
 * gmc_v8_0_gart_enable - gart enable
 *
 * @adev: amdgpu_device pointer
 *
 * This sets up the TLBs, programs the page tables for VMID0,
 * sets up the hw for VMIDs 1-15 which are allocated on
 * demand, and sets up the global locations for the LDS, GDS,
 * and GPUVM for FSA64 clients (VI).
 * Returns 0 for success, errors for failure.
 */
static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
{
	int r, i;
	u32 tmp;

	if (adev->gart.robj == NULL) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = amdgpu_gart_table_vram_pin(adev);
	if (r)
		return r;
	/* Setup TLB control */
	tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, 3);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_ADVANCED_DRIVER_MODEL, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
	WREG32(mmMC_VM_MX_L1_TLB_CNTL, tmp);
	/* Setup L2 cache */
	tmp = RREG32(mmVM_L2_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, EFFECTIVE_L2_QUEUE_SIZE, 7);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
	WREG32(mmVM_L2_CNTL, tmp);
	tmp = RREG32(mmVM_L2_CNTL2);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
	WREG32(mmVM_L2_CNTL2, tmp);
	tmp = RREG32(mmVM_L2_CNTL3);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 4);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, 4);
	WREG32(mmVM_L2_CNTL3, tmp);
	/* XXX: set to enable PTE/PDE in system memory */
	tmp = RREG32(mmVM_L2_CNTL4);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PDE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PDE_REQUEST_SHARED, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PDE_REQUEST_SNOOP, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PTE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PTE_REQUEST_SHARED, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PTE_REQUEST_SNOOP, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PDE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PDE_REQUEST_SHARED, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PDE_REQUEST_SNOOP, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_SHARED, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_SNOOP, 0);
	WREG32(mmVM_L2_CNTL4, tmp);
	/* setup context0 */
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12);
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, (adev->mc.gtt_end >> 12) - 1);
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
	WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(adev->dummy_page.addr >> 12));
	WREG32(mmVM_CONTEXT0_CNTL2, 0);
	tmp = RREG32(mmVM_CONTEXT0_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	WREG32(mmVM_CONTEXT0_CNTL, tmp);

	WREG32(mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR, 0);
	WREG32(mmVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR, 0);
	WREG32(mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET, 0);

	/* empty context1-15 */
	/* FIXME start with 4G, once using 2 level pt switch to full
	 * vm size space
	 */
	/* set vm size, must be a multiple of 4 */
	WREG32(mmVM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
	WREG32(mmVM_CONTEXT1_PAGE_TABLE_END_ADDR, adev->vm_manager.max_pfn - 1);
	for (i = 1; i < 16; i++) {
		if (i < 8)
			WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i,
			       adev->gart.table_addr >> 12);
		else
			WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8,
			       adev->gart.table_addr >> 12);
	}

	/* enable context1-15 */
	WREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(adev->dummy_page.addr >> 12));
	WREG32(mmVM_CONTEXT1_CNTL2, 4);
	tmp = RREG32(mmVM_CONTEXT1_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, VALID_PROTECTION_FAULT_ENABLE_INTERRUPT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, READ_PROTECTION_FAULT_ENABLE_INTERRUPT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_BLOCK_SIZE,
			    amdgpu_vm_block_size - 9);
	WREG32(mmVM_CONTEXT1_CNTL, tmp);

	gmc_v8_0_gart_flush_gpu_tlb(adev, 0);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(adev->mc.gtt_size >> 20),
		 (unsigned long long)adev->gart.table_addr);
	adev->gart.ready = true;
	return 0;
}
static int gmc_v8_0_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.robj) {
		WARN(1, "PCIE GART already initialized\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = amdgpu_gart_init(adev);
	if (r)
		return r;
	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
	return amdgpu_gart_table_vram_alloc(adev);
}
/**
 * gmc_v8_0_gart_disable - gart disable
 *
 * @adev: amdgpu_device pointer
 *
 * This disables all VM page tables (VI).
 */
static void gmc_v8_0_gart_disable(struct amdgpu_device *adev)
{
	u32 tmp;

	/* Disable all tables */
	WREG32(mmVM_CONTEXT0_CNTL, 0);
	WREG32(mmVM_CONTEXT1_CNTL, 0);
	/* Setup TLB control */
	tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING, 0);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_ADVANCED_DRIVER_MODEL, 0);
	WREG32(mmMC_VM_MX_L1_TLB_CNTL, tmp);
	/* Setup L2 cache */
	tmp = RREG32(mmVM_L2_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
	WREG32(mmVM_L2_CNTL, tmp);
	WREG32(mmVM_L2_CNTL2, 0);
	amdgpu_gart_table_vram_unpin(adev);
}
/**
 * gmc_v8_0_gart_fini - vm fini callback
 *
 * @adev: amdgpu_device pointer
 *
 * Tears down the driver GART/VM setup (VI).
 */
static void gmc_v8_0_gart_fini(struct amdgpu_device *adev)
{
	amdgpu_gart_table_vram_free(adev);
	amdgpu_gart_fini(adev);
}
/*
 * vm
 * VMID 0 is the physical GPU addresses as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */

/**
 * gmc_v8_0_vm_init - vi vm init callback
 *
 * @adev: amdgpu_device pointer
 *
 * Inits vi specific vm parameters (number of VMs, base of vram for
 * VMIDs 1-15) (VI).
 * Returns 0 for success.
 */
static int gmc_v8_0_vm_init(struct amdgpu_device *adev)
{
	/*
	 * number of VMs
	 * VMID 0 is reserved for System
	 * amdgpu graphics/compute will use VMIDs 1-7
	 * amdkfd will use VMIDs 8-15
	 */
	adev->vm_manager.nvm = AMDGPU_NUM_OF_VMIDS;

	/* base offset of vram pages */
	if (adev->flags & AMDGPU_IS_APU) {
		u64 tmp = RREG32(mmMC_VM_FB_OFFSET);
		tmp <<= 22;
		adev->vm_manager.vram_base_offset = tmp;
	} else {
		adev->vm_manager.vram_base_offset = 0;
	}

	return 0;
}
/**
 * gmc_v8_0_vm_fini - vi vm fini callback
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down any asic specific VM setup (VI).
 */
static void gmc_v8_0_vm_fini(struct amdgpu_device *adev)
{
}
/**
 * gmc_v8_0_vm_decode_fault - print human readable fault info
 *
 * @adev: amdgpu_device pointer
 * @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value
 * @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value
 * @mc_client: VM_CONTEXT1_PROTECTION_FAULT_MCCLIENT register value
 *
 * Print human readable fault information (VI).
 */
static void gmc_v8_0_vm_decode_fault(struct amdgpu_device *adev,
				     u32 status, u32 addr, u32 mc_client)
{
	u32 mc_id;
	u32 vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
	u32 protections = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
					PROTECTIONS);
	char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
		(mc_client >> 8) & 0xff, mc_client & 0xff, 0 };

	mc_id = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
			      MEMORY_CLIENT_ID);

	printk("VM fault (0x%02x, vmid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
	       protections, vmid, addr,
	       REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
			     MEMORY_CLIENT_RW) ?
	       "write" : "read", block, mc_client, mc_id);
}
static int gmc_v8_0_convert_vram_type(int mc_seq_vram_type)
{
	switch (mc_seq_vram_type) {
	case MC_SEQ_MISC0__MT__GDDR1:
		return AMDGPU_VRAM_TYPE_GDDR1;
	case MC_SEQ_MISC0__MT__DDR2:
		return AMDGPU_VRAM_TYPE_DDR2;
	case MC_SEQ_MISC0__MT__GDDR3:
		return AMDGPU_VRAM_TYPE_GDDR3;
	case MC_SEQ_MISC0__MT__GDDR4:
		return AMDGPU_VRAM_TYPE_GDDR4;
	case MC_SEQ_MISC0__MT__GDDR5:
		return AMDGPU_VRAM_TYPE_GDDR5;
	case MC_SEQ_MISC0__MT__HBM:
		return AMDGPU_VRAM_TYPE_HBM;
	case MC_SEQ_MISC0__MT__DDR3:
		return AMDGPU_VRAM_TYPE_DDR3;
	default:
		return AMDGPU_VRAM_TYPE_UNKNOWN;
	}
}
static int gmc_v8_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v8_0_set_gart_funcs(adev);
	gmc_v8_0_set_irq_funcs(adev);

	if (adev->flags & AMDGPU_IS_APU) {
		adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
	} else {
		u32 tmp = RREG32(mmMC_SEQ_MISC0);
		tmp &= MC_SEQ_MISC0__MT__MASK;
		adev->mc.vram_type = gmc_v8_0_convert_vram_type(tmp);
	}

	return 0;
}
static int gmc_v8_0_sw_init(void *handle)
{
	int r;
	int dma_bits;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_gem_init(adev);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, 146, &adev->mc.vm_fault);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, 147, &adev->mc.vm_fault);
	if (r)
		return r;

	/* Adjust VM size here.
	 * Currently set to 4GB ((1 << 20) 4k pages).
	 * Max GPUVM size for cayman and SI is 40 bits.
	 */
	adev->vm_manager.max_pfn = amdgpu_vm_size << 18;
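	/* amdgpu_vm_size is specified in GB; one GB is 1 << 18 4K pages, so
	 * the shift converts it into a page-frame count (the default of 4
	 * gives max_pfn = 1 << 20, matching the comment above).
	 */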

	/* Set the internal MC address mask
	 * This is the max address of the GPU's
	 * internal address space.
	 */
	adev->mc.mc_mask = 0xffffffffffULL; /* 40 bit MC */

	/* set DMA mask + need_dma32 flags.
	 * PCIE - can handle 40-bits.
	 * IGP - can handle 40-bits
	 * PCI - dma32 for legacy pci gart, 40 bits on newer asics
	 */
	adev->need_dma32 = false;
	dma_bits = adev->need_dma32 ? 32 : 40;
	r = pci_set_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		adev->need_dma32 = true;
		dma_bits = 32;
		printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
	}
	r = pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
		printk(KERN_WARNING "amdgpu: No coherent DMA available.\n");
	}

	r = gmc_v8_0_init_microcode(adev);
	if (r) {
		DRM_ERROR("Failed to load mc firmware!\n");
		return r;
	}

	r = gmc_v8_0_mc_init(adev);
	if (r)
		return r;

	/* Memory manager */
	r = amdgpu_bo_init(adev);
	if (r)
		return r;

	r = gmc_v8_0_gart_init(adev);
	if (r)
		return r;

	if (!adev->vm_manager.enabled) {
		r = gmc_v8_0_vm_init(adev);
		if (r) {
			dev_err(adev->dev, "vm manager initialization failed (%d).\n", r);
			return r;
		}
		adev->vm_manager.enabled = true;
	}

	return r;
}
static int gmc_v8_0_sw_fini(void *handle)
{
	int i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->vm_manager.enabled) {
		for (i = 0; i < AMDGPU_NUM_VM; ++i)
			amdgpu_fence_unref(&adev->vm_manager.active[i]);
		gmc_v8_0_vm_fini(adev);
		adev->vm_manager.enabled = false;
	}
	gmc_v8_0_gart_fini(adev);
	amdgpu_gem_fini(adev);
	amdgpu_bo_fini(adev);

	return 0;
}
static int gmc_v8_0_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v8_0_init_golden_registers(adev);

	gmc_v8_0_mc_program(adev);

	if (!(adev->flags & AMDGPU_IS_APU)) {
		r = gmc_v8_0_mc_load_microcode(adev);
		if (r) {
			DRM_ERROR("Failed to load MC firmware!\n");
			return r;
		}
	}

	r = gmc_v8_0_gart_enable(adev);
	if (r)
		return r;

	return r;
}
static int gmc_v8_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v8_0_gart_disable(adev);

	return 0;
}
static int gmc_v8_0_suspend(void *handle)
{
	int i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->vm_manager.enabled) {
		for (i = 0; i < AMDGPU_NUM_VM; ++i)
			amdgpu_fence_unref(&adev->vm_manager.active[i]);
		gmc_v8_0_vm_fini(adev);
		adev->vm_manager.enabled = false;
	}
	gmc_v8_0_hw_fini(adev);

	return 0;
}
static int gmc_v8_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = gmc_v8_0_hw_init(adev);
	if (r)
		return r;

	if (!adev->vm_manager.enabled) {
		r = gmc_v8_0_vm_init(adev);
		if (r) {
			dev_err(adev->dev, "vm manager initialization failed (%d).\n", r);
			return r;
		}
		adev->vm_manager.enabled = true;
	}

	return r;
}
static bool gmc_v8_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK | SRBM_STATUS__VMC_BUSY_MASK))
		return false;

	return true;
}
static int gmc_v8_0_wait_for_idle(void *handle)
{
	unsigned i;
	u32 tmp;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(mmSRBM_STATUS) & (SRBM_STATUS__MCB_BUSY_MASK |
					       SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
					       SRBM_STATUS__MCC_BUSY_MASK |
					       SRBM_STATUS__MCD_BUSY_MASK |
					       SRBM_STATUS__VMC_BUSY_MASK |
					       SRBM_STATUS__VMC1_BUSY_MASK);
		if (!tmp)
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}
static void gmc_v8_0_print_status(void *handle)
{
	int i, j;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	dev_info(adev->dev, "GMC 8.x registers\n");
	dev_info(adev->dev, "  SRBM_STATUS=0x%08X\n",
		 RREG32(mmSRBM_STATUS));
	dev_info(adev->dev, "  SRBM_STATUS2=0x%08X\n",
		 RREG32(mmSRBM_STATUS2));

	dev_info(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
		 RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR));
	dev_info(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
		 RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS));
	dev_info(adev->dev, "  MC_VM_MX_L1_TLB_CNTL=0x%08X\n",
		 RREG32(mmMC_VM_MX_L1_TLB_CNTL));
	dev_info(adev->dev, "  VM_L2_CNTL=0x%08X\n",
		 RREG32(mmVM_L2_CNTL));
	dev_info(adev->dev, "  VM_L2_CNTL2=0x%08X\n",
		 RREG32(mmVM_L2_CNTL2));
	dev_info(adev->dev, "  VM_L2_CNTL3=0x%08X\n",
		 RREG32(mmVM_L2_CNTL3));
	dev_info(adev->dev, "  VM_L2_CNTL4=0x%08X\n",
		 RREG32(mmVM_L2_CNTL4));
	dev_info(adev->dev, "  VM_CONTEXT0_PAGE_TABLE_START_ADDR=0x%08X\n",
		 RREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR));
	dev_info(adev->dev, "  VM_CONTEXT0_PAGE_TABLE_END_ADDR=0x%08X\n",
		 RREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR));
	dev_info(adev->dev, "  VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR=0x%08X\n",
		 RREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR));
	dev_info(adev->dev, "  VM_CONTEXT0_CNTL2=0x%08X\n",
		 RREG32(mmVM_CONTEXT0_CNTL2));
	dev_info(adev->dev, "  VM_CONTEXT0_CNTL=0x%08X\n",
		 RREG32(mmVM_CONTEXT0_CNTL));
	dev_info(adev->dev, "  VM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR=0x%08X\n",
		 RREG32(mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR));
	dev_info(adev->dev, "  VM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR=0x%08X\n",
		 RREG32(mmVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR));
	dev_info(adev->dev, "  mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET=0x%08X\n",
		 RREG32(mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET));
	dev_info(adev->dev, "  VM_CONTEXT1_PAGE_TABLE_START_ADDR=0x%08X\n",
		 RREG32(mmVM_CONTEXT1_PAGE_TABLE_START_ADDR));
	dev_info(adev->dev, "  VM_CONTEXT1_PAGE_TABLE_END_ADDR=0x%08X\n",
		 RREG32(mmVM_CONTEXT1_PAGE_TABLE_END_ADDR));
	dev_info(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR=0x%08X\n",
		 RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR));
	dev_info(adev->dev, "  VM_CONTEXT1_CNTL2=0x%08X\n",
		 RREG32(mmVM_CONTEXT1_CNTL2));
	dev_info(adev->dev, "  VM_CONTEXT1_CNTL=0x%08X\n",
		 RREG32(mmVM_CONTEXT1_CNTL));
	for (i = 0; i < 16; i++) {
		if (i < 8)
			dev_info(adev->dev, "  VM_CONTEXT%d_PAGE_TABLE_BASE_ADDR=0x%08X\n",
				 i, RREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i));
		else
			dev_info(adev->dev, "  VM_CONTEXT%d_PAGE_TABLE_BASE_ADDR=0x%08X\n",
				 i, RREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8));
	}
	dev_info(adev->dev, "  MC_VM_SYSTEM_APERTURE_LOW_ADDR=0x%08X\n",
		 RREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR));
	dev_info(adev->dev, "  MC_VM_SYSTEM_APERTURE_HIGH_ADDR=0x%08X\n",
		 RREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR));
	dev_info(adev->dev, "  MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR=0x%08X\n",
		 RREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR));
	dev_info(adev->dev, "  MC_VM_FB_LOCATION=0x%08X\n",
		 RREG32(mmMC_VM_FB_LOCATION));
	dev_info(adev->dev, "  MC_VM_AGP_BASE=0x%08X\n",
		 RREG32(mmMC_VM_AGP_BASE));
	dev_info(adev->dev, "  MC_VM_AGP_TOP=0x%08X\n",
		 RREG32(mmMC_VM_AGP_TOP));
	dev_info(adev->dev, "  MC_VM_AGP_BOT=0x%08X\n",
		 RREG32(mmMC_VM_AGP_BOT));

	dev_info(adev->dev, "  HDP_REG_COHERENCY_FLUSH_CNTL=0x%08X\n",
		 RREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL));
	dev_info(adev->dev, "  HDP_NONSURFACE_BASE=0x%08X\n",
		 RREG32(mmHDP_NONSURFACE_BASE));
	dev_info(adev->dev, "  HDP_NONSURFACE_INFO=0x%08X\n",
		 RREG32(mmHDP_NONSURFACE_INFO));
	dev_info(adev->dev, "  HDP_NONSURFACE_SIZE=0x%08X\n",
		 RREG32(mmHDP_NONSURFACE_SIZE));
	dev_info(adev->dev, "  HDP_MISC_CNTL=0x%08X\n",
		 RREG32(mmHDP_MISC_CNTL));
	dev_info(adev->dev, "  HDP_HOST_PATH_CNTL=0x%08X\n",
		 RREG32(mmHDP_HOST_PATH_CNTL));

	for (i = 0, j = 0; i < 32; i++, j += 0x6) {
		dev_info(adev->dev, "  %d:\n", i);
		dev_info(adev->dev, "  0x%04X=0x%08X\n",
			 0xb05 + j, RREG32(0xb05 + j));
		dev_info(adev->dev, "  0x%04X=0x%08X\n",
			 0xb06 + j, RREG32(0xb06 + j));
		dev_info(adev->dev, "  0x%04X=0x%08X\n",
			 0xb07 + j, RREG32(0xb07 + j));
		dev_info(adev->dev, "  0x%04X=0x%08X\n",
			 0xb08 + j, RREG32(0xb08 + j));
		dev_info(adev->dev, "  0x%04X=0x%08X\n",
			 0xb09 + j, RREG32(0xb09 + j));
	}

	dev_info(adev->dev, "  BIF_FB_EN=0x%08X\n",
		 RREG32(mmBIF_FB_EN));
}
static int gmc_v8_0_soft_reset(void *handle)
{
	struct amdgpu_mode_mc_save save;
	u32 srbm_soft_reset = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (tmp & SRBM_STATUS__VMC_BUSY_MASK)
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
						SRBM_SOFT_RESET, SOFT_RESET_VMC, 1);

	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK)) {
		if (!(adev->flags & AMDGPU_IS_APU))
			srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
							SRBM_SOFT_RESET, SOFT_RESET_MC, 1);
	}

	if (srbm_soft_reset) {
		gmc_v8_0_print_status((void *)adev);

		gmc_v8_0_mc_stop(adev, &save);
		if (gmc_v8_0_wait_for_idle(adev)) {
			dev_warn(adev->dev, "Wait for GMC idle timed out!\n");
		}

		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		/* Wait a little for things to settle down */
		udelay(50);

		gmc_v8_0_mc_resume(adev, &save);
		udelay(50);

		gmc_v8_0_print_status((void *)adev);
	}

	return 0;
}
static int gmc_v8_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
					     struct amdgpu_irq_src *src,
					     unsigned type,
					     enum amdgpu_interrupt_state state)
{
	u32 tmp;
	u32 bits = (VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK);

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		/* system context */
		tmp = RREG32(mmVM_CONTEXT0_CNTL);
		tmp &= ~bits;
		WREG32(mmVM_CONTEXT0_CNTL, tmp);
		/* VMs */
		tmp = RREG32(mmVM_CONTEXT1_CNTL);
		tmp &= ~bits;
		WREG32(mmVM_CONTEXT1_CNTL, tmp);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		/* system context */
		tmp = RREG32(mmVM_CONTEXT0_CNTL);
		tmp |= bits;
		WREG32(mmVM_CONTEXT0_CNTL, tmp);
		/* VMs */
		tmp = RREG32(mmVM_CONTEXT1_CNTL);
		tmp |= bits;
		WREG32(mmVM_CONTEXT1_CNTL, tmp);
		break;
	default:
		break;
	}

	return 0;
}
static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	u32 addr, status, mc_client;

	addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
	status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
	mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT);
	dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
		entry->src_id, entry->src_data);
	dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
		addr);
	dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
		status);
	gmc_v8_0_vm_decode_fault(adev, status, addr, mc_client);
	/* reset addr and status */
	WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);

	return 0;
}
static int gmc_v8_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	return 0;
}

static int gmc_v8_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	return 0;
}
const struct amd_ip_funcs gmc_v8_0_ip_funcs = {
	.early_init = gmc_v8_0_early_init,
	.sw_init = gmc_v8_0_sw_init,
	.sw_fini = gmc_v8_0_sw_fini,
	.hw_init = gmc_v8_0_hw_init,
	.hw_fini = gmc_v8_0_hw_fini,
	.suspend = gmc_v8_0_suspend,
	.resume = gmc_v8_0_resume,
	.is_idle = gmc_v8_0_is_idle,
	.wait_for_idle = gmc_v8_0_wait_for_idle,
	.soft_reset = gmc_v8_0_soft_reset,
	.print_status = gmc_v8_0_print_status,
	.set_clockgating_state = gmc_v8_0_set_clockgating_state,
	.set_powergating_state = gmc_v8_0_set_powergating_state,
};
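/*
 * gmc_v8_0_ip_funcs is the hook table the amdgpu core uses to drive the GMC
 * IP block through its life cycle: sw_init/hw_init at load time,
 * suspend/resume across power transitions, is_idle/wait_for_idle and
 * soft_reset from the GPU reset path, and the clock/powergating calls from
 * the power-management code.
 */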
static const struct amdgpu_gart_funcs gmc_v8_0_gart_funcs = {
	.flush_gpu_tlb = gmc_v8_0_gart_flush_gpu_tlb,
	.set_pte_pde = gmc_v8_0_gart_set_pte_pde,
};

static const struct amdgpu_irq_src_funcs gmc_v8_0_irq_funcs = {
	.set = gmc_v8_0_vm_fault_interrupt_state,
	.process = gmc_v8_0_process_interrupt,
};

static void gmc_v8_0_set_gart_funcs(struct amdgpu_device *adev)
{
	if (adev->gart.gart_funcs == NULL)
		adev->gart.gart_funcs = &gmc_v8_0_gart_funcs;
}

static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->mc.vm_fault.num_types = 1;
	adev->mc.vm_fault.funcs = &gmc_v8_0_irq_funcs;
}