Linux 4.2.1: drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c

/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/firmware.h>
#include "drmP.h"
#include "amdgpu.h"
#include "gmc_v8_0.h"
#include "amdgpu_ucode.h"

#include "gmc/gmc_8_1_d.h"
#include "gmc/gmc_8_1_sh_mask.h"

#include "bif/bif_5_0_d.h"
#include "bif/bif_5_0_sh_mask.h"

#include "oss/oss_3_0_d.h"
#include "oss/oss_3_0_sh_mask.h"

#include "vid.h"
#include "vi.h"

static void gmc_v8_0_set_gart_funcs(struct amdgpu_device *adev);
static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev);

MODULE_FIRMWARE("amdgpu/topaz_mc.bin");
MODULE_FIRMWARE("amdgpu/tonga_mc.bin");
static const u32 golden_settings_tonga_a11[] =
{
	mmMC_ARB_WTM_GRPWT_RD, 0x00000003, 0x00000000,
	mmMC_HUB_RDREQ_DMIF_LIMIT, 0x0000007f, 0x00000028,
	mmMC_HUB_WDP_UMC, 0x00007fb6, 0x00000991,
	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff,
};

static const u32 tonga_mgcg_cgcg_init[] =
{
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};

static const u32 golden_settings_iceland_a11[] =
{
	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff
};

static const u32 iceland_mgcg_cgcg_init[] =
{
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};

static const u32 cz_mgcg_cgcg_init[] =
{
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};

static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		amdgpu_program_register_sequence(adev,
						 iceland_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(iceland_mgcg_cgcg_init));
		amdgpu_program_register_sequence(adev,
						 golden_settings_iceland_a11,
						 (const u32)ARRAY_SIZE(golden_settings_iceland_a11));
		break;
	case CHIP_TONGA:
		amdgpu_program_register_sequence(adev,
						 tonga_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(tonga_mgcg_cgcg_init));
		amdgpu_program_register_sequence(adev,
						 golden_settings_tonga_a11,
						 (const u32)ARRAY_SIZE(golden_settings_tonga_a11));
		break;
	case CHIP_CARRIZO:
		amdgpu_program_register_sequence(adev,
						 cz_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(cz_mgcg_cgcg_init));
		break;
	default:
		break;
	}
}

/**
 * gmc_v8_0_mc_wait_for_idle - wait for MC idle callback.
 *
 * @adev: amdgpu_device pointer
 *
 * Wait for the MC (memory controller) to be idle.
 * (evergreen+).
 * Returns 0 if the MC is idle, -1 if not.
 */
int gmc_v8_0_mc_wait_for_idle(struct amdgpu_device *adev)
{
	unsigned i;
	u32 tmp;

	for (i = 0; i < adev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(mmSRBM_STATUS) & (SRBM_STATUS__VMC_BUSY_MASK |
					       SRBM_STATUS__MCB_BUSY_MASK |
					       SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
					       SRBM_STATUS__MCC_BUSY_MASK |
					       SRBM_STATUS__MCD_BUSY_MASK |
					       SRBM_STATUS__VMC1_BUSY_MASK);
		if (!tmp)
			return 0;
		udelay(1);
	}
	return -1;
}

void gmc_v8_0_mc_stop(struct amdgpu_device *adev,
		      struct amdgpu_mode_mc_save *save)
{
	u32 blackout;

	if (adev->mode_info.num_crtc)
		amdgpu_display_stop_mc_access(adev, save);

	amdgpu_asic_wait_for_mc_idle(adev);

	blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
	if (REG_GET_FIELD(blackout, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE) != 1) {
		/* Block CPU access */
		WREG32(mmBIF_FB_EN, 0);
		/* blackout the MC */
		blackout = REG_SET_FIELD(blackout,
					 MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 1);
		WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout);
	}
	/* wait for the MC to settle */
	udelay(100);
}

void gmc_v8_0_mc_resume(struct amdgpu_device *adev,
			struct amdgpu_mode_mc_save *save)
{
	u32 tmp;

	/* unblackout the MC */
	tmp = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0);
	WREG32(mmMC_SHARED_BLACKOUT_CNTL, tmp);
	/* allow CPU access */
	tmp = REG_SET_FIELD(0, BIF_FB_EN, FB_READ_EN, 1);
	tmp = REG_SET_FIELD(tmp, BIF_FB_EN, FB_WRITE_EN, 1);
	WREG32(mmBIF_FB_EN, tmp);

	if (adev->mode_info.num_crtc)
		amdgpu_display_resume_mc_access(adev, save);
}

/**
 * gmc_v8_0_init_microcode - load ucode images from disk
 *
 * @adev: amdgpu_device pointer
 *
 * Use the firmware interface to load the ucode images into
 * the driver (not loaded into hw).
 * Returns 0 on success, error on failure.
 */
static int gmc_v8_0_init_microcode(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err;

	DRM_DEBUG("\n");

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		chip_name = "topaz";
		break;
	case CHIP_TONGA:
		chip_name = "tonga";
		break;
	case CHIP_CARRIZO:
		return 0;
	default: BUG();
	}

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name);
	err = request_firmware(&adev->mc.fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->mc.fw);

out:
	if (err) {
		printk(KERN_ERR
		       "mc: Failed to load firmware \"%s\"\n",
		       fw_name);
		release_firmware(adev->mc.fw);
		adev->mc.fw = NULL;
	}
	return err;
}

/**
 * gmc_v8_0_mc_load_microcode - load MC ucode into the hw
 *
 * @adev: amdgpu_device pointer
 *
 * Load the GDDR MC ucode into the hw (CIK).
 * Returns 0 on success, error on failure.
 */
static int gmc_v8_0_mc_load_microcode(struct amdgpu_device *adev)
{
	const struct mc_firmware_header_v1_0 *hdr;
	const __le32 *fw_data = NULL;
	const __le32 *io_mc_regs = NULL;
	u32 running, blackout = 0;
	int i, ucode_size, regs_size;

	if (!adev->mc.fw)
		return -EINVAL;

	hdr = (const struct mc_firmware_header_v1_0 *)adev->mc.fw->data;
	amdgpu_ucode_print_mc_hdr(&hdr->header);

	adev->mc.fw_version = le32_to_cpu(hdr->header.ucode_version);
	regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
	io_mc_regs = (const __le32 *)
		(adev->mc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
	ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
	fw_data = (const __le32 *)
		(adev->mc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));

	running = REG_GET_FIELD(RREG32(mmMC_SEQ_SUP_CNTL), MC_SEQ_SUP_CNTL, RUN);

	if (running == 0) {
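		/*
		 * Note: 'running' is zero inside this branch, so the blackout
		 * save below can never execute; it appears to be carried over
		 * from the CIK (gmc_v7_0) version of this loader.
		 */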
		if (running) {
			blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
			WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout | 1);
		}

		/* reset the engine and set to writable */
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010);

		/* load mc io regs */
		for (i = 0; i < regs_size; i++) {
			WREG32(mmMC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(io_mc_regs++));
			WREG32(mmMC_SEQ_IO_DEBUG_DATA, le32_to_cpup(io_mc_regs++));
		}
		/* load the MC ucode */
		for (i = 0; i < ucode_size; i++)
			WREG32(mmMC_SEQ_SUP_PGM, le32_to_cpup(fw_data++));

		/* put the engine back into the active state */
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000004);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000001);

		/* wait for training to complete */
		for (i = 0; i < adev->usec_timeout; i++) {
			if (REG_GET_FIELD(RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL),
					  MC_SEQ_TRAIN_WAKEUP_CNTL, TRAIN_DONE_D0))
				break;
			udelay(1);
		}
		for (i = 0; i < adev->usec_timeout; i++) {
			if (REG_GET_FIELD(RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL),
					  MC_SEQ_TRAIN_WAKEUP_CNTL, TRAIN_DONE_D1))
				break;
			udelay(1);
		}

		if (running)
			WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout);
	}

	return 0;
}

static void gmc_v8_0_vram_gtt_location(struct amdgpu_device *adev,
				       struct amdgpu_mc *mc)
{
	if (mc->mc_vram_size > 0xFFC0000000ULL) {
		/* leave room for at least 1024M GTT */
		dev_warn(adev->dev, "limiting VRAM\n");
		mc->real_vram_size = 0xFFC0000000ULL;
		mc->mc_vram_size = 0xFFC0000000ULL;
	}
	amdgpu_vram_location(adev, &adev->mc, 0);
	adev->mc.gtt_base_align = 0;
	amdgpu_gtt_location(adev, mc);
}

/**
 * gmc_v8_0_mc_program - program the GPU memory controller
 *
 * @adev: amdgpu_device pointer
 *
 * Set the location of vram, gart, and AGP in the GPU's
 * physical address space (CIK).
 */
static void gmc_v8_0_mc_program(struct amdgpu_device *adev)
{
	struct amdgpu_mode_mc_save save;
	u32 tmp;
	int i, j;

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x6) {
		WREG32((0xb05 + j), 0x00000000);
		WREG32((0xb06 + j), 0x00000000);
		WREG32((0xb07 + j), 0x00000000);
		WREG32((0xb08 + j), 0x00000000);
		WREG32((0xb09 + j), 0x00000000);
	}
	WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0);

	if (adev->mode_info.num_crtc)
		amdgpu_display_set_vga_render_state(adev, false);

	gmc_v8_0_mc_stop(adev, &save);
	if (amdgpu_asic_wait_for_mc_idle(adev)) {
		dev_warn(adev->dev, "Wait for MC idle timedout !\n");
	}
	/* Update configuration */
	WREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
	       adev->mc.vram_start >> 12);
	WREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
	       adev->mc.vram_end >> 12);
	WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
	       adev->vram_scratch.gpu_addr >> 12);
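	/*
	 * MC_VM_FB_LOCATION packs the framebuffer top (bits 31:16) and base
	 * (bits 15:0) in 16 MB units, hence the >> 24 shifts below.
	 */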
	tmp = ((adev->mc.vram_end >> 24) & 0xFFFF) << 16;
	tmp |= ((adev->mc.vram_start >> 24) & 0xFFFF);
	WREG32(mmMC_VM_FB_LOCATION, tmp);
	/* XXX double check these! */
	WREG32(mmHDP_NONSURFACE_BASE, (adev->mc.vram_start >> 8));
	WREG32(mmHDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
	WREG32(mmHDP_NONSURFACE_SIZE, 0x3FFFFFFF);
	WREG32(mmMC_VM_AGP_BASE, 0);
	WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF);
	WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF);
	if (amdgpu_asic_wait_for_mc_idle(adev)) {
		dev_warn(adev->dev, "Wait for MC idle timedout !\n");
	}
	gmc_v8_0_mc_resume(adev, &save);

	WREG32(mmBIF_FB_EN, BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK);

	tmp = RREG32(mmHDP_MISC_CNTL);
	tmp = REG_SET_FIELD(tmp, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 1);
	WREG32(mmHDP_MISC_CNTL, tmp);

	tmp = RREG32(mmHDP_HOST_PATH_CNTL);
	WREG32(mmHDP_HOST_PATH_CNTL, tmp);
}

/**
 * gmc_v8_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space (CIK).
 * Returns 0 for success.
 */
static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
{
	u32 tmp;
	int chansize, numchan;

	/* Get VRAM information */
	tmp = RREG32(mmMC_ARB_RAMCFG);
	if (REG_GET_FIELD(tmp, MC_ARB_RAMCFG, CHANSIZE)) {
		chansize = 64;
	} else {
		chansize = 32;
	}
	tmp = RREG32(mmMC_SHARED_CHMAP);
	switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) {
	case 0:
	default:
		numchan = 1;
		break;
	case 1:
		numchan = 2;
		break;
	case 2:
		numchan = 4;
		break;
	case 3:
		numchan = 8;
		break;
	case 4:
		numchan = 3;
		break;
	case 5:
		numchan = 6;
		break;
	case 6:
		numchan = 10;
		break;
	case 7:
		numchan = 12;
		break;
	case 8:
		numchan = 16;
		break;
	}
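	/* effective memory bus width in bits: channel count times bits per channel */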
	adev->mc.vram_width = numchan * chansize;
	/* Could aper size report 0 ? */
	adev->mc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->mc.aper_size = pci_resource_len(adev->pdev, 0);
	/* size in MB on si */
	adev->mc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
	adev->mc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
	adev->mc.visible_vram_size = adev->mc.aper_size;

	/* unless the user has overridden it, set the gart
	 * size equal to 1024 MB or the vram size, whichever is larger.
	 */
	if (amdgpu_gart_size == -1)
		adev->mc.gtt_size = max((1024ULL << 20), adev->mc.mc_vram_size);
	else
		adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20;

	gmc_v8_0_vram_gtt_location(adev, &adev->mc);

	return 0;
}

/*
 * GART
 * VMID 0 is the physical GPU addresses as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */

/**
 * gmc_v8_0_gart_flush_gpu_tlb - gart tlb flush callback
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 *
 * Flush the TLB for the requested page table (CIK).
 */
static void gmc_v8_0_gart_flush_gpu_tlb(struct amdgpu_device *adev,
					uint32_t vmid)
{
	/* flush hdp cache */
	WREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0);

	/* bits 0-15 are the VM contexts0-15 */
	WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
}

/**
 * gmc_v8_0_gart_set_pte_pde - update the page tables using MMIO
 *
 * @adev: amdgpu_device pointer
 * @cpu_pt_addr: cpu address of the page table
 * @gpu_page_idx: entry in the page table to update
 * @addr: dst addr to write into pte/pde
 * @flags: access flags
 *
 * Update the page tables using the CPU.
 */
static int gmc_v8_0_gart_set_pte_pde(struct amdgpu_device *adev,
				     void *cpu_pt_addr,
				     uint32_t gpu_page_idx,
				     uint64_t addr,
				     uint32_t flags)
{
	void __iomem *ptr = (void *)cpu_pt_addr;
	uint64_t value;

	/*
	 * PTE format on VI:
	 * 63:40 reserved
	 * 39:12 4k physical page base address
	 * 11:7 fragment
	 * 6 write
	 * 5 read
	 * 4 exe
	 * 3 reserved
	 * 2 snooped
	 * 1 system
	 * 0 valid
	 *
	 * PDE format on VI:
	 * 63:59 block fragment size
	 * 58:40 reserved
	 * 39:1 physical base address of PTE
	 * bits 5:1 must be 0.
	 * 0 valid
	 */
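	/*
	 * Illustrative example: a valid, readable and writable 4 KB page at
	 * physical address 0x1234000 would be encoded as
	 *   (0x1234000 & 0x000000FFFFFFF000ULL) | AMDGPU_PTE_VALID |
	 *   AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE
	 * i.e. the masked address OR'd with the caller-supplied flag bits,
	 * which is exactly what the statements below compute.
	 */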
	value = addr & 0x000000FFFFFFF000ULL;
	value |= flags;
	writeq(value, ptr + (gpu_page_idx * 8));

	return 0;
}

/**
 * gmc_v8_0_gart_enable - gart enable
 *
 * @adev: amdgpu_device pointer
 *
 * This sets up the TLBs, programs the page tables for VMID0,
 * sets up the hw for VMIDs 1-15 which are allocated on
 * demand, and sets up the global locations for the LDS, GDS,
 * and GPUVM for FSA64 clients (CIK).
 * Returns 0 for success, errors for failure.
 */
static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
{
	int r, i;
	u32 tmp;

	if (adev->gart.robj == NULL) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = amdgpu_gart_table_vram_pin(adev);
	if (r)
		return r;
	/* Setup TLB control */
	tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, 3);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_ADVANCED_DRIVER_MODEL, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
	WREG32(mmMC_VM_MX_L1_TLB_CNTL, tmp);
	/* Setup L2 cache */
	tmp = RREG32(mmVM_L2_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, EFFECTIVE_L2_QUEUE_SIZE, 7);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
	WREG32(mmVM_L2_CNTL, tmp);
	tmp = RREG32(mmVM_L2_CNTL2);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
	WREG32(mmVM_L2_CNTL2, tmp);
	tmp = RREG32(mmVM_L2_CNTL3);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 4);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, 4);
	WREG32(mmVM_L2_CNTL3, tmp);
	/* XXX: set to enable PTE/PDE in system memory */
	tmp = RREG32(mmVM_L2_CNTL4);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PDE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PDE_REQUEST_SHARED, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PDE_REQUEST_SNOOP, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PTE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PTE_REQUEST_SHARED, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PTE_REQUEST_SNOOP, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PDE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PDE_REQUEST_SHARED, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PDE_REQUEST_SNOOP, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_SHARED, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_SNOOP, 0);
	WREG32(mmVM_L2_CNTL4, tmp);
	/* setup context0 */
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12);
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, (adev->mc.gtt_end >> 12) - 1);
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
	WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(adev->dummy_page.addr >> 12));
	WREG32(mmVM_CONTEXT0_CNTL2, 0);
	tmp = RREG32(mmVM_CONTEXT0_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	WREG32(mmVM_CONTEXT0_CNTL, tmp);

	WREG32(mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR, 0);
	WREG32(mmVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR, 0);
	WREG32(mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET, 0);

	/* empty context1-15 */
	/* FIXME start with 4G, once using 2 level pt switch to full
	 * vm size space
	 */
	/* set vm size, must be a multiple of 4 */
	WREG32(mmVM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
	WREG32(mmVM_CONTEXT1_PAGE_TABLE_END_ADDR, adev->vm_manager.max_pfn - 1);
	for (i = 1; i < 16; i++) {
		if (i < 8)
			WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i,
			       adev->gart.table_addr >> 12);
		else
			WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8,
			       adev->gart.table_addr >> 12);
	}

	/* enable context1-15 */
	WREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(adev->dummy_page.addr >> 12));
	WREG32(mmVM_CONTEXT1_CNTL2, 4);
	tmp = RREG32(mmVM_CONTEXT1_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, VALID_PROTECTION_FAULT_ENABLE_INTERRUPT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, READ_PROTECTION_FAULT_ENABLE_INTERRUPT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
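	/*
	 * amdgpu_vm_block_size is the page table block size in bits of pages;
	 * the PAGE_TABLE_BLOCK_SIZE field is expressed relative to the
	 * hardware minimum of 9 bits (512 pages), hence the "- 9" below.
	 */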
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_BLOCK_SIZE,
			    amdgpu_vm_block_size - 9);
	WREG32(mmVM_CONTEXT1_CNTL, tmp);

	gmc_v8_0_gart_flush_gpu_tlb(adev, 0);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(adev->mc.gtt_size >> 20),
		 (unsigned long long)adev->gart.table_addr);
	adev->gart.ready = true;
	return 0;
}

static int gmc_v8_0_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.robj) {
		WARN(1, "R600 PCIE GART already initialized\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = amdgpu_gart_init(adev);
	if (r)
		return r;
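	/* each GART entry is one 64-bit (8 byte) PTE */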
	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
	return amdgpu_gart_table_vram_alloc(adev);
}

/**
 * gmc_v8_0_gart_disable - gart disable
 *
 * @adev: amdgpu_device pointer
 *
 * This disables all VM page tables (CIK).
 */
static void gmc_v8_0_gart_disable(struct amdgpu_device *adev)
{
	u32 tmp;

	/* Disable all tables */
	WREG32(mmVM_CONTEXT0_CNTL, 0);
	WREG32(mmVM_CONTEXT1_CNTL, 0);
	/* Setup TLB control */
	tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING, 0);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_ADVANCED_DRIVER_MODEL, 0);
	WREG32(mmMC_VM_MX_L1_TLB_CNTL, tmp);
	/* Setup L2 cache */
	tmp = RREG32(mmVM_L2_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
	WREG32(mmVM_L2_CNTL, tmp);
	WREG32(mmVM_L2_CNTL2, 0);
	amdgpu_gart_table_vram_unpin(adev);
}

/**
 * gmc_v8_0_gart_fini - vm fini callback
 *
 * @adev: amdgpu_device pointer
 *
 * Tears down the driver GART/VM setup (CIK).
 */
static void gmc_v8_0_gart_fini(struct amdgpu_device *adev)
{
	amdgpu_gart_table_vram_free(adev);
	amdgpu_gart_fini(adev);
}

/*
 * vm
 * VMID 0 is the physical GPU addresses as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */
/**
 * gmc_v8_0_vm_init - cik vm init callback
 *
 * @adev: amdgpu_device pointer
 *
 * Inits cik specific vm parameters (number of VMs, base of vram for
 * VMIDs 1-15) (CIK).
 * Returns 0 for success.
 */
static int gmc_v8_0_vm_init(struct amdgpu_device *adev)
{
	/*
	 * number of VMs
	 * VMID 0 is reserved for System
	 * amdgpu graphics/compute will use VMIDs 1-7
	 * amdkfd will use VMIDs 8-15
	 */
	adev->vm_manager.nvm = AMDGPU_NUM_OF_VMIDS;

	/* base offset of vram pages */
	if (adev->flags & AMDGPU_IS_APU) {
		u64 tmp = RREG32(mmMC_VM_FB_OFFSET);
		tmp <<= 22;
		adev->vm_manager.vram_base_offset = tmp;
	} else
		adev->vm_manager.vram_base_offset = 0;

	return 0;
}

/**
 * gmc_v8_0_vm_fini - cik vm fini callback
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down any asic specific VM setup (CIK).
 */
static void gmc_v8_0_vm_fini(struct amdgpu_device *adev)
{
}

/**
 * gmc_v8_0_vm_decode_fault - print human readable fault info
 *
 * @adev: amdgpu_device pointer
 * @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value
 * @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value
 *
 * Print human readable fault information (CIK).
 */
static void gmc_v8_0_vm_decode_fault(struct amdgpu_device *adev,
				     u32 status, u32 addr, u32 mc_client)
{
	u32 mc_id;
	u32 vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
	u32 protections = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
					PROTECTIONS);
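	/* mc_client is a four-character ASCII tag packed into a u32, most significant byte first */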
	char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
		(mc_client >> 8) & 0xff, mc_client & 0xff, 0 };

	mc_id = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
			      MEMORY_CLIENT_ID);

	printk("VM fault (0x%02x, vmid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
	       protections, vmid, addr,
	       REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
			     MEMORY_CLIENT_RW) ?
	       "write" : "read", block, mc_client, mc_id);
}

static int gmc_v8_0_convert_vram_type(int mc_seq_vram_type)
{
	switch (mc_seq_vram_type) {
	case MC_SEQ_MISC0__MT__GDDR1:
		return AMDGPU_VRAM_TYPE_GDDR1;
	case MC_SEQ_MISC0__MT__DDR2:
		return AMDGPU_VRAM_TYPE_DDR2;
	case MC_SEQ_MISC0__MT__GDDR3:
		return AMDGPU_VRAM_TYPE_GDDR3;
	case MC_SEQ_MISC0__MT__GDDR4:
		return AMDGPU_VRAM_TYPE_GDDR4;
	case MC_SEQ_MISC0__MT__GDDR5:
		return AMDGPU_VRAM_TYPE_GDDR5;
	case MC_SEQ_MISC0__MT__HBM:
		return AMDGPU_VRAM_TYPE_HBM;
	case MC_SEQ_MISC0__MT__DDR3:
		return AMDGPU_VRAM_TYPE_DDR3;
	default:
		return AMDGPU_VRAM_TYPE_UNKNOWN;
	}
}

static int gmc_v8_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v8_0_set_gart_funcs(adev);
	gmc_v8_0_set_irq_funcs(adev);

	if (adev->flags & AMDGPU_IS_APU) {
		adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
	} else {
		u32 tmp = RREG32(mmMC_SEQ_MISC0);
		tmp &= MC_SEQ_MISC0__MT__MASK;
		adev->mc.vram_type = gmc_v8_0_convert_vram_type(tmp);
	}

	return 0;
}

static int gmc_v8_0_sw_init(void *handle)
{
	int r;
	int dma_bits;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_gem_init(adev);
	if (r)
		return r;
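
	/*
	 * Interrupt source ids 146 and 147 are the VM protection fault
	 * interrupts on VI (invalid page and memory protection faults).
	 */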
	r = amdgpu_irq_add_id(adev, 146, &adev->mc.vm_fault);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, 147, &adev->mc.vm_fault);
	if (r)
		return r;

	/* Adjust VM size here.
	 * Currently set to 4GB ((1 << 20) 4k pages).
	 * Max GPUVM size for cayman and SI is 40 bits.
	 */
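	/* amdgpu_vm_size is in GB; 1 GB corresponds to 1 << 18 pages of 4 KB */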
	adev->vm_manager.max_pfn = amdgpu_vm_size << 18;

	/* Set the internal MC address mask
	 * This is the max address of the GPU's
	 * internal address space.
	 */
	adev->mc.mc_mask = 0xffffffffffULL; /* 40 bit MC */

	/* set DMA mask + need_dma32 flags.
	 * PCIE - can handle 40-bits.
	 * IGP - can handle 40-bits
	 * PCI - dma32 for legacy pci gart, 40 bits on newer asics
	 */
	adev->need_dma32 = false;
	dma_bits = adev->need_dma32 ? 32 : 40;
	r = pci_set_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		adev->need_dma32 = true;
		dma_bits = 32;
		printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
	}
	r = pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
		printk(KERN_WARNING "amdgpu: No coherent DMA available.\n");
	}

	r = gmc_v8_0_init_microcode(adev);
	if (r) {
		DRM_ERROR("Failed to load mc firmware!\n");
		return r;
	}

	r = gmc_v8_0_mc_init(adev);
	if (r)
		return r;

	/* Memory manager */
	r = amdgpu_bo_init(adev);
	if (r)
		return r;

	r = gmc_v8_0_gart_init(adev);
	if (r)
		return r;

	if (!adev->vm_manager.enabled) {
		r = gmc_v8_0_vm_init(adev);
		if (r) {
			dev_err(adev->dev, "vm manager initialization failed (%d).\n", r);
			return r;
		}
		adev->vm_manager.enabled = true;
	}

	return r;
}

static int gmc_v8_0_sw_fini(void *handle)
{
	int i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->vm_manager.enabled) {
		for (i = 0; i < AMDGPU_NUM_VM; ++i)
			amdgpu_fence_unref(&adev->vm_manager.active[i]);
		gmc_v8_0_vm_fini(adev);
		adev->vm_manager.enabled = false;
	}
	gmc_v8_0_gart_fini(adev);
	amdgpu_gem_fini(adev);
	amdgpu_bo_fini(adev);

	return 0;
}

static int gmc_v8_0_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v8_0_init_golden_registers(adev);

	gmc_v8_0_mc_program(adev);

	if (!(adev->flags & AMDGPU_IS_APU)) {
		r = gmc_v8_0_mc_load_microcode(adev);
		if (r) {
			DRM_ERROR("Failed to load MC firmware!\n");
			return r;
		}
	}

	r = gmc_v8_0_gart_enable(adev);
	if (r)
		return r;

	return r;
}

static int gmc_v8_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v8_0_gart_disable(adev);

	return 0;
}

static int gmc_v8_0_suspend(void *handle)
{
	int i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->vm_manager.enabled) {
		for (i = 0; i < AMDGPU_NUM_VM; ++i)
			amdgpu_fence_unref(&adev->vm_manager.active[i]);
		gmc_v8_0_vm_fini(adev);
		adev->vm_manager.enabled = false;
	}
	gmc_v8_0_hw_fini(adev);

	return 0;
}

static int gmc_v8_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = gmc_v8_0_hw_init(adev);
	if (r)
		return r;

	if (!adev->vm_manager.enabled) {
		r = gmc_v8_0_vm_init(adev);
		if (r) {
			dev_err(adev->dev, "vm manager initialization failed (%d).\n", r);
			return r;
		}
		adev->vm_manager.enabled = true;
	}

	return r;
}

static bool gmc_v8_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK | SRBM_STATUS__VMC_BUSY_MASK))
		return false;

	return true;
}

static int gmc_v8_0_wait_for_idle(void *handle)
{
	unsigned i;
	u32 tmp;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(mmSRBM_STATUS) & (SRBM_STATUS__MCB_BUSY_MASK |
					       SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
					       SRBM_STATUS__MCC_BUSY_MASK |
					       SRBM_STATUS__MCD_BUSY_MASK |
					       SRBM_STATUS__VMC_BUSY_MASK |
					       SRBM_STATUS__VMC1_BUSY_MASK);
		if (!tmp)
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}

static void gmc_v8_0_print_status(void *handle)
{
	int i, j;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	dev_info(adev->dev, "GMC 8.x registers\n");
	dev_info(adev->dev, " SRBM_STATUS=0x%08X\n",
		RREG32(mmSRBM_STATUS));
	dev_info(adev->dev, " SRBM_STATUS2=0x%08X\n",
		RREG32(mmSRBM_STATUS2));

	dev_info(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
		 RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR));
	dev_info(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
		 RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS));
	dev_info(adev->dev, " MC_VM_MX_L1_TLB_CNTL=0x%08X\n",
		 RREG32(mmMC_VM_MX_L1_TLB_CNTL));
	dev_info(adev->dev, " VM_L2_CNTL=0x%08X\n",
		 RREG32(mmVM_L2_CNTL));
	dev_info(adev->dev, " VM_L2_CNTL2=0x%08X\n",
		 RREG32(mmVM_L2_CNTL2));
	dev_info(adev->dev, " VM_L2_CNTL3=0x%08X\n",
		 RREG32(mmVM_L2_CNTL3));
	dev_info(adev->dev, " VM_L2_CNTL4=0x%08X\n",
		 RREG32(mmVM_L2_CNTL4));
	dev_info(adev->dev, " VM_CONTEXT0_PAGE_TABLE_START_ADDR=0x%08X\n",
		 RREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR));
	dev_info(adev->dev, " VM_CONTEXT0_PAGE_TABLE_END_ADDR=0x%08X\n",
		 RREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR));
	dev_info(adev->dev, " VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR=0x%08X\n",
		 RREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR));
	dev_info(adev->dev, " VM_CONTEXT0_CNTL2=0x%08X\n",
		 RREG32(mmVM_CONTEXT0_CNTL2));
	dev_info(adev->dev, " VM_CONTEXT0_CNTL=0x%08X\n",
		 RREG32(mmVM_CONTEXT0_CNTL));
	dev_info(adev->dev, " VM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR=0x%08X\n",
		 RREG32(mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR));
	dev_info(adev->dev, " VM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR=0x%08X\n",
		 RREG32(mmVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR));
	dev_info(adev->dev, " mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET=0x%08X\n",
		 RREG32(mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET));
	dev_info(adev->dev, " VM_CONTEXT1_PAGE_TABLE_START_ADDR=0x%08X\n",
		 RREG32(mmVM_CONTEXT1_PAGE_TABLE_START_ADDR));
	dev_info(adev->dev, " VM_CONTEXT1_PAGE_TABLE_END_ADDR=0x%08X\n",
		 RREG32(mmVM_CONTEXT1_PAGE_TABLE_END_ADDR));
	dev_info(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR=0x%08X\n",
		 RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR));
	dev_info(adev->dev, " VM_CONTEXT1_CNTL2=0x%08X\n",
		 RREG32(mmVM_CONTEXT1_CNTL2));
	dev_info(adev->dev, " VM_CONTEXT1_CNTL=0x%08X\n",
		 RREG32(mmVM_CONTEXT1_CNTL));
	for (i = 0; i < 16; i++) {
		if (i < 8)
			dev_info(adev->dev, " VM_CONTEXT%d_PAGE_TABLE_BASE_ADDR=0x%08X\n",
				 i, RREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i));
		else
			dev_info(adev->dev, " VM_CONTEXT%d_PAGE_TABLE_BASE_ADDR=0x%08X\n",
				 i, RREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8));
	}
	dev_info(adev->dev, " MC_VM_SYSTEM_APERTURE_LOW_ADDR=0x%08X\n",
		 RREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR));
	dev_info(adev->dev, " MC_VM_SYSTEM_APERTURE_HIGH_ADDR=0x%08X\n",
		 RREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR));
	dev_info(adev->dev, " MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR=0x%08X\n",
		 RREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR));
	dev_info(adev->dev, " MC_VM_FB_LOCATION=0x%08X\n",
		 RREG32(mmMC_VM_FB_LOCATION));
	dev_info(adev->dev, " MC_VM_AGP_BASE=0x%08X\n",
		 RREG32(mmMC_VM_AGP_BASE));
	dev_info(adev->dev, " MC_VM_AGP_TOP=0x%08X\n",
		 RREG32(mmMC_VM_AGP_TOP));
	dev_info(adev->dev, " MC_VM_AGP_BOT=0x%08X\n",
		 RREG32(mmMC_VM_AGP_BOT));

	dev_info(adev->dev, " HDP_REG_COHERENCY_FLUSH_CNTL=0x%08X\n",
		 RREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL));
	dev_info(adev->dev, " HDP_NONSURFACE_BASE=0x%08X\n",
		 RREG32(mmHDP_NONSURFACE_BASE));
	dev_info(adev->dev, " HDP_NONSURFACE_INFO=0x%08X\n",
		 RREG32(mmHDP_NONSURFACE_INFO));
	dev_info(adev->dev, " HDP_NONSURFACE_SIZE=0x%08X\n",
		 RREG32(mmHDP_NONSURFACE_SIZE));
	dev_info(adev->dev, " HDP_MISC_CNTL=0x%08X\n",
		 RREG32(mmHDP_MISC_CNTL));
	dev_info(adev->dev, " HDP_HOST_PATH_CNTL=0x%08X\n",
		 RREG32(mmHDP_HOST_PATH_CNTL));

	for (i = 0, j = 0; i < 32; i++, j += 0x6) {
		dev_info(adev->dev, " %d:\n", i);
		dev_info(adev->dev, "  0x%04X=0x%08X\n",
			 0xb05 + j, RREG32(0xb05 + j));
		dev_info(adev->dev, "  0x%04X=0x%08X\n",
			 0xb06 + j, RREG32(0xb06 + j));
		dev_info(adev->dev, "  0x%04X=0x%08X\n",
			 0xb07 + j, RREG32(0xb07 + j));
		dev_info(adev->dev, "  0x%04X=0x%08X\n",
			 0xb08 + j, RREG32(0xb08 + j));
		dev_info(adev->dev, "  0x%04X=0x%08X\n",
			 0xb09 + j, RREG32(0xb09 + j));
	}

	dev_info(adev->dev, " BIF_FB_EN=0x%08X\n",
		 RREG32(mmBIF_FB_EN));
}

static int gmc_v8_0_soft_reset(void *handle)
{
	struct amdgpu_mode_mc_save save;
	u32 srbm_soft_reset = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (tmp & SRBM_STATUS__VMC_BUSY_MASK)
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
						SRBM_SOFT_RESET, SOFT_RESET_VMC, 1);

	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK)) {
		if (!(adev->flags & AMDGPU_IS_APU))
			srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
							SRBM_SOFT_RESET, SOFT_RESET_MC, 1);
	}

	if (srbm_soft_reset) {
		gmc_v8_0_print_status((void *)adev);

		gmc_v8_0_mc_stop(adev, &save);
		if (gmc_v8_0_wait_for_idle(adev)) {
			dev_warn(adev->dev, "Wait for GMC idle timed out !\n");
		}

		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
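		/* read back the register to post the write before waiting */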
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		/* Wait a little for things to settle down */
		udelay(50);

		gmc_v8_0_mc_resume(adev, &save);
		udelay(50);

		gmc_v8_0_print_status((void *)adev);
	}

	return 0;
}

static int gmc_v8_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
					     struct amdgpu_irq_src *src,
					     unsigned type,
					     enum amdgpu_interrupt_state state)
{
	u32 tmp;
	u32 bits = (VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK);

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		/* system context */
		tmp = RREG32(mmVM_CONTEXT0_CNTL);
		tmp &= ~bits;
		WREG32(mmVM_CONTEXT0_CNTL, tmp);
		/* VMs */
		tmp = RREG32(mmVM_CONTEXT1_CNTL);
		tmp &= ~bits;
		WREG32(mmVM_CONTEXT1_CNTL, tmp);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		/* system context */
		tmp = RREG32(mmVM_CONTEXT0_CNTL);
		tmp |= bits;
		WREG32(mmVM_CONTEXT0_CNTL, tmp);
		/* VMs */
		tmp = RREG32(mmVM_CONTEXT1_CNTL);
		tmp |= bits;
		WREG32(mmVM_CONTEXT1_CNTL, tmp);
		break;
	default:
		break;
	}

	return 0;
}

static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	u32 addr, status, mc_client;

	addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
	status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
	mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT);
	dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
		entry->src_id, entry->src_data);
	dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
		addr);
	dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
		status);
	gmc_v8_0_vm_decode_fault(adev, status, addr, mc_client);
	/* reset addr and status */
	WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);

	return 0;
}

static int gmc_v8_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	return 0;
}

static int gmc_v8_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	return 0;
}

const struct amd_ip_funcs gmc_v8_0_ip_funcs = {
	.early_init = gmc_v8_0_early_init,
	.late_init = NULL,
	.sw_init = gmc_v8_0_sw_init,
	.sw_fini = gmc_v8_0_sw_fini,
	.hw_init = gmc_v8_0_hw_init,
	.hw_fini = gmc_v8_0_hw_fini,
	.suspend = gmc_v8_0_suspend,
	.resume = gmc_v8_0_resume,
	.is_idle = gmc_v8_0_is_idle,
	.wait_for_idle = gmc_v8_0_wait_for_idle,
	.soft_reset = gmc_v8_0_soft_reset,
	.print_status = gmc_v8_0_print_status,
	.set_clockgating_state = gmc_v8_0_set_clockgating_state,
	.set_powergating_state = gmc_v8_0_set_powergating_state,
};

static const struct amdgpu_gart_funcs gmc_v8_0_gart_funcs = {
	.flush_gpu_tlb = gmc_v8_0_gart_flush_gpu_tlb,
	.set_pte_pde = gmc_v8_0_gart_set_pte_pde,
};

static const struct amdgpu_irq_src_funcs gmc_v8_0_irq_funcs = {
	.set = gmc_v8_0_vm_fault_interrupt_state,
	.process = gmc_v8_0_process_interrupt,
};

static void gmc_v8_0_set_gart_funcs(struct amdgpu_device *adev)
{
	if (adev->gart.gart_funcs == NULL)
		adev->gart.gart_funcs = &gmc_v8_0_gart_funcs;
}

static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->mc.vm_fault.num_types = 1;
	adev->mc.vm_fault.funcs = &gmc_v8_0_irq_funcs;
}