/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include <linux/pci.h>

#include <drm/drm_cache.h>

#include "amdgpu.h"
#include "gmc_v9_0.h"
#include "amdgpu_atomfirmware.h"
#include "amdgpu_gem.h"

#include "hdp/hdp_4_0_offset.h"
#include "hdp/hdp_4_0_sh_mask.h"
#include "gc/gc_9_0_sh_mask.h"
#include "dce/dce_12_0_offset.h"
#include "dce/dce_12_0_sh_mask.h"
#include "vega10_enum.h"
#include "mmhub/mmhub_1_0_offset.h"
#include "athub/athub_1_0_sh_mask.h"
#include "athub/athub_1_0_offset.h"
#include "oss/osssys_4_0_offset.h"

#include "soc15.h"
#include "soc15d.h"
#include "soc15_common.h"
#include "umc/umc_6_0_sh_mask.h"

#include "gfxhub_v1_0.h"
#include "mmhub_v1_0.h"
#include "athub_v1_0.h"
#include "gfxhub_v1_1.h"
#include "mmhub_v9_4.h"
#include "umc_v6_1.h"
#include "umc_v6_0.h"

#include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"

#include "amdgpu_ras.h"
#include "amdgpu_xgmi.h"
/* add these here since we already include dce12 headers and these are for DCN */
#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION			0x055d
#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_BASE_IDX		2
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH__SHIFT	0x0
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT__SHIFT	0x10
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH_MASK	0x00003FFFL
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT_MASK	0x3FFF0000L

/* XXX Move this macro to VEGA10 header file, which is like vid.h for VI. */
#define AMDGPU_NUM_OF_VMIDS	8
static const u32 golden_settings_vega10_hdp[] =
{
	0xf64, 0x0fffffff, 0x00000000,
	0xf65, 0x0fffffff, 0x00000000,
	0xf66, 0x0fffffff, 0x00000000,
	0xf67, 0x0fffffff, 0x00000000,
	0xf68, 0x0fffffff, 0x00000000,
	0xf6a, 0x0fffffff, 0x00000000,
	0xf6b, 0x0fffffff, 0x00000000,
	0xf6c, 0x0fffffff, 0x00000000,
	0xf6d, 0x0fffffff, 0x00000000,
	0xf6e, 0x0fffffff, 0x00000000,
};
static const struct soc15_reg_golden golden_settings_mmhub_1_0_0[] =
{
	SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmDAGB1_WRCLI2, 0x00000007, 0xfe5fe0fa),
	SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmMMEA1_DRAM_WR_CLI2GRP_MAP0, 0x00000030, 0x55555565)
};

static const struct soc15_reg_golden golden_settings_athub_1_0_0[] =
{
	SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL, 0x0000ff00, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL2, 0x00ff00ff, 0x00080008)
};
static const uint32_t ecc_umc_mcumc_ctrl_addrs[] = {
	(0x000143c0 + 0x00000000),
	(0x000143c0 + 0x00000800),
	(0x000143c0 + 0x00001000),
	(0x000143c0 + 0x00001800),
	(0x000543c0 + 0x00000000),
	(0x000543c0 + 0x00000800),
	(0x000543c0 + 0x00001000),
	(0x000543c0 + 0x00001800),
	(0x000943c0 + 0x00000000),
	(0x000943c0 + 0x00000800),
	(0x000943c0 + 0x00001000),
	(0x000943c0 + 0x00001800),
	(0x000d43c0 + 0x00000000),
	(0x000d43c0 + 0x00000800),
	(0x000d43c0 + 0x00001000),
	(0x000d43c0 + 0x00001800),
	(0x001143c0 + 0x00000000),
	(0x001143c0 + 0x00000800),
	(0x001143c0 + 0x00001000),
	(0x001143c0 + 0x00001800),
	(0x001543c0 + 0x00000000),
	(0x001543c0 + 0x00000800),
	(0x001543c0 + 0x00001000),
	(0x001543c0 + 0x00001800),
	(0x001943c0 + 0x00000000),
	(0x001943c0 + 0x00000800),
	(0x001943c0 + 0x00001000),
	(0x001943c0 + 0x00001800),
	(0x001d43c0 + 0x00000000),
	(0x001d43c0 + 0x00000800),
	(0x001d43c0 + 0x00001000),
	(0x001d43c0 + 0x00001800),
};

static const uint32_t ecc_umc_mcumc_ctrl_mask_addrs[] = {
	(0x000143e0 + 0x00000000),
	(0x000143e0 + 0x00000800),
	(0x000143e0 + 0x00001000),
	(0x000143e0 + 0x00001800),
	(0x000543e0 + 0x00000000),
	(0x000543e0 + 0x00000800),
	(0x000543e0 + 0x00001000),
	(0x000543e0 + 0x00001800),
	(0x000943e0 + 0x00000000),
	(0x000943e0 + 0x00000800),
	(0x000943e0 + 0x00001000),
	(0x000943e0 + 0x00001800),
	(0x000d43e0 + 0x00000000),
	(0x000d43e0 + 0x00000800),
	(0x000d43e0 + 0x00001000),
	(0x000d43e0 + 0x00001800),
	(0x001143e0 + 0x00000000),
	(0x001143e0 + 0x00000800),
	(0x001143e0 + 0x00001000),
	(0x001143e0 + 0x00001800),
	(0x001543e0 + 0x00000000),
	(0x001543e0 + 0x00000800),
	(0x001543e0 + 0x00001000),
	(0x001543e0 + 0x00001800),
	(0x001943e0 + 0x00000000),
	(0x001943e0 + 0x00000800),
	(0x001943e0 + 0x00001000),
	(0x001943e0 + 0x00001800),
	(0x001d43e0 + 0x00000000),
	(0x001d43e0 + 0x00000800),
	(0x001d43e0 + 0x00001000),
	(0x001d43e0 + 0x00001800),
};

static const uint32_t ecc_umc_mcumc_status_addrs[] = {
	(0x000143c2 + 0x00000000),
	(0x000143c2 + 0x00000800),
	(0x000143c2 + 0x00001000),
	(0x000143c2 + 0x00001800),
	(0x000543c2 + 0x00000000),
	(0x000543c2 + 0x00000800),
	(0x000543c2 + 0x00001000),
	(0x000543c2 + 0x00001800),
	(0x000943c2 + 0x00000000),
	(0x000943c2 + 0x00000800),
	(0x000943c2 + 0x00001000),
	(0x000943c2 + 0x00001800),
	(0x000d43c2 + 0x00000000),
	(0x000d43c2 + 0x00000800),
	(0x000d43c2 + 0x00001000),
	(0x000d43c2 + 0x00001800),
	(0x001143c2 + 0x00000000),
	(0x001143c2 + 0x00000800),
	(0x001143c2 + 0x00001000),
	(0x001143c2 + 0x00001800),
	(0x001543c2 + 0x00000000),
	(0x001543c2 + 0x00000800),
	(0x001543c2 + 0x00001000),
	(0x001543c2 + 0x00001800),
	(0x001943c2 + 0x00000000),
	(0x001943c2 + 0x00000800),
	(0x001943c2 + 0x00001000),
	(0x001943c2 + 0x00001800),
	(0x001d43c2 + 0x00000000),
	(0x001d43c2 + 0x00000800),
	(0x001d43c2 + 0x00001000),
	(0x001d43c2 + 0x00001800),
};
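
/*
 * The three tables above appear to enumerate, per UMC instance and channel,
 * the control, control-mask and status registers used for ECC error
 * reporting; gmc_v9_0_ecc_interrupt_state() below walks the first two to
 * toggle the low seven interrupt-enable bits of each register.
 */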
static int gmc_v9_0_ecc_interrupt_state(struct amdgpu_device *adev,
		struct amdgpu_irq_src *src,
		unsigned type,
		enum amdgpu_interrupt_state state)
{
	u32 bits, i, tmp, reg;

	/* Devices newer than VEGA10/12 shall have these programming
	 * sequences performed by PSP BL */
	if (adev->asic_type >= CHIP_VEGA20)
		return 0;

	bits = 0x7f;

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_addrs); i++) {
			reg = ecc_umc_mcumc_ctrl_addrs[i];
			tmp = RREG32(reg);
			tmp &= ~bits;
			WREG32(reg, tmp);
		}
		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_mask_addrs); i++) {
			reg = ecc_umc_mcumc_ctrl_mask_addrs[i];
			tmp = RREG32(reg);
			tmp &= ~bits;
			WREG32(reg, tmp);
		}
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_addrs); i++) {
			reg = ecc_umc_mcumc_ctrl_addrs[i];
			tmp = RREG32(reg);
			tmp |= bits;
			WREG32(reg, tmp);
		}
		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_mask_addrs); i++) {
			reg = ecc_umc_mcumc_ctrl_mask_addrs[i];
			tmp = RREG32(reg);
			tmp |= bits;
			WREG32(reg, tmp);
		}
		break;
	default:
		break;
	}

	return 0;
}
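
/*
 * Enable or disable VM protection fault interrupts by setting or clearing
 * the protection-fault enable bits in the VM_CONTEXT0..15_CNTL registers of
 * every vmhub.
 */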
static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *src,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	struct amdgpu_vmhub *hub;
	u32 tmp, reg, bits, i, j;

	bits = VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		for (j = 0; j < adev->num_vmhubs; j++) {
			hub = &adev->vmhub[j];
			for (i = 0; i < 16; i++) {
				reg = hub->vm_context0_cntl + i;
				tmp = RREG32(reg);
				tmp &= ~bits;
				WREG32(reg, tmp);
			}
		}
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		for (j = 0; j < adev->num_vmhubs; j++) {
			hub = &adev->vmhub[j];
			for (i = 0; i < 16; i++) {
				reg = hub->vm_context0_cntl + i;
				tmp = RREG32(reg);
				tmp |= bits;
				WREG32(reg, tmp);
			}
		}
		break;
	default:
		break;
	}

	return 0;
}
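
/*
 * IH callback for VM protection faults: decodes the faulting address from
 * src_data, picks the vmhub from the IH client id, optionally filters
 * retryable faults, and logs the decoded fault status.
 */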
static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
				struct amdgpu_irq_src *source,
				struct amdgpu_iv_entry *entry)
{
	struct amdgpu_vmhub *hub;
	bool retry_fault = !!(entry->src_data[1] & 0x80);
	uint32_t status = 0;
	u64 addr;
	char hub_name[10];

	addr = (u64)entry->src_data[0] << 12;
	addr |= ((u64)entry->src_data[1] & 0xf) << 44;

	if (retry_fault && amdgpu_gmc_filter_faults(adev, addr, entry->pasid,
						    entry->timestamp))
		return 1; /* This also prevents sending it to KFD */

	if (entry->client_id == SOC15_IH_CLIENTID_VMC) {
		snprintf(hub_name, sizeof(hub_name), "mmhub0");
		hub = &adev->vmhub[AMDGPU_MMHUB_0];
	} else if (entry->client_id == SOC15_IH_CLIENTID_VMC1) {
		snprintf(hub_name, sizeof(hub_name), "mmhub1");
		hub = &adev->vmhub[AMDGPU_MMHUB_1];
	} else {
		snprintf(hub_name, sizeof(hub_name), "gfxhub0");
		hub = &adev->vmhub[AMDGPU_GFXHUB_0];
	}

	/* If it's the first fault for this address, process it normally */
	if (retry_fault && !in_interrupt() &&
	    amdgpu_vm_handle_fault(adev, entry->pasid, addr))
		return 1; /* This also prevents sending it to KFD */

	if (!amdgpu_sriov_vf(adev)) {
		/*
		 * Issue a dummy read to wait for the status register to
		 * be updated to avoid reading an incorrect value due to
		 * the new fast GRBM interface.
		 */
		if (entry->vmid_src == AMDGPU_GFXHUB_0)
			RREG32(hub->vm_l2_pro_fault_status);

		status = RREG32(hub->vm_l2_pro_fault_status);
		WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
	}

	if (printk_ratelimit()) {
		struct amdgpu_task_info task_info;

		memset(&task_info, 0, sizeof(struct amdgpu_task_info));
		amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);

		dev_err(adev->dev,
			"[%s] %s page fault (src_id:%u ring:%u vmid:%u "
			"pasid:%u, for process %s pid %d thread %s pid %d)\n",
			hub_name, retry_fault ? "retry" : "no-retry",
			entry->src_id, entry->ring_id, entry->vmid,
			entry->pasid, task_info.process_name, task_info.tgid,
			task_info.task_name, task_info.pid);
		dev_err(adev->dev, "  in page starting at address 0x%016llx from client %d\n",
			addr, entry->client_id);
		if (!amdgpu_sriov_vf(adev)) {
			dev_err(adev->dev,
				"VM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
				status);
			dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n",
				REG_GET_FIELD(status,
				VM_L2_PROTECTION_FAULT_STATUS, MORE_FAULTS));
			dev_err(adev->dev, "\t WALKER_ERROR: 0x%lx\n",
				REG_GET_FIELD(status,
				VM_L2_PROTECTION_FAULT_STATUS, WALKER_ERROR));
			dev_err(adev->dev, "\t PERMISSION_FAULTS: 0x%lx\n",
				REG_GET_FIELD(status,
				VM_L2_PROTECTION_FAULT_STATUS, PERMISSION_FAULTS));
			dev_err(adev->dev, "\t MAPPING_ERROR: 0x%lx\n",
				REG_GET_FIELD(status,
				VM_L2_PROTECTION_FAULT_STATUS, MAPPING_ERROR));
			dev_err(adev->dev, "\t RW: 0x%lx\n",
				REG_GET_FIELD(status,
				VM_L2_PROTECTION_FAULT_STATUS, RW));
		}
	}

	return 0;
}
static const struct amdgpu_irq_src_funcs gmc_v9_0_irq_funcs = {
	.set = gmc_v9_0_vm_fault_interrupt_state,
	.process = gmc_v9_0_process_interrupt,
};

static const struct amdgpu_irq_src_funcs gmc_v9_0_ecc_funcs = {
	.set = gmc_v9_0_ecc_interrupt_state,
	.process = amdgpu_umc_process_ecc_irq,
};

static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->gmc.vm_fault.num_types = 1;
	adev->gmc.vm_fault.funcs = &gmc_v9_0_irq_funcs;

	if (!amdgpu_sriov_vf(adev)) {
		adev->gmc.ecc_irq.num_types = 1;
		adev->gmc.ecc_irq.funcs = &gmc_v9_0_ecc_funcs;
	}
}
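
/*
 * Build the VM_INVALIDATE_ENG0_REQ value for a per-VMID invalidation that
 * flushes the L2 PTEs, all three PDE levels and the L1 PTEs.
 */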
static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid,
					    uint32_t flush_type)
{
	u32 req = 0;

	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
			    PER_VMID_INVALIDATE_REQ, 1 << vmid);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, flush_type);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
			    CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0);

	return req;
}
/**
 * gmc_v9_0_use_invalidate_semaphore - judge whether to use semaphore
 *
 * @adev: amdgpu_device pointer
 * @vmhub: vmhub type
 *
 */
static bool gmc_v9_0_use_invalidate_semaphore(struct amdgpu_device *adev,
				       uint32_t vmhub)
{
	return ((vmhub == AMDGPU_MMHUB_0 ||
		 vmhub == AMDGPU_MMHUB_1) &&
		(!amdgpu_sriov_vf(adev)) &&
		(!(adev->asic_type == CHIP_RAVEN &&
		   adev->rev_id < 0x8 &&
		   adev->pdev->device == 0x15d8)));
}
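
/*
 * Read the ATC VMID->PASID mapping register for @vmid; returns true and
 * stores the PASID in @p_pasid when the mapping is marked valid.
 */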
static bool gmc_v9_0_get_atc_vmid_pasid_mapping_info(struct amdgpu_device *adev,
					uint8_t vmid, uint16_t *p_pasid)
{
	uint32_t value;

	value = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
		     + vmid);
	*p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK;

	return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
}
/*
 * GART
 * VMID 0 is the physical GPU addresses as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */
/**
 * gmc_v9_0_flush_gpu_tlb - tlb flush with certain type
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 * @vmhub: which hub to flush
 * @flush_type: the flush type
 *
 * Flush the TLB for the requested page table using certain type.
 */
static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
					uint32_t vmhub, uint32_t flush_type)
{
	bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(adev, vmhub);
	const unsigned eng = 17;
	u32 j, tmp;
	struct amdgpu_vmhub *hub;

	BUG_ON(vmhub >= adev->num_vmhubs);

	hub = &adev->vmhub[vmhub];
	tmp = gmc_v9_0_get_invalidate_req(vmid, flush_type);

	/* This is necessary for a HW workaround under SRIOV as well
	 * as GFXOFF under bare metal
	 */
	if (adev->gfx.kiq.ring.sched.ready &&
			(amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) &&
			!adev->in_gpu_reset) {
		uint32_t req = hub->vm_inv_eng0_req + eng;
		uint32_t ack = hub->vm_inv_eng0_ack + eng;

		amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, tmp,
				1 << vmid);
		return;
	}

	spin_lock(&adev->gmc.invalidate_lock);

	/*
	 * It may lose gpuvm invalidate acknowledge state across power-gating
	 * off cycle, add semaphore acquire before invalidation and semaphore
	 * release after invalidation to avoid entering power gated state
	 * to WA the Issue
	 */

	/* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
	if (use_semaphore) {
		for (j = 0; j < adev->usec_timeout; j++) {
			/* a read return value of 1 means semaphore acquire */
			tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_sem + eng);
			if (tmp & 0x1)
				break;
			udelay(1);
		}

		if (j >= adev->usec_timeout)
			DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n");
	}

	WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp);

	/*
	 * Issue a dummy read to wait for the ACK register to be cleared
	 * to avoid a false ACK due to the new fast GRBM interface.
	 */
	if (vmhub == AMDGPU_GFXHUB_0)
		RREG32_NO_KIQ(hub->vm_inv_eng0_req + eng);

	for (j = 0; j < adev->usec_timeout; j++) {
		tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack + eng);
		if (tmp & (1 << vmid))
			break;
		udelay(1);
	}

	/* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
	if (use_semaphore)
		/*
		 * add semaphore release after invalidation,
		 * write with 0 means semaphore release
		 */
		WREG32_NO_KIQ(hub->vm_inv_eng0_sem + eng, 0);

	spin_unlock(&adev->gmc.invalidate_lock);

	if (j < adev->usec_timeout)
		return;

	DRM_ERROR("Timeout waiting for VM flush ACK!\n");
}
/**
 * gmc_v9_0_flush_gpu_tlb_pasid - tlb flush via pasid
 *
 * @adev: amdgpu_device pointer
 * @pasid: pasid to be flushed
 * @flush_type: the flush type
 * @all_hub: flush all hubs
 *
 * Flush the TLB for the requested pasid.
 */
static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
					uint16_t pasid, uint32_t flush_type,
					bool all_hub)
{
	int vmid, i;
	signed long r;
	uint32_t seq;
	uint16_t queried_pasid;
	bool ret;
	struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;

	if (adev->in_gpu_reset)
		return -EIO;

	if (ring->sched.ready) {
		spin_lock(&adev->gfx.kiq.ring_lock);
		amdgpu_ring_alloc(ring, kiq->pmf->invalidate_tlbs_size);
		kiq->pmf->kiq_invalidate_tlbs(ring,
					pasid, flush_type, all_hub);
		amdgpu_fence_emit_polling(ring, &seq);
		amdgpu_ring_commit(ring);
		spin_unlock(&adev->gfx.kiq.ring_lock);
		r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
		if (r < 1) {
			DRM_ERROR("wait for kiq fence error: %ld.\n", r);
			return -ETIME;
		}

		return 0;
	}

	for (vmid = 1; vmid < 16; vmid++) {

		ret = gmc_v9_0_get_atc_vmid_pasid_mapping_info(adev, vmid,
				&queried_pasid);
		if (ret && queried_pasid == pasid) {
			if (all_hub) {
				for (i = 0; i < adev->num_vmhubs; i++)
					gmc_v9_0_flush_gpu_tlb(adev, vmid,
							i, 0);
			} else {
				gmc_v9_0_flush_gpu_tlb(adev, vmid,
						AMDGPU_GFXHUB_0, 0);
			}
			break;
		}
	}

	return 0;
}
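
/*
 * Ring variant of the TLB flush: emits the page directory update and the
 * invalidation request/ack sequence into the ring instead of writing the
 * registers directly from the CPU.
 */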
static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
					    unsigned vmid, uint64_t pd_addr)
{
	bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(ring->adev, ring->funcs->vmhub);
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_vmhub *hub = &adev->vmhub[ring->funcs->vmhub];
	uint32_t req = gmc_v9_0_get_invalidate_req(vmid, 0);
	unsigned eng = ring->vm_inv_eng;

	/*
	 * It may lose gpuvm invalidate acknowledge state across power-gating
	 * off cycle, add semaphore acquire before invalidation and semaphore
	 * release after invalidation to avoid entering power gated state
	 * to WA the Issue
	 */

	/* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
	if (use_semaphore)
		/* a read return value of 1 means semaphore acquire */
		amdgpu_ring_emit_reg_wait(ring,
					  hub->vm_inv_eng0_sem + eng, 0x1, 0x1);

	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 + (2 * vmid),
			      lower_32_bits(pd_addr));

	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 + (2 * vmid),
			      upper_32_bits(pd_addr));

	amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req + eng,
					    hub->vm_inv_eng0_ack + eng,
					    req, 1 << vmid);

	/* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
	if (use_semaphore)
		/*
		 * add semaphore release after invalidation,
		 * write with 0 means semaphore release
		 */
		amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_sem + eng, 0);

	return pd_addr;
}
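
/*
 * Emit a write that updates the IH VMID->PASID LUT for the hub this ring
 * belongs to; mmhub1 has no LUT register, so it is skipped.
 */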
static void gmc_v9_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
					unsigned pasid)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t reg;

	/* Do nothing because there's no lut register for mmhub1. */
	if (ring->funcs->vmhub == AMDGPU_MMHUB_1)
		return;

	if (ring->funcs->vmhub == AMDGPU_GFXHUB_0)
		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid;
	else
		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT_MM) + vmid;

	amdgpu_ring_emit_wreg(ring, reg, pasid);
}
/*
 * PTE format on VEGA 10:
 * 63:59 reserved
 * 58:57 mtype
 * 56 F
 * 55 L
 * 54 P
 * 53 SW
 * 52 T
 * 50:48 reserved
 * 47:12 4k physical page base address
 * 11:7 fragment
 * 6 write
 * 5 read
 * 4 exe
 * 3 Z
 * 2 snooped
 * 1 system
 * 0 valid
 *
 * PDE format on VEGA 10:
 * 63:59 block fragment size
 * 58:55 reserved
 * 54 P
 * 53:48 reserved
 * 47:6 physical base address of PD or PTE
 * 5:3 reserved
 * 2 C
 * 1 system
 * 0 valid
 */
static uint64_t gmc_v9_0_map_mtype(struct amdgpu_device *adev, uint32_t flags)
{
	switch (flags) {
	case AMDGPU_VM_MTYPE_DEFAULT:
		return AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
	case AMDGPU_VM_MTYPE_NC:
		return AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
	case AMDGPU_VM_MTYPE_WC:
		return AMDGPU_PTE_MTYPE_VG10(MTYPE_WC);
	case AMDGPU_VM_MTYPE_RW:
		return AMDGPU_PTE_MTYPE_VG10(MTYPE_RW);
	case AMDGPU_VM_MTYPE_CC:
		return AMDGPU_PTE_MTYPE_VG10(MTYPE_CC);
	case AMDGPU_VM_MTYPE_UC:
		return AMDGPU_PTE_MTYPE_VG10(MTYPE_UC);
	default:
		return AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
	}
}
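
/*
 * Adjust a PDE address/flags pair for the hardware layout: rebase VRAM
 * addresses into the MC address space and, when translate_further is
 * enabled, set the block fragment size on PDB1 entries or the
 * translate-further bit on PDB0 entries.
 */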
static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level,
				uint64_t *addr, uint64_t *flags)
{
	if (!(*flags & AMDGPU_PDE_PTE) && !(*flags & AMDGPU_PTE_SYSTEM))
		*addr = adev->vm_manager.vram_base_offset + *addr -
			adev->gmc.vram_start;
	BUG_ON(*addr & 0xFFFF00000000003FULL);

	if (!adev->gmc.translate_further)
		return;

	if (level == AMDGPU_VM_PDB1) {
		/* Set the block fragment size */
		if (!(*flags & AMDGPU_PDE_PTE))
			*flags |= AMDGPU_PDE_BFS(0x9);

	} else if (level == AMDGPU_VM_PDB0) {
		if (*flags & AMDGPU_PDE_PTE)
			*flags &= ~AMDGPU_PDE_PTE;
		else
			*flags |= AMDGPU_PTE_TF;
	}
}
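
/*
 * Compute the final PTE flags for a mapping: honour the requested
 * executable and mtype bits, demote PRT mappings to invalid, and mark
 * non-system XGMI mappings snooped on Arcturus.
 */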
static void gmc_v9_0_get_vm_pte(struct amdgpu_device *adev,
				struct amdgpu_bo_va_mapping *mapping,
				uint64_t *flags)
{
	*flags &= ~AMDGPU_PTE_EXECUTABLE;
	*flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;

	*flags &= ~AMDGPU_PTE_MTYPE_VG10_MASK;
	*flags |= mapping->flags & AMDGPU_PTE_MTYPE_VG10_MASK;

	if (mapping->flags & AMDGPU_PTE_PRT) {
		*flags |= AMDGPU_PTE_PRT;
		*flags &= ~AMDGPU_PTE_VALID;
	}

	if (adev->asic_type == CHIP_ARCTURUS &&
	    !(*flags & AMDGPU_PTE_SYSTEM) &&
	    mapping->bo_va->is_xgmi)
		*flags |= AMDGPU_PTE_SNOOPED;
}
static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = {
	.flush_gpu_tlb = gmc_v9_0_flush_gpu_tlb,
	.flush_gpu_tlb_pasid = gmc_v9_0_flush_gpu_tlb_pasid,
	.emit_flush_gpu_tlb = gmc_v9_0_emit_flush_gpu_tlb,
	.emit_pasid_mapping = gmc_v9_0_emit_pasid_mapping,
	.map_mtype = gmc_v9_0_map_mtype,
	.get_vm_pde = gmc_v9_0_get_vm_pde,
	.get_vm_pte = gmc_v9_0_get_vm_pte
};

static void gmc_v9_0_set_gmc_funcs(struct amdgpu_device *adev)
{
	adev->gmc.gmc_funcs = &gmc_v9_0_gmc_funcs;
}
static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_VEGA10:
		adev->umc.funcs = &umc_v6_0_funcs;
		break;
	case CHIP_VEGA20:
		adev->umc.max_ras_err_cnt_per_query = UMC_V6_1_TOTAL_CHANNEL_NUM;
		adev->umc.channel_inst_num = UMC_V6_1_CHANNEL_INSTANCE_NUM;
		adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM;
		adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET_VG20;
		adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0];
		adev->umc.funcs = &umc_v6_1_funcs;
		break;
	case CHIP_ARCTURUS:
		adev->umc.max_ras_err_cnt_per_query = UMC_V6_1_TOTAL_CHANNEL_NUM;
		adev->umc.channel_inst_num = UMC_V6_1_CHANNEL_INSTANCE_NUM;
		adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM;
		adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET_ARCT;
		adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0];
		adev->umc.funcs = &umc_v6_1_funcs;
		break;
	default:
		break;
	}
}

static void gmc_v9_0_set_mmhub_funcs(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_VEGA20:
		adev->mmhub.funcs = &mmhub_v1_0_funcs;
		break;
	case CHIP_ARCTURUS:
		adev->mmhub.funcs = &mmhub_v9_4_funcs;
		break;
	default:
		break;
	}
}
static int gmc_v9_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v9_0_set_gmc_funcs(adev);
	gmc_v9_0_set_irq_funcs(adev);
	gmc_v9_0_set_umc_funcs(adev);
	gmc_v9_0_set_mmhub_funcs(adev);

	adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
	adev->gmc.shared_aperture_end =
		adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
	adev->gmc.private_aperture_start = 0x1000000000000000ULL;
	adev->gmc.private_aperture_end =
		adev->gmc.private_aperture_start + (4ULL << 30) - 1;

	return 0;
}
static bool gmc_v9_0_keep_stolen_memory(struct amdgpu_device *adev)
{

	/*
	 * TODO:
	 * Currently there is a bug where some memory client outside
	 * of the driver writes to first 8M of VRAM on S3 resume,
	 * this overrides GART which by default gets placed in first 8M and
	 * causes VM_FAULTS once GTT is accessed.
	 * Keep the stolen memory reservation while this is not solved.
	 * Also check code in gmc_v9_0_get_vbios_fb_size and gmc_v9_0_late_init
	 */
	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_RAVEN:
	case CHIP_ARCTURUS:
	case CHIP_RENOIR:
		return true;
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	default:
		return false;
	}
}
static int gmc_v9_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	if (!gmc_v9_0_keep_stolen_memory(adev))
		amdgpu_bo_late_init(adev);

	r = amdgpu_gmc_allocate_vm_inv_eng(adev);
	if (r)
		return r;
	/* Check if ecc is available */
	if (!amdgpu_sriov_vf(adev)) {
		switch (adev->asic_type) {
		case CHIP_VEGA10:
		case CHIP_VEGA20:
		case CHIP_ARCTURUS:
			r = amdgpu_atomfirmware_mem_ecc_supported(adev);
			if (!r) {
				DRM_INFO("ECC is not present.\n");
				if (adev->df.funcs->enable_ecc_force_par_wr_rmw)
					adev->df.funcs->enable_ecc_force_par_wr_rmw(adev, false);
			} else {
				DRM_INFO("ECC is active.\n");
			}

			r = amdgpu_atomfirmware_sram_ecc_supported(adev);
			if (!r) {
				DRM_INFO("SRAM ECC is not present.\n");
			} else {
				DRM_INFO("SRAM ECC is active.\n");
			}
			break;
		default:
			break;
		}
	}

	r = amdgpu_gmc_ras_late_init(adev);
	if (r)
		return r;

	return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
}
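
/*
 * Place VRAM, GART and the AGP aperture in the GPU's physical address
 * space, starting from the framebuffer location reported by the mmhub
 * plus this node's XGMI segment offset.
 */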
static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
					struct amdgpu_gmc *mc)
{
	u64 base = 0;

	if (adev->asic_type == CHIP_ARCTURUS)
		base = mmhub_v9_4_get_fb_location(adev);
	else if (!amdgpu_sriov_vf(adev))
		base = mmhub_v1_0_get_fb_location(adev);

	/* add the xgmi offset of the physical node */
	base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
	amdgpu_gmc_vram_location(adev, mc, base);
	amdgpu_gmc_gart_location(adev, mc);
	amdgpu_gmc_agp_location(adev, mc);
	/* base offset of vram pages */
	adev->vm_manager.vram_base_offset = gfxhub_v1_0_get_mc_fb_offset(adev);

	/* XXX: add the xgmi offset of the physical node? */
	adev->vm_manager.vram_base_offset +=
		adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
}
/**
 * gmc_v9_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space.
 * Returns 0 for success.
 */
static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
{
	int r;

	/* size in bytes (get_memsize() reports MB) */
	adev->gmc.mc_vram_size =
		adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL;
	adev->gmc.real_vram_size = adev->gmc.mc_vram_size;

	if (!(adev->flags & AMD_IS_APU)) {
		r = amdgpu_device_resize_fb_bar(adev);
		if (r)
			return r;
	}
	adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);

#ifdef CONFIG_X86_64
	if (adev->flags & AMD_IS_APU) {
		adev->gmc.aper_base = gfxhub_v1_0_get_mc_fb_offset(adev);
		adev->gmc.aper_size = adev->gmc.real_vram_size;
	}
#endif
	/* In case the PCI BAR is larger than the actual amount of vram */
	adev->gmc.visible_vram_size = adev->gmc.aper_size;
	if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
		adev->gmc.visible_vram_size = adev->gmc.real_vram_size;

	/* set the gart size */
	if (amdgpu_gart_size == -1) {
		switch (adev->asic_type) {
		case CHIP_VEGA10:  /* all engines support GPUVM */
		case CHIP_VEGA12:  /* all engines support GPUVM */
		case CHIP_VEGA20:
		case CHIP_ARCTURUS:
		default:
			adev->gmc.gart_size = 512ULL << 20;
			break;
		case CHIP_RAVEN:   /* DCE SG support */
		case CHIP_RENOIR:
			adev->gmc.gart_size = 1024ULL << 20;
			break;
		}
	} else {
		adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
	}

	gmc_v9_0_vram_gtt_location(adev, &adev->gmc);

	return 0;
}
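
/*
 * Allocate the GART page table in VRAM; entries are 8 bytes each and are
 * created uncached (MTYPE_UC) and executable.
 */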
static int gmc_v9_0_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.bo) {
		WARN(1, "VEGA10 PCIE GART already initialized\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = amdgpu_gart_init(adev);
	if (r)
		return r;
	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
	adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE_VG10(MTYPE_UC) |
				 AMDGPU_PTE_EXECUTABLE;
	return amdgpu_gart_table_vram_alloc(adev);
}
static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev)
{
	u32 d1vga_control;
	unsigned size;

	/*
	 * TODO Remove once GART corruption is resolved
	 * Check related code in gmc_v9_0_sw_fini
	 */
	if (gmc_v9_0_keep_stolen_memory(adev))
		return 9 * 1024 * 1024;

	d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL);
	if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
		size = 9 * 1024 * 1024; /* reserve 8MB for vga emulator and 1 MB for FB */
	} else {
		u32 viewport;

		switch (adev->asic_type) {
		case CHIP_RAVEN:
		case CHIP_RENOIR:
			viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
			size = (REG_GET_FIELD(viewport,
					      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
				REG_GET_FIELD(viewport,
					      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_WIDTH) *
				4);
			break;
		case CHIP_VEGA10:
		case CHIP_VEGA12:
		case CHIP_VEGA20:
		default:
			viewport = RREG32_SOC15(DCE, 0, mmSCL0_VIEWPORT_SIZE);
			size = (REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
				REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_WIDTH) *
				4);
			break;
		}
	}
	/* return 0 if the pre-OS buffer uses up most of vram */
	if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024))
		return 0;

	return size;
}
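
/*
 * sw_init wires up the hubs, reads the VRAM configuration, sizes the VM
 * space per ASIC, registers the fault/ECC interrupt sources and brings up
 * the memory manager and GART.
 */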
static int gmc_v9_0_sw_init(void *handle)
{
	int r, vram_width = 0, vram_type = 0, vram_vendor = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gfxhub_v1_0_init(adev);
	if (adev->asic_type == CHIP_ARCTURUS)
		mmhub_v9_4_init(adev);
	else
		mmhub_v1_0_init(adev);

	spin_lock_init(&adev->gmc.invalidate_lock);

	r = amdgpu_atomfirmware_get_vram_info(adev,
		&vram_width, &vram_type, &vram_vendor);
	if (amdgpu_sriov_vf(adev))
		/* For Vega10 SR-IOV, vram_width can't be read from ATOM as on
		 * RAVEN, and DF-related registers are not readable; hardcoding
		 * seems to be the only way to set the correct vram_width
		 */
		adev->gmc.vram_width = 2048;
	else if (amdgpu_emu_mode != 1)
		adev->gmc.vram_width = vram_width;

	if (!adev->gmc.vram_width) {
		int chansize, numchan;

		/* hbm memory channel size */
		if (adev->flags & AMD_IS_APU)
			chansize = 64;
		else
			chansize = 128;

		numchan = adev->df.funcs->get_hbm_channel_number(adev);
		adev->gmc.vram_width = numchan * chansize;
	}

	adev->gmc.vram_type = vram_type;
	adev->gmc.vram_vendor = vram_vendor;
	switch (adev->asic_type) {
	case CHIP_RAVEN:
		adev->num_vmhubs = 2;

		if (adev->rev_id == 0x0 || adev->rev_id == 0x1) {
			amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		} else {
			/* vm_size is 128TB + 512GB for legacy 3-level page support */
			amdgpu_vm_adjust_size(adev, 128 * 1024 + 512, 9, 2, 48);
			adev->gmc.translate_further =
				adev->vm_manager.num_level > 1;
		}
		break;
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	case CHIP_RENOIR:
		adev->num_vmhubs = 2;

		/*
		 * To fulfill 4-level page support,
		 * vm size is 256TB (48bit), maximum size of Vega10,
		 * block size 512 (9bit)
		 */
		/* sriov restrict max_pfn below AMDGPU_GMC_HOLE */
		if (amdgpu_sriov_vf(adev))
			amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 47);
		else
			amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		break;
	case CHIP_ARCTURUS:
		adev->num_vmhubs = 3;

		/* Keep the vm size same with Vega20 */
		amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		break;
	default:
		break;
	}

	/* This interrupt is VMC page fault. */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC, VMC_1_0__SRCID__VM_FAULT,
				&adev->gmc.vm_fault);
	if (r)
		return r;

	if (adev->asic_type == CHIP_ARCTURUS) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC1, VMC_1_0__SRCID__VM_FAULT,
					&adev->gmc.vm_fault);
		if (r)
			return r;
	}

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2, UTCL2_1_0__SRCID__FAULT,
				&adev->gmc.vm_fault);

	if (r)
		return r;

	if (!amdgpu_sriov_vf(adev)) {
		/* interrupt sent to DF. */
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DF, 0,
				      &adev->gmc.ecc_irq);
		if (r)
			return r;
	}

	/* Set the internal MC address mask
	 * This is the max address of the GPU's
	 * internal address space.
	 */
	adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */

	r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44));
	if (r) {
		printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
		return r;
	}
	adev->need_swiotlb = drm_need_swiotlb(44);

	if (adev->gmc.xgmi.supported) {
		r = gfxhub_v1_1_get_xgmi_info(adev);
		if (r)
			return r;
	}

	r = gmc_v9_0_mc_init(adev);
	if (r)
		return r;

	adev->gmc.stolen_size = gmc_v9_0_get_vbios_fb_size(adev);

	/* Memory manager */
	r = amdgpu_bo_init(adev);
	if (r)
		return r;

	r = gmc_v9_0_gart_init(adev);
	if (r)
		return r;

	/*
	 * number of VMs
	 * VMID 0 is reserved for System
	 * amdgpu graphics/compute will use VMIDs 1-7
	 * amdkfd will use VMIDs 8-15
	 */
	adev->vm_manager.id_mgr[AMDGPU_GFXHUB_0].num_ids = AMDGPU_NUM_OF_VMIDS;
	adev->vm_manager.id_mgr[AMDGPU_MMHUB_0].num_ids = AMDGPU_NUM_OF_VMIDS;
	adev->vm_manager.id_mgr[AMDGPU_MMHUB_1].num_ids = AMDGPU_NUM_OF_VMIDS;

	amdgpu_vm_manager_init(adev);

	return 0;
}
static int gmc_v9_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	void *stolen_vga_buf;

	amdgpu_gmc_ras_fini(adev);
	amdgpu_gem_force_release(adev);
	amdgpu_vm_manager_fini(adev);

	if (gmc_v9_0_keep_stolen_memory(adev))
		amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, &stolen_vga_buf);

	amdgpu_gart_table_vram_free(adev);
	amdgpu_bo_fini(adev);
	amdgpu_gart_fini(adev);

	return 0;
}
static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
{

	switch (adev->asic_type) {
	case CHIP_VEGA10:
		if (amdgpu_sriov_vf(adev))
			break;
		/* fall through */
	case CHIP_VEGA20:
		soc15_program_register_sequence(adev,
						golden_settings_mmhub_1_0_0,
						ARRAY_SIZE(golden_settings_mmhub_1_0_0));
		soc15_program_register_sequence(adev,
						golden_settings_athub_1_0_0,
						ARRAY_SIZE(golden_settings_athub_1_0_0));
		break;
	case CHIP_VEGA12:
		break;
	case CHIP_RAVEN:
		/* TODO for renoir */
		soc15_program_register_sequence(adev,
						golden_settings_athub_1_0_0,
						ARRAY_SIZE(golden_settings_athub_1_0_0));
		break;
	default:
		break;
	}
}
/**
 * gmc_v9_0_gart_enable - gart enable
 *
 * @adev: amdgpu_device pointer
 */
static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.bo == NULL) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = amdgpu_gart_table_vram_pin(adev);
	if (r)
		return r;

	r = gfxhub_v1_0_gart_enable(adev);
	if (r)
		return r;

	if (adev->asic_type == CHIP_ARCTURUS)
		r = mmhub_v9_4_gart_enable(adev);
	else
		r = mmhub_v1_0_gart_enable(adev);
	if (r)
		return r;

	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(adev->gmc.gart_size >> 20),
		 (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));
	adev->gart.ready = true;
	return 0;
}
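
/*
 * hw_init programs the golden registers, disables the VGA aperture,
 * initializes HDP, sets the fault handling policy, flushes every vmhub
 * TLB and finally enables the GART.
 */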
static int gmc_v9_0_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool value;
	int r, i;
	u32 tmp;

	/* The sequence of these two function calls matters. */
	gmc_v9_0_init_golden_registers(adev);

	if (adev->mode_info.num_crtc) {
		if (adev->asic_type != CHIP_ARCTURUS) {
			/* Lockout access through VGA aperture */
			WREG32_FIELD15(DCE, 0, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);

			/* disable VGA render */
			WREG32_FIELD15(DCE, 0, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
		}
	}

	amdgpu_device_program_register_sequence(adev,
						golden_settings_vega10_hdp,
						ARRAY_SIZE(golden_settings_vega10_hdp));

	switch (adev->asic_type) {
	case CHIP_RAVEN:
		/* TODO for renoir */
		mmhub_v1_0_update_power_gating(adev, true);
		break;
	case CHIP_ARCTURUS:
		WREG32_FIELD15(HDP, 0, HDP_MMHUB_CNTL, HDP_MMHUB_GCC, 1);
		break;
	default:
		break;
	}

	WREG32_FIELD15(HDP, 0, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 1);

	tmp = RREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL);
	WREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL, tmp);

	WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE, (adev->gmc.vram_start >> 8));
	WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE_HI, (adev->gmc.vram_start >> 40));

	/* After HDP is initialized, flush HDP. */
	adev->nbio.funcs->hdp_flush(adev, NULL);

	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
		value = false;
	else
		value = true;

	if (!amdgpu_sriov_vf(adev)) {
		gfxhub_v1_0_set_fault_enable_default(adev, value);
		if (adev->asic_type == CHIP_ARCTURUS)
			mmhub_v9_4_set_fault_enable_default(adev, value);
		else
			mmhub_v1_0_set_fault_enable_default(adev, value);
	}
	for (i = 0; i < adev->num_vmhubs; ++i)
		gmc_v9_0_flush_gpu_tlb(adev, 0, i, 0);

	if (adev->umc.funcs && adev->umc.funcs->init_registers)
		adev->umc.funcs->init_registers(adev);

	r = gmc_v9_0_gart_enable(adev);

	return r;
}
/**
 * gmc_v9_0_gart_disable - gart disable
 *
 * @adev: amdgpu_device pointer
 *
 * This disables all VM page tables.
 */
static void gmc_v9_0_gart_disable(struct amdgpu_device *adev)
{
	gfxhub_v1_0_gart_disable(adev);
	if (adev->asic_type == CHIP_ARCTURUS)
		mmhub_v9_4_gart_disable(adev);
	else
		mmhub_v1_0_gart_disable(adev);
	amdgpu_gart_table_vram_unpin(adev);
}
static int gmc_v9_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev)) {
		/* full access mode, so don't touch any GMC register */
		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
		return 0;
	}

	amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
	gmc_v9_0_gart_disable(adev);

	return 0;
}
static int gmc_v9_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return gmc_v9_0_hw_fini(adev);
}

static int gmc_v9_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = gmc_v9_0_hw_init(adev);
	if (r)
		return r;

	amdgpu_vmid_reset_all(adev);

	return 0;
}

static bool gmc_v9_0_is_idle(void *handle)
{
	/* MC is always ready in GMC v9. */
	return true;
}

static int gmc_v9_0_wait_for_idle(void *handle)
{
	/* There is no need to wait for MC idle in GMC v9. */
	return 0;
}

static int gmc_v9_0_soft_reset(void *handle)
{
	/* XXX for emulation. */
	return 0;
}
static int gmc_v9_0_set_clockgating_state(void *handle,
					enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->asic_type == CHIP_ARCTURUS)
		mmhub_v9_4_set_clockgating(adev, state);
	else
		mmhub_v1_0_set_clockgating(adev, state);

	athub_v1_0_set_clockgating(adev, state);

	return 0;
}

static void gmc_v9_0_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->asic_type == CHIP_ARCTURUS)
		mmhub_v9_4_get_clockgating(adev, flags);
	else
		mmhub_v1_0_get_clockgating(adev, flags);

	athub_v1_0_get_clockgating(adev, flags);
}

static int gmc_v9_0_set_powergating_state(void *handle,
					enum amd_powergating_state state)
{
	return 0;
}
const struct amd_ip_funcs gmc_v9_0_ip_funcs = {
	.name = "gmc_v9_0",
	.early_init = gmc_v9_0_early_init,
	.late_init = gmc_v9_0_late_init,
	.sw_init = gmc_v9_0_sw_init,
	.sw_fini = gmc_v9_0_sw_fini,
	.hw_init = gmc_v9_0_hw_init,
	.hw_fini = gmc_v9_0_hw_fini,
	.suspend = gmc_v9_0_suspend,
	.resume = gmc_v9_0_resume,
	.is_idle = gmc_v9_0_is_idle,
	.wait_for_idle = gmc_v9_0_wait_for_idle,
	.soft_reset = gmc_v9_0_soft_reset,
	.set_clockgating_state = gmc_v9_0_set_clockgating_state,
	.set_powergating_state = gmc_v9_0_set_powergating_state,
	.get_clockgating_state = gmc_v9_0_get_clockgating_state,
};

const struct amdgpu_ip_block_version gmc_v9_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 9,
	.minor = 0,
	.rev = 0,
	.funcs = &gmc_v9_0_ip_funcs,
};