/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
23 #include <linux/firmware.h>
26 #include "amdgpu_gfx.h"
29 #include "amdgpu_ucode.h"
30 #include "clearstate_vi.h"
32 #include "gmc/gmc_8_2_d.h"
33 #include "gmc/gmc_8_2_sh_mask.h"
35 #include "oss/oss_3_0_d.h"
36 #include "oss/oss_3_0_sh_mask.h"
38 #include "bif/bif_5_0_d.h"
39 #include "bif/bif_5_0_sh_mask.h"
41 #include "gca/gfx_8_0_d.h"
42 #include "gca/gfx_8_0_enum.h"
43 #include "gca/gfx_8_0_sh_mask.h"
44 #include "gca/gfx_8_0_enum.h"
46 #include "uvd/uvd_5_0_d.h"
47 #include "uvd/uvd_5_0_sh_mask.h"
49 #include "dce/dce_10_0_d.h"
50 #include "dce/dce_10_0_sh_mask.h"
52 #define GFX8_NUM_GFX_RINGS 1
53 #define GFX8_NUM_COMPUTE_RINGS 8
55 #define TOPAZ_GB_ADDR_CONFIG_GOLDEN 0x22010001
56 #define CARRIZO_GB_ADDR_CONFIG_GOLDEN 0x22010001
57 #define TONGA_GB_ADDR_CONFIG_GOLDEN 0x22011003
59 #define ARRAY_MODE(x) ((x) << GB_TILE_MODE0__ARRAY_MODE__SHIFT)
60 #define PIPE_CONFIG(x) ((x) << GB_TILE_MODE0__PIPE_CONFIG__SHIFT)
61 #define TILE_SPLIT(x) ((x) << GB_TILE_MODE0__TILE_SPLIT__SHIFT)
62 #define MICRO_TILE_MODE_NEW(x) ((x) << GB_TILE_MODE0__MICRO_TILE_MODE_NEW__SHIFT)
63 #define SAMPLE_SPLIT(x) ((x) << GB_TILE_MODE0__SAMPLE_SPLIT__SHIFT)
64 #define BANK_WIDTH(x) ((x) << GB_MACROTILE_MODE0__BANK_WIDTH__SHIFT)
65 #define BANK_HEIGHT(x) ((x) << GB_MACROTILE_MODE0__BANK_HEIGHT__SHIFT)
66 #define MACRO_TILE_ASPECT(x) ((x) << GB_MACROTILE_MODE0__MACRO_TILE_ASPECT__SHIFT)
67 #define NUM_BANKS(x) ((x) << GB_MACROTILE_MODE0__NUM_BANKS__SHIFT)
69 MODULE_FIRMWARE("amdgpu/carrizo_ce.bin");
70 MODULE_FIRMWARE("amdgpu/carrizo_pfp.bin");
71 MODULE_FIRMWARE("amdgpu/carrizo_me.bin");
72 MODULE_FIRMWARE("amdgpu/carrizo_mec.bin");
73 MODULE_FIRMWARE("amdgpu/carrizo_mec2.bin");
74 MODULE_FIRMWARE("amdgpu/carrizo_rlc.bin");
76 MODULE_FIRMWARE("amdgpu/stoney_ce.bin");
77 MODULE_FIRMWARE("amdgpu/stoney_pfp.bin");
78 MODULE_FIRMWARE("amdgpu/stoney_me.bin");
79 MODULE_FIRMWARE("amdgpu/stoney_mec.bin");
80 MODULE_FIRMWARE("amdgpu/stoney_rlc.bin");
82 MODULE_FIRMWARE("amdgpu/tonga_ce.bin");
83 MODULE_FIRMWARE("amdgpu/tonga_pfp.bin");
84 MODULE_FIRMWARE("amdgpu/tonga_me.bin");
85 MODULE_FIRMWARE("amdgpu/tonga_mec.bin");
86 MODULE_FIRMWARE("amdgpu/tonga_mec2.bin");
87 MODULE_FIRMWARE("amdgpu/tonga_rlc.bin");
89 MODULE_FIRMWARE("amdgpu/topaz_ce.bin");
90 MODULE_FIRMWARE("amdgpu/topaz_pfp.bin");
91 MODULE_FIRMWARE("amdgpu/topaz_me.bin");
92 MODULE_FIRMWARE("amdgpu/topaz_mec.bin");
93 MODULE_FIRMWARE("amdgpu/topaz_mec2.bin");
94 MODULE_FIRMWARE("amdgpu/topaz_rlc.bin");
96 MODULE_FIRMWARE("amdgpu/fiji_ce.bin");
97 MODULE_FIRMWARE("amdgpu/fiji_pfp.bin");
98 MODULE_FIRMWARE("amdgpu/fiji_me.bin");
99 MODULE_FIRMWARE("amdgpu/fiji_mec.bin");
100 MODULE_FIRMWARE("amdgpu/fiji_mec2.bin");
101 MODULE_FIRMWARE("amdgpu/fiji_rlc.bin");
103 static const struct amdgpu_gds_reg_offset amdgpu_gds_reg_offset
[] =
105 {mmGDS_VMID0_BASE
, mmGDS_VMID0_SIZE
, mmGDS_GWS_VMID0
, mmGDS_OA_VMID0
},
106 {mmGDS_VMID1_BASE
, mmGDS_VMID1_SIZE
, mmGDS_GWS_VMID1
, mmGDS_OA_VMID1
},
107 {mmGDS_VMID2_BASE
, mmGDS_VMID2_SIZE
, mmGDS_GWS_VMID2
, mmGDS_OA_VMID2
},
108 {mmGDS_VMID3_BASE
, mmGDS_VMID3_SIZE
, mmGDS_GWS_VMID3
, mmGDS_OA_VMID3
},
109 {mmGDS_VMID4_BASE
, mmGDS_VMID4_SIZE
, mmGDS_GWS_VMID4
, mmGDS_OA_VMID4
},
110 {mmGDS_VMID5_BASE
, mmGDS_VMID5_SIZE
, mmGDS_GWS_VMID5
, mmGDS_OA_VMID5
},
111 {mmGDS_VMID6_BASE
, mmGDS_VMID6_SIZE
, mmGDS_GWS_VMID6
, mmGDS_OA_VMID6
},
112 {mmGDS_VMID7_BASE
, mmGDS_VMID7_SIZE
, mmGDS_GWS_VMID7
, mmGDS_OA_VMID7
},
113 {mmGDS_VMID8_BASE
, mmGDS_VMID8_SIZE
, mmGDS_GWS_VMID8
, mmGDS_OA_VMID8
},
114 {mmGDS_VMID9_BASE
, mmGDS_VMID9_SIZE
, mmGDS_GWS_VMID9
, mmGDS_OA_VMID9
},
115 {mmGDS_VMID10_BASE
, mmGDS_VMID10_SIZE
, mmGDS_GWS_VMID10
, mmGDS_OA_VMID10
},
116 {mmGDS_VMID11_BASE
, mmGDS_VMID11_SIZE
, mmGDS_GWS_VMID11
, mmGDS_OA_VMID11
},
117 {mmGDS_VMID12_BASE
, mmGDS_VMID12_SIZE
, mmGDS_GWS_VMID12
, mmGDS_OA_VMID12
},
118 {mmGDS_VMID13_BASE
, mmGDS_VMID13_SIZE
, mmGDS_GWS_VMID13
, mmGDS_OA_VMID13
},
119 {mmGDS_VMID14_BASE
, mmGDS_VMID14_SIZE
, mmGDS_GWS_VMID14
, mmGDS_OA_VMID14
},
120 {mmGDS_VMID15_BASE
, mmGDS_VMID15_SIZE
, mmGDS_GWS_VMID15
, mmGDS_OA_VMID15
}
123 static const u32 golden_settings_tonga_a11
[] =
125 mmCB_HW_CONTROL
, 0xfffdf3cf, 0x00007208,
126 mmCB_HW_CONTROL_3
, 0x00000040, 0x00000040,
127 mmDB_DEBUG2
, 0xf00fffff, 0x00000400,
128 mmGB_GPU_ID
, 0x0000000f, 0x00000000,
129 mmPA_SC_ENHANCE
, 0xffffffff, 0x20000001,
130 mmPA_SC_FIFO_DEPTH_CNTL
, 0x000003ff, 0x000000fc,
131 mmPA_SC_LINE_STIPPLE_STATE
, 0x0000ff0f, 0x00000000,
132 mmSQ_RANDOM_WAVE_PRI
, 0x001fffff, 0x000006fd,
133 mmTA_CNTL_AUX
, 0x000f000f, 0x000b0000,
134 mmTCC_CTRL
, 0x00100000, 0xf31fff7f,
135 mmTCC_EXE_DISABLE
, 0x00000002, 0x00000002,
136 mmTCP_ADDR_CONFIG
, 0x000003ff, 0x000002fb,
137 mmTCP_CHAN_STEER_HI
, 0xffffffff, 0x0000543b,
138 mmTCP_CHAN_STEER_LO
, 0xffffffff, 0xa9210876,
139 mmVGT_RESET_DEBUG
, 0x00000004, 0x00000004,
142 static const u32 tonga_golden_common_all
[] =
144 mmGRBM_GFX_INDEX
, 0xffffffff, 0xe0000000,
145 mmPA_SC_RASTER_CONFIG
, 0xffffffff, 0x16000012,
146 mmPA_SC_RASTER_CONFIG_1
, 0xffffffff, 0x0000002A,
147 mmGB_ADDR_CONFIG
, 0xffffffff, 0x22011003,
148 mmSPI_RESOURCE_RESERVE_CU_0
, 0xffffffff, 0x00000800,
149 mmSPI_RESOURCE_RESERVE_CU_1
, 0xffffffff, 0x00000800,
150 mmSPI_RESOURCE_RESERVE_EN_CU_0
, 0xffffffff, 0x00007FBF,
151 mmSPI_RESOURCE_RESERVE_EN_CU_1
, 0xffffffff, 0x00007FAF
154 static const u32 tonga_mgcg_cgcg_init
[] =
156 mmRLC_CGTT_MGCG_OVERRIDE
, 0xffffffff, 0xffffffff,
157 mmGRBM_GFX_INDEX
, 0xffffffff, 0xe0000000,
158 mmCB_CGTT_SCLK_CTRL
, 0xffffffff, 0x00000100,
159 mmCGTT_BCI_CLK_CTRL
, 0xffffffff, 0x00000100,
160 mmCGTT_CP_CLK_CTRL
, 0xffffffff, 0x00000100,
161 mmCGTT_CPC_CLK_CTRL
, 0xffffffff, 0x00000100,
162 mmCGTT_CPF_CLK_CTRL
, 0xffffffff, 0x40000100,
163 mmCGTT_GDS_CLK_CTRL
, 0xffffffff, 0x00000100,
164 mmCGTT_IA_CLK_CTRL
, 0xffffffff, 0x06000100,
165 mmCGTT_PA_CLK_CTRL
, 0xffffffff, 0x00000100,
166 mmCGTT_WD_CLK_CTRL
, 0xffffffff, 0x06000100,
167 mmCGTT_PC_CLK_CTRL
, 0xffffffff, 0x00000100,
168 mmCGTT_RLC_CLK_CTRL
, 0xffffffff, 0x00000100,
169 mmCGTT_SC_CLK_CTRL
, 0xffffffff, 0x00000100,
170 mmCGTT_SPI_CLK_CTRL
, 0xffffffff, 0x00000100,
171 mmCGTT_SQ_CLK_CTRL
, 0xffffffff, 0x00000100,
172 mmCGTT_SQG_CLK_CTRL
, 0xffffffff, 0x00000100,
173 mmCGTT_SX_CLK_CTRL0
, 0xffffffff, 0x00000100,
174 mmCGTT_SX_CLK_CTRL1
, 0xffffffff, 0x00000100,
175 mmCGTT_SX_CLK_CTRL2
, 0xffffffff, 0x00000100,
176 mmCGTT_SX_CLK_CTRL3
, 0xffffffff, 0x00000100,
177 mmCGTT_SX_CLK_CTRL4
, 0xffffffff, 0x00000100,
178 mmCGTT_TCI_CLK_CTRL
, 0xffffffff, 0x00000100,
179 mmCGTT_TCP_CLK_CTRL
, 0xffffffff, 0x00000100,
180 mmCGTT_VGT_CLK_CTRL
, 0xffffffff, 0x06000100,
181 mmDB_CGTT_CLK_CTRL_0
, 0xffffffff, 0x00000100,
182 mmTA_CGTT_CTRL
, 0xffffffff, 0x00000100,
183 mmTCA_CGTT_SCLK_CTRL
, 0xffffffff, 0x00000100,
184 mmTCC_CGTT_SCLK_CTRL
, 0xffffffff, 0x00000100,
185 mmTD_CGTT_CTRL
, 0xffffffff, 0x00000100,
186 mmGRBM_GFX_INDEX
, 0xffffffff, 0xe0000000,
187 mmCGTS_CU0_SP0_CTRL_REG
, 0xffffffff, 0x00010000,
188 mmCGTS_CU0_LDS_SQ_CTRL_REG
, 0xffffffff, 0x00030002,
189 mmCGTS_CU0_TA_SQC_CTRL_REG
, 0xffffffff, 0x00040007,
190 mmCGTS_CU0_SP1_CTRL_REG
, 0xffffffff, 0x00060005,
191 mmCGTS_CU0_TD_TCP_CTRL_REG
, 0xffffffff, 0x00090008,
192 mmCGTS_CU1_SP0_CTRL_REG
, 0xffffffff, 0x00010000,
193 mmCGTS_CU1_LDS_SQ_CTRL_REG
, 0xffffffff, 0x00030002,
194 mmCGTS_CU1_TA_CTRL_REG
, 0xffffffff, 0x00040007,
195 mmCGTS_CU1_SP1_CTRL_REG
, 0xffffffff, 0x00060005,
196 mmCGTS_CU1_TD_TCP_CTRL_REG
, 0xffffffff, 0x00090008,
197 mmCGTS_CU2_SP0_CTRL_REG
, 0xffffffff, 0x00010000,
198 mmCGTS_CU2_LDS_SQ_CTRL_REG
, 0xffffffff, 0x00030002,
199 mmCGTS_CU2_TA_CTRL_REG
, 0xffffffff, 0x00040007,
200 mmCGTS_CU2_SP1_CTRL_REG
, 0xffffffff, 0x00060005,
201 mmCGTS_CU2_TD_TCP_CTRL_REG
, 0xffffffff, 0x00090008,
202 mmCGTS_CU3_SP0_CTRL_REG
, 0xffffffff, 0x00010000,
203 mmCGTS_CU3_LDS_SQ_CTRL_REG
, 0xffffffff, 0x00030002,
204 mmCGTS_CU3_TA_CTRL_REG
, 0xffffffff, 0x00040007,
205 mmCGTS_CU3_SP1_CTRL_REG
, 0xffffffff, 0x00060005,
206 mmCGTS_CU3_TD_TCP_CTRL_REG
, 0xffffffff, 0x00090008,
207 mmCGTS_CU4_SP0_CTRL_REG
, 0xffffffff, 0x00010000,
208 mmCGTS_CU4_LDS_SQ_CTRL_REG
, 0xffffffff, 0x00030002,
209 mmCGTS_CU4_TA_SQC_CTRL_REG
, 0xffffffff, 0x00040007,
210 mmCGTS_CU4_SP1_CTRL_REG
, 0xffffffff, 0x00060005,
211 mmCGTS_CU4_TD_TCP_CTRL_REG
, 0xffffffff, 0x00090008,
212 mmCGTS_CU5_SP0_CTRL_REG
, 0xffffffff, 0x00010000,
213 mmCGTS_CU5_LDS_SQ_CTRL_REG
, 0xffffffff, 0x00030002,
214 mmCGTS_CU5_TA_CTRL_REG
, 0xffffffff, 0x00040007,
215 mmCGTS_CU5_SP1_CTRL_REG
, 0xffffffff, 0x00060005,
216 mmCGTS_CU5_TD_TCP_CTRL_REG
, 0xffffffff, 0x00090008,
217 mmCGTS_CU6_SP0_CTRL_REG
, 0xffffffff, 0x00010000,
218 mmCGTS_CU6_LDS_SQ_CTRL_REG
, 0xffffffff, 0x00030002,
219 mmCGTS_CU6_TA_CTRL_REG
, 0xffffffff, 0x00040007,
220 mmCGTS_CU6_SP1_CTRL_REG
, 0xffffffff, 0x00060005,
221 mmCGTS_CU6_TD_TCP_CTRL_REG
, 0xffffffff, 0x00090008,
222 mmCGTS_CU7_SP0_CTRL_REG
, 0xffffffff, 0x00010000,
223 mmCGTS_CU7_LDS_SQ_CTRL_REG
, 0xffffffff, 0x00030002,
224 mmCGTS_CU7_TA_CTRL_REG
, 0xffffffff, 0x00040007,
225 mmCGTS_CU7_SP1_CTRL_REG
, 0xffffffff, 0x00060005,
226 mmCGTS_CU7_TD_TCP_CTRL_REG
, 0xffffffff, 0x00090008,
227 mmCGTS_SM_CTRL_REG
, 0xffffffff, 0x96e00200,
228 mmCP_RB_WPTR_POLL_CNTL
, 0xffffffff, 0x00900100,
229 mmRLC_CGCG_CGLS_CTRL
, 0xffffffff, 0x0020003c,
230 mmCP_MEM_SLP_CNTL
, 0x00000001, 0x00000001,
233 static const u32 fiji_golden_common_all
[] =
235 mmGRBM_GFX_INDEX
, 0xffffffff, 0xe0000000,
236 mmPA_SC_RASTER_CONFIG
, 0xffffffff, 0x3a00161a,
237 mmPA_SC_RASTER_CONFIG_1
, 0xffffffff, 0x0000002e,
238 mmGB_ADDR_CONFIG
, 0xffffffff, 0x22011003,
239 mmSPI_RESOURCE_RESERVE_CU_0
, 0xffffffff, 0x00000800,
240 mmSPI_RESOURCE_RESERVE_CU_1
, 0xffffffff, 0x00000800,
241 mmSPI_RESOURCE_RESERVE_EN_CU_0
, 0xffffffff, 0x00007FBF,
242 mmSPI_RESOURCE_RESERVE_EN_CU_1
, 0xffffffff, 0x00007FAF,
243 mmGRBM_GFX_INDEX
, 0xffffffff, 0xe0000000,
244 mmSPI_CONFIG_CNTL_1
, 0x0000000f, 0x00000009,
247 static const u32 golden_settings_fiji_a10
[] =
249 mmCB_HW_CONTROL_3
, 0x000001ff, 0x00000040,
250 mmDB_DEBUG2
, 0xf00fffff, 0x00000400,
251 mmPA_SC_ENHANCE
, 0xffffffff, 0x20000001,
252 mmPA_SC_LINE_STIPPLE_STATE
, 0x0000ff0f, 0x00000000,
253 mmRLC_CGCG_CGLS_CTRL
, 0x00000003, 0x0001003c,
254 mmSQ_RANDOM_WAVE_PRI
, 0x001fffff, 0x000006fd,
255 mmTA_CNTL_AUX
, 0x000f000f, 0x000b0000,
256 mmTCC_CTRL
, 0x00100000, 0xf31fff7f,
257 mmTCC_EXE_DISABLE
, 0x00000002, 0x00000002,
258 mmTCP_ADDR_CONFIG
, 0x000003ff, 0x000000ff,
259 mmVGT_RESET_DEBUG
, 0x00000004, 0x00000004,
262 static const u32 fiji_mgcg_cgcg_init
[] =
264 mmRLC_CGTT_MGCG_OVERRIDE
, 0xffffffff, 0xffffffff,
265 mmGRBM_GFX_INDEX
, 0xffffffff, 0xe0000000,
266 mmCB_CGTT_SCLK_CTRL
, 0xffffffff, 0x00000100,
267 mmCGTT_BCI_CLK_CTRL
, 0xffffffff, 0x00000100,
268 mmCGTT_CP_CLK_CTRL
, 0xffffffff, 0x00000100,
269 mmCGTT_CPC_CLK_CTRL
, 0xffffffff, 0x00000100,
270 mmCGTT_CPF_CLK_CTRL
, 0xffffffff, 0x40000100,
271 mmCGTT_GDS_CLK_CTRL
, 0xffffffff, 0x00000100,
272 mmCGTT_IA_CLK_CTRL
, 0xffffffff, 0x06000100,
273 mmCGTT_PA_CLK_CTRL
, 0xffffffff, 0x00000100,
274 mmCGTT_WD_CLK_CTRL
, 0xffffffff, 0x06000100,
275 mmCGTT_PC_CLK_CTRL
, 0xffffffff, 0x00000100,
276 mmCGTT_RLC_CLK_CTRL
, 0xffffffff, 0x00000100,
277 mmCGTT_SC_CLK_CTRL
, 0xffffffff, 0x00000100,
278 mmCGTT_SPI_CLK_CTRL
, 0xffffffff, 0x00000100,
279 mmCGTT_SQ_CLK_CTRL
, 0xffffffff, 0x00000100,
280 mmCGTT_SQG_CLK_CTRL
, 0xffffffff, 0x00000100,
281 mmCGTT_SX_CLK_CTRL0
, 0xffffffff, 0x00000100,
282 mmCGTT_SX_CLK_CTRL1
, 0xffffffff, 0x00000100,
283 mmCGTT_SX_CLK_CTRL2
, 0xffffffff, 0x00000100,
284 mmCGTT_SX_CLK_CTRL3
, 0xffffffff, 0x00000100,
285 mmCGTT_SX_CLK_CTRL4
, 0xffffffff, 0x00000100,
286 mmCGTT_TCI_CLK_CTRL
, 0xffffffff, 0x00000100,
287 mmCGTT_TCP_CLK_CTRL
, 0xffffffff, 0x00000100,
288 mmCGTT_VGT_CLK_CTRL
, 0xffffffff, 0x06000100,
289 mmDB_CGTT_CLK_CTRL_0
, 0xffffffff, 0x00000100,
290 mmTA_CGTT_CTRL
, 0xffffffff, 0x00000100,
291 mmTCA_CGTT_SCLK_CTRL
, 0xffffffff, 0x00000100,
292 mmTCC_CGTT_SCLK_CTRL
, 0xffffffff, 0x00000100,
293 mmTD_CGTT_CTRL
, 0xffffffff, 0x00000100,
294 mmGRBM_GFX_INDEX
, 0xffffffff, 0xe0000000,
295 mmCGTS_SM_CTRL_REG
, 0xffffffff, 0x96e00200,
296 mmCP_RB_WPTR_POLL_CNTL
, 0xffffffff, 0x00900100,
297 mmRLC_CGCG_CGLS_CTRL
, 0xffffffff, 0x0020003c,
298 mmCP_MEM_SLP_CNTL
, 0x00000001, 0x00000001,
301 static const u32 golden_settings_iceland_a11
[] =
303 mmCB_HW_CONTROL_3
, 0x00000040, 0x00000040,
304 mmDB_DEBUG2
, 0xf00fffff, 0x00000400,
305 mmDB_DEBUG3
, 0xc0000000, 0xc0000000,
306 mmGB_GPU_ID
, 0x0000000f, 0x00000000,
307 mmPA_SC_ENHANCE
, 0xffffffff, 0x20000001,
308 mmPA_SC_LINE_STIPPLE_STATE
, 0x0000ff0f, 0x00000000,
309 mmPA_SC_RASTER_CONFIG
, 0x3f3fffff, 0x00000002,
310 mmPA_SC_RASTER_CONFIG_1
, 0x0000003f, 0x00000000,
311 mmSQ_RANDOM_WAVE_PRI
, 0x001fffff, 0x000006fd,
312 mmTA_CNTL_AUX
, 0x000f000f, 0x000b0000,
313 mmTCC_CTRL
, 0x00100000, 0xf31fff7f,
314 mmTCC_EXE_DISABLE
, 0x00000002, 0x00000002,
315 mmTCP_ADDR_CONFIG
, 0x000003ff, 0x000000f1,
316 mmTCP_CHAN_STEER_HI
, 0xffffffff, 0x00000000,
317 mmTCP_CHAN_STEER_LO
, 0xffffffff, 0x00000010,
320 static const u32 iceland_golden_common_all
[] =
322 mmGRBM_GFX_INDEX
, 0xffffffff, 0xe0000000,
323 mmPA_SC_RASTER_CONFIG
, 0xffffffff, 0x00000002,
324 mmPA_SC_RASTER_CONFIG_1
, 0xffffffff, 0x00000000,
325 mmGB_ADDR_CONFIG
, 0xffffffff, 0x22010001,
326 mmSPI_RESOURCE_RESERVE_CU_0
, 0xffffffff, 0x00000800,
327 mmSPI_RESOURCE_RESERVE_CU_1
, 0xffffffff, 0x00000800,
328 mmSPI_RESOURCE_RESERVE_EN_CU_0
, 0xffffffff, 0x00007FBF,
329 mmSPI_RESOURCE_RESERVE_EN_CU_1
, 0xffffffff, 0x00007FAF
332 static const u32 iceland_mgcg_cgcg_init
[] =
334 mmRLC_CGTT_MGCG_OVERRIDE
, 0xffffffff, 0xffffffff,
335 mmGRBM_GFX_INDEX
, 0xffffffff, 0xe0000000,
336 mmCB_CGTT_SCLK_CTRL
, 0xffffffff, 0x00000100,
337 mmCGTT_BCI_CLK_CTRL
, 0xffffffff, 0x00000100,
338 mmCGTT_CP_CLK_CTRL
, 0xffffffff, 0xc0000100,
339 mmCGTT_CPC_CLK_CTRL
, 0xffffffff, 0xc0000100,
340 mmCGTT_CPF_CLK_CTRL
, 0xffffffff, 0xc0000100,
341 mmCGTT_GDS_CLK_CTRL
, 0xffffffff, 0x00000100,
342 mmCGTT_IA_CLK_CTRL
, 0xffffffff, 0x06000100,
343 mmCGTT_PA_CLK_CTRL
, 0xffffffff, 0x00000100,
344 mmCGTT_WD_CLK_CTRL
, 0xffffffff, 0x06000100,
345 mmCGTT_PC_CLK_CTRL
, 0xffffffff, 0x00000100,
346 mmCGTT_RLC_CLK_CTRL
, 0xffffffff, 0x00000100,
347 mmCGTT_SC_CLK_CTRL
, 0xffffffff, 0x00000100,
348 mmCGTT_SPI_CLK_CTRL
, 0xffffffff, 0x00000100,
349 mmCGTT_SQ_CLK_CTRL
, 0xffffffff, 0x00000100,
350 mmCGTT_SQG_CLK_CTRL
, 0xffffffff, 0x00000100,
351 mmCGTT_SX_CLK_CTRL0
, 0xffffffff, 0x00000100,
352 mmCGTT_SX_CLK_CTRL1
, 0xffffffff, 0x00000100,
353 mmCGTT_SX_CLK_CTRL2
, 0xffffffff, 0x00000100,
354 mmCGTT_SX_CLK_CTRL3
, 0xffffffff, 0x00000100,
355 mmCGTT_SX_CLK_CTRL4
, 0xffffffff, 0x00000100,
356 mmCGTT_TCI_CLK_CTRL
, 0xffffffff, 0xff000100,
357 mmCGTT_TCP_CLK_CTRL
, 0xffffffff, 0x00000100,
358 mmCGTT_VGT_CLK_CTRL
, 0xffffffff, 0x06000100,
359 mmDB_CGTT_CLK_CTRL_0
, 0xffffffff, 0x00000100,
360 mmTA_CGTT_CTRL
, 0xffffffff, 0x00000100,
361 mmTCA_CGTT_SCLK_CTRL
, 0xffffffff, 0x00000100,
362 mmTCC_CGTT_SCLK_CTRL
, 0xffffffff, 0x00000100,
363 mmTD_CGTT_CTRL
, 0xffffffff, 0x00000100,
364 mmGRBM_GFX_INDEX
, 0xffffffff, 0xe0000000,
365 mmCGTS_CU0_SP0_CTRL_REG
, 0xffffffff, 0x00010000,
366 mmCGTS_CU0_LDS_SQ_CTRL_REG
, 0xffffffff, 0x00030002,
367 mmCGTS_CU0_TA_SQC_CTRL_REG
, 0xffffffff, 0x0f840f87,
368 mmCGTS_CU0_SP1_CTRL_REG
, 0xffffffff, 0x00060005,
369 mmCGTS_CU0_TD_TCP_CTRL_REG
, 0xffffffff, 0x00090008,
370 mmCGTS_CU1_SP0_CTRL_REG
, 0xffffffff, 0x00010000,
371 mmCGTS_CU1_LDS_SQ_CTRL_REG
, 0xffffffff, 0x00030002,
372 mmCGTS_CU1_TA_CTRL_REG
, 0xffffffff, 0x00040007,
373 mmCGTS_CU1_SP1_CTRL_REG
, 0xffffffff, 0x00060005,
374 mmCGTS_CU1_TD_TCP_CTRL_REG
, 0xffffffff, 0x00090008,
375 mmCGTS_CU2_SP0_CTRL_REG
, 0xffffffff, 0x00010000,
376 mmCGTS_CU2_LDS_SQ_CTRL_REG
, 0xffffffff, 0x00030002,
377 mmCGTS_CU2_TA_CTRL_REG
, 0xffffffff, 0x00040007,
378 mmCGTS_CU2_SP1_CTRL_REG
, 0xffffffff, 0x00060005,
379 mmCGTS_CU2_TD_TCP_CTRL_REG
, 0xffffffff, 0x00090008,
380 mmCGTS_CU3_SP0_CTRL_REG
, 0xffffffff, 0x00010000,
381 mmCGTS_CU3_LDS_SQ_CTRL_REG
, 0xffffffff, 0x00030002,
382 mmCGTS_CU3_TA_CTRL_REG
, 0xffffffff, 0x00040007,
383 mmCGTS_CU3_SP1_CTRL_REG
, 0xffffffff, 0x00060005,
384 mmCGTS_CU3_TD_TCP_CTRL_REG
, 0xffffffff, 0x00090008,
385 mmCGTS_CU4_SP0_CTRL_REG
, 0xffffffff, 0x00010000,
386 mmCGTS_CU4_LDS_SQ_CTRL_REG
, 0xffffffff, 0x00030002,
387 mmCGTS_CU4_TA_SQC_CTRL_REG
, 0xffffffff, 0x0f840f87,
388 mmCGTS_CU4_SP1_CTRL_REG
, 0xffffffff, 0x00060005,
389 mmCGTS_CU4_TD_TCP_CTRL_REG
, 0xffffffff, 0x00090008,
390 mmCGTS_CU5_SP0_CTRL_REG
, 0xffffffff, 0x00010000,
391 mmCGTS_CU5_LDS_SQ_CTRL_REG
, 0xffffffff, 0x00030002,
392 mmCGTS_CU5_TA_CTRL_REG
, 0xffffffff, 0x00040007,
393 mmCGTS_CU5_SP1_CTRL_REG
, 0xffffffff, 0x00060005,
394 mmCGTS_CU5_TD_TCP_CTRL_REG
, 0xffffffff, 0x00090008,
395 mmCGTS_SM_CTRL_REG
, 0xffffffff, 0x96e00200,
396 mmCP_RB_WPTR_POLL_CNTL
, 0xffffffff, 0x00900100,
397 mmRLC_CGCG_CGLS_CTRL
, 0xffffffff, 0x0020003c,
400 static const u32 cz_golden_settings_a11
[] =
402 mmCB_HW_CONTROL_3
, 0x00000040, 0x00000040,
403 mmDB_DEBUG2
, 0xf00fffff, 0x00000400,
404 mmGB_GPU_ID
, 0x0000000f, 0x00000000,
405 mmPA_SC_ENHANCE
, 0xffffffff, 0x00000001,
406 mmPA_SC_LINE_STIPPLE_STATE
, 0x0000ff0f, 0x00000000,
407 mmSQ_RANDOM_WAVE_PRI
, 0x001fffff, 0x000006fd,
408 mmTA_CNTL_AUX
, 0x000f000f, 0x00010000,
409 mmTCC_EXE_DISABLE
, 0x00000002, 0x00000002,
410 mmTCP_ADDR_CONFIG
, 0x0000000f, 0x000000f3,
411 mmTCP_CHAN_STEER_LO
, 0xffffffff, 0x00001302
414 static const u32 cz_golden_common_all
[] =
416 mmGRBM_GFX_INDEX
, 0xffffffff, 0xe0000000,
417 mmPA_SC_RASTER_CONFIG
, 0xffffffff, 0x00000002,
418 mmPA_SC_RASTER_CONFIG_1
, 0xffffffff, 0x00000000,
419 mmGB_ADDR_CONFIG
, 0xffffffff, 0x22010001,
420 mmSPI_RESOURCE_RESERVE_CU_0
, 0xffffffff, 0x00000800,
421 mmSPI_RESOURCE_RESERVE_CU_1
, 0xffffffff, 0x00000800,
422 mmSPI_RESOURCE_RESERVE_EN_CU_0
, 0xffffffff, 0x00007FBF,
423 mmSPI_RESOURCE_RESERVE_EN_CU_1
, 0xffffffff, 0x00007FAF
426 static const u32 cz_mgcg_cgcg_init
[] =
428 mmRLC_CGTT_MGCG_OVERRIDE
, 0xffffffff, 0xffffffff,
429 mmGRBM_GFX_INDEX
, 0xffffffff, 0xe0000000,
430 mmCB_CGTT_SCLK_CTRL
, 0xffffffff, 0x00000100,
431 mmCGTT_BCI_CLK_CTRL
, 0xffffffff, 0x00000100,
432 mmCGTT_CP_CLK_CTRL
, 0xffffffff, 0x00000100,
433 mmCGTT_CPC_CLK_CTRL
, 0xffffffff, 0x00000100,
434 mmCGTT_CPF_CLK_CTRL
, 0xffffffff, 0x00000100,
435 mmCGTT_GDS_CLK_CTRL
, 0xffffffff, 0x00000100,
436 mmCGTT_IA_CLK_CTRL
, 0xffffffff, 0x06000100,
437 mmCGTT_PA_CLK_CTRL
, 0xffffffff, 0x00000100,
438 mmCGTT_WD_CLK_CTRL
, 0xffffffff, 0x06000100,
439 mmCGTT_PC_CLK_CTRL
, 0xffffffff, 0x00000100,
440 mmCGTT_RLC_CLK_CTRL
, 0xffffffff, 0x00000100,
441 mmCGTT_SC_CLK_CTRL
, 0xffffffff, 0x00000100,
442 mmCGTT_SPI_CLK_CTRL
, 0xffffffff, 0x00000100,
443 mmCGTT_SQ_CLK_CTRL
, 0xffffffff, 0x00000100,
444 mmCGTT_SQG_CLK_CTRL
, 0xffffffff, 0x00000100,
445 mmCGTT_SX_CLK_CTRL0
, 0xffffffff, 0x00000100,
446 mmCGTT_SX_CLK_CTRL1
, 0xffffffff, 0x00000100,
447 mmCGTT_SX_CLK_CTRL2
, 0xffffffff, 0x00000100,
448 mmCGTT_SX_CLK_CTRL3
, 0xffffffff, 0x00000100,
449 mmCGTT_SX_CLK_CTRL4
, 0xffffffff, 0x00000100,
450 mmCGTT_TCI_CLK_CTRL
, 0xffffffff, 0x00000100,
451 mmCGTT_TCP_CLK_CTRL
, 0xffffffff, 0x00000100,
452 mmCGTT_VGT_CLK_CTRL
, 0xffffffff, 0x06000100,
453 mmDB_CGTT_CLK_CTRL_0
, 0xffffffff, 0x00000100,
454 mmTA_CGTT_CTRL
, 0xffffffff, 0x00000100,
455 mmTCA_CGTT_SCLK_CTRL
, 0xffffffff, 0x00000100,
456 mmTCC_CGTT_SCLK_CTRL
, 0xffffffff, 0x00000100,
457 mmTD_CGTT_CTRL
, 0xffffffff, 0x00000100,
458 mmGRBM_GFX_INDEX
, 0xffffffff, 0xe0000000,
459 mmCGTS_CU0_SP0_CTRL_REG
, 0xffffffff, 0x00010000,
460 mmCGTS_CU0_LDS_SQ_CTRL_REG
, 0xffffffff, 0x00030002,
461 mmCGTS_CU0_TA_SQC_CTRL_REG
, 0xffffffff, 0x00040007,
462 mmCGTS_CU0_SP1_CTRL_REG
, 0xffffffff, 0x00060005,
463 mmCGTS_CU0_TD_TCP_CTRL_REG
, 0xffffffff, 0x00090008,
464 mmCGTS_CU1_SP0_CTRL_REG
, 0xffffffff, 0x00010000,
465 mmCGTS_CU1_LDS_SQ_CTRL_REG
, 0xffffffff, 0x00030002,
466 mmCGTS_CU1_TA_CTRL_REG
, 0xffffffff, 0x00040007,
467 mmCGTS_CU1_SP1_CTRL_REG
, 0xffffffff, 0x00060005,
468 mmCGTS_CU1_TD_TCP_CTRL_REG
, 0xffffffff, 0x00090008,
469 mmCGTS_CU2_SP0_CTRL_REG
, 0xffffffff, 0x00010000,
470 mmCGTS_CU2_LDS_SQ_CTRL_REG
, 0xffffffff, 0x00030002,
471 mmCGTS_CU2_TA_CTRL_REG
, 0xffffffff, 0x00040007,
472 mmCGTS_CU2_SP1_CTRL_REG
, 0xffffffff, 0x00060005,
473 mmCGTS_CU2_TD_TCP_CTRL_REG
, 0xffffffff, 0x00090008,
474 mmCGTS_CU3_SP0_CTRL_REG
, 0xffffffff, 0x00010000,
475 mmCGTS_CU3_LDS_SQ_CTRL_REG
, 0xffffffff, 0x00030002,
476 mmCGTS_CU3_TA_CTRL_REG
, 0xffffffff, 0x00040007,
477 mmCGTS_CU3_SP1_CTRL_REG
, 0xffffffff, 0x00060005,
478 mmCGTS_CU3_TD_TCP_CTRL_REG
, 0xffffffff, 0x00090008,
479 mmCGTS_CU4_SP0_CTRL_REG
, 0xffffffff, 0x00010000,
480 mmCGTS_CU4_LDS_SQ_CTRL_REG
, 0xffffffff, 0x00030002,
481 mmCGTS_CU4_TA_SQC_CTRL_REG
, 0xffffffff, 0x00040007,
482 mmCGTS_CU4_SP1_CTRL_REG
, 0xffffffff, 0x00060005,
483 mmCGTS_CU4_TD_TCP_CTRL_REG
, 0xffffffff, 0x00090008,
484 mmCGTS_CU5_SP0_CTRL_REG
, 0xffffffff, 0x00010000,
485 mmCGTS_CU5_LDS_SQ_CTRL_REG
, 0xffffffff, 0x00030002,
486 mmCGTS_CU5_TA_CTRL_REG
, 0xffffffff, 0x00040007,
487 mmCGTS_CU5_SP1_CTRL_REG
, 0xffffffff, 0x00060005,
488 mmCGTS_CU5_TD_TCP_CTRL_REG
, 0xffffffff, 0x00090008,
489 mmCGTS_CU6_SP0_CTRL_REG
, 0xffffffff, 0x00010000,
490 mmCGTS_CU6_LDS_SQ_CTRL_REG
, 0xffffffff, 0x00030002,
491 mmCGTS_CU6_TA_CTRL_REG
, 0xffffffff, 0x00040007,
492 mmCGTS_CU6_SP1_CTRL_REG
, 0xffffffff, 0x00060005,
493 mmCGTS_CU6_TD_TCP_CTRL_REG
, 0xffffffff, 0x00090008,
494 mmCGTS_CU7_SP0_CTRL_REG
, 0xffffffff, 0x00010000,
495 mmCGTS_CU7_LDS_SQ_CTRL_REG
, 0xffffffff, 0x00030002,
496 mmCGTS_CU7_TA_CTRL_REG
, 0xffffffff, 0x00040007,
497 mmCGTS_CU7_SP1_CTRL_REG
, 0xffffffff, 0x00060005,
498 mmCGTS_CU7_TD_TCP_CTRL_REG
, 0xffffffff, 0x00090008,
499 mmCGTS_SM_CTRL_REG
, 0xffffffff, 0x96e00200,
500 mmCP_RB_WPTR_POLL_CNTL
, 0xffffffff, 0x00900100,
501 mmRLC_CGCG_CGLS_CTRL
, 0xffffffff, 0x0020003f,
502 mmCP_MEM_SLP_CNTL
, 0x00000001, 0x00000001,
505 static const u32 stoney_golden_settings_a11
[] =
507 mmDB_DEBUG2
, 0xf00fffff, 0x00000400,
508 mmGB_GPU_ID
, 0x0000000f, 0x00000000,
509 mmPA_SC_ENHANCE
, 0xffffffff, 0x20000001,
510 mmPA_SC_LINE_STIPPLE_STATE
, 0x0000ff0f, 0x00000000,
511 mmRLC_CGCG_CGLS_CTRL
, 0x00000003, 0x0001003c,
512 mmTA_CNTL_AUX
, 0x000f000f, 0x000b0000,
513 mmTCC_CTRL
, 0x00100000, 0xf31fff7f,
514 mmTCC_EXE_DISABLE
, 0x00000002, 0x00000002,
515 mmTCP_ADDR_CONFIG
, 0x0000000f, 0x000000f1,
516 mmTCP_CHAN_STEER_LO
, 0xffffffff, 0x10101010,
519 static const u32 stoney_golden_common_all
[] =
521 mmGRBM_GFX_INDEX
, 0xffffffff, 0xe0000000,
522 mmPA_SC_RASTER_CONFIG
, 0xffffffff, 0x00000000,
523 mmPA_SC_RASTER_CONFIG_1
, 0xffffffff, 0x00000000,
524 mmGB_ADDR_CONFIG
, 0xffffffff, 0x12010001,
525 mmSPI_RESOURCE_RESERVE_CU_0
, 0xffffffff, 0x00000800,
526 mmSPI_RESOURCE_RESERVE_CU_1
, 0xffffffff, 0x00000800,
527 mmSPI_RESOURCE_RESERVE_EN_CU_0
, 0xffffffff, 0x00007FBF,
528 mmSPI_RESOURCE_RESERVE_EN_CU_1
, 0xffffffff, 0x00007FAF,
531 static const u32 stoney_mgcg_cgcg_init
[] =
533 mmGRBM_GFX_INDEX
, 0xffffffff, 0xe0000000,
534 mmRLC_CGCG_CGLS_CTRL
, 0xffffffff, 0x0020003f,
535 mmCP_MEM_SLP_CNTL
, 0xffffffff, 0x00020201,
536 mmRLC_MEM_SLP_CNTL
, 0xffffffff, 0x00020201,
537 mmCGTS_SM_CTRL_REG
, 0xffffffff, 0x96940200,
538 mmATC_MISC_CG
, 0xffffffff, 0x000c0200,
/* Forward declarations for callbacks installed during IP early init. */
static void gfx_v8_0_set_ring_funcs(struct amdgpu_device *adev);
static void gfx_v8_0_set_irq_funcs(struct amdgpu_device *adev);
static void gfx_v8_0_set_gds_init(struct amdgpu_device *adev);
545 static void gfx_v8_0_init_golden_registers(struct amdgpu_device
*adev
)
547 switch (adev
->asic_type
) {
549 amdgpu_program_register_sequence(adev
,
550 iceland_mgcg_cgcg_init
,
551 (const u32
)ARRAY_SIZE(iceland_mgcg_cgcg_init
));
552 amdgpu_program_register_sequence(adev
,
553 golden_settings_iceland_a11
,
554 (const u32
)ARRAY_SIZE(golden_settings_iceland_a11
));
555 amdgpu_program_register_sequence(adev
,
556 iceland_golden_common_all
,
557 (const u32
)ARRAY_SIZE(iceland_golden_common_all
));
560 amdgpu_program_register_sequence(adev
,
562 (const u32
)ARRAY_SIZE(fiji_mgcg_cgcg_init
));
563 amdgpu_program_register_sequence(adev
,
564 golden_settings_fiji_a10
,
565 (const u32
)ARRAY_SIZE(golden_settings_fiji_a10
));
566 amdgpu_program_register_sequence(adev
,
567 fiji_golden_common_all
,
568 (const u32
)ARRAY_SIZE(fiji_golden_common_all
));
572 amdgpu_program_register_sequence(adev
,
573 tonga_mgcg_cgcg_init
,
574 (const u32
)ARRAY_SIZE(tonga_mgcg_cgcg_init
));
575 amdgpu_program_register_sequence(adev
,
576 golden_settings_tonga_a11
,
577 (const u32
)ARRAY_SIZE(golden_settings_tonga_a11
));
578 amdgpu_program_register_sequence(adev
,
579 tonga_golden_common_all
,
580 (const u32
)ARRAY_SIZE(tonga_golden_common_all
));
583 amdgpu_program_register_sequence(adev
,
585 (const u32
)ARRAY_SIZE(cz_mgcg_cgcg_init
));
586 amdgpu_program_register_sequence(adev
,
587 cz_golden_settings_a11
,
588 (const u32
)ARRAY_SIZE(cz_golden_settings_a11
));
589 amdgpu_program_register_sequence(adev
,
590 cz_golden_common_all
,
591 (const u32
)ARRAY_SIZE(cz_golden_common_all
));
594 amdgpu_program_register_sequence(adev
,
595 stoney_mgcg_cgcg_init
,
596 (const u32
)ARRAY_SIZE(stoney_mgcg_cgcg_init
));
597 amdgpu_program_register_sequence(adev
,
598 stoney_golden_settings_a11
,
599 (const u32
)ARRAY_SIZE(stoney_golden_settings_a11
));
600 amdgpu_program_register_sequence(adev
,
601 stoney_golden_common_all
,
602 (const u32
)ARRAY_SIZE(stoney_golden_common_all
));
609 static void gfx_v8_0_scratch_init(struct amdgpu_device
*adev
)
613 adev
->gfx
.scratch
.num_reg
= 7;
614 adev
->gfx
.scratch
.reg_base
= mmSCRATCH_REG0
;
615 for (i
= 0; i
< adev
->gfx
.scratch
.num_reg
; i
++) {
616 adev
->gfx
.scratch
.free
[i
] = true;
617 adev
->gfx
.scratch
.reg
[i
] = adev
->gfx
.scratch
.reg_base
+ i
;
621 static int gfx_v8_0_ring_test_ring(struct amdgpu_ring
*ring
)
623 struct amdgpu_device
*adev
= ring
->adev
;
629 r
= amdgpu_gfx_scratch_get(adev
, &scratch
);
631 DRM_ERROR("amdgpu: cp failed to get scratch reg (%d).\n", r
);
634 WREG32(scratch
, 0xCAFEDEAD);
635 r
= amdgpu_ring_lock(ring
, 3);
637 DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
639 amdgpu_gfx_scratch_free(adev
, scratch
);
642 amdgpu_ring_write(ring
, PACKET3(PACKET3_SET_UCONFIG_REG
, 1));
643 amdgpu_ring_write(ring
, (scratch
- PACKET3_SET_UCONFIG_REG_START
));
644 amdgpu_ring_write(ring
, 0xDEADBEEF);
645 amdgpu_ring_unlock_commit(ring
);
647 for (i
= 0; i
< adev
->usec_timeout
; i
++) {
648 tmp
= RREG32(scratch
);
649 if (tmp
== 0xDEADBEEF)
653 if (i
< adev
->usec_timeout
) {
654 DRM_INFO("ring test on %d succeeded in %d usecs\n",
657 DRM_ERROR("amdgpu: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
658 ring
->idx
, scratch
, tmp
);
661 amdgpu_gfx_scratch_free(adev
, scratch
);
665 static int gfx_v8_0_ring_test_ib(struct amdgpu_ring
*ring
)
667 struct amdgpu_device
*adev
= ring
->adev
;
669 struct fence
*f
= NULL
;
675 r
= amdgpu_gfx_scratch_get(adev
, &scratch
);
677 DRM_ERROR("amdgpu: failed to get scratch reg (%d).\n", r
);
680 WREG32(scratch
, 0xCAFEDEAD);
681 memset(&ib
, 0, sizeof(ib
));
682 r
= amdgpu_ib_get(ring
, NULL
, 256, &ib
);
684 DRM_ERROR("amdgpu: failed to get ib (%d).\n", r
);
687 ib
.ptr
[0] = PACKET3(PACKET3_SET_UCONFIG_REG
, 1);
688 ib
.ptr
[1] = ((scratch
- PACKET3_SET_UCONFIG_REG_START
));
689 ib
.ptr
[2] = 0xDEADBEEF;
692 r
= amdgpu_sched_ib_submit_kernel_helper(adev
, ring
, &ib
, 1, NULL
,
693 AMDGPU_FENCE_OWNER_UNDEFINED
,
698 r
= fence_wait(f
, false);
700 DRM_ERROR("amdgpu: fence wait failed (%d).\n", r
);
703 for (i
= 0; i
< adev
->usec_timeout
; i
++) {
704 tmp
= RREG32(scratch
);
705 if (tmp
== 0xDEADBEEF)
709 if (i
< adev
->usec_timeout
) {
710 DRM_INFO("ib test on ring %d succeeded in %u usecs\n",
714 DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n",
720 amdgpu_ib_free(adev
, &ib
);
722 amdgpu_gfx_scratch_free(adev
, scratch
);
726 static int gfx_v8_0_init_microcode(struct amdgpu_device
*adev
)
728 const char *chip_name
;
731 struct amdgpu_firmware_info
*info
= NULL
;
732 const struct common_firmware_header
*header
= NULL
;
733 const struct gfx_firmware_header_v1_0
*cp_hdr
;
737 switch (adev
->asic_type
) {
745 chip_name
= "carrizo";
751 chip_name
= "stoney";
757 snprintf(fw_name
, sizeof(fw_name
), "amdgpu/%s_pfp.bin", chip_name
);
758 err
= request_firmware(&adev
->gfx
.pfp_fw
, fw_name
, adev
->dev
);
761 err
= amdgpu_ucode_validate(adev
->gfx
.pfp_fw
);
764 cp_hdr
= (const struct gfx_firmware_header_v1_0
*)adev
->gfx
.pfp_fw
->data
;
765 adev
->gfx
.pfp_fw_version
= le32_to_cpu(cp_hdr
->header
.ucode_version
);
766 adev
->gfx
.pfp_feature_version
= le32_to_cpu(cp_hdr
->ucode_feature_version
);
768 snprintf(fw_name
, sizeof(fw_name
), "amdgpu/%s_me.bin", chip_name
);
769 err
= request_firmware(&adev
->gfx
.me_fw
, fw_name
, adev
->dev
);
772 err
= amdgpu_ucode_validate(adev
->gfx
.me_fw
);
775 cp_hdr
= (const struct gfx_firmware_header_v1_0
*)adev
->gfx
.me_fw
->data
;
776 adev
->gfx
.me_fw_version
= le32_to_cpu(cp_hdr
->header
.ucode_version
);
777 adev
->gfx
.me_feature_version
= le32_to_cpu(cp_hdr
->ucode_feature_version
);
779 snprintf(fw_name
, sizeof(fw_name
), "amdgpu/%s_ce.bin", chip_name
);
780 err
= request_firmware(&adev
->gfx
.ce_fw
, fw_name
, adev
->dev
);
783 err
= amdgpu_ucode_validate(adev
->gfx
.ce_fw
);
786 cp_hdr
= (const struct gfx_firmware_header_v1_0
*)adev
->gfx
.ce_fw
->data
;
787 adev
->gfx
.ce_fw_version
= le32_to_cpu(cp_hdr
->header
.ucode_version
);
788 adev
->gfx
.ce_feature_version
= le32_to_cpu(cp_hdr
->ucode_feature_version
);
790 snprintf(fw_name
, sizeof(fw_name
), "amdgpu/%s_rlc.bin", chip_name
);
791 err
= request_firmware(&adev
->gfx
.rlc_fw
, fw_name
, adev
->dev
);
794 err
= amdgpu_ucode_validate(adev
->gfx
.rlc_fw
);
795 cp_hdr
= (const struct gfx_firmware_header_v1_0
*)adev
->gfx
.rlc_fw
->data
;
796 adev
->gfx
.rlc_fw_version
= le32_to_cpu(cp_hdr
->header
.ucode_version
);
797 adev
->gfx
.rlc_feature_version
= le32_to_cpu(cp_hdr
->ucode_feature_version
);
799 snprintf(fw_name
, sizeof(fw_name
), "amdgpu/%s_mec.bin", chip_name
);
800 err
= request_firmware(&adev
->gfx
.mec_fw
, fw_name
, adev
->dev
);
803 err
= amdgpu_ucode_validate(adev
->gfx
.mec_fw
);
806 cp_hdr
= (const struct gfx_firmware_header_v1_0
*)adev
->gfx
.mec_fw
->data
;
807 adev
->gfx
.mec_fw_version
= le32_to_cpu(cp_hdr
->header
.ucode_version
);
808 adev
->gfx
.mec_feature_version
= le32_to_cpu(cp_hdr
->ucode_feature_version
);
810 if (adev
->asic_type
!= CHIP_STONEY
) {
811 snprintf(fw_name
, sizeof(fw_name
), "amdgpu/%s_mec2.bin", chip_name
);
812 err
= request_firmware(&adev
->gfx
.mec2_fw
, fw_name
, adev
->dev
);
814 err
= amdgpu_ucode_validate(adev
->gfx
.mec2_fw
);
817 cp_hdr
= (const struct gfx_firmware_header_v1_0
*)
818 adev
->gfx
.mec2_fw
->data
;
819 adev
->gfx
.mec2_fw_version
=
820 le32_to_cpu(cp_hdr
->header
.ucode_version
);
821 adev
->gfx
.mec2_feature_version
=
822 le32_to_cpu(cp_hdr
->ucode_feature_version
);
825 adev
->gfx
.mec2_fw
= NULL
;
829 if (adev
->firmware
.smu_load
) {
830 info
= &adev
->firmware
.ucode
[AMDGPU_UCODE_ID_CP_PFP
];
831 info
->ucode_id
= AMDGPU_UCODE_ID_CP_PFP
;
832 info
->fw
= adev
->gfx
.pfp_fw
;
833 header
= (const struct common_firmware_header
*)info
->fw
->data
;
834 adev
->firmware
.fw_size
+=
835 ALIGN(le32_to_cpu(header
->ucode_size_bytes
), PAGE_SIZE
);
837 info
= &adev
->firmware
.ucode
[AMDGPU_UCODE_ID_CP_ME
];
838 info
->ucode_id
= AMDGPU_UCODE_ID_CP_ME
;
839 info
->fw
= adev
->gfx
.me_fw
;
840 header
= (const struct common_firmware_header
*)info
->fw
->data
;
841 adev
->firmware
.fw_size
+=
842 ALIGN(le32_to_cpu(header
->ucode_size_bytes
), PAGE_SIZE
);
844 info
= &adev
->firmware
.ucode
[AMDGPU_UCODE_ID_CP_CE
];
845 info
->ucode_id
= AMDGPU_UCODE_ID_CP_CE
;
846 info
->fw
= adev
->gfx
.ce_fw
;
847 header
= (const struct common_firmware_header
*)info
->fw
->data
;
848 adev
->firmware
.fw_size
+=
849 ALIGN(le32_to_cpu(header
->ucode_size_bytes
), PAGE_SIZE
);
851 info
= &adev
->firmware
.ucode
[AMDGPU_UCODE_ID_RLC_G
];
852 info
->ucode_id
= AMDGPU_UCODE_ID_RLC_G
;
853 info
->fw
= adev
->gfx
.rlc_fw
;
854 header
= (const struct common_firmware_header
*)info
->fw
->data
;
855 adev
->firmware
.fw_size
+=
856 ALIGN(le32_to_cpu(header
->ucode_size_bytes
), PAGE_SIZE
);
858 info
= &adev
->firmware
.ucode
[AMDGPU_UCODE_ID_CP_MEC1
];
859 info
->ucode_id
= AMDGPU_UCODE_ID_CP_MEC1
;
860 info
->fw
= adev
->gfx
.mec_fw
;
861 header
= (const struct common_firmware_header
*)info
->fw
->data
;
862 adev
->firmware
.fw_size
+=
863 ALIGN(le32_to_cpu(header
->ucode_size_bytes
), PAGE_SIZE
);
865 if (adev
->gfx
.mec2_fw
) {
866 info
= &adev
->firmware
.ucode
[AMDGPU_UCODE_ID_CP_MEC2
];
867 info
->ucode_id
= AMDGPU_UCODE_ID_CP_MEC2
;
868 info
->fw
= adev
->gfx
.mec2_fw
;
869 header
= (const struct common_firmware_header
*)info
->fw
->data
;
870 adev
->firmware
.fw_size
+=
871 ALIGN(le32_to_cpu(header
->ucode_size_bytes
), PAGE_SIZE
);
879 "gfx8: Failed to load firmware \"%s\"\n",
881 release_firmware(adev
->gfx
.pfp_fw
);
882 adev
->gfx
.pfp_fw
= NULL
;
883 release_firmware(adev
->gfx
.me_fw
);
884 adev
->gfx
.me_fw
= NULL
;
885 release_firmware(adev
->gfx
.ce_fw
);
886 adev
->gfx
.ce_fw
= NULL
;
887 release_firmware(adev
->gfx
.rlc_fw
);
888 adev
->gfx
.rlc_fw
= NULL
;
889 release_firmware(adev
->gfx
.mec_fw
);
890 adev
->gfx
.mec_fw
= NULL
;
891 release_firmware(adev
->gfx
.mec2_fw
);
892 adev
->gfx
.mec2_fw
= NULL
;
/**
 * gfx_v8_0_mec_fini - tear down the compute MEC HPD EOP buffer object
 * @adev: amdgpu device pointer
 *
 * Unpins and releases the HPD EOP BO created by gfx_v8_0_mec_init(), if it
 * exists.  The NULL check makes the function idempotent, so it is safe to
 * call from error paths even when the BO was never created.
 */
static void gfx_v8_0_mec_fini(struct amdgpu_device *adev)
{
	int r;

	if (adev->gfx.mec.hpd_eop_obj) {
		r = amdgpu_bo_reserve(adev->gfx.mec.hpd_eop_obj, false);
		if (unlikely(r != 0))
			dev_warn(adev->dev, "(%d) reserve HPD EOP bo failed\n", r);
		amdgpu_bo_unpin(adev->gfx.mec.hpd_eop_obj);
		amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);

		amdgpu_bo_unref(&adev->gfx.mec.hpd_eop_obj);
		/* Clear the pointer so a later fini/re-init sees no stale BO. */
		adev->gfx.mec.hpd_eop_obj = NULL;
	}
}
913 #define MEC_HPD_SIZE 2048
/**
 * gfx_v8_0_mec_init - allocate and clear the compute MEC HPD EOP buffer
 * @adev: amdgpu device pointer
 *
 * Sets the MEC topology used by the driver (1 MEC, 1 pipe, 8 queues) and
 * allocates, pins and zeroes one GTT buffer object holding the HPD EOP
 * area for those queues.
 *
 * Returns 0 on success or a negative error code; all error paths call
 * gfx_v8_0_mec_fini() to release anything already allocated.
 */
static int gfx_v8_0_mec_init(struct amdgpu_device *adev)
{
	int r;
	u32 *hpd;

	/*
	 * we assign only 1 pipe because all other pipes will
	 * be handled by KFD
	 */
	adev->gfx.mec.num_mec = 1;
	adev->gfx.mec.num_pipe = 1;
	/* 8 queues per pipe on this topology. */
	adev->gfx.mec.num_queue = adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe * 8;

	if (adev->gfx.mec.hpd_eop_obj == NULL) {
		/* MEC_HPD_SIZE * 2 bytes per pipe; kept pinned in GTT. */
		r = amdgpu_bo_create(adev,
				     adev->gfx.mec.num_mec *adev->gfx.mec.num_pipe * MEC_HPD_SIZE * 2,
				     PAGE_SIZE, true,
				     AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
				     &adev->gfx.mec.hpd_eop_obj);
		if (r) {
			dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r);
			return r;
		}
	}

	r = amdgpu_bo_reserve(adev->gfx.mec.hpd_eop_obj, false);
	if (unlikely(r != 0)) {
		gfx_v8_0_mec_fini(adev);
		return r;
	}
	r = amdgpu_bo_pin(adev->gfx.mec.hpd_eop_obj, AMDGPU_GEM_DOMAIN_GTT,
			  &adev->gfx.mec.hpd_eop_gpu_addr);
	if (r) {
		dev_warn(adev->dev, "(%d) pin HDP EOP bo failed\n", r);
		gfx_v8_0_mec_fini(adev);
		return r;
	}
	r = amdgpu_bo_kmap(adev->gfx.mec.hpd_eop_obj, (void **)&hpd);
	if (r) {
		dev_warn(adev->dev, "(%d) map HDP EOP bo failed\n", r);
		gfx_v8_0_mec_fini(adev);
		return r;
	}

	/* Zero the whole HPD EOP area before the CP ever reads it. */
	memset(hpd, 0, adev->gfx.mec.num_mec *adev->gfx.mec.num_pipe * MEC_HPD_SIZE * 2);

	amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
	amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);

	return 0;
}
/**
 * gfx_v8_0_gpu_early_init - set per-ASIC gfx configuration limits
 * @adev: amdgpu device pointer
 *
 * Fills adev->gfx.config with the static limits for the detected ASIC
 * (shader engines, tile pipes, CUs per SH, FIFO sizes, ...), derives the
 * memory row size — from the fused DRAM bank address mapping on APUs, or
 * from MC_ARB_RAMCFG.NOOFCOLS on dGPUs — and stores the resulting
 * GB_ADDR_CONFIG value.
 *
 * NOTE(review): several case labels in the switches below were
 * reconstructed; verify the ASIC/revision lists against the upstream
 * driver before relying on them.
 */
static void gfx_v8_0_gpu_early_init(struct amdgpu_device *adev)
{
	u32 gb_addr_config;
	u32 mc_shared_chmap, mc_arb_ramcfg;
	u32 dimm00_addr_map, dimm01_addr_map, dimm10_addr_map, dimm11_addr_map;
	u32 tmp;

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		adev->gfx.config.max_shader_engines = 1;
		adev->gfx.config.max_tile_pipes = 2;
		adev->gfx.config.max_cu_per_sh = 6;
		adev->gfx.config.max_sh_per_se = 1;
		adev->gfx.config.max_backends_per_se = 2;
		adev->gfx.config.max_texture_channel_caches = 2;
		adev->gfx.config.max_gprs = 256;
		adev->gfx.config.max_gs_threads = 32;
		adev->gfx.config.max_hw_contexts = 8;

		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = TOPAZ_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_FIJI:
		adev->gfx.config.max_shader_engines = 4;
		adev->gfx.config.max_tile_pipes = 16;
		adev->gfx.config.max_cu_per_sh = 16;
		adev->gfx.config.max_sh_per_se = 1;
		adev->gfx.config.max_backends_per_se = 4;
		adev->gfx.config.max_texture_channel_caches = 16;
		adev->gfx.config.max_gprs = 256;
		adev->gfx.config.max_gs_threads = 32;
		adev->gfx.config.max_hw_contexts = 8;

		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = TONGA_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_TONGA:
		adev->gfx.config.max_shader_engines = 4;
		adev->gfx.config.max_tile_pipes = 8;
		adev->gfx.config.max_cu_per_sh = 8;
		adev->gfx.config.max_sh_per_se = 1;
		adev->gfx.config.max_backends_per_se = 2;
		adev->gfx.config.max_texture_channel_caches = 8;
		adev->gfx.config.max_gprs = 256;
		adev->gfx.config.max_gs_threads = 32;
		adev->gfx.config.max_hw_contexts = 8;

		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = TONGA_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_CARRIZO:
		adev->gfx.config.max_shader_engines = 1;
		adev->gfx.config.max_tile_pipes = 2;
		adev->gfx.config.max_sh_per_se = 1;
		adev->gfx.config.max_backends_per_se = 2;

		/* CU count varies by Carrizo SKU (PCI revision id). */
		switch (adev->pdev->revision) {
		case 0xc4:
		case 0x84:
		case 0xc8:
		case 0xcc:
			/* B10 */
			adev->gfx.config.max_cu_per_sh = 8;
			break;
		case 0xc5:
		case 0x81:
		case 0x85:
		case 0xc9:
		case 0xcd:
			/* B8 */
			adev->gfx.config.max_cu_per_sh = 6;
			break;
		case 0xc6:
		case 0xca:
		case 0xce:
			/* B6 */
			adev->gfx.config.max_cu_per_sh = 6;
			break;
		case 0xc7:
		case 0x87:
		case 0xcb:
		default:
			/* B4 */
			adev->gfx.config.max_cu_per_sh = 4;
			break;
		}

		adev->gfx.config.max_texture_channel_caches = 2;
		adev->gfx.config.max_gprs = 256;
		adev->gfx.config.max_gs_threads = 32;
		adev->gfx.config.max_hw_contexts = 8;

		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = CARRIZO_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_STONEY:
		adev->gfx.config.max_shader_engines = 1;
		adev->gfx.config.max_tile_pipes = 2;
		adev->gfx.config.max_sh_per_se = 1;
		adev->gfx.config.max_backends_per_se = 1;

		/* CU count varies by Stoney SKU (PCI revision id). */
		switch (adev->pdev->revision) {
		case 0xc0:
		case 0xc1:
		case 0xc2:
		case 0xc4:
		case 0xc8:
		case 0xc9:
			adev->gfx.config.max_cu_per_sh = 3;
			break;
		case 0xd0:
		case 0xd1:
		case 0xd2:
		default:
			adev->gfx.config.max_cu_per_sh = 2;
			break;
		}

		adev->gfx.config.max_texture_channel_caches = 2;
		adev->gfx.config.max_gprs = 256;
		adev->gfx.config.max_gs_threads = 16;
		adev->gfx.config.max_hw_contexts = 8;

		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = CARRIZO_GB_ADDR_CONFIG_GOLDEN;
		break;
	default:
		adev->gfx.config.max_shader_engines = 2;
		adev->gfx.config.max_tile_pipes = 4;
		adev->gfx.config.max_cu_per_sh = 2;
		adev->gfx.config.max_sh_per_se = 1;
		adev->gfx.config.max_backends_per_se = 2;
		adev->gfx.config.max_texture_channel_caches = 4;
		adev->gfx.config.max_gprs = 256;
		adev->gfx.config.max_gs_threads = 32;
		adev->gfx.config.max_hw_contexts = 8;

		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = TONGA_GB_ADDR_CONFIG_GOLDEN;
		break;
	}

	/* NOTE(review): mc_shared_chmap is read but not used below. */
	mc_shared_chmap = RREG32(mmMC_SHARED_CHMAP);
	adev->gfx.config.mc_arb_ramcfg = RREG32(mmMC_ARB_RAMCFG);
	mc_arb_ramcfg = adev->gfx.config.mc_arb_ramcfg;

	adev->gfx.config.num_tile_pipes = adev->gfx.config.max_tile_pipes;
	adev->gfx.config.mem_max_burst_length_bytes = 256;
	if (adev->flags & AMD_IS_APU) {
		/* Get memory bank mapping mode. */
		tmp = RREG32(mmMC_FUS_DRAM0_BANK_ADDR_MAPPING);
		dimm00_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM0ADDRMAP);
		dimm01_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM1ADDRMAP);

		tmp = RREG32(mmMC_FUS_DRAM1_BANK_ADDR_MAPPING);
		dimm10_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM1_BANK_ADDR_MAPPING, DIMM0ADDRMAP);
		dimm11_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM1_BANK_ADDR_MAPPING, DIMM1ADDRMAP);

		/* Validate settings in case only one DIMM installed. */
		if ((dimm00_addr_map == 0) || (dimm00_addr_map == 3) || (dimm00_addr_map == 4) || (dimm00_addr_map > 12))
			dimm00_addr_map = 0;
		if ((dimm01_addr_map == 0) || (dimm01_addr_map == 3) || (dimm01_addr_map == 4) || (dimm01_addr_map > 12))
			dimm01_addr_map = 0;
		if ((dimm10_addr_map == 0) || (dimm10_addr_map == 3) || (dimm10_addr_map == 4) || (dimm10_addr_map > 12))
			dimm10_addr_map = 0;
		if ((dimm11_addr_map == 0) || (dimm11_addr_map == 3) || (dimm11_addr_map == 4) || (dimm11_addr_map > 12))
			dimm11_addr_map = 0;

		/* If DIMM Addr map is 8GB, ROW size should be 2KB. Otherwise 1KB. */
		/* If ROW size(DIMM1) != ROW size(DMIMM0), ROW size should be larger one. */
		if ((dimm00_addr_map == 11) || (dimm01_addr_map == 11) || (dimm10_addr_map == 11) || (dimm11_addr_map == 11))
			adev->gfx.config.mem_row_size_in_kb = 2;
		else
			adev->gfx.config.mem_row_size_in_kb = 1;
	} else {
		/* dGPU: derive row size from the number of memory columns. */
		tmp = REG_GET_FIELD(mc_arb_ramcfg, MC_ARB_RAMCFG, NOOFCOLS);
		adev->gfx.config.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
		if (adev->gfx.config.mem_row_size_in_kb > 4)
			adev->gfx.config.mem_row_size_in_kb = 4;
	}

	adev->gfx.config.shader_engine_tile_size = 32;
	adev->gfx.config.num_gpus = 1;
	adev->gfx.config.multi_gpu_tile_size = 64;

	/* fix up row size */
	switch (adev->gfx.config.mem_row_size_in_kb) {
	case 1:
	default:
		gb_addr_config = REG_SET_FIELD(gb_addr_config, GB_ADDR_CONFIG, ROW_SIZE, 0);
		break;
	case 2:
		gb_addr_config = REG_SET_FIELD(gb_addr_config, GB_ADDR_CONFIG, ROW_SIZE, 1);
		break;
	case 4:
		gb_addr_config = REG_SET_FIELD(gb_addr_config, GB_ADDR_CONFIG, ROW_SIZE, 2);
		break;
	}
	adev->gfx.config.gb_addr_config = gb_addr_config;
}
/**
 * gfx_v8_0_sw_init - software-side init for the GFX8 IP block
 * @handle: amdgpu_device pointer (IP-block callback convention)
 *
 * Registers the EOP / privileged-reg / privileged-inst interrupt sources,
 * loads the gfx microcode, allocates the MEC HPD buffer, initializes the
 * gfx and compute rings, reserves the GDS/GWS/OA partitions and runs the
 * early per-ASIC configuration.
 *
 * Returns 0 on success or a negative error code.
 */
static int gfx_v8_0_sw_init(void *handle)
{
	int i, r;
	struct amdgpu_ring *ring;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* EOP Event */
	r = amdgpu_irq_add_id(adev, 181, &adev->gfx.eop_irq);
	if (r)
		return r;

	/* Privileged reg */
	r = amdgpu_irq_add_id(adev, 184, &adev->gfx.priv_reg_irq);
	if (r)
		return r;

	/* Privileged inst */
	r = amdgpu_irq_add_id(adev, 185, &adev->gfx.priv_inst_irq);
	if (r)
		return r;

	adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;

	gfx_v8_0_scratch_init(adev);

	r = gfx_v8_0_init_microcode(adev);
	if (r) {
		DRM_ERROR("Failed to load gfx firmware!\n");
		return r;
	}

	r = gfx_v8_0_mec_init(adev);
	if (r) {
		DRM_ERROR("Failed to init MEC BOs!\n");
		return r;
	}

	/* set up the gfx ring */
	for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
		ring = &adev->gfx.gfx_ring[i];
		ring->ring_obj = NULL;
		sprintf(ring->name, "gfx");
		/* no gfx doorbells on iceland */
		if (adev->asic_type != CHIP_TOPAZ) {
			ring->use_doorbell = true;
			ring->doorbell_index = AMDGPU_DOORBELL_GFX_RING0;
		}

		r = amdgpu_ring_init(adev, ring, 1024 * 1024,
				     PACKET3(PACKET3_NOP, 0x3FFF), 0xf,
				     &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP,
				     AMDGPU_RING_TYPE_GFX);
		if (r)
			return r;
	}

	/* set up the compute queues */
	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
		unsigned irq_type;

		/* max 32 queues per MEC */
		if ((i >= 32) || (i >= AMDGPU_MAX_COMPUTE_RINGS)) {
			DRM_ERROR("Too many (%d) compute rings!\n", i);
			break;
		}
		ring = &adev->gfx.compute_ring[i];
		ring->ring_obj = NULL;
		ring->use_doorbell = true;
		ring->doorbell_index = AMDGPU_DOORBELL_MEC_RING0 + i;
		ring->me = 1; /* first MEC */
		/* 8 queues per pipe: ring index i maps to (pipe, queue). */
		ring->pipe = i / 8;
		ring->queue = i % 8;
		sprintf(ring->name, "comp %d.%d.%d", ring->me, ring->pipe, ring->queue);
		irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP + ring->pipe;
		/* type-2 packets are deprecated on MEC, use type-3 instead */
		r = amdgpu_ring_init(adev, ring, 1024 * 1024,
				     PACKET3(PACKET3_NOP, 0x3FFF), 0xf,
				     &adev->gfx.eop_irq, irq_type,
				     AMDGPU_RING_TYPE_COMPUTE);
		if (r)
			return r;
	}

	/* reserve GDS, GWS and OA resource for gfx */
	r = amdgpu_bo_create(adev, adev->gds.mem.gfx_partition_size,
			PAGE_SIZE, true,
			AMDGPU_GEM_DOMAIN_GDS, 0, NULL,
			NULL, &adev->gds.gds_gfx_bo);
	if (r)
		return r;

	r = amdgpu_bo_create(adev, adev->gds.gws.gfx_partition_size,
		PAGE_SIZE, true,
		AMDGPU_GEM_DOMAIN_GWS, 0, NULL,
		NULL, &adev->gds.gws_gfx_bo);
	if (r)
		return r;

	r = amdgpu_bo_create(adev, adev->gds.oa.gfx_partition_size,
			PAGE_SIZE, true,
			AMDGPU_GEM_DOMAIN_OA, 0, NULL,
			NULL, &adev->gds.oa_gfx_bo);
	if (r)
		return r;

	adev->gfx.ce_ram_size = 0x8000;

	gfx_v8_0_gpu_early_init(adev);

	return 0;
}
/**
 * gfx_v8_0_sw_fini - software-side teardown for the GFX8 IP block
 * @handle: amdgpu_device pointer (IP-block callback convention)
 *
 * Releases the GDS/GWS/OA buffer objects, tears down all gfx and compute
 * rings, and frees the MEC HPD buffer.  Mirrors gfx_v8_0_sw_init() in
 * reverse order.
 *
 * Returns 0.
 */
static int gfx_v8_0_sw_fini(void *handle)
{
	int i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_bo_unref(&adev->gds.oa_gfx_bo);
	amdgpu_bo_unref(&adev->gds.gws_gfx_bo);
	amdgpu_bo_unref(&adev->gds.gds_gfx_bo);

	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
		amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
	for (i = 0; i < adev->gfx.num_compute_rings; i++)
		amdgpu_ring_fini(&adev->gfx.compute_ring[i]);

	gfx_v8_0_mec_fini(adev);

	return 0;
}
1324 static void gfx_v8_0_tiling_mode_table_init(struct amdgpu_device
*adev
)
1326 const u32 num_tile_mode_states
= 32;
1327 const u32 num_secondary_tile_mode_states
= 16;
1328 u32 reg_offset
, gb_tile_moden
, split_equal_to_row_size
;
1330 switch (adev
->gfx
.config
.mem_row_size_in_kb
) {
1332 split_equal_to_row_size
= ADDR_SURF_TILE_SPLIT_1KB
;
1336 split_equal_to_row_size
= ADDR_SURF_TILE_SPLIT_2KB
;
1339 split_equal_to_row_size
= ADDR_SURF_TILE_SPLIT_4KB
;
1343 switch (adev
->asic_type
) {
1345 for (reg_offset
= 0; reg_offset
< num_tile_mode_states
; reg_offset
++) {
1346 switch (reg_offset
) {
1348 gb_tile_moden
= (ARRAY_MODE(ARRAY_2D_TILED_THIN1
) |
1349 PIPE_CONFIG(ADDR_SURF_P2
) |
1350 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B
) |
1351 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING
));
1354 gb_tile_moden
= (ARRAY_MODE(ARRAY_2D_TILED_THIN1
) |
1355 PIPE_CONFIG(ADDR_SURF_P2
) |
1356 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B
) |
1357 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING
));
1360 gb_tile_moden
= (ARRAY_MODE(ARRAY_2D_TILED_THIN1
) |
1361 PIPE_CONFIG(ADDR_SURF_P2
) |
1362 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B
) |
1363 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING
));
1366 gb_tile_moden
= (ARRAY_MODE(ARRAY_2D_TILED_THIN1
) |
1367 PIPE_CONFIG(ADDR_SURF_P2
) |
1368 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B
) |
1369 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING
));
1372 gb_tile_moden
= (ARRAY_MODE(ARRAY_2D_TILED_THIN1
) |
1373 PIPE_CONFIG(ADDR_SURF_P2
) |
1374 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB
) |
1375 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING
));
1378 gb_tile_moden
= (ARRAY_MODE(ARRAY_1D_TILED_THIN1
) |
1379 PIPE_CONFIG(ADDR_SURF_P2
) |
1380 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB
) |
1381 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING
));
1384 gb_tile_moden
= (ARRAY_MODE(ARRAY_PRT_TILED_THIN1
) |
1385 PIPE_CONFIG(ADDR_SURF_P2
) |
1386 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB
) |
1387 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING
));
1390 gb_tile_moden
= (ARRAY_MODE(ARRAY_LINEAR_ALIGNED
) |
1391 PIPE_CONFIG(ADDR_SURF_P2
));
1394 gb_tile_moden
= (ARRAY_MODE(ARRAY_1D_TILED_THIN1
) |
1395 PIPE_CONFIG(ADDR_SURF_P2
) |
1396 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING
) |
1397 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2
));
1400 gb_tile_moden
= (ARRAY_MODE(ARRAY_2D_TILED_THIN1
) |
1401 PIPE_CONFIG(ADDR_SURF_P2
) |
1402 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING
) |
1403 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2
));
1406 gb_tile_moden
= (ARRAY_MODE(ARRAY_PRT_TILED_THIN1
) |
1407 PIPE_CONFIG(ADDR_SURF_P2
) |
1408 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING
) |
1409 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8
));
1412 gb_tile_moden
= (ARRAY_MODE(ARRAY_1D_TILED_THIN1
) |
1413 PIPE_CONFIG(ADDR_SURF_P2
) |
1414 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING
) |
1415 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2
));
1418 gb_tile_moden
= (ARRAY_MODE(ARRAY_2D_TILED_THIN1
) |
1419 PIPE_CONFIG(ADDR_SURF_P2
) |
1420 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING
) |
1421 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2
));
1424 gb_tile_moden
= (ARRAY_MODE(ARRAY_3D_TILED_THIN1
) |
1425 PIPE_CONFIG(ADDR_SURF_P2
) |
1426 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING
) |
1427 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2
));
1430 gb_tile_moden
= (ARRAY_MODE(ARRAY_PRT_TILED_THIN1
) |
1431 PIPE_CONFIG(ADDR_SURF_P2
) |
1432 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING
) |
1433 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8
));
1436 gb_tile_moden
= (ARRAY_MODE(ARRAY_1D_TILED_THICK
) |
1437 PIPE_CONFIG(ADDR_SURF_P2
) |
1438 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING
) |
1439 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1
));
1442 gb_tile_moden
= (ARRAY_MODE(ARRAY_1D_TILED_THICK
) |
1443 PIPE_CONFIG(ADDR_SURF_P2
) |
1444 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING
) |
1445 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1
));
1448 gb_tile_moden
= (ARRAY_MODE(ARRAY_2D_TILED_THICK
) |
1449 PIPE_CONFIG(ADDR_SURF_P2
) |
1450 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING
) |
1451 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1
));
1454 gb_tile_moden
= (ARRAY_MODE(ARRAY_3D_TILED_THICK
) |
1455 PIPE_CONFIG(ADDR_SURF_P2
) |
1456 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING
) |
1457 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1
));
1460 gb_tile_moden
= (ARRAY_MODE(ARRAY_PRT_TILED_THICK
) |
1461 PIPE_CONFIG(ADDR_SURF_P2
) |
1462 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING
) |
1463 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1
));
1466 gb_tile_moden
= (ARRAY_MODE(ARRAY_2D_TILED_THICK
) |
1467 PIPE_CONFIG(ADDR_SURF_P2
) |
1468 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING
) |
1469 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1
));
1472 gb_tile_moden
= (ARRAY_MODE(ARRAY_2D_TILED_XTHICK
) |
1473 PIPE_CONFIG(ADDR_SURF_P2
) |
1474 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING
) |
1475 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1
));
1478 gb_tile_moden
= (ARRAY_MODE(ARRAY_3D_TILED_XTHICK
) |
1479 PIPE_CONFIG(ADDR_SURF_P2
) |
1480 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING
) |
1481 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1
));
1484 gb_tile_moden
= (ARRAY_MODE(ARRAY_1D_TILED_THIN1
) |
1485 PIPE_CONFIG(ADDR_SURF_P2
) |
1486 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING
) |
1487 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2
));
1490 gb_tile_moden
= (ARRAY_MODE(ARRAY_2D_TILED_THIN1
) |
1491 PIPE_CONFIG(ADDR_SURF_P2
) |
1492 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING
) |
1493 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2
));
1496 gb_tile_moden
= (ARRAY_MODE(ARRAY_PRT_TILED_THIN1
) |
1497 PIPE_CONFIG(ADDR_SURF_P2
) |
1498 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING
) |
1499 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8
));
1511 adev
->gfx
.config
.tile_mode_array
[reg_offset
] = gb_tile_moden
;
1512 WREG32(mmGB_TILE_MODE0
+ reg_offset
, gb_tile_moden
);
1514 for (reg_offset
= 0; reg_offset
< num_secondary_tile_mode_states
; reg_offset
++) {
1515 switch (reg_offset
) {
1517 gb_tile_moden
= (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4
) |
1518 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4
) |
1519 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2
) |
1520 NUM_BANKS(ADDR_SURF_8_BANK
));
1523 gb_tile_moden
= (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4
) |
1524 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4
) |
1525 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2
) |
1526 NUM_BANKS(ADDR_SURF_8_BANK
));
1529 gb_tile_moden
= (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2
) |
1530 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4
) |
1531 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2
) |
1532 NUM_BANKS(ADDR_SURF_8_BANK
));
1535 gb_tile_moden
= (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1
) |
1536 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4
) |
1537 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4
) |
1538 NUM_BANKS(ADDR_SURF_8_BANK
));
1541 gb_tile_moden
= (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1
) |
1542 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2
) |
1543 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2
) |
1544 NUM_BANKS(ADDR_SURF_8_BANK
));
1547 gb_tile_moden
= (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1
) |
1548 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1
) |
1549 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2
) |
1550 NUM_BANKS(ADDR_SURF_8_BANK
));
1553 gb_tile_moden
= (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1
) |
1554 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1
) |
1555 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2
) |
1556 NUM_BANKS(ADDR_SURF_8_BANK
));
1559 gb_tile_moden
= (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4
) |
1560 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8
) |
1561 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4
) |
1562 NUM_BANKS(ADDR_SURF_16_BANK
));
1565 gb_tile_moden
= (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4
) |
1566 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4
) |
1567 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4
) |
1568 NUM_BANKS(ADDR_SURF_16_BANK
));
1571 gb_tile_moden
= (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2
) |
1572 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4
) |
1573 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4
) |
1574 NUM_BANKS(ADDR_SURF_16_BANK
));
1577 gb_tile_moden
= (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2
) |
1578 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2
) |
1579 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4
) |
1580 NUM_BANKS(ADDR_SURF_16_BANK
));
1583 gb_tile_moden
= (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1
) |
1584 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2
) |
1585 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4
) |
1586 NUM_BANKS(ADDR_SURF_16_BANK
));
1589 gb_tile_moden
= (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1
) |
1590 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1
) |
1591 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4
) |
1592 NUM_BANKS(ADDR_SURF_16_BANK
));
1595 gb_tile_moden
= (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1
) |
1596 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1
) |
1597 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2
) |
1598 NUM_BANKS(ADDR_SURF_8_BANK
));
1607 adev
->gfx
.config
.macrotile_mode_array
[reg_offset
] = gb_tile_moden
;
1608 WREG32(mmGB_MACROTILE_MODE0
+ reg_offset
, gb_tile_moden
);
1611 for (reg_offset
= 0; reg_offset
< num_tile_mode_states
; reg_offset
++) {
1612 switch (reg_offset
) {
1614 gb_tile_moden
= (ARRAY_MODE(ARRAY_2D_TILED_THIN1
) |
1615 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16
) |
1616 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B
) |
1617 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING
));
1620 gb_tile_moden
= (ARRAY_MODE(ARRAY_2D_TILED_THIN1
) |
1621 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16
) |
1622 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B
) |
1623 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING
));
1626 gb_tile_moden
= (ARRAY_MODE(ARRAY_2D_TILED_THIN1
) |
1627 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16
) |
1628 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B
) |
1629 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING
));
1632 gb_tile_moden
= (ARRAY_MODE(ARRAY_2D_TILED_THIN1
) |
1633 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16
) |
1634 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B
) |
1635 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING
));
1638 gb_tile_moden
= (ARRAY_MODE(ARRAY_2D_TILED_THIN1
) |
1639 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16
) |
1640 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB
) |
1641 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING
));
1644 gb_tile_moden
= (ARRAY_MODE(ARRAY_1D_TILED_THIN1
) |
1645 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16
) |
1646 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB
) |
1647 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING
));
1650 gb_tile_moden
= (ARRAY_MODE(ARRAY_PRT_TILED_THIN1
) |
1651 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16
) |
1652 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB
) |
1653 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING
));
1656 gb_tile_moden
= (ARRAY_MODE(ARRAY_PRT_TILED_THIN1
) |
1657 PIPE_CONFIG(ADDR_SURF_P4_16x16
) |
1658 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB
) |
1659 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING
));
1662 gb_tile_moden
= (ARRAY_MODE(ARRAY_LINEAR_ALIGNED
) |
1663 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16
));
1666 gb_tile_moden
= (ARRAY_MODE(ARRAY_1D_TILED_THIN1
) |
1667 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16
) |
1668 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING
) |
1669 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2
));
1672 gb_tile_moden
= (ARRAY_MODE(ARRAY_2D_TILED_THIN1
) |
1673 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16
) |
1674 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING
) |
1675 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2
));
1678 gb_tile_moden
= (ARRAY_MODE(ARRAY_PRT_TILED_THIN1
) |
1679 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16
) |
1680 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING
) |
1681 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8
));
1684 gb_tile_moden
= (ARRAY_MODE(ARRAY_PRT_TILED_THIN1
) |
1685 PIPE_CONFIG(ADDR_SURF_P4_16x16
) |
1686 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING
) |
1687 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8
));
1690 gb_tile_moden
= (ARRAY_MODE(ARRAY_1D_TILED_THIN1
) |
1691 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16
) |
1692 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING
) |
1693 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2
));
1696 gb_tile_moden
= (ARRAY_MODE(ARRAY_2D_TILED_THIN1
) |
1697 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16
) |
1698 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING
) |
1699 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2
));
1702 gb_tile_moden
= (ARRAY_MODE(ARRAY_3D_TILED_THIN1
) |
1703 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16
) |
1704 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING
) |
1705 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2
));
1708 gb_tile_moden
= (ARRAY_MODE(ARRAY_PRT_TILED_THIN1
) |
1709 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16
) |
1710 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING
) |
1711 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8
));
1714 gb_tile_moden
= (ARRAY_MODE(ARRAY_PRT_TILED_THIN1
) |
1715 PIPE_CONFIG(ADDR_SURF_P4_16x16
) |
1716 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING
) |
1717 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8
));
1720 gb_tile_moden
= (ARRAY_MODE(ARRAY_1D_TILED_THICK
) |
1721 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16
) |
1722 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING
) |
1723 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1
));
1726 gb_tile_moden
= (ARRAY_MODE(ARRAY_1D_TILED_THICK
) |
1727 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16
) |
1728 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING
) |
1729 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1
));
1732 gb_tile_moden
= (ARRAY_MODE(ARRAY_2D_TILED_THICK
) |
1733 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16
) |
1734 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING
) |
1735 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1
));
1738 gb_tile_moden
= (ARRAY_MODE(ARRAY_3D_TILED_THICK
) |
1739 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16
) |
1740 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING
) |
1741 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1
));
1744 gb_tile_moden
= (ARRAY_MODE(ARRAY_PRT_TILED_THICK
) |
1745 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16
) |
1746 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING
) |
1747 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1
));
1750 gb_tile_moden
= (ARRAY_MODE(ARRAY_PRT_TILED_THICK
) |
1751 PIPE_CONFIG(ADDR_SURF_P4_16x16
) |
1752 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING
) |
1753 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1
));
1756 gb_tile_moden
= (ARRAY_MODE(ARRAY_2D_TILED_THICK
) |
1757 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16
) |
1758 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING
) |
1759 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1
));
1762 gb_tile_moden
= (ARRAY_MODE(ARRAY_2D_TILED_XTHICK
) |
1763 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16
) |
1764 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING
) |
1765 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1
));
1768 gb_tile_moden
= (ARRAY_MODE(ARRAY_3D_TILED_XTHICK
) |
1769 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16
) |
1770 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING
) |
1771 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1
));
1774 gb_tile_moden
= (ARRAY_MODE(ARRAY_1D_TILED_THIN1
) |
1775 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16
) |
1776 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING
) |
1777 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2
));
1780 gb_tile_moden
= (ARRAY_MODE(ARRAY_2D_TILED_THIN1
) |
1781 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16
) |
1782 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING
) |
1783 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2
));
1786 gb_tile_moden
= (ARRAY_MODE(ARRAY_PRT_TILED_THIN1
) |
1787 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16
) |
1788 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING
) |
1789 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8
));
1792 gb_tile_moden
= (ARRAY_MODE(ARRAY_PRT_TILED_THIN1
) |
1793 PIPE_CONFIG(ADDR_SURF_P4_16x16
) |
1794 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING
) |
1795 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8
));
1801 adev
->gfx
.config
.tile_mode_array
[reg_offset
] = gb_tile_moden
;
1802 WREG32(mmGB_TILE_MODE0
+ reg_offset
, gb_tile_moden
);
1804 for (reg_offset
= 0; reg_offset
< num_secondary_tile_mode_states
; reg_offset
++) {
1805 switch (reg_offset
) {
1807 gb_tile_moden
= (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1
) |
1808 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4
) |
1809 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2
) |
1810 NUM_BANKS(ADDR_SURF_8_BANK
));
1813 gb_tile_moden
= (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1
) |
1814 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4
) |
1815 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2
) |
1816 NUM_BANKS(ADDR_SURF_8_BANK
));
1819 gb_tile_moden
= (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1
) |
1820 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4
) |
1821 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2
) |
1822 NUM_BANKS(ADDR_SURF_8_BANK
));
1825 gb_tile_moden
= (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1
) |
1826 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4
) |
1827 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2
) |
1828 NUM_BANKS(ADDR_SURF_8_BANK
));
1831 gb_tile_moden
= (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1
) |
1832 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2
) |
1833 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1
) |
1834 NUM_BANKS(ADDR_SURF_8_BANK
));
1837 gb_tile_moden
= (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1
) |
1838 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1
) |
1839 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1
) |
1840 NUM_BANKS(ADDR_SURF_8_BANK
));
1843 gb_tile_moden
= (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1
) |
1844 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1
) |
1845 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1
) |
1846 NUM_BANKS(ADDR_SURF_8_BANK
));
1849 gb_tile_moden
= (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1
) |
1850 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8
) |
1851 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2
) |
1852 NUM_BANKS(ADDR_SURF_8_BANK
));
1855 gb_tile_moden
= (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1
) |
1856 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4
) |
1857 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2
) |
1858 NUM_BANKS(ADDR_SURF_8_BANK
));
1861 gb_tile_moden
= (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1
) |
1862 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2
) |
1863 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1
) |
1864 NUM_BANKS(ADDR_SURF_8_BANK
));
1867 gb_tile_moden
= (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1
) |
1868 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1
) |
1869 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1
) |
1870 NUM_BANKS(ADDR_SURF_8_BANK
));
1873 gb_tile_moden
= (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1
) |
1874 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2
) |
1875 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2
) |
1876 NUM_BANKS(ADDR_SURF_8_BANK
));
1879 gb_tile_moden
= (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1
) |
1880 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1
) |
1881 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2
) |
1882 NUM_BANKS(ADDR_SURF_8_BANK
));
1885 gb_tile_moden
= (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1
) |
1886 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1
) |
1887 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1
) |
1888 NUM_BANKS(ADDR_SURF_4_BANK
));
1897 adev
->gfx
.config
.macrotile_mode_array
[reg_offset
] = gb_tile_moden
;
1898 WREG32(mmGB_MACROTILE_MODE0
+ reg_offset
, gb_tile_moden
);
1902 for (reg_offset
= 0; reg_offset
< num_tile_mode_states
; reg_offset
++) {
1903 switch (reg_offset
) {
1905 gb_tile_moden
= (ARRAY_MODE(ARRAY_2D_TILED_THIN1
) |
1906 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16
) |
1907 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B
) |
1908 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING
));
1911 gb_tile_moden
= (ARRAY_MODE(ARRAY_2D_TILED_THIN1
) |
1912 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16
) |
1913 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B
) |
1914 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING
));
1917 gb_tile_moden
= (ARRAY_MODE(ARRAY_2D_TILED_THIN1
) |
1918 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16
) |
1919 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B
) |
1920 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING
));
1923 gb_tile_moden
= (ARRAY_MODE(ARRAY_2D_TILED_THIN1
) |
1924 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16
) |
1925 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B
) |
1926 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING
));
1929 gb_tile_moden
= (ARRAY_MODE(ARRAY_2D_TILED_THIN1
) |
1930 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16
) |
1931 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB
) |
1932 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING
));
1935 gb_tile_moden
= (ARRAY_MODE(ARRAY_1D_TILED_THIN1
) |
1936 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16
) |
1937 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB
) |
1938 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING
));
1941 gb_tile_moden
= (ARRAY_MODE(ARRAY_PRT_TILED_THIN1
) |
1942 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16
) |
1943 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB
) |
1944 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING
));
1947 gb_tile_moden
= (ARRAY_MODE(ARRAY_PRT_TILED_THIN1
) |
1948 PIPE_CONFIG(ADDR_SURF_P4_16x16
) |
1949 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB
) |
1950 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING
));
1953 gb_tile_moden
= (ARRAY_MODE(ARRAY_LINEAR_ALIGNED
) |
1954 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16
));
1957 gb_tile_moden
= (ARRAY_MODE(ARRAY_1D_TILED_THIN1
) |
1958 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16
) |
1959 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING
) |
1960 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2
));
1963 gb_tile_moden
= (ARRAY_MODE(ARRAY_2D_TILED_THIN1
) |
1964 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16
) |
1965 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING
) |
1966 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2
));
1969 gb_tile_moden
= (ARRAY_MODE(ARRAY_PRT_TILED_THIN1
) |
1970 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16
) |
1971 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING
) |
1972 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8
));
1975 gb_tile_moden
= (ARRAY_MODE(ARRAY_PRT_TILED_THIN1
) |
1976 PIPE_CONFIG(ADDR_SURF_P4_16x16
) |
1977 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING
) |
1978 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8
));
1981 gb_tile_moden
= (ARRAY_MODE(ARRAY_1D_TILED_THIN1
) |
1982 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16
) |
1983 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING
) |
1984 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2
));
1987 gb_tile_moden
= (ARRAY_MODE(ARRAY_2D_TILED_THIN1
) |
1988 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16
) |
1989 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING
) |
1990 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2
));
1993 gb_tile_moden
= (ARRAY_MODE(ARRAY_3D_TILED_THIN1
) |
1994 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16
) |
1995 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING
) |
1996 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2
));
1999 gb_tile_moden
= (ARRAY_MODE(ARRAY_PRT_TILED_THIN1
) |
2000 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16
) |
2001 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING
) |
2002 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8
));
2005 gb_tile_moden
= (ARRAY_MODE(ARRAY_PRT_TILED_THIN1
) |
2006 PIPE_CONFIG(ADDR_SURF_P4_16x16
) |
2007 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING
) |
2008 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8
));
2011 gb_tile_moden
= (ARRAY_MODE(ARRAY_1D_TILED_THICK
) |
2012 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16
) |
2013 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING
) |
2014 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1
));
2017 gb_tile_moden
= (ARRAY_MODE(ARRAY_1D_TILED_THICK
) |
2018 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16
) |
2019 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING
) |
2020 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1
));
2023 gb_tile_moden
= (ARRAY_MODE(ARRAY_2D_TILED_THICK
) |
2024 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16
) |
2025 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING
) |
2026 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1
));
2029 gb_tile_moden
= (ARRAY_MODE(ARRAY_3D_TILED_THICK
) |
2030 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16
) |
2031 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING
) |
2032 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1
));
2035 gb_tile_moden
= (ARRAY_MODE(ARRAY_PRT_TILED_THICK
) |
2036 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16
) |
2037 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING
) |
2038 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1
));
2041 gb_tile_moden
= (ARRAY_MODE(ARRAY_PRT_TILED_THICK
) |
2042 PIPE_CONFIG(ADDR_SURF_P4_16x16
) |
2043 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING
) |
2044 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1
));
2047 gb_tile_moden
= (ARRAY_MODE(ARRAY_2D_TILED_THICK
) |
2048 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16
) |
2049 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING
) |
2050 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1
));
2053 gb_tile_moden
= (ARRAY_MODE(ARRAY_2D_TILED_XTHICK
) |
2054 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16
) |
2055 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING
) |
2056 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1
));
2059 gb_tile_moden
= (ARRAY_MODE(ARRAY_3D_TILED_XTHICK
) |
2060 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16
) |
2061 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING
) |
2062 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1
));
2065 gb_tile_moden
= (ARRAY_MODE(ARRAY_1D_TILED_THIN1
) |
2066 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16
) |
2067 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING
) |
2068 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2
));
2071 gb_tile_moden
= (ARRAY_MODE(ARRAY_2D_TILED_THIN1
) |
2072 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16
) |
2073 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING
) |
2074 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2
));
2077 gb_tile_moden
= (ARRAY_MODE(ARRAY_PRT_TILED_THIN1
) |
2078 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16
) |
2079 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING
) |
2080 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8
));
2083 gb_tile_moden
= (ARRAY_MODE(ARRAY_PRT_TILED_THIN1
) |
2084 PIPE_CONFIG(ADDR_SURF_P4_16x16
) |
2085 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING
) |
2086 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8
));
2092 adev
->gfx
.config
.tile_mode_array
[reg_offset
] = gb_tile_moden
;
2093 WREG32(mmGB_TILE_MODE0
+ reg_offset
, gb_tile_moden
);
2095 for (reg_offset
= 0; reg_offset
< num_secondary_tile_mode_states
; reg_offset
++) {
2096 switch (reg_offset
) {
2098 gb_tile_moden
= (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1
) |
2099 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4
) |
2100 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4
) |
2101 NUM_BANKS(ADDR_SURF_16_BANK
));
2104 gb_tile_moden
= (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1
) |
2105 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4
) |
2106 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4
) |
2107 NUM_BANKS(ADDR_SURF_16_BANK
));
2110 gb_tile_moden
= (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1
) |
2111 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4
) |
2112 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4
) |
2113 NUM_BANKS(ADDR_SURF_16_BANK
));
2116 gb_tile_moden
= (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1
) |
2117 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4
) |
2118 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4
) |
2119 NUM_BANKS(ADDR_SURF_16_BANK
));
2122 gb_tile_moden
= (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1
) |
2123 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2
) |
2124 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2
) |
2125 NUM_BANKS(ADDR_SURF_16_BANK
));
2128 gb_tile_moden
= (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1
) |
2129 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1
) |
2130 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1
) |
2131 NUM_BANKS(ADDR_SURF_16_BANK
));
2134 gb_tile_moden
= (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1
) |
2135 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1
) |
2136 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1
) |
2137 NUM_BANKS(ADDR_SURF_16_BANK
));
2140 gb_tile_moden
= (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1
) |
2141 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8
) |
2142 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4
) |
2143 NUM_BANKS(ADDR_SURF_16_BANK
));
2146 gb_tile_moden
= (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1
) |
2147 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4
) |
2148 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4
) |
2149 NUM_BANKS(ADDR_SURF_16_BANK
));
2152 gb_tile_moden
= (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1
) |
2153 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2
) |
2154 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2
) |
2155 NUM_BANKS(ADDR_SURF_16_BANK
));
2158 gb_tile_moden
= (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1
) |
2159 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1
) |
2160 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2
) |
2161 NUM_BANKS(ADDR_SURF_16_BANK
));
2164 gb_tile_moden
= (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1
) |
2165 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1
) |
2166 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1
) |
2167 NUM_BANKS(ADDR_SURF_8_BANK
));
2170 gb_tile_moden
= (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1
) |
2171 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1
) |
2172 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1
) |
2173 NUM_BANKS(ADDR_SURF_4_BANK
));
2176 gb_tile_moden
= (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1
) |
2177 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1
) |
2178 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1
) |
2179 NUM_BANKS(ADDR_SURF_4_BANK
));
2188 adev
->gfx
.config
.macrotile_mode_array
[reg_offset
] = gb_tile_moden
;
2189 WREG32(mmGB_MACROTILE_MODE0
+ reg_offset
, gb_tile_moden
);
2193 for (reg_offset
= 0; reg_offset
< num_tile_mode_states
; reg_offset
++) {
2194 switch (reg_offset
) {
2196 gb_tile_moden
= (ARRAY_MODE(ARRAY_2D_TILED_THIN1
) |
2197 PIPE_CONFIG(ADDR_SURF_P2
) |
2198 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B
) |
2199 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING
));
2202 gb_tile_moden
= (ARRAY_MODE(ARRAY_2D_TILED_THIN1
) |
2203 PIPE_CONFIG(ADDR_SURF_P2
) |
2204 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B
) |
2205 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING
));
2208 gb_tile_moden
= (ARRAY_MODE(ARRAY_2D_TILED_THIN1
) |
2209 PIPE_CONFIG(ADDR_SURF_P2
) |
2210 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B
) |
2211 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING
));
2214 gb_tile_moden
= (ARRAY_MODE(ARRAY_2D_TILED_THIN1
) |
2215 PIPE_CONFIG(ADDR_SURF_P2
) |
2216 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B
) |
2217 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING
));
2220 gb_tile_moden
= (ARRAY_MODE(ARRAY_2D_TILED_THIN1
) |
2221 PIPE_CONFIG(ADDR_SURF_P2
) |
2222 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB
) |
2223 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING
));
2226 gb_tile_moden
= (ARRAY_MODE(ARRAY_1D_TILED_THIN1
) |
2227 PIPE_CONFIG(ADDR_SURF_P2
) |
2228 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB
) |
2229 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING
));
2232 gb_tile_moden
= (ARRAY_MODE(ARRAY_PRT_TILED_THIN1
) |
2233 PIPE_CONFIG(ADDR_SURF_P2
) |
2234 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB
) |
2235 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING
));
2238 gb_tile_moden
= (ARRAY_MODE(ARRAY_LINEAR_ALIGNED
) |
2239 PIPE_CONFIG(ADDR_SURF_P2
));
2242 gb_tile_moden
= (ARRAY_MODE(ARRAY_1D_TILED_THIN1
) |
2243 PIPE_CONFIG(ADDR_SURF_P2
) |
2244 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING
) |
2245 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2
));
2248 gb_tile_moden
= (ARRAY_MODE(ARRAY_2D_TILED_THIN1
) |
2249 PIPE_CONFIG(ADDR_SURF_P2
) |
2250 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING
) |
2251 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2
));
2254 gb_tile_moden
= (ARRAY_MODE(ARRAY_PRT_TILED_THIN1
) |
2255 PIPE_CONFIG(ADDR_SURF_P2
) |
2256 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING
) |
2257 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8
));
2260 gb_tile_moden
= (ARRAY_MODE(ARRAY_1D_TILED_THIN1
) |
2261 PIPE_CONFIG(ADDR_SURF_P2
) |
2262 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING
) |
2263 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2
));
2266 gb_tile_moden
= (ARRAY_MODE(ARRAY_2D_TILED_THIN1
) |
2267 PIPE_CONFIG(ADDR_SURF_P2
) |
2268 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING
) |
2269 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2
));
2272 gb_tile_moden
= (ARRAY_MODE(ARRAY_3D_TILED_THIN1
) |
2273 PIPE_CONFIG(ADDR_SURF_P2
) |
2274 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING
) |
2275 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2
));
2278 gb_tile_moden
= (ARRAY_MODE(ARRAY_PRT_TILED_THIN1
) |
2279 PIPE_CONFIG(ADDR_SURF_P2
) |
2280 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING
) |
2281 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8
));
2284 gb_tile_moden
= (ARRAY_MODE(ARRAY_1D_TILED_THICK
) |
2285 PIPE_CONFIG(ADDR_SURF_P2
) |
2286 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING
) |
2287 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1
));
2290 gb_tile_moden
= (ARRAY_MODE(ARRAY_1D_TILED_THICK
) |
2291 PIPE_CONFIG(ADDR_SURF_P2
) |
2292 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING
) |
2293 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1
));
2296 gb_tile_moden
= (ARRAY_MODE(ARRAY_2D_TILED_THICK
) |
2297 PIPE_CONFIG(ADDR_SURF_P2
) |
2298 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING
) |
2299 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1
));
2302 gb_tile_moden
= (ARRAY_MODE(ARRAY_3D_TILED_THICK
) |
2303 PIPE_CONFIG(ADDR_SURF_P2
) |
2304 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING
) |
2305 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1
));
2308 gb_tile_moden
= (ARRAY_MODE(ARRAY_PRT_TILED_THICK
) |
2309 PIPE_CONFIG(ADDR_SURF_P2
) |
2310 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING
) |
2311 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1
));
2314 gb_tile_moden
= (ARRAY_MODE(ARRAY_2D_TILED_THICK
) |
2315 PIPE_CONFIG(ADDR_SURF_P2
) |
2316 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING
) |
2317 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1
));
2320 gb_tile_moden
= (ARRAY_MODE(ARRAY_2D_TILED_XTHICK
) |
2321 PIPE_CONFIG(ADDR_SURF_P2
) |
2322 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING
) |
2323 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1
));
2326 gb_tile_moden
= (ARRAY_MODE(ARRAY_3D_TILED_XTHICK
) |
2327 PIPE_CONFIG(ADDR_SURF_P2
) |
2328 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING
) |
2329 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1
));
2332 gb_tile_moden
= (ARRAY_MODE(ARRAY_1D_TILED_THIN1
) |
2333 PIPE_CONFIG(ADDR_SURF_P2
) |
2334 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING
) |
2335 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2
));
2338 gb_tile_moden
= (ARRAY_MODE(ARRAY_2D_TILED_THIN1
) |
2339 PIPE_CONFIG(ADDR_SURF_P2
) |
2340 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING
) |
2341 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2
));
2344 gb_tile_moden
= (ARRAY_MODE(ARRAY_PRT_TILED_THIN1
) |
2345 PIPE_CONFIG(ADDR_SURF_P2
) |
2346 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING
) |
2347 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8
));
2359 adev
->gfx
.config
.tile_mode_array
[reg_offset
] = gb_tile_moden
;
2360 WREG32(mmGB_TILE_MODE0
+ reg_offset
, gb_tile_moden
);
2362 for (reg_offset
= 0; reg_offset
< num_secondary_tile_mode_states
; reg_offset
++) {
2363 switch (reg_offset
) {
2365 gb_tile_moden
= (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1
) |
2366 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4
) |
2367 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4
) |
2368 NUM_BANKS(ADDR_SURF_8_BANK
));
2371 gb_tile_moden
= (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1
) |
2372 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2
) |
2373 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4
) |
2374 NUM_BANKS(ADDR_SURF_8_BANK
));
2377 gb_tile_moden
= (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1
) |
2378 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1
) |
2379 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2
) |
2380 NUM_BANKS(ADDR_SURF_8_BANK
));
2383 gb_tile_moden
= (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1
) |
2384 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1
) |
2385 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2
) |
2386 NUM_BANKS(ADDR_SURF_8_BANK
));
2389 gb_tile_moden
= (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1
) |
2390 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1
) |
2391 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2
) |
2392 NUM_BANKS(ADDR_SURF_8_BANK
));
2395 gb_tile_moden
= (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1
) |
2396 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1
) |
2397 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2
) |
2398 NUM_BANKS(ADDR_SURF_8_BANK
));
2401 gb_tile_moden
= (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1
) |
2402 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1
) |
2403 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2
) |
2404 NUM_BANKS(ADDR_SURF_8_BANK
));
2407 gb_tile_moden
= (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4
) |
2408 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8
) |
2409 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4
) |
2410 NUM_BANKS(ADDR_SURF_16_BANK
));
2413 gb_tile_moden
= (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4
) |
2414 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4
) |
2415 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4
) |
2416 NUM_BANKS(ADDR_SURF_16_BANK
));
2419 gb_tile_moden
= (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2
) |
2420 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4
) |
2421 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4
) |
2422 NUM_BANKS(ADDR_SURF_16_BANK
));
2425 gb_tile_moden
= (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2
) |
2426 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2
) |
2427 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4
) |
2428 NUM_BANKS(ADDR_SURF_16_BANK
));
2431 gb_tile_moden
= (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1
) |
2432 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2
) |
2433 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4
) |
2434 NUM_BANKS(ADDR_SURF_16_BANK
));
2437 gb_tile_moden
= (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1
) |
2438 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1
) |
2439 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4
) |
2440 NUM_BANKS(ADDR_SURF_16_BANK
));
2443 gb_tile_moden
= (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1
) |
2444 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1
) |
2445 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2
) |
2446 NUM_BANKS(ADDR_SURF_8_BANK
));
2455 adev
->gfx
.config
.macrotile_mode_array
[reg_offset
] = gb_tile_moden
;
2456 WREG32(mmGB_MACROTILE_MODE0
+ reg_offset
, gb_tile_moden
);
2461 for (reg_offset
= 0; reg_offset
< num_tile_mode_states
; reg_offset
++) {
2462 switch (reg_offset
) {
2464 gb_tile_moden
= (ARRAY_MODE(ARRAY_2D_TILED_THIN1
) |
2465 PIPE_CONFIG(ADDR_SURF_P2
) |
2466 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B
) |
2467 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING
));
2470 gb_tile_moden
= (ARRAY_MODE(ARRAY_2D_TILED_THIN1
) |
2471 PIPE_CONFIG(ADDR_SURF_P2
) |
2472 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B
) |
2473 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING
));
2476 gb_tile_moden
= (ARRAY_MODE(ARRAY_2D_TILED_THIN1
) |
2477 PIPE_CONFIG(ADDR_SURF_P2
) |
2478 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B
) |
2479 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING
));
2482 gb_tile_moden
= (ARRAY_MODE(ARRAY_2D_TILED_THIN1
) |
2483 PIPE_CONFIG(ADDR_SURF_P2
) |
2484 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B
) |
2485 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING
));
2488 gb_tile_moden
= (ARRAY_MODE(ARRAY_2D_TILED_THIN1
) |
2489 PIPE_CONFIG(ADDR_SURF_P2
) |
2490 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB
) |
2491 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING
));
2494 gb_tile_moden
= (ARRAY_MODE(ARRAY_1D_TILED_THIN1
) |
2495 PIPE_CONFIG(ADDR_SURF_P2
) |
2496 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB
) |
2497 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING
));
2500 gb_tile_moden
= (ARRAY_MODE(ARRAY_PRT_TILED_THIN1
) |
2501 PIPE_CONFIG(ADDR_SURF_P2
) |
2502 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB
) |
2503 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING
));
2506 gb_tile_moden
= (ARRAY_MODE(ARRAY_LINEAR_ALIGNED
) |
2507 PIPE_CONFIG(ADDR_SURF_P2
));
2510 gb_tile_moden
= (ARRAY_MODE(ARRAY_1D_TILED_THIN1
) |
2511 PIPE_CONFIG(ADDR_SURF_P2
) |
2512 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING
) |
2513 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2
));
2516 gb_tile_moden
= (ARRAY_MODE(ARRAY_2D_TILED_THIN1
) |
2517 PIPE_CONFIG(ADDR_SURF_P2
) |
2518 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING
) |
2519 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2
));
2522 gb_tile_moden
= (ARRAY_MODE(ARRAY_PRT_TILED_THIN1
) |
2523 PIPE_CONFIG(ADDR_SURF_P2
) |
2524 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING
) |
2525 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8
));
2528 gb_tile_moden
= (ARRAY_MODE(ARRAY_1D_TILED_THIN1
) |
2529 PIPE_CONFIG(ADDR_SURF_P2
) |
2530 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING
) |
2531 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2
));
2534 gb_tile_moden
= (ARRAY_MODE(ARRAY_2D_TILED_THIN1
) |
2535 PIPE_CONFIG(ADDR_SURF_P2
) |
2536 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING
) |
2537 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2
));
2540 gb_tile_moden
= (ARRAY_MODE(ARRAY_3D_TILED_THIN1
) |
2541 PIPE_CONFIG(ADDR_SURF_P2
) |
2542 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING
) |
2543 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2
));
2546 gb_tile_moden
= (ARRAY_MODE(ARRAY_PRT_TILED_THIN1
) |
2547 PIPE_CONFIG(ADDR_SURF_P2
) |
2548 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING
) |
2549 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8
));
2552 gb_tile_moden
= (ARRAY_MODE(ARRAY_1D_TILED_THICK
) |
2553 PIPE_CONFIG(ADDR_SURF_P2
) |
2554 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING
) |
2555 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1
));
2558 gb_tile_moden
= (ARRAY_MODE(ARRAY_1D_TILED_THICK
) |
2559 PIPE_CONFIG(ADDR_SURF_P2
) |
2560 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING
) |
2561 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1
));
2564 gb_tile_moden
= (ARRAY_MODE(ARRAY_2D_TILED_THICK
) |
2565 PIPE_CONFIG(ADDR_SURF_P2
) |
2566 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING
) |
2567 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1
));
2570 gb_tile_moden
= (ARRAY_MODE(ARRAY_3D_TILED_THICK
) |
2571 PIPE_CONFIG(ADDR_SURF_P2
) |
2572 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING
) |
2573 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1
));
2576 gb_tile_moden
= (ARRAY_MODE(ARRAY_PRT_TILED_THICK
) |
2577 PIPE_CONFIG(ADDR_SURF_P2
) |
2578 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING
) |
2579 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1
));
2582 gb_tile_moden
= (ARRAY_MODE(ARRAY_2D_TILED_THICK
) |
2583 PIPE_CONFIG(ADDR_SURF_P2
) |
2584 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING
) |
2585 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1
));
2588 gb_tile_moden
= (ARRAY_MODE(ARRAY_2D_TILED_XTHICK
) |
2589 PIPE_CONFIG(ADDR_SURF_P2
) |
2590 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING
) |
2591 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1
));
2594 gb_tile_moden
= (ARRAY_MODE(ARRAY_3D_TILED_XTHICK
) |
2595 PIPE_CONFIG(ADDR_SURF_P2
) |
2596 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING
) |
2597 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1
));
2600 gb_tile_moden
= (ARRAY_MODE(ARRAY_1D_TILED_THIN1
) |
2601 PIPE_CONFIG(ADDR_SURF_P2
) |
2602 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING
) |
2603 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2
));
2606 gb_tile_moden
= (ARRAY_MODE(ARRAY_2D_TILED_THIN1
) |
2607 PIPE_CONFIG(ADDR_SURF_P2
) |
2608 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING
) |
2609 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2
));
2612 gb_tile_moden
= (ARRAY_MODE(ARRAY_PRT_TILED_THIN1
) |
2613 PIPE_CONFIG(ADDR_SURF_P2
) |
2614 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING
) |
2615 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8
));
2627 adev
->gfx
.config
.tile_mode_array
[reg_offset
] = gb_tile_moden
;
2628 WREG32(mmGB_TILE_MODE0
+ reg_offset
, gb_tile_moden
);
2630 for (reg_offset
= 0; reg_offset
< num_secondary_tile_mode_states
; reg_offset
++) {
2631 switch (reg_offset
) {
2633 gb_tile_moden
= (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1
) |
2634 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4
) |
2635 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4
) |
2636 NUM_BANKS(ADDR_SURF_8_BANK
));
2639 gb_tile_moden
= (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1
) |
2640 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2
) |
2641 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4
) |
2642 NUM_BANKS(ADDR_SURF_8_BANK
));
2645 gb_tile_moden
= (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1
) |
2646 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1
) |
2647 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2
) |
2648 NUM_BANKS(ADDR_SURF_8_BANK
));
2651 gb_tile_moden
= (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1
) |
2652 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1
) |
2653 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2
) |
2654 NUM_BANKS(ADDR_SURF_8_BANK
));
2657 gb_tile_moden
= (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1
) |
2658 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1
) |
2659 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2
) |
2660 NUM_BANKS(ADDR_SURF_8_BANK
));
2663 gb_tile_moden
= (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1
) |
2664 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1
) |
2665 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2
) |
2666 NUM_BANKS(ADDR_SURF_8_BANK
));
2669 gb_tile_moden
= (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1
) |
2670 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1
) |
2671 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2
) |
2672 NUM_BANKS(ADDR_SURF_8_BANK
));
2675 gb_tile_moden
= (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4
) |
2676 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8
) |
2677 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4
) |
2678 NUM_BANKS(ADDR_SURF_16_BANK
));
2681 gb_tile_moden
= (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4
) |
2682 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4
) |
2683 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4
) |
2684 NUM_BANKS(ADDR_SURF_16_BANK
));
2687 gb_tile_moden
= (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2
) |
2688 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4
) |
2689 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4
) |
2690 NUM_BANKS(ADDR_SURF_16_BANK
));
2693 gb_tile_moden
= (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2
) |
2694 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2
) |
2695 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4
) |
2696 NUM_BANKS(ADDR_SURF_16_BANK
));
2699 gb_tile_moden
= (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1
) |
2700 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2
) |
2701 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4
) |
2702 NUM_BANKS(ADDR_SURF_16_BANK
));
2705 gb_tile_moden
= (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1
) |
2706 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1
) |
2707 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4
) |
2708 NUM_BANKS(ADDR_SURF_16_BANK
));
2711 gb_tile_moden
= (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1
) |
2712 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1
) |
2713 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2
) |
2714 NUM_BANKS(ADDR_SURF_8_BANK
));
2723 adev
->gfx
.config
.macrotile_mode_array
[reg_offset
] = gb_tile_moden
;
2724 WREG32(mmGB_MACROTILE_MODE0
+ reg_offset
, gb_tile_moden
);
2729 static u32
gfx_v8_0_create_bitmask(u32 bit_width
)
2733 for (i
= 0; i
< bit_width
; i
++) {
2740 void gfx_v8_0_select_se_sh(struct amdgpu_device
*adev
, u32 se_num
, u32 sh_num
)
2742 u32 data
= REG_SET_FIELD(0, GRBM_GFX_INDEX
, INSTANCE_BROADCAST_WRITES
, 1);
2744 if ((se_num
== 0xffffffff) && (sh_num
== 0xffffffff)) {
2745 data
= REG_SET_FIELD(data
, GRBM_GFX_INDEX
, SH_BROADCAST_WRITES
, 1);
2746 data
= REG_SET_FIELD(data
, GRBM_GFX_INDEX
, SE_BROADCAST_WRITES
, 1);
2747 } else if (se_num
== 0xffffffff) {
2748 data
= REG_SET_FIELD(data
, GRBM_GFX_INDEX
, SH_INDEX
, sh_num
);
2749 data
= REG_SET_FIELD(data
, GRBM_GFX_INDEX
, SE_BROADCAST_WRITES
, 1);
2750 } else if (sh_num
== 0xffffffff) {
2751 data
= REG_SET_FIELD(data
, GRBM_GFX_INDEX
, SH_BROADCAST_WRITES
, 1);
2752 data
= REG_SET_FIELD(data
, GRBM_GFX_INDEX
, SE_INDEX
, se_num
);
2754 data
= REG_SET_FIELD(data
, GRBM_GFX_INDEX
, SH_INDEX
, sh_num
);
2755 data
= REG_SET_FIELD(data
, GRBM_GFX_INDEX
, SE_INDEX
, se_num
);
2757 WREG32(mmGRBM_GFX_INDEX
, data
);
2760 static u32
gfx_v8_0_get_rb_disabled(struct amdgpu_device
*adev
,
2761 u32 max_rb_num_per_se
,
2766 data
= RREG32(mmCC_RB_BACKEND_DISABLE
);
2767 data
&= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK
;
2769 data
|= RREG32(mmGC_USER_RB_BACKEND_DISABLE
);
2771 data
>>= GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT
;
2773 mask
= gfx_v8_0_create_bitmask(max_rb_num_per_se
/ sh_per_se
);
2778 static void gfx_v8_0_setup_rb(struct amdgpu_device
*adev
,
2779 u32 se_num
, u32 sh_per_se
,
2780 u32 max_rb_num_per_se
)
2784 u32 disabled_rbs
= 0;
2785 u32 enabled_rbs
= 0;
2787 mutex_lock(&adev
->grbm_idx_mutex
);
2788 for (i
= 0; i
< se_num
; i
++) {
2789 for (j
= 0; j
< sh_per_se
; j
++) {
2790 gfx_v8_0_select_se_sh(adev
, i
, j
);
2791 data
= gfx_v8_0_get_rb_disabled(adev
,
2792 max_rb_num_per_se
, sh_per_se
);
2793 disabled_rbs
|= data
<< ((i
* sh_per_se
+ j
) *
2794 RB_BITMAP_WIDTH_PER_SH
);
2797 gfx_v8_0_select_se_sh(adev
, 0xffffffff, 0xffffffff);
2798 mutex_unlock(&adev
->grbm_idx_mutex
);
2801 for (i
= 0; i
< max_rb_num_per_se
* se_num
; i
++) {
2802 if (!(disabled_rbs
& mask
))
2803 enabled_rbs
|= mask
;
2807 adev
->gfx
.config
.backend_enable_mask
= enabled_rbs
;
2809 mutex_lock(&adev
->grbm_idx_mutex
);
2810 for (i
= 0; i
< se_num
; i
++) {
2811 gfx_v8_0_select_se_sh(adev
, i
, 0xffffffff);
2813 for (j
= 0; j
< sh_per_se
; j
++) {
2814 switch (enabled_rbs
& 3) {
2817 data
|= (RASTER_CONFIG_RB_MAP_3
<<
2818 PA_SC_RASTER_CONFIG__PKR_MAP__SHIFT
);
2820 data
|= (RASTER_CONFIG_RB_MAP_0
<<
2821 PA_SC_RASTER_CONFIG__PKR_MAP__SHIFT
);
2824 data
|= (RASTER_CONFIG_RB_MAP_0
<<
2825 (i
* sh_per_se
+ j
) * 2);
2828 data
|= (RASTER_CONFIG_RB_MAP_3
<<
2829 (i
* sh_per_se
+ j
) * 2);
2833 data
|= (RASTER_CONFIG_RB_MAP_2
<<
2834 (i
* sh_per_se
+ j
) * 2);
2839 WREG32(mmPA_SC_RASTER_CONFIG
, data
);
2841 gfx_v8_0_select_se_sh(adev
, 0xffffffff, 0xffffffff);
2842 mutex_unlock(&adev
->grbm_idx_mutex
);
2846 * gfx_v8_0_init_compute_vmid - gart enable
2848 * @rdev: amdgpu_device pointer
2850 * Initialize compute vmid sh_mem registers
2853 #define DEFAULT_SH_MEM_BASES (0x6000)
2854 #define FIRST_COMPUTE_VMID (8)
2855 #define LAST_COMPUTE_VMID (16)
2856 static void gfx_v8_0_init_compute_vmid(struct amdgpu_device
*adev
)
2859 uint32_t sh_mem_config
;
2860 uint32_t sh_mem_bases
;
2863 * Configure apertures:
2864 * LDS: 0x60000000'00000000 - 0x60000001'00000000 (4GB)
2865 * Scratch: 0x60000001'00000000 - 0x60000002'00000000 (4GB)
2866 * GPUVM: 0x60010000'00000000 - 0x60020000'00000000 (1TB)
2868 sh_mem_bases
= DEFAULT_SH_MEM_BASES
| (DEFAULT_SH_MEM_BASES
<< 16);
2870 sh_mem_config
= SH_MEM_ADDRESS_MODE_HSA64
<<
2871 SH_MEM_CONFIG__ADDRESS_MODE__SHIFT
|
2872 SH_MEM_ALIGNMENT_MODE_UNALIGNED
<<
2873 SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT
|
2874 MTYPE_CC
<< SH_MEM_CONFIG__DEFAULT_MTYPE__SHIFT
|
2875 SH_MEM_CONFIG__PRIVATE_ATC_MASK
;
2877 mutex_lock(&adev
->srbm_mutex
);
2878 for (i
= FIRST_COMPUTE_VMID
; i
< LAST_COMPUTE_VMID
; i
++) {
2879 vi_srbm_select(adev
, 0, 0, 0, i
);
2880 /* CP and shaders */
2881 WREG32(mmSH_MEM_CONFIG
, sh_mem_config
);
2882 WREG32(mmSH_MEM_APE1_BASE
, 1);
2883 WREG32(mmSH_MEM_APE1_LIMIT
, 0);
2884 WREG32(mmSH_MEM_BASES
, sh_mem_bases
);
2886 vi_srbm_select(adev
, 0, 0, 0, 0);
2887 mutex_unlock(&adev
->srbm_mutex
);
2890 static void gfx_v8_0_gpu_init(struct amdgpu_device
*adev
)
2895 tmp
= RREG32(mmGRBM_CNTL
);
2896 tmp
= REG_SET_FIELD(tmp
, GRBM_CNTL
, READ_TIMEOUT
, 0xff);
2897 WREG32(mmGRBM_CNTL
, tmp
);
2899 WREG32(mmGB_ADDR_CONFIG
, adev
->gfx
.config
.gb_addr_config
);
2900 WREG32(mmHDP_ADDR_CONFIG
, adev
->gfx
.config
.gb_addr_config
);
2901 WREG32(mmDMIF_ADDR_CALC
, adev
->gfx
.config
.gb_addr_config
);
2902 WREG32(mmSDMA0_TILING_CONFIG
+ SDMA0_REGISTER_OFFSET
,
2903 adev
->gfx
.config
.gb_addr_config
& 0x70);
2904 WREG32(mmSDMA0_TILING_CONFIG
+ SDMA1_REGISTER_OFFSET
,
2905 adev
->gfx
.config
.gb_addr_config
& 0x70);
2906 WREG32(mmUVD_UDEC_ADDR_CONFIG
, adev
->gfx
.config
.gb_addr_config
);
2907 WREG32(mmUVD_UDEC_DB_ADDR_CONFIG
, adev
->gfx
.config
.gb_addr_config
);
2908 WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG
, adev
->gfx
.config
.gb_addr_config
);
2910 gfx_v8_0_tiling_mode_table_init(adev
);
2912 gfx_v8_0_setup_rb(adev
, adev
->gfx
.config
.max_shader_engines
,
2913 adev
->gfx
.config
.max_sh_per_se
,
2914 adev
->gfx
.config
.max_backends_per_se
);
2916 /* XXX SH_MEM regs */
2917 /* where to put LDS, scratch, GPUVM in FSA64 space */
2918 mutex_lock(&adev
->srbm_mutex
);
2919 for (i
= 0; i
< 16; i
++) {
2920 vi_srbm_select(adev
, 0, 0, 0, i
);
2921 /* CP and shaders */
2923 tmp
= REG_SET_FIELD(0, SH_MEM_CONFIG
, DEFAULT_MTYPE
, MTYPE_UC
);
2924 tmp
= REG_SET_FIELD(tmp
, SH_MEM_CONFIG
, APE1_MTYPE
, MTYPE_UC
);
2925 tmp
= REG_SET_FIELD(tmp
, SH_MEM_CONFIG
, ALIGNMENT_MODE
,
2926 SH_MEM_ALIGNMENT_MODE_UNALIGNED
);
2927 WREG32(mmSH_MEM_CONFIG
, tmp
);
2929 tmp
= REG_SET_FIELD(0, SH_MEM_CONFIG
, DEFAULT_MTYPE
, MTYPE_NC
);
2930 tmp
= REG_SET_FIELD(tmp
, SH_MEM_CONFIG
, APE1_MTYPE
, MTYPE_NC
);
2931 tmp
= REG_SET_FIELD(tmp
, SH_MEM_CONFIG
, ALIGNMENT_MODE
,
2932 SH_MEM_ALIGNMENT_MODE_UNALIGNED
);
2933 WREG32(mmSH_MEM_CONFIG
, tmp
);
2936 WREG32(mmSH_MEM_APE1_BASE
, 1);
2937 WREG32(mmSH_MEM_APE1_LIMIT
, 0);
2938 WREG32(mmSH_MEM_BASES
, 0);
2940 vi_srbm_select(adev
, 0, 0, 0, 0);
2941 mutex_unlock(&adev
->srbm_mutex
);
2943 gfx_v8_0_init_compute_vmid(adev
);
2945 mutex_lock(&adev
->grbm_idx_mutex
);
2947 * making sure that the following register writes will be broadcasted
2948 * to all the shaders
2950 gfx_v8_0_select_se_sh(adev
, 0xffffffff, 0xffffffff);
2952 WREG32(mmPA_SC_FIFO_SIZE
,
2953 (adev
->gfx
.config
.sc_prim_fifo_size_frontend
<<
2954 PA_SC_FIFO_SIZE__SC_FRONTEND_PRIM_FIFO_SIZE__SHIFT
) |
2955 (adev
->gfx
.config
.sc_prim_fifo_size_backend
<<
2956 PA_SC_FIFO_SIZE__SC_BACKEND_PRIM_FIFO_SIZE__SHIFT
) |
2957 (adev
->gfx
.config
.sc_hiz_tile_fifo_size
<<
2958 PA_SC_FIFO_SIZE__SC_HIZ_TILE_FIFO_SIZE__SHIFT
) |
2959 (adev
->gfx
.config
.sc_earlyz_tile_fifo_size
<<
2960 PA_SC_FIFO_SIZE__SC_EARLYZ_TILE_FIFO_SIZE__SHIFT
));
2961 mutex_unlock(&adev
->grbm_idx_mutex
);
2965 static void gfx_v8_0_wait_for_rlc_serdes(struct amdgpu_device
*adev
)
2970 mutex_lock(&adev
->grbm_idx_mutex
);
2971 for (i
= 0; i
< adev
->gfx
.config
.max_shader_engines
; i
++) {
2972 for (j
= 0; j
< adev
->gfx
.config
.max_sh_per_se
; j
++) {
2973 gfx_v8_0_select_se_sh(adev
, i
, j
);
2974 for (k
= 0; k
< adev
->usec_timeout
; k
++) {
2975 if (RREG32(mmRLC_SERDES_CU_MASTER_BUSY
) == 0)
2981 gfx_v8_0_select_se_sh(adev
, 0xffffffff, 0xffffffff);
2982 mutex_unlock(&adev
->grbm_idx_mutex
);
2984 mask
= RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK
|
2985 RLC_SERDES_NONCU_MASTER_BUSY__GC_MASTER_BUSY_MASK
|
2986 RLC_SERDES_NONCU_MASTER_BUSY__TC0_MASTER_BUSY_MASK
|
2987 RLC_SERDES_NONCU_MASTER_BUSY__TC1_MASTER_BUSY_MASK
;
2988 for (k
= 0; k
< adev
->usec_timeout
; k
++) {
2989 if ((RREG32(mmRLC_SERDES_NONCU_MASTER_BUSY
) & mask
) == 0)
2995 static void gfx_v8_0_enable_gui_idle_interrupt(struct amdgpu_device
*adev
,
2998 u32 tmp
= RREG32(mmCP_INT_CNTL_RING0
);
3001 tmp
= REG_SET_FIELD(tmp
, CP_INT_CNTL_RING0
, CNTX_BUSY_INT_ENABLE
, 1);
3002 tmp
= REG_SET_FIELD(tmp
, CP_INT_CNTL_RING0
, CNTX_EMPTY_INT_ENABLE
, 1);
3003 tmp
= REG_SET_FIELD(tmp
, CP_INT_CNTL_RING0
, CMP_BUSY_INT_ENABLE
, 1);
3004 tmp
= REG_SET_FIELD(tmp
, CP_INT_CNTL_RING0
, GFX_IDLE_INT_ENABLE
, 1);
3006 tmp
= REG_SET_FIELD(tmp
, CP_INT_CNTL_RING0
, CNTX_BUSY_INT_ENABLE
, 0);
3007 tmp
= REG_SET_FIELD(tmp
, CP_INT_CNTL_RING0
, CNTX_EMPTY_INT_ENABLE
, 0);
3008 tmp
= REG_SET_FIELD(tmp
, CP_INT_CNTL_RING0
, CMP_BUSY_INT_ENABLE
, 0);
3009 tmp
= REG_SET_FIELD(tmp
, CP_INT_CNTL_RING0
, GFX_IDLE_INT_ENABLE
, 0);
3011 WREG32(mmCP_INT_CNTL_RING0
, tmp
);
3014 void gfx_v8_0_rlc_stop(struct amdgpu_device
*adev
)
3016 u32 tmp
= RREG32(mmRLC_CNTL
);
3018 tmp
= REG_SET_FIELD(tmp
, RLC_CNTL
, RLC_ENABLE_F32
, 0);
3019 WREG32(mmRLC_CNTL
, tmp
);
3021 gfx_v8_0_enable_gui_idle_interrupt(adev
, false);
3023 gfx_v8_0_wait_for_rlc_serdes(adev
);
3026 static void gfx_v8_0_rlc_reset(struct amdgpu_device
*adev
)
3028 u32 tmp
= RREG32(mmGRBM_SOFT_RESET
);
3030 tmp
= REG_SET_FIELD(tmp
, GRBM_SOFT_RESET
, SOFT_RESET_RLC
, 1);
3031 WREG32(mmGRBM_SOFT_RESET
, tmp
);
3033 tmp
= REG_SET_FIELD(tmp
, GRBM_SOFT_RESET
, SOFT_RESET_RLC
, 0);
3034 WREG32(mmGRBM_SOFT_RESET
, tmp
);
3038 static void gfx_v8_0_rlc_start(struct amdgpu_device
*adev
)
3040 u32 tmp
= RREG32(mmRLC_CNTL
);
3042 tmp
= REG_SET_FIELD(tmp
, RLC_CNTL
, RLC_ENABLE_F32
, 1);
3043 WREG32(mmRLC_CNTL
, tmp
);
3045 /* carrizo do enable cp interrupt after cp inited */
3046 if (!(adev
->flags
& AMD_IS_APU
))
3047 gfx_v8_0_enable_gui_idle_interrupt(adev
, true);
3052 static int gfx_v8_0_rlc_load_microcode(struct amdgpu_device
*adev
)
3054 const struct rlc_firmware_header_v2_0
*hdr
;
3055 const __le32
*fw_data
;
3056 unsigned i
, fw_size
;
3058 if (!adev
->gfx
.rlc_fw
)
3061 hdr
= (const struct rlc_firmware_header_v2_0
*)adev
->gfx
.rlc_fw
->data
;
3062 amdgpu_ucode_print_rlc_hdr(&hdr
->header
);
3064 fw_data
= (const __le32
*)(adev
->gfx
.rlc_fw
->data
+
3065 le32_to_cpu(hdr
->header
.ucode_array_offset_bytes
));
3066 fw_size
= le32_to_cpu(hdr
->header
.ucode_size_bytes
) / 4;
3068 WREG32(mmRLC_GPM_UCODE_ADDR
, 0);
3069 for (i
= 0; i
< fw_size
; i
++)
3070 WREG32(mmRLC_GPM_UCODE_DATA
, le32_to_cpup(fw_data
++));
3071 WREG32(mmRLC_GPM_UCODE_ADDR
, adev
->gfx
.rlc_fw_version
);
3076 static int gfx_v8_0_rlc_resume(struct amdgpu_device
*adev
)
3080 gfx_v8_0_rlc_stop(adev
);
3083 WREG32(mmRLC_CGCG_CGLS_CTRL
, 0);
3086 WREG32(mmRLC_PG_CNTL
, 0);
3088 gfx_v8_0_rlc_reset(adev
);
3090 if (!adev
->firmware
.smu_load
) {
3091 /* legacy rlc firmware loading */
3092 r
= gfx_v8_0_rlc_load_microcode(adev
);
3096 r
= adev
->smu
.smumgr_funcs
->check_fw_load_finish(adev
,
3097 AMDGPU_UCODE_ID_RLC_G
);
3102 gfx_v8_0_rlc_start(adev
);
3107 static void gfx_v8_0_cp_gfx_enable(struct amdgpu_device
*adev
, bool enable
)
3110 u32 tmp
= RREG32(mmCP_ME_CNTL
);
3113 tmp
= REG_SET_FIELD(tmp
, CP_ME_CNTL
, ME_HALT
, 0);
3114 tmp
= REG_SET_FIELD(tmp
, CP_ME_CNTL
, PFP_HALT
, 0);
3115 tmp
= REG_SET_FIELD(tmp
, CP_ME_CNTL
, CE_HALT
, 0);
3117 tmp
= REG_SET_FIELD(tmp
, CP_ME_CNTL
, ME_HALT
, 1);
3118 tmp
= REG_SET_FIELD(tmp
, CP_ME_CNTL
, PFP_HALT
, 1);
3119 tmp
= REG_SET_FIELD(tmp
, CP_ME_CNTL
, CE_HALT
, 1);
3120 for (i
= 0; i
< adev
->gfx
.num_gfx_rings
; i
++)
3121 adev
->gfx
.gfx_ring
[i
].ready
= false;
3123 WREG32(mmCP_ME_CNTL
, tmp
);
3127 static int gfx_v8_0_cp_gfx_load_microcode(struct amdgpu_device
*adev
)
3129 const struct gfx_firmware_header_v1_0
*pfp_hdr
;
3130 const struct gfx_firmware_header_v1_0
*ce_hdr
;
3131 const struct gfx_firmware_header_v1_0
*me_hdr
;
3132 const __le32
*fw_data
;
3133 unsigned i
, fw_size
;
3135 if (!adev
->gfx
.me_fw
|| !adev
->gfx
.pfp_fw
|| !adev
->gfx
.ce_fw
)
3138 pfp_hdr
= (const struct gfx_firmware_header_v1_0
*)
3139 adev
->gfx
.pfp_fw
->data
;
3140 ce_hdr
= (const struct gfx_firmware_header_v1_0
*)
3141 adev
->gfx
.ce_fw
->data
;
3142 me_hdr
= (const struct gfx_firmware_header_v1_0
*)
3143 adev
->gfx
.me_fw
->data
;
3145 amdgpu_ucode_print_gfx_hdr(&pfp_hdr
->header
);
3146 amdgpu_ucode_print_gfx_hdr(&ce_hdr
->header
);
3147 amdgpu_ucode_print_gfx_hdr(&me_hdr
->header
);
3149 gfx_v8_0_cp_gfx_enable(adev
, false);
3152 fw_data
= (const __le32
*)
3153 (adev
->gfx
.pfp_fw
->data
+
3154 le32_to_cpu(pfp_hdr
->header
.ucode_array_offset_bytes
));
3155 fw_size
= le32_to_cpu(pfp_hdr
->header
.ucode_size_bytes
) / 4;
3156 WREG32(mmCP_PFP_UCODE_ADDR
, 0);
3157 for (i
= 0; i
< fw_size
; i
++)
3158 WREG32(mmCP_PFP_UCODE_DATA
, le32_to_cpup(fw_data
++));
3159 WREG32(mmCP_PFP_UCODE_ADDR
, adev
->gfx
.pfp_fw_version
);
3162 fw_data
= (const __le32
*)
3163 (adev
->gfx
.ce_fw
->data
+
3164 le32_to_cpu(ce_hdr
->header
.ucode_array_offset_bytes
));
3165 fw_size
= le32_to_cpu(ce_hdr
->header
.ucode_size_bytes
) / 4;
3166 WREG32(mmCP_CE_UCODE_ADDR
, 0);
3167 for (i
= 0; i
< fw_size
; i
++)
3168 WREG32(mmCP_CE_UCODE_DATA
, le32_to_cpup(fw_data
++));
3169 WREG32(mmCP_CE_UCODE_ADDR
, adev
->gfx
.ce_fw_version
);
3172 fw_data
= (const __le32
*)
3173 (adev
->gfx
.me_fw
->data
+
3174 le32_to_cpu(me_hdr
->header
.ucode_array_offset_bytes
));
3175 fw_size
= le32_to_cpu(me_hdr
->header
.ucode_size_bytes
) / 4;
3176 WREG32(mmCP_ME_RAM_WADDR
, 0);
3177 for (i
= 0; i
< fw_size
; i
++)
3178 WREG32(mmCP_ME_RAM_DATA
, le32_to_cpup(fw_data
++));
3179 WREG32(mmCP_ME_RAM_WADDR
, adev
->gfx
.me_fw_version
);
3184 static u32
gfx_v8_0_get_csb_size(struct amdgpu_device
*adev
)
3187 const struct cs_section_def
*sect
= NULL
;
3188 const struct cs_extent_def
*ext
= NULL
;
3190 /* begin clear state */
3192 /* context control state */
3195 for (sect
= vi_cs_data
; sect
->section
!= NULL
; ++sect
) {
3196 for (ext
= sect
->section
; ext
->extent
!= NULL
; ++ext
) {
3197 if (sect
->id
== SECT_CONTEXT
)
3198 count
+= 2 + ext
->reg_count
;
3203 /* pa_sc_raster_config/pa_sc_raster_config1 */
3205 /* end clear state */
3213 static int gfx_v8_0_cp_gfx_start(struct amdgpu_device
*adev
)
3215 struct amdgpu_ring
*ring
= &adev
->gfx
.gfx_ring
[0];
3216 const struct cs_section_def
*sect
= NULL
;
3217 const struct cs_extent_def
*ext
= NULL
;
3221 WREG32(mmCP_MAX_CONTEXT
, adev
->gfx
.config
.max_hw_contexts
- 1);
3222 WREG32(mmCP_ENDIAN_SWAP
, 0);
3223 WREG32(mmCP_DEVICE_ID
, 1);
3225 gfx_v8_0_cp_gfx_enable(adev
, true);
3227 r
= amdgpu_ring_lock(ring
, gfx_v8_0_get_csb_size(adev
) + 4);
3229 DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r
);
3233 /* clear state buffer */
3234 amdgpu_ring_write(ring
, PACKET3(PACKET3_PREAMBLE_CNTL
, 0));
3235 amdgpu_ring_write(ring
, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE
);
3237 amdgpu_ring_write(ring
, PACKET3(PACKET3_CONTEXT_CONTROL
, 1));
3238 amdgpu_ring_write(ring
, 0x80000000);
3239 amdgpu_ring_write(ring
, 0x80000000);
3241 for (sect
= vi_cs_data
; sect
->section
!= NULL
; ++sect
) {
3242 for (ext
= sect
->section
; ext
->extent
!= NULL
; ++ext
) {
3243 if (sect
->id
== SECT_CONTEXT
) {
3244 amdgpu_ring_write(ring
,
3245 PACKET3(PACKET3_SET_CONTEXT_REG
,
3247 amdgpu_ring_write(ring
,
3248 ext
->reg_index
- PACKET3_SET_CONTEXT_REG_START
);
3249 for (i
= 0; i
< ext
->reg_count
; i
++)
3250 amdgpu_ring_write(ring
, ext
->extent
[i
]);
3255 amdgpu_ring_write(ring
, PACKET3(PACKET3_SET_CONTEXT_REG
, 2));
3256 amdgpu_ring_write(ring
, mmPA_SC_RASTER_CONFIG
- PACKET3_SET_CONTEXT_REG_START
);
3257 switch (adev
->asic_type
) {
3259 amdgpu_ring_write(ring
, 0x16000012);
3260 amdgpu_ring_write(ring
, 0x0000002A);
3263 amdgpu_ring_write(ring
, 0x3a00161a);
3264 amdgpu_ring_write(ring
, 0x0000002e);
3268 amdgpu_ring_write(ring
, 0x00000002);
3269 amdgpu_ring_write(ring
, 0x00000000);
3272 amdgpu_ring_write(ring
, 0x00000000);
3273 amdgpu_ring_write(ring
, 0x00000000);
3279 amdgpu_ring_write(ring
, PACKET3(PACKET3_PREAMBLE_CNTL
, 0));
3280 amdgpu_ring_write(ring
, PACKET3_PREAMBLE_END_CLEAR_STATE
);
3282 amdgpu_ring_write(ring
, PACKET3(PACKET3_CLEAR_STATE
, 0));
3283 amdgpu_ring_write(ring
, 0);
3285 /* init the CE partitions */
3286 amdgpu_ring_write(ring
, PACKET3(PACKET3_SET_BASE
, 2));
3287 amdgpu_ring_write(ring
, PACKET3_BASE_INDEX(CE_PARTITION_BASE
));
3288 amdgpu_ring_write(ring
, 0x8000);
3289 amdgpu_ring_write(ring
, 0x8000);
3291 amdgpu_ring_unlock_commit(ring
);
3296 static int gfx_v8_0_cp_gfx_resume(struct amdgpu_device
*adev
)
3298 struct amdgpu_ring
*ring
;
3301 u64 rb_addr
, rptr_addr
;
3304 /* Set the write pointer delay */
3305 WREG32(mmCP_RB_WPTR_DELAY
, 0);
3307 /* set the RB to use vmid 0 */
3308 WREG32(mmCP_RB_VMID
, 0);
3310 /* Set ring buffer size */
3311 ring
= &adev
->gfx
.gfx_ring
[0];
3312 rb_bufsz
= order_base_2(ring
->ring_size
/ 8);
3313 tmp
= REG_SET_FIELD(0, CP_RB0_CNTL
, RB_BUFSZ
, rb_bufsz
);
3314 tmp
= REG_SET_FIELD(tmp
, CP_RB0_CNTL
, RB_BLKSZ
, rb_bufsz
- 2);
3315 tmp
= REG_SET_FIELD(tmp
, CP_RB0_CNTL
, MTYPE
, 3);
3316 tmp
= REG_SET_FIELD(tmp
, CP_RB0_CNTL
, MIN_IB_AVAILSZ
, 1);
3318 tmp
= REG_SET_FIELD(tmp
, CP_RB0_CNTL
, BUF_SWAP
, 1);
3320 WREG32(mmCP_RB0_CNTL
, tmp
);
3322 /* Initialize the ring buffer's read and write pointers */
3323 WREG32(mmCP_RB0_CNTL
, tmp
| CP_RB0_CNTL__RB_RPTR_WR_ENA_MASK
);
3325 WREG32(mmCP_RB0_WPTR
, ring
->wptr
);
3327 /* set the wb address wether it's enabled or not */
3328 rptr_addr
= adev
->wb
.gpu_addr
+ (ring
->rptr_offs
* 4);
3329 WREG32(mmCP_RB0_RPTR_ADDR
, lower_32_bits(rptr_addr
));
3330 WREG32(mmCP_RB0_RPTR_ADDR_HI
, upper_32_bits(rptr_addr
) & 0xFF);
3333 WREG32(mmCP_RB0_CNTL
, tmp
);
3335 rb_addr
= ring
->gpu_addr
>> 8;
3336 WREG32(mmCP_RB0_BASE
, rb_addr
);
3337 WREG32(mmCP_RB0_BASE_HI
, upper_32_bits(rb_addr
));
3339 /* no gfx doorbells on iceland */
3340 if (adev
->asic_type
!= CHIP_TOPAZ
) {
3341 tmp
= RREG32(mmCP_RB_DOORBELL_CONTROL
);
3342 if (ring
->use_doorbell
) {
3343 tmp
= REG_SET_FIELD(tmp
, CP_RB_DOORBELL_CONTROL
,
3344 DOORBELL_OFFSET
, ring
->doorbell_index
);
3345 tmp
= REG_SET_FIELD(tmp
, CP_RB_DOORBELL_CONTROL
,
3348 tmp
= REG_SET_FIELD(tmp
, CP_RB_DOORBELL_CONTROL
,
3351 WREG32(mmCP_RB_DOORBELL_CONTROL
, tmp
);
3353 if (adev
->asic_type
== CHIP_TONGA
) {
3354 tmp
= REG_SET_FIELD(0, CP_RB_DOORBELL_RANGE_LOWER
,
3355 DOORBELL_RANGE_LOWER
,
3356 AMDGPU_DOORBELL_GFX_RING0
);
3357 WREG32(mmCP_RB_DOORBELL_RANGE_LOWER
, tmp
);
3359 WREG32(mmCP_RB_DOORBELL_RANGE_UPPER
,
3360 CP_RB_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER_MASK
);
3365 /* start the ring */
3366 gfx_v8_0_cp_gfx_start(adev
);
3368 r
= amdgpu_ring_test_ring(ring
);
3370 ring
->ready
= false;
3377 static void gfx_v8_0_cp_compute_enable(struct amdgpu_device
*adev
, bool enable
)
3382 WREG32(mmCP_MEC_CNTL
, 0);
3384 WREG32(mmCP_MEC_CNTL
, (CP_MEC_CNTL__MEC_ME1_HALT_MASK
| CP_MEC_CNTL__MEC_ME2_HALT_MASK
));
3385 for (i
= 0; i
< adev
->gfx
.num_compute_rings
; i
++)
3386 adev
->gfx
.compute_ring
[i
].ready
= false;
3391 static int gfx_v8_0_cp_compute_start(struct amdgpu_device
*adev
)
3393 gfx_v8_0_cp_compute_enable(adev
, true);
3398 static int gfx_v8_0_cp_compute_load_microcode(struct amdgpu_device
*adev
)
3400 const struct gfx_firmware_header_v1_0
*mec_hdr
;
3401 const __le32
*fw_data
;
3402 unsigned i
, fw_size
;
3404 if (!adev
->gfx
.mec_fw
)
3407 gfx_v8_0_cp_compute_enable(adev
, false);
3409 mec_hdr
= (const struct gfx_firmware_header_v1_0
*)adev
->gfx
.mec_fw
->data
;
3410 amdgpu_ucode_print_gfx_hdr(&mec_hdr
->header
);
3412 fw_data
= (const __le32
*)
3413 (adev
->gfx
.mec_fw
->data
+
3414 le32_to_cpu(mec_hdr
->header
.ucode_array_offset_bytes
));
3415 fw_size
= le32_to_cpu(mec_hdr
->header
.ucode_size_bytes
) / 4;
3418 WREG32(mmCP_MEC_ME1_UCODE_ADDR
, 0);
3419 for (i
= 0; i
< fw_size
; i
++)
3420 WREG32(mmCP_MEC_ME1_UCODE_DATA
, le32_to_cpup(fw_data
+i
));
3421 WREG32(mmCP_MEC_ME1_UCODE_ADDR
, adev
->gfx
.mec_fw_version
);
3423 /* Loading MEC2 firmware is only necessary if MEC2 should run different microcode than MEC1. */
3424 if (adev
->gfx
.mec2_fw
) {
3425 const struct gfx_firmware_header_v1_0
*mec2_hdr
;
3427 mec2_hdr
= (const struct gfx_firmware_header_v1_0
*)adev
->gfx
.mec2_fw
->data
;
3428 amdgpu_ucode_print_gfx_hdr(&mec2_hdr
->header
);
3430 fw_data
= (const __le32
*)
3431 (adev
->gfx
.mec2_fw
->data
+
3432 le32_to_cpu(mec2_hdr
->header
.ucode_array_offset_bytes
));
3433 fw_size
= le32_to_cpu(mec2_hdr
->header
.ucode_size_bytes
) / 4;
3435 WREG32(mmCP_MEC_ME2_UCODE_ADDR
, 0);
3436 for (i
= 0; i
< fw_size
; i
++)
3437 WREG32(mmCP_MEC_ME2_UCODE_DATA
, le32_to_cpup(fw_data
+i
));
3438 WREG32(mmCP_MEC_ME2_UCODE_ADDR
, adev
->gfx
.mec2_fw_version
);
/* vi_mqd - VI memory queue descriptor, a fixed 512-dword (2 KiB) block
 * shared with the CP microcode. Field order and count are ABI: each field
 * occupies exactly one dword at the ordinal noted beside it, followed by a
 * 256-dword scratch area reserved for the ucode. Do not reorder or resize. */
struct vi_mqd {
	uint32_t header; /* ordinal0 */
	uint32_t compute_dispatch_initiator; /* ordinal1 */
	uint32_t compute_dim_x; /* ordinal2 */
	uint32_t compute_dim_y; /* ordinal3 */
	uint32_t compute_dim_z; /* ordinal4 */
	uint32_t compute_start_x; /* ordinal5 */
	uint32_t compute_start_y; /* ordinal6 */
	uint32_t compute_start_z; /* ordinal7 */
	uint32_t compute_num_thread_x; /* ordinal8 */
	uint32_t compute_num_thread_y; /* ordinal9 */
	uint32_t compute_num_thread_z; /* ordinal10 */
	uint32_t compute_pipelinestat_enable; /* ordinal11 */
	uint32_t compute_perfcount_enable; /* ordinal12 */
	uint32_t compute_pgm_lo; /* ordinal13 */
	uint32_t compute_pgm_hi; /* ordinal14 */
	uint32_t compute_tba_lo; /* ordinal15 */
	uint32_t compute_tba_hi; /* ordinal16 */
	uint32_t compute_tma_lo; /* ordinal17 */
	uint32_t compute_tma_hi; /* ordinal18 */
	uint32_t compute_pgm_rsrc1; /* ordinal19 */
	uint32_t compute_pgm_rsrc2; /* ordinal20 */
	uint32_t compute_vmid; /* ordinal21 */
	uint32_t compute_resource_limits; /* ordinal22 */
	uint32_t compute_static_thread_mgmt_se0; /* ordinal23 */
	uint32_t compute_static_thread_mgmt_se1; /* ordinal24 */
	uint32_t compute_tmpring_size; /* ordinal25 */
	uint32_t compute_static_thread_mgmt_se2; /* ordinal26 */
	uint32_t compute_static_thread_mgmt_se3; /* ordinal27 */
	uint32_t compute_restart_x; /* ordinal28 */
	uint32_t compute_restart_y; /* ordinal29 */
	uint32_t compute_restart_z; /* ordinal30 */
	uint32_t compute_thread_trace_enable; /* ordinal31 */
	uint32_t compute_misc_reserved; /* ordinal32 */
	uint32_t compute_dispatch_id; /* ordinal33 */
	uint32_t compute_threadgroup_id; /* ordinal34 */
	uint32_t compute_relaunch; /* ordinal35 */
	uint32_t compute_wave_restore_addr_lo; /* ordinal36 */
	uint32_t compute_wave_restore_addr_hi; /* ordinal37 */
	uint32_t compute_wave_restore_control; /* ordinal38 */
	uint32_t reserved9; /* ordinal39 */
	uint32_t reserved10; /* ordinal40 */
	uint32_t reserved11; /* ordinal41 */
	uint32_t reserved12; /* ordinal42 */
	uint32_t reserved13; /* ordinal43 */
	uint32_t reserved14; /* ordinal44 */
	uint32_t reserved15; /* ordinal45 */
	uint32_t reserved16; /* ordinal46 */
	uint32_t reserved17; /* ordinal47 */
	uint32_t reserved18; /* ordinal48 */
	uint32_t reserved19; /* ordinal49 */
	uint32_t reserved20; /* ordinal50 */
	uint32_t reserved21; /* ordinal51 */
	uint32_t reserved22; /* ordinal52 */
	uint32_t reserved23; /* ordinal53 */
	uint32_t reserved24; /* ordinal54 */
	uint32_t reserved25; /* ordinal55 */
	uint32_t reserved26; /* ordinal56 */
	uint32_t reserved27; /* ordinal57 */
	uint32_t reserved28; /* ordinal58 */
	uint32_t reserved29; /* ordinal59 */
	uint32_t reserved30; /* ordinal60 */
	uint32_t reserved31; /* ordinal61 */
	uint32_t reserved32; /* ordinal62 */
	uint32_t reserved33; /* ordinal63 */
	uint32_t reserved34; /* ordinal64 */
	uint32_t compute_user_data_0; /* ordinal65 */
	uint32_t compute_user_data_1; /* ordinal66 */
	uint32_t compute_user_data_2; /* ordinal67 */
	uint32_t compute_user_data_3; /* ordinal68 */
	uint32_t compute_user_data_4; /* ordinal69 */
	uint32_t compute_user_data_5; /* ordinal70 */
	uint32_t compute_user_data_6; /* ordinal71 */
	uint32_t compute_user_data_7; /* ordinal72 */
	uint32_t compute_user_data_8; /* ordinal73 */
	uint32_t compute_user_data_9; /* ordinal74 */
	uint32_t compute_user_data_10; /* ordinal75 */
	uint32_t compute_user_data_11; /* ordinal76 */
	uint32_t compute_user_data_12; /* ordinal77 */
	uint32_t compute_user_data_13; /* ordinal78 */
	uint32_t compute_user_data_14; /* ordinal79 */
	uint32_t compute_user_data_15; /* ordinal80 */
	uint32_t cp_compute_csinvoc_count_lo; /* ordinal81 */
	uint32_t cp_compute_csinvoc_count_hi; /* ordinal82 */
	uint32_t reserved35; /* ordinal83 */
	uint32_t reserved36; /* ordinal84 */
	uint32_t reserved37; /* ordinal85 */
	uint32_t cp_mqd_query_time_lo; /* ordinal86 */
	uint32_t cp_mqd_query_time_hi; /* ordinal87 */
	uint32_t cp_mqd_connect_start_time_lo; /* ordinal88 */
	uint32_t cp_mqd_connect_start_time_hi; /* ordinal89 */
	uint32_t cp_mqd_connect_end_time_lo; /* ordinal90 */
	uint32_t cp_mqd_connect_end_time_hi; /* ordinal91 */
	uint32_t cp_mqd_connect_end_wf_count; /* ordinal92 */
	uint32_t cp_mqd_connect_end_pq_rptr; /* ordinal93 */
	uint32_t cp_mqd_connect_end_pq_wptr; /* ordinal94 */
	uint32_t cp_mqd_connect_end_ib_rptr; /* ordinal95 */
	uint32_t reserved38; /* ordinal96 */
	uint32_t reserved39; /* ordinal97 */
	uint32_t cp_mqd_save_start_time_lo; /* ordinal98 */
	uint32_t cp_mqd_save_start_time_hi; /* ordinal99 */
	uint32_t cp_mqd_save_end_time_lo; /* ordinal100 */
	uint32_t cp_mqd_save_end_time_hi; /* ordinal101 */
	uint32_t cp_mqd_restore_start_time_lo; /* ordinal102 */
	uint32_t cp_mqd_restore_start_time_hi; /* ordinal103 */
	uint32_t cp_mqd_restore_end_time_lo; /* ordinal104 */
	uint32_t cp_mqd_restore_end_time_hi; /* ordinal105 */
	uint32_t reserved40; /* ordinal106 */
	uint32_t reserved41; /* ordinal107 */
	uint32_t gds_cs_ctxsw_cnt0; /* ordinal108 */
	uint32_t gds_cs_ctxsw_cnt1; /* ordinal109 */
	uint32_t gds_cs_ctxsw_cnt2; /* ordinal110 */
	uint32_t gds_cs_ctxsw_cnt3; /* ordinal111 */
	uint32_t reserved42; /* ordinal112 */
	uint32_t reserved43; /* ordinal113 */
	uint32_t cp_pq_exe_status_lo; /* ordinal114 */
	uint32_t cp_pq_exe_status_hi; /* ordinal115 */
	uint32_t cp_packet_id_lo; /* ordinal116 */
	uint32_t cp_packet_id_hi; /* ordinal117 */
	uint32_t cp_packet_exe_status_lo; /* ordinal118 */
	uint32_t cp_packet_exe_status_hi; /* ordinal119 */
	uint32_t gds_save_base_addr_lo; /* ordinal120 */
	uint32_t gds_save_base_addr_hi; /* ordinal121 */
	uint32_t gds_save_mask_lo; /* ordinal122 */
	uint32_t gds_save_mask_hi; /* ordinal123 */
	uint32_t ctx_save_base_addr_lo; /* ordinal124 */
	uint32_t ctx_save_base_addr_hi; /* ordinal125 */
	uint32_t reserved44; /* ordinal126 */
	uint32_t reserved45; /* ordinal127 */
	uint32_t cp_mqd_base_addr_lo; /* ordinal128 */
	uint32_t cp_mqd_base_addr_hi; /* ordinal129 */
	uint32_t cp_hqd_active; /* ordinal130 */
	uint32_t cp_hqd_vmid; /* ordinal131 */
	uint32_t cp_hqd_persistent_state; /* ordinal132 */
	uint32_t cp_hqd_pipe_priority; /* ordinal133 */
	uint32_t cp_hqd_queue_priority; /* ordinal134 */
	uint32_t cp_hqd_quantum; /* ordinal135 */
	uint32_t cp_hqd_pq_base_lo; /* ordinal136 */
	uint32_t cp_hqd_pq_base_hi; /* ordinal137 */
	uint32_t cp_hqd_pq_rptr; /* ordinal138 */
	uint32_t cp_hqd_pq_rptr_report_addr_lo; /* ordinal139 */
	uint32_t cp_hqd_pq_rptr_report_addr_hi; /* ordinal140 */
	uint32_t cp_hqd_pq_wptr_poll_addr; /* ordinal141 */
	uint32_t cp_hqd_pq_wptr_poll_addr_hi; /* ordinal142 */
	uint32_t cp_hqd_pq_doorbell_control; /* ordinal143 */
	uint32_t cp_hqd_pq_wptr; /* ordinal144 */
	uint32_t cp_hqd_pq_control; /* ordinal145 */
	uint32_t cp_hqd_ib_base_addr_lo; /* ordinal146 */
	uint32_t cp_hqd_ib_base_addr_hi; /* ordinal147 */
	uint32_t cp_hqd_ib_rptr; /* ordinal148 */
	uint32_t cp_hqd_ib_control; /* ordinal149 */
	uint32_t cp_hqd_iq_timer; /* ordinal150 */
	uint32_t cp_hqd_iq_rptr; /* ordinal151 */
	uint32_t cp_hqd_dequeue_request; /* ordinal152 */
	uint32_t cp_hqd_dma_offload; /* ordinal153 */
	uint32_t cp_hqd_sema_cmd; /* ordinal154 */
	uint32_t cp_hqd_msg_type; /* ordinal155 */
	uint32_t cp_hqd_atomic0_preop_lo; /* ordinal156 */
	uint32_t cp_hqd_atomic0_preop_hi; /* ordinal157 */
	uint32_t cp_hqd_atomic1_preop_lo; /* ordinal158 */
	uint32_t cp_hqd_atomic1_preop_hi; /* ordinal159 */
	uint32_t cp_hqd_hq_status0; /* ordinal160 */
	uint32_t cp_hqd_hq_control0; /* ordinal161 */
	uint32_t cp_mqd_control; /* ordinal162 */
	uint32_t cp_hqd_hq_status1; /* ordinal163 */
	uint32_t cp_hqd_hq_control1; /* ordinal164 */
	uint32_t cp_hqd_eop_base_addr_lo; /* ordinal165 */
	uint32_t cp_hqd_eop_base_addr_hi; /* ordinal166 */
	uint32_t cp_hqd_eop_control; /* ordinal167 */
	uint32_t cp_hqd_eop_rptr; /* ordinal168 */
	uint32_t cp_hqd_eop_wptr; /* ordinal169 */
	uint32_t cp_hqd_eop_done_events; /* ordinal170 */
	uint32_t cp_hqd_ctx_save_base_addr_lo; /* ordinal171 */
	uint32_t cp_hqd_ctx_save_base_addr_hi; /* ordinal172 */
	uint32_t cp_hqd_ctx_save_control; /* ordinal173 */
	uint32_t cp_hqd_cntl_stack_offset; /* ordinal174 */
	uint32_t cp_hqd_cntl_stack_size; /* ordinal175 */
	uint32_t cp_hqd_wg_state_offset; /* ordinal176 */
	uint32_t cp_hqd_ctx_save_size; /* ordinal177 */
	uint32_t cp_hqd_gds_resource_state; /* ordinal178 */
	uint32_t cp_hqd_error; /* ordinal179 */
	uint32_t cp_hqd_eop_wptr_mem; /* ordinal180 */
	uint32_t cp_hqd_eop_dones; /* ordinal181 */
	uint32_t reserved46; /* ordinal182 */
	uint32_t reserved47; /* ordinal183 */
	uint32_t reserved48; /* ordinal184 */
	uint32_t reserved49; /* ordinal185 */
	uint32_t reserved50; /* ordinal186 */
	uint32_t reserved51; /* ordinal187 */
	uint32_t reserved52; /* ordinal188 */
	uint32_t reserved53; /* ordinal189 */
	uint32_t reserved54; /* ordinal190 */
	uint32_t reserved55; /* ordinal191 */
	uint32_t iqtimer_pkt_header; /* ordinal192 */
	uint32_t iqtimer_pkt_dw0; /* ordinal193 */
	uint32_t iqtimer_pkt_dw1; /* ordinal194 */
	uint32_t iqtimer_pkt_dw2; /* ordinal195 */
	uint32_t iqtimer_pkt_dw3; /* ordinal196 */
	uint32_t iqtimer_pkt_dw4; /* ordinal197 */
	uint32_t iqtimer_pkt_dw5; /* ordinal198 */
	uint32_t iqtimer_pkt_dw6; /* ordinal199 */
	uint32_t iqtimer_pkt_dw7; /* ordinal200 */
	uint32_t iqtimer_pkt_dw8; /* ordinal201 */
	uint32_t iqtimer_pkt_dw9; /* ordinal202 */
	uint32_t iqtimer_pkt_dw10; /* ordinal203 */
	uint32_t iqtimer_pkt_dw11; /* ordinal204 */
	uint32_t iqtimer_pkt_dw12; /* ordinal205 */
	uint32_t iqtimer_pkt_dw13; /* ordinal206 */
	uint32_t iqtimer_pkt_dw14; /* ordinal207 */
	uint32_t iqtimer_pkt_dw15; /* ordinal208 */
	uint32_t iqtimer_pkt_dw16; /* ordinal209 */
	uint32_t iqtimer_pkt_dw17; /* ordinal210 */
	uint32_t iqtimer_pkt_dw18; /* ordinal211 */
	uint32_t iqtimer_pkt_dw19; /* ordinal212 */
	uint32_t iqtimer_pkt_dw20; /* ordinal213 */
	uint32_t iqtimer_pkt_dw21; /* ordinal214 */
	uint32_t iqtimer_pkt_dw22; /* ordinal215 */
	uint32_t iqtimer_pkt_dw23; /* ordinal216 */
	uint32_t iqtimer_pkt_dw24; /* ordinal217 */
	uint32_t iqtimer_pkt_dw25; /* ordinal218 */
	uint32_t iqtimer_pkt_dw26; /* ordinal219 */
	uint32_t iqtimer_pkt_dw27; /* ordinal220 */
	uint32_t iqtimer_pkt_dw28; /* ordinal221 */
	uint32_t iqtimer_pkt_dw29; /* ordinal222 */
	uint32_t iqtimer_pkt_dw30; /* ordinal223 */
	uint32_t iqtimer_pkt_dw31; /* ordinal224 */
	uint32_t reserved56; /* ordinal225 */
	uint32_t reserved57; /* ordinal226 */
	uint32_t reserved58; /* ordinal227 */
	uint32_t set_resources_header; /* ordinal228 */
	uint32_t set_resources_dw1; /* ordinal229 */
	uint32_t set_resources_dw2; /* ordinal230 */
	uint32_t set_resources_dw3; /* ordinal231 */
	uint32_t set_resources_dw4; /* ordinal232 */
	uint32_t set_resources_dw5; /* ordinal233 */
	uint32_t set_resources_dw6; /* ordinal234 */
	uint32_t set_resources_dw7; /* ordinal235 */
	uint32_t reserved59; /* ordinal236 */
	uint32_t reserved60; /* ordinal237 */
	uint32_t reserved61; /* ordinal238 */
	uint32_t reserved62; /* ordinal239 */
	uint32_t reserved63; /* ordinal240 */
	uint32_t reserved64; /* ordinal241 */
	uint32_t reserved65; /* ordinal242 */
	uint32_t reserved66; /* ordinal243 */
	uint32_t reserved67; /* ordinal244 */
	uint32_t reserved68; /* ordinal245 */
	uint32_t reserved69; /* ordinal246 */
	uint32_t reserved70; /* ordinal247 */
	uint32_t reserved71; /* ordinal248 */
	uint32_t reserved72; /* ordinal249 */
	uint32_t reserved73; /* ordinal250 */
	uint32_t reserved74; /* ordinal251 */
	uint32_t reserved75; /* ordinal252 */
	uint32_t reserved76; /* ordinal253 */
	uint32_t reserved77; /* ordinal254 */
	uint32_t reserved78; /* ordinal255 */

	uint32_t reserved_t[256]; /* Reserve 256 dword buffer used by ucode */
};
3705 static void gfx_v8_0_cp_compute_fini(struct amdgpu_device
*adev
)
3709 for (i
= 0; i
< adev
->gfx
.num_compute_rings
; i
++) {
3710 struct amdgpu_ring
*ring
= &adev
->gfx
.compute_ring
[i
];
3712 if (ring
->mqd_obj
) {
3713 r
= amdgpu_bo_reserve(ring
->mqd_obj
, false);
3714 if (unlikely(r
!= 0))
3715 dev_warn(adev
->dev
, "(%d) reserve MQD bo failed\n", r
);
3717 amdgpu_bo_unpin(ring
->mqd_obj
);
3718 amdgpu_bo_unreserve(ring
->mqd_obj
);
3720 amdgpu_bo_unref(&ring
->mqd_obj
);
3721 ring
->mqd_obj
= NULL
;
3726 static int gfx_v8_0_cp_compute_resume(struct amdgpu_device
*adev
)
3730 bool use_doorbell
= true;
3738 /* init the pipes */
3739 mutex_lock(&adev
->srbm_mutex
);
3740 for (i
= 0; i
< (adev
->gfx
.mec
.num_pipe
* adev
->gfx
.mec
.num_mec
); i
++) {
3741 int me
= (i
< 4) ? 1 : 2;
3742 int pipe
= (i
< 4) ? i
: (i
- 4);
3744 eop_gpu_addr
= adev
->gfx
.mec
.hpd_eop_gpu_addr
+ (i
* MEC_HPD_SIZE
);
3747 vi_srbm_select(adev
, me
, pipe
, 0, 0);
3749 /* write the EOP addr */
3750 WREG32(mmCP_HQD_EOP_BASE_ADDR
, eop_gpu_addr
);
3751 WREG32(mmCP_HQD_EOP_BASE_ADDR_HI
, upper_32_bits(eop_gpu_addr
));
3753 /* set the VMID assigned */
3754 WREG32(mmCP_HQD_VMID
, 0);
3756 /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
3757 tmp
= RREG32(mmCP_HQD_EOP_CONTROL
);
3758 tmp
= REG_SET_FIELD(tmp
, CP_HQD_EOP_CONTROL
, EOP_SIZE
,
3759 (order_base_2(MEC_HPD_SIZE
/ 4) - 1));
3760 WREG32(mmCP_HQD_EOP_CONTROL
, tmp
);
3762 vi_srbm_select(adev
, 0, 0, 0, 0);
3763 mutex_unlock(&adev
->srbm_mutex
);
3765 /* init the queues. Just two for now. */
3766 for (i
= 0; i
< adev
->gfx
.num_compute_rings
; i
++) {
3767 struct amdgpu_ring
*ring
= &adev
->gfx
.compute_ring
[i
];
3769 if (ring
->mqd_obj
== NULL
) {
3770 r
= amdgpu_bo_create(adev
,
3771 sizeof(struct vi_mqd
),
3773 AMDGPU_GEM_DOMAIN_GTT
, 0, NULL
,
3774 NULL
, &ring
->mqd_obj
);
3776 dev_warn(adev
->dev
, "(%d) create MQD bo failed\n", r
);
3781 r
= amdgpu_bo_reserve(ring
->mqd_obj
, false);
3782 if (unlikely(r
!= 0)) {
3783 gfx_v8_0_cp_compute_fini(adev
);
3786 r
= amdgpu_bo_pin(ring
->mqd_obj
, AMDGPU_GEM_DOMAIN_GTT
,
3789 dev_warn(adev
->dev
, "(%d) pin MQD bo failed\n", r
);
3790 gfx_v8_0_cp_compute_fini(adev
);
3793 r
= amdgpu_bo_kmap(ring
->mqd_obj
, (void **)&buf
);
3795 dev_warn(adev
->dev
, "(%d) map MQD bo failed\n", r
);
3796 gfx_v8_0_cp_compute_fini(adev
);
3800 /* init the mqd struct */
3801 memset(buf
, 0, sizeof(struct vi_mqd
));
3803 mqd
= (struct vi_mqd
*)buf
;
3804 mqd
->header
= 0xC0310800;
3805 mqd
->compute_pipelinestat_enable
= 0x00000001;
3806 mqd
->compute_static_thread_mgmt_se0
= 0xffffffff;
3807 mqd
->compute_static_thread_mgmt_se1
= 0xffffffff;
3808 mqd
->compute_static_thread_mgmt_se2
= 0xffffffff;
3809 mqd
->compute_static_thread_mgmt_se3
= 0xffffffff;
3810 mqd
->compute_misc_reserved
= 0x00000003;
3812 mutex_lock(&adev
->srbm_mutex
);
3813 vi_srbm_select(adev
, ring
->me
,
3817 /* disable wptr polling */
3818 tmp
= RREG32(mmCP_PQ_WPTR_POLL_CNTL
);
3819 tmp
= REG_SET_FIELD(tmp
, CP_PQ_WPTR_POLL_CNTL
, EN
, 0);
3820 WREG32(mmCP_PQ_WPTR_POLL_CNTL
, tmp
);
3822 mqd
->cp_hqd_eop_base_addr_lo
=
3823 RREG32(mmCP_HQD_EOP_BASE_ADDR
);
3824 mqd
->cp_hqd_eop_base_addr_hi
=
3825 RREG32(mmCP_HQD_EOP_BASE_ADDR_HI
);
3827 /* enable doorbell? */
3828 tmp
= RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL
);
3830 tmp
= REG_SET_FIELD(tmp
, CP_HQD_PQ_DOORBELL_CONTROL
, DOORBELL_EN
, 1);
3832 tmp
= REG_SET_FIELD(tmp
, CP_HQD_PQ_DOORBELL_CONTROL
, DOORBELL_EN
, 0);
3834 WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL
, tmp
);
3835 mqd
->cp_hqd_pq_doorbell_control
= tmp
;
3837 /* disable the queue if it's active */
3838 mqd
->cp_hqd_dequeue_request
= 0;
3839 mqd
->cp_hqd_pq_rptr
= 0;
3840 mqd
->cp_hqd_pq_wptr
= 0;
3841 if (RREG32(mmCP_HQD_ACTIVE
) & 1) {
3842 WREG32(mmCP_HQD_DEQUEUE_REQUEST
, 1);
3843 for (j
= 0; j
< adev
->usec_timeout
; j
++) {
3844 if (!(RREG32(mmCP_HQD_ACTIVE
) & 1))
3848 WREG32(mmCP_HQD_DEQUEUE_REQUEST
, mqd
->cp_hqd_dequeue_request
);
3849 WREG32(mmCP_HQD_PQ_RPTR
, mqd
->cp_hqd_pq_rptr
);
3850 WREG32(mmCP_HQD_PQ_WPTR
, mqd
->cp_hqd_pq_wptr
);
3853 /* set the pointer to the MQD */
3854 mqd
->cp_mqd_base_addr_lo
= mqd_gpu_addr
& 0xfffffffc;
3855 mqd
->cp_mqd_base_addr_hi
= upper_32_bits(mqd_gpu_addr
);
3856 WREG32(mmCP_MQD_BASE_ADDR
, mqd
->cp_mqd_base_addr_lo
);
3857 WREG32(mmCP_MQD_BASE_ADDR_HI
, mqd
->cp_mqd_base_addr_hi
);
3859 /* set MQD vmid to 0 */
3860 tmp
= RREG32(mmCP_MQD_CONTROL
);
3861 tmp
= REG_SET_FIELD(tmp
, CP_MQD_CONTROL
, VMID
, 0);
3862 WREG32(mmCP_MQD_CONTROL
, tmp
);
3863 mqd
->cp_mqd_control
= tmp
;
3865 /* set the pointer to the HQD, this is similar CP_RB0_BASE/_HI */
3866 hqd_gpu_addr
= ring
->gpu_addr
>> 8;
3867 mqd
->cp_hqd_pq_base_lo
= hqd_gpu_addr
;
3868 mqd
->cp_hqd_pq_base_hi
= upper_32_bits(hqd_gpu_addr
);
3869 WREG32(mmCP_HQD_PQ_BASE
, mqd
->cp_hqd_pq_base_lo
);
3870 WREG32(mmCP_HQD_PQ_BASE_HI
, mqd
->cp_hqd_pq_base_hi
);
3872 /* set up the HQD, this is similar to CP_RB0_CNTL */
3873 tmp
= RREG32(mmCP_HQD_PQ_CONTROL
);
3874 tmp
= REG_SET_FIELD(tmp
, CP_HQD_PQ_CONTROL
, QUEUE_SIZE
,
3875 (order_base_2(ring
->ring_size
/ 4) - 1));
3876 tmp
= REG_SET_FIELD(tmp
, CP_HQD_PQ_CONTROL
, RPTR_BLOCK_SIZE
,
3877 ((order_base_2(AMDGPU_GPU_PAGE_SIZE
/ 4) - 1) << 8));
3879 tmp
= REG_SET_FIELD(tmp
, CP_HQD_PQ_CONTROL
, ENDIAN_SWAP
, 1);
3881 tmp
= REG_SET_FIELD(tmp
, CP_HQD_PQ_CONTROL
, UNORD_DISPATCH
, 0);
3882 tmp
= REG_SET_FIELD(tmp
, CP_HQD_PQ_CONTROL
, ROQ_PQ_IB_FLIP
, 0);
3883 tmp
= REG_SET_FIELD(tmp
, CP_HQD_PQ_CONTROL
, PRIV_STATE
, 1);
3884 tmp
= REG_SET_FIELD(tmp
, CP_HQD_PQ_CONTROL
, KMD_QUEUE
, 1);
3885 WREG32(mmCP_HQD_PQ_CONTROL
, tmp
);
3886 mqd
->cp_hqd_pq_control
= tmp
;
3888 /* set the wb address wether it's enabled or not */
3889 wb_gpu_addr
= adev
->wb
.gpu_addr
+ (ring
->rptr_offs
* 4);
3890 mqd
->cp_hqd_pq_rptr_report_addr_lo
= wb_gpu_addr
& 0xfffffffc;
3891 mqd
->cp_hqd_pq_rptr_report_addr_hi
=
3892 upper_32_bits(wb_gpu_addr
) & 0xffff;
3893 WREG32(mmCP_HQD_PQ_RPTR_REPORT_ADDR
,
3894 mqd
->cp_hqd_pq_rptr_report_addr_lo
);
3895 WREG32(mmCP_HQD_PQ_RPTR_REPORT_ADDR_HI
,
3896 mqd
->cp_hqd_pq_rptr_report_addr_hi
);
3898 /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
3899 wb_gpu_addr
= adev
->wb
.gpu_addr
+ (ring
->wptr_offs
* 4);
3900 mqd
->cp_hqd_pq_wptr_poll_addr
= wb_gpu_addr
& 0xfffffffc;
3901 mqd
->cp_hqd_pq_wptr_poll_addr_hi
= upper_32_bits(wb_gpu_addr
) & 0xffff;
3902 WREG32(mmCP_HQD_PQ_WPTR_POLL_ADDR
, mqd
->cp_hqd_pq_wptr_poll_addr
);
3903 WREG32(mmCP_HQD_PQ_WPTR_POLL_ADDR_HI
,
3904 mqd
->cp_hqd_pq_wptr_poll_addr_hi
);
3906 /* enable the doorbell if requested */
3908 if ((adev
->asic_type
== CHIP_CARRIZO
) ||
3909 (adev
->asic_type
== CHIP_FIJI
) ||
3910 (adev
->asic_type
== CHIP_STONEY
)) {
3911 WREG32(mmCP_MEC_DOORBELL_RANGE_LOWER
,
3912 AMDGPU_DOORBELL_KIQ
<< 2);
3913 WREG32(mmCP_MEC_DOORBELL_RANGE_UPPER
,
3914 AMDGPU_DOORBELL_MEC_RING7
<< 2);
3916 tmp
= RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL
);
3917 tmp
= REG_SET_FIELD(tmp
, CP_HQD_PQ_DOORBELL_CONTROL
,
3918 DOORBELL_OFFSET
, ring
->doorbell_index
);
3919 tmp
= REG_SET_FIELD(tmp
, CP_HQD_PQ_DOORBELL_CONTROL
, DOORBELL_EN
, 1);
3920 tmp
= REG_SET_FIELD(tmp
, CP_HQD_PQ_DOORBELL_CONTROL
, DOORBELL_SOURCE
, 0);
3921 tmp
= REG_SET_FIELD(tmp
, CP_HQD_PQ_DOORBELL_CONTROL
, DOORBELL_HIT
, 0);
3922 mqd
->cp_hqd_pq_doorbell_control
= tmp
;
3925 mqd
->cp_hqd_pq_doorbell_control
= 0;
3927 WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL
,
3928 mqd
->cp_hqd_pq_doorbell_control
);
3930 /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
3932 mqd
->cp_hqd_pq_wptr
= ring
->wptr
;
3933 WREG32(mmCP_HQD_PQ_WPTR
, mqd
->cp_hqd_pq_wptr
);
3934 mqd
->cp_hqd_pq_rptr
= RREG32(mmCP_HQD_PQ_RPTR
);
3936 /* set the vmid for the queue */
3937 mqd
->cp_hqd_vmid
= 0;
3938 WREG32(mmCP_HQD_VMID
, mqd
->cp_hqd_vmid
);
3940 tmp
= RREG32(mmCP_HQD_PERSISTENT_STATE
);
3941 tmp
= REG_SET_FIELD(tmp
, CP_HQD_PERSISTENT_STATE
, PRELOAD_SIZE
, 0x53);
3942 WREG32(mmCP_HQD_PERSISTENT_STATE
, tmp
);
3943 mqd
->cp_hqd_persistent_state
= tmp
;
3945 /* activate the queue */
3946 mqd
->cp_hqd_active
= 1;
3947 WREG32(mmCP_HQD_ACTIVE
, mqd
->cp_hqd_active
);
3949 vi_srbm_select(adev
, 0, 0, 0, 0);
3950 mutex_unlock(&adev
->srbm_mutex
);
3952 amdgpu_bo_kunmap(ring
->mqd_obj
);
3953 amdgpu_bo_unreserve(ring
->mqd_obj
);
3957 tmp
= RREG32(mmCP_PQ_STATUS
);
3958 tmp
= REG_SET_FIELD(tmp
, CP_PQ_STATUS
, DOORBELL_ENABLE
, 1);
3959 WREG32(mmCP_PQ_STATUS
, tmp
);
3962 r
= gfx_v8_0_cp_compute_start(adev
);
3966 for (i
= 0; i
< adev
->gfx
.num_compute_rings
; i
++) {
3967 struct amdgpu_ring
*ring
= &adev
->gfx
.compute_ring
[i
];
3970 r
= amdgpu_ring_test_ring(ring
);
3972 ring
->ready
= false;
3978 static int gfx_v8_0_cp_resume(struct amdgpu_device
*adev
)
3982 if (!(adev
->flags
& AMD_IS_APU
))
3983 gfx_v8_0_enable_gui_idle_interrupt(adev
, false);
3985 if (!adev
->firmware
.smu_load
) {
3986 /* legacy firmware loading */
3987 r
= gfx_v8_0_cp_gfx_load_microcode(adev
);
3991 r
= gfx_v8_0_cp_compute_load_microcode(adev
);
3995 r
= adev
->smu
.smumgr_funcs
->check_fw_load_finish(adev
,
3996 AMDGPU_UCODE_ID_CP_CE
);
4000 r
= adev
->smu
.smumgr_funcs
->check_fw_load_finish(adev
,
4001 AMDGPU_UCODE_ID_CP_PFP
);
4005 r
= adev
->smu
.smumgr_funcs
->check_fw_load_finish(adev
,
4006 AMDGPU_UCODE_ID_CP_ME
);
4010 r
= adev
->smu
.smumgr_funcs
->check_fw_load_finish(adev
,
4011 AMDGPU_UCODE_ID_CP_MEC1
);
4016 r
= gfx_v8_0_cp_gfx_resume(adev
);
4020 r
= gfx_v8_0_cp_compute_resume(adev
);
4024 gfx_v8_0_enable_gui_idle_interrupt(adev
, true);
4029 static void gfx_v8_0_cp_enable(struct amdgpu_device
*adev
, bool enable
)
4031 gfx_v8_0_cp_gfx_enable(adev
, enable
);
4032 gfx_v8_0_cp_compute_enable(adev
, enable
);
/*
 * gfx_v8_0_hw_init - IP-block hw_init callback.
 *
 * Programs golden registers, initialises the GFX core, then resumes the
 * RLC and the CP. Returns 0 on success or a negative error code.
 *
 * NOTE(review): `int r` and the error-propagation lines were missing in
 * this copy; restored.
 */
static int gfx_v8_0_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gfx_v8_0_init_golden_registers(adev);

	gfx_v8_0_gpu_init(adev);

	r = gfx_v8_0_rlc_resume(adev);
	if (r)
		return r;

	r = gfx_v8_0_cp_resume(adev);

	return r;
}
4055 static int gfx_v8_0_hw_fini(void *handle
)
4057 struct amdgpu_device
*adev
= (struct amdgpu_device
*)handle
;
4059 gfx_v8_0_cp_enable(adev
, false);
4060 gfx_v8_0_rlc_stop(adev
);
4061 gfx_v8_0_cp_compute_fini(adev
);
/* Suspend is just a full hw teardown of the GFX block. */
static int gfx_v8_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return gfx_v8_0_hw_fini(adev);
}
/* Resume re-runs the full hw bring-up of the GFX block. */
static int gfx_v8_0_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return gfx_v8_0_hw_init(adev);
}
4080 static bool gfx_v8_0_is_idle(void *handle
)
4082 struct amdgpu_device
*adev
= (struct amdgpu_device
*)handle
;
4084 if (REG_GET_FIELD(RREG32(mmGRBM_STATUS
), GRBM_STATUS
, GUI_ACTIVE
))
4090 static int gfx_v8_0_wait_for_idle(void *handle
)
4094 struct amdgpu_device
*adev
= (struct amdgpu_device
*)handle
;
4096 for (i
= 0; i
< adev
->usec_timeout
; i
++) {
4097 /* read MC_STATUS */
4098 tmp
= RREG32(mmGRBM_STATUS
) & GRBM_STATUS__GUI_ACTIVE_MASK
;
4100 if (!REG_GET_FIELD(tmp
, GRBM_STATUS
, GUI_ACTIVE
))
4107 static void gfx_v8_0_print_status(void *handle
)
4110 struct amdgpu_device
*adev
= (struct amdgpu_device
*)handle
;
4112 dev_info(adev
->dev
, "GFX 8.x registers\n");
4113 dev_info(adev
->dev
, " GRBM_STATUS=0x%08X\n",
4114 RREG32(mmGRBM_STATUS
));
4115 dev_info(adev
->dev
, " GRBM_STATUS2=0x%08X\n",
4116 RREG32(mmGRBM_STATUS2
));
4117 dev_info(adev
->dev
, " GRBM_STATUS_SE0=0x%08X\n",
4118 RREG32(mmGRBM_STATUS_SE0
));
4119 dev_info(adev
->dev
, " GRBM_STATUS_SE1=0x%08X\n",
4120 RREG32(mmGRBM_STATUS_SE1
));
4121 dev_info(adev
->dev
, " GRBM_STATUS_SE2=0x%08X\n",
4122 RREG32(mmGRBM_STATUS_SE2
));
4123 dev_info(adev
->dev
, " GRBM_STATUS_SE3=0x%08X\n",
4124 RREG32(mmGRBM_STATUS_SE3
));
4125 dev_info(adev
->dev
, " CP_STAT = 0x%08x\n", RREG32(mmCP_STAT
));
4126 dev_info(adev
->dev
, " CP_STALLED_STAT1 = 0x%08x\n",
4127 RREG32(mmCP_STALLED_STAT1
));
4128 dev_info(adev
->dev
, " CP_STALLED_STAT2 = 0x%08x\n",
4129 RREG32(mmCP_STALLED_STAT2
));
4130 dev_info(adev
->dev
, " CP_STALLED_STAT3 = 0x%08x\n",
4131 RREG32(mmCP_STALLED_STAT3
));
4132 dev_info(adev
->dev
, " CP_CPF_BUSY_STAT = 0x%08x\n",
4133 RREG32(mmCP_CPF_BUSY_STAT
));
4134 dev_info(adev
->dev
, " CP_CPF_STALLED_STAT1 = 0x%08x\n",
4135 RREG32(mmCP_CPF_STALLED_STAT1
));
4136 dev_info(adev
->dev
, " CP_CPF_STATUS = 0x%08x\n", RREG32(mmCP_CPF_STATUS
));
4137 dev_info(adev
->dev
, " CP_CPC_BUSY_STAT = 0x%08x\n", RREG32(mmCP_CPC_BUSY_STAT
));
4138 dev_info(adev
->dev
, " CP_CPC_STALLED_STAT1 = 0x%08x\n",
4139 RREG32(mmCP_CPC_STALLED_STAT1
));
4140 dev_info(adev
->dev
, " CP_CPC_STATUS = 0x%08x\n", RREG32(mmCP_CPC_STATUS
));
4142 for (i
= 0; i
< 32; i
++) {
4143 dev_info(adev
->dev
, " GB_TILE_MODE%d=0x%08X\n",
4144 i
, RREG32(mmGB_TILE_MODE0
+ (i
* 4)));
4146 for (i
= 0; i
< 16; i
++) {
4147 dev_info(adev
->dev
, " GB_MACROTILE_MODE%d=0x%08X\n",
4148 i
, RREG32(mmGB_MACROTILE_MODE0
+ (i
* 4)));
4150 for (i
= 0; i
< adev
->gfx
.config
.max_shader_engines
; i
++) {
4151 dev_info(adev
->dev
, " se: %d\n", i
);
4152 gfx_v8_0_select_se_sh(adev
, i
, 0xffffffff);
4153 dev_info(adev
->dev
, " PA_SC_RASTER_CONFIG=0x%08X\n",
4154 RREG32(mmPA_SC_RASTER_CONFIG
));
4155 dev_info(adev
->dev
, " PA_SC_RASTER_CONFIG_1=0x%08X\n",
4156 RREG32(mmPA_SC_RASTER_CONFIG_1
));
4158 gfx_v8_0_select_se_sh(adev
, 0xffffffff, 0xffffffff);
4160 dev_info(adev
->dev
, " GB_ADDR_CONFIG=0x%08X\n",
4161 RREG32(mmGB_ADDR_CONFIG
));
4162 dev_info(adev
->dev
, " HDP_ADDR_CONFIG=0x%08X\n",
4163 RREG32(mmHDP_ADDR_CONFIG
));
4164 dev_info(adev
->dev
, " DMIF_ADDR_CALC=0x%08X\n",
4165 RREG32(mmDMIF_ADDR_CALC
));
4166 dev_info(adev
->dev
, " SDMA0_TILING_CONFIG=0x%08X\n",
4167 RREG32(mmSDMA0_TILING_CONFIG
+ SDMA0_REGISTER_OFFSET
));
4168 dev_info(adev
->dev
, " SDMA1_TILING_CONFIG=0x%08X\n",
4169 RREG32(mmSDMA0_TILING_CONFIG
+ SDMA1_REGISTER_OFFSET
));
4170 dev_info(adev
->dev
, " UVD_UDEC_ADDR_CONFIG=0x%08X\n",
4171 RREG32(mmUVD_UDEC_ADDR_CONFIG
));
4172 dev_info(adev
->dev
, " UVD_UDEC_DB_ADDR_CONFIG=0x%08X\n",
4173 RREG32(mmUVD_UDEC_DB_ADDR_CONFIG
));
4174 dev_info(adev
->dev
, " UVD_UDEC_DBW_ADDR_CONFIG=0x%08X\n",
4175 RREG32(mmUVD_UDEC_DBW_ADDR_CONFIG
));
4177 dev_info(adev
->dev
, " CP_MEQ_THRESHOLDS=0x%08X\n",
4178 RREG32(mmCP_MEQ_THRESHOLDS
));
4179 dev_info(adev
->dev
, " SX_DEBUG_1=0x%08X\n",
4180 RREG32(mmSX_DEBUG_1
));
4181 dev_info(adev
->dev
, " TA_CNTL_AUX=0x%08X\n",
4182 RREG32(mmTA_CNTL_AUX
));
4183 dev_info(adev
->dev
, " SPI_CONFIG_CNTL=0x%08X\n",
4184 RREG32(mmSPI_CONFIG_CNTL
));
4185 dev_info(adev
->dev
, " SQ_CONFIG=0x%08X\n",
4186 RREG32(mmSQ_CONFIG
));
4187 dev_info(adev
->dev
, " DB_DEBUG=0x%08X\n",
4188 RREG32(mmDB_DEBUG
));
4189 dev_info(adev
->dev
, " DB_DEBUG2=0x%08X\n",
4190 RREG32(mmDB_DEBUG2
));
4191 dev_info(adev
->dev
, " DB_DEBUG3=0x%08X\n",
4192 RREG32(mmDB_DEBUG3
));
4193 dev_info(adev
->dev
, " CB_HW_CONTROL=0x%08X\n",
4194 RREG32(mmCB_HW_CONTROL
));
4195 dev_info(adev
->dev
, " SPI_CONFIG_CNTL_1=0x%08X\n",
4196 RREG32(mmSPI_CONFIG_CNTL_1
));
4197 dev_info(adev
->dev
, " PA_SC_FIFO_SIZE=0x%08X\n",
4198 RREG32(mmPA_SC_FIFO_SIZE
));
4199 dev_info(adev
->dev
, " VGT_NUM_INSTANCES=0x%08X\n",
4200 RREG32(mmVGT_NUM_INSTANCES
));
4201 dev_info(adev
->dev
, " CP_PERFMON_CNTL=0x%08X\n",
4202 RREG32(mmCP_PERFMON_CNTL
));
4203 dev_info(adev
->dev
, " PA_SC_FORCE_EOV_MAX_CNTS=0x%08X\n",
4204 RREG32(mmPA_SC_FORCE_EOV_MAX_CNTS
));
4205 dev_info(adev
->dev
, " VGT_CACHE_INVALIDATION=0x%08X\n",
4206 RREG32(mmVGT_CACHE_INVALIDATION
));
4207 dev_info(adev
->dev
, " VGT_GS_VERTEX_REUSE=0x%08X\n",
4208 RREG32(mmVGT_GS_VERTEX_REUSE
));
4209 dev_info(adev
->dev
, " PA_SC_LINE_STIPPLE_STATE=0x%08X\n",
4210 RREG32(mmPA_SC_LINE_STIPPLE_STATE
));
4211 dev_info(adev
->dev
, " PA_CL_ENHANCE=0x%08X\n",
4212 RREG32(mmPA_CL_ENHANCE
));
4213 dev_info(adev
->dev
, " PA_SC_ENHANCE=0x%08X\n",
4214 RREG32(mmPA_SC_ENHANCE
));
4216 dev_info(adev
->dev
, " CP_ME_CNTL=0x%08X\n",
4217 RREG32(mmCP_ME_CNTL
));
4218 dev_info(adev
->dev
, " CP_MAX_CONTEXT=0x%08X\n",
4219 RREG32(mmCP_MAX_CONTEXT
));
4220 dev_info(adev
->dev
, " CP_ENDIAN_SWAP=0x%08X\n",
4221 RREG32(mmCP_ENDIAN_SWAP
));
4222 dev_info(adev
->dev
, " CP_DEVICE_ID=0x%08X\n",
4223 RREG32(mmCP_DEVICE_ID
));
4225 dev_info(adev
->dev
, " CP_SEM_WAIT_TIMER=0x%08X\n",
4226 RREG32(mmCP_SEM_WAIT_TIMER
));
4228 dev_info(adev
->dev
, " CP_RB_WPTR_DELAY=0x%08X\n",
4229 RREG32(mmCP_RB_WPTR_DELAY
));
4230 dev_info(adev
->dev
, " CP_RB_VMID=0x%08X\n",
4231 RREG32(mmCP_RB_VMID
));
4232 dev_info(adev
->dev
, " CP_RB0_CNTL=0x%08X\n",
4233 RREG32(mmCP_RB0_CNTL
));
4234 dev_info(adev
->dev
, " CP_RB0_WPTR=0x%08X\n",
4235 RREG32(mmCP_RB0_WPTR
));
4236 dev_info(adev
->dev
, " CP_RB0_RPTR_ADDR=0x%08X\n",
4237 RREG32(mmCP_RB0_RPTR_ADDR
));
4238 dev_info(adev
->dev
, " CP_RB0_RPTR_ADDR_HI=0x%08X\n",
4239 RREG32(mmCP_RB0_RPTR_ADDR_HI
));
4240 dev_info(adev
->dev
, " CP_RB0_CNTL=0x%08X\n",
4241 RREG32(mmCP_RB0_CNTL
));
4242 dev_info(adev
->dev
, " CP_RB0_BASE=0x%08X\n",
4243 RREG32(mmCP_RB0_BASE
));
4244 dev_info(adev
->dev
, " CP_RB0_BASE_HI=0x%08X\n",
4245 RREG32(mmCP_RB0_BASE_HI
));
4246 dev_info(adev
->dev
, " CP_MEC_CNTL=0x%08X\n",
4247 RREG32(mmCP_MEC_CNTL
));
4248 dev_info(adev
->dev
, " CP_CPF_DEBUG=0x%08X\n",
4249 RREG32(mmCP_CPF_DEBUG
));
4251 dev_info(adev
->dev
, " SCRATCH_ADDR=0x%08X\n",
4252 RREG32(mmSCRATCH_ADDR
));
4253 dev_info(adev
->dev
, " SCRATCH_UMSK=0x%08X\n",
4254 RREG32(mmSCRATCH_UMSK
));
4256 dev_info(adev
->dev
, " CP_INT_CNTL_RING0=0x%08X\n",
4257 RREG32(mmCP_INT_CNTL_RING0
));
4258 dev_info(adev
->dev
, " RLC_LB_CNTL=0x%08X\n",
4259 RREG32(mmRLC_LB_CNTL
));
4260 dev_info(adev
->dev
, " RLC_CNTL=0x%08X\n",
4261 RREG32(mmRLC_CNTL
));
4262 dev_info(adev
->dev
, " RLC_CGCG_CGLS_CTRL=0x%08X\n",
4263 RREG32(mmRLC_CGCG_CGLS_CTRL
));
4264 dev_info(adev
->dev
, " RLC_LB_CNTR_INIT=0x%08X\n",
4265 RREG32(mmRLC_LB_CNTR_INIT
));
4266 dev_info(adev
->dev
, " RLC_LB_CNTR_MAX=0x%08X\n",
4267 RREG32(mmRLC_LB_CNTR_MAX
));
4268 dev_info(adev
->dev
, " RLC_LB_INIT_CU_MASK=0x%08X\n",
4269 RREG32(mmRLC_LB_INIT_CU_MASK
));
4270 dev_info(adev
->dev
, " RLC_LB_PARAMS=0x%08X\n",
4271 RREG32(mmRLC_LB_PARAMS
));
4272 dev_info(adev
->dev
, " RLC_LB_CNTL=0x%08X\n",
4273 RREG32(mmRLC_LB_CNTL
));
4274 dev_info(adev
->dev
, " RLC_MC_CNTL=0x%08X\n",
4275 RREG32(mmRLC_MC_CNTL
));
4276 dev_info(adev
->dev
, " RLC_UCODE_CNTL=0x%08X\n",
4277 RREG32(mmRLC_UCODE_CNTL
));
4279 mutex_lock(&adev
->srbm_mutex
);
4280 for (i
= 0; i
< 16; i
++) {
4281 vi_srbm_select(adev
, 0, 0, 0, i
);
4282 dev_info(adev
->dev
, " VM %d:\n", i
);
4283 dev_info(adev
->dev
, " SH_MEM_CONFIG=0x%08X\n",
4284 RREG32(mmSH_MEM_CONFIG
));
4285 dev_info(adev
->dev
, " SH_MEM_APE1_BASE=0x%08X\n",
4286 RREG32(mmSH_MEM_APE1_BASE
));
4287 dev_info(adev
->dev
, " SH_MEM_APE1_LIMIT=0x%08X\n",
4288 RREG32(mmSH_MEM_APE1_LIMIT
));
4289 dev_info(adev
->dev
, " SH_MEM_BASES=0x%08X\n",
4290 RREG32(mmSH_MEM_BASES
));
4292 vi_srbm_select(adev
, 0, 0, 0, 0);
4293 mutex_unlock(&adev
->srbm_mutex
);
4296 static int gfx_v8_0_soft_reset(void *handle
)
4298 u32 grbm_soft_reset
= 0, srbm_soft_reset
= 0;
4300 struct amdgpu_device
*adev
= (struct amdgpu_device
*)handle
;
4303 tmp
= RREG32(mmGRBM_STATUS
);
4304 if (tmp
& (GRBM_STATUS__PA_BUSY_MASK
| GRBM_STATUS__SC_BUSY_MASK
|
4305 GRBM_STATUS__BCI_BUSY_MASK
| GRBM_STATUS__SX_BUSY_MASK
|
4306 GRBM_STATUS__TA_BUSY_MASK
| GRBM_STATUS__VGT_BUSY_MASK
|
4307 GRBM_STATUS__DB_BUSY_MASK
| GRBM_STATUS__CB_BUSY_MASK
|
4308 GRBM_STATUS__GDS_BUSY_MASK
| GRBM_STATUS__SPI_BUSY_MASK
|
4309 GRBM_STATUS__IA_BUSY_MASK
| GRBM_STATUS__IA_BUSY_NO_DMA_MASK
)) {
4310 grbm_soft_reset
= REG_SET_FIELD(grbm_soft_reset
,
4311 GRBM_SOFT_RESET
, SOFT_RESET_CP
, 1);
4312 grbm_soft_reset
= REG_SET_FIELD(grbm_soft_reset
,
4313 GRBM_SOFT_RESET
, SOFT_RESET_GFX
, 1);
4316 if (tmp
& (GRBM_STATUS__CP_BUSY_MASK
| GRBM_STATUS__CP_COHERENCY_BUSY_MASK
)) {
4317 grbm_soft_reset
= REG_SET_FIELD(grbm_soft_reset
,
4318 GRBM_SOFT_RESET
, SOFT_RESET_CP
, 1);
4319 srbm_soft_reset
= REG_SET_FIELD(srbm_soft_reset
,
4320 SRBM_SOFT_RESET
, SOFT_RESET_GRBM
, 1);
4324 tmp
= RREG32(mmGRBM_STATUS2
);
4325 if (REG_GET_FIELD(tmp
, GRBM_STATUS2
, RLC_BUSY
))
4326 grbm_soft_reset
= REG_SET_FIELD(grbm_soft_reset
,
4327 GRBM_SOFT_RESET
, SOFT_RESET_RLC
, 1);
4330 tmp
= RREG32(mmSRBM_STATUS
);
4331 if (REG_GET_FIELD(tmp
, SRBM_STATUS
, GRBM_RQ_PENDING
))
4332 srbm_soft_reset
= REG_SET_FIELD(srbm_soft_reset
,
4333 SRBM_SOFT_RESET
, SOFT_RESET_GRBM
, 1);
4335 if (grbm_soft_reset
|| srbm_soft_reset
) {
4336 gfx_v8_0_print_status((void *)adev
);
4338 gfx_v8_0_rlc_stop(adev
);
4340 /* Disable GFX parsing/prefetching */
4341 gfx_v8_0_cp_gfx_enable(adev
, false);
4343 /* Disable MEC parsing/prefetching */
4346 if (grbm_soft_reset
) {
4347 tmp
= RREG32(mmGRBM_SOFT_RESET
);
4348 tmp
|= grbm_soft_reset
;
4349 dev_info(adev
->dev
, "GRBM_SOFT_RESET=0x%08X\n", tmp
);
4350 WREG32(mmGRBM_SOFT_RESET
, tmp
);
4351 tmp
= RREG32(mmGRBM_SOFT_RESET
);
4355 tmp
&= ~grbm_soft_reset
;
4356 WREG32(mmGRBM_SOFT_RESET
, tmp
);
4357 tmp
= RREG32(mmGRBM_SOFT_RESET
);
4360 if (srbm_soft_reset
) {
4361 tmp
= RREG32(mmSRBM_SOFT_RESET
);
4362 tmp
|= srbm_soft_reset
;
4363 dev_info(adev
->dev
, "SRBM_SOFT_RESET=0x%08X\n", tmp
);
4364 WREG32(mmSRBM_SOFT_RESET
, tmp
);
4365 tmp
= RREG32(mmSRBM_SOFT_RESET
);
4369 tmp
&= ~srbm_soft_reset
;
4370 WREG32(mmSRBM_SOFT_RESET
, tmp
);
4371 tmp
= RREG32(mmSRBM_SOFT_RESET
);
4373 /* Wait a little for things to settle down */
4375 gfx_v8_0_print_status((void *)adev
);
4381 * gfx_v8_0_get_gpu_clock_counter - return GPU clock counter snapshot
4383 * @adev: amdgpu_device pointer
4385 * Fetches a GPU clock counter snapshot.
4386 * Returns the 64 bit clock counter snapshot.
4388 uint64_t gfx_v8_0_get_gpu_clock_counter(struct amdgpu_device
*adev
)
4392 mutex_lock(&adev
->gfx
.gpu_clock_mutex
);
4393 WREG32(mmRLC_CAPTURE_GPU_CLOCK_COUNT
, 1);
4394 clock
= (uint64_t)RREG32(mmRLC_GPU_CLOCK_COUNT_LSB
) |
4395 ((uint64_t)RREG32(mmRLC_GPU_CLOCK_COUNT_MSB
) << 32ULL);
4396 mutex_unlock(&adev
->gfx
.gpu_clock_mutex
);
4400 static void gfx_v8_0_ring_emit_gds_switch(struct amdgpu_ring
*ring
,
4402 uint32_t gds_base
, uint32_t gds_size
,
4403 uint32_t gws_base
, uint32_t gws_size
,
4404 uint32_t oa_base
, uint32_t oa_size
)
4406 gds_base
= gds_base
>> AMDGPU_GDS_SHIFT
;
4407 gds_size
= gds_size
>> AMDGPU_GDS_SHIFT
;
4409 gws_base
= gws_base
>> AMDGPU_GWS_SHIFT
;
4410 gws_size
= gws_size
>> AMDGPU_GWS_SHIFT
;
4412 oa_base
= oa_base
>> AMDGPU_OA_SHIFT
;
4413 oa_size
= oa_size
>> AMDGPU_OA_SHIFT
;
4416 amdgpu_ring_write(ring
, PACKET3(PACKET3_WRITE_DATA
, 3));
4417 amdgpu_ring_write(ring
, (WRITE_DATA_ENGINE_SEL(0) |
4418 WRITE_DATA_DST_SEL(0)));
4419 amdgpu_ring_write(ring
, amdgpu_gds_reg_offset
[vmid
].mem_base
);
4420 amdgpu_ring_write(ring
, 0);
4421 amdgpu_ring_write(ring
, gds_base
);
4424 amdgpu_ring_write(ring
, PACKET3(PACKET3_WRITE_DATA
, 3));
4425 amdgpu_ring_write(ring
, (WRITE_DATA_ENGINE_SEL(0) |
4426 WRITE_DATA_DST_SEL(0)));
4427 amdgpu_ring_write(ring
, amdgpu_gds_reg_offset
[vmid
].mem_size
);
4428 amdgpu_ring_write(ring
, 0);
4429 amdgpu_ring_write(ring
, gds_size
);
4432 amdgpu_ring_write(ring
, PACKET3(PACKET3_WRITE_DATA
, 3));
4433 amdgpu_ring_write(ring
, (WRITE_DATA_ENGINE_SEL(0) |
4434 WRITE_DATA_DST_SEL(0)));
4435 amdgpu_ring_write(ring
, amdgpu_gds_reg_offset
[vmid
].gws
);
4436 amdgpu_ring_write(ring
, 0);
4437 amdgpu_ring_write(ring
, gws_size
<< GDS_GWS_VMID0__SIZE__SHIFT
| gws_base
);
4440 amdgpu_ring_write(ring
, PACKET3(PACKET3_WRITE_DATA
, 3));
4441 amdgpu_ring_write(ring
, (WRITE_DATA_ENGINE_SEL(0) |
4442 WRITE_DATA_DST_SEL(0)));
4443 amdgpu_ring_write(ring
, amdgpu_gds_reg_offset
[vmid
].oa
);
4444 amdgpu_ring_write(ring
, 0);
4445 amdgpu_ring_write(ring
, (1 << (oa_size
+ oa_base
)) - (1 << oa_base
));
4448 static int gfx_v8_0_early_init(void *handle
)
4450 struct amdgpu_device
*adev
= (struct amdgpu_device
*)handle
;
4452 adev
->gfx
.num_gfx_rings
= GFX8_NUM_GFX_RINGS
;
4453 adev
->gfx
.num_compute_rings
= GFX8_NUM_COMPUTE_RINGS
;
4454 gfx_v8_0_set_ring_funcs(adev
);
4455 gfx_v8_0_set_irq_funcs(adev
);
4456 gfx_v8_0_set_gds_init(adev
);
4461 static int gfx_v8_0_set_powergating_state(void *handle
,
4462 enum amd_powergating_state state
)
4467 static int gfx_v8_0_set_clockgating_state(void *handle
,
4468 enum amd_clockgating_state state
)
4473 static u32
gfx_v8_0_ring_get_rptr_gfx(struct amdgpu_ring
*ring
)
4477 rptr
= ring
->adev
->wb
.wb
[ring
->rptr_offs
];
4482 static u32
gfx_v8_0_ring_get_wptr_gfx(struct amdgpu_ring
*ring
)
4484 struct amdgpu_device
*adev
= ring
->adev
;
4487 if (ring
->use_doorbell
)
4488 /* XXX check if swapping is necessary on BE */
4489 wptr
= ring
->adev
->wb
.wb
[ring
->wptr_offs
];
4491 wptr
= RREG32(mmCP_RB0_WPTR
);
4496 static void gfx_v8_0_ring_set_wptr_gfx(struct amdgpu_ring
*ring
)
4498 struct amdgpu_device
*adev
= ring
->adev
;
4500 if (ring
->use_doorbell
) {
4501 /* XXX check if swapping is necessary on BE */
4502 adev
->wb
.wb
[ring
->wptr_offs
] = ring
->wptr
;
4503 WDOORBELL32(ring
->doorbell_index
, ring
->wptr
);
4505 WREG32(mmCP_RB0_WPTR
, ring
->wptr
);
4506 (void)RREG32(mmCP_RB0_WPTR
);
4510 static void gfx_v8_0_ring_emit_hdp_flush(struct amdgpu_ring
*ring
)
4512 u32 ref_and_mask
, reg_mem_engine
;
4514 if (ring
->type
== AMDGPU_RING_TYPE_COMPUTE
) {
4517 ref_and_mask
= GPU_HDP_FLUSH_DONE__CP2_MASK
<< ring
->pipe
;
4520 ref_and_mask
= GPU_HDP_FLUSH_DONE__CP6_MASK
<< ring
->pipe
;
4527 ref_and_mask
= GPU_HDP_FLUSH_DONE__CP0_MASK
;
4528 reg_mem_engine
= WAIT_REG_MEM_ENGINE(1); /* pfp */
4531 amdgpu_ring_write(ring
, PACKET3(PACKET3_WAIT_REG_MEM
, 5));
4532 amdgpu_ring_write(ring
, (WAIT_REG_MEM_OPERATION(1) | /* write, wait, write */
4533 WAIT_REG_MEM_FUNCTION(3) | /* == */
4535 amdgpu_ring_write(ring
, mmGPU_HDP_FLUSH_REQ
);
4536 amdgpu_ring_write(ring
, mmGPU_HDP_FLUSH_DONE
);
4537 amdgpu_ring_write(ring
, ref_and_mask
);
4538 amdgpu_ring_write(ring
, ref_and_mask
);
4539 amdgpu_ring_write(ring
, 0x20); /* poll interval */
4542 static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring
*ring
,
4543 struct amdgpu_ib
*ib
)
4545 bool need_ctx_switch
= ring
->current_ctx
!= ib
->ctx
;
4546 u32 header
, control
= 0;
4547 u32 next_rptr
= ring
->wptr
+ 5;
4549 /* drop the CE preamble IB for the same context */
4550 if ((ib
->flags
& AMDGPU_IB_FLAG_PREAMBLE
) && !need_ctx_switch
)
4553 if (need_ctx_switch
)
4557 amdgpu_ring_write(ring
, PACKET3(PACKET3_WRITE_DATA
, 3));
4558 amdgpu_ring_write(ring
, WRITE_DATA_DST_SEL(5) | WR_CONFIRM
);
4559 amdgpu_ring_write(ring
, ring
->next_rptr_gpu_addr
& 0xfffffffc);
4560 amdgpu_ring_write(ring
, upper_32_bits(ring
->next_rptr_gpu_addr
) & 0xffffffff);
4561 amdgpu_ring_write(ring
, next_rptr
);
4563 /* insert SWITCH_BUFFER packet before first IB in the ring frame */
4564 if (need_ctx_switch
) {
4565 amdgpu_ring_write(ring
, PACKET3(PACKET3_SWITCH_BUFFER
, 0));
4566 amdgpu_ring_write(ring
, 0);
4569 if (ib
->flags
& AMDGPU_IB_FLAG_CE
)
4570 header
= PACKET3(PACKET3_INDIRECT_BUFFER_CONST
, 2);
4572 header
= PACKET3(PACKET3_INDIRECT_BUFFER
, 2);
4574 control
|= ib
->length_dw
|
4575 (ib
->vm
? (ib
->vm
->ids
[ring
->idx
].id
<< 24) : 0);
4577 amdgpu_ring_write(ring
, header
);
4578 amdgpu_ring_write(ring
,
4582 (ib
->gpu_addr
& 0xFFFFFFFC));
4583 amdgpu_ring_write(ring
, upper_32_bits(ib
->gpu_addr
) & 0xFFFF);
4584 amdgpu_ring_write(ring
, control
);
4587 static void gfx_v8_0_ring_emit_ib_compute(struct amdgpu_ring
*ring
,
4588 struct amdgpu_ib
*ib
)
4590 u32 header
, control
= 0;
4591 u32 next_rptr
= ring
->wptr
+ 5;
4593 control
|= INDIRECT_BUFFER_VALID
;
4596 amdgpu_ring_write(ring
, PACKET3(PACKET3_WRITE_DATA
, 3));
4597 amdgpu_ring_write(ring
, WRITE_DATA_DST_SEL(5) | WR_CONFIRM
);
4598 amdgpu_ring_write(ring
, ring
->next_rptr_gpu_addr
& 0xfffffffc);
4599 amdgpu_ring_write(ring
, upper_32_bits(ring
->next_rptr_gpu_addr
) & 0xffffffff);
4600 amdgpu_ring_write(ring
, next_rptr
);
4602 header
= PACKET3(PACKET3_INDIRECT_BUFFER
, 2);
4604 control
|= ib
->length_dw
|
4605 (ib
->vm
? (ib
->vm
->ids
[ring
->idx
].id
<< 24) : 0);
4607 amdgpu_ring_write(ring
, header
);
4608 amdgpu_ring_write(ring
,
4612 (ib
->gpu_addr
& 0xFFFFFFFC));
4613 amdgpu_ring_write(ring
, upper_32_bits(ib
->gpu_addr
) & 0xFFFF);
4614 amdgpu_ring_write(ring
, control
);
4617 static void gfx_v8_0_ring_emit_fence_gfx(struct amdgpu_ring
*ring
, u64 addr
,
4618 u64 seq
, unsigned flags
)
4620 bool write64bit
= flags
& AMDGPU_FENCE_FLAG_64BIT
;
4621 bool int_sel
= flags
& AMDGPU_FENCE_FLAG_INT
;
4623 /* EVENT_WRITE_EOP - flush caches, send int */
4624 amdgpu_ring_write(ring
, PACKET3(PACKET3_EVENT_WRITE_EOP
, 4));
4625 amdgpu_ring_write(ring
, (EOP_TCL1_ACTION_EN
|
4627 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT
) |
4629 amdgpu_ring_write(ring
, addr
& 0xfffffffc);
4630 amdgpu_ring_write(ring
, (upper_32_bits(addr
) & 0xffff) |
4631 DATA_SEL(write64bit
? 2 : 1) | INT_SEL(int_sel
? 2 : 0));
4632 amdgpu_ring_write(ring
, lower_32_bits(seq
));
4633 amdgpu_ring_write(ring
, upper_32_bits(seq
));
4638 * gfx_v8_0_ring_emit_semaphore - emit a semaphore on the CP ring
4640 * @ring: amdgpu ring buffer object
4641 * @semaphore: amdgpu semaphore object
4642 * @emit_wait: Is this a sempahore wait?
4644 * Emits a semaphore signal/wait packet to the CP ring and prevents the PFP
4645 * from running ahead of semaphore waits.
4647 static bool gfx_v8_0_ring_emit_semaphore(struct amdgpu_ring
*ring
,
4648 struct amdgpu_semaphore
*semaphore
,
4651 uint64_t addr
= semaphore
->gpu_addr
;
4652 unsigned sel
= emit_wait
? PACKET3_SEM_SEL_WAIT
: PACKET3_SEM_SEL_SIGNAL
;
4654 if (ring
->adev
->asic_type
== CHIP_TOPAZ
||
4655 ring
->adev
->asic_type
== CHIP_TONGA
||
4656 ring
->adev
->asic_type
== CHIP_FIJI
)
4657 /* we got a hw semaphore bug in VI TONGA, return false to switch back to sw fence wait */
4660 amdgpu_ring_write(ring
, PACKET3(PACKET3_MEM_SEMAPHORE
, 2));
4661 amdgpu_ring_write(ring
, lower_32_bits(addr
));
4662 amdgpu_ring_write(ring
, upper_32_bits(addr
));
4663 amdgpu_ring_write(ring
, sel
);
4666 if (emit_wait
&& (ring
->type
== AMDGPU_RING_TYPE_GFX
)) {
4667 /* Prevent the PFP from running ahead of the semaphore wait */
4668 amdgpu_ring_write(ring
, PACKET3(PACKET3_PFP_SYNC_ME
, 0));
4669 amdgpu_ring_write(ring
, 0x0);
4675 static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring
*ring
,
4676 unsigned vm_id
, uint64_t pd_addr
)
4678 int usepfp
= (ring
->type
== AMDGPU_RING_TYPE_GFX
);
4679 uint32_t seq
= ring
->fence_drv
.sync_seq
[ring
->idx
];
4680 uint64_t addr
= ring
->fence_drv
.gpu_addr
;
4682 amdgpu_ring_write(ring
, PACKET3(PACKET3_WAIT_REG_MEM
, 5));
4683 amdgpu_ring_write(ring
, (WAIT_REG_MEM_MEM_SPACE(1) | /* memory */
4684 WAIT_REG_MEM_FUNCTION(3))); /* equal */
4685 amdgpu_ring_write(ring
, addr
& 0xfffffffc);
4686 amdgpu_ring_write(ring
, upper_32_bits(addr
) & 0xffffffff);
4687 amdgpu_ring_write(ring
, seq
);
4688 amdgpu_ring_write(ring
, 0xffffffff);
4689 amdgpu_ring_write(ring
, 4); /* poll interval */
4692 /* synce CE with ME to prevent CE fetch CEIB before context switch done */
4693 amdgpu_ring_write(ring
, PACKET3(PACKET3_SWITCH_BUFFER
, 0));
4694 amdgpu_ring_write(ring
, 0);
4695 amdgpu_ring_write(ring
, PACKET3(PACKET3_SWITCH_BUFFER
, 0));
4696 amdgpu_ring_write(ring
, 0);
4699 amdgpu_ring_write(ring
, PACKET3(PACKET3_WRITE_DATA
, 3));
4700 amdgpu_ring_write(ring
, (WRITE_DATA_ENGINE_SEL(usepfp
) |
4701 WRITE_DATA_DST_SEL(0)) |
4704 amdgpu_ring_write(ring
,
4705 (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR
+ vm_id
));
4707 amdgpu_ring_write(ring
,
4708 (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR
+ vm_id
- 8));
4710 amdgpu_ring_write(ring
, 0);
4711 amdgpu_ring_write(ring
, pd_addr
>> 12);
4713 /* bits 0-15 are the VM contexts0-15 */
4714 /* invalidate the cache */
4715 amdgpu_ring_write(ring
, PACKET3(PACKET3_WRITE_DATA
, 3));
4716 amdgpu_ring_write(ring
, (WRITE_DATA_ENGINE_SEL(0) |
4717 WRITE_DATA_DST_SEL(0)));
4718 amdgpu_ring_write(ring
, mmVM_INVALIDATE_REQUEST
);
4719 amdgpu_ring_write(ring
, 0);
4720 amdgpu_ring_write(ring
, 1 << vm_id
);
4722 /* wait for the invalidate to complete */
4723 amdgpu_ring_write(ring
, PACKET3(PACKET3_WAIT_REG_MEM
, 5));
4724 amdgpu_ring_write(ring
, (WAIT_REG_MEM_OPERATION(0) | /* wait */
4725 WAIT_REG_MEM_FUNCTION(0) | /* always */
4726 WAIT_REG_MEM_ENGINE(0))); /* me */
4727 amdgpu_ring_write(ring
, mmVM_INVALIDATE_REQUEST
);
4728 amdgpu_ring_write(ring
, 0);
4729 amdgpu_ring_write(ring
, 0); /* ref */
4730 amdgpu_ring_write(ring
, 0); /* mask */
4731 amdgpu_ring_write(ring
, 0x20); /* poll interval */
4733 /* compute doesn't have PFP */
4735 /* sync PFP to ME, otherwise we might get invalid PFP reads */
4736 amdgpu_ring_write(ring
, PACKET3(PACKET3_PFP_SYNC_ME
, 0));
4737 amdgpu_ring_write(ring
, 0x0);
4738 amdgpu_ring_write(ring
, PACKET3(PACKET3_SWITCH_BUFFER
, 0));
4739 amdgpu_ring_write(ring
, 0);
4740 amdgpu_ring_write(ring
, PACKET3(PACKET3_SWITCH_BUFFER
, 0));
4741 amdgpu_ring_write(ring
, 0);
4745 static u32
gfx_v8_0_ring_get_rptr_compute(struct amdgpu_ring
*ring
)
4747 return ring
->adev
->wb
.wb
[ring
->rptr_offs
];
4750 static u32
gfx_v8_0_ring_get_wptr_compute(struct amdgpu_ring
*ring
)
4752 return ring
->adev
->wb
.wb
[ring
->wptr_offs
];
4755 static void gfx_v8_0_ring_set_wptr_compute(struct amdgpu_ring
*ring
)
4757 struct amdgpu_device
*adev
= ring
->adev
;
4759 /* XXX check if swapping is necessary on BE */
4760 adev
->wb
.wb
[ring
->wptr_offs
] = ring
->wptr
;
4761 WDOORBELL32(ring
->doorbell_index
, ring
->wptr
);
4764 static void gfx_v8_0_ring_emit_fence_compute(struct amdgpu_ring
*ring
,
4768 bool write64bit
= flags
& AMDGPU_FENCE_FLAG_64BIT
;
4769 bool int_sel
= flags
& AMDGPU_FENCE_FLAG_INT
;
4771 /* RELEASE_MEM - flush caches, send int */
4772 amdgpu_ring_write(ring
, PACKET3(PACKET3_RELEASE_MEM
, 5));
4773 amdgpu_ring_write(ring
, (EOP_TCL1_ACTION_EN
|
4775 EOP_TC_WB_ACTION_EN
|
4776 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT
) |
4778 amdgpu_ring_write(ring
, DATA_SEL(write64bit
? 2 : 1) | INT_SEL(int_sel
? 2 : 0));
4779 amdgpu_ring_write(ring
, addr
& 0xfffffffc);
4780 amdgpu_ring_write(ring
, upper_32_bits(addr
));
4781 amdgpu_ring_write(ring
, lower_32_bits(seq
));
4782 amdgpu_ring_write(ring
, upper_32_bits(seq
));
4785 static void gfx_v8_0_set_gfx_eop_interrupt_state(struct amdgpu_device
*adev
,
4786 enum amdgpu_interrupt_state state
)
4791 case AMDGPU_IRQ_STATE_DISABLE
:
4792 cp_int_cntl
= RREG32(mmCP_INT_CNTL_RING0
);
4793 cp_int_cntl
= REG_SET_FIELD(cp_int_cntl
, CP_INT_CNTL_RING0
,
4794 TIME_STAMP_INT_ENABLE
, 0);
4795 WREG32(mmCP_INT_CNTL_RING0
, cp_int_cntl
);
4797 case AMDGPU_IRQ_STATE_ENABLE
:
4798 cp_int_cntl
= RREG32(mmCP_INT_CNTL_RING0
);
4800 REG_SET_FIELD(cp_int_cntl
, CP_INT_CNTL_RING0
,
4801 TIME_STAMP_INT_ENABLE
, 1);
4802 WREG32(mmCP_INT_CNTL_RING0
, cp_int_cntl
);
4809 static void gfx_v8_0_set_compute_eop_interrupt_state(struct amdgpu_device
*adev
,
4811 enum amdgpu_interrupt_state state
)
4813 u32 mec_int_cntl
, mec_int_cntl_reg
;
4816 * amdgpu controls only pipe 0 of MEC1. That's why this function only
4817 * handles the setting of interrupts for this specific pipe. All other
4818 * pipes' interrupts are set by amdkfd.
4824 mec_int_cntl_reg
= mmCP_ME1_PIPE0_INT_CNTL
;
4827 DRM_DEBUG("invalid pipe %d\n", pipe
);
4831 DRM_DEBUG("invalid me %d\n", me
);
4836 case AMDGPU_IRQ_STATE_DISABLE
:
4837 mec_int_cntl
= RREG32(mec_int_cntl_reg
);
4838 mec_int_cntl
= REG_SET_FIELD(mec_int_cntl
, CP_ME1_PIPE0_INT_CNTL
,
4839 TIME_STAMP_INT_ENABLE
, 0);
4840 WREG32(mec_int_cntl_reg
, mec_int_cntl
);
4842 case AMDGPU_IRQ_STATE_ENABLE
:
4843 mec_int_cntl
= RREG32(mec_int_cntl_reg
);
4844 mec_int_cntl
= REG_SET_FIELD(mec_int_cntl
, CP_ME1_PIPE0_INT_CNTL
,
4845 TIME_STAMP_INT_ENABLE
, 1);
4846 WREG32(mec_int_cntl_reg
, mec_int_cntl
);
4853 static int gfx_v8_0_set_priv_reg_fault_state(struct amdgpu_device
*adev
,
4854 struct amdgpu_irq_src
*source
,
4856 enum amdgpu_interrupt_state state
)
4861 case AMDGPU_IRQ_STATE_DISABLE
:
4862 cp_int_cntl
= RREG32(mmCP_INT_CNTL_RING0
);
4863 cp_int_cntl
= REG_SET_FIELD(cp_int_cntl
, CP_INT_CNTL_RING0
,
4864 PRIV_REG_INT_ENABLE
, 0);
4865 WREG32(mmCP_INT_CNTL_RING0
, cp_int_cntl
);
4867 case AMDGPU_IRQ_STATE_ENABLE
:
4868 cp_int_cntl
= RREG32(mmCP_INT_CNTL_RING0
);
4869 cp_int_cntl
= REG_SET_FIELD(cp_int_cntl
, CP_INT_CNTL_RING0
,
4870 PRIV_REG_INT_ENABLE
, 0);
4871 WREG32(mmCP_INT_CNTL_RING0
, cp_int_cntl
);
4880 static int gfx_v8_0_set_priv_inst_fault_state(struct amdgpu_device
*adev
,
4881 struct amdgpu_irq_src
*source
,
4883 enum amdgpu_interrupt_state state
)
4888 case AMDGPU_IRQ_STATE_DISABLE
:
4889 cp_int_cntl
= RREG32(mmCP_INT_CNTL_RING0
);
4890 cp_int_cntl
= REG_SET_FIELD(cp_int_cntl
, CP_INT_CNTL_RING0
,
4891 PRIV_INSTR_INT_ENABLE
, 0);
4892 WREG32(mmCP_INT_CNTL_RING0
, cp_int_cntl
);
4894 case AMDGPU_IRQ_STATE_ENABLE
:
4895 cp_int_cntl
= RREG32(mmCP_INT_CNTL_RING0
);
4896 cp_int_cntl
= REG_SET_FIELD(cp_int_cntl
, CP_INT_CNTL_RING0
,
4897 PRIV_INSTR_INT_ENABLE
, 1);
4898 WREG32(mmCP_INT_CNTL_RING0
, cp_int_cntl
);
/*
 * irq_src .set callback for CP end-of-pipe interrupts.
 *
 * Dispatches to the GFX-ring or per-MEC/pipe compute helper based on
 * the interrupt @type.  Note only MEC1 pipe 0 is actually programmable
 * here (see gfx_v8_0_set_compute_eop_interrupt_state); the remaining
 * cases are accepted but end up as no-ops with a debug message.
 *
 * Returns 0 always.
 */
static int gfx_v8_0_set_eop_interrupt_state(struct amdgpu_device *adev,
					    struct amdgpu_irq_src *src,
					    unsigned type,
					    enum amdgpu_interrupt_state state)
{
	switch (type) {
	case AMDGPU_CP_IRQ_GFX_EOP:
		gfx_v8_0_set_gfx_eop_interrupt_state(adev, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP:
		gfx_v8_0_set_compute_eop_interrupt_state(adev, 1, 0, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP:
		gfx_v8_0_set_compute_eop_interrupt_state(adev, 1, 1, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP:
		gfx_v8_0_set_compute_eop_interrupt_state(adev, 1, 2, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP:
		gfx_v8_0_set_compute_eop_interrupt_state(adev, 1, 3, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP:
		gfx_v8_0_set_compute_eop_interrupt_state(adev, 2, 0, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP:
		gfx_v8_0_set_compute_eop_interrupt_state(adev, 2, 1, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP:
		gfx_v8_0_set_compute_eop_interrupt_state(adev, 2, 2, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP:
		gfx_v8_0_set_compute_eop_interrupt_state(adev, 2, 3, state);
		break;
	default:
		break;
	}
	return 0;
}
/*
 * irq_src .process callback for CP end-of-pipe interrupts.
 *
 * Decodes the source ring from the IV ring_id field and kicks fence
 * processing on the matching ring.  ring_id layout (from the visible
 * decode below): bits [3:2] = ME, bits [1:0] = pipe, bits [6:4] = queue.
 *
 * Returns 0 always.
 */
static int gfx_v8_0_eop_irq(struct amdgpu_device *adev,
			    struct amdgpu_irq_src *source,
			    struct amdgpu_iv_entry *entry)
{
	int i;
	u8 me_id, pipe_id, queue_id;
	struct amdgpu_ring *ring;

	DRM_DEBUG("IH: CP EOP\n");
	me_id = (entry->ring_id & 0x0c) >> 2;
	pipe_id = (entry->ring_id & 0x03) >> 0;
	queue_id = (entry->ring_id & 0x70) >> 4;

	switch (me_id) {
	case 0:
		/* ME 0 is the graphics engine; only one GFX ring exists */
		amdgpu_fence_process(&adev->gfx.gfx_ring[0]);
		break;
	case 1:
	case 2:
		/* ME 1/2 are the compute micro-engines; find the ring that
		 * matches the decoded me/pipe/queue triple
		 */
		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
			ring = &adev->gfx.compute_ring[i];
			/* Per-queue interrupt is supported for MEC starting from VI.
			 * The interrupt can only be enabled/disabled per pipe instead
			 * of per queue.
			 */
			if ((ring->me == me_id) && (ring->pipe == pipe_id) && (ring->queue == queue_id))
				amdgpu_fence_process(ring);
		}
		break;
	}
	return 0;
}
/*
 * Handler for CP privileged-register fault interrupts: a command stream
 * accessed a register it is not permitted to touch.  This is treated as
 * fatal for the GPU context, so a reset is scheduled.
 *
 * Returns 0 always.
 */
static int gfx_v8_0_priv_reg_irq(struct amdgpu_device *adev,
				 struct amdgpu_irq_src *source,
				 struct amdgpu_iv_entry *entry)
{
	DRM_ERROR("Illegal register access in command stream\n");
	/* defer the actual reset to process context */
	schedule_work(&adev->reset_work);
	return 0;
}
/*
 * Handler for CP privileged-instruction fault interrupts: a command
 * stream executed an instruction it is not permitted to.  Like the
 * register fault, this schedules a GPU reset.
 *
 * Returns 0 always.
 */
static int gfx_v8_0_priv_inst_irq(struct amdgpu_device *adev,
				  struct amdgpu_irq_src *source,
				  struct amdgpu_iv_entry *entry)
{
	DRM_ERROR("Illegal instruction in command stream\n");
	/* defer the actual reset to process context */
	schedule_work(&adev->reset_work);
	return 0;
}
/* IP-block-level lifecycle callbacks for the GFX v8 block, registered
 * with the amdgpu core (init/teardown, suspend/resume, idle and reset
 * handling, clock/power gating).
 */
const struct amd_ip_funcs gfx_v8_0_ip_funcs = {
	.early_init = gfx_v8_0_early_init,
	.late_init = NULL,
	.sw_init = gfx_v8_0_sw_init,
	.sw_fini = gfx_v8_0_sw_fini,
	.hw_init = gfx_v8_0_hw_init,
	.hw_fini = gfx_v8_0_hw_fini,
	.suspend = gfx_v8_0_suspend,
	.resume = gfx_v8_0_resume,
	.is_idle = gfx_v8_0_is_idle,
	.wait_for_idle = gfx_v8_0_wait_for_idle,
	.soft_reset = gfx_v8_0_soft_reset,
	.print_status = gfx_v8_0_print_status,
	.set_clockgating_state = gfx_v8_0_set_clockgating_state,
	.set_powergating_state = gfx_v8_0_set_powergating_state,
};
/* Ring callbacks for the GFX (graphics) ring: pointer accessors,
 * command/fence/semaphore emission, VM and GDS management, and ring
 * self-tests.
 */
static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
	.get_rptr = gfx_v8_0_ring_get_rptr_gfx,
	.get_wptr = gfx_v8_0_ring_get_wptr_gfx,
	.set_wptr = gfx_v8_0_ring_set_wptr_gfx,
	.parse_cs = NULL,
	.emit_ib = gfx_v8_0_ring_emit_ib_gfx,
	.emit_fence = gfx_v8_0_ring_emit_fence_gfx,
	.emit_semaphore = gfx_v8_0_ring_emit_semaphore,
	.emit_vm_flush = gfx_v8_0_ring_emit_vm_flush,
	.emit_gds_switch = gfx_v8_0_ring_emit_gds_switch,
	.emit_hdp_flush = gfx_v8_0_ring_emit_hdp_flush,
	.test_ring = gfx_v8_0_ring_test_ring,
	.test_ib = gfx_v8_0_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
};
/* Ring callbacks for the compute (MEC) rings.  Shares most emission
 * helpers with the GFX ring but uses compute-specific pointer accessors
 * and the RELEASE_MEM-based compute fence.
 */
static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
	.get_rptr = gfx_v8_0_ring_get_rptr_compute,
	.get_wptr = gfx_v8_0_ring_get_wptr_compute,
	.set_wptr = gfx_v8_0_ring_set_wptr_compute,
	.parse_cs = NULL,
	.emit_ib = gfx_v8_0_ring_emit_ib_compute,
	.emit_fence = gfx_v8_0_ring_emit_fence_compute,
	.emit_semaphore = gfx_v8_0_ring_emit_semaphore,
	.emit_vm_flush = gfx_v8_0_ring_emit_vm_flush,
	.emit_gds_switch = gfx_v8_0_ring_emit_gds_switch,
	.emit_hdp_flush = gfx_v8_0_ring_emit_hdp_flush,
	.test_ring = gfx_v8_0_ring_test_ring,
	.test_ib = gfx_v8_0_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
};
5045 static void gfx_v8_0_set_ring_funcs(struct amdgpu_device
*adev
)
5049 for (i
= 0; i
< adev
->gfx
.num_gfx_rings
; i
++)
5050 adev
->gfx
.gfx_ring
[i
].funcs
= &gfx_v8_0_ring_funcs_gfx
;
5052 for (i
= 0; i
< adev
->gfx
.num_compute_rings
; i
++)
5053 adev
->gfx
.compute_ring
[i
].funcs
= &gfx_v8_0_ring_funcs_compute
;
/* Interrupt source callbacks for CP end-of-pipe interrupts. */
static const struct amdgpu_irq_src_funcs gfx_v8_0_eop_irq_funcs = {
	.set = gfx_v8_0_set_eop_interrupt_state,
	.process = gfx_v8_0_eop_irq,
};
/* Interrupt source callbacks for privileged-register faults. */
static const struct amdgpu_irq_src_funcs gfx_v8_0_priv_reg_irq_funcs = {
	.set = gfx_v8_0_set_priv_reg_fault_state,
	.process = gfx_v8_0_priv_reg_irq,
};
/* Interrupt source callbacks for privileged-instruction faults. */
static const struct amdgpu_irq_src_funcs gfx_v8_0_priv_inst_irq_funcs = {
	.set = gfx_v8_0_set_priv_inst_fault_state,
	.process = gfx_v8_0_priv_inst_irq,
};
5071 static void gfx_v8_0_set_irq_funcs(struct amdgpu_device
*adev
)
5073 adev
->gfx
.eop_irq
.num_types
= AMDGPU_CP_IRQ_LAST
;
5074 adev
->gfx
.eop_irq
.funcs
= &gfx_v8_0_eop_irq_funcs
;
5076 adev
->gfx
.priv_reg_irq
.num_types
= 1;
5077 adev
->gfx
.priv_reg_irq
.funcs
= &gfx_v8_0_priv_reg_irq_funcs
;
5079 adev
->gfx
.priv_inst_irq
.num_types
= 1;
5080 adev
->gfx
.priv_inst_irq
.funcs
= &gfx_v8_0_priv_inst_irq_funcs
;
5083 static void gfx_v8_0_set_gds_init(struct amdgpu_device
*adev
)
5085 /* init asci gds info */
5086 adev
->gds
.mem
.total_size
= RREG32(mmGDS_VMID0_SIZE
);
5087 adev
->gds
.gws
.total_size
= 64;
5088 adev
->gds
.oa
.total_size
= 16;
5090 if (adev
->gds
.mem
.total_size
== 64 * 1024) {
5091 adev
->gds
.mem
.gfx_partition_size
= 4096;
5092 adev
->gds
.mem
.cs_partition_size
= 4096;
5094 adev
->gds
.gws
.gfx_partition_size
= 4;
5095 adev
->gds
.gws
.cs_partition_size
= 4;
5097 adev
->gds
.oa
.gfx_partition_size
= 4;
5098 adev
->gds
.oa
.cs_partition_size
= 1;
5100 adev
->gds
.mem
.gfx_partition_size
= 1024;
5101 adev
->gds
.mem
.cs_partition_size
= 1024;
5103 adev
->gds
.gws
.gfx_partition_size
= 16;
5104 adev
->gds
.gws
.cs_partition_size
= 16;
5106 adev
->gds
.oa
.gfx_partition_size
= 4;
5107 adev
->gds
.oa
.cs_partition_size
= 4;
/*
 * Return the bitmap of active CUs for shader engine @se / shader array
 * @sh: bit i set means CU i is usable (neither fused off nor disabled).
 *
 * Reads the hardware (CC) and user (GC_USER) shader-array config for
 * the selected SE/SH, then restores broadcast (0xffffffff) selection.
 * Caller is expected to hold grbm_idx_mutex (selection is global state).
 */
static u32 gfx_v8_0_get_cu_active_bitmap(struct amdgpu_device *adev,
					 u32 se, u32 sh)
{
	u32 mask = 0, tmp, tmp1;
	int i;

	gfx_v8_0_select_se_sh(adev, se, sh);
	tmp = RREG32(mmCC_GC_SHADER_ARRAY_CONFIG);
	tmp1 = RREG32(mmGC_USER_SHADER_ARRAY_CONFIG);
	gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff);

	/* combine hardware-fused and user-disabled CU bits; the inactive-CU
	 * field lives in the upper 16 bits of the CC register
	 */
	tmp &= 0xffff0000;

	tmp |= tmp1;
	tmp >>= 16;

	/* build a mask of max_cu_per_sh one-bits */
	for (i = 0; i < adev->gfx.config.max_cu_per_sh; i ++) {
		mask <<= 1;
		mask |= 1;
	}

	/* registers hold *inactive* CUs, so invert before masking */
	return (~tmp) & mask;
}
5135 int gfx_v8_0_get_cu_info(struct amdgpu_device
*adev
,
5136 struct amdgpu_cu_info
*cu_info
)
5138 int i
, j
, k
, counter
, active_cu_number
= 0;
5139 u32 mask
, bitmap
, ao_bitmap
, ao_cu_mask
= 0;
5141 if (!adev
|| !cu_info
)
5144 mutex_lock(&adev
->grbm_idx_mutex
);
5145 for (i
= 0; i
< adev
->gfx
.config
.max_shader_engines
; i
++) {
5146 for (j
= 0; j
< adev
->gfx
.config
.max_sh_per_se
; j
++) {
5150 bitmap
= gfx_v8_0_get_cu_active_bitmap(adev
, i
, j
);
5151 cu_info
->bitmap
[i
][j
] = bitmap
;
5153 for (k
= 0; k
< adev
->gfx
.config
.max_cu_per_sh
; k
++) {
5154 if (bitmap
& mask
) {
5161 active_cu_number
+= counter
;
5162 ao_cu_mask
|= (ao_bitmap
<< (i
* 16 + j
* 8));
5166 cu_info
->number
= active_cu_number
;
5167 cu_info
->ao_cu_mask
= ao_cu_mask
;
5168 mutex_unlock(&adev
->grbm_idx_mutex
);