drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c
/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include <linux/delay.h>
#include "dm_services.h"
#include "dcn20/dcn20_hubbub.h"
#include "dcn21_hubbub.h"
#include "reg_helper.h"

#define REG(reg)\
	hubbub1->regs->reg
#define DC_LOGGER \
	hubbub1->base.ctx->logger
#define CTX \
	hubbub1->base.ctx

#undef FN
#define FN(reg_name, field_name) \
	hubbub1->shifts->field_name, hubbub1->masks->field_name

#define REG(reg)\
	hubbub1->regs->reg

#define CTX \
	hubbub1->base.ctx

#undef FN
#define FN(reg_name, field_name) \
	hubbub1->shifts->field_name, hubbub1->masks->field_name

#ifdef NUM_VMID
#undef NUM_VMID
#endif
#define NUM_VMID 16

static uint32_t convert_and_clamp(
	uint32_t wm_ns,
	uint32_t refclk_mhz,
	uint32_t clamp_value)
{
	uint32_t ret_val = 0;
	ret_val = wm_ns * refclk_mhz;
	ret_val /= 1000;

	if (ret_val > clamp_value)
		ret_val = clamp_value;

	return ret_val;
}

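/*
 * Worked example for convert_and_clamp() (illustrative numbers only):
 * wm_ns = 9000 and refclk_mhz = 300 give 9000 * 300 / 1000 = 2700 refclk
 * cycles, well below the 0x1fffff clamp passed in by the callers below.
 */
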
void dcn21_dchvm_init(struct hubbub *hubbub)
{
	struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
	uint32_t riommu_active;
	int i;

	//Init DCHVM block
	REG_UPDATE(DCHVM_CTRL0, HOSTVM_INIT_REQ, 1);

	//Poll until RIOMMU_ACTIVE = 1
	for (i = 0; i < 100; i++) {
		REG_GET(DCHVM_RIOMMU_STAT0, RIOMMU_ACTIVE, &riommu_active);

		if (riommu_active)
			break;
		else
			udelay(5);
	}

	if (riommu_active) {
		//Reflect the power status of DCHUBBUB
		REG_UPDATE(DCHVM_RIOMMU_CTRL0, HOSTVM_POWERSTATUS, 1);

		//Start rIOMMU prefetching
		REG_UPDATE(DCHVM_RIOMMU_CTRL0, HOSTVM_PREFETCH_REQ, 1);

		// Enable dynamic clock gating
		REG_UPDATE_4(DCHVM_CLK_CTRL,
				HVM_DISPCLK_R_GATE_DIS, 0,
				HVM_DISPCLK_G_GATE_DIS, 0,
				HVM_DCFCLK_R_GATE_DIS, 0,
				HVM_DCFCLK_G_GATE_DIS, 0);

		//Poll until HOSTVM_PREFETCH_DONE = 1
		REG_WAIT(DCHVM_RIOMMU_STAT0, HOSTVM_PREFETCH_DONE, 1, 5, 100);
	}
}

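/*
 * Note on dcn21_dchvm_init(): the RIOMMU_ACTIVE poll runs at most
 * 100 iterations of udelay(5), i.e. roughly 500 us of delay, before the
 * remaining DCHVM programming is skipped.
 */
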
int hubbub21_init_dchub(struct hubbub *hubbub,
		struct dcn_hubbub_phys_addr_config *pa_config)
{
	struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
	struct dcn_vmid_page_table_config phys_config;

	REG_SET(DCN_VM_FB_LOCATION_BASE, 0,
			FB_BASE, pa_config->system_aperture.fb_base >> 24);
	REG_SET(DCN_VM_FB_LOCATION_TOP, 0,
			FB_TOP, pa_config->system_aperture.fb_top >> 24);
	REG_SET(DCN_VM_FB_OFFSET, 0,
			FB_OFFSET, pa_config->system_aperture.fb_offset >> 24);
	REG_SET(DCN_VM_AGP_BOT, 0,
			AGP_BOT, pa_config->system_aperture.agp_bot >> 24);
	REG_SET(DCN_VM_AGP_TOP, 0,
			AGP_TOP, pa_config->system_aperture.agp_top >> 24);
	REG_SET(DCN_VM_AGP_BASE, 0,
			AGP_BASE, pa_config->system_aperture.agp_base >> 24);

	if (pa_config->gart_config.page_table_start_addr != pa_config->gart_config.page_table_end_addr) {
		phys_config.page_table_start_addr = pa_config->gart_config.page_table_start_addr >> 12;
		phys_config.page_table_end_addr = pa_config->gart_config.page_table_end_addr >> 12;
		phys_config.page_table_base_addr = pa_config->gart_config.page_table_base_addr | 1; //Note: hack
		phys_config.depth = 0;
		phys_config.block_size = 0;
		// Init VMID 0 based on PA config
		dcn20_vmid_setup(&hubbub1->vmid[0], &phys_config);
	}

	dcn21_dchvm_init(hubbub);

	return NUM_VMID;
}

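/*
 * Address granularity in hubbub21_init_dchub(): the system-aperture and AGP
 * addresses are shifted right by 24 before being written, i.e. the register
 * fields hold addresses in 16 MB units, while the GART page-table start/end
 * addresses are shifted right by 12, i.e. held in 4 KB pages.
 */
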
void hubbub21_program_urgent_watermarks(
		struct hubbub *hubbub,
		struct dcn_watermark_set *watermarks,
		unsigned int refclk_mhz,
		bool safe_to_lower)
{
	struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
	uint32_t prog_wm_value;

	/* Repeat for water mark set A, B, C and D. */
	/* clock state A */
	if (safe_to_lower || watermarks->a.urgent_ns > hubbub1->watermarks.a.urgent_ns) {
		hubbub1->watermarks.a.urgent_ns = watermarks->a.urgent_ns;
		prog_wm_value = convert_and_clamp(watermarks->a.urgent_ns,
				refclk_mhz, 0x1fffff);
		REG_SET_2(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, 0,
				DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value,
				DCHUBBUB_ARB_VM_ROW_URGENCY_WATERMARK_A, prog_wm_value);

		DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_A calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->a.urgent_ns, prog_wm_value);
	}

	/* determine the transfer time for a quantity of data for a particular requestor.*/
	if (safe_to_lower || watermarks->a.frac_urg_bw_flip
			> hubbub1->watermarks.a.frac_urg_bw_flip) {
		hubbub1->watermarks.a.frac_urg_bw_flip = watermarks->a.frac_urg_bw_flip;

		REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A, 0,
				DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A, watermarks->a.frac_urg_bw_flip);
	}

	if (safe_to_lower || watermarks->a.frac_urg_bw_nom
			> hubbub1->watermarks.a.frac_urg_bw_nom) {
		hubbub1->watermarks.a.frac_urg_bw_nom = watermarks->a.frac_urg_bw_nom;

		REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_A, 0,
				DCHUBBUB_ARB_FRAC_URG_BW_NOM_A, watermarks->a.frac_urg_bw_nom);
	}
	if (safe_to_lower || watermarks->a.urgent_latency_ns > hubbub1->watermarks.a.urgent_latency_ns) {
		hubbub1->watermarks.a.urgent_latency_ns = watermarks->a.urgent_latency_ns;
		prog_wm_value = convert_and_clamp(watermarks->a.urgent_latency_ns,
				refclk_mhz, 0x1fffff);
		REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, 0,
				DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, prog_wm_value);
	}

	/* clock state B */
	if (safe_to_lower || watermarks->b.urgent_ns > hubbub1->watermarks.b.urgent_ns) {
		hubbub1->watermarks.b.urgent_ns = watermarks->b.urgent_ns;
		prog_wm_value = convert_and_clamp(watermarks->b.urgent_ns,
				refclk_mhz, 0x1fffff);
		REG_SET_2(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, 0,
				DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, prog_wm_value,
				DCHUBBUB_ARB_VM_ROW_URGENCY_WATERMARK_B, prog_wm_value);

		DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_B calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->b.urgent_ns, prog_wm_value);
	}

	/* determine the transfer time for a quantity of data for a particular requestor.*/
	if (safe_to_lower || watermarks->a.frac_urg_bw_flip
			> hubbub1->watermarks.a.frac_urg_bw_flip) {
		hubbub1->watermarks.a.frac_urg_bw_flip = watermarks->a.frac_urg_bw_flip;

		REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B, 0,
				DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B, watermarks->a.frac_urg_bw_flip);
	}

	if (safe_to_lower || watermarks->a.frac_urg_bw_nom
			> hubbub1->watermarks.a.frac_urg_bw_nom) {
		hubbub1->watermarks.a.frac_urg_bw_nom = watermarks->a.frac_urg_bw_nom;

		REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, 0,
				DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, watermarks->a.frac_urg_bw_nom);
	}

	if (safe_to_lower || watermarks->b.urgent_latency_ns > hubbub1->watermarks.b.urgent_latency_ns) {
		hubbub1->watermarks.b.urgent_latency_ns = watermarks->b.urgent_latency_ns;
		prog_wm_value = convert_and_clamp(watermarks->b.urgent_latency_ns,
				refclk_mhz, 0x1fffff);
		REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, 0,
				DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, prog_wm_value);
	}

	/* clock state C */
	if (safe_to_lower || watermarks->c.urgent_ns > hubbub1->watermarks.c.urgent_ns) {
		hubbub1->watermarks.c.urgent_ns = watermarks->c.urgent_ns;
		prog_wm_value = convert_and_clamp(watermarks->c.urgent_ns,
				refclk_mhz, 0x1fffff);
		REG_SET_2(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, 0,
				DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, prog_wm_value,
				DCHUBBUB_ARB_VM_ROW_URGENCY_WATERMARK_C, prog_wm_value);

		DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_C calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->c.urgent_ns, prog_wm_value);
	}

	/* determine the transfer time for a quantity of data for a particular requestor.*/
	if (safe_to_lower || watermarks->a.frac_urg_bw_flip
			> hubbub1->watermarks.a.frac_urg_bw_flip) {
		hubbub1->watermarks.a.frac_urg_bw_flip = watermarks->a.frac_urg_bw_flip;

		REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C, 0,
				DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C, watermarks->a.frac_urg_bw_flip);
	}

	if (safe_to_lower || watermarks->a.frac_urg_bw_nom
			> hubbub1->watermarks.a.frac_urg_bw_nom) {
		hubbub1->watermarks.a.frac_urg_bw_nom = watermarks->a.frac_urg_bw_nom;

		REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_C, 0,
				DCHUBBUB_ARB_FRAC_URG_BW_NOM_C, watermarks->a.frac_urg_bw_nom);
	}

	if (safe_to_lower || watermarks->c.urgent_latency_ns > hubbub1->watermarks.c.urgent_latency_ns) {
		hubbub1->watermarks.c.urgent_latency_ns = watermarks->c.urgent_latency_ns;
		prog_wm_value = convert_and_clamp(watermarks->c.urgent_latency_ns,
				refclk_mhz, 0x1fffff);
		REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, 0,
				DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, prog_wm_value);
	}

	/* clock state D */
	if (safe_to_lower || watermarks->d.urgent_ns > hubbub1->watermarks.d.urgent_ns) {
		hubbub1->watermarks.d.urgent_ns = watermarks->d.urgent_ns;
		prog_wm_value = convert_and_clamp(watermarks->d.urgent_ns,
				refclk_mhz, 0x1fffff);
		REG_SET_2(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, 0,
				DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, prog_wm_value,
				DCHUBBUB_ARB_VM_ROW_URGENCY_WATERMARK_D, prog_wm_value);

		DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_D calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->d.urgent_ns, prog_wm_value);
	}

	/* determine the transfer time for a quantity of data for a particular requestor.*/
	if (safe_to_lower || watermarks->a.frac_urg_bw_flip
			> hubbub1->watermarks.a.frac_urg_bw_flip) {
		hubbub1->watermarks.a.frac_urg_bw_flip = watermarks->a.frac_urg_bw_flip;

		REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D, 0,
				DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D, watermarks->a.frac_urg_bw_flip);
	}

	if (safe_to_lower || watermarks->a.frac_urg_bw_nom
			> hubbub1->watermarks.a.frac_urg_bw_nom) {
		hubbub1->watermarks.a.frac_urg_bw_nom = watermarks->a.frac_urg_bw_nom;

		REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_D, 0,
				DCHUBBUB_ARB_FRAC_URG_BW_NOM_D, watermarks->a.frac_urg_bw_nom);
	}

	if (safe_to_lower || watermarks->d.urgent_latency_ns > hubbub1->watermarks.d.urgent_latency_ns) {
		hubbub1->watermarks.d.urgent_latency_ns = watermarks->d.urgent_latency_ns;
		prog_wm_value = convert_and_clamp(watermarks->d.urgent_latency_ns,
				refclk_mhz, 0x1fffff);
		REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, 0,
				DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, prog_wm_value);
	}
}

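/*
 * Note on the safe_to_lower pattern used by the watermark programmers in
 * this file: a cached watermark is only rewritten when the new value is
 * larger than the cached one, or when the caller passes safe_to_lower,
 * so watermarks are never silently reduced during an unsafe update.
 */
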
void hubbub21_program_stutter_watermarks(
		struct hubbub *hubbub,
		struct dcn_watermark_set *watermarks,
		unsigned int refclk_mhz,
		bool safe_to_lower)
{
	struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
	uint32_t prog_wm_value;

	/* clock state A */
	if (safe_to_lower || watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns
			> hubbub1->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns) {
		hubbub1->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns =
				watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns,
				refclk_mhz, 0x1fffff);
		REG_SET_2(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, 0,
				DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, prog_wm_value,
				DCHUBBUB_ARB_VM_ROW_ALLOW_SR_ENTER_WATERMARK_A, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_A calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
	}

	if (safe_to_lower || watermarks->a.cstate_pstate.cstate_exit_ns
			> hubbub1->watermarks.a.cstate_pstate.cstate_exit_ns) {
		hubbub1->watermarks.a.cstate_pstate.cstate_exit_ns =
				watermarks->a.cstate_pstate.cstate_exit_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->a.cstate_pstate.cstate_exit_ns,
				refclk_mhz, 0x1fffff);
		REG_SET_2(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, 0,
				DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value,
				DCHUBBUB_ARB_VM_ROW_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_A calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->a.cstate_pstate.cstate_exit_ns, prog_wm_value);
	}

	/* clock state B */
	if (safe_to_lower || watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns
			> hubbub1->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns) {
		hubbub1->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns =
				watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns,
				refclk_mhz, 0x1fffff);
		REG_SET_2(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, 0,
				DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, prog_wm_value,
				DCHUBBUB_ARB_VM_ROW_ALLOW_SR_ENTER_WATERMARK_B, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_B calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
	}

	if (safe_to_lower || watermarks->b.cstate_pstate.cstate_exit_ns
			> hubbub1->watermarks.b.cstate_pstate.cstate_exit_ns) {
		hubbub1->watermarks.b.cstate_pstate.cstate_exit_ns =
				watermarks->b.cstate_pstate.cstate_exit_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->b.cstate_pstate.cstate_exit_ns,
				refclk_mhz, 0x1fffff);
		REG_SET_2(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, 0,
				DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, prog_wm_value,
				DCHUBBUB_ARB_VM_ROW_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_B calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->b.cstate_pstate.cstate_exit_ns, prog_wm_value);
	}

	/* clock state C */
	if (safe_to_lower || watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns
			> hubbub1->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns) {
		hubbub1->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns =
				watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns,
				refclk_mhz, 0x1fffff);
		REG_SET_2(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, 0,
				DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, prog_wm_value,
				DCHUBBUB_ARB_VM_ROW_ALLOW_SR_ENTER_WATERMARK_C, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_C calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
	}

	if (safe_to_lower || watermarks->c.cstate_pstate.cstate_exit_ns
			> hubbub1->watermarks.c.cstate_pstate.cstate_exit_ns) {
		hubbub1->watermarks.c.cstate_pstate.cstate_exit_ns =
				watermarks->c.cstate_pstate.cstate_exit_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->c.cstate_pstate.cstate_exit_ns,
				refclk_mhz, 0x1fffff);
		REG_SET_2(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, 0,
				DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, prog_wm_value,
				DCHUBBUB_ARB_VM_ROW_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_C calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->c.cstate_pstate.cstate_exit_ns, prog_wm_value);
	}

	/* clock state D */
	if (safe_to_lower || watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns
			> hubbub1->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns) {
		hubbub1->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns =
				watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns,
				refclk_mhz, 0x1fffff);
		REG_SET_2(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, 0,
				DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, prog_wm_value,
				DCHUBBUB_ARB_VM_ROW_ALLOW_SR_ENTER_WATERMARK_D, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_D calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
	}

	if (safe_to_lower || watermarks->d.cstate_pstate.cstate_exit_ns
			> hubbub1->watermarks.d.cstate_pstate.cstate_exit_ns) {
		hubbub1->watermarks.d.cstate_pstate.cstate_exit_ns =
				watermarks->d.cstate_pstate.cstate_exit_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->d.cstate_pstate.cstate_exit_ns,
				refclk_mhz, 0x1fffff);
		REG_SET_2(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, 0,
				DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, prog_wm_value,
				DCHUBBUB_ARB_VM_ROW_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_D calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->d.cstate_pstate.cstate_exit_ns, prog_wm_value);
	}
}

void hubbub21_program_pstate_watermarks(
		struct hubbub *hubbub,
		struct dcn_watermark_set *watermarks,
		unsigned int refclk_mhz,
		bool safe_to_lower)
{
	struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
	uint32_t prog_wm_value;

	/* clock state A */
	if (safe_to_lower || watermarks->a.cstate_pstate.pstate_change_ns
			> hubbub1->watermarks.a.cstate_pstate.pstate_change_ns) {
		hubbub1->watermarks.a.cstate_pstate.pstate_change_ns =
				watermarks->a.cstate_pstate.pstate_change_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->a.cstate_pstate.pstate_change_ns,
				refclk_mhz, 0x1fffff);
		REG_SET_2(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, 0,
				DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, prog_wm_value,
				DCHUBBUB_ARB_VM_ROW_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_A calculated =%d\n"
			"HW register value = 0x%x\n\n",
			watermarks->a.cstate_pstate.pstate_change_ns, prog_wm_value);
	}

	/* clock state B */
	if (safe_to_lower || watermarks->b.cstate_pstate.pstate_change_ns
			> hubbub1->watermarks.b.cstate_pstate.pstate_change_ns) {
		hubbub1->watermarks.b.cstate_pstate.pstate_change_ns =
				watermarks->b.cstate_pstate.pstate_change_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->b.cstate_pstate.pstate_change_ns,
				refclk_mhz, 0x1fffff);
		REG_SET_2(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, 0,
				DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, prog_wm_value,
				DCHUBBUB_ARB_VM_ROW_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_B calculated =%d\n"
			"HW register value = 0x%x\n\n",
			watermarks->b.cstate_pstate.pstate_change_ns, prog_wm_value);
	}

	/* clock state C */
	if (safe_to_lower || watermarks->c.cstate_pstate.pstate_change_ns
			> hubbub1->watermarks.c.cstate_pstate.pstate_change_ns) {
		hubbub1->watermarks.c.cstate_pstate.pstate_change_ns =
				watermarks->c.cstate_pstate.pstate_change_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->c.cstate_pstate.pstate_change_ns,
				refclk_mhz, 0x1fffff);
		REG_SET_2(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, 0,
				DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, prog_wm_value,
				DCHUBBUB_ARB_VM_ROW_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_C calculated =%d\n"
			"HW register value = 0x%x\n\n",
			watermarks->c.cstate_pstate.pstate_change_ns, prog_wm_value);
	}

	/* clock state D */
	if (safe_to_lower || watermarks->d.cstate_pstate.pstate_change_ns
			> hubbub1->watermarks.d.cstate_pstate.pstate_change_ns) {
		hubbub1->watermarks.d.cstate_pstate.pstate_change_ns =
				watermarks->d.cstate_pstate.pstate_change_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->d.cstate_pstate.pstate_change_ns,
				refclk_mhz, 0x1fffff);
		REG_SET_2(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, 0,
				DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, prog_wm_value,
				DCHUBBUB_ARB_VM_ROW_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_D calculated =%d\n"
			"HW register value = 0x%x\n\n",
			watermarks->d.cstate_pstate.pstate_change_ns, prog_wm_value);
	}
}

void hubbub21_program_watermarks(
		struct hubbub *hubbub,
		struct dcn_watermark_set *watermarks,
		unsigned int refclk_mhz,
		bool safe_to_lower)
{
	struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);

	hubbub21_program_urgent_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower);
	hubbub21_program_stutter_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower);
	hubbub21_program_pstate_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower);

	/*
	 * The DCHub arbiter has a mechanism to dynamically rate limit the DCHub request stream to the fabric.
	 * If the memory controller is fully utilized and the DCHub requestors are
	 * well ahead of their amortized schedule, then it is safe to prevent the next winner
	 * from being committed and sent to the fabric.
	 * The utilization of the memory controller is approximated by ensuring that
	 * the number of outstanding requests is greater than a threshold specified
	 * by ARB_MIN_REQ_OUTSTANDING. To determine that the DCHub requestors are well ahead of the amortized schedule,
	 * the slack of the next winner is compared with the ARB_SAT_LEVEL in DLG RefClk cycles.
	 *
	 * TODO: Revisit the request limit once the right number is figured out.
	 * The request limit for Renoir isn't decided yet, so set the maximum
	 * value (0x1FF) to turn it off for now.
	 */
	REG_SET(DCHUBBUB_ARB_SAT_LEVEL, 0,
			DCHUBBUB_ARB_SAT_LEVEL, 60 * refclk_mhz);
	REG_UPDATE_2(DCHUBBUB_ARB_DF_REQ_OUTSTAND,
			DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 0x1FF,
			DCHUBBUB_ARB_MIN_REQ_OUTSTAND_COMMIT_THRESHOLD, 0xA);
	REG_UPDATE(DCHUBBUB_ARB_HOSTVM_CNTL,
			DCHUBBUB_ARB_MAX_QOS_COMMIT_THRESHOLD, 0xF);

	hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter);
}

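/*
 * Arbiter programming note for hubbub21_program_watermarks(): the saturation
 * level is written as 60 * refclk_mhz, i.e. 60 us expressed in DLG RefClk
 * cycles (a 100 MHz refclk, for example, gives 6000 cycles), while
 * DCHUBBUB_ARB_MIN_REQ_OUTSTAND is parked at its maximum of 0x1FF per the
 * TODO above.
 */
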
void hubbub21_wm_read_state(struct hubbub *hubbub,
		struct dcn_hubbub_wm *wm)
{
	struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
	struct dcn_hubbub_wm_set *s;

	memset(wm, 0, sizeof(struct dcn_hubbub_wm));

	s = &wm->sets[0];
	s->wm_set = 0;
	REG_GET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A,
			DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, &s->data_urgent);

	REG_GET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A,
			DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, &s->sr_enter);

	REG_GET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A,
			DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, &s->sr_exit);

	REG_GET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A,
			DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, &s->dram_clk_chanage);

	s = &wm->sets[1];
	s->wm_set = 1;
	REG_GET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B,
			DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, &s->data_urgent);

	REG_GET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B,
			DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, &s->sr_enter);

	REG_GET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B,
			DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, &s->sr_exit);

	REG_GET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B,
			DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, &s->dram_clk_chanage);

	s = &wm->sets[2];
	s->wm_set = 2;
	REG_GET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C,
			DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, &s->data_urgent);

	REG_GET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C,
			DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, &s->sr_enter);

	REG_GET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C,
			DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, &s->sr_exit);

	REG_GET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C,
			DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, &s->dram_clk_chanage);

	s = &wm->sets[3];
	s->wm_set = 3;
	REG_GET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D,
			DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, &s->data_urgent);

	REG_GET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D,
			DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, &s->sr_enter);

	REG_GET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D,
			DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, &s->sr_exit);

	REG_GET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D,
			DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, &s->dram_clk_chanage);
}

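/*
 * In hubbub21_wm_read_state() above, wm->sets[0..3] mirror clock states A
 * through D of the registers programmed by the watermark functions earlier
 * in this file.
 */
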
void hubbub21_apply_DEDCN21_147_wa(struct hubbub *hubbub)
{
	struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
	uint32_t prog_wm_value;

	/* Read the A data-urgency watermark back and rewrite it unchanged. */
	prog_wm_value = REG_READ(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A);
	REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value);
}

static const struct hubbub_funcs hubbub21_funcs = {
	.update_dchub = hubbub2_update_dchub,
	.init_dchub_sys_ctx = hubbub21_init_dchub,
	.init_vm_ctx = hubbub2_init_vm_ctx,
	.dcc_support_swizzle = hubbub2_dcc_support_swizzle,
	.dcc_support_pixel_format = hubbub2_dcc_support_pixel_format,
	.get_dcc_compression_cap = hubbub2_get_dcc_compression_cap,
	.wm_read_state = hubbub21_wm_read_state,
	.get_dchub_ref_freq = hubbub2_get_dchub_ref_freq,
	.program_watermarks = hubbub21_program_watermarks,
	.apply_DEDCN21_147_wa = hubbub21_apply_DEDCN21_147_wa,
};

void hubbub21_construct(struct dcn20_hubbub *hubbub,
	struct dc_context *ctx,
	const struct dcn_hubbub_registers *hubbub_regs,
	const struct dcn_hubbub_shift *hubbub_shift,
	const struct dcn_hubbub_mask *hubbub_mask)
{
	hubbub->base.ctx = ctx;

	hubbub->base.funcs = &hubbub21_funcs;

	hubbub->regs = hubbub_regs;
	hubbub->shifts = hubbub_shift;
	hubbub->masks = hubbub_mask;

	hubbub->debug_test_index_pstate = 0xB;
	hubbub->detile_buf_size = 164 * 1024; /* 164KB for DCN2.0 */
}
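
/*
 * Usage sketch (not part of this file, names are illustrative only): a
 * resource-construction path would typically allocate a struct dcn20_hubbub,
 * hand it the per-ASIC register/shift/mask tables, and then use it through
 * its struct hubbub base:
 *
 *	struct dcn20_hubbub *hubbub21 = kzalloc(sizeof(*hubbub21), GFP_KERNEL);
 *
 *	if (hubbub21) {
 *		hubbub21_construct(hubbub21, ctx,
 *				&hubbub_reg, &hubbub_shift, &hubbub_mask);
 *		pool->hubbub = &hubbub21->base;
 *	}
 */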