/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/delay.h>
#include "dm_services.h"
#include "dcn20/dcn20_hubbub.h"
#include "dcn21_hubbub.h"
#include "reg_helper.h"

#define REG(reg)\
        hubbub1->regs->reg

#define DC_LOGGER \
        hubbub1->base.ctx->logger

#define CTX \
        hubbub1->base.ctx

#undef FN
#define FN(reg_name, field_name) \
        hubbub1->shifts->field_name, hubbub1->masks->field_name

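/*
 * Watermark values are passed in as nanoseconds and are programmed as DCHUB
 * refclk cycles: cycles = ns * refclk_mhz / 1000, clamped to the width of the
 * destination register field. For example (illustrative numbers only), a
 * 9000 ns watermark at a 100 MHz refclk programs 900 cycles.
 */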
static uint32_t convert_and_clamp(
        uint32_t wm_ns,
        uint32_t refclk_mhz,
        uint32_t clamp_value)
{
        uint32_t ret_val = 0;

        ret_val = wm_ns * refclk_mhz;
        ret_val /= 1000; /* ns * MHz / 1000 = refclk cycles */

        if (ret_val > clamp_value)
                ret_val = clamp_value;

        return ret_val;
}

void dcn21_dchvm_init(struct hubbub *hubbub)
{
        struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
        uint32_t riommu_active;
        int i;

        //Init DCHVM block
        REG_UPDATE(DCHVM_CTRL0, HOSTVM_INIT_REQ, 1);

        //Poll until RIOMMU_ACTIVE = 1
        for (i = 0; i < 100; i++) {
                REG_GET(DCHVM_RIOMMU_STAT0, RIOMMU_ACTIVE, &riommu_active);

                if (riommu_active)
                        break;

                udelay(5);
        }

        if (riommu_active) {
                //Reflect the power status of DCHUBBUB
                REG_UPDATE(DCHVM_RIOMMU_CTRL0, HOSTVM_POWERSTATUS, 1);

                //Start rIOMMU prefetching
                REG_UPDATE(DCHVM_RIOMMU_CTRL0, HOSTVM_PREFETCH_REQ, 1);

                // Enable dynamic clock gating
                REG_UPDATE_4(DCHVM_CLK_CTRL,
                                HVM_DISPCLK_R_GATE_DIS, 0,
                                HVM_DISPCLK_G_GATE_DIS, 0,
                                HVM_DCFCLK_R_GATE_DIS, 0,
                                HVM_DCFCLK_G_GATE_DIS, 0);

                //Poll until HOSTVM_PREFETCH_DONE = 1
                REG_WAIT(DCHVM_RIOMMU_STAT0, HOSTVM_PREFETCH_DONE, 1, 5, 100);

                hubbub->riommu_active = true;
        }
}

int hubbub21_init_dchub(struct hubbub *hubbub,
                struct dcn_hubbub_phys_addr_config *pa_config)
{
        struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
        struct dcn_vmid_page_table_config phys_config;

        REG_SET(DCN_VM_FB_LOCATION_BASE, 0,
                        FB_BASE, pa_config->system_aperture.fb_base >> 24);
        REG_SET(DCN_VM_FB_LOCATION_TOP, 0,
                        FB_TOP, pa_config->system_aperture.fb_top >> 24);
        REG_SET(DCN_VM_FB_OFFSET, 0,
                        FB_OFFSET, pa_config->system_aperture.fb_offset >> 24);
        REG_SET(DCN_VM_AGP_BOT, 0,
                        AGP_BOT, pa_config->system_aperture.agp_bot >> 24);
        REG_SET(DCN_VM_AGP_TOP, 0,
                        AGP_TOP, pa_config->system_aperture.agp_top >> 24);
        REG_SET(DCN_VM_AGP_BASE, 0,
                        AGP_BASE, pa_config->system_aperture.agp_base >> 24);

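        /*
         * The aperture registers above take the upper address bits (>> 24);
         * the GART page table addresses below are programmed in 4 KiB page
         * units (>> 12). This note is descriptive only, inferred from the
         * shifts used here.
         */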
        if (pa_config->gart_config.page_table_start_addr != pa_config->gart_config.page_table_end_addr) {
                phys_config.page_table_start_addr = pa_config->gart_config.page_table_start_addr >> 12;
                phys_config.page_table_end_addr = pa_config->gart_config.page_table_end_addr >> 12;
                phys_config.page_table_base_addr = pa_config->gart_config.page_table_base_addr | 1; //Note: hack
                phys_config.depth = 0;
                phys_config.block_size = 0;
                // Init VMID 0 based on PA config
                dcn20_vmid_setup(&hubbub1->vmid[0], &phys_config);
        }

        dcn21_dchvm_init(hubbub);

        return hubbub1->num_vmid;
}

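/*
 * The watermark helpers below repeat the same pattern for clock states A, B,
 * C and D: a watermark register is only rewritten when the new value is higher
 * than the cached one, or when safe_to_lower is set; a lowered value that
 * cannot be applied yet is reported back through the return value.
 */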
bool hubbub21_program_urgent_watermarks(
        struct hubbub *hubbub,
        struct dcn_watermark_set *watermarks,
        unsigned int refclk_mhz,
        bool safe_to_lower)
{
        struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
        uint32_t prog_wm_value;
        bool wm_pending = false;

        /* Repeat for watermark set A, B, C and D. */
        /* clock state A */
        if (safe_to_lower || watermarks->a.urgent_ns > hubbub1->watermarks.a.urgent_ns) {
                hubbub1->watermarks.a.urgent_ns = watermarks->a.urgent_ns;
                prog_wm_value = convert_and_clamp(watermarks->a.urgent_ns,
                                refclk_mhz, 0x1fffff);
                REG_SET_2(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, 0,
                                DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value,
                                DCHUBBUB_ARB_VM_ROW_URGENCY_WATERMARK_A, prog_wm_value);

                DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_A calculated =%d\n"
                        "HW register value = 0x%x\n",
                        watermarks->a.urgent_ns, prog_wm_value);
        } else if (watermarks->a.urgent_ns < hubbub1->watermarks.a.urgent_ns)
                wm_pending = true;

        /* determine the transfer time for a quantity of data for a particular requestor.*/
        if (safe_to_lower || watermarks->a.frac_urg_bw_flip
                        > hubbub1->watermarks.a.frac_urg_bw_flip) {
                hubbub1->watermarks.a.frac_urg_bw_flip = watermarks->a.frac_urg_bw_flip;

                REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A, 0,
                                DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A, watermarks->a.frac_urg_bw_flip);
        } else if (watermarks->a.frac_urg_bw_flip
                        < hubbub1->watermarks.a.frac_urg_bw_flip)
                wm_pending = true;

        if (safe_to_lower || watermarks->a.frac_urg_bw_nom
                        > hubbub1->watermarks.a.frac_urg_bw_nom) {
                hubbub1->watermarks.a.frac_urg_bw_nom = watermarks->a.frac_urg_bw_nom;

                REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_A, 0,
                                DCHUBBUB_ARB_FRAC_URG_BW_NOM_A, watermarks->a.frac_urg_bw_nom);
        } else if (watermarks->a.frac_urg_bw_nom
                        < hubbub1->watermarks.a.frac_urg_bw_nom)
                wm_pending = true;

        if (safe_to_lower || watermarks->a.urgent_latency_ns > hubbub1->watermarks.a.urgent_latency_ns) {
                hubbub1->watermarks.a.urgent_latency_ns = watermarks->a.urgent_latency_ns;
                prog_wm_value = convert_and_clamp(watermarks->a.urgent_latency_ns,
                                refclk_mhz, 0x1fffff);
                REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, 0,
                                DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, prog_wm_value);
        } else if (watermarks->a.urgent_latency_ns < hubbub1->watermarks.a.urgent_latency_ns)
                wm_pending = true;

        /* clock state B */
        if (safe_to_lower || watermarks->b.urgent_ns > hubbub1->watermarks.b.urgent_ns) {
                hubbub1->watermarks.b.urgent_ns = watermarks->b.urgent_ns;
                prog_wm_value = convert_and_clamp(watermarks->b.urgent_ns,
                                refclk_mhz, 0x1fffff);
                REG_SET_2(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, 0,
                                DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, prog_wm_value,
                                DCHUBBUB_ARB_VM_ROW_URGENCY_WATERMARK_B, prog_wm_value);

                DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_B calculated =%d\n"
                        "HW register value = 0x%x\n",
                        watermarks->b.urgent_ns, prog_wm_value);
        } else if (watermarks->b.urgent_ns < hubbub1->watermarks.b.urgent_ns)
                wm_pending = true;

        /* determine the transfer time for a quantity of data for a particular requestor.*/
        if (safe_to_lower || watermarks->a.frac_urg_bw_flip
                        > hubbub1->watermarks.a.frac_urg_bw_flip) {
                hubbub1->watermarks.a.frac_urg_bw_flip = watermarks->a.frac_urg_bw_flip;

                REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B, 0,
                                DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B, watermarks->a.frac_urg_bw_flip);
        } else if (watermarks->a.frac_urg_bw_flip
                        < hubbub1->watermarks.a.frac_urg_bw_flip)
                wm_pending = true;

        if (safe_to_lower || watermarks->a.frac_urg_bw_nom
                        > hubbub1->watermarks.a.frac_urg_bw_nom) {
                hubbub1->watermarks.a.frac_urg_bw_nom = watermarks->a.frac_urg_bw_nom;

                REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, 0,
                                DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, watermarks->a.frac_urg_bw_nom);
        } else if (watermarks->a.frac_urg_bw_nom
                        < hubbub1->watermarks.a.frac_urg_bw_nom)
                wm_pending = true;

        if (safe_to_lower || watermarks->b.urgent_latency_ns > hubbub1->watermarks.b.urgent_latency_ns) {
                hubbub1->watermarks.b.urgent_latency_ns = watermarks->b.urgent_latency_ns;
                prog_wm_value = convert_and_clamp(watermarks->b.urgent_latency_ns,
                                refclk_mhz, 0x1fffff);
                REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, 0,
                                DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, prog_wm_value);
        } else if (watermarks->b.urgent_latency_ns < hubbub1->watermarks.b.urgent_latency_ns)
                wm_pending = true;

        /* clock state C */
        if (safe_to_lower || watermarks->c.urgent_ns > hubbub1->watermarks.c.urgent_ns) {
                hubbub1->watermarks.c.urgent_ns = watermarks->c.urgent_ns;
                prog_wm_value = convert_and_clamp(watermarks->c.urgent_ns,
                                refclk_mhz, 0x1fffff);
                REG_SET_2(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, 0,
                                DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, prog_wm_value,
                                DCHUBBUB_ARB_VM_ROW_URGENCY_WATERMARK_C, prog_wm_value);

                DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_C calculated =%d\n"
                        "HW register value = 0x%x\n",
                        watermarks->c.urgent_ns, prog_wm_value);
        } else if (watermarks->c.urgent_ns < hubbub1->watermarks.c.urgent_ns)
                wm_pending = true;

        /* determine the transfer time for a quantity of data for a particular requestor.*/
        if (safe_to_lower || watermarks->a.frac_urg_bw_flip
                        > hubbub1->watermarks.a.frac_urg_bw_flip) {
                hubbub1->watermarks.a.frac_urg_bw_flip = watermarks->a.frac_urg_bw_flip;

                REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C, 0,
                                DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C, watermarks->a.frac_urg_bw_flip);
        } else if (watermarks->a.frac_urg_bw_flip
                        < hubbub1->watermarks.a.frac_urg_bw_flip)
                wm_pending = true;

        if (safe_to_lower || watermarks->a.frac_urg_bw_nom
                        > hubbub1->watermarks.a.frac_urg_bw_nom) {
                hubbub1->watermarks.a.frac_urg_bw_nom = watermarks->a.frac_urg_bw_nom;

                REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_C, 0,
                                DCHUBBUB_ARB_FRAC_URG_BW_NOM_C, watermarks->a.frac_urg_bw_nom);
        } else if (watermarks->a.frac_urg_bw_nom
                        < hubbub1->watermarks.a.frac_urg_bw_nom)
                wm_pending = true;

        if (safe_to_lower || watermarks->c.urgent_latency_ns > hubbub1->watermarks.c.urgent_latency_ns) {
                hubbub1->watermarks.c.urgent_latency_ns = watermarks->c.urgent_latency_ns;
                prog_wm_value = convert_and_clamp(watermarks->c.urgent_latency_ns,
                                refclk_mhz, 0x1fffff);
                REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, 0,
                                DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, prog_wm_value);
        } else if (watermarks->c.urgent_latency_ns < hubbub1->watermarks.c.urgent_latency_ns)
                wm_pending = true;

        /* clock state D */
        if (safe_to_lower || watermarks->d.urgent_ns > hubbub1->watermarks.d.urgent_ns) {
                hubbub1->watermarks.d.urgent_ns = watermarks->d.urgent_ns;
                prog_wm_value = convert_and_clamp(watermarks->d.urgent_ns,
                                refclk_mhz, 0x1fffff);
                REG_SET_2(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, 0,
                                DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, prog_wm_value,
                                DCHUBBUB_ARB_VM_ROW_URGENCY_WATERMARK_D, prog_wm_value);

                DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_D calculated =%d\n"
                        "HW register value = 0x%x\n",
                        watermarks->d.urgent_ns, prog_wm_value);
        } else if (watermarks->d.urgent_ns < hubbub1->watermarks.d.urgent_ns)
                wm_pending = true;

        /* determine the transfer time for a quantity of data for a particular requestor.*/
        if (safe_to_lower || watermarks->a.frac_urg_bw_flip
                        > hubbub1->watermarks.a.frac_urg_bw_flip) {
                hubbub1->watermarks.a.frac_urg_bw_flip = watermarks->a.frac_urg_bw_flip;

                REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D, 0,
                                DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D, watermarks->a.frac_urg_bw_flip);
        } else if (watermarks->a.frac_urg_bw_flip
                        < hubbub1->watermarks.a.frac_urg_bw_flip)
                wm_pending = true;

        if (safe_to_lower || watermarks->a.frac_urg_bw_nom
                        > hubbub1->watermarks.a.frac_urg_bw_nom) {
                hubbub1->watermarks.a.frac_urg_bw_nom = watermarks->a.frac_urg_bw_nom;

                REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_D, 0,
                                DCHUBBUB_ARB_FRAC_URG_BW_NOM_D, watermarks->a.frac_urg_bw_nom);
        } else if (watermarks->a.frac_urg_bw_nom
                        < hubbub1->watermarks.a.frac_urg_bw_nom)
                wm_pending = true;

        if (safe_to_lower || watermarks->d.urgent_latency_ns > hubbub1->watermarks.d.urgent_latency_ns) {
                hubbub1->watermarks.d.urgent_latency_ns = watermarks->d.urgent_latency_ns;
                prog_wm_value = convert_and_clamp(watermarks->d.urgent_latency_ns,
                                refclk_mhz, 0x1fffff);
                REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, 0,
                                DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, prog_wm_value);
        } else if (watermarks->d.urgent_latency_ns < hubbub1->watermarks.d.urgent_latency_ns)
                wm_pending = true;

        return wm_pending;
}

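/*
 * Stutter (self-refresh) watermarks: the SR enter-plus-exit and SR exit times
 * are programmed per clock state with the same convert-and-clamp scheme used
 * for the urgent watermarks above.
 */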
bool hubbub21_program_stutter_watermarks(
        struct hubbub *hubbub,
        struct dcn_watermark_set *watermarks,
        unsigned int refclk_mhz,
        bool safe_to_lower)
{
        struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
        uint32_t prog_wm_value;
        bool wm_pending = false;

        /* clock state A */
        if (safe_to_lower || watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns
                        > hubbub1->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns) {
                hubbub1->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns =
                        watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns;
                prog_wm_value = convert_and_clamp(
                                watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns,
                                refclk_mhz, 0x1fffff);
                REG_SET_2(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, 0,
                                DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, prog_wm_value,
                                DCHUBBUB_ARB_VM_ROW_ALLOW_SR_ENTER_WATERMARK_A, prog_wm_value);
                DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_A calculated =%d\n"
                        "HW register value = 0x%x\n",
                        watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
        } else if (watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns
                        < hubbub1->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns)
                wm_pending = true;

        if (safe_to_lower || watermarks->a.cstate_pstate.cstate_exit_ns
                        > hubbub1->watermarks.a.cstate_pstate.cstate_exit_ns) {
                hubbub1->watermarks.a.cstate_pstate.cstate_exit_ns =
                        watermarks->a.cstate_pstate.cstate_exit_ns;
                prog_wm_value = convert_and_clamp(
                                watermarks->a.cstate_pstate.cstate_exit_ns,
                                refclk_mhz, 0x1fffff);
                REG_SET_2(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, 0,
                                DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value,
                                DCHUBBUB_ARB_VM_ROW_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value);
                DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_A calculated =%d\n"
                        "HW register value = 0x%x\n",
                        watermarks->a.cstate_pstate.cstate_exit_ns, prog_wm_value);
        } else if (watermarks->a.cstate_pstate.cstate_exit_ns
                        < hubbub1->watermarks.a.cstate_pstate.cstate_exit_ns)
                wm_pending = true;

        /* clock state B */
        if (safe_to_lower || watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns
                        > hubbub1->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns) {
                hubbub1->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns =
                        watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns;
                prog_wm_value = convert_and_clamp(
                                watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns,
                                refclk_mhz, 0x1fffff);
                REG_SET_2(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, 0,
                                DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, prog_wm_value,
                                DCHUBBUB_ARB_VM_ROW_ALLOW_SR_ENTER_WATERMARK_B, prog_wm_value);
                DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_B calculated =%d\n"
                        "HW register value = 0x%x\n",
                        watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
        } else if (watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns
                        < hubbub1->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns)
                wm_pending = true;

        if (safe_to_lower || watermarks->b.cstate_pstate.cstate_exit_ns
                        > hubbub1->watermarks.b.cstate_pstate.cstate_exit_ns) {
                hubbub1->watermarks.b.cstate_pstate.cstate_exit_ns =
                        watermarks->b.cstate_pstate.cstate_exit_ns;
                prog_wm_value = convert_and_clamp(
                                watermarks->b.cstate_pstate.cstate_exit_ns,
                                refclk_mhz, 0x1fffff);
                REG_SET_2(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, 0,
                                DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, prog_wm_value,
                                DCHUBBUB_ARB_VM_ROW_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value);
                DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_B calculated =%d\n"
                        "HW register value = 0x%x\n",
                        watermarks->b.cstate_pstate.cstate_exit_ns, prog_wm_value);
        } else if (watermarks->b.cstate_pstate.cstate_exit_ns
                        < hubbub1->watermarks.b.cstate_pstate.cstate_exit_ns)
                wm_pending = true;

        /* clock state C */
        if (safe_to_lower || watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns
                        > hubbub1->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns) {
                hubbub1->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns =
                        watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns;
                prog_wm_value = convert_and_clamp(
                                watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns,
                                refclk_mhz, 0x1fffff);
                REG_SET_2(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, 0,
                                DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, prog_wm_value,
                                DCHUBBUB_ARB_VM_ROW_ALLOW_SR_ENTER_WATERMARK_C, prog_wm_value);
                DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_C calculated =%d\n"
                        "HW register value = 0x%x\n",
                        watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
        } else if (watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns
                        < hubbub1->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns)
                wm_pending = true;

        if (safe_to_lower || watermarks->c.cstate_pstate.cstate_exit_ns
                        > hubbub1->watermarks.c.cstate_pstate.cstate_exit_ns) {
                hubbub1->watermarks.c.cstate_pstate.cstate_exit_ns =
                        watermarks->c.cstate_pstate.cstate_exit_ns;
                prog_wm_value = convert_and_clamp(
                                watermarks->c.cstate_pstate.cstate_exit_ns,
                                refclk_mhz, 0x1fffff);
                REG_SET_2(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, 0,
                                DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, prog_wm_value,
                                DCHUBBUB_ARB_VM_ROW_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value);
                DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_C calculated =%d\n"
                        "HW register value = 0x%x\n",
                        watermarks->c.cstate_pstate.cstate_exit_ns, prog_wm_value);
        } else if (watermarks->c.cstate_pstate.cstate_exit_ns
                        < hubbub1->watermarks.c.cstate_pstate.cstate_exit_ns)
                wm_pending = true;

        /* clock state D */
        if (safe_to_lower || watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns
                        > hubbub1->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns) {
                hubbub1->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns =
                        watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns;
                prog_wm_value = convert_and_clamp(
                                watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns,
                                refclk_mhz, 0x1fffff);
                REG_SET_2(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, 0,
                                DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, prog_wm_value,
                                DCHUBBUB_ARB_VM_ROW_ALLOW_SR_ENTER_WATERMARK_D, prog_wm_value);
                DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_D calculated =%d\n"
                        "HW register value = 0x%x\n",
                        watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
        } else if (watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns
                        < hubbub1->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns)
                wm_pending = true;

        if (safe_to_lower || watermarks->d.cstate_pstate.cstate_exit_ns
                        > hubbub1->watermarks.d.cstate_pstate.cstate_exit_ns) {
                hubbub1->watermarks.d.cstate_pstate.cstate_exit_ns =
                        watermarks->d.cstate_pstate.cstate_exit_ns;
                prog_wm_value = convert_and_clamp(
                                watermarks->d.cstate_pstate.cstate_exit_ns,
                                refclk_mhz, 0x1fffff);
                REG_SET_2(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, 0,
                                DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, prog_wm_value,
                                DCHUBBUB_ARB_VM_ROW_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value);
                DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_D calculated =%d\n"
                        "HW register value = 0x%x\n",
                        watermarks->d.cstate_pstate.cstate_exit_ns, prog_wm_value);
        } else if (watermarks->d.cstate_pstate.cstate_exit_ns
                        < hubbub1->watermarks.d.cstate_pstate.cstate_exit_ns)
                wm_pending = true;

        return wm_pending;
}

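/*
 * DRAM p-state change watermarks: one pstate_change_ns value per clock state,
 * converted from nanoseconds to refclk cycles before programming.
 */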
bool hubbub21_program_pstate_watermarks(
        struct hubbub *hubbub,
        struct dcn_watermark_set *watermarks,
        unsigned int refclk_mhz,
        bool safe_to_lower)
{
        struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
        uint32_t prog_wm_value;

        bool wm_pending = false;

        /* clock state A */
        if (safe_to_lower || watermarks->a.cstate_pstate.pstate_change_ns
                        > hubbub1->watermarks.a.cstate_pstate.pstate_change_ns) {
                hubbub1->watermarks.a.cstate_pstate.pstate_change_ns =
                        watermarks->a.cstate_pstate.pstate_change_ns;
                prog_wm_value = convert_and_clamp(
                                watermarks->a.cstate_pstate.pstate_change_ns,
                                refclk_mhz, 0x1fffff);
                REG_SET_2(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, 0,
                                DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, prog_wm_value,
                                DCHUBBUB_ARB_VM_ROW_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, prog_wm_value);
                DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_A calculated =%d\n"
                        "HW register value = 0x%x\n\n",
                        watermarks->a.cstate_pstate.pstate_change_ns, prog_wm_value);
        } else if (watermarks->a.cstate_pstate.pstate_change_ns
                        < hubbub1->watermarks.a.cstate_pstate.pstate_change_ns)
                wm_pending = true;

        /* clock state B */
        if (safe_to_lower || watermarks->b.cstate_pstate.pstate_change_ns
                        > hubbub1->watermarks.b.cstate_pstate.pstate_change_ns) {
                hubbub1->watermarks.b.cstate_pstate.pstate_change_ns =
                        watermarks->b.cstate_pstate.pstate_change_ns;
                prog_wm_value = convert_and_clamp(
                                watermarks->b.cstate_pstate.pstate_change_ns,
                                refclk_mhz, 0x1fffff);
                REG_SET_2(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, 0,
                                DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, prog_wm_value,
                                DCHUBBUB_ARB_VM_ROW_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, prog_wm_value);
                DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_B calculated =%d\n"
                        "HW register value = 0x%x\n\n",
                        watermarks->b.cstate_pstate.pstate_change_ns, prog_wm_value);
        } else if (watermarks->b.cstate_pstate.pstate_change_ns
                        < hubbub1->watermarks.b.cstate_pstate.pstate_change_ns)
                wm_pending = true;

        /* clock state C */
        if (safe_to_lower || watermarks->c.cstate_pstate.pstate_change_ns
                        > hubbub1->watermarks.c.cstate_pstate.pstate_change_ns) {
                hubbub1->watermarks.c.cstate_pstate.pstate_change_ns =
                        watermarks->c.cstate_pstate.pstate_change_ns;
                prog_wm_value = convert_and_clamp(
                                watermarks->c.cstate_pstate.pstate_change_ns,
                                refclk_mhz, 0x1fffff);
                REG_SET_2(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, 0,
                                DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, prog_wm_value,
                                DCHUBBUB_ARB_VM_ROW_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, prog_wm_value);
                DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_C calculated =%d\n"
                        "HW register value = 0x%x\n\n",
                        watermarks->c.cstate_pstate.pstate_change_ns, prog_wm_value);
        } else if (watermarks->c.cstate_pstate.pstate_change_ns
                        < hubbub1->watermarks.c.cstate_pstate.pstate_change_ns)
                wm_pending = true;

        /* clock state D */
        if (safe_to_lower || watermarks->d.cstate_pstate.pstate_change_ns
                        > hubbub1->watermarks.d.cstate_pstate.pstate_change_ns) {
                hubbub1->watermarks.d.cstate_pstate.pstate_change_ns =
                        watermarks->d.cstate_pstate.pstate_change_ns;
                prog_wm_value = convert_and_clamp(
                                watermarks->d.cstate_pstate.pstate_change_ns,
                                refclk_mhz, 0x1fffff);
                REG_SET_2(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, 0,
                                DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, prog_wm_value,
                                DCHUBBUB_ARB_VM_ROW_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, prog_wm_value);
                DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_D calculated =%d\n"
                        "HW register value = 0x%x\n\n",
                        watermarks->d.cstate_pstate.pstate_change_ns, prog_wm_value);
        } else if (watermarks->d.cstate_pstate.pstate_change_ns
                        < hubbub1->watermarks.d.cstate_pstate.pstate_change_ns)
                wm_pending = true;

        return wm_pending;
}

bool hubbub21_program_watermarks(
        struct hubbub *hubbub,
        struct dcn_watermark_set *watermarks,
        unsigned int refclk_mhz,
        bool safe_to_lower)
{
        struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
        bool wm_pending = false;

        if (hubbub21_program_urgent_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
                wm_pending = true;

        if (hubbub21_program_stutter_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
                wm_pending = true;

        if (hubbub21_program_pstate_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
                wm_pending = true;

        /*
         * The DCHub arbiter has a mechanism to dynamically rate limit the DCHub request stream to the fabric.
         * If the memory controller is fully utilized and the DCHub requestors are
         * well ahead of their amortized schedule, then it is safe to prevent the next winner
         * from being committed and sent to the fabric.
         * The utilization of the memory controller is approximated by ensuring that
         * the number of outstanding requests is greater than a threshold specified
         * by ARB_MIN_REQ_OUTSTANDING. To determine that the DCHub requestors are well ahead of the
         * amortized schedule, the slack of the next winner is compared with ARB_SAT_LEVEL in DLG RefClk cycles.
         *
         * TODO: Revisit the request limit once the right number has been figured out. The request limit
         * for Renoir is not decided yet, so program the maximum value (0x1FF) to disable it for now.
         */
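        /*
         * ARB_SAT_LEVEL is expressed in DLG RefClk cycles, so the
         * 60 * refclk_mhz value programmed below corresponds to roughly
         * 60 us of slack (an interpretation of the value, not a spec).
         */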
        REG_SET(DCHUBBUB_ARB_SAT_LEVEL, 0,
                        DCHUBBUB_ARB_SAT_LEVEL, 60 * refclk_mhz);
        REG_UPDATE_2(DCHUBBUB_ARB_DF_REQ_OUTSTAND,
                        DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 0x1FF,
                        DCHUBBUB_ARB_MIN_REQ_OUTSTAND_COMMIT_THRESHOLD, 0xA);
        REG_UPDATE(DCHUBBUB_ARB_HOSTVM_CNTL,
                        DCHUBBUB_ARB_MAX_QOS_COMMIT_THRESHOLD, 0xF);

        hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter);

        return wm_pending;
}

void hubbub21_wm_read_state(struct hubbub *hubbub,
                struct dcn_hubbub_wm *wm)
{
        struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
        struct dcn_hubbub_wm_set *s;

        memset(wm, 0, sizeof(struct dcn_hubbub_wm));

        s = &wm->sets[0];
        s->wm_set = 0;
        REG_GET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A,
                        DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, &s->data_urgent);

        REG_GET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A,
                        DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, &s->sr_enter);

        REG_GET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A,
                        DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, &s->sr_exit);

        REG_GET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A,
                        DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A,
                        &s->dram_clk_chanage); /* sic: field name as defined in struct dcn_hubbub_wm_set */

        s = &wm->sets[1];
        s->wm_set = 1;
        REG_GET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B,
                        DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, &s->data_urgent);

        REG_GET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B,
                        DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, &s->sr_enter);

        REG_GET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B,
                        DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, &s->sr_exit);

        REG_GET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B,
                        DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, &s->dram_clk_chanage);

        s = &wm->sets[2];
        s->wm_set = 2;
        REG_GET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C,
                        DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, &s->data_urgent);

        REG_GET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C,
                        DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, &s->sr_enter);

        REG_GET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C,
                        DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, &s->sr_exit);

        REG_GET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C,
                        DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, &s->dram_clk_chanage);

        s = &wm->sets[3];
        s->wm_set = 3;
        REG_GET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D,
                        DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, &s->data_urgent);

        REG_GET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D,
                        DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, &s->sr_enter);

        REG_GET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D,
                        DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, &s->sr_exit);

        REG_GET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D,
                        DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, &s->dram_clk_chanage);
}

void hubbub21_apply_DEDCN21_147_wa(struct hubbub *hubbub)
{
        struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
        uint32_t prog_wm_value;

        prog_wm_value = REG_READ(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A);
        REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value);
}

static const struct hubbub_funcs hubbub21_funcs = {
        .update_dchub = hubbub2_update_dchub,
        .init_dchub_sys_ctx = hubbub21_init_dchub,
        .init_vm_ctx = hubbub2_init_vm_ctx,
        .dcc_support_swizzle = hubbub2_dcc_support_swizzle,
        .dcc_support_pixel_format = hubbub2_dcc_support_pixel_format,
        .get_dcc_compression_cap = hubbub2_get_dcc_compression_cap,
        .wm_read_state = hubbub21_wm_read_state,
        .get_dchub_ref_freq = hubbub2_get_dchub_ref_freq,
        .program_watermarks = hubbub21_program_watermarks,
        .allow_self_refresh_control = hubbub1_allow_self_refresh_control,
        .apply_DEDCN21_147_wa = hubbub21_apply_DEDCN21_147_wa,
};

void hubbub21_construct(struct dcn20_hubbub *hubbub,
        struct dc_context *ctx,
        const struct dcn_hubbub_registers *hubbub_regs,
        const struct dcn_hubbub_shift *hubbub_shift,
        const struct dcn_hubbub_mask *hubbub_mask)
{
        hubbub->base.ctx = ctx;

        hubbub->base.funcs = &hubbub21_funcs;

        hubbub->regs = hubbub_regs;
        hubbub->shifts = hubbub_shift;
        hubbub->masks = hubbub_mask;

        hubbub->debug_test_index_pstate = 0xB;
        hubbub->detile_buf_size = 164 * 1024; /* 164KB for DCN2.0 */
}