drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
1 /*
2 * Copyright 2016 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
22 * Authors: AMD
23 *
24 */
26 #include <linux/delay.h>
27 #include "dm_services.h"
28 #include "basics/dc_common.h"
29 #include "core_types.h"
30 #include "resource.h"
31 #include "custom_float.h"
32 #include "dcn10_hw_sequencer.h"
33 #include "dcn10_hw_sequencer_debug.h"
34 #include "dce/dce_hwseq.h"
35 #include "abm.h"
36 #include "dmcu.h"
37 #include "dcn10_optc.h"
38 #include "dcn10_dpp.h"
39 #include "dcn10_mpc.h"
40 #include "timing_generator.h"
41 #include "opp.h"
42 #include "ipp.h"
43 #include "mpc.h"
44 #include "reg_helper.h"
45 #include "dcn10_hubp.h"
46 #include "dcn10_hubbub.h"
47 #include "dcn10_cm_common.h"
48 #include "dc_link_dp.h"
49 #include "dccg.h"
50 #include "clk_mgr.h"
53 #include "dsc.h"
55 #define DC_LOGGER_INIT(logger)
57 #define CTX \
58 hws->ctx
59 #define REG(reg)\
60 hws->regs->reg
62 #undef FN
63 #define FN(reg_name, field_name) \
64 hws->shifts->field_name, hws->masks->field_name
66 /*print is 17 wide, first two characters are spaces*/
67 #define DTN_INFO_MICRO_SEC(ref_cycle) \
68 print_microsec(dc_ctx, log_ctx, ref_cycle)
70 #define GAMMA_HW_POINTS_NUM 256
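/*
 * Convert a DCHUB reference-clock cycle count to microseconds with three
 * fractional digits and print it through DTN_INFO; used by the
 * DTN_INFO_MICRO_SEC() macro above.
 */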
72 void print_microsec(struct dc_context *dc_ctx,
73 struct dc_log_buffer_ctx *log_ctx,
74 uint32_t ref_cycle)
76 const uint32_t ref_clk_mhz = dc_ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000;
77 static const unsigned int frac = 1000;
78 uint32_t us_x10 = (ref_cycle * frac) / ref_clk_mhz;
80 DTN_INFO(" %11d.%03d",
81 us_x10 / frac,
82 us_x10 % frac);
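/*
 * Take or release the timing-generator lock on every enabled top pipe in
 * the given context so the per-pipe programming latches atomically;
 * disabled pipes and non-top pipes are skipped (see comment below).
 */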
85 static void dcn10_lock_all_pipes(struct dc *dc,
86 struct dc_state *context,
87 bool lock)
89 struct pipe_ctx *pipe_ctx;
90 struct timing_generator *tg;
91 int i;
93 for (i = 0; i < dc->res_pool->pipe_count; i++) {
94 pipe_ctx = &context->res_ctx.pipe_ctx[i];
95 tg = pipe_ctx->stream_res.tg;
96 /*
97 * Only lock the top pipe's tg to prevent redundant
98 * (un)locking. Also skip if pipe is disabled.
99 */
100 if (pipe_ctx->top_pipe ||
101 !pipe_ctx->stream || !pipe_ctx->plane_state ||
102 !tg->funcs->is_tg_enabled(tg))
103 continue;
105 if (lock)
106 tg->funcs->lock(tg);
107 else
108 tg->funcs->unlock(tg);
112 static void log_mpc_crc(struct dc *dc,
113 struct dc_log_buffer_ctx *log_ctx)
115 struct dc_context *dc_ctx = dc->ctx;
116 struct dce_hwseq *hws = dc->hwseq;
118 if (REG(MPC_CRC_RESULT_GB))
119 DTN_INFO("MPC_CRC_RESULT_GB:%d MPC_CRC_RESULT_C:%d MPC_CRC_RESULT_AR:%d\n",
120 REG_READ(MPC_CRC_RESULT_GB), REG_READ(MPC_CRC_RESULT_C), REG_READ(MPC_CRC_RESULT_AR));
121 if (REG(DPP_TOP0_DPP_CRC_VAL_B_A))
122 DTN_INFO("DPP_TOP0_DPP_CRC_VAL_B_A:%d DPP_TOP0_DPP_CRC_VAL_R_G:%d\n",
123 REG_READ(DPP_TOP0_DPP_CRC_VAL_B_A), REG_READ(DPP_TOP0_DPP_CRC_VAL_R_G));
126 void dcn10_log_hubbub_state(struct dc *dc, struct dc_log_buffer_ctx *log_ctx)
128 struct dc_context *dc_ctx = dc->ctx;
129 struct dcn_hubbub_wm wm;
130 int i;
132 memset(&wm, 0, sizeof(struct dcn_hubbub_wm));
133 dc->res_pool->hubbub->funcs->wm_read_state(dc->res_pool->hubbub, &wm);
135 DTN_INFO("HUBBUB WM: data_urgent pte_meta_urgent"
136 " sr_enter sr_exit dram_clk_change\n");
138 for (i = 0; i < 4; i++) {
139 struct dcn_hubbub_wm_set *s;
141 s = &wm.sets[i];
142 DTN_INFO("WM_Set[%d]:", s->wm_set);
143 DTN_INFO_MICRO_SEC(s->data_urgent);
144 DTN_INFO_MICRO_SEC(s->pte_meta_urgent);
145 DTN_INFO_MICRO_SEC(s->sr_enter);
146 DTN_INFO_MICRO_SEC(s->sr_exit);
147 DTN_INFO_MICRO_SEC(s->dram_clk_chanage);
148 DTN_INFO("\n");
151 DTN_INFO("\n");
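/*
 * Latch each HUBP's state with hubp_read_state() and dump the per-pipe
 * HUBP summary followed by the RQ, DLG and TTU register snapshots.
 * Pipes whose HUBP is blanked are skipped in every table.
 */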
154 static void dcn10_log_hubp_states(struct dc *dc, void *log_ctx)
156 struct dc_context *dc_ctx = dc->ctx;
157 struct resource_pool *pool = dc->res_pool;
158 int i;
160 DTN_INFO(
161 "HUBP: format addr_hi width height rot mir sw_mode dcc_en blank_en clock_en ttu_dis underflow min_ttu_vblank qos_low_wm qos_high_wm\n");
162 for (i = 0; i < pool->pipe_count; i++) {
163 struct hubp *hubp = pool->hubps[i];
164 struct dcn_hubp_state *s = &(TO_DCN10_HUBP(hubp)->state);
166 hubp->funcs->hubp_read_state(hubp);
168 if (!s->blank_en) {
169 DTN_INFO("[%2d]: %5xh %6xh %5d %6d %2xh %2xh %6xh %6d %8d %8d %7d %8xh",
170 hubp->inst,
171 s->pixel_format,
172 s->inuse_addr_hi,
173 s->viewport_width,
174 s->viewport_height,
175 s->rotation_angle,
176 s->h_mirror_en,
177 s->sw_mode,
178 s->dcc_en,
179 s->blank_en,
180 s->clock_en,
181 s->ttu_disable,
182 s->underflow_status);
183 DTN_INFO_MICRO_SEC(s->min_ttu_vblank);
184 DTN_INFO_MICRO_SEC(s->qos_level_low_wm);
185 DTN_INFO_MICRO_SEC(s->qos_level_high_wm);
186 DTN_INFO("\n");
190 DTN_INFO("\n=========RQ========\n");
191 DTN_INFO("HUBP: drq_exp_m prq_exp_m mrq_exp_m crq_exp_m plane1_ba L:chunk_s min_chu_s meta_ch_s"
192 " min_m_c_s dpte_gr_s mpte_gr_s swath_hei pte_row_h C:chunk_s min_chu_s meta_ch_s"
193 " min_m_c_s dpte_gr_s mpte_gr_s swath_hei pte_row_h\n");
194 for (i = 0; i < pool->pipe_count; i++) {
195 struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
196 struct _vcs_dpi_display_rq_regs_st *rq_regs = &s->rq_regs;
198 if (!s->blank_en)
199 DTN_INFO("[%2d]: %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh\n",
200 pool->hubps[i]->inst, rq_regs->drq_expansion_mode, rq_regs->prq_expansion_mode, rq_regs->mrq_expansion_mode,
201 rq_regs->crq_expansion_mode, rq_regs->plane1_base_address, rq_regs->rq_regs_l.chunk_size,
202 rq_regs->rq_regs_l.min_chunk_size, rq_regs->rq_regs_l.meta_chunk_size,
203 rq_regs->rq_regs_l.min_meta_chunk_size, rq_regs->rq_regs_l.dpte_group_size,
204 rq_regs->rq_regs_l.mpte_group_size, rq_regs->rq_regs_l.swath_height,
205 rq_regs->rq_regs_l.pte_row_height_linear, rq_regs->rq_regs_c.chunk_size, rq_regs->rq_regs_c.min_chunk_size,
206 rq_regs->rq_regs_c.meta_chunk_size, rq_regs->rq_regs_c.min_meta_chunk_size,
207 rq_regs->rq_regs_c.dpte_group_size, rq_regs->rq_regs_c.mpte_group_size,
208 rq_regs->rq_regs_c.swath_height, rq_regs->rq_regs_c.pte_row_height_linear);
211 DTN_INFO("========DLG========\n");
212 DTN_INFO("HUBP: rc_hbe dlg_vbe min_d_y_n rc_per_ht rc_x_a_s "
213 " dst_y_a_s dst_y_pf dst_y_vvb dst_y_rvb dst_y_vfl dst_y_rfl rf_pix_fq"
214 " vratio_pf vrat_pf_c rc_pg_vbl rc_pg_vbc rc_mc_vbl rc_mc_vbc rc_pg_fll"
215 " rc_pg_flc rc_mc_fll rc_mc_flc pr_nom_l pr_nom_c rc_pg_nl rc_pg_nc "
216 " mr_nom_l mr_nom_c rc_mc_nl rc_mc_nc rc_ld_pl rc_ld_pc rc_ld_l "
217 " rc_ld_c cha_cur0 ofst_cur1 cha_cur1 vr_af_vc0 ddrq_limt x_rt_dlay"
218 " x_rp_dlay x_rr_sfl\n");
219 for (i = 0; i < pool->pipe_count; i++) {
220 struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
221 struct _vcs_dpi_display_dlg_regs_st *dlg_regs = &s->dlg_attr;
223 if (!s->blank_en)
224 DTN_INFO("[%2d]: %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh"
225 "% 8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh"
226 " %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh\n",
227 pool->hubps[i]->inst, dlg_regs->refcyc_h_blank_end, dlg_regs->dlg_vblank_end, dlg_regs->min_dst_y_next_start,
228 dlg_regs->refcyc_per_htotal, dlg_regs->refcyc_x_after_scaler, dlg_regs->dst_y_after_scaler,
229 dlg_regs->dst_y_prefetch, dlg_regs->dst_y_per_vm_vblank, dlg_regs->dst_y_per_row_vblank,
230 dlg_regs->dst_y_per_vm_flip, dlg_regs->dst_y_per_row_flip, dlg_regs->ref_freq_to_pix_freq,
231 dlg_regs->vratio_prefetch, dlg_regs->vratio_prefetch_c, dlg_regs->refcyc_per_pte_group_vblank_l,
232 dlg_regs->refcyc_per_pte_group_vblank_c, dlg_regs->refcyc_per_meta_chunk_vblank_l,
233 dlg_regs->refcyc_per_meta_chunk_vblank_c, dlg_regs->refcyc_per_pte_group_flip_l,
234 dlg_regs->refcyc_per_pte_group_flip_c, dlg_regs->refcyc_per_meta_chunk_flip_l,
235 dlg_regs->refcyc_per_meta_chunk_flip_c, dlg_regs->dst_y_per_pte_row_nom_l,
236 dlg_regs->dst_y_per_pte_row_nom_c, dlg_regs->refcyc_per_pte_group_nom_l,
237 dlg_regs->refcyc_per_pte_group_nom_c, dlg_regs->dst_y_per_meta_row_nom_l,
238 dlg_regs->dst_y_per_meta_row_nom_c, dlg_regs->refcyc_per_meta_chunk_nom_l,
239 dlg_regs->refcyc_per_meta_chunk_nom_c, dlg_regs->refcyc_per_line_delivery_pre_l,
240 dlg_regs->refcyc_per_line_delivery_pre_c, dlg_regs->refcyc_per_line_delivery_l,
241 dlg_regs->refcyc_per_line_delivery_c, dlg_regs->chunk_hdl_adjust_cur0, dlg_regs->dst_y_offset_cur1,
242 dlg_regs->chunk_hdl_adjust_cur1, dlg_regs->vready_after_vcount0, dlg_regs->dst_y_delta_drq_limit,
243 dlg_regs->xfc_reg_transfer_delay, dlg_regs->xfc_reg_precharge_delay,
244 dlg_regs->xfc_reg_remote_surface_flip_latency);
247 DTN_INFO("========TTU========\n");
248 DTN_INFO("HUBP: qos_ll_wm qos_lh_wm mn_ttu_vb qos_l_flp rc_rd_p_l rc_rd_l rc_rd_p_c"
249 " rc_rd_c rc_rd_c0 rc_rd_pc0 rc_rd_c1 rc_rd_pc1 qos_lf_l qos_rds_l"
250 " qos_lf_c qos_rds_c qos_lf_c0 qos_rds_c0 qos_lf_c1 qos_rds_c1\n");
251 for (i = 0; i < pool->pipe_count; i++) {
252 struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
253 struct _vcs_dpi_display_ttu_regs_st *ttu_regs = &s->ttu_attr;
255 if (!s->blank_en)
256 DTN_INFO("[%2d]: %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh\n",
257 pool->hubps[i]->inst, ttu_regs->qos_level_low_wm, ttu_regs->qos_level_high_wm, ttu_regs->min_ttu_vblank,
258 ttu_regs->qos_level_flip, ttu_regs->refcyc_per_req_delivery_pre_l, ttu_regs->refcyc_per_req_delivery_l,
259 ttu_regs->refcyc_per_req_delivery_pre_c, ttu_regs->refcyc_per_req_delivery_c, ttu_regs->refcyc_per_req_delivery_cur0,
260 ttu_regs->refcyc_per_req_delivery_pre_cur0, ttu_regs->refcyc_per_req_delivery_cur1,
261 ttu_regs->refcyc_per_req_delivery_pre_cur1, ttu_regs->qos_level_fixed_l, ttu_regs->qos_ramp_disable_l,
262 ttu_regs->qos_level_fixed_c, ttu_regs->qos_ramp_disable_c, ttu_regs->qos_level_fixed_cur0,
263 ttu_regs->qos_ramp_disable_cur0, ttu_regs->qos_level_fixed_cur1, ttu_regs->qos_ramp_disable_cur1);
265 DTN_INFO("\n");
268 void dcn10_log_hw_state(struct dc *dc,
269 struct dc_log_buffer_ctx *log_ctx)
271 struct dc_context *dc_ctx = dc->ctx;
272 struct resource_pool *pool = dc->res_pool;
273 int i;
275 DTN_INFO_BEGIN();
277 dcn10_log_hubbub_state(dc, log_ctx);
279 dcn10_log_hubp_states(dc, log_ctx);
281 DTN_INFO("DPP: IGAM format IGAM mode DGAM mode RGAM mode"
282 " GAMUT mode C11 C12 C13 C14 C21 C22 C23 C24 "
283 "C31 C32 C33 C34\n");
284 for (i = 0; i < pool->pipe_count; i++) {
285 struct dpp *dpp = pool->dpps[i];
286 struct dcn_dpp_state s = {0};
288 dpp->funcs->dpp_read_state(dpp, &s);
290 if (!s.is_enabled)
291 continue;
293 DTN_INFO("[%2d]: %11xh %-11s %-11s %-11s"
294 "%8x %08xh %08xh %08xh %08xh %08xh %08xh",
295 dpp->inst,
296 s.igam_input_format,
297 (s.igam_lut_mode == 0) ? "BypassFixed" :
298 ((s.igam_lut_mode == 1) ? "BypassFloat" :
299 ((s.igam_lut_mode == 2) ? "RAM" :
300 ((s.igam_lut_mode == 3) ? "RAM" :
301 "Unknown"))),
302 (s.dgam_lut_mode == 0) ? "Bypass" :
303 ((s.dgam_lut_mode == 1) ? "sRGB" :
304 ((s.dgam_lut_mode == 2) ? "Ycc" :
305 ((s.dgam_lut_mode == 3) ? "RAM" :
306 ((s.dgam_lut_mode == 4) ? "RAM" :
307 "Unknown")))),
308 (s.rgam_lut_mode == 0) ? "Bypass" :
309 ((s.rgam_lut_mode == 1) ? "sRGB" :
310 ((s.rgam_lut_mode == 2) ? "Ycc" :
311 ((s.rgam_lut_mode == 3) ? "RAM" :
312 ((s.rgam_lut_mode == 4) ? "RAM" :
313 "Unknown")))),
314 s.gamut_remap_mode,
315 s.gamut_remap_c11_c12,
316 s.gamut_remap_c13_c14,
317 s.gamut_remap_c21_c22,
318 s.gamut_remap_c23_c24,
319 s.gamut_remap_c31_c32,
320 s.gamut_remap_c33_c34);
321 DTN_INFO("\n");
323 DTN_INFO("\n");
325 DTN_INFO("MPCC: OPP DPP MPCCBOT MODE ALPHA_MODE PREMULT OVERLAP_ONLY IDLE\n");
326 for (i = 0; i < pool->pipe_count; i++) {
327 struct mpcc_state s = {0};
329 pool->mpc->funcs->read_mpcc_state(pool->mpc, i, &s);
330 if (s.opp_id != 0xf)
331 DTN_INFO("[%2d]: %2xh %2xh %6xh %4d %10d %7d %12d %4d\n",
332 i, s.opp_id, s.dpp_id, s.bot_mpcc_id,
333 s.mode, s.alpha_mode, s.pre_multiplied_alpha, s.overlap_only,
334 s.idle);
336 DTN_INFO("\n");
338 DTN_INFO("OTG: v_bs v_be v_ss v_se vpol vmax vmin vmax_sel vmin_sel h_bs h_be h_ss h_se hpol htot vtot underflow blank_en\n");
340 for (i = 0; i < pool->timing_generator_count; i++) {
341 struct timing_generator *tg = pool->timing_generators[i];
342 struct dcn_otg_state s = {0};
343 /* Read shared OTG state registers for all DCNx */
344 optc1_read_otg_state(DCN10TG_FROM_TG(tg), &s);
346 /*
347 * For DCN2 and greater, a register on the OPP is used to
348 * determine if the CRTC is blanked instead of the OTG. So use
349 * dpg_is_blanked() if exists, otherwise fallback on otg.
350 *
351 * TODO: Implement DCN-specific read_otg_state hooks.
352 */
353 if (pool->opps[i]->funcs->dpg_is_blanked)
354 s.blank_enabled = pool->opps[i]->funcs->dpg_is_blanked(pool->opps[i]);
355 else
356 s.blank_enabled = tg->funcs->is_blanked(tg);
358 //only print if OTG master is enabled
359 if ((s.otg_enabled & 1) == 0)
360 continue;
362 DTN_INFO("[%d]: %5d %5d %5d %5d %5d %5d %5d %9d %9d %5d %5d %5d %5d %5d %5d %5d %9d %8d\n",
363 tg->inst,
364 s.v_blank_start,
365 s.v_blank_end,
366 s.v_sync_a_start,
367 s.v_sync_a_end,
368 s.v_sync_a_pol,
369 s.v_total_max,
370 s.v_total_min,
371 s.v_total_max_sel,
372 s.v_total_min_sel,
373 s.h_blank_start,
374 s.h_blank_end,
375 s.h_sync_a_start,
376 s.h_sync_a_end,
377 s.h_sync_a_pol,
378 s.h_total,
379 s.v_total,
380 s.underflow_occurred_status,
381 s.blank_enabled);
383 // Clear underflow for debug purposes
384 // We want to keep underflow sticky bit on for the longevity tests outside of test environment.
385 // This function is called only from Windows or Diags test environment, hence it's safe to clear
386 // it from here without affecting the original intent.
387 tg->funcs->clear_optc_underflow(tg);
389 DTN_INFO("\n");
391 DTN_INFO("DSC: CLOCK_EN SLICE_WIDTH Bytes_pp\n");
392 for (i = 0; i < pool->res_cap->num_dsc; i++) {
393 struct display_stream_compressor *dsc = pool->dscs[i];
394 struct dcn_dsc_state s = {0};
396 dsc->funcs->dsc_read_state(dsc, &s);
397 DTN_INFO("[%d]: %-9d %-12d %-10d\n",
398 dsc->inst,
399 s.dsc_clock_en,
400 s.dsc_slice_width,
401 s.dsc_bytes_per_pixel);
402 DTN_INFO("\n");
404 DTN_INFO("\n");
406 DTN_INFO("S_ENC: DSC_MODE SEC_GSP7_LINE_NUM"
407 " VBID6_LINE_REFERENCE VBID6_LINE_NUM SEC_GSP7_ENABLE SEC_STREAM_ENABLE\n");
408 for (i = 0; i < pool->stream_enc_count; i++) {
409 struct stream_encoder *enc = pool->stream_enc[i];
410 struct enc_state s = {0};
412 if (enc->funcs->enc_read_state) {
413 enc->funcs->enc_read_state(enc, &s);
414 DTN_INFO("[%-3d]: %-9d %-18d %-21d %-15d %-16d %-17d\n",
415 enc->id,
416 s.dsc_mode,
417 s.sec_gsp_pps_line_num,
418 s.vbid6_line_reference,
419 s.vbid6_line_num,
420 s.sec_gsp_pps_enable,
421 s.sec_stream_enable);
422 DTN_INFO("\n");
425 DTN_INFO("\n");
427 DTN_INFO("L_ENC: DPHY_FEC_EN DPHY_FEC_READY_SHADOW DPHY_FEC_ACTIVE_STATUS DP_LINK_TRAINING_COMPLETE\n");
428 for (i = 0; i < dc->link_count; i++) {
429 struct link_encoder *lenc = dc->links[i]->link_enc;
431 struct link_enc_state s = {0};
433 if (lenc->funcs->read_state) {
434 lenc->funcs->read_state(lenc, &s);
435 DTN_INFO("[%-3d]: %-12d %-22d %-22d %-25d\n",
436 i,
437 s.dphy_fec_en,
438 s.dphy_fec_ready_shadow,
439 s.dphy_fec_active_status,
440 s.dp_link_training_complete);
441 DTN_INFO("\n");
444 DTN_INFO("\n");
446 DTN_INFO("\nCALCULATED Clocks: dcfclk_khz:%d dcfclk_deep_sleep_khz:%d dispclk_khz:%d\n"
447 "dppclk_khz:%d max_supported_dppclk_khz:%d fclk_khz:%d socclk_khz:%d\n\n",
448 dc->current_state->bw_ctx.bw.dcn.clk.dcfclk_khz,
449 dc->current_state->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz,
450 dc->current_state->bw_ctx.bw.dcn.clk.dispclk_khz,
451 dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz,
452 dc->current_state->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz,
453 dc->current_state->bw_ctx.bw.dcn.clk.fclk_khz,
454 dc->current_state->bw_ctx.bw.dcn.clk.socclk_khz);
456 log_mpc_crc(dc, log_ctx);
458 DTN_INFO_END();
461 bool dcn10_did_underflow_occur(struct dc *dc, struct pipe_ctx *pipe_ctx)
463 struct hubp *hubp = pipe_ctx->plane_res.hubp;
464 struct timing_generator *tg = pipe_ctx->stream_res.tg;
466 if (tg->funcs->is_optc_underflow_occurred(tg)) {
467 tg->funcs->clear_optc_underflow(tg);
468 return true;
471 if (hubp->funcs->hubp_get_underflow_status(hubp)) {
472 hubp->funcs->hubp_clear_underflow(hubp);
473 return true;
475 return false;
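/*
 * enable == true lets the PGFSM power gate the DCHUBP and DPP domains;
 * enable == false forces all eight domains on, effectively disabling
 * power gating.
 */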
478 void dcn10_enable_power_gating_plane(
479 struct dce_hwseq *hws,
480 bool enable)
482 bool force_on = true; /* disable power gating */
484 if (enable)
485 force_on = false;
487 /* DCHUBP0/1/2/3 */
488 REG_UPDATE(DOMAIN0_PG_CONFIG, DOMAIN0_POWER_FORCEON, force_on);
489 REG_UPDATE(DOMAIN2_PG_CONFIG, DOMAIN2_POWER_FORCEON, force_on);
490 REG_UPDATE(DOMAIN4_PG_CONFIG, DOMAIN4_POWER_FORCEON, force_on);
491 REG_UPDATE(DOMAIN6_PG_CONFIG, DOMAIN6_POWER_FORCEON, force_on);
493 /* DPP0/1/2/3 */
494 REG_UPDATE(DOMAIN1_PG_CONFIG, DOMAIN1_POWER_FORCEON, force_on);
495 REG_UPDATE(DOMAIN3_PG_CONFIG, DOMAIN3_POWER_FORCEON, force_on);
496 REG_UPDATE(DOMAIN5_PG_CONFIG, DOMAIN5_POWER_FORCEON, force_on);
497 REG_UPDATE(DOMAIN7_PG_CONFIG, DOMAIN7_POWER_FORCEON, force_on);
500 void dcn10_disable_vga(
501 struct dce_hwseq *hws)
503 unsigned int in_vga1_mode = 0;
504 unsigned int in_vga2_mode = 0;
505 unsigned int in_vga3_mode = 0;
506 unsigned int in_vga4_mode = 0;
508 REG_GET(D1VGA_CONTROL, D1VGA_MODE_ENABLE, &in_vga1_mode);
509 REG_GET(D2VGA_CONTROL, D2VGA_MODE_ENABLE, &in_vga2_mode);
510 REG_GET(D3VGA_CONTROL, D3VGA_MODE_ENABLE, &in_vga3_mode);
511 REG_GET(D4VGA_CONTROL, D4VGA_MODE_ENABLE, &in_vga4_mode);
513 if (in_vga1_mode == 0 && in_vga2_mode == 0 &&
514 in_vga3_mode == 0 && in_vga4_mode == 0)
515 return;
517 REG_WRITE(D1VGA_CONTROL, 0);
518 REG_WRITE(D2VGA_CONTROL, 0);
519 REG_WRITE(D3VGA_CONTROL, 0);
520 REG_WRITE(D4VGA_CONTROL, 0);
522 /* HW Engineer's Notes:
523 * During switch from vga->extended, if we set the VGA_TEST_ENABLE and
524 * then hit the VGA_TEST_RENDER_START, then the DCHUBP timing gets updated correctly.
526 * Then vBIOS will have it poll for the VGA_TEST_RENDER_DONE and unset
527 * VGA_TEST_ENABLE, to leave it in the same state as before.
528 */
529 REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_ENABLE, 1);
530 REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_RENDER_START, 1);
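/*
 * Gate or un-gate a single DPP power domain. power_on == true clears
 * DOMAINn_POWER_GATE and waits for PGFSM power status 0 (on);
 * power_on == false sets the gate bit and waits for status 2 (gated).
 * DPP n sits in power domain 2n + 1 (DOMAIN1/3/5/7).
 */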
533 void dcn10_dpp_pg_control(
534 struct dce_hwseq *hws,
535 unsigned int dpp_inst,
536 bool power_on)
538 uint32_t power_gate = power_on ? 0 : 1;
539 uint32_t pwr_status = power_on ? 0 : 2;
541 if (hws->ctx->dc->debug.disable_dpp_power_gate)
542 return;
543 if (REG(DOMAIN1_PG_CONFIG) == 0)
544 return;
546 switch (dpp_inst) {
547 case 0: /* DPP0 */
548 REG_UPDATE(DOMAIN1_PG_CONFIG,
549 DOMAIN1_POWER_GATE, power_gate);
551 REG_WAIT(DOMAIN1_PG_STATUS,
552 DOMAIN1_PGFSM_PWR_STATUS, pwr_status,
553 1, 1000);
554 break;
555 case 1: /* DPP1 */
556 REG_UPDATE(DOMAIN3_PG_CONFIG,
557 DOMAIN3_POWER_GATE, power_gate);
559 REG_WAIT(DOMAIN3_PG_STATUS,
560 DOMAIN3_PGFSM_PWR_STATUS, pwr_status,
561 1, 1000);
562 break;
563 case 2: /* DPP2 */
564 REG_UPDATE(DOMAIN5_PG_CONFIG,
565 DOMAIN5_POWER_GATE, power_gate);
567 REG_WAIT(DOMAIN5_PG_STATUS,
568 DOMAIN5_PGFSM_PWR_STATUS, pwr_status,
569 1, 1000);
570 break;
571 case 3: /* DPP3 */
572 REG_UPDATE(DOMAIN7_PG_CONFIG,
573 DOMAIN7_POWER_GATE, power_gate);
575 REG_WAIT(DOMAIN7_PG_STATUS,
576 DOMAIN7_PGFSM_PWR_STATUS, pwr_status,
577 1, 1000);
578 break;
579 default:
580 BREAK_TO_DEBUGGER();
581 break;
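/*
 * Same PGFSM handshake as dcn10_dpp_pg_control(), but for the DCHUBP
 * power domains: HUBP n sits in domain 2n (DOMAIN0/2/4/6).
 */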
585 void dcn10_hubp_pg_control(
586 struct dce_hwseq *hws,
587 unsigned int hubp_inst,
588 bool power_on)
590 uint32_t power_gate = power_on ? 0 : 1;
591 uint32_t pwr_status = power_on ? 0 : 2;
593 if (hws->ctx->dc->debug.disable_hubp_power_gate)
594 return;
595 if (REG(DOMAIN0_PG_CONFIG) == 0)
596 return;
598 switch (hubp_inst) {
599 case 0: /* DCHUBP0 */
600 REG_UPDATE(DOMAIN0_PG_CONFIG,
601 DOMAIN0_POWER_GATE, power_gate);
603 REG_WAIT(DOMAIN0_PG_STATUS,
604 DOMAIN0_PGFSM_PWR_STATUS, pwr_status,
605 1, 1000);
606 break;
607 case 1: /* DCHUBP1 */
608 REG_UPDATE(DOMAIN2_PG_CONFIG,
609 DOMAIN2_POWER_GATE, power_gate);
611 REG_WAIT(DOMAIN2_PG_STATUS,
612 DOMAIN2_PGFSM_PWR_STATUS, pwr_status,
613 1, 1000);
614 break;
615 case 2: /* DCHUBP2 */
616 REG_UPDATE(DOMAIN4_PG_CONFIG,
617 DOMAIN4_POWER_GATE, power_gate);
619 REG_WAIT(DOMAIN4_PG_STATUS,
620 DOMAIN4_PGFSM_PWR_STATUS, pwr_status,
621 1, 1000);
622 break;
623 case 3: /* DCHUBP3 */
624 REG_UPDATE(DOMAIN6_PG_CONFIG,
625 DOMAIN6_POWER_GATE, power_gate);
627 REG_WAIT(DOMAIN6_PG_STATUS,
628 DOMAIN6_PGFSM_PWR_STATUS, pwr_status,
629 1, 1000);
630 break;
631 default:
632 BREAK_TO_DEBUGGER();
633 break;
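/*
 * Un-gate the DPP/HUBP pair that make up front-end pipe plane_id. The
 * PGFSM requests are issued with IP_REQUEST_EN asserted and the bit is
 * cleared again afterwards.
 */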
637 static void power_on_plane(
638 struct dce_hwseq *hws,
639 int plane_id)
641 DC_LOGGER_INIT(hws->ctx->logger);
642 if (REG(DC_IP_REQUEST_CNTL)) {
643 REG_SET(DC_IP_REQUEST_CNTL, 0,
644 IP_REQUEST_EN, 1);
645 hws->funcs.dpp_pg_control(hws, plane_id, true);
646 hws->funcs.hubp_pg_control(hws, plane_id, true);
647 REG_SET(DC_IP_REQUEST_CNTL, 0,
648 IP_REQUEST_EN, 0);
649 DC_LOG_DEBUG(
650 "Un-gated front end for pipe %d\n", plane_id);
654 static void undo_DEGVIDCN10_253_wa(struct dc *dc)
656 struct dce_hwseq *hws = dc->hwseq;
657 struct hubp *hubp = dc->res_pool->hubps[0];
659 if (!hws->wa_state.DEGVIDCN10_253_applied)
660 return;
662 hubp->funcs->set_blank(hubp, true);
664 REG_SET(DC_IP_REQUEST_CNTL, 0,
665 IP_REQUEST_EN, 1);
667 hws->funcs.hubp_pg_control(hws, 0, false);
668 REG_SET(DC_IP_REQUEST_CNTL, 0,
669 IP_REQUEST_EN, 0);
671 hws->wa_state.DEGVIDCN10_253_applied = false;
674 static void apply_DEGVIDCN10_253_wa(struct dc *dc)
676 struct dce_hwseq *hws = dc->hwseq;
677 struct hubp *hubp = dc->res_pool->hubps[0];
678 int i;
680 if (dc->debug.disable_stutter)
681 return;
683 if (!hws->wa.DEGVIDCN10_253)
684 return;
686 for (i = 0; i < dc->res_pool->pipe_count; i++) {
687 if (!dc->res_pool->hubps[i]->power_gated)
688 return;
691 /* all pipe power gated, apply work around to enable stutter. */
693 REG_SET(DC_IP_REQUEST_CNTL, 0,
694 IP_REQUEST_EN, 1);
696 hws->funcs.hubp_pg_control(hws, 0, true);
697 REG_SET(DC_IP_REQUEST_CNTL, 0,
698 IP_REQUEST_EN, 0);
700 hubp->funcs->set_hubp_blank_en(hubp, false);
701 hws->wa_state.DEGVIDCN10_253_applied = true;
704 void dcn10_bios_golden_init(struct dc *dc)
706 struct dce_hwseq *hws = dc->hwseq;
707 struct dc_bios *bp = dc->ctx->dc_bios;
708 int i;
709 bool allow_self_fresh_force_enable = true;
711 if (hws->funcs.s0i3_golden_init_wa && hws->funcs.s0i3_golden_init_wa(dc))
712 return;
714 if (dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled)
715 allow_self_fresh_force_enable =
716 dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled(dc->res_pool->hubbub);
719 /* WA for making DF sleep when idle after resume from S0i3.
720 * DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE is set to 1 by
721 * command table, if DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE = 0
722 * before calling command table and it changed to 1 after,
723 * it should be set back to 0.
724 */
726 /* initialize dcn global */
727 bp->funcs->enable_disp_power_gating(bp,
728 CONTROLLER_ID_D0, ASIC_PIPE_INIT);
730 for (i = 0; i < dc->res_pool->pipe_count; i++) {
731 /* initialize dcn per pipe */
732 bp->funcs->enable_disp_power_gating(bp,
733 CONTROLLER_ID_D0 + i, ASIC_PIPE_DISABLE);
736 if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
737 if (allow_self_fresh_force_enable == false &&
738 dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled(dc->res_pool->hubbub))
739 dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub, true);
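/*
 * Workaround for a spurious OPTC underflow indication, active only when
 * the false_optc_underflow workaround flag is set: wait for the MPCC
 * disconnects of any current-state pipe still tied to this stream, enable
 * blank-data double buffering if available, and clear the underflow flag
 * only if it was raised during the wait and was not already set before.
 */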
743 static void false_optc_underflow_wa(
744 struct dc *dc,
745 const struct dc_stream_state *stream,
746 struct timing_generator *tg)
748 int i;
749 bool underflow;
751 if (!dc->hwseq->wa.false_optc_underflow)
752 return;
754 underflow = tg->funcs->is_optc_underflow_occurred(tg);
756 for (i = 0; i < dc->res_pool->pipe_count; i++) {
757 struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
759 if (old_pipe_ctx->stream != stream)
760 continue;
762 dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, old_pipe_ctx);
765 if (tg->funcs->set_blank_data_double_buffer)
766 tg->funcs->set_blank_data_double_buffer(tg, true);
768 if (tg->funcs->is_optc_underflow_occurred(tg) && !underflow)
769 tg->funcs->clear_optc_underflow(tg);
772 enum dc_status dcn10_enable_stream_timing(
773 struct pipe_ctx *pipe_ctx,
774 struct dc_state *context,
775 struct dc *dc)
777 struct dc_stream_state *stream = pipe_ctx->stream;
778 enum dc_color_space color_space;
779 struct tg_color black_color = {0};
781 /* By the upper caller loop, pipe0 is the parent pipe and is called first.
782 * The back end is set up for pipe0; other child pipes share the back end
783 * with pipe 0, so no extra programming is needed for them.
784 */
785 if (pipe_ctx->top_pipe != NULL)
786 return DC_OK;
788 /* TODO check if timing_changed, disable stream if timing changed */
790 /* The HW programming guide assumes the display has already been disabled
791 * by the unplug sequence and that the OTG is stopped.
792 */
793 pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, true);
795 if (false == pipe_ctx->clock_source->funcs->program_pix_clk(
796 pipe_ctx->clock_source,
797 &pipe_ctx->stream_res.pix_clk_params,
798 &pipe_ctx->pll_settings)) {
799 BREAK_TO_DEBUGGER();
800 return DC_ERROR_UNEXPECTED;
803 pipe_ctx->stream_res.tg->funcs->program_timing(
804 pipe_ctx->stream_res.tg,
805 &stream->timing,
806 pipe_ctx->pipe_dlg_param.vready_offset,
807 pipe_ctx->pipe_dlg_param.vstartup_start,
808 pipe_ctx->pipe_dlg_param.vupdate_offset,
809 pipe_ctx->pipe_dlg_param.vupdate_width,
810 pipe_ctx->stream->signal,
811 true);
813 #if 0 /* move to after enable_crtc */
814 /* TODO: OPP FMT, ABM. etc. should be done here. */
815 /* or FPGA now. instance 0 only. TODO: move to opp.c */
817 inst_offset = reg_offsets[pipe_ctx->stream_res.tg->inst].fmt;
819 pipe_ctx->stream_res.opp->funcs->opp_program_fmt(
820 pipe_ctx->stream_res.opp,
821 &stream->bit_depth_params,
822 &stream->clamping);
823 #endif
824 /* program otg blank color */
825 color_space = stream->output_color_space;
826 color_space_to_black_color(dc, color_space, &black_color);
828 if (pipe_ctx->stream_res.tg->funcs->set_blank_color)
829 pipe_ctx->stream_res.tg->funcs->set_blank_color(
830 pipe_ctx->stream_res.tg,
831 &black_color);
833 if (pipe_ctx->stream_res.tg->funcs->is_blanked &&
834 !pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg)) {
835 pipe_ctx->stream_res.tg->funcs->set_blank(pipe_ctx->stream_res.tg, true);
836 hwss_wait_for_blank_complete(pipe_ctx->stream_res.tg);
837 false_optc_underflow_wa(dc, pipe_ctx->stream, pipe_ctx->stream_res.tg);
840 /* VTG is within DCHUB command block. DCFCLK is always on */
841 if (false == pipe_ctx->stream_res.tg->funcs->enable_crtc(pipe_ctx->stream_res.tg)) {
842 BREAK_TO_DEBUGGER();
843 return DC_ERROR_UNEXPECTED;
846 /* TODO program crtc source select for non-virtual signal*/
847 /* TODO program FMT */
848 /* TODO setup link_enc */
849 /* TODO set stream attributes */
850 /* TODO program audio */
851 /* TODO enable stream if timing changed */
852 /* TODO unblank stream if DP */
854 return DC_OK;
857 static void dcn10_reset_back_end_for_pipe(
858 struct dc *dc,
859 struct pipe_ctx *pipe_ctx,
860 struct dc_state *context)
862 int i;
863 struct dc_link *link;
864 DC_LOGGER_INIT(dc->ctx->logger);
865 if (pipe_ctx->stream_res.stream_enc == NULL) {
866 pipe_ctx->stream = NULL;
867 return;
870 if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
871 link = pipe_ctx->stream->link;
872 /* DPMS may already disable or */
873 /* dpms_off status is incorrect due to fastboot
874 * feature. When system resume from S4 with second
875 * screen only, the dpms_off would be true but
876 * VBIOS lit up eDP, so check link status too.
877 */
878 if (!pipe_ctx->stream->dpms_off || link->link_status.link_active)
879 core_link_disable_stream(pipe_ctx);
880 else if (pipe_ctx->stream_res.audio)
881 dc->hwss.disable_audio_stream(pipe_ctx);
883 if (pipe_ctx->stream_res.audio) {
884 /*disable az_endpoint*/
885 pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);
887 /*free audio*/
888 if (dc->caps.dynamic_audio == true) {
889 /*we have to dynamic arbitrate the audio endpoints*/
890 /*we free the resource, need reset is_audio_acquired*/
891 update_audio_usage(&dc->current_state->res_ctx, dc->res_pool,
892 pipe_ctx->stream_res.audio, false);
893 pipe_ctx->stream_res.audio = NULL;
898 /* By the upper caller loop, the parent pipe (pipe0) is reset last. The
899 * back end is shared by all pipes and is disabled only when the parent
900 * pipe is disabled.
901 */
902 if (pipe_ctx->top_pipe == NULL) {
903 pipe_ctx->stream_res.tg->funcs->disable_crtc(pipe_ctx->stream_res.tg);
905 pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, false);
906 if (pipe_ctx->stream_res.tg->funcs->set_drr)
907 pipe_ctx->stream_res.tg->funcs->set_drr(
908 pipe_ctx->stream_res.tg, NULL);
911 for (i = 0; i < dc->res_pool->pipe_count; i++)
912 if (&dc->current_state->res_ctx.pipe_ctx[i] == pipe_ctx)
913 break;
915 if (i == dc->res_pool->pipe_count)
916 return;
918 pipe_ctx->stream = NULL;
919 DC_LOG_DEBUG("Reset back end for pipe %d, tg:%d\n",
920 pipe_ctx->pipe_idx, pipe_ctx->stream_res.tg->inst);
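/*
 * Forced recovery used when the p-state change check fails (see
 * dcn10_verify_allow_pstate_change_high() below): blank every HUBP,
 * soft-reset DCHUBBUB, toggle HUBP_DISABLE per pipe, then release the
 * soft reset, following the register sequence listed in the comment in
 * the function body. Only runs when debug.recovery_enabled is set.
 */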
923 static bool dcn10_hw_wa_force_recovery(struct dc *dc)
925 struct hubp *hubp ;
926 unsigned int i;
927 bool need_recover = true;
929 if (!dc->debug.recovery_enabled)
930 return false;
932 for (i = 0; i < dc->res_pool->pipe_count; i++) {
933 struct pipe_ctx *pipe_ctx =
934 &dc->current_state->res_ctx.pipe_ctx[i];
935 if (pipe_ctx != NULL) {
936 hubp = pipe_ctx->plane_res.hubp;
937 if (hubp != NULL && hubp->funcs->hubp_get_underflow_status) {
938 if (hubp->funcs->hubp_get_underflow_status(hubp) != 0) {
939 /* one pipe underflow, we will reset all the pipes*/
940 need_recover = true;
945 if (!need_recover)
946 return false;
947 /*
948 DCHUBP_CNTL:HUBP_BLANK_EN=1
949 DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=1
950 DCHUBP_CNTL:HUBP_DISABLE=1
951 DCHUBP_CNTL:HUBP_DISABLE=0
952 DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=0
953 DCSURF_PRIMARY_SURFACE_ADDRESS
954 DCHUBP_CNTL:HUBP_BLANK_EN=0
955 */
957 for (i = 0; i < dc->res_pool->pipe_count; i++) {
958 struct pipe_ctx *pipe_ctx =
959 &dc->current_state->res_ctx.pipe_ctx[i];
960 if (pipe_ctx != NULL) {
961 hubp = pipe_ctx->plane_res.hubp;
962 /*DCHUBP_CNTL:HUBP_BLANK_EN=1*/
963 if (hubp != NULL && hubp->funcs->set_hubp_blank_en)
964 hubp->funcs->set_hubp_blank_en(hubp, true);
967 /*DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=1*/
968 hubbub1_soft_reset(dc->res_pool->hubbub, true);
970 for (i = 0; i < dc->res_pool->pipe_count; i++) {
971 struct pipe_ctx *pipe_ctx =
972 &dc->current_state->res_ctx.pipe_ctx[i];
973 if (pipe_ctx != NULL) {
974 hubp = pipe_ctx->plane_res.hubp;
975 /*DCHUBP_CNTL:HUBP_DISABLE=1*/
976 if (hubp != NULL && hubp->funcs->hubp_disable_control)
977 hubp->funcs->hubp_disable_control(hubp, true);
980 for (i = 0; i < dc->res_pool->pipe_count; i++) {
981 struct pipe_ctx *pipe_ctx =
982 &dc->current_state->res_ctx.pipe_ctx[i];
983 if (pipe_ctx != NULL) {
984 hubp = pipe_ctx->plane_res.hubp;
985 /*DCHUBP_CNTL:HUBP_DISABLE=0*/
986 if (hubp != NULL && hubp->funcs->hubp_disable_control)
987 hubp->funcs->hubp_disable_control(hubp, true);
990 /*DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=0*/
991 hubbub1_soft_reset(dc->res_pool->hubbub, false);
992 for (i = 0; i < dc->res_pool->pipe_count; i++) {
993 struct pipe_ctx *pipe_ctx =
994 &dc->current_state->res_ctx.pipe_ctx[i];
995 if (pipe_ctx != NULL) {
996 hubp = pipe_ctx->plane_res.hubp;
997 /*DCHUBP_CNTL:HUBP_BLANK_EN=0*/
998 if (hubp != NULL && hubp->funcs->set_hubp_blank_en)
999 hubp->funcs->set_hubp_blank_en(hubp, true);
1002 return true;
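/*
 * Sanity check run when debug.sanity_checks is enabled: ask HUBBUB whether
 * the p-state change request is being honoured; if not, optionally dump HW
 * state (off by default), break to the debugger, attempt the forced
 * recovery above and check once more.
 */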
1007 void dcn10_verify_allow_pstate_change_high(struct dc *dc)
1009 static bool should_log_hw_state; /* prevent hw state log by default */
1011 if (!hubbub1_verify_allow_pstate_change_high(dc->res_pool->hubbub)) {
1012 if (should_log_hw_state) {
1013 dcn10_log_hw_state(dc, NULL);
1015 BREAK_TO_DEBUGGER();
1016 if (dcn10_hw_wa_force_recovery(dc)) {
1017 /*check again*/
1018 if (!hubbub1_verify_allow_pstate_change_high(dc->res_pool->hubbub))
1019 BREAK_TO_DEBUGGER();
1024 /* trigger HW to start disconnect plane from stream on the next vsync */
1025 void dcn10_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx)
1027 struct dce_hwseq *hws = dc->hwseq;
1028 struct hubp *hubp = pipe_ctx->plane_res.hubp;
1029 int dpp_id = pipe_ctx->plane_res.dpp->inst;
1030 struct mpc *mpc = dc->res_pool->mpc;
1031 struct mpc_tree *mpc_tree_params;
1032 struct mpcc *mpcc_to_remove = NULL;
1033 struct output_pixel_processor *opp = pipe_ctx->stream_res.opp;
1035 mpc_tree_params = &(opp->mpc_tree_params);
1036 mpcc_to_remove = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, dpp_id);
1038 /*Already reset*/
1039 if (mpcc_to_remove == NULL)
1040 return;
1042 mpc->funcs->remove_mpcc(mpc, mpc_tree_params, mpcc_to_remove);
1043 if (opp != NULL)
1044 opp->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
1046 dc->optimized_required = true;
1048 if (hubp->funcs->hubp_disconnect)
1049 hubp->funcs->hubp_disconnect(hubp);
1051 if (dc->debug.sanity_checks)
1052 hws->funcs.verify_allow_pstate_change_high(dc);
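/*
 * Power-gate one front end: with IP_REQUEST_EN asserted, gate the DPP and
 * HUBP power domains and reset the DPP, then release IP_REQUEST_EN.
 */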
1055 void dcn10_plane_atomic_power_down(struct dc *dc,
1056 struct dpp *dpp,
1057 struct hubp *hubp)
1059 struct dce_hwseq *hws = dc->hwseq;
1060 DC_LOGGER_INIT(dc->ctx->logger);
1062 if (REG(DC_IP_REQUEST_CNTL)) {
1063 REG_SET(DC_IP_REQUEST_CNTL, 0,
1064 IP_REQUEST_EN, 1);
1065 hws->funcs.dpp_pg_control(hws, dpp->inst, false);
1066 hws->funcs.hubp_pg_control(hws, hubp->inst, false);
1067 dpp->funcs->dpp_reset(dpp);
1068 REG_SET(DC_IP_REQUEST_CNTL, 0,
1069 IP_REQUEST_EN, 0);
1070 DC_LOG_DEBUG(
1071 "Power gated front end %d\n", hubp->inst);
1075 /* disable HW used by plane.
1076 * note: cannot disable until disconnect is complete
1077 */
1078 void dcn10_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
1080 struct dce_hwseq *hws = dc->hwseq;
1081 struct hubp *hubp = pipe_ctx->plane_res.hubp;
1082 struct dpp *dpp = pipe_ctx->plane_res.dpp;
1083 int opp_id = hubp->opp_id;
1085 dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe_ctx);
1087 hubp->funcs->hubp_clk_cntl(hubp, false);
1089 dpp->funcs->dpp_dppclk_control(dpp, false, false);
1091 if (opp_id != 0xf && pipe_ctx->stream_res.opp->mpc_tree_params.opp_list == NULL)
1092 pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
1093 pipe_ctx->stream_res.opp,
1094 false);
1096 hubp->power_gated = true;
1097 dc->optimized_required = false; /* We're powering off, no need to optimize */
1099 hws->funcs.plane_atomic_power_down(dc,
1100 pipe_ctx->plane_res.dpp,
1101 pipe_ctx->plane_res.hubp);
1103 pipe_ctx->stream = NULL;
1104 memset(&pipe_ctx->stream_res, 0, sizeof(pipe_ctx->stream_res));
1105 memset(&pipe_ctx->plane_res, 0, sizeof(pipe_ctx->plane_res));
1106 pipe_ctx->top_pipe = NULL;
1107 pipe_ctx->bottom_pipe = NULL;
1108 pipe_ctx->plane_state = NULL;
1111 void dcn10_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx)
1113 struct dce_hwseq *hws = dc->hwseq;
1114 DC_LOGGER_INIT(dc->ctx->logger);
1116 if (!pipe_ctx->plane_res.hubp || pipe_ctx->plane_res.hubp->power_gated)
1117 return;
1119 hws->funcs.plane_atomic_disable(dc, pipe_ctx);
1121 apply_DEGVIDCN10_253_wa(dc);
1123 DC_LOG_DC("Power down front end %d\n",
1124 pipe_ctx->pipe_idx);
1127 void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
1129 int i;
1130 struct dce_hwseq *hws = dc->hwseq;
1131 bool can_apply_seamless_boot = false;
1133 for (i = 0; i < context->stream_count; i++) {
1134 if (context->streams[i]->apply_seamless_boot_optimization) {
1135 can_apply_seamless_boot = true;
1136 break;
1140 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1141 struct timing_generator *tg = dc->res_pool->timing_generators[i];
1142 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1144 /* There is assumption that pipe_ctx is not mapping irregularly
1145 * to non-preferred front end. If pipe_ctx->stream is not NULL,
1146 * we will use the pipe, so don't disable
1147 */
1148 if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
1149 continue;
1151 /* Blank controller using driver code instead of
1152 * command table.
1153 */
1154 if (tg->funcs->is_tg_enabled(tg)) {
1155 if (hws->funcs.init_blank != NULL) {
1156 hws->funcs.init_blank(dc, tg);
1157 tg->funcs->lock(tg);
1158 } else {
1159 tg->funcs->lock(tg);
1160 tg->funcs->set_blank(tg, true);
1161 hwss_wait_for_blank_complete(tg);
1166 /* num_opp will be equal to number of mpcc */
1167 for (i = 0; i < dc->res_pool->res_cap->num_opp; i++) {
1168 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1170 /* Cannot reset the MPC mux if seamless boot */
1171 if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
1172 continue;
1174 dc->res_pool->mpc->funcs->mpc_init_single_inst(
1175 dc->res_pool->mpc, i);
1178 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1179 struct timing_generator *tg = dc->res_pool->timing_generators[i];
1180 struct hubp *hubp = dc->res_pool->hubps[i];
1181 struct dpp *dpp = dc->res_pool->dpps[i];
1182 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1184 /* There is assumption that pipe_ctx is not mapping irregularly
1185 * to non-preferred front end. If pipe_ctx->stream is not NULL,
1186 * we will use the pipe, so don't disable
1187 */
1188 if (can_apply_seamless_boot &&
1189 pipe_ctx->stream != NULL &&
1190 pipe_ctx->stream_res.tg->funcs->is_tg_enabled(
1191 pipe_ctx->stream_res.tg)) {
1192 // Enable double buffering for OTG_BLANK no matter if
1193 // seamless boot is enabled or not to suppress global sync
1194 // signals when OTG blanked. This is to prevent pipe from
1195 // requesting data while in PSR.
1196 tg->funcs->tg_init(tg);
1197 continue;
1200 /* Disable on the current state so the new one isn't cleared. */
1201 pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
1203 dpp->funcs->dpp_reset(dpp);
1205 pipe_ctx->stream_res.tg = tg;
1206 pipe_ctx->pipe_idx = i;
1208 pipe_ctx->plane_res.hubp = hubp;
1209 pipe_ctx->plane_res.dpp = dpp;
1210 pipe_ctx->plane_res.mpcc_inst = dpp->inst;
1211 hubp->mpcc_id = dpp->inst;
1212 hubp->opp_id = OPP_ID_INVALID;
1213 hubp->power_gated = false;
1215 dc->res_pool->opps[i]->mpc_tree_params.opp_id = dc->res_pool->opps[i]->inst;
1216 dc->res_pool->opps[i]->mpc_tree_params.opp_list = NULL;
1217 dc->res_pool->opps[i]->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
1218 pipe_ctx->stream_res.opp = dc->res_pool->opps[i];
1220 hws->funcs.plane_atomic_disconnect(dc, pipe_ctx);
1222 if (tg->funcs->is_tg_enabled(tg))
1223 tg->funcs->unlock(tg);
1225 dc->hwss.disable_plane(dc, pipe_ctx);
1227 pipe_ctx->stream_res.tg = NULL;
1228 pipe_ctx->plane_res.hubp = NULL;
1230 tg->funcs->tg_init(tg);
1234 void dcn10_init_hw(struct dc *dc)
1236 int i;
1237 struct abm *abm = dc->res_pool->abm;
1238 struct dmcu *dmcu = dc->res_pool->dmcu;
1239 struct dce_hwseq *hws = dc->hwseq;
1240 struct dc_bios *dcb = dc->ctx->dc_bios;
1241 struct resource_pool *res_pool = dc->res_pool;
1243 if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks)
1244 dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);
1246 // Initialize the dccg
1247 if (dc->res_pool->dccg && dc->res_pool->dccg->funcs->dccg_init)
1248 dc->res_pool->dccg->funcs->dccg_init(res_pool->dccg);
1250 if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
1252 REG_WRITE(REFCLK_CNTL, 0);
1253 REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, 1);
1254 REG_WRITE(DIO_MEM_PWR_CTRL, 0);
1256 if (!dc->debug.disable_clock_gate) {
1257 /* enable all DCN clock gating */
1258 REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);
1260 REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);
1262 REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
1265 //Enable ability to power gate / don't force power on permanently
1266 hws->funcs.enable_power_gating_plane(hws, true);
1268 return;
1271 if (!dcb->funcs->is_accelerated_mode(dcb))
1272 hws->funcs.disable_vga(dc->hwseq);
1274 hws->funcs.bios_golden_init(dc);
1275 if (dc->ctx->dc_bios->fw_info_valid) {
1276 res_pool->ref_clocks.xtalin_clock_inKhz =
1277 dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency;
1279 if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
1280 if (res_pool->dccg && res_pool->hubbub) {
1282 (res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg,
1283 dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency,
1284 &res_pool->ref_clocks.dccg_ref_clock_inKhz);
1286 (res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub,
1287 res_pool->ref_clocks.dccg_ref_clock_inKhz,
1288 &res_pool->ref_clocks.dchub_ref_clock_inKhz);
1289 } else {
1290 // Not all ASICs have DCCG sw component
1291 res_pool->ref_clocks.dccg_ref_clock_inKhz =
1292 res_pool->ref_clocks.xtalin_clock_inKhz;
1293 res_pool->ref_clocks.dchub_ref_clock_inKhz =
1294 res_pool->ref_clocks.xtalin_clock_inKhz;
1297 } else
1298 ASSERT_CRITICAL(false);
1300 for (i = 0; i < dc->link_count; i++) {
1301 /* Power up AND update implementation according to the
1302 * required signal (which may be different from the
1303 * default signal on connector).
1304 */
1305 struct dc_link *link = dc->links[i];
1307 link->link_enc->funcs->hw_init(link->link_enc);
1309 /* Check for enabled DIG to identify enabled display */
1310 if (link->link_enc->funcs->is_dig_enabled &&
1311 link->link_enc->funcs->is_dig_enabled(link->link_enc))
1312 link->link_status.link_active = true;
1315 /* Power gate DSCs */
1316 for (i = 0; i < res_pool->res_cap->num_dsc; i++)
1317 if (hws->funcs.dsc_pg_control != NULL)
1318 hws->funcs.dsc_pg_control(hws, res_pool->dscs[i]->inst, false);
1320 /* If taking control over from VBIOS, we may want to optimize our first
1321 * mode set, so we need to skip powering down pipes until we know which
1322 * pipes we want to use.
1323 * Otherwise, if taking control is not possible, we need to power
1324 * everything down.
1325 */
1326 if (dcb->funcs->is_accelerated_mode(dcb) || dc->config.power_down_display_on_boot) {
1327 hws->funcs.init_pipes(dc, dc->current_state);
1330 for (i = 0; i < res_pool->audio_count; i++) {
1331 struct audio *audio = res_pool->audios[i];
1333 audio->funcs->hw_init(audio);
1336 if (abm != NULL) {
1337 abm->funcs->init_backlight(abm);
1338 abm->funcs->abm_init(abm);
1341 if (dmcu != NULL && !dmcu->auto_load_dmcu)
1342 dmcu->funcs->dmcu_init(dmcu);
1344 if (abm != NULL && dmcu != NULL)
1345 abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
1347 /* power AFMT HDMI memory TODO: may move to dis/en output save power*/
1348 REG_WRITE(DIO_MEM_PWR_CTRL, 0);
1350 if (!dc->debug.disable_clock_gate) {
1351 /* enable all DCN clock gating */
1352 REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);
1354 REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);
1356 REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
1359 hws->funcs.enable_power_gating_plane(dc->hwseq, true);
1361 if (dc->clk_mgr->funcs->notify_wm_ranges)
1362 dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr);
1366 void dcn10_reset_hw_ctx_wrap(
1367 struct dc *dc,
1368 struct dc_state *context)
1370 int i;
1371 struct dce_hwseq *hws = dc->hwseq;
1373 /* Reset Back End*/
1374 for (i = dc->res_pool->pipe_count - 1; i >= 0 ; i--) {
1375 struct pipe_ctx *pipe_ctx_old =
1376 &dc->current_state->res_ctx.pipe_ctx[i];
1377 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1379 if (!pipe_ctx_old->stream)
1380 continue;
1382 if (pipe_ctx_old->top_pipe)
1383 continue;
1385 if (!pipe_ctx->stream ||
1386 pipe_need_reprogram(pipe_ctx_old, pipe_ctx)) {
1387 struct clock_source *old_clk = pipe_ctx_old->clock_source;
1389 dcn10_reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state);
1390 if (hws->funcs.enable_stream_gating)
1391 hws->funcs.enable_stream_gating(dc, pipe_ctx);
1392 if (old_clk)
1393 old_clk->funcs->cs_power_down(old_clk);
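/*
 * For side-by-side / top-and-bottom stereo, the secondary split pipe must
 * scan out the right-eye surface: swap the right address into the plane
 * state and return the saved left address through *addr so the caller can
 * restore it after the flip is programmed (see dcn10_update_plane_addr()).
 * Returns true only when a swap was actually done.
 */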
1398 static bool patch_address_for_sbs_tb_stereo(
1399 struct pipe_ctx *pipe_ctx, PHYSICAL_ADDRESS_LOC *addr)
1401 struct dc_plane_state *plane_state = pipe_ctx->plane_state;
1402 bool sec_split = pipe_ctx->top_pipe &&
1403 pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state;
1404 if (sec_split && plane_state->address.type == PLN_ADDR_TYPE_GRPH_STEREO &&
1405 (pipe_ctx->stream->timing.timing_3d_format ==
1406 TIMING_3D_FORMAT_SIDE_BY_SIDE ||
1407 pipe_ctx->stream->timing.timing_3d_format ==
1408 TIMING_3D_FORMAT_TOP_AND_BOTTOM)) {
1409 *addr = plane_state->address.grph_stereo.left_addr;
1410 plane_state->address.grph_stereo.left_addr =
1411 plane_state->address.grph_stereo.right_addr;
1412 return true;
1413 } else {
1414 if (pipe_ctx->stream->view_format != VIEW_3D_FORMAT_NONE &&
1415 plane_state->address.type != PLN_ADDR_TYPE_GRPH_STEREO) {
1416 plane_state->address.type = PLN_ADDR_TYPE_GRPH_STEREO;
1417 plane_state->address.grph_stereo.right_addr =
1418 plane_state->address.grph_stereo.left_addr;
1421 return false;
1424 void dcn10_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx)
1426 bool addr_patched = false;
1427 PHYSICAL_ADDRESS_LOC addr;
1428 struct dc_plane_state *plane_state = pipe_ctx->plane_state;
1430 if (plane_state == NULL)
1431 return;
1433 addr_patched = patch_address_for_sbs_tb_stereo(pipe_ctx, &addr);
1435 pipe_ctx->plane_res.hubp->funcs->hubp_program_surface_flip_and_addr(
1436 pipe_ctx->plane_res.hubp,
1437 &plane_state->address,
1438 plane_state->flip_immediate);
1440 plane_state->status.requested_address = plane_state->address;
1442 if (plane_state->flip_immediate)
1443 plane_state->status.current_address = plane_state->address;
1445 if (addr_patched)
1446 pipe_ctx->plane_state->address.grph_stereo.left_addr = addr;
1449 bool dcn10_set_input_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
1450 const struct dc_plane_state *plane_state)
1452 struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
1453 const struct dc_transfer_func *tf = NULL;
1454 bool result = true;
1456 if (dpp_base == NULL)
1457 return false;
1459 if (plane_state->in_transfer_func)
1460 tf = plane_state->in_transfer_func;
1462 if (plane_state->gamma_correction &&
1463 !dpp_base->ctx->dc->debug.always_use_regamma
1464 && !plane_state->gamma_correction->is_identity
1465 && dce_use_lut(plane_state->format))
1466 dpp_base->funcs->dpp_program_input_lut(dpp_base, plane_state->gamma_correction);
1468 if (tf == NULL)
1469 dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
1470 else if (tf->type == TF_TYPE_PREDEFINED) {
1471 switch (tf->tf) {
1472 case TRANSFER_FUNCTION_SRGB:
1473 dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_HW_sRGB);
1474 break;
1475 case TRANSFER_FUNCTION_BT709:
1476 dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_HW_xvYCC);
1477 break;
1478 case TRANSFER_FUNCTION_LINEAR:
1479 dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
1480 break;
1481 case TRANSFER_FUNCTION_PQ:
1482 dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_USER_PWL);
1483 cm_helper_translate_curve_to_degamma_hw_format(tf, &dpp_base->degamma_params);
1484 dpp_base->funcs->dpp_program_degamma_pwl(dpp_base, &dpp_base->degamma_params);
1485 result = true;
1486 break;
1487 default:
1488 result = false;
1489 break;
1491 } else if (tf->type == TF_TYPE_BYPASS) {
1492 dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
1493 } else {
1494 cm_helper_translate_curve_to_degamma_hw_format(tf,
1495 &dpp_base->degamma_params);
1496 dpp_base->funcs->dpp_program_degamma_pwl(dpp_base,
1497 &dpp_base->degamma_params);
1498 result = true;
1501 return result;
1504 #define MAX_NUM_HW_POINTS 0x200
1506 static void log_tf(struct dc_context *ctx,
1507 struct dc_transfer_func *tf, uint32_t hw_points_num)
1509 // DC_LOG_GAMMA is default logging of all hw points
1510 // DC_LOG_ALL_GAMMA logs all points, not only hw points
1511 // DC_LOG_ALL_TF_POINTS logs all channels of the tf
1512 int i = 0;
1514 DC_LOGGER_INIT(ctx->logger);
1515 DC_LOG_GAMMA("Gamma Correction TF");
1516 DC_LOG_ALL_GAMMA("Logging all tf points...");
1517 DC_LOG_ALL_TF_CHANNELS("Logging all channels...");
1519 for (i = 0; i < hw_points_num; i++) {
1520 DC_LOG_GAMMA("R\t%d\t%llu", i, tf->tf_pts.red[i].value);
1521 DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu", i, tf->tf_pts.green[i].value);
1522 DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu", i, tf->tf_pts.blue[i].value);
1525 for (i = hw_points_num; i < MAX_NUM_HW_POINTS; i++) {
1526 DC_LOG_ALL_GAMMA("R\t%d\t%llu", i, tf->tf_pts.red[i].value);
1527 DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu", i, tf->tf_pts.green[i].value);
1528 DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu", i, tf->tf_pts.blue[i].value);
1532 bool dcn10_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
1533 const struct dc_stream_state *stream)
1535 struct dpp *dpp = pipe_ctx->plane_res.dpp;
1537 if (dpp == NULL)
1538 return false;
1540 dpp->regamma_params.hw_points_num = GAMMA_HW_POINTS_NUM;
1542 if (stream->out_transfer_func &&
1543 stream->out_transfer_func->type == TF_TYPE_PREDEFINED &&
1544 stream->out_transfer_func->tf == TRANSFER_FUNCTION_SRGB)
1545 dpp->funcs->dpp_program_regamma_pwl(dpp, NULL, OPP_REGAMMA_SRGB);
1547 /* dcn10_translate_regamma_to_hw_format takes 750us, only do it when full
1548 * update.
1549 */
1550 else if (cm_helper_translate_curve_to_hw_format(
1551 stream->out_transfer_func,
1552 &dpp->regamma_params, false)) {
1553 dpp->funcs->dpp_program_regamma_pwl(
1554 dpp,
1555 &dpp->regamma_params, OPP_REGAMMA_USER);
1556 } else
1557 dpp->funcs->dpp_program_regamma_pwl(dpp, NULL, OPP_REGAMMA_BYPASS);
1559 if (stream != NULL && stream->ctx != NULL &&
1560 stream->out_transfer_func != NULL) {
1561 log_tf(stream->ctx,
1562 stream->out_transfer_func,
1563 dpp->regamma_params.hw_points_num);
1566 return true;
1569 void dcn10_pipe_control_lock(
1570 struct dc *dc,
1571 struct pipe_ctx *pipe,
1572 bool lock)
1574 struct dce_hwseq *hws = dc->hwseq;
1576 /* use TG master update lock to lock everything on the TG
1577 * therefore only top pipe need to lock
1578 */
1579 if (pipe->top_pipe)
1580 return;
1582 if (dc->debug.sanity_checks)
1583 hws->funcs.verify_allow_pstate_change_high(dc);
1585 if (lock)
1586 pipe->stream_res.tg->funcs->lock(pipe->stream_res.tg);
1587 else
1588 pipe->stream_res.tg->funcs->unlock(pipe->stream_res.tg);
1590 if (dc->debug.sanity_checks)
1591 hws->funcs.verify_allow_pstate_change_high(dc);
1594 static bool wait_for_reset_trigger_to_occur(
1595 struct dc_context *dc_ctx,
1596 struct timing_generator *tg)
1598 bool rc = false;
1600 /* To avoid endless loop we wait at most
1601 * frames_to_wait_on_triggered_reset frames for the reset to occur. */
1602 const uint32_t frames_to_wait_on_triggered_reset = 10;
1603 int i;
1605 for (i = 0; i < frames_to_wait_on_triggered_reset; i++) {
1607 if (!tg->funcs->is_counter_moving(tg)) {
1608 DC_ERROR("TG counter is not moving!\n");
1609 break;
1612 if (tg->funcs->did_triggered_reset_occur(tg)) {
1613 rc = true;
1614 /* usually occurs at i=1 */
1615 DC_SYNC_INFO("GSL: reset occurred at wait count: %d\n",
1616 i);
1617 break;
1620 /* Wait for one frame. */
1621 tg->funcs->wait_for_state(tg, CRTC_STATE_VACTIVE);
1622 tg->funcs->wait_for_state(tg, CRTC_STATE_VBLANK);
1625 if (false == rc)
1626 DC_ERROR("GSL: Timeout on reset trigger!\n");
1628 return rc;
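/*
 * Synchronize a group of OTGs: arm every pipe in the group (other than
 * the first) to reset on the first pipe's OTG, wait for the reset to
 * occur on one representative pipe, then disarm the triggers.
 */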
1631 void dcn10_enable_timing_synchronization(
1632 struct dc *dc,
1633 int group_index,
1634 int group_size,
1635 struct pipe_ctx *grouped_pipes[])
1637 struct dc_context *dc_ctx = dc->ctx;
1638 int i;
1640 DC_SYNC_INFO("Setting up OTG reset trigger\n");
1642 for (i = 1; i < group_size; i++)
1643 grouped_pipes[i]->stream_res.tg->funcs->enable_reset_trigger(
1644 grouped_pipes[i]->stream_res.tg,
1645 grouped_pipes[0]->stream_res.tg->inst);
1647 DC_SYNC_INFO("Waiting for trigger\n");
1649 /* Need to get only check 1 pipe for having reset as all the others are
1650 * synchronized. Look at last pipe programmed to reset.
1651 */
1653 wait_for_reset_trigger_to_occur(dc_ctx, grouped_pipes[1]->stream_res.tg);
1654 for (i = 1; i < group_size; i++)
1655 grouped_pipes[i]->stream_res.tg->funcs->disable_reset_trigger(
1656 grouped_pipes[i]->stream_res.tg);
1658 DC_SYNC_INFO("Sync complete\n");
1661 void dcn10_enable_per_frame_crtc_position_reset(
1662 struct dc *dc,
1663 int group_size,
1664 struct pipe_ctx *grouped_pipes[])
1666 struct dc_context *dc_ctx = dc->ctx;
1667 int i;
1669 DC_SYNC_INFO("Setting up\n");
1670 for (i = 0; i < group_size; i++)
1671 if (grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset)
1672 grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset(
1673 grouped_pipes[i]->stream_res.tg,
1675 &grouped_pipes[i]->stream->triggered_crtc_reset);
1677 DC_SYNC_INFO("Waiting for trigger\n");
1679 for (i = 0; i < group_size; i++)
1680 wait_for_reset_trigger_to_occur(dc_ctx, grouped_pipes[i]->stream_res.tg);
1682 DC_SYNC_INFO("Multi-display sync is complete\n");
1685 /*static void print_rq_dlg_ttu(
1686 struct dc *dc,
1687 struct pipe_ctx *pipe_ctx)
1689 DC_LOG_BANDWIDTH_CALCS(dc->ctx->logger,
1690 "\n============== DML TTU Output parameters [%d] ==============\n"
1691 "qos_level_low_wm: %d, \n"
1692 "qos_level_high_wm: %d, \n"
1693 "min_ttu_vblank: %d, \n"
1694 "qos_level_flip: %d, \n"
1695 "refcyc_per_req_delivery_l: %d, \n"
1696 "qos_level_fixed_l: %d, \n"
1697 "qos_ramp_disable_l: %d, \n"
1698 "refcyc_per_req_delivery_pre_l: %d, \n"
1699 "refcyc_per_req_delivery_c: %d, \n"
1700 "qos_level_fixed_c: %d, \n"
1701 "qos_ramp_disable_c: %d, \n"
1702 "refcyc_per_req_delivery_pre_c: %d\n"
1703 "=============================================================\n",
1704 pipe_ctx->pipe_idx,
1705 pipe_ctx->ttu_regs.qos_level_low_wm,
1706 pipe_ctx->ttu_regs.qos_level_high_wm,
1707 pipe_ctx->ttu_regs.min_ttu_vblank,
1708 pipe_ctx->ttu_regs.qos_level_flip,
1709 pipe_ctx->ttu_regs.refcyc_per_req_delivery_l,
1710 pipe_ctx->ttu_regs.qos_level_fixed_l,
1711 pipe_ctx->ttu_regs.qos_ramp_disable_l,
1712 pipe_ctx->ttu_regs.refcyc_per_req_delivery_pre_l,
1713 pipe_ctx->ttu_regs.refcyc_per_req_delivery_c,
1714 pipe_ctx->ttu_regs.qos_level_fixed_c,
1715 pipe_ctx->ttu_regs.qos_ramp_disable_c,
1716 pipe_ctx->ttu_regs.refcyc_per_req_delivery_pre_c
1719 DC_LOG_BANDWIDTH_CALCS(dc->ctx->logger,
1720 "\n============== DML DLG Output parameters [%d] ==============\n"
1721 "refcyc_h_blank_end: %d, \n"
1722 "dlg_vblank_end: %d, \n"
1723 "min_dst_y_next_start: %d, \n"
1724 "refcyc_per_htotal: %d, \n"
1725 "refcyc_x_after_scaler: %d, \n"
1726 "dst_y_after_scaler: %d, \n"
1727 "dst_y_prefetch: %d, \n"
1728 "dst_y_per_vm_vblank: %d, \n"
1729 "dst_y_per_row_vblank: %d, \n"
1730 "ref_freq_to_pix_freq: %d, \n"
1731 "vratio_prefetch: %d, \n"
1732 "refcyc_per_pte_group_vblank_l: %d, \n"
1733 "refcyc_per_meta_chunk_vblank_l: %d, \n"
1734 "dst_y_per_pte_row_nom_l: %d, \n"
1735 "refcyc_per_pte_group_nom_l: %d, \n",
1736 pipe_ctx->pipe_idx,
1737 pipe_ctx->dlg_regs.refcyc_h_blank_end,
1738 pipe_ctx->dlg_regs.dlg_vblank_end,
1739 pipe_ctx->dlg_regs.min_dst_y_next_start,
1740 pipe_ctx->dlg_regs.refcyc_per_htotal,
1741 pipe_ctx->dlg_regs.refcyc_x_after_scaler,
1742 pipe_ctx->dlg_regs.dst_y_after_scaler,
1743 pipe_ctx->dlg_regs.dst_y_prefetch,
1744 pipe_ctx->dlg_regs.dst_y_per_vm_vblank,
1745 pipe_ctx->dlg_regs.dst_y_per_row_vblank,
1746 pipe_ctx->dlg_regs.ref_freq_to_pix_freq,
1747 pipe_ctx->dlg_regs.vratio_prefetch,
1748 pipe_ctx->dlg_regs.refcyc_per_pte_group_vblank_l,
1749 pipe_ctx->dlg_regs.refcyc_per_meta_chunk_vblank_l,
1750 pipe_ctx->dlg_regs.dst_y_per_pte_row_nom_l,
1751 pipe_ctx->dlg_regs.refcyc_per_pte_group_nom_l
1754 DC_LOG_BANDWIDTH_CALCS(dc->ctx->logger,
1755 "\ndst_y_per_meta_row_nom_l: %d, \n"
1756 "refcyc_per_meta_chunk_nom_l: %d, \n"
1757 "refcyc_per_line_delivery_pre_l: %d, \n"
1758 "refcyc_per_line_delivery_l: %d, \n"
1759 "vratio_prefetch_c: %d, \n"
1760 "refcyc_per_pte_group_vblank_c: %d, \n"
1761 "refcyc_per_meta_chunk_vblank_c: %d, \n"
1762 "dst_y_per_pte_row_nom_c: %d, \n"
1763 "refcyc_per_pte_group_nom_c: %d, \n"
1764 "dst_y_per_meta_row_nom_c: %d, \n"
1765 "refcyc_per_meta_chunk_nom_c: %d, \n"
1766 "refcyc_per_line_delivery_pre_c: %d, \n"
1767 "refcyc_per_line_delivery_c: %d \n"
1768 "========================================================\n",
1769 pipe_ctx->dlg_regs.dst_y_per_meta_row_nom_l,
1770 pipe_ctx->dlg_regs.refcyc_per_meta_chunk_nom_l,
1771 pipe_ctx->dlg_regs.refcyc_per_line_delivery_pre_l,
1772 pipe_ctx->dlg_regs.refcyc_per_line_delivery_l,
1773 pipe_ctx->dlg_regs.vratio_prefetch_c,
1774 pipe_ctx->dlg_regs.refcyc_per_pte_group_vblank_c,
1775 pipe_ctx->dlg_regs.refcyc_per_meta_chunk_vblank_c,
1776 pipe_ctx->dlg_regs.dst_y_per_pte_row_nom_c,
1777 pipe_ctx->dlg_regs.refcyc_per_pte_group_nom_c,
1778 pipe_ctx->dlg_regs.dst_y_per_meta_row_nom_c,
1779 pipe_ctx->dlg_regs.refcyc_per_meta_chunk_nom_c,
1780 pipe_ctx->dlg_regs.refcyc_per_line_delivery_pre_c,
1781 pipe_ctx->dlg_regs.refcyc_per_line_delivery_c
1784 DC_LOG_BANDWIDTH_CALCS(dc->ctx->logger,
1785 "\n============== DML RQ Output parameters [%d] ==============\n"
1786 "chunk_size: %d \n"
1787 "min_chunk_size: %d \n"
1788 "meta_chunk_size: %d \n"
1789 "min_meta_chunk_size: %d \n"
1790 "dpte_group_size: %d \n"
1791 "mpte_group_size: %d \n"
1792 "swath_height: %d \n"
1793 "pte_row_height_linear: %d \n"
1794 "========================================================\n",
1795 pipe_ctx->pipe_idx,
1796 pipe_ctx->rq_regs.rq_regs_l.chunk_size,
1797 pipe_ctx->rq_regs.rq_regs_l.min_chunk_size,
1798 pipe_ctx->rq_regs.rq_regs_l.meta_chunk_size,
1799 pipe_ctx->rq_regs.rq_regs_l.min_meta_chunk_size,
1800 pipe_ctx->rq_regs.rq_regs_l.dpte_group_size,
1801 pipe_ctx->rq_regs.rq_regs_l.mpte_group_size,
1802 pipe_ctx->rq_regs.rq_regs_l.swath_height,
1803 pipe_ctx->rq_regs.rq_regs_l.pte_row_height_linear
1808 static void mmhub_read_vm_system_aperture_settings(struct dcn10_hubp *hubp1,
1809 struct vm_system_aperture_param *apt,
1810 struct dce_hwseq *hws)
1812 PHYSICAL_ADDRESS_LOC physical_page_number;
1813 uint32_t logical_addr_low;
1814 uint32_t logical_addr_high;
1816 REG_GET(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
1817 PHYSICAL_PAGE_NUMBER_MSB, &physical_page_number.high_part);
1818 REG_GET(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
1819 PHYSICAL_PAGE_NUMBER_LSB, &physical_page_number.low_part);
1821 REG_GET(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
1822 LOGICAL_ADDR, &logical_addr_low);
1824 REG_GET(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
1825 LOGICAL_ADDR, &logical_addr_high);
1827 apt->sys_default.quad_part = physical_page_number.quad_part << 12;
1828 apt->sys_low.quad_part = (int64_t)logical_addr_low << 18;
1829 apt->sys_high.quad_part = (int64_t)logical_addr_high << 18;
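/*
 * Units implied by the shifts above (illustrative reading, not stated in
 * this file): PHYSICAL_PAGE_NUMBER_* appears to hold a 4 KiB page number,
 * so << 12 yields a byte address, while the LOW/HIGH logical addresses
 * appear to be programmed in 256 KiB units, hence << 18. For example, a
 * hypothetical logical_addr_low of 0x10 would place sys_low at
 * 0x10 << 18 = 4 MiB.
 */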
1832 /* Temporarily read the settings; in the future the values will come from KMD directly */
1833 static void mmhub_read_vm_context0_settings(struct dcn10_hubp *hubp1,
1834 struct vm_context0_param *vm0,
1835 struct dce_hwseq *hws)
1837 PHYSICAL_ADDRESS_LOC fb_base;
1838 PHYSICAL_ADDRESS_LOC fb_offset;
1839 uint32_t fb_base_value;
1840 uint32_t fb_offset_value;
1842 REG_GET(DCHUBBUB_SDPIF_FB_BASE, SDPIF_FB_BASE, &fb_base_value);
1843 REG_GET(DCHUBBUB_SDPIF_FB_OFFSET, SDPIF_FB_OFFSET, &fb_offset_value);
1845 REG_GET(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
1846 PAGE_DIRECTORY_ENTRY_HI32, &vm0->pte_base.high_part);
1847 REG_GET(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
1848 PAGE_DIRECTORY_ENTRY_LO32, &vm0->pte_base.low_part);
1850 REG_GET(VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
1851 LOGICAL_PAGE_NUMBER_HI4, &vm0->pte_start.high_part);
1852 REG_GET(VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
1853 LOGICAL_PAGE_NUMBER_LO32, &vm0->pte_start.low_part);
1855 REG_GET(VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
1856 LOGICAL_PAGE_NUMBER_HI4, &vm0->pte_end.high_part);
1857 REG_GET(VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
1858 LOGICAL_PAGE_NUMBER_LO32, &vm0->pte_end.low_part);
1860 REG_GET(VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
1861 PHYSICAL_PAGE_ADDR_HI4, &vm0->fault_default.high_part);
1862 REG_GET(VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
1863 PHYSICAL_PAGE_ADDR_LO32, &vm0->fault_default.low_part);
1866 * The values in VM_CONTEXT0_PAGE_TABLE_BASE_ADDR are in UMA space.
1867 * Therefore we need to do
1868 * DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR = VM_CONTEXT0_PAGE_TABLE_BASE_ADDR
1869 * - DCHUBBUB_SDPIF_FB_OFFSET + DCHUBBUB_SDPIF_FB_BASE
1871 fb_base.quad_part = (uint64_t)fb_base_value << 24;
1872 fb_offset.quad_part = (uint64_t)fb_offset_value << 24;
1873 vm0->pte_base.quad_part += fb_base.quad_part;
1874 vm0->pte_base.quad_part -= fb_offset.quad_part;
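/*
 * Worked example of the UMA -> DCN translation above, using hypothetical
 * register values: fb_base_value = 0x10 and fb_offset_value = 0x4 are in
 * 16 MiB units (<< 24), so fb_base = 256 MiB and fb_offset = 64 MiB. A
 * pte_base read as 0 then becomes 0 + 256 MiB - 64 MiB = 192 MiB, i.e.
 * VM_CONTEXT0_PAGE_TABLE_BASE_ADDR - DCHUBBUB_SDPIF_FB_OFFSET
 * + DCHUBBUB_SDPIF_FB_BASE as described in the comment above.
 */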
1878 void dcn10_program_pte_vm(struct dce_hwseq *hws, struct hubp *hubp)
1880 struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
1881 struct vm_system_aperture_param apt = { {{ 0 } } };
1882 struct vm_context0_param vm0 = { { { 0 } } };
1884 mmhub_read_vm_system_aperture_settings(hubp1, &apt, hws);
1885 mmhub_read_vm_context0_settings(hubp1, &vm0, hws);
1887 hubp->funcs->hubp_set_vm_system_aperture_settings(hubp, &apt);
1888 hubp->funcs->hubp_set_vm_context0_settings(hubp, &vm0);
1891 static void dcn10_enable_plane(
1892 struct dc *dc,
1893 struct pipe_ctx *pipe_ctx,
1894 struct dc_state *context)
1896 struct dce_hwseq *hws = dc->hwseq;
1898 if (dc->debug.sanity_checks) {
1899 hws->funcs.verify_allow_pstate_change_high(dc);
1902 undo_DEGVIDCN10_253_wa(dc);
1904 power_on_plane(dc->hwseq,
1905 pipe_ctx->plane_res.hubp->inst);
1907 /* enable DCFCLK for the current DCHUB */
1908 pipe_ctx->plane_res.hubp->funcs->hubp_clk_cntl(pipe_ctx->plane_res.hubp, true);
1910 /* make sure OPP_PIPE_CLOCK_EN = 1 */
1911 pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
1912 pipe_ctx->stream_res.opp,
1913 true);
1915 /* TODO: enable/disable in dm as per update type.
1916 if (plane_state) {
1917 DC_LOG_DC(dc->ctx->logger,
1918 "Pipe:%d 0x%x: addr hi:0x%x, "
1919 "addr low:0x%x, "
1920 "src: %d, %d, %d,"
1921 " %d; dst: %d, %d, %d, %d;\n",
1922 pipe_ctx->pipe_idx,
1923 plane_state,
1924 plane_state->address.grph.addr.high_part,
1925 plane_state->address.grph.addr.low_part,
1926 plane_state->src_rect.x,
1927 plane_state->src_rect.y,
1928 plane_state->src_rect.width,
1929 plane_state->src_rect.height,
1930 plane_state->dst_rect.x,
1931 plane_state->dst_rect.y,
1932 plane_state->dst_rect.width,
1933 plane_state->dst_rect.height);
1935 DC_LOG_DC(dc->ctx->logger,
1936 "Pipe %d: width, height, x, y format:%d\n"
1937 "viewport:%d, %d, %d, %d\n"
1938 "recout: %d, %d, %d, %d\n",
1939 pipe_ctx->pipe_idx,
1940 plane_state->format,
1941 pipe_ctx->plane_res.scl_data.viewport.width,
1942 pipe_ctx->plane_res.scl_data.viewport.height,
1943 pipe_ctx->plane_res.scl_data.viewport.x,
1944 pipe_ctx->plane_res.scl_data.viewport.y,
1945 pipe_ctx->plane_res.scl_data.recout.width,
1946 pipe_ctx->plane_res.scl_data.recout.height,
1947 pipe_ctx->plane_res.scl_data.recout.x,
1948 pipe_ctx->plane_res.scl_data.recout.y);
1949 print_rq_dlg_ttu(dc, pipe_ctx);
1952 if (dc->config.gpu_vm_support)
1953 dcn10_program_pte_vm(hws, pipe_ctx->plane_res.hubp);
1955 if (dc->debug.sanity_checks) {
1956 hws->funcs.verify_allow_pstate_change_high(dc);
1960 void dcn10_program_gamut_remap(struct pipe_ctx *pipe_ctx)
1962 int i = 0;
1963 struct dpp_grph_csc_adjustment adjust;
1964 memset(&adjust, 0, sizeof(adjust));
1965 adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
1968 if (pipe_ctx->stream->gamut_remap_matrix.enable_remap == true) {
1969 adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
1970 for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
1971 adjust.temperature_matrix[i] =
1972 pipe_ctx->stream->gamut_remap_matrix.matrix[i];
1975 pipe_ctx->plane_res.dpp->funcs->dpp_set_gamut_remap(pipe_ctx->plane_res.dpp, &adjust);
1979 static bool dcn10_is_rear_mpo_fix_required(struct pipe_ctx *pipe_ctx, enum dc_color_space colorspace)
1981 if (pipe_ctx->plane_state && pipe_ctx->plane_state->layer_index > 0 && is_rgb_cspace(colorspace)) {
1982 if (pipe_ctx->top_pipe) {
1983 struct pipe_ctx *top = pipe_ctx->top_pipe;
1985 while (top->top_pipe)
1986 top = top->top_pipe; // Traverse to top pipe_ctx
1987 if (top->plane_state && top->plane_state->layer_index == 0)
1988 return true; // Front MPO plane not hidden
1991 return false;
1994 static void dcn10_set_csc_adjustment_rgb_mpo_fix(struct pipe_ctx *pipe_ctx, uint16_t *matrix)
1996 // Override rear plane RGB bias to fix MPO brightness
1997 uint16_t rgb_bias = matrix[3];
1999 matrix[3] = 0;
2000 matrix[7] = 0;
2001 matrix[11] = 0;
2002 pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment(pipe_ctx->plane_res.dpp, matrix);
2003 matrix[3] = rgb_bias;
2004 matrix[7] = rgb_bias;
2005 matrix[11] = rgb_bias;
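/*
 * Layout assumed by the indices above (illustrative): the OCSC matrix is
 * treated as a 3x4 row-major table, one row per output channel, with the
 * bias/offset term in the last column:
 *   [ coef  coef  coef  bias ]  -> matrix[0..3]
 *   [ coef  coef  coef  bias ]  -> matrix[4..7]
 *   [ coef  coef  coef  bias ]  -> matrix[8..11]
 * Zeroing matrix[3/7/11] therefore strips only the brightness offset from
 * the rear plane, and the saved value is restored after programming.
 */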
2008 void dcn10_program_output_csc(struct dc *dc,
2009 struct pipe_ctx *pipe_ctx,
2010 enum dc_color_space colorspace,
2011 uint16_t *matrix,
2012 int opp_id)
2014 if (pipe_ctx->stream->csc_color_matrix.enable_adjustment == true) {
2015 if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment != NULL) {
2017 /* MPO is broken with RGB color spaces when the OCSC matrix
2018 * brightness offset is >= 0 on DCN1, because the OCSC sits before the MPC:
2019 * blending adds the offsets from both the front and rear planes to the rear plane.
2021 * The fix is to set the RGB bias to 0 on the rear plane; the top plane's
2022 * black-value pixels then add the offset instead of rear + front.
2025 int16_t rgb_bias = matrix[3];
2026 // matrix[3/7/11] are all the same offset value
2028 if (rgb_bias > 0 && dcn10_is_rear_mpo_fix_required(pipe_ctx, colorspace)) {
2029 dcn10_set_csc_adjustment_rgb_mpo_fix(pipe_ctx, matrix);
2030 } else {
2031 pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment(pipe_ctx->plane_res.dpp, matrix);
2034 } else {
2035 if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_default != NULL)
2036 pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_default(pipe_ctx->plane_res.dpp, colorspace);
2040 void dcn10_get_surface_visual_confirm_color(
2041 const struct pipe_ctx *pipe_ctx,
2042 struct tg_color *color)
2044 uint32_t color_value = MAX_TG_COLOR_VALUE;
2046 switch (pipe_ctx->plane_res.scl_data.format) {
2047 case PIXEL_FORMAT_ARGB8888:
2048 /* set border color to red */
2049 color->color_r_cr = color_value;
2050 break;
2052 case PIXEL_FORMAT_ARGB2101010:
2053 /* set border color to blue */
2054 color->color_b_cb = color_value;
2055 break;
2056 case PIXEL_FORMAT_420BPP8:
2057 /* set border color to green */
2058 color->color_g_y = color_value;
2059 break;
2060 case PIXEL_FORMAT_420BPP10:
2061 /* set border color to yellow */
2062 color->color_g_y = color_value;
2063 color->color_r_cr = color_value;
2064 break;
2065 case PIXEL_FORMAT_FP16:
2066 /* set border color to white */
2067 color->color_r_cr = color_value;
2068 color->color_b_cb = color_value;
2069 color->color_g_y = color_value;
2070 break;
2071 default:
2072 break;
2076 void dcn10_get_hdr_visual_confirm_color(
2077 struct pipe_ctx *pipe_ctx,
2078 struct tg_color *color)
2080 uint32_t color_value = MAX_TG_COLOR_VALUE;
2082 // Determine the overscan color based on the top-most (desktop) plane's context
2083 struct pipe_ctx *top_pipe_ctx = pipe_ctx;
2085 while (top_pipe_ctx->top_pipe != NULL)
2086 top_pipe_ctx = top_pipe_ctx->top_pipe;
2088 switch (top_pipe_ctx->plane_res.scl_data.format) {
2089 case PIXEL_FORMAT_ARGB2101010:
2090 if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_PQ) {
2091 /* HDR10, ARGB2101010 - set border color to red */
2092 color->color_r_cr = color_value;
2094 break;
2095 case PIXEL_FORMAT_FP16:
2096 if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_PQ) {
2097 /* HDR10, FP16 - set border color to blue */
2098 color->color_b_cb = color_value;
2099 } else if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22) {
2100 /* FreeSync 2 HDR - set border color to green */
2101 color->color_g_y = color_value;
2103 break;
2104 default:
2105 /* SDR - set border color to gray */
2106 color->color_r_cr = color_value/2;
2107 color->color_b_cb = color_value/2;
2108 color->color_g_y = color_value/2;
2109 break;
2113 static void dcn10_update_dpp(struct dpp *dpp, struct dc_plane_state *plane_state)
2115 struct dc_bias_and_scale bns_params = {0};
2117 // program the input csc
2118 dpp->funcs->dpp_setup(dpp,
2119 plane_state->format,
2120 EXPANSION_MODE_ZERO,
2121 plane_state->input_csc_color_matrix,
2122 plane_state->color_space,
2123 NULL);
2125 //set scale and bias registers
2126 build_prescale_params(&bns_params, plane_state);
2127 if (dpp->funcs->dpp_program_bias_and_scale)
2128 dpp->funcs->dpp_program_bias_and_scale(dpp, &bns_params);
2131 void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
2133 struct dce_hwseq *hws = dc->hwseq;
2134 struct hubp *hubp = pipe_ctx->plane_res.hubp;
2135 struct mpcc_blnd_cfg blnd_cfg = {{0}};
2136 bool per_pixel_alpha = pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
2137 int mpcc_id;
2138 struct mpcc *new_mpcc;
2139 struct mpc *mpc = dc->res_pool->mpc;
2140 struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params);
2142 if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR) {
2143 hws->funcs.get_hdr_visual_confirm_color(
2144 pipe_ctx, &blnd_cfg.black_color);
2145 } else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE) {
2146 hws->funcs.get_surface_visual_confirm_color(
2147 pipe_ctx, &blnd_cfg.black_color);
2148 } else {
2149 color_space_to_black_color(
2150 dc, pipe_ctx->stream->output_color_space,
2151 &blnd_cfg.black_color);
2154 if (per_pixel_alpha)
2155 blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
2156 else
2157 blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
2159 blnd_cfg.overlap_only = false;
2160 blnd_cfg.global_gain = 0xff;
2162 if (pipe_ctx->plane_state->global_alpha)
2163 blnd_cfg.global_alpha = pipe_ctx->plane_state->global_alpha_value;
2164 else
2165 blnd_cfg.global_alpha = 0xff;
2167 /* DCN1.0 has output CM before MPC which seems to screw with
2168 * pre-multiplied alpha.
2170 blnd_cfg.pre_multiplied_alpha = is_rgb_cspace(
2171 pipe_ctx->stream->output_color_space)
2172 && per_pixel_alpha;
2176 * TODO: remove hack
2177 * Note: currently there is a bug in init_hw such that
2178 * on resume from hibernate, BIOS sets up MPCC0, and
2179 * we do mpcc_remove but the mpcc cannot go to idle
2180 * after remove. This causes us to pick mpcc1 here,
2181 * which causes a pstate hang for a yet-unknown reason.
2183 mpcc_id = hubp->inst;
2185 /* If there is no full update, we don't need to touch the MPC tree */
2186 if (!pipe_ctx->plane_state->update_flags.bits.full_update) {
2187 mpc->funcs->update_blending(mpc, &blnd_cfg, mpcc_id);
2188 return;
2191 /* check if this MPCC is already being used */
2192 new_mpcc = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, mpcc_id);
2193 /* remove MPCC if being used */
2194 if (new_mpcc != NULL)
2195 mpc->funcs->remove_mpcc(mpc, mpc_tree_params, new_mpcc);
2196 else
2197 if (dc->debug.sanity_checks)
2198 mpc->funcs->assert_mpcc_idle_before_connect(
2199 dc->res_pool->mpc, mpcc_id);
2201 /* Call MPC to insert new plane */
2202 new_mpcc = mpc->funcs->insert_plane(dc->res_pool->mpc,
2203 mpc_tree_params,
2204 &blnd_cfg,
2205 NULL,
2206 NULL,
2207 hubp->inst,
2208 mpcc_id);
2210 ASSERT(new_mpcc != NULL);
2212 hubp->opp_id = pipe_ctx->stream_res.opp->inst;
2213 hubp->mpcc_id = mpcc_id;
2216 static void update_scaler(struct pipe_ctx *pipe_ctx)
2218 bool per_pixel_alpha =
2219 pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
2221 pipe_ctx->plane_res.scl_data.lb_params.alpha_en = per_pixel_alpha;
2222 pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_30BPP;
2223 /* scaler configuration */
2224 pipe_ctx->plane_res.dpp->funcs->dpp_set_scaler(
2225 pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data);
2228 static void dcn10_update_dchubp_dpp(
2229 struct dc *dc,
2230 struct pipe_ctx *pipe_ctx,
2231 struct dc_state *context)
2233 struct dce_hwseq *hws = dc->hwseq;
2234 struct hubp *hubp = pipe_ctx->plane_res.hubp;
2235 struct dpp *dpp = pipe_ctx->plane_res.dpp;
2236 struct dc_plane_state *plane_state = pipe_ctx->plane_state;
2237 struct plane_size size = plane_state->plane_size;
2238 unsigned int compat_level = 0;
2240 /* Depending on the DML calculation, the DPP clock value may change dynamically */
2241 /* If the requested max DPP clock is lower than the current dispclk, there is
2242 * no need to divide by 2
2244 if (plane_state->update_flags.bits.full_update) {
2245 bool should_divided_by_2 = context->bw_ctx.bw.dcn.clk.dppclk_khz <=
2246 dc->clk_mgr->clks.dispclk_khz / 2;
2248 dpp->funcs->dpp_dppclk_control(
2249 dpp,
2250 should_divided_by_2,
2251 true);
2253 if (dc->res_pool->dccg)
2254 dc->res_pool->dccg->funcs->update_dpp_dto(
2255 dc->res_pool->dccg,
2256 dpp->inst,
2257 pipe_ctx->plane_res.bw.dppclk_khz);
2258 else
2259 dc->clk_mgr->clks.dppclk_khz = should_divided_by_2 ?
2260 dc->clk_mgr->clks.dispclk_khz / 2 :
2261 dc->clk_mgr->clks.dispclk_khz;
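/*
 * Example of the divide-by-2 decision above (hypothetical numbers): with
 * dispclk_khz = 600000 and a requested dppclk_khz of 250000, the request
 * is <= dispclk / 2, so should_divided_by_2 is true and the DPP is
 * configured to run from dispclk / 2 (300000 kHz here).
 */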
2264 /* TODO: Need an input parameter to tell which OTG the current DCHUB pipe is tied to.
2265 * VTG is within DCHUBBUB, which is a common block shared by each pipe HUBP.
2266 * VTG has a 1:1 mapping with OTG. Each pipe HUBP will select which VTG to use.
2268 if (plane_state->update_flags.bits.full_update) {
2269 hubp->funcs->hubp_vtg_sel(hubp, pipe_ctx->stream_res.tg->inst);
2271 hubp->funcs->hubp_setup(
2272 hubp,
2273 &pipe_ctx->dlg_regs,
2274 &pipe_ctx->ttu_regs,
2275 &pipe_ctx->rq_regs,
2276 &pipe_ctx->pipe_dlg_param);
2277 hubp->funcs->hubp_setup_interdependent(
2278 hubp,
2279 &pipe_ctx->dlg_regs,
2280 &pipe_ctx->ttu_regs);
2283 size.surface_size = pipe_ctx->plane_res.scl_data.viewport;
2285 if (plane_state->update_flags.bits.full_update ||
2286 plane_state->update_flags.bits.bpp_change)
2287 dcn10_update_dpp(dpp, plane_state);
2289 if (plane_state->update_flags.bits.full_update ||
2290 plane_state->update_flags.bits.per_pixel_alpha_change ||
2291 plane_state->update_flags.bits.global_alpha_change)
2292 hws->funcs.update_mpcc(dc, pipe_ctx);
2294 if (plane_state->update_flags.bits.full_update ||
2295 plane_state->update_flags.bits.per_pixel_alpha_change ||
2296 plane_state->update_flags.bits.global_alpha_change ||
2297 plane_state->update_flags.bits.scaling_change ||
2298 plane_state->update_flags.bits.position_change) {
2299 update_scaler(pipe_ctx);
2302 if (plane_state->update_flags.bits.full_update ||
2303 plane_state->update_flags.bits.scaling_change ||
2304 plane_state->update_flags.bits.position_change) {
2305 hubp->funcs->mem_program_viewport(
2306 hubp,
2307 &pipe_ctx->plane_res.scl_data.viewport,
2308 &pipe_ctx->plane_res.scl_data.viewport_c);
2311 if (pipe_ctx->stream->cursor_attributes.address.quad_part != 0) {
2312 dc->hwss.set_cursor_position(pipe_ctx);
2313 dc->hwss.set_cursor_attribute(pipe_ctx);
2315 if (dc->hwss.set_cursor_sdr_white_level)
2316 dc->hwss.set_cursor_sdr_white_level(pipe_ctx);
2319 if (plane_state->update_flags.bits.full_update) {
2320 /*gamut remap*/
2321 dc->hwss.program_gamut_remap(pipe_ctx);
2323 dc->hwss.program_output_csc(dc,
2324 pipe_ctx,
2325 pipe_ctx->stream->output_color_space,
2326 pipe_ctx->stream->csc_color_matrix.matrix,
2327 pipe_ctx->stream_res.opp->inst);
2330 if (plane_state->update_flags.bits.full_update ||
2331 plane_state->update_flags.bits.pixel_format_change ||
2332 plane_state->update_flags.bits.horizontal_mirror_change ||
2333 plane_state->update_flags.bits.rotation_change ||
2334 plane_state->update_flags.bits.swizzle_change ||
2335 plane_state->update_flags.bits.dcc_change ||
2336 plane_state->update_flags.bits.bpp_change ||
2337 plane_state->update_flags.bits.scaling_change ||
2338 plane_state->update_flags.bits.plane_size_change) {
2339 hubp->funcs->hubp_program_surface_config(
2340 hubp,
2341 plane_state->format,
2342 &plane_state->tiling_info,
2343 &size,
2344 plane_state->rotation,
2345 &plane_state->dcc,
2346 plane_state->horizontal_mirror,
2347 compat_level);
2350 hubp->power_gated = false;
2352 hws->funcs.update_plane_addr(dc, pipe_ctx);
2354 if (is_pipe_tree_visible(pipe_ctx))
2355 hubp->funcs->set_blank(hubp, false);
2358 void dcn10_blank_pixel_data(
2359 struct dc *dc,
2360 struct pipe_ctx *pipe_ctx,
2361 bool blank)
2363 enum dc_color_space color_space;
2364 struct tg_color black_color = {0};
2365 struct stream_resource *stream_res = &pipe_ctx->stream_res;
2366 struct dc_stream_state *stream = pipe_ctx->stream;
2368 /* program otg blank color */
2369 color_space = stream->output_color_space;
2370 color_space_to_black_color(dc, color_space, &black_color);
2373 * The way 420 is packed, 2 channels carry Y component, 1 channel
2374 * alternates between Cb and Cr, so both channels need the pixel
2375 * value for Y
2377 if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
2378 black_color.color_r_cr = black_color.color_g_y;
2381 if (stream_res->tg->funcs->set_blank_color)
2382 stream_res->tg->funcs->set_blank_color(
2383 stream_res->tg,
2384 &black_color);
2386 if (!blank) {
2387 if (stream_res->tg->funcs->set_blank)
2388 stream_res->tg->funcs->set_blank(stream_res->tg, blank);
2389 if (stream_res->abm) {
2390 stream_res->abm->funcs->set_pipe(stream_res->abm, stream_res->tg->inst + 1);
2391 stream_res->abm->funcs->set_abm_level(stream_res->abm, stream->abm_level);
2393 } else if (blank) {
2394 if (stream_res->abm)
2395 stream_res->abm->funcs->set_abm_immediate_disable(stream_res->abm);
2396 if (stream_res->tg->funcs->set_blank)
2397 stream_res->tg->funcs->set_blank(stream_res->tg, blank);
2401 void dcn10_set_hdr_multiplier(struct pipe_ctx *pipe_ctx)
2403 struct fixed31_32 multiplier = pipe_ctx->plane_state->hdr_mult;
2404 uint32_t hw_mult = 0x1f000; // 1.0 default multiplier
2405 struct custom_float_format fmt;
2407 fmt.exponenta_bits = 6;
2408 fmt.mantissa_bits = 12;
2409 fmt.sign = true;
2412 if (!dc_fixpt_eq(multiplier, dc_fixpt_from_int(0))) // check != 0
2413 convert_to_custom_float_format(multiplier, &fmt, &hw_mult);
2415 pipe_ctx->plane_res.dpp->funcs->dpp_set_hdr_multiplier(
2416 pipe_ctx->plane_res.dpp, hw_mult);
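/*
 * Sanity check of the 0x1f000 default above, assuming
 * convert_to_custom_float_format produces an IEEE-754-style value for
 * fmt = {sign:1, exp:6, mantissa:12}: 0x1f000 >> 12 gives an exponent
 * field of 0x1f (31), which equals the bias 2^(6-1) - 1, and the mantissa
 * is 0, so the encoded value is 1.0 x 2^0 = 1.0.
 */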
2419 void dcn10_program_pipe(
2420 struct dc *dc,
2421 struct pipe_ctx *pipe_ctx,
2422 struct dc_state *context)
2424 struct dce_hwseq *hws = dc->hwseq;
2426 if (pipe_ctx->plane_state->update_flags.bits.full_update)
2427 dcn10_enable_plane(dc, pipe_ctx, context);
2429 dcn10_update_dchubp_dpp(dc, pipe_ctx, context);
2431 hws->funcs.set_hdr_multiplier(pipe_ctx);
2433 if (pipe_ctx->plane_state->update_flags.bits.full_update ||
2434 pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
2435 pipe_ctx->plane_state->update_flags.bits.gamma_change)
2436 hws->funcs.set_input_transfer_func(dc, pipe_ctx, pipe_ctx->plane_state);
2438 /* dcn10_translate_regamma_to_hw_format takes 750us to finish,
2439 * so only do gamma programming for a full update.
2440 * TODO: This can be further optimized/cleaned up.
2441 * Always call this for now, since it does a memcmp internally before
2442 * doing the heavy calculation and programming.
2444 if (pipe_ctx->plane_state->update_flags.bits.full_update)
2445 hws->funcs.set_output_transfer_func(dc, pipe_ctx, pipe_ctx->stream);
2448 static void dcn10_program_all_pipe_in_tree(
2449 struct dc *dc,
2450 struct pipe_ctx *pipe_ctx,
2451 struct dc_state *context)
2453 struct dce_hwseq *hws = dc->hwseq;
2455 if (pipe_ctx->top_pipe == NULL) {
2456 bool blank = !is_pipe_tree_visible(pipe_ctx);
2458 pipe_ctx->stream_res.tg->funcs->program_global_sync(
2459 pipe_ctx->stream_res.tg,
2460 pipe_ctx->pipe_dlg_param.vready_offset,
2461 pipe_ctx->pipe_dlg_param.vstartup_start,
2462 pipe_ctx->pipe_dlg_param.vupdate_offset,
2463 pipe_ctx->pipe_dlg_param.vupdate_width);
2465 pipe_ctx->stream_res.tg->funcs->set_vtg_params(
2466 pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);
2468 if (hws->funcs.setup_vupdate_interrupt)
2469 hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx);
2471 hws->funcs.blank_pixel_data(dc, pipe_ctx, blank);
2474 if (pipe_ctx->plane_state != NULL)
2475 hws->funcs.program_pipe(dc, pipe_ctx, context);
2477 if (pipe_ctx->bottom_pipe != NULL && pipe_ctx->bottom_pipe != pipe_ctx)
2478 dcn10_program_all_pipe_in_tree(dc, pipe_ctx->bottom_pipe, context);
2481 static struct pipe_ctx *dcn10_find_top_pipe_for_stream(
2482 struct dc *dc,
2483 struct dc_state *context,
2484 const struct dc_stream_state *stream)
2486 int i;
2488 for (i = 0; i < dc->res_pool->pipe_count; i++) {
2489 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
2490 struct pipe_ctx *old_pipe_ctx =
2491 &dc->current_state->res_ctx.pipe_ctx[i];
2493 if (!pipe_ctx->plane_state && !old_pipe_ctx->plane_state)
2494 continue;
2496 if (pipe_ctx->stream != stream)
2497 continue;
2499 if (!pipe_ctx->top_pipe && !pipe_ctx->prev_odm_pipe)
2500 return pipe_ctx;
2502 return NULL;
2505 void dcn10_apply_ctx_for_surface(
2506 struct dc *dc,
2507 const struct dc_stream_state *stream,
2508 int num_planes,
2509 struct dc_state *context)
2511 struct dce_hwseq *hws = dc->hwseq;
2512 int i;
2513 struct timing_generator *tg;
2514 uint32_t underflow_check_delay_us;
2515 bool removed_pipe[4] = { false };
2516 bool interdependent_update = false;
2517 struct pipe_ctx *top_pipe_to_program =
2518 dcn10_find_top_pipe_for_stream(dc, context, stream);
2519 DC_LOGGER_INIT(dc->ctx->logger);
2521 if (!top_pipe_to_program)
2522 return;
2524 tg = top_pipe_to_program->stream_res.tg;
2526 interdependent_update = top_pipe_to_program->plane_state &&
2527 top_pipe_to_program->plane_state->update_flags.bits.full_update;
2529 underflow_check_delay_us = dc->debug.underflow_assert_delay_us;
2531 if (underflow_check_delay_us != 0xFFFFFFFF && hws->funcs.did_underflow_occur)
2532 ASSERT(hws->funcs.did_underflow_occur(dc, top_pipe_to_program));
2534 if (interdependent_update)
2535 dcn10_lock_all_pipes(dc, context, true);
2536 else
2537 dcn10_pipe_control_lock(dc, top_pipe_to_program, true);
2539 if (underflow_check_delay_us != 0xFFFFFFFF)
2540 udelay(underflow_check_delay_us);
2542 if (underflow_check_delay_us != 0xFFFFFFFF && hws->funcs.did_underflow_occur)
2543 ASSERT(hws->funcs.did_underflow_occur(dc, top_pipe_to_program));
2545 if (num_planes == 0) {
2546 /* OTG blank before removing all front ends */
2547 hws->funcs.blank_pixel_data(dc, top_pipe_to_program, true);
2550 /* Disconnect unused mpcc */
2551 for (i = 0; i < dc->res_pool->pipe_count; i++) {
2552 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
2553 struct pipe_ctx *old_pipe_ctx =
2554 &dc->current_state->res_ctx.pipe_ctx[i];
2556 * Power-gate reused pipes that are not power-gated.
2557 * Fairly hacky right now, using opp_id as an indicator.
2558 * TODO: After moving dc_post to dc_update, this will
2559 * be removed.
2561 if (pipe_ctx->plane_state && !old_pipe_ctx->plane_state) {
2562 if (old_pipe_ctx->stream_res.tg == tg &&
2563 old_pipe_ctx->plane_res.hubp &&
2564 old_pipe_ctx->plane_res.hubp->opp_id != OPP_ID_INVALID)
2565 dc->hwss.disable_plane(dc, old_pipe_ctx);
2568 if ((!pipe_ctx->plane_state ||
2569 pipe_ctx->stream_res.tg != old_pipe_ctx->stream_res.tg) &&
2570 old_pipe_ctx->plane_state &&
2571 old_pipe_ctx->stream_res.tg == tg) {
2573 hws->funcs.plane_atomic_disconnect(dc, old_pipe_ctx);
2574 removed_pipe[i] = true;
2576 DC_LOG_DC("Reset mpcc for pipe %d\n",
2577 old_pipe_ctx->pipe_idx);
2581 if (num_planes > 0)
2582 dcn10_program_all_pipe_in_tree(dc, top_pipe_to_program, context);
2584 /* Program secondary blending tree and writeback pipes */
2585 if ((stream->num_wb_info > 0) && (hws->funcs.program_all_writeback_pipes_in_tree))
2586 hws->funcs.program_all_writeback_pipes_in_tree(dc, stream, context);
2587 if (interdependent_update)
2588 for (i = 0; i < dc->res_pool->pipe_count; i++) {
2589 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
2590 /* Skip inactive pipes and ones already updated */
2591 if (!pipe_ctx->stream || pipe_ctx->stream == stream ||
2592 !pipe_ctx->plane_state || !tg->funcs->is_tg_enabled(tg))
2593 continue;
2595 pipe_ctx->plane_res.hubp->funcs->hubp_setup_interdependent(
2596 pipe_ctx->plane_res.hubp,
2597 &pipe_ctx->dlg_regs,
2598 &pipe_ctx->ttu_regs);
2601 if (interdependent_update)
2602 dcn10_lock_all_pipes(dc, context, false);
2603 else
2604 dcn10_pipe_control_lock(dc, top_pipe_to_program, false);
2606 if (num_planes == 0)
2607 false_optc_underflow_wa(dc, stream, tg);
2609 for (i = 0; i < dc->res_pool->pipe_count; i++)
2610 if (removed_pipe[i])
2611 dc->hwss.disable_plane(dc, &dc->current_state->res_ctx.pipe_ctx[i]);
2613 for (i = 0; i < dc->res_pool->pipe_count; i++)
2614 if (removed_pipe[i]) {
2615 dc->hwss.optimize_bandwidth(dc, context);
2616 break;
2619 if (dc->hwseq->wa.DEGVIDCN10_254)
2620 hubbub1_wm_change_req_wa(dc->res_pool->hubbub);
2623 static void dcn10_stereo_hw_frame_pack_wa(struct dc *dc, struct dc_state *context)
2625 uint8_t i;
2627 for (i = 0; i < context->stream_count; i++) {
2628 if (context->streams[i]->timing.timing_3d_format
2629 == TIMING_3D_FORMAT_HW_FRAME_PACKING) {
2631 * Disable stutter
2633 hubbub1_allow_self_refresh_control(dc->res_pool->hubbub, false);
2634 break;
2639 void dcn10_prepare_bandwidth(
2640 struct dc *dc,
2641 struct dc_state *context)
2643 struct dce_hwseq *hws = dc->hwseq;
2644 struct hubbub *hubbub = dc->res_pool->hubbub;
2646 if (dc->debug.sanity_checks)
2647 hws->funcs.verify_allow_pstate_change_high(dc);
2649 if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
2650 if (context->stream_count == 0)
2651 context->bw_ctx.bw.dcn.clk.phyclk_khz = 0;
2653 dc->clk_mgr->funcs->update_clocks(
2654 dc->clk_mgr,
2655 context,
2656 false);
2659 hubbub->funcs->program_watermarks(hubbub,
2660 &context->bw_ctx.bw.dcn.watermarks,
2661 dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
2662 true);
2663 dcn10_stereo_hw_frame_pack_wa(dc, context);
2665 if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE)
2666 dcn_bw_notify_pplib_of_wm_ranges(dc);
2668 if (dc->debug.sanity_checks)
2669 hws->funcs.verify_allow_pstate_change_high(dc);
2672 void dcn10_optimize_bandwidth(
2673 struct dc *dc,
2674 struct dc_state *context)
2676 struct dce_hwseq *hws = dc->hwseq;
2677 struct hubbub *hubbub = dc->res_pool->hubbub;
2679 if (dc->debug.sanity_checks)
2680 hws->funcs.verify_allow_pstate_change_high(dc);
2682 if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
2683 if (context->stream_count == 0)
2684 context->bw_ctx.bw.dcn.clk.phyclk_khz = 0;
2686 dc->clk_mgr->funcs->update_clocks(
2687 dc->clk_mgr,
2688 context,
2689 true);
2692 hubbub->funcs->program_watermarks(hubbub,
2693 &context->bw_ctx.bw.dcn.watermarks,
2694 dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
2695 true);
2696 dcn10_stereo_hw_frame_pack_wa(dc, context);
2698 if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE)
2699 dcn_bw_notify_pplib_of_wm_ranges(dc);
2701 if (dc->debug.sanity_checks)
2702 hws->funcs.verify_allow_pstate_change_high(dc);
2705 void dcn10_set_drr(struct pipe_ctx **pipe_ctx,
2706 int num_pipes, unsigned int vmin, unsigned int vmax,
2707 unsigned int vmid, unsigned int vmid_frame_number)
2709 int i = 0;
2710 struct drr_params params = {0};
2711 // DRR set trigger event mapped to OTG_TRIG_A (bit 11) for manual control flow
2712 unsigned int event_triggers = 0x800;
2713 // Note: DRR trigger events are generated regardless of whether the num_frames count is met.
2714 unsigned int num_frames = 2;
2716 params.vertical_total_max = vmax;
2717 params.vertical_total_min = vmin;
2718 params.vertical_total_mid = vmid;
2719 params.vertical_total_mid_frame_num = vmid_frame_number;
2721 /* TODO: If multiple pipes are to be supported, some GSL
2722 * handling is needed. Static screen triggers may also need to be
2723 * programmed differently.
2725 for (i = 0; i < num_pipes; i++) {
2726 pipe_ctx[i]->stream_res.tg->funcs->set_drr(
2727 pipe_ctx[i]->stream_res.tg, &params);
2728 if (vmax != 0 && vmin != 0)
2729 pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control(
2730 pipe_ctx[i]->stream_res.tg,
2731 event_triggers, num_frames);
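/*
 * Note on the values above: event_triggers = 0x800 selects bit 11, i.e.
 * OTG_TRIG_A per the comment at the top of this function, and the static
 * screen trigger is only reprogrammed when both vmin and vmax are
 * non-zero, i.e. when DRR is actually being enabled.
 */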
2735 void dcn10_get_position(struct pipe_ctx **pipe_ctx,
2736 int num_pipes,
2737 struct crtc_position *position)
2739 int i = 0;
2741 /* TODO: handle pipes > 1
2743 for (i = 0; i < num_pipes; i++)
2744 pipe_ctx[i]->stream_res.tg->funcs->get_position(pipe_ctx[i]->stream_res.tg, position);
2747 void dcn10_set_static_screen_control(struct pipe_ctx **pipe_ctx,
2748 int num_pipes, const struct dc_static_screen_params *params)
2750 unsigned int i;
2751 unsigned int triggers = 0;
2753 if (params->triggers.surface_update)
2754 triggers |= 0x80;
2755 if (params->triggers.cursor_update)
2756 triggers |= 0x2;
2757 if (params->triggers.force_trigger)
2758 triggers |= 0x1;
2760 for (i = 0; i < num_pipes; i++)
2761 pipe_ctx[i]->stream_res.tg->funcs->
2762 set_static_screen_control(pipe_ctx[i]->stream_res.tg,
2763 triggers, params->num_frames);
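/*
 * Example of the trigger encoding above (hypothetical request): enabling
 * surface_update and force_trigger yields triggers = 0x80 | 0x1 = 0x81,
 * which is then applied to every timing generator in the pipe list.
 */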
2766 static void dcn10_config_stereo_parameters(
2767 struct dc_stream_state *stream, struct crtc_stereo_flags *flags)
2769 enum view_3d_format view_format = stream->view_format;
2770 enum dc_timing_3d_format timing_3d_format =\
2771 stream->timing.timing_3d_format;
2772 bool non_stereo_timing = false;
2774 if (timing_3d_format == TIMING_3D_FORMAT_NONE ||
2775 timing_3d_format == TIMING_3D_FORMAT_SIDE_BY_SIDE ||
2776 timing_3d_format == TIMING_3D_FORMAT_TOP_AND_BOTTOM)
2777 non_stereo_timing = true;
2779 if (non_stereo_timing == false &&
2780 view_format == VIEW_3D_FORMAT_FRAME_SEQUENTIAL) {
2782 flags->PROGRAM_STEREO = 1;
2783 flags->PROGRAM_POLARITY = 1;
2784 if (timing_3d_format == TIMING_3D_FORMAT_INBAND_FA ||
2785 timing_3d_format == TIMING_3D_FORMAT_DP_HDMI_INBAND_FA ||
2786 timing_3d_format == TIMING_3D_FORMAT_SIDEBAND_FA) {
2787 enum display_dongle_type dongle = \
2788 stream->link->ddc->dongle_type;
2789 if (dongle == DISPLAY_DONGLE_DP_VGA_CONVERTER ||
2790 dongle == DISPLAY_DONGLE_DP_DVI_CONVERTER ||
2791 dongle == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
2792 flags->DISABLE_STEREO_DP_SYNC = 1;
2794 flags->RIGHT_EYE_POLARITY =\
2795 stream->timing.flags.RIGHT_EYE_3D_POLARITY;
2796 if (timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING)
2797 flags->FRAME_PACKED = 1;
2800 return;
2803 void dcn10_setup_stereo(struct pipe_ctx *pipe_ctx, struct dc *dc)
2805 struct crtc_stereo_flags flags = { 0 };
2806 struct dc_stream_state *stream = pipe_ctx->stream;
2808 dcn10_config_stereo_parameters(stream, &flags);
2810 if (stream->timing.timing_3d_format == TIMING_3D_FORMAT_SIDEBAND_FA) {
2811 if (!dc_set_generic_gpio_for_stereo(true, dc->ctx->gpio_service))
2812 dc_set_generic_gpio_for_stereo(false, dc->ctx->gpio_service);
2813 } else {
2814 dc_set_generic_gpio_for_stereo(false, dc->ctx->gpio_service);
2817 pipe_ctx->stream_res.opp->funcs->opp_program_stereo(
2818 pipe_ctx->stream_res.opp,
2819 flags.PROGRAM_STEREO == 1 ? true:false,
2820 &stream->timing);
2822 pipe_ctx->stream_res.tg->funcs->program_stereo(
2823 pipe_ctx->stream_res.tg,
2824 &stream->timing,
2825 &flags);
2827 return;
2830 static struct hubp *get_hubp_by_inst(struct resource_pool *res_pool, int mpcc_inst)
2832 int i;
2834 for (i = 0; i < res_pool->pipe_count; i++) {
2835 if (res_pool->hubps[i]->inst == mpcc_inst)
2836 return res_pool->hubps[i];
2838 ASSERT(false);
2839 return NULL;
2842 void dcn10_wait_for_mpcc_disconnect(
2843 struct dc *dc,
2844 struct resource_pool *res_pool,
2845 struct pipe_ctx *pipe_ctx)
2847 struct dce_hwseq *hws = dc->hwseq;
2848 int mpcc_inst;
2850 if (dc->debug.sanity_checks) {
2851 hws->funcs.verify_allow_pstate_change_high(dc);
2854 if (!pipe_ctx->stream_res.opp)
2855 return;
2857 for (mpcc_inst = 0; mpcc_inst < MAX_PIPES; mpcc_inst++) {
2858 if (pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst]) {
2859 struct hubp *hubp = get_hubp_by_inst(res_pool, mpcc_inst);
2861 res_pool->mpc->funcs->wait_for_idle(res_pool->mpc, mpcc_inst);
2862 pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst] = false;
2863 hubp->funcs->set_blank(hubp, true);
2867 if (dc->debug.sanity_checks) {
2868 hws->funcs.verify_allow_pstate_change_high(dc);
2873 bool dcn10_dummy_display_power_gating(
2874 struct dc *dc,
2875 uint8_t controller_id,
2876 struct dc_bios *dcb,
2877 enum pipe_gating_control power_gating)
2879 return true;
2882 void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx)
2884 struct dc_plane_state *plane_state = pipe_ctx->plane_state;
2885 struct timing_generator *tg = pipe_ctx->stream_res.tg;
2886 bool flip_pending;
2888 if (plane_state == NULL)
2889 return;
2891 flip_pending = pipe_ctx->plane_res.hubp->funcs->hubp_is_flip_pending(
2892 pipe_ctx->plane_res.hubp);
2894 plane_state->status.is_flip_pending = plane_state->status.is_flip_pending || flip_pending;
2896 if (!flip_pending)
2897 plane_state->status.current_address = plane_state->status.requested_address;
2899 if (plane_state->status.current_address.type == PLN_ADDR_TYPE_GRPH_STEREO &&
2900 tg->funcs->is_stereo_left_eye) {
2901 plane_state->status.is_right_eye =
2902 !tg->funcs->is_stereo_left_eye(pipe_ctx->stream_res.tg);
2906 void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh_data)
2908 struct hubbub *hubbub = hws->ctx->dc->res_pool->hubbub;
2910 /* In DCN, this programming sequence is owned by the hubbub */
2911 hubbub->funcs->update_dchub(hubbub, dh_data);
2914 void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
2916 struct dc_cursor_position pos_cpy = pipe_ctx->stream->cursor_position;
2917 struct hubp *hubp = pipe_ctx->plane_res.hubp;
2918 struct dpp *dpp = pipe_ctx->plane_res.dpp;
2919 struct dc_cursor_mi_param param = {
2920 .pixel_clk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10,
2921 .ref_clk_khz = pipe_ctx->stream->ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz,
2922 .viewport = pipe_ctx->plane_res.scl_data.viewport,
2923 .h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz,
2924 .v_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.vert,
2925 .rotation = pipe_ctx->plane_state->rotation,
2926 .mirror = pipe_ctx->plane_state->horizontal_mirror
2928 bool pipe_split_on = (pipe_ctx->top_pipe != NULL) ||
2929 (pipe_ctx->bottom_pipe != NULL);
2931 int x_plane = pipe_ctx->plane_state->dst_rect.x;
2932 int y_plane = pipe_ctx->plane_state->dst_rect.y;
2933 int x_pos = pos_cpy.x;
2934 int y_pos = pos_cpy.y;
2936 // translate cursor from stream space to plane space
2937 x_pos = (x_pos - x_plane) * pipe_ctx->plane_state->src_rect.width /
2938 pipe_ctx->plane_state->dst_rect.width;
2939 y_pos = (y_pos - y_plane) * pipe_ctx->plane_state->src_rect.height /
2940 pipe_ctx->plane_state->dst_rect.height;
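/*
 * Example of the stream -> plane space translation above (hypothetical
 * rects): with dst_rect.x = 0, dst_rect.width = 1920 and src_rect.width =
 * 960 (a plane scaled up 2x), a cursor at stream x = 1000 maps to plane
 * x = (1000 - 0) * 960 / 1920 = 500 before the negative-position clamping
 * below.
 */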
2942 if (x_pos < 0) {
2943 pos_cpy.x_hotspot -= x_pos;
2944 x_pos = 0;
2947 if (y_pos < 0) {
2948 pos_cpy.y_hotspot -= y_pos;
2949 y_pos = 0;
2952 pos_cpy.x = (uint32_t)x_pos;
2953 pos_cpy.y = (uint32_t)y_pos;
2955 if (pipe_ctx->plane_state->address.type
2956 == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
2957 pos_cpy.enable = false;
2959 // Swap axis and mirror horizontally
2960 if (param.rotation == ROTATION_ANGLE_90) {
2961 uint32_t temp_x = pos_cpy.x;
2963 pos_cpy.x = pipe_ctx->plane_res.scl_data.viewport.width -
2964 (pos_cpy.y - pipe_ctx->plane_res.scl_data.viewport.x) + pipe_ctx->plane_res.scl_data.viewport.x;
2965 pos_cpy.y = temp_x;
2967 // Swap axis and mirror vertically
2968 else if (param.rotation == ROTATION_ANGLE_270) {
2969 uint32_t temp_y = pos_cpy.y;
2970 int viewport_height =
2971 pipe_ctx->plane_res.scl_data.viewport.height;
2973 if (pipe_split_on) {
2974 if (pos_cpy.x > viewport_height) {
2975 pos_cpy.x = pos_cpy.x - viewport_height;
2976 pos_cpy.y = viewport_height - pos_cpy.x;
2977 } else {
2978 pos_cpy.y = 2 * viewport_height - pos_cpy.x;
2980 } else
2981 pos_cpy.y = viewport_height - pos_cpy.x;
2982 pos_cpy.x = temp_y;
2984 // Mirror horizontally and vertically
2985 else if (param.rotation == ROTATION_ANGLE_180) {
2986 int viewport_width =
2987 pipe_ctx->plane_res.scl_data.viewport.width;
2988 int viewport_x =
2989 pipe_ctx->plane_res.scl_data.viewport.x;
2991 if (pipe_split_on) {
2992 if (pos_cpy.x >= viewport_width + viewport_x) {
2993 pos_cpy.x = 2 * viewport_width
2994 - pos_cpy.x + 2 * viewport_x;
2995 } else {
2996 uint32_t temp_x = pos_cpy.x;
2998 pos_cpy.x = 2 * viewport_x - pos_cpy.x;
2999 if (temp_x >= viewport_x +
3000 (int)hubp->curs_attr.width || pos_cpy.x
3001 <= (int)hubp->curs_attr.width +
3002 pipe_ctx->plane_state->src_rect.x) {
3003 pos_cpy.x = temp_x + viewport_width;
3006 } else {
3007 pos_cpy.x = viewport_width - pos_cpy.x + 2 * viewport_x;
3009 pos_cpy.y = pipe_ctx->plane_res.scl_data.viewport.height - pos_cpy.y;
3012 hubp->funcs->set_cursor_position(hubp, &pos_cpy, &param);
3013 dpp->funcs->set_cursor_position(dpp, &pos_cpy, &param, hubp->curs_attr.width, hubp->curs_attr.height);
3016 void dcn10_set_cursor_attribute(struct pipe_ctx *pipe_ctx)
3018 struct dc_cursor_attributes *attributes = &pipe_ctx->stream->cursor_attributes;
3020 pipe_ctx->plane_res.hubp->funcs->set_cursor_attributes(
3021 pipe_ctx->plane_res.hubp, attributes);
3022 pipe_ctx->plane_res.dpp->funcs->set_cursor_attributes(
3023 pipe_ctx->plane_res.dpp, attributes);
3026 void dcn10_set_cursor_sdr_white_level(struct pipe_ctx *pipe_ctx)
3028 uint32_t sdr_white_level = pipe_ctx->stream->cursor_attributes.sdr_white_level;
3029 struct fixed31_32 multiplier;
3030 struct dpp_cursor_attributes opt_attr = { 0 };
3031 uint32_t hw_scale = 0x3c00; // 1.0 default multiplier
3032 struct custom_float_format fmt;
3034 if (!pipe_ctx->plane_res.dpp->funcs->set_optional_cursor_attributes)
3035 return;
3037 fmt.exponenta_bits = 5;
3038 fmt.mantissa_bits = 10;
3039 fmt.sign = true;
3041 if (sdr_white_level > 80) {
3042 multiplier = dc_fixpt_from_fraction(sdr_white_level, 80);
3043 convert_to_custom_float_format(multiplier, &fmt, &hw_scale);
3046 opt_attr.scale = hw_scale;
3047 opt_attr.bias = 0;
3049 pipe_ctx->plane_res.dpp->funcs->set_optional_cursor_attributes(
3050 pipe_ctx->plane_res.dpp, &opt_attr);
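/*
 * Note on the defaults above, assuming an IEEE-754 half-float style
 * encoding for fmt = {sign:1, exp:5, mantissa:10}: 0x3c00 has an exponent
 * field of 0xf (the bias) and a mantissa of 0, i.e. 1.0. A hypothetical
 * sdr_white_level of 160 nits gives a multiplier of 160/80 = 2.0, which
 * encodes as 0x4000.
 */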
3054 * apply_front_porch_workaround - TODO: is this still needed on FPGA?
3056 * This is a workaround for a bug that has existed since R5xx and has not been
3057 * fixed: keep the front porch at a minimum of 2 for interlaced mode, or 1 for progressive.
3059 static void apply_front_porch_workaround(
3060 struct dc_crtc_timing *timing)
3062 if (timing->flags.INTERLACE == 1) {
3063 if (timing->v_front_porch < 2)
3064 timing->v_front_porch = 2;
3065 } else {
3066 if (timing->v_front_porch < 1)
3067 timing->v_front_porch = 1;
3071 int dcn10_get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx)
3073 const struct dc_crtc_timing *dc_crtc_timing = &pipe_ctx->stream->timing;
3074 struct dc_crtc_timing patched_crtc_timing;
3075 int vesa_sync_start;
3076 int asic_blank_end;
3077 int interlace_factor;
3078 int vertical_line_start;
3080 patched_crtc_timing = *dc_crtc_timing;
3081 apply_front_porch_workaround(&patched_crtc_timing);
3083 interlace_factor = patched_crtc_timing.flags.INTERLACE ? 2 : 1;
3085 vesa_sync_start = patched_crtc_timing.v_addressable +
3086 patched_crtc_timing.v_border_bottom +
3087 patched_crtc_timing.v_front_porch;
3089 asic_blank_end = (patched_crtc_timing.v_total -
3090 vesa_sync_start -
3091 patched_crtc_timing.v_border_top)
3092 * interlace_factor;
3094 vertical_line_start = asic_blank_end -
3095 pipe_ctx->pipe_dlg_param.vstartup_start + 1;
3097 return vertical_line_start;
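/*
 * The calculation above reduces to:
 *   vertical_line_start = (v_total - v_addressable - v_border_bottom
 *                          - v_front_porch - v_border_top) * interlace_factor
 *                         - vstartup_start + 1
 * Hypothetical example: v_total = 1125, v_addressable = 1080, borders = 0,
 * v_front_porch = 4, progressive timing and vstartup_start = 30 give
 * (1125 - 1084) * 1 - 30 + 1 = 12 lines after vsync.
 */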
3100 static void dcn10_calc_vupdate_position(
3101 struct dc *dc,
3102 struct pipe_ctx *pipe_ctx,
3103 uint32_t *start_line,
3104 uint32_t *end_line)
3106 const struct dc_crtc_timing *dc_crtc_timing = &pipe_ctx->stream->timing;
3107 int vline_int_offset_from_vupdate =
3108 pipe_ctx->stream->periodic_interrupt0.lines_offset;
3109 int vupdate_offset_from_vsync = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
3110 int start_position;
3112 if (vline_int_offset_from_vupdate > 0)
3113 vline_int_offset_from_vupdate--;
3114 else if (vline_int_offset_from_vupdate < 0)
3115 vline_int_offset_from_vupdate++;
3117 start_position = vline_int_offset_from_vupdate + vupdate_offset_from_vsync;
3119 if (start_position >= 0)
3120 *start_line = start_position;
3121 else
3122 *start_line = dc_crtc_timing->v_total + start_position - 1;
3124 *end_line = *start_line + 2;
3126 if (*end_line >= dc_crtc_timing->v_total)
3127 *end_line = 2;
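/*
 * Wrap-around example for the logic above (hypothetical values): with
 * v_total = 1125, lines_offset = 0 and a vupdate offset of -10, the start
 * position is -10, so start_line wraps to 1125 - 10 - 1 = 1114 and
 * end_line becomes 1116.
 */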
3130 static void dcn10_cal_vline_position(
3131 struct dc *dc,
3132 struct pipe_ctx *pipe_ctx,
3133 enum vline_select vline,
3134 uint32_t *start_line,
3135 uint32_t *end_line)
3137 enum vertical_interrupt_ref_point ref_point = INVALID_POINT;
3139 if (vline == VLINE0)
3140 ref_point = pipe_ctx->stream->periodic_interrupt0.ref_point;
3141 else if (vline == VLINE1)
3142 ref_point = pipe_ctx->stream->periodic_interrupt1.ref_point;
3144 switch (ref_point) {
3145 case START_V_UPDATE:
3146 dcn10_calc_vupdate_position(
3148 pipe_ctx,
3149 start_line,
3150 end_line);
3151 break;
3152 case START_V_SYNC:
3153 // Supposed to do nothing because vsync is 0
3154 break;
3155 default:
3156 ASSERT(0);
3157 break;
3161 void dcn10_setup_periodic_interrupt(
3162 struct dc *dc,
3163 struct pipe_ctx *pipe_ctx,
3164 enum vline_select vline)
3166 struct timing_generator *tg = pipe_ctx->stream_res.tg;
3168 if (vline == VLINE0) {
3169 uint32_t start_line = 0;
3170 uint32_t end_line = 0;
3172 dcn10_cal_vline_position(dc, pipe_ctx, vline, &start_line, &end_line);
3174 tg->funcs->setup_vertical_interrupt0(tg, start_line, end_line);
3176 } else if (vline == VLINE1) {
3177 pipe_ctx->stream_res.tg->funcs->setup_vertical_interrupt1(
3179 pipe_ctx->stream->periodic_interrupt1.lines_offset);
3183 void dcn10_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx)
3185 struct timing_generator *tg = pipe_ctx->stream_res.tg;
3186 int start_line = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
3188 if (start_line < 0) {
3189 ASSERT(0);
3190 start_line = 0;
3193 if (tg->funcs->setup_vertical_interrupt2)
3194 tg->funcs->setup_vertical_interrupt2(tg, start_line);
3197 void dcn10_unblank_stream(struct pipe_ctx *pipe_ctx,
3198 struct dc_link_settings *link_settings)
3200 struct encoder_unblank_param params = { { 0 } };
3201 struct dc_stream_state *stream = pipe_ctx->stream;
3202 struct dc_link *link = stream->link;
3203 struct dce_hwseq *hws = link->dc->hwseq;
3205 /* only 3 items below are used by unblank */
3206 params.timing = pipe_ctx->stream->timing;
3208 params.link_settings.link_rate = link_settings->link_rate;
3210 if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
3211 if (params.timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
3212 params.timing.pix_clk_100hz /= 2;
3213 pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(pipe_ctx->stream_res.stream_enc, &params);
3216 if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
3217 hws->funcs.edp_backlight_control(link, true);
3221 void dcn10_send_immediate_sdp_message(struct pipe_ctx *pipe_ctx,
3222 const uint8_t *custom_sdp_message,
3223 unsigned int sdp_message_size)
3225 if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
3226 pipe_ctx->stream_res.stream_enc->funcs->send_immediate_sdp_message(
3227 pipe_ctx->stream_res.stream_enc,
3228 custom_sdp_message,
3229 sdp_message_size);
3232 enum dc_status dcn10_set_clock(struct dc *dc,
3233 enum dc_clock_type clock_type,
3234 uint32_t clk_khz,
3235 uint32_t stepping)
3237 struct dc_state *context = dc->current_state;
3238 struct dc_clock_config clock_cfg = {0};
3239 struct dc_clocks *current_clocks = &context->bw_ctx.bw.dcn.clk;
3241 if (dc->clk_mgr && dc->clk_mgr->funcs->get_clock)
3242 dc->clk_mgr->funcs->get_clock(dc->clk_mgr,
3243 context, clock_type, &clock_cfg);
3245 if (!dc->clk_mgr->funcs->get_clock)
3246 return DC_FAIL_UNSUPPORTED_1;
3248 if (clk_khz > clock_cfg.max_clock_khz)
3249 return DC_FAIL_CLK_EXCEED_MAX;
3251 if (clk_khz < clock_cfg.min_clock_khz)
3252 return DC_FAIL_CLK_BELOW_MIN;
3254 if (clk_khz < clock_cfg.bw_requirequired_clock_khz)
3255 return DC_FAIL_CLK_BELOW_CFG_REQUIRED;
3257 /* update the internal requested clock for update_clocks to use */
3258 if (clock_type == DC_CLOCK_TYPE_DISPCLK)
3259 current_clocks->dispclk_khz = clk_khz;
3260 else if (clock_type == DC_CLOCK_TYPE_DPPCLK)
3261 current_clocks->dppclk_khz = clk_khz;
3262 else
3263 return DC_ERROR_UNEXPECTED;
3265 if (dc->clk_mgr && dc->clk_mgr->funcs->update_clocks)
3266 dc->clk_mgr->funcs->update_clocks(dc->clk_mgr,
3267 context, true);
3268 return DC_OK;
3272 void dcn10_get_clock(struct dc *dc,
3273 enum dc_clock_type clock_type,
3274 struct dc_clock_config *clock_cfg)
3276 struct dc_state *context = dc->current_state;
3278 if (dc->clk_mgr && dc->clk_mgr->funcs->get_clock)
3279 dc->clk_mgr->funcs->get_clock(dc->clk_mgr, context, clock_type, clock_cfg);