/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/delay.h>
#include "dm_services.h"
#include "basics/dc_common.h"
#include "core_types.h"
#include "custom_float.h"
#include "dcn10_hw_sequencer.h"
#include "dcn10_hw_sequencer_debug.h"
#include "dce/dce_hwseq.h"
#include "dcn10_optc.h"
#include "dcn10_dpp.h"
#include "dcn10_mpc.h"
#include "timing_generator.h"
#include "reg_helper.h"
#include "dcn10_hubp.h"
#include "dcn10_hubbub.h"
#include "dcn10_cm_common.h"
#include "dc_link_dp.h"
#include "link_hwss.h"
#include "dpcd_defs.h"
#include "dce/dmub_hw_lock_mgr.h"
#define DC_LOGGER_INIT(logger)

#define FN(reg_name, field_name) \
	hws->shifts->field_name, hws->masks->field_name

/* print is 17 wide, first two characters are spaces */
#define DTN_INFO_MICRO_SEC(ref_cycle) \
	print_microsec(dc_ctx, log_ctx, ref_cycle)

#define GAMMA_HW_POINTS_NUM 256
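/*
 * Convert a cycle count measured against the DCHUB reference clock into
 * microseconds (printed with three decimal places) for the DTN log entries
 * emitted by DTN_INFO_MICRO_SEC() above.
 */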
void print_microsec(struct dc_context *dc_ctx,
	struct dc_log_buffer_ctx *log_ctx,
	uint32_t ref_cycle)
{
	const uint32_t ref_clk_mhz = dc_ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000;
	static const unsigned int frac = 1000;
	uint32_t us_x10 = (ref_cycle * frac) / ref_clk_mhz;

	DTN_INFO("  %11d.%03d",
			us_x10 / frac,
			us_x10 % frac);
}
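/*
 * Grab or release the OTG master update lock on every active top pipe.
 * Bottom pipes, pipes without a stream or plane, and disabled timing
 * generators are skipped, since the lock is per-TG.
 */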
void dcn10_lock_all_pipes(struct dc *dc,
	struct dc_state *context,
	bool lock)
{
	struct pipe_ctx *pipe_ctx;
	struct timing_generator *tg;
	int i;

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		pipe_ctx = &context->res_ctx.pipe_ctx[i];
		tg = pipe_ctx->stream_res.tg;

		/*
		 * Only lock the top pipe's tg to prevent redundant
		 * (un)locking. Also skip if pipe is disabled.
		 */
		if (pipe_ctx->top_pipe ||
		    !pipe_ctx->stream || !pipe_ctx->plane_state ||
		    !tg->funcs->is_tg_enabled(tg))
			continue;

		if (lock)
			dc->hwss.pipe_control_lock(dc, pipe_ctx, true);
		else
			dc->hwss.pipe_control_lock(dc, pipe_ctx, false);
	}
}
static void log_mpc_crc(struct dc *dc,
	struct dc_log_buffer_ctx *log_ctx)
{
	struct dc_context *dc_ctx = dc->ctx;
	struct dce_hwseq *hws = dc->hwseq;

	if (REG(MPC_CRC_RESULT_GB))
		DTN_INFO("MPC_CRC_RESULT_GB:%d MPC_CRC_RESULT_C:%d MPC_CRC_RESULT_AR:%d\n",
			REG_READ(MPC_CRC_RESULT_GB), REG_READ(MPC_CRC_RESULT_C), REG_READ(MPC_CRC_RESULT_AR));
	if (REG(DPP_TOP0_DPP_CRC_VAL_B_A))
		DTN_INFO("DPP_TOP0_DPP_CRC_VAL_B_A:%d DPP_TOP0_DPP_CRC_VAL_R_G:%d\n",
			REG_READ(DPP_TOP0_DPP_CRC_VAL_B_A), REG_READ(DPP_TOP0_DPP_CRC_VAL_R_G));
}
void dcn10_log_hubbub_state(struct dc *dc, struct dc_log_buffer_ctx *log_ctx)
{
	struct dc_context *dc_ctx = dc->ctx;
	struct dcn_hubbub_wm wm;
	int i;

	memset(&wm, 0, sizeof(struct dcn_hubbub_wm));
	dc->res_pool->hubbub->funcs->wm_read_state(dc->res_pool->hubbub, &wm);

	DTN_INFO("HUBBUB WM:      data_urgent  pte_meta_urgent"
			"         sr_enter          sr_exit  dram_clk_change\n");

	for (i = 0; i < 4; i++) {
		struct dcn_hubbub_wm_set *s;

		s = &wm.sets[i];
		DTN_INFO("WM_Set[%d]:", s->wm_set);
		DTN_INFO_MICRO_SEC(s->data_urgent);
		DTN_INFO_MICRO_SEC(s->pte_meta_urgent);
		DTN_INFO_MICRO_SEC(s->sr_enter);
		DTN_INFO_MICRO_SEC(s->sr_exit);
		/* field name is spelled "chanage" in struct dcn_hubbub_wm_set */
		DTN_INFO_MICRO_SEC(s->dram_clk_chanage);
		DTN_INFO("\n");
	}

	DTN_INFO("\n");
}
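/*
 * Dump the per-pipe HUBP surface state, followed by the RQ, DLG and TTU
 * register groups, as read back from hardware for every pipe in the pool.
 */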
static void dcn10_log_hubp_states(struct dc *dc, void *log_ctx)
{
	struct dc_context *dc_ctx = dc->ctx;
	struct resource_pool *pool = dc->res_pool;
	int i;

	DTN_INFO(
		"HUBP:  format  addr_hi  width  height  rot  mir  sw_mode  dcc_en  blank_en  clock_en  ttu_dis  underflow   min_ttu_vblank       qos_low_wm      qos_high_wm\n");
	for (i = 0; i < pool->pipe_count; i++) {
		struct hubp *hubp = pool->hubps[i];
		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(hubp)->state);

		hubp->funcs->hubp_read_state(hubp);

		if (!s->blank_en) {
			DTN_INFO("[%2d]:  %5xh  %6xh  %5d  %6d  %2xh  %2xh  %6xh  %6d  %8d  %8d  %7d  %8xh",
					hubp->inst,
					s->pixel_format,
					s->inuse_addr_hi,
					s->viewport_width,
					s->viewport_height,
					s->rotation_angle,
					s->h_mirror_en,
					s->sw_mode,
					s->dcc_en,
					s->blank_en,
					s->clock_en,
					s->ttu_disable,
					s->underflow_status);
			DTN_INFO_MICRO_SEC(s->min_ttu_vblank);
			DTN_INFO_MICRO_SEC(s->qos_level_low_wm);
			DTN_INFO_MICRO_SEC(s->qos_level_high_wm);
			DTN_INFO("\n");
		}
	}

	DTN_INFO("\n=========RQ========\n");
	DTN_INFO("HUBP:  drq_exp_m  prq_exp_m  mrq_exp_m  crq_exp_m  plane1_ba  L:chunk_s  min_chu_s  meta_ch_s"
		"  min_m_c_s  dpte_gr_s  mpte_gr_s  swath_hei  pte_row_h  C:chunk_s  min_chu_s  meta_ch_s"
		"  min_m_c_s  dpte_gr_s  mpte_gr_s  swath_hei  pte_row_h\n");
	for (i = 0; i < pool->pipe_count; i++) {
		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
		struct _vcs_dpi_display_rq_regs_st *rq_regs = &s->rq_regs;

		if (!s->blank_en)
			DTN_INFO("[%2d]:  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh\n",
				pool->hubps[i]->inst, rq_regs->drq_expansion_mode, rq_regs->prq_expansion_mode, rq_regs->mrq_expansion_mode,
				rq_regs->crq_expansion_mode, rq_regs->plane1_base_address, rq_regs->rq_regs_l.chunk_size,
				rq_regs->rq_regs_l.min_chunk_size, rq_regs->rq_regs_l.meta_chunk_size,
				rq_regs->rq_regs_l.min_meta_chunk_size, rq_regs->rq_regs_l.dpte_group_size,
				rq_regs->rq_regs_l.mpte_group_size, rq_regs->rq_regs_l.swath_height,
				rq_regs->rq_regs_l.pte_row_height_linear, rq_regs->rq_regs_c.chunk_size, rq_regs->rq_regs_c.min_chunk_size,
				rq_regs->rq_regs_c.meta_chunk_size, rq_regs->rq_regs_c.min_meta_chunk_size,
				rq_regs->rq_regs_c.dpte_group_size, rq_regs->rq_regs_c.mpte_group_size,
				rq_regs->rq_regs_c.swath_height, rq_regs->rq_regs_c.pte_row_height_linear);
	}

	DTN_INFO("========DLG========\n");
	DTN_INFO("HUBP:  rc_hbe     dlg_vbe    min_d_y_n  rc_per_ht  rc_x_a_s "
			"  dst_y_a_s  dst_y_pf   dst_y_vvb  dst_y_rvb  dst_y_vfl  dst_y_rfl  rf_pix_fq"
			"  vratio_pf  vrat_pf_c  rc_pg_vbl  rc_pg_vbc  rc_mc_vbl  rc_mc_vbc  rc_pg_fll"
			"  rc_pg_flc  rc_mc_fll  rc_mc_flc  pr_nom_l   pr_nom_c   rc_pg_nl   rc_pg_nc "
			"  mr_nom_l   mr_nom_c   rc_mc_nl   rc_mc_nc   rc_ld_pl   rc_ld_pc   rc_ld_l  "
			"  rc_ld_c    cha_cur0   ofst_cur1  cha_cur1   vr_af_vc0  ddrq_limt  x_rt_dlay"
			"  x_rp_dlay  x_rr_sfl\n");
	for (i = 0; i < pool->pipe_count; i++) {
		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
		struct _vcs_dpi_display_dlg_regs_st *dlg_regs = &s->dlg_attr;

		if (!s->blank_en)
			DTN_INFO("[%2d]:  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh"
				"  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh"
				"  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh\n",
				pool->hubps[i]->inst, dlg_regs->refcyc_h_blank_end, dlg_regs->dlg_vblank_end, dlg_regs->min_dst_y_next_start,
				dlg_regs->refcyc_per_htotal, dlg_regs->refcyc_x_after_scaler, dlg_regs->dst_y_after_scaler,
				dlg_regs->dst_y_prefetch, dlg_regs->dst_y_per_vm_vblank, dlg_regs->dst_y_per_row_vblank,
				dlg_regs->dst_y_per_vm_flip, dlg_regs->dst_y_per_row_flip, dlg_regs->ref_freq_to_pix_freq,
				dlg_regs->vratio_prefetch, dlg_regs->vratio_prefetch_c, dlg_regs->refcyc_per_pte_group_vblank_l,
				dlg_regs->refcyc_per_pte_group_vblank_c, dlg_regs->refcyc_per_meta_chunk_vblank_l,
				dlg_regs->refcyc_per_meta_chunk_vblank_c, dlg_regs->refcyc_per_pte_group_flip_l,
				dlg_regs->refcyc_per_pte_group_flip_c, dlg_regs->refcyc_per_meta_chunk_flip_l,
				dlg_regs->refcyc_per_meta_chunk_flip_c, dlg_regs->dst_y_per_pte_row_nom_l,
				dlg_regs->dst_y_per_pte_row_nom_c, dlg_regs->refcyc_per_pte_group_nom_l,
				dlg_regs->refcyc_per_pte_group_nom_c, dlg_regs->dst_y_per_meta_row_nom_l,
				dlg_regs->dst_y_per_meta_row_nom_c, dlg_regs->refcyc_per_meta_chunk_nom_l,
				dlg_regs->refcyc_per_meta_chunk_nom_c, dlg_regs->refcyc_per_line_delivery_pre_l,
				dlg_regs->refcyc_per_line_delivery_pre_c, dlg_regs->refcyc_per_line_delivery_l,
				dlg_regs->refcyc_per_line_delivery_c, dlg_regs->chunk_hdl_adjust_cur0, dlg_regs->dst_y_offset_cur1,
				dlg_regs->chunk_hdl_adjust_cur1, dlg_regs->vready_after_vcount0, dlg_regs->dst_y_delta_drq_limit,
				dlg_regs->xfc_reg_transfer_delay, dlg_regs->xfc_reg_precharge_delay,
				dlg_regs->xfc_reg_remote_surface_flip_latency);
	}

	DTN_INFO("========TTU========\n");
	DTN_INFO("HUBP:  qos_ll_wm  qos_lh_wm  mn_ttu_vb  qos_l_flp  rc_rd_p_l  rc_rd_l    rc_rd_p_c"
			"  rc_rd_c    rc_rd_c0   rc_rd_pc0  rc_rd_c1   rc_rd_pc1  qos_lf_l   qos_rds_l"
			"  qos_lf_c   qos_rds_c  qos_lf_c0  qos_rds_c0 qos_lf_c1  qos_rds_c1\n");
	for (i = 0; i < pool->pipe_count; i++) {
		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
		struct _vcs_dpi_display_ttu_regs_st *ttu_regs = &s->ttu_attr;

		if (!s->blank_en)
			DTN_INFO("[%2d]:  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh\n",
				pool->hubps[i]->inst, ttu_regs->qos_level_low_wm, ttu_regs->qos_level_high_wm, ttu_regs->min_ttu_vblank,
				ttu_regs->qos_level_flip, ttu_regs->refcyc_per_req_delivery_pre_l, ttu_regs->refcyc_per_req_delivery_l,
				ttu_regs->refcyc_per_req_delivery_pre_c, ttu_regs->refcyc_per_req_delivery_c, ttu_regs->refcyc_per_req_delivery_cur0,
				ttu_regs->refcyc_per_req_delivery_pre_cur0, ttu_regs->refcyc_per_req_delivery_cur1,
				ttu_regs->refcyc_per_req_delivery_pre_cur1, ttu_regs->qos_level_fixed_l, ttu_regs->qos_ramp_disable_l,
				ttu_regs->qos_level_fixed_c, ttu_regs->qos_ramp_disable_c, ttu_regs->qos_level_fixed_cur0,
				ttu_regs->qos_ramp_disable_cur0, ttu_regs->qos_level_fixed_cur1, ttu_regs->qos_ramp_disable_cur1);
	}
	DTN_INFO("\n");
}
void dcn10_log_hw_state(struct dc *dc,
	struct dc_log_buffer_ctx *log_ctx)
{
	struct dc_context *dc_ctx = dc->ctx;
	struct resource_pool *pool = dc->res_pool;
	int i;

	DTN_INFO_BEGIN();

	dcn10_log_hubbub_state(dc, log_ctx);

	dcn10_log_hubp_states(dc, log_ctx);

	DTN_INFO("DPP:    IGAM format  IGAM mode    DGAM mode    RGAM mode"
			"  GAMUT mode  C11 C12   C13 C14   C21 C22   C23 C24   "
			"C31 C32   C33 C34\n");
	for (i = 0; i < pool->pipe_count; i++) {
		struct dpp *dpp = pool->dpps[i];
		struct dcn_dpp_state s = {0};

		dpp->funcs->dpp_read_state(dpp, &s);

		if (!s.is_enabled)
			continue;

		DTN_INFO("[%2d]:  %11xh  %-11s  %-11s  %-11s"
				"%8x    %08xh %08xh %08xh %08xh %08xh %08xh",
				dpp->inst,
				s.igam_input_format,
				(s.igam_lut_mode == 0) ? "BypassFixed" :
					((s.igam_lut_mode == 1) ? "BypassFloat" :
					((s.igam_lut_mode == 2) ? "RAM" :
					((s.igam_lut_mode == 3) ? "RAM" :
								 "Unknown"))),
				(s.dgam_lut_mode == 0) ? "Bypass" :
					((s.dgam_lut_mode == 1) ? "sRGB" :
					((s.dgam_lut_mode == 2) ? "Ycc" :
					((s.dgam_lut_mode == 3) ? "RAM" :
					((s.dgam_lut_mode == 4) ? "RAM" :
								 "Unknown")))),
				(s.rgam_lut_mode == 0) ? "Bypass" :
					((s.rgam_lut_mode == 1) ? "sRGB" :
					((s.rgam_lut_mode == 2) ? "Ycc" :
					((s.rgam_lut_mode == 3) ? "RAM" :
					((s.rgam_lut_mode == 4) ? "RAM" :
								 "Unknown")))),
				s.gamut_remap_mode,
				s.gamut_remap_c11_c12,
				s.gamut_remap_c13_c14,
				s.gamut_remap_c21_c22,
				s.gamut_remap_c23_c24,
				s.gamut_remap_c31_c32,
				s.gamut_remap_c33_c34);
		DTN_INFO("\n");
	}
	DTN_INFO("\n");

	DTN_INFO("MPCC:  OPP  DPP  MPCCBOT  MODE  ALPHA_MODE  PREMULT  OVERLAP_ONLY  IDLE\n");
	for (i = 0; i < pool->pipe_count; i++) {
		struct mpcc_state s = {0};

		pool->mpc->funcs->read_mpcc_state(pool->mpc, i, &s);
		if (s.opp_id != 0xf)
			DTN_INFO("[%2d]:  %2xh  %2xh  %6xh  %4d  %10d  %7d  %12d  %4d\n",
				i, s.opp_id, s.dpp_id, s.bot_mpcc_id,
				s.mode, s.alpha_mode, s.pre_multiplied_alpha, s.overlap_only,
				s.idle);
	}
	DTN_INFO("\n");

	DTN_INFO("OTG:  v_bs  v_be  v_ss  v_se  vpol  vmax  vmin  vmax_sel  vmin_sel  h_bs  h_be  h_ss  h_se  hpol  htot  vtot  underflow blank_en\n");

	for (i = 0; i < pool->timing_generator_count; i++) {
		struct timing_generator *tg = pool->timing_generators[i];
		struct dcn_otg_state s = {0};
		/* Read shared OTG state registers for all DCNx */
		optc1_read_otg_state(DCN10TG_FROM_TG(tg), &s);

		/*
		 * For DCN2 and greater, a register on the OPP is used to
		 * determine if the CRTC is blanked instead of the OTG. So use
		 * dpg_is_blanked() if exists, otherwise fallback on otg.
		 *
		 * TODO: Implement DCN-specific read_otg_state hooks.
		 */
		if (pool->opps[i]->funcs->dpg_is_blanked)
			s.blank_enabled = pool->opps[i]->funcs->dpg_is_blanked(pool->opps[i]);
		else
			s.blank_enabled = tg->funcs->is_blanked(tg);

		//only print if OTG master is enabled
		if ((s.otg_enabled & 1) == 0)
			continue;

		DTN_INFO("[%d]: %5d %5d %5d %5d %5d %5d %5d %9d %9d %5d %5d %5d %5d %5d %5d %5d %9d %8d\n",
				tg->inst,
				s.v_blank_start,
				s.v_blank_end,
				s.v_sync_a_start,
				s.v_sync_a_end,
				s.v_sync_a_pol,
				s.v_total_max,
				s.v_total_min,
				s.v_total_max_sel,
				s.v_total_min_sel,
				s.h_blank_start,
				s.h_blank_end,
				s.h_sync_a_start,
				s.h_sync_a_end,
				s.h_sync_a_pol,
				s.h_total,
				s.v_total,
				s.underflow_occurred_status,
				s.blank_enabled);

		// Clear underflow for debug purposes
		// We want to keep underflow sticky bit on for the longevity tests outside of test environment.
		// This function is called only from Windows or Diags test environment, hence it's safe to clear
		// it from here without affecting the original intent.
		tg->funcs->clear_optc_underflow(tg);
	}
	DTN_INFO("\n");

	// dcn_dsc_state struct field bytes_per_pixel was renamed to bits_per_pixel
	// TODO: Update golden log header to reflect this name change
	DTN_INFO("DSC: CLOCK_EN  SLICE_WIDTH  Bytes_pp\n");
	for (i = 0; i < pool->res_cap->num_dsc; i++) {
		struct display_stream_compressor *dsc = pool->dscs[i];
		struct dcn_dsc_state s = {0};

		dsc->funcs->dsc_read_state(dsc, &s);
		DTN_INFO("[%d]: %-9d %-12d %-10d\n",
				dsc->inst,
				s.dsc_clock_en,
				s.dsc_slice_width,
				s.dsc_bits_per_pixel);
		DTN_INFO("\n");
	}
	DTN_INFO("\n");

	DTN_INFO("S_ENC: DSC_MODE  SEC_GSP7_LINE_NUM"
			"  VBID6_LINE_REFERENCE  VBID6_LINE_NUM  SEC_GSP7_ENABLE  SEC_STREAM_ENABLE\n");
	for (i = 0; i < pool->stream_enc_count; i++) {
		struct stream_encoder *enc = pool->stream_enc[i];
		struct enc_state s = {0};

		if (enc->funcs->enc_read_state) {
			enc->funcs->enc_read_state(enc, &s);
			DTN_INFO("[%-3d]: %-9d %-18d %-21d %-15d %-16d %-17d\n",
				enc->id,
				s.dsc_mode,
				s.sec_gsp_pps_line_num,
				s.vbid6_line_reference,
				s.vbid6_line_num,
				s.sec_gsp_pps_enable,
				s.sec_stream_enable);
			DTN_INFO("\n");
		}
	}
	DTN_INFO("\n");

	DTN_INFO("L_ENC: DPHY_FEC_EN  DPHY_FEC_READY_SHADOW  DPHY_FEC_ACTIVE_STATUS  DP_LINK_TRAINING_COMPLETE\n");
	for (i = 0; i < dc->link_count; i++) {
		struct link_encoder *lenc = dc->links[i]->link_enc;

		struct link_enc_state s = {0};

		if (lenc->funcs->read_state) {
			lenc->funcs->read_state(lenc, &s);
			DTN_INFO("[%-3d]: %-12d %-22d %-22d %-25d\n",
				i,
				s.dphy_fec_en,
				s.dphy_fec_ready_shadow,
				s.dphy_fec_active_status,
				s.dp_link_training_complete);
			DTN_INFO("\n");
		}
	}
	DTN_INFO("\n");

	DTN_INFO("\nCALCULATED Clocks: dcfclk_khz:%d  dcfclk_deep_sleep_khz:%d  dispclk_khz:%d\n"
		"dppclk_khz:%d  max_supported_dppclk_khz:%d  fclk_khz:%d  socclk_khz:%d\n\n",
			dc->current_state->bw_ctx.bw.dcn.clk.dcfclk_khz,
			dc->current_state->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz,
			dc->current_state->bw_ctx.bw.dcn.clk.dispclk_khz,
			dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz,
			dc->current_state->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz,
			dc->current_state->bw_ctx.bw.dcn.clk.fclk_khz,
			dc->current_state->bw_ctx.bw.dcn.clk.socclk_khz);

	log_mpc_crc(dc, log_ctx);

	DTN_INFO_END();
}
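/*
 * Check (and clear) the latched underflow status on both the OTG and the
 * HUBP for this pipe; returns true if either block reported an underflow.
 */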
bool dcn10_did_underflow_occur(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	struct timing_generator *tg = pipe_ctx->stream_res.tg;

	if (tg->funcs->is_optc_underflow_occurred(tg)) {
		tg->funcs->clear_optc_underflow(tg);
		return true;
	}

	if (hubp->funcs->hubp_get_underflow_status(hubp)) {
		hubp->funcs->hubp_clear_underflow(hubp);
		return true;
	}
	return false;
}
void dcn10_enable_power_gating_plane(
	struct dce_hwseq *hws,
	bool enable)
{
	bool force_on = true; /* disable power gating */

	if (enable)
		force_on = false;

	/* DCHUBP0/1/2/3 */
	REG_UPDATE(DOMAIN0_PG_CONFIG, DOMAIN0_POWER_FORCEON, force_on);
	REG_UPDATE(DOMAIN2_PG_CONFIG, DOMAIN2_POWER_FORCEON, force_on);
	REG_UPDATE(DOMAIN4_PG_CONFIG, DOMAIN4_POWER_FORCEON, force_on);
	REG_UPDATE(DOMAIN6_PG_CONFIG, DOMAIN6_POWER_FORCEON, force_on);

	/* DPP0/1/2/3 */
	REG_UPDATE(DOMAIN1_PG_CONFIG, DOMAIN1_POWER_FORCEON, force_on);
	REG_UPDATE(DOMAIN3_PG_CONFIG, DOMAIN3_POWER_FORCEON, force_on);
	REG_UPDATE(DOMAIN5_PG_CONFIG, DOMAIN5_POWER_FORCEON, force_on);
	REG_UPDATE(DOMAIN7_PG_CONFIG, DOMAIN7_POWER_FORCEON, force_on);
}
void dcn10_disable_vga(
	struct dce_hwseq *hws)
{
	unsigned int in_vga1_mode = 0;
	unsigned int in_vga2_mode = 0;
	unsigned int in_vga3_mode = 0;
	unsigned int in_vga4_mode = 0;

	REG_GET(D1VGA_CONTROL, D1VGA_MODE_ENABLE, &in_vga1_mode);
	REG_GET(D2VGA_CONTROL, D2VGA_MODE_ENABLE, &in_vga2_mode);
	REG_GET(D3VGA_CONTROL, D3VGA_MODE_ENABLE, &in_vga3_mode);
	REG_GET(D4VGA_CONTROL, D4VGA_MODE_ENABLE, &in_vga4_mode);

	if (in_vga1_mode == 0 && in_vga2_mode == 0 &&
			in_vga3_mode == 0 && in_vga4_mode == 0)
		return;

	REG_WRITE(D1VGA_CONTROL, 0);
	REG_WRITE(D2VGA_CONTROL, 0);
	REG_WRITE(D3VGA_CONTROL, 0);
	REG_WRITE(D4VGA_CONTROL, 0);

	/* HW Engineer's Notes:
	 *  During switch from vga->extended, if we set the VGA_TEST_ENABLE and
	 *  then hit the VGA_TEST_RENDER_START, then the DCHUBP timing gets updated correctly.
	 *
	 *  Then vBIOS will have it poll for the VGA_TEST_RENDER_DONE and unset
	 *  VGA_TEST_ENABLE, to leave it in the same state as before.
	 */
	REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_ENABLE, 1);
	REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_RENDER_START, 1);
}
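/*
 * PG domain mapping used by the two helpers below: even domains (0/2/4/6)
 * gate DCHUBP0-3 and odd domains (1/3/5/7) gate DPP0-3.  The PGFSM power
 * status field reads back 0 when the domain is powered on and 2 when it is
 * gated, which is what pwr_status encodes.
 */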
void dcn10_dpp_pg_control(
		struct dce_hwseq *hws,
		unsigned int dpp_inst,
		bool power_on)
{
	uint32_t power_gate = power_on ? 0 : 1;
	uint32_t pwr_status = power_on ? 0 : 2;

	if (hws->ctx->dc->debug.disable_dpp_power_gate)
		return;
	if (REG(DOMAIN1_PG_CONFIG) == 0)
		return;

	switch (dpp_inst) {
	case 0: /* DPP0 */
		REG_UPDATE(DOMAIN1_PG_CONFIG,
				DOMAIN1_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN1_PG_STATUS,
				DOMAIN1_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	case 1: /* DPP1 */
		REG_UPDATE(DOMAIN3_PG_CONFIG,
				DOMAIN3_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN3_PG_STATUS,
				DOMAIN3_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	case 2: /* DPP2 */
		REG_UPDATE(DOMAIN5_PG_CONFIG,
				DOMAIN5_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN5_PG_STATUS,
				DOMAIN5_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	case 3: /* DPP3 */
		REG_UPDATE(DOMAIN7_PG_CONFIG,
				DOMAIN7_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN7_PG_STATUS,
				DOMAIN7_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	default:
		BREAK_TO_DEBUGGER();
		break;
	}
}
void dcn10_hubp_pg_control(
		struct dce_hwseq *hws,
		unsigned int hubp_inst,
		bool power_on)
{
	uint32_t power_gate = power_on ? 0 : 1;
	uint32_t pwr_status = power_on ? 0 : 2;

	if (hws->ctx->dc->debug.disable_hubp_power_gate)
		return;
	if (REG(DOMAIN0_PG_CONFIG) == 0)
		return;

	switch (hubp_inst) {
	case 0: /* DCHUBP0 */
		REG_UPDATE(DOMAIN0_PG_CONFIG,
				DOMAIN0_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN0_PG_STATUS,
				DOMAIN0_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	case 1: /* DCHUBP1 */
		REG_UPDATE(DOMAIN2_PG_CONFIG,
				DOMAIN2_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN2_PG_STATUS,
				DOMAIN2_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	case 2: /* DCHUBP2 */
		REG_UPDATE(DOMAIN4_PG_CONFIG,
				DOMAIN4_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN4_PG_STATUS,
				DOMAIN4_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	case 3: /* DCHUBP3 */
		REG_UPDATE(DOMAIN6_PG_CONFIG,
				DOMAIN6_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN6_PG_STATUS,
				DOMAIN6_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	default:
		BREAK_TO_DEBUGGER();
		break;
	}
}
static void power_on_plane(
	struct dce_hwseq *hws,
	int plane_id)
{
	DC_LOGGER_INIT(hws->ctx->logger);
	if (REG(DC_IP_REQUEST_CNTL)) {
		REG_SET(DC_IP_REQUEST_CNTL, 0,
				IP_REQUEST_EN, 1);
		hws->funcs.dpp_pg_control(hws, plane_id, true);
		hws->funcs.hubp_pg_control(hws, plane_id, true);
		REG_SET(DC_IP_REQUEST_CNTL, 0,
				IP_REQUEST_EN, 0);
		DC_LOG_DEBUG(
				"Un-gated front end for pipe %d\n", plane_id);
	}
}
static void undo_DEGVIDCN10_253_wa(struct dc *dc)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct hubp *hubp = dc->res_pool->hubps[0];

	if (!hws->wa_state.DEGVIDCN10_253_applied)
		return;

	hubp->funcs->set_blank(hubp, true);

	REG_SET(DC_IP_REQUEST_CNTL, 0,
			IP_REQUEST_EN, 1);

	hws->funcs.hubp_pg_control(hws, 0, false);
	REG_SET(DC_IP_REQUEST_CNTL, 0,
			IP_REQUEST_EN, 0);

	hws->wa_state.DEGVIDCN10_253_applied = false;
}
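/*
 * DEGVIDCN10_253 workaround: stutter cannot be enabled while every HUBP is
 * power gated, so once all pipes are gated HUBP0 is powered back up and
 * unblanked.  undo_DEGVIDCN10_253_wa() above blanks and re-gates it again
 * before a pipe is brought back into use.
 */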
static void apply_DEGVIDCN10_253_wa(struct dc *dc)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct hubp *hubp = dc->res_pool->hubps[0];
	int i;

	if (dc->debug.disable_stutter)
		return;

	if (!hws->wa.DEGVIDCN10_253)
		return;

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		if (!dc->res_pool->hubps[i]->power_gated)
			return;
	}

	/* all pipe power gated, apply work around to enable stutter. */

	REG_SET(DC_IP_REQUEST_CNTL, 0,
			IP_REQUEST_EN, 1);

	hws->funcs.hubp_pg_control(hws, 0, true);
	REG_SET(DC_IP_REQUEST_CNTL, 0,
			IP_REQUEST_EN, 0);

	hubp->funcs->set_hubp_blank_en(hubp, false);
	hws->wa_state.DEGVIDCN10_253_applied = true;
}
void dcn10_bios_golden_init(struct dc *dc)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct dc_bios *bp = dc->ctx->dc_bios;
	int i;
	bool allow_self_fresh_force_enable = true;

	if (hws->funcs.s0i3_golden_init_wa && hws->funcs.s0i3_golden_init_wa(dc))
		return;

	if (dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled)
		allow_self_fresh_force_enable =
				dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled(dc->res_pool->hubbub);

	/* WA for making DF sleep when idle after resume from S0i3.
	 * DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE is set to 1 by
	 * command table, if DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE = 0
	 * before calling command table and it changed to 1 after,
	 * it should be set back to 0.
	 */

	/* initialize dcn global */
	bp->funcs->enable_disp_power_gating(bp,
			CONTROLLER_ID_D0, ASIC_PIPE_INIT);

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		/* initialize dcn per pipe */
		bp->funcs->enable_disp_power_gating(bp,
				CONTROLLER_ID_D0 + i, ASIC_PIPE_DISABLE);
	}

	if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
		if (allow_self_fresh_force_enable == false &&
				dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled(dc->res_pool->hubbub))
			dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
					!dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);
}
static void false_optc_underflow_wa(
		struct dc *dc,
		const struct dc_stream_state *stream,
		struct timing_generator *tg)
{
	int i;
	bool underflow;

	if (!dc->hwseq->wa.false_optc_underflow)
		return;

	underflow = tg->funcs->is_optc_underflow_occurred(tg);

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];

		if (old_pipe_ctx->stream != stream)
			continue;

		dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, old_pipe_ctx);
	}

	if (tg->funcs->set_blank_data_double_buffer)
		tg->funcs->set_blank_data_double_buffer(tg, true);

	if (tg->funcs->is_optc_underflow_occurred(tg) && !underflow)
		tg->funcs->clear_optc_underflow(tg);
}
enum dc_status dcn10_enable_stream_timing(
		struct pipe_ctx *pipe_ctx,
		struct dc_state *context,
		struct dc *dc)
{
	struct dc_stream_state *stream = pipe_ctx->stream;
	enum dc_color_space color_space;
	struct tg_color black_color = {0};

	/* by upper caller loop, pipe0 is parent pipe and be called first.
	 * back end is set up by for pipe0. Other children pipe share back end
	 * with pipe 0. No program is needed.
	 */
	if (pipe_ctx->top_pipe != NULL)
		return DC_OK;

	/* TODO check if timing_changed, disable stream if timing changed */

	/* HW program guide assume display already disable
	 * by unplug sequence. OTG assume stop.
	 */
	pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, true);

	if (false == pipe_ctx->clock_source->funcs->program_pix_clk(
			pipe_ctx->clock_source,
			&pipe_ctx->stream_res.pix_clk_params,
			&pipe_ctx->pll_settings)) {
		BREAK_TO_DEBUGGER();
		return DC_ERROR_UNEXPECTED;
	}

	pipe_ctx->stream_res.tg->funcs->program_timing(
			pipe_ctx->stream_res.tg,
			&stream->timing,
			pipe_ctx->pipe_dlg_param.vready_offset,
			pipe_ctx->pipe_dlg_param.vstartup_start,
			pipe_ctx->pipe_dlg_param.vupdate_offset,
			pipe_ctx->pipe_dlg_param.vupdate_width,
			pipe_ctx->stream->signal,
			true);

#if 0 /* move to after enable_crtc */
	/* TODO: OPP FMT, ABM. etc. should be done here. */
	/* or FPGA now. instance 0 only. TODO: move to opp.c */

	inst_offset = reg_offsets[pipe_ctx->stream_res.tg->inst].fmt;

	pipe_ctx->stream_res.opp->funcs->opp_program_fmt(
				pipe_ctx->stream_res.opp,
				&stream->bit_depth_params,
				&stream->clamping);
#endif
	/* program otg blank color */
	color_space = stream->output_color_space;
	color_space_to_black_color(dc, color_space, &black_color);

	/*
	 * The way 420 is packed, 2 channels carry Y component, 1 channel
	 * alternate between Cb and Cr, so both channels need the pixel
	 * value for Y
	 */
	if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
		black_color.color_r_cr = black_color.color_g_y;

	if (pipe_ctx->stream_res.tg->funcs->set_blank_color)
		pipe_ctx->stream_res.tg->funcs->set_blank_color(
				pipe_ctx->stream_res.tg,
				&black_color);

	if (pipe_ctx->stream_res.tg->funcs->is_blanked &&
			!pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg)) {
		pipe_ctx->stream_res.tg->funcs->set_blank(pipe_ctx->stream_res.tg, true);
		hwss_wait_for_blank_complete(pipe_ctx->stream_res.tg);
		false_optc_underflow_wa(dc, pipe_ctx->stream, pipe_ctx->stream_res.tg);
	}

	/* VTG is within DCHUB command block. DCFCLK is always on */
	if (false == pipe_ctx->stream_res.tg->funcs->enable_crtc(pipe_ctx->stream_res.tg)) {
		BREAK_TO_DEBUGGER();
		return DC_ERROR_UNEXPECTED;
	}

	/* TODO program crtc source select for non-virtual signal*/
	/* TODO program FMT */
	/* TODO setup link_enc */
	/* TODO set stream attributes */
	/* TODO program audio */
	/* TODO enable stream if timing changed */
	/* TODO unblank stream if DP */

	return DC_OK;
}
static void dcn10_reset_back_end_for_pipe(
		struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		struct dc_state *context)
{
	int i;
	struct dc_link *link;
	DC_LOGGER_INIT(dc->ctx->logger);
	if (pipe_ctx->stream_res.stream_enc == NULL) {
		pipe_ctx->stream = NULL;
		return;
	}

	if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
		link = pipe_ctx->stream->link;
		/* DPMS may already disable or */
		/* dpms_off status is incorrect due to fastboot
		 * feature. When system resume from S4 with second
		 * screen only, the dpms_off would be true but
		 * VBIOS lit up eDP, so check link status too.
		 */
		if (!pipe_ctx->stream->dpms_off || link->link_status.link_active)
			core_link_disable_stream(pipe_ctx);
		else if (pipe_ctx->stream_res.audio)
			dc->hwss.disable_audio_stream(pipe_ctx);

		if (pipe_ctx->stream_res.audio) {
			/*disable az_endpoint*/
			pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);

			/*free audio*/
			if (dc->caps.dynamic_audio == true) {
				/*we have to dynamic arbitrate the audio endpoints*/
				/*we free the resource, need reset is_audio_acquired*/
				update_audio_usage(&dc->current_state->res_ctx, dc->res_pool,
						pipe_ctx->stream_res.audio, false);
				pipe_ctx->stream_res.audio = NULL;
			}
		}
	}

	/* by upper caller loop, parent pipe: pipe0, will be reset last.
	 * back end share by all pipes and will be disable only when disable
	 * parent pipe.
	 */
	if (pipe_ctx->top_pipe == NULL) {

		if (pipe_ctx->stream_res.abm)
			dc->hwss.set_abm_immediate_disable(pipe_ctx);

		pipe_ctx->stream_res.tg->funcs->disable_crtc(pipe_ctx->stream_res.tg);

		pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, false);
		if (pipe_ctx->stream_res.tg->funcs->set_drr)
			pipe_ctx->stream_res.tg->funcs->set_drr(
					pipe_ctx->stream_res.tg, NULL);
	}

	for (i = 0; i < dc->res_pool->pipe_count; i++)
		if (&dc->current_state->res_ctx.pipe_ctx[i] == pipe_ctx)
			break;

	if (i == dc->res_pool->pipe_count)
		return;

	pipe_ctx->stream = NULL;
	DC_LOG_DEBUG("Reset back end for pipe %d, tg:%d\n",
					pipe_ctx->pipe_idx, pipe_ctx->stream_res.tg->inst);
}
static bool dcn10_hw_wa_force_recovery(struct dc *dc)
{
	struct hubp *hubp;
	unsigned int i;
	bool need_recover = true;

	if (!dc->debug.recovery_enabled)
		return false;

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe_ctx =
			&dc->current_state->res_ctx.pipe_ctx[i];
		if (pipe_ctx != NULL) {
			hubp = pipe_ctx->plane_res.hubp;
			if (hubp != NULL && hubp->funcs->hubp_get_underflow_status) {
				if (hubp->funcs->hubp_get_underflow_status(hubp) != 0) {
					/* one pipe underflow, we will reset all the pipes*/
					need_recover = true;
				}
			}
		}
	}
	if (!need_recover)
		return false;
	/*
	DCHUBP_CNTL:HUBP_BLANK_EN=1
	DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=1
	DCHUBP_CNTL:HUBP_DISABLE=1
	DCHUBP_CNTL:HUBP_DISABLE=0
	DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=0
	DCSURF_PRIMARY_SURFACE_ADDRESS
	DCHUBP_CNTL:HUBP_BLANK_EN=0
	*/

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe_ctx =
			&dc->current_state->res_ctx.pipe_ctx[i];
		if (pipe_ctx != NULL) {
			hubp = pipe_ctx->plane_res.hubp;
			/*DCHUBP_CNTL:HUBP_BLANK_EN=1*/
			if (hubp != NULL && hubp->funcs->set_hubp_blank_en)
				hubp->funcs->set_hubp_blank_en(hubp, true);
		}
	}
	/*DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=1*/
	hubbub1_soft_reset(dc->res_pool->hubbub, true);

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe_ctx =
			&dc->current_state->res_ctx.pipe_ctx[i];
		if (pipe_ctx != NULL) {
			hubp = pipe_ctx->plane_res.hubp;
			/*DCHUBP_CNTL:HUBP_DISABLE=1*/
			if (hubp != NULL && hubp->funcs->hubp_disable_control)
				hubp->funcs->hubp_disable_control(hubp, true);
		}
	}
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe_ctx =
			&dc->current_state->res_ctx.pipe_ctx[i];
		if (pipe_ctx != NULL) {
			hubp = pipe_ctx->plane_res.hubp;
			/*DCHUBP_CNTL:HUBP_DISABLE=0*/
			if (hubp != NULL && hubp->funcs->hubp_disable_control)
				hubp->funcs->hubp_disable_control(hubp, false);
		}
	}
	/*DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=0*/
	hubbub1_soft_reset(dc->res_pool->hubbub, false);
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe_ctx =
			&dc->current_state->res_ctx.pipe_ctx[i];
		if (pipe_ctx != NULL) {
			hubp = pipe_ctx->plane_res.hubp;
			/*DCHUBP_CNTL:HUBP_BLANK_EN=0*/
			if (hubp != NULL && hubp->funcs->set_hubp_blank_en)
				hubp->funcs->set_hubp_blank_en(hubp, false);
		}
	}

	return true;
}
void dcn10_verify_allow_pstate_change_high(struct dc *dc)
{
	static bool should_log_hw_state; /* prevent hw state log by default */

	if (!hubbub1_verify_allow_pstate_change_high(dc->res_pool->hubbub)) {
		int i = 0;

		if (should_log_hw_state)
			dcn10_log_hw_state(dc, NULL);

		TRACE_DC_PIPE_STATE(pipe_ctx, i, MAX_PIPES);
		BREAK_TO_DEBUGGER();
		if (dcn10_hw_wa_force_recovery(dc)) {
			/*check again*/
			if (!hubbub1_verify_allow_pstate_change_high(dc->res_pool->hubbub))
				BREAK_TO_DEBUGGER();
		}
	}
}
/* trigger HW to start disconnect plane from stream on the next vsync */
void dcn10_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	int dpp_id = pipe_ctx->plane_res.dpp->inst;
	struct mpc *mpc = dc->res_pool->mpc;
	struct mpc_tree *mpc_tree_params;
	struct mpcc *mpcc_to_remove = NULL;
	struct output_pixel_processor *opp = pipe_ctx->stream_res.opp;

	mpc_tree_params = &(opp->mpc_tree_params);
	mpcc_to_remove = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, dpp_id);

	/* Already reset */
	if (mpcc_to_remove == NULL)
		return;

	mpc->funcs->remove_mpcc(mpc, mpc_tree_params, mpcc_to_remove);
	opp->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;

	dc->optimized_required = true;

	if (hubp->funcs->hubp_disconnect)
		hubp->funcs->hubp_disconnect(hubp);

	if (dc->debug.sanity_checks)
		hws->funcs.verify_allow_pstate_change_high(dc);
}
void dcn10_plane_atomic_power_down(struct dc *dc,
		struct dpp *dpp,
		struct hubp *hubp)
{
	struct dce_hwseq *hws = dc->hwseq;
	DC_LOGGER_INIT(dc->ctx->logger);

	if (REG(DC_IP_REQUEST_CNTL)) {
		REG_SET(DC_IP_REQUEST_CNTL, 0,
				IP_REQUEST_EN, 1);
		hws->funcs.dpp_pg_control(hws, dpp->inst, false);
		hws->funcs.hubp_pg_control(hws, hubp->inst, false);
		dpp->funcs->dpp_reset(dpp);
		REG_SET(DC_IP_REQUEST_CNTL, 0,
				IP_REQUEST_EN, 0);
		DC_LOG_DEBUG(
				"Power gated front end %d\n", hubp->inst);
	}
}
/* disable HW used by plane.
 * note:  cannot disable until disconnect is complete
 */
void dcn10_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	struct dpp *dpp = pipe_ctx->plane_res.dpp;
	int opp_id = hubp->opp_id;

	dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe_ctx);

	hubp->funcs->hubp_clk_cntl(hubp, false);

	dpp->funcs->dpp_dppclk_control(dpp, false, false);

	if (opp_id != 0xf && pipe_ctx->stream_res.opp->mpc_tree_params.opp_list == NULL)
		pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
				pipe_ctx->stream_res.opp,
				false);

	hubp->power_gated = true;
	dc->optimized_required = false; /* We're powering off, no need to optimize */

	hws->funcs.plane_atomic_power_down(dc,
			pipe_ctx->plane_res.dpp,
			pipe_ctx->plane_res.hubp);

	pipe_ctx->stream = NULL;
	memset(&pipe_ctx->stream_res, 0, sizeof(pipe_ctx->stream_res));
	memset(&pipe_ctx->plane_res, 0, sizeof(pipe_ctx->plane_res));
	pipe_ctx->top_pipe = NULL;
	pipe_ctx->bottom_pipe = NULL;
	pipe_ctx->plane_state = NULL;
}
void dcn10_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
	struct dce_hwseq *hws = dc->hwseq;
	DC_LOGGER_INIT(dc->ctx->logger);

	if (!pipe_ctx->plane_res.hubp || pipe_ctx->plane_res.hubp->power_gated)
		return;

	hws->funcs.plane_atomic_disable(dc, pipe_ctx);

	apply_DEGVIDCN10_253_wa(dc);

	DC_LOG_DC("Power down front end %d\n",
					pipe_ctx->pipe_idx);
}
void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
{
	int i;
	struct dce_hwseq *hws = dc->hwseq;
	bool can_apply_seamless_boot = false;

	for (i = 0; i < context->stream_count; i++) {
		if (context->streams[i]->apply_seamless_boot_optimization) {
			can_apply_seamless_boot = true;
			break;
		}
	}

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct timing_generator *tg = dc->res_pool->timing_generators[i];
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		/* There is assumption that pipe_ctx is not mapping irregularly
		 * to non-preferred front end. If pipe_ctx->stream is not NULL,
		 * we will use the pipe, so don't disable
		 */
		if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
			continue;

		/* Blank controller using driver code instead of
		 * command table.
		 */
		if (tg->funcs->is_tg_enabled(tg)) {
			if (hws->funcs.init_blank != NULL) {
				hws->funcs.init_blank(dc, tg);
				tg->funcs->lock(tg);
			} else {
				tg->funcs->lock(tg);
				tg->funcs->set_blank(tg, true);
				hwss_wait_for_blank_complete(tg);
			}
		}
	}

	/* num_opp will be equal to number of mpcc */
	for (i = 0; i < dc->res_pool->res_cap->num_opp; i++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		/* Cannot reset the MPC mux if seamless boot */
		if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
			continue;

		dc->res_pool->mpc->funcs->mpc_init_single_inst(
				dc->res_pool->mpc, i);
	}

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct timing_generator *tg = dc->res_pool->timing_generators[i];
		struct hubp *hubp = dc->res_pool->hubps[i];
		struct dpp *dpp = dc->res_pool->dpps[i];
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		/* There is assumption that pipe_ctx is not mapping irregularly
		 * to non-preferred front end. If pipe_ctx->stream is not NULL,
		 * we will use the pipe, so don't disable
		 */
		if (can_apply_seamless_boot &&
			pipe_ctx->stream != NULL &&
			pipe_ctx->stream_res.tg->funcs->is_tg_enabled(
				pipe_ctx->stream_res.tg)) {
			// Enable double buffering for OTG_BLANK no matter if
			// seamless boot is enabled or not to suppress global sync
			// signals when OTG blanked. This is to prevent pipe from
			// requesting data while in PSR.
			tg->funcs->tg_init(tg);
			continue;
		}

		/* Disable on the current state so the new one isn't cleared. */
		pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];

		dpp->funcs->dpp_reset(dpp);

		pipe_ctx->stream_res.tg = tg;
		pipe_ctx->pipe_idx = i;

		pipe_ctx->plane_res.hubp = hubp;
		pipe_ctx->plane_res.dpp = dpp;
		pipe_ctx->plane_res.mpcc_inst = dpp->inst;
		hubp->mpcc_id = dpp->inst;
		hubp->opp_id = OPP_ID_INVALID;
		hubp->power_gated = false;

		dc->res_pool->opps[i]->mpc_tree_params.opp_id = dc->res_pool->opps[i]->inst;
		dc->res_pool->opps[i]->mpc_tree_params.opp_list = NULL;
		dc->res_pool->opps[i]->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
		pipe_ctx->stream_res.opp = dc->res_pool->opps[i];

		hws->funcs.plane_atomic_disconnect(dc, pipe_ctx);

		if (tg->funcs->is_tg_enabled(tg))
			tg->funcs->unlock(tg);

		dc->hwss.disable_plane(dc, pipe_ctx);

		pipe_ctx->stream_res.tg = NULL;
		pipe_ctx->plane_res.hubp = NULL;

		tg->funcs->tg_init(tg);
	}
}
void dcn10_init_hw(struct dc *dc)
{
	int i, j;
	struct abm *abm = dc->res_pool->abm;
	struct dmcu *dmcu = dc->res_pool->dmcu;
	struct dce_hwseq *hws = dc->hwseq;
	struct dc_bios *dcb = dc->ctx->dc_bios;
	struct resource_pool *res_pool = dc->res_pool;
	uint32_t backlight = MAX_BACKLIGHT_LEVEL;
	bool is_optimized_init_done = false;

	if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks)
		dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);

	// Initialize the dccg
	if (dc->res_pool->dccg && dc->res_pool->dccg->funcs->dccg_init)
		dc->res_pool->dccg->funcs->dccg_init(res_pool->dccg);

	if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {

		REG_WRITE(REFCLK_CNTL, 0);
		REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, 1);
		REG_WRITE(DIO_MEM_PWR_CTRL, 0);

		if (!dc->debug.disable_clock_gate) {
			/* enable all DCN clock gating */
			REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);

			REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);

			REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
		}

		//Enable ability to power gate / don't force power on permanently
		if (hws->funcs.enable_power_gating_plane)
			hws->funcs.enable_power_gating_plane(hws, true);

		return;
	}

	if (!dcb->funcs->is_accelerated_mode(dcb))
		hws->funcs.disable_vga(dc->hwseq);

	hws->funcs.bios_golden_init(dc);

	if (dc->ctx->dc_bios->fw_info_valid) {
		res_pool->ref_clocks.xtalin_clock_inKhz =
				dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency;

		if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
			if (res_pool->dccg && res_pool->hubbub) {

				(res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg,
						dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency,
						&res_pool->ref_clocks.dccg_ref_clock_inKhz);

				(res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub,
						res_pool->ref_clocks.dccg_ref_clock_inKhz,
						&res_pool->ref_clocks.dchub_ref_clock_inKhz);
			} else {
				// Not all ASICs have DCCG sw component
				res_pool->ref_clocks.dccg_ref_clock_inKhz =
						res_pool->ref_clocks.xtalin_clock_inKhz;
				res_pool->ref_clocks.dchub_ref_clock_inKhz =
						res_pool->ref_clocks.xtalin_clock_inKhz;
			}
		}
	} else
		ASSERT_CRITICAL(false);

	for (i = 0; i < dc->link_count; i++) {
		/* Power up AND update implementation according to the
		 * required signal (which may be different from the
		 * default signal on connector).
		 */
		struct dc_link *link = dc->links[i];

		if (!is_optimized_init_done)
			link->link_enc->funcs->hw_init(link->link_enc);

		/* Check for enabled DIG to identify enabled display */
		if (link->link_enc->funcs->is_dig_enabled &&
			link->link_enc->funcs->is_dig_enabled(link->link_enc))
			link->link_status.link_active = true;
	}

	/* Power gate DSCs */
	if (!is_optimized_init_done) {
		for (i = 0; i < res_pool->res_cap->num_dsc; i++)
			if (hws->funcs.dsc_pg_control != NULL)
				hws->funcs.dsc_pg_control(hws, res_pool->dscs[i]->inst, false);
	}

	/* we want to turn off all dp displays before doing detection */
	if (dc->config.power_down_display_on_boot) {
		uint8_t dpcd_power_state = '\0';
		enum dc_status status = DC_ERROR_UNEXPECTED;

		for (i = 0; i < dc->link_count; i++) {
			if (dc->links[i]->connector_signal != SIGNAL_TYPE_DISPLAY_PORT)
				continue;

			/*
			 * If any of the displays are lit up turn them off.
			 * The reason is that some MST hubs cannot be turned off
			 * completely until we tell them to do so.
			 * If not turned off, then displays connected to MST hub
			 * won't light up.
			 */
			status = core_link_read_dpcd(dc->links[i], DP_SET_POWER,
							&dpcd_power_state, sizeof(dpcd_power_state));
			if (status == DC_OK && dpcd_power_state == DP_POWER_STATE_D0) {
				/* blank dp stream before power off receiver*/
				if (dc->links[i]->link_enc->funcs->get_dig_frontend) {
					unsigned int fe = dc->links[i]->link_enc->funcs->get_dig_frontend(dc->links[i]->link_enc);

					for (j = 0; j < dc->res_pool->stream_enc_count; j++) {
						if (fe == dc->res_pool->stream_enc[j]->id) {
							dc->res_pool->stream_enc[j]->funcs->dp_blank(
										dc->res_pool->stream_enc[j]);
							break;
						}
					}
				}
				dp_receiver_power_ctrl(dc->links[i], false);
			}
		}
	}

	/* If taking control over from VBIOS, we may want to optimize our first
	 * mode set, so we need to skip powering down pipes until we know which
	 * pipes we want to use.
	 * Otherwise, if taking control is not possible, we need to power
	 * everything down.
	 */
	if (dcb->funcs->is_accelerated_mode(dcb) || dc->config.power_down_display_on_boot) {
		if (!is_optimized_init_done) {
			hws->funcs.init_pipes(dc, dc->current_state);
			if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
				dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
						!dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);
		}
	}

	if (!is_optimized_init_done) {

		for (i = 0; i < res_pool->audio_count; i++) {
			struct audio *audio = res_pool->audios[i];

			audio->funcs->hw_init(audio);
		}

		for (i = 0; i < dc->link_count; i++) {
			struct dc_link *link = dc->links[i];

			if (link->panel_cntl)
				backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl);
		}

		if (abm != NULL)
			abm->funcs->abm_init(abm, backlight);

		if (dmcu != NULL && !dmcu->auto_load_dmcu)
			dmcu->funcs->dmcu_init(dmcu);
	}

	if (abm != NULL && dmcu != NULL)
		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);

	/* power AFMT HDMI memory TODO: may move to dis/en output save power*/
	if (!is_optimized_init_done)
		REG_WRITE(DIO_MEM_PWR_CTRL, 0);

	if (!dc->debug.disable_clock_gate) {
		/* enable all DCN clock gating */
		REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);

		REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);

		REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
	}
	if (hws->funcs.enable_power_gating_plane)
		hws->funcs.enable_power_gating_plane(dc->hwseq, true);

	if (dc->clk_mgr->funcs->notify_wm_ranges)
		dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr);
}
/* In headless boot cases, DIG may be turned
 * on which causes HW/SW discrepancies.
 * To avoid this, power down hardware on boot
 * if DIG is turned on
 */
void dcn10_power_down_on_boot(struct dc *dc)
{
	int i = 0;
	struct dc_link *edp_link;

	edp_link = get_edp_link(dc);
	if (edp_link &&
			edp_link->link_enc->funcs->is_dig_enabled &&
			edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) &&
			dc->hwseq->funcs.edp_backlight_control &&
			dc->hwss.power_down &&
			dc->hwss.edp_power_control) {
		dc->hwseq->funcs.edp_backlight_control(edp_link, false);
		dc->hwss.power_down(dc);
		dc->hwss.edp_power_control(edp_link, false);
	} else {
		for (i = 0; i < dc->link_count; i++) {
			struct dc_link *link = dc->links[i];

			if (link->link_enc->funcs->is_dig_enabled &&
					link->link_enc->funcs->is_dig_enabled(link->link_enc) &&
					dc->hwss.power_down) {
				dc->hwss.power_down(dc);
				break;
			}
		}
	}

	/*
	 * Call update_clocks with empty context
	 * to send DISPLAY_OFF
	 * Otherwise DISPLAY_OFF may not be asserted
	 */
	if (dc->clk_mgr->funcs->set_low_power_state)
		dc->clk_mgr->funcs->set_low_power_state(dc->clk_mgr);
}
void dcn10_reset_hw_ctx_wrap(
		struct dc *dc,
		struct dc_state *context)
{
	int i;
	struct dce_hwseq *hws = dc->hwseq;

	/* Reset Back End */
	for (i = dc->res_pool->pipe_count - 1; i >= 0 ; i--) {
		struct pipe_ctx *pipe_ctx_old =
			&dc->current_state->res_ctx.pipe_ctx[i];
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		if (!pipe_ctx_old->stream)
			continue;

		if (pipe_ctx_old->top_pipe)
			continue;

		if (!pipe_ctx->stream ||
				pipe_need_reprogram(pipe_ctx_old, pipe_ctx)) {
			struct clock_source *old_clk = pipe_ctx_old->clock_source;

			dcn10_reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state);
			if (hws->funcs.enable_stream_gating)
				hws->funcs.enable_stream_gating(dc, pipe_ctx);
			if (old_clk)
				old_clk->funcs->cs_power_down(old_clk);
		}
	}
}
static bool patch_address_for_sbs_tb_stereo(
		struct pipe_ctx *pipe_ctx, PHYSICAL_ADDRESS_LOC *addr)
{
	struct dc_plane_state *plane_state = pipe_ctx->plane_state;
	bool sec_split = pipe_ctx->top_pipe &&
			pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state;
	if (sec_split && plane_state->address.type == PLN_ADDR_TYPE_GRPH_STEREO &&
		(pipe_ctx->stream->timing.timing_3d_format ==
		 TIMING_3D_FORMAT_SIDE_BY_SIDE ||
		 pipe_ctx->stream->timing.timing_3d_format ==
		 TIMING_3D_FORMAT_TOP_AND_BOTTOM)) {
		*addr = plane_state->address.grph_stereo.left_addr;
		plane_state->address.grph_stereo.left_addr =
				plane_state->address.grph_stereo.right_addr;
		return true;
	} else {
		if (pipe_ctx->stream->view_format != VIEW_3D_FORMAT_NONE &&
			plane_state->address.type != PLN_ADDR_TYPE_GRPH_STEREO) {
			plane_state->address.type = PLN_ADDR_TYPE_GRPH_STEREO;
			plane_state->address.grph_stereo.right_addr =
					plane_state->address.grph_stereo.left_addr;
			plane_state->address.grph_stereo.right_meta_addr =
					plane_state->address.grph_stereo.left_meta_addr;
		}
	}
	return false;
}
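/*
 * Program the new surface address, first swapping in the right-eye address
 * when the secondary split pipe is scanning the other half of an SBS/TAB
 * stereo surface; the original left address is restored afterwards.
 */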
void dcn10_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx)
{
	bool addr_patched = false;
	PHYSICAL_ADDRESS_LOC addr;
	struct dc_plane_state *plane_state = pipe_ctx->plane_state;

	if (plane_state == NULL)
		return;

	addr_patched = patch_address_for_sbs_tb_stereo(pipe_ctx, &addr);

	pipe_ctx->plane_res.hubp->funcs->hubp_program_surface_flip_and_addr(
			pipe_ctx->plane_res.hubp,
			&plane_state->address,
			plane_state->flip_immediate);

	plane_state->status.requested_address = plane_state->address;

	if (plane_state->flip_immediate)
		plane_state->status.current_address = plane_state->address;

	if (addr_patched)
		pipe_ctx->plane_state->address.grph_stereo.left_addr = addr;
}
bool dcn10_set_input_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
			const struct dc_plane_state *plane_state)
{
	struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
	const struct dc_transfer_func *tf = NULL;
	bool result = true;

	if (dpp_base == NULL)
		return false;

	if (plane_state->in_transfer_func)
		tf = plane_state->in_transfer_func;

	if (plane_state->gamma_correction &&
		!dpp_base->ctx->dc->debug.always_use_regamma
		&& !plane_state->gamma_correction->is_identity
			&& dce_use_lut(plane_state->format))
		dpp_base->funcs->dpp_program_input_lut(dpp_base, plane_state->gamma_correction);

	if (tf == NULL)
		dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
	else if (tf->type == TF_TYPE_PREDEFINED) {
		switch (tf->tf) {
		case TRANSFER_FUNCTION_SRGB:
			dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_HW_sRGB);
			break;
		case TRANSFER_FUNCTION_BT709:
			dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_HW_xvYCC);
			break;
		case TRANSFER_FUNCTION_LINEAR:
			dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
			break;
		case TRANSFER_FUNCTION_PQ:
			dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_USER_PWL);
			cm_helper_translate_curve_to_degamma_hw_format(tf, &dpp_base->degamma_params);
			dpp_base->funcs->dpp_program_degamma_pwl(dpp_base, &dpp_base->degamma_params);
			result = true;
			break;
		default:
			result = false;
			break;
		}
	} else if (tf->type == TF_TYPE_BYPASS) {
		dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
	} else {
		cm_helper_translate_curve_to_degamma_hw_format(tf,
					&dpp_base->degamma_params);
		dpp_base->funcs->dpp_program_degamma_pwl(dpp_base,
				&dpp_base->degamma_params);
		result = true;
	}

	return result;
}
#define MAX_NUM_HW_POINTS 0x200

static void log_tf(struct dc_context *ctx,
				struct dc_transfer_func *tf, uint32_t hw_points_num)
{
	// DC_LOG_GAMMA is default logging of all hw points
	// DC_LOG_ALL_GAMMA logs all points, not only hw points
	// DC_LOG_ALL_TF_POINTS logs all channels of the tf
	int i = 0;

	DC_LOGGER_INIT(ctx->logger);
	DC_LOG_GAMMA("Gamma Correction TF");
	DC_LOG_ALL_GAMMA("Logging all tf points...");
	DC_LOG_ALL_TF_CHANNELS("Logging all channels...");

	for (i = 0; i < hw_points_num; i++) {
		DC_LOG_GAMMA("R\t%d\t%llu", i, tf->tf_pts.red[i].value);
		DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu", i, tf->tf_pts.green[i].value);
		DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu", i, tf->tf_pts.blue[i].value);
	}

	for (i = hw_points_num; i < MAX_NUM_HW_POINTS; i++) {
		DC_LOG_ALL_GAMMA("R\t%d\t%llu", i, tf->tf_pts.red[i].value);
		DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu", i, tf->tf_pts.green[i].value);
		DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu", i, tf->tf_pts.blue[i].value);
	}
}
bool dcn10_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
				const struct dc_stream_state *stream)
{
	struct dpp *dpp = pipe_ctx->plane_res.dpp;

	if (dpp == NULL)
		return false;

	dpp->regamma_params.hw_points_num = GAMMA_HW_POINTS_NUM;

	if (stream->out_transfer_func &&
	    stream->out_transfer_func->type == TF_TYPE_PREDEFINED &&
	    stream->out_transfer_func->tf == TRANSFER_FUNCTION_SRGB)
		dpp->funcs->dpp_program_regamma_pwl(dpp, NULL, OPP_REGAMMA_SRGB);

	/* dcn10_translate_regamma_to_hw_format takes 750us, only do it when full
	 * update.
	 */
	else if (cm_helper_translate_curve_to_hw_format(
			stream->out_transfer_func,
			&dpp->regamma_params, false)) {
		dpp->funcs->dpp_program_regamma_pwl(
				dpp,
				&dpp->regamma_params, OPP_REGAMMA_USER);
	} else
		dpp->funcs->dpp_program_regamma_pwl(dpp, NULL, OPP_REGAMMA_BYPASS);

	if (stream != NULL && stream->ctx != NULL &&
			stream->out_transfer_func != NULL) {
		log_tf(stream->ctx,
				stream->out_transfer_func,
				dpp->regamma_params.hw_points_num);
	}

	return true;
}
void dcn10_pipe_control_lock(
	struct dc *dc,
	struct pipe_ctx *pipe,
	bool lock)
{
	struct dce_hwseq *hws = dc->hwseq;

	/* use TG master update lock to lock everything on the TG
	 * therefore only top pipe need to lock
	 */
	if (!pipe || pipe->top_pipe)
		return;

	if (dc->debug.sanity_checks)
		hws->funcs.verify_allow_pstate_change_high(dc);

	if (lock)
		pipe->stream_res.tg->funcs->lock(pipe->stream_res.tg);
	else
		pipe->stream_res.tg->funcs->unlock(pipe->stream_res.tg);

	if (dc->debug.sanity_checks)
		hws->funcs.verify_allow_pstate_change_high(dc);
}
/**
 * delay_cursor_until_vupdate() - Delay cursor update if too close to VUPDATE.
 *
 * Software keepout workaround to prevent cursor update locking from stalling
 * out cursor updates indefinitely or from old values from being retained in
 * the case where the viewport changes in the same frame as the cursor.
 *
 * The idea is to calculate the remaining time from VPOS to VUPDATE. If it's
 * too close to VUPDATE, then stall out until VUPDATE finishes.
 *
 * TODO: Optimize cursor programming to be once per frame before VUPDATE
 *       to avoid the need for this workaround.
 */
static void delay_cursor_until_vupdate(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
	struct dc_stream_state *stream = pipe_ctx->stream;
	struct crtc_position position;
	uint32_t vupdate_start, vupdate_end;
	unsigned int lines_to_vupdate, us_to_vupdate, vpos;
	unsigned int us_per_line, us_vupdate;

	if (!dc->hwss.calc_vupdate_position || !dc->hwss.get_position)
		return;

	if (!pipe_ctx->stream_res.stream_enc || !pipe_ctx->stream_res.tg)
		return;

	dc->hwss.calc_vupdate_position(dc, pipe_ctx, &vupdate_start,
				       &vupdate_end);

	dc->hwss.get_position(&pipe_ctx, 1, &position);
	vpos = position.vertical_count;

	/* Avoid wraparound calculation issues */
	vupdate_start += stream->timing.v_total;
	vupdate_end += stream->timing.v_total;
	vpos += stream->timing.v_total;

	if (vpos <= vupdate_start) {
		/* VPOS is in VACTIVE or back porch. */
		lines_to_vupdate = vupdate_start - vpos;
	} else if (vpos > vupdate_end) {
		/* VPOS is in the front porch. */
		return;
	} else {
		/* VPOS is in VUPDATE. */
		lines_to_vupdate = 0;
	}

	/* Calculate time until VUPDATE in microseconds. */
	us_per_line =
		stream->timing.h_total * 10000u / stream->timing.pix_clk_100hz;
	us_to_vupdate = lines_to_vupdate * us_per_line;

	/* 70 us is a conservative estimate of cursor update time*/
	if (us_to_vupdate > 70)
		return;

	/* Stall out until the cursor update completes. */
	if (vupdate_end < vupdate_start)
		vupdate_end += stream->timing.v_total;
	us_vupdate = (vupdate_end - vupdate_start + 1) * us_per_line;
	udelay(us_to_vupdate + us_vupdate);
}
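/*
 * Cursor locking: when should_use_dmub_lock() selects it, the request is
 * routed through the DMUB HW lock manager; otherwise the MPC cursor lock is
 * toggled directly.  Taking the lock is delayed when too close to VUPDATE
 * (see delay_cursor_until_vupdate() above).
 */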
void dcn10_cursor_lock(struct dc *dc, struct pipe_ctx *pipe, bool lock)
{
	/* cursor lock is per MPCC tree, so only need to lock one pipe per stream */
	if (!pipe || pipe->top_pipe)
		return;

	/* Prevent cursor lock from stalling out cursor updates. */
	if (lock)
		delay_cursor_until_vupdate(dc, pipe);

	if (pipe->stream && should_use_dmub_lock(pipe->stream->link)) {
		union dmub_hw_lock_flags hw_locks = { 0 };
		struct dmub_hw_lock_inst_flags inst_flags = { 0 };

		hw_locks.bits.lock_cursor = 1;
		inst_flags.opp_inst = pipe->stream_res.opp->inst;

		dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv,
					lock,
					&hw_locks,
					&inst_flags);
	} else
		dc->res_pool->mpc->funcs->cursor_lock(dc->res_pool->mpc,
				pipe->stream_res.opp->inst, lock);
}
static bool wait_for_reset_trigger_to_occur(
	struct dc_context *dc_ctx,
	struct timing_generator *tg)
{
	bool rc = false;

	/* To avoid endless loop we wait at most
	 * frames_to_wait_on_triggered_reset frames for the reset to occur. */
	const uint32_t frames_to_wait_on_triggered_reset = 10;
	int i;

	for (i = 0; i < frames_to_wait_on_triggered_reset; i++) {

		if (!tg->funcs->is_counter_moving(tg)) {
			DC_ERROR("TG counter is not moving!\n");
			break;
		}

		if (tg->funcs->did_triggered_reset_occur(tg)) {
			rc = true;
			/* usually occurs at i=1 */
			DC_SYNC_INFO("GSL: reset occurred at wait count: %d\n",
					i);
			break;
		}

		/* Wait for one frame. */
		tg->funcs->wait_for_state(tg, CRTC_STATE_VACTIVE);
		tg->funcs->wait_for_state(tg, CRTC_STATE_VBLANK);
	}

	if (!rc)
		DC_ERROR("GSL: Timeout on reset trigger!\n");

	return rc;
}
void dcn10_enable_timing_synchronization(
	struct dc *dc,
	int group_index,
	int group_size,
	struct pipe_ctx *grouped_pipes[])
{
	struct dc_context *dc_ctx = dc->ctx;
	struct output_pixel_processor *opp;
	struct timing_generator *tg;
	int i, width, height;

	DC_SYNC_INFO("Setting up OTG reset trigger\n");

	for (i = 1; i < group_size; i++) {
		opp = grouped_pipes[i]->stream_res.opp;
		tg = grouped_pipes[i]->stream_res.tg;
		tg->funcs->get_otg_active_size(tg, &width, &height);
		if (opp->funcs->opp_program_dpg_dimensions)
			opp->funcs->opp_program_dpg_dimensions(opp, width, 2*(height) + 1);
	}

	for (i = 1; i < group_size; i++)
		grouped_pipes[i]->stream_res.tg->funcs->enable_reset_trigger(
				grouped_pipes[i]->stream_res.tg,
				grouped_pipes[0]->stream_res.tg->inst);

	DC_SYNC_INFO("Waiting for trigger\n");

	/* Need to get only check 1 pipe for having reset as all the others are
	 * synchronized. Look at last pipe programmed to reset.
	 */

	wait_for_reset_trigger_to_occur(dc_ctx, grouped_pipes[1]->stream_res.tg);
	for (i = 1; i < group_size; i++)
		grouped_pipes[i]->stream_res.tg->funcs->disable_reset_trigger(
				grouped_pipes[i]->stream_res.tg);

	for (i = 1; i < group_size; i++) {
		opp = grouped_pipes[i]->stream_res.opp;
		tg = grouped_pipes[i]->stream_res.tg;
		tg->funcs->get_otg_active_size(tg, &width, &height);
		if (opp->funcs->opp_program_dpg_dimensions)
			opp->funcs->opp_program_dpg_dimensions(opp, width, height);
	}

	DC_SYNC_INFO("Sync complete\n");
}
void dcn10_enable_per_frame_crtc_position_reset(
	struct dc *dc,
	int group_size,
	struct pipe_ctx *grouped_pipes[])
{
	struct dc_context *dc_ctx = dc->ctx;
	int i;

	DC_SYNC_INFO("Setting up\n");
	for (i = 0; i < group_size; i++)
		if (grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset)
			grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset(
					grouped_pipes[i]->stream_res.tg,
					grouped_pipes[i]->stream->triggered_crtc_reset.event_source->inst,
					&grouped_pipes[i]->stream->triggered_crtc_reset);

	DC_SYNC_INFO("Waiting for trigger\n");

	for (i = 0; i < group_size; i++)
		wait_for_reset_trigger_to_occur(dc_ctx, grouped_pipes[i]->stream_res.tg);

	DC_SYNC_INFO("Multi-display sync is complete\n");
}
/*static void print_rq_dlg_ttu(
		struct dc *dc,
		struct pipe_ctx *pipe_ctx)
{
	DC_LOG_BANDWIDTH_CALCS(dc->ctx->logger,
			"\n============== DML TTU Output parameters [%d] ==============\n"
			"qos_level_low_wm: %d, \n"
			"qos_level_high_wm: %d, \n"
			"min_ttu_vblank: %d, \n"
			"qos_level_flip: %d, \n"
			"refcyc_per_req_delivery_l: %d, \n"
			"qos_level_fixed_l: %d, \n"
			"qos_ramp_disable_l: %d, \n"
			"refcyc_per_req_delivery_pre_l: %d, \n"
			"refcyc_per_req_delivery_c: %d, \n"
			"qos_level_fixed_c: %d, \n"
			"qos_ramp_disable_c: %d, \n"
			"refcyc_per_req_delivery_pre_c: %d\n"
			"=============================================================\n",
			pipe_ctx->pipe_idx,
			pipe_ctx->ttu_regs.qos_level_low_wm,
			pipe_ctx->ttu_regs.qos_level_high_wm,
			pipe_ctx->ttu_regs.min_ttu_vblank,
			pipe_ctx->ttu_regs.qos_level_flip,
			pipe_ctx->ttu_regs.refcyc_per_req_delivery_l,
			pipe_ctx->ttu_regs.qos_level_fixed_l,
			pipe_ctx->ttu_regs.qos_ramp_disable_l,
			pipe_ctx->ttu_regs.refcyc_per_req_delivery_pre_l,
			pipe_ctx->ttu_regs.refcyc_per_req_delivery_c,
			pipe_ctx->ttu_regs.qos_level_fixed_c,
			pipe_ctx->ttu_regs.qos_ramp_disable_c,
			pipe_ctx->ttu_regs.refcyc_per_req_delivery_pre_c
			);

	DC_LOG_BANDWIDTH_CALCS(dc->ctx->logger,
			"\n============== DML DLG Output parameters [%d] ==============\n"
			"refcyc_h_blank_end: %d, \n"
			"dlg_vblank_end: %d, \n"
			"min_dst_y_next_start: %d, \n"
			"refcyc_per_htotal: %d, \n"
			"refcyc_x_after_scaler: %d, \n"
			"dst_y_after_scaler: %d, \n"
			"dst_y_prefetch: %d, \n"
			"dst_y_per_vm_vblank: %d, \n"
			"dst_y_per_row_vblank: %d, \n"
			"ref_freq_to_pix_freq: %d, \n"
			"vratio_prefetch: %d, \n"
			"refcyc_per_pte_group_vblank_l: %d, \n"
			"refcyc_per_meta_chunk_vblank_l: %d, \n"
			"dst_y_per_pte_row_nom_l: %d, \n"
			"refcyc_per_pte_group_nom_l: %d, \n",
			pipe_ctx->pipe_idx,
			pipe_ctx->dlg_regs.refcyc_h_blank_end,
			pipe_ctx->dlg_regs.dlg_vblank_end,
			pipe_ctx->dlg_regs.min_dst_y_next_start,
			pipe_ctx->dlg_regs.refcyc_per_htotal,
			pipe_ctx->dlg_regs.refcyc_x_after_scaler,
			pipe_ctx->dlg_regs.dst_y_after_scaler,
			pipe_ctx->dlg_regs.dst_y_prefetch,
			pipe_ctx->dlg_regs.dst_y_per_vm_vblank,
			pipe_ctx->dlg_regs.dst_y_per_row_vblank,
			pipe_ctx->dlg_regs.ref_freq_to_pix_freq,
			pipe_ctx->dlg_regs.vratio_prefetch,
			pipe_ctx->dlg_regs.refcyc_per_pte_group_vblank_l,
			pipe_ctx->dlg_regs.refcyc_per_meta_chunk_vblank_l,
			pipe_ctx->dlg_regs.dst_y_per_pte_row_nom_l,
			pipe_ctx->dlg_regs.refcyc_per_pte_group_nom_l
			);

	DC_LOG_BANDWIDTH_CALCS(dc->ctx->logger,
			"\ndst_y_per_meta_row_nom_l: %d, \n"
			"refcyc_per_meta_chunk_nom_l: %d, \n"
			"refcyc_per_line_delivery_pre_l: %d, \n"
			"refcyc_per_line_delivery_l: %d, \n"
			"vratio_prefetch_c: %d, \n"
			"refcyc_per_pte_group_vblank_c: %d, \n"
			"refcyc_per_meta_chunk_vblank_c: %d, \n"
			"dst_y_per_pte_row_nom_c: %d, \n"
			"refcyc_per_pte_group_nom_c: %d, \n"
			"dst_y_per_meta_row_nom_c: %d, \n"
			"refcyc_per_meta_chunk_nom_c: %d, \n"
			"refcyc_per_line_delivery_pre_c: %d, \n"
			"refcyc_per_line_delivery_c: %d \n"
			"========================================================\n",
			pipe_ctx->dlg_regs.dst_y_per_meta_row_nom_l,
			pipe_ctx->dlg_regs.refcyc_per_meta_chunk_nom_l,
			pipe_ctx->dlg_regs.refcyc_per_line_delivery_pre_l,
			pipe_ctx->dlg_regs.refcyc_per_line_delivery_l,
			pipe_ctx->dlg_regs.vratio_prefetch_c,
			pipe_ctx->dlg_regs.refcyc_per_pte_group_vblank_c,
			pipe_ctx->dlg_regs.refcyc_per_meta_chunk_vblank_c,
			pipe_ctx->dlg_regs.dst_y_per_pte_row_nom_c,
			pipe_ctx->dlg_regs.refcyc_per_pte_group_nom_c,
			pipe_ctx->dlg_regs.dst_y_per_meta_row_nom_c,
			pipe_ctx->dlg_regs.refcyc_per_meta_chunk_nom_c,
			pipe_ctx->dlg_regs.refcyc_per_line_delivery_pre_c,
			pipe_ctx->dlg_regs.refcyc_per_line_delivery_c
			);

	DC_LOG_BANDWIDTH_CALCS(dc->ctx->logger,
			"\n============== DML RQ Output parameters [%d] ==============\n"
			"chunk_size: %d \n"
			"min_chunk_size: %d \n"
			"meta_chunk_size: %d \n"
			"min_meta_chunk_size: %d \n"
			"dpte_group_size: %d \n"
			"mpte_group_size: %d \n"
			"swath_height: %d \n"
			"pte_row_height_linear: %d \n"
			"========================================================\n",
			pipe_ctx->pipe_idx,
			pipe_ctx->rq_regs.rq_regs_l.chunk_size,
			pipe_ctx->rq_regs.rq_regs_l.min_chunk_size,
			pipe_ctx->rq_regs.rq_regs_l.meta_chunk_size,
			pipe_ctx->rq_regs.rq_regs_l.min_meta_chunk_size,
			pipe_ctx->rq_regs.rq_regs_l.dpte_group_size,
			pipe_ctx->rq_regs.rq_regs_l.mpte_group_size,
			pipe_ctx->rq_regs.rq_regs_l.swath_height,
			pipe_ctx->rq_regs.rq_regs_l.pte_row_height_linear
			);
}
*/
static void mmhub_read_vm_system_aperture_settings(struct dcn10_hubp *hubp1,
		struct vm_system_aperture_param *apt,
		struct dce_hwseq *hws)
{
	PHYSICAL_ADDRESS_LOC physical_page_number;
	uint32_t logical_addr_low;
	uint32_t logical_addr_high;

	REG_GET(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
			PHYSICAL_PAGE_NUMBER_MSB, &physical_page_number.high_part);
	REG_GET(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
			PHYSICAL_PAGE_NUMBER_LSB, &physical_page_number.low_part);

	REG_GET(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
			LOGICAL_ADDR, &logical_addr_low);

	REG_GET(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
			LOGICAL_ADDR, &logical_addr_high);

	apt->sys_default.quad_part = physical_page_number.quad_part << 12;
	apt->sys_low.quad_part = (int64_t)logical_addr_low << 18;
	apt->sys_high.quad_part = (int64_t)logical_addr_high << 18;
}
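/*
 * Shift arithmetic above (illustrative note, inferred from the shifts rather
 * than the register specs): the default-address registers hold a 4 KiB page
 * frame number, so "<< 12" converts it to a byte address, while "<< 18"
 * rescales the LOW/HIGH aperture fields from 256 KiB units. For example a
 * page number of 0x100000 maps to byte address 0x100000 << 12 = 4 GiB.
 */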
/* Temporary read settings, future will get values from kmd directly */
static void mmhub_read_vm_context0_settings(struct dcn10_hubp *hubp1,
		struct vm_context0_param *vm0,
		struct dce_hwseq *hws)
{
	PHYSICAL_ADDRESS_LOC fb_base;
	PHYSICAL_ADDRESS_LOC fb_offset;
	uint32_t fb_base_value;
	uint32_t fb_offset_value;

	REG_GET(DCHUBBUB_SDPIF_FB_BASE, SDPIF_FB_BASE, &fb_base_value);
	REG_GET(DCHUBBUB_SDPIF_FB_OFFSET, SDPIF_FB_OFFSET, &fb_offset_value);

	REG_GET(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
			PAGE_DIRECTORY_ENTRY_HI32, &vm0->pte_base.high_part);
	REG_GET(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
			PAGE_DIRECTORY_ENTRY_LO32, &vm0->pte_base.low_part);

	REG_GET(VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
			LOGICAL_PAGE_NUMBER_HI4, &vm0->pte_start.high_part);
	REG_GET(VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
			LOGICAL_PAGE_NUMBER_LO32, &vm0->pte_start.low_part);

	REG_GET(VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
			LOGICAL_PAGE_NUMBER_HI4, &vm0->pte_end.high_part);
	REG_GET(VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
			LOGICAL_PAGE_NUMBER_LO32, &vm0->pte_end.low_part);

	REG_GET(VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
			PHYSICAL_PAGE_ADDR_HI4, &vm0->fault_default.high_part);
	REG_GET(VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
			PHYSICAL_PAGE_ADDR_LO32, &vm0->fault_default.low_part);

	/*
	 * The values in VM_CONTEXT0_PAGE_TABLE_BASE_ADDR are in UMA space.
	 * Therefore we need to do
	 * DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR = VM_CONTEXT0_PAGE_TABLE_BASE_ADDR
	 * - DCHUBBUB_SDPIF_FB_OFFSET + DCHUBBUB_SDPIF_FB_BASE
	 */
	fb_base.quad_part = (uint64_t)fb_base_value << 24;
	fb_offset.quad_part = (uint64_t)fb_offset_value << 24;
	vm0->pte_base.quad_part += fb_base.quad_part;
	vm0->pte_base.quad_part -= fb_offset.quad_part;
}
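/*
 * Note on the "<< 24" above (illustrative): DCHUBBUB_SDPIF_FB_BASE/OFFSET are
 * expressed in 16 MiB units, so a register value of 0x10 corresponds to a
 * byte address of 0x10 << 24 = 256 MiB when rebasing the context0 page table
 * base from UMA space into the frame-buffer aperture.
 */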
void dcn10_program_pte_vm(struct dce_hwseq *hws, struct hubp *hubp)
{
	struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
	struct vm_system_aperture_param apt = { {{ 0 } } };
	struct vm_context0_param vm0 = { { { 0 } } };

	mmhub_read_vm_system_aperture_settings(hubp1, &apt, hws);
	mmhub_read_vm_context0_settings(hubp1, &vm0, hws);

	hubp->funcs->hubp_set_vm_system_aperture_settings(hubp, &apt);
	hubp->funcs->hubp_set_vm_context0_settings(hubp, &vm0);
}
static void dcn10_enable_plane(
	struct dc *dc,
	struct pipe_ctx *pipe_ctx,
	struct dc_state *context)
{
	struct dce_hwseq *hws = dc->hwseq;

	if (dc->debug.sanity_checks) {
		hws->funcs.verify_allow_pstate_change_high(dc);
	}

	undo_DEGVIDCN10_253_wa(dc);

	power_on_plane(dc->hwseq,
		pipe_ctx->plane_res.hubp->inst);

	/* enable DCFCLK current DCHUB */
	pipe_ctx->plane_res.hubp->funcs->hubp_clk_cntl(pipe_ctx->plane_res.hubp, true);

	/* make sure OPP_PIPE_CLOCK_EN = 1 */
	pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
			pipe_ctx->stream_res.opp,
			true);

/* TODO: enable/disable in dm as per update type.
	if (plane_state) {
		DC_LOG_DC(dc->ctx->logger,
				"Pipe:%d 0x%x: addr hi:0x%x, "
				"addr low:0x%x, "
				"src: %d, %d, %d,"
				" %d; dst: %d, %d, %d, %d;\n",
				pipe_ctx->pipe_idx,
				plane_state,
				plane_state->address.grph.addr.high_part,
				plane_state->address.grph.addr.low_part,
				plane_state->src_rect.x,
				plane_state->src_rect.y,
				plane_state->src_rect.width,
				plane_state->src_rect.height,
				plane_state->dst_rect.x,
				plane_state->dst_rect.y,
				plane_state->dst_rect.width,
				plane_state->dst_rect.height);

		DC_LOG_DC(dc->ctx->logger,
				"Pipe %d: width, height, x, y         format:%d\n"
				"viewport:%d, %d, %d, %d\n"
				"recout:  %d, %d, %d, %d\n",
				pipe_ctx->pipe_idx,
				plane_state->format,
				pipe_ctx->plane_res.scl_data.viewport.width,
				pipe_ctx->plane_res.scl_data.viewport.height,
				pipe_ctx->plane_res.scl_data.viewport.x,
				pipe_ctx->plane_res.scl_data.viewport.y,
				pipe_ctx->plane_res.scl_data.recout.width,
				pipe_ctx->plane_res.scl_data.recout.height,
				pipe_ctx->plane_res.scl_data.recout.x,
				pipe_ctx->plane_res.scl_data.recout.y);
		print_rq_dlg_ttu(dc, pipe_ctx);
	}
*/
	if (dc->config.gpu_vm_support)
		dcn10_program_pte_vm(hws, pipe_ctx->plane_res.hubp);

	if (dc->debug.sanity_checks) {
		hws->funcs.verify_allow_pstate_change_high(dc);
	}
}
void dcn10_program_gamut_remap(struct pipe_ctx *pipe_ctx)
{
	int i = 0;
	struct dpp_grph_csc_adjustment adjust;
	memset(&adjust, 0, sizeof(adjust));
	adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;

	if (pipe_ctx->stream->gamut_remap_matrix.enable_remap == true) {
		adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
		for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
			adjust.temperature_matrix[i] =
				pipe_ctx->stream->gamut_remap_matrix.matrix[i];
	} else if (pipe_ctx->plane_state &&
		   pipe_ctx->plane_state->gamut_remap_matrix.enable_remap == true) {
		adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
		for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
			adjust.temperature_matrix[i] =
				pipe_ctx->plane_state->gamut_remap_matrix.matrix[i];
	}

	pipe_ctx->plane_res.dpp->funcs->dpp_set_gamut_remap(pipe_ctx->plane_res.dpp, &adjust);
}
static bool dcn10_is_rear_mpo_fix_required(struct pipe_ctx *pipe_ctx, enum dc_color_space colorspace)
{
	if (pipe_ctx->plane_state && pipe_ctx->plane_state->layer_index > 0 && is_rgb_cspace(colorspace)) {
		if (pipe_ctx->top_pipe) {
			struct pipe_ctx *top = pipe_ctx->top_pipe;

			while (top->top_pipe)
				top = top->top_pipe; // Traverse to top pipe_ctx
			if (top->plane_state && top->plane_state->layer_index == 0)
				return true; // Front MPO plane not hidden
		}
	}
	return false;
}
static void dcn10_set_csc_adjustment_rgb_mpo_fix(struct pipe_ctx *pipe_ctx, uint16_t *matrix)
{
	// Override rear plane RGB bias to fix MPO brightness
	uint16_t rgb_bias = matrix[3];

	matrix[3] = 0;
	matrix[7] = 0;
	matrix[11] = 0;

	pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment(pipe_ctx->plane_res.dpp, matrix);

	matrix[3] = rgb_bias;
	matrix[7] = rgb_bias;
	matrix[11] = rgb_bias;
}
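/*
 * Summary of the sequence above: the caller-owned matrix is left unchanged
 * once programming completes; only the value written to the DPP has the
 * R/G/B bias entries (matrix[3], [7] and [11]) forced to zero.
 */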
void dcn10_program_output_csc(struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		enum dc_color_space colorspace,
		uint16_t *matrix,
		int opp_id)
{
	if (pipe_ctx->stream->csc_color_matrix.enable_adjustment == true) {
		if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment != NULL) {

			/* MPO is broken with RGB colorspaces when OCSC matrix
			 * brightness offset >= 0 on DCN1 due to OCSC before MPC
			 * Blending adds offsets from front + rear to rear plane
			 *
			 * Fix is to set RGB bias to 0 on rear plane, top plane
			 * black value pixels add offset instead of rear + front
			 */

			int16_t rgb_bias = matrix[3];
			// matrix[3/7/11] are all the same offset value

			if (rgb_bias > 0 && dcn10_is_rear_mpo_fix_required(pipe_ctx, colorspace)) {
				dcn10_set_csc_adjustment_rgb_mpo_fix(pipe_ctx, matrix);
			} else {
				pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment(pipe_ctx->plane_res.dpp, matrix);
			}
		}
	} else {
		if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_default != NULL)
			pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_default(pipe_ctx->plane_res.dpp, colorspace);
	}
}
void dcn10_get_surface_visual_confirm_color(
		const struct pipe_ctx *pipe_ctx,
		struct tg_color *color)
{
	uint32_t color_value = MAX_TG_COLOR_VALUE;

	switch (pipe_ctx->plane_res.scl_data.format) {
	case PIXEL_FORMAT_ARGB8888:
		/* set border color to red */
		color->color_r_cr = color_value;
		break;

	case PIXEL_FORMAT_ARGB2101010:
		/* set border color to blue */
		color->color_b_cb = color_value;
		break;
	case PIXEL_FORMAT_420BPP8:
		/* set border color to green */
		color->color_g_y = color_value;
		break;
	case PIXEL_FORMAT_420BPP10:
		/* set border color to yellow */
		color->color_g_y = color_value;
		color->color_r_cr = color_value;
		break;
	case PIXEL_FORMAT_FP16:
		/* set border color to white */
		color->color_r_cr = color_value;
		color->color_b_cb = color_value;
		color->color_g_y = color_value;
		break;
	default:
		break;
	}
}
void dcn10_get_hdr_visual_confirm_color(
		struct pipe_ctx *pipe_ctx,
		struct tg_color *color)
{
	uint32_t color_value = MAX_TG_COLOR_VALUE;

	// Determine the overscan color based on the top-most (desktop) plane's context
	struct pipe_ctx *top_pipe_ctx = pipe_ctx;

	while (top_pipe_ctx->top_pipe != NULL)
		top_pipe_ctx = top_pipe_ctx->top_pipe;

	switch (top_pipe_ctx->plane_res.scl_data.format) {
	case PIXEL_FORMAT_ARGB2101010:
		if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_PQ) {
			/* HDR10, ARGB2101010 - set border color to red */
			color->color_r_cr = color_value;
		} else if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22) {
			/* FreeSync 2 ARGB2101010 - set border color to pink */
			color->color_r_cr = color_value;
			color->color_b_cb = color_value;
		}
		break;
	case PIXEL_FORMAT_FP16:
		if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_PQ) {
			/* HDR10, FP16 - set border color to blue */
			color->color_b_cb = color_value;
		} else if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22) {
			/* FreeSync 2 HDR - set border color to green */
			color->color_g_y = color_value;
		}
		break;
	default:
		/* SDR - set border color to Gray */
		color->color_r_cr = color_value/2;
		color->color_b_cb = color_value/2;
		color->color_g_y = color_value/2;
		break;
	}
}
static void dcn10_update_dpp(struct dpp *dpp, struct dc_plane_state *plane_state)
{
	struct dc_bias_and_scale bns_params = {0};

	// program the input csc
	dpp->funcs->dpp_setup(dpp,
			plane_state->format,
			EXPANSION_MODE_ZERO,
			plane_state->input_csc_color_matrix,
			plane_state->color_space,
			NULL);

	//set scale and bias registers
	build_prescale_params(&bns_params, plane_state);
	if (dpp->funcs->dpp_program_bias_and_scale)
		dpp->funcs->dpp_program_bias_and_scale(dpp, &bns_params);
}
void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	struct mpcc_blnd_cfg blnd_cfg = {{0}};
	bool per_pixel_alpha = pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
	int mpcc_id;
	struct mpcc *new_mpcc;
	struct mpc *mpc = dc->res_pool->mpc;
	struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params);

	if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR) {
		hws->funcs.get_hdr_visual_confirm_color(
				pipe_ctx, &blnd_cfg.black_color);
	} else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE) {
		hws->funcs.get_surface_visual_confirm_color(
				pipe_ctx, &blnd_cfg.black_color);
	} else {
		color_space_to_black_color(
				dc, pipe_ctx->stream->output_color_space,
				&blnd_cfg.black_color);
	}

	if (per_pixel_alpha)
		blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
	else
		blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;

	blnd_cfg.overlap_only = false;
	blnd_cfg.global_gain = 0xff;

	if (pipe_ctx->plane_state->global_alpha)
		blnd_cfg.global_alpha = pipe_ctx->plane_state->global_alpha_value;
	else
		blnd_cfg.global_alpha = 0xff;

	/* DCN1.0 has output CM before MPC which seems to screw with
	 * pre-multiplied alpha.
	 */
	blnd_cfg.pre_multiplied_alpha = is_rgb_cspace(
			pipe_ctx->stream->output_color_space)
					&& per_pixel_alpha;

	/*
	 * Note: currently there is a bug in init_hw such that
	 * on resume from hibernate, BIOS sets up MPCC0, and
	 * we do mpcc_remove but the mpcc cannot go to idle
	 * after remove. This cause us to pick mpcc1 here,
	 * which causes a pstate hang for yet unknown reason.
	 */
	mpcc_id = hubp->inst;

	/* If there is no full update, don't need to touch MPC tree*/
	if (!pipe_ctx->plane_state->update_flags.bits.full_update) {
		mpc->funcs->update_blending(mpc, &blnd_cfg, mpcc_id);
		return;
	}

	/* check if this MPCC is already being used */
	new_mpcc = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, mpcc_id);
	/* remove MPCC if being used */
	if (new_mpcc != NULL)
		mpc->funcs->remove_mpcc(mpc, mpc_tree_params, new_mpcc);
	else
		if (dc->debug.sanity_checks)
			mpc->funcs->assert_mpcc_idle_before_connect(
					dc->res_pool->mpc, mpcc_id);

	/* Call MPC to insert new plane */
	new_mpcc = mpc->funcs->insert_plane(dc->res_pool->mpc,
			mpc_tree_params,
			&blnd_cfg,
			NULL,
			NULL,
			hubp->inst,
			mpcc_id);

	ASSERT(new_mpcc != NULL);

	hubp->opp_id = pipe_ctx->stream_res.opp->inst;
	hubp->mpcc_id = mpcc_id;
}
static void update_scaler(struct pipe_ctx *pipe_ctx)
{
	bool per_pixel_alpha =
			pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;

	pipe_ctx->plane_res.scl_data.lb_params.alpha_en = per_pixel_alpha;
	pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_30BPP;
	/* scaler configuration */
	pipe_ctx->plane_res.dpp->funcs->dpp_set_scaler(
			pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data);
}
static void dcn10_update_dchubp_dpp(
	struct dc *dc,
	struct pipe_ctx *pipe_ctx,
	struct dc_state *context)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	struct dpp *dpp = pipe_ctx->plane_res.dpp;
	struct dc_plane_state *plane_state = pipe_ctx->plane_state;
	struct plane_size size = plane_state->plane_size;
	unsigned int compat_level = 0;
	bool should_divided_by_2 = false;

	/* depends on DML calculation, DPP clock value may change dynamically */
	/* If the requested max dpp clk is lower than current dispclk, no need to
	 * divide by 2
	 */
	if (plane_state->update_flags.bits.full_update) {

		/* new calculated dispclk, dppclk are stored in
		 * context->bw_ctx.bw.dcn.clk.dispclk_khz / dppclk_khz. current
		 * dispclk, dppclk are from dc->clk_mgr->clks.dispclk_khz.
		 * dcn_validate_bandwidth compute new dispclk, dppclk.
		 * dispclk will put in use after optimize_bandwidth when
		 * ramp_up_dispclk_with_dpp is called.
		 * there are two places for dppclk be put in use. One location
		 * is the same as the location as dispclk. Another is within
		 * update_dchubp_dpp which happens between pre_bandwidth and
		 * optimize_bandwidth.
		 * dppclk updated within update_dchubp_dpp will cause new
		 * clock values of dispclk and dppclk not be in use at the same
		 * time. when clocks are decreased, this may cause dppclk is
		 * lower than previous configuration and let pipe stuck.
		 * for example, eDP + external dp, change resolution of DP from
		 * 1920x1080x144hz to 1280x960x60hz.
		 * before change: dispclk = 337889 dppclk = 337889
		 * change mode, dcn_validate_bandwidth calculate
		 *              dispclk = 143122 dppclk = 143122
		 * update_dchubp_dpp be executed before dispclk be updated,
		 * dispclk = 337889, but dppclk use new value dispclk /2 =
		 * 168944. this will cause pipe pstate warning issue.
		 * solution: between pre_bandwidth and optimize_bandwidth, while
		 * dispclk is going to be decreased, keep dppclk = dispclk
		 */
		if (context->bw_ctx.bw.dcn.clk.dispclk_khz <
				dc->clk_mgr->clks.dispclk_khz)
			should_divided_by_2 = false;
		else
			should_divided_by_2 =
					context->bw_ctx.bw.dcn.clk.dppclk_khz <=
					dc->clk_mgr->clks.dispclk_khz / 2;

		dpp->funcs->dpp_dppclk_control(
				dpp,
				should_divided_by_2,
				true);

		if (dc->res_pool->dccg)
			dc->res_pool->dccg->funcs->update_dpp_dto(
					dc->res_pool->dccg,
					dpp->inst,
					pipe_ctx->plane_res.bw.dppclk_khz);
		else
			dc->clk_mgr->clks.dppclk_khz = should_divided_by_2 ?
						dc->clk_mgr->clks.dispclk_khz / 2 :
							dc->clk_mgr->clks.dispclk_khz;
	}

	/* TODO: Need input parameter to tell current DCHUB pipe tie to which OTG
	 * VTG is within DCHUBBUB which is common block shared by each pipe HUBP.
	 * VTG is 1:1 mapping with OTG. Each pipe HUBP will select which VTG
	 */
	if (plane_state->update_flags.bits.full_update) {
		hubp->funcs->hubp_vtg_sel(hubp, pipe_ctx->stream_res.tg->inst);

		hubp->funcs->hubp_setup(
			hubp,
			&pipe_ctx->dlg_regs,
			&pipe_ctx->ttu_regs,
			&pipe_ctx->rq_regs,
			&pipe_ctx->pipe_dlg_param);
		hubp->funcs->hubp_setup_interdependent(
			hubp,
			&pipe_ctx->dlg_regs,
			&pipe_ctx->ttu_regs);
	}

	size.surface_size = pipe_ctx->plane_res.scl_data.viewport;

	if (plane_state->update_flags.bits.full_update ||
		plane_state->update_flags.bits.bpp_change)
		dcn10_update_dpp(dpp, plane_state);

	if (plane_state->update_flags.bits.full_update ||
		plane_state->update_flags.bits.per_pixel_alpha_change ||
		plane_state->update_flags.bits.global_alpha_change)
		hws->funcs.update_mpcc(dc, pipe_ctx);

	if (plane_state->update_flags.bits.full_update ||
		plane_state->update_flags.bits.per_pixel_alpha_change ||
		plane_state->update_flags.bits.global_alpha_change ||
		plane_state->update_flags.bits.scaling_change ||
		plane_state->update_flags.bits.position_change) {
		update_scaler(pipe_ctx);
	}

	if (plane_state->update_flags.bits.full_update ||
		plane_state->update_flags.bits.scaling_change ||
		plane_state->update_flags.bits.position_change) {
		hubp->funcs->mem_program_viewport(
			hubp,
			&pipe_ctx->plane_res.scl_data.viewport,
			&pipe_ctx->plane_res.scl_data.viewport_c);
	}

	if (pipe_ctx->stream->cursor_attributes.address.quad_part != 0) {
		dc->hwss.set_cursor_position(pipe_ctx);
		dc->hwss.set_cursor_attribute(pipe_ctx);

		if (dc->hwss.set_cursor_sdr_white_level)
			dc->hwss.set_cursor_sdr_white_level(pipe_ctx);
	}

	if (plane_state->update_flags.bits.full_update) {
		/* gamut remap */
		dc->hwss.program_gamut_remap(pipe_ctx);

		dc->hwss.program_output_csc(dc,
				pipe_ctx,
				pipe_ctx->stream->output_color_space,
				pipe_ctx->stream->csc_color_matrix.matrix,
				pipe_ctx->stream_res.opp->inst);
	}

	if (plane_state->update_flags.bits.full_update ||
		plane_state->update_flags.bits.pixel_format_change ||
		plane_state->update_flags.bits.horizontal_mirror_change ||
		plane_state->update_flags.bits.rotation_change ||
		plane_state->update_flags.bits.swizzle_change ||
		plane_state->update_flags.bits.dcc_change ||
		plane_state->update_flags.bits.bpp_change ||
		plane_state->update_flags.bits.scaling_change ||
		plane_state->update_flags.bits.plane_size_change) {
		hubp->funcs->hubp_program_surface_config(
			hubp,
			plane_state->format,
			&plane_state->tiling_info,
			&size,
			plane_state->rotation,
			&plane_state->dcc,
			plane_state->horizontal_mirror,
			compat_level);
	}

	hubp->power_gated = false;

	hws->funcs.update_plane_addr(dc, pipe_ctx);

	if (is_pipe_tree_visible(pipe_ctx))
		hubp->funcs->set_blank(hubp, false);
}
void dcn10_blank_pixel_data(
		struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		bool blank)
{
	enum dc_color_space color_space;
	struct tg_color black_color = {0};
	struct stream_resource *stream_res = &pipe_ctx->stream_res;
	struct dc_stream_state *stream = pipe_ctx->stream;

	/* program otg blank color */
	color_space = stream->output_color_space;
	color_space_to_black_color(dc, color_space, &black_color);

	/*
	 * The way 420 is packed, 2 channels carry Y component, 1 channel
	 * alternate between Cb and Cr, so both channels need the pixel
	 * value for Y
	 */
	if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
		black_color.color_r_cr = black_color.color_g_y;


	if (stream_res->tg->funcs->set_blank_color)
		stream_res->tg->funcs->set_blank_color(
				stream_res->tg,
				&black_color);

	if (!blank) {
		if (stream_res->tg->funcs->set_blank)
			stream_res->tg->funcs->set_blank(stream_res->tg, blank);
		if (stream_res->abm) {
			dc->hwss.set_pipe(pipe_ctx);
			stream_res->abm->funcs->set_abm_level(stream_res->abm, stream->abm_level);
		}
	} else {
		dc->hwss.set_abm_immediate_disable(pipe_ctx);
		if (stream_res->tg->funcs->set_blank) {
			stream_res->tg->funcs->wait_for_state(stream_res->tg, CRTC_STATE_VBLANK);
			stream_res->tg->funcs->set_blank(stream_res->tg, blank);
		}
	}
}
void dcn10_set_hdr_multiplier(struct pipe_ctx *pipe_ctx)
{
	struct fixed31_32 multiplier = pipe_ctx->plane_state->hdr_mult;
	uint32_t hw_mult = 0x1f000; // 1.0 default multiplier
	struct custom_float_format fmt;

	fmt.exponenta_bits = 6;
	fmt.mantissa_bits = 12;
	fmt.sign = true;

	if (!dc_fixpt_eq(multiplier, dc_fixpt_from_int(0))) // check != 0
		convert_to_custom_float_format(multiplier, &fmt, &hw_mult);

	pipe_ctx->plane_res.dpp->funcs->dpp_set_hdr_multiplier(
			pipe_ctx->plane_res.dpp, hw_mult);
}
void dcn10_program_pipe(
		struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		struct dc_state *context)
{
	struct dce_hwseq *hws = dc->hwseq;

	if (pipe_ctx->plane_state->update_flags.bits.full_update)
		dcn10_enable_plane(dc, pipe_ctx, context);

	dcn10_update_dchubp_dpp(dc, pipe_ctx, context);

	hws->funcs.set_hdr_multiplier(pipe_ctx);

	if (pipe_ctx->plane_state->update_flags.bits.full_update ||
			pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
			pipe_ctx->plane_state->update_flags.bits.gamma_change)
		hws->funcs.set_input_transfer_func(dc, pipe_ctx, pipe_ctx->plane_state);

	/* dcn10_translate_regamma_to_hw_format takes 750us to finish
	 * only do gamma programming for full update.
	 * TODO: This can be further optimized/cleaned up
	 * Always call this for now since it does memcmp inside before
	 * doing heavy calculation and programming
	 */
	if (pipe_ctx->plane_state->update_flags.bits.full_update)
		hws->funcs.set_output_transfer_func(dc, pipe_ctx, pipe_ctx->stream);
}
static void dcn10_program_all_pipe_in_tree(
		struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		struct dc_state *context)
{
	struct dce_hwseq *hws = dc->hwseq;

	if (pipe_ctx->top_pipe == NULL) {
		bool blank = !is_pipe_tree_visible(pipe_ctx);

		pipe_ctx->stream_res.tg->funcs->program_global_sync(
				pipe_ctx->stream_res.tg,
				pipe_ctx->pipe_dlg_param.vready_offset,
				pipe_ctx->pipe_dlg_param.vstartup_start,
				pipe_ctx->pipe_dlg_param.vupdate_offset,
				pipe_ctx->pipe_dlg_param.vupdate_width);

		pipe_ctx->stream_res.tg->funcs->set_vtg_params(
				pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, true);

		if (hws->funcs.setup_vupdate_interrupt)
			hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx);

		hws->funcs.blank_pixel_data(dc, pipe_ctx, blank);
	}

	if (pipe_ctx->plane_state != NULL)
		hws->funcs.program_pipe(dc, pipe_ctx, context);

	if (pipe_ctx->bottom_pipe != NULL && pipe_ctx->bottom_pipe != pipe_ctx)
		dcn10_program_all_pipe_in_tree(dc, pipe_ctx->bottom_pipe, context);
}
static struct pipe_ctx *dcn10_find_top_pipe_for_stream(
		struct dc *dc,
		struct dc_state *context,
		const struct dc_stream_state *stream)
{
	int i;

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
		struct pipe_ctx *old_pipe_ctx =
				&dc->current_state->res_ctx.pipe_ctx[i];

		if (!pipe_ctx->plane_state && !old_pipe_ctx->plane_state)
			continue;

		if (pipe_ctx->stream != stream)
			continue;

		if (!pipe_ctx->top_pipe && !pipe_ctx->prev_odm_pipe)
			return pipe_ctx;
	}
	return NULL;
}
void dcn10_wait_for_pending_cleared(struct dc *dc,
		struct dc_state *context)
{
	struct pipe_ctx *pipe_ctx;
	struct timing_generator *tg;
	int i;

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		pipe_ctx = &context->res_ctx.pipe_ctx[i];
		tg = pipe_ctx->stream_res.tg;

		/*
		 * Only wait on the top pipe's tg pending bit.
		 * Also skip if pipe is disabled.
		 */
		if (pipe_ctx->top_pipe ||
		    !pipe_ctx->stream || !pipe_ctx->plane_state ||
		    !tg->funcs->is_tg_enabled(tg))
			continue;

		/*
		 * Wait for VBLANK then VACTIVE to ensure we get VUPDATE.
		 * For some reason waiting for OTG_UPDATE_PENDING cleared
		 * seems to not trigger the update right away, and if we
		 * lock again before VUPDATE then we don't get a separated
		 * operation.
		 */
		pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VBLANK);
		pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
	}
}
void dcn10_apply_ctx_for_surface(
		struct dc *dc,
		const struct dc_stream_state *stream,
		int num_planes,
		struct dc_state *context)
{
	struct dce_hwseq *hws = dc->hwseq;
	int i;
	struct timing_generator *tg;
	uint32_t underflow_check_delay_us;
	bool interdependent_update = false;
	struct pipe_ctx *top_pipe_to_program =
			dcn10_find_top_pipe_for_stream(dc, context, stream);
	DC_LOGGER_INIT(dc->ctx->logger);

	// Clear pipe_ctx flag
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
		pipe_ctx->update_flags.raw = 0;
	}

	if (!top_pipe_to_program)
		return;

	tg = top_pipe_to_program->stream_res.tg;

	interdependent_update = top_pipe_to_program->plane_state &&
		top_pipe_to_program->plane_state->update_flags.bits.full_update;

	underflow_check_delay_us = dc->debug.underflow_assert_delay_us;

	if (underflow_check_delay_us != 0xFFFFFFFF && hws->funcs.did_underflow_occur)
		ASSERT(hws->funcs.did_underflow_occur(dc, top_pipe_to_program));

	if (underflow_check_delay_us != 0xFFFFFFFF)
		udelay(underflow_check_delay_us);

	if (underflow_check_delay_us != 0xFFFFFFFF && hws->funcs.did_underflow_occur)
		ASSERT(hws->funcs.did_underflow_occur(dc, top_pipe_to_program));

	if (num_planes == 0) {
		/* OTG blank before remove all front end */
		hws->funcs.blank_pixel_data(dc, top_pipe_to_program, true);
	}

	/* Disconnect unused mpcc */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
		struct pipe_ctx *old_pipe_ctx =
				&dc->current_state->res_ctx.pipe_ctx[i];

		if ((!pipe_ctx->plane_state ||
		     pipe_ctx->stream_res.tg != old_pipe_ctx->stream_res.tg) &&
		    old_pipe_ctx->plane_state &&
		    old_pipe_ctx->stream_res.tg == tg) {

			hws->funcs.plane_atomic_disconnect(dc, old_pipe_ctx);
			pipe_ctx->update_flags.bits.disable = 1;

			DC_LOG_DC("Reset mpcc for pipe %d\n",
					old_pipe_ctx->pipe_idx);
		}
	}

	if (num_planes > 0)
		dcn10_program_all_pipe_in_tree(dc, top_pipe_to_program, context);

	/* Program secondary blending tree and writeback pipes */
	if ((stream->num_wb_info > 0) && (hws->funcs.program_all_writeback_pipes_in_tree))
		hws->funcs.program_all_writeback_pipes_in_tree(dc, stream, context);
	if (interdependent_update)
		for (i = 0; i < dc->res_pool->pipe_count; i++) {
			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
			/* Skip inactive pipes and ones already updated */
			if (!pipe_ctx->stream || pipe_ctx->stream == stream ||
			    !pipe_ctx->plane_state || !tg->funcs->is_tg_enabled(tg))
				continue;

			pipe_ctx->plane_res.hubp->funcs->hubp_setup_interdependent(
				pipe_ctx->plane_res.hubp,
				&pipe_ctx->dlg_regs,
				&pipe_ctx->ttu_regs);
		}
}
void dcn10_post_unlock_program_front_end(
		struct dc *dc,
		struct dc_state *context)
{
	int i;

	DC_LOGGER_INIT(dc->ctx->logger);

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		if (!pipe_ctx->top_pipe &&
			!pipe_ctx->prev_odm_pipe &&
			pipe_ctx->stream) {
			struct timing_generator *tg = pipe_ctx->stream_res.tg;

			if (context->stream_status[i].plane_count == 0)
				false_optc_underflow_wa(dc, pipe_ctx->stream, tg);
		}
	}

	for (i = 0; i < dc->res_pool->pipe_count; i++)
		if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable)
			dc->hwss.disable_plane(dc, &dc->current_state->res_ctx.pipe_ctx[i]);

	for (i = 0; i < dc->res_pool->pipe_count; i++)
		if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable) {
			dc->hwss.optimize_bandwidth(dc, context);
			break;
		}

	if (dc->hwseq->wa.DEGVIDCN10_254)
		hubbub1_wm_change_req_wa(dc->res_pool->hubbub);
}
static void dcn10_stereo_hw_frame_pack_wa(struct dc *dc, struct dc_state *context)
{
	uint8_t i;

	for (i = 0; i < context->stream_count; i++) {
		if (context->streams[i]->timing.timing_3d_format
				== TIMING_3D_FORMAT_HW_FRAME_PACKING) {
			/*
			 * Disable stutter
			 */
			hubbub1_allow_self_refresh_control(dc->res_pool->hubbub, false);
			break;
		}
	}
}
void dcn10_prepare_bandwidth(
		struct dc *dc,
		struct dc_state *context)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct hubbub *hubbub = dc->res_pool->hubbub;

	if (dc->debug.sanity_checks)
		hws->funcs.verify_allow_pstate_change_high(dc);

	if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
		if (context->stream_count == 0)
			context->bw_ctx.bw.dcn.clk.phyclk_khz = 0;

		dc->clk_mgr->funcs->update_clocks(
				dc->clk_mgr,
				context,
				false);
	}

	dc->wm_optimized_required = hubbub->funcs->program_watermarks(hubbub,
			&context->bw_ctx.bw.dcn.watermarks,
			dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
			true);
	dcn10_stereo_hw_frame_pack_wa(dc, context);

	if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE)
		dcn_bw_notify_pplib_of_wm_ranges(dc);

	if (dc->debug.sanity_checks)
		hws->funcs.verify_allow_pstate_change_high(dc);
}
void dcn10_optimize_bandwidth(
		struct dc *dc,
		struct dc_state *context)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct hubbub *hubbub = dc->res_pool->hubbub;

	if (dc->debug.sanity_checks)
		hws->funcs.verify_allow_pstate_change_high(dc);

	if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
		if (context->stream_count == 0)
			context->bw_ctx.bw.dcn.clk.phyclk_khz = 0;

		dc->clk_mgr->funcs->update_clocks(
				dc->clk_mgr,
				context,
				true);
	}

	hubbub->funcs->program_watermarks(hubbub,
			&context->bw_ctx.bw.dcn.watermarks,
			dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
			true);

	dcn10_stereo_hw_frame_pack_wa(dc, context);

	if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE)
		dcn_bw_notify_pplib_of_wm_ranges(dc);

	if (dc->debug.sanity_checks)
		hws->funcs.verify_allow_pstate_change_high(dc);
}
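/*
 * Summary note (paraphrase, not from the original source): prepare_bandwidth
 * runs before front-end programming and may only raise clocks and watermarks
 * so the new configuration cannot underflow, while optimize_bandwidth runs
 * afterwards and is allowed to lower them once the smaller configuration is
 * actually in use; the differing final argument to update_clocks reflects
 * whether lowering is safe.
 */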
void dcn10_set_drr(struct pipe_ctx **pipe_ctx,
		int num_pipes, unsigned int vmin, unsigned int vmax,
		unsigned int vmid, unsigned int vmid_frame_number)
{
	int i = 0;
	struct drr_params params = {0};
	// DRR set trigger event mapped to OTG_TRIG_A (bit 11) for manual control flow
	unsigned int event_triggers = 0x800;
	// Note DRR trigger events are generated regardless of whether num frames met.
	unsigned int num_frames = 2;

	params.vertical_total_max = vmax;
	params.vertical_total_min = vmin;
	params.vertical_total_mid = vmid;
	params.vertical_total_mid_frame_num = vmid_frame_number;

	/* TODO: If multiple pipes are to be supported, you need
	 * some GSL stuff. Static screen triggers may be programmed differently
	 * as well.
	 */
	for (i = 0; i < num_pipes; i++) {
		pipe_ctx[i]->stream_res.tg->funcs->set_drr(
			pipe_ctx[i]->stream_res.tg, &params);
		if (vmax != 0 && vmin != 0)
			pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control(
					pipe_ctx[i]->stream_res.tg,
					event_triggers, num_frames);
	}
}
void dcn10_get_position(struct pipe_ctx **pipe_ctx,
		int num_pipes,
		struct crtc_position *position)
{
	int i = 0;

	/* TODO: handle pipes > 1
	 */
	for (i = 0; i < num_pipes; i++)
		pipe_ctx[i]->stream_res.tg->funcs->get_position(pipe_ctx[i]->stream_res.tg, position);
}
void dcn10_set_static_screen_control(struct pipe_ctx **pipe_ctx,
		int num_pipes, const struct dc_static_screen_params *params)
{
	int i;
	unsigned int triggers = 0;

	if (params->triggers.surface_update)
		triggers |= 0x80;
	if (params->triggers.cursor_update)
		triggers |= 0x2;
	if (params->triggers.force_trigger)
		triggers |= 0x1;

	for (i = 0; i < num_pipes; i++)
		pipe_ctx[i]->stream_res.tg->funcs->
			set_static_screen_control(pipe_ctx[i]->stream_res.tg,
					triggers, params->num_frames);
}
static void dcn10_config_stereo_parameters(
		struct dc_stream_state *stream, struct crtc_stereo_flags *flags)
{
	enum view_3d_format view_format = stream->view_format;
	enum dc_timing_3d_format timing_3d_format =\
			stream->timing.timing_3d_format;
	bool non_stereo_timing = false;

	if (timing_3d_format == TIMING_3D_FORMAT_NONE ||
		timing_3d_format == TIMING_3D_FORMAT_SIDE_BY_SIDE ||
		timing_3d_format == TIMING_3D_FORMAT_TOP_AND_BOTTOM)
		non_stereo_timing = true;

	if (non_stereo_timing == false &&
		view_format == VIEW_3D_FORMAT_FRAME_SEQUENTIAL) {

		flags->PROGRAM_STEREO         = 1;
		flags->PROGRAM_POLARITY       = 1;
		if (timing_3d_format == TIMING_3D_FORMAT_INBAND_FA ||
			timing_3d_format == TIMING_3D_FORMAT_DP_HDMI_INBAND_FA ||
			timing_3d_format == TIMING_3D_FORMAT_SIDEBAND_FA) {
			enum display_dongle_type dongle = \
					stream->link->ddc->dongle_type;
			if (dongle == DISPLAY_DONGLE_DP_VGA_CONVERTER ||
				dongle == DISPLAY_DONGLE_DP_DVI_CONVERTER ||
				dongle == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
				flags->DISABLE_STEREO_DP_SYNC = 1;
		}
		flags->RIGHT_EYE_POLARITY =\
				stream->timing.flags.RIGHT_EYE_3D_POLARITY;
		if (timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING)
			flags->FRAME_PACKED = 1;
	}

	return;
}
void dcn10_setup_stereo(struct pipe_ctx *pipe_ctx, struct dc *dc)
{
	struct crtc_stereo_flags flags = { 0 };
	struct dc_stream_state *stream = pipe_ctx->stream;

	dcn10_config_stereo_parameters(stream, &flags);

	if (stream->timing.timing_3d_format == TIMING_3D_FORMAT_SIDEBAND_FA) {
		if (!dc_set_generic_gpio_for_stereo(true, dc->ctx->gpio_service))
			dc_set_generic_gpio_for_stereo(false, dc->ctx->gpio_service);
	} else {
		dc_set_generic_gpio_for_stereo(false, dc->ctx->gpio_service);
	}

	pipe_ctx->stream_res.opp->funcs->opp_program_stereo(
		pipe_ctx->stream_res.opp,
		flags.PROGRAM_STEREO == 1 ? true:false,
		&stream->timing);

	pipe_ctx->stream_res.tg->funcs->program_stereo(
		pipe_ctx->stream_res.tg,
		&stream->timing,
		&flags);

	return;
}
static struct hubp *get_hubp_by_inst(struct resource_pool *res_pool, int mpcc_inst)
{
	int i;

	for (i = 0; i < res_pool->pipe_count; i++) {
		if (res_pool->hubps[i]->inst == mpcc_inst)
			return res_pool->hubps[i];
	}

	ASSERT(false);
	return NULL;
}
void dcn10_wait_for_mpcc_disconnect(
		struct dc *dc,
		struct resource_pool *res_pool,
		struct pipe_ctx *pipe_ctx)
{
	struct dce_hwseq *hws = dc->hwseq;
	int mpcc_inst;

	if (dc->debug.sanity_checks) {
		hws->funcs.verify_allow_pstate_change_high(dc);
	}

	if (!pipe_ctx->stream_res.opp)
		return;

	for (mpcc_inst = 0; mpcc_inst < MAX_PIPES; mpcc_inst++) {
		if (pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst]) {
			struct hubp *hubp = get_hubp_by_inst(res_pool, mpcc_inst);

			res_pool->mpc->funcs->wait_for_idle(res_pool->mpc, mpcc_inst);
			pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst] = false;
			hubp->funcs->set_blank(hubp, true);
		}
	}

	if (dc->debug.sanity_checks) {
		hws->funcs.verify_allow_pstate_change_high(dc);
	}
}
bool dcn10_dummy_display_power_gating(
	struct dc *dc,
	uint8_t controller_id,
	struct dc_bios *dcb,
	enum pipe_gating_control power_gating)
{
	return true;
}
void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx)
{
	struct dc_plane_state *plane_state = pipe_ctx->plane_state;
	struct timing_generator *tg = pipe_ctx->stream_res.tg;
	bool flip_pending;
	struct dc *dc = plane_state->ctx->dc;

	if (plane_state == NULL)
		return;

	flip_pending = pipe_ctx->plane_res.hubp->funcs->hubp_is_flip_pending(
					pipe_ctx->plane_res.hubp);

	plane_state->status.is_flip_pending = plane_state->status.is_flip_pending || flip_pending;

	if (!flip_pending)
		plane_state->status.current_address = plane_state->status.requested_address;

	if (plane_state->status.current_address.type == PLN_ADDR_TYPE_GRPH_STEREO &&
			tg->funcs->is_stereo_left_eye) {
		plane_state->status.is_right_eye =
				!tg->funcs->is_stereo_left_eye(pipe_ctx->stream_res.tg);
	}

	if (dc->hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied) {
		struct dce_hwseq *hwseq = dc->hwseq;
		struct timing_generator *tg = dc->res_pool->timing_generators[0];
		unsigned int cur_frame = tg->funcs->get_frame_count(tg);

		if (cur_frame != hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied_on_frame) {
			struct hubbub *hubbub = dc->res_pool->hubbub;

			hubbub->funcs->allow_self_refresh_control(hubbub, !dc->debug.disable_stutter);
			hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied = false;
		}
	}
}
void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh_data)
{
	struct hubbub *hubbub = hws->ctx->dc->res_pool->hubbub;

	/* In DCN, this programming sequence is owned by the hubbub */
	hubbub->funcs->update_dchub(hubbub, dh_data);
}
static bool dcn10_can_pipe_disable_cursor(struct pipe_ctx *pipe_ctx)
{
	struct pipe_ctx *test_pipe;
	const struct rect *r1 = &pipe_ctx->plane_res.scl_data.recout, *r2;
	int r1_r = r1->x + r1->width, r1_b = r1->y + r1->height, r2_r, r2_b;

	/*
	 * Disable the cursor if there's another pipe above this with a
	 * plane that contains this pipe's viewport to prevent double cursor
	 * and incorrect scaling artifacts.
	 */
	for (test_pipe = pipe_ctx->top_pipe; test_pipe;
	     test_pipe = test_pipe->top_pipe) {
		if (!test_pipe->plane_state->visible)
			continue;

		r2 = &test_pipe->plane_res.scl_data.recout;
		r2_r = r2->x + r2->width;
		r2_b = r2->y + r2->height;

		if (r1->x >= r2->x && r1->y >= r2->y && r1_r <= r2_r && r1_b <= r2_b)
			return true;
	}

	return false;
}
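/*
 * Illustrative containment check (numbers are made up): with this pipe's
 * recout at (x, y, w, h) = (0, 0, 960, 1080) and a pipe above it covering
 * (0, 0, 1920, 1080), r1 lies entirely inside r2, so the lower pipe's cursor
 * is disabled and only the top pipe draws it.
 */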
void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
{
	struct dc_cursor_position pos_cpy = pipe_ctx->stream->cursor_position;
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	struct dpp *dpp = pipe_ctx->plane_res.dpp;
	struct dc_cursor_mi_param param = {
		.pixel_clk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10,
		.ref_clk_khz = pipe_ctx->stream->ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz,
		.viewport = pipe_ctx->plane_res.scl_data.viewport,
		.h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz,
		.v_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.vert,
		.rotation = pipe_ctx->plane_state->rotation,
		.mirror = pipe_ctx->plane_state->horizontal_mirror
	};
	bool pipe_split_on = (pipe_ctx->top_pipe != NULL) ||
		(pipe_ctx->bottom_pipe != NULL);
	bool odm_combine_on = (pipe_ctx->next_odm_pipe != NULL) ||
		(pipe_ctx->prev_odm_pipe != NULL);

	int x_plane = pipe_ctx->plane_state->dst_rect.x;
	int y_plane = pipe_ctx->plane_state->dst_rect.y;
	int x_pos = pos_cpy.x;
	int y_pos = pos_cpy.y;

	/*
	 * DC cursor is stream space, HW cursor is plane space and drawn
	 * as part of the framebuffer.
	 *
	 * Cursor position can't be negative, but hotspot can be used to
	 * shift cursor out of the plane bounds. Hotspot must be smaller
	 * than the cursor size.
	 */

	/*
	 * Translate cursor from stream space to plane space.
	 *
	 * If the cursor is scaled then we need to scale the position
	 * to be in the approximately correct place. We can't do anything
	 * about the actual size being incorrect, that's a limitation of
	 * the hardware.
	 */
	x_pos = (x_pos - x_plane) * pipe_ctx->plane_state->src_rect.width /
			pipe_ctx->plane_state->dst_rect.width;
	y_pos = (y_pos - y_plane) * pipe_ctx->plane_state->src_rect.height /
			pipe_ctx->plane_state->dst_rect.height;

	/*
	 * If the cursor's source viewport is clipped then we need to
	 * translate the cursor to appear in the correct position on
	 * the screen.
	 *
	 * This translation isn't affected by scaling so it needs to be
	 * done *after* we adjust the position for the scale factor.
	 *
	 * This is only done by opt-in for now since there are still
	 * some usecases like tiled display that might enable the
	 * cursor on both streams while expecting dc to clip it.
	 */
	if (pos_cpy.translate_by_source) {
		x_pos += pipe_ctx->plane_state->src_rect.x;
		y_pos += pipe_ctx->plane_state->src_rect.y;
	}

	/*
	 * If the position is negative then we need to add to the hotspot
	 * to shift the cursor outside the plane.
	 */
	if (x_pos < 0) {
		pos_cpy.x_hotspot -= x_pos;
		x_pos = 0;
	}

	if (y_pos < 0) {
		pos_cpy.y_hotspot -= y_pos;
		y_pos = 0;
	}

	pos_cpy.x = (uint32_t)x_pos;
	pos_cpy.y = (uint32_t)y_pos;

	if (pipe_ctx->plane_state->address.type
			== PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
		pos_cpy.enable = false;

	if (pos_cpy.enable && dcn10_can_pipe_disable_cursor(pipe_ctx))
		pos_cpy.enable = false;

	// Swap axis and mirror horizontally
	if (param.rotation == ROTATION_ANGLE_90) {
		uint32_t temp_x = pos_cpy.x;

		pos_cpy.x = pipe_ctx->plane_res.scl_data.viewport.width -
				(pos_cpy.y - pipe_ctx->plane_res.scl_data.viewport.x) + pipe_ctx->plane_res.scl_data.viewport.x;
		pos_cpy.y = temp_x;
	}
	// Swap axis and mirror vertically
	else if (param.rotation == ROTATION_ANGLE_270) {
		uint32_t temp_y = pos_cpy.y;
		int viewport_height =
			pipe_ctx->plane_res.scl_data.viewport.height;
		int viewport_y =
			pipe_ctx->plane_res.scl_data.viewport.y;

		/*
		 * Display groups that are 1xnY, have pos_cpy.x > 2 * viewport.height
		 * For pipe split cases:
		 * - apply offset of viewport.y to normalize pos_cpy.x
		 * - calculate the pos_cpy.y as before
		 * - shift pos_cpy.y back by same offset to get final value
		 * - since we iterate through both pipes, use the lower
		 *   viewport.y for offset
		 * For non pipe split cases, use the same calculation for
		 * pos_cpy.y as the 180 degree rotation case below,
		 * but use pos_cpy.x as our input because we are rotating
		 * 270 degrees
		 */
		if (pipe_split_on || odm_combine_on) {
			int pos_cpy_x_offset;
			int other_pipe_viewport_y;

			if (pipe_split_on) {
				if (pipe_ctx->bottom_pipe) {
					other_pipe_viewport_y =
						pipe_ctx->bottom_pipe->plane_res.scl_data.viewport.y;
				} else {
					other_pipe_viewport_y =
						pipe_ctx->top_pipe->plane_res.scl_data.viewport.y;
				}
			} else {
				if (pipe_ctx->next_odm_pipe) {
					other_pipe_viewport_y =
						pipe_ctx->next_odm_pipe->plane_res.scl_data.viewport.y;
				} else {
					other_pipe_viewport_y =
						pipe_ctx->prev_odm_pipe->plane_res.scl_data.viewport.y;
				}
			}
			pos_cpy_x_offset = (viewport_y > other_pipe_viewport_y) ?
				other_pipe_viewport_y : viewport_y;
			pos_cpy.x -= pos_cpy_x_offset;
			if (pos_cpy.x > viewport_height) {
				pos_cpy.x = pos_cpy.x - viewport_height;
				pos_cpy.y = viewport_height - pos_cpy.x;
			} else {
				pos_cpy.y = 2 * viewport_height - pos_cpy.x;
			}
			pos_cpy.y += pos_cpy_x_offset;
		} else {
			pos_cpy.y = (2 * viewport_y) + viewport_height - pos_cpy.x;
		}
		pos_cpy.x = temp_y;
	}
	// Mirror horizontally and vertically
	else if (param.rotation == ROTATION_ANGLE_180) {
		int viewport_width =
			pipe_ctx->plane_res.scl_data.viewport.width;
		int viewport_x =
			pipe_ctx->plane_res.scl_data.viewport.x;

		if (pipe_split_on || odm_combine_on) {
			if (pos_cpy.x >= viewport_width + viewport_x) {
				pos_cpy.x = 2 * viewport_width
						- pos_cpy.x + 2 * viewport_x;
			} else {
				uint32_t temp_x = pos_cpy.x;

				pos_cpy.x = 2 * viewport_x - pos_cpy.x;
				if (temp_x >= viewport_x +
					(int)hubp->curs_attr.width || pos_cpy.x
					<= (int)hubp->curs_attr.width +
					pipe_ctx->plane_state->src_rect.x) {
					pos_cpy.x = temp_x + viewport_width;
				}
			}
		} else {
			pos_cpy.x = viewport_width - pos_cpy.x + 2 * viewport_x;
		}

		/*
		 * Display groups that are 1xnY, have pos_cpy.y > viewport.height
		 *
		 * Calculation:
		 *   delta_from_bottom = viewport.y + viewport.height - pos_cpy.y
		 *   pos_cpy.y_new = viewport.y + delta_from_bottom
		 *   Simplify it as:
		 *   pos_cpy.y = viewport.y * 2 + viewport.height - pos_cpy.y
		 */
		pos_cpy.y = (2 * pipe_ctx->plane_res.scl_data.viewport.y) +
			pipe_ctx->plane_res.scl_data.viewport.height - pos_cpy.y;
	}

	hubp->funcs->set_cursor_position(hubp, &pos_cpy, &param);
	dpp->funcs->set_cursor_position(dpp, &pos_cpy, &param, hubp->curs_attr.width, hubp->curs_attr.height);
}
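/*
 * Example with made-up numbers: a stream-space cursor at x = 1000 on a plane
 * with dst_rect.x = 0, dst_rect.width = 3840 and src_rect.width = 1920 lands
 * at plane-space x = (1000 - 0) * 1920 / 3840 = 500 before any rotation or
 * pipe-split adjustment above is applied.
 */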
void dcn10_set_cursor_attribute(struct pipe_ctx *pipe_ctx)
{
	struct dc_cursor_attributes *attributes = &pipe_ctx->stream->cursor_attributes;

	pipe_ctx->plane_res.hubp->funcs->set_cursor_attributes(
			pipe_ctx->plane_res.hubp, attributes);
	pipe_ctx->plane_res.dpp->funcs->set_cursor_attributes(
		pipe_ctx->plane_res.dpp, attributes);
}
void dcn10_set_cursor_sdr_white_level(struct pipe_ctx *pipe_ctx)
{
	uint32_t sdr_white_level = pipe_ctx->stream->cursor_attributes.sdr_white_level;
	struct fixed31_32 multiplier;
	struct dpp_cursor_attributes opt_attr = { 0 };
	uint32_t hw_scale = 0x3c00; // 1.0 default multiplier
	struct custom_float_format fmt;

	if (!pipe_ctx->plane_res.dpp->funcs->set_optional_cursor_attributes)
		return;

	fmt.exponenta_bits = 5;
	fmt.mantissa_bits = 10;
	fmt.sign = true;

	if (sdr_white_level > 80) {
		multiplier = dc_fixpt_from_fraction(sdr_white_level, 80);
		convert_to_custom_float_format(multiplier, &fmt, &hw_scale);
	}

	opt_attr.scale = hw_scale;
	opt_attr.bias = 0;

	pipe_ctx->plane_res.dpp->funcs->set_optional_cursor_attributes(
			pipe_ctx->plane_res.dpp, &opt_attr);
}
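/*
 * Encoding note (illustrative, not from the original source): with 5 exponent
 * and 10 mantissa bits the scale behaves like an fp16 value, so the 0x3c00
 * default encodes 1.0, and an sdr_white_level of 160 nits gives a multiplier
 * of 160 / 80 = 2.0, which encodes as 0x4000.
 */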
/*
 * apply_front_porch_workaround  TODO FPGA still need?
 *
 * This is a workaround for a bug that has existed since R5xx and has not been
 * fixed: keep the front porch at a minimum of 2 lines for interlaced modes or
 * 1 line for progressive modes.
 */
static void apply_front_porch_workaround(
	struct dc_crtc_timing *timing)
{
	if (timing->flags.INTERLACE == 1) {
		if (timing->v_front_porch < 2)
			timing->v_front_porch = 2;
	} else {
		if (timing->v_front_porch < 1)
			timing->v_front_porch = 1;
	}
}
int dcn10_get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx)
{
	const struct dc_crtc_timing *dc_crtc_timing = &pipe_ctx->stream->timing;
	struct dc_crtc_timing patched_crtc_timing;
	int vesa_sync_start;
	int asic_blank_end;
	int interlace_factor;
	int vertical_line_start;

	patched_crtc_timing = *dc_crtc_timing;
	apply_front_porch_workaround(&patched_crtc_timing);

	interlace_factor = patched_crtc_timing.flags.INTERLACE ? 2 : 1;

	vesa_sync_start = patched_crtc_timing.v_addressable +
			patched_crtc_timing.v_border_bottom +
			patched_crtc_timing.v_front_porch;

	asic_blank_end = (patched_crtc_timing.v_total -
			vesa_sync_start -
			patched_crtc_timing.v_border_top)
			* interlace_factor;

	vertical_line_start = asic_blank_end -
			pipe_ctx->pipe_dlg_param.vstartup_start + 1;

	return vertical_line_start;
}
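/*
 * Worked example (made-up but representative timing): v_total = 1125,
 * v_addressable = 1080, v_front_porch = 4 and zero borders give
 * vesa_sync_start = 1084 and asic_blank_end = (1125 - 1084 - 0) * 1 = 41;
 * with vstartup_start = 26 the returned VUPDATE offset from VSYNC is
 * 41 - 26 + 1 = 16 lines.
 */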
void dcn10_calc_vupdate_position(
		struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		uint32_t *start_line,
		uint32_t *end_line)
{
	const struct dc_crtc_timing *dc_crtc_timing = &pipe_ctx->stream->timing;
	int vline_int_offset_from_vupdate =
			pipe_ctx->stream->periodic_interrupt0.lines_offset;
	int vupdate_offset_from_vsync = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
	int start_position;

	if (vline_int_offset_from_vupdate > 0)
		vline_int_offset_from_vupdate--;
	else if (vline_int_offset_from_vupdate < 0)
		vline_int_offset_from_vupdate++;

	start_position = vline_int_offset_from_vupdate + vupdate_offset_from_vsync;

	if (start_position >= 0)
		*start_line = start_position;
	else
		*start_line = dc_crtc_timing->v_total + start_position - 1;

	*end_line = *start_line + 2;

	if (*end_line >= dc_crtc_timing->v_total)
		*end_line = 2;
}
static void dcn10_cal_vline_position(
		struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		enum vline_select vline,
		uint32_t *start_line,
		uint32_t *end_line)
{
	enum vertical_interrupt_ref_point ref_point = INVALID_POINT;

	if (vline == VLINE0)
		ref_point = pipe_ctx->stream->periodic_interrupt0.ref_point;
	else if (vline == VLINE1)
		ref_point = pipe_ctx->stream->periodic_interrupt1.ref_point;

	switch (ref_point) {
	case START_V_UPDATE:
		dcn10_calc_vupdate_position(
				dc,
				pipe_ctx,
				start_line,
				end_line);
		break;
	case START_V_SYNC:
		// Supposed to do nothing because vsync is 0
		break;
	default:
		ASSERT(0);
		break;
	}
}
void dcn10_setup_periodic_interrupt(
		struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		enum vline_select vline)
{
	struct timing_generator *tg = pipe_ctx->stream_res.tg;

	if (vline == VLINE0) {
		uint32_t start_line = 0;
		uint32_t end_line = 0;

		dcn10_cal_vline_position(dc, pipe_ctx, vline, &start_line, &end_line);

		tg->funcs->setup_vertical_interrupt0(tg, start_line, end_line);

	} else if (vline == VLINE1) {
		pipe_ctx->stream_res.tg->funcs->setup_vertical_interrupt1(
				tg,
				pipe_ctx->stream->periodic_interrupt1.lines_offset);
	}
}
void dcn10_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
	struct timing_generator *tg = pipe_ctx->stream_res.tg;
	int start_line = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);

	if (start_line < 0) {
		ASSERT(0);
		start_line = 0;
	}

	if (tg->funcs->setup_vertical_interrupt2)
		tg->funcs->setup_vertical_interrupt2(tg, start_line);
}
void dcn10_unblank_stream(struct pipe_ctx *pipe_ctx,
		struct dc_link_settings *link_settings)
{
	struct encoder_unblank_param params = { { 0 } };
	struct dc_stream_state *stream = pipe_ctx->stream;
	struct dc_link *link = stream->link;
	struct dce_hwseq *hws = link->dc->hwseq;

	/* only 3 items below are used by unblank */
	params.timing = pipe_ctx->stream->timing;

	params.link_settings.link_rate = link_settings->link_rate;

	if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
		if (params.timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
			params.timing.pix_clk_100hz /= 2;
		pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(pipe_ctx->stream_res.stream_enc, &params);
	}

	if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
		hws->funcs.edp_backlight_control(link, true);
	}
}
void dcn10_send_immediate_sdp_message(struct pipe_ctx *pipe_ctx,
		const uint8_t *custom_sdp_message,
		unsigned int sdp_message_size)
{
	if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
		pipe_ctx->stream_res.stream_enc->funcs->send_immediate_sdp_message(
				pipe_ctx->stream_res.stream_enc,
				custom_sdp_message,
				sdp_message_size);
	}
}
enum dc_status dcn10_set_clock(struct dc *dc,
		enum dc_clock_type clock_type,
		uint32_t clk_khz,
		uint32_t stepping)
{
	struct dc_state *context = dc->current_state;
	struct dc_clock_config clock_cfg = {0};
	struct dc_clocks *current_clocks = &context->bw_ctx.bw.dcn.clk;

	if (dc->clk_mgr && dc->clk_mgr->funcs->get_clock)
		dc->clk_mgr->funcs->get_clock(dc->clk_mgr,
				context, clock_type, &clock_cfg);

	if (!dc->clk_mgr->funcs->get_clock)
		return DC_FAIL_UNSUPPORTED_1;

	if (clk_khz > clock_cfg.max_clock_khz)
		return DC_FAIL_CLK_EXCEED_MAX;

	if (clk_khz < clock_cfg.min_clock_khz)
		return DC_FAIL_CLK_BELOW_MIN;

	if (clk_khz < clock_cfg.bw_requirequired_clock_khz)
		return DC_FAIL_CLK_BELOW_CFG_REQUIRED;

	/*update internal request clock for update clock use*/
	if (clock_type == DC_CLOCK_TYPE_DISPCLK)
		current_clocks->dispclk_khz = clk_khz;
	else if (clock_type == DC_CLOCK_TYPE_DPPCLK)
		current_clocks->dppclk_khz = clk_khz;
	else
		return DC_ERROR_UNEXPECTED;

	if (dc->clk_mgr && dc->clk_mgr->funcs->update_clocks)
		dc->clk_mgr->funcs->update_clocks(dc->clk_mgr,
				context, true);
	return DC_OK;
}
void dcn10_get_clock(struct dc *dc,
		enum dc_clock_type clock_type,
		struct dc_clock_config *clock_cfg)
{
	struct dc_state *context = dc->current_state;

	if (dc->clk_mgr && dc->clk_mgr->funcs->get_clock)
		dc->clk_mgr->funcs->get_clock(dc->clk_mgr, context, clock_type, clock_cfg);
}