/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 * Copyright 2019 Raptor Engineering, LLC
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/slab.h>

#include "dm_services.h"

#include "dcn20_init.h"

#include "include/irq_service_interface.h"
#include "dcn20/dcn20_resource.h"

#include "dcn10/dcn10_hubp.h"
#include "dcn10/dcn10_ipp.h"
#include "dcn20_hubbub.h"
#include "dcn20_mpc.h"
#include "dcn20_hubp.h"
#include "irq/dcn20/irq_service_dcn20.h"
#include "dcn20_dpp.h"
#include "dcn20_optc.h"
#include "dcn20_hwseq.h"
#include "dce110/dce110_hw_sequencer.h"
#include "dcn10/dcn10_resource.h"
#include "dcn20_opp.h"

#include "dcn20_dsc.h"

#include "dcn20_link_encoder.h"
#include "dcn20_stream_encoder.h"
#include "dce/dce_clock_source.h"
#include "dce/dce_audio.h"
#include "dce/dce_hwseq.h"
#include "virtual/virtual_stream_encoder.h"
#include "dce110/dce110_resource.h"
#include "dml/display_mode_vba.h"
#include "dcn20_dccg.h"
#include "dcn20_vmid.h"
#include "dc_link_ddc.h"

#include "navi10_ip_offset.h"

#include "dcn/dcn_2_0_0_offset.h"
#include "dcn/dcn_2_0_0_sh_mask.h"
#include "dpcs/dpcs_2_0_0_offset.h"
#include "dpcs/dpcs_2_0_0_sh_mask.h"

#include "nbio/nbio_2_3_offset.h"

#include "dcn20/dcn20_dwb.h"
#include "dcn20/dcn20_mmhubbub.h"

#include "mmhub/mmhub_2_0_0_offset.h"
#include "mmhub/mmhub_2_0_0_sh_mask.h"

#include "reg_helper.h"
#include "dce/dce_abm.h"
#include "dce/dce_dmcu.h"
#include "dce/dce_aux.h"
#include "dce/dce_i2c.h"
#include "vm_helper.h"

#include "amdgpu_socbb.h"

#define DC_LOGGER_INIT(logger)
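
/*
 * Default DML (Display Mode Library) IP and SoC parameter tables for DCN 2.0.
 * They describe hardware buffer sizes, pipeline delays and clock/bandwidth
 * limits consumed by the DML formulas when validating a display
 * configuration; several of the SoC values are later patched with data read
 * from firmware at driver load.
 */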
struct _vcs_dpi_ip_params_st dcn2_0_ip = {
	.gpuvm_max_page_table_levels = 4,
	.hostvm_max_page_table_levels = 4,
	.hostvm_cached_page_table_levels = 0,
	.pte_group_size_bytes = 2048,
	.rob_buffer_size_kbytes = 168,
	.det_buffer_size_kbytes = 164,
	.dpte_buffer_size_in_pte_reqs_luma = 84,
	.pde_proc_buffer_size_64k_reqs = 48,
	.dpp_output_buffer_pixels = 2560,
	.opp_output_buffer_lines = 1,
	.pixel_chunk_size_kbytes = 8,
	.pte_chunk_size_kbytes = 2,
	.meta_chunk_size_kbytes = 2,
	.writeback_chunk_size_kbytes = 2,
	.line_buffer_size_bits = 789504,
	.is_line_buffer_bpp_fixed = 0,
	.line_buffer_fixed_bpp = 0,
	.dcc_supported = true,
	.max_line_buffer_lines = 12,
	.writeback_luma_buffer_size_kbytes = 12,
	.writeback_chroma_buffer_size_kbytes = 8,
	.writeback_chroma_line_buffer_width_pixels = 4,
	.writeback_max_hscl_ratio = 1,
	.writeback_max_vscl_ratio = 1,
	.writeback_min_hscl_ratio = 1,
	.writeback_min_vscl_ratio = 1,
	.writeback_max_hscl_taps = 12,
	.writeback_max_vscl_taps = 12,
	.writeback_line_buffer_luma_buffer_size = 0,
	.writeback_line_buffer_chroma_buffer_size = 14643,
	.cursor_buffer_size = 8,
	.cursor_chunk_size = 2,
	.max_dchub_pscl_bw_pix_per_clk = 4,
	.max_pscl_lb_bw_pix_per_clk = 2,
	.max_lb_vscl_bw_pix_per_clk = 4,
	.max_vscl_hscl_bw_pix_per_clk = 4,
	.dispclk_ramp_margin_percent = 1,
	.underscan_factor = 1.10,
	.min_vblank_lines = 32,
	.dppclk_delay_subtotal = 77,
	.dppclk_delay_scl_lb_only = 16,
	.dppclk_delay_scl = 50,
	.dppclk_delay_cnvc_formatter = 8,
	.dppclk_delay_cnvc_cursor = 6,
	.dispclk_delay_subtotal = 87,
	.dcfclk_cstate_latency = 10, // SRExitTime
	.max_inter_dcn_tile_repeaters = 8,
	.xfc_supported = true,
	.xfc_fill_bw_overhead_percent = 10.0,
	.xfc_fill_constant_bytes = 0,
};
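
/* Same IP parameters, adjusted for the Navi14 (nv14) variant of DCN 2.0. */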
struct _vcs_dpi_ip_params_st dcn2_0_nv14_ip = {
	.gpuvm_max_page_table_levels = 4,
	.hostvm_max_page_table_levels = 4,
	.hostvm_cached_page_table_levels = 0,
	.rob_buffer_size_kbytes = 168,
	.det_buffer_size_kbytes = 164,
	.dpte_buffer_size_in_pte_reqs_luma = 84,
	.dpte_buffer_size_in_pte_reqs_chroma = 42, //todo
	.dpp_output_buffer_pixels = 2560,
	.opp_output_buffer_lines = 1,
	.pixel_chunk_size_kbytes = 8,
	.max_page_table_levels = 4,
	.pte_chunk_size_kbytes = 2,
	.meta_chunk_size_kbytes = 2,
	.writeback_chunk_size_kbytes = 2,
	.line_buffer_size_bits = 789504,
	.is_line_buffer_bpp_fixed = 0,
	.line_buffer_fixed_bpp = 0,
	.dcc_supported = true,
	.max_line_buffer_lines = 12,
	.writeback_luma_buffer_size_kbytes = 12,
	.writeback_chroma_buffer_size_kbytes = 8,
	.writeback_chroma_line_buffer_width_pixels = 4,
	.writeback_max_hscl_ratio = 1,
	.writeback_max_vscl_ratio = 1,
	.writeback_min_hscl_ratio = 1,
	.writeback_min_vscl_ratio = 1,
	.writeback_max_hscl_taps = 12,
	.writeback_max_vscl_taps = 12,
	.writeback_line_buffer_luma_buffer_size = 0,
	.writeback_line_buffer_chroma_buffer_size = 14643,
	.cursor_buffer_size = 8,
	.cursor_chunk_size = 2,
	.max_dchub_pscl_bw_pix_per_clk = 4,
	.max_pscl_lb_bw_pix_per_clk = 2,
	.max_lb_vscl_bw_pix_per_clk = 4,
	.max_vscl_hscl_bw_pix_per_clk = 4,
	.dispclk_ramp_margin_percent = 1,
	.underscan_factor = 1.10,
	.min_vblank_lines = 32,
	.dppclk_delay_subtotal = 77,
	.dppclk_delay_scl_lb_only = 16,
	.dppclk_delay_scl = 50,
	.dppclk_delay_cnvc_formatter = 8,
	.dppclk_delay_cnvc_cursor = 6,
	.dispclk_delay_subtotal = 87,
	.dcfclk_cstate_latency = 10, // SRExitTime
	.max_inter_dcn_tile_repeaters = 8,
	.xfc_supported = true,
	.xfc_fill_bw_overhead_percent = 10.0,
	.xfc_fill_constant_bytes = 0,
};
struct _vcs_dpi_soc_bounding_box_st dcn2_0_soc = {
	/* Defaults that get patched on driver load from firmware. */
	.clock_limits = {
		{
			.fabricclk_mhz = 560.0,
			.dispclk_mhz = 513.0,
			.dram_speed_mts = 8960.0,
		},
		{
			.fabricclk_mhz = 694.0,
			.dispclk_mhz = 642.0,
			.dram_speed_mts = 11104.0,
		},
		{
			.fabricclk_mhz = 875.0,
			.dispclk_mhz = 734.0,
			.dram_speed_mts = 14000.0,
		},
		{
			.dcfclk_mhz = 1000.0,
			.fabricclk_mhz = 1000.0,
			.dispclk_mhz = 1100.0,
			.dppclk_mhz = 1100.0,
			.socclk_mhz = 1000.0,
			.dram_speed_mts = 16000.0,
		},
		{
			.dcfclk_mhz = 1200.0,
			.fabricclk_mhz = 1200.0,
			.dispclk_mhz = 1284.0,
			.dppclk_mhz = 1284.0,
			.socclk_mhz = 1200.0,
			.dram_speed_mts = 16000.0,
		},
		/*Extra state, no dispclk ramping*/
		{
			.dcfclk_mhz = 1200.0,
			.fabricclk_mhz = 1200.0,
			.dispclk_mhz = 1284.0,
			.dppclk_mhz = 1284.0,
			.socclk_mhz = 1200.0,
			.dram_speed_mts = 16000.0,
		},
	},
	.sr_exit_time_us = 8.6,
	.sr_enter_plus_exit_time_us = 10.9,
	.urgent_latency_us = 4.0,
	.urgent_latency_pixel_data_only_us = 4.0,
	.urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
	.urgent_latency_vm_data_only_us = 4.0,
	.urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
	.urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
	.urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
	.pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 40.0,
	.pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 40.0,
	.pct_ideal_dram_sdp_bw_after_urgent_vm_only = 40.0,
	.max_avg_sdp_bw_use_normal_percent = 40.0,
	.max_avg_dram_bw_use_normal_percent = 40.0,
	.writeback_latency_us = 12.0,
	.ideal_dram_bw_after_urgent_percent = 40.0,
	.max_request_size_bytes = 256,
	.dram_channel_width_bytes = 2,
	.fabric_datapath_to_dcn_data_return_bytes = 64,
	.dcn_downspread_percent = 0.5,
	.downspread_percent = 0.38,
	.dram_page_open_time_ns = 50.0,
	.dram_rw_turnaround_time_ns = 17.5,
	.dram_return_buffer_per_channel_bytes = 8192,
	.round_trip_ping_latency_dcfclk_cycles = 131,
	.urgent_out_of_order_return_per_channel_bytes = 256,
	.channel_interleave_bytes = 256,
	.vmm_page_size_bytes = 4096,
	.dram_clock_change_latency_us = 404.0,
	.dummy_pstate_latency_us = 5.0,
	.writeback_dram_clock_change_latency_us = 23.0,
	.return_bus_width_bytes = 64,
	.dispclk_dppclk_vco_speed_mhz = 3850,
	.xfc_bus_transport_time_us = 20,
	.xfc_xbuf_latency_tolerance_us = 4,
	.use_urgent_burst_bw = 0
};

struct _vcs_dpi_soc_bounding_box_st dcn2_0_nv12_soc = { 0 };
#ifndef mmDP0_DP_DPHY_INTERNAL_CTRL
	#define mmDP0_DP_DPHY_INTERNAL_CTRL		0x210f
	#define mmDP0_DP_DPHY_INTERNAL_CTRL_BASE_IDX	2
	#define mmDP1_DP_DPHY_INTERNAL_CTRL		0x220f
	#define mmDP1_DP_DPHY_INTERNAL_CTRL_BASE_IDX	2
	#define mmDP2_DP_DPHY_INTERNAL_CTRL		0x230f
	#define mmDP2_DP_DPHY_INTERNAL_CTRL_BASE_IDX	2
	#define mmDP3_DP_DPHY_INTERNAL_CTRL		0x240f
	#define mmDP3_DP_DPHY_INTERNAL_CTRL_BASE_IDX	2
	#define mmDP4_DP_DPHY_INTERNAL_CTRL		0x250f
	#define mmDP4_DP_DPHY_INTERNAL_CTRL_BASE_IDX	2
	#define mmDP5_DP_DPHY_INTERNAL_CTRL		0x260f
	#define mmDP5_DP_DPHY_INTERNAL_CTRL_BASE_IDX	2
	#define mmDP6_DP_DPHY_INTERNAL_CTRL		0x270f
	#define mmDP6_DP_DPHY_INTERNAL_CTRL_BASE_IDX	2
#endif
enum dcn20_clk_src_array_id {

/* begin *********************
 * macros to expand register list macro defined in HW object header file */

/* TODO awful hack. fixup dcn20_dwb.h */
#define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg

#define BASE(seg) BASE_INNER(seg)

#define SR(reg_name)\
		.reg_name = BASE(mm ## reg_name ## _BASE_IDX) + \
					mm ## reg_name

#define SRI(reg_name, block, id)\
	.reg_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
					mm ## block ## id ## _ ## reg_name

#define SRIR(var_name, reg_name, block, id)\
	.var_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
					mm ## block ## id ## _ ## reg_name

#define SRII(reg_name, block, id)\
	.reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
					mm ## block ## id ## _ ## reg_name

#define DCCG_SRII(reg_name, block, id)\
	.block ## _ ## reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
					mm ## block ## id ## _ ## reg_name

#define NBIO_BASE_INNER(seg) \
	NBIO_BASE__INST0_SEG ## seg

#define NBIO_BASE(seg) \
	NBIO_BASE_INNER(seg)

#define NBIO_SR(reg_name)\
		.reg_name = NBIO_BASE(mm ## reg_name ## _BASE_IDX) + \
					mm ## reg_name

#define MMHUB_BASE_INNER(seg) \
	MMHUB_BASE__INST0_SEG ## seg

#define MMHUB_BASE(seg) \
	MMHUB_BASE_INNER(seg)

#define MMHUB_SR(reg_name)\
		.reg_name = MMHUB_BASE(mmMM ## reg_name ## _BASE_IDX) + \
					mmMM ## reg_name
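
/*
 * Example (illustrative only): SRI(DP_DPHY_INTERNAL_CTRL, DP, 0) expands to
 *   .DP_DPHY_INTERNAL_CTRL = BASE(mmDP0_DP_DPHY_INTERNAL_CTRL_BASE_IDX)
 *                            + mmDP0_DP_DPHY_INTERNAL_CTRL
 * i.e. a register's absolute address is the instance-0 segment base plus the
 * per-register offset from the generated headers included above.
 */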
static const struct bios_registers bios_regs = {
		NBIO_SR(BIOS_SCRATCH_3),
		NBIO_SR(BIOS_SCRATCH_6)
};
#define clk_src_regs(index, pllid)\
[index] = {\
	CS_COMMON_REG_LIST_DCN2_0(index, pllid),\
}

static const struct dce110_clk_src_regs clk_src_regs[] = {

static const struct dce110_clk_src_shift cs_shift = {
		CS_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT)
};

static const struct dce110_clk_src_mask cs_mask = {
		CS_COMMON_MASK_SH_LIST_DCN2_0(_MASK)
};

static const struct dce_dmcu_registers dmcu_regs = {
		DMCU_DCN10_REG_LIST()
};

static const struct dce_dmcu_shift dmcu_shift = {
		DMCU_MASK_SH_LIST_DCN10(__SHIFT)
};

static const struct dce_dmcu_mask dmcu_mask = {
		DMCU_MASK_SH_LIST_DCN10(_MASK)
};

static const struct dce_abm_registers abm_regs = {

static const struct dce_abm_shift abm_shift = {
		ABM_MASK_SH_LIST_DCN20(__SHIFT)
};

static const struct dce_abm_mask abm_mask = {
		ABM_MASK_SH_LIST_DCN20(_MASK)
};

#define audio_regs(id)\
[id] = {\
		AUD_COMMON_REG_LIST(id)\
}

static const struct dce_audio_registers audio_regs[] = {

#define DCE120_AUD_COMMON_MASK_SH_LIST(mask_sh)\
		SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX, AZALIA_ENDPOINT_REG_INDEX, mask_sh),\
		SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA, AZALIA_ENDPOINT_REG_DATA, mask_sh),\
		AUD_COMMON_MASK_SH_LIST_BASE(mask_sh)

static const struct dce_audio_shift audio_shift = {
		DCE120_AUD_COMMON_MASK_SH_LIST(__SHIFT)
};

static const struct dce_audio_mask audio_mask = {
		DCE120_AUD_COMMON_MASK_SH_LIST(_MASK)
};

#define stream_enc_regs(id)\
[id] = {\
	SE_DCN2_REG_LIST(id)\
}

static const struct dcn10_stream_enc_registers stream_enc_regs[] = {

static const struct dcn10_stream_encoder_shift se_shift = {
		SE_COMMON_MASK_SH_LIST_DCN20(__SHIFT)
};

static const struct dcn10_stream_encoder_mask se_mask = {
		SE_COMMON_MASK_SH_LIST_DCN20(_MASK)
};
#define aux_regs(id)\
[id] = {\
	DCN2_AUX_REG_LIST(id)\
}

static const struct dcn10_link_enc_aux_registers link_enc_aux_regs[] = {

#define hpd_regs(id)\

static const struct dcn10_link_enc_hpd_registers link_enc_hpd_regs[] = {

#define link_regs(id, phyid)\
[id] = {\
	LE_DCN10_REG_LIST(id), \
	UNIPHY_DCN2_REG_LIST(phyid), \
	DPCS_DCN2_REG_LIST(id), \
	SRI(DP_DPHY_INTERNAL_CTRL, DP, id) \
}

static const struct dcn10_link_enc_registers link_enc_regs[] = {

static const struct dcn10_link_enc_shift le_shift = {
	LINK_ENCODER_MASK_SH_LIST_DCN20(__SHIFT),\
	DPCS_DCN2_MASK_SH_LIST(__SHIFT)
};

static const struct dcn10_link_enc_mask le_mask = {
	LINK_ENCODER_MASK_SH_LIST_DCN20(_MASK),\
	DPCS_DCN2_MASK_SH_LIST(_MASK)
};

#define ipp_regs(id)\
[id] = {\
	IPP_REG_LIST_DCN20(id),\
}

static const struct dcn10_ipp_registers ipp_regs[] = {

static const struct dcn10_ipp_shift ipp_shift = {
		IPP_MASK_SH_LIST_DCN20(__SHIFT)
};

static const struct dcn10_ipp_mask ipp_mask = {
		IPP_MASK_SH_LIST_DCN20(_MASK),
};

#define opp_regs(id)\
[id] = {\
	OPP_REG_LIST_DCN20(id),\
}

static const struct dcn20_opp_registers opp_regs[] = {

static const struct dcn20_opp_shift opp_shift = {
		OPP_MASK_SH_LIST_DCN20(__SHIFT)
};

static const struct dcn20_opp_mask opp_mask = {
		OPP_MASK_SH_LIST_DCN20(_MASK)
};

#define aux_engine_regs(id)\
[id] = {\
	AUX_COMMON_REG_LIST0(id), \
	.AUX_RESET_MASK = DP_AUX0_AUX_CONTROL__AUX_RESET_MASK, \
}

static const struct dce110_aux_registers aux_engine_regs[] = {

#define tf_regs(id)\
[id] = {\
	TF_REG_LIST_DCN20(id),\
	TF_REG_LIST_DCN20_COMMON_APPEND(id),\
}

static const struct dcn2_dpp_registers tf_regs[] = {

static const struct dcn2_dpp_shift tf_shift = {
		TF_REG_LIST_SH_MASK_DCN20(__SHIFT),
		TF_DEBUG_REG_LIST_SH_DCN20
};

static const struct dcn2_dpp_mask tf_mask = {
		TF_REG_LIST_SH_MASK_DCN20(_MASK),
		TF_DEBUG_REG_LIST_MASK_DCN20
};
#define dwbc_regs_dcn2(id)\
[id] = {\
	DWBC_COMMON_REG_LIST_DCN2_0(id),\
}

static const struct dcn20_dwbc_registers dwbc20_regs[] = {

static const struct dcn20_dwbc_shift dwbc20_shift = {
	DWBC_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT)
};

static const struct dcn20_dwbc_mask dwbc20_mask = {
	DWBC_COMMON_MASK_SH_LIST_DCN2_0(_MASK)
};

#define mcif_wb_regs_dcn2(id)\
[id] = {\
	MCIF_WB_COMMON_REG_LIST_DCN2_0(id),\
}

static const struct dcn20_mmhubbub_registers mcif_wb20_regs[] = {
	mcif_wb_regs_dcn2(0),
};

static const struct dcn20_mmhubbub_shift mcif_wb20_shift = {
	MCIF_WB_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT)
};

static const struct dcn20_mmhubbub_mask mcif_wb20_mask = {
	MCIF_WB_COMMON_MASK_SH_LIST_DCN2_0(_MASK)
};

static const struct dcn20_mpc_registers mpc_regs = {
		MPC_REG_LIST_DCN2_0(0),
		MPC_REG_LIST_DCN2_0(1),
		MPC_REG_LIST_DCN2_0(2),
		MPC_REG_LIST_DCN2_0(3),
		MPC_REG_LIST_DCN2_0(4),
		MPC_REG_LIST_DCN2_0(5),
		MPC_OUT_MUX_REG_LIST_DCN2_0(0),
		MPC_OUT_MUX_REG_LIST_DCN2_0(1),
		MPC_OUT_MUX_REG_LIST_DCN2_0(2),
		MPC_OUT_MUX_REG_LIST_DCN2_0(3),
		MPC_OUT_MUX_REG_LIST_DCN2_0(4),
		MPC_OUT_MUX_REG_LIST_DCN2_0(5),
		MPC_DBG_REG_LIST_DCN2_0()
};

static const struct dcn20_mpc_shift mpc_shift = {
	MPC_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT),
	MPC_DEBUG_REG_LIST_SH_DCN20
};

static const struct dcn20_mpc_mask mpc_mask = {
	MPC_COMMON_MASK_SH_LIST_DCN2_0(_MASK),
	MPC_DEBUG_REG_LIST_MASK_DCN20
};

#define tg_regs(id)\
[id] = {TG_COMMON_REG_LIST_DCN2_0(id)}

static const struct dcn_optc_registers tg_regs[] = {

static const struct dcn_optc_shift tg_shift = {
	TG_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT)
};

static const struct dcn_optc_mask tg_mask = {
	TG_COMMON_MASK_SH_LIST_DCN2_0(_MASK)
};
#define hubp_regs(id)\
[id] = {\
	HUBP_REG_LIST_DCN20(id)\
}

static const struct dcn_hubp2_registers hubp_regs[] = {

static const struct dcn_hubp2_shift hubp_shift = {
		HUBP_MASK_SH_LIST_DCN20(__SHIFT)
};

static const struct dcn_hubp2_mask hubp_mask = {
		HUBP_MASK_SH_LIST_DCN20(_MASK)
};

static const struct dcn_hubbub_registers hubbub_reg = {
		HUBBUB_REG_LIST_DCN20(0)
};

static const struct dcn_hubbub_shift hubbub_shift = {
		HUBBUB_MASK_SH_LIST_DCN20(__SHIFT)
};

static const struct dcn_hubbub_mask hubbub_mask = {
		HUBBUB_MASK_SH_LIST_DCN20(_MASK)
};

#define vmid_regs(id)\
[id] = {\
		DCN20_VMID_REG_LIST(id)\
}

static const struct dcn_vmid_registers vmid_regs[] = {

static const struct dcn20_vmid_shift vmid_shifts = {
		DCN20_VMID_MASK_SH_LIST(__SHIFT)
};

static const struct dcn20_vmid_mask vmid_masks = {
		DCN20_VMID_MASK_SH_LIST(_MASK)
};

static const struct dce110_aux_registers_shift aux_shift = {
		DCN_AUX_MASK_SH_LIST(__SHIFT)
};

static const struct dce110_aux_registers_mask aux_mask = {
		DCN_AUX_MASK_SH_LIST(_MASK)
};
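
/*
 * Map a UNIPHY transmitter enum to its PHY instance index; the index selects
 * the matching UNIPHY/DPCS register set for the link encoder.
 */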
static int map_transmitter_id_to_phy_instance(
	enum transmitter transmitter)
{
	switch (transmitter) {
	case TRANSMITTER_UNIPHY_A:
		return 0;
	case TRANSMITTER_UNIPHY_B:
		return 1;
	case TRANSMITTER_UNIPHY_C:
		return 2;
	case TRANSMITTER_UNIPHY_D:
		return 3;
	case TRANSMITTER_UNIPHY_E:
		return 4;
	case TRANSMITTER_UNIPHY_F:
		return 5;
	default:
		ASSERT(0);
		return 0;
	}
}
#define dsc_regsDCN20(id)\
[id] = {\
	DSC_REG_LIST_DCN20(id)\
}

static const struct dcn20_dsc_registers dsc_regs[] = {

static const struct dcn20_dsc_shift dsc_shift = {
	DSC_REG_LIST_SH_MASK_DCN20(__SHIFT)
};

static const struct dcn20_dsc_mask dsc_mask = {
	DSC_REG_LIST_SH_MASK_DCN20(_MASK)
};

static const struct dccg_registers dccg_regs = {

static const struct dccg_shift dccg_shift = {
		DCCG_MASK_SH_LIST_DCN2(__SHIFT)
};

static const struct dccg_mask dccg_mask = {
		DCCG_MASK_SH_LIST_DCN2(_MASK)
};
static const struct resource_caps res_cap_nv10 = {
		.num_timing_generator = 6,
		.num_video_plane = 6,
		.num_stream_encoder = 6,
};

static const struct dc_plane_cap plane_cap = {
	.type = DC_PLANE_TYPE_DCN_UNIVERSAL,
	.blends_with_above = true,
	.blends_with_below = true,
	.per_pixel_alpha = true,

	.pixel_format_support = {
	},

	.max_upscale_factor = {
	},

	.max_downscale_factor = {
	},
};

static const struct resource_caps res_cap_nv14 = {
		.num_timing_generator = 5,
		.num_video_plane = 5,
		.num_stream_encoder = 5,
};
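
/*
 * Two sets of debug defaults: debug_defaults_drv is used for normal driver
 * operation, debug_defaults_diags for diagnostic/virtual environments where
 * most power gating and clock gating is disabled.
 */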
static const struct dc_debug_options debug_defaults_drv = {
		.disable_dmcu = true,
		.force_abm_enable = false,
		.timing_trace = false,
		.disable_pplib_clock_request = true,
		.pipe_split_policy = MPC_SPLIT_DYNAMIC,
		.force_single_disp_pipe_split = false,
		.disable_dcc = DCC_ENABLE,
		.performance_trace = false,
		.max_downscale_src_width = 5120,/*upto 5K*/
		.disable_pplib_wm_range = false,
		.scl_reset_length10 = true,
		.sanity_checks = false,
		.disable_tri_buf = true,
		.underflow_assert_delay_us = 0xFFFFFFFF,
};

static const struct dc_debug_options debug_defaults_diags = {
		.disable_dmcu = true,
		.force_abm_enable = false,
		.timing_trace = true,
		.disable_dpp_power_gate = true,
		.disable_hubp_power_gate = true,
		.disable_clock_gate = true,
		.disable_pplib_clock_request = true,
		.disable_pplib_wm_range = true,
		.disable_stutter = true,
		.scl_reset_length10 = true,
		.underflow_assert_delay_us = 0xFFFFFFFF,
};
void dcn20_dpp_destroy(struct dpp **dpp)
{
	kfree(TO_DCN20_DPP(*dpp));
	*dpp = NULL;
}

struct dpp *dcn20_dpp_create(
	struct dc_context *ctx,
	uint32_t inst)
{
	struct dcn20_dpp *dpp =
		kzalloc(sizeof(struct dcn20_dpp), GFP_KERNEL);

	if (!dpp)
		return NULL;

	if (dpp2_construct(dpp, ctx, inst,
			&tf_regs[inst], &tf_shift, &tf_mask))
		return &dpp->base;

	kfree(dpp);
	return NULL;
}
struct input_pixel_processor *dcn20_ipp_create(
	struct dc_context *ctx, uint32_t inst)
{
	struct dcn10_ipp *ipp =
		kzalloc(sizeof(struct dcn10_ipp), GFP_KERNEL);

	if (!ipp)
		return NULL;

	dcn20_ipp_construct(ipp, ctx, inst,
			&ipp_regs[inst], &ipp_shift, &ipp_mask);
	return &ipp->base;
}

struct output_pixel_processor *dcn20_opp_create(
	struct dc_context *ctx, uint32_t inst)
{
	struct dcn20_opp *opp =
		kzalloc(sizeof(struct dcn20_opp), GFP_KERNEL);

	if (!opp) {
		BREAK_TO_DEBUGGER();
		return NULL;
	}

	dcn20_opp_construct(opp, ctx, inst,
			&opp_regs[inst], &opp_shift, &opp_mask);
	return &opp->base;
}
struct dce_aux *dcn20_aux_engine_create(
	struct dc_context *ctx,
	uint32_t inst)
{
	struct aux_engine_dce110 *aux_engine =
		kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL);

	if (!aux_engine)
		return NULL;

	dce110_aux_engine_construct(aux_engine, ctx, inst,
				    SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
				    &aux_engine_regs[inst],
					&aux_mask,
					&aux_shift,
					ctx->dc->caps.extended_aux_timeout_support);

	return &aux_engine->base;
}

#define i2c_inst_regs(id) { I2C_HW_ENGINE_COMMON_REG_LIST(id) }

static const struct dce_i2c_registers i2c_hw_regs[] = {

static const struct dce_i2c_shift i2c_shifts = {
		I2C_COMMON_MASK_SH_LIST_DCN2(__SHIFT)
};

static const struct dce_i2c_mask i2c_masks = {
		I2C_COMMON_MASK_SH_LIST_DCN2(_MASK)
};
struct dce_i2c_hw *dcn20_i2c_hw_create(
	struct dc_context *ctx,
	uint32_t inst)
{
	struct dce_i2c_hw *dce_i2c_hw =
		kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL);

	if (!dce_i2c_hw)
		return NULL;

	dcn2_i2c_hw_construct(dce_i2c_hw, ctx, inst,
			&i2c_hw_regs[inst], &i2c_shifts, &i2c_masks);

	return dce_i2c_hw;
}

struct mpc *dcn20_mpc_create(struct dc_context *ctx)
{
	struct dcn20_mpc *mpc20 = kzalloc(sizeof(struct dcn20_mpc),
					  GFP_KERNEL);

	if (!mpc20)
		return NULL;

	dcn20_mpc_construct(mpc20, ctx,
			&mpc_regs,
			&mpc_shift,
			&mpc_mask,
			6);

	return &mpc20->base;
}

struct hubbub *dcn20_hubbub_create(struct dc_context *ctx)
{
	int i;
	struct dcn20_hubbub *hubbub = kzalloc(sizeof(struct dcn20_hubbub),
					  GFP_KERNEL);

	if (!hubbub)
		return NULL;

	hubbub2_construct(hubbub, ctx,
			&hubbub_reg,
			&hubbub_shift,
			&hubbub_mask);

	for (i = 0; i < res_cap_nv10.num_vmid; i++) {
		struct dcn20_vmid *vmid = &hubbub->vmid[i];

		vmid->regs = &vmid_regs[i];
		vmid->shifts = &vmid_shifts;
		vmid->masks = &vmid_masks;
	}

	return &hubbub->base;
}
struct timing_generator *dcn20_timing_generator_create(
		struct dc_context *ctx,
		uint32_t instance)
{
	struct optc *tgn10 =
		kzalloc(sizeof(struct optc), GFP_KERNEL);

	if (!tgn10)
		return NULL;

	tgn10->base.inst = instance;
	tgn10->base.ctx = ctx;

	tgn10->tg_regs = &tg_regs[instance];
	tgn10->tg_shift = &tg_shift;
	tgn10->tg_mask = &tg_mask;

	dcn20_timing_generator_init(tgn10);

	return &tgn10->base;
}

static const struct encoder_feature_support link_enc_feature = {
		.max_hdmi_deep_color = COLOR_DEPTH_121212,
		.max_hdmi_pixel_clock = 600000,
		.hdmi_ycbcr420_supported = true,
		.dp_ycbcr420_supported = true,
		.flags.bits.IS_HBR2_CAPABLE = true,
		.flags.bits.IS_HBR3_CAPABLE = true,
		.flags.bits.IS_TPS3_CAPABLE = true,
		.flags.bits.IS_TPS4_CAPABLE = true
};
struct link_encoder *dcn20_link_encoder_create(
	const struct encoder_init_data *enc_init_data)
{
	struct dcn20_link_encoder *enc20 =
		kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL);
	int link_regs_id;

	if (!enc20)
		return NULL;

	link_regs_id =
		map_transmitter_id_to_phy_instance(enc_init_data->transmitter);

	dcn20_link_encoder_construct(enc20,
			enc_init_data,
			&link_enc_feature,
			&link_enc_regs[link_regs_id],
			&link_enc_aux_regs[enc_init_data->channel - 1],
			&link_enc_hpd_regs[enc_init_data->hpd_source],
			&le_shift,
			&le_mask);

	return &enc20->enc10.base;
}
struct clock_source *dcn20_clock_source_create(
	struct dc_context *ctx,
	struct dc_bios *bios,
	enum clock_source_id id,
	const struct dce110_clk_src_regs *regs,
	bool dp_clk_src)
{
	struct dce110_clk_src *clk_src =
		kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL);

	if (!clk_src)
		return NULL;

	if (dcn20_clk_src_construct(clk_src, ctx, bios, id,
			regs, &cs_shift, &cs_mask)) {
		clk_src->base.dp_clk_src = dp_clk_src;
		return &clk_src->base;
	}

	BREAK_TO_DEBUGGER();
	kfree(clk_src);
	return NULL;
}

static void read_dce_straps(
	struct dc_context *ctx,
	struct resource_straps *straps)
{
	generic_reg_get(ctx, mmDC_PINSTRAPS + BASE(mmDC_PINSTRAPS_BASE_IDX),
		FN(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO), &straps->dc_pinstraps_audio);
}

static struct audio *dcn20_create_audio(
		struct dc_context *ctx, unsigned int inst)
{
	return dce_audio_create(ctx, inst,
			&audio_regs[inst], &audio_shift, &audio_mask);
}
struct stream_encoder *dcn20_stream_encoder_create(
	enum engine_id eng_id,
	struct dc_context *ctx)
{
	struct dcn10_stream_encoder *enc1 =
		kzalloc(sizeof(struct dcn10_stream_encoder), GFP_KERNEL);

	if (!enc1)
		return NULL;

	if (ASICREV_IS_NAVI14_M(ctx->asic_id.hw_internal_rev)) {
		if (eng_id >= ENGINE_ID_DIGD)
			eng_id++;
	}

	dcn20_stream_encoder_construct(enc1, ctx, ctx->dc_bios, eng_id,
					&stream_enc_regs[eng_id],
					&se_shift, &se_mask);

	return &enc1->base;
}

static const struct dce_hwseq_registers hwseq_reg = {
		HWSEQ_DCN2_REG_LIST()
};

static const struct dce_hwseq_shift hwseq_shift = {
		HWSEQ_DCN2_MASK_SH_LIST(__SHIFT)
};

static const struct dce_hwseq_mask hwseq_mask = {
		HWSEQ_DCN2_MASK_SH_LIST(_MASK)
};
struct dce_hwseq *dcn20_hwseq_create(
	struct dc_context *ctx)
{
	struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL);

	if (hws) {
		hws->ctx = ctx;
		hws->regs = &hwseq_reg;
		hws->shifts = &hwseq_shift;
		hws->masks = &hwseq_mask;
	}
	return hws;
}

static const struct resource_create_funcs res_create_funcs = {
	.read_dce_straps = read_dce_straps,
	.create_audio = dcn20_create_audio,
	.create_stream_encoder = dcn20_stream_encoder_create,
	.create_hwseq = dcn20_hwseq_create,
};

static const struct resource_create_funcs res_create_maximus_funcs = {
	.read_dce_straps = NULL,
	.create_audio = NULL,
	.create_stream_encoder = NULL,
	.create_hwseq = dcn20_hwseq_create,
};

static void dcn20_pp_smu_destroy(struct pp_smu_funcs **pp_smu);

void dcn20_clock_source_destroy(struct clock_source **clk_src)
{
	kfree(TO_DCE110_CLK_SRC(*clk_src));
	*clk_src = NULL;
}
struct display_stream_compressor *dcn20_dsc_create(
	struct dc_context *ctx, uint32_t inst)
{
	struct dcn20_dsc *dsc =
		kzalloc(sizeof(struct dcn20_dsc), GFP_KERNEL);

	if (!dsc) {
		BREAK_TO_DEBUGGER();
		return NULL;
	}

	dsc2_construct(dsc, ctx, inst, &dsc_regs[inst], &dsc_shift, &dsc_mask);
	return &dsc->base;
}

void dcn20_dsc_destroy(struct display_stream_compressor **dsc)
{
	kfree(container_of(*dsc, struct dcn20_dsc, base));
	*dsc = NULL;
}
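
/* Tear down everything the pool construction routine allocated. */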
static void dcn20_resource_destruct(struct dcn20_resource_pool *pool)
{
	unsigned int i;

	for (i = 0; i < pool->base.stream_enc_count; i++) {
		if (pool->base.stream_enc[i] != NULL) {
			kfree(DCN10STRENC_FROM_STRENC(pool->base.stream_enc[i]));
			pool->base.stream_enc[i] = NULL;
		}
	}

	for (i = 0; i < pool->base.res_cap->num_dsc; i++) {
		if (pool->base.dscs[i] != NULL)
			dcn20_dsc_destroy(&pool->base.dscs[i]);
	}

	if (pool->base.mpc != NULL) {
		kfree(TO_DCN20_MPC(pool->base.mpc));
		pool->base.mpc = NULL;
	}
	if (pool->base.hubbub != NULL) {
		kfree(pool->base.hubbub);
		pool->base.hubbub = NULL;
	}
	for (i = 0; i < pool->base.pipe_count; i++) {
		if (pool->base.dpps[i] != NULL)
			dcn20_dpp_destroy(&pool->base.dpps[i]);

		if (pool->base.ipps[i] != NULL)
			pool->base.ipps[i]->funcs->ipp_destroy(&pool->base.ipps[i]);

		if (pool->base.hubps[i] != NULL) {
			kfree(TO_DCN20_HUBP(pool->base.hubps[i]));
			pool->base.hubps[i] = NULL;
		}
	}

	if (pool->base.irqs != NULL) {
		dal_irq_service_destroy(&pool->base.irqs);
	}

	for (i = 0; i < pool->base.res_cap->num_ddc; i++) {
		if (pool->base.engines[i] != NULL)
			dce110_engine_destroy(&pool->base.engines[i]);
		if (pool->base.hw_i2cs[i] != NULL) {
			kfree(pool->base.hw_i2cs[i]);
			pool->base.hw_i2cs[i] = NULL;
		}
		if (pool->base.sw_i2cs[i] != NULL) {
			kfree(pool->base.sw_i2cs[i]);
			pool->base.sw_i2cs[i] = NULL;
		}
	}

	for (i = 0; i < pool->base.res_cap->num_opp; i++) {
		if (pool->base.opps[i] != NULL)
			pool->base.opps[i]->funcs->opp_destroy(&pool->base.opps[i]);
	}

	for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) {
		if (pool->base.timing_generators[i] != NULL) {
			kfree(DCN10TG_FROM_TG(pool->base.timing_generators[i]));
			pool->base.timing_generators[i] = NULL;
		}
	}

	for (i = 0; i < pool->base.res_cap->num_dwb; i++) {
		if (pool->base.dwbc[i] != NULL) {
			kfree(TO_DCN20_DWBC(pool->base.dwbc[i]));
			pool->base.dwbc[i] = NULL;
		}
		if (pool->base.mcif_wb[i] != NULL) {
			kfree(TO_DCN20_MMHUBBUB(pool->base.mcif_wb[i]));
			pool->base.mcif_wb[i] = NULL;
		}
	}

	for (i = 0; i < pool->base.audio_count; i++) {
		if (pool->base.audios[i])
			dce_aud_destroy(&pool->base.audios[i]);
	}

	for (i = 0; i < pool->base.clk_src_count; i++) {
		if (pool->base.clock_sources[i] != NULL) {
			dcn20_clock_source_destroy(&pool->base.clock_sources[i]);
			pool->base.clock_sources[i] = NULL;
		}
	}

	if (pool->base.dp_clock_source != NULL) {
		dcn20_clock_source_destroy(&pool->base.dp_clock_source);
		pool->base.dp_clock_source = NULL;
	}

	if (pool->base.abm != NULL)
		dce_abm_destroy(&pool->base.abm);

	if (pool->base.dmcu != NULL)
		dce_dmcu_destroy(&pool->base.dmcu);

	if (pool->base.dccg != NULL)
		dcn_dccg_destroy(&pool->base.dccg);

	if (pool->base.pp_smu != NULL)
		dcn20_pp_smu_destroy(&pool->base.pp_smu);

	if (pool->base.oem_device != NULL)
		dal_ddc_service_destroy(&pool->base.oem_device);
}
struct hubp *dcn20_hubp_create(
	struct dc_context *ctx,
	uint32_t inst)
{
	struct dcn20_hubp *hubp2 =
		kzalloc(sizeof(struct dcn20_hubp), GFP_KERNEL);

	if (!hubp2)
		return NULL;

	if (hubp2_construct(hubp2, ctx, inst,
			&hubp_regs[inst], &hubp_shift, &hubp_mask))
		return &hubp2->base;

	BREAK_TO_DEBUGGER();
	kfree(hubp2);
	return NULL;
}
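
/*
 * Derive the pixel clock request for a pipe from its stream timing. The
 * requested clock is halved when two ODM pipes/OPPs share the stream or the
 * timing packs two pixels per container, quartered for 4:1 ODM combine, and
 * doubled for HW frame packed 3D.
 */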
static void get_pixel_clock_parameters(
	struct pipe_ctx *pipe_ctx,
	struct pixel_clk_params *pixel_clk_params)
{
	const struct dc_stream_state *stream = pipe_ctx->stream;
	struct pipe_ctx *odm_pipe;
	int opp_cnt = 1;

	for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
		opp_cnt++;

	pixel_clk_params->requested_pix_clk_100hz = stream->timing.pix_clk_100hz;
	pixel_clk_params->encoder_object_id = stream->link->link_enc->id;
	pixel_clk_params->signal_type = pipe_ctx->stream->signal;
	pixel_clk_params->controller_id = pipe_ctx->stream_res.tg->inst + 1;
	/* TODO: un-hardcode*/
	pixel_clk_params->requested_sym_clk = LINK_RATE_LOW *
		LINK_RATE_REF_FREQ_IN_KHZ;
	pixel_clk_params->flags.ENABLE_SS = 0;
	pixel_clk_params->color_depth =
		stream->timing.display_color_depth;
	pixel_clk_params->flags.DISPLAY_BLANKED = 1;
	pixel_clk_params->pixel_encoding = stream->timing.pixel_encoding;

	if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR422)
		pixel_clk_params->color_depth = COLOR_DEPTH_888;

	if (opp_cnt == 4)
		pixel_clk_params->requested_pix_clk_100hz /= 4;
	else if (optc2_is_two_pixels_per_containter(&stream->timing) || opp_cnt == 2)
		pixel_clk_params->requested_pix_clk_100hz /= 2;

	if (stream->timing.timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING)
		pixel_clk_params->requested_pix_clk_100hz *= 2;
}
static void build_clamping_params(struct dc_stream_state *stream)
{
	stream->clamping.clamping_level = CLAMPING_FULL_RANGE;
	stream->clamping.c_depth = stream->timing.display_color_depth;
	stream->clamping.pixel_encoding = stream->timing.pixel_encoding;
}

static enum dc_status build_pipe_hw_param(struct pipe_ctx *pipe_ctx)
{
	get_pixel_clock_parameters(pipe_ctx, &pipe_ctx->stream_res.pix_clk_params);

	pipe_ctx->clock_source->funcs->get_pix_clk_dividers(
		pipe_ctx->clock_source,
		&pipe_ctx->stream_res.pix_clk_params,
		&pipe_ctx->pll_settings);

	pipe_ctx->stream->clamping.pixel_encoding = pipe_ctx->stream->timing.pixel_encoding;

	resource_build_bit_depth_reduction_params(pipe_ctx->stream,
					&pipe_ctx->stream->bit_depth_params);
	build_clamping_params(pipe_ctx->stream);

	return DC_OK;
}
enum dc_status dcn20_build_mapped_resource(const struct dc *dc, struct dc_state *context, struct dc_stream_state *stream)
{
	enum dc_status status = DC_OK;
	struct pipe_ctx *pipe_ctx = resource_get_head_pipe_for_stream(&context->res_ctx, stream);

	/*TODO Seems unneeded anymore */
	/*	if (old_context && resource_is_stream_unchanged(old_context, stream)) {
			if (stream != NULL && old_context->streams[i] != NULL) {
				todo: shouldn't have to copy missing parameter here
				resource_build_bit_depth_reduction_params(stream,
						&stream->bit_depth_params);
				stream->clamping.pixel_encoding =
						stream->timing.pixel_encoding;

				resource_build_bit_depth_reduction_params(stream,
						&stream->bit_depth_params);
				build_clamping_params(stream);
			}
		}
	*/

	if (!pipe_ctx)
		return DC_ERROR_UNEXPECTED;

	status = build_pipe_hw_param(pipe_ctx);

	return status;
}
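
/*
 * DSC allocation: when every OPP has its own DSC the engine is tied 1:1 to
 * the pipe index; otherwise the first unused DSC in the pool is handed out.
 */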
static void acquire_dsc(struct resource_context *res_ctx,
			const struct resource_pool *pool,
			struct display_stream_compressor **dsc,
			int pipe_idx)
{
	int i;

	ASSERT(*dsc == NULL);

	if (pool->res_cap->num_dsc == pool->res_cap->num_opp) {
		*dsc = pool->dscs[pipe_idx];
		res_ctx->is_dsc_acquired[pipe_idx] = true;
		return;
	}

	/* Find first free DSC */
	for (i = 0; i < pool->res_cap->num_dsc; i++)
		if (!res_ctx->is_dsc_acquired[i]) {
			*dsc = pool->dscs[i];
			res_ctx->is_dsc_acquired[i] = true;
			break;
		}
}

static void release_dsc(struct resource_context *res_ctx,
			const struct resource_pool *pool,
			struct display_stream_compressor **dsc)
{
	int i;

	for (i = 0; i < pool->res_cap->num_dsc; i++)
		if (pool->dscs[i] == *dsc) {
			res_ctx->is_dsc_acquired[i] = false;
			*dsc = NULL;
			break;
		}
}
enum dc_status dcn20_add_dsc_to_stream_resource(struct dc *dc,
		struct dc_state *dc_ctx,
		struct dc_stream_state *dc_stream)
{
	enum dc_status result = DC_OK;
	int i;
	const struct resource_pool *pool = dc->res_pool;

	/* Get a DSC if required and available */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe_ctx = &dc_ctx->res_ctx.pipe_ctx[i];

		if (pipe_ctx->stream != dc_stream)
			continue;

		if (pipe_ctx->stream_res.dsc)
			continue;

		acquire_dsc(&dc_ctx->res_ctx, pool, &pipe_ctx->stream_res.dsc, i);

		/* The number of DSCs can be less than the number of pipes */
		if (!pipe_ctx->stream_res.dsc) {
			result = DC_NO_DSC_RESOURCE;
		}

		break;
	}

	return result;
}

static enum dc_status remove_dsc_from_stream_resource(struct dc *dc,
		struct dc_state *new_ctx,
		struct dc_stream_state *dc_stream)
{
	struct pipe_ctx *pipe_ctx = NULL;
	int i;

	for (i = 0; i < MAX_PIPES; i++) {
		if (new_ctx->res_ctx.pipe_ctx[i].stream == dc_stream && !new_ctx->res_ctx.pipe_ctx[i].top_pipe) {
			pipe_ctx = &new_ctx->res_ctx.pipe_ctx[i];

			if (pipe_ctx->stream_res.dsc)
				release_dsc(&new_ctx->res_ctx, dc->res_pool, &pipe_ctx->stream_res.dsc);
		}
	}

	if (!pipe_ctx)
		return DC_ERROR_UNEXPECTED;
	return DC_OK;
}
enum dc_status dcn20_add_stream_to_ctx(struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream)
{
	enum dc_status result = DC_ERROR_UNEXPECTED;

	result = resource_map_pool_resources(dc, new_ctx, dc_stream);

	if (result == DC_OK)
		result = resource_map_phy_clock_resources(dc, new_ctx, dc_stream);

	/* Get a DSC if required and available */
	if (result == DC_OK && dc_stream->timing.flags.DSC)
		result = dcn20_add_dsc_to_stream_resource(dc, new_ctx, dc_stream);

	if (result == DC_OK)
		result = dcn20_build_mapped_resource(dc, new_ctx, dc_stream);

	return result;
}

enum dc_status dcn20_remove_stream_from_ctx(struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream)
{
	enum dc_status result = DC_OK;

	result = remove_dsc_from_stream_resource(dc, new_ctx, dc_stream);

	return result;
}
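
/* Translate a DC swizzle mode enum into the equivalent DML sw_mode value. */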
static void swizzle_to_dml_params(
		enum swizzle_mode_values swizzle,
		unsigned int *sw_mode)
{
	switch (swizzle) {
	case DC_SW_LINEAR:
		*sw_mode = dm_sw_linear;
		break;
	case DC_SW_4KB_S:
		*sw_mode = dm_sw_4kb_s;
		break;
	case DC_SW_4KB_S_X:
		*sw_mode = dm_sw_4kb_s_x;
		break;
	case DC_SW_4KB_D:
		*sw_mode = dm_sw_4kb_d;
		break;
	case DC_SW_4KB_D_X:
		*sw_mode = dm_sw_4kb_d_x;
		break;
	case DC_SW_64KB_S:
		*sw_mode = dm_sw_64kb_s;
		break;
	case DC_SW_64KB_S_X:
		*sw_mode = dm_sw_64kb_s_x;
		break;
	case DC_SW_64KB_S_T:
		*sw_mode = dm_sw_64kb_s_t;
		break;
	case DC_SW_64KB_D:
		*sw_mode = dm_sw_64kb_d;
		break;
	case DC_SW_64KB_D_X:
		*sw_mode = dm_sw_64kb_d_x;
		break;
	case DC_SW_64KB_D_T:
		*sw_mode = dm_sw_64kb_d_t;
		break;
	case DC_SW_64KB_R_X:
		*sw_mode = dm_sw_64kb_r_x;
		break;
	case DC_SW_VAR_S:
		*sw_mode = dm_sw_var_s;
		break;
	case DC_SW_VAR_S_X:
		*sw_mode = dm_sw_var_s_x;
		break;
	case DC_SW_VAR_D:
		*sw_mode = dm_sw_var_d;
		break;
	case DC_SW_VAR_D_X:
		*sw_mode = dm_sw_var_d_x;
		break;
	default:
		ASSERT(0); /* Not supported */
		break;
	}
}
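
/*
 * Split one stream across two ODM pipes: the new pipe inherits the previous
 * pipe's resources, is linked into the ODM chain, and both pipes' recout and
 * viewport widths are recomputed so each drives half of the active width.
 */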
bool dcn20_split_stream_for_odm(
		struct resource_context *res_ctx,
		const struct resource_pool *pool,
		struct pipe_ctx *prev_odm_pipe,
		struct pipe_ctx *next_odm_pipe)
{
	int pipe_idx = next_odm_pipe->pipe_idx;

	*next_odm_pipe = *prev_odm_pipe;

	next_odm_pipe->pipe_idx = pipe_idx;
	next_odm_pipe->plane_res.mi = pool->mis[next_odm_pipe->pipe_idx];
	next_odm_pipe->plane_res.hubp = pool->hubps[next_odm_pipe->pipe_idx];
	next_odm_pipe->plane_res.ipp = pool->ipps[next_odm_pipe->pipe_idx];
	next_odm_pipe->plane_res.xfm = pool->transforms[next_odm_pipe->pipe_idx];
	next_odm_pipe->plane_res.dpp = pool->dpps[next_odm_pipe->pipe_idx];
	next_odm_pipe->plane_res.mpcc_inst = pool->dpps[next_odm_pipe->pipe_idx]->inst;
	next_odm_pipe->stream_res.dsc = NULL;
	if (prev_odm_pipe->next_odm_pipe && prev_odm_pipe->next_odm_pipe != next_odm_pipe) {
		next_odm_pipe->next_odm_pipe = prev_odm_pipe->next_odm_pipe;
		next_odm_pipe->next_odm_pipe->prev_odm_pipe = next_odm_pipe;
	}
	prev_odm_pipe->next_odm_pipe = next_odm_pipe;
	next_odm_pipe->prev_odm_pipe = prev_odm_pipe;
	ASSERT(next_odm_pipe->top_pipe == NULL);

	if (prev_odm_pipe->plane_state) {
		struct scaler_data *sd = &prev_odm_pipe->plane_res.scl_data;
		int new_width;

		/* HACTIVE halved for odm combine */
		sd->h_active /= 2;
		/* Calculate new vp and recout for left pipe */
		/* Need at least 16 pixels width per side */
		if (sd->recout.x + 16 >= sd->h_active)
			return false;
		new_width = sd->h_active - sd->recout.x;
		sd->viewport.width -= dc_fixpt_floor(dc_fixpt_mul_int(
				sd->ratios.horz, sd->recout.width - new_width));
		sd->viewport_c.width -= dc_fixpt_floor(dc_fixpt_mul_int(
				sd->ratios.horz_c, sd->recout.width - new_width));
		sd->recout.width = new_width;

		/* Calculate new vp and recout for right pipe */
		sd = &next_odm_pipe->plane_res.scl_data;
		/* HACTIVE halved for odm combine */
		sd->h_active /= 2;
		/* Need at least 16 pixels width per side */
		if (new_width <= 16)
			return false;
		new_width = sd->recout.width + sd->recout.x - sd->h_active;
		sd->viewport.width -= dc_fixpt_floor(dc_fixpt_mul_int(
				sd->ratios.horz, sd->recout.width - new_width));
		sd->viewport_c.width -= dc_fixpt_floor(dc_fixpt_mul_int(
				sd->ratios.horz_c, sd->recout.width - new_width));
		sd->recout.width = new_width;
		sd->viewport.x += dc_fixpt_floor(dc_fixpt_mul_int(
				sd->ratios.horz, sd->h_active - sd->recout.x));
		sd->viewport_c.x += dc_fixpt_floor(dc_fixpt_mul_int(
				sd->ratios.horz_c, sd->h_active - sd->recout.x));
	}
	next_odm_pipe->stream_res.opp = pool->opps[next_odm_pipe->pipe_idx];
	if (next_odm_pipe->stream->timing.flags.DSC == 1) {
		acquire_dsc(res_ctx, pool, &next_odm_pipe->stream_res.dsc, next_odm_pipe->pipe_idx);
		ASSERT(next_odm_pipe->stream_res.dsc);
		if (next_odm_pipe->stream_res.dsc == NULL)
			return false;
	}

	return true;
}
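
/*
 * MPC (blending) split: clone the primary pipe into the secondary pipe,
 * stitch it into the bottom-pipe chain and rebuild scaling parameters so the
 * two pipes each render a portion of the same plane.
 */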
void dcn20_split_stream_for_mpc(
		struct resource_context *res_ctx,
		const struct resource_pool *pool,
		struct pipe_ctx *primary_pipe,
		struct pipe_ctx *secondary_pipe)
{
	int pipe_idx = secondary_pipe->pipe_idx;
	struct pipe_ctx *sec_bot_pipe = secondary_pipe->bottom_pipe;

	*secondary_pipe = *primary_pipe;
	secondary_pipe->bottom_pipe = sec_bot_pipe;

	secondary_pipe->pipe_idx = pipe_idx;
	secondary_pipe->plane_res.mi = pool->mis[secondary_pipe->pipe_idx];
	secondary_pipe->plane_res.hubp = pool->hubps[secondary_pipe->pipe_idx];
	secondary_pipe->plane_res.ipp = pool->ipps[secondary_pipe->pipe_idx];
	secondary_pipe->plane_res.xfm = pool->transforms[secondary_pipe->pipe_idx];
	secondary_pipe->plane_res.dpp = pool->dpps[secondary_pipe->pipe_idx];
	secondary_pipe->plane_res.mpcc_inst = pool->dpps[secondary_pipe->pipe_idx]->inst;
	secondary_pipe->stream_res.dsc = NULL;
	if (primary_pipe->bottom_pipe && primary_pipe->bottom_pipe != secondary_pipe) {
		ASSERT(!secondary_pipe->bottom_pipe);
		secondary_pipe->bottom_pipe = primary_pipe->bottom_pipe;
		secondary_pipe->bottom_pipe->top_pipe = secondary_pipe;
	}
	primary_pipe->bottom_pipe = secondary_pipe;
	secondary_pipe->top_pipe = primary_pipe;

	ASSERT(primary_pipe->plane_state);
	resource_build_scaling_params(primary_pipe);
	resource_build_scaling_params(secondary_pipe);
}
void dcn20_populate_dml_writeback_from_context(
		struct dc *dc, struct resource_context *res_ctx, display_e2e_pipe_params_st *pipes)
{
	int pipe_cnt, i;

	for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
		struct dc_writeback_info *wb_info = &res_ctx->pipe_ctx[i].stream->writeback_info[0];

		if (!res_ctx->pipe_ctx[i].stream)
			continue;

		/* Set writeback information */
		pipes[pipe_cnt].dout.wb_enable = (wb_info->wb_enabled == true) ? 1 : 0;
		pipes[pipe_cnt].dout.num_active_wb++;
		pipes[pipe_cnt].dout.wb.wb_src_height = wb_info->dwb_params.cnv_params.crop_height;
		pipes[pipe_cnt].dout.wb.wb_src_width = wb_info->dwb_params.cnv_params.crop_width;
		pipes[pipe_cnt].dout.wb.wb_dst_width = wb_info->dwb_params.dest_width;
		pipes[pipe_cnt].dout.wb.wb_dst_height = wb_info->dwb_params.dest_height;
		pipes[pipe_cnt].dout.wb.wb_htaps_luma = 1;
		pipes[pipe_cnt].dout.wb.wb_vtaps_luma = 1;
		pipes[pipe_cnt].dout.wb.wb_htaps_chroma = wb_info->dwb_params.scaler_taps.h_taps_c;
		pipes[pipe_cnt].dout.wb.wb_vtaps_chroma = wb_info->dwb_params.scaler_taps.v_taps_c;
		pipes[pipe_cnt].dout.wb.wb_hratio = 1.0;
		pipes[pipe_cnt].dout.wb.wb_vratio = 1.0;
		if (wb_info->dwb_params.out_format == dwb_scaler_mode_yuv420) {
			if (wb_info->dwb_params.output_depth == DWB_OUTPUT_PIXEL_DEPTH_8BPC)
				pipes[pipe_cnt].dout.wb.wb_pixel_format = dm_420_8;
			else
				pipes[pipe_cnt].dout.wb.wb_pixel_format = dm_420_10;
		} else
			pipes[pipe_cnt].dout.wb.wb_pixel_format = dm_444_32;

		pipe_cnt++;
	}
}
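
/*
 * Count how many pipes share one stream via ODM combine; returns 0 when the
 * pipe is not part of an ODM chain, otherwise the total number of heads.
 */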
static int get_num_odm_heads(struct pipe_ctx *pipe)
{
	int odm_head_count = 0;
	struct pipe_ctx *next_pipe = pipe->next_odm_pipe;

	while (next_pipe) {
		odm_head_count++;
		next_pipe = next_pipe->next_odm_pipe;
	}
	pipe = pipe->prev_odm_pipe;
	while (pipe) {
		odm_head_count++;
		pipe = pipe->prev_odm_pipe;
	}
	return odm_head_count ? odm_head_count + 1 : 0;
}
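
/*
 * Flatten the current dc_state into the display_e2e_pipe_params_st array the
 * DML validation code consumes: one entry per stream-owning pipe, carrying
 * timing, ODM/MPC split topology, scaling ratios, surface format and output
 * encoding. Pipes without a plane get conservative full-screen defaults so
 * the mode can still be validated.
 */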
1880 int dcn20_populate_dml_pipes_from_context(
1881 struct dc
*dc
, struct dc_state
*context
, display_e2e_pipe_params_st
*pipes
)
1884 bool synchronized_vblank
= true;
1885 struct resource_context
*res_ctx
= &context
->res_ctx
;
1887 for (i
= 0, pipe_cnt
= -1; i
< dc
->res_pool
->pipe_count
; i
++) {
1888 if (!res_ctx
->pipe_ctx
[i
].stream
)
1895 if (dc
->debug
.disable_timing_sync
|| !resource_are_streams_timing_synchronizable(
1896 res_ctx
->pipe_ctx
[pipe_cnt
].stream
,
1897 res_ctx
->pipe_ctx
[i
].stream
)) {
1898 synchronized_vblank
= false;
1903 for (i
= 0, pipe_cnt
= 0; i
< dc
->res_pool
->pipe_count
; i
++) {
1904 struct dc_crtc_timing
*timing
= &res_ctx
->pipe_ctx
[i
].stream
->timing
;
1905 unsigned int v_total
;
1906 unsigned int front_porch
;
1909 if (!res_ctx
->pipe_ctx
[i
].stream
)
1912 v_total
= timing
->v_total
;
1913 front_porch
= timing
->v_front_porch
;
1915 pipes[pipe_cnt].pipe.src.dynamic_metadata_enable = 0;
1916 pipes[pipe_cnt].pipe.src.dcc = 0;
1917 pipes[pipe_cnt].pipe.src.vm = 0;*/
1919 pipes
[pipe_cnt
].clks_cfg
.refclk_mhz
= dc
->res_pool
->ref_clocks
.dchub_ref_clock_inKhz
/ 1000.0;
1921 pipes
[pipe_cnt
].dout
.dsc_enable
= res_ctx
->pipe_ctx
[i
].stream
->timing
.flags
.DSC
;
1922 /* todo: rotation?*/
1923 pipes
[pipe_cnt
].dout
.dsc_slices
= res_ctx
->pipe_ctx
[i
].stream
->timing
.dsc_cfg
.num_slices_h
;
1924 if (res_ctx
->pipe_ctx
[i
].stream
->use_dynamic_meta
) {
1925 pipes
[pipe_cnt
].pipe
.src
.dynamic_metadata_enable
= true;
1927 pipes
[pipe_cnt
].pipe
.src
.dynamic_metadata_lines_before_active
=
1928 (v_total
- timing
->v_addressable
1929 - timing
->v_border_top
- timing
->v_border_bottom
) / 2;
1930 /* 36 bytes dp, 32 hdmi */
1931 pipes
[pipe_cnt
].pipe
.src
.dynamic_metadata_xmit_bytes
=
1932 dc_is_dp_signal(res_ctx
->pipe_ctx
[i
].stream
->signal
) ? 36 : 32;
1934 pipes
[pipe_cnt
].pipe
.src
.dcc
= false;
1935 pipes
[pipe_cnt
].pipe
.src
.dcc_rate
= 1;
1936 pipes
[pipe_cnt
].pipe
.dest
.synchronized_vblank_all_planes
= synchronized_vblank
;
1937 pipes
[pipe_cnt
].pipe
.dest
.hblank_start
= timing
->h_total
- timing
->h_front_porch
;
1938 pipes
[pipe_cnt
].pipe
.dest
.hblank_end
= pipes
[pipe_cnt
].pipe
.dest
.hblank_start
1939 - timing
->h_addressable
1940 - timing
->h_border_left
1941 - timing
->h_border_right
;
1942 pipes
[pipe_cnt
].pipe
.dest
.vblank_start
= v_total
- front_porch
;
1943 pipes
[pipe_cnt
].pipe
.dest
.vblank_end
= pipes
[pipe_cnt
].pipe
.dest
.vblank_start
1944 - timing
->v_addressable
1945 - timing
->v_border_top
1946 - timing
->v_border_bottom
;
1947 pipes
[pipe_cnt
].pipe
.dest
.htotal
= timing
->h_total
;
1948 pipes
[pipe_cnt
].pipe
.dest
.vtotal
= v_total
;
1949 pipes
[pipe_cnt
].pipe
.dest
.hactive
= timing
->h_addressable
;
1950 pipes
[pipe_cnt
].pipe
.dest
.vactive
= timing
->v_addressable
;
1951 pipes
[pipe_cnt
].pipe
.dest
.interlaced
= timing
->flags
.INTERLACE
;
1952 pipes
[pipe_cnt
].pipe
.dest
.pixel_rate_mhz
= timing
->pix_clk_100hz
/10000.0;
1953 if (timing
->timing_3d_format
== TIMING_3D_FORMAT_HW_FRAME_PACKING
)
1954 pipes
[pipe_cnt
].pipe
.dest
.pixel_rate_mhz
*= 2;
1955 pipes
[pipe_cnt
].pipe
.dest
.otg_inst
= res_ctx
->pipe_ctx
[i
].stream_res
.tg
->inst
;
1956 pipes
[pipe_cnt
].dout
.dp_lanes
= 4;
1957 pipes
[pipe_cnt
].pipe
.dest
.vtotal_min
= res_ctx
->pipe_ctx
[i
].stream
->adjust
.v_total_min
;
1958 pipes
[pipe_cnt
].pipe
.dest
.vtotal_max
= res_ctx
->pipe_ctx
[i
].stream
->adjust
.v_total_max
;
1959 switch (get_num_odm_heads(&res_ctx
->pipe_ctx
[i
])) {
1961 pipes
[pipe_cnt
].pipe
.dest
.odm_combine
= dm_odm_combine_mode_2to1
;
1964 pipes
[pipe_cnt
].pipe
.dest
.odm_combine
= dm_odm_combine_mode_disabled
;
1966 pipes
[pipe_cnt
].pipe
.src
.hsplit_grp
= res_ctx
->pipe_ctx
[i
].pipe_idx
;
1967 if (res_ctx
->pipe_ctx
[i
].top_pipe
&& res_ctx
->pipe_ctx
[i
].top_pipe
->plane_state
1968 == res_ctx
->pipe_ctx
[i
].plane_state
)
1969 pipes
[pipe_cnt
].pipe
.src
.hsplit_grp
= res_ctx
->pipe_ctx
[i
].top_pipe
->pipe_idx
;
1970 else if (res_ctx
->pipe_ctx
[i
].prev_odm_pipe
) {
1971 struct pipe_ctx
*first_pipe
= res_ctx
->pipe_ctx
[i
].prev_odm_pipe
;
1973 while (first_pipe
->prev_odm_pipe
)
1974 first_pipe
= first_pipe
->prev_odm_pipe
;
1975 pipes
[pipe_cnt
].pipe
.src
.hsplit_grp
= first_pipe
->pipe_idx
;
1978 switch (res_ctx
->pipe_ctx
[i
].stream
->signal
) {
1979 case SIGNAL_TYPE_DISPLAY_PORT_MST
:
1980 case SIGNAL_TYPE_DISPLAY_PORT
:
1981 pipes
[pipe_cnt
].dout
.output_type
= dm_dp
;
1983 case SIGNAL_TYPE_EDP
:
1984 pipes
[pipe_cnt
].dout
.output_type
= dm_edp
;
1986 case SIGNAL_TYPE_HDMI_TYPE_A
:
1987 case SIGNAL_TYPE_DVI_SINGLE_LINK
:
1988 case SIGNAL_TYPE_DVI_DUAL_LINK
:
1989 pipes
[pipe_cnt
].dout
.output_type
= dm_hdmi
;
1992 /* In case there is no signal, set dp with 4 lanes to allow max config */
1993 pipes
[pipe_cnt
].dout
.output_type
= dm_dp
;
1994 pipes
[pipe_cnt
].dout
.dp_lanes
= 4;
1997 switch (res_ctx
->pipe_ctx
[i
].stream
->timing
.display_color_depth
) {
1998 case COLOR_DEPTH_666
:
2001 case COLOR_DEPTH_888
:
2004 case COLOR_DEPTH_101010
:
2007 case COLOR_DEPTH_121212
:
2010 case COLOR_DEPTH_141414
:
2013 case COLOR_DEPTH_161616
:
2016 case COLOR_DEPTH_999
:
2019 case COLOR_DEPTH_111111
:
2027 switch (res_ctx
->pipe_ctx
[i
].stream
->timing
.pixel_encoding
) {
2028 case PIXEL_ENCODING_RGB
:
2029 case PIXEL_ENCODING_YCBCR444
:
2030 pipes
[pipe_cnt
].dout
.output_format
= dm_444
;
2031 pipes
[pipe_cnt
].dout
.output_bpp
= output_bpc
* 3;
2033 case PIXEL_ENCODING_YCBCR420
:
2034 pipes
[pipe_cnt
].dout
.output_format
= dm_420
;
2035 pipes
[pipe_cnt
].dout
.output_bpp
= (output_bpc
* 3.0) / 2;
2037 case PIXEL_ENCODING_YCBCR422
:
2038 if (true) /* todo */
2039 pipes
[pipe_cnt
].dout
.output_format
= dm_s422
;
2041 pipes
[pipe_cnt
].dout
.output_format
= dm_n422
;
2042 pipes
[pipe_cnt
].dout
.output_bpp
= output_bpc
* 2;
2045 pipes
[pipe_cnt
].dout
.output_format
= dm_444
;
2046 pipes
[pipe_cnt
].dout
.output_bpp
= output_bpc
* 3;
2049 if (res_ctx
->pipe_ctx
[i
].stream
->timing
.flags
.DSC
)
2050 pipes
[pipe_cnt
].dout
.output_bpp
= res_ctx
->pipe_ctx
[i
].stream
->timing
.dsc_cfg
.bits_per_pixel
/ 16.0;
2052 /* todo: default max for now, until there is logic reflecting this in dc*/
2053 pipes
[pipe_cnt
].dout
.output_bpc
= 12;
2055 * Use max cursor settings for calculations to minimize
2056 * bw calculations due to cursor on/off
2058 pipes
[pipe_cnt
].pipe
.src
.num_cursors
= 2;
2059 pipes
[pipe_cnt
].pipe
.src
.cur0_src_width
= 256;
2060 pipes
[pipe_cnt
].pipe
.src
.cur0_bpp
= dm_cur_32bit
;
2061 pipes
[pipe_cnt
].pipe
.src
.cur1_src_width
= 256;
2062 pipes
[pipe_cnt
].pipe
.src
.cur1_bpp
= dm_cur_32bit
;
2064 if (!res_ctx
->pipe_ctx
[i
].plane_state
) {
2065 pipes
[pipe_cnt
].pipe
.src
.source_scan
= dm_horz
;
2066 pipes
[pipe_cnt
].pipe
.src
.sw_mode
= dm_sw_linear
;
2067 pipes
[pipe_cnt
].pipe
.src
.macro_tile_size
= dm_64k_tile
;
2068 pipes
[pipe_cnt
].pipe
.src
.viewport_width
= timing
->h_addressable
;
2069 if (pipes
[pipe_cnt
].pipe
.src
.viewport_width
> 1920)
2070 pipes
[pipe_cnt
].pipe
.src
.viewport_width
= 1920;
2071 pipes
[pipe_cnt
].pipe
.src
.viewport_height
= timing
->v_addressable
;
2072 if (pipes
[pipe_cnt
].pipe
.src
.viewport_height
> 1080)
2073 pipes
[pipe_cnt
].pipe
.src
.viewport_height
= 1080;
2074 pipes
[pipe_cnt
].pipe
.src
.surface_height_y
= pipes
[pipe_cnt
].pipe
.src
.viewport_height
;
2075 pipes
[pipe_cnt
].pipe
.src
.surface_width_y
= pipes
[pipe_cnt
].pipe
.src
.viewport_width
;
2076 pipes
[pipe_cnt
].pipe
.src
.surface_height_c
= pipes
[pipe_cnt
].pipe
.src
.viewport_height
;
2077 pipes
[pipe_cnt
].pipe
.src
.surface_width_c
= pipes
[pipe_cnt
].pipe
.src
.viewport_width
;
2078 pipes
[pipe_cnt
].pipe
.src
.data_pitch
= ((pipes
[pipe_cnt
].pipe
.src
.viewport_width
+ 63) / 64) * 64; /* linear sw only */
2079 pipes
[pipe_cnt
].pipe
.src
.source_format
= dm_444_32
;
2080 pipes
[pipe_cnt
].pipe
.dest
.recout_width
= pipes
[pipe_cnt
].pipe
.src
.viewport_width
; /*vp_width/hratio*/
2081 pipes
[pipe_cnt
].pipe
.dest
.recout_height
= pipes
[pipe_cnt
].pipe
.src
.viewport_height
; /*vp_height/vratio*/
2082 pipes
[pipe_cnt
].pipe
.dest
.full_recout_width
= pipes
[pipe_cnt
].pipe
.dest
.recout_width
; /*when is_hsplit != 1*/
2083 pipes
[pipe_cnt
].pipe
.dest
.full_recout_height
= pipes
[pipe_cnt
].pipe
.dest
.recout_height
; /*when is_hsplit != 1*/
2084 pipes
[pipe_cnt
].pipe
.scale_ratio_depth
.lb_depth
= dm_lb_16
;
2085 pipes
[pipe_cnt
].pipe
.scale_ratio_depth
.hscl_ratio
= 1.0;
2086 pipes
[pipe_cnt
].pipe
.scale_ratio_depth
.vscl_ratio
= 1.0;
2087 pipes
[pipe_cnt
].pipe
.scale_ratio_depth
.scl_enable
= 0; /*Lb only or Full scl*/
2088 pipes
[pipe_cnt
].pipe
.scale_taps
.htaps
= 1;
2089 pipes
[pipe_cnt
].pipe
.scale_taps
.vtaps
= 1;
2090 pipes
[pipe_cnt
].pipe
.src
.is_hsplit
= 0;
2091 pipes
[pipe_cnt
].pipe
.dest
.odm_combine
= 0;
2092 pipes
[pipe_cnt
].pipe
.dest
.vtotal_min
= v_total
;
2093 pipes
[pipe_cnt
].pipe
.dest
.vtotal_max
= v_total
;
2095 struct dc_plane_state
*pln
= res_ctx
->pipe_ctx
[i
].plane_state
;
2096 struct scaler_data
*scl
= &res_ctx
->pipe_ctx
[i
].plane_res
.scl_data
;
2098 pipes
[pipe_cnt
].pipe
.src
.immediate_flip
= pln
->flip_immediate
;
2099 pipes
[pipe_cnt
].pipe
.src
.is_hsplit
= (res_ctx
->pipe_ctx
[i
].bottom_pipe
2100 && res_ctx
->pipe_ctx
[i
].bottom_pipe
->plane_state
== pln
)
2101 || (res_ctx
->pipe_ctx
[i
].top_pipe
2102 && res_ctx
->pipe_ctx
[i
].top_pipe
->plane_state
== pln
);
2103 pipes
[pipe_cnt
].pipe
.src
.source_scan
= pln
->rotation
== ROTATION_ANGLE_90
2104 || pln
->rotation
== ROTATION_ANGLE_270
? dm_vert
: dm_horz
;
2105 pipes
[pipe_cnt
].pipe
.src
.viewport_y_y
= scl
->viewport
.y
;
2106 pipes
[pipe_cnt
].pipe
.src
.viewport_y_c
= scl
->viewport_c
.y
;
2107 pipes
[pipe_cnt
].pipe
.src
.viewport_width
= scl
->viewport
.width
;
2108 pipes
[pipe_cnt
].pipe
.src
.viewport_width_c
= scl
->viewport_c
.width
;
2109 pipes
[pipe_cnt
].pipe
.src
.viewport_height
= scl
->viewport
.height
;
2110 pipes
[pipe_cnt
].pipe
.src
.viewport_height_c
= scl
->viewport_c
.height
;
2111 pipes
[pipe_cnt
].pipe
.src
.surface_width_y
= pln
->plane_size
.surface_size
.width
;
2112 pipes
[pipe_cnt
].pipe
.src
.surface_height_y
= pln
->plane_size
.surface_size
.height
;
2113 pipes
[pipe_cnt
].pipe
.src
.surface_width_c
= pln
->plane_size
.chroma_size
.width
;
2114 pipes
[pipe_cnt
].pipe
.src
.surface_height_c
= pln
->plane_size
.chroma_size
.height
;
2115 if (pln
->format
>= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN
) {
2116 pipes
[pipe_cnt
].pipe
.src
.data_pitch
= pln
->plane_size
.surface_pitch
;
2117 pipes
[pipe_cnt
].pipe
.src
.data_pitch_c
= pln
->plane_size
.chroma_pitch
;
2118 pipes
[pipe_cnt
].pipe
.src
.meta_pitch
= pln
->dcc
.meta_pitch
;
2119 pipes
[pipe_cnt
].pipe
.src
.meta_pitch_c
= pln
->dcc
.meta_pitch_c
;
2121 pipes
[pipe_cnt
].pipe
.src
.data_pitch
= pln
->plane_size
.surface_pitch
;
2122 pipes
[pipe_cnt
].pipe
.src
.meta_pitch
= pln
->dcc
.meta_pitch
;
2124 pipes
[pipe_cnt
].pipe
.src
.dcc
= pln
->dcc
.enable
;
2125 pipes
[pipe_cnt
].pipe
.dest
.recout_width
= scl
->recout
.width
;
2126 pipes
[pipe_cnt
].pipe
.dest
.recout_height
= scl
->recout
.height
;
2127 pipes
[pipe_cnt
].pipe
.dest
.full_recout_width
= scl
->recout
.width
;
2128 pipes
[pipe_cnt
].pipe
.dest
.full_recout_height
= scl
->recout
.height
;
2129 if (res_ctx
->pipe_ctx
[i
].bottom_pipe
&& res_ctx
->pipe_ctx
[i
].bottom_pipe
->plane_state
== pln
) {
2130 pipes
[pipe_cnt
].pipe
.dest
.full_recout_width
+=
2131 res_ctx
->pipe_ctx
[i
].bottom_pipe
->plane_res
.scl_data
.recout
.width
;
2132 pipes
[pipe_cnt
].pipe
.dest
.full_recout_height
+=
2133 res_ctx
->pipe_ctx
[i
].bottom_pipe
->plane_res
.scl_data
.recout
.height
;
2134 } else if (res_ctx
->pipe_ctx
[i
].top_pipe
&& res_ctx
->pipe_ctx
[i
].top_pipe
->plane_state
== pln
) {
2135 pipes
[pipe_cnt
].pipe
.dest
.full_recout_width
+=
2136 res_ctx
->pipe_ctx
[i
].top_pipe
->plane_res
.scl_data
.recout
.width
;
2137 pipes
[pipe_cnt
].pipe
.dest
.full_recout_height
+=
2138 res_ctx
->pipe_ctx
[i
].top_pipe
->plane_res
.scl_data
.recout
.height
;
		pipes[pipe_cnt].pipe.scale_ratio_depth.lb_depth = dm_lb_16;
		pipes[pipe_cnt].pipe.scale_ratio_depth.hscl_ratio = (double) scl->ratios.horz.value / (1ULL << 32);
		pipes[pipe_cnt].pipe.scale_ratio_depth.hscl_ratio_c = (double) scl->ratios.horz_c.value / (1ULL << 32);
		pipes[pipe_cnt].pipe.scale_ratio_depth.vscl_ratio = (double) scl->ratios.vert.value / (1ULL << 32);
		pipes[pipe_cnt].pipe.scale_ratio_depth.vscl_ratio_c = (double) scl->ratios.vert_c.value / (1ULL << 32);
		pipes[pipe_cnt].pipe.scale_ratio_depth.scl_enable =
				scl->ratios.vert.value != dc_fixpt_one.value
				|| scl->ratios.horz.value != dc_fixpt_one.value
				|| scl->ratios.vert_c.value != dc_fixpt_one.value
				|| scl->ratios.horz_c.value != dc_fixpt_one.value /*Lb only or Full scl*/
				|| dc->debug.always_scale; /*support always scale*/
		pipes[pipe_cnt].pipe.scale_taps.htaps = scl->taps.h_taps;
		pipes[pipe_cnt].pipe.scale_taps.htaps_c = scl->taps.h_taps_c;
		pipes[pipe_cnt].pipe.scale_taps.vtaps = scl->taps.v_taps;
		pipes[pipe_cnt].pipe.scale_taps.vtaps_c = scl->taps.v_taps_c;

		pipes[pipe_cnt].pipe.src.macro_tile_size =
				swizzle_mode_to_macro_tile_size(pln->tiling_info.gfx9.swizzle);
		swizzle_to_dml_params(pln->tiling_info.gfx9.swizzle,
				&pipes[pipe_cnt].pipe.src.sw_mode);

		switch (pln->format) {
		case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr:
		case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb:
			pipes[pipe_cnt].pipe.src.source_format = dm_420_8;
			break;
		case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr:
		case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb:
			pipes[pipe_cnt].pipe.src.source_format = dm_420_10;
			break;
		case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
		case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
		case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
			pipes[pipe_cnt].pipe.src.source_format = dm_444_64;
			break;
		case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
		case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
			pipes[pipe_cnt].pipe.src.source_format = dm_444_16;
			break;
		case SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS:
			pipes[pipe_cnt].pipe.src.source_format = dm_444_8;
			break;
		default:
			pipes[pipe_cnt].pipe.src.source_format = dm_444_32;
			break;
		}

		pipe_cnt++;
	}

	/* populate writeback information */
	dc->res_pool->funcs->populate_dml_writeback_from_context(dc, res_ctx, pipes);

	return pipe_cnt;
}
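
/*
 * dcn20_calc_max_scaled_time() works in the same 4-bit-fraction time units as
 * wb_arb_params->time_per_pixel.  Rough worked example with illustrative
 * numbers (not taken from hardware): in PACKED_444 mode time_per_byte is
 * time_per_pixel/4 and max_free_entry is 0x200 + 0x140 = 832 entries, so
 * buf_lh_capability = 832 * time_per_byte * 2, and the returned value is that
 * capability minus the urgent watermark passed in.
 */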
unsigned int dcn20_calc_max_scaled_time(
		unsigned int time_per_pixel,
		enum mmhubbub_wbif_mode mode,
		unsigned int urgent_watermark)
{
	unsigned int time_per_byte = 0;
	unsigned int total_y_free_entry = 0x200; /* two memory piece for luma */
	unsigned int total_c_free_entry = 0x140; /* two memory piece for chroma */
	unsigned int small_free_entry, max_free_entry;
	unsigned int buf_lh_capability;
	unsigned int max_scaled_time;

	if (mode == PACKED_444) /* packed mode */
		time_per_byte = time_per_pixel/4;
	else if (mode == PLANAR_420_8BPC)
		time_per_byte = time_per_pixel;
	else if (mode == PLANAR_420_10BPC) /* p010 */
		time_per_byte = time_per_pixel * 819/1024;

	if (time_per_byte == 0)
		time_per_byte = 1;

	small_free_entry  = (total_y_free_entry > total_c_free_entry) ? total_c_free_entry : total_y_free_entry;
	max_free_entry    = (mode == PACKED_444) ? total_y_free_entry + total_c_free_entry : small_free_entry;
	buf_lh_capability = max_free_entry*time_per_byte*32/16; /* there is 4bit fraction */
	max_scaled_time   = buf_lh_capability - urgent_watermark;

	return max_scaled_time;
}
void dcn20_set_mcif_arb_params(
		struct dc *dc,
		struct dc_state *context,
		display_e2e_pipe_params_st *pipes,
		int pipe_cnt)
{
	enum mmhubbub_wbif_mode wbif_mode;
	struct mcif_arb_params *wb_arb_params;
	int i, j, k, dwb_pipe;

	/* Writeback MCIF_WB arbitration parameters */
	dwb_pipe = 0;
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		if (!context->res_ctx.pipe_ctx[i].stream)
			continue;

		for (j = 0; j < MAX_DWB_PIPES; j++) {
			if (context->res_ctx.pipe_ctx[i].stream->writeback_info[j].wb_enabled == false)
				continue;

			//wb_arb_params = &context->res_ctx.pipe_ctx[i].stream->writeback_info[j].mcif_arb_params;
			wb_arb_params = &context->bw_ctx.bw.dcn.bw_writeback.mcif_wb_arb[dwb_pipe];

			if (context->res_ctx.pipe_ctx[i].stream->writeback_info[j].dwb_params.out_format == dwb_scaler_mode_yuv420) {
				if (context->res_ctx.pipe_ctx[i].stream->writeback_info[j].dwb_params.output_depth == DWB_OUTPUT_PIXEL_DEPTH_8BPC)
					wbif_mode = PLANAR_420_8BPC;
				else
					wbif_mode = PLANAR_420_10BPC;
			} else
				wbif_mode = PACKED_444;

			for (k = 0; k < sizeof(wb_arb_params->cli_watermark)/sizeof(wb_arb_params->cli_watermark[0]); k++) {
				wb_arb_params->cli_watermark[k] = get_wm_writeback_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
				wb_arb_params->pstate_watermark[k] = get_wm_writeback_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
			}
			wb_arb_params->time_per_pixel = 16.0 / context->res_ctx.pipe_ctx[i].stream->phy_pix_clk; /* 4 bit fraction, ms */
			wb_arb_params->slice_lines = 32;
			wb_arb_params->arbitration_slice = 2;
			wb_arb_params->max_scaled_time = dcn20_calc_max_scaled_time(wb_arb_params->time_per_pixel,
					wbif_mode,
					wb_arb_params->cli_watermark[0]); /* assume 4 watermark sets have the same value */

			dwb_pipe++;

			if (dwb_pipe >= MAX_DWB_PIPES)
				return;
		}
		if (dwb_pipe >= MAX_DWB_PIPES)
			return;
	}
}
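
/*
 * DSC configs are validated against the stream timing.  When ODM combine is
 * active the picture is split across opp_cnt OPPs, which is why pic_width and
 * num_slices_h below are divided by opp_cnt before the per-DSC capability
 * check in dsc_validate_stream().
 */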
bool dcn20_validate_dsc(struct dc *dc, struct dc_state *new_ctx)
{
	int i;

	/* Validate DSC config, dsc count validation is already done */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe_ctx = &new_ctx->res_ctx.pipe_ctx[i];
		struct dc_stream_state *stream = pipe_ctx->stream;
		struct dsc_config dsc_cfg;
		struct pipe_ctx *odm_pipe;
		int opp_cnt = 1;

		for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
			opp_cnt++;

		/* Only need to validate top pipe */
		if (pipe_ctx->top_pipe || pipe_ctx->prev_odm_pipe || !stream || !stream->timing.flags.DSC)
			continue;

		dsc_cfg.pic_width = (stream->timing.h_addressable + stream->timing.h_border_left
				+ stream->timing.h_border_right) / opp_cnt;
		dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top
				+ stream->timing.v_border_bottom;
		dsc_cfg.pixel_encoding = stream->timing.pixel_encoding;
		dsc_cfg.color_depth = stream->timing.display_color_depth;
		dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg;
		dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt;

		if (!pipe_ctx->stream_res.dsc->funcs->dsc_validate_stream(pipe_ctx->stream_res.dsc, &dsc_cfg))
			return false;
	}
	return true;
}
struct pipe_ctx *dcn20_find_secondary_pipe(struct dc *dc,
		struct resource_context *res_ctx,
		const struct resource_pool *pool,
		const struct pipe_ctx *primary_pipe)
{
	struct pipe_ctx *secondary_pipe = NULL;

	if (dc && primary_pipe) {
		int j;
		int preferred_pipe_idx = 0;

		/* first check the prev dc state:
		 * if this primary pipe has a bottom pipe in prev. state
		 * and if the bottom pipe is still available (which it should be),
		 * pick that pipe as secondary
		 * Same logic applies for ODM pipes. Since mpo is not allowed with odm
		 * check in else case.
		 */
		if (dc->current_state->res_ctx.pipe_ctx[primary_pipe->pipe_idx].bottom_pipe) {
			preferred_pipe_idx = dc->current_state->res_ctx.pipe_ctx[primary_pipe->pipe_idx].bottom_pipe->pipe_idx;
			if (res_ctx->pipe_ctx[preferred_pipe_idx].stream == NULL) {
				secondary_pipe = &res_ctx->pipe_ctx[preferred_pipe_idx];
				secondary_pipe->pipe_idx = preferred_pipe_idx;
			}
		} else if (dc->current_state->res_ctx.pipe_ctx[primary_pipe->pipe_idx].next_odm_pipe) {
			preferred_pipe_idx = dc->current_state->res_ctx.pipe_ctx[primary_pipe->pipe_idx].next_odm_pipe->pipe_idx;
			if (res_ctx->pipe_ctx[preferred_pipe_idx].stream == NULL) {
				secondary_pipe = &res_ctx->pipe_ctx[preferred_pipe_idx];
				secondary_pipe->pipe_idx = preferred_pipe_idx;
			}
		}

		/*
		 * if this primary pipe does not have a bottom pipe in prev. state
		 * start backward and find a pipe that did not used to be a bottom pipe in
		 * prev. dc state. This way we make sure we keep the same assignment as
		 * last state and will not have to reprogram every pipe
		 */
		if (secondary_pipe == NULL) {
			for (j = dc->res_pool->pipe_count - 1; j >= 0; j--) {
				if (dc->current_state->res_ctx.pipe_ctx[j].top_pipe == NULL
						&& dc->current_state->res_ctx.pipe_ctx[j].prev_odm_pipe == NULL) {
					preferred_pipe_idx = j;

					if (res_ctx->pipe_ctx[preferred_pipe_idx].stream == NULL) {
						secondary_pipe = &res_ctx->pipe_ctx[preferred_pipe_idx];
						secondary_pipe->pipe_idx = preferred_pipe_idx;
						break;
					}
				}
			}
		}
		/*
		 * We should never hit this assert unless assignments are shuffled around
		 * if this happens we will prob. hit a vsync tdr
		 */
		ASSERT(secondary_pipe);
		/*
		 * search backwards for the second pipe to keep pipe
		 * assignment more consistent
		 */
		if (secondary_pipe == NULL) {
			for (j = dc->res_pool->pipe_count - 1; j >= 0; j--) {
				preferred_pipe_idx = j;

				if (res_ctx->pipe_ctx[preferred_pipe_idx].stream == NULL) {
					secondary_pipe = &res_ctx->pipe_ctx[preferred_pipe_idx];
					secondary_pipe->pipe_idx = preferred_pipe_idx;
					break;
				}
			}
		}
	}

	return secondary_pipe;
}
void dcn20_merge_pipes_for_validate(
		struct dc *dc,
		struct dc_state *context)
{
	int i;

	/* merge previously split odm pipes since mode support needs to make the decision */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
		struct pipe_ctx *odm_pipe = pipe->next_odm_pipe;

		if (pipe->prev_odm_pipe)
			continue;

		pipe->next_odm_pipe = NULL;
		while (odm_pipe) {
			struct pipe_ctx *next_odm_pipe = odm_pipe->next_odm_pipe;

			odm_pipe->plane_state = NULL;
			odm_pipe->stream = NULL;
			odm_pipe->top_pipe = NULL;
			odm_pipe->bottom_pipe = NULL;
			odm_pipe->prev_odm_pipe = NULL;
			odm_pipe->next_odm_pipe = NULL;
			if (odm_pipe->stream_res.dsc)
				release_dsc(&context->res_ctx, dc->res_pool, &odm_pipe->stream_res.dsc);
			/* Clear plane_res and stream_res */
			memset(&odm_pipe->plane_res, 0, sizeof(odm_pipe->plane_res));
			memset(&odm_pipe->stream_res, 0, sizeof(odm_pipe->stream_res));
			odm_pipe = next_odm_pipe;
		}
		if (pipe->plane_state)
			resource_build_scaling_params(pipe);
	}

	/* merge previously mpc split pipes since mode support needs to make the decision */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
		struct pipe_ctx *hsplit_pipe = pipe->bottom_pipe;

		if (!hsplit_pipe || hsplit_pipe->plane_state != pipe->plane_state)
			continue;

		pipe->bottom_pipe = hsplit_pipe->bottom_pipe;
		if (hsplit_pipe->bottom_pipe)
			hsplit_pipe->bottom_pipe->top_pipe = pipe;
		hsplit_pipe->plane_state = NULL;
		hsplit_pipe->stream = NULL;
		hsplit_pipe->top_pipe = NULL;
		hsplit_pipe->bottom_pipe = NULL;

		/* Clear plane_res and stream_res */
		memset(&hsplit_pipe->plane_res, 0, sizeof(hsplit_pipe->plane_res));
		memset(&hsplit_pipe->stream_res, 0, sizeof(hsplit_pipe->stream_res));
		if (pipe->plane_state)
			resource_build_scaling_params(pipe);
	}
}
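
/*
 * Decide per-pipe MPC/ODM split: the split[] flags come from the DML NoOfDPP
 * output at the chosen voltage level, plus the dc debug split policies and
 * stereo 3D view formats.  Returns the (possibly adjusted) voltage level.
 */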
int dcn20_validate_apply_pipe_split_flags(
		struct dc *dc,
		struct dc_state *context,
		int vlevel,
		bool *split)
{
	int i, pipe_idx, vlevel_split;
	bool force_split = false;
	bool avoid_split = dc->debug.pipe_split_policy != MPC_SPLIT_DYNAMIC;

	/* Single display loop, exits if there is more than one display */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
		bool exit_loop = false;

		if (!pipe->stream || pipe->top_pipe)
			continue;

		if (dc->debug.force_single_disp_pipe_split) {
			if (!force_split)
				force_split = true;
			else {
				force_split = false;
				exit_loop = true;
			}
		}
		if (dc->debug.pipe_split_policy == MPC_SPLIT_AVOID_MULT_DISP) {
			if (avoid_split)
				avoid_split = false;
			else {
				avoid_split = true;
				exit_loop = true;
			}
		}
		if (exit_loop)
			break;
	}
	/* TODO: fix dc bugs and remove this split threshold thing */
	if (context->stream_count > dc->res_pool->pipe_count / 2)
		avoid_split = true;

	/* Avoid split loop looks for lowest voltage level that allows most unsplit pipes possible */
	if (avoid_split) {
		for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
			if (!context->res_ctx.pipe_ctx[i].stream)
				continue;

			for (vlevel_split = vlevel; vlevel <= context->bw_ctx.dml.soc.num_states; vlevel++)
				if (context->bw_ctx.dml.vba.NoOfDPP[vlevel][0][pipe_idx] == 1)
					break;
			/* Impossible to not split this pipe */
			if (vlevel > context->bw_ctx.dml.soc.num_states)
				vlevel = vlevel_split;
			pipe_idx++;
		}
		context->bw_ctx.dml.vba.maxMpcComb = 0;
	}

	/* Split loop sets which pipe should be split based on dml outputs and dc flags */
	for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];

		if (!context->res_ctx.pipe_ctx[i].stream)
			continue;

		if (force_split || context->bw_ctx.dml.vba.NoOfDPP[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] > 1)
			split[i] = true;
		if ((pipe->stream->view_format ==
				VIEW_3D_FORMAT_SIDE_BY_SIDE ||
				pipe->stream->view_format ==
				VIEW_3D_FORMAT_TOP_AND_BOTTOM) &&
				(pipe->stream->timing.timing_3d_format ==
				TIMING_3D_FORMAT_TOP_AND_BOTTOM ||
				pipe->stream->timing.timing_3d_format ==
				TIMING_3D_FORMAT_SIDE_BY_SIDE))
			split[i] = true;
		if (dc->debug.force_odm_combine & (1 << pipe->stream_res.tg->inst)) {
			split[i] = true;
			context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_idx] = dm_odm_combine_mode_2to1;
		}
		context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx] =
			context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_idx];
		/* Adjust dppclk when split is forced, do not bother with dispclk */
		if (split[i] && context->bw_ctx.dml.vba.NoOfDPP[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] == 1)
			context->bw_ctx.dml.vba.RequiredDPPCLK[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] /= 2;
		pipe_idx++;
	}

	return vlevel;
}
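
/*
 * Fast validation path: merge any previously split pipes, repopulate the DML
 * pipe array, query DML for a voltage level, then re-apply ODM/MPC splits and
 * run the per-stream DSC check.  Watermarks and DLG params are computed
 * separately by the full (non-fast) validation path.
 */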
bool dcn20_fast_validate_bw(
		struct dc *dc,
		struct dc_state *context,
		display_e2e_pipe_params_st *pipes,
		int *pipe_cnt_out,
		int *pipe_split_from,
		int *vlevel_out)
{
	bool out = false;
	bool split[MAX_PIPES] = { false };
	int pipe_cnt, i, pipe_idx, vlevel;

	dcn20_merge_pipes_for_validate(dc, context);

	pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes);

	*pipe_cnt_out = pipe_cnt;

	vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt);

	if (vlevel > context->bw_ctx.dml.soc.num_states)
		goto validate_fail;

	vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, vlevel, split);

	/*initialize pipe_just_split_from to invalid idx*/
	for (i = 0; i < MAX_PIPES; i++)
		pipe_split_from[i] = -1;

	for (i = 0, pipe_idx = -1; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
		struct pipe_ctx *hsplit_pipe = pipe->bottom_pipe;

		if (!pipe->stream || pipe_split_from[i] >= 0)
			continue;

		pipe_idx++;

		if (!pipe->top_pipe && !pipe->plane_state && context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx]) {
			hsplit_pipe = dcn20_find_secondary_pipe(dc, &context->res_ctx, dc->res_pool, pipe);
			ASSERT(hsplit_pipe);
			if (!dcn20_split_stream_for_odm(
					&context->res_ctx, dc->res_pool,
					pipe, hsplit_pipe))
				goto validate_fail;
			pipe_split_from[hsplit_pipe->pipe_idx] = pipe_idx;
			dcn20_build_mapped_resource(dc, context, pipe->stream);
		}

		if (!pipe->plane_state)
			continue;
		/* Skip 2nd half of already split pipe */
		if (pipe->top_pipe && pipe->plane_state == pipe->top_pipe->plane_state)
			continue;

		/* We do not support mpo + odm at the moment */
		if (hsplit_pipe && hsplit_pipe->plane_state != pipe->plane_state
				&& context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx])
			goto validate_fail;

		if (split[i]) {
			if (!hsplit_pipe || hsplit_pipe->plane_state != pipe->plane_state) {
				/* pipe not split previously needs split */
				hsplit_pipe = dcn20_find_secondary_pipe(dc, &context->res_ctx, dc->res_pool, pipe);
				ASSERT(hsplit_pipe);
				if (!hsplit_pipe) {
					context->bw_ctx.dml.vba.RequiredDPPCLK[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] *= 2;
					continue;
				}
				if (context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx]) {
					if (!dcn20_split_stream_for_odm(
							&context->res_ctx, dc->res_pool,
							pipe, hsplit_pipe))
						goto validate_fail;
					dcn20_build_mapped_resource(dc, context, pipe->stream);
				} else
					dcn20_split_stream_for_mpc(
							&context->res_ctx, dc->res_pool,
							pipe, hsplit_pipe);
				pipe_split_from[hsplit_pipe->pipe_idx] = pipe_idx;
			}
		} else if (hsplit_pipe && hsplit_pipe->plane_state == pipe->plane_state) {
			/* merge should already have been done */
			ASSERT(0);
		}
	}
	/* Actual dsc count per stream dsc validation*/
	if (!dcn20_validate_dsc(dc, context)) {
		context->bw_ctx.dml.vba.ValidationStatus[context->bw_ctx.dml.vba.soc.num_states] =
				DML_FAIL_DSC_VALIDATION_FAILURE;
		goto validate_fail;
	}

	*vlevel_out = vlevel;

	out = true;
	goto validate_out;

validate_fail:
	out = false;

validate_out:
	return out;
}
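
/*
 * Watermark sets A-D are produced by re-reading the DML outputs with
 * pipes[0].clks_cfg pointed at different voltage states (only pipe 0 is read
 * for voltage and dcf/soc clocks), so each set reflects a different
 * DCFCLK/SOCCLK operating point.
 */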
static void dcn20_calculate_wm(
		struct dc *dc, struct dc_state *context,
		display_e2e_pipe_params_st *pipes,
		int *out_pipe_cnt,
		int *pipe_split_from,
		int vlevel)
{
	int pipe_cnt, i, pipe_idx;

	for (i = 0, pipe_idx = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
		if (!context->res_ctx.pipe_ctx[i].stream)
			continue;

		pipes[pipe_cnt].clks_cfg.refclk_mhz = dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000.0;
		pipes[pipe_cnt].clks_cfg.dispclk_mhz = context->bw_ctx.dml.vba.RequiredDISPCLK[vlevel][context->bw_ctx.dml.vba.maxMpcComb];

		if (pipe_split_from[i] < 0) {
			pipes[pipe_cnt].clks_cfg.dppclk_mhz =
					context->bw_ctx.dml.vba.RequiredDPPCLK[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx];
			if (context->bw_ctx.dml.vba.BlendingAndTiming[pipe_idx] == pipe_idx)
				pipes[pipe_cnt].pipe.dest.odm_combine =
						context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx];
			else
				pipes[pipe_cnt].pipe.dest.odm_combine = 0;
			pipe_idx++;
		} else {
			pipes[pipe_cnt].clks_cfg.dppclk_mhz =
					context->bw_ctx.dml.vba.RequiredDPPCLK[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_split_from[i]];
			if (context->bw_ctx.dml.vba.BlendingAndTiming[pipe_split_from[i]] == pipe_split_from[i])
				pipes[pipe_cnt].pipe.dest.odm_combine =
						context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_split_from[i]];
			else
				pipes[pipe_cnt].pipe.dest.odm_combine = 0;
		}

		if (dc->config.forced_clocks) {
			pipes[pipe_cnt].clks_cfg.dispclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dispclk_mhz;
			pipes[pipe_cnt].clks_cfg.dppclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dppclk_mhz;
		}
		if (dc->debug.min_disp_clk_khz > pipes[pipe_cnt].clks_cfg.dispclk_mhz * 1000)
			pipes[pipe_cnt].clks_cfg.dispclk_mhz = dc->debug.min_disp_clk_khz / 1000.0;
		if (dc->debug.min_dpp_clk_khz > pipes[pipe_cnt].clks_cfg.dppclk_mhz * 1000)
			pipes[pipe_cnt].clks_cfg.dppclk_mhz = dc->debug.min_dpp_clk_khz / 1000.0;

		pipe_cnt++;
	}

	if (pipe_cnt != pipe_idx) {
		if (dc->res_pool->funcs->populate_dml_pipes)
			pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc,
					context, pipes);
		else
			pipe_cnt = dcn20_populate_dml_pipes_from_context(dc,
					context, pipes);
	}

	*out_pipe_cnt = pipe_cnt;

	pipes[0].clks_cfg.voltage = vlevel;
	pipes[0].clks_cfg.dcfclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].dcfclk_mhz;
	pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].socclk_mhz;

	/* only pipe 0 is read for voltage and dcf/soc clocks */
	if (vlevel < 1) {
		pipes[0].clks_cfg.voltage = 1;
		pipes[0].clks_cfg.dcfclk_mhz = context->bw_ctx.dml.soc.clock_limits[1].dcfclk_mhz;
		pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[1].socclk_mhz;
	}
	context->bw_ctx.bw.dcn.watermarks.b.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
	context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
	context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
	context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
	context->bw_ctx.bw.dcn.watermarks.b.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
	context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
	context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
	context->bw_ctx.bw.dcn.watermarks.b.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;

	if (vlevel < 2) {
		pipes[0].clks_cfg.voltage = 2;
		pipes[0].clks_cfg.dcfclk_mhz = context->bw_ctx.dml.soc.clock_limits[2].dcfclk_mhz;
		pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[2].socclk_mhz;
	}
	context->bw_ctx.bw.dcn.watermarks.c.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
	context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
	context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
	context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
	context->bw_ctx.bw.dcn.watermarks.c.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
	context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
	context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;

	if (vlevel < 3) {
		pipes[0].clks_cfg.voltage = 3;
		pipes[0].clks_cfg.dcfclk_mhz = context->bw_ctx.dml.soc.clock_limits[2].dcfclk_mhz;
		pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[2].socclk_mhz;
	}
	context->bw_ctx.bw.dcn.watermarks.d.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
	context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
	context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
	context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
	context->bw_ctx.bw.dcn.watermarks.d.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
	context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
	context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;

	pipes[0].clks_cfg.voltage = vlevel;
	pipes[0].clks_cfg.dcfclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].dcfclk_mhz;
	pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].socclk_mhz;
	context->bw_ctx.bw.dcn.watermarks.a.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
	context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
	context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
	context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
	context->bw_ctx.bw.dcn.watermarks.a.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
	context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
	context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
}
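
/*
 * Copy the DML results back into the dc_state: global clocks, per-pipe
 * vstartup/vupdate/vready timing (duplicated into both halves of a split
 * pipe), and the per-pipe DLG/TTU/RQ register values.
 */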
void dcn20_calculate_dlg_params(
		struct dc *dc, struct dc_state *context,
		display_e2e_pipe_params_st *pipes,
		int pipe_cnt,
		int vlevel)
{
	int i, j, pipe_idx, pipe_idx_unsplit;
	bool visited[MAX_PIPES] = { 0 };

	/* Writeback MCIF_WB arbitration parameters */
	dc->res_pool->funcs->set_mcif_arb_params(dc, context, pipes, pipe_cnt);

	context->bw_ctx.bw.dcn.clk.dispclk_khz = context->bw_ctx.dml.vba.DISPCLK * 1000;
	context->bw_ctx.bw.dcn.clk.dcfclk_khz = context->bw_ctx.dml.vba.DCFCLK * 1000;
	context->bw_ctx.bw.dcn.clk.socclk_khz = context->bw_ctx.dml.vba.SOCCLK * 1000;
	context->bw_ctx.bw.dcn.clk.dramclk_khz = context->bw_ctx.dml.vba.DRAMSpeed * 1000 / 16;
	context->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz = context->bw_ctx.dml.vba.DCFCLKDeepSleep * 1000;
	context->bw_ctx.bw.dcn.clk.fclk_khz = context->bw_ctx.dml.vba.FabricClock * 1000;
	context->bw_ctx.bw.dcn.clk.p_state_change_support =
		context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb]
							!= dm_dram_clock_change_unsupported;
	context->bw_ctx.bw.dcn.clk.dppclk_khz = 0;

	/*
	 * An artifact of dml pipe split/odm is that pipes get merged back together for
	 * calculation. Therefore we need to only extract for first pipe in ascending index order
	 * and copy into the other split half.
	 */
	for (i = 0, pipe_idx = 0, pipe_idx_unsplit = 0; i < dc->res_pool->pipe_count; i++) {
		if (!context->res_ctx.pipe_ctx[i].stream)
			continue;

		if (!visited[pipe_idx]) {
			display_pipe_source_params_st *src = &pipes[pipe_idx].pipe.src;
			display_pipe_dest_params_st *dst = &pipes[pipe_idx].pipe.dest;

			dst->vstartup_start = context->bw_ctx.dml.vba.VStartup[pipe_idx_unsplit];
			dst->vupdate_offset = context->bw_ctx.dml.vba.VUpdateOffsetPix[pipe_idx_unsplit];
			dst->vupdate_width = context->bw_ctx.dml.vba.VUpdateWidthPix[pipe_idx_unsplit];
			dst->vready_offset = context->bw_ctx.dml.vba.VReadyOffsetPix[pipe_idx_unsplit];
			/*
			 * j iterates inside pipes array, unlike i which iterates inside
			 * pipe_ctx array
			 */
			if (src->is_hsplit)
				for (j = pipe_idx + 1; j < pipe_cnt; j++) {
					display_pipe_source_params_st *src_j = &pipes[j].pipe.src;
					display_pipe_dest_params_st *dst_j = &pipes[j].pipe.dest;

					if (src_j->is_hsplit && !visited[j]
							&& src->hsplit_grp == src_j->hsplit_grp) {
						dst_j->vstartup_start = context->bw_ctx.dml.vba.VStartup[pipe_idx_unsplit];
						dst_j->vupdate_offset = context->bw_ctx.dml.vba.VUpdateOffsetPix[pipe_idx_unsplit];
						dst_j->vupdate_width = context->bw_ctx.dml.vba.VUpdateWidthPix[pipe_idx_unsplit];
						dst_j->vready_offset = context->bw_ctx.dml.vba.VReadyOffsetPix[pipe_idx_unsplit];
						visited[j] = true;
					}
				}
			visited[pipe_idx] = true;
			pipe_idx_unsplit++;
		}
		pipe_idx++;
	}

	for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
		if (!context->res_ctx.pipe_ctx[i].stream)
			continue;
		if (context->bw_ctx.bw.dcn.clk.dppclk_khz < pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000)
			context->bw_ctx.bw.dcn.clk.dppclk_khz = pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000;
		context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz =
						pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000;
		ASSERT(visited[pipe_idx]);
		context->res_ctx.pipe_ctx[i].pipe_dlg_param = pipes[pipe_idx].pipe.dest;
		pipe_idx++;
	}
	/*save a original dppclock copy*/
	context->bw_ctx.bw.dcn.clk.bw_dppclk_khz = context->bw_ctx.bw.dcn.clk.dppclk_khz;
	context->bw_ctx.bw.dcn.clk.bw_dispclk_khz = context->bw_ctx.bw.dcn.clk.dispclk_khz;
	context->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz = context->bw_ctx.dml.soc.clock_limits[vlevel].dppclk_mhz * 1000;
	context->bw_ctx.bw.dcn.clk.max_supported_dispclk_khz = context->bw_ctx.dml.soc.clock_limits[vlevel].dispclk_mhz * 1000;

	for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
		bool cstate_en = context->bw_ctx.dml.vba.PrefetchMode[vlevel][context->bw_ctx.dml.vba.maxMpcComb] != 2;

		if (!context->res_ctx.pipe_ctx[i].stream)
			continue;

		context->bw_ctx.dml.funcs.rq_dlg_get_dlg_reg(&context->bw_ctx.dml,
				&context->res_ctx.pipe_ctx[i].dlg_regs,
				&context->res_ctx.pipe_ctx[i].ttu_regs,
				pipes,
				pipe_cnt,
				pipe_idx,
				cstate_en,
				context->bw_ctx.bw.dcn.clk.p_state_change_support,
				false, false, false);

		context->bw_ctx.dml.funcs.rq_dlg_get_rq_reg(&context->bw_ctx.dml,
				&context->res_ctx.pipe_ctx[i].rq_regs,
				pipes[pipe_idx].pipe);
		pipe_idx++;
	}
}
static bool dcn20_validate_bandwidth_internal(struct dc *dc, struct dc_state *context,
		bool fast_validate)
{
	bool out = false;

	BW_VAL_TRACE_SETUP();

	int vlevel = 0;
	int pipe_split_from[MAX_PIPES];
	int pipe_cnt = 0;
	display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_KERNEL);
	DC_LOGGER_INIT(dc->ctx->logger);

	BW_VAL_TRACE_COUNT();

	out = dcn20_fast_validate_bw(dc, context, pipes, &pipe_cnt, pipe_split_from, &vlevel);

	if (!out)
		goto validate_fail;

	BW_VAL_TRACE_END_VOLTAGE_LEVEL();

	if (fast_validate) {
		BW_VAL_TRACE_SKIP(fast);
		goto validate_out;
	}

	dcn20_calculate_wm(dc, context, pipes, &pipe_cnt, pipe_split_from, vlevel);
	dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel);

	BW_VAL_TRACE_END_WATERMARKS();

	goto validate_out;

validate_fail:
	DC_LOG_WARNING("Mode Validation Warning: %s failed validation.\n",
		dml_get_status_message(context->bw_ctx.dml.vba.ValidationStatus[context->bw_ctx.dml.vba.soc.num_states]));

	BW_VAL_TRACE_SKIP(fail);
	out = false;

validate_out:
	kfree(pipes);

	BW_VAL_TRACE_FINISH();

	return out;
}
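
/*
 * Two-pass p-state strategy: validate first with the real DRAM clock change
 * latency; if that does not pass, retry with the shorter dummy p-state
 * latency and, if only the retry passes, report p_state_change_support as
 * false for this context.  The original latency is restored before returning.
 */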
bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context,
		bool fast_validate)
{
	bool voltage_supported = false;
	bool full_pstate_supported = false;
	bool dummy_pstate_supported = false;
	double p_state_latency_us;

	p_state_latency_us = context->bw_ctx.dml.soc.dram_clock_change_latency_us;
	context->bw_ctx.dml.soc.disable_dram_clock_change_vactive_support =
		dc->debug.disable_dram_clock_change_vactive_support;

	if (fast_validate) {
		voltage_supported = dcn20_validate_bandwidth_internal(dc, context, true);

		return voltage_supported;
	}

	// Best case, we support full UCLK switch latency
	voltage_supported = dcn20_validate_bandwidth_internal(dc, context, false);
	full_pstate_supported = context->bw_ctx.bw.dcn.clk.p_state_change_support;

	if (context->bw_ctx.dml.soc.dummy_pstate_latency_us == 0 ||
		(voltage_supported && full_pstate_supported)) {
		context->bw_ctx.bw.dcn.clk.p_state_change_support = full_pstate_supported;
		goto restore_dml_state;
	}

	// Fallback: Try to only support G6 temperature read latency
	context->bw_ctx.dml.soc.dram_clock_change_latency_us = context->bw_ctx.dml.soc.dummy_pstate_latency_us;

	voltage_supported = dcn20_validate_bandwidth_internal(dc, context, false);
	dummy_pstate_supported = context->bw_ctx.bw.dcn.clk.p_state_change_support;

	if (voltage_supported && dummy_pstate_supported) {
		context->bw_ctx.bw.dcn.clk.p_state_change_support = false;
		goto restore_dml_state;
	}

	// ERROR: fallback is supposed to always work.
	ASSERT(false);

restore_dml_state:
	context->bw_ctx.dml.soc.dram_clock_change_latency_us = p_state_latency_us;

	return voltage_supported;
}
struct pipe_ctx *dcn20_acquire_idle_pipe_for_layer(
		struct dc_state *state,
		const struct resource_pool *pool,
		struct dc_stream_state *stream)
{
	struct resource_context *res_ctx = &state->res_ctx;
	struct pipe_ctx *head_pipe = resource_get_head_pipe_for_stream(res_ctx, stream);
	struct pipe_ctx *idle_pipe = find_idle_secondary_pipe(res_ctx, pool, head_pipe);

	if (!head_pipe)
		ASSERT(0);

	if (!idle_pipe)
		return NULL;

	idle_pipe->stream = head_pipe->stream;
	idle_pipe->stream_res.tg = head_pipe->stream_res.tg;
	idle_pipe->stream_res.opp = head_pipe->stream_res.opp;

	idle_pipe->plane_res.hubp = pool->hubps[idle_pipe->pipe_idx];
	idle_pipe->plane_res.ipp = pool->ipps[idle_pipe->pipe_idx];
	idle_pipe->plane_res.dpp = pool->dpps[idle_pipe->pipe_idx];
	idle_pipe->plane_res.mpcc_inst = pool->dpps[idle_pipe->pipe_idx]->inst;

	return idle_pipe;
}
bool dcn20_get_dcc_compression_cap(const struct dc *dc,
		const struct dc_dcc_surface_param *input,
		struct dc_surface_dcc_cap *output)
{
	return dc->res_pool->hubbub->funcs->get_dcc_compression_cap(
			dc->res_pool->hubbub,
			input,
			output);
}
static void dcn20_destroy_resource_pool(struct resource_pool **pool)
{
	struct dcn20_resource_pool *dcn20_pool = TO_DCN20_RES_POOL(*pool);

	dcn20_resource_destruct(dcn20_pool);
	kfree(dcn20_pool);
	*pool = NULL;
}
static struct dc_cap_funcs cap_funcs = {
	.get_dcc_compression_cap = dcn20_get_dcc_compression_cap
};
enum dc_status dcn20_get_default_swizzle_mode(struct dc_plane_state *plane_state)
{
	enum dc_status result = DC_OK;

	enum surface_pixel_format surf_pix_format = plane_state->format;
	unsigned int bpp = resource_pixel_format_to_bpp(surf_pix_format);

	enum swizzle_mode_values swizzle = DC_SW_LINEAR;

	if (bpp == 64)
		swizzle = DC_SW_64KB_D;
	else
		swizzle = DC_SW_64KB_S;

	plane_state->tiling_info.gfx9.swizzle = swizzle;
	return result;
}
static struct resource_funcs dcn20_res_pool_funcs = {
	.destroy = dcn20_destroy_resource_pool,
	.link_enc_create = dcn20_link_encoder_create,
	.validate_bandwidth = dcn20_validate_bandwidth,
	.acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer,
	.add_stream_to_ctx = dcn20_add_stream_to_ctx,
	.remove_stream_from_ctx = dcn20_remove_stream_from_ctx,
	.populate_dml_writeback_from_context = dcn20_populate_dml_writeback_from_context,
	.get_default_swizzle_mode = dcn20_get_default_swizzle_mode,
	.set_mcif_arb_params = dcn20_set_mcif_arb_params,
	.populate_dml_pipes = dcn20_populate_dml_pipes_from_context,
	.find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link
};
bool dcn20_dwbc_create(struct dc_context *ctx, struct resource_pool *pool)
{
	int i;
	uint32_t pipe_count = pool->res_cap->num_dwb;

	for (i = 0; i < pipe_count; i++) {
		struct dcn20_dwbc *dwbc20 = kzalloc(sizeof(struct dcn20_dwbc),
						    GFP_KERNEL);

		if (!dwbc20) {
			dm_error("DC: failed to create dwbc20!\n");
			return false;
		}
		dcn20_dwbc_construct(dwbc20, ctx,
				&dwbc20_regs[i],
				&dwbc20_shift,
				&dwbc20_mask,
				i);

		pool->dwbc[i] = &dwbc20->base;
	}
	return true;
}
bool dcn20_mmhubbub_create(struct dc_context *ctx, struct resource_pool *pool)
{
	int i;
	uint32_t pipe_count = pool->res_cap->num_dwb;

	ASSERT(pipe_count > 0);

	for (i = 0; i < pipe_count; i++) {
		struct dcn20_mmhubbub *mcif_wb20 = kzalloc(sizeof(struct dcn20_mmhubbub),
						    GFP_KERNEL);

		if (!mcif_wb20) {
			dm_error("DC: failed to create mcif_wb20!\n");
			return false;
		}

		dcn20_mmhubbub_construct(mcif_wb20, ctx,
				&mcif_wb20_regs[i],
				&mcif_wb20_shift,
				&mcif_wb20_mask,
				i);

		pool->mcif_wb[i] = &mcif_wb20->base;
	}
	return true;
}
static struct pp_smu_funcs *dcn20_pp_smu_create(struct dc_context *ctx)
{
	struct pp_smu_funcs *pp_smu = kzalloc(sizeof(*pp_smu), GFP_KERNEL);

	if (!pp_smu)
		return pp_smu;

	dm_pp_get_funcs(ctx, pp_smu);

	if (pp_smu->ctx.ver != PP_SMU_VER_NV)
		pp_smu = memset(pp_smu, 0, sizeof(struct pp_smu_funcs));

	return pp_smu;
}
static void dcn20_pp_smu_destroy(struct pp_smu_funcs **pp_smu)
{
	if (pp_smu && *pp_smu) {
		kfree(*pp_smu);
		*pp_smu = NULL;
	}
}
void dcn20_cap_soc_clocks(
		struct _vcs_dpi_soc_bounding_box_st *bb,
		struct pp_smu_nv_clock_table max_clocks)
{
	int i;

	// First pass - cap all clocks higher than the reported max
	for (i = 0; i < bb->num_states; i++) {
		if ((bb->clock_limits[i].dcfclk_mhz > (max_clocks.dcfClockInKhz / 1000))
				&& max_clocks.dcfClockInKhz != 0)
			bb->clock_limits[i].dcfclk_mhz = (max_clocks.dcfClockInKhz / 1000);

		if ((bb->clock_limits[i].dram_speed_mts > (max_clocks.uClockInKhz / 1000) * 16)
				&& max_clocks.uClockInKhz != 0)
			bb->clock_limits[i].dram_speed_mts = (max_clocks.uClockInKhz / 1000) * 16;

		if ((bb->clock_limits[i].fabricclk_mhz > (max_clocks.fabricClockInKhz / 1000))
				&& max_clocks.fabricClockInKhz != 0)
			bb->clock_limits[i].fabricclk_mhz = (max_clocks.fabricClockInKhz / 1000);

		if ((bb->clock_limits[i].dispclk_mhz > (max_clocks.displayClockInKhz / 1000))
				&& max_clocks.displayClockInKhz != 0)
			bb->clock_limits[i].dispclk_mhz = (max_clocks.displayClockInKhz / 1000);

		if ((bb->clock_limits[i].dppclk_mhz > (max_clocks.dppClockInKhz / 1000))
				&& max_clocks.dppClockInKhz != 0)
			bb->clock_limits[i].dppclk_mhz = (max_clocks.dppClockInKhz / 1000);

		if ((bb->clock_limits[i].phyclk_mhz > (max_clocks.phyClockInKhz / 1000))
				&& max_clocks.phyClockInKhz != 0)
			bb->clock_limits[i].phyclk_mhz = (max_clocks.phyClockInKhz / 1000);

		if ((bb->clock_limits[i].socclk_mhz > (max_clocks.socClockInKhz / 1000))
				&& max_clocks.socClockInKhz != 0)
			bb->clock_limits[i].socclk_mhz = (max_clocks.socClockInKhz / 1000);

		if ((bb->clock_limits[i].dscclk_mhz > (max_clocks.dscClockInKhz / 1000))
				&& max_clocks.dscClockInKhz != 0)
			bb->clock_limits[i].dscclk_mhz = (max_clocks.dscClockInKhz / 1000);
	}

	// Second pass - remove all duplicate clock states
	for (i = bb->num_states - 1; i > 1; i--) {
		bool duplicate = true;

		if (bb->clock_limits[i-1].dcfclk_mhz != bb->clock_limits[i].dcfclk_mhz)
			duplicate = false;
		if (bb->clock_limits[i-1].dispclk_mhz != bb->clock_limits[i].dispclk_mhz)
			duplicate = false;
		if (bb->clock_limits[i-1].dppclk_mhz != bb->clock_limits[i].dppclk_mhz)
			duplicate = false;
		if (bb->clock_limits[i-1].dram_speed_mts != bb->clock_limits[i].dram_speed_mts)
			duplicate = false;
		if (bb->clock_limits[i-1].dscclk_mhz != bb->clock_limits[i].dscclk_mhz)
			duplicate = false;
		if (bb->clock_limits[i-1].fabricclk_mhz != bb->clock_limits[i].fabricclk_mhz)
			duplicate = false;
		if (bb->clock_limits[i-1].phyclk_mhz != bb->clock_limits[i].phyclk_mhz)
			duplicate = false;
		if (bb->clock_limits[i-1].socclk_mhz != bb->clock_limits[i].socclk_mhz)
			duplicate = false;

		if (duplicate)
			bb->num_states--;
	}
}
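
/*
 * The FCLK floor in the loop below is 1.08 * UCLK: BIT_ULL(32) * 1080 / 1000000
 * is 1.08/1000 in 32.32 fixed point, and mul_u64_u32_shr(..., uclk_states[i], 32)
 * applies it to the kHz UCLK state, yielding MHz.  Illustrative example with a
 * made-up state: a 875000 kHz UCLK entry gives dram_speed_mts = 14000 and an
 * FCLK floor of roughly 945 MHz.
 */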
void dcn20_update_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb,
		struct pp_smu_nv_clock_table *max_clocks, unsigned int *uclk_states, unsigned int num_states)
{
	struct _vcs_dpi_voltage_scaling_st calculated_states[MAX_CLOCK_LIMIT_STATES];
	int i;
	int num_calculated_states = 0;
	int min_dcfclk = 0;

	if (num_states == 0)
		return;

	memset(calculated_states, 0, sizeof(calculated_states));

	if (dc->bb_overrides.min_dcfclk_mhz > 0)
		min_dcfclk = dc->bb_overrides.min_dcfclk_mhz;
	else {
		if (ASICREV_IS_NAVI12_P(dc->ctx->asic_id.hw_internal_rev))
			min_dcfclk = 310;
		else
			// Accounting for SOC/DCF relationship, we can go as high as
			// 506Mhz in Vmin
			min_dcfclk = 506;
	}

	for (i = 0; i < num_states; i++) {
		int min_fclk_required_by_uclk;
		calculated_states[i].state = i;
		calculated_states[i].dram_speed_mts = uclk_states[i] * 16 / 1000;

		// FCLK:UCLK ratio is 1.08
		min_fclk_required_by_uclk = mul_u64_u32_shr(BIT_ULL(32) * 1080 / 1000000, uclk_states[i], 32);

		calculated_states[i].fabricclk_mhz = (min_fclk_required_by_uclk < min_dcfclk) ?
				min_dcfclk : min_fclk_required_by_uclk;

		calculated_states[i].socclk_mhz = (calculated_states[i].fabricclk_mhz > max_clocks->socClockInKhz / 1000) ?
				max_clocks->socClockInKhz / 1000 : calculated_states[i].fabricclk_mhz;

		calculated_states[i].dcfclk_mhz = (calculated_states[i].fabricclk_mhz > max_clocks->dcfClockInKhz / 1000) ?
				max_clocks->dcfClockInKhz / 1000 : calculated_states[i].fabricclk_mhz;

		calculated_states[i].dispclk_mhz = max_clocks->displayClockInKhz / 1000;
		calculated_states[i].dppclk_mhz = max_clocks->displayClockInKhz / 1000;
		calculated_states[i].dscclk_mhz = max_clocks->displayClockInKhz / (1000 * 3);

		calculated_states[i].phyclk_mhz = max_clocks->phyClockInKhz / 1000;

		num_calculated_states++;
	}

	calculated_states[num_calculated_states - 1].socclk_mhz = max_clocks->socClockInKhz / 1000;
	calculated_states[num_calculated_states - 1].fabricclk_mhz = max_clocks->socClockInKhz / 1000;
	calculated_states[num_calculated_states - 1].dcfclk_mhz = max_clocks->dcfClockInKhz / 1000;

	memcpy(bb->clock_limits, calculated_states, sizeof(bb->clock_limits));
	bb->num_states = num_calculated_states;

	// Duplicate the last state, DML always an extra state identical to max state to work
	memcpy(&bb->clock_limits[num_calculated_states], &bb->clock_limits[num_calculated_states - 1], sizeof(struct _vcs_dpi_voltage_scaling_st));
	bb->clock_limits[num_calculated_states].state = bb->num_states;
}
void dcn20_patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb)
{
	if ((int)(bb->sr_exit_time_us * 1000) != dc->bb_overrides.sr_exit_time_ns
			&& dc->bb_overrides.sr_exit_time_ns) {
		bb->sr_exit_time_us = dc->bb_overrides.sr_exit_time_ns / 1000.0;
	}

	if ((int)(bb->sr_enter_plus_exit_time_us * 1000)
				!= dc->bb_overrides.sr_enter_plus_exit_time_ns
			&& dc->bb_overrides.sr_enter_plus_exit_time_ns) {
		bb->sr_enter_plus_exit_time_us =
				dc->bb_overrides.sr_enter_plus_exit_time_ns / 1000.0;
	}

	if ((int)(bb->urgent_latency_us * 1000) != dc->bb_overrides.urgent_latency_ns
			&& dc->bb_overrides.urgent_latency_ns) {
		bb->urgent_latency_us = dc->bb_overrides.urgent_latency_ns / 1000.0;
	}

	if ((int)(bb->dram_clock_change_latency_us * 1000)
				!= dc->bb_overrides.dram_clock_change_latency_ns
			&& dc->bb_overrides.dram_clock_change_latency_ns) {
		bb->dram_clock_change_latency_us =
				dc->bb_overrides.dram_clock_change_latency_ns / 1000.0;
	}
}
static struct _vcs_dpi_soc_bounding_box_st *get_asic_rev_soc_bb(
	uint32_t hw_internal_rev)
{
	if (ASICREV_IS_NAVI12_P(hw_internal_rev))
		return &dcn2_0_nv12_soc;

	return &dcn2_0_soc;
}

static struct _vcs_dpi_ip_params_st *get_asic_rev_ip_params(
	uint32_t hw_internal_rev)
{
	if (ASICREV_IS_NAVI14_M(hw_internal_rev))
		return &dcn2_0_nv14_ip;

	return &dcn2_0_ip;
}

static enum dml_project get_dml_project_version(uint32_t hw_internal_rev)
{
	return DML_PROJECT_NAVI10v2;
}

#define fixed16_to_double(x) (((double) x) / ((double) (1 << 16)))
#define fixed16_to_double_to_cpu(x) fixed16_to_double(le32_to_cpu(x))
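
/*
 * The firmware-provided bounding box stores these latencies and percentages as
 * little-endian 16.16 fixed-point values, hence the le32_to_cpu() followed by
 * a divide by 2^16 in the helpers above.
 */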
static bool init_soc_bounding_box(struct dc *dc,
		struct dcn20_resource_pool *pool)
{
	const struct gpu_info_soc_bounding_box_v1_0 *bb = dc->soc_bounding_box;
	struct _vcs_dpi_soc_bounding_box_st *loaded_bb =
			get_asic_rev_soc_bb(dc->ctx->asic_id.hw_internal_rev);
	struct _vcs_dpi_ip_params_st *loaded_ip =
			get_asic_rev_ip_params(dc->ctx->asic_id.hw_internal_rev);

	DC_LOGGER_INIT(dc->ctx->logger);

	/* TODO: upstream NV12 bounding box when its launched */
	if (!bb && ASICREV_IS_NAVI12_P(dc->ctx->asic_id.hw_internal_rev)) {
		DC_LOG_ERROR("%s: not valid soc bounding box/n", __func__);
		return false;
	}

	if (bb && ASICREV_IS_NAVI12_P(dc->ctx->asic_id.hw_internal_rev)) {
		int i;

		dcn2_0_nv12_soc.sr_exit_time_us =
				fixed16_to_double_to_cpu(bb->sr_exit_time_us);
		dcn2_0_nv12_soc.sr_enter_plus_exit_time_us =
				fixed16_to_double_to_cpu(bb->sr_enter_plus_exit_time_us);
		dcn2_0_nv12_soc.urgent_latency_us =
				fixed16_to_double_to_cpu(bb->urgent_latency_us);
		dcn2_0_nv12_soc.urgent_latency_pixel_data_only_us =
				fixed16_to_double_to_cpu(bb->urgent_latency_pixel_data_only_us);
		dcn2_0_nv12_soc.urgent_latency_pixel_mixed_with_vm_data_us =
				fixed16_to_double_to_cpu(bb->urgent_latency_pixel_mixed_with_vm_data_us);
		dcn2_0_nv12_soc.urgent_latency_vm_data_only_us =
				fixed16_to_double_to_cpu(bb->urgent_latency_vm_data_only_us);
		dcn2_0_nv12_soc.urgent_out_of_order_return_per_channel_pixel_only_bytes =
				le32_to_cpu(bb->urgent_out_of_order_return_per_channel_pixel_only_bytes);
		dcn2_0_nv12_soc.urgent_out_of_order_return_per_channel_pixel_and_vm_bytes =
				le32_to_cpu(bb->urgent_out_of_order_return_per_channel_pixel_and_vm_bytes);
		dcn2_0_nv12_soc.urgent_out_of_order_return_per_channel_vm_only_bytes =
				le32_to_cpu(bb->urgent_out_of_order_return_per_channel_vm_only_bytes);
		dcn2_0_nv12_soc.pct_ideal_dram_sdp_bw_after_urgent_pixel_only =
				fixed16_to_double_to_cpu(bb->pct_ideal_dram_sdp_bw_after_urgent_pixel_only);
		dcn2_0_nv12_soc.pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm =
				fixed16_to_double_to_cpu(bb->pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm);
		dcn2_0_nv12_soc.pct_ideal_dram_sdp_bw_after_urgent_vm_only =
				fixed16_to_double_to_cpu(bb->pct_ideal_dram_sdp_bw_after_urgent_vm_only);
		dcn2_0_nv12_soc.max_avg_sdp_bw_use_normal_percent =
				fixed16_to_double_to_cpu(bb->max_avg_sdp_bw_use_normal_percent);
		dcn2_0_nv12_soc.max_avg_dram_bw_use_normal_percent =
				fixed16_to_double_to_cpu(bb->max_avg_dram_bw_use_normal_percent);
		dcn2_0_nv12_soc.writeback_latency_us =
				fixed16_to_double_to_cpu(bb->writeback_latency_us);
		dcn2_0_nv12_soc.ideal_dram_bw_after_urgent_percent =
				fixed16_to_double_to_cpu(bb->ideal_dram_bw_after_urgent_percent);
		dcn2_0_nv12_soc.max_request_size_bytes =
				le32_to_cpu(bb->max_request_size_bytes);
		dcn2_0_nv12_soc.dram_channel_width_bytes =
				le32_to_cpu(bb->dram_channel_width_bytes);
		dcn2_0_nv12_soc.fabric_datapath_to_dcn_data_return_bytes =
				le32_to_cpu(bb->fabric_datapath_to_dcn_data_return_bytes);
		dcn2_0_nv12_soc.dcn_downspread_percent =
				fixed16_to_double_to_cpu(bb->dcn_downspread_percent);
		dcn2_0_nv12_soc.downspread_percent =
				fixed16_to_double_to_cpu(bb->downspread_percent);
		dcn2_0_nv12_soc.dram_page_open_time_ns =
				fixed16_to_double_to_cpu(bb->dram_page_open_time_ns);
		dcn2_0_nv12_soc.dram_rw_turnaround_time_ns =
				fixed16_to_double_to_cpu(bb->dram_rw_turnaround_time_ns);
		dcn2_0_nv12_soc.dram_return_buffer_per_channel_bytes =
				le32_to_cpu(bb->dram_return_buffer_per_channel_bytes);
		dcn2_0_nv12_soc.round_trip_ping_latency_dcfclk_cycles =
				le32_to_cpu(bb->round_trip_ping_latency_dcfclk_cycles);
		dcn2_0_nv12_soc.urgent_out_of_order_return_per_channel_bytes =
				le32_to_cpu(bb->urgent_out_of_order_return_per_channel_bytes);
		dcn2_0_nv12_soc.channel_interleave_bytes =
				le32_to_cpu(bb->channel_interleave_bytes);
		dcn2_0_nv12_soc.num_banks =
				le32_to_cpu(bb->num_banks);
		dcn2_0_nv12_soc.num_chans =
				le32_to_cpu(bb->num_chans);
		dcn2_0_nv12_soc.vmm_page_size_bytes =
				le32_to_cpu(bb->vmm_page_size_bytes);
		dcn2_0_nv12_soc.dram_clock_change_latency_us =
				fixed16_to_double_to_cpu(bb->dram_clock_change_latency_us);
		// HACK!! Lower uclock latency switch time so we don't switch
		dcn2_0_nv12_soc.dram_clock_change_latency_us = 10;
		dcn2_0_nv12_soc.writeback_dram_clock_change_latency_us =
				fixed16_to_double_to_cpu(bb->writeback_dram_clock_change_latency_us);
		dcn2_0_nv12_soc.return_bus_width_bytes =
				le32_to_cpu(bb->return_bus_width_bytes);
		dcn2_0_nv12_soc.dispclk_dppclk_vco_speed_mhz =
				le32_to_cpu(bb->dispclk_dppclk_vco_speed_mhz);
		dcn2_0_nv12_soc.xfc_bus_transport_time_us =
				le32_to_cpu(bb->xfc_bus_transport_time_us);
		dcn2_0_nv12_soc.xfc_xbuf_latency_tolerance_us =
				le32_to_cpu(bb->xfc_xbuf_latency_tolerance_us);
		dcn2_0_nv12_soc.use_urgent_burst_bw =
				le32_to_cpu(bb->use_urgent_burst_bw);
		dcn2_0_nv12_soc.num_states =
				le32_to_cpu(bb->num_states);

		for (i = 0; i < dcn2_0_nv12_soc.num_states; i++) {
			dcn2_0_nv12_soc.clock_limits[i].state =
					le32_to_cpu(bb->clock_limits[i].state);
			dcn2_0_nv12_soc.clock_limits[i].dcfclk_mhz =
					fixed16_to_double_to_cpu(bb->clock_limits[i].dcfclk_mhz);
			dcn2_0_nv12_soc.clock_limits[i].fabricclk_mhz =
					fixed16_to_double_to_cpu(bb->clock_limits[i].fabricclk_mhz);
			dcn2_0_nv12_soc.clock_limits[i].dispclk_mhz =
					fixed16_to_double_to_cpu(bb->clock_limits[i].dispclk_mhz);
			dcn2_0_nv12_soc.clock_limits[i].dppclk_mhz =
					fixed16_to_double_to_cpu(bb->clock_limits[i].dppclk_mhz);
			dcn2_0_nv12_soc.clock_limits[i].phyclk_mhz =
					fixed16_to_double_to_cpu(bb->clock_limits[i].phyclk_mhz);
			dcn2_0_nv12_soc.clock_limits[i].socclk_mhz =
					fixed16_to_double_to_cpu(bb->clock_limits[i].socclk_mhz);
			dcn2_0_nv12_soc.clock_limits[i].dscclk_mhz =
					fixed16_to_double_to_cpu(bb->clock_limits[i].dscclk_mhz);
			dcn2_0_nv12_soc.clock_limits[i].dram_speed_mts =
					fixed16_to_double_to_cpu(bb->clock_limits[i].dram_speed_mts);
		}
	}

	if (pool->base.pp_smu) {
		struct pp_smu_nv_clock_table max_clocks = {0};
		unsigned int uclk_states[8] = {0};
		unsigned int num_states = 0;
		enum pp_smu_status status;
		bool clock_limits_available = false;
		bool uclk_states_available = false;

		if (pool->base.pp_smu->nv_funcs.get_uclk_dpm_states) {
			status = (pool->base.pp_smu->nv_funcs.get_uclk_dpm_states)
				(&pool->base.pp_smu->nv_funcs.pp_smu, uclk_states, &num_states);

			uclk_states_available = (status == PP_SMU_RESULT_OK);
		}

		if (pool->base.pp_smu->nv_funcs.get_maximum_sustainable_clocks) {
			status = (*pool->base.pp_smu->nv_funcs.get_maximum_sustainable_clocks)
				(&pool->base.pp_smu->nv_funcs.pp_smu, &max_clocks);
			/* SMU cannot set DCF clock to anything equal to or higher than SOC clock
			 */
			if (max_clocks.dcfClockInKhz >= max_clocks.socClockInKhz)
				max_clocks.dcfClockInKhz = max_clocks.socClockInKhz - 1000;
			clock_limits_available = (status == PP_SMU_RESULT_OK);
		}

		if (clock_limits_available && uclk_states_available && num_states)
			dcn20_update_bounding_box(dc, loaded_bb, &max_clocks, uclk_states, num_states);
		else if (clock_limits_available)
			dcn20_cap_soc_clocks(loaded_bb, max_clocks);
	}

	loaded_ip->max_num_otg = pool->base.res_cap->num_timing_generator;
	loaded_ip->max_num_dpp = pool->base.pipe_count;
	dcn20_patch_bounding_box(dc, loaded_bb);

	return true;
}
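
/*
 * Pool construction order below: clock sources and DCCG first, then DMCU/ABM,
 * pp_smu + SoC bounding box + DML instance, watermark range notification, IRQ
 * service, and finally the per-pipe HUBP/IPP/DPP/OPP/TG blocks, DSC, DWB and
 * MCIF_WB before handing off to resource_construct().
 */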
static bool dcn20_resource_construct(
	uint8_t num_virtual_links,
	struct dc *dc,
	struct dcn20_resource_pool *pool)
{
	int i;
	struct dc_context *ctx = dc->ctx;
	struct irq_service_init_data init_data;
	struct ddc_service_init_data ddc_init_data;
	struct _vcs_dpi_soc_bounding_box_st *loaded_bb =
			get_asic_rev_soc_bb(ctx->asic_id.hw_internal_rev);
	struct _vcs_dpi_ip_params_st *loaded_ip =
			get_asic_rev_ip_params(ctx->asic_id.hw_internal_rev);
	enum dml_project dml_project_version =
			get_dml_project_version(ctx->asic_id.hw_internal_rev);

	ctx->dc_bios->regs = &bios_regs;
	pool->base.funcs = &dcn20_res_pool_funcs;

	if (ASICREV_IS_NAVI14_M(ctx->asic_id.hw_internal_rev)) {
		pool->base.res_cap = &res_cap_nv14;
		pool->base.pipe_count = 5;
		pool->base.mpcc_count = 5;
	} else {
		pool->base.res_cap = &res_cap_nv10;
		pool->base.pipe_count = 6;
		pool->base.mpcc_count = 6;
	}
	/*************************************************
	 *  Resource + asic cap hardcoding               *
	 *************************************************/
	pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;

	dc->caps.max_downscale_ratio = 200;
	dc->caps.i2c_speed_in_khz = 100;
	dc->caps.max_cursor_size = 256;
	dc->caps.dmdata_alloc_size = 2048;

	dc->caps.max_slave_planes = 1;
	dc->caps.post_blend_color_processing = true;
	dc->caps.force_dp_tps4_for_cp2520 = true;
	dc->caps.hw_3d_lut = true;
	dc->caps.extended_aux_timeout_support = true;

	if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) {
		dc->debug = debug_defaults_drv;
	} else if (dc->ctx->dce_environment == DCE_ENV_FPGA_MAXIMUS) {
		pool->base.pipe_count = 4;
		pool->base.mpcc_count = pool->base.pipe_count;
		dc->debug = debug_defaults_diags;
	} else {
		dc->debug = debug_defaults_diags;
	}

	dc->work_arounds.dedcn20_305_wa = true;

	// Init the vm_helper
	if (dc->vm_helper)
		vm_helper_init(dc->vm_helper, 16);

	/*************************************************
	 *  Create resources                             *
	 *************************************************/

	pool->base.clock_sources[DCN20_CLK_SRC_PLL0] =
			dcn20_clock_source_create(ctx, ctx->dc_bios,
				CLOCK_SOURCE_COMBO_PHY_PLL0,
				&clk_src_regs[0], false);
	pool->base.clock_sources[DCN20_CLK_SRC_PLL1] =
			dcn20_clock_source_create(ctx, ctx->dc_bios,
				CLOCK_SOURCE_COMBO_PHY_PLL1,
				&clk_src_regs[1], false);
	pool->base.clock_sources[DCN20_CLK_SRC_PLL2] =
			dcn20_clock_source_create(ctx, ctx->dc_bios,
				CLOCK_SOURCE_COMBO_PHY_PLL2,
				&clk_src_regs[2], false);
	pool->base.clock_sources[DCN20_CLK_SRC_PLL3] =
			dcn20_clock_source_create(ctx, ctx->dc_bios,
				CLOCK_SOURCE_COMBO_PHY_PLL3,
				&clk_src_regs[3], false);
	pool->base.clock_sources[DCN20_CLK_SRC_PLL4] =
			dcn20_clock_source_create(ctx, ctx->dc_bios,
				CLOCK_SOURCE_COMBO_PHY_PLL4,
				&clk_src_regs[4], false);
	pool->base.clock_sources[DCN20_CLK_SRC_PLL5] =
			dcn20_clock_source_create(ctx, ctx->dc_bios,
				CLOCK_SOURCE_COMBO_PHY_PLL5,
				&clk_src_regs[5], false);
	pool->base.clk_src_count = DCN20_CLK_SRC_TOTAL;
	/* todo: not reuse phy_pll registers */
	pool->base.dp_clock_source =
			dcn20_clock_source_create(ctx, ctx->dc_bios,
				CLOCK_SOURCE_ID_DP_DTO,
				&clk_src_regs[0], true);

	for (i = 0; i < pool->base.clk_src_count; i++) {
		if (pool->base.clock_sources[i] == NULL) {
			dm_error("DC: failed to create clock sources!\n");
			BREAK_TO_DEBUGGER();
			goto create_fail;
		}
	}

	pool->base.dccg = dccg2_create(ctx, &dccg_regs, &dccg_shift, &dccg_mask);
	if (pool->base.dccg == NULL) {
		dm_error("DC: failed to create dccg!\n");
		BREAK_TO_DEBUGGER();
		goto create_fail;
	}

	pool->base.dmcu = dcn20_dmcu_create(ctx,
			&dmcu_regs,
			&dmcu_shift,
			&dmcu_mask);
	if (pool->base.dmcu == NULL) {
		dm_error("DC: failed to create dmcu!\n");
		BREAK_TO_DEBUGGER();
		goto create_fail;
	}

	pool->base.abm = dce_abm_create(ctx,
			&abm_regs,
			&abm_shift,
			&abm_mask);
	if (pool->base.abm == NULL) {
		dm_error("DC: failed to create abm!\n");
		BREAK_TO_DEBUGGER();
		goto create_fail;
	}

	pool->base.pp_smu = dcn20_pp_smu_create(ctx);

	if (!init_soc_bounding_box(dc, pool)) {
		dm_error("DC: failed to initialize soc bounding box!\n");
		BREAK_TO_DEBUGGER();
		goto create_fail;
	}

	dml_init_instance(&dc->dml, loaded_bb, loaded_ip, dml_project_version);
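
	/*
	 * Reader watermark sets are keyed to DRAM speed ranges: the min/max fill
	 * clocks below are derived from clock_limits[].dram_speed_mts / 16, i.e.
	 * the UCLK in MHz for each bounding-box state.
	 */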
3621 if (!dc
->debug
.disable_pplib_wm_range
) {
3622 struct pp_smu_wm_range_sets ranges
= {0};
3625 ranges
.num_reader_wm_sets
= 0;
3627 if (loaded_bb
->num_states
== 1) {
3628 ranges
.reader_wm_sets
[0].wm_inst
= i
;
3629 ranges
.reader_wm_sets
[0].min_drain_clk_mhz
= PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN
;
3630 ranges
.reader_wm_sets
[0].max_drain_clk_mhz
= PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX
;
3631 ranges
.reader_wm_sets
[0].min_fill_clk_mhz
= PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN
;
3632 ranges
.reader_wm_sets
[0].max_fill_clk_mhz
= PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX
;
3634 ranges
.num_reader_wm_sets
= 1;
3635 } else if (loaded_bb
->num_states
> 1) {
3636 for (i
= 0; i
< 4 && i
< loaded_bb
->num_states
; i
++) {
3637 ranges
.reader_wm_sets
[i
].wm_inst
= i
;
3638 ranges
.reader_wm_sets
[i
].min_drain_clk_mhz
= PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN
;
3639 ranges
.reader_wm_sets
[i
].max_drain_clk_mhz
= PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX
;
3640 ranges
.reader_wm_sets
[i
].min_fill_clk_mhz
= (i
> 0) ? (loaded_bb
->clock_limits
[i
- 1].dram_speed_mts
/ 16) + 1 : 0;
3641 ranges
.reader_wm_sets
[i
].max_fill_clk_mhz
= loaded_bb
->clock_limits
[i
].dram_speed_mts
/ 16;
3643 ranges
.num_reader_wm_sets
= i
+ 1;
3646 ranges
.reader_wm_sets
[0].min_fill_clk_mhz
= PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN
;
3647 ranges
.reader_wm_sets
[ranges
.num_reader_wm_sets
- 1].max_fill_clk_mhz
= PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX
;
3650 ranges
.num_writer_wm_sets
= 1;
3652 ranges
.writer_wm_sets
[0].wm_inst
= 0;
3653 ranges
.writer_wm_sets
[0].min_fill_clk_mhz
= PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN
;
3654 ranges
.writer_wm_sets
[0].max_fill_clk_mhz
= PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX
;
3655 ranges
.writer_wm_sets
[0].min_drain_clk_mhz
= PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN
;
3656 ranges
.writer_wm_sets
[0].max_drain_clk_mhz
= PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX
;
		/* Notify PP Lib/SMU which Watermarks to use for which clock ranges */
		if (pool->base.pp_smu->nv_funcs.set_wm_ranges)
			pool->base.pp_smu->nv_funcs.set_wm_ranges(&pool->base.pp_smu->nv_funcs.pp_smu, &ranges);
	}
	init_data.ctx = dc->ctx;
	pool->base.irqs = dal_irq_service_dcn20_create(&init_data);
	if (!pool->base.irqs)
		goto create_fail;
	/* mem input -> ipp -> dpp -> opp -> TG */
	for (i = 0; i < pool->base.pipe_count; i++) {
		pool->base.hubps[i] = dcn20_hubp_create(ctx, i);
		if (pool->base.hubps[i] == NULL) {
			BREAK_TO_DEBUGGER();
			dm_error(
				"DC: failed to create memory input!\n");
			goto create_fail;
		}
		pool->base.ipps[i] = dcn20_ipp_create(ctx, i);
		if (pool->base.ipps[i] == NULL) {
			BREAK_TO_DEBUGGER();
			dm_error(
				"DC: failed to create input pixel processor!\n");
			goto create_fail;
		}
		pool->base.dpps[i] = dcn20_dpp_create(ctx, i);
		if (pool->base.dpps[i] == NULL) {
			BREAK_TO_DEBUGGER();
			dm_error(
				"DC: failed to create dpps!\n");
			goto create_fail;
		}
	}
	for (i = 0; i < pool->base.res_cap->num_ddc; i++) {
		pool->base.engines[i] = dcn20_aux_engine_create(ctx, i);
		if (pool->base.engines[i] == NULL) {
			BREAK_TO_DEBUGGER();
			dm_error(
				"DC:failed to create aux engine!!\n");
			goto create_fail;
		}
		pool->base.hw_i2cs[i] = dcn20_i2c_hw_create(ctx, i);
		if (pool->base.hw_i2cs[i] == NULL) {
			BREAK_TO_DEBUGGER();
			dm_error(
				"DC:failed to create hw i2c!!\n");
			goto create_fail;
		}
		pool->base.sw_i2cs[i] = NULL;
	}
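	/*
	 * Note: only the hardware I2C engines created above are populated; the
	 * sw_i2cs slots are deliberately left NULL, presumably because DCN2.0 has
	 * no software-I2C fallback in this resource pool.
	 */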
	for (i = 0; i < pool->base.res_cap->num_opp; i++) {
		pool->base.opps[i] = dcn20_opp_create(ctx, i);
		if (pool->base.opps[i] == NULL) {
			BREAK_TO_DEBUGGER();
			dm_error(
				"DC: failed to create output pixel processor!\n");
			goto create_fail;
		}
	}
	for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) {
		pool->base.timing_generators[i] = dcn20_timing_generator_create(
				ctx, i);
		if (pool->base.timing_generators[i] == NULL) {
			BREAK_TO_DEBUGGER();
			dm_error("DC: failed to create tg!\n");
			goto create_fail;
		}
	}

	pool->base.timing_generator_count = i;
	pool->base.mpc = dcn20_mpc_create(ctx);
	if (pool->base.mpc == NULL) {
		BREAK_TO_DEBUGGER();
		dm_error("DC: failed to create mpc!\n");
		goto create_fail;
	}
	pool->base.hubbub = dcn20_hubbub_create(ctx);
	if (pool->base.hubbub == NULL) {
		BREAK_TO_DEBUGGER();
		dm_error("DC: failed to create hubbub!\n");
		goto create_fail;
	}
	for (i = 0; i < pool->base.res_cap->num_dsc; i++) {
		pool->base.dscs[i] = dcn20_dsc_create(ctx, i);
		if (pool->base.dscs[i] == NULL) {
			BREAK_TO_DEBUGGER();
			dm_error("DC: failed to create display stream compressor %d!\n", i);
			goto create_fail;
		}
	}
	if (!dcn20_dwbc_create(ctx, &pool->base)) {
		BREAK_TO_DEBUGGER();
		dm_error("DC: failed to create dwbc!\n");
		goto create_fail;
	}
	if (!dcn20_mmhubbub_create(ctx, &pool->base)) {
		BREAK_TO_DEBUGGER();
		dm_error("DC: failed to create mcif_wb!\n");
		goto create_fail;
	}
	if (!resource_construct(num_virtual_links, dc, &pool->base,
			(!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) ?
			&res_create_funcs : &res_create_maximus_funcs)))
		goto create_fail;
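	/*
	 * Note: resource_construct() is handed res_create_maximus_funcs when
	 * running in an FPGA (Maximus) emulation environment and the regular
	 * res_create_funcs otherwise, presumably so emulation builds can
	 * substitute virtual back-ends during bring-up.
	 */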
	dcn20_hw_sequencer_construct(dc);

	dc->caps.max_planes = pool->base.pipe_count;
	for (i = 0; i < dc->caps.max_planes; ++i)
		dc->caps.planes[i] = plane_cap;

	dc->cap_funcs = cap_funcs;
	if (dc->ctx->dc_bios->fw_info.oem_i2c_present) {
		ddc_init_data.ctx = dc->ctx;
		ddc_init_data.link = NULL;
		ddc_init_data.id.id = dc->ctx->dc_bios->fw_info.oem_i2c_obj_id;
		ddc_init_data.id.enum_id = 0;
		ddc_init_data.id.type = OBJECT_TYPE_GENERIC;
		pool->base.oem_device = dal_ddc_service_create(&ddc_init_data);
	} else {
		pool->base.oem_device = NULL;
	}
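	/*
	 * Note: oem_device is only instantiated when the VBIOS firmware info
	 * reports an OEM I2C object; otherwise it stays NULL, which lets later
	 * users check the pointer before touching the DDC service.
	 */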
	return true;

create_fail:

	dcn20_resource_destruct(pool);

	return false;
}

struct resource_pool *dcn20_create_resource_pool(
		const struct dc_init_data *init_data,
		struct dc *dc)
{
	struct dcn20_resource_pool *pool =
		kzalloc(sizeof(struct dcn20_resource_pool), GFP_KERNEL);

	if (!pool)
		return NULL;

	if (dcn20_resource_construct(init_data->num_virtual_links, dc, pool))
		return &pool->base;

	BREAK_TO_DEBUGGER();