/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "amdgpu_pm.h"
#include "amdgpu_i2c.h"
#include "amdgpu_atombios.h"
#include "atombios_crtc.h"
#include "atombios_encoders.h"
#include "amdgpu_pll.h"
#include "amdgpu_connectors.h"

#include "dce/dce_8_0_d.h"
#include "dce/dce_8_0_sh_mask.h"

#include "gca/gfx_7_2_enum.h"

#include "gmc/gmc_7_1_d.h"
#include "gmc/gmc_7_1_sh_mask.h"

#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"
static void dce_v8_0_set_display_funcs(struct amdgpu_device *adev);
static void dce_v8_0_set_irq_funcs(struct amdgpu_device *adev);
static const u32 crtc_offsets[6] =
{
	CRTC0_REGISTER_OFFSET,
	CRTC1_REGISTER_OFFSET,
	CRTC2_REGISTER_OFFSET,
	CRTC3_REGISTER_OFFSET,
	CRTC4_REGISTER_OFFSET,
	CRTC5_REGISTER_OFFSET
};
static const uint32_t dig_offsets[] = {
	CRTC0_REGISTER_OFFSET,
	CRTC1_REGISTER_OFFSET,
	CRTC2_REGISTER_OFFSET,
	CRTC3_REGISTER_OFFSET,
	CRTC4_REGISTER_OFFSET,
	CRTC5_REGISTER_OFFSET,
	(0x13830 - 0x7030) >> 2,
};
static const struct {
	uint32_t        reg;
	uint32_t        vblank;
	uint32_t        vline;
	uint32_t        hpd;
} interrupt_status_offsets[6] = { {
	.reg = mmDISP_INTERRUPT_STATUS,
	.vblank = DISP_INTERRUPT_STATUS__LB_D1_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS__LB_D1_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE2,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE3,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE4,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE5,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK
} };
static const uint32_t hpd_int_control_offsets[6] = {
	mmDC_HPD1_INT_CONTROL,
	mmDC_HPD2_INT_CONTROL,
	mmDC_HPD3_INT_CONTROL,
	mmDC_HPD4_INT_CONTROL,
	mmDC_HPD5_INT_CONTROL,
	mmDC_HPD6_INT_CONTROL,
};
static u32 dce_v8_0_audio_endpt_rreg(struct amdgpu_device *adev,
				     u32 block_offset, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
	WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
	r = RREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset);
	spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);

	return r;
}
static void dce_v8_0_audio_endpt_wreg(struct amdgpu_device *adev,
				      u32 block_offset, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
	WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
	WREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset, v);
	spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);
}
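/*
 * Note on the two accessors above: the AZALIA endpoint registers are not
 * directly memory mapped.  The endpoint register number is first written
 * to the ..._ENDPOINT_INDEX register and the payload then moves through
 * ..._ENDPOINT_DATA.  The audio_endpt_idx_lock keeps the index/data pairs
 * of concurrent callers from interleaving.
 */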
static bool dce_v8_0_is_in_vblank(struct amdgpu_device *adev, int crtc)
{
	if (RREG32(mmCRTC_STATUS + crtc_offsets[crtc]) &
	    CRTC_V_BLANK_START_END__CRTC_V_BLANK_START_MASK)
		return true;
	else
		return false;
}
static bool dce_v8_0_is_counter_moving(struct amdgpu_device *adev, int crtc)
{
	u32 pos1, pos2;

	pos1 = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
	pos2 = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);

	if (pos1 != pos2)
		return true;
	else
		return false;
}
/**
 * dce_v8_0_vblank_wait - vblank wait asic callback.
 *
 * @adev: amdgpu_device pointer
 * @crtc: crtc to wait for vblank on
 *
 * Wait for vblank on the requested crtc (evergreen+).
 */
static void dce_v8_0_vblank_wait(struct amdgpu_device *adev, int crtc)
{
	unsigned i = 0;

	if (crtc >= adev->mode_info.num_crtc)
		return;

	if (!(RREG32(mmCRTC_CONTROL + crtc_offsets[crtc]) & CRTC_CONTROL__CRTC_MASTER_EN_MASK))
		return;

	/* depending on when we hit vblank, we may be close to active; if so,
	 * wait for another frame.
	 */
	while (dce_v8_0_is_in_vblank(adev, crtc)) {
		if (i++ % 100 == 0) {
			if (!dce_v8_0_is_counter_moving(adev, crtc))
				break;
		}
	}

	while (!dce_v8_0_is_in_vblank(adev, crtc)) {
		if (i++ % 100 == 0) {
			if (!dce_v8_0_is_counter_moving(adev, crtc))
				break;
		}
	}
}
static u32 dce_v8_0_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	if (crtc >= adev->mode_info.num_crtc)
		return 0;
	else
		return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
}
/**
 * dce_v8_0_page_flip - pageflip callback.
 *
 * @adev: amdgpu_device pointer
 * @crtc_id: crtc to cleanup pageflip on
 * @crtc_base: new address of the crtc (GPU MC address)
 *
 * Does the actual pageflip (evergreen+).
 * During vblank we take the crtc lock and wait for the update_pending
 * bit to go high; when it does, we release the lock and allow the
 * double buffered update to take place.
 */
static void dce_v8_0_page_flip(struct amdgpu_device *adev,
			       int crtc_id, u64 crtc_base)
{
	struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
	u32 tmp = RREG32(mmGRPH_UPDATE + amdgpu_crtc->crtc_offset);
	int i;

	/* Lock the graphics update lock */
	tmp |= GRPH_UPDATE__GRPH_UPDATE_LOCK_MASK;
	WREG32(mmGRPH_UPDATE + amdgpu_crtc->crtc_offset, tmp);

	/* update the scanout addresses */
	WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
	       upper_32_bits(crtc_base));
	WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
	       (u32)crtc_base);

	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
	       upper_32_bits(crtc_base));
	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
	       (u32)crtc_base);

	/* Wait for update_pending to go high. */
	for (i = 0; i < adev->usec_timeout; i++) {
		if (RREG32(mmGRPH_UPDATE + amdgpu_crtc->crtc_offset) &
		    GRPH_UPDATE__GRPH_SURFACE_UPDATE_PENDING_MASK)
			break;
		udelay(1);
	}
	DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");

	/* Unlock the lock, so double-buffering can take place inside vblank */
	tmp &= ~GRPH_UPDATE__GRPH_UPDATE_LOCK_MASK;
	WREG32(mmGRPH_UPDATE + amdgpu_crtc->crtc_offset, tmp);
}
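/*
 * Both the primary and secondary surface address registers are programmed
 * with the same base above; presumably this keeps the flip consistent no
 * matter which of the two double-buffered surfaces the controller happens
 * to be scanning out when the update latches.
 */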
static int dce_v8_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
					u32 *vbl, u32 *position)
{
	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;

	*vbl = RREG32(mmCRTC_V_BLANK_START_END + crtc_offsets[crtc]);
	*position = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);

	return 0;
}
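/*
 * The two words returned above are raw register values; judging by the
 * CRTC_V_BLANK_START_END field masks, *vbl packs the vblank start and end
 * scanlines into its low and high halves, while *position carries the
 * current beam position.  Unpacking is left to the caller.
 */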
/**
 * dce_v8_0_hpd_sense - hpd sense callback.
 *
 * @adev: amdgpu_device pointer
 * @hpd: hpd (hotplug detect) pin
 *
 * Checks if a digital monitor is connected (evergreen+).
 * Returns true if connected, false if not connected.
 */
static bool dce_v8_0_hpd_sense(struct amdgpu_device *adev,
			       enum amdgpu_hpd_id hpd)
{
	bool connected = false;

	switch (hpd) {
	case AMDGPU_HPD_1:
		if (RREG32(mmDC_HPD1_INT_STATUS) & DC_HPD1_INT_STATUS__DC_HPD1_SENSE_MASK)
			connected = true;
		break;
	case AMDGPU_HPD_2:
		if (RREG32(mmDC_HPD2_INT_STATUS) & DC_HPD2_INT_STATUS__DC_HPD2_SENSE_MASK)
			connected = true;
		break;
	case AMDGPU_HPD_3:
		if (RREG32(mmDC_HPD3_INT_STATUS) & DC_HPD3_INT_STATUS__DC_HPD3_SENSE_MASK)
			connected = true;
		break;
	case AMDGPU_HPD_4:
		if (RREG32(mmDC_HPD4_INT_STATUS) & DC_HPD4_INT_STATUS__DC_HPD4_SENSE_MASK)
			connected = true;
		break;
	case AMDGPU_HPD_5:
		if (RREG32(mmDC_HPD5_INT_STATUS) & DC_HPD5_INT_STATUS__DC_HPD5_SENSE_MASK)
			connected = true;
		break;
	case AMDGPU_HPD_6:
		if (RREG32(mmDC_HPD6_INT_STATUS) & DC_HPD6_INT_STATUS__DC_HPD6_SENSE_MASK)
			connected = true;
		break;
	default:
		break;
	}

	return connected;
}
/**
 * dce_v8_0_hpd_set_polarity - hpd set polarity callback.
 *
 * @adev: amdgpu_device pointer
 * @hpd: hpd (hotplug detect) pin
 *
 * Set the polarity of the hpd pin (evergreen+).
 */
static void dce_v8_0_hpd_set_polarity(struct amdgpu_device *adev,
				      enum amdgpu_hpd_id hpd)
{
	u32 tmp;
	bool connected = dce_v8_0_hpd_sense(adev, hpd);

	switch (hpd) {
	case AMDGPU_HPD_1:
		tmp = RREG32(mmDC_HPD1_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
		else
			tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
		WREG32(mmDC_HPD1_INT_CONTROL, tmp);
		break;
	case AMDGPU_HPD_2:
		tmp = RREG32(mmDC_HPD2_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPD2_INT_CONTROL__DC_HPD2_INT_POLARITY_MASK;
		else
			tmp |= DC_HPD2_INT_CONTROL__DC_HPD2_INT_POLARITY_MASK;
		WREG32(mmDC_HPD2_INT_CONTROL, tmp);
		break;
	case AMDGPU_HPD_3:
		tmp = RREG32(mmDC_HPD3_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPD3_INT_CONTROL__DC_HPD3_INT_POLARITY_MASK;
		else
			tmp |= DC_HPD3_INT_CONTROL__DC_HPD3_INT_POLARITY_MASK;
		WREG32(mmDC_HPD3_INT_CONTROL, tmp);
		break;
	case AMDGPU_HPD_4:
		tmp = RREG32(mmDC_HPD4_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPD4_INT_CONTROL__DC_HPD4_INT_POLARITY_MASK;
		else
			tmp |= DC_HPD4_INT_CONTROL__DC_HPD4_INT_POLARITY_MASK;
		WREG32(mmDC_HPD4_INT_CONTROL, tmp);
		break;
	case AMDGPU_HPD_5:
		tmp = RREG32(mmDC_HPD5_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPD5_INT_CONTROL__DC_HPD5_INT_POLARITY_MASK;
		else
			tmp |= DC_HPD5_INT_CONTROL__DC_HPD5_INT_POLARITY_MASK;
		WREG32(mmDC_HPD5_INT_CONTROL, tmp);
		break;
	case AMDGPU_HPD_6:
		tmp = RREG32(mmDC_HPD6_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPD6_INT_CONTROL__DC_HPD6_INT_POLARITY_MASK;
		else
			tmp |= DC_HPD6_INT_CONTROL__DC_HPD6_INT_POLARITY_MASK;
		WREG32(mmDC_HPD6_INT_CONTROL, tmp);
		break;
	default:
		break;
	}
}
/**
 * dce_v8_0_hpd_init - hpd setup callback.
 *
 * @adev: amdgpu_device pointer
 *
 * Setup the hpd pins used by the card (evergreen+).
 * Enable the pin, set the polarity, and enable the hpd interrupts.
 */
static void dce_v8_0_hpd_init(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev->ddev;
	struct drm_connector *connector;
	u32 tmp = (0x9c4 << DC_HPD1_CONTROL__DC_HPD1_CONNECTION_TIMER__SHIFT) |
		(0xfa << DC_HPD1_CONTROL__DC_HPD1_RX_INT_TIMER__SHIFT) |
		DC_HPD1_CONTROL__DC_HPD1_EN_MASK;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);

		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
		    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
			/* don't try to enable hpd on eDP or LVDS to avoid
			 * breaking the aux dp channel on imac; this helps
			 * (but does not completely fix)
			 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
			 * and also avoids interrupt storms during dpms.
			 */
			continue;
		}
		switch (amdgpu_connector->hpd.hpd) {
		case AMDGPU_HPD_1:
			WREG32(mmDC_HPD1_CONTROL, tmp);
			break;
		case AMDGPU_HPD_2:
			WREG32(mmDC_HPD2_CONTROL, tmp);
			break;
		case AMDGPU_HPD_3:
			WREG32(mmDC_HPD3_CONTROL, tmp);
			break;
		case AMDGPU_HPD_4:
			WREG32(mmDC_HPD4_CONTROL, tmp);
			break;
		case AMDGPU_HPD_5:
			WREG32(mmDC_HPD5_CONTROL, tmp);
			break;
		case AMDGPU_HPD_6:
			WREG32(mmDC_HPD6_CONTROL, tmp);
			break;
		default:
			break;
		}
		dce_v8_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
		amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
	}
}
/**
 * dce_v8_0_hpd_fini - hpd tear down callback.
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down the hpd pins used by the card (evergreen+).
 * Disable the hpd interrupts.
 */
static void dce_v8_0_hpd_fini(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev->ddev;
	struct drm_connector *connector;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);

		switch (amdgpu_connector->hpd.hpd) {
		case AMDGPU_HPD_1:
			WREG32(mmDC_HPD1_CONTROL, 0);
			break;
		case AMDGPU_HPD_2:
			WREG32(mmDC_HPD2_CONTROL, 0);
			break;
		case AMDGPU_HPD_3:
			WREG32(mmDC_HPD3_CONTROL, 0);
			break;
		case AMDGPU_HPD_4:
			WREG32(mmDC_HPD4_CONTROL, 0);
			break;
		case AMDGPU_HPD_5:
			WREG32(mmDC_HPD5_CONTROL, 0);
			break;
		case AMDGPU_HPD_6:
			WREG32(mmDC_HPD6_CONTROL, 0);
			break;
		default:
			break;
		}
		amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
	}
}
static u32 dce_v8_0_hpd_get_gpio_reg(struct amdgpu_device *adev)
{
	return mmDC_GPIO_HPD_A;
}
static bool dce_v8_0_is_display_hung(struct amdgpu_device *adev)
{
	u32 crtc_hung = 0;
	u32 crtc_status[6];
	u32 i, j, tmp;

	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		if (RREG32(mmCRTC_CONTROL + crtc_offsets[i]) & CRTC_CONTROL__CRTC_MASTER_EN_MASK) {
			crtc_status[i] = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]);
			crtc_hung |= (1 << i);
		}
	}

	for (j = 0; j < 10; j++) {
		for (i = 0; i < adev->mode_info.num_crtc; i++) {
			if (crtc_hung & (1 << i)) {
				tmp = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]);
				if (tmp != crtc_status[i])
					crtc_hung &= ~(1 << i);
			}
		}
		if (crtc_hung == 0)
			return false;
		udelay(100);
	}

	return true;
}
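/*
 * Hang-check sketch: the HV counter of every enabled CRTC is sampled once,
 * then re-read up to ten times with a 100us delay between passes.  A CRTC
 * whose counter advances is dropped from crtc_hung; the display is only
 * reported hung if some counter never moves within roughly a millisecond.
 */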
static void dce_v8_0_stop_mc_access(struct amdgpu_device *adev,
				    struct amdgpu_mode_mc_save *save)
{
	u32 crtc_enabled, tmp;
	int i;

	save->vga_render_control = RREG32(mmVGA_RENDER_CONTROL);
	save->vga_hdp_control = RREG32(mmVGA_HDP_CONTROL);

	/* disable VGA render */
	tmp = RREG32(mmVGA_RENDER_CONTROL);
	tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
	WREG32(mmVGA_RENDER_CONTROL, tmp);

	/* blank the display controllers */
	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]),
					     CRTC_CONTROL, CRTC_MASTER_EN);
		if (crtc_enabled) {
#if 0
			u32 frame_count;
			int j;

			save->crtc_enabled[i] = true;
			tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]);
			if (REG_GET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN) == 0) {
				amdgpu_display_vblank_wait(adev, i);
				WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
				tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 1);
				WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
				WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
			}
			/* wait for the next frame */
			frame_count = amdgpu_display_vblank_get_counter(adev, i);
			for (j = 0; j < adev->usec_timeout; j++) {
				if (amdgpu_display_vblank_get_counter(adev, i) != frame_count)
					break;
				udelay(1);
			}
			tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
			if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK) == 0) {
				tmp = REG_SET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK, 1);
				WREG32(mmGRPH_UPDATE + crtc_offsets[i], tmp);
			}
			tmp = RREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i]);
			if (REG_GET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK) == 0) {
				tmp = REG_SET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK, 1);
				WREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
			}
#else
			/* XXX this is a hack to avoid strange behavior with EFI on certain systems */
			WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
			tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
			tmp = REG_SET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN, 0);
			WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
			WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
			save->crtc_enabled[i] = false;
#endif
		} else {
			save->crtc_enabled[i] = false;
		}
	}
}
static void dce_v8_0_resume_mc_access(struct amdgpu_device *adev,
				      struct amdgpu_mode_mc_save *save)
{
	u32 tmp, frame_count;
	int i, j;

	/* update crtc base addresses */
	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
		       upper_32_bits(adev->mc.vram_start));
		WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
		       upper_32_bits(adev->mc.vram_start));
		WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i],
		       (u32)adev->mc.vram_start);
		WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i],
		       (u32)adev->mc.vram_start);

		if (save->crtc_enabled[i]) {
			tmp = RREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i]);
			if (REG_GET_FIELD(tmp, MASTER_UPDATE_MODE, MASTER_UPDATE_MODE) != 3) {
				tmp = REG_SET_FIELD(tmp, MASTER_UPDATE_MODE, MASTER_UPDATE_MODE, 3);
				WREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i], tmp);
			}
			tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
			if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK)) {
				tmp = REG_SET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK, 0);
				WREG32(mmGRPH_UPDATE + crtc_offsets[i], tmp);
			}
			tmp = RREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i]);
			if (REG_GET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK)) {
				tmp = REG_SET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK, 0);
				WREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
			}
			for (j = 0; j < adev->usec_timeout; j++) {
				tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
				if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_SURFACE_UPDATE_PENDING) == 0)
					break;
				udelay(1);
			}
			tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]);
			tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 0);
			WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
			WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
			WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
			/* wait for the next frame */
			frame_count = amdgpu_display_vblank_get_counter(adev, i);
			for (j = 0; j < adev->usec_timeout; j++) {
				if (amdgpu_display_vblank_get_counter(adev, i) != frame_count)
					break;
				udelay(1);
			}
		}
	}

	WREG32(mmVGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(adev->mc.vram_start));
	WREG32(mmVGA_MEMORY_BASE_ADDRESS, lower_32_bits(adev->mc.vram_start));

	/* Unlock vga access */
	WREG32(mmVGA_HDP_CONTROL, save->vga_hdp_control);
	mdelay(1);
	WREG32(mmVGA_RENDER_CONTROL, save->vga_render_control);
}
static void dce_v8_0_set_vga_render_state(struct amdgpu_device *adev,
					  bool render)
{
	u32 tmp;

	/* Lockout access through VGA aperture */
	tmp = RREG32(mmVGA_HDP_CONTROL);
	if (render)
		tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 0);
	else
		tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
	WREG32(mmVGA_HDP_CONTROL, tmp);

	/* disable VGA render */
	tmp = RREG32(mmVGA_RENDER_CONTROL);
	if (render)
		tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 1);
	else
		tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
	WREG32(mmVGA_RENDER_CONTROL, tmp);
}
static void dce_v8_0_program_fmt(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
	int bpc = 0;
	u32 tmp = 0;
	enum amdgpu_connector_dither dither = AMDGPU_FMT_DITHER_DISABLE;

	if (connector) {
		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
		bpc = amdgpu_connector_get_monitor_bpc(connector);
		dither = amdgpu_connector->dither;
	}

	/* LVDS/eDP FMT is set up by atom */
	if (amdgpu_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
		return;

	/* not needed for analog */
	if ((amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1) ||
	    (amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2))
		return;

	if (bpc == 0)
		return;

	switch (bpc) {
	case 6:
		if (dither == AMDGPU_FMT_DITHER_ENABLE)
			/* XXX sort out optimal dither settings */
			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK |
				(0 << FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH__SHIFT));
		else
			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK |
				(0 << FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH__SHIFT));
		break;
	case 8:
		if (dither == AMDGPU_FMT_DITHER_ENABLE)
			/* XXX sort out optimal dither settings */
			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK |
				(1 << FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH__SHIFT));
		else
			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK |
				(1 << FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH__SHIFT));
		break;
	case 10:
		if (dither == AMDGPU_FMT_DITHER_ENABLE)
			/* XXX sort out optimal dither settings */
			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK |
				(2 << FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH__SHIFT));
		else
			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK |
				(2 << FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH__SHIFT));
		break;
	default:
		/* not needed */
		break;
	}

	WREG32(mmFMT_BIT_DEPTH_CONTROL + amdgpu_crtc->crtc_offset, tmp);
}
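/*
 * Design note: with dithering disabled the FMT block simply truncates to
 * the target depth (TRUNCATE_EN with depth 0/1/2 for 6/8/10 bpc), while
 * the dither path adds spatial/random noise at the same depth to mask the
 * banding that plain truncation would produce.
 */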
/* display watermark setup */
/**
 * dce_v8_0_line_buffer_adjust - Set up the line buffer
 *
 * @adev: amdgpu_device pointer
 * @amdgpu_crtc: the selected display controller
 * @mode: the current display mode on the selected display
 * controller
 *
 * Set up the line buffer allocation for
 * the selected display controller (CIK).
 * Returns the line buffer size in pixels.
 */
static u32 dce_v8_0_line_buffer_adjust(struct amdgpu_device *adev,
				       struct amdgpu_crtc *amdgpu_crtc,
				       struct drm_display_mode *mode)
{
	u32 tmp, buffer_alloc, i;
	u32 pipe_offset = amdgpu_crtc->crtc_id * 0x8;
	/*
	 * Line Buffer Setup
	 * There are 6 line buffers, one for each display controller.
	 * There are 3 partitions per LB. Select the number of partitions
	 * to enable based on the display width.  For display widths larger
	 * than 4096, you need to use 2 display controllers and combine
	 * them using the stereo blender.
	 */
	if (amdgpu_crtc->base.enabled && mode) {
		if (mode->crtc_hdisplay < 1920) {
			tmp = 1;
			buffer_alloc = 2;
		} else if (mode->crtc_hdisplay < 2560) {
			tmp = 2;
			buffer_alloc = 2;
		} else if (mode->crtc_hdisplay < 4096) {
			tmp = 0;
			buffer_alloc = (adev->flags & AMDGPU_IS_APU) ? 2 : 4;
		} else {
			DRM_DEBUG_KMS("Mode too big for LB!\n");
			tmp = 0;
			buffer_alloc = (adev->flags & AMDGPU_IS_APU) ? 2 : 4;
		}
	} else {
		tmp = 1;
		buffer_alloc = 0;
	}

	WREG32(mmLB_MEMORY_CTRL + amdgpu_crtc->crtc_offset,
	       (tmp << LB_MEMORY_CTRL__LB_MEMORY_CONFIG__SHIFT) |
	       (0x6B0 << LB_MEMORY_CTRL__LB_MEMORY_SIZE__SHIFT));

	WREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset,
	       (buffer_alloc << PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATED__SHIFT));
	for (i = 0; i < adev->usec_timeout; i++) {
		if (RREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset) &
		    PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATION_COMPLETED_MASK)
			break;
		udelay(1);
	}

	if (amdgpu_crtc->base.enabled && mode) {
		switch (tmp) {
		case 0:
		default:
			return 4096 * 2;
		case 1:
			return 1920 * 2;
		case 2:
			return 2560 * 2;
		}
	}

	/* controller not enabled, so no lb used */
	return 0;
}
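/*
 * Worked example, assuming the values above: a 1920-pixel-wide mode misses
 * the "< 1920" bucket and lands in "< 2560", so tmp = 2 and the function
 * reports a line buffer of 2560 * 2 = 5120 entries to the watermark code.
 */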
/**
 * cik_get_number_of_dram_channels - get the number of dram channels
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the number of video ram channels (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the number of dram channels
 */
static u32 cik_get_number_of_dram_channels(struct amdgpu_device *adev)
{
	u32 tmp = RREG32(mmMC_SHARED_CHMAP);

	switch ((tmp & MC_SHARED_CHMAP__NOOFCHAN_MASK) >> MC_SHARED_CHMAP__NOOFCHAN__SHIFT) {
	case 0:
	default:
		return 1;
	case 1:
		return 2;
	case 2:
		return 4;
	case 3:
		return 8;
	case 4:
		return 3;
	case 5:
		return 6;
	case 6:
		return 10;
	case 7:
		return 12;
	case 8:
		return 16;
	}
}
struct dce8_wm_params {
	u32 dram_channels; /* number of dram channels */
	u32 yclk;          /* bandwidth per dram data pin in kHz */
	u32 sclk;          /* engine clock in kHz */
	u32 disp_clk;      /* display clock in kHz */
	u32 src_width;     /* viewport width */
	u32 active_time;   /* active display time in ns */
	u32 blank_time;    /* blank time in ns */
	bool interlaced;   /* mode is interlaced */
	fixed20_12 vsc;    /* vertical scale ratio */
	u32 num_heads;     /* number of active crtcs */
	u32 bytes_per_pixel; /* bytes per pixel display + overlay */
	u32 lb_size;       /* line buffer allocated to pipe */
	u32 vtaps;         /* vertical scaler taps */
};
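/*
 * Rough map of the watermark pipeline built on this struct: the dram,
 * data-return and dmif-request bandwidths are each computed from these
 * parameters; their minimum is the bandwidth a head may burst at, the
 * mode's average bandwidth is checked against it, and
 * dce_v8_0_latency_watermark() turns the result into the urgency value
 * that dce_v8_0_program_watermarks() finally writes to the hardware.
 */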
/**
 * dce_v8_0_dram_bandwidth - get the dram bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the raw dram bandwidth (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the dram bandwidth in MBytes/s
 */
static u32 dce_v8_0_dram_bandwidth(struct dce8_wm_params *wm)
{
	/* Calculate raw DRAM Bandwidth */
	fixed20_12 dram_efficiency; /* 0.7 */
	fixed20_12 yclk, dram_channels, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	yclk.full = dfixed_const(wm->yclk);
	yclk.full = dfixed_div(yclk, a);
	dram_channels.full = dfixed_const(wm->dram_channels * 4);
	a.full = dfixed_const(10);
	dram_efficiency.full = dfixed_const(7);
	dram_efficiency.full = dfixed_div(dram_efficiency, a);
	bandwidth.full = dfixed_mul(dram_channels, yclk);
	bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);

	return dfixed_trunc(bandwidth);
}
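/*
 * Example with round numbers: wm->yclk = 1000000 (a 1 GHz effective dram
 * clock) and 2 channels gives 2 * 4 bytes * 1000 MHz * 0.7, i.e. about
 * 5600 MBytes/s of raw dram bandwidth.
 */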
/**
 * dce_v8_0_dram_bandwidth_for_display - get the dram bandwidth for display
 *
 * @wm: watermark calculation data
 *
 * Calculate the dram bandwidth used for display (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the dram bandwidth for display in MBytes/s
 */
static u32 dce_v8_0_dram_bandwidth_for_display(struct dce8_wm_params *wm)
{
	/* Calculate DRAM Bandwidth and the part allocated to display. */
	fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
	fixed20_12 yclk, dram_channels, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	yclk.full = dfixed_const(wm->yclk);
	yclk.full = dfixed_div(yclk, a);
	dram_channels.full = dfixed_const(wm->dram_channels * 4);
	a.full = dfixed_const(10);
	disp_dram_allocation.full = dfixed_const(3); /* XXX worse case value 0.3 */
	disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
	bandwidth.full = dfixed_mul(dram_channels, yclk);
	bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);

	return dfixed_trunc(bandwidth);
}
/**
 * dce_v8_0_data_return_bandwidth - get the data return bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the data return bandwidth used for display (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the data return bandwidth in MBytes/s
 */
static u32 dce_v8_0_data_return_bandwidth(struct dce8_wm_params *wm)
{
	/* Calculate the display Data return Bandwidth */
	fixed20_12 return_efficiency; /* 0.8 */
	fixed20_12 sclk, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	sclk.full = dfixed_const(wm->sclk);
	sclk.full = dfixed_div(sclk, a);
	a.full = dfixed_const(10);
	return_efficiency.full = dfixed_const(8);
	return_efficiency.full = dfixed_div(return_efficiency, a);
	a.full = dfixed_const(32);
	bandwidth.full = dfixed_mul(a, sclk);
	bandwidth.full = dfixed_mul(bandwidth, return_efficiency);

	return dfixed_trunc(bandwidth);
}
/**
 * dce_v8_0_dmif_request_bandwidth - get the dmif bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the dmif bandwidth used for display (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the dmif bandwidth in MBytes/s
 */
static u32 dce_v8_0_dmif_request_bandwidth(struct dce8_wm_params *wm)
{
	/* Calculate the DMIF Request Bandwidth */
	fixed20_12 disp_clk_request_efficiency; /* 0.8 */
	fixed20_12 disp_clk, bandwidth;
	fixed20_12 a, b;

	a.full = dfixed_const(1000);
	disp_clk.full = dfixed_const(wm->disp_clk);
	disp_clk.full = dfixed_div(disp_clk, a);
	a.full = dfixed_const(32);
	b.full = dfixed_mul(a, disp_clk);

	a.full = dfixed_const(10);
	disp_clk_request_efficiency.full = dfixed_const(8);
	disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);

	bandwidth.full = dfixed_mul(b, disp_clk_request_efficiency);

	return dfixed_trunc(bandwidth);
}
/**
 * dce_v8_0_available_bandwidth - get the min available bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the min available bandwidth used for display (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the min available bandwidth in MBytes/s
 */
static u32 dce_v8_0_available_bandwidth(struct dce8_wm_params *wm)
{
	/* Calculate the Available bandwidth. Display can use this temporarily but not in average. */
	u32 dram_bandwidth = dce_v8_0_dram_bandwidth(wm);
	u32 data_return_bandwidth = dce_v8_0_data_return_bandwidth(wm);
	u32 dmif_req_bandwidth = dce_v8_0_dmif_request_bandwidth(wm);

	return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
}
/**
 * dce_v8_0_average_bandwidth - get the average available bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the average available bandwidth used for display (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the average available bandwidth in MBytes/s
 */
static u32 dce_v8_0_average_bandwidth(struct dce8_wm_params *wm)
{
	/* Calculate the display mode Average Bandwidth
	 * DisplayMode should contain the source and destination dimensions,
	 * timing, etc.
	 */
	fixed20_12 bpp;
	fixed20_12 line_time;
	fixed20_12 src_width;
	fixed20_12 bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	line_time.full = dfixed_const(wm->active_time + wm->blank_time);
	line_time.full = dfixed_div(line_time, a);
	bpp.full = dfixed_const(wm->bytes_per_pixel);
	src_width.full = dfixed_const(wm->src_width);
	bandwidth.full = dfixed_mul(src_width, bpp);
	bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
	bandwidth.full = dfixed_div(bandwidth, line_time);

	return dfixed_trunc(bandwidth);
}
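/*
 * In effect: average bandwidth = src_width * bytes_per_pixel * vsc /
 * line_time, with line_time converted from ns to us, so the result comes
 * out in bytes per microsecond, i.e. MBytes/s.
 */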
/**
 * dce_v8_0_latency_watermark - get the latency watermark
 *
 * @wm: watermark calculation data
 *
 * Calculate the latency watermark (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the latency watermark in ns
 */
static u32 dce_v8_0_latency_watermark(struct dce8_wm_params *wm)
{
	/* First calculate the latency in ns */
	u32 mc_latency = 2000; /* 2000 ns. */
	u32 available_bandwidth = dce_v8_0_available_bandwidth(wm);
	u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
	u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
	u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
	u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
		(wm->num_heads * cursor_line_pair_return_time);
	u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
	u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
	u32 tmp, dmif_size = 12288;
	fixed20_12 a, b, c;

	if (wm->num_heads == 0)
		return 0;

	a.full = dfixed_const(2);
	b.full = dfixed_const(1);
	if ((wm->vsc.full > a.full) ||
	    ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
	    (wm->vtaps >= 5) ||
	    ((wm->vsc.full >= a.full) && wm->interlaced))
		max_src_lines_per_dst_line = 4;
	else
		max_src_lines_per_dst_line = 2;

	a.full = dfixed_const(available_bandwidth);
	b.full = dfixed_const(wm->num_heads);
	a.full = dfixed_div(a, b);

	b.full = dfixed_const(mc_latency + 512);
	c.full = dfixed_const(wm->disp_clk);
	b.full = dfixed_div(b, c);

	c.full = dfixed_const(dmif_size);
	b.full = dfixed_div(c, b);

	tmp = min(dfixed_trunc(a), dfixed_trunc(b));

	b.full = dfixed_const(1000);
	c.full = dfixed_const(wm->disp_clk);
	b.full = dfixed_div(c, b);
	c.full = dfixed_const(wm->bytes_per_pixel);
	b.full = dfixed_mul(b, c);

	lb_fill_bw = min(tmp, dfixed_trunc(b));

	a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
	b.full = dfixed_const(1000);
	c.full = dfixed_const(lb_fill_bw);
	b.full = dfixed_div(c, b);
	a.full = dfixed_div(a, b);
	line_fill_time = dfixed_trunc(a);

	if (line_fill_time < wm->active_time)
		return latency;
	else
		return latency + (line_fill_time - wm->active_time);
}
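/*
 * The value returned above is the latency, in ns, that the pipe must be
 * able to ride out.  When the line buffer cannot be refilled within one
 * active line (line_fill_time >= active_time), the shortfall is added on
 * top of the memory-controller and display-pipe latencies.
 */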
/**
 * dce_v8_0_average_bandwidth_vs_dram_bandwidth_for_display - check
 * average and available dram bandwidth
 *
 * @wm: watermark calculation data
 *
 * Check if the display average bandwidth fits in the display
 * dram bandwidth (CIK).
 * Used for display watermark bandwidth calculations
 * Returns true if the display fits, false if not.
 */
static bool dce_v8_0_average_bandwidth_vs_dram_bandwidth_for_display(struct dce8_wm_params *wm)
{
	if (dce_v8_0_average_bandwidth(wm) <=
	    (dce_v8_0_dram_bandwidth_for_display(wm) / wm->num_heads))
		return true;
	else
		return false;
}
/**
 * dce_v8_0_average_bandwidth_vs_available_bandwidth - check
 * average and available bandwidth
 *
 * @wm: watermark calculation data
 *
 * Check if the display average bandwidth fits in the display
 * available bandwidth (CIK).
 * Used for display watermark bandwidth calculations
 * Returns true if the display fits, false if not.
 */
static bool dce_v8_0_average_bandwidth_vs_available_bandwidth(struct dce8_wm_params *wm)
{
	if (dce_v8_0_average_bandwidth(wm) <=
	    (dce_v8_0_available_bandwidth(wm) / wm->num_heads))
		return true;
	else
		return false;
}
/**
 * dce_v8_0_check_latency_hiding - check latency hiding
 *
 * @wm: watermark calculation data
 *
 * Check latency hiding (CIK).
 * Used for display watermark bandwidth calculations
 * Returns true if the display fits, false if not.
 */
static bool dce_v8_0_check_latency_hiding(struct dce8_wm_params *wm)
{
	u32 lb_partitions = wm->lb_size / wm->src_width;
	u32 line_time = wm->active_time + wm->blank_time;
	u32 latency_tolerant_lines;
	u32 latency_hiding;
	fixed20_12 a;

	a.full = dfixed_const(1);
	if (wm->vsc.full > a.full)
		latency_tolerant_lines = 1;
	else {
		if (lb_partitions <= (wm->vtaps + 1))
			latency_tolerant_lines = 1;
		else
			latency_tolerant_lines = 2;
	}

	latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);

	if (dce_v8_0_latency_watermark(wm) <= latency_hiding)
		return true;
	else
		return false;
}
/**
 * dce_v8_0_program_watermarks - program display watermarks
 *
 * @adev: amdgpu_device pointer
 * @amdgpu_crtc: the selected display controller
 * @lb_size: line buffer size
 * @num_heads: number of display controllers in use
 *
 * Calculate and program the display watermarks for the
 * selected display controller (CIK).
 */
static void dce_v8_0_program_watermarks(struct amdgpu_device *adev,
					struct amdgpu_crtc *amdgpu_crtc,
					u32 lb_size, u32 num_heads)
{
	struct drm_display_mode *mode = &amdgpu_crtc->base.mode;
	struct dce8_wm_params wm_low, wm_high;
	u32 pixel_period;
	u32 line_time = 0;
	u32 latency_watermark_a = 0, latency_watermark_b = 0;
	u32 tmp, wm_mask;

	if (amdgpu_crtc->base.enabled && num_heads && mode) {
		pixel_period = 1000000 / (u32)mode->clock;
		line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);

		/* watermark for high clocks */
		if (adev->pm.dpm_enabled) {
			wm_high.yclk =
				amdgpu_dpm_get_mclk(adev, false) * 10;
			wm_high.sclk =
				amdgpu_dpm_get_sclk(adev, false) * 10;
		} else {
			wm_high.yclk = adev->pm.current_mclk * 10;
			wm_high.sclk = adev->pm.current_sclk * 10;
		}

		wm_high.disp_clk = mode->clock;
		wm_high.src_width = mode->crtc_hdisplay;
		wm_high.active_time = mode->crtc_hdisplay * pixel_period;
		wm_high.blank_time = line_time - wm_high.active_time;
		wm_high.interlaced = false;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			wm_high.interlaced = true;
		wm_high.vsc = amdgpu_crtc->vsc;
		wm_high.vtaps = 1;
		if (amdgpu_crtc->rmx_type != RMX_OFF)
			wm_high.vtaps = 2;
		wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */
		wm_high.lb_size = lb_size;
		wm_high.dram_channels = cik_get_number_of_dram_channels(adev);
		wm_high.num_heads = num_heads;

		/* set for high clocks */
		latency_watermark_a = min(dce_v8_0_latency_watermark(&wm_high), (u32)65535);

		/* possibly force display priority to high */
		/* should really do this at mode validation time... */
		if (!dce_v8_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) ||
		    !dce_v8_0_average_bandwidth_vs_available_bandwidth(&wm_high) ||
		    !dce_v8_0_check_latency_hiding(&wm_high) ||
		    (adev->mode_info.disp_priority == 2)) {
			DRM_DEBUG_KMS("force priority to high\n");
		}

		/* watermark for low clocks */
		if (adev->pm.dpm_enabled) {
			wm_low.yclk =
				amdgpu_dpm_get_mclk(adev, true) * 10;
			wm_low.sclk =
				amdgpu_dpm_get_sclk(adev, true) * 10;
		} else {
			wm_low.yclk = adev->pm.current_mclk * 10;
			wm_low.sclk = adev->pm.current_sclk * 10;
		}

		wm_low.disp_clk = mode->clock;
		wm_low.src_width = mode->crtc_hdisplay;
		wm_low.active_time = mode->crtc_hdisplay * pixel_period;
		wm_low.blank_time = line_time - wm_low.active_time;
		wm_low.interlaced = false;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			wm_low.interlaced = true;
		wm_low.vsc = amdgpu_crtc->vsc;
		wm_low.vtaps = 1;
		if (amdgpu_crtc->rmx_type != RMX_OFF)
			wm_low.vtaps = 2;
		wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */
		wm_low.lb_size = lb_size;
		wm_low.dram_channels = cik_get_number_of_dram_channels(adev);
		wm_low.num_heads = num_heads;

		/* set for low clocks */
		latency_watermark_b = min(dce_v8_0_latency_watermark(&wm_low), (u32)65535);

		/* possibly force display priority to high */
		/* should really do this at mode validation time... */
		if (!dce_v8_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) ||
		    !dce_v8_0_average_bandwidth_vs_available_bandwidth(&wm_low) ||
		    !dce_v8_0_check_latency_hiding(&wm_low) ||
		    (adev->mode_info.disp_priority == 2)) {
			DRM_DEBUG_KMS("force priority to high\n");
		}
	}

	/* select wm A */
	wm_mask = RREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset);
	tmp = wm_mask;
	tmp &= ~(3 << DPG_WATERMARK_MASK_CONTROL__URGENCY_WATERMARK_MASK__SHIFT);
	tmp |= (1 << DPG_WATERMARK_MASK_CONTROL__URGENCY_WATERMARK_MASK__SHIFT);
	WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp);
	WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset,
	       ((latency_watermark_a << DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT) |
		(line_time << DPG_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK__SHIFT)));
	/* select wm B */
	tmp = RREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset);
	tmp &= ~(3 << DPG_WATERMARK_MASK_CONTROL__URGENCY_WATERMARK_MASK__SHIFT);
	tmp |= (2 << DPG_WATERMARK_MASK_CONTROL__URGENCY_WATERMARK_MASK__SHIFT);
	WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp);
	WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset,
	       ((latency_watermark_b << DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT) |
		(line_time << DPG_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK__SHIFT)));
	/* restore original selection */
	WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, wm_mask);

	/* save values for DPM */
	amdgpu_crtc->line_time = line_time;
	amdgpu_crtc->wm_high = latency_watermark_a;
	amdgpu_crtc->wm_low = latency_watermark_b;
}
/**
 * dce_v8_0_bandwidth_update - program display watermarks
 *
 * @adev: amdgpu_device pointer
 *
 * Calculate and program the display watermarks and line
 * buffer allocation (CIK).
 */
static void dce_v8_0_bandwidth_update(struct amdgpu_device *adev)
{
	struct drm_display_mode *mode = NULL;
	u32 num_heads = 0, lb_size;
	int i;

	amdgpu_update_display_priority(adev);

	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		if (adev->mode_info.crtcs[i]->base.enabled)
			num_heads++;
	}
	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		mode = &adev->mode_info.crtcs[i]->base.mode;
		lb_size = dce_v8_0_line_buffer_adjust(adev, adev->mode_info.crtcs[i], mode);
		dce_v8_0_program_watermarks(adev, adev->mode_info.crtcs[i],
					    lb_size, num_heads);
	}
}
static void dce_v8_0_audio_get_connected_pins(struct amdgpu_device *adev)
{
	int i;
	u32 offset, tmp;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		offset = adev->mode_info.audio.pin[i].offset;
		tmp = RREG32_AUDIO_ENDPT(offset,
					 ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT);
		if (((tmp &
		      AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK) >>
		     AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT) == 1)
			adev->mode_info.audio.pin[i].connected = false;
		else
			adev->mode_info.audio.pin[i].connected = true;
	}
}
static struct amdgpu_audio_pin *dce_v8_0_audio_get_pin(struct amdgpu_device *adev)
{
	int i;

	dce_v8_0_audio_get_connected_pins(adev);

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		if (adev->mode_info.audio.pin[i].connected)
			return &adev->mode_info.audio.pin[i];
	}
	DRM_ERROR("No connected audio pins found!\n");
	return NULL;
}
static void dce_v8_0_afmt_audio_select_pin(struct drm_encoder *encoder)
{
	struct amdgpu_device *adev = encoder->dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	u32 offset;

	if (!dig || !dig->afmt || !dig->afmt->pin)
		return;

	offset = dig->afmt->offset;

	WREG32(mmAFMT_AUDIO_SRC_CONTROL + offset,
	       (dig->afmt->pin->id << AFMT_AUDIO_SRC_CONTROL__AFMT_AUDIO_SRC_SELECT__SHIFT));
}
static void dce_v8_0_audio_write_latency_fields(struct drm_encoder *encoder,
						struct drm_display_mode *mode)
{
	struct amdgpu_device *adev = encoder->dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	struct drm_connector *connector;
	struct amdgpu_connector *amdgpu_connector = NULL;
	u32 tmp = 0, offset;

	if (!dig || !dig->afmt || !dig->afmt->pin)
		return;

	offset = dig->afmt->pin->offset;

	list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
		if (connector->encoder == encoder) {
			amdgpu_connector = to_amdgpu_connector(connector);
			break;
		}
	}

	if (!amdgpu_connector) {
		DRM_ERROR("Couldn't find encoder's connector\n");
		return;
	}

	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		if (connector->latency_present[1])
			tmp =
			(connector->video_latency[1] <<
			 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT) |
			(connector->audio_latency[1] <<
			 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT);
		else
			tmp =
			(0 <<
			 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT) |
			(0 <<
			 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT);
	} else {
		if (connector->latency_present[0])
			tmp =
			(connector->video_latency[0] <<
			 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT) |
			(connector->audio_latency[0] <<
			 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT);
		else
			tmp =
			(0 <<
			 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT) |
			(0 <<
			 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT);
	}
	WREG32_AUDIO_ENDPT(offset, ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp);
}
static void dce_v8_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
{
	struct amdgpu_device *adev = encoder->dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	struct drm_connector *connector;
	struct amdgpu_connector *amdgpu_connector = NULL;
	u32 offset, tmp;
	u8 *sadb = NULL;
	int sad_count;

	if (!dig || !dig->afmt || !dig->afmt->pin)
		return;

	offset = dig->afmt->pin->offset;

	list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
		if (connector->encoder == encoder) {
			amdgpu_connector = to_amdgpu_connector(connector);
			break;
		}
	}

	if (!amdgpu_connector) {
		DRM_ERROR("Couldn't find encoder's connector\n");
		return;
	}

	sad_count = drm_edid_to_speaker_allocation(amdgpu_connector_edid(connector), &sadb);
	if (sad_count < 0) {
		DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
		sad_count = 0;
	}

	/* program the speaker allocation */
	tmp = RREG32_AUDIO_ENDPT(offset, ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
	tmp &= ~(AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DP_CONNECTION_MASK |
		 AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION_MASK);
	/* set HDMI mode */
	tmp |= AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__HDMI_CONNECTION_MASK;
	if (sad_count)
		tmp |= (sadb[0] << AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION__SHIFT);
	else
		tmp |= (5 << AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION__SHIFT); /* stereo */
	WREG32_AUDIO_ENDPT(offset, ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);

	kfree(sadb);
}
static void dce_v8_0_audio_write_sad_regs(struct drm_encoder *encoder)
{
	struct amdgpu_device *adev = encoder->dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	u32 offset;
	struct drm_connector *connector;
	struct amdgpu_connector *amdgpu_connector = NULL;
	struct cea_sad *sads;
	int i, sad_count;

	static const u16 eld_reg_to_type[][2] = {
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, HDMI_AUDIO_CODING_TYPE_PCM },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1, HDMI_AUDIO_CODING_TYPE_AC3 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2, HDMI_AUDIO_CODING_TYPE_MPEG1 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3, HDMI_AUDIO_CODING_TYPE_MP3 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4, HDMI_AUDIO_CODING_TYPE_MPEG2 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5, HDMI_AUDIO_CODING_TYPE_AAC_LC },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6, HDMI_AUDIO_CODING_TYPE_DTS },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7, HDMI_AUDIO_CODING_TYPE_ATRAC },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9, HDMI_AUDIO_CODING_TYPE_EAC3 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10, HDMI_AUDIO_CODING_TYPE_DTS_HD },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11, HDMI_AUDIO_CODING_TYPE_MLP },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
	};

	if (!dig || !dig->afmt || !dig->afmt->pin)
		return;

	offset = dig->afmt->pin->offset;

	list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
		if (connector->encoder == encoder) {
			amdgpu_connector = to_amdgpu_connector(connector);
			break;
		}
	}

	if (!amdgpu_connector) {
		DRM_ERROR("Couldn't find encoder's connector\n");
		return;
	}

	sad_count = drm_edid_to_sad(amdgpu_connector_edid(connector), &sads);
	if (sad_count <= 0) {
		DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
		return;
	}
	BUG_ON(!sads);

	for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
		u32 value = 0;
		u8 stereo_freqs = 0;
		int max_channels = -1;
		int j;

		for (j = 0; j < sad_count; j++) {
			struct cea_sad *sad = &sads[j];

			if (sad->format == eld_reg_to_type[i][1]) {
				if (sad->channels > max_channels) {
					value = (sad->channels <<
						 AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS__SHIFT) |
						(sad->byte2 <<
						 AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2__SHIFT) |
						(sad->freq <<
						 AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES__SHIFT);
					max_channels = sad->channels;
				}

				if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM)
					stereo_freqs |= sad->freq;
				else
					break;
			}
		}

		value |= (stereo_freqs <<
			  AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO__SHIFT);

		WREG32_AUDIO_ENDPT(offset, eld_reg_to_type[i][0], value);
	}

	kfree(sads);
}
static void dce_v8_0_audio_enable(struct amdgpu_device *adev,
				  struct amdgpu_audio_pin *pin,
				  bool enable)
{
	if (!pin)
		return;

	WREG32_AUDIO_ENDPT(pin->offset, ixAZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
			   enable ? AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK : 0);
}
static const u32 pin_offsets[7] =
{
	(0x1780 - 0x1780),
	(0x1786 - 0x1780),
	(0x178c - 0x1780),
	(0x1792 - 0x1780),
	(0x1798 - 0x1780),
	(0x179d - 0x1780),
	(0x17a4 - 0x1780),
};
static int dce_v8_0_audio_init(struct amdgpu_device *adev)
{
	int i;

	if (!amdgpu_audio)
		return 0;

	adev->mode_info.audio.enabled = true;

	if (adev->asic_type == CHIP_KAVERI) /* KV: 4 streams, 7 endpoints */
		adev->mode_info.audio.num_pins = 7;
	else if ((adev->asic_type == CHIP_KABINI) ||
		 (adev->asic_type == CHIP_MULLINS)) /* KB/ML: 2 streams, 3 endpoints */
		adev->mode_info.audio.num_pins = 3;
	else if ((adev->asic_type == CHIP_BONAIRE) ||
		 (adev->asic_type == CHIP_HAWAII)) /* BN/HW: 6 streams, 7 endpoints */
		adev->mode_info.audio.num_pins = 7;
	else
		adev->mode_info.audio.num_pins = 3;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		adev->mode_info.audio.pin[i].channels = -1;
		adev->mode_info.audio.pin[i].rate = -1;
		adev->mode_info.audio.pin[i].bits_per_sample = -1;
		adev->mode_info.audio.pin[i].status_bits = 0;
		adev->mode_info.audio.pin[i].category_code = 0;
		adev->mode_info.audio.pin[i].connected = false;
		adev->mode_info.audio.pin[i].offset = pin_offsets[i];
		adev->mode_info.audio.pin[i].id = i;
		/* disable audio; it will be set up later */
		/* XXX remove once we switch to ip funcs */
		dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
	}

	return 0;
}
static void dce_v8_0_audio_fini(struct amdgpu_device *adev)
{
	int i;

	if (!adev->mode_info.audio.enabled)
		return;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++)
		dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);

	adev->mode_info.audio.enabled = false;
}
/*
 * update the N and CTS parameters for a given pixel clock rate
 */
static void dce_v8_0_afmt_update_ACR(struct drm_encoder *encoder, uint32_t clock)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_afmt_acr acr = amdgpu_afmt_acr(clock);
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	uint32_t offset = dig->afmt->offset;

	WREG32(mmHDMI_ACR_32_0 + offset, (acr.cts_32khz << HDMI_ACR_44_0__HDMI_ACR_CTS_44__SHIFT));
	WREG32(mmHDMI_ACR_32_1 + offset, acr.n_32khz);

	WREG32(mmHDMI_ACR_44_0 + offset, (acr.cts_44_1khz << HDMI_ACR_44_0__HDMI_ACR_CTS_44__SHIFT));
	WREG32(mmHDMI_ACR_44_1 + offset, acr.n_44_1khz);

	WREG32(mmHDMI_ACR_48_0 + offset, (acr.cts_48khz << HDMI_ACR_48_0__HDMI_ACR_CTS_48__SHIFT));
	WREG32(mmHDMI_ACR_48_1 + offset, acr.n_48khz);
}
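/*
 * Background: HDMI sinks regenerate the audio clock from the N/CTS pair
 * via 128 * audio_fs = f_TMDS * N / CTS, so each sample-rate family
 * (32/44.1/48 kHz) needs its own pair; amdgpu_afmt_acr() supplies them for
 * the given pixel clock.  The 32 kHz CTS write above reuses the 44.1 kHz
 * shift macro, apparently harmless because the CTS field sits at the same
 * bit position in both registers.
 */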
/*
 * build an HDMI Video Info Frame
 */
static void dce_v8_0_afmt_update_avi_infoframe(struct drm_encoder *encoder,
					       void *buffer, size_t size)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	uint32_t offset = dig->afmt->offset;
	uint8_t *frame = buffer + 3;
	uint8_t *header = buffer;

	WREG32(mmAFMT_AVI_INFO0 + offset,
	       frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
	WREG32(mmAFMT_AVI_INFO1 + offset,
	       frame[0x4] | (frame[0x5] << 8) | (frame[0x6] << 16) | (frame[0x7] << 24));
	WREG32(mmAFMT_AVI_INFO2 + offset,
	       frame[0x8] | (frame[0x9] << 8) | (frame[0xA] << 16) | (frame[0xB] << 24));
	WREG32(mmAFMT_AVI_INFO3 + offset,
	       frame[0xC] | (frame[0xD] << 8) | (header[1] << 24));
}
static void dce_v8_0_audio_set_dto(struct drm_encoder *encoder, u32 clock)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
	u32 dto_phase = 24 * 1000;
	u32 dto_modulo = clock;

	if (!dig || !dig->afmt)
		return;

	/* XXX two dtos; generally use dto0 for hdmi */
	/* Express [24MHz / target pixel clock] as an exact rational
	 * number (ratio of two integers): DCCG_AUDIO_DTOx_PHASE
	 * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator.
	 */
	WREG32(mmDCCG_AUDIO_DTO_SOURCE, (amdgpu_crtc->crtc_id << DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO0_SOURCE_SEL__SHIFT));
	WREG32(mmDCCG_AUDIO_DTO0_PHASE, dto_phase);
	WREG32(mmDCCG_AUDIO_DTO0_MODULE, dto_modulo);
}
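/*
 * Example: for a 65 MHz pixel clock (mode->clock == 65000) this programs
 * PHASE = 24000 and MODULE = 65000, i.e. the audio reference advances
 * 24000/65000ths as fast as the pixel clock, recreating the 24 MHz source
 * rate the audio side expects.
 */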
1724 * update the info frames with the data from the current display mode
1726 static void dce_v8_0_afmt_setmode(struct drm_encoder
*encoder
,
1727 struct drm_display_mode
*mode
)
1729 struct drm_device
*dev
= encoder
->dev
;
1730 struct amdgpu_device
*adev
= dev
->dev_private
;
1731 struct amdgpu_encoder
*amdgpu_encoder
= to_amdgpu_encoder(encoder
);
1732 struct amdgpu_encoder_atom_dig
*dig
= amdgpu_encoder
->enc_priv
;
	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
	u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
	struct hdmi_avi_infoframe frame;
	uint32_t offset, val;
	ssize_t err;
	int bpc = 8;

	if (!dig || !dig->afmt)
		return;

	/* Silent, r600_hdmi_enable will raise WARN for us */
	if (!dig->afmt->enabled)
		return;

	offset = dig->afmt->offset;

	/* hdmi deep color mode general control packets setup, if bpc > 8 */
	if (encoder->crtc) {
		struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
		bpc = amdgpu_crtc->bpc;
	}

	/* disable audio prior to setting up hw */
	dig->afmt->pin = dce_v8_0_audio_get_pin(adev);
	dce_v8_0_audio_enable(adev, dig->afmt->pin, false);

	dce_v8_0_audio_set_dto(encoder, mode->clock);

	WREG32(mmHDMI_VBI_PACKET_CONTROL + offset,
	       HDMI_VBI_PACKET_CONTROL__HDMI_NULL_SEND_MASK); /* send null packets when required */

	WREG32(mmAFMT_AUDIO_CRC_CONTROL + offset, 0x1000);

	val = RREG32(mmHDMI_CONTROL + offset);
	val &= ~HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE_MASK;
	val &= ~HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH_MASK;

	switch (bpc) {
	case 0:
	case 6:
	case 8:
	case 16:
	default:
		DRM_DEBUG("%s: Disabling hdmi deep color for %d bpc.\n",
			  connector->name, bpc);
		break;
	case 10:
		val |= HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE_MASK;
		val |= 1 << HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH__SHIFT;
		DRM_DEBUG("%s: Enabling hdmi deep color 30 for 10 bpc.\n",
			  connector->name);
		break;
	case 12:
		val |= HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE_MASK;
		val |= 2 << HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH__SHIFT;
		DRM_DEBUG("%s: Enabling hdmi deep color 36 for 12 bpc.\n",
			  connector->name);
		break;
	}

	WREG32(mmHDMI_CONTROL + offset, val);

	WREG32(mmHDMI_VBI_PACKET_CONTROL + offset,
	       HDMI_VBI_PACKET_CONTROL__HDMI_NULL_SEND_MASK | /* send null packets when required */
	       HDMI_VBI_PACKET_CONTROL__HDMI_GC_SEND_MASK | /* send general control packets */
	       HDMI_VBI_PACKET_CONTROL__HDMI_GC_CONT_MASK); /* send general control packets every frame */

	WREG32(mmHDMI_INFOFRAME_CONTROL0 + offset,
	       HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_SEND_MASK | /* enable audio info frames (frames won't be set until audio is enabled) */
	       HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_CONT_MASK); /* required for audio info values to be updated */

	WREG32(mmAFMT_INFOFRAME_CONTROL0 + offset,
	       AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_UPDATE_MASK); /* required for audio info values to be updated */

	WREG32(mmHDMI_INFOFRAME_CONTROL1 + offset,
	       (2 << HDMI_INFOFRAME_CONTROL1__HDMI_AUDIO_INFO_LINE__SHIFT)); /* anything other than 0 */

	WREG32(mmHDMI_GC + offset, 0); /* unset HDMI_GC_AVMUTE */

	WREG32(mmHDMI_AUDIO_PACKET_CONTROL + offset,
	       (1 << HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_DELAY_EN__SHIFT) | /* set the default audio delay */
	       (3 << HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_PACKETS_PER_LINE__SHIFT)); /* should be sufficient for all audio modes and small enough for all hblanks */

	WREG32(mmAFMT_AUDIO_PACKET_CONTROL + offset,
	       AFMT_AUDIO_PACKET_CONTROL__AFMT_60958_CS_UPDATE_MASK); /* allow 60958 channel status fields to be updated */

	/* fglrx clears sth in AFMT_AUDIO_PACKET_CONTROL2 here */

	if (bpc > 8)
		WREG32(mmHDMI_ACR_PACKET_CONTROL + offset,
		       HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUTO_SEND_MASK); /* allow hw to send ACR packets when required */
	else
		WREG32(mmHDMI_ACR_PACKET_CONTROL + offset,
		       HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SOURCE_MASK | /* select SW CTS value */
		       HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUTO_SEND_MASK); /* allow hw to send ACR packets when required */

	dce_v8_0_afmt_update_ACR(encoder, mode->clock);

	WREG32(mmAFMT_60958_0 + offset,
	       (1 << AFMT_60958_0__AFMT_60958_CS_CHANNEL_NUMBER_L__SHIFT));

	WREG32(mmAFMT_60958_1 + offset,
	       (2 << AFMT_60958_1__AFMT_60958_CS_CHANNEL_NUMBER_R__SHIFT));

	WREG32(mmAFMT_60958_2 + offset,
	       (3 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_2__SHIFT) |
	       (4 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_3__SHIFT) |
	       (5 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_4__SHIFT) |
	       (6 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_5__SHIFT) |
	       (7 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_6__SHIFT) |
	       (8 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_7__SHIFT));

	dce_v8_0_audio_write_speaker_allocation(encoder);

	WREG32(mmAFMT_AUDIO_PACKET_CONTROL2 + offset,
	       (0xff << AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_CHANNEL_ENABLE__SHIFT));

	dce_v8_0_afmt_audio_select_pin(encoder);
	dce_v8_0_audio_write_sad_regs(encoder);
	dce_v8_0_audio_write_latency_fields(encoder, mode);

	err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
	if (err < 0) {
		DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
		return;
	}

	err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
	if (err < 0) {
		DRM_ERROR("failed to pack AVI infoframe: %zd\n", err);
		return;
	}

	dce_v8_0_afmt_update_avi_infoframe(encoder, buffer, sizeof(buffer));

	WREG32_OR(mmHDMI_INFOFRAME_CONTROL0 + offset,
		  HDMI_INFOFRAME_CONTROL0__HDMI_AVI_INFO_SEND_MASK | /* enable AVI info frames */
		  HDMI_INFOFRAME_CONTROL0__HDMI_AVI_INFO_CONT_MASK); /* required for audio info values to be updated */

	WREG32_P(mmHDMI_INFOFRAME_CONTROL1 + offset,
		 (2 << HDMI_INFOFRAME_CONTROL1__HDMI_AVI_INFO_LINE__SHIFT), /* anything other than 0 */
		 ~HDMI_INFOFRAME_CONTROL1__HDMI_AVI_INFO_LINE_MASK);

	WREG32_OR(mmAFMT_AUDIO_PACKET_CONTROL + offset,
		  AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND_MASK); /* send audio packets */

	/* it's unknown what these bits do exactly, but it's indeed quite useful for debugging */
	WREG32(mmAFMT_RAMP_CONTROL0 + offset, 0x00FFFFFF);
	WREG32(mmAFMT_RAMP_CONTROL1 + offset, 0x007FFFFF);
	WREG32(mmAFMT_RAMP_CONTROL2 + offset, 0x00000001);
	WREG32(mmAFMT_RAMP_CONTROL3 + offset, 0x00000001);

	/* enable audio after setting up hw */
	dce_v8_0_audio_enable(adev, dig->afmt->pin, true);
}

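/**
 * dce_v8_0_afmt_enable - enable/disable the AFMT block for an encoder
 *
 * @encoder: drm encoder
 * @enable: true to enable, false to disable
 *
 * Tracks the AFMT enable state for the DIG encoder and drops the audio
 * pin association when the block is disabled.
 */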
static void dce_v8_0_afmt_enable(struct drm_encoder *encoder, bool enable)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;

	if (!dig || !dig->afmt)
		return;

	/* Silent, r600_hdmi_enable will raise WARN for us */
	if (enable && dig->afmt->enabled)
		return;
	if (!enable && !dig->afmt->enabled)
		return;

	if (!enable && dig->afmt->pin) {
		dce_v8_0_audio_enable(adev, dig->afmt->pin, false);
		dig->afmt->pin = NULL;
	}

	dig->afmt->enabled = enable;

	DRM_DEBUG("%sabling AFMT interface @ 0x%04X for encoder 0x%x\n",
		  enable ? "En" : "Dis", dig->afmt->offset, amdgpu_encoder->encoder_id);
}

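/**
 * dce_v8_0_afmt_init - allocate the AFMT (audio/HDMI packet) state
 *
 * @adev: amdgpu_device pointer
 *
 * DCE8 has one audio block per DIG encoder; allocate one amdgpu_afmt
 * per DIG and record its register offset and id.
 */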
static void dce_v8_0_afmt_init(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->mode_info.num_dig; i++)
		adev->mode_info.afmt[i] = NULL;

	/* DCE8 has audio blocks tied to DIG encoders */
	for (i = 0; i < adev->mode_info.num_dig; i++) {
		adev->mode_info.afmt[i] = kzalloc(sizeof(struct amdgpu_afmt), GFP_KERNEL);
		if (adev->mode_info.afmt[i]) {
			adev->mode_info.afmt[i]->offset = dig_offsets[i];
			adev->mode_info.afmt[i]->id = i;
		}
	}
}

static void dce_v8_0_afmt_fini(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->mode_info.num_dig; i++) {
		kfree(adev->mode_info.afmt[i]);
		adev->mode_info.afmt[i] = NULL;
	}
}

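/* per-CRTC VGA render control registers, indexed by crtc_id */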
static const u32 vga_control_regs[6] =
{
	mmD1VGA_CONTROL,
	mmD2VGA_CONTROL,
	mmD3VGA_CONTROL,
	mmD4VGA_CONTROL,
	mmD5VGA_CONTROL,
	mmD6VGA_CONTROL,
};

static void dce_v8_0_vga_enable(struct drm_crtc *crtc, bool enable)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = dev->dev_private;
	u32 vga_control;

	vga_control = RREG32(vga_control_regs[amdgpu_crtc->crtc_id]) & ~1;
	if (enable)
		WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control | 1);
	else
		WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control);
}

static void dce_v8_0_grph_enable(struct drm_crtc *crtc, bool enable)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = dev->dev_private;

	if (enable)
		WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 1);
	else
		WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 0);
}

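/**
 * dce_v8_0_crtc_do_set_base - program the scanout surface for a crtc
 *
 * @crtc: drm crtc
 * @fb: framebuffer to scan out (NULL to use crtc->primary->fb)
 * @x: viewport x offset
 * @y: viewport y offset
 * @atomic: non-zero if the fb is already pinned (pageflip path)
 *
 * Pins the framebuffer BO (unless atomic), translates the drm pixel
 * format and tiling flags into GRPH_CONTROL bits, and programs the
 * surface address, pitch, and viewport registers.
 */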
static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
				     struct drm_framebuffer *fb,
				     int x, int y, int atomic)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_framebuffer *amdgpu_fb;
	struct drm_framebuffer *target_fb;
	struct drm_gem_object *obj;
	struct amdgpu_bo *rbo;
	uint64_t fb_location, tiling_flags;
	uint32_t fb_format, fb_pitch_pixels;
	u32 fb_swap = (GRPH_ENDIAN_NONE << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
	u32 pipe_config;
	u32 tmp, viewport_w, viewport_h;
	int r;
	bool bypass_lut = false;

	/* no fb bound */
	if (!atomic && !crtc->primary->fb) {
		DRM_DEBUG_KMS("No FB bound\n");
		return 0;
	}

	if (atomic) {
		amdgpu_fb = to_amdgpu_framebuffer(fb);
		target_fb = fb;
	} else {
		amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
		target_fb = crtc->primary->fb;
	}

	/* If atomic, assume fb object is pinned & idle & fenced and
	 * just update base pointers
	 */
	obj = amdgpu_fb->obj;
	rbo = gem_to_amdgpu_bo(obj);
	r = amdgpu_bo_reserve(rbo, false);
	if (unlikely(r != 0))
		return r;

	if (atomic) {
		fb_location = amdgpu_bo_gpu_offset(rbo);
	} else {
		r = amdgpu_bo_pin(rbo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
		if (unlikely(r != 0)) {
			amdgpu_bo_unreserve(rbo);
			return -EINVAL;
		}
	}

	amdgpu_bo_get_tiling_flags(rbo, &tiling_flags);
	amdgpu_bo_unreserve(rbo);

	pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);

	switch (target_fb->pixel_format) {
	case DRM_FORMAT_C8:
		fb_format = ((GRPH_DEPTH_8BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
			     (GRPH_FORMAT_INDEXED << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
		break;
	case DRM_FORMAT_XRGB4444:
	case DRM_FORMAT_ARGB4444:
		fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
			     (GRPH_FORMAT_ARGB4444 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
#ifdef __BIG_ENDIAN
		fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
#endif
		break;
	case DRM_FORMAT_XRGB1555:
	case DRM_FORMAT_ARGB1555:
		fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
			     (GRPH_FORMAT_ARGB1555 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
#ifdef __BIG_ENDIAN
		fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
#endif
		break;
	case DRM_FORMAT_BGRX5551:
	case DRM_FORMAT_BGRA5551:
		fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
			     (GRPH_FORMAT_BGRA5551 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
#ifdef __BIG_ENDIAN
		fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
#endif
		break;
	case DRM_FORMAT_RGB565:
		fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
			     (GRPH_FORMAT_ARGB565 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
#ifdef __BIG_ENDIAN
		fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
#endif
		break;
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
			     (GRPH_FORMAT_ARGB8888 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
#ifdef __BIG_ENDIAN
		fb_swap = (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
#endif
		break;
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_ARGB2101010:
		fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
			     (GRPH_FORMAT_ARGB2101010 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
#ifdef __BIG_ENDIAN
		fb_swap = (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
#endif
		/* Greater 8 bpc fb needs to bypass hw-lut to retain precision */
		bypass_lut = true;
		break;
	case DRM_FORMAT_BGRX1010102:
	case DRM_FORMAT_BGRA1010102:
		fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
			     (GRPH_FORMAT_BGRA1010102 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
#ifdef __BIG_ENDIAN
		fb_swap = (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
#endif
		/* Greater 8 bpc fb needs to bypass hw-lut to retain precision */
		bypass_lut = true;
		break;
	default:
		DRM_ERROR("Unsupported screen format %s\n",
			  drm_get_format_name(target_fb->pixel_format));
		return -EINVAL;
	}

	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_2D_TILED_THIN1) {
		unsigned bankw, bankh, mtaspect, tile_split, num_banks;

		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);

		fb_format |= (num_banks << GRPH_CONTROL__GRPH_NUM_BANKS__SHIFT);
		fb_format |= (GRPH_ARRAY_2D_TILED_THIN1 << GRPH_CONTROL__GRPH_ARRAY_MODE__SHIFT);
		fb_format |= (tile_split << GRPH_CONTROL__GRPH_TILE_SPLIT__SHIFT);
		fb_format |= (bankw << GRPH_CONTROL__GRPH_BANK_WIDTH__SHIFT);
		fb_format |= (bankh << GRPH_CONTROL__GRPH_BANK_HEIGHT__SHIFT);
		fb_format |= (mtaspect << GRPH_CONTROL__GRPH_MACRO_TILE_ASPECT__SHIFT);
		fb_format |= (DISPLAY_MICRO_TILING << GRPH_CONTROL__GRPH_MICRO_TILE_MODE__SHIFT);
	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_1D_TILED_THIN1) {
		fb_format |= (GRPH_ARRAY_1D_TILED_THIN1 << GRPH_CONTROL__GRPH_ARRAY_MODE__SHIFT);
	}

	fb_format |= (pipe_config << GRPH_CONTROL__GRPH_PIPE_CONFIG__SHIFT);

	dce_v8_0_vga_enable(crtc, false);

	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
	       upper_32_bits(fb_location));
	WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
	       upper_32_bits(fb_location));
	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
	       (u32)fb_location & GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK);
	WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
	       (u32) fb_location & GRPH_SECONDARY_SURFACE_ADDRESS__GRPH_SECONDARY_SURFACE_ADDRESS_MASK);
	WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format);
	WREG32(mmGRPH_SWAP_CNTL + amdgpu_crtc->crtc_offset, fb_swap);

	/*
	 * The LUT only has 256 slots for indexing by a 8 bpc fb. Bypass the LUT
	 * for > 8 bpc scanout to avoid truncation of fb indices to 8 msb's, to
	 * retain the full precision throughout the pipeline.
	 */
	WREG32_P(mmGRPH_LUT_10BIT_BYPASS_CONTROL + amdgpu_crtc->crtc_offset,
		 (bypass_lut ? LUT_10BIT_BYPASS_EN : 0),
		 ~LUT_10BIT_BYPASS_EN);

	if (bypass_lut)
		DRM_DEBUG_KMS("Bypassing hardware LUT due to 10 bit fb scanout.\n");

	WREG32(mmGRPH_SURFACE_OFFSET_X + amdgpu_crtc->crtc_offset, 0);
	WREG32(mmGRPH_SURFACE_OFFSET_Y + amdgpu_crtc->crtc_offset, 0);
	WREG32(mmGRPH_X_START + amdgpu_crtc->crtc_offset, 0);
	WREG32(mmGRPH_Y_START + amdgpu_crtc->crtc_offset, 0);
	WREG32(mmGRPH_X_END + amdgpu_crtc->crtc_offset, target_fb->width);
	WREG32(mmGRPH_Y_END + amdgpu_crtc->crtc_offset, target_fb->height);

	fb_pitch_pixels = target_fb->pitches[0] / (target_fb->bits_per_pixel / 8);
	WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset, fb_pitch_pixels);

	dce_v8_0_grph_enable(crtc, true);

	WREG32(mmLB_DESKTOP_HEIGHT + amdgpu_crtc->crtc_offset,
	       target_fb->height);

	x &= ~3;
	y &= ~1;
	WREG32(mmVIEWPORT_START + amdgpu_crtc->crtc_offset,
	       (x << 16) | y);
	viewport_w = crtc->mode.hdisplay;
	viewport_h = (crtc->mode.vdisplay + 1) & ~1;
	WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset,
	       (viewport_w << 16) | viewport_h);

	/* pageflip setup */
	/* make sure flip is at vb rather than hb */
	tmp = RREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset);
	tmp &= ~GRPH_FLIP_CONTROL__GRPH_SURFACE_UPDATE_H_RETRACE_EN_MASK;
	WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, tmp);

	/* set pageflip to happen only at start of vblank interval (front porch) */
	WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 3);

	if (!atomic && fb && fb != crtc->primary->fb) {
		amdgpu_fb = to_amdgpu_framebuffer(fb);
		rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
		r = amdgpu_bo_reserve(rbo, false);
		if (unlikely(r != 0))
			return r;
		amdgpu_bo_unpin(rbo);
		amdgpu_bo_unreserve(rbo);
	}

	/* Bytes per pixel may have changed */
	dce_v8_0_bandwidth_update(adev);

	return 0;
}

static void dce_v8_0_set_interleave(struct drm_crtc *crtc,
				    struct drm_display_mode *mode)
{
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);

	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		WREG32(mmLB_DATA_FORMAT + amdgpu_crtc->crtc_offset,
		       LB_DATA_FORMAT__INTERLEAVE_EN_MASK);
	else
		WREG32(mmLB_DATA_FORMAT + amdgpu_crtc->crtc_offset, 0);
}

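/**
 * dce_v8_0_crtc_load_lut - load the gamma LUT for a crtc
 *
 * @crtc: drm crtc
 *
 * Bypasses the input CSC/prescale/degamma/regamma blocks and uploads
 * the 256-entry 10-bit gamma LUT from the amdgpu_crtc lut_r/g/b arrays.
 */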
static void dce_v8_0_crtc_load_lut(struct drm_crtc *crtc)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = dev->dev_private;
	int i;

	DRM_DEBUG_KMS("%d\n", amdgpu_crtc->crtc_id);

	WREG32(mmINPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
	       ((INPUT_CSC_BYPASS << INPUT_CSC_CONTROL__INPUT_CSC_GRPH_MODE__SHIFT) |
		(INPUT_CSC_BYPASS << INPUT_CSC_CONTROL__INPUT_CSC_OVL_MODE__SHIFT)));
	WREG32(mmPRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset,
	       PRESCALE_GRPH_CONTROL__GRPH_PRESCALE_BYPASS_MASK);
	WREG32(mmPRESCALE_OVL_CONTROL + amdgpu_crtc->crtc_offset,
	       PRESCALE_OVL_CONTROL__OVL_PRESCALE_BYPASS_MASK);
	WREG32(mmINPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset,
	       ((INPUT_GAMMA_USE_LUT << INPUT_GAMMA_CONTROL__GRPH_INPUT_GAMMA_MODE__SHIFT) |
		(INPUT_GAMMA_USE_LUT << INPUT_GAMMA_CONTROL__OVL_INPUT_GAMMA_MODE__SHIFT)));

	WREG32(mmDC_LUT_CONTROL + amdgpu_crtc->crtc_offset, 0);

	WREG32(mmDC_LUT_BLACK_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0);
	WREG32(mmDC_LUT_BLACK_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0);
	WREG32(mmDC_LUT_BLACK_OFFSET_RED + amdgpu_crtc->crtc_offset, 0);

	WREG32(mmDC_LUT_WHITE_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0xffff);
	WREG32(mmDC_LUT_WHITE_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0xffff);
	WREG32(mmDC_LUT_WHITE_OFFSET_RED + amdgpu_crtc->crtc_offset, 0xffff);

	WREG32(mmDC_LUT_RW_MODE + amdgpu_crtc->crtc_offset, 0);
	WREG32(mmDC_LUT_WRITE_EN_MASK + amdgpu_crtc->crtc_offset, 0x00000007);

	WREG32(mmDC_LUT_RW_INDEX + amdgpu_crtc->crtc_offset, 0);
	for (i = 0; i < 256; i++) {
		WREG32(mmDC_LUT_30_COLOR + amdgpu_crtc->crtc_offset,
		       (amdgpu_crtc->lut_r[i] << 20) |
		       (amdgpu_crtc->lut_g[i] << 10) |
		       (amdgpu_crtc->lut_b[i] << 0));
	}

	WREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
	       ((DEGAMMA_BYPASS << DEGAMMA_CONTROL__GRPH_DEGAMMA_MODE__SHIFT) |
		(DEGAMMA_BYPASS << DEGAMMA_CONTROL__OVL_DEGAMMA_MODE__SHIFT) |
		(DEGAMMA_BYPASS << DEGAMMA_CONTROL__CURSOR_DEGAMMA_MODE__SHIFT)));
	WREG32(mmGAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset,
	       ((GAMUT_REMAP_BYPASS << GAMUT_REMAP_CONTROL__GRPH_GAMUT_REMAP_MODE__SHIFT) |
		(GAMUT_REMAP_BYPASS << GAMUT_REMAP_CONTROL__OVL_GAMUT_REMAP_MODE__SHIFT)));
	WREG32(mmREGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
	       ((REGAMMA_BYPASS << REGAMMA_CONTROL__GRPH_REGAMMA_MODE__SHIFT) |
		(REGAMMA_BYPASS << REGAMMA_CONTROL__OVL_REGAMMA_MODE__SHIFT)));
	WREG32(mmOUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
	       ((OUTPUT_CSC_BYPASS << OUTPUT_CSC_CONTROL__OUTPUT_CSC_GRPH_MODE__SHIFT) |
		(OUTPUT_CSC_BYPASS << OUTPUT_CSC_CONTROL__OUTPUT_CSC_OVL_MODE__SHIFT)));
	/* XXX match this to the depth of the crtc fmt block, move to modeset? */
	WREG32(0x1a50 + amdgpu_crtc->crtc_offset, 0);
	/* XXX this only needs to be programmed once per crtc at startup,
	 * not sure where the best place for it is
	 */
	WREG32(mmALPHA_CONTROL + amdgpu_crtc->crtc_offset,
	       ALPHA_CONTROL__CURSOR_ALPHA_BLND_ENA_MASK);
}

static int dce_v8_0_pick_dig_encoder(struct drm_encoder *encoder)
{
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;

	switch (amdgpu_encoder->encoder_id) {
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
		if (dig->linkb)
			return 1;
		else
			return 0;
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
		if (dig->linkb)
			return 3;
		else
			return 2;
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
		if (dig->linkb)
			return 5;
		else
			return 4;
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
		return 6;
	default:
		DRM_ERROR("invalid encoder_id: 0x%x\n", amdgpu_encoder->encoder_id);
		return 0;
	}
}

/**
 * dce_v8_0_pick_pll - Allocate a PPLL for use by the crtc.
 *
 * @crtc: drm crtc
 *
 * Returns the PPLL (Pixel PLL) to be used by the crtc. For DP monitors
 * a single PPLL can be used for all DP crtcs/encoders. For non-DP
 * monitors a dedicated PPLL must be used. If a particular board has
 * an external DP PLL, return ATOM_PPLL_INVALID to skip PLL programming
 * as there is no need to program the PLL itself. If we are not able to
 * allocate a PLL, return ATOM_PPLL_INVALID to skip PLL programming to
 * avoid messing up an existing monitor.
 *
 * Asic specific PLL information
 *
 * DCE 8.x
 * KB/KV
 * - PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP)
 * CI
 * - PPLL0, PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) and DAC
 *
 */
static u32 dce_v8_0_pick_pll(struct drm_crtc *crtc)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = dev->dev_private;
	u32 pll_in_use;
	int pll;

	if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder))) {
		if (adev->clock.dp_extclk)
			/* skip PPLL programming if using ext clock */
			return ATOM_PPLL_INVALID;
		/* use the same PPLL for all DP monitors */
		pll = amdgpu_pll_get_shared_dp_ppll(crtc);
		if (pll != ATOM_PPLL_INVALID)
			return pll;
	} else {
		/* use the same PPLL for all monitors with the same clock */
		pll = amdgpu_pll_get_shared_nondp_ppll(crtc);
		if (pll != ATOM_PPLL_INVALID)
			return pll;
	}
	/* otherwise, pick one of the plls */
	if ((adev->asic_type == CHIP_KABINI) ||
	    (adev->asic_type == CHIP_MULLINS)) {
		/* KB/ML has PPLL1 and PPLL2 */
		pll_in_use = amdgpu_pll_get_use_mask(crtc);
		if (!(pll_in_use & (1 << ATOM_PPLL2)))
			return ATOM_PPLL2;
		if (!(pll_in_use & (1 << ATOM_PPLL1)))
			return ATOM_PPLL1;
		DRM_ERROR("unable to allocate a PPLL\n");
		return ATOM_PPLL_INVALID;
	} else {
		/* CI/KV has PPLL0, PPLL1, and PPLL2 */
		pll_in_use = amdgpu_pll_get_use_mask(crtc);
		if (!(pll_in_use & (1 << ATOM_PPLL2)))
			return ATOM_PPLL2;
		if (!(pll_in_use & (1 << ATOM_PPLL1)))
			return ATOM_PPLL1;
		if (!(pll_in_use & (1 << ATOM_PPLL0)))
			return ATOM_PPLL0;
		DRM_ERROR("unable to allocate a PPLL\n");
		return ATOM_PPLL_INVALID;
	}
	return ATOM_PPLL_INVALID;
}

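/* lock the cursor registers so position/size/address update atomically */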
static void dce_v8_0_lock_cursor(struct drm_crtc *crtc, bool lock)
{
	struct amdgpu_device *adev = crtc->dev->dev_private;
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	uint32_t cur_lock;

	cur_lock = RREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset);
	if (lock)
		cur_lock |= CUR_UPDATE__CURSOR_UPDATE_LOCK_MASK;
	else
		cur_lock &= ~CUR_UPDATE__CURSOR_UPDATE_LOCK_MASK;
	WREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset, cur_lock);
}

static void dce_v8_0_hide_cursor(struct drm_crtc *crtc)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct amdgpu_device *adev = crtc->dev->dev_private;

	WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
		   (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
		   (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
}

static void dce_v8_0_show_cursor(struct drm_crtc *crtc)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct amdgpu_device *adev = crtc->dev->dev_private;

	WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
		   CUR_CONTROL__CURSOR_EN_MASK |
		   (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
		   (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
}

static void dce_v8_0_set_cursor(struct drm_crtc *crtc, struct drm_gem_object *obj,
				uint64_t gpu_addr)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct amdgpu_device *adev = crtc->dev->dev_private;

	WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
	       upper_32_bits(gpu_addr));
	WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
	       gpu_addr & 0xffffffff);
}

static int dce_v8_0_crtc_cursor_move(struct drm_crtc *crtc,
				     int x, int y)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct amdgpu_device *adev = crtc->dev->dev_private;
	int xorigin = 0, yorigin = 0;

	/* avivo cursor are offset into the total surface */
	x += crtc->x;
	y += crtc->y;
	DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);

	if (x < 0) {
		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
		x = 0;
	}
	if (y < 0) {
		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
		y = 0;
	}

	dce_v8_0_lock_cursor(crtc, true);
	WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
	WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
	WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
	       ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
	dce_v8_0_lock_cursor(crtc, false);

	return 0;
}

static int dce_v8_0_crtc_cursor_set(struct drm_crtc *crtc,
				    struct drm_file *file_priv,
				    uint32_t handle,
				    uint32_t width,
				    uint32_t height)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_gem_object *obj;
	struct amdgpu_bo *robj;
	uint64_t gpu_addr;
	int ret;

	if (!handle) {
		/* turn off cursor */
		dce_v8_0_hide_cursor(crtc);
		obj = NULL;
		goto unpin;
	}

	if ((width > amdgpu_crtc->max_cursor_width) ||
	    (height > amdgpu_crtc->max_cursor_height)) {
		DRM_ERROR("bad cursor width or height %d x %d\n", width, height);
		return -EINVAL;
	}

	obj = drm_gem_object_lookup(crtc->dev, file_priv, handle);
	if (!obj) {
		DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, amdgpu_crtc->crtc_id);
		return -ENOENT;
	}

	robj = gem_to_amdgpu_bo(obj);
	ret = amdgpu_bo_reserve(robj, false);
	if (unlikely(ret != 0))
		goto fail;
	ret = amdgpu_bo_pin_restricted(robj, AMDGPU_GEM_DOMAIN_VRAM,
				       0, 0, &gpu_addr);
	amdgpu_bo_unreserve(robj);
	if (ret)
		goto fail;

	amdgpu_crtc->cursor_width = width;
	amdgpu_crtc->cursor_height = height;

	dce_v8_0_lock_cursor(crtc, true);
	dce_v8_0_set_cursor(crtc, obj, gpu_addr);
	dce_v8_0_show_cursor(crtc);
	dce_v8_0_lock_cursor(crtc, false);

unpin:
	if (amdgpu_crtc->cursor_bo) {
		robj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
		ret = amdgpu_bo_reserve(robj, false);
		if (likely(ret == 0)) {
			amdgpu_bo_unpin(robj);
			amdgpu_bo_unreserve(robj);
		}
		drm_gem_object_unreference_unlocked(amdgpu_crtc->cursor_bo);
	}

	amdgpu_crtc->cursor_bo = obj;
	return 0;
fail:
	drm_gem_object_unreference_unlocked(obj);

	return ret;
}

static void dce_v8_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
				    u16 *blue, uint32_t start, uint32_t size)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	int end = (start + size > 256) ? 256 : start + size, i;

	/* userspace palettes are always correct as is */
	for (i = start; i < end; i++) {
		amdgpu_crtc->lut_r[i] = red[i] >> 6;
		amdgpu_crtc->lut_g[i] = green[i] >> 6;
		amdgpu_crtc->lut_b[i] = blue[i] >> 6;
	}
	dce_v8_0_crtc_load_lut(crtc);
}

static void dce_v8_0_crtc_destroy(struct drm_crtc *crtc)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);

	drm_crtc_cleanup(crtc);
	destroy_workqueue(amdgpu_crtc->pflip_queue);
	kfree(amdgpu_crtc);
}

static const struct drm_crtc_funcs dce_v8_0_crtc_funcs = {
	.cursor_set = dce_v8_0_crtc_cursor_set,
	.cursor_move = dce_v8_0_crtc_cursor_move,
	.gamma_set = dce_v8_0_crtc_gamma_set,
	.set_config = amdgpu_crtc_set_config,
	.destroy = dce_v8_0_crtc_destroy,
	.page_flip = amdgpu_crtc_page_flip,
};

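/**
 * dce_v8_0_crtc_dpms - crtc power management
 *
 * @crtc: drm crtc
 * @mode: DRM_MODE_DPMS_* state
 *
 * Enables or blanks the crtc via atombios, keeps the vblank interrupt
 * state in sync, and lets the power management code adjust clocks.
 */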
static void dce_v8_0_crtc_dpms(struct drm_crtc *crtc, int mode)
{
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	unsigned type;

	switch (mode) {
	case DRM_MODE_DPMS_ON:
		amdgpu_crtc->enabled = true;
		amdgpu_atombios_crtc_enable(crtc, ATOM_ENABLE);
		dce_v8_0_vga_enable(crtc, true);
		amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
		dce_v8_0_vga_enable(crtc, false);
		/* Make sure VBLANK interrupt is still enabled */
		type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
		amdgpu_irq_update(adev, &adev->crtc_irq, type);
		drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id);
		dce_v8_0_crtc_load_lut(crtc);
		break;
	case DRM_MODE_DPMS_STANDBY:
	case DRM_MODE_DPMS_SUSPEND:
	case DRM_MODE_DPMS_OFF:
		drm_vblank_pre_modeset(dev, amdgpu_crtc->crtc_id);
		if (amdgpu_crtc->enabled) {
			dce_v8_0_vga_enable(crtc, true);
			amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE);
			dce_v8_0_vga_enable(crtc, false);
		}
		amdgpu_atombios_crtc_enable(crtc, ATOM_DISABLE);
		amdgpu_crtc->enabled = false;
		break;
	}
	/* adjust pm to dpms */
	amdgpu_pm_compute_clocks(adev);
}

static void dce_v8_0_crtc_prepare(struct drm_crtc *crtc)
{
	/* disable crtc pair power gating before programming */
	amdgpu_atombios_crtc_powergate(crtc, ATOM_DISABLE);
	amdgpu_atombios_crtc_lock(crtc, ATOM_ENABLE);
	dce_v8_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
}

static void dce_v8_0_crtc_commit(struct drm_crtc *crtc)
{
	dce_v8_0_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
	amdgpu_atombios_crtc_lock(crtc, ATOM_DISABLE);
}

static void dce_v8_0_crtc_disable(struct drm_crtc *crtc)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_atom_ss ss;
	int i;

	dce_v8_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
	if (crtc->primary->fb) {
		int r;
		struct amdgpu_framebuffer *amdgpu_fb;
		struct amdgpu_bo *rbo;

		amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
		rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
		r = amdgpu_bo_reserve(rbo, false);
		if (unlikely(r))
			DRM_ERROR("failed to reserve rbo before unpin\n");
		else {
			amdgpu_bo_unpin(rbo);
			amdgpu_bo_unreserve(rbo);
		}
	}
	/* disable the GRPH */
	dce_v8_0_grph_enable(crtc, false);

	amdgpu_atombios_crtc_powergate(crtc, ATOM_ENABLE);

	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		if (adev->mode_info.crtcs[i] &&
		    adev->mode_info.crtcs[i]->enabled &&
		    i != amdgpu_crtc->crtc_id &&
		    amdgpu_crtc->pll_id == adev->mode_info.crtcs[i]->pll_id) {
			/* one other crtc is using this pll don't turn
			 * off the pll
			 */
			goto done;
		}
	}

	switch (amdgpu_crtc->pll_id) {
	case ATOM_PPLL1:
	case ATOM_PPLL2:
		/* disable the ppll */
		amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id,
						 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
		break;
	case ATOM_PPLL0:
		/* disable the ppll */
		if ((adev->asic_type == CHIP_KAVERI) ||
		    (adev->asic_type == CHIP_BONAIRE) ||
		    (adev->asic_type == CHIP_HAWAII))
			amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id,
							 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
		break;
	default:
		break;
	}
done:
	amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
	amdgpu_crtc->adjusted_clock = 0;
	amdgpu_crtc->encoder = NULL;
	amdgpu_crtc->connector = NULL;
}

static int dce_v8_0_crtc_mode_set(struct drm_crtc *crtc,
				  struct drm_display_mode *mode,
				  struct drm_display_mode *adjusted_mode,
				  int x, int y, struct drm_framebuffer *old_fb)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);

	if (!amdgpu_crtc->adjusted_clock)
		return -EINVAL;

	amdgpu_atombios_crtc_set_pll(crtc, adjusted_mode);
	amdgpu_atombios_crtc_set_dtd_timing(crtc, adjusted_mode);
	dce_v8_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
	amdgpu_atombios_crtc_overscan_setup(crtc, mode, adjusted_mode);
	amdgpu_atombios_crtc_scaler_setup(crtc);
	/* update the hw version for dpm */
	amdgpu_crtc->hw_mode = *adjusted_mode;

	return 0;
}

static bool dce_v8_0_crtc_mode_fixup(struct drm_crtc *crtc,
				     const struct drm_display_mode *mode,
				     struct drm_display_mode *adjusted_mode)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct drm_encoder *encoder;

	/* assign the encoder to the amdgpu crtc to avoid repeated lookups later */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		if (encoder->crtc == crtc) {
			amdgpu_crtc->encoder = encoder;
			amdgpu_crtc->connector = amdgpu_get_connector_for_encoder(encoder);
			break;
		}
	}
	if ((amdgpu_crtc->encoder == NULL) || (amdgpu_crtc->connector == NULL)) {
		amdgpu_crtc->encoder = NULL;
		amdgpu_crtc->connector = NULL;
		return false;
	}
	if (!amdgpu_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
		return false;
	if (amdgpu_atombios_crtc_prepare_pll(crtc, adjusted_mode))
		return false;
	/* pick pll */
	amdgpu_crtc->pll_id = dce_v8_0_pick_pll(crtc);
	/* if we can't get a PPLL for a non-DP encoder, fail */
	if ((amdgpu_crtc->pll_id == ATOM_PPLL_INVALID) &&
	    !ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder)))
		return false;

	return true;
}

static int dce_v8_0_crtc_set_base(struct drm_crtc *crtc, int x, int y,
				  struct drm_framebuffer *old_fb)
{
	return dce_v8_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
}

static int dce_v8_0_crtc_set_base_atomic(struct drm_crtc *crtc,
					 struct drm_framebuffer *fb,
					 int x, int y, enum mode_set_atomic state)
{
	return dce_v8_0_crtc_do_set_base(crtc, fb, x, y, 1);
}

static const struct drm_crtc_helper_funcs dce_v8_0_crtc_helper_funcs = {
	.dpms = dce_v8_0_crtc_dpms,
	.mode_fixup = dce_v8_0_crtc_mode_fixup,
	.mode_set = dce_v8_0_crtc_mode_set,
	.mode_set_base = dce_v8_0_crtc_set_base,
	.mode_set_base_atomic = dce_v8_0_crtc_set_base_atomic,
	.prepare = dce_v8_0_crtc_prepare,
	.commit = dce_v8_0_crtc_commit,
	.load_lut = dce_v8_0_crtc_load_lut,
	.disable = dce_v8_0_crtc_disable,
};

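/**
 * dce_v8_0_crtc_init - allocate and initialize a crtc
 *
 * @adev: amdgpu_device pointer
 * @index: crtc index
 *
 * Registers the crtc with the DRM core, sets the cursor limits, seeds
 * a linear gamma LUT, and binds the per-crtc register offset.
 */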
static int dce_v8_0_crtc_init(struct amdgpu_device *adev, int index)
{
	struct amdgpu_crtc *amdgpu_crtc;
	int i;

	amdgpu_crtc = kzalloc(sizeof(struct amdgpu_crtc) +
			      (AMDGPUFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
	if (amdgpu_crtc == NULL)
		return -ENOMEM;

	drm_crtc_init(adev->ddev, &amdgpu_crtc->base, &dce_v8_0_crtc_funcs);

	drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256);
	amdgpu_crtc->crtc_id = index;
	amdgpu_crtc->pflip_queue = create_singlethread_workqueue("amdgpu-pageflip-queue");
	adev->mode_info.crtcs[index] = amdgpu_crtc;

	amdgpu_crtc->max_cursor_width = CIK_CURSOR_WIDTH;
	amdgpu_crtc->max_cursor_height = CIK_CURSOR_HEIGHT;
	adev->ddev->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
	adev->ddev->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;

	for (i = 0; i < 256; i++) {
		amdgpu_crtc->lut_r[i] = i << 2;
		amdgpu_crtc->lut_g[i] = i << 2;
		amdgpu_crtc->lut_b[i] = i << 2;
	}

	amdgpu_crtc->crtc_offset = crtc_offsets[amdgpu_crtc->crtc_id];

	amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
	amdgpu_crtc->adjusted_clock = 0;
	amdgpu_crtc->encoder = NULL;
	amdgpu_crtc->connector = NULL;
	drm_crtc_helper_add(&amdgpu_crtc->base, &dce_v8_0_crtc_helper_funcs);

	return 0;
}

static int dce_v8_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->audio_endpt_rreg = &dce_v8_0_audio_endpt_rreg;
	adev->audio_endpt_wreg = &dce_v8_0_audio_endpt_wreg;

	dce_v8_0_set_display_funcs(adev);
	dce_v8_0_set_irq_funcs(adev);

	switch (adev->asic_type) {
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_KAVERI:
		adev->mode_info.num_crtc = 4;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 7;
		break;
	case CHIP_KABINI:
	case CHIP_MULLINS:
		adev->mode_info.num_crtc = 2;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6; /* ? */
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	return 0;
}

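/*
 * IH source ids used below: 1-6 are the per-crtc vblank/vline sources,
 * 8/10/12/14/16/18 are the per-crtc pageflip sources, and 42 is the
 * shared hotplug (HPD) source.
 */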
static int dce_v8_0_sw_init(void *handle)
{
	int r, i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		r = amdgpu_irq_add_id(adev, i + 1, &adev->crtc_irq);
		if (r)
			return r;
	}

	for (i = 8; i < 20; i += 2) {
		r = amdgpu_irq_add_id(adev, i, &adev->pageflip_irq);
		if (r)
			return r;
	}

	r = amdgpu_irq_add_id(adev, 42, &adev->hpd_irq);
	if (r)
		return r;

	adev->mode_info.mode_config_initialized = true;

	adev->ddev->mode_config.funcs = &amdgpu_mode_funcs;

	adev->ddev->mode_config.max_width = 16384;
	adev->ddev->mode_config.max_height = 16384;

	adev->ddev->mode_config.preferred_depth = 24;
	adev->ddev->mode_config.prefer_shadow = 1;

	adev->ddev->mode_config.fb_base = adev->mc.aper_base;

	r = amdgpu_modeset_create_props(adev);
	if (r)
		return r;

	adev->ddev->mode_config.max_width = 16384;
	adev->ddev->mode_config.max_height = 16384;

	/* allocate crtcs */
	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		r = dce_v8_0_crtc_init(adev, i);
		if (r)
			return r;
	}

	if (amdgpu_atombios_get_connector_info_from_object_table(adev))
		amdgpu_print_display_setup(adev->ddev);
	else
		return -EINVAL;

	/* setup afmt */
	dce_v8_0_afmt_init(adev);

	r = dce_v8_0_audio_init(adev);
	if (r)
		return r;

	drm_kms_helper_poll_init(adev->ddev);

	return r;
}

static int dce_v8_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	kfree(adev->mode_info.bios_hardcoded_edid);

	drm_kms_helper_poll_fini(adev->ddev);

	dce_v8_0_audio_fini(adev);

	dce_v8_0_afmt_fini(adev);

	drm_mode_config_cleanup(adev->ddev);
	adev->mode_info.mode_config_initialized = false;

	return 0;
}

static int dce_v8_0_hw_init(void *handle)
{
	int i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* init dig PHYs, disp eng pll */
	amdgpu_atombios_encoder_init_dig(adev);
	amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk);

	/* initialize hpd */
	dce_v8_0_hpd_init(adev);

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
	}

	return 0;
}

static int dce_v8_0_hw_fini(void *handle)
{
	int i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	dce_v8_0_hpd_fini(adev);

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
	}

	return 0;
}

static int dce_v8_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_atombios_scratch_regs_save(adev);

	dce_v8_0_hpd_fini(adev);

	return 0;
}

static int dce_v8_0_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_atombios_scratch_regs_restore(adev);

	/* init dig PHYs, disp eng pll */
	amdgpu_atombios_encoder_init_dig(adev);
	amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk);
	/* turn on the BL */
	if (adev->mode_info.bl_encoder) {
		u8 bl_level = amdgpu_display_backlight_get_level(adev,
								 adev->mode_info.bl_encoder);
		amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder,
						   bl_level);
	}

	/* initialize hpd */
	dce_v8_0_hpd_init(adev);

	return 0;
}

static bool dce_v8_0_is_idle(void *handle)
{
	return true;
}

static int dce_v8_0_wait_for_idle(void *handle)
{
	return 0;
}

static void dce_v8_0_print_status(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	dev_info(adev->dev, "DCE 8.x registers\n");
	/* XXX todo */
}

static int dce_v8_0_soft_reset(void *handle)
{
	u32 srbm_soft_reset = 0, tmp;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (dce_v8_0_is_display_hung(adev))
		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK;

	if (srbm_soft_reset) {
		dce_v8_0_print_status((void *)adev);

		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		/* Wait a little for things to settle down */
		udelay(50);
		dce_v8_0_print_status((void *)adev);
	}
	return 0;
}

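/* enable/disable the VBLANK interrupt in the line buffer for one crtc */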
static void dce_v8_0_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev,
						     int crtc,
						     enum amdgpu_interrupt_state state)
{
	u32 reg_block, lb_interrupt_mask;

	if (crtc >= adev->mode_info.num_crtc) {
		DRM_DEBUG("invalid crtc %d\n", crtc);
		return;
	}

	switch (crtc) {
	case 0:
		reg_block = CRTC0_REGISTER_OFFSET;
		break;
	case 1:
		reg_block = CRTC1_REGISTER_OFFSET;
		break;
	case 2:
		reg_block = CRTC2_REGISTER_OFFSET;
		break;
	case 3:
		reg_block = CRTC3_REGISTER_OFFSET;
		break;
	case 4:
		reg_block = CRTC4_REGISTER_OFFSET;
		break;
	case 5:
		reg_block = CRTC5_REGISTER_OFFSET;
		break;
	default:
		DRM_DEBUG("invalid crtc %d\n", crtc);
		return;
	}

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + reg_block);
		lb_interrupt_mask &= ~LB_INTERRUPT_MASK__VBLANK_INTERRUPT_MASK_MASK;
		WREG32(mmLB_INTERRUPT_MASK + reg_block, lb_interrupt_mask);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + reg_block);
		lb_interrupt_mask |= LB_INTERRUPT_MASK__VBLANK_INTERRUPT_MASK_MASK;
		WREG32(mmLB_INTERRUPT_MASK + reg_block, lb_interrupt_mask);
		break;
	default:
		break;
	}
}

static void dce_v8_0_set_crtc_vline_interrupt_state(struct amdgpu_device *adev,
						    int crtc,
						    enum amdgpu_interrupt_state state)
{
	u32 reg_block, lb_interrupt_mask;

	if (crtc >= adev->mode_info.num_crtc) {
		DRM_DEBUG("invalid crtc %d\n", crtc);
		return;
	}

	switch (crtc) {
	case 0:
		reg_block = CRTC0_REGISTER_OFFSET;
		break;
	case 1:
		reg_block = CRTC1_REGISTER_OFFSET;
		break;
	case 2:
		reg_block = CRTC2_REGISTER_OFFSET;
		break;
	case 3:
		reg_block = CRTC3_REGISTER_OFFSET;
		break;
	case 4:
		reg_block = CRTC4_REGISTER_OFFSET;
		break;
	case 5:
		reg_block = CRTC5_REGISTER_OFFSET;
		break;
	default:
		DRM_DEBUG("invalid crtc %d\n", crtc);
		return;
	}

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + reg_block);
		lb_interrupt_mask &= ~LB_INTERRUPT_MASK__VLINE_INTERRUPT_MASK_MASK;
		WREG32(mmLB_INTERRUPT_MASK + reg_block, lb_interrupt_mask);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + reg_block);
		lb_interrupt_mask |= LB_INTERRUPT_MASK__VLINE_INTERRUPT_MASK_MASK;
		WREG32(mmLB_INTERRUPT_MASK + reg_block, lb_interrupt_mask);
		break;
	default:
		break;
	}
}

static int dce_v8_0_set_hpd_interrupt_state(struct amdgpu_device *adev,
					    struct amdgpu_irq_src *src,
					    unsigned type,
					    enum amdgpu_interrupt_state state)
{
	u32 dc_hpd_int_cntl_reg, dc_hpd_int_cntl;

	switch (type) {
	case AMDGPU_HPD_1:
		dc_hpd_int_cntl_reg = mmDC_HPD1_INT_CONTROL;
		break;
	case AMDGPU_HPD_2:
		dc_hpd_int_cntl_reg = mmDC_HPD2_INT_CONTROL;
		break;
	case AMDGPU_HPD_3:
		dc_hpd_int_cntl_reg = mmDC_HPD3_INT_CONTROL;
		break;
	case AMDGPU_HPD_4:
		dc_hpd_int_cntl_reg = mmDC_HPD4_INT_CONTROL;
		break;
	case AMDGPU_HPD_5:
		dc_hpd_int_cntl_reg = mmDC_HPD5_INT_CONTROL;
		break;
	case AMDGPU_HPD_6:
		dc_hpd_int_cntl_reg = mmDC_HPD6_INT_CONTROL;
		break;
	default:
		DRM_DEBUG("invalid hpd %d\n", type);
		return 0;
	}

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		dc_hpd_int_cntl = RREG32(dc_hpd_int_cntl_reg);
		dc_hpd_int_cntl &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
		WREG32(dc_hpd_int_cntl_reg, dc_hpd_int_cntl);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		dc_hpd_int_cntl = RREG32(dc_hpd_int_cntl_reg);
		dc_hpd_int_cntl |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
		WREG32(dc_hpd_int_cntl_reg, dc_hpd_int_cntl);
		break;
	default:
		break;
	}

	return 0;
}

static int dce_v8_0_set_crtc_interrupt_state(struct amdgpu_device *adev,
					     struct amdgpu_irq_src *src,
					     unsigned type,
					     enum amdgpu_interrupt_state state)
{
	switch (type) {
	case AMDGPU_CRTC_IRQ_VBLANK1:
		dce_v8_0_set_crtc_vblank_interrupt_state(adev, 0, state);
		break;
	case AMDGPU_CRTC_IRQ_VBLANK2:
		dce_v8_0_set_crtc_vblank_interrupt_state(adev, 1, state);
		break;
	case AMDGPU_CRTC_IRQ_VBLANK3:
		dce_v8_0_set_crtc_vblank_interrupt_state(adev, 2, state);
		break;
	case AMDGPU_CRTC_IRQ_VBLANK4:
		dce_v8_0_set_crtc_vblank_interrupt_state(adev, 3, state);
		break;
	case AMDGPU_CRTC_IRQ_VBLANK5:
		dce_v8_0_set_crtc_vblank_interrupt_state(adev, 4, state);
		break;
	case AMDGPU_CRTC_IRQ_VBLANK6:
		dce_v8_0_set_crtc_vblank_interrupt_state(adev, 5, state);
		break;
	case AMDGPU_CRTC_IRQ_VLINE1:
		dce_v8_0_set_crtc_vline_interrupt_state(adev, 0, state);
		break;
	case AMDGPU_CRTC_IRQ_VLINE2:
		dce_v8_0_set_crtc_vline_interrupt_state(adev, 1, state);
		break;
	case AMDGPU_CRTC_IRQ_VLINE3:
		dce_v8_0_set_crtc_vline_interrupt_state(adev, 2, state);
		break;
	case AMDGPU_CRTC_IRQ_VLINE4:
		dce_v8_0_set_crtc_vline_interrupt_state(adev, 3, state);
		break;
	case AMDGPU_CRTC_IRQ_VLINE5:
		dce_v8_0_set_crtc_vline_interrupt_state(adev, 4, state);
		break;
	case AMDGPU_CRTC_IRQ_VLINE6:
		dce_v8_0_set_crtc_vline_interrupt_state(adev, 5, state);
		break;
	default:
		break;
	}
	return 0;
}

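/*
 * IH handler for crtc interrupts: src_id - 1 selects the crtc,
 * src_data 0 is vblank and 1 is vline; ack the status bit and forward
 * vblank events to the DRM core.
 */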
static int dce_v8_0_crtc_irq(struct amdgpu_device *adev,
			     struct amdgpu_irq_src *source,
			     struct amdgpu_iv_entry *entry)
{
	unsigned crtc = entry->src_id - 1;
	uint32_t disp_int = RREG32(interrupt_status_offsets[crtc].reg);
	unsigned irq_type = amdgpu_crtc_idx_to_irq_type(adev, crtc);

	switch (entry->src_data) {
	case 0: /* vblank */
		if (disp_int & interrupt_status_offsets[crtc].vblank)
			WREG32(mmLB_VBLANK_STATUS + crtc_offsets[crtc], LB_VBLANK_STATUS__VBLANK_ACK_MASK);
		else
			DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");

		if (amdgpu_irq_enabled(adev, source, irq_type)) {
			drm_handle_vblank(adev->ddev, crtc);
		}
		DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
		break;
	case 1: /* vline */
		if (disp_int & interrupt_status_offsets[crtc].vline)
			WREG32(mmLB_VLINE_STATUS + crtc_offsets[crtc], LB_VLINE_STATUS__VLINE_ACK_MASK);
		else
			DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");

		DRM_DEBUG("IH: D%d vline\n", crtc + 1);
		break;
	default:
		DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data);
		break;
	}

	return 0;
}

static int dce_v8_0_set_pageflip_interrupt_state(struct amdgpu_device *adev,
						 struct amdgpu_irq_src *src,
						 unsigned type,
						 enum amdgpu_interrupt_state state)
{
	u32 reg, reg_block;
	/* now deal with page flip IRQ */
	switch (type) {
	case AMDGPU_PAGEFLIP_IRQ_D1:
		reg_block = CRTC0_REGISTER_OFFSET;
		break;
	case AMDGPU_PAGEFLIP_IRQ_D2:
		reg_block = CRTC1_REGISTER_OFFSET;
		break;
	case AMDGPU_PAGEFLIP_IRQ_D3:
		reg_block = CRTC2_REGISTER_OFFSET;
		break;
	case AMDGPU_PAGEFLIP_IRQ_D4:
		reg_block = CRTC3_REGISTER_OFFSET;
		break;
	case AMDGPU_PAGEFLIP_IRQ_D5:
		reg_block = CRTC4_REGISTER_OFFSET;
		break;
	case AMDGPU_PAGEFLIP_IRQ_D6:
		reg_block = CRTC5_REGISTER_OFFSET;
		break;
	default:
		DRM_ERROR("invalid pageflip crtc %d\n", type);
		return -EINVAL;
	}

	reg = RREG32(mmGRPH_INTERRUPT_CONTROL + reg_block);
	if (state == AMDGPU_IRQ_STATE_DISABLE)
		WREG32(mmGRPH_INTERRUPT_CONTROL + reg_block, reg & ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
	else
		WREG32(mmGRPH_INTERRUPT_CONTROL + reg_block, reg | GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);

	return 0;
}

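/*
 * IH handler for pageflip completion: pageflip src_ids start at 8 and
 * come in pairs, so (src_id - 8) >> 1 recovers the crtc index.
 */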
static int dce_v8_0_pageflip_irq(struct amdgpu_device *adev,
				 struct amdgpu_irq_src *source,
				 struct amdgpu_iv_entry *entry)
{
	int reg_block;
	unsigned long flags;
	unsigned crtc_id;
	struct amdgpu_crtc *amdgpu_crtc;
	struct amdgpu_flip_work *works;

	crtc_id = (entry->src_id - 8) >> 1;
	amdgpu_crtc = adev->mode_info.crtcs[crtc_id];

	/* ack the interrupt */
	switch (crtc_id) {
	case AMDGPU_PAGEFLIP_IRQ_D1:
		reg_block = CRTC0_REGISTER_OFFSET;
		break;
	case AMDGPU_PAGEFLIP_IRQ_D2:
		reg_block = CRTC1_REGISTER_OFFSET;
		break;
	case AMDGPU_PAGEFLIP_IRQ_D3:
		reg_block = CRTC2_REGISTER_OFFSET;
		break;
	case AMDGPU_PAGEFLIP_IRQ_D4:
		reg_block = CRTC3_REGISTER_OFFSET;
		break;
	case AMDGPU_PAGEFLIP_IRQ_D5:
		reg_block = CRTC4_REGISTER_OFFSET;
		break;
	case AMDGPU_PAGEFLIP_IRQ_D6:
		reg_block = CRTC5_REGISTER_OFFSET;
		break;
	default:
		DRM_ERROR("invalid pageflip crtc %d\n", crtc_id);
		return -EINVAL;
	}

	if (RREG32(mmGRPH_INTERRUPT_STATUS + reg_block) & GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK)
		WREG32(mmGRPH_INTERRUPT_STATUS + reg_block, GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK);

	/* IRQ could occur when in initial stage */
	if (amdgpu_crtc == NULL)
		return 0;

	spin_lock_irqsave(&adev->ddev->event_lock, flags);
	works = amdgpu_crtc->pflip_works;
	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != "
				 "AMDGPU_FLIP_SUBMITTED(%d)\n",
				 amdgpu_crtc->pflip_status,
				 AMDGPU_FLIP_SUBMITTED);
		spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
		return 0;
	}

	/* page flip completed. clean up */
	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	amdgpu_crtc->pflip_works = NULL;

	/* wakeup userspace */
	if (works->event)
		drm_send_vblank_event(adev->ddev, crtc_id, works->event);

	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);

	drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id);
	amdgpu_irq_put(adev, &adev->pageflip_irq, crtc_id);
	queue_work(amdgpu_crtc->pflip_queue, &works->unpin_work);

	return 0;
}

static int dce_v8_0_hpd_irq(struct amdgpu_device *adev,
			    struct amdgpu_irq_src *source,
			    struct amdgpu_iv_entry *entry)
{
	uint32_t disp_int, mask, int_control, tmp;
	unsigned hpd;

	if (entry->src_data >= adev->mode_info.num_hpd) {
		DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data);
		return 0;
	}

	hpd = entry->src_data;
	disp_int = RREG32(interrupt_status_offsets[hpd].reg);
	mask = interrupt_status_offsets[hpd].hpd;
	int_control = hpd_int_control_offsets[hpd];

	if (disp_int & mask) {
		tmp = RREG32(int_control);
		tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK;
		WREG32(int_control, tmp);
		schedule_work(&adev->hotplug_work);
		DRM_DEBUG("IH: HPD%d\n", hpd + 1);
	}

	return 0;
}

static int dce_v8_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	return 0;
}

static int dce_v8_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	return 0;
}

const struct amd_ip_funcs dce_v8_0_ip_funcs = {
	.early_init = dce_v8_0_early_init,
	.late_init = NULL,
	.sw_init = dce_v8_0_sw_init,
	.sw_fini = dce_v8_0_sw_fini,
	.hw_init = dce_v8_0_hw_init,
	.hw_fini = dce_v8_0_hw_fini,
	.suspend = dce_v8_0_suspend,
	.resume = dce_v8_0_resume,
	.is_idle = dce_v8_0_is_idle,
	.wait_for_idle = dce_v8_0_wait_for_idle,
	.soft_reset = dce_v8_0_soft_reset,
	.print_status = dce_v8_0_print_status,
	.set_clockgating_state = dce_v8_0_set_clockgating_state,
	.set_powergating_state = dce_v8_0_set_powergating_state,
};

static void
dce_v8_0_encoder_mode_set(struct drm_encoder *encoder,
			  struct drm_display_mode *mode,
			  struct drm_display_mode *adjusted_mode)
{
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);

	amdgpu_encoder->pixel_clock = adjusted_mode->clock;

	/* need to call this here rather than in prepare() since we need some crtc info */
	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);

	/* set scaler clears this on some chips */
	dce_v8_0_set_interleave(encoder->crtc, mode);

	if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
		dce_v8_0_afmt_enable(encoder, true);
		dce_v8_0_afmt_setmode(encoder, adjusted_mode);
	}
}

static void dce_v8_0_encoder_prepare(struct drm_encoder *encoder)
{
	struct amdgpu_device *adev = encoder->dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);

	if ((amdgpu_encoder->active_device &
	     (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
	    (amdgpu_encoder_get_dp_bridge_encoder_id(encoder) !=
	     ENCODER_OBJECT_ID_NONE)) {
		struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;

		dig->dig_encoder = dce_v8_0_pick_dig_encoder(encoder);
		if (amdgpu_encoder->active_device & ATOM_DEVICE_DFP_SUPPORT)
			dig->afmt = adev->mode_info.afmt[dig->dig_encoder];
	}

	amdgpu_atombios_scratch_regs_lock(adev, true);

	if (connector) {
		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);

		/* select the clock/data port if it uses a router */
		if (amdgpu_connector->router.cd_valid)
			amdgpu_i2c_router_select_cd_port(amdgpu_connector);

		/* turn eDP panel on for mode set */
		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
			amdgpu_atombios_encoder_set_edp_panel_power(connector,
								    ATOM_TRANSMITTER_ACTION_POWER_ON);
	}

	/* this is needed for the pll/ss setup to work correctly in some cases */
	amdgpu_atombios_encoder_set_crtc_source(encoder);
	/* set up the FMT blocks */
	dce_v8_0_program_fmt(encoder);
}

static void dce_v8_0_encoder_commit(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = dev->dev_private;

	/* need to call this here as we need the crtc set up */
	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
	amdgpu_atombios_scratch_regs_lock(adev, false);
}

static void dce_v8_0_encoder_disable(struct drm_encoder *encoder)
{
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig;

	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);

	if (amdgpu_atombios_encoder_is_digital(encoder)) {
		if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
			dce_v8_0_afmt_enable(encoder, false);
		dig = amdgpu_encoder->enc_priv;
		dig->dig_encoder = -1;
	}
	amdgpu_encoder->active_device = 0;
}

/* these are handled by the primary encoders */
static void dce_v8_0_ext_prepare(struct drm_encoder *encoder)
{

}

static void dce_v8_0_ext_commit(struct drm_encoder *encoder)
{

}

static void
dce_v8_0_ext_mode_set(struct drm_encoder *encoder,
		      struct drm_display_mode *mode,
		      struct drm_display_mode *adjusted_mode)
{

}

static void dce_v8_0_ext_disable(struct drm_encoder *encoder)
{

}

static void
dce_v8_0_ext_dpms(struct drm_encoder *encoder, int mode)
{

}

static bool dce_v8_0_ext_mode_fixup(struct drm_encoder *encoder,
				    const struct drm_display_mode *mode,
				    struct drm_display_mode *adjusted_mode)
{
	return true;
}

static const struct drm_encoder_helper_funcs dce_v8_0_ext_helper_funcs = {
	.dpms = dce_v8_0_ext_dpms,
	.mode_fixup = dce_v8_0_ext_mode_fixup,
	.prepare = dce_v8_0_ext_prepare,
	.mode_set = dce_v8_0_ext_mode_set,
	.commit = dce_v8_0_ext_commit,
	.disable = dce_v8_0_ext_disable,
	/* no detect for TMDS/LVDS yet */
};

static const struct drm_encoder_helper_funcs dce_v8_0_dig_helper_funcs = {
	.dpms = amdgpu_atombios_encoder_dpms,
	.mode_fixup = amdgpu_atombios_encoder_mode_fixup,
	.prepare = dce_v8_0_encoder_prepare,
	.mode_set = dce_v8_0_encoder_mode_set,
	.commit = dce_v8_0_encoder_commit,
	.disable = dce_v8_0_encoder_disable,
	.detect = amdgpu_atombios_encoder_dig_detect,
};

static const struct drm_encoder_helper_funcs dce_v8_0_dac_helper_funcs = {
	.dpms = amdgpu_atombios_encoder_dpms,
	.mode_fixup = amdgpu_atombios_encoder_mode_fixup,
	.prepare = dce_v8_0_encoder_prepare,
	.mode_set = dce_v8_0_encoder_mode_set,
	.commit = dce_v8_0_encoder_commit,
	.detect = amdgpu_atombios_encoder_dac_detect,
};

static void dce_v8_0_encoder_destroy(struct drm_encoder *encoder)
{
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);

	if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
		amdgpu_atombios_encoder_fini_backlight(amdgpu_encoder);
	kfree(amdgpu_encoder->enc_priv);
	drm_encoder_cleanup(encoder);
	kfree(amdgpu_encoder);
}

static const struct drm_encoder_funcs dce_v8_0_encoder_funcs = {
	.destroy = dce_v8_0_encoder_destroy,
};

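/**
 * dce_v8_0_encoder_add - register an encoder found in the BIOS object table
 *
 * @adev: amdgpu_device pointer
 * @encoder_enum: encoder object enum from the object table
 * @supported_device: bitmask of ATOM_DEVICE_* this encoder drives
 * @caps: encoder capability flags
 *
 * Merges duplicate entries into the already-registered encoder,
 * otherwise allocates a new amdgpu_encoder and hooks up the DAC, DIG,
 * or external encoder helpers based on the encoder object id.
 */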
static void dce_v8_0_encoder_add(struct amdgpu_device *adev,
				 uint32_t encoder_enum,
				 uint32_t supported_device,
				 u16 caps)
{
	struct drm_device *dev = adev->ddev;
	struct drm_encoder *encoder;
	struct amdgpu_encoder *amdgpu_encoder;

	/* see if we already added it */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		amdgpu_encoder = to_amdgpu_encoder(encoder);
		if (amdgpu_encoder->encoder_enum == encoder_enum) {
			amdgpu_encoder->devices |= supported_device;
			return;
		}
	}

	/* add a new one */
	amdgpu_encoder = kzalloc(sizeof(struct amdgpu_encoder), GFP_KERNEL);
	if (!amdgpu_encoder)
		return;

	encoder = &amdgpu_encoder->base;
	switch (adev->mode_info.num_crtc) {
	case 1:
		encoder->possible_crtcs = 0x1;
		break;
	case 2:
	default:
		encoder->possible_crtcs = 0x3;
		break;
	case 4:
		encoder->possible_crtcs = 0xf;
		break;
	case 6:
		encoder->possible_crtcs = 0x3f;
		break;
	}

	amdgpu_encoder->enc_priv = NULL;

	amdgpu_encoder->encoder_enum = encoder_enum;
	amdgpu_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
	amdgpu_encoder->devices = supported_device;
	amdgpu_encoder->rmx_type = RMX_OFF;
	amdgpu_encoder->underscan_type = UNDERSCAN_OFF;
	amdgpu_encoder->is_ext_encoder = false;
	amdgpu_encoder->caps = caps;

	switch (amdgpu_encoder->encoder_id) {
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
		drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
				 DRM_MODE_ENCODER_DAC);
		drm_encoder_helper_add(encoder, &dce_v8_0_dac_helper_funcs);
		break;
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
		if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
			amdgpu_encoder->rmx_type = RMX_FULL;
			drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
					 DRM_MODE_ENCODER_LVDS);
			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_lcd_info(amdgpu_encoder);
		} else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) {
			drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
					 DRM_MODE_ENCODER_DAC);
			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
		} else {
			drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
					 DRM_MODE_ENCODER_TMDS);
			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
		}
		drm_encoder_helper_add(encoder, &dce_v8_0_dig_helper_funcs);
		break;
	case ENCODER_OBJECT_ID_SI170B:
	case ENCODER_OBJECT_ID_CH7303:
	case ENCODER_OBJECT_ID_EXTERNAL_SDVOA:
	case ENCODER_OBJECT_ID_EXTERNAL_SDVOB:
	case ENCODER_OBJECT_ID_TITFP513:
	case ENCODER_OBJECT_ID_VT1623:
	case ENCODER_OBJECT_ID_HDMI_SI1930:
	case ENCODER_OBJECT_ID_TRAVIS:
	case ENCODER_OBJECT_ID_NUTMEG:
		/* these are handled by the primary encoders */
		amdgpu_encoder->is_ext_encoder = true;
		if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
			drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
					 DRM_MODE_ENCODER_LVDS);
		else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT))
			drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
					 DRM_MODE_ENCODER_DAC);
		else
			drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
					 DRM_MODE_ENCODER_TMDS);
		drm_encoder_helper_add(encoder, &dce_v8_0_ext_helper_funcs);
		break;
	}
}

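/* hook the DCE 8.x implementations into the common amdgpu display callbacks */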
static const struct amdgpu_display_funcs dce_v8_0_display_funcs = {
	.set_vga_render_state = &dce_v8_0_set_vga_render_state,
	.bandwidth_update = &dce_v8_0_bandwidth_update,
	.vblank_get_counter = &dce_v8_0_vblank_get_counter,
	.vblank_wait = &dce_v8_0_vblank_wait,
	.is_display_hung = &dce_v8_0_is_display_hung,
	.backlight_set_level = &amdgpu_atombios_encoder_set_backlight_level,
	.backlight_get_level = &amdgpu_atombios_encoder_get_backlight_level,
	.hpd_sense = &dce_v8_0_hpd_sense,
	.hpd_set_polarity = &dce_v8_0_hpd_set_polarity,
	.hpd_get_gpio_reg = &dce_v8_0_hpd_get_gpio_reg,
	.page_flip = &dce_v8_0_page_flip,
	.page_flip_get_scanoutpos = &dce_v8_0_crtc_get_scanoutpos,
	.add_encoder = &dce_v8_0_encoder_add,
	.add_connector = &amdgpu_connector_add,
	.stop_mc_access = &dce_v8_0_stop_mc_access,
	.resume_mc_access = &dce_v8_0_resume_mc_access,
};

static void dce_v8_0_set_display_funcs(struct amdgpu_device *adev)
{
	if (adev->mode_info.funcs == NULL)
		adev->mode_info.funcs = &dce_v8_0_display_funcs;
}

static const struct amdgpu_irq_src_funcs dce_v8_0_crtc_irq_funcs = {
	.set = dce_v8_0_set_crtc_interrupt_state,
	.process = dce_v8_0_crtc_irq,
};

static const struct amdgpu_irq_src_funcs dce_v8_0_pageflip_irq_funcs = {
	.set = dce_v8_0_set_pageflip_interrupt_state,
	.process = dce_v8_0_pageflip_irq,
};

static const struct amdgpu_irq_src_funcs dce_v8_0_hpd_irq_funcs = {
	.set = dce_v8_0_set_hpd_interrupt_state,
	.process = dce_v8_0_hpd_irq,
};

static void dce_v8_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_LAST;
	adev->crtc_irq.funcs = &dce_v8_0_crtc_irq_funcs;

	adev->pageflip_irq.num_types = AMDGPU_PAGEFLIP_IRQ_LAST;
	adev->pageflip_irq.funcs = &dce_v8_0_pageflip_irq_funcs;

	adev->hpd_irq.num_types = AMDGPU_HPD_LAST;
	adev->hpd_irq.funcs = &dce_v8_0_hpd_irq_funcs;
}