/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>

#include "amdgpu.h"
#include "amdgpu_vcn.h"
#include "amdgpu_pm.h"
#include "soc15.h"
#include "soc15d.h"
#include "soc15_common.h"

#include "vcn/vcn_1_0_offset.h"
#include "vcn/vcn_1_0_sh_mask.h"
#include "hdp/hdp_4_0_offset.h"
#include "mmhub/mmhub_9_1_offset.h"
#include "mmhub/mmhub_9_1_sh_mask.h"

#include "ivsrcid/vcn/irqsrcs_vcn_1_0.h"
#include "jpeg_v1_0.h"
#define mmUVD_RBC_XX_IB_REG_CHECK_1_0		0x05ab
#define mmUVD_RBC_XX_IB_REG_CHECK_1_0_BASE_IDX	1
#define mmUVD_REG_XX_MASK_1_0			0x05ac
#define mmUVD_REG_XX_MASK_1_0_BASE_IDX		1
static int vcn_v1_0_stop(struct amdgpu_device *adev);
static void vcn_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev);
static void vcn_v1_0_set_enc_ring_funcs(struct amdgpu_device *adev);
static void vcn_v1_0_set_irq_funcs(struct amdgpu_device *adev);
static int vcn_v1_0_set_powergating_state(void *handle, enum amd_powergating_state state);
static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device *adev,
				int inst_idx, struct dpg_pause_state *new_state);

static void vcn_v1_0_idle_work_handler(struct work_struct *work);
/**
 * vcn_v1_0_early_init - set function pointers
 *
 * @handle: amdgpu_device pointer
 *
 * Set ring and irq function pointers
 */
static int vcn_v1_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->vcn.num_vcn_inst = 1;
	adev->vcn.num_enc_rings = 2;

	vcn_v1_0_set_dec_ring_funcs(adev);
	vcn_v1_0_set_enc_ring_funcs(adev);
	vcn_v1_0_set_irq_funcs(adev);

	jpeg_v1_0_early_init(handle);

	return 0;
}
/**
 * vcn_v1_0_sw_init - sw init for VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Load firmware and sw initialization
 */
static int vcn_v1_0_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	int i, r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* VCN DEC TRAP */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
			VCN_1_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->vcn.inst->irq);
	if (r)
		return r;

	/* VCN ENC TRAP */
	for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, i + VCN_1_0__SRCID__UVD_ENC_GENERAL_PURPOSE,
					&adev->vcn.inst->irq);
		if (r)
			return r;
	}

	r = amdgpu_vcn_sw_init(adev);
	if (r)
		return r;

	/* Override the work func */
	adev->vcn.idle_work.work.func = vcn_v1_0_idle_work_handler;

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		const struct common_firmware_header *hdr;

		hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
		adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].ucode_id = AMDGPU_UCODE_ID_VCN;
		adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].fw = adev->vcn.fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
		DRM_INFO("PSP loading VCN firmware\n");
	}

	r = amdgpu_vcn_resume(adev);
	if (r)
		return r;

	ring = &adev->vcn.inst->ring_dec;
	sprintf(ring->name, "vcn_dec");
	r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0);
	if (r)
		return r;

	adev->vcn.internal.scratch9 = adev->vcn.inst->external.scratch9 =
		SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9);
	adev->vcn.internal.data0 = adev->vcn.inst->external.data0 =
		SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0);
	adev->vcn.internal.data1 = adev->vcn.inst->external.data1 =
		SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1);
	adev->vcn.internal.cmd = adev->vcn.inst->external.cmd =
		SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD);
	adev->vcn.internal.nop = adev->vcn.inst->external.nop =
		SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP);

	for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
		ring = &adev->vcn.inst->ring_enc[i];
		sprintf(ring->name, "vcn_enc%d", i);
		r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0);
		if (r)
			return r;
	}

	adev->vcn.pause_dpg_mode = vcn_v1_0_pause_dpg_mode;

	r = jpeg_v1_0_sw_init(handle);

	return r;
}
/**
 * vcn_v1_0_sw_fini - sw fini for VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * VCN suspend and free up sw allocation
 */
static int vcn_v1_0_sw_fini(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vcn_suspend(adev);
	if (r)
		return r;

	jpeg_v1_0_sw_fini(handle);

	r = amdgpu_vcn_sw_fini(adev);

	return r;
}
/**
 * vcn_v1_0_hw_init - start and test VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int vcn_v1_0_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
	int i, r;

	r = amdgpu_ring_test_helper(ring);
	if (r)
		goto done;

	for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
		ring = &adev->vcn.inst->ring_enc[i];
		r = amdgpu_ring_test_helper(ring);
		if (r)
			goto done;
	}

	ring = &adev->jpeg.inst->ring_dec;
	r = amdgpu_ring_test_helper(ring);
	if (r)
		goto done;

done:
	if (!r)
		DRM_INFO("VCN decode and encode initialized successfully(under %s).\n",
			(adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ? "DPG Mode" : "SPG Mode");

	return r;
}
/**
 * vcn_v1_0_hw_fini - stop the hardware block
 *
 * @handle: amdgpu_device pointer
 *
 * Stop the VCN block, mark ring as not ready any more
 */
static int vcn_v1_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;

	if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
		RREG32_SOC15(VCN, 0, mmUVD_STATUS))
		vcn_v1_0_set_powergating_state(adev, AMD_PG_STATE_GATE);

	ring->sched.ready = false;

	return 0;
}
/**
 * vcn_v1_0_suspend - suspend VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * HW fini and suspend VCN block
 */
static int vcn_v1_0_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = vcn_v1_0_hw_fini(adev);
	if (r)
		return r;

	r = amdgpu_vcn_suspend(adev);

	return r;
}
/**
 * vcn_v1_0_resume - resume VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Resume firmware and hw init VCN block
 */
static int vcn_v1_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vcn_resume(adev);
	if (r)
		return r;

	r = vcn_v1_0_hw_init(adev);

	return r;
}
/**
 * vcn_v1_0_mc_resume_spg_mode - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the VCN memory controller know its offsets
 */
static void vcn_v1_0_mc_resume_spg_mode(struct amdgpu_device *adev)
{
	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
	uint32_t offset;

	/* cache window 0: fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			     (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_lo));
		WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			     (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_hi));
		WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0, 0);
		offset = 0;
	} else {
		WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			lower_32_bits(adev->vcn.inst->gpu_addr));
		WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			upper_32_bits(adev->vcn.inst->gpu_addr));
		offset = size;
		WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0,
			     AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
	}

	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE0, size);

	/* cache window 1: stack */
	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
		     lower_32_bits(adev->vcn.inst->gpu_addr + offset));
	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
		     upper_32_bits(adev->vcn.inst->gpu_addr + offset));
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1, 0);
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE);

	/* cache window 2: context */
	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
		     lower_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
		     upper_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2, 0);
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);

	WREG32_SOC15(UVD, 0, mmUVD_UDEC_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config);
	WREG32_SOC15(UVD, 0, mmUVD_UDEC_DB_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config);
	WREG32_SOC15(UVD, 0, mmUVD_UDEC_DBW_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config);
	WREG32_SOC15(UVD, 0, mmUVD_UDEC_DBW_UV_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config);
	WREG32_SOC15(UVD, 0, mmUVD_MIF_CURR_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config);
	WREG32_SOC15(UVD, 0, mmUVD_MIF_CURR_UV_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config);
	WREG32_SOC15(UVD, 0, mmUVD_MIF_RECON1_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config);
	WREG32_SOC15(UVD, 0, mmUVD_MIF_RECON1_UV_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config);
	WREG32_SOC15(UVD, 0, mmUVD_MIF_REF_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config);
	WREG32_SOC15(UVD, 0, mmUVD_MIF_REF_UV_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config);
	WREG32_SOC15(UVD, 0, mmUVD_JPEG_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config);
	WREG32_SOC15(UVD, 0, mmUVD_JPEG_UV_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config);
}
static void vcn_v1_0_mc_resume_dpg_mode(struct amdgpu_device *adev)
{
	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
	uint32_t offset;

	/* cache window 0: fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			     (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_lo),
			     0xFFFFFFFF, 0);
		WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			     (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_hi),
			     0xFFFFFFFF, 0);
		WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0, 0,
			     0xFFFFFFFF, 0);
		offset = 0;
	} else {
		WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			lower_32_bits(adev->vcn.inst->gpu_addr), 0xFFFFFFFF, 0);
		WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			upper_32_bits(adev->vcn.inst->gpu_addr), 0xFFFFFFFF, 0);
		offset = size;
		WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0,
			     AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0xFFFFFFFF, 0);
	}

	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_SIZE0, size, 0xFFFFFFFF, 0);

	/* cache window 1: stack */
	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
		     lower_32_bits(adev->vcn.inst->gpu_addr + offset), 0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
		     upper_32_bits(adev->vcn.inst->gpu_addr + offset), 0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1, 0,
			     0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE,
			     0xFFFFFFFF, 0);

	/* cache window 2: context */
	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
		     lower_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE),
			     0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
		     upper_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE),
			     0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2, 0, 0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE,
			     0xFFFFFFFF, 0);

	/* VCN global tiling registers */
	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_UDEC_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_UDEC_DB_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_UDEC_DBW_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_UDEC_DBW_UV_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MIF_CURR_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MIF_CURR_UV_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MIF_RECON1_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MIF_RECON1_UV_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MIF_REF_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MIF_REF_UV_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
}
/**
 * vcn_v1_0_disable_clock_gating - disable VCN clock gating
 *
 * @adev: amdgpu_device pointer
 *
 * Disable clock gating for VCN block
 */
static void vcn_v1_0_disable_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data;

	/* JPEG disable CGC */
	data = RREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL);

	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		data &= ~JPEG_CGC_CTRL__DYN_CLOCK_MODE_MASK;

	data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE);
	data &= ~(JPEG_CGC_GATE__JPEG_MASK | JPEG_CGC_GATE__JPEG2_MASK);
	WREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE, data);

	/* UVD disable CGC */
	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;

	data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_GATE);
	data &= ~(UVD_CGC_GATE__SYS_MASK
		| UVD_CGC_GATE__UDEC_MASK
		| UVD_CGC_GATE__MPEG2_MASK
		| UVD_CGC_GATE__REGS_MASK
		| UVD_CGC_GATE__RBC_MASK
		| UVD_CGC_GATE__LMI_MC_MASK
		| UVD_CGC_GATE__LMI_UMC_MASK
		| UVD_CGC_GATE__IDCT_MASK
		| UVD_CGC_GATE__MPRD_MASK
		| UVD_CGC_GATE__MPC_MASK
		| UVD_CGC_GATE__LBSI_MASK
		| UVD_CGC_GATE__LRBBM_MASK
		| UVD_CGC_GATE__UDEC_RE_MASK
		| UVD_CGC_GATE__UDEC_CM_MASK
		| UVD_CGC_GATE__UDEC_IT_MASK
		| UVD_CGC_GATE__UDEC_DB_MASK
		| UVD_CGC_GATE__UDEC_MP_MASK
		| UVD_CGC_GATE__WCB_MASK
		| UVD_CGC_GATE__VCPU_MASK
		| UVD_CGC_GATE__SCPU_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_CGC_GATE, data);

	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
	data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK
		| UVD_CGC_CTRL__UDEC_CM_MODE_MASK
		| UVD_CGC_CTRL__UDEC_IT_MODE_MASK
		| UVD_CGC_CTRL__UDEC_DB_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MP_MODE_MASK
		| UVD_CGC_CTRL__SYS_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MODE_MASK
		| UVD_CGC_CTRL__MPEG2_MODE_MASK
		| UVD_CGC_CTRL__REGS_MODE_MASK
		| UVD_CGC_CTRL__RBC_MODE_MASK
		| UVD_CGC_CTRL__LMI_MC_MODE_MASK
		| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
		| UVD_CGC_CTRL__IDCT_MODE_MASK
		| UVD_CGC_CTRL__MPRD_MODE_MASK
		| UVD_CGC_CTRL__MPC_MODE_MASK
		| UVD_CGC_CTRL__LBSI_MODE_MASK
		| UVD_CGC_CTRL__LRBBM_MODE_MASK
		| UVD_CGC_CTRL__WCB_MODE_MASK
		| UVD_CGC_CTRL__VCPU_MODE_MASK
		| UVD_CGC_CTRL__SCPU_MODE_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);

	/* turn on */
	data = RREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_GATE);
	data |= (UVD_SUVD_CGC_GATE__SRE_MASK
		| UVD_SUVD_CGC_GATE__SIT_MASK
		| UVD_SUVD_CGC_GATE__SMP_MASK
		| UVD_SUVD_CGC_GATE__SCM_MASK
		| UVD_SUVD_CGC_GATE__SDB_MASK
		| UVD_SUVD_CGC_GATE__SRE_H264_MASK
		| UVD_SUVD_CGC_GATE__SRE_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SIT_H264_MASK
		| UVD_SUVD_CGC_GATE__SIT_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SCM_H264_MASK
		| UVD_SUVD_CGC_GATE__SCM_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SDB_H264_MASK
		| UVD_SUVD_CGC_GATE__SDB_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SCLR_MASK
		| UVD_SUVD_CGC_GATE__UVD_SC_MASK
		| UVD_SUVD_CGC_GATE__ENT_MASK
		| UVD_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK
		| UVD_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK
		| UVD_SUVD_CGC_GATE__SITE_MASK
		| UVD_SUVD_CGC_GATE__SRE_VP9_MASK
		| UVD_SUVD_CGC_GATE__SCM_VP9_MASK
		| UVD_SUVD_CGC_GATE__SIT_VP9_DEC_MASK
		| UVD_SUVD_CGC_GATE__SDB_VP9_MASK
		| UVD_SUVD_CGC_GATE__IME_HEVC_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_GATE, data);

	data = RREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL);
	data &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
		| UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
		| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL, data);
}
/**
 * vcn_v1_0_enable_clock_gating - enable VCN clock gating
 *
 * @adev: amdgpu_device pointer
 *
 * Enable clock gating for VCN block
 */
static void vcn_v1_0_enable_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data;

	/* enable JPEG CGC */
	data = RREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL);
	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		data |= 0 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE);
	data |= (JPEG_CGC_GATE__JPEG_MASK | JPEG_CGC_GATE__JPEG2_MASK);
	WREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE, data);

	/* enable UVD CGC */
	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		data |= 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
	data |= (UVD_CGC_CTRL__UDEC_RE_MODE_MASK
		| UVD_CGC_CTRL__UDEC_CM_MODE_MASK
		| UVD_CGC_CTRL__UDEC_IT_MODE_MASK
		| UVD_CGC_CTRL__UDEC_DB_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MP_MODE_MASK
		| UVD_CGC_CTRL__SYS_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MODE_MASK
		| UVD_CGC_CTRL__MPEG2_MODE_MASK
		| UVD_CGC_CTRL__REGS_MODE_MASK
		| UVD_CGC_CTRL__RBC_MODE_MASK
		| UVD_CGC_CTRL__LMI_MC_MODE_MASK
		| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
		| UVD_CGC_CTRL__IDCT_MODE_MASK
		| UVD_CGC_CTRL__MPRD_MODE_MASK
		| UVD_CGC_CTRL__MPC_MODE_MASK
		| UVD_CGC_CTRL__LBSI_MODE_MASK
		| UVD_CGC_CTRL__LRBBM_MODE_MASK
		| UVD_CGC_CTRL__WCB_MODE_MASK
		| UVD_CGC_CTRL__VCPU_MODE_MASK
		| UVD_CGC_CTRL__SCPU_MODE_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL);
	data |= (UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
		| UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
		| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL, data);
}
static void vcn_v1_0_clock_gating_dpg_mode(struct amdgpu_device *adev, uint8_t sram_sel)
{
	uint32_t reg_data = 0;

	/* disable JPEG CGC */
	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		reg_data = 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		reg_data = 0 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	reg_data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	reg_data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15_DPG_MODE(UVD, 0, mmJPEG_CGC_CTRL, reg_data, 0xFFFFFFFF, sram_sel);

	WREG32_SOC15_DPG_MODE(UVD, 0, mmJPEG_CGC_GATE, 0, 0xFFFFFFFF, sram_sel);

	/* enable sw clock gating control */
	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		reg_data = 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		reg_data = 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	reg_data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	reg_data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	reg_data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
		 UVD_CGC_CTRL__SYS_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_MODE_MASK |
		 UVD_CGC_CTRL__MPEG2_MODE_MASK |
		 UVD_CGC_CTRL__REGS_MODE_MASK |
		 UVD_CGC_CTRL__RBC_MODE_MASK |
		 UVD_CGC_CTRL__LMI_MC_MODE_MASK |
		 UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
		 UVD_CGC_CTRL__IDCT_MODE_MASK |
		 UVD_CGC_CTRL__MPRD_MODE_MASK |
		 UVD_CGC_CTRL__MPC_MODE_MASK |
		 UVD_CGC_CTRL__LBSI_MODE_MASK |
		 UVD_CGC_CTRL__LRBBM_MODE_MASK |
		 UVD_CGC_CTRL__WCB_MODE_MASK |
		 UVD_CGC_CTRL__VCPU_MODE_MASK |
		 UVD_CGC_CTRL__SCPU_MODE_MASK);
	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_CGC_CTRL, reg_data, 0xFFFFFFFF, sram_sel);

	/* turn off clock gating */
	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_CGC_GATE, 0, 0xFFFFFFFF, sram_sel);

	/* turn on SUVD clock gating */
	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_SUVD_CGC_GATE, 1, 0xFFFFFFFF, sram_sel);

	/* turn on sw mode in UVD_SUVD_CGC_CTRL */
	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_SUVD_CGC_CTRL, 0, 0xFFFFFFFF, sram_sel);
}
static void vcn_1_0_disable_static_power_gating(struct amdgpu_device *adev)
{
	uint32_t data;
	int ret;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
		data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDIL_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDIR_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDW_PWR_CONFIG__SHIFT);

		WREG32_SOC15(VCN, 0, mmUVD_PGFSM_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, UVD_PGFSM_STATUS__UVDM_UVDU_PWR_ON, 0xFFFFFF, ret);
	} else {
		data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDIL_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDIR_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDW_PWR_CONFIG__SHIFT);
		WREG32_SOC15(VCN, 0, mmUVD_PGFSM_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, 0, 0xFFFFFFFF, ret);
	}

	/* polling UVD_PGFSM_STATUS to confirm UVDM_PWR_STATUS,
	 * UVDU_PWR_STATUS are 0 (power on) */

	data = RREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS);
	data &= ~0x103;
	if (adev->pg_flags & AMD_PG_SUPPORT_VCN)
		data |= UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON | UVD_POWER_STATUS__UVD_PG_EN_MASK;

	WREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS, data);
}
static void vcn_1_0_enable_static_power_gating(struct amdgpu_device *adev)
{
	uint32_t data;
	int ret;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
		/* Before power off, this indicator has to be turned on */
		data = RREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS);
		data &= ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK;
		data |= UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF;
		WREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS, data);

		data = (2 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDIL_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDIR_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDW_PWR_CONFIG__SHIFT);

		WREG32_SOC15(VCN, 0, mmUVD_PGFSM_CONFIG, data);

		data = (2 << UVD_PGFSM_STATUS__UVDM_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDU_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDF_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDC_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDB_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDIL_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDIR_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDTD_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDTE_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDE_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDW_PWR_STATUS__SHIFT);
		SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, data, 0xFFFFFFFF, ret);
	}
}
/**
 * vcn_v1_0_start_spg_mode - start VCN block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the VCN block
 */
static int vcn_v1_0_start_spg_mode(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
	uint32_t rb_bufsz, tmp;
	uint32_t lmi_swap_cntl;
	int i, j, r;

	/* disable byte swapping */
	lmi_swap_cntl = 0;

	vcn_1_0_disable_static_power_gating(adev);

	tmp = RREG32_SOC15(UVD, 0, mmUVD_STATUS) | UVD_STATUS__UVD_BUSY;
	WREG32_SOC15(UVD, 0, mmUVD_STATUS, tmp);

	/* disable clock gating */
	vcn_v1_0_disable_clock_gating(adev);

	/* disable interrupt */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN), 0,
			~UVD_MASTINT_EN__VCPU_EN_MASK);

	/* initialize VCN memory controller */
	tmp = RREG32_SOC15(UVD, 0, mmUVD_LMI_CTRL);
	WREG32_SOC15(UVD, 0, mmUVD_LMI_CTRL, tmp |
		UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
		UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);

#ifdef __BIG_ENDIAN
	/* swap (8 in 32) RB and IB */
	lmi_swap_cntl = 0xa;
#endif
	WREG32_SOC15(UVD, 0, mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);

	tmp = RREG32_SOC15(UVD, 0, mmUVD_MPC_CNTL);
	tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK;
	tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT;
	WREG32_SOC15(UVD, 0, mmUVD_MPC_CNTL, tmp);

	WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXA0,
		((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
		(0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
		(0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
		(0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)));

	WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXB0,
		((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
		(0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
		(0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
		(0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)));

	WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUX,
		((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
		(0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
		(0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));

	vcn_v1_0_mc_resume_spg_mode(adev);

	WREG32_SOC15(UVD, 0, mmUVD_REG_XX_MASK_1_0, 0x10);
	WREG32_SOC15(UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK_1_0,
		RREG32_SOC15(UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK_1_0) | 0x3);

	/* enable VCPU clock */
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CNTL, UVD_VCPU_CNTL__CLK_EN_MASK);

	/* boot up the VCPU */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), 0,
			~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);

	/* enable UMC */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2), 0,
			~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);

	tmp = RREG32_SOC15(UVD, 0, mmUVD_SOFT_RESET);
	tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
	tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
	WREG32_SOC15(UVD, 0, mmUVD_SOFT_RESET, tmp);

	for (i = 0; i < 10; ++i) {
		uint32_t status;

		for (j = 0; j < 100; ++j) {
			status = RREG32_SOC15(UVD, 0, mmUVD_STATUS);
			if (status & UVD_STATUS__IDLE)
				break;
			mdelay(10);
		}
		r = 0;
		if (status & UVD_STATUS__IDLE)
			break;

		DRM_ERROR("VCN decode not responding, trying to reset the VCPU!!!\n");
		WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
				UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
				~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(10);
		WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), 0,
				~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(10);
		r = -1;
	}

	if (r) {
		DRM_ERROR("VCN decode not responding, giving up!!!\n");
		return r;
	}

	/* enable master interrupt */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN),
		UVD_MASTINT_EN__VCPU_EN_MASK, ~UVD_MASTINT_EN__VCPU_EN_MASK);

	/* enable system interrupt for JRBC, TODO: move to set interrupt */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SYS_INT_EN),
		UVD_SYS_INT_EN__UVD_JRBC_EN_MASK,
		~UVD_SYS_INT_EN__UVD_JRBC_EN_MASK);

	/* clear the busy bit of UVD_STATUS */
	tmp = RREG32_SOC15(UVD, 0, mmUVD_STATUS) & ~UVD_STATUS__UVD_BUSY;
	WREG32_SOC15(UVD, 0, mmUVD_STATUS, tmp);

	/* force RBC into idle state */
	rb_bufsz = order_base_2(ring->ring_size);
	tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, tmp);

	/* set the write pointer delay */
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR_CNTL, 0);

	/* set the wb address */
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR_ADDR,
			(upper_32_bits(ring->gpu_addr) >> 2));

	/* program the RB_BASE for ring buffer */
	WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
			lower_32_bits(ring->gpu_addr));
	WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
			upper_32_bits(ring->gpu_addr));

	/* Initialize the ring buffer's read and write pointers */
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR, 0);

	WREG32_SOC15(UVD, 0, mmUVD_SCRATCH2, 0);

	ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
			lower_32_bits(ring->wptr));

	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_CNTL), 0,
			~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);

	ring = &adev->vcn.inst->ring_enc[0];
	WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr);
	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4);

	ring = &adev->vcn.inst->ring_enc[1];
	WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr);
	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4);

	jpeg_v1_0_start(adev, 0);

	return 0;
}
static int vcn_v1_0_start_dpg_mode(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
	uint32_t rb_bufsz, tmp;
	uint32_t lmi_swap_cntl;

	/* disable byte swapping */
	lmi_swap_cntl = 0;

	vcn_1_0_enable_static_power_gating(adev);

	/* enable dynamic power gating mode */
	tmp = RREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS);
	tmp |= UVD_POWER_STATUS__UVD_PG_MODE_MASK;
	tmp |= UVD_POWER_STATUS__UVD_PG_EN_MASK;
	WREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS, tmp);

	/* enable clock gating */
	vcn_v1_0_clock_gating_dpg_mode(adev, 0);

	/* enable VCPU clock */
	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
	tmp |= UVD_VCPU_CNTL__MIF_WR_LOW_THRESHOLD_BP_MASK;
	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CNTL, tmp, 0xFFFFFFFF, 0);

	/* disable interrupt */
	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MASTINT_EN,
			0, UVD_MASTINT_EN__VCPU_EN_MASK, 0);

	/* initialize VCN memory controller */
	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_CTRL,
		(8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
		UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__REQ_MODE_MASK |
		UVD_LMI_CTRL__CRC_RESET_MASK |
		UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
		0x00100000L, 0xFFFFFFFF, 0);

#ifdef __BIG_ENDIAN
	/* swap (8 in 32) RB and IB */
	lmi_swap_cntl = 0xa;
#endif
	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl, 0xFFFFFFFF, 0);

	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MPC_CNTL,
		0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT, 0xFFFFFFFF, 0);

	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MPC_SET_MUXA0,
		((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
		(0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
		(0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
		(0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)), 0xFFFFFFFF, 0);

	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MPC_SET_MUXB0,
		((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
		(0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
		(0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
		(0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)), 0xFFFFFFFF, 0);

	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MPC_SET_MUX,
		((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
		(0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
		(0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0xFFFFFFFF, 0);

	vcn_v1_0_mc_resume_dpg_mode(adev);

	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_REG_XX_MASK, 0x10, 0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK, 0x3, 0xFFFFFFFF, 0);

	/* boot up the VCPU */
	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_SOFT_RESET, 0, 0xFFFFFFFF, 0);

	/* enable UMC */
	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_CTRL2,
		0x1F << UVD_LMI_CTRL2__RE_OFLD_MIF_WR_REQ_NUM__SHIFT,
		0xFFFFFFFF, 0);

	/* enable master interrupt */
	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MASTINT_EN,
			UVD_MASTINT_EN__VCPU_EN_MASK, UVD_MASTINT_EN__VCPU_EN_MASK, 0);

	vcn_v1_0_clock_gating_dpg_mode(adev, 1);
	/* setup mmUVD_LMI_CTRL */
	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_CTRL,
		(8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
		UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__REQ_MODE_MASK |
		UVD_LMI_CTRL__CRC_RESET_MASK |
		UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
		0x00100000L, 0xFFFFFFFF, 1);

	tmp = adev->gfx.config.gb_addr_config;
	/* setup VCN global tiling registers */
	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_JPEG_ADDR_CONFIG, tmp, 0xFFFFFFFF, 1);
	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_JPEG_UV_ADDR_CONFIG, tmp, 0xFFFFFFFF, 1);

	/* enable System Interrupt for JRBC */
	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_SYS_INT_EN,
		UVD_SYS_INT_EN__UVD_JRBC_EN_MASK, 0xFFFFFFFF, 1);

	/* force RBC into idle state */
	rb_bufsz = order_base_2(ring->ring_size);
	tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, tmp);

	/* set the write pointer delay */
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR_CNTL, 0);

	/* set the wb address */
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR_ADDR,
			(upper_32_bits(ring->gpu_addr) >> 2));

	/* program the RB_BASE for ring buffer */
	WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
			lower_32_bits(ring->gpu_addr));
	WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
			upper_32_bits(ring->gpu_addr));

	/* Initialize the ring buffer's read and write pointers */
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR, 0);

	WREG32_SOC15(UVD, 0, mmUVD_SCRATCH2, 0);

	ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
			lower_32_bits(ring->wptr));

	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_CNTL), 0,
			~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);

	jpeg_v1_0_start(adev, 1);

	return 0;
}
static int vcn_v1_0_start(struct amdgpu_device *adev)
{
	int r;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
		r = vcn_v1_0_start_dpg_mode(adev);
	else
		r = vcn_v1_0_start_spg_mode(adev);

	return r;
}
/**
 * vcn_v1_0_stop_spg_mode - stop VCN block
 *
 * @adev: amdgpu_device pointer
 *
 * stop the VCN block
 */
static int vcn_v1_0_stop_spg_mode(struct amdgpu_device *adev)
{
	int ret_code, tmp;

	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_STATUS, UVD_STATUS__IDLE, 0x7, ret_code);

	tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
		UVD_LMI_STATUS__READ_CLEAN_MASK |
		UVD_LMI_STATUS__WRITE_CLEAN_MASK |
		UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_LMI_STATUS, tmp, tmp, ret_code);

	/* put VCPU into reset */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
		UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
		~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);

	tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
		UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_LMI_STATUS, tmp, tmp, ret_code);

	/* disable VCPU clock */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CNTL), 0,
		~UVD_VCPU_CNTL__CLK_EN_MASK);

	/* reset LMI UMC/LMI */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
		UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK,
		~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);

	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
		UVD_SOFT_RESET__LMI_SOFT_RESET_MASK,
		~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK);

	WREG32_SOC15(UVD, 0, mmUVD_STATUS, 0);

	vcn_v1_0_enable_clock_gating(adev);
	vcn_1_0_enable_static_power_gating(adev);

	return 0;
}
static int vcn_v1_0_stop_dpg_mode(struct amdgpu_device *adev)
{
	int ret_code = 0;
	uint32_t tmp;

	/* Wait for power status to be UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF */
	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
			UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF,
			UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);

	/* wait for read ptr to be equal to write ptr */
	tmp = RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR);
	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RB_RPTR, tmp, 0xFFFFFFFF, ret_code);

	tmp = RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2);
	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RB_RPTR2, tmp, 0xFFFFFFFF, ret_code);

	tmp = RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR);
	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_JRBC_RB_RPTR, tmp, 0xFFFFFFFF, ret_code);

	tmp = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR) & 0x7FFFFFFF;
	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RBC_RB_RPTR, tmp, 0xFFFFFFFF, ret_code);

	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
			UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF,
			UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);

	/* disable dynamic power gating mode */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS), 0,
			~UVD_POWER_STATUS__UVD_PG_MODE_MASK);

	return 0;
}
static int vcn_v1_0_stop(struct amdgpu_device *adev)
{
	int r;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
		r = vcn_v1_0_stop_dpg_mode(adev);
	else
		r = vcn_v1_0_stop_spg_mode(adev);

	return r;
}
static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device *adev,
				int inst_idx, struct dpg_pause_state *new_state)
{
	int ret_code;
	uint32_t reg_data = 0;
	uint32_t reg_data2 = 0;
	struct amdgpu_ring *ring;

	/* pause/unpause if state is changed */
	if (adev->vcn.pause_state.fw_based != new_state->fw_based) {
		DRM_DEBUG("dpg pause state changed %d:%d -> %d:%d",
			adev->vcn.pause_state.fw_based, adev->vcn.pause_state.jpeg,
			new_state->fw_based, new_state->jpeg);

		reg_data = RREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE) &
			(~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);

		if (new_state->fw_based == VCN_DPG_STATE__PAUSE) {
			ret_code = 0;

			if (!(reg_data & UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK))
				SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
						   UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF,
						   UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);

			if (!ret_code) {
				/* pause DPG non-jpeg */
				reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
				WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
				SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_DPG_PAUSE,
						   UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK,
						   UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK, ret_code);

				/* Restore */
				ring = &adev->vcn.inst->ring_enc[0];
				WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr);
				WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
				WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4);
				WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
				WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));

				ring = &adev->vcn.inst->ring_enc[1];
				WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr);
				WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
				WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4);
				WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
				WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));

				ring = &adev->vcn.inst->ring_dec;
				WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
						   RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2) & 0x7FFFFFFF);
				SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
						   UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON,
						   UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
			}
		} else {
			/* unpause dpg non-jpeg, no need to wait */
			reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
			WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
		}
		adev->vcn.pause_state.fw_based = new_state->fw_based;
	}

	/* pause/unpause if state is changed */
	if (adev->vcn.pause_state.jpeg != new_state->jpeg) {
		DRM_DEBUG("dpg pause state changed %d:%d -> %d:%d",
			adev->vcn.pause_state.fw_based, adev->vcn.pause_state.jpeg,
			new_state->fw_based, new_state->jpeg);

		reg_data = RREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE) &
			(~UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK);

		if (new_state->jpeg == VCN_DPG_STATE__PAUSE) {
			ret_code = 0;

			if (!(reg_data & UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK))
				SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
						   UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF,
						   UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);

			if (!ret_code) {
				/* Make sure JPRG Snoop is disabled before sending the pause */
				reg_data2 = RREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS);
				reg_data2 |= UVD_POWER_STATUS__JRBC_SNOOP_DIS_MASK;
				WREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS, reg_data2);

				/* pause DPG jpeg */
				reg_data |= UVD_DPG_PAUSE__JPEG_PAUSE_DPG_REQ_MASK;
				WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
				SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_DPG_PAUSE,
						   UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK,
						   UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK, ret_code);

				/* Restore */
				ring = &adev->jpeg.inst->ring_dec;
				WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_VMID, 0);
				WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL,
						   UVD_JRBC_RB_CNTL__RB_NO_FETCH_MASK |
						   UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK);
				WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_LOW,
						   lower_32_bits(ring->gpu_addr));
				WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_HIGH,
						   upper_32_bits(ring->gpu_addr));
				WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_RPTR, ring->wptr);
				WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR, ring->wptr);
				WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL,
						   UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK);

				ring = &adev->vcn.inst->ring_dec;
				WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
						   RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2) & 0x7FFFFFFF);
				SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
						   UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON,
						   UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
			}
		} else {
			/* unpause dpg jpeg, no need to wait */
			reg_data &= ~UVD_DPG_PAUSE__JPEG_PAUSE_DPG_REQ_MASK;
			WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
		}
		adev->vcn.pause_state.jpeg = new_state->jpeg;
	}

	return 0;
}
static bool vcn_v1_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return (RREG32_SOC15(VCN, 0, mmUVD_STATUS) == UVD_STATUS__IDLE);
}
static int vcn_v1_0_wait_for_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret;

	SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_STATUS, UVD_STATUS__IDLE,
		UVD_STATUS__IDLE, ret);

	return ret;
}
static int vcn_v1_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_CG_STATE_GATE);

	if (enable) {
		/* wait for STATUS to clear */
		if (!vcn_v1_0_is_idle(handle))
			return -EBUSY;
		vcn_v1_0_enable_clock_gating(adev);
	} else {
		/* disable HW gating and enable Sw gating */
		vcn_v1_0_disable_clock_gating(adev);
	}
	return 0;
}
/**
 * vcn_v1_0_dec_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t vcn_v1_0_dec_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
}
/**
 * vcn_v1_0_dec_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t vcn_v1_0_dec_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR);
}
/**
 * vcn_v1_0_dec_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void vcn_v1_0_dec_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
		WREG32_SOC15(UVD, 0, mmUVD_SCRATCH2,
			lower_32_bits(ring->wptr) | 0x80000000);

	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
}
/**
 * vcn_v1_0_dec_ring_insert_start - insert a start command
 *
 * @ring: amdgpu_ring pointer
 *
 * Write a start command to the ring.
 */
static void vcn_v1_0_dec_ring_insert_start(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, VCN_DEC_CMD_PACKET_START << 1);
}
/**
 * vcn_v1_0_dec_ring_insert_end - insert an end command
 *
 * @ring: amdgpu_ring pointer
 *
 * Write an end command to the ring.
 */
static void vcn_v1_0_dec_ring_insert_end(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, VCN_DEC_CMD_PACKET_END << 1);
}
/**
 * vcn_v1_0_dec_ring_emit_fence - emit a fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @fence: fence to emit
 *
 * Write a fence and a trap command to the ring.
 */
static void vcn_v1_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				     unsigned flags)
{
	struct amdgpu_device *adev = ring->adev;

	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID), 0));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, addr & 0xffffffff);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, VCN_DEC_CMD_FENCE << 1);

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, VCN_DEC_CMD_TRAP << 1);
}
/**
 * vcn_v1_0_dec_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @ib: indirect buffer to execute
 *
 * Write ring commands to execute the indirect buffer
 */
static void vcn_v1_0_dec_ring_emit_ib(struct amdgpu_ring *ring,
					struct amdgpu_job *job,
					struct amdgpu_ib *ib,
					uint32_t flags)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_VMID), 0));
	amdgpu_ring_write(ring, vmid);

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_64BIT_BAR_LOW), 0));
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH), 0));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_IB_SIZE), 0));
	amdgpu_ring_write(ring, ib->length_dw);
}
static void vcn_v1_0_dec_ring_emit_reg_wait(struct amdgpu_ring *ring,
					    uint32_t reg, uint32_t val,
					    uint32_t mask)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
	amdgpu_ring_write(ring, val);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GP_SCRATCH8), 0));
	amdgpu_ring_write(ring, mask);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, VCN_DEC_CMD_REG_READ_COND_WAIT << 1);
}
static void vcn_v1_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring,
					    unsigned vmid, uint64_t pd_addr)
{
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
	uint32_t data0, data1, mask;

	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	/* wait for register write */
	data0 = hub->ctx0_ptb_addr_lo32 + vmid * 2;
	data1 = lower_32_bits(pd_addr);
	mask = 0xffffffff;
	vcn_v1_0_dec_ring_emit_reg_wait(ring, data0, data1, mask);
}
static void vcn_v1_0_dec_ring_emit_wreg(struct amdgpu_ring *ring,
					uint32_t reg, uint32_t val)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
	amdgpu_ring_write(ring, val);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, VCN_DEC_CMD_WRITE_REG << 1);
}
/**
 * vcn_v1_0_enc_ring_get_rptr - get enc read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc read pointer
 */
static uint64_t vcn_v1_0_enc_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.inst->ring_enc[0])
		return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR);
	else
		return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2);
}

/**
 * vcn_v1_0_enc_ring_get_wptr - get enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc write pointer
 */
static uint64_t vcn_v1_0_enc_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.inst->ring_enc[0])
		return RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR);
	else
		return RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2);
}
/**
 * vcn_v1_0_enc_ring_set_wptr - set enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the enc write pointer to the hardware
 */
static void vcn_v1_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.inst->ring_enc[0])
		WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR,
			lower_32_bits(ring->wptr));
	else
		WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2,
			lower_32_bits(ring->wptr));
}
/**
 * vcn_v1_0_enc_ring_emit_fence - emit an enc fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @fence: fence to emit
 *
 * Write an enc fence and a trap command to the ring.
 */
static void vcn_v1_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
			u64 seq, unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, VCN_ENC_CMD_FENCE);
	amdgpu_ring_write(ring, addr);
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, VCN_ENC_CMD_TRAP);
}
static void vcn_v1_0_enc_ring_insert_end(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, VCN_ENC_CMD_END);
}
/**
 * vcn_v1_0_enc_ring_emit_ib - enc execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @ib: indirect buffer to execute
 *
 * Write enc ring commands to execute the indirect buffer
 */
static void vcn_v1_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
					struct amdgpu_job *job,
					struct amdgpu_ib *ib,
					uint32_t flags)
{
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);

	amdgpu_ring_write(ring, VCN_ENC_CMD_IB);
	amdgpu_ring_write(ring, vmid);
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
}
static void vcn_v1_0_enc_ring_emit_reg_wait(struct amdgpu_ring *ring,
					    uint32_t reg, uint32_t val,
					    uint32_t mask)
{
	amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WAIT);
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring, mask);
	amdgpu_ring_write(ring, val);
}
static void vcn_v1_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
					    unsigned int vmid, uint64_t pd_addr)
{
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];

	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	/* wait for reg writes */
	vcn_v1_0_enc_ring_emit_reg_wait(ring, hub->ctx0_ptb_addr_lo32 + vmid * 2,
					lower_32_bits(pd_addr), 0xffffffff);
}
static void vcn_v1_0_enc_ring_emit_wreg(struct amdgpu_ring *ring,
					uint32_t reg, uint32_t val)
{
	amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WRITE);
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring, val);
}
static int vcn_v1_0_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	return 0;
}
static int vcn_v1_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("IH: VCN TRAP\n");

	switch (entry->src_id) {
	case 124:
		amdgpu_fence_process(&adev->vcn.inst->ring_dec);
		break;
	case 119:
		amdgpu_fence_process(&adev->vcn.inst->ring_enc[0]);
		break;
	case 120:
		amdgpu_fence_process(&adev->vcn.inst->ring_enc[1]);
		break;
	default:
		DRM_ERROR("Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data[0]);
		break;
	}

	return 0;
}

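/**
 * vcn_v1_0_dec_ring_insert_nop - insert NOP packets on the dec ring
 *
 * @ring: amdgpu_ring pointer
 * @count: number of padding dwords
 *
 * The dec ring consumes register/value pairs, so padding is emitted
 * as UVD_NO_OP writes and must stay two-dword aligned.
 */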
static void vcn_v1_0_dec_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
	struct amdgpu_device *adev = ring->adev;
	int i;

	WARN_ON(ring->wptr % 2 || count % 2);

	for (i = 0; i < count / 2; i++) {
		amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP), 0));
		amdgpu_ring_write(ring, 0);
	}
}

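/**
 * vcn_v1_0_set_powergating_state - gate or ungate the VCN block
 *
 * @handle: amdgpu_device pointer
 * @state: AMD_PG_STATE_GATE or AMD_PG_STATE_UNGATE
 *
 * Stop or (re)start the block to match the requested state; the
 * cached state is only updated when the transition succeeds.
 */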
static int vcn_v1_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	/* This doesn't actually powergate the VCN block.
	 * That's done in the dpm code via the SMC.  This
	 * just re-inits the block as necessary.  The actual
	 * gating still happens in the dpm code.  We should
	 * revisit this when there is a cleaner line between
	 * the smc and the hw blocks.
	 */
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (state == adev->vcn.cur_state)
		return 0;

	if (state == AMD_PG_STATE_GATE)
		ret = vcn_v1_0_stop(adev);
	else
		ret = vcn_v1_0_start(adev);

	if (!ret)
		adev->vcn.cur_state = state;

	return ret;
}

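/**
 * vcn_v1_0_idle_work_handler - idle the VCN block when it goes unused
 *
 * @work: pointer to the embedded delayed work
 *
 * Count the fences still outstanding on the dec, enc and JPEG rings,
 * pause or unpause DPG mode to match, and powergate the block once no
 * work remains; otherwise reschedule the work item.
 */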
static void vcn_v1_0_idle_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, vcn.idle_work.work);
	unsigned int fences = 0, i;

	for (i = 0; i < adev->vcn.num_enc_rings; ++i)
		fences += amdgpu_fence_count_emitted(&adev->vcn.inst->ring_enc[i]);

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
		struct dpg_pause_state new_state;

		if (fences)
			new_state.fw_based = VCN_DPG_STATE__PAUSE;
		else
			new_state.fw_based = VCN_DPG_STATE__UNPAUSE;

		if (amdgpu_fence_count_emitted(&adev->jpeg.inst->ring_dec))
			new_state.jpeg = VCN_DPG_STATE__PAUSE;
		else
			new_state.jpeg = VCN_DPG_STATE__UNPAUSE;

		adev->vcn.pause_dpg_mode(adev, 0, &new_state);
	}

	fences += amdgpu_fence_count_emitted(&adev->jpeg.inst->ring_dec);
	fences += amdgpu_fence_count_emitted(&adev->vcn.inst->ring_dec);

	if (fences == 0) {
		amdgpu_gfx_off_ctrl(adev, true);
		if (adev->pm.dpm_enabled)
			amdgpu_dpm_enable_uvd(adev, false);
		else
			amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
			       AMD_PG_STATE_GATE);
	} else {
		schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
	}
}

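/**
 * vcn_v1_0_ring_begin_use - power the block up before ring use
 *
 * @ring: amdgpu_ring pointer
 *
 * Cancel the pending idle work, ungating the block again if the work
 * had already gated it, and in DPG mode pause the state matching the
 * ring about to be used.
 */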
void vcn_v1_0_ring_begin_use(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	bool set_clocks = !cancel_delayed_work_sync(&adev->vcn.idle_work);

	if (set_clocks) {
		amdgpu_gfx_off_ctrl(adev, false);
		if (adev->pm.dpm_enabled)
			amdgpu_dpm_enable_uvd(adev, true);
		else
			amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
			       AMD_PG_STATE_UNGATE);
	}

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
		struct dpg_pause_state new_state;
		unsigned int fences = 0, i;

		for (i = 0; i < adev->vcn.num_enc_rings; ++i)
			fences += amdgpu_fence_count_emitted(&adev->vcn.inst->ring_enc[i]);

		if (fences)
			new_state.fw_based = VCN_DPG_STATE__PAUSE;
		else
			new_state.fw_based = VCN_DPG_STATE__UNPAUSE;

		if (amdgpu_fence_count_emitted(&adev->jpeg.inst->ring_dec))
			new_state.jpeg = VCN_DPG_STATE__PAUSE;
		else
			new_state.jpeg = VCN_DPG_STATE__UNPAUSE;

		if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC)
			new_state.fw_based = VCN_DPG_STATE__PAUSE;
		else if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG)
			new_state.jpeg = VCN_DPG_STATE__PAUSE;

		adev->vcn.pause_dpg_mode(adev, 0, &new_state);
	}
}

static const struct amd_ip_funcs vcn_v1_0_ip_funcs = {
	.name = "vcn_v1_0",
	.early_init = vcn_v1_0_early_init,
	.late_init = NULL,
	.sw_init = vcn_v1_0_sw_init,
	.sw_fini = vcn_v1_0_sw_fini,
	.hw_init = vcn_v1_0_hw_init,
	.hw_fini = vcn_v1_0_hw_fini,
	.suspend = vcn_v1_0_suspend,
	.resume = vcn_v1_0_resume,
	.is_idle = vcn_v1_0_is_idle,
	.wait_for_idle = vcn_v1_0_wait_for_idle,
	.check_soft_reset = NULL /* vcn_v1_0_check_soft_reset */,
	.pre_soft_reset = NULL /* vcn_v1_0_pre_soft_reset */,
	.soft_reset = NULL /* vcn_v1_0_soft_reset */,
	.post_soft_reset = NULL /* vcn_v1_0_post_soft_reset */,
	.set_clockgating_state = vcn_v1_0_set_clockgating_state,
	.set_powergating_state = vcn_v1_0_set_powergating_state,
};

static const struct amdgpu_ring_funcs vcn_v1_0_dec_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_DEC,
	.align_mask = 0xf,
	.support_64bit_ptrs = false,
	.no_user_fence = true,
	.vmhub = AMDGPU_MMHUB_0,
	.get_rptr = vcn_v1_0_dec_ring_get_rptr,
	.get_wptr = vcn_v1_0_dec_ring_get_wptr,
	.set_wptr = vcn_v1_0_dec_ring_set_wptr,
	.emit_frame_size =
		6 + 6 + /* hdp invalidate / flush */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
		8 + /* vcn_v1_0_dec_ring_emit_vm_flush */
		14 + 14 + /* vcn_v1_0_dec_ring_emit_fence x2 vm fence */
		6,
	.emit_ib_size = 8, /* vcn_v1_0_dec_ring_emit_ib */
	.emit_ib = vcn_v1_0_dec_ring_emit_ib,
	.emit_fence = vcn_v1_0_dec_ring_emit_fence,
	.emit_vm_flush = vcn_v1_0_dec_ring_emit_vm_flush,
	.test_ring = amdgpu_vcn_dec_ring_test_ring,
	.test_ib = amdgpu_vcn_dec_ring_test_ib,
	.insert_nop = vcn_v1_0_dec_ring_insert_nop,
	.insert_start = vcn_v1_0_dec_ring_insert_start,
	.insert_end = vcn_v1_0_dec_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = vcn_v1_0_ring_begin_use,
	.end_use = amdgpu_vcn_ring_end_use,
	.emit_wreg = vcn_v1_0_dec_ring_emit_wreg,
	.emit_reg_wait = vcn_v1_0_dec_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

static const struct amdgpu_ring_funcs vcn_v1_0_enc_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_ENC,
	.align_mask = 0x3f,
	.nop = VCN_ENC_CMD_NO_OP,
	.support_64bit_ptrs = false,
	.no_user_fence = true,
	.vmhub = AMDGPU_MMHUB_0,
	.get_rptr = vcn_v1_0_enc_ring_get_rptr,
	.get_wptr = vcn_v1_0_enc_ring_get_wptr,
	.set_wptr = vcn_v1_0_enc_ring_set_wptr,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
		4 + /* vcn_v1_0_enc_ring_emit_vm_flush */
		5 + 5 + /* vcn_v1_0_enc_ring_emit_fence x2 vm fence */
		1, /* vcn_v1_0_enc_ring_insert_end */
	.emit_ib_size = 5, /* vcn_v1_0_enc_ring_emit_ib */
	.emit_ib = vcn_v1_0_enc_ring_emit_ib,
	.emit_fence = vcn_v1_0_enc_ring_emit_fence,
	.emit_vm_flush = vcn_v1_0_enc_ring_emit_vm_flush,
	.test_ring = amdgpu_vcn_enc_ring_test_ring,
	.test_ib = amdgpu_vcn_enc_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.insert_end = vcn_v1_0_enc_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = vcn_v1_0_ring_begin_use,
	.end_use = amdgpu_vcn_ring_end_use,
	.emit_wreg = vcn_v1_0_enc_ring_emit_wreg,
	.emit_reg_wait = vcn_v1_0_enc_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

static void vcn_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev)
{
	adev->vcn.inst->ring_dec.funcs = &vcn_v1_0_dec_ring_vm_funcs;
	DRM_INFO("VCN decode is enabled in VM mode\n");
}

static void vcn_v1_0_set_enc_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->vcn.num_enc_rings; ++i)
		adev->vcn.inst->ring_enc[i].funcs = &vcn_v1_0_enc_ring_vm_funcs;

	DRM_INFO("VCN encode is enabled in VM mode\n");
}

static const struct amdgpu_irq_src_funcs vcn_v1_0_irq_funcs = {
	.set = vcn_v1_0_set_interrupt_state,
	.process = vcn_v1_0_process_interrupt,
};

static void vcn_v1_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->vcn.inst->irq.num_types = adev->vcn.num_enc_rings + 2;
	adev->vcn.inst->irq.funcs = &vcn_v1_0_irq_funcs;
}

const struct amdgpu_ip_block_version vcn_v1_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_VCN,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &vcn_v1_0_ip_funcs,
};