/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/firmware.h>

#include "amdgpu.h"
#include "amdgpu_vcn.h"
#include "soc15.h"
#include "soc15d.h"
#include "amdgpu_pm.h"
#include "amdgpu_psp.h"
#include "vcn_v2_0.h"

#include "vcn/vcn_2_0_0_offset.h"
#include "vcn/vcn_2_0_0_sh_mask.h"
#include "ivsrcid/vcn/irqsrcs_vcn_2_0.h"
#define mmUVD_CONTEXT_ID_INTERNAL_OFFSET			0x1fd
#define mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET			0x503
#define mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET			0x504
#define mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET			0x505
#define mmUVD_NO_OP_INTERNAL_OFFSET				0x53f
#define mmUVD_GP_SCRATCH8_INTERNAL_OFFSET			0x54a
#define mmUVD_SCRATCH9_INTERNAL_OFFSET				0xc01d

#define mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET			0x1e1
#define mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET		0x5a6
#define mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET		0x5a7
#define mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET			0x1e2
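
/*
 * Note: the *_INTERNAL_OFFSET values above are VCPU-visible register indices
 * that get embedded in decode ring packets (see the PACKET0 writes below);
 * they are not the SOC15 MMIO offsets the driver uses for direct register
 * access via the external.* entries set up in sw_init.
 */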
static void vcn_v2_0_set_dec_ring_funcs(struct amdgpu_device *adev);
static void vcn_v2_0_set_enc_ring_funcs(struct amdgpu_device *adev);
static void vcn_v2_0_set_irq_funcs(struct amdgpu_device *adev);
static int vcn_v2_0_set_powergating_state(void *handle,
				enum amd_powergating_state state);
static int vcn_v2_0_pause_dpg_mode(struct amdgpu_device *adev,
				int inst_idx, struct dpg_pause_state *new_state);
/**
 * vcn_v2_0_early_init - set function pointers
 *
 * @handle: amdgpu_device pointer
 *
 * Set ring and irq function pointers
 */
static int vcn_v2_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->vcn.num_vcn_inst = 1;
	adev->vcn.num_enc_rings = 2;

	vcn_v2_0_set_dec_ring_funcs(adev);
	vcn_v2_0_set_enc_ring_funcs(adev);
	vcn_v2_0_set_irq_funcs(adev);

	return 0;
}
/**
 * vcn_v2_0_sw_init - sw init for VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Load firmware and sw initialization
 */
static int vcn_v2_0_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	int i, r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* VCN DEC TRAP */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
			      VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT,
			      &adev->vcn.inst->irq);
	if (r)
		return r;

	/* VCN ENC TRAP */
	for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
				      i + VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE,
				      &adev->vcn.inst->irq);
		if (r)
			return r;
	}

	r = amdgpu_vcn_sw_init(adev);
	if (r)
		return r;
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		const struct common_firmware_header *hdr;

		hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
		adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].ucode_id = AMDGPU_UCODE_ID_VCN;
		adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].fw = adev->vcn.fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
		DRM_INFO("PSP loading VCN firmware\n");
	}

	r = amdgpu_vcn_resume(adev);
	if (r)
		return r;
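
	/*
	 * The decode ring is kicked through a doorbell rather than MMIO
	 * writes to UVD_RBC_RB_WPTR.  The index comes from the per-ASIC
	 * doorbell layout; the "<< 1" below follows the usual SOC15
	 * convention of converting the assigned 64-bit doorbell slot into
	 * a 32-bit doorbell index for the NBIO range programmed in hw_init.
	 */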
	ring = &adev->vcn.inst->ring_dec;

	ring->use_doorbell = true;
	ring->doorbell_index = adev->doorbell_index.vcn.vcn_ring0_1 << 1;

	sprintf(ring->name, "vcn_dec");
	r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0);
	if (r)
		return r;
	adev->vcn.internal.context_id = mmUVD_CONTEXT_ID_INTERNAL_OFFSET;
	adev->vcn.internal.ib_vmid = mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET;
	adev->vcn.internal.ib_bar_low = mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET;
	adev->vcn.internal.ib_bar_high = mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET;
	adev->vcn.internal.ib_size = mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET;
	adev->vcn.internal.gp_scratch8 = mmUVD_GP_SCRATCH8_INTERNAL_OFFSET;

	adev->vcn.internal.scratch9 = mmUVD_SCRATCH9_INTERNAL_OFFSET;
	adev->vcn.inst->external.scratch9 = SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9);
	adev->vcn.internal.data0 = mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET;
	adev->vcn.inst->external.data0 = SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0);
	adev->vcn.internal.data1 = mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET;
	adev->vcn.inst->external.data1 = SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1);
	adev->vcn.internal.cmd = mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET;
	adev->vcn.inst->external.cmd = SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD);
	adev->vcn.internal.nop = mmUVD_NO_OP_INTERNAL_OFFSET;
	adev->vcn.inst->external.nop = SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP);
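
	/*
	 * Each encode ring gets its own doorbell; per the computation
	 * below they sit right after the pair of 32-bit doorbell slots
	 * used for the decode ring, hence the "+ 2 + i".
	 */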
	for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
		ring = &adev->vcn.inst->ring_enc[i];
		ring->use_doorbell = true;
		ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + i;
		sprintf(ring->name, "vcn_enc%d", i);
		r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0);
		if (r)
			return r;
	}

	adev->vcn.pause_dpg_mode = vcn_v2_0_pause_dpg_mode;

	return 0;
}
/**
 * vcn_v2_0_sw_fini - sw fini for VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * VCN suspend and free up sw allocation
 */
static int vcn_v2_0_sw_fini(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vcn_suspend(adev);
	if (r)
		return r;

	r = amdgpu_vcn_sw_fini(adev);

	return r;
}
/**
 * vcn_v2_0_hw_init - start and test VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int vcn_v2_0_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
	int i, r;

	adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
					     ring->doorbell_index, 0);

	r = amdgpu_ring_test_helper(ring);
	if (r)
		goto done;

	for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
		ring = &adev->vcn.inst->ring_enc[i];
		r = amdgpu_ring_test_helper(ring);
		if (r)
			goto done;
	}

done:
	if (!r)
		DRM_INFO("VCN decode and encode initialized successfully (under %s).\n",
			(adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ? "DPG Mode" : "SPG Mode");

	return r;
}
/**
 * vcn_v2_0_hw_fini - stop the hardware block
 *
 * @handle: amdgpu_device pointer
 *
 * Stop the VCN block, mark ring as not ready any more
 */
static int vcn_v2_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
	int i;

	if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
	    (adev->vcn.cur_state != AMD_PG_STATE_GATE &&
	     RREG32_SOC15(VCN, 0, mmUVD_STATUS)))
		vcn_v2_0_set_powergating_state(adev, AMD_PG_STATE_GATE);

	ring->sched.ready = false;

	for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
		ring = &adev->vcn.inst->ring_enc[i];
		ring->sched.ready = false;
	}

	return 0;
}
/**
 * vcn_v2_0_suspend - suspend VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * HW fini and suspend VCN block
 */
static int vcn_v2_0_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = vcn_v2_0_hw_fini(adev);
	if (r)
		return r;

	r = amdgpu_vcn_suspend(adev);

	return r;
}
/**
 * vcn_v2_0_resume - resume VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Resume firmware and hw init VCN block
 */
static int vcn_v2_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vcn_resume(adev);
	if (r)
		return r;

	r = vcn_v2_0_hw_init(adev);

	return r;
}
/**
 * vcn_v2_0_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the VCN memory controller know its offsets
 */
static void vcn_v2_0_mc_resume(struct amdgpu_device *adev)
{
	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
	uint32_t offset;

	/* cache window 0: fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_lo));
		WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_hi));
		WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0, 0);
		offset = 0;
	} else {
		WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			lower_32_bits(adev->vcn.inst->gpu_addr));
		WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			upper_32_bits(adev->vcn.inst->gpu_addr));
		offset = size;
		WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0,
			AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
	}

	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE0, size);

	/* cache window 1: stack */
	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
		lower_32_bits(adev->vcn.inst->gpu_addr + offset));
	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
		upper_32_bits(adev->vcn.inst->gpu_addr + offset));
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1, 0);
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE);

	/* cache window 2: context */
	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
		lower_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
		upper_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2, 0);
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);

	WREG32_SOC15(UVD, 0, mmUVD_GFX10_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
}
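
/*
 * The DPG variant below programs the same cache windows, but every write
 * goes through WREG32_SOC15_DPG_MODE_2_0: with indirect == 0 the value is
 * written immediately through the UVD_DPG_LMA register pair, with
 * indirect != 0 the offset/value pair is appended to the DPG SRAM image
 * that start_dpg_mode later hands to the PSP via psp_update_vcn_sram()
 * for the firmware to replay.
 */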
static void vcn_v2_0_mc_resume_dpg_mode(struct amdgpu_device *adev, bool indirect)
{
	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
	uint32_t offset;

	/* cache window 0: fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		if (!indirect) {
			WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
				UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_lo), 0, indirect);
			WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
				UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_hi), 0, indirect);
			WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
				UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
		} else {
			WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
				UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, indirect);
			WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
				UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, indirect);
			WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
				UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
		}
		offset = 0;
	} else {
		WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
			UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst->gpu_addr), 0, indirect);
		WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
			UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst->gpu_addr), 0, indirect);
		offset = size;
		WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
			UVD, 0, mmUVD_VCPU_CACHE_OFFSET0),
			AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0, indirect);
	}

	if (!indirect)
		WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
			UVD, 0, mmUVD_VCPU_CACHE_SIZE0), size, 0, indirect);
	else
		WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
			UVD, 0, mmUVD_VCPU_CACHE_SIZE0), 0, 0, indirect);

	/* cache window 1: stack */
	if (!indirect) {
		WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
			UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst->gpu_addr + offset), 0, indirect);
		WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
			UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst->gpu_addr + offset), 0, indirect);
		WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
			UVD, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
	} else {
		WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
			UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 0, 0, indirect);
		WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
			UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 0, 0, indirect);
		WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
			UVD, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
	}
	WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE, 0, indirect);

	/* cache window 2: context */
	WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
		lower_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
	WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
		upper_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
	WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect);
	WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE, 0, indirect);

	/* non-cache window */
	WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW), 0, 0, indirect);
	WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH), 0, 0, indirect);
	WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect);
	WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_VCPU_NONCACHE_SIZE0), 0, 0, indirect);

	/* VCN global tiling registers */
	WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
}
/**
 * vcn_v2_0_disable_clock_gating - disable VCN clock gating
 *
 * @adev: amdgpu_device pointer
 *
 * Disable clock gating for VCN block
 */
static void vcn_v2_0_disable_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data;

	/* UVD disable CGC */
	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
	data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_GATE);
	data &= ~(UVD_CGC_GATE__SYS_MASK
		| UVD_CGC_GATE__UDEC_MASK
		| UVD_CGC_GATE__MPEG2_MASK
		| UVD_CGC_GATE__REGS_MASK
		| UVD_CGC_GATE__RBC_MASK
		| UVD_CGC_GATE__LMI_MC_MASK
		| UVD_CGC_GATE__LMI_UMC_MASK
		| UVD_CGC_GATE__IDCT_MASK
		| UVD_CGC_GATE__MPRD_MASK
		| UVD_CGC_GATE__MPC_MASK
		| UVD_CGC_GATE__LBSI_MASK
		| UVD_CGC_GATE__LRBBM_MASK
		| UVD_CGC_GATE__UDEC_RE_MASK
		| UVD_CGC_GATE__UDEC_CM_MASK
		| UVD_CGC_GATE__UDEC_IT_MASK
		| UVD_CGC_GATE__UDEC_DB_MASK
		| UVD_CGC_GATE__UDEC_MP_MASK
		| UVD_CGC_GATE__WCB_MASK
		| UVD_CGC_GATE__VCPU_MASK
		| UVD_CGC_GATE__SCPU_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_CGC_GATE, data);

	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
	data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK
		| UVD_CGC_CTRL__UDEC_CM_MODE_MASK
		| UVD_CGC_CTRL__UDEC_IT_MODE_MASK
		| UVD_CGC_CTRL__UDEC_DB_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MP_MODE_MASK
		| UVD_CGC_CTRL__SYS_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MODE_MASK
		| UVD_CGC_CTRL__MPEG2_MODE_MASK
		| UVD_CGC_CTRL__REGS_MODE_MASK
		| UVD_CGC_CTRL__RBC_MODE_MASK
		| UVD_CGC_CTRL__LMI_MC_MODE_MASK
		| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
		| UVD_CGC_CTRL__IDCT_MODE_MASK
		| UVD_CGC_CTRL__MPRD_MODE_MASK
		| UVD_CGC_CTRL__MPC_MODE_MASK
		| UVD_CGC_CTRL__LBSI_MODE_MASK
		| UVD_CGC_CTRL__LRBBM_MODE_MASK
		| UVD_CGC_CTRL__WCB_MODE_MASK
		| UVD_CGC_CTRL__VCPU_MODE_MASK
		| UVD_CGC_CTRL__SCPU_MODE_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_GATE);
	data |= (UVD_SUVD_CGC_GATE__SRE_MASK
		| UVD_SUVD_CGC_GATE__SIT_MASK
		| UVD_SUVD_CGC_GATE__SMP_MASK
		| UVD_SUVD_CGC_GATE__SCM_MASK
		| UVD_SUVD_CGC_GATE__SDB_MASK
		| UVD_SUVD_CGC_GATE__SRE_H264_MASK
		| UVD_SUVD_CGC_GATE__SRE_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SIT_H264_MASK
		| UVD_SUVD_CGC_GATE__SIT_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SCM_H264_MASK
		| UVD_SUVD_CGC_GATE__SCM_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SDB_H264_MASK
		| UVD_SUVD_CGC_GATE__SDB_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SCLR_MASK
		| UVD_SUVD_CGC_GATE__UVD_SC_MASK
		| UVD_SUVD_CGC_GATE__ENT_MASK
		| UVD_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK
		| UVD_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK
		| UVD_SUVD_CGC_GATE__SITE_MASK
		| UVD_SUVD_CGC_GATE__SRE_VP9_MASK
		| UVD_SUVD_CGC_GATE__SCM_VP9_MASK
		| UVD_SUVD_CGC_GATE__SIT_VP9_DEC_MASK
		| UVD_SUVD_CGC_GATE__SDB_VP9_MASK
		| UVD_SUVD_CGC_GATE__IME_HEVC_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_GATE, data);

	data = RREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL);
	data &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
		| UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
		| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL, data);
}
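
/*
 * DPG-mode counterpart of the clock-gating setup above: the same
 * CGC_CTRL / CGC_GATE / SUVD values are programmed, but through the DPG
 * write path so that, when indirect is set, they are also captured in the
 * DPG SRAM image replayed by the firmware.
 */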
static void vcn_v2_0_clock_gating_dpg_mode(struct amdgpu_device *adev,
		uint8_t sram_sel, uint8_t indirect)
{
	uint32_t reg_data = 0;

	/* enable sw clock gating control */
	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		reg_data = 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		reg_data = 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	reg_data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	reg_data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	reg_data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
		 UVD_CGC_CTRL__SYS_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_MODE_MASK |
		 UVD_CGC_CTRL__MPEG2_MODE_MASK |
		 UVD_CGC_CTRL__REGS_MODE_MASK |
		 UVD_CGC_CTRL__RBC_MODE_MASK |
		 UVD_CGC_CTRL__LMI_MC_MODE_MASK |
		 UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
		 UVD_CGC_CTRL__IDCT_MODE_MASK |
		 UVD_CGC_CTRL__MPRD_MODE_MASK |
		 UVD_CGC_CTRL__MPC_MODE_MASK |
		 UVD_CGC_CTRL__LBSI_MODE_MASK |
		 UVD_CGC_CTRL__LRBBM_MODE_MASK |
		 UVD_CGC_CTRL__WCB_MODE_MASK |
		 UVD_CGC_CTRL__VCPU_MODE_MASK |
		 UVD_CGC_CTRL__SCPU_MODE_MASK);
	WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_CGC_CTRL), reg_data, sram_sel, indirect);

	/* turn off clock gating */
	WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_CGC_GATE), 0, sram_sel, indirect);

	/* turn on SUVD clock gating */
	WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_SUVD_CGC_GATE), 1, sram_sel, indirect);

	/* turn on sw mode in UVD_SUVD_CGC_CTRL */
	WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_SUVD_CGC_CTRL), 0, sram_sel, indirect);
}
/**
 * vcn_v2_0_enable_clock_gating - enable VCN clock gating
 *
 * @adev: amdgpu_device pointer
 *
 * Enable clock gating for VCN block
 */
static void vcn_v2_0_enable_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data = 0;

	/* enable UVD CGC */
	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		data |= 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
	data |= (UVD_CGC_CTRL__UDEC_RE_MODE_MASK
		| UVD_CGC_CTRL__UDEC_CM_MODE_MASK
		| UVD_CGC_CTRL__UDEC_IT_MODE_MASK
		| UVD_CGC_CTRL__UDEC_DB_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MP_MODE_MASK
		| UVD_CGC_CTRL__SYS_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MODE_MASK
		| UVD_CGC_CTRL__MPEG2_MODE_MASK
		| UVD_CGC_CTRL__REGS_MODE_MASK
		| UVD_CGC_CTRL__RBC_MODE_MASK
		| UVD_CGC_CTRL__LMI_MC_MODE_MASK
		| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
		| UVD_CGC_CTRL__IDCT_MODE_MASK
		| UVD_CGC_CTRL__MPRD_MODE_MASK
		| UVD_CGC_CTRL__MPC_MODE_MASK
		| UVD_CGC_CTRL__LBSI_MODE_MASK
		| UVD_CGC_CTRL__LRBBM_MODE_MASK
		| UVD_CGC_CTRL__WCB_MODE_MASK
		| UVD_CGC_CTRL__VCPU_MODE_MASK
		| UVD_CGC_CTRL__SCPU_MODE_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL);
	data |= (UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
		| UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
		| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL, data);
}
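
/*
 * Static power gating is driven through UVD_PGFSM_CONFIG: each UVD/VCN
 * sub-block has a small power-config field where, by the usage below,
 * 1 requests power-up and 2 requests power-down, and UVD_PGFSM_STATUS is
 * polled until the power state machine reports the requested state.
 */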
static void vcn_v2_0_disable_static_power_gating(struct amdgpu_device *adev)
{
	uint32_t data = 0;
	int ret;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
		data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDIL_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDIR_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT);

		WREG32_SOC15(VCN, 0, mmUVD_PGFSM_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS,
			UVD_PGFSM_STATUS__UVDM_UVDU_PWR_ON_2_0, 0xFFFFF, ret);
	} else {
		data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDIL_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDIR_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT);
		WREG32_SOC15(VCN, 0, mmUVD_PGFSM_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, 0, 0xFFFFF, ret);
	}

	/* polling UVD_PGFSM_STATUS to confirm UVDM_PWR_STATUS,
	 * UVDU_PWR_STATUS are 0 (power on) */

	data = RREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS);
	if (adev->pg_flags & AMD_PG_SUPPORT_VCN)
		data |= UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON |
			UVD_POWER_STATUS__UVD_PG_EN_MASK;

	WREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS, data);
}
static void vcn_v2_0_enable_static_power_gating(struct amdgpu_device *adev)
{
	uint32_t data = 0;
	int ret;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
		/* Before power off, this indicator has to be turned on */
		data = RREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS);
		data &= ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK;
		data |= UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF;
		WREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS, data);

		data = (2 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDIL_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDIR_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT);

		WREG32_SOC15(VCN, 0, mmUVD_PGFSM_CONFIG, data);

		data = (2 << UVD_PGFSM_STATUS__UVDM_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDU_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDF_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDC_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDB_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDIL_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDIR_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDTD_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDTE_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDE_PWR_STATUS__SHIFT);
		SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, data, 0xFFFFF, ret);
	}
}
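
/*
 * DPG (dynamic power gating) start: the VCPU is brought up with most of
 * the register programming either written through the DPG LMA interface
 * or, when indirect SRAM is enabled, recorded into a command buffer that
 * the PSP applies on the driver's behalf (see psp_update_vcn_sram() below).
 */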
static int vcn_v2_0_start_dpg_mode(struct amdgpu_device *adev, bool indirect)
{
	struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
	uint32_t rb_bufsz, tmp;

	vcn_v2_0_enable_static_power_gating(adev);

	/* enable dynamic power gating mode */
	tmp = RREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS);
	tmp |= UVD_POWER_STATUS__UVD_PG_MODE_MASK;
	tmp |= UVD_POWER_STATUS__UVD_PG_EN_MASK;
	WREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS, tmp);

	if (indirect)
		adev->vcn.inst->dpg_sram_curr_addr = (uint32_t *)adev->vcn.inst->dpg_sram_cpu_addr;

	/* enable clock gating */
	vcn_v2_0_clock_gating_dpg_mode(adev, 0, indirect);

	/* enable VCPU clock */
	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
	tmp |= UVD_VCPU_CNTL__MIF_WR_LOW_THRESHOLD_BP_MASK;
	WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_VCPU_CNTL), tmp, 0, indirect);

	/* disable master interrupt */
	WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_MASTINT_EN), 0, 0, indirect);

	/* setup mmUVD_LMI_CTRL */
	tmp = (UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
		UVD_LMI_CTRL__REQ_MODE_MASK |
		UVD_LMI_CTRL__CRC_RESET_MASK |
		UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
		(8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
		0x00100000L);
	WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_LMI_CTRL), tmp, 0, indirect);

	WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_MPC_CNTL),
		0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT, 0, indirect);

	WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_MPC_SET_MUXA0),
		((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
		 (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
		 (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)), 0, indirect);

	WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_MPC_SET_MUXB0),
		((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
		 (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
		 (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)), 0, indirect);

	WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_MPC_SET_MUX),
		((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
		 (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0, indirect);

	vcn_v2_0_mc_resume_dpg_mode(adev, indirect);

	WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_REG_XX_MASK), 0x10, 0, indirect);
	WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK), 0x3, 0, indirect);

	/* release VCPU reset to boot */
	WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_SOFT_RESET), 0, 0, indirect);

	/* enable LMI MC and UMC channels */
	WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_LMI_CTRL2),
		0x1F << UVD_LMI_CTRL2__RE_OFLD_MIF_WR_REQ_NUM__SHIFT, 0, indirect);

	/* enable master interrupt */
	WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_MASTINT_EN),
		UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);

	if (indirect)
		psp_update_vcn_sram(adev, 0, adev->vcn.inst->dpg_sram_gpu_addr,
				    (uint32_t)((uintptr_t)adev->vcn.inst->dpg_sram_curr_addr -
					       (uintptr_t)adev->vcn.inst->dpg_sram_cpu_addr));

	/* force RBC into idle state */
	rb_bufsz = order_base_2(ring->ring_size);
	tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, tmp);

	/* set the write pointer delay */
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR_CNTL, 0);

	/* set the wb address */
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR_ADDR,
		(upper_32_bits(ring->gpu_addr) >> 2));

	/* program the RB_BASE for ring buffer */
	WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
		lower_32_bits(ring->gpu_addr));
	WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
		upper_32_bits(ring->gpu_addr));

	/* Initialize the ring buffer's read and write pointers */
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR, 0);

	WREG32_SOC15(UVD, 0, mmUVD_SCRATCH2, 0);

	ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
		lower_32_bits(ring->wptr));

	return 0;
}
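
/*
 * Non-DPG (SPG) start path: a full static bring-up of the block,
 * including a retry loop that polls UVD_STATUS for the VCPU
 * boot-complete bit and soft-resets the VCPU between attempts if it
 * does not come up.
 */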
static int vcn_v2_0_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
	uint32_t rb_bufsz, tmp;
	uint32_t lmi_swap_cntl;
	int i, j, r;

	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_uvd(adev, true);

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
		return vcn_v2_0_start_dpg_mode(adev, adev->vcn.indirect_sram);

	vcn_v2_0_disable_static_power_gating(adev);

	/* set uvd status busy */
	tmp = RREG32_SOC15(UVD, 0, mmUVD_STATUS) | UVD_STATUS__UVD_BUSY;
	WREG32_SOC15(UVD, 0, mmUVD_STATUS, tmp);

	/* SW clock gating */
	vcn_v2_0_disable_clock_gating(adev);

	/* enable VCPU clock */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CNTL),
		UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK);

	/* disable master interrupt */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN), 0,
		~UVD_MASTINT_EN__VCPU_EN_MASK);

	/* setup mmUVD_LMI_CTRL */
	tmp = RREG32_SOC15(UVD, 0, mmUVD_LMI_CTRL);
	WREG32_SOC15(UVD, 0, mmUVD_LMI_CTRL, tmp |
		UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
		UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);

	/* setup mmUVD_MPC_CNTL */
	tmp = RREG32_SOC15(UVD, 0, mmUVD_MPC_CNTL);
	tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK;
	tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT;
	WREG32_SOC15(VCN, 0, mmUVD_MPC_CNTL, tmp);

	/* setup UVD_MPC_SET_MUXA0 */
	WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXA0,
		((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
		 (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
		 (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)));

	/* setup UVD_MPC_SET_MUXB0 */
	WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXB0,
		((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
		 (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
		 (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)));

	/* setup mmUVD_MPC_SET_MUX */
	WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUX,
		((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
		 (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));

	vcn_v2_0_mc_resume(adev);

	/* release VCPU reset to boot */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), 0,
		~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);

	/* enable LMI MC and UMC channels */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2), 0,
		~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);

	tmp = RREG32_SOC15(VCN, 0, mmUVD_SOFT_RESET);
	tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
	tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
	WREG32_SOC15(VCN, 0, mmUVD_SOFT_RESET, tmp);

	/* disable byte swapping */
	lmi_swap_cntl = 0;
#ifdef __BIG_ENDIAN
	/* swap (8 in 32) RB and IB */
	lmi_swap_cntl = 0xa;
#endif
	WREG32_SOC15(UVD, 0, mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);

	for (i = 0; i < 10; ++i) {
		uint32_t status;

		for (j = 0; j < 100; ++j) {
			status = RREG32_SOC15(UVD, 0, mmUVD_STATUS);
			if (status & 2)
				break;
			mdelay(10);
		}

		r = 0;
		if (status & 2)
			break;

		DRM_ERROR("VCN decode not responding, trying to reset the VCPU!!!\n");
		WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
			UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
			~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(10);
		WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), 0,
			~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);

		mdelay(10);
		r = -1;
	}

	if (r) {
		DRM_ERROR("VCN decode not responding, giving up!!!\n");
		return r;
	}

	/* enable master interrupt */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN),
		UVD_MASTINT_EN__VCPU_EN_MASK,
		~UVD_MASTINT_EN__VCPU_EN_MASK);

	/* clear the busy bit of VCN_STATUS */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_STATUS), 0,
		~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));

	WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_VMID, 0);

	/* force RBC into idle state */
	rb_bufsz = order_base_2(ring->ring_size);
	tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, tmp);

	/* program the RB_BASE for ring buffer */
	WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
		lower_32_bits(ring->gpu_addr));
	WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
		upper_32_bits(ring->gpu_addr));

	/* Initialize the ring buffer's read and write pointers */
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR, 0);

	ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
		lower_32_bits(ring->wptr));

	ring = &adev->vcn.inst->ring_enc[0];
	WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr);
	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4);

	ring = &adev->vcn.inst->ring_enc[1];
	WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr);
	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4);

	return 0;
}
static int vcn_v2_0_stop_dpg_mode(struct amdgpu_device *adev)
{
	int ret_code = 0;
	uint32_t tmp;

	/* Wait for power status to be 1 */
	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS, 1,
		UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);

	/* wait for read ptr to be equal to write ptr */
	tmp = RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR);
	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RB_RPTR, tmp, 0xFFFFFFFF, ret_code);

	tmp = RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2);
	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RB_RPTR2, tmp, 0xFFFFFFFF, ret_code);

	tmp = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR) & 0x7FFFFFFF;
	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RBC_RB_RPTR, tmp, 0xFFFFFFFF, ret_code);

	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS, 1,
		UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);

	/* disable dynamic power gating mode */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS), 0,
		~UVD_POWER_STATUS__UVD_PG_MODE_MASK);

	return 0;
}
static int vcn_v2_0_stop(struct amdgpu_device *adev)
{
	uint32_t tmp;
	int r;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
		r = vcn_v2_0_stop_dpg_mode(adev);
		if (r)
			return r;
		goto power_off;
	}

	/* wait for uvd idle */
	SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_STATUS, UVD_STATUS__IDLE, 0x7, r);
	if (r)
		return r;

	tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
		UVD_LMI_STATUS__READ_CLEAN_MASK |
		UVD_LMI_STATUS__WRITE_CLEAN_MASK |
		UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
	SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_LMI_STATUS, tmp, tmp, r);
	if (r)
		return r;

	/* stall UMC channel */
	tmp = RREG32_SOC15(VCN, 0, mmUVD_LMI_CTRL2);
	tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
	WREG32_SOC15(VCN, 0, mmUVD_LMI_CTRL2, tmp);

	tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
		UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
	SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_LMI_STATUS, tmp, tmp, r);
	if (r)
		return r;

	/* disable VCPU clock */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CNTL), 0,
		~(UVD_VCPU_CNTL__CLK_EN_MASK));

	/* reset LMI UMC */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
		UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK,
		~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);

	/* reset LMI */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
		UVD_SOFT_RESET__LMI_SOFT_RESET_MASK,
		~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK);

	/* reset VCPU */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
		UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
		~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);

	/* clear status */
	WREG32_SOC15(VCN, 0, mmUVD_STATUS, 0);

	vcn_v2_0_enable_clock_gating(adev);
	vcn_v2_0_enable_static_power_gating(adev);

power_off:
	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_uvd(adev, false);

	return 0;
}
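
/*
 * DPG pause: while the block is in dynamic power gating mode, the encode
 * rings can only be serviced with DPG paused.  The common VCN ring
 * begin_use path (amdgpu_vcn.c) requests VCN_DPG_STATE__PAUSE here before
 * encode work is submitted, and the "Restore" block below re-programs the
 * encode ring registers once the pause has been acknowledged.
 */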
static int vcn_v2_0_pause_dpg_mode(struct amdgpu_device *adev,
				int inst_idx, struct dpg_pause_state *new_state)
{
	struct amdgpu_ring *ring;
	uint32_t reg_data = 0;
	int ret_code;

	/* pause/unpause if state is changed */
	if (adev->vcn.pause_state.fw_based != new_state->fw_based) {
		DRM_DEBUG("dpg pause state changed %d -> %d",
			adev->vcn.pause_state.fw_based, new_state->fw_based);
		reg_data = RREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE) &
			(~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);

		if (new_state->fw_based == VCN_DPG_STATE__PAUSE) {
			ret_code = 0;
			SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS, 0x1,
				UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);

			if (!ret_code) {
				/* pause DPG */
				reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
				WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);

				/* wait for ACK */
				SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_DPG_PAUSE,
					UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK,
					UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK, ret_code);

				/* Restore */
				ring = &adev->vcn.inst->ring_enc[0];
				WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr);
				WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
				WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4);
				WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
				WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));

				ring = &adev->vcn.inst->ring_enc[1];
				WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr);
				WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
				WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4);
				WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
				WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));

				WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
					RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2) & 0x7FFFFFFF);

				SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
					UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON,
					UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
			}
		} else {
			/* unpause dpg, no need to wait */
			reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
			WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
		}
		adev->vcn.pause_state.fw_based = new_state->fw_based;
	}

	return 0;
}
static bool vcn_v2_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return (RREG32_SOC15(VCN, 0, mmUVD_STATUS) == UVD_STATUS__IDLE);
}
static int vcn_v2_0_wait_for_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret;

	SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_STATUS, UVD_STATUS__IDLE,
		UVD_STATUS__IDLE, ret);

	return ret;
}
static int vcn_v2_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_CG_STATE_GATE);

	if (enable) {
		/* wait for STATUS to clear */
		if (!vcn_v2_0_is_idle(handle))
			return -EBUSY;
		vcn_v2_0_enable_clock_gating(adev);
	} else {
		/* disable HW gating and enable Sw gating */
		vcn_v2_0_disable_clock_gating(adev);
	}

	return 0;
}
/**
 * vcn_v2_0_dec_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t vcn_v2_0_dec_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
}
/**
 * vcn_v2_0_dec_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t vcn_v2_0_dec_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell)
		return adev->wb.wb[ring->wptr_offs];
	else
		return RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR);
}
/**
 * vcn_v2_0_dec_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void vcn_v2_0_dec_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
		WREG32_SOC15(UVD, 0, mmUVD_SCRATCH2,
			lower_32_bits(ring->wptr) | 0x80000000);

	if (ring->use_doorbell) {
		adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
	} else {
		WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
	}
}
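
/*
 * Decode ring commands are built as PACKET0 register writes to the
 * VCPU-internal DATA0/DATA1/CMD registers (the internal.* offsets set up
 * in sw_init); the start/end/nop/fence/trap helpers below all follow
 * that convention.
 */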
/**
 * vcn_v2_0_dec_ring_insert_start - insert a start command
 *
 * @ring: amdgpu_ring pointer
 *
 * Write a start command to the ring.
 */
void vcn_v2_0_dec_ring_insert_start(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data0, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.cmd, 0));
	amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_PACKET_START << 1));
}
/**
 * vcn_v2_0_dec_ring_insert_end - insert an end command
 *
 * @ring: amdgpu_ring pointer
 *
 * Write an end command to the ring.
 */
void vcn_v2_0_dec_ring_insert_end(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.cmd, 0));
	amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_PACKET_END << 1));
}
/**
 * vcn_v2_0_dec_ring_insert_nop - insert a nop command
 *
 * @ring: amdgpu_ring pointer
 *
 * Write a nop command to the ring.
 */
void vcn_v2_0_dec_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
	struct amdgpu_device *adev = ring->adev;
	int i;

	WARN_ON(ring->wptr % 2 || count % 2);

	for (i = 0; i < count / 2; i++) {
		amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.nop, 0));
		amdgpu_ring_write(ring, 0);
	}
}
/**
 * vcn_v2_0_dec_ring_emit_fence - emit a fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @fence: fence to emit
 *
 * Write a fence and a trap command to the ring.
 */
void vcn_v2_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				unsigned flags)
{
	struct amdgpu_device *adev = ring->adev;

	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.context_id, 0));
	amdgpu_ring_write(ring, seq);

	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data0, 0));
	amdgpu_ring_write(ring, addr & 0xffffffff);

	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data1, 0));
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);

	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.cmd, 0));
	amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_FENCE << 1));

	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data0, 0));
	amdgpu_ring_write(ring, 0);

	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data1, 0));
	amdgpu_ring_write(ring, 0);

	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.cmd, 0));
	amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_TRAP << 1));
}
/**
 * vcn_v2_0_dec_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @ib: indirect buffer to execute
 *
 * Write ring commands to execute the indirect buffer
 */
void vcn_v2_0_dec_ring_emit_ib(struct amdgpu_ring *ring,
			       struct amdgpu_job *job,
			       struct amdgpu_ib *ib,
			       uint32_t flags)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);

	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.ib_vmid, 0));
	amdgpu_ring_write(ring, vmid);

	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.ib_bar_low, 0));
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.ib_bar_high, 0));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.ib_size, 0));
	amdgpu_ring_write(ring, ib->length_dw);
}
void vcn_v2_0_dec_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
				uint32_t val, uint32_t mask)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data0, 0));
	amdgpu_ring_write(ring, reg << 2);

	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data1, 0));
	amdgpu_ring_write(ring, val);

	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.gp_scratch8, 0));
	amdgpu_ring_write(ring, mask);

	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.cmd, 0));
	amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_REG_READ_COND_WAIT << 1));
}
void vcn_v2_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring,
				unsigned vmid, uint64_t pd_addr)
{
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
	uint32_t data0, data1, mask;

	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	/* wait for register write */
	data0 = hub->ctx0_ptb_addr_lo32 + vmid * 2;
	data1 = lower_32_bits(pd_addr);
	mask = 0xffffffff;
	vcn_v2_0_dec_ring_emit_reg_wait(ring, data0, data1, mask);
}
void vcn_v2_0_dec_ring_emit_wreg(struct amdgpu_ring *ring,
				uint32_t reg, uint32_t val)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data0, 0));
	amdgpu_ring_write(ring, reg << 2);

	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data1, 0));
	amdgpu_ring_write(ring, val);

	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.cmd, 0));
	amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_WRITE_REG << 1));
}
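
/*
 * The encode ring uses its own small command set (VCN_ENC_CMD_*) written
 * directly into the ring, instead of the PACKET0 register-write scheme
 * used by the decode ring above.
 */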
/**
 * vcn_v2_0_enc_ring_get_rptr - get enc read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc read pointer
 */
static uint64_t vcn_v2_0_enc_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.inst->ring_enc[0])
		return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR);
	else
		return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2);
}
/**
 * vcn_v2_0_enc_ring_get_wptr - get enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc write pointer
 */
static uint64_t vcn_v2_0_enc_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.inst->ring_enc[0]) {
		if (ring->use_doorbell)
			return adev->wb.wb[ring->wptr_offs];
		else
			return RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR);
	} else {
		if (ring->use_doorbell)
			return adev->wb.wb[ring->wptr_offs];
		else
			return RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2);
	}
}
/**
 * vcn_v2_0_enc_ring_set_wptr - set enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the enc write pointer to the hardware
 */
static void vcn_v2_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.inst->ring_enc[0]) {
		if (ring->use_doorbell) {
			adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
			WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
		} else {
			WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
		}
	} else {
		if (ring->use_doorbell) {
			adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
			WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
		} else {
			WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
		}
	}
}
/**
 * vcn_v2_0_enc_ring_emit_fence - emit an enc fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @fence: fence to emit
 *
 * Write an enc fence and a trap command to the ring.
 */
void vcn_v2_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
				u64 seq, unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, VCN_ENC_CMD_FENCE);
	amdgpu_ring_write(ring, addr);
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, VCN_ENC_CMD_TRAP);
}
void vcn_v2_0_enc_ring_insert_end(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, VCN_ENC_CMD_END);
}
/**
 * vcn_v2_0_enc_ring_emit_ib - enc execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @ib: indirect buffer to execute
 *
 * Write enc ring commands to execute the indirect buffer
 */
void vcn_v2_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
			       struct amdgpu_job *job,
			       struct amdgpu_ib *ib,
			       uint32_t flags)
{
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);

	amdgpu_ring_write(ring, VCN_ENC_CMD_IB);
	amdgpu_ring_write(ring, vmid);
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
}
void vcn_v2_0_enc_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
				uint32_t val, uint32_t mask)
{
	amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WAIT);
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring, mask);
	amdgpu_ring_write(ring, val);
}
void vcn_v2_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
				unsigned int vmid, uint64_t pd_addr)
{
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];

	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	/* wait for reg writes */
	vcn_v2_0_enc_ring_emit_reg_wait(ring, hub->ctx0_ptb_addr_lo32 + vmid * 2,
					lower_32_bits(pd_addr), 0xffffffff);
}
void vcn_v2_0_enc_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val)
{
	amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WRITE);
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring, val);
}
static int vcn_v2_0_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	return 0;
}
static int vcn_v2_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("IH: VCN TRAP\n");

	switch (entry->src_id) {
	case VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT:
		amdgpu_fence_process(&adev->vcn.inst->ring_dec);
		break;
	case VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE:
		amdgpu_fence_process(&adev->vcn.inst->ring_enc[0]);
		break;
	case VCN_2_0__SRCID__UVD_ENC_LOW_LATENCY:
		amdgpu_fence_process(&adev->vcn.inst->ring_enc[1]);
		break;
	default:
		DRM_ERROR("Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data[0]);
		break;
	}

	return 0;
}
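
/*
 * Decode ring smoke test: seed the external SCRATCH9 register with
 * 0xCAFEDEAD, ask the VCPU (via a PACKET0 write to the internal SCRATCH9
 * offset) to overwrite it with 0xDEADBEEF, then poll until the new value
 * shows up or the timeout expires.
 */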
static int vcn_v2_0_dec_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	WREG32(adev->vcn.inst[ring->me].external.scratch9, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 4);
	if (r)
		return r;
	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.cmd, 0));
	amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_PACKET_START << 1));
	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.scratch9, 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(adev->vcn.inst[ring->me].external.scratch9);
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}
static int vcn_v2_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	/* This doesn't actually powergate the VCN block.
	 * That's done in the dpm code via the SMC.  This
	 * just re-inits the block as necessary.  The actual
	 * gating still happens in the dpm code.  We should
	 * revisit this when there is a cleaner line between
	 * the smc and the hw blocks
	 */
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (state == adev->vcn.cur_state)
		return 0;

	if (state == AMD_PG_STATE_GATE)
		ret = vcn_v2_0_stop(adev);
	else
		ret = vcn_v2_0_start(adev);

	if (!ret)
		adev->vcn.cur_state = state;

	return ret;
}
static const struct amd_ip_funcs vcn_v2_0_ip_funcs = {
	.name = "vcn_v2_0",
	.early_init = vcn_v2_0_early_init,
	.late_init = NULL,
	.sw_init = vcn_v2_0_sw_init,
	.sw_fini = vcn_v2_0_sw_fini,
	.hw_init = vcn_v2_0_hw_init,
	.hw_fini = vcn_v2_0_hw_fini,
	.suspend = vcn_v2_0_suspend,
	.resume = vcn_v2_0_resume,
	.is_idle = vcn_v2_0_is_idle,
	.wait_for_idle = vcn_v2_0_wait_for_idle,
	.check_soft_reset = NULL,
	.pre_soft_reset = NULL,
	.soft_reset = NULL,
	.post_soft_reset = NULL,
	.set_clockgating_state = vcn_v2_0_set_clockgating_state,
	.set_powergating_state = vcn_v2_0_set_powergating_state,
};
static const struct amdgpu_ring_funcs vcn_v2_0_dec_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_DEC,
	.align_mask = 0xf,
	.vmhub = AMDGPU_MMHUB_0,
	.get_rptr = vcn_v2_0_dec_ring_get_rptr,
	.get_wptr = vcn_v2_0_dec_ring_get_wptr,
	.set_wptr = vcn_v2_0_dec_ring_set_wptr,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
		8 + /* vcn_v2_0_dec_ring_emit_vm_flush */
		14 + 14 + /* vcn_v2_0_dec_ring_emit_fence x2 vm fence */
		6,
	.emit_ib_size = 8, /* vcn_v2_0_dec_ring_emit_ib */
	.emit_ib = vcn_v2_0_dec_ring_emit_ib,
	.emit_fence = vcn_v2_0_dec_ring_emit_fence,
	.emit_vm_flush = vcn_v2_0_dec_ring_emit_vm_flush,
	.test_ring = vcn_v2_0_dec_ring_test_ring,
	.test_ib = amdgpu_vcn_dec_ring_test_ib,
	.insert_nop = vcn_v2_0_dec_ring_insert_nop,
	.insert_start = vcn_v2_0_dec_ring_insert_start,
	.insert_end = vcn_v2_0_dec_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vcn_ring_begin_use,
	.end_use = amdgpu_vcn_ring_end_use,
	.emit_wreg = vcn_v2_0_dec_ring_emit_wreg,
	.emit_reg_wait = vcn_v2_0_dec_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};
static const struct amdgpu_ring_funcs vcn_v2_0_enc_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_ENC,
	.align_mask = 0x3f,
	.nop = VCN_ENC_CMD_NO_OP,
	.vmhub = AMDGPU_MMHUB_0,
	.get_rptr = vcn_v2_0_enc_ring_get_rptr,
	.get_wptr = vcn_v2_0_enc_ring_get_wptr,
	.set_wptr = vcn_v2_0_enc_ring_set_wptr,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
		4 + /* vcn_v2_0_enc_ring_emit_vm_flush */
		5 + 5 + /* vcn_v2_0_enc_ring_emit_fence x2 vm fence */
		1, /* vcn_v2_0_enc_ring_insert_end */
	.emit_ib_size = 5, /* vcn_v2_0_enc_ring_emit_ib */
	.emit_ib = vcn_v2_0_enc_ring_emit_ib,
	.emit_fence = vcn_v2_0_enc_ring_emit_fence,
	.emit_vm_flush = vcn_v2_0_enc_ring_emit_vm_flush,
	.test_ring = amdgpu_vcn_enc_ring_test_ring,
	.test_ib = amdgpu_vcn_enc_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.insert_end = vcn_v2_0_enc_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vcn_ring_begin_use,
	.end_use = amdgpu_vcn_ring_end_use,
	.emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
	.emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};
static void vcn_v2_0_set_dec_ring_funcs(struct amdgpu_device *adev)
{
	adev->vcn.inst->ring_dec.funcs = &vcn_v2_0_dec_ring_vm_funcs;
	DRM_INFO("VCN decode is enabled in VM mode\n");
}
static void vcn_v2_0_set_enc_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->vcn.num_enc_rings; ++i)
		adev->vcn.inst->ring_enc[i].funcs = &vcn_v2_0_enc_ring_vm_funcs;

	DRM_INFO("VCN encode is enabled in VM mode\n");
}
static const struct amdgpu_irq_src_funcs vcn_v2_0_irq_funcs = {
	.set = vcn_v2_0_set_interrupt_state,
	.process = vcn_v2_0_process_interrupt,
};
static void vcn_v2_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->vcn.inst->irq.num_types = adev->vcn.num_enc_rings + 1;
	adev->vcn.inst->irq.funcs = &vcn_v2_0_irq_funcs;
}
const struct amdgpu_ip_block_version vcn_v2_0_ip_block =
{
		.type = AMD_IP_BLOCK_TYPE_VCN,
		.major = 2,
		.minor = 0,
		.rev = 0,
		.funcs = &vcn_v2_0_ip_funcs,
};