2 * Copyright 2016 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
24 #include <linux/firmware.h>
27 #include "amdgpu_vcn.h"
30 #include "soc15_common.h"
32 #include "vcn/vcn_1_0_offset.h"
33 #include "vcn/vcn_1_0_sh_mask.h"
34 #include "hdp/hdp_4_0_offset.h"
35 #include "mmhub/mmhub_9_1_offset.h"
36 #include "mmhub/mmhub_9_1_sh_mask.h"
38 #include "ivsrcid/vcn/irqsrcs_vcn_1_0.h"
40 #define mmUVD_RBC_XX_IB_REG_CHECK 0x05ab
41 #define mmUVD_RBC_XX_IB_REG_CHECK_BASE_IDX 1
42 #define mmUVD_REG_XX_MASK 0x05ac
43 #define mmUVD_REG_XX_MASK_BASE_IDX 1
45 static int vcn_v1_0_stop(struct amdgpu_device
*adev
);
46 static void vcn_v1_0_set_dec_ring_funcs(struct amdgpu_device
*adev
);
47 static void vcn_v1_0_set_enc_ring_funcs(struct amdgpu_device
*adev
);
48 static void vcn_v1_0_set_jpeg_ring_funcs(struct amdgpu_device
*adev
);
49 static void vcn_v1_0_set_irq_funcs(struct amdgpu_device
*adev
);
50 static void vcn_v1_0_jpeg_ring_set_patch_ring(struct amdgpu_ring
*ring
, uint32_t ptr
);
51 static int vcn_v1_0_set_powergating_state(void *handle
, enum amd_powergating_state state
);
54 * vcn_v1_0_early_init - set function pointers
56 * @handle: amdgpu_device pointer
58 * Set ring and irq function pointers
60 static int vcn_v1_0_early_init(void *handle
)
62 struct amdgpu_device
*adev
= (struct amdgpu_device
*)handle
;
64 adev
->vcn
.num_enc_rings
= 2;
66 vcn_v1_0_set_dec_ring_funcs(adev
);
67 vcn_v1_0_set_enc_ring_funcs(adev
);
68 vcn_v1_0_set_jpeg_ring_funcs(adev
);
69 vcn_v1_0_set_irq_funcs(adev
);
75 * vcn_v1_0_sw_init - sw init for VCN block
77 * @handle: amdgpu_device pointer
79 * Load firmware and sw initialization
81 static int vcn_v1_0_sw_init(void *handle
)
83 struct amdgpu_ring
*ring
;
85 struct amdgpu_device
*adev
= (struct amdgpu_device
*)handle
;
88 r
= amdgpu_irq_add_id(adev
, SOC15_IH_CLIENTID_VCN
, VCN_1_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT
, &adev
->vcn
.irq
);
93 for (i
= 0; i
< adev
->vcn
.num_enc_rings
; ++i
) {
94 r
= amdgpu_irq_add_id(adev
, SOC15_IH_CLIENTID_VCN
, i
+ VCN_1_0__SRCID__UVD_ENC_GENERAL_PURPOSE
,
101 r
= amdgpu_irq_add_id(adev
, SOC15_IH_CLIENTID_VCN
, 126, &adev
->vcn
.irq
);
105 r
= amdgpu_vcn_sw_init(adev
);
109 if (adev
->firmware
.load_type
== AMDGPU_FW_LOAD_PSP
) {
110 const struct common_firmware_header
*hdr
;
111 hdr
= (const struct common_firmware_header
*)adev
->vcn
.fw
->data
;
112 adev
->firmware
.ucode
[AMDGPU_UCODE_ID_VCN
].ucode_id
= AMDGPU_UCODE_ID_VCN
;
113 adev
->firmware
.ucode
[AMDGPU_UCODE_ID_VCN
].fw
= adev
->vcn
.fw
;
114 adev
->firmware
.fw_size
+=
115 ALIGN(le32_to_cpu(hdr
->ucode_size_bytes
), PAGE_SIZE
);
116 DRM_INFO("PSP loading VCN firmware\n");
119 r
= amdgpu_vcn_resume(adev
);
123 ring
= &adev
->vcn
.ring_dec
;
124 sprintf(ring
->name
, "vcn_dec");
125 r
= amdgpu_ring_init(adev
, ring
, 512, &adev
->vcn
.irq
, 0);
129 for (i
= 0; i
< adev
->vcn
.num_enc_rings
; ++i
) {
130 ring
= &adev
->vcn
.ring_enc
[i
];
131 sprintf(ring
->name
, "vcn_enc%d", i
);
132 r
= amdgpu_ring_init(adev
, ring
, 512, &adev
->vcn
.irq
, 0);
137 ring
= &adev
->vcn
.ring_jpeg
;
138 sprintf(ring
->name
, "vcn_jpeg");
139 r
= amdgpu_ring_init(adev
, ring
, 512, &adev
->vcn
.irq
, 0);
/**
 * vcn_v1_0_sw_fini - sw fini for VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * VCN suspend and free up sw allocation
 */
static int vcn_v1_0_sw_fini(void *handle)
{
    int r;
    struct amdgpu_device *adev = (struct amdgpu_device *)handle;

    r = amdgpu_vcn_suspend(adev);
    if (r)
        return r;

    r = amdgpu_vcn_sw_fini(adev);

    return r;
}
168 * vcn_v1_0_hw_init - start and test VCN block
170 * @handle: amdgpu_device pointer
172 * Initialize the hardware, boot up the VCPU and do some testing
174 static int vcn_v1_0_hw_init(void *handle
)
176 struct amdgpu_device
*adev
= (struct amdgpu_device
*)handle
;
177 struct amdgpu_ring
*ring
= &adev
->vcn
.ring_dec
;
180 r
= amdgpu_ring_test_helper(ring
);
184 for (i
= 0; i
< adev
->vcn
.num_enc_rings
; ++i
) {
185 ring
= &adev
->vcn
.ring_enc
[i
];
186 ring
->sched
.ready
= true;
187 r
= amdgpu_ring_test_helper(ring
);
192 ring
= &adev
->vcn
.ring_jpeg
;
193 r
= amdgpu_ring_test_helper(ring
);
199 DRM_INFO("VCN decode and encode initialized successfully(under %s).\n",
200 (adev
->pg_flags
& AMD_PG_SUPPORT_VCN_DPG
)?"DPG Mode":"SPG Mode");
206 * vcn_v1_0_hw_fini - stop the hardware block
208 * @handle: amdgpu_device pointer
210 * Stop the VCN block, mark ring as not ready any more
212 static int vcn_v1_0_hw_fini(void *handle
)
214 struct amdgpu_device
*adev
= (struct amdgpu_device
*)handle
;
215 struct amdgpu_ring
*ring
= &adev
->vcn
.ring_dec
;
217 if ((adev
->pg_flags
& AMD_PG_SUPPORT_VCN_DPG
) ||
218 RREG32_SOC15(VCN
, 0, mmUVD_STATUS
))
219 vcn_v1_0_set_powergating_state(adev
, AMD_PG_STATE_GATE
);
221 ring
->sched
.ready
= false;
/**
 * vcn_v1_0_suspend - suspend VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * HW fini and suspend VCN block
 */
static int vcn_v1_0_suspend(void *handle)
{
    int r;
    struct amdgpu_device *adev = (struct amdgpu_device *)handle;

    r = vcn_v1_0_hw_fini(adev);
    if (r)
        return r;

    r = amdgpu_vcn_suspend(adev);

    return r;
}
/**
 * vcn_v1_0_resume - resume VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Resume firmware and hw init VCN block
 */
static int vcn_v1_0_resume(void *handle)
{
    int r;
    struct amdgpu_device *adev = (struct amdgpu_device *)handle;

    r = amdgpu_vcn_resume(adev);
    if (r)
        return r;

    r = vcn_v1_0_hw_init(adev);

    return r;
}
269 * vcn_v1_0_mc_resume_spg_mode - memory controller programming
271 * @adev: amdgpu_device pointer
273 * Let the VCN memory controller know it's offsets
275 static void vcn_v1_0_mc_resume_spg_mode(struct amdgpu_device
*adev
)
277 uint32_t size
= AMDGPU_GPU_PAGE_ALIGN(adev
->vcn
.fw
->size
+ 4);
280 /* cache window 0: fw */
281 if (adev
->firmware
.load_type
== AMDGPU_FW_LOAD_PSP
) {
282 WREG32_SOC15(UVD
, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW
,
283 (adev
->firmware
.ucode
[AMDGPU_UCODE_ID_VCN
].tmr_mc_addr_lo
));
284 WREG32_SOC15(UVD
, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH
,
285 (adev
->firmware
.ucode
[AMDGPU_UCODE_ID_VCN
].tmr_mc_addr_hi
));
286 WREG32_SOC15(UVD
, 0, mmUVD_VCPU_CACHE_OFFSET0
, 0);
289 WREG32_SOC15(UVD
, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW
,
290 lower_32_bits(adev
->vcn
.gpu_addr
));
291 WREG32_SOC15(UVD
, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH
,
292 upper_32_bits(adev
->vcn
.gpu_addr
));
294 WREG32_SOC15(UVD
, 0, mmUVD_VCPU_CACHE_OFFSET0
,
295 AMDGPU_UVD_FIRMWARE_OFFSET
>> 3);
298 WREG32_SOC15(UVD
, 0, mmUVD_VCPU_CACHE_SIZE0
, size
);
300 /* cache window 1: stack */
301 WREG32_SOC15(UVD
, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW
,
302 lower_32_bits(adev
->vcn
.gpu_addr
+ offset
));
303 WREG32_SOC15(UVD
, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH
,
304 upper_32_bits(adev
->vcn
.gpu_addr
+ offset
));
305 WREG32_SOC15(UVD
, 0, mmUVD_VCPU_CACHE_OFFSET1
, 0);
306 WREG32_SOC15(UVD
, 0, mmUVD_VCPU_CACHE_SIZE1
, AMDGPU_VCN_STACK_SIZE
);
308 /* cache window 2: context */
309 WREG32_SOC15(UVD
, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW
,
310 lower_32_bits(adev
->vcn
.gpu_addr
+ offset
+ AMDGPU_VCN_STACK_SIZE
));
311 WREG32_SOC15(UVD
, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH
,
312 upper_32_bits(adev
->vcn
.gpu_addr
+ offset
+ AMDGPU_VCN_STACK_SIZE
));
313 WREG32_SOC15(UVD
, 0, mmUVD_VCPU_CACHE_OFFSET2
, 0);
314 WREG32_SOC15(UVD
, 0, mmUVD_VCPU_CACHE_SIZE2
, AMDGPU_VCN_CONTEXT_SIZE
);
316 WREG32_SOC15(UVD
, 0, mmUVD_UDEC_ADDR_CONFIG
,
317 adev
->gfx
.config
.gb_addr_config
);
318 WREG32_SOC15(UVD
, 0, mmUVD_UDEC_DB_ADDR_CONFIG
,
319 adev
->gfx
.config
.gb_addr_config
);
320 WREG32_SOC15(UVD
, 0, mmUVD_UDEC_DBW_ADDR_CONFIG
,
321 adev
->gfx
.config
.gb_addr_config
);
322 WREG32_SOC15(UVD
, 0, mmUVD_UDEC_DBW_UV_ADDR_CONFIG
,
323 adev
->gfx
.config
.gb_addr_config
);
324 WREG32_SOC15(UVD
, 0, mmUVD_MIF_CURR_ADDR_CONFIG
,
325 adev
->gfx
.config
.gb_addr_config
);
326 WREG32_SOC15(UVD
, 0, mmUVD_MIF_CURR_UV_ADDR_CONFIG
,
327 adev
->gfx
.config
.gb_addr_config
);
328 WREG32_SOC15(UVD
, 0, mmUVD_MIF_RECON1_ADDR_CONFIG
,
329 adev
->gfx
.config
.gb_addr_config
);
330 WREG32_SOC15(UVD
, 0, mmUVD_MIF_RECON1_UV_ADDR_CONFIG
,
331 adev
->gfx
.config
.gb_addr_config
);
332 WREG32_SOC15(UVD
, 0, mmUVD_MIF_REF_ADDR_CONFIG
,
333 adev
->gfx
.config
.gb_addr_config
);
334 WREG32_SOC15(UVD
, 0, mmUVD_MIF_REF_UV_ADDR_CONFIG
,
335 adev
->gfx
.config
.gb_addr_config
);
336 WREG32_SOC15(UVD
, 0, mmUVD_JPEG_ADDR_CONFIG
,
337 adev
->gfx
.config
.gb_addr_config
);
338 WREG32_SOC15(UVD
, 0, mmUVD_JPEG_UV_ADDR_CONFIG
,
339 adev
->gfx
.config
.gb_addr_config
);
342 static void vcn_v1_0_mc_resume_dpg_mode(struct amdgpu_device
*adev
)
344 uint32_t size
= AMDGPU_GPU_PAGE_ALIGN(adev
->vcn
.fw
->size
+ 4);
347 /* cache window 0: fw */
348 if (adev
->firmware
.load_type
== AMDGPU_FW_LOAD_PSP
) {
349 WREG32_SOC15_DPG_MODE(UVD
, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW
,
350 (adev
->firmware
.ucode
[AMDGPU_UCODE_ID_VCN
].tmr_mc_addr_lo
),
352 WREG32_SOC15_DPG_MODE(UVD
, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH
,
353 (adev
->firmware
.ucode
[AMDGPU_UCODE_ID_VCN
].tmr_mc_addr_hi
),
355 WREG32_SOC15_DPG_MODE(UVD
, 0, mmUVD_VCPU_CACHE_OFFSET0
, 0,
359 WREG32_SOC15_DPG_MODE(UVD
, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW
,
360 lower_32_bits(adev
->vcn
.gpu_addr
), 0xFFFFFFFF, 0);
361 WREG32_SOC15_DPG_MODE(UVD
, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH
,
362 upper_32_bits(adev
->vcn
.gpu_addr
), 0xFFFFFFFF, 0);
364 WREG32_SOC15_DPG_MODE(UVD
, 0, mmUVD_VCPU_CACHE_OFFSET0
,
365 AMDGPU_UVD_FIRMWARE_OFFSET
>> 3, 0xFFFFFFFF, 0);
368 WREG32_SOC15_DPG_MODE(UVD
, 0, mmUVD_VCPU_CACHE_SIZE0
, size
, 0xFFFFFFFF, 0);
370 /* cache window 1: stack */
371 WREG32_SOC15_DPG_MODE(UVD
, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW
,
372 lower_32_bits(adev
->vcn
.gpu_addr
+ offset
), 0xFFFFFFFF, 0);
373 WREG32_SOC15_DPG_MODE(UVD
, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH
,
374 upper_32_bits(adev
->vcn
.gpu_addr
+ offset
), 0xFFFFFFFF, 0);
375 WREG32_SOC15_DPG_MODE(UVD
, 0, mmUVD_VCPU_CACHE_OFFSET1
, 0,
377 WREG32_SOC15_DPG_MODE(UVD
, 0, mmUVD_VCPU_CACHE_SIZE1
, AMDGPU_VCN_STACK_SIZE
,
380 /* cache window 2: context */
381 WREG32_SOC15_DPG_MODE(UVD
, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW
,
382 lower_32_bits(adev
->vcn
.gpu_addr
+ offset
+ AMDGPU_VCN_STACK_SIZE
),
384 WREG32_SOC15_DPG_MODE(UVD
, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH
,
385 upper_32_bits(adev
->vcn
.gpu_addr
+ offset
+ AMDGPU_VCN_STACK_SIZE
),
387 WREG32_SOC15_DPG_MODE(UVD
, 0, mmUVD_VCPU_CACHE_OFFSET2
, 0, 0xFFFFFFFF, 0);
388 WREG32_SOC15_DPG_MODE(UVD
, 0, mmUVD_VCPU_CACHE_SIZE2
, AMDGPU_VCN_CONTEXT_SIZE
,
391 /* VCN global tiling registers */
392 WREG32_SOC15_DPG_MODE(UVD
, 0, mmUVD_UDEC_ADDR_CONFIG
,
393 adev
->gfx
.config
.gb_addr_config
, 0xFFFFFFFF, 0);
394 WREG32_SOC15_DPG_MODE(UVD
, 0, mmUVD_UDEC_DB_ADDR_CONFIG
,
395 adev
->gfx
.config
.gb_addr_config
, 0xFFFFFFFF, 0);
396 WREG32_SOC15_DPG_MODE(UVD
, 0, mmUVD_UDEC_DBW_ADDR_CONFIG
,
397 adev
->gfx
.config
.gb_addr_config
, 0xFFFFFFFF, 0);
398 WREG32_SOC15_DPG_MODE(UVD
, 0, mmUVD_UDEC_DBW_UV_ADDR_CONFIG
,
399 adev
->gfx
.config
.gb_addr_config
, 0xFFFFFFFF, 0);
400 WREG32_SOC15_DPG_MODE(UVD
, 0, mmUVD_MIF_CURR_ADDR_CONFIG
,
401 adev
->gfx
.config
.gb_addr_config
, 0xFFFFFFFF, 0);
402 WREG32_SOC15_DPG_MODE(UVD
, 0, mmUVD_MIF_CURR_UV_ADDR_CONFIG
,
403 adev
->gfx
.config
.gb_addr_config
, 0xFFFFFFFF, 0);
404 WREG32_SOC15_DPG_MODE(UVD
, 0, mmUVD_MIF_RECON1_ADDR_CONFIG
,
405 adev
->gfx
.config
.gb_addr_config
, 0xFFFFFFFF, 0);
406 WREG32_SOC15_DPG_MODE(UVD
, 0, mmUVD_MIF_RECON1_UV_ADDR_CONFIG
,
407 adev
->gfx
.config
.gb_addr_config
, 0xFFFFFFFF, 0);
408 WREG32_SOC15_DPG_MODE(UVD
, 0, mmUVD_MIF_REF_ADDR_CONFIG
,
409 adev
->gfx
.config
.gb_addr_config
, 0xFFFFFFFF, 0);
410 WREG32_SOC15_DPG_MODE(UVD
, 0, mmUVD_MIF_REF_UV_ADDR_CONFIG
,
411 adev
->gfx
.config
.gb_addr_config
, 0xFFFFFFFF, 0);
415 * vcn_v1_0_disable_clock_gating - disable VCN clock gating
417 * @adev: amdgpu_device pointer
418 * @sw: enable SW clock gating
420 * Disable clock gating for VCN block
422 static void vcn_v1_0_disable_clock_gating(struct amdgpu_device
*adev
)
426 /* JPEG disable CGC */
427 data
= RREG32_SOC15(VCN
, 0, mmJPEG_CGC_CTRL
);
429 if (adev
->cg_flags
& AMD_CG_SUPPORT_VCN_MGCG
)
430 data
|= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT
;
432 data
&= ~JPEG_CGC_CTRL__DYN_CLOCK_MODE_MASK
;
434 data
|= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT
;
435 data
|= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT
;
436 WREG32_SOC15(VCN
, 0, mmJPEG_CGC_CTRL
, data
);
438 data
= RREG32_SOC15(VCN
, 0, mmJPEG_CGC_GATE
);
439 data
&= ~(JPEG_CGC_GATE__JPEG_MASK
| JPEG_CGC_GATE__JPEG2_MASK
);
440 WREG32_SOC15(VCN
, 0, mmJPEG_CGC_GATE
, data
);
442 /* UVD disable CGC */
443 data
= RREG32_SOC15(VCN
, 0, mmUVD_CGC_CTRL
);
444 if (adev
->cg_flags
& AMD_CG_SUPPORT_VCN_MGCG
)
445 data
|= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT
;
447 data
&= ~ UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK
;
449 data
|= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT
;
450 data
|= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT
;
451 WREG32_SOC15(VCN
, 0, mmUVD_CGC_CTRL
, data
);
453 data
= RREG32_SOC15(VCN
, 0, mmUVD_CGC_GATE
);
454 data
&= ~(UVD_CGC_GATE__SYS_MASK
455 | UVD_CGC_GATE__UDEC_MASK
456 | UVD_CGC_GATE__MPEG2_MASK
457 | UVD_CGC_GATE__REGS_MASK
458 | UVD_CGC_GATE__RBC_MASK
459 | UVD_CGC_GATE__LMI_MC_MASK
460 | UVD_CGC_GATE__LMI_UMC_MASK
461 | UVD_CGC_GATE__IDCT_MASK
462 | UVD_CGC_GATE__MPRD_MASK
463 | UVD_CGC_GATE__MPC_MASK
464 | UVD_CGC_GATE__LBSI_MASK
465 | UVD_CGC_GATE__LRBBM_MASK
466 | UVD_CGC_GATE__UDEC_RE_MASK
467 | UVD_CGC_GATE__UDEC_CM_MASK
468 | UVD_CGC_GATE__UDEC_IT_MASK
469 | UVD_CGC_GATE__UDEC_DB_MASK
470 | UVD_CGC_GATE__UDEC_MP_MASK
471 | UVD_CGC_GATE__WCB_MASK
472 | UVD_CGC_GATE__VCPU_MASK
473 | UVD_CGC_GATE__SCPU_MASK
);
474 WREG32_SOC15(VCN
, 0, mmUVD_CGC_GATE
, data
);
476 data
= RREG32_SOC15(VCN
, 0, mmUVD_CGC_CTRL
);
477 data
&= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK
478 | UVD_CGC_CTRL__UDEC_CM_MODE_MASK
479 | UVD_CGC_CTRL__UDEC_IT_MODE_MASK
480 | UVD_CGC_CTRL__UDEC_DB_MODE_MASK
481 | UVD_CGC_CTRL__UDEC_MP_MODE_MASK
482 | UVD_CGC_CTRL__SYS_MODE_MASK
483 | UVD_CGC_CTRL__UDEC_MODE_MASK
484 | UVD_CGC_CTRL__MPEG2_MODE_MASK
485 | UVD_CGC_CTRL__REGS_MODE_MASK
486 | UVD_CGC_CTRL__RBC_MODE_MASK
487 | UVD_CGC_CTRL__LMI_MC_MODE_MASK
488 | UVD_CGC_CTRL__LMI_UMC_MODE_MASK
489 | UVD_CGC_CTRL__IDCT_MODE_MASK
490 | UVD_CGC_CTRL__MPRD_MODE_MASK
491 | UVD_CGC_CTRL__MPC_MODE_MASK
492 | UVD_CGC_CTRL__LBSI_MODE_MASK
493 | UVD_CGC_CTRL__LRBBM_MODE_MASK
494 | UVD_CGC_CTRL__WCB_MODE_MASK
495 | UVD_CGC_CTRL__VCPU_MODE_MASK
496 | UVD_CGC_CTRL__SCPU_MODE_MASK
);
497 WREG32_SOC15(VCN
, 0, mmUVD_CGC_CTRL
, data
);
500 data
= RREG32_SOC15(VCN
, 0, mmUVD_SUVD_CGC_GATE
);
501 data
|= (UVD_SUVD_CGC_GATE__SRE_MASK
502 | UVD_SUVD_CGC_GATE__SIT_MASK
503 | UVD_SUVD_CGC_GATE__SMP_MASK
504 | UVD_SUVD_CGC_GATE__SCM_MASK
505 | UVD_SUVD_CGC_GATE__SDB_MASK
506 | UVD_SUVD_CGC_GATE__SRE_H264_MASK
507 | UVD_SUVD_CGC_GATE__SRE_HEVC_MASK
508 | UVD_SUVD_CGC_GATE__SIT_H264_MASK
509 | UVD_SUVD_CGC_GATE__SIT_HEVC_MASK
510 | UVD_SUVD_CGC_GATE__SCM_H264_MASK
511 | UVD_SUVD_CGC_GATE__SCM_HEVC_MASK
512 | UVD_SUVD_CGC_GATE__SDB_H264_MASK
513 | UVD_SUVD_CGC_GATE__SDB_HEVC_MASK
514 | UVD_SUVD_CGC_GATE__SCLR_MASK
515 | UVD_SUVD_CGC_GATE__UVD_SC_MASK
516 | UVD_SUVD_CGC_GATE__ENT_MASK
517 | UVD_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK
518 | UVD_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK
519 | UVD_SUVD_CGC_GATE__SITE_MASK
520 | UVD_SUVD_CGC_GATE__SRE_VP9_MASK
521 | UVD_SUVD_CGC_GATE__SCM_VP9_MASK
522 | UVD_SUVD_CGC_GATE__SIT_VP9_DEC_MASK
523 | UVD_SUVD_CGC_GATE__SDB_VP9_MASK
524 | UVD_SUVD_CGC_GATE__IME_HEVC_MASK
);
525 WREG32_SOC15(VCN
, 0, mmUVD_SUVD_CGC_GATE
, data
);
527 data
= RREG32_SOC15(VCN
, 0, mmUVD_SUVD_CGC_CTRL
);
528 data
&= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
529 | UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
530 | UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
531 | UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
532 | UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
533 | UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
534 | UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
535 | UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
536 | UVD_SUVD_CGC_CTRL__IME_MODE_MASK
537 | UVD_SUVD_CGC_CTRL__SITE_MODE_MASK
);
538 WREG32_SOC15(VCN
, 0, mmUVD_SUVD_CGC_CTRL
, data
);
542 * vcn_v1_0_enable_clock_gating - enable VCN clock gating
544 * @adev: amdgpu_device pointer
545 * @sw: enable SW clock gating
547 * Enable clock gating for VCN block
549 static void vcn_v1_0_enable_clock_gating(struct amdgpu_device
*adev
)
553 /* enable JPEG CGC */
554 data
= RREG32_SOC15(VCN
, 0, mmJPEG_CGC_CTRL
);
555 if (adev
->cg_flags
& AMD_CG_SUPPORT_VCN_MGCG
)
556 data
|= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT
;
558 data
|= 0 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT
;
559 data
|= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT
;
560 data
|= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT
;
561 WREG32_SOC15(VCN
, 0, mmJPEG_CGC_CTRL
, data
);
563 data
= RREG32_SOC15(VCN
, 0, mmJPEG_CGC_GATE
);
564 data
|= (JPEG_CGC_GATE__JPEG_MASK
| JPEG_CGC_GATE__JPEG2_MASK
);
565 WREG32_SOC15(VCN
, 0, mmJPEG_CGC_GATE
, data
);
568 data
= RREG32_SOC15(VCN
, 0, mmUVD_CGC_CTRL
);
569 if (adev
->cg_flags
& AMD_CG_SUPPORT_VCN_MGCG
)
570 data
|= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT
;
572 data
|= 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT
;
573 data
|= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT
;
574 data
|= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT
;
575 WREG32_SOC15(VCN
, 0, mmUVD_CGC_CTRL
, data
);
577 data
= RREG32_SOC15(VCN
, 0, mmUVD_CGC_CTRL
);
578 data
|= (UVD_CGC_CTRL__UDEC_RE_MODE_MASK
579 | UVD_CGC_CTRL__UDEC_CM_MODE_MASK
580 | UVD_CGC_CTRL__UDEC_IT_MODE_MASK
581 | UVD_CGC_CTRL__UDEC_DB_MODE_MASK
582 | UVD_CGC_CTRL__UDEC_MP_MODE_MASK
583 | UVD_CGC_CTRL__SYS_MODE_MASK
584 | UVD_CGC_CTRL__UDEC_MODE_MASK
585 | UVD_CGC_CTRL__MPEG2_MODE_MASK
586 | UVD_CGC_CTRL__REGS_MODE_MASK
587 | UVD_CGC_CTRL__RBC_MODE_MASK
588 | UVD_CGC_CTRL__LMI_MC_MODE_MASK
589 | UVD_CGC_CTRL__LMI_UMC_MODE_MASK
590 | UVD_CGC_CTRL__IDCT_MODE_MASK
591 | UVD_CGC_CTRL__MPRD_MODE_MASK
592 | UVD_CGC_CTRL__MPC_MODE_MASK
593 | UVD_CGC_CTRL__LBSI_MODE_MASK
594 | UVD_CGC_CTRL__LRBBM_MODE_MASK
595 | UVD_CGC_CTRL__WCB_MODE_MASK
596 | UVD_CGC_CTRL__VCPU_MODE_MASK
597 | UVD_CGC_CTRL__SCPU_MODE_MASK
);
598 WREG32_SOC15(VCN
, 0, mmUVD_CGC_CTRL
, data
);
600 data
= RREG32_SOC15(VCN
, 0, mmUVD_SUVD_CGC_CTRL
);
601 data
|= (UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
602 | UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
603 | UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
604 | UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
605 | UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
606 | UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
607 | UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
608 | UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
609 | UVD_SUVD_CGC_CTRL__IME_MODE_MASK
610 | UVD_SUVD_CGC_CTRL__SITE_MODE_MASK
);
611 WREG32_SOC15(VCN
, 0, mmUVD_SUVD_CGC_CTRL
, data
);
614 static void vcn_v1_0_clock_gating_dpg_mode(struct amdgpu_device
*adev
, uint8_t sram_sel
)
616 uint32_t reg_data
= 0;
618 /* disable JPEG CGC */
619 if (adev
->cg_flags
& AMD_CG_SUPPORT_VCN_MGCG
)
620 reg_data
= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT
;
622 reg_data
= 0 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT
;
623 reg_data
|= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT
;
624 reg_data
|= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT
;
625 WREG32_SOC15_DPG_MODE(UVD
, 0, mmJPEG_CGC_CTRL
, reg_data
, 0xFFFFFFFF, sram_sel
);
627 WREG32_SOC15_DPG_MODE(UVD
, 0, mmJPEG_CGC_GATE
, 0, 0xFFFFFFFF, sram_sel
);
629 /* enable sw clock gating control */
630 if (adev
->cg_flags
& AMD_CG_SUPPORT_VCN_MGCG
)
631 reg_data
= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT
;
633 reg_data
= 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT
;
634 reg_data
|= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT
;
635 reg_data
|= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT
;
636 reg_data
&= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK
|
637 UVD_CGC_CTRL__UDEC_CM_MODE_MASK
|
638 UVD_CGC_CTRL__UDEC_IT_MODE_MASK
|
639 UVD_CGC_CTRL__UDEC_DB_MODE_MASK
|
640 UVD_CGC_CTRL__UDEC_MP_MODE_MASK
|
641 UVD_CGC_CTRL__SYS_MODE_MASK
|
642 UVD_CGC_CTRL__UDEC_MODE_MASK
|
643 UVD_CGC_CTRL__MPEG2_MODE_MASK
|
644 UVD_CGC_CTRL__REGS_MODE_MASK
|
645 UVD_CGC_CTRL__RBC_MODE_MASK
|
646 UVD_CGC_CTRL__LMI_MC_MODE_MASK
|
647 UVD_CGC_CTRL__LMI_UMC_MODE_MASK
|
648 UVD_CGC_CTRL__IDCT_MODE_MASK
|
649 UVD_CGC_CTRL__MPRD_MODE_MASK
|
650 UVD_CGC_CTRL__MPC_MODE_MASK
|
651 UVD_CGC_CTRL__LBSI_MODE_MASK
|
652 UVD_CGC_CTRL__LRBBM_MODE_MASK
|
653 UVD_CGC_CTRL__WCB_MODE_MASK
|
654 UVD_CGC_CTRL__VCPU_MODE_MASK
|
655 UVD_CGC_CTRL__SCPU_MODE_MASK
);
656 WREG32_SOC15_DPG_MODE(UVD
, 0, mmUVD_CGC_CTRL
, reg_data
, 0xFFFFFFFF, sram_sel
);
658 /* turn off clock gating */
659 WREG32_SOC15_DPG_MODE(UVD
, 0, mmUVD_CGC_GATE
, 0, 0xFFFFFFFF, sram_sel
);
661 /* turn on SUVD clock gating */
662 WREG32_SOC15_DPG_MODE(UVD
, 0, mmUVD_SUVD_CGC_GATE
, 1, 0xFFFFFFFF, sram_sel
);
664 /* turn on sw mode in UVD_SUVD_CGC_CTRL */
665 WREG32_SOC15_DPG_MODE(UVD
, 0, mmUVD_SUVD_CGC_CTRL
, 0, 0xFFFFFFFF, sram_sel
);
668 static void vcn_1_0_disable_static_power_gating(struct amdgpu_device
*adev
)
673 if (adev
->pg_flags
& AMD_PG_SUPPORT_VCN
) {
674 data
= (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
675 | 1 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
676 | 2 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
677 | 2 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
678 | 2 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
679 | 2 << UVD_PGFSM_CONFIG__UVDIL_PWR_CONFIG__SHIFT
680 | 2 << UVD_PGFSM_CONFIG__UVDIR_PWR_CONFIG__SHIFT
681 | 2 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
682 | 2 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
683 | 2 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT
684 | 2 << UVD_PGFSM_CONFIG__UVDW_PWR_CONFIG__SHIFT
);
686 WREG32_SOC15(VCN
, 0, mmUVD_PGFSM_CONFIG
, data
);
687 SOC15_WAIT_ON_RREG(VCN
, 0, mmUVD_PGFSM_STATUS
, UVD_PGFSM_STATUS__UVDM_UVDU_PWR_ON
, 0xFFFFFF, ret
);
689 data
= (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
690 | 1 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
691 | 1 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
692 | 1 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
693 | 1 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
694 | 1 << UVD_PGFSM_CONFIG__UVDIL_PWR_CONFIG__SHIFT
695 | 1 << UVD_PGFSM_CONFIG__UVDIR_PWR_CONFIG__SHIFT
696 | 1 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
697 | 1 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
698 | 1 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT
699 | 1 << UVD_PGFSM_CONFIG__UVDW_PWR_CONFIG__SHIFT
);
700 WREG32_SOC15(VCN
, 0, mmUVD_PGFSM_CONFIG
, data
);
701 SOC15_WAIT_ON_RREG(VCN
, 0, mmUVD_PGFSM_STATUS
, 0, 0xFFFFFFFF, ret
);
704 /* polling UVD_PGFSM_STATUS to confirm UVDM_PWR_STATUS , UVDU_PWR_STATUS are 0 (power on) */
706 data
= RREG32_SOC15(VCN
, 0, mmUVD_POWER_STATUS
);
708 if (adev
->pg_flags
& AMD_PG_SUPPORT_VCN
)
709 data
|= UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON
| UVD_POWER_STATUS__UVD_PG_EN_MASK
;
711 WREG32_SOC15(VCN
, 0, mmUVD_POWER_STATUS
, data
);
714 static void vcn_1_0_enable_static_power_gating(struct amdgpu_device
*adev
)
719 if (adev
->pg_flags
& AMD_PG_SUPPORT_VCN
) {
720 /* Before power off, this indicator has to be turned on */
721 data
= RREG32_SOC15(VCN
, 0, mmUVD_POWER_STATUS
);
722 data
&= ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK
;
723 data
|= UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF
;
724 WREG32_SOC15(VCN
, 0, mmUVD_POWER_STATUS
, data
);
727 data
= (2 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
728 | 2 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
729 | 2 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
730 | 2 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
731 | 2 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
732 | 2 << UVD_PGFSM_CONFIG__UVDIL_PWR_CONFIG__SHIFT
733 | 2 << UVD_PGFSM_CONFIG__UVDIR_PWR_CONFIG__SHIFT
734 | 2 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
735 | 2 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
736 | 2 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT
737 | 2 << UVD_PGFSM_CONFIG__UVDW_PWR_CONFIG__SHIFT
);
739 WREG32_SOC15(VCN
, 0, mmUVD_PGFSM_CONFIG
, data
);
741 data
= (2 << UVD_PGFSM_STATUS__UVDM_PWR_STATUS__SHIFT
742 | 2 << UVD_PGFSM_STATUS__UVDU_PWR_STATUS__SHIFT
743 | 2 << UVD_PGFSM_STATUS__UVDF_PWR_STATUS__SHIFT
744 | 2 << UVD_PGFSM_STATUS__UVDC_PWR_STATUS__SHIFT
745 | 2 << UVD_PGFSM_STATUS__UVDB_PWR_STATUS__SHIFT
746 | 2 << UVD_PGFSM_STATUS__UVDIL_PWR_STATUS__SHIFT
747 | 2 << UVD_PGFSM_STATUS__UVDIR_PWR_STATUS__SHIFT
748 | 2 << UVD_PGFSM_STATUS__UVDTD_PWR_STATUS__SHIFT
749 | 2 << UVD_PGFSM_STATUS__UVDTE_PWR_STATUS__SHIFT
750 | 2 << UVD_PGFSM_STATUS__UVDE_PWR_STATUS__SHIFT
751 | 2 << UVD_PGFSM_STATUS__UVDW_PWR_STATUS__SHIFT
);
752 SOC15_WAIT_ON_RREG(VCN
, 0, mmUVD_PGFSM_STATUS
, data
, 0xFFFFFFFF, ret
);
757 * vcn_v1_0_start - start VCN block
759 * @adev: amdgpu_device pointer
761 * Setup and start the VCN block
763 static int vcn_v1_0_start_spg_mode(struct amdgpu_device
*adev
)
765 struct amdgpu_ring
*ring
= &adev
->vcn
.ring_dec
;
766 uint32_t rb_bufsz
, tmp
;
767 uint32_t lmi_swap_cntl
;
770 /* disable byte swapping */
773 vcn_1_0_disable_static_power_gating(adev
);
775 tmp
= RREG32_SOC15(UVD
, 0, mmUVD_STATUS
) | UVD_STATUS__UVD_BUSY
;
776 WREG32_SOC15(UVD
, 0, mmUVD_STATUS
, tmp
);
778 /* disable clock gating */
779 vcn_v1_0_disable_clock_gating(adev
);
781 /* disable interupt */
782 WREG32_P(SOC15_REG_OFFSET(UVD
, 0, mmUVD_MASTINT_EN
), 0,
783 ~UVD_MASTINT_EN__VCPU_EN_MASK
);
785 /* initialize VCN memory controller */
786 tmp
= RREG32_SOC15(UVD
, 0, mmUVD_LMI_CTRL
);
787 WREG32_SOC15(UVD
, 0, mmUVD_LMI_CTRL
, tmp
|
788 UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK
|
789 UVD_LMI_CTRL__MASK_MC_URGENT_MASK
|
790 UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK
|
791 UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK
);
794 /* swap (8 in 32) RB and IB */
797 WREG32_SOC15(UVD
, 0, mmUVD_LMI_SWAP_CNTL
, lmi_swap_cntl
);
799 tmp
= RREG32_SOC15(UVD
, 0, mmUVD_MPC_CNTL
);
800 tmp
&= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK
;
801 tmp
|= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT
;
802 WREG32_SOC15(UVD
, 0, mmUVD_MPC_CNTL
, tmp
);
804 WREG32_SOC15(UVD
, 0, mmUVD_MPC_SET_MUXA0
,
805 ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT
) |
806 (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT
) |
807 (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT
) |
808 (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT
)));
810 WREG32_SOC15(UVD
, 0, mmUVD_MPC_SET_MUXB0
,
811 ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT
) |
812 (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT
) |
813 (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT
) |
814 (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT
)));
816 WREG32_SOC15(UVD
, 0, mmUVD_MPC_SET_MUX
,
817 ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT
) |
818 (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT
) |
819 (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT
)));
821 vcn_v1_0_mc_resume_spg_mode(adev
);
823 WREG32_SOC15(UVD
, 0, mmUVD_REG_XX_MASK
, 0x10);
824 WREG32_SOC15(UVD
, 0, mmUVD_RBC_XX_IB_REG_CHECK
,
825 RREG32_SOC15(UVD
, 0, mmUVD_RBC_XX_IB_REG_CHECK
) | 0x3);
827 /* enable VCPU clock */
828 WREG32_SOC15(UVD
, 0, mmUVD_VCPU_CNTL
, UVD_VCPU_CNTL__CLK_EN_MASK
);
830 /* boot up the VCPU */
831 WREG32_P(SOC15_REG_OFFSET(UVD
, 0, mmUVD_SOFT_RESET
), 0,
832 ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK
);
835 WREG32_P(SOC15_REG_OFFSET(UVD
, 0, mmUVD_LMI_CTRL2
), 0,
836 ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK
);
838 tmp
= RREG32_SOC15(UVD
, 0, mmUVD_SOFT_RESET
);
839 tmp
&= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK
;
840 tmp
&= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK
;
841 WREG32_SOC15(UVD
, 0, mmUVD_SOFT_RESET
, tmp
);
843 for (i
= 0; i
< 10; ++i
) {
846 for (j
= 0; j
< 100; ++j
) {
847 status
= RREG32_SOC15(UVD
, 0, mmUVD_STATUS
);
848 if (status
& UVD_STATUS__IDLE
)
853 if (status
& UVD_STATUS__IDLE
)
856 DRM_ERROR("VCN decode not responding, trying to reset the VCPU!!!\n");
857 WREG32_P(SOC15_REG_OFFSET(UVD
, 0, mmUVD_SOFT_RESET
),
858 UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK
,
859 ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK
);
861 WREG32_P(SOC15_REG_OFFSET(UVD
, 0, mmUVD_SOFT_RESET
), 0,
862 ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK
);
868 DRM_ERROR("VCN decode not responding, giving up!!!\n");
871 /* enable master interrupt */
872 WREG32_P(SOC15_REG_OFFSET(UVD
, 0, mmUVD_MASTINT_EN
),
873 UVD_MASTINT_EN__VCPU_EN_MASK
, ~UVD_MASTINT_EN__VCPU_EN_MASK
);
875 /* enable system interrupt for JRBC, TODO: move to set interrupt*/
876 WREG32_P(SOC15_REG_OFFSET(UVD
, 0, mmUVD_SYS_INT_EN
),
877 UVD_SYS_INT_EN__UVD_JRBC_EN_MASK
,
878 ~UVD_SYS_INT_EN__UVD_JRBC_EN_MASK
);
880 /* clear the busy bit of UVD_STATUS */
881 tmp
= RREG32_SOC15(UVD
, 0, mmUVD_STATUS
) & ~UVD_STATUS__UVD_BUSY
;
882 WREG32_SOC15(UVD
, 0, mmUVD_STATUS
, tmp
);
884 /* force RBC into idle state */
885 rb_bufsz
= order_base_2(ring
->ring_size
);
886 tmp
= REG_SET_FIELD(0, UVD_RBC_RB_CNTL
, RB_BUFSZ
, rb_bufsz
);
887 tmp
= REG_SET_FIELD(tmp
, UVD_RBC_RB_CNTL
, RB_BLKSZ
, 1);
888 tmp
= REG_SET_FIELD(tmp
, UVD_RBC_RB_CNTL
, RB_NO_FETCH
, 1);
889 tmp
= REG_SET_FIELD(tmp
, UVD_RBC_RB_CNTL
, RB_NO_UPDATE
, 1);
890 tmp
= REG_SET_FIELD(tmp
, UVD_RBC_RB_CNTL
, RB_RPTR_WR_EN
, 1);
891 WREG32_SOC15(UVD
, 0, mmUVD_RBC_RB_CNTL
, tmp
);
893 /* set the write pointer delay */
894 WREG32_SOC15(UVD
, 0, mmUVD_RBC_RB_WPTR_CNTL
, 0);
896 /* set the wb address */
897 WREG32_SOC15(UVD
, 0, mmUVD_RBC_RB_RPTR_ADDR
,
898 (upper_32_bits(ring
->gpu_addr
) >> 2));
900 /* programm the RB_BASE for ring buffer */
901 WREG32_SOC15(UVD
, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW
,
902 lower_32_bits(ring
->gpu_addr
));
903 WREG32_SOC15(UVD
, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH
,
904 upper_32_bits(ring
->gpu_addr
));
906 /* Initialize the ring buffer's read and write pointers */
907 WREG32_SOC15(UVD
, 0, mmUVD_RBC_RB_RPTR
, 0);
909 WREG32_SOC15(UVD
, 0, mmUVD_SCRATCH2
, 0);
911 ring
->wptr
= RREG32_SOC15(UVD
, 0, mmUVD_RBC_RB_RPTR
);
912 WREG32_SOC15(UVD
, 0, mmUVD_RBC_RB_WPTR
,
913 lower_32_bits(ring
->wptr
));
915 WREG32_P(SOC15_REG_OFFSET(UVD
, 0, mmUVD_RBC_RB_CNTL
), 0,
916 ~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK
);
918 ring
= &adev
->vcn
.ring_enc
[0];
919 WREG32_SOC15(UVD
, 0, mmUVD_RB_RPTR
, lower_32_bits(ring
->wptr
));
920 WREG32_SOC15(UVD
, 0, mmUVD_RB_WPTR
, lower_32_bits(ring
->wptr
));
921 WREG32_SOC15(UVD
, 0, mmUVD_RB_BASE_LO
, ring
->gpu_addr
);
922 WREG32_SOC15(UVD
, 0, mmUVD_RB_BASE_HI
, upper_32_bits(ring
->gpu_addr
));
923 WREG32_SOC15(UVD
, 0, mmUVD_RB_SIZE
, ring
->ring_size
/ 4);
925 ring
= &adev
->vcn
.ring_enc
[1];
926 WREG32_SOC15(UVD
, 0, mmUVD_RB_RPTR2
, lower_32_bits(ring
->wptr
));
927 WREG32_SOC15(UVD
, 0, mmUVD_RB_WPTR2
, lower_32_bits(ring
->wptr
));
928 WREG32_SOC15(UVD
, 0, mmUVD_RB_BASE_LO2
, ring
->gpu_addr
);
929 WREG32_SOC15(UVD
, 0, mmUVD_RB_BASE_HI2
, upper_32_bits(ring
->gpu_addr
));
930 WREG32_SOC15(UVD
, 0, mmUVD_RB_SIZE2
, ring
->ring_size
/ 4);
932 ring
= &adev
->vcn
.ring_jpeg
;
933 WREG32_SOC15(UVD
, 0, mmUVD_LMI_JRBC_RB_VMID
, 0);
934 WREG32_SOC15(UVD
, 0, mmUVD_JRBC_RB_CNTL
, UVD_JRBC_RB_CNTL__RB_NO_FETCH_MASK
|
935 UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK
);
936 WREG32_SOC15(UVD
, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_LOW
, lower_32_bits(ring
->gpu_addr
));
937 WREG32_SOC15(UVD
, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_HIGH
, upper_32_bits(ring
->gpu_addr
));
938 WREG32_SOC15(UVD
, 0, mmUVD_JRBC_RB_RPTR
, 0);
939 WREG32_SOC15(UVD
, 0, mmUVD_JRBC_RB_WPTR
, 0);
940 WREG32_SOC15(UVD
, 0, mmUVD_JRBC_RB_CNTL
, UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK
);
942 /* initialize wptr */
943 ring
->wptr
= RREG32_SOC15(UVD
, 0, mmUVD_JRBC_RB_WPTR
);
945 /* copy patch commands to the jpeg ring */
946 vcn_v1_0_jpeg_ring_set_patch_ring(ring
,
947 (ring
->wptr
+ ring
->max_dw
* amdgpu_sched_hw_submission
));
952 static int vcn_v1_0_start_dpg_mode(struct amdgpu_device
*adev
)
954 struct amdgpu_ring
*ring
= &adev
->vcn
.ring_dec
;
955 uint32_t rb_bufsz
, tmp
;
956 uint32_t lmi_swap_cntl
;
958 /* disable byte swapping */
961 vcn_1_0_enable_static_power_gating(adev
);
963 /* enable dynamic power gating mode */
964 tmp
= RREG32_SOC15(UVD
, 0, mmUVD_POWER_STATUS
);
965 tmp
|= UVD_POWER_STATUS__UVD_PG_MODE_MASK
;
966 tmp
|= UVD_POWER_STATUS__UVD_PG_EN_MASK
;
967 WREG32_SOC15(UVD
, 0, mmUVD_POWER_STATUS
, tmp
);
969 /* enable clock gating */
970 vcn_v1_0_clock_gating_dpg_mode(adev
, 0);
972 /* enable VCPU clock */
973 tmp
= (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT
);
974 tmp
|= UVD_VCPU_CNTL__CLK_EN_MASK
;
975 tmp
|= UVD_VCPU_CNTL__MIF_WR_LOW_THRESHOLD_BP_MASK
;
976 WREG32_SOC15_DPG_MODE(UVD
, 0, mmUVD_VCPU_CNTL
, tmp
, 0xFFFFFFFF, 0);
978 /* disable interupt */
979 WREG32_SOC15_DPG_MODE(UVD
, 0, mmUVD_MASTINT_EN
,
980 0, UVD_MASTINT_EN__VCPU_EN_MASK
, 0);
982 /* initialize VCN memory controller */
983 WREG32_SOC15_DPG_MODE(UVD
, 0, mmUVD_LMI_CTRL
,
984 (8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT
) |
985 UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK
|
986 UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK
|
987 UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK
|
988 UVD_LMI_CTRL__REQ_MODE_MASK
|
989 UVD_LMI_CTRL__CRC_RESET_MASK
|
990 UVD_LMI_CTRL__MASK_MC_URGENT_MASK
|
991 0x00100000L
, 0xFFFFFFFF, 0);
994 /* swap (8 in 32) RB and IB */
997 WREG32_SOC15_DPG_MODE(UVD
, 0, mmUVD_LMI_SWAP_CNTL
, lmi_swap_cntl
, 0xFFFFFFFF, 0);
999 WREG32_SOC15_DPG_MODE(UVD
, 0, mmUVD_MPC_CNTL
,
1000 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT
, 0xFFFFFFFF, 0);
1002 WREG32_SOC15_DPG_MODE(UVD
, 0, mmUVD_MPC_SET_MUXA0
,
1003 ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT
) |
1004 (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT
) |
1005 (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT
) |
1006 (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT
)), 0xFFFFFFFF, 0);
1008 WREG32_SOC15_DPG_MODE(UVD
, 0, mmUVD_MPC_SET_MUXB0
,
1009 ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT
) |
1010 (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT
) |
1011 (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT
) |
1012 (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT
)), 0xFFFFFFFF, 0);
1014 WREG32_SOC15_DPG_MODE(UVD
, 0, mmUVD_MPC_SET_MUX
,
1015 ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT
) |
1016 (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT
) |
1017 (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT
)), 0xFFFFFFFF, 0);
1019 vcn_v1_0_mc_resume_dpg_mode(adev
);
1021 WREG32_SOC15_DPG_MODE(UVD
, 0, mmUVD_REG_XX_MASK
, 0x10, 0xFFFFFFFF, 0);
1022 WREG32_SOC15_DPG_MODE(UVD
, 0, mmUVD_RBC_XX_IB_REG_CHECK
, 0x3, 0xFFFFFFFF, 0);
1024 /* boot up the VCPU */
1025 WREG32_SOC15_DPG_MODE(UVD
, 0, mmUVD_SOFT_RESET
, 0, 0xFFFFFFFF, 0);
1028 WREG32_SOC15_DPG_MODE(UVD
, 0, mmUVD_LMI_CTRL2
,
1029 0x1F << UVD_LMI_CTRL2__RE_OFLD_MIF_WR_REQ_NUM__SHIFT
,
1032 /* enable master interrupt */
1033 WREG32_SOC15_DPG_MODE(UVD
, 0, mmUVD_MASTINT_EN
,
1034 UVD_MASTINT_EN__VCPU_EN_MASK
, UVD_MASTINT_EN__VCPU_EN_MASK
, 0);
1036 vcn_v1_0_clock_gating_dpg_mode(adev
, 1);
1037 /* setup mmUVD_LMI_CTRL */
1038 WREG32_SOC15_DPG_MODE(UVD
, 0, mmUVD_LMI_CTRL
,
1039 (8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT
) |
1040 UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK
|
1041 UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK
|
1042 UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK
|
1043 UVD_LMI_CTRL__REQ_MODE_MASK
|
1044 UVD_LMI_CTRL__CRC_RESET_MASK
|
1045 UVD_LMI_CTRL__MASK_MC_URGENT_MASK
|
1046 0x00100000L
, 0xFFFFFFFF, 1);
1048 tmp
= adev
->gfx
.config
.gb_addr_config
;
1049 /* setup VCN global tiling registers */
1050 WREG32_SOC15_DPG_MODE(UVD
, 0, mmUVD_JPEG_ADDR_CONFIG
, tmp
, 0xFFFFFFFF, 1);
1051 WREG32_SOC15_DPG_MODE(UVD
, 0, mmUVD_JPEG_UV_ADDR_CONFIG
, tmp
, 0xFFFFFFFF, 1);
1053 /* enable System Interrupt for JRBC */
1054 WREG32_SOC15_DPG_MODE(UVD
, 0, mmUVD_SYS_INT_EN
,
1055 UVD_SYS_INT_EN__UVD_JRBC_EN_MASK
, 0xFFFFFFFF, 1);
1057 /* force RBC into idle state */
1058 rb_bufsz
= order_base_2(ring
->ring_size
);
1059 tmp
= REG_SET_FIELD(0, UVD_RBC_RB_CNTL
, RB_BUFSZ
, rb_bufsz
);
1060 tmp
= REG_SET_FIELD(tmp
, UVD_RBC_RB_CNTL
, RB_BLKSZ
, 1);
1061 tmp
= REG_SET_FIELD(tmp
, UVD_RBC_RB_CNTL
, RB_NO_FETCH
, 1);
1062 tmp
= REG_SET_FIELD(tmp
, UVD_RBC_RB_CNTL
, RB_NO_UPDATE
, 1);
1063 tmp
= REG_SET_FIELD(tmp
, UVD_RBC_RB_CNTL
, RB_RPTR_WR_EN
, 1);
1064 WREG32_SOC15(UVD
, 0, mmUVD_RBC_RB_CNTL
, tmp
);
1066 /* set the write pointer delay */
1067 WREG32_SOC15(UVD
, 0, mmUVD_RBC_RB_WPTR_CNTL
, 0);
1069 /* set the wb address */
1070 WREG32_SOC15(UVD
, 0, mmUVD_RBC_RB_RPTR_ADDR
,
1071 (upper_32_bits(ring
->gpu_addr
) >> 2));
1073 /* programm the RB_BASE for ring buffer */
1074 WREG32_SOC15(UVD
, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW
,
1075 lower_32_bits(ring
->gpu_addr
));
1076 WREG32_SOC15(UVD
, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH
,
1077 upper_32_bits(ring
->gpu_addr
));
1079 /* Initialize the ring buffer's read and write pointers */
1080 WREG32_SOC15(UVD
, 0, mmUVD_RBC_RB_RPTR
, 0);
1082 WREG32_SOC15(UVD
, 0, mmUVD_SCRATCH2
, 0);
1084 ring
->wptr
= RREG32_SOC15(UVD
, 0, mmUVD_RBC_RB_RPTR
);
1085 WREG32_SOC15(UVD
, 0, mmUVD_RBC_RB_WPTR
,
1086 lower_32_bits(ring
->wptr
));
1088 WREG32_P(SOC15_REG_OFFSET(UVD
, 0, mmUVD_RBC_RB_CNTL
), 0,
1089 ~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK
);
1091 /* initialize JPEG wptr */
1092 ring
= &adev
->vcn
.ring_jpeg
;
1093 ring
->wptr
= RREG32_SOC15(UVD
, 0, mmUVD_JRBC_RB_WPTR
);
1095 /* copy patch commands to the jpeg ring */
1096 vcn_v1_0_jpeg_ring_set_patch_ring(ring
,
1097 (ring
->wptr
+ ring
->max_dw
* amdgpu_sched_hw_submission
));
1102 static int vcn_v1_0_start(struct amdgpu_device
*adev
)
1106 if (adev
->pg_flags
& AMD_PG_SUPPORT_VCN_DPG
)
1107 r
= vcn_v1_0_start_dpg_mode(adev
);
1109 r
= vcn_v1_0_start_spg_mode(adev
);
/**
 * vcn_v1_0_stop_spg_mode - stop VCN block (static power gating path)
 *
 * @adev: amdgpu_device pointer
 *
 * stop the VCN block: wait for the engine and LMI to drain, put the VCPU
 * into reset, disable its clock, reset the LMI/UMC, then re-enable clock
 * and static power gating. Returns 0.
 *
 * NOTE(review): local declarations and final return reconstructed — confirm.
 */
static int vcn_v1_0_stop_spg_mode(struct amdgpu_device *adev)
{
	int ret_code, tmp;

	/* wait for the engine to report idle */
	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_STATUS, UVD_STATUS__IDLE, 0x7, ret_code);

	/* wait for all LMI read/write traffic to be clean */
	tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
		UVD_LMI_STATUS__READ_CLEAN_MASK |
		UVD_LMI_STATUS__WRITE_CLEAN_MASK |
		UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_LMI_STATUS, tmp, tmp, ret_code);

	/* put VCPU into reset */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
		UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
		~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);

	/* wait for UMC traffic to drain */
	tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
		UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_LMI_STATUS, tmp, tmp, ret_code);

	/* disable VCPU clock */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CNTL), 0,
		~UVD_VCPU_CNTL__CLK_EN_MASK);

	/* reset LMI UMC/LMI */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
		UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK,
		~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);

	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
		UVD_SOFT_RESET__LMI_SOFT_RESET_MASK,
		~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK);

	WREG32_SOC15(UVD, 0, mmUVD_STATUS, 0);

	vcn_v1_0_enable_clock_gating(adev);
	vcn_1_0_enable_static_power_gating(adev);

	return 0;
}
/**
 * vcn_v1_0_stop_dpg_mode - stop VCN block (dynamic power gating path)
 *
 * @adev: amdgpu_device pointer
 *
 * Wait for the block to power down its tiles and for every ring's read
 * pointer to catch up with its write pointer, then turn dynamic power
 * gating mode off. Returns 0.
 *
 * NOTE(review): local declarations and final return reconstructed — confirm.
 */
static int vcn_v1_0_stop_dpg_mode(struct amdgpu_device *adev)
{
	int ret_code = 0;
	uint32_t tmp;

	/* Wait for power status to be UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF */
	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
			UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF,
			UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);

	/* wait for read ptr to be equal to write ptr */
	tmp = RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR);
	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RB_RPTR, tmp, 0xFFFFFFFF, ret_code);

	tmp = RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2);
	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RB_RPTR2, tmp, 0xFFFFFFFF, ret_code);

	tmp = RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR);
	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_JRBC_RB_RPTR, tmp, 0xFFFFFFFF, ret_code);

	/* dec ring wptr has bit31 set as a DPG marker — mask it off before compare */
	tmp = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR) & 0x7FFFFFFF;
	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RBC_RB_RPTR, tmp, 0xFFFFFFFF, ret_code);

	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
			UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF,
			UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);

	/* disable dynamic power gating mode */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS), 0,
			~UVD_POWER_STATUS__UVD_PG_MODE_MASK);

	return 0;
}
1195 static int vcn_v1_0_stop(struct amdgpu_device
*adev
)
1199 if (adev
->pg_flags
& AMD_PG_SUPPORT_VCN_DPG
)
1200 r
= vcn_v1_0_stop_dpg_mode(adev
);
1202 r
= vcn_v1_0_stop_spg_mode(adev
);
/* Report whether the VCN block is idle: UVD_STATUS reads back IDLE. */
static bool vcn_v1_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return (RREG32_SOC15(VCN, 0, mmUVD_STATUS) == UVD_STATUS__IDLE);
}
/*
 * Poll (bounded, via SOC15_WAIT_ON_RREG) until UVD_STATUS reads IDLE.
 * Returns 0 on success, negative error code on timeout.
 * NOTE(review): `ret` declaration and final return reconstructed — confirm.
 */
static int vcn_v1_0_wait_for_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret;	/* set by SOC15_WAIT_ON_RREG */

	SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_STATUS, UVD_STATUS__IDLE,
		UVD_STATUS__IDLE, ret);

	return ret;
}
1225 static int vcn_v1_0_set_clockgating_state(void *handle
,
1226 enum amd_clockgating_state state
)
1228 struct amdgpu_device
*adev
= (struct amdgpu_device
*)handle
;
1229 bool enable
= (state
== AMD_CG_STATE_GATE
) ? true : false;
1232 /* wait for STATUS to clear */
1233 if (vcn_v1_0_is_idle(handle
))
1235 vcn_v1_0_enable_clock_gating(adev
);
1237 /* disable HW gating and enable Sw gating */
1238 vcn_v1_0_disable_clock_gating(adev
);
/**
 * vcn_v1_0_dec_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t vcn_v1_0_dec_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;	/* used by RREG32_SOC15 */

	return RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
}
/**
 * vcn_v1_0_dec_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t vcn_v1_0_dec_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;	/* used by RREG32_SOC15 */

	return RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR);
}
/**
 * vcn_v1_0_dec_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware. In DPG mode the wptr is
 * mirrored into SCRATCH2 with bit31 set so firmware can pick it up.
 */
static void vcn_v1_0_dec_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
		WREG32_SOC15(UVD, 0, mmUVD_SCRATCH2,
			lower_32_bits(ring->wptr) | 0x80000000);

	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
}
/**
 * vcn_v1_0_dec_ring_insert_start - insert a start command
 *
 * @ring: amdgpu_ring pointer
 *
 * Write a start command to the ring.
 */
static void vcn_v1_0_dec_ring_insert_start(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, VCN_DEC_CMD_PACKET_START << 1);
}
/**
 * vcn_v1_0_dec_ring_insert_end - insert a end command
 *
 * @ring: amdgpu_ring pointer
 *
 * Write a end command to the ring.
 */
static void vcn_v1_0_dec_ring_insert_end(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, VCN_DEC_CMD_PACKET_END << 1);
}
/**
 * vcn_v1_0_dec_ring_emit_fence - emit an fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: fence write-back address
 * @seq: sequence number to write
 * @flags: fence flags (64-bit fences unsupported, see WARN_ON)
 *
 * Write a fence and a trap command to the ring.
 * NOTE(review): trailing `unsigned flags` parameter reconstructed — confirm.
 */
static void vcn_v1_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				     unsigned flags)
{
	struct amdgpu_device *adev = ring->adev;

	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID), 0));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, addr & 0xffffffff);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, VCN_DEC_CMD_FENCE << 1);

	/* follow the fence with a trap so the CPU gets an interrupt */
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, VCN_DEC_CMD_TRAP << 1);
}
/**
 * vcn_v1_0_dec_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @job: job the IB belongs to (supplies the VMID)
 * @ib: indirect buffer to execute
 * @flags: unused here
 *
 * Write ring commands to execute the indirect buffer
 * NOTE(review): trailing `uint32_t flags` parameter reconstructed — confirm.
 */
static void vcn_v1_0_dec_ring_emit_ib(struct amdgpu_ring *ring,
					struct amdgpu_job *job,
					struct amdgpu_ib *ib,
					uint32_t flags)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_VMID), 0));
	amdgpu_ring_write(ring, vmid);

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_64BIT_BAR_LOW), 0));
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH), 0));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_IB_SIZE), 0));
	amdgpu_ring_write(ring, ib->length_dw);
}
/*
 * Emit a conditional register wait on the decode ring: stall until
 * (reg & mask) == val.
 * NOTE(review): trailing `uint32_t mask` parameter reconstructed — confirm.
 */
static void vcn_v1_0_dec_ring_emit_reg_wait(struct amdgpu_ring *ring,
					    uint32_t reg, uint32_t val,
					    uint32_t mask)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, reg << 2);	/* byte offset of register */
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
	amdgpu_ring_write(ring, val);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GP_SCRATCH8), 0));
	amdgpu_ring_write(ring, mask);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, VCN_DEC_CMD_REG_READ_COND_WAIT << 1);
}
/*
 * Emit a VM TLB flush on the decode ring, then wait for the page-table
 * base register write to land before continuing.
 * NOTE(review): `mask = 0xffffffff;` initialization reconstructed — confirm.
 */
static void vcn_v1_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring,
					    unsigned vmid, uint64_t pd_addr)
{
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
	uint32_t data0, data1, mask;

	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	/* wait for register write */
	data0 = hub->ctx0_ptb_addr_lo32 + vmid * 2;
	data1 = lower_32_bits(pd_addr);
	mask = 0xffffffff;
	vcn_v1_0_dec_ring_emit_reg_wait(ring, data0, data1, mask);
}
/* Emit a register write command on the decode ring: *reg = val. */
static void vcn_v1_0_dec_ring_emit_wreg(struct amdgpu_ring *ring,
					uint32_t reg, uint32_t val)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, reg << 2);	/* byte offset of register */
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
	amdgpu_ring_write(ring, val);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, VCN_DEC_CMD_WRITE_REG << 1);
}
/**
 * vcn_v1_0_enc_ring_get_rptr - get enc read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc read pointer
 */
static uint64_t vcn_v1_0_enc_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	/* each enc ring has its own RPTR register */
	if (ring == &adev->vcn.ring_enc[0])
		return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR);
	else
		return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2);
}
/**
 * vcn_v1_0_enc_ring_get_wptr - get enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc write pointer
 */
static uint64_t vcn_v1_0_enc_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	/* each enc ring has its own WPTR register */
	if (ring == &adev->vcn.ring_enc[0])
		return RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR);
	else
		return RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2);
}
/**
 * vcn_v1_0_enc_ring_set_wptr - set enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the enc write pointer to the hardware
 * NOTE(review): `else` between the two branches reconstructed — confirm.
 */
static void vcn_v1_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.ring_enc[0])
		WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR,
			lower_32_bits(ring->wptr));
	else
		WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2,
			lower_32_bits(ring->wptr));
}
/**
 * vcn_v1_0_enc_ring_emit_fence - emit an enc fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: fence write-back address
 * @seq: sequence number to write
 * @flags: fence flags (64-bit fences unsupported, see WARN_ON)
 *
 * Write enc a fence and a trap command to the ring.
 */
static void vcn_v1_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
			u64 seq, unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, VCN_ENC_CMD_FENCE);
	amdgpu_ring_write(ring, addr);
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, VCN_ENC_CMD_TRAP);
}
/* Insert an end-of-stream command on the encode ring. */
static void vcn_v1_0_enc_ring_insert_end(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, VCN_ENC_CMD_END);
}
/**
 * vcn_v1_0_enc_ring_emit_ib - enc execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @job: job the IB belongs to (supplies the VMID)
 * @ib: indirect buffer to execute
 * @flags: unused here
 *
 * Write enc ring commands to execute the indirect buffer
 * NOTE(review): trailing `uint32_t flags` parameter reconstructed — confirm.
 */
static void vcn_v1_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
					struct amdgpu_job *job,
					struct amdgpu_ib *ib,
					uint32_t flags)
{
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);

	amdgpu_ring_write(ring, VCN_ENC_CMD_IB);
	amdgpu_ring_write(ring, vmid);
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
}
/*
 * Emit a conditional register wait on the encode ring.
 * NOTE(review): trailing `uint32_t mask` parameter reconstructed — confirm.
 */
static void vcn_v1_0_enc_ring_emit_reg_wait(struct amdgpu_ring *ring,
					    uint32_t reg, uint32_t val,
					    uint32_t mask)
{
	amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WAIT);
	amdgpu_ring_write(ring, reg << 2);	/* byte offset of register */
	amdgpu_ring_write(ring, mask);
	amdgpu_ring_write(ring, val);
}
/*
 * Emit a VM TLB flush on the encode ring, then wait for the page-table
 * base register write to land before continuing.
 */
static void vcn_v1_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
					    unsigned int vmid, uint64_t pd_addr)
{
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];

	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	/* wait for reg writes */
	vcn_v1_0_enc_ring_emit_reg_wait(ring, hub->ctx0_ptb_addr_lo32 + vmid * 2,
					lower_32_bits(pd_addr), 0xffffffff);
}
/* Emit a register write command on the encode ring: *reg = val. */
static void vcn_v1_0_enc_ring_emit_wreg(struct amdgpu_ring *ring,
					uint32_t reg, uint32_t val)
{
	amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WRITE);
	amdgpu_ring_write(ring, reg << 2);	/* byte offset of register */
	amdgpu_ring_write(ring, val);
}
/**
 * vcn_v1_0_jpeg_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t vcn_v1_0_jpeg_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_RPTR);
}
/**
 * vcn_v1_0_jpeg_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t vcn_v1_0_jpeg_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR);
}
/**
 * vcn_v1_0_jpeg_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void vcn_v1_0_jpeg_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR, lower_32_bits(ring->wptr));
}
/**
 * vcn_v1_0_jpeg_ring_insert_start - insert a start command
 *
 * @ring: amdgpu_ring pointer
 *
 * Write a start command to the ring.
 */
static void vcn_v1_0_jpeg_ring_insert_start(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, 0x68e04);

	amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, 0x80010000);
}
/**
 * vcn_v1_0_jpeg_ring_insert_end - insert a end command
 *
 * @ring: amdgpu_ring pointer
 *
 * Write a end command to the ring.
 */
static void vcn_v1_0_jpeg_ring_insert_end(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, 0x68e04);

	amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, 0x00010000);
}
/**
 * vcn_v1_0_jpeg_ring_emit_fence - emit an fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: fence write-back address
 * @seq: sequence number to write
 * @flags: fence flags (64-bit fences unsupported, see WARN_ON)
 *
 * Write a fence and a trap command to the ring.
 * NOTE(review): trailing `unsigned flags` parameter reconstructed — confirm.
 */
static void vcn_v1_0_jpeg_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				     unsigned flags)
{
	struct amdgpu_device *adev = ring->adev;

	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	/* write seq via the JPEG general-purpose command registers */
	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_GPCOM_DATA0), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, seq);

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_GPCOM_DATA1), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, seq);

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, lower_32_bits(addr));

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, upper_32_bits(addr));

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_GPCOM_CMD), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, 0x8);

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_GPCOM_CMD), 0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE4));
	amdgpu_ring_write(ring, 0);

	/* poll until the fence value is visible in memory */
	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_COND_RD_TIMER), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, 0x01400200);

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_REF_DATA), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, seq);

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, lower_32_bits(addr));

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, upper_32_bits(addr));

	amdgpu_ring_write(ring,
		PACKETJ(0, 0, PACKETJ_CONDITION_CHECK3, PACKETJ_TYPE2));
	amdgpu_ring_write(ring, 0xffffffff);

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, 0x3fbc);

	amdgpu_ring_write(ring,
		PACKETJ(0, 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, 0x1);

	/* emit trap */
	amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE7));
	amdgpu_ring_write(ring, 0);
}
/**
 * vcn_v1_0_jpeg_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @job: job the IB belongs to (supplies the VMID)
 * @ib: indirect buffer to execute
 * @flags: unused here
 *
 * Write ring commands to execute the indirect buffer.
 * NOTE(review): trailing `uint32_t flags` parameter reconstructed — confirm.
 */
static void vcn_v1_0_jpeg_ring_emit_ib(struct amdgpu_ring *ring,
					struct amdgpu_job *job,
					struct amdgpu_ib *ib,
					uint32_t flags)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_IB_VMID), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, (vmid | (vmid << 4)));

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JPEG_VMID), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, (vmid | (vmid << 4)));

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_IB_64BIT_BAR_LOW), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_IB_64BIT_BAR_HIGH), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_IB_SIZE), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, ib->length_dw);

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, lower_32_bits(ring->gpu_addr));

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, upper_32_bits(ring->gpu_addr));

	amdgpu_ring_write(ring,
		PACKETJ(0, 0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE2));
	amdgpu_ring_write(ring, 0);

	/* wait for JRBC to report the IB was fetched and executed */
	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_COND_RD_TIMER), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, 0x01400200);

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_REF_DATA), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, 0x2);

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_STATUS), 0, PACKETJ_CONDITION_CHECK3, PACKETJ_TYPE3));
	amdgpu_ring_write(ring, 0x2);
}
/*
 * Emit a conditional register wait on the JPEG ring: stall until
 * (reg & mask) == val. Registers outside the JRBC direct-access windows
 * are reached indirectly via EXTERNAL_REG_BASE.
 * NOTE(review): `uint32_t mask` parameter and `else` branch reconstructed —
 * confirm against the original.
 */
static void vcn_v1_0_jpeg_ring_emit_reg_wait(struct amdgpu_ring *ring,
					    uint32_t reg, uint32_t val,
					    uint32_t mask)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t reg_offset = (reg << 2);	/* byte offset of register */

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_COND_RD_TIMER), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, 0x01400200);

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_REF_DATA), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, val);

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0));
	if (((reg_offset >= 0x1f800) && (reg_offset <= 0x21fff)) ||
		((reg_offset >= 0x1e000) && (reg_offset <= 0x1e1ff))) {
		/* register is inside a direct-access window */
		amdgpu_ring_write(ring, 0);
		amdgpu_ring_write(ring,
			PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE3));
	} else {
		amdgpu_ring_write(ring, reg_offset);
		amdgpu_ring_write(ring,
			PACKETJ(0, 0, 0, PACKETJ_TYPE3));
	}
	amdgpu_ring_write(ring, mask);
}
/*
 * Emit a VM TLB flush on the JPEG ring, then wait for the page-table
 * base register write to land before continuing.
 * NOTE(review): `mask = 0xffffffff;` initialization reconstructed — confirm.
 */
static void vcn_v1_0_jpeg_ring_emit_vm_flush(struct amdgpu_ring *ring,
					    unsigned vmid, uint64_t pd_addr)
{
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
	uint32_t data0, data1, mask;

	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	/* wait for register write */
	data0 = hub->ctx0_ptb_addr_lo32 + vmid * 2;
	data1 = lower_32_bits(pd_addr);
	mask = 0xffffffff;
	vcn_v1_0_jpeg_ring_emit_reg_wait(ring, data0, data1, mask);
}
/*
 * Emit a register write command on the JPEG ring: *reg = val. Registers
 * outside the JRBC direct-access windows go through EXTERNAL_REG_BASE.
 * NOTE(review): `else` branch keyword reconstructed — confirm.
 */
static void vcn_v1_0_jpeg_ring_emit_wreg(struct amdgpu_ring *ring,
					uint32_t reg, uint32_t val)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t reg_offset = (reg << 2);	/* byte offset of register */

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0));
	if (((reg_offset >= 0x1f800) && (reg_offset <= 0x21fff)) ||
		((reg_offset >= 0x1e000) && (reg_offset <= 0x1e1ff))) {
		/* register is inside a direct-access window */
		amdgpu_ring_write(ring, 0);
		amdgpu_ring_write(ring,
			PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE0));
	} else {
		amdgpu_ring_write(ring, reg_offset);
		amdgpu_ring_write(ring,
			PACKETJ(0, 0, 0, PACKETJ_TYPE0));
	}
	amdgpu_ring_write(ring, val);
}
/*
 * Pad the JPEG ring with `count` dwords of no-ops (emitted in pairs,
 * hence the even-count WARN_ON).
 * NOTE(review): loop index declaration reconstructed — confirm.
 */
static void vcn_v1_0_jpeg_ring_nop(struct amdgpu_ring *ring, uint32_t count)
{
	int i;

	WARN_ON(ring->wptr % 2 || count % 2);

	for (i = 0; i < count / 2; i++) {
		amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE6));
		amdgpu_ring_write(ring, 0);
	}
}
/*
 * Write a register-write command pair directly into the ring buffer
 * memory at *ptr (used when patching the ring, not when submitting),
 * advancing *ptr past the written dwords.
 * NOTE(review): `else` branch keyword reconstructed — confirm.
 */
static void vcn_v1_0_jpeg_ring_patch_wreg(struct amdgpu_ring *ring, uint32_t *ptr, uint32_t reg_offset, uint32_t val)
{
	struct amdgpu_device *adev = ring->adev;
	ring->ring[(*ptr)++] = PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0);
	if (((reg_offset >= 0x1f800) && (reg_offset <= 0x21fff)) ||
		((reg_offset >= 0x1e000) && (reg_offset <= 0x1e1ff))) {
		/* register is inside a direct-access window */
		ring->ring[(*ptr)++] = 0;
		ring->ring[(*ptr)++] = PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE0);
	} else {
		ring->ring[(*ptr)++] = reg_offset;
		ring->ring[(*ptr)++] = PACKETJ(0, 0, 0, PACKETJ_TYPE0);
	}
	ring->ring[(*ptr)++] = val;
}
/*
 * Patch a fixed command sequence into the JPEG ring buffer at dword
 * offset `ptr` (used when (re)starting the block so the engine resumes
 * cleanly): re-program the RB base, force NO_FETCH, reset the RPTR,
 * then re-enable fetching.
 * NOTE(review): several dropped `val`/`mask` assignments (0x13, 0x1, 0,
 * 0x12) and the `else` branch were reconstructed from the upstream
 * driver — confirm against the original before relying on them.
 */
static void vcn_v1_0_jpeg_ring_set_patch_ring(struct amdgpu_ring *ring, uint32_t ptr)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t reg, reg_offset, val, mask, i;

	// 1st: program mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW
	reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW);
	reg_offset = (reg << 2);
	val = lower_32_bits(ring->gpu_addr);
	vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val);

	// 2nd: program mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH
	reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH);
	reg_offset = (reg << 2);
	val = upper_32_bits(ring->gpu_addr);
	vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val);

	// 3rd to 5th: issue MEM_READ commands
	for (i = 0; i <= 2; i++) {
		ring->ring[ptr++] = PACKETJ(0, 0, 0, PACKETJ_TYPE2);
		ring->ring[ptr++] = 0;
	}

	// 6th: program mmUVD_JRBC_RB_CNTL register to enable NO_FETCH and RPTR write ability
	reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_CNTL);
	reg_offset = (reg << 2);
	val = 0x13;
	vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val);

	// 7th: program mmUVD_JRBC_RB_REF_DATA
	reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_REF_DATA);
	reg_offset = (reg << 2);
	val = 0x1;
	vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val);

	// 8th: issue conditional register read mmUVD_JRBC_RB_CNTL
	reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_CNTL);
	reg_offset = (reg << 2);
	val = 0x1;
	mask = 0x1;

	ring->ring[ptr++] = PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_COND_RD_TIMER), 0, 0, PACKETJ_TYPE0);
	ring->ring[ptr++] = 0x01400200;
	ring->ring[ptr++] = PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_REF_DATA), 0, 0, PACKETJ_TYPE0);
	ring->ring[ptr++] = val;
	ring->ring[ptr++] = PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0);
	if (((reg_offset >= 0x1f800) && (reg_offset <= 0x21fff)) ||
		((reg_offset >= 0x1e000) && (reg_offset <= 0x1e1ff))) {
		ring->ring[ptr++] = 0;
		ring->ring[ptr++] = PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE3);
	} else {
		ring->ring[ptr++] = reg_offset;
		ring->ring[ptr++] = PACKETJ(0, 0, 0, PACKETJ_TYPE3);
	}
	ring->ring[ptr++] = mask;

	//9th to 21st: insert no-op
	for (i = 0; i <= 12; i++) {
		ring->ring[ptr++] = PACKETJ(0, 0, 0, PACKETJ_TYPE6);
		ring->ring[ptr++] = 0;
	}

	//22nd: reset mmUVD_JRBC_RB_RPTR
	reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_RPTR);
	reg_offset = (reg << 2);
	val = 0;
	vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val);

	//23rd: program mmUVD_JRBC_RB_CNTL to disable no_fetch
	reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_CNTL);
	reg_offset = (reg << 2);
	val = 0x12;
	vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val);
}
/**
 * vcn_v1_0_set_interrupt_state - enable/disable a VCN interrupt source
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt source descriptor
 * @type: interrupt type index within this source
 * @state: requested AMDGPU_IRQ_STATE_* value
 *
 * Stub: the requested state is accepted without touching any hardware
 * register; VCN interrupt delivery is not gated per-source here.
 * Registered as the .set hook of vcn_v1_0_irq_funcs.
 *
 * NOTE(review): the "unsigned type" parameter line was lost in extraction
 * and is restored to match the amdgpu_irq_src_funcs.set signature — confirm
 * against the original file.
 */
static int vcn_v1_0_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	return 0;
}
/**
 * vcn_v1_0_process_interrupt - route a VCN IV-ring entry to its fence queue
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt source descriptor (unused)
 * @entry: decoded interrupt vector entry
 *
 * Dispatches on entry->src_id and runs fence processing for the ring that
 * raised the interrupt (decode, one of the two encode rings, or jpeg).
 * Unknown source ids are logged with DRM_ERROR and otherwise ignored.
 * Always returns 0.
 *
 * NOTE(review): the case labels were lost in extraction; the
 * VCN_1_0__SRCID__* constants below come from the included
 * ivsrcid/vcn/irqsrcs_vcn_1_0.h header — confirm against the original.
 */
static int vcn_v1_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("IH: VCN TRAP\n");

	switch (entry->src_id) {
	case VCN_1_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT:
		/* decode ring fence completed */
		amdgpu_fence_process(&adev->vcn.ring_dec);
		break;
	case VCN_1_0__SRCID__UVD_ENC_GENERAL_PURPOSE:
		/* first encode ring */
		amdgpu_fence_process(&adev->vcn.ring_enc[0]);
		break;
	case VCN_1_0__SRCID__UVD_ENC_LOW_LATENCY:
		/* second (low-latency) encode ring */
		amdgpu_fence_process(&adev->vcn.ring_enc[1]);
		break;
	case VCN_1_0__SRCID__UVD_JPEG_DECODE:
		/* jpeg decode ring */
		amdgpu_fence_process(&adev->vcn.ring_jpeg);
		break;
	default:
		DRM_ERROR("Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data[0]);
		break;
	}

	return 0;
}
1994 static void vcn_v1_0_dec_ring_insert_nop(struct amdgpu_ring
*ring
, uint32_t count
)
1996 struct amdgpu_device
*adev
= ring
->adev
;
1999 WARN_ON(ring
->wptr
% 2 || count
% 2);
2001 for (i
= 0; i
< count
/ 2; i
++) {
2002 amdgpu_ring_write(ring
, PACKET0(SOC15_REG_OFFSET(UVD
, 0, mmUVD_NO_OP
), 0));
2003 amdgpu_ring_write(ring
, 0);
2007 static int vcn_v1_0_set_powergating_state(void *handle
,
2008 enum amd_powergating_state state
)
2010 /* This doesn't actually powergate the VCN block.
2011 * That's done in the dpm code via the SMC. This
2012 * just re-inits the block as necessary. The actual
2013 * gating still happens in the dpm code. We should
2014 * revisit this when there is a cleaner line between
2015 * the smc and the hw blocks
2018 struct amdgpu_device
*adev
= (struct amdgpu_device
*)handle
;
2020 if(state
== adev
->vcn
.cur_state
)
2023 if (state
== AMD_PG_STATE_GATE
)
2024 ret
= vcn_v1_0_stop(adev
);
2026 ret
= vcn_v1_0_start(adev
);
2029 adev
->vcn
.cur_state
= state
;
/* amd_ip_funcs table that hooks the VCN 1.0 block into the amdgpu IP
 * lifecycle (init/fini, suspend/resume, idle and power/clock gating).
 * Soft-reset hooks are intentionally unimplemented (NULL).
 *
 * NOTE(review): the .name and .late_init lines were dropped by extraction
 * and are restored here — confirm against the original file.
 */
static const struct amd_ip_funcs vcn_v1_0_ip_funcs = {
	.name = "vcn_v1_0",
	.early_init = vcn_v1_0_early_init,
	.late_init = NULL,
	.sw_init = vcn_v1_0_sw_init,
	.sw_fini = vcn_v1_0_sw_fini,
	.hw_init = vcn_v1_0_hw_init,
	.hw_fini = vcn_v1_0_hw_fini,
	.suspend = vcn_v1_0_suspend,
	.resume = vcn_v1_0_resume,
	.is_idle = vcn_v1_0_is_idle,
	.wait_for_idle = vcn_v1_0_wait_for_idle,
	.check_soft_reset = NULL /* vcn_v1_0_check_soft_reset */,
	.pre_soft_reset = NULL /* vcn_v1_0_pre_soft_reset */,
	.soft_reset = NULL /* vcn_v1_0_soft_reset */,
	.post_soft_reset = NULL /* vcn_v1_0_post_soft_reset */,
	.set_clockgating_state = vcn_v1_0_set_clockgating_state,
	.set_powergating_state = vcn_v1_0_set_powergating_state,
};
/* Ring function table for the VCN 1.0 decode ring running in VM mode.
 *
 * NOTE(review): the .align_mask line, the ".emit_frame_size =" field name
 * and the trailing "6," term were dropped by extraction and are restored
 * here — confirm values against the original file.
 */
static const struct amdgpu_ring_funcs vcn_v1_0_dec_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_DEC,
	.align_mask = 0xf,
	.support_64bit_ptrs = false,
	.vmhub = AMDGPU_MMHUB,
	.get_rptr = vcn_v1_0_dec_ring_get_rptr,
	.get_wptr = vcn_v1_0_dec_ring_get_wptr,
	.set_wptr = vcn_v1_0_dec_ring_set_wptr,
	/* worst-case dword budget reserved per frame submission */
	.emit_frame_size =
		6 + 6 + /* hdp invalidate / flush */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
		8 + /* vcn_v1_0_dec_ring_emit_vm_flush */
		14 + 14 + /* vcn_v1_0_dec_ring_emit_fence x2 vm fence */
		6,
	.emit_ib_size = 8, /* vcn_v1_0_dec_ring_emit_ib */
	.emit_ib = vcn_v1_0_dec_ring_emit_ib,
	.emit_fence = vcn_v1_0_dec_ring_emit_fence,
	.emit_vm_flush = vcn_v1_0_dec_ring_emit_vm_flush,
	.test_ring = amdgpu_vcn_dec_ring_test_ring,
	.test_ib = amdgpu_vcn_dec_ring_test_ib,
	.insert_nop = vcn_v1_0_dec_ring_insert_nop,
	.insert_start = vcn_v1_0_dec_ring_insert_start,
	.insert_end = vcn_v1_0_dec_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vcn_ring_begin_use,
	.end_use = amdgpu_vcn_ring_end_use,
	.emit_wreg = vcn_v1_0_dec_ring_emit_wreg,
	.emit_reg_wait = vcn_v1_0_dec_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};
/* Ring function table for the VCN 1.0 encode rings running in VM mode.
 *
 * NOTE(review): the .align_mask line and the ".emit_frame_size =" field
 * name were dropped by extraction and are restored here — confirm values
 * against the original file.
 */
static const struct amdgpu_ring_funcs vcn_v1_0_enc_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_ENC,
	.align_mask = 0x3f,
	.nop = VCN_ENC_CMD_NO_OP,
	.support_64bit_ptrs = false,
	.vmhub = AMDGPU_MMHUB,
	.get_rptr = vcn_v1_0_enc_ring_get_rptr,
	.get_wptr = vcn_v1_0_enc_ring_get_wptr,
	.set_wptr = vcn_v1_0_enc_ring_set_wptr,
	/* worst-case dword budget reserved per frame submission */
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
		4 + /* vcn_v1_0_enc_ring_emit_vm_flush */
		5 + 5 + /* vcn_v1_0_enc_ring_emit_fence x2 vm fence */
		1, /* vcn_v1_0_enc_ring_insert_end */
	.emit_ib_size = 5, /* vcn_v1_0_enc_ring_emit_ib */
	.emit_ib = vcn_v1_0_enc_ring_emit_ib,
	.emit_fence = vcn_v1_0_enc_ring_emit_fence,
	.emit_vm_flush = vcn_v1_0_enc_ring_emit_vm_flush,
	.test_ring = amdgpu_vcn_enc_ring_test_ring,
	.test_ib = amdgpu_vcn_enc_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.insert_end = vcn_v1_0_enc_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vcn_ring_begin_use,
	.end_use = amdgpu_vcn_ring_end_use,
	.emit_wreg = vcn_v1_0_enc_ring_emit_wreg,
	.emit_reg_wait = vcn_v1_0_enc_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};
/* Ring function table for the VCN 1.0 jpeg decode ring running in VM mode.
 *
 * NOTE(review): the .align_mask and .extra_dw lines, the
 * ".emit_frame_size =" field name and the trailing "6," term were dropped
 * by extraction and are restored here — confirm values against the
 * original file.
 */
static const struct amdgpu_ring_funcs vcn_v1_0_jpeg_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_JPEG,
	.align_mask = 0xf,
	.nop = PACKET0(0x81ff, 0),
	.support_64bit_ptrs = false,
	.vmhub = AMDGPU_MMHUB,
	.extra_dw = 64,
	.get_rptr = vcn_v1_0_jpeg_ring_get_rptr,
	.get_wptr = vcn_v1_0_jpeg_ring_get_wptr,
	.set_wptr = vcn_v1_0_jpeg_ring_set_wptr,
	/* worst-case dword budget reserved per frame submission */
	.emit_frame_size =
		6 + 6 + /* hdp invalidate / flush */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
		8 + /* vcn_v1_0_jpeg_ring_emit_vm_flush */
		26 + 26 + /* vcn_v1_0_jpeg_ring_emit_fence x2 vm fence */
		6,
	.emit_ib_size = 22, /* vcn_v1_0_jpeg_ring_emit_ib */
	.emit_ib = vcn_v1_0_jpeg_ring_emit_ib,
	.emit_fence = vcn_v1_0_jpeg_ring_emit_fence,
	.emit_vm_flush = vcn_v1_0_jpeg_ring_emit_vm_flush,
	.test_ring = amdgpu_vcn_jpeg_ring_test_ring,
	.test_ib = amdgpu_vcn_jpeg_ring_test_ib,
	.insert_nop = vcn_v1_0_jpeg_ring_nop,
	.insert_start = vcn_v1_0_jpeg_ring_insert_start,
	.insert_end = vcn_v1_0_jpeg_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vcn_ring_begin_use,
	.end_use = amdgpu_vcn_ring_end_use,
	.emit_wreg = vcn_v1_0_jpeg_ring_emit_wreg,
	.emit_reg_wait = vcn_v1_0_jpeg_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};
2150 static void vcn_v1_0_set_dec_ring_funcs(struct amdgpu_device
*adev
)
2152 adev
->vcn
.ring_dec
.funcs
= &vcn_v1_0_dec_ring_vm_funcs
;
2153 DRM_INFO("VCN decode is enabled in VM mode\n");
2156 static void vcn_v1_0_set_enc_ring_funcs(struct amdgpu_device
*adev
)
2160 for (i
= 0; i
< adev
->vcn
.num_enc_rings
; ++i
)
2161 adev
->vcn
.ring_enc
[i
].funcs
= &vcn_v1_0_enc_ring_vm_funcs
;
2163 DRM_INFO("VCN encode is enabled in VM mode\n");
2166 static void vcn_v1_0_set_jpeg_ring_funcs(struct amdgpu_device
*adev
)
2168 adev
->vcn
.ring_jpeg
.funcs
= &vcn_v1_0_jpeg_ring_vm_funcs
;
2169 DRM_INFO("VCN jpeg decode is enabled in VM mode\n");
/* Interrupt-source callbacks registered for the VCN block. */
static const struct amdgpu_irq_src_funcs vcn_v1_0_irq_funcs = {
	.set = vcn_v1_0_set_interrupt_state,	/* enable/disable hook (stub) */
	.process = vcn_v1_0_process_interrupt,	/* per-ring fence dispatch */
};
2177 static void vcn_v1_0_set_irq_funcs(struct amdgpu_device
*adev
)
2179 adev
->vcn
.irq
.num_types
= adev
->vcn
.num_enc_rings
+ 2;
2180 adev
->vcn
.irq
.funcs
= &vcn_v1_0_irq_funcs
;
2183 const struct amdgpu_ip_block_version vcn_v1_0_ip_block
=
2185 .type
= AMD_IP_BLOCK_TYPE_VCN
,
2189 .funcs
= &vcn_v1_0_ip_funcs
,