/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include <drm/drmP.h>

#include "amdgpu.h"
#include "amdgpu_vcn.h"
#include "soc15.h"
#include "soc15d.h"
#include "soc15_common.h"

#include "vcn/vcn_1_0_offset.h"
#include "vcn/vcn_1_0_sh_mask.h"
#include "hdp/hdp_4_0_offset.h"
#include "mmhub/mmhub_9_1_offset.h"
#include "mmhub/mmhub_9_1_sh_mask.h"

static int vcn_v1_0_start(struct amdgpu_device *adev);
static int vcn_v1_0_stop(struct amdgpu_device *adev);
static void vcn_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev);
static void vcn_v1_0_set_enc_ring_funcs(struct amdgpu_device *adev);
static void vcn_v1_0_set_irq_funcs(struct amdgpu_device *adev);

/**
 * vcn_v1_0_early_init - set function pointers
 *
 * @handle: amdgpu_device pointer
 *
 * Set ring and irq function pointers
 */
static int vcn_v1_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->vcn.num_enc_rings = 2;

	vcn_v1_0_set_dec_ring_funcs(adev);
	vcn_v1_0_set_enc_ring_funcs(adev);
	vcn_v1_0_set_irq_funcs(adev);

	return 0;
}

/**
 * vcn_v1_0_sw_init - sw init for VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Load firmware and sw initialization
 */
static int vcn_v1_0_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	int i, r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* VCN DEC TRAP */
	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_VCN, 124, &adev->vcn.irq);
	if (r)
		return r;

	/* VCN ENC TRAP */
	for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
		r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_VCN, i + 119,
				      &adev->vcn.irq);
		if (r)
			return r;
	}

	r = amdgpu_vcn_sw_init(adev);
	if (r)
		return r;

	r = amdgpu_vcn_resume(adev);
	if (r)
		return r;

	ring = &adev->vcn.ring_dec;
	sprintf(ring->name, "vcn_dec");
	r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.irq, 0);
	if (r)
		return r;

	for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
		ring = &adev->vcn.ring_enc[i];
		sprintf(ring->name, "vcn_enc%d", i);
		r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.irq, 0);
		if (r)
			return r;
	}

	return r;
}

/**
 * vcn_v1_0_sw_fini - sw fini for VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * VCN suspend and free up sw allocation
 */
static int vcn_v1_0_sw_fini(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vcn_suspend(adev);
	if (r)
		return r;

	r = amdgpu_vcn_sw_fini(adev);

	return r;
}

/**
 * vcn_v1_0_hw_init - start and test VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int vcn_v1_0_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->vcn.ring_dec;
	int i, r;

	r = vcn_v1_0_start(adev);
	if (r)
		goto done;

	ring->ready = true;
	r = amdgpu_ring_test_ring(ring);
	if (r) {
		ring->ready = false;
		goto done;
	}

	for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
		ring = &adev->vcn.ring_enc[i];
		ring->ready = true;
		r = amdgpu_ring_test_ring(ring);
		if (r) {
			ring->ready = false;
			goto done;
		}
	}

done:
	if (!r)
		DRM_INFO("VCN decode and encode initialized successfully.\n");

	return r;
}

/**
 * vcn_v1_0_hw_fini - stop the hardware block
 *
 * @handle: amdgpu_device pointer
 *
 * Stop the VCN block, mark ring as not ready any more
 */
static int vcn_v1_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->vcn.ring_dec;
	int r;

	r = vcn_v1_0_stop(adev);
	if (r)
		return r;

	ring->ready = false;

	return 0;
}

/**
 * vcn_v1_0_suspend - suspend VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * HW fini and suspend VCN block
 */
static int vcn_v1_0_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = vcn_v1_0_hw_fini(adev);
	if (r)
		return r;

	r = amdgpu_vcn_suspend(adev);

	return r;
}

/**
 * vcn_v1_0_resume - resume VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Resume firmware and hw init VCN block
 */
static int vcn_v1_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vcn_resume(adev);
	if (r)
		return r;

	r = vcn_v1_0_hw_init(adev);

	return r;
}

/**
 * vcn_v1_0_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the VCN memory controller know its offsets
 */
static void vcn_v1_0_mc_resume(struct amdgpu_device *adev)
{
	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);

	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			lower_32_bits(adev->vcn.gpu_addr));
	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			upper_32_bits(adev->vcn.gpu_addr));
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0,
			AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE0, size);

	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
			lower_32_bits(adev->vcn.gpu_addr + size));
	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
			upper_32_bits(adev->vcn.gpu_addr + size));
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1, 0);
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_HEAP_SIZE);

	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
			lower_32_bits(adev->vcn.gpu_addr + size + AMDGPU_VCN_HEAP_SIZE));
	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
			upper_32_bits(adev->vcn.gpu_addr + size + AMDGPU_VCN_HEAP_SIZE));
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2, 0);
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE2,
			AMDGPU_VCN_STACK_SIZE + (AMDGPU_VCN_SESSION_SIZE * 40));

	WREG32_SOC15(UVD, 0, mmUVD_UDEC_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config);
	WREG32_SOC15(UVD, 0, mmUVD_UDEC_DB_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config);
	WREG32_SOC15(UVD, 0, mmUVD_UDEC_DBW_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config);
}

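/*
 * Resulting VCPU address map programmed above, relative to adev->vcn.gpu_addr
 * (derived directly from the register writes in vcn_v1_0_mc_resume):
 *
 *   cache window 0: firmware image  [0, size)
 *   cache window 1: heap            [size, size + AMDGPU_VCN_HEAP_SIZE)
 *   cache window 2: stack/sessions  [size + AMDGPU_VCN_HEAP_SIZE,
 *                                    ... + AMDGPU_VCN_STACK_SIZE
 *                                    + 40 * AMDGPU_VCN_SESSION_SIZE)
 */
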
/**
 * vcn_v1_0_disable_clock_gating - disable VCN clock gating
 *
 * @adev: amdgpu_device pointer
 * @sw: enable SW clock gating
 *
 * Disable clock gating for VCN block
 */
static void vcn_v1_0_disable_clock_gating(struct amdgpu_device *adev, bool sw)
{
	uint32_t data;

	/* JPEG disable CGC */
	data = RREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL);

	if (sw)
		data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		data &= ~JPEG_CGC_CTRL__DYN_CLOCK_MODE_MASK;

	data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE);
	data &= ~(JPEG_CGC_GATE__JPEG_MASK | JPEG_CGC_GATE__JPEG2_MASK);
	WREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE, data);

	/* UVD disable CGC */
	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
	if (sw)
		data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;

	data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_GATE);
	data &= ~(UVD_CGC_GATE__SYS_MASK
		| UVD_CGC_GATE__UDEC_MASK
		| UVD_CGC_GATE__MPEG2_MASK
		| UVD_CGC_GATE__REGS_MASK
		| UVD_CGC_GATE__RBC_MASK
		| UVD_CGC_GATE__LMI_MC_MASK
		| UVD_CGC_GATE__LMI_UMC_MASK
		| UVD_CGC_GATE__IDCT_MASK
		| UVD_CGC_GATE__MPRD_MASK
		| UVD_CGC_GATE__MPC_MASK
		| UVD_CGC_GATE__LBSI_MASK
		| UVD_CGC_GATE__LRBBM_MASK
		| UVD_CGC_GATE__UDEC_RE_MASK
		| UVD_CGC_GATE__UDEC_CM_MASK
		| UVD_CGC_GATE__UDEC_IT_MASK
		| UVD_CGC_GATE__UDEC_DB_MASK
		| UVD_CGC_GATE__UDEC_MP_MASK
		| UVD_CGC_GATE__WCB_MASK
		| UVD_CGC_GATE__VCPU_MASK
		| UVD_CGC_GATE__SCPU_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_CGC_GATE, data);

	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
	data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK
		| UVD_CGC_CTRL__UDEC_CM_MODE_MASK
		| UVD_CGC_CTRL__UDEC_IT_MODE_MASK
		| UVD_CGC_CTRL__UDEC_DB_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MP_MODE_MASK
		| UVD_CGC_CTRL__SYS_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MODE_MASK
		| UVD_CGC_CTRL__MPEG2_MODE_MASK
		| UVD_CGC_CTRL__REGS_MODE_MASK
		| UVD_CGC_CTRL__RBC_MODE_MASK
		| UVD_CGC_CTRL__LMI_MC_MODE_MASK
		| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
		| UVD_CGC_CTRL__IDCT_MODE_MASK
		| UVD_CGC_CTRL__MPRD_MODE_MASK
		| UVD_CGC_CTRL__MPC_MODE_MASK
		| UVD_CGC_CTRL__LBSI_MODE_MASK
		| UVD_CGC_CTRL__LRBBM_MODE_MASK
		| UVD_CGC_CTRL__WCB_MODE_MASK
		| UVD_CGC_CTRL__VCPU_MODE_MASK
		| UVD_CGC_CTRL__SCPU_MODE_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_GATE);
	data |= (UVD_SUVD_CGC_GATE__SRE_MASK
		| UVD_SUVD_CGC_GATE__SIT_MASK
		| UVD_SUVD_CGC_GATE__SMP_MASK
		| UVD_SUVD_CGC_GATE__SCM_MASK
		| UVD_SUVD_CGC_GATE__SDB_MASK
		| UVD_SUVD_CGC_GATE__SRE_H264_MASK
		| UVD_SUVD_CGC_GATE__SRE_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SIT_H264_MASK
		| UVD_SUVD_CGC_GATE__SIT_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SCM_H264_MASK
		| UVD_SUVD_CGC_GATE__SCM_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SDB_H264_MASK
		| UVD_SUVD_CGC_GATE__SDB_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SCLR_MASK
		| UVD_SUVD_CGC_GATE__UVD_SC_MASK
		| UVD_SUVD_CGC_GATE__ENT_MASK
		| UVD_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK
		| UVD_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK
		| UVD_SUVD_CGC_GATE__SITE_MASK
		| UVD_SUVD_CGC_GATE__SRE_VP9_MASK
		| UVD_SUVD_CGC_GATE__SCM_VP9_MASK
		| UVD_SUVD_CGC_GATE__SIT_VP9_DEC_MASK
		| UVD_SUVD_CGC_GATE__SDB_VP9_MASK
		| UVD_SUVD_CGC_GATE__IME_HEVC_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_GATE, data);

	data = RREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL);
	data &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
		| UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
		| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL, data);
}

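/*
 * Note on the CGC register pairs used above and in the enable path below:
 * the *_CGC_GATE bits select which sub-blocks may have their clocks gated,
 * while the *_CGC_CTRL mode bits control how that gating is driven; the
 * DYN_CLOCK_MODE naming suggests dynamic versus software-driven gating,
 * though that reading of the hardware semantics is an assumption based on
 * the register names.
 */
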
/**
 * vcn_v1_0_enable_clock_gating - enable VCN clock gating
 *
 * @adev: amdgpu_device pointer
 * @sw: enable SW clock gating
 *
 * Enable clock gating for VCN block
 */
static void vcn_v1_0_enable_clock_gating(struct amdgpu_device *adev, bool sw)
{
	uint32_t data;

	/* enable JPEG CGC */
	data = RREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL);
	if (sw)
		data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		data |= 0 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE);
	data |= (JPEG_CGC_GATE__JPEG_MASK | JPEG_CGC_GATE__JPEG2_MASK);
	WREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE, data);

	/* enable UVD CGC */
	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
	if (sw)
		data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		data |= 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
	data |= (UVD_CGC_CTRL__UDEC_RE_MODE_MASK
		| UVD_CGC_CTRL__UDEC_CM_MODE_MASK
		| UVD_CGC_CTRL__UDEC_IT_MODE_MASK
		| UVD_CGC_CTRL__UDEC_DB_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MP_MODE_MASK
		| UVD_CGC_CTRL__SYS_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MODE_MASK
		| UVD_CGC_CTRL__MPEG2_MODE_MASK
		| UVD_CGC_CTRL__REGS_MODE_MASK
		| UVD_CGC_CTRL__RBC_MODE_MASK
		| UVD_CGC_CTRL__LMI_MC_MODE_MASK
		| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
		| UVD_CGC_CTRL__IDCT_MODE_MASK
		| UVD_CGC_CTRL__MPRD_MODE_MASK
		| UVD_CGC_CTRL__MPC_MODE_MASK
		| UVD_CGC_CTRL__LBSI_MODE_MASK
		| UVD_CGC_CTRL__LRBBM_MODE_MASK
		| UVD_CGC_CTRL__WCB_MODE_MASK
		| UVD_CGC_CTRL__VCPU_MODE_MASK
		| UVD_CGC_CTRL__SCPU_MODE_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL);
	data |= (UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
		| UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
		| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL, data);
}

/**
 * vcn_v1_0_start - start VCN block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the VCN block
 */
static int vcn_v1_0_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &adev->vcn.ring_dec;
	uint32_t rb_bufsz, tmp;
	uint32_t lmi_swap_cntl;
	int i, j, r;

	/* disable byte swapping */
	lmi_swap_cntl = 0;

	vcn_v1_0_mc_resume(adev);

	/* disable clock gating */
	vcn_v1_0_disable_clock_gating(adev, true);

	/* disable interrupt */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN), 0,
			~UVD_MASTINT_EN__VCPU_EN_MASK);

	/* stall UMC and register bus before resetting VCPU */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2),
			UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
			~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
	mdelay(1);

	/* put LMI, VCPU, RBC etc... into reset */
	WREG32_SOC15(UVD, 0, mmUVD_SOFT_RESET,
		UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
		UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
		UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
		UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
		UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
		UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
		UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
		UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
	mdelay(5);

	/* initialize VCN memory controller */
	WREG32_SOC15(UVD, 0, mmUVD_LMI_CTRL,
		(0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
		UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__REQ_MODE_MASK |
		0x00100000L);

#ifdef __BIG_ENDIAN
	/* swap (8 in 32) RB and IB */
	lmi_swap_cntl = 0xa;
#endif
	WREG32_SOC15(UVD, 0, mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);

	WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXA0, 0x40c2040);
	WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXA1, 0x0);
	WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXB0, 0x40c2040);
	WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXB1, 0x0);
	WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_ALU, 0);
	WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUX, 0x88);

	/* take all subblocks out of reset, except VCPU */
	WREG32_SOC15(UVD, 0, mmUVD_SOFT_RESET,
			UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
	mdelay(5);

	/* enable VCPU clock */
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CNTL,
			UVD_VCPU_CNTL__CLK_EN_MASK);

	/* enable UMC */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2), 0,
			~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);

	/* boot up the VCPU */
	WREG32_SOC15(UVD, 0, mmUVD_SOFT_RESET, 0);
	mdelay(10);

	for (i = 0; i < 10; ++i) {
		uint32_t status;

		for (j = 0; j < 100; ++j) {
			status = RREG32_SOC15(UVD, 0, mmUVD_STATUS);
			if (status & 2)
				break;
			mdelay(10);
		}
		r = 0;
		if (status & 2)
			break;

		DRM_ERROR("VCN decode not responding, trying to reset the VCPU!!!\n");
		WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
				UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
				~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(10);
		WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), 0,
				~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(10);
		r = -1;
	}

	if (r) {
		DRM_ERROR("VCN decode not responding, giving up!!!\n");
		return r;
	}

	/* enable master interrupt */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN),
		(UVD_MASTINT_EN__VCPU_EN_MASK | UVD_MASTINT_EN__SYS_EN_MASK),
		~(UVD_MASTINT_EN__VCPU_EN_MASK | UVD_MASTINT_EN__SYS_EN_MASK));

	/* clear the bit 4 of VCN_STATUS */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_STATUS), 0,
			~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));

	/* force RBC into idle state */
	rb_bufsz = order_base_2(ring->ring_size);
	tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, tmp);

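	/*
	 * REG_SET_FIELD(val, reg, field, v) is the generic amdgpu helper that
	 * clears reg__field in val and ORs in v at the field's shift, so the
	 * sequence above assembles the whole UVD_RBC_RB_CNTL value field by
	 * field before the single register write.
	 */
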
	/* set the write pointer delay */
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR_CNTL, 0);

	/* set the wb address */
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR_ADDR,
			(upper_32_bits(ring->gpu_addr) >> 2));

	/* program the RB_BASE for ring buffer */
	WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
			lower_32_bits(ring->gpu_addr));
	WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
			upper_32_bits(ring->gpu_addr));

	/* Initialize the ring buffer's read and write pointers */
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR, 0);

	ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
			lower_32_bits(ring->wptr));

	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_CNTL), 0,
			~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);

	ring = &adev->vcn.ring_enc[0];
	WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr);
	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4);

	ring = &adev->vcn.ring_enc[1];
	WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr);
	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4);

	return 0;
}

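/*
 * The boot handshake in vcn_v1_0_start() polls UVD_STATUS for bit 1 (the
 * "status & 2" test), which the VCPU firmware raises once it is running;
 * the outer loop retries the VCPU soft reset up to ten times before the
 * start sequence gives up.
 */
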
/**
 * vcn_v1_0_stop - stop VCN block
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the VCN block
 */
static int vcn_v1_0_stop(struct amdgpu_device *adev)
{
	/* force RBC into idle state */
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, 0x11010101);

	/* Stall UMC and register bus before resetting VCPU */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2),
		UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
		~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
	mdelay(1);

	/* put VCPU into reset */
	WREG32_SOC15(UVD, 0, mmUVD_SOFT_RESET,
		UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
	mdelay(5);

	/* disable VCPU clock */
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CNTL, 0x0);

	/* Unstall UMC and register bus */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2), 0,
		~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);

	/* enable clock gating */
	vcn_v1_0_enable_clock_gating(adev, true);

	return 0;
}

static int vcn_v1_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	/* needed for driver unload */
	return 0;
}

/**
 * vcn_v1_0_dec_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t vcn_v1_0_dec_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
}

/**
 * vcn_v1_0_dec_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t vcn_v1_0_dec_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR);
}

/**
 * vcn_v1_0_dec_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void vcn_v1_0_dec_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
}

/**
 * vcn_v1_0_dec_ring_insert_start - insert a start command
 *
 * @ring: amdgpu_ring pointer
 *
 * Write a start command to the ring.
 */
static void vcn_v1_0_dec_ring_insert_start(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, VCN_DEC_CMD_PACKET_START << 1);
}

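/*
 * Decode-ring commands are register writes in disguise: each
 * PACKET0(reg, 0) header is followed by one payload dword that the VCPU
 * stores to that register, so the start command above is simply
 * GPCOM_VCPU_DATA0 = 0 followed by GPCOM_VCPU_CMD = PACKET_START << 1.
 */
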
/**
 * vcn_v1_0_dec_ring_insert_end - insert an end command
 *
 * @ring: amdgpu_ring pointer
 *
 * Write an end command to the ring.
 */
static void vcn_v1_0_dec_ring_insert_end(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, VCN_DEC_CMD_PACKET_END << 1);
}

/**
 * vcn_v1_0_dec_ring_emit_fence - emit a fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @fence: fence to emit
 *
 * Write a fence and a trap command to the ring.
 */
static void vcn_v1_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				     unsigned flags)
{
	struct amdgpu_device *adev = ring->adev;

	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID), 0));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, addr & 0xffffffff);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, VCN_DEC_CMD_FENCE << 1);

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, VCN_DEC_CMD_TRAP << 1);
}

/**
 * vcn_v1_0_dec_ring_emit_hdp_invalidate - emit an HDP invalidate
 *
 * @ring: amdgpu_ring pointer
 *
 * Emits an HDP invalidate.
 */
static void vcn_v1_0_dec_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 0));
	amdgpu_ring_write(ring, 1);
}

/**
 * vcn_v1_0_dec_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @ib: indirect buffer to execute
 *
 * Write ring commands to execute the indirect buffer
 */
static void vcn_v1_0_dec_ring_emit_ib(struct amdgpu_ring *ring,
				  struct amdgpu_ib *ib,
				  unsigned vmid, bool ctx_switch)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_VMID), 0));
	amdgpu_ring_write(ring, vmid);

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_64BIT_BAR_LOW), 0));
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH), 0));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_IB_SIZE), 0));
	amdgpu_ring_write(ring, ib->length_dw);
}

static void vcn_v1_0_dec_vm_reg_write(struct amdgpu_ring *ring,
				uint32_t data0, uint32_t data1)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, data0);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
	amdgpu_ring_write(ring, data1);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, VCN_DEC_CMD_WRITE_REG << 1);
}

static void vcn_v1_0_dec_vm_reg_wait(struct amdgpu_ring *ring,
				uint32_t data0, uint32_t data1, uint32_t mask)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, data0);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
	amdgpu_ring_write(ring, data1);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GP_SCRATCH8), 0));
	amdgpu_ring_write(ring, mask);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, VCN_DEC_CMD_REG_READ_COND_WAIT << 1);
}

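/*
 * The two helpers above are the building blocks of the decode-ring VM
 * flush below: reg_write makes the VCPU store data1 to the register named
 * by data0, and reg_wait (REG_READ_COND_WAIT, with the mask staged in
 * GP_SCRATCH8) makes it spin until the masked register value matches
 * data1.
 */
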
static void vcn_v1_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring,
					unsigned vmid, uint64_t pd_addr)
{
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
	uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vmid);
	uint64_t flags = AMDGPU_PTE_VALID;
	unsigned eng = ring->vm_inv_eng;
	uint32_t data0, data1, mask;

	amdgpu_gart_get_vm_pde(ring->adev, -1, &pd_addr, &flags);
	pd_addr |= flags;

	data0 = (hub->ctx0_ptb_addr_hi32 + vmid * 2) << 2;
	data1 = upper_32_bits(pd_addr);
	vcn_v1_0_dec_vm_reg_write(ring, data0, data1);

	data0 = (hub->ctx0_ptb_addr_lo32 + vmid * 2) << 2;
	data1 = lower_32_bits(pd_addr);
	vcn_v1_0_dec_vm_reg_write(ring, data0, data1);

	data0 = (hub->ctx0_ptb_addr_lo32 + vmid * 2) << 2;
	data1 = lower_32_bits(pd_addr);
	mask = 0xffffffff;
	vcn_v1_0_dec_vm_reg_wait(ring, data0, data1, mask);

	/* flush TLB */
	data0 = (hub->vm_inv_eng0_req + eng) << 2;
	data1 = req;
	vcn_v1_0_dec_vm_reg_write(ring, data0, data1);

	/* wait for flush */
	data0 = (hub->vm_inv_eng0_ack + eng) << 2;
	data1 = 1 << vmid;
	mask = 1 << vmid;
	vcn_v1_0_dec_vm_reg_wait(ring, data0, data1, mask);
}

/**
 * vcn_v1_0_enc_ring_get_rptr - get enc read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc read pointer
 */
static uint64_t vcn_v1_0_enc_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.ring_enc[0])
		return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR);
	else
		return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2);
}

/**
 * vcn_v1_0_enc_ring_get_wptr - get enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc write pointer
 */
static uint64_t vcn_v1_0_enc_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.ring_enc[0])
		return RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR);
	else
		return RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2);
}

/**
 * vcn_v1_0_enc_ring_set_wptr - set enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the enc write pointer to the hardware
 */
static void vcn_v1_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.ring_enc[0])
		WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR,
			lower_32_bits(ring->wptr));
	else
		WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2,
			lower_32_bits(ring->wptr));
}

/**
 * vcn_v1_0_enc_ring_emit_fence - emit an enc fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @fence: fence to emit
 *
 * Write an enc fence and a trap command to the ring.
 */
static void vcn_v1_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
			u64 seq, unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, VCN_ENC_CMD_FENCE);
	amdgpu_ring_write(ring, addr);
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, VCN_ENC_CMD_TRAP);
}

static void vcn_v1_0_enc_ring_insert_end(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, VCN_ENC_CMD_END);
}

/**
 * vcn_v1_0_enc_ring_emit_ib - enc execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @ib: indirect buffer to execute
 *
 * Write enc ring commands to execute the indirect buffer
 */
static void vcn_v1_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
		struct amdgpu_ib *ib, unsigned int vmid, bool ctx_switch)
{
	amdgpu_ring_write(ring, VCN_ENC_CMD_IB);
	amdgpu_ring_write(ring, vmid);
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
}

static void vcn_v1_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
			unsigned int vmid, uint64_t pd_addr)
{
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
	uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vmid);
	uint64_t flags = AMDGPU_PTE_VALID;
	unsigned eng = ring->vm_inv_eng;

	amdgpu_gart_get_vm_pde(ring->adev, -1, &pd_addr, &flags);
	pd_addr |= flags;

	amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WRITE);
	amdgpu_ring_write(ring,
		(hub->ctx0_ptb_addr_hi32 + vmid * 2) << 2);
	amdgpu_ring_write(ring, upper_32_bits(pd_addr));

	amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WRITE);
	amdgpu_ring_write(ring,
		(hub->ctx0_ptb_addr_lo32 + vmid * 2) << 2);
	amdgpu_ring_write(ring, lower_32_bits(pd_addr));

	amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WAIT);
	amdgpu_ring_write(ring,
		(hub->ctx0_ptb_addr_lo32 + vmid * 2) << 2);
	amdgpu_ring_write(ring, 0xffffffff);
	amdgpu_ring_write(ring, lower_32_bits(pd_addr));

	/* flush TLB */
	amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WRITE);
	amdgpu_ring_write(ring, (hub->vm_inv_eng0_req + eng) << 2);
	amdgpu_ring_write(ring, req);

	/* wait for flush */
	amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WAIT);
	amdgpu_ring_write(ring, (hub->vm_inv_eng0_ack + eng) << 2);
	amdgpu_ring_write(ring, 1 << vmid);
	amdgpu_ring_write(ring, 1 << vmid);
}

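/*
 * Unlike the decode ring, the encode ring has native REG_WRITE/REG_WAIT
 * commands, so the same sequence (write the page directory address, wait
 * for it to stick, request a TLB invalidation, wait for the ack bit) is
 * emitted directly rather than through GPCOM register-write packets.
 */
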
static int vcn_v1_0_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	return 0;
}

static int vcn_v1_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("IH: VCN TRAP\n");

	switch (entry->src_id) {
	case 124:
		amdgpu_fence_process(&adev->vcn.ring_dec);
		break;
	case 119:
		amdgpu_fence_process(&adev->vcn.ring_enc[0]);
		break;
	case 120:
		amdgpu_fence_process(&adev->vcn.ring_enc[1]);
		break;
	default:
		DRM_ERROR("Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data[0]);
		break;
	}

	return 0;
}

static void vcn_v1_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
	int i;
	struct amdgpu_device *adev = ring->adev;

	for (i = 0; i < count; i++)
		amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP), 0));
}

static const struct amd_ip_funcs vcn_v1_0_ip_funcs = {
	.name = "vcn_v1_0",
	.early_init = vcn_v1_0_early_init,
	.late_init = NULL,
	.sw_init = vcn_v1_0_sw_init,
	.sw_fini = vcn_v1_0_sw_fini,
	.hw_init = vcn_v1_0_hw_init,
	.hw_fini = vcn_v1_0_hw_fini,
	.suspend = vcn_v1_0_suspend,
	.resume = vcn_v1_0_resume,
	.is_idle = NULL /* vcn_v1_0_is_idle */,
	.wait_for_idle = NULL /* vcn_v1_0_wait_for_idle */,
	.check_soft_reset = NULL /* vcn_v1_0_check_soft_reset */,
	.pre_soft_reset = NULL /* vcn_v1_0_pre_soft_reset */,
	.soft_reset = NULL /* vcn_v1_0_soft_reset */,
	.post_soft_reset = NULL /* vcn_v1_0_post_soft_reset */,
	.set_clockgating_state = vcn_v1_0_set_clockgating_state,
	.set_powergating_state = NULL /* vcn_v1_0_set_powergating_state */,
};

static const struct amdgpu_ring_funcs vcn_v1_0_dec_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_DEC,
	.align_mask = 0xf,
	.nop = PACKET0(0x81ff, 0),
	.support_64bit_ptrs = false,
	.vmhub = AMDGPU_MMHUB,
	.get_rptr = vcn_v1_0_dec_ring_get_rptr,
	.get_wptr = vcn_v1_0_dec_ring_get_wptr,
	.set_wptr = vcn_v1_0_dec_ring_set_wptr,
	.emit_frame_size =
		2 + /* vcn_v1_0_dec_ring_emit_hdp_invalidate */
		34 + /* vcn_v1_0_dec_ring_emit_vm_flush */
		14 + 14 + /* vcn_v1_0_dec_ring_emit_fence x2 vm fence */
		6,
	.emit_ib_size = 8, /* vcn_v1_0_dec_ring_emit_ib */
	.emit_ib = vcn_v1_0_dec_ring_emit_ib,
	.emit_fence = vcn_v1_0_dec_ring_emit_fence,
	.emit_vm_flush = vcn_v1_0_dec_ring_emit_vm_flush,
	.emit_hdp_invalidate = vcn_v1_0_dec_ring_emit_hdp_invalidate,
	.test_ring = amdgpu_vcn_dec_ring_test_ring,
	.test_ib = amdgpu_vcn_dec_ring_test_ib,
	.insert_nop = vcn_v1_0_ring_insert_nop,
	.insert_start = vcn_v1_0_dec_ring_insert_start,
	.insert_end = vcn_v1_0_dec_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vcn_ring_begin_use,
	.end_use = amdgpu_vcn_ring_end_use,
};

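/*
 * .emit_frame_size above is the worst-case dword budget reserved per
 * submission; the per-entry comments account for each callback, e.g. the
 * 14 dwords of vcn_v1_0_dec_ring_emit_fence are seven PACKET0+payload
 * pairs (four for the fence, three for the trap), counted twice for the
 * VM fence case.
 */
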
static const struct amdgpu_ring_funcs vcn_v1_0_enc_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_ENC,
	.align_mask = 0x3f,
	.nop = VCN_ENC_CMD_NO_OP,
	.support_64bit_ptrs = false,
	.vmhub = AMDGPU_MMHUB,
	.get_rptr = vcn_v1_0_enc_ring_get_rptr,
	.get_wptr = vcn_v1_0_enc_ring_get_wptr,
	.set_wptr = vcn_v1_0_enc_ring_set_wptr,
	.emit_frame_size =
		17 + /* vcn_v1_0_enc_ring_emit_vm_flush */
		5 + 5 + /* vcn_v1_0_enc_ring_emit_fence x2 vm fence */
		1, /* vcn_v1_0_enc_ring_insert_end */
	.emit_ib_size = 5, /* vcn_v1_0_enc_ring_emit_ib */
	.emit_ib = vcn_v1_0_enc_ring_emit_ib,
	.emit_fence = vcn_v1_0_enc_ring_emit_fence,
	.emit_vm_flush = vcn_v1_0_enc_ring_emit_vm_flush,
	.test_ring = amdgpu_vcn_enc_ring_test_ring,
	.test_ib = amdgpu_vcn_enc_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.insert_end = vcn_v1_0_enc_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vcn_ring_begin_use,
	.end_use = amdgpu_vcn_ring_end_use,
};

static void vcn_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev)
{
	adev->vcn.ring_dec.funcs = &vcn_v1_0_dec_ring_vm_funcs;
	DRM_INFO("VCN decode is enabled in VM mode\n");
}

static void vcn_v1_0_set_enc_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->vcn.num_enc_rings; ++i)
		adev->vcn.ring_enc[i].funcs = &vcn_v1_0_enc_ring_vm_funcs;

	DRM_INFO("VCN encode is enabled in VM mode\n");
}

static const struct amdgpu_irq_src_funcs vcn_v1_0_irq_funcs = {
	.set = vcn_v1_0_set_interrupt_state,
	.process = vcn_v1_0_process_interrupt,
};

static void vcn_v1_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->vcn.irq.num_types = adev->vcn.num_enc_rings + 1;
	adev->vcn.irq.funcs = &vcn_v1_0_irq_funcs;
}

const struct amdgpu_ip_block_version vcn_v1_0_ip_block =
{
		.type = AMD_IP_BLOCK_TYPE_VCN,
		.major = 1,
		.minor = 0,
		.rev = 0,
		.funcs = &vcn_v1_0_ip_funcs,
};