/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_vce.h"
#include "vid.h"
#include "vce/vce_3_0_d.h"
#include "vce/vce_3_0_sh_mask.h"
#include "oss/oss_3_0_d.h"
#include "oss/oss_3_0_sh_mask.h"
#include "gca/gfx_8_0_d.h"
#include "smu/smu_7_1_2_d.h"
#include "smu/smu_7_1_2_sh_mask.h"
#define GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT	0x04
#define GRBM_GFX_INDEX__VCE_INSTANCE_MASK	0x10
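/* GRBM_GFX_INDEX bit 4 selects which of the two VCE instances the
 * following VCE register accesses are routed to; vce_v3_0_start()
 * toggles it under grbm_idx_mutex while bringing up each instance.
 */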
#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR0	0x8616
#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR1	0x8617
#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR2	0x8618
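/* Local offsets for the per-window 40-bit VCPU cache BAR registers
 * programmed on CHIP_STONEY and newer in vce_v3_0_mc_resume().
 */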
#define VCE_V3_0_FW_SIZE	(384 * 1024)
#define VCE_V3_0_STACK_SIZE	(64 * 1024)
#define VCE_V3_0_DATA_SIZE	((16 * 1024 * AMDGPU_MAX_VCE_HANDLES) + (52 * 1024))
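/* The VCE BO holds one shared firmware image followed by a
 * (stack, data) pair for each of the two instances; see the size
 * passed to amdgpu_vce_sw_init() and the cache windows programmed
 * in vce_v3_0_mc_resume().
 */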
static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx);
static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev);
static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev);
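/* VCE 3.0 exposes two rings: ring 0 uses the VCE_RB_* registers,
 * ring 1 the VCE_RB_*2 registers.
 */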
/**
 * vce_v3_0_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint32_t vce_v3_0_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vce.ring[0])
		return RREG32(mmVCE_RB_RPTR);
	else
		return RREG32(mmVCE_RB_RPTR2);
}
/**
 * vce_v3_0_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint32_t vce_v3_0_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vce.ring[0])
		return RREG32(mmVCE_RB_WPTR);
	else
		return RREG32(mmVCE_RB_WPTR2);
}
/**
 * vce_v3_0_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void vce_v3_0_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vce.ring[0])
		WREG32(mmVCE_RB_WPTR, ring->wptr);
	else
		WREG32(mmVCE_RB_WPTR2, ring->wptr);
}
/**
 * vce_v3_0_start - start VCE block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the VCE block
 */
static int vce_v3_0_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	int idx, i, j, r;

	mutex_lock(&adev->grbm_idx_mutex);
	for (idx = 0; idx < 2; ++idx) {

		if (adev->vce.harvest_config & (1 << idx))
			continue;

		if (idx == 0)
			WREG32_P(mmGRBM_GFX_INDEX, 0,
				~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
		else
			WREG32_P(mmGRBM_GFX_INDEX,
				GRBM_GFX_INDEX__VCE_INSTANCE_MASK,
				~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);

		vce_v3_0_mc_resume(adev, idx);

		/* set BUSY flag */
		WREG32_P(mmVCE_STATUS, 1, ~1);
		if (adev->asic_type >= CHIP_STONEY)
			WREG32_P(mmVCE_VCPU_CNTL, 1, ~0x200001);
		else
			WREG32_P(mmVCE_VCPU_CNTL, VCE_VCPU_CNTL__CLK_EN_MASK,
				~VCE_VCPU_CNTL__CLK_EN_MASK);

		WREG32_P(mmVCE_SOFT_RESET,
			 VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
			 ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);

		mdelay(100);

		WREG32_P(mmVCE_SOFT_RESET, 0,
			~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);

		for (i = 0; i < 10; ++i) {
			uint32_t status;
			for (j = 0; j < 100; ++j) {
				status = RREG32(mmVCE_STATUS);
				if (status & 2)
					break;
				mdelay(10);
			}
			r = 0;
			if (status & 2)
				break;

			DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n");
			WREG32_P(mmVCE_SOFT_RESET,
				VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
				~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
			mdelay(10);
			WREG32_P(mmVCE_SOFT_RESET, 0,
				~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
			mdelay(10);
			r = -1;
		}

		/* clear BUSY flag */
		WREG32_P(mmVCE_STATUS, 0, ~1);

		if (r) {
			DRM_ERROR("VCE not responding, giving up!!!\n");
			mutex_unlock(&adev->grbm_idx_mutex);
			return r;
		}
	}

	WREG32_P(mmGRBM_GFX_INDEX, 0, ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
	mutex_unlock(&adev->grbm_idx_mutex);

	ring = &adev->vce.ring[0];
	WREG32(mmVCE_RB_RPTR, ring->wptr);
	WREG32(mmVCE_RB_WPTR, ring->wptr);
	WREG32(mmVCE_RB_BASE_LO, ring->gpu_addr);
	WREG32(mmVCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
	WREG32(mmVCE_RB_SIZE, ring->ring_size / 4);

	ring = &adev->vce.ring[1];
	WREG32(mmVCE_RB_RPTR2, ring->wptr);
	WREG32(mmVCE_RB_WPTR2, ring->wptr);
	WREG32(mmVCE_RB_BASE_LO2, ring->gpu_addr);
	WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
	WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4);

	return 0;
}
#define ixVCE_HARVEST_FUSE_MACRO__ADDRESS	0xC0014074
#define VCE_HARVEST_FUSE_MACRO__SHIFT		27
#define VCE_HARVEST_FUSE_MACRO__MASK		0x18000000
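/* The two fuse bits encode which instances were harvested:
 * 0 = none, 1 = VCE0, 2 = VCE1, 3 = both (no usable VCE).
 */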
static unsigned vce_v3_0_get_harvest_config(struct amdgpu_device *adev)
{
	u32 tmp;
	unsigned ret;

	/* Fiji, Stoney are single pipe */
	if ((adev->asic_type == CHIP_FIJI) ||
	    (adev->asic_type == CHIP_STONEY)) {
		ret = AMDGPU_VCE_HARVEST_VCE1;
		return ret;
	}

	/* Tonga and CZ are dual or single pipe */
	if (adev->flags & AMD_IS_APU)
		tmp = (RREG32_SMC(ixVCE_HARVEST_FUSE_MACRO__ADDRESS) &
		       VCE_HARVEST_FUSE_MACRO__MASK) >>
			VCE_HARVEST_FUSE_MACRO__SHIFT;
	else
		tmp = (RREG32_SMC(ixCC_HARVEST_FUSES) &
		       CC_HARVEST_FUSES__VCE_DISABLE_MASK) >>
			CC_HARVEST_FUSES__VCE_DISABLE__SHIFT;

	switch (tmp) {
	case 1:
		ret = AMDGPU_VCE_HARVEST_VCE0;
		break;
	case 2:
		ret = AMDGPU_VCE_HARVEST_VCE1;
		break;
	case 3:
		ret = AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1;
		break;
	default:
		ret = 0;
	}

	return ret;
}
static int vce_v3_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->vce.harvest_config = vce_v3_0_get_harvest_config(adev);

	if ((adev->vce.harvest_config &
	     (AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1)) ==
	    (AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1))
		return -ENOENT;

	vce_v3_0_set_ring_funcs(adev);
	vce_v3_0_set_irq_funcs(adev);

	return 0;
}
static int vce_v3_0_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring;
	int r;

	/* VCE */
	r = amdgpu_irq_add_id(adev, 167, &adev->vce.irq);
	if (r)
		return r;

	r = amdgpu_vce_sw_init(adev, VCE_V3_0_FW_SIZE +
		(VCE_V3_0_STACK_SIZE + VCE_V3_0_DATA_SIZE) * 2);
	if (r)
		return r;

	r = amdgpu_vce_resume(adev);
	if (r)
		return r;

	ring = &adev->vce.ring[0];
	sprintf(ring->name, "vce0");
	r = amdgpu_ring_init(adev, ring, 4096, VCE_CMD_NO_OP, 0xf,
			     &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE);
	if (r)
		return r;

	ring = &adev->vce.ring[1];
	sprintf(ring->name, "vce1");
	r = amdgpu_ring_init(adev, ring, 4096, VCE_CMD_NO_OP, 0xf,
			     &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE);
	if (r)
		return r;

	return r;
}
static int vce_v3_0_sw_fini(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vce_suspend(adev);
	if (r)
		return r;

	r = amdgpu_vce_sw_fini(adev);
	if (r)
		return r;

	return r;
}
static int vce_v3_0_hw_init(void *handle)
{
	struct amdgpu_ring *ring;
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = vce_v3_0_start(adev);
	if (r)
		return r;

	ring = &adev->vce.ring[0];
	ring->ready = true;
	r = amdgpu_ring_test_ring(ring);
	if (r) {
		ring->ready = false;
		return r;
	}

	ring = &adev->vce.ring[1];
	ring->ready = true;
	r = amdgpu_ring_test_ring(ring);
	if (r) {
		ring->ready = false;
		return r;
	}

	DRM_INFO("VCE initialized successfully.\n");

	return 0;
}
static int vce_v3_0_hw_fini(void *handle)
{
	return 0;
}
static int vce_v3_0_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = vce_v3_0_hw_fini(adev);
	if (r)
		return r;

	r = amdgpu_vce_suspend(adev);
	if (r)
		return r;

	return r;
}
static int vce_v3_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vce_resume(adev);
	if (r)
		return r;

	r = vce_v3_0_hw_init(adev);
	if (r)
		return r;

	return r;
}
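/* Cache window 0 (the firmware image) is shared by both instances,
 * while windows 1 and 2 (stack and data) are placed at per-instance
 * offsets so each VCPU gets its own stack and data area.
 */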
static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx)
{
	uint32_t offset, size;

	WREG32_P(mmVCE_CLOCK_GATING_A, 0, ~(1 << 16));
	WREG32_P(mmVCE_UENC_CLOCK_GATING, 0x1FF000, ~0xFF9FF000);
	WREG32_P(mmVCE_UENC_REG_CLOCK_GATING, 0x3F, ~0x3F);
	WREG32(mmVCE_CLOCK_GATING_B, 0xf7);

	WREG32(mmVCE_LMI_CTRL, 0x00398000);
	WREG32_P(mmVCE_LMI_CACHE_CTRL, 0x0, ~0x1);
	WREG32(mmVCE_LMI_SWAP_CNTL, 0);
	WREG32(mmVCE_LMI_SWAP_CNTL1, 0);
	WREG32(mmVCE_LMI_VM_CTRL, 0);
	if (adev->asic_type >= CHIP_STONEY) {
		WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR0, (adev->vce.gpu_addr >> 8));
		WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR1, (adev->vce.gpu_addr >> 8));
		WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR2, (adev->vce.gpu_addr >> 8));
	} else
		WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR, (adev->vce.gpu_addr >> 8));
	offset = AMDGPU_VCE_FIRMWARE_OFFSET;
	size = VCE_V3_0_FW_SIZE;
	WREG32(mmVCE_VCPU_CACHE_OFFSET0, offset & 0x7fffffff);
	WREG32(mmVCE_VCPU_CACHE_SIZE0, size);

	if (idx == 0) {
		offset += size;
		size = VCE_V3_0_STACK_SIZE;
		WREG32(mmVCE_VCPU_CACHE_OFFSET1, offset & 0x7fffffff);
		WREG32(mmVCE_VCPU_CACHE_SIZE1, size);
		offset += size;
		size = VCE_V3_0_DATA_SIZE;
		WREG32(mmVCE_VCPU_CACHE_OFFSET2, offset & 0x7fffffff);
		WREG32(mmVCE_VCPU_CACHE_SIZE2, size);
	} else {
		offset += size + VCE_V3_0_STACK_SIZE + VCE_V3_0_DATA_SIZE;
		size = VCE_V3_0_STACK_SIZE;
		WREG32(mmVCE_VCPU_CACHE_OFFSET1, offset & 0xfffffff);
		WREG32(mmVCE_VCPU_CACHE_SIZE1, size);
		offset += size;
		size = VCE_V3_0_DATA_SIZE;
		WREG32(mmVCE_VCPU_CACHE_OFFSET2, offset & 0xfffffff);
		WREG32(mmVCE_VCPU_CACHE_SIZE2, size);
	}

	WREG32_P(mmVCE_LMI_CTRL2, 0x0, ~0x100);

	WREG32_P(mmVCE_SYS_INT_EN, VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK,
		 ~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
}
static bool vce_v3_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 mask = 0;
	int idx;

	for (idx = 0; idx < 2; ++idx) {
		if (adev->vce.harvest_config & (1 << idx))
			continue;

		if (idx == 0)
			mask |= SRBM_STATUS2__VCE0_BUSY_MASK;
		else
			mask |= SRBM_STATUS2__VCE1_BUSY_MASK;
	}

	return !(RREG32(mmSRBM_STATUS2) & mask);
}
static int vce_v3_0_wait_for_idle(void *handle)
{
	unsigned i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 mask = 0;
	int idx;

	for (idx = 0; idx < 2; ++idx) {
		if (adev->vce.harvest_config & (1 << idx))
			continue;

		if (idx == 0)
			mask |= SRBM_STATUS2__VCE0_BUSY_MASK;
		else
			mask |= SRBM_STATUS2__VCE1_BUSY_MASK;
	}

	for (i = 0; i < adev->usec_timeout; i++) {
		if (!(RREG32(mmSRBM_STATUS2) & mask))
			return 0;
	}
	return -ETIMEDOUT;
}
static int vce_v3_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 mask = 0;
	int idx;

	for (idx = 0; idx < 2; ++idx) {
		if (adev->vce.harvest_config & (1 << idx))
			continue;

		if (idx == 0)
			mask |= SRBM_SOFT_RESET__SOFT_RESET_VCE0_MASK;
		else
			mask |= SRBM_SOFT_RESET__SOFT_RESET_VCE1_MASK;
	}
	WREG32_P(mmSRBM_SOFT_RESET, mask,
		 ~(SRBM_SOFT_RESET__SOFT_RESET_VCE0_MASK |
		   SRBM_SOFT_RESET__SOFT_RESET_VCE1_MASK));
	mdelay(5);

	return vce_v3_0_start(adev);
}
static void vce_v3_0_print_status(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	dev_info(adev->dev, "VCE 3.0 registers\n");
	dev_info(adev->dev, "  VCE_STATUS=0x%08X\n",
		 RREG32(mmVCE_STATUS));
	dev_info(adev->dev, "  VCE_VCPU_CNTL=0x%08X\n",
		 RREG32(mmVCE_VCPU_CNTL));
	dev_info(adev->dev, "  VCE_VCPU_CACHE_OFFSET0=0x%08X\n",
		 RREG32(mmVCE_VCPU_CACHE_OFFSET0));
	dev_info(adev->dev, "  VCE_VCPU_CACHE_SIZE0=0x%08X\n",
		 RREG32(mmVCE_VCPU_CACHE_SIZE0));
	dev_info(adev->dev, "  VCE_VCPU_CACHE_OFFSET1=0x%08X\n",
		 RREG32(mmVCE_VCPU_CACHE_OFFSET1));
	dev_info(adev->dev, "  VCE_VCPU_CACHE_SIZE1=0x%08X\n",
		 RREG32(mmVCE_VCPU_CACHE_SIZE1));
	dev_info(adev->dev, "  VCE_VCPU_CACHE_OFFSET2=0x%08X\n",
		 RREG32(mmVCE_VCPU_CACHE_OFFSET2));
	dev_info(adev->dev, "  VCE_VCPU_CACHE_SIZE2=0x%08X\n",
		 RREG32(mmVCE_VCPU_CACHE_SIZE2));
	dev_info(adev->dev, "  VCE_SOFT_RESET=0x%08X\n",
		 RREG32(mmVCE_SOFT_RESET));
	dev_info(adev->dev, "  VCE_RB_BASE_LO2=0x%08X\n",
		 RREG32(mmVCE_RB_BASE_LO2));
	dev_info(adev->dev, "  VCE_RB_BASE_HI2=0x%08X\n",
		 RREG32(mmVCE_RB_BASE_HI2));
	dev_info(adev->dev, "  VCE_RB_SIZE2=0x%08X\n",
		 RREG32(mmVCE_RB_SIZE2));
	dev_info(adev->dev, "  VCE_RB_RPTR2=0x%08X\n",
		 RREG32(mmVCE_RB_RPTR2));
	dev_info(adev->dev, "  VCE_RB_WPTR2=0x%08X\n",
		 RREG32(mmVCE_RB_WPTR2));
	dev_info(adev->dev, "  VCE_RB_BASE_LO=0x%08X\n",
		 RREG32(mmVCE_RB_BASE_LO));
	dev_info(adev->dev, "  VCE_RB_BASE_HI=0x%08X\n",
		 RREG32(mmVCE_RB_BASE_HI));
	dev_info(adev->dev, "  VCE_RB_SIZE=0x%08X\n",
		 RREG32(mmVCE_RB_SIZE));
	dev_info(adev->dev, "  VCE_RB_RPTR=0x%08X\n",
		 RREG32(mmVCE_RB_RPTR));
	dev_info(adev->dev, "  VCE_RB_WPTR=0x%08X\n",
		 RREG32(mmVCE_RB_WPTR));
	dev_info(adev->dev, "  VCE_CLOCK_GATING_A=0x%08X\n",
		 RREG32(mmVCE_CLOCK_GATING_A));
	dev_info(adev->dev, "  VCE_CLOCK_GATING_B=0x%08X\n",
		 RREG32(mmVCE_CLOCK_GATING_B));
	dev_info(adev->dev, "  VCE_UENC_CLOCK_GATING=0x%08X\n",
		 RREG32(mmVCE_UENC_CLOCK_GATING));
	dev_info(adev->dev, "  VCE_UENC_REG_CLOCK_GATING=0x%08X\n",
		 RREG32(mmVCE_UENC_REG_CLOCK_GATING));
	dev_info(adev->dev, "  VCE_SYS_INT_EN=0x%08X\n",
		 RREG32(mmVCE_SYS_INT_EN));
	dev_info(adev->dev, "  VCE_LMI_CTRL2=0x%08X\n",
		 RREG32(mmVCE_LMI_CTRL2));
	dev_info(adev->dev, "  VCE_LMI_CTRL=0x%08X\n",
		 RREG32(mmVCE_LMI_CTRL));
	dev_info(adev->dev, "  VCE_LMI_VM_CTRL=0x%08X\n",
		 RREG32(mmVCE_LMI_VM_CTRL));
	dev_info(adev->dev, "  VCE_LMI_SWAP_CNTL=0x%08X\n",
		 RREG32(mmVCE_LMI_SWAP_CNTL));
	dev_info(adev->dev, "  VCE_LMI_SWAP_CNTL1=0x%08X\n",
		 RREG32(mmVCE_LMI_SWAP_CNTL1));
	dev_info(adev->dev, "  VCE_LMI_CACHE_CTRL=0x%08X\n",
		 RREG32(mmVCE_LMI_CACHE_CTRL));
}
static int vce_v3_0_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	uint32_t val = 0;

	if (state == AMDGPU_IRQ_STATE_ENABLE)
		val |= VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK;

	WREG32_P(mmVCE_SYS_INT_EN, val,
		 ~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
	return 0;
}
static int vce_v3_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("IH: VCE\n");

	WREG32_P(mmVCE_SYS_INT_STATUS,
		 VCE_SYS_INT_STATUS__VCE_SYS_INT_TRAP_INTERRUPT_INT_MASK,
		 ~VCE_SYS_INT_STATUS__VCE_SYS_INT_TRAP_INTERRUPT_INT_MASK);

	switch (entry->src_data) {
	case 0:
		amdgpu_fence_process(&adev->vce.ring[0]);
		break;
	case 1:
		amdgpu_fence_process(&adev->vce.ring[1]);
		break;
	default:
		DRM_ERROR("Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data);
		break;
	}

	return 0;
}
static int vce_v3_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	return 0;
}
static int vce_v3_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	/* This doesn't actually powergate the VCE block.
	 * That's done in the dpm code via the SMC.  This
	 * just re-inits the block as necessary.  The actual
	 * gating still happens in the dpm code.  We should
	 * revisit this when there is a cleaner line between
	 * the smc and the hw blocks
	 */
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (state == AMD_PG_STATE_GATE)
		/* XXX do we need a vce_v3_0_stop()? */
		return 0;
	else
		return vce_v3_0_start(adev);
}
const struct amd_ip_funcs vce_v3_0_ip_funcs = {
	.early_init = vce_v3_0_early_init,
	.sw_init = vce_v3_0_sw_init,
	.sw_fini = vce_v3_0_sw_fini,
	.hw_init = vce_v3_0_hw_init,
	.hw_fini = vce_v3_0_hw_fini,
	.suspend = vce_v3_0_suspend,
	.resume = vce_v3_0_resume,
	.is_idle = vce_v3_0_is_idle,
	.wait_for_idle = vce_v3_0_wait_for_idle,
	.soft_reset = vce_v3_0_soft_reset,
	.print_status = vce_v3_0_print_status,
	.set_clockgating_state = vce_v3_0_set_clockgating_state,
	.set_powergating_state = vce_v3_0_set_powergating_state,
};
static const struct amdgpu_ring_funcs vce_v3_0_ring_funcs = {
	.get_rptr = vce_v3_0_ring_get_rptr,
	.get_wptr = vce_v3_0_ring_get_wptr,
	.set_wptr = vce_v3_0_ring_set_wptr,
	.parse_cs = amdgpu_vce_ring_parse_cs,
	.emit_ib = amdgpu_vce_ring_emit_ib,
	.emit_fence = amdgpu_vce_ring_emit_fence,
	.emit_semaphore = amdgpu_vce_ring_emit_semaphore,
	.test_ring = amdgpu_vce_ring_test_ring,
	.test_ib = amdgpu_vce_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
};
static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev)
{
	adev->vce.ring[0].funcs = &vce_v3_0_ring_funcs;
	adev->vce.ring[1].funcs = &vce_v3_0_ring_funcs;
}
static const struct amdgpu_irq_src_funcs vce_v3_0_irq_funcs = {
	.set = vce_v3_0_set_interrupt_state,
	.process = vce_v3_0_process_interrupt,
};
static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->vce.irq.num_types = 1;
	adev->vce.irq.funcs = &vce_v3_0_irq_funcs;
}