/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_vcn.h"
#include "soc15d.h"
#define FIRMWARE_RAVEN		"amdgpu/raven_vcn.bin"
#define FIRMWARE_PICASSO	"amdgpu/picasso_vcn.bin"
#define FIRMWARE_RAVEN2		"amdgpu/raven2_vcn.bin"
#define FIRMWARE_ARCTURUS	"amdgpu/arcturus_vcn.bin"
#define FIRMWARE_RENOIR		"amdgpu/renoir_vcn.bin"
#define FIRMWARE_NAVI10		"amdgpu/navi10_vcn.bin"
#define FIRMWARE_NAVI14		"amdgpu/navi14_vcn.bin"
#define FIRMWARE_NAVI12		"amdgpu/navi12_vcn.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN);
MODULE_FIRMWARE(FIRMWARE_PICASSO);
MODULE_FIRMWARE(FIRMWARE_RAVEN2);
MODULE_FIRMWARE(FIRMWARE_ARCTURUS);
MODULE_FIRMWARE(FIRMWARE_RENOIR);
MODULE_FIRMWARE(FIRMWARE_NAVI10);
MODULE_FIRMWARE(FIRMWARE_NAVI14);
MODULE_FIRMWARE(FIRMWARE_NAVI12);
static void amdgpu_vcn_idle_work_handler(struct work_struct *work);
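
/**
 * amdgpu_vcn_sw_init - allocate and initialize VCN software state
 * @adev: amdgpu device pointer
 *
 * Picks the VCN firmware image for the ASIC, loads and validates it,
 * parses the firmware version and allocates the VCPU buffer objects
 * (plus the DPG scratch buffer when indirect SRAM mode is used) for
 * every non-harvested VCN instance.
 */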
int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
{
	unsigned long bo_size;
	const char *fw_name;
	const struct common_firmware_header *hdr;
	unsigned char fw_check;
	int i, r;

	INIT_DELAYED_WORK(&adev->vcn.idle_work, amdgpu_vcn_idle_work_handler);

	switch (adev->asic_type) {
	case CHIP_RAVEN:
		if (adev->rev_id >= 8)
			fw_name = FIRMWARE_RAVEN2;
		else if (adev->pdev->device == 0x15d8)
			fw_name = FIRMWARE_PICASSO;
		else
			fw_name = FIRMWARE_RAVEN;
		break;
	case CHIP_ARCTURUS:
		fw_name = FIRMWARE_ARCTURUS;
		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	case CHIP_RENOIR:
		fw_name = FIRMWARE_RENOIR;
		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	case CHIP_NAVI10:
		fw_name = FIRMWARE_NAVI10;
		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	case CHIP_NAVI14:
		fw_name = FIRMWARE_NAVI14;
		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	case CHIP_NAVI12:
		fw_name = FIRMWARE_NAVI12;
		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	default:
		return -EINVAL;
	}

	r = request_firmware(&adev->vcn.fw, fw_name, adev->dev);
	if (r) {
		dev_err(adev->dev, "amdgpu_vcn: Can't load firmware \"%s\"\n",
			fw_name);
		return r;
	}

	r = amdgpu_ucode_validate(adev->vcn.fw);
	if (r) {
		dev_err(adev->dev, "amdgpu_vcn: Can't validate firmware \"%s\"\n",
			fw_name);
		release_firmware(adev->vcn.fw);
		adev->vcn.fw = NULL;
		return r;
	}

	hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
	adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version);

	/* Bit 20-23, it is encode major and non-zero for new naming convention.
	 * This field is part of version minor and DRM_DISABLED_FLAG in old naming
	 * convention. Since the latest version minor is 0x5B and DRM_DISABLED_FLAG
	 * is zero in old naming convention, this field is always zero so far.
	 * These four bits are used to tell which naming convention is present.
	 */
	fw_check = (le32_to_cpu(hdr->ucode_version) >> 20) & 0xf;
	if (fw_check) {
		unsigned int dec_ver, enc_major, enc_minor, vep, fw_rev;

		fw_rev = le32_to_cpu(hdr->ucode_version) & 0xfff;
		enc_minor = (le32_to_cpu(hdr->ucode_version) >> 12) & 0xff;
		enc_major = fw_check;
		dec_ver = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xf;
		vep = (le32_to_cpu(hdr->ucode_version) >> 28) & 0xf;
		DRM_INFO("Found VCN firmware Version ENC: %hu.%hu DEC: %hu VEP: %hu Revision: %hu\n",
			 enc_major, enc_minor, dec_ver, vep, fw_rev);
	} else {
		unsigned int version_major, version_minor, family_id;

		family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
		version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
		version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
		DRM_INFO("Found VCN firmware Version: %hu.%hu Family ID: %hu\n",
			 version_major, version_minor, family_id);
	}

	bo_size = AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_CONTEXT_SIZE;
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);

	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
					    AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.inst[i].vcpu_bo,
					    &adev->vcn.inst[i].gpu_addr, &adev->vcn.inst[i].cpu_addr);
		if (r) {
			dev_err(adev->dev, "(%d) failed to allocate vcn bo\n", r);
			return r;
		}

		if (adev->vcn.indirect_sram) {
			r = amdgpu_bo_create_kernel(adev, 64 * 2 * 4, PAGE_SIZE,
						    AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.inst[i].dpg_sram_bo,
						    &adev->vcn.inst[i].dpg_sram_gpu_addr, &adev->vcn.inst[i].dpg_sram_cpu_addr);
			if (r) {
				dev_err(adev->dev, "VCN %d (%d) failed to allocate DPG bo\n", i, r);
				return r;
			}
		}
	}

	return 0;
}
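
/**
 * amdgpu_vcn_sw_fini - tear down VCN software state
 * @adev: amdgpu device pointer
 *
 * Frees the per-instance buffer objects and rings allocated in
 * amdgpu_vcn_sw_init() and releases the firmware image.
 */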
int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
{
	int i, j;

	cancel_delayed_work_sync(&adev->vcn.idle_work);

	for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
		if (adev->vcn.harvest_config & (1 << j))
			continue;

		if (adev->vcn.indirect_sram) {
			amdgpu_bo_free_kernel(&adev->vcn.inst[j].dpg_sram_bo,
					      &adev->vcn.inst[j].dpg_sram_gpu_addr,
					      (void **)&adev->vcn.inst[j].dpg_sram_cpu_addr);
		}
		kvfree(adev->vcn.inst[j].saved_bo);

		amdgpu_bo_free_kernel(&adev->vcn.inst[j].vcpu_bo,
				      &adev->vcn.inst[j].gpu_addr,
				      (void **)&adev->vcn.inst[j].cpu_addr);

		amdgpu_ring_fini(&adev->vcn.inst[j].ring_dec);

		for (i = 0; i < adev->vcn.num_enc_rings; ++i)
			amdgpu_ring_fini(&adev->vcn.inst[j].ring_enc[i]);
	}

	release_firmware(adev->vcn.fw);

	return 0;
}
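
/**
 * amdgpu_vcn_suspend - save VCN VCPU buffer contents
 * @adev: amdgpu device pointer
 *
 * Cancels the idle worker and copies each instance's VCPU buffer
 * object into a CPU-side backup so it can be restored on resume.
 */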
int amdgpu_vcn_suspend(struct amdgpu_device *adev)
{
	unsigned size;
	void *ptr;
	int i;

	cancel_delayed_work_sync(&adev->vcn.idle_work);

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		if (adev->vcn.inst[i].vcpu_bo == NULL)
			return 0;

		size = amdgpu_bo_size(adev->vcn.inst[i].vcpu_bo);
		ptr = adev->vcn.inst[i].cpu_addr;

		adev->vcn.inst[i].saved_bo = kvmalloc(size, GFP_KERNEL);
		if (!adev->vcn.inst[i].saved_bo)
			return -ENOMEM;

		memcpy_fromio(adev->vcn.inst[i].saved_bo, ptr, size);
	}
	return 0;
}
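
/**
 * amdgpu_vcn_resume - restore VCN VCPU buffer contents
 * @adev: amdgpu device pointer
 *
 * Restores the backup taken in amdgpu_vcn_suspend(), or, if no backup
 * exists, re-copies the firmware image and clears the rest of the
 * VCPU buffer object.
 */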
int amdgpu_vcn_resume(struct amdgpu_device *adev)
{
	unsigned size;
	void *ptr;
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		if (adev->vcn.inst[i].vcpu_bo == NULL)
			return -EINVAL;

		size = amdgpu_bo_size(adev->vcn.inst[i].vcpu_bo);
		ptr = adev->vcn.inst[i].cpu_addr;

		if (adev->vcn.inst[i].saved_bo != NULL) {
			memcpy_toio(ptr, adev->vcn.inst[i].saved_bo, size);
			kvfree(adev->vcn.inst[i].saved_bo);
			adev->vcn.inst[i].saved_bo = NULL;
		} else {
			const struct common_firmware_header *hdr;
			unsigned offset;

			hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
			if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
				offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
				memcpy_toio(adev->vcn.inst[i].cpu_addr, adev->vcn.fw->data + offset,
					    le32_to_cpu(hdr->ucode_size_bytes));
				size -= le32_to_cpu(hdr->ucode_size_bytes);
				ptr += le32_to_cpu(hdr->ucode_size_bytes);
			}
			memset_io(ptr, 0, size);
		}
	}
	return 0;
}
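
/**
 * amdgpu_vcn_idle_work_handler - VCN idle worker
 * @work: delayed work item
 *
 * Counts the fences still outstanding on the decode and encode rings
 * of every non-harvested instance, updates the DPG pause state and,
 * once everything is idle, power-gates VCN and re-enables GFXOFF;
 * otherwise the work is rescheduled.
 */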
static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, vcn.idle_work.work);
	unsigned int fences = 0, fence[AMDGPU_MAX_VCN_INSTANCES] = {0};
	unsigned int i, j;

	for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
		if (adev->vcn.harvest_config & (1 << j))
			continue;

		for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
			fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_enc[i]);
		}

		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
			struct dpg_pause_state new_state;

			if (fence[j])
				new_state.fw_based = VCN_DPG_STATE__PAUSE;
			else
				new_state.fw_based = VCN_DPG_STATE__UNPAUSE;

			adev->vcn.pause_dpg_mode(adev, j, &new_state);
		}

		fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_dec);
		fences += fence[j];
	}

	if (fences == 0) {
		amdgpu_gfx_off_ctrl(adev, true);
		amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
						       AMD_PG_STATE_GATE);
	} else {
		schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
	}
}
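
/**
 * amdgpu_vcn_ring_begin_use - power up VCN before ring submission
 * @ring: VCN ring about to be used
 *
 * Cancels the pending idle work; if none was queued, VCN is ungated
 * and GFXOFF is disabled. With DPG support the pause state is
 * refreshed based on the outstanding encode fences and the ring type.
 */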
void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	bool set_clocks = !cancel_delayed_work_sync(&adev->vcn.idle_work);

	if (set_clocks) {
		amdgpu_gfx_off_ctrl(adev, false);
		amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
						       AMD_PG_STATE_UNGATE);
	}

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
		struct dpg_pause_state new_state;
		unsigned int fences = 0;
		unsigned int i;

		for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
			fences += amdgpu_fence_count_emitted(&adev->vcn.inst[ring->me].ring_enc[i]);
		}
		if (fences)
			new_state.fw_based = VCN_DPG_STATE__PAUSE;
		else
			new_state.fw_based = VCN_DPG_STATE__UNPAUSE;

		if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC)
			new_state.fw_based = VCN_DPG_STATE__PAUSE;

		adev->vcn.pause_dpg_mode(adev, ring->me, &new_state);
	}
}
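
/**
 * amdgpu_vcn_ring_end_use - schedule power gating after ring use
 * @ring: VCN ring that finished submission
 *
 * Re-arms the idle worker so VCN is gated again once the rings drain.
 */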
void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring)
{
	schedule_delayed_work(&ring->adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
}
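
/**
 * amdgpu_vcn_dec_ring_test_ring - basic decode ring test
 * @ring: VCN decode ring to test
 *
 * Writes a known value to the scratch register through the ring and
 * polls it back to verify that the ring is alive.
 */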
int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	WREG32(adev->vcn.inst[ring->me].external.scratch9, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r)
		return r;
	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.scratch9, 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(adev->vcn.inst[ring->me].external.scratch9);
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}
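
/**
 * amdgpu_vcn_dec_send_msg - submit a decoder message buffer
 * @ring: VCN decode ring to submit on
 * @bo: reserved buffer object holding the message
 * @fence: optional fence to return for completion waiting
 *
 * Builds a small IB that points the decoder at the message buffer and
 * submits it directly; the buffer is fenced and released once the job
 * is queued.
 */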
static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
				   struct amdgpu_bo *bo,
				   struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct dma_fence *f = NULL;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	uint64_t addr;
	int i, r;

	r = amdgpu_job_alloc_with_ib(adev, 64, &job);
	if (r)
		goto err;

	ib = &job->ibs[0];
	addr = amdgpu_bo_gpu_offset(bo);
	ib->ptr[0] = PACKET0(adev->vcn.internal.data0, 0);
	ib->ptr[1] = addr;
	ib->ptr[2] = PACKET0(adev->vcn.internal.data1, 0);
	ib->ptr[3] = addr >> 32;
	ib->ptr[4] = PACKET0(adev->vcn.internal.cmd, 0);
	ib->ptr[5] = 0;
	for (i = 6; i < 16; i += 2) {
		ib->ptr[i] = PACKET0(adev->vcn.internal.nop, 0);
		ib->ptr[i + 1] = 0;
	}
	ib->length_dw = 16;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err_free;

	amdgpu_bo_fence(bo, f, false);
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&bo);

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err_free:
	amdgpu_job_free(job);

err:
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&bo);
	return r;
}
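
/**
 * amdgpu_vcn_dec_get_create_msg - build and submit a decoder create message
 * @ring: VCN decode ring
 * @handle: session handle to encode in the message
 * @fence: optional fence to return
 *
 * Fills a message buffer with the canned "create session" command
 * stream and hands it to amdgpu_vcn_dec_send_msg().
 */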
static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
					 struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_bo *bo = NULL;
	uint32_t *msg;
	int r, i;

	r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &bo, NULL, (void **)&msg);
	if (r)
		return r;

	msg[0] = cpu_to_le32(0x00000028);
	msg[1] = cpu_to_le32(0x00000038);
	msg[2] = cpu_to_le32(0x00000001);
	msg[3] = cpu_to_le32(0x00000000);
	msg[4] = cpu_to_le32(handle);
	msg[5] = cpu_to_le32(0x00000000);
	msg[6] = cpu_to_le32(0x00000001);
	msg[7] = cpu_to_le32(0x00000028);
	msg[8] = cpu_to_le32(0x00000010);
	msg[9] = cpu_to_le32(0x00000000);
	msg[10] = cpu_to_le32(0x00000007);
	msg[11] = cpu_to_le32(0x00000000);
	msg[12] = cpu_to_le32(0x00000780);
	msg[13] = cpu_to_le32(0x00000440);
	for (i = 14; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	return amdgpu_vcn_dec_send_msg(ring, bo, fence);
}
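
/**
 * amdgpu_vcn_dec_get_destroy_msg - build and submit a decoder destroy message
 * @ring: VCN decode ring
 * @handle: session handle to tear down
 * @fence: optional fence to return
 *
 * Fills a message buffer with the "destroy session" command stream and
 * hands it to amdgpu_vcn_dec_send_msg().
 */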
static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
					  struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_bo *bo = NULL;
	uint32_t *msg;
	int r, i;

	r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &bo, NULL, (void **)&msg);
	if (r)
		return r;

	msg[0] = cpu_to_le32(0x00000028);
	msg[1] = cpu_to_le32(0x00000018);
	msg[2] = cpu_to_le32(0x00000000);
	msg[3] = cpu_to_le32(0x00000002);
	msg[4] = cpu_to_le32(handle);
	msg[5] = cpu_to_le32(0x00000000);
	for (i = 6; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	return amdgpu_vcn_dec_send_msg(ring, bo, fence);
}
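
/**
 * amdgpu_vcn_dec_ring_test_ib - decode ring IB test
 * @ring: VCN decode ring
 * @timeout: how long to wait for the destroy fence, in jiffies
 *
 * Submits a create message followed by a destroy message and waits for
 * the resulting fence to verify IB execution end to end.
 */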
int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct amdgpu_device *adev = ring->adev;
	struct dma_fence *fence;
	long r;

	/* temporarily disable ib test for sriov */
	if (amdgpu_sriov_vf(adev))
		return 0;

	r = amdgpu_vcn_dec_get_create_msg(ring, 1, NULL);
	if (r)
		goto error;

	r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &fence);
	if (r)
		goto error;

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0)
		r = -ETIMEDOUT;
	else if (r > 0)
		r = 0;

	dma_fence_put(fence);
error:
	return r;
}
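
/**
 * amdgpu_vcn_enc_ring_test_ring - basic encode ring test
 * @ring: VCN encode ring to test
 *
 * Pushes an END command and waits for the read pointer to advance,
 * which shows the ring is being consumed by the firmware.
 */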
int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t rptr;
	unsigned i;
	int r;

	r = amdgpu_ring_alloc(ring, 16);
	if (r)
		return r;

	rptr = amdgpu_ring_get_rptr(ring);

	amdgpu_ring_write(ring, VCN_ENC_CMD_END);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		if (amdgpu_ring_get_rptr(ring) != rptr)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}
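
/**
 * amdgpu_vcn_enc_get_create_msg - submit an encoder session create IB
 * @ring: VCN encode ring
 * @handle: session handle
 * @bo: buffer object referenced by the command stream
 * @fence: optional fence to return
 *
 * Builds a minimal "open session" command stream in an IB and submits
 * it directly on the encode ring.
 */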
static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
					 struct amdgpu_bo *bo,
					 struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t addr;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	addr = amdgpu_bo_gpu_offset(bo);

	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
	ib->ptr[ib->length_dw++] = addr;
	ib->ptr[ib->length_dw++] = 0x0000000b;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000001; /* op initialize */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err:
	amdgpu_job_free(job);
	return r;
}
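
/**
 * amdgpu_vcn_enc_get_destroy_msg - submit an encoder session destroy IB
 * @ring: VCN encode ring
 * @handle: session handle
 * @bo: buffer object referenced by the command stream
 * @fence: optional fence to return
 *
 * Builds the matching "close session" command stream and submits it
 * directly on the encode ring.
 */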
static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
					  struct amdgpu_bo *bo,
					  struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t addr;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	addr = amdgpu_bo_gpu_offset(bo);

	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001;
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
	ib->ptr[ib->length_dw++] = addr;
	ib->ptr[ib->length_dw++] = 0x0000000b;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002;
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000002; /* op close session */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err:
	amdgpu_job_free(job);
	return r;
}
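
/**
 * amdgpu_vcn_enc_ring_test_ib - encode ring IB test
 * @ring: VCN encode ring
 * @timeout: how long to wait for the destroy fence, in jiffies
 *
 * Allocates a scratch buffer, submits create and destroy session IBs
 * and waits for the final fence to confirm the encode path works.
 */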
int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct amdgpu_device *adev = ring->adev;
	struct dma_fence *fence = NULL;
	struct amdgpu_bo *bo = NULL;
	long r;

	/* temporarily disable ib test for sriov */
	if (amdgpu_sriov_vf(adev))
		return 0;

	r = amdgpu_bo_create_reserved(ring->adev, 128 * 1024, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &bo, NULL, NULL);
	if (r)
		return r;

	r = amdgpu_vcn_enc_get_create_msg(ring, 1, bo, NULL);
	if (r)
		goto error;

	r = amdgpu_vcn_enc_get_destroy_msg(ring, 1, bo, &fence);
	if (r)
		goto error;

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0)
		r = -ETIMEDOUT;
	else if (r > 0)
		r = 0;

error:
	dma_fence_put(fence);
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&bo);

	return r;
}