/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */
#include <linux/firmware.h>
#include <linux/module.h>
#include <drm/drmP.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_vce.h"
/* 1 second timeout */
#define VCE_IDLE_TIMEOUT_MS	1000
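/*
 * The idle work below re-arms itself with this timeout, so VCE is only
 * powered down after a full second in which no new fences were emitted
 * on either VCE ring.
 */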
/* Firmware Names */
#ifdef CONFIG_DRM_AMDGPU_CIK
#define FIRMWARE_BONAIRE	"radeon/bonaire_vce.bin"
#define FIRMWARE_KABINI		"radeon/kabini_vce.bin"
#define FIRMWARE_KAVERI		"radeon/kaveri_vce.bin"
#define FIRMWARE_HAWAII		"radeon/hawaii_vce.bin"
#define FIRMWARE_MULLINS	"radeon/mullins_vce.bin"
#endif
#define FIRMWARE_TONGA		"amdgpu/tonga_vce.bin"
#define FIRMWARE_CARRIZO	"amdgpu/carrizo_vce.bin"
#define FIRMWARE_FIJI		"amdgpu/fiji_vce.bin"
#define FIRMWARE_STONEY		"amdgpu/stoney_vce.bin"
#ifdef CONFIG_DRM_AMDGPU_CIK
MODULE_FIRMWARE(FIRMWARE_BONAIRE);
MODULE_FIRMWARE(FIRMWARE_KABINI);
MODULE_FIRMWARE(FIRMWARE_KAVERI);
MODULE_FIRMWARE(FIRMWARE_HAWAII);
MODULE_FIRMWARE(FIRMWARE_MULLINS);
#endif
MODULE_FIRMWARE(FIRMWARE_TONGA);
MODULE_FIRMWARE(FIRMWARE_CARRIZO);
MODULE_FIRMWARE(FIRMWARE_FIJI);
MODULE_FIRMWARE(FIRMWARE_STONEY);
static void amdgpu_vce_idle_work_handler(struct work_struct *work);
/**
 * amdgpu_vce_sw_init - allocate memory, load vce firmware
 *
 * @adev: amdgpu_device pointer
 * @size: size of the firmware, stack and heap BO
 *
 * First step to get VCE online, allocate memory and load the firmware
 */
int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
{
	const char *fw_name;
	const struct common_firmware_header *hdr;
	unsigned ucode_version, version_major, version_minor, binary_id;
	int i, r;

	INIT_DELAYED_WORK(&adev->vce.idle_work, amdgpu_vce_idle_work_handler);
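	/*
	 * The idle work is not scheduled here; amdgpu_vce_note_usage()
	 * (re)arms it every time VCE is about to be used.
	 */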
	switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
		fw_name = FIRMWARE_BONAIRE;
		break;
	case CHIP_KAVERI:
		fw_name = FIRMWARE_KAVERI;
		break;
	case CHIP_KABINI:
		fw_name = FIRMWARE_KABINI;
		break;
	case CHIP_HAWAII:
		fw_name = FIRMWARE_HAWAII;
		break;
	case CHIP_MULLINS:
		fw_name = FIRMWARE_MULLINS;
		break;
#endif
	case CHIP_TONGA:
		fw_name = FIRMWARE_TONGA;
		break;
	case CHIP_CARRIZO:
		fw_name = FIRMWARE_CARRIZO;
		break;
	case CHIP_FIJI:
		fw_name = FIRMWARE_FIJI;
		break;
	case CHIP_STONEY:
		fw_name = FIRMWARE_STONEY;
		break;

	default:
		return -EINVAL;
	}
	r = request_firmware(&adev->vce.fw, fw_name, adev->dev);
	if (r) {
		dev_err(adev->dev, "amdgpu_vce: Can't load firmware \"%s\"\n",
			fw_name);
		return r;
	}
	r = amdgpu_ucode_validate(adev->vce.fw);
	if (r) {
		dev_err(adev->dev, "amdgpu_vce: Can't validate firmware \"%s\"\n",
			fw_name);
		release_firmware(adev->vce.fw);
		adev->vce.fw = NULL;
		return r;
	}
	hdr = (const struct common_firmware_header *)adev->vce.fw->data;

	ucode_version = le32_to_cpu(hdr->ucode_version);
	version_major = (ucode_version >> 20) & 0xfff;
	version_minor = (ucode_version >> 8) & 0xfff;
	binary_id = ucode_version & 0xff;
	DRM_INFO("Found VCE firmware Version: %hhd.%hhd Binary ID: %hhd\n",
		version_major, version_minor, binary_id);
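	/*
	 * Repack the version as major:minor:binary_id into bits 31:24,
	 * 23:16 and 15:8; the create-message path below keys off the
	 * major version in the top byte.
	 */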
	adev->vce.fw_version = ((version_major << 24) | (version_minor << 16) |
				(binary_id << 8));
	/* allocate firmware, stack and heap BO */

	r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
			     AMDGPU_GEM_DOMAIN_VRAM,
			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
			     NULL, NULL, &adev->vce.vcpu_bo);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate VCE bo\n", r);
		return r;
	}
	r = amdgpu_bo_reserve(adev->vce.vcpu_bo, false);
	if (r) {
		amdgpu_bo_unref(&adev->vce.vcpu_bo);
		dev_err(adev->dev, "(%d) failed to reserve VCE bo\n", r);
		return r;
	}
	r = amdgpu_bo_pin(adev->vce.vcpu_bo, AMDGPU_GEM_DOMAIN_VRAM,
			  &adev->vce.gpu_addr);
	amdgpu_bo_unreserve(adev->vce.vcpu_bo);
	if (r) {
		amdgpu_bo_unref(&adev->vce.vcpu_bo);
		dev_err(adev->dev, "(%d) VCE bo pin failed\n", r);
		return r;
	}
	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		atomic_set(&adev->vce.handles[i], 0);
		adev->vce.filp[i] = NULL;
	}

	return 0;
}
/**
 * amdgpu_vce_sw_fini - free memory
 *
 * @adev: amdgpu_device pointer
 *
 * Last step on VCE teardown, free firmware memory
 */
int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
{
	if (adev->vce.vcpu_bo == NULL)
		return 0;

	amdgpu_bo_unref(&adev->vce.vcpu_bo);

	amdgpu_ring_fini(&adev->vce.ring[0]);
	amdgpu_ring_fini(&adev->vce.ring[1]);

	release_firmware(adev->vce.fw);

	return 0;
}
/**
 * amdgpu_vce_suspend - unpin VCE fw memory
 *
 * @adev: amdgpu_device pointer
 *
 */
int amdgpu_vce_suspend(struct amdgpu_device *adev)
{
	int i;

	if (adev->vce.vcpu_bo == NULL)
		return 0;

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
		if (atomic_read(&adev->vce.handles[i]))
			break;

	if (i == AMDGPU_MAX_VCE_HANDLES)
		return 0;

	/* TODO: suspending running encoding sessions isn't supported */
	return -EINVAL;
}
/**
 * amdgpu_vce_resume - pin VCE fw memory
 *
 * @adev: amdgpu_device pointer
 *
 */
int amdgpu_vce_resume(struct amdgpu_device *adev)
{
	void *cpu_addr;
	const struct common_firmware_header *hdr;
	unsigned offset;
	int r;

	if (adev->vce.vcpu_bo == NULL)
		return -EINVAL;

	r = amdgpu_bo_reserve(adev->vce.vcpu_bo, false);
	if (r) {
		dev_err(adev->dev, "(%d) failed to reserve VCE bo\n", r);
		return r;
	}

	r = amdgpu_bo_kmap(adev->vce.vcpu_bo, &cpu_addr);
	if (r) {
		amdgpu_bo_unreserve(adev->vce.vcpu_bo);
		dev_err(adev->dev, "(%d) VCE map failed\n", r);
		return r;
	}

	hdr = (const struct common_firmware_header *)adev->vce.fw->data;
	offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
	memcpy(cpu_addr, (adev->vce.fw->data) + offset,
	       (adev->vce.fw->size) - offset);

	amdgpu_bo_kunmap(adev->vce.vcpu_bo);

	amdgpu_bo_unreserve(adev->vce.vcpu_bo);

	return 0;
}
/**
 * amdgpu_vce_idle_work_handler - power off VCE
 *
 * @work: pointer to work structure
 *
 * power off VCE when it's not used any more
 */
static void amdgpu_vce_idle_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, vce.idle_work.work);
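	/*
	 * Power VCE down only when both rings have no fences left in
	 * flight; otherwise check again after another timeout period.
	 */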
	if ((amdgpu_fence_count_emitted(&adev->vce.ring[0]) == 0) &&
	    (amdgpu_fence_count_emitted(&adev->vce.ring[1]) == 0)) {
		if (adev->pm.dpm_enabled) {
			amdgpu_dpm_enable_vce(adev, false);
		} else {
			amdgpu_asic_set_vce_clocks(adev, 0, 0);
		}
	} else {
		schedule_delayed_work(&adev->vce.idle_work,
				      msecs_to_jiffies(VCE_IDLE_TIMEOUT_MS));
	}
}
/**
 * amdgpu_vce_note_usage - power up VCE
 *
 * @adev: amdgpu_device pointer
 *
 * Make sure VCE is powered up when we want to use it
 */
static void amdgpu_vce_note_usage(struct amdgpu_device *adev)
{
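	/*
	 * cancel_delayed_work_sync() returns false if the idle work was not
	 * pending, i.e. it already ran and may have powered VCE down, so
	 * the clocks have to be brought back up in that case.
	 */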
	bool streams_changed = false;
	bool set_clocks = !cancel_delayed_work_sync(&adev->vce.idle_work);
	set_clocks &= schedule_delayed_work(&adev->vce.idle_work,
					    msecs_to_jiffies(VCE_IDLE_TIMEOUT_MS));
	if (adev->pm.dpm_enabled) {
		/* XXX figure out if the streams changed */
		streams_changed = false;
	}
	if (set_clocks || streams_changed) {
		if (adev->pm.dpm_enabled) {
			amdgpu_dpm_enable_vce(adev, true);
		} else {
			amdgpu_asic_set_vce_clocks(adev, 53300, 40000);
		}
	}
}
/**
 * amdgpu_vce_free_handles - free still open VCE handles
 *
 * @adev: amdgpu_device pointer
 * @filp: drm file pointer
 *
 * Close all VCE handles still open by this file pointer
 */
void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
{
	struct amdgpu_ring *ring = &adev->vce.ring[0];
	int i, r;

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		uint32_t handle = atomic_read(&adev->vce.handles[i]);
		if (!handle || adev->vce.filp[i] != filp)
			continue;

		amdgpu_vce_note_usage(adev);

		r = amdgpu_vce_get_destroy_msg(ring, handle, NULL);
		if (r)
			DRM_ERROR("Error destroying VCE handle (%d)!\n", r);

		adev->vce.filp[i] = NULL;
		atomic_set(&adev->vce.handles[i], 0);
	}
}
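/*
 * Scheduler callback used by the submissions below to release an IB
 * once the job has completed.
 */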
static int amdgpu_vce_free_job(
	struct amdgpu_job *job)
{
	amdgpu_ib_free(job->adev, job->ibs);
	kfree(job->ibs);
	return 0;
}
/**
 * amdgpu_vce_get_create_msg - generate a VCE create msg
 *
 * @ring: ring we should submit the msg to
 * @handle: VCE session handle to use
 * @fence: optional fence to return
 *
 * Open up a stream for HW test
 */
int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
			      struct fence **fence)
{
	const unsigned ib_size_dw = 1024;
	struct amdgpu_ib *ib = NULL;
	struct fence *f = NULL;
	struct amdgpu_device *adev = ring->adev;
	uint64_t dummy;
	int i, r;
	ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
	if (!ib)
		return -ENOMEM;

	r = amdgpu_ib_get(ring, NULL, ib_size_dw * 4, ib);
	if (r) {
		DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
		kfree(ib);
		return r;
	}
	dummy = ib->gpu_addr + 1024;
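	/*
	 * "dummy" points just past the message inside the IB's own BO and
	 * serves as a throw-away feedback buffer address for this test msg.
	 */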
	/* stitch together a VCE create msg */
	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x0000000c; /* len */
	ib->ptr[ib->length_dw++] = 0x00000001; /* session cmd */
	ib->ptr[ib->length_dw++] = handle;
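	/*
	 * Firmware with major version >= 52 (top byte of fw_version)
	 * expects an extended create command: 0x40 bytes instead of 0x30,
	 * with four extra zero dwords appended below.
	 */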
	if ((ring->adev->vce.fw_version >> 24) >= 52)
		ib->ptr[ib->length_dw++] = 0x00000040; /* len */
	else
		ib->ptr[ib->length_dw++] = 0x00000030; /* len */
	ib->ptr[ib->length_dw++] = 0x01000001; /* create cmd */
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000042;
	ib->ptr[ib->length_dw++] = 0x0000000a;
	ib->ptr[ib->length_dw++] = 0x00000001;
	ib->ptr[ib->length_dw++] = 0x00000080;
	ib->ptr[ib->length_dw++] = 0x00000060;
	ib->ptr[ib->length_dw++] = 0x00000100;
	ib->ptr[ib->length_dw++] = 0x00000100;
	ib->ptr[ib->length_dw++] = 0x0000000c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	if ((ring->adev->vce.fw_version >> 24) >= 52) {
		ib->ptr[ib->length_dw++] = 0x00000000;
		ib->ptr[ib->length_dw++] = 0x00000000;
		ib->ptr[ib->length_dw++] = 0x00000000;
		ib->ptr[ib->length_dw++] = 0x00000000;
	}
	ib->ptr[ib->length_dw++] = 0x00000014; /* len */
	ib->ptr[ib->length_dw++] = 0x05000005; /* feedback buffer */
	ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
	ib->ptr[ib->length_dw++] = dummy;
	ib->ptr[ib->length_dw++] = 0x00000001;
	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;
	r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
						 &amdgpu_vce_free_job,
						 AMDGPU_FENCE_OWNER_UNDEFINED,
						 &f);
	if (r)
		goto err;

	if (fence)
		*fence = fence_get(f);
	fence_put(f);
	if (amdgpu_enable_scheduler)
		return 0;
err:
	amdgpu_ib_free(adev, ib);
	kfree(ib);
	return r;
}
/**
 * amdgpu_vce_get_destroy_msg - generate a VCE destroy msg
 *
 * @ring: ring we should submit the msg to
 * @handle: VCE session handle to use
 * @fence: optional fence to return
 *
 * Close up a stream for HW test or if userspace failed to do so
 */
int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
			       struct fence **fence)
{
	const unsigned ib_size_dw = 1024;
	struct amdgpu_ib *ib = NULL;
	struct fence *f = NULL;
	struct amdgpu_device *adev = ring->adev;
	uint64_t dummy;
	int i, r;
	ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
	if (!ib)
		return -ENOMEM;

	r = amdgpu_ib_get(ring, NULL, ib_size_dw * 4, ib);
	if (r) {
		kfree(ib);
		DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
		return r;
	}

	dummy = ib->gpu_addr + 1024;
	/* stitch together a VCE destroy msg */
	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x0000000c; /* len */
	ib->ptr[ib->length_dw++] = 0x00000001; /* session cmd */
	ib->ptr[ib->length_dw++] = handle;

	ib->ptr[ib->length_dw++] = 0x00000014; /* len */
	ib->ptr[ib->length_dw++] = 0x05000005; /* feedback buffer */
	ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
	ib->ptr[ib->length_dw++] = dummy;
	ib->ptr[ib->length_dw++] = 0x00000001;

	ib->ptr[ib->length_dw++] = 0x00000008; /* len */
	ib->ptr[ib->length_dw++] = 0x02000001; /* destroy cmd */
	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;
	r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
						 &amdgpu_vce_free_job,
						 AMDGPU_FENCE_OWNER_UNDEFINED,
						 &f);
	if (r)
		goto err;

	if (fence)
		*fence = fence_get(f);
	fence_put(f);
	if (amdgpu_enable_scheduler)
		return 0;
err:
	amdgpu_ib_free(adev, ib);
	kfree(ib);
	return r;
}
/**
 * amdgpu_vce_cs_reloc - command submission relocation
 *
 * @p: parser context
 * @ib_idx: indirect buffer to patch
 * @lo: address of lower dword
 * @hi: address of higher dword
 * @size: minimum size
 * @index: bs/fb index
 *
 * Patch relocation inside command stream with real buffer address
 */
static int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, uint32_t ib_idx,
			       int lo, int hi, unsigned size, uint32_t index)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_ib *ib = &p->ibs[ib_idx];
	struct amdgpu_bo *bo;
	uint64_t addr;
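	/*
	 * The command stream stores the base address of a buffer array;
	 * "index" selects which "size"-byte element is actually accessed,
	 * with 0xffffffff meaning the first element (no indexing).
	 */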
	if (index == 0xffffffff)
		index = 0;

	addr = ((uint64_t)amdgpu_get_ib_value(p, ib_idx, lo)) |
	       ((uint64_t)amdgpu_get_ib_value(p, ib_idx, hi)) << 32;
	addr += ((uint64_t)size) * ((uint64_t)index);
	mapping = amdgpu_cs_find_mapping(p, addr, &bo);
	if (mapping == NULL) {
		DRM_ERROR("Can't find BO for addr 0x%010Lx %d %d %d %d\n",
			  addr, lo, hi, size, index);
		return -EINVAL;
	}
	if ((addr + (uint64_t)size) >
	    ((uint64_t)mapping->it.last + 1) * AMDGPU_GPU_PAGE_SIZE) {
		DRM_ERROR("BO too small for addr 0x%010Lx %d %d\n",
			  addr, lo, hi);
		return -EINVAL;
	}
-= ((uint64_t)mapping
->it
.start
) * AMDGPU_GPU_PAGE_SIZE
;
550 addr
+= amdgpu_bo_gpu_offset(bo
);
551 addr
-= ((uint64_t)size
) * ((uint64_t)index
);
553 ib
->ptr
[lo
] = addr
& 0xFFFFFFFF;
554 ib
->ptr
[hi
] = addr
>> 32;
/**
 * amdgpu_vce_validate_handle - validate stream handle
 *
 * @p: parser context
 * @handle: handle to validate
 * @allocated: allocated a new handle?
 *
 * Validates the handle and return the found session index or -EINVAL
 * if we don't have another free session index.
 */
static int amdgpu_vce_validate_handle(struct amdgpu_cs_parser *p,
				      uint32_t handle, bool *allocated)
{
	unsigned i;

	*allocated = false;
	/* validate the handle */
	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		if (atomic_read(&p->adev->vce.handles[i]) == handle) {
			if (p->adev->vce.filp[i] != p->filp) {
				DRM_ERROR("VCE handle collision detected!\n");
				return -EINVAL;
			}
			return i;
		}
	}
	/* handle not found, try to alloc a new one */
	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		if (!atomic_cmpxchg(&p->adev->vce.handles[i], 0, handle)) {
			p->adev->vce.filp[i] = p->filp;
			p->adev->vce.img_size[i] = 0;
			*allocated = true;
			return i;
		}
	}

	DRM_ERROR("No more free VCE handles!\n");
	return -EINVAL;
}
/**
 * amdgpu_vce_ring_parse_cs - parse and validate the command stream
 *
 * @p: parser context
 * @ib_idx: indirect buffer to parse
 */
int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
{
	struct amdgpu_ib *ib = &p->ibs[ib_idx];
	unsigned fb_idx = 0, bs_idx = 0;
	int session_idx = -1;
	bool destroyed = false;
	bool created = false;
	bool allocated = false;
	uint32_t tmp, handle = 0;
	uint32_t *size = &tmp;
	int i, r = 0, idx = 0;
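	/*
	 * "size" points at the scratch variable tmp until a session command
	 * comes along and redirects it to the per-session image size; later
	 * buffer commands are then validated against that size.
	 */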
	amdgpu_vce_note_usage(p->adev);

	while (idx < ib->length_dw) {
		uint32_t len = amdgpu_get_ib_value(p, ib_idx, idx);
		uint32_t cmd = amdgpu_get_ib_value(p, ib_idx, idx + 1);

		if ((len < 8) || (len & 3)) {
			DRM_ERROR("invalid VCE command length (%d)!\n", len);
			r = -EINVAL;
			goto out;
		}

		if (destroyed) {
			DRM_ERROR("No other command allowed after destroy!\n");
			r = -EINVAL;
			goto out;
		}
		switch (cmd) {
		case 0x00000001: // session
			handle = amdgpu_get_ib_value(p, ib_idx, idx + 2);
			session_idx = amdgpu_vce_validate_handle(p, handle,
								 &allocated);
			if (session_idx < 0)
				return session_idx;
			size = &p->adev->vce.img_size[session_idx];
			break;
		case 0x00000002: // task info
			fb_idx = amdgpu_get_ib_value(p, ib_idx, idx + 6);
			bs_idx = amdgpu_get_ib_value(p, ib_idx, idx + 7);
			break;
		case 0x01000001: // create
			created = true;
			if (!allocated) {
				DRM_ERROR("Handle already in use!\n");
				r = -EINVAL;
				goto out;
			}

			*size = amdgpu_get_ib_value(p, ib_idx, idx + 8) *
				amdgpu_get_ib_value(p, ib_idx, idx + 10) *
				8 * 3 / 2;
			break;
		case 0x04000001: // config extension
		case 0x04000002: // pic control
		case 0x04000005: // rate control
		case 0x04000007: // motion estimation
		case 0x04000008: // rdo
		case 0x04000009: // vui
		case 0x05000002: // auxiliary buffer
			break;
		case 0x03000001: // encode
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 10, idx + 9,
						*size, 0);
			if (r)
				goto out;

			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 12, idx + 11,
						*size / 3, 0);
			if (r)
				goto out;
			break;
		case 0x02000001: // destroy
			destroyed = true;
			break;

		case 0x05000001: // context buffer
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,
						*size * 2, 0);
			if (r)
				goto out;
			break;
		case 0x05000004: // video bitstream buffer
			tmp = amdgpu_get_ib_value(p, ib_idx, idx + 4);
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,
						tmp, bs_idx);
			if (r)
				goto out;
			break;
		case 0x05000005: // feedback buffer
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,
						4096, fb_idx);
			if (r)
				goto out;
			break;
713 DRM_ERROR("invalid VCE command (0x%x)!\n", cmd
);
718 if (session_idx
== -1) {
719 DRM_ERROR("no session command at start of IB\n");
	if (allocated && !created) {
		DRM_ERROR("New session without create command!\n");
		r = -EINVAL;
	}

out:
	if ((!r && destroyed) || (r && allocated)) {
		/*
		 * IB contains a destroy msg or we have allocated a
		 * handle and got an error, anyway free the handle
		 */
		for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
			atomic_cmpxchg(&p->adev->vce.handles[i], handle, 0);
	}

	return r;
}
/**
 * amdgpu_vce_ring_emit_semaphore - emit a semaphore command
 *
 * @ring: engine to use
 * @semaphore: address of semaphore
 * @emit_wait: true=emit wait, false=emit signal
 *
 */
bool amdgpu_vce_ring_emit_semaphore(struct amdgpu_ring *ring,
				    struct amdgpu_semaphore *semaphore,
				    bool emit_wait)
{
	uint64_t addr = semaphore->gpu_addr;
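	/*
	 * The 8-byte aligned semaphore address (shifted down by 3) is
	 * split into two 20-bit fields, low bits first, followed by a
	 * wait/signal control word.
	 */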
	amdgpu_ring_write(ring, VCE_CMD_SEMAPHORE);
	amdgpu_ring_write(ring, (addr >> 3) & 0x000FFFFF);
	amdgpu_ring_write(ring, (addr >> 23) & 0x000FFFFF);
	amdgpu_ring_write(ring, 0x01003000 | (emit_wait ? 1 : 0));
	if (!emit_wait)
		amdgpu_ring_write(ring, VCE_CMD_END);

	return true;
}
/**
 * amdgpu_vce_ring_emit_ib - execute indirect buffer
 *
 * @ring: engine to use
 * @ib: the IB to execute
 *
 */
void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{
	amdgpu_ring_write(ring, VCE_CMD_IB);
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
}
/**
 * amdgpu_vce_ring_emit_fence - add a fence command to the ring
 *
 * @ring: engine to use
 * @addr: address to write the fence value to
 * @seq: sequence number of the fence
 * @flags: fence flags
 *
 */
void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, VCE_CMD_FENCE);
	amdgpu_ring_write(ring, addr);
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, VCE_CMD_TRAP);
	amdgpu_ring_write(ring, VCE_CMD_END);
}
/**
 * amdgpu_vce_ring_test_ring - test if VCE ring is working
 *
 * @ring: the engine to test on
 *
 */
int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t rptr = amdgpu_ring_get_rptr(ring);
	unsigned i;
	int r;
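	/*
	 * Push a single VCE_CMD_END and consider the ring alive once the
	 * read pointer moves past the captured value within the timeout.
	 */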
	r = amdgpu_ring_lock(ring, 16);
	if (r) {
		DRM_ERROR("amdgpu: vce failed to lock ring %d (%d).\n",
			  ring->idx, r);
		return r;
	}
	amdgpu_ring_write(ring, VCE_CMD_END);
	amdgpu_ring_unlock_commit(ring);
	for (i = 0; i < adev->usec_timeout; i++) {
		if (amdgpu_ring_get_rptr(ring) != rptr)
			break;
		DRM_UDELAY(1);
	}
	if (i < adev->usec_timeout) {
		DRM_INFO("ring test on %d succeeded in %d usecs\n",
			 ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed\n",
			  ring->idx);
		r = -ETIMEDOUT;
	}

	return r;
}
/**
 * amdgpu_vce_ring_test_ib - test if VCE IBs are working
 *
 * @ring: the engine to test on
 *
 */
int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring)
{
	struct fence *fence = NULL;
	int r;

	/* skip vce ring1 ib test for now, since it's not reliable */
	if (ring == &ring->adev->vce.ring[1])
		return 0;
	r = amdgpu_vce_get_create_msg(ring, 1, NULL);
	if (r) {
		DRM_ERROR("amdgpu: failed to get create msg (%d).\n", r);
		goto error;
	}

	r = amdgpu_vce_get_destroy_msg(ring, 1, &fence);
	if (r) {
		DRM_ERROR("amdgpu: failed to get destroy ib (%d).\n", r);
		goto error;
	}

	r = fence_wait(fence, false);
	if (r) {
		DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
	} else {
		DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
	}

error:
	fence_put(fence);
	return r;
}