/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */
#include <linux/firmware.h>
#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/drm.h>

#include "radeon.h"
#include "radeon_asic.h"
#include "sid.h"

/* 1 second timeout */
#define VCE_IDLE_TIMEOUT_MS	1000
/* Firmware Names */
#define FIRMWARE_TAHITI	"radeon/TAHITI_vce.bin"
#define FIRMWARE_BONAIRE	"radeon/BONAIRE_vce.bin"

MODULE_FIRMWARE(FIRMWARE_TAHITI);
MODULE_FIRMWARE(FIRMWARE_BONAIRE);

static void radeon_vce_idle_work_handler(struct work_struct *work);
/**
 * radeon_vce_init - allocate memory, load vce firmware
 *
 * @rdev: radeon_device pointer
 *
 * First step to get VCE online, allocate memory and load the firmware
 */
int radeon_vce_init(struct radeon_device *rdev)
{
	static const char *fw_version = "[ATI LIB=VCEFW,";
	static const char *fb_version = "[ATI LIB=VCEFWSTATS,";
	unsigned long size;
	const char *fw_name, *c;
	uint8_t start, mid, end;
	int i, r;

	INIT_DELAYED_WORK(&rdev->vce.idle_work, radeon_vce_idle_work_handler);

	switch (rdev->family) {
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
	case CHIP_OLAND:
	case CHIP_ARUBA:
		fw_name = FIRMWARE_TAHITI;
		break;

	case CHIP_BONAIRE:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_HAWAII:
	case CHIP_MULLINS:
		fw_name = FIRMWARE_BONAIRE;
		break;

	default:
		return -EINVAL;
	}

	r = request_firmware(&rdev->vce_fw, fw_name, rdev->dev);
	if (r) {
		dev_err(rdev->dev, "radeon_vce: Can't load firmware \"%s\"\n",
			fw_name);
		return r;
	}
	/* search for firmware version */

	size = rdev->vce_fw->size - strlen(fw_version) - 9;
	c = rdev->vce_fw->data;
	for (; size > 0; --size, ++c)
		if (strncmp(c, fw_version, strlen(fw_version)) == 0)
			break;

	if (size == 0)
		return -EINVAL;

	c += strlen(fw_version);
	if (sscanf(c, "%2hhd.%2hhd.%2hhd]", &start, &mid, &end) != 3)
		return -EINVAL;

	/* search for feedback version */

	size = rdev->vce_fw->size - strlen(fb_version) - 3;
	c = rdev->vce_fw->data;
	for (; size > 0; --size, ++c)
		if (strncmp(c, fb_version, strlen(fb_version)) == 0)
			break;

	if (size == 0)
		return -EINVAL;

	c += strlen(fb_version);
	if (sscanf(c, "%2u]", &rdev->vce.fb_version) != 1)
		return -EINVAL;

	DRM_INFO("Found VCE firmware/feedback version %d.%d.%d / %d!\n",
		 start, mid, end, rdev->vce.fb_version);

	rdev->vce.fw_version = (start << 24) | (mid << 16) | (end << 8);
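
	/*
	 * The packed format is (major << 24) | (minor << 16) | (patch << 8);
	 * firmware version 50.0.1, for example, becomes 0x32000100.
	 */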
	/* we can only work with this fw version for now */
	if ((rdev->vce.fw_version != ((40 << 24) | (2 << 16) | (2 << 8))) && /* 40.2.2 */
	    (rdev->vce.fw_version != ((50 << 24) | (0 << 16) | (1 << 8))) && /* 50.0.1 */
	    (rdev->vce.fw_version != ((50 << 24) | (1 << 16) | (2 << 8))))   /* 50.1.2 */
		return -EINVAL;
	/* allocate firmware, stack and heap BO */

	if (rdev->family < CHIP_BONAIRE)
		size = vce_v1_0_bo_size(rdev);
	else
		size = vce_v2_0_bo_size(rdev);
	r = radeon_bo_create(rdev, size, PAGE_SIZE, true,
			     RADEON_GEM_DOMAIN_VRAM, 0, NULL, NULL,
			     &rdev->vce.vcpu_bo);
	if (r) {
		dev_err(rdev->dev, "(%d) failed to allocate VCE bo\n", r);
		return r;
	}

	r = radeon_bo_reserve(rdev->vce.vcpu_bo, false);
	if (r) {
		radeon_bo_unref(&rdev->vce.vcpu_bo);
		dev_err(rdev->dev, "(%d) failed to reserve VCE bo\n", r);
		return r;
	}

	r = radeon_bo_pin(rdev->vce.vcpu_bo, RADEON_GEM_DOMAIN_VRAM,
			  &rdev->vce.gpu_addr);
	radeon_bo_unreserve(rdev->vce.vcpu_bo);
	if (r) {
		radeon_bo_unref(&rdev->vce.vcpu_bo);
		dev_err(rdev->dev, "(%d) VCE bo pin failed\n", r);
		return r;
	}

	for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) {
		atomic_set(&rdev->vce.handles[i], 0);
		rdev->vce.filp[i] = NULL;
	}

	return 0;
}
/**
 * radeon_vce_fini - free memory
 *
 * @rdev: radeon_device pointer
 *
 * Last step on VCE teardown, free firmware memory
 */
void radeon_vce_fini(struct radeon_device *rdev)
{
	if (rdev->vce.vcpu_bo == NULL)
		return;

	radeon_bo_unref(&rdev->vce.vcpu_bo);

	release_firmware(rdev->vce_fw);
}
/**
 * radeon_vce_suspend - unpin VCE fw memory
 *
 * @rdev: radeon_device pointer
 *
 */
int radeon_vce_suspend(struct radeon_device *rdev)
{
	int i;

	if (rdev->vce.vcpu_bo == NULL)
		return 0;

	for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i)
		if (atomic_read(&rdev->vce.handles[i]))
			break;

	if (i == RADEON_MAX_VCE_HANDLES)
		return 0;

	/* TODO: suspending running encoding sessions isn't supported */
	return -EINVAL;
}
/**
 * radeon_vce_resume - pin VCE fw memory
 *
 * @rdev: radeon_device pointer
 *
 */
int radeon_vce_resume(struct radeon_device *rdev)
{
	void *cpu_addr;
	int r;

	if (rdev->vce.vcpu_bo == NULL)
		return -EINVAL;

	r = radeon_bo_reserve(rdev->vce.vcpu_bo, false);
	if (r) {
		dev_err(rdev->dev, "(%d) failed to reserve VCE bo\n", r);
		return r;
	}

	r = radeon_bo_kmap(rdev->vce.vcpu_bo, &cpu_addr);
	if (r) {
		radeon_bo_unreserve(rdev->vce.vcpu_bo);
		dev_err(rdev->dev, "(%d) VCE map failed\n", r);
		return r;
	}
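
	/*
	 * VCE 1.0 firmware goes through a dedicated loader,
	 * vce_v1_0_load_fw(), while VCE 2.0 parts can take a verbatim copy
	 * of the firmware image.
	 */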
	memset(cpu_addr, 0, radeon_bo_size(rdev->vce.vcpu_bo));
	if (rdev->family < CHIP_BONAIRE)
		r = vce_v1_0_load_fw(rdev, cpu_addr);
	else
		memcpy(cpu_addr, rdev->vce_fw->data, rdev->vce_fw->size);

	radeon_bo_kunmap(rdev->vce.vcpu_bo);

	radeon_bo_unreserve(rdev->vce.vcpu_bo);

	return r;
}
/**
 * radeon_vce_idle_work_handler - power off VCE
 *
 * @work: pointer to work structure
 *
 * power off VCE when it's not used anymore
 */
static void radeon_vce_idle_work_handler(struct work_struct *work)
{
	struct radeon_device *rdev =
		container_of(work, struct radeon_device, vce.idle_work.work);

	if ((radeon_fence_count_emitted(rdev, TN_RING_TYPE_VCE1_INDEX) == 0) &&
	    (radeon_fence_count_emitted(rdev, TN_RING_TYPE_VCE2_INDEX) == 0)) {
		if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
			radeon_dpm_enable_vce(rdev, false);
		} else {
			radeon_set_vce_clocks(rdev, 0, 0);
		}
	} else {
		schedule_delayed_work(&rdev->vce.idle_work,
				      msecs_to_jiffies(VCE_IDLE_TIMEOUT_MS));
	}
}
/**
 * radeon_vce_note_usage - power up VCE
 *
 * @rdev: radeon_device pointer
 *
 * Make sure VCE is powered up when we want to use it
 */
void radeon_vce_note_usage(struct radeon_device *rdev)
{
	bool streams_changed = false;
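	/*
	 * If the idle work was not pending, cancel_delayed_work_sync()
	 * returns false, meaning the idle handler may already have powered
	 * VCE down and the clocks have to be brought back up.
	 */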
	bool set_clocks = !cancel_delayed_work_sync(&rdev->vce.idle_work);
	set_clocks &= schedule_delayed_work(&rdev->vce.idle_work,
					    msecs_to_jiffies(VCE_IDLE_TIMEOUT_MS));

	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
		/* XXX figure out if the streams changed */
		streams_changed = false;
	}

	if (set_clocks || streams_changed) {
		if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
			radeon_dpm_enable_vce(rdev, true);
		} else {
			radeon_set_vce_clocks(rdev, 53300, 40000);
		}
	}
}
/**
 * radeon_vce_free_handles - free still open VCE handles
 *
 * @rdev: radeon_device pointer
 * @filp: drm file pointer
 *
 * Close all VCE handles still open by this file pointer
 */
void radeon_vce_free_handles(struct radeon_device *rdev, struct drm_file *filp)
{
	int i, r;

	for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) {
		uint32_t handle = atomic_read(&rdev->vce.handles[i]);
		if (!handle || rdev->vce.filp[i] != filp)
			continue;

		radeon_vce_note_usage(rdev);

		r = radeon_vce_get_destroy_msg(rdev, TN_RING_TYPE_VCE1_INDEX,
					       handle, NULL);
		if (r)
			DRM_ERROR("Error destroying VCE handle (%d)!\n", r);

		rdev->vce.filp[i] = NULL;
		atomic_set(&rdev->vce.handles[i], 0);
	}
}
/**
 * radeon_vce_get_create_msg - generate a VCE create msg
 *
 * @rdev: radeon_device pointer
 * @ring: ring we should submit the msg to
 * @handle: VCE session handle to use
 * @fence: optional fence to return
 *
 * Open up a stream for HW test
 */
int radeon_vce_get_create_msg(struct radeon_device *rdev, int ring,
			      uint32_t handle, struct radeon_fence **fence)
{
	const unsigned ib_size_dw = 1024;
	struct radeon_ib ib;
	uint64_t dummy;
	int i, r;

	r = radeon_ib_get(rdev, ring, &ib, NULL, ib_size_dw * 4);
	if (r) {
		DRM_ERROR("radeon: failed to get ib (%d).\n", r);
		return r;
	}
	dummy = ib.gpu_addr + 1024;

	/* stitch together a VCE create msg */
	ib.length_dw = 0;
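	/*
	 * Each VCE message below is a packet of dwords: total length in
	 * bytes, a command dword, then the payload. 0x00000001 selects the
	 * session, 0x01000001 creates the encoder, and 0x05000005 points the
	 * firmware at a feedback buffer (here a dummy address inside the IB).
	 */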
	ib.ptr[ib.length_dw++] = cpu_to_le32(0x0000000c); /* len */
	ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000001); /* session cmd */
	ib.ptr[ib.length_dw++] = cpu_to_le32(handle);

	ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000030); /* len */
	ib.ptr[ib.length_dw++] = cpu_to_le32(0x01000001); /* create cmd */
	ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000000);
	ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000042);
	ib.ptr[ib.length_dw++] = cpu_to_le32(0x0000000a);
	ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000001);
	ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000080);
	ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000060);
	ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000100);
	ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000100);
	ib.ptr[ib.length_dw++] = cpu_to_le32(0x0000000c);
	ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000000);

	ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000014); /* len */
	ib.ptr[ib.length_dw++] = cpu_to_le32(0x05000005); /* feedback buffer */
	ib.ptr[ib.length_dw++] = cpu_to_le32(upper_32_bits(dummy));
	ib.ptr[ib.length_dw++] = cpu_to_le32(dummy);
	ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000001);
	for (i = ib.length_dw; i < ib_size_dw; ++i)
		ib.ptr[i] = cpu_to_le32(0x0);

	r = radeon_ib_schedule(rdev, &ib, NULL, false);
	if (r)
		DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);

	if (fence)
		*fence = radeon_fence_ref(ib.fence);

	radeon_ib_free(rdev, &ib);

	return r;
}
/**
 * radeon_vce_get_destroy_msg - generate a VCE destroy msg
 *
 * @rdev: radeon_device pointer
 * @ring: ring we should submit the msg to
 * @handle: VCE session handle to use
 * @fence: optional fence to return
 *
 * Close up a stream for HW test or if userspace failed to do so
 */
int radeon_vce_get_destroy_msg(struct radeon_device *rdev, int ring,
			       uint32_t handle, struct radeon_fence **fence)
{
	const unsigned ib_size_dw = 1024;
	struct radeon_ib ib;
	uint64_t dummy;
	int i, r;

	r = radeon_ib_get(rdev, ring, &ib, NULL, ib_size_dw * 4);
	if (r) {
		DRM_ERROR("radeon: failed to get ib (%d).\n", r);
		return r;
	}

	dummy = ib.gpu_addr + 1024;

	/* stitch together a VCE destroy msg */
	ib.length_dw = 0;
	ib.ptr[ib.length_dw++] = cpu_to_le32(0x0000000c); /* len */
	ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000001); /* session cmd */
	ib.ptr[ib.length_dw++] = cpu_to_le32(handle);

	ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000014); /* len */
	ib.ptr[ib.length_dw++] = cpu_to_le32(0x05000005); /* feedback buffer */
	ib.ptr[ib.length_dw++] = cpu_to_le32(upper_32_bits(dummy));
	ib.ptr[ib.length_dw++] = cpu_to_le32(dummy);
	ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000001);

	ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000008); /* len */
	ib.ptr[ib.length_dw++] = cpu_to_le32(0x02000001); /* destroy cmd */
	for (i = ib.length_dw; i < ib_size_dw; ++i)
		ib.ptr[i] = cpu_to_le32(0x0);

	r = radeon_ib_schedule(rdev, &ib, NULL, false);
	if (r)
		DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);

	if (fence)
		*fence = radeon_fence_ref(ib.fence);

	radeon_ib_free(rdev, &ib);

	return r;
}
/**
 * radeon_vce_cs_reloc - command submission relocation
 *
 * @p: parser context
 * @lo: address of lower dword
 * @hi: address of higher dword
 * @size: minimum size the relocated buffer must cover
 *
 * Patch relocation inside command stream with real buffer address
 */
int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi,
			unsigned size)
{
	struct radeon_cs_chunk *relocs_chunk;
	struct radeon_bo_list *reloc;
	uint64_t start, end, offset;
	unsigned idx;

	relocs_chunk = p->chunk_relocs;
	offset = radeon_get_ib_value(p, lo);
	idx = radeon_get_ib_value(p, hi);

	if (idx >= relocs_chunk->length_dw) {
		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
			  idx, relocs_chunk->length_dw);
		return -EINVAL;
	}

	reloc = &p->relocs[(idx / 4)];
	start = reloc->gpu_offset;
	end = start + radeon_bo_size(reloc->robj);
	start += offset;
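
	/* write the relocated 64-bit GPU address back into the command
	 * stream, split into its lower and upper dwords */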
	p->ib.ptr[lo] = start & 0xFFFFFFFF;
	p->ib.ptr[hi] = start >> 32;

	if (end <= start) {
		DRM_ERROR("invalid reloc offset %llX!\n", offset);
		return -EINVAL;
	}
	if ((end - start) < size) {
		DRM_ERROR("buffer too small (%d / %d)!\n",
			  (unsigned)(end - start), size);
		return -EINVAL;
	}

	return 0;
}
/**
 * radeon_vce_validate_handle - validate stream handle
 *
 * @p: parser context
 * @handle: handle to validate
 * @allocated: allocated a new handle?
 *
 * Validates the handle and returns the found session index or -EINVAL
 * when we don't have another free session index.
 */
static int radeon_vce_validate_handle(struct radeon_cs_parser *p,
				      uint32_t handle, bool *allocated)
{
	unsigned i;
	/* validate the handle */
	for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) {
		if (atomic_read(&p->rdev->vce.handles[i]) == handle) {
			if (p->rdev->vce.filp[i] != p->filp) {
				DRM_ERROR("VCE handle collision detected!\n");
				return -EINVAL;
			}
			return i;
		}
	}
	/* handle not found try to alloc a new one */
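	/*
	 * atomic_cmpxchg() returns the previous value: zero means the slot
	 * was free and has now been claimed for this handle, so concurrent
	 * submissions cannot end up sharing a session slot.
	 */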
	for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) {
		if (!atomic_cmpxchg(&p->rdev->vce.handles[i], 0, handle)) {
			p->rdev->vce.filp[i] = p->filp;
			p->rdev->vce.img_size[i] = 0;
			*allocated = true;
			return i;
		}
	}

	DRM_ERROR("No more free VCE handles!\n");
	return -EINVAL;
}
/**
 * radeon_vce_cs_parse - parse and validate the command stream
 *
 * @p: parser structure holding parsing context.
 *
 */
int radeon_vce_cs_parse(struct radeon_cs_parser *p)
{
	int session_idx = -1;
	bool destroyed = false, created = false, allocated = false;
	uint32_t tmp, handle = 0;
	uint32_t *size = &tmp;
	int i, r = 0;
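
	/*
	 * The IB is a sequence of VCE messages, each starting with a length
	 * in bytes and a command dword; a session command (0x00000001) must
	 * come first so following commands can be matched to a session slot.
	 */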
	while (p->idx < p->chunk_ib->length_dw) {
		uint32_t len = radeon_get_ib_value(p, p->idx);
		uint32_t cmd = radeon_get_ib_value(p, p->idx + 1);

		if ((len < 8) || (len & 3)) {
			DRM_ERROR("invalid VCE command length (%d)!\n", len);
			r = -EINVAL;
			goto out;
		}

		if (destroyed) {
			DRM_ERROR("No other command allowed after destroy!\n");
			r = -EINVAL;
			goto out;
		}

		switch (cmd) {
		case 0x00000001: // session
			handle = radeon_get_ib_value(p, p->idx + 2);
			session_idx = radeon_vce_validate_handle(p, handle,
								 &allocated);
			if (session_idx < 0)
				return session_idx;
			size = &p->rdev->vce.img_size[session_idx];
			break;

		case 0x00000002: // task info
			break;

		case 0x01000001: // create
			created = true;
			if (!allocated) {
				DRM_ERROR("Handle already in use!\n");
				r = -EINVAL;
				goto out;
			}

			*size = radeon_get_ib_value(p, p->idx + 8) *
				radeon_get_ib_value(p, p->idx + 10) *
				8 * 3 / 2;
			break;

		case 0x04000001: // config extension
		case 0x04000002: // pic control
		case 0x04000005: // rate control
		case 0x04000007: // motion estimation
		case 0x04000008: // rdo
		case 0x04000009: // vui
			break;

		case 0x03000001: // encode
			r = radeon_vce_cs_reloc(p, p->idx + 10, p->idx + 9,
						*size);
			if (r)
				goto out;

			r = radeon_vce_cs_reloc(p, p->idx + 12, p->idx + 11,
						*size / 3);
			if (r)
				goto out;
			break;

		case 0x02000001: // destroy
			destroyed = true;
			break;

		case 0x05000001: // context buffer
			r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2,
						*size * 2);
			if (r)
				goto out;
			break;

		case 0x05000004: // video bitstream buffer
			tmp = radeon_get_ib_value(p, p->idx + 4);
			r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2,
						tmp);
			if (r)
				goto out;
			break;

		case 0x05000005: // feedback buffer
			r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2,
						4096);
			if (r)
				goto out;
			break;

		default:
			DRM_ERROR("invalid VCE command (0x%x)!\n", cmd);
			r = -EINVAL;
			goto out;
		}
		if (session_idx == -1) {
			DRM_ERROR("no session command at start of IB\n");
			r = -EINVAL;
			goto out;
		}

		p->idx += len / 4;
	}

	if (allocated && !created) {
		DRM_ERROR("New session without create command!\n");
		r = -ENOENT;
	}

out:
	if ((!r && destroyed) || (r && allocated)) {
		/*
		 * IB contains a destroy msg or we have allocated a
		 * handle and got an error, anyway free the handle
		 */
		for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i)
			atomic_cmpxchg(&p->rdev->vce.handles[i], handle, 0);
	}

	return r;
}
/**
 * radeon_vce_semaphore_emit - emit a semaphore command
 *
 * @rdev: radeon_device pointer
 * @ring: engine to use
 * @semaphore: address of semaphore
 * @emit_wait: true=emit wait, false=emit signal
 *
 */
bool radeon_vce_semaphore_emit(struct radeon_device *rdev,
			       struct radeon_ring *ring,
			       struct radeon_semaphore *semaphore,
			       bool emit_wait)
{
	uint64_t addr = semaphore->gpu_addr;
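
	/* the 8-byte aligned semaphore address is passed to the engine as
	 * two 20-bit chunks: bits 3..22 and bits 23..42 */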
	radeon_ring_write(ring, cpu_to_le32(VCE_CMD_SEMAPHORE));
	radeon_ring_write(ring, cpu_to_le32((addr >> 3) & 0x000FFFFF));
	radeon_ring_write(ring, cpu_to_le32((addr >> 23) & 0x000FFFFF));
	radeon_ring_write(ring, cpu_to_le32(0x01003000 | (emit_wait ? 1 : 0)));
	if (!emit_wait)
		radeon_ring_write(ring, cpu_to_le32(VCE_CMD_END));

	return true;
}
/**
 * radeon_vce_ib_execute - execute indirect buffer
 *
 * @rdev: radeon_device pointer
 * @ib: the IB to execute
 *
 */
void radeon_vce_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
	struct radeon_ring *ring = &rdev->ring[ib->ring];
	radeon_ring_write(ring, cpu_to_le32(VCE_CMD_IB));
	radeon_ring_write(ring, cpu_to_le32(ib->gpu_addr));
	radeon_ring_write(ring, cpu_to_le32(upper_32_bits(ib->gpu_addr)));
	radeon_ring_write(ring, cpu_to_le32(ib->length_dw));
}
/**
 * radeon_vce_fence_emit - add a fence command to the ring
 *
 * @rdev: radeon_device pointer
 * @fence: the fence
 *
 */
void radeon_vce_fence_emit(struct radeon_device *rdev,
			   struct radeon_fence *fence)
{
	struct radeon_ring *ring = &rdev->ring[fence->ring];
	uint64_t addr = rdev->fence_drv[fence->ring].gpu_addr;
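
	/* FENCE writes the sequence number to the fence address; the
	 * following TRAP should raise an interrupt to wake up waiters */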
	radeon_ring_write(ring, cpu_to_le32(VCE_CMD_FENCE));
	radeon_ring_write(ring, cpu_to_le32(addr));
	radeon_ring_write(ring, cpu_to_le32(upper_32_bits(addr)));
	radeon_ring_write(ring, cpu_to_le32(fence->seq));
	radeon_ring_write(ring, cpu_to_le32(VCE_CMD_TRAP));
	radeon_ring_write(ring, cpu_to_le32(VCE_CMD_END));
}
/**
 * radeon_vce_ring_test - test if VCE ring is working
 *
 * @rdev: radeon_device pointer
 * @ring: the engine to test on
 *
 */
int radeon_vce_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
{
	uint32_t rptr = vce_v1_0_get_rptr(rdev, ring);
	unsigned i;
	int r;

	r = radeon_ring_lock(rdev, ring, 16);
	if (r) {
		DRM_ERROR("radeon: vce failed to lock ring %d (%d).\n",
			  ring->idx, r);
		return r;
	}
	radeon_ring_write(ring, cpu_to_le32(VCE_CMD_END));
	radeon_ring_unlock_commit(rdev, ring, false);
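
	/* a live engine consumes the END command, so the read pointer
	 * moving past its old value proves the ring works */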
	for (i = 0; i < rdev->usec_timeout; i++) {
		if (vce_v1_0_get_rptr(rdev, ring) != rptr)
			break;
		udelay(1);
	}

	if (i < rdev->usec_timeout) {
		DRM_INFO("ring test on %d succeeded in %d usecs\n",
			 ring->idx, i);
	} else {
		DRM_ERROR("radeon: ring %d test failed\n",
			  ring->idx);
		r = -ETIMEDOUT;
	}

	return r;
}
/**
 * radeon_vce_ib_test - test if VCE IBs are working
 *
 * @rdev: radeon_device pointer
 * @ring: the engine to test on
 *
 */
int radeon_vce_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
{
	struct radeon_fence *fence = NULL;
	int r;
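
	/* round trip: open a dummy session, close it again, and wait for
	 * the fence of the destroy message to signal */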
	r = radeon_vce_get_create_msg(rdev, ring->idx, 1, NULL);
	if (r) {
		DRM_ERROR("radeon: failed to get create msg (%d).\n", r);
		goto error;
	}

	r = radeon_vce_get_destroy_msg(rdev, ring->idx, 1, &fence);
	if (r) {
		DRM_ERROR("radeon: failed to get destroy ib (%d).\n", r);
		goto error;
	}

	r = radeon_fence_wait_timeout(fence, false, usecs_to_jiffies(
		RADEON_USEC_IB_TEST_TIMEOUT));
	if (r < 0) {
		DRM_ERROR("radeon: fence wait failed (%d).\n", r);
	} else if (r == 0) {
		DRM_ERROR("radeon: fence wait timed out.\n");
		r = -ETIMEDOUT;
	} else {
		DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
		r = 0;
	}
error:
	radeon_fence_unref(&fence);
	return r;
}