/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#include "CUnit/Basic.h"

#include "util_math.h"

#include "amdgpu_test.h"
#include "decode_messages.h"
#include "amdgpu_drm.h"
#include "amdgpu_internal.h"
36 #define MAX_RESOURCES 16
38 static amdgpu_device_handle device_handle
;
39 static uint32_t major_version
;
40 static uint32_t minor_version
;
41 static uint32_t family_id
;
42 static uint32_t chip_rev
;
43 static uint32_t chip_id
;
45 static amdgpu_context_handle context_handle
;
46 static amdgpu_bo_handle ib_handle
;
47 static uint64_t ib_mc_address
;
48 static uint32_t *ib_cpu
;
49 static amdgpu_va_handle ib_va_handle
;
51 static amdgpu_bo_handle resources
[MAX_RESOURCES
];
52 static unsigned num_resources
;
54 static void amdgpu_cs_uvd_create(void);
55 static void amdgpu_cs_uvd_decode(void);
56 static void amdgpu_cs_uvd_destroy(void);
58 CU_TestInfo cs_tests
[] = {
59 { "UVD create", amdgpu_cs_uvd_create
},
60 { "UVD decode", amdgpu_cs_uvd_decode
},
61 { "UVD destroy", amdgpu_cs_uvd_destroy
},
65 CU_BOOL
suite_cs_tests_enable(void)
67 if (amdgpu_device_initialize(drm_amdgpu
[0], &major_version
,
68 &minor_version
, &device_handle
))
71 family_id
= device_handle
->info
.family_id
;
72 chip_id
= device_handle
->info
.chip_external_rev
;
73 chip_rev
= device_handle
->info
.chip_rev
;
75 if (amdgpu_device_deinitialize(device_handle
))
79 if (family_id
>= AMDGPU_FAMILY_RV
|| family_id
== AMDGPU_FAMILY_SI
||
80 asic_is_gfx_pipe_removed(family_id
, chip_id
, chip_rev
)) {
81 printf("\n\nThe ASIC NOT support UVD, suite disabled\n");
88 int suite_cs_tests_init(void)
90 amdgpu_bo_handle ib_result_handle
;
92 uint64_t ib_result_mc_address
;
93 amdgpu_va_handle ib_result_va_handle
;
96 r
= amdgpu_device_initialize(drm_amdgpu
[0], &major_version
,
97 &minor_version
, &device_handle
);
99 if ((r
== -EACCES
) && (errno
== EACCES
))
100 printf("\n\nError:%s. "
101 "Hint:Try to run this test program as root.",
104 return CUE_SINIT_FAILED
;
107 family_id
= device_handle
->info
.family_id
;
108 /* VI asic POLARIS10/11 have specific external_rev_id */
109 chip_rev
= device_handle
->info
.chip_rev
;
110 chip_id
= device_handle
->info
.chip_external_rev
;
112 r
= amdgpu_cs_ctx_create(device_handle
, &context_handle
);
114 return CUE_SINIT_FAILED
;
116 r
= amdgpu_bo_alloc_and_map(device_handle
, IB_SIZE
, 4096,
117 AMDGPU_GEM_DOMAIN_GTT
, 0,
118 &ib_result_handle
, &ib_result_cpu
,
119 &ib_result_mc_address
,
120 &ib_result_va_handle
);
122 return CUE_SINIT_FAILED
;
124 ib_handle
= ib_result_handle
;
125 ib_mc_address
= ib_result_mc_address
;
126 ib_cpu
= ib_result_cpu
;
127 ib_va_handle
= ib_result_va_handle
;
132 int suite_cs_tests_clean(void)
136 r
= amdgpu_bo_unmap_and_free(ib_handle
, ib_va_handle
,
137 ib_mc_address
, IB_SIZE
);
139 return CUE_SCLEAN_FAILED
;
141 r
= amdgpu_cs_ctx_free(context_handle
);
143 return CUE_SCLEAN_FAILED
;
145 r
= amdgpu_device_deinitialize(device_handle
);
147 return CUE_SCLEAN_FAILED
;
152 static int submit(unsigned ndw
, unsigned ip
)
154 struct amdgpu_cs_request ibs_request
= {0};
155 struct amdgpu_cs_ib_info ib_info
= {0};
156 struct amdgpu_cs_fence fence_status
= {0};
160 ib_info
.ib_mc_address
= ib_mc_address
;
163 ibs_request
.ip_type
= ip
;
165 r
= amdgpu_bo_list_create(device_handle
, num_resources
, resources
,
166 NULL
, &ibs_request
.resources
);
170 ibs_request
.number_of_ibs
= 1;
171 ibs_request
.ibs
= &ib_info
;
172 ibs_request
.fence_info
.handle
= NULL
;
174 r
= amdgpu_cs_submit(context_handle
, 0, &ibs_request
, 1);
178 r
= amdgpu_bo_list_destroy(ibs_request
.resources
);
182 fence_status
.context
= context_handle
;
183 fence_status
.ip_type
= ip
;
184 fence_status
.fence
= ibs_request
.seq_no
;
186 r
= amdgpu_cs_query_fence_status(&fence_status
,
187 AMDGPU_TIMEOUT_INFINITE
,
195 static void uvd_cmd(uint64_t addr
, unsigned cmd
, int *idx
)
197 ib_cpu
[(*idx
)++] = (family_id
< AMDGPU_FAMILY_AI
) ? 0x3BC4 : 0x81C4;
198 ib_cpu
[(*idx
)++] = addr
;
199 ib_cpu
[(*idx
)++] = (family_id
< AMDGPU_FAMILY_AI
) ? 0x3BC5 : 0x81C5;
200 ib_cpu
[(*idx
)++] = addr
>> 32;
201 ib_cpu
[(*idx
)++] = (family_id
< AMDGPU_FAMILY_AI
) ? 0x3BC3 : 0x81C3;
202 ib_cpu
[(*idx
)++] = cmd
<< 1;
205 static void amdgpu_cs_uvd_create(void)
207 struct amdgpu_bo_alloc_request req
= {0};
208 amdgpu_bo_handle buf_handle
;
210 amdgpu_va_handle va_handle
;
214 req
.alloc_size
= 4*1024;
215 req
.preferred_heap
= AMDGPU_GEM_DOMAIN_GTT
;
217 r
= amdgpu_bo_alloc(device_handle
, &req
, &buf_handle
);
218 CU_ASSERT_EQUAL(r
, 0);
220 r
= amdgpu_va_range_alloc(device_handle
,
221 amdgpu_gpu_va_range_general
,
224 CU_ASSERT_EQUAL(r
, 0);
226 r
= amdgpu_bo_va_op(buf_handle
, 0, 4096, va
, 0, AMDGPU_VA_OP_MAP
);
227 CU_ASSERT_EQUAL(r
, 0);
229 r
= amdgpu_bo_cpu_map(buf_handle
, &msg
);
230 CU_ASSERT_EQUAL(r
, 0);
232 memcpy(msg
, uvd_create_msg
, sizeof(uvd_create_msg
));
234 if (family_id
>= AMDGPU_FAMILY_VI
) {
235 ((uint8_t*)msg
)[0x10] = 7;
236 /* chip beyond polaris 10/11 */
237 if ((family_id
== AMDGPU_FAMILY_AI
) ||
238 (chip_id
== chip_rev
+0x50 || chip_id
== chip_rev
+0x5A ||
239 chip_id
== chip_rev
+0x64)) {
241 ((uint8_t*)msg
)[0x28] = 0x00;
242 ((uint8_t*)msg
)[0x29] = 0x94;
243 ((uint8_t*)msg
)[0x2A] = 0x6B;
244 ((uint8_t*)msg
)[0x2B] = 0x00;
248 r
= amdgpu_bo_cpu_unmap(buf_handle
);
249 CU_ASSERT_EQUAL(r
, 0);
252 resources
[num_resources
++] = buf_handle
;
253 resources
[num_resources
++] = ib_handle
;
256 uvd_cmd(va
, 0x0, &i
);
258 ib_cpu
[i
] = 0x80000000;
260 r
= submit(i
, AMDGPU_HW_IP_UVD
);
261 CU_ASSERT_EQUAL(r
, 0);
263 r
= amdgpu_bo_va_op(buf_handle
, 0, 4096, va
, 0, AMDGPU_VA_OP_UNMAP
);
264 CU_ASSERT_EQUAL(r
, 0);
266 r
= amdgpu_va_range_free(va_handle
);
267 CU_ASSERT_EQUAL(r
, 0);
269 r
= amdgpu_bo_free(buf_handle
);
270 CU_ASSERT_EQUAL(r
, 0);
273 static void amdgpu_cs_uvd_decode(void)
275 const unsigned dpb_size
= 15923584, dt_size
= 737280;
276 uint64_t msg_addr
, fb_addr
, bs_addr
, dpb_addr
, ctx_addr
, dt_addr
, it_addr
;
277 struct amdgpu_bo_alloc_request req
= {0};
278 amdgpu_bo_handle buf_handle
;
279 amdgpu_va_handle va_handle
;
285 req
.alloc_size
= 4*1024; /* msg */
286 req
.alloc_size
+= 4*1024; /* fb */
287 if (family_id
>= AMDGPU_FAMILY_VI
)
288 req
.alloc_size
+= 4096; /*it_scaling_table*/
289 req
.alloc_size
+= ALIGN(sizeof(uvd_bitstream
), 4*1024);
290 req
.alloc_size
+= ALIGN(dpb_size
, 4*1024);
291 req
.alloc_size
+= ALIGN(dt_size
, 4*1024);
293 req
.preferred_heap
= AMDGPU_GEM_DOMAIN_GTT
;
295 r
= amdgpu_bo_alloc(device_handle
, &req
, &buf_handle
);
296 CU_ASSERT_EQUAL(r
, 0);
298 r
= amdgpu_va_range_alloc(device_handle
,
299 amdgpu_gpu_va_range_general
,
300 req
.alloc_size
, 1, 0, &va
,
302 CU_ASSERT_EQUAL(r
, 0);
304 r
= amdgpu_bo_va_op(buf_handle
, 0, req
.alloc_size
, va
, 0,
306 CU_ASSERT_EQUAL(r
, 0);
308 r
= amdgpu_bo_cpu_map(buf_handle
, (void **)&ptr
);
309 CU_ASSERT_EQUAL(r
, 0);
311 memcpy(ptr
, uvd_decode_msg
, sizeof(uvd_decode_msg
));
312 memcpy(ptr
+ sizeof(uvd_decode_msg
), avc_decode_msg
, sizeof(avc_decode_msg
));
314 if (family_id
>= AMDGPU_FAMILY_VI
) {
318 /* chip beyond polaris10/11 */
319 if ((family_id
== AMDGPU_FAMILY_AI
) ||
320 (chip_id
== chip_rev
+0x50 || chip_id
== chip_rev
+0x5A ||
321 chip_id
== chip_rev
+0x64)) {
336 memset(ptr
, 0, 4*1024);
337 if (family_id
>= AMDGPU_FAMILY_VI
) {
339 memcpy(ptr
, uvd_it_scaling_table
, sizeof(uvd_it_scaling_table
));
343 memcpy(ptr
, uvd_bitstream
, sizeof(uvd_bitstream
));
345 ptr
+= ALIGN(sizeof(uvd_bitstream
), 4*1024);
346 memset(ptr
, 0, dpb_size
);
348 ptr
+= ALIGN(dpb_size
, 4*1024);
349 memset(ptr
, 0, dt_size
);
352 resources
[num_resources
++] = buf_handle
;
353 resources
[num_resources
++] = ib_handle
;
356 fb_addr
= msg_addr
+ 4*1024;
357 if (family_id
>= AMDGPU_FAMILY_VI
) {
358 it_addr
= fb_addr
+ 4*1024;
359 bs_addr
= it_addr
+ 4*1024;
361 bs_addr
= fb_addr
+ 4*1024;
362 dpb_addr
= ALIGN(bs_addr
+ sizeof(uvd_bitstream
), 4*1024);
365 if (family_id
>= AMDGPU_FAMILY_VI
) {
366 if ((family_id
== AMDGPU_FAMILY_AI
) ||
367 (chip_id
== chip_rev
+0x50 || chip_id
== chip_rev
+0x5A ||
368 chip_id
== chip_rev
+0x64)) {
369 ctx_addr
= ALIGN(dpb_addr
+ 0x006B9400, 4*1024);
373 dt_addr
= ALIGN(dpb_addr
+ dpb_size
, 4*1024);
376 uvd_cmd(msg_addr
, 0x0, &i
);
377 uvd_cmd(dpb_addr
, 0x1, &i
);
378 uvd_cmd(dt_addr
, 0x2, &i
);
379 uvd_cmd(fb_addr
, 0x3, &i
);
380 uvd_cmd(bs_addr
, 0x100, &i
);
382 if (family_id
>= AMDGPU_FAMILY_VI
) {
383 uvd_cmd(it_addr
, 0x204, &i
);
384 if ((family_id
== AMDGPU_FAMILY_AI
) ||
385 (chip_id
== chip_rev
+0x50 || chip_id
== chip_rev
+0x5A ||
386 chip_id
== chip_rev
+0x64))
387 uvd_cmd(ctx_addr
, 0x206, &i
);
390 ib_cpu
[i
++] = (family_id
< AMDGPU_FAMILY_AI
) ? 0x3BC6 : 0x81C6;
393 ib_cpu
[i
] = 0x80000000;
395 r
= submit(i
, AMDGPU_HW_IP_UVD
);
396 CU_ASSERT_EQUAL(r
, 0);
398 /* TODO: use a real CRC32 */
399 for (i
= 0, sum
= 0; i
< dt_size
; ++i
)
401 CU_ASSERT_EQUAL(sum
, SUM_DECODE
);
403 r
= amdgpu_bo_cpu_unmap(buf_handle
);
404 CU_ASSERT_EQUAL(r
, 0);
406 r
= amdgpu_bo_va_op(buf_handle
, 0, req
.alloc_size
, va
, 0, AMDGPU_VA_OP_UNMAP
);
407 CU_ASSERT_EQUAL(r
, 0);
409 r
= amdgpu_va_range_free(va_handle
);
410 CU_ASSERT_EQUAL(r
, 0);
412 r
= amdgpu_bo_free(buf_handle
);
413 CU_ASSERT_EQUAL(r
, 0);
416 static void amdgpu_cs_uvd_destroy(void)
418 struct amdgpu_bo_alloc_request req
= {0};
419 amdgpu_bo_handle buf_handle
;
420 amdgpu_va_handle va_handle
;
425 req
.alloc_size
= 4*1024;
426 req
.preferred_heap
= AMDGPU_GEM_DOMAIN_GTT
;
428 r
= amdgpu_bo_alloc(device_handle
, &req
, &buf_handle
);
429 CU_ASSERT_EQUAL(r
, 0);
431 r
= amdgpu_va_range_alloc(device_handle
,
432 amdgpu_gpu_va_range_general
,
433 req
.alloc_size
, 1, 0, &va
,
435 CU_ASSERT_EQUAL(r
, 0);
437 r
= amdgpu_bo_va_op(buf_handle
, 0, req
.alloc_size
, va
, 0,
439 CU_ASSERT_EQUAL(r
, 0);
441 r
= amdgpu_bo_cpu_map(buf_handle
, &msg
);
442 CU_ASSERT_EQUAL(r
, 0);
444 memcpy(msg
, uvd_destroy_msg
, sizeof(uvd_destroy_msg
));
445 if (family_id
>= AMDGPU_FAMILY_VI
)
446 ((uint8_t*)msg
)[0x10] = 7;
448 r
= amdgpu_bo_cpu_unmap(buf_handle
);
449 CU_ASSERT_EQUAL(r
, 0);
452 resources
[num_resources
++] = buf_handle
;
453 resources
[num_resources
++] = ib_handle
;
456 uvd_cmd(va
, 0x0, &i
);
458 ib_cpu
[i
] = 0x80000000;
460 r
= submit(i
, AMDGPU_HW_IP_UVD
);
461 CU_ASSERT_EQUAL(r
, 0);
463 r
= amdgpu_bo_va_op(buf_handle
, 0, req
.alloc_size
, va
, 0, AMDGPU_VA_OP_UNMAP
);
464 CU_ASSERT_EQUAL(r
, 0);
466 r
= amdgpu_va_range_free(va_handle
);
467 CU_ASSERT_EQUAL(r
, 0);
469 r
= amdgpu_bo_free(buf_handle
);
470 CU_ASSERT_EQUAL(r
, 0);