/*
 * Copyright 2021 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <errno.h>
#include <fcntl.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/types.h>

#include "CUnit/Basic.h"

#include "xf86drm.h"
#include "amdgpu_test.h"
#include "amdgpu_drm.h"
#include "amdgpu_internal.h"
41 #define GFX_COMPUTE_NOP 0xffff1000
43 static amdgpu_device_handle device_handle
;
44 static uint32_t major_version
;
45 static uint32_t minor_version
;
46 static char *sysfs_remove
= NULL
;
49 CU_BOOL
suite_hotunplug_tests_enable(void)
51 CU_BOOL enable
= CU_TRUE
;
54 if (drmGetDevice2(drm_amdgpu
[0], DRM_DEVICE_GET_PCI_REVISION
, &device
)) {
55 printf("\n\nGPU Failed to get DRM device PCI info!\n");
59 if (device
->bustype
!= DRM_BUS_PCI
) {
60 printf("\n\nGPU device is not on PCI bus!\n");
61 amdgpu_device_deinitialize(device_handle
);
65 /* Disable until the hot-unplug support in kernel gets into drm-next */
66 if (major_version
< 0xff)
69 if (amdgpu_device_initialize(drm_amdgpu
[0], &major_version
,
70 &minor_version
, &device_handle
))
73 /* TODO Once DRM version for unplug feature ready compare here agains it*/
75 if (amdgpu_device_deinitialize(device_handle
))
81 int suite_hotunplug_tests_init(void)
83 /* We need to open/close device at each test manually */
84 amdgpu_close_devices();
89 int suite_hotunplug_tests_clean(void)
/**
 * amdgpu_hotunplug_trigger() - write "1" to a sysfs control file.
 * @pathname: file to poke (e.g. ".../remove" or "/sys/bus/pci/rescan").
 *
 * Return: number of bytes written (1 on success) or -errno on open failure.
 */
static int amdgpu_hotunplug_trigger(const char *pathname)
{
	int fd, len;

	fd = open(pathname, O_WRONLY);
	if (fd < 0)
		return -errno;	/* don't write(2) to an invalid fd */

	len = write(fd, "1", 1);
	close(fd);

	return len;
}
110 static int amdgpu_hotunplug_setup_test()
115 if (amdgpu_open_device_on_test_index(open_render_node
) < 0) {
116 printf("\n\n Failed to reopen device file!\n");
117 return CUE_SINIT_FAILED
;
123 r
= amdgpu_device_initialize(drm_amdgpu
[0], &major_version
,
124 &minor_version
, &device_handle
);
127 if ((r
== -EACCES
) && (errno
== EACCES
))
128 printf("\n\nError:%s. "
129 "Hint:Try to run this test program as root.",
131 return CUE_SINIT_FAILED
;
134 tmp_str
= amdgpu_get_device_from_fd(drm_amdgpu
[0]);
136 printf("\n\n Device path not found!\n");
137 return CUE_SINIT_FAILED
;
140 sysfs_remove
= realloc(tmp_str
, strlen(tmp_str
) * 2);
141 strcat(sysfs_remove
, "/remove");
146 static int amdgpu_hotunplug_teardown_test()
148 if (amdgpu_device_deinitialize(device_handle
))
149 return CUE_SCLEAN_FAILED
;
151 amdgpu_close_devices();
159 static inline int amdgpu_hotunplug_remove()
161 return amdgpu_hotunplug_trigger(sysfs_remove
);
/* Replug every removed device by poking the PCI bus rescan file.
 * Returns >0 on success, mirroring amdgpu_hotunplug_trigger(). */
static inline int amdgpu_hotunplug_rescan(void)
{
	return amdgpu_hotunplug_trigger("/sys/bus/pci/rescan");
}
169 static int amdgpu_cs_sync(amdgpu_context_handle context
,
170 unsigned int ip_type
,
174 struct amdgpu_cs_fence fence
= {
182 return amdgpu_cs_query_fence_status(&fence
,
183 AMDGPU_TIMEOUT_INFINITE
,
187 static void *amdgpu_nop_cs()
189 amdgpu_bo_handle ib_result_handle
;
191 uint64_t ib_result_mc_address
;
194 amdgpu_bo_list_handle bo_list
;
195 amdgpu_va_handle va_handle
;
196 amdgpu_context_handle context
;
197 struct amdgpu_cs_request ibs_request
;
198 struct amdgpu_cs_ib_info ib_info
;
200 r
= amdgpu_cs_ctx_create(device_handle
, &context
);
201 CU_ASSERT_EQUAL(r
, 0);
203 r
= amdgpu_bo_alloc_and_map(device_handle
, 4096, 4096,
204 AMDGPU_GEM_DOMAIN_GTT
, 0,
205 &ib_result_handle
, &ib_result_cpu
,
206 &ib_result_mc_address
, &va_handle
);
207 CU_ASSERT_EQUAL(r
, 0);
210 for (i
= 0; i
< 16; ++i
)
211 ptr
[i
] = GFX_COMPUTE_NOP
;
213 r
= amdgpu_bo_list_create(device_handle
, 1, &ib_result_handle
, NULL
, &bo_list
);
214 CU_ASSERT_EQUAL(r
, 0);
216 memset(&ib_info
, 0, sizeof(struct amdgpu_cs_ib_info
));
217 ib_info
.ib_mc_address
= ib_result_mc_address
;
220 memset(&ibs_request
, 0, sizeof(struct amdgpu_cs_request
));
221 ibs_request
.ip_type
= AMDGPU_HW_IP_GFX
;
222 ibs_request
.ring
= 0;
223 ibs_request
.number_of_ibs
= 1;
224 ibs_request
.ibs
= &ib_info
;
225 ibs_request
.resources
= bo_list
;
228 amdgpu_cs_submit(context
, 0, &ibs_request
, 1);
230 amdgpu_cs_sync(context
, AMDGPU_HW_IP_GFX
, 0, ibs_request
.seq_no
);
231 amdgpu_bo_list_destroy(bo_list
);
232 amdgpu_bo_unmap_and_free(ib_result_handle
, va_handle
,
233 ib_result_mc_address
, 4096);
235 amdgpu_cs_ctx_free(context
);
240 static pthread_t
* amdgpu_create_cs_thread()
243 pthread_t
*thread
= malloc(sizeof(*thread
));
249 r
= pthread_create(thread
, NULL
, amdgpu_nop_cs
, NULL
);
250 CU_ASSERT_EQUAL(r
, 0);
252 /* Give thread enough time to start*/
/**
 * amdgpu_destroy_cs_thread() - join and release a CS thread.
 * @thread: pointer returned by amdgpu_create_cs_thread(); freed here.
 */
static void amdgpu_destroy_cs_thread(pthread_t *thread)
{
	void *status;

	pthread_join(*thread, &status);
	CU_ASSERT_EQUAL(status, 0);

	/* Ownership transferred from amdgpu_create_cs_thread(). */
	free(thread);
}
/**
 * amdgpu_hotunplug_test() - common unplug/replug flow.
 * @with_cs: when true, keep a command-submission thread alive across the
 *           unplug to exercise in-flight work handling.
 *
 * Sequence: setup -> (optional CS thread) -> sysfs remove -> (join thread)
 * -> teardown -> PCI bus rescan to plug the device back.
 */
static void amdgpu_hotunplug_test(bool with_cs)
{
	int r;
	pthread_t *thread = NULL;

	r = amdgpu_hotunplug_setup_test();
	CU_ASSERT_EQUAL(r, 0);

	if (with_cs) {
		thread = amdgpu_create_cs_thread();
		CU_ASSERT_NOT_EQUAL(thread, NULL);
	}

	r = amdgpu_hotunplug_remove();
	CU_ASSERT_EQUAL(r > 0, 1);

	if (with_cs)
		amdgpu_destroy_cs_thread(thread);

	r = amdgpu_hotunplug_teardown_test();
	CU_ASSERT_EQUAL(r, 0);

	r = amdgpu_hotunplug_rescan();
	CU_ASSERT_EQUAL(r > 0, 1);
}
296 static void amdgpu_hotunplug_simple(void)
298 amdgpu_hotunplug_test(false);
301 static void amdgpu_hotunplug_with_cs(void)
303 amdgpu_hotunplug_test(true);
306 static void amdgpu_hotunplug_with_exported_bo(void)
311 amdgpu_bo_handle bo_handle
;
313 struct amdgpu_bo_alloc_request request
= {
315 .phys_alignment
= 4096,
316 .preferred_heap
= AMDGPU_GEM_DOMAIN_GTT
,
320 r
= amdgpu_hotunplug_setup_test();
321 CU_ASSERT_EQUAL(r
, 0);
323 amdgpu_bo_alloc(device_handle
, &request
, &bo_handle
);
324 CU_ASSERT_EQUAL(r
, 0);
326 r
= amdgpu_bo_export(bo_handle
, amdgpu_bo_handle_type_dma_buf_fd
, &dma_buf_fd
);
327 CU_ASSERT_EQUAL(r
, 0);
329 ptr
= mmap(NULL
, 4096, PROT_READ
| PROT_WRITE
, MAP_SHARED
, dma_buf_fd
, 0);
330 CU_ASSERT_NOT_EQUAL(ptr
, MAP_FAILED
);
332 r
= amdgpu_hotunplug_remove();
333 CU_ASSERT_EQUAL(r
> 0, 1);
335 amdgpu_bo_free(bo_handle
);
337 r
= amdgpu_hotunplug_teardown_test();
338 CU_ASSERT_EQUAL(r
, 0);
345 r
= amdgpu_hotunplug_rescan();
346 CU_ASSERT_EQUAL(r
> 0, 1);
349 static void amdgpu_hotunplug_with_exported_fence(void)
351 amdgpu_bo_handle ib_result_handle
;
353 uint64_t ib_result_mc_address
;
354 uint32_t *ptr
, sync_obj_handle
, sync_obj_handle2
;
356 amdgpu_bo_list_handle bo_list
;
357 amdgpu_va_handle va_handle
;
358 uint32_t major2
, minor2
;
359 amdgpu_device_handle device2
;
360 amdgpu_context_handle context
;
361 struct amdgpu_cs_request ibs_request
;
362 struct amdgpu_cs_ib_info ib_info
;
363 struct amdgpu_cs_fence fence_status
= {0};
366 r
= amdgpu_hotunplug_setup_test();
367 CU_ASSERT_EQUAL(r
, 0);
369 r
= amdgpu_device_initialize(drm_amdgpu
[1], &major2
, &minor2
, &device2
);
370 CU_ASSERT_EQUAL(r
, 0);
372 r
= amdgpu_cs_ctx_create(device_handle
, &context
);
373 CU_ASSERT_EQUAL(r
, 0);
375 r
= amdgpu_bo_alloc_and_map(device_handle
, 4096, 4096,
376 AMDGPU_GEM_DOMAIN_GTT
, 0,
377 &ib_result_handle
, &ib_result_cpu
,
378 &ib_result_mc_address
, &va_handle
);
379 CU_ASSERT_EQUAL(r
, 0);
382 for (i
= 0; i
< 16; ++i
)
383 ptr
[i
] = GFX_COMPUTE_NOP
;
385 r
= amdgpu_bo_list_create(device_handle
, 1, &ib_result_handle
, NULL
, &bo_list
);
386 CU_ASSERT_EQUAL(r
, 0);
388 memset(&ib_info
, 0, sizeof(struct amdgpu_cs_ib_info
));
389 ib_info
.ib_mc_address
= ib_result_mc_address
;
392 memset(&ibs_request
, 0, sizeof(struct amdgpu_cs_request
));
393 ibs_request
.ip_type
= AMDGPU_HW_IP_GFX
;
394 ibs_request
.ring
= 0;
395 ibs_request
.number_of_ibs
= 1;
396 ibs_request
.ibs
= &ib_info
;
397 ibs_request
.resources
= bo_list
;
399 CU_ASSERT_EQUAL(amdgpu_cs_submit(context
, 0, &ibs_request
, 1), 0);
401 fence_status
.context
= context
;
402 fence_status
.ip_type
= AMDGPU_HW_IP_GFX
;
403 fence_status
.ip_instance
= 0;
404 fence_status
.fence
= ibs_request
.seq_no
;
406 CU_ASSERT_EQUAL(amdgpu_cs_fence_to_handle(device_handle
, &fence_status
,
407 AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ
,
411 CU_ASSERT_EQUAL(amdgpu_cs_export_syncobj(device_handle
, sync_obj_handle
, &shared_fd
), 0);
413 CU_ASSERT_EQUAL(amdgpu_cs_import_syncobj(device2
, shared_fd
, &sync_obj_handle2
), 0);
415 CU_ASSERT_EQUAL(amdgpu_cs_destroy_syncobj(device_handle
, sync_obj_handle
), 0);
417 CU_ASSERT_EQUAL(amdgpu_bo_list_destroy(bo_list
), 0);
418 CU_ASSERT_EQUAL(amdgpu_bo_unmap_and_free(ib_result_handle
, va_handle
,
419 ib_result_mc_address
, 4096), 0);
420 CU_ASSERT_EQUAL(amdgpu_cs_ctx_free(context
), 0);
422 r
= amdgpu_hotunplug_remove();
423 CU_ASSERT_EQUAL(r
> 0, 1);
425 CU_ASSERT_EQUAL(amdgpu_cs_syncobj_wait(device2
, &sync_obj_handle2
, 1, 100000000, 0, NULL
), 0);
427 CU_ASSERT_EQUAL(amdgpu_cs_destroy_syncobj(device2
, sync_obj_handle2
), 0);
429 amdgpu_device_deinitialize(device2
);
431 r
= amdgpu_hotunplug_teardown_test();
432 CU_ASSERT_EQUAL(r
, 0);
434 r
= amdgpu_hotunplug_rescan();
435 CU_ASSERT_EQUAL(r
> 0, 1);
439 CU_TestInfo hotunplug_tests
[] = {
440 { "Unplug card and rescan the bus to plug it back", amdgpu_hotunplug_simple
},
441 { "Same as first test but with command submission", amdgpu_hotunplug_with_cs
},
442 { "Unplug with exported bo", amdgpu_hotunplug_with_exported_bo
},
443 { "Unplug with exported fence", amdgpu_hotunplug_with_exported_fence
},