/*
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Jerome Glisse
 */
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"

#define AMDGPU_BENCHMARK_ITERATIONS 1024
#define AMDGPU_BENCHMARK_COMMON_MODES_N 17
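
/*
 * Time @n buffer copies of @size bytes from GPU address @saddr to @daddr
 * on the buffer_funcs (SDMA) ring. Returns the elapsed time in
 * milliseconds, or a negative error code if a copy or fence wait fails.
 */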
static int amdgpu_benchmark_do_move(struct amdgpu_device *adev, unsigned size,
				    uint64_t saddr, uint64_t daddr, int n)
{
	unsigned long start_jiffies;
	unsigned long end_jiffies;
	struct dma_fence *fence = NULL;
	int i, r;

	start_jiffies = jiffies;
	for (i = 0; i < n; i++) {
		struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;

		/* trailing bool flags vary by kernel version; on recent
		 * kernels they are direct_submit, vm_needs_flush and tmz */
		r = amdgpu_copy_buffer(ring, saddr, daddr, size, NULL, &fence,
				       false, false, false);
		if (r)
			goto exit_do_move;
		r = dma_fence_wait(fence, false);
		if (r)
			goto exit_do_move;
		dma_fence_put(fence);
		fence = NULL; /* avoid a second put at exit_do_move */
	}
	end_jiffies = jiffies;
	r = jiffies_to_msecs(end_jiffies - start_jiffies);

exit_do_move:
	if (fence)
		dma_fence_put(fence);
	return r;
}
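
/*
 * Print one benchmark result line: @n moves of @size bytes from @sdomain
 * to @ddomain took @time ms; throughput is reported in Mb/s and MB/s.
 */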
static void amdgpu_benchmark_log_results(int n, unsigned size,
					 unsigned int time,
					 unsigned sdomain, unsigned ddomain,
					 char *kind)
{
	unsigned int throughput = (n * (size >> 10)) / time;

	DRM_INFO("amdgpu: %s %u bo moves of %u kB from"
		 " %d to %d in %u ms, throughput: %u Mb/s or %u MB/s\n",
		 kind, n, size >> 10, sdomain, ddomain, time,
		 throughput * 8, throughput);
}
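
/*
 * Allocate, pin and GART-map a source and a destination BO of @size bytes
 * in @sdomain and @ddomain, run the copy benchmark between them, log the
 * result, then unpin and free both BOs.
 */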
static void amdgpu_benchmark_move(struct amdgpu_device *adev, unsigned size,
				  unsigned sdomain, unsigned ddomain)
{
	struct amdgpu_bo *dobj = NULL;
	struct amdgpu_bo *sobj = NULL;
	struct amdgpu_bo_param bp;
	uint64_t saddr, daddr;
	int r, n;
	int time;

	memset(&bp, 0, sizeof(bp));
	bp.size = size;
	bp.byte_align = PAGE_SIZE;
	bp.domain = sdomain;
	bp.flags = 0;
	bp.type = ttm_bo_type_kernel;
	bp.resv = NULL;
	n = AMDGPU_BENCHMARK_ITERATIONS;

	/* create, pin and GART-map the source BO */
	r = amdgpu_bo_create(adev, &bp, &sobj);
	if (r)
		goto out_cleanup;
	r = amdgpu_bo_reserve(sobj, false);
	if (unlikely(r != 0))
		goto out_cleanup;
	r = amdgpu_bo_pin(sobj, sdomain);
	if (r) {
		amdgpu_bo_unreserve(sobj);
		goto out_cleanup;
	}
	r = amdgpu_ttm_alloc_gart(&sobj->tbo);
	amdgpu_bo_unreserve(sobj);
	if (r)
		goto out_cleanup;
	saddr = amdgpu_bo_gpu_offset(sobj);

	/* create, pin and GART-map the destination BO */
	bp.domain = ddomain;
	r = amdgpu_bo_create(adev, &bp, &dobj);
	if (r)
		goto out_cleanup;
	r = amdgpu_bo_reserve(dobj, false);
	if (unlikely(r != 0))
		goto out_cleanup;
	r = amdgpu_bo_pin(dobj, ddomain);
	if (r) {
		amdgpu_bo_unreserve(dobj);
		goto out_cleanup;
	}
	r = amdgpu_ttm_alloc_gart(&dobj->tbo);
	amdgpu_bo_unreserve(dobj);
	if (r)
		goto out_cleanup;
	daddr = amdgpu_bo_gpu_offset(dobj);

	if (adev->mman.buffer_funcs) {
		time = amdgpu_benchmark_do_move(adev, size, saddr, daddr, n);
		if (time < 0)
			goto out_cleanup;
		if (time > 0)
			amdgpu_benchmark_log_results(n, size, time,
						     sdomain, ddomain, "dma");
	}

out_cleanup:
	/* Check error value now. The value can be overwritten when clean up.*/
	if (r) {
		DRM_ERROR("Error while benchmarking BO move.\n");
	}

	if (sobj) {
		r = amdgpu_bo_reserve(sobj, true);
		if (likely(r == 0)) {
			amdgpu_bo_unpin(sobj);
			amdgpu_bo_unreserve(sobj);
		}
		amdgpu_bo_unref(&sobj);
	}
	if (dobj) {
		r = amdgpu_bo_reserve(dobj, true);
		if (likely(r == 0)) {
			amdgpu_bo_unpin(dobj);
			amdgpu_bo_unreserve(dobj);
		}
		amdgpu_bo_unref(&dobj);
	}
}
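
/*
 * Entry point: run the buffer-move benchmark selected by @test_number,
 * covering single 1 MB moves, power-of-two size sweeps and common
 * display-mode framebuffer sizes between the GTT and VRAM domains.
 */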
void amdgpu_benchmark(struct amdgpu_device *adev, int test_number)
{
	int i;
	static const int common_modes[AMDGPU_BENCHMARK_COMMON_MODES_N] = {
		640 * 480 * 4,
		720 * 480 * 4,
		800 * 600 * 4,
		848 * 480 * 4,
		1024 * 768 * 4,
		1152 * 768 * 4,
		1280 * 720 * 4,
		1280 * 800 * 4,
		1280 * 854 * 4,
		1280 * 960 * 4,
		1280 * 1024 * 4,
		1440 * 900 * 4,
		1400 * 1050 * 4,
		1680 * 1050 * 4,
		1600 * 1200 * 4,
		1920 * 1080 * 4,
		1920 * 1200 * 4
	};

	switch (test_number) {
	case 1:
		/* simple test, VRAM to GTT and GTT to VRAM */
		amdgpu_benchmark_move(adev, 1024*1024, AMDGPU_GEM_DOMAIN_GTT,
				      AMDGPU_GEM_DOMAIN_VRAM);
		amdgpu_benchmark_move(adev, 1024*1024, AMDGPU_GEM_DOMAIN_VRAM,
				      AMDGPU_GEM_DOMAIN_GTT);
		break;
	case 2:
		/* simple test, VRAM to VRAM */
		amdgpu_benchmark_move(adev, 1024*1024, AMDGPU_GEM_DOMAIN_VRAM,
				      AMDGPU_GEM_DOMAIN_VRAM);
		break;
	case 3:
		/* GTT to VRAM, buffer size sweep, powers of 2 */
		for (i = 1; i <= 16384; i <<= 1)
			amdgpu_benchmark_move(adev, i * AMDGPU_GPU_PAGE_SIZE,
					      AMDGPU_GEM_DOMAIN_GTT,
					      AMDGPU_GEM_DOMAIN_VRAM);
		break;
	case 4:
		/* VRAM to GTT, buffer size sweep, powers of 2 */
		for (i = 1; i <= 16384; i <<= 1)
			amdgpu_benchmark_move(adev, i * AMDGPU_GPU_PAGE_SIZE,
					      AMDGPU_GEM_DOMAIN_VRAM,
					      AMDGPU_GEM_DOMAIN_GTT);
		break;
	case 5:
		/* VRAM to VRAM, buffer size sweep, powers of 2 */
		for (i = 1; i <= 16384; i <<= 1)
			amdgpu_benchmark_move(adev, i * AMDGPU_GPU_PAGE_SIZE,
					      AMDGPU_GEM_DOMAIN_VRAM,
					      AMDGPU_GEM_DOMAIN_VRAM);
		break;
	case 6:
		/* GTT to VRAM, buffer size sweep, common modes */
		for (i = 0; i < AMDGPU_BENCHMARK_COMMON_MODES_N; i++)
			amdgpu_benchmark_move(adev, common_modes[i],
					      AMDGPU_GEM_DOMAIN_GTT,
					      AMDGPU_GEM_DOMAIN_VRAM);
		break;
	case 7:
		/* VRAM to GTT, buffer size sweep, common modes */
		for (i = 0; i < AMDGPU_BENCHMARK_COMMON_MODES_N; i++)
			amdgpu_benchmark_move(adev, common_modes[i],
					      AMDGPU_GEM_DOMAIN_VRAM,
					      AMDGPU_GEM_DOMAIN_GTT);
		break;
	case 8:
		/* VRAM to VRAM, buffer size sweep, common modes */
		for (i = 0; i < AMDGPU_BENCHMARK_COMMON_MODES_N; i++)
			amdgpu_benchmark_move(adev, common_modes[i],
					      AMDGPU_GEM_DOMAIN_VRAM,
					      AMDGPU_GEM_DOMAIN_VRAM);
		break;
	default:
		DRM_ERROR("Unknown benchmark\n");
	}
}