/*
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Jerome Glisse
 */
25 #include <drm/radeon_drm.h>
26 #include "radeon_reg.h"
29 #define RADEON_BENCHMARK_COPY_BLIT 1
30 #define RADEON_BENCHMARK_COPY_DMA 0
32 #define RADEON_BENCHMARK_ITERATIONS 1024
33 #define RADEON_BENCHMARK_COMMON_MODES_N 17
35 static int radeon_benchmark_do_move(struct radeon_device
*rdev
, unsigned size
,
36 uint64_t saddr
, uint64_t daddr
,
38 struct reservation_object
*resv
)
40 unsigned long start_jiffies
;
41 unsigned long end_jiffies
;
42 struct radeon_fence
*fence
= NULL
;
45 start_jiffies
= jiffies
;
46 for (i
= 0; i
< n
; i
++) {
48 case RADEON_BENCHMARK_COPY_DMA
:
49 fence
= radeon_copy_dma(rdev
, saddr
, daddr
,
50 size
/ RADEON_GPU_PAGE_SIZE
,
53 case RADEON_BENCHMARK_COPY_BLIT
:
54 fence
= radeon_copy_blit(rdev
, saddr
, daddr
,
55 size
/ RADEON_GPU_PAGE_SIZE
,
59 DRM_ERROR("Unknown copy method\n");
63 return PTR_ERR(fence
);
65 r
= radeon_fence_wait(fence
, false);
66 radeon_fence_unref(&fence
);
70 end_jiffies
= jiffies
;
71 return jiffies_to_msecs(end_jiffies
- start_jiffies
);
/*
 * radeon_benchmark_log_results - report measured throughput via DRM_INFO
 *
 * @n:       number of buffer moves performed
 * @size:    size of each move in bytes
 * @time:    total elapsed time in milliseconds
 * @sdomain: source memory domain (RADEON_GEM_DOMAIN_*)
 * @ddomain: destination memory domain (RADEON_GEM_DOMAIN_*)
 * @kind:    human-readable engine name, e.g. "dma" or "blit"
 */
static void radeon_benchmark_log_results(int n, unsigned size,
					 unsigned int time,
					 unsigned sdomain, unsigned ddomain,
					 char *kind)
{
	unsigned int throughput;

	/* Defensive: avoid a division by zero if all copies finished in
	 * under a millisecond (callers normally filter time == 0). */
	if (time == 0)
		return;

	throughput = (n * (size >> 10)) / time;
	/* %u for the (unsigned) domains — the original %d mismatched. */
	DRM_INFO("radeon: %s %u bo moves of %u kB from"
		 " %u to %u in %u ms, throughput: %u Mb/s or %u MB/s\n",
		 kind, n, size >> 10, sdomain, ddomain, time,
		 throughput * 8, throughput);
}
87 static void radeon_benchmark_move(struct radeon_device
*rdev
, unsigned size
,
88 unsigned sdomain
, unsigned ddomain
)
90 struct radeon_bo
*dobj
= NULL
;
91 struct radeon_bo
*sobj
= NULL
;
92 uint64_t saddr
, daddr
;
96 n
= RADEON_BENCHMARK_ITERATIONS
;
97 r
= radeon_bo_create(rdev
, size
, PAGE_SIZE
, true, sdomain
, 0, NULL
, NULL
, &sobj
);
101 r
= radeon_bo_reserve(sobj
, false);
102 if (unlikely(r
!= 0))
104 r
= radeon_bo_pin(sobj
, sdomain
, &saddr
);
105 radeon_bo_unreserve(sobj
);
109 r
= radeon_bo_create(rdev
, size
, PAGE_SIZE
, true, ddomain
, 0, NULL
, NULL
, &dobj
);
113 r
= radeon_bo_reserve(dobj
, false);
114 if (unlikely(r
!= 0))
116 r
= radeon_bo_pin(dobj
, ddomain
, &daddr
);
117 radeon_bo_unreserve(dobj
);
122 if (rdev
->asic
->copy
.dma
) {
123 time
= radeon_benchmark_do_move(rdev
, size
, saddr
, daddr
,
124 RADEON_BENCHMARK_COPY_DMA
, n
,
129 radeon_benchmark_log_results(n
, size
, time
,
130 sdomain
, ddomain
, "dma");
133 if (rdev
->asic
->copy
.blit
) {
134 time
= radeon_benchmark_do_move(rdev
, size
, saddr
, daddr
,
135 RADEON_BENCHMARK_COPY_BLIT
, n
,
140 radeon_benchmark_log_results(n
, size
, time
,
141 sdomain
, ddomain
, "blit");
146 r
= radeon_bo_reserve(sobj
, false);
147 if (likely(r
== 0)) {
148 radeon_bo_unpin(sobj
);
149 radeon_bo_unreserve(sobj
);
151 radeon_bo_unref(&sobj
);
154 r
= radeon_bo_reserve(dobj
, false);
155 if (likely(r
== 0)) {
156 radeon_bo_unpin(dobj
);
157 radeon_bo_unreserve(dobj
);
159 radeon_bo_unref(&dobj
);
163 DRM_ERROR("Error while benchmarking BO move.\n");
167 void radeon_benchmark(struct radeon_device
*rdev
, int test_number
)
170 int common_modes
[RADEON_BENCHMARK_COMMON_MODES_N
] = {
190 switch (test_number
) {
192 /* simple test, VRAM to GTT and GTT to VRAM */
193 radeon_benchmark_move(rdev
, 1024*1024, RADEON_GEM_DOMAIN_GTT
,
194 RADEON_GEM_DOMAIN_VRAM
);
195 radeon_benchmark_move(rdev
, 1024*1024, RADEON_GEM_DOMAIN_VRAM
,
196 RADEON_GEM_DOMAIN_GTT
);
199 /* simple test, VRAM to VRAM */
200 radeon_benchmark_move(rdev
, 1024*1024, RADEON_GEM_DOMAIN_VRAM
,
201 RADEON_GEM_DOMAIN_VRAM
);
204 /* GTT to VRAM, buffer size sweep, powers of 2 */
205 for (i
= 1; i
<= 16384; i
<<= 1)
206 radeon_benchmark_move(rdev
, i
* RADEON_GPU_PAGE_SIZE
,
207 RADEON_GEM_DOMAIN_GTT
,
208 RADEON_GEM_DOMAIN_VRAM
);
211 /* VRAM to GTT, buffer size sweep, powers of 2 */
212 for (i
= 1; i
<= 16384; i
<<= 1)
213 radeon_benchmark_move(rdev
, i
* RADEON_GPU_PAGE_SIZE
,
214 RADEON_GEM_DOMAIN_VRAM
,
215 RADEON_GEM_DOMAIN_GTT
);
218 /* VRAM to VRAM, buffer size sweep, powers of 2 */
219 for (i
= 1; i
<= 16384; i
<<= 1)
220 radeon_benchmark_move(rdev
, i
* RADEON_GPU_PAGE_SIZE
,
221 RADEON_GEM_DOMAIN_VRAM
,
222 RADEON_GEM_DOMAIN_VRAM
);
225 /* GTT to VRAM, buffer size sweep, common modes */
226 for (i
= 0; i
< RADEON_BENCHMARK_COMMON_MODES_N
; i
++)
227 radeon_benchmark_move(rdev
, common_modes
[i
],
228 RADEON_GEM_DOMAIN_GTT
,
229 RADEON_GEM_DOMAIN_VRAM
);
232 /* VRAM to GTT, buffer size sweep, common modes */
233 for (i
= 0; i
< RADEON_BENCHMARK_COMMON_MODES_N
; i
++)
234 radeon_benchmark_move(rdev
, common_modes
[i
],
235 RADEON_GEM_DOMAIN_VRAM
,
236 RADEON_GEM_DOMAIN_GTT
);
239 /* VRAM to VRAM, buffer size sweep, common modes */
240 for (i
= 0; i
< RADEON_BENCHMARK_COMMON_MODES_N
; i
++)
241 radeon_benchmark_move(rdev
, common_modes
[i
],
242 RADEON_GEM_DOMAIN_VRAM
,
243 RADEON_GEM_DOMAIN_VRAM
);
247 DRM_ERROR("Unknown benchmark\n");