/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "../i915_selftest.h"

#include "mock_gem_device.h"
#include "huge_gem_object.h"
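
/*
 * Selftests for the GEM object API: basic object creation, attaching
 * physical pages, page lookup within our sparsely-backed huge objects,
 * partial GTT mappings of tiled objects, and exhaustion of the mmap
 * offset space.
 */
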
static int igt_gem_object(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	int err = -ENOMEM;

	/* Basic test to ensure we can create an object */

	obj = i915_gem_object_create(i915, PAGE_SIZE);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		pr_err("i915_gem_object_create failed, err=%d\n", err);
		goto out;
	}

	err = 0;
	i915_gem_object_put(obj);
out:
	return err;
}

static int igt_phys_object(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	int err;

	/* Create an object and bind it to a contiguous set of physical pages,
	 * i.e. exercise the i915_gem_object_phys API.
	 */

	obj = i915_gem_object_create(i915, PAGE_SIZE);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		pr_err("i915_gem_object_create failed, err=%d\n", err);
		goto out;
	}

	mutex_lock(&i915->drm.struct_mutex);
	err = i915_gem_object_attach_phys(obj, PAGE_SIZE);
	mutex_unlock(&i915->drm.struct_mutex);
	if (err) {
		pr_err("i915_gem_object_attach_phys failed, err=%d\n", err);
		goto out_obj;
	}

	if (obj->ops != &i915_gem_phys_ops) {
		pr_err("i915_gem_object_attach_phys did not create a phys object\n");
		err = -EINVAL;
		goto out_obj;
	}

	if (!atomic_read(&obj->mm.pages_pin_count)) {
		pr_err("i915_gem_object_attach_phys did not pin its phys pages\n");
		err = -EINVAL;
		goto out_obj;
	}

	/* Make the object dirty so that put_pages must do copy back the data */
	mutex_lock(&i915->drm.struct_mutex);
	err = i915_gem_object_set_to_gtt_domain(obj, true);
	mutex_unlock(&i915->drm.struct_mutex);
	if (err) {
		pr_err("i915_gem_object_set_to_gtt_domain failed with err=%d\n",
		       err);
		goto out_obj;
	}

out_obj:
	i915_gem_object_put(obj);
out:
	return err;
}

static int igt_gem_huge(void *arg)
{
	const unsigned int nreal = 509; /* just to be awkward */
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	unsigned int n;
	int err;

	/* Basic sanitycheck of our huge fake object allocation */

	obj = huge_gem_object(i915,
			      nreal * PAGE_SIZE,
			      i915->ggtt.base.total + PAGE_SIZE);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	err = i915_gem_object_pin_pages(obj);
	if (err) {
		pr_err("Failed to allocate %u pages (%lu total), err=%d\n",
		       nreal, obj->base.size / PAGE_SIZE, err);
		goto out;
	}

	for (n = 0; n < obj->base.size / PAGE_SIZE; n++) {
		if (i915_gem_object_get_page(obj, n) !=
		    i915_gem_object_get_page(obj, n % nreal)) {
			pr_err("Page lookup mismatch at index %u [%u]\n",
			       n, n % nreal);
			err = -EINVAL;
			goto out_unpin;
		}
	}

out_unpin:
	i915_gem_object_unpin_pages(obj);
out:
	i915_gem_object_put(obj);
	return err;
}

struct tile {
	unsigned int width;
	unsigned int height;
	unsigned int stride;
	unsigned int size;
	unsigned int tiling;
	unsigned int swizzle;
};
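
/*
 * Bit-6 swizzling: on machines whose DRAM channels are populated
 * asymmetrically, the hardware XORs some combination of address bits 9,
 * 10 and 11 into bit 6 to balance accesses across the channels. To
 * predict where a tiled write really lands we must apply the same XOR:
 * e.g. for bit-9 swizzling, (offset & BIT(9)) >> 3 yields 0x40, flipping
 * bit 6 of the final offset whenever bit 9 is set.
 */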
static u64 swizzle_bit(unsigned int bit, u64 offset)
{
	return (offset & BIT_ULL(bit)) >> (bit - 6);
}
static u64 tiled_offset(const struct tile *tile, u64 v)
{
	u64 x, y;

	if (tile->tiling == I915_TILING_NONE)
		return v;

	y = div64_u64_rem(v, tile->stride, &x);
	v = div64_u64_rem(y, tile->height, &y) * tile->stride * tile->height;

	if (tile->tiling == I915_TILING_X) {
		v += y * tile->width;
		v += div64_u64_rem(x, tile->width, &x) << tile->size;
		v += x;
	} else {
		const unsigned int ytile_span = 16;
		const unsigned int ytile_height = 32 * ytile_span;

		v += y * tile->width;
		v += div64_u64_rem(x, ytile_span, &x) * ytile_height;
		v += x;
	}

	switch (tile->swizzle) {
	case I915_BIT_6_SWIZZLE_9:
		v ^= swizzle_bit(9, v);
		break;
	case I915_BIT_6_SWIZZLE_9_10:
		v ^= swizzle_bit(9, v) ^ swizzle_bit(10, v);
		break;
	case I915_BIT_6_SWIZZLE_9_11:
		v ^= swizzle_bit(9, v) ^ swizzle_bit(11, v);
		break;
	case I915_BIT_6_SWIZZLE_9_10_11:
		v ^= swizzle_bit(9, v) ^ swizzle_bit(10, v) ^ swizzle_bit(11, v);
		break;
	}

	return v;
}
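
/*
 * For every partial view we test, a distinctive value is written through
 * the fenced GGTT mapping of a single page and then read back through
 * the CPU at the location predicted by tiled_offset(). A mismatch means
 * that either the partial-view construction or our model of the tiling
 * and swizzling is wrong.
 */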
static int check_partial_mapping(struct drm_i915_gem_object *obj,
				 const struct tile *tile,
				 unsigned long end_time)
{
	const unsigned int nreal = obj->scratch / PAGE_SIZE;
	const unsigned long npages = obj->base.size / PAGE_SIZE;
	struct i915_vma *vma;
	unsigned long page;
	int err;

	if (igt_timeout(end_time,
			"%s: timed out before tiling=%d stride=%d\n",
			__func__, tile->tiling, tile->stride))
		return -EINTR;

	err = i915_gem_object_set_tiling(obj, tile->tiling, tile->stride);
	if (err)
		return err;

	GEM_BUG_ON(i915_gem_object_get_tiling(obj) != tile->tiling);
	GEM_BUG_ON(i915_gem_object_get_stride(obj) != tile->stride);

	for_each_prime_number_from(page, 1, npages) {
		struct i915_ggtt_view view =
			compute_partial_view(obj, page, MIN_CHUNK_PAGES);
		u32 __iomem *io;
		struct page *p;
		unsigned int n;
		u64 offset;
		u32 *cpu;

		GEM_BUG_ON(view.partial.size > nreal);

		err = i915_gem_object_set_to_gtt_domain(obj, true);
		if (err)
			return err;

		vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
		if (IS_ERR(vma)) {
			pr_err("Failed to pin partial view: offset=%lu\n",
			       page);
			return PTR_ERR(vma);
		}

		n = page - view.partial.offset;
		GEM_BUG_ON(n >= view.partial.size);

		io = i915_vma_pin_iomap(vma);
		i915_vma_unpin(vma);
		if (IS_ERR(io)) {
			pr_err("Failed to iomap partial view: offset=%lu\n",
			       page);
			return PTR_ERR(io);
		}

		iowrite32(page, io + n * PAGE_SIZE/sizeof(*io));
		i915_vma_unpin_iomap(vma);

		offset = tiled_offset(tile, page << PAGE_SHIFT);
		if (offset >= obj->base.size)
			continue;

		flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);

		p = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
		cpu = kmap(p) + offset_in_page(offset);
		drm_clflush_virt_range(cpu, sizeof(*cpu));
		if (*cpu != (u32)page) {
			pr_err("Partial view for %lu [%u] (offset=%llu, size=%u [%llu, row size %u], fence=%d, tiling=%d, stride=%d) misalignment, expected write to page (%llu + %u [0x%llx]) of 0x%x, found 0x%x\n",
			       page, n,
			       view.partial.offset,
			       view.partial.size,
			       vma->size >> PAGE_SHIFT,
			       i915_gem_object_get_tile_row_size(obj) >> PAGE_SHIFT,
			       vma->fence ? vma->fence->id : -1, tile->tiling, tile->stride,
			       offset >> PAGE_SHIFT,
			       (unsigned int)offset_in_page(offset),
			       offset,
			       (u32)page, *cpu);
			err = -EINVAL;
		}
		*cpu = 0;
		drm_clflush_virt_range(cpu, sizeof(*cpu));
		kunmap(p);
		if (err)
			return err;
	}

	return 0;
}

static int igt_partial_tiling(void *arg)
{
	const unsigned int nreal = 1 << 12; /* largest tile row x2 */
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	int tiling;
	int err;

	/* We want to check the page mapping and fencing of a large object
	 * mmapped through the GTT. The object we create is larger than can
	 * possibly be mmapped as a whole, and so we must use partial GGTT vma.
	 * We then check that a write through each partial GGTT vma ends up
	 * in the right set of pages within the object, and with the expected
	 * tiling, which we verify by manual swizzling.
	 */

	obj = huge_gem_object(i915,
			      nreal << PAGE_SHIFT,
			      (1 + next_prime_number(i915->ggtt.base.total >> PAGE_SHIFT)) << PAGE_SHIFT);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	err = i915_gem_object_pin_pages(obj);
	if (err) {
		pr_err("Failed to allocate %u pages (%lu total), err=%d\n",
		       nreal, obj->base.size / PAGE_SIZE, err);
		goto out;
	}

	mutex_lock(&i915->drm.struct_mutex);
	intel_runtime_pm_get(i915);

	if (1) {
		IGT_TIMEOUT(end);
		struct tile tile;

		/* Start with a linear (untiled) mapping as a control */
		tile.height = 1;
		tile.width = 1;
		tile.size = 0;
		tile.stride = 0;
		tile.swizzle = I915_BIT_6_SWIZZLE_NONE;
		tile.tiling = I915_TILING_NONE;

		err = check_partial_mapping(obj, &tile, end);
		if (err && err != -EINTR)
			goto out_unlock;
	}

	for (tiling = I915_TILING_X; tiling <= I915_TILING_Y; tiling++) {
		IGT_TIMEOUT(end);
		unsigned int max_pitch;
		unsigned int pitch;
		struct tile tile;

		tile.tiling = tiling;
		switch (tiling) {
		case I915_TILING_X:
			tile.swizzle = i915->mm.bit_6_swizzle_x;
			break;
		case I915_TILING_Y:
			tile.swizzle = i915->mm.bit_6_swizzle_y;
			break;
		}

		if (tile.swizzle == I915_BIT_6_SWIZZLE_UNKNOWN ||
		    tile.swizzle == I915_BIT_6_SWIZZLE_9_10_17)
			continue;

		if (INTEL_GEN(i915) <= 2) {
			tile.height = 16;
			tile.width = 128;
			tile.size = 11;
		} else if (tile.tiling == I915_TILING_Y &&
			   HAS_128_BYTE_Y_TILING(i915)) {
			tile.height = 32;
			tile.width = 128;
			tile.size = 12;
		} else {
			tile.height = 8;
			tile.width = 512;
			tile.size = 12;
		}

		if (INTEL_GEN(i915) < 4)
			max_pitch = 8192 / tile.width;
		else if (INTEL_GEN(i915) < 7)
			max_pitch = 128 * I965_FENCE_MAX_PITCH_VAL / tile.width;
		else
			max_pitch = 128 * GEN7_FENCE_MAX_PITCH_VAL / tile.width;

		/* Walk the pitch in powers of two, plus their neighbours */
		for (pitch = max_pitch; pitch; pitch >>= 1) {
			tile.stride = tile.width * pitch;
			err = check_partial_mapping(obj, &tile, end);
			if (err == -EINTR)
				goto next_tiling;
			if (err)
				goto out_unlock;

			if (pitch > 2 && INTEL_GEN(i915) >= 4) {
				tile.stride = tile.width * (pitch - 1);
				err = check_partial_mapping(obj, &tile, end);
				if (err == -EINTR)
					goto next_tiling;
				if (err)
					goto out_unlock;
			}

			if (pitch < max_pitch && INTEL_GEN(i915) >= 4) {
				tile.stride = tile.width * (pitch + 1);
				err = check_partial_mapping(obj, &tile, end);
				if (err == -EINTR)
					goto next_tiling;
				if (err)
					goto out_unlock;
			}
		}

		/* gen4+ fences allow non-power-of-two pitches; sample primes */
		if (INTEL_GEN(i915) >= 4) {
			for_each_prime_number(pitch, max_pitch) {
				tile.stride = tile.width * pitch;
				err = check_partial_mapping(obj, &tile, end);
				if (err == -EINTR)
					goto next_tiling;
				if (err)
					goto out_unlock;
			}
		}

next_tiling: ;
	}

out_unlock:
	intel_runtime_pm_put(i915);
	mutex_unlock(&i915->drm.struct_mutex);
	i915_gem_object_unpin_pages(obj);
out:
	i915_gem_object_put(obj);
	return err;
}
static int make_obj_busy(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct drm_i915_gem_request *rq;
	struct i915_vma *vma;
	int err;

	vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		return err;

	rq = i915_gem_request_alloc(i915->engine[RCS], i915->kernel_context);
	if (IS_ERR(rq)) {
		i915_vma_unpin(vma);
		return PTR_ERR(rq);
	}

	i915_vma_move_to_active(vma, rq, 0);
	i915_add_request(rq);

	i915_gem_object_set_active_reference(obj);
	i915_vma_unpin(vma);
	return 0;
}
static bool assert_mmap_offset(struct drm_i915_private *i915,
			       unsigned long size,
			       int expected)
{
	struct drm_i915_gem_object *obj;
	int err;

	obj = i915_gem_object_create_internal(i915, size);
	if (IS_ERR(obj))
		return false;

	err = i915_gem_object_create_mmap_offset(obj);
	i915_gem_object_put(obj);

	return err == expected;
}

static int igt_mmap_offset_exhaustion(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_mm *mm = &i915->drm.vma_offset_manager->vm_addr_space_mm;
	struct drm_i915_gem_object *obj;
	struct drm_mm_node resv, *hole;
	u64 hole_start, hole_end;
	int loop, err;

	/* Trim the device mmap space to only a page */
	memset(&resv, 0, sizeof(resv));
	drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
		resv.start = hole_start;
		resv.size = hole_end - hole_start - 1; /* PAGE_SIZE units */
		err = drm_mm_reserve_node(mm, &resv);
		if (err) {
			pr_err("Failed to trim VMA manager, err=%d\n", err);
			return err;
		}
		break;
	}

	/* Just fits! */
	if (!assert_mmap_offset(i915, PAGE_SIZE, 0)) {
		pr_err("Unable to insert object into single page hole\n");
		err = -EINVAL;
		goto out;
	}

	/* Too large */
	if (!assert_mmap_offset(i915, 2*PAGE_SIZE, -ENOSPC)) {
		pr_err("Unexpectedly succeeded in inserting too large object into single page hole\n");
		err = -EINVAL;
		goto out;
	}

	/* Fill the hole, further allocation attempts should then fail */
	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto out;
	}

	err = i915_gem_object_create_mmap_offset(obj);
	if (err) {
		pr_err("Unable to insert object into reclaimed hole\n");
		goto err_obj;
	}

	if (!assert_mmap_offset(i915, PAGE_SIZE, -ENOSPC)) {
		pr_err("Unexpectedly succeeded in inserting object into no holes!\n");
		err = -EINVAL;
		goto err_obj;
	}

	i915_gem_object_put(obj);

	/* Now fill with busy dead objects that we expect to reap */
	for (loop = 0; loop < 3; loop++) {
		obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			goto out;
		}

		mutex_lock(&i915->drm.struct_mutex);
		intel_runtime_pm_get(i915);
		err = make_obj_busy(obj);
		intel_runtime_pm_put(i915);
		mutex_unlock(&i915->drm.struct_mutex);
		if (err) {
			pr_err("[loop %d] Failed to busy the object\n", loop);
			goto err_obj;
		}

		GEM_BUG_ON(!i915_gem_object_is_active(obj));
		err = i915_gem_object_create_mmap_offset(obj);
		if (err) {
			pr_err("[loop %d] i915_gem_object_create_mmap_offset failed with err=%d\n",
			       loop, err);
			goto err_obj;
		}
	}

out:
	drm_mm_remove_node(&resv);
	return err;
err_obj:
	i915_gem_object_put(obj);
	goto out;
}
int i915_gem_object_mock_selftests(void)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_gem_object),
		SUBTEST(igt_phys_object),
	};
	struct drm_i915_private *i915;
	int err;

	i915 = mock_gem_device();
	if (!i915)
		return -ENOMEM;

	err = i915_subtests(tests, i915);

	drm_dev_unref(&i915->drm);
	return err;
}

int i915_gem_object_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_gem_huge),
		SUBTEST(igt_partial_tiling),
		SUBTEST(igt_mmap_offset_exhaustion),
	};

	return i915_subtests(tests, i915);
}