/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/list_sort.h>
#include <linux/prime_numbers.h>

#include "../i915_selftest.h"
#include "i915_random.h"

#include "mock_context.h"
#include "mock_gem_device.h"

static void cleanup_freed_objects(struct drm_i915_private *i915)
{
	/*
	 * As we may hold onto the struct_mutex for inordinate lengths of
	 * time, the NMI khungtaskd detector may fire for the free objects
	 * worker.
	 */
	mutex_unlock(&i915->drm.struct_mutex);

	i915_gem_drain_freed_objects(i915);

	mutex_lock(&i915->drm.struct_mutex);
}

static void fake_free_pages(struct drm_i915_gem_object *obj,
			    struct sg_table *pages)
{
	sg_free_table(pages);
	kfree(pages);
}

static int fake_get_pages(struct drm_i915_gem_object *obj)
{
#define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY)
#define PFN_BIAS 0x1000
	struct sg_table *pages;
	struct scatterlist *sg;
	unsigned int sg_page_sizes;
	typeof(obj->base.size) rem;

	pages = kmalloc(sizeof(*pages), GFP);
	if (!pages)
		return -ENOMEM;

	rem = round_up(obj->base.size, BIT(31)) >> 31;
	if (sg_alloc_table(pages, rem, GFP)) {
		kfree(pages);
		return -ENOMEM;
	}

	sg_page_sizes = 0;
	rem = obj->base.size;
	for (sg = pages->sgl; sg; sg = sg_next(sg)) {
		unsigned long len = min_t(typeof(rem), rem, BIT(31));

		GEM_BUG_ON(!len);
		sg_set_page(sg, pfn_to_page(PFN_BIAS), len, 0);
		sg_dma_address(sg) = page_to_phys(sg_page(sg));
		sg_dma_len(sg) = len;
		sg_page_sizes |= len;

		rem -= len;
	}
	GEM_BUG_ON(rem);

	obj->mm.madv = I915_MADV_DONTNEED;

	__i915_gem_object_set_pages(obj, pages, sg_page_sizes);

	return 0;
}

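/*
 * Note: these "fake" objects never touch real memory. fake_get_pages()
 * builds a scatterlist whose entries all point at the same bogus pfn
 * (PFN_BIAS) with a made-up dma address, and the pages are marked
 * I915_MADV_DONTNEED so the shrinker may discard them at will. This lets
 * the tests below exercise GTT ranges far larger than the RAM available,
 * since only the page tables themselves are ever really allocated.
 */
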
static void fake_put_pages(struct drm_i915_gem_object *obj,
			   struct sg_table *pages)
{
	fake_free_pages(obj, pages);
	obj->mm.dirty = false;
	obj->mm.madv = I915_MADV_WILLNEED;
}

static const struct drm_i915_gem_object_ops fake_ops = {
	.flags = I915_GEM_OBJECT_IS_SHRINKABLE,
	.get_pages = fake_get_pages,
	.put_pages = fake_put_pages,
};

static struct drm_i915_gem_object *
fake_dma_object(struct drm_i915_private *i915, u64 size)
{
	struct drm_i915_gem_object *obj;

	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));

	if (overflows_type(size, obj->base.size))
		return ERR_PTR(-E2BIG);

	obj = i915_gem_object_alloc(i915);
	if (!obj)
		goto err;

	drm_gem_private_object_init(&i915->drm, &obj->base, size);
	i915_gem_object_init(obj, &fake_ops);

	obj->write_domain = I915_GEM_DOMAIN_CPU;
	obj->read_domains = I915_GEM_DOMAIN_CPU;
	obj->cache_level = I915_CACHE_NONE;

	/* Preallocate the "backing storage" */
	if (i915_gem_object_pin_pages(obj))
		goto err_obj;

	i915_gem_object_unpin_pages(obj);
	return obj;

err_obj:
	i915_gem_object_put(obj);
err:
	return ERR_PTR(-ENOMEM);
}

static int igt_ppgtt_alloc(void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	struct i915_hw_ppgtt *ppgtt;
	u64 size, last, limit;
	int err = 0;

	/* Allocate a ppgtt and try to fill the entire range */

	if (!USES_PPGTT(dev_priv))
		return 0;

	ppgtt = __hw_ppgtt_create(dev_priv);
	if (IS_ERR(ppgtt))
		return PTR_ERR(ppgtt);

	if (!ppgtt->vm.allocate_va_range)
		goto err_ppgtt_cleanup;

	/*
	 * While we only allocate the page tables here and so we could
	 * address a much larger GTT than we could actually fit into
	 * RAM, a practical limit is the amount of physical pages in the system.
	 * This should ensure that we do not run into the oomkiller during
	 * the test and take down the machine wilfully.
	 */
	limit = totalram_pages << PAGE_SHIFT;
	limit = min(ppgtt->vm.total, limit);

	/* Check we can allocate the entire range */
	for (size = 4096; size <= limit; size <<= 2) {
		err = ppgtt->vm.allocate_va_range(&ppgtt->vm, 0, size);
		if (err) {
			if (err == -ENOMEM) {
				pr_info("[1] Ran out of memory for va_range [0 + %llx] [bit %d]\n",
					size, ilog2(size));
				err = 0; /* virtual space too large! */
			}
			goto err_ppgtt_cleanup;
		}

		ppgtt->vm.clear_range(&ppgtt->vm, 0, size);
	}

	/* Check we can incrementally allocate the entire range */
	for (last = 0, size = 4096; size <= limit; last = size, size <<= 2) {
		err = ppgtt->vm.allocate_va_range(&ppgtt->vm,
						  last, size - last);
		if (err) {
			if (err == -ENOMEM) {
				pr_info("[2] Ran out of memory for va_range [%llx + %llx] [bit %d]\n",
					last, size - last, ilog2(size));
				err = 0; /* virtual space too large! */
			}
			goto err_ppgtt_cleanup;
		}
	}

err_ppgtt_cleanup:
	mutex_lock(&dev_priv->drm.struct_mutex);
	i915_ppgtt_put(ppgtt);
	mutex_unlock(&dev_priv->drm.struct_mutex);
	return err;
}

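/*
 * The size <<= 2 stepping in igt_ppgtt_alloc() probes power-of-four
 * sizes: 4K, 16K, 64K, ... capped at min(vm->total, totalram), so even a
 * full 48b address space is covered in fewer than twenty allocations per
 * pass.
 */
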
static int lowlevel_hole(struct drm_i915_private *i915,
			 struct i915_address_space *vm,
			 u64 hole_start, u64 hole_end,
			 unsigned long end_time)
{
	I915_RND_STATE(seed_prng);
	unsigned int size;
	struct i915_vma mock_vma;

	memset(&mock_vma, 0, sizeof(struct i915_vma));

	/* Keep creating larger objects until one cannot fit into the hole */
	for (size = 12; (hole_end - hole_start) >> size; size++) {
		I915_RND_SUBSTATE(prng, seed_prng);
		struct drm_i915_gem_object *obj;
		unsigned int *order, count, n;
		u64 hole_size;

		hole_size = (hole_end - hole_start) >> size;
		if (hole_size > KMALLOC_MAX_SIZE / sizeof(u32))
			hole_size = KMALLOC_MAX_SIZE / sizeof(u32);
		count = hole_size >> 1;
		if (!count) {
			pr_debug("%s: hole is too small [%llx - %llx] >> %d: %lld\n",
				 __func__, hole_start, hole_end, size, hole_size);
			break;
		}

		do {
			order = i915_random_order(count, &prng);
			if (order)
				break;
		} while (count >>= 1);
		if (!count)
			return -ENOMEM;
		GEM_BUG_ON(!order);

		GEM_BUG_ON(count * BIT_ULL(size) > vm->total);
		GEM_BUG_ON(hole_start + count * BIT_ULL(size) > hole_end);

		/* Ignore allocation failures (i.e. don't report them as
		 * a test failure) as we are purposefully allocating very
		 * large objects without checking that we have sufficient
		 * memory. We expect to hit -ENOMEM.
		 */

		obj = fake_dma_object(i915, BIT_ULL(size));
		if (IS_ERR(obj)) {
			kfree(order);
			break;
		}

		GEM_BUG_ON(obj->base.size != BIT_ULL(size));

		if (i915_gem_object_pin_pages(obj)) {
			i915_gem_object_put(obj);
			kfree(order);
			break;
		}

		for (n = 0; n < count; n++) {
			u64 addr = hole_start + order[n] * BIT_ULL(size);

			GEM_BUG_ON(addr + BIT_ULL(size) > vm->total);

			if (igt_timeout(end_time,
					"%s timed out before %d/%d\n",
					__func__, n, count)) {
				hole_end = hole_start; /* quit */
				break;
			}

			if (vm->allocate_va_range &&
			    vm->allocate_va_range(vm, addr, BIT_ULL(size)))
				break;

			mock_vma.pages = obj->mm.pages;
			mock_vma.node.size = BIT_ULL(size);
			mock_vma.node.start = addr;

			intel_runtime_pm_get(i915);
			vm->insert_entries(vm, &mock_vma, I915_CACHE_NONE, 0);
			intel_runtime_pm_put(i915);
		}
		count = n;

		i915_random_reorder(order, count, &prng);
		for (n = 0; n < count; n++) {
			u64 addr = hole_start + order[n] * BIT_ULL(size);

			GEM_BUG_ON(addr + BIT_ULL(size) > vm->total);
			vm->clear_range(vm, addr, BIT_ULL(size));
		}

		i915_gem_object_unpin_pages(obj);
		i915_gem_object_put(obj);

		kfree(order);

		cleanup_freed_objects(i915);
	}

	return 0;
}

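/*
 * lowlevel_hole deliberately bypasses the VMA and drm_mm layers: it
 * fabricates a stack-allocated mock_vma pointing at the object's pages
 * and feeds it straight to vm->insert_entries()/vm->clear_range(), so
 * only the raw page-table plumbing is exercised here.
 */
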
static void close_object_list(struct list_head *objects,
			      struct i915_address_space *vm)
{
	struct drm_i915_gem_object *obj, *on;
	int ignored;

	list_for_each_entry_safe(obj, on, objects, st_link) {
		struct i915_vma *vma;

		vma = i915_vma_instance(obj, vm, NULL);
		if (!IS_ERR(vma))
			ignored = i915_vma_unbind(vma);
		/* Only ppgtt vma may be closed before the object is freed */
		if (!IS_ERR(vma) && !i915_vma_is_ggtt(vma))
			i915_vma_close(vma);

		list_del(&obj->st_link);
		i915_gem_object_put(obj);
	}
}

static int fill_hole(struct drm_i915_private *i915,
		     struct i915_address_space *vm,
		     u64 hole_start, u64 hole_end,
		     unsigned long end_time)
{
	const u64 hole_size = hole_end - hole_start;
	struct drm_i915_gem_object *obj;
	const unsigned long max_pages =
		min_t(u64, ULONG_MAX - 1, hole_size/2 >> PAGE_SHIFT);
	const unsigned long max_step = max(int_sqrt(max_pages), 2UL);
	unsigned long npages, prime, flags;
	struct i915_vma *vma;
	LIST_HEAD(objects);
	int err;

	/* Try binding many VMA working inwards from either edge */

	flags = PIN_OFFSET_FIXED | PIN_USER;
	if (i915_is_ggtt(vm))
		flags |= PIN_GLOBAL;

	for_each_prime_number_from(prime, 2, max_step) {
		for (npages = 1; npages <= max_pages; npages *= prime) {
			const u64 full_size = npages << PAGE_SHIFT;
			const struct {
				const char *name;
				u64 offset;
				int step;
			} phases[] = {
				{ "top-down", hole_end, -1, },
				{ "bottom-up", hole_start, 1, },
				{ }
			}, *p;

			obj = fake_dma_object(i915, full_size);
			if (IS_ERR(obj))
				break;

			list_add(&obj->st_link, &objects);

			/* Align differing sized objects against the edges, and
			 * check we don't walk off into the void when binding
			 * them into the GTT.
			 */
			for (p = phases; p->name; p++) {
				u64 offset;

				offset = p->offset;
				list_for_each_entry(obj, &objects, st_link) {
					vma = i915_vma_instance(obj, vm, NULL);
					if (IS_ERR(vma))
						continue;

					if (p->step < 0) {
						if (offset < hole_start + obj->base.size)
							break;
						offset -= obj->base.size;
					}

					err = i915_vma_pin(vma, 0, 0, offset | flags);
					if (err) {
						pr_err("%s(%s) pin (forward) failed with err=%d on size=%lu pages (prime=%lu), offset=%llx\n",
						       __func__, p->name, err, npages, prime, offset);
						goto err;
					}

					if (!drm_mm_node_allocated(&vma->node) ||
					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
						pr_err("%s(%s) (forward) insert failed: vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
						       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
						       offset);
						err = -EINVAL;
						goto err;
					}

					i915_vma_unpin(vma);

					if (p->step > 0) {
						if (offset + obj->base.size > hole_end)
							break;
						offset += obj->base.size;
					}
				}

				offset = p->offset;
				list_for_each_entry(obj, &objects, st_link) {
					vma = i915_vma_instance(obj, vm, NULL);
					if (IS_ERR(vma))
						continue;

					if (p->step < 0) {
						if (offset < hole_start + obj->base.size)
							break;
						offset -= obj->base.size;
					}

					if (!drm_mm_node_allocated(&vma->node) ||
					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
						pr_err("%s(%s) (forward) moved vma.node=%llx + %llx, expected offset %llx\n",
						       __func__, p->name, vma->node.start, vma->node.size,
						       offset);
						err = -EINVAL;
						goto err;
					}

					err = i915_vma_unbind(vma);
					if (err) {
						pr_err("%s(%s) (forward) unbind of vma.node=%llx + %llx failed with err=%d\n",
						       __func__, p->name, vma->node.start, vma->node.size,
						       err);
						goto err;
					}

					if (p->step > 0) {
						if (offset + obj->base.size > hole_end)
							break;
						offset += obj->base.size;
					}
				}

				offset = p->offset;
				list_for_each_entry_reverse(obj, &objects, st_link) {
					vma = i915_vma_instance(obj, vm, NULL);
					if (IS_ERR(vma))
						continue;

					if (p->step < 0) {
						if (offset < hole_start + obj->base.size)
							break;
						offset -= obj->base.size;
					}

					err = i915_vma_pin(vma, 0, 0, offset | flags);
					if (err) {
						pr_err("%s(%s) pin (backward) failed with err=%d on size=%lu pages (prime=%lu), offset=%llx\n",
						       __func__, p->name, err, npages, prime, offset);
						goto err;
					}

					if (!drm_mm_node_allocated(&vma->node) ||
					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
						pr_err("%s(%s) (backward) insert failed: vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
						       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
						       offset);
						err = -EINVAL;
						goto err;
					}

					i915_vma_unpin(vma);

					if (p->step > 0) {
						if (offset + obj->base.size > hole_end)
							break;
						offset += obj->base.size;
					}
				}

				offset = p->offset;
				list_for_each_entry_reverse(obj, &objects, st_link) {
					vma = i915_vma_instance(obj, vm, NULL);
					if (IS_ERR(vma))
						continue;

					if (p->step < 0) {
						if (offset < hole_start + obj->base.size)
							break;
						offset -= obj->base.size;
					}

					if (!drm_mm_node_allocated(&vma->node) ||
					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
						pr_err("%s(%s) (backward) moved vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
						       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
						       offset);
						err = -EINVAL;
						goto err;
					}

					err = i915_vma_unbind(vma);
					if (err) {
						pr_err("%s(%s) (backward) unbind of vma.node=%llx + %llx failed with err=%d\n",
						       __func__, p->name, vma->node.start, vma->node.size,
						       err);
						goto err;
					}

					if (p->step > 0) {
						if (offset + obj->base.size > hole_end)
							break;
						offset += obj->base.size;
					}
				}
			}

			if (igt_timeout(end_time, "%s timed out (npages=%lu, prime=%lu)\n",
					__func__, npages, prime)) {
				err = -EINTR;
				goto err;
			}
		}

		close_object_list(&objects, vm);
		cleanup_freed_objects(i915);
	}

	return 0;

err:
	close_object_list(&objects, vm);
	return err;
}

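/*
 * The object sizes used by fill_hole grow as powers of each prime stride:
 * for prime == 3, for instance, the inner loop binds objects of 1, 3, 9,
 * 27, ... pages, packing them from both ends of the hole ("top-down" and
 * "bottom-up" phases) before unwinding them again. Using primes means
 * successive passes share no common factor and so land on different
 * offsets within the hole.
 */
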
static int walk_hole(struct drm_i915_private *i915,
		     struct i915_address_space *vm,
		     u64 hole_start, u64 hole_end,
		     unsigned long end_time)
{
	const u64 hole_size = hole_end - hole_start;
	const unsigned long max_pages =
		min_t(u64, ULONG_MAX - 1, hole_size >> PAGE_SHIFT);
	unsigned long flags;
	u64 size;

	/* Try binding a single VMA in different positions within the hole */

	flags = PIN_OFFSET_FIXED | PIN_USER;
	if (i915_is_ggtt(vm))
		flags |= PIN_GLOBAL;

	for_each_prime_number_from(size, 1, max_pages) {
		struct drm_i915_gem_object *obj;
		struct i915_vma *vma;
		u64 addr;
		int err = 0;

		obj = fake_dma_object(i915, size << PAGE_SHIFT);
		if (IS_ERR(obj))
			break;

		vma = i915_vma_instance(obj, vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto err_put;
		}

		for (addr = hole_start;
		     addr + obj->base.size < hole_end;
		     addr += obj->base.size) {
			err = i915_vma_pin(vma, 0, 0, addr | flags);
			if (err) {
				pr_err("%s bind failed at %llx + %llx [hole %llx - %llx] with err=%d\n",
				       __func__, addr, vma->size,
				       hole_start, hole_end, err);
				goto err_close;
			}
			i915_vma_unpin(vma);

			if (!drm_mm_node_allocated(&vma->node) ||
			    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
				pr_err("%s incorrect at %llx + %llx\n",
				       __func__, addr, vma->size);
				err = -EINVAL;
				goto err_close;
			}

			err = i915_vma_unbind(vma);
			if (err) {
				pr_err("%s unbind failed at %llx + %llx with err=%d\n",
				       __func__, addr, vma->size, err);
				goto err_close;
			}

			GEM_BUG_ON(drm_mm_node_allocated(&vma->node));

			if (igt_timeout(end_time,
					"%s timed out at %llx\n",
					__func__, addr)) {
				err = -EINTR;
				goto err_close;
			}
		}

err_close:
		if (!i915_vma_is_ggtt(vma))
			i915_vma_close(vma);
err_put:
		i915_gem_object_put(obj);
		if (err)
			return err;

		cleanup_freed_objects(i915);
	}

	return 0;
}

static int pot_hole(struct drm_i915_private *i915,
		    struct i915_address_space *vm,
		    u64 hole_start, u64 hole_end,
		    unsigned long end_time)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	unsigned long flags;
	unsigned int pot;
	int err = 0;

	flags = PIN_OFFSET_FIXED | PIN_USER;
	if (i915_is_ggtt(vm))
		flags |= PIN_GLOBAL;

	obj = i915_gem_object_create_internal(i915, 2 * I915_GTT_PAGE_SIZE);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_obj;
	}

	/* Insert a pair of pages across every pot boundary within the hole */
	for (pot = fls64(hole_end - 1) - 1;
	     pot > ilog2(2 * I915_GTT_PAGE_SIZE);
	     pot--) {
		u64 step = BIT_ULL(pot);
		u64 addr;

		for (addr = round_up(hole_start + I915_GTT_PAGE_SIZE, step) - I915_GTT_PAGE_SIZE;
		     addr <= round_down(hole_end - 2*I915_GTT_PAGE_SIZE, step) - I915_GTT_PAGE_SIZE;
		     addr += step) {
			err = i915_vma_pin(vma, 0, 0, addr | flags);
			if (err) {
				pr_err("%s failed to pin object at %llx in hole [%llx - %llx], with err=%d\n",
				       __func__,
				       addr,
				       hole_start, hole_end,
				       err);
				goto err;
			}

			if (!drm_mm_node_allocated(&vma->node) ||
			    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
				pr_err("%s incorrect at %llx + %llx\n",
				       __func__, addr, vma->size);
				i915_vma_unpin(vma);
				err = i915_vma_unbind(vma);
				err = -EINVAL;
				goto err;
			}

			i915_vma_unpin(vma);
			err = i915_vma_unbind(vma);
			GEM_BUG_ON(err);
		}

		if (igt_timeout(end_time,
				"%s timed out after %d/%d\n",
				__func__, pot, fls64(hole_end - 1) - 1)) {
			err = -EINTR;
			goto err;
		}
	}

err:
	if (!i915_vma_is_ggtt(vma))
		i915_vma_close(vma);
err_obj:
	i915_gem_object_put(obj);
	return err;
}

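/*
 * Worked example for pot_hole: with hole_start == 0, hole_end == 1G and
 * pot == 20 (1M steps), the two-page object is pinned at 1M - 4K,
 * 2M - 4K, 3M - 4K, ... so that every binding straddles a power-of-two
 * boundary and exercises the page-table crossing at that alignment.
 */
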
static int drunk_hole(struct drm_i915_private *i915,
		      struct i915_address_space *vm,
		      u64 hole_start, u64 hole_end,
		      unsigned long end_time)
{
	I915_RND_STATE(prng);
	unsigned int size;
	unsigned long flags;

	flags = PIN_OFFSET_FIXED | PIN_USER;
	if (i915_is_ggtt(vm))
		flags |= PIN_GLOBAL;

	/* Keep creating larger objects until one cannot fit into the hole */
	for (size = 12; (hole_end - hole_start) >> size; size++) {
		struct drm_i915_gem_object *obj;
		unsigned int *order, count, n;
		struct i915_vma *vma;
		u64 hole_size;
		int err = -ENODEV;

		hole_size = (hole_end - hole_start) >> size;
		if (hole_size > KMALLOC_MAX_SIZE / sizeof(u32))
			hole_size = KMALLOC_MAX_SIZE / sizeof(u32);
		count = hole_size >> 1;
		if (!count) {
			pr_debug("%s: hole is too small [%llx - %llx] >> %d: %lld\n",
				 __func__, hole_start, hole_end, size, hole_size);
			break;
		}

		do {
			order = i915_random_order(count, &prng);
			if (order)
				break;
		} while (count >>= 1);
		if (!count)
			return -ENOMEM;
		GEM_BUG_ON(!order);

		/* Ignore allocation failures (i.e. don't report them as
		 * a test failure) as we are purposefully allocating very
		 * large objects without checking that we have sufficient
		 * memory. We expect to hit -ENOMEM.
		 */

		obj = fake_dma_object(i915, BIT_ULL(size));
		if (IS_ERR(obj)) {
			kfree(order);
			break;
		}

		vma = i915_vma_instance(obj, vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto err_obj;
		}

		GEM_BUG_ON(vma->size != BIT_ULL(size));

		for (n = 0; n < count; n++) {
			u64 addr = hole_start + order[n] * BIT_ULL(size);

			err = i915_vma_pin(vma, 0, 0, addr | flags);
			if (err) {
				pr_err("%s failed to pin object at %llx + %llx in hole [%llx - %llx], with err=%d\n",
				       __func__,
				       addr, BIT_ULL(size),
				       hole_start, hole_end,
				       err);
				goto err;
			}

			if (!drm_mm_node_allocated(&vma->node) ||
			    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
				pr_err("%s incorrect at %llx + %llx\n",
				       __func__, addr, BIT_ULL(size));
				i915_vma_unpin(vma);
				err = i915_vma_unbind(vma);
				err = -EINVAL;
				goto err;
			}

			i915_vma_unpin(vma);
			err = i915_vma_unbind(vma);
			GEM_BUG_ON(err);

			if (igt_timeout(end_time,
					"%s timed out after %d/%d\n",
					__func__, n, count)) {
				err = -EINTR;
				goto err;
			}
		}

err:
		if (!i915_vma_is_ggtt(vma))
			i915_vma_close(vma);
err_obj:
		i915_gem_object_put(obj);
		kfree(order);
		if (err)
			return err;

		cleanup_freed_objects(i915);
	}

	return 0;
}

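/*
 * "Drunk" just means random order: i915_random_order() supplies a
 * shuffled permutation of the slot indices, so the hole is filled and
 * emptied in a non-monotonic pattern that is much more likely to trip up
 * page-table allocation and teardown bookkeeping than a linear walk.
 */
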
static int __shrink_hole(struct drm_i915_private *i915,
			 struct i915_address_space *vm,
			 u64 hole_start, u64 hole_end,
			 unsigned long end_time)
{
	struct drm_i915_gem_object *obj;
	unsigned long flags = PIN_OFFSET_FIXED | PIN_USER;
	unsigned int order = 12;
	LIST_HEAD(objects);
	int err = 0;
	u64 addr;

	/* Keep creating larger objects until one cannot fit into the hole */
	for (addr = hole_start; addr < hole_end; ) {
		struct i915_vma *vma;
		u64 size = BIT_ULL(order++);

		size = min(size, hole_end - addr);
		obj = fake_dma_object(i915, size);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			break;
		}

		list_add(&obj->st_link, &objects);

		vma = i915_vma_instance(obj, vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			break;
		}

		GEM_BUG_ON(vma->size != size);

		err = i915_vma_pin(vma, 0, 0, addr | flags);
		if (err) {
			pr_err("%s failed to pin object at %llx + %llx in hole [%llx - %llx], with err=%d\n",
			       __func__, addr, size, hole_start, hole_end, err);
			break;
		}

		if (!drm_mm_node_allocated(&vma->node) ||
		    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
			pr_err("%s incorrect at %llx + %llx\n",
			       __func__, addr, size);
			i915_vma_unpin(vma);
			err = i915_vma_unbind(vma);
			err = -EINVAL;
			break;
		}

		i915_vma_unpin(vma);
		addr += size;

		if (igt_timeout(end_time,
				"%s timed out at offset %llx [%llx - %llx]\n",
				__func__, addr, hole_start, hole_end)) {
			err = -EINTR;
			break;
		}
	}

	close_object_list(&objects, vm);
	cleanup_freed_objects(i915);
	return err;
}

static int shrink_hole(struct drm_i915_private *i915,
		       struct i915_address_space *vm,
		       u64 hole_start, u64 hole_end,
		       unsigned long end_time)
{
	unsigned long prime;
	int err;

	vm->fault_attr.probability = 999;
	atomic_set(&vm->fault_attr.times, -1);

	for_each_prime_number_from(prime, 0, ULONG_MAX - 1) {
		vm->fault_attr.interval = prime;
		err = __shrink_hole(i915, vm, hole_start, hole_end, end_time);
		if (err)
			break;
	}

	memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));

	return err;
}

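/*
 * The vm->fault_attr knobs above drive the selftest-only fault injection
 * for page-table allocations: probability 999 with times == -1 makes
 * every eligible allocation fail, while stepping interval through
 * successive primes changes which allocation in the sequence is
 * eligible, so each __shrink_hole() pass is cut short at a different
 * point.
 */
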
static int shrink_boom(struct drm_i915_private *i915,
		       struct i915_address_space *vm,
		       u64 hole_start, u64 hole_end,
		       unsigned long end_time)
{
	unsigned int sizes[] = { SZ_2M, SZ_1G };
	struct drm_i915_gem_object *purge;
	struct drm_i915_gem_object *explode;
	int err;
	int i;

	/*
	 * Catch the case which shrink_hole seems to miss. The setup here
	 * requires invoking the shrinker as we do the alloc_pt/alloc_pd, while
	 * ensuring that all vma associated with the respective pd/pdp are
	 * unpinned at the time.
	 */

	for (i = 0; i < ARRAY_SIZE(sizes); ++i) {
		unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
		unsigned int size = sizes[i];
		struct i915_vma *vma;

		purge = fake_dma_object(i915, size);
		if (IS_ERR(purge))
			return PTR_ERR(purge);

		vma = i915_vma_instance(purge, vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto err_purge;
		}

		err = i915_vma_pin(vma, 0, 0, flags);
		if (err)
			goto err_purge;

		/* Should now be ripe for purging */
		i915_vma_unpin(vma);

		explode = fake_dma_object(i915, size);
		if (IS_ERR(explode)) {
			err = PTR_ERR(explode);
			goto err_purge;
		}

		vm->fault_attr.probability = 100;
		vm->fault_attr.interval = 1;
		atomic_set(&vm->fault_attr.times, -1);

		vma = i915_vma_instance(explode, vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto err_explode;
		}

		err = i915_vma_pin(vma, 0, 0, flags | size);
		if (err)
			goto err_explode;

		i915_vma_unpin(vma);

		i915_gem_object_put(purge);
		i915_gem_object_put(explode);

		memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));
		cleanup_freed_objects(i915);
	}

	return 0;

err_explode:
	i915_gem_object_put(explode);
err_purge:
	i915_gem_object_put(purge);
	memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));
	return err;
}

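/*
 * Note the second pin passes flags | size: with PIN_OFFSET_FIXED the
 * requested offset is carried in the pin flags, so "explode" is bound
 * exactly one object-size into the hole, adjacent to the freshly
 * unpinned (and purgeable) "purge" object. With fault injection armed,
 * the page-table allocations performed during that bind are what force
 * the shrinker to run in the middle of alloc_pt/alloc_pd, which is the
 * case the comment above says shrink_hole misses.
 */
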
static int exercise_ppgtt(struct drm_i915_private *dev_priv,
			  int (*func)(struct drm_i915_private *i915,
				      struct i915_address_space *vm,
				      u64 hole_start, u64 hole_end,
				      unsigned long end_time))
{
	struct drm_file *file;
	struct i915_hw_ppgtt *ppgtt;
	IGT_TIMEOUT(end_time);
	int err;

	if (!USES_FULL_PPGTT(dev_priv))
		return 0;

	file = mock_file(dev_priv);
	if (IS_ERR(file))
		return PTR_ERR(file);

	mutex_lock(&dev_priv->drm.struct_mutex);
	ppgtt = i915_ppgtt_create(dev_priv, file->driver_priv);
	if (IS_ERR(ppgtt)) {
		err = PTR_ERR(ppgtt);
		goto out_unlock;
	}
	GEM_BUG_ON(offset_in_page(ppgtt->vm.total));
	GEM_BUG_ON(ppgtt->vm.closed);

	err = func(dev_priv, &ppgtt->vm, 0, ppgtt->vm.total, end_time);

	i915_ppgtt_close(&ppgtt->vm);
	i915_ppgtt_put(ppgtt);
out_unlock:
	mutex_unlock(&dev_priv->drm.struct_mutex);

	mock_file_free(dev_priv, file);
	return err;
}

static int igt_ppgtt_fill(void *arg)
{
	return exercise_ppgtt(arg, fill_hole);
}

static int igt_ppgtt_walk(void *arg)
{
	return exercise_ppgtt(arg, walk_hole);
}

static int igt_ppgtt_pot(void *arg)
{
	return exercise_ppgtt(arg, pot_hole);
}

static int igt_ppgtt_drunk(void *arg)
{
	return exercise_ppgtt(arg, drunk_hole);
}

static int igt_ppgtt_lowlevel(void *arg)
{
	return exercise_ppgtt(arg, lowlevel_hole);
}

static int igt_ppgtt_shrink(void *arg)
{
	return exercise_ppgtt(arg, shrink_hole);
}

static int igt_ppgtt_shrink_boom(void *arg)
{
	return exercise_ppgtt(arg, shrink_boom);
}

static int sort_holes(void *priv, struct list_head *A, struct list_head *B)
{
	struct drm_mm_node *a = list_entry(A, typeof(*a), hole_stack);
	struct drm_mm_node *b = list_entry(B, typeof(*b), hole_stack);

	if (a->start < b->start)
		return -1;
	else
		return 1;
}

static int exercise_ggtt(struct drm_i915_private *i915,
			 int (*func)(struct drm_i915_private *i915,
				     struct i915_address_space *vm,
				     u64 hole_start, u64 hole_end,
				     unsigned long end_time))
{
	struct i915_ggtt *ggtt = &i915->ggtt;
	u64 hole_start, hole_end, last = 0;
	struct drm_mm_node *node;
	IGT_TIMEOUT(end_time);
	int err = 0;

	mutex_lock(&i915->drm.struct_mutex);
restart:
	list_sort(NULL, &ggtt->vm.mm.hole_stack, sort_holes);
	drm_mm_for_each_hole(node, &ggtt->vm.mm, hole_start, hole_end) {
		if (hole_start < last)
			continue;

		if (ggtt->vm.mm.color_adjust)
			ggtt->vm.mm.color_adjust(node, 0,
						 &hole_start, &hole_end);
		if (hole_start >= hole_end)
			continue;

		err = func(i915, &ggtt->vm, hole_start, hole_end, end_time);
		if (err)
			break;

		/* As we have manipulated the drm_mm, the list may be corrupt */
		last = hole_end;
		goto restart;
	}
	mutex_unlock(&i915->drm.struct_mutex);

	return err;
}

static int igt_ggtt_fill(void *arg)
{
	return exercise_ggtt(arg, fill_hole);
}

static int igt_ggtt_walk(void *arg)
{
	return exercise_ggtt(arg, walk_hole);
}

static int igt_ggtt_pot(void *arg)
{
	return exercise_ggtt(arg, pot_hole);
}

static int igt_ggtt_drunk(void *arg)
{
	return exercise_ggtt(arg, drunk_hole);
}

static int igt_ggtt_lowlevel(void *arg)
{
	return exercise_ggtt(arg, lowlevel_hole);
}

static int igt_ggtt_page(void *arg)
{
	const unsigned int count = PAGE_SIZE/sizeof(u32);
	I915_RND_STATE(prng);
	struct drm_i915_private *i915 = arg;
	struct i915_ggtt *ggtt = &i915->ggtt;
	struct drm_i915_gem_object *obj;
	struct drm_mm_node tmp;
	unsigned int *order, n;
	int err;

	mutex_lock(&i915->drm.struct_mutex);

	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto out_unlock;
	}

	err = i915_gem_object_pin_pages(obj);
	if (err)
		goto out_free;

	memset(&tmp, 0, sizeof(tmp));
	err = drm_mm_insert_node_in_range(&ggtt->vm.mm, &tmp,
					  count * PAGE_SIZE, 0,
					  I915_COLOR_UNEVICTABLE,
					  0, ggtt->mappable_end,
					  DRM_MM_INSERT_LOW);
	if (err)
		goto out_unpin;

	intel_runtime_pm_get(i915);

	for (n = 0; n < count; n++) {
		u64 offset = tmp.start + n * PAGE_SIZE;

		ggtt->vm.insert_page(&ggtt->vm,
				     i915_gem_object_get_dma_address(obj, 0),
				     offset, I915_CACHE_NONE, 0);
	}

	order = i915_random_order(count, &prng);
	if (!order) {
		err = -ENOMEM;
		goto out_remove;
	}

	for (n = 0; n < count; n++) {
		u64 offset = tmp.start + order[n] * PAGE_SIZE;
		u32 __iomem *vaddr;

		vaddr = io_mapping_map_atomic_wc(&ggtt->iomap, offset);
		iowrite32(n, vaddr + n);
		io_mapping_unmap_atomic(vaddr);
	}
	i915_gem_flush_ggtt_writes(i915);

	i915_random_reorder(order, count, &prng);
	for (n = 0; n < count; n++) {
		u64 offset = tmp.start + order[n] * PAGE_SIZE;
		u32 __iomem *vaddr;
		u32 val;

		vaddr = io_mapping_map_atomic_wc(&ggtt->iomap, offset);
		val = ioread32(vaddr + n);
		io_mapping_unmap_atomic(vaddr);

		if (val != n) {
			pr_err("insert page failed: found %d, expected %d\n",
			       val, n);
			err = -EINVAL;
			break;
		}
	}

	kfree(order);
out_remove:
	ggtt->vm.clear_range(&ggtt->vm, tmp.start, tmp.size);
	intel_runtime_pm_put(i915);
	drm_mm_remove_node(&tmp);
out_unpin:
	i915_gem_object_unpin_pages(obj);
out_free:
	i915_gem_object_put(obj);
out_unlock:
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}

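/*
 * igt_ggtt_page maps the same single backing page at count
 * (PAGE_SIZE / sizeof(u32)) consecutive GGTT offsets, writes dword n of
 * that shared page with the value n through a randomly chosen mapping,
 * and then reads every dword back through another randomly chosen
 * mapping. Any PTE that insert_page programmed incorrectly shows up as a
 * mismatched value.
 */
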
static void track_vma_bind(struct i915_vma *vma)
{
	struct drm_i915_gem_object *obj = vma->obj;

	obj->bind_count++; /* track for eviction later */
	__i915_gem_object_pin_pages(obj);

	vma->pages = obj->mm.pages;
	list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
}

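/*
 * The mock tests below call i915_gem_gtt_reserve()/i915_gem_gtt_insert()
 * directly instead of going through i915_vma_pin(), so track_vma_bind()
 * hand-crafts just enough binding state (bind_count, pinned pages, a
 * spot on the vm's inactive list) for the eviction code to treat these
 * nodes as genuinely bound VMAs.
 */
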
static int exercise_mock(struct drm_i915_private *i915,
			 int (*func)(struct drm_i915_private *i915,
				     struct i915_address_space *vm,
				     u64 hole_start, u64 hole_end,
				     unsigned long end_time))
{
	const u64 limit = totalram_pages << PAGE_SHIFT;
	struct i915_gem_context *ctx;
	struct i915_hw_ppgtt *ppgtt;
	IGT_TIMEOUT(end_time);
	int err;

	ctx = mock_context(i915, "mock");
	if (!ctx)
		return -ENOMEM;

	ppgtt = ctx->ppgtt;
	GEM_BUG_ON(!ppgtt);

	err = func(i915, &ppgtt->vm, 0, min(ppgtt->vm.total, limit), end_time);

	mock_context_close(ctx);
	return err;
}

static int igt_mock_fill(void *arg)
{
	return exercise_mock(arg, fill_hole);
}

static int igt_mock_walk(void *arg)
{
	return exercise_mock(arg, walk_hole);
}

static int igt_mock_pot(void *arg)
{
	return exercise_mock(arg, pot_hole);
}

static int igt_mock_drunk(void *arg)
{
	return exercise_mock(arg, drunk_hole);
}

static int igt_gtt_reserve(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj, *on;
	LIST_HEAD(objects);
	u64 total;
	int err = -ENODEV;

	/* i915_gem_gtt_reserve() tries to reserve the precise range
	 * for the node, and evicts if it has to. So our test checks that
	 * it can give us the requested space and prevent overlaps.
	 */

	/* Start by filling the GGTT */
	for (total = 0;
	     total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.vm.total;
	     total += 2*I915_GTT_PAGE_SIZE) {
		struct i915_vma *vma;

		obj = i915_gem_object_create_internal(i915, 2*PAGE_SIZE);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			goto out;
		}

		err = i915_gem_object_pin_pages(obj);
		if (err) {
			i915_gem_object_put(obj);
			goto out;
		}

		list_add(&obj->st_link, &objects);

		vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out;
		}

		err = i915_gem_gtt_reserve(&i915->ggtt.vm, &vma->node,
					   obj->base.size,
					   total,
					   obj->cache_level,
					   0);
		if (err) {
			pr_err("i915_gem_gtt_reserve (pass 1) failed at %llu/%llu with err=%d\n",
			       total, i915->ggtt.vm.total, err);
			goto out;
		}
		track_vma_bind(vma);

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
		if (vma->node.start != total ||
		    vma->node.size != 2*I915_GTT_PAGE_SIZE) {
			pr_err("i915_gem_gtt_reserve (pass 1) placement failed, found (%llx + %llx), expected (%llx + %llx)\n",
			       vma->node.start, vma->node.size,
			       total, 2*I915_GTT_PAGE_SIZE);
			err = -EINVAL;
			goto out;
		}
	}

	/* Now we start forcing evictions */
	for (total = I915_GTT_PAGE_SIZE;
	     total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.vm.total;
	     total += 2*I915_GTT_PAGE_SIZE) {
		struct i915_vma *vma;

		obj = i915_gem_object_create_internal(i915, 2*PAGE_SIZE);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			goto out;
		}

		err = i915_gem_object_pin_pages(obj);
		if (err) {
			i915_gem_object_put(obj);
			goto out;
		}

		list_add(&obj->st_link, &objects);

		vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out;
		}

		err = i915_gem_gtt_reserve(&i915->ggtt.vm, &vma->node,
					   obj->base.size,
					   total,
					   obj->cache_level,
					   0);
		if (err) {
			pr_err("i915_gem_gtt_reserve (pass 2) failed at %llu/%llu with err=%d\n",
			       total, i915->ggtt.vm.total, err);
			goto out;
		}
		track_vma_bind(vma);

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
		if (vma->node.start != total ||
		    vma->node.size != 2*I915_GTT_PAGE_SIZE) {
			pr_err("i915_gem_gtt_reserve (pass 2) placement failed, found (%llx + %llx), expected (%llx + %llx)\n",
			       vma->node.start, vma->node.size,
			       total, 2*I915_GTT_PAGE_SIZE);
			err = -EINVAL;
			goto out;
		}
	}

	/* And then try at random */
	list_for_each_entry_safe(obj, on, &objects, st_link) {
		struct i915_vma *vma;
		u64 offset;

		vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out;
		}

		err = i915_vma_unbind(vma);
		if (err) {
			pr_err("i915_vma_unbind failed with err=%d!\n", err);
			goto out;
		}

		offset = random_offset(0, i915->ggtt.vm.total,
				       2*I915_GTT_PAGE_SIZE,
				       I915_GTT_MIN_ALIGNMENT);

		err = i915_gem_gtt_reserve(&i915->ggtt.vm, &vma->node,
					   obj->base.size,
					   offset,
					   obj->cache_level,
					   0);
		if (err) {
			pr_err("i915_gem_gtt_reserve (pass 3) failed at %llu/%llu with err=%d\n",
			       total, i915->ggtt.vm.total, err);
			goto out;
		}
		track_vma_bind(vma);

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
		if (vma->node.start != offset ||
		    vma->node.size != 2*I915_GTT_PAGE_SIZE) {
			pr_err("i915_gem_gtt_reserve (pass 3) placement failed, found (%llx + %llx), expected (%llx + %llx)\n",
			       vma->node.start, vma->node.size,
			       offset, 2*I915_GTT_PAGE_SIZE);
			err = -EINVAL;
			goto out;
		}
	}

out:
	list_for_each_entry_safe(obj, on, &objects, st_link) {
		i915_gem_object_unpin_pages(obj);
		i915_gem_object_put(obj);
	}
	return err;
}

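/*
 * In short: pass 1 packs nodes back to back from offset 0, pass 2
 * repeats at offsets shifted by one page so that every request overlaps
 * two existing nodes and must evict them, and pass 3 re-reserves each
 * node at a random, suitably aligned offset. In every case the test then
 * checks that node.start/node.size match exactly what was asked for,
 * since i915_gem_gtt_reserve() promises a precise placement.
 */
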
static int igt_gtt_insert(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj, *on;
	struct drm_mm_node tmp = {};
	const struct invalid_insert {
		u64 size;
		u64 alignment;
		u64 start, end;
	} invalid_insert[] = {
		{
			i915->ggtt.vm.total + I915_GTT_PAGE_SIZE, 0,
			0, i915->ggtt.vm.total,
		},
		{
			2*I915_GTT_PAGE_SIZE, 0,
			0, I915_GTT_PAGE_SIZE,
		},
		{
			-(u64)I915_GTT_PAGE_SIZE, 0,
			0, 4*I915_GTT_PAGE_SIZE,
		},
		{
			-(u64)2*I915_GTT_PAGE_SIZE, 2*I915_GTT_PAGE_SIZE,
			0, 4*I915_GTT_PAGE_SIZE,
		},
		{
			I915_GTT_PAGE_SIZE, I915_GTT_MIN_ALIGNMENT << 1,
			I915_GTT_MIN_ALIGNMENT, I915_GTT_MIN_ALIGNMENT << 1,
		},
		{}
	}, *ii;
	LIST_HEAD(objects);
	u64 total;
	int err = -ENODEV;

	/* i915_gem_gtt_insert() tries to allocate some free space in the GTT
	 * to the node, evicting if required.
	 */

	/* Check a couple of obviously invalid requests */
	for (ii = invalid_insert; ii->size; ii++) {
		err = i915_gem_gtt_insert(&i915->ggtt.vm, &tmp,
					  ii->size, ii->alignment,
					  I915_COLOR_UNEVICTABLE,
					  ii->start, ii->end,
					  0);
		if (err != -ENOSPC) {
			pr_err("Invalid i915_gem_gtt_insert(.size=%llx, .alignment=%llx, .start=%llx, .end=%llx) succeeded (err=%d)\n",
			       ii->size, ii->alignment, ii->start, ii->end,
			       err);
			return -EINVAL;
		}
	}

	/* Start by filling the GGTT */
	for (total = 0;
	     total + I915_GTT_PAGE_SIZE <= i915->ggtt.vm.total;
	     total += I915_GTT_PAGE_SIZE) {
		struct i915_vma *vma;

		obj = i915_gem_object_create_internal(i915, I915_GTT_PAGE_SIZE);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			goto out;
		}

		err = i915_gem_object_pin_pages(obj);
		if (err) {
			i915_gem_object_put(obj);
			goto out;
		}

		list_add(&obj->st_link, &objects);

		vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out;
		}

		err = i915_gem_gtt_insert(&i915->ggtt.vm, &vma->node,
					  obj->base.size, 0, obj->cache_level,
					  0, i915->ggtt.vm.total,
					  0);
		if (err == -ENOSPC) {
			/* maxed out the GGTT space */
			i915_gem_object_put(obj);
			break;
		}
		if (err) {
			pr_err("i915_gem_gtt_insert (pass 1) failed at %llu/%llu with err=%d\n",
			       total, i915->ggtt.vm.total, err);
			goto out;
		}
		track_vma_bind(vma);
		__i915_vma_pin(vma);

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	}

	list_for_each_entry(obj, &objects, st_link) {
		struct i915_vma *vma;

		vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out;
		}

		if (!drm_mm_node_allocated(&vma->node)) {
			pr_err("VMA was unexpectedly evicted!\n");
			err = -EINVAL;
			goto out;
		}

		__i915_vma_unpin(vma);
	}

	/* If we then reinsert, we should find the same hole */
	list_for_each_entry_safe(obj, on, &objects, st_link) {
		struct i915_vma *vma;
		u64 offset;

		vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out;
		}

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
		offset = vma->node.start;

		err = i915_vma_unbind(vma);
		if (err) {
			pr_err("i915_vma_unbind failed with err=%d!\n", err);
			goto out;
		}

		err = i915_gem_gtt_insert(&i915->ggtt.vm, &vma->node,
					  obj->base.size, 0, obj->cache_level,
					  0, i915->ggtt.vm.total,
					  0);
		if (err) {
			pr_err("i915_gem_gtt_insert (pass 2) failed at %llu/%llu with err=%d\n",
			       total, i915->ggtt.vm.total, err);
			goto out;
		}
		track_vma_bind(vma);

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
		if (vma->node.start != offset) {
			pr_err("i915_gem_gtt_insert did not return node to its previous location (the only hole), expected address %llx, found %llx\n",
			       offset, vma->node.start);
			err = -EINVAL;
			goto out;
		}
	}

	/* And then force evictions */
	for (total = 0;
	     total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.vm.total;
	     total += 2*I915_GTT_PAGE_SIZE) {
		struct i915_vma *vma;

		obj = i915_gem_object_create_internal(i915, 2*I915_GTT_PAGE_SIZE);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			goto out;
		}

		err = i915_gem_object_pin_pages(obj);
		if (err) {
			i915_gem_object_put(obj);
			goto out;
		}

		list_add(&obj->st_link, &objects);

		vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out;
		}

		err = i915_gem_gtt_insert(&i915->ggtt.vm, &vma->node,
					  obj->base.size, 0, obj->cache_level,
					  0, i915->ggtt.vm.total,
					  0);
		if (err) {
			pr_err("i915_gem_gtt_insert (pass 3) failed at %llu/%llu with err=%d\n",
			       total, i915->ggtt.vm.total, err);
			goto out;
		}
		track_vma_bind(vma);

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	}

out:
	list_for_each_entry_safe(obj, on, &objects, st_link) {
		i915_gem_object_unpin_pages(obj);
		i915_gem_object_put(obj);
	}
	return err;
}

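/*
 * Where igt_gtt_reserve() checks placement at an exact caller-chosen
 * offset, igt_gtt_insert() checks the search-and-evict path: invalid
 * requests must fail with -ENOSPC, a full GGTT of pinned nodes must
 * report -ENOSPC rather than evict, an unbound node must drop back into
 * the only free hole, and once everything is unpinned further inserts
 * must succeed by evicting.
 */
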
int i915_gem_gtt_mock_selftests(void)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_mock_drunk),
		SUBTEST(igt_mock_walk),
		SUBTEST(igt_mock_pot),
		SUBTEST(igt_mock_fill),
		SUBTEST(igt_gtt_reserve),
		SUBTEST(igt_gtt_insert),
	};
	struct drm_i915_private *i915;
	int err;

	i915 = mock_gem_device();
	if (!i915)
		return -ENOMEM;

	mutex_lock(&i915->drm.struct_mutex);
	err = i915_subtests(tests, i915);
	mutex_unlock(&i915->drm.struct_mutex);

	drm_dev_put(&i915->drm);
	return err;
}

int i915_gem_gtt_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_ppgtt_alloc),
		SUBTEST(igt_ppgtt_lowlevel),
		SUBTEST(igt_ppgtt_drunk),
		SUBTEST(igt_ppgtt_walk),
		SUBTEST(igt_ppgtt_pot),
		SUBTEST(igt_ppgtt_fill),
		SUBTEST(igt_ppgtt_shrink),
		SUBTEST(igt_ppgtt_shrink_boom),
		SUBTEST(igt_ggtt_lowlevel),
		SUBTEST(igt_ggtt_drunk),
		SUBTEST(igt_ggtt_walk),
		SUBTEST(igt_ggtt_pot),
		SUBTEST(igt_ggtt_fill),
		SUBTEST(igt_ggtt_page),
	};

	GEM_BUG_ON(offset_in_page(i915->ggtt.vm.total));

	return i915_subtests(tests, i915);
}
);