/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/list_sort.h>
#include <linux/prime_numbers.h>

#include "../i915_selftest.h"
#include "i915_random.h"

#include "mock_context.h"
#include "mock_gem_device.h"

static void fake_free_pages(struct drm_i915_gem_object *obj,
			    struct sg_table *pages)

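/*
 * fake_get_pages() builds an sg_table whose entries all point at a bogus
 * pfn (PFN_BIAS) rather than at freshly allocated pages, so a "DMA object"
 * of many GiB can be created without any real backing memory. Only the GTT
 * bookkeeping ever looks at these entries; nothing in these tests is
 * expected to read or write through them.
 */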
static int fake_get_pages(struct drm_i915_gem_object *obj)
#define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY)
#define PFN_BIAS 0x1000
	struct sg_table *pages;
	struct scatterlist *sg;
	unsigned int sg_page_sizes;
	typeof(obj->base.size) rem;

	pages = kmalloc(sizeof(*pages), GFP);

	rem = round_up(obj->base.size, BIT(31)) >> 31;
	if (sg_alloc_table(pages, rem, GFP)) {

	for (sg = pages->sgl; sg; sg = sg_next(sg)) {
		unsigned long len = min_t(typeof(rem), rem, BIT(31));

		sg_set_page(sg, pfn_to_page(PFN_BIAS), len, 0);
		sg_dma_address(sg) = page_to_phys(sg_page(sg));

	obj->mm.madv = I915_MADV_DONTNEED;

	__i915_gem_object_set_pages(obj, pages, sg_page_sizes);

static void fake_put_pages(struct drm_i915_gem_object *obj,
			   struct sg_table *pages)
	fake_free_pages(obj, pages);
	obj->mm.dirty = false;
	obj->mm.madv = I915_MADV_WILLNEED;

static const struct drm_i915_gem_object_ops fake_ops = {
	.flags = I915_GEM_OBJECT_IS_SHRINKABLE,
	.get_pages = fake_get_pages,
	.put_pages = fake_put_pages,

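/*
 * Wrap the fake page-provider above in a GEM object, so the tests can ask
 * for an object of an arbitrary (often huge) size without allocating the
 * corresponding storage.
 */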
static struct drm_i915_gem_object *
fake_dma_object(struct drm_i915_private *i915, u64 size)
	struct drm_i915_gem_object *obj;

	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));

	if (overflows_type(size, obj->base.size))
		return ERR_PTR(-E2BIG);

	obj = i915_gem_object_alloc(i915);

	drm_gem_private_object_init(&i915->drm, &obj->base, size);
	i915_gem_object_init(obj, &fake_ops);

	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	obj->base.read_domains = I915_GEM_DOMAIN_CPU;
	obj->cache_level = I915_CACHE_NONE;

	/* Preallocate the "backing storage" */
	if (i915_gem_object_pin_pages(obj))
	i915_gem_object_unpin_pages(obj);

	i915_gem_object_put(obj);
	return ERR_PTR(-ENOMEM);

static int igt_ppgtt_alloc(void *arg)
	struct drm_i915_private *dev_priv = arg;
	struct i915_hw_ppgtt *ppgtt;

	/* Allocate a ppgtt and try to fill the entire range */

	if (!USES_PPGTT(dev_priv))

	ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);

	mutex_lock(&dev_priv->drm.struct_mutex);
	err = __hw_ppgtt_init(ppgtt, dev_priv);

	if (!ppgtt->base.allocate_va_range)
		goto err_ppgtt_cleanup;

	/* Check we can allocate the entire range */
	     size <= ppgtt->base.total;
		err = ppgtt->base.allocate_va_range(&ppgtt->base, 0, size);
		if (err == -ENOMEM) {
			pr_info("[1] Ran out of memory for va_range [0 + %llx] [bit %d]\n",
			err = 0; /* virtual space too large! */
			goto err_ppgtt_cleanup;

		ppgtt->base.clear_range(&ppgtt->base, 0, size);

	/* Check we can incrementally allocate the entire range */
	for (last = 0, size = 4096;
	     size <= ppgtt->base.total;
	     last = size, size <<= 2) {
		err = ppgtt->base.allocate_va_range(&ppgtt->base,
		if (err == -ENOMEM) {
			pr_info("[2] Ran out of memory for va_range [%llx + %llx] [bit %d]\n",
				last, size - last, ilog2(size));
			err = 0; /* virtual space too large! */
			goto err_ppgtt_cleanup;

	ppgtt->base.cleanup(&ppgtt->base);
	mutex_unlock(&dev_priv->drm.struct_mutex);

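/*
 * lowlevel_hole drives the address-space vfuncs (allocate_va_range,
 * insert_entries, clear_range) directly, using a zeroed i915_vma on the
 * stack as a stand-in, so PTE insertion is exercised without the rest of
 * the VMA machinery.
 */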
static int lowlevel_hole(struct drm_i915_private *i915,
			 struct i915_address_space *vm,
			 u64 hole_start, u64 hole_end,
			 unsigned long end_time)
	I915_RND_STATE(seed_prng);
	struct i915_vma mock_vma;

	memset(&mock_vma, 0, sizeof(struct i915_vma));

	/* Keep creating larger objects until one cannot fit into the hole */
	for (size = 12; (hole_end - hole_start) >> size; size++) {
		I915_RND_SUBSTATE(prng, seed_prng);
		struct drm_i915_gem_object *obj;
		unsigned int *order, count, n;

		hole_size = (hole_end - hole_start) >> size;
		if (hole_size > KMALLOC_MAX_SIZE / sizeof(u32))
			hole_size = KMALLOC_MAX_SIZE / sizeof(u32);
		count = hole_size >> 1;
			pr_debug("%s: hole is too small [%llx - %llx] >> %d: %lld\n",
				 __func__, hole_start, hole_end, size, hole_size);

			order = i915_random_order(count, &prng);
		} while (count >>= 1);

		GEM_BUG_ON(count * BIT_ULL(size) > vm->total);
		GEM_BUG_ON(hole_start + count * BIT_ULL(size) > hole_end);

		/* Ignore allocation failures (i.e. don't report them as
		 * a test failure) as we are purposefully allocating very
		 * large objects without checking that we have sufficient
		 * memory. We expect to hit -ENOMEM.
		 */
		obj = fake_dma_object(i915, BIT_ULL(size));

		GEM_BUG_ON(obj->base.size != BIT_ULL(size));

		if (i915_gem_object_pin_pages(obj)) {
			i915_gem_object_put(obj);

		for (n = 0; n < count; n++) {
			u64 addr = hole_start + order[n] * BIT_ULL(size);

			GEM_BUG_ON(addr + BIT_ULL(size) > vm->total);

			if (igt_timeout(end_time,
					"%s timed out before %d/%d\n",
					__func__, n, count)) {
				hole_end = hole_start; /* quit */

			if (vm->allocate_va_range &&
			    vm->allocate_va_range(vm, addr, BIT_ULL(size)))

			mock_vma.pages = obj->mm.pages;
			mock_vma.node.size = BIT_ULL(size);
			mock_vma.node.start = addr;

			intel_runtime_pm_get(i915);
			vm->insert_entries(vm, &mock_vma, I915_CACHE_NONE, 0);
			intel_runtime_pm_put(i915);

		i915_random_reorder(order, count, &prng);
		for (n = 0; n < count; n++) {
			u64 addr = hole_start + order[n] * BIT_ULL(size);

			GEM_BUG_ON(addr + BIT_ULL(size) > vm->total);
			vm->clear_range(vm, addr, BIT_ULL(size));

		i915_gem_object_unpin_pages(obj);
		i915_gem_object_put(obj);

static void close_object_list(struct list_head *objects,
			      struct i915_address_space *vm)
	struct drm_i915_gem_object *obj, *on;

	list_for_each_entry_safe(obj, on, objects, st_link) {
		struct i915_vma *vma;

		vma = i915_vma_instance(obj, vm, NULL);

		ignored = i915_vma_unbind(vma);
		/* Only ppgtt vma may be closed before the object is freed */
		if (!IS_ERR(vma) && !i915_vma_is_ggtt(vma))

		list_del(&obj->st_link);
		i915_gem_object_put(obj);

static int fill_hole(struct drm_i915_private *i915,
		     struct i915_address_space *vm,
		     u64 hole_start, u64 hole_end,
		     unsigned long end_time)
	const u64 hole_size = hole_end - hole_start;
	struct drm_i915_gem_object *obj;
	const unsigned long max_pages =
		min_t(u64, ULONG_MAX - 1, hole_size/2 >> PAGE_SHIFT);
	const unsigned long max_step = max(int_sqrt(max_pages), 2UL);
	unsigned long npages, prime, flags;
	struct i915_vma *vma;

	/* Try binding many VMA working inwards from either edge */

	flags = PIN_OFFSET_FIXED | PIN_USER;
	if (i915_is_ggtt(vm))

	for_each_prime_number_from(prime, 2, max_step) {
		for (npages = 1; npages <= max_pages; npages *= prime) {
			const u64 full_size = npages << PAGE_SHIFT;
				{ "top-down", hole_end, -1, },
				{ "bottom-up", hole_start, 1, },

			obj = fake_dma_object(i915, full_size);

			list_add(&obj->st_link, &objects);

			/* Align differing sized objects against the edges, and
			 * check we don't walk off into the void when binding
			 */
			for (p = phases; p->name; p++) {
				list_for_each_entry(obj, &objects, st_link) {
					vma = i915_vma_instance(obj, vm, NULL);

					if (offset < hole_start + obj->base.size)
					offset -= obj->base.size;

					err = i915_vma_pin(vma, 0, 0, offset | flags);
						pr_err("%s(%s) pin (forward) failed with err=%d on size=%lu pages (prime=%lu), offset=%llx\n",
						       __func__, p->name, err, npages, prime, offset);

					if (!drm_mm_node_allocated(&vma->node) ||
					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
						pr_err("%s(%s) (forward) insert failed: vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
						       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),

					if (offset + obj->base.size > hole_end)
					offset += obj->base.size;

				list_for_each_entry(obj, &objects, st_link) {
					vma = i915_vma_instance(obj, vm, NULL);

					if (offset < hole_start + obj->base.size)
					offset -= obj->base.size;

					if (!drm_mm_node_allocated(&vma->node) ||
					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
						pr_err("%s(%s) (forward) moved vma.node=%llx + %llx, expected offset %llx\n",
						       __func__, p->name, vma->node.start, vma->node.size,

					err = i915_vma_unbind(vma);
						pr_err("%s(%s) (forward) unbind of vma.node=%llx + %llx failed with err=%d\n",
						       __func__, p->name, vma->node.start, vma->node.size,

					if (offset + obj->base.size > hole_end)
					offset += obj->base.size;

				list_for_each_entry_reverse(obj, &objects, st_link) {
					vma = i915_vma_instance(obj, vm, NULL);

					if (offset < hole_start + obj->base.size)
					offset -= obj->base.size;

					err = i915_vma_pin(vma, 0, 0, offset | flags);
						pr_err("%s(%s) pin (backward) failed with err=%d on size=%lu pages (prime=%lu), offset=%llx\n",
						       __func__, p->name, err, npages, prime, offset);

					if (!drm_mm_node_allocated(&vma->node) ||
					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
						pr_err("%s(%s) (backward) insert failed: vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
						       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),

					if (offset + obj->base.size > hole_end)
					offset += obj->base.size;

				list_for_each_entry_reverse(obj, &objects, st_link) {
					vma = i915_vma_instance(obj, vm, NULL);

					if (offset < hole_start + obj->base.size)
					offset -= obj->base.size;

					if (!drm_mm_node_allocated(&vma->node) ||
					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
						pr_err("%s(%s) (backward) moved vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
						       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),

					err = i915_vma_unbind(vma);
						pr_err("%s(%s) (backward) unbind of vma.node=%llx + %llx failed with err=%d\n",
						       __func__, p->name, vma->node.start, vma->node.size,

					if (offset + obj->base.size > hole_end)
					offset += obj->base.size;

			if (igt_timeout(end_time, "%s timed out (npages=%lu, prime=%lu)\n",
					__func__, npages, prime)) {
				close_object_list(&objects, vm);

	close_object_list(&objects, vm);

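/*
 * walk_hole binds a single object at every position it can occupy within
 * the hole, verifying each placement and unbind before moving on.
 */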
static int walk_hole(struct drm_i915_private *i915,
		     struct i915_address_space *vm,
		     u64 hole_start, u64 hole_end,
		     unsigned long end_time)
	const u64 hole_size = hole_end - hole_start;
	const unsigned long max_pages =
		min_t(u64, ULONG_MAX - 1, hole_size >> PAGE_SHIFT);

	/* Try binding a single VMA in different positions within the hole */

	flags = PIN_OFFSET_FIXED | PIN_USER;
	if (i915_is_ggtt(vm))

	for_each_prime_number_from(size, 1, max_pages) {
		struct drm_i915_gem_object *obj;
		struct i915_vma *vma;

		obj = fake_dma_object(i915, size << PAGE_SHIFT);

		vma = i915_vma_instance(obj, vm, NULL);

		for (addr = hole_start;
		     addr + obj->base.size < hole_end;
		     addr += obj->base.size) {
			err = i915_vma_pin(vma, 0, 0, addr | flags);
				pr_err("%s bind failed at %llx + %llx [hole %llx - %llx] with err=%d\n",
				       __func__, addr, vma->size,
				       hole_start, hole_end, err);

			if (!drm_mm_node_allocated(&vma->node) ||
			    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
				pr_err("%s incorrect at %llx + %llx\n",
				       __func__, addr, vma->size);

			err = i915_vma_unbind(vma);
				pr_err("%s unbind failed at %llx + %llx with err=%d\n",
				       __func__, addr, vma->size, err);

			GEM_BUG_ON(drm_mm_node_allocated(&vma->node));

			if (igt_timeout(end_time,
					"%s timed out at %llx\n",

		if (!i915_vma_is_ggtt(vma))

		i915_gem_object_put(obj);

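/*
 * pot_hole pins a two-page object straddling every power-of-two boundary
 * inside the hole and checks its placement each time; such boundaries are
 * where transitions between page-table levels occur, so this probes those
 * edge cases cheaply.
 */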
static int pot_hole(struct drm_i915_private *i915,
		    struct i915_address_space *vm,
		    u64 hole_start, u64 hole_end,
		    unsigned long end_time)
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;

	flags = PIN_OFFSET_FIXED | PIN_USER;
	if (i915_is_ggtt(vm))

	obj = i915_gem_object_create_internal(i915, 2 * I915_GTT_PAGE_SIZE);

	vma = i915_vma_instance(obj, vm, NULL);

	/* Insert a pair of pages across every pot boundary within the hole */
	for (pot = fls64(hole_end - 1) - 1;
	     pot > ilog2(2 * I915_GTT_PAGE_SIZE);
		u64 step = BIT_ULL(pot);

		for (addr = round_up(hole_start + I915_GTT_PAGE_SIZE, step) - I915_GTT_PAGE_SIZE;
		     addr <= round_down(hole_end - 2*I915_GTT_PAGE_SIZE, step) - I915_GTT_PAGE_SIZE;
			err = i915_vma_pin(vma, 0, 0, addr | flags);
				pr_err("%s failed to pin object at %llx in hole [%llx - %llx], with err=%d\n",
				       hole_start, hole_end,

			if (!drm_mm_node_allocated(&vma->node) ||
			    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
				pr_err("%s incorrect at %llx + %llx\n",
				       __func__, addr, vma->size);
				err = i915_vma_unbind(vma);

			err = i915_vma_unbind(vma);

		if (igt_timeout(end_time,
				"%s timed out after %d/%d\n",
				__func__, pot, fls64(hole_end - 1) - 1)) {

	if (!i915_vma_is_ggtt(vma))

	i915_gem_object_put(obj);

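/*
 * drunk_hole is the randomised variant: objects of increasing power-of-two
 * size are pinned at positions visited in random order, so neighbouring
 * bindings never arrive in address order.
 */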
static int drunk_hole(struct drm_i915_private *i915,
		      struct i915_address_space *vm,
		      u64 hole_start, u64 hole_end,
		      unsigned long end_time)
	I915_RND_STATE(prng);

	flags = PIN_OFFSET_FIXED | PIN_USER;
	if (i915_is_ggtt(vm))

	/* Keep creating larger objects until one cannot fit into the hole */
	for (size = 12; (hole_end - hole_start) >> size; size++) {
		struct drm_i915_gem_object *obj;
		unsigned int *order, count, n;
		struct i915_vma *vma;

		hole_size = (hole_end - hole_start) >> size;
		if (hole_size > KMALLOC_MAX_SIZE / sizeof(u32))
			hole_size = KMALLOC_MAX_SIZE / sizeof(u32);
		count = hole_size >> 1;
			pr_debug("%s: hole is too small [%llx - %llx] >> %d: %lld\n",
				 __func__, hole_start, hole_end, size, hole_size);

			order = i915_random_order(count, &prng);
		} while (count >>= 1);

		/* Ignore allocation failures (i.e. don't report them as
		 * a test failure) as we are purposefully allocating very
		 * large objects without checking that we have sufficient
		 * memory. We expect to hit -ENOMEM.
		 */
		obj = fake_dma_object(i915, BIT_ULL(size));

		vma = i915_vma_instance(obj, vm, NULL);

		GEM_BUG_ON(vma->size != BIT_ULL(size));

		for (n = 0; n < count; n++) {
			u64 addr = hole_start + order[n] * BIT_ULL(size);

			err = i915_vma_pin(vma, 0, 0, addr | flags);
				pr_err("%s failed to pin object at %llx + %llx in hole [%llx - %llx], with err=%d\n",
				       hole_start, hole_end,

			if (!drm_mm_node_allocated(&vma->node) ||
			    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
				pr_err("%s incorrect at %llx + %llx\n",
				       __func__, addr, BIT_ULL(size));
				err = i915_vma_unbind(vma);

			err = i915_vma_unbind(vma);

			if (igt_timeout(end_time,
					"%s timed out after %d/%d\n",
					__func__, n, count)) {

		if (!i915_vma_is_ggtt(vma))

		i915_gem_object_put(obj);

static int __shrink_hole(struct drm_i915_private *i915,
			 struct i915_address_space *vm,
			 u64 hole_start, u64 hole_end,
			 unsigned long end_time)
	struct drm_i915_gem_object *obj;
	unsigned long flags = PIN_OFFSET_FIXED | PIN_USER;
	unsigned int order = 12;

	/* Keep creating larger objects until one cannot fit into the hole */
	for (addr = hole_start; addr < hole_end; ) {
		struct i915_vma *vma;
		u64 size = BIT_ULL(order++);

		size = min(size, hole_end - addr);
		obj = fake_dma_object(i915, size);

		list_add(&obj->st_link, &objects);

		vma = i915_vma_instance(obj, vm, NULL);

		GEM_BUG_ON(vma->size != size);

		err = i915_vma_pin(vma, 0, 0, addr | flags);
			pr_err("%s failed to pin object at %llx + %llx in hole [%llx - %llx], with err=%d\n",
			       __func__, addr, size, hole_start, hole_end, err);

		if (!drm_mm_node_allocated(&vma->node) ||
		    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
			pr_err("%s incorrect at %llx + %llx\n",
			       __func__, addr, size);
			err = i915_vma_unbind(vma);

		if (igt_timeout(end_time,
				"%s timed out at offset %llx [%llx - %llx]\n",
				__func__, addr, hole_start, hole_end)) {

	close_object_list(&objects, vm);

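/*
 * shrink_hole re-runs __shrink_hole with the vm's fault injection enabled
 * at varying intervals, so that allocation failures are provoked at
 * effectively random points while filling the hole.
 */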
static int shrink_hole(struct drm_i915_private *i915,
		       struct i915_address_space *vm,
		       u64 hole_start, u64 hole_end,
		       unsigned long end_time)
	vm->fault_attr.probability = 999;
	atomic_set(&vm->fault_attr.times, -1);

	for_each_prime_number_from(prime, 0, ULONG_MAX - 1) {
		vm->fault_attr.interval = prime;
		err = __shrink_hole(i915, vm, hole_start, hole_end, end_time);

	memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));

static int exercise_ppgtt(struct drm_i915_private *dev_priv,
			  int (*func)(struct drm_i915_private *i915,
				      struct i915_address_space *vm,
				      u64 hole_start, u64 hole_end,
				      unsigned long end_time))
	struct drm_file *file;
	struct i915_hw_ppgtt *ppgtt;
	IGT_TIMEOUT(end_time);

	if (!USES_FULL_PPGTT(dev_priv))

	file = mock_file(dev_priv);
		return PTR_ERR(file);

	mutex_lock(&dev_priv->drm.struct_mutex);
	ppgtt = i915_ppgtt_create(dev_priv, file->driver_priv, "mock");
		err = PTR_ERR(ppgtt);

	GEM_BUG_ON(offset_in_page(ppgtt->base.total));
	GEM_BUG_ON(ppgtt->base.closed);

	err = func(dev_priv, &ppgtt->base, 0, ppgtt->base.total, end_time);

	i915_ppgtt_close(&ppgtt->base);
	i915_ppgtt_put(ppgtt);

	mutex_unlock(&dev_priv->drm.struct_mutex);

	mock_file_free(dev_priv, file);

static int igt_ppgtt_fill(void *arg)
	return exercise_ppgtt(arg, fill_hole);

static int igt_ppgtt_walk(void *arg)
	return exercise_ppgtt(arg, walk_hole);

static int igt_ppgtt_pot(void *arg)
	return exercise_ppgtt(arg, pot_hole);

static int igt_ppgtt_drunk(void *arg)
	return exercise_ppgtt(arg, drunk_hole);

static int igt_ppgtt_lowlevel(void *arg)
	return exercise_ppgtt(arg, lowlevel_hole);

static int igt_ppgtt_shrink(void *arg)
	return exercise_ppgtt(arg, shrink_hole);

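/*
 * The GGTT already contains allocations, so the GGTT variants below walk
 * its remaining holes rather than the full range. Sorting the hole stack
 * by address lets the loop track the last hole visited and resume safely
 * after the drm_mm has been manipulated by the test body.
 */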
static int sort_holes(void *priv, struct list_head *A, struct list_head *B)
	struct drm_mm_node *a = list_entry(A, typeof(*a), hole_stack);
	struct drm_mm_node *b = list_entry(B, typeof(*b), hole_stack);

	if (a->start < b->start)

static int exercise_ggtt(struct drm_i915_private *i915,
			 int (*func)(struct drm_i915_private *i915,
				     struct i915_address_space *vm,
				     u64 hole_start, u64 hole_end,
				     unsigned long end_time))
	struct i915_ggtt *ggtt = &i915->ggtt;
	u64 hole_start, hole_end, last = 0;
	struct drm_mm_node *node;
	IGT_TIMEOUT(end_time);

	mutex_lock(&i915->drm.struct_mutex);

	list_sort(NULL, &ggtt->base.mm.hole_stack, sort_holes);
	drm_mm_for_each_hole(node, &ggtt->base.mm, hole_start, hole_end) {
		if (hole_start < last)

		if (ggtt->base.mm.color_adjust)
			ggtt->base.mm.color_adjust(node, 0,
						   &hole_start, &hole_end);
		if (hole_start >= hole_end)

		err = func(i915, &ggtt->base, hole_start, hole_end, end_time);

		/* As we have manipulated the drm_mm, the list may be corrupt */

	mutex_unlock(&i915->drm.struct_mutex);

static int igt_ggtt_fill(void *arg)
	return exercise_ggtt(arg, fill_hole);

static int igt_ggtt_walk(void *arg)
	return exercise_ggtt(arg, walk_hole);

static int igt_ggtt_pot(void *arg)
	return exercise_ggtt(arg, pot_hole);

static int igt_ggtt_drunk(void *arg)
	return exercise_ggtt(arg, drunk_hole);

static int igt_ggtt_lowlevel(void *arg)
	return exercise_ggtt(arg, lowlevel_hole);

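/*
 * igt_ggtt_page reserves a scratch block of GGTT address space, then uses
 * insert_page() to map one dummy page at random offsets within it, writing
 * a distinct value through the mappable aperture each time and reading it
 * back afterwards to confirm the PTE pointed where expected.
 */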
static int igt_ggtt_page(void *arg)
	const unsigned int count = PAGE_SIZE/sizeof(u32);
	I915_RND_STATE(prng);
	struct drm_i915_private *i915 = arg;
	struct i915_ggtt *ggtt = &i915->ggtt;
	struct drm_i915_gem_object *obj;
	struct drm_mm_node tmp;
	unsigned int *order, n;

	mutex_lock(&i915->drm.struct_mutex);

	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);

	err = i915_gem_object_pin_pages(obj);

	memset(&tmp, 0, sizeof(tmp));
	err = drm_mm_insert_node_in_range(&ggtt->base.mm, &tmp,
					  1024 * PAGE_SIZE, 0,
					  I915_COLOR_UNEVICTABLE,
					  0, ggtt->mappable_end,

	order = i915_random_order(count, &prng);

	intel_runtime_pm_get(i915);
	for (n = 0; n < count; n++) {
		u64 offset = tmp.start + order[n] * PAGE_SIZE;

		ggtt->base.insert_page(&ggtt->base,
				       i915_gem_object_get_dma_address(obj, 0),
				       offset, I915_CACHE_NONE, 0);

		vaddr = io_mapping_map_atomic_wc(&ggtt->iomap, offset);
		iowrite32(n, vaddr + n);
		io_mapping_unmap_atomic(vaddr);

		ggtt->base.clear_range(&ggtt->base, offset, PAGE_SIZE);

	i915_random_reorder(order, count, &prng);
	for (n = 0; n < count; n++) {
		u64 offset = tmp.start + order[n] * PAGE_SIZE;

		ggtt->base.insert_page(&ggtt->base,
				       i915_gem_object_get_dma_address(obj, 0),
				       offset, I915_CACHE_NONE, 0);

		vaddr = io_mapping_map_atomic_wc(&ggtt->iomap, offset);
		val = ioread32(vaddr + n);
		io_mapping_unmap_atomic(vaddr);

		ggtt->base.clear_range(&ggtt->base, offset, PAGE_SIZE);

			pr_err("insert page failed: found %d, expected %d\n",

	intel_runtime_pm_put(i915);

	drm_mm_remove_node(&tmp);

	i915_gem_object_unpin_pages(obj);

	i915_gem_object_put(obj);

	mutex_unlock(&i915->drm.struct_mutex);

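/*
 * track_vma_bind mimics just enough of the bookkeeping a real bind would
 * perform (bind count, page reference, inactive list) so that eviction and
 * unbind behave sensibly for nodes placed by hand with
 * i915_gem_gtt_reserve() or i915_gem_gtt_insert().
 */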
static void track_vma_bind(struct i915_vma *vma)
	struct drm_i915_gem_object *obj = vma->obj;

	obj->bind_count++; /* track for eviction later */
	__i915_gem_object_pin_pages(obj);

	vma->pages = obj->mm.pages;
	list_move_tail(&vma->vm_link, &vma->vm->inactive_list);

static int exercise_mock(struct drm_i915_private *i915,
			 int (*func)(struct drm_i915_private *i915,
				     struct i915_address_space *vm,
				     u64 hole_start, u64 hole_end,
				     unsigned long end_time))
	struct i915_gem_context *ctx;
	struct i915_hw_ppgtt *ppgtt;
	IGT_TIMEOUT(end_time);

	ctx = mock_context(i915, "mock");

	err = func(i915, &ppgtt->base, 0, ppgtt->base.total, end_time);

	mock_context_close(ctx);

static int igt_mock_fill(void *arg)
	return exercise_mock(arg, fill_hole);

static int igt_mock_walk(void *arg)
	return exercise_mock(arg, walk_hole);

static int igt_mock_pot(void *arg)
	return exercise_mock(arg, pot_hole);

static int igt_mock_drunk(void *arg)
	return exercise_mock(arg, drunk_hole);

static int igt_gtt_reserve(void *arg)
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj, *on;

	/* i915_gem_gtt_reserve() tries to reserve the precise range
	 * for the node, and evicts if it has to. So our test checks that
	 * it can give us the requested space and prevent overlaps.
	 */

	/* Start by filling the GGTT */
	     total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.base.total;
	     total += 2*I915_GTT_PAGE_SIZE) {
		struct i915_vma *vma;

		obj = i915_gem_object_create_internal(i915, 2*PAGE_SIZE);

		err = i915_gem_object_pin_pages(obj);
			i915_gem_object_put(obj);

		list_add(&obj->st_link, &objects);

		vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);

		err = i915_gem_gtt_reserve(&i915->ggtt.base, &vma->node,
			pr_err("i915_gem_gtt_reserve (pass 1) failed at %llu/%llu with err=%d\n",
			       total, i915->ggtt.base.total, err);

		track_vma_bind(vma);

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
		if (vma->node.start != total ||
		    vma->node.size != 2*I915_GTT_PAGE_SIZE) {
			pr_err("i915_gem_gtt_reserve (pass 1) placement failed, found (%llx + %llx), expected (%llx + %lx)\n",
			       vma->node.start, vma->node.size,
			       total, 2*I915_GTT_PAGE_SIZE);

	/* Now we start forcing evictions */
	for (total = I915_GTT_PAGE_SIZE;
	     total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.base.total;
	     total += 2*I915_GTT_PAGE_SIZE) {
		struct i915_vma *vma;

		obj = i915_gem_object_create_internal(i915, 2*PAGE_SIZE);

		err = i915_gem_object_pin_pages(obj);
			i915_gem_object_put(obj);

		list_add(&obj->st_link, &objects);

		vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);

		err = i915_gem_gtt_reserve(&i915->ggtt.base, &vma->node,
			pr_err("i915_gem_gtt_reserve (pass 2) failed at %llu/%llu with err=%d\n",
			       total, i915->ggtt.base.total, err);

		track_vma_bind(vma);

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
		if (vma->node.start != total ||
		    vma->node.size != 2*I915_GTT_PAGE_SIZE) {
			pr_err("i915_gem_gtt_reserve (pass 2) placement failed, found (%llx + %llx), expected (%llx + %lx)\n",
			       vma->node.start, vma->node.size,
			       total, 2*I915_GTT_PAGE_SIZE);

	/* And then try at random */
	list_for_each_entry_safe(obj, on, &objects, st_link) {
		struct i915_vma *vma;

		vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);

		err = i915_vma_unbind(vma);
			pr_err("i915_vma_unbind failed with err=%d!\n", err);

		offset = random_offset(0, i915->ggtt.base.total,
				       2*I915_GTT_PAGE_SIZE,
				       I915_GTT_MIN_ALIGNMENT);

		err = i915_gem_gtt_reserve(&i915->ggtt.base, &vma->node,
			pr_err("i915_gem_gtt_reserve (pass 3) failed at %llu/%llu with err=%d\n",
			       total, i915->ggtt.base.total, err);

		track_vma_bind(vma);

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
		if (vma->node.start != offset ||
		    vma->node.size != 2*I915_GTT_PAGE_SIZE) {
			pr_err("i915_gem_gtt_reserve (pass 3) placement failed, found (%llx + %llx), expected (%llx + %lx)\n",
			       vma->node.start, vma->node.size,
			       offset, 2*I915_GTT_PAGE_SIZE);

	list_for_each_entry_safe(obj, on, &objects, st_link) {
		i915_gem_object_unpin_pages(obj);
		i915_gem_object_put(obj);

static int igt_gtt_insert(void *arg)
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj, *on;
	struct drm_mm_node tmp = {};
	const struct invalid_insert {
	} invalid_insert[] = {
			i915->ggtt.base.total + I915_GTT_PAGE_SIZE, 0,
			0, i915->ggtt.base.total,

			2*I915_GTT_PAGE_SIZE, 0,
			0, I915_GTT_PAGE_SIZE,

			-(u64)I915_GTT_PAGE_SIZE, 0,
			0, 4*I915_GTT_PAGE_SIZE,

			-(u64)2*I915_GTT_PAGE_SIZE, 2*I915_GTT_PAGE_SIZE,
			0, 4*I915_GTT_PAGE_SIZE,

			I915_GTT_PAGE_SIZE, I915_GTT_MIN_ALIGNMENT << 1,
			I915_GTT_MIN_ALIGNMENT, I915_GTT_MIN_ALIGNMENT << 1,

	/* i915_gem_gtt_insert() tries to allocate some free space in the GTT
	 * to the node, evicting if required.
	 */

	/* Check a couple of obviously invalid requests */
	for (ii = invalid_insert; ii->size; ii++) {
		err = i915_gem_gtt_insert(&i915->ggtt.base, &tmp,
					  ii->size, ii->alignment,
					  I915_COLOR_UNEVICTABLE,
		if (err != -ENOSPC) {
			pr_err("Invalid i915_gem_gtt_insert(.size=%llx, .alignment=%llx, .start=%llx, .end=%llx) succeeded (err=%d)\n",
			       ii->size, ii->alignment, ii->start, ii->end,

	/* Start by filling the GGTT */
	     total + I915_GTT_PAGE_SIZE <= i915->ggtt.base.total;
	     total += I915_GTT_PAGE_SIZE) {
		struct i915_vma *vma;

		obj = i915_gem_object_create_internal(i915, I915_GTT_PAGE_SIZE);

		err = i915_gem_object_pin_pages(obj);
			i915_gem_object_put(obj);

		list_add(&obj->st_link, &objects);

		vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);

		err = i915_gem_gtt_insert(&i915->ggtt.base, &vma->node,
					  obj->base.size, 0, obj->cache_level,
					  0, i915->ggtt.base.total,
		if (err == -ENOSPC) {
			/* maxed out the GGTT space */
			i915_gem_object_put(obj);

			pr_err("i915_gem_gtt_insert (pass 1) failed at %llu/%llu with err=%d\n",
			       total, i915->ggtt.base.total, err);

		track_vma_bind(vma);
		__i915_vma_pin(vma);

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));

	list_for_each_entry(obj, &objects, st_link) {
		struct i915_vma *vma;

		vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);

		if (!drm_mm_node_allocated(&vma->node)) {
			pr_err("VMA was unexpectedly evicted!\n");

		__i915_vma_unpin(vma);

	/* If we then reinsert, we should find the same hole */
	list_for_each_entry_safe(obj, on, &objects, st_link) {
		struct i915_vma *vma;

		vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
		offset = vma->node.start;

		err = i915_vma_unbind(vma);
			pr_err("i915_vma_unbind failed with err=%d!\n", err);

		err = i915_gem_gtt_insert(&i915->ggtt.base, &vma->node,
					  obj->base.size, 0, obj->cache_level,
					  0, i915->ggtt.base.total,
			pr_err("i915_gem_gtt_insert (pass 2) failed at %llu/%llu with err=%d\n",
			       total, i915->ggtt.base.total, err);

		track_vma_bind(vma);

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
		if (vma->node.start != offset) {
			pr_err("i915_gem_gtt_insert did not return node to its previous location (the only hole), expected address %llx, found %llx\n",
			       offset, vma->node.start);

	/* And then force evictions */
	     total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.base.total;
	     total += 2*I915_GTT_PAGE_SIZE) {
		struct i915_vma *vma;

		obj = i915_gem_object_create_internal(i915, 2*I915_GTT_PAGE_SIZE);

		err = i915_gem_object_pin_pages(obj);
			i915_gem_object_put(obj);

		list_add(&obj->st_link, &objects);

		vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);

		err = i915_gem_gtt_insert(&i915->ggtt.base, &vma->node,
					  obj->base.size, 0, obj->cache_level,
					  0, i915->ggtt.base.total,
			pr_err("i915_gem_gtt_insert (pass 3) failed at %llu/%llu with err=%d\n",
			       total, i915->ggtt.base.total, err);

		track_vma_bind(vma);

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));

	list_for_each_entry_safe(obj, on, &objects, st_link) {
		i915_gem_object_unpin_pages(obj);
		i915_gem_object_put(obj);

int i915_gem_gtt_mock_selftests(void)
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_mock_drunk),
		SUBTEST(igt_mock_walk),
		SUBTEST(igt_mock_pot),
		SUBTEST(igt_mock_fill),
		SUBTEST(igt_gtt_reserve),
		SUBTEST(igt_gtt_insert),
	struct drm_i915_private *i915;

	i915 = mock_gem_device();

	mutex_lock(&i915->drm.struct_mutex);
	err = i915_subtests(tests, i915);
	mutex_unlock(&i915->drm.struct_mutex);

	drm_dev_unref(&i915->drm);

int i915_gem_gtt_live_selftests(struct drm_i915_private *i915)
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_ppgtt_alloc),
		SUBTEST(igt_ppgtt_lowlevel),
		SUBTEST(igt_ppgtt_drunk),
		SUBTEST(igt_ppgtt_walk),
		SUBTEST(igt_ppgtt_pot),
		SUBTEST(igt_ppgtt_fill),
		SUBTEST(igt_ppgtt_shrink),
		SUBTEST(igt_ggtt_lowlevel),
		SUBTEST(igt_ggtt_drunk),
		SUBTEST(igt_ggtt_walk),
		SUBTEST(igt_ggtt_pot),
		SUBTEST(igt_ggtt_fill),
		SUBTEST(igt_ggtt_page),

	GEM_BUG_ON(offset_in_page(i915->ggtt.base.total));

	return i915_subtests(tests, i915);