drivers/gpu/drm/i915/selftests/i915_gem_gtt.c

/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/list_sort.h>
#include <linux/prime_numbers.h>

#include "../i915_selftest.h"
#include "i915_random.h"

#include "mock_context.h"
#include "mock_drm.h"
#include "mock_gem_device.h"

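/*
 * The "fake" object helpers below provide backing storage without consuming
 * real memory: every scatterlist entry points at the same biased PFN and the
 * object is marked DONTNEED, which lets these tests bind objects spanning
 * huge chunks of the GTT address space cheaply.
 */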
static void fake_free_pages(struct drm_i915_gem_object *obj,
			    struct sg_table *pages)
{
	sg_free_table(pages);
	kfree(pages);
}

static int fake_get_pages(struct drm_i915_gem_object *obj)
{
#define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY)
#define PFN_BIAS 0x1000
	struct sg_table *pages;
	struct scatterlist *sg;
	unsigned int sg_page_sizes;
	typeof(obj->base.size) rem;

	pages = kmalloc(sizeof(*pages), GFP);
	if (!pages)
		return -ENOMEM;

	rem = round_up(obj->base.size, BIT(31)) >> 31;
	if (sg_alloc_table(pages, rem, GFP)) {
		kfree(pages);
		return -ENOMEM;
	}

	sg_page_sizes = 0;
	rem = obj->base.size;
	for (sg = pages->sgl; sg; sg = sg_next(sg)) {
		unsigned long len = min_t(typeof(rem), rem, BIT(31));

		GEM_BUG_ON(!len);
		sg_set_page(sg, pfn_to_page(PFN_BIAS), len, 0);
		sg_dma_address(sg) = page_to_phys(sg_page(sg));
		sg_dma_len(sg) = len;
		sg_page_sizes |= len;

		rem -= len;
	}
	GEM_BUG_ON(rem);

	obj->mm.madv = I915_MADV_DONTNEED;

	__i915_gem_object_set_pages(obj, pages, sg_page_sizes);

	return 0;
#undef GFP
}

static void fake_put_pages(struct drm_i915_gem_object *obj,
			   struct sg_table *pages)
{
	fake_free_pages(obj, pages);
	obj->mm.dirty = false;
	obj->mm.madv = I915_MADV_WILLNEED;
}

static const struct drm_i915_gem_object_ops fake_ops = {
	.flags = I915_GEM_OBJECT_IS_SHRINKABLE,
	.get_pages = fake_get_pages,
	.put_pages = fake_put_pages,
};

static struct drm_i915_gem_object *
fake_dma_object(struct drm_i915_private *i915, u64 size)
{
	struct drm_i915_gem_object *obj;

	GEM_BUG_ON(!size);
	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));

	if (overflows_type(size, obj->base.size))
		return ERR_PTR(-E2BIG);

	obj = i915_gem_object_alloc(i915);
	if (!obj)
		goto err;

	drm_gem_private_object_init(&i915->drm, &obj->base, size);
	i915_gem_object_init(obj, &fake_ops);

	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	obj->base.read_domains = I915_GEM_DOMAIN_CPU;
	obj->cache_level = I915_CACHE_NONE;

	/* Preallocate the "backing storage" */
	if (i915_gem_object_pin_pages(obj))
		goto err_obj;

	i915_gem_object_unpin_pages(obj);
	return obj;

err_obj:
	i915_gem_object_put(obj);
err:
	return ERR_PTR(-ENOMEM);
}

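/*
 * igt_ppgtt_alloc: exercise allocate_va_range() directly on a freshly
 * initialised ppGTT, first over power-of-four sized ranges from the start of
 * the address space and then incrementally, treating -ENOMEM as an expected
 * outcome when the virtual space is larger than available memory.
 */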
static int igt_ppgtt_alloc(void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	struct i915_hw_ppgtt *ppgtt;
	u64 size, last;
	int err;

	/* Allocate a ppgtt and try to fill the entire range */

	if (!USES_PPGTT(dev_priv))
		return 0;

	ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
	if (!ppgtt)
		return -ENOMEM;

	mutex_lock(&dev_priv->drm.struct_mutex);
	err = __hw_ppgtt_init(ppgtt, dev_priv);
	if (err)
		goto err_ppgtt;

	if (!ppgtt->base.allocate_va_range)
		goto err_ppgtt_cleanup;

	/* Check we can allocate the entire range */
	for (size = 4096;
	     size <= ppgtt->base.total;
	     size <<= 2) {
		err = ppgtt->base.allocate_va_range(&ppgtt->base, 0, size);
		if (err) {
			if (err == -ENOMEM) {
				pr_info("[1] Ran out of memory for va_range [0 + %llx] [bit %d]\n",
					size, ilog2(size));
				err = 0; /* virtual space too large! */
			}
			goto err_ppgtt_cleanup;
		}

		ppgtt->base.clear_range(&ppgtt->base, 0, size);
	}

	/* Check we can incrementally allocate the entire range */
	for (last = 0, size = 4096;
	     size <= ppgtt->base.total;
	     last = size, size <<= 2) {
		err = ppgtt->base.allocate_va_range(&ppgtt->base,
						    last, size - last);
		if (err) {
			if (err == -ENOMEM) {
				pr_info("[2] Ran out of memory for va_range [%llx + %llx] [bit %d]\n",
					last, size - last, ilog2(size));
				err = 0; /* virtual space too large! */
			}
			goto err_ppgtt_cleanup;
		}
	}

err_ppgtt_cleanup:
	ppgtt->base.cleanup(&ppgtt->base);
err_ppgtt:
	mutex_unlock(&dev_priv->drm.struct_mutex);
	kfree(ppgtt);
	return err;
}

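/*
 * lowlevel_hole: drive the vm->allocate_va_range, insert_entries and
 * clear_range hooks directly (bypassing the VMA machinery, hence the
 * on-stack mock_vma) for power-of-two sized objects at randomised offsets
 * within the hole.
 */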
static int lowlevel_hole(struct drm_i915_private *i915,
			 struct i915_address_space *vm,
			 u64 hole_start, u64 hole_end,
			 unsigned long end_time)
{
	I915_RND_STATE(seed_prng);
	unsigned int size;
	struct i915_vma mock_vma;

	memset(&mock_vma, 0, sizeof(struct i915_vma));

	/* Keep creating larger objects until one cannot fit into the hole */
	for (size = 12; (hole_end - hole_start) >> size; size++) {
		I915_RND_SUBSTATE(prng, seed_prng);
		struct drm_i915_gem_object *obj;
		unsigned int *order, count, n;
		u64 hole_size;

		hole_size = (hole_end - hole_start) >> size;
		if (hole_size > KMALLOC_MAX_SIZE / sizeof(u32))
			hole_size = KMALLOC_MAX_SIZE / sizeof(u32);
		count = hole_size >> 1;
		if (!count) {
			pr_debug("%s: hole is too small [%llx - %llx] >> %d: %lld\n",
				 __func__, hole_start, hole_end, size, hole_size);
			break;
		}

		do {
			order = i915_random_order(count, &prng);
			if (order)
				break;
		} while (count >>= 1);
		if (!count)
			return -ENOMEM;
		GEM_BUG_ON(!order);

		GEM_BUG_ON(count * BIT_ULL(size) > vm->total);
		GEM_BUG_ON(hole_start + count * BIT_ULL(size) > hole_end);

		/* Ignore allocation failures (i.e. don't report them as
		 * a test failure) as we are purposefully allocating very
		 * large objects without checking that we have sufficient
		 * memory. We expect to hit -ENOMEM.
		 */
		obj = fake_dma_object(i915, BIT_ULL(size));
		if (IS_ERR(obj)) {
			kfree(order);
			break;
		}

		GEM_BUG_ON(obj->base.size != BIT_ULL(size));

		if (i915_gem_object_pin_pages(obj)) {
			i915_gem_object_put(obj);
			kfree(order);
			break;
		}

		for (n = 0; n < count; n++) {
			u64 addr = hole_start + order[n] * BIT_ULL(size);

			GEM_BUG_ON(addr + BIT_ULL(size) > vm->total);

			if (igt_timeout(end_time,
					"%s timed out before %d/%d\n",
					__func__, n, count)) {
				hole_end = hole_start; /* quit */
				break;
			}

			if (vm->allocate_va_range &&
			    vm->allocate_va_range(vm, addr, BIT_ULL(size)))
				break;

			mock_vma.pages = obj->mm.pages;
			mock_vma.node.size = BIT_ULL(size);
			mock_vma.node.start = addr;

			intel_runtime_pm_get(i915);
			vm->insert_entries(vm, &mock_vma, I915_CACHE_NONE, 0);
			intel_runtime_pm_put(i915);
		}
		count = n;

		i915_random_reorder(order, count, &prng);
		for (n = 0; n < count; n++) {
			u64 addr = hole_start + order[n] * BIT_ULL(size);

			GEM_BUG_ON(addr + BIT_ULL(size) > vm->total);
			vm->clear_range(vm, addr, BIT_ULL(size));
		}

		i915_gem_object_unpin_pages(obj);
		i915_gem_object_put(obj);

		kfree(order);
	}

	return 0;
}

static void close_object_list(struct list_head *objects,
			      struct i915_address_space *vm)
{
	struct drm_i915_gem_object *obj, *on;
	int ignored;

	list_for_each_entry_safe(obj, on, objects, st_link) {
		struct i915_vma *vma;

		vma = i915_vma_instance(obj, vm, NULL);
		if (!IS_ERR(vma))
			ignored = i915_vma_unbind(vma);
		/* Only ppgtt vma may be closed before the object is freed */
		if (!IS_ERR(vma) && !i915_vma_is_ggtt(vma))
			i915_vma_close(vma);

		list_del(&obj->st_link);
		i915_gem_object_put(obj);
	}
}

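/*
 * fill_hole: pack the hole with progressively larger objects, binding them
 * at fixed offsets working inwards from both edges ("top-down" and
 * "bottom-up" phases), and verify after each pass that no VMA was moved or
 * lost before unbinding everything again.
 */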
static int fill_hole(struct drm_i915_private *i915,
		     struct i915_address_space *vm,
		     u64 hole_start, u64 hole_end,
		     unsigned long end_time)
{
	const u64 hole_size = hole_end - hole_start;
	struct drm_i915_gem_object *obj;
	const unsigned long max_pages =
		min_t(u64, ULONG_MAX - 1, hole_size/2 >> PAGE_SHIFT);
	const unsigned long max_step = max(int_sqrt(max_pages), 2UL);
	unsigned long npages, prime, flags;
	struct i915_vma *vma;
	LIST_HEAD(objects);
	int err;

	/* Try binding many VMA working inwards from either edge */

	flags = PIN_OFFSET_FIXED | PIN_USER;
	if (i915_is_ggtt(vm))
		flags |= PIN_GLOBAL;

	for_each_prime_number_from(prime, 2, max_step) {
		for (npages = 1; npages <= max_pages; npages *= prime) {
			const u64 full_size = npages << PAGE_SHIFT;
			const struct {
				const char *name;
				u64 offset;
				int step;
			} phases[] = {
				{ "top-down", hole_end, -1, },
				{ "bottom-up", hole_start, 1, },
				{ }
			}, *p;

			obj = fake_dma_object(i915, full_size);
			if (IS_ERR(obj))
				break;

			list_add(&obj->st_link, &objects);

			/* Align differing sized objects against the edges, and
			 * check we don't walk off into the void when binding
			 * them into the GTT.
			 */
			for (p = phases; p->name; p++) {
				u64 offset;

				offset = p->offset;
				list_for_each_entry(obj, &objects, st_link) {
					vma = i915_vma_instance(obj, vm, NULL);
					if (IS_ERR(vma))
						continue;

					if (p->step < 0) {
						if (offset < hole_start + obj->base.size)
							break;
						offset -= obj->base.size;
					}

					err = i915_vma_pin(vma, 0, 0, offset | flags);
					if (err) {
						pr_err("%s(%s) pin (forward) failed with err=%d on size=%lu pages (prime=%lu), offset=%llx\n",
						       __func__, p->name, err, npages, prime, offset);
						goto err;
					}

					if (!drm_mm_node_allocated(&vma->node) ||
					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
						pr_err("%s(%s) (forward) insert failed: vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
						       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
						       offset);
						err = -EINVAL;
						goto err;
					}

					i915_vma_unpin(vma);

					if (p->step > 0) {
						if (offset + obj->base.size > hole_end)
							break;
						offset += obj->base.size;
					}
				}

				offset = p->offset;
				list_for_each_entry(obj, &objects, st_link) {
					vma = i915_vma_instance(obj, vm, NULL);
					if (IS_ERR(vma))
						continue;

					if (p->step < 0) {
						if (offset < hole_start + obj->base.size)
							break;
						offset -= obj->base.size;
					}

					if (!drm_mm_node_allocated(&vma->node) ||
					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
						pr_err("%s(%s) (forward) moved vma.node=%llx + %llx, expected offset %llx\n",
						       __func__, p->name, vma->node.start, vma->node.size,
						       offset);
						err = -EINVAL;
						goto err;
					}

					err = i915_vma_unbind(vma);
					if (err) {
						pr_err("%s(%s) (forward) unbind of vma.node=%llx + %llx failed with err=%d\n",
						       __func__, p->name, vma->node.start, vma->node.size,
						       err);
						goto err;
					}

					if (p->step > 0) {
						if (offset + obj->base.size > hole_end)
							break;
						offset += obj->base.size;
					}
				}

				offset = p->offset;
				list_for_each_entry_reverse(obj, &objects, st_link) {
					vma = i915_vma_instance(obj, vm, NULL);
					if (IS_ERR(vma))
						continue;

					if (p->step < 0) {
						if (offset < hole_start + obj->base.size)
							break;
						offset -= obj->base.size;
					}

					err = i915_vma_pin(vma, 0, 0, offset | flags);
					if (err) {
						pr_err("%s(%s) pin (backward) failed with err=%d on size=%lu pages (prime=%lu), offset=%llx\n",
						       __func__, p->name, err, npages, prime, offset);
						goto err;
					}

					if (!drm_mm_node_allocated(&vma->node) ||
					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
						pr_err("%s(%s) (backward) insert failed: vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
						       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
						       offset);
						err = -EINVAL;
						goto err;
					}

					i915_vma_unpin(vma);

					if (p->step > 0) {
						if (offset + obj->base.size > hole_end)
							break;
						offset += obj->base.size;
					}
				}

				offset = p->offset;
				list_for_each_entry_reverse(obj, &objects, st_link) {
					vma = i915_vma_instance(obj, vm, NULL);
					if (IS_ERR(vma))
						continue;

					if (p->step < 0) {
						if (offset < hole_start + obj->base.size)
							break;
						offset -= obj->base.size;
					}

					if (!drm_mm_node_allocated(&vma->node) ||
					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
						pr_err("%s(%s) (backward) moved vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
						       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
						       offset);
						err = -EINVAL;
						goto err;
					}

					err = i915_vma_unbind(vma);
					if (err) {
						pr_err("%s(%s) (backward) unbind of vma.node=%llx + %llx failed with err=%d\n",
						       __func__, p->name, vma->node.start, vma->node.size,
						       err);
						goto err;
					}

					if (p->step > 0) {
						if (offset + obj->base.size > hole_end)
							break;
						offset += obj->base.size;
					}
				}
			}

			if (igt_timeout(end_time, "%s timed out (npages=%lu, prime=%lu)\n",
					__func__, npages, prime)) {
				err = -EINTR;
				goto err;
			}
		}

		close_object_list(&objects, vm);
	}

	return 0;

err:
	close_object_list(&objects, vm);
	return err;
}

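/*
 * walk_hole: bind a single object at every position it can occupy within
 * the hole, stepping by its own size, and check placement and unbind at
 * each step.
 */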
static int walk_hole(struct drm_i915_private *i915,
		     struct i915_address_space *vm,
		     u64 hole_start, u64 hole_end,
		     unsigned long end_time)
{
	const u64 hole_size = hole_end - hole_start;
	const unsigned long max_pages =
		min_t(u64, ULONG_MAX - 1, hole_size >> PAGE_SHIFT);
	unsigned long flags;
	u64 size;

	/* Try binding a single VMA in different positions within the hole */

	flags = PIN_OFFSET_FIXED | PIN_USER;
	if (i915_is_ggtt(vm))
		flags |= PIN_GLOBAL;

	for_each_prime_number_from(size, 1, max_pages) {
		struct drm_i915_gem_object *obj;
		struct i915_vma *vma;
		u64 addr;
		int err = 0;

		obj = fake_dma_object(i915, size << PAGE_SHIFT);
		if (IS_ERR(obj))
			break;

		vma = i915_vma_instance(obj, vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto err_put;
		}

		for (addr = hole_start;
		     addr + obj->base.size < hole_end;
		     addr += obj->base.size) {
			err = i915_vma_pin(vma, 0, 0, addr | flags);
			if (err) {
				pr_err("%s bind failed at %llx + %llx [hole %llx - %llx] with err=%d\n",
				       __func__, addr, vma->size,
				       hole_start, hole_end, err);
				goto err_close;
			}
			i915_vma_unpin(vma);

			if (!drm_mm_node_allocated(&vma->node) ||
			    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
				pr_err("%s incorrect at %llx + %llx\n",
				       __func__, addr, vma->size);
				err = -EINVAL;
				goto err_close;
			}

			err = i915_vma_unbind(vma);
			if (err) {
				pr_err("%s unbind failed at %llx + %llx with err=%d\n",
				       __func__, addr, vma->size, err);
				goto err_close;
			}

			GEM_BUG_ON(drm_mm_node_allocated(&vma->node));

			if (igt_timeout(end_time,
					"%s timed out at %llx\n",
					__func__, addr)) {
				err = -EINTR;
				goto err_close;
			}
		}

err_close:
		if (!i915_vma_is_ggtt(vma))
			i915_vma_close(vma);
err_put:
		i915_gem_object_put(obj);
		if (err)
			return err;
	}

	return 0;
}

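/*
 * pot_hole: insert a two-page object straddling every power-of-two boundary
 * within the hole, checking fixed-offset pinning around alignment
 * boundaries.
 */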
static int pot_hole(struct drm_i915_private *i915,
		    struct i915_address_space *vm,
		    u64 hole_start, u64 hole_end,
		    unsigned long end_time)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	unsigned long flags;
	unsigned int pot;
	int err = 0;

	flags = PIN_OFFSET_FIXED | PIN_USER;
	if (i915_is_ggtt(vm))
		flags |= PIN_GLOBAL;

	obj = i915_gem_object_create_internal(i915, 2 * I915_GTT_PAGE_SIZE);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_obj;
	}

	/* Insert a pair of pages across every pot boundary within the hole */
	for (pot = fls64(hole_end - 1) - 1;
	     pot > ilog2(2 * I915_GTT_PAGE_SIZE);
	     pot--) {
		u64 step = BIT_ULL(pot);
		u64 addr;

		for (addr = round_up(hole_start + I915_GTT_PAGE_SIZE, step) - I915_GTT_PAGE_SIZE;
		     addr <= round_down(hole_end - 2*I915_GTT_PAGE_SIZE, step) - I915_GTT_PAGE_SIZE;
		     addr += step) {
			err = i915_vma_pin(vma, 0, 0, addr | flags);
			if (err) {
				pr_err("%s failed to pin object at %llx in hole [%llx - %llx], with err=%d\n",
				       __func__,
				       addr,
				       hole_start, hole_end,
				       err);
				goto err;
			}

			if (!drm_mm_node_allocated(&vma->node) ||
			    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
				pr_err("%s incorrect at %llx + %llx\n",
				       __func__, addr, vma->size);
				i915_vma_unpin(vma);
				err = i915_vma_unbind(vma);
				err = -EINVAL;
				goto err;
			}

			i915_vma_unpin(vma);
			err = i915_vma_unbind(vma);
			GEM_BUG_ON(err);
		}

		if (igt_timeout(end_time,
				"%s timed out after %d/%d\n",
				__func__, pot, fls64(hole_end - 1) - 1)) {
			err = -EINTR;
			goto err;
		}
	}

err:
	if (!i915_vma_is_ggtt(vma))
		i915_vma_close(vma);
err_obj:
	i915_gem_object_put(obj);
	return err;
}

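/*
 * drunk_hole: like lowlevel_hole but through the full VMA pin/unbind path,
 * binding power-of-two sized objects at randomly ordered fixed offsets
 * within the hole.
 */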
static int drunk_hole(struct drm_i915_private *i915,
		      struct i915_address_space *vm,
		      u64 hole_start, u64 hole_end,
		      unsigned long end_time)
{
	I915_RND_STATE(prng);
	unsigned int size;
	unsigned long flags;

	flags = PIN_OFFSET_FIXED | PIN_USER;
	if (i915_is_ggtt(vm))
		flags |= PIN_GLOBAL;

	/* Keep creating larger objects until one cannot fit into the hole */
	for (size = 12; (hole_end - hole_start) >> size; size++) {
		struct drm_i915_gem_object *obj;
		unsigned int *order, count, n;
		struct i915_vma *vma;
		u64 hole_size;
		int err = -ENODEV;

		hole_size = (hole_end - hole_start) >> size;
		if (hole_size > KMALLOC_MAX_SIZE / sizeof(u32))
			hole_size = KMALLOC_MAX_SIZE / sizeof(u32);
		count = hole_size >> 1;
		if (!count) {
			pr_debug("%s: hole is too small [%llx - %llx] >> %d: %lld\n",
				 __func__, hole_start, hole_end, size, hole_size);
			break;
		}

		do {
			order = i915_random_order(count, &prng);
			if (order)
				break;
		} while (count >>= 1);
		if (!count)
			return -ENOMEM;
		GEM_BUG_ON(!order);

		/* Ignore allocation failures (i.e. don't report them as
		 * a test failure) as we are purposefully allocating very
		 * large objects without checking that we have sufficient
		 * memory. We expect to hit -ENOMEM.
		 */
		obj = fake_dma_object(i915, BIT_ULL(size));
		if (IS_ERR(obj)) {
			kfree(order);
			break;
		}

		vma = i915_vma_instance(obj, vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto err_obj;
		}

		GEM_BUG_ON(vma->size != BIT_ULL(size));

		for (n = 0; n < count; n++) {
			u64 addr = hole_start + order[n] * BIT_ULL(size);

			err = i915_vma_pin(vma, 0, 0, addr | flags);
			if (err) {
				pr_err("%s failed to pin object at %llx + %llx in hole [%llx - %llx], with err=%d\n",
				       __func__,
				       addr, BIT_ULL(size),
				       hole_start, hole_end,
				       err);
				goto err;
			}

			if (!drm_mm_node_allocated(&vma->node) ||
			    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
				pr_err("%s incorrect at %llx + %llx\n",
				       __func__, addr, BIT_ULL(size));
				i915_vma_unpin(vma);
				err = i915_vma_unbind(vma);
				err = -EINVAL;
				goto err;
			}

			i915_vma_unpin(vma);
			err = i915_vma_unbind(vma);
			GEM_BUG_ON(err);

			if (igt_timeout(end_time,
					"%s timed out after %d/%d\n",
					__func__, n, count)) {
				err = -EINTR;
				goto err;
			}
		}

err:
		if (!i915_vma_is_ggtt(vma))
			i915_vma_close(vma);
err_obj:
		i915_gem_object_put(obj);
		kfree(order);
		if (err)
			return err;
	}

	return 0;
}

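/*
 * __shrink_hole fills the hole with doubling-size objects; shrink_hole
 * repeats it with vm->fault_attr configured to inject allocation failures
 * at prime intervals, exercising the error paths of the page-table
 * allocators.
 */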
static int __shrink_hole(struct drm_i915_private *i915,
			 struct i915_address_space *vm,
			 u64 hole_start, u64 hole_end,
			 unsigned long end_time)
{
	struct drm_i915_gem_object *obj;
	unsigned long flags = PIN_OFFSET_FIXED | PIN_USER;
	unsigned int order = 12;
	LIST_HEAD(objects);
	int err = 0;
	u64 addr;

	/* Keep creating larger objects until one cannot fit into the hole */
	for (addr = hole_start; addr < hole_end; ) {
		struct i915_vma *vma;
		u64 size = BIT_ULL(order++);

		size = min(size, hole_end - addr);
		obj = fake_dma_object(i915, size);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			break;
		}

		list_add(&obj->st_link, &objects);

		vma = i915_vma_instance(obj, vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			break;
		}

		GEM_BUG_ON(vma->size != size);

		err = i915_vma_pin(vma, 0, 0, addr | flags);
		if (err) {
			pr_err("%s failed to pin object at %llx + %llx in hole [%llx - %llx], with err=%d\n",
			       __func__, addr, size, hole_start, hole_end, err);
			break;
		}

		if (!drm_mm_node_allocated(&vma->node) ||
		    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
			pr_err("%s incorrect at %llx + %llx\n",
			       __func__, addr, size);
			i915_vma_unpin(vma);
			err = i915_vma_unbind(vma);
			err = -EINVAL;
			break;
		}

		i915_vma_unpin(vma);
		addr += size;

		if (igt_timeout(end_time,
				"%s timed out at offset %llx [%llx - %llx]\n",
				__func__, addr, hole_start, hole_end)) {
			err = -EINTR;
			break;
		}
	}

	close_object_list(&objects, vm);
	return err;
}

static int shrink_hole(struct drm_i915_private *i915,
		       struct i915_address_space *vm,
		       u64 hole_start, u64 hole_end,
		       unsigned long end_time)
{
	unsigned long prime;
	int err;

	vm->fault_attr.probability = 999;
	atomic_set(&vm->fault_attr.times, -1);

	for_each_prime_number_from(prime, 0, ULONG_MAX - 1) {
		vm->fault_attr.interval = prime;
		err = __shrink_hole(i915, vm, hole_start, hole_end, end_time);
		if (err)
			break;
	}

	memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));

	return err;
}

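/*
 * exercise_ppgtt: run one of the hole exercisers over the whole address
 * space of a freshly created full ppGTT, using a mock file as the owner.
 */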
static int exercise_ppgtt(struct drm_i915_private *dev_priv,
			  int (*func)(struct drm_i915_private *i915,
				      struct i915_address_space *vm,
				      u64 hole_start, u64 hole_end,
				      unsigned long end_time))
{
	struct drm_file *file;
	struct i915_hw_ppgtt *ppgtt;
	IGT_TIMEOUT(end_time);
	int err;

	if (!USES_FULL_PPGTT(dev_priv))
		return 0;

	file = mock_file(dev_priv);
	if (IS_ERR(file))
		return PTR_ERR(file);

	mutex_lock(&dev_priv->drm.struct_mutex);
	ppgtt = i915_ppgtt_create(dev_priv, file->driver_priv, "mock");
	if (IS_ERR(ppgtt)) {
		err = PTR_ERR(ppgtt);
		goto out_unlock;
	}
	GEM_BUG_ON(offset_in_page(ppgtt->base.total));
	GEM_BUG_ON(ppgtt->base.closed);

	err = func(dev_priv, &ppgtt->base, 0, ppgtt->base.total, end_time);

	i915_ppgtt_close(&ppgtt->base);
	i915_ppgtt_put(ppgtt);
out_unlock:
	mutex_unlock(&dev_priv->drm.struct_mutex);

	mock_file_free(dev_priv, file);
	return err;
}

static int igt_ppgtt_fill(void *arg)
{
	return exercise_ppgtt(arg, fill_hole);
}

static int igt_ppgtt_walk(void *arg)
{
	return exercise_ppgtt(arg, walk_hole);
}

static int igt_ppgtt_pot(void *arg)
{
	return exercise_ppgtt(arg, pot_hole);
}

static int igt_ppgtt_drunk(void *arg)
{
	return exercise_ppgtt(arg, drunk_hole);
}

static int igt_ppgtt_lowlevel(void *arg)
{
	return exercise_ppgtt(arg, lowlevel_hole);
}

static int igt_ppgtt_shrink(void *arg)
{
	return exercise_ppgtt(arg, shrink_hole);
}

static int sort_holes(void *priv, struct list_head *A, struct list_head *B)
{
	struct drm_mm_node *a = list_entry(A, typeof(*a), hole_stack);
	struct drm_mm_node *b = list_entry(B, typeof(*b), hole_stack);

	if (a->start < b->start)
		return -1;
	else
		return 1;
}

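/*
 * exercise_ggtt: walk the holes of the live global GTT in address order and
 * run the given exerciser over each one, restarting the walk whenever the
 * drm_mm hole list may have been perturbed.
 */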
static int exercise_ggtt(struct drm_i915_private *i915,
			 int (*func)(struct drm_i915_private *i915,
				     struct i915_address_space *vm,
				     u64 hole_start, u64 hole_end,
				     unsigned long end_time))
{
	struct i915_ggtt *ggtt = &i915->ggtt;
	u64 hole_start, hole_end, last = 0;
	struct drm_mm_node *node;
	IGT_TIMEOUT(end_time);
	int err = 0;

	mutex_lock(&i915->drm.struct_mutex);
restart:
	list_sort(NULL, &ggtt->base.mm.hole_stack, sort_holes);
	drm_mm_for_each_hole(node, &ggtt->base.mm, hole_start, hole_end) {
		if (hole_start < last)
			continue;

		if (ggtt->base.mm.color_adjust)
			ggtt->base.mm.color_adjust(node, 0,
						   &hole_start, &hole_end);
		if (hole_start >= hole_end)
			continue;

		err = func(i915, &ggtt->base, hole_start, hole_end, end_time);
		if (err)
			break;

		/* As we have manipulated the drm_mm, the list may be corrupt */
		last = hole_end;
		goto restart;
	}
	mutex_unlock(&i915->drm.struct_mutex);

	return err;
}

static int igt_ggtt_fill(void *arg)
{
	return exercise_ggtt(arg, fill_hole);
}

static int igt_ggtt_walk(void *arg)
{
	return exercise_ggtt(arg, walk_hole);
}

static int igt_ggtt_pot(void *arg)
{
	return exercise_ggtt(arg, pot_hole);
}

static int igt_ggtt_drunk(void *arg)
{
	return exercise_ggtt(arg, drunk_hole);
}

static int igt_ggtt_lowlevel(void *arg)
{
	return exercise_ggtt(arg, lowlevel_hole);
}

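/*
 * igt_ggtt_page: reserve a scratch range in the mappable aperture, then use
 * insert_page() to map the same backing page at randomised offsets, writing
 * a distinct dword through each mapping and reading it back after remapping
 * to verify that single-page PTE updates land where expected.
 */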
static int igt_ggtt_page(void *arg)
{
	const unsigned int count = PAGE_SIZE/sizeof(u32);
	I915_RND_STATE(prng);
	struct drm_i915_private *i915 = arg;
	struct i915_ggtt *ggtt = &i915->ggtt;
	struct drm_i915_gem_object *obj;
	struct drm_mm_node tmp;
	unsigned int *order, n;
	int err;

	mutex_lock(&i915->drm.struct_mutex);

	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto out_unlock;
	}

	err = i915_gem_object_pin_pages(obj);
	if (err)
		goto out_free;

	memset(&tmp, 0, sizeof(tmp));
	err = drm_mm_insert_node_in_range(&ggtt->base.mm, &tmp,
					  1024 * PAGE_SIZE, 0,
					  I915_COLOR_UNEVICTABLE,
					  0, ggtt->mappable_end,
					  DRM_MM_INSERT_LOW);
	if (err)
		goto out_unpin;

	order = i915_random_order(count, &prng);
	if (!order) {
		err = -ENOMEM;
		goto out_remove;
	}

	intel_runtime_pm_get(i915);
	for (n = 0; n < count; n++) {
		u64 offset = tmp.start + order[n] * PAGE_SIZE;
		u32 __iomem *vaddr;

		ggtt->base.insert_page(&ggtt->base,
				       i915_gem_object_get_dma_address(obj, 0),
				       offset, I915_CACHE_NONE, 0);

		vaddr = io_mapping_map_atomic_wc(&ggtt->iomap, offset);
		iowrite32(n, vaddr + n);
		io_mapping_unmap_atomic(vaddr);

		wmb();
		ggtt->base.clear_range(&ggtt->base, offset, PAGE_SIZE);
	}

	i915_random_reorder(order, count, &prng);
	for (n = 0; n < count; n++) {
		u64 offset = tmp.start + order[n] * PAGE_SIZE;
		u32 __iomem *vaddr;
		u32 val;

		ggtt->base.insert_page(&ggtt->base,
				       i915_gem_object_get_dma_address(obj, 0),
				       offset, I915_CACHE_NONE, 0);

		vaddr = io_mapping_map_atomic_wc(&ggtt->iomap, offset);
		val = ioread32(vaddr + n);
		io_mapping_unmap_atomic(vaddr);

		ggtt->base.clear_range(&ggtt->base, offset, PAGE_SIZE);

		if (val != n) {
			pr_err("insert page failed: found %d, expected %d\n",
			       val, n);
			err = -EINVAL;
			break;
		}
	}
	intel_runtime_pm_put(i915);

	kfree(order);
out_remove:
	drm_mm_remove_node(&tmp);
out_unpin:
	i915_gem_object_unpin_pages(obj);
out_free:
	i915_gem_object_put(obj);
out_unlock:
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}

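/*
 * track_vma_bind: fake the bookkeeping normally done by the binding code so
 * that nodes placed directly with i915_gem_gtt_reserve/insert look bound
 * (and hence evictable) to the rest of the driver.
 */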
static void track_vma_bind(struct i915_vma *vma)
{
	struct drm_i915_gem_object *obj = vma->obj;

	obj->bind_count++; /* track for eviction later */
	__i915_gem_object_pin_pages(obj);

	vma->pages = obj->mm.pages;
	list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
}

static int exercise_mock(struct drm_i915_private *i915,
			 int (*func)(struct drm_i915_private *i915,
				     struct i915_address_space *vm,
				     u64 hole_start, u64 hole_end,
				     unsigned long end_time))
{
	struct i915_gem_context *ctx;
	struct i915_hw_ppgtt *ppgtt;
	IGT_TIMEOUT(end_time);
	int err;

	ctx = mock_context(i915, "mock");
	if (!ctx)
		return -ENOMEM;

	ppgtt = ctx->ppgtt;
	GEM_BUG_ON(!ppgtt);

	err = func(i915, &ppgtt->base, 0, ppgtt->base.total, end_time);

	mock_context_close(ctx);
	return err;
}

static int igt_mock_fill(void *arg)
{
	return exercise_mock(arg, fill_hole);
}

static int igt_mock_walk(void *arg)
{
	return exercise_mock(arg, walk_hole);
}

static int igt_mock_pot(void *arg)
{
	return exercise_mock(arg, pot_hole);
}

static int igt_mock_drunk(void *arg)
{
	return exercise_mock(arg, drunk_hole);
}

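/*
 * igt_gtt_reserve and igt_gtt_insert below exercise the low-level node
 * placement helpers on the mock GGTT, first packing the address space and
 * then forcing evictions and random replacements.
 */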
static int igt_gtt_reserve(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj, *on;
	LIST_HEAD(objects);
	u64 total;
	int err = -ENODEV;

	/* i915_gem_gtt_reserve() tries to reserve the precise range
	 * for the node, and evicts if it has to. So our test checks that
	 * it can give us the requested space and prevent overlaps.
	 */

	/* Start by filling the GGTT */
	for (total = 0;
	     total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.base.total;
	     total += 2*I915_GTT_PAGE_SIZE) {
		struct i915_vma *vma;

		obj = i915_gem_object_create_internal(i915, 2*PAGE_SIZE);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			goto out;
		}

		err = i915_gem_object_pin_pages(obj);
		if (err) {
			i915_gem_object_put(obj);
			goto out;
		}

		list_add(&obj->st_link, &objects);

		vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out;
		}

		err = i915_gem_gtt_reserve(&i915->ggtt.base, &vma->node,
					   obj->base.size,
					   total,
					   obj->cache_level,
					   0);
		if (err) {
			pr_err("i915_gem_gtt_reserve (pass 1) failed at %llu/%llu with err=%d\n",
			       total, i915->ggtt.base.total, err);
			goto out;
		}
		track_vma_bind(vma);

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
		if (vma->node.start != total ||
		    vma->node.size != 2*I915_GTT_PAGE_SIZE) {
			pr_err("i915_gem_gtt_reserve (pass 1) placement failed, found (%llx + %llx), expected (%llx + %lx)\n",
			       vma->node.start, vma->node.size,
			       total, 2*I915_GTT_PAGE_SIZE);
			err = -EINVAL;
			goto out;
		}
	}

	/* Now we start forcing evictions */
	for (total = I915_GTT_PAGE_SIZE;
	     total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.base.total;
	     total += 2*I915_GTT_PAGE_SIZE) {
		struct i915_vma *vma;

		obj = i915_gem_object_create_internal(i915, 2*PAGE_SIZE);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			goto out;
		}

		err = i915_gem_object_pin_pages(obj);
		if (err) {
			i915_gem_object_put(obj);
			goto out;
		}

		list_add(&obj->st_link, &objects);

		vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out;
		}

		err = i915_gem_gtt_reserve(&i915->ggtt.base, &vma->node,
					   obj->base.size,
					   total,
					   obj->cache_level,
					   0);
		if (err) {
			pr_err("i915_gem_gtt_reserve (pass 2) failed at %llu/%llu with err=%d\n",
			       total, i915->ggtt.base.total, err);
			goto out;
		}
		track_vma_bind(vma);

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
		if (vma->node.start != total ||
		    vma->node.size != 2*I915_GTT_PAGE_SIZE) {
			pr_err("i915_gem_gtt_reserve (pass 2) placement failed, found (%llx + %llx), expected (%llx + %lx)\n",
			       vma->node.start, vma->node.size,
			       total, 2*I915_GTT_PAGE_SIZE);
			err = -EINVAL;
			goto out;
		}
	}

	/* And then try at random */
	list_for_each_entry_safe(obj, on, &objects, st_link) {
		struct i915_vma *vma;
		u64 offset;

		vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out;
		}

		err = i915_vma_unbind(vma);
		if (err) {
			pr_err("i915_vma_unbind failed with err=%d!\n", err);
			goto out;
		}

		offset = random_offset(0, i915->ggtt.base.total,
				       2*I915_GTT_PAGE_SIZE,
				       I915_GTT_MIN_ALIGNMENT);

		err = i915_gem_gtt_reserve(&i915->ggtt.base, &vma->node,
					   obj->base.size,
					   offset,
					   obj->cache_level,
					   0);
		if (err) {
			pr_err("i915_gem_gtt_reserve (pass 3) failed at %llu/%llu with err=%d\n",
			       total, i915->ggtt.base.total, err);
			goto out;
		}
		track_vma_bind(vma);

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
		if (vma->node.start != offset ||
		    vma->node.size != 2*I915_GTT_PAGE_SIZE) {
			pr_err("i915_gem_gtt_reserve (pass 3) placement failed, found (%llx + %llx), expected (%llx + %lx)\n",
			       vma->node.start, vma->node.size,
			       offset, 2*I915_GTT_PAGE_SIZE);
			err = -EINVAL;
			goto out;
		}
	}

out:
	list_for_each_entry_safe(obj, on, &objects, st_link) {
		i915_gem_object_unpin_pages(obj);
		i915_gem_object_put(obj);
	}
	return err;
}

static int igt_gtt_insert(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj, *on;
	struct drm_mm_node tmp = {};
	const struct invalid_insert {
		u64 size;
		u64 alignment;
		u64 start, end;
	} invalid_insert[] = {
		{
			i915->ggtt.base.total + I915_GTT_PAGE_SIZE, 0,
			0, i915->ggtt.base.total,
		},
		{
			2*I915_GTT_PAGE_SIZE, 0,
			0, I915_GTT_PAGE_SIZE,
		},
		{
			-(u64)I915_GTT_PAGE_SIZE, 0,
			0, 4*I915_GTT_PAGE_SIZE,
		},
		{
			-(u64)2*I915_GTT_PAGE_SIZE, 2*I915_GTT_PAGE_SIZE,
			0, 4*I915_GTT_PAGE_SIZE,
		},
		{
			I915_GTT_PAGE_SIZE, I915_GTT_MIN_ALIGNMENT << 1,
			I915_GTT_MIN_ALIGNMENT, I915_GTT_MIN_ALIGNMENT << 1,
		},
		{}
	}, *ii;
	LIST_HEAD(objects);
	u64 total;
	int err = -ENODEV;

	/* i915_gem_gtt_insert() tries to allocate some free space in the GTT
	 * to the node, evicting if required.
	 */

	/* Check a couple of obviously invalid requests */
	for (ii = invalid_insert; ii->size; ii++) {
		err = i915_gem_gtt_insert(&i915->ggtt.base, &tmp,
					  ii->size, ii->alignment,
					  I915_COLOR_UNEVICTABLE,
					  ii->start, ii->end,
					  0);
		if (err != -ENOSPC) {
			pr_err("Invalid i915_gem_gtt_insert(.size=%llx, .alignment=%llx, .start=%llx, .end=%llx) succeeded (err=%d)\n",
			       ii->size, ii->alignment, ii->start, ii->end,
			       err);
			return -EINVAL;
		}
	}

	/* Start by filling the GGTT */
	for (total = 0;
	     total + I915_GTT_PAGE_SIZE <= i915->ggtt.base.total;
	     total += I915_GTT_PAGE_SIZE) {
		struct i915_vma *vma;

		obj = i915_gem_object_create_internal(i915, I915_GTT_PAGE_SIZE);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			goto out;
		}

		err = i915_gem_object_pin_pages(obj);
		if (err) {
			i915_gem_object_put(obj);
			goto out;
		}

		list_add(&obj->st_link, &objects);

		vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out;
		}

		err = i915_gem_gtt_insert(&i915->ggtt.base, &vma->node,
					  obj->base.size, 0, obj->cache_level,
					  0, i915->ggtt.base.total,
					  0);
		if (err == -ENOSPC) {
			/* maxed out the GGTT space */
			i915_gem_object_put(obj);
			break;
		}
		if (err) {
			pr_err("i915_gem_gtt_insert (pass 1) failed at %llu/%llu with err=%d\n",
			       total, i915->ggtt.base.total, err);
			goto out;
		}
		track_vma_bind(vma);
		__i915_vma_pin(vma);

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	}

	list_for_each_entry(obj, &objects, st_link) {
		struct i915_vma *vma;

		vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out;
		}

		if (!drm_mm_node_allocated(&vma->node)) {
			pr_err("VMA was unexpectedly evicted!\n");
			err = -EINVAL;
			goto out;
		}

		__i915_vma_unpin(vma);
	}

	/* If we then reinsert, we should find the same hole */
	list_for_each_entry_safe(obj, on, &objects, st_link) {
		struct i915_vma *vma;
		u64 offset;

		vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out;
		}

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
		offset = vma->node.start;

		err = i915_vma_unbind(vma);
		if (err) {
			pr_err("i915_vma_unbind failed with err=%d!\n", err);
			goto out;
		}

		err = i915_gem_gtt_insert(&i915->ggtt.base, &vma->node,
					  obj->base.size, 0, obj->cache_level,
					  0, i915->ggtt.base.total,
					  0);
		if (err) {
			pr_err("i915_gem_gtt_insert (pass 2) failed at %llu/%llu with err=%d\n",
			       total, i915->ggtt.base.total, err);
			goto out;
		}
		track_vma_bind(vma);

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
		if (vma->node.start != offset) {
			pr_err("i915_gem_gtt_insert did not return node to its previous location (the only hole), expected address %llx, found %llx\n",
			       offset, vma->node.start);
			err = -EINVAL;
			goto out;
		}
	}

	/* And then force evictions */
	for (total = 0;
	     total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.base.total;
	     total += 2*I915_GTT_PAGE_SIZE) {
		struct i915_vma *vma;

		obj = i915_gem_object_create_internal(i915, 2*I915_GTT_PAGE_SIZE);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			goto out;
		}

		err = i915_gem_object_pin_pages(obj);
		if (err) {
			i915_gem_object_put(obj);
			goto out;
		}

		list_add(&obj->st_link, &objects);

		vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out;
		}

		err = i915_gem_gtt_insert(&i915->ggtt.base, &vma->node,
					  obj->base.size, 0, obj->cache_level,
					  0, i915->ggtt.base.total,
					  0);
		if (err) {
			pr_err("i915_gem_gtt_insert (pass 3) failed at %llu/%llu with err=%d\n",
			       total, i915->ggtt.base.total, err);
			goto out;
		}
		track_vma_bind(vma);

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	}

out:
	list_for_each_entry_safe(obj, on, &objects, st_link) {
		i915_gem_object_unpin_pages(obj);
		i915_gem_object_put(obj);
	}
	return err;
}

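/*
 * Two entry points: the mock selftests run against a fake device and its
 * mock GGTT, while the live selftests exercise the ppGTT and GGTT of a real
 * device.
 */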
int i915_gem_gtt_mock_selftests(void)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_mock_drunk),
		SUBTEST(igt_mock_walk),
		SUBTEST(igt_mock_pot),
		SUBTEST(igt_mock_fill),
		SUBTEST(igt_gtt_reserve),
		SUBTEST(igt_gtt_insert),
	};
	struct drm_i915_private *i915;
	int err;

	i915 = mock_gem_device();
	if (!i915)
		return -ENOMEM;

	mutex_lock(&i915->drm.struct_mutex);
	err = i915_subtests(tests, i915);
	mutex_unlock(&i915->drm.struct_mutex);

	drm_dev_unref(&i915->drm);
	return err;
}

int i915_gem_gtt_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_ppgtt_alloc),
		SUBTEST(igt_ppgtt_lowlevel),
		SUBTEST(igt_ppgtt_drunk),
		SUBTEST(igt_ppgtt_walk),
		SUBTEST(igt_ppgtt_pot),
		SUBTEST(igt_ppgtt_fill),
		SUBTEST(igt_ppgtt_shrink),
		SUBTEST(igt_ggtt_lowlevel),
		SUBTEST(igt_ggtt_drunk),
		SUBTEST(igt_ggtt_walk),
		SUBTEST(igt_ggtt_pot),
		SUBTEST(igt_ggtt_fill),
		SUBTEST(igt_ggtt_page),
	};

	GEM_BUG_ON(offset_in_page(i915->ggtt.base.total));

	return i915_subtests(tests, i915);
}