1 // SPDX-License-Identifier: GPL-2.0-only
3 * Test cases for the drm_mm range manager
6 #define pr_fmt(fmt) "drm_mm: " fmt
8 #include <linux/module.h>
9 #include <linux/prime_numbers.h>
10 #include <linux/slab.h>
11 #include <linux/random.h>
12 #include <linux/vmalloc.h>
13 #include <linux/ktime.h>
15 #include <drm/drm_mm.h>
17 #include "../lib/drm_random.h"
19 #define TESTS "drm_mm_selftests.h"
20 #include "drm_selftest.h"
22 static unsigned int random_seed
;
23 static unsigned int max_iterations
= 8192;
24 static unsigned int max_prime
= 128;
33 static const struct insert_mode
{
35 enum drm_mm_insert_mode mode
;
37 [BEST
] = { "best", DRM_MM_INSERT_BEST
},
38 [BOTTOMUP
] = { "bottom-up", DRM_MM_INSERT_LOW
},
39 [TOPDOWN
] = { "top-down", DRM_MM_INSERT_HIGH
},
40 [EVICT
] = { "evict", DRM_MM_INSERT_EVICT
},
43 { "bottom-up", DRM_MM_INSERT_LOW
},
44 { "top-down", DRM_MM_INSERT_HIGH
},
/*
 * igt_sanitycheck - trivial selftest that always passes.
 * @ignored: unused selftest argument.
 *
 * Confirms the selftest harness itself runs; returns 0 (success).
 */
static int igt_sanitycheck(void *ignored)
{
	pr_info("%s - ok!\n", __func__);
	return 0;
}
54 static bool assert_no_holes(const struct drm_mm
*mm
)
56 struct drm_mm_node
*hole
;
57 u64 hole_start
, __always_unused hole_end
;
61 drm_mm_for_each_hole(hole
, mm
, hole_start
, hole_end
)
64 pr_err("Expected to find no holes (after reserve), found %lu instead\n", count
);
68 drm_mm_for_each_node(hole
, mm
) {
69 if (drm_mm_hole_follows(hole
)) {
70 pr_err("Hole follows node, expected none!\n");
78 static bool assert_one_hole(const struct drm_mm
*mm
, u64 start
, u64 end
)
80 struct drm_mm_node
*hole
;
81 u64 hole_start
, hole_end
;
89 drm_mm_for_each_hole(hole
, mm
, hole_start
, hole_end
) {
90 if (start
!= hole_start
|| end
!= hole_end
) {
92 pr_err("empty mm has incorrect hole, found (%llx, %llx), expect (%llx, %llx)\n",
100 pr_err("Expected to find one hole, found %lu instead\n", count
);
107 static bool assert_continuous(const struct drm_mm
*mm
, u64 size
)
109 struct drm_mm_node
*node
, *check
, *found
;
113 if (!assert_no_holes(mm
))
118 drm_mm_for_each_node(node
, mm
) {
119 if (node
->start
!= addr
) {
120 pr_err("node[%ld] list out of order, expected %llx found %llx\n",
121 n
, addr
, node
->start
);
125 if (node
->size
!= size
) {
126 pr_err("node[%ld].size incorrect, expected %llx, found %llx\n",
127 n
, size
, node
->size
);
131 if (drm_mm_hole_follows(node
)) {
132 pr_err("node[%ld] is followed by a hole!\n", n
);
137 drm_mm_for_each_node_in_range(check
, mm
, addr
, addr
+ size
) {
139 pr_err("lookup return wrong node, expected start %llx, found %llx\n",
140 node
->start
, check
->start
);
146 pr_err("lookup failed for node %llx + %llx\n",
158 static u64
misalignment(struct drm_mm_node
*node
, u64 alignment
)
165 div64_u64_rem(node
->start
, alignment
, &rem
);
169 static bool assert_node(struct drm_mm_node
*node
, struct drm_mm
*mm
,
170 u64 size
, u64 alignment
, unsigned long color
)
174 if (!drm_mm_node_allocated(node
) || node
->mm
!= mm
) {
175 pr_err("node not allocated\n");
179 if (node
->size
!= size
) {
180 pr_err("node has wrong size, found %llu, expected %llu\n",
185 if (misalignment(node
, alignment
)) {
186 pr_err("node is misaligned, start %llx rem %llu, expected alignment %llu\n",
187 node
->start
, misalignment(node
, alignment
), alignment
);
191 if (node
->color
!= color
) {
192 pr_err("node has wrong color, found %lu, expected %lu\n",
/* Dump the full state of @mm through the debug printer, for diagnostics
 * on test failure. Multi-statement macro wrapped in do/while(0).
 */
#define show_mm(mm) do { \
	struct drm_printer __p = drm_debug_printer(__func__); \
	drm_mm_print((mm), &__p); } while (0)
204 static int igt_init(void *ignored
)
206 const unsigned int size
= 4096;
208 struct drm_mm_node tmp
;
211 /* Start with some simple checks on initialising the struct drm_mm */
212 memset(&mm
, 0, sizeof(mm
));
213 if (drm_mm_initialized(&mm
)) {
214 pr_err("zeroed mm claims to be initialized\n");
218 memset(&mm
, 0xff, sizeof(mm
));
219 drm_mm_init(&mm
, 0, size
);
220 if (!drm_mm_initialized(&mm
)) {
221 pr_err("mm claims not to be initialized\n");
225 if (!drm_mm_clean(&mm
)) {
226 pr_err("mm not empty on creation\n");
230 /* After creation, it should all be one massive hole */
231 if (!assert_one_hole(&mm
, 0, size
)) {
236 memset(&tmp
, 0, sizeof(tmp
));
239 ret
= drm_mm_reserve_node(&mm
, &tmp
);
241 pr_err("failed to reserve whole drm_mm\n");
245 /* After filling the range entirely, there should be no holes */
246 if (!assert_no_holes(&mm
)) {
251 /* And then after emptying it again, the massive hole should be back */
252 drm_mm_remove_node(&tmp
);
253 if (!assert_one_hole(&mm
, 0, size
)) {
261 drm_mm_takedown(&mm
);
265 static int igt_debug(void *ignored
)
268 struct drm_mm_node nodes
[2];
271 /* Create a small drm_mm with a couple of nodes and a few holes, and
272 * check that the debug iterator doesn't explode over a trivial drm_mm.
275 drm_mm_init(&mm
, 0, 4096);
277 memset(nodes
, 0, sizeof(nodes
));
278 nodes
[0].start
= 512;
279 nodes
[0].size
= 1024;
280 ret
= drm_mm_reserve_node(&mm
, &nodes
[0]);
282 pr_err("failed to reserve node[0] {start=%lld, size=%lld)\n",
283 nodes
[0].start
, nodes
[0].size
);
287 nodes
[1].size
= 1024;
288 nodes
[1].start
= 4096 - 512 - nodes
[1].size
;
289 ret
= drm_mm_reserve_node(&mm
, &nodes
[1]);
291 pr_err("failed to reserve node[1] {start=%lld, size=%lld)\n",
292 nodes
[1].start
, nodes
[1].size
);
300 static struct drm_mm_node
*set_node(struct drm_mm_node
*node
,
308 static bool expect_reserve_fail(struct drm_mm
*mm
, struct drm_mm_node
*node
)
312 err
= drm_mm_reserve_node(mm
, node
);
313 if (likely(err
== -ENOSPC
))
317 pr_err("impossible reserve succeeded, node %llu + %llu\n",
318 node
->start
, node
->size
);
319 drm_mm_remove_node(node
);
321 pr_err("impossible reserve failed with wrong error %d [expected %d], node %llu + %llu\n",
322 err
, -ENOSPC
, node
->start
, node
->size
);
327 static bool check_reserve_boundaries(struct drm_mm
*mm
,
331 const struct boundary
{
335 #define B(st, sz) { (st), (sz), "{ " #st ", " #sz "}" }
346 B(count
*size
, -size
),
347 B(count
*size
, count
*size
),
348 B(count
*size
, -count
*size
),
349 B(count
*size
, -(count
+1)*size
),
350 B((count
+1)*size
, size
),
351 B((count
+1)*size
, -size
),
352 B((count
+1)*size
, -2*size
),
355 struct drm_mm_node tmp
= {};
358 for (n
= 0; n
< ARRAY_SIZE(boundaries
); n
++) {
359 if (!expect_reserve_fail(mm
,
362 boundaries
[n
].size
))) {
363 pr_err("boundary[%d:%s] failed, count=%u, size=%lld\n",
364 n
, boundaries
[n
].name
, count
, size
);
372 static int __igt_reserve(unsigned int count
, u64 size
)
374 DRM_RND_STATE(prng
, random_seed
);
376 struct drm_mm_node tmp
, *nodes
, *node
, *next
;
377 unsigned int *order
, n
, m
, o
= 0;
380 /* For exercising drm_mm_reserve_node(), we want to check that
381 * reservations outside of the drm_mm range are rejected, and to
382 * overlapping and otherwise already occupied ranges. Afterwards,
383 * the tree and nodes should be intact.
386 DRM_MM_BUG_ON(!count
);
387 DRM_MM_BUG_ON(!size
);
390 order
= drm_random_order(count
, &prng
);
394 nodes
= vzalloc(array_size(count
, sizeof(*nodes
)));
399 drm_mm_init(&mm
, 0, count
* size
);
401 if (!check_reserve_boundaries(&mm
, count
, size
))
404 for (n
= 0; n
< count
; n
++) {
405 nodes
[n
].start
= order
[n
] * size
;
406 nodes
[n
].size
= size
;
408 err
= drm_mm_reserve_node(&mm
, &nodes
[n
]);
410 pr_err("reserve failed, step %d, start %llu\n",
416 if (!drm_mm_node_allocated(&nodes
[n
])) {
417 pr_err("reserved node not allocated! step %d, start %llu\n",
422 if (!expect_reserve_fail(&mm
, &nodes
[n
]))
426 /* After random insertion the nodes should be in order */
427 if (!assert_continuous(&mm
, size
))
430 /* Repeated use should then fail */
431 drm_random_reorder(order
, count
, &prng
);
432 for (n
= 0; n
< count
; n
++) {
433 if (!expect_reserve_fail(&mm
,
434 set_node(&tmp
, order
[n
] * size
, 1)))
437 /* Remove and reinsert should work */
438 drm_mm_remove_node(&nodes
[order
[n
]]);
439 err
= drm_mm_reserve_node(&mm
, &nodes
[order
[n
]]);
441 pr_err("reserve failed, step %d, start %llu\n",
448 if (!assert_continuous(&mm
, size
))
451 /* Overlapping use should then fail */
452 for (n
= 0; n
< count
; n
++) {
453 if (!expect_reserve_fail(&mm
, set_node(&tmp
, 0, size
*count
)))
456 for (n
= 0; n
< count
; n
++) {
457 if (!expect_reserve_fail(&mm
,
460 size
* (count
- n
))))
464 /* Remove several, reinsert, check full */
465 for_each_prime_number(n
, min(max_prime
, count
)) {
466 for (m
= 0; m
< n
; m
++) {
467 node
= &nodes
[order
[(o
+ m
) % count
]];
468 drm_mm_remove_node(node
);
471 for (m
= 0; m
< n
; m
++) {
472 node
= &nodes
[order
[(o
+ m
) % count
]];
473 err
= drm_mm_reserve_node(&mm
, node
);
475 pr_err("reserve failed, step %d/%d, start %llu\n",
484 if (!assert_continuous(&mm
, size
))
490 drm_mm_for_each_node_safe(node
, next
, &mm
)
491 drm_mm_remove_node(node
);
492 drm_mm_takedown(&mm
);
500 static int igt_reserve(void *ignored
)
502 const unsigned int count
= min_t(unsigned int, BIT(10), max_iterations
);
505 for_each_prime_number_from(n
, 1, 54) {
506 u64 size
= BIT_ULL(n
);
508 ret
= __igt_reserve(count
, size
- 1);
512 ret
= __igt_reserve(count
, size
);
516 ret
= __igt_reserve(count
, size
+ 1);
526 static bool expect_insert(struct drm_mm
*mm
, struct drm_mm_node
*node
,
527 u64 size
, u64 alignment
, unsigned long color
,
528 const struct insert_mode
*mode
)
532 err
= drm_mm_insert_node_generic(mm
, node
,
533 size
, alignment
, color
,
536 pr_err("insert (size=%llu, alignment=%llu, color=%lu, mode=%s) failed with err=%d\n",
537 size
, alignment
, color
, mode
->name
, err
);
541 if (!assert_node(node
, mm
, size
, alignment
, color
)) {
542 drm_mm_remove_node(node
);
549 static bool expect_insert_fail(struct drm_mm
*mm
, u64 size
)
551 struct drm_mm_node tmp
= {};
554 err
= drm_mm_insert_node(mm
, &tmp
, size
);
555 if (likely(err
== -ENOSPC
))
559 pr_err("impossible insert succeeded, node %llu + %llu\n",
560 tmp
.start
, tmp
.size
);
561 drm_mm_remove_node(&tmp
);
563 pr_err("impossible insert failed with wrong error %d [expected %d], size %llu\n",
569 static int __igt_insert(unsigned int count
, u64 size
, bool replace
)
571 DRM_RND_STATE(prng
, random_seed
);
572 const struct insert_mode
*mode
;
574 struct drm_mm_node
*nodes
, *node
, *next
;
575 unsigned int *order
, n
, m
, o
= 0;
578 /* Fill a range with lots of nodes, check it doesn't fail too early */
580 DRM_MM_BUG_ON(!count
);
581 DRM_MM_BUG_ON(!size
);
584 nodes
= vmalloc(array_size(count
, sizeof(*nodes
)));
588 order
= drm_random_order(count
, &prng
);
593 drm_mm_init(&mm
, 0, count
* size
);
595 for (mode
= insert_modes
; mode
->name
; mode
++) {
596 for (n
= 0; n
< count
; n
++) {
597 struct drm_mm_node tmp
;
599 node
= replace
? &tmp
: &nodes
[n
];
600 memset(node
, 0, sizeof(*node
));
601 if (!expect_insert(&mm
, node
, size
, 0, n
, mode
)) {
602 pr_err("%s insert failed, size %llu step %d\n",
603 mode
->name
, size
, n
);
608 drm_mm_replace_node(&tmp
, &nodes
[n
]);
609 if (drm_mm_node_allocated(&tmp
)) {
610 pr_err("replaced old-node still allocated! step %d\n",
615 if (!assert_node(&nodes
[n
], &mm
, size
, 0, n
)) {
616 pr_err("replaced node did not inherit parameters, size %llu step %d\n",
621 if (tmp
.start
!= nodes
[n
].start
) {
622 pr_err("replaced node mismatch location expected [%llx + %llx], found [%llx + %llx]\n",
624 nodes
[n
].start
, nodes
[n
].size
);
630 /* After random insertion the nodes should be in order */
631 if (!assert_continuous(&mm
, size
))
634 /* Repeated use should then fail */
635 if (!expect_insert_fail(&mm
, size
))
638 /* Remove one and reinsert, as the only hole it should refill itself */
639 for (n
= 0; n
< count
; n
++) {
640 u64 addr
= nodes
[n
].start
;
642 drm_mm_remove_node(&nodes
[n
]);
643 if (!expect_insert(&mm
, &nodes
[n
], size
, 0, n
, mode
)) {
644 pr_err("%s reinsert failed, size %llu step %d\n",
645 mode
->name
, size
, n
);
649 if (nodes
[n
].start
!= addr
) {
650 pr_err("%s reinsert node moved, step %d, expected %llx, found %llx\n",
651 mode
->name
, n
, addr
, nodes
[n
].start
);
655 if (!assert_continuous(&mm
, size
))
659 /* Remove several, reinsert, check full */
660 for_each_prime_number(n
, min(max_prime
, count
)) {
661 for (m
= 0; m
< n
; m
++) {
662 node
= &nodes
[order
[(o
+ m
) % count
]];
663 drm_mm_remove_node(node
);
666 for (m
= 0; m
< n
; m
++) {
667 node
= &nodes
[order
[(o
+ m
) % count
]];
668 if (!expect_insert(&mm
, node
, size
, 0, n
, mode
)) {
669 pr_err("%s multiple reinsert failed, size %llu step %d\n",
670 mode
->name
, size
, n
);
677 if (!assert_continuous(&mm
, size
))
680 if (!expect_insert_fail(&mm
, size
))
684 drm_mm_for_each_node_safe(node
, next
, &mm
)
685 drm_mm_remove_node(node
);
686 DRM_MM_BUG_ON(!drm_mm_clean(&mm
));
693 drm_mm_for_each_node_safe(node
, next
, &mm
)
694 drm_mm_remove_node(node
);
695 drm_mm_takedown(&mm
);
703 static int igt_insert(void *ignored
)
705 const unsigned int count
= min_t(unsigned int, BIT(10), max_iterations
);
709 for_each_prime_number_from(n
, 1, 54) {
710 u64 size
= BIT_ULL(n
);
712 ret
= __igt_insert(count
, size
- 1, false);
716 ret
= __igt_insert(count
, size
, false);
720 ret
= __igt_insert(count
, size
+ 1, false);
730 static int igt_replace(void *ignored
)
732 const unsigned int count
= min_t(unsigned int, BIT(10), max_iterations
);
736 /* Reuse igt_insert to exercise replacement by inserting a dummy node,
737 * then replacing it with the intended node. We want to check that
738 * the tree is intact and all the information we need is carried
739 * across to the target node.
742 for_each_prime_number_from(n
, 1, 54) {
743 u64 size
= BIT_ULL(n
);
745 ret
= __igt_insert(count
, size
- 1, true);
749 ret
= __igt_insert(count
, size
, true);
753 ret
= __igt_insert(count
, size
+ 1, true);
763 static bool expect_insert_in_range(struct drm_mm
*mm
, struct drm_mm_node
*node
,
764 u64 size
, u64 alignment
, unsigned long color
,
765 u64 range_start
, u64 range_end
,
766 const struct insert_mode
*mode
)
770 err
= drm_mm_insert_node_in_range(mm
, node
,
771 size
, alignment
, color
,
772 range_start
, range_end
,
775 pr_err("insert (size=%llu, alignment=%llu, color=%lu, mode=%s) nto range [%llx, %llx] failed with err=%d\n",
776 size
, alignment
, color
, mode
->name
,
777 range_start
, range_end
, err
);
781 if (!assert_node(node
, mm
, size
, alignment
, color
)) {
782 drm_mm_remove_node(node
);
789 static bool expect_insert_in_range_fail(struct drm_mm
*mm
,
794 struct drm_mm_node tmp
= {};
797 err
= drm_mm_insert_node_in_range(mm
, &tmp
,
799 range_start
, range_end
,
801 if (likely(err
== -ENOSPC
))
805 pr_err("impossible insert succeeded, node %llx + %llu, range [%llx, %llx]\n",
806 tmp
.start
, tmp
.size
, range_start
, range_end
);
807 drm_mm_remove_node(&tmp
);
809 pr_err("impossible insert failed with wrong error %d [expected %d], size %llu, range [%llx, %llx]\n",
810 err
, -ENOSPC
, size
, range_start
, range_end
);
816 static bool assert_contiguous_in_range(struct drm_mm
*mm
,
821 struct drm_mm_node
*node
;
824 if (!expect_insert_in_range_fail(mm
, size
, start
, end
))
827 n
= div64_u64(start
+ size
- 1, size
);
828 drm_mm_for_each_node(node
, mm
) {
829 if (node
->start
< start
|| node
->start
+ node
->size
> end
) {
830 pr_err("node %d out of range, address [%llx + %llu], range [%llx, %llx]\n",
831 n
, node
->start
, node
->start
+ node
->size
, start
, end
);
835 if (node
->start
!= n
* size
) {
836 pr_err("node %d out of order, expected start %llx, found %llx\n",
837 n
, n
* size
, node
->start
);
841 if (node
->size
!= size
) {
842 pr_err("node %d has wrong size, expected size %llx, found %llx\n",
843 n
, size
, node
->size
);
847 if (drm_mm_hole_follows(node
) &&
848 drm_mm_hole_node_end(node
) < end
) {
849 pr_err("node %d is followed by a hole!\n", n
);
857 node
= __drm_mm_interval_first(mm
, 0, start
- 1);
858 if (drm_mm_node_allocated(node
)) {
859 pr_err("node before start: node=%llx+%llu, start=%llx\n",
860 node
->start
, node
->size
, start
);
866 node
= __drm_mm_interval_first(mm
, end
, U64_MAX
);
867 if (drm_mm_node_allocated(node
)) {
868 pr_err("node after end: node=%llx+%llu, end=%llx\n",
869 node
->start
, node
->size
, end
);
877 static int __igt_insert_range(unsigned int count
, u64 size
, u64 start
, u64 end
)
879 const struct insert_mode
*mode
;
881 struct drm_mm_node
*nodes
, *node
, *next
;
882 unsigned int n
, start_n
, end_n
;
885 DRM_MM_BUG_ON(!count
);
886 DRM_MM_BUG_ON(!size
);
887 DRM_MM_BUG_ON(end
<= start
);
889 /* Very similar to __igt_insert(), but now instead of populating the
890 * full range of the drm_mm, we try to fill a small portion of it.
894 nodes
= vzalloc(array_size(count
, sizeof(*nodes
)));
899 drm_mm_init(&mm
, 0, count
* size
);
901 start_n
= div64_u64(start
+ size
- 1, size
);
902 end_n
= div64_u64(end
- size
, size
);
904 for (mode
= insert_modes
; mode
->name
; mode
++) {
905 for (n
= start_n
; n
<= end_n
; n
++) {
906 if (!expect_insert_in_range(&mm
, &nodes
[n
],
909 pr_err("%s insert failed, size %llu, step %d [%d, %d], range [%llx, %llx]\n",
917 if (!assert_contiguous_in_range(&mm
, size
, start
, end
)) {
918 pr_err("%s: range [%llx, %llx] not full after initialisation, size=%llu\n",
919 mode
->name
, start
, end
, size
);
923 /* Remove one and reinsert, it should refill itself */
924 for (n
= start_n
; n
<= end_n
; n
++) {
925 u64 addr
= nodes
[n
].start
;
927 drm_mm_remove_node(&nodes
[n
]);
928 if (!expect_insert_in_range(&mm
, &nodes
[n
],
931 pr_err("%s reinsert failed, step %d\n", mode
->name
, n
);
935 if (nodes
[n
].start
!= addr
) {
936 pr_err("%s reinsert node moved, step %d, expected %llx, found %llx\n",
937 mode
->name
, n
, addr
, nodes
[n
].start
);
942 if (!assert_contiguous_in_range(&mm
, size
, start
, end
)) {
943 pr_err("%s: range [%llx, %llx] not full after reinsertion, size=%llu\n",
944 mode
->name
, start
, end
, size
);
948 drm_mm_for_each_node_safe(node
, next
, &mm
)
949 drm_mm_remove_node(node
);
950 DRM_MM_BUG_ON(!drm_mm_clean(&mm
));
957 drm_mm_for_each_node_safe(node
, next
, &mm
)
958 drm_mm_remove_node(node
);
959 drm_mm_takedown(&mm
);
965 static int insert_outside_range(void)
968 const unsigned int start
= 1024;
969 const unsigned int end
= 2048;
970 const unsigned int size
= end
- start
;
972 drm_mm_init(&mm
, start
, size
);
974 if (!expect_insert_in_range_fail(&mm
, 1, 0, start
))
977 if (!expect_insert_in_range_fail(&mm
, size
,
978 start
- size
/2, start
+ (size
+1)/2))
981 if (!expect_insert_in_range_fail(&mm
, size
,
982 end
- (size
+1)/2, end
+ size
/2))
985 if (!expect_insert_in_range_fail(&mm
, 1, end
, end
+ size
))
988 drm_mm_takedown(&mm
);
992 static int igt_insert_range(void *ignored
)
994 const unsigned int count
= min_t(unsigned int, BIT(13), max_iterations
);
998 /* Check that requests outside the bounds of drm_mm are rejected. */
999 ret
= insert_outside_range();
1003 for_each_prime_number_from(n
, 1, 50) {
1004 const u64 size
= BIT_ULL(n
);
1005 const u64 max
= count
* size
;
1007 ret
= __igt_insert_range(count
, size
, 0, max
);
1011 ret
= __igt_insert_range(count
, size
, 1, max
);
1015 ret
= __igt_insert_range(count
, size
, 0, max
- 1);
1019 ret
= __igt_insert_range(count
, size
, 0, max
/2);
1023 ret
= __igt_insert_range(count
, size
, max
/2, max
);
1027 ret
= __igt_insert_range(count
, size
, max
/4+1, 3*max
/4-1);
1037 static int prepare_igt_frag(struct drm_mm
*mm
,
1038 struct drm_mm_node
*nodes
,
1039 unsigned int num_insert
,
1040 const struct insert_mode
*mode
)
1042 unsigned int size
= 4096;
1045 for (i
= 0; i
< num_insert
; i
++) {
1046 if (!expect_insert(mm
, &nodes
[i
], size
, 0, i
,
1048 pr_err("%s insert failed\n", mode
->name
);
1053 /* introduce fragmentation by freeing every other node */
1054 for (i
= 0; i
< num_insert
; i
++) {
1056 drm_mm_remove_node(&nodes
[i
]);
1063 static u64
get_insert_time(struct drm_mm
*mm
,
1064 unsigned int num_insert
,
1065 struct drm_mm_node
*nodes
,
1066 const struct insert_mode
*mode
)
1068 unsigned int size
= 8192;
1072 start
= ktime_get();
1073 for (i
= 0; i
< num_insert
; i
++) {
1074 if (!expect_insert(mm
, &nodes
[i
], size
, 0, i
, mode
) != 0) {
1075 pr_err("%s insert failed\n", mode
->name
);
1080 return ktime_to_ns(ktime_sub(ktime_get(), start
));
1083 static int igt_frag(void *ignored
)
1086 const struct insert_mode
*mode
;
1087 struct drm_mm_node
*nodes
, *node
, *next
;
1088 unsigned int insert_size
= 10000;
1089 unsigned int scale_factor
= 4;
1092 /* We need 4 * insert_size nodes to hold intermediate allocated
1094 * 1 times for prepare_igt_frag()
1095 * 1 times for get_insert_time()
1096 * 2 times for get_insert_time()
1098 nodes
= vzalloc(array_size(insert_size
* 4, sizeof(*nodes
)));
1102 /* For BOTTOMUP and TOPDOWN, we first fragment the
1103 * address space using prepare_igt_frag() and then try to verify
1104 * that that insertions scale quadratically from 10k to 20k insertions
1106 drm_mm_init(&mm
, 1, U64_MAX
- 2);
1107 for (mode
= insert_modes
; mode
->name
; mode
++) {
1108 u64 insert_time1
, insert_time2
;
1110 if (mode
->mode
!= DRM_MM_INSERT_LOW
&&
1111 mode
->mode
!= DRM_MM_INSERT_HIGH
)
1114 ret
= prepare_igt_frag(&mm
, nodes
, insert_size
, mode
);
1118 insert_time1
= get_insert_time(&mm
, insert_size
,
1119 nodes
+ insert_size
, mode
);
1120 if (insert_time1
== 0)
1123 insert_time2
= get_insert_time(&mm
, (insert_size
* 2),
1124 nodes
+ insert_size
* 2, mode
);
1125 if (insert_time2
== 0)
1128 pr_info("%s fragmented insert of %u and %u insertions took %llu and %llu nsecs\n",
1129 mode
->name
, insert_size
, insert_size
* 2,
1130 insert_time1
, insert_time2
);
1132 if (insert_time2
> (scale_factor
* insert_time1
)) {
1133 pr_err("%s fragmented insert took %llu nsecs more\n",
1135 insert_time2
- (scale_factor
* insert_time1
));
1139 drm_mm_for_each_node_safe(node
, next
, &mm
)
1140 drm_mm_remove_node(node
);
1145 drm_mm_for_each_node_safe(node
, next
, &mm
)
1146 drm_mm_remove_node(node
);
1147 drm_mm_takedown(&mm
);
1153 static int igt_align(void *ignored
)
1155 const struct insert_mode
*mode
;
1156 const unsigned int max_count
= min(8192u, max_prime
);
1158 struct drm_mm_node
*nodes
, *node
, *next
;
1162 /* For each of the possible insertion modes, we pick a few
1163 * arbitrary alignments and check that the inserted node
1164 * meets our requirements.
1167 nodes
= vzalloc(array_size(max_count
, sizeof(*nodes
)));
1171 drm_mm_init(&mm
, 1, U64_MAX
- 2);
1173 for (mode
= insert_modes
; mode
->name
; mode
++) {
1176 for_each_prime_number_from(prime
, 1, max_count
) {
1177 u64 size
= next_prime_number(prime
);
1179 if (!expect_insert(&mm
, &nodes
[i
],
1182 pr_err("%s insert failed with alignment=%d",
1190 drm_mm_for_each_node_safe(node
, next
, &mm
)
1191 drm_mm_remove_node(node
);
1192 DRM_MM_BUG_ON(!drm_mm_clean(&mm
));
1199 drm_mm_for_each_node_safe(node
, next
, &mm
)
1200 drm_mm_remove_node(node
);
1201 drm_mm_takedown(&mm
);
1207 static int igt_align_pot(int max
)
1210 struct drm_mm_node
*node
, *next
;
1214 /* Check that we can align to the full u64 address space */
1216 drm_mm_init(&mm
, 1, U64_MAX
- 2);
1218 for (bit
= max
- 1; bit
; bit
--) {
1221 node
= kzalloc(sizeof(*node
), GFP_KERNEL
);
1227 align
= BIT_ULL(bit
);
1228 size
= BIT_ULL(bit
-1) + 1;
1229 if (!expect_insert(&mm
, node
,
1231 &insert_modes
[0])) {
1232 pr_err("insert failed with alignment=%llx [%d]",
1242 drm_mm_for_each_node_safe(node
, next
, &mm
) {
1243 drm_mm_remove_node(node
);
1246 drm_mm_takedown(&mm
);
/* Exercise power-of-two alignments across the 32-bit address range. */
static int igt_align32(void *ignored)
{
	return igt_align_pot(32);
}
/* Exercise power-of-two alignments across the full 64-bit address range. */
static int igt_align64(void *ignored)
{
	return igt_align_pot(64);
}
1260 static void show_scan(const struct drm_mm_scan
*scan
)
1262 pr_info("scan: hit [%llx, %llx], size=%lld, align=%lld, color=%ld\n",
1263 scan
->hit_start
, scan
->hit_end
,
1264 scan
->size
, scan
->alignment
, scan
->color
);
1267 static void show_holes(const struct drm_mm
*mm
, int count
)
1269 u64 hole_start
, hole_end
;
1270 struct drm_mm_node
*hole
;
1272 drm_mm_for_each_hole(hole
, mm
, hole_start
, hole_end
) {
1273 struct drm_mm_node
*next
= list_next_entry(hole
, node_list
);
1274 const char *node1
= NULL
, *node2
= NULL
;
1276 if (drm_mm_node_allocated(hole
))
1277 node1
= kasprintf(GFP_KERNEL
,
1278 "[%llx + %lld, color=%ld], ",
1279 hole
->start
, hole
->size
, hole
->color
);
1281 if (drm_mm_node_allocated(next
))
1282 node2
= kasprintf(GFP_KERNEL
,
1283 ", [%llx + %lld, color=%ld]",
1284 next
->start
, next
->size
, next
->color
);
1286 pr_info("%sHole [%llx - %llx, size %lld]%s\n",
1288 hole_start
, hole_end
, hole_end
- hole_start
,
1300 struct drm_mm_node node
;
1301 struct list_head link
;
1304 static bool evict_nodes(struct drm_mm_scan
*scan
,
1305 struct evict_node
*nodes
,
1306 unsigned int *order
,
1309 struct list_head
*evict_list
)
1311 struct evict_node
*e
, *en
;
1314 for (i
= 0; i
< count
; i
++) {
1315 e
= &nodes
[order
? order
[i
] : i
];
1316 list_add(&e
->link
, evict_list
);
1317 if (drm_mm_scan_add_block(scan
, &e
->node
))
1320 list_for_each_entry_safe(e
, en
, evict_list
, link
) {
1321 if (!drm_mm_scan_remove_block(scan
, &e
->node
))
1324 if (list_empty(evict_list
)) {
1325 pr_err("Failed to find eviction: size=%lld [avail=%d], align=%lld (color=%lu)\n",
1326 scan
->size
, count
, scan
->alignment
, scan
->color
);
1330 list_for_each_entry(e
, evict_list
, link
)
1331 drm_mm_remove_node(&e
->node
);
1334 struct drm_mm_node
*node
;
1336 while ((node
= drm_mm_scan_color_evict(scan
))) {
1337 e
= container_of(node
, typeof(*e
), node
);
1338 drm_mm_remove_node(&e
->node
);
1339 list_add(&e
->link
, evict_list
);
1342 if (drm_mm_scan_color_evict(scan
)) {
1343 pr_err("drm_mm_scan_color_evict unexpectedly reported overlapping nodes!\n");
1351 static bool evict_nothing(struct drm_mm
*mm
,
1352 unsigned int total_size
,
1353 struct evict_node
*nodes
)
1355 struct drm_mm_scan scan
;
1356 LIST_HEAD(evict_list
);
1357 struct evict_node
*e
;
1358 struct drm_mm_node
*node
;
1361 drm_mm_scan_init(&scan
, mm
, 1, 0, 0, 0);
1362 for (n
= 0; n
< total_size
; n
++) {
1364 list_add(&e
->link
, &evict_list
);
1365 drm_mm_scan_add_block(&scan
, &e
->node
);
1367 list_for_each_entry(e
, &evict_list
, link
)
1368 drm_mm_scan_remove_block(&scan
, &e
->node
);
1370 for (n
= 0; n
< total_size
; n
++) {
1373 if (!drm_mm_node_allocated(&e
->node
)) {
1374 pr_err("node[%d] no longer allocated!\n", n
);
1378 e
->link
.next
= NULL
;
1381 drm_mm_for_each_node(node
, mm
) {
1382 e
= container_of(node
, typeof(*e
), node
);
1383 e
->link
.next
= &e
->link
;
1386 for (n
= 0; n
< total_size
; n
++) {
1389 if (!e
->link
.next
) {
1390 pr_err("node[%d] no longer connected!\n", n
);
1395 return assert_continuous(mm
, nodes
[0].node
.size
);
1398 static bool evict_everything(struct drm_mm
*mm
,
1399 unsigned int total_size
,
1400 struct evict_node
*nodes
)
1402 struct drm_mm_scan scan
;
1403 LIST_HEAD(evict_list
);
1404 struct evict_node
*e
;
1408 drm_mm_scan_init(&scan
, mm
, total_size
, 0, 0, 0);
1409 for (n
= 0; n
< total_size
; n
++) {
1411 list_add(&e
->link
, &evict_list
);
1412 if (drm_mm_scan_add_block(&scan
, &e
->node
))
1417 list_for_each_entry(e
, &evict_list
, link
) {
1418 if (!drm_mm_scan_remove_block(&scan
, &e
->node
)) {
1420 pr_err("Node %lld not marked for eviction!\n",
1429 list_for_each_entry(e
, &evict_list
, link
)
1430 drm_mm_remove_node(&e
->node
);
1432 if (!assert_one_hole(mm
, 0, total_size
))
1435 list_for_each_entry(e
, &evict_list
, link
) {
1436 err
= drm_mm_reserve_node(mm
, &e
->node
);
1438 pr_err("Failed to reinsert node after eviction: start=%llx\n",
1444 return assert_continuous(mm
, nodes
[0].node
.size
);
1447 static int evict_something(struct drm_mm
*mm
,
1448 u64 range_start
, u64 range_end
,
1449 struct evict_node
*nodes
,
1450 unsigned int *order
,
1453 unsigned int alignment
,
1454 const struct insert_mode
*mode
)
1456 struct drm_mm_scan scan
;
1457 LIST_HEAD(evict_list
);
1458 struct evict_node
*e
;
1459 struct drm_mm_node tmp
;
1462 drm_mm_scan_init_with_range(&scan
, mm
,
1464 range_start
, range_end
,
1466 if (!evict_nodes(&scan
,
1467 nodes
, order
, count
, false,
1471 memset(&tmp
, 0, sizeof(tmp
));
1472 err
= drm_mm_insert_node_generic(mm
, &tmp
, size
, alignment
, 0,
1473 DRM_MM_INSERT_EVICT
);
1475 pr_err("Failed to insert into eviction hole: size=%d, align=%d\n",
1482 if (tmp
.start
< range_start
|| tmp
.start
+ tmp
.size
> range_end
) {
1483 pr_err("Inserted [address=%llu + %llu] did not fit into the request range [%llu, %llu]\n",
1484 tmp
.start
, tmp
.size
, range_start
, range_end
);
1488 if (!assert_node(&tmp
, mm
, size
, alignment
, 0) ||
1489 drm_mm_hole_follows(&tmp
)) {
1490 pr_err("Inserted did not fill the eviction hole: size=%lld [%d], align=%d [rem=%lld], start=%llx, hole-follows?=%d\n",
1492 alignment
, misalignment(&tmp
, alignment
),
1493 tmp
.start
, drm_mm_hole_follows(&tmp
));
1497 drm_mm_remove_node(&tmp
);
1501 list_for_each_entry(e
, &evict_list
, link
) {
1502 err
= drm_mm_reserve_node(mm
, &e
->node
);
1504 pr_err("Failed to reinsert node after eviction: start=%llx\n",
1510 if (!assert_continuous(mm
, nodes
[0].node
.size
)) {
1511 pr_err("range is no longer continuous\n");
1518 static int igt_evict(void *ignored
)
1520 DRM_RND_STATE(prng
, random_seed
);
1521 const unsigned int size
= 8192;
1522 const struct insert_mode
*mode
;
1524 struct evict_node
*nodes
;
1525 struct drm_mm_node
*node
, *next
;
1526 unsigned int *order
, n
;
1529 /* Here we populate a full drm_mm and then try and insert a new node
1530 * by evicting other nodes in a random order. The drm_mm_scan should
1531 * pick the first matching hole it finds from the random list. We
1532 * repeat that for different allocation strategies, alignments and
1533 * sizes to try and stress the hole finder.
1537 nodes
= vzalloc(array_size(size
, sizeof(*nodes
)));
1541 order
= drm_random_order(size
, &prng
);
1546 drm_mm_init(&mm
, 0, size
);
1547 for (n
= 0; n
< size
; n
++) {
1548 err
= drm_mm_insert_node(&mm
, &nodes
[n
].node
, 1);
1550 pr_err("insert failed, step %d\n", n
);
1556 /* First check that using the scanner doesn't break the mm */
1557 if (!evict_nothing(&mm
, size
, nodes
)) {
1558 pr_err("evict_nothing() failed\n");
1561 if (!evict_everything(&mm
, size
, nodes
)) {
1562 pr_err("evict_everything() failed\n");
1566 for (mode
= evict_modes
; mode
->name
; mode
++) {
1567 for (n
= 1; n
<= size
; n
<<= 1) {
1568 drm_random_reorder(order
, size
, &prng
);
1569 err
= evict_something(&mm
, 0, U64_MAX
,
1574 pr_err("%s evict_something(size=%u) failed\n",
1581 for (n
= 1; n
< size
; n
<<= 1) {
1582 drm_random_reorder(order
, size
, &prng
);
1583 err
= evict_something(&mm
, 0, U64_MAX
,
1588 pr_err("%s evict_something(size=%u, alignment=%u) failed\n",
1589 mode
->name
, size
/2, n
);
1595 for_each_prime_number_from(n
, 1, min(size
, max_prime
)) {
1596 unsigned int nsize
= (size
- n
+ 1) / 2;
1598 DRM_MM_BUG_ON(!nsize
);
1600 drm_random_reorder(order
, size
, &prng
);
1601 err
= evict_something(&mm
, 0, U64_MAX
,
1606 pr_err("%s evict_something(size=%u, alignment=%u) failed\n",
1607 mode
->name
, nsize
, n
);
1618 drm_mm_for_each_node_safe(node
, next
, &mm
)
1619 drm_mm_remove_node(node
);
1620 drm_mm_takedown(&mm
);
/*
 * igt_evict_range() — like igt_evict(), but restricts the eviction scan to a
 * sub-range [size/4, size/4 + size/2) of the drm_mm.  The mm is filled with
 * 8192 unit nodes, then evict_something() is exercised over the restricted
 * window for power-of-two sizes, half-range sizes with power-of-two
 * alignments, and prime alignments, under every eviction insert mode.
 *
 * NOTE(review): this chunk is a mangled extraction — the original file's line
 * numbers are fused into the text and several lines (error checks, goto
 * labels, the cleanup/return tail) were dropped.  Do not treat this text as
 * compilable; restore from the pristine source before changing logic.
 */
1628 static int igt_evict_range(void *ignored
)
1630 DRM_RND_STATE(prng
, random_seed
);
1631 const unsigned int size
= 8192;
1632 const unsigned int range_size
= size
/ 2;
1633 const unsigned int range_start
= size
/ 4;
1634 const unsigned int range_end
= range_start
+ range_size
;
1635 const struct insert_mode
*mode
;
1637 struct evict_node
*nodes
;
1638 struct drm_mm_node
*node
, *next
;
1639 unsigned int *order
, n
;
1642 /* Like igt_evict() but now we are limiting the search to a
1643 * small portion of the full drm_mm.
/* Fill the whole mm with unit-sized nodes so every eviction must displace something. */
1647 nodes
= vzalloc(array_size(size
, sizeof(*nodes
)));
1651 order
= drm_random_order(size
, &prng
);
1656 drm_mm_init(&mm
, 0, size
);
1657 for (n
= 0; n
< size
; n
++) {
1658 err
= drm_mm_insert_node(&mm
, &nodes
[n
].node
, 1);
1660 pr_err("insert failed, step %d\n", n
);
/* Exercise every eviction insert mode over the restricted window. */
1666 for (mode
= evict_modes
; mode
->name
; mode
++) {
1667 for (n
= 1; n
<= range_size
; n
<<= 1) {
1668 drm_random_reorder(order
, size
, &prng
);
1669 err
= evict_something(&mm
, range_start
, range_end
,
1674 pr_err("%s evict_something(size=%u) failed with range [%u, %u]\n",
1675 mode
->name
, n
, range_start
, range_end
);
1680 for (n
= 1; n
<= range_size
; n
<<= 1) {
1681 drm_random_reorder(order
, size
, &prng
);
1682 err
= evict_something(&mm
, range_start
, range_end
,
1687 pr_err("%s evict_something(size=%u, alignment=%u) failed with range [%u, %u]\n",
1688 mode
->name
, range_size
/2, n
, range_start
, range_end
);
1693 for_each_prime_number_from(n
, 1, min(range_size
, max_prime
)) {
1694 unsigned int nsize
= (range_size
- n
+ 1) / 2;
1696 DRM_MM_BUG_ON(!nsize
);
1698 drm_random_reorder(order
, size
, &prng
);
1699 err
= evict_something(&mm
, range_start
, range_end
,
1704 pr_err("%s evict_something(size=%u, alignment=%u) failed with range [%u, %u]\n",
1705 mode
->name
, nsize
, n
, range_start
, range_end
);
/* Teardown: remove every node before destroying the mm. */
1715 drm_mm_for_each_node_safe(node
, next
, &mm
)
1716 drm_mm_remove_node(node
);
1717 drm_mm_takedown(&mm
);
1725 static unsigned int node_index(const struct drm_mm_node
*node
)
1727 return div64_u64(node
->start
, node
->size
);
/*
 * igt_topdown() — verify DRM_MM_INSERT_HIGH placement.  After filling the mm
 * top-down there must be no hole following any node and exactly one hole at
 * the bottom; then random subsets of nodes are removed (tracked in a bitmap)
 * and reinserted, each reinsertion expected to land in the highest free slot
 * (checked against find_last_bit()).
 *
 * NOTE(review): mangled extraction — fused line numbers, missing error paths
 * and labels.  Restore from pristine source before editing logic.
 */
1730 static int igt_topdown(void *ignored
)
1732 const struct insert_mode
*topdown
= &insert_modes
[TOPDOWN
];
1733 DRM_RND_STATE(prng
, random_seed
);
1734 const unsigned int count
= 8192;
1736 unsigned long *bitmap
;
1738 struct drm_mm_node
*nodes
, *node
, *next
;
1739 unsigned int *order
, n
, m
, o
= 0;
1742 /* When allocating top-down, we expect to be returned a node
1743 * from a suitable hole at the top of the drm_mm. We check that
1744 * the returned node does match the highest available slot.
1748 nodes
= vzalloc(array_size(count
, sizeof(*nodes
)));
/* Bitmap tracks which slot indices have been freed during the churn phase. */
1752 bitmap
= bitmap_zalloc(count
, GFP_KERNEL
);
1756 order
= drm_random_order(count
, &prng
);
1761 for (size
= 1; size
<= 64; size
<<= 1) {
1762 drm_mm_init(&mm
, 0, size
*count
);
1763 for (n
= 0; n
< count
; n
++) {
1764 if (!expect_insert(&mm
, &nodes
[n
],
1767 pr_err("insert failed, size %u step %d\n", size
, n
);
/* Top-down insertion must never leave a hole above the new node. */
1771 if (drm_mm_hole_follows(&nodes
[n
])) {
1772 pr_err("hole after topdown insert %d, start=%llx\n, size=%u",
1773 n
, nodes
[n
].start
, size
);
1777 if (!assert_one_hole(&mm
, 0, size
*(count
- n
- 1)))
1781 if (!assert_continuous(&mm
, size
))
/* Churn: remove a random prime-sized subset, then reinsert top-down. */
1784 drm_random_reorder(order
, count
, &prng
);
1785 for_each_prime_number_from(n
, 1, min(count
, max_prime
)) {
1786 for (m
= 0; m
< n
; m
++) {
1787 node
= &nodes
[order
[(o
+ m
) % count
]];
1788 drm_mm_remove_node(node
);
1789 __set_bit(node_index(node
), bitmap
);
1792 for (m
= 0; m
< n
; m
++) {
1795 node
= &nodes
[order
[(o
+ m
) % count
]];
1796 if (!expect_insert(&mm
, node
,
1799 pr_err("insert failed, step %d/%d\n", m
, n
);
1803 if (drm_mm_hole_follows(node
)) {
1804 pr_err("hole after topdown insert %d/%d, start=%llx\n",
/* Each reinsertion must claim the highest free slot still marked in the bitmap. */
1809 last
= find_last_bit(bitmap
, count
);
1810 if (node_index(node
) != last
) {
1811 pr_err("node %d/%d, size %d, not inserted into upmost hole, expected %d, found %d\n",
1812 m
, n
, size
, last
, node_index(node
));
1816 __clear_bit(last
, bitmap
);
1819 DRM_MM_BUG_ON(find_first_bit(bitmap
, count
) != count
);
1824 drm_mm_for_each_node_safe(node
, next
, &mm
)
1825 drm_mm_remove_node(node
);
1826 DRM_MM_BUG_ON(!drm_mm_clean(&mm
));
/* Teardown for the current size iteration / error path. */
1832 drm_mm_for_each_node_safe(node
, next
, &mm
)
1833 drm_mm_remove_node(node
);
1834 drm_mm_takedown(&mm
);
1837 bitmap_free(bitmap
);
/*
 * igt_bottomup() — mirror of igt_topdown() for DRM_MM_INSERT_LOW: after each
 * bottom-up insertion the single hole must sit above the filled region, and
 * each reinsertion during the churn phase must claim the lowest free slot
 * (checked against find_first_bit()).
 *
 * NOTE(review): mangled extraction — fused line numbers, missing error paths
 * and labels.  Restore from pristine source before editing logic.
 */
1844 static int igt_bottomup(void *ignored
)
1846 const struct insert_mode
*bottomup
= &insert_modes
[BOTTOMUP
];
1847 DRM_RND_STATE(prng
, random_seed
);
1848 const unsigned int count
= 8192;
1850 unsigned long *bitmap
;
1852 struct drm_mm_node
*nodes
, *node
, *next
;
1853 unsigned int *order
, n
, m
, o
= 0;
1856 /* Like igt_topdown, but instead of searching for the last hole,
1857 * we search for the first.
1861 nodes
= vzalloc(array_size(count
, sizeof(*nodes
)));
1865 bitmap
= bitmap_zalloc(count
, GFP_KERNEL
);
1869 order
= drm_random_order(count
, &prng
);
1874 for (size
= 1; size
<= 64; size
<<= 1) {
1875 drm_mm_init(&mm
, 0, size
*count
);
1876 for (n
= 0; n
< count
; n
++) {
1877 if (!expect_insert(&mm
, &nodes
[n
],
1880 pr_err("bottomup insert failed, size %u step %d\n", size
, n
);
/* The one remaining hole must sit above the nodes inserted so far. */
1884 if (!assert_one_hole(&mm
, size
*(n
+ 1), size
*count
))
1888 if (!assert_continuous(&mm
, size
))
/* Churn: free a random prime-sized subset, then reinsert bottom-up. */
1891 drm_random_reorder(order
, count
, &prng
);
1892 for_each_prime_number_from(n
, 1, min(count
, max_prime
)) {
1893 for (m
= 0; m
< n
; m
++) {
1894 node
= &nodes
[order
[(o
+ m
) % count
]];
1895 drm_mm_remove_node(node
);
1896 __set_bit(node_index(node
), bitmap
);
1899 for (m
= 0; m
< n
; m
++) {
1902 node
= &nodes
[order
[(o
+ m
) % count
]];
1903 if (!expect_insert(&mm
, node
,
1906 pr_err("insert failed, step %d/%d\n", m
, n
);
/* Each reinsertion must claim the lowest free slot still marked in the bitmap. */
1910 first
= find_first_bit(bitmap
, count
);
1911 if (node_index(node
) != first
) {
1912 pr_err("node %d/%d not inserted into bottom hole, expected %d, found %d\n",
1913 m
, n
, first
, node_index(node
));
1916 __clear_bit(first
, bitmap
);
1919 DRM_MM_BUG_ON(find_first_bit(bitmap
, count
) != count
);
1924 drm_mm_for_each_node_safe(node
, next
, &mm
)
1925 drm_mm_remove_node(node
);
1926 DRM_MM_BUG_ON(!drm_mm_clean(&mm
));
/* Teardown for the current size iteration / error path. */
1932 drm_mm_for_each_node_safe(node
, next
, &mm
)
1933 drm_mm_remove_node(node
);
1934 drm_mm_takedown(&mm
);
1937 bitmap_free(bitmap
);
/*
 * __igt_once() — smoke test for a single insertion with the given placement
 * mode: build a tiny 7-unit mm, reserve a low and a high node (each followed
 * by a hole), then insert a 2-unit node with the requested mode and tear
 * everything back down.
 *
 * NOTE(review): mangled extraction — node start/size assignments, error
 * checks and labels were dropped.  Restore from pristine source before
 * editing logic.
 */
1944 static int __igt_once(unsigned int mode
)
1947 struct drm_mm_node rsvd_lo
, rsvd_hi
, node
;
1950 drm_mm_init(&mm
, 0, 7);
1952 memset(&rsvd_lo
, 0, sizeof(rsvd_lo
));
1955 err
= drm_mm_reserve_node(&mm
, &rsvd_lo
);
1957 pr_err("Could not reserve low node\n");
1961 memset(&rsvd_hi
, 0, sizeof(rsvd_hi
));
1964 err
= drm_mm_reserve_node(&mm
, &rsvd_hi
);
/* NOTE(review): this is the HIGH-node reserve, yet the message says "low
 * node" — looks like a copy-paste error in the failure text; confirm and
 * fix against the pristine source. */
1966 pr_err("Could not reserve low node\n");
1970 if (!drm_mm_hole_follows(&rsvd_lo
) || !drm_mm_hole_follows(&rsvd_hi
)) {
1971 pr_err("Expected a hole after lo and high nodes!\n");
1976 memset(&node
, 0, sizeof(node
));
1977 err
= drm_mm_insert_node_generic(&mm
, &node
, 2, 0, 0, mode
);
1979 pr_err("Could not insert the node into the available hole!\n");
1984 drm_mm_remove_node(&node
);
1986 drm_mm_remove_node(&rsvd_hi
);
1988 drm_mm_remove_node(&rsvd_lo
);
1990 drm_mm_takedown(&mm
);
1994 static int igt_lowest(void *ignored
)
1996 return __igt_once(DRM_MM_INSERT_LOW
);
1999 static int igt_highest(void *ignored
)
2001 return __igt_once(DRM_MM_INSERT_HIGH
);
2004 static void separate_adjacent_colors(const struct drm_mm_node
*node
,
2005 unsigned long color
,
2009 if (drm_mm_node_allocated(node
) && node
->color
!= color
)
2012 node
= list_next_entry(node
, node_list
);
2013 if (drm_mm_node_allocated(node
) && node
->color
!= color
)
/*
 * colors_abutt() — check that a node is separated from its successor.  If no
 * hole follows the node and the next node in address order is allocated, the
 * two differently-colored nodes touch, which the separate_adjacent_colors
 * callback should have prevented; log the offending pair.
 *
 * NOTE(review): mangled extraction — the return statements were dropped;
 * upstream returns true on the abutting case and false otherwise.
 */
2017 static bool colors_abutt(const struct drm_mm_node
*node
)
2019 if (!drm_mm_hole_follows(node
) &&
2020 drm_mm_node_allocated(list_next_entry(node
, node_list
))) {
2021 pr_err("colors abutt; %ld [%llx + %llx] is next to %ld [%llx + %llx]!\n",
2022 node
->color
, node
->start
, node
->size
,
2023 list_next_entry(node
, node_list
)->color
,
2024 list_next_entry(node
, node_list
)->start
,
2025 list_next_entry(node
, node_list
)->size
);
/*
 * igt_color() — exercise the color_adjust machinery.  Phase one inserts
 * nodes with color == size and checks the color is stored verbatim.  Phase
 * two installs separate_adjacent_colors and, for every insert mode, checks
 * that reservation reports color overlap (-ENOSPC) until the start is
 * adjusted, that inserted nodes never abut, and that colored nodes keep the
 * expected alignment.
 *
 * NOTE(review): mangled extraction — fused line numbers; kzalloc failure
 * checks, gotos and labels were dropped.  Restore from pristine source
 * before editing logic.
 */
2032 static int igt_color(void *ignored
)
2034 const unsigned int count
= min(4096u, max_iterations
);
2035 const struct insert_mode
*mode
;
2037 struct drm_mm_node
*node
, *nn
;
2039 int ret
= -EINVAL
, err
;
2041 /* Color adjustment complicates everything. First we just check
2042 * that when we insert a node we apply any color_adjustment callback.
2043 * The callback we use should ensure that there is a gap between
2044 * any two nodes, and so after each insertion we check that those
2045 * holes are inserted and that they are preserved.
2048 drm_mm_init(&mm
, 0, U64_MAX
);
/* Phase one: no color callback installed yet; color must be stored as-is. */
2050 for (n
= 1; n
<= count
; n
++) {
2051 node
= kzalloc(sizeof(*node
), GFP_KERNEL
);
2057 if (!expect_insert(&mm
, node
,
2059 &insert_modes
[0])) {
2060 pr_err("insert failed, step %d\n", n
);
2066 drm_mm_for_each_node_safe(node
, nn
, &mm
) {
2067 if (node
->color
!= node
->size
) {
2068 pr_err("invalid color stored: expected %lld, found %ld\n",
2069 node
->size
, node
->color
);
2074 drm_mm_remove_node(node
);
2078 /* Now, let's start experimenting with applying a color callback */
2079 mm
.color_adjust
= separate_adjacent_colors
;
2080 for (mode
= insert_modes
; mode
->name
; mode
++) {
/* Seed the mm with one large reserved node to collide against. */
2083 node
= kzalloc(sizeof(*node
), GFP_KERNEL
);
2089 node
->size
= 1 + 2*count
;
2090 node
->color
= node
->size
;
2092 err
= drm_mm_reserve_node(&mm
, node
);
2094 pr_err("initial reserve failed!\n");
2099 last
= node
->start
+ node
->size
;
/* Reserving adjacent to a differently-colored node must fail with -ENOSPC
 * until the start is bumped past the separating gap. */
2101 for (n
= 1; n
<= count
; n
++) {
2104 node
= kzalloc(sizeof(*node
), GFP_KERNEL
);
2111 node
->size
= n
+ count
;
2112 node
->color
= node
->size
;
2114 err
= drm_mm_reserve_node(&mm
, node
);
2115 if (err
!= -ENOSPC
) {
2116 pr_err("reserve %d did not report color overlap! err=%d\n",
2121 node
->start
+= n
+ 1;
2122 rem
= misalignment(node
, n
+ count
);
2123 node
->start
+= n
+ count
- rem
;
2125 err
= drm_mm_reserve_node(&mm
, node
);
2127 pr_err("reserve %d failed, err=%d\n", n
, err
);
2132 last
= node
->start
+ node
->size
;
/* Now insert colored nodes through the allocator and validate placement. */
2135 for (n
= 1; n
<= count
; n
++) {
2136 node
= kzalloc(sizeof(*node
), GFP_KERNEL
);
2142 if (!expect_insert(&mm
, node
,
2145 pr_err("%s insert failed, step %d\n",
2152 drm_mm_for_each_node_safe(node
, nn
, &mm
) {
2155 if (node
->color
!= node
->size
) {
2156 pr_err("%s invalid color stored: expected %lld, found %ld\n",
2157 mode
->name
, node
->size
, node
->color
);
2162 if (colors_abutt(node
))
2165 div64_u64_rem(node
->start
, node
->size
, &rem
);
2167 pr_err("%s colored node misaligned, start=%llx expected alignment=%lld [rem=%lld]\n",
2168 mode
->name
, node
->start
, node
->size
, rem
);
2172 drm_mm_remove_node(node
);
/* Cleanup: free every remaining node, then destroy the mm. */
2181 drm_mm_for_each_node_safe(node
, nn
, &mm
) {
2182 drm_mm_remove_node(node
);
2185 drm_mm_takedown(&mm
);
/*
 * evict_color() — one color-aware eviction round: run a range-restricted
 * drm_mm scan for (size, alignment, color), evict the chosen victims, insert
 * a temporary node into the freed hole with DRM_MM_INSERT_EVICT, then verify
 * the insertion landed in range, does not abut a differently-colored
 * neighbour, and satisfies size/alignment/color; finally remove the
 * temporary node and reinsert all evicted nodes.
 *
 * NOTE(review): mangled extraction — fused line numbers; error checks,
 * scan-colour flags and the return tail were dropped.  Restore from pristine
 * source before editing logic.
 */
2189 static int evict_color(struct drm_mm
*mm
,
2190 u64 range_start
, u64 range_end
,
2191 struct evict_node
*nodes
,
2192 unsigned int *order
,
2195 unsigned int alignment
,
2196 unsigned long color
,
2197 const struct insert_mode
*mode
)
2199 struct drm_mm_scan scan
;
2200 LIST_HEAD(evict_list
);
2201 struct evict_node
*e
;
2202 struct drm_mm_node tmp
;
2205 drm_mm_scan_init_with_range(&scan
, mm
,
2206 size
, alignment
, color
,
2207 range_start
, range_end
,
2209 if (!evict_nodes(&scan
,
2210 nodes
, order
, count
, true,
/* Insert into the hole the scan opened; must use the eviction mode. */
2214 memset(&tmp
, 0, sizeof(tmp
));
2215 err
= drm_mm_insert_node_generic(mm
, &tmp
, size
, alignment
, color
,
2216 DRM_MM_INSERT_EVICT
);
2218 pr_err("Failed to insert into eviction hole: size=%d, align=%d, color=%lu, err=%d\n",
2219 size
, alignment
, color
, err
);
2225 if (tmp
.start
< range_start
|| tmp
.start
+ tmp
.size
> range_end
) {
2226 pr_err("Inserted [address=%llu + %llu] did not fit into the request range [%llu, %llu]\n",
2227 tmp
.start
, tmp
.size
, range_start
, range_end
);
2231 if (colors_abutt(&tmp
))
2234 if (!assert_node(&tmp
, mm
, size
, alignment
, color
)) {
2235 pr_err("Inserted did not fit the eviction hole: size=%lld [%d], align=%d [rem=%lld], start=%llx\n",
2237 alignment
, misalignment(&tmp
, alignment
), tmp
.start
);
2241 drm_mm_remove_node(&tmp
);
/* Restore the mm: every evicted node goes back at its original position. */
2245 list_for_each_entry(e
, &evict_list
, link
) {
2246 err
= drm_mm_reserve_node(mm
, &e
->node
);
2248 pr_err("Failed to reinsert node after eviction: start=%llx\n",
/*
 * igt_color_evict() — check that drm_mm_scan honours color adjustment when
 * picking victims.  The mm (sized 2*total_size - 1 to leave room for the
 * separating gaps) is filled with colored nodes, then evict_color() is run
 * over the full range for power-of-two sizes, half-size + alignment, and
 * prime alignments, under every eviction insert mode.
 *
 * NOTE(review): mangled extraction — fused line numbers; error checks, gotos
 * and the cleanup/return tail were dropped.  Restore from pristine source
 * before editing logic.
 */
2258 static int igt_color_evict(void *ignored
)
2260 DRM_RND_STATE(prng
, random_seed
);
2261 const unsigned int total_size
= min(8192u, max_iterations
);
2262 const struct insert_mode
*mode
;
2263 unsigned long color
= 0;
2265 struct evict_node
*nodes
;
2266 struct drm_mm_node
*node
, *next
;
2267 unsigned int *order
, n
;
2270 /* Check that the drm_mm_scan also honours color adjustment when
2271 * choosing its victims to create a hole. Our color_adjust does not
2272 * allow two nodes to be placed together without an intervening hole
2273 * enlarging the set of victims that must be evicted.
2277 nodes
= vzalloc(array_size(total_size
, sizeof(*nodes
)));
2281 order
= drm_random_order(total_size
, &prng
);
2286 drm_mm_init(&mm
, 0, 2*total_size
- 1);
2287 mm
.color_adjust
= separate_adjacent_colors
;
2288 for (n
= 0; n
< total_size
; n
++) {
2289 if (!expect_insert(&mm
, &nodes
[n
].node
,
2291 &insert_modes
[0])) {
2292 pr_err("insert failed, step %d\n", n
);
/* Exercise color-aware eviction across the full range for every mode. */
2297 for (mode
= evict_modes
; mode
->name
; mode
++) {
2298 for (n
= 1; n
<= total_size
; n
<<= 1) {
2299 drm_random_reorder(order
, total_size
, &prng
);
2300 err
= evict_color(&mm
, 0, U64_MAX
,
2301 nodes
, order
, total_size
,
2305 pr_err("%s evict_color(size=%u) failed\n",
2311 for (n
= 1; n
< total_size
; n
<<= 1) {
2312 drm_random_reorder(order
, total_size
, &prng
);
2313 err
= evict_color(&mm
, 0, U64_MAX
,
2314 nodes
, order
, total_size
,
2315 total_size
/2, n
, color
++,
2318 pr_err("%s evict_color(size=%u, alignment=%u) failed\n",
2319 mode
->name
, total_size
/2, n
);
2324 for_each_prime_number_from(n
, 1, min(total_size
, max_prime
)) {
2325 unsigned int nsize
= (total_size
- n
+ 1) / 2;
2327 DRM_MM_BUG_ON(!nsize
);
2329 drm_random_reorder(order
, total_size
, &prng
);
2330 err
= evict_color(&mm
, 0, U64_MAX
,
2331 nodes
, order
, total_size
,
2335 pr_err("%s evict_color(size=%u, alignment=%u) failed\n",
2336 mode
->name
, nsize
, n
);
/* Cleanup: remove every node before destroying the mm. */
2348 drm_mm_for_each_node_safe(node
, next
, &mm
)
2349 drm_mm_remove_node(node
);
2350 drm_mm_takedown(&mm
);
/*
 * igt_color_evict_range() — like igt_color_evict(), but restricts the
 * color-aware eviction scans to the window
 * [total_size/4, total_size/4 + total_size/2).
 *
 * NOTE(review): mangled extraction — fused line numbers; error checks, gotos
 * and the cleanup/return tail were dropped.  Restore from pristine source
 * before editing logic.
 */
2358 static int igt_color_evict_range(void *ignored
)
2360 DRM_RND_STATE(prng
, random_seed
);
2361 const unsigned int total_size
= 8192;
2362 const unsigned int range_size
= total_size
/ 2;
2363 const unsigned int range_start
= total_size
/ 4;
2364 const unsigned int range_end
= range_start
+ range_size
;
2365 const struct insert_mode
*mode
;
2366 unsigned long color
= 0;
2368 struct evict_node
*nodes
;
2369 struct drm_mm_node
*node
, *next
;
2370 unsigned int *order
, n
;
2373 /* Like igt_color_evict(), but limited to small portion of the full
2378 nodes
= vzalloc(array_size(total_size
, sizeof(*nodes
)));
2382 order
= drm_random_order(total_size
, &prng
);
2387 drm_mm_init(&mm
, 0, 2*total_size
- 1);
2388 mm
.color_adjust
= separate_adjacent_colors
;
2389 for (n
= 0; n
< total_size
; n
++) {
2390 if (!expect_insert(&mm
, &nodes
[n
].node
,
2392 &insert_modes
[0])) {
2393 pr_err("insert failed, step %d\n", n
);
/* Exercise color-aware eviction restricted to [range_start, range_end). */
2398 for (mode
= evict_modes
; mode
->name
; mode
++) {
2399 for (n
= 1; n
<= range_size
; n
<<= 1) {
2400 drm_random_reorder(order
, range_size
, &prng
);
2401 err
= evict_color(&mm
, range_start
, range_end
,
2402 nodes
, order
, total_size
,
2406 pr_err("%s evict_color(size=%u) failed for range [%x, %x]\n",
2407 mode
->name
, n
, range_start
, range_end
);
2412 for (n
= 1; n
< range_size
; n
<<= 1) {
2413 drm_random_reorder(order
, total_size
, &prng
);
2414 err
= evict_color(&mm
, range_start
, range_end
,
2415 nodes
, order
, total_size
,
2416 range_size
/2, n
, color
++,
/* NOTE(review): the call above uses range_size/2 as the size, but the
 * failure message below reports total_size/2 — looks inconsistent;
 * confirm against the pristine source. */
2419 pr_err("%s evict_color(size=%u, alignment=%u) failed for range [%x, %x]\n",
2420 mode
->name
, total_size
/2, n
, range_start
, range_end
);
2425 for_each_prime_number_from(n
, 1, min(range_size
, max_prime
)) {
2426 unsigned int nsize
= (range_size
- n
+ 1) / 2;
2428 DRM_MM_BUG_ON(!nsize
);
2430 drm_random_reorder(order
, total_size
, &prng
);
2431 err
= evict_color(&mm
, range_start
, range_end
,
2432 nodes
, order
, total_size
,
2436 pr_err("%s evict_color(size=%u, alignment=%u) failed for range [%x, %x]\n",
2437 mode
->name
, nsize
, n
, range_start
, range_end
);
/* Cleanup: remove every node before destroying the mm. */
2449 drm_mm_for_each_node_safe(node
, next
, &mm
)
2450 drm_mm_remove_node(node
);
2451 drm_mm_takedown(&mm
);
2459 #include "drm_selftest.c"
/*
 * test_drm_mm_init() — module entry point: pick a non-zero random seed if
 * none was supplied as a parameter, log the run configuration, and run the
 * selftest table.  A positive run_selftests() result means "skipped", which
 * is reported as success (0).
 *
 * NOTE(review): mangled extraction — the err declaration and surrounding
 * lines were dropped.  Restore from pristine source before editing logic.
 */
2461 static int __init
test_drm_mm_init(void)
2465 while (!random_seed
)
2466 random_seed
= get_random_int();
2468 pr_info("Testing DRM range manager (struct drm_mm), with random_seed=0x%x max_iterations=%u max_prime=%u\n",
2469 random_seed
, max_iterations
, max_prime
);
2470 err
= run_selftests(selftests
, ARRAY_SIZE(selftests
), NULL
);
2472 return err
> 0 ? 0 : err
;
2475 static void __exit
test_drm_mm_exit(void)
/* Register the module entry/exit hooks. */
2479 module_init(test_drm_mm_init
);
2480 module_exit(test_drm_mm_exit
);
/* Runtime-tunable parameters (read-only from sysfs, mode 0400):
 * random_seed     — seed for the pseudo-random test ordering (0 = pick one);
 * max_iterations  — cap on per-test iteration counts;
 * max_prime       — cap on the prime-number sweeps. */
2482 module_param(random_seed
, uint
, 0400);
2483 module_param(max_iterations
, uint
, 0400);
2484 module_param(max_prime
, uint
, 0400);
2486 MODULE_AUTHOR("Intel Corporation");
2487 MODULE_LICENSE("GPL");