/*
 * drivers/gpu/drm/i915/selftests/i915_buddy.c
 * (from tree: treewide: remove redundant IS_ERR() before error code check)
 */
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include <linux/prime_numbers.h>

#include "../i915_selftest.h"
#include "i915_random.h"

#define SZ_8G (1ULL << 33)
13 static void __igt_dump_block(struct i915_buddy_mm *mm,
14 struct i915_buddy_block *block,
15 bool buddy)
17 pr_err("block info: header=%llx, state=%u, order=%d, offset=%llx size=%llx root=%s buddy=%s\n",
18 block->header,
19 i915_buddy_block_state(block),
20 i915_buddy_block_order(block),
21 i915_buddy_block_offset(block),
22 i915_buddy_block_size(mm, block),
23 yesno(!block->parent),
24 yesno(buddy));
27 static void igt_dump_block(struct i915_buddy_mm *mm,
28 struct i915_buddy_block *block)
30 struct i915_buddy_block *buddy;
32 __igt_dump_block(mm, block, false);
34 buddy = get_buddy(block);
35 if (buddy)
36 __igt_dump_block(mm, buddy, true);
39 static int igt_check_block(struct i915_buddy_mm *mm,
40 struct i915_buddy_block *block)
42 struct i915_buddy_block *buddy;
43 unsigned int block_state;
44 u64 block_size;
45 u64 offset;
46 int err = 0;
48 block_state = i915_buddy_block_state(block);
50 if (block_state != I915_BUDDY_ALLOCATED &&
51 block_state != I915_BUDDY_FREE &&
52 block_state != I915_BUDDY_SPLIT) {
53 pr_err("block state mismatch\n");
54 err = -EINVAL;
57 block_size = i915_buddy_block_size(mm, block);
58 offset = i915_buddy_block_offset(block);
60 if (block_size < mm->chunk_size) {
61 pr_err("block size smaller than min size\n");
62 err = -EINVAL;
65 if (!is_power_of_2(block_size)) {
66 pr_err("block size not power of two\n");
67 err = -EINVAL;
70 if (!IS_ALIGNED(block_size, mm->chunk_size)) {
71 pr_err("block size not aligned to min size\n");
72 err = -EINVAL;
75 if (!IS_ALIGNED(offset, mm->chunk_size)) {
76 pr_err("block offset not aligned to min size\n");
77 err = -EINVAL;
80 if (!IS_ALIGNED(offset, block_size)) {
81 pr_err("block offset not aligned to block size\n");
82 err = -EINVAL;
85 buddy = get_buddy(block);
87 if (!buddy && block->parent) {
88 pr_err("buddy has gone fishing\n");
89 err = -EINVAL;
92 if (buddy) {
93 if (i915_buddy_block_offset(buddy) != (offset ^ block_size)) {
94 pr_err("buddy has wrong offset\n");
95 err = -EINVAL;
98 if (i915_buddy_block_size(mm, buddy) != block_size) {
99 pr_err("buddy size mismatch\n");
100 err = -EINVAL;
103 if (i915_buddy_block_state(buddy) == block_state &&
104 block_state == I915_BUDDY_FREE) {
105 pr_err("block and its buddy are free\n");
106 err = -EINVAL;
110 return err;
113 static int igt_check_blocks(struct i915_buddy_mm *mm,
114 struct list_head *blocks,
115 u64 expected_size,
116 bool is_contiguous)
118 struct i915_buddy_block *block;
119 struct i915_buddy_block *prev;
120 u64 total;
121 int err = 0;
123 block = NULL;
124 prev = NULL;
125 total = 0;
127 list_for_each_entry(block, blocks, link) {
128 err = igt_check_block(mm, block);
130 if (!i915_buddy_block_is_allocated(block)) {
131 pr_err("block not allocated\n"),
132 err = -EINVAL;
135 if (is_contiguous && prev) {
136 u64 prev_block_size;
137 u64 prev_offset;
138 u64 offset;
140 prev_offset = i915_buddy_block_offset(prev);
141 prev_block_size = i915_buddy_block_size(mm, prev);
142 offset = i915_buddy_block_offset(block);
144 if (offset != (prev_offset + prev_block_size)) {
145 pr_err("block offset mismatch\n");
146 err = -EINVAL;
150 if (err)
151 break;
153 total += i915_buddy_block_size(mm, block);
154 prev = block;
157 if (!err) {
158 if (total != expected_size) {
159 pr_err("size mismatch, expected=%llx, found=%llx\n",
160 expected_size, total);
161 err = -EINVAL;
163 return err;
166 if (prev) {
167 pr_err("prev block, dump:\n");
168 igt_dump_block(mm, prev);
171 if (block) {
172 pr_err("bad block, dump:\n");
173 igt_dump_block(mm, block);
176 return err;
179 static int igt_check_mm(struct i915_buddy_mm *mm)
181 struct i915_buddy_block *root;
182 struct i915_buddy_block *prev;
183 unsigned int i;
184 u64 total;
185 int err = 0;
187 if (!mm->n_roots) {
188 pr_err("n_roots is zero\n");
189 return -EINVAL;
192 if (mm->n_roots != hweight64(mm->size)) {
193 pr_err("n_roots mismatch, n_roots=%u, expected=%lu\n",
194 mm->n_roots, hweight64(mm->size));
195 return -EINVAL;
198 root = NULL;
199 prev = NULL;
200 total = 0;
202 for (i = 0; i < mm->n_roots; ++i) {
203 struct i915_buddy_block *block;
204 unsigned int order;
206 root = mm->roots[i];
207 if (!root) {
208 pr_err("root(%u) is NULL\n", i);
209 err = -EINVAL;
210 break;
213 err = igt_check_block(mm, root);
215 if (!i915_buddy_block_is_free(root)) {
216 pr_err("root not free\n");
217 err = -EINVAL;
220 order = i915_buddy_block_order(root);
222 if (!i) {
223 if (order != mm->max_order) {
224 pr_err("max order root missing\n");
225 err = -EINVAL;
229 if (prev) {
230 u64 prev_block_size;
231 u64 prev_offset;
232 u64 offset;
234 prev_offset = i915_buddy_block_offset(prev);
235 prev_block_size = i915_buddy_block_size(mm, prev);
236 offset = i915_buddy_block_offset(root);
238 if (offset != (prev_offset + prev_block_size)) {
239 pr_err("root offset mismatch\n");
240 err = -EINVAL;
244 block = list_first_entry_or_null(&mm->free_list[order],
245 struct i915_buddy_block,
246 link);
247 if (block != root) {
248 pr_err("root mismatch at order=%u\n", order);
249 err = -EINVAL;
252 if (err)
253 break;
255 prev = root;
256 total += i915_buddy_block_size(mm, root);
259 if (!err) {
260 if (total != mm->size) {
261 pr_err("expected mm size=%llx, found=%llx\n", mm->size,
262 total);
263 err = -EINVAL;
265 return err;
268 if (prev) {
269 pr_err("prev root(%u), dump:\n", i - 1);
270 igt_dump_block(mm, prev);
273 if (root) {
274 pr_err("bad root(%u), dump:\n", i);
275 igt_dump_block(mm, root);
278 return err;
281 static void igt_mm_config(u64 *size, u64 *chunk_size)
283 I915_RND_STATE(prng);
284 u64 s, ms;
286 /* Nothing fancy, just try to get an interesting bit pattern */
288 prandom_seed_state(&prng, i915_selftest.random_seed);
290 s = i915_prandom_u64_state(&prng) & (SZ_8G - 1);
291 ms = BIT_ULL(12 + (prandom_u32_state(&prng) % ilog2(s >> 12)));
292 s = max(s & -ms, ms);
294 *chunk_size = ms;
295 *size = s;
298 static int igt_buddy_alloc_smoke(void *arg)
300 struct i915_buddy_mm mm;
301 int max_order;
302 u64 chunk_size;
303 u64 mm_size;
304 int err;
306 igt_mm_config(&mm_size, &chunk_size);
308 pr_info("buddy_init with size=%llx, chunk_size=%llx\n", mm_size, chunk_size);
310 err = i915_buddy_init(&mm, mm_size, chunk_size);
311 if (err) {
312 pr_err("buddy_init failed(%d)\n", err);
313 return err;
316 for (max_order = mm.max_order; max_order >= 0; max_order--) {
317 struct i915_buddy_block *block;
318 int order;
319 LIST_HEAD(blocks);
320 u64 total;
322 err = igt_check_mm(&mm);
323 if (err) {
324 pr_err("pre-mm check failed, abort\n");
325 break;
328 pr_info("filling from max_order=%u\n", max_order);
330 order = max_order;
331 total = 0;
333 do {
334 retry:
335 block = i915_buddy_alloc(&mm, order);
336 if (IS_ERR(block)) {
337 err = PTR_ERR(block);
338 if (err == -ENOMEM) {
339 pr_info("buddy_alloc hit -ENOMEM with order=%d\n",
340 order);
341 } else {
342 if (order--) {
343 err = 0;
344 goto retry;
347 pr_err("buddy_alloc with order=%d failed(%d)\n",
348 order, err);
351 break;
354 list_add_tail(&block->link, &blocks);
356 if (i915_buddy_block_order(block) != order) {
357 pr_err("buddy_alloc order mismatch\n");
358 err = -EINVAL;
359 break;
362 total += i915_buddy_block_size(&mm, block);
363 } while (total < mm.size);
365 if (!err)
366 err = igt_check_blocks(&mm, &blocks, total, false);
368 i915_buddy_free_list(&mm, &blocks);
370 if (!err) {
371 err = igt_check_mm(&mm);
372 if (err)
373 pr_err("post-mm check failed\n");
376 if (err)
377 break;
379 cond_resched();
382 if (err == -ENOMEM)
383 err = 0;
385 i915_buddy_fini(&mm);
387 return err;
390 static int igt_buddy_alloc_pessimistic(void *arg)
392 const unsigned int max_order = 16;
393 struct i915_buddy_block *block, *bn;
394 struct i915_buddy_mm mm;
395 unsigned int order;
396 LIST_HEAD(blocks);
397 int err;
400 * Create a pot-sized mm, then allocate one of each possible
401 * order within. This should leave the mm with exactly one
402 * page left.
405 err = i915_buddy_init(&mm, PAGE_SIZE << max_order, PAGE_SIZE);
406 if (err) {
407 pr_err("buddy_init failed(%d)\n", err);
408 return err;
410 GEM_BUG_ON(mm.max_order != max_order);
412 for (order = 0; order < max_order; order++) {
413 block = i915_buddy_alloc(&mm, order);
414 if (IS_ERR(block)) {
415 pr_info("buddy_alloc hit -ENOMEM with order=%d\n",
416 order);
417 err = PTR_ERR(block);
418 goto err;
421 list_add_tail(&block->link, &blocks);
424 /* And now the last remaining block available */
425 block = i915_buddy_alloc(&mm, 0);
426 if (IS_ERR(block)) {
427 pr_info("buddy_alloc hit -ENOMEM on final alloc\n");
428 err = PTR_ERR(block);
429 goto err;
431 list_add_tail(&block->link, &blocks);
433 /* Should be completely full! */
434 for (order = max_order; order--; ) {
435 block = i915_buddy_alloc(&mm, order);
436 if (!IS_ERR(block)) {
437 pr_info("buddy_alloc unexpectedly succeeded at order %d, it should be full!",
438 order);
439 list_add_tail(&block->link, &blocks);
440 err = -EINVAL;
441 goto err;
445 block = list_last_entry(&blocks, typeof(*block), link);
446 list_del(&block->link);
447 i915_buddy_free(&mm, block);
449 /* As we free in increasing size, we make available larger blocks */
450 order = 1;
451 list_for_each_entry_safe(block, bn, &blocks, link) {
452 list_del(&block->link);
453 i915_buddy_free(&mm, block);
455 block = i915_buddy_alloc(&mm, order);
456 if (IS_ERR(block)) {
457 pr_info("buddy_alloc (realloc) hit -ENOMEM with order=%d\n",
458 order);
459 err = PTR_ERR(block);
460 goto err;
462 i915_buddy_free(&mm, block);
463 order++;
466 /* To confirm, now the whole mm should be available */
467 block = i915_buddy_alloc(&mm, max_order);
468 if (IS_ERR(block)) {
469 pr_info("buddy_alloc (realloc) hit -ENOMEM with order=%d\n",
470 max_order);
471 err = PTR_ERR(block);
472 goto err;
474 i915_buddy_free(&mm, block);
476 err:
477 i915_buddy_free_list(&mm, &blocks);
478 i915_buddy_fini(&mm);
479 return err;
482 static int igt_buddy_alloc_optimistic(void *arg)
484 const int max_order = 16;
485 struct i915_buddy_block *block;
486 struct i915_buddy_mm mm;
487 LIST_HEAD(blocks);
488 int order;
489 int err;
492 * Create a mm with one block of each order available, and
493 * try to allocate them all.
496 err = i915_buddy_init(&mm,
497 PAGE_SIZE * ((1 << (max_order + 1)) - 1),
498 PAGE_SIZE);
499 if (err) {
500 pr_err("buddy_init failed(%d)\n", err);
501 return err;
503 GEM_BUG_ON(mm.max_order != max_order);
505 for (order = 0; order <= max_order; order++) {
506 block = i915_buddy_alloc(&mm, order);
507 if (IS_ERR(block)) {
508 pr_info("buddy_alloc hit -ENOMEM with order=%d\n",
509 order);
510 err = PTR_ERR(block);
511 goto err;
514 list_add_tail(&block->link, &blocks);
517 /* Should be completely full! */
518 block = i915_buddy_alloc(&mm, 0);
519 if (!IS_ERR(block)) {
520 pr_info("buddy_alloc unexpectedly succeeded, it should be full!");
521 list_add_tail(&block->link, &blocks);
522 err = -EINVAL;
523 goto err;
526 err:
527 i915_buddy_free_list(&mm, &blocks);
528 i915_buddy_fini(&mm);
529 return err;
532 static int igt_buddy_alloc_pathological(void *arg)
534 const int max_order = 16;
535 struct i915_buddy_block *block;
536 struct i915_buddy_mm mm;
537 LIST_HEAD(blocks);
538 LIST_HEAD(holes);
539 int order, top;
540 int err;
543 * Create a pot-sized mm, then allocate one of each possible
544 * order within. This should leave the mm with exactly one
545 * page left. Free the largest block, then whittle down again.
546 * Eventually we will have a fully 50% fragmented mm.
549 err = i915_buddy_init(&mm, PAGE_SIZE << max_order, PAGE_SIZE);
550 if (err) {
551 pr_err("buddy_init failed(%d)\n", err);
552 return err;
554 GEM_BUG_ON(mm.max_order != max_order);
556 for (top = max_order; top; top--) {
557 /* Make room by freeing the largest allocated block */
558 block = list_first_entry_or_null(&blocks, typeof(*block), link);
559 if (block) {
560 list_del(&block->link);
561 i915_buddy_free(&mm, block);
564 for (order = top; order--; ) {
565 block = i915_buddy_alloc(&mm, order);
566 if (IS_ERR(block)) {
567 pr_info("buddy_alloc hit -ENOMEM with order=%d, top=%d\n",
568 order, top);
569 err = PTR_ERR(block);
570 goto err;
572 list_add_tail(&block->link, &blocks);
575 /* There should be one final page for this sub-allocation */
576 block = i915_buddy_alloc(&mm, 0);
577 if (IS_ERR(block)) {
578 pr_info("buddy_alloc hit -ENOMEM for hole\n");
579 err = PTR_ERR(block);
580 goto err;
582 list_add_tail(&block->link, &holes);
584 block = i915_buddy_alloc(&mm, top);
585 if (!IS_ERR(block)) {
586 pr_info("buddy_alloc unexpectedly succeeded at top-order %d/%d, it should be full!",
587 top, max_order);
588 list_add_tail(&block->link, &blocks);
589 err = -EINVAL;
590 goto err;
594 i915_buddy_free_list(&mm, &holes);
596 /* Nothing larger than blocks of chunk_size now available */
597 for (order = 1; order <= max_order; order++) {
598 block = i915_buddy_alloc(&mm, order);
599 if (!IS_ERR(block)) {
600 pr_info("buddy_alloc unexpectedly succeeded at order %d, it should be full!",
601 order);
602 list_add_tail(&block->link, &blocks);
603 err = -EINVAL;
604 goto err;
608 err:
609 list_splice_tail(&holes, &blocks);
610 i915_buddy_free_list(&mm, &blocks);
611 i915_buddy_fini(&mm);
612 return err;
615 static int igt_buddy_alloc_range(void *arg)
617 struct i915_buddy_mm mm;
618 unsigned long page_num;
619 LIST_HEAD(blocks);
620 u64 chunk_size;
621 u64 offset;
622 u64 size;
623 u64 rem;
624 int err;
626 igt_mm_config(&size, &chunk_size);
628 pr_info("buddy_init with size=%llx, chunk_size=%llx\n", size, chunk_size);
630 err = i915_buddy_init(&mm, size, chunk_size);
631 if (err) {
632 pr_err("buddy_init failed(%d)\n", err);
633 return err;
636 err = igt_check_mm(&mm);
637 if (err) {
638 pr_err("pre-mm check failed, abort, abort, abort!\n");
639 goto err_fini;
642 rem = mm.size;
643 offset = 0;
645 for_each_prime_number_from(page_num, 1, ULONG_MAX - 1) {
646 struct i915_buddy_block *block;
647 LIST_HEAD(tmp);
649 size = min(page_num * mm.chunk_size, rem);
651 err = i915_buddy_alloc_range(&mm, &tmp, offset, size);
652 if (err) {
653 if (err == -ENOMEM) {
654 pr_info("alloc_range hit -ENOMEM with size=%llx\n",
655 size);
656 } else {
657 pr_err("alloc_range with offset=%llx, size=%llx failed(%d)\n",
658 offset, size, err);
661 break;
664 block = list_first_entry_or_null(&tmp,
665 struct i915_buddy_block,
666 link);
667 if (!block) {
668 pr_err("alloc_range has no blocks\n");
669 err = -EINVAL;
670 break;
673 if (i915_buddy_block_offset(block) != offset) {
674 pr_err("alloc_range start offset mismatch, found=%llx, expected=%llx\n",
675 i915_buddy_block_offset(block), offset);
676 err = -EINVAL;
679 if (!err)
680 err = igt_check_blocks(&mm, &tmp, size, true);
682 list_splice_tail(&tmp, &blocks);
684 if (err)
685 break;
687 offset += size;
689 rem -= size;
690 if (!rem)
691 break;
693 cond_resched();
696 if (err == -ENOMEM)
697 err = 0;
699 i915_buddy_free_list(&mm, &blocks);
701 if (!err) {
702 err = igt_check_mm(&mm);
703 if (err)
704 pr_err("post-mm check failed\n");
707 err_fini:
708 i915_buddy_fini(&mm);
710 return err;
713 int i915_buddy_mock_selftests(void)
715 static const struct i915_subtest tests[] = {
716 SUBTEST(igt_buddy_alloc_pessimistic),
717 SUBTEST(igt_buddy_alloc_optimistic),
718 SUBTEST(igt_buddy_alloc_pathological),
719 SUBTEST(igt_buddy_alloc_smoke),
720 SUBTEST(igt_buddy_alloc_range),
723 return i915_subtests(tests, NULL);