treewide: remove redundant IS_ERR() before error code check
[linux/fpc-iii.git] drivers/gpu/drm/i915/selftests/intel_memory_region.c
blob 3ef3620e0da5f65bc222a3b52161873a9d84c581
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include <linux/prime_numbers.h>

#include "../i915_selftest.h"

#include "mock_drm.h"
#include "mock_gem_device.h"
#include "mock_region.h"

#include "gem/i915_gem_context.h"
#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_region.h"
#include "gem/i915_gem_object_blt.h"
#include "gem/selftests/igt_gem_utils.h"
#include "gem/selftests/mock_context.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_gt.h"
#include "selftests/igt_flush_test.h"
#include "selftests/i915_random.h"
static void close_objects(struct intel_memory_region *mem,
			  struct list_head *objects)
{
	struct drm_i915_private *i915 = mem->i915;
	struct drm_i915_gem_object *obj, *on;

	list_for_each_entry_safe(obj, on, objects, st_link) {
		if (i915_gem_object_has_pinned_pages(obj))
			i915_gem_object_unpin_pages(obj);
		/* No polluting the memory region between tests */
		__i915_gem_object_put_pages(obj);
		list_del(&obj->st_link);
		i915_gem_object_put(obj);
	}

	cond_resched();

	i915_gem_drain_freed_objects(i915);
}
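
/*
 * Fill the mock region with objects of increasing (prime) page counts and
 * check that allocation only fails with -ENXIO once there really is no space
 * left to satisfy the request.
 */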
static int igt_mock_fill(void *arg)
{
	struct intel_memory_region *mem = arg;
	resource_size_t total = resource_size(&mem->region);
	resource_size_t page_size;
	resource_size_t rem;
	unsigned long max_pages;
	unsigned long page_num;
	LIST_HEAD(objects);
	int err = 0;

	page_size = mem->mm.chunk_size;
	max_pages = div64_u64(total, page_size);
	rem = total;

	for_each_prime_number_from(page_num, 1, max_pages) {
		resource_size_t size = page_num * page_size;
		struct drm_i915_gem_object *obj;

		obj = i915_gem_object_create_region(mem, size, 0);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			break;
		}

		err = i915_gem_object_pin_pages(obj);
		if (err) {
			i915_gem_object_put(obj);
			break;
		}

		list_add(&obj->st_link, &objects);
		rem -= size;
	}

	if (err == -ENOMEM)
		err = 0;
	if (err == -ENXIO) {
		if (page_num * page_size <= rem) {
			pr_err("%s failed, space still left in region\n",
			       __func__);
			err = -EINVAL;
		} else {
			err = 0;
		}
	}

	close_objects(mem, &objects);

	return err;
}
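
/*
 * Create an object in @mem, pin its pages and keep track of it on @objects;
 * igt_object_release() below undoes exactly that for a single object.
 */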
static struct drm_i915_gem_object *
igt_object_create(struct intel_memory_region *mem,
		  struct list_head *objects,
		  u64 size,
		  unsigned int flags)
{
	struct drm_i915_gem_object *obj;
	int err;

	obj = i915_gem_object_create_region(mem, size, flags);
	if (IS_ERR(obj))
		return obj;

	err = i915_gem_object_pin_pages(obj);
	if (err)
		goto put;

	list_add(&obj->st_link, objects);
	return obj;

put:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}

static void igt_object_release(struct drm_i915_gem_object *obj)
{
	i915_gem_object_unpin_pages(obj);
	__i915_gem_object_put_pages(obj);
	list_del(&obj->st_link);
	i915_gem_object_put(obj);
}
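
/*
 * Exercise I915_BO_ALLOC_CONTIGUOUS: min, max and randomly sized objects must
 * each be backed by a single sg entry, and once the region has been
 * deliberately fragmented, contiguous allocations larger than the biggest
 * remaining block must fail while smaller ones still succeed.
 */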
static int igt_mock_contiguous(void *arg)
{
	struct intel_memory_region *mem = arg;
	struct drm_i915_gem_object *obj;
	unsigned long n_objects;
	LIST_HEAD(objects);
	LIST_HEAD(holes);
	I915_RND_STATE(prng);
	resource_size_t total;
	resource_size_t min;
	u64 target;
	int err = 0;

	total = resource_size(&mem->region);

	/* Min size */
	obj = igt_object_create(mem, &objects, mem->mm.chunk_size,
				I915_BO_ALLOC_CONTIGUOUS);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	if (obj->mm.pages->nents != 1) {
		pr_err("%s min object spans multiple sg entries\n", __func__);
		err = -EINVAL;
		goto err_close_objects;
	}

	igt_object_release(obj);

	/* Max size */
	obj = igt_object_create(mem, &objects, total, I915_BO_ALLOC_CONTIGUOUS);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	if (obj->mm.pages->nents != 1) {
		pr_err("%s max object spans multiple sg entries\n", __func__);
		err = -EINVAL;
		goto err_close_objects;
	}

	igt_object_release(obj);

	/* Internal fragmentation should not bleed into the object size */
	target = i915_prandom_u64_state(&prng);
	div64_u64_rem(target, total, &target);
	target = round_up(target, PAGE_SIZE);
	target = max_t(u64, PAGE_SIZE, target);

	obj = igt_object_create(mem, &objects, target,
				I915_BO_ALLOC_CONTIGUOUS);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	if (obj->base.size != target) {
		pr_err("%s obj->base.size(%zx) != target(%llx)\n", __func__,
		       obj->base.size, target);
		err = -EINVAL;
		goto err_close_objects;
	}

	if (obj->mm.pages->nents != 1) {
		pr_err("%s object spans multiple sg entries\n", __func__);
		err = -EINVAL;
		goto err_close_objects;
	}

	igt_object_release(obj);

	/*
	 * Try to fragment the address space, such that half of it is free, but
	 * the max contiguous block size is SZ_64K.
	 */

	target = SZ_64K;
	n_objects = div64_u64(total, target);

	while (n_objects--) {
		struct list_head *list;

		if (n_objects % 2)
			list = &holes;
		else
			list = &objects;

		obj = igt_object_create(mem, list, target,
					I915_BO_ALLOC_CONTIGUOUS);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			goto err_close_objects;
		}
	}

	close_objects(mem, &holes);

	min = target;
	target = total >> 1;

	/* Make sure we can still allocate all the fragmented space */
	obj = igt_object_create(mem, &objects, target, 0);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto err_close_objects;
	}

	igt_object_release(obj);

	/*
	 * Even though we have enough free space, we don't have a big enough
	 * contiguous block. Make sure that holds true.
	 */

	do {
		bool should_fail = target > min;

		obj = igt_object_create(mem, &objects, target,
					I915_BO_ALLOC_CONTIGUOUS);
		if (should_fail != IS_ERR(obj)) {
			pr_err("%s target allocation(%llx) mismatch\n",
			       __func__, target);
			err = -EINVAL;
			goto err_close_objects;
		}

		target >>= 1;
	} while (target >= mem->mm.chunk_size);

err_close_objects:
	list_splice_tail(&holes, &objects);
	close_objects(mem, &objects);
	return err;
}
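
/*
 * GPU-write/CPU-read helpers: igt_gpu_write_dw() asks the engine to write
 * @value at the given dword offset in every page of @vma, and igt_cpu_check()
 * reads the same dword back through a WC mapping to verify it stuck.
 */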
static int igt_gpu_write_dw(struct intel_context *ce,
			    struct i915_vma *vma,
			    u32 dword,
			    u32 value)
{
	return igt_gpu_fill_dw(ce, vma, dword * sizeof(u32),
			       vma->size >> PAGE_SHIFT, value);
}
static int igt_cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val)
{
	unsigned long n = obj->base.size >> PAGE_SHIFT;
	u32 *ptr;
	int err;

	err = i915_gem_object_wait(obj, 0, MAX_SCHEDULE_TIMEOUT);
	if (err)
		return err;

	ptr = i915_gem_object_pin_map(obj, I915_MAP_WC);
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);

	ptr += dword;
	while (n--) {
		if (*ptr != val) {
			pr_err("base[%u]=%08x, val=%08x\n",
			       dword, *ptr, val);
			err = -EINVAL;
			break;
		}

		ptr += PAGE_SIZE / sizeof(*ptr);
	}

	i915_gem_object_unpin_map(obj);
	return err;
}
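
/*
 * Bounce dword writes across every engine in @ctx that can store a dword,
 * picking engines in random order and checking each write from the CPU,
 * until the selftest timeout expires.
 */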
static int igt_gpu_write(struct i915_gem_context *ctx,
			 struct drm_i915_gem_object *obj)
{
	struct i915_gem_engines *engines;
	struct i915_gem_engines_iter it;
	struct i915_address_space *vm;
	struct intel_context *ce;
	I915_RND_STATE(prng);
	IGT_TIMEOUT(end_time);
	unsigned int count;
	struct i915_vma *vma;
	int *order;
	int i, n;
	int err = 0;

	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

	n = 0;
	count = 0;
	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
		count++;
		if (!intel_engine_can_store_dword(ce->engine))
			continue;

		vm = ce->vm;
		n++;
	}
	i915_gem_context_unlock_engines(ctx);
	if (!n)
		return 0;

	order = i915_random_order(count * count, &prng);
	if (!order)
		return -ENOMEM;

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto out_free;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		goto out_free;

	i = 0;
	engines = i915_gem_context_lock_engines(ctx);
	do {
		u32 rng = prandom_u32_state(&prng);
		u32 dword = offset_in_page(rng) / 4;

		ce = engines->engines[order[i] % engines->num_engines];
		i = (i + 1) % (count * count);
		if (!ce || !intel_engine_can_store_dword(ce->engine))
			continue;

		err = igt_gpu_write_dw(ce, vma, dword, rng);
		if (err)
			break;

		err = igt_cpu_check(obj, dword, rng);
		if (err)
			break;
	} while (!__igt_timeout(end_time, NULL));
	i915_gem_context_unlock_engines(ctx);

out_free:
	kfree(order);

	if (err == -ENOMEM)
		err = 0;

	return err;
}
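
/*
 * Smallest possible smoke test: create a one page LMEM object and make sure
 * its backing pages can be pinned.
 */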
static int igt_lmem_create(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	int err = 0;

	obj = i915_gem_object_create_lmem(i915, PAGE_SIZE, 0);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	err = i915_gem_object_pin_pages(obj);
	if (err)
		goto out_put;

	i915_gem_object_unpin_pages(obj);
out_put:
	i915_gem_object_put(obj);

	return err;
}
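
/*
 * Create a randomly sized (up to 32M) LMEM object in a live context and run
 * the GPU write / CPU readback exercise above against it.
 */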
static int igt_lmem_write_gpu(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	struct i915_gem_context *ctx;
	struct file *file;
	I915_RND_STATE(prng);
	u32 sz;
	int err;

	file = mock_file(i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	ctx = live_context(i915, file);
	if (IS_ERR(ctx)) {
		err = PTR_ERR(ctx);
		goto out_file;
	}

	sz = round_up(prandom_u32_state(&prng) % SZ_32M, PAGE_SIZE);

	obj = i915_gem_object_create_lmem(i915, sz, 0);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto out_file;
	}

	err = i915_gem_object_pin_pages(obj);
	if (err)
		goto out_put;

	err = igt_gpu_write(ctx, obj);
	if (err)
		pr_err("igt_gpu_write failed(%d)\n", err);

	i915_gem_object_unpin_pages(obj);
out_put:
	i915_gem_object_put(obj);
out_file:
	fput(file);
	return err;
}
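
/* Pick one of the user-visible engines of @class at random. */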
static struct intel_engine_cs *
random_engine_class(struct drm_i915_private *i915,
		    unsigned int class,
		    struct rnd_state *prng)
{
	struct intel_engine_cs *engine;
	unsigned int count;

	count = 0;
	for (engine = intel_engine_lookup_user(i915, class, 0);
	     engine && engine->uabi_class == class;
	     engine = rb_entry_safe(rb_next(&engine->uabi_node),
				    typeof(*engine), uabi_node))
		count++;

	count = i915_prandom_u32_max_state(count, prng);
	return intel_engine_lookup_user(i915, class, count);
}
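
/*
 * Fill an LMEM object with a known pattern from the GPU (blitter), then
 * scribble over it from the CPU through a WC mapping using random sizes and
 * alignments, sampling one dword after each write to catch corruption.
 */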
static int igt_lmem_write_cpu(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	I915_RND_STATE(prng);
	IGT_TIMEOUT(end_time);
	u32 bytes[] = {
		0, /* rng placeholder */
		sizeof(u32),
		sizeof(u64),
		64, /* cl */
		PAGE_SIZE,
		PAGE_SIZE - sizeof(u32),
		PAGE_SIZE - sizeof(u64),
		PAGE_SIZE - 64,
	};
	struct intel_engine_cs *engine;
	u32 *vaddr;
	u32 sz;
	u32 i;
	int *order;
	int count;
	int err;

	engine = random_engine_class(i915, I915_ENGINE_CLASS_COPY, &prng);
	if (!engine)
		return 0;

	pr_info("%s: using %s\n", __func__, engine->name);

	sz = round_up(prandom_u32_state(&prng) % SZ_32M, PAGE_SIZE);
	sz = max_t(u32, 2 * PAGE_SIZE, sz);

	obj = i915_gem_object_create_lmem(i915, sz, I915_BO_ALLOC_CONTIGUOUS);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC);
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto out_put;
	}

	/* Put the pages into a known state -- from the gpu for added fun */
	intel_engine_pm_get(engine);
	err = i915_gem_object_fill_blt(obj, engine->kernel_context, 0xdeadbeaf);
	intel_engine_pm_put(engine);
	if (err)
		goto out_unpin;

	i915_gem_object_lock(obj);
	err = i915_gem_object_set_to_wc_domain(obj, true);
	i915_gem_object_unlock(obj);
	if (err)
		goto out_unpin;

	count = ARRAY_SIZE(bytes);
	order = i915_random_order(count * count, &prng);
	if (!order) {
		err = -ENOMEM;
		goto out_unpin;
	}

	/* We want to throw in a random width/align */
	bytes[0] = igt_random_offset(&prng, 0, PAGE_SIZE, sizeof(u32),
				     sizeof(u32));

	i = 0;
	do {
		u32 offset;
		u32 align;
		u32 dword;
		u32 size;
		u32 val;

		size = bytes[order[i] % count];
		i = (i + 1) % (count * count);

		align = bytes[order[i] % count];
		i = (i + 1) % (count * count);

		align = max_t(u32, sizeof(u32), rounddown_pow_of_two(align));

		offset = igt_random_offset(&prng, 0, obj->base.size,
					   size, align);

		val = prandom_u32_state(&prng);
		memset32(vaddr + offset / sizeof(u32), val ^ 0xdeadbeaf,
			 size / sizeof(u32));

		/*
		 * Sample random dw -- don't waste precious time reading every
		 * single dw.
		 */
		dword = igt_random_offset(&prng, offset,
					  offset + size,
					  sizeof(u32), sizeof(u32));
		dword /= sizeof(u32);
		if (vaddr[dword] != (val ^ 0xdeadbeaf)) {
			pr_err("%s vaddr[%u]=%u, val=%u, size=%u, align=%u, offset=%u\n",
			       __func__, dword, vaddr[dword], val ^ 0xdeadbeaf,
			       size, align, offset);
			err = -EINVAL;
			break;
		}
	} while (!__igt_timeout(end_time, NULL));

	kfree(order); /* the random sampling order is no longer needed; don't leak it */
out_unpin:
	i915_gem_object_unpin_map(obj);
out_put:
	i915_gem_object_put(obj);

	return err;
}
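
/*
 * Mock selftests: no hardware required, a 2G mock region on a mock GEM device
 * is enough to exercise the region allocator paths above.
 */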
int intel_memory_region_mock_selftests(void)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_mock_fill),
		SUBTEST(igt_mock_contiguous),
	};
	struct intel_memory_region *mem;
	struct drm_i915_private *i915;
	int err;

	i915 = mock_gem_device();
	if (!i915)
		return -ENOMEM;

	mem = mock_region_create(i915, 0, SZ_2G, I915_GTT_PAGE_SIZE_4K, 0);
	if (IS_ERR(mem)) {
		pr_err("failed to create memory region\n");
		err = PTR_ERR(mem);
		goto out_unref;
	}

	err = i915_subtests(tests, mem);

	intel_memory_region_put(mem);
out_unref:
	drm_dev_put(&i915->drm);
	return err;
}
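
/*
 * Live selftests: these need real local memory, so they are skipped on
 * devices without LMEM or when the GT is already wedged.
 */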
int intel_memory_region_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_lmem_create),
		SUBTEST(igt_lmem_write_cpu),
		SUBTEST(igt_lmem_write_gpu),
	};

	if (!HAS_LMEM(i915)) {
		pr_info("device lacks LMEM support, skipping\n");
		return 0;
	}

	if (intel_gt_is_wedged(&i915->gt))
		return 0;

	return i915_live_subtests(tests, i915);
}