drivers/gpu/drm/i915/selftests/i915_gem_object.c
/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "../i915_selftest.h"

#include "mock_gem_device.h"
#include "huge_gem_object.h"
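
/*
 * Note: like the other i915 selftests of this era, this file is not built
 * standalone; it is expected to be #included from i915_gem.c under
 * CONFIG_DRM_I915_SELFTEST, which is how it reaches static helpers defined
 * there such as compute_partial_view(), flush_write_domain() and
 * tile_row_pages().
 */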
static int igt_gem_object(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	int err = -ENOMEM;

	/* Basic test to ensure we can create an object */

	obj = i915_gem_object_create(i915, PAGE_SIZE);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		pr_err("i915_gem_object_create failed, err=%d\n", err);
		goto out;
	}

	err = 0;
	i915_gem_object_put(obj);
out:
	return err;
}
static int igt_phys_object(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	int err;

	/* Create an object and bind it to a contiguous set of physical pages,
	 * i.e. exercise the i915_gem_object_phys API.
	 */

	obj = i915_gem_object_create(i915, PAGE_SIZE);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		pr_err("i915_gem_object_create failed, err=%d\n", err);
		goto out;
	}

	mutex_lock(&i915->drm.struct_mutex);
	err = i915_gem_object_attach_phys(obj, PAGE_SIZE);
	mutex_unlock(&i915->drm.struct_mutex);
	if (err) {
		pr_err("i915_gem_object_attach_phys failed, err=%d\n", err);
		goto out_obj;
	}

	if (obj->ops != &i915_gem_phys_ops) {
		pr_err("i915_gem_object_attach_phys did not create a phys object\n");
		err = -EINVAL;
		goto out_obj;
	}

	if (!atomic_read(&obj->mm.pages_pin_count)) {
		pr_err("i915_gem_object_attach_phys did not pin its phys pages\n");
		err = -EINVAL;
		goto out_obj;
	}

	/* Make the object dirty so that put_pages must copy the data back */
	mutex_lock(&i915->drm.struct_mutex);
	err = i915_gem_object_set_to_gtt_domain(obj, true);
	mutex_unlock(&i915->drm.struct_mutex);
	if (err) {
		pr_err("i915_gem_object_set_to_gtt_domain failed with err=%d\n",
		       err);
		goto out_obj;
	}

out_obj:
	i915_gem_object_put(obj);
out:
	return err;
}
static int igt_gem_huge(void *arg)
{
	const unsigned int nreal = 509; /* just to be awkward */
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	unsigned int n;
	int err;

	/* Basic sanity check of our huge fake object allocation */

	obj = huge_gem_object(i915,
			      nreal * PAGE_SIZE,
			      i915->ggtt.base.total + PAGE_SIZE);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	err = i915_gem_object_pin_pages(obj);
	if (err) {
		pr_err("Failed to allocate %u pages (%lu total), err=%d\n",
		       nreal, obj->base.size / PAGE_SIZE, err);
		goto out;
	}

	for (n = 0; n < obj->base.size / PAGE_SIZE; n++) {
		if (i915_gem_object_get_page(obj, n) !=
		    i915_gem_object_get_page(obj, n % nreal)) {
			pr_err("Page lookup mismatch at index %u [%u]\n",
			       n, n % nreal);
			err = -EINVAL;
			goto out_unpin;
		}
	}

out_unpin:
	i915_gem_object_unpin_pages(obj);
out:
	i915_gem_object_put(obj);
	return err;
}
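
/*
 * One tiling configuration under test. Judging by its use below: width is
 * the tile row width in bytes, height the number of rows per tile, size the
 * log2 of the tile size in bytes, stride the object stride in bytes, while
 * tiling and swizzle hold the I915_TILING_* mode and bit-6 swizzle pattern.
 */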
struct tile {
	unsigned int width;
	unsigned int height;
	unsigned int stride;
	unsigned int size;
	unsigned int tiling;
	unsigned int swizzle;
};
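
/* Fold the selected address bit down onto bit 6, mirroring the hardware's
 * bit-6 address swizzling.
 */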
static u64 swizzle_bit(unsigned int bit, u64 offset)
{
	return (offset & BIT_ULL(bit)) >> (bit - 6);
}
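
/*
 * A software de-tiler: map a linear GTT offset v to the offset within the
 * object's backing store that the hardware tiled layout (plus bit-6
 * swizzling) would place it at, so GTT writes can be verified by hand.
 */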
static u64 tiled_offset(const struct tile *tile, u64 v)
{
	u64 x, y;

	if (tile->tiling == I915_TILING_NONE)
		return v;

	y = div64_u64_rem(v, tile->stride, &x);
	v = div64_u64_rem(y, tile->height, &y) * tile->stride * tile->height;

	if (tile->tiling == I915_TILING_X) {
		v += y * tile->width;
		v += div64_u64_rem(x, tile->width, &x) << tile->size;
		v += x;
	} else {
		const unsigned int ytile_span = 16;
		const unsigned int ytile_height = 32 * ytile_span;

		v += y * ytile_span;
		v += div64_u64_rem(x, ytile_span, &x) * ytile_height;
		v += x;
	}

	switch (tile->swizzle) {
	case I915_BIT_6_SWIZZLE_9:
		v ^= swizzle_bit(9, v);
		break;
	case I915_BIT_6_SWIZZLE_9_10:
		v ^= swizzle_bit(9, v) ^ swizzle_bit(10, v);
		break;
	case I915_BIT_6_SWIZZLE_9_11:
		v ^= swizzle_bit(9, v) ^ swizzle_bit(11, v);
		break;
	case I915_BIT_6_SWIZZLE_9_10_11:
		v ^= swizzle_bit(9, v) ^ swizzle_bit(10, v) ^ swizzle_bit(11, v);
		break;
	}

	return v;
}
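
/*
 * For each prime-numbered page of the object, write the page index through
 * a partial GGTT view covering that page, then compute the expected linear
 * offset with the software de-tiler above and read the marker back through
 * the CPU to check the fenced write landed in the expected backing page.
 */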
static int check_partial_mapping(struct drm_i915_gem_object *obj,
				 const struct tile *tile,
				 unsigned long end_time)
{
	const unsigned int nreal = obj->scratch / PAGE_SIZE;
	const unsigned long npages = obj->base.size / PAGE_SIZE;
	struct i915_vma *vma;
	unsigned long page;
	int err;

	if (igt_timeout(end_time,
			"%s: timed out before tiling=%d stride=%d\n",
			__func__, tile->tiling, tile->stride))
		return -EINTR;

	err = i915_gem_object_set_tiling(obj, tile->tiling, tile->stride);
	if (err)
		return err;

	GEM_BUG_ON(i915_gem_object_get_tiling(obj) != tile->tiling);
	GEM_BUG_ON(i915_gem_object_get_stride(obj) != tile->stride);

	for_each_prime_number_from(page, 1, npages) {
		struct i915_ggtt_view view =
			compute_partial_view(obj, page, MIN_CHUNK_PAGES);
		u32 __iomem *io;
		struct page *p;
		unsigned int n;
		u64 offset;
		u32 *cpu;

		GEM_BUG_ON(view.partial.size > nreal);

		err = i915_gem_object_set_to_gtt_domain(obj, true);
		if (err)
			return err;

		vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
		if (IS_ERR(vma)) {
			pr_err("Failed to pin partial view: offset=%lu\n",
			       page);
			return PTR_ERR(vma);
		}

		n = page - view.partial.offset;
		GEM_BUG_ON(n >= view.partial.size);

		io = i915_vma_pin_iomap(vma);
		i915_vma_unpin(vma);
		if (IS_ERR(io)) {
			pr_err("Failed to iomap partial view: offset=%lu\n",
			       page);
			return PTR_ERR(io);
		}

		iowrite32(page, io + n * PAGE_SIZE / sizeof(*io));
		i915_vma_unpin_iomap(vma);

		offset = tiled_offset(tile, page << PAGE_SHIFT);
		if (offset >= obj->base.size)
			continue;

		flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);

		p = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
		cpu = kmap(p) + offset_in_page(offset);
		drm_clflush_virt_range(cpu, sizeof(*cpu));
		if (*cpu != (u32)page) {
			pr_err("Partial view for %lu [%u] (offset=%llu, size=%u [%llu, row size %u], fence=%d, tiling=%d, stride=%d) misalignment, expected write to page (%llu + %u [0x%llx]) of 0x%x, found 0x%x\n",
			       page, n,
			       view.partial.offset,
			       view.partial.size,
			       vma->size >> PAGE_SHIFT,
			       tile_row_pages(obj),
			       vma->fence ? vma->fence->id : -1, tile->tiling, tile->stride,
			       offset >> PAGE_SHIFT,
			       (unsigned int)offset_in_page(offset),
			       offset,
			       (u32)page, *cpu);
			err = -EINVAL;
		}
		*cpu = 0;
		drm_clflush_virt_range(cpu, sizeof(*cpu));
		kunmap(p);
		if (err)
			return err;
	}

	return 0;
}
static int igt_partial_tiling(void *arg)
{
	const unsigned int nreal = 1 << 12; /* largest tile row x2 */
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	int tiling;
	int err;

	/* We want to check the page mapping and fencing of a large object
	 * mmapped through the GTT. The object we create is larger than can
	 * possibly be mmapped as a whole, and so we must use partial GGTT
	 * vmas. We then check that a write through each partial GGTT vma
	 * ends up in the right set of pages within the object, and with the
	 * expected tiling, which we verify by manual swizzling.
	 */

	obj = huge_gem_object(i915,
			      nreal << PAGE_SHIFT,
			      (1 + next_prime_number(i915->ggtt.base.total >> PAGE_SHIFT)) << PAGE_SHIFT);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	err = i915_gem_object_pin_pages(obj);
	if (err) {
		pr_err("Failed to allocate %u pages (%lu total), err=%d\n",
		       nreal, obj->base.size / PAGE_SIZE, err);
		goto out;
	}

	mutex_lock(&i915->drm.struct_mutex);
	intel_runtime_pm_get(i915);

	if (1) {
		IGT_TIMEOUT(end);
		struct tile tile;

		tile.height = 1;
		tile.width = 1;
		tile.size = 0;
		tile.stride = 0;
		tile.swizzle = I915_BIT_6_SWIZZLE_NONE;
		tile.tiling = I915_TILING_NONE;

		err = check_partial_mapping(obj, &tile, end);
		if (err && err != -EINTR)
			goto out_unlock;
	}

	for (tiling = I915_TILING_X; tiling <= I915_TILING_Y; tiling++) {
		IGT_TIMEOUT(end);
		unsigned int max_pitch;
		unsigned int pitch;
		struct tile tile;

		tile.tiling = tiling;
		switch (tiling) {
		case I915_TILING_X:
			tile.swizzle = i915->mm.bit_6_swizzle_x;
			break;
		case I915_TILING_Y:
			tile.swizzle = i915->mm.bit_6_swizzle_y;
			break;
		}

		if (tile.swizzle == I915_BIT_6_SWIZZLE_UNKNOWN ||
		    tile.swizzle == I915_BIT_6_SWIZZLE_9_10_17)
			continue;

		if (INTEL_GEN(i915) <= 2) {
			tile.height = 16;
			tile.width = 128;
			tile.size = 11;
		} else if (tile.tiling == I915_TILING_Y &&
			   HAS_128_BYTE_Y_TILING(i915)) {
			tile.height = 32;
			tile.width = 128;
			tile.size = 12;
		} else {
			tile.height = 8;
			tile.width = 512;
			tile.size = 12;
		}

		if (INTEL_GEN(i915) < 4)
			max_pitch = 8192 / tile.width;
		else if (INTEL_GEN(i915) < 7)
			max_pitch = 128 * I965_FENCE_MAX_PITCH_VAL / tile.width;
		else
			max_pitch = 128 * GEN7_FENCE_MAX_PITCH_VAL / tile.width;

		for (pitch = max_pitch; pitch; pitch >>= 1) {
			tile.stride = tile.width * pitch;
			err = check_partial_mapping(obj, &tile, end);
			if (err == -EINTR)
				goto next_tiling;
			if (err)
				goto out_unlock;

			if (pitch > 2 && INTEL_GEN(i915) >= 4) {
				tile.stride = tile.width * (pitch - 1);
				err = check_partial_mapping(obj, &tile, end);
				if (err == -EINTR)
					goto next_tiling;
				if (err)
					goto out_unlock;
			}

			if (pitch < max_pitch && INTEL_GEN(i915) >= 4) {
				tile.stride = tile.width * (pitch + 1);
				err = check_partial_mapping(obj, &tile, end);
				if (err == -EINTR)
					goto next_tiling;
				if (err)
					goto out_unlock;
			}
		}

		if (INTEL_GEN(i915) >= 4) {
			for_each_prime_number(pitch, max_pitch) {
				tile.stride = tile.width * pitch;
				err = check_partial_mapping(obj, &tile, end);
				if (err == -EINTR)
					goto next_tiling;
				if (err)
					goto out_unlock;
			}
		}

next_tiling: ;
	}

out_unlock:
	intel_runtime_pm_put(i915);
	mutex_unlock(&i915->drm.struct_mutex);
	i915_gem_object_unpin_pages(obj);
out:
	i915_gem_object_put(obj);
	return err;
}
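
/*
 * Make the object busy by submitting a request against it on the render
 * engine; the active reference then keeps the object alive after the
 * caller's reference is dropped, so the exhaustion test below can create
 * busy-but-dead objects that the mmap-offset code must reap.
 */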
static int make_obj_busy(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct drm_i915_gem_request *rq;
	struct i915_vma *vma;
	int err;

	vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		return err;

	rq = i915_gem_request_alloc(i915->engine[RCS], i915->kernel_context);
	if (IS_ERR(rq)) {
		i915_vma_unpin(vma);
		return PTR_ERR(rq);
	}

	i915_vma_move_to_active(vma, rq, 0);
	i915_add_request(rq);

	i915_gem_object_set_active_reference(obj);
	i915_vma_unpin(vma);
	return 0;
}
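
/*
 * Check that creating a mmap offset for an object of the given size fails
 * or succeeds with exactly the expected error code.
 */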
static bool assert_mmap_offset(struct drm_i915_private *i915,
			       unsigned long size,
			       int expected)
{
	struct drm_i915_gem_object *obj;
	int err;

	obj = i915_gem_object_create_internal(i915, size);
	if (IS_ERR(obj))
		return false; /* returning PTR_ERR() as bool would mask the failure */

	err = i915_gem_object_create_mmap_offset(obj);
	i915_gem_object_put(obj);

	return err == expected;
}
static int igt_mmap_offset_exhaustion(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_mm *mm = &i915->drm.vma_offset_manager->vm_addr_space_mm;
	struct drm_i915_gem_object *obj;
	struct drm_mm_node resv, *hole;
	u64 hole_start, hole_end;
	int loop, err;

	/* Trim the device mmap space to only a page */
	memset(&resv, 0, sizeof(resv));
	drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
		resv.start = hole_start;
		resv.size = hole_end - hole_start - 1; /* PAGE_SIZE units */
		err = drm_mm_reserve_node(mm, &resv);
		if (err) {
			pr_err("Failed to trim VMA manager, err=%d\n", err);
			return err;
		}
		break;
	}

	/* Just fits! */
	if (!assert_mmap_offset(i915, PAGE_SIZE, 0)) {
		pr_err("Unable to insert object into single page hole\n");
		err = -EINVAL;
		goto out;
	}

	/* Too large */
	if (!assert_mmap_offset(i915, 2 * PAGE_SIZE, -ENOSPC)) {
		pr_err("Unexpectedly succeeded in inserting too large object into single page hole\n");
		err = -EINVAL;
		goto out;
	}

	/* Fill the hole, further allocation attempts should then fail */
	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto out;
	}

	err = i915_gem_object_create_mmap_offset(obj);
	if (err) {
		pr_err("Unable to insert object into reclaimed hole\n");
		goto err_obj;
	}

	if (!assert_mmap_offset(i915, PAGE_SIZE, -ENOSPC)) {
		pr_err("Unexpectedly succeeded in inserting object into no holes!\n");
		err = -EINVAL;
		goto err_obj;
	}

	i915_gem_object_put(obj);

	/* Now fill with busy dead objects that we expect to reap */
	for (loop = 0; loop < 3; loop++) {
		obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			goto out;
		}

		mutex_lock(&i915->drm.struct_mutex);
		intel_runtime_pm_get(i915);
		err = make_obj_busy(obj);
		intel_runtime_pm_put(i915);
		mutex_unlock(&i915->drm.struct_mutex);
		if (err) {
			pr_err("[loop %d] Failed to busy the object\n", loop);
			goto err_obj;
		}

		GEM_BUG_ON(!i915_gem_object_is_active(obj));
		err = i915_gem_object_create_mmap_offset(obj);
		if (err) {
			pr_err("[loop %d] i915_gem_object_create_mmap_offset failed with err=%d\n",
			       loop, err);
			goto out;
		}
	}

out:
	drm_mm_remove_node(&resv);
	return err;
err_obj:
	i915_gem_object_put(obj);
	goto out;
}
int i915_gem_object_mock_selftests(void)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_gem_object),
		SUBTEST(igt_phys_object),
	};
	struct drm_i915_private *i915;
	int err;

	i915 = mock_gem_device();
	if (!i915)
		return -ENOMEM;

	err = i915_subtests(tests, i915);

	drm_dev_unref(&i915->drm);
	return err;
}
int i915_gem_object_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_gem_huge),
		SUBTEST(igt_partial_tiling),
		SUBTEST(igt_mmap_offset_exhaustion),
	};

	return i915_subtests(tests, i915);
}