/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016 Intel Corporation
 */

#include <linux/prime_numbers.h>

#include "gt/intel_engine_pm.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "gem/i915_gem_region.h"
#include "huge_gem_object.h"
#include "i915_selftest.h"
#include "selftests/i915_random.h"
#include "selftests/igt_flush_test.h"
#include "selftests/igt_mmap.h"
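
/*
 * Live selftests for the GEM mmap paths: partial GGTT views of huge,
 * tiled objects, exhaustion of the mmap offset space, and coherency of
 * the GTT/WB/WC/UC user mappings against the CPU and the GPU.
 */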
static u64 swizzle_bit(unsigned int bit, u64 offset)
{
        return (offset & BIT_ULL(bit)) >> (bit - 6);
}
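
/*
 * Convert a linear byte offset into the offset at which the same byte
 * lands once the object is laid out with the given tile geometry
 * (stride, width, height, size) and bit-6 swizzle mode. This is the
 * software reference used to predict where a tiled GTT write should
 * appear in the backing pages.
 */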
static u64 tiled_offset(const struct tile *tile, u64 v)
{
        u64 x, y;

        if (tile->tiling == I915_TILING_NONE)
                return v;

        y = div64_u64_rem(v, tile->stride, &x);
        v = div64_u64_rem(y, tile->height, &y) * tile->stride * tile->height;

        if (tile->tiling == I915_TILING_X) {
                v += y * tile->width;
                v += div64_u64_rem(x, tile->width, &x) << tile->size;
                v += x;
        } else if (tile->width == 128) {
                const unsigned int ytile_span = 16;
                const unsigned int ytile_height = 512;

                v += y * ytile_span;
                v += div64_u64_rem(x, ytile_span, &x) * ytile_height;
                v += x;
        } else {
                const unsigned int ytile_span = 32;
                const unsigned int ytile_height = 256;

                v += y * ytile_span;
                v += div64_u64_rem(x, ytile_span, &x) * ytile_height;
                v += x;
        }

        switch (tile->swizzle) {
        case I915_BIT_6_SWIZZLE_9:
                v ^= swizzle_bit(9, v);
                break;
        case I915_BIT_6_SWIZZLE_9_10:
                v ^= swizzle_bit(9, v) ^ swizzle_bit(10, v);
                break;
        case I915_BIT_6_SWIZZLE_9_11:
                v ^= swizzle_bit(9, v) ^ swizzle_bit(11, v);
                break;
        case I915_BIT_6_SWIZZLE_9_10_11:
                v ^= swizzle_bit(9, v) ^ swizzle_bit(10, v) ^ swizzle_bit(11, v);
                break;
        }

        return v;
}
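
/*
 * Pick one random page of the object, write its index through a partial
 * GGTT view of that page, and then read the backing storage back through
 * the CPU (after detiling with tiled_offset()) to confirm the write
 * landed where the tiling and swizzling predict.
 */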
static int check_partial_mapping(struct drm_i915_gem_object *obj,
                                 const struct tile *tile,
                                 struct rnd_state *prng)
{
        const unsigned long npages = obj->base.size / PAGE_SIZE;
        struct i915_ggtt_view view;
        struct i915_vma *vma;
        unsigned long page;
        u32 __iomem *io;
        struct page *p;
        unsigned int n;
        u64 offset;
        u32 *cpu;
        int err;

        err = i915_gem_object_set_tiling(obj, tile->tiling, tile->stride);
        if (err) {
                pr_err("Failed to set tiling mode=%u, stride=%u, err=%d\n",
                       tile->tiling, tile->stride, err);
                return err;
        }

        GEM_BUG_ON(i915_gem_object_get_tiling(obj) != tile->tiling);
        GEM_BUG_ON(i915_gem_object_get_stride(obj) != tile->stride);

        i915_gem_object_lock(obj, NULL);
        err = i915_gem_object_set_to_gtt_domain(obj, true);
        i915_gem_object_unlock(obj);
        if (err) {
                pr_err("Failed to flush to GTT write domain; err=%d\n", err);
                return err;
        }

        page = i915_prandom_u32_max_state(npages, prng);
        view = compute_partial_view(obj, page, MIN_CHUNK_PAGES);

        vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
        if (IS_ERR(vma)) {
                pr_err("Failed to pin partial view: offset=%lu; err=%d\n",
                       page, (int)PTR_ERR(vma));
                return PTR_ERR(vma);
        }

        n = page - view.partial.offset;
        GEM_BUG_ON(n >= view.partial.size);

        io = i915_vma_pin_iomap(vma);
        i915_vma_unpin(vma);
        if (IS_ERR(io)) {
                pr_err("Failed to iomap partial view: offset=%lu; err=%d\n",
                       page, (int)PTR_ERR(io));
                err = PTR_ERR(io);
                goto out;
        }

        iowrite32(page, io + n * PAGE_SIZE / sizeof(*io));
        i915_vma_unpin_iomap(vma);

        offset = tiled_offset(tile, page << PAGE_SHIFT);
        if (offset >= obj->base.size)
                goto out;

        intel_gt_flush_ggtt_writes(&to_i915(obj->base.dev)->gt);

        p = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
        cpu = kmap(p) + offset_in_page(offset);
        drm_clflush_virt_range(cpu, sizeof(*cpu));
        if (*cpu != (u32)page) {
                pr_err("Partial view for %lu [%u] (offset=%llu, size=%u [%llu, row size %u], fence=%d, tiling=%d, stride=%d) misalignment, expected write to page (%llu + %u [0x%llx]) of 0x%x, found 0x%x\n",
                       page, n,
                       view.partial.offset,
                       view.partial.size,
                       vma->size >> PAGE_SHIFT,
                       tile->tiling ? tile_row_pages(obj) : 0,
                       vma->fence ? vma->fence->id : -1, tile->tiling, tile->stride,
                       offset >> PAGE_SHIFT,
                       (unsigned int)offset_in_page(offset),
                       offset,
                       (u32)page, *cpu);
                err = -EINVAL;
        }
        *cpu = 0;
        drm_clflush_virt_range(cpu, sizeof(*cpu));
        kunmap(p);

out:
        __i915_vma_put(vma);
        return err;
}
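
/*
 * Exhaustive variant of check_partial_mapping(): walk prime-numbered
 * page offsets across the whole object for the given tile parameters,
 * bailing out with -EINTR once the igt_timeout() budget expires.
 */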
static int check_partial_mappings(struct drm_i915_gem_object *obj,
                                  const struct tile *tile,
                                  unsigned long end_time)
{
        const unsigned int nreal = obj->scratch / PAGE_SIZE;
        const unsigned long npages = obj->base.size / PAGE_SIZE;
        struct i915_vma *vma;
        unsigned long page;
        int err;

        err = i915_gem_object_set_tiling(obj, tile->tiling, tile->stride);
        if (err) {
                pr_err("Failed to set tiling mode=%u, stride=%u, err=%d\n",
                       tile->tiling, tile->stride, err);
                return err;
        }

        GEM_BUG_ON(i915_gem_object_get_tiling(obj) != tile->tiling);
        GEM_BUG_ON(i915_gem_object_get_stride(obj) != tile->stride);

        i915_gem_object_lock(obj, NULL);
        err = i915_gem_object_set_to_gtt_domain(obj, true);
        i915_gem_object_unlock(obj);
        if (err) {
                pr_err("Failed to flush to GTT write domain; err=%d\n", err);
                return err;
        }

        for_each_prime_number_from(page, 1, npages) {
                struct i915_ggtt_view view =
                        compute_partial_view(obj, page, MIN_CHUNK_PAGES);
                u32 __iomem *io;
                struct page *p;
                unsigned int n;
                u64 offset;
                u32 *cpu;

                GEM_BUG_ON(view.partial.size > nreal);

                vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
                if (IS_ERR(vma)) {
                        pr_err("Failed to pin partial view: offset=%lu; err=%d\n",
                               page, (int)PTR_ERR(vma));
                        return PTR_ERR(vma);
                }

                n = page - view.partial.offset;
                GEM_BUG_ON(n >= view.partial.size);

                io = i915_vma_pin_iomap(vma);
                i915_vma_unpin(vma);
                if (IS_ERR(io)) {
                        pr_err("Failed to iomap partial view: offset=%lu; err=%d\n",
                               page, (int)PTR_ERR(io));
                        return PTR_ERR(io);
                }

                iowrite32(page, io + n * PAGE_SIZE / sizeof(*io));
                i915_vma_unpin_iomap(vma);

                offset = tiled_offset(tile, page << PAGE_SHIFT);
                if (offset >= obj->base.size)
                        continue;

                intel_gt_flush_ggtt_writes(&to_i915(obj->base.dev)->gt);

                p = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
                cpu = kmap(p) + offset_in_page(offset);
                drm_clflush_virt_range(cpu, sizeof(*cpu));
                if (*cpu != (u32)page) {
                        pr_err("Partial view for %lu [%u] (offset=%llu, size=%u [%llu, row size %u], fence=%d, tiling=%d, stride=%d) misalignment, expected write to page (%llu + %u [0x%llx]) of 0x%x, found 0x%x\n",
                               page, n,
                               view.partial.offset,
                               view.partial.size,
                               vma->size >> PAGE_SHIFT,
                               tile->tiling ? tile_row_pages(obj) : 0,
                               vma->fence ? vma->fence->id : -1, tile->tiling, tile->stride,
                               offset >> PAGE_SHIFT,
                               (unsigned int)offset_in_page(offset),
                               offset,
                               (u32)page, *cpu);
                        err = -EINVAL;
                }
                *cpu = 0;
                drm_clflush_virt_range(cpu, sizeof(*cpu));
                kunmap(p);
                if (err)
                        return err;

                if (igt_timeout(end_time,
                                "%s: timed out after tiling=%d stride=%d\n",
                                __func__, tile->tiling, tile->stride))
                        return -EINTR;
        }

        return 0;
}
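
/*
 * Fill in the platform's tile geometry for the chosen tiling mode and
 * return the maximum fence pitch, expressed in tiles.
 */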
static unsigned int
setup_tile_size(struct tile *tile, struct drm_i915_private *i915)
{
        if (INTEL_GEN(i915) <= 2) {
                tile->height = 16;
                tile->width = 128;
                tile->size = 11;
        } else if (tile->tiling == I915_TILING_Y &&
                   HAS_128_BYTE_Y_TILING(i915)) {
                tile->height = 32;
                tile->width = 128;
                tile->size = 12;
        } else {
                tile->height = 8;
                tile->width = 512;
                tile->size = 12;
        }

        if (INTEL_GEN(i915) < 4)
                return 8192 / tile->width;
        else if (INTEL_GEN(i915) < 7)
                return 128 * I965_FENCE_MAX_PITCH_VAL / tile->width;
        else
                return 128 * GEN7_FENCE_MAX_PITCH_VAL / tile->width;
}
static int igt_partial_tiling(void *arg)
{
        const unsigned int nreal = 1 << 12; /* largest tile row x2 */
        struct drm_i915_private *i915 = arg;
        struct drm_i915_gem_object *obj;
        intel_wakeref_t wakeref;
        int tiling;
        int err;

        if (!i915_ggtt_has_aperture(&i915->ggtt))
                return 0;

        /* We want to check the page mapping and fencing of a large object
         * mmapped through the GTT. The object we create is larger than can
         * possibly be mmapped as a whole, and so we must use partial GGTT vma.
         * We then check that a write through each partial GGTT vma ends up
         * in the right set of pages within the object, and with the expected
         * tiling, which we verify by manual swizzling.
         */

        obj = huge_gem_object(i915,
                              nreal << PAGE_SHIFT,
                              (1 + next_prime_number(i915->ggtt.vm.total >> PAGE_SHIFT)) << PAGE_SHIFT);
        if (IS_ERR(obj))
                return PTR_ERR(obj);

        err = i915_gem_object_pin_pages(obj);
        if (err) {
                pr_err("Failed to allocate %u pages (%lu total), err=%d\n",
                       nreal, obj->base.size / PAGE_SIZE, err);
                goto out;
        }

        wakeref = intel_runtime_pm_get(&i915->runtime_pm);

        if (1) {
                IGT_TIMEOUT(end);
                struct tile tile;

                tile.height = 1;
                tile.width = 1;
                tile.size = 0;
                tile.stride = 0;
                tile.swizzle = I915_BIT_6_SWIZZLE_NONE;
                tile.tiling = I915_TILING_NONE;

                err = check_partial_mappings(obj, &tile, end);
                if (err && err != -EINTR)
                        goto out_unlock;
        }

        for (tiling = I915_TILING_X; tiling <= I915_TILING_Y; tiling++) {
                IGT_TIMEOUT(end);
                unsigned int max_pitch;
                unsigned int pitch;
                struct tile tile;

                if (i915->quirks & QUIRK_PIN_SWIZZLED_PAGES)
                        /*
                         * The swizzling pattern is actually unknown as it
                         * varies based on physical address of each page.
                         * See i915_gem_detect_bit_6_swizzle().
                         */
                        break;

                tile.tiling = tiling;
                switch (tiling) {
                case I915_TILING_X:
                        tile.swizzle = i915->ggtt.bit_6_swizzle_x;
                        break;
                case I915_TILING_Y:
                        tile.swizzle = i915->ggtt.bit_6_swizzle_y;
                        break;
                }

                GEM_BUG_ON(tile.swizzle == I915_BIT_6_SWIZZLE_UNKNOWN);
                if (tile.swizzle == I915_BIT_6_SWIZZLE_9_17 ||
                    tile.swizzle == I915_BIT_6_SWIZZLE_9_10_17)
                        continue;

                max_pitch = setup_tile_size(&tile, i915);

                for (pitch = max_pitch; pitch; pitch >>= 1) {
                        tile.stride = tile.width * pitch;
                        err = check_partial_mappings(obj, &tile, end);
                        if (err == -EINTR)
                                goto next_tiling;
                        if (err)
                                goto out_unlock;

                        if (pitch > 2 && INTEL_GEN(i915) >= 4) {
                                tile.stride = tile.width * (pitch - 1);
                                err = check_partial_mappings(obj, &tile, end);
                                if (err == -EINTR)
                                        goto next_tiling;
                                if (err)
                                        goto out_unlock;
                        }

                        if (pitch < max_pitch && INTEL_GEN(i915) >= 4) {
                                tile.stride = tile.width * (pitch + 1);
                                err = check_partial_mappings(obj, &tile, end);
                                if (err == -EINTR)
                                        goto next_tiling;
                                if (err)
                                        goto out_unlock;
                        }
                }

                if (INTEL_GEN(i915) >= 4) {
                        for_each_prime_number(pitch, max_pitch) {
                                tile.stride = tile.width * pitch;
                                err = check_partial_mappings(obj, &tile, end);
                                if (err == -EINTR)
                                        goto next_tiling;
                                if (err)
                                        goto out_unlock;
                        }
                }

next_tiling: ;
        }

out_unlock:
        intel_runtime_pm_put(&i915->runtime_pm, wakeref);
        i915_gem_object_unpin_pages(obj);
out:
        i915_gem_object_put(obj);
        return err;
}
static int igt_smoke_tiling(void *arg)
{
        const unsigned int nreal = 1 << 12; /* largest tile row x2 */
        struct drm_i915_private *i915 = arg;
        struct drm_i915_gem_object *obj;
        intel_wakeref_t wakeref;
        I915_RND_STATE(prng);
        IGT_TIMEOUT(end);
        unsigned long count;
        int err;

        if (!i915_ggtt_has_aperture(&i915->ggtt))
                return 0;

        /*
         * igt_partial_tiling() does an exhaustive check of partial tiling
         * chunking, but will undoubtedly run out of time. Here, we do a
         * randomised search and hope that over many runs of 1s with different
         * seeds we will do a thorough check.
         *
         * Remember to look at the st_seed if we see a flip-flop in BAT!
         */

        if (i915->quirks & QUIRK_PIN_SWIZZLED_PAGES)
                return 0;

        obj = huge_gem_object(i915,
                              nreal << PAGE_SHIFT,
                              (1 + next_prime_number(i915->ggtt.vm.total >> PAGE_SHIFT)) << PAGE_SHIFT);
        if (IS_ERR(obj))
                return PTR_ERR(obj);

        err = i915_gem_object_pin_pages(obj);
        if (err) {
                pr_err("Failed to allocate %u pages (%lu total), err=%d\n",
                       nreal, obj->base.size / PAGE_SIZE, err);
                goto out;
        }

        wakeref = intel_runtime_pm_get(&i915->runtime_pm);

        count = 0;
        do {
                struct tile tile;

                tile.tiling =
                        i915_prandom_u32_max_state(I915_TILING_Y + 1, &prng);
                switch (tile.tiling) {
                case I915_TILING_NONE:
                        tile.height = 1;
                        tile.width = 1;
                        tile.size = 0;
                        tile.stride = 0;
                        tile.swizzle = I915_BIT_6_SWIZZLE_NONE;
                        break;

                case I915_TILING_X:
                        tile.swizzle = i915->ggtt.bit_6_swizzle_x;
                        break;
                case I915_TILING_Y:
                        tile.swizzle = i915->ggtt.bit_6_swizzle_y;
                        break;
                }

                if (tile.swizzle == I915_BIT_6_SWIZZLE_9_17 ||
                    tile.swizzle == I915_BIT_6_SWIZZLE_9_10_17)
                        continue;

                if (tile.tiling != I915_TILING_NONE) {
                        unsigned int max_pitch = setup_tile_size(&tile, i915);

                        tile.stride =
                                i915_prandom_u32_max_state(max_pitch, &prng);
                        tile.stride = (1 + tile.stride) * tile.width;
                        if (INTEL_GEN(i915) < 4)
                                tile.stride = rounddown_pow_of_two(tile.stride);
                }

                err = check_partial_mapping(obj, &tile, &prng);
                if (err)
                        break;

                count++;
        } while (!__igt_timeout(end, NULL));

        pr_info("%s: Completed %lu trials\n", __func__, count);

        intel_runtime_pm_put(&i915->runtime_pm, wakeref);
        i915_gem_object_unpin_pages(obj);
out:
        i915_gem_object_put(obj);
        return err;
}
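
/*
 * Submit a dummy request against the object on every uabi engine so that
 * it stays busy, then drop our reference: the object is kept alive only
 * by its active reference until the requests are retired.
 */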
static int make_obj_busy(struct drm_i915_gem_object *obj)
{
        struct drm_i915_private *i915 = to_i915(obj->base.dev);
        struct intel_engine_cs *engine;

        for_each_uabi_engine(engine, i915) {
                struct i915_request *rq;
                struct i915_vma *vma;
                struct i915_gem_ww_ctx ww;
                int err;

                vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
                if (IS_ERR(vma))
                        return PTR_ERR(vma);

                i915_gem_ww_ctx_init(&ww, false);
retry:
                err = i915_gem_object_lock(obj, &ww);
                if (!err)
                        err = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_USER);
                if (err)
                        goto err;

                rq = intel_engine_create_kernel_request(engine);
                if (IS_ERR(rq)) {
                        err = PTR_ERR(rq);
                        goto err_unpin;
                }

                err = i915_request_await_object(rq, vma->obj, true);
                if (err == 0)
                        err = i915_vma_move_to_active(vma, rq,
                                                      EXEC_OBJECT_WRITE);

                i915_request_add(rq);
err_unpin:
                i915_vma_unpin(vma);
err:
                if (err == -EDEADLK) {
                        err = i915_gem_ww_ctx_backoff(&ww);
                        if (!err)
                                goto retry;
                }
                i915_gem_ww_ctx_fini(&ww);
                if (err)
                        return err;
        }

        i915_gem_object_put(obj); /* leave it only alive via its active ref */
        return 0;
}
static bool assert_mmap_offset(struct drm_i915_private *i915,
                               unsigned long size,
                               int expected)
{
        struct drm_i915_gem_object *obj;
        struct i915_mmap_offset *mmo;

        obj = i915_gem_object_create_internal(i915, size);
        if (IS_ERR(obj))
                return false;

        mmo = mmap_offset_attach(obj, I915_MMAP_OFFSET_GTT, NULL);
        i915_gem_object_put(obj);

        return PTR_ERR_OR_ZERO(mmo) == expected;
}
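
/*
 * The exhaustion test needs full control over when objects are reaped,
 * so hold a GT wakeref, cancel the retire worker and unregister the
 * shrinker while it runs; restore_retire_worker() undoes this after
 * flushing outstanding work.
 */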
static void disable_retire_worker(struct drm_i915_private *i915)
{
        i915_gem_driver_unregister__shrinker(i915);
        intel_gt_pm_get(&i915->gt);
        cancel_delayed_work_sync(&i915->gt.requests.retire_work);
}

static void restore_retire_worker(struct drm_i915_private *i915)
{
        igt_flush_test(i915);
        intel_gt_pm_put(&i915->gt);
        i915_gem_driver_register__shrinker(i915);
}

static void mmap_offset_lock(struct drm_i915_private *i915)
        __acquires(&i915->drm.vma_offset_manager->vm_lock)
{
        write_lock(&i915->drm.vma_offset_manager->vm_lock);
}

static void mmap_offset_unlock(struct drm_i915_private *i915)
        __releases(&i915->drm.vma_offset_manager->vm_lock)
{
        write_unlock(&i915->drm.vma_offset_manager->vm_lock);
}
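
/*
 * Shrink the mmap offset (fault) address space to a single page, check
 * that a one page object just fits while a larger one is rejected with
 * -ENOSPC, and then verify that busy-but-dead objects are reaped to make
 * room for new mmap offsets.
 */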
static int igt_mmap_offset_exhaustion(void *arg)
{
        struct drm_i915_private *i915 = arg;
        struct drm_mm *mm = &i915->drm.vma_offset_manager->vm_addr_space_mm;
        struct drm_i915_gem_object *obj;
        struct drm_mm_node *hole, *next;
        struct i915_mmap_offset *mmo;
        int loop, err = 0;

        /* Disable background reaper */
        disable_retire_worker(i915);
        GEM_BUG_ON(!i915->gt.awake);
        intel_gt_retire_requests(&i915->gt);
        i915_gem_drain_freed_objects(i915);

        /* Trim the device mmap space to only a page */
        mmap_offset_lock(i915);
        loop = 1; /* PAGE_SIZE units */
        list_for_each_entry_safe(hole, next, &mm->hole_stack, hole_stack) {
                struct drm_mm_node *resv;

                resv = kzalloc(sizeof(*resv), GFP_NOWAIT);
                if (!resv) {
                        err = -ENOMEM;
                        goto out_park;
                }

                resv->start = drm_mm_hole_node_start(hole) + loop;
                resv->size = hole->hole_size - loop;
                resv->color = -1ul;
                loop = 0;

                if (!resv->size) {
                        kfree(resv);
                        continue;
                }

                pr_debug("Reserving hole [%llx + %llx]\n",
                         resv->start, resv->size);

                err = drm_mm_reserve_node(mm, resv);
                if (err) {
                        pr_err("Failed to trim VMA manager, err=%d\n", err);
                        kfree(resv);
                        goto out_park;
                }
        }
        GEM_BUG_ON(!list_is_singular(&mm->hole_stack));
        mmap_offset_unlock(i915);

        /* Just fits! */
        if (!assert_mmap_offset(i915, PAGE_SIZE, 0)) {
                pr_err("Unable to insert object into single page hole\n");
                err = -EINVAL;
                goto out;
        }

        /* Too large */
        if (!assert_mmap_offset(i915, 2 * PAGE_SIZE, -ENOSPC)) {
                pr_err("Unexpectedly succeeded in inserting too large object into single page hole\n");
                err = -EINVAL;
                goto out;
        }

        /* Fill the hole, further allocation attempts should then fail */
        obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
        if (IS_ERR(obj)) {
                err = PTR_ERR(obj);
                goto out;
        }

        mmo = mmap_offset_attach(obj, I915_MMAP_OFFSET_GTT, NULL);
        if (IS_ERR(mmo)) {
                pr_err("Unable to insert object into reclaimed hole\n");
                err = PTR_ERR(mmo);
                goto err_obj;
        }

        if (!assert_mmap_offset(i915, PAGE_SIZE, -ENOSPC)) {
                pr_err("Unexpectedly succeeded in inserting object into no holes!\n");
                err = -EINVAL;
                goto err_obj;
        }

        i915_gem_object_put(obj);

        /* Now fill with busy dead objects that we expect to reap */
        for (loop = 0; loop < 3; loop++) {
                if (intel_gt_is_wedged(&i915->gt))
                        break;

                obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
                if (IS_ERR(obj)) {
                        err = PTR_ERR(obj);
                        goto out;
                }

                err = make_obj_busy(obj);
                if (err) {
                        pr_err("[loop %d] Failed to busy the object\n", loop);
                        goto err_obj;
                }
        }

out:
        mmap_offset_lock(i915);
out_park:
        drm_mm_for_each_node_safe(hole, next, mm) {
                if (hole->color != -1ul)
                        continue;

                drm_mm_remove_node(hole);
                kfree(hole);
        }
        mmap_offset_unlock(i915);
        restore_retire_worker(i915);
        return err;
err_obj:
        i915_gem_object_put(obj);
        goto out;
}
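
/*
 * gtt_set()/wc_set() fill the object with POISON_INUSE through a GGTT or
 * WC mapping; gtt_check()/wc_check() then assert that the POISON_FREE
 * pattern written via the user mmap is visible in the backing store.
 */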
static int gtt_set(struct drm_i915_gem_object *obj)
{
        struct i915_vma *vma;
        void __iomem *map;
        int err = 0;

        vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
        if (IS_ERR(vma))
                return PTR_ERR(vma);

        intel_gt_pm_get(vma->vm->gt);
        map = i915_vma_pin_iomap(vma);
        i915_vma_unpin(vma);
        if (IS_ERR(map)) {
                err = PTR_ERR(map);
                goto out;
        }

        memset_io(map, POISON_INUSE, obj->base.size);
        i915_vma_unpin_iomap(vma);

out:
        intel_gt_pm_put(vma->vm->gt);
        return err;
}
static int gtt_check(struct drm_i915_gem_object *obj)
{
        struct i915_vma *vma;
        void __iomem *map;
        int err = 0;

        vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
        if (IS_ERR(vma))
                return PTR_ERR(vma);

        intel_gt_pm_get(vma->vm->gt);
        map = i915_vma_pin_iomap(vma);
        i915_vma_unpin(vma);
        if (IS_ERR(map)) {
                err = PTR_ERR(map);
                goto out;
        }

        if (memchr_inv((void __force *)map, POISON_FREE, obj->base.size)) {
                pr_err("%s: Write via mmap did not land in backing store (GTT)\n",
                       obj->mm.region->name);
                err = -EINVAL;
        }
        i915_vma_unpin_iomap(vma);

out:
        intel_gt_pm_put(vma->vm->gt);
        return err;
}
static int wc_set(struct drm_i915_gem_object *obj)
{
        void *vaddr;

        vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC);
        if (IS_ERR(vaddr))
                return PTR_ERR(vaddr);

        memset(vaddr, POISON_INUSE, obj->base.size);
        i915_gem_object_flush_map(obj);
        i915_gem_object_unpin_map(obj);

        return 0;
}

static int wc_check(struct drm_i915_gem_object *obj)
{
        void *vaddr;
        int err = 0;

        vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC);
        if (IS_ERR(vaddr))
                return PTR_ERR(vaddr);

        if (memchr_inv(vaddr, POISON_FREE, obj->base.size)) {
                pr_err("%s: Write via mmap did not land in backing store (WC)\n",
                       obj->mm.region->name);
                err = -EINVAL;
        }

        i915_gem_object_unpin_map(obj);

        return err;
}
static bool can_mmap(struct drm_i915_gem_object *obj, enum i915_mmap_type type)
{
        if (type == I915_MMAP_TYPE_GTT &&
            !i915_ggtt_has_aperture(&to_i915(obj->base.dev)->ggtt))
                return false;

        if (type != I915_MMAP_TYPE_GTT &&
            !i915_gem_object_type_has(obj,
                                      I915_GEM_OBJECT_HAS_STRUCT_PAGE |
                                      I915_GEM_OBJECT_HAS_IOMEM))
                return false;

        return true;
}

#define expand32(x) (((x) << 0) | ((x) << 8) | ((x) << 16) | ((x) << 24))
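
/*
 * Map the object with the requested mmap type, check the resulting VMA
 * points back at our mmap_offset, then read back the POISON_INUSE fill
 * and overwrite it with POISON_FREE through the user mapping so the
 * *_check() helpers can verify the writes reached the backing store.
 */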
static int __igt_mmap(struct drm_i915_private *i915,
                      struct drm_i915_gem_object *obj,
                      enum i915_mmap_type type)
{
        struct i915_mmap_offset *mmo;
        struct vm_area_struct *area;
        unsigned long addr;
        int err, i;

        if (!can_mmap(obj, type))
                return 0;

        err = wc_set(obj);
        if (err == -ENXIO)
                err = gtt_set(obj);
        if (err)
                return err;

        mmo = mmap_offset_attach(obj, type, NULL);
        if (IS_ERR(mmo))
                return PTR_ERR(mmo);

        addr = igt_mmap_node(i915, &mmo->vma_node, 0, PROT_WRITE, MAP_SHARED);
        if (IS_ERR_VALUE(addr))
                return addr;

        pr_debug("igt_mmap(%s, %d) @ %lx\n", obj->mm.region->name, type, addr);

        area = find_vma(current->mm, addr);
        if (!area) {
                pr_err("%s: Did not create a vm_area_struct for the mmap\n",
                       obj->mm.region->name);
                err = -EINVAL;
                goto out_unmap;
        }

        if (area->vm_private_data != mmo) {
                pr_err("%s: vm_area_struct did not point back to our mmap_offset object!\n",
                       obj->mm.region->name);
                err = -EINVAL;
                goto out_unmap;
        }

        for (i = 0; i < obj->base.size / sizeof(u32); i++) {
                u32 __user *ux = u64_to_user_ptr((u64)(addr + i * sizeof(*ux)));
                u32 x;

                if (get_user(x, ux)) {
                        pr_err("%s: Unable to read from mmap, offset:%zd\n",
                               obj->mm.region->name, i * sizeof(x));
                        err = -EFAULT;
                        goto out_unmap;
                }

                if (x != expand32(POISON_INUSE)) {
                        pr_err("%s: Read incorrect value from mmap, offset:%zd, found:%x, expected:%x\n",
                               obj->mm.region->name,
                               i * sizeof(x), x, expand32(POISON_INUSE));
                        err = -EINVAL;
                        goto out_unmap;
                }

                x = expand32(POISON_FREE);
                if (put_user(x, ux)) {
                        pr_err("%s: Unable to write to mmap, offset:%zd\n",
                               obj->mm.region->name, i * sizeof(x));
                        err = -EFAULT;
                        goto out_unmap;
                }
        }

        if (type == I915_MMAP_TYPE_GTT)
                intel_gt_flush_ggtt_writes(&i915->gt);

        err = wc_check(obj);
        if (err == -ENXIO)
                err = gtt_check(obj);
out_unmap:
        vm_munmap(addr, obj->base.size);
        return err;
}
static int igt_mmap(void *arg)
{
        struct drm_i915_private *i915 = arg;
        struct intel_memory_region *mr;
        enum intel_region_id id;

        for_each_memory_region(mr, i915, id) {
                unsigned long sizes[] = {
                        PAGE_SIZE,
                        mr->min_page_size,
                        SZ_4M,
                };
                int i;

                for (i = 0; i < ARRAY_SIZE(sizes); i++) {
                        struct drm_i915_gem_object *obj;
                        int err;

                        obj = i915_gem_object_create_region(mr, sizes[i], 0);
                        if (obj == ERR_PTR(-ENODEV))
                                continue;

                        if (IS_ERR(obj))
                                return PTR_ERR(obj);

                        err = __igt_mmap(i915, obj, I915_MMAP_TYPE_GTT);
                        if (err == 0)
                                err = __igt_mmap(i915, obj, I915_MMAP_TYPE_WC);

                        i915_gem_object_put(obj);
                        if (err)
                                return err;
                }
        }

        return 0;
}
static const char *repr_mmap_type(enum i915_mmap_type type)
{
        switch (type) {
        case I915_MMAP_TYPE_GTT: return "gtt";
        case I915_MMAP_TYPE_WB: return "wb";
        case I915_MMAP_TYPE_WC: return "wc";
        case I915_MMAP_TYPE_UC: return "uc";
        default: return "unknown";
        }
}

static bool can_access(const struct drm_i915_gem_object *obj)
{
        unsigned int flags =
                I915_GEM_OBJECT_HAS_STRUCT_PAGE | I915_GEM_OBJECT_HAS_IOMEM;

        return i915_gem_object_type_has(obj, flags);
}
static int __igt_mmap_access(struct drm_i915_private *i915,
                             struct drm_i915_gem_object *obj,
                             enum i915_mmap_type type)
{
        struct i915_mmap_offset *mmo;
        unsigned long __user *ptr;
        unsigned long A, B;
        unsigned long x, y;
        unsigned long addr;
        int err;

        memset(&A, 0xAA, sizeof(A));
        memset(&B, 0xBB, sizeof(B));

        if (!can_mmap(obj, type) || !can_access(obj))
                return 0;

        mmo = mmap_offset_attach(obj, type, NULL);
        if (IS_ERR(mmo))
                return PTR_ERR(mmo);

        addr = igt_mmap_node(i915, &mmo->vma_node, 0, PROT_WRITE, MAP_SHARED);
        if (IS_ERR_VALUE(addr))
                return addr;
        ptr = (unsigned long __user *)addr;

        err = __put_user(A, ptr);
        if (err) {
                pr_err("%s(%s): failed to write into user mmap\n",
                       obj->mm.region->name, repr_mmap_type(type));
                goto out_unmap;
        }

        intel_gt_flush_ggtt_writes(&i915->gt);

        err = access_process_vm(current, addr, &x, sizeof(x), 0);
        if (err != sizeof(x)) {
                pr_err("%s(%s): access_process_vm() read failed\n",
                       obj->mm.region->name, repr_mmap_type(type));
                err = -EFAULT;
                goto out_unmap;
        }

        err = access_process_vm(current, addr, &B, sizeof(B), FOLL_WRITE);
        if (err != sizeof(B)) {
                pr_err("%s(%s): access_process_vm() write failed\n",
                       obj->mm.region->name, repr_mmap_type(type));
                err = -EFAULT;
                goto out_unmap;
        }

        intel_gt_flush_ggtt_writes(&i915->gt);

        err = __get_user(y, ptr);
        if (err) {
                pr_err("%s(%s): failed to read from user mmap\n",
                       obj->mm.region->name, repr_mmap_type(type));
                goto out_unmap;
        }

        if (x != A || y != B) {
                pr_err("%s(%s): failed to read/write values, found (%lx, %lx)\n",
                       obj->mm.region->name, repr_mmap_type(type),
                       x, y);
                err = -EINVAL;
                goto out_unmap;
        }

out_unmap:
        vm_munmap(addr, obj->base.size);
        return err;
}
static int igt_mmap_access(void *arg)
{
        struct drm_i915_private *i915 = arg;
        struct intel_memory_region *mr;
        enum intel_region_id id;

        for_each_memory_region(mr, i915, id) {
                struct drm_i915_gem_object *obj;
                int err;

                obj = i915_gem_object_create_region(mr, PAGE_SIZE, 0);
                if (obj == ERR_PTR(-ENODEV))
                        continue;

                if (IS_ERR(obj))
                        return PTR_ERR(obj);

                err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_GTT);
                if (err == 0)
                        err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_WB);
                if (err == 0)
                        err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_WC);
                if (err == 0)
                        err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_UC);

                i915_gem_object_put(obj);
                if (err)
                        return err;
        }

        return 0;
}
static int __igt_mmap_gpu(struct drm_i915_private *i915,
                          struct drm_i915_gem_object *obj,
                          enum i915_mmap_type type)
{
        struct intel_engine_cs *engine;
        struct i915_mmap_offset *mmo;
        unsigned long addr;
        u32 __user *ux;
        u32 bbe;
        int err;

        /*
         * Verify that the mmap access into the backing store aligns with
         * that of the GPU, i.e. that mmap is indeed writing into the same
         * page as being read by the GPU.
         */

        if (!can_mmap(obj, type))
                return 0;

        err = wc_set(obj);
        if (err == -ENXIO)
                err = gtt_set(obj);
        if (err)
                return err;

        mmo = mmap_offset_attach(obj, type, NULL);
        if (IS_ERR(mmo))
                return PTR_ERR(mmo);

        addr = igt_mmap_node(i915, &mmo->vma_node, 0, PROT_WRITE, MAP_SHARED);
        if (IS_ERR_VALUE(addr))
                return addr;

        ux = u64_to_user_ptr((u64)addr);
        bbe = MI_BATCH_BUFFER_END;
        if (put_user(bbe, ux)) {
                pr_err("%s: Unable to write to mmap\n", obj->mm.region->name);
                err = -EFAULT;
                goto out_unmap;
        }

        if (type == I915_MMAP_TYPE_GTT)
                intel_gt_flush_ggtt_writes(&i915->gt);

        for_each_uabi_engine(engine, i915) {
                struct i915_request *rq;
                struct i915_vma *vma;
                struct i915_gem_ww_ctx ww;

                vma = i915_vma_instance(obj, engine->kernel_context->vm, NULL);
                if (IS_ERR(vma)) {
                        err = PTR_ERR(vma);
                        goto out_unmap;
                }

                i915_gem_ww_ctx_init(&ww, false);
retry:
                err = i915_gem_object_lock(obj, &ww);
                if (!err)
                        err = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_USER);
                if (err)
                        goto out_ww;

                rq = i915_request_create(engine->kernel_context);
                if (IS_ERR(rq)) {
                        err = PTR_ERR(rq);
                        goto out_unpin;
                }

                err = i915_request_await_object(rq, vma->obj, false);
                if (err == 0)
                        err = i915_vma_move_to_active(vma, rq, 0);

                err = engine->emit_bb_start(rq, vma->node.start, 0, 0);
                i915_request_get(rq);
                i915_request_add(rq);

                if (i915_request_wait(rq, 0, HZ / 5) < 0) {
                        struct drm_printer p =
                                drm_info_printer(engine->i915->drm.dev);

                        pr_err("%s(%s, %s): Failed to execute batch\n",
                               __func__, engine->name, obj->mm.region->name);
                        intel_engine_dump(engine, &p,
                                          "%s\n", engine->name);

                        intel_gt_set_wedged(engine->gt);
                        err = -EIO;
                }
                i915_request_put(rq);

out_unpin:
                i915_vma_unpin(vma);
out_ww:
                if (err == -EDEADLK) {
                        err = i915_gem_ww_ctx_backoff(&ww);
                        if (!err)
                                goto retry;
                }
                i915_gem_ww_ctx_fini(&ww);
                if (err)
                        goto out_unmap;
        }

out_unmap:
        vm_munmap(addr, obj->base.size);
        return err;
}
static int igt_mmap_gpu(void *arg)
{
        struct drm_i915_private *i915 = arg;
        struct intel_memory_region *mr;
        enum intel_region_id id;

        for_each_memory_region(mr, i915, id) {
                struct drm_i915_gem_object *obj;
                int err;

                obj = i915_gem_object_create_region(mr, PAGE_SIZE, 0);
                if (obj == ERR_PTR(-ENODEV))
                        continue;

                if (IS_ERR(obj))
                        return PTR_ERR(obj);

                err = __igt_mmap_gpu(i915, obj, I915_MMAP_TYPE_GTT);
                if (err == 0)
                        err = __igt_mmap_gpu(i915, obj, I915_MMAP_TYPE_WC);

                i915_gem_object_put(obj);
                if (err)
                        return err;
        }

        return 0;
}
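
/*
 * PTE walkers for the revoke test: after prefaulting, every PTE in the
 * user mapping must be present; after the object is unbound (and its
 * pages dropped for non-GTT mmaps), every PTE must have been revoked.
 */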
static int check_present_pte(pte_t *pte, unsigned long addr, void *data)
{
        if (!pte_present(*pte) || pte_none(*pte)) {
                pr_err("missing PTE:%lx\n",
                       (addr - (unsigned long)data) >> PAGE_SHIFT);
                return -EINVAL;
        }

        return 0;
}

static int check_absent_pte(pte_t *pte, unsigned long addr, void *data)
{
        if (pte_present(*pte) && !pte_none(*pte)) {
                pr_err("present PTE:%lx; expected to be revoked\n",
                       (addr - (unsigned long)data) >> PAGE_SHIFT);
                return -EINVAL;
        }

        return 0;
}

static int check_present(unsigned long addr, unsigned long len)
{
        return apply_to_page_range(current->mm, addr, len,
                                   check_present_pte, (void *)addr);
}

static int check_absent(unsigned long addr, unsigned long len)
{
        return apply_to_page_range(current->mm, addr, len,
                                   check_absent_pte, (void *)addr);
}

static int prefault_range(u64 start, u64 len)
{
        const char __user *addr, *end;
        char __maybe_unused c;
        int err;

        addr = u64_to_user_ptr(start);
        end = addr + len;

        for (; addr < end; addr += PAGE_SIZE) {
                err = __get_user(c, addr);
                if (err)
                        return err;
        }

        return __get_user(c, end - 1);
}
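
/*
 * Fault in the whole mapping, confirm the PTEs are present, then unbind
 * the object and verify that userspace access has been revoked.
 */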
static int __igt_mmap_revoke(struct drm_i915_private *i915,
                             struct drm_i915_gem_object *obj,
                             enum i915_mmap_type type)
{
        struct i915_mmap_offset *mmo;
        unsigned long addr;
        int err;

        if (!can_mmap(obj, type))
                return 0;

        mmo = mmap_offset_attach(obj, type, NULL);
        if (IS_ERR(mmo))
                return PTR_ERR(mmo);

        addr = igt_mmap_node(i915, &mmo->vma_node, 0, PROT_WRITE, MAP_SHARED);
        if (IS_ERR_VALUE(addr))
                return addr;

        err = prefault_range(addr, obj->base.size);
        if (err)
                goto out_unmap;

        err = check_present(addr, obj->base.size);
        if (err) {
                pr_err("%s: was not present\n", obj->mm.region->name);
                goto out_unmap;
        }

        /*
         * After unbinding the object from the GGTT, its address may be reused
         * for other objects. Ergo we have to revoke the previous mmap PTE
         * access as it no longer points to the same object.
         */
        err = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE);
        if (err) {
                pr_err("Failed to unbind object!\n");
                goto out_unmap;
        }

        if (type != I915_MMAP_TYPE_GTT) {
                __i915_gem_object_put_pages(obj);
                if (i915_gem_object_has_pages(obj)) {
                        pr_err("Failed to put-pages object!\n");
                        err = -EINVAL;
                        goto out_unmap;
                }
        }

        err = check_absent(addr, obj->base.size);
        if (err) {
                pr_err("%s: was not absent\n", obj->mm.region->name);
                goto out_unmap;
        }

out_unmap:
        vm_munmap(addr, obj->base.size);
        return err;
}
static int igt_mmap_revoke(void *arg)
{
        struct drm_i915_private *i915 = arg;
        struct intel_memory_region *mr;
        enum intel_region_id id;

        for_each_memory_region(mr, i915, id) {
                struct drm_i915_gem_object *obj;
                int err;

                obj = i915_gem_object_create_region(mr, PAGE_SIZE, 0);
                if (obj == ERR_PTR(-ENODEV))
                        continue;

                if (IS_ERR(obj))
                        return PTR_ERR(obj);

                err = __igt_mmap_revoke(i915, obj, I915_MMAP_TYPE_GTT);
                if (err == 0)
                        err = __igt_mmap_revoke(i915, obj, I915_MMAP_TYPE_WC);

                i915_gem_object_put(obj);
                if (err)
                        return err;
        }

        return 0;
}
int i915_gem_mman_live_selftests(struct drm_i915_private *i915)
{
        static const struct i915_subtest tests[] = {
                SUBTEST(igt_partial_tiling),
                SUBTEST(igt_smoke_tiling),
                SUBTEST(igt_mmap_offset_exhaustion),
                SUBTEST(igt_mmap),
                SUBTEST(igt_mmap_access),
                SUBTEST(igt_mmap_revoke),
                SUBTEST(igt_mmap_gpu),
        };

        return i915_subtests(tests, i915);
}