// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include "i915_selftest.h"

#include "gt/intel_engine_user.h"
#include "gt/intel_gt.h"

#include "selftests/igt_flush_test.h"
#include "selftests/mock_drm.h"
#include "huge_gem_object.h"
#include "mock_context.h"
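
/*
 * __igt_client_fill() exercises the client blt fill path: each iteration
 * picks a randomised object size and fill value, schedules a blitter fill
 * through the engine's kernel context, and then reads the backing pages
 * back on the CPU to check that every dword holds the expected value.
 */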
static int __igt_client_fill(struct intel_engine_cs *engine)
{
	struct intel_context *ce = engine->kernel_context;
	struct drm_i915_gem_object *obj;
	struct rnd_state prng;
	IGT_TIMEOUT(end);
	u32 *vaddr;
	int err = 0;

	prandom_seed_state(&prng, i915_selftest.random_seed);

	intel_engine_pm_get(engine);
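	/*
	 * Keep scheduling randomly sized fills until the selftest
	 * timeout (IGT_TIMEOUT) expires.
	 */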
	do {
		const u32 max_block_size = S16_MAX * PAGE_SIZE;
		u32 sz = min_t(u64, ce->vm->total >> 4, prandom_u32_state(&prng));
		u32 phys_sz = sz % (max_block_size + 1);
		u32 val = prandom_u32_state(&prng);
		u32 i;

		sz = round_up(sz, PAGE_SIZE);
		phys_sz = round_up(phys_sz, PAGE_SIZE);

		pr_debug("%s with phys_sz=%x, sz=%x, val=%x\n", __func__,
			 phys_sz, sz, val);
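
		/*
		 * huge_gem_object() reports a virtual size of sz while only
		 * allocating phys_sz of backing storage, so large sizes can
		 * be covered cheaply.
		 */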
		obj = huge_gem_object(engine->i915, phys_sz, sz);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			goto err_flush;
		}

		vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
		if (IS_ERR(vaddr)) {
			err = PTR_ERR(vaddr);
			goto err_put;
		}
		/*
		 * XXX: The goal is to move this to get_pages, so try to
		 * dirty the CPU cache first to check that we do the required
		 * clflush before scheduling the blt for !llc platforms. This
		 * matches some version of reality where at get_pages the
		 * pages themselves may not yet be coherent with the GPU
		 * (swap-in). If we are missing the flush then we should see
		 * the stale cache values after we do the set_to_cpu_domain
		 * and pick it up as a test failure.
		 */
		memset32(vaddr, val ^ 0xdeadbeaf,
			 huge_gem_object_phys_size(obj) / sizeof(u32));
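
		/*
		 * If CPU writes are not coherent with the GPU, mark the
		 * object dirty so the pages are clflushed before the blt.
		 */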
		if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
			obj->cache_dirty = true;

		err = i915_gem_schedule_fill_pages_blt(obj, ce, obj->mm.pages,
						       &obj->mm.page_sizes,
						       val);
		if (err)
			goto err_unpin;
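
		/*
		 * Wait for the blt to complete and move the object to the
		 * CPU read domain before inspecting the result.
		 */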
		i915_gem_object_lock(obj);
		err = i915_gem_object_set_to_cpu_domain(obj, false);
		i915_gem_object_unlock(obj);
		if (err)
			goto err_unpin;
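
		/* Every dword of the backing store should now hold val. */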
		for (i = 0; i < huge_gem_object_phys_size(obj) / sizeof(u32); ++i) {
			if (vaddr[i] != val) {
				pr_err("vaddr[%u]=%x, expected=%x\n", i,
				       vaddr[i], val);
				err = -EINVAL;
				goto err_unpin;
			}
		}

		i915_gem_object_unpin_map(obj);
		i915_gem_object_put(obj);
	} while (!time_after(jiffies, end));

	goto err_flush;

err_unpin:
	i915_gem_object_unpin_map(obj);
err_put:
	i915_gem_object_put(obj);
err_flush:
	if (err == -ENOMEM)
		err = 0;

	intel_engine_pm_put(engine);

	return err;
}
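
/*
 * Run the fill test once on every user-visible copy-class (blitter) engine
 * instance.
 */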
static int igt_client_fill(void *arg)
{
	int inst = 0;

	do {
		struct intel_engine_cs *engine;
		int err;

		engine = intel_engine_lookup_user(arg,
						  I915_ENGINE_CLASS_COPY,
						  inst++);
		if (!engine)
			return 0;

		err = __igt_client_fill(engine);
		if (err == -ENOMEM)
			err = 0;
		if (err)
			return err;
	} while (1);
}
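
/*
 * Live selftest entry point: skipped when the GT is already wedged or the
 * platform has no blitter engine (BCS0).
 */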
int i915_gem_client_blt_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_client_fill),
	};

	if (intel_gt_is_wedged(&i915->gt))
		return 0;

	if (!HAS_ENGINE(i915, BCS0))
		return 0;

	return i915_live_subtests(tests, i915);
}