/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */
7 #include "igt_gem_utils.h"
9 #include "gem/i915_gem_context.h"
10 #include "gem/i915_gem_pm.h"
11 #include "gt/intel_context.h"
12 #include "gt/intel_gt.h"
16 #include "i915_request.h"
19 igt_request_alloc(struct i915_gem_context
*ctx
, struct intel_engine_cs
*engine
)
21 struct intel_context
*ce
;
22 struct i915_request
*rq
;
25 * Pinning the contexts may generate requests in order to acquire
26 * GGTT space, so do this first before we reserve a seqno for
29 ce
= i915_gem_context_get_engine(ctx
, engine
->legacy_idx
);
33 rq
= intel_context_create_request(ce
);
34 intel_context_put(ce
);
40 igt_emit_store_dw(struct i915_vma
*vma
,
45 struct drm_i915_gem_object
*obj
;
46 const int gen
= INTEL_GEN(vma
->vm
->i915
);
47 unsigned long n
, size
;
51 size
= (4 * count
+ 1) * sizeof(u32
);
52 size
= round_up(size
, PAGE_SIZE
);
53 obj
= i915_gem_object_create_internal(vma
->vm
->i915
, size
);
57 cmd
= i915_gem_object_pin_map(obj
, I915_MAP_WC
);
63 GEM_BUG_ON(offset
+ (count
- 1) * PAGE_SIZE
> vma
->node
.size
);
64 offset
+= vma
->node
.start
;
66 for (n
= 0; n
< count
; n
++) {
68 *cmd
++ = MI_STORE_DWORD_IMM_GEN4
;
69 *cmd
++ = lower_32_bits(offset
);
70 *cmd
++ = upper_32_bits(offset
);
72 } else if (gen
>= 4) {
73 *cmd
++ = MI_STORE_DWORD_IMM_GEN4
|
74 (gen
< 6 ? MI_USE_GGTT
: 0);
79 *cmd
++ = MI_STORE_DWORD_IMM
| MI_MEM_VIRTUAL
;
85 *cmd
= MI_BATCH_BUFFER_END
;
87 i915_gem_object_flush_map(obj
);
88 i915_gem_object_unpin_map(obj
);
90 intel_gt_chipset_flush(vma
->vm
->gt
);
92 vma
= i915_vma_instance(obj
, vma
->vm
, NULL
);
98 err
= i915_vma_pin(vma
, 0, 0, PIN_USER
);
105 i915_gem_object_put(obj
);
109 int igt_gpu_fill_dw(struct intel_context
*ce
,
110 struct i915_vma
*vma
, u64 offset
,
111 unsigned long count
, u32 val
)
113 struct i915_request
*rq
;
114 struct i915_vma
*batch
;
118 GEM_BUG_ON(!intel_engine_can_store_dword(ce
->engine
));
119 GEM_BUG_ON(!i915_vma_is_pinned(vma
));
121 batch
= igt_emit_store_dw(vma
, offset
, count
, val
);
123 return PTR_ERR(batch
);
125 rq
= intel_context_create_request(ce
);
131 i915_vma_lock(batch
);
132 err
= i915_request_await_object(rq
, batch
->obj
, false);
134 err
= i915_vma_move_to_active(batch
, rq
, 0);
135 i915_vma_unlock(batch
);
140 err
= i915_request_await_object(rq
, vma
->obj
, true);
142 err
= i915_vma_move_to_active(vma
, rq
, EXEC_OBJECT_WRITE
);
143 i915_vma_unlock(vma
);
148 if (INTEL_GEN(ce
->vm
->i915
) <= 5)
149 flags
|= I915_DISPATCH_SECURE
;
151 err
= rq
->engine
->emit_bb_start(rq
,
152 batch
->node
.start
, batch
->node
.size
,
157 i915_request_set_error_once(rq
, err
);
158 i915_request_add(rq
);
160 i915_vma_unpin_and_release(&batch
, 0);