/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Mika Kuoppala <mika.kuoppala@intel.com>
 */

#include "i915_drv.h"
#include "i915_gem_render_state.h"
#include "intel_renderstate.h"
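
/*
 * Bookkeeping for the null ("golden") render state of one engine: the
 * generated read-only tables, the internal object and GGTT vma that back
 * them, and the offsets/sizes of the main batch and of the auxiliary batch
 * appended after it.
 */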
struct intel_render_state {
	const struct intel_renderstate_rodata *rodata;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	u32 batch_offset;
	u32 batch_size;
	u32 aux_offset;
	u32 aux_size;
};
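
/* Only the render engine has a null state; pick the table for its gen. */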
static const struct intel_renderstate_rodata *
render_state_get_rodata(const struct intel_engine_cs *engine)
{
	if (engine->id != RCS)
		return NULL;

	switch (INTEL_GEN(engine->i915)) {
	case 6:
		return &gen6_null_state;
	case 7:
		return &gen7_null_state;
	case 8:
		return &gen8_null_state;
	case 9:
		return &gen9_null_state;
	}

	return NULL;
}

/*
 * Macro to add commands to the auxiliary batch.
 * It only checks for page overflow before inserting the commands. This is
 * sufficient as the null state generator builds the final batch in two
 * passes, commands and state separately; by this point the size of both is
 * known, and they have been compacted with the state relocated right after
 * the commands (taking care of alignment), so we should have sufficient
 * space below them for adding new commands.
 */
#define OUT_BATCH(batch, i, val)				\
	do {							\
		if ((i) >= PAGE_SIZE / sizeof(u32))		\
			goto err;				\
		(batch)[(i)++] = (val);				\
	} while (0)
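
/*
 * Fill the backing page with the null render state: copy the generated
 * batch, resolving relocations against the vma's GGTT address, then append
 * the auxiliary batch (optional pooled-EU setup plus MI_BATCH_BUFFER_END)
 * starting on the next cacheline boundary.
 */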
static int render_state_setup(struct intel_render_state *so,
			      struct drm_i915_private *i915)
{
	const struct intel_renderstate_rodata *rodata = so->rodata;
	unsigned int i = 0, reloc_index = 0;
	unsigned int needs_clflush;
	u32 *d;
	int ret;

	ret = i915_gem_obj_prepare_shmem_write(so->obj, &needs_clflush);
	if (ret)
		return ret;

	d = kmap_atomic(i915_gem_object_get_dirty_page(so->obj, 0));
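
	/*
	 * Copy the batch dwords, adding the vma's GGTT address wherever
	 * rodata->reloc[] marks a relocation. With 64-bit relocations the
	 * low 32 bits replace the current dword and the high 32 bits
	 * overwrite the zero placeholder that follows it. Illustrative
	 * example (made-up numbers): node.start == 0x100001000 and a batch
	 * dword of 0x20 give r == 0x100001020, written out as 0x00001020
	 * followed by 0x00000001.
	 */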
	while (i < rodata->batch_items) {
		u32 s = rodata->batch[i];

		if (i * 4 == rodata->reloc[reloc_index]) {
			u64 r = s + so->vma->node.start;

			s = lower_32_bits(r);
			if (HAS_64BIT_RELOC(i915)) {
				if (i + 1 >= rodata->batch_items ||
				    rodata->batch[i + 1] != 0)
					goto err;

				d[i++] = s;
				s = upper_32_bits(r);
			}

			reloc_index++;
		}

		d[i++] = s;
	}

	if (rodata->reloc[reloc_index] != -1) {
		DRM_ERROR("only %d relocs resolved\n", reloc_index);
		goto err;
	}

	so->batch_offset = i915_ggtt_offset(so->vma);
	so->batch_size = rodata->batch_items * sizeof(u32);
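
	/*
	 * Everything from here on forms the auxiliary batch; pad with NOOPs
	 * so that it starts on a cacheline boundary.
	 */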
	while (i % CACHELINE_DWORDS)
		OUT_BATCH(d, i, MI_NOOP);

	so->aux_offset = i * sizeof(u32);

	if (HAS_POOLED_EU(i915)) {
		/*
		 * We always program the 3x6 pool config, but depending on
		 * which subslice is disabled the HW drops down to the
		 * appropriate config shown below.
		 *
		 * In the table below, a 2x6 config always refers to the
		 * fused-down version; native 2x6 is not available and can
		 * be ignored.
		 *
		 * SNo  subslices config             eu pool configuration
		 * -----------------------------------------------------------
		 * 1    3 subslices enabled (3x6) -  0x00777000  (9+9)
		 * 2    ss0 disabled (2x6)        -  0x00777000  (3+9)
		 * 3    ss1 disabled (2x6)        -  0x00770000  (6+6)
		 * 4    ss2 disabled (2x6)        -  0x00007000  (9+3)
		 */
		u32 eu_pool_config = 0x00777000;

		OUT_BATCH(d, i, GEN9_MEDIA_POOL_STATE);
		OUT_BATCH(d, i, GEN9_MEDIA_POOL_ENABLE);
		OUT_BATCH(d, i, eu_pool_config);
		OUT_BATCH(d, i, 0);
		OUT_BATCH(d, i, 0);
		OUT_BATCH(d, i, 0);
	}

	OUT_BATCH(d, i, MI_BATCH_BUFFER_END);

	so->aux_size = i * sizeof(u32) - so->aux_offset;
	so->aux_offset += so->batch_offset;
	/*
	 * Since we are sending length, we need to strictly conform to
	 * all requirements. For Gen2 this must be a multiple of 8.
	 */
	so->aux_size = ALIGN(so->aux_size, 8);

	if (needs_clflush)
		drm_clflush_virt_range(d, i * sizeof(u32));
	kunmap_atomic(d);

	ret = i915_gem_object_set_to_gtt_domain(so->obj, false);
out:
	i915_gem_obj_finish_shmem_access(so->obj);
	return ret;

err:
	kunmap_atomic(d);
	ret = -EINVAL;
	goto out;
}

#undef OUT_BATCH
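
/**
 * i915_gem_render_state_emit - emit the engine's null render state
 * @rq: request on whose ring to emit the batches
 *
 * Allocates a temporary internal object, pins it into the global GTT, fills
 * it via render_state_setup() and emits batchbuffer starts for the main and,
 * if present, auxiliary batches, then marks the vma active and releases it.
 *
 * Returns 0 on success (including when the engine has no render state) or a
 * negative error code.
 */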
int i915_gem_render_state_emit(struct i915_request *rq)
{
	struct intel_engine_cs *engine = rq->engine;
	struct intel_render_state so = {}; /* keep the compiler happy */
	int err;

	so.rodata = render_state_get_rodata(engine);
	if (!so.rodata)
		return 0;

	if (so.rodata->batch_items * 4 > PAGE_SIZE)
		return -EINVAL;

	so.obj = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
	if (IS_ERR(so.obj))
		return PTR_ERR(so.obj);
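
	/*
	 * The state must be reachable through the global GTT, since
	 * render_state_setup() and emit_bb_start() reference it by its
	 * GGTT offset.
	 */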
	so.vma = i915_vma_instance(so.obj, &engine->i915->ggtt.vm, NULL);
	if (IS_ERR(so.vma)) {
		err = PTR_ERR(so.vma);
		goto err_obj;
	}

	err = i915_vma_pin(so.vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
	if (err)
		goto err_vma;

	err = render_state_setup(&so, rq->i915);
	if (err)
		goto err_unpin;

	err = engine->emit_bb_start(rq,
				    so.batch_offset, so.batch_size,
				    I915_DISPATCH_SECURE);
	if (err)
		goto err_unpin;
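
	/*
	 * The auxiliary batch is only worth emitting if it holds more than a
	 * bare MI_BATCH_BUFFER_END (8 bytes after alignment), i.e. when
	 * pooled-EU setup was appended.
	 */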
	if (so.aux_size > 8) {
		err = engine->emit_bb_start(rq,
					    so.aux_offset, so.aux_size,
					    I915_DISPATCH_SECURE);
		if (err)
			goto err_unpin;
	}

	err = i915_vma_move_to_active(so.vma, rq, 0);
err_unpin:
	i915_vma_unpin(so.vma);
err_vma:
	i915_vma_close(so.vma);
err_obj:
	__i915_gem_object_release_unless_active(so.obj);
	return err;
}