/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include "gem/i915_gem_object.h"

#include "i915_drv.h"
#include "i915_vma.h"
#include "intel_engine.h"
#include "intel_ring.h"
#include "intel_timeline.h"
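/*
 * Recompute the free space in the ring from the current head and emit
 * offsets and cache the result in ring->space.
 */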
unsigned int intel_ring_update_space(struct intel_ring *ring)
{
	unsigned int space;

	space = __intel_ring_space(ring->head, ring->emit, ring->size);

	ring->space = space;
	return space;
}
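/*
 * Take an additional reference on a ring that the caller already knows to
 * be pinned; the GEM_BUG_ON documents that this must never be the first pin.
 */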
void __intel_ring_pin(struct intel_ring *ring)
{
	GEM_BUG_ON(!atomic_read(&ring->pin_count));
	atomic_inc(&ring->pin_count);
}
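/*
 * Pin the ring into the GGTT and map it for CPU access: through the
 * mappable aperture when the vma is map-and-fenceable, otherwise via a
 * regular CPU mapping of the backing object. The first caller does the
 * work; nested calls only bump the pin count.
 */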
int intel_ring_pin(struct intel_ring *ring, struct i915_gem_ww_ctx *ww)
{
	struct i915_vma *vma = ring->vma;
	unsigned int flags;
	void *addr;
	int ret;

	if (atomic_fetch_inc(&ring->pin_count))
		return 0;

	/* Ring wraparound at offset 0 sometimes hangs. No idea why. */
	flags = PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma);

	/* Stolen objects must stay within the mappable aperture. */
	if (i915_gem_object_is_stolen(vma->obj))
		flags |= PIN_MAPPABLE;
	else
		flags |= PIN_HIGH;

	ret = i915_ggtt_pin(vma, ww, 0, flags);
	if (unlikely(ret))
		goto err_unpin;

	if (i915_vma_is_map_and_fenceable(vma))
		addr = (void __force *)i915_vma_pin_iomap(vma);
	else
		addr = i915_gem_object_pin_map(vma->obj,
					       i915_coherent_map_type(vma->vm->i915));
	if (IS_ERR(addr)) {
		ret = PTR_ERR(addr);
		goto err_ring;
	}

	i915_vma_make_unshrinkable(vma);

	/* Discard any unused bytes beyond that submitted to hw. */
	intel_ring_reset(ring, ring->emit);

	ring->vaddr = addr;
	return 0;

err_ring:
	i915_vma_unpin(vma);
err_unpin:
	atomic_dec(&ring->pin_count);
	return ret;
}
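/*
 * Reset the ring to a known tail: head, tail and emit are all moved to the
 * (wrapped) offset and the free space is recomputed.
 */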
void intel_ring_reset(struct intel_ring *ring, u32 tail)
{
	tail = intel_ring_wrap(ring, tail);
	ring->tail = tail;
	ring->head = tail;
	ring->emit = tail;
	intel_ring_update_space(ring);
}
void intel_ring_unpin(struct intel_ring *ring)
{
	struct i915_vma *vma = ring->vma;

	if (!atomic_dec_and_test(&ring->pin_count))
		return;

	i915_vma_unset_ggtt_write(vma);
	if (i915_vma_is_map_and_fenceable(vma))
		i915_vma_unpin_iomap(vma);
	else
		i915_gem_object_unpin_map(vma->obj);

	i915_vma_make_purgeable(vma);
	i915_vma_unpin(vma);
}
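/*
 * Allocate the backing storage for a ring: prefer stolen memory when a
 * mappable aperture exists, fall back to internal (non-shmem) pages, and
 * wrap the object in a GGTT vma.
 */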
static struct i915_vma *create_ring_vma(struct i915_ggtt *ggtt, int size)
{
	struct i915_address_space *vm = &ggtt->vm;
	struct drm_i915_private *i915 = vm->i915;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;

	obj = ERR_PTR(-ENODEV);
	if (i915_ggtt_has_aperture(ggtt))
		obj = i915_gem_object_create_stolen(i915, size);
	if (IS_ERR(obj))
		obj = i915_gem_object_create_internal(i915, size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	/*
	 * Mark ring buffers as read-only from GPU side (so no stray overwrites)
	 * if supported by the platform's GGTT.
	 */
	if (vm->has_read_only)
		i915_gem_object_set_readonly(obj);

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma))
		goto err;

	return vma;

err:
	i915_gem_object_put(obj);
	return vma;
}
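/*
 * Create a new ring of the requested power-of-two size for an engine. The
 * backing object is allocated here, but the ring is not pinned or mapped
 * until intel_ring_pin().
 */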
struct intel_ring *
intel_engine_create_ring(struct intel_engine_cs *engine, int size)
{
	struct drm_i915_private *i915 = engine->i915;
	struct intel_ring *ring;
	struct i915_vma *vma;

	GEM_BUG_ON(!is_power_of_2(size));
	GEM_BUG_ON(RING_CTL_SIZE(size) & ~RING_NR_PAGES);

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		return ERR_PTR(-ENOMEM);

	kref_init(&ring->ref);
	ring->size = size;
	ring->wrap = BITS_PER_TYPE(ring->size) - ilog2(size);

	/*
	 * Workaround an erratum on the i830 which causes a hang if
	 * the TAIL pointer points to within the last 2 cachelines
	 * of the buffer.
	 */
	ring->effective_size = size;
	if (IS_I830(i915) || IS_I845G(i915))
		ring->effective_size -= 2 * CACHELINE_BYTES;

	intel_ring_update_space(ring);

	vma = create_ring_vma(engine->gt->ggtt, size);
	if (IS_ERR(vma)) {
		kfree(ring);
		return ERR_CAST(vma);
	}
	ring->vma = vma;

	return ring;
}
void intel_ring_free(struct kref *ref)
{
	struct intel_ring *ring = container_of(ref, typeof(*ring), ref);

	i915_vma_put(ring->vma);
	kfree(ring);
}
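/*
 * Block until enough ring space is free: walk the timeline's requests in
 * submission order, find the first one using this ring whose retirement
 * would free at least the required number of bytes, wait for its
 * completion and then retire everything up to and including it.
 */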
static noinline int
wait_for_space(struct intel_ring *ring,
	       struct intel_timeline *tl,
	       unsigned int bytes)
{
	struct i915_request *target;
	long timeout;

	if (intel_ring_update_space(ring) >= bytes)
		return 0;

	GEM_BUG_ON(list_empty(&tl->requests));
	list_for_each_entry(target, &tl->requests, link) {
		if (target->ring != ring)
			continue;

		/* Would completion of this request free enough space? */
		if (bytes <= __intel_ring_space(target->postfix,
						ring->emit, ring->size))
			break;
	}

	if (GEM_WARN_ON(&target->link == &tl->requests))
		return -ENOSPC;

	timeout = i915_request_wait(target,
				    I915_WAIT_INTERRUPTIBLE,
				    MAX_SCHEDULE_TIMEOUT);
	if (timeout < 0)
		return timeout;

	i915_request_retire_upto(target);

	intel_ring_update_space(ring);
	GEM_BUG_ON(ring->space < bytes);
	return 0;
}
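/*
 * Illustrative example of the wrap accounting in intel_ring_begin() below
 * (numbers are hypothetical): with ring->size == 4096, effective_size ==
 * 4096 and emit == 4000, a request needing bytes == 128 sees remain_usable
 * == 96, so the remaining remain_actual == 96 bytes are sacrificed:
 * need_wrap is set to 96 | 1 (the low bit merely flags that a wrap is
 * pending), those bytes are later filled with MI_NOOP and emission
 * restarts at offset 0.
 */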
u32 *intel_ring_begin(struct i915_request *rq, unsigned int num_dwords)
{
	struct intel_ring *ring = rq->ring;
	const unsigned int remain_usable = ring->effective_size - ring->emit;
	const unsigned int bytes = num_dwords * sizeof(u32);
	unsigned int need_wrap = 0;
	unsigned int total_bytes;
	u32 *cs;

	/* Packets must be qword aligned. */
	GEM_BUG_ON(num_dwords & 1);

	total_bytes = bytes + rq->reserved_space;
	GEM_BUG_ON(total_bytes > ring->effective_size);

	if (unlikely(total_bytes > remain_usable)) {
		const int remain_actual = ring->size - ring->emit;

		if (bytes > remain_usable) {
			/*
			 * Not enough space for the basic request. So need to
			 * flush out the remainder and then wait for
			 * base + reserved.
			 */
			total_bytes += remain_actual;
			need_wrap = remain_actual | 1;
		} else {
			/*
			 * The base request will fit but the reserved space
			 * falls off the end. So we don't need an immediate
			 * wrap and only need to effectively wait for the
			 * reserved size from the start of ringbuffer.
			 */
			total_bytes = rq->reserved_space + remain_actual;
		}
	}

	if (unlikely(total_bytes > ring->space)) {
		int ret;

		/*
		 * Space is reserved in the ringbuffer for finalising the
		 * request, as that cannot be allowed to fail. During request
		 * finalisation, reserved_space is set to 0 to stop the
		 * overallocation and the assumption is that then we never need
		 * to wait (which has the risk of failing with EINTR).
		 *
		 * See also i915_request_alloc() and i915_request_add().
		 */
		GEM_BUG_ON(!rq->reserved_space);

		ret = wait_for_space(ring,
				     i915_request_timeline(rq),
				     total_bytes);
		if (unlikely(ret))
			return ERR_PTR(ret);
	}

	if (unlikely(need_wrap)) {
		need_wrap &= ~1;
		GEM_BUG_ON(need_wrap > ring->space);
		GEM_BUG_ON(ring->emit + need_wrap > ring->size);
		GEM_BUG_ON(!IS_ALIGNED(need_wrap, sizeof(u64)));

		/* Fill the tail with MI_NOOP */
		memset64(ring->vaddr + ring->emit, 0, need_wrap / sizeof(u64));
		ring->space -= need_wrap;
		ring->emit = 0;
	}

	GEM_BUG_ON(ring->emit > ring->size - bytes);
	GEM_BUG_ON(ring->space < bytes);
	cs = ring->vaddr + ring->emit;
	GEM_DEBUG_EXEC(memset32(cs, POISON_INUSE, bytes / sizeof(*cs)));

	ring->emit += bytes;
	ring->space -= bytes;

	return cs;
}
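/*
 * Note for the helper below: each memset64() store emits two MI_NOOP
 * dwords at once, which is why the padding count is required to be even.
 */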
/* Align the ring tail to a cacheline boundary */
int intel_ring_cacheline_align(struct i915_request *rq)
{
	int num_dwords;
	void *cs;

	num_dwords = (rq->ring->emit & (CACHELINE_BYTES - 1)) / sizeof(u32);
	if (num_dwords == 0)
		return 0;

	num_dwords = CACHELINE_DWORDS - num_dwords;
	GEM_BUG_ON(num_dwords & 1);

	cs = intel_ring_begin(rq, num_dwords);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	memset64(cs, (u64)MI_NOOP << 32 | MI_NOOP, num_dwords / 2);
	intel_ring_advance(rq, cs + num_dwords);

	GEM_BUG_ON(rq->ring->emit & (CACHELINE_BYTES - 1));
	return 0;
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_ring.c"
#endif