/*
 * SPDX-License-Identifier: MIT
 * Copyright © 2019 Intel Corporation
 */
#include "gem/i915_gem_context.h"
#include "gem/i915_gem_pm.h"

#include "i915_drv.h"
#include "i915_globals.h"

#include "intel_context.h"
#include "intel_engine.h"
#include "intel_engine_pm.h"
#include "intel_ring.h"
static struct i915_global_context {
        struct i915_global base;
        struct kmem_cache *slab_ce;
} global;
static struct intel_context *intel_context_alloc(void)
{
        return kmem_cache_zalloc(global.slab_ce, GFP_KERNEL);
}
void intel_context_free(struct intel_context *ce)
{
        kmem_cache_free(global.slab_ce, ce);
}
struct intel_context *
intel_context_create(struct intel_engine_cs *engine)
{
        struct intel_context *ce;

        ce = intel_context_alloc();
        if (!ce)
                return ERR_PTR(-ENOMEM);

        intel_context_init(ce, engine);
        return ce;
}
int intel_context_alloc_state(struct intel_context *ce)
{
        int err = 0;

        if (mutex_lock_interruptible(&ce->pin_mutex))
                return -EINTR;

        if (!test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) {
                err = ce->ops->alloc(ce);
                if (unlikely(err))
                        goto unlock;

                set_bit(CONTEXT_ALLOC_BIT, &ce->flags);
        }

unlock:
        mutex_unlock(&ce->pin_mutex);
        return err;
}
static int intel_context_active_acquire(struct intel_context *ce)
{
        int err;

        err = i915_active_acquire(&ce->active);
        if (err)
                return err;

        /* Preallocate tracking nodes */
        if (!intel_context_is_barrier(ce)) {
                err = i915_active_acquire_preallocate_barrier(&ce->active,
                                                              ce->engine);
                if (err) {
                        i915_active_release(&ce->active);
                        return err;
                }
        }

        return 0;
}
static void intel_context_active_release(struct intel_context *ce)
{
        /* Nodes preallocated in intel_context_active() */
        i915_active_acquire_barrier(&ce->active);
        i915_active_release(&ce->active);
}
int __intel_context_do_pin(struct intel_context *ce)
{
        int err;

        if (unlikely(!test_bit(CONTEXT_ALLOC_BIT, &ce->flags))) {
                err = intel_context_alloc_state(ce);
                if (err)
                        return err;
        }

        if (mutex_lock_interruptible(&ce->pin_mutex))
                return -EINTR;

        if (likely(!atomic_read(&ce->pin_count))) {
                err = intel_context_active_acquire(ce);
                if (err)
                        goto err_unlock;

                err = ce->ops->pin(ce);
                if (err)
                        goto err_active;

                CE_TRACE(ce, "pin ring:{head:%04x, tail:%04x}\n",
                         ce->ring->head, ce->ring->tail);

                smp_mb__before_atomic(); /* flush pin before it is visible */
        }

        atomic_inc(&ce->pin_count);
        GEM_BUG_ON(!intel_context_is_pinned(ce)); /* no overflow! */

        mutex_unlock(&ce->pin_mutex);
        return 0;

err_active:
        intel_context_active_release(ce);
err_unlock:
        mutex_unlock(&ce->pin_mutex);
        return err;
}
void intel_context_unpin(struct intel_context *ce)
{
        if (!atomic_dec_and_test(&ce->pin_count))
                return;

        CE_TRACE(ce, "unpin\n");
        ce->ops->unpin(ce);

        /*
         * Once released, we may asynchronously drop the active reference.
         * As that may be the only reference keeping the context alive,
         * take an extra now so that it is not freed before we finish
         * dereferencing it.
         */
        intel_context_get(ce);
        intel_context_active_release(ce);
        intel_context_put(ce);
}
static int __context_pin_state(struct i915_vma *vma)
{
        unsigned int bias = i915_ggtt_pin_bias(vma) | PIN_OFFSET_BIAS;
        int err;

        err = i915_ggtt_pin(vma, 0, bias | PIN_HIGH);
        if (err)
                return err;

        err = i915_active_acquire(&vma->active);
        if (err)
                goto err_unpin;

        /*
         * And mark it as a globally pinned object to let the shrinker know
         * it cannot reclaim the object until we release it.
         */
        i915_vma_make_unshrinkable(vma);
        vma->obj->mm.dirty = true;

        return 0;

err_unpin:
        i915_vma_unpin(vma);
        return err;
}
static void __context_unpin_state(struct i915_vma *vma)
{
        i915_vma_make_shrinkable(vma);
        i915_active_release(&vma->active);
        __i915_vma_unpin(vma);
}
static int __ring_active(struct intel_ring *ring)
{
        int err;

        err = i915_active_acquire(&ring->vma->active);
        if (err)
                return err;

        err = intel_ring_pin(ring);
        if (err)
                goto err_active;

        return 0;

err_active:
        i915_active_release(&ring->vma->active);
        return err;
}
static void __ring_retire(struct intel_ring *ring)
{
        intel_ring_unpin(ring);
        i915_active_release(&ring->vma->active);
}
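/*
 * __intel_context_active() and __intel_context_retire() are the context's
 * i915_active callbacks: the former pins the ring, timeline and (if
 * present) the context state when the context first becomes busy, the
 * latter drops those pins again once the last request has retired.
 */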
__i915_active_call
static void __intel_context_retire(struct i915_active *active)
{
        struct intel_context *ce = container_of(active, typeof(*ce), active);

        CE_TRACE(ce, "retire\n");

        set_bit(CONTEXT_VALID_BIT, &ce->flags);
        if (ce->state)
                __context_unpin_state(ce->state);

        intel_timeline_unpin(ce->timeline);
        __ring_retire(ce->ring);

        intel_context_put(ce);
}
static int __intel_context_active(struct i915_active *active)
{
        struct intel_context *ce = container_of(active, typeof(*ce), active);
        int err;

        CE_TRACE(ce, "active\n");

        intel_context_get(ce);

        err = __ring_active(ce->ring);
        if (err)
                goto err_put;

        err = intel_timeline_pin(ce->timeline);
        if (err)
                goto err_ring;

        if (!ce->state)
                return 0;

        err = __context_pin_state(ce->state);
        if (err)
                goto err_timeline;

        return 0;

err_timeline:
        intel_timeline_unpin(ce->timeline);
err_ring:
        __ring_retire(ce->ring);
err_put:
        intel_context_put(ce);
        return err;
}
void
intel_context_init(struct intel_context *ce,
                   struct intel_engine_cs *engine)
{
        GEM_BUG_ON(!engine->cops);
        GEM_BUG_ON(!engine->gt->vm);

        kref_init(&ce->ref);

        ce->engine = engine;
        ce->ops = engine->cops;
        ce->sseu = engine->sseu;
        ce->ring = __intel_context_ring_size(SZ_4K);

        ce->vm = i915_vm_get(engine->gt->vm);

        INIT_LIST_HEAD(&ce->signal_link);
        INIT_LIST_HEAD(&ce->signals);

        mutex_init(&ce->pin_mutex);

        i915_active_init(&ce->active,
                         __intel_context_active, __intel_context_retire);
}
void intel_context_fini(struct intel_context *ce)
{
        if (ce->timeline)
                intel_timeline_put(ce->timeline);
        i915_vm_put(ce->vm);

        mutex_destroy(&ce->pin_mutex);
        i915_active_fini(&ce->active);
}
static void i915_global_context_shrink(void)
{
        kmem_cache_shrink(global.slab_ce);
}
static void i915_global_context_exit(void)
{
        kmem_cache_destroy(global.slab_ce);
}
static struct i915_global_context global = { {
        .shrink = i915_global_context_shrink,
        .exit = i915_global_context_exit,
} };
int __init i915_global_context_init(void)
{
        global.slab_ce = KMEM_CACHE(intel_context, SLAB_HWCACHE_ALIGN);
        if (!global.slab_ce)
                return -ENOMEM;

        i915_global_register(&global.base);
        return 0;
}
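/*
 * The enter/exit hooks bracket a context's busy period on an engine:
 * entering takes an engine pm wakeref and marks the context's timeline
 * as active; exiting reverses both steps once the context idles.
 */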
void intel_context_enter_engine(struct intel_context *ce)
{
        intel_engine_pm_get(ce->engine);
        intel_timeline_enter(ce->timeline);
}
void intel_context_exit_engine(struct intel_context *ce)
{
        intel_timeline_exit(ce->timeline);
        intel_engine_pm_put(ce->engine);
}
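/*
 * Prepare a request issued from another context to remotely modify this
 * one: if the request runs on a different timeline, order it after this
 * context's current activity, and keep the context image pinned (via the
 * active tracker) until that request retires.
 */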
int intel_context_prepare_remote_request(struct intel_context *ce,
                                          struct i915_request *rq)
{
        struct intel_timeline *tl = ce->timeline;
        int err;

        /* Only suitable for use in remotely modifying this context */
        GEM_BUG_ON(rq->context == ce);

        if (rcu_access_pointer(rq->timeline) != tl) { /* timeline sharing! */
                /* Queue this switch after current activity by this context. */
                err = i915_active_fence_set(&tl->last_request, rq);
                if (err)
                        return err;
        }

        /*
         * Guarantee that the context image and the timeline remain pinned
         * until the modifying request is retired, by setting the ce
         * activity tracker.
         *
         * But we only need to take one pin on account of it; in other
         * words, transfer the pinned ce object to the tracked active request.
         */
        GEM_BUG_ON(i915_active_is_idle(&ce->active));
        return i915_active_add_request(&ce->active, rq);
}
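/*
 * Convenience helper: pin the context, create a request on it, and drop
 * the temporary pin again; the caller only receives the new request.
 */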
struct i915_request *intel_context_create_request(struct intel_context *ce)
{
        struct i915_request *rq;
        int err;

        err = intel_context_pin(ce);
        if (unlikely(err))
                return ERR_PTR(err);

        rq = i915_request_create(ce);
        intel_context_unpin(ce);

        return rq;
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_context.c"
#endif