treewide: remove redundant IS_ERR() before error code check
[linux/fpc-iii.git] drivers/gpu/drm/i915/gt/intel_engine_pm.c
blob ea90ab3e396ef14d3aa8132392df4cb475e004d7
/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include "i915_drv.h"

#include "intel_context.h"
#include "intel_engine.h"
#include "intel_engine_heartbeat.h"
#include "intel_engine_pm.h"
#include "intel_engine_pool.h"
#include "intel_gt.h"
#include "intel_gt_pm.h"
#include "intel_rc6.h"
#include "intel_ring.h"
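
/*
 * __engine_unpark() is the wakeref .get callback: it runs when the first
 * wakeref is taken on an idle engine. It acquires a GT-level power
 * reference, re-pins the default context image for fast resets from
 * atomic context, scrubs the stale kernel context state left over from
 * the last park, and restarts the heartbeat.
 */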
static int __engine_unpark(struct intel_wakeref *wf)
{
	struct intel_engine_cs *engine =
		container_of(wf, typeof(*engine), wakeref);
	struct intel_context *ce;
	void *map;

	ENGINE_TRACE(engine, "\n");

	intel_gt_pm_get(engine->gt);

	/* Pin the default state for fast resets from atomic context. */
	map = NULL;
	if (engine->default_state)
		map = i915_gem_object_pin_map(engine->default_state,
					      I915_MAP_WB);
	if (!IS_ERR_OR_NULL(map))
		engine->pinned_default_state = map;

	/* Discard stale context state from across idling */
	ce = engine->kernel_context;
	if (ce) {
		GEM_BUG_ON(test_bit(CONTEXT_VALID_BIT, &ce->flags));

		/* First poison the image to verify we never fully trust it */
		if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) && ce->state) {
			struct drm_i915_gem_object *obj = ce->state->obj;
			int type = i915_coherent_map_type(engine->i915);

			map = i915_gem_object_pin_map(obj, type);
			if (!IS_ERR(map)) {
				memset(map, CONTEXT_REDZONE, obj->base.size);
				i915_gem_object_flush_map(obj);
				i915_gem_object_unpin_map(obj);
			}
		}
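
		/* Restore a known-good context image, discarding the poison */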
		ce->ops->reset(ce);
	}

	if (engine->unpark)
		engine->unpark(engine);

	intel_engine_unpark_heartbeat(engine);
	return 0;
}
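
/*
 * __timeline_mark_lock/unlock() teach lockdep that the kernel_context
 * timeline is held exclusively while parking, even though its
 * timeline->mutex cannot actually be taken here (see the comment in
 * switch_to_kernel_context()). Without CONFIG_LOCKDEP they reduce to
 * empty stubs.
 */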
#if IS_ENABLED(CONFIG_LOCKDEP)

static inline unsigned long __timeline_mark_lock(struct intel_context *ce)
{
	unsigned long flags;

	local_irq_save(flags);
	mutex_acquire(&ce->timeline->mutex.dep_map, 2, 0, _THIS_IP_);

	return flags;
}

static inline void __timeline_mark_unlock(struct intel_context *ce,
					  unsigned long flags)
{
	mutex_release(&ce->timeline->mutex.dep_map, _THIS_IP_);
	local_irq_restore(flags);
}

#else

static inline unsigned long __timeline_mark_lock(struct intel_context *ce)
{
	return 0;
}

static inline void __timeline_mark_unlock(struct intel_context *ce,
					  unsigned long flags)
{
}

#endif /* !IS_ENABLED(CONFIG_LOCKDEP) */
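
/*
 * duration() is the dma-fence callback attached to the final
 * kernel-context request: it folds the time from emission to completion
 * into the engine's latency estimate, an exponentially weighted moving
 * average.
 */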
static void duration(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct i915_request *rq = to_request(fence);

	ewma__engine_latency_add(&rq->engine->latency,
				 ktime_us_delta(rq->fence.timestamp,
						rq->duration.emitted));
}
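
/*
 * __queue_and_release_pm() submits the final kernel-context request and
 * releases the engine's wakeref within a single critical section, so
 * that neither retirement nor a new submitter can observe an
 * intermediate state.
 */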
static void
__queue_and_release_pm(struct i915_request *rq,
		       struct intel_timeline *tl,
		       struct intel_engine_cs *engine)
{
	struct intel_gt_timelines *timelines = &engine->gt->timelines;

	ENGINE_TRACE(engine, "\n");

	/*
	 * We have to serialise all potential retirement paths with our
	 * submission, as we don't want to underflow either the
	 * engine->wakeref.counter or our timeline->active_count.
	 *
	 * Equally, we cannot allow a new submission to start until
	 * after we finish queueing, nor could we allow that submitter
	 * to retire us before we are ready!
	 */
	spin_lock(&timelines->lock);

	/* Let intel_gt_retire_requests() retire us (acquired under lock) */
	if (!atomic_fetch_inc(&tl->active_count))
		list_add_tail(&tl->link, &timelines->active_list);

	/* Hand the request over to HW and so engine_retire() */
	__i915_request_queue(rq, NULL);

	/* Let new submissions commence (and maybe retire this timeline) */
	__intel_wakeref_defer_park(&engine->wakeref);

	spin_unlock(&timelines->lock);
}
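
/*
 * switch_to_kernel_context() flushes the engine back into the kernel
 * context before parking, so that no user context is left active on the
 * hardware across suspend. It returns true if the engine is already
 * idle in the kernel context and may be powered down at once, or false
 * if a final switch request was queued and parking must wait for it to
 * be retired.
 */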
static bool switch_to_kernel_context(struct intel_engine_cs *engine)
{
	struct intel_context *ce = engine->kernel_context;
	struct i915_request *rq;
	unsigned long flags;
	bool result = true;

	/* GPU is pointing to the void, as good as in the kernel context. */
	if (intel_gt_is_wedged(engine->gt))
		return true;

	GEM_BUG_ON(!intel_context_is_barrier(ce));

	/* Already inside the kernel context, safe to power down. */
	if (engine->wakeref_serial == engine->serial)
		return true;

	/*
	 * Note, we do this without taking the timeline->mutex. We cannot,
	 * as we may be called while retiring the kernel context and so
	 * already underneath the timeline->mutex. Instead we rely on the
	 * exclusive property of __engine_park() that prevents anyone
	 * else from creating a request on this engine. This also requires
	 * that the ring is empty and we avoid any waits while constructing
	 * the context, as they assume protection by the timeline->mutex.
	 * This should hold true as we can only park the engine after
	 * retiring the last request, thus all rings should be empty and
	 * all timelines idle.
	 *
	 * For unlocking, there are two other parties and the GPU who have
	 * a stake here.
	 *
	 * A new gpu user will be waiting on the engine-pm to start their
	 * engine_unpark. New waiters are predicated on engine->wakeref.count
	 * and so intel_wakeref_defer_park() acts like a mutex_unlock of the
	 * engine->wakeref.
	 *
	 * The other party is intel_gt_retire_requests(), which is walking the
	 * list of active timelines looking for completions. Meanwhile as soon
	 * as we call __i915_request_queue(), the GPU may complete our request.
	 * Ergo, if we put ourselves on the timelines.active_list
	 * (see intel_timeline_enter()) before we increment the
	 * engine->wakeref.count, we may see the request completion and retire
	 * it causing an underflow of the engine->wakeref.
	 */
	flags = __timeline_mark_lock(ce);
	GEM_BUG_ON(atomic_read(&ce->timeline->active_count) < 0);

	rq = __i915_request_create(ce, GFP_NOWAIT);
	if (IS_ERR(rq))
		/* Context switch failed, hope for the best! Maybe reset? */
		goto out_unlock;

	/* Check again on the next retirement. */
	engine->wakeref_serial = engine->serial + 1;
	i915_request_add_active_barriers(rq);

	/* Install ourselves as a preemption barrier */
	rq->sched.attr.priority = I915_PRIORITY_BARRIER;
	if (likely(!__i915_request_commit(rq))) { /* engine should be idle! */
		/*
		 * Use an interrupt for precise measurement of duration,
		 * otherwise we rely on someone else retiring all the requests,
		 * which may delay the signaling (i.e. we will likely wait
		 * until the background request retirement runs every
		 * second or two).
		 */
		BUILD_BUG_ON(sizeof(rq->duration) > sizeof(rq->submitq));
		dma_fence_add_callback(&rq->fence, &rq->duration.cb, duration);
		rq->duration.emitted = ktime_get();
	}

	/* Expose ourselves to the world */
	__queue_and_release_pm(rq, ce->timeline, engine);

	result = false;
out_unlock:
	__timeline_mark_unlock(ce, flags);
	return result;
}
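
/*
 * call_idle_barriers() drains the engine's pending barrier tasks,
 * invoking each callback with ERR_PTR(-EAGAIN) in place of a fence to
 * signal that the barrier was flushed rather than carried by a request
 * (e.g. cleanup after wedging).
 */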
static void call_idle_barriers(struct intel_engine_cs *engine)
{
	struct llist_node *node, *next;

	llist_for_each_safe(node, next, llist_del_all(&engine->barrier_tasks)) {
		struct dma_fence_cb *cb =
			container_of((struct list_head *)node,
				     typeof(*cb), node);

		cb->func(ERR_PTR(-EAGAIN), cb);
	}
}
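
/*
 * __engine_park() is the wakeref .put callback, run as the last wakeref
 * on the engine is dropped: it switches the engine back into the kernel
 * context, quiesces the heartbeat and breadcrumbs, unpins the default
 * state and finally releases the GT-level power reference taken in
 * __engine_unpark().
 */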
static int __engine_park(struct intel_wakeref *wf)
{
	struct intel_engine_cs *engine =
		container_of(wf, typeof(*engine), wakeref);

	engine->saturated = 0;

	/*
	 * If one and only one request is completed between pm events,
	 * we know that we are inside the kernel context and it is
	 * safe to power down. (We are paranoid in case that runtime
	 * suspend causes corruption to the active context image, and
	 * want to avoid that impacting userspace.)
	 */
	if (!switch_to_kernel_context(engine))
		return -EBUSY;

	ENGINE_TRACE(engine, "\n");

	call_idle_barriers(engine); /* cleanup after wedging */

	intel_engine_park_heartbeat(engine);
	intel_engine_disarm_breadcrumbs(engine);
	intel_engine_pool_park(&engine->pool);

	/* Must be reset upon idling, or we may miss the busy wakeup. */
	GEM_BUG_ON(engine->execlists.queue_priority_hint != INT_MIN);

	if (engine->park)
		engine->park(engine);

	if (engine->pinned_default_state) {
		i915_gem_object_unpin_map(engine->default_state);
		engine->pinned_default_state = NULL;
	}

	engine->execlists.no_priolist = false;

	/* While gt calls i915_vma_parked(), we have to break the lock cycle */
	intel_gt_pm_put_async(engine->gt);
	return 0;
}
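
/* Plug the engine park/unpark callbacks into the common wakeref machinery. */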
static const struct intel_wakeref_ops wf_ops = {
	.get = __engine_unpark,
	.put = __engine_park,
};

void intel_engine_init__pm(struct intel_engine_cs *engine)
{
	struct intel_runtime_pm *rpm = engine->uncore->rpm;

	intel_wakeref_init(&engine->wakeref, rpm, &wf_ops);
	intel_engine_init_heartbeat(engine);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_engine_pm.c"
#endif