Merge tag 'io_uring-5.11-2021-01-16' of git://git.kernel.dk/linux-block
[linux/fpc-iii.git] / drivers / gpu / drm / i915 / gem / i915_gem_throttle.c
blob1929d6cf415082ef5931f74896630948d55e1eb9
1 /*
2 * SPDX-License-Identifier: MIT
4 * Copyright © 2014-2016 Intel Corporation
5 */
7 #include <linux/jiffies.h>
9 #include <drm/drm_file.h>
11 #include "i915_drv.h"
12 #include "i915_gem_context.h"
13 #include "i915_gem_ioctls.h"
14 #include "i915_gem_object.h"
/*
 * 20ms is a fairly arbitrary limit (greater than the average frame time)
 * chosen to prevent the CPU getting more than a frame ahead of the GPU
 * (when using lax throttling for the frontbuffer). We also use it to
 * offer free GPU waitboosts for severely congested workloads.
 */
#define DRM_I915_THROTTLE_JIFFIES msecs_to_jiffies(20)
25 * Throttle our rendering by waiting until the ring has completed our requests
26 * emitted over 20 msec ago.
28 * Note that if we were to use the current jiffies each time around the loop,
29 * we wouldn't escape the function with any frames outstanding if the time to
30 * render a frame was over 20ms.
32 * This should get us reasonable parallelism between CPU and GPU but also
33 * relatively low latency when blocking on a particular request to finish.
35 int
36 i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
37 struct drm_file *file)
39 const unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES;
40 struct drm_i915_file_private *file_priv = file->driver_priv;
41 struct i915_gem_context *ctx;
42 unsigned long idx;
43 long ret;
45 /* ABI: return -EIO if already wedged */
46 ret = intel_gt_terminally_wedged(&to_i915(dev)->gt);
47 if (ret)
48 return ret;
50 rcu_read_lock();
51 xa_for_each(&file_priv->context_xa, idx, ctx) {
52 struct i915_gem_engines_iter it;
53 struct intel_context *ce;
55 if (!kref_get_unless_zero(&ctx->ref))
56 continue;
57 rcu_read_unlock();
59 for_each_gem_engine(ce,
60 i915_gem_context_lock_engines(ctx),
61 it) {
62 struct i915_request *rq, *target = NULL;
64 if (!ce->timeline)
65 continue;
67 mutex_lock(&ce->timeline->mutex);
68 list_for_each_entry_reverse(rq,
69 &ce->timeline->requests,
70 link) {
71 if (i915_request_completed(rq))
72 break;
74 if (time_after(rq->emitted_jiffies,
75 recent_enough))
76 continue;
78 target = i915_request_get(rq);
79 break;
81 mutex_unlock(&ce->timeline->mutex);
82 if (!target)
83 continue;
85 ret = i915_request_wait(target,
86 I915_WAIT_INTERRUPTIBLE,
87 MAX_SCHEDULE_TIMEOUT);
88 i915_request_put(target);
89 if (ret < 0)
90 break;
92 i915_gem_context_unlock_engines(ctx);
93 i915_gem_context_put(ctx);
95 rcu_read_lock();
97 rcu_read_unlock();
99 return ret < 0 ? ret : 0;