/*
 * Copyright © 2008-2018 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#ifndef I915_REQUEST_H
#define I915_REQUEST_H

#include <linux/dma-fence.h>
#include <linux/lockdep.h>

#include "gem/i915_gem_context_types.h"
#include "gt/intel_context_types.h"
#include "gt/intel_engine_types.h"
#include "gt/intel_timeline_types.h"

#include "i915_scheduler.h"
#include "i915_selftest.h"
#include "i915_sw_fence.h"

#include <uapi/drm/i915_drm.h>

struct drm_i915_gem_object;

struct i915_capture_list {
	struct i915_capture_list *next;
	struct i915_vma *vma;
};

#define RQ_TRACE(rq, fmt, ...) do {					\
	const struct i915_request *rq__ = (rq);				\
	ENGINE_TRACE(rq__->engine, "fence %llx:%lld, current %d " fmt,	\
		     rq__->fence.context, rq__->fence.seqno,		\
		     hwsp_seqno(rq__), ##__VA_ARGS__);			\
} while (0)
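
/*
 * A minimal usage sketch for RQ_TRACE (illustrative only; this particular
 * call site is hypothetical, not taken from this file):
 *
 *	RQ_TRACE(rq, "head %04x\n", rq->head);
 *
 * The macro expands to an ENGINE_TRACE() on rq->engine, with the message
 * automatically prefixed by the request's fence context:seqno and the
 * breadcrumb currently visible in the HW status page.
 */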

enum {
	/*
	 * I915_FENCE_FLAG_ACTIVE - this request is currently submitted to HW.
	 *
	 * Set by __i915_request_submit() on handing over to HW, and cleared
	 * by __i915_request_unsubmit() if we preempt this request.
	 *
	 * Finally cleared for consistency on retiring the request, when
	 * we know the HW is no longer running this request.
	 *
	 * See i915_request_is_active()
	 */
	I915_FENCE_FLAG_ACTIVE = DMA_FENCE_FLAG_USER_BITS,

	/*
	 * I915_FENCE_FLAG_SIGNAL - this request is currently on signal_list
	 *
	 * Internal bookkeeping used by the breadcrumb code to track when
	 * a request is on the various signal_list.
	 */
	I915_FENCE_FLAG_SIGNAL,

	/*
	 * I915_FENCE_FLAG_NOPREEMPT - this request should not be preempted
	 *
	 * The execution of some requests should not be interrupted. This is
	 * a sensitive operation as it makes the request super important,
	 * blocking other higher priority work. Abuse of this flag will
	 * lead to quality of service issues.
	 */
	I915_FENCE_FLAG_NOPREEMPT,

	/*
	 * I915_FENCE_FLAG_SENTINEL - this request should be last in the queue
	 *
	 * A high priority sentinel request may be submitted to clear the
	 * submission queue. As it will be the only request in-flight, upon
	 * execution all other active requests will have been preempted and
	 * unsubmitted. This preemptive pulse is used to re-evaluate the
	 * in-flight requests, particularly in cases where an active context
	 * is banned and those active requests need to be cancelled.
	 */
	I915_FENCE_FLAG_SENTINEL,

	/*
	 * I915_FENCE_FLAG_BOOST - upclock the GPU for this request
	 *
	 * Some requests are more important than others! In particular, a
	 * request that the user is waiting on is typically required for
	 * interactive latency, which we want to minimise by upclocking
	 * the GPU. Here we track such boost requests on a per-request basis.
	 */
	I915_FENCE_FLAG_BOOST,
};
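
/*
 * These flags live in rq->fence.flags, from DMA_FENCE_FLAG_USER_BITS
 * upwards. A sketch of how a caller might mark a request as unpreemptable
 * (illustrative only; real call sites set these bits while the request is
 * being constructed, before it is submitted):
 *
 *	__set_bit(I915_FENCE_FLAG_NOPREEMPT, &rq->fence.flags);
 *
 * The helpers near the bottom of this file (i915_request_has_nopreempt()
 * and friends) read the same bits back with test_bit().
 */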

/**
 * Request queue structure.
 *
 * The request queue allows us to note sequence numbers that have been emitted
 * and may be associated with active buffers to be retired.
 *
 * By keeping this list, we can avoid having to do questionable sequence
 * number comparisons on buffer last_read|write_seqno. It also allows an
 * emission time to be associated with the request for tracking how far ahead
 * of the GPU the submission is.
 *
 * When modifying this structure be very aware that we perform a lockless
 * RCU lookup of it that may race against reallocation of the struct
 * from the slab freelist. We intentionally do not zero the structure on
 * allocation so that the lookup can use the dangling pointers (and is
 * cognisant that those pointers may be wrong). Instead, everything that
 * needs to be initialised must be done so explicitly. See the illustrative
 * lookup sketch after the struct definition.
 *
 * The requests are reference counted.
 */
struct i915_request {
	struct dma_fence fence;
	spinlock_t lock;

	/** On which ring this request was generated */
	struct drm_i915_private *i915;

	/**
	 * Context and ring buffer related to this request
	 * Contexts are refcounted, so when this request is associated with a
	 * context, we must increment the context's refcount, to guarantee that
	 * it persists while any request is linked to it. Requests themselves
	 * are also refcounted, so the request will only be freed when the last
	 * reference to it is dismissed, and the code in
	 * i915_request_free() will then decrement the refcount on the
	 * context.
	 */
	struct intel_engine_cs *engine;
	struct intel_context *context;
	struct intel_ring *ring;
	struct intel_timeline __rcu *timeline;
	struct list_head signal_link;

	/*
	 * The rcu epoch of when this request was allocated. Used to judiciously
	 * apply backpressure on future allocations to ensure that under
	 * mempressure there are sufficient RCU ticks for us to reclaim our
	 * RCU protected slabs.
	 */
	unsigned long rcustate;

	/*
	 * We pin the timeline->mutex while constructing the request to
	 * ensure that no caller accidentally drops it during construction.
	 * The timeline->mutex must be held to ensure that only this caller
	 * can use the ring and manipulate the associated timeline during
	 * construction.
	 */
	struct pin_cookie cookie;

	/*
	 * Fences for the various phases in the request's lifetime.
	 *
	 * The submit fence is used to await upon all of the request's
	 * dependencies. When it is signaled, the request is ready to run.
	 * It is used by the driver to then queue the request for execution.
	 */
	struct i915_sw_fence submit;
	union {
		wait_queue_entry_t submitq;
		struct i915_sw_dma_fence_cb dmaq;
		struct i915_request_duration_cb {
			struct dma_fence_cb cb;
			ktime_t emitted;
		} duration;
	};
	struct list_head execute_cb;
	struct i915_sw_fence semaphore;

	/*
	 * A list of everyone we wait upon, and everyone who waits upon us.
	 * Even though we will not be submitted to the hardware before the
	 * submit fence is signaled (it waits for all external events as well
	 * as our own requests), the scheduler still needs to know the
	 * dependency tree for the lifetime of the request (from execbuf
	 * to retirement), i.e. bidirectional dependency information for the
	 * request not tied to individual fences.
	 */
	struct i915_sched_node sched;
	struct i915_dependency dep;
	intel_engine_mask_t execution_mask;

	/*
	 * A convenience pointer to the current breadcrumb value stored in
	 * the HW status page (or our timeline's local equivalent). The full
	 * path would be rq->hw_context->ring->timeline->hwsp_seqno.
	 */
	const u32 *hwsp_seqno;

	/*
	 * If we need to access the timeline's seqno for this request in
	 * another request, we need to keep a read reference to this associated
	 * cacheline, so that we do not free and recycle it before the foreign
	 * observers have completed. Hence, we keep a pointer to the cacheline
	 * inside the timeline's HWSP vma, but it is only valid while this
	 * request has not completed, and is guarded by the timeline mutex.
	 */
	struct intel_timeline_cacheline __rcu *hwsp_cacheline;

	/** Position in the ring of the start of the request */
	u32 head;

	/** Position in the ring of the start of the user packets */
	u32 infix;

	/*
	 * Position in the ring of the start of the postfix.
	 * This is required to calculate the maximum available ring space
	 * without overwriting the postfix.
	 */
	u32 postfix;

	/** Position in the ring of the end of the whole request */
	u32 tail;

	/** Position in the ring of the end of any workarounds after the tail */
	u32 wa_tail;

	/** Preallocated space in the ring for emitting the request */
	u32 reserved_space;

	/**
	 * Batch buffer related to this request if any (used for
	 * error state dump only).
	 */
	struct i915_vma *batch;

	/**
	 * Additional buffers requested by userspace to be captured upon
	 * a GPU hang. The vma/obj on this list are protected by their
	 * active reference - all objects on this list must also be
	 * on the active_list (of their final request).
	 */
	struct i915_capture_list *capture_list;

	/** Time at which this request was emitted, in jiffies. */
	unsigned long emitted_jiffies;

	/** timeline->request entry for this request */
	struct list_head link;

	struct drm_i915_file_private *file_priv;
	/** file_priv list entry for this request */
	struct list_head client_link;

	I915_SELFTEST_DECLARE(struct {
		struct list_head link;
		unsigned long delay;
	} mock;)
};
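
/*
 * An illustrative sketch of the lockless RCU lookup warned about in the
 * comment above struct i915_request (the "slot" here is hypothetical; the
 * request slab is typesafe-by-RCU, so a request may be reused, though not
 * freed, while we hold rcu_read_lock()):
 *
 *	rcu_read_lock();
 *	rq = READ_ONCE(*slot);
 *	if (rq)
 *		rq = i915_request_get_rcu(rq);
 *	rcu_read_unlock();
 *
 * i915_request_get_rcu() returns NULL rather than resurrect a request whose
 * refcount has already dropped to zero, and a caller that did acquire a
 * reference must still recheck that rq is the request it expected, since
 * the struct may have been recycled for a new request in the meantime.
 */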

#define I915_FENCE_GFP (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)

extern const struct dma_fence_ops i915_fence_ops;

static inline bool dma_fence_is_i915(const struct dma_fence *fence)
{
	return fence->ops == &i915_fence_ops;
}

struct i915_request * __must_check
__i915_request_create(struct intel_context *ce, gfp_t gfp);
struct i915_request * __must_check
i915_request_create(struct intel_context *ce);

struct i915_request *__i915_request_commit(struct i915_request *request);
void __i915_request_queue(struct i915_request *rq,
			  const struct i915_sched_attr *attr);

bool i915_request_retire(struct i915_request *rq);
void i915_request_retire_upto(struct i915_request *rq);

static inline struct i915_request *
to_request(struct dma_fence *fence)
{
	/* We assume that NULL fence/request are interoperable */
	BUILD_BUG_ON(offsetof(struct i915_request, fence) != 0);
	GEM_BUG_ON(fence && !dma_fence_is_i915(fence));
	return container_of(fence, struct i915_request, fence);
}

static inline struct i915_request *
i915_request_get(struct i915_request *rq)
{
	return to_request(dma_fence_get(&rq->fence));
}

static inline struct i915_request *
i915_request_get_rcu(struct i915_request *rq)
{
	return to_request(dma_fence_get_rcu(&rq->fence));
}

static inline void
i915_request_put(struct i915_request *rq)
{
	dma_fence_put(&rq->fence);
}

int i915_request_await_object(struct i915_request *to,
			      struct drm_i915_gem_object *obj,
			      bool write);
int i915_request_await_dma_fence(struct i915_request *rq,
				 struct dma_fence *fence);
int i915_request_await_execution(struct i915_request *rq,
				 struct dma_fence *fence,
				 void (*hook)(struct i915_request *rq,
					      struct dma_fence *signal));

void i915_request_add(struct i915_request *rq);

bool __i915_request_submit(struct i915_request *request);
void i915_request_submit(struct i915_request *request);

void i915_request_skip(struct i915_request *request, int error);

void __i915_request_unsubmit(struct i915_request *request);
void i915_request_unsubmit(struct i915_request *request);

/* Note: part of the intel_breadcrumbs family */
bool i915_request_enable_breadcrumb(struct i915_request *request);
void i915_request_cancel_breadcrumb(struct i915_request *request);

long i915_request_wait(struct i915_request *rq,
		       unsigned int flags,
		       long timeout)
	__attribute__((nonnull(1)));
#define I915_WAIT_INTERRUPTIBLE	BIT(0)
#define I915_WAIT_PRIORITY	BIT(1) /* small priority bump for the request */
#define I915_WAIT_ALL		BIT(2) /* used by i915_gem_object_wait() */
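
/*
 * A minimal synchronous-wait sketch (illustrative only; assumes the caller
 * holds its own reference to rq, e.g. from i915_request_get()):
 *
 *	long ret;
 *
 *	ret = i915_request_wait(rq, I915_WAIT_INTERRUPTIBLE,
 *				MAX_SCHEDULE_TIMEOUT);
 *	if (ret < 0)
 *		return ret;
 *
 * On success the remaining timeout (in jiffies) is returned; otherwise a
 * negative error code (e.g. -ETIME if the wait timed out), in keeping with
 * the dma-fence wait conventions.
 */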

static inline bool i915_request_signaled(const struct i915_request *rq)
{
	/* The request may live longer than its HWSP, so check flags first! */
	return test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags);
}

static inline bool i915_request_is_active(const struct i915_request *rq)
{
	return test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);
}

/**
 * Returns true if seq1 is later than seq2.
 */
static inline bool i915_seqno_passed(u32 seq1, u32 seq2)
{
	return (s32)(seq1 - seq2) >= 0;
}
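
/*
 * The signed subtraction makes the comparison safe across u32 wraparound.
 * A worked example: with seq1 == 2 and seq2 == 0xfffffffe, the difference
 * 2 - 0xfffffffe is 4 as a u32, which is >= 0 as an s32, so seq1 is
 * correctly treated as "later" even though it is numerically smaller.
 * Conversely, i915_seqno_passed(0xfffffffe, 2) yields (s32)0xfffffffc,
 * which is negative, i.e. false.
 */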

static inline u32 __hwsp_seqno(const struct i915_request *rq)
{
	return READ_ONCE(*rq->hwsp_seqno);
}

/**
 * hwsp_seqno - the current breadcrumb value in the HW status page
 * @rq: the request, to chase the relevant HW status page
 *
 * The emphasis in naming here is that hwsp_seqno() is not a property of the
 * request, but an indication of the current HW state (associated with this
 * request). Its value will change as the GPU executes more requests.
 *
 * Returns the current breadcrumb value in the associated HW status page (or
 * the local timeline's equivalent) for this request. The request itself
 * has the associated breadcrumb value of rq->fence.seqno; when the HW
 * status page has that breadcrumb or later, this request is complete.
 */
static inline u32 hwsp_seqno(const struct i915_request *rq)
{
	u32 seqno;

	rcu_read_lock(); /* the HWSP may be freed at runtime */
	seqno = __hwsp_seqno(rq);
	rcu_read_unlock();

	return seqno;
}

static inline bool __i915_request_has_started(const struct i915_request *rq)
{
	return i915_seqno_passed(hwsp_seqno(rq), rq->fence.seqno - 1);
}

/**
 * i915_request_started - check if the request has begun being executed
 * @rq: the request
 *
 * If the timeline is not using initial breadcrumbs, a request is
 * considered started if the previous request on its timeline (i.e.
 * context) has been signaled.
 *
 * If the timeline is using semaphores, it will also be emitting an
 * "initial breadcrumb" after the semaphores are complete and just before
 * it begins executing the user payload. A request can therefore be active
 * on the HW and not yet started as it is still busywaiting on its
 * dependencies (via HW semaphores).
 *
 * If the request has started, its dependencies will have been signaled
 * (either by fences or by semaphores) and it will have begun processing
 * the user payload.
 *
 * However, even if a request has started, it may have been preempted and
 * so no longer active, or it may have already completed.
 *
 * See also i915_request_is_active().
 *
 * Returns true if the request has begun executing the user payload, or
 * false otherwise.
 */
static inline bool i915_request_started(const struct i915_request *rq)
{
	if (i915_request_signaled(rq))
		return true;

	/* Remember: started but may have since been preempted! */
	return __i915_request_has_started(rq);
}

/**
 * i915_request_is_running - check if the request may actually be executing
 * @rq: the request
 *
 * Returns true if the request is currently submitted to hardware and has
 * passed its start point (i.e. the context is set up and not busywaiting).
 * Note that it may no longer be running by the time the function returns!
 */
static inline bool i915_request_is_running(const struct i915_request *rq)
{
	if (!i915_request_is_active(rq))
		return false;

	return __i915_request_has_started(rq);
}

static inline bool i915_request_completed(const struct i915_request *rq)
{
	if (i915_request_signaled(rq))
		return true;

	return i915_seqno_passed(hwsp_seqno(rq), rq->fence.seqno);
}
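
/*
 * How the predicates above relate, as a polling sketch (illustrative only;
 * a hypothetical debug helper, not an API from this file):
 *
 *	if (i915_request_completed(rq))
 *		state = "completed"; breadcrumb has reached fence.seqno
 *	else if (i915_request_is_running(rq))
 *		state = "running";   active on HW and past its start point
 *	else if (i915_request_started(rq))
 *		state = "started";   begun, but may have been preempted
 *	else
 *		state = "waiting";   dependencies not yet signaled
 *
 * Each check is only a snapshot: the GPU may advance between the test and
 * any action taken on its result.
 */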

static inline void i915_request_mark_complete(struct i915_request *rq)
{
	rq->hwsp_seqno = (u32 *)&rq->fence.seqno; /* decouple from HWSP */
}

static inline bool i915_request_has_waitboost(const struct i915_request *rq)
{
	return test_bit(I915_FENCE_FLAG_BOOST, &rq->fence.flags);
}

static inline bool i915_request_has_nopreempt(const struct i915_request *rq)
{
	/* Preemption should only be disabled very rarely */
	return unlikely(test_bit(I915_FENCE_FLAG_NOPREEMPT, &rq->fence.flags));
}

static inline bool i915_request_has_sentinel(const struct i915_request *rq)
{
	return unlikely(test_bit(I915_FENCE_FLAG_SENTINEL, &rq->fence.flags));
}

static inline struct intel_timeline *
i915_request_timeline(struct i915_request *rq)
{
	/* Valid only while the request is being constructed (or retired). */
	return rcu_dereference_protected(rq->timeline,
					 lockdep_is_held(&rcu_access_pointer(rq->timeline)->mutex));
}

static inline struct i915_gem_context *
i915_request_gem_context(struct i915_request *rq)
{
	/* Valid only while the request is being constructed (or retired). */
	return rcu_dereference_protected(rq->context->gem_context, true);
}

static inline struct intel_timeline *
i915_request_active_timeline(struct i915_request *rq)
{
	/*
	 * When in use during submission, we are protected by a guarantee that
	 * the context/timeline is pinned and must remain pinned until after
	 * this submission.
	 */
	return rcu_dereference_protected(rq->timeline,
					 lockdep_is_held(&rq->engine->active.lock));
}

#endif /* I915_REQUEST_H */