/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
25 #ifndef I915_GEM_TIMELINE_H
26 #define I915_GEM_TIMELINE_H
28 #include <linux/list.h>
30 #include "i915_utils.h"
31 #include "i915_gem_request.h"
32 #include "i915_syncmap.h"
34 struct i915_gem_timeline
;
36 struct intel_timeline
{
41 * Count of outstanding requests, from the time they are constructed
42 * to the moment they are retired. Loosely coupled to hardware.
49 * List of breadcrumbs associated with GPU requests currently
52 struct list_head requests
;
54 /* Contains an RCU guarded pointer to the last request. No reference is
55 * held to the request, users must carefully acquire a reference to
56 * the request using i915_gem_active_get_request_rcu(), or hold the
59 struct i915_gem_active last_request
;
62 * We track the most recent seqno that we wait on in every context so
63 * that we only have to emit a new await and dependency on a more
64 * recent sync point. As the contexts may be executed out-of-order, we
65 * have to track each individually and can not rely on an absolute
66 * global_seqno. When we know that all tracked fences are completed
67 * (i.e. when the driver is idle), we know that the syncmap is
68 * redundant and we can discard it without loss of generality.
70 struct i915_syncmap
*sync
;
72 * Separately to the inter-context seqno map above, we track the last
73 * barrier (e.g. semaphore wait) to the global engine timelines. Note
74 * that this tracks global_seqno rather than the context.seqno, and
75 * so it is subject to the limitations of hw wraparound and that we
76 * may need to revoke global_seqno (on pre-emption).
78 u32 global_sync
[I915_NUM_ENGINES
];
80 struct i915_gem_timeline
*common
;
83 struct i915_gem_timeline
{
84 struct list_head link
;
86 struct drm_i915_private
*i915
;
89 struct intel_timeline engine
[I915_NUM_ENGINES
];
/*
 * Timeline setup/teardown entry points (implemented in i915_gem_timeline.c).
 *
 * NOTE(review): the original i915_gem_timeline_init() declaration was cut
 * off after "struct i915_gem_timeline *tl," in this copy; the trailing
 * "const char *name" parameter is reconstructed — confirm against the
 * implementation.
 */
int i915_gem_timeline_init(struct drm_i915_private *i915,
			   struct i915_gem_timeline *tl,
			   const char *name);
int i915_gem_timeline_init__global(struct drm_i915_private *i915);
void i915_gem_timelines_park(struct drm_i915_private *i915);
void i915_gem_timeline_fini(struct i915_gem_timeline *tl);
99 static inline int __intel_timeline_sync_set(struct intel_timeline
*tl
,
100 u64 context
, u32 seqno
)
102 return i915_syncmap_set(&tl
->sync
, context
, seqno
);
105 static inline int intel_timeline_sync_set(struct intel_timeline
*tl
,
106 const struct dma_fence
*fence
)
108 return __intel_timeline_sync_set(tl
, fence
->context
, fence
->seqno
);
111 static inline bool __intel_timeline_sync_is_later(struct intel_timeline
*tl
,
112 u64 context
, u32 seqno
)
114 return i915_syncmap_is_later(&tl
->sync
, context
, seqno
);
117 static inline bool intel_timeline_sync_is_later(struct intel_timeline
*tl
,
118 const struct dma_fence
*fence
)
120 return __intel_timeline_sync_is_later(tl
, fence
->context
, fence
->seqno
);