/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#ifndef __INTEL_CONTEXT_H__
#define __INTEL_CONTEXT_H__

#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <linux/types.h>

#include "i915_active.h"
#include "intel_context_types.h"
#include "intel_engine_types.h"
#include "intel_ring_types.h"
#include "intel_timeline_types.h"

#define CE_TRACE(ce, fmt, ...) do {					\
	const struct intel_context *ce__ = (ce);			\
	ENGINE_TRACE(ce__->engine, "context:%llx " fmt,			\
		     ce__->timeline->fence_context,			\
		     ##__VA_ARGS__);					\
} while (0)

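/*
 * Example use of CE_TRACE (illustrative sketch only; the message and the
 * extra arguments are hypothetical):
 *
 *	CE_TRACE(ce, "pin ring:{head:%04x, tail:%04x}\n",
 *		 ce->ring->head, ce->ring->tail);
 */
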
void intel_context_init(struct intel_context *ce,
			struct intel_engine_cs *engine);
void intel_context_fini(struct intel_context *ce);

struct intel_context *
intel_context_create(struct intel_engine_cs *engine);

int intel_context_alloc_state(struct intel_context *ce);

void intel_context_free(struct intel_context *ce);

/**
 * intel_context_lock_pinned - Stabilises the 'pinned' status of the HW context
 * @ce - the context
 *
 * Acquire a lock on the pinned status of the HW context, such that the context
 * can neither be bound to the GPU nor unbound whilst the lock is held, i.e.
 * intel_context_is_pinned() remains stable.
 */
static inline int intel_context_lock_pinned(struct intel_context *ce)
	__acquires(ce->pin_mutex)
{
	return mutex_lock_interruptible(&ce->pin_mutex);
}

/**
 * intel_context_is_pinned - Reports the 'pinned' status
 * @ce - the context
 *
 * While in use by the GPU, the context, along with its ring and page
 * tables, is pinned into memory and the GTT.
 *
 * Returns: true if the context is currently pinned for use by the GPU.
 */
static inline bool
intel_context_is_pinned(struct intel_context *ce)
{
	return atomic_read(&ce->pin_count);
}

/**
 * intel_context_unlock_pinned - Releases the earlier locking of 'pinned' status
 * @ce - the context
 *
 * Releases the lock earlier acquired by intel_context_lock_pinned().
 */
static inline void intel_context_unlock_pinned(struct intel_context *ce)
	__releases(ce->pin_mutex)
{
	mutex_unlock(&ce->pin_mutex);
}

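/*
 * A minimal sketch of how the pinned-status lock is intended to be used
 * (illustrative only):
 *
 *	if (intel_context_lock_pinned(ce) == 0) {
 *		if (intel_context_is_pinned(ce)) {
 *			... inspect state that is only valid while the ...
 *			... context remains bound to the GPU ...
 *		}
 *		intel_context_unlock_pinned(ce);
 *	}
 */
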
int __intel_context_do_pin(struct intel_context *ce);

static inline bool intel_context_pin_if_active(struct intel_context *ce)
{
	return atomic_inc_not_zero(&ce->pin_count);
}

static inline int intel_context_pin(struct intel_context *ce)
{
	if (likely(intel_context_pin_if_active(ce)))
		return 0;

	return __intel_context_do_pin(ce);
}

static inline void __intel_context_pin(struct intel_context *ce)
{
	GEM_BUG_ON(!intel_context_is_pinned(ce));
	atomic_inc(&ce->pin_count);
}

void intel_context_unpin(struct intel_context *ce);

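/*
 * Pinning is reference counted and every successful pin must be balanced
 * by an unpin; a minimal sketch of the usual calling sequence
 * (illustrative only):
 *
 *	err = intel_context_pin(ce);
 *	if (err)
 *		return err;
 *	... construct and submit requests against ce ...
 *	intel_context_unpin(ce);
 */
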
void intel_context_enter_engine(struct intel_context *ce);
void intel_context_exit_engine(struct intel_context *ce);

static inline void intel_context_enter(struct intel_context *ce)
{
	lockdep_assert_held(&ce->timeline->mutex);
	if (!ce->active_count++)
		ce->ops->enter(ce);
}

static inline void intel_context_mark_active(struct intel_context *ce)
{
	lockdep_assert_held(&ce->timeline->mutex);
	++ce->active_count;
}

static inline void intel_context_exit(struct intel_context *ce)
{
	lockdep_assert_held(&ce->timeline->mutex);
	GEM_BUG_ON(!ce->active_count);
	if (!--ce->active_count)
		ce->ops->exit(ce);
}

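/*
 * intel_context_enter/exit() reference count the context's activity under
 * the timeline mutex; the first enter and the matching last exit invoke
 * the backend's enter/exit ops. A minimal sketch (illustrative only):
 *
 *	mutex_lock(&ce->timeline->mutex);
 *	intel_context_enter(ce);
 *	... the first enter invokes ce->ops->enter(ce) ...
 *	intel_context_exit(ce);
 *	... the last exit invokes ce->ops->exit(ce) ...
 *	mutex_unlock(&ce->timeline->mutex);
 */
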
static inline struct intel_context *intel_context_get(struct intel_context *ce)
{
	kref_get(&ce->ref);
	return ce;
}

static inline void intel_context_put(struct intel_context *ce)
{
	kref_put(&ce->ref, ce->ops->destroy);
}

static inline struct intel_timeline *__must_check
intel_context_timeline_lock(struct intel_context *ce)
	__acquires(&ce->timeline->mutex)
{
	struct intel_timeline *tl = ce->timeline;
	int err;

	err = mutex_lock_interruptible(&tl->mutex);
	if (err)
		return ERR_PTR(err);

	return tl;
}

static inline void intel_context_timeline_unlock(struct intel_timeline *tl)
	__releases(&tl->mutex)
{
	mutex_unlock(&tl->mutex);
}

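/*
 * A minimal sketch of taking the timeline lock around request emission
 * (illustrative only):
 *
 *	struct intel_timeline *tl;
 *
 *	tl = intel_context_timeline_lock(ce);
 *	if (IS_ERR(tl))
 *		return PTR_ERR(tl);
 *	... emit requests along the timeline ...
 *	intel_context_timeline_unlock(tl);
 */
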
int intel_context_prepare_remote_request(struct intel_context *ce,
					 struct i915_request *rq);

struct i915_request *intel_context_create_request(struct intel_context *ce);

static inline struct intel_ring *__intel_context_ring_size(u64 sz)
{
	return u64_to_ptr(struct intel_ring, sz);
}

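/*
 * Until the ring is actually allocated, its desired size is smuggled
 * through the ring pointer itself, e.g. (the size value here is
 * illustrative):
 *
 *	ce->ring = __intel_context_ring_size(SZ_16K);
 */
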
static inline bool intel_context_is_barrier(const struct intel_context *ce)
{
	return test_bit(CONTEXT_BARRIER_BIT, &ce->flags);
}

static inline bool intel_context_use_semaphores(const struct intel_context *ce)
{
	return test_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);
}

static inline void intel_context_set_use_semaphores(struct intel_context *ce)
{
	set_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);
}

static inline void intel_context_clear_use_semaphores(struct intel_context *ce)
{
	clear_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);
}

static inline bool intel_context_is_banned(const struct intel_context *ce)
{
	return test_bit(CONTEXT_BANNED, &ce->flags);
}

static inline bool intel_context_set_banned(struct intel_context *ce)
{
	return test_and_set_bit(CONTEXT_BANNED, &ce->flags);
}

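/*
 * As test_and_set_bit() returns the previous flag value,
 * intel_context_set_banned() lets the caller detect the first ban
 * (illustrative sketch only):
 *
 *	if (!intel_context_set_banned(ce))
 *		... first time banned: cancel the context's outstanding work ...
 */
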
static inline bool
intel_context_force_single_submission(const struct intel_context *ce)
{
	return test_bit(CONTEXT_FORCE_SINGLE_SUBMISSION, &ce->flags);
}

static inline void
intel_context_set_single_submission(struct intel_context *ce)
{
	__set_bit(CONTEXT_FORCE_SINGLE_SUBMISSION, &ce->flags);
}

static inline bool
intel_context_nopreempt(const struct intel_context *ce)
{
	return test_bit(CONTEXT_NOPREEMPT, &ce->flags);
}

static inline void
intel_context_set_nopreempt(struct intel_context *ce)
{
	set_bit(CONTEXT_NOPREEMPT, &ce->flags);
}

static inline void
intel_context_clear_nopreempt(struct intel_context *ce)
{
	clear_bit(CONTEXT_NOPREEMPT, &ce->flags);
}

#endif /* __INTEL_CONTEXT_H__ */