/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#ifndef __I915_GEM_CONTEXT_H__
#define __I915_GEM_CONTEXT_H__

#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/radix-tree.h>

#include "i915_gem.h"
#include "i915_scheduler.h"

struct pid;
struct drm_device;
struct drm_file;

struct drm_i915_private;
struct drm_i915_file_private;
struct i915_hw_ppgtt;
struct i915_request;
struct i915_vma;
struct intel_ring;

#define DEFAULT_CONTEXT_HANDLE 0

struct intel_context_ops {
	void (*unpin)(struct intel_context *ce);
	void (*destroy)(struct intel_context *ce);
};

/**
 * struct i915_gem_context - client state
 *
 * The struct i915_gem_context represents the combined view of the driver and
 * logical hardware state for a particular client.
 */
struct i915_gem_context {
	/** i915: i915 device backpointer */
	struct drm_i915_private *i915;

	/** file_priv: owning file descriptor */
	struct drm_i915_file_private *file_priv;

	/**
	 * @ppgtt: unique address space (GTT)
	 *
	 * In full-ppgtt mode, each context has its own address space ensuring
	 * complete separation of one client from all others.
	 *
	 * In other modes, this is a NULL pointer with the expectation that
	 * the caller uses the shared global GTT.
	 */
	struct i915_hw_ppgtt *ppgtt;
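
	/*
	 * Illustrative sketch (placeholder locals, not verbatim driver code):
	 * callers that need an address space are expected to fall back to the
	 * shared global GTT when @ppgtt is NULL, e.g. (assuming the
	 * i915_address_space is embedded as "base", as in this kernel's
	 * i915_gem_gtt.h):
	 *
	 *	vm = ctx->ppgtt ? &ctx->ppgtt->base : &ctx->i915->ggtt.base;
	 */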

	/**
	 * @pid: process id of creator
	 *
	 * Note that the creator of the context may not be its principal
	 * user, as the context may be shared across a local socket. However,
	 * that should only affect the default context; all contexts created
	 * explicitly by the client are expected to be isolated.
	 */
	struct pid *pid;

	/**
	 * @name: arbitrary name
	 *
	 * A name is constructed for the context from the creator's process
	 * name, pid and user handle in order to uniquely identify the
	 * context in messages.
	 */
	const char *name;

	/** link: place within &drm_i915_private.context_list */
	struct list_head link;
	/** free_link: node on the deferred free list of contexts */
	struct llist_node free_link;

	/**
	 * @ref: reference count
	 *
	 * A reference to a context is held both by the client who created it
	 * and by each request submitted to the hardware using that context
	 * (to ensure the hardware has access to the state until it has
	 * finished all pending writes). See i915_gem_context_get() and
	 * i915_gem_context_put() for access.
	 */
	struct kref ref;

	/**
	 * @rcu: rcu_head for deferred freeing.
	 */
	struct rcu_head rcu;

	/**
	 * @flags: small set of booleans
	 */
	unsigned long flags;
#define CONTEXT_NO_ZEROMAP		BIT(0)
#define CONTEXT_NO_ERROR_CAPTURE	1
#define CONTEXT_CLOSED			2
#define CONTEXT_BANNABLE		3
#define CONTEXT_BANNED			4
#define CONTEXT_FORCE_SINGLE_SUBMISSION	5

	/**
	 * @hw_id: unique identifier for the context
	 *
	 * The hardware needs to uniquely identify the context for functions
	 * such as fault reporting, PASID and scheduling. The
	 * &drm_i915_private.context_hw_ida is used to assign a unique
	 * id for the lifetime of the context.
	 */
	unsigned int hw_id;

	/**
	 * @user_handle: userspace identifier
	 *
	 * A unique per-file identifier is generated from
	 * &drm_i915_file_private.contexts.
	 */
	u32 user_handle;

	/** sched: scheduler attributes (e.g. priority) applied to requests on this context */
	struct i915_sched_attr sched;

	/** ggtt_offset_bias: placement restriction for context objects */
	u32 ggtt_offset_bias;

	/** engine: per-engine logical HW state */
	struct intel_context {
		struct i915_gem_context *gem_context;
		struct i915_vma *state;
		struct intel_ring *ring;
		u32 *lrc_reg_state;
		u64 lrc_desc;
		int pin_count;

		const struct intel_context_ops *ops;
	} __engine[I915_NUM_ENGINES];

	/** ring_size: size for allocating the per-engine ring buffer */
	u32 ring_size;
	/** desc_template: invariant fields for the HW context descriptor */
	u32 desc_template;

	/** guilty_count: How many times this context has caused a GPU hang. */
	atomic_t guilty_count;
	/**
	 * @active_count: How many times this context was active during a GPU
	 * hang, but did not cause it.
	 */
	atomic_t active_count;

#define CONTEXT_SCORE_GUILTY		10
#define CONTEXT_SCORE_BAN_THRESHOLD	40
	/** ban_score: Accumulated score of all hangs caused by this context. */
	atomic_t ban_score;

	/** remap_slice: Bitmask of L3 slices that need remapping */
	u8 remap_slice;

	/** handles_vma: radix tree to look up our context-specific obj/vma for
	 * the user handle. (user handles are per fd, but the binding is
	 * per vm, which may be one per context or shared with the global
	 * GTT). See the lookup sketch after this struct.
	 */
	struct radix_tree_root handles_vma;

	/** handles_list: reverse list of all the radix tree entries in use for
	 * this context, which allows us to free all the allocations on
	 * context close.
	 */
	struct list_head handles_list;
};
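
/*
 * Illustrative sketch of a handles_vma lookup (placeholder locals, locking
 * elided, not verbatim driver code): the execbuffer path can translate a
 * per-file userspace handle into the vma already bound for this context's
 * address space, e.g.:
 *
 *	struct i915_vma *vma;
 *
 *	vma = radix_tree_lookup(&ctx->handles_vma, handle);
 *	if (!vma)
 *		... slow path: look up the object by handle, bind it, then
 *		    insert the new entry into handles_vma and handles_list ...
 */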

static inline bool i915_gem_context_is_closed(const struct i915_gem_context *ctx)
{
	return test_bit(CONTEXT_CLOSED, &ctx->flags);
}

static inline void i915_gem_context_set_closed(struct i915_gem_context *ctx)
{
	GEM_BUG_ON(i915_gem_context_is_closed(ctx));
	__set_bit(CONTEXT_CLOSED, &ctx->flags);
}

static inline bool i915_gem_context_no_error_capture(const struct i915_gem_context *ctx)
{
	return test_bit(CONTEXT_NO_ERROR_CAPTURE, &ctx->flags);
}

static inline void i915_gem_context_set_no_error_capture(struct i915_gem_context *ctx)
{
	__set_bit(CONTEXT_NO_ERROR_CAPTURE, &ctx->flags);
}

static inline void i915_gem_context_clear_no_error_capture(struct i915_gem_context *ctx)
{
	__clear_bit(CONTEXT_NO_ERROR_CAPTURE, &ctx->flags);
}

static inline bool i915_gem_context_is_bannable(const struct i915_gem_context *ctx)
{
	return test_bit(CONTEXT_BANNABLE, &ctx->flags);
}

static inline void i915_gem_context_set_bannable(struct i915_gem_context *ctx)
{
	__set_bit(CONTEXT_BANNABLE, &ctx->flags);
}

static inline void i915_gem_context_clear_bannable(struct i915_gem_context *ctx)
{
	__clear_bit(CONTEXT_BANNABLE, &ctx->flags);
}

static inline bool i915_gem_context_is_banned(const struct i915_gem_context *ctx)
{
	return test_bit(CONTEXT_BANNED, &ctx->flags);
}

static inline void i915_gem_context_set_banned(struct i915_gem_context *ctx)
{
	__set_bit(CONTEXT_BANNED, &ctx->flags);
}
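
/*
 * Illustrative sketch (not verbatim code from i915_gem.c) of how the hang
 * handling is expected to combine these bits with the accumulated ban score:
 *
 *	if (i915_gem_context_is_bannable(ctx) &&
 *	    atomic_add_return(CONTEXT_SCORE_GUILTY, &ctx->ban_score) >=
 *	    CONTEXT_SCORE_BAN_THRESHOLD)
 *		i915_gem_context_set_banned(ctx);
 */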

static inline bool i915_gem_context_force_single_submission(const struct i915_gem_context *ctx)
{
	return test_bit(CONTEXT_FORCE_SINGLE_SUBMISSION, &ctx->flags);
}

static inline void i915_gem_context_set_force_single_submission(struct i915_gem_context *ctx)
{
	__set_bit(CONTEXT_FORCE_SINGLE_SUBMISSION, &ctx->flags);
}

static inline bool i915_gem_context_is_default(const struct i915_gem_context *c)
{
	return c->user_handle == DEFAULT_CONTEXT_HANDLE;
}

static inline bool i915_gem_context_is_kernel(struct i915_gem_context *ctx)
{
	return !ctx->file_priv;
}

static inline struct intel_context *
to_intel_context(struct i915_gem_context *ctx,
		 const struct intel_engine_cs *engine)
{
	return &ctx->__engine[engine->id];
}

static inline struct intel_context *
intel_context_pin(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
{
	return engine->context_pin(engine, ctx);
}

static inline void __intel_context_pin(struct intel_context *ce)
{
	GEM_BUG_ON(!ce->pin_count);
	ce->pin_count++;
}

static inline void intel_context_unpin(struct intel_context *ce)
{
	GEM_BUG_ON(!ce->pin_count);
	if (--ce->pin_count)
		return;

	GEM_BUG_ON(!ce->ops);
	ce->ops->unpin(ce);
}
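
/*
 * Illustrative usage sketch for the pin/unpin helpers ("ctx" and "engine" are
 * placeholder locals, error handling trimmed; not verbatim driver code):
 *
 *	struct intel_context *ce;
 *
 *	ce = intel_context_pin(ctx, engine);
 *	if (IS_ERR(ce))
 *		return PTR_ERR(ce);
 *
 *	... emit commands into ce->ring ...
 *
 *	intel_context_unpin(ce);
 */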

/* i915_gem_context.c */
int __must_check i915_gem_contexts_init(struct drm_i915_private *dev_priv);
void i915_gem_contexts_lost(struct drm_i915_private *dev_priv);
void i915_gem_contexts_fini(struct drm_i915_private *dev_priv);

int i915_gem_context_open(struct drm_i915_private *i915,
			  struct drm_file *file);
void i915_gem_context_close(struct drm_file *file);

int i915_switch_context(struct i915_request *rq);
int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv);

void i915_gem_context_release(struct kref *ctx_ref);
struct i915_gem_context *
i915_gem_context_create_gvt(struct drm_device *dev);

int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file);
int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file);
int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv);
int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv);
int i915_gem_context_reset_stats_ioctl(struct drm_device *dev, void *data,
				       struct drm_file *file);

struct i915_gem_context *
i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio);

static inline struct i915_gem_context *
i915_gem_context_get(struct i915_gem_context *ctx)
{
	kref_get(&ctx->ref);
	return ctx;
}

static inline void i915_gem_context_put(struct i915_gem_context *ctx)
{
	kref_put(&ctx->ref, i915_gem_context_release);
}
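
/*
 * Illustrative sketch ("src" is a placeholder pointer; not verbatim driver
 * code): any code that keeps a context pointer beyond the caller's reference
 * must take its own reference,
 *
 *	struct i915_gem_context *ctx;
 *
 *	ctx = i915_gem_context_get(src);	keeps the state alive
 *	...use ctx...
 *	i915_gem_context_put(ctx);		may call i915_gem_context_release()
 */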

#endif /* !__I915_GEM_CONTEXT_H__ */