/*
 * Copyright © 2011-2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Ben Widawsky <ben@bwidawsk.net>
 *
 */

/*
 * This file implements HW context support. On gen5+ a HW context consists of an
 * opaque GPU object which is referenced at times of context saves and restores.
 * With RC6 enabled, the context is also referenced as the GPU enters and exits
 * RC6 (the GPU has its own internal power context, except on gen5). Though
 * something like a context does exist for the media ring, the code only
 * supports contexts for the render ring.
 *
 * In software, there is a distinction between contexts created by the user,
 * and the default HW context. The default HW context is used by GPU clients
 * that do not request setup of their own hardware context. The default
 * context's state is never restored to help prevent programming errors. This
 * would happen if a client ran and piggy-backed off another client's GPU state.
 * The default context only exists to give the GPU some offset to load as the
 * current context in order to invoke a save of the context we actually care
 * about. In fact, the code could likely be constructed, albeit in a more
 * complicated fashion, to never use the default context, though that limits
 * the driver's ability to swap out, and/or destroy other contexts.
 *
 * All other contexts are created at the request of a GPU client. These contexts
 * store GPU state, and thus allow GPU clients to avoid re-emitting state (and
 * potentially to query certain state) at any time. The kernel driver makes
 * certain that the appropriate commands are inserted.
 *
 * The context life cycle is semi-complicated in that context BOs may live
 * longer than the context itself because of the way the hardware and object
 * tracking work. Below is a very crude representation of the state machine
 * describing the context life.
 *                                       refcount     pincount     active
 * S0: initial state                        0            0           0
 * S1: context created                      1            0           0
 * S2: context is currently running         2            1           X
 * S3: GPU referenced, but not current      2            0           1
 * S4: context is current, but destroyed    1            1           0
 * S5: like S3, but destroyed               1            0           1
 *
 * The most common (but not all) transitions:
 * S0->S1: client creates a context
 * S1->S2: client submits execbuf with context
 * S2->S3: another client submits execbuf with context
 * S3->S1: context object was retired
 * S3->S2: client submits another execbuf
 * S2->S4: context destroy called with current context
 * S3->S5->S0: destroy path
 * S4->S5->S0: destroy path on current context
 *
 * There are two confusing terms used above:
 *  The "current context" means the context which is currently running on the
 *  GPU. The GPU has loaded its state already and has stored away the gtt
 *  offset of the BO. The GPU is not actively referencing the data at this
 *  offset, but it will on the next context switch. The only way to avoid this
 *  is to do a GPU reset.
 *
 *  An "active context" is one which was previously the "current context" and is
 *  on the active list waiting for the next context switch to occur. Until this
 *  happens, the object must remain at the same gtt offset. It is therefore
 *  possible to destroy a context, yet have it remain active.
 *
 * A rough userspace-level sketch of the most common transitions follows this
 * comment.
 */
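/*
 * Illustrative userspace view of the life cycle above (a minimal sketch, not
 * part of the driver; it assumes libdrm's drmIoctl() wrapper and elides batch
 * buffer setup):
 *
 *	struct drm_i915_gem_context_create create = { 0 };
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);   // S0->S1
 *
 *	struct drm_i915_gem_execbuffer2 execbuf = { ... };
 *	execbuf.rsvd1 = create.ctx_id;       // run the batch with this context
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);     // S1->S2
 *
 *	struct drm_i915_gem_context_destroy destroy = {
 *		.ctx_id = create.ctx_id,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &destroy); // S2->S4 or S3->S5
 */
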
#include <linux/log2.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"

#define ALL_L3_SLICES(dev) ((1 << NUM_L3_SLICES(dev)) - 1)

static void lut_close(struct i915_gem_context *ctx)
{
	struct i915_lut_handle *lut, *ln;
	struct radix_tree_iter iter;
	void __rcu **slot;

	list_for_each_entry_safe(lut, ln, &ctx->handles_list, ctx_link) {
		list_del(&lut->obj_link);
		kmem_cache_free(ctx->i915->luts, lut);
	}

	rcu_read_lock();
	radix_tree_for_each_slot(slot, &ctx->handles_vma, &iter, 0) {
		struct i915_vma *vma = rcu_dereference_raw(*slot);

		radix_tree_iter_delete(&ctx->handles_vma, &iter, slot);
		__i915_gem_object_release_unless_active(vma->obj);
	}
	rcu_read_unlock();
}

static void i915_gem_context_free(struct i915_gem_context *ctx)
{
	int i;

	lockdep_assert_held(&ctx->i915->drm.struct_mutex);
	GEM_BUG_ON(!i915_gem_context_is_closed(ctx));

	i915_ppgtt_put(ctx->ppgtt);

	for (i = 0; i < I915_NUM_ENGINES; i++) {
		struct intel_context *ce = &ctx->engine[i];

		if (!ce->state)
			continue;

		WARN_ON(ce->pin_count);
		if (ce->ring)
			intel_ring_free(ce->ring);

		__i915_gem_object_release_unless_active(ce->state->obj);
	}

	kfree(ctx->name);
	put_pid(ctx->pid);

	list_del(&ctx->link);

	ida_simple_remove(&ctx->i915->contexts.hw_ida, ctx->hw_id);
	kfree_rcu(ctx, rcu);
}

static void contexts_free(struct drm_i915_private *i915)
{
	struct llist_node *freed = llist_del_all(&i915->contexts.free_list);
	struct i915_gem_context *ctx, *cn;

	lockdep_assert_held(&i915->drm.struct_mutex);

	llist_for_each_entry_safe(ctx, cn, freed, free_link)
		i915_gem_context_free(ctx);
}

static void contexts_free_first(struct drm_i915_private *i915)
{
	struct i915_gem_context *ctx;
	struct llist_node *freed;

	lockdep_assert_held(&i915->drm.struct_mutex);

	freed = llist_del_first(&i915->contexts.free_list);
	if (!freed)
		return;

	ctx = container_of(freed, typeof(*ctx), free_link);
	i915_gem_context_free(ctx);
}

static void contexts_free_worker(struct work_struct *work)
{
	struct drm_i915_private *i915 =
		container_of(work, typeof(*i915), contexts.free_work);

	mutex_lock(&i915->drm.struct_mutex);
	contexts_free(i915);
	mutex_unlock(&i915->drm.struct_mutex);
}

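/*
 * Final kref release. Freeing a context requires struct_mutex, which the
 * last i915_gem_context_put() may not hold, so the context is pushed onto a
 * lock-free llist and the actual i915_gem_context_free() call is deferred to
 * the contexts.free_work worker above.
 */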
void i915_gem_context_release(struct kref *ref)
{
	struct i915_gem_context *ctx = container_of(ref, typeof(*ctx), ref);
	struct drm_i915_private *i915 = ctx->i915;

	trace_i915_context_free(ctx);
	if (llist_add(&ctx->free_link, &i915->contexts.free_list))
		queue_work(i915->wq, &i915->contexts.free_work);
}

static void context_close(struct i915_gem_context *ctx)
{
	i915_gem_context_set_closed(ctx);

	/*
	 * The LUT uses the VMA as a backpointer to unref the object,
	 * so we need to clear the LUT before we close all the VMA (inside
	 * the ppgtt).
	 */
	lut_close(ctx);
	if (ctx->ppgtt)
		i915_ppgtt_close(&ctx->ppgtt->base);

	ctx->file_priv = ERR_PTR(-EBADF);
	i915_gem_context_put(ctx);
}

static int assign_hw_id(struct drm_i915_private *dev_priv, unsigned *out)
{
	int ret;

	ret = ida_simple_get(&dev_priv->contexts.hw_ida,
			     0, MAX_CONTEXT_HW_ID, GFP_KERNEL);
	if (ret < 0) {
		/*
		 * Contexts are only released when no longer active.
		 * Flush any pending retires to hopefully release some
		 * stale contexts and try again.
		 */
		i915_gem_retire_requests(dev_priv);
		ret = ida_simple_get(&dev_priv->contexts.hw_ida,
				     0, MAX_CONTEXT_HW_ID, GFP_KERNEL);
		if (ret < 0)
			return ret;
	}

	*out = ret;
	return 0;
}

static u32 default_desc_template(const struct drm_i915_private *i915,
				 const struct i915_hw_ppgtt *ppgtt)
{
	u32 address_mode;
	u32 desc;

	desc = GEN8_CTX_VALID | GEN8_CTX_PRIVILEGE;

	address_mode = INTEL_LEGACY_32B_CONTEXT;
	if (ppgtt && i915_vm_is_48bit(&ppgtt->base))
		address_mode = INTEL_LEGACY_64B_CONTEXT;
	desc |= address_mode << GEN8_CTX_ADDRESSING_MODE_SHIFT;

	if (IS_GEN8(i915))
		desc |= GEN8_CTX_L3LLC_COHERENT;

	/* TODO: WaDisableLiteRestore when we start using semaphore
	 * signalling between Command Streamers
	 * ring->ctx_desc_template |= GEN8_CTX_FORCE_RESTORE;
	 */

	return desc;
}

static struct i915_gem_context *
__create_hw_context(struct drm_i915_private *dev_priv,
		    struct drm_i915_file_private *file_priv)
{
	struct i915_gem_context *ctx;
	int ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return ERR_PTR(-ENOMEM);

	ret = assign_hw_id(dev_priv, &ctx->hw_id);
	if (ret) {
		kfree(ctx);
		return ERR_PTR(ret);
	}

	kref_init(&ctx->ref);
	list_add_tail(&ctx->link, &dev_priv->contexts.list);
	ctx->i915 = dev_priv;
	ctx->priority = I915_PRIORITY_NORMAL;

	INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
	INIT_LIST_HEAD(&ctx->handles_list);

	/* Default context will never have a file_priv */
	ret = DEFAULT_CONTEXT_HANDLE;
	if (file_priv) {
		ret = idr_alloc(&file_priv->context_idr, ctx,
				DEFAULT_CONTEXT_HANDLE, 0, GFP_KERNEL);
		if (ret < 0)
			goto err_lut;
	}
	ctx->user_handle = ret;

	ctx->file_priv = file_priv;
	if (file_priv) {
		ctx->pid = get_task_pid(current, PIDTYPE_PID);
		ctx->name = kasprintf(GFP_KERNEL, "%s[%d]/%x",
				      current->comm,
				      pid_nr(ctx->pid),
				      ctx->user_handle);
		if (!ctx->name) {
			ret = -ENOMEM;
			goto err_pid;
		}
	}

	/* NB: Mark all slices as needing a remap so that when the context first
	 * loads it will restore whatever remap state already exists. If there
	 * is no remap info, it will be a NOP. */
	ctx->remap_slice = ALL_L3_SLICES(dev_priv);

	i915_gem_context_set_bannable(ctx);
	ctx->ring_size = 4 * PAGE_SIZE;
	ctx->desc_template =
		default_desc_template(dev_priv, dev_priv->mm.aliasing_ppgtt);

	/* GuC requires the ring to be placed above GUC_WOPCM_TOP. If GuC is not
	 * present or not in use we still need a small bias as ring wraparound
	 * at offset 0 sometimes hangs. No idea why.
	 */
	if (USES_GUC(dev_priv))
		ctx->ggtt_offset_bias = GUC_WOPCM_TOP;
	else
		ctx->ggtt_offset_bias = I915_GTT_PAGE_SIZE;

	return ctx;

err_pid:
	put_pid(ctx->pid);
	idr_remove(&file_priv->context_idr, ctx->user_handle);
err_lut:
	context_close(ctx);
	return ERR_PTR(ret);
}

static void __destroy_hw_context(struct i915_gem_context *ctx,
				 struct drm_i915_file_private *file_priv)
{
	idr_remove(&file_priv->context_idr, ctx->user_handle);
	context_close(ctx);
}

/*
 * The default context needs to exist per ring that uses contexts. It stores the
 * context state of the GPU for applications that don't utilize HW contexts, as
 * well as an idle case.
 */
static struct i915_gem_context *
i915_gem_create_context(struct drm_i915_private *dev_priv,
			struct drm_i915_file_private *file_priv)
{
	struct i915_gem_context *ctx;

	lockdep_assert_held(&dev_priv->drm.struct_mutex);

	/* Reap the most stale context */
	contexts_free_first(dev_priv);

	ctx = __create_hw_context(dev_priv, file_priv);
	if (IS_ERR(ctx))
		return ctx;

	if (USES_FULL_PPGTT(dev_priv)) {
		struct i915_hw_ppgtt *ppgtt;

		ppgtt = i915_ppgtt_create(dev_priv, file_priv, ctx->name);
		if (IS_ERR(ppgtt)) {
			DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
					 PTR_ERR(ppgtt));
			__destroy_hw_context(ctx, file_priv);
			return ERR_CAST(ppgtt);
		}

		ctx->ppgtt = ppgtt;
		ctx->desc_template = default_desc_template(dev_priv, ppgtt);
	}

	trace_i915_context_create(ctx);

	return ctx;
}

/**
 * i915_gem_context_create_gvt - create a GVT GEM context
 * @dev: drm device *
 *
 * This function is used to create a GVT specific GEM context.
 *
 * Returns:
 * pointer to i915_gem_context on success, error pointer if failed
 *
 */
struct i915_gem_context *
i915_gem_context_create_gvt(struct drm_device *dev)
{
	struct i915_gem_context *ctx;
	int ret;

	if (!IS_ENABLED(CONFIG_DRM_I915_GVT))
		return ERR_PTR(-ENODEV);

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ERR_PTR(ret);

	ctx = __create_hw_context(to_i915(dev), NULL);
	if (IS_ERR(ctx))
		goto out;

	ctx->file_priv = ERR_PTR(-EBADF);
	i915_gem_context_set_closed(ctx); /* not user accessible */
	i915_gem_context_clear_bannable(ctx);
	i915_gem_context_set_force_single_submission(ctx);
	if (!USES_GUC_SUBMISSION(to_i915(dev)))
		ctx->ring_size = 512 * PAGE_SIZE; /* Max ring buffer size */

	GEM_BUG_ON(i915_gem_context_is_kernel(ctx));
out:
	mutex_unlock(&dev->struct_mutex);
	return ctx;
}

struct i915_gem_context *
i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio)
{
	struct i915_gem_context *ctx;

	ctx = i915_gem_create_context(i915, NULL);
	if (IS_ERR(ctx))
		return ctx;

	i915_gem_context_clear_bannable(ctx);
	ctx->priority = prio;
	ctx->ring_size = PAGE_SIZE;

	GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));

	return ctx;
}

static void
destroy_kernel_context(struct i915_gem_context **ctxp)
{
	struct i915_gem_context *ctx;

	/* Keep the context ref so that we can free it immediately ourselves */
	ctx = i915_gem_context_get(fetch_and_zero(ctxp));
	GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));

	context_close(ctx);
	i915_gem_context_free(ctx);
}

int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
{
	struct i915_gem_context *ctx;
	int err;

	GEM_BUG_ON(dev_priv->kernel_context);

	INIT_LIST_HEAD(&dev_priv->contexts.list);
	INIT_WORK(&dev_priv->contexts.free_work, contexts_free_worker);
	init_llist_head(&dev_priv->contexts.free_list);

	/* Using the simple ida interface, the max is limited by sizeof(int) */
	BUILD_BUG_ON(MAX_CONTEXT_HW_ID > INT_MAX);
	ida_init(&dev_priv->contexts.hw_ida);

	/* lowest priority; idle task */
	ctx = i915_gem_context_create_kernel(dev_priv, I915_PRIORITY_MIN);
	if (IS_ERR(ctx)) {
		DRM_ERROR("Failed to create default global context\n");
		err = PTR_ERR(ctx);
		goto err;
	}
	/*
	 * For easy recognisability, we want the kernel context to be 0 and
	 * then all user contexts will have a non-zero hw_id.
	 */
	GEM_BUG_ON(ctx->hw_id);
	dev_priv->kernel_context = ctx;

	/* highest priority; preempting task */
	ctx = i915_gem_context_create_kernel(dev_priv, INT_MAX);
	if (IS_ERR(ctx)) {
		DRM_ERROR("Failed to create default preempt context\n");
		err = PTR_ERR(ctx);
		goto err_kernel_context;
	}
	dev_priv->preempt_context = ctx;

	DRM_DEBUG_DRIVER("%s context support initialized\n",
			 dev_priv->engine[RCS]->context_size ? "logical" :
			 "fake");
	return 0;

err_kernel_context:
	destroy_kernel_context(&dev_priv->kernel_context);
err:
	return err;
}

void i915_gem_contexts_lost(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	lockdep_assert_held(&dev_priv->drm.struct_mutex);

	for_each_engine(engine, dev_priv, id) {
		engine->legacy_active_context = NULL;
		engine->legacy_active_ppgtt = NULL;

		if (!engine->last_retired_context)
			continue;

		engine->context_unpin(engine, engine->last_retired_context);
		engine->last_retired_context = NULL;
	}
}

void i915_gem_contexts_fini(struct drm_i915_private *i915)
{
	lockdep_assert_held(&i915->drm.struct_mutex);

	destroy_kernel_context(&i915->preempt_context);
	destroy_kernel_context(&i915->kernel_context);

	/* Must free all deferred contexts (via flush_workqueue) first */
	ida_destroy(&i915->contexts.hw_ida);
}

static int context_idr_cleanup(int id, void *p, void *data)
{
	struct i915_gem_context *ctx = p;

	context_close(ctx);
	return 0;
}

int i915_gem_context_open(struct drm_i915_private *i915,
			  struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct i915_gem_context *ctx;

	idr_init(&file_priv->context_idr);

	mutex_lock(&i915->drm.struct_mutex);
	ctx = i915_gem_create_context(i915, file_priv);
	mutex_unlock(&i915->drm.struct_mutex);
	if (IS_ERR(ctx)) {
		idr_destroy(&file_priv->context_idr);
		return PTR_ERR(ctx);
	}

	GEM_BUG_ON(i915_gem_context_is_kernel(ctx));

	return 0;
}

void i915_gem_context_close(struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	lockdep_assert_held(&file_priv->dev_priv->drm.struct_mutex);

	idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
	idr_destroy(&file_priv->context_idr);
}

static bool engine_has_idle_kernel_context(struct intel_engine_cs *engine)
{
	struct i915_gem_timeline *timeline;

	list_for_each_entry(timeline, &engine->i915->gt.timelines, link) {
		struct intel_timeline *tl;

		if (timeline == &engine->i915->gt.global_timeline)
			continue;

		tl = &timeline->engine[engine->id];
		if (i915_gem_active_peek(&tl->last_request,
					 &engine->i915->drm.struct_mutex))
			return false;
	}

	return intel_engine_has_kernel_context(engine);
}

int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	struct i915_gem_timeline *timeline;
	enum intel_engine_id id;

	lockdep_assert_held(&dev_priv->drm.struct_mutex);

	i915_gem_retire_requests(dev_priv);

	for_each_engine(engine, dev_priv, id) {
		struct drm_i915_gem_request *req;

		if (engine_has_idle_kernel_context(engine))
			continue;

		req = i915_gem_request_alloc(engine, dev_priv->kernel_context);
		if (IS_ERR(req))
			return PTR_ERR(req);

		/* Queue this switch after all other activity */
		list_for_each_entry(timeline, &dev_priv->gt.timelines, link) {
			struct drm_i915_gem_request *prev;
			struct intel_timeline *tl;

			tl = &timeline->engine[engine->id];
			prev = i915_gem_active_raw(&tl->last_request,
						   &dev_priv->drm.struct_mutex);
			if (prev)
				i915_sw_fence_await_sw_fence_gfp(&req->submit,
								 &prev->submit,
								 GFP_KERNEL);
		}

		/*
		 * Force a flush after the switch to ensure that all rendering
		 * and operations prior to switching to the kernel context hit
		 * memory. This should be guaranteed by the previous request,
		 * but an extra layer of paranoia before we declare the system
		 * idle (on suspend etc) is advisable!
		 */
		__i915_add_request(req, true);
	}

	return 0;
}

static bool client_is_banned(struct drm_i915_file_private *file_priv)
{
	return atomic_read(&file_priv->context_bans) > I915_MAX_CLIENT_CONTEXT_BANS;
}

int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_context_create *args = data;
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct i915_gem_context *ctx;
	int ret;

	if (!dev_priv->engine[RCS]->context_size)
		return -ENODEV;

	if (args->pad != 0)
		return -EINVAL;

	if (client_is_banned(file_priv)) {
		DRM_DEBUG("client %s[%d] banned from creating ctx\n",
			  current->comm,
			  pid_nr(get_task_pid(current, PIDTYPE_PID)));

		return -EIO;
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ctx = i915_gem_create_context(dev_priv, file_priv);
	mutex_unlock(&dev->struct_mutex);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	GEM_BUG_ON(i915_gem_context_is_kernel(ctx));

	args->ctx_id = ctx->user_handle;
	DRM_DEBUG("HW context %d created\n", args->ctx_id);

	return 0;
}

int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file)
{
	struct drm_i915_gem_context_destroy *args = data;
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct i915_gem_context *ctx;
	int ret;

	if (args->pad != 0)
		return -EINVAL;

	if (args->ctx_id == DEFAULT_CONTEXT_HANDLE)
		return -ENOENT;

	ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
	if (!ctx)
		return -ENOENT;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out;

	__destroy_hw_context(ctx, file_priv);
	mutex_unlock(&dev->struct_mutex);

out:
	i915_gem_context_put(ctx);
	return ret;
}

int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct drm_i915_gem_context_param *args = data;
	struct i915_gem_context *ctx;
	int ret = 0;

	ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
	if (!ctx)
		return -ENOENT;

	args->size = 0;
	switch (args->param) {
	case I915_CONTEXT_PARAM_BAN_PERIOD:
		ret = -EINVAL;
		break;
	case I915_CONTEXT_PARAM_NO_ZEROMAP:
		args->value = ctx->flags & CONTEXT_NO_ZEROMAP;
		break;
	case I915_CONTEXT_PARAM_GTT_SIZE:
		if (ctx->ppgtt)
			args->value = ctx->ppgtt->base.total;
		else if (to_i915(dev)->mm.aliasing_ppgtt)
			args->value = to_i915(dev)->mm.aliasing_ppgtt->base.total;
		else
			args->value = to_i915(dev)->ggtt.base.total;
		break;
	case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
		args->value = i915_gem_context_no_error_capture(ctx);
		break;
	case I915_CONTEXT_PARAM_BANNABLE:
		args->value = i915_gem_context_is_bannable(ctx);
		break;
	case I915_CONTEXT_PARAM_PRIORITY:
		args->value = ctx->priority;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	i915_gem_context_put(ctx);
	return ret;
}

int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct drm_i915_gem_context_param *args = data;
	struct i915_gem_context *ctx;
	int ret;

	ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
	if (!ctx)
		return -ENOENT;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto out;

	switch (args->param) {
	case I915_CONTEXT_PARAM_BAN_PERIOD:
		ret = -EINVAL;
		break;
	case I915_CONTEXT_PARAM_NO_ZEROMAP:
		if (args->size) {
			ret = -EINVAL;
		} else {
			ctx->flags &= ~CONTEXT_NO_ZEROMAP;
			ctx->flags |= args->value ? CONTEXT_NO_ZEROMAP : 0;
		}
		break;
	case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
		if (args->size)
			ret = -EINVAL;
		else if (args->value)
			i915_gem_context_set_no_error_capture(ctx);
		else
			i915_gem_context_clear_no_error_capture(ctx);
		break;
	case I915_CONTEXT_PARAM_BANNABLE:
		if (args->size)
			ret = -EINVAL;
		else if (!capable(CAP_SYS_ADMIN) && !args->value)
			ret = -EPERM;
		else if (args->value)
			i915_gem_context_set_bannable(ctx);
		else
			i915_gem_context_clear_bannable(ctx);
		break;
	case I915_CONTEXT_PARAM_PRIORITY:
		{
			s64 priority = args->value;

			if (args->size)
				ret = -EINVAL;
			else if (!to_i915(dev)->engine[RCS]->schedule)
				ret = -ENODEV;
			else if (priority > I915_CONTEXT_MAX_USER_PRIORITY ||
				 priority < I915_CONTEXT_MIN_USER_PRIORITY)
				ret = -EINVAL;
			else if (priority > I915_CONTEXT_DEFAULT_PRIORITY &&
				 !capable(CAP_SYS_NICE))
				ret = -EPERM;
			else
				ctx->priority = priority;
		}
		break;

	default:
		ret = -EINVAL;
		break;
	}
	mutex_unlock(&dev->struct_mutex);

out:
	i915_gem_context_put(ctx);
	return ret;
}

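/*
 * Illustrative userspace use of the setparam path above (a sketch only; it
 * assumes libdrm's drmIoctl() and a context id obtained from the create
 * ioctl):
 *
 *	struct drm_i915_gem_context_param p = {
 *		.ctx_id = ctx_id,
 *		.param = I915_CONTEXT_PARAM_PRIORITY,
 *		.value = 512,	// must lie within the MIN/MAX user range above
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
 *
 * Raising the priority above the default requires CAP_SYS_NICE, as checked
 * in the switch statement above.
 */
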
int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
				       void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_reset_stats *args = data;
	struct i915_gem_context *ctx;
	int ret;

	if (args->flags || args->pad)
		return -EINVAL;

	ret = -ENOENT;
	rcu_read_lock();
	ctx = __i915_gem_context_lookup_rcu(file->driver_priv, args->ctx_id);
	if (!ctx)
		goto out;

	/*
	 * We opt for unserialised reads here. This may result in tearing
	 * in the extremely unlikely event of a GPU hang on this context
	 * as we are querying them. If we need that extra layer of protection,
	 * we should wrap the hangstats with a seqlock.
	 */

	if (capable(CAP_SYS_ADMIN))
		args->reset_count = i915_reset_count(&dev_priv->gpu_error);
	else
		args->reset_count = 0;

	args->batch_active = atomic_read(&ctx->guilty_count);
	args->batch_pending = atomic_read(&ctx->active_count);

	ret = 0;
out:
	rcu_read_unlock();
	return ret;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_context.c"
#include "selftests/i915_gem_context.c"
#endif