drivers/gpu/drm/i915/gem/i915_gem_context.c
1 /*
2 * SPDX-License-Identifier: MIT
4 * Copyright © 2011-2012 Intel Corporation
5 */
7 /*
8 * This file implements HW context support. On gen5+ a HW context consists of an
9 * opaque GPU object which is referenced at times of context saves and restores.
10 * With RC6 enabled, the context is also referenced as the GPU enters and exits
11 * from RC6 (GPU has its own internal power context, except on gen5). Though
12 * something like a context does exist for the media ring, the code only
13 * supports contexts for the render ring.
15 * In software, there is a distinction between contexts created by the user,
16 * and the default HW context. The default HW context is used by GPU clients
17 * that do not request setup of their own hardware context. The default
18 * context's state is never restored to help prevent programming errors. This
19 * would happen if a client ran and piggy-backed off another client's GPU state.
20 * The default context only exists to give the GPU some offset to load as the
21 * current context, in order to invoke a save of the context we actually care about. In fact, the
22 * code could likely be constructed, albeit in a more complicated fashion, to
23 * never use the default context, though that limits the driver's ability to
24 * swap out, and/or destroy other contexts.
26 * All other contexts are created as a request by the GPU client. These contexts
27 * store GPU state, and thus allow GPU clients to not re-emit state (and
28 * potentially query certain state) at any time. The kernel driver makes
29 * certain that the appropriate commands are inserted.
31 * The context life cycle is semi-complicated in that context BOs may live
32 * longer than the context itself because of the way the hardware, and object
33 * tracking works. Below is a very crude representation of the state machine
34 * describing the context life.
35 *                                           refcount   pincount   active
36 * S0: initial state                             0          0        0
37 * S1: context created                           1          0        0
38 * S2: context is currently running              2          1        X
39 * S3: GPU referenced, but not current           2          0        1
40 * S4: context is current, but destroyed         1          1        0
41 * S5: like S3, but destroyed                    1          0        1
43 * The most common (but not all) transitions:
44 * S0->S1: client creates a context
45 * S1->S2: client submits execbuf with context
46 * S2->S3: another client submits execbuf with context
47 * S3->S1: context object was retired
48 * S3->S2: client submits another execbuf
49 * S2->S4: context destroy called with current context
50 * S3->S5->S0: destroy path
51 * S4->S5->S0: destroy path on current context
53 * There are two confusing terms used above:
54 * The "current context" means the context which is currently running on the
55 * GPU. The GPU has loaded its state already and has stored away the gtt
56 * offset of the BO. The GPU is not actively referencing the data at this
57 * offset, but it will on the next context switch. The only way to avoid this
58 * is to do a GPU reset.
60 * An "active context" is one which was previously the "current context" and is
61 * on the active list waiting for the next context switch to occur. Until this
62 * happens, the object must remain at the same gtt offset. It is therefore
63 * possible to destroy a context while it is still active.
67 #include <linux/log2.h>
68 #include <linux/nospec.h>
70 #include <drm/i915_drm.h>
72 #include "gt/gen6_ppgtt.h"
73 #include "gt/intel_context.h"
74 #include "gt/intel_engine_heartbeat.h"
75 #include "gt/intel_engine_pm.h"
76 #include "gt/intel_engine_user.h"
77 #include "gt/intel_lrc_reg.h"
78 #include "gt/intel_ring.h"
80 #include "i915_gem_context.h"
81 #include "i915_globals.h"
82 #include "i915_trace.h"
83 #include "i915_user_extensions.h"
85 #define ALL_L3_SLICES(dev) (1 << NUM_L3_SLICES(dev)) - 1
87 static struct i915_global_gem_context {
88 struct i915_global base;
89 struct kmem_cache *slab_luts;
90 } global;
92 struct i915_lut_handle *i915_lut_handle_alloc(void)
94 return kmem_cache_alloc(global.slab_luts, GFP_KERNEL);
97 void i915_lut_handle_free(struct i915_lut_handle *lut)
99 return kmem_cache_free(global.slab_luts, lut);
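/*
 * Tear down the handle->vma lookup table for a closing context: walk
 * ctx->handles_vma, drop each object's LUT entry for this context,
 * release the per-context vma open count and put the object references.
 */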
102 static void lut_close(struct i915_gem_context *ctx)
104 struct radix_tree_iter iter;
105 void __rcu **slot;
107 lockdep_assert_held(&ctx->mutex);
109 rcu_read_lock();
110 radix_tree_for_each_slot(slot, &ctx->handles_vma, &iter, 0) {
111 struct i915_vma *vma = rcu_dereference_raw(*slot);
112 struct drm_i915_gem_object *obj = vma->obj;
113 struct i915_lut_handle *lut;
115 if (!kref_get_unless_zero(&obj->base.refcount))
116 continue;
118 rcu_read_unlock();
119 i915_gem_object_lock(obj);
120 list_for_each_entry(lut, &obj->lut_list, obj_link) {
121 if (lut->ctx != ctx)
122 continue;
124 if (lut->handle != iter.index)
125 continue;
127 list_del(&lut->obj_link);
128 break;
130 i915_gem_object_unlock(obj);
131 rcu_read_lock();
133 if (&lut->obj_link != &obj->lut_list) {
134 i915_lut_handle_free(lut);
135 radix_tree_iter_delete(&ctx->handles_vma, &iter, slot);
136 if (atomic_dec_and_test(&vma->open_count) &&
137 !i915_vma_is_ggtt(vma))
138 i915_vma_close(vma);
139 i915_gem_object_put(obj);
142 i915_gem_object_put(obj);
144 rcu_read_unlock();
147 static struct intel_context *
148 lookup_user_engine(struct i915_gem_context *ctx,
149 unsigned long flags,
150 const struct i915_engine_class_instance *ci)
151 #define LOOKUP_USER_INDEX BIT(0)
153 int idx;
155 if (!!(flags & LOOKUP_USER_INDEX) != i915_gem_context_user_engines(ctx))
156 return ERR_PTR(-EINVAL);
158 if (!i915_gem_context_user_engines(ctx)) {
159 struct intel_engine_cs *engine;
161 engine = intel_engine_lookup_user(ctx->i915,
162 ci->engine_class,
163 ci->engine_instance);
164 if (!engine)
165 return ERR_PTR(-EINVAL);
167 idx = engine->legacy_idx;
168 } else {
169 idx = ci->engine_instance;
172 return i915_gem_context_get_engine(ctx, idx);
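/*
 * Acquire a reference to ctx->vm under RCU. kref_get_unless_zero() can
 * race with the vm being freed and its memory reused, so re-check that
 * ctx->vm still points at the vm we referenced before handing it back.
 */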
175 static struct i915_address_space *
176 context_get_vm_rcu(struct i915_gem_context *ctx)
178 GEM_BUG_ON(!rcu_access_pointer(ctx->vm));
180 do {
181 struct i915_address_space *vm;
184 * We do not allow downgrading from full-ppgtt [to a shared
185 * global gtt], so ctx->vm cannot become NULL.
187 vm = rcu_dereference(ctx->vm);
188 if (!kref_get_unless_zero(&vm->ref))
189 continue;
192 * This ppgtt may have been reallocated between
193 * the read and the kref, and reassigned to a third
194 * context. In order to avoid inadvertent sharing
195 * of this ppgtt with that third context (and not
196 * src), we have to confirm that we have the same
197 * ppgtt after passing through the strong memory
198 * barrier implied by a successful
199 * kref_get_unless_zero().
201 * Once we have acquired the current ppgtt of ctx,
202 * we no longer care if it is released from ctx, as
203 * it cannot be reallocated elsewhere.
206 if (vm == rcu_access_pointer(ctx->vm))
207 return rcu_pointer_handoff(vm);
209 i915_vm_put(vm);
210 } while (1);
213 static void intel_context_set_gem(struct intel_context *ce,
214 struct i915_gem_context *ctx)
216 GEM_BUG_ON(rcu_access_pointer(ce->gem_context));
217 RCU_INIT_POINTER(ce->gem_context, ctx);
219 if (!test_bit(CONTEXT_ALLOC_BIT, &ce->flags))
220 ce->ring = __intel_context_ring_size(SZ_16K);
222 if (rcu_access_pointer(ctx->vm)) {
223 struct i915_address_space *vm;
225 rcu_read_lock();
226 vm = context_get_vm_rcu(ctx); /* hmm */
227 rcu_read_unlock();
229 i915_vm_put(ce->vm);
230 ce->vm = vm;
233 GEM_BUG_ON(ce->timeline);
234 if (ctx->timeline)
235 ce->timeline = intel_timeline_get(ctx->timeline);
237 if (ctx->sched.priority >= I915_PRIORITY_NORMAL &&
238 intel_engine_has_semaphores(ce->engine))
239 __set_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);
242 static void __free_engines(struct i915_gem_engines *e, unsigned int count)
244 while (count--) {
245 if (!e->engines[count])
246 continue;
248 RCU_INIT_POINTER(e->engines[count]->gem_context, NULL);
249 intel_context_put(e->engines[count]);
251 kfree(e);
254 static void free_engines(struct i915_gem_engines *e)
256 __free_engines(e, e->num_engines);
259 static void free_engines_rcu(struct rcu_head *rcu)
261 free_engines(container_of(rcu, struct i915_gem_engines, rcu));
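/*
 * Build the default (legacy) engine map: one intel_context per physical
 * engine, indexed by the engine's legacy ring index. Used for contexts
 * that have not supplied their own map via I915_CONTEXT_PARAM_ENGINES.
 */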
264 static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx)
266 const struct intel_gt *gt = &ctx->i915->gt;
267 struct intel_engine_cs *engine;
268 struct i915_gem_engines *e;
269 enum intel_engine_id id;
271 e = kzalloc(struct_size(e, engines, I915_NUM_ENGINES), GFP_KERNEL);
272 if (!e)
273 return ERR_PTR(-ENOMEM);
275 init_rcu_head(&e->rcu);
276 for_each_engine(engine, gt, id) {
277 struct intel_context *ce;
279 if (engine->legacy_idx == INVALID_ENGINE)
280 continue;
282 GEM_BUG_ON(engine->legacy_idx >= I915_NUM_ENGINES);
283 GEM_BUG_ON(e->engines[engine->legacy_idx]);
285 ce = intel_context_create(engine);
286 if (IS_ERR(ce)) {
287 __free_engines(e, e->num_engines + 1);
288 return ERR_CAST(ce);
291 intel_context_set_gem(ce, ctx);
293 e->engines[engine->legacy_idx] = ce;
294 e->num_engines = max(e->num_engines, engine->legacy_idx);
296 e->num_engines++;
298 return e;
301 static void i915_gem_context_free(struct i915_gem_context *ctx)
303 GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
305 spin_lock(&ctx->i915->gem.contexts.lock);
306 list_del(&ctx->link);
307 spin_unlock(&ctx->i915->gem.contexts.lock);
309 free_engines(rcu_access_pointer(ctx->engines));
310 mutex_destroy(&ctx->engines_mutex);
312 if (ctx->timeline)
313 intel_timeline_put(ctx->timeline);
315 put_pid(ctx->pid);
316 mutex_destroy(&ctx->mutex);
318 kfree_rcu(ctx, rcu);
321 static void contexts_free_all(struct llist_node *list)
323 struct i915_gem_context *ctx, *cn;
325 llist_for_each_entry_safe(ctx, cn, list, free_link)
326 i915_gem_context_free(ctx);
329 static void contexts_flush_free(struct i915_gem_contexts *gc)
331 contexts_free_all(llist_del_all(&gc->free_list));
334 static void contexts_free_worker(struct work_struct *work)
336 struct i915_gem_contexts *gc =
337 container_of(work, typeof(*gc), free_work);
339 contexts_flush_free(gc);
342 void i915_gem_context_release(struct kref *ref)
344 struct i915_gem_context *ctx = container_of(ref, typeof(*ctx), ref);
345 struct i915_gem_contexts *gc = &ctx->i915->gem.contexts;
347 trace_i915_context_free(ctx);
348 if (llist_add(&ctx->free_link, &gc->free_list))
349 schedule_work(&gc->free_work);
352 static inline struct i915_gem_engines *
353 __context_engines_static(const struct i915_gem_context *ctx)
355 return rcu_dereference_protected(ctx->engines, true);
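/*
 * Attempt a per-engine reset, if the platform supports it and no other
 * reset of this engine is already in flight; returns true on success.
 */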
358 static bool __reset_engine(struct intel_engine_cs *engine)
360 struct intel_gt *gt = engine->gt;
361 bool success = false;
363 if (!intel_has_reset_engine(gt))
364 return false;
366 if (!test_and_set_bit(I915_RESET_ENGINE + engine->id,
367 &gt->reset.flags)) {
368 success = intel_engine_reset(engine, NULL) == 0;
369 clear_and_wake_up_bit(I915_RESET_ENGINE + engine->id,
370 &gt->reset.flags);
373 return success;
376 static void __reset_context(struct i915_gem_context *ctx,
377 struct intel_engine_cs *engine)
379 intel_gt_handle_error(engine->gt, engine->mask, 0,
380 "context closure in %s", ctx->name);
383 static bool __cancel_engine(struct intel_engine_cs *engine)
386 * Send a "high priority pulse" down the engine to cause the
387 * current request to be momentarily preempted. (If it fails to
388 * be preempted, it will be reset). As we have marked our context
389 * as banned, any incomplete request, including any running, will
390 * be skipped following the preemption.
392 * If there is no hangchecking (one of the reasons why we try to
393 * cancel the context) and no forced preemption, there may be no
394 * means by which we reset the GPU and evict the persistent hog.
395 * Ergo if we are unable to inject a preemptive pulse that can
396 * kill the banned context, we fallback to doing a local reset
397 * instead.
399 if (IS_ACTIVE(CONFIG_DRM_I915_PREEMPT_TIMEOUT) &&
400 !intel_engine_pulse(engine))
401 return true;
403 /* If we are unable to send a pulse, try resetting this engine. */
404 return __reset_engine(engine);
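/*
 * Find the engine on which @rq is actually executing. rq->engine may be
 * updated as the request moves (e.g. between the siblings of a virtual
 * engine), so chase it under the engine's active.lock. Returns NULL if
 * the request is not in flight or has already been marked in error.
 */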
407 static struct intel_engine_cs *__active_engine(struct i915_request *rq)
409 struct intel_engine_cs *engine, *locked;
412 * Serialise with __i915_request_submit() so that it sees
413 * is-banned?, or we know the request is already inflight.
415 locked = READ_ONCE(rq->engine);
416 spin_lock_irq(&locked->active.lock);
417 while (unlikely(locked != (engine = READ_ONCE(rq->engine)))) {
418 spin_unlock(&locked->active.lock);
419 spin_lock(&engine->active.lock);
420 locked = engine;
423 engine = NULL;
424 if (i915_request_is_active(rq) && !rq->fence.error)
425 engine = rq->engine;
427 spin_unlock_irq(&locked->active.lock);
429 return engine;
432 static struct intel_engine_cs *active_engine(struct intel_context *ce)
434 struct intel_engine_cs *engine = NULL;
435 struct i915_request *rq;
437 if (!ce->timeline)
438 return NULL;
440 mutex_lock(&ce->timeline->mutex);
441 list_for_each_entry_reverse(rq, &ce->timeline->requests, link) {
442 if (i915_request_completed(rq))
443 break;
445 /* Check with the backend if the request is inflight */
446 engine = __active_engine(rq);
447 if (engine)
448 break;
450 mutex_unlock(&ce->timeline->mutex);
452 return engine;
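/*
 * Mark every engine context of @ctx as banned and, for any that is still
 * executing on the GPU, try to preempt it off with a pulse; if that
 * fails, fall back to resetting the engine. Used on context close when
 * the context is not allowed to persist.
 */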
455 static void kill_context(struct i915_gem_context *ctx)
457 struct i915_gem_engines_iter it;
458 struct intel_context *ce;
461 * Map the user's engine back to the actual engines; one virtual
462 * engine will be mapped to multiple engines, and using ctx->engine[]
463 * the same engine may have multiple instances in the user's map.
464 * However, we only care about pending requests, so only include
465 * engines on which there are incomplete requests.
467 for_each_gem_engine(ce, __context_engines_static(ctx), it) {
468 struct intel_engine_cs *engine;
470 if (intel_context_set_banned(ce))
471 continue;
474 * Check the current active state of this context; if we
475 * are currently executing on the GPU we need to evict
476 * ourselves. On the other hand, if we haven't yet been
477 * submitted to the GPU or if everything is complete,
478 * we have nothing to do.
480 engine = active_engine(ce);
482 /* First attempt to gracefully cancel the context */
483 if (engine && !__cancel_engine(engine))
485 * If we are unable to send a preemptive pulse to bump
486 * the context from the GPU, we have to resort to a full
487 * reset. We hope the collateral damage is worth it.
489 __reset_context(ctx, engine);
493 static void set_closed_name(struct i915_gem_context *ctx)
495 char *s;
497 /* Replace '[]' with '<>' to indicate closed in debug prints */
499 s = strrchr(ctx->name, '[');
500 if (!s)
501 return;
503 *s = '<';
505 s = strchr(s + 1, ']');
506 if (s)
507 *s = '>';
510 static void context_close(struct i915_gem_context *ctx)
512 struct i915_address_space *vm;
514 i915_gem_context_set_closed(ctx);
515 set_closed_name(ctx);
517 mutex_lock(&ctx->mutex);
519 vm = i915_gem_context_vm(ctx);
520 if (vm)
521 i915_vm_close(vm);
523 ctx->file_priv = ERR_PTR(-EBADF);
526 * The LUT uses the VMA as a backpointer to unref the object,
527 * so we need to clear the LUT before we close all the VMA (inside
528 * the ppgtt).
530 lut_close(ctx);
532 mutex_unlock(&ctx->mutex);
535 * If the user has disabled hangchecking, we can not be sure that
536 * the batches will ever complete after the context is closed,
537 * keeping the context and all resources pinned forever. So in this
538 * case we opt to forcibly kill off all remaining requests on
539 * context close.
541 if (!i915_gem_context_is_persistent(ctx) ||
542 !i915_modparams.enable_hangcheck)
543 kill_context(ctx);
545 i915_gem_context_put(ctx);
548 static int __context_set_persistence(struct i915_gem_context *ctx, bool state)
550 if (i915_gem_context_is_persistent(ctx) == state)
551 return 0;
553 if (state) {
555 * Only contexts that are short-lived [that will expire or be
556 * reset] are allowed to survive past termination. We require
557 * hangcheck to ensure that the persistent requests are healthy.
559 if (!i915_modparams.enable_hangcheck)
560 return -EINVAL;
562 i915_gem_context_set_persistence(ctx);
563 } else {
564 /* To cancel a context we use "preempt-to-idle" */
565 if (!(ctx->i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION))
566 return -ENODEV;
568 i915_gem_context_clear_persistence(ctx);
571 return 0;
574 static struct i915_gem_context *
575 __create_context(struct drm_i915_private *i915)
577 struct i915_gem_context *ctx;
578 struct i915_gem_engines *e;
579 int err;
580 int i;
582 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
583 if (!ctx)
584 return ERR_PTR(-ENOMEM);
586 kref_init(&ctx->ref);
587 ctx->i915 = i915;
588 ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_NORMAL);
589 mutex_init(&ctx->mutex);
591 mutex_init(&ctx->engines_mutex);
592 e = default_engines(ctx);
593 if (IS_ERR(e)) {
594 err = PTR_ERR(e);
595 goto err_free;
597 RCU_INIT_POINTER(ctx->engines, e);
599 INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
601 /* NB: Mark all slices as needing a remap so that when the context first
602 * loads it will restore whatever remap state already exists. If there
603 * is no remap info, it will be a NOP. */
604 ctx->remap_slice = ALL_L3_SLICES(i915);
606 i915_gem_context_set_bannable(ctx);
607 i915_gem_context_set_recoverable(ctx);
608 __context_set_persistence(ctx, true /* cgroup hook? */);
610 for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++)
611 ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES;
613 spin_lock(&i915->gem.contexts.lock);
614 list_add_tail(&ctx->link, &i915->gem.contexts.list);
615 spin_unlock(&i915->gem.contexts.lock);
617 return ctx;
619 err_free:
620 kfree(ctx);
621 return ERR_PTR(err);
624 static void
625 context_apply_all(struct i915_gem_context *ctx,
626 void (*fn)(struct intel_context *ce, void *data),
627 void *data)
629 struct i915_gem_engines_iter it;
630 struct intel_context *ce;
632 for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it)
633 fn(ce, data);
634 i915_gem_context_unlock_engines(ctx);
637 static void __apply_ppgtt(struct intel_context *ce, void *vm)
639 i915_vm_put(ce->vm);
640 ce->vm = i915_vm_get(vm);
643 static struct i915_address_space *
644 __set_ppgtt(struct i915_gem_context *ctx, struct i915_address_space *vm)
646 struct i915_address_space *old = i915_gem_context_vm(ctx);
648 GEM_BUG_ON(old && i915_vm_is_4lvl(vm) != i915_vm_is_4lvl(old));
650 rcu_assign_pointer(ctx->vm, i915_vm_open(vm));
651 context_apply_all(ctx, __apply_ppgtt, vm);
653 return old;
656 static void __assign_ppgtt(struct i915_gem_context *ctx,
657 struct i915_address_space *vm)
659 if (vm == rcu_access_pointer(ctx->vm))
660 return;
662 vm = __set_ppgtt(ctx, vm);
663 if (vm)
664 i915_vm_close(vm);
667 static void __set_timeline(struct intel_timeline **dst,
668 struct intel_timeline *src)
670 struct intel_timeline *old = *dst;
672 *dst = src ? intel_timeline_get(src) : NULL;
674 if (old)
675 intel_timeline_put(old);
678 static void __apply_timeline(struct intel_context *ce, void *timeline)
680 __set_timeline(&ce->timeline, timeline);
683 static void __assign_timeline(struct i915_gem_context *ctx,
684 struct intel_timeline *timeline)
686 __set_timeline(&ctx->timeline, timeline);
687 context_apply_all(ctx, __apply_timeline, timeline);
690 static struct i915_gem_context *
691 i915_gem_create_context(struct drm_i915_private *i915, unsigned int flags)
693 struct i915_gem_context *ctx;
695 if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE &&
696 !HAS_EXECLISTS(i915))
697 return ERR_PTR(-EINVAL);
699 /* Reap the stale contexts */
700 contexts_flush_free(&i915->gem.contexts);
702 ctx = __create_context(i915);
703 if (IS_ERR(ctx))
704 return ctx;
706 if (HAS_FULL_PPGTT(i915)) {
707 struct i915_ppgtt *ppgtt;
709 ppgtt = i915_ppgtt_create(&i915->gt);
710 if (IS_ERR(ppgtt)) {
711 DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
712 PTR_ERR(ppgtt));
713 context_close(ctx);
714 return ERR_CAST(ppgtt);
717 mutex_lock(&ctx->mutex);
718 __assign_ppgtt(ctx, &ppgtt->vm);
719 mutex_unlock(&ctx->mutex);
721 i915_vm_put(&ppgtt->vm);
724 if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE) {
725 struct intel_timeline *timeline;
727 timeline = intel_timeline_create(&i915->gt, NULL);
728 if (IS_ERR(timeline)) {
729 context_close(ctx);
730 return ERR_CAST(timeline);
733 __assign_timeline(ctx, timeline);
734 intel_timeline_put(timeline);
737 trace_i915_context_create(ctx);
739 return ctx;
742 static void init_contexts(struct i915_gem_contexts *gc)
744 spin_lock_init(&gc->lock);
745 INIT_LIST_HEAD(&gc->list);
747 INIT_WORK(&gc->free_work, contexts_free_worker);
748 init_llist_head(&gc->free_list);
751 void i915_gem_init__contexts(struct drm_i915_private *i915)
753 init_contexts(&i915->gem.contexts);
754 DRM_DEBUG_DRIVER("%s context support initialized\n",
755 DRIVER_CAPS(i915)->has_logical_contexts ?
756 "logical" : "fake");
759 void i915_gem_driver_release__contexts(struct drm_i915_private *i915)
761 flush_work(&i915->gem.contexts.free_work);
764 static int vm_idr_cleanup(int id, void *p, void *data)
766 i915_vm_put(p);
767 return 0;
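/*
 * Attach the new context to the opening client: record the owning pid,
 * name the context for debug output, and publish it in the file's
 * context xarray. The value returned through @id is the ctx_id that
 * userspace passes to subsequent ioctls.
 */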
770 static int gem_context_register(struct i915_gem_context *ctx,
771 struct drm_i915_file_private *fpriv,
772 u32 *id)
774 struct i915_address_space *vm;
775 int ret;
777 ctx->file_priv = fpriv;
779 mutex_lock(&ctx->mutex);
780 vm = i915_gem_context_vm(ctx);
781 if (vm)
782 WRITE_ONCE(vm->file, fpriv); /* XXX */
783 mutex_unlock(&ctx->mutex);
785 ctx->pid = get_task_pid(current, PIDTYPE_PID);
786 snprintf(ctx->name, sizeof(ctx->name), "%s[%d]",
787 current->comm, pid_nr(ctx->pid));
789 /* And finally expose ourselves to userspace via the idr */
790 ret = xa_alloc(&fpriv->context_xa, id, ctx, xa_limit_32b, GFP_KERNEL);
791 if (ret)
792 put_pid(fetch_and_zero(&ctx->pid));
794 return ret;
797 int i915_gem_context_open(struct drm_i915_private *i915,
798 struct drm_file *file)
800 struct drm_i915_file_private *file_priv = file->driver_priv;
801 struct i915_gem_context *ctx;
802 int err;
803 u32 id;
805 xa_init_flags(&file_priv->context_xa, XA_FLAGS_ALLOC);
807 mutex_init(&file_priv->vm_idr_lock);
808 idr_init_base(&file_priv->vm_idr, 1);
810 ctx = i915_gem_create_context(i915, 0);
811 if (IS_ERR(ctx)) {
812 err = PTR_ERR(ctx);
813 goto err;
816 err = gem_context_register(ctx, file_priv, &id);
817 if (err < 0)
818 goto err_ctx;
820 GEM_BUG_ON(id);
821 return 0;
823 err_ctx:
824 context_close(ctx);
825 err:
826 idr_destroy(&file_priv->vm_idr);
827 xa_destroy(&file_priv->context_xa);
828 mutex_destroy(&file_priv->vm_idr_lock);
829 return err;
832 void i915_gem_context_close(struct drm_file *file)
834 struct drm_i915_file_private *file_priv = file->driver_priv;
835 struct drm_i915_private *i915 = file_priv->dev_priv;
836 struct i915_gem_context *ctx;
837 unsigned long idx;
839 xa_for_each(&file_priv->context_xa, idx, ctx)
840 context_close(ctx);
841 xa_destroy(&file_priv->context_xa);
843 idr_for_each(&file_priv->vm_idr, vm_idr_cleanup, NULL);
844 idr_destroy(&file_priv->vm_idr);
845 mutex_destroy(&file_priv->vm_idr_lock);
847 contexts_flush_free(&i915->gem.contexts);
850 int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
851 struct drm_file *file)
853 struct drm_i915_private *i915 = to_i915(dev);
854 struct drm_i915_gem_vm_control *args = data;
855 struct drm_i915_file_private *file_priv = file->driver_priv;
856 struct i915_ppgtt *ppgtt;
857 int err;
859 if (!HAS_FULL_PPGTT(i915))
860 return -ENODEV;
862 if (args->flags)
863 return -EINVAL;
865 ppgtt = i915_ppgtt_create(&i915->gt);
866 if (IS_ERR(ppgtt))
867 return PTR_ERR(ppgtt);
869 ppgtt->vm.file = file_priv;
871 if (args->extensions) {
872 err = i915_user_extensions(u64_to_user_ptr(args->extensions),
873 NULL, 0,
874 ppgtt);
875 if (err)
876 goto err_put;
879 err = mutex_lock_interruptible(&file_priv->vm_idr_lock);
880 if (err)
881 goto err_put;
883 err = idr_alloc(&file_priv->vm_idr, &ppgtt->vm, 0, 0, GFP_KERNEL);
884 if (err < 0)
885 goto err_unlock;
887 GEM_BUG_ON(err == 0); /* reserved for invalid/unassigned ppgtt */
889 mutex_unlock(&file_priv->vm_idr_lock);
891 args->vm_id = err;
892 return 0;
894 err_unlock:
895 mutex_unlock(&file_priv->vm_idr_lock);
896 err_put:
897 i915_vm_put(&ppgtt->vm);
898 return err;
901 int i915_gem_vm_destroy_ioctl(struct drm_device *dev, void *data,
902 struct drm_file *file)
904 struct drm_i915_file_private *file_priv = file->driver_priv;
905 struct drm_i915_gem_vm_control *args = data;
906 struct i915_address_space *vm;
907 int err;
908 u32 id;
910 if (args->flags)
911 return -EINVAL;
913 if (args->extensions)
914 return -EINVAL;
916 id = args->vm_id;
917 if (!id)
918 return -ENOENT;
920 err = mutex_lock_interruptible(&file_priv->vm_idr_lock);
921 if (err)
922 return err;
924 vm = idr_remove(&file_priv->vm_idr, id);
926 mutex_unlock(&file_priv->vm_idr_lock);
927 if (!vm)
928 return -ENOENT;
930 i915_vm_put(vm);
931 return 0;
934 struct context_barrier_task {
935 struct i915_active base;
936 void (*task)(void *data);
937 void *data;
940 __i915_active_call
941 static void cb_retire(struct i915_active *base)
943 struct context_barrier_task *cb = container_of(base, typeof(*cb), base);
945 if (cb->task)
946 cb->task(cb->data);
948 i915_active_fini(&cb->base);
949 kfree(cb);
952 I915_SELFTEST_DECLARE(static intel_engine_mask_t context_barrier_inject_fault);
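/*
 * Queue a barrier request on each engine in @engines (engines may be
 * filtered out via @skip, and @emit may add commands to each request),
 * then invoke @task once every barrier has retired, i.e. once all prior
 * work on those engines has drained past this point.
 */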
953 static int context_barrier_task(struct i915_gem_context *ctx,
954 intel_engine_mask_t engines,
955 bool (*skip)(struct intel_context *ce, void *data),
956 int (*emit)(struct i915_request *rq, void *data),
957 void (*task)(void *data),
958 void *data)
960 struct context_barrier_task *cb;
961 struct i915_gem_engines_iter it;
962 struct intel_context *ce;
963 int err = 0;
965 GEM_BUG_ON(!task);
967 cb = kmalloc(sizeof(*cb), GFP_KERNEL);
968 if (!cb)
969 return -ENOMEM;
971 i915_active_init(&cb->base, NULL, cb_retire);
972 err = i915_active_acquire(&cb->base);
973 if (err) {
974 kfree(cb);
975 return err;
978 for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
979 struct i915_request *rq;
981 if (I915_SELFTEST_ONLY(context_barrier_inject_fault &
982 ce->engine->mask)) {
983 err = -ENXIO;
984 break;
987 if (!(ce->engine->mask & engines))
988 continue;
990 if (skip && skip(ce, data))
991 continue;
993 rq = intel_context_create_request(ce);
994 if (IS_ERR(rq)) {
995 err = PTR_ERR(rq);
996 break;
999 err = 0;
1000 if (emit)
1001 err = emit(rq, data);
1002 if (err == 0)
1003 err = i915_active_add_request(&cb->base, rq);
1005 i915_request_add(rq);
1006 if (err)
1007 break;
1009 i915_gem_context_unlock_engines(ctx);
1011 cb->task = err ? NULL : task; /* caller needs to unwind instead */
1012 cb->data = data;
1014 i915_active_release(&cb->base);
1016 return err;
1019 static int get_ppgtt(struct drm_i915_file_private *file_priv,
1020 struct i915_gem_context *ctx,
1021 struct drm_i915_gem_context_param *args)
1023 struct i915_address_space *vm;
1024 int ret;
1026 if (!rcu_access_pointer(ctx->vm))
1027 return -ENODEV;
1029 rcu_read_lock();
1030 vm = context_get_vm_rcu(ctx);
1031 rcu_read_unlock();
1033 ret = mutex_lock_interruptible(&file_priv->vm_idr_lock);
1034 if (ret)
1035 goto err_put;
1037 ret = idr_alloc(&file_priv->vm_idr, vm, 0, 0, GFP_KERNEL);
1038 GEM_BUG_ON(!ret);
1039 if (ret < 0)
1040 goto err_unlock;
1042 i915_vm_open(vm);
1044 args->size = 0;
1045 args->value = ret;
1047 ret = 0;
1048 err_unlock:
1049 mutex_unlock(&file_priv->vm_idr_lock);
1050 err_put:
1051 i915_vm_put(vm);
1052 return ret;
1055 static void set_ppgtt_barrier(void *data)
1057 struct i915_address_space *old = data;
1059 if (INTEL_GEN(old->i915) < 8)
1060 gen6_ppgtt_unpin_all(i915_vm_to_ppgtt(old));
1062 i915_vm_close(old);
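/*
 * Emit the MI_LOAD_REGISTER_IMM writes that point the engine's PDP
 * registers at the new page directories, so that batches submitted after
 * this request run in the new address space.
 */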
1065 static int emit_ppgtt_update(struct i915_request *rq, void *data)
1067 struct i915_address_space *vm = rq->context->vm;
1068 struct intel_engine_cs *engine = rq->engine;
1069 u32 base = engine->mmio_base;
1070 u32 *cs;
1071 int i;
1073 if (i915_vm_is_4lvl(vm)) {
1074 struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
1075 const dma_addr_t pd_daddr = px_dma(ppgtt->pd);
1077 cs = intel_ring_begin(rq, 6);
1078 if (IS_ERR(cs))
1079 return PTR_ERR(cs);
1081 *cs++ = MI_LOAD_REGISTER_IMM(2);
1083 *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, 0));
1084 *cs++ = upper_32_bits(pd_daddr);
1085 *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, 0));
1086 *cs++ = lower_32_bits(pd_daddr);
1088 *cs++ = MI_NOOP;
1089 intel_ring_advance(rq, cs);
1090 } else if (HAS_LOGICAL_RING_CONTEXTS(engine->i915)) {
1091 struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
1092 int err;
1094 /* Magic required to prevent forcewake errors! */
1095 err = engine->emit_flush(rq, EMIT_INVALIDATE);
1096 if (err)
1097 return err;
1099 cs = intel_ring_begin(rq, 4 * GEN8_3LVL_PDPES + 2);
1100 if (IS_ERR(cs))
1101 return PTR_ERR(cs);
1103 *cs++ = MI_LOAD_REGISTER_IMM(2 * GEN8_3LVL_PDPES) | MI_LRI_FORCE_POSTED;
1104 for (i = GEN8_3LVL_PDPES; i--; ) {
1105 const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
1107 *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, i));
1108 *cs++ = upper_32_bits(pd_daddr);
1109 *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, i));
1110 *cs++ = lower_32_bits(pd_daddr);
1112 *cs++ = MI_NOOP;
1113 intel_ring_advance(rq, cs);
1116 return 0;
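/*
 * Returns true if this engine's context can skip the inline ppgtt
 * update: either its state has not been allocated yet (it will pick up
 * the new vm when first pinned), or it is a legacy ringbuffer context
 * that is not currently pinned.
 */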
1119 static bool skip_ppgtt_update(struct intel_context *ce, void *data)
1121 if (!test_bit(CONTEXT_ALLOC_BIT, &ce->flags))
1122 return true;
1124 if (HAS_LOGICAL_RING_CONTEXTS(ce->engine->i915))
1125 return false;
1127 if (!atomic_read(&ce->pin_count))
1128 return true;
1130 /* ppGTT is not part of the legacy context image */
1131 if (gen6_ppgtt_pin(i915_vm_to_ppgtt(ce->vm)))
1132 return true;
1134 return false;
1137 static int set_ppgtt(struct drm_i915_file_private *file_priv,
1138 struct i915_gem_context *ctx,
1139 struct drm_i915_gem_context_param *args)
1141 struct i915_address_space *vm, *old;
1142 int err;
1144 if (args->size)
1145 return -EINVAL;
1147 if (!rcu_access_pointer(ctx->vm))
1148 return -ENODEV;
1150 if (upper_32_bits(args->value))
1151 return -ENOENT;
1153 rcu_read_lock();
1154 vm = idr_find(&file_priv->vm_idr, args->value);
1155 if (vm && !kref_get_unless_zero(&vm->ref))
1156 vm = NULL;
1157 rcu_read_unlock();
1158 if (!vm)
1159 return -ENOENT;
1161 err = mutex_lock_interruptible(&ctx->mutex);
1162 if (err)
1163 goto out;
1165 if (i915_gem_context_is_closed(ctx)) {
1166 err = -ENOENT;
1167 goto unlock;
1170 if (vm == rcu_access_pointer(ctx->vm))
1171 goto unlock;
1173 /* Teardown the existing obj:vma cache, it will have to be rebuilt. */
1174 lut_close(ctx);
1176 old = __set_ppgtt(ctx, vm);
1179 * We need to flush any requests using the current ppgtt before
1180 * we release it as the requests do not hold a reference themselves,
1181 * only indirectly through the context.
1183 err = context_barrier_task(ctx, ALL_ENGINES,
1184 skip_ppgtt_update,
1185 emit_ppgtt_update,
1186 set_ppgtt_barrier,
1187 old);
1188 if (err) {
1189 i915_vm_close(__set_ppgtt(ctx, old));
1190 i915_vm_close(old);
1193 unlock:
1194 mutex_unlock(&ctx->mutex);
1195 out:
1196 i915_vm_put(vm);
1197 return err;
1200 static int gen8_emit_rpcs_config(struct i915_request *rq,
1201 struct intel_context *ce,
1202 struct intel_sseu sseu)
1204 u64 offset;
1205 u32 *cs;
1207 cs = intel_ring_begin(rq, 4);
1208 if (IS_ERR(cs))
1209 return PTR_ERR(cs);
1211 offset = i915_ggtt_offset(ce->state) +
1212 LRC_STATE_PN * PAGE_SIZE +
1213 CTX_R_PWR_CLK_STATE * 4;
1215 *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
1216 *cs++ = lower_32_bits(offset);
1217 *cs++ = upper_32_bits(offset);
1218 *cs++ = intel_sseu_make_rpcs(rq->i915, &sseu);
1220 intel_ring_advance(rq, cs);
1222 return 0;
1225 static int
1226 gen8_modify_rpcs(struct intel_context *ce, struct intel_sseu sseu)
1228 struct i915_request *rq;
1229 int ret;
1231 lockdep_assert_held(&ce->pin_mutex);
1234 * If the context is not idle, we have to submit an ordered request to
1235 * modify its context image via the kernel context (writing to our own
1236 * image, or into the registers directly, does not stick). Pristine
1237 * and idle contexts will be configured on pinning.
1239 if (!intel_context_pin_if_active(ce))
1240 return 0;
1242 rq = intel_engine_create_kernel_request(ce->engine);
1243 if (IS_ERR(rq)) {
1244 ret = PTR_ERR(rq);
1245 goto out_unpin;
1248 /* Serialise with the remote context */
1249 ret = intel_context_prepare_remote_request(ce, rq);
1250 if (ret == 0)
1251 ret = gen8_emit_rpcs_config(rq, ce, sseu);
1253 i915_request_add(rq);
1254 out_unpin:
1255 intel_context_unpin(ce);
1256 return ret;
1259 static int
1260 intel_context_reconfigure_sseu(struct intel_context *ce, struct intel_sseu sseu)
1262 int ret;
1264 GEM_BUG_ON(INTEL_GEN(ce->engine->i915) < 8);
1266 ret = intel_context_lock_pinned(ce);
1267 if (ret)
1268 return ret;
1270 /* Nothing to do if unmodified. */
1271 if (!memcmp(&ce->sseu, &sseu, sizeof(sseu)))
1272 goto unlock;
1274 ret = gen8_modify_rpcs(ce, sseu);
1275 if (!ret)
1276 ce->sseu = sseu;
1278 unlock:
1279 intel_context_unlock_pinned(ce);
1280 return ret;
1283 static int
1284 user_to_context_sseu(struct drm_i915_private *i915,
1285 const struct drm_i915_gem_context_param_sseu *user,
1286 struct intel_sseu *context)
1288 const struct sseu_dev_info *device = &RUNTIME_INFO(i915)->sseu;
1290 /* No zeros in any field. */
1291 if (!user->slice_mask || !user->subslice_mask ||
1292 !user->min_eus_per_subslice || !user->max_eus_per_subslice)
1293 return -EINVAL;
1295 /* Max > min. */
1296 if (user->max_eus_per_subslice < user->min_eus_per_subslice)
1297 return -EINVAL;
1300 * Some future proofing on the types since the uAPI is wider than the
1301 * current internal implementation.
1303 if (overflows_type(user->slice_mask, context->slice_mask) ||
1304 overflows_type(user->subslice_mask, context->subslice_mask) ||
1305 overflows_type(user->min_eus_per_subslice,
1306 context->min_eus_per_subslice) ||
1307 overflows_type(user->max_eus_per_subslice,
1308 context->max_eus_per_subslice))
1309 return -EINVAL;
1311 /* Check validity against hardware. */
1312 if (user->slice_mask & ~device->slice_mask)
1313 return -EINVAL;
1315 if (user->subslice_mask & ~device->subslice_mask[0])
1316 return -EINVAL;
1318 if (user->max_eus_per_subslice > device->max_eus_per_subslice)
1319 return -EINVAL;
1321 context->slice_mask = user->slice_mask;
1322 context->subslice_mask = user->subslice_mask;
1323 context->min_eus_per_subslice = user->min_eus_per_subslice;
1324 context->max_eus_per_subslice = user->max_eus_per_subslice;
1326 /* Part specific restrictions. */
1327 if (IS_GEN(i915, 11)) {
1328 unsigned int hw_s = hweight8(device->slice_mask);
1329 unsigned int hw_ss_per_s = hweight8(device->subslice_mask[0]);
1330 unsigned int req_s = hweight8(context->slice_mask);
1331 unsigned int req_ss = hweight8(context->subslice_mask);
1334 * Only full subslice enablement is possible if more than one
1335 * slice is turned on.
1337 if (req_s > 1 && req_ss != hw_ss_per_s)
1338 return -EINVAL;
1341 * If more than four (SScount bitfield limit) subslices are
1342 * requested then the number has to be even.
1344 if (req_ss > 4 && (req_ss & 1))
1345 return -EINVAL;
1348 * If only one slice is enabled and subslice count is below the
1349 * device full enablement, it must be at most half of all the
1350 * available subslices.
1352 if (req_s == 1 && req_ss < hw_ss_per_s &&
1353 req_ss > (hw_ss_per_s / 2))
1354 return -EINVAL;
1356 /* ABI restriction - VME use case only. */
1358 /* All slices or one slice only. */
1359 if (req_s != 1 && req_s != hw_s)
1360 return -EINVAL;
1363 * Half subslices or full enablement only when one slice is
1364 * enabled.
1366 if (req_s == 1 &&
1367 (req_ss != hw_ss_per_s && req_ss != (hw_ss_per_s / 2)))
1368 return -EINVAL;
1370 /* No EU configuration changes. */
1371 if ((user->min_eus_per_subslice !=
1372 device->max_eus_per_subslice) ||
1373 (user->max_eus_per_subslice !=
1374 device->max_eus_per_subslice))
1375 return -EINVAL;
1378 return 0;
1381 static int set_sseu(struct i915_gem_context *ctx,
1382 struct drm_i915_gem_context_param *args)
1384 struct drm_i915_private *i915 = ctx->i915;
1385 struct drm_i915_gem_context_param_sseu user_sseu;
1386 struct intel_context *ce;
1387 struct intel_sseu sseu;
1388 unsigned long lookup;
1389 int ret;
1391 if (args->size < sizeof(user_sseu))
1392 return -EINVAL;
1394 if (!IS_GEN(i915, 11))
1395 return -ENODEV;
1397 if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value),
1398 sizeof(user_sseu)))
1399 return -EFAULT;
1401 if (user_sseu.rsvd)
1402 return -EINVAL;
1404 if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX))
1405 return -EINVAL;
1407 lookup = 0;
1408 if (user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX)
1409 lookup |= LOOKUP_USER_INDEX;
1411 ce = lookup_user_engine(ctx, lookup, &user_sseu.engine);
1412 if (IS_ERR(ce))
1413 return PTR_ERR(ce);
1415 /* Only render engine supports RPCS configuration. */
1416 if (ce->engine->class != RENDER_CLASS) {
1417 ret = -ENODEV;
1418 goto out_ce;
1421 ret = user_to_context_sseu(i915, &user_sseu, &sseu);
1422 if (ret)
1423 goto out_ce;
1425 ret = intel_context_reconfigure_sseu(ce, sseu);
1426 if (ret)
1427 goto out_ce;
1429 args->size = sizeof(user_sseu);
1431 out_ce:
1432 intel_context_put(ce);
1433 return ret;
1436 struct set_engines {
1437 struct i915_gem_context *ctx;
1438 struct i915_gem_engines *engines;
1441 static int
1442 set_engines__load_balance(struct i915_user_extension __user *base, void *data)
1444 struct i915_context_engines_load_balance __user *ext =
1445 container_of_user(base, typeof(*ext), base);
1446 const struct set_engines *set = data;
1447 struct intel_engine_cs *stack[16];
1448 struct intel_engine_cs **siblings;
1449 struct intel_context *ce;
1450 u16 num_siblings, idx;
1451 unsigned int n;
1452 int err;
1454 if (!HAS_EXECLISTS(set->ctx->i915))
1455 return -ENODEV;
1457 if (USES_GUC_SUBMISSION(set->ctx->i915))
1458 return -ENODEV; /* not implemented yet */
1460 if (get_user(idx, &ext->engine_index))
1461 return -EFAULT;
1463 if (idx >= set->engines->num_engines) {
1464 DRM_DEBUG("Invalid placement value, %d >= %d\n",
1465 idx, set->engines->num_engines);
1466 return -EINVAL;
1469 idx = array_index_nospec(idx, set->engines->num_engines);
1470 if (set->engines->engines[idx]) {
1471 DRM_DEBUG("Invalid placement[%d], already occupied\n", idx);
1472 return -EEXIST;
1475 if (get_user(num_siblings, &ext->num_siblings))
1476 return -EFAULT;
1478 err = check_user_mbz(&ext->flags);
1479 if (err)
1480 return err;
1482 err = check_user_mbz(&ext->mbz64);
1483 if (err)
1484 return err;
1486 siblings = stack;
1487 if (num_siblings > ARRAY_SIZE(stack)) {
1488 siblings = kmalloc_array(num_siblings,
1489 sizeof(*siblings),
1490 GFP_KERNEL);
1491 if (!siblings)
1492 return -ENOMEM;
1495 for (n = 0; n < num_siblings; n++) {
1496 struct i915_engine_class_instance ci;
1498 if (copy_from_user(&ci, &ext->engines[n], sizeof(ci))) {
1499 err = -EFAULT;
1500 goto out_siblings;
1503 siblings[n] = intel_engine_lookup_user(set->ctx->i915,
1504 ci.engine_class,
1505 ci.engine_instance);
1506 if (!siblings[n]) {
1507 DRM_DEBUG("Invalid sibling[%d]: { class:%d, inst:%d }\n",
1508 n, ci.engine_class, ci.engine_instance);
1509 err = -EINVAL;
1510 goto out_siblings;
1514 ce = intel_execlists_create_virtual(siblings, n);
1515 if (IS_ERR(ce)) {
1516 err = PTR_ERR(ce);
1517 goto out_siblings;
1520 intel_context_set_gem(ce, set->ctx);
1522 if (cmpxchg(&set->engines->engines[idx], NULL, ce)) {
1523 intel_context_put(ce);
1524 err = -EEXIST;
1525 goto out_siblings;
1528 out_siblings:
1529 if (siblings != stack)
1530 kfree(siblings);
1532 return err;
1535 static int
1536 set_engines__bond(struct i915_user_extension __user *base, void *data)
1538 struct i915_context_engines_bond __user *ext =
1539 container_of_user(base, typeof(*ext), base);
1540 const struct set_engines *set = data;
1541 struct i915_engine_class_instance ci;
1542 struct intel_engine_cs *virtual;
1543 struct intel_engine_cs *master;
1544 u16 idx, num_bonds;
1545 int err, n;
1547 if (get_user(idx, &ext->virtual_index))
1548 return -EFAULT;
1550 if (idx >= set->engines->num_engines) {
1551 DRM_DEBUG("Invalid index for virtual engine: %d >= %d\n",
1552 idx, set->engines->num_engines);
1553 return -EINVAL;
1556 idx = array_index_nospec(idx, set->engines->num_engines);
1557 if (!set->engines->engines[idx]) {
1558 DRM_DEBUG("Invalid engine at %d\n", idx);
1559 return -EINVAL;
1561 virtual = set->engines->engines[idx]->engine;
1563 err = check_user_mbz(&ext->flags);
1564 if (err)
1565 return err;
1567 for (n = 0; n < ARRAY_SIZE(ext->mbz64); n++) {
1568 err = check_user_mbz(&ext->mbz64[n]);
1569 if (err)
1570 return err;
1573 if (copy_from_user(&ci, &ext->master, sizeof(ci)))
1574 return -EFAULT;
1576 master = intel_engine_lookup_user(set->ctx->i915,
1577 ci.engine_class, ci.engine_instance);
1578 if (!master) {
1579 DRM_DEBUG("Unrecognised master engine: { class:%u, instance:%u }\n",
1580 ci.engine_class, ci.engine_instance);
1581 return -EINVAL;
1584 if (get_user(num_bonds, &ext->num_bonds))
1585 return -EFAULT;
1587 for (n = 0; n < num_bonds; n++) {
1588 struct intel_engine_cs *bond;
1590 if (copy_from_user(&ci, &ext->engines[n], sizeof(ci)))
1591 return -EFAULT;
1593 bond = intel_engine_lookup_user(set->ctx->i915,
1594 ci.engine_class,
1595 ci.engine_instance);
1596 if (!bond) {
1597 DRM_DEBUG("Unrecognised engine[%d] for bonding: { class:%d, instance: %d }\n",
1598 n, ci.engine_class, ci.engine_instance);
1599 return -EINVAL;
1603 * A non-virtual engine has no siblings to choose between; and
1604 * a submit fence will always be directed to the one engine.
1606 if (intel_engine_is_virtual(virtual)) {
1607 err = intel_virtual_engine_attach_bond(virtual,
1608 master,
1609 bond);
1610 if (err)
1611 return err;
1615 return 0;
1618 static const i915_user_extension_fn set_engines__extensions[] = {
1619 [I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE] = set_engines__load_balance,
1620 [I915_CONTEXT_ENGINES_EXT_BOND] = set_engines__bond,
1623 static int
1624 set_engines(struct i915_gem_context *ctx,
1625 const struct drm_i915_gem_context_param *args)
1627 struct i915_context_param_engines __user *user =
1628 u64_to_user_ptr(args->value);
1629 struct set_engines set = { .ctx = ctx };
1630 unsigned int num_engines, n;
1631 u64 extensions;
1632 int err;
1634 if (!args->size) { /* switch back to legacy user_ring_map */
1635 if (!i915_gem_context_user_engines(ctx))
1636 return 0;
1638 set.engines = default_engines(ctx);
1639 if (IS_ERR(set.engines))
1640 return PTR_ERR(set.engines);
1642 goto replace;
1645 BUILD_BUG_ON(!IS_ALIGNED(sizeof(*user), sizeof(*user->engines)));
1646 if (args->size < sizeof(*user) ||
1647 !IS_ALIGNED(args->size, sizeof(*user->engines))) {
1648 DRM_DEBUG("Invalid size for engine array: %d\n",
1649 args->size);
1650 return -EINVAL;
1654 * Note that I915_EXEC_RING_MASK limits execbuf to only using the
1655 * first 64 engines defined here.
1657 num_engines = (args->size - sizeof(*user)) / sizeof(*user->engines);
1659 set.engines = kmalloc(struct_size(set.engines, engines, num_engines),
1660 GFP_KERNEL);
1661 if (!set.engines)
1662 return -ENOMEM;
1664 init_rcu_head(&set.engines->rcu);
1665 for (n = 0; n < num_engines; n++) {
1666 struct i915_engine_class_instance ci;
1667 struct intel_engine_cs *engine;
1668 struct intel_context *ce;
1670 if (copy_from_user(&ci, &user->engines[n], sizeof(ci))) {
1671 __free_engines(set.engines, n);
1672 return -EFAULT;
1675 if (ci.engine_class == (u16)I915_ENGINE_CLASS_INVALID &&
1676 ci.engine_instance == (u16)I915_ENGINE_CLASS_INVALID_NONE) {
1677 set.engines->engines[n] = NULL;
1678 continue;
1681 engine = intel_engine_lookup_user(ctx->i915,
1682 ci.engine_class,
1683 ci.engine_instance);
1684 if (!engine) {
1685 DRM_DEBUG("Invalid engine[%d]: { class:%d, instance:%d }\n",
1686 n, ci.engine_class, ci.engine_instance);
1687 __free_engines(set.engines, n);
1688 return -ENOENT;
1691 ce = intel_context_create(engine);
1692 if (IS_ERR(ce)) {
1693 __free_engines(set.engines, n);
1694 return PTR_ERR(ce);
1697 intel_context_set_gem(ce, ctx);
1699 set.engines->engines[n] = ce;
1701 set.engines->num_engines = num_engines;
1703 err = -EFAULT;
1704 if (!get_user(extensions, &user->extensions))
1705 err = i915_user_extensions(u64_to_user_ptr(extensions),
1706 set_engines__extensions,
1707 ARRAY_SIZE(set_engines__extensions),
1708 &set);
1709 if (err) {
1710 free_engines(set.engines);
1711 return err;
1714 replace:
1715 mutex_lock(&ctx->engines_mutex);
1716 if (args->size)
1717 i915_gem_context_set_user_engines(ctx);
1718 else
1719 i915_gem_context_clear_user_engines(ctx);
1720 set.engines = rcu_replace_pointer(ctx->engines, set.engines, 1);
1721 mutex_unlock(&ctx->engines_mutex);
1723 call_rcu(&set.engines->rcu, free_engines_rcu);
1725 return 0;
1728 static struct i915_gem_engines *
1729 __copy_engines(struct i915_gem_engines *e)
1731 struct i915_gem_engines *copy;
1732 unsigned int n;
1734 copy = kmalloc(struct_size(e, engines, e->num_engines), GFP_KERNEL);
1735 if (!copy)
1736 return ERR_PTR(-ENOMEM);
1738 init_rcu_head(&copy->rcu);
1739 for (n = 0; n < e->num_engines; n++) {
1740 if (e->engines[n])
1741 copy->engines[n] = intel_context_get(e->engines[n]);
1742 else
1743 copy->engines[n] = NULL;
1745 copy->num_engines = n;
1747 return copy;
1750 static int
1751 get_engines(struct i915_gem_context *ctx,
1752 struct drm_i915_gem_context_param *args)
1754 struct i915_context_param_engines __user *user;
1755 struct i915_gem_engines *e;
1756 size_t n, count, size;
1757 int err = 0;
1759 err = mutex_lock_interruptible(&ctx->engines_mutex);
1760 if (err)
1761 return err;
1763 e = NULL;
1764 if (i915_gem_context_user_engines(ctx))
1765 e = __copy_engines(i915_gem_context_engines(ctx));
1766 mutex_unlock(&ctx->engines_mutex);
1767 if (IS_ERR_OR_NULL(e)) {
1768 args->size = 0;
1769 return PTR_ERR_OR_ZERO(e);
1772 count = e->num_engines;
1774 /* Be paranoid in case we have an impedance mismatch */
1775 if (!check_struct_size(user, engines, count, &size)) {
1776 err = -EINVAL;
1777 goto err_free;
1779 if (overflows_type(size, args->size)) {
1780 err = -EINVAL;
1781 goto err_free;
1784 if (!args->size) {
1785 args->size = size;
1786 goto err_free;
1789 if (args->size < size) {
1790 err = -EINVAL;
1791 goto err_free;
1794 user = u64_to_user_ptr(args->value);
1795 if (!access_ok(user, size)) {
1796 err = -EFAULT;
1797 goto err_free;
1800 if (put_user(0, &user->extensions)) {
1801 err = -EFAULT;
1802 goto err_free;
1805 for (n = 0; n < count; n++) {
1806 struct i915_engine_class_instance ci = {
1807 .engine_class = I915_ENGINE_CLASS_INVALID,
1808 .engine_instance = I915_ENGINE_CLASS_INVALID_NONE,
1811 if (e->engines[n]) {
1812 ci.engine_class = e->engines[n]->engine->uabi_class;
1813 ci.engine_instance = e->engines[n]->engine->uabi_instance;
1816 if (copy_to_user(&user->engines[n], &ci, sizeof(ci))) {
1817 err = -EFAULT;
1818 goto err_free;
1822 args->size = size;
1824 err_free:
1825 free_engines(e);
1826 return err;
1829 static int
1830 set_persistence(struct i915_gem_context *ctx,
1831 const struct drm_i915_gem_context_param *args)
1833 if (args->size)
1834 return -EINVAL;
1836 return __context_set_persistence(ctx, args->value);
1839 static void __apply_priority(struct intel_context *ce, void *arg)
1841 struct i915_gem_context *ctx = arg;
1843 if (!intel_engine_has_semaphores(ce->engine))
1844 return;
1846 if (ctx->sched.priority >= I915_PRIORITY_NORMAL)
1847 intel_context_set_use_semaphores(ce);
1848 else
1849 intel_context_clear_use_semaphores(ce);
1852 static int set_priority(struct i915_gem_context *ctx,
1853 const struct drm_i915_gem_context_param *args)
1855 s64 priority = args->value;
1857 if (args->size)
1858 return -EINVAL;
1860 if (!(ctx->i915->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY))
1861 return -ENODEV;
1863 if (priority > I915_CONTEXT_MAX_USER_PRIORITY ||
1864 priority < I915_CONTEXT_MIN_USER_PRIORITY)
1865 return -EINVAL;
1867 if (priority > I915_CONTEXT_DEFAULT_PRIORITY &&
1868 !capable(CAP_SYS_NICE))
1869 return -EPERM;
1871 ctx->sched.priority = I915_USER_PRIORITY(priority);
1872 context_apply_all(ctx, __apply_priority, ctx);
1874 return 0;
1877 static int ctx_setparam(struct drm_i915_file_private *fpriv,
1878 struct i915_gem_context *ctx,
1879 struct drm_i915_gem_context_param *args)
1881 int ret = 0;
1883 switch (args->param) {
1884 case I915_CONTEXT_PARAM_NO_ZEROMAP:
1885 if (args->size)
1886 ret = -EINVAL;
1887 else if (args->value)
1888 set_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags);
1889 else
1890 clear_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags);
1891 break;
1893 case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
1894 if (args->size)
1895 ret = -EINVAL;
1896 else if (args->value)
1897 i915_gem_context_set_no_error_capture(ctx);
1898 else
1899 i915_gem_context_clear_no_error_capture(ctx);
1900 break;
1902 case I915_CONTEXT_PARAM_BANNABLE:
1903 if (args->size)
1904 ret = -EINVAL;
1905 else if (!capable(CAP_SYS_ADMIN) && !args->value)
1906 ret = -EPERM;
1907 else if (args->value)
1908 i915_gem_context_set_bannable(ctx);
1909 else
1910 i915_gem_context_clear_bannable(ctx);
1911 break;
1913 case I915_CONTEXT_PARAM_RECOVERABLE:
1914 if (args->size)
1915 ret = -EINVAL;
1916 else if (args->value)
1917 i915_gem_context_set_recoverable(ctx);
1918 else
1919 i915_gem_context_clear_recoverable(ctx);
1920 break;
1922 case I915_CONTEXT_PARAM_PRIORITY:
1923 ret = set_priority(ctx, args);
1924 break;
1926 case I915_CONTEXT_PARAM_SSEU:
1927 ret = set_sseu(ctx, args);
1928 break;
1930 case I915_CONTEXT_PARAM_VM:
1931 ret = set_ppgtt(fpriv, ctx, args);
1932 break;
1934 case I915_CONTEXT_PARAM_ENGINES:
1935 ret = set_engines(ctx, args);
1936 break;
1938 case I915_CONTEXT_PARAM_PERSISTENCE:
1939 ret = set_persistence(ctx, args);
1940 break;
1942 case I915_CONTEXT_PARAM_BAN_PERIOD:
1943 default:
1944 ret = -EINVAL;
1945 break;
1948 return ret;
1951 struct create_ext {
1952 struct i915_gem_context *ctx;
1953 struct drm_i915_file_private *fpriv;
1956 static int create_setparam(struct i915_user_extension __user *ext, void *data)
1958 struct drm_i915_gem_context_create_ext_setparam local;
1959 const struct create_ext *arg = data;
1961 if (copy_from_user(&local, ext, sizeof(local)))
1962 return -EFAULT;
1964 if (local.param.ctx_id)
1965 return -EINVAL;
1967 return ctx_setparam(arg->fpriv, arg->ctx, &local.param);
1970 static int clone_engines(struct i915_gem_context *dst,
1971 struct i915_gem_context *src)
1973 struct i915_gem_engines *e = i915_gem_context_lock_engines(src);
1974 struct i915_gem_engines *clone;
1975 bool user_engines;
1976 unsigned long n;
1978 clone = kmalloc(struct_size(e, engines, e->num_engines), GFP_KERNEL);
1979 if (!clone)
1980 goto err_unlock;
1982 init_rcu_head(&clone->rcu);
1983 for (n = 0; n < e->num_engines; n++) {
1984 struct intel_engine_cs *engine;
1986 if (!e->engines[n]) {
1987 clone->engines[n] = NULL;
1988 continue;
1990 engine = e->engines[n]->engine;
1993 * Virtual engines are singletons; they can only exist
1994 * inside a single context, because they embed their
1995 * HW context... As each virtual context implies a single
1996 * timeline (each engine can only dequeue a single request
1997 * at any time), it would be surprising for two contexts
1998 * to use the same engine. So let's create a copy of
1999 * the virtual engine instead.
2001 if (intel_engine_is_virtual(engine))
2002 clone->engines[n] =
2003 intel_execlists_clone_virtual(engine);
2004 else
2005 clone->engines[n] = intel_context_create(engine);
2006 if (IS_ERR_OR_NULL(clone->engines[n])) {
2007 __free_engines(clone, n);
2008 goto err_unlock;
2011 intel_context_set_gem(clone->engines[n], dst);
2013 clone->num_engines = n;
2015 user_engines = i915_gem_context_user_engines(src);
2016 i915_gem_context_unlock_engines(src);
2018 /* Serialised by constructor */
2019 free_engines(__context_engines_static(dst));
2020 RCU_INIT_POINTER(dst->engines, clone);
2021 if (user_engines)
2022 i915_gem_context_set_user_engines(dst);
2023 else
2024 i915_gem_context_clear_user_engines(dst);
2025 return 0;
2027 err_unlock:
2028 i915_gem_context_unlock_engines(src);
2029 return -ENOMEM;
2032 static int clone_flags(struct i915_gem_context *dst,
2033 struct i915_gem_context *src)
2035 dst->user_flags = src->user_flags;
2036 return 0;
2039 static int clone_schedattr(struct i915_gem_context *dst,
2040 struct i915_gem_context *src)
2042 dst->sched = src->sched;
2043 return 0;
2046 static int clone_sseu(struct i915_gem_context *dst,
2047 struct i915_gem_context *src)
2049 struct i915_gem_engines *e = i915_gem_context_lock_engines(src);
2050 struct i915_gem_engines *clone;
2051 unsigned long n;
2052 int err;
2054 /* no locking required; sole access under constructor */
2055 clone = __context_engines_static(dst);
2056 if (e->num_engines != clone->num_engines) {
2057 err = -EINVAL;
2058 goto unlock;
2061 for (n = 0; n < e->num_engines; n++) {
2062 struct intel_context *ce = e->engines[n];
2064 if (clone->engines[n]->engine->class != ce->engine->class) {
2065 /* Must have compatible engine maps! */
2066 err = -EINVAL;
2067 goto unlock;
2070 /* serialises with set_sseu */
2071 err = intel_context_lock_pinned(ce);
2072 if (err)
2073 goto unlock;
2075 clone->engines[n]->sseu = ce->sseu;
2076 intel_context_unlock_pinned(ce);
2079 err = 0;
2080 unlock:
2081 i915_gem_context_unlock_engines(src);
2082 return err;
2085 static int clone_timeline(struct i915_gem_context *dst,
2086 struct i915_gem_context *src)
2088 if (src->timeline)
2089 __assign_timeline(dst, src->timeline);
2091 return 0;
2094 static int clone_vm(struct i915_gem_context *dst,
2095 struct i915_gem_context *src)
2097 struct i915_address_space *vm;
2098 int err = 0;
2100 if (!rcu_access_pointer(src->vm))
2101 return 0;
2103 rcu_read_lock();
2104 vm = context_get_vm_rcu(src);
2105 rcu_read_unlock();
2107 if (!mutex_lock_interruptible(&dst->mutex)) {
2108 __assign_ppgtt(dst, vm);
2109 mutex_unlock(&dst->mutex);
2110 } else {
2111 err = -EINTR;
2114 i915_vm_put(vm);
2115 return err;
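/*
 * Handle the CLONE create extension: look up the source context named by
 * clone_id and copy each property selected in local.flags across to the
 * new context using the per-bit clone functions in fn[].
 */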
2118 static int create_clone(struct i915_user_extension __user *ext, void *data)
2120 static int (* const fn[])(struct i915_gem_context *dst,
2121 struct i915_gem_context *src) = {
2122 #define MAP(x, y) [ilog2(I915_CONTEXT_CLONE_##x)] = y
2123 MAP(ENGINES, clone_engines),
2124 MAP(FLAGS, clone_flags),
2125 MAP(SCHEDATTR, clone_schedattr),
2126 MAP(SSEU, clone_sseu),
2127 MAP(TIMELINE, clone_timeline),
2128 MAP(VM, clone_vm),
2129 #undef MAP
2131 struct drm_i915_gem_context_create_ext_clone local;
2132 const struct create_ext *arg = data;
2133 struct i915_gem_context *dst = arg->ctx;
2134 struct i915_gem_context *src;
2135 int err, bit;
2137 if (copy_from_user(&local, ext, sizeof(local)))
2138 return -EFAULT;
2140 BUILD_BUG_ON(GENMASK(BITS_PER_TYPE(local.flags) - 1, ARRAY_SIZE(fn)) !=
2141 I915_CONTEXT_CLONE_UNKNOWN);
2143 if (local.flags & I915_CONTEXT_CLONE_UNKNOWN)
2144 return -EINVAL;
2146 if (local.rsvd)
2147 return -EINVAL;
2149 rcu_read_lock();
2150 src = __i915_gem_context_lookup_rcu(arg->fpriv, local.clone_id);
2151 rcu_read_unlock();
2152 if (!src)
2153 return -ENOENT;
2155 GEM_BUG_ON(src == dst);
2157 for (bit = 0; bit < ARRAY_SIZE(fn); bit++) {
2158 if (!(local.flags & BIT(bit)))
2159 continue;
2161 err = fn[bit](dst, src);
2162 if (err)
2163 return err;
2166 return 0;
2169 static const i915_user_extension_fn create_extensions[] = {
2170 [I915_CONTEXT_CREATE_EXT_SETPARAM] = create_setparam,
2171 [I915_CONTEXT_CREATE_EXT_CLONE] = create_clone,
2174 static bool client_is_banned(struct drm_i915_file_private *file_priv)
2176 return atomic_read(&file_priv->ban_score) >= I915_CLIENT_SCORE_BANNED;
2179 int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
2180 struct drm_file *file)
2182 struct drm_i915_private *i915 = to_i915(dev);
2183 struct drm_i915_gem_context_create_ext *args = data;
2184 struct create_ext ext_data;
2185 int ret;
2186 u32 id;
2188 if (!DRIVER_CAPS(i915)->has_logical_contexts)
2189 return -ENODEV;
2191 if (args->flags & I915_CONTEXT_CREATE_FLAGS_UNKNOWN)
2192 return -EINVAL;
2194 ret = intel_gt_terminally_wedged(&i915->gt);
2195 if (ret)
2196 return ret;
2198 ext_data.fpriv = file->driver_priv;
2199 if (client_is_banned(ext_data.fpriv)) {
2200 DRM_DEBUG("client %s[%d] banned from creating ctx\n",
2201 current->comm, task_pid_nr(current));
2202 return -EIO;
2205 ext_data.ctx = i915_gem_create_context(i915, args->flags);
2206 if (IS_ERR(ext_data.ctx))
2207 return PTR_ERR(ext_data.ctx);
2209 if (args->flags & I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS) {
2210 ret = i915_user_extensions(u64_to_user_ptr(args->extensions),
2211 create_extensions,
2212 ARRAY_SIZE(create_extensions),
2213 &ext_data);
2214 if (ret)
2215 goto err_ctx;
2218 ret = gem_context_register(ext_data.ctx, ext_data.fpriv, &id);
2219 if (ret < 0)
2220 goto err_ctx;
2222 args->ctx_id = id;
2223 DRM_DEBUG("HW context %d created\n", args->ctx_id);
2225 return 0;
2227 err_ctx:
2228 context_close(ext_data.ctx);
2229 return ret;
2232 int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
2233 struct drm_file *file)
2235 struct drm_i915_gem_context_destroy *args = data;
2236 struct drm_i915_file_private *file_priv = file->driver_priv;
2237 struct i915_gem_context *ctx;
2239 if (args->pad != 0)
2240 return -EINVAL;
2242 if (!args->ctx_id)
2243 return -ENOENT;
2245 ctx = xa_erase(&file_priv->context_xa, args->ctx_id);
2246 if (!ctx)
2247 return -ENOENT;
2249 context_close(ctx);
2250 return 0;
2253 static int get_sseu(struct i915_gem_context *ctx,
2254 struct drm_i915_gem_context_param *args)
2256 struct drm_i915_gem_context_param_sseu user_sseu;
2257 struct intel_context *ce;
2258 unsigned long lookup;
2259 int err;
2261 if (args->size == 0)
2262 goto out;
2263 else if (args->size < sizeof(user_sseu))
2264 return -EINVAL;
2266 if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value),
2267 sizeof(user_sseu)))
2268 return -EFAULT;
2270 if (user_sseu.rsvd)
2271 return -EINVAL;
2273 if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX))
2274 return -EINVAL;
2276 lookup = 0;
2277 if (user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX)
2278 lookup |= LOOKUP_USER_INDEX;
2280 ce = lookup_user_engine(ctx, lookup, &user_sseu.engine);
2281 if (IS_ERR(ce))
2282 return PTR_ERR(ce);
2284 err = intel_context_lock_pinned(ce); /* serialises with set_sseu */
2285 if (err) {
2286 intel_context_put(ce);
2287 return err;
2290 user_sseu.slice_mask = ce->sseu.slice_mask;
2291 user_sseu.subslice_mask = ce->sseu.subslice_mask;
2292 user_sseu.min_eus_per_subslice = ce->sseu.min_eus_per_subslice;
2293 user_sseu.max_eus_per_subslice = ce->sseu.max_eus_per_subslice;
2295 intel_context_unlock_pinned(ce);
2296 intel_context_put(ce);
2298 if (copy_to_user(u64_to_user_ptr(args->value), &user_sseu,
2299 sizeof(user_sseu)))
2300 return -EFAULT;
2302 out:
2303 args->size = sizeof(user_sseu);
2305 return 0;
2308 int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
2309 struct drm_file *file)
2311 struct drm_i915_file_private *file_priv = file->driver_priv;
2312 struct drm_i915_gem_context_param *args = data;
2313 struct i915_gem_context *ctx;
2314 int ret = 0;
2316 ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
2317 if (!ctx)
2318 return -ENOENT;
2320 switch (args->param) {
2321 case I915_CONTEXT_PARAM_NO_ZEROMAP:
2322 args->size = 0;
2323 args->value = test_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags);
2324 break;
2326 case I915_CONTEXT_PARAM_GTT_SIZE:
2327 args->size = 0;
2328 rcu_read_lock();
2329 if (rcu_access_pointer(ctx->vm))
2330 args->value = rcu_dereference(ctx->vm)->total;
2331 else
2332 args->value = to_i915(dev)->ggtt.vm.total;
2333 rcu_read_unlock();
2334 break;
2336 case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
2337 args->size = 0;
2338 args->value = i915_gem_context_no_error_capture(ctx);
2339 break;
2341 case I915_CONTEXT_PARAM_BANNABLE:
2342 args->size = 0;
2343 args->value = i915_gem_context_is_bannable(ctx);
2344 break;
2346 case I915_CONTEXT_PARAM_RECOVERABLE:
2347 args->size = 0;
2348 args->value = i915_gem_context_is_recoverable(ctx);
2349 break;
2351 case I915_CONTEXT_PARAM_PRIORITY:
2352 args->size = 0;
2353 args->value = ctx->sched.priority >> I915_USER_PRIORITY_SHIFT;
2354 break;
2356 case I915_CONTEXT_PARAM_SSEU:
2357 ret = get_sseu(ctx, args);
2358 break;
2360 case I915_CONTEXT_PARAM_VM:
2361 ret = get_ppgtt(file_priv, ctx, args);
2362 break;
2364 case I915_CONTEXT_PARAM_ENGINES:
2365 ret = get_engines(ctx, args);
2366 break;
2368 case I915_CONTEXT_PARAM_PERSISTENCE:
2369 args->size = 0;
2370 args->value = i915_gem_context_is_persistent(ctx);
2371 break;
2373 case I915_CONTEXT_PARAM_BAN_PERIOD:
2374 default:
2375 ret = -EINVAL;
2376 break;
2379 i915_gem_context_put(ctx);
2380 return ret;
2383 int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
2384 struct drm_file *file)
2386 struct drm_i915_file_private *file_priv = file->driver_priv;
2387 struct drm_i915_gem_context_param *args = data;
2388 struct i915_gem_context *ctx;
2389 int ret;
2391 ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
2392 if (!ctx)
2393 return -ENOENT;
2395 ret = ctx_setparam(file_priv, ctx, args);
2397 i915_gem_context_put(ctx);
2398 return ret;
2401 int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
2402 void *data, struct drm_file *file)
2404 struct drm_i915_private *i915 = to_i915(dev);
2405 struct drm_i915_reset_stats *args = data;
2406 struct i915_gem_context *ctx;
2407 int ret;
2409 if (args->flags || args->pad)
2410 return -EINVAL;
2412 ret = -ENOENT;
2413 rcu_read_lock();
2414 ctx = __i915_gem_context_lookup_rcu(file->driver_priv, args->ctx_id);
2415 if (!ctx)
2416 goto out;
2419 * We opt for unserialised reads here. This may result in tearing
2420 * in the extremely unlikely event of a GPU hang on this context
2421 * as we are querying them. If we need that extra layer of protection,
2422 * we should wrap the hangstats with a seqlock.
2425 if (capable(CAP_SYS_ADMIN))
2426 args->reset_count = i915_reset_count(&i915->gpu_error);
2427 else
2428 args->reset_count = 0;
2430 args->batch_active = atomic_read(&ctx->guilty_count);
2431 args->batch_pending = atomic_read(&ctx->active_count);
2433 ret = 0;
2434 out:
2435 rcu_read_unlock();
2436 return ret;
2439 /* GEM context-engines iterator: for_each_gem_engine() */
2440 struct intel_context *
2441 i915_gem_engines_iter_next(struct i915_gem_engines_iter *it)
2443 const struct i915_gem_engines *e = it->engines;
2444 struct intel_context *ctx;
2446 do {
2447 if (it->idx >= e->num_engines)
2448 return NULL;
2450 ctx = e->engines[it->idx++];
2451 } while (!ctx);
2453 return ctx;
2456 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
2457 #include "selftests/mock_context.c"
2458 #include "selftests/i915_gem_context.c"
2459 #endif
2461 static void i915_global_gem_context_shrink(void)
2463 kmem_cache_shrink(global.slab_luts);
2466 static void i915_global_gem_context_exit(void)
2468 kmem_cache_destroy(global.slab_luts);
2471 static struct i915_global_gem_context global = { {
2472 .shrink = i915_global_gem_context_shrink,
2473 .exit = i915_global_gem_context_exit,
2474 } };
2476 int __init i915_global_gem_context_init(void)
2478 global.slab_luts = KMEM_CACHE(i915_lut_handle, 0);
2479 if (!global.slab_luts)
2480 return -ENOMEM;
2482 i915_global_register(&global.base);
2483 return 0;