WIP FPC-III support
[linux/fpc-iii.git] / drivers / gpu / drm / i915 / gt / intel_engine_user.c
blob34e6096f196ed8eb647cfcce68faf427a23e60cf
1 /*
2 * SPDX-License-Identifier: MIT
4 * Copyright © 2019 Intel Corporation
5 */
7 #include <linux/list.h>
8 #include <linux/list_sort.h>
9 #include <linux/llist.h>
11 #include "i915_drv.h"
12 #include "intel_engine.h"
13 #include "intel_engine_user.h"
14 #include "intel_gt.h"
16 struct intel_engine_cs *
17 intel_engine_lookup_user(struct drm_i915_private *i915, u8 class, u8 instance)
19 struct rb_node *p = i915->uabi_engines.rb_node;
21 while (p) {
22 struct intel_engine_cs *it =
23 rb_entry(p, typeof(*it), uabi_node);
25 if (class < it->uabi_class)
26 p = p->rb_left;
27 else if (class > it->uabi_class ||
28 instance > it->uabi_instance)
29 p = p->rb_right;
30 else if (instance < it->uabi_instance)
31 p = p->rb_left;
32 else
33 return it;
36 return NULL;
/*
 * intel_engine_add_user - queue an engine for uabi registration.
 * @engine: the engine to expose to userspace later
 *
 * Until intel_engines_driver_register() runs, i915->uabi_engines is
 * used as a lock-free llist of freshly added engines rather than as an
 * rbtree.  The casts rely on the uabi_node storage being large enough to
 * alias a llist_node / llist_head — NOTE(review): this aliasing contract
 * is established by the struct definitions elsewhere; confirm there.
 */
void intel_engine_add_user(struct intel_engine_cs *engine)
{
	llist_add((struct llist_node *)&engine->uabi_node,
		  (struct llist_head *)&engine->i915->uabi_engines);
}
/* Map hardware engine class (RENDER_CLASS, ...) to the uabi class reported
 * to userspace (I915_ENGINE_CLASS_*).  Indexed by engine->class. */
static const u8 uabi_classes[] = {
	[RENDER_CLASS] = I915_ENGINE_CLASS_RENDER,
	[COPY_ENGINE_CLASS] = I915_ENGINE_CLASS_COPY,
	[VIDEO_DECODE_CLASS] = I915_ENGINE_CLASS_VIDEO,
	[VIDEO_ENHANCEMENT_CLASS] = I915_ENGINE_CLASS_VIDEO_ENHANCE,
};
52 static int engine_cmp(void *priv, struct list_head *A, struct list_head *B)
54 const struct intel_engine_cs *a =
55 container_of((struct rb_node *)A, typeof(*a), uabi_node);
56 const struct intel_engine_cs *b =
57 container_of((struct rb_node *)B, typeof(*b), uabi_node);
59 if (uabi_classes[a->class] < uabi_classes[b->class])
60 return -1;
61 if (uabi_classes[a->class] > uabi_classes[b->class])
62 return 1;
64 if (a->instance < b->instance)
65 return -1;
66 if (a->instance > b->instance)
67 return 1;
69 return 0;
/* Atomically take ownership of all engines queued by
 * intel_engine_add_user(), returning the detached llist.  The cast mirrors
 * the one in intel_engine_add_user(): uabi_engines doubles as a llist_head
 * until registration completes. */
static struct llist_node *get_engines(struct drm_i915_private *i915)
{
	return llist_del_all((struct llist_head *)&i915->uabi_engines);
}
/*
 * Drain the llist of freshly added engines into @engines and sort it into
 * (uabi class, instance) order, ready for in-order rbtree construction.
 *
 * The same uabi_node storage is reinterpreted three ways here:
 * llist_node (producer side), list_head (for list_sort), and eventually
 * rb_node (the final tree) — hence the casts.  list_add() reverses the
 * llist order, but the subsequent list_sort() makes that irrelevant.
 */
static void sort_engines(struct drm_i915_private *i915,
			 struct list_head *engines)
{
	struct llist_node *pos, *next;

	/* _safe: list_add() overwrites the node's link fields. */
	llist_for_each_safe(pos, next, get_engines(i915)) {
		struct intel_engine_cs *engine =
			container_of((struct rb_node *)pos, typeof(*engine),
				     uabi_node);
		list_add((struct list_head *)&engine->uabi_node, engines);
	}
	list_sort(NULL, engines, engine_cmp);
}
/*
 * Derive the device-wide scheduler capabilities exposed via
 * i915->caps.scheduler.  A capability is advertised only if *every* uabi
 * engine supports it: each engine votes into @enabled or @disabled, and
 * the final mask is enabled & ~disabled.
 */
static void set_scheduler_caps(struct drm_i915_private *i915)
{
	/* Pairs of (engine flag bit, scheduler cap bit), both stored as
	 * bit indices (ilog2) so they fit in a u8. */
	static const struct {
		u8 engine;
		u8 sched;
	} map[] = {
#define MAP(x, y) { ilog2(I915_ENGINE_##x), ilog2(I915_SCHEDULER_CAP_##y) }
		MAP(HAS_PREEMPTION, PREEMPTION),
		MAP(HAS_SEMAPHORES, SEMAPHORES),
		MAP(SUPPORTS_STATS, ENGINE_BUSY_STATS),
#undef MAP
	};
	struct intel_engine_cs *engine;
	u32 enabled, disabled;

	enabled = 0;
	disabled = 0;
	for_each_uabi_engine(engine, i915) { /* all engines must agree! */
		int i;

		/* An attached schedule callback implies priority support. */
		if (engine->schedule)
			enabled |= (I915_SCHEDULER_CAP_ENABLED |
				    I915_SCHEDULER_CAP_PRIORITY);
		else
			disabled |= (I915_SCHEDULER_CAP_ENABLED |
				     I915_SCHEDULER_CAP_PRIORITY);

		for (i = 0; i < ARRAY_SIZE(map); i++) {
			if (engine->flags & BIT(map[i].engine))
				enabled |= BIT(map[i].sched);
			else
				disabled |= BIT(map[i].sched);
		}
	}

	i915->caps.scheduler = enabled & ~disabled;
	/* Without the base ENABLED cap, no other cap is meaningful. */
	if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_ENABLED))
		i915->caps.scheduler = 0;
}
131 const char *intel_engine_class_repr(u8 class)
133 static const char * const uabi_names[] = {
134 [RENDER_CLASS] = "rcs",
135 [COPY_ENGINE_CLASS] = "bcs",
136 [VIDEO_DECODE_CLASS] = "vcs",
137 [VIDEO_ENHANCEMENT_CLASS] = "vecs",
140 if (class >= ARRAY_SIZE(uabi_names) || !uabi_names[class])
141 return "xxx";
143 return uabi_names[class];
/* Running cursor used while assigning legacy execbuf ring indices: tracks
 * the current (gt, class) run and the next instance number within it. */
struct legacy_ring {
	struct intel_gt *gt;	/* gt of the engines currently being counted */
	u8 class;		/* hardware class of the current run */
	u8 instance;		/* next instance index within the run */
};
152 static int legacy_ring_idx(const struct legacy_ring *ring)
154 static const struct {
155 u8 base, max;
156 } map[] = {
157 [RENDER_CLASS] = { RCS0, 1 },
158 [COPY_ENGINE_CLASS] = { BCS0, 1 },
159 [VIDEO_DECODE_CLASS] = { VCS0, I915_MAX_VCS },
160 [VIDEO_ENHANCEMENT_CLASS] = { VECS0, I915_MAX_VECS },
163 if (GEM_DEBUG_WARN_ON(ring->class >= ARRAY_SIZE(map)))
164 return INVALID_ENGINE;
166 if (GEM_DEBUG_WARN_ON(ring->instance >= map[ring->class].max))
167 return INVALID_ENGINE;
169 return map[ring->class].base + ring->instance;
172 static void add_legacy_ring(struct legacy_ring *ring,
173 struct intel_engine_cs *engine)
175 if (engine->gt != ring->gt || engine->class != ring->class) {
176 ring->gt = engine->gt;
177 ring->class = engine->class;
178 ring->instance = 0;
181 engine->legacy_idx = legacy_ring_idx(ring);
182 if (engine->legacy_idx != INVALID_ENGINE)
183 ring->instance++;
/*
 * intel_engines_driver_register - publish all queued engines to userspace.
 * @i915: the device private
 *
 * Drains the llist of engines added via intel_engine_add_user(), sorts
 * them into (uabi class, instance) order, assigns final uabi names and
 * legacy execbuf indices, and builds the uabi_engines rbtree used by
 * intel_engine_lookup_user().  Finally derives the device scheduler caps.
 *
 * Because the input list is already sorted, each node is linked as the
 * rightmost child of the previous one (prev/p bookkeeping below), yielding
 * an in-order tree without per-node comparisons.
 */
void intel_engines_driver_register(struct drm_i915_private *i915)
{
	struct legacy_ring ring = {};
	u8 uabi_instances[4] = {};	/* next instance per uabi class */
	struct list_head *it, *next;
	struct rb_node **p, *prev;
	LIST_HEAD(engines);

	sort_engines(i915, &engines);

	prev = NULL;
	p = &i915->uabi_engines.rb_node;
	list_for_each_safe(it, next, &engines) {
		struct intel_engine_cs *engine =
			container_of((struct rb_node *)it, typeof(*engine),
				     uabi_node);
		char old[sizeof(engine->name)];

		if (intel_gt_has_unrecoverable_error(engine->gt))
			continue; /* ignore incomplete engines */

		GEM_BUG_ON(engine->class >= ARRAY_SIZE(uabi_classes));
		engine->uabi_class = uabi_classes[engine->class];

		GEM_BUG_ON(engine->uabi_class >= ARRAY_SIZE(uabi_instances));
		engine->uabi_instance = uabi_instances[engine->uabi_class]++;

		/* Replace the internal name with the final user facing name */
		memcpy(old, engine->name, sizeof(engine->name));
		scnprintf(engine->name, sizeof(engine->name), "%s%u",
			  intel_engine_class_repr(engine->class),
			  engine->uabi_instance);
		DRM_DEBUG_DRIVER("renamed %s to %s\n", old, engine->name);

		rb_link_node(&engine->uabi_node, prev, p);
		rb_insert_color(&engine->uabi_node, &i915->uabi_engines);

		/* Sanity check: the tree must resolve back to this engine. */
		GEM_BUG_ON(intel_engine_lookup_user(i915,
						    engine->uabi_class,
						    engine->uabi_instance) != engine);

		/* Fix up the mapping to match default execbuf::user_map[] */
		add_legacy_ring(&ring, engine);

		prev = &engine->uabi_node;
		p = &prev->rb_right;
	}

	/* Debug-only self-check of the whole uabi mapping. */
	if (IS_ENABLED(CONFIG_DRM_I915_SELFTESTS) &&
	    IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) {
		struct intel_engine_cs *engine;
		unsigned int isolation;
		int class, inst;
		int errors = 0;

		/* Every (class, instance) we handed out must be findable. */
		for (class = 0; class < ARRAY_SIZE(uabi_instances); class++) {
			for (inst = 0; inst < uabi_instances[class]; inst++) {
				engine = intel_engine_lookup_user(i915,
								  class, inst);
				if (!engine) {
					pr_err("UABI engine not found for { class:%d, instance:%d }\n",
					       class, inst);
					errors++;
					continue;
				}

				if (engine->uabi_class != class ||
				    engine->uabi_instance != inst) {
					pr_err("Wrong UABI engine:%s { class:%d, instance:%d } found for { class:%d, instance:%d }\n",
					       engine->name,
					       engine->uabi_class,
					       engine->uabi_instance,
					       class, inst);
					errors++;
					continue;
				}
			}
		}

		/*
		 * Make sure that classes with multiple engine instances all
		 * share the same basic configuration.
		 */
		isolation = intel_engines_has_context_isolation(i915);
		for_each_uabi_engine(engine, i915) {
			unsigned int bit = BIT(engine->uabi_class);
			unsigned int expected = engine->default_state ? bit : 0;

			if ((isolation & bit) != expected) {
				pr_err("mismatching default context state for class %d on engine %s\n",
				       engine->uabi_class, engine->name);
				errors++;
			}
		}

		/* On any inconsistency, hide all engines from userspace. */
		if (drm_WARN(&i915->drm, errors,
			     "Invalid UABI engine mapping found"))
			i915->uabi_engines = RB_ROOT;
	}

	set_scheduler_caps(i915);
}
289 unsigned int intel_engines_has_context_isolation(struct drm_i915_private *i915)
291 struct intel_engine_cs *engine;
292 unsigned int which;
294 which = 0;
295 for_each_uabi_engine(engine, i915)
296 if (engine->default_state)
297 which |= BIT(engine->uabi_class);
299 return which;