/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/sched/mm.h>
#include <linux/sort.h>

#include <drm/drm_debugfs.h>
#include <drm/drm_fourcc.h>

#include "display/intel_display_types.h"
#include "display/intel_dp.h"
#include "display/intel_fbc.h"
#include "display/intel_hdcp.h"
#include "display/intel_hdmi.h"
#include "display/intel_psr.h"

#include "gem/i915_gem_context.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_gt_requests.h"
#include "gt/intel_reset.h"
#include "gt/intel_rc6.h"
#include "gt/intel_rps.h"
#include "gt/uc/intel_guc_submission.h"

#include "i915_debugfs.h"
#include "i915_irq.h"
#include "i915_trace.h"
#include "intel_csr.h"
#include "intel_pm.h"
#include "intel_sideband.h"

static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
{
        return to_i915(node->minor->dev);
}

static int i915_capabilities(struct seq_file *m, void *data)
{
        struct drm_i915_private *i915 = node_to_i915(m->private);
        struct drm_printer p = drm_seq_file_printer(m);

        seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(i915));

        intel_device_info_print_static(INTEL_INFO(i915), &p);
        intel_device_info_print_runtime(RUNTIME_INFO(i915), &p);
        intel_driver_caps_print(&i915->caps, &p);

        kernel_param_lock(THIS_MODULE);
        i915_params_dump(&i915_modparams, &p);
        kernel_param_unlock(THIS_MODULE);

        return 0;
}

static char get_tiling_flag(struct drm_i915_gem_object *obj)
{
        switch (i915_gem_object_get_tiling(obj)) {
        default:
        case I915_TILING_NONE: return ' ';
        case I915_TILING_X: return 'X';
        case I915_TILING_Y: return 'Y';
        }
}

static char get_global_flag(struct drm_i915_gem_object *obj)
{
        return READ_ONCE(obj->userfault_count) ? 'g' : ' ';
}

static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
{
        return obj->mm.mapping ? 'M' : ' ';
}

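/*
 * Pretty-print a mask of GTT page sizes. A mask containing a single size
 * returns a string constant directly; a mixed mask is formatted into the
 * caller's buffer, or collapsed to "M" when no buffer is supplied.
 */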
static const char *
stringify_page_sizes(unsigned int page_sizes, char *buf, size_t len)
{
        size_t x = 0;

        switch (page_sizes) {
        case 0:
                return "";
        case I915_GTT_PAGE_SIZE_4K:
                return "4K";
        case I915_GTT_PAGE_SIZE_64K:
                return "64K";
        case I915_GTT_PAGE_SIZE_2M:
                return "2M";
        default:
                if (!buf)
                        return "M";

                if (page_sizes & I915_GTT_PAGE_SIZE_2M)
                        x += snprintf(buf + x, len - x, "2M, ");
                if (page_sizes & I915_GTT_PAGE_SIZE_64K)
                        x += snprintf(buf + x, len - x, "64K, ");
                if (page_sizes & I915_GTT_PAGE_SIZE_4K)
                        x += snprintf(buf + x, len - x, "4K, ");
                buf[x - 2] = '\0';

                return buf;
        }
}

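/*
 * Print a one-line summary of a GEM object: status flags, size, read/write
 * domains, cache level and every VMA with a live binding. Note that
 * obj->vma.lock is dropped around printing each VMA, so the walk tolerates
 * concurrent modification of the list.
 */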
static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
        struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
        struct intel_engine_cs *engine;
        struct i915_vma *vma;
        int pin_count = 0;

        seq_printf(m, "%pK: %c%c%c %8zdKiB %02x %02x %s%s%s",
                   &obj->base,
                   get_tiling_flag(obj),
                   get_global_flag(obj),
                   get_pin_mapped_flag(obj),
                   obj->base.size / 1024,
                   obj->read_domains,
                   obj->write_domain,
                   i915_cache_level_str(dev_priv, obj->cache_level),
                   obj->mm.dirty ? " dirty" : "",
                   obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : "");
        if (obj->base.name)
                seq_printf(m, " (name: %d)", obj->base.name);

        spin_lock(&obj->vma.lock);
        list_for_each_entry(vma, &obj->vma.list, obj_link) {
                if (!drm_mm_node_allocated(&vma->node))
                        continue;

                spin_unlock(&obj->vma.lock);

                if (i915_vma_is_pinned(vma))
                        pin_count++;

                seq_printf(m, " (%sgtt offset: %08llx, size: %08llx, pages: %s",
                           i915_vma_is_ggtt(vma) ? "g" : "pp",
                           vma->node.start, vma->node.size,
                           stringify_page_sizes(vma->page_sizes.gtt, NULL, 0));
                if (i915_vma_is_ggtt(vma)) {
                        switch (vma->ggtt_view.type) {
                        case I915_GGTT_VIEW_NORMAL:
                                seq_puts(m, ", normal");
                                break;

                        case I915_GGTT_VIEW_PARTIAL:
                                seq_printf(m, ", partial [%08llx+%x]",
                                           vma->ggtt_view.partial.offset << PAGE_SHIFT,
                                           vma->ggtt_view.partial.size << PAGE_SHIFT);
                                break;

                        case I915_GGTT_VIEW_ROTATED:
                                seq_printf(m, ", rotated [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]",
                                           vma->ggtt_view.rotated.plane[0].width,
                                           vma->ggtt_view.rotated.plane[0].height,
                                           vma->ggtt_view.rotated.plane[0].stride,
                                           vma->ggtt_view.rotated.plane[0].offset,
                                           vma->ggtt_view.rotated.plane[1].width,
                                           vma->ggtt_view.rotated.plane[1].height,
                                           vma->ggtt_view.rotated.plane[1].stride,
                                           vma->ggtt_view.rotated.plane[1].offset);
                                break;

                        case I915_GGTT_VIEW_REMAPPED:
                                seq_printf(m, ", remapped [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]",
                                           vma->ggtt_view.remapped.plane[0].width,
                                           vma->ggtt_view.remapped.plane[0].height,
                                           vma->ggtt_view.remapped.plane[0].stride,
                                           vma->ggtt_view.remapped.plane[0].offset,
                                           vma->ggtt_view.remapped.plane[1].width,
                                           vma->ggtt_view.remapped.plane[1].height,
                                           vma->ggtt_view.remapped.plane[1].stride,
                                           vma->ggtt_view.remapped.plane[1].offset);
                                break;

                        default:
                                MISSING_CASE(vma->ggtt_view.type);
                                break;
                        }
                }
                if (vma->fence)
                        seq_printf(m, " , fence: %d", vma->fence->id);
                seq_puts(m, ")");

                spin_lock(&obj->vma.lock);
        }
        spin_unlock(&obj->vma.lock);

        seq_printf(m, " (pinned x %d)", pin_count);
        if (obj->stolen)
                seq_printf(m, " (stolen: %08llx)", obj->stolen->start);
        if (i915_gem_object_is_framebuffer(obj))
                seq_printf(m, " (fb)");

        engine = i915_gem_object_last_write_engine(obj);
        if (engine)
                seq_printf(m, " (%s)", engine->name);
}

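/* Accumulator for per-client object statistics, filled in by per_file_stats(). */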
struct file_stats {
        struct i915_address_space *vm;
        unsigned long count;
        u64 total, unbound;
        u64 active, inactive;
        u64 closed;
};

static int per_file_stats(int id, void *ptr, void *data)
{
        struct drm_i915_gem_object *obj = ptr;
        struct file_stats *stats = data;
        struct i915_vma *vma;

        if (!kref_get_unless_zero(&obj->base.refcount))
                return 0;

        stats->count++;
        stats->total += obj->base.size;
        if (!atomic_read(&obj->bind_count))
                stats->unbound += obj->base.size;

        spin_lock(&obj->vma.lock);
        if (!stats->vm) {
                for_each_ggtt_vma(vma, obj) {
                        if (!drm_mm_node_allocated(&vma->node))
                                continue;

                        if (i915_vma_is_active(vma))
                                stats->active += vma->node.size;
                        else
                                stats->inactive += vma->node.size;

                        if (i915_vma_is_closed(vma))
                                stats->closed += vma->node.size;
                }
        } else {
                struct rb_node *p = obj->vma.tree.rb_node;

                while (p) {
                        long cmp;

                        vma = rb_entry(p, typeof(*vma), obj_node);
                        cmp = i915_vma_compare(vma, stats->vm, NULL);
                        if (cmp == 0) {
                                if (drm_mm_node_allocated(&vma->node)) {
                                        if (i915_vma_is_active(vma))
                                                stats->active += vma->node.size;
                                        else
                                                stats->inactive += vma->node.size;

                                        if (i915_vma_is_closed(vma))
                                                stats->closed += vma->node.size;
                                }
                                break;
                        }
                        if (cmp < 0)
                                p = p->rb_right;
                        else
                                p = p->rb_left;
                }
        }
        spin_unlock(&obj->vma.lock);

        i915_gem_object_put(obj);
        return 0;
}

#define print_file_stats(m, name, stats) do { \
        if (stats.count) \
                seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu unbound, %llu closed)\n", \
                           name, \
                           stats.count, \
                           stats.total, \
                           stats.active, \
                           stats.inactive, \
                           stats.unbound, \
                           stats.closed); \
} while (0)

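/*
 * Walk the global context list and print the object statistics charged to
 * each client. Each context is pinned via kref_get_unless_zero() so that
 * gem.contexts.lock can be dropped while its engines are inspected.
 */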
static void print_context_stats(struct seq_file *m,
                                struct drm_i915_private *i915)
{
        struct file_stats kstats = {};
        struct i915_gem_context *ctx, *cn;

        spin_lock(&i915->gem.contexts.lock);
        list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) {
                struct i915_gem_engines_iter it;
                struct intel_context *ce;

                if (!kref_get_unless_zero(&ctx->ref))
                        continue;

                spin_unlock(&i915->gem.contexts.lock);

                for_each_gem_engine(ce,
                                    i915_gem_context_lock_engines(ctx), it) {
                        if (intel_context_pin_if_active(ce)) {
                                rcu_read_lock();
                                if (ce->state)
                                        per_file_stats(0,
                                                       ce->state->obj, &kstats);
                                per_file_stats(0, ce->ring->vma->obj, &kstats);
                                rcu_read_unlock();
                                intel_context_unpin(ce);
                        }
                }
                i915_gem_context_unlock_engines(ctx);

                if (!IS_ERR_OR_NULL(ctx->file_priv)) {
                        struct file_stats stats = {
                                .vm = rcu_access_pointer(ctx->vm),
                        };
                        struct drm_file *file = ctx->file_priv->file;
                        struct task_struct *task;
                        char name[80];

                        rcu_read_lock();
                        idr_for_each(&file->object_idr, per_file_stats, &stats);
                        rcu_read_unlock();

                        rcu_read_lock();
                        task = pid_task(ctx->pid ?: file->pid, PIDTYPE_PID);
                        snprintf(name, sizeof(name), "%s",
                                 task ? task->comm : "<unknown>");
                        rcu_read_unlock();

                        print_file_stats(m, name, stats);
                }

                spin_lock(&i915->gem.contexts.lock);
                list_safe_reset_next(ctx, cn, link);
                i915_gem_context_put(ctx);
        }
        spin_unlock(&i915->gem.contexts.lock);

        print_file_stats(m, "[k]contexts", kstats);
}

static int i915_gem_object_info(struct seq_file *m, void *data)
{
        struct drm_i915_private *i915 = node_to_i915(m->private);
        struct intel_memory_region *mr;
        enum intel_region_id id;

        seq_printf(m, "%u shrinkable [%u free] objects, %llu bytes\n",
                   i915->mm.shrink_count,
                   atomic_read(&i915->mm.free_count),
                   i915->mm.shrink_memory);
        for_each_memory_region(mr, i915, id)
                seq_printf(m, "%s: total:%pa, available:%pa bytes\n",
                           mr->name, &mr->total, &mr->avail);
        seq_putc(m, '\n');

        print_context_stats(m, i915);

        return 0;
}

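/*
 * Dump the IMR/IIR/IER triplet for each display pipe on gen8+ hardware,
 * skipping any pipe whose power well is currently down.
 */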
static void gen8_display_interrupt_info(struct seq_file *m)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        enum pipe pipe;

        for_each_pipe(dev_priv, pipe) {
                enum intel_display_power_domain power_domain;
                intel_wakeref_t wakeref;

                power_domain = POWER_DOMAIN_PIPE(pipe);
                wakeref = intel_display_power_get_if_enabled(dev_priv,
                                                             power_domain);
                if (!wakeref) {
                        seq_printf(m, "Pipe %c power disabled\n",
                                   pipe_name(pipe));
                        continue;
                }
                seq_printf(m, "Pipe %c IMR:\t%08x\n",
                           pipe_name(pipe),
                           I915_READ(GEN8_DE_PIPE_IMR(pipe)));
                seq_printf(m, "Pipe %c IIR:\t%08x\n",
                           pipe_name(pipe),
                           I915_READ(GEN8_DE_PIPE_IIR(pipe)));
                seq_printf(m, "Pipe %c IER:\t%08x\n",
                           pipe_name(pipe),
                           I915_READ(GEN8_DE_PIPE_IER(pipe)));

                intel_display_power_put(dev_priv, power_domain, wakeref);
        }

        seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
                   I915_READ(GEN8_DE_PORT_IMR));
        seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
                   I915_READ(GEN8_DE_PORT_IIR));
        seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
                   I915_READ(GEN8_DE_PORT_IER));

        seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
                   I915_READ(GEN8_DE_MISC_IMR));
        seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
                   I915_READ(GEN8_DE_MISC_IIR));
        seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
                   I915_READ(GEN8_DE_MISC_IER));

        seq_printf(m, "PCU interrupt mask:\t%08x\n",
                   I915_READ(GEN8_PCU_IMR));
        seq_printf(m, "PCU interrupt identity:\t%08x\n",
                   I915_READ(GEN8_PCU_IIR));
        seq_printf(m, "PCU interrupt enable:\t%08x\n",
                   I915_READ(GEN8_PCU_IER));
}

static int i915_interrupt_info(struct seq_file *m, void *data)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct intel_engine_cs *engine;
        intel_wakeref_t wakeref;
        int i, pipe;

        wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

        if (IS_CHERRYVIEW(dev_priv)) {
                intel_wakeref_t pref;

                seq_printf(m, "Master Interrupt Control:\t%08x\n",
                           I915_READ(GEN8_MASTER_IRQ));

                seq_printf(m, "Display IER:\t%08x\n",
                           I915_READ(VLV_IER));
                seq_printf(m, "Display IIR:\t%08x\n",
                           I915_READ(VLV_IIR));
                seq_printf(m, "Display IIR_RW:\t%08x\n",
                           I915_READ(VLV_IIR_RW));
                seq_printf(m, "Display IMR:\t%08x\n",
                           I915_READ(VLV_IMR));
                for_each_pipe(dev_priv, pipe) {
                        enum intel_display_power_domain power_domain;

                        power_domain = POWER_DOMAIN_PIPE(pipe);
                        pref = intel_display_power_get_if_enabled(dev_priv,
                                                                  power_domain);
                        if (!pref) {
                                seq_printf(m, "Pipe %c power disabled\n",
                                           pipe_name(pipe));
                                continue;
                        }

                        seq_printf(m, "Pipe %c stat:\t%08x\n",
                                   pipe_name(pipe),
                                   I915_READ(PIPESTAT(pipe)));

                        intel_display_power_put(dev_priv, power_domain, pref);
                }

                pref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
                seq_printf(m, "Port hotplug:\t%08x\n",
                           I915_READ(PORT_HOTPLUG_EN));
                seq_printf(m, "DPFLIPSTAT:\t%08x\n",
                           I915_READ(VLV_DPFLIPSTAT));
                seq_printf(m, "DPINVGTT:\t%08x\n",
                           I915_READ(DPINVGTT));
                intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, pref);

                for (i = 0; i < 4; i++) {
                        seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
                                   i, I915_READ(GEN8_GT_IMR(i)));
                        seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
                                   i, I915_READ(GEN8_GT_IIR(i)));
                        seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
                                   i, I915_READ(GEN8_GT_IER(i)));
                }

                seq_printf(m, "PCU interrupt mask:\t%08x\n",
                           I915_READ(GEN8_PCU_IMR));
                seq_printf(m, "PCU interrupt identity:\t%08x\n",
                           I915_READ(GEN8_PCU_IIR));
                seq_printf(m, "PCU interrupt enable:\t%08x\n",
                           I915_READ(GEN8_PCU_IER));
        } else if (INTEL_GEN(dev_priv) >= 11) {
                seq_printf(m, "Master Interrupt Control: %08x\n",
                           I915_READ(GEN11_GFX_MSTR_IRQ));

                seq_printf(m, "Render/Copy Intr Enable: %08x\n",
                           I915_READ(GEN11_RENDER_COPY_INTR_ENABLE));
                seq_printf(m, "VCS/VECS Intr Enable: %08x\n",
                           I915_READ(GEN11_VCS_VECS_INTR_ENABLE));
                seq_printf(m, "GUC/SG Intr Enable:\t %08x\n",
                           I915_READ(GEN11_GUC_SG_INTR_ENABLE));
                seq_printf(m, "GPM/WGBOXPERF Intr Enable: %08x\n",
                           I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE));
                seq_printf(m, "Crypto Intr Enable:\t %08x\n",
                           I915_READ(GEN11_CRYPTO_RSVD_INTR_ENABLE));
                seq_printf(m, "GUnit/CSME Intr Enable:\t %08x\n",
                           I915_READ(GEN11_GUNIT_CSME_INTR_ENABLE));

                seq_printf(m, "Display Interrupt Control:\t%08x\n",
                           I915_READ(GEN11_DISPLAY_INT_CTL));

                gen8_display_interrupt_info(m);
        } else if (INTEL_GEN(dev_priv) >= 8) {
                seq_printf(m, "Master Interrupt Control:\t%08x\n",
                           I915_READ(GEN8_MASTER_IRQ));

                for (i = 0; i < 4; i++) {
                        seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
                                   i, I915_READ(GEN8_GT_IMR(i)));
                        seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
                                   i, I915_READ(GEN8_GT_IIR(i)));
                        seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
                                   i, I915_READ(GEN8_GT_IER(i)));
                }

                gen8_display_interrupt_info(m);
        } else if (IS_VALLEYVIEW(dev_priv)) {
                intel_wakeref_t pref;

                seq_printf(m, "Display IER:\t%08x\n",
                           I915_READ(VLV_IER));
                seq_printf(m, "Display IIR:\t%08x\n",
                           I915_READ(VLV_IIR));
                seq_printf(m, "Display IIR_RW:\t%08x\n",
                           I915_READ(VLV_IIR_RW));
                seq_printf(m, "Display IMR:\t%08x\n",
                           I915_READ(VLV_IMR));
                for_each_pipe(dev_priv, pipe) {
                        enum intel_display_power_domain power_domain;

                        power_domain = POWER_DOMAIN_PIPE(pipe);
                        pref = intel_display_power_get_if_enabled(dev_priv,
                                                                  power_domain);
                        if (!pref) {
                                seq_printf(m, "Pipe %c power disabled\n",
                                           pipe_name(pipe));
                                continue;
                        }

                        seq_printf(m, "Pipe %c stat:\t%08x\n",
                                   pipe_name(pipe),
                                   I915_READ(PIPESTAT(pipe)));
                        intel_display_power_put(dev_priv, power_domain, pref);
                }

                seq_printf(m, "Master IER:\t%08x\n",
                           I915_READ(VLV_MASTER_IER));

                seq_printf(m, "Render IER:\t%08x\n",
                           I915_READ(GTIER));
                seq_printf(m, "Render IIR:\t%08x\n",
                           I915_READ(GTIIR));
                seq_printf(m, "Render IMR:\t%08x\n",
                           I915_READ(GTIMR));

                seq_printf(m, "PM IER:\t\t%08x\n",
                           I915_READ(GEN6_PMIER));
                seq_printf(m, "PM IIR:\t\t%08x\n",
                           I915_READ(GEN6_PMIIR));
                seq_printf(m, "PM IMR:\t\t%08x\n",
                           I915_READ(GEN6_PMIMR));

                pref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
                seq_printf(m, "Port hotplug:\t%08x\n",
                           I915_READ(PORT_HOTPLUG_EN));
                seq_printf(m, "DPFLIPSTAT:\t%08x\n",
                           I915_READ(VLV_DPFLIPSTAT));
                seq_printf(m, "DPINVGTT:\t%08x\n",
                           I915_READ(DPINVGTT));
                intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, pref);

        } else if (!HAS_PCH_SPLIT(dev_priv)) {
                seq_printf(m, "Interrupt enable: %08x\n",
                           I915_READ(GEN2_IER));
                seq_printf(m, "Interrupt identity: %08x\n",
                           I915_READ(GEN2_IIR));
                seq_printf(m, "Interrupt mask: %08x\n",
                           I915_READ(GEN2_IMR));
                for_each_pipe(dev_priv, pipe)
                        seq_printf(m, "Pipe %c stat: %08x\n",
                                   pipe_name(pipe),
                                   I915_READ(PIPESTAT(pipe)));
        } else {
                seq_printf(m, "North Display Interrupt enable: %08x\n",
                           I915_READ(DEIER));
                seq_printf(m, "North Display Interrupt identity: %08x\n",
                           I915_READ(DEIIR));
                seq_printf(m, "North Display Interrupt mask: %08x\n",
                           I915_READ(DEIMR));
                seq_printf(m, "South Display Interrupt enable: %08x\n",
                           I915_READ(SDEIER));
                seq_printf(m, "South Display Interrupt identity: %08x\n",
                           I915_READ(SDEIIR));
                seq_printf(m, "South Display Interrupt mask: %08x\n",
                           I915_READ(SDEIMR));
                seq_printf(m, "Graphics Interrupt enable: %08x\n",
                           I915_READ(GTIER));
                seq_printf(m, "Graphics Interrupt identity: %08x\n",
                           I915_READ(GTIIR));
                seq_printf(m, "Graphics Interrupt mask: %08x\n",
                           I915_READ(GTIMR));
        }

        if (INTEL_GEN(dev_priv) >= 11) {
                seq_printf(m, "RCS Intr Mask:\t %08x\n",
                           I915_READ(GEN11_RCS0_RSVD_INTR_MASK));
                seq_printf(m, "BCS Intr Mask:\t %08x\n",
                           I915_READ(GEN11_BCS_RSVD_INTR_MASK));
                seq_printf(m, "VCS0/VCS1 Intr Mask:\t %08x\n",
                           I915_READ(GEN11_VCS0_VCS1_INTR_MASK));
                seq_printf(m, "VCS2/VCS3 Intr Mask:\t %08x\n",
                           I915_READ(GEN11_VCS2_VCS3_INTR_MASK));
                seq_printf(m, "VECS0/VECS1 Intr Mask:\t %08x\n",
                           I915_READ(GEN11_VECS0_VECS1_INTR_MASK));
                seq_printf(m, "GUC/SG Intr Mask:\t %08x\n",
                           I915_READ(GEN11_GUC_SG_INTR_MASK));
                seq_printf(m, "GPM/WGBOXPERF Intr Mask: %08x\n",
                           I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK));
                seq_printf(m, "Crypto Intr Mask:\t %08x\n",
                           I915_READ(GEN11_CRYPTO_RSVD_INTR_MASK));
                seq_printf(m, "Gunit/CSME Intr Mask:\t %08x\n",
                           I915_READ(GEN11_GUNIT_CSME_INTR_MASK));

        } else if (INTEL_GEN(dev_priv) >= 6) {
                for_each_uabi_engine(engine, dev_priv) {
                        seq_printf(m,
                                   "Graphics Interrupt mask (%s): %08x\n",
                                   engine->name, ENGINE_READ(engine, RING_IMR));
                }
        }

        intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);

        return 0;
}

static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
{
        struct drm_i915_private *i915 = node_to_i915(m->private);
        unsigned int i;

        seq_printf(m, "Total fences = %d\n", i915->ggtt.num_fences);

        rcu_read_lock();
        for (i = 0; i < i915->ggtt.num_fences; i++) {
                struct i915_fence_reg *reg = &i915->ggtt.fence_regs[i];
                struct i915_vma *vma = reg->vma;

                seq_printf(m, "Fence %d, pin count = %d, object = ",
                           i, atomic_read(&reg->pin_count));
                if (!vma)
                        seq_puts(m, "unused");
                else
                        describe_obj(m, vma->obj);
                seq_putc(m, '\n');
        }
        rcu_read_unlock();

        return 0;
}

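/*
 * GPU error-state interfaces: i915_gpu_info captures a fresh coredump when
 * opened, while i915_error_state exposes the first recorded error and
 * resets it when written to.
 */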
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
static ssize_t gpu_state_read(struct file *file, char __user *ubuf,
                              size_t count, loff_t *pos)
{
        struct i915_gpu_coredump *error;
        ssize_t ret;
        void *buf;

        error = file->private_data;
        if (!error)
                return 0;

        /* Bounce buffer required because of kernfs __user API convenience. */
        buf = kmalloc(count, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        ret = i915_gpu_coredump_copy_to_buffer(error, buf, *pos, count);
        if (ret <= 0)
                goto out;

        if (!copy_to_user(ubuf, buf, ret))
                *pos += ret;
        else
                ret = -EFAULT;

out:
        kfree(buf);
        return ret;
}

static int gpu_state_release(struct inode *inode, struct file *file)
{
        i915_gpu_coredump_put(file->private_data);
        return 0;
}

static int i915_gpu_info_open(struct inode *inode, struct file *file)
{
        struct drm_i915_private *i915 = inode->i_private;
        struct i915_gpu_coredump *gpu;
        intel_wakeref_t wakeref;

        gpu = NULL;
        with_intel_runtime_pm(&i915->runtime_pm, wakeref)
                gpu = i915_gpu_coredump(i915);
        if (IS_ERR(gpu))
                return PTR_ERR(gpu);

        file->private_data = gpu;
        return 0;
}

static const struct file_operations i915_gpu_info_fops = {
        .owner = THIS_MODULE,
        .open = i915_gpu_info_open,
        .read = gpu_state_read,
        .llseek = default_llseek,
        .release = gpu_state_release,
};

static ssize_t
i915_error_state_write(struct file *filp,
                       const char __user *ubuf,
                       size_t cnt,
                       loff_t *ppos)
{
        struct i915_gpu_coredump *error = filp->private_data;

        if (!error)
                return 0;

        DRM_DEBUG_DRIVER("Resetting error state\n");
        i915_reset_error_state(error->i915);

        return cnt;
}

static int i915_error_state_open(struct inode *inode, struct file *file)
{
        struct i915_gpu_coredump *error;

        error = i915_first_error_state(inode->i_private);
        if (IS_ERR(error))
                return PTR_ERR(error);

        file->private_data = error;
        return 0;
}

static const struct file_operations i915_error_state_fops = {
        .owner = THIS_MODULE,
        .open = i915_error_state_open,
        .read = gpu_state_read,
        .write = i915_error_state_write,
        .llseek = default_llseek,
        .release = gpu_state_release,
};
#endif

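/*
 * Report requested, actual and limit GPU frequencies. The register layout
 * differs per generation: Ironlake uses MEMSWCTL/MEMSTAT, VLV/CHV consult
 * the Punit, and gen6+ uses the RPS register block.
 */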
static int i915_frequency_info(struct seq_file *m, void *unused)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct intel_uncore *uncore = &dev_priv->uncore;
        struct intel_rps *rps = &dev_priv->gt.rps;
        intel_wakeref_t wakeref;
        int ret = 0;

        wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

        if (IS_GEN(dev_priv, 5)) {
                u16 rgvswctl = intel_uncore_read16(uncore, MEMSWCTL);
                u16 rgvstat = intel_uncore_read16(uncore, MEMSTAT_ILK);

                seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
                seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
                seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
                           MEMSTAT_VID_SHIFT);
                seq_printf(m, "Current P-state: %d\n",
                           (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
        } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
                u32 rpmodectl, freq_sts;

                rpmodectl = I915_READ(GEN6_RP_CONTROL);
                seq_printf(m, "Video Turbo Mode: %s\n",
                           yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
                seq_printf(m, "HW control enabled: %s\n",
                           yesno(rpmodectl & GEN6_RP_ENABLE));
                seq_printf(m, "SW control enabled: %s\n",
                           yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
                                 GEN6_RP_MEDIA_SW_MODE));

                vlv_punit_get(dev_priv);
                freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
                vlv_punit_put(dev_priv);

                seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
                seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);

                seq_printf(m, "actual GPU freq: %d MHz\n",
                           intel_gpu_freq(rps, (freq_sts >> 8) & 0xff));

                seq_printf(m, "current GPU freq: %d MHz\n",
                           intel_gpu_freq(rps, rps->cur_freq));

                seq_printf(m, "max GPU freq: %d MHz\n",
                           intel_gpu_freq(rps, rps->max_freq));

                seq_printf(m, "min GPU freq: %d MHz\n",
                           intel_gpu_freq(rps, rps->min_freq));

                seq_printf(m, "idle GPU freq: %d MHz\n",
                           intel_gpu_freq(rps, rps->idle_freq));

                seq_printf(m,
                           "efficient (RPe) frequency: %d MHz\n",
                           intel_gpu_freq(rps, rps->efficient_freq));
        } else if (INTEL_GEN(dev_priv) >= 6) {
                u32 rp_state_limits;
                u32 gt_perf_status;
                u32 rp_state_cap;
                u32 rpmodectl, rpinclimit, rpdeclimit;
                u32 rpstat, cagf, reqf;
                u32 rpupei, rpcurup, rpprevup;
                u32 rpdownei, rpcurdown, rpprevdown;
                u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
                int max_freq;

                rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
                if (IS_GEN9_LP(dev_priv)) {
                        rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
                        gt_perf_status = I915_READ(BXT_GT_PERF_STATUS);
                } else {
                        rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
                        gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
                }

                /* RPSTAT1 is in the GT power well */
                intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);

                reqf = I915_READ(GEN6_RPNSWREQ);
                if (INTEL_GEN(dev_priv) >= 9)
                        reqf >>= 23;
                else {
                        reqf &= ~GEN6_TURBO_DISABLE;
                        if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
                                reqf >>= 24;
                        else
                                reqf >>= 25;
                }
                reqf = intel_gpu_freq(rps, reqf);

                rpmodectl = I915_READ(GEN6_RP_CONTROL);
                rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
                rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD);

                rpstat = I915_READ(GEN6_RPSTAT1);
                rpupei = I915_READ(GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
                rpcurup = I915_READ(GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK;
                rpprevup = I915_READ(GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK;
                rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
                rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
                rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
                cagf = intel_rps_read_actual_frequency(rps);

                intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);

                if (INTEL_GEN(dev_priv) >= 11) {
                        pm_ier = I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE);
                        pm_imr = I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK);
                        /*
                         * The equivalent to the PM ISR & IIR cannot be read
                         * without affecting the current state of the system
                         */
                        pm_isr = 0;
                        pm_iir = 0;
                } else if (INTEL_GEN(dev_priv) >= 8) {
                        pm_ier = I915_READ(GEN8_GT_IER(2));
                        pm_imr = I915_READ(GEN8_GT_IMR(2));
                        pm_isr = I915_READ(GEN8_GT_ISR(2));
                        pm_iir = I915_READ(GEN8_GT_IIR(2));
                } else {
                        pm_ier = I915_READ(GEN6_PMIER);
                        pm_imr = I915_READ(GEN6_PMIMR);
                        pm_isr = I915_READ(GEN6_PMISR);
                        pm_iir = I915_READ(GEN6_PMIIR);
                }
                pm_mask = I915_READ(GEN6_PMINTRMSK);

                seq_printf(m, "Video Turbo Mode: %s\n",
                           yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
                seq_printf(m, "HW control enabled: %s\n",
                           yesno(rpmodectl & GEN6_RP_ENABLE));
                seq_printf(m, "SW control enabled: %s\n",
                           yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
                                 GEN6_RP_MEDIA_SW_MODE));

                seq_printf(m, "PM IER=0x%08x IMR=0x%08x, MASK=0x%08x\n",
                           pm_ier, pm_imr, pm_mask);
                if (INTEL_GEN(dev_priv) <= 10)
                        seq_printf(m, "PM ISR=0x%08x IIR=0x%08x\n",
                                   pm_isr, pm_iir);
                seq_printf(m, "pm_intrmsk_mbz: 0x%08x\n",
                           rps->pm_intrmsk_mbz);
                seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
                seq_printf(m, "Render p-state ratio: %d\n",
                           (gt_perf_status & (INTEL_GEN(dev_priv) >= 9 ? 0x1ff00 : 0xff00)) >> 8);
                seq_printf(m, "Render p-state VID: %d\n",
                           gt_perf_status & 0xff);
                seq_printf(m, "Render p-state limit: %d\n",
                           rp_state_limits & 0xff);
                seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
                seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl);
                seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit);
                seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
                seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
                seq_printf(m, "CAGF: %dMHz\n", cagf);
                seq_printf(m, "RP CUR UP EI: %d (%dus)\n",
                           rpupei, GT_PM_INTERVAL_TO_US(dev_priv, rpupei));
                seq_printf(m, "RP CUR UP: %d (%dus)\n",
                           rpcurup, GT_PM_INTERVAL_TO_US(dev_priv, rpcurup));
                seq_printf(m, "RP PREV UP: %d (%dus)\n",
                           rpprevup, GT_PM_INTERVAL_TO_US(dev_priv, rpprevup));
                seq_printf(m, "Up threshold: %d%%\n",
                           rps->power.up_threshold);

                seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n",
                           rpdownei, GT_PM_INTERVAL_TO_US(dev_priv, rpdownei));
                seq_printf(m, "RP CUR DOWN: %d (%dus)\n",
                           rpcurdown, GT_PM_INTERVAL_TO_US(dev_priv, rpcurdown));
                seq_printf(m, "RP PREV DOWN: %d (%dus)\n",
                           rpprevdown, GT_PM_INTERVAL_TO_US(dev_priv, rpprevdown));
                seq_printf(m, "Down threshold: %d%%\n",
                           rps->power.down_threshold);

                max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 0 :
                            rp_state_cap >> 16) & 0xff;
                max_freq *= (IS_GEN9_BC(dev_priv) ||
                             INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
                seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
                           intel_gpu_freq(rps, max_freq));

                max_freq = (rp_state_cap & 0xff00) >> 8;
                max_freq *= (IS_GEN9_BC(dev_priv) ||
                             INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
                seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
                           intel_gpu_freq(rps, max_freq));

                max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 16 :
                            rp_state_cap >> 0) & 0xff;
                max_freq *= (IS_GEN9_BC(dev_priv) ||
                             INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
                seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
                           intel_gpu_freq(rps, max_freq));
                seq_printf(m, "Max overclocked frequency: %dMHz\n",
                           intel_gpu_freq(rps, rps->max_freq));

                seq_printf(m, "Current freq: %d MHz\n",
                           intel_gpu_freq(rps, rps->cur_freq));
                seq_printf(m, "Actual freq: %d MHz\n", cagf);
                seq_printf(m, "Idle freq: %d MHz\n",
                           intel_gpu_freq(rps, rps->idle_freq));
                seq_printf(m, "Min freq: %d MHz\n",
                           intel_gpu_freq(rps, rps->min_freq));
                seq_printf(m, "Boost freq: %d MHz\n",
                           intel_gpu_freq(rps, rps->boost_freq));
                seq_printf(m, "Max freq: %d MHz\n",
                           intel_gpu_freq(rps, rps->max_freq));
                seq_printf(m,
                           "efficient (RPe) frequency: %d MHz\n",
                           intel_gpu_freq(rps, rps->efficient_freq));
        } else {
                seq_puts(m, "no P-state info available\n");
        }

        seq_printf(m, "Current CD clock frequency: %d kHz\n", dev_priv->cdclk.hw.cdclk);
        seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq);
        seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq);

        intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
        return ret;
}

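/* Ironlake render-standby (DRPC) state, decoded from MEMMODECTL/RSTDBYCTL. */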
static int ilk_drpc_info(struct seq_file *m)
{
        struct drm_i915_private *i915 = node_to_i915(m->private);
        struct intel_uncore *uncore = &i915->uncore;
        u32 rgvmodectl, rstdbyctl;
        u16 crstandvid;

        rgvmodectl = intel_uncore_read(uncore, MEMMODECTL);
        rstdbyctl = intel_uncore_read(uncore, RSTDBYCTL);
        crstandvid = intel_uncore_read16(uncore, CRSTANDVID);

        seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN));
        seq_printf(m, "Boost freq: %d\n",
                   (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
                   MEMMODE_BOOST_FREQ_SHIFT);
        seq_printf(m, "HW control enabled: %s\n",
                   yesno(rgvmodectl & MEMMODE_HWIDLE_EN));
        seq_printf(m, "SW control enabled: %s\n",
                   yesno(rgvmodectl & MEMMODE_SWMODE_EN));
        seq_printf(m, "Gated voltage change: %s\n",
                   yesno(rgvmodectl & MEMMODE_RCLK_GATE));
        seq_printf(m, "Starting frequency: P%d\n",
                   (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
        seq_printf(m, "Max P-state: P%d\n",
                   (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
        seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
        seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
        seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
        seq_printf(m, "Render standby enabled: %s\n",
                   yesno(!(rstdbyctl & RCX_SW_EXIT)));
        seq_puts(m, "Current RS state: ");
        switch (rstdbyctl & RSX_STATUS_MASK) {
        case RSX_STATUS_ON:
                seq_puts(m, "on\n");
                break;
        case RSX_STATUS_RC1:
                seq_puts(m, "RC1\n");
                break;
        case RSX_STATUS_RC1E:
                seq_puts(m, "RC1E\n");
                break;
        case RSX_STATUS_RS1:
                seq_puts(m, "RS1\n");
                break;
        case RSX_STATUS_RS2:
                seq_puts(m, "RS2 (RC6)\n");
                break;
        case RSX_STATUS_RS3:
                seq_puts(m, "RC3 (RC6+)\n");
                break;
        default:
                seq_puts(m, "unknown\n");
                break;
        }

        return 0;
}

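/* Print the wake count currently held against each forcewake domain. */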
static int i915_forcewake_domains(struct seq_file *m, void *data)
{
        struct drm_i915_private *i915 = node_to_i915(m->private);
        struct intel_uncore *uncore = &i915->uncore;
        struct intel_uncore_forcewake_domain *fw_domain;
        unsigned int tmp;

        seq_printf(m, "user.bypass_count = %u\n",
                   uncore->user_forcewake_count);

        for_each_fw_domain(fw_domain, uncore, tmp)
                seq_printf(m, "%s.wake_count = %u\n",
                           intel_uncore_forcewake_domain_to_str(fw_domain->id),
                           READ_ONCE(fw_domain->wake_count));

        return 0;
}

static void print_rc6_res(struct seq_file *m,
                          const char *title,
                          const i915_reg_t reg)
{
        struct drm_i915_private *i915 = node_to_i915(m->private);
        intel_wakeref_t wakeref;

        with_intel_runtime_pm(&i915->runtime_pm, wakeref)
                seq_printf(m, "%s %u (%llu us)\n", title,
                           intel_uncore_read(&i915->uncore, reg),
                           intel_rc6_residency_us(&i915->gt.rc6, reg));
}

static int vlv_drpc_info(struct seq_file *m)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        u32 rcctl1, pw_status;

        pw_status = I915_READ(VLV_GTLC_PW_STATUS);
        rcctl1 = I915_READ(GEN6_RC_CONTROL);

        seq_printf(m, "RC6 Enabled: %s\n",
                   yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
                                   GEN6_RC_CTL_EI_MODE(1))));
        seq_printf(m, "Render Power Well: %s\n",
                   (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
        seq_printf(m, "Media Power Well: %s\n",
                   (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");

        print_rc6_res(m, "Render RC6 residency since boot:", VLV_GT_RENDER_RC6);
        print_rc6_res(m, "Media RC6 residency since boot:", VLV_GT_MEDIA_RC6);

        return i915_forcewake_domains(m, NULL);
}

static int gen6_drpc_info(struct seq_file *m)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        u32 gt_core_status, rcctl1, rc6vids = 0;
        u32 gen9_powergate_enable = 0, gen9_powergate_status = 0;

        gt_core_status = I915_READ_FW(GEN6_GT_CORE_STATUS);
        trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);

        rcctl1 = I915_READ(GEN6_RC_CONTROL);
        if (INTEL_GEN(dev_priv) >= 9) {
                gen9_powergate_enable = I915_READ(GEN9_PG_ENABLE);
                gen9_powergate_status = I915_READ(GEN9_PWRGT_DOMAIN_STATUS);
        }

        if (INTEL_GEN(dev_priv) <= 7)
                sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS,
                                       &rc6vids, NULL);

        seq_printf(m, "RC1e Enabled: %s\n",
                   yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
        seq_printf(m, "RC6 Enabled: %s\n",
                   yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
        if (INTEL_GEN(dev_priv) >= 9) {
                seq_printf(m, "Render Well Gating Enabled: %s\n",
                           yesno(gen9_powergate_enable & GEN9_RENDER_PG_ENABLE));
                seq_printf(m, "Media Well Gating Enabled: %s\n",
                           yesno(gen9_powergate_enable & GEN9_MEDIA_PG_ENABLE));
        }
        seq_printf(m, "Deep RC6 Enabled: %s\n",
                   yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
        seq_printf(m, "Deepest RC6 Enabled: %s\n",
                   yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
        seq_puts(m, "Current RC state: ");
        switch (gt_core_status & GEN6_RCn_MASK) {
        case GEN6_RC0:
                if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
                        seq_puts(m, "Core Power Down\n");
                else
                        seq_puts(m, "on\n");
                break;
        case GEN6_RC3:
                seq_puts(m, "RC3\n");
                break;
        case GEN6_RC6:
                seq_puts(m, "RC6\n");
                break;
        case GEN6_RC7:
                seq_puts(m, "RC7\n");
                break;
        default:
                seq_puts(m, "Unknown\n");
                break;
        }

        seq_printf(m, "Core Power Down: %s\n",
                   yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
        if (INTEL_GEN(dev_priv) >= 9) {
                seq_printf(m, "Render Power Well: %s\n",
                           (gen9_powergate_status &
                            GEN9_PWRGT_RENDER_STATUS_MASK) ? "Up" : "Down");
                seq_printf(m, "Media Power Well: %s\n",
                           (gen9_powergate_status &
                            GEN9_PWRGT_MEDIA_STATUS_MASK) ? "Up" : "Down");
        }

        /* Not exactly sure what this is */
        print_rc6_res(m, "RC6 \"Locked to RPn\" residency since boot:",
                      GEN6_GT_GFX_RC6_LOCKED);
        print_rc6_res(m, "RC6 residency since boot:", GEN6_GT_GFX_RC6);
        print_rc6_res(m, "RC6+ residency since boot:", GEN6_GT_GFX_RC6p);
        print_rc6_res(m, "RC6++ residency since boot:", GEN6_GT_GFX_RC6pp);

        if (INTEL_GEN(dev_priv) <= 7) {
                seq_printf(m, "RC6 voltage: %dmV\n",
                           GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
                seq_printf(m, "RC6+ voltage: %dmV\n",
                           GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
                seq_printf(m, "RC6++ voltage: %dmV\n",
                           GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
        }

        return i915_forcewake_domains(m, NULL);
}

static int i915_drpc_info(struct seq_file *m, void *unused)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        intel_wakeref_t wakeref;
        int err = -ENODEV;

        with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
                if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
                        err = vlv_drpc_info(m);
                else if (INTEL_GEN(dev_priv) >= 6)
                        err = gen6_drpc_info(m);
                else
                        err = ilk_drpc_info(m);
        }

        return err;
}

static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);

        seq_printf(m, "FB tracking busy bits: 0x%08x\n",
                   dev_priv->fb_tracking.busy_bits);

        seq_printf(m, "FB tracking flip bits: 0x%08x\n",
                   dev_priv->fb_tracking.flip_bits);

        return 0;
}

static int i915_fbc_status(struct seq_file *m, void *unused)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct intel_fbc *fbc = &dev_priv->fbc;
        intel_wakeref_t wakeref;

        if (!HAS_FBC(dev_priv))
                return -ENODEV;

        wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
        mutex_lock(&fbc->lock);

        if (intel_fbc_is_active(dev_priv))
                seq_puts(m, "FBC enabled\n");
        else
                seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason);

        if (intel_fbc_is_active(dev_priv)) {
                u32 mask;

                if (INTEL_GEN(dev_priv) >= 8)
                        mask = I915_READ(IVB_FBC_STATUS2) & BDW_FBC_COMP_SEG_MASK;
                else if (INTEL_GEN(dev_priv) >= 7)
                        mask = I915_READ(IVB_FBC_STATUS2) & IVB_FBC_COMP_SEG_MASK;
                else if (INTEL_GEN(dev_priv) >= 5)
                        mask = I915_READ(ILK_DPFC_STATUS) & ILK_DPFC_COMP_SEG_MASK;
                else if (IS_G4X(dev_priv))
                        mask = I915_READ(DPFC_STATUS) & DPFC_COMP_SEG_MASK;
                else
                        mask = I915_READ(FBC_STATUS) & (FBC_STAT_COMPRESSING |
                                                        FBC_STAT_COMPRESSED);

                seq_printf(m, "Compressing: %s\n", yesno(mask));
        }

        mutex_unlock(&fbc->lock);
        intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);

        return 0;
}

static int i915_fbc_false_color_get(void *data, u64 *val)
{
        struct drm_i915_private *dev_priv = data;

        if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
                return -ENODEV;

        *val = dev_priv->fbc.false_color;

        return 0;
}

static int i915_fbc_false_color_set(void *data, u64 val)
{
        struct drm_i915_private *dev_priv = data;
        u32 reg;

        if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
                return -ENODEV;

        mutex_lock(&dev_priv->fbc.lock);

        reg = I915_READ(ILK_DPFC_CONTROL);
        dev_priv->fbc.false_color = val;

        I915_WRITE(ILK_DPFC_CONTROL, val ?
                   (reg | FBC_CTL_FALSE_COLOR) :
                   (reg & ~FBC_CTL_FALSE_COLOR));

        mutex_unlock(&dev_priv->fbc.lock);
        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_false_color_fops,
                        i915_fbc_false_color_get, i915_fbc_false_color_set,
                        "%llu\n");

static int i915_ips_status(struct seq_file *m, void *unused)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        intel_wakeref_t wakeref;

        if (!HAS_IPS(dev_priv))
                return -ENODEV;

        wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

        seq_printf(m, "Enabled by kernel parameter: %s\n",
                   yesno(i915_modparams.enable_ips));

        if (INTEL_GEN(dev_priv) >= 8) {
                seq_puts(m, "Currently: unknown\n");
        } else {
                if (I915_READ(IPS_CTL) & IPS_ENABLE)
                        seq_puts(m, "Currently: enabled\n");
                else
                        seq_puts(m, "Currently: disabled\n");
        }

        intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);

        return 0;
}

static int i915_sr_status(struct seq_file *m, void *unused)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        intel_wakeref_t wakeref;
        bool sr_enabled = false;

        wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);

        if (INTEL_GEN(dev_priv) >= 9)
                /* no global SR status; inspect per-plane WM */;
        else if (HAS_PCH_SPLIT(dev_priv))
                sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
        else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) ||
                 IS_I945G(dev_priv) || IS_I945GM(dev_priv))
                sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
        else if (IS_I915GM(dev_priv))
                sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
        else if (IS_PINEVIEW(dev_priv))
                sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
        else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
                sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;

        intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);

        seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled));

        return 0;
}

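/*
 * Print the ring-frequency table: for each GPU frequency, the effective
 * CPU and ring frequencies the pcode pairs with it. Only parts with an
 * LLC expose such a table. A sample read (hypothetical debugfs path):
 *
 *   cat /sys/kernel/debug/dri/0/i915_ring_freq_table
 */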
static int i915_ring_freq_table(struct seq_file *m, void *unused)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct intel_rps *rps = &dev_priv->gt.rps;
        unsigned int max_gpu_freq, min_gpu_freq;
        intel_wakeref_t wakeref;
        int gpu_freq, ia_freq;

        if (!HAS_LLC(dev_priv))
                return -ENODEV;

        min_gpu_freq = rps->min_freq;
        max_gpu_freq = rps->max_freq;
        if (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
                /* Convert GT frequency to 50 HZ units */
                min_gpu_freq /= GEN9_FREQ_SCALER;
                max_gpu_freq /= GEN9_FREQ_SCALER;
        }

        seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");

        wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
        for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
                ia_freq = gpu_freq;
                sandybridge_pcode_read(dev_priv,
                                       GEN6_PCODE_READ_MIN_FREQ_TABLE,
                                       &ia_freq, NULL);
                seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
                           intel_gpu_freq(rps,
                                          (gpu_freq *
                                           (IS_GEN9_BC(dev_priv) ||
                                            INTEL_GEN(dev_priv) >= 10 ?
                                            GEN9_FREQ_SCALER : 1))),
                           ((ia_freq >> 0) & 0xff) * 100,
                           ((ia_freq >> 8) & 0xff) * 100);
        }
        intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);

        return 0;
}

static int i915_opregion(struct seq_file *m, void *unused)
{
        struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;

        if (opregion->header)
                seq_write(m, opregion->header, OPREGION_SIZE);

        return 0;
}

static int i915_vbt(struct seq_file *m, void *unused)
{
        struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;

        if (opregion->vbt)
                seq_write(m, opregion->vbt, opregion->vbt_size);

        return 0;
}

static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct drm_device *dev = &dev_priv->drm;
        struct intel_framebuffer *fbdev_fb = NULL;
        struct drm_framebuffer *drm_fb;

#ifdef CONFIG_DRM_FBDEV_EMULATION
        if (dev_priv->fbdev && dev_priv->fbdev->helper.fb) {
                fbdev_fb = to_intel_framebuffer(dev_priv->fbdev->helper.fb);

                seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
                           fbdev_fb->base.width,
                           fbdev_fb->base.height,
                           fbdev_fb->base.format->depth,
                           fbdev_fb->base.format->cpp[0] * 8,
                           fbdev_fb->base.modifier,
                           drm_framebuffer_read_refcount(&fbdev_fb->base));
                describe_obj(m, intel_fb_obj(&fbdev_fb->base));
                seq_putc(m, '\n');
        }
#endif

        mutex_lock(&dev->mode_config.fb_lock);
        drm_for_each_fb(drm_fb, dev) {
                struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
                if (fb == fbdev_fb)
                        continue;

                seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
                           fb->base.width,
                           fb->base.height,
                           fb->base.format->depth,
                           fb->base.format->cpp[0] * 8,
                           fb->base.modifier,
                           drm_framebuffer_read_refcount(&fb->base));
                describe_obj(m, intel_fb_obj(&fb->base));
                seq_putc(m, '\n');
        }
        mutex_unlock(&dev->mode_config.fb_lock);

        return 0;
}

static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring)
{
        seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, emit: %u)",
                   ring->space, ring->head, ring->tail, ring->emit);
}

static int i915_context_status(struct seq_file *m, void *unused)
{
        struct drm_i915_private *i915 = node_to_i915(m->private);
        struct i915_gem_context *ctx, *cn;

        spin_lock(&i915->gem.contexts.lock);
        list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) {
                struct i915_gem_engines_iter it;
                struct intel_context *ce;

                if (!kref_get_unless_zero(&ctx->ref))
                        continue;

                spin_unlock(&i915->gem.contexts.lock);

                seq_puts(m, "HW context ");
                if (ctx->pid) {
                        struct task_struct *task;

                        task = get_pid_task(ctx->pid, PIDTYPE_PID);
                        if (task) {
                                seq_printf(m, "(%s [%d]) ",
                                           task->comm, task->pid);
                                put_task_struct(task);
                        }
                } else if (IS_ERR(ctx->file_priv)) {
                        seq_puts(m, "(deleted) ");
                } else {
                        seq_puts(m, "(kernel) ");
                }

                seq_putc(m, ctx->remap_slice ? 'R' : 'r');
                seq_putc(m, '\n');

                for_each_gem_engine(ce,
                                    i915_gem_context_lock_engines(ctx), it) {
                        if (intel_context_pin_if_active(ce)) {
                                seq_printf(m, "%s: ", ce->engine->name);
                                if (ce->state)
                                        describe_obj(m, ce->state->obj);
                                describe_ctx_ring(m, ce->ring);
                                seq_putc(m, '\n');
                                intel_context_unpin(ce);
                        }
                }
                i915_gem_context_unlock_engines(ctx);

                seq_putc(m, '\n');

                spin_lock(&i915->gem.contexts.lock);
                list_safe_reset_next(ctx, cn, link);
                i915_gem_context_put(ctx);
        }
        spin_unlock(&i915->gem.contexts.lock);

        return 0;
}

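/* Decode the detected bit-6 swizzle pattern into a human-readable string. */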
static const char *swizzle_string(unsigned swizzle)
{
        switch (swizzle) {
        case I915_BIT_6_SWIZZLE_NONE:
                return "none";
        case I915_BIT_6_SWIZZLE_9:
                return "bit9";
        case I915_BIT_6_SWIZZLE_9_10:
                return "bit9/bit10";
        case I915_BIT_6_SWIZZLE_9_11:
                return "bit9/bit11";
        case I915_BIT_6_SWIZZLE_9_10_11:
                return "bit9/bit10/bit11";
        case I915_BIT_6_SWIZZLE_9_17:
                return "bit9/bit17";
        case I915_BIT_6_SWIZZLE_9_10_17:
                return "bit9/bit10/bit17";
        case I915_BIT_6_SWIZZLE_UNKNOWN:
                return "unknown";
        }

        return "bug";
}

static int i915_swizzle_info(struct seq_file *m, void *data)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct intel_uncore *uncore = &dev_priv->uncore;
        intel_wakeref_t wakeref;

        wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

        seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
                   swizzle_string(dev_priv->ggtt.bit_6_swizzle_x));
        seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
                   swizzle_string(dev_priv->ggtt.bit_6_swizzle_y));

        if (IS_GEN_RANGE(dev_priv, 3, 4)) {
                seq_printf(m, "DDC = 0x%08x\n",
                           intel_uncore_read(uncore, DCC));
                seq_printf(m, "DDC2 = 0x%08x\n",
                           intel_uncore_read(uncore, DCC2));
                seq_printf(m, "C0DRB3 = 0x%04x\n",
                           intel_uncore_read16(uncore, C0DRB3));
                seq_printf(m, "C1DRB3 = 0x%04x\n",
                           intel_uncore_read16(uncore, C1DRB3));
        } else if (INTEL_GEN(dev_priv) >= 6) {
                seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
                           intel_uncore_read(uncore, MAD_DIMM_C0));
                seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
                           intel_uncore_read(uncore, MAD_DIMM_C1));
                seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
                           intel_uncore_read(uncore, MAD_DIMM_C2));
                seq_printf(m, "TILECTL = 0x%08x\n",
                           intel_uncore_read(uncore, TILECTL));
                if (INTEL_GEN(dev_priv) >= 8)
                        seq_printf(m, "GAMTARBMODE = 0x%08x\n",
                                   intel_uncore_read(uncore, GAMTARBMODE));
                else
                        seq_printf(m, "ARB_MODE = 0x%08x\n",
                                   intel_uncore_read(uncore, ARB_MODE));
                seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
                           intel_uncore_read(uncore, DISP_ARB_CTL));
        }

        if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
                seq_puts(m, "L-shaped memory detected\n");

        intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);

        return 0;
}

static const char *rps_power_to_str(unsigned int power)
{
        static const char * const strings[] = {
                [LOW_POWER] = "low power",
                [BETWEEN] = "mixed",
                [HIGH_POWER] = "high power",
        };

        if (power >= ARRAY_SIZE(strings) || !strings[power])
                return "unknown";

        return strings[power];
}

static int i915_rps_boost_info(struct seq_file *m, void *data)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct intel_rps *rps = &dev_priv->gt.rps;

        seq_printf(m, "RPS enabled? %d\n", rps->enabled);
        seq_printf(m, "GPU busy? %s\n", yesno(dev_priv->gt.awake));
        seq_printf(m, "Boosts outstanding? %d\n",
                   atomic_read(&rps->num_waiters));
        seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive));
        seq_printf(m, "Frequency requested %d, actual %d\n",
                   intel_gpu_freq(rps, rps->cur_freq),
                   intel_rps_read_actual_frequency(rps));
        seq_printf(m, " min hard:%d, soft:%d; max soft:%d, hard:%d\n",
                   intel_gpu_freq(rps, rps->min_freq),
                   intel_gpu_freq(rps, rps->min_freq_softlimit),
                   intel_gpu_freq(rps, rps->max_freq_softlimit),
                   intel_gpu_freq(rps, rps->max_freq));
        seq_printf(m, " idle:%d, efficient:%d, boost:%d\n",
                   intel_gpu_freq(rps, rps->idle_freq),
                   intel_gpu_freq(rps, rps->efficient_freq),
                   intel_gpu_freq(rps, rps->boost_freq));

        seq_printf(m, "Wait boosts: %d\n", atomic_read(&rps->boosts));

        if (INTEL_GEN(dev_priv) >= 6 && rps->enabled && dev_priv->gt.awake) {
                u32 rpup, rpupei;
                u32 rpdown, rpdownei;

                intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
                rpup = I915_READ_FW(GEN6_RP_CUR_UP) & GEN6_RP_EI_MASK;
                rpupei = I915_READ_FW(GEN6_RP_CUR_UP_EI) & GEN6_RP_EI_MASK;
                rpdown = I915_READ_FW(GEN6_RP_CUR_DOWN) & GEN6_RP_EI_MASK;
                rpdownei = I915_READ_FW(GEN6_RP_CUR_DOWN_EI) & GEN6_RP_EI_MASK;
                intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);

                seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n",
                           rps_power_to_str(rps->power.mode));
                seq_printf(m, " Avg. up: %d%% [above threshold? %d%%]\n",
                           rpup && rpupei ? 100 * rpup / rpupei : 0,
                           rps->power.up_threshold);
                seq_printf(m, " Avg. down: %d%% [below threshold? %d%%]\n",
                           rpdown && rpdownei ? 100 * rpdown / rpdownei : 0,
                           rps->power.down_threshold);
        } else {
                seq_puts(m, "\nRPS Autotuning inactive\n");
        }

        return 0;
}

static int i915_llc(struct seq_file *m, void *data)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        const bool edram = INTEL_GEN(dev_priv) > 8;

        seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev_priv)));
        seq_printf(m, "%s: %uMB\n", edram ? "eDRAM" : "eLLC",
                   dev_priv->edram_size_mb);

        return 0;
}

static int i915_huc_load_status_info(struct seq_file *m, void *data)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        intel_wakeref_t wakeref;
        struct drm_printer p;

        if (!HAS_GT_UC(dev_priv))
                return -ENODEV;

        p = drm_seq_file_printer(m);
        intel_uc_fw_dump(&dev_priv->gt.uc.huc.fw, &p);

        with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
                seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2));

        return 0;
}

static int i915_guc_load_status_info(struct seq_file *m, void *data)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        intel_wakeref_t wakeref;
        struct drm_printer p;

        if (!HAS_GT_UC(dev_priv))
                return -ENODEV;

        p = drm_seq_file_printer(m);
        intel_uc_fw_dump(&dev_priv->gt.uc.guc.fw, &p);

        with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
                u32 tmp = I915_READ(GUC_STATUS);
                u32 i;

                seq_printf(m, "\nGuC status 0x%08x:\n", tmp);
                seq_printf(m, "\tBootrom status = 0x%x\n",
                           (tmp & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT);
                seq_printf(m, "\tuKernel status = 0x%x\n",
                           (tmp & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT);
                seq_printf(m, "\tMIA Core status = 0x%x\n",
                           (tmp & GS_MIA_MASK) >> GS_MIA_SHIFT);
                seq_puts(m, "\nScratch registers:\n");
                for (i = 0; i < 16; i++) {
                        seq_printf(m, "\t%2d: \t0x%x\n",
                                   i, I915_READ(SOFT_SCRATCH(i)));
                }
        }

        return 0;
}

static const char *
stringify_guc_log_type(enum guc_log_buffer_type type)
{
        switch (type) {
        case GUC_ISR_LOG_BUFFER:
                return "ISR";
        case GUC_DPC_LOG_BUFFER:
                return "DPC";
        case GUC_CRASH_DUMP_LOG_BUFFER:
                return "CRASH";
        default:
                MISSING_CASE(type);
        }

        return "";
}

static void i915_guc_log_info(struct seq_file *m,
                              struct drm_i915_private *dev_priv)
{
        struct intel_guc_log *log = &dev_priv->gt.uc.guc.log;
        enum guc_log_buffer_type type;

        if (!intel_guc_log_relay_created(log)) {
                seq_puts(m, "GuC log relay not created\n");
                return;
        }

        seq_puts(m, "GuC logging stats:\n");

        seq_printf(m, "\tRelay full count: %u\n",
                   log->relay.full_count);

        for (type = GUC_ISR_LOG_BUFFER; type < GUC_MAX_LOG_BUFFER; type++) {
                seq_printf(m, "\t%s:\tflush count %10u, overflow count %10u\n",
                           stringify_guc_log_type(type),
                           log->stats[type].flush,
                           log->stats[type].sampled_overflow);
        }
}

static int i915_guc_info(struct seq_file *m, void *data)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);

        if (!USES_GUC(dev_priv))
                return -ENODEV;

        i915_guc_log_info(m, dev_priv);

        /* Add more as required ... */

        return 0;
}

static int i915_guc_stage_pool(struct seq_file *m, void *data)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        const struct intel_guc *guc = &dev_priv->gt.uc.guc;
        struct guc_stage_desc *desc = guc->stage_desc_pool_vaddr;
        int index;

        if (!USES_GUC_SUBMISSION(dev_priv))
                return -ENODEV;

        for (index = 0; index < GUC_MAX_STAGE_DESCRIPTORS; index++, desc++) {
                struct intel_engine_cs *engine;

                if (!(desc->attribute & GUC_STAGE_DESC_ATTR_ACTIVE))
                        continue;

                seq_printf(m, "GuC stage descriptor %u:\n", index);
                seq_printf(m, "\tIndex: %u\n", desc->stage_id);
                seq_printf(m, "\tAttribute: 0x%x\n", desc->attribute);
                seq_printf(m, "\tPriority: %d\n", desc->priority);
                seq_printf(m, "\tDoorbell id: %d\n", desc->db_id);
                seq_printf(m, "\tEngines used: 0x%x\n",
                           desc->engines_used);
                seq_printf(m, "\tDoorbell trigger phy: 0x%llx, cpu: 0x%llx, uK: 0x%x\n",
                           desc->db_trigger_phy,
                           desc->db_trigger_cpu,
                           desc->db_trigger_uk);
                seq_printf(m, "\tProcess descriptor: 0x%x\n",
                           desc->process_desc);
                seq_printf(m, "\tWorkqueue address: 0x%x, size: 0x%x\n",
                           desc->wq_addr, desc->wq_size);
                seq_putc(m, '\n');

                for_each_uabi_engine(engine, dev_priv) {
                        u32 guc_engine_id = engine->guc_id;
                        struct guc_execlist_context *lrc =
                                &desc->lrc[guc_engine_id];

                        seq_printf(m, "\t%s LRC:\n", engine->name);
                        seq_printf(m, "\t\tContext desc: 0x%x\n",
                                   lrc->context_desc);
                        seq_printf(m, "\t\tContext id: 0x%x\n", lrc->context_id);
                        seq_printf(m, "\t\tLRCA: 0x%x\n", lrc->ring_lrca);
                        seq_printf(m, "\t\tRing begin: 0x%x\n", lrc->ring_begin);
                        seq_printf(m, "\t\tRing end: 0x%x\n", lrc->ring_end);
                        seq_putc(m, '\n');
                }
        }

        return 0;
}

1845 static int i915_guc_log_dump(struct seq_file *m, void *data)
1847 struct drm_info_node *node = m->private;
1848 struct drm_i915_private *dev_priv = node_to_i915(node);
1849 bool dump_load_err = !!node->info_ent->data;
1850 struct drm_i915_gem_object *obj = NULL;
1851 u32 *log;
1852 int i = 0;
1854 if (!HAS_GT_UC(dev_priv))
1855 return -ENODEV;
1857 if (dump_load_err)
1858 obj = dev_priv->gt.uc.load_err_log;
1859 else if (dev_priv->gt.uc.guc.log.vma)
1860 obj = dev_priv->gt.uc.guc.log.vma->obj;
1862 if (!obj)
1863 return 0;
1865 log = i915_gem_object_pin_map(obj, I915_MAP_WC);
1866 if (IS_ERR(log)) {
1867 DRM_DEBUG("Failed to pin object\n");
1868 seq_puts(m, "(log data inaccessible)\n");
1869 return PTR_ERR(log);
1870 }
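/* Dump the pinned log object as rows of four raw u32 words. */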
1872 for (i = 0; i < obj->base.size / sizeof(u32); i += 4)
1873 seq_printf(m, "0x%08x 0x%08x 0x%08x 0x%08x\n",
1874 *(log + i), *(log + i + 1),
1875 *(log + i + 2), *(log + i + 3));
1877 seq_putc(m, '\n');
1879 i915_gem_object_unpin_map(obj);
1881 return 0;
1884 static int i915_guc_log_level_get(void *data, u64 *val)
1886 struct drm_i915_private *dev_priv = data;
1888 if (!USES_GUC(dev_priv))
1889 return -ENODEV;
1891 *val = intel_guc_log_get_level(&dev_priv->gt.uc.guc.log);
1893 return 0;
1896 static int i915_guc_log_level_set(void *data, u64 val)
1898 struct drm_i915_private *dev_priv = data;
1900 if (!USES_GUC(dev_priv))
1901 return -ENODEV;
1903 return intel_guc_log_set_level(&dev_priv->gt.uc.guc.log, val);
1906 DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_level_fops,
1907 i915_guc_log_level_get, i915_guc_log_level_set,
1908 "%lld\n");
1910 static int i915_guc_log_relay_open(struct inode *inode, struct file *file)
1912 struct drm_i915_private *i915 = inode->i_private;
1913 struct intel_guc *guc = &i915->gt.uc.guc;
1914 struct intel_guc_log *log = &guc->log;
1916 if (!intel_guc_is_running(guc))
1917 return -ENODEV;
1919 file->private_data = log;
1921 return intel_guc_log_relay_open(log);
1924 static ssize_t
1925 i915_guc_log_relay_write(struct file *filp,
1926 const char __user *ubuf,
1927 size_t cnt,
1928 loff_t *ppos)
1930 struct intel_guc_log *log = filp->private_data;
1931 int val;
1932 int ret;
1934 ret = kstrtoint_from_user(ubuf, cnt, 0, &val);
1935 if (ret < 0)
1936 return ret;
1938 /*
1939 * Enable and start the GuC log relay on a value of 1.
1940 * Flush the log relay for any other value.
1941 */
1942 if (val == 1)
1943 ret = intel_guc_log_relay_start(log);
1944 else
1945 intel_guc_log_relay_flush(log);
1947 return ret ?: cnt;
1950 static int i915_guc_log_relay_release(struct inode *inode, struct file *file)
1952 struct drm_i915_private *i915 = inode->i_private;
1953 struct intel_guc *guc = &i915->gt.uc.guc;
1955 intel_guc_log_relay_close(&guc->log);
1956 return 0;
1959 static const struct file_operations i915_guc_log_relay_fops = {
1960 .owner = THIS_MODULE,
1961 .open = i915_guc_log_relay_open,
1962 .write = i915_guc_log_relay_write,
1963 .release = i915_guc_log_relay_release,
1964 };
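/*
 * Relay lifecycle sketch based on the handlers above: open requires a
 * running GuC and opens the relay channel, writing "1" starts the relay
 * via intel_guc_log_relay_start(), any other integer merely flushes
 * pending data, and release closes the relay again.
 */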
1966 static int i915_psr_sink_status_show(struct seq_file *m, void *data)
1968 u8 val;
1969 static const char * const sink_status[] = {
1970 "inactive",
1971 "transition to active, capture and display",
1972 "active, display from RFB",
1973 "active, capture and display on sink device timings",
1974 "transition to inactive, capture and display, timing re-sync",
1975 "reserved",
1976 "reserved",
1977 "sink internal error",
1979 struct drm_connector *connector = m->private;
1980 struct drm_i915_private *dev_priv = to_i915(connector->dev);
1981 struct intel_dp *intel_dp =
1982 enc_to_intel_dp(intel_attached_encoder(to_intel_connector(connector)));
1983 int ret;
1985 if (!CAN_PSR(dev_priv)) {
1986 seq_puts(m, "PSR Unsupported\n");
1987 return -ENODEV;
1990 if (connector->status != connector_status_connected)
1991 return -ENODEV;
1993 ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val);
1995 if (ret == 1) {
1996 const char *str = "unknown";
1998 val &= DP_PSR_SINK_STATE_MASK;
1999 if (val < ARRAY_SIZE(sink_status))
2000 str = sink_status[val];
2001 seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val, str);
2002 } else {
2003 return ret;
2006 return 0;
2008 DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
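/*
 * Note on the success check above: drm_dp_dpcd_readb() returns the
 * number of bytes transferred, so "ret == 1" means the single
 * DP_PSR_STATUS byte was read from the sink; anything else is
 * propagated to the caller as-is.
 */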
2010 static void
2011 psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m)
2013 u32 val, status_val;
2014 const char *status = "unknown";
2016 if (dev_priv->psr.psr2_enabled) {
2017 static const char * const live_status[] = {
2018 "IDLE",
2019 "CAPTURE",
2020 "CAPTURE_FS",
2021 "SLEEP",
2022 "BUFON_FW",
2023 "ML_UP",
2024 "SU_STANDBY",
2025 "FAST_SLEEP",
2026 "DEEP_SLEEP",
2027 "BUF_ON",
2028 "TG_ON"
2030 val = I915_READ(EDP_PSR2_STATUS(dev_priv->psr.transcoder));
2031 status_val = (val & EDP_PSR2_STATUS_STATE_MASK) >>
2032 EDP_PSR2_STATUS_STATE_SHIFT;
2033 if (status_val < ARRAY_SIZE(live_status))
2034 status = live_status[status_val];
2035 } else {
2036 static const char * const live_status[] = {
2037 "IDLE",
2038 "SRDONACK",
2039 "SRDENT",
2040 "BUFOFF",
2041 "BUFON",
2042 "AUXACK",
2043 "SRDOFFACK",
2044 "SRDENT_ON",
2046 val = I915_READ(EDP_PSR_STATUS(dev_priv->psr.transcoder));
2047 status_val = (val & EDP_PSR_STATUS_STATE_MASK) >>
2048 EDP_PSR_STATUS_STATE_SHIFT;
2049 if (status_val < ARRAY_SIZE(live_status))
2050 status = live_status[status_val];
2053 seq_printf(m, "Source PSR status: %s [0x%08x]\n", status, val);
2056 static int i915_edp_psr_status(struct seq_file *m, void *data)
2058 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2059 struct i915_psr *psr = &dev_priv->psr;
2060 intel_wakeref_t wakeref;
2061 const char *status;
2062 bool enabled;
2063 u32 val;
2065 if (!HAS_PSR(dev_priv))
2066 return -ENODEV;
2068 seq_printf(m, "Sink support: %s", yesno(psr->sink_support));
2069 if (psr->dp)
2070 seq_printf(m, " [0x%02x]", psr->dp->psr_dpcd[0]);
2071 seq_puts(m, "\n");
2073 if (!psr->sink_support)
2074 return 0;
2076 wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
2077 mutex_lock(&psr->lock);
2079 if (psr->enabled)
2080 status = psr->psr2_enabled ? "PSR2 enabled" : "PSR1 enabled";
2081 else
2082 status = "disabled";
2083 seq_printf(m, "PSR mode: %s\n", status);
2085 if (!psr->enabled) {
2086 seq_printf(m, "PSR sink not reliable: %s\n",
2087 yesno(psr->sink_not_reliable));
2089 goto unlock;
2092 if (psr->psr2_enabled) {
2093 val = I915_READ(EDP_PSR2_CTL(dev_priv->psr.transcoder));
2094 enabled = val & EDP_PSR2_ENABLE;
2095 } else {
2096 val = I915_READ(EDP_PSR_CTL(dev_priv->psr.transcoder));
2097 enabled = val & EDP_PSR_ENABLE;
2099 seq_printf(m, "Source PSR ctl: %s [0x%08x]\n",
2100 enableddisabled(enabled), val);
2101 psr_source_status(dev_priv, m);
2102 seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
2103 psr->busy_frontbuffer_bits);
2105 /*
2106 * SKL+ Perf counter is reset to 0 every time DC state is entered
2107 */
2108 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
2109 val = I915_READ(EDP_PSR_PERF_CNT(dev_priv->psr.transcoder));
2110 val &= EDP_PSR_PERF_CNT_MASK;
2111 seq_printf(m, "Performance counter: %u\n", val);
2114 if (psr->debug & I915_PSR_DEBUG_IRQ) {
2115 seq_printf(m, "Last attempted entry at: %lld\n",
2116 psr->last_entry_attempt);
2117 seq_printf(m, "Last exit at: %lld\n", psr->last_exit);
2120 if (psr->psr2_enabled) {
2121 u32 su_frames_val[3];
2122 int frame;
2124 /*
2125 * Read all 3 registers beforehand to minimize crossing a
2126 * frame boundary between register reads
2127 */
2128 for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) {
2129 val = I915_READ(PSR2_SU_STATUS(dev_priv->psr.transcoder,
2130 frame));
2131 su_frames_val[frame / 3] = val;
2134 seq_puts(m, "Frame:\tPSR2 SU blocks:\n");
2136 for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
2137 u32 su_blocks;
2139 su_blocks = su_frames_val[frame / 3] &
2140 PSR2_SU_STATUS_MASK(frame);
2141 su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
2142 seq_printf(m, "%d\t%d\n", frame, su_blocks);
2146 unlock:
2147 mutex_unlock(&psr->lock);
2148 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
2150 return 0;
2153 static int
2154 i915_edp_psr_debug_set(void *data, u64 val)
2156 struct drm_i915_private *dev_priv = data;
2157 intel_wakeref_t wakeref;
2158 int ret;
2160 if (!CAN_PSR(dev_priv))
2161 return -ENODEV;
2163 DRM_DEBUG_KMS("Setting PSR debug to %llx\n", val);
2165 wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
2167 ret = intel_psr_debug_set(dev_priv, val);
2169 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
2171 return ret;
2174 static int
2175 i915_edp_psr_debug_get(void *data, u64 *val)
2177 struct drm_i915_private *dev_priv = data;
2179 if (!CAN_PSR(dev_priv))
2180 return -ENODEV;
2182 *val = READ_ONCE(dev_priv->psr.debug);
2183 return 0;
2186 DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
2187 i915_edp_psr_debug_get, i915_edp_psr_debug_set,
2188 "%llu\n");
2190 static int i915_energy_uJ(struct seq_file *m, void *data)
2192 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2193 unsigned long long power;
2194 intel_wakeref_t wakeref;
2195 u32 units;
2197 if (INTEL_GEN(dev_priv) < 6)
2198 return -ENODEV;
2200 if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &power))
2201 return -ENODEV;
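/*
 * MSR_RAPL_POWER_UNIT keeps the energy status unit in bits 12:8 as a
 * power of two: one raw count is 1/2^ESU joules. Multiplying by 10^6
 * before the shift below therefore converts the raw counter to
 * microjoules.
 */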
2203 units = (power & 0x1f00) >> 8;
2204 with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
2205 power = I915_READ(MCH_SECP_NRG_STTS);
2207 power = (1000000 * power) >> units; /* convert to uJ */
2208 seq_printf(m, "%llu", power);
2210 return 0;
2213 static int i915_runtime_pm_status(struct seq_file *m, void *unused)
2215 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2216 struct pci_dev *pdev = dev_priv->drm.pdev;
2218 if (!HAS_RUNTIME_PM(dev_priv))
2219 seq_puts(m, "Runtime power management not supported\n");
2221 seq_printf(m, "Runtime power status: %s\n",
2222 enableddisabled(!dev_priv->power_domains.wakeref));
2224 seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->gt.awake));
2225 seq_printf(m, "IRQs disabled: %s\n",
2226 yesno(!intel_irqs_enabled(dev_priv)));
2227 #ifdef CONFIG_PM
2228 seq_printf(m, "Usage count: %d\n",
2229 atomic_read(&dev_priv->drm.dev->power.usage_count));
2230 #else
2231 seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n");
2232 #endif
2233 seq_printf(m, "PCI device power state: %s [%d]\n",
2234 pci_power_name(pdev->current_state),
2235 pdev->current_state);
2237 if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)) {
2238 struct drm_printer p = drm_seq_file_printer(m);
2240 print_intel_runtime_pm_wakeref(&dev_priv->runtime_pm, &p);
2243 return 0;
2246 static int i915_power_domain_info(struct seq_file *m, void *unused)
2248 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2249 struct i915_power_domains *power_domains = &dev_priv->power_domains;
2250 int i;
2252 mutex_lock(&power_domains->lock);
2254 seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
2255 for (i = 0; i < power_domains->power_well_count; i++) {
2256 struct i915_power_well *power_well;
2257 enum intel_display_power_domain power_domain;
2259 power_well = &power_domains->power_wells[i];
2260 seq_printf(m, "%-25s %d\n", power_well->desc->name,
2261 power_well->count);
2263 for_each_power_domain(power_domain, power_well->desc->domains)
2264 seq_printf(m, " %-23s %d\n",
2265 intel_display_power_domain_str(power_domain),
2266 power_domains->domain_use_count[power_domain]);
2269 mutex_unlock(&power_domains->lock);
2271 return 0;
2274 static int i915_dmc_info(struct seq_file *m, void *unused)
2276 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2277 intel_wakeref_t wakeref;
2278 struct intel_csr *csr;
2279 i915_reg_t dc5_reg, dc6_reg = {};
2281 if (!HAS_CSR(dev_priv))
2282 return -ENODEV;
2284 csr = &dev_priv->csr;
2286 wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
2288 seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL));
2289 seq_printf(m, "path: %s\n", csr->fw_path);
2291 if (!csr->dmc_payload)
2292 goto out;
2294 seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
2295 CSR_VERSION_MINOR(csr->version));
2297 if (INTEL_GEN(dev_priv) >= 12) {
2298 dc5_reg = TGL_DMC_DEBUG_DC5_COUNT;
2299 dc6_reg = TGL_DMC_DEBUG_DC6_COUNT;
2300 /*
2301 * NOTE: DMC_DEBUG3 is a general purpose reg.
2302 * According to B.Specs:49196 DMC f/w reuses DC5/6 counter
2303 * reg for DC3CO debugging and validation,
2304 * but TGL DMC f/w uses the DMC_DEBUG3 reg for the DC3CO counter.
2305 */
2306 seq_printf(m, "DC3CO count: %d\n", I915_READ(DMC_DEBUG3));
2307 } else {
2308 dc5_reg = IS_BROXTON(dev_priv) ? BXT_CSR_DC3_DC5_COUNT :
2309 SKL_CSR_DC3_DC5_COUNT;
2310 if (!IS_GEN9_LP(dev_priv))
2311 dc6_reg = SKL_CSR_DC5_DC6_COUNT;
2314 seq_printf(m, "DC3 -> DC5 count: %d\n", I915_READ(dc5_reg));
2315 if (dc6_reg.reg)
2316 seq_printf(m, "DC5 -> DC6 count: %d\n", I915_READ(dc6_reg));
2318 out:
2319 seq_printf(m, "program base: 0x%08x\n", I915_READ(CSR_PROGRAM(0)));
2320 seq_printf(m, "ssp base: 0x%08x\n", I915_READ(CSR_SSP_BASE));
2321 seq_printf(m, "htp: 0x%08x\n", I915_READ(CSR_HTP_SKL));
2323 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
2325 return 0;
2328 static void intel_seq_print_mode(struct seq_file *m, int tabs,
2329 const struct drm_display_mode *mode)
2331 int i;
2333 for (i = 0; i < tabs; i++)
2334 seq_putc(m, '\t');
2336 seq_printf(m, DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
2339 static void intel_encoder_info(struct seq_file *m,
2340 struct intel_crtc *crtc,
2341 struct intel_encoder *encoder)
2343 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2344 struct drm_connector_list_iter conn_iter;
2345 struct drm_connector *connector;
2347 seq_printf(m, "\t[ENCODER:%d:%s]: connectors:\n",
2348 encoder->base.base.id, encoder->base.name);
2350 drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
2351 drm_for_each_connector_iter(connector, &conn_iter) {
2352 const struct drm_connector_state *conn_state =
2353 connector->state;
2355 if (conn_state->best_encoder != &encoder->base)
2356 continue;
2358 seq_printf(m, "\t\t[CONNECTOR:%d:%s]\n",
2359 connector->base.id, connector->name);
2361 drm_connector_list_iter_end(&conn_iter);
2364 static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
2366 const struct drm_display_mode *mode = panel->fixed_mode;
2368 seq_printf(m, "\tfixed mode: " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
2371 static void intel_hdcp_info(struct seq_file *m,
2372 struct intel_connector *intel_connector)
2374 bool hdcp_cap, hdcp2_cap;
2376 hdcp_cap = intel_hdcp_capable(intel_connector);
2377 hdcp2_cap = intel_hdcp2_capable(intel_connector);
2379 if (hdcp_cap)
2380 seq_puts(m, "HDCP1.4 ");
2381 if (hdcp2_cap)
2382 seq_puts(m, "HDCP2.2 ");
2384 if (!hdcp_cap && !hdcp2_cap)
2385 seq_puts(m, "None");
2387 seq_puts(m, "\n");
2390 static void intel_dp_info(struct seq_file *m,
2391 struct intel_connector *intel_connector)
2393 struct intel_encoder *intel_encoder = intel_connector->encoder;
2394 struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder);
2396 seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
2397 seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
2398 if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
2399 intel_panel_info(m, &intel_connector->panel);
2401 drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports,
2402 &intel_dp->aux);
2403 if (intel_connector->hdcp.shim) {
2404 seq_puts(m, "\tHDCP version: ");
2405 intel_hdcp_info(m, intel_connector);
2409 static void intel_dp_mst_info(struct seq_file *m,
2410 struct intel_connector *intel_connector)
2412 struct intel_encoder *intel_encoder = intel_connector->encoder;
2413 struct intel_dp_mst_encoder *intel_mst =
2414 enc_to_mst(intel_encoder);
2415 struct intel_digital_port *intel_dig_port = intel_mst->primary;
2416 struct intel_dp *intel_dp = &intel_dig_port->dp;
2417 bool has_audio = drm_dp_mst_port_has_audio(&intel_dp->mst_mgr,
2418 intel_connector->port);
2420 seq_printf(m, "\taudio support: %s\n", yesno(has_audio));
2423 static void intel_hdmi_info(struct seq_file *m,
2424 struct intel_connector *intel_connector)
2426 struct intel_encoder *intel_encoder = intel_connector->encoder;
2427 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(intel_encoder);
2429 seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio));
2430 if (intel_connector->hdcp.shim) {
2431 seq_puts(m, "\tHDCP version: ");
2432 intel_hdcp_info(m, intel_connector);
2436 static void intel_lvds_info(struct seq_file *m,
2437 struct intel_connector *intel_connector)
2439 intel_panel_info(m, &intel_connector->panel);
2442 static void intel_connector_info(struct seq_file *m,
2443 struct drm_connector *connector)
2445 struct intel_connector *intel_connector = to_intel_connector(connector);
2446 const struct drm_connector_state *conn_state = connector->state;
2447 struct intel_encoder *encoder =
2448 to_intel_encoder(conn_state->best_encoder);
2449 const struct drm_display_mode *mode;
2451 seq_printf(m, "[CONNECTOR:%d:%s]: status: %s\n",
2452 connector->base.id, connector->name,
2453 drm_get_connector_status_name(connector->status));
2455 if (connector->status == connector_status_disconnected)
2456 return;
2458 seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
2459 connector->display_info.width_mm,
2460 connector->display_info.height_mm);
2461 seq_printf(m, "\tsubpixel order: %s\n",
2462 drm_get_subpixel_order_name(connector->display_info.subpixel_order));
2463 seq_printf(m, "\tCEA rev: %d\n", connector->display_info.cea_rev);
2465 if (!encoder)
2466 return;
2468 switch (connector->connector_type) {
2469 case DRM_MODE_CONNECTOR_DisplayPort:
2470 case DRM_MODE_CONNECTOR_eDP:
2471 if (encoder->type == INTEL_OUTPUT_DP_MST)
2472 intel_dp_mst_info(m, intel_connector);
2473 else
2474 intel_dp_info(m, intel_connector);
2475 break;
2476 case DRM_MODE_CONNECTOR_LVDS:
2477 if (encoder->type == INTEL_OUTPUT_LVDS)
2478 intel_lvds_info(m, intel_connector);
2479 break;
2480 case DRM_MODE_CONNECTOR_HDMIA:
2481 if (encoder->type == INTEL_OUTPUT_HDMI ||
2482 encoder->type == INTEL_OUTPUT_DDI)
2483 intel_hdmi_info(m, intel_connector);
2484 break;
2485 default:
2486 break;
2489 seq_printf(m, "\tmodes:\n");
2490 list_for_each_entry(mode, &connector->modes, head)
2491 intel_seq_print_mode(m, 2, mode);
2494 static const char *plane_type(enum drm_plane_type type)
2496 switch (type) {
2497 case DRM_PLANE_TYPE_OVERLAY:
2498 return "OVL";
2499 case DRM_PLANE_TYPE_PRIMARY:
2500 return "PRI";
2501 case DRM_PLANE_TYPE_CURSOR:
2502 return "CUR";
2503 /*
2504 * Deliberately omitting default: to generate compiler warnings
2505 * when a new drm_plane_type gets added.
2506 */
2507 }
2509 return "unknown";
2512 static void plane_rotation(char *buf, size_t bufsize, unsigned int rotation)
2513 {
2514 /*
2515 * According to the docs only one DRM_MODE_ROTATE_ value is allowed,
2516 * but print them all so that any misuse of the field is visible
2517 */
2518 snprintf(buf, bufsize,
2519 "%s%s%s%s%s%s(0x%08x)",
2520 (rotation & DRM_MODE_ROTATE_0) ? "0 " : "",
2521 (rotation & DRM_MODE_ROTATE_90) ? "90 " : "",
2522 (rotation & DRM_MODE_ROTATE_180) ? "180 " : "",
2523 (rotation & DRM_MODE_ROTATE_270) ? "270 " : "",
2524 (rotation & DRM_MODE_REFLECT_X) ? "FLIPX " : "",
2525 (rotation & DRM_MODE_REFLECT_Y) ? "FLIPY " : "",
2526 rotation);
2529 static void intel_plane_uapi_info(struct seq_file *m, struct intel_plane *plane)
2531 const struct intel_plane_state *plane_state =
2532 to_intel_plane_state(plane->base.state);
2533 const struct drm_framebuffer *fb = plane_state->uapi.fb;
2534 struct drm_format_name_buf format_name;
2535 struct drm_rect src, dst;
2536 char rot_str[48];
2538 src = drm_plane_state_src(&plane_state->uapi);
2539 dst = drm_plane_state_dest(&plane_state->uapi);
2541 if (fb)
2542 drm_get_format_name(fb->format->format, &format_name);
2544 plane_rotation(rot_str, sizeof(rot_str),
2545 plane_state->uapi.rotation);
2547 seq_printf(m, "\t\tuapi: fb=%d,%s,%dx%d, src=" DRM_RECT_FP_FMT ", dst=" DRM_RECT_FMT ", rotation=%s\n",
2548 fb ? fb->base.id : 0, fb ? format_name.str : "n/a",
2549 fb ? fb->width : 0, fb ? fb->height : 0,
2550 DRM_RECT_FP_ARG(&src),
2551 DRM_RECT_ARG(&dst),
2552 rot_str);
2555 static void intel_plane_hw_info(struct seq_file *m, struct intel_plane *plane)
2557 const struct intel_plane_state *plane_state =
2558 to_intel_plane_state(plane->base.state);
2559 const struct drm_framebuffer *fb = plane_state->hw.fb;
2560 struct drm_format_name_buf format_name;
2561 char rot_str[48];
2563 if (!fb)
2564 return;
2566 drm_get_format_name(fb->format->format, &format_name);
2568 plane_rotation(rot_str, sizeof(rot_str),
2569 plane_state->hw.rotation);
2571 seq_printf(m, "\t\thw: fb=%d,%s,%dx%d, visible=%s, src=" DRM_RECT_FP_FMT ", dst=" DRM_RECT_FMT ", rotation=%s\n",
2572 fb->base.id, format_name.str,
2573 fb->width, fb->height,
2574 yesno(plane_state->uapi.visible),
2575 DRM_RECT_FP_ARG(&plane_state->uapi.src),
2576 DRM_RECT_ARG(&plane_state->uapi.dst),
2577 rot_str);
2580 static void intel_plane_info(struct seq_file *m, struct intel_crtc *crtc)
2582 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2583 struct intel_plane *plane;
2585 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
2586 seq_printf(m, "\t[PLANE:%d:%s]: type=%s\n",
2587 plane->base.base.id, plane->base.name,
2588 plane_type(plane->base.type));
2589 intel_plane_uapi_info(m, plane);
2590 intel_plane_hw_info(m, plane);
2594 static void intel_scaler_info(struct seq_file *m, struct intel_crtc *crtc)
2596 const struct intel_crtc_state *crtc_state =
2597 to_intel_crtc_state(crtc->base.state);
2598 int num_scalers = crtc->num_scalers;
2599 int i;
2601 /* Not all platforms have a scaler */
2602 if (num_scalers) {
2603 seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d",
2604 num_scalers,
2605 crtc_state->scaler_state.scaler_users,
2606 crtc_state->scaler_state.scaler_id);
2608 for (i = 0; i < num_scalers; i++) {
2609 const struct intel_scaler *sc =
2610 &crtc_state->scaler_state.scalers[i];
2612 seq_printf(m, ", scalers[%d]: use=%s, mode=%x",
2613 i, yesno(sc->in_use), sc->mode);
2615 seq_puts(m, "\n");
2616 } else {
2617 seq_puts(m, "\tNo scalers available on this platform\n");
2621 static void intel_crtc_info(struct seq_file *m, struct intel_crtc *crtc)
2623 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2624 const struct intel_crtc_state *crtc_state =
2625 to_intel_crtc_state(crtc->base.state);
2626 struct intel_encoder *encoder;
2628 seq_printf(m, "[CRTC:%d:%s]:\n",
2629 crtc->base.base.id, crtc->base.name);
2631 seq_printf(m, "\tuapi: enable=%s, active=%s, mode=" DRM_MODE_FMT "\n",
2632 yesno(crtc_state->uapi.enable),
2633 yesno(crtc_state->uapi.active),
2634 DRM_MODE_ARG(&crtc_state->uapi.mode));
2636 if (crtc_state->hw.enable) {
2637 seq_printf(m, "\thw: active=%s, adjusted_mode=" DRM_MODE_FMT "\n",
2638 yesno(crtc_state->hw.active),
2639 DRM_MODE_ARG(&crtc_state->hw.adjusted_mode));
2641 seq_printf(m, "\tpipe src size=%dx%d, dither=%s, bpp=%d\n",
2642 crtc_state->pipe_src_w, crtc_state->pipe_src_h,
2643 yesno(crtc_state->dither), crtc_state->pipe_bpp);
2645 intel_scaler_info(m, crtc);
2648 for_each_intel_encoder_mask(&dev_priv->drm, encoder,
2649 crtc_state->uapi.encoder_mask)
2650 intel_encoder_info(m, crtc, encoder);
2652 intel_plane_info(m, crtc);
2654 seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s\n",
2655 yesno(!crtc->cpu_fifo_underrun_disabled),
2656 yesno(!crtc->pch_fifo_underrun_disabled));
2659 static int i915_display_info(struct seq_file *m, void *unused)
2661 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2662 struct drm_device *dev = &dev_priv->drm;
2663 struct intel_crtc *crtc;
2664 struct drm_connector *connector;
2665 struct drm_connector_list_iter conn_iter;
2666 intel_wakeref_t wakeref;
2668 wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
2670 drm_modeset_lock_all(dev);
2672 seq_printf(m, "CRTC info\n");
2673 seq_printf(m, "---------\n");
2674 for_each_intel_crtc(dev, crtc)
2675 intel_crtc_info(m, crtc);
2677 seq_printf(m, "\n");
2678 seq_printf(m, "Connector info\n");
2679 seq_printf(m, "--------------\n");
2680 drm_connector_list_iter_begin(dev, &conn_iter);
2681 drm_for_each_connector_iter(connector, &conn_iter)
2682 intel_connector_info(m, connector);
2683 drm_connector_list_iter_end(&conn_iter);
2685 drm_modeset_unlock_all(dev);
2687 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
2689 return 0;
2692 static int i915_engine_info(struct seq_file *m, void *unused)
2694 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2695 struct intel_engine_cs *engine;
2696 intel_wakeref_t wakeref;
2697 struct drm_printer p;
2699 wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
2701 seq_printf(m, "GT awake? %s [%d]\n",
2702 yesno(dev_priv->gt.awake),
2703 atomic_read(&dev_priv->gt.wakeref.count));
2704 seq_printf(m, "CS timestamp frequency: %u kHz\n",
2705 RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_khz);
2707 p = drm_seq_file_printer(m);
2708 for_each_uabi_engine(engine, dev_priv)
2709 intel_engine_dump(engine, &p, "%s\n", engine->name);
2711 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
2713 return 0;
2716 static int i915_rcs_topology(struct seq_file *m, void *unused)
2718 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2719 struct drm_printer p = drm_seq_file_printer(m);
2721 intel_device_info_print_topology(&RUNTIME_INFO(dev_priv)->sseu, &p);
2723 return 0;
2726 static int i915_shrinker_info(struct seq_file *m, void *unused)
2728 struct drm_i915_private *i915 = node_to_i915(m->private);
2730 seq_printf(m, "seeks = %d\n", i915->mm.shrinker.seeks);
2731 seq_printf(m, "batch = %lu\n", i915->mm.shrinker.batch);
2733 return 0;
2736 static int i915_shared_dplls_info(struct seq_file *m, void *unused)
2738 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2739 struct drm_device *dev = &dev_priv->drm;
2740 int i;
2742 drm_modeset_lock_all(dev);
2743 for (i = 0; i < dev_priv->num_shared_dpll; i++) {
2744 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
2746 seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->info->name,
2747 pll->info->id);
2748 seq_printf(m, " crtc_mask: 0x%08x, active: 0x%x, on: %s\n",
2749 pll->state.crtc_mask, pll->active_mask, yesno(pll->on));
2750 seq_printf(m, " tracked hardware state:\n");
2751 seq_printf(m, " dpll: 0x%08x\n", pll->state.hw_state.dpll);
2752 seq_printf(m, " dpll_md: 0x%08x\n",
2753 pll->state.hw_state.dpll_md);
2754 seq_printf(m, " fp0: 0x%08x\n", pll->state.hw_state.fp0);
2755 seq_printf(m, " fp1: 0x%08x\n", pll->state.hw_state.fp1);
2756 seq_printf(m, " wrpll: 0x%08x\n", pll->state.hw_state.wrpll);
2757 seq_printf(m, " cfgcr0: 0x%08x\n", pll->state.hw_state.cfgcr0);
2758 seq_printf(m, " cfgcr1: 0x%08x\n", pll->state.hw_state.cfgcr1);
2759 seq_printf(m, " mg_refclkin_ctl: 0x%08x\n",
2760 pll->state.hw_state.mg_refclkin_ctl);
2761 seq_printf(m, " mg_clktop2_coreclkctl1: 0x%08x\n",
2762 pll->state.hw_state.mg_clktop2_coreclkctl1);
2763 seq_printf(m, " mg_clktop2_hsclkctl: 0x%08x\n",
2764 pll->state.hw_state.mg_clktop2_hsclkctl);
2765 seq_printf(m, " mg_pll_div0: 0x%08x\n",
2766 pll->state.hw_state.mg_pll_div0);
2767 seq_printf(m, " mg_pll_div1: 0x%08x\n",
2768 pll->state.hw_state.mg_pll_div1);
2769 seq_printf(m, " mg_pll_lf: 0x%08x\n",
2770 pll->state.hw_state.mg_pll_lf);
2771 seq_printf(m, " mg_pll_frac_lock: 0x%08x\n",
2772 pll->state.hw_state.mg_pll_frac_lock);
2773 seq_printf(m, " mg_pll_ssc: 0x%08x\n",
2774 pll->state.hw_state.mg_pll_ssc);
2775 seq_printf(m, " mg_pll_bias: 0x%08x\n",
2776 pll->state.hw_state.mg_pll_bias);
2777 seq_printf(m, " mg_pll_tdc_coldst_bias: 0x%08x\n",
2778 pll->state.hw_state.mg_pll_tdc_coldst_bias);
2780 drm_modeset_unlock_all(dev);
2782 return 0;
2785 static int i915_wa_registers(struct seq_file *m, void *unused)
2787 struct drm_i915_private *i915 = node_to_i915(m->private);
2788 struct intel_engine_cs *engine;
2790 for_each_uabi_engine(engine, i915) {
2791 const struct i915_wa_list *wal = &engine->ctx_wa_list;
2792 const struct i915_wa *wa;
2793 unsigned int count;
2795 count = wal->count;
2796 if (!count)
2797 continue;
2799 seq_printf(m, "%s: Workarounds applied: %u\n",
2800 engine->name, count);
2802 for (wa = wal->list; count--; wa++)
2803 seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X\n",
2804 i915_mmio_reg_offset(wa->reg),
2805 wa->val, wa->mask);
2807 seq_printf(m, "\n");
2810 return 0;
2813 static int i915_ipc_status_show(struct seq_file *m, void *data)
2815 struct drm_i915_private *dev_priv = m->private;
2817 seq_printf(m, "Isochronous Priority Control: %s\n",
2818 yesno(dev_priv->ipc_enabled));
2819 return 0;
2822 static int i915_ipc_status_open(struct inode *inode, struct file *file)
2824 struct drm_i915_private *dev_priv = inode->i_private;
2826 if (!HAS_IPC(dev_priv))
2827 return -ENODEV;
2829 return single_open(file, i915_ipc_status_show, dev_priv);
2832 static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf,
2833 size_t len, loff_t *offp)
2835 struct seq_file *m = file->private_data;
2836 struct drm_i915_private *dev_priv = m->private;
2837 intel_wakeref_t wakeref;
2838 bool enable;
2839 int ret;
2841 ret = kstrtobool_from_user(ubuf, len, &enable);
2842 if (ret < 0)
2843 return ret;
2845 with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
2846 if (!dev_priv->ipc_enabled && enable)
2847 DRM_INFO("Enabling IPC: watermarks will be correct only after the next commit\n");
2848 dev_priv->wm.distrust_bios_wm = true;
2849 dev_priv->ipc_enabled = enable;
2850 intel_enable_ipc(dev_priv);
2853 return len;
2856 static const struct file_operations i915_ipc_status_fops = {
2857 .owner = THIS_MODULE,
2858 .open = i915_ipc_status_open,
2859 .read = seq_read,
2860 .llseek = seq_lseek,
2861 .release = single_release,
2862 .write = i915_ipc_status_write
2863 };
2865 static int i915_ddb_info(struct seq_file *m, void *unused)
2867 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2868 struct drm_device *dev = &dev_priv->drm;
2869 struct skl_ddb_entry *entry;
2870 struct intel_crtc *crtc;
2872 if (INTEL_GEN(dev_priv) < 9)
2873 return -ENODEV;
2875 drm_modeset_lock_all(dev);
2877 seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");
2879 for_each_intel_crtc(&dev_priv->drm, crtc) {
2880 struct intel_crtc_state *crtc_state =
2881 to_intel_crtc_state(crtc->base.state);
2882 enum pipe pipe = crtc->pipe;
2883 enum plane_id plane_id;
2885 seq_printf(m, "Pipe %c\n", pipe_name(pipe));
2887 for_each_plane_id_on_crtc(crtc, plane_id) {
2888 entry = &crtc_state->wm.skl.plane_ddb_y[plane_id];
2889 seq_printf(m, " Plane%-8d%8u%8u%8u\n", plane_id + 1,
2890 entry->start, entry->end,
2891 skl_ddb_entry_size(entry));
2894 entry = &crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR];
2895 seq_printf(m, " %-13s%8u%8u%8u\n", "Cursor", entry->start,
2896 entry->end, skl_ddb_entry_size(entry));
2899 drm_modeset_unlock_all(dev);
2901 return 0;
2904 static void drrs_status_per_crtc(struct seq_file *m,
2905 struct drm_device *dev,
2906 struct intel_crtc *intel_crtc)
2908 struct drm_i915_private *dev_priv = to_i915(dev);
2909 struct i915_drrs *drrs = &dev_priv->drrs;
2910 int vrefresh = 0;
2911 struct drm_connector *connector;
2912 struct drm_connector_list_iter conn_iter;
2914 drm_connector_list_iter_begin(dev, &conn_iter);
2915 drm_for_each_connector_iter(connector, &conn_iter) {
2916 if (connector->state->crtc != &intel_crtc->base)
2917 continue;
2919 seq_printf(m, "%s:\n", connector->name);
2921 drm_connector_list_iter_end(&conn_iter);
2923 if (dev_priv->vbt.drrs_type == STATIC_DRRS_SUPPORT)
2924 seq_puts(m, "\tVBT: DRRS_type: Static");
2925 else if (dev_priv->vbt.drrs_type == SEAMLESS_DRRS_SUPPORT)
2926 seq_puts(m, "\tVBT: DRRS_type: Seamless");
2927 else if (dev_priv->vbt.drrs_type == DRRS_NOT_SUPPORTED)
2928 seq_puts(m, "\tVBT: DRRS_type: None");
2929 else
2930 seq_puts(m, "\tVBT: DRRS_type: FIXME: Unrecognized Value");
2932 seq_puts(m, "\n\n");
2934 if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) {
2935 struct intel_panel *panel;
2937 mutex_lock(&drrs->mutex);
2938 /* DRRS Supported */
2939 seq_puts(m, "\tDRRS Supported: Yes\n");
2941 /* disable_drrs() will make drrs->dp NULL */
2942 if (!drrs->dp) {
2943 seq_puts(m, "Idleness DRRS: Disabled\n");
2944 if (dev_priv->psr.enabled)
2945 seq_puts(m,
2946 "\tAs PSR is enabled, DRRS is not enabled\n");
2947 mutex_unlock(&drrs->mutex);
2948 return;
2951 panel = &drrs->dp->attached_connector->panel;
2952 seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X",
2953 drrs->busy_frontbuffer_bits);
2955 seq_puts(m, "\n\t\t");
2956 if (drrs->refresh_rate_type == DRRS_HIGH_RR) {
2957 seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n");
2958 vrefresh = panel->fixed_mode->vrefresh;
2959 } else if (drrs->refresh_rate_type == DRRS_LOW_RR) {
2960 seq_puts(m, "DRRS_State: DRRS_LOW_RR\n");
2961 vrefresh = panel->downclock_mode->vrefresh;
2962 } else {
2963 seq_printf(m, "DRRS_State: Unknown(%d)\n",
2964 drrs->refresh_rate_type);
2965 mutex_unlock(&drrs->mutex);
2966 return;
2968 seq_printf(m, "\t\tVrefresh: %d", vrefresh);
2970 seq_puts(m, "\n\t\t");
2971 mutex_unlock(&drrs->mutex);
2972 } else {
2973 /* DRRS not supported. Print the VBT parameter. */
2974 seq_puts(m, "\tDRRS Supported: No");
2976 seq_puts(m, "\n");
2979 static int i915_drrs_status(struct seq_file *m, void *unused)
2981 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2982 struct drm_device *dev = &dev_priv->drm;
2983 struct intel_crtc *intel_crtc;
2984 int active_crtc_cnt = 0;
2986 drm_modeset_lock_all(dev);
2987 for_each_intel_crtc(dev, intel_crtc) {
2988 if (intel_crtc->base.state->active) {
2989 active_crtc_cnt++;
2990 seq_printf(m, "\nCRTC %d: ", active_crtc_cnt);
2992 drrs_status_per_crtc(m, dev, intel_crtc);
2995 drm_modeset_unlock_all(dev);
2997 if (!active_crtc_cnt)
2998 seq_puts(m, "No active crtc found\n");
3000 return 0;
3003 static int i915_dp_mst_info(struct seq_file *m, void *unused)
3005 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3006 struct drm_device *dev = &dev_priv->drm;
3007 struct intel_encoder *intel_encoder;
3008 struct intel_digital_port *intel_dig_port;
3009 struct drm_connector *connector;
3010 struct drm_connector_list_iter conn_iter;
3012 drm_connector_list_iter_begin(dev, &conn_iter);
3013 drm_for_each_connector_iter(connector, &conn_iter) {
3014 if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
3015 continue;
3017 intel_encoder = intel_attached_encoder(to_intel_connector(connector));
3018 if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
3019 continue;
3021 intel_dig_port = enc_to_dig_port(intel_encoder);
3022 if (!intel_dig_port->dp.can_mst)
3023 continue;
3025 seq_printf(m, "MST Source Port [ENCODER:%d:%s]\n",
3026 intel_dig_port->base.base.base.id,
3027 intel_dig_port->base.base.name);
3028 drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
3030 drm_connector_list_iter_end(&conn_iter);
3032 return 0;
3035 static ssize_t i915_displayport_test_active_write(struct file *file,
3036 const char __user *ubuf,
3037 size_t len, loff_t *offp)
3039 char *input_buffer;
3040 int status = 0;
3041 struct drm_device *dev;
3042 struct drm_connector *connector;
3043 struct drm_connector_list_iter conn_iter;
3044 struct intel_dp *intel_dp;
3045 int val = 0;
3047 dev = ((struct seq_file *)file->private_data)->private;
3049 if (len == 0)
3050 return 0;
3052 input_buffer = memdup_user_nul(ubuf, len);
3053 if (IS_ERR(input_buffer))
3054 return PTR_ERR(input_buffer);
3056 DRM_DEBUG_DRIVER("Copied %d bytes from user\n", (unsigned int)len);
3058 drm_connector_list_iter_begin(dev, &conn_iter);
3059 drm_for_each_connector_iter(connector, &conn_iter) {
3060 struct intel_encoder *encoder;
3062 if (connector->connector_type !=
3063 DRM_MODE_CONNECTOR_DisplayPort)
3064 continue;
3066 encoder = to_intel_encoder(connector->encoder);
3067 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3068 continue;
3070 if (encoder && connector->status == connector_status_connected) {
3071 intel_dp = enc_to_intel_dp(encoder);
3072 status = kstrtoint(input_buffer, 10, &val);
3073 if (status < 0)
3074 break;
3075 DRM_DEBUG_DRIVER("Got %d for test active\n", val);
3076 /* To prevent erroneous activation of the compliance
3077 * testing code, only accept an actual value of 1 here
3078 */
3079 if (val == 1)
3080 intel_dp->compliance.test_active = true;
3081 else
3082 intel_dp->compliance.test_active = false;
3085 drm_connector_list_iter_end(&conn_iter);
3086 kfree(input_buffer);
3087 if (status < 0)
3088 return status;
3090 *offp += len;
3091 return len;
3094 static int i915_displayport_test_active_show(struct seq_file *m, void *data)
3096 struct drm_i915_private *dev_priv = m->private;
3097 struct drm_device *dev = &dev_priv->drm;
3098 struct drm_connector *connector;
3099 struct drm_connector_list_iter conn_iter;
3100 struct intel_dp *intel_dp;
3102 drm_connector_list_iter_begin(dev, &conn_iter);
3103 drm_for_each_connector_iter(connector, &conn_iter) {
3104 struct intel_encoder *encoder;
3106 if (connector->connector_type !=
3107 DRM_MODE_CONNECTOR_DisplayPort)
3108 continue;
3110 encoder = to_intel_encoder(connector->encoder);
3111 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3112 continue;
3114 if (encoder && connector->status == connector_status_connected) {
3115 intel_dp = enc_to_intel_dp(encoder);
3116 if (intel_dp->compliance.test_active)
3117 seq_puts(m, "1");
3118 else
3119 seq_puts(m, "0");
3120 } else
3121 seq_puts(m, "0");
3123 drm_connector_list_iter_end(&conn_iter);
3125 return 0;
3128 static int i915_displayport_test_active_open(struct inode *inode,
3129 struct file *file)
3131 return single_open(file, i915_displayport_test_active_show,
3132 inode->i_private);
3135 static const struct file_operations i915_displayport_test_active_fops = {
3136 .owner = THIS_MODULE,
3137 .open = i915_displayport_test_active_open,
3138 .read = seq_read,
3139 .llseek = seq_lseek,
3140 .release = single_release,
3141 .write = i915_displayport_test_active_write
3142 };
3144 static int i915_displayport_test_data_show(struct seq_file *m, void *data)
3146 struct drm_i915_private *dev_priv = m->private;
3147 struct drm_device *dev = &dev_priv->drm;
3148 struct drm_connector *connector;
3149 struct drm_connector_list_iter conn_iter;
3150 struct intel_dp *intel_dp;
3152 drm_connector_list_iter_begin(dev, &conn_iter);
3153 drm_for_each_connector_iter(connector, &conn_iter) {
3154 struct intel_encoder *encoder;
3156 if (connector->connector_type !=
3157 DRM_MODE_CONNECTOR_DisplayPort)
3158 continue;
3160 encoder = to_intel_encoder(connector->encoder);
3161 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3162 continue;
3164 if (encoder && connector->status == connector_status_connected) {
3165 intel_dp = enc_to_intel_dp(encoder);
3166 if (intel_dp->compliance.test_type ==
3167 DP_TEST_LINK_EDID_READ)
3168 seq_printf(m, "%lx",
3169 intel_dp->compliance.test_data.edid);
3170 else if (intel_dp->compliance.test_type ==
3171 DP_TEST_LINK_VIDEO_PATTERN) {
3172 seq_printf(m, "hdisplay: %d\n",
3173 intel_dp->compliance.test_data.hdisplay);
3174 seq_printf(m, "vdisplay: %d\n",
3175 intel_dp->compliance.test_data.vdisplay);
3176 seq_printf(m, "bpc: %u\n",
3177 intel_dp->compliance.test_data.bpc);
3179 } else
3180 seq_puts(m, "0");
3182 drm_connector_list_iter_end(&conn_iter);
3184 return 0;
3186 DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_data);
3188 static int i915_displayport_test_type_show(struct seq_file *m, void *data)
3190 struct drm_i915_private *dev_priv = m->private;
3191 struct drm_device *dev = &dev_priv->drm;
3192 struct drm_connector *connector;
3193 struct drm_connector_list_iter conn_iter;
3194 struct intel_dp *intel_dp;
3196 drm_connector_list_iter_begin(dev, &conn_iter);
3197 drm_for_each_connector_iter(connector, &conn_iter) {
3198 struct intel_encoder *encoder;
3200 if (connector->connector_type !=
3201 DRM_MODE_CONNECTOR_DisplayPort)
3202 continue;
3204 encoder = to_intel_encoder(connector->encoder);
3205 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3206 continue;
3208 if (encoder && connector->status == connector_status_connected) {
3209 intel_dp = enc_to_intel_dp(encoder);
3210 seq_printf(m, "%02lx", intel_dp->compliance.test_type);
3211 } else
3212 seq_puts(m, "0");
3214 drm_connector_list_iter_end(&conn_iter);
3216 return 0;
3218 DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_type);
3220 static void wm_latency_show(struct seq_file *m, const u16 wm[8])
3222 struct drm_i915_private *dev_priv = m->private;
3223 struct drm_device *dev = &dev_priv->drm;
3224 int level;
3225 int num_levels;
3227 if (IS_CHERRYVIEW(dev_priv))
3228 num_levels = 3;
3229 else if (IS_VALLEYVIEW(dev_priv))
3230 num_levels = 1;
3231 else if (IS_G4X(dev_priv))
3232 num_levels = 3;
3233 else
3234 num_levels = ilk_wm_max_level(dev_priv) + 1;
3236 drm_modeset_lock_all(dev);
3238 for (level = 0; level < num_levels; level++) {
3239 unsigned int latency = wm[level];
3241 /*
3242 * - WM1+ latency values in 0.5us units
3243 * - latencies are in us on gen9/vlv/chv
3244 */
3245 if (INTEL_GEN(dev_priv) >= 9 ||
3246 IS_VALLEYVIEW(dev_priv) ||
3247 IS_CHERRYVIEW(dev_priv) ||
3248 IS_G4X(dev_priv))
3249 latency *= 10;
3250 else if (level > 0)
3251 latency *= 5;
3253 seq_printf(m, "WM%d %u (%u.%u usec)\n",
3254 level, wm[level], latency / 10, latency % 10);
3257 drm_modeset_unlock_all(dev);
3260 static int pri_wm_latency_show(struct seq_file *m, void *data)
3262 struct drm_i915_private *dev_priv = m->private;
3263 const u16 *latencies;
3265 if (INTEL_GEN(dev_priv) >= 9)
3266 latencies = dev_priv->wm.skl_latency;
3267 else
3268 latencies = dev_priv->wm.pri_latency;
3270 wm_latency_show(m, latencies);
3272 return 0;
3275 static int spr_wm_latency_show(struct seq_file *m, void *data)
3277 struct drm_i915_private *dev_priv = m->private;
3278 const u16 *latencies;
3280 if (INTEL_GEN(dev_priv) >= 9)
3281 latencies = dev_priv->wm.skl_latency;
3282 else
3283 latencies = dev_priv->wm.spr_latency;
3285 wm_latency_show(m, latencies);
3287 return 0;
3290 static int cur_wm_latency_show(struct seq_file *m, void *data)
3292 struct drm_i915_private *dev_priv = m->private;
3293 const u16 *latencies;
3295 if (INTEL_GEN(dev_priv) >= 9)
3296 latencies = dev_priv->wm.skl_latency;
3297 else
3298 latencies = dev_priv->wm.cur_latency;
3300 wm_latency_show(m, latencies);
3302 return 0;
3305 static int pri_wm_latency_open(struct inode *inode, struct file *file)
3307 struct drm_i915_private *dev_priv = inode->i_private;
3309 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
3310 return -ENODEV;
3312 return single_open(file, pri_wm_latency_show, dev_priv);
3315 static int spr_wm_latency_open(struct inode *inode, struct file *file)
3317 struct drm_i915_private *dev_priv = inode->i_private;
3319 if (HAS_GMCH(dev_priv))
3320 return -ENODEV;
3322 return single_open(file, spr_wm_latency_show, dev_priv);
3325 static int cur_wm_latency_open(struct inode *inode, struct file *file)
3327 struct drm_i915_private *dev_priv = inode->i_private;
3329 if (HAS_GMCH(dev_priv))
3330 return -ENODEV;
3332 return single_open(file, cur_wm_latency_show, dev_priv);
3335 static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
3336 size_t len, loff_t *offp, u16 wm[8])
3338 struct seq_file *m = file->private_data;
3339 struct drm_i915_private *dev_priv = m->private;
3340 struct drm_device *dev = &dev_priv->drm;
3341 u16 new[8] = { 0 };
3342 int num_levels;
3343 int level;
3344 int ret;
3345 char tmp[32];
3347 if (IS_CHERRYVIEW(dev_priv))
3348 num_levels = 3;
3349 else if (IS_VALLEYVIEW(dev_priv))
3350 num_levels = 1;
3351 else if (IS_G4X(dev_priv))
3352 num_levels = 3;
3353 else
3354 num_levels = ilk_wm_max_level(dev_priv) + 1;
3356 if (len >= sizeof(tmp))
3357 return -EINVAL;
3359 if (copy_from_user(tmp, ubuf, len))
3360 return -EFAULT;
3362 tmp[len] = '\0';
3364 ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
3365 &new[0], &new[1], &new[2], &new[3],
3366 &new[4], &new[5], &new[6], &new[7]);
3367 if (ret != num_levels)
3368 return -EINVAL;
3370 drm_modeset_lock_all(dev);
3372 for (level = 0; level < num_levels; level++)
3373 wm[level] = new[level];
3375 drm_modeset_unlock_all(dev);
3377 return len;
3381 static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
3382 size_t len, loff_t *offp)
3384 struct seq_file *m = file->private_data;
3385 struct drm_i915_private *dev_priv = m->private;
3386 u16 *latencies;
3388 if (INTEL_GEN(dev_priv) >= 9)
3389 latencies = dev_priv->wm.skl_latency;
3390 else
3391 latencies = dev_priv->wm.pri_latency;
3393 return wm_latency_write(file, ubuf, len, offp, latencies);
3396 static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
3397 size_t len, loff_t *offp)
3399 struct seq_file *m = file->private_data;
3400 struct drm_i915_private *dev_priv = m->private;
3401 u16 *latencies;
3403 if (INTEL_GEN(dev_priv) >= 9)
3404 latencies = dev_priv->wm.skl_latency;
3405 else
3406 latencies = dev_priv->wm.spr_latency;
3408 return wm_latency_write(file, ubuf, len, offp, latencies);
3411 static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
3412 size_t len, loff_t *offp)
3414 struct seq_file *m = file->private_data;
3415 struct drm_i915_private *dev_priv = m->private;
3416 u16 *latencies;
3418 if (INTEL_GEN(dev_priv) >= 9)
3419 latencies = dev_priv->wm.skl_latency;
3420 else
3421 latencies = dev_priv->wm.cur_latency;
3423 return wm_latency_write(file, ubuf, len, offp, latencies);
3426 static const struct file_operations i915_pri_wm_latency_fops = {
3427 .owner = THIS_MODULE,
3428 .open = pri_wm_latency_open,
3429 .read = seq_read,
3430 .llseek = seq_lseek,
3431 .release = single_release,
3432 .write = pri_wm_latency_write
3433 };
3435 static const struct file_operations i915_spr_wm_latency_fops = {
3436 .owner = THIS_MODULE,
3437 .open = spr_wm_latency_open,
3438 .read = seq_read,
3439 .llseek = seq_lseek,
3440 .release = single_release,
3441 .write = spr_wm_latency_write
3442 };
3444 static const struct file_operations i915_cur_wm_latency_fops = {
3445 .owner = THIS_MODULE,
3446 .open = cur_wm_latency_open,
3447 .read = seq_read,
3448 .llseek = seq_lseek,
3449 .release = single_release,
3450 .write = cur_wm_latency_write
3451 };
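/*
 * Usage sketch for the three watermark latency nodes (assuming the
 * usual names i915_pri_wm_latency, i915_spr_wm_latency and
 * i915_cur_wm_latency): a read prints one "WM<level> <raw> (<usec>)"
 * line per level, and a write must supply exactly num_levels
 * space-separated u16 values or wm_latency_write() returns -EINVAL,
 * e.g. on a platform with 8 levels:
 *
 *   echo "2 4 8 16 32 32 64 96" > i915_pri_wm_latency
 *
 * (the values above are illustrative only).
 */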
3453 static int
3454 i915_wedged_get(void *data, u64 *val)
3456 struct drm_i915_private *i915 = data;
3457 int ret = intel_gt_terminally_wedged(&i915->gt);
3459 switch (ret) {
3460 case -EIO:
3461 *val = 1;
3462 return 0;
3463 case 0:
3464 *val = 0;
3465 return 0;
3466 default:
3467 return ret;
3471 static int
3472 i915_wedged_set(void *data, u64 val)
3474 struct drm_i915_private *i915 = data;
3476 /* Flush any previous reset before applying for a new one */
3477 wait_event(i915->gt.reset.queue,
3478 !test_bit(I915_RESET_BACKOFF, &i915->gt.reset.flags));
3480 intel_gt_handle_error(&i915->gt, val, I915_ERROR_CAPTURE,
3481 "Manually set wedged engine mask = %llx", val);
3482 return 0;
3485 DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
3486 i915_wedged_get, i915_wedged_set,
3487 "%llu\n");
3489 static int
3490 i915_perf_noa_delay_set(void *data, u64 val)
3492 struct drm_i915_private *i915 = data;
3493 const u32 clk = RUNTIME_INFO(i915)->cs_timestamp_frequency_khz;
3495 /*
3496 * This would lead to infinite waits as we're doing timestamp
3497 * difference on the CS with only 32 bits.
3498 */
3499 if (val > mul_u32_u32(U32_MAX, clk))
3500 return -EINVAL;
3502 atomic64_set(&i915->perf.noa_programming_delay, val);
3503 return 0;
3506 static int
3507 i915_perf_noa_delay_get(void *data, u64 *val)
3509 struct drm_i915_private *i915 = data;
3511 *val = atomic64_read(&i915->perf.noa_programming_delay);
3512 return 0;
3515 DEFINE_SIMPLE_ATTRIBUTE(i915_perf_noa_delay_fops,
3516 i915_perf_noa_delay_get,
3517 i915_perf_noa_delay_set,
3518 "%llu\n");
3520 #define DROP_UNBOUND BIT(0)
3521 #define DROP_BOUND BIT(1)
3522 #define DROP_RETIRE BIT(2)
3523 #define DROP_ACTIVE BIT(3)
3524 #define DROP_FREED BIT(4)
3525 #define DROP_SHRINK_ALL BIT(5)
3526 #define DROP_IDLE BIT(6)
3527 #define DROP_RESET_ACTIVE BIT(7)
3528 #define DROP_RESET_SEQNO BIT(8)
3529 #define DROP_RCU BIT(9)
3530 #define DROP_ALL (DROP_UNBOUND | \
3531 DROP_BOUND | \
3532 DROP_RETIRE | \
3533 DROP_ACTIVE | \
3534 DROP_FREED | \
3535 DROP_SHRINK_ALL |\
3536 DROP_IDLE | \
3537 DROP_RESET_ACTIVE | \
3538 DROP_RESET_SEQNO | \
3539 DROP_RCU)
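/*
 * Usage sketch, assuming the node is registered as "i915_drop_caches":
 * the written value is a mask of the DROP_* bits above, so with the
 * ten bits currently defined
 *
 *   echo 0x3ff > /sys/kernel/debug/dri/0/i915_drop_caches
 *
 * is equivalent to DROP_ALL; a read simply reports DROP_ALL.
 */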
3540 static int
3541 i915_drop_caches_get(void *data, u64 *val)
3543 *val = DROP_ALL;
3545 return 0;
3547 static int
3548 gt_drop_caches(struct intel_gt *gt, u64 val)
3550 int ret;
3552 if (val & DROP_RESET_ACTIVE &&
3553 wait_for(intel_engines_are_idle(gt), I915_IDLE_ENGINES_TIMEOUT))
3554 intel_gt_set_wedged(gt);
3556 if (val & DROP_RETIRE)
3557 intel_gt_retire_requests(gt);
3559 if (val & (DROP_IDLE | DROP_ACTIVE)) {
3560 ret = intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT);
3561 if (ret)
3562 return ret;
3565 if (val & DROP_IDLE) {
3566 ret = intel_gt_pm_wait_for_idle(gt);
3567 if (ret)
3568 return ret;
3571 if (val & DROP_RESET_ACTIVE && intel_gt_terminally_wedged(gt))
3572 intel_gt_handle_error(gt, ALL_ENGINES, 0, NULL);
3574 return 0;
3577 static int
3578 i915_drop_caches_set(void *data, u64 val)
3580 struct drm_i915_private *i915 = data;
3581 int ret;
3583 DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n",
3584 val, val & DROP_ALL);
3586 ret = gt_drop_caches(&i915->gt, val);
3587 if (ret)
3588 return ret;
3590 fs_reclaim_acquire(GFP_KERNEL);
3591 if (val & DROP_BOUND)
3592 i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_BOUND);
3594 if (val & DROP_UNBOUND)
3595 i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_UNBOUND);
3597 if (val & DROP_SHRINK_ALL)
3598 i915_gem_shrink_all(i915);
3599 fs_reclaim_release(GFP_KERNEL);
3601 if (val & DROP_RCU)
3602 rcu_barrier();
3604 if (val & DROP_FREED)
3605 i915_gem_drain_freed_objects(i915);
3607 return 0;
3610 DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
3611 i915_drop_caches_get, i915_drop_caches_set,
3612 "0x%08llx\n");
3614 static int
3615 i915_cache_sharing_get(void *data, u64 *val)
3617 struct drm_i915_private *dev_priv = data;
3618 intel_wakeref_t wakeref;
3619 u32 snpcr = 0;
3621 if (!(IS_GEN_RANGE(dev_priv, 6, 7)))
3622 return -ENODEV;
3624 with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
3625 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
3627 *val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;
3629 return 0;
3632 static int
3633 i915_cache_sharing_set(void *data, u64 val)
3635 struct drm_i915_private *dev_priv = data;
3636 intel_wakeref_t wakeref;
3638 if (!(IS_GEN_RANGE(dev_priv, 6, 7)))
3639 return -ENODEV;
3641 if (val > 3)
3642 return -EINVAL;
3644 DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);
3645 with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
3646 u32 snpcr;
3648 /* Update the cache sharing policy here as well */
3649 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
3650 snpcr &= ~GEN6_MBC_SNPCR_MASK;
3651 snpcr |= val << GEN6_MBC_SNPCR_SHIFT;
3652 I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
3655 return 0;
3658 static void
3659 intel_sseu_copy_subslices(const struct sseu_dev_info *sseu, int slice,
3660 u8 *to_mask)
3662 int offset = slice * sseu->ss_stride;
3664 memcpy(&to_mask[offset], &sseu->subslice_mask[offset], sseu->ss_stride);
3667 DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
3668 i915_cache_sharing_get, i915_cache_sharing_set,
3669 "%llu\n");
3671 static void cherryview_sseu_device_status(struct drm_i915_private *dev_priv,
3672 struct sseu_dev_info *sseu)
3674 #define SS_MAX 2
3675 const int ss_max = SS_MAX;
3676 u32 sig1[SS_MAX], sig2[SS_MAX];
3677 int ss;
3679 sig1[0] = I915_READ(CHV_POWER_SS0_SIG1);
3680 sig1[1] = I915_READ(CHV_POWER_SS1_SIG1);
3681 sig2[0] = I915_READ(CHV_POWER_SS0_SIG2);
3682 sig2[1] = I915_READ(CHV_POWER_SS1_SIG2);
3684 for (ss = 0; ss < ss_max; ss++) {
3685 unsigned int eu_cnt;
3687 if (sig1[ss] & CHV_SS_PG_ENABLE)
3688 /* skip disabled subslice */
3689 continue;
3691 sseu->slice_mask = BIT(0);
3692 sseu->subslice_mask[0] |= BIT(ss);
3693 eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) +
3694 ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) +
3695 ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) +
3696 ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2);
3697 sseu->eu_total += eu_cnt;
3698 sseu->eu_per_subslice = max_t(unsigned int,
3699 sseu->eu_per_subslice, eu_cnt);
3701 #undef SS_MAX
3704 static void gen10_sseu_device_status(struct drm_i915_private *dev_priv,
3705 struct sseu_dev_info *sseu)
3707 #define SS_MAX 6
3708 const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
3709 u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
3710 int s, ss;
3712 for (s = 0; s < info->sseu.max_slices; s++) {
3713 /*
3714 * FIXME: Valid SS Mask respects the spec and reads
3715 * only valid bits for those registers, excluding reserved
3716 * bits, although this seems wrong because it would leave many
3717 * subslices without an ACK.
3718 */
3719 s_reg[s] = I915_READ(GEN10_SLICE_PGCTL_ACK(s)) &
3720 GEN10_PGCTL_VALID_SS_MASK(s);
3721 eu_reg[2 * s] = I915_READ(GEN10_SS01_EU_PGCTL_ACK(s));
3722 eu_reg[2 * s + 1] = I915_READ(GEN10_SS23_EU_PGCTL_ACK(s));
3725 eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
3726 GEN9_PGCTL_SSA_EU19_ACK |
3727 GEN9_PGCTL_SSA_EU210_ACK |
3728 GEN9_PGCTL_SSA_EU311_ACK;
3729 eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
3730 GEN9_PGCTL_SSB_EU19_ACK |
3731 GEN9_PGCTL_SSB_EU210_ACK |
3732 GEN9_PGCTL_SSB_EU311_ACK;
3734 for (s = 0; s < info->sseu.max_slices; s++) {
3735 if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
3736 /* skip disabled slice */
3737 continue;
3739 sseu->slice_mask |= BIT(s);
3740 intel_sseu_copy_subslices(&info->sseu, s, sseu->subslice_mask);
3742 for (ss = 0; ss < info->sseu.max_subslices; ss++) {
3743 unsigned int eu_cnt;
3745 if (info->sseu.has_subslice_pg &&
3746 !(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
3747 /* skip disabled subslice */
3748 continue;
3750 eu_cnt = 2 * hweight32(eu_reg[2 * s + ss / 2] &
3751 eu_mask[ss % 2]);
3752 sseu->eu_total += eu_cnt;
3753 sseu->eu_per_subslice = max_t(unsigned int,
3754 sseu->eu_per_subslice,
3755 eu_cnt);
3758 #undef SS_MAX
3761 static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
3762 struct sseu_dev_info *sseu)
3764 #define SS_MAX 3
3765 const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
3766 u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
3767 int s, ss;
3769 for (s = 0; s < info->sseu.max_slices; s++) {
3770 s_reg[s] = I915_READ(GEN9_SLICE_PGCTL_ACK(s));
3771 eu_reg[2 * s] = I915_READ(GEN9_SS01_EU_PGCTL_ACK(s));
3772 eu_reg[2 * s + 1] = I915_READ(GEN9_SS23_EU_PGCTL_ACK(s));
3775 eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
3776 GEN9_PGCTL_SSA_EU19_ACK |
3777 GEN9_PGCTL_SSA_EU210_ACK |
3778 GEN9_PGCTL_SSA_EU311_ACK;
3779 eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
3780 GEN9_PGCTL_SSB_EU19_ACK |
3781 GEN9_PGCTL_SSB_EU210_ACK |
3782 GEN9_PGCTL_SSB_EU311_ACK;
3784 for (s = 0; s < info->sseu.max_slices; s++) {
3785 if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
3786 /* skip disabled slice */
3787 continue;
3789 sseu->slice_mask |= BIT(s);
3791 if (IS_GEN9_BC(dev_priv))
3792 intel_sseu_copy_subslices(&info->sseu, s,
3793 sseu->subslice_mask);
3795 for (ss = 0; ss < info->sseu.max_subslices; ss++) {
3796 unsigned int eu_cnt;
3797 u8 ss_idx = s * info->sseu.ss_stride +
3798 ss / BITS_PER_BYTE;
3800 if (IS_GEN9_LP(dev_priv)) {
3801 if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
3802 /* skip disabled subslice */
3803 continue;
3805 sseu->subslice_mask[ss_idx] |=
3806 BIT(ss % BITS_PER_BYTE);
3809 eu_cnt = 2 * hweight32(eu_reg[2 * s + ss / 2] &
3810 eu_mask[ss % 2]);
3811 sseu->eu_total += eu_cnt;
3812 sseu->eu_per_subslice = max_t(unsigned int,
3813 sseu->eu_per_subslice,
3814 eu_cnt);
3817 #undef SS_MAX
3820 static void bdw_sseu_device_status(struct drm_i915_private *dev_priv,
3821 struct sseu_dev_info *sseu)
3823 const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
3824 u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO);
3825 int s;
3827 sseu->slice_mask = slice_info & GEN8_LSLICESTAT_MASK;
3829 if (sseu->slice_mask) {
3830 sseu->eu_per_subslice = info->sseu.eu_per_subslice;
3831 for (s = 0; s < fls(sseu->slice_mask); s++)
3832 intel_sseu_copy_subslices(&info->sseu, s,
3833 sseu->subslice_mask);
3834 sseu->eu_total = sseu->eu_per_subslice *
3835 intel_sseu_subslice_total(sseu);
3837 /* subtract fused off EU(s) from enabled slice(s) */
3838 for (s = 0; s < fls(sseu->slice_mask); s++) {
3839 u8 subslice_7eu = info->sseu.subslice_7eu[s];
3841 sseu->eu_total -= hweight8(subslice_7eu);
static void i915_print_sseu_info(struct seq_file *m, bool is_available_info,
				 const struct sseu_dev_info *sseu)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const char *type = is_available_info ? "Available" : "Enabled";
	int s;

	seq_printf(m, "  %s Slice Mask: %04x\n", type,
		   sseu->slice_mask);
	seq_printf(m, "  %s Slice Total: %u\n", type,
		   hweight8(sseu->slice_mask));
	seq_printf(m, "  %s Subslice Total: %u\n", type,
		   intel_sseu_subslice_total(sseu));
	for (s = 0; s < fls(sseu->slice_mask); s++) {
		seq_printf(m, "  %s Slice%i subslices: %u\n", type,
			   s, intel_sseu_subslices_per_slice(sseu, s));
	}
	seq_printf(m, "  %s EU Total: %u\n", type,
		   sseu->eu_total);
	seq_printf(m, "  %s EU Per Subslice: %u\n", type,
		   sseu->eu_per_subslice);

	if (!is_available_info)
		return;

	seq_printf(m, "  Has Pooled EU: %s\n", yesno(HAS_POOLED_EU(dev_priv)));
	if (HAS_POOLED_EU(dev_priv))
		seq_printf(m, "  Min EU in pool: %u\n", sseu->min_eu_in_pool);

	seq_printf(m, "  Has Slice Power Gating: %s\n",
		   yesno(sseu->has_slice_pg));
	seq_printf(m, "  Has Subslice Power Gating: %s\n",
		   yesno(sseu->has_subslice_pg));
	seq_printf(m, "  Has EU Power Gating: %s\n",
		   yesno(sseu->has_eu_pg));
}
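
/*
 * debugfs entry: first dump the static SSEU topology, then take a
 * runtime-PM wakeref and probe the platform-specific power-gating ACK
 * registers for the currently enabled slice/subslice/EU configuration.
 * Only implemented for gen8+.
 */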
static int i915_sseu_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
	struct sseu_dev_info sseu;
	intel_wakeref_t wakeref;

	if (INTEL_GEN(dev_priv) < 8)
		return -ENODEV;

	seq_puts(m, "SSEU Device Info\n");
	i915_print_sseu_info(m, true, &info->sseu);

	seq_puts(m, "SSEU Device Status\n");
	memset(&sseu, 0, sizeof(sseu));
	intel_sseu_set_info(&sseu, info->sseu.max_slices,
			    info->sseu.max_subslices,
			    info->sseu.max_eus_per_subslice);

	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
		if (IS_CHERRYVIEW(dev_priv))
			cherryview_sseu_device_status(dev_priv, &sseu);
		else if (IS_BROADWELL(dev_priv))
			bdw_sseu_device_status(dev_priv, &sseu);
		else if (IS_GEN(dev_priv, 9))
			gen9_sseu_device_status(dev_priv, &sseu);
		else if (INTEL_GEN(dev_priv) >= 10)
			gen10_sseu_device_status(dev_priv, &sseu);
	}

	i915_print_sseu_info(m, false, &sseu);

	return 0;
}
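
/*
 * i915_forcewake_user: while this file is held open we pin the GT awake
 * and, on gen6+ (where forcewake exists), hold all forcewake domains so
 * userspace can safely inspect GT registers. Both references are dropped
 * again on release.
 */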
static int i915_forcewake_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *i915 = inode->i_private;
	struct intel_gt *gt = &i915->gt;

	atomic_inc(&gt->user_wakeref);
	intel_gt_pm_get(gt);
	if (INTEL_GEN(i915) >= 6)
		intel_uncore_forcewake_user_get(gt->uncore);

	return 0;
}

static int i915_forcewake_release(struct inode *inode, struct file *file)
{
	struct drm_i915_private *i915 = inode->i_private;
	struct intel_gt *gt = &i915->gt;

	if (INTEL_GEN(i915) >= 6)
		intel_uncore_forcewake_user_put(&i915->uncore);
	intel_gt_pm_put(gt);
	atomic_dec(&gt->user_wakeref);

	return 0;
}

static const struct file_operations i915_forcewake_fops = {
	.owner = THIS_MODULE,
	.open = i915_forcewake_open,
	.release = i915_forcewake_release,
};
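
/*
 * i915_hpd_storm_ctl: reading shows the current HPD storm threshold and
 * whether a storm has been detected (i.e. HPD is waiting to be
 * re-enabled); writing accepts a decimal interrupt count, 0 to disable
 * detection, or "reset" for HPD_STORM_DEFAULT_THRESHOLD.
 */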
static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	struct i915_hotplug *hotplug = &dev_priv->hotplug;

	/* Synchronize with everything first in case there's been an HPD
	 * storm, but we haven't finished handling it in the kernel yet
	 */
	intel_synchronize_irq(dev_priv);
	flush_work(&dev_priv->hotplug.dig_port_work);
	flush_delayed_work(&dev_priv->hotplug.hotplug_work);

	seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
	seq_printf(m, "Detected: %s\n",
		   yesno(delayed_work_pending(&hotplug->reenable_work)));

	return 0;
}

static ssize_t i915_hpd_storm_ctl_write(struct file *file,
					const char __user *ubuf, size_t len,
					loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	struct i915_hotplug *hotplug = &dev_priv->hotplug;
	unsigned int new_threshold;
	int i;
	char *newline;
	char tmp[16];

	if (len >= sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(tmp, ubuf, len))
		return -EFAULT;

	tmp[len] = '\0';

	/* Strip newline, if any */
	newline = strchr(tmp, '\n');
	if (newline)
		*newline = '\0';

	if (strcmp(tmp, "reset") == 0)
		new_threshold = HPD_STORM_DEFAULT_THRESHOLD;
	else if (kstrtouint(tmp, 10, &new_threshold) != 0)
		return -EINVAL;

	if (new_threshold > 0)
		DRM_DEBUG_KMS("Setting HPD storm detection threshold to %d\n",
			      new_threshold);
	else
		DRM_DEBUG_KMS("Disabling HPD storm detection\n");

	spin_lock_irq(&dev_priv->irq_lock);
	hotplug->hpd_storm_threshold = new_threshold;
	/* Reset the HPD storm stats so we don't accidentally trigger a storm */
	for_each_hpd_pin(i)
		hotplug->stats[i].count = 0;
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Re-enable hpd immediately if we were in an irq storm */
	flush_delayed_work(&dev_priv->hotplug.reenable_work);

	return len;
}

static int i915_hpd_storm_ctl_open(struct inode *inode, struct file *file)
{
	return single_open(file, i915_hpd_storm_ctl_show, inode->i_private);
}

static const struct file_operations i915_hpd_storm_ctl_fops = {
	.owner = THIS_MODULE,
	.open = i915_hpd_storm_ctl_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_hpd_storm_ctl_write
};
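
/*
 * i915_hpd_short_storm_ctl: controls whether short HPD pulses count
 * towards storm detection. Writing "reset" restores the platform
 * default, which is enabled only on systems without DP-MST support.
 */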
static int i915_hpd_short_storm_ctl_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;

	seq_printf(m, "Enabled: %s\n",
		   yesno(dev_priv->hotplug.hpd_short_storm_enabled));

	return 0;
}

static int
i915_hpd_short_storm_ctl_open(struct inode *inode, struct file *file)
{
	return single_open(file, i915_hpd_short_storm_ctl_show,
			   inode->i_private);
}

static ssize_t i915_hpd_short_storm_ctl_write(struct file *file,
					      const char __user *ubuf,
					      size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	struct i915_hotplug *hotplug = &dev_priv->hotplug;
	char *newline;
	char tmp[16];
	int i;
	bool new_state;

	if (len >= sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(tmp, ubuf, len))
		return -EFAULT;

	tmp[len] = '\0';

	/* Strip newline, if any */
	newline = strchr(tmp, '\n');
	if (newline)
		*newline = '\0';

	/* Reset to the "default" state for this system */
	if (strcmp(tmp, "reset") == 0)
		new_state = !HAS_DP_MST(dev_priv);
	else if (kstrtobool(tmp, &new_state) != 0)
		return -EINVAL;

	DRM_DEBUG_KMS("%sabling HPD short storm detection\n",
		      new_state ? "En" : "Dis");

	spin_lock_irq(&dev_priv->irq_lock);
	hotplug->hpd_short_storm_enabled = new_state;
	/* Reset the HPD storm stats so we don't accidentally trigger a storm */
	for_each_hpd_pin(i)
		hotplug->stats[i].count = 0;
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Re-enable hpd immediately if we were in an irq storm */
	flush_delayed_work(&dev_priv->hotplug.reenable_work);

	return len;
}

static const struct file_operations i915_hpd_short_storm_ctl_fops = {
	.owner = THIS_MODULE,
	.open = i915_hpd_short_storm_ctl_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_hpd_short_storm_ctl_write,
};
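
/*
 * i915_drrs_ctl: writing a non-zero value manually enables DRRS on every
 * active, DRRS-capable eDP output; writing 0 disables it again. Each
 * CRTC is locked and any pending commit is waited upon before the panel
 * refresh rate is switched.
 */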
static int i915_drrs_ctl_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *crtc;

	if (INTEL_GEN(dev_priv) < 7)
		return -ENODEV;

	for_each_intel_crtc(dev, crtc) {
		struct drm_connector_list_iter conn_iter;
		struct intel_crtc_state *crtc_state;
		struct drm_connector *connector;
		struct drm_crtc_commit *commit;
		int ret;

		ret = drm_modeset_lock_single_interruptible(&crtc->base.mutex);
		if (ret)
			return ret;

		crtc_state = to_intel_crtc_state(crtc->base.state);

		if (!crtc_state->hw.active ||
		    !crtc_state->has_drrs)
			goto out;

		commit = crtc_state->uapi.commit;
		if (commit) {
			ret = wait_for_completion_interruptible(&commit->hw_done);
			if (ret)
				goto out;
		}

		drm_connector_list_iter_begin(dev, &conn_iter);
		drm_for_each_connector_iter(connector, &conn_iter) {
			struct intel_encoder *encoder;
			struct intel_dp *intel_dp;

			if (!(crtc_state->uapi.connector_mask &
			      drm_connector_mask(connector)))
				continue;

			encoder = intel_attached_encoder(to_intel_connector(connector));
			if (encoder->type != INTEL_OUTPUT_EDP)
				continue;

			DRM_DEBUG_DRIVER("Manually %sabling DRRS. %llu\n",
					 val ? "en" : "dis", val);

			intel_dp = enc_to_intel_dp(encoder);
			if (val)
				intel_edp_drrs_enable(intel_dp,
						      crtc_state);
			else
				intel_edp_drrs_disable(intel_dp,
						       crtc_state);
		}
		drm_connector_list_iter_end(&conn_iter);

out:
		drm_modeset_unlock(&crtc->base.mutex);
		if (ret)
			return ret;
	}

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_drrs_ctl_fops, NULL, i915_drrs_ctl_set, "%llu\n");
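
/*
 * i915_fifo_underrun_reset: writing a truthy value waits for pending
 * commits on each CRTC and then re-arms FIFO underrun reporting on all
 * active pipes, plus FBC underrun reporting. Reporting is normally
 * disabled after the first underrun to avoid log spam, so this provides
 * a way to re-enable it without a full modeset.
 */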
static ssize_t
i915_fifo_underrun_reset_write(struct file *filp,
			       const char __user *ubuf,
			       size_t cnt, loff_t *ppos)
{
	struct drm_i915_private *dev_priv = filp->private_data;
	struct intel_crtc *intel_crtc;
	struct drm_device *dev = &dev_priv->drm;
	int ret;
	bool reset;

	ret = kstrtobool_from_user(ubuf, cnt, &reset);
	if (ret)
		return ret;

	if (!reset)
		return cnt;

	for_each_intel_crtc(dev, intel_crtc) {
		struct drm_crtc_commit *commit;
		struct intel_crtc_state *crtc_state;

		ret = drm_modeset_lock_single_interruptible(&intel_crtc->base.mutex);
		if (ret)
			return ret;

		crtc_state = to_intel_crtc_state(intel_crtc->base.state);
		commit = crtc_state->uapi.commit;
		if (commit) {
			ret = wait_for_completion_interruptible(&commit->hw_done);
			if (!ret)
				ret = wait_for_completion_interruptible(&commit->flip_done);
		}

		if (!ret && crtc_state->hw.active) {
			DRM_DEBUG_KMS("Re-arming FIFO underruns on pipe %c\n",
				      pipe_name(intel_crtc->pipe));

			intel_crtc_arm_fifo_underrun(intel_crtc, crtc_state);
		}

		drm_modeset_unlock(&intel_crtc->base.mutex);

		if (ret)
			return ret;
	}

	ret = intel_fbc_reset_underrun(dev_priv);
	if (ret)
		return ret;

	return cnt;
}

static const struct file_operations i915_fifo_underrun_reset_ops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.write = i915_fifo_underrun_reset_write,
	.llseek = default_llseek,
};
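
/*
 * Read-only seq_file entries; each is created under the DRM minor's
 * debugfs root by i915_debugfs_register() below.
 */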
static const struct drm_info_list i915_debugfs_list[] = {
	{"i915_capabilities", i915_capabilities, 0},
	{"i915_gem_objects", i915_gem_object_info, 0},
	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
	{"i915_gem_interrupt", i915_interrupt_info, 0},
	{"i915_guc_info", i915_guc_info, 0},
	{"i915_guc_load_status", i915_guc_load_status_info, 0},
	{"i915_guc_log_dump", i915_guc_log_dump, 0},
	{"i915_guc_load_err_log_dump", i915_guc_log_dump, 0, (void *)1},
	{"i915_guc_stage_pool", i915_guc_stage_pool, 0},
	{"i915_huc_load_status", i915_huc_load_status_info, 0},
	{"i915_frequency_info", i915_frequency_info, 0},
	{"i915_drpc_info", i915_drpc_info, 0},
	{"i915_ring_freq_table", i915_ring_freq_table, 0},
	{"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
	{"i915_fbc_status", i915_fbc_status, 0},
	{"i915_ips_status", i915_ips_status, 0},
	{"i915_sr_status", i915_sr_status, 0},
	{"i915_opregion", i915_opregion, 0},
	{"i915_vbt", i915_vbt, 0},
	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
	{"i915_context_status", i915_context_status, 0},
	{"i915_forcewake_domains", i915_forcewake_domains, 0},
	{"i915_swizzle_info", i915_swizzle_info, 0},
	{"i915_llc", i915_llc, 0},
	{"i915_edp_psr_status", i915_edp_psr_status, 0},
	{"i915_energy_uJ", i915_energy_uJ, 0},
	{"i915_runtime_pm_status", i915_runtime_pm_status, 0},
	{"i915_power_domain_info", i915_power_domain_info, 0},
	{"i915_dmc_info", i915_dmc_info, 0},
	{"i915_display_info", i915_display_info, 0},
	{"i915_engine_info", i915_engine_info, 0},
	{"i915_rcs_topology", i915_rcs_topology, 0},
	{"i915_shrinker_info", i915_shrinker_info, 0},
	{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
	{"i915_dp_mst_info", i915_dp_mst_info, 0},
	{"i915_wa_registers", i915_wa_registers, 0},
	{"i915_ddb_info", i915_ddb_info, 0},
	{"i915_sseu_status", i915_sseu_status, 0},
	{"i915_drrs_status", i915_drrs_status, 0},
	{"i915_rps_boost_info", i915_rps_boost_info, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
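
/*
 * Writable control files; registered with S_IRUGO | S_IWUSR in
 * i915_debugfs_register() below.
 */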
static const struct i915_debugfs_files {
	const char *name;
	const struct file_operations *fops;
} i915_debugfs_files[] = {
	{"i915_perf_noa_delay", &i915_perf_noa_delay_fops},
	{"i915_wedged", &i915_wedged_fops},
	{"i915_cache_sharing", &i915_cache_sharing_fops},
	{"i915_gem_drop_caches", &i915_drop_caches_fops},
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
	{"i915_error_state", &i915_error_state_fops},
	{"i915_gpu_info", &i915_gpu_info_fops},
#endif
	{"i915_fifo_underrun_reset", &i915_fifo_underrun_reset_ops},
	{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
	{"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
	{"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
	{"i915_fbc_false_color", &i915_fbc_false_color_fops},
	{"i915_dp_test_data", &i915_displayport_test_data_fops},
	{"i915_dp_test_type", &i915_displayport_test_type_fops},
	{"i915_dp_test_active", &i915_displayport_test_active_fops},
	{"i915_guc_log_level", &i915_guc_log_level_fops},
	{"i915_guc_log_relay", &i915_guc_log_relay_fops},
	{"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops},
	{"i915_hpd_short_storm_ctl", &i915_hpd_short_storm_ctl_fops},
	{"i915_ipc_status", &i915_ipc_status_fops},
	{"i915_drrs_ctl", &i915_drrs_ctl_fops},
	{"i915_edp_psr_debug", &i915_edp_psr_debug_fops}
};

int i915_debugfs_register(struct drm_i915_private *dev_priv)
{
	struct drm_minor *minor = dev_priv->drm.primary;
	int i;

	debugfs_create_file("i915_forcewake_user", S_IRUSR, minor->debugfs_root,
			    to_i915(minor->dev), &i915_forcewake_fops);

	for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
		debugfs_create_file(i915_debugfs_files[i].name,
				    S_IRUGO | S_IWUSR,
				    minor->debugfs_root,
				    to_i915(minor->dev),
				    i915_debugfs_files[i].fops);
	}

	return drm_debugfs_create_files(i915_debugfs_list,
					I915_DEBUGFS_ENTRIES,
					minor->debugfs_root, minor);
}
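
/*
 * Table describing which DPCD ranges to dump in the per-connector
 * i915_dpcd file. A block is either a single register, an inclusive
 * [offset, end] range, or .size bytes starting at .offset.
 */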
struct dpcd_block {
	/* DPCD dump start address. */
	unsigned int offset;
	/* DPCD dump end address, inclusive. If unset, .size will be used. */
	unsigned int end;
	/* DPCD dump size. Used if .end is unset. If unset, defaults to 1. */
	size_t size;
	/* Only valid for eDP. */
	bool edp;
};

static const struct dpcd_block i915_dpcd_debug[] = {
	{ .offset = DP_DPCD_REV, .size = DP_RECEIVER_CAP_SIZE },
	{ .offset = DP_PSR_SUPPORT, .end = DP_PSR_CAPS },
	{ .offset = DP_DOWNSTREAM_PORT_0, .size = 16 },
	{ .offset = DP_LINK_BW_SET, .end = DP_EDP_CONFIGURATION_SET },
	{ .offset = DP_SINK_COUNT, .end = DP_ADJUST_REQUEST_LANE2_3 },
	{ .offset = DP_SET_POWER },
	{ .offset = DP_EDP_DPCD_REV },
	{ .offset = DP_EDP_GENERAL_CAP_1, .end = DP_EDP_GENERAL_CAP_3 },
	{ .offset = DP_EDP_DISPLAY_CONTROL_REGISTER, .end = DP_EDP_BACKLIGHT_FREQ_CAP_MAX_LSB },
	{ .offset = DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET, .end = DP_EDP_DBC_MAXIMUM_BRIGHTNESS_SET },
};
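
/*
 * Reads are capped at sizeof(buf) (16 bytes) per block; drm_dp_dpcd_read()
 * returns the number of bytes actually read, which is what gets hex-dumped.
 */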
static int i915_dpcd_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct intel_dp *intel_dp =
		enc_to_intel_dp(intel_attached_encoder(to_intel_connector(connector)));
	u8 buf[16];
	ssize_t err;
	int i;

	if (connector->status != connector_status_connected)
		return -ENODEV;

	for (i = 0; i < ARRAY_SIZE(i915_dpcd_debug); i++) {
		const struct dpcd_block *b = &i915_dpcd_debug[i];
		size_t size = b->end ? b->end - b->offset + 1 : (b->size ?: 1);

		if (b->edp &&
		    connector->connector_type != DRM_MODE_CONNECTOR_eDP)
			continue;

		/* low tech for now */
		if (WARN_ON(size > sizeof(buf)))
			continue;

		err = drm_dp_dpcd_read(&intel_dp->aux, b->offset, buf, size);
		if (err < 0)
			seq_printf(m, "%04x: ERROR %d\n", b->offset, (int)err);
		else
			seq_printf(m, "%04x: %*ph\n", b->offset, (int)err, buf);
	}

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(i915_dpcd);

static int i915_panel_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct intel_dp *intel_dp =
		enc_to_intel_dp(intel_attached_encoder(to_intel_connector(connector)));

	if (connector->status != connector_status_connected)
		return -ENODEV;

	seq_printf(m, "Panel power up delay: %d\n",
		   intel_dp->panel_power_up_delay);
	seq_printf(m, "Panel power down delay: %d\n",
		   intel_dp->panel_power_down_delay);
	seq_printf(m, "Backlight on delay: %d\n",
		   intel_dp->backlight_on_delay);
	seq_printf(m, "Backlight off delay: %d\n",
		   intel_dp->backlight_off_delay);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(i915_panel);

static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct intel_connector *intel_connector = to_intel_connector(connector);

	if (connector->status != connector_status_connected)
		return -ENODEV;

	/* Bail out early if HDCP is not supported by this connector */
	if (!intel_connector->hdcp.shim)
		return -EINVAL;

	seq_printf(m, "%s:%d HDCP version: ", connector->name,
		   connector->base.id);
	intel_hdcp_info(m, intel_connector);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability);
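
/*
 * The lock dance below follows the standard drm_modeset_lock() pattern:
 * on -EDEADLK all locks are dropped via drm_modeset_backoff() and the
 * whole sequence is retried, since both the connection_mutex and the
 * CRTC mutex are needed to sample a consistent DSC/FEC state.
 */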
static int i915_dsc_fec_support_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct drm_device *dev = connector->dev;
	struct drm_crtc *crtc;
	struct intel_dp *intel_dp;
	struct drm_modeset_acquire_ctx ctx;
	struct intel_crtc_state *crtc_state = NULL;
	int ret = 0;
	bool try_again = false;

	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);

	do {
		try_again = false;
		ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
				       &ctx);
		if (ret) {
			if (ret == -EDEADLK && !drm_modeset_backoff(&ctx)) {
				try_again = true;
				continue;
			}
			break;
		}
		crtc = connector->state->crtc;
		if (connector->status != connector_status_connected || !crtc) {
			ret = -ENODEV;
			break;
		}
		ret = drm_modeset_lock(&crtc->mutex, &ctx);
		if (ret == -EDEADLK) {
			ret = drm_modeset_backoff(&ctx);
			if (!ret) {
				try_again = true;
				continue;
			}
			break;
		} else if (ret) {
			break;
		}
		intel_dp = enc_to_intel_dp(intel_attached_encoder(to_intel_connector(connector)));
		crtc_state = to_intel_crtc_state(crtc->state);
		seq_printf(m, "DSC_Enabled: %s\n",
			   yesno(crtc_state->dsc.compression_enable));
		seq_printf(m, "DSC_Sink_Support: %s\n",
			   yesno(drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)));
		seq_printf(m, "Force_DSC_Enable: %s\n",
			   yesno(intel_dp->force_dsc_en));
		if (!intel_dp_is_edp(intel_dp))
			seq_printf(m, "FEC_Sink_Support: %s\n",
				   yesno(drm_dp_sink_supports_fec(intel_dp->fec_capable)));
	} while (try_again);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}
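
/*
 * Writing a boolean here only latches intel_dp->force_dsc_en; the flag
 * is consumed later during DP link computation (not in this file), so a
 * subsequent modeset is presumably required for it to take effect.
 */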
static ssize_t i915_dsc_fec_support_write(struct file *file,
					  const char __user *ubuf,
					  size_t len, loff_t *offp)
{
	bool dsc_enable = false;
	int ret;
	struct drm_connector *connector =
		((struct seq_file *)file->private_data)->private;
	struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector));
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

	if (len == 0)
		return 0;

	DRM_DEBUG_DRIVER("Copied %zu bytes from user to force DSC\n",
			 len);

	ret = kstrtobool_from_user(ubuf, len, &dsc_enable);
	if (ret < 0)
		return ret;

	DRM_DEBUG_DRIVER("Got %s for DSC Enable\n",
			 (dsc_enable) ? "true" : "false");
	intel_dp->force_dsc_en = dsc_enable;

	*offp += len;
	return len;
}

static int i915_dsc_fec_support_open(struct inode *inode,
				     struct file *file)
{
	return single_open(file, i915_dsc_fec_support_show,
			   inode->i_private);
}

static const struct file_operations i915_dsc_fec_support_fops = {
	.owner = THIS_MODULE,
	.open = i915_dsc_fec_support_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_dsc_fec_support_write
};

/**
 * i915_debugfs_connector_add - add i915 specific connector debugfs files
 * @connector: pointer to a registered drm_connector
 *
 * Cleanup will be done by drm_connector_unregister() through a call to
 * drm_debugfs_connector_remove().
 *
 * Returns 0 on success, negative error codes on error.
 */
int i915_debugfs_connector_add(struct drm_connector *connector)
{
	struct dentry *root = connector->debugfs_entry;
	struct drm_i915_private *dev_priv = to_i915(connector->dev);

	/* The connector must have been registered beforehand. */
	if (!root)
		return -ENODEV;

	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
	    connector->connector_type == DRM_MODE_CONNECTOR_eDP)
		debugfs_create_file("i915_dpcd", S_IRUGO, root,
				    connector, &i915_dpcd_fops);

	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
		debugfs_create_file("i915_panel_timings", S_IRUGO, root,
				    connector, &i915_panel_fops);
		debugfs_create_file("i915_psr_sink_status", S_IRUGO, root,
				    connector, &i915_psr_sink_status_fops);
	}

	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
	    connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
	    connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) {
		debugfs_create_file("i915_hdcp_sink_capability", S_IRUGO, root,
				    connector, &i915_hdcp_sink_capability_fops);
	}

	if (INTEL_GEN(dev_priv) >= 10 &&
	    (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
	     connector->connector_type == DRM_MODE_CONNECTOR_eDP))
		debugfs_create_file("i915_dsc_fec_support", S_IRUGO, root,
				    connector, &i915_dsc_fec_support_fops);

	return 0;
}