/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 */
#include <linux/debugfs.h>
#include <linux/sort.h>
#include <linux/sched/mm.h>
#include "intel_drv.h"
#include "intel_guc_submission.h"
static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
{
        return to_i915(node->minor->dev);
}
static int i915_capabilities(struct seq_file *m, void *data)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        const struct intel_device_info *info = INTEL_INFO(dev_priv);
        struct drm_printer p = drm_seq_file_printer(m);

        seq_printf(m, "gen: %d\n", INTEL_GEN(dev_priv));
        seq_printf(m, "platform: %s\n", intel_platform_name(info->platform));
        seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev_priv));

        intel_device_info_dump_flags(info, &p);
        intel_device_info_dump_runtime(info, &p);

        kernel_param_lock(THIS_MODULE);
        i915_params_dump(&i915_modparams, &p);
        kernel_param_unlock(THIS_MODULE);

        return 0;
}
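/*
 * Note (added): each i915_*_info() function in this file is a debugfs
 * show() callback.  On a typical system the nodes appear under the DRM
 * debugfs root, e.g. (the minor number "0" is an assumption; it depends
 * on the device):
 *
 *	# cat /sys/kernel/debug/dri/0/i915_capabilities
 */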
static char get_active_flag(struct drm_i915_gem_object *obj)
{
        return i915_gem_object_is_active(obj) ? '*' : ' ';
}

static char get_pin_flag(struct drm_i915_gem_object *obj)
{
        return obj->pin_global ? 'p' : ' ';
}

static char get_tiling_flag(struct drm_i915_gem_object *obj)
{
        switch (i915_gem_object_get_tiling(obj)) {
        default:
        case I915_TILING_NONE: return ' ';
        case I915_TILING_X: return 'X';
        case I915_TILING_Y: return 'Y';
        }
}

static char get_global_flag(struct drm_i915_gem_object *obj)
{
        return obj->userfault_count ? 'g' : ' ';
}

static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
{
        return obj->mm.mapping ? 'M' : ' ';
}
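/*
 * Note (added): the helpers above supply the one-character flags printed by
 * describe_obj(): '*' = active, 'p' = pinned for display (pin_global),
 * 'X'/'Y' = tiling mode, 'g' = object has an outstanding GGTT userspace
 * fault, 'M' = object has a kernel mapping of its pages.
 */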
static u64
i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
{
        struct i915_vma *vma;
        u64 size = 0;

        for_each_ggtt_vma(vma, obj) {
                if (drm_mm_node_allocated(&vma->node))
                        size += vma->node.size;
        }

        return size;
}
static const char *
stringify_page_sizes(unsigned int page_sizes, char *buf, size_t len)
{
        int x = 0;

        switch (page_sizes) {
        case 0:
                return "";
        case I915_GTT_PAGE_SIZE_4K:
                return "4K";
        case I915_GTT_PAGE_SIZE_64K:
                return "64K";
        case I915_GTT_PAGE_SIZE_2M:
                return "2M";
        default:
                if (!buf)
                        return "M";

                if (page_sizes & I915_GTT_PAGE_SIZE_2M)
                        x += snprintf(buf + x, len - x, "2M, ");
                if (page_sizes & I915_GTT_PAGE_SIZE_64K)
                        x += snprintf(buf + x, len - x, "64K, ");
                if (page_sizes & I915_GTT_PAGE_SIZE_4K)
                        x += snprintf(buf + x, len - x, "4K, ");

                /* strip the trailing ", " */
                buf[x - 2] = '\0';

                return buf;
        }
}
static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
        struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
        struct intel_engine_cs *engine;
        struct i915_vma *vma;
        unsigned int frontbuffer_bits;
        int pin_count = 0;

        lockdep_assert_held(&obj->base.dev->struct_mutex);

        seq_printf(m, "%pK: %c%c%c%c%c %8zdKiB %02x %02x %s%s%s",
                   &obj->base,
                   get_active_flag(obj),
                   get_pin_flag(obj),
                   get_tiling_flag(obj),
                   get_global_flag(obj),
                   get_pin_mapped_flag(obj),
                   obj->base.size / 1024,
                   obj->base.read_domains,
                   obj->base.write_domain,
                   i915_cache_level_str(dev_priv, obj->cache_level),
                   obj->mm.dirty ? " dirty" : "",
                   obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : "");
        if (obj->base.name)
                seq_printf(m, " (name: %d)", obj->base.name);
        list_for_each_entry(vma, &obj->vma_list, obj_link) {
                if (i915_vma_is_pinned(vma))
                        pin_count++;
        }
        seq_printf(m, " (pinned x %d)", pin_count);
        if (obj->pin_global)
                seq_printf(m, " (global)");
        list_for_each_entry(vma, &obj->vma_list, obj_link) {
                if (!drm_mm_node_allocated(&vma->node))
                        continue;

                seq_printf(m, " (%sgtt offset: %08llx, size: %08llx, pages: %s",
                           i915_vma_is_ggtt(vma) ? "g" : "pp",
                           vma->node.start, vma->node.size,
                           stringify_page_sizes(vma->page_sizes.gtt, NULL, 0));
                if (i915_vma_is_ggtt(vma)) {
                        switch (vma->ggtt_view.type) {
                        case I915_GGTT_VIEW_NORMAL:
                                seq_puts(m, ", normal");
                                break;

                        case I915_GGTT_VIEW_PARTIAL:
                                seq_printf(m, ", partial [%08llx+%x]",
                                           vma->ggtt_view.partial.offset << PAGE_SHIFT,
                                           vma->ggtt_view.partial.size << PAGE_SHIFT);
                                break;

                        case I915_GGTT_VIEW_ROTATED:
                                seq_printf(m, ", rotated [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]",
                                           vma->ggtt_view.rotated.plane[0].width,
                                           vma->ggtt_view.rotated.plane[0].height,
                                           vma->ggtt_view.rotated.plane[0].stride,
                                           vma->ggtt_view.rotated.plane[0].offset,
                                           vma->ggtt_view.rotated.plane[1].width,
                                           vma->ggtt_view.rotated.plane[1].height,
                                           vma->ggtt_view.rotated.plane[1].stride,
                                           vma->ggtt_view.rotated.plane[1].offset);
                                break;

                        default:
                                MISSING_CASE(vma->ggtt_view.type);
                                break;
                        }
                }
                if (vma->fence)
                        seq_printf(m, " , fence: %d%s",
                                   vma->fence->id,
                                   i915_gem_active_isset(&vma->last_fence) ? "*" : "");
                seq_puts(m, ")");
        }
        if (obj->stolen)
                seq_printf(m, " (stolen: %08llx)", obj->stolen->start);

        engine = i915_gem_object_last_write_engine(obj);
        if (engine)
                seq_printf(m, " (%s)", engine->name);

        frontbuffer_bits = atomic_read(&obj->frontbuffer_bits);
        if (frontbuffer_bits)
                seq_printf(m, " (frontbuffer: 0x%03x)", frontbuffer_bits);
}
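/*
 * Note (added): obj_rank_by_stolen() below is a sort() comparator; the
 * stolen-object dump snapshots the object lists into an array under the
 * obj_lock spinlock and only then sorts and prints, so the slow seq output
 * never runs with the spinlock held.
 */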
static int obj_rank_by_stolen(const void *A, const void *B)
{
        const struct drm_i915_gem_object *a =
                *(const struct drm_i915_gem_object **)A;
        const struct drm_i915_gem_object *b =
                *(const struct drm_i915_gem_object **)B;

        if (a->stolen->start < b->stolen->start)
                return -1;
        if (a->stolen->start > b->stolen->start)
                return 1;
        return 0;
}
static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct drm_device *dev = &dev_priv->drm;
        struct drm_i915_gem_object **objects;
        struct drm_i915_gem_object *obj;
        u64 total_obj_size, total_gtt_size;
        unsigned long total, count, n;
        int ret;

        total = READ_ONCE(dev_priv->mm.object_count);
        objects = kvmalloc_array(total, sizeof(*objects), GFP_KERNEL);
        if (!objects)
                return -ENOMEM;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                goto out;

        total_obj_size = total_gtt_size = count = 0;

        spin_lock(&dev_priv->mm.obj_lock);
        list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
                if (count == total)
                        break;

                if (obj->stolen == NULL)
                        continue;

                objects[count++] = obj;
                total_obj_size += obj->base.size;
                total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
        }
        list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
                if (count == total)
                        break;

                if (obj->stolen == NULL)
                        continue;

                objects[count++] = obj;
                total_obj_size += obj->base.size;
        }
        spin_unlock(&dev_priv->mm.obj_lock);

        sort(objects, count, sizeof(*objects), obj_rank_by_stolen, NULL);

        seq_puts(m, "Stolen:\n");
        for (n = 0; n < count; n++) {
                seq_puts(m, "   ");
                describe_obj(m, objects[n]);
                seq_putc(m, '\n');
        }
        seq_printf(m, "Total %lu objects, %llu bytes, %llu GTT size\n",
                   count, total_obj_size, total_gtt_size);

        mutex_unlock(&dev->struct_mutex);
out:
        kvfree(objects);
        return ret;
}
struct file_stats {
        struct drm_i915_file_private *file_priv;
        unsigned long count;
        u64 total, unbound;
        u64 global, shared;
        u64 active, inactive;
};
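/*
 * Note (added): file_stats accumulates per-client object totals;
 * per_file_stats() below is the idr_for_each() callback that walks a
 * client's object idr and does the accounting into one of these.
 */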
static int per_file_stats(int id, void *ptr, void *data)
{
        struct drm_i915_gem_object *obj = ptr;
        struct file_stats *stats = data;
        struct i915_vma *vma;

        lockdep_assert_held(&obj->base.dev->struct_mutex);

        stats->count++;
        stats->total += obj->base.size;
        if (!obj->bind_count)
                stats->unbound += obj->base.size;
        if (obj->base.name || obj->base.dma_buf)
                stats->shared += obj->base.size;

        list_for_each_entry(vma, &obj->vma_list, obj_link) {
                if (!drm_mm_node_allocated(&vma->node))
                        continue;

                if (i915_vma_is_ggtt(vma)) {
                        stats->global += vma->node.size;
                } else {
                        struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vma->vm);

                        if (ppgtt->base.file != stats->file_priv)
                                continue;
                }

                if (i915_vma_is_active(vma))
                        stats->active += vma->node.size;
                else
                        stats->inactive += vma->node.size;
        }

        return 0;
}
#define print_file_stats(m, name, stats) do { \
        if (stats.count) \
                seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu global, %llu shared, %llu unbound)\n", \
                           name, \
                           stats.count, \
                           stats.total, \
                           stats.active, \
                           stats.inactive, \
                           stats.global, \
                           stats.shared, \
                           stats.unbound); \
} while (0)
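/*
 * Note (added): print_file_stats() is a macro rather than a function so the
 * same statistics line can be emitted for any struct file_stats lvalue, and
 * it stays silent when stats.count is zero.
 */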
static void print_batch_pool_stats(struct seq_file *m,
                                   struct drm_i915_private *dev_priv)
{
        struct drm_i915_gem_object *obj;
        struct file_stats stats;
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
        int j;

        memset(&stats, 0, sizeof(stats));

        for_each_engine(engine, dev_priv, id) {
                for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
                        list_for_each_entry(obj,
                                            &engine->batch_pool.cache_list[j],
                                            batch_pool_link)
                                per_file_stats(0, obj, &stats);
                }
        }

        print_file_stats(m, "[k]batch pool", stats);
}
static int per_file_ctx_stats(int id, void *ptr, void *data)
{
        struct i915_gem_context *ctx = ptr;
        int n;

        for (n = 0; n < ARRAY_SIZE(ctx->engine); n++) {
                if (ctx->engine[n].state)
                        per_file_stats(0, ctx->engine[n].state->obj, data);
                if (ctx->engine[n].ring)
                        per_file_stats(0, ctx->engine[n].ring->vma->obj, data);
        }

        return 0;
}
static void print_context_stats(struct seq_file *m,
                                struct drm_i915_private *dev_priv)
{
        struct drm_device *dev = &dev_priv->drm;
        struct file_stats stats;
        struct drm_file *file;

        memset(&stats, 0, sizeof(stats));

        mutex_lock(&dev->struct_mutex);
        if (dev_priv->kernel_context)
                per_file_ctx_stats(0, dev_priv->kernel_context, &stats);

        list_for_each_entry(file, &dev->filelist, lhead) {
                struct drm_i915_file_private *fpriv = file->driver_priv;
                idr_for_each(&fpriv->context_idr, per_file_ctx_stats, &stats);
        }
        mutex_unlock(&dev->struct_mutex);

        print_file_stats(m, "[k]contexts", stats);
}
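/*
 * Note (added): in i915_gem_object_info() below, filelist_mutex is taken
 * outside struct_mutex for the per-client walk; keeping that ordering
 * consistent with the rest of the driver avoids lock inversion.
 */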
static int i915_gem_object_info(struct seq_file *m, void *data)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct drm_device *dev = &dev_priv->drm;
        struct i915_ggtt *ggtt = &dev_priv->ggtt;
        u32 count, mapped_count, purgeable_count, dpy_count, huge_count;
        u64 size, mapped_size, purgeable_size, dpy_size, huge_size;
        struct drm_i915_gem_object *obj;
        unsigned int page_sizes = 0;
        struct drm_file *file;
        char buf[80];
        int ret;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        seq_printf(m, "%u objects, %llu bytes\n",
                   dev_priv->mm.object_count,
                   dev_priv->mm.object_memory);

        size = count = 0;
        mapped_size = mapped_count = 0;
        purgeable_size = purgeable_count = 0;
        huge_size = huge_count = 0;

        spin_lock(&dev_priv->mm.obj_lock);
        list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
                size += obj->base.size;
                ++count;

                if (obj->mm.madv == I915_MADV_DONTNEED) {
                        purgeable_size += obj->base.size;
                        ++purgeable_count;
                }

                if (obj->mm.mapping) {
                        mapped_count++;
                        mapped_size += obj->base.size;
                }

                if (obj->mm.page_sizes.sg > I915_GTT_PAGE_SIZE) {
                        huge_count++;
                        huge_size += obj->base.size;
                        page_sizes |= obj->mm.page_sizes.sg;
                }
        }
        seq_printf(m, "%u unbound objects, %llu bytes\n", count, size);

        size = count = dpy_size = dpy_count = 0;
        list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
                size += obj->base.size;
                ++count;

                if (obj->pin_global) {
                        dpy_size += obj->base.size;
                        ++dpy_count;
                }

                if (obj->mm.madv == I915_MADV_DONTNEED) {
                        purgeable_size += obj->base.size;
                        ++purgeable_count;
                }

                if (obj->mm.mapping) {
                        mapped_count++;
                        mapped_size += obj->base.size;
                }

                if (obj->mm.page_sizes.sg > I915_GTT_PAGE_SIZE) {
                        huge_count++;
                        huge_size += obj->base.size;
                        page_sizes |= obj->mm.page_sizes.sg;
                }
        }
        spin_unlock(&dev_priv->mm.obj_lock);

        seq_printf(m, "%u bound objects, %llu bytes\n",
                   count, size);
        seq_printf(m, "%u purgeable objects, %llu bytes\n",
                   purgeable_count, purgeable_size);
        seq_printf(m, "%u mapped objects, %llu bytes\n",
                   mapped_count, mapped_size);
        seq_printf(m, "%u huge-paged objects (%s) %llu bytes\n",
                   huge_count,
                   stringify_page_sizes(page_sizes, buf, sizeof(buf)),
                   huge_size);
        seq_printf(m, "%u display objects (globally pinned), %llu bytes\n",
                   dpy_count, dpy_size);

        seq_printf(m, "%llu [%pa] gtt total\n",
                   ggtt->base.total, &ggtt->mappable_end);
        seq_printf(m, "Supported page sizes: %s\n",
                   stringify_page_sizes(INTEL_INFO(dev_priv)->page_sizes,
                                        buf, sizeof(buf)));

        seq_putc(m, '\n');
        print_batch_pool_stats(m, dev_priv);
        mutex_unlock(&dev->struct_mutex);

        mutex_lock(&dev->filelist_mutex);
        print_context_stats(m, dev_priv);
        list_for_each_entry_reverse(file, &dev->filelist, lhead) {
                struct file_stats stats;
                struct drm_i915_file_private *file_priv = file->driver_priv;
                struct drm_i915_gem_request *request;
                struct task_struct *task;

                mutex_lock(&dev->struct_mutex);

                memset(&stats, 0, sizeof(stats));
                stats.file_priv = file->driver_priv;
                spin_lock(&file->table_lock);
                idr_for_each(&file->object_idr, per_file_stats, &stats);
                spin_unlock(&file->table_lock);
                /*
                 * Although we have a valid reference on file->pid, that does
                 * not guarantee that the task_struct who called get_pid() is
                 * still alive (e.g. get_pid(current) => fork() => exit()).
                 * Therefore, we need to protect this ->comm access using RCU.
                 */
                request = list_first_entry_or_null(&file_priv->mm.request_list,
                                                   struct drm_i915_gem_request,
                                                   client_link);
                rcu_read_lock();
                task = pid_task(request && request->ctx->pid ?
                                request->ctx->pid : file->pid,
                                PIDTYPE_PID);
                print_file_stats(m, task ? task->comm : "<unknown>", stats);
                rcu_read_unlock();

                mutex_unlock(&dev->struct_mutex);
        }
        mutex_unlock(&dev->filelist_mutex);

        return 0;
}
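/*
 * Note (added): unlike the stolen-list walker above, i915_gem_gtt_info()
 * below only snapshots the bound list, so its totals cover objects that had
 * a GTT binding at the time of the read.
 */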
static int i915_gem_gtt_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = m->private;
        struct drm_i915_private *dev_priv = node_to_i915(node);
        struct drm_device *dev = &dev_priv->drm;
        struct drm_i915_gem_object **objects;
        struct drm_i915_gem_object *obj;
        u64 total_obj_size, total_gtt_size;
        unsigned long nobject, n;
        int count, ret;

        nobject = READ_ONCE(dev_priv->mm.object_count);
        objects = kvmalloc_array(nobject, sizeof(*objects), GFP_KERNEL);
        if (!objects)
                return -ENOMEM;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        count = 0;
        spin_lock(&dev_priv->mm.obj_lock);
        list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
                objects[count++] = obj;
                if (count == nobject)
                        break;
        }
        spin_unlock(&dev_priv->mm.obj_lock);

        total_obj_size = total_gtt_size = 0;
        for (n = 0; n < count; n++) {
                obj = objects[n];

                seq_puts(m, "   ");
                describe_obj(m, obj);
                seq_putc(m, '\n');
                total_obj_size += obj->base.size;
                total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
        }

        mutex_unlock(&dev->struct_mutex);

        seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
                   count, total_obj_size, total_gtt_size);
        kvfree(objects);

        return ret;
}
static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct drm_device *dev = &dev_priv->drm;
        struct drm_i915_gem_object *obj;
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
        int total = 0;
        int ret, j;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        for_each_engine(engine, dev_priv, id) {
                for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
                        int count;

                        count = 0;
                        list_for_each_entry(obj,
                                            &engine->batch_pool.cache_list[j],
                                            batch_pool_link)
                                count++;
                        seq_printf(m, "%s cache[%d]: %d objects\n",
                                   engine->name, j, count);

                        list_for_each_entry(obj,
                                            &engine->batch_pool.cache_list[j],
                                            batch_pool_link) {
                                seq_puts(m, "   ");
                                describe_obj(m, obj);
                                seq_putc(m, '\n');
                        }

                        total += count;
                }
        }

        seq_printf(m, "total: %d\n", total);

        mutex_unlock(&dev->struct_mutex);

        return 0;
}
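/*
 * Note (added): the interrupt dump below holds a runtime-PM wakeref for the
 * duration, and the per-pipe registers are additionally guarded with
 * intel_display_power_get_if_enabled() so a powered-down pipe is reported
 * as such instead of being read (which could fault).
 */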
static int i915_interrupt_info(struct seq_file *m, void *data)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
        int i, pipe;

        intel_runtime_pm_get(dev_priv);

        if (IS_CHERRYVIEW(dev_priv)) {
                seq_printf(m, "Master Interrupt Control:\t%08x\n",
                           I915_READ(GEN8_MASTER_IRQ));

                seq_printf(m, "Display IER:\t%08x\n",
                           I915_READ(VLV_IER));
                seq_printf(m, "Display IIR:\t%08x\n",
                           I915_READ(VLV_IIR));
                seq_printf(m, "Display IIR_RW:\t%08x\n",
                           I915_READ(VLV_IIR_RW));
                seq_printf(m, "Display IMR:\t%08x\n",
                           I915_READ(VLV_IMR));
                for_each_pipe(dev_priv, pipe) {
                        enum intel_display_power_domain power_domain;

                        power_domain = POWER_DOMAIN_PIPE(pipe);
                        if (!intel_display_power_get_if_enabled(dev_priv,
                                                                power_domain)) {
                                seq_printf(m, "Pipe %c power disabled\n",
                                           pipe_name(pipe));
                                continue;
                        }

                        seq_printf(m, "Pipe %c stat:\t%08x\n",
                                   pipe_name(pipe),
                                   I915_READ(PIPESTAT(pipe)));

                        intel_display_power_put(dev_priv, power_domain);
                }

                intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
                seq_printf(m, "Port hotplug:\t%08x\n",
                           I915_READ(PORT_HOTPLUG_EN));
                seq_printf(m, "DPFLIPSTAT:\t%08x\n",
                           I915_READ(VLV_DPFLIPSTAT));
                seq_printf(m, "DPINVGTT:\t%08x\n",
                           I915_READ(DPINVGTT));
                intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);

                for (i = 0; i < 4; i++) {
                        seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
                                   i, I915_READ(GEN8_GT_IMR(i)));
                        seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
                                   i, I915_READ(GEN8_GT_IIR(i)));
                        seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
                                   i, I915_READ(GEN8_GT_IER(i)));
                }

                seq_printf(m, "PCU interrupt mask:\t%08x\n",
                           I915_READ(GEN8_PCU_IMR));
                seq_printf(m, "PCU interrupt identity:\t%08x\n",
                           I915_READ(GEN8_PCU_IIR));
                seq_printf(m, "PCU interrupt enable:\t%08x\n",
                           I915_READ(GEN8_PCU_IER));
        } else if (INTEL_GEN(dev_priv) >= 8) {
                seq_printf(m, "Master Interrupt Control:\t%08x\n",
                           I915_READ(GEN8_MASTER_IRQ));

                for (i = 0; i < 4; i++) {
                        seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
                                   i, I915_READ(GEN8_GT_IMR(i)));
                        seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
                                   i, I915_READ(GEN8_GT_IIR(i)));
                        seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
                                   i, I915_READ(GEN8_GT_IER(i)));
                }

                for_each_pipe(dev_priv, pipe) {
                        enum intel_display_power_domain power_domain;

                        power_domain = POWER_DOMAIN_PIPE(pipe);
                        if (!intel_display_power_get_if_enabled(dev_priv,
                                                                power_domain)) {
                                seq_printf(m, "Pipe %c power disabled\n",
                                           pipe_name(pipe));
                                continue;
                        }
                        seq_printf(m, "Pipe %c IMR:\t%08x\n",
                                   pipe_name(pipe),
                                   I915_READ(GEN8_DE_PIPE_IMR(pipe)));
                        seq_printf(m, "Pipe %c IIR:\t%08x\n",
                                   pipe_name(pipe),
                                   I915_READ(GEN8_DE_PIPE_IIR(pipe)));
                        seq_printf(m, "Pipe %c IER:\t%08x\n",
                                   pipe_name(pipe),
                                   I915_READ(GEN8_DE_PIPE_IER(pipe)));

                        intel_display_power_put(dev_priv, power_domain);
                }

                seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
                           I915_READ(GEN8_DE_PORT_IMR));
                seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
                           I915_READ(GEN8_DE_PORT_IIR));
                seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
                           I915_READ(GEN8_DE_PORT_IER));

                seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
                           I915_READ(GEN8_DE_MISC_IMR));
                seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
                           I915_READ(GEN8_DE_MISC_IIR));
                seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
                           I915_READ(GEN8_DE_MISC_IER));

                seq_printf(m, "PCU interrupt mask:\t%08x\n",
                           I915_READ(GEN8_PCU_IMR));
                seq_printf(m, "PCU interrupt identity:\t%08x\n",
                           I915_READ(GEN8_PCU_IIR));
                seq_printf(m, "PCU interrupt enable:\t%08x\n",
                           I915_READ(GEN8_PCU_IER));
        } else if (IS_VALLEYVIEW(dev_priv)) {
                seq_printf(m, "Display IER:\t%08x\n",
                           I915_READ(VLV_IER));
                seq_printf(m, "Display IIR:\t%08x\n",
                           I915_READ(VLV_IIR));
                seq_printf(m, "Display IIR_RW:\t%08x\n",
                           I915_READ(VLV_IIR_RW));
                seq_printf(m, "Display IMR:\t%08x\n",
                           I915_READ(VLV_IMR));
                for_each_pipe(dev_priv, pipe) {
                        enum intel_display_power_domain power_domain;

                        power_domain = POWER_DOMAIN_PIPE(pipe);
                        if (!intel_display_power_get_if_enabled(dev_priv,
                                                                power_domain)) {
                                seq_printf(m, "Pipe %c power disabled\n",
                                           pipe_name(pipe));
                                continue;
                        }

                        seq_printf(m, "Pipe %c stat:\t%08x\n",
                                   pipe_name(pipe),
                                   I915_READ(PIPESTAT(pipe)));
                        intel_display_power_put(dev_priv, power_domain);
                }

                seq_printf(m, "Master IER:\t%08x\n",
                           I915_READ(VLV_MASTER_IER));

                seq_printf(m, "Render IER:\t%08x\n",
                           I915_READ(GTIER));
                seq_printf(m, "Render IIR:\t%08x\n",
                           I915_READ(GTIIR));
                seq_printf(m, "Render IMR:\t%08x\n",
                           I915_READ(GTIMR));

                seq_printf(m, "PM IER:\t\t%08x\n",
                           I915_READ(GEN6_PMIER));
                seq_printf(m, "PM IIR:\t\t%08x\n",
                           I915_READ(GEN6_PMIIR));
                seq_printf(m, "PM IMR:\t\t%08x\n",
                           I915_READ(GEN6_PMIMR));

                seq_printf(m, "Port hotplug:\t%08x\n",
                           I915_READ(PORT_HOTPLUG_EN));
                seq_printf(m, "DPFLIPSTAT:\t%08x\n",
                           I915_READ(VLV_DPFLIPSTAT));
                seq_printf(m, "DPINVGTT:\t%08x\n",
                           I915_READ(DPINVGTT));
        } else if (!HAS_PCH_SPLIT(dev_priv)) {
                seq_printf(m, "Interrupt enable: %08x\n",
                           I915_READ(IER));
                seq_printf(m, "Interrupt identity: %08x\n",
                           I915_READ(IIR));
                seq_printf(m, "Interrupt mask: %08x\n",
                           I915_READ(IMR));
                for_each_pipe(dev_priv, pipe)
                        seq_printf(m, "Pipe %c stat: %08x\n",
                                   pipe_name(pipe),
                                   I915_READ(PIPESTAT(pipe)));
        } else {
                seq_printf(m, "North Display Interrupt enable: %08x\n",
                           I915_READ(DEIER));
                seq_printf(m, "North Display Interrupt identity: %08x\n",
                           I915_READ(DEIIR));
                seq_printf(m, "North Display Interrupt mask: %08x\n",
                           I915_READ(DEIMR));
                seq_printf(m, "South Display Interrupt enable: %08x\n",
                           I915_READ(SDEIER));
                seq_printf(m, "South Display Interrupt identity: %08x\n",
                           I915_READ(SDEIIR));
                seq_printf(m, "South Display Interrupt mask: %08x\n",
                           I915_READ(SDEIMR));
                seq_printf(m, "Graphics Interrupt enable: %08x\n",
                           I915_READ(GTIER));
                seq_printf(m, "Graphics Interrupt identity: %08x\n",
                           I915_READ(GTIIR));
                seq_printf(m, "Graphics Interrupt mask: %08x\n",
                           I915_READ(GTIMR));
        }
        if (INTEL_GEN(dev_priv) >= 6) {
                for_each_engine(engine, dev_priv, id) {
                        seq_printf(m,
                                   "Graphics Interrupt mask (%s): %08x\n",
                                   engine->name, I915_READ_IMR(engine));
                }
        }
        intel_runtime_pm_put(dev_priv);

        return 0;
}
static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct drm_device *dev = &dev_priv->drm;
        int i, ret;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
        for (i = 0; i < dev_priv->num_fence_regs; i++) {
                struct i915_vma *vma = dev_priv->fence_regs[i].vma;

                seq_printf(m, "Fence %d, pin count = %d, object = ",
                           i, dev_priv->fence_regs[i].pin_count);
                if (!vma)
                        seq_puts(m, "unused");
                else
                        describe_obj(m, vma->obj);
                seq_putc(m, '\n');
        }

        mutex_unlock(&dev->struct_mutex);
        return 0;
}
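/*
 * Note (added): the two nodes below are only built with
 * CONFIG_DRM_I915_CAPTURE_ERROR.  i915_error_state exposes the last
 * captured GPU error state; writing anything to it clears the saved state,
 * e.g. (the minor number "0" is an assumption):
 *
 *	# cat  /sys/kernel/debug/dri/0/i915_error_state
 *	# echo 1 > /sys/kernel/debug/dri/0/i915_error_state
 */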
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
static ssize_t gpu_state_read(struct file *file, char __user *ubuf,
                              size_t count, loff_t *pos)
{
        struct i915_gpu_state *error = file->private_data;
        struct drm_i915_error_state_buf str;
        ssize_t ret;
        loff_t tmp;

        if (!error)
                return 0;

        ret = i915_error_state_buf_init(&str, error->i915, count, *pos);
        if (ret)
                return ret;

        ret = i915_error_state_to_str(&str, error);
        if (ret)
                goto out;

        tmp = 0;
        ret = simple_read_from_buffer(ubuf, count, &tmp, str.buf, str.bytes);
        if (ret < 0)
                goto out;

        *pos = str.start + ret;
out:
        i915_error_state_buf_release(&str);
        return ret;
}

static int gpu_state_release(struct inode *inode, struct file *file)
{
        i915_gpu_state_put(file->private_data);
        return 0;
}

static int i915_gpu_info_open(struct inode *inode, struct file *file)
{
        struct drm_i915_private *i915 = inode->i_private;
        struct i915_gpu_state *gpu;

        intel_runtime_pm_get(i915);
        gpu = i915_capture_gpu_state(i915);
        intel_runtime_pm_put(i915);
        if (!gpu)
                return -ENOMEM;

        file->private_data = gpu;
        return 0;
}

static const struct file_operations i915_gpu_info_fops = {
        .owner = THIS_MODULE,
        .open = i915_gpu_info_open,
        .read = gpu_state_read,
        .llseek = default_llseek,
        .release = gpu_state_release,
};
static ssize_t
i915_error_state_write(struct file *filp,
                       const char __user *ubuf,
                       size_t cnt,
                       loff_t *ppos)
{
        struct i915_gpu_state *error = filp->private_data;

        if (!error)
                return 0;

        DRM_DEBUG_DRIVER("Resetting error state\n");
        i915_reset_error_state(error->i915);

        return cnt;
}

static int i915_error_state_open(struct inode *inode, struct file *file)
{
        file->private_data = i915_first_error_state(inode->i_private);
        return 0;
}

static const struct file_operations i915_error_state_fops = {
        .owner = THIS_MODULE,
        .open = i915_error_state_open,
        .read = gpu_state_read,
        .write = i915_error_state_write,
        .llseek = default_llseek,
        .release = gpu_state_release,
};
#endif
static int
i915_next_seqno_set(void *data, u64 val)
{
        struct drm_i915_private *dev_priv = data;
        struct drm_device *dev = &dev_priv->drm;
        int ret;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        ret = i915_gem_set_global_seqno(dev, val);
        mutex_unlock(&dev->struct_mutex);

        return ret;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops,
                        NULL, i915_next_seqno_set,
                        "0x%llx\n");
static int i915_frequency_info(struct seq_file *m, void *unused)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct intel_rps *rps = &dev_priv->gt_pm.rps;
        int ret = 0;

        intel_runtime_pm_get(dev_priv);

        if (IS_GEN5(dev_priv)) {
                u16 rgvswctl = I915_READ16(MEMSWCTL);
                u16 rgvstat = I915_READ16(MEMSTAT_ILK);

                seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
                seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
                seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
                           MEMSTAT_VID_SHIFT);
                seq_printf(m, "Current P-state: %d\n",
                           (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
        } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
                u32 rpmodectl, freq_sts;

                mutex_lock(&dev_priv->pcu_lock);

                rpmodectl = I915_READ(GEN6_RP_CONTROL);
                seq_printf(m, "Video Turbo Mode: %s\n",
                           yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
                seq_printf(m, "HW control enabled: %s\n",
                           yesno(rpmodectl & GEN6_RP_ENABLE));
                seq_printf(m, "SW control enabled: %s\n",
                           yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
                                 GEN6_RP_MEDIA_SW_MODE));

                freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
                seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
                seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);

                seq_printf(m, "actual GPU freq: %d MHz\n",
                           intel_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));

                seq_printf(m, "current GPU freq: %d MHz\n",
                           intel_gpu_freq(dev_priv, rps->cur_freq));

                seq_printf(m, "max GPU freq: %d MHz\n",
                           intel_gpu_freq(dev_priv, rps->max_freq));

                seq_printf(m, "min GPU freq: %d MHz\n",
                           intel_gpu_freq(dev_priv, rps->min_freq));

                seq_printf(m, "idle GPU freq: %d MHz\n",
                           intel_gpu_freq(dev_priv, rps->idle_freq));

                seq_printf(m,
                           "efficient (RPe) frequency: %d MHz\n",
                           intel_gpu_freq(dev_priv, rps->efficient_freq));
                mutex_unlock(&dev_priv->pcu_lock);
        } else if (INTEL_GEN(dev_priv) >= 6) {
                u32 rp_state_limits;
                u32 gt_perf_status;
                u32 rp_state_cap;
                u32 rpmodectl, rpinclimit, rpdeclimit;
                u32 rpstat, cagf, reqf;
                u32 rpupei, rpcurup, rpprevup;
                u32 rpdownei, rpcurdown, rpprevdown;
                u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
                int max_freq;

                rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
                if (IS_GEN9_LP(dev_priv)) {
                        rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
                        gt_perf_status = I915_READ(BXT_GT_PERF_STATUS);
                } else {
                        rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
                        gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
                }

                /* RPSTAT1 is in the GT power well */
                intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

                reqf = I915_READ(GEN6_RPNSWREQ);
                if (INTEL_GEN(dev_priv) >= 9)
                        reqf >>= 23;
                else {
                        reqf &= ~GEN6_TURBO_DISABLE;
                        if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
                                reqf >>= 24;
                        else
                                reqf >>= 25;
                }
                reqf = intel_gpu_freq(dev_priv, reqf);

                rpmodectl = I915_READ(GEN6_RP_CONTROL);
                rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
                rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD);

                rpstat = I915_READ(GEN6_RPSTAT1);
                rpupei = I915_READ(GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
                rpcurup = I915_READ(GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK;
                rpprevup = I915_READ(GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK;
                rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
                rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
                rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
                cagf = intel_gpu_freq(dev_priv,
                                      intel_get_cagf(dev_priv, rpstat));

                intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

                if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) {
                        pm_ier = I915_READ(GEN6_PMIER);
                        pm_imr = I915_READ(GEN6_PMIMR);
                        pm_isr = I915_READ(GEN6_PMISR);
                        pm_iir = I915_READ(GEN6_PMIIR);
                        pm_mask = I915_READ(GEN6_PMINTRMSK);
                } else {
                        pm_ier = I915_READ(GEN8_GT_IER(2));
                        pm_imr = I915_READ(GEN8_GT_IMR(2));
                        pm_isr = I915_READ(GEN8_GT_ISR(2));
                        pm_iir = I915_READ(GEN8_GT_IIR(2));
                        pm_mask = I915_READ(GEN6_PMINTRMSK);
                }
                seq_printf(m, "Video Turbo Mode: %s\n",
                           yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
                seq_printf(m, "HW control enabled: %s\n",
                           yesno(rpmodectl & GEN6_RP_ENABLE));
                seq_printf(m, "SW control enabled: %s\n",
                           yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
                                 GEN6_RP_MEDIA_SW_MODE));
                seq_printf(m, "PM IER=0x%08x IMR=0x%08x ISR=0x%08x IIR=0x%08x, MASK=0x%08x\n",
                           pm_ier, pm_imr, pm_isr, pm_iir, pm_mask);
                seq_printf(m, "pm_intrmsk_mbz: 0x%08x\n",
                           rps->pm_intrmsk_mbz);
                seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
                seq_printf(m, "Render p-state ratio: %d\n",
                           (gt_perf_status & (INTEL_GEN(dev_priv) >= 9 ? 0x1ff00 : 0xff00)) >> 8);
                seq_printf(m, "Render p-state VID: %d\n",
                           gt_perf_status & 0xff);
                seq_printf(m, "Render p-state limit: %d\n",
                           rp_state_limits & 0xff);
                seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
                seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl);
                seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit);
                seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
                seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
                seq_printf(m, "CAGF: %dMHz\n", cagf);
                seq_printf(m, "RP CUR UP EI: %d (%dus)\n",
                           rpupei, GT_PM_INTERVAL_TO_US(dev_priv, rpupei));
                seq_printf(m, "RP CUR UP: %d (%dus)\n",
                           rpcurup, GT_PM_INTERVAL_TO_US(dev_priv, rpcurup));
                seq_printf(m, "RP PREV UP: %d (%dus)\n",
                           rpprevup, GT_PM_INTERVAL_TO_US(dev_priv, rpprevup));
                seq_printf(m, "Up threshold: %d%%\n", rps->up_threshold);

                seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n",
                           rpdownei, GT_PM_INTERVAL_TO_US(dev_priv, rpdownei));
                seq_printf(m, "RP CUR DOWN: %d (%dus)\n",
                           rpcurdown, GT_PM_INTERVAL_TO_US(dev_priv, rpcurdown));
                seq_printf(m, "RP PREV DOWN: %d (%dus)\n",
                           rpprevdown, GT_PM_INTERVAL_TO_US(dev_priv, rpprevdown));
                seq_printf(m, "Down threshold: %d%%\n", rps->down_threshold);

                max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 0 :
                            rp_state_cap >> 16) & 0xff;
                max_freq *= (IS_GEN9_BC(dev_priv) ||
                             IS_CANNONLAKE(dev_priv) ? GEN9_FREQ_SCALER : 1);
                seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
                           intel_gpu_freq(dev_priv, max_freq));

                max_freq = (rp_state_cap & 0xff00) >> 8;
                max_freq *= (IS_GEN9_BC(dev_priv) ||
                             IS_CANNONLAKE(dev_priv) ? GEN9_FREQ_SCALER : 1);
                seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
                           intel_gpu_freq(dev_priv, max_freq));

                max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 16 :
                            rp_state_cap >> 0) & 0xff;
                max_freq *= (IS_GEN9_BC(dev_priv) ||
                             IS_CANNONLAKE(dev_priv) ? GEN9_FREQ_SCALER : 1);
                seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
                           intel_gpu_freq(dev_priv, max_freq));
                seq_printf(m, "Max overclocked frequency: %dMHz\n",
                           intel_gpu_freq(dev_priv, rps->max_freq));

                seq_printf(m, "Current freq: %d MHz\n",
                           intel_gpu_freq(dev_priv, rps->cur_freq));
                seq_printf(m, "Actual freq: %d MHz\n", cagf);
                seq_printf(m, "Idle freq: %d MHz\n",
                           intel_gpu_freq(dev_priv, rps->idle_freq));
                seq_printf(m, "Min freq: %d MHz\n",
                           intel_gpu_freq(dev_priv, rps->min_freq));
                seq_printf(m, "Boost freq: %d MHz\n",
                           intel_gpu_freq(dev_priv, rps->boost_freq));
                seq_printf(m, "Max freq: %d MHz\n",
                           intel_gpu_freq(dev_priv, rps->max_freq));
                seq_printf(m,
                           "efficient (RPe) frequency: %d MHz\n",
                           intel_gpu_freq(dev_priv, rps->efficient_freq));
        } else {
                seq_puts(m, "no P-state info available\n");
        }

        seq_printf(m, "Current CD clock frequency: %d kHz\n", dev_priv->cdclk.hw.cdclk);
        seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq);
        seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq);

        intel_runtime_pm_put(dev_priv);
        return ret;
}
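/*
 * Note (added): instdone reporting grows with the hardware generation:
 * gen4+ adds SC_INSTDONE and gen7+ adds the per-slice/subslice sampler and
 * row registers, hence the early returns in the helper below.
 */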
static void i915_instdone_info(struct drm_i915_private *dev_priv,
                               struct seq_file *m,
                               struct intel_instdone *instdone)
{
        int slice;
        int subslice;

        seq_printf(m, "\t\tINSTDONE: 0x%08x\n",
                   instdone->instdone);

        if (INTEL_GEN(dev_priv) <= 3)
                return;

        seq_printf(m, "\t\tSC_INSTDONE: 0x%08x\n",
                   instdone->slice_common);

        if (INTEL_GEN(dev_priv) <= 6)
                return;

        for_each_instdone_slice_subslice(dev_priv, slice, subslice)
                seq_printf(m, "\t\tSAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
                           slice, subslice, instdone->sampler[slice][subslice]);

        for_each_instdone_slice_subslice(dev_priv, slice, subslice)
                seq_printf(m, "\t\tROW_INSTDONE[%d][%d]: 0x%08x\n",
                           slice, subslice, instdone->row[slice][subslice]);
}
static int i915_hangcheck_info(struct seq_file *m, void *unused)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct intel_engine_cs *engine;
        u64 acthd[I915_NUM_ENGINES];
        u32 seqno[I915_NUM_ENGINES];
        struct intel_instdone instdone;
        enum intel_engine_id id;

        if (test_bit(I915_WEDGED, &dev_priv->gpu_error.flags))
                seq_puts(m, "Wedged\n");
        if (test_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags))
                seq_puts(m, "Reset in progress: struct_mutex backoff\n");
        if (test_bit(I915_RESET_HANDOFF, &dev_priv->gpu_error.flags))
                seq_puts(m, "Reset in progress: reset handoff to waiter\n");
        if (waitqueue_active(&dev_priv->gpu_error.wait_queue))
                seq_puts(m, "Waiter holding struct mutex\n");
        if (waitqueue_active(&dev_priv->gpu_error.reset_queue))
                seq_puts(m, "struct_mutex blocked for reset\n");

        if (!i915_modparams.enable_hangcheck) {
                seq_puts(m, "Hangcheck disabled\n");
                return 0;
        }

        intel_runtime_pm_get(dev_priv);

        for_each_engine(engine, dev_priv, id) {
                acthd[id] = intel_engine_get_active_head(engine);
                seqno[id] = intel_engine_get_seqno(engine);
        }

        intel_engine_get_instdone(dev_priv->engine[RCS], &instdone);

        intel_runtime_pm_put(dev_priv);

        if (timer_pending(&dev_priv->gpu_error.hangcheck_work.timer))
                seq_printf(m, "Hangcheck active, timer fires in %dms\n",
                           jiffies_to_msecs(dev_priv->gpu_error.hangcheck_work.timer.expires -
                                            jiffies));
        else if (delayed_work_pending(&dev_priv->gpu_error.hangcheck_work))
                seq_puts(m, "Hangcheck active, work pending\n");
        else
                seq_puts(m, "Hangcheck inactive\n");

        seq_printf(m, "GT active? %s\n", yesno(dev_priv->gt.awake));

        for_each_engine(engine, dev_priv, id) {
                struct intel_breadcrumbs *b = &engine->breadcrumbs;
                struct rb_node *rb;

                seq_printf(m, "%s:\n", engine->name);
                seq_printf(m, "\tseqno = %x [current %x, last %x], inflight %d\n",
                           engine->hangcheck.seqno, seqno[id],
                           intel_engine_last_submit(engine),
                           engine->timeline->inflight_seqnos);
                seq_printf(m, "\twaiters? %s, fake irq active? %s, stalled? %s\n",
                           yesno(intel_engine_has_waiter(engine)),
                           yesno(test_bit(engine->id,
                                          &dev_priv->gpu_error.missed_irq_rings)),
                           yesno(engine->hangcheck.stalled));

                spin_lock_irq(&b->rb_lock);
                for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
                        struct intel_wait *w = rb_entry(rb, typeof(*w), node);

                        seq_printf(m, "\t%s [%d] waiting for %x\n",
                                   w->tsk->comm, w->tsk->pid, w->seqno);
                }
                spin_unlock_irq(&b->rb_lock);

                seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
                           (long long)engine->hangcheck.acthd,
                           (long long)acthd[id]);
                seq_printf(m, "\taction = %s(%d) %d ms ago\n",
                           hangcheck_action_to_str(engine->hangcheck.action),
                           engine->hangcheck.action,
                           jiffies_to_msecs(jiffies -
                                            engine->hangcheck.action_timestamp));

                if (engine->id == RCS) {
                        seq_puts(m, "\tinstdone read =\n");

                        i915_instdone_info(dev_priv, m, &instdone);

                        seq_puts(m, "\tinstdone accu =\n");

                        i915_instdone_info(dev_priv, m,
                                           &engine->hangcheck.instdone);
                }
        }

        return 0;
}
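/*
 * Note (added): i915_hangcheck_info() above samples ACTHD and the seqno for
 * every engine under a single runtime-PM wakeref before printing, so the
 * register reads are taken close together in time.
 */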
static int i915_reset_info(struct seq_file *m, void *unused)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct i915_gpu_error *error = &dev_priv->gpu_error;
        struct intel_engine_cs *engine;
        enum intel_engine_id id;

        seq_printf(m, "full gpu reset = %u\n", i915_reset_count(error));

        for_each_engine(engine, dev_priv, id) {
                seq_printf(m, "%s = %u\n", engine->name,
                           i915_reset_engine_count(error, engine));
        }

        return 0;
}
static int ironlake_drpc_info(struct seq_file *m)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        u32 rgvmodectl, rstdbyctl;
        u16 crstandvid;

        rgvmodectl = I915_READ(MEMMODECTL);
        rstdbyctl = I915_READ(RSTDBYCTL);
        crstandvid = I915_READ16(CRSTANDVID);

        seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN));
        seq_printf(m, "Boost freq: %d\n",
                   (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
                   MEMMODE_BOOST_FREQ_SHIFT);
        seq_printf(m, "HW control enabled: %s\n",
                   yesno(rgvmodectl & MEMMODE_HWIDLE_EN));
        seq_printf(m, "SW control enabled: %s\n",
                   yesno(rgvmodectl & MEMMODE_SWMODE_EN));
        seq_printf(m, "Gated voltage change: %s\n",
                   yesno(rgvmodectl & MEMMODE_RCLK_GATE));
        seq_printf(m, "Starting frequency: P%d\n",
                   (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
        seq_printf(m, "Max P-state: P%d\n",
                   (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
        seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
        seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
        seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
        seq_printf(m, "Render standby enabled: %s\n",
                   yesno(!(rstdbyctl & RCX_SW_EXIT)));
        seq_puts(m, "Current RS state: ");
        switch (rstdbyctl & RSX_STATUS_MASK) {
        case RSX_STATUS_ON:
                seq_puts(m, "on\n");
                break;
        case RSX_STATUS_RC1:
                seq_puts(m, "RC1\n");
                break;
        case RSX_STATUS_RC1E:
                seq_puts(m, "RC1E\n");
                break;
        case RSX_STATUS_RS1:
                seq_puts(m, "RS1\n");
                break;
        case RSX_STATUS_RS2:
                seq_puts(m, "RS2 (RC6)\n");
                break;
        case RSX_STATUS_RS3:
                seq_puts(m, "RC3 (RC6+)\n");
                break;
        default:
                seq_puts(m, "unknown\n");
                break;
        }

        return 0;
}
static int i915_forcewake_domains(struct seq_file *m, void *data)
{
        struct drm_i915_private *i915 = node_to_i915(m->private);
        struct intel_uncore_forcewake_domain *fw_domain;
        unsigned int tmp;

        seq_printf(m, "user.bypass_count = %u\n",
                   i915->uncore.user_forcewake.count);

        for_each_fw_domain(fw_domain, i915, tmp)
                seq_printf(m, "%s.wake_count = %u\n",
                           intel_uncore_forcewake_domain_to_str(fw_domain->id),
                           READ_ONCE(fw_domain->wake_count));

        return 0;
}
static void print_rc6_res(struct seq_file *m,
                          const char *title,
                          const i915_reg_t reg)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);

        seq_printf(m, "%s %u (%llu us)\n",
                   title, I915_READ(reg),
                   intel_rc6_residency_us(dev_priv, reg));
}
static int vlv_drpc_info(struct seq_file *m)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        u32 rcctl1, pw_status;

        pw_status = I915_READ(VLV_GTLC_PW_STATUS);
        rcctl1 = I915_READ(GEN6_RC_CONTROL);

        seq_printf(m, "RC6 Enabled: %s\n",
                   yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
                                   GEN6_RC_CTL_EI_MODE(1))));
        seq_printf(m, "Render Power Well: %s\n",
                   (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
        seq_printf(m, "Media Power Well: %s\n",
                   (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");

        print_rc6_res(m, "Render RC6 residency since boot:", VLV_GT_RENDER_RC6);
        print_rc6_res(m, "Media RC6 residency since boot:", VLV_GT_MEDIA_RC6);

        return i915_forcewake_domains(m, NULL);
}
static int gen6_drpc_info(struct seq_file *m)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        u32 gt_core_status, rcctl1, rc6vids = 0;
        u32 gen9_powergate_enable = 0, gen9_powergate_status = 0;
        unsigned forcewake_count;
        int count = 0;

        forcewake_count = READ_ONCE(dev_priv->uncore.fw_domain[FW_DOMAIN_ID_RENDER].wake_count);
        if (forcewake_count) {
                seq_puts(m, "RC information inaccurate because somebody "
                            "holds a forcewake reference \n");
        } else {
                /* NB: we cannot use forcewake, else we read the wrong values */
                while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
                        udelay(10);
                seq_printf(m, "RC information accurate: %s\n", yesno(count < 51));
        }

        gt_core_status = I915_READ_FW(GEN6_GT_CORE_STATUS);
        trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);

        rcctl1 = I915_READ(GEN6_RC_CONTROL);
        if (INTEL_GEN(dev_priv) >= 9) {
                gen9_powergate_enable = I915_READ(GEN9_PG_ENABLE);
                gen9_powergate_status = I915_READ(GEN9_PWRGT_DOMAIN_STATUS);
        }

        mutex_lock(&dev_priv->pcu_lock);
        sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
        mutex_unlock(&dev_priv->pcu_lock);

        seq_printf(m, "RC1e Enabled: %s\n",
                   yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
        seq_printf(m, "RC6 Enabled: %s\n",
                   yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
        if (INTEL_GEN(dev_priv) >= 9) {
                seq_printf(m, "Render Well Gating Enabled: %s\n",
                           yesno(gen9_powergate_enable & GEN9_RENDER_PG_ENABLE));
                seq_printf(m, "Media Well Gating Enabled: %s\n",
                           yesno(gen9_powergate_enable & GEN9_MEDIA_PG_ENABLE));
        }
        seq_printf(m, "Deep RC6 Enabled: %s\n",
                   yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
        seq_printf(m, "Deepest RC6 Enabled: %s\n",
                   yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
        seq_puts(m, "Current RC state: ");
        switch (gt_core_status & GEN6_RCn_MASK) {
        case GEN6_RC0:
                if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
                        seq_puts(m, "Core Power Down\n");
                else
                        seq_puts(m, "on\n");
                break;
        case GEN6_RC3:
                seq_puts(m, "RC3\n");
                break;
        case GEN6_RC6:
                seq_puts(m, "RC6\n");
                break;
        case GEN6_RC7:
                seq_puts(m, "RC7\n");
                break;
        default:
                seq_puts(m, "Unknown\n");
                break;
        }

        seq_printf(m, "Core Power Down: %s\n",
                   yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
        if (INTEL_GEN(dev_priv) >= 9) {
                seq_printf(m, "Render Power Well: %s\n",
                           (gen9_powergate_status &
                            GEN9_PWRGT_RENDER_STATUS_MASK) ? "Up" : "Down");
                seq_printf(m, "Media Power Well: %s\n",
                           (gen9_powergate_status &
                            GEN9_PWRGT_MEDIA_STATUS_MASK) ? "Up" : "Down");
        }

        /* Not exactly sure what this is */
        print_rc6_res(m, "RC6 \"Locked to RPn\" residency since boot:",
                      GEN6_GT_GFX_RC6_LOCKED);
        print_rc6_res(m, "RC6 residency since boot:", GEN6_GT_GFX_RC6);
        print_rc6_res(m, "RC6+ residency since boot:", GEN6_GT_GFX_RC6p);
        print_rc6_res(m, "RC6++ residency since boot:", GEN6_GT_GFX_RC6pp);

        seq_printf(m, "RC6 voltage: %dmV\n",
                   GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
        seq_printf(m, "RC6+ voltage: %dmV\n",
                   GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
        seq_printf(m, "RC6++ voltage: %dmV\n",
                   GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
        return i915_forcewake_domains(m, NULL);
}
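/*
 * Note (added): i915_drpc_info() below just dispatches to the
 * platform-appropriate RC6/render-standby reporter above, wrapped in a
 * runtime-PM wakeref.
 */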
static int i915_drpc_info(struct seq_file *m, void *unused)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        int err;

        intel_runtime_pm_get(dev_priv);

        if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
                err = vlv_drpc_info(m);
        else if (INTEL_GEN(dev_priv) >= 6)
                err = gen6_drpc_info(m);
        else
                err = ironlake_drpc_info(m);

        intel_runtime_pm_put(dev_priv);

        return err;
}
static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);

        seq_printf(m, "FB tracking busy bits: 0x%08x\n",
                   dev_priv->fb_tracking.busy_bits);

        seq_printf(m, "FB tracking flip bits: 0x%08x\n",
                   dev_priv->fb_tracking.flip_bits);

        return 0;
}
static int i915_fbc_status(struct seq_file *m, void *unused)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct intel_fbc *fbc = &dev_priv->fbc;

        if (!HAS_FBC(dev_priv))
                return -ENODEV;

        intel_runtime_pm_get(dev_priv);
        mutex_lock(&fbc->lock);

        if (intel_fbc_is_active(dev_priv))
                seq_puts(m, "FBC enabled\n");
        else
                seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason);

        if (fbc->work.scheduled)
                seq_printf(m, "FBC worker scheduled on vblank %u, now %llu\n",
                           fbc->work.scheduled_vblank,
                           drm_crtc_vblank_count(&fbc->crtc->base));

        if (intel_fbc_is_active(dev_priv)) {
                u32 mask;

                if (INTEL_GEN(dev_priv) >= 8)
                        mask = I915_READ(IVB_FBC_STATUS2) & BDW_FBC_COMP_SEG_MASK;
                else if (INTEL_GEN(dev_priv) >= 7)
                        mask = I915_READ(IVB_FBC_STATUS2) & IVB_FBC_COMP_SEG_MASK;
                else if (INTEL_GEN(dev_priv) >= 5)
                        mask = I915_READ(ILK_DPFC_STATUS) & ILK_DPFC_COMP_SEG_MASK;
                else if (IS_G4X(dev_priv))
                        mask = I915_READ(DPFC_STATUS) & DPFC_COMP_SEG_MASK;
                else
                        mask = I915_READ(FBC_STATUS) & (FBC_STAT_COMPRESSING |
                                                        FBC_STAT_COMPRESSED);

                seq_printf(m, "Compressing: %s\n", yesno(mask));
        }

        mutex_unlock(&fbc->lock);
        intel_runtime_pm_put(dev_priv);

        return 0;
}
static int i915_fbc_false_color_get(void *data, u64 *val)
{
        struct drm_i915_private *dev_priv = data;

        if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
                return -ENODEV;

        *val = dev_priv->fbc.false_color;

        return 0;
}

static int i915_fbc_false_color_set(void *data, u64 val)
{
        struct drm_i915_private *dev_priv = data;
        u32 reg;

        if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
                return -ENODEV;

        mutex_lock(&dev_priv->fbc.lock);

        reg = I915_READ(ILK_DPFC_CONTROL);
        dev_priv->fbc.false_color = val;

        I915_WRITE(ILK_DPFC_CONTROL, val ?
                   (reg | FBC_CTL_FALSE_COLOR) :
                   (reg & ~FBC_CTL_FALSE_COLOR));

        mutex_unlock(&dev_priv->fbc.lock);
        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_false_color_fops,
                        i915_fbc_false_color_get, i915_fbc_false_color_set,
                        "%llu\n");
static int i915_ips_status(struct seq_file *m, void *unused)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);

        if (!HAS_IPS(dev_priv))
                return -ENODEV;

        intel_runtime_pm_get(dev_priv);

        seq_printf(m, "Enabled by kernel parameter: %s\n",
                   yesno(i915_modparams.enable_ips));

        if (INTEL_GEN(dev_priv) >= 8) {
                seq_puts(m, "Currently: unknown\n");
        } else {
                if (I915_READ(IPS_CTL) & IPS_ENABLE)
                        seq_puts(m, "Currently: enabled\n");
                else
                        seq_puts(m, "Currently: disabled\n");
        }

        intel_runtime_pm_put(dev_priv);

        return 0;
}
static int i915_sr_status(struct seq_file *m, void *unused)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        bool sr_enabled = false;

        intel_runtime_pm_get(dev_priv);
        intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);

        if (INTEL_GEN(dev_priv) >= 9)
                /* no global SR status; inspect per-plane WM */;
        else if (HAS_PCH_SPLIT(dev_priv))
                sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
        else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) ||
                 IS_I945G(dev_priv) || IS_I945GM(dev_priv))
                sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
        else if (IS_I915GM(dev_priv))
                sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
        else if (IS_PINEVIEW(dev_priv))
                sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
        else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
                sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;

        intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
        intel_runtime_pm_put(dev_priv);

        seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled));

        return 0;
}
static int i915_emon_status(struct seq_file *m, void *unused)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct drm_device *dev = &dev_priv->drm;
        unsigned long temp, chipset, gfx;
        int ret;

        if (!IS_GEN5(dev_priv))
                return -ENODEV;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        temp = i915_mch_val(dev_priv);
        chipset = i915_chipset_val(dev_priv);
        gfx = i915_gfx_val(dev_priv);
        mutex_unlock(&dev->struct_mutex);

        seq_printf(m, "GMCH temp: %ld\n", temp);
        seq_printf(m, "Chipset power: %ld\n", chipset);
        seq_printf(m, "GFX power: %ld\n", gfx);
        seq_printf(m, "Total power: %ld\n", chipset + gfx);

        return 0;
}
static int i915_ring_freq_table(struct seq_file *m, void *unused)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct intel_rps *rps = &dev_priv->gt_pm.rps;
        int ret = 0;
        int gpu_freq, ia_freq;
        unsigned int max_gpu_freq, min_gpu_freq;

        if (!HAS_LLC(dev_priv))
                return -ENODEV;

        intel_runtime_pm_get(dev_priv);

        ret = mutex_lock_interruptible(&dev_priv->pcu_lock);
        if (ret)
                goto out;

        if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) {
                /* Convert GT frequency to 50 HZ units */
                min_gpu_freq = rps->min_freq_softlimit / GEN9_FREQ_SCALER;
                max_gpu_freq = rps->max_freq_softlimit / GEN9_FREQ_SCALER;
        } else {
                min_gpu_freq = rps->min_freq_softlimit;
                max_gpu_freq = rps->max_freq_softlimit;
        }

        seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");

        for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
                ia_freq = gpu_freq;
                sandybridge_pcode_read(dev_priv,
                                       GEN6_PCODE_READ_MIN_FREQ_TABLE,
                                       &ia_freq);
                seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
                           intel_gpu_freq(dev_priv, (gpu_freq *
                                                     (IS_GEN9_BC(dev_priv) ||
                                                      IS_CANNONLAKE(dev_priv) ?
                                                      GEN9_FREQ_SCALER : 1))),
                           ((ia_freq >> 0) & 0xff) * 100,
                           ((ia_freq >> 8) & 0xff) * 100);
        }

        mutex_unlock(&dev_priv->pcu_lock);

out:
        intel_runtime_pm_put(dev_priv);
        return ret;
}
static int i915_opregion(struct seq_file *m, void *unused)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct drm_device *dev = &dev_priv->drm;
        struct intel_opregion *opregion = &dev_priv->opregion;
        int ret;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                goto out;

        if (opregion->header)
                seq_write(m, opregion->header, OPREGION_SIZE);

        mutex_unlock(&dev->struct_mutex);

out:
        return 0;
}
static int i915_vbt(struct seq_file *m, void *unused)
{
        struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;

        if (opregion->vbt)
                seq_write(m, opregion->vbt, opregion->vbt_size);

        return 0;
}
static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct drm_device *dev = &dev_priv->drm;
        struct intel_framebuffer *fbdev_fb = NULL;
        struct drm_framebuffer *drm_fb;
        int ret;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

#ifdef CONFIG_DRM_FBDEV_EMULATION
        if (dev_priv->fbdev && dev_priv->fbdev->helper.fb) {
                fbdev_fb = to_intel_framebuffer(dev_priv->fbdev->helper.fb);

                seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
                           fbdev_fb->base.width,
                           fbdev_fb->base.height,
                           fbdev_fb->base.format->depth,
                           fbdev_fb->base.format->cpp[0] * 8,
                           fbdev_fb->base.modifier,
                           drm_framebuffer_read_refcount(&fbdev_fb->base));
                describe_obj(m, fbdev_fb->obj);
                seq_putc(m, '\n');
        }
#endif

        mutex_lock(&dev->mode_config.fb_lock);
        drm_for_each_fb(drm_fb, dev) {
                struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
                if (fb == fbdev_fb)
                        continue;

                seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
                           fb->base.width,
                           fb->base.height,
                           fb->base.format->depth,
                           fb->base.format->cpp[0] * 8,
                           fb->base.modifier,
                           drm_framebuffer_read_refcount(&fb->base));
                describe_obj(m, fb->obj);
                seq_putc(m, '\n');
        }
        mutex_unlock(&dev->mode_config.fb_lock);
        mutex_unlock(&dev->struct_mutex);

        return 0;
}
static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring)
{
        seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u)",
                   ring->space, ring->head, ring->tail);
}
static int i915_context_status(struct seq_file *m, void *unused)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct drm_device *dev = &dev_priv->drm;
        struct intel_engine_cs *engine;
        struct i915_gem_context *ctx;
        enum intel_engine_id id;
        int ret;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
                seq_printf(m, "HW context %u ", ctx->hw_id);
                if (ctx->pid) {
                        struct task_struct *task;

                        task = get_pid_task(ctx->pid, PIDTYPE_PID);
                        if (task) {
                                seq_printf(m, "(%s [%d]) ",
                                           task->comm, task->pid);
                                put_task_struct(task);
                        }
                } else if (IS_ERR(ctx->file_priv)) {
                        seq_puts(m, "(deleted) ");
                } else {
                        seq_puts(m, "(kernel) ");
                }

                seq_putc(m, ctx->remap_slice ? 'R' : 'r');
                seq_putc(m, '\n');

                for_each_engine(engine, dev_priv, id) {
                        struct intel_context *ce = &ctx->engine[engine->id];

                        seq_printf(m, "%s: ", engine->name);
                        if (ce->state)
                                describe_obj(m, ce->state->obj);
                        if (ce->ring)
                                describe_ctx_ring(m, ce->ring);
                        seq_putc(m, '\n');
                }

                seq_putc(m, '\n');
        }

        mutex_unlock(&dev->struct_mutex);

        return 0;
}
static const char *swizzle_string(unsigned swizzle)
{
        switch (swizzle) {
        case I915_BIT_6_SWIZZLE_NONE:
                return "none";
        case I915_BIT_6_SWIZZLE_9:
                return "bit9";
        case I915_BIT_6_SWIZZLE_9_10:
                return "bit9/bit10";
        case I915_BIT_6_SWIZZLE_9_11:
                return "bit9/bit11";
        case I915_BIT_6_SWIZZLE_9_10_11:
                return "bit9/bit10/bit11";
        case I915_BIT_6_SWIZZLE_9_17:
                return "bit9/bit17";
        case I915_BIT_6_SWIZZLE_9_10_17:
                return "bit9/bit10/bit17";
        case I915_BIT_6_SWIZZLE_UNKNOWN:
                return "unknown";
        }

        return "bug";
}
static int i915_swizzle_info(struct seq_file *m, void *data)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);

        intel_runtime_pm_get(dev_priv);

        seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
                   swizzle_string(dev_priv->mm.bit_6_swizzle_x));
        seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
                   swizzle_string(dev_priv->mm.bit_6_swizzle_y));

        if (IS_GEN3(dev_priv) || IS_GEN4(dev_priv)) {
                seq_printf(m, "DDC = 0x%08x\n",
                           I915_READ(DCC));
                seq_printf(m, "DDC2 = 0x%08x\n",
                           I915_READ(DCC2));
                seq_printf(m, "C0DRB3 = 0x%04x\n",
                           I915_READ16(C0DRB3));
                seq_printf(m, "C1DRB3 = 0x%04x\n",
                           I915_READ16(C1DRB3));
        } else if (INTEL_GEN(dev_priv) >= 6) {
                seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
                           I915_READ(MAD_DIMM_C0));
                seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
                           I915_READ(MAD_DIMM_C1));
                seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
                           I915_READ(MAD_DIMM_C2));
                seq_printf(m, "TILECTL = 0x%08x\n",
                           I915_READ(TILECTL));
                if (INTEL_GEN(dev_priv) >= 8)
                        seq_printf(m, "GAMTARBMODE = 0x%08x\n",
                                   I915_READ(GAMTARBMODE));
                else
                        seq_printf(m, "ARB_MODE = 0x%08x\n",
                                   I915_READ(ARB_MODE));
                seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
                           I915_READ(DISP_ARB_CTL));
        }

        if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
                seq_puts(m, "L-shaped memory detected\n");

        intel_runtime_pm_put(dev_priv);

        return 0;
}
static int per_file_ctx(int id, void *ptr, void *data)
{
        struct i915_gem_context *ctx = ptr;
        struct seq_file *m = data;
        struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;

        if (!ppgtt) {
                seq_printf(m, "  no ppgtt for context %d\n",
                           ctx->user_handle);
                return 0;
        }

        if (i915_gem_context_is_default(ctx))
                seq_puts(m, "  default context:\n");
        else
                seq_printf(m, "  context %d:\n", ctx->user_handle);
        ppgtt->debug_dump(ppgtt, m);

        return 0;
}
static void gen8_ppgtt_info(struct seq_file *m,
                            struct drm_i915_private *dev_priv)
{
        struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
        int i;

        if (!ppgtt)
                return;

        for_each_engine(engine, dev_priv, id) {
                seq_printf(m, "%s\n", engine->name);
                for (i = 0; i < 4; i++) {
                        u64 pdp = I915_READ(GEN8_RING_PDP_UDW(engine, i));
                        pdp <<= 32;
                        pdp |= I915_READ(GEN8_RING_PDP_LDW(engine, i));
                        seq_printf(m, "\tPDP%d 0x%016llx\n", i, pdp);
                }
        }
}
static void gen6_ppgtt_info(struct seq_file *m,
			    struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	if (IS_GEN6(dev_priv))
		seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));

	for_each_engine(engine, dev_priv, id) {
		seq_printf(m, "%s\n", engine->name);
		if (IS_GEN7(dev_priv))
			seq_printf(m, "GFX_MODE: 0x%08x\n",
				   I915_READ(RING_MODE_GEN7(engine)));
		seq_printf(m, "PP_DIR_BASE: 0x%08x\n",
			   I915_READ(RING_PP_DIR_BASE(engine)));
		seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n",
			   I915_READ(RING_PP_DIR_BASE_READ(engine)));
		seq_printf(m, "PP_DIR_DCLV: 0x%08x\n",
			   I915_READ(RING_PP_DIR_DCLV(engine)));
	}
	if (dev_priv->mm.aliasing_ppgtt) {
		struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;

		seq_puts(m, "aliasing PPGTT:\n");
		seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd.base.ggtt_offset);

		ppgtt->debug_dump(ppgtt, m);
	}

	seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
}
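
/*
 * Dump per-process GTT state: the page-directory registers for every
 * engine (gen6 through gen8+), then each open DRM file's contexts via
 * per_file_ctx().
 */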
static int i915_ppgtt_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_file *file;
	int ret;

	mutex_lock(&dev->filelist_mutex);
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out_unlock;

	intel_runtime_pm_get(dev_priv);

	if (INTEL_GEN(dev_priv) >= 8)
		gen8_ppgtt_info(m, dev_priv);
	else if (INTEL_GEN(dev_priv) >= 6)
		gen6_ppgtt_info(m, dev_priv);

	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
		struct drm_i915_file_private *file_priv = file->driver_priv;
		struct task_struct *task;

		task = get_pid_task(file->pid, PIDTYPE_PID);
		if (!task) {
			ret = -ESRCH;
			goto out_rpm;
		}
		seq_printf(m, "\nproc: %s\n", task->comm);
		put_task_struct(task);
		idr_for_each(&file_priv->context_idr, per_file_ctx,
			     (void *)(unsigned long)m);
	}

out_rpm:
	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);
out_unlock:
	mutex_unlock(&dev->filelist_mutex);
	return ret;
}
static int count_irq_waiters(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int count = 0;

	for_each_engine(engine, i915, id)
		count += intel_engine_has_waiter(engine);

	return count;
}

static const char *rps_power_to_str(unsigned int power)
{
	static const char * const strings[] = {
		[LOW_POWER] = "low power",
		[BETWEEN] = "mixed",
		[HIGH_POWER] = "high power",
	};

	if (power >= ARRAY_SIZE(strings) || !strings[power])
		return "unknown";

	return strings[power];
}
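
/*
 * Summarise RPS (render P-state) bookkeeping: the current, soft and hard
 * frequency limits, per-client boost counts and, while requests are in
 * flight, the hardware's up/down autotuning counters.
 */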
static int i915_rps_boost_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	struct drm_file *file;

	seq_printf(m, "RPS enabled? %d\n", rps->enabled);
	seq_printf(m, "GPU busy? %s [%d requests]\n",
		   yesno(dev_priv->gt.awake), dev_priv->gt.active_requests);
	seq_printf(m, "CPU waiting? %d\n", count_irq_waiters(dev_priv));
	seq_printf(m, "Boosts outstanding? %d\n",
		   atomic_read(&rps->num_waiters));
	seq_printf(m, "Frequency requested %d\n",
		   intel_gpu_freq(dev_priv, rps->cur_freq));
	seq_printf(m, "  min hard:%d, soft:%d; max soft:%d, hard:%d\n",
		   intel_gpu_freq(dev_priv, rps->min_freq),
		   intel_gpu_freq(dev_priv, rps->min_freq_softlimit),
		   intel_gpu_freq(dev_priv, rps->max_freq_softlimit),
		   intel_gpu_freq(dev_priv, rps->max_freq));
	seq_printf(m, "  idle:%d, efficient:%d, boost:%d\n",
		   intel_gpu_freq(dev_priv, rps->idle_freq),
		   intel_gpu_freq(dev_priv, rps->efficient_freq),
		   intel_gpu_freq(dev_priv, rps->boost_freq));

	mutex_lock(&dev->filelist_mutex);
	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
		struct drm_i915_file_private *file_priv = file->driver_priv;
		struct task_struct *task;

		rcu_read_lock();
		task = pid_task(file->pid, PIDTYPE_PID);
		seq_printf(m, "%s [%d]: %d boosts\n",
			   task ? task->comm : "<unknown>",
			   task ? task->pid : -1,
			   atomic_read(&file_priv->rps_client.boosts));
		rcu_read_unlock();
	}
	seq_printf(m, "Kernel (anonymous) boosts: %d\n",
		   atomic_read(&rps->boosts));
	mutex_unlock(&dev->filelist_mutex);

	if (INTEL_GEN(dev_priv) >= 6 &&
	    rps->enabled &&
	    dev_priv->gt.active_requests) {
		u32 rpup, rpupei;
		u32 rpdown, rpdownei;

		intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
		rpup = I915_READ_FW(GEN6_RP_CUR_UP) & GEN6_RP_EI_MASK;
		rpupei = I915_READ_FW(GEN6_RP_CUR_UP_EI) & GEN6_RP_EI_MASK;
		rpdown = I915_READ_FW(GEN6_RP_CUR_DOWN) & GEN6_RP_EI_MASK;
		rpdownei = I915_READ_FW(GEN6_RP_CUR_DOWN_EI) & GEN6_RP_EI_MASK;
		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

		seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n",
			   rps_power_to_str(rps->power));
		seq_printf(m, "  Avg. up: %d%% [above threshold? %d%%]\n",
			   rpup && rpupei ? 100 * rpup / rpupei : 0,
			   rps->up_threshold);
		seq_printf(m, "  Avg. down: %d%% [below threshold? %d%%]\n",
			   rpdown && rpdownei ? 100 * rpdown / rpdownei : 0,
			   rps->down_threshold);
	} else {
		seq_puts(m, "\nRPS Autotuning inactive\n");
	}

	return 0;
}
static int i915_llc(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const bool edram = INTEL_GEN(dev_priv) > 8;

	seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev_priv)));
	seq_printf(m, "%s: %lluMB\n", edram ? "eDRAM" : "eLLC",
		   intel_uncore_edram_size(dev_priv)/1024/1024);

	return 0;
}

static int i915_huc_load_status_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_printer p;

	if (!HAS_HUC(dev_priv))
		return -ENODEV;

	p = drm_seq_file_printer(m);
	intel_uc_fw_dump(&dev_priv->huc.fw, &p);

	intel_runtime_pm_get(dev_priv);
	seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2));
	intel_runtime_pm_put(dev_priv);

	return 0;
}

static int i915_guc_load_status_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_printer p;
	u32 tmp, i;

	if (!HAS_GUC(dev_priv))
		return -ENODEV;

	p = drm_seq_file_printer(m);
	intel_uc_fw_dump(&dev_priv->guc.fw, &p);

	intel_runtime_pm_get(dev_priv);

	tmp = I915_READ(GUC_STATUS);

	seq_printf(m, "\nGuC status 0x%08x:\n", tmp);
	seq_printf(m, "\tBootrom status = 0x%x\n",
		   (tmp & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT);
	seq_printf(m, "\tuKernel status = 0x%x\n",
		   (tmp & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT);
	seq_printf(m, "\tMIA Core status = 0x%x\n",
		   (tmp & GS_MIA_MASK) >> GS_MIA_SHIFT);
	seq_puts(m, "\nScratch registers:\n");
	for (i = 0; i < 16; i++)
		seq_printf(m, "\t%2d: \t0x%x\n", i, I915_READ(SOFT_SCRATCH(i)));

	intel_runtime_pm_put(dev_priv);

	return 0;
}
static void i915_guc_log_info(struct seq_file *m,
			      struct drm_i915_private *dev_priv)
{
	struct intel_guc *guc = &dev_priv->guc;

	seq_puts(m, "\nGuC logging stats:\n");

	seq_printf(m, "\tISR:   flush count %10u, overflow count %10u\n",
		   guc->log.flush_count[GUC_ISR_LOG_BUFFER],
		   guc->log.total_overflow_count[GUC_ISR_LOG_BUFFER]);

	seq_printf(m, "\tDPC:   flush count %10u, overflow count %10u\n",
		   guc->log.flush_count[GUC_DPC_LOG_BUFFER],
		   guc->log.total_overflow_count[GUC_DPC_LOG_BUFFER]);

	seq_printf(m, "\tCRASH: flush count %10u, overflow count %10u\n",
		   guc->log.flush_count[GUC_CRASH_DUMP_LOG_BUFFER],
		   guc->log.total_overflow_count[GUC_CRASH_DUMP_LOG_BUFFER]);

	seq_printf(m, "\tTotal flush interrupt count: %u\n",
		   guc->log.flush_interrupt_count);

	seq_printf(m, "\tCapture miss count: %u\n",
		   guc->log.capture_miss_count);
}

static void i915_guc_client_info(struct seq_file *m,
				 struct drm_i915_private *dev_priv,
				 struct intel_guc_client *client)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	uint64_t tot = 0;

	seq_printf(m, "\tPriority %d, GuC stage index: %u, PD offset 0x%x\n",
		   client->priority, client->stage_id, client->proc_desc_offset);
	seq_printf(m, "\tDoorbell id %d, offset: 0x%lx\n",
		   client->doorbell_id, client->doorbell_offset);

	for_each_engine(engine, dev_priv, id) {
		u64 submissions = client->submissions[id];

		tot += submissions;
		seq_printf(m, "\tSubmissions: %llu %s\n",
			   submissions, engine->name);
	}
	seq_printf(m, "\tTotal: %llu\n", tot);
}
static int i915_guc_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const struct intel_guc *guc = &dev_priv->guc;

	if (!USES_GUC_SUBMISSION(dev_priv))
		return -ENODEV;

	GEM_BUG_ON(!guc->execbuf_client);
	GEM_BUG_ON(!guc->preempt_client);

	seq_printf(m, "Doorbell map:\n");
	seq_printf(m, "\t%*pb\n", GUC_NUM_DOORBELLS, guc->doorbell_bitmap);
	seq_printf(m, "Doorbell next cacheline: 0x%x\n\n", guc->db_cacheline);

	seq_printf(m, "\nGuC execbuf client @ %p:\n", guc->execbuf_client);
	i915_guc_client_info(m, dev_priv, guc->execbuf_client);
	seq_printf(m, "\nGuC preempt client @ %p:\n", guc->preempt_client);
	i915_guc_client_info(m, dev_priv, guc->preempt_client);

	i915_guc_log_info(m, dev_priv);

	/* Add more as required ... */

	return 0;
}

static int i915_guc_stage_pool(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const struct intel_guc *guc = &dev_priv->guc;
	struct guc_stage_desc *desc = guc->stage_desc_pool_vaddr;
	struct intel_guc_client *client = guc->execbuf_client;
	unsigned int tmp;
	int index;

	if (!USES_GUC_SUBMISSION(dev_priv))
		return -ENODEV;

	for (index = 0; index < GUC_MAX_STAGE_DESCRIPTORS; index++, desc++) {
		struct intel_engine_cs *engine;

		if (!(desc->attribute & GUC_STAGE_DESC_ATTR_ACTIVE))
			continue;

		seq_printf(m, "GuC stage descriptor %u:\n", index);
		seq_printf(m, "\tIndex: %u\n", desc->stage_id);
		seq_printf(m, "\tAttribute: 0x%x\n", desc->attribute);
		seq_printf(m, "\tPriority: %d\n", desc->priority);
		seq_printf(m, "\tDoorbell id: %d\n", desc->db_id);
		seq_printf(m, "\tEngines used: 0x%x\n",
			   desc->engines_used);
		seq_printf(m, "\tDoorbell trigger phy: 0x%llx, cpu: 0x%llx, uK: 0x%x\n",
			   desc->db_trigger_phy,
			   desc->db_trigger_cpu,
			   desc->db_trigger_uk);
		seq_printf(m, "\tProcess descriptor: 0x%x\n",
			   desc->process_desc);
		seq_printf(m, "\tWorkqueue address: 0x%x, size: 0x%x\n",
			   desc->wq_addr, desc->wq_size);
		seq_putc(m, '\n');

		for_each_engine_masked(engine, dev_priv, client->engines, tmp) {
			u32 guc_engine_id = engine->guc_id;
			struct guc_execlist_context *lrc =
						&desc->lrc[guc_engine_id];

			seq_printf(m, "\t%s LRC:\n", engine->name);
			seq_printf(m, "\t\tContext desc: 0x%x\n",
				   lrc->context_desc);
			seq_printf(m, "\t\tContext id: 0x%x\n", lrc->context_id);
			seq_printf(m, "\t\tLRCA: 0x%x\n", lrc->ring_lrca);
			seq_printf(m, "\t\tRing begin: 0x%x\n", lrc->ring_begin);
			seq_printf(m, "\t\tRing end: 0x%x\n", lrc->ring_end);
			seq_putc(m, '\n');
		}
	}

	return 0;
}
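
/*
 * Dump the raw GuC log buffer (or the load-error log when the debugfs
 * node carries data), four 32bit words per line.
 */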
static int i915_guc_log_dump(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_i915_private *dev_priv = node_to_i915(node);
	bool dump_load_err = !!node->info_ent->data;
	struct drm_i915_gem_object *obj = NULL;
	u32 *log;
	int i = 0;

	if (!HAS_GUC(dev_priv))
		return -ENODEV;

	if (dump_load_err)
		obj = dev_priv->guc.load_err_log;
	else if (dev_priv->guc.log.vma)
		obj = dev_priv->guc.log.vma->obj;

	if (!obj)
		return 0;

	log = i915_gem_object_pin_map(obj, I915_MAP_WC);
	if (IS_ERR(log)) {
		DRM_DEBUG("Failed to pin object\n");
		seq_puts(m, "(log data inaccessible)\n");
		return PTR_ERR(log);
	}

	for (i = 0; i < obj->base.size / sizeof(u32); i += 4)
		seq_printf(m, "0x%08x 0x%08x 0x%08x 0x%08x\n",
			   *(log + i), *(log + i + 1),
			   *(log + i + 2), *(log + i + 3));

	seq_putc(m, '\n');

	i915_gem_object_unpin_map(obj);

	return 0;
}
static int i915_guc_log_control_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;

	if (!HAS_GUC(dev_priv))
		return -ENODEV;

	if (!dev_priv->guc.log.vma)
		return -EINVAL;

	*val = i915_modparams.guc_log_level;

	return 0;
}

static int i915_guc_log_control_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	int ret;

	if (!HAS_GUC(dev_priv))
		return -ENODEV;

	if (!dev_priv->guc.log.vma)
		return -EINVAL;

	ret = mutex_lock_interruptible(&dev_priv->drm.struct_mutex);
	if (ret)
		return ret;

	intel_runtime_pm_get(dev_priv);
	ret = i915_guc_log_control(dev_priv, val);
	intel_runtime_pm_put(dev_priv);

	mutex_unlock(&dev_priv->drm.struct_mutex);
	return ret;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_control_fops,
			i915_guc_log_control_get, i915_guc_log_control_set,
			"%lld\n");

static const char *psr2_live_status(u32 val)
{
	static const char * const live_status[] = {
		"IDLE",
		"CAPTURE",
		"CAPTURE_FS",
		"SLEEP",
		"BUFON_FW",
		"ML_UP",
		"SU_STANDBY",
		"FAST_SLEEP",
		"DEEP_SLEEP",
		"BUF_ON",
		"TG_ON"
	};

	val = (val & EDP_PSR2_STATUS_STATE_MASK) >> EDP_PSR2_STATUS_STATE_SHIFT;
	if (val < ARRAY_SIZE(live_status))
		return live_status[val];

	return "unknown";
}
static int i915_edp_psr_status(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u32 psrperf = 0;
	u32 stat[3];
	enum pipe pipe;
	bool enabled = false;

	if (!HAS_PSR(dev_priv))
		return -ENODEV;

	intel_runtime_pm_get(dev_priv);

	mutex_lock(&dev_priv->psr.lock);
	seq_printf(m, "Sink_Support: %s\n", yesno(dev_priv->psr.sink_support));
	seq_printf(m, "Source_OK: %s\n", yesno(dev_priv->psr.source_ok));
	seq_printf(m, "Enabled: %s\n", yesno((bool)dev_priv->psr.enabled));
	seq_printf(m, "Active: %s\n", yesno(dev_priv->psr.active));
	seq_printf(m, "Busy frontbuffer bits: 0x%03x\n",
		   dev_priv->psr.busy_frontbuffer_bits);
	seq_printf(m, "Re-enable work scheduled: %s\n",
		   yesno(work_busy(&dev_priv->psr.work.work)));

	if (HAS_DDI(dev_priv)) {
		if (dev_priv->psr.psr2_support)
			enabled = I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE;
		else
			enabled = I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE;
	} else {
		for_each_pipe(dev_priv, pipe) {
			enum transcoder cpu_transcoder =
				intel_pipe_to_cpu_transcoder(dev_priv, pipe);
			enum intel_display_power_domain power_domain;

			power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
			if (!intel_display_power_get_if_enabled(dev_priv,
								power_domain))
				continue;

			stat[pipe] = I915_READ(VLV_PSRSTAT(pipe)) &
				VLV_EDP_PSR_CURR_STATE_MASK;
			if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
			    (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE))
				enabled = true;

			intel_display_power_put(dev_priv, power_domain);
		}
	}

	seq_printf(m, "Main link in standby mode: %s\n",
		   yesno(dev_priv->psr.link_standby));

	seq_printf(m, "HW Enabled & Active bit: %s", yesno(enabled));

	if (!HAS_DDI(dev_priv))
		for_each_pipe(dev_priv, pipe) {
			if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
			    (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE))
				seq_printf(m, " pipe %c", pipe_name(pipe));
		}
	seq_puts(m, "\n");

	/*
	 * VLV/CHV PSR has no kind of performance counter
	 * SKL+ Perf counter is reset to 0 every time DC state is entered
	 */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		psrperf = I915_READ(EDP_PSR_PERF_CNT) &
			EDP_PSR_PERF_CNT_MASK;

		seq_printf(m, "Performance_Counter: %u\n", psrperf);
	}
	if (dev_priv->psr.psr2_support) {
		u32 psr2 = I915_READ(EDP_PSR2_STATUS_CTL);

		seq_printf(m, "EDP_PSR2_STATUS_CTL: %x [%s]\n",
			   psr2, psr2_live_status(psr2));
	}
	mutex_unlock(&dev_priv->psr.lock);

	intel_runtime_pm_put(dev_priv);
	return 0;
}
static int i915_sink_crc(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct intel_dp *intel_dp = NULL;
	struct drm_modeset_acquire_ctx ctx;
	int ret;
	u8 crc[6];

	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);

	drm_connector_list_iter_begin(dev, &conn_iter);

	for_each_intel_connector_iter(connector, &conn_iter) {
		struct drm_crtc *crtc;
		struct drm_connector_state *state;
		struct intel_crtc_state *crtc_state;

		if (connector->base.connector_type != DRM_MODE_CONNECTOR_eDP)
			continue;

retry:
		ret = drm_modeset_lock(&dev->mode_config.connection_mutex, &ctx);
		if (ret)
			goto err;

		state = connector->base.state;
		if (!state->best_encoder)
			continue;

		crtc = state->crtc;
		ret = drm_modeset_lock(&crtc->mutex, &ctx);
		if (ret)
			goto err;

		crtc_state = to_intel_crtc_state(crtc->state);
		if (!crtc_state->base.active)
			continue;

		/*
		 * We need to wait for all crtc updates to complete, to make
		 * sure any pending modesets and plane updates are completed.
		 */
		if (crtc_state->base.commit) {
			ret = wait_for_completion_interruptible(&crtc_state->base.commit->hw_done);
			if (ret)
				goto err;
		}

		intel_dp = enc_to_intel_dp(state->best_encoder);

		ret = intel_dp_sink_crc(intel_dp, crtc_state, crc);
		if (ret)
			goto err;

		seq_printf(m, "%02x%02x%02x%02x%02x%02x\n",
			   crc[0], crc[1], crc[2],
			   crc[3], crc[4], crc[5]);
		goto out;

err:
		if (ret == -EDEADLK) {
			ret = drm_modeset_backoff(&ctx);
			if (!ret)
				goto retry;
		}
		goto out;
	}
	ret = -ENODEV;
out:
	drm_connector_list_iter_end(&conn_iter);
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}
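
/*
 * Energy readout via RAPL: bits 12:8 of MSR_RAPL_POWER_UNIT give the
 * energy status unit as 1/2^ESU Joule, so the MCH energy counter is
 * scaled by (1000000 >> units) to report microjoules.
 */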
static int i915_energy_uJ(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	unsigned long long power;
	u32 units;

	if (INTEL_GEN(dev_priv) < 6)
		return -ENODEV;

	intel_runtime_pm_get(dev_priv);

	if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &power)) {
		intel_runtime_pm_put(dev_priv);
		return -ENODEV;
	}

	units = (power & 0x1f00) >> 8;
	power = I915_READ(MCH_SECP_NRG_STTS);
	power = (1000000 * power) >> units; /* convert to uJ */

	intel_runtime_pm_put(dev_priv);

	seq_printf(m, "%llu", power);

	return 0;
}

static int i915_runtime_pm_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct pci_dev *pdev = dev_priv->drm.pdev;

	if (!HAS_RUNTIME_PM(dev_priv))
		seq_puts(m, "Runtime power management not supported\n");

	seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->gt.awake));
	seq_printf(m, "IRQs disabled: %s\n",
		   yesno(!intel_irqs_enabled(dev_priv)));
#ifdef CONFIG_PM
	seq_printf(m, "Usage count: %d\n",
		   atomic_read(&dev_priv->drm.dev->power.usage_count));
#else
	seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n");
#endif
	seq_printf(m, "PCI device power state: %s [%d]\n",
		   pci_power_name(pdev->current_state),
		   pdev->current_state);

	return 0;
}
static int i915_power_domain_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	int i;

	mutex_lock(&power_domains->lock);

	seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
	for (i = 0; i < power_domains->power_well_count; i++) {
		struct i915_power_well *power_well;
		enum intel_display_power_domain power_domain;

		power_well = &power_domains->power_wells[i];
		seq_printf(m, "%-25s %d\n", power_well->name,
			   power_well->count);

		for_each_power_domain(power_domain, power_well->domains)
			seq_printf(m, "  %-23s %d\n",
				   intel_display_power_domain_str(power_domain),
				   power_domains->domain_use_count[power_domain]);
	}

	mutex_unlock(&power_domains->lock);

	return 0;
}

static int i915_dmc_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_csr *csr;

	if (!HAS_CSR(dev_priv))
		return -ENODEV;

	csr = &dev_priv->csr;

	intel_runtime_pm_get(dev_priv);

	seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL));
	seq_printf(m, "path: %s\n", csr->fw_path);

	if (!csr->dmc_payload)
		goto out;

	seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
		   CSR_VERSION_MINOR(csr->version));

	if (IS_KABYLAKE(dev_priv) ||
	    (IS_SKYLAKE(dev_priv) && csr->version >= CSR_VERSION(1, 6))) {
		seq_printf(m, "DC3 -> DC5 count: %d\n",
			   I915_READ(SKL_CSR_DC3_DC5_COUNT));
		seq_printf(m, "DC5 -> DC6 count: %d\n",
			   I915_READ(SKL_CSR_DC5_DC6_COUNT));
	} else if (IS_BROXTON(dev_priv) && csr->version >= CSR_VERSION(1, 4)) {
		seq_printf(m, "DC3 -> DC5 count: %d\n",
			   I915_READ(BXT_CSR_DC3_DC5_COUNT));
	}

out:
	seq_printf(m, "program base: 0x%08x\n", I915_READ(CSR_PROGRAM(0)));
	seq_printf(m, "ssp base: 0x%08x\n", I915_READ(CSR_SSP_BASE));
	seq_printf(m, "htp: 0x%08x\n", I915_READ(CSR_HTP_SKL));

	intel_runtime_pm_put(dev_priv);

	return 0;
}
static void intel_seq_print_mode(struct seq_file *m, int tabs,
				 struct drm_display_mode *mode)
{
	int i;

	for (i = 0; i < tabs; i++)
		seq_putc(m, '\t');

	seq_printf(m, "id %d:\"%s\" freq %d clock %d hdisp %d hss %d hse %d htot %d vdisp %d vss %d vse %d vtot %d type 0x%x flags 0x%x\n",
		   mode->base.id, mode->name,
		   mode->vrefresh, mode->clock,
		   mode->hdisplay, mode->hsync_start,
		   mode->hsync_end, mode->htotal,
		   mode->vdisplay, mode->vsync_start,
		   mode->vsync_end, mode->vtotal,
		   mode->type, mode->flags);
}

static void intel_encoder_info(struct seq_file *m,
			       struct intel_crtc *intel_crtc,
			       struct intel_encoder *intel_encoder)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_crtc *crtc = &intel_crtc->base;
	struct intel_connector *intel_connector;
	struct drm_encoder *encoder;

	encoder = &intel_encoder->base;
	seq_printf(m, "\tencoder %d: type: %s, connectors:\n",
		   encoder->base.id, encoder->name);
	for_each_connector_on_encoder(dev, encoder, intel_connector) {
		struct drm_connector *connector = &intel_connector->base;
		seq_printf(m, "\t\tconnector %d: type: %s, status: %s",
			   connector->base.id,
			   connector->name,
			   drm_get_connector_status_name(connector->status));
		if (connector->status == connector_status_connected) {
			struct drm_display_mode *mode = &crtc->mode;
			seq_printf(m, ", mode:\n");
			intel_seq_print_mode(m, 2, mode);
		} else {
			seq_putc(m, '\n');
		}
	}
}
static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_crtc *crtc = &intel_crtc->base;
	struct intel_encoder *intel_encoder;
	struct drm_plane_state *plane_state = crtc->primary->state;
	struct drm_framebuffer *fb = plane_state->fb;

	if (fb)
		seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n",
			   fb->base.id, plane_state->src_x >> 16,
			   plane_state->src_y >> 16, fb->width, fb->height);
	else
		seq_puts(m, "\tprimary plane disabled\n");
	for_each_encoder_on_crtc(dev, crtc, intel_encoder)
		intel_encoder_info(m, intel_crtc, intel_encoder);
}

static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
{
	struct drm_display_mode *mode = panel->fixed_mode;

	seq_printf(m, "\tfixed mode:\n");
	intel_seq_print_mode(m, 2, mode);
}
static void intel_dp_info(struct seq_file *m,
			  struct intel_connector *intel_connector)
{
	struct intel_encoder *intel_encoder = intel_connector->encoder;
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);

	seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
	seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
	if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
		intel_panel_info(m, &intel_connector->panel);

	drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports,
				&intel_dp->aux);
}

static void intel_dp_mst_info(struct seq_file *m,
			      struct intel_connector *intel_connector)
{
	struct intel_encoder *intel_encoder = intel_connector->encoder;
	struct intel_dp_mst_encoder *intel_mst =
		enc_to_mst(&intel_encoder->base);
	struct intel_digital_port *intel_dig_port = intel_mst->primary;
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	bool has_audio = drm_dp_mst_port_has_audio(&intel_dp->mst_mgr,
						   intel_connector->port);

	seq_printf(m, "\taudio support: %s\n", yesno(has_audio));
}

static void intel_hdmi_info(struct seq_file *m,
			    struct intel_connector *intel_connector)
{
	struct intel_encoder *intel_encoder = intel_connector->encoder;
	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);

	seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio));
}

static void intel_lvds_info(struct seq_file *m,
			    struct intel_connector *intel_connector)
{
	intel_panel_info(m, &intel_connector->panel);
}
static void intel_connector_info(struct seq_file *m,
				 struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct intel_encoder *intel_encoder = intel_connector->encoder;
	struct drm_display_mode *mode;

	seq_printf(m, "connector %d: type %s, status: %s\n",
		   connector->base.id, connector->name,
		   drm_get_connector_status_name(connector->status));
	if (connector->status == connector_status_connected) {
		seq_printf(m, "\tname: %s\n", connector->display_info.name);
		seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
			   connector->display_info.width_mm,
			   connector->display_info.height_mm);
		seq_printf(m, "\tsubpixel order: %s\n",
			   drm_get_subpixel_order_name(connector->display_info.subpixel_order));
		seq_printf(m, "\tCEA rev: %d\n",
			   connector->display_info.cea_rev);
	}

	if (!intel_encoder)
		return;

	switch (connector->connector_type) {
	case DRM_MODE_CONNECTOR_DisplayPort:
	case DRM_MODE_CONNECTOR_eDP:
		if (intel_encoder->type == INTEL_OUTPUT_DP_MST)
			intel_dp_mst_info(m, intel_connector);
		else
			intel_dp_info(m, intel_connector);
		break;
	case DRM_MODE_CONNECTOR_LVDS:
		if (intel_encoder->type == INTEL_OUTPUT_LVDS)
			intel_lvds_info(m, intel_connector);
		break;
	case DRM_MODE_CONNECTOR_HDMIA:
		if (intel_encoder->type == INTEL_OUTPUT_HDMI ||
		    intel_encoder->type == INTEL_OUTPUT_DDI)
			intel_hdmi_info(m, intel_connector);
		break;
	default:
		break;
	}

	seq_printf(m, "\tmodes:\n");
	list_for_each_entry(mode, &connector->modes, head)
		intel_seq_print_mode(m, 2, mode);
}

static const char *plane_type(enum drm_plane_type type)
{
	switch (type) {
	case DRM_PLANE_TYPE_OVERLAY:
		return "OVL";
	case DRM_PLANE_TYPE_PRIMARY:
		return "PRI";
	case DRM_PLANE_TYPE_CURSOR:
		return "CUR";
	/*
	 * Deliberately omitting default: to generate compiler warnings
	 * when a new drm_plane_type gets added.
	 */
	}

	return "unknown";
}
static const char *plane_rotation(unsigned int rotation)
{
	static char buf[48];
	/*
	 * According to doc only one DRM_MODE_ROTATE_ is allowed but this
	 * will print them all to visualize if the values are misused
	 */
	snprintf(buf, sizeof(buf),
		 "%s%s%s%s%s%s(0x%08x)",
		 (rotation & DRM_MODE_ROTATE_0) ? "0 " : "",
		 (rotation & DRM_MODE_ROTATE_90) ? "90 " : "",
		 (rotation & DRM_MODE_ROTATE_180) ? "180 " : "",
		 (rotation & DRM_MODE_ROTATE_270) ? "270 " : "",
		 (rotation & DRM_MODE_REFLECT_X) ? "FLIPX " : "",
		 (rotation & DRM_MODE_REFLECT_Y) ? "FLIPY " : "",
		 rotation);

	return buf;
}

static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_plane *intel_plane;

	for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
		struct drm_plane_state *state;
		struct drm_plane *plane = &intel_plane->base;
		struct drm_format_name_buf format_name;

		if (!plane->state) {
			seq_puts(m, "plane->state is NULL!\n");
			continue;
		}

		state = plane->state;

		if (state->fb) {
			drm_get_format_name(state->fb->format->format,
					    &format_name);
		} else {
			sprintf(format_name.str, "N/A");
		}

		seq_printf(m, "\t--Plane id %d: type=%s, crtc_pos=%4dx%4d, crtc_size=%4dx%4d, src_pos=%d.%04ux%d.%04u, src_size=%d.%04ux%d.%04u, format=%s, rotation=%s\n",
			   plane->base.id,
			   plane_type(intel_plane->base.type),
			   state->crtc_x, state->crtc_y,
			   state->crtc_w, state->crtc_h,
			   (state->src_x >> 16),
			   ((state->src_x & 0xffff) * 15625) >> 10,
			   (state->src_y >> 16),
			   ((state->src_y & 0xffff) * 15625) >> 10,
			   (state->src_w >> 16),
			   ((state->src_w & 0xffff) * 15625) >> 10,
			   (state->src_h >> 16),
			   ((state->src_h & 0xffff) * 15625) >> 10,
			   format_name.str,
			   plane_rotation(state->rotation));
	}
}
static void intel_scaler_info(struct seq_file *m, struct intel_crtc *intel_crtc)
{
	struct intel_crtc_state *pipe_config;
	int num_scalers = intel_crtc->num_scalers;
	int i;

	pipe_config = to_intel_crtc_state(intel_crtc->base.state);

	/* Not all platforms have a scaler */
	if (num_scalers) {
		seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d",
			   num_scalers,
			   pipe_config->scaler_state.scaler_users,
			   pipe_config->scaler_state.scaler_id);

		for (i = 0; i < num_scalers; i++) {
			struct intel_scaler *sc =
					&pipe_config->scaler_state.scalers[i];

			seq_printf(m, ", scalers[%d]: use=%s, mode=%x",
				   i, yesno(sc->in_use), sc->mode);
		}
		seq_puts(m, "\n");
	} else {
		seq_puts(m, "\tNo scalers available on this platform\n");
	}
}
static int i915_display_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *crtc;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;

	intel_runtime_pm_get(dev_priv);
	seq_printf(m, "CRTC info\n");
	seq_printf(m, "---------\n");
	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *pipe_config;

		drm_modeset_lock(&crtc->base.mutex, NULL);
		pipe_config = to_intel_crtc_state(crtc->base.state);

		seq_printf(m, "CRTC %d: pipe: %c, active=%s, (size=%dx%d), dither=%s, bpp=%d\n",
			   crtc->base.base.id, pipe_name(crtc->pipe),
			   yesno(pipe_config->base.active),
			   pipe_config->pipe_src_w, pipe_config->pipe_src_h,
			   yesno(pipe_config->dither), pipe_config->pipe_bpp);

		if (pipe_config->base.active) {
			struct intel_plane *cursor =
				to_intel_plane(crtc->base.cursor);

			intel_crtc_info(m, crtc);

			seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x\n",
				   yesno(cursor->base.state->visible),
				   cursor->base.state->crtc_x,
				   cursor->base.state->crtc_y,
				   cursor->base.state->crtc_w,
				   cursor->base.state->crtc_h,
				   cursor->cursor.base);
			intel_scaler_info(m, crtc);
			intel_plane_info(m, crtc);
		}

		seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s \n",
			   yesno(!crtc->cpu_fifo_underrun_disabled),
			   yesno(!crtc->pch_fifo_underrun_disabled));
		drm_modeset_unlock(&crtc->base.mutex);
	}

	seq_printf(m, "\n");
	seq_printf(m, "Connector info\n");
	seq_printf(m, "--------------\n");
	mutex_lock(&dev->mode_config.mutex);
	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter)
		intel_connector_info(m, connector);
	drm_connector_list_iter_end(&conn_iter);
	mutex_unlock(&dev->mode_config.mutex);

	intel_runtime_pm_put(dev_priv);

	return 0;
}
static int i915_engine_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	struct drm_printer p;

	intel_runtime_pm_get(dev_priv);

	seq_printf(m, "GT awake? %s\n",
		   yesno(dev_priv->gt.awake));
	seq_printf(m, "Global active requests: %d\n",
		   dev_priv->gt.active_requests);
	seq_printf(m, "CS timestamp frequency: %u kHz\n",
		   dev_priv->info.cs_timestamp_frequency_khz);

	p = drm_seq_file_printer(m);
	for_each_engine(engine, dev_priv, id)
		intel_engine_dump(engine, &p, "%s\n", engine->name);

	intel_runtime_pm_put(dev_priv);

	return 0;
}

static int i915_shrinker_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);

	seq_printf(m, "seeks = %d\n", i915->mm.shrinker.seeks);
	seq_printf(m, "batch = %lu\n", i915->mm.shrinker.batch);

	return 0;
}
static int i915_shared_dplls_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	int i;

	drm_modeset_lock_all(dev);
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

		seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->name, pll->id);
		seq_printf(m, " crtc_mask: 0x%08x, active: 0x%x, on: %s\n",
			   pll->state.crtc_mask, pll->active_mask, yesno(pll->on));
		seq_printf(m, " tracked hardware state:\n");
		seq_printf(m, " dpll:    0x%08x\n", pll->state.hw_state.dpll);
		seq_printf(m, " dpll_md: 0x%08x\n",
			   pll->state.hw_state.dpll_md);
		seq_printf(m, " fp0:     0x%08x\n", pll->state.hw_state.fp0);
		seq_printf(m, " fp1:     0x%08x\n", pll->state.hw_state.fp1);
		seq_printf(m, " wrpll:   0x%08x\n", pll->state.hw_state.wrpll);
	}
	drm_modeset_unlock_all(dev);

	return 0;
}
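
/*
 * Cross-check the workaround list against the hardware: re-read every
 * workaround register and report FAIL if the masked value no longer
 * matches what was programmed.
 */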
static int i915_wa_registers(struct seq_file *m, void *unused)
{
	int i;
	int ret;
	struct intel_engine_cs *engine;
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct i915_workarounds *workarounds = &dev_priv->workarounds;
	enum intel_engine_id id;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	intel_runtime_pm_get(dev_priv);

	seq_printf(m, "Workarounds applied: %d\n", workarounds->count);
	for_each_engine(engine, dev_priv, id)
		seq_printf(m, "HW whitelist count for %s: %d\n",
			   engine->name, workarounds->hw_whitelist_count[id]);
	for (i = 0; i < workarounds->count; ++i) {
		i915_reg_t addr;
		u32 mask, value, read;
		bool ok;

		addr = workarounds->reg[i].addr;
		mask = workarounds->reg[i].mask;
		value = workarounds->reg[i].value;
		read = I915_READ(addr);
		ok = (value & mask) == (read & mask);
		seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X, read: 0x%08x, status: %s\n",
			   i915_mmio_reg_offset(addr), value, mask, read, ok ? "OK" : "FAIL");
	}

	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
static int i915_ipc_status_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;

	seq_printf(m, "Isochronous Priority Control: %s\n",
		   yesno(dev_priv->ipc_enabled));

	return 0;
}

static int i915_ipc_status_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *dev_priv = inode->i_private;

	if (!HAS_IPC(dev_priv))
		return -ENODEV;

	return single_open(file, i915_ipc_status_show, dev_priv);
}

static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf,
				     size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	int ret;
	bool enable;

	ret = kstrtobool_from_user(ubuf, len, &enable);
	if (ret < 0)
		return ret;

	intel_runtime_pm_get(dev_priv);
	if (!dev_priv->ipc_enabled && enable)
		DRM_INFO("Enabling IPC: WM will be proper only after next commit\n");
	dev_priv->wm.distrust_bios_wm = true;
	dev_priv->ipc_enabled = enable;
	intel_enable_ipc(dev_priv);
	intel_runtime_pm_put(dev_priv);

	return len;
}

static const struct file_operations i915_ipc_status_fops = {
	.owner = THIS_MODULE,
	.open = i915_ipc_status_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_ipc_status_write
};
static int i915_ddb_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct skl_ddb_allocation *ddb;
	struct skl_ddb_entry *entry;
	enum pipe pipe;
	int plane;

	if (INTEL_GEN(dev_priv) < 9)
		return -ENODEV;

	drm_modeset_lock_all(dev);

	ddb = &dev_priv->wm.skl_hw.ddb;

	seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");

	for_each_pipe(dev_priv, pipe) {
		seq_printf(m, "Pipe %c\n", pipe_name(pipe));

		for_each_universal_plane(dev_priv, pipe, plane) {
			entry = &ddb->plane[pipe][plane];
			seq_printf(m, "  Plane%-8d%8u%8u%8u\n", plane + 1,
				   entry->start, entry->end,
				   skl_ddb_entry_size(entry));
		}

		entry = &ddb->plane[pipe][PLANE_CURSOR];
		seq_printf(m, "  %-13s%8u%8u%8u\n", "Cursor", entry->start,
			   entry->end, skl_ddb_entry_size(entry));
	}

	drm_modeset_unlock_all(dev);

	return 0;
}
static void drrs_status_per_crtc(struct seq_file *m,
				 struct drm_device *dev,
				 struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_drrs *drrs = &dev_priv->drrs;
	int vrefresh = 0;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		if (connector->state->crtc != &intel_crtc->base)
			continue;

		seq_printf(m, "%s:\n", connector->name);
	}
	drm_connector_list_iter_end(&conn_iter);

	if (dev_priv->vbt.drrs_type == STATIC_DRRS_SUPPORT)
		seq_puts(m, "\tVBT: DRRS_type: Static");
	else if (dev_priv->vbt.drrs_type == SEAMLESS_DRRS_SUPPORT)
		seq_puts(m, "\tVBT: DRRS_type: Seamless");
	else if (dev_priv->vbt.drrs_type == DRRS_NOT_SUPPORTED)
		seq_puts(m, "\tVBT: DRRS_type: None");
	else
		seq_puts(m, "\tVBT: DRRS_type: FIXME: Unrecognized Value");

	seq_puts(m, "\n\n");

	if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) {
		struct intel_panel *panel;

		mutex_lock(&drrs->mutex);
		/* DRRS Supported */
		seq_puts(m, "\tDRRS Supported: Yes\n");

		/* disable_drrs() will make drrs->dp NULL */
		if (!drrs->dp) {
			seq_puts(m, "Idleness DRRS: Disabled");
			mutex_unlock(&drrs->mutex);
			return;
		}

		panel = &drrs->dp->attached_connector->panel;
		seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X",
			   drrs->busy_frontbuffer_bits);

		seq_puts(m, "\n\t\t");
		if (drrs->refresh_rate_type == DRRS_HIGH_RR) {
			seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n");
			vrefresh = panel->fixed_mode->vrefresh;
		} else if (drrs->refresh_rate_type == DRRS_LOW_RR) {
			seq_puts(m, "DRRS_State: DRRS_LOW_RR\n");
			vrefresh = panel->downclock_mode->vrefresh;
		} else {
			seq_printf(m, "DRRS_State: Unknown(%d)\n",
				   drrs->refresh_rate_type);
			mutex_unlock(&drrs->mutex);
			return;
		}

		seq_printf(m, "\t\tVrefresh: %d", vrefresh);

		seq_puts(m, "\n\t\t");
		mutex_unlock(&drrs->mutex);
	} else {
		/* DRRS not supported. Print the VBT parameter */
		seq_puts(m, "\tDRRS Supported : No");
	}
	seq_puts(m, "\n");
}

static int i915_drrs_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *intel_crtc;
	int active_crtc_cnt = 0;

	drm_modeset_lock_all(dev);
	for_each_intel_crtc(dev, intel_crtc) {
		if (intel_crtc->base.state->active) {
			active_crtc_cnt++;
			seq_printf(m, "\nCRTC %d:  ", active_crtc_cnt);

			drrs_status_per_crtc(m, dev, intel_crtc);
		}
	}
	drm_modeset_unlock_all(dev);

	if (!active_crtc_cnt)
		seq_puts(m, "No active crtc found\n");

	return 0;
}
static int i915_dp_mst_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_encoder *intel_encoder;
	struct intel_digital_port *intel_dig_port;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		intel_encoder = intel_attached_encoder(connector);
		if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		intel_dig_port = enc_to_dig_port(&intel_encoder->base);
		if (!intel_dig_port->dp.can_mst)
			continue;

		seq_printf(m, "MST Source Port %c\n",
			   port_name(intel_dig_port->base.port));
		drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
	}
	drm_connector_list_iter_end(&conn_iter);

	return 0;
}
static ssize_t i915_displayport_test_active_write(struct file *file,
						  const char __user *ubuf,
						  size_t len, loff_t *offp)
{
	char *input_buffer;
	int status = 0;
	struct drm_device *dev;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct intel_dp *intel_dp;
	int val = 0;

	dev = ((struct seq_file *)file->private_data)->private;

	if (len == 0)
		return 0;

	input_buffer = memdup_user_nul(ubuf, len);
	if (IS_ERR(input_buffer))
		return PTR_ERR(input_buffer);

	DRM_DEBUG_DRIVER("Copied %d bytes from user\n", (unsigned int)len);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct intel_encoder *encoder;

		if (connector->connector_type !=
		    DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		encoder = to_intel_encoder(connector->encoder);
		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		if (encoder && connector->status == connector_status_connected) {
			intel_dp = enc_to_intel_dp(&encoder->base);
			status = kstrtoint(input_buffer, 10, &val);
			if (status < 0)
				break;
			DRM_DEBUG_DRIVER("Got %d for test active\n", val);
			/* To prevent erroneous activation of the compliance
			 * testing code, only accept an actual value of 1 here
			 */
			if (val == 1)
				intel_dp->compliance.test_active = 1;
			else
				intel_dp->compliance.test_active = 0;
		}
	}
	drm_connector_list_iter_end(&conn_iter);
	kfree(input_buffer);
	if (status < 0)
		return status;

	*offp += len;
	return len;
}
static int i915_displayport_test_active_show(struct seq_file *m, void *data)
{
	struct drm_device *dev = m->private;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct intel_dp *intel_dp;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct intel_encoder *encoder;

		if (connector->connector_type !=
		    DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		encoder = to_intel_encoder(connector->encoder);
		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		if (encoder && connector->status == connector_status_connected) {
			intel_dp = enc_to_intel_dp(&encoder->base);
			if (intel_dp->compliance.test_active)
				seq_puts(m, "1");
			else
				seq_puts(m, "0");
		} else
			seq_puts(m, "0");
	}
	drm_connector_list_iter_end(&conn_iter);

	return 0;
}

static int i915_displayport_test_active_open(struct inode *inode,
					     struct file *file)
{
	struct drm_i915_private *dev_priv = inode->i_private;

	return single_open(file, i915_displayport_test_active_show,
			   &dev_priv->drm);
}

static const struct file_operations i915_displayport_test_active_fops = {
	.owner = THIS_MODULE,
	.open = i915_displayport_test_active_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_displayport_test_active_write
};
static int i915_displayport_test_data_show(struct seq_file *m, void *data)
{
	struct drm_device *dev = m->private;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct intel_dp *intel_dp;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct intel_encoder *encoder;

		if (connector->connector_type !=
		    DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		encoder = to_intel_encoder(connector->encoder);
		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		if (encoder && connector->status == connector_status_connected) {
			intel_dp = enc_to_intel_dp(&encoder->base);
			if (intel_dp->compliance.test_type ==
			    DP_TEST_LINK_EDID_READ)
				seq_printf(m, "%lx",
					   intel_dp->compliance.test_data.edid);
			else if (intel_dp->compliance.test_type ==
				 DP_TEST_LINK_VIDEO_PATTERN) {
				seq_printf(m, "hdisplay: %d\n",
					   intel_dp->compliance.test_data.hdisplay);
				seq_printf(m, "vdisplay: %d\n",
					   intel_dp->compliance.test_data.vdisplay);
				seq_printf(m, "bpc: %u\n",
					   intel_dp->compliance.test_data.bpc);
			}
		} else
			seq_puts(m, "0");
	}
	drm_connector_list_iter_end(&conn_iter);

	return 0;
}

static int i915_displayport_test_data_open(struct inode *inode,
					   struct file *file)
{
	struct drm_i915_private *dev_priv = inode->i_private;

	return single_open(file, i915_displayport_test_data_show,
			   &dev_priv->drm);
}

static const struct file_operations i915_displayport_test_data_fops = {
	.owner = THIS_MODULE,
	.open = i915_displayport_test_data_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release
};

static int i915_displayport_test_type_show(struct seq_file *m, void *data)
{
	struct drm_device *dev = m->private;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct intel_dp *intel_dp;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct intel_encoder *encoder;

		if (connector->connector_type !=
		    DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		encoder = to_intel_encoder(connector->encoder);
		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		if (encoder && connector->status == connector_status_connected) {
			intel_dp = enc_to_intel_dp(&encoder->base);
			seq_printf(m, "%02lx", intel_dp->compliance.test_type);
		} else
			seq_puts(m, "0");
	}
	drm_connector_list_iter_end(&conn_iter);

	return 0;
}

static int i915_displayport_test_type_open(struct inode *inode,
					   struct file *file)
{
	struct drm_i915_private *dev_priv = inode->i_private;

	return single_open(file, i915_displayport_test_type_show,
			   &dev_priv->drm);
}

static const struct file_operations i915_displayport_test_type_fops = {
	.owner = THIS_MODULE,
	.open = i915_displayport_test_type_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release
};
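
/*
 * Watermark latencies are stored in different units per platform: whole
 * microseconds on gen9+/vlv/chv/g4x, but 0.5us steps for WM1+ elsewhere;
 * scale everything to tenths of a microsecond before printing.
 */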
static void wm_latency_show(struct seq_file *m, const uint16_t wm[8])
{
	struct drm_i915_private *dev_priv = m->private;
	struct drm_device *dev = &dev_priv->drm;
	int level;
	int num_levels;

	if (IS_CHERRYVIEW(dev_priv))
		num_levels = 3;
	else if (IS_VALLEYVIEW(dev_priv))
		num_levels = 1;
	else if (IS_G4X(dev_priv))
		num_levels = 3;
	else
		num_levels = ilk_wm_max_level(dev_priv) + 1;

	drm_modeset_lock_all(dev);

	for (level = 0; level < num_levels; level++) {
		unsigned int latency = wm[level];

		/*
		 * - WM1+ latency values in 0.5us units
		 * - latencies are in us on gen9/vlv/chv
		 */
		if (INTEL_GEN(dev_priv) >= 9 ||
		    IS_VALLEYVIEW(dev_priv) ||
		    IS_CHERRYVIEW(dev_priv) ||
		    IS_G4X(dev_priv))
			latency *= 10;
		else if (level > 0)
			latency *= 5;

		seq_printf(m, "WM%d %u (%u.%u usec)\n",
			   level, wm[level], latency / 10, latency % 10);
	}

	drm_modeset_unlock_all(dev);
}

static int pri_wm_latency_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	const uint16_t *latencies;

	if (INTEL_GEN(dev_priv) >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = dev_priv->wm.pri_latency;

	wm_latency_show(m, latencies);

	return 0;
}

static int spr_wm_latency_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	const uint16_t *latencies;

	if (INTEL_GEN(dev_priv) >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = dev_priv->wm.spr_latency;

	wm_latency_show(m, latencies);

	return 0;
}

static int cur_wm_latency_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	const uint16_t *latencies;

	if (INTEL_GEN(dev_priv) >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = dev_priv->wm.cur_latency;

	wm_latency_show(m, latencies);

	return 0;
}
static int pri_wm_latency_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *dev_priv = inode->i_private;

	if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
		return -ENODEV;

	return single_open(file, pri_wm_latency_show, dev_priv);
}

static int spr_wm_latency_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *dev_priv = inode->i_private;

	if (HAS_GMCH_DISPLAY(dev_priv))
		return -ENODEV;

	return single_open(file, spr_wm_latency_show, dev_priv);
}

static int cur_wm_latency_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *dev_priv = inode->i_private;

	if (HAS_GMCH_DISPLAY(dev_priv))
		return -ENODEV;

	return single_open(file, cur_wm_latency_show, dev_priv);
}

static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
				size_t len, loff_t *offp, uint16_t wm[8])
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	struct drm_device *dev = &dev_priv->drm;
	uint16_t new[8] = { 0 };
	int num_levels;
	int level;
	int ret;
	char tmp[32];

	if (IS_CHERRYVIEW(dev_priv))
		num_levels = 3;
	else if (IS_VALLEYVIEW(dev_priv))
		num_levels = 1;
	else if (IS_G4X(dev_priv))
		num_levels = 3;
	else
		num_levels = ilk_wm_max_level(dev_priv) + 1;

	if (len >= sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(tmp, ubuf, len))
		return -EFAULT;

	tmp[len] = '\0';

	ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
		     &new[0], &new[1], &new[2], &new[3],
		     &new[4], &new[5], &new[6], &new[7]);
	if (ret != num_levels)
		return -EINVAL;

	drm_modeset_lock_all(dev);

	for (level = 0; level < num_levels; level++)
		wm[level] = new[level];

	drm_modeset_unlock_all(dev);

	return len;
}
static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
				    size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	uint16_t *latencies;

	if (INTEL_GEN(dev_priv) >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = dev_priv->wm.pri_latency;

	return wm_latency_write(file, ubuf, len, offp, latencies);
}

static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
				    size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	uint16_t *latencies;

	if (INTEL_GEN(dev_priv) >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = dev_priv->wm.spr_latency;

	return wm_latency_write(file, ubuf, len, offp, latencies);
}

static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
				    size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	uint16_t *latencies;

	if (INTEL_GEN(dev_priv) >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = dev_priv->wm.cur_latency;

	return wm_latency_write(file, ubuf, len, offp, latencies);
}

static const struct file_operations i915_pri_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = pri_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = pri_wm_latency_write
};

static const struct file_operations i915_spr_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = spr_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = spr_wm_latency_write
};

static const struct file_operations i915_cur_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = cur_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = cur_wm_latency_write
};
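
/*
 * i915_wedged: reading reports whether the GPU is terminally wedged;
 * writing an engine mask marks those engines as hung and triggers error
 * handling, then waits for the reset handoff to complete.
 */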
static int
i915_wedged_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;

	*val = i915_terminally_wedged(&dev_priv->gpu_error);

	return 0;
}

static int
i915_wedged_set(void *data, u64 val)
{
	struct drm_i915_private *i915 = data;
	struct intel_engine_cs *engine;
	unsigned int tmp;

	/*
	 * There is no safeguard against this debugfs entry colliding
	 * with the hangcheck calling same i915_handle_error() in
	 * parallel, causing an explosion. For now we assume that the
	 * test harness is responsible enough not to inject gpu hangs
	 * while it is writing to 'i915_wedged'.
	 */

	if (i915_reset_backoff(&i915->gpu_error))
		return -EAGAIN;

	for_each_engine_masked(engine, i915, val, tmp) {
		engine->hangcheck.seqno = intel_engine_get_seqno(engine);
		engine->hangcheck.stalled = true;
	}

	i915_handle_error(i915, val, "Manually setting wedged to %llu", val);

	wait_on_bit(&i915->gpu_error.flags,
		    I915_RESET_HANDOFF,
		    TASK_UNINTERRUPTIBLE);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
			i915_wedged_get, i915_wedged_set,
			"%llu\n");

static int
fault_irq_set(struct drm_i915_private *i915,
	      unsigned long *irq,
	      unsigned long val)
{
	int err;

	err = mutex_lock_interruptible(&i915->drm.struct_mutex);
	if (err)
		return err;

	err = i915_gem_wait_for_idle(i915,
				     I915_WAIT_LOCKED |
				     I915_WAIT_INTERRUPTIBLE);
	if (err)
		goto err_unlock;

	*irq = val;
	mutex_unlock(&i915->drm.struct_mutex);

	/* Flush idle worker to disarm irq */
	drain_delayed_work(&i915->gt.idle_work);

	return 0;

err_unlock:
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}
static int
i915_ring_missed_irq_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;

	*val = dev_priv->gpu_error.missed_irq_rings;
	return 0;
}

static int
i915_ring_missed_irq_set(void *data, u64 val)
{
	struct drm_i915_private *i915 = data;

	return fault_irq_set(i915, &i915->gpu_error.missed_irq_rings, val);
}

DEFINE_SIMPLE_ATTRIBUTE(i915_ring_missed_irq_fops,
			i915_ring_missed_irq_get, i915_ring_missed_irq_set,
			"0x%08llx\n");

static int
i915_ring_test_irq_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;

	*val = dev_priv->gpu_error.test_irq_rings;

	return 0;
}

static int
i915_ring_test_irq_set(void *data, u64 val)
{
	struct drm_i915_private *i915 = data;

	val &= INTEL_INFO(i915)->ring_mask;
	DRM_DEBUG_DRIVER("Masking interrupts on rings 0x%08llx\n", val);

	return fault_irq_set(i915, &i915->gpu_error.test_irq_rings, val);
}

DEFINE_SIMPLE_ATTRIBUTE(i915_ring_test_irq_fops,
			i915_ring_test_irq_get, i915_ring_test_irq_set,
			"0x%08llx\n");
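
/*
 * i915_gem_drop_caches takes a bitmask of the DROP_* flags below; e.g.
 * writing DROP_ALL idles the GPU, retires requests, runs the shrinker
 * and drains freed objects in one go.
 */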
#define DROP_UNBOUND	BIT(0)
#define DROP_BOUND	BIT(1)
#define DROP_RETIRE	BIT(2)
#define DROP_ACTIVE	BIT(3)
#define DROP_FREED	BIT(4)
#define DROP_SHRINK_ALL	BIT(5)
#define DROP_IDLE	BIT(6)
#define DROP_ALL (DROP_UNBOUND	| \
		  DROP_BOUND	| \
		  DROP_RETIRE	| \
		  DROP_ACTIVE	| \
		  DROP_FREED	| \
		  DROP_SHRINK_ALL | \
		  DROP_IDLE)
static int
i915_drop_caches_get(void *data, u64 *val)
{
	*val = DROP_ALL;

	return 0;
}

static int
i915_drop_caches_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	struct drm_device *dev = &dev_priv->drm;
	int ret = 0;

	DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n",
		  val, val & DROP_ALL);

	/* No need to check and wait for gpu resets, only libdrm auto-restarts
	 * on ioctls on -EAGAIN. */
	if (val & (DROP_ACTIVE | DROP_RETIRE)) {
		ret = mutex_lock_interruptible(&dev->struct_mutex);
		if (ret)
			return ret;

		if (val & DROP_ACTIVE)
			ret = i915_gem_wait_for_idle(dev_priv,
						     I915_WAIT_INTERRUPTIBLE |
						     I915_WAIT_LOCKED);

		if (val & DROP_RETIRE)
			i915_gem_retire_requests(dev_priv);

		mutex_unlock(&dev->struct_mutex);
	}

	fs_reclaim_acquire(GFP_KERNEL);
	if (val & DROP_BOUND)
		i915_gem_shrink(dev_priv, LONG_MAX, NULL, I915_SHRINK_BOUND);

	if (val & DROP_UNBOUND)
		i915_gem_shrink(dev_priv, LONG_MAX, NULL, I915_SHRINK_UNBOUND);

	if (val & DROP_SHRINK_ALL)
		i915_gem_shrink_all(dev_priv);
	fs_reclaim_release(GFP_KERNEL);

	if (val & DROP_IDLE)
		drain_delayed_work(&dev_priv->gt.idle_work);

	if (val & DROP_FREED)
		i915_gem_drain_freed_objects(dev_priv);

	return ret;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
			i915_drop_caches_get, i915_drop_caches_set,
			"0x%08llx\n");
4101 i915_max_freq_get(void *data
, u64
*val
)
4103 struct drm_i915_private
*dev_priv
= data
;
4105 if (INTEL_GEN(dev_priv
) < 6)
4108 *val
= intel_gpu_freq(dev_priv
, dev_priv
->gt_pm
.rps
.max_freq_softlimit
);
4113 i915_max_freq_set(void *data
, u64 val
)
4115 struct drm_i915_private
*dev_priv
= data
;
4116 struct intel_rps
*rps
= &dev_priv
->gt_pm
.rps
;
4120 if (INTEL_GEN(dev_priv
) < 6)
4123 DRM_DEBUG_DRIVER("Manually setting max freq to %llu\n", val
);
4125 ret
= mutex_lock_interruptible(&dev_priv
->pcu_lock
);
4130 * Turbo will still be enabled, but won't go above the set value.
4132 val
= intel_freq_opcode(dev_priv
, val
);
4134 hw_max
= rps
->max_freq
;
4135 hw_min
= rps
->min_freq
;
4137 if (val
< hw_min
|| val
> hw_max
|| val
< rps
->min_freq_softlimit
) {
4138 mutex_unlock(&dev_priv
->pcu_lock
);
4142 rps
->max_freq_softlimit
= val
;
4144 if (intel_set_rps(dev_priv
, val
))
4145 DRM_DEBUG_DRIVER("failed to update RPS to new softlimit\n");
4147 mutex_unlock(&dev_priv
->pcu_lock
);
4152 DEFINE_SIMPLE_ATTRIBUTE(i915_max_freq_fops
,
4153 i915_max_freq_get
, i915_max_freq_set
,
4157 i915_min_freq_get(void *data
, u64
*val
)
4159 struct drm_i915_private
*dev_priv
= data
;
4161 if (INTEL_GEN(dev_priv
) < 6)
4164 *val
= intel_gpu_freq(dev_priv
, dev_priv
->gt_pm
.rps
.min_freq_softlimit
);
static int
i915_min_freq_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	u32 hw_max, hw_min;
	int ret;

	if (INTEL_GEN(dev_priv) < 6)
		return -ENODEV;

	DRM_DEBUG_DRIVER("Manually setting min freq to %llu\n", val);

	ret = mutex_lock_interruptible(&dev_priv->pcu_lock);
	if (ret)
		return ret;

	/*
	 * Turbo will still be enabled, but won't go below the set value.
	 */
	val = intel_freq_opcode(dev_priv, val);

	hw_max = rps->max_freq;
	hw_min = rps->min_freq;

	if (val < hw_min ||
	    val > hw_max || val > rps->max_freq_softlimit) {
		mutex_unlock(&dev_priv->pcu_lock);
		return -EINVAL;
	}

	rps->min_freq_softlimit = val;

	if (intel_set_rps(dev_priv, val))
		DRM_DEBUG_DRIVER("failed to update RPS to new softlimit\n");

	mutex_unlock(&dev_priv->pcu_lock);

	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(i915_min_freq_fops,
			i915_min_freq_get, i915_min_freq_set,
			"%llu\n");
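/*
 * i915_cache_sharing exposes the two-bit snoop policy field of the
 * GEN6_MBCUNIT_SNPCR register on gen6/gen7, which controls how the uncore
 * shares its cache with the GPU; valid values are 0-3.
 */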
static int
i915_cache_sharing_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;
	u32 snpcr;

	if (!(IS_GEN6(dev_priv) || IS_GEN7(dev_priv)))
		return -ENODEV;

	intel_runtime_pm_get(dev_priv);

	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);

	intel_runtime_pm_put(dev_priv);

	*val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;

	return 0;
}
static int
i915_cache_sharing_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	u32 snpcr;

	if (!(IS_GEN6(dev_priv) || IS_GEN7(dev_priv)))
		return -ENODEV;

	if (val > 3)
		return -EINVAL;

	intel_runtime_pm_get(dev_priv);
	DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);

	/* Update the cache sharing policy here as well */
	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
	snpcr &= ~GEN6_MBC_SNPCR_MASK;
	snpcr |= (val << GEN6_MBC_SNPCR_SHIFT);
	I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);

	intel_runtime_pm_put(dev_priv);
	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
			i915_cache_sharing_get, i915_cache_sharing_set,
			"%llu\n");
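/*
 * The *_sseu_device_status() helpers below probe the per-platform
 * power-gating ACK registers to report which slices, subslices and
 * execution units (EUs) are currently powered up, as opposed to the
 * static fuse information held in INTEL_INFO()->sseu.
 */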
static void cherryview_sseu_device_status(struct drm_i915_private *dev_priv,
					  struct sseu_dev_info *sseu)
{
	int ss_max = 2;
	int ss;
	u32 sig1[ss_max], sig2[ss_max];

	sig1[0] = I915_READ(CHV_POWER_SS0_SIG1);
	sig1[1] = I915_READ(CHV_POWER_SS1_SIG1);
	sig2[0] = I915_READ(CHV_POWER_SS0_SIG2);
	sig2[1] = I915_READ(CHV_POWER_SS1_SIG2);

	for (ss = 0; ss < ss_max; ss++) {
		unsigned int eu_cnt;

		if (sig1[ss] & CHV_SS_PG_ENABLE)
			/* skip disabled subslice */
			continue;

		sseu->slice_mask = BIT(0);
		sseu->subslice_mask |= BIT(ss);
		eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) +
			 ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) +
			 ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) +
			 ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2);
		sseu->eu_total += eu_cnt;
		sseu->eu_per_subslice = max_t(unsigned int,
					      sseu->eu_per_subslice, eu_cnt);
	}
}
static void gen10_sseu_device_status(struct drm_i915_private *dev_priv,
				     struct sseu_dev_info *sseu)
{
	const struct intel_device_info *info = INTEL_INFO(dev_priv);
	int s_max = 6, ss_max = 4;
	int s, ss;
	u32 s_reg[s_max], eu_reg[2 * s_max], eu_mask[2];

	for (s = 0; s < s_max; s++) {
		/*
		 * FIXME: Valid SS Mask respects the spec and reads only
		 * the valid bits for those registers, excluding reserved
		 * ones, although this seems wrong because it would leave
		 * many subslices without ACK.
		 */
		s_reg[s] = I915_READ(GEN10_SLICE_PGCTL_ACK(s)) &
			GEN10_PGCTL_VALID_SS_MASK(s);
		eu_reg[2 * s] = I915_READ(GEN10_SS01_EU_PGCTL_ACK(s));
		eu_reg[2 * s + 1] = I915_READ(GEN10_SS23_EU_PGCTL_ACK(s));
	}

	eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
		     GEN9_PGCTL_SSA_EU19_ACK |
		     GEN9_PGCTL_SSA_EU210_ACK |
		     GEN9_PGCTL_SSA_EU311_ACK;
	eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
		     GEN9_PGCTL_SSB_EU19_ACK |
		     GEN9_PGCTL_SSB_EU210_ACK |
		     GEN9_PGCTL_SSB_EU311_ACK;

	for (s = 0; s < s_max; s++) {
		if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
			/* skip disabled slice */
			continue;

		sseu->slice_mask |= BIT(s);
		sseu->subslice_mask = info->sseu.subslice_mask;

		for (ss = 0; ss < ss_max; ss++) {
			unsigned int eu_cnt;

			if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
				/* skip disabled subslice */
				continue;

			eu_cnt = 2 * hweight32(eu_reg[2 * s + ss / 2] &
					       eu_mask[ss % 2]);
			sseu->eu_total += eu_cnt;
			sseu->eu_per_subslice = max_t(unsigned int,
						      sseu->eu_per_subslice,
						      eu_cnt);
		}
	}
}
static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
				    struct sseu_dev_info *sseu)
{
	int s_max = 3, ss_max = 4;
	int s, ss;
	u32 s_reg[s_max], eu_reg[2 * s_max], eu_mask[2];

	/* BXT has a single slice and at most 3 subslices. */
	if (IS_GEN9_LP(dev_priv)) {
		s_max = 1;
		ss_max = 3;
	}

	for (s = 0; s < s_max; s++) {
		s_reg[s] = I915_READ(GEN9_SLICE_PGCTL_ACK(s));
		eu_reg[2 * s] = I915_READ(GEN9_SS01_EU_PGCTL_ACK(s));
		eu_reg[2 * s + 1] = I915_READ(GEN9_SS23_EU_PGCTL_ACK(s));
	}

	eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
		     GEN9_PGCTL_SSA_EU19_ACK |
		     GEN9_PGCTL_SSA_EU210_ACK |
		     GEN9_PGCTL_SSA_EU311_ACK;
	eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
		     GEN9_PGCTL_SSB_EU19_ACK |
		     GEN9_PGCTL_SSB_EU210_ACK |
		     GEN9_PGCTL_SSB_EU311_ACK;

	for (s = 0; s < s_max; s++) {
		if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
			/* skip disabled slice */
			continue;

		sseu->slice_mask |= BIT(s);

		if (IS_GEN9_BC(dev_priv))
			sseu->subslice_mask =
				INTEL_INFO(dev_priv)->sseu.subslice_mask;

		for (ss = 0; ss < ss_max; ss++) {
			unsigned int eu_cnt;

			if (IS_GEN9_LP(dev_priv)) {
				if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
					/* skip disabled subslice */
					continue;

				sseu->subslice_mask |= BIT(ss);
			}

			eu_cnt = 2 * hweight32(eu_reg[2 * s + ss / 2] &
					       eu_mask[ss % 2]);
			sseu->eu_total += eu_cnt;
			sseu->eu_per_subslice = max_t(unsigned int,
						      sseu->eu_per_subslice,
						      eu_cnt);
		}
	}
}
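/*
 * BDW has no per-subslice power-gating ACK registers, so the status is
 * derived from the fuse info instead: every subslice of an enabled slice
 * is assumed active, minus the EUs the fuses report as disabled.
 */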
static void broadwell_sseu_device_status(struct drm_i915_private *dev_priv,
					 struct sseu_dev_info *sseu)
{
	u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO);
	int s;

	sseu->slice_mask = slice_info & GEN8_LSLICESTAT_MASK;

	if (sseu->slice_mask) {
		sseu->subslice_mask = INTEL_INFO(dev_priv)->sseu.subslice_mask;
		sseu->eu_per_subslice =
			INTEL_INFO(dev_priv)->sseu.eu_per_subslice;
		sseu->eu_total = sseu->eu_per_subslice *
				 sseu_subslice_total(sseu);

		/* subtract fused off EU(s) from enabled slice(s) */
		for (s = 0; s < fls(sseu->slice_mask); s++) {
			u8 subslice_7eu =
				INTEL_INFO(dev_priv)->sseu.subslice_7eu[s];

			sseu->eu_total -= hweight8(subslice_7eu);
		}
	}
}
static void i915_print_sseu_info(struct seq_file *m, bool is_available_info,
				 const struct sseu_dev_info *sseu)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const char *type = is_available_info ? "Available" : "Enabled";

	seq_printf(m, "  %s Slice Mask: %04x\n", type,
		   sseu->slice_mask);
	seq_printf(m, "  %s Slice Total: %u\n", type,
		   hweight8(sseu->slice_mask));
	seq_printf(m, "  %s Subslice Total: %u\n", type,
		   sseu_subslice_total(sseu));
	seq_printf(m, "  %s Subslice Mask: %04x\n", type,
		   sseu->subslice_mask);
	seq_printf(m, "  %s Subslice Per Slice: %u\n", type,
		   hweight8(sseu->subslice_mask));
	seq_printf(m, "  %s EU Total: %u\n", type,
		   sseu->eu_total);
	seq_printf(m, "  %s EU Per Subslice: %u\n", type,
		   sseu->eu_per_subslice);

	if (!is_available_info)
		return;

	seq_printf(m, "  Has Pooled EU: %s\n", yesno(HAS_POOLED_EU(dev_priv)));
	if (HAS_POOLED_EU(dev_priv))
		seq_printf(m, "  Min EU in pool: %u\n", sseu->min_eu_in_pool);

	seq_printf(m, "  Has Slice Power Gating: %s\n",
		   yesno(sseu->has_slice_pg));
	seq_printf(m, "  Has Subslice Power Gating: %s\n",
		   yesno(sseu->has_subslice_pg));
	seq_printf(m, "  Has EU Power Gating: %s\n",
		   yesno(sseu->has_eu_pg));
}
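/*
 * i915_sseu_status prints the fused capabilities followed by the live
 * power-gating state, e.g. (illustrative values only):
 *
 *	SSEU Device Info
 *	  Available Slice Mask: 0001
 *	  Available Slice Total: 1
 *	  ...
 *	SSEU Device Status
 *	  Enabled Slice Mask: 0001
 *	  ...
 */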
static int i915_sseu_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct sseu_dev_info sseu;

	if (INTEL_GEN(dev_priv) < 8)
		return -ENODEV;

	seq_puts(m, "SSEU Device Info\n");
	i915_print_sseu_info(m, true, &INTEL_INFO(dev_priv)->sseu);

	seq_puts(m, "SSEU Device Status\n");
	memset(&sseu, 0, sizeof(sseu));

	intel_runtime_pm_get(dev_priv);

	if (IS_CHERRYVIEW(dev_priv)) {
		cherryview_sseu_device_status(dev_priv, &sseu);
	} else if (IS_BROADWELL(dev_priv)) {
		broadwell_sseu_device_status(dev_priv, &sseu);
	} else if (IS_GEN9(dev_priv)) {
		gen9_sseu_device_status(dev_priv, &sseu);
	} else if (INTEL_GEN(dev_priv) >= 10) {
		gen10_sseu_device_status(dev_priv, &sseu);
	}

	intel_runtime_pm_put(dev_priv);

	i915_print_sseu_info(m, false, &sseu);

	return 0;
}
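/*
 * i915_forcewake_user: while this file is held open, runtime PM is pinned
 * and the forcewake domains are held, so the device stays awake and its
 * registers remain accessible (used by tools such as intel-gpu-tools).
 */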
static int i915_forcewake_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *i915 = inode->i_private;

	if (INTEL_GEN(i915) < 6)
		return 0;

	intel_runtime_pm_get(i915);
	intel_uncore_forcewake_user_get(i915);

	return 0;
}
static int i915_forcewake_release(struct inode *inode, struct file *file)
{
	struct drm_i915_private *i915 = inode->i_private;

	if (INTEL_GEN(i915) < 6)
		return 0;

	intel_uncore_forcewake_user_put(i915);
	intel_runtime_pm_put(i915);

	return 0;
}
static const struct file_operations i915_forcewake_fops = {
	.owner = THIS_MODULE,
	.open = i915_forcewake_open,
	.release = i915_forcewake_release,
};
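/*
 * i915_hpd_storm_ctl: read to see the current HPD storm threshold and
 * whether a storm has been detected; write a decimal count to change the
 * threshold, "reset" to restore the default, or 0 to disable detection.
 * A sketch, assuming debugfs at /sys/kernel/debug and DRM minor 0:
 *
 *	echo reset > /sys/kernel/debug/dri/0/i915_hpd_storm_ctl
 */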
static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	struct i915_hotplug *hotplug = &dev_priv->hotplug;

	seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
	seq_printf(m, "Detected: %s\n",
		   yesno(delayed_work_pending(&hotplug->reenable_work)));

	return 0;
}
static ssize_t i915_hpd_storm_ctl_write(struct file *file,
					const char __user *ubuf, size_t len,
					loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	struct i915_hotplug *hotplug = &dev_priv->hotplug;
	unsigned int new_threshold;
	int i;
	char *newline;
	char tmp[16];

	if (len >= sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(tmp, ubuf, len))
		return -EFAULT;

	tmp[len] = '\0';

	/* Strip newline, if any */
	newline = strchr(tmp, '\n');
	if (newline)
		*newline = '\0';

	if (strcmp(tmp, "reset") == 0)
		new_threshold = HPD_STORM_DEFAULT_THRESHOLD;
	else if (kstrtouint(tmp, 10, &new_threshold) != 0)
		return -EINVAL;

	if (new_threshold > 0)
		DRM_DEBUG_KMS("Setting HPD storm detection threshold to %d\n",
			      new_threshold);
	else
		DRM_DEBUG_KMS("Disabling HPD storm detection\n");

	spin_lock_irq(&dev_priv->irq_lock);
	hotplug->hpd_storm_threshold = new_threshold;
	/* Reset the HPD storm stats so we don't accidentally trigger a storm */
	for_each_hpd_pin(i)
		hotplug->stats[i].count = 0;
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Re-enable hpd immediately if we were in an irq storm */
	flush_delayed_work(&dev_priv->hotplug.reenable_work);

	return len;
}
static int i915_hpd_storm_ctl_open(struct inode *inode, struct file *file)
{
	return single_open(file, i915_hpd_storm_ctl_show, inode->i_private);
}
static const struct file_operations i915_hpd_storm_ctl_fops = {
	.owner = THIS_MODULE,
	.open = i915_hpd_storm_ctl_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_hpd_storm_ctl_write
};
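/*
 * Read-only informational files, registered en masse via
 * drm_debugfs_create_files(). The writable control files live in
 * i915_debugfs_files[] further below.
 */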
static const struct drm_info_list i915_debugfs_list[] = {
	{"i915_capabilities", i915_capabilities, 0},
	{"i915_gem_objects", i915_gem_object_info, 0},
	{"i915_gem_gtt", i915_gem_gtt_info, 0},
	{"i915_gem_stolen", i915_gem_stolen_list_info},
	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
	{"i915_gem_interrupt", i915_interrupt_info, 0},
	{"i915_gem_batch_pool", i915_gem_batch_pool_info, 0},
	{"i915_guc_info", i915_guc_info, 0},
	{"i915_guc_load_status", i915_guc_load_status_info, 0},
	{"i915_guc_log_dump", i915_guc_log_dump, 0},
	{"i915_guc_load_err_log_dump", i915_guc_log_dump, 0, (void *)1},
	{"i915_guc_stage_pool", i915_guc_stage_pool, 0},
	{"i915_huc_load_status", i915_huc_load_status_info, 0},
	{"i915_frequency_info", i915_frequency_info, 0},
	{"i915_hangcheck_info", i915_hangcheck_info, 0},
	{"i915_reset_info", i915_reset_info, 0},
	{"i915_drpc_info", i915_drpc_info, 0},
	{"i915_emon_status", i915_emon_status, 0},
	{"i915_ring_freq_table", i915_ring_freq_table, 0},
	{"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
	{"i915_fbc_status", i915_fbc_status, 0},
	{"i915_ips_status", i915_ips_status, 0},
	{"i915_sr_status", i915_sr_status, 0},
	{"i915_opregion", i915_opregion, 0},
	{"i915_vbt", i915_vbt, 0},
	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
	{"i915_context_status", i915_context_status, 0},
	{"i915_forcewake_domains", i915_forcewake_domains, 0},
	{"i915_swizzle_info", i915_swizzle_info, 0},
	{"i915_ppgtt_info", i915_ppgtt_info, 0},
	{"i915_llc", i915_llc, 0},
	{"i915_edp_psr_status", i915_edp_psr_status, 0},
	{"i915_sink_crc_eDP1", i915_sink_crc, 0},
	{"i915_energy_uJ", i915_energy_uJ, 0},
	{"i915_runtime_pm_status", i915_runtime_pm_status, 0},
	{"i915_power_domain_info", i915_power_domain_info, 0},
	{"i915_dmc_info", i915_dmc_info, 0},
	{"i915_display_info", i915_display_info, 0},
	{"i915_engine_info", i915_engine_info, 0},
	{"i915_shrinker_info", i915_shrinker_info, 0},
	{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
	{"i915_dp_mst_info", i915_dp_mst_info, 0},
	{"i915_wa_registers", i915_wa_registers, 0},
	{"i915_ddb_info", i915_ddb_info, 0},
	{"i915_sseu_status", i915_sseu_status, 0},
	{"i915_drrs_status", i915_drrs_status, 0},
	{"i915_rps_boost_info", i915_rps_boost_info, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
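/*
 * Writable control files; each entry pairs a name with its file_operations
 * and is registered individually in i915_debugfs_register() below.
 */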
static const struct i915_debugfs_files {
	const char *name;
	const struct file_operations *fops;
} i915_debugfs_files[] = {
	{"i915_wedged", &i915_wedged_fops},
	{"i915_max_freq", &i915_max_freq_fops},
	{"i915_min_freq", &i915_min_freq_fops},
	{"i915_cache_sharing", &i915_cache_sharing_fops},
	{"i915_ring_missed_irq", &i915_ring_missed_irq_fops},
	{"i915_ring_test_irq", &i915_ring_test_irq_fops},
	{"i915_gem_drop_caches", &i915_drop_caches_fops},
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
	{"i915_error_state", &i915_error_state_fops},
	{"i915_gpu_info", &i915_gpu_info_fops},
#endif
	{"i915_next_seqno", &i915_next_seqno_fops},
	{"i915_display_crc_ctl", &i915_display_crc_ctl_fops},
	{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
	{"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
	{"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
	{"i915_fbc_false_color", &i915_fbc_false_color_fops},
	{"i915_dp_test_data", &i915_displayport_test_data_fops},
	{"i915_dp_test_type", &i915_displayport_test_type_fops},
	{"i915_dp_test_active", &i915_displayport_test_active_fops},
	{"i915_guc_log_control", &i915_guc_log_control_fops},
	{"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops},
	{"i915_ipc_status", &i915_ipc_status_fops}
};
int i915_debugfs_register(struct drm_i915_private *dev_priv)
{
	struct drm_minor *minor = dev_priv->drm.primary;
	struct dentry *ent;
	int ret, i;

	ent = debugfs_create_file("i915_forcewake_user", S_IRUSR,
				  minor->debugfs_root, to_i915(minor->dev),
				  &i915_forcewake_fops);
	if (!ent)
		return -ENOMEM;

	ret = intel_pipe_crc_create(minor);
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
		ent = debugfs_create_file(i915_debugfs_files[i].name,
					  S_IRUGO | S_IWUSR,
					  minor->debugfs_root,
					  to_i915(minor->dev),
					  i915_debugfs_files[i].fops);
		if (!ent)
			return -ENOMEM;
	}

	return drm_debugfs_create_files(i915_debugfs_list,
					I915_DEBUGFS_ENTRIES,
					minor->debugfs_root, minor);
}
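/*
 * i915_dpcd dumps selected ranges of the sink's DPCD (DisplayPort
 * Configuration Data) read over the AUX channel, one hex-dumped block
 * per line in the form "<offset>: <bytes>".
 */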
struct dpcd_block {
	/* DPCD dump start address. */
	unsigned int offset;
	/* DPCD dump end address, inclusive. If unset, .size will be used. */
	unsigned int end;
	/* DPCD dump size. Used if .end is unset. If unset, defaults to 1. */
	size_t size;
	/* Only valid for eDP. */
	bool edp;
};

static const struct dpcd_block i915_dpcd_debug[] = {
	{ .offset = DP_DPCD_REV, .size = DP_RECEIVER_CAP_SIZE },
	{ .offset = DP_PSR_SUPPORT, .end = DP_PSR_CAPS },
	{ .offset = DP_DOWNSTREAM_PORT_0, .size = 16 },
	{ .offset = DP_LINK_BW_SET, .end = DP_EDP_CONFIGURATION_SET },
	{ .offset = DP_SINK_COUNT, .end = DP_ADJUST_REQUEST_LANE2_3 },
	{ .offset = DP_SET_POWER },
	{ .offset = DP_EDP_DPCD_REV },
	{ .offset = DP_EDP_GENERAL_CAP_1, .end = DP_EDP_GENERAL_CAP_3 },
	{ .offset = DP_EDP_DISPLAY_CONTROL_REGISTER, .end = DP_EDP_BACKLIGHT_FREQ_CAP_MAX_LSB },
	{ .offset = DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET, .end = DP_EDP_DBC_MAXIMUM_BRIGHTNESS_SET },
};
static int i915_dpcd_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct intel_dp *intel_dp =
		enc_to_intel_dp(&intel_attached_encoder(connector)->base);
	uint8_t buf[16];
	ssize_t err;
	int i;

	if (connector->status != connector_status_connected)
		return -ENODEV;

	for (i = 0; i < ARRAY_SIZE(i915_dpcd_debug); i++) {
		const struct dpcd_block *b = &i915_dpcd_debug[i];
		size_t size = b->end ? b->end - b->offset + 1 : (b->size ?: 1);

		if (b->edp &&
		    connector->connector_type != DRM_MODE_CONNECTOR_eDP)
			continue;

		/* low tech for now */
		if (WARN_ON(size > sizeof(buf)))
			continue;

		err = drm_dp_dpcd_read(&intel_dp->aux, b->offset, buf, size);
		if (err <= 0) {
			DRM_ERROR("dpcd read (%zu bytes at %u) failed (%zd)\n",
				  size, b->offset, err);
			continue;
		}

		seq_printf(m, "%04x: %*ph\n", b->offset, (int) size, buf);
	}

	return 0;
}
static int i915_dpcd_open(struct inode *inode, struct file *file)
{
	return single_open(file, i915_dpcd_show, inode->i_private);
}
static const struct file_operations i915_dpcd_fops = {
	.owner = THIS_MODULE,
	.open = i915_dpcd_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
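/*
 * i915_panel_timings reports the panel power sequencing and backlight
 * delays the driver is using for the attached eDP panel.
 */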
static int i915_panel_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct intel_dp *intel_dp =
		enc_to_intel_dp(&intel_attached_encoder(connector)->base);

	if (connector->status != connector_status_connected)
		return -ENODEV;

	seq_printf(m, "Panel power up delay: %d\n",
		   intel_dp->panel_power_up_delay);
	seq_printf(m, "Panel power down delay: %d\n",
		   intel_dp->panel_power_down_delay);
	seq_printf(m, "Backlight on delay: %d\n",
		   intel_dp->backlight_on_delay);
	seq_printf(m, "Backlight off delay: %d\n",
		   intel_dp->backlight_off_delay);

	return 0;
}
static int i915_panel_open(struct inode *inode, struct file *file)
{
	return single_open(file, i915_panel_show, inode->i_private);
}
static const struct file_operations i915_panel_fops = {
	.owner = THIS_MODULE,
	.open = i915_panel_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
/**
 * i915_debugfs_connector_add - add i915 specific connector debugfs files
 * @connector: pointer to a registered drm_connector
 *
 * Cleanup will be done by drm_connector_unregister() through a call to
 * drm_debugfs_connector_remove().
 *
 * Returns 0 on success, negative error codes on error.
 */
int i915_debugfs_connector_add(struct drm_connector *connector)
{
	struct dentry *root = connector->debugfs_entry;

	/* The connector must have been registered beforehand. */
	if (!root)
		return -ENODEV;

	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
	    connector->connector_type == DRM_MODE_CONNECTOR_eDP)
		debugfs_create_file("i915_dpcd", S_IRUGO, root,
				    connector, &i915_dpcd_fops);

	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
		debugfs_create_file("i915_panel_timings", S_IRUGO, root,
				    connector, &i915_panel_fops);

	return 0;
}