/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/debugfs.h>
#include <linux/sort.h>
#include <linux/sched/mm.h>
#include "intel_drv.h"
#include "intel_guc_submission.h"
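/*
 * Descriptive note (added): this file implements the i915 debugfs layer.
 * Each i915_*_info() function below is a seq_file show() callback that dumps
 * one slice of driver state (objects, interrupts, frequencies, power states)
 * under the DRM debugfs directory for the device's minor.
 */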
static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
{
	return to_i915(node->minor->dev);
}
static int i915_capabilities(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const struct intel_device_info *info = INTEL_INFO(dev_priv);
	struct drm_printer p = drm_seq_file_printer(m);

	seq_printf(m, "gen: %d\n", INTEL_GEN(dev_priv));
	seq_printf(m, "platform: %s\n", intel_platform_name(info->platform));
	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev_priv));

	intel_device_info_dump_flags(info, &p);
	intel_device_info_dump_runtime(info, &p);
	intel_driver_caps_print(&dev_priv->caps, &p);

	kernel_param_lock(THIS_MODULE);
	i915_params_dump(&i915_modparams, &p);
	kernel_param_unlock(THIS_MODULE);

	return 0;
}
static char get_active_flag(struct drm_i915_gem_object *obj)
{
	return i915_gem_object_is_active(obj) ? '*' : ' ';
}

static char get_pin_flag(struct drm_i915_gem_object *obj)
{
	return obj->pin_global ? 'p' : ' ';
}

static char get_tiling_flag(struct drm_i915_gem_object *obj)
{
	switch (i915_gem_object_get_tiling(obj)) {
	default:
	case I915_TILING_NONE: return ' ';
	case I915_TILING_X: return 'X';
	case I915_TILING_Y: return 'Y';
	}
}

static char get_global_flag(struct drm_i915_gem_object *obj)
{
	return obj->userfault_count ? 'g' : ' ';
}

static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
{
	return obj->mm.mapping ? 'M' : ' ';
}
static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
{
	u64 size = 0;
	struct i915_vma *vma;

	for_each_ggtt_vma(vma, obj) {
		if (drm_mm_node_allocated(&vma->node))
			size += vma->node.size;
	}

	return size;
}
static const char *
stringify_page_sizes(unsigned int page_sizes, char *buf, size_t len)
{
	size_t x = 0;

	switch (page_sizes) {
	case 0:
		return "";
	case I915_GTT_PAGE_SIZE_4K:
		return "4K";
	case I915_GTT_PAGE_SIZE_64K:
		return "64K";
	case I915_GTT_PAGE_SIZE_2M:
		return "2M";
	default:
		if (!buf)
			return "M";

		if (page_sizes & I915_GTT_PAGE_SIZE_2M)
			x += snprintf(buf + x, len - x, "2M, ");
		if (page_sizes & I915_GTT_PAGE_SIZE_64K)
			x += snprintf(buf + x, len - x, "64K, ");
		if (page_sizes & I915_GTT_PAGE_SIZE_4K)
			x += snprintf(buf + x, len - x, "4K, ");

		/* the last ", " is a placeholder, so strip it */
		buf[x - 2] = '\0';

		return buf;
	}
}
static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct intel_engine_cs *engine;
	struct i915_vma *vma;
	unsigned int frontbuffer_bits;
	int pin_count = 0;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	seq_printf(m, "%pK: %c%c%c%c%c %8zdKiB %02x %02x %s%s%s",
		   &obj->base,
		   get_active_flag(obj),
		   get_pin_flag(obj),
		   get_tiling_flag(obj),
		   get_global_flag(obj),
		   get_pin_mapped_flag(obj),
		   obj->base.size / 1024,
		   obj->read_domains,
		   obj->write_domain,
		   i915_cache_level_str(dev_priv, obj->cache_level),
		   obj->mm.dirty ? " dirty" : "",
		   obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : "");
	if (obj->base.name)
		seq_printf(m, " (name: %d)", obj->base.name);
	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (i915_vma_is_pinned(vma))
			pin_count++;
	}
	seq_printf(m, " (pinned x %d)", pin_count);
	if (obj->pin_global)
		seq_printf(m, " (global)");
	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (!drm_mm_node_allocated(&vma->node))
			continue;

		seq_printf(m, " (%sgtt offset: %08llx, size: %08llx, pages: %s",
			   i915_vma_is_ggtt(vma) ? "g" : "pp",
			   vma->node.start, vma->node.size,
			   stringify_page_sizes(vma->page_sizes.gtt, NULL, 0));
		if (i915_vma_is_ggtt(vma)) {
			switch (vma->ggtt_view.type) {
			case I915_GGTT_VIEW_NORMAL:
				seq_puts(m, ", normal");
				break;

			case I915_GGTT_VIEW_PARTIAL:
				seq_printf(m, ", partial [%08llx+%x]",
					   vma->ggtt_view.partial.offset << PAGE_SHIFT,
					   vma->ggtt_view.partial.size << PAGE_SHIFT);
				break;

			case I915_GGTT_VIEW_ROTATED:
				seq_printf(m, ", rotated [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]",
					   vma->ggtt_view.rotated.plane[0].width,
					   vma->ggtt_view.rotated.plane[0].height,
					   vma->ggtt_view.rotated.plane[0].stride,
					   vma->ggtt_view.rotated.plane[0].offset,
					   vma->ggtt_view.rotated.plane[1].width,
					   vma->ggtt_view.rotated.plane[1].height,
					   vma->ggtt_view.rotated.plane[1].stride,
					   vma->ggtt_view.rotated.plane[1].offset);
				break;

			default:
				MISSING_CASE(vma->ggtt_view.type);
				break;
			}
		}
		if (vma->fence)
			seq_printf(m, " , fence: %d%s",
				   vma->fence->id,
				   i915_gem_active_isset(&vma->last_fence) ? "*" : "");
		seq_puts(m, ")");
	}
	if (obj->stolen)
		seq_printf(m, " (stolen: %08llx)", obj->stolen->start);

	engine = i915_gem_object_last_write_engine(obj);
	if (engine)
		seq_printf(m, " (%s)", engine->name);

	frontbuffer_bits = atomic_read(&obj->frontbuffer_bits);
	if (frontbuffer_bits)
		seq_printf(m, " (frontbuffer: 0x%03x)", frontbuffer_bits);
}
static int obj_rank_by_stolen(const void *A, const void *B)
{
	const struct drm_i915_gem_object *a =
		*(const struct drm_i915_gem_object **)A;
	const struct drm_i915_gem_object *b =
		*(const struct drm_i915_gem_object **)B;

	if (a->stolen->start < b->stolen->start)
		return -1;
	if (a->stolen->start > b->stolen->start)
		return 1;
	return 0;
}
static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_i915_gem_object **objects;
	struct drm_i915_gem_object *obj;
	u64 total_obj_size, total_gtt_size;
	unsigned long total, count, n;
	int ret;

	total = READ_ONCE(dev_priv->mm.object_count);
	objects = kvmalloc_array(total, sizeof(*objects), GFP_KERNEL);
	if (!objects)
		return -ENOMEM;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out;

	total_obj_size = total_gtt_size = count = 0;

	spin_lock(&dev_priv->mm.obj_lock);
	list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
		if (count == total)
			break;

		if (obj->stolen == NULL)
			continue;

		objects[count++] = obj;
		total_obj_size += obj->base.size;
		total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
	}
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
		if (count == total)
			break;

		if (obj->stolen == NULL)
			continue;

		objects[count++] = obj;
		total_obj_size += obj->base.size;
	}
	spin_unlock(&dev_priv->mm.obj_lock);

	sort(objects, count, sizeof(*objects), obj_rank_by_stolen, NULL);

	seq_puts(m, "Stolen:\n");
	for (n = 0; n < count; n++) {
		seq_puts(m, "   ");
		describe_obj(m, objects[n]);
		seq_putc(m, '\n');
	}
	seq_printf(m, "Total %lu objects, %llu bytes, %llu GTT size\n",
		   count, total_obj_size, total_gtt_size);

	mutex_unlock(&dev->struct_mutex);
out:
	kvfree(objects);
	return ret;
}
struct file_stats {
	struct drm_i915_file_private *file_priv;
	unsigned long count;
	u64 total, unbound;
	u64 global, shared;
	u64 active, inactive;
};
static int per_file_stats(int id, void *ptr, void *data)
{
	struct drm_i915_gem_object *obj = ptr;
	struct file_stats *stats = data;
	struct i915_vma *vma;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	stats->count++;
	stats->total += obj->base.size;
	if (!obj->bind_count)
		stats->unbound += obj->base.size;
	if (obj->base.name || obj->base.dma_buf)
		stats->shared += obj->base.size;

	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (!drm_mm_node_allocated(&vma->node))
			continue;

		if (i915_vma_is_ggtt(vma)) {
			stats->global += vma->node.size;
		} else {
			struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vma->vm);

			if (ppgtt->vm.file != stats->file_priv)
				continue;
		}

		if (i915_vma_is_active(vma))
			stats->active += vma->node.size;
		else
			stats->inactive += vma->node.size;
	}

	return 0;
}
#define print_file_stats(m, name, stats) do { \
	if (stats.count) \
		seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu global, %llu shared, %llu unbound)\n", \
			   name, \
			   stats.count, \
			   stats.total, \
			   stats.active, \
			   stats.inactive, \
			   stats.global, \
			   stats.shared, \
			   stats.unbound); \
} while (0)
static void print_batch_pool_stats(struct seq_file *m,
				   struct drm_i915_private *dev_priv)
{
	struct drm_i915_gem_object *obj;
	struct file_stats stats;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int j;

	memset(&stats, 0, sizeof(stats));

	for_each_engine(engine, dev_priv, id) {
		for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
			list_for_each_entry(obj,
					    &engine->batch_pool.cache_list[j],
					    batch_pool_link)
				per_file_stats(0, obj, &stats);
		}
	}

	print_file_stats(m, "[k]batch pool", stats);
}
static int per_file_ctx_stats(int idx, void *ptr, void *data)
{
	struct i915_gem_context *ctx = ptr;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, ctx->i915, id) {
		struct intel_context *ce = to_intel_context(ctx, engine);

		if (ce->state)
			per_file_stats(0, ce->state->obj, data);
		if (ce->ring)
			per_file_stats(0, ce->ring->vma->obj, data);
	}

	return 0;
}
static void print_context_stats(struct seq_file *m,
				struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct file_stats stats;
	struct drm_file *file;

	memset(&stats, 0, sizeof(stats));

	mutex_lock(&dev->struct_mutex);
	if (dev_priv->kernel_context)
		per_file_ctx_stats(0, dev_priv->kernel_context, &stats);

	list_for_each_entry(file, &dev->filelist, lhead) {
		struct drm_i915_file_private *fpriv = file->driver_priv;
		idr_for_each(&fpriv->context_idr, per_file_ctx_stats, &stats);
	}
	mutex_unlock(&dev->struct_mutex);

	print_file_stats(m, "[k]contexts", stats);
}
static int i915_gem_object_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	u32 count, mapped_count, purgeable_count, dpy_count, huge_count;
	u64 size, mapped_size, purgeable_size, dpy_size, huge_size;
	struct drm_i915_gem_object *obj;
	unsigned int page_sizes = 0;
	struct drm_file *file;
	char buf[80];
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "%u objects, %llu bytes\n",
		   dev_priv->mm.object_count,
		   dev_priv->mm.object_memory);

	size = count = 0;
	mapped_size = mapped_count = 0;
	purgeable_size = purgeable_count = 0;
	huge_size = huge_count = 0;

	spin_lock(&dev_priv->mm.obj_lock);
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
		size += obj->base.size;
		++count;

		if (obj->mm.madv == I915_MADV_DONTNEED) {
			purgeable_size += obj->base.size;
			++purgeable_count;
		}

		if (obj->mm.mapping) {
			mapped_count++;
			mapped_size += obj->base.size;
		}

		if (obj->mm.page_sizes.sg > I915_GTT_PAGE_SIZE) {
			huge_count++;
			huge_size += obj->base.size;
			page_sizes |= obj->mm.page_sizes.sg;
		}
	}
	seq_printf(m, "%u unbound objects, %llu bytes\n", count, size);

	size = count = dpy_size = dpy_count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
		size += obj->base.size;
		++count;

		if (obj->pin_global) {
			dpy_size += obj->base.size;
			++dpy_count;
		}

		if (obj->mm.madv == I915_MADV_DONTNEED) {
			purgeable_size += obj->base.size;
			++purgeable_count;
		}

		if (obj->mm.mapping) {
			mapped_count++;
			mapped_size += obj->base.size;
		}

		if (obj->mm.page_sizes.sg > I915_GTT_PAGE_SIZE) {
			huge_count++;
			huge_size += obj->base.size;
			page_sizes |= obj->mm.page_sizes.sg;
		}
	}
	spin_unlock(&dev_priv->mm.obj_lock);

	seq_printf(m, "%u bound objects, %llu bytes\n",
		   count, size);
	seq_printf(m, "%u purgeable objects, %llu bytes\n",
		   purgeable_count, purgeable_size);
	seq_printf(m, "%u mapped objects, %llu bytes\n",
		   mapped_count, mapped_size);
	seq_printf(m, "%u huge-paged objects (%s) %llu bytes\n",
		   huge_count,
		   stringify_page_sizes(page_sizes, buf, sizeof(buf)),
		   huge_size);
	seq_printf(m, "%u display objects (globally pinned), %llu bytes\n",
		   dpy_count, dpy_size);

	seq_printf(m, "%llu [%pa] gtt total\n",
		   ggtt->vm.total, &ggtt->mappable_end);
	seq_printf(m, "Supported page sizes: %s\n",
		   stringify_page_sizes(INTEL_INFO(dev_priv)->page_sizes,
					buf, sizeof(buf)));

	seq_putc(m, '\n');
	print_batch_pool_stats(m, dev_priv);
	mutex_unlock(&dev->struct_mutex);

	mutex_lock(&dev->filelist_mutex);
	print_context_stats(m, dev_priv);
	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
		struct file_stats stats;
		struct drm_i915_file_private *file_priv = file->driver_priv;
		struct i915_request *request;
		struct task_struct *task;

		mutex_lock(&dev->struct_mutex);

		memset(&stats, 0, sizeof(stats));
		stats.file_priv = file->driver_priv;
		spin_lock(&file->table_lock);
		idr_for_each(&file->object_idr, per_file_stats, &stats);
		spin_unlock(&file->table_lock);
		/*
		 * Although we have a valid reference on file->pid, that does
		 * not guarantee that the task_struct who called get_pid() is
		 * still alive (e.g. get_pid(current) => fork() => exit()).
		 * Therefore, we need to protect this ->comm access using RCU.
		 */
		request = list_first_entry_or_null(&file_priv->mm.request_list,
						   struct i915_request,
						   client_link);
		rcu_read_lock();
		task = pid_task(request && request->gem_context->pid ?
				request->gem_context->pid : file->pid,
				PIDTYPE_PID);
		print_file_stats(m, task ? task->comm : "<unknown>", stats);
		rcu_read_unlock();

		mutex_unlock(&dev->struct_mutex);
	}
	mutex_unlock(&dev->filelist_mutex);

	return 0;
}
static int i915_gem_gtt_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_i915_private *dev_priv = node_to_i915(node);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_i915_gem_object **objects;
	struct drm_i915_gem_object *obj;
	u64 total_obj_size, total_gtt_size;
	unsigned long nobject, n;
	int count, ret;

	nobject = READ_ONCE(dev_priv->mm.object_count);
	objects = kvmalloc_array(nobject, sizeof(*objects), GFP_KERNEL);
	if (!objects)
		return -ENOMEM;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	count = 0;
	spin_lock(&dev_priv->mm.obj_lock);
	list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
		objects[count++] = obj;
		if (count == nobject)
			break;
	}
	spin_unlock(&dev_priv->mm.obj_lock);

	total_obj_size = total_gtt_size = 0;
	for (n = 0; n < count; n++) {
		obj = objects[n];

		seq_puts(m, "   ");
		describe_obj(m, obj);
		seq_putc(m, '\n');
		total_obj_size += obj->base.size;
		total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
	}

	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
		   count, total_obj_size, total_gtt_size);
	kvfree(objects);

	return 0;
}
static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_i915_gem_object *obj;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int total = 0;
	int ret, j;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	for_each_engine(engine, dev_priv, id) {
		for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
			int count;

			count = 0;
			list_for_each_entry(obj,
					    &engine->batch_pool.cache_list[j],
					    batch_pool_link)
				count++;
			seq_printf(m, "%s cache[%d]: %d objects\n",
				   engine->name, j, count);

			list_for_each_entry(obj,
					    &engine->batch_pool.cache_list[j],
					    batch_pool_link) {
				seq_puts(m, "   ");
				describe_obj(m, obj);
				seq_putc(m, '\n');
			}

			total += count;
		}
	}

	seq_printf(m, "total: %d\n", total);

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
static void gen8_display_interrupt_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	int pipe;

	for_each_pipe(dev_priv, pipe) {
		enum intel_display_power_domain power_domain;

		power_domain = POWER_DOMAIN_PIPE(pipe);
		if (!intel_display_power_get_if_enabled(dev_priv,
							power_domain)) {
			seq_printf(m, "Pipe %c power disabled\n",
				   pipe_name(pipe));
			continue;
		}
		seq_printf(m, "Pipe %c IMR:\t%08x\n",
			   pipe_name(pipe),
			   I915_READ(GEN8_DE_PIPE_IMR(pipe)));
		seq_printf(m, "Pipe %c IIR:\t%08x\n",
			   pipe_name(pipe),
			   I915_READ(GEN8_DE_PIPE_IIR(pipe)));
		seq_printf(m, "Pipe %c IER:\t%08x\n",
			   pipe_name(pipe),
			   I915_READ(GEN8_DE_PIPE_IER(pipe)));

		intel_display_power_put(dev_priv, power_domain);
	}

	seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
		   I915_READ(GEN8_DE_PORT_IMR));
	seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
		   I915_READ(GEN8_DE_PORT_IIR));
	seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
		   I915_READ(GEN8_DE_PORT_IER));

	seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
		   I915_READ(GEN8_DE_MISC_IMR));
	seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
		   I915_READ(GEN8_DE_MISC_IIR));
	seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
		   I915_READ(GEN8_DE_MISC_IER));

	seq_printf(m, "PCU interrupt mask:\t%08x\n",
		   I915_READ(GEN8_PCU_IMR));
	seq_printf(m, "PCU interrupt identity:\t%08x\n",
		   I915_READ(GEN8_PCU_IIR));
	seq_printf(m, "PCU interrupt enable:\t%08x\n",
		   I915_READ(GEN8_PCU_IER));
}
static int i915_interrupt_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int i, pipe;

	intel_runtime_pm_get(dev_priv);

	if (IS_CHERRYVIEW(dev_priv)) {
		seq_printf(m, "Master Interrupt Control:\t%08x\n",
			   I915_READ(GEN8_MASTER_IRQ));

		seq_printf(m, "Display IER:\t%08x\n",
			   I915_READ(VLV_IER));
		seq_printf(m, "Display IIR:\t%08x\n",
			   I915_READ(VLV_IIR));
		seq_printf(m, "Display IIR_RW:\t%08x\n",
			   I915_READ(VLV_IIR_RW));
		seq_printf(m, "Display IMR:\t%08x\n",
			   I915_READ(VLV_IMR));
		for_each_pipe(dev_priv, pipe) {
			enum intel_display_power_domain power_domain;

			power_domain = POWER_DOMAIN_PIPE(pipe);
			if (!intel_display_power_get_if_enabled(dev_priv,
								power_domain)) {
				seq_printf(m, "Pipe %c power disabled\n",
					   pipe_name(pipe));
				continue;
			}

			seq_printf(m, "Pipe %c stat:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));

			intel_display_power_put(dev_priv, power_domain);
		}

		intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
		seq_printf(m, "Port hotplug:\t%08x\n",
			   I915_READ(PORT_HOTPLUG_EN));
		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
			   I915_READ(VLV_DPFLIPSTAT));
		seq_printf(m, "DPINVGTT:\t%08x\n",
			   I915_READ(DPINVGTT));
		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);

		for (i = 0; i < 4; i++) {
			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IMR(i)));
			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IIR(i)));
			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IER(i)));
		}

		seq_printf(m, "PCU interrupt mask:\t%08x\n",
			   I915_READ(GEN8_PCU_IMR));
		seq_printf(m, "PCU interrupt identity:\t%08x\n",
			   I915_READ(GEN8_PCU_IIR));
		seq_printf(m, "PCU interrupt enable:\t%08x\n",
			   I915_READ(GEN8_PCU_IER));
	} else if (INTEL_GEN(dev_priv) >= 11) {
		seq_printf(m, "Master Interrupt Control:  %08x\n",
			   I915_READ(GEN11_GFX_MSTR_IRQ));

		seq_printf(m, "Render/Copy Intr Enable:   %08x\n",
			   I915_READ(GEN11_RENDER_COPY_INTR_ENABLE));
		seq_printf(m, "VCS/VECS Intr Enable:      %08x\n",
			   I915_READ(GEN11_VCS_VECS_INTR_ENABLE));
		seq_printf(m, "GUC/SG Intr Enable:\t   %08x\n",
			   I915_READ(GEN11_GUC_SG_INTR_ENABLE));
		seq_printf(m, "GPM/WGBOXPERF Intr Enable: %08x\n",
			   I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE));
		seq_printf(m, "Crypto Intr Enable:\t   %08x\n",
			   I915_READ(GEN11_CRYPTO_RSVD_INTR_ENABLE));
		seq_printf(m, "GUnit/CSME Intr Enable:\t   %08x\n",
			   I915_READ(GEN11_GUNIT_CSME_INTR_ENABLE));

		seq_printf(m, "Display Interrupt Control:\t%08x\n",
			   I915_READ(GEN11_DISPLAY_INT_CTL));

		gen8_display_interrupt_info(m);
	} else if (INTEL_GEN(dev_priv) >= 8) {
		seq_printf(m, "Master Interrupt Control:\t%08x\n",
			   I915_READ(GEN8_MASTER_IRQ));

		for (i = 0; i < 4; i++) {
			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IMR(i)));
			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IIR(i)));
			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IER(i)));
		}

		gen8_display_interrupt_info(m);
	} else if (IS_VALLEYVIEW(dev_priv)) {
		seq_printf(m, "Display IER:\t%08x\n",
			   I915_READ(VLV_IER));
		seq_printf(m, "Display IIR:\t%08x\n",
			   I915_READ(VLV_IIR));
		seq_printf(m, "Display IIR_RW:\t%08x\n",
			   I915_READ(VLV_IIR_RW));
		seq_printf(m, "Display IMR:\t%08x\n",
			   I915_READ(VLV_IMR));
		for_each_pipe(dev_priv, pipe) {
			enum intel_display_power_domain power_domain;

			power_domain = POWER_DOMAIN_PIPE(pipe);
			if (!intel_display_power_get_if_enabled(dev_priv,
								power_domain)) {
				seq_printf(m, "Pipe %c power disabled\n",
					   pipe_name(pipe));
				continue;
			}

			seq_printf(m, "Pipe %c stat:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));
			intel_display_power_put(dev_priv, power_domain);
		}

		seq_printf(m, "Master IER:\t%08x\n",
			   I915_READ(VLV_MASTER_IER));

		seq_printf(m, "Render IER:\t%08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Render IIR:\t%08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Render IMR:\t%08x\n",
			   I915_READ(GTIMR));

		seq_printf(m, "PM IER:\t\t%08x\n",
			   I915_READ(GEN6_PMIER));
		seq_printf(m, "PM IIR:\t\t%08x\n",
			   I915_READ(GEN6_PMIIR));
		seq_printf(m, "PM IMR:\t\t%08x\n",
			   I915_READ(GEN6_PMIMR));

		seq_printf(m, "Port hotplug:\t%08x\n",
			   I915_READ(PORT_HOTPLUG_EN));
		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
			   I915_READ(VLV_DPFLIPSTAT));
		seq_printf(m, "DPINVGTT:\t%08x\n",
			   I915_READ(DPINVGTT));
	} else if (!HAS_PCH_SPLIT(dev_priv)) {
		seq_printf(m, "Interrupt enable:    %08x\n",
			   I915_READ(IER));
		seq_printf(m, "Interrupt identity:  %08x\n",
			   I915_READ(IIR));
		seq_printf(m, "Interrupt mask:      %08x\n",
			   I915_READ(IMR));
		for_each_pipe(dev_priv, pipe)
			seq_printf(m, "Pipe %c stat:         %08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));
	} else {
		seq_printf(m, "North Display Interrupt enable:\t\t%08x\n",
			   I915_READ(DEIER));
		seq_printf(m, "North Display Interrupt identity:\t%08x\n",
			   I915_READ(DEIIR));
		seq_printf(m, "North Display Interrupt mask:\t\t%08x\n",
			   I915_READ(DEIMR));
		seq_printf(m, "South Display Interrupt enable:\t\t%08x\n",
			   I915_READ(SDEIER));
		seq_printf(m, "South Display Interrupt identity:\t%08x\n",
			   I915_READ(SDEIIR));
		seq_printf(m, "South Display Interrupt mask:\t\t%08x\n",
			   I915_READ(SDEIMR));
		seq_printf(m, "Graphics Interrupt enable:\t\t%08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Graphics Interrupt identity:\t\t%08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Graphics Interrupt mask:\t\t%08x\n",
			   I915_READ(GTIMR));
	}

	if (INTEL_GEN(dev_priv) >= 11) {
		seq_printf(m, "RCS Intr Mask:\t %08x\n",
			   I915_READ(GEN11_RCS0_RSVD_INTR_MASK));
		seq_printf(m, "BCS Intr Mask:\t %08x\n",
			   I915_READ(GEN11_BCS_RSVD_INTR_MASK));
		seq_printf(m, "VCS0/VCS1 Intr Mask:\t %08x\n",
			   I915_READ(GEN11_VCS0_VCS1_INTR_MASK));
		seq_printf(m, "VCS2/VCS3 Intr Mask:\t %08x\n",
			   I915_READ(GEN11_VCS2_VCS3_INTR_MASK));
		seq_printf(m, "VECS0/VECS1 Intr Mask:\t %08x\n",
			   I915_READ(GEN11_VECS0_VECS1_INTR_MASK));
		seq_printf(m, "GUC/SG Intr Mask:\t %08x\n",
			   I915_READ(GEN11_GUC_SG_INTR_MASK));
		seq_printf(m, "GPM/WGBOXPERF Intr Mask: %08x\n",
			   I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK));
		seq_printf(m, "Crypto Intr Mask:\t %08x\n",
			   I915_READ(GEN11_CRYPTO_RSVD_INTR_MASK));
		seq_printf(m, "Gunit/CSME Intr Mask:\t %08x\n",
			   I915_READ(GEN11_GUNIT_CSME_INTR_MASK));
	} else if (INTEL_GEN(dev_priv) >= 6) {
		for_each_engine(engine, dev_priv, id) {
			seq_printf(m,
				   "Graphics Interrupt mask (%s): %08x\n",
				   engine->name, I915_READ_IMR(engine));
		}
	}

	intel_runtime_pm_put(dev_priv);

	return 0;
}
static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	int i, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
	for (i = 0; i < dev_priv->num_fence_regs; i++) {
		struct i915_vma *vma = dev_priv->fence_regs[i].vma;

		seq_printf(m, "Fence %d, pin count = %d, object = ",
			   i, dev_priv->fence_regs[i].pin_count);
		if (!vma)
			seq_puts(m, "unused");
		else
			describe_obj(m, vma->obj);
		seq_putc(m, '\n');
	}

	mutex_unlock(&dev->struct_mutex);
	return 0;
}
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
static ssize_t gpu_state_read(struct file *file, char __user *ubuf,
			      size_t count, loff_t *pos)
{
	struct i915_gpu_state *error = file->private_data;
	struct drm_i915_error_state_buf str;
	ssize_t ret;
	loff_t tmp;

	if (!error)
		return 0;

	ret = i915_error_state_buf_init(&str, error->i915, count, *pos);
	if (ret)
		return ret;

	ret = i915_error_state_to_str(&str, error);
	if (ret)
		goto out;

	tmp = 0;
	ret = simple_read_from_buffer(ubuf, count, &tmp, str.buf, str.bytes);
	if (ret < 0)
		goto out;

	*pos = str.start + ret;
out:
	i915_error_state_buf_release(&str);
	return ret;
}
static int gpu_state_release(struct inode *inode, struct file *file)
{
	i915_gpu_state_put(file->private_data);
	return 0;
}

static int i915_gpu_info_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *i915 = inode->i_private;
	struct i915_gpu_state *gpu;

	intel_runtime_pm_get(i915);
	gpu = i915_capture_gpu_state(i915);
	intel_runtime_pm_put(i915);
	if (!gpu)
		return -ENOMEM;

	file->private_data = gpu;
	return 0;
}

static const struct file_operations i915_gpu_info_fops = {
	.owner = THIS_MODULE,
	.open = i915_gpu_info_open,
	.read = gpu_state_read,
	.llseek = default_llseek,
	.release = gpu_state_release,
};
static ssize_t
i915_error_state_write(struct file *filp,
		       const char __user *ubuf,
		       size_t cnt,
		       loff_t *ppos)
{
	struct i915_gpu_state *error = filp->private_data;

	if (!error)
		return 0;

	DRM_DEBUG_DRIVER("Resetting error state\n");
	i915_reset_error_state(error->i915);

	return cnt;
}

static int i915_error_state_open(struct inode *inode, struct file *file)
{
	file->private_data = i915_first_error_state(inode->i_private);
	return 0;
}

static const struct file_operations i915_error_state_fops = {
	.owner = THIS_MODULE,
	.open = i915_error_state_open,
	.read = gpu_state_read,
	.write = i915_error_state_write,
	.llseek = default_llseek,
	.release = gpu_state_release,
};
#endif
static int
i915_next_seqno_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	struct drm_device *dev = &dev_priv->drm;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	intel_runtime_pm_get(dev_priv);
	ret = i915_gem_set_global_seqno(dev, val);
	intel_runtime_pm_put(dev_priv);

	mutex_unlock(&dev->struct_mutex);

	return ret;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops,
			NULL, i915_next_seqno_set,
			"0x%llx\n");
static int i915_frequency_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	int ret = 0;

	intel_runtime_pm_get(dev_priv);

	if (IS_GEN5(dev_priv)) {
		u16 rgvswctl = I915_READ16(MEMSWCTL);
		u16 rgvstat = I915_READ16(MEMSTAT_ILK);

		seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
		seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
		seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
			   MEMSTAT_VID_SHIFT);
		seq_printf(m, "Current P-state: %d\n",
			   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		u32 rpmodectl, freq_sts;

		mutex_lock(&dev_priv->pcu_lock);

		rpmodectl = I915_READ(GEN6_RP_CONTROL);
		seq_printf(m, "Video Turbo Mode: %s\n",
			   yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
		seq_printf(m, "HW control enabled: %s\n",
			   yesno(rpmodectl & GEN6_RP_ENABLE));
		seq_printf(m, "SW control enabled: %s\n",
			   yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
				 GEN6_RP_MEDIA_SW_MODE));

		freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
		seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
		seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);

		seq_printf(m, "actual GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));

		seq_printf(m, "current GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->cur_freq));

		seq_printf(m, "max GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->max_freq));

		seq_printf(m, "min GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->min_freq));

		seq_printf(m, "idle GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->idle_freq));

		seq_printf(m,
			   "efficient (RPe) frequency: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->efficient_freq));
		mutex_unlock(&dev_priv->pcu_lock);
	} else if (INTEL_GEN(dev_priv) >= 6) {
		u32 rp_state_limits;
		u32 gt_perf_status;
		u32 rp_state_cap;
		u32 rpmodectl, rpinclimit, rpdeclimit;
		u32 rpstat, cagf, reqf;
		u32 rpupei, rpcurup, rpprevup;
		u32 rpdownei, rpcurdown, rpprevdown;
		u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
		int max_freq;

		rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
		if (IS_GEN9_LP(dev_priv)) {
			rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
			gt_perf_status = I915_READ(BXT_GT_PERF_STATUS);
		} else {
			rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
			gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
		}

		/* RPSTAT1 is in the GT power well */
		intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

		reqf = I915_READ(GEN6_RPNSWREQ);
		if (INTEL_GEN(dev_priv) >= 9)
			reqf >>= 23;
		else {
			reqf &= ~GEN6_TURBO_DISABLE;
			if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
				reqf >>= 24;
			else
				reqf >>= 25;
		}
		reqf = intel_gpu_freq(dev_priv, reqf);

		rpmodectl = I915_READ(GEN6_RP_CONTROL);
		rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
		rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD);

		rpstat = I915_READ(GEN6_RPSTAT1);
		rpupei = I915_READ(GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
		rpcurup = I915_READ(GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK;
		rpprevup = I915_READ(GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK;
		rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
		rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
		rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
		cagf = intel_gpu_freq(dev_priv,
				      intel_get_cagf(dev_priv, rpstat));

		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

		if (INTEL_GEN(dev_priv) >= 11) {
			pm_ier = I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE);
			pm_imr = I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK);
			/*
			 * The equivalent to the PM ISR & IIR cannot be read
			 * without affecting the current state of the system
			 */
			pm_isr = 0;
			pm_iir = 0;
		} else if (INTEL_GEN(dev_priv) >= 8) {
			pm_ier = I915_READ(GEN8_GT_IER(2));
			pm_imr = I915_READ(GEN8_GT_IMR(2));
			pm_isr = I915_READ(GEN8_GT_ISR(2));
			pm_iir = I915_READ(GEN8_GT_IIR(2));
		} else {
			pm_ier = I915_READ(GEN6_PMIER);
			pm_imr = I915_READ(GEN6_PMIMR);
			pm_isr = I915_READ(GEN6_PMISR);
			pm_iir = I915_READ(GEN6_PMIIR);
		}
		pm_mask = I915_READ(GEN6_PMINTRMSK);

		seq_printf(m, "Video Turbo Mode: %s\n",
			   yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
		seq_printf(m, "HW control enabled: %s\n",
			   yesno(rpmodectl & GEN6_RP_ENABLE));
		seq_printf(m, "SW control enabled: %s\n",
			   yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
				 GEN6_RP_MEDIA_SW_MODE));

		seq_printf(m, "PM IER=0x%08x IMR=0x%08x, MASK=0x%08x\n",
			   pm_ier, pm_imr, pm_mask);
		if (INTEL_GEN(dev_priv) <= 10)
			seq_printf(m, "PM ISR=0x%08x IIR=0x%08x\n",
				   pm_isr, pm_iir);
		seq_printf(m, "pm_intrmsk_mbz: 0x%08x\n",
			   rps->pm_intrmsk_mbz);
		seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
		seq_printf(m, "Render p-state ratio: %d\n",
			   (gt_perf_status & (INTEL_GEN(dev_priv) >= 9 ? 0x1ff00 : 0xff00)) >> 8);
		seq_printf(m, "Render p-state VID: %d\n",
			   gt_perf_status & 0xff);
		seq_printf(m, "Render p-state limit: %d\n",
			   rp_state_limits & 0xff);
		seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
		seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl);
		seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit);
		seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
		seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
		seq_printf(m, "CAGF: %dMHz\n", cagf);
		seq_printf(m, "RP CUR UP EI: %d (%dus)\n",
			   rpupei, GT_PM_INTERVAL_TO_US(dev_priv, rpupei));
		seq_printf(m, "RP CUR UP: %d (%dus)\n",
			   rpcurup, GT_PM_INTERVAL_TO_US(dev_priv, rpcurup));
		seq_printf(m, "RP PREV UP: %d (%dus)\n",
			   rpprevup, GT_PM_INTERVAL_TO_US(dev_priv, rpprevup));
		seq_printf(m, "Up threshold: %d%%\n",
			   rps->power.up_threshold);

		seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n",
			   rpdownei, GT_PM_INTERVAL_TO_US(dev_priv, rpdownei));
		seq_printf(m, "RP CUR DOWN: %d (%dus)\n",
			   rpcurdown, GT_PM_INTERVAL_TO_US(dev_priv, rpcurdown));
		seq_printf(m, "RP PREV DOWN: %d (%dus)\n",
			   rpprevdown, GT_PM_INTERVAL_TO_US(dev_priv, rpprevdown));
		seq_printf(m, "Down threshold: %d%%\n",
			   rps->power.down_threshold);

		max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 0 :
			    rp_state_cap >> 16) & 0xff;
		max_freq *= (IS_GEN9_BC(dev_priv) ||
			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, max_freq));

		max_freq = (rp_state_cap & 0xff00) >> 8;
		max_freq *= (IS_GEN9_BC(dev_priv) ||
			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, max_freq));

		max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 16 :
			    rp_state_cap >> 0) & 0xff;
		max_freq *= (IS_GEN9_BC(dev_priv) ||
			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, max_freq));
		seq_printf(m, "Max overclocked frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, rps->max_freq));

		seq_printf(m, "Current freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->cur_freq));
		seq_printf(m, "Actual freq: %d MHz\n", cagf);
		seq_printf(m, "Idle freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->idle_freq));
		seq_printf(m, "Min freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->min_freq));
		seq_printf(m, "Boost freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->boost_freq));
		seq_printf(m, "Max freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->max_freq));
		seq_printf(m,
			   "efficient (RPe) frequency: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->efficient_freq));
	} else {
		seq_puts(m, "no P-state info available\n");
	}

	seq_printf(m, "Current CD clock frequency: %d kHz\n", dev_priv->cdclk.hw.cdclk);
	seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq);
	seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq);

	intel_runtime_pm_put(dev_priv);
	return ret;
}
static void i915_instdone_info(struct drm_i915_private *dev_priv,
			       struct seq_file *m,
			       struct intel_instdone *instdone)
{
	int slice;
	int subslice;

	seq_printf(m, "\t\tINSTDONE: 0x%08x\n",
		   instdone->instdone);

	if (INTEL_GEN(dev_priv) <= 3)
		return;

	seq_printf(m, "\t\tSC_INSTDONE: 0x%08x\n",
		   instdone->slice_common);

	if (INTEL_GEN(dev_priv) <= 6)
		return;

	for_each_instdone_slice_subslice(dev_priv, slice, subslice)
		seq_printf(m, "\t\tSAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
			   slice, subslice, instdone->sampler[slice][subslice]);

	for_each_instdone_slice_subslice(dev_priv, slice, subslice)
		seq_printf(m, "\t\tROW_INSTDONE[%d][%d]: 0x%08x\n",
			   slice, subslice, instdone->row[slice][subslice]);
}
static int i915_hangcheck_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_engine_cs *engine;
	u64 acthd[I915_NUM_ENGINES];
	u32 seqno[I915_NUM_ENGINES];
	struct intel_instdone instdone;
	enum intel_engine_id id;

	if (test_bit(I915_WEDGED, &dev_priv->gpu_error.flags))
		seq_puts(m, "Wedged\n");
	if (test_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags))
		seq_puts(m, "Reset in progress: struct_mutex backoff\n");
	if (test_bit(I915_RESET_HANDOFF, &dev_priv->gpu_error.flags))
		seq_puts(m, "Reset in progress: reset handoff to waiter\n");
	if (waitqueue_active(&dev_priv->gpu_error.wait_queue))
		seq_puts(m, "Waiter holding struct mutex\n");
	if (waitqueue_active(&dev_priv->gpu_error.reset_queue))
		seq_puts(m, "struct_mutex blocked for reset\n");

	if (!i915_modparams.enable_hangcheck) {
		seq_puts(m, "Hangcheck disabled\n");
		return 0;
	}

	intel_runtime_pm_get(dev_priv);

	for_each_engine(engine, dev_priv, id) {
		acthd[id] = intel_engine_get_active_head(engine);
		seqno[id] = intel_engine_get_seqno(engine);
	}

	intel_engine_get_instdone(dev_priv->engine[RCS], &instdone);

	intel_runtime_pm_put(dev_priv);

	if (timer_pending(&dev_priv->gpu_error.hangcheck_work.timer))
		seq_printf(m, "Hangcheck active, timer fires in %dms\n",
			   jiffies_to_msecs(dev_priv->gpu_error.hangcheck_work.timer.expires -
					    jiffies));
	else if (delayed_work_pending(&dev_priv->gpu_error.hangcheck_work))
		seq_puts(m, "Hangcheck active, work pending\n");
	else
		seq_puts(m, "Hangcheck inactive\n");

	seq_printf(m, "GT active? %s\n", yesno(dev_priv->gt.awake));

	for_each_engine(engine, dev_priv, id) {
		struct intel_breadcrumbs *b = &engine->breadcrumbs;
		struct rb_node *rb;

		seq_printf(m, "%s:\n", engine->name);
		seq_printf(m, "\tseqno = %x [current %x, last %x]\n",
			   engine->hangcheck.seqno, seqno[id],
			   intel_engine_last_submit(engine));
		seq_printf(m, "\twaiters? %s, fake irq active? %s, stalled? %s, wedged? %s\n",
			   yesno(intel_engine_has_waiter(engine)),
			   yesno(test_bit(engine->id,
					  &dev_priv->gpu_error.missed_irq_rings)),
			   yesno(engine->hangcheck.stalled),
			   yesno(engine->hangcheck.wedged));

		spin_lock_irq(&b->rb_lock);
		for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
			struct intel_wait *w = rb_entry(rb, typeof(*w), node);

			seq_printf(m, "\t%s [%d] waiting for %x\n",
				   w->tsk->comm, w->tsk->pid, w->seqno);
		}
		spin_unlock_irq(&b->rb_lock);

		seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
			   (long long)engine->hangcheck.acthd,
			   (long long)acthd[id]);
		seq_printf(m, "\taction = %s(%d) %d ms ago\n",
			   hangcheck_action_to_str(engine->hangcheck.action),
			   engine->hangcheck.action,
			   jiffies_to_msecs(jiffies -
					    engine->hangcheck.action_timestamp));

		if (engine->id == RCS) {
			seq_puts(m, "\tinstdone read =\n");

			i915_instdone_info(dev_priv, m, &instdone);

			seq_puts(m, "\tinstdone accu =\n");

			i915_instdone_info(dev_priv, m,
					   &engine->hangcheck.instdone);
		}
	}

	return 0;
}
static int i915_reset_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct i915_gpu_error *error = &dev_priv->gpu_error;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	seq_printf(m, "full gpu reset = %u\n", i915_reset_count(error));

	for_each_engine(engine, dev_priv, id) {
		seq_printf(m, "%s = %u\n", engine->name,
			   i915_reset_engine_count(error, engine));
	}

	return 0;
}
static int ironlake_drpc_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u32 rgvmodectl, rstdbyctl;
	u16 crstandvid;

	rgvmodectl = I915_READ(MEMMODECTL);
	rstdbyctl = I915_READ(RSTDBYCTL);
	crstandvid = I915_READ16(CRSTANDVID);

	seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN));
	seq_printf(m, "Boost freq: %d\n",
		   (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
		   MEMMODE_BOOST_FREQ_SHIFT);
	seq_printf(m, "HW control enabled: %s\n",
		   yesno(rgvmodectl & MEMMODE_HWIDLE_EN));
	seq_printf(m, "SW control enabled: %s\n",
		   yesno(rgvmodectl & MEMMODE_SWMODE_EN));
	seq_printf(m, "Gated voltage change: %s\n",
		   yesno(rgvmodectl & MEMMODE_RCLK_GATE));
	seq_printf(m, "Starting frequency: P%d\n",
		   (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
	seq_printf(m, "Max P-state: P%d\n",
		   (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
	seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
	seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
	seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
	seq_printf(m, "Render standby enabled: %s\n",
		   yesno(!(rstdbyctl & RCX_SW_EXIT)));
	seq_puts(m, "Current RS state: ");
	switch (rstdbyctl & RSX_STATUS_MASK) {
	case RSX_STATUS_ON:
		seq_puts(m, "on\n");
		break;
	case RSX_STATUS_RC1:
		seq_puts(m, "RC1\n");
		break;
	case RSX_STATUS_RC1E:
		seq_puts(m, "RC1E\n");
		break;
	case RSX_STATUS_RS1:
		seq_puts(m, "RS1\n");
		break;
	case RSX_STATUS_RS2:
		seq_puts(m, "RS2 (RC6)\n");
		break;
	case RSX_STATUS_RS3:
		seq_puts(m, "RC3 (RC6+)\n");
		break;
	default:
		seq_puts(m, "unknown\n");
		break;
	}

	return 0;
}
static int i915_forcewake_domains(struct seq_file *m, void *data)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);
	struct intel_uncore_forcewake_domain *fw_domain;
	unsigned int tmp;

	seq_printf(m, "user.bypass_count = %u\n",
		   i915->uncore.user_forcewake.count);

	for_each_fw_domain(fw_domain, i915, tmp)
		seq_printf(m, "%s.wake_count = %u\n",
			   intel_uncore_forcewake_domain_to_str(fw_domain->id),
			   READ_ONCE(fw_domain->wake_count));

	return 0;
}
static void print_rc6_res(struct seq_file *m,
			  const char *title,
			  const i915_reg_t reg)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);

	seq_printf(m, "%s %u (%llu us)\n",
		   title, I915_READ(reg),
		   intel_rc6_residency_us(dev_priv, reg));
}
static int vlv_drpc_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u32 rcctl1, pw_status;

	pw_status = I915_READ(VLV_GTLC_PW_STATUS);
	rcctl1 = I915_READ(GEN6_RC_CONTROL);

	seq_printf(m, "RC6 Enabled: %s\n",
		   yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
				   GEN6_RC_CTL_EI_MODE(1))));
	seq_printf(m, "Render Power Well: %s\n",
		   (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
	seq_printf(m, "Media Power Well: %s\n",
		   (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");

	print_rc6_res(m, "Render RC6 residency since boot:", VLV_GT_RENDER_RC6);
	print_rc6_res(m, "Media RC6 residency since boot:", VLV_GT_MEDIA_RC6);

	return i915_forcewake_domains(m, NULL);
}
static int gen6_drpc_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u32 gt_core_status, rcctl1, rc6vids = 0;
	u32 gen9_powergate_enable = 0, gen9_powergate_status = 0;

	gt_core_status = I915_READ_FW(GEN6_GT_CORE_STATUS);
	trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);

	rcctl1 = I915_READ(GEN6_RC_CONTROL);
	if (INTEL_GEN(dev_priv) >= 9) {
		gen9_powergate_enable = I915_READ(GEN9_PG_ENABLE);
		gen9_powergate_status = I915_READ(GEN9_PWRGT_DOMAIN_STATUS);
	}

	if (INTEL_GEN(dev_priv) <= 7) {
		mutex_lock(&dev_priv->pcu_lock);
		sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS,
				       &rc6vids);
		mutex_unlock(&dev_priv->pcu_lock);
	}

	seq_printf(m, "RC1e Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
	seq_printf(m, "RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
	if (INTEL_GEN(dev_priv) >= 9) {
		seq_printf(m, "Render Well Gating Enabled: %s\n",
			   yesno(gen9_powergate_enable & GEN9_RENDER_PG_ENABLE));
		seq_printf(m, "Media Well Gating Enabled: %s\n",
			   yesno(gen9_powergate_enable & GEN9_MEDIA_PG_ENABLE));
	}
	seq_printf(m, "Deep RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
	seq_printf(m, "Deepest RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
	seq_puts(m, "Current RC state: ");
	switch (gt_core_status & GEN6_RCn_MASK) {
	case GEN6_RC0:
		if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
			seq_puts(m, "Core Power Down\n");
		else
			seq_puts(m, "on\n");
		break;
	case GEN6_RC3:
		seq_puts(m, "RC3\n");
		break;
	case GEN6_RC6:
		seq_puts(m, "RC6\n");
		break;
	case GEN6_RC7:
		seq_puts(m, "RC7\n");
		break;
	default:
		seq_puts(m, "Unknown\n");
		break;
	}

	seq_printf(m, "Core Power Down: %s\n",
		   yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
	if (INTEL_GEN(dev_priv) >= 9) {
		seq_printf(m, "Render Power Well: %s\n",
			   (gen9_powergate_status &
			    GEN9_PWRGT_RENDER_STATUS_MASK) ? "Up" : "Down");
		seq_printf(m, "Media Power Well: %s\n",
			   (gen9_powergate_status &
			    GEN9_PWRGT_MEDIA_STATUS_MASK) ? "Up" : "Down");
	}

	/* Not exactly sure what this is */
	print_rc6_res(m, "RC6 \"Locked to RPn\" residency since boot:",
		      GEN6_GT_GFX_RC6_LOCKED);
	print_rc6_res(m, "RC6 residency since boot:", GEN6_GT_GFX_RC6);
	print_rc6_res(m, "RC6+ residency since boot:", GEN6_GT_GFX_RC6p);
	print_rc6_res(m, "RC6++ residency since boot:", GEN6_GT_GFX_RC6pp);

	if (INTEL_GEN(dev_priv) <= 7) {
		seq_printf(m, "RC6 voltage: %dmV\n",
			   GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
		seq_printf(m, "RC6+ voltage: %dmV\n",
			   GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
		seq_printf(m, "RC6++ voltage: %dmV\n",
			   GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
	}

	return i915_forcewake_domains(m, NULL);
}
static int i915_drpc_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	int err;

	intel_runtime_pm_get(dev_priv);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		err = vlv_drpc_info(m);
	else if (INTEL_GEN(dev_priv) >= 6)
		err = gen6_drpc_info(m);
	else
		err = ironlake_drpc_info(m);

	intel_runtime_pm_put(dev_priv);

	return err;
}
static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);

	seq_printf(m, "FB tracking busy bits: 0x%08x\n",
		   dev_priv->fb_tracking.busy_bits);

	seq_printf(m, "FB tracking flip bits: 0x%08x\n",
		   dev_priv->fb_tracking.flip_bits);

	return 0;
}
static int i915_fbc_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_fbc *fbc = &dev_priv->fbc;

	if (!HAS_FBC(dev_priv))
		return -ENODEV;

	intel_runtime_pm_get(dev_priv);
	mutex_lock(&fbc->lock);

	if (intel_fbc_is_active(dev_priv))
		seq_puts(m, "FBC enabled\n");
	else
		seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason);

	if (intel_fbc_is_active(dev_priv)) {
		u32 mask;

		if (INTEL_GEN(dev_priv) >= 8)
			mask = I915_READ(IVB_FBC_STATUS2) & BDW_FBC_COMP_SEG_MASK;
		else if (INTEL_GEN(dev_priv) >= 7)
			mask = I915_READ(IVB_FBC_STATUS2) & IVB_FBC_COMP_SEG_MASK;
		else if (INTEL_GEN(dev_priv) >= 5)
			mask = I915_READ(ILK_DPFC_STATUS) & ILK_DPFC_COMP_SEG_MASK;
		else if (IS_G4X(dev_priv))
			mask = I915_READ(DPFC_STATUS) & DPFC_COMP_SEG_MASK;
		else
			mask = I915_READ(FBC_STATUS) & (FBC_STAT_COMPRESSING |
							FBC_STAT_COMPRESSED);

		seq_printf(m, "Compressing: %s\n", yesno(mask));
	}

	mutex_unlock(&fbc->lock);
	intel_runtime_pm_put(dev_priv);

	return 0;
}
static int i915_fbc_false_color_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;

	if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
		return -ENODEV;

	*val = dev_priv->fbc.false_color;

	return 0;
}

static int i915_fbc_false_color_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	u32 reg;

	if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
		return -ENODEV;

	mutex_lock(&dev_priv->fbc.lock);

	reg = I915_READ(ILK_DPFC_CONTROL);
	dev_priv->fbc.false_color = val;

	I915_WRITE(ILK_DPFC_CONTROL, val ?
		   (reg | FBC_CTL_FALSE_COLOR) :
		   (reg & ~FBC_CTL_FALSE_COLOR));

	mutex_unlock(&dev_priv->fbc.lock);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_false_color_fops,
			i915_fbc_false_color_get, i915_fbc_false_color_set,
			"%llu\n");
static int i915_ips_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);

	if (!HAS_IPS(dev_priv))
		return -ENODEV;

	intel_runtime_pm_get(dev_priv);

	seq_printf(m, "Enabled by kernel parameter: %s\n",
		   yesno(i915_modparams.enable_ips));

	if (INTEL_GEN(dev_priv) >= 8) {
		seq_puts(m, "Currently: unknown\n");
	} else {
		if (I915_READ(IPS_CTL) & IPS_ENABLE)
			seq_puts(m, "Currently: enabled\n");
		else
			seq_puts(m, "Currently: disabled\n");
	}

	intel_runtime_pm_put(dev_priv);

	return 0;
}
static int i915_sr_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	bool sr_enabled = false;

	intel_runtime_pm_get(dev_priv);
	intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);

	if (INTEL_GEN(dev_priv) >= 9)
		/* no global SR status; inspect per-plane WM */;
	else if (HAS_PCH_SPLIT(dev_priv))
		sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
	else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) ||
		 IS_I945G(dev_priv) || IS_I945GM(dev_priv))
		sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
	else if (IS_I915GM(dev_priv))
		sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
	else if (IS_PINEVIEW(dev_priv))
		sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;

	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
	intel_runtime_pm_put(dev_priv);

	seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled));

	return 0;
}
static int i915_emon_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	unsigned long temp, chipset, gfx;
	int ret;

	if (!IS_GEN5(dev_priv))
		return -ENODEV;

	intel_runtime_pm_get(dev_priv);

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	temp = i915_mch_val(dev_priv);
	chipset = i915_chipset_val(dev_priv);
	gfx = i915_gfx_val(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "GMCH temp: %ld\n", temp);
	seq_printf(m, "Chipset power: %ld\n", chipset);
	seq_printf(m, "GFX power: %ld\n", gfx);
	seq_printf(m, "Total power: %ld\n", chipset + gfx);

	intel_runtime_pm_put(dev_priv);

	return 0;
}
static int i915_ring_freq_table(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	unsigned int max_gpu_freq, min_gpu_freq;
	int gpu_freq, ia_freq;
	int ret;

	if (!HAS_LLC(dev_priv))
		return -ENODEV;

	intel_runtime_pm_get(dev_priv);

	ret = mutex_lock_interruptible(&dev_priv->pcu_lock);
	if (ret)
		goto out;

	min_gpu_freq = rps->min_freq;
	max_gpu_freq = rps->max_freq;
	if (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
		/* Convert GT frequency to 50 HZ units */
		min_gpu_freq /= GEN9_FREQ_SCALER;
		max_gpu_freq /= GEN9_FREQ_SCALER;
	}

	seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");

	for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
		ia_freq = gpu_freq;
		sandybridge_pcode_read(dev_priv,
				       GEN6_PCODE_READ_MIN_FREQ_TABLE,
				       &ia_freq);
		seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
			   intel_gpu_freq(dev_priv, (gpu_freq *
						     (IS_GEN9_BC(dev_priv) ||
						      INTEL_GEN(dev_priv) >= 10 ?
						      GEN9_FREQ_SCALER : 1))),
			   ((ia_freq >> 0) & 0xff) * 100,
			   ((ia_freq >> 8) & 0xff) * 100);
	}

	mutex_unlock(&dev_priv->pcu_lock);

out:
	intel_runtime_pm_put(dev_priv);
	return ret;
}
static int i915_opregion(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_opregion *opregion = &dev_priv->opregion;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out;

	if (opregion->header)
		seq_write(m, opregion->header, OPREGION_SIZE);

	mutex_unlock(&dev->struct_mutex);

out:
	return 0;
}
static int i915_vbt(struct seq_file *m, void *unused)
{
	struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;

	if (opregion->vbt)
		seq_write(m, opregion->vbt, opregion->vbt_size);

	return 0;
}
static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_framebuffer *fbdev_fb = NULL;
	struct drm_framebuffer *drm_fb;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

#ifdef CONFIG_DRM_FBDEV_EMULATION
	if (dev_priv->fbdev && dev_priv->fbdev->helper.fb) {
		fbdev_fb = to_intel_framebuffer(dev_priv->fbdev->helper.fb);

		seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
			   fbdev_fb->base.width,
			   fbdev_fb->base.height,
			   fbdev_fb->base.format->depth,
			   fbdev_fb->base.format->cpp[0] * 8,
			   fbdev_fb->base.modifier,
			   drm_framebuffer_read_refcount(&fbdev_fb->base));
		describe_obj(m, intel_fb_obj(&fbdev_fb->base));
		seq_putc(m, '\n');
	}
#endif

	mutex_lock(&dev->mode_config.fb_lock);
	drm_for_each_fb(drm_fb, dev) {
		struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
		if (fb == fbdev_fb)
			continue;

		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
			   fb->base.width,
			   fb->base.height,
			   fb->base.format->depth,
			   fb->base.format->cpp[0] * 8,
			   fb->base.modifier,
			   drm_framebuffer_read_refcount(&fb->base));
		describe_obj(m, intel_fb_obj(&fb->base));
		seq_putc(m, '\n');
	}
	mutex_unlock(&dev->mode_config.fb_lock);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring)
{
	seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, emit: %u)",
		   ring->space, ring->head, ring->tail, ring->emit);
}
static int i915_context_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_engine_cs *engine;
	struct i915_gem_context *ctx;
	enum intel_engine_id id;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
		seq_printf(m, "HW context %u ", ctx->hw_id);
		if (ctx->pid) {
			struct task_struct *task;

			task = get_pid_task(ctx->pid, PIDTYPE_PID);
			if (task) {
				seq_printf(m, "(%s [%d]) ",
					   task->comm, task->pid);
				put_task_struct(task);
			}
		} else if (IS_ERR(ctx->file_priv)) {
			seq_puts(m, "(deleted) ");
		} else {
			seq_puts(m, "(kernel) ");
		}

		seq_putc(m, ctx->remap_slice ? 'R' : 'r');
		seq_putc(m, '\n');

		for_each_engine(engine, dev_priv, id) {
			struct intel_context *ce =
				to_intel_context(ctx, engine);

			seq_printf(m, "%s: ", engine->name);
			if (ce->state)
				describe_obj(m, ce->state->obj);
			if (ce->ring)
				describe_ctx_ring(m, ce->ring);
			seq_putc(m, '\n');
		}

		seq_putc(m, '\n');
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
static const char *swizzle_string(unsigned swizzle)
{
	switch (swizzle) {
	case I915_BIT_6_SWIZZLE_NONE:
		return "none";
	case I915_BIT_6_SWIZZLE_9:
		return "bit9";
	case I915_BIT_6_SWIZZLE_9_10:
		return "bit9/bit10";
	case I915_BIT_6_SWIZZLE_9_11:
		return "bit9/bit11";
	case I915_BIT_6_SWIZZLE_9_10_11:
		return "bit9/bit10/bit11";
	case I915_BIT_6_SWIZZLE_9_17:
		return "bit9/bit17";
	case I915_BIT_6_SWIZZLE_9_10_17:
		return "bit9/bit10/bit17";
	case I915_BIT_6_SWIZZLE_UNKNOWN:
		return "unknown";
	}

	return "bug";
}
static int i915_swizzle_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);

	intel_runtime_pm_get(dev_priv);

	seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_x));
	seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_y));

	if (IS_GEN3(dev_priv) || IS_GEN4(dev_priv)) {
		seq_printf(m, "DDC = 0x%08x\n",
			   I915_READ(DCC));
		seq_printf(m, "DDC2 = 0x%08x\n",
			   I915_READ(DCC2));
		seq_printf(m, "C0DRB3 = 0x%04x\n",
			   I915_READ16(C0DRB3));
		seq_printf(m, "C1DRB3 = 0x%04x\n",
			   I915_READ16(C1DRB3));
	} else if (INTEL_GEN(dev_priv) >= 6) {
		seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C0));
		seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C1));
		seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C2));
		seq_printf(m, "TILECTL = 0x%08x\n",
			   I915_READ(TILECTL));
		if (INTEL_GEN(dev_priv) >= 8)
			seq_printf(m, "GAMTARBMODE = 0x%08x\n",
				   I915_READ(GAMTARBMODE));
		else
			seq_printf(m, "ARB_MODE = 0x%08x\n",
				   I915_READ(ARB_MODE));
		seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
			   I915_READ(DISP_ARB_CTL));
	}

	if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
		seq_puts(m, "L-shaped memory detected\n");

	intel_runtime_pm_put(dev_priv);

	return 0;
}
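/*
 * per_file_ctx() is an idr_for_each() callback: it is invoked once per
 * context handle registered by a client and dumps that context's ppgtt,
 * falling back to a note when the context has no ppgtt of its own.
 */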
static int per_file_ctx(int id, void *ptr, void *data)
{
	struct i915_gem_context *ctx = ptr;
	struct seq_file *m = data;
	struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;

	if (!ppgtt) {
		seq_printf(m, "  no ppgtt for context %d\n",
			   ctx->user_handle);
		return 0;
	}

	if (i915_gem_context_is_default(ctx))
		seq_puts(m, "  default context:\n");
	else
		seq_printf(m, "  context %d:\n", ctx->user_handle);
	ppgtt->debug_dump(ppgtt, m);

	return 0;
}
static void gen8_ppgtt_info(struct seq_file *m,
			    struct drm_i915_private *dev_priv)
{
	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int i;

	if (!ppgtt)
		return;

	for_each_engine(engine, dev_priv, id) {
		seq_printf(m, "%s\n", engine->name);
		for (i = 0; i < 4; i++) {
			u64 pdp = I915_READ(GEN8_RING_PDP_UDW(engine, i));
			pdp <<= 32;
			pdp |= I915_READ(GEN8_RING_PDP_LDW(engine, i));
			seq_printf(m, "\tPDP%d 0x%016llx\n", i, pdp);
		}
	}
}
static void gen6_ppgtt_info(struct seq_file *m,
			    struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	if (IS_GEN6(dev_priv))
		seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));

	for_each_engine(engine, dev_priv, id) {
		seq_printf(m, "%s\n", engine->name);
		if (IS_GEN7(dev_priv))
			seq_printf(m, "GFX_MODE: 0x%08x\n",
				   I915_READ(RING_MODE_GEN7(engine)));
		seq_printf(m, "PP_DIR_BASE: 0x%08x\n",
			   I915_READ(RING_PP_DIR_BASE(engine)));
		seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n",
			   I915_READ(RING_PP_DIR_BASE_READ(engine)));
		seq_printf(m, "PP_DIR_DCLV: 0x%08x\n",
			   I915_READ(RING_PP_DIR_DCLV(engine)));
	}
	if (dev_priv->mm.aliasing_ppgtt) {
		struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;

		seq_puts(m, "aliasing PPGTT:\n");
		seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd.base.ggtt_offset);

		ppgtt->debug_dump(ppgtt, m);
	}

	seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
}
static int i915_ppgtt_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_file *file;
	int ret;

	mutex_lock(&dev->filelist_mutex);
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out_unlock;

	intel_runtime_pm_get(dev_priv);

	if (INTEL_GEN(dev_priv) >= 8)
		gen8_ppgtt_info(m, dev_priv);
	else if (INTEL_GEN(dev_priv) >= 6)
		gen6_ppgtt_info(m, dev_priv);

	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
		struct drm_i915_file_private *file_priv = file->driver_priv;
		struct task_struct *task;

		task = get_pid_task(file->pid, PIDTYPE_PID);
		if (!task) {
			ret = -ESRCH;
			goto out_rpm;
		}
		seq_printf(m, "\nproc: %s\n", task->comm);
		put_task_struct(task);
		idr_for_each(&file_priv->context_idr, per_file_ctx,
			     (void *)(unsigned long)m);
	}

out_rpm:
	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);
out_unlock:
	mutex_unlock(&dev->filelist_mutex);
	return ret;
}
static int count_irq_waiters(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int count = 0;

	for_each_engine(engine, i915, id)
		count += intel_engine_has_waiter(engine);

	return count;
}

static const char *rps_power_to_str(unsigned int power)
{
	static const char * const strings[] = {
		[LOW_POWER] = "low power",
		[BETWEEN] = "mixed",
		[HIGH_POWER] = "high power",
	};

	if (power >= ARRAY_SIZE(strings) || !strings[power])
		return "unknown";

	return strings[power];
}
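/*
 * i915_rps_boost_info backs a read-only debugfs file. On a typical system
 * it would be read as, e.g., /sys/kernel/debug/dri/0/i915_rps_boost_info
 * (path assuming the default debugfs mount point and the primary device
 * node; adjust for your setup).
 */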
static int i915_rps_boost_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	struct drm_file *file;

	seq_printf(m, "RPS enabled? %d\n", rps->enabled);
	seq_printf(m, "GPU busy? %s [%d requests]\n",
		   yesno(dev_priv->gt.awake), dev_priv->gt.active_requests);
	seq_printf(m, "CPU waiting? %d\n", count_irq_waiters(dev_priv));
	seq_printf(m, "Boosts outstanding? %d\n",
		   atomic_read(&rps->num_waiters));
	seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive));
	seq_printf(m, "Frequency requested %d\n",
		   intel_gpu_freq(dev_priv, rps->cur_freq));
	seq_printf(m, "  min hard:%d, soft:%d; max soft:%d, hard:%d\n",
		   intel_gpu_freq(dev_priv, rps->min_freq),
		   intel_gpu_freq(dev_priv, rps->min_freq_softlimit),
		   intel_gpu_freq(dev_priv, rps->max_freq_softlimit),
		   intel_gpu_freq(dev_priv, rps->max_freq));
	seq_printf(m, "  idle:%d, efficient:%d, boost:%d\n",
		   intel_gpu_freq(dev_priv, rps->idle_freq),
		   intel_gpu_freq(dev_priv, rps->efficient_freq),
		   intel_gpu_freq(dev_priv, rps->boost_freq));

	mutex_lock(&dev->filelist_mutex);
	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
		struct drm_i915_file_private *file_priv = file->driver_priv;
		struct task_struct *task;

		rcu_read_lock();
		task = pid_task(file->pid, PIDTYPE_PID);
		seq_printf(m, "%s [%d]: %d boosts\n",
			   task ? task->comm : "<unknown>",
			   task ? task->pid : -1,
			   atomic_read(&file_priv->rps_client.boosts));
		rcu_read_unlock();
	}
	seq_printf(m, "Kernel (anonymous) boosts: %d\n",
		   atomic_read(&rps->boosts));
	mutex_unlock(&dev->filelist_mutex);

	if (INTEL_GEN(dev_priv) >= 6 &&
	    rps->enabled &&
	    dev_priv->gt.active_requests) {
		u32 rpup, rpupei;
		u32 rpdown, rpdownei;

		intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
		rpup = I915_READ_FW(GEN6_RP_CUR_UP) & GEN6_RP_EI_MASK;
		rpupei = I915_READ_FW(GEN6_RP_CUR_UP_EI) & GEN6_RP_EI_MASK;
		rpdown = I915_READ_FW(GEN6_RP_CUR_DOWN) & GEN6_RP_EI_MASK;
		rpdownei = I915_READ_FW(GEN6_RP_CUR_DOWN_EI) & GEN6_RP_EI_MASK;
		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

		seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n",
			   rps_power_to_str(rps->power.mode));
		seq_printf(m, "  Avg. up: %d%% [above threshold? %d%%]\n",
			   rpup && rpupei ? 100 * rpup / rpupei : 0,
			   rps->power.up_threshold);
		seq_printf(m, "  Avg. down: %d%% [below threshold? %d%%]\n",
			   rpdown && rpdownei ? 100 * rpdown / rpdownei : 0,
			   rps->power.down_threshold);
	} else {
		seq_puts(m, "\nRPS Autotuning inactive\n");
	}

	return 0;
}
static int i915_llc(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const bool edram = INTEL_GEN(dev_priv) > 8;

	seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev_priv)));
	seq_printf(m, "%s: %lluMB\n", edram ? "eDRAM" : "eLLC",
		   intel_uncore_edram_size(dev_priv)/1024/1024);

	return 0;
}
static int i915_huc_load_status_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_printer p;

	if (!HAS_HUC(dev_priv))
		return -ENODEV;

	p = drm_seq_file_printer(m);
	intel_uc_fw_dump(&dev_priv->huc.fw, &p);

	intel_runtime_pm_get(dev_priv);
	seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2));
	intel_runtime_pm_put(dev_priv);

	return 0;
}
static int i915_guc_load_status_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_printer p;
	u32 tmp, i;

	if (!HAS_GUC(dev_priv))
		return -ENODEV;

	p = drm_seq_file_printer(m);
	intel_uc_fw_dump(&dev_priv->guc.fw, &p);

	intel_runtime_pm_get(dev_priv);

	tmp = I915_READ(GUC_STATUS);

	seq_printf(m, "\nGuC status 0x%08x:\n", tmp);
	seq_printf(m, "\tBootrom status = 0x%x\n",
		   (tmp & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT);
	seq_printf(m, "\tuKernel status = 0x%x\n",
		   (tmp & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT);
	seq_printf(m, "\tMIA Core status = 0x%x\n",
		   (tmp & GS_MIA_MASK) >> GS_MIA_SHIFT);
	seq_puts(m, "\nScratch registers:\n");
	for (i = 0; i < 16; i++)
		seq_printf(m, "\t%2d: \t0x%x\n", i, I915_READ(SOFT_SCRATCH(i)));

	intel_runtime_pm_put(dev_priv);

	return 0;
}
static const char *
stringify_guc_log_type(enum guc_log_buffer_type type)
{
	switch (type) {
	case GUC_ISR_LOG_BUFFER:
		return "ISR";
	case GUC_DPC_LOG_BUFFER:
		return "DPC";
	case GUC_CRASH_DUMP_LOG_BUFFER:
		return "CRASH";
	default:
		MISSING_CASE(type);
	}

	return "";
}

static void i915_guc_log_info(struct seq_file *m,
			      struct drm_i915_private *dev_priv)
{
	struct intel_guc_log *log = &dev_priv->guc.log;
	enum guc_log_buffer_type type;

	if (!intel_guc_log_relay_enabled(log)) {
		seq_puts(m, "GuC log relay disabled\n");
		return;
	}

	seq_puts(m, "GuC logging stats:\n");

	seq_printf(m, "\tRelay full count: %u\n",
		   log->relay.full_count);

	for (type = GUC_ISR_LOG_BUFFER; type < GUC_MAX_LOG_BUFFER; type++) {
		seq_printf(m, "\t%s:\tflush count %10u, overflow count %10u\n",
			   stringify_guc_log_type(type),
			   log->stats[type].flush,
			   log->stats[type].sampled_overflow);
	}
}
static void i915_guc_client_info(struct seq_file *m,
				 struct drm_i915_private *dev_priv,
				 struct intel_guc_client *client)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	u64 tot = 0;

	seq_printf(m, "\tPriority %d, GuC stage index: %u, PD offset 0x%x\n",
		   client->priority, client->stage_id, client->proc_desc_offset);
	seq_printf(m, "\tDoorbell id %d, offset: 0x%lx\n",
		   client->doorbell_id, client->doorbell_offset);

	for_each_engine(engine, dev_priv, id) {
		u64 submissions = client->submissions[id];

		tot += submissions;
		seq_printf(m, "\tSubmissions: %llu %s\n",
			   submissions, engine->name);
	}
	seq_printf(m, "\tTotal: %llu\n", tot);
}
static int i915_guc_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const struct intel_guc *guc = &dev_priv->guc;

	if (!USES_GUC(dev_priv))
		return -ENODEV;

	i915_guc_log_info(m, dev_priv);

	if (!USES_GUC_SUBMISSION(dev_priv))
		return 0;

	GEM_BUG_ON(!guc->execbuf_client);

	seq_printf(m, "\nDoorbell map:\n");
	seq_printf(m, "\t%*pb\n", GUC_NUM_DOORBELLS, guc->doorbell_bitmap);
	seq_printf(m, "Doorbell next cacheline: 0x%x\n", guc->db_cacheline);

	seq_printf(m, "\nGuC execbuf client @ %p:\n", guc->execbuf_client);
	i915_guc_client_info(m, dev_priv, guc->execbuf_client);
	if (guc->preempt_client) {
		seq_printf(m, "\nGuC preempt client @ %p:\n",
			   guc->preempt_client);
		i915_guc_client_info(m, dev_priv, guc->preempt_client);
	}

	/* Add more as required ... */

	return 0;
}
static int i915_guc_stage_pool(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const struct intel_guc *guc = &dev_priv->guc;
	struct guc_stage_desc *desc = guc->stage_desc_pool_vaddr;
	struct intel_guc_client *client = guc->execbuf_client;
	unsigned int tmp;
	int index;

	if (!USES_GUC_SUBMISSION(dev_priv))
		return -ENODEV;

	for (index = 0; index < GUC_MAX_STAGE_DESCRIPTORS; index++, desc++) {
		struct intel_engine_cs *engine;

		if (!(desc->attribute & GUC_STAGE_DESC_ATTR_ACTIVE))
			continue;

		seq_printf(m, "GuC stage descriptor %u:\n", index);
		seq_printf(m, "\tIndex: %u\n", desc->stage_id);
		seq_printf(m, "\tAttribute: 0x%x\n", desc->attribute);
		seq_printf(m, "\tPriority: %d\n", desc->priority);
		seq_printf(m, "\tDoorbell id: %d\n", desc->db_id);
		seq_printf(m, "\tEngines used: 0x%x\n",
			   desc->engines_used);
		seq_printf(m, "\tDoorbell trigger phy: 0x%llx, cpu: 0x%llx, uK: 0x%x\n",
			   desc->db_trigger_phy,
			   desc->db_trigger_cpu,
			   desc->db_trigger_uk);
		seq_printf(m, "\tProcess descriptor: 0x%x\n",
			   desc->process_desc);
		seq_printf(m, "\tWorkqueue address: 0x%x, size: 0x%x\n",
			   desc->wq_addr, desc->wq_size);
		seq_putc(m, '\n');

		for_each_engine_masked(engine, dev_priv, client->engines, tmp) {
			u32 guc_engine_id = engine->guc_id;
			struct guc_execlist_context *lrc =
						&desc->lrc[guc_engine_id];

			seq_printf(m, "\t%s LRC:\n", engine->name);
			seq_printf(m, "\t\tContext desc: 0x%x\n",
				   lrc->context_desc);
			seq_printf(m, "\t\tContext id: 0x%x\n", lrc->context_id);
			seq_printf(m, "\t\tLRCA: 0x%x\n", lrc->ring_lrca);
			seq_printf(m, "\t\tRing begin: 0x%x\n", lrc->ring_begin);
			seq_printf(m, "\t\tRing end: 0x%x\n", lrc->ring_end);
			seq_putc(m, '\n');
		}
	}

	return 0;
}
static int i915_guc_log_dump(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_i915_private *dev_priv = node_to_i915(node);
	bool dump_load_err = !!node->info_ent->data;
	struct drm_i915_gem_object *obj = NULL;
	u32 *log;
	int i = 0;

	if (!HAS_GUC(dev_priv))
		return -ENODEV;

	if (dump_load_err)
		obj = dev_priv->guc.load_err_log;
	else if (dev_priv->guc.log.vma)
		obj = dev_priv->guc.log.vma->obj;

	if (!obj)
		return 0;

	log = i915_gem_object_pin_map(obj, I915_MAP_WC);
	if (IS_ERR(log)) {
		DRM_DEBUG("Failed to pin object\n");
		seq_puts(m, "(log data inaccessible)\n");
		return PTR_ERR(log);
	}

	for (i = 0; i < obj->base.size / sizeof(u32); i += 4)
		seq_printf(m, "0x%08x 0x%08x 0x%08x 0x%08x\n",
			   *(log + i), *(log + i + 1),
			   *(log + i + 2), *(log + i + 3));

	seq_putc(m, '\n');

	i915_gem_object_unpin_map(obj);

	return 0;
}
static int i915_guc_log_level_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;

	if (!USES_GUC(dev_priv))
		return -ENODEV;

	*val = intel_guc_log_get_level(&dev_priv->guc.log);

	return 0;
}

static int i915_guc_log_level_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;

	if (!USES_GUC(dev_priv))
		return -ENODEV;

	return intel_guc_log_set_level(&dev_priv->guc.log, val);
}

DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_level_fops,
			i915_guc_log_level_get, i915_guc_log_level_set,
			"%lld\n");
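/*
 * The relay file below only implements open/write/release: writing any
 * data to it simply forces a flush of the GuC log relay; the log contents
 * themselves are streamed out through the relay channel.
 */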
static int i915_guc_log_relay_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *dev_priv = inode->i_private;

	if (!USES_GUC(dev_priv))
		return -ENODEV;

	file->private_data = &dev_priv->guc.log;

	return intel_guc_log_relay_open(&dev_priv->guc.log);
}

static ssize_t
i915_guc_log_relay_write(struct file *filp,
			 const char __user *ubuf,
			 size_t cnt,
			 loff_t *ppos)
{
	struct intel_guc_log *log = filp->private_data;

	intel_guc_log_relay_flush(log);

	return cnt;
}

static int i915_guc_log_relay_release(struct inode *inode, struct file *file)
{
	struct drm_i915_private *dev_priv = inode->i_private;

	intel_guc_log_relay_close(&dev_priv->guc.log);

	return 0;
}

static const struct file_operations i915_guc_log_relay_fops = {
	.owner = THIS_MODULE,
	.open = i915_guc_log_relay_open,
	.write = i915_guc_log_relay_write,
	.release = i915_guc_log_relay_release,
};
static int i915_psr_sink_status_show(struct seq_file *m, void *data)
{
	u8 val;
	static const char * const sink_status[] = {
		"inactive",
		"transition to active, capture and display",
		"active, display from RFB",
		"active, capture and display on sink device timings",
		"transition to inactive, capture and display, timing re-sync",
		"reserved",
		"reserved",
		"sink internal error",
	};
	struct drm_connector *connector = m->private;
	struct drm_i915_private *dev_priv = to_i915(connector->dev);
	struct intel_dp *intel_dp =
		enc_to_intel_dp(&intel_attached_encoder(connector)->base);
	int ret;

	if (!CAN_PSR(dev_priv)) {
		seq_puts(m, "PSR Unsupported\n");
		return -ENODEV;
	}

	if (connector->status != connector_status_connected)
		return -ENODEV;

	ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val);

	if (ret == 1) {
		const char *str = "unknown";

		val &= DP_PSR_SINK_STATE_MASK;
		if (val < ARRAY_SIZE(sink_status))
			str = sink_status[val];
		seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val, str);
	} else {
		return ret;
	}

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
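/*
 * psr_source_status() decodes the hardware PSR state machine from
 * EDP_PSR2_STATUS or EDP_PSR_STATUS, depending on whether PSR2 is in use,
 * and prints both the raw register value and a symbolic state name.
 */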
static void
psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m)
{
	u32 val, psr_status;

	if (dev_priv->psr.psr2_enabled) {
		static const char * const live_status[] = {
			"IDLE",
			"CAPTURE",
			"CAPTURE_FS",
			"SLEEP",
			"BUFON_FW",
			"ML_UP",
			"SU_STANDBY",
			"FAST_SLEEP",
			"DEEP_SLEEP",
			"BUF_ON",
			"TG_ON"
		};
		psr_status = I915_READ(EDP_PSR2_STATUS);
		val = (psr_status & EDP_PSR2_STATUS_STATE_MASK) >>
			EDP_PSR2_STATUS_STATE_SHIFT;
		if (val < ARRAY_SIZE(live_status)) {
			seq_printf(m, "Source PSR status: 0x%x [%s]\n",
				   psr_status, live_status[val]);
			return;
		}
	} else {
		static const char * const live_status[] = {
			"IDLE",
			"SRDONACK",
			"SRDENT",
			"BUFOFF",
			"BUFON",
			"AUXACK",
			"SRDOFFACK",
			"SRDENT_ON",
		};
		psr_status = I915_READ(EDP_PSR_STATUS);
		val = (psr_status & EDP_PSR_STATUS_STATE_MASK) >>
			EDP_PSR_STATUS_STATE_SHIFT;
		if (val < ARRAY_SIZE(live_status)) {
			seq_printf(m, "Source PSR status: 0x%x [%s]\n",
				   psr_status, live_status[val]);
			return;
		}
	}

	seq_printf(m, "Source PSR status: 0x%x [%s]\n", psr_status, "unknown");
}
static int i915_edp_psr_status(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u32 psrperf = 0;
	bool enabled = false;
	bool sink_support;

	if (!HAS_PSR(dev_priv))
		return -ENODEV;

	sink_support = dev_priv->psr.sink_support;
	seq_printf(m, "Sink_Support: %s\n", yesno(sink_support));
	if (!sink_support)
		return 0;

	intel_runtime_pm_get(dev_priv);

	mutex_lock(&dev_priv->psr.lock);
	seq_printf(m, "Enabled: %s\n", yesno((bool)dev_priv->psr.enabled));
	seq_printf(m, "Busy frontbuffer bits: 0x%03x\n",
		   dev_priv->psr.busy_frontbuffer_bits);

	if (dev_priv->psr.psr2_enabled)
		enabled = I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE;
	else
		enabled = I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE;

	seq_printf(m, "Main link in standby mode: %s\n",
		   yesno(dev_priv->psr.link_standby));

	seq_printf(m, "HW Enabled & Active bit: %s\n", yesno(enabled));

	/*
	 * The SKL+ Perf counter is reset to 0 every time DC state is entered
	 */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		psrperf = I915_READ(EDP_PSR_PERF_CNT) &
			EDP_PSR_PERF_CNT_MASK;

		seq_printf(m, "Performance_Counter: %u\n", psrperf);
	}

	psr_source_status(dev_priv, m);
	mutex_unlock(&dev_priv->psr.lock);

	if (READ_ONCE(dev_priv->psr.debug)) {
		seq_printf(m, "Last attempted entry at: %lld\n",
			   dev_priv->psr.last_entry_attempt);
		seq_printf(m, "Last exit at: %lld\n",
			   dev_priv->psr.last_exit);
	}

	intel_runtime_pm_put(dev_priv);
	return 0;
}
static int
i915_edp_psr_debug_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;

	if (!CAN_PSR(dev_priv))
		return -ENODEV;

	DRM_DEBUG_KMS("PSR debug %s\n", enableddisabled(val));

	intel_runtime_pm_get(dev_priv);
	intel_psr_irq_control(dev_priv, !!val);
	intel_runtime_pm_put(dev_priv);

	return 0;
}

static int
i915_edp_psr_debug_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;

	if (!CAN_PSR(dev_priv))
		return -ENODEV;

	*val = READ_ONCE(dev_priv->psr.debug);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
			i915_edp_psr_debug_get, i915_edp_psr_debug_set,
			"%llu\n");
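/*
 * i915_energy_uJ reports energy consumed by the GPU: the scaling exponent
 * comes from the RAPL power-unit MSR and the raw count from the MCH
 * energy-status register, combined below into microjoules.
 */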
static int i915_energy_uJ(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	unsigned long long power;
	u32 units;

	if (INTEL_GEN(dev_priv) < 6)
		return -ENODEV;

	intel_runtime_pm_get(dev_priv);

	if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &power)) {
		intel_runtime_pm_put(dev_priv);
		return -ENODEV;
	}

	units = (power & 0x1f00) >> 8;
	power = I915_READ(MCH_SECP_NRG_STTS);
	power = (1000000 * power) >> units; /* convert to uJ */

	intel_runtime_pm_put(dev_priv);

	seq_printf(m, "%llu", power);

	return 0;
}
static int i915_runtime_pm_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct pci_dev *pdev = dev_priv->drm.pdev;

	if (!HAS_RUNTIME_PM(dev_priv))
		seq_puts(m, "Runtime power management not supported\n");

	seq_printf(m, "GPU idle: %s (epoch %u)\n",
		   yesno(!dev_priv->gt.awake), dev_priv->gt.epoch);
	seq_printf(m, "IRQs disabled: %s\n",
		   yesno(!intel_irqs_enabled(dev_priv)));
#ifdef CONFIG_PM
	seq_printf(m, "Usage count: %d\n",
		   atomic_read(&dev_priv->drm.dev->power.usage_count));
#else
	seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n");
#endif
	seq_printf(m, "PCI device power state: %s [%d]\n",
		   pci_power_name(pdev->current_state),
		   pdev->current_state);

	return 0;
}
static int i915_power_domain_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	int i;

	mutex_lock(&power_domains->lock);

	seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
	for (i = 0; i < power_domains->power_well_count; i++) {
		struct i915_power_well *power_well;
		enum intel_display_power_domain power_domain;

		power_well = &power_domains->power_wells[i];
		seq_printf(m, "%-25s %d\n", power_well->name,
			   power_well->count);

		for_each_power_domain(power_domain, power_well->domains)
			seq_printf(m, "  %-23s %d\n",
				   intel_display_power_domain_str(power_domain),
				   power_domains->domain_use_count[power_domain]);
	}

	mutex_unlock(&power_domains->lock);

	return 0;
}
static int i915_dmc_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_csr *csr;

	if (!HAS_CSR(dev_priv))
		return -ENODEV;

	csr = &dev_priv->csr;

	intel_runtime_pm_get(dev_priv);

	seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL));
	seq_printf(m, "path: %s\n", csr->fw_path);

	if (!csr->dmc_payload)
		goto out;

	seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
		   CSR_VERSION_MINOR(csr->version));

	if (IS_KABYLAKE(dev_priv) ||
	    (IS_SKYLAKE(dev_priv) && csr->version >= CSR_VERSION(1, 6))) {
		seq_printf(m, "DC3 -> DC5 count: %d\n",
			   I915_READ(SKL_CSR_DC3_DC5_COUNT));
		seq_printf(m, "DC5 -> DC6 count: %d\n",
			   I915_READ(SKL_CSR_DC5_DC6_COUNT));
	} else if (IS_BROXTON(dev_priv) && csr->version >= CSR_VERSION(1, 4)) {
		seq_printf(m, "DC3 -> DC5 count: %d\n",
			   I915_READ(BXT_CSR_DC3_DC5_COUNT));
	}

out:
	seq_printf(m, "program base: 0x%08x\n", I915_READ(CSR_PROGRAM(0)));
	seq_printf(m, "ssp base: 0x%08x\n", I915_READ(CSR_SSP_BASE));
	seq_printf(m, "htp: 0x%08x\n", I915_READ(CSR_HTP_SKL));

	intel_runtime_pm_put(dev_priv);

	return 0;
}
static void intel_seq_print_mode(struct seq_file *m, int tabs,
				 struct drm_display_mode *mode)
{
	int i;

	for (i = 0; i < tabs; i++)
		seq_putc(m, '\t');

	seq_printf(m, "id %d:\"%s\" freq %d clock %d hdisp %d hss %d hse %d htot %d vdisp %d vss %d vse %d vtot %d type 0x%x flags 0x%x\n",
		   mode->base.id, mode->name,
		   mode->vrefresh, mode->clock,
		   mode->hdisplay, mode->hsync_start,
		   mode->hsync_end, mode->htotal,
		   mode->vdisplay, mode->vsync_start,
		   mode->vsync_end, mode->vtotal,
		   mode->type, mode->flags);
}
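/*
 * The helpers below build up the i915_display_info dump hierarchically:
 * crtc -> encoder -> connector, with per-connector-type detail routines
 * (DP, DP-MST, HDMI, LVDS and fixed-mode panels) layered on top.
 */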
static void intel_encoder_info(struct seq_file *m,
			       struct intel_crtc *intel_crtc,
			       struct intel_encoder *intel_encoder)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_crtc *crtc = &intel_crtc->base;
	struct intel_connector *intel_connector;
	struct drm_encoder *encoder;

	encoder = &intel_encoder->base;
	seq_printf(m, "\tencoder %d: type: %s, connectors:\n",
		   encoder->base.id, encoder->name);
	for_each_connector_on_encoder(dev, encoder, intel_connector) {
		struct drm_connector *connector = &intel_connector->base;

		seq_printf(m, "\t\tconnector %d: type: %s, status: %s",
			   connector->base.id,
			   connector->name,
			   drm_get_connector_status_name(connector->status));
		if (connector->status == connector_status_connected) {
			struct drm_display_mode *mode = &crtc->mode;

			seq_printf(m, ", mode:\n");
			intel_seq_print_mode(m, 2, mode);
		} else {
			seq_putc(m, '\n');
		}
	}
}
static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_crtc *crtc = &intel_crtc->base;
	struct intel_encoder *intel_encoder;
	struct drm_plane_state *plane_state = crtc->primary->state;
	struct drm_framebuffer *fb = plane_state->fb;

	if (fb)
		seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n",
			   fb->base.id, plane_state->src_x >> 16,
			   plane_state->src_y >> 16, fb->width, fb->height);
	else
		seq_puts(m, "\tprimary plane disabled\n");
	for_each_encoder_on_crtc(dev, crtc, intel_encoder)
		intel_encoder_info(m, intel_crtc, intel_encoder);
}
static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
{
	struct drm_display_mode *mode = panel->fixed_mode;

	seq_printf(m, "\tfixed mode:\n");
	intel_seq_print_mode(m, 2, mode);
}
static void intel_dp_info(struct seq_file *m,
			  struct intel_connector *intel_connector)
{
	struct intel_encoder *intel_encoder = intel_connector->encoder;
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);

	seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
	seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
	if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
		intel_panel_info(m, &intel_connector->panel);

	drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports,
				&intel_dp->aux);
}
static void intel_dp_mst_info(struct seq_file *m,
			      struct intel_connector *intel_connector)
{
	struct intel_encoder *intel_encoder = intel_connector->encoder;
	struct intel_dp_mst_encoder *intel_mst =
		enc_to_mst(&intel_encoder->base);
	struct intel_digital_port *intel_dig_port = intel_mst->primary;
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	bool has_audio = drm_dp_mst_port_has_audio(&intel_dp->mst_mgr,
						   intel_connector->port);

	seq_printf(m, "\taudio support: %s\n", yesno(has_audio));
}
static void intel_hdmi_info(struct seq_file *m,
			    struct intel_connector *intel_connector)
{
	struct intel_encoder *intel_encoder = intel_connector->encoder;
	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);

	seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio));
}

static void intel_lvds_info(struct seq_file *m,
			    struct intel_connector *intel_connector)
{
	intel_panel_info(m, &intel_connector->panel);
}
static void intel_connector_info(struct seq_file *m,
				 struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct intel_encoder *intel_encoder = intel_connector->encoder;
	struct drm_display_mode *mode;

	seq_printf(m, "connector %d: type %s, status: %s\n",
		   connector->base.id, connector->name,
		   drm_get_connector_status_name(connector->status));
	if (connector->status == connector_status_connected) {
		seq_printf(m, "\tname: %s\n", connector->display_info.name);
		seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
			   connector->display_info.width_mm,
			   connector->display_info.height_mm);
		seq_printf(m, "\tsubpixel order: %s\n",
			   drm_get_subpixel_order_name(connector->display_info.subpixel_order));
		seq_printf(m, "\tCEA rev: %d\n",
			   connector->display_info.cea_rev);
	}

	if (!intel_encoder)
		return;

	switch (connector->connector_type) {
	case DRM_MODE_CONNECTOR_DisplayPort:
	case DRM_MODE_CONNECTOR_eDP:
		if (intel_encoder->type == INTEL_OUTPUT_DP_MST)
			intel_dp_mst_info(m, intel_connector);
		else
			intel_dp_info(m, intel_connector);
		break;
	case DRM_MODE_CONNECTOR_LVDS:
		if (intel_encoder->type == INTEL_OUTPUT_LVDS)
			intel_lvds_info(m, intel_connector);
		break;
	case DRM_MODE_CONNECTOR_HDMIA:
		if (intel_encoder->type == INTEL_OUTPUT_HDMI ||
		    intel_encoder->type == INTEL_OUTPUT_DDI)
			intel_hdmi_info(m, intel_connector);
		break;
	default:
		break;
	}

	seq_printf(m, "\tmodes:\n");
	list_for_each_entry(mode, &connector->modes, head)
		intel_seq_print_mode(m, 2, mode);
}
static const char *plane_type(enum drm_plane_type type)
{
	switch (type) {
	case DRM_PLANE_TYPE_OVERLAY:
		return "OVL";
	case DRM_PLANE_TYPE_PRIMARY:
		return "PRI";
	case DRM_PLANE_TYPE_CURSOR:
		return "CUR";
	/*
	 * Deliberately omitting default: to generate compiler warnings
	 * when a new drm_plane_type gets added.
	 */
	}

	return "unknown";
}

static const char *plane_rotation(unsigned int rotation)
{
	static char buf[48];
	/*
	 * According to the docs only one DRM_MODE_ROTATE_ value is allowed,
	 * but print them all to make any misused combinations visible.
	 */
	snprintf(buf, sizeof(buf),
		 "%s%s%s%s%s%s(0x%08x)",
		 (rotation & DRM_MODE_ROTATE_0) ? "0 " : "",
		 (rotation & DRM_MODE_ROTATE_90) ? "90 " : "",
		 (rotation & DRM_MODE_ROTATE_180) ? "180 " : "",
		 (rotation & DRM_MODE_ROTATE_270) ? "270 " : "",
		 (rotation & DRM_MODE_REFLECT_X) ? "FLIPX " : "",
		 (rotation & DRM_MODE_REFLECT_Y) ? "FLIPY " : "",
		 rotation);

	return buf;
}
static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_plane *intel_plane;

	for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
		struct drm_plane_state *state;
		struct drm_plane *plane = &intel_plane->base;
		struct drm_format_name_buf format_name;

		if (!plane->state) {
			seq_puts(m, "plane->state is NULL!\n");
			continue;
		}

		state = plane->state;

		if (state->fb) {
			drm_get_format_name(state->fb->format->format,
					    &format_name);
		} else {
			sprintf(format_name.str, "N/A");
		}

		seq_printf(m, "\t--Plane id %d: type=%s, crtc_pos=%4dx%4d, crtc_size=%4dx%4d, src_pos=%d.%04ux%d.%04u, src_size=%d.%04ux%d.%04u, format=%s, rotation=%s\n",
			   plane->base.id,
			   plane_type(intel_plane->base.type),
			   state->crtc_x, state->crtc_y,
			   state->crtc_w, state->crtc_h,
			   (state->src_x >> 16),
			   ((state->src_x & 0xffff) * 15625) >> 10,
			   (state->src_y >> 16),
			   ((state->src_y & 0xffff) * 15625) >> 10,
			   (state->src_w >> 16),
			   ((state->src_w & 0xffff) * 15625) >> 10,
			   (state->src_h >> 16),
			   ((state->src_h & 0xffff) * 15625) >> 10,
			   format_name.str,
			   plane_rotation(state->rotation));
	}
}
static void intel_scaler_info(struct seq_file *m, struct intel_crtc *intel_crtc)
{
	struct intel_crtc_state *pipe_config;
	int num_scalers = intel_crtc->num_scalers;
	int i;

	pipe_config = to_intel_crtc_state(intel_crtc->base.state);

	/* Not all platforms have a scaler */
	if (num_scalers) {
		seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d",
			   num_scalers,
			   pipe_config->scaler_state.scaler_users,
			   pipe_config->scaler_state.scaler_id);

		for (i = 0; i < num_scalers; i++) {
			struct intel_scaler *sc =
				&pipe_config->scaler_state.scalers[i];

			seq_printf(m, ", scalers[%d]: use=%s, mode=%x",
				   i, yesno(sc->in_use), sc->mode);
		}
		seq_puts(m, "\n");
	} else {
		seq_puts(m, "\tNo scalers available on this platform\n");
	}
}
static int i915_display_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *crtc;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;

	intel_runtime_pm_get(dev_priv);
	seq_printf(m, "CRTC info\n");
	seq_printf(m, "---------\n");
	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *pipe_config;

		drm_modeset_lock(&crtc->base.mutex, NULL);
		pipe_config = to_intel_crtc_state(crtc->base.state);

		seq_printf(m, "CRTC %d: pipe: %c, active=%s, (size=%dx%d), dither=%s, bpp=%d\n",
			   crtc->base.base.id, pipe_name(crtc->pipe),
			   yesno(pipe_config->base.active),
			   pipe_config->pipe_src_w, pipe_config->pipe_src_h,
			   yesno(pipe_config->dither), pipe_config->pipe_bpp);

		if (pipe_config->base.active) {
			struct intel_plane *cursor =
				to_intel_plane(crtc->base.cursor);

			intel_crtc_info(m, crtc);

			seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x\n",
				   yesno(cursor->base.state->visible),
				   cursor->base.state->crtc_x,
				   cursor->base.state->crtc_y,
				   cursor->base.state->crtc_w,
				   cursor->base.state->crtc_h,
				   cursor->cursor.base);
			intel_scaler_info(m, crtc);
			intel_plane_info(m, crtc);
		}

		seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s \n",
			   yesno(!crtc->cpu_fifo_underrun_disabled),
			   yesno(!crtc->pch_fifo_underrun_disabled));
		drm_modeset_unlock(&crtc->base.mutex);
	}

	seq_printf(m, "\n");
	seq_printf(m, "Connector info\n");
	seq_printf(m, "--------------\n");
	mutex_lock(&dev->mode_config.mutex);
	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter)
		intel_connector_info(m, connector);
	drm_connector_list_iter_end(&conn_iter);
	mutex_unlock(&dev->mode_config.mutex);

	intel_runtime_pm_put(dev_priv);

	return 0;
}
static int i915_engine_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	struct drm_printer p;

	intel_runtime_pm_get(dev_priv);

	seq_printf(m, "GT awake? %s (epoch %u)\n",
		   yesno(dev_priv->gt.awake), dev_priv->gt.epoch);
	seq_printf(m, "Global active requests: %d\n",
		   dev_priv->gt.active_requests);
	seq_printf(m, "CS timestamp frequency: %u kHz\n",
		   dev_priv->info.cs_timestamp_frequency_khz);

	p = drm_seq_file_printer(m);
	for_each_engine(engine, dev_priv, id)
		intel_engine_dump(engine, &p, "%s\n", engine->name);

	intel_runtime_pm_put(dev_priv);

	return 0;
}

static int i915_rcs_topology(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_printer p = drm_seq_file_printer(m);

	intel_device_info_dump_topology(&INTEL_INFO(dev_priv)->sseu, &p);

	return 0;
}
static int i915_shrinker_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);

	seq_printf(m, "seeks = %d\n", i915->mm.shrinker.seeks);
	seq_printf(m, "batch = %lu\n", i915->mm.shrinker.batch);

	return 0;
}
static int i915_shared_dplls_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	int i;

	drm_modeset_lock_all(dev);
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

		seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->info->name,
			   pll->info->id);
		seq_printf(m, " crtc_mask: 0x%08x, active: 0x%x, on: %s\n",
			   pll->state.crtc_mask, pll->active_mask, yesno(pll->on));
		seq_printf(m, " tracked hardware state:\n");
		seq_printf(m, " dpll: 0x%08x\n", pll->state.hw_state.dpll);
		seq_printf(m, " dpll_md: 0x%08x\n",
			   pll->state.hw_state.dpll_md);
		seq_printf(m, " fp0: 0x%08x\n", pll->state.hw_state.fp0);
		seq_printf(m, " fp1: 0x%08x\n", pll->state.hw_state.fp1);
		seq_printf(m, " wrpll: 0x%08x\n", pll->state.hw_state.wrpll);
		seq_printf(m, " cfgcr0: 0x%08x\n", pll->state.hw_state.cfgcr0);
		seq_printf(m, " cfgcr1: 0x%08x\n", pll->state.hw_state.cfgcr1);
		seq_printf(m, " mg_refclkin_ctl: 0x%08x\n",
			   pll->state.hw_state.mg_refclkin_ctl);
		seq_printf(m, " mg_clktop2_coreclkctl1: 0x%08x\n",
			   pll->state.hw_state.mg_clktop2_coreclkctl1);
		seq_printf(m, " mg_clktop2_hsclkctl: 0x%08x\n",
			   pll->state.hw_state.mg_clktop2_hsclkctl);
		seq_printf(m, " mg_pll_div0: 0x%08x\n",
			   pll->state.hw_state.mg_pll_div0);
		seq_printf(m, " mg_pll_div1: 0x%08x\n",
			   pll->state.hw_state.mg_pll_div1);
		seq_printf(m, " mg_pll_lf: 0x%08x\n",
			   pll->state.hw_state.mg_pll_lf);
		seq_printf(m, " mg_pll_frac_lock: 0x%08x\n",
			   pll->state.hw_state.mg_pll_frac_lock);
		seq_printf(m, " mg_pll_ssc: 0x%08x\n",
			   pll->state.hw_state.mg_pll_ssc);
		seq_printf(m, " mg_pll_bias: 0x%08x\n",
			   pll->state.hw_state.mg_pll_bias);
		seq_printf(m, " mg_pll_tdc_coldst_bias: 0x%08x\n",
			   pll->state.hw_state.mg_pll_tdc_coldst_bias);
	}
	drm_modeset_unlock_all(dev);

	return 0;
}
static int i915_wa_registers(struct seq_file *m, void *unused)
{
	struct i915_workarounds *wa = &node_to_i915(m->private)->workarounds;
	int i;

	seq_printf(m, "Workarounds applied: %d\n", wa->count);
	for (i = 0; i < wa->count; ++i)
		seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X\n",
			   wa->reg[i].addr, wa->reg[i].value, wa->reg[i].mask);

	return 0;
}
static int i915_ipc_status_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;

	seq_printf(m, "Isochronous Priority Control: %s\n",
		   yesno(dev_priv->ipc_enabled));

	return 0;
}

static int i915_ipc_status_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *dev_priv = inode->i_private;

	if (!HAS_IPC(dev_priv))
		return -ENODEV;

	return single_open(file, i915_ipc_status_show, dev_priv);
}

static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf,
				     size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	int ret;
	bool enable;

	ret = kstrtobool_from_user(ubuf, len, &enable);
	if (ret < 0)
		return ret;

	intel_runtime_pm_get(dev_priv);
	if (!dev_priv->ipc_enabled && enable)
		DRM_INFO("Enabling IPC: WM will be proper only after next commit\n");
	dev_priv->wm.distrust_bios_wm = true;
	dev_priv->ipc_enabled = enable;
	intel_enable_ipc(dev_priv);
	intel_runtime_pm_put(dev_priv);

	return len;
}

static const struct file_operations i915_ipc_status_fops = {
	.owner = THIS_MODULE,
	.open = i915_ipc_status_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_ipc_status_write
};
static int i915_ddb_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct skl_ddb_allocation *ddb;
	struct skl_ddb_entry *entry;
	enum pipe pipe;
	int plane;

	if (INTEL_GEN(dev_priv) < 9)
		return -ENODEV;

	drm_modeset_lock_all(dev);

	ddb = &dev_priv->wm.skl_hw.ddb;

	seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");

	for_each_pipe(dev_priv, pipe) {
		seq_printf(m, "Pipe %c\n", pipe_name(pipe));

		for_each_universal_plane(dev_priv, pipe, plane) {
			entry = &ddb->plane[pipe][plane];
			seq_printf(m, "  Plane%-8d%8u%8u%8u\n", plane + 1,
				   entry->start, entry->end,
				   skl_ddb_entry_size(entry));
		}

		entry = &ddb->plane[pipe][PLANE_CURSOR];
		seq_printf(m, "  %-13s%8u%8u%8u\n", "Cursor", entry->start,
			   entry->end, skl_ddb_entry_size(entry));
	}

	drm_modeset_unlock_all(dev);

	return 0;
}
static void drrs_status_per_crtc(struct seq_file *m,
				 struct drm_device *dev,
				 struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_drrs *drrs = &dev_priv->drrs;
	int vrefresh = 0;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		if (connector->state->crtc != &intel_crtc->base)
			continue;

		seq_printf(m, "%s:\n", connector->name);
	}
	drm_connector_list_iter_end(&conn_iter);

	if (dev_priv->vbt.drrs_type == STATIC_DRRS_SUPPORT)
		seq_puts(m, "\tVBT: DRRS_type: Static");
	else if (dev_priv->vbt.drrs_type == SEAMLESS_DRRS_SUPPORT)
		seq_puts(m, "\tVBT: DRRS_type: Seamless");
	else if (dev_priv->vbt.drrs_type == DRRS_NOT_SUPPORTED)
		seq_puts(m, "\tVBT: DRRS_type: None");
	else
		seq_puts(m, "\tVBT: DRRS_type: FIXME: Unrecognized Value");

	seq_puts(m, "\n\n");

	if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) {
		struct intel_panel *panel;

		mutex_lock(&drrs->mutex);
		/* DRRS Supported */
		seq_puts(m, "\tDRRS Supported: Yes\n");

		/* disable_drrs() will make drrs->dp NULL */
		if (!drrs->dp) {
			seq_puts(m, "Idleness DRRS: Disabled\n");
			if (dev_priv->psr.enabled)
				seq_puts(m,
				"\tAs PSR is enabled, DRRS is not enabled\n");
			mutex_unlock(&drrs->mutex);
			return;
		}

		panel = &drrs->dp->attached_connector->panel;
		seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X",
			   drrs->busy_frontbuffer_bits);

		seq_puts(m, "\n\t\t");
		if (drrs->refresh_rate_type == DRRS_HIGH_RR) {
			seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n");
			vrefresh = panel->fixed_mode->vrefresh;
		} else if (drrs->refresh_rate_type == DRRS_LOW_RR) {
			seq_puts(m, "DRRS_State: DRRS_LOW_RR\n");
			vrefresh = panel->downclock_mode->vrefresh;
		} else {
			seq_printf(m, "DRRS_State: Unknown(%d)\n",
				   drrs->refresh_rate_type);
			mutex_unlock(&drrs->mutex);
			return;
		}

		seq_printf(m, "\t\tVrefresh: %d", vrefresh);

		seq_puts(m, "\n\t\t");
		mutex_unlock(&drrs->mutex);
	} else {
		/* DRRS not supported. Print the VBT parameter */
		seq_puts(m, "\tDRRS Supported : No");
	}
	seq_puts(m, "\n");
}
static int i915_drrs_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *intel_crtc;
	int active_crtc_cnt = 0;

	drm_modeset_lock_all(dev);
	for_each_intel_crtc(dev, intel_crtc) {
		if (intel_crtc->base.state->active) {
			active_crtc_cnt++;
			seq_printf(m, "\nCRTC %d:  ", active_crtc_cnt);

			drrs_status_per_crtc(m, dev, intel_crtc);
		}
	}
	drm_modeset_unlock_all(dev);

	if (!active_crtc_cnt)
		seq_puts(m, "No active crtc found\n");

	return 0;
}
static int i915_dp_mst_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_encoder *intel_encoder;
	struct intel_digital_port *intel_dig_port;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		intel_encoder = intel_attached_encoder(connector);
		if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		intel_dig_port = enc_to_dig_port(&intel_encoder->base);
		if (!intel_dig_port->dp.can_mst)
			continue;

		seq_printf(m, "MST Source Port %c\n",
			   port_name(intel_dig_port->base.port));
		drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
	}
	drm_connector_list_iter_end(&conn_iter);

	return 0;
}
static ssize_t i915_displayport_test_active_write(struct file *file,
						  const char __user *ubuf,
						  size_t len, loff_t *offp)
{
	char *input_buffer;
	int status = 0;
	struct drm_device *dev;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct intel_dp *intel_dp;
	int val = 0;

	dev = ((struct seq_file *)file->private_data)->private;

	if (len == 0)
		return 0;

	input_buffer = memdup_user_nul(ubuf, len);
	if (IS_ERR(input_buffer))
		return PTR_ERR(input_buffer);

	DRM_DEBUG_DRIVER("Copied %d bytes from user\n", (unsigned int)len);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct intel_encoder *encoder;

		if (connector->connector_type !=
		    DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		encoder = to_intel_encoder(connector->encoder);
		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		if (encoder && connector->status == connector_status_connected) {
			intel_dp = enc_to_intel_dp(&encoder->base);
			status = kstrtoint(input_buffer, 10, &val);
			if (status < 0)
				break;
			DRM_DEBUG_DRIVER("Got %d for test active\n", val);
			/* To prevent erroneous activation of the compliance
			 * testing code, only accept an actual value of 1 here
			 */
			if (val == 1)
				intel_dp->compliance.test_active = 1;
			else
				intel_dp->compliance.test_active = 0;
		}
	}
	drm_connector_list_iter_end(&conn_iter);
	kfree(input_buffer);
	if (status < 0)
		return status;

	*offp += len;
	return len;
}
static int i915_displayport_test_active_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	struct drm_device *dev = &dev_priv->drm;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct intel_dp *intel_dp;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct intel_encoder *encoder;

		if (connector->connector_type !=
		    DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		encoder = to_intel_encoder(connector->encoder);
		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		if (encoder && connector->status == connector_status_connected) {
			intel_dp = enc_to_intel_dp(&encoder->base);
			if (intel_dp->compliance.test_active)
				seq_puts(m, "1");
			else
				seq_puts(m, "0");
		} else
			seq_puts(m, "0");
	}
	drm_connector_list_iter_end(&conn_iter);

	return 0;
}

static int i915_displayport_test_active_open(struct inode *inode,
					     struct file *file)
{
	return single_open(file, i915_displayport_test_active_show,
			   inode->i_private);
}

static const struct file_operations i915_displayport_test_active_fops = {
	.owner = THIS_MODULE,
	.open = i915_displayport_test_active_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_displayport_test_active_write
};
static int i915_displayport_test_data_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	struct drm_device *dev = &dev_priv->drm;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct intel_dp *intel_dp;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct intel_encoder *encoder;

		if (connector->connector_type !=
		    DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		encoder = to_intel_encoder(connector->encoder);
		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		if (encoder && connector->status == connector_status_connected) {
			intel_dp = enc_to_intel_dp(&encoder->base);
			if (intel_dp->compliance.test_type ==
			    DP_TEST_LINK_EDID_READ)
				seq_printf(m, "%lx",
					   intel_dp->compliance.test_data.edid);
			else if (intel_dp->compliance.test_type ==
				 DP_TEST_LINK_VIDEO_PATTERN) {
				seq_printf(m, "hdisplay: %d\n",
					   intel_dp->compliance.test_data.hdisplay);
				seq_printf(m, "vdisplay: %d\n",
					   intel_dp->compliance.test_data.vdisplay);
				seq_printf(m, "bpc: %u\n",
					   intel_dp->compliance.test_data.bpc);
			}
		} else
			seq_puts(m, "0");
	}
	drm_connector_list_iter_end(&conn_iter);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_data);
static int i915_displayport_test_type_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	struct drm_device *dev = &dev_priv->drm;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct intel_dp *intel_dp;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct intel_encoder *encoder;

		if (connector->connector_type !=
		    DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		encoder = to_intel_encoder(connector->encoder);
		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		if (encoder && connector->status == connector_status_connected) {
			intel_dp = enc_to_intel_dp(&encoder->base);
			seq_printf(m, "%02lx", intel_dp->compliance.test_type);
		} else
			seq_puts(m, "0");
	}
	drm_connector_list_iter_end(&conn_iter);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_type);
static void wm_latency_show(struct seq_file *m, const uint16_t wm[8])
{
	struct drm_i915_private *dev_priv = m->private;
	struct drm_device *dev = &dev_priv->drm;
	int level;
	int num_levels;

	if (IS_CHERRYVIEW(dev_priv))
		num_levels = 3;
	else if (IS_VALLEYVIEW(dev_priv))
		num_levels = 1;
	else if (IS_G4X(dev_priv))
		num_levels = 3;
	else
		num_levels = ilk_wm_max_level(dev_priv) + 1;

	drm_modeset_lock_all(dev);

	for (level = 0; level < num_levels; level++) {
		unsigned int latency = wm[level];

		/*
		 * - WM1+ latency values in 0.5us units
		 * - latencies are in us on gen9/vlv/chv
		 */
		if (INTEL_GEN(dev_priv) >= 9 ||
		    IS_VALLEYVIEW(dev_priv) ||
		    IS_CHERRYVIEW(dev_priv) ||
		    IS_G4X(dev_priv))
			latency *= 10;
		else if (level > 0)
			latency *= 5;

		seq_printf(m, "WM%d %u (%u.%u usec)\n",
			   level, wm[level], latency / 10, latency % 10);
	}

	drm_modeset_unlock_all(dev);
}
static int pri_wm_latency_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	const uint16_t *latencies;

	if (INTEL_GEN(dev_priv) >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = dev_priv->wm.pri_latency;

	wm_latency_show(m, latencies);

	return 0;
}

static int spr_wm_latency_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	const uint16_t *latencies;

	if (INTEL_GEN(dev_priv) >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = dev_priv->wm.spr_latency;

	wm_latency_show(m, latencies);

	return 0;
}

static int cur_wm_latency_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	const uint16_t *latencies;

	if (INTEL_GEN(dev_priv) >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = dev_priv->wm.cur_latency;

	wm_latency_show(m, latencies);

	return 0;
}
static int pri_wm_latency_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *dev_priv = inode->i_private;

	if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
		return -ENODEV;

	return single_open(file, pri_wm_latency_show, dev_priv);
}

static int spr_wm_latency_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *dev_priv = inode->i_private;

	if (HAS_GMCH_DISPLAY(dev_priv))
		return -ENODEV;

	return single_open(file, spr_wm_latency_show, dev_priv);
}

static int cur_wm_latency_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *dev_priv = inode->i_private;

	if (HAS_GMCH_DISPLAY(dev_priv))
		return -ENODEV;

	return single_open(file, cur_wm_latency_show, dev_priv);
}
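/*
 * wm_latency_write() parses up to eight space-separated watermark latency
 * values from the user buffer and requires exactly num_levels of them for
 * the current platform; the values are stored raw into the platform
 * latency table under the modeset locks.
 */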
static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
				size_t len, loff_t *offp, uint16_t wm[8])
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	struct drm_device *dev = &dev_priv->drm;
	uint16_t new[8] = { 0 };
	int num_levels;
	int level;
	int ret;
	char tmp[32];

	if (IS_CHERRYVIEW(dev_priv))
		num_levels = 3;
	else if (IS_VALLEYVIEW(dev_priv))
		num_levels = 1;
	else if (IS_G4X(dev_priv))
		num_levels = 3;
	else
		num_levels = ilk_wm_max_level(dev_priv) + 1;

	if (len >= sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(tmp, ubuf, len))
		return -EFAULT;

	tmp[len] = '\0';

	ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
		     &new[0], &new[1], &new[2], &new[3],
		     &new[4], &new[5], &new[6], &new[7]);
	if (ret != num_levels)
		return -EINVAL;

	drm_modeset_lock_all(dev);

	for (level = 0; level < num_levels; level++)
		wm[level] = new[level];

	drm_modeset_unlock_all(dev);

	return len;
}
static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
				    size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	uint16_t *latencies;

	if (INTEL_GEN(dev_priv) >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = dev_priv->wm.pri_latency;

	return wm_latency_write(file, ubuf, len, offp, latencies);
}

static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
				    size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	uint16_t *latencies;

	if (INTEL_GEN(dev_priv) >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = dev_priv->wm.spr_latency;

	return wm_latency_write(file, ubuf, len, offp, latencies);
}

static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
				    size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	uint16_t *latencies;

	if (INTEL_GEN(dev_priv) >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = dev_priv->wm.cur_latency;

	return wm_latency_write(file, ubuf, len, offp, latencies);
}
static const struct file_operations i915_pri_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = pri_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = pri_wm_latency_write
};

static const struct file_operations i915_spr_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = spr_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = spr_wm_latency_write
};

static const struct file_operations i915_cur_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = cur_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = cur_wm_latency_write
};
static int
i915_wedged_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;

	*val = i915_terminally_wedged(&dev_priv->gpu_error);

	return 0;
}

static int
i915_wedged_set(void *data, u64 val)
{
	struct drm_i915_private *i915 = data;
	struct intel_engine_cs *engine;
	unsigned int tmp;

	/*
	 * There is no safeguard against this debugfs entry colliding
	 * with the hangcheck calling same i915_handle_error() in
	 * parallel, causing an explosion. For now we assume that the
	 * test harness is responsible enough not to inject gpu hangs
	 * while it is writing to 'i915_wedged'
	 */

	if (i915_reset_backoff(&i915->gpu_error))
		return -EAGAIN;

	for_each_engine_masked(engine, i915, val, tmp) {
		engine->hangcheck.seqno = intel_engine_get_seqno(engine);
		engine->hangcheck.stalled = true;
	}

	i915_handle_error(i915, val, I915_ERROR_CAPTURE,
			  "Manually set wedged engine mask = %llx", val);

	wait_on_bit(&i915->gpu_error.flags,
		    I915_RESET_HANDOFF,
		    TASK_UNINTERRUPTIBLE);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
			i915_wedged_get, i915_wedged_set,
			"%llu\n");
static int
fault_irq_set(struct drm_i915_private *i915,
	      unsigned long *irq,
	      unsigned long val)
{
	int err;

	err = mutex_lock_interruptible(&i915->drm.struct_mutex);
	if (err)
		return err;

	err = i915_gem_wait_for_idle(i915,
				     I915_WAIT_LOCKED |
				     I915_WAIT_INTERRUPTIBLE,
				     MAX_SCHEDULE_TIMEOUT);
	if (err)
		goto err_unlock;

	*irq = val;
	mutex_unlock(&i915->drm.struct_mutex);

	/* Flush idle worker to disarm irq */
	drain_delayed_work(&i915->gt.idle_work);

	return 0;

err_unlock:
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}
static int
i915_ring_missed_irq_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;

	*val = dev_priv->gpu_error.missed_irq_rings;

	return 0;
}

static int
i915_ring_missed_irq_set(void *data, u64 val)
{
	struct drm_i915_private *i915 = data;

	return fault_irq_set(i915, &i915->gpu_error.missed_irq_rings, val);
}

DEFINE_SIMPLE_ATTRIBUTE(i915_ring_missed_irq_fops,
			i915_ring_missed_irq_get, i915_ring_missed_irq_set,
			"0x%08llx\n");

static int
i915_ring_test_irq_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;

	*val = dev_priv->gpu_error.test_irq_rings;

	return 0;
}

static int
i915_ring_test_irq_set(void *data, u64 val)
{
	struct drm_i915_private *i915 = data;

	val &= INTEL_INFO(i915)->ring_mask;
	DRM_DEBUG_DRIVER("Masking interrupts on rings 0x%08llx\n", val);

	return fault_irq_set(i915, &i915->gpu_error.test_irq_rings, val);
}

DEFINE_SIMPLE_ATTRIBUTE(i915_ring_test_irq_fops,
			i915_ring_test_irq_get, i915_ring_test_irq_set,
			"0x%08llx\n");
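/*
 * i915_drop_caches is a bitmask interface; each DROP_* bit below selects
 * one kind of cache or state to drop. As an illustration (exact debugfs
 * file name assumed from this driver's conventions), writing the full
 * mask, e.g. "echo 0x7f > .../i915_gem_drop_caches", would wait for idle,
 * retire requests, shrink bound and unbound objects, drop idle state and
 * drain freed objects.
 */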
4114 #define DROP_UNBOUND BIT(0)
4115 #define DROP_BOUND BIT(1)
4116 #define DROP_RETIRE BIT(2)
4117 #define DROP_ACTIVE BIT(3)
4118 #define DROP_FREED BIT(4)
4119 #define DROP_SHRINK_ALL BIT(5)
4120 #define DROP_IDLE BIT(6)
4121 #define DROP_ALL (DROP_UNBOUND | \
4129 i915_drop_caches_get(void *data
, u64
*val
)
static int
i915_drop_caches_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	struct drm_device *dev = &dev_priv->drm;
	int ret = 0;

	DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n",
		  val, val & DROP_ALL);

	/* No need to check and wait for gpu resets; only libdrm auto-restarts
	 * ioctls on -EAGAIN. */
	if (val & (DROP_ACTIVE | DROP_RETIRE)) {
		ret = mutex_lock_interruptible(&dev->struct_mutex);
		if (ret)
			return ret;

		if (val & DROP_ACTIVE)
			ret = i915_gem_wait_for_idle(dev_priv,
						     I915_WAIT_INTERRUPTIBLE |
						     I915_WAIT_LOCKED,
						     MAX_SCHEDULE_TIMEOUT);

		if (val & DROP_RETIRE)
			i915_retire_requests(dev_priv);

		mutex_unlock(&dev->struct_mutex);
	}

	fs_reclaim_acquire(GFP_KERNEL);
	if (val & DROP_BOUND)
		i915_gem_shrink(dev_priv, LONG_MAX, NULL, I915_SHRINK_BOUND);

	if (val & DROP_UNBOUND)
		i915_gem_shrink(dev_priv, LONG_MAX, NULL, I915_SHRINK_UNBOUND);

	if (val & DROP_SHRINK_ALL)
		i915_gem_shrink_all(dev_priv);
	fs_reclaim_release(GFP_KERNEL);

	if (val & DROP_IDLE) {
		do {
			if (READ_ONCE(dev_priv->gt.active_requests))
				flush_delayed_work(&dev_priv->gt.retire_work);
			drain_delayed_work(&dev_priv->gt.idle_work);
		} while (READ_ONCE(dev_priv->gt.awake));
	}

	if (val & DROP_FREED)
		i915_gem_drain_freed_objects(dev_priv);

	return ret;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
			i915_drop_caches_get, i915_drop_caches_set,
			"0x%08llx\n");

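/*
 * Usage sketch for i915_gem_drop_caches (illustrative only): reading the
 * file returns DROP_ALL, so the lazy invocation writes that value back to
 * drop everything, e.g. with debugfs on DRM minor 0:
 *
 *   echo 0x7f > /sys/kernel/debug/dri/0/i915_gem_drop_caches
 *
 * 0x7f is DROP_UNBOUND | DROP_BOUND | DROP_RETIRE | DROP_ACTIVE |
 * DROP_FREED | DROP_SHRINK_ALL | DROP_IDLE, i.e. bits 0-6 above.
 */
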
static int
i915_cache_sharing_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;
	u32 snpcr;

	if (!(IS_GEN6(dev_priv) || IS_GEN7(dev_priv)))
		return -ENODEV;

	intel_runtime_pm_get(dev_priv);

	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);

	intel_runtime_pm_put(dev_priv);

	*val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;

	return 0;
}

static int
i915_cache_sharing_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	u32 snpcr;

	if (!(IS_GEN6(dev_priv) || IS_GEN7(dev_priv)))
		return -ENODEV;

	if (val > 3)
		return -EINVAL;

	intel_runtime_pm_get(dev_priv);
	DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);

	/* Update the cache sharing policy here as well */
	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
	snpcr &= ~GEN6_MBC_SNPCR_MASK;
	snpcr |= (val << GEN6_MBC_SNPCR_SHIFT);
	I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);

	intel_runtime_pm_put(dev_priv);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
			i915_cache_sharing_get, i915_cache_sharing_set,
			"%llu\n");

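/*
 * The value written to i915_cache_sharing lands in the MBC snoop control
 * field (GEN6_MBCUNIT_SNPCR) on gen6/gen7, so only the values 0-3 are
 * accepted; anything larger is rejected with -EINVAL. Illustrative only,
 * assuming DRM minor 0:
 *
 *   echo 1 > /sys/kernel/debug/dri/0/i915_cache_sharing
 */
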
static void cherryview_sseu_device_status(struct drm_i915_private *dev_priv,
					  struct sseu_dev_info *sseu)
{
#define SS_MAX 2
	const int ss_max = SS_MAX;
	u32 sig1[SS_MAX], sig2[SS_MAX];
	int ss;

	sig1[0] = I915_READ(CHV_POWER_SS0_SIG1);
	sig1[1] = I915_READ(CHV_POWER_SS1_SIG1);
	sig2[0] = I915_READ(CHV_POWER_SS0_SIG2);
	sig2[1] = I915_READ(CHV_POWER_SS1_SIG2);

	for (ss = 0; ss < ss_max; ss++) {
		unsigned int eu_cnt;

		if (sig1[ss] & CHV_SS_PG_ENABLE)
			/* skip disabled subslice */
			continue;

		sseu->slice_mask = BIT(0);
		sseu->subslice_mask[0] |= BIT(ss);
		eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) +
			 ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) +
			 ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) +
			 ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2);
		sseu->eu_total += eu_cnt;
		sseu->eu_per_subslice = max_t(unsigned int,
					      sseu->eu_per_subslice, eu_cnt);
	}
#undef SS_MAX
}

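/*
 * Each CHV_EUxx_PG_ENABLE bit above covers a pair of EUs, so a subslice
 * contributes 2 EUs to eu_total for every pair that is not power-gated;
 * a fully enabled Cherryview subslice therefore reports 8 EUs.
 */
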
static void gen10_sseu_device_status(struct drm_i915_private *dev_priv,
				     struct sseu_dev_info *sseu)
{
#define SS_MAX 6
	const struct intel_device_info *info = INTEL_INFO(dev_priv);
	u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
	int s, ss;

	for (s = 0; s < info->sseu.max_slices; s++) {
		/*
		 * FIXME: Valid SS Mask respects the spec and reads only
		 * valid bits for those registers, excluding reserved,
		 * although this seems wrong because it would leave many
		 * subslices without ACK.
		 */
		s_reg[s] = I915_READ(GEN10_SLICE_PGCTL_ACK(s)) &
			GEN10_PGCTL_VALID_SS_MASK(s);
		eu_reg[2 * s] = I915_READ(GEN10_SS01_EU_PGCTL_ACK(s));
		eu_reg[2 * s + 1] = I915_READ(GEN10_SS23_EU_PGCTL_ACK(s));
	}

	eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
		     GEN9_PGCTL_SSA_EU19_ACK |
		     GEN9_PGCTL_SSA_EU210_ACK |
		     GEN9_PGCTL_SSA_EU311_ACK;
	eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
		     GEN9_PGCTL_SSB_EU19_ACK |
		     GEN9_PGCTL_SSB_EU210_ACK |
		     GEN9_PGCTL_SSB_EU311_ACK;

	for (s = 0; s < info->sseu.max_slices; s++) {
		if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
			/* skip disabled slice */
			continue;

		sseu->slice_mask |= BIT(s);
		sseu->subslice_mask[s] = info->sseu.subslice_mask[s];

		for (ss = 0; ss < info->sseu.max_subslices; ss++) {
			unsigned int eu_cnt;

			if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
				/* skip disabled subslice */
				continue;

			eu_cnt = 2 * hweight32(eu_reg[2 * s + ss / 2] &
					       eu_mask[ss % 2]);
			sseu->eu_total += eu_cnt;
			sseu->eu_per_subslice = max_t(unsigned int,
						      sseu->eu_per_subslice,
						      eu_cnt);
		}
	}
#undef SS_MAX
}

static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
				    struct sseu_dev_info *sseu)
{
#define SS_MAX 3
	const struct intel_device_info *info = INTEL_INFO(dev_priv);
	u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
	int s, ss;

	for (s = 0; s < info->sseu.max_slices; s++) {
		s_reg[s] = I915_READ(GEN9_SLICE_PGCTL_ACK(s));
		eu_reg[2 * s] = I915_READ(GEN9_SS01_EU_PGCTL_ACK(s));
		eu_reg[2 * s + 1] = I915_READ(GEN9_SS23_EU_PGCTL_ACK(s));
	}

	eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
		     GEN9_PGCTL_SSA_EU19_ACK |
		     GEN9_PGCTL_SSA_EU210_ACK |
		     GEN9_PGCTL_SSA_EU311_ACK;
	eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
		     GEN9_PGCTL_SSB_EU19_ACK |
		     GEN9_PGCTL_SSB_EU210_ACK |
		     GEN9_PGCTL_SSB_EU311_ACK;

	for (s = 0; s < info->sseu.max_slices; s++) {
		if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
			/* skip disabled slice */
			continue;

		sseu->slice_mask |= BIT(s);

		if (IS_GEN9_BC(dev_priv))
			sseu->subslice_mask[s] =
				INTEL_INFO(dev_priv)->sseu.subslice_mask[s];

		for (ss = 0; ss < info->sseu.max_subslices; ss++) {
			unsigned int eu_cnt;

			if (IS_GEN9_LP(dev_priv)) {
				if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
					/* skip disabled subslice */
					continue;

				sseu->subslice_mask[s] |= BIT(ss);
			}

			eu_cnt = 2 * hweight32(eu_reg[2 * s + ss / 2] &
					       eu_mask[ss % 2]);
			sseu->eu_total += eu_cnt;
			sseu->eu_per_subslice = max_t(unsigned int,
						      sseu->eu_per_subslice,
						      eu_cnt);
		}
	}
#undef SS_MAX
}

static void broadwell_sseu_device_status(struct drm_i915_private *dev_priv,
					 struct sseu_dev_info *sseu)
{
	u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO);
	int s;

	sseu->slice_mask = slice_info & GEN8_LSLICESTAT_MASK;

	if (sseu->slice_mask) {
		sseu->eu_per_subslice =
			INTEL_INFO(dev_priv)->sseu.eu_per_subslice;
		for (s = 0; s < fls(sseu->slice_mask); s++) {
			sseu->subslice_mask[s] =
				INTEL_INFO(dev_priv)->sseu.subslice_mask[s];
		}
		sseu->eu_total = sseu->eu_per_subslice *
				 sseu_subslice_total(sseu);

		/* subtract fused off EU(s) from enabled slice(s) */
		for (s = 0; s < fls(sseu->slice_mask); s++) {
			u8 subslice_7eu =
				INTEL_INFO(dev_priv)->sseu.subslice_7eu[s];

			sseu->eu_total -= hweight8(subslice_7eu);
		}
	}
}

static void i915_print_sseu_info(struct seq_file *m, bool is_available_info,
				 const struct sseu_dev_info *sseu)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const char *type = is_available_info ? "Available" : "Enabled";
	int s;

	seq_printf(m, "  %s Slice Mask: %04x\n", type,
		   sseu->slice_mask);
	seq_printf(m, "  %s Slice Total: %u\n", type,
		   hweight8(sseu->slice_mask));
	seq_printf(m, "  %s Subslice Total: %u\n", type,
		   sseu_subslice_total(sseu));
	for (s = 0; s < fls(sseu->slice_mask); s++) {
		seq_printf(m, "  %s Slice%i subslices: %u\n", type,
			   s, hweight8(sseu->subslice_mask[s]));
	}
	seq_printf(m, "  %s EU Total: %u\n", type,
		   sseu->eu_total);
	seq_printf(m, "  %s EU Per Subslice: %u\n", type,
		   sseu->eu_per_subslice);

	if (!is_available_info)
		return;

	seq_printf(m, "  Has Pooled EU: %s\n", yesno(HAS_POOLED_EU(dev_priv)));
	if (HAS_POOLED_EU(dev_priv))
		seq_printf(m, "  Min EU in pool: %u\n", sseu->min_eu_in_pool);

	seq_printf(m, "  Has Slice Power Gating: %s\n",
		   yesno(sseu->has_slice_pg));
	seq_printf(m, "  Has Subslice Power Gating: %s\n",
		   yesno(sseu->has_subslice_pg));
	seq_printf(m, "  Has EU Power Gating: %s\n",
		   yesno(sseu->has_eu_pg));
}

static int i915_sseu_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct sseu_dev_info sseu;

	if (INTEL_GEN(dev_priv) < 8)
		return -ENODEV;

	seq_puts(m, "SSEU Device Info\n");
	i915_print_sseu_info(m, true, &INTEL_INFO(dev_priv)->sseu);

	seq_puts(m, "SSEU Device Status\n");
	memset(&sseu, 0, sizeof(sseu));
	sseu.max_slices = INTEL_INFO(dev_priv)->sseu.max_slices;
	sseu.max_subslices = INTEL_INFO(dev_priv)->sseu.max_subslices;
	sseu.max_eus_per_subslice =
		INTEL_INFO(dev_priv)->sseu.max_eus_per_subslice;

	intel_runtime_pm_get(dev_priv);

	if (IS_CHERRYVIEW(dev_priv)) {
		cherryview_sseu_device_status(dev_priv, &sseu);
	} else if (IS_BROADWELL(dev_priv)) {
		broadwell_sseu_device_status(dev_priv, &sseu);
	} else if (IS_GEN9(dev_priv)) {
		gen9_sseu_device_status(dev_priv, &sseu);
	} else if (INTEL_GEN(dev_priv) >= 10) {
		gen10_sseu_device_status(dev_priv, &sseu);
	}

	intel_runtime_pm_put(dev_priv);

	i915_print_sseu_info(m, false, &sseu);

	return 0;
}

static int i915_forcewake_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *i915 = inode->i_private;

	if (INTEL_GEN(i915) < 6)
		return 0;

	intel_runtime_pm_get(i915);
	intel_uncore_forcewake_user_get(i915);

	return 0;
}

static int i915_forcewake_release(struct inode *inode, struct file *file)
{
	struct drm_i915_private *i915 = inode->i_private;

	if (INTEL_GEN(i915) < 6)
		return 0;

	intel_uncore_forcewake_user_put(i915);
	intel_runtime_pm_put(i915);

	return 0;
}

static const struct file_operations i915_forcewake_fops = {
	.owner = THIS_MODULE,
	.open = i915_forcewake_open,
	.release = i915_forcewake_release,
};

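/*
 * i915_forcewake_user holds forcewake (and a runtime-pm reference) for as
 * long as the file is kept open, keeping the GT awake for register
 * inspection. A hedged shell sketch, assuming DRM minor 0:
 *
 *   exec 3</sys/kernel/debug/dri/0/i915_forcewake_user   # grab forcewake
 *   ... inspect registers ...
 *   exec 3<&-                                            # release
 */
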
static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	struct i915_hotplug *hotplug = &dev_priv->hotplug;

	seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
	seq_printf(m, "Detected: %s\n",
		   yesno(delayed_work_pending(&hotplug->reenable_work)));

	return 0;
}

static ssize_t i915_hpd_storm_ctl_write(struct file *file,
					const char __user *ubuf, size_t len,
					loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	struct i915_hotplug *hotplug = &dev_priv->hotplug;
	unsigned int new_threshold;
	int i;
	char *newline;
	char tmp[16];

	if (len >= sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(tmp, ubuf, len))
		return -EFAULT;

	tmp[len] = '\0';

	/* Strip newline, if any */
	newline = strchr(tmp, '\n');
	if (newline)
		*newline = '\0';

	if (strcmp(tmp, "reset") == 0)
		new_threshold = HPD_STORM_DEFAULT_THRESHOLD;
	else if (kstrtouint(tmp, 10, &new_threshold) != 0)
		return -EINVAL;

	if (new_threshold > 0)
		DRM_DEBUG_KMS("Setting HPD storm detection threshold to %d\n",
			      new_threshold);
	else
		DRM_DEBUG_KMS("Disabling HPD storm detection\n");

	spin_lock_irq(&dev_priv->irq_lock);
	hotplug->hpd_storm_threshold = new_threshold;
	/* Reset the HPD storm stats so we don't accidentally trigger a storm */
	for_each_hpd_pin(i)
		hotplug->stats[i].count = 0;
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Re-enable hpd immediately if we were in an irq storm */
	flush_delayed_work(&dev_priv->hotplug.reenable_work);

	return len;
}

static int i915_hpd_storm_ctl_open(struct inode *inode, struct file *file)
{
	return single_open(file, i915_hpd_storm_ctl_show, inode->i_private);
}

static const struct file_operations i915_hpd_storm_ctl_fops = {
	.owner = THIS_MODULE,
	.open = i915_hpd_storm_ctl_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_hpd_storm_ctl_write
};

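/*
 * i915_hpd_storm_ctl accepts either a decimal threshold or the literal
 * string "reset" (restoring HPD_STORM_DEFAULT_THRESHOLD); writing 0
 * disables storm detection entirely. Illustrative only, assuming DRM
 * minor 0:
 *
 *   echo 5 > /sys/kernel/debug/dri/0/i915_hpd_storm_ctl
 *   echo reset > /sys/kernel/debug/dri/0/i915_hpd_storm_ctl
 */
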
static int i915_drrs_ctl_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *intel_crtc;
	struct intel_encoder *encoder;
	struct intel_dp *intel_dp;

	if (INTEL_GEN(dev_priv) < 7)
		return -ENODEV;

	drm_modeset_lock_all(dev);
	for_each_intel_crtc(dev, intel_crtc) {
		if (!intel_crtc->base.state->active ||
		    !intel_crtc->config->has_drrs)
			continue;

		for_each_encoder_on_crtc(dev, &intel_crtc->base, encoder) {
			if (encoder->type != INTEL_OUTPUT_EDP)
				continue;

			DRM_DEBUG_DRIVER("Manually %sabling DRRS. %llu\n",
					 val ? "en" : "dis", val);

			intel_dp = enc_to_intel_dp(&encoder->base);
			if (val)
				intel_edp_drrs_enable(intel_dp,
						      intel_crtc->config);
			else
				intel_edp_drrs_disable(intel_dp,
						       intel_crtc->config);
		}
	}
	drm_modeset_unlock_all(dev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_drrs_ctl_fops, NULL, i915_drrs_ctl_set, "%llu\n");

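/*
 * i915_drrs_ctl takes a plain integer: any non-zero value enables DRRS on
 * every active pipe that supports it and has an eDP encoder, while zero
 * disables it again. Illustrative only, assuming DRM minor 0:
 *
 *   echo 1 > /sys/kernel/debug/dri/0/i915_drrs_ctl
 */
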
static ssize_t
i915_fifo_underrun_reset_write(struct file *filp,
			       const char __user *ubuf,
			       size_t cnt, loff_t *ppos)
{
	struct drm_i915_private *dev_priv = filp->private_data;
	struct intel_crtc *intel_crtc;
	struct drm_device *dev = &dev_priv->drm;
	int ret;
	bool reset;

	ret = kstrtobool_from_user(ubuf, cnt, &reset);
	if (ret)
		return ret;

	if (!reset)
		return cnt;

	for_each_intel_crtc(dev, intel_crtc) {
		struct drm_crtc_commit *commit;
		struct intel_crtc_state *crtc_state;

		ret = drm_modeset_lock_single_interruptible(&intel_crtc->base.mutex);
		if (ret)
			return ret;

		crtc_state = to_intel_crtc_state(intel_crtc->base.state);
		commit = crtc_state->base.commit;
		if (commit) {
			ret = wait_for_completion_interruptible(&commit->hw_done);
			if (!ret)
				ret = wait_for_completion_interruptible(&commit->flip_done);
		}

		if (!ret && crtc_state->base.active) {
			DRM_DEBUG_KMS("Re-arming FIFO underruns on pipe %c\n",
				      pipe_name(intel_crtc->pipe));

			intel_crtc_arm_fifo_underrun(intel_crtc, crtc_state);
		}

		drm_modeset_unlock(&intel_crtc->base.mutex);

		if (ret)
			return ret;
	}

	ret = intel_fbc_reset_underrun(dev_priv);
	if (ret)
		return ret;

	return cnt;
}

static const struct file_operations i915_fifo_underrun_reset_ops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.write = i915_fifo_underrun_reset_write,
	.llseek = default_llseek,
};

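/*
 * i915_fifo_underrun_reset is parsed with kstrtobool_from_user(), so a
 * true value ("1", "y", "Y") re-arms FIFO underrun reporting on all
 * active pipes (plus FBC underrun reporting); a false value is accepted
 * but does nothing. Illustrative only, assuming DRM minor 0:
 *
 *   echo 1 > /sys/kernel/debug/dri/0/i915_fifo_underrun_reset
 */
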
static const struct drm_info_list i915_debugfs_list[] = {
	{"i915_capabilities", i915_capabilities, 0},
	{"i915_gem_objects", i915_gem_object_info, 0},
	{"i915_gem_gtt", i915_gem_gtt_info, 0},
	{"i915_gem_stolen", i915_gem_stolen_list_info },
	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
	{"i915_gem_interrupt", i915_interrupt_info, 0},
	{"i915_gem_batch_pool", i915_gem_batch_pool_info, 0},
	{"i915_guc_info", i915_guc_info, 0},
	{"i915_guc_load_status", i915_guc_load_status_info, 0},
	{"i915_guc_log_dump", i915_guc_log_dump, 0},
	{"i915_guc_load_err_log_dump", i915_guc_log_dump, 0, (void *)1},
	{"i915_guc_stage_pool", i915_guc_stage_pool, 0},
	{"i915_huc_load_status", i915_huc_load_status_info, 0},
	{"i915_frequency_info", i915_frequency_info, 0},
	{"i915_hangcheck_info", i915_hangcheck_info, 0},
	{"i915_reset_info", i915_reset_info, 0},
	{"i915_drpc_info", i915_drpc_info, 0},
	{"i915_emon_status", i915_emon_status, 0},
	{"i915_ring_freq_table", i915_ring_freq_table, 0},
	{"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
	{"i915_fbc_status", i915_fbc_status, 0},
	{"i915_ips_status", i915_ips_status, 0},
	{"i915_sr_status", i915_sr_status, 0},
	{"i915_opregion", i915_opregion, 0},
	{"i915_vbt", i915_vbt, 0},
	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
	{"i915_context_status", i915_context_status, 0},
	{"i915_forcewake_domains", i915_forcewake_domains, 0},
	{"i915_swizzle_info", i915_swizzle_info, 0},
	{"i915_ppgtt_info", i915_ppgtt_info, 0},
	{"i915_llc", i915_llc, 0},
	{"i915_edp_psr_status", i915_edp_psr_status, 0},
	{"i915_energy_uJ", i915_energy_uJ, 0},
	{"i915_runtime_pm_status", i915_runtime_pm_status, 0},
	{"i915_power_domain_info", i915_power_domain_info, 0},
	{"i915_dmc_info", i915_dmc_info, 0},
	{"i915_display_info", i915_display_info, 0},
	{"i915_engine_info", i915_engine_info, 0},
	{"i915_rcs_topology", i915_rcs_topology, 0},
	{"i915_shrinker_info", i915_shrinker_info, 0},
	{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
	{"i915_dp_mst_info", i915_dp_mst_info, 0},
	{"i915_wa_registers", i915_wa_registers, 0},
	{"i915_ddb_info", i915_ddb_info, 0},
	{"i915_sseu_status", i915_sseu_status, 0},
	{"i915_drrs_status", i915_drrs_status, 0},
	{"i915_rps_boost_info", i915_rps_boost_info, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)

static const struct i915_debugfs_files {
	const char *name;
	const struct file_operations *fops;
} i915_debugfs_files[] = {
	{"i915_wedged", &i915_wedged_fops},
	{"i915_cache_sharing", &i915_cache_sharing_fops},
	{"i915_ring_missed_irq", &i915_ring_missed_irq_fops},
	{"i915_ring_test_irq", &i915_ring_test_irq_fops},
	{"i915_gem_drop_caches", &i915_drop_caches_fops},
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
	{"i915_error_state", &i915_error_state_fops},
	{"i915_gpu_info", &i915_gpu_info_fops},
#endif
	{"i915_fifo_underrun_reset", &i915_fifo_underrun_reset_ops},
	{"i915_next_seqno", &i915_next_seqno_fops},
	{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
	{"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
	{"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
	{"i915_fbc_false_color", &i915_fbc_false_color_fops},
	{"i915_dp_test_data", &i915_displayport_test_data_fops},
	{"i915_dp_test_type", &i915_displayport_test_type_fops},
	{"i915_dp_test_active", &i915_displayport_test_active_fops},
	{"i915_guc_log_level", &i915_guc_log_level_fops},
	{"i915_guc_log_relay", &i915_guc_log_relay_fops},
	{"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops},
	{"i915_ipc_status", &i915_ipc_status_fops},
	{"i915_drrs_ctl", &i915_drrs_ctl_fops},
	{"i915_edp_psr_debug", &i915_edp_psr_debug_fops}
};

int i915_debugfs_register(struct drm_i915_private *dev_priv)
{
	struct drm_minor *minor = dev_priv->drm.primary;
	struct dentry *ent;
	int i;

	ent = debugfs_create_file("i915_forcewake_user", S_IRUSR,
				  minor->debugfs_root, to_i915(minor->dev),
				  &i915_forcewake_fops);
	if (!ent)
		return -ENOMEM;

	for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
		ent = debugfs_create_file(i915_debugfs_files[i].name,
					  S_IRUGO | S_IWUSR,
					  minor->debugfs_root,
					  to_i915(minor->dev),
					  i915_debugfs_files[i].fops);
		if (!ent)
			return -ENOMEM;
	}

	return drm_debugfs_create_files(i915_debugfs_list,
					I915_DEBUGFS_ENTRIES,
					minor->debugfs_root, minor);
}

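/*
 * With the usual debugfs mount point, everything registered above appears
 * under /sys/kernel/debug/dri/<minor>/: the i915_debugfs_files[] entries
 * as read/write files and the i915_debugfs_list[] entries as read-only
 * seq_file nodes.
 */
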
struct dpcd_block {
	/* DPCD dump start address. */
	unsigned int offset;
	/* DPCD dump end address, inclusive. If unset, .size will be used. */
	unsigned int end;
	/* DPCD dump size. Used if .end is unset. If unset, defaults to 1. */
	size_t size;
	/* Only valid for eDP. */
	bool edp;
};

static const struct dpcd_block i915_dpcd_debug[] = {
	{ .offset = DP_DPCD_REV, .size = DP_RECEIVER_CAP_SIZE },
	{ .offset = DP_PSR_SUPPORT, .end = DP_PSR_CAPS },
	{ .offset = DP_DOWNSTREAM_PORT_0, .size = 16 },
	{ .offset = DP_LINK_BW_SET, .end = DP_EDP_CONFIGURATION_SET },
	{ .offset = DP_SINK_COUNT, .end = DP_ADJUST_REQUEST_LANE2_3 },
	{ .offset = DP_SET_POWER },
	{ .offset = DP_EDP_DPCD_REV },
	{ .offset = DP_EDP_GENERAL_CAP_1, .end = DP_EDP_GENERAL_CAP_3 },
	{ .offset = DP_EDP_DISPLAY_CONTROL_REGISTER, .end = DP_EDP_BACKLIGHT_FREQ_CAP_MAX_LSB },
	{ .offset = DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET, .end = DP_EDP_DBC_MAXIMUM_BRIGHTNESS_SET },
};

static int i915_dpcd_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct intel_dp *intel_dp =
		enc_to_intel_dp(&intel_attached_encoder(connector)->base);
	u8 buf[16];
	ssize_t err;
	int i;

	if (connector->status != connector_status_connected)
		return -ENODEV;

	for (i = 0; i < ARRAY_SIZE(i915_dpcd_debug); i++) {
		const struct dpcd_block *b = &i915_dpcd_debug[i];
		size_t size = b->end ? b->end - b->offset + 1 : (b->size ?: 1);

		if (b->edp &&
		    connector->connector_type != DRM_MODE_CONNECTOR_eDP)
			continue;

		/* low tech for now */
		if (WARN_ON(size > sizeof(buf)))
			continue;

		err = drm_dp_dpcd_read(&intel_dp->aux, b->offset, buf, size);
		if (err <= 0) {
			DRM_ERROR("dpcd read (%zu bytes at %u) failed (%zd)\n",
				  size, b->offset, err);
			continue;
		}

		seq_printf(m, "%04x: %*ph\n", b->offset, (int) size, buf);
	}

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(i915_dpcd);

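/*
 * The "%*ph" specifier prints the block as space-separated hex bytes, so
 * each line of i915_dpcd output looks roughly like:
 *
 *   0000: 12 14 c4 81 01 01 01 01
 *
 * (offset, then the bytes read from the sink; the values here are made
 * up for illustration.)
 */
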
static int i915_panel_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct intel_dp *intel_dp =
		enc_to_intel_dp(&intel_attached_encoder(connector)->base);

	if (connector->status != connector_status_connected)
		return -ENODEV;

	seq_printf(m, "Panel power up delay: %d\n",
		   intel_dp->panel_power_up_delay);
	seq_printf(m, "Panel power down delay: %d\n",
		   intel_dp->panel_power_down_delay);
	seq_printf(m, "Backlight on delay: %d\n",
		   intel_dp->backlight_on_delay);
	seq_printf(m, "Backlight off delay: %d\n",
		   intel_dp->backlight_off_delay);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(i915_panel);

/**
 * i915_debugfs_connector_add - add i915 specific connector debugfs files
 * @connector: pointer to a registered drm_connector
 *
 * Cleanup will be done by drm_connector_unregister() through a call to
 * drm_debugfs_connector_remove().
 *
 * Returns 0 on success, negative error codes on error.
 */
int i915_debugfs_connector_add(struct drm_connector *connector)
{
	struct dentry *root = connector->debugfs_entry;

	/* The connector must have been registered beforehand. */
	if (!root)
		return -ENODEV;

	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
	    connector->connector_type == DRM_MODE_CONNECTOR_eDP)
		debugfs_create_file("i915_dpcd", S_IRUGO, root,
				    connector, &i915_dpcd_fops);

	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
		debugfs_create_file("i915_panel_timings", S_IRUGO, root,
				    connector, &i915_panel_fops);
		debugfs_create_file("i915_psr_sink_status", S_IRUGO, root,
				    connector, &i915_psr_sink_status_fops);
	}

	return 0;
}