/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *
 */
#include <linux/sched/mm.h>
#include <linux/sort.h>

#include <drm/drm_debugfs.h>

#include "gem/i915_gem_context.h"
#include "gt/intel_gt_buffer_pool.h"
#include "gt/intel_gt_clock_utils.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_gt_requests.h"
#include "gt/intel_reset.h"
#include "gt/intel_rc6.h"
#include "gt/intel_rps.h"
#include "gt/intel_sseu_debugfs.h"

#include "i915_debugfs.h"
#include "i915_debugfs_params.h"
#include "i915_trace.h"
#include "intel_sideband.h"
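/* drm_debugfs info nodes hand us a drm_info_node; map it back to our i915. */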
static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
{
	return to_i915(node->minor->dev);
}
static int i915_capabilities(struct seq_file *m, void *data)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);
	struct drm_printer p = drm_seq_file_printer(m);

	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(i915));

	intel_device_info_print_static(INTEL_INFO(i915), &p);
	intel_device_info_print_runtime(RUNTIME_INFO(i915), &p);
	intel_gt_info_print(&i915->gt.info, &p);
	intel_driver_caps_print(&i915->caps, &p);

	kernel_param_lock(THIS_MODULE);
	i915_params_dump(&i915->params, &p);
	kernel_param_unlock(THIS_MODULE);

	return 0;
}
static char get_tiling_flag(struct drm_i915_gem_object *obj)
{
	switch (i915_gem_object_get_tiling(obj)) {
	default:
	case I915_TILING_NONE: return ' ';
	case I915_TILING_X: return 'X';
	case I915_TILING_Y: return 'Y';
	}
}
static char get_global_flag(struct drm_i915_gem_object *obj)
{
	return READ_ONCE(obj->userfault_count) ? 'g' : ' ';
}
static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
{
	return obj->mm.mapping ? 'M' : ' ';
}
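/*
 * Render a GTT page-size mask as text: a single known bit maps to a static
 * name, while a combination is written into the caller's buffer as a
 * "2M, 64K, 4K"-style list.
 */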
static const char *
stringify_page_sizes(unsigned int page_sizes, char *buf, size_t len)
{
	int x = 0;

	switch (page_sizes) {
	case 0:
		return "";
	case I915_GTT_PAGE_SIZE_4K:
		return "4K";
	case I915_GTT_PAGE_SIZE_64K:
		return "64K";
	case I915_GTT_PAGE_SIZE_2M:
		return "2M";
	default:
		if (!buf)
			return "M";

		if (page_sizes & I915_GTT_PAGE_SIZE_2M)
			x += snprintf(buf + x, len - x, "2M, ");
		if (page_sizes & I915_GTT_PAGE_SIZE_64K)
			x += snprintf(buf + x, len - x, "64K, ");
		if (page_sizes & I915_GTT_PAGE_SIZE_4K)
			x += snprintf(buf + x, len - x, "4K, ");

		/* trim the trailing ", " */
		buf[x - 2] = '\0';

		return buf;
	}
}
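/*
 * One-line object summary: address, tiling/global/map flags, size, domains
 * and cache level, then a "(...)" blob per bound VMA describing its GTT view,
 * followed by fence, stolen, framebuffer and last-write-engine annotations.
 */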
void
i915_debugfs_describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct intel_engine_cs *engine;
	struct i915_vma *vma;
	int pin_count = 0;

	seq_printf(m, "%pK: %c%c%c %8zdKiB %02x %02x %s%s%s",
		   &obj->base,
		   get_tiling_flag(obj),
		   get_global_flag(obj),
		   get_pin_mapped_flag(obj),
		   obj->base.size / 1024,
		   obj->read_domains,
		   obj->write_domain,
		   i915_cache_level_str(dev_priv, obj->cache_level),
		   obj->mm.dirty ? " dirty" : "",
		   obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : "");
	if (obj->base.name)
		seq_printf(m, " (name: %d)", obj->base.name);

	spin_lock(&obj->vma.lock);
	list_for_each_entry(vma, &obj->vma.list, obj_link) {
		if (!drm_mm_node_allocated(&vma->node))
			continue;

		spin_unlock(&obj->vma.lock);

		if (i915_vma_is_pinned(vma))
			pin_count++;

		seq_printf(m, " (%sgtt offset: %08llx, size: %08llx, pages: %s",
			   i915_vma_is_ggtt(vma) ? "g" : "pp",
			   vma->node.start, vma->node.size,
			   stringify_page_sizes(vma->page_sizes.gtt, NULL, 0));
		if (i915_vma_is_ggtt(vma)) {
			switch (vma->ggtt_view.type) {
			case I915_GGTT_VIEW_NORMAL:
				seq_puts(m, ", normal");
				break;

			case I915_GGTT_VIEW_PARTIAL:
				seq_printf(m, ", partial [%08llx+%x]",
					   vma->ggtt_view.partial.offset << PAGE_SHIFT,
					   vma->ggtt_view.partial.size << PAGE_SHIFT);
				break;

			case I915_GGTT_VIEW_ROTATED:
				seq_printf(m, ", rotated [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]",
					   vma->ggtt_view.rotated.plane[0].width,
					   vma->ggtt_view.rotated.plane[0].height,
					   vma->ggtt_view.rotated.plane[0].stride,
					   vma->ggtt_view.rotated.plane[0].offset,
					   vma->ggtt_view.rotated.plane[1].width,
					   vma->ggtt_view.rotated.plane[1].height,
					   vma->ggtt_view.rotated.plane[1].stride,
					   vma->ggtt_view.rotated.plane[1].offset);
				break;

			case I915_GGTT_VIEW_REMAPPED:
				seq_printf(m, ", remapped [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]",
					   vma->ggtt_view.remapped.plane[0].width,
					   vma->ggtt_view.remapped.plane[0].height,
					   vma->ggtt_view.remapped.plane[0].stride,
					   vma->ggtt_view.remapped.plane[0].offset,
					   vma->ggtt_view.remapped.plane[1].width,
					   vma->ggtt_view.remapped.plane[1].height,
					   vma->ggtt_view.remapped.plane[1].stride,
					   vma->ggtt_view.remapped.plane[1].offset);
				break;

			default:
				MISSING_CASE(vma->ggtt_view.type);
				break;
			}
		}
		if (vma->fence)
			seq_printf(m, " , fence: %d", vma->fence->id);
		seq_puts(m, ")");

		spin_lock(&obj->vma.lock);
	}
	spin_unlock(&obj->vma.lock);

	seq_printf(m, " (pinned x %d)", pin_count);
	if (obj->stolen)
		seq_printf(m, " (stolen: %08llx)", obj->stolen->start);
	if (i915_gem_object_is_framebuffer(obj))
		seq_printf(m, " (fb)");

	engine = i915_gem_object_last_write_engine(obj);
	if (engine)
		seq_printf(m, " (%s)", engine->name);
}
struct file_stats {
	struct i915_address_space *vm;
	unsigned long count;
	u64 total;
	u64 active, inactive;
	u64 closed;
};
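/*
 * Callback for idr_for_each(): fold one object's sizes into a file_stats
 * bucket. With stats->vm set, only the VMA bound into that address space is
 * counted; otherwise every allocated GGTT VMA of the object is.
 */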
static int per_file_stats(int id, void *ptr, void *data)
{
	struct drm_i915_gem_object *obj = ptr;
	struct file_stats *stats = data;
	struct i915_vma *vma;

	if (IS_ERR_OR_NULL(obj) || !kref_get_unless_zero(&obj->base.refcount))
		return 0;

	stats->count++;
	stats->total += obj->base.size;

	spin_lock(&obj->vma.lock);
	if (!stats->vm) {
		for_each_ggtt_vma(vma, obj) {
			if (!drm_mm_node_allocated(&vma->node))
				continue;

			if (i915_vma_is_active(vma))
				stats->active += vma->node.size;
			else
				stats->inactive += vma->node.size;

			if (i915_vma_is_closed(vma))
				stats->closed += vma->node.size;
		}
	} else {
		struct rb_node *p = obj->vma.tree.rb_node;

		while (p) {
			long cmp;

			vma = rb_entry(p, typeof(*vma), obj_node);
			cmp = i915_vma_compare(vma, stats->vm, NULL);
			if (cmp == 0) {
				if (drm_mm_node_allocated(&vma->node)) {
					if (i915_vma_is_active(vma))
						stats->active += vma->node.size;
					else
						stats->inactive += vma->node.size;

					if (i915_vma_is_closed(vma))
						stats->closed += vma->node.size;
				}
				break;
			}
			if (cmp < 0)
				p = p->rb_right;
			else
				p = p->rb_left;
		}
	}
	spin_unlock(&obj->vma.lock);

	i915_gem_object_put(obj);
	return 0;
}
#define print_file_stats(m, name, stats) do { \
	if (stats.count) \
		seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu closed)\n", \
			   name, \
			   stats.count, \
			   stats.total, \
			   stats.active, \
			   stats.inactive, \
			   stats.closed); \
} while (0)
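/*
 * Print one file_stats line per client context plus an aggregate
 * "[k]contexts" line. The contexts lock is dropped while each context is
 * inspected, so the walk re-validates its cursor with list_safe_reset_next().
 */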
static void print_context_stats(struct seq_file *m,
				struct drm_i915_private *i915)
{
	struct file_stats kstats = {};
	struct i915_gem_context *ctx, *cn;

	spin_lock(&i915->gem.contexts.lock);
	list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) {
		struct i915_gem_engines_iter it;
		struct intel_context *ce;

		if (!kref_get_unless_zero(&ctx->ref))
			continue;

		spin_unlock(&i915->gem.contexts.lock);

		for_each_gem_engine(ce,
				    i915_gem_context_lock_engines(ctx), it) {
			if (intel_context_pin_if_active(ce)) {
				rcu_read_lock();
				if (ce->state)
					per_file_stats(0,
						       ce->state->obj, &kstats);
				per_file_stats(0, ce->ring->vma->obj, &kstats);
				rcu_read_unlock();
				intel_context_unpin(ce);
			}
		}
		i915_gem_context_unlock_engines(ctx);

		mutex_lock(&ctx->mutex);
		if (!IS_ERR_OR_NULL(ctx->file_priv)) {
			struct file_stats stats = {
				.vm = rcu_access_pointer(ctx->vm),
			};
			struct drm_file *file = ctx->file_priv->file;
			struct task_struct *task;
			char name[80];

			rcu_read_lock();
			idr_for_each(&file->object_idr, per_file_stats, &stats);
			rcu_read_unlock();

			rcu_read_lock();
			task = pid_task(ctx->pid ?: file->pid, PIDTYPE_PID);
			snprintf(name, sizeof(name), "%s",
				 task ? task->comm : "<unknown>");
			rcu_read_unlock();

			print_file_stats(m, name, stats);
		}
		mutex_unlock(&ctx->mutex);

		spin_lock(&i915->gem.contexts.lock);
		list_safe_reset_next(ctx, cn, link);
		i915_gem_context_put(ctx);
	}
	spin_unlock(&i915->gem.contexts.lock);

	print_file_stats(m, "[k]contexts", kstats);
}
static int i915_gem_object_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);
	struct intel_memory_region *mr;
	enum intel_region_id id;

	seq_printf(m, "%u shrinkable [%u free] objects, %llu bytes\n",
		   i915->mm.shrink_count,
		   atomic_read(&i915->mm.free_count),
		   i915->mm.shrink_memory);
	for_each_memory_region(mr, i915, id)
		seq_printf(m, "%s: total:%pa, available:%pa bytes\n",
			   mr->name, &mr->total, &mr->avail);
	seq_putc(m, '\n');

	print_context_stats(m, i915);

	return 0;
}
static void gen8_display_interrupt_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		enum intel_display_power_domain power_domain;
		intel_wakeref_t wakeref;

		power_domain = POWER_DOMAIN_PIPE(pipe);
		wakeref = intel_display_power_get_if_enabled(dev_priv,
							     power_domain);
		if (!wakeref) {
			seq_printf(m, "Pipe %c power disabled\n",
				   pipe_name(pipe));
			continue;
		}
		seq_printf(m, "Pipe %c IMR:\t%08x\n",
			   pipe_name(pipe),
			   I915_READ(GEN8_DE_PIPE_IMR(pipe)));
		seq_printf(m, "Pipe %c IIR:\t%08x\n",
			   pipe_name(pipe),
			   I915_READ(GEN8_DE_PIPE_IIR(pipe)));
		seq_printf(m, "Pipe %c IER:\t%08x\n",
			   pipe_name(pipe),
			   I915_READ(GEN8_DE_PIPE_IER(pipe)));

		intel_display_power_put(dev_priv, power_domain, wakeref);
	}

	seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
		   I915_READ(GEN8_DE_PORT_IMR));
	seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
		   I915_READ(GEN8_DE_PORT_IIR));
	seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
		   I915_READ(GEN8_DE_PORT_IER));

	seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
		   I915_READ(GEN8_DE_MISC_IMR));
	seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
		   I915_READ(GEN8_DE_MISC_IIR));
	seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
		   I915_READ(GEN8_DE_MISC_IER));

	seq_printf(m, "PCU interrupt mask:\t%08x\n",
		   I915_READ(GEN8_PCU_IMR));
	seq_printf(m, "PCU interrupt identity:\t%08x\n",
		   I915_READ(GEN8_PCU_IIR));
	seq_printf(m, "PCU interrupt enable:\t%08x\n",
		   I915_READ(GEN8_PCU_IER));
}
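/*
 * Dump interrupt enable/identity/mask state. The register layout differs
 * substantially between generations, hence the per-platform branches below.
 */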
static int i915_interrupt_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_engine_cs *engine;
	intel_wakeref_t wakeref;
	int i, pipe;

	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	if (IS_CHERRYVIEW(dev_priv)) {
		intel_wakeref_t pref;

		seq_printf(m, "Master Interrupt Control:\t%08x\n",
			   I915_READ(GEN8_MASTER_IRQ));

		seq_printf(m, "Display IER:\t%08x\n",
			   I915_READ(VLV_IER));
		seq_printf(m, "Display IIR:\t%08x\n",
			   I915_READ(VLV_IIR));
		seq_printf(m, "Display IIR_RW:\t%08x\n",
			   I915_READ(VLV_IIR_RW));
		seq_printf(m, "Display IMR:\t%08x\n",
			   I915_READ(VLV_IMR));
		for_each_pipe(dev_priv, pipe) {
			enum intel_display_power_domain power_domain;

			power_domain = POWER_DOMAIN_PIPE(pipe);
			pref = intel_display_power_get_if_enabled(dev_priv,
								  power_domain);
			if (!pref) {
				seq_printf(m, "Pipe %c power disabled\n",
					   pipe_name(pipe));
				continue;
			}

			seq_printf(m, "Pipe %c stat:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));

			intel_display_power_put(dev_priv, power_domain, pref);
		}

		pref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
		seq_printf(m, "Port hotplug:\t%08x\n",
			   I915_READ(PORT_HOTPLUG_EN));
		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
			   I915_READ(VLV_DPFLIPSTAT));
		seq_printf(m, "DPINVGTT:\t%08x\n",
			   I915_READ(DPINVGTT));
		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, pref);

		for (i = 0; i < 4; i++) {
			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IMR(i)));
			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IIR(i)));
			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IER(i)));
		}

		seq_printf(m, "PCU interrupt mask:\t%08x\n",
			   I915_READ(GEN8_PCU_IMR));
		seq_printf(m, "PCU interrupt identity:\t%08x\n",
			   I915_READ(GEN8_PCU_IIR));
		seq_printf(m, "PCU interrupt enable:\t%08x\n",
			   I915_READ(GEN8_PCU_IER));
	} else if (INTEL_GEN(dev_priv) >= 11) {
		if (HAS_MASTER_UNIT_IRQ(dev_priv))
			seq_printf(m, "Master Unit Interrupt Control: %08x\n",
				   I915_READ(DG1_MSTR_UNIT_INTR));

		seq_printf(m, "Master Interrupt Control: %08x\n",
			   I915_READ(GEN11_GFX_MSTR_IRQ));

		seq_printf(m, "Render/Copy Intr Enable: %08x\n",
			   I915_READ(GEN11_RENDER_COPY_INTR_ENABLE));
		seq_printf(m, "VCS/VECS Intr Enable: %08x\n",
			   I915_READ(GEN11_VCS_VECS_INTR_ENABLE));
		seq_printf(m, "GUC/SG Intr Enable:\t %08x\n",
			   I915_READ(GEN11_GUC_SG_INTR_ENABLE));
		seq_printf(m, "GPM/WGBOXPERF Intr Enable: %08x\n",
			   I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE));
		seq_printf(m, "Crypto Intr Enable:\t %08x\n",
			   I915_READ(GEN11_CRYPTO_RSVD_INTR_ENABLE));
		seq_printf(m, "GUnit/CSME Intr Enable:\t %08x\n",
			   I915_READ(GEN11_GUNIT_CSME_INTR_ENABLE));

		seq_printf(m, "Display Interrupt Control:\t%08x\n",
			   I915_READ(GEN11_DISPLAY_INT_CTL));

		gen8_display_interrupt_info(m);
	} else if (INTEL_GEN(dev_priv) >= 8) {
		seq_printf(m, "Master Interrupt Control:\t%08x\n",
			   I915_READ(GEN8_MASTER_IRQ));

		for (i = 0; i < 4; i++) {
			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IMR(i)));
			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IIR(i)));
			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IER(i)));
		}

		gen8_display_interrupt_info(m);
	} else if (IS_VALLEYVIEW(dev_priv)) {
		intel_wakeref_t pref;

		seq_printf(m, "Display IER:\t%08x\n",
			   I915_READ(VLV_IER));
		seq_printf(m, "Display IIR:\t%08x\n",
			   I915_READ(VLV_IIR));
		seq_printf(m, "Display IIR_RW:\t%08x\n",
			   I915_READ(VLV_IIR_RW));
		seq_printf(m, "Display IMR:\t%08x\n",
			   I915_READ(VLV_IMR));
		for_each_pipe(dev_priv, pipe) {
			enum intel_display_power_domain power_domain;

			power_domain = POWER_DOMAIN_PIPE(pipe);
			pref = intel_display_power_get_if_enabled(dev_priv,
								  power_domain);
			if (!pref) {
				seq_printf(m, "Pipe %c power disabled\n",
					   pipe_name(pipe));
				continue;
			}

			seq_printf(m, "Pipe %c stat:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));
			intel_display_power_put(dev_priv, power_domain, pref);
		}

		seq_printf(m, "Master IER:\t%08x\n",
			   I915_READ(VLV_MASTER_IER));

		seq_printf(m, "Render IER:\t%08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Render IIR:\t%08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Render IMR:\t%08x\n",
			   I915_READ(GTIMR));

		seq_printf(m, "PM IER:\t\t%08x\n",
			   I915_READ(GEN6_PMIER));
		seq_printf(m, "PM IIR:\t\t%08x\n",
			   I915_READ(GEN6_PMIIR));
		seq_printf(m, "PM IMR:\t\t%08x\n",
			   I915_READ(GEN6_PMIMR));

		pref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
		seq_printf(m, "Port hotplug:\t%08x\n",
			   I915_READ(PORT_HOTPLUG_EN));
		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
			   I915_READ(VLV_DPFLIPSTAT));
		seq_printf(m, "DPINVGTT:\t%08x\n",
			   I915_READ(DPINVGTT));
		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, pref);
	} else if (!HAS_PCH_SPLIT(dev_priv)) {
		seq_printf(m, "Interrupt enable: %08x\n",
			   I915_READ(GEN2_IER));
		seq_printf(m, "Interrupt identity: %08x\n",
			   I915_READ(GEN2_IIR));
		seq_printf(m, "Interrupt mask: %08x\n",
			   I915_READ(GEN2_IMR));
		for_each_pipe(dev_priv, pipe)
			seq_printf(m, "Pipe %c stat: %08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));
	} else {
		seq_printf(m, "North Display Interrupt enable: %08x\n",
			   I915_READ(DEIER));
		seq_printf(m, "North Display Interrupt identity: %08x\n",
			   I915_READ(DEIIR));
		seq_printf(m, "North Display Interrupt mask: %08x\n",
			   I915_READ(DEIMR));
		seq_printf(m, "South Display Interrupt enable: %08x\n",
			   I915_READ(SDEIER));
		seq_printf(m, "South Display Interrupt identity: %08x\n",
			   I915_READ(SDEIIR));
		seq_printf(m, "South Display Interrupt mask: %08x\n",
			   I915_READ(SDEIMR));
		seq_printf(m, "Graphics Interrupt enable: %08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Graphics Interrupt identity: %08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Graphics Interrupt mask: %08x\n",
			   I915_READ(GTIMR));
	}
	if (INTEL_GEN(dev_priv) >= 11) {
		seq_printf(m, "RCS Intr Mask:\t %08x\n",
			   I915_READ(GEN11_RCS0_RSVD_INTR_MASK));
		seq_printf(m, "BCS Intr Mask:\t %08x\n",
			   I915_READ(GEN11_BCS_RSVD_INTR_MASK));
		seq_printf(m, "VCS0/VCS1 Intr Mask:\t %08x\n",
			   I915_READ(GEN11_VCS0_VCS1_INTR_MASK));
		seq_printf(m, "VCS2/VCS3 Intr Mask:\t %08x\n",
			   I915_READ(GEN11_VCS2_VCS3_INTR_MASK));
		seq_printf(m, "VECS0/VECS1 Intr Mask:\t %08x\n",
			   I915_READ(GEN11_VECS0_VECS1_INTR_MASK));
		seq_printf(m, "GUC/SG Intr Mask:\t %08x\n",
			   I915_READ(GEN11_GUC_SG_INTR_MASK));
		seq_printf(m, "GPM/WGBOXPERF Intr Mask: %08x\n",
			   I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK));
		seq_printf(m, "Crypto Intr Mask:\t %08x\n",
			   I915_READ(GEN11_CRYPTO_RSVD_INTR_MASK));
		seq_printf(m, "Gunit/CSME Intr Mask:\t %08x\n",
			   I915_READ(GEN11_GUNIT_CSME_INTR_MASK));
	} else if (INTEL_GEN(dev_priv) >= 6) {
		for_each_uabi_engine(engine, dev_priv) {
			seq_printf(m,
				   "Graphics Interrupt mask (%s): %08x\n",
				   engine->name, ENGINE_READ(engine, RING_IMR));
		}
	}

	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);

	return 0;
}
static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);
	int i;

	seq_printf(m, "Total fences = %d\n", i915->ggtt.num_fences);

	rcu_read_lock();
	for (i = 0; i < i915->ggtt.num_fences; i++) {
		struct i915_fence_reg *reg = &i915->ggtt.fence_regs[i];
		struct i915_vma *vma = reg->vma;

		seq_printf(m, "Fence %d, pin count = %d, object = ",
			   i, atomic_read(&reg->pin_count));
		if (!vma)
			seq_puts(m, "unused");
		else
			i915_debugfs_describe_obj(m, vma->obj);
		seq_putc(m, '\n');
	}
	rcu_read_unlock();

	return 0;
}
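/* GPU error-state plumbing; only built with CONFIG_DRM_I915_CAPTURE_ERROR. */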
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
static ssize_t gpu_state_read(struct file *file, char __user *ubuf,
			      size_t count, loff_t *pos)
{
	struct i915_gpu_coredump *error;
	ssize_t ret;
	void *buf;

	error = file->private_data;
	if (!error)
		return 0;

	/* Bounce buffer required because of kernfs __user API convenience. */
	buf = kmalloc(count, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = i915_gpu_coredump_copy_to_buffer(error, buf, *pos, count);
	if (ret <= 0)
		goto out;

	if (!copy_to_user(ubuf, buf, ret))
		*pos += ret;
	else
		ret = -EFAULT;

out:
	kfree(buf);
	return ret;
}

static int gpu_state_release(struct inode *inode, struct file *file)
{
	i915_gpu_coredump_put(file->private_data);
	return 0;
}

static int i915_gpu_info_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *i915 = inode->i_private;
	struct i915_gpu_coredump *gpu;
	intel_wakeref_t wakeref;

	gpu = NULL;
	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
		gpu = i915_gpu_coredump(&i915->gt, ALL_ENGINES);
	if (IS_ERR(gpu))
		return PTR_ERR(gpu);

	file->private_data = gpu;
	return 0;
}

static const struct file_operations i915_gpu_info_fops = {
	.owner = THIS_MODULE,
	.open = i915_gpu_info_open,
	.read = gpu_state_read,
	.llseek = default_llseek,
	.release = gpu_state_release,
};
static ssize_t
i915_error_state_write(struct file *filp,
		       const char __user *ubuf,
		       size_t cnt,
		       loff_t *ppos)
{
	struct i915_gpu_coredump *error = filp->private_data;

	if (!error)
		return 0;

	drm_dbg(&error->i915->drm, "Resetting error state\n");
	i915_reset_error_state(error->i915);

	return cnt;
}

static int i915_error_state_open(struct inode *inode, struct file *file)
{
	struct i915_gpu_coredump *error;

	error = i915_first_error_state(inode->i_private);
	if (IS_ERR(error))
		return PTR_ERR(error);

	file->private_data = error;
	return 0;
}

static const struct file_operations i915_error_state_fops = {
	.owner = THIS_MODULE,
	.open = i915_error_state_open,
	.read = gpu_state_read,
	.write = i915_error_state_write,
	.llseek = default_llseek,
	.release = gpu_state_release,
};
#endif
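/*
 * RPS/turbo state decode: gen5 uses MEMSWCTL/MEMSTAT, VLV/CHV go through the
 * punit, and gen6+ read the RP autotuning registers under forcewake.
 */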
static int i915_frequency_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_uncore *uncore = &dev_priv->uncore;
	struct intel_rps *rps = &dev_priv->gt.rps;
	intel_wakeref_t wakeref;

	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	if (IS_GEN(dev_priv, 5)) {
		u16 rgvswctl = intel_uncore_read16(uncore, MEMSWCTL);
		u16 rgvstat = intel_uncore_read16(uncore, MEMSTAT_ILK);

		seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
		seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
		seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
			   MEMSTAT_VID_SHIFT);
		seq_printf(m, "Current P-state: %d\n",
			   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		u32 rpmodectl, freq_sts;

		rpmodectl = I915_READ(GEN6_RP_CONTROL);
		seq_printf(m, "Video Turbo Mode: %s\n",
			   yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
		seq_printf(m, "HW control enabled: %s\n",
			   yesno(rpmodectl & GEN6_RP_ENABLE));
		seq_printf(m, "SW control enabled: %s\n",
			   yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
				 GEN6_RP_MEDIA_SW_MODE));

		vlv_punit_get(dev_priv);
		freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
		vlv_punit_put(dev_priv);

		seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
		seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);

		seq_printf(m, "actual GPU freq: %d MHz\n",
			   intel_gpu_freq(rps, (freq_sts >> 8) & 0xff));

		seq_printf(m, "current GPU freq: %d MHz\n",
			   intel_gpu_freq(rps, rps->cur_freq));

		seq_printf(m, "max GPU freq: %d MHz\n",
			   intel_gpu_freq(rps, rps->max_freq));

		seq_printf(m, "min GPU freq: %d MHz\n",
			   intel_gpu_freq(rps, rps->min_freq));

		seq_printf(m, "idle GPU freq: %d MHz\n",
			   intel_gpu_freq(rps, rps->idle_freq));

		seq_printf(m,
			   "efficient (RPe) frequency: %d MHz\n",
			   intel_gpu_freq(rps, rps->efficient_freq));
	} else if (INTEL_GEN(dev_priv) >= 6) {
		u32 rp_state_limits;
		u32 gt_perf_status;
		u32 rp_state_cap;
		u32 rpmodectl, rpinclimit, rpdeclimit;
		u32 rpstat, cagf, reqf;
		u32 rpupei, rpcurup, rpprevup;
		u32 rpdownei, rpcurdown, rpprevdown;
		u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
		int max_freq;

		rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
		if (IS_GEN9_LP(dev_priv)) {
			rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
			gt_perf_status = I915_READ(BXT_GT_PERF_STATUS);
		} else {
			rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
			gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
		}

		/* RPSTAT1 is in the GT power well */
		intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);

		reqf = I915_READ(GEN6_RPNSWREQ);
		if (INTEL_GEN(dev_priv) >= 9) {
			reqf >>= 23;
		} else {
			reqf &= ~GEN6_TURBO_DISABLE;
			if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
				reqf >>= 24;
			else
				reqf >>= 25;
		}
		reqf = intel_gpu_freq(rps, reqf);

		rpmodectl = I915_READ(GEN6_RP_CONTROL);
		rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
		rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD);

		rpstat = I915_READ(GEN6_RPSTAT1);
		rpupei = I915_READ(GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
		rpcurup = I915_READ(GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK;
		rpprevup = I915_READ(GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK;
		rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
		rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
		rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
		cagf = intel_rps_read_actual_frequency(rps);

		intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
		if (INTEL_GEN(dev_priv) >= 11) {
			pm_ier = I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE);
			pm_imr = I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK);
			/*
			 * The equivalent to the PM ISR & IIR cannot be read
			 * without affecting the current state of the system
			 */
			pm_isr = 0;
			pm_iir = 0;
		} else if (INTEL_GEN(dev_priv) >= 8) {
			pm_ier = I915_READ(GEN8_GT_IER(2));
			pm_imr = I915_READ(GEN8_GT_IMR(2));
			pm_isr = I915_READ(GEN8_GT_ISR(2));
			pm_iir = I915_READ(GEN8_GT_IIR(2));
		} else {
			pm_ier = I915_READ(GEN6_PMIER);
			pm_imr = I915_READ(GEN6_PMIMR);
			pm_isr = I915_READ(GEN6_PMISR);
			pm_iir = I915_READ(GEN6_PMIIR);
		}
		pm_mask = I915_READ(GEN6_PMINTRMSK);
		seq_printf(m, "Video Turbo Mode: %s\n",
			   yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
		seq_printf(m, "HW control enabled: %s\n",
			   yesno(rpmodectl & GEN6_RP_ENABLE));
		seq_printf(m, "SW control enabled: %s\n",
			   yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
				 GEN6_RP_MEDIA_SW_MODE));

		seq_printf(m, "PM IER=0x%08x IMR=0x%08x, MASK=0x%08x\n",
			   pm_ier, pm_imr, pm_mask);
		if (INTEL_GEN(dev_priv) <= 10)
			seq_printf(m, "PM ISR=0x%08x IIR=0x%08x\n",
				   pm_isr, pm_iir);
		seq_printf(m, "pm_intrmsk_mbz: 0x%08x\n",
			   rps->pm_intrmsk_mbz);
		seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
		seq_printf(m, "Render p-state ratio: %d\n",
			   (gt_perf_status & (INTEL_GEN(dev_priv) >= 9 ? 0x1ff00 : 0xff00)) >> 8);
		seq_printf(m, "Render p-state VID: %d\n",
			   gt_perf_status & 0xff);
		seq_printf(m, "Render p-state limit: %d\n",
			   rp_state_limits & 0xff);
		seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
		seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl);
		seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit);
		seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
		seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
		seq_printf(m, "CAGF: %dMHz\n", cagf);
		seq_printf(m, "RP CUR UP EI: %d (%dns)\n",
			   rpupei,
			   intel_gt_pm_interval_to_ns(&dev_priv->gt, rpupei));
		seq_printf(m, "RP CUR UP: %d (%dns)\n",
			   rpcurup,
			   intel_gt_pm_interval_to_ns(&dev_priv->gt, rpcurup));
		seq_printf(m, "RP PREV UP: %d (%dns)\n",
			   rpprevup,
			   intel_gt_pm_interval_to_ns(&dev_priv->gt, rpprevup));
		seq_printf(m, "Up threshold: %d%%\n",
			   rps->power.up_threshold);

		seq_printf(m, "RP CUR DOWN EI: %d (%dns)\n",
			   rpdownei,
			   intel_gt_pm_interval_to_ns(&dev_priv->gt,
						      rpdownei));
		seq_printf(m, "RP CUR DOWN: %d (%dns)\n",
			   rpcurdown,
			   intel_gt_pm_interval_to_ns(&dev_priv->gt,
						      rpcurdown));
		seq_printf(m, "RP PREV DOWN: %d (%dns)\n",
			   rpprevdown,
			   intel_gt_pm_interval_to_ns(&dev_priv->gt,
						      rpprevdown));
		seq_printf(m, "Down threshold: %d%%\n",
			   rps->power.down_threshold);
		max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 0 :
			    rp_state_cap >> 16) & 0xff;
		max_freq *= (IS_GEN9_BC(dev_priv) ||
			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
			   intel_gpu_freq(rps, max_freq));

		max_freq = (rp_state_cap & 0xff00) >> 8;
		max_freq *= (IS_GEN9_BC(dev_priv) ||
			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
			   intel_gpu_freq(rps, max_freq));

		max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 16 :
			    rp_state_cap >> 0) & 0xff;
		max_freq *= (IS_GEN9_BC(dev_priv) ||
			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
			   intel_gpu_freq(rps, max_freq));
		seq_printf(m, "Max overclocked frequency: %dMHz\n",
			   intel_gpu_freq(rps, rps->max_freq));

		seq_printf(m, "Current freq: %d MHz\n",
			   intel_gpu_freq(rps, rps->cur_freq));
		seq_printf(m, "Actual freq: %d MHz\n", cagf);
		seq_printf(m, "Idle freq: %d MHz\n",
			   intel_gpu_freq(rps, rps->idle_freq));
		seq_printf(m, "Min freq: %d MHz\n",
			   intel_gpu_freq(rps, rps->min_freq));
		seq_printf(m, "Boost freq: %d MHz\n",
			   intel_gpu_freq(rps, rps->boost_freq));
		seq_printf(m, "Max freq: %d MHz\n",
			   intel_gpu_freq(rps, rps->max_freq));
		seq_printf(m,
			   "efficient (RPe) frequency: %d MHz\n",
			   intel_gpu_freq(rps, rps->efficient_freq));
	} else {
		seq_puts(m, "no P-state info available\n");
	}

	seq_printf(m, "Current CD clock frequency: %d kHz\n", dev_priv->cdclk.hw.cdclk);
	seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq);
	seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq);

	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);

	return 0;
}
static int i915_ring_freq_table(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_rps *rps = &dev_priv->gt.rps;
	unsigned int max_gpu_freq, min_gpu_freq;
	intel_wakeref_t wakeref;
	int gpu_freq, ia_freq;

	if (!HAS_LLC(dev_priv))
		return -ENODEV;

	min_gpu_freq = rps->min_freq;
	max_gpu_freq = rps->max_freq;
	if (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
		/* Convert GT frequency to 50 MHz units */
		min_gpu_freq /= GEN9_FREQ_SCALER;
		max_gpu_freq /= GEN9_FREQ_SCALER;
	}

	seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");

	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
	for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
		ia_freq = gpu_freq;
		sandybridge_pcode_read(dev_priv,
				       GEN6_PCODE_READ_MIN_FREQ_TABLE,
				       &ia_freq, NULL);
		seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
			   intel_gpu_freq(rps,
					  (gpu_freq *
					   (IS_GEN9_BC(dev_priv) ||
					    INTEL_GEN(dev_priv) >= 10 ?
					    GEN9_FREQ_SCALER : 1))),
			   ((ia_freq >> 0) & 0xff) * 100,
			   ((ia_freq >> 8) & 0xff) * 100);
	}
	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);

	return 0;
}
static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring)
{
	seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, emit: %u)",
		   ring->space, ring->head, ring->tail, ring->emit);
}
static int i915_context_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);
	struct i915_gem_context *ctx, *cn;

	spin_lock(&i915->gem.contexts.lock);
	list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) {
		struct i915_gem_engines_iter it;
		struct intel_context *ce;

		if (!kref_get_unless_zero(&ctx->ref))
			continue;

		spin_unlock(&i915->gem.contexts.lock);

		seq_puts(m, "HW context ");
		if (ctx->pid) {
			struct task_struct *task;

			task = get_pid_task(ctx->pid, PIDTYPE_PID);
			if (task) {
				seq_printf(m, "(%s [%d]) ",
					   task->comm, task->pid);
				put_task_struct(task);
			}
		} else if (IS_ERR(ctx->file_priv)) {
			seq_puts(m, "(deleted) ");
		} else {
			seq_puts(m, "(kernel) ");
		}

		seq_putc(m, ctx->remap_slice ? 'R' : 'r');
		seq_putc(m, '\n');

		for_each_gem_engine(ce,
				    i915_gem_context_lock_engines(ctx), it) {
			if (intel_context_pin_if_active(ce)) {
				seq_printf(m, "%s: ", ce->engine->name);
				if (ce->state)
					i915_debugfs_describe_obj(m, ce->state->obj);
				describe_ctx_ring(m, ce->ring);
				seq_putc(m, '\n');
				intel_context_unpin(ce);
			}
		}
		i915_gem_context_unlock_engines(ctx);

		spin_lock(&i915->gem.contexts.lock);
		list_safe_reset_next(ctx, cn, link);
		i915_gem_context_put(ctx);
	}
	spin_unlock(&i915->gem.contexts.lock);

	return 0;
}
static const char *swizzle_string(unsigned swizzle)
{
	switch (swizzle) {
	case I915_BIT_6_SWIZZLE_NONE:
		return "none";
	case I915_BIT_6_SWIZZLE_9:
		return "bit9";
	case I915_BIT_6_SWIZZLE_9_10:
		return "bit9/bit10";
	case I915_BIT_6_SWIZZLE_9_11:
		return "bit9/bit11";
	case I915_BIT_6_SWIZZLE_9_10_11:
		return "bit9/bit10/bit11";
	case I915_BIT_6_SWIZZLE_9_17:
		return "bit9/bit17";
	case I915_BIT_6_SWIZZLE_9_10_17:
		return "bit9/bit10/bit17";
	case I915_BIT_6_SWIZZLE_UNKNOWN:
		return "unknown";
	}

	return "bug";
}
static int i915_swizzle_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_uncore *uncore = &dev_priv->uncore;
	intel_wakeref_t wakeref;

	seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
		   swizzle_string(dev_priv->ggtt.bit_6_swizzle_x));
	seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
		   swizzle_string(dev_priv->ggtt.bit_6_swizzle_y));

	if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
		seq_puts(m, "L-shaped memory detected\n");

	/* On BDW+, swizzling is not used. See detect_bit_6_swizzle() */
	if (INTEL_GEN(dev_priv) >= 8 || IS_VALLEYVIEW(dev_priv))
		return 0;

	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	if (IS_GEN_RANGE(dev_priv, 3, 4)) {
		seq_printf(m, "DDC = 0x%08x\n",
			   intel_uncore_read(uncore, DCC));
		seq_printf(m, "DDC2 = 0x%08x\n",
			   intel_uncore_read(uncore, DCC2));
		seq_printf(m, "C0DRB3 = 0x%04x\n",
			   intel_uncore_read16(uncore, C0DRB3));
		seq_printf(m, "C1DRB3 = 0x%04x\n",
			   intel_uncore_read16(uncore, C1DRB3));
	} else if (INTEL_GEN(dev_priv) >= 6) {
		seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
			   intel_uncore_read(uncore, MAD_DIMM_C0));
		seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
			   intel_uncore_read(uncore, MAD_DIMM_C1));
		seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
			   intel_uncore_read(uncore, MAD_DIMM_C2));
		seq_printf(m, "TILECTL = 0x%08x\n",
			   intel_uncore_read(uncore, TILECTL));
		if (INTEL_GEN(dev_priv) >= 8)
			seq_printf(m, "GAMTARBMODE = 0x%08x\n",
				   intel_uncore_read(uncore, GAMTARBMODE));
		else
			seq_printf(m, "ARB_MODE = 0x%08x\n",
				   intel_uncore_read(uncore, ARB_MODE));
		seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
			   intel_uncore_read(uncore, DISP_ARB_CTL));
	}

	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);

	return 0;
}
static const char *rps_power_to_str(unsigned int power)
{
	static const char * const strings[] = {
		[LOW_POWER] = "low power",
		[BETWEEN] = "mixed",
		[HIGH_POWER] = "high power",
	};

	if (power >= ARRAY_SIZE(strings) || !strings[power])
		return "unknown";

	return strings[power];
}
static int i915_rps_boost_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_rps *rps = &dev_priv->gt.rps;

	seq_printf(m, "RPS enabled? %s\n", yesno(intel_rps_is_enabled(rps)));
	seq_printf(m, "RPS active? %s\n", yesno(intel_rps_is_active(rps)));
	seq_printf(m, "GPU busy? %s\n", yesno(dev_priv->gt.awake));
	seq_printf(m, "Boosts outstanding? %d\n",
		   atomic_read(&rps->num_waiters));
	seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive));
	seq_printf(m, "Frequency requested %d, actual %d\n",
		   intel_gpu_freq(rps, rps->cur_freq),
		   intel_rps_read_actual_frequency(rps));
	seq_printf(m, "  min hard:%d, soft:%d; max soft:%d, hard:%d\n",
		   intel_gpu_freq(rps, rps->min_freq),
		   intel_gpu_freq(rps, rps->min_freq_softlimit),
		   intel_gpu_freq(rps, rps->max_freq_softlimit),
		   intel_gpu_freq(rps, rps->max_freq));
	seq_printf(m, "  idle:%d, efficient:%d, boost:%d\n",
		   intel_gpu_freq(rps, rps->idle_freq),
		   intel_gpu_freq(rps, rps->efficient_freq),
		   intel_gpu_freq(rps, rps->boost_freq));

	seq_printf(m, "Wait boosts: %d\n", atomic_read(&rps->boosts));

	if (INTEL_GEN(dev_priv) >= 6 && intel_rps_is_active(rps)) {
		u32 rpup, rpupei;
		u32 rpdown, rpdownei;

		intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
		rpup = I915_READ_FW(GEN6_RP_CUR_UP) & GEN6_RP_EI_MASK;
		rpupei = I915_READ_FW(GEN6_RP_CUR_UP_EI) & GEN6_RP_EI_MASK;
		rpdown = I915_READ_FW(GEN6_RP_CUR_DOWN) & GEN6_RP_EI_MASK;
		rpdownei = I915_READ_FW(GEN6_RP_CUR_DOWN_EI) & GEN6_RP_EI_MASK;
		intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);

		seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n",
			   rps_power_to_str(rps->power.mode));
		seq_printf(m, "  Avg. up: %d%% [above threshold? %d%%]\n",
			   rpup && rpupei ? 100 * rpup / rpupei : 0,
			   rps->power.up_threshold);
		seq_printf(m, "  Avg. down: %d%% [below threshold? %d%%]\n",
			   rpdown && rpdownei ? 100 * rpdown / rpdownei : 0,
			   rps->power.down_threshold);
	} else {
		seq_puts(m, "\nRPS Autotuning inactive\n");
	}

	return 0;
}
static int i915_llc(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const bool edram = INTEL_GEN(dev_priv) > 8;

	seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev_priv)));
	seq_printf(m, "%s: %uMB\n", edram ? "eDRAM" : "eLLC",
		   dev_priv->edram_size_mb);

	return 0;
}
static int i915_runtime_pm_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct pci_dev *pdev = dev_priv->drm.pdev;

	if (!HAS_RUNTIME_PM(dev_priv))
		seq_puts(m, "Runtime power management not supported\n");

	seq_printf(m, "Runtime power status: %s\n",
		   enableddisabled(!dev_priv->power_domains.wakeref));

	seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->gt.awake));
	seq_printf(m, "IRQs disabled: %s\n",
		   yesno(!intel_irqs_enabled(dev_priv)));
#ifdef CONFIG_PM
	seq_printf(m, "Usage count: %d\n",
		   atomic_read(&dev_priv->drm.dev->power.usage_count));
#else
	seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n");
#endif
	seq_printf(m, "PCI device power state: %s [%d]\n",
		   pci_power_name(pdev->current_state),
		   pdev->current_state);

	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)) {
		struct drm_printer p = drm_seq_file_printer(m);

		print_intel_runtime_pm_wakeref(&dev_priv->runtime_pm, &p);
	}

	return 0;
}
static int i915_engine_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_engine_cs *engine;
	intel_wakeref_t wakeref;
	struct drm_printer p;

	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	seq_printf(m, "GT awake? %s [%d]\n",
		   yesno(dev_priv->gt.awake),
		   atomic_read(&dev_priv->gt.wakeref.count));
	seq_printf(m, "CS timestamp frequency: %u Hz\n",
		   RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_hz);

	p = drm_seq_file_printer(m);
	for_each_uabi_engine(engine, dev_priv)
		intel_engine_dump(engine, &p, "%s\n", engine->name);

	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);

	return 0;
}
static int i915_shrinker_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);

	seq_printf(m, "seeks = %d\n", i915->mm.shrinker.seeks);
	seq_printf(m, "batch = %lu\n", i915->mm.shrinker.batch);

	return 0;
}
static int i915_wa_registers(struct seq_file *m, void *unused)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);
	struct intel_engine_cs *engine;

	for_each_uabi_engine(engine, i915) {
		const struct i915_wa_list *wal = &engine->ctx_wa_list;
		const struct i915_wa *wa;
		unsigned int count;

		count = wal->count;
		if (!count)
			continue;

		seq_printf(m, "%s: Workarounds applied: %u\n",
			   engine->name, count);

		for (wa = wal->list; count--; wa++)
			seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X\n",
				   i915_mmio_reg_offset(wa->reg),
				   wa->set, wa->clr);

		seq_printf(m, "\n");
	}

	return 0;
}
static int
i915_wedged_get(void *data, u64 *val)
{
	struct drm_i915_private *i915 = data;
	int ret = intel_gt_terminally_wedged(&i915->gt);

	switch (ret) {
	case -EIO:
		*val = 1;
		return 0;
	case 0:
		*val = 0;
		return 0;
	default:
		return ret;
	}
}

static int
i915_wedged_set(void *data, u64 val)
{
	struct drm_i915_private *i915 = data;

	/* Flush any previous reset before applying for a new one */
	wait_event(i915->gt.reset.queue,
		   !test_bit(I915_RESET_BACKOFF, &i915->gt.reset.flags));

	intel_gt_handle_error(&i915->gt, val, I915_ERROR_CAPTURE,
			      "Manually set wedged engine mask = %llx", val);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
			i915_wedged_get, i915_wedged_set,
			"%llu\n");
static int
i915_perf_noa_delay_set(void *data, u64 val)
{
	struct drm_i915_private *i915 = data;

	/*
	 * This would lead to infinite waits as we're doing timestamp
	 * difference on the CS with only 32bits.
	 */
	if (i915_cs_timestamp_ns_to_ticks(i915, val) > U32_MAX)
		return -EINVAL;

	atomic64_set(&i915->perf.noa_programming_delay, val);
	return 0;
}

static int
i915_perf_noa_delay_get(void *data, u64 *val)
{
	struct drm_i915_private *i915 = data;

	*val = atomic64_read(&i915->perf.noa_programming_delay);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_perf_noa_delay_fops,
			i915_perf_noa_delay_get,
			i915_perf_noa_delay_set,
			"%llu\n");
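/*
 * i915_gem_drop_caches flags: writing a mask of these bits to the debugfs
 * file drops the corresponding caches and state; DROP_ALL covers every bit.
 */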
#define DROP_UNBOUND		BIT(0)
#define DROP_BOUND		BIT(1)
#define DROP_RETIRE		BIT(2)
#define DROP_ACTIVE		BIT(3)
#define DROP_FREED		BIT(4)
#define DROP_SHRINK_ALL		BIT(5)
#define DROP_IDLE		BIT(6)
#define DROP_RESET_ACTIVE	BIT(7)
#define DROP_RESET_SEQNO	BIT(8)
#define DROP_RCU		BIT(9)
#define DROP_ALL (DROP_UNBOUND	| \
		  DROP_BOUND	| \
		  DROP_RETIRE	| \
		  DROP_ACTIVE	| \
		  DROP_FREED	| \
		  DROP_SHRINK_ALL | \
		  DROP_IDLE	| \
		  DROP_RESET_ACTIVE | \
		  DROP_RESET_SEQNO | \
		  DROP_RCU)
static int
i915_drop_caches_get(void *data, u64 *val)
{
	*val = DROP_ALL;

	return 0;
}
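/*
 * GT-side half of drop_caches: optionally wedge a stuck GT, retire requests
 * and wait for idle before the shrinker passes in the caller run.
 */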
static int
gt_drop_caches(struct intel_gt *gt, u64 val)
{
	int ret;

	if (val & DROP_RESET_ACTIVE &&
	    wait_for(intel_engines_are_idle(gt), I915_IDLE_ENGINES_TIMEOUT))
		intel_gt_set_wedged(gt);

	if (val & DROP_RETIRE)
		intel_gt_retire_requests(gt);

	if (val & (DROP_IDLE | DROP_ACTIVE)) {
		ret = intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT);
		if (ret)
			return ret;
	}

	if (val & DROP_IDLE) {
		ret = intel_gt_pm_wait_for_idle(gt);
		if (ret)
			return ret;
	}

	if (val & DROP_RESET_ACTIVE && intel_gt_terminally_wedged(gt))
		intel_gt_handle_error(gt, ALL_ENGINES, 0, NULL);

	if (val & DROP_FREED)
		intel_gt_flush_buffer_pool(gt);

	return 0;
}
static int
i915_drop_caches_set(void *data, u64 val)
{
	struct drm_i915_private *i915 = data;
	int ret;

	DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n",
		  val, val & DROP_ALL);

	ret = gt_drop_caches(&i915->gt, val);
	if (ret)
		return ret;

	fs_reclaim_acquire(GFP_KERNEL);
	if (val & DROP_BOUND)
		i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_BOUND);

	if (val & DROP_UNBOUND)
		i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_UNBOUND);

	if (val & DROP_SHRINK_ALL)
		i915_gem_shrink_all(i915);
	fs_reclaim_release(GFP_KERNEL);

	if (val & DROP_RCU)
		rcu_barrier();

	if (val & DROP_FREED)
		i915_gem_drain_freed_objects(i915);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
			i915_drop_caches_get, i915_drop_caches_set,
			"0x%08llx\n");
static int
i915_cache_sharing_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;
	intel_wakeref_t wakeref;
	u32 snpcr = 0;

	if (!(IS_GEN_RANGE(dev_priv, 6, 7)))
		return -ENODEV;

	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
		snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);

	*val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;

	return 0;
}

static int
i915_cache_sharing_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	intel_wakeref_t wakeref;

	if (!(IS_GEN_RANGE(dev_priv, 6, 7)))
		return -ENODEV;

	if (val > 3)
		return -EINVAL;

	drm_dbg(&dev_priv->drm,
		"Manually setting uncore sharing to %llu\n", val);
	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
		u32 snpcr;

		/* Update the cache sharing policy here as well */
		snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
		snpcr &= ~GEN6_MBC_SNPCR_MASK;
		snpcr |= val << GEN6_MBC_SNPCR_SHIFT;
		I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
	}

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
			i915_cache_sharing_get, i915_cache_sharing_set,
			"%llu\n");
static int i915_sseu_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);
	struct intel_gt *gt = &i915->gt;

	return intel_sseu_status(m, gt);
}
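/*
 * i915_forcewake_user: while this file is held open, GT power and (on gen6+)
 * userspace forcewake are pinned so registers remain readable from userspace.
 */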
static int i915_forcewake_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *i915 = inode->i_private;
	struct intel_gt *gt = &i915->gt;

	atomic_inc(&gt->user_wakeref);
	intel_gt_pm_get(gt);
	if (INTEL_GEN(i915) >= 6)
		intel_uncore_forcewake_user_get(gt->uncore);

	return 0;
}

static int i915_forcewake_release(struct inode *inode, struct file *file)
{
	struct drm_i915_private *i915 = inode->i_private;
	struct intel_gt *gt = &i915->gt;

	if (INTEL_GEN(i915) >= 6)
		intel_uncore_forcewake_user_put(&i915->uncore);
	intel_gt_pm_put(gt);
	atomic_dec(&gt->user_wakeref);

	return 0;
}

static const struct file_operations i915_forcewake_fops = {
	.owner = THIS_MODULE,
	.open = i915_forcewake_open,
	.release = i915_forcewake_release,
};
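/* Read-only seq_file nodes, registered via drm_debugfs_create_files(). */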
static const struct drm_info_list i915_debugfs_list[] = {
	{"i915_capabilities", i915_capabilities, 0},
	{"i915_gem_objects", i915_gem_object_info, 0},
	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
	{"i915_gem_interrupt", i915_interrupt_info, 0},
	{"i915_frequency_info", i915_frequency_info, 0},
	{"i915_ring_freq_table", i915_ring_freq_table, 0},
	{"i915_context_status", i915_context_status, 0},
	{"i915_swizzle_info", i915_swizzle_info, 0},
	{"i915_llc", i915_llc, 0},
	{"i915_runtime_pm_status", i915_runtime_pm_status, 0},
	{"i915_engine_info", i915_engine_info, 0},
	{"i915_shrinker_info", i915_shrinker_info, 0},
	{"i915_wa_registers", i915_wa_registers, 0},
	{"i915_sseu_status", i915_sseu_status, 0},
	{"i915_rps_boost_info", i915_rps_boost_info, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
static const struct i915_debugfs_files {
	const char *name;
	const struct file_operations *fops;
} i915_debugfs_files[] = {
	{"i915_perf_noa_delay", &i915_perf_noa_delay_fops},
	{"i915_wedged", &i915_wedged_fops},
	{"i915_cache_sharing", &i915_cache_sharing_fops},
	{"i915_gem_drop_caches", &i915_drop_caches_fops},
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
	{"i915_error_state", &i915_error_state_fops},
	{"i915_gpu_info", &i915_gpu_info_fops},
#endif
};
void i915_debugfs_register(struct drm_i915_private *dev_priv)
{
	struct drm_minor *minor = dev_priv->drm.primary;
	int i;

	i915_debugfs_params(dev_priv);

	debugfs_create_file("i915_forcewake_user", S_IRUSR, minor->debugfs_root,
			    to_i915(minor->dev), &i915_forcewake_fops);
	for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
		debugfs_create_file(i915_debugfs_files[i].name,
				    S_IRUGO | S_IWUSR,
				    minor->debugfs_root,
				    to_i915(minor->dev),
				    i915_debugfs_files[i].fops);
	}

	drm_debugfs_create_files(i915_debugfs_list,
				 I915_DEBUGFS_ENTRIES,
				 minor->debugfs_root, minor);
}