/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *
 */
#include <linux/seq_file.h>
#include <linux/circ_buf.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/list_sort.h>
#include <asm/msr-index.h>
#include <drm/drmP.h>
#include "intel_drv.h"
#include "intel_ringbuffer.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"
static const char *yesno(int v)
{
	return v ? "yes" : "no";
}
/* As the drm_debugfs_init() routines are called before dev->dev_private is
 * allocated we need to hook into the minor for release. */
static int
drm_add_fake_info_node(struct drm_minor *minor,
		       struct dentry *ent,
		       const void *key)
{
	struct drm_info_node *node;

	node = kmalloc(sizeof(*node), GFP_KERNEL);
	if (node == NULL) {
		debugfs_remove(ent);
		return -ENOMEM;
	}

	node->minor = minor;
	node->dent = ent;
	node->info_ent = (void *) key;

	mutex_lock(&minor->debugfs_lock);
	list_add(&node->list, &minor->debugfs_list);
	mutex_unlock(&minor->debugfs_lock);

	return 0;
}
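
/* Report the device generation, PCH type and every intel_device_info
 * capability flag as a yes/no list. */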
static int i915_capabilities(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	const struct intel_device_info *info = INTEL_INFO(dev);

	seq_printf(m, "gen: %d\n", info->gen);
	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev));
#define PRINT_FLAG(x)  seq_printf(m, #x ": %s\n", yesno(info->x))
#define SEP_SEMICOLON ;
	DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_SEMICOLON);
#undef PRINT_FLAG
#undef SEP_SEMICOLON

	return 0;
}
static const char *get_pin_flag(struct drm_i915_gem_object *obj)
{
	if (i915_gem_obj_is_pinned(obj))
		return "p";
	else
		return " ";
}

static const char *get_tiling_flag(struct drm_i915_gem_object *obj)
{
	switch (obj->tiling_mode) {
	default:
	case I915_TILING_NONE: return " ";
	case I915_TILING_X: return "X";
	case I915_TILING_Y: return "Y";
	}
}

static inline const char *get_global_flag(struct drm_i915_gem_object *obj)
{
	return i915_gem_obj_to_ggtt(obj) ? "g" : " ";
}
static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct intel_engine_cs *ring;
	struct i915_vma *vma;
	int pin_count = 0;
	int i;

	seq_printf(m, "%pK: %s%s%s%s %8zdKiB %02x %02x [ ",
		   &obj->base,
		   obj->active ? "*" : " ",
		   get_pin_flag(obj),
		   get_tiling_flag(obj),
		   get_global_flag(obj),
		   obj->base.size / 1024,
		   obj->base.read_domains,
		   obj->base.write_domain);
	for_each_ring(ring, dev_priv, i)
		seq_printf(m, "%x ",
			   i915_gem_request_get_seqno(obj->last_read_req[i]));
	seq_printf(m, "] %x %x%s%s%s",
		   i915_gem_request_get_seqno(obj->last_write_req),
		   i915_gem_request_get_seqno(obj->last_fenced_req),
		   i915_cache_level_str(to_i915(obj->base.dev), obj->cache_level),
		   obj->dirty ? " dirty" : "",
		   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
	if (obj->base.name)
		seq_printf(m, " (name: %d)", obj->base.name);
	list_for_each_entry(vma, &obj->vma_list, vma_link) {
		if (vma->pin_count > 0)
			pin_count++;
	}
	seq_printf(m, " (pinned x %d)", pin_count);
	if (obj->pin_display)
		seq_printf(m, " (display)");
	if (obj->fence_reg != I915_FENCE_REG_NONE)
		seq_printf(m, " (fence: %d)", obj->fence_reg);
	list_for_each_entry(vma, &obj->vma_list, vma_link) {
		if (!i915_is_ggtt(vma->vm))
			seq_puts(m, " (pp");
		else
			seq_puts(m, " (g");
		seq_printf(m, "gtt offset: %08llx, size: %08llx, type: %u)",
			   vma->node.start, vma->node.size,
			   vma->ggtt_view.type);
	}
	if (obj->stolen)
		seq_printf(m, " (stolen: %08llx)", obj->stolen->start);
	if (obj->pin_display || obj->fault_mappable) {
		char s[3], *t = s;
		if (obj->pin_display)
			*t++ = 'p';
		if (obj->fault_mappable)
			*t++ = 'f';
		*t = '\0';
		seq_printf(m, " (%s mappable)", s);
	}
	if (obj->last_write_req != NULL)
		seq_printf(m, " (%s)",
			   i915_gem_request_get_ring(obj->last_write_req)->name);
	if (obj->frontbuffer_bits)
		seq_printf(m, " (frontbuffer: 0x%03x)", obj->frontbuffer_bits);
}
static void describe_ctx(struct seq_file *m, struct intel_context *ctx)
{
	seq_putc(m, ctx->legacy_hw_ctx.initialized ? 'I' : 'i');
	seq_putc(m, ctx->remap_slice ? 'R' : 'r');
	seq_putc(m, ' ');
}
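
/* Walk the chosen GGTT list (active or inactive) under struct_mutex and
 * describe every VMA on it, plus object/GTT size totals. */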
static int i915_gem_object_list_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	uintptr_t list = (uintptr_t) node->info_ent->data;
	struct list_head *head;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_address_space *vm = &dev_priv->gtt.base;
	struct i915_vma *vma;
	size_t total_obj_size, total_gtt_size;
	int count, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	/* FIXME: the user of this interface might want more than just GGTT */
	switch (list) {
	case ACTIVE_LIST:
		seq_puts(m, "Active:\n");
		head = &vm->active_list;
		break;
	case INACTIVE_LIST:
		seq_puts(m, "Inactive:\n");
		head = &vm->inactive_list;
		break;
	default:
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	total_obj_size = total_gtt_size = count = 0;
	list_for_each_entry(vma, head, mm_list) {
		seq_puts(m, "   ");
		describe_obj(m, vma->obj);
		seq_putc(m, '\n');
		total_obj_size += vma->obj->base.size;
		total_gtt_size += vma->node.size;
		count++;
	}
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
		   count, total_obj_size, total_gtt_size);

	return 0;
}
static int obj_rank_by_stolen(void *priv,
			      struct list_head *A, struct list_head *B)
{
	struct drm_i915_gem_object *a =
		container_of(A, struct drm_i915_gem_object, obj_exec_link);
	struct drm_i915_gem_object *b =
		container_of(B, struct drm_i915_gem_object, obj_exec_link);

	return a->stolen->start - b->stolen->start;
}
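
/* Collect every object backed by stolen memory from the bound and unbound
 * lists, sort by stolen offset and dump the lot. */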
static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	size_t total_obj_size, total_gtt_size;
	LIST_HEAD(stolen);
	int count, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	total_obj_size = total_gtt_size = count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if (obj->stolen == NULL)
			continue;

		list_add(&obj->obj_exec_link, &stolen);

		total_obj_size += obj->base.size;
		total_gtt_size += i915_gem_obj_ggtt_size(obj);
		count++;
	}
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
		if (obj->stolen == NULL)
			continue;

		list_add(&obj->obj_exec_link, &stolen);

		total_obj_size += obj->base.size;
		count++;
	}
	list_sort(NULL, &stolen, obj_rank_by_stolen);
	seq_puts(m, "Stolen:\n");
	while (!list_empty(&stolen)) {
		obj = list_first_entry(&stolen, typeof(*obj), obj_exec_link);
		seq_puts(m, "   ");
		describe_obj(m, obj);
		seq_putc(m, '\n');
		list_del_init(&obj->obj_exec_link);
	}
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
		   count, total_obj_size, total_gtt_size);
	return 0;
}
#define count_objects(list, member) do { \
	list_for_each_entry(obj, list, member) { \
		size += i915_gem_obj_ggtt_size(obj); \
		++count; \
		if (obj->map_and_fenceable) { \
			mappable_size += i915_gem_obj_ggtt_size(obj); \
			++mappable_count; \
		} \
	} \
} while (0)
struct file_stats {
	struct drm_i915_file_private *file_priv;
	int count;
	size_t total, unbound;
	size_t global, shared;
	size_t active, inactive;
};
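
/* idr callback: accumulate one object's size into the per-client buckets
 * (total/shared/global/active/inactive/unbound). */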
static int per_file_stats(int id, void *ptr, void *data)
{
	struct drm_i915_gem_object *obj = ptr;
	struct file_stats *stats = data;
	struct i915_vma *vma;

	stats->count++;
	stats->total += obj->base.size;

	if (obj->base.name || obj->base.dma_buf)
		stats->shared += obj->base.size;

	if (USES_FULL_PPGTT(obj->base.dev)) {
		list_for_each_entry(vma, &obj->vma_list, vma_link) {
			struct i915_hw_ppgtt *ppgtt;

			if (!drm_mm_node_allocated(&vma->node))
				continue;

			if (i915_is_ggtt(vma->vm)) {
				stats->global += obj->base.size;
				continue;
			}

			ppgtt = container_of(vma->vm, struct i915_hw_ppgtt, base);
			if (ppgtt->file_priv != stats->file_priv)
				continue;

			if (obj->active) /* XXX per-vma statistic */
				stats->active += obj->base.size;
			else
				stats->inactive += obj->base.size;

			return 0;
		}
	} else {
		if (i915_gem_obj_ggtt_bound(obj)) {
			stats->global += obj->base.size;
			if (obj->active)
				stats->active += obj->base.size;
			else
				stats->inactive += obj->base.size;
			return 0;
		}
	}

	if (!list_empty(&obj->global_list))
		stats->unbound += obj->base.size;

	return 0;
}
#define print_file_stats(m, name, stats) do { \
	if (stats.count) \
		seq_printf(m, "%s: %u objects, %zu bytes (%zu active, %zu inactive, %zu global, %zu shared, %zu unbound)\n", \
			   name, \
			   stats.count, \
			   stats.total, \
			   stats.active, \
			   stats.inactive, \
			   stats.global, \
			   stats.shared, \
			   stats.unbound); \
} while (0)
static void print_batch_pool_stats(struct seq_file *m,
				   struct drm_i915_private *dev_priv)
{
	struct drm_i915_gem_object *obj;
	struct file_stats stats;
	struct intel_engine_cs *ring;
	int i, j;

	memset(&stats, 0, sizeof(stats));

	for_each_ring(ring, dev_priv, i) {
		for (j = 0; j < ARRAY_SIZE(ring->batch_pool.cache_list); j++) {
			list_for_each_entry(obj,
					    &ring->batch_pool.cache_list[j],
					    batch_pool_link)
				per_file_stats(0, obj, &stats);
		}
	}

	print_file_stats(m, "[k]batch pool", stats);
}
#define count_vmas(list, member) do { \
	list_for_each_entry(vma, list, member) { \
		size += i915_gem_obj_ggtt_size(vma->obj); \
		++count; \
		if (vma->obj->map_and_fenceable) { \
			mappable_size += i915_gem_obj_ggtt_size(vma->obj); \
			++mappable_count; \
		} \
	} \
} while (0)
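
/* Top-level memory overview: global object counts, bound/active/inactive
 * totals, purgeable/mappable breakdowns and a per-client summary. */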
static int i915_gem_object_info(struct seq_file *m, void* data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 count, mappable_count, purgeable_count;
	size_t size, mappable_size, purgeable_size;
	struct drm_i915_gem_object *obj;
	struct i915_address_space *vm = &dev_priv->gtt.base;
	struct drm_file *file;
	struct i915_vma *vma;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "%u objects, %zu bytes\n",
		   dev_priv->mm.object_count,
		   dev_priv->mm.object_memory);

	size = count = mappable_size = mappable_count = 0;
	count_objects(&dev_priv->mm.bound_list, global_list);
	seq_printf(m, "%u [%u] objects, %zu [%zu] bytes in gtt\n",
		   count, mappable_count, size, mappable_size);

	size = count = mappable_size = mappable_count = 0;
	count_vmas(&vm->active_list, mm_list);
	seq_printf(m, "  %u [%u] active objects, %zu [%zu] bytes\n",
		   count, mappable_count, size, mappable_size);

	size = count = mappable_size = mappable_count = 0;
	count_vmas(&vm->inactive_list, mm_list);
	seq_printf(m, "  %u [%u] inactive objects, %zu [%zu] bytes\n",
		   count, mappable_count, size, mappable_size);

	size = count = purgeable_size = purgeable_count = 0;
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
		size += obj->base.size, ++count;
		if (obj->madv == I915_MADV_DONTNEED)
			purgeable_size += obj->base.size, ++purgeable_count;
	}
	seq_printf(m, "%u unbound objects, %zu bytes\n", count, size);

	size = count = mappable_size = mappable_count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if (obj->fault_mappable) {
			size += i915_gem_obj_ggtt_size(obj);
			++count;
		}
		if (obj->pin_display) {
			mappable_size += i915_gem_obj_ggtt_size(obj);
			++mappable_count;
		}
		if (obj->madv == I915_MADV_DONTNEED) {
			purgeable_size += obj->base.size;
			++purgeable_count;
		}
	}
	seq_printf(m, "%u purgeable objects, %zu bytes\n",
		   purgeable_count, purgeable_size);
	seq_printf(m, "%u pinned mappable objects, %zu bytes\n",
		   mappable_count, mappable_size);
	seq_printf(m, "%u fault mappable objects, %zu bytes\n",
		   count, size);

	seq_printf(m, "%zu [%lu] gtt total\n",
		   dev_priv->gtt.base.total,
		   dev_priv->gtt.mappable_end - dev_priv->gtt.base.start);

	seq_putc(m, '\n');
	print_batch_pool_stats(m, dev_priv);
	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
		struct file_stats stats;
		struct task_struct *task;

		memset(&stats, 0, sizeof(stats));
		stats.file_priv = file->driver_priv;
		spin_lock(&file->table_lock);
		idr_for_each(&file->object_idr, per_file_stats, &stats);
		spin_unlock(&file->table_lock);
		/*
		 * Although we have a valid reference on file->pid, that does
		 * not guarantee that the task_struct who called get_pid() is
		 * still alive (e.g. get_pid(current) => fork() => exit()).
		 * Therefore, we need to protect this ->comm access using RCU.
		 */
		rcu_read_lock();
		task = pid_task(file->pid, PIDTYPE_PID);
		print_file_stats(m, task ? task->comm : "<unknown>", stats);
		rcu_read_unlock();
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
static int i915_gem_gtt_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	uintptr_t list = (uintptr_t) node->info_ent->data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	size_t total_obj_size, total_gtt_size;
	int count, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	total_obj_size = total_gtt_size = count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if (list == PINNED_LIST && !i915_gem_obj_is_pinned(obj))
			continue;

		seq_puts(m, "   ");
		describe_obj(m, obj);
		seq_putc(m, '\n');
		total_obj_size += obj->base.size;
		total_gtt_size += i915_gem_obj_ggtt_size(obj);
		count++;
	}

	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
		   count, total_obj_size, total_gtt_size);

	return 0;
}
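
/* For each CRTC, report whether a page flip is pending and, if so, which
 * ring/seqno it waits on and the scanout addresses involved. */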
static int i915_gem_pageflip_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	for_each_intel_crtc(dev, crtc) {
		const char pipe = pipe_name(crtc->pipe);
		const char plane = plane_name(crtc->plane);
		struct intel_unpin_work *work;

		spin_lock_irq(&dev->event_lock);
		work = crtc->unpin_work;
		if (work == NULL) {
			seq_printf(m, "No flip due on pipe %c (plane %c)\n",
				   pipe, plane);
		} else {
			u32 addr;

			if (atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
				seq_printf(m, "Flip queued on pipe %c (plane %c)\n",
					   pipe, plane);
			} else {
				seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n",
					   pipe, plane);
			}
			if (work->flip_queued_req) {
				struct intel_engine_cs *ring =
					i915_gem_request_get_ring(work->flip_queued_req);

				seq_printf(m, "Flip queued on %s at seqno %x, next seqno %x [current breadcrumb %x], completed? %d\n",
					   ring->name,
					   i915_gem_request_get_seqno(work->flip_queued_req),
					   dev_priv->next_seqno,
					   ring->get_seqno(ring, true),
					   i915_gem_request_completed(work->flip_queued_req, true));
			} else
				seq_printf(m, "Flip not associated with any ring\n");
			seq_printf(m, "Flip queued on frame %d, (was ready on frame %d), now %d\n",
				   work->flip_queued_vblank,
				   work->flip_ready_vblank,
				   drm_crtc_vblank_count(&crtc->base));
			if (work->enable_stall_check)
				seq_puts(m, "Stall check enabled, ");
			else
				seq_puts(m, "Stall check waiting for page flip ioctl, ");
			seq_printf(m, "%d prepares\n", atomic_read(&work->pending));

			if (INTEL_INFO(dev)->gen >= 4)
				addr = I915_HI_DISPBASE(I915_READ(DSPSURF(crtc->plane)));
			else
				addr = I915_READ(DSPADDR(crtc->plane));
			seq_printf(m, "Current scanout address 0x%08x\n", addr);

			if (work->pending_flip_obj) {
				seq_printf(m, "New framebuffer address 0x%08lx\n", (long)work->gtt_offset);
				seq_printf(m, "MMIO update completed? %d\n",  addr == work->gtt_offset);
			}
		}
		spin_unlock_irq(&dev->event_lock);
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	struct intel_engine_cs *ring;
	int total = 0;
	int ret, i, j;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	for_each_ring(ring, dev_priv, i) {
		for (j = 0; j < ARRAY_SIZE(ring->batch_pool.cache_list); j++) {
			int count;

			count = 0;
			list_for_each_entry(obj,
					    &ring->batch_pool.cache_list[j],
					    batch_pool_link)
				count++;
			seq_printf(m, "%s cache[%d]: %d objects\n",
				   ring->name, j, count);

			list_for_each_entry(obj,
					    &ring->batch_pool.cache_list[j],
					    batch_pool_link) {
				seq_puts(m, "   ");
				describe_obj(m, obj);
				seq_putc(m, '\n');
			}

			total += count;
		}
	}

	seq_printf(m, "total: %d\n", total);

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
static int i915_gem_request_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	struct drm_i915_gem_request *req;
	int ret, any, i;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	any = 0;
	for_each_ring(ring, dev_priv, i) {
		int count;

		count = 0;
		list_for_each_entry(req, &ring->request_list, list)
			count++;
		if (count == 0)
			continue;

		seq_printf(m, "%s requests: %d\n", ring->name, count);
		list_for_each_entry(req, &ring->request_list, list) {
			struct task_struct *task;

			rcu_read_lock();
			task = NULL;
			if (req->pid)
				task = pid_task(req->pid, PIDTYPE_PID);
			seq_printf(m, "    %x @ %d: %s [%d]\n",
				   req->seqno,
				   (int) (jiffies - req->emitted_jiffies),
				   task ? task->comm : "<unknown>",
				   task ? task->pid : -1);
			rcu_read_unlock();
		}

		any++;
	}
	mutex_unlock(&dev->struct_mutex);

	if (any == 0)
		seq_puts(m, "No requests\n");

	return 0;
}
static void i915_ring_seqno_info(struct seq_file *m,
				 struct intel_engine_cs *ring)
{
	if (ring->get_seqno) {
		seq_printf(m, "Current sequence (%s): %x\n",
			   ring->name, ring->get_seqno(ring, false));
	}
}
static int i915_gem_seqno_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	int ret, i;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	for_each_ring(ring, dev_priv, i)
		i915_ring_seqno_info(m, ring);

	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
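
/* Dump the interrupt enable/identity/mask registers; the register layout
 * differs per platform, hence the CHV/gen8+/VLV/pre-PCH-split branches. */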
static int i915_interrupt_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	int ret, i, pipe;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	if (IS_CHERRYVIEW(dev)) {
		seq_printf(m, "Master Interrupt Control:\t%08x\n",
			   I915_READ(GEN8_MASTER_IRQ));

		seq_printf(m, "Display IER:\t%08x\n",
			   I915_READ(VLV_IER));
		seq_printf(m, "Display IIR:\t%08x\n",
			   I915_READ(VLV_IIR));
		seq_printf(m, "Display IIR_RW:\t%08x\n",
			   I915_READ(VLV_IIR_RW));
		seq_printf(m, "Display IMR:\t%08x\n",
			   I915_READ(VLV_IMR));
		for_each_pipe(dev_priv, pipe)
			seq_printf(m, "Pipe %c stat:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));

		seq_printf(m, "Port hotplug:\t%08x\n",
			   I915_READ(PORT_HOTPLUG_EN));
		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
			   I915_READ(VLV_DPFLIPSTAT));
		seq_printf(m, "DPINVGTT:\t%08x\n",
			   I915_READ(DPINVGTT));

		for (i = 0; i < 4; i++) {
			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IMR(i)));
			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IIR(i)));
			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IER(i)));
		}

		seq_printf(m, "PCU interrupt mask:\t%08x\n",
			   I915_READ(GEN8_PCU_IMR));
		seq_printf(m, "PCU interrupt identity:\t%08x\n",
			   I915_READ(GEN8_PCU_IIR));
		seq_printf(m, "PCU interrupt enable:\t%08x\n",
			   I915_READ(GEN8_PCU_IER));
	} else if (INTEL_INFO(dev)->gen >= 8) {
		seq_printf(m, "Master Interrupt Control:\t%08x\n",
			   I915_READ(GEN8_MASTER_IRQ));

		for (i = 0; i < 4; i++) {
			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IMR(i)));
			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IIR(i)));
			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IER(i)));
		}

		for_each_pipe(dev_priv, pipe) {
			if (!intel_display_power_is_enabled(dev_priv,
						POWER_DOMAIN_PIPE(pipe))) {
				seq_printf(m, "Pipe %c power disabled\n",
					   pipe_name(pipe));
				continue;
			}
			seq_printf(m, "Pipe %c IMR:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(GEN8_DE_PIPE_IMR(pipe)));
			seq_printf(m, "Pipe %c IIR:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(GEN8_DE_PIPE_IIR(pipe)));
			seq_printf(m, "Pipe %c IER:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(GEN8_DE_PIPE_IER(pipe)));
		}

		seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
			   I915_READ(GEN8_DE_PORT_IMR));
		seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
			   I915_READ(GEN8_DE_PORT_IIR));
		seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
			   I915_READ(GEN8_DE_PORT_IER));

		seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
			   I915_READ(GEN8_DE_MISC_IMR));
		seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
			   I915_READ(GEN8_DE_MISC_IIR));
		seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
			   I915_READ(GEN8_DE_MISC_IER));

		seq_printf(m, "PCU interrupt mask:\t%08x\n",
			   I915_READ(GEN8_PCU_IMR));
		seq_printf(m, "PCU interrupt identity:\t%08x\n",
			   I915_READ(GEN8_PCU_IIR));
		seq_printf(m, "PCU interrupt enable:\t%08x\n",
			   I915_READ(GEN8_PCU_IER));
	} else if (IS_VALLEYVIEW(dev)) {
		seq_printf(m, "Display IER:\t%08x\n",
			   I915_READ(VLV_IER));
		seq_printf(m, "Display IIR:\t%08x\n",
			   I915_READ(VLV_IIR));
		seq_printf(m, "Display IIR_RW:\t%08x\n",
			   I915_READ(VLV_IIR_RW));
		seq_printf(m, "Display IMR:\t%08x\n",
			   I915_READ(VLV_IMR));
		for_each_pipe(dev_priv, pipe)
			seq_printf(m, "Pipe %c stat:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));

		seq_printf(m, "Master IER:\t%08x\n",
			   I915_READ(VLV_MASTER_IER));

		seq_printf(m, "Render IER:\t%08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Render IIR:\t%08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Render IMR:\t%08x\n",
			   I915_READ(GTIMR));

		seq_printf(m, "PM IER:\t\t%08x\n",
			   I915_READ(GEN6_PMIER));
		seq_printf(m, "PM IIR:\t\t%08x\n",
			   I915_READ(GEN6_PMIIR));
		seq_printf(m, "PM IMR:\t\t%08x\n",
			   I915_READ(GEN6_PMIMR));

		seq_printf(m, "Port hotplug:\t%08x\n",
			   I915_READ(PORT_HOTPLUG_EN));
		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
			   I915_READ(VLV_DPFLIPSTAT));
		seq_printf(m, "DPINVGTT:\t%08x\n",
			   I915_READ(DPINVGTT));

	} else if (!HAS_PCH_SPLIT(dev)) {
		seq_printf(m, "Interrupt enable:    %08x\n",
			   I915_READ(IER));
		seq_printf(m, "Interrupt identity:  %08x\n",
			   I915_READ(IIR));
		seq_printf(m, "Interrupt mask:      %08x\n",
			   I915_READ(IMR));
		for_each_pipe(dev_priv, pipe)
			seq_printf(m, "Pipe %c stat:         %08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));
	} else {
		seq_printf(m, "North Display Interrupt enable:		%08x\n",
			   I915_READ(DEIER));
		seq_printf(m, "North Display Interrupt identity:	%08x\n",
			   I915_READ(DEIIR));
		seq_printf(m, "North Display Interrupt mask:		%08x\n",
			   I915_READ(DEIMR));
		seq_printf(m, "South Display Interrupt enable:		%08x\n",
			   I915_READ(SDEIER));
		seq_printf(m, "South Display Interrupt identity:	%08x\n",
			   I915_READ(SDEIIR));
		seq_printf(m, "South Display Interrupt mask:		%08x\n",
			   I915_READ(SDEIMR));
		seq_printf(m, "Graphics Interrupt enable:		%08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Graphics Interrupt identity:		%08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Graphics Interrupt mask:		%08x\n",
			   I915_READ(GTIMR));
	}
	for_each_ring(ring, dev_priv, i) {
		if (INTEL_INFO(dev)->gen >= 6) {
			seq_printf(m,
				   "Graphics Interrupt mask (%s):	%08x\n",
				   ring->name, I915_READ_IMR(ring));
		}
		i915_ring_seqno_info(m, ring);
	}
	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
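
/* List every hardware fence register and the object (if any) occupying it. */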
static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start);
	seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
	for (i = 0; i < dev_priv->num_fence_regs; i++) {
		struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj;

		seq_printf(m, "Fence %d, pin count = %d, object = ",
			   i, dev_priv->fence_regs[i].pin_count);
		if (obj == NULL)
			seq_puts(m, "unused");
		else
			describe_obj(m, obj);
		seq_putc(m, '\n');
	}

	mutex_unlock(&dev->struct_mutex);
	return 0;
}
static int i915_hws_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	const u32 *hws;
	int i;

	ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
	hws = ring->status_page.page_addr;
	if (hws == NULL)
		return 0;

	for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) {
		seq_printf(m, "0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
			   i * 4,
			   hws[i], hws[i + 1], hws[i + 2], hws[i + 3]);
	}
	return 0;
}
static ssize_t
i915_error_state_write(struct file *filp,
		       const char __user *ubuf,
		       size_t cnt,
		       loff_t *ppos)
{
	struct i915_error_state_file_priv *error_priv = filp->private_data;
	struct drm_device *dev = error_priv->dev;
	int ret;

	DRM_DEBUG_DRIVER("Resetting error state\n");

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	i915_destroy_error_state(dev);
	mutex_unlock(&dev->struct_mutex);

	return cnt;
}
static int i915_error_state_open(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;
	struct i915_error_state_file_priv *error_priv;

	error_priv = kzalloc(sizeof(*error_priv), GFP_KERNEL);
	if (!error_priv)
		return -ENOMEM;

	error_priv->dev = dev;

	i915_error_state_get(dev, error_priv);

	file->private_data = error_priv;

	return 0;
}
static int i915_error_state_release(struct inode *inode, struct file *file)
{
	struct i915_error_state_file_priv *error_priv = file->private_data;

	i915_error_state_put(error_priv);
	kfree(error_priv);

	return 0;
}
static ssize_t i915_error_state_read(struct file *file, char __user *userbuf,
				     size_t count, loff_t *pos)
{
	struct i915_error_state_file_priv *error_priv = file->private_data;
	struct drm_i915_error_state_buf error_str;
	loff_t tmp_pos = 0;
	ssize_t ret_count = 0;
	int ret;

	ret = i915_error_state_buf_init(&error_str, to_i915(error_priv->dev), count, *pos);
	if (ret)
		return ret;

	ret = i915_error_state_to_str(&error_str, error_priv);
	if (ret)
		goto out;

	ret_count = simple_read_from_buffer(userbuf, count, &tmp_pos,
					    error_str.buf,
					    error_str.bytes);

	if (ret_count < 0)
		ret = ret_count;
	else
		*pos = error_str.start + ret_count;
out:
	i915_error_state_buf_release(&error_str);
	return ret ?: ret_count;
}
static const struct file_operations i915_error_state_fops = {
	.owner = THIS_MODULE,
	.open = i915_error_state_open,
	.read = i915_error_state_read,
	.write = i915_error_state_write,
	.llseek = default_llseek,
	.release = i915_error_state_release,
};
static int
i915_next_seqno_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	*val = dev_priv->next_seqno;
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int
i915_next_seqno_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	ret = i915_gem_set_seqno(dev, val);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops,
			i915_next_seqno_get, i915_next_seqno_set,
			"0x%llx\n");
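
/* RPS/turbo state: requested vs actual GPU frequency, up/down thresholds
 * and the RP0/RP1/RPn limits, with per-generation register decoding. */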
static int i915_frequency_info(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret = 0;

	intel_runtime_pm_get(dev_priv);

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	if (IS_GEN5(dev)) {
		u16 rgvswctl = I915_READ16(MEMSWCTL);
		u16 rgvstat = I915_READ16(MEMSTAT_ILK);

		seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
		seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
		seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
			   MEMSTAT_VID_SHIFT);
		seq_printf(m, "Current P-state: %d\n",
			   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
	} else if (IS_GEN6(dev) || (IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) ||
		   IS_BROADWELL(dev) || IS_GEN9(dev)) {
		u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
		u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
		u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
		u32 rpmodectl, rpinclimit, rpdeclimit;
		u32 rpstat, cagf, reqf;
		u32 rpupei, rpcurup, rpprevup;
		u32 rpdownei, rpcurdown, rpprevdown;
		u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
		int max_freq;

		/* RPSTAT1 is in the GT power well */
		ret = mutex_lock_interruptible(&dev->struct_mutex);
		if (ret)
			goto out;

		intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

		reqf = I915_READ(GEN6_RPNSWREQ);
		if (IS_GEN9(dev))
			reqf >>= 23;
		else {
			reqf &= ~GEN6_TURBO_DISABLE;
			if (IS_HASWELL(dev) || IS_BROADWELL(dev))
				reqf >>= 24;
			else
				reqf >>= 25;
		}
		reqf = intel_gpu_freq(dev_priv, reqf);

		rpmodectl = I915_READ(GEN6_RP_CONTROL);
		rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
		rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD);

		rpstat = I915_READ(GEN6_RPSTAT1);
		rpupei = I915_READ(GEN6_RP_CUR_UP_EI);
		rpcurup = I915_READ(GEN6_RP_CUR_UP);
		rpprevup = I915_READ(GEN6_RP_PREV_UP);
		rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI);
		rpcurdown = I915_READ(GEN6_RP_CUR_DOWN);
		rpprevdown = I915_READ(GEN6_RP_PREV_DOWN);
		if (IS_GEN9(dev))
			cagf = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT;
		else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
			cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
		else
			cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
		cagf = intel_gpu_freq(dev_priv, cagf);

		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
		mutex_unlock(&dev->struct_mutex);

		if (IS_GEN6(dev) || IS_GEN7(dev)) {
			pm_ier = I915_READ(GEN6_PMIER);
			pm_imr = I915_READ(GEN6_PMIMR);
			pm_isr = I915_READ(GEN6_PMISR);
			pm_iir = I915_READ(GEN6_PMIIR);
			pm_mask = I915_READ(GEN6_PMINTRMSK);
		} else {
			pm_ier = I915_READ(GEN8_GT_IER(2));
			pm_imr = I915_READ(GEN8_GT_IMR(2));
			pm_isr = I915_READ(GEN8_GT_ISR(2));
			pm_iir = I915_READ(GEN8_GT_IIR(2));
			pm_mask = I915_READ(GEN6_PMINTRMSK);
		}
		seq_printf(m, "PM IER=0x%08x IMR=0x%08x ISR=0x%08x IIR=0x%08x, MASK=0x%08x\n",
			   pm_ier, pm_imr, pm_isr, pm_iir, pm_mask);
		seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
		seq_printf(m, "Render p-state ratio: %d\n",
			   (gt_perf_status & (IS_GEN9(dev) ? 0x1ff00 : 0xff00)) >> 8);
		seq_printf(m, "Render p-state VID: %d\n",
			   gt_perf_status & 0xff);
		seq_printf(m, "Render p-state limit: %d\n",
			   rp_state_limits & 0xff);
		seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
		seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl);
		seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit);
		seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
		seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
		seq_printf(m, "CAGF: %dMHz\n", cagf);
		seq_printf(m, "RP CUR UP EI: %dus\n", rpupei &
			   GEN6_CURICONT_MASK);
		seq_printf(m, "RP CUR UP: %dus\n", rpcurup &
			   GEN6_CURBSYTAVG_MASK);
		seq_printf(m, "RP PREV UP: %dus\n", rpprevup &
			   GEN6_CURBSYTAVG_MASK);
		seq_printf(m, "Up threshold: %d%%\n",
			   dev_priv->rps.up_threshold);

		seq_printf(m, "RP CUR DOWN EI: %dus\n", rpdownei &
			   GEN6_CURIAVG_MASK);
		seq_printf(m, "RP CUR DOWN: %dus\n", rpcurdown &
			   GEN6_CURBSYTAVG_MASK);
		seq_printf(m, "RP PREV DOWN: %dus\n", rpprevdown &
			   GEN6_CURBSYTAVG_MASK);
		seq_printf(m, "Down threshold: %d%%\n",
			   dev_priv->rps.down_threshold);

		max_freq = (rp_state_cap & 0xff0000) >> 16;
		max_freq *= (IS_SKYLAKE(dev) ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, max_freq));

		max_freq = (rp_state_cap & 0xff00) >> 8;
		max_freq *= (IS_SKYLAKE(dev) ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, max_freq));

		max_freq = rp_state_cap & 0xff;
		max_freq *= (IS_SKYLAKE(dev) ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, max_freq));
		seq_printf(m, "Max overclocked frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));

		seq_printf(m, "Current freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq));
		seq_printf(m, "Actual freq: %d MHz\n", cagf);
		seq_printf(m, "Idle freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq));
		seq_printf(m, "Min freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.min_freq));
		seq_printf(m, "Max freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
		seq_printf(m,
			   "efficient (RPe) frequency: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
	} else if (IS_VALLEYVIEW(dev)) {
		u32 freq_sts;

		mutex_lock(&dev_priv->rps.hw_lock);
		freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
		seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
		seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);

		seq_printf(m, "actual GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));

		seq_printf(m, "current GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq));

		seq_printf(m, "max GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));

		seq_printf(m, "min GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.min_freq));

		seq_printf(m, "idle GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq));

		seq_printf(m,
			   "efficient (RPe) frequency: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
		mutex_unlock(&dev_priv->rps.hw_lock);
	} else {
		seq_puts(m, "no P-state info available\n");
	}

out:
	intel_runtime_pm_put(dev_priv);
	return ret;
}
static int i915_hangcheck_info(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	u64 acthd[I915_NUM_RINGS];
	u32 seqno[I915_NUM_RINGS];
	int i;

	if (!i915.enable_hangcheck) {
		seq_printf(m, "Hangcheck disabled\n");
		return 0;
	}

	intel_runtime_pm_get(dev_priv);

	for_each_ring(ring, dev_priv, i) {
		seqno[i] = ring->get_seqno(ring, false);
		acthd[i] = intel_ring_get_active_head(ring);
	}

	intel_runtime_pm_put(dev_priv);

	if (delayed_work_pending(&dev_priv->gpu_error.hangcheck_work)) {
		seq_printf(m, "Hangcheck active, fires in %dms\n",
			   jiffies_to_msecs(dev_priv->gpu_error.hangcheck_work.timer.expires -
					    jiffies));
	} else
		seq_printf(m, "Hangcheck inactive\n");

	for_each_ring(ring, dev_priv, i) {
		seq_printf(m, "%s:\n", ring->name);
		seq_printf(m, "\tseqno = %x [current %x]\n",
			   ring->hangcheck.seqno, seqno[i]);
		seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
			   (long long)ring->hangcheck.acthd,
			   (long long)acthd[i]);
		seq_printf(m, "\tmax ACTHD = 0x%08llx\n",
			   (long long)ring->hangcheck.max_acthd);
		seq_printf(m, "\tscore = %d\n", ring->hangcheck.score);
		seq_printf(m, "\taction = %d\n", ring->hangcheck.action);
	}

	return 0;
}
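
/* Ironlake render-standby (DRPC) state from MEMMODECTL/RSTDBYCTL. */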
static int ironlake_drpc_info(struct seq_file *m)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 rgvmodectl, rstdbyctl;
	u16 crstandvid;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	rgvmodectl = I915_READ(MEMMODECTL);
	rstdbyctl = I915_READ(RSTDBYCTL);
	crstandvid = I915_READ16(CRSTANDVID);

	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ?
		   "yes" : "no");
	seq_printf(m, "Boost freq: %d\n",
		   (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
		   MEMMODE_BOOST_FREQ_SHIFT);
	seq_printf(m, "HW control enabled: %s\n",
		   rgvmodectl & MEMMODE_HWIDLE_EN ? "yes" : "no");
	seq_printf(m, "SW control enabled: %s\n",
		   rgvmodectl & MEMMODE_SWMODE_EN ? "yes" : "no");
	seq_printf(m, "Gated voltage change: %s\n",
		   rgvmodectl & MEMMODE_RCLK_GATE ? "yes" : "no");
	seq_printf(m, "Starting frequency: P%d\n",
		   (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
	seq_printf(m, "Max P-state: P%d\n",
		   (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
	seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
	seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
	seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
	seq_printf(m, "Render standby enabled: %s\n",
		   (rstdbyctl & RCX_SW_EXIT) ? "no" : "yes");
	seq_puts(m, "Current RS state: ");
	switch (rstdbyctl & RSX_STATUS_MASK) {
	case RSX_STATUS_ON:
		seq_puts(m, "on\n");
		break;
	case RSX_STATUS_RC1:
		seq_puts(m, "RC1\n");
		break;
	case RSX_STATUS_RC1E:
		seq_puts(m, "RC1E\n");
		break;
	case RSX_STATUS_RS1:
		seq_puts(m, "RS1\n");
		break;
	case RSX_STATUS_RS2:
		seq_puts(m, "RS2 (RC6)\n");
		break;
	case RSX_STATUS_RS3:
		seq_puts(m, "RC3 (RC6+)\n");
		break;
	default:
		seq_puts(m, "unknown\n");
		break;
	}

	return 0;
}
static int i915_forcewake_domains(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_uncore_forcewake_domain *fw_domain;
	int i;

	spin_lock_irq(&dev_priv->uncore.lock);
	for_each_fw_domain(fw_domain, dev_priv, i) {
		seq_printf(m, "%s.wake_count = %u\n",
			   intel_uncore_forcewake_domain_to_str(i),
			   fw_domain->wake_count);
	}
	spin_unlock_irq(&dev_priv->uncore.lock);

	return 0;
}
static int vlv_drpc_info(struct seq_file *m)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 rpmodectl1, rcctl1, pw_status;

	intel_runtime_pm_get(dev_priv);

	pw_status = I915_READ(VLV_GTLC_PW_STATUS);
	rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
	rcctl1 = I915_READ(GEN6_RC_CONTROL);

	intel_runtime_pm_put(dev_priv);

	seq_printf(m, "Video Turbo Mode: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
	seq_printf(m, "Turbo enabled: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_ENABLE));
	seq_printf(m, "HW control enabled: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_ENABLE));
	seq_printf(m, "SW control enabled: %s\n",
		   yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) ==
			  GEN6_RP_MEDIA_SW_MODE));
	seq_printf(m, "RC6 Enabled: %s\n",
		   yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
					GEN6_RC_CTL_EI_MODE(1))));
	seq_printf(m, "Render Power Well: %s\n",
		   (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
	seq_printf(m, "Media Power Well: %s\n",
		   (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");

	seq_printf(m, "Render RC6 residency since boot: %u\n",
		   I915_READ(VLV_GT_RENDER_RC6));
	seq_printf(m, "Media RC6 residency since boot: %u\n",
		   I915_READ(VLV_GT_MEDIA_RC6));

	return i915_forcewake_domains(m, NULL);
}
static int gen6_drpc_info(struct seq_file *m)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 rpmodectl1, gt_core_status, rcctl1, rc6vids = 0;
	unsigned forcewake_count;
	int count = 0, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	spin_lock_irq(&dev_priv->uncore.lock);
	forcewake_count = dev_priv->uncore.fw_domain[FW_DOMAIN_ID_RENDER].wake_count;
	spin_unlock_irq(&dev_priv->uncore.lock);

	if (forcewake_count) {
		seq_puts(m, "RC information inaccurate because somebody "
			    "holds a forcewake reference \n");
	} else {
		/* NB: we cannot use forcewake, else we read the wrong values */
		while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
			udelay(10);
		seq_printf(m, "RC information accurate: %s\n", yesno(count < 51));
	}

	gt_core_status = readl(dev_priv->regs + GEN6_GT_CORE_STATUS);
	trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);

	rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
	rcctl1 = I915_READ(GEN6_RC_CONTROL);
	mutex_unlock(&dev->struct_mutex);
	mutex_lock(&dev_priv->rps.hw_lock);
	sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
	mutex_unlock(&dev_priv->rps.hw_lock);

	intel_runtime_pm_put(dev_priv);

	seq_printf(m, "Video Turbo Mode: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
	seq_printf(m, "HW control enabled: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_ENABLE));
	seq_printf(m, "SW control enabled: %s\n",
		   yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) ==
			  GEN6_RP_MEDIA_SW_MODE));
	seq_printf(m, "RC1e Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
	seq_printf(m, "RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
	seq_printf(m, "Deep RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
	seq_printf(m, "Deepest RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
	seq_puts(m, "Current RC state: ");
	switch (gt_core_status & GEN6_RCn_MASK) {
	case GEN6_RC0:
		if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
			seq_puts(m, "Core Power Down\n");
		else
			seq_puts(m, "on\n");
		break;
	case GEN6_RC3:
		seq_puts(m, "RC3\n");
		break;
	case GEN6_RC6:
		seq_puts(m, "RC6\n");
		break;
	case GEN6_RC7:
		seq_puts(m, "RC7\n");
		break;
	default:
		seq_puts(m, "Unknown\n");
		break;
	}

	seq_printf(m, "Core Power Down: %s\n",
		   yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));

	/* Not exactly sure what this is */
	seq_printf(m, "RC6 \"Locked to RPn\" residency since boot: %u\n",
		   I915_READ(GEN6_GT_GFX_RC6_LOCKED));
	seq_printf(m, "RC6 residency since boot: %u\n",
		   I915_READ(GEN6_GT_GFX_RC6));
	seq_printf(m, "RC6+ residency since boot: %u\n",
		   I915_READ(GEN6_GT_GFX_RC6p));
	seq_printf(m, "RC6++ residency since boot: %u\n",
		   I915_READ(GEN6_GT_GFX_RC6pp));

	seq_printf(m, "RC6   voltage: %dmV\n",
		   GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
	seq_printf(m, "RC6+  voltage: %dmV\n",
		   GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
	seq_printf(m, "RC6++ voltage: %dmV\n",
		   GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
	return 0;
}
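
/* Dispatch to the platform-specific DRPC dump. */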
static int i915_drpc_info(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;

	if (IS_VALLEYVIEW(dev))
		return vlv_drpc_info(m);
	else if (INTEL_INFO(dev)->gen >= 6)
		return gen6_drpc_info(m);
	else
		return ironlake_drpc_info(m);
}
static int i915_fbc_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!HAS_FBC(dev)) {
		seq_puts(m, "FBC unsupported on this chipset\n");
		return 0;
	}

	intel_runtime_pm_get(dev_priv);

	if (intel_fbc_enabled(dev)) {
		seq_puts(m, "FBC enabled\n");
	} else {
		seq_puts(m, "FBC disabled: ");
		switch (dev_priv->fbc.no_fbc_reason) {
		case FBC_OK:
			seq_puts(m, "FBC activated, but currently disabled in hardware");
			break;
		case FBC_UNSUPPORTED:
			seq_puts(m, "unsupported by this chipset");
			break;
		case FBC_NO_OUTPUT:
			seq_puts(m, "no outputs");
			break;
		case FBC_STOLEN_TOO_SMALL:
			seq_puts(m, "not enough stolen memory");
			break;
		case FBC_UNSUPPORTED_MODE:
			seq_puts(m, "mode not supported");
			break;
		case FBC_MODE_TOO_LARGE:
			seq_puts(m, "mode too large");
			break;
		case FBC_BAD_PLANE:
			seq_puts(m, "FBC unsupported on plane");
			break;
		case FBC_NOT_TILED:
			seq_puts(m, "scanout buffer not tiled");
			break;
		case FBC_MULTIPLE_PIPES:
			seq_puts(m, "multiple pipes are enabled");
			break;
		case FBC_MODULE_PARAM:
			seq_puts(m, "disabled per module param (default off)");
			break;
		case FBC_CHIP_DEFAULT:
			seq_puts(m, "disabled per chip default");
			break;
		default:
			seq_puts(m, "unknown reason");
		}
		seq_putc(m, '\n');
	}

	intel_runtime_pm_put(dev_priv);

	return 0;
}
static int i915_fbc_fc_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen < 7 || !HAS_FBC(dev))
		return -ENODEV;

	drm_modeset_lock_all(dev);
	*val = dev_priv->fbc.false_color;
	drm_modeset_unlock_all(dev);

	return 0;
}

static int i915_fbc_fc_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 reg;

	if (INTEL_INFO(dev)->gen < 7 || !HAS_FBC(dev))
		return -ENODEV;

	drm_modeset_lock_all(dev);

	reg = I915_READ(ILK_DPFC_CONTROL);
	dev_priv->fbc.false_color = val;

	I915_WRITE(ILK_DPFC_CONTROL, val ?
		   (reg | FBC_CTL_FALSE_COLOR) :
		   (reg & ~FBC_CTL_FALSE_COLOR));

	drm_modeset_unlock_all(dev);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_fc_fops,
			i915_fbc_fc_get, i915_fbc_fc_set,
			"%llu\n");
static int i915_ips_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!HAS_IPS(dev)) {
		seq_puts(m, "not supported\n");
		return 0;
	}

	intel_runtime_pm_get(dev_priv);

	seq_printf(m, "Enabled by kernel parameter: %s\n",
		   yesno(i915.enable_ips));

	if (INTEL_INFO(dev)->gen >= 8) {
		seq_puts(m, "Currently: unknown\n");
	} else {
		if (I915_READ(IPS_CTL) & IPS_ENABLE)
			seq_puts(m, "Currently: enabled\n");
		else
			seq_puts(m, "Currently: disabled\n");
	}

	intel_runtime_pm_put(dev_priv);

	return 0;
}
static int i915_sr_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool sr_enabled = false;

	intel_runtime_pm_get(dev_priv);

	if (HAS_PCH_SPLIT(dev))
		sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
	else if (IS_CRESTLINE(dev) || IS_G4X(dev) ||
		 IS_I945G(dev) || IS_I945GM(dev))
		sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
	else if (IS_I915GM(dev))
		sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
	else if (IS_PINEVIEW(dev))
		sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
	else if (IS_VALLEYVIEW(dev))
		sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;

	intel_runtime_pm_put(dev_priv);

	seq_printf(m, "self-refresh: %s\n",
		   sr_enabled ? "enabled" : "disabled");

	return 0;
}
static int i915_emon_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long temp, chipset, gfx;
	int ret;

	if (!IS_GEN5(dev))
		return -ENODEV;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	temp = i915_mch_val(dev_priv);
	chipset = i915_chipset_val(dev_priv);
	gfx = i915_gfx_val(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "GMCH temp: %ld\n", temp);
	seq_printf(m, "Chipset power: %ld\n", chipset);
	seq_printf(m, "GFX power: %ld\n", gfx);
	seq_printf(m, "Total power: %ld\n", chipset + gfx);

	return 0;
}
static int i915_ring_freq_table(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret = 0;
	int gpu_freq, ia_freq;

	if (!(IS_GEN6(dev) || IS_GEN7(dev))) {
		seq_puts(m, "unsupported on this chipset\n");
		return 0;
	}

	intel_runtime_pm_get(dev_priv);

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
	if (ret)
		goto out;

	seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");

	for (gpu_freq = dev_priv->rps.min_freq_softlimit;
	     gpu_freq <= dev_priv->rps.max_freq_softlimit;
	     gpu_freq++) {
		ia_freq = gpu_freq;
		sandybridge_pcode_read(dev_priv,
				       GEN6_PCODE_READ_MIN_FREQ_TABLE,
				       &ia_freq);
		seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
			   intel_gpu_freq(dev_priv, gpu_freq),
			   ((ia_freq >> 0) & 0xff) * 100,
			   ((ia_freq >> 8) & 0xff) * 100);
	}

	mutex_unlock(&dev_priv->rps.hw_lock);

out:
	intel_runtime_pm_put(dev_priv);
	return ret;
}
static int i915_opregion(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_opregion *opregion = &dev_priv->opregion;
	void *data = kmalloc(OPREGION_SIZE, GFP_KERNEL);
	int ret;

	if (data == NULL)
		return -ENOMEM;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out;

	if (opregion->header) {
		memcpy_fromio(data, opregion->header, OPREGION_SIZE);
		seq_write(m, data, OPREGION_SIZE);
	}

	mutex_unlock(&dev->struct_mutex);

out:
	kfree(data);
	return 0;
}
static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct intel_fbdev *ifbdev = NULL;
	struct intel_framebuffer *fb;

#ifdef CONFIG_DRM_I915_FBDEV
	struct drm_i915_private *dev_priv = dev->dev_private;

	ifbdev = dev_priv->fbdev;
	fb = to_intel_framebuffer(ifbdev->helper.fb);

	seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
		   fb->base.width,
		   fb->base.height,
		   fb->base.depth,
		   fb->base.bits_per_pixel,
		   fb->base.modifier[0],
		   atomic_read(&fb->base.refcount.refcount));
	describe_obj(m, fb->obj);
	seq_putc(m, '\n');
#endif

	mutex_lock(&dev->mode_config.fb_lock);
	list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) {
		if (ifbdev && &fb->base == ifbdev->helper.fb)
			continue;

		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
			   fb->base.width,
			   fb->base.height,
			   fb->base.depth,
			   fb->base.bits_per_pixel,
			   fb->base.modifier[0],
			   atomic_read(&fb->base.refcount.refcount));
		describe_obj(m, fb->obj);
		seq_putc(m, '\n');
	}
	mutex_unlock(&dev->mode_config.fb_lock);

	return 0;
}
static void describe_ctx_ringbuf(struct seq_file *m,
				 struct intel_ringbuffer *ringbuf)
{
	seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, last head: %d)",
		   ringbuf->space, ringbuf->head, ringbuf->tail,
		   ringbuf->last_retired_head);
}
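
/* Enumerate all hardware contexts, including per-engine state objects and
 * ringbuffers when execlists are enabled. */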
static int i915_context_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	struct intel_context *ctx;
	int ret, i;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	list_for_each_entry(ctx, &dev_priv->context_list, link) {
		if (!i915.enable_execlists &&
		    ctx->legacy_hw_ctx.rcs_state == NULL)
			continue;

		seq_puts(m, "HW context ");
		describe_ctx(m, ctx);
		for_each_ring(ring, dev_priv, i) {
			if (ring->default_context == ctx)
				seq_printf(m, "(default context %s) ",
					   ring->name);
		}

		if (i915.enable_execlists) {
			seq_putc(m, '\n');
			for_each_ring(ring, dev_priv, i) {
				struct drm_i915_gem_object *ctx_obj =
					ctx->engine[i].state;
				struct intel_ringbuffer *ringbuf =
					ctx->engine[i].ringbuf;

				seq_printf(m, "%s: ", ring->name);
				if (ctx_obj)
					describe_obj(m, ctx_obj);
				if (ringbuf)
					describe_ctx_ringbuf(m, ringbuf);
				seq_putc(m, '\n');
			}
		} else {
			describe_obj(m, ctx->legacy_hw_ctx.rcs_state);
		}

		seq_putc(m, '\n');
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
static void i915_dump_lrc_obj(struct seq_file *m,
			      struct intel_engine_cs *ring,
			      struct drm_i915_gem_object *ctx_obj)
{
	struct page *page;
	uint32_t *reg_state;
	int j;
	unsigned long ggtt_offset = 0;

	if (ctx_obj == NULL) {
		seq_printf(m, "Context on %s with no gem object\n",
			   ring->name);
		return;
	}

	seq_printf(m, "CONTEXT: %s %u\n", ring->name,
		   intel_execlists_ctx_id(ctx_obj));

	if (!i915_gem_obj_ggtt_bound(ctx_obj))
		seq_puts(m, "\tNot bound in GGTT\n");
	else
		ggtt_offset = i915_gem_obj_ggtt_offset(ctx_obj);

	if (i915_gem_object_get_pages(ctx_obj)) {
		seq_puts(m, "\tFailed to get pages for context object\n");
		return;
	}

	page = i915_gem_object_get_page(ctx_obj, 1);
	if (!WARN_ON(page == NULL)) {
		reg_state = kmap_atomic(page);

		for (j = 0; j < 0x600 / sizeof(u32) / 4; j += 4) {
			seq_printf(m, "\t[0x%08lx] 0x%08x 0x%08x 0x%08x 0x%08x\n",
				   ggtt_offset + 4096 + (j * 4),
				   reg_state[j], reg_state[j + 1],
				   reg_state[j + 2], reg_state[j + 3]);
		}
		kunmap_atomic(reg_state);
	}

	seq_putc(m, '\n');
}
static int i915_dump_lrc(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	struct intel_context *ctx;
	int ret, i;

	if (!i915.enable_execlists) {
		seq_printf(m, "Logical Ring Contexts are disabled\n");
		return 0;
	}

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	list_for_each_entry(ctx, &dev_priv->context_list, link) {
		for_each_ring(ring, dev_priv, i) {
			if (ring->default_context != ctx)
				i915_dump_lrc_obj(m, ring,
						  ctx->engine[i].state);
		}
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
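
/* Execlists state: ELSP status registers, context-status buffer entries and
 * the software submission queue for each ring. */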
static int i915_execlists(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	u32 status_pointer;
	u8 read_pointer;
	u8 write_pointer;
	u32 status;
	u32 ctx_id;
	struct list_head *cursor;
	int ring_id, i;
	int ret;

	if (!i915.enable_execlists) {
		seq_puts(m, "Logical Ring Contexts are disabled\n");
		return 0;
	}

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	intel_runtime_pm_get(dev_priv);

	for_each_ring(ring, dev_priv, ring_id) {
		struct drm_i915_gem_request *head_req = NULL;
		int count = 0;
		unsigned long flags;

		seq_printf(m, "%s\n", ring->name);

		status = I915_READ(RING_EXECLIST_STATUS(ring));
		ctx_id = I915_READ(RING_EXECLIST_STATUS(ring) + 4);
		seq_printf(m, "\tExeclist status: 0x%08X, context: %u\n",
			   status, ctx_id);

		status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(ring));
		seq_printf(m, "\tStatus pointer: 0x%08X\n", status_pointer);

		read_pointer = ring->next_context_status_buffer;
		write_pointer = status_pointer & 0x07;
		if (read_pointer > write_pointer)
			write_pointer += 6;
		seq_printf(m, "\tRead pointer: 0x%08X, write pointer 0x%08X\n",
			   read_pointer, write_pointer);

		for (i = 0; i < 6; i++) {
			status = I915_READ(RING_CONTEXT_STATUS_BUF(ring) + 8*i);
			ctx_id = I915_READ(RING_CONTEXT_STATUS_BUF(ring) + 8*i + 4);

			seq_printf(m, "\tStatus buffer %d: 0x%08X, context: %u\n",
				   i, status, ctx_id);
		}

		spin_lock_irqsave(&ring->execlist_lock, flags);
		list_for_each(cursor, &ring->execlist_queue)
			count++;
		head_req = list_first_entry_or_null(&ring->execlist_queue,
				struct drm_i915_gem_request, execlist_link);
		spin_unlock_irqrestore(&ring->execlist_lock, flags);

		seq_printf(m, "\t%d requests in queue\n", count);
		if (head_req) {
			struct drm_i915_gem_object *ctx_obj;

			ctx_obj = head_req->ctx->engine[ring_id].state;
			seq_printf(m, "\tHead request id: %u\n",
				   intel_execlists_ctx_id(ctx_obj));
			seq_printf(m, "\tHead request tail: %u\n",
				   head_req->tail);
		}

		seq_putc(m, '\n');
	}

	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
static const char *swizzle_string(unsigned swizzle)
{
	switch (swizzle) {
	case I915_BIT_6_SWIZZLE_NONE:
		return "none";
	case I915_BIT_6_SWIZZLE_9:
		return "bit9";
	case I915_BIT_6_SWIZZLE_9_10:
		return "bit9/bit10";
	case I915_BIT_6_SWIZZLE_9_11:
		return "bit9/bit11";
	case I915_BIT_6_SWIZZLE_9_10_11:
		return "bit9/bit10/bit11";
	case I915_BIT_6_SWIZZLE_9_17:
		return "bit9/bit17";
	case I915_BIT_6_SWIZZLE_9_10_17:
		return "bit9/bit10/bit17";
	case I915_BIT_6_SWIZZLE_UNKNOWN:
		return "unknown";
	}

	return "bug";
}
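
/* Report the bit-6 swizzle mode used for X/Y tiling and the underlying
 * DRAM/arbiter configuration registers. */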
static int i915_swizzle_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_x));
	seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_y));

	if (IS_GEN3(dev) || IS_GEN4(dev)) {
		seq_printf(m, "DDC = 0x%08x\n",
			   I915_READ(DCC));
		seq_printf(m, "DDC2 = 0x%08x\n",
			   I915_READ(DCC2));
		seq_printf(m, "C0DRB3 = 0x%04x\n",
			   I915_READ16(C0DRB3));
		seq_printf(m, "C1DRB3 = 0x%04x\n",
			   I915_READ16(C1DRB3));
	} else if (INTEL_INFO(dev)->gen >= 6) {
		seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C0));
		seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C1));
		seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C2));
		seq_printf(m, "TILECTL = 0x%08x\n",
			   I915_READ(TILECTL));
		if (INTEL_INFO(dev)->gen >= 8)
			seq_printf(m, "GAMTARBMODE = 0x%08x\n",
				   I915_READ(GAMTARBMODE));
		else
			seq_printf(m, "ARB_MODE = 0x%08x\n",
				   I915_READ(ARB_MODE));
		seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
			   I915_READ(DISP_ARB_CTL));
	}

	if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
		seq_puts(m, "L-shaped memory detected\n");

	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
static int per_file_ctx(int id, void *ptr, void *data)
{
	struct intel_context *ctx = ptr;
	struct seq_file *m = data;
	struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;

	if (!ppgtt) {
		seq_printf(m, "  no ppgtt for context %d\n",
			   ctx->user_handle);
		return 0;
	}

	if (i915_gem_context_is_default(ctx))
		seq_puts(m, "  default context:\n");
	else
		seq_printf(m, "  context %d:\n", ctx->user_handle);
	ppgtt->debug_dump(ppgtt, m);

	return 0;
}
static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
	int unused, i;

	if (!ppgtt)
		return;

	for_each_ring(ring, dev_priv, unused) {
		seq_printf(m, "%s\n", ring->name);
		for (i = 0; i < 4; i++) {
			u32 offset = 0x270 + i * 8;
			u64 pdp = I915_READ(ring->mmio_base + offset + 4);
			pdp <<= 32;
			pdp |= I915_READ(ring->mmio_base + offset);
			seq_printf(m, "\tPDP%d 0x%016llx\n", i, pdp);
		}
	}
}
static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	struct drm_file *file;
	int i;

	if (INTEL_INFO(dev)->gen == 6)
		seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));

	for_each_ring(ring, dev_priv, i) {
		seq_printf(m, "%s\n", ring->name);
		if (INTEL_INFO(dev)->gen == 7)
			seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(RING_MODE_GEN7(ring)));
		seq_printf(m, "PP_DIR_BASE: 0x%08x\n", I915_READ(RING_PP_DIR_BASE(ring)));
		seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n", I915_READ(RING_PP_DIR_BASE_READ(ring)));
		seq_printf(m, "PP_DIR_DCLV: 0x%08x\n", I915_READ(RING_PP_DIR_DCLV(ring)));
	}
	if (dev_priv->mm.aliasing_ppgtt) {
		struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;

		seq_puts(m, "aliasing PPGTT:\n");
		seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd.pd_offset);

		ppgtt->debug_dump(ppgtt, m);
	}

	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
		struct drm_i915_file_private *file_priv = file->driver_priv;

		seq_printf(m, "proc: %s\n",
			   get_pid_task(file->pid, PIDTYPE_PID)->comm);
		idr_for_each(&file_priv->context_idr, per_file_ctx, m);
	}

	seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
}
static int i915_ppgtt_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	int ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	if (INTEL_INFO(dev)->gen >= 8)
		gen8_ppgtt_info(m, dev);
	else if (INTEL_INFO(dev)->gen >= 6)
		gen6_ppgtt_info(m, dev);

	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
static int count_irq_waiters(struct drm_i915_private *i915)
{
	struct intel_engine_cs *ring;
	int count = 0;
	int i;

	for_each_ring(ring, i915, i)
		count += ring->irq_refcount;

	return count;
}
static int i915_rps_boost_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_file *file;

	seq_printf(m, "RPS enabled? %d\n", dev_priv->rps.enabled);
	seq_printf(m, "GPU busy? %d\n", dev_priv->mm.busy);
	seq_printf(m, "CPU waiting? %d\n", count_irq_waiters(dev_priv));
	seq_printf(m, "Frequency requested %d; min hard:%d, soft:%d; max soft:%d, hard:%d\n",
		   intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
		   intel_gpu_freq(dev_priv, dev_priv->rps.min_freq),
		   intel_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit),
		   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit),
		   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
	spin_lock(&dev_priv->rps.client_lock);
	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
		struct drm_i915_file_private *file_priv = file->driver_priv;
		struct task_struct *task;

		rcu_read_lock();
		task = pid_task(file->pid, PIDTYPE_PID);
		seq_printf(m, "%s [%d]: %d boosts%s\n",
			   task ? task->comm : "<unknown>",
			   task ? task->pid : -1,
			   file_priv->rps.boosts,
			   list_empty(&file_priv->rps.link) ? "" : ", active");
		rcu_read_unlock();
	}
	seq_printf(m, "Semaphore boosts: %d%s\n",
		   dev_priv->rps.semaphores.boosts,
		   list_empty(&dev_priv->rps.semaphores.link) ? "" : ", active");
	seq_printf(m, "MMIO flip boosts: %d%s\n",
		   dev_priv->rps.mmioflips.boosts,
		   list_empty(&dev_priv->rps.mmioflips.link) ? "" : ", active");
	seq_printf(m, "Kernel boosts: %d\n", dev_priv->rps.boosts);
	spin_unlock(&dev_priv->rps.client_lock);

	return 0;
}
static int i915_llc(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* Size calculation for LLC is a bit of a pain. Ignore for now. */
	seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev)));
	seq_printf(m, "eLLC: %zuMB\n", dev_priv->ellc_size);

	return 0;
}
static int i915_edp_psr_status(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 psrperf = 0;
	u32 stat[3];
	enum pipe pipe;
	bool enabled = false;

	if (!HAS_PSR(dev)) {
		seq_puts(m, "PSR not supported\n");
		return 0;
	}

	intel_runtime_pm_get(dev_priv);

	mutex_lock(&dev_priv->psr.lock);
	seq_printf(m, "Sink_Support: %s\n", yesno(dev_priv->psr.sink_support));
	seq_printf(m, "Source_OK: %s\n", yesno(dev_priv->psr.source_ok));
	seq_printf(m, "Enabled: %s\n", yesno((bool)dev_priv->psr.enabled));
	seq_printf(m, "Active: %s\n", yesno(dev_priv->psr.active));
	seq_printf(m, "Busy frontbuffer bits: 0x%03x\n",
		   dev_priv->psr.busy_frontbuffer_bits);
	seq_printf(m, "Re-enable work scheduled: %s\n",
		   yesno(work_busy(&dev_priv->psr.work.work)));

	if (HAS_DDI(dev)) {
		enabled = I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE;
	} else {
		for_each_pipe(dev_priv, pipe) {
			stat[pipe] = I915_READ(VLV_PSRSTAT(pipe)) &
				VLV_EDP_PSR_CURR_STATE_MASK;
			if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
			    (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE))
				enabled = true;
		}
	}
	seq_printf(m, "HW Enabled & Active bit: %s", yesno(enabled));

	if (!HAS_DDI(dev))
		for_each_pipe(dev_priv, pipe) {
			if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
			    (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE))
				seq_printf(m, " pipe %c", pipe_name(pipe));
		}
	seq_puts(m, "\n");

	/* CHV PSR has no kind of performance counter */
	if (HAS_DDI(dev)) {
		psrperf = I915_READ(EDP_PSR_PERF_CNT(dev)) &
			EDP_PSR_PERF_CNT_MASK;

		seq_printf(m, "Performance_Counter: %u\n", psrperf);
	}

	mutex_unlock(&dev_priv->psr.lock);

	intel_runtime_pm_put(dev_priv);
	return 0;
}
static int i915_sink_crc(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct intel_encoder *encoder;
	struct intel_connector *connector;
	struct intel_dp *intel_dp = NULL;
	int ret;
	u8 crc[6];

	drm_modeset_lock_all(dev);
	for_each_intel_connector(dev, connector) {

		if (connector->base.dpms != DRM_MODE_DPMS_ON)
			continue;

		if (!connector->base.encoder)
			continue;

		encoder = to_intel_encoder(connector->base.encoder);
		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		intel_dp = enc_to_intel_dp(&encoder->base);

		ret = intel_dp_sink_crc(intel_dp, crc);
		if (ret)
			goto out;

		seq_printf(m, "%02x%02x%02x%02x%02x%02x\n",
			   crc[0], crc[1], crc[2],
			   crc[3], crc[4], crc[5]);
		goto out;
	}
	ret = -ENODEV;
out:
	drm_modeset_unlock_all(dev);
	return ret;
}
static int i915_energy_uJ(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u64 power;
	u32 units;

	if (INTEL_INFO(dev)->gen < 6)
		return -ENODEV;

	intel_runtime_pm_get(dev_priv);

	rdmsrl(MSR_RAPL_POWER_UNIT, power);
	power = (power & 0x1f00) >> 8;
	units = 1000000 / (1 << power); /* convert to uJ */
	power = I915_READ(MCH_SECP_NRG_STTS);
	power *= units;

	intel_runtime_pm_put(dev_priv);

	seq_printf(m, "%llu", (long long unsigned)power);

	return 0;
}
static int i915_pc8_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) {
		seq_puts(m, "not supported\n");
		return 0;
	}

	seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->mm.busy));
	seq_printf(m, "IRQs disabled: %s\n",
		   yesno(!intel_irqs_enabled(dev_priv)));

	return 0;
}
static const char *power_domain_str(enum intel_display_power_domain domain)
{
	switch (domain) {
	case POWER_DOMAIN_PIPE_A:
		return "PIPE_A";
	case POWER_DOMAIN_PIPE_B:
		return "PIPE_B";
	case POWER_DOMAIN_PIPE_C:
		return "PIPE_C";
	case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
		return "PIPE_A_PANEL_FITTER";
	case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
		return "PIPE_B_PANEL_FITTER";
	case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
		return "PIPE_C_PANEL_FITTER";
	case POWER_DOMAIN_TRANSCODER_A:
		return "TRANSCODER_A";
	case POWER_DOMAIN_TRANSCODER_B:
		return "TRANSCODER_B";
	case POWER_DOMAIN_TRANSCODER_C:
		return "TRANSCODER_C";
	case POWER_DOMAIN_TRANSCODER_EDP:
		return "TRANSCODER_EDP";
	case POWER_DOMAIN_PORT_DDI_A_2_LANES:
		return "PORT_DDI_A_2_LANES";
	case POWER_DOMAIN_PORT_DDI_A_4_LANES:
		return "PORT_DDI_A_4_LANES";
	case POWER_DOMAIN_PORT_DDI_B_2_LANES:
		return "PORT_DDI_B_2_LANES";
	case POWER_DOMAIN_PORT_DDI_B_4_LANES:
		return "PORT_DDI_B_4_LANES";
	case POWER_DOMAIN_PORT_DDI_C_2_LANES:
		return "PORT_DDI_C_2_LANES";
	case POWER_DOMAIN_PORT_DDI_C_4_LANES:
		return "PORT_DDI_C_4_LANES";
	case POWER_DOMAIN_PORT_DDI_D_2_LANES:
		return "PORT_DDI_D_2_LANES";
	case POWER_DOMAIN_PORT_DDI_D_4_LANES:
		return "PORT_DDI_D_4_LANES";
	case POWER_DOMAIN_PORT_DSI:
		return "PORT_DSI";
	case POWER_DOMAIN_PORT_CRT:
		return "PORT_CRT";
	case POWER_DOMAIN_PORT_OTHER:
		return "PORT_OTHER";
	case POWER_DOMAIN_VGA:
		return "VGA";
	case POWER_DOMAIN_AUDIO:
		return "AUDIO";
	case POWER_DOMAIN_PLLS:
		return "PLLS";
	case POWER_DOMAIN_AUX_A:
		return "AUX_A";
	case POWER_DOMAIN_AUX_B:
		return "AUX_B";
	case POWER_DOMAIN_AUX_C:
		return "AUX_C";
	case POWER_DOMAIN_AUX_D:
		return "AUX_D";
	case POWER_DOMAIN_INIT:
		return "INIT";
	default:
		MISSING_CASE(domain);
		return "?";
	}
}
static int i915_power_domain_info(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	int i;

	mutex_lock(&power_domains->lock);

	seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
	for (i = 0; i < power_domains->power_well_count; i++) {
		struct i915_power_well *power_well;
		enum intel_display_power_domain power_domain;

		power_well = &power_domains->power_wells[i];
		seq_printf(m, "%-25s %d\n", power_well->name,
			   power_well->count);

		for (power_domain = 0; power_domain < POWER_DOMAIN_NUM;
		     power_domain++) {
			if (!(BIT(power_domain) & power_well->domains))
				continue;

			seq_printf(m, "  %-23s %d\n",
				   power_domain_str(power_domain),
				   power_domains->domain_use_count[power_domain]);
		}
	}

	mutex_unlock(&power_domains->lock);

	return 0;
}
static void intel_seq_print_mode(struct seq_file *m, int tabs,
				 struct drm_display_mode *mode)
{
	int i;

	for (i = 0; i < tabs; i++)
		seq_putc(m, '\t');

	seq_printf(m, "id %d:\"%s\" freq %d clock %d hdisp %d hss %d hse %d htot %d vdisp %d vss %d vse %d vtot %d type 0x%x flags 0x%x\n",
		   mode->base.id, mode->name,
		   mode->vrefresh, mode->clock,
		   mode->hdisplay, mode->hsync_start,
		   mode->hsync_end, mode->htotal,
		   mode->vdisplay, mode->vsync_start,
		   mode->vsync_end, mode->vtotal,
		   mode->type, mode->flags);
}
static void intel_encoder_info(struct seq_file *m,
			       struct intel_crtc *intel_crtc,
			       struct intel_encoder *intel_encoder)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_crtc *crtc = &intel_crtc->base;
	struct intel_connector *intel_connector;
	struct drm_encoder *encoder;

	encoder = &intel_encoder->base;
	seq_printf(m, "\tencoder %d: type: %s, connectors:\n",
		   encoder->base.id, encoder->name);
	for_each_connector_on_encoder(dev, encoder, intel_connector) {
		struct drm_connector *connector = &intel_connector->base;
		seq_printf(m, "\t\tconnector %d: type: %s, status: %s",
			   connector->base.id,
			   connector->name,
			   drm_get_connector_status_name(connector->status));
		if (connector->status == connector_status_connected) {
			struct drm_display_mode *mode = &crtc->mode;
			seq_printf(m, ", mode:\n");
			intel_seq_print_mode(m, 2, mode);
		} else {
			seq_putc(m, '\n');
		}
	}
}
static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_crtc *crtc = &intel_crtc->base;
	struct intel_encoder *intel_encoder;

	if (crtc->primary->fb)
		seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n",
			   crtc->primary->fb->base.id, crtc->x, crtc->y,
			   crtc->primary->fb->width, crtc->primary->fb->height);
	else
		seq_puts(m, "\tprimary plane disabled\n");
	for_each_encoder_on_crtc(dev, crtc, intel_encoder)
		intel_encoder_info(m, intel_crtc, intel_encoder);
}
static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
{
	struct drm_display_mode *mode = panel->fixed_mode;

	seq_printf(m, "\tfixed mode:\n");
	intel_seq_print_mode(m, 2, mode);
}
static void intel_dp_info(struct seq_file *m,
			  struct intel_connector *intel_connector)
{
	struct intel_encoder *intel_encoder = intel_connector->encoder;
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);

	seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
	seq_printf(m, "\taudio support: %s\n", intel_dp->has_audio ? "yes" :
		   "no");
	if (intel_encoder->type == INTEL_OUTPUT_EDP)
		intel_panel_info(m, &intel_connector->panel);
}
static void intel_hdmi_info(struct seq_file *m,
			    struct intel_connector *intel_connector)
{
	struct intel_encoder *intel_encoder = intel_connector->encoder;
	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);

	seq_printf(m, "\taudio support: %s\n", intel_hdmi->has_audio ? "yes" :
		   "no");
}

static void intel_lvds_info(struct seq_file *m,
			    struct intel_connector *intel_connector)
{
	intel_panel_info(m, &intel_connector->panel);
}
static void intel_connector_info(struct seq_file *m,
				 struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct intel_encoder *intel_encoder = intel_connector->encoder;
	struct drm_display_mode *mode;

	seq_printf(m, "connector %d: type %s, status: %s\n",
		   connector->base.id, connector->name,
		   drm_get_connector_status_name(connector->status));
	if (connector->status == connector_status_connected) {
		seq_printf(m, "\tname: %s\n", connector->display_info.name);
		seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
			   connector->display_info.width_mm,
			   connector->display_info.height_mm);
		seq_printf(m, "\tsubpixel order: %s\n",
			   drm_get_subpixel_order_name(connector->display_info.subpixel_order));
		seq_printf(m, "\tCEA rev: %d\n",
			   connector->display_info.cea_rev);
	}
	if (intel_encoder) {
		if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
		    intel_encoder->type == INTEL_OUTPUT_EDP)
			intel_dp_info(m, intel_connector);
		else if (intel_encoder->type == INTEL_OUTPUT_HDMI)
			intel_hdmi_info(m, intel_connector);
		else if (intel_encoder->type == INTEL_OUTPUT_LVDS)
			intel_lvds_info(m, intel_connector);
	}

	seq_printf(m, "\tmodes:\n");
	list_for_each_entry(mode, &connector->modes, head)
		intel_seq_print_mode(m, 2, mode);
}
static bool cursor_active(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 state;

	if (IS_845G(dev) || IS_I865G(dev))
		state = I915_READ(_CURACNTR) & CURSOR_ENABLE;
	else
		state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;

	return state;
}

static bool cursor_position(struct drm_device *dev, int pipe, int *x, int *y)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pos;

	pos = I915_READ(CURPOS(pipe));

	*x = (pos >> CURSOR_X_SHIFT) & CURSOR_POS_MASK;
	if (pos & (CURSOR_POS_SIGN << CURSOR_X_SHIFT))
		*x = -*x;

	*y = (pos >> CURSOR_Y_SHIFT) & CURSOR_POS_MASK;
	if (pos & (CURSOR_POS_SIGN << CURSOR_Y_SHIFT))
		*y = -*y;

	return cursor_active(dev, pipe);
}
static int i915_display_info(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc;
	struct drm_connector *connector;

	intel_runtime_pm_get(dev_priv);
	drm_modeset_lock_all(dev);
	seq_printf(m, "CRTC info\n");
	seq_printf(m, "---------\n");
	for_each_intel_crtc(dev, crtc) {
		bool active;
		int x, y;

		seq_printf(m, "CRTC %d: pipe: %c, active=%s (size=%dx%d)\n",
			   crtc->base.base.id, pipe_name(crtc->pipe),
			   yesno(crtc->active), crtc->config->pipe_src_w,
			   crtc->config->pipe_src_h);
		if (crtc->active) {
			intel_crtc_info(m, crtc);

			active = cursor_position(dev, crtc->pipe, &x, &y);
			seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x, active? %s\n",
				   yesno(crtc->cursor_base),
				   x, y, crtc->base.cursor->state->crtc_w,
				   crtc->base.cursor->state->crtc_h,
				   crtc->cursor_addr, yesno(active));
		}

		seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s \n",
			   yesno(!crtc->cpu_fifo_underrun_disabled),
			   yesno(!crtc->pch_fifo_underrun_disabled));
	}

	seq_printf(m, "\n");
	seq_printf(m, "Connector info\n");
	seq_printf(m, "--------------\n");
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		intel_connector_info(m, connector);
	}
	drm_modeset_unlock_all(dev);
	intel_runtime_pm_put(dev_priv);

	return 0;
}
static int i915_semaphore_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	int num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
	int i, j, ret;

	if (!i915_semaphore_is_enabled(dev)) {
		seq_puts(m, "Semaphores are disabled\n");
		return 0;
	}

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	if (IS_BROADWELL(dev)) {
		struct page *page;
		uint64_t *seqno;

		page = i915_gem_object_get_page(dev_priv->semaphore_obj, 0);

		seqno = (uint64_t *)kmap_atomic(page);
		for_each_ring(ring, dev_priv, i) {
			uint64_t offset;

			seq_printf(m, "%s\n", ring->name);

			seq_puts(m, "  Last signal:");
			for (j = 0; j < num_rings; j++) {
				offset = i * I915_NUM_RINGS + j;
				seq_printf(m, "0x%08llx (0x%02llx) ",
					   seqno[offset], offset * 8);
			}
			seq_putc(m, '\n');

			seq_puts(m, "  Last wait:  ");
			for (j = 0; j < num_rings; j++) {
				offset = i + (j * I915_NUM_RINGS);
				seq_printf(m, "0x%08llx (0x%02llx) ",
					   seqno[offset], offset * 8);
			}
			seq_putc(m, '\n');
		}
		kunmap_atomic(seqno);
	} else {
		seq_puts(m, "  Last signal:");
		for_each_ring(ring, dev_priv, i)
			for (j = 0; j < num_rings; j++)
				seq_printf(m, "0x%08x\n",
					   I915_READ(ring->semaphore.mbox.signal[j]));
		seq_putc(m, '\n');
	}

	seq_puts(m, "\nSync seqno:\n");
	for_each_ring(ring, dev_priv, i) {
		for (j = 0; j < num_rings; j++) {
			seq_printf(m, "  0x%08x ", ring->semaphore.sync_seqno[j]);
		}
		seq_putc(m, '\n');
	}
	seq_putc(m, '\n');

	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);
	return 0;
}
static int i915_shared_dplls_info(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	drm_modeset_lock_all(dev);
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

		seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->name, pll->id);
		seq_printf(m, " crtc_mask: 0x%08x, active: %d, on: %s\n",
			   pll->config.crtc_mask, pll->active, yesno(pll->on));
		seq_printf(m, " tracked hardware state:\n");
		seq_printf(m, " dpll:    0x%08x\n", pll->config.hw_state.dpll);
		seq_printf(m, " dpll_md: 0x%08x\n",
			   pll->config.hw_state.dpll_md);
		seq_printf(m, " fp0:     0x%08x\n", pll->config.hw_state.fp0);
		seq_printf(m, " fp1:     0x%08x\n", pll->config.hw_state.fp1);
		seq_printf(m, " wrpll:   0x%08x\n", pll->config.hw_state.wrpll);
	}
	drm_modeset_unlock_all(dev);

	return 0;
}
static int i915_wa_registers(struct seq_file *m, void *unused)
{
	int i;
	int ret;
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	intel_runtime_pm_get(dev_priv);

	seq_printf(m, "Workarounds applied: %d\n", dev_priv->workarounds.count);
	for (i = 0; i < dev_priv->workarounds.count; ++i) {
		u32 addr, mask, value, read;
		bool ok;

		addr = dev_priv->workarounds.reg[i].addr;
		mask = dev_priv->workarounds.reg[i].mask;
		value = dev_priv->workarounds.reg[i].value;
		read = I915_READ(addr);
		ok = (value & mask) == (read & mask);
		seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X, read: 0x%08x, status: %s\n",
			   addr, value, mask, read, ok ? "OK" : "FAIL");
	}

	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
static int i915_ddb_info(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct skl_ddb_allocation *ddb;
	struct skl_ddb_entry *entry;
	enum pipe pipe;
	int plane;

	if (INTEL_INFO(dev)->gen < 9)
		return 0;

	drm_modeset_lock_all(dev);

	ddb = &dev_priv->wm.skl_hw.ddb;

	seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");

	for_each_pipe(dev_priv, pipe) {
		seq_printf(m, "Pipe %c\n", pipe_name(pipe));

		for_each_plane(dev_priv, pipe, plane) {
			entry = &ddb->plane[pipe][plane];
			seq_printf(m, "  Plane%-8d%8u%8u%8u\n", plane + 1,
				   entry->start, entry->end,
				   skl_ddb_entry_size(entry));
		}

		entry = &ddb->cursor[pipe];
		seq_printf(m, "  %-13s%8u%8u%8u\n", "Cursor", entry->start,
			   entry->end, skl_ddb_entry_size(entry));
	}

	drm_modeset_unlock_all(dev);

	return 0;
}
static void drrs_status_per_crtc(struct seq_file *m,
		struct drm_device *dev, struct intel_crtc *intel_crtc)
{
	struct intel_encoder *intel_encoder;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_drrs *drrs = &dev_priv->drrs;
	int vrefresh = 0;

	for_each_encoder_on_crtc(dev, &intel_crtc->base, intel_encoder) {
		/* Encoder connected on this CRTC */
		switch (intel_encoder->type) {
		case INTEL_OUTPUT_EDP:
			seq_puts(m, "eDP:\n");
			break;
		case INTEL_OUTPUT_DSI:
			seq_puts(m, "DSI:\n");
			break;
		case INTEL_OUTPUT_HDMI:
			seq_puts(m, "HDMI:\n");
			break;
		case INTEL_OUTPUT_DISPLAYPORT:
			seq_puts(m, "DP:\n");
			break;
		default:
			seq_printf(m, "Other encoder (id=%d).\n",
				   intel_encoder->type);
			return;
		}
	}

	if (dev_priv->vbt.drrs_type == STATIC_DRRS_SUPPORT)
		seq_puts(m, "\tVBT: DRRS_type: Static");
	else if (dev_priv->vbt.drrs_type == SEAMLESS_DRRS_SUPPORT)
		seq_puts(m, "\tVBT: DRRS_type: Seamless");
	else if (dev_priv->vbt.drrs_type == DRRS_NOT_SUPPORTED)
		seq_puts(m, "\tVBT: DRRS_type: None");
	else
		seq_puts(m, "\tVBT: DRRS_type: FIXME: Unrecognized Value");

	seq_puts(m, "\n\n");

	if (intel_crtc->config->has_drrs) {
		struct intel_panel *panel;

		mutex_lock(&drrs->mutex);
		/* DRRS Supported */
		seq_puts(m, "\tDRRS Supported: Yes\n");

		/* disable_drrs() will make drrs->dp NULL */
		if (!drrs->dp) {
			seq_puts(m, "Idleness DRRS: Disabled");
			mutex_unlock(&drrs->mutex);
			return;
		}

		panel = &drrs->dp->attached_connector->panel;
		seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X",
			   drrs->busy_frontbuffer_bits);

		seq_puts(m, "\n\t\t");
		if (drrs->refresh_rate_type == DRRS_HIGH_RR) {
			seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n");
			vrefresh = panel->fixed_mode->vrefresh;
		} else if (drrs->refresh_rate_type == DRRS_LOW_RR) {
			seq_puts(m, "DRRS_State: DRRS_LOW_RR\n");
			vrefresh = panel->downclock_mode->vrefresh;
		} else {
			seq_printf(m, "DRRS_State: Unknown(%d)\n",
				   drrs->refresh_rate_type);
			mutex_unlock(&drrs->mutex);
			return;
		}

		seq_printf(m, "\t\tVrefresh: %d", vrefresh);

		seq_puts(m, "\n\t\t");
		mutex_unlock(&drrs->mutex);
	} else {
		/* DRRS not supported. Print the VBT parameter */
		seq_puts(m, "\tDRRS Supported : No");
	}
	seq_puts(m, "\n");
}
static int i915_drrs_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct intel_crtc *intel_crtc;
	int active_crtc_cnt = 0;

	for_each_intel_crtc(dev, intel_crtc) {
		drm_modeset_lock(&intel_crtc->base.mutex, NULL);

		if (intel_crtc->active) {
			active_crtc_cnt++;
			seq_printf(m, "\nCRTC %d:  ", active_crtc_cnt);

			drrs_status_per_crtc(m, dev, intel_crtc);
		}

		drm_modeset_unlock(&intel_crtc->base.mutex);
	}

	if (!active_crtc_cnt)
		seq_puts(m, "No active crtc found\n");

	return 0;
}
struct pipe_crc_info {
	const char *name;
	struct drm_device *dev;
	enum pipe pipe;
};
static int i915_dp_mst_info(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_encoder *encoder;
	struct intel_encoder *intel_encoder;
	struct intel_digital_port *intel_dig_port;

	drm_modeset_lock_all(dev);
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		intel_encoder = to_intel_encoder(encoder);
		if (intel_encoder->type != INTEL_OUTPUT_DISPLAYPORT)
			continue;
		intel_dig_port = enc_to_dig_port(encoder);
		if (!intel_dig_port->dp.can_mst)
			continue;

		drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
	}
	drm_modeset_unlock_all(dev);

	return 0;
}
static int i915_pipe_crc_open(struct inode *inode, struct file *filep)
{
	struct pipe_crc_info *info = inode->i_private;
	struct drm_i915_private *dev_priv = info->dev->dev_private;
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];

	if (info->pipe >= INTEL_INFO(info->dev)->num_pipes)
		return -ENODEV;

	spin_lock_irq(&pipe_crc->lock);

	if (pipe_crc->opened) {
		spin_unlock_irq(&pipe_crc->lock);
		return -EBUSY; /* already open */
	}

	pipe_crc->opened = true;
	filep->private_data = inode->i_private;

	spin_unlock_irq(&pipe_crc->lock);

	return 0;
}

static int i915_pipe_crc_release(struct inode *inode, struct file *filep)
{
	struct pipe_crc_info *info = inode->i_private;
	struct drm_i915_private *dev_priv = info->dev->dev_private;
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];

	spin_lock_irq(&pipe_crc->lock);
	pipe_crc->opened = false;
	spin_unlock_irq(&pipe_crc->lock);

	return 0;
}
/* (6 fields, 8 chars each, space separated (5) + '\n') */
#define PIPE_CRC_LINE_LEN	(6 * 8 + 5 + 1)
/* account for '\0' */
#define PIPE_CRC_BUFFER_LEN	(PIPE_CRC_LINE_LEN + 1)

static int pipe_crc_data_count(struct intel_pipe_crc *pipe_crc)
{
	assert_spin_locked(&pipe_crc->lock);
	return CIRC_CNT(pipe_crc->head, pipe_crc->tail,
			INTEL_PIPE_CRC_ENTRIES_NR);
}
static ssize_t
i915_pipe_crc_read(struct file *filep, char __user *user_buf, size_t count,
		   loff_t *pos)
{
	struct pipe_crc_info *info = filep->private_data;
	struct drm_device *dev = info->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
	char buf[PIPE_CRC_BUFFER_LEN];
	int n_entries;
	ssize_t bytes_read;

	/*
	 * Don't allow user space to provide buffers not big enough to hold
	 * a line of data.
	 */
	if (count < PIPE_CRC_LINE_LEN)
		return -EINVAL;

	if (pipe_crc->source == INTEL_PIPE_CRC_SOURCE_NONE)
		return 0;

	/* nothing to read */
	spin_lock_irq(&pipe_crc->lock);
	while (pipe_crc_data_count(pipe_crc) == 0) {
		int ret;

		if (filep->f_flags & O_NONBLOCK) {
			spin_unlock_irq(&pipe_crc->lock);
			return -EAGAIN;
		}

		ret = wait_event_interruptible_lock_irq(pipe_crc->wq,
				pipe_crc_data_count(pipe_crc), pipe_crc->lock);
		if (ret) {
			spin_unlock_irq(&pipe_crc->lock);
			return ret;
		}
	}

	/* We now have one or more entries to read */
	n_entries = count / PIPE_CRC_LINE_LEN;

	bytes_read = 0;
	while (n_entries > 0) {
		struct intel_pipe_crc_entry *entry =
			&pipe_crc->entries[pipe_crc->tail];
		int ret;

		if (CIRC_CNT(pipe_crc->head, pipe_crc->tail,
			     INTEL_PIPE_CRC_ENTRIES_NR) < 1)
			break;

		BUILD_BUG_ON_NOT_POWER_OF_2(INTEL_PIPE_CRC_ENTRIES_NR);
		pipe_crc->tail = (pipe_crc->tail + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);

		bytes_read += snprintf(buf, PIPE_CRC_BUFFER_LEN,
				       "%8u %8x %8x %8x %8x %8x\n",
				       entry->frame, entry->crc[0],
				       entry->crc[1], entry->crc[2],
				       entry->crc[3], entry->crc[4]);
		n_entries--;

		spin_unlock_irq(&pipe_crc->lock);

		ret = copy_to_user(user_buf, buf, PIPE_CRC_LINE_LEN);
		if (ret == PIPE_CRC_LINE_LEN)
			return -EFAULT;

		user_buf += PIPE_CRC_LINE_LEN;

		spin_lock_irq(&pipe_crc->lock);
	}

	spin_unlock_irq(&pipe_crc->lock);

	return bytes_read;
}
static const struct file_operations i915_pipe_crc_fops = {
	.owner = THIS_MODULE,
	.open = i915_pipe_crc_open,
	.read = i915_pipe_crc_read,
	.release = i915_pipe_crc_release,
};

static struct pipe_crc_info i915_pipe_crc_data[I915_MAX_PIPES] = {
	{
		.name = "i915_pipe_A_crc",
		.pipe = PIPE_A,
	},
	{
		.name = "i915_pipe_B_crc",
		.pipe = PIPE_B,
	},
	{
		.name = "i915_pipe_C_crc",
		.pipe = PIPE_C,
	},
};

static int i915_pipe_crc_create(struct dentry *root, struct drm_minor *minor,
				enum pipe pipe)
{
	struct drm_device *dev = minor->dev;
	struct dentry *ent;
	struct pipe_crc_info *info = &i915_pipe_crc_data[pipe];

	info->dev = dev;
	ent = debugfs_create_file(info->name, S_IRUGO, root, info,
				  &i915_pipe_crc_fops);
	if (!ent)
		return -ENOMEM;

	return drm_add_fake_info_node(minor, ent, info);
}
static const char * const pipe_crc_sources[] = {
	"none",
	"plane1",
	"plane2",
	"pf",
	"pipe",
	"TV",
	"DP-B",
	"DP-C",
	"DP-D",
	"auto",
};

static const char *pipe_crc_source_name(enum intel_pipe_crc_source source)
{
	BUILD_BUG_ON(ARRAY_SIZE(pipe_crc_sources) != INTEL_PIPE_CRC_SOURCE_MAX);
	return pipe_crc_sources[source];
}

static int display_crc_ctl_show(struct seq_file *m, void *data)
{
	struct drm_device *dev = m->private;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	for (i = 0; i < I915_MAX_PIPES; i++)
		seq_printf(m, "%c %s\n", pipe_name(i),
			   pipe_crc_source_name(dev_priv->pipe_crc[i].source));

	return 0;
}

static int display_crc_ctl_open(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;

	return single_open(file, display_crc_ctl_show, dev);
}
static int i8xx_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
				 uint32_t *val)
{
	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
		*source = INTEL_PIPE_CRC_SOURCE_PIPE;

	switch (*source) {
	case INTEL_PIPE_CRC_SOURCE_PIPE:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_INCLUDE_BORDER_I8XX;
		break;
	case INTEL_PIPE_CRC_SOURCE_NONE:
		*val = 0;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static int i9xx_pipe_crc_auto_source(struct drm_device *dev, enum pipe pipe,
				     enum intel_pipe_crc_source *source)
{
	struct intel_encoder *encoder;
	struct intel_crtc *crtc;
	struct intel_digital_port *dig_port;
	int ret = 0;

	*source = INTEL_PIPE_CRC_SOURCE_PIPE;

	drm_modeset_lock_all(dev);
	for_each_intel_encoder(dev, encoder) {
		if (!encoder->base.crtc)
			continue;

		crtc = to_intel_crtc(encoder->base.crtc);

		if (crtc->pipe != pipe)
			continue;

		switch (encoder->type) {
		case INTEL_OUTPUT_TVOUT:
			*source = INTEL_PIPE_CRC_SOURCE_TV;
			break;
		case INTEL_OUTPUT_DISPLAYPORT:
		case INTEL_OUTPUT_EDP:
			dig_port = enc_to_dig_port(&encoder->base);
			switch (dig_port->port) {
			case PORT_B:
				*source = INTEL_PIPE_CRC_SOURCE_DP_B;
				break;
			case PORT_C:
				*source = INTEL_PIPE_CRC_SOURCE_DP_C;
				break;
			case PORT_D:
				*source = INTEL_PIPE_CRC_SOURCE_DP_D;
				break;
			default:
				WARN(1, "nonexisting DP port %c\n",
				     port_name(dig_port->port));
				break;
			}
			break;
		default:
			break;
		}
	}
	drm_modeset_unlock_all(dev);

	return ret;
}
static int vlv_pipe_crc_ctl_reg(struct drm_device *dev,
				enum pipe pipe,
				enum intel_pipe_crc_source *source,
				uint32_t *val)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool need_stable_symbols = false;

	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
		int ret = i9xx_pipe_crc_auto_source(dev, pipe, source);
		if (ret)
			return ret;
	}

	switch (*source) {
	case INTEL_PIPE_CRC_SOURCE_PIPE:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_VLV;
		break;
	case INTEL_PIPE_CRC_SOURCE_DP_B:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_VLV;
		need_stable_symbols = true;
		break;
	case INTEL_PIPE_CRC_SOURCE_DP_C:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_VLV;
		need_stable_symbols = true;
		break;
	case INTEL_PIPE_CRC_SOURCE_DP_D:
		if (!IS_CHERRYVIEW(dev))
			return -EINVAL;
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_D_VLV;
		need_stable_symbols = true;
		break;
	case INTEL_PIPE_CRC_SOURCE_NONE:
		*val = 0;
		break;
	default:
		return -EINVAL;
	}

	/*
	 * When the pipe CRC tap point is after the transcoders we need
	 * to tweak symbol-level features to produce a deterministic series of
	 * symbols for a given frame. We need to reset those features only once
	 * a frame (instead of every nth symbol):
	 *   - DC-balance: used to ensure a better clock recovery from the data
	 *     link
	 *   - DisplayPort scrambling: used for EMI reduction
	 */
	if (need_stable_symbols) {
		uint32_t tmp = I915_READ(PORT_DFT2_G4X);

		tmp |= DC_BALANCE_RESET_VLV;
		switch (pipe) {
		case PIPE_A:
			tmp |= PIPE_A_SCRAMBLE_RESET;
			break;
		case PIPE_B:
			tmp |= PIPE_B_SCRAMBLE_RESET;
			break;
		case PIPE_C:
			tmp |= PIPE_C_SCRAMBLE_RESET;
			break;
		default:
			return -EINVAL;
		}
		I915_WRITE(PORT_DFT2_G4X, tmp);
	}

	return 0;
}
static int i9xx_pipe_crc_ctl_reg(struct drm_device *dev,
				 enum pipe pipe,
				 enum intel_pipe_crc_source *source,
				 uint32_t *val)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool need_stable_symbols = false;

	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
		int ret = i9xx_pipe_crc_auto_source(dev, pipe, source);
		if (ret)
			return ret;
	}

	switch (*source) {
	case INTEL_PIPE_CRC_SOURCE_PIPE:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_I9XX;
		break;
	case INTEL_PIPE_CRC_SOURCE_TV:
		if (!SUPPORTS_TV(dev))
			return -EINVAL;
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_TV_PRE;
		break;
	case INTEL_PIPE_CRC_SOURCE_DP_B:
		if (!IS_G4X(dev))
			return -EINVAL;
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_G4X;
		need_stable_symbols = true;
		break;
	case INTEL_PIPE_CRC_SOURCE_DP_C:
		if (!IS_G4X(dev))
			return -EINVAL;
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_G4X;
		need_stable_symbols = true;
		break;
	case INTEL_PIPE_CRC_SOURCE_DP_D:
		if (!IS_G4X(dev))
			return -EINVAL;
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_D_G4X;
		need_stable_symbols = true;
		break;
	case INTEL_PIPE_CRC_SOURCE_NONE:
		*val = 0;
		break;
	default:
		return -EINVAL;
	}

	/*
	 * When the pipe CRC tap point is after the transcoders we need
	 * to tweak symbol-level features to produce a deterministic series of
	 * symbols for a given frame. We need to reset those features only once
	 * a frame (instead of every nth symbol):
	 *   - DC-balance: used to ensure a better clock recovery from the data
	 *     link
	 *   - DisplayPort scrambling: used for EMI reduction
	 */
	if (need_stable_symbols) {
		uint32_t tmp = I915_READ(PORT_DFT2_G4X);

		WARN_ON(!IS_G4X(dev));

		I915_WRITE(PORT_DFT_I9XX,
			   I915_READ(PORT_DFT_I9XX) | DC_BALANCE_RESET);

		if (pipe == PIPE_A)
			tmp |= PIPE_A_SCRAMBLE_RESET;
		else
			tmp |= PIPE_B_SCRAMBLE_RESET;

		I915_WRITE(PORT_DFT2_G4X, tmp);
	}

	return 0;
}
static void vlv_undo_pipe_scramble_reset(struct drm_device *dev,
					 enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t tmp = I915_READ(PORT_DFT2_G4X);

	switch (pipe) {
	case PIPE_A:
		tmp &= ~PIPE_A_SCRAMBLE_RESET;
		break;
	case PIPE_B:
		tmp &= ~PIPE_B_SCRAMBLE_RESET;
		break;
	case PIPE_C:
		tmp &= ~PIPE_C_SCRAMBLE_RESET;
		break;
	default:
		return;
	}
	if (!(tmp & PIPE_SCRAMBLE_RESET_MASK))
		tmp &= ~DC_BALANCE_RESET_VLV;
	I915_WRITE(PORT_DFT2_G4X, tmp);
}

static void g4x_undo_pipe_scramble_reset(struct drm_device *dev,
					 enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t tmp = I915_READ(PORT_DFT2_G4X);

	if (pipe == PIPE_A)
		tmp &= ~PIPE_A_SCRAMBLE_RESET;
	else
		tmp &= ~PIPE_B_SCRAMBLE_RESET;
	I915_WRITE(PORT_DFT2_G4X, tmp);

	if (!(tmp & PIPE_SCRAMBLE_RESET_MASK)) {
		I915_WRITE(PORT_DFT_I9XX,
			   I915_READ(PORT_DFT_I9XX) & ~DC_BALANCE_RESET);
	}
}
static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
				uint32_t *val)
{
	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
		*source = INTEL_PIPE_CRC_SOURCE_PIPE;

	switch (*source) {
	case INTEL_PIPE_CRC_SOURCE_PLANE1:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_ILK;
		break;
	case INTEL_PIPE_CRC_SOURCE_PLANE2:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_ILK;
		break;
	case INTEL_PIPE_CRC_SOURCE_PIPE:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_ILK;
		break;
	case INTEL_PIPE_CRC_SOURCE_NONE:
		*val = 0;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static void hsw_trans_edp_pipe_A_crc_wa(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc =
		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_A]);

	drm_modeset_lock_all(dev);
	/*
	 * If we use the eDP transcoder we need to make sure that we don't
	 * bypass the pfit, since otherwise the pipe CRC source won't work. Only
	 * relevant on hsw with pipe A when using the always-on power well
	 * routing.
	 */
	if (crtc->config->cpu_transcoder == TRANSCODER_EDP &&
	    !crtc->config->pch_pfit.enabled) {
		crtc->config->pch_pfit.force_thru = true;

		intel_display_power_get(dev_priv,
					POWER_DOMAIN_PIPE_PANEL_FITTER(PIPE_A));

		intel_crtc_reset(crtc);
	}
	drm_modeset_unlock_all(dev);
}

static void hsw_undo_trans_edp_pipe_A_crc_wa(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc =
		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_A]);

	drm_modeset_lock_all(dev);
	/*
	 * If we use the eDP transcoder we need to make sure that we don't
	 * bypass the pfit, since otherwise the pipe CRC source won't work. Only
	 * relevant on hsw with pipe A when using the always-on power well
	 * routing.
	 */
	if (crtc->config->pch_pfit.force_thru) {
		crtc->config->pch_pfit.force_thru = false;

		intel_crtc_reset(crtc);

		intel_display_power_put(dev_priv,
					POWER_DOMAIN_PIPE_PANEL_FITTER(PIPE_A));
	}
	drm_modeset_unlock_all(dev);
}
static int ivb_pipe_crc_ctl_reg(struct drm_device *dev,
				enum pipe pipe,
				enum intel_pipe_crc_source *source,
				uint32_t *val)
{
	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
		*source = INTEL_PIPE_CRC_SOURCE_PF;

	switch (*source) {
	case INTEL_PIPE_CRC_SOURCE_PLANE1:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_IVB;
		break;
	case INTEL_PIPE_CRC_SOURCE_PLANE2:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_IVB;
		break;
	case INTEL_PIPE_CRC_SOURCE_PF:
		if (IS_HASWELL(dev) && pipe == PIPE_A)
			hsw_trans_edp_pipe_A_crc_wa(dev);

		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PF_IVB;
		break;
	case INTEL_PIPE_CRC_SOURCE_NONE:
		*val = 0;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
			       enum intel_pipe_crc_source source)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
	struct intel_crtc *crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev,
									pipe));
	u32 val = 0; /* shut up gcc */
	int ret;

	if (pipe_crc->source == source)
		return 0;

	/* forbid changing the source without going back to 'none' */
	if (pipe_crc->source && source)
		return -EINVAL;

	if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PIPE(pipe))) {
		DRM_DEBUG_KMS("Trying to capture CRC while pipe is off\n");
		return -EIO;
	}

	if (IS_GEN2(dev))
		ret = i8xx_pipe_crc_ctl_reg(&source, &val);
	else if (INTEL_INFO(dev)->gen < 5)
		ret = i9xx_pipe_crc_ctl_reg(dev, pipe, &source, &val);
	else if (IS_VALLEYVIEW(dev))
		ret = vlv_pipe_crc_ctl_reg(dev, pipe, &source, &val);
	else if (IS_GEN5(dev) || IS_GEN6(dev))
		ret = ilk_pipe_crc_ctl_reg(&source, &val);
	else
		ret = ivb_pipe_crc_ctl_reg(dev, pipe, &source, &val);

	if (ret != 0)
		return ret;

	/* none -> real source transition */
	if (source) {
		struct intel_pipe_crc_entry *entries;

		DRM_DEBUG_DRIVER("collecting CRCs for pipe %c, %s\n",
				 pipe_name(pipe), pipe_crc_source_name(source));

		entries = kcalloc(INTEL_PIPE_CRC_ENTRIES_NR,
				  sizeof(pipe_crc->entries[0]),
				  GFP_KERNEL);
		if (!entries)
			return -ENOMEM;

		/*
		 * When IPS gets enabled, the pipe CRC changes. Since IPS gets
		 * enabled and disabled dynamically based on package C states,
		 * user space can't make reliable use of the CRCs, so let's just
		 * completely disable it.
		 */
		hsw_disable_ips(crtc);

		spin_lock_irq(&pipe_crc->lock);
		kfree(pipe_crc->entries);
		pipe_crc->entries = entries;
		pipe_crc->head = 0;
		pipe_crc->tail = 0;
		spin_unlock_irq(&pipe_crc->lock);
	}

	pipe_crc->source = source;

	I915_WRITE(PIPE_CRC_CTL(pipe), val);
	POSTING_READ(PIPE_CRC_CTL(pipe));

	/* real source -> none transition */
	if (source == INTEL_PIPE_CRC_SOURCE_NONE) {
		struct intel_pipe_crc_entry *entries;
		struct intel_crtc *crtc =
			to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

		DRM_DEBUG_DRIVER("stopping CRCs for pipe %c\n",
				 pipe_name(pipe));

		drm_modeset_lock(&crtc->base.mutex, NULL);
		if (crtc->active)
			intel_wait_for_vblank(dev, pipe);
		drm_modeset_unlock(&crtc->base.mutex);

		spin_lock_irq(&pipe_crc->lock);
		entries = pipe_crc->entries;
		pipe_crc->entries = NULL;
		pipe_crc->head = 0;
		pipe_crc->tail = 0;
		spin_unlock_irq(&pipe_crc->lock);

		kfree(entries);

		if (IS_G4X(dev))
			g4x_undo_pipe_scramble_reset(dev, pipe);
		else if (IS_VALLEYVIEW(dev))
			vlv_undo_pipe_scramble_reset(dev, pipe);
		else if (IS_HASWELL(dev) && pipe == PIPE_A)
			hsw_undo_trans_edp_pipe_A_crc_wa(dev);

		hsw_enable_ips(crtc);
	}

	return 0;
}
/*
 * Parse pipe CRC command strings:
 *   command: wsp* object wsp+ name wsp+ source wsp*
 *   object: 'pipe'
 *   name: (A | B | C)
 *   source: (none | plane1 | plane2 | pf)
 *   wsp: (#0x20 | #0x9 | #0xA)+
 *
 * eg.:
 *  "pipe A plane1"  ->  Start CRC computations on plane1 of pipe A
 *  "pipe A none"    ->  Stop CRC
 */
static int display_crc_ctl_tokenize(char *buf, char *words[], int max_words)
{
	int n_words = 0;

	while (*buf) {
		char *end;

		/* skip leading white space */
		buf = skip_spaces(buf);
		if (!*buf)
			break;	/* end of buffer */

		/* find end of word */
		for (end = buf; *end && !isspace(*end); end++)
			;

		if (n_words == max_words) {
			DRM_DEBUG_DRIVER("too many words, allowed <= %d\n",
					 max_words);
			return -EINVAL;	/* ran out of words[] before bytes */
		}

		if (*end)
			*end++ = '\0';
		words[n_words++] = buf;
		buf = end;
	}

	return n_words;
}
enum intel_pipe_crc_object {
	PIPE_CRC_OBJECT_PIPE,
};

static const char * const pipe_crc_objects[] = {
	"pipe",
};

static int
display_crc_ctl_parse_object(const char *buf, enum intel_pipe_crc_object *o)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(pipe_crc_objects); i++)
		if (!strcmp(buf, pipe_crc_objects[i])) {
			*o = i;
			return 0;
		}

	return -EINVAL;
}

static int display_crc_ctl_parse_pipe(const char *buf, enum pipe *pipe)
{
	const char name = buf[0];

	if (name < 'A' || name >= pipe_name(I915_MAX_PIPES))
		return -EINVAL;

	*pipe = name - 'A';

	return 0;
}

static int
display_crc_ctl_parse_source(const char *buf, enum intel_pipe_crc_source *s)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(pipe_crc_sources); i++)
		if (!strcmp(buf, pipe_crc_sources[i])) {
			*s = i;
			return 0;
		}

	return -EINVAL;
}

static int display_crc_ctl_parse(struct drm_device *dev, char *buf, size_t len)
{
#define N_WORDS 3
	int n_words;
	char *words[N_WORDS];
	enum pipe pipe;
	enum intel_pipe_crc_object object;
	enum intel_pipe_crc_source source;

	n_words = display_crc_ctl_tokenize(buf, words, N_WORDS);
	if (n_words != N_WORDS) {
		DRM_DEBUG_DRIVER("tokenize failed, a command is %d words\n",
				 N_WORDS);
		return -EINVAL;
	}

	if (display_crc_ctl_parse_object(words[0], &object) < 0) {
		DRM_DEBUG_DRIVER("unknown object %s\n", words[0]);
		return -EINVAL;
	}

	if (display_crc_ctl_parse_pipe(words[1], &pipe) < 0) {
		DRM_DEBUG_DRIVER("unknown pipe %s\n", words[1]);
		return -EINVAL;
	}

	if (display_crc_ctl_parse_source(words[2], &source) < 0) {
		DRM_DEBUG_DRIVER("unknown source %s\n", words[2]);
		return -EINVAL;
	}

	return pipe_crc_set_source(dev, pipe, source);
}
static ssize_t display_crc_ctl_write(struct file *file, const char __user *ubuf,
				     size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_device *dev = m->private;
	char *tmpbuf;
	int ret;

	if (len == 0)
		return 0;

	if (len > PAGE_SIZE - 1) {
		DRM_DEBUG_DRIVER("expected <%lu bytes into pipe crc control\n",
				 PAGE_SIZE);
		return -E2BIG;
	}

	tmpbuf = kmalloc(len + 1, GFP_KERNEL);
	if (!tmpbuf)
		return -ENOMEM;

	if (copy_from_user(tmpbuf, ubuf, len)) {
		ret = -EFAULT;
		goto out;
	}
	tmpbuf[len] = '\0';

	ret = display_crc_ctl_parse(dev, tmpbuf, len);

out:
	kfree(tmpbuf);
	if (ret < 0)
		return ret;

	*offp += len;
	return len;
}
static const struct file_operations i915_display_crc_ctl_fops = {
	.owner = THIS_MODULE,
	.open = display_crc_ctl_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = display_crc_ctl_write
};

static ssize_t i915_displayport_test_active_write(struct file *file,
					    const char __user *ubuf,
					    size_t len, loff_t *offp)
{
	char *input_buffer;
	int status = 0;
	struct seq_file *m;
	struct drm_device *dev;
	struct drm_connector *connector;
	struct list_head *connector_list;
	struct intel_dp *intel_dp;
	int val = 0;

	m = file->private_data;
	if (!m)
		return -ENODEV;

	dev = m->private;
	if (!dev)
		return -ENODEV;

	connector_list = &dev->mode_config.connector_list;

	if (len == 0)
		return 0;

	input_buffer = kmalloc(len + 1, GFP_KERNEL);
	if (!input_buffer)
		return -ENOMEM;

	if (copy_from_user(input_buffer, ubuf, len)) {
		status = -EFAULT;
		goto out;
	}

	input_buffer[len] = '\0';
	DRM_DEBUG_DRIVER("Copied %d bytes from user\n", (unsigned int)len);

	list_for_each_entry(connector, connector_list, head) {

		if (connector->connector_type !=
		    DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		if (connector->connector_type ==
		    DRM_MODE_CONNECTOR_DisplayPort &&
		    connector->status == connector_status_connected &&
		    connector->encoder != NULL) {
			intel_dp = enc_to_intel_dp(connector->encoder);
			status = kstrtoint(input_buffer, 10, &val);
			if (status < 0)
				goto out;
			DRM_DEBUG_DRIVER("Got %d for test active\n", val);
			/* To prevent erroneous activation of the compliance
			 * testing code, only accept an actual value of 1 here
			 */
			if (val == 1)
				intel_dp->compliance_test_active = 1;
			else
				intel_dp->compliance_test_active = 0;
		}
	}
out:
	kfree(input_buffer);
	if (status < 0)
		return status;

	*offp += len;
	return len;
}
*m
, void *data
)
4053 struct drm_device
*dev
= m
->private;
4054 struct drm_connector
*connector
;
4055 struct list_head
*connector_list
= &dev
->mode_config
.connector_list
;
4056 struct intel_dp
*intel_dp
;
4061 list_for_each_entry(connector
, connector_list
, head
) {
4063 if (connector
->connector_type
!=
4064 DRM_MODE_CONNECTOR_DisplayPort
)
4067 if (connector
->status
== connector_status_connected
&&
4068 connector
->encoder
!= NULL
) {
4069 intel_dp
= enc_to_intel_dp(connector
->encoder
);
4070 if (intel_dp
->compliance_test_active
)
4081 static int i915_displayport_test_active_open(struct inode
*inode
,
4084 struct drm_device
*dev
= inode
->i_private
;
4086 return single_open(file
, i915_displayport_test_active_show
, dev
);
4089 static const struct file_operations i915_displayport_test_active_fops
= {
4090 .owner
= THIS_MODULE
,
4091 .open
= i915_displayport_test_active_open
,
4093 .llseek
= seq_lseek
,
4094 .release
= single_release
,
4095 .write
= i915_displayport_test_active_write
static int i915_displayport_test_data_show(struct seq_file *m, void *data)
{
	struct drm_device *dev = m->private;
	struct drm_connector *connector;
	struct list_head *connector_list = &dev->mode_config.connector_list;
	struct intel_dp *intel_dp;

	list_for_each_entry(connector, connector_list, head) {

		if (connector->connector_type !=
		    DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		if (connector->status == connector_status_connected &&
		    connector->encoder != NULL) {
			intel_dp = enc_to_intel_dp(connector->encoder);
			seq_printf(m, "%lx", intel_dp->compliance_test_data);
		}
	}

	return 0;
}

static int i915_displayport_test_data_open(struct inode *inode,
				       struct file *file)
{
	struct drm_device *dev = inode->i_private;

	return single_open(file, i915_displayport_test_data_show, dev);
}

static const struct file_operations i915_displayport_test_data_fops = {
	.owner = THIS_MODULE,
	.open = i915_displayport_test_data_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release
};
static int i915_displayport_test_type_show(struct seq_file *m, void *data)
{
	struct drm_device *dev = m->private;
	struct drm_connector *connector;
	struct list_head *connector_list = &dev->mode_config.connector_list;
	struct intel_dp *intel_dp;

	list_for_each_entry(connector, connector_list, head) {

		if (connector->connector_type !=
		    DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		if (connector->status == connector_status_connected &&
		    connector->encoder != NULL) {
			intel_dp = enc_to_intel_dp(connector->encoder);
			seq_printf(m, "%02lx", intel_dp->compliance_test_type);
		}
	}

	return 0;
}

static int i915_displayport_test_type_open(struct inode *inode,
				       struct file *file)
{
	struct drm_device *dev = inode->i_private;

	return single_open(file, i915_displayport_test_type_show, dev);
}

static const struct file_operations i915_displayport_test_type_fops = {
	.owner = THIS_MODULE,
	.open = i915_displayport_test_type_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release
};
static void wm_latency_show(struct seq_file *m, const uint16_t wm[8])
{
	struct drm_device *dev = m->private;
	int num_levels = ilk_wm_max_level(dev) + 1;
	int level;

	drm_modeset_lock_all(dev);

	for (level = 0; level < num_levels; level++) {
		unsigned int latency = wm[level];

		/*
		 * - WM1+ latency values in 0.5us units
		 * - latencies are in us on gen9
		 */
		if (INTEL_INFO(dev)->gen >= 9)
			latency *= 10;
		else if (level > 0)
			latency *= 5;

		seq_printf(m, "WM%d %u (%u.%u usec)\n",
			   level, wm[level], latency / 10, latency % 10);
	}

	drm_modeset_unlock_all(dev);
}
static int pri_wm_latency_show(struct seq_file *m, void *data)
{
	struct drm_device *dev = m->private;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const uint16_t *latencies;

	if (INTEL_INFO(dev)->gen >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = to_i915(dev)->wm.pri_latency;

	wm_latency_show(m, latencies);

	return 0;
}

static int spr_wm_latency_show(struct seq_file *m, void *data)
{
	struct drm_device *dev = m->private;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const uint16_t *latencies;

	if (INTEL_INFO(dev)->gen >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = to_i915(dev)->wm.spr_latency;

	wm_latency_show(m, latencies);

	return 0;
}

static int cur_wm_latency_show(struct seq_file *m, void *data)
{
	struct drm_device *dev = m->private;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const uint16_t *latencies;

	if (INTEL_INFO(dev)->gen >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = to_i915(dev)->wm.cur_latency;

	wm_latency_show(m, latencies);

	return 0;
}
static int pri_wm_latency_open(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;

	if (HAS_GMCH_DISPLAY(dev))
		return -ENODEV;

	return single_open(file, pri_wm_latency_show, dev);
}

static int spr_wm_latency_open(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;

	if (HAS_GMCH_DISPLAY(dev))
		return -ENODEV;

	return single_open(file, spr_wm_latency_show, dev);
}

static int cur_wm_latency_open(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;

	if (HAS_GMCH_DISPLAY(dev))
		return -ENODEV;

	return single_open(file, cur_wm_latency_show, dev);
}
static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
				size_t len, loff_t *offp, uint16_t wm[8])
{
	struct seq_file *m = file->private_data;
	struct drm_device *dev = m->private;
	uint16_t new[8] = { 0 };
	int num_levels = ilk_wm_max_level(dev) + 1;
	int level;
	int ret;
	char tmp[32];

	if (len >= sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(tmp, ubuf, len))
		return -EFAULT;

	tmp[len] = '\0';

	ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
		     &new[0], &new[1], &new[2], &new[3],
		     &new[4], &new[5], &new[6], &new[7]);
	if (ret != num_levels)
		return -EINVAL;

	drm_modeset_lock_all(dev);

	for (level = 0; level < num_levels; level++)
		wm[level] = new[level];

	drm_modeset_unlock_all(dev);

	return len;
}
static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
				    size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_device *dev = m->private;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint16_t *latencies;

	if (INTEL_INFO(dev)->gen >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = to_i915(dev)->wm.pri_latency;

	return wm_latency_write(file, ubuf, len, offp, latencies);
}

static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
				    size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_device *dev = m->private;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint16_t *latencies;

	if (INTEL_INFO(dev)->gen >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = to_i915(dev)->wm.spr_latency;

	return wm_latency_write(file, ubuf, len, offp, latencies);
}

static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
				    size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_device *dev = m->private;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint16_t *latencies;

	if (INTEL_INFO(dev)->gen >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = to_i915(dev)->wm.cur_latency;

	return wm_latency_write(file, ubuf, len, offp, latencies);
}
static const struct file_operations i915_pri_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = pri_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = pri_wm_latency_write
};

static const struct file_operations i915_spr_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = spr_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = spr_wm_latency_write
};

static const struct file_operations i915_cur_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = cur_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = cur_wm_latency_write
};
4400 i915_wedged_get(void *data
, u64
*val
)
4402 struct drm_device
*dev
= data
;
4403 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
4405 *val
= atomic_read(&dev_priv
->gpu_error
.reset_counter
);
static int
i915_wedged_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;

	/*
	 * There is no safeguard against this debugfs entry colliding
	 * with the hangcheck calling same i915_handle_error() in
	 * parallel, causing an explosion. For now we assume that the
	 * test harness is responsible enough not to inject gpu hangs
	 * while it is writing to 'i915_wedged'.
	 */
	if (i915_reset_in_progress(&dev_priv->gpu_error))
		return -EAGAIN;

	intel_runtime_pm_get(dev_priv);

	i915_handle_error(dev, val,
			  "Manually setting wedged to %llu", val);

	intel_runtime_pm_put(dev_priv);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
			i915_wedged_get, i915_wedged_set,
			"%llu\n");

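/*
 * Illustrative usage (assumed, not from the original source): a test
 * harness can inject a hang with e.g.
 * "echo 1 > /sys/kernel/debug/dri/0/i915_wedged"; the value read back is
 * the raw gpu_error.reset_counter, so a change indicates a reset happened.
 */
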
static int
i915_ring_stop_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;

	*val = dev_priv->gpu_error.stop_rings;

	return 0;
}

static int
i915_ring_stop_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	DRM_DEBUG_DRIVER("Stopping rings 0x%08llx\n", val);

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	dev_priv->gpu_error.stop_rings = val;
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_ring_stop_fops,
			i915_ring_stop_get, i915_ring_stop_set,
			"0x%08llx\n");

static int
i915_ring_missed_irq_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;

	*val = dev_priv->gpu_error.missed_irq_rings;

	return 0;
}

static int
i915_ring_missed_irq_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	/* Lock against concurrent debugfs callers */
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	dev_priv->gpu_error.missed_irq_rings = val;
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_ring_missed_irq_fops,
			i915_ring_missed_irq_get, i915_ring_missed_irq_set,
			"0x%08llx\n");

static int
i915_ring_test_irq_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;

	*val = dev_priv->gpu_error.test_irq_rings;

	return 0;
}

static int
i915_ring_test_irq_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	DRM_DEBUG_DRIVER("Masking interrupts on rings 0x%08llx\n", val);

	/* Lock against concurrent debugfs callers */
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	dev_priv->gpu_error.test_irq_rings = val;
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_ring_test_irq_fops,
			i915_ring_test_irq_get, i915_ring_test_irq_set,
			"0x%08llx\n");

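/*
 * Illustrative usage (assumed, not from the original source): the three
 * i915_ring_* files above each take a bitmask of ring ids; assuming RCS is
 * bit 0, a hypothetical "echo 0x1 > i915_ring_test_irq" would mask
 * interrupts on the render ring only, and "echo 0" restores normal
 * interrupt delivery.
 */
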
#define DROP_UNBOUND 0x1
#define DROP_BOUND 0x2
#define DROP_RETIRE 0x4
#define DROP_ACTIVE 0x8
#define DROP_ALL (DROP_UNBOUND | \
		  DROP_BOUND | \
		  DROP_RETIRE | \
		  DROP_ACTIVE)

static int
i915_drop_caches_get(void *data, u64 *val)
{
	*val = DROP_ALL;

	return 0;
}

static int
i915_drop_caches_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	DRM_DEBUG("Dropping caches: 0x%08llx\n", val);

	/* No need to check and wait for gpu resets, only libdrm auto-restarts
	 * on ioctls on -EAGAIN. */
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	if (val & DROP_ACTIVE) {
		ret = i915_gpu_idle(dev);
		if (ret)
			goto unlock;
	}

	if (val & (DROP_RETIRE | DROP_ACTIVE))
		i915_gem_retire_requests(dev);

	if (val & DROP_BOUND)
		i915_gem_shrink(dev_priv, LONG_MAX, I915_SHRINK_BOUND);

	if (val & DROP_UNBOUND)
		i915_gem_shrink(dev_priv, LONG_MAX, I915_SHRINK_UNBOUND);

unlock:
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
			i915_drop_caches_get, i915_drop_caches_set,
			"0x%08llx\n");

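/*
 * Illustrative usage (assumed, not from the original source): the DROP_*
 * flags combine, so a hypothetical
 * "echo 0xf > /sys/kernel/debug/dri/0/i915_gem_drop_caches" (DROP_ALL)
 * idles the GPU, retires requests and shrinks both the bound and unbound
 * lists, giving tests a known-clean starting state.
 */
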
static int
i915_max_freq_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	if (INTEL_INFO(dev)->gen < 6)
		return -ENODEV;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
	if (ret)
		return ret;

	*val = intel_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit);
	mutex_unlock(&dev_priv->rps.hw_lock);

	return 0;
}

static int
i915_max_freq_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 hw_max, hw_min;
	int ret;

	if (INTEL_INFO(dev)->gen < 6)
		return -ENODEV;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	DRM_DEBUG_DRIVER("Manually setting max freq to %llu\n", val);

	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
	if (ret)
		return ret;

	/*
	 * Turbo will still be enabled, but won't go above the set value.
	 */
	val = intel_freq_opcode(dev_priv, val);

	hw_max = dev_priv->rps.max_freq;
	hw_min = dev_priv->rps.min_freq;

	if (val < hw_min || val > hw_max || val < dev_priv->rps.min_freq_softlimit) {
		mutex_unlock(&dev_priv->rps.hw_lock);
		return -EINVAL;
	}

	dev_priv->rps.max_freq_softlimit = val;

	intel_set_rps(dev, val);

	mutex_unlock(&dev_priv->rps.hw_lock);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_max_freq_fops,
			i915_max_freq_get, i915_max_freq_set,
			"%llu\n");

static int
i915_min_freq_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	if (INTEL_INFO(dev)->gen < 6)
		return -ENODEV;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
	if (ret)
		return ret;

	*val = intel_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit);
	mutex_unlock(&dev_priv->rps.hw_lock);

	return 0;
}

static int
i915_min_freq_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 hw_max, hw_min;
	int ret;

	if (INTEL_INFO(dev)->gen < 6)
		return -ENODEV;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	DRM_DEBUG_DRIVER("Manually setting min freq to %llu\n", val);

	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
	if (ret)
		return ret;

	/*
	 * Turbo will still be enabled, but won't go below the set value.
	 */
	val = intel_freq_opcode(dev_priv, val);

	hw_max = dev_priv->rps.max_freq;
	hw_min = dev_priv->rps.min_freq;

	if (val < hw_min || val > hw_max || val > dev_priv->rps.max_freq_softlimit) {
		mutex_unlock(&dev_priv->rps.hw_lock);
		return -EINVAL;
	}

	dev_priv->rps.min_freq_softlimit = val;

	intel_set_rps(dev, val);

	mutex_unlock(&dev_priv->rps.hw_lock);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_min_freq_fops,
			i915_min_freq_get, i915_min_freq_set,
			"%llu\n");

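/*
 * Illustrative usage (assumed, not from the original source): both files
 * take a frequency in MHz; intel_freq_opcode() translates it into the
 * hardware ratio before the range checks, so e.g. a hypothetical
 * "echo 450 > i915_max_freq" caps turbo at 450 MHz, while
 * "cat i915_min_freq" reports the current floor.
 */
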
static int
i915_cache_sharing_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 snpcr;
	int ret;

	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
		return -ENODEV;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);

	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev_priv->dev->struct_mutex);

	*val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;

	return 0;
}

static int
i915_cache_sharing_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 snpcr;

	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
		return -ENODEV;

	if (val > 3)
		return -EINVAL;

	intel_runtime_pm_get(dev_priv);
	DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);

	/* Update the cache sharing policy here as well */
	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
	snpcr &= ~GEN6_MBC_SNPCR_MASK;
	snpcr |= (val << GEN6_MBC_SNPCR_SHIFT);
	I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);

	intel_runtime_pm_put(dev_priv);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
			i915_cache_sharing_get, i915_cache_sharing_set,
			"%llu\n");

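/*
 * Illustrative note (assumed, not from the original source): the value
 * written here lands in the narrow MBCUNIT_SNPCR policy field, which is
 * why the setter above rejects anything above 3.
 */
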
struct sseu_dev_status {
	unsigned int slice_total;
	unsigned int subslice_total;
	unsigned int subslice_per_slice;
	unsigned int eu_total;
	unsigned int eu_per_subslice;
};

static void cherryview_sseu_device_status(struct drm_device *dev,
					  struct sseu_dev_status *stat)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	const int ss_max = 2;
	int ss;
	u32 sig1[ss_max], sig2[ss_max];

	sig1[0] = I915_READ(CHV_POWER_SS0_SIG1);
	sig1[1] = I915_READ(CHV_POWER_SS1_SIG1);
	sig2[0] = I915_READ(CHV_POWER_SS0_SIG2);
	sig2[1] = I915_READ(CHV_POWER_SS1_SIG2);

	for (ss = 0; ss < ss_max; ss++) {
		unsigned int eu_cnt;

		if (sig1[ss] & CHV_SS_PG_ENABLE)
			/* skip disabled subslice */
			continue;

		stat->slice_total = 1;
		stat->subslice_per_slice++;
		eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) +
			 ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) +
			 ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) +
			 ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2);
		stat->eu_total += eu_cnt;
		stat->eu_per_subslice = max(stat->eu_per_subslice, eu_cnt);
	}
	stat->subslice_total = stat->subslice_per_slice;
}

static void gen9_sseu_device_status(struct drm_device *dev,
				    struct sseu_dev_status *stat)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int s_max = 3, ss_max = 4;
	int s, ss;
	u32 s_reg[s_max], eu_reg[2*s_max], eu_mask[2];

	/* BXT has a single slice and at most 3 subslices. */
	if (IS_BROXTON(dev)) {
		s_max = 1;
		ss_max = 3;
	}

	for (s = 0; s < s_max; s++) {
		s_reg[s] = I915_READ(GEN9_SLICE_PGCTL_ACK(s));
		eu_reg[2*s] = I915_READ(GEN9_SS01_EU_PGCTL_ACK(s));
		eu_reg[2*s + 1] = I915_READ(GEN9_SS23_EU_PGCTL_ACK(s));
	}

	eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
		     GEN9_PGCTL_SSA_EU19_ACK |
		     GEN9_PGCTL_SSA_EU210_ACK |
		     GEN9_PGCTL_SSA_EU311_ACK;
	eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
		     GEN9_PGCTL_SSB_EU19_ACK |
		     GEN9_PGCTL_SSB_EU210_ACK |
		     GEN9_PGCTL_SSB_EU311_ACK;

	for (s = 0; s < s_max; s++) {
		unsigned int ss_cnt = 0;

		if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
			/* skip disabled slice */
			continue;

		stat->slice_total++;

		if (IS_SKYLAKE(dev))
			ss_cnt = INTEL_INFO(dev)->subslice_per_slice;

		for (ss = 0; ss < ss_max; ss++) {
			unsigned int eu_cnt;

			if (IS_BROXTON(dev) &&
			    !(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
				/* skip disabled subslice */
				continue;

			if (IS_BROXTON(dev))
				ss_cnt++;

			eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] &
					       eu_mask[ss%2]);
			stat->eu_total += eu_cnt;
			stat->eu_per_subslice = max(stat->eu_per_subslice,
						    eu_cnt);
		}

		stat->subslice_total += ss_cnt;
		stat->subslice_per_slice = max(stat->subslice_per_slice,
					       ss_cnt);
	}
}

static int i915_sseu_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct sseu_dev_status stat;

	if ((INTEL_INFO(dev)->gen < 8) || IS_BROADWELL(dev))
		return -ENODEV;

	seq_puts(m, "SSEU Device Info\n");
	seq_printf(m, "  Available Slice Total: %u\n",
		   INTEL_INFO(dev)->slice_total);
	seq_printf(m, "  Available Subslice Total: %u\n",
		   INTEL_INFO(dev)->subslice_total);
	seq_printf(m, "  Available Subslice Per Slice: %u\n",
		   INTEL_INFO(dev)->subslice_per_slice);
	seq_printf(m, "  Available EU Total: %u\n",
		   INTEL_INFO(dev)->eu_total);
	seq_printf(m, "  Available EU Per Subslice: %u\n",
		   INTEL_INFO(dev)->eu_per_subslice);
	seq_printf(m, "  Has Slice Power Gating: %s\n",
		   yesno(INTEL_INFO(dev)->has_slice_pg));
	seq_printf(m, "  Has Subslice Power Gating: %s\n",
		   yesno(INTEL_INFO(dev)->has_subslice_pg));
	seq_printf(m, "  Has EU Power Gating: %s\n",
		   yesno(INTEL_INFO(dev)->has_eu_pg));

	seq_puts(m, "SSEU Device Status\n");
	memset(&stat, 0, sizeof(stat));
	if (IS_CHERRYVIEW(dev)) {
		cherryview_sseu_device_status(dev, &stat);
	} else if (INTEL_INFO(dev)->gen >= 9) {
		gen9_sseu_device_status(dev, &stat);
	}
	seq_printf(m, "  Enabled Slice Total: %u\n",
		   stat.slice_total);
	seq_printf(m, "  Enabled Subslice Total: %u\n",
		   stat.subslice_total);
	seq_printf(m, "  Enabled Subslice Per Slice: %u\n",
		   stat.subslice_per_slice);
	seq_printf(m, "  Enabled EU Total: %u\n",
		   stat.eu_total);
	seq_printf(m, "  Enabled EU Per Subslice: %u\n",
		   stat.eu_per_subslice);

	return 0;
}

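/*
 * Illustrative note (assumed, not from the original source): the
 * "Available" block above is static device info, while the "Enabled" block
 * reflects the power-gating acks sampled at read time, so diffing the two
 * shows how much of the slice/subslice/EU topology is currently powered.
 */
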
static int i915_forcewake_open(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen < 6)
		return 0;

	intel_runtime_pm_get(dev_priv);
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	return 0;
}

static int i915_forcewake_release(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen < 6)
		return 0;

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
	intel_runtime_pm_put(dev_priv);

	return 0;
}

static const struct file_operations i915_forcewake_fops = {
	.owner = THIS_MODULE,
	.open = i915_forcewake_open,
	.release = i915_forcewake_release,
};

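/*
 * Illustrative usage (assumed, not from the original source): userspace
 * keeps the uncore awake simply by holding this file open, e.g. a
 * hypothetical
 *
 *   exec 3< /sys/kernel/debug/dri/0/i915_forcewake_user
 *
 * takes the forcewake and runtime-PM references until fd 3 is closed.
 */
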
static int i915_forcewake_create(struct dentry *root, struct drm_minor *minor)
{
	struct drm_device *dev = minor->dev;
	struct dentry *ent;

	ent = debugfs_create_file("i915_forcewake_user",
				  S_IRUSR,
				  root, dev,
				  &i915_forcewake_fops);
	if (!ent)
		return -ENOMEM;

	return drm_add_fake_info_node(minor, ent, &i915_forcewake_fops);
}

static int i915_debugfs_create(struct dentry *root,
			       struct drm_minor *minor,
			       const char *name,
			       const struct file_operations *fops)
{
	struct drm_device *dev = minor->dev;
	struct dentry *ent;

	ent = debugfs_create_file(name,
				  S_IRUGO | S_IWUSR,
				  root, dev,
				  fops);
	if (!ent)
		return -ENOMEM;

	return drm_add_fake_info_node(minor, ent, fops);
}

static const struct drm_info_list i915_debugfs_list[] = {
	{"i915_capabilities", i915_capabilities, 0},
	{"i915_gem_objects", i915_gem_object_info, 0},
	{"i915_gem_gtt", i915_gem_gtt_info, 0},
	{"i915_gem_pinned", i915_gem_gtt_info, 0, (void *) PINNED_LIST},
	{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
	{"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
	{"i915_gem_stolen", i915_gem_stolen_list_info },
	{"i915_gem_pageflip", i915_gem_pageflip_info, 0},
	{"i915_gem_request", i915_gem_request_info, 0},
	{"i915_gem_seqno", i915_gem_seqno_info, 0},
	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
	{"i915_gem_interrupt", i915_interrupt_info, 0},
	{"i915_gem_hws", i915_hws_info, 0, (void *)RCS},
	{"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS},
	{"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS},
	{"i915_gem_hws_vebox", i915_hws_info, 0, (void *)VECS},
	{"i915_gem_batch_pool", i915_gem_batch_pool_info, 0},
	{"i915_frequency_info", i915_frequency_info, 0},
	{"i915_hangcheck_info", i915_hangcheck_info, 0},
	{"i915_drpc_info", i915_drpc_info, 0},
	{"i915_emon_status", i915_emon_status, 0},
	{"i915_ring_freq_table", i915_ring_freq_table, 0},
	{"i915_fbc_status", i915_fbc_status, 0},
	{"i915_ips_status", i915_ips_status, 0},
	{"i915_sr_status", i915_sr_status, 0},
	{"i915_opregion", i915_opregion, 0},
	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
	{"i915_context_status", i915_context_status, 0},
	{"i915_dump_lrc", i915_dump_lrc, 0},
	{"i915_execlists", i915_execlists, 0},
	{"i915_forcewake_domains", i915_forcewake_domains, 0},
	{"i915_swizzle_info", i915_swizzle_info, 0},
	{"i915_ppgtt_info", i915_ppgtt_info, 0},
	{"i915_llc", i915_llc, 0},
	{"i915_edp_psr_status", i915_edp_psr_status, 0},
	{"i915_sink_crc_eDP1", i915_sink_crc, 0},
	{"i915_energy_uJ", i915_energy_uJ, 0},
	{"i915_pc8_status", i915_pc8_status, 0},
	{"i915_power_domain_info", i915_power_domain_info, 0},
	{"i915_display_info", i915_display_info, 0},
	{"i915_semaphore_status", i915_semaphore_status, 0},
	{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
	{"i915_dp_mst_info", i915_dp_mst_info, 0},
	{"i915_wa_registers", i915_wa_registers, 0},
	{"i915_ddb_info", i915_ddb_info, 0},
	{"i915_sseu_status", i915_sseu_status, 0},
	{"i915_drrs_status", i915_drrs_status, 0},
	{"i915_rps_boost_info", i915_rps_boost_info, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)

static const struct i915_debugfs_files {
	const char *name;
	const struct file_operations *fops;
} i915_debugfs_files[] = {
	{"i915_wedged", &i915_wedged_fops},
	{"i915_max_freq", &i915_max_freq_fops},
	{"i915_min_freq", &i915_min_freq_fops},
	{"i915_cache_sharing", &i915_cache_sharing_fops},
	{"i915_ring_stop", &i915_ring_stop_fops},
	{"i915_ring_missed_irq", &i915_ring_missed_irq_fops},
	{"i915_ring_test_irq", &i915_ring_test_irq_fops},
	{"i915_gem_drop_caches", &i915_drop_caches_fops},
	{"i915_error_state", &i915_error_state_fops},
	{"i915_next_seqno", &i915_next_seqno_fops},
	{"i915_display_crc_ctl", &i915_display_crc_ctl_fops},
	{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
	{"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
	{"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
	{"i915_fbc_false_color", &i915_fbc_fc_fops},
	{"i915_dp_test_data", &i915_displayport_test_data_fops},
	{"i915_dp_test_type", &i915_displayport_test_type_fops},
	{"i915_dp_test_active", &i915_displayport_test_active_fops}
};

void intel_display_crc_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];

		pipe_crc->opened = false;
		spin_lock_init(&pipe_crc->lock);
		init_waitqueue_head(&pipe_crc->wq);
	}
}

int i915_debugfs_init(struct drm_minor *minor)
{
	int ret, i;

	ret = i915_forcewake_create(minor->debugfs_root, minor);
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) {
		ret = i915_pipe_crc_create(minor->debugfs_root, minor, i);
		if (ret)
			return ret;
	}

	for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
		ret = i915_debugfs_create(minor->debugfs_root, minor,
					  i915_debugfs_files[i].name,
					  i915_debugfs_files[i].fops);
		if (ret)
			return ret;
	}

	return drm_debugfs_create_files(i915_debugfs_list,
					I915_DEBUGFS_ENTRIES,
					minor->debugfs_root, minor);
}

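/*
 * Illustrative note (assumed, not from the original source): entries in
 * i915_debugfs_files become individual writable files via
 * i915_debugfs_create(), while the read-only seq_file entries in
 * i915_debugfs_list are registered in one call to
 * drm_debugfs_create_files() at the end.
 */
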
void i915_debugfs_cleanup(struct drm_minor *minor)
{
	int i;

	drm_debugfs_remove_files(i915_debugfs_list,
				 I915_DEBUGFS_ENTRIES, minor);

	drm_debugfs_remove_files((struct drm_info_list *) &i915_forcewake_fops,
				 1, minor);

	for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) {
		struct drm_info_list *info_list =
			(struct drm_info_list *)&i915_pipe_crc_data[i];

		drm_debugfs_remove_files(info_list, 1, minor);
	}

	for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
		struct drm_info_list *info_list =
			(struct drm_info_list *) i915_debugfs_files[i].fops;

		drm_debugfs_remove_files(info_list, 1, minor);
	}
}

struct dpcd_block {
	/* DPCD dump start address. */
	unsigned int offset;
	/* DPCD dump end address, inclusive. If unset, .size will be used. */
	unsigned int end;
	/* DPCD dump size. Used if .end is unset. If unset, defaults to 1. */
	size_t size;
	/* Only valid for eDP. */
	bool edp;
};

static const struct dpcd_block i915_dpcd_debug[] = {
	{ .offset = DP_DPCD_REV, .size = DP_RECEIVER_CAP_SIZE },
	{ .offset = DP_PSR_SUPPORT, .end = DP_PSR_CAPS },
	{ .offset = DP_DOWNSTREAM_PORT_0, .size = 16 },
	{ .offset = DP_LINK_BW_SET, .end = DP_EDP_CONFIGURATION_SET },
	{ .offset = DP_SINK_COUNT, .end = DP_ADJUST_REQUEST_LANE2_3 },
	{ .offset = DP_SET_POWER },
	{ .offset = DP_EDP_DPCD_REV },
	{ .offset = DP_EDP_GENERAL_CAP_1, .end = DP_EDP_GENERAL_CAP_3 },
	{ .offset = DP_EDP_DISPLAY_CONTROL_REGISTER, .end = DP_EDP_BACKLIGHT_FREQ_CAP_MAX_LSB },
	{ .offset = DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET, .end = DP_EDP_DBC_MAXIMUM_BRIGHTNESS_SET },
};

static int i915_dpcd_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct intel_dp *intel_dp =
		enc_to_intel_dp(&intel_attached_encoder(connector)->base);
	uint8_t buf[16];
	ssize_t err;
	int i;

	if (connector->status != connector_status_connected)
		return -ENODEV;

	for (i = 0; i < ARRAY_SIZE(i915_dpcd_debug); i++) {
		const struct dpcd_block *b = &i915_dpcd_debug[i];
		size_t size = b->end ? b->end - b->offset + 1 : (b->size ?: 1);

		if (b->edp &&
		    connector->connector_type != DRM_MODE_CONNECTOR_eDP)
			continue;

		/* low tech for now */
		if (WARN_ON(size > sizeof(buf)))
			continue;

		err = drm_dp_dpcd_read(&intel_dp->aux, b->offset, buf, size);
		if (err <= 0) {
			DRM_ERROR("dpcd read (%zu bytes at %u) failed (%zd)\n",
				  size, b->offset, err);
			continue;
		}

		seq_printf(m, "%04x: %*ph\n", b->offset, (int) size, buf);
	}

	return 0;
}

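/*
 * Illustrative output (assumed, not from the original source): each table
 * entry is dumped as "<offset>: <bytes>" via the %*ph hex-dump specifier,
 * e.g. a line like "0000: 12 14 c4 01 ..." for the receiver capability
 * block.
 */
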
static int i915_dpcd_open(struct inode *inode, struct file *file)
{
	return single_open(file, i915_dpcd_show, inode->i_private);
}

static const struct file_operations i915_dpcd_fops = {
	.owner = THIS_MODULE,
	.open = i915_dpcd_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

/**
 * i915_debugfs_connector_add - add i915 specific connector debugfs files
 * @connector: pointer to a registered drm_connector
 *
 * Cleanup will be done by drm_connector_unregister() through a call to
 * drm_debugfs_connector_remove().
 *
 * Returns 0 on success, negative error codes on error.
 */
int i915_debugfs_connector_add(struct drm_connector *connector)
{
	struct dentry *root = connector->debugfs_entry;

	/* The connector must have been registered beforehand. */
	if (!root)
		return -ENODEV;

	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
	    connector->connector_type == DRM_MODE_CONNECTOR_eDP)
		debugfs_create_file("i915_dpcd", S_IRUGO, root, connector,