/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *
 */
#include <linux/seq_file.h>
#include <linux/circ_buf.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/list_sort.h>
#include <asm/msr-index.h>
#include <drm/drmP.h>
#include "intel_drv.h"
#include "intel_ringbuffer.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"

enum {
	ACTIVE_LIST,
	INACTIVE_LIST,
	PINNED_LIST,
};
/* As the drm_debugfs_init() routines are called before dev->dev_private is
 * allocated we need to hook into the minor for release. */
static int
drm_add_fake_info_node(struct drm_minor *minor,
		       struct dentry *ent,
		       const void *key)
{
	struct drm_info_node *node;

	node = kmalloc(sizeof(*node), GFP_KERNEL);
	if (node == NULL) {
		debugfs_remove(ent);
		return -ENOMEM;
	}

	node->minor = minor;
	node->dent = ent;
	node->info_ent = (void *) key;

	mutex_lock(&minor->debugfs_lock);
	list_add(&node->list, &minor->debugfs_list);
	mutex_unlock(&minor->debugfs_lock);

	return 0;
}
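
/*
 * Usage sketch (hedged, illustrative rather than verbatim): a debugfs file
 * backed by custom fops is typically wired through this helper as
 *
 *	ent = debugfs_create_file(name, S_IRUGO | S_IWUSR,
 *				  root, dev, fops);
 *	if (!ent)
 *		return -ENOMEM;
 *	return drm_add_fake_info_node(minor, ent, fops);
 *
 * so that releasing the minor also frees the bookkeeping node.
 */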
static int i915_capabilities(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	const struct intel_device_info *info = INTEL_INFO(dev);

	seq_printf(m, "gen: %d\n", info->gen);
	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev));
#define PRINT_FLAG(x)  seq_printf(m, #x ": %s\n", yesno(info->x))
#define SEP_SEMICOLON ;
	DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_SEMICOLON);
#undef PRINT_FLAG
#undef SEP_SEMICOLON

	return 0;
}
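
/*
 * For reference, PRINT_FLAG stringises each device-info flag into one
 * "flag: yes/no" line; e.g. for a flag named is_mobile the preprocessor
 * produces (sketch):
 *
 *	seq_printf(m, "is_mobile" ": %s\n", yesno(info->is_mobile));
 */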
static const char *get_pin_flag(struct drm_i915_gem_object *obj)
{
	if (obj->pin_display)
		return "p";
	else
		return " ";
}
static const char *get_tiling_flag(struct drm_i915_gem_object *obj)
{
	switch (obj->tiling_mode) {
	default:
	case I915_TILING_NONE: return " ";
	case I915_TILING_X: return "X";
	case I915_TILING_Y: return "Y";
	}
}

static inline const char *get_global_flag(struct drm_i915_gem_object *obj)
{
	return i915_gem_obj_to_ggtt(obj) ? "g" : " ";
}
static u64
i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
{
	u64 size = 0;
	struct i915_vma *vma;

	list_for_each_entry(vma, &obj->vma_list, vma_link) {
		if (i915_is_ggtt(vma->vm) &&
		    drm_mm_node_allocated(&vma->node))
			size += vma->node.size;
	}

	return size;
}
static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct intel_engine_cs *ring;
	struct i915_vma *vma;
	int pin_count = 0;
	int i;

	seq_printf(m, "%pK: %s%s%s%s %8zdKiB %02x %02x [ ",
		   &obj->base,
		   obj->active ? "*" : " ",
		   get_pin_flag(obj),
		   get_tiling_flag(obj),
		   get_global_flag(obj),
		   obj->base.size / 1024,
		   obj->base.read_domains,
		   obj->base.write_domain);
	for_each_ring(ring, dev_priv, i)
		seq_printf(m, "%x ",
			   i915_gem_request_get_seqno(obj->last_read_req[i]));
	seq_printf(m, "] %x %x%s%s%s",
		   i915_gem_request_get_seqno(obj->last_write_req),
		   i915_gem_request_get_seqno(obj->last_fenced_req),
		   i915_cache_level_str(to_i915(obj->base.dev), obj->cache_level),
		   obj->dirty ? " dirty" : "",
		   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
	if (obj->base.name)
		seq_printf(m, " (name: %d)", obj->base.name);
	list_for_each_entry(vma, &obj->vma_list, vma_link) {
		if (vma->pin_count > 0)
			pin_count++;
	}
	seq_printf(m, " (pinned x %d)", pin_count);
	if (obj->pin_display)
		seq_printf(m, " (display)");
	if (obj->fence_reg != I915_FENCE_REG_NONE)
		seq_printf(m, " (fence: %d)", obj->fence_reg);
	list_for_each_entry(vma, &obj->vma_list, vma_link) {
		seq_printf(m, " (%sgtt offset: %08llx, size: %08llx",
			   i915_is_ggtt(vma->vm) ? "g" : "pp",
			   vma->node.start, vma->node.size);
		if (i915_is_ggtt(vma->vm))
			seq_printf(m, ", type: %u)", vma->ggtt_view.type);
		else
			seq_puts(m, ")");
	}
	if (obj->stolen)
		seq_printf(m, " (stolen: %08llx)", obj->stolen->start);
	if (obj->pin_display || obj->fault_mappable) {
		char s[3], *t = s;
		if (obj->pin_display)
			*t++ = 'p';
		if (obj->fault_mappable)
			*t++ = 'f';
		*t = '\0';
		seq_printf(m, " (%s mappable)", s);
	}
	if (obj->last_write_req != NULL)
		seq_printf(m, " (%s)",
			   i915_gem_request_get_ring(obj->last_write_req)->name);
	if (obj->frontbuffer_bits)
		seq_printf(m, " (frontbuffer: 0x%03x)", obj->frontbuffer_bits);
}
static void describe_ctx(struct seq_file *m, struct intel_context *ctx)
{
	seq_putc(m, ctx->legacy_hw_ctx.initialized ? 'I' : 'i');
	seq_putc(m, ctx->remap_slice ? 'R' : 'r');
	seq_putc(m, ' ');
}
static int i915_gem_object_list_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	uintptr_t list = (uintptr_t) node->info_ent->data;
	struct list_head *head;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_address_space *vm = &dev_priv->gtt.base;
	struct i915_vma *vma;
	u64 total_obj_size, total_gtt_size;
	int count, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	/* FIXME: the user of this interface might want more than just GGTT */
	switch (list) {
	case ACTIVE_LIST:
		seq_puts(m, "Active:\n");
		head = &vm->active_list;
		break;
	case INACTIVE_LIST:
		seq_puts(m, "Inactive:\n");
		head = &vm->inactive_list;
		break;
	default:
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	total_obj_size = total_gtt_size = count = 0;
	list_for_each_entry(vma, head, mm_list) {
		seq_printf(m, "   ");
		describe_obj(m, vma->obj);
		seq_printf(m, "\n");
		total_obj_size += vma->obj->base.size;
		total_gtt_size += vma->node.size;
		count++;
	}
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
		   count, total_obj_size, total_gtt_size);
	return 0;
}
static int obj_rank_by_stolen(void *priv,
			      struct list_head *A, struct list_head *B)
{
	struct drm_i915_gem_object *a =
		container_of(A, struct drm_i915_gem_object, obj_exec_link);
	struct drm_i915_gem_object *b =
		container_of(B, struct drm_i915_gem_object, obj_exec_link);

	if (a->stolen->start < b->stolen->start)
		return -1;
	if (a->stolen->start > b->stolen->start)
		return 1;
	return 0;
}
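
/*
 * Note: list_sort() expects a memcmp-style comparator, so the returns
 * above mean "a before b" (< 0), "keep relative order" (0) and
 * "a after b" (> 0); the call made below is simply
 *
 *	list_sort(NULL, &stolen, obj_rank_by_stolen);
 */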
static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	u64 total_obj_size, total_gtt_size;
	LIST_HEAD(stolen);
	int count, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	total_obj_size = total_gtt_size = count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if (obj->stolen == NULL)
			continue;

		list_add(&obj->obj_exec_link, &stolen);

		total_obj_size += obj->base.size;
		total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
		count++;
	}
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
		if (obj->stolen == NULL)
			continue;

		list_add(&obj->obj_exec_link, &stolen);

		total_obj_size += obj->base.size;
		count++;
	}
	list_sort(NULL, &stolen, obj_rank_by_stolen);
	seq_puts(m, "Stolen:\n");
	while (!list_empty(&stolen)) {
		obj = list_first_entry(&stolen, typeof(*obj), obj_exec_link);
		seq_puts(m, "   ");
		describe_obj(m, obj);
		seq_putc(m, '\n');
		list_del_init(&obj->obj_exec_link);
	}
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
		   count, total_obj_size, total_gtt_size);
	return 0;
}
#define count_objects(list, member) do { \
	list_for_each_entry(obj, list, member) { \
		size += i915_gem_obj_total_ggtt_size(obj); \
		++count; \
		if (obj->map_and_fenceable) { \
			mappable_size += i915_gem_obj_ggtt_size(obj); \
			++mappable_count; \
		} \
	} \
} while (0)

struct file_stats {
	struct drm_i915_file_private *file_priv;
	unsigned long count;
	u64 total, unbound;
	u64 global, shared;
	u64 active, inactive;
};
static int per_file_stats(int id, void *ptr, void *data)
{
	struct drm_i915_gem_object *obj = ptr;
	struct file_stats *stats = data;
	struct i915_vma *vma;

	stats->count++;
	stats->total += obj->base.size;

	if (obj->base.name || obj->base.dma_buf)
		stats->shared += obj->base.size;

	if (USES_FULL_PPGTT(obj->base.dev)) {
		list_for_each_entry(vma, &obj->vma_list, vma_link) {
			struct i915_hw_ppgtt *ppgtt;

			if (!drm_mm_node_allocated(&vma->node))
				continue;

			if (i915_is_ggtt(vma->vm)) {
				stats->global += obj->base.size;
				continue;
			}

			ppgtt = container_of(vma->vm, struct i915_hw_ppgtt, base);
			if (ppgtt->file_priv != stats->file_priv)
				continue;

			if (obj->active) /* XXX per-vma statistic */
				stats->active += obj->base.size;
			else
				stats->inactive += obj->base.size;

			return 0;
		}
	} else {
		if (i915_gem_obj_ggtt_bound(obj)) {
			stats->global += obj->base.size;
			if (obj->active)
				stats->active += obj->base.size;
			else
				stats->inactive += obj->base.size;
			return 0;
		}
	}

	if (!list_empty(&obj->global_list))
		stats->unbound += obj->base.size;

	return 0;
}
#define print_file_stats(m, name, stats) do { \
	if (stats.count) \
		seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu global, %llu shared, %llu unbound)\n", \
			   name, \
			   stats.count, \
			   stats.total, \
			   stats.active, \
			   stats.inactive, \
			   stats.global, \
			   stats.shared, \
			   stats.unbound); \
} while (0)
static void print_batch_pool_stats(struct seq_file *m,
				   struct drm_i915_private *dev_priv)
{
	struct drm_i915_gem_object *obj;
	struct file_stats stats;
	struct intel_engine_cs *ring;
	int i, j;

	memset(&stats, 0, sizeof(stats));

	for_each_ring(ring, dev_priv, i) {
		for (j = 0; j < ARRAY_SIZE(ring->batch_pool.cache_list); j++) {
			list_for_each_entry(obj,
					    &ring->batch_pool.cache_list[j],
					    batch_pool_link)
				per_file_stats(0, obj, &stats);
		}
	}

	print_file_stats(m, "[k]batch pool", stats);
}
#define count_vmas(list, member) do { \
	list_for_each_entry(vma, list, member) { \
		size += i915_gem_obj_total_ggtt_size(vma->obj); \
		++count; \
		if (vma->obj->map_and_fenceable) { \
			mappable_size += i915_gem_obj_ggtt_size(vma->obj); \
			++mappable_count; \
		} \
	} \
} while (0)
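
/*
 * count_objects()/count_vmas() are macros rather than functions so they
 * can update the caller's local size/count/mappable_* accumulators in
 * place; one expansion inside i915_gem_object_info() reads roughly as
 * (sketch):
 *
 *	list_for_each_entry(vma, &vm->active_list, mm_list) {
 *		size += i915_gem_obj_total_ggtt_size(vma->obj);
 *		++count;
 *		...
 *	}
 */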
static int i915_gem_object_info(struct seq_file *m, void* data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 count, mappable_count, purgeable_count;
	u64 size, mappable_size, purgeable_size;
	struct drm_i915_gem_object *obj;
	struct i915_address_space *vm = &dev_priv->gtt.base;
	struct drm_file *file;
	struct i915_vma *vma;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "%u objects, %zu bytes\n",
		   dev_priv->mm.object_count,
		   dev_priv->mm.object_memory);

	size = count = mappable_size = mappable_count = 0;
	count_objects(&dev_priv->mm.bound_list, global_list);
	seq_printf(m, "%u [%u] objects, %llu [%llu] bytes in gtt\n",
		   count, mappable_count, size, mappable_size);

	size = count = mappable_size = mappable_count = 0;
	count_vmas(&vm->active_list, mm_list);
	seq_printf(m, "  %u [%u] active objects, %llu [%llu] bytes\n",
		   count, mappable_count, size, mappable_size);

	size = count = mappable_size = mappable_count = 0;
	count_vmas(&vm->inactive_list, mm_list);
	seq_printf(m, "  %u [%u] inactive objects, %llu [%llu] bytes\n",
		   count, mappable_count, size, mappable_size);

	size = count = purgeable_size = purgeable_count = 0;
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
		size += obj->base.size, ++count;
		if (obj->madv == I915_MADV_DONTNEED)
			purgeable_size += obj->base.size, ++purgeable_count;
	}
	seq_printf(m, "%u unbound objects, %llu bytes\n", count, size);

	size = count = mappable_size = mappable_count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if (obj->fault_mappable) {
			size += i915_gem_obj_ggtt_size(obj);
			++count;
		}
		if (obj->pin_display) {
			mappable_size += i915_gem_obj_ggtt_size(obj);
			++mappable_count;
		}
		if (obj->madv == I915_MADV_DONTNEED) {
			purgeable_size += obj->base.size;
			++purgeable_count;
		}
	}
	seq_printf(m, "%u purgeable objects, %llu bytes\n",
		   purgeable_count, purgeable_size);
	seq_printf(m, "%u pinned mappable objects, %llu bytes\n",
		   mappable_count, mappable_size);
	seq_printf(m, "%u fault mappable objects, %llu bytes\n",
		   count, size);

	seq_printf(m, "%llu [%llu] gtt total\n",
		   dev_priv->gtt.base.total,
		   (u64)dev_priv->gtt.mappable_end - dev_priv->gtt.base.start);

	seq_putc(m, '\n');
	print_batch_pool_stats(m, dev_priv);
	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
		struct file_stats stats;
		struct task_struct *task;

		memset(&stats, 0, sizeof(stats));
		stats.file_priv = file->driver_priv;
		spin_lock(&file->table_lock);
		idr_for_each(&file->object_idr, per_file_stats, &stats);
		spin_unlock(&file->table_lock);
		/*
		 * Although we have a valid reference on file->pid, that does
		 * not guarantee that the task_struct who called get_pid() is
		 * still alive (e.g. get_pid(current) => fork() => exit()).
		 * Therefore, we need to protect this ->comm access using RCU.
		 */
		rcu_read_lock();
		task = pid_task(file->pid, PIDTYPE_PID);
		print_file_stats(m, task ? task->comm : "<unknown>", stats);
		rcu_read_unlock();
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
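
/*
 * The RCU bracket above is the standard idiom for dereferencing a
 * task_struct obtained from a struct pid (minimal sketch):
 *
 *	rcu_read_lock();
 *	task = pid_task(file->pid, PIDTYPE_PID);
 *	... use task->comm while still inside the read-side section ...
 *	rcu_read_unlock();
 */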
static int i915_gem_gtt_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	uintptr_t list = (uintptr_t) node->info_ent->data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	u64 total_obj_size, total_gtt_size;
	int count, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	total_obj_size = total_gtt_size = count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if (list == PINNED_LIST && !i915_gem_obj_is_pinned(obj))
			continue;

		seq_puts(m, "   ");
		describe_obj(m, obj);
		seq_putc(m, '\n');
		total_obj_size += obj->base.size;
		total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
		count++;
	}

	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
		   count, total_obj_size, total_gtt_size);

	return 0;
}
static int i915_gem_pageflip_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	for_each_intel_crtc(dev, crtc) {
		const char pipe = pipe_name(crtc->pipe);
		const char plane = plane_name(crtc->plane);
		struct intel_unpin_work *work;

		spin_lock_irq(&dev->event_lock);
		work = crtc->unpin_work;
		if (work == NULL) {
			seq_printf(m, "No flip due on pipe %c (plane %c)\n",
				   pipe, plane);
		} else {
			u32 addr;

			if (atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
				seq_printf(m, "Flip queued on pipe %c (plane %c)\n",
					   pipe, plane);
			} else {
				seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n",
					   pipe, plane);
			}
			if (work->flip_queued_req) {
				struct intel_engine_cs *ring =
					i915_gem_request_get_ring(work->flip_queued_req);

				seq_printf(m, "Flip queued on %s at seqno %x, next seqno %x [current breadcrumb %x], completed? %d\n",
					   ring->name,
					   i915_gem_request_get_seqno(work->flip_queued_req),
					   dev_priv->next_seqno,
					   ring->get_seqno(ring, true),
					   i915_gem_request_completed(work->flip_queued_req, true));
			} else
				seq_printf(m, "Flip not associated with any ring\n");
			seq_printf(m, "Flip queued on frame %d, (was ready on frame %d), now %d\n",
				   work->flip_queued_vblank,
				   work->flip_ready_vblank,
				   drm_crtc_vblank_count(&crtc->base));
			if (work->enable_stall_check)
				seq_puts(m, "Stall check enabled, ");
			else
				seq_puts(m, "Stall check waiting for page flip ioctl, ");
			seq_printf(m, "%d prepares\n", atomic_read(&work->pending));

			if (INTEL_INFO(dev)->gen >= 4)
				addr = I915_HI_DISPBASE(I915_READ(DSPSURF(crtc->plane)));
			else
				addr = I915_READ(DSPADDR(crtc->plane));
			seq_printf(m, "Current scanout address 0x%08x\n", addr);

			if (work->pending_flip_obj) {
				seq_printf(m, "New framebuffer address 0x%08lx\n", (long)work->gtt_offset);
				seq_printf(m, "MMIO update completed? %d\n",  addr == work->gtt_offset);
			}
		}
		spin_unlock_irq(&dev->event_lock);
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	struct intel_engine_cs *ring;
	int total = 0;
	int ret, i, j;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	for_each_ring(ring, dev_priv, i) {
		for (j = 0; j < ARRAY_SIZE(ring->batch_pool.cache_list); j++) {
			int count;

			count = 0;
			list_for_each_entry(obj,
					    &ring->batch_pool.cache_list[j],
					    batch_pool_link)
				count++;
			seq_printf(m, "%s cache[%d]: %d objects\n",
				   ring->name, j, count);

			list_for_each_entry(obj,
					    &ring->batch_pool.cache_list[j],
					    batch_pool_link) {
				seq_puts(m, "   ");
				describe_obj(m, obj);
				seq_putc(m, '\n');
			}

			total += count;
		}
	}

	seq_printf(m, "total: %d\n", total);

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
static int i915_gem_request_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	struct drm_i915_gem_request *req;
	int ret, any, i;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	any = 0;
	for_each_ring(ring, dev_priv, i) {
		int count;

		count = 0;
		list_for_each_entry(req, &ring->request_list, list)
			count++;
		if (count == 0)
			continue;

		seq_printf(m, "%s requests: %d\n", ring->name, count);
		list_for_each_entry(req, &ring->request_list, list) {
			struct task_struct *task;

			rcu_read_lock();
			task = NULL;
			if (req->pid)
				task = pid_task(req->pid, PIDTYPE_PID);
			seq_printf(m, "    %x @ %d: %s [%d]\n",
				   req->seqno,
				   (int) (jiffies - req->emitted_jiffies),
				   task ? task->comm : "<unknown>",
				   task ? task->pid : -1);
			rcu_read_unlock();
		}

		any++;
	}
	mutex_unlock(&dev->struct_mutex);

	if (any == 0)
		seq_puts(m, "No requests\n");

	return 0;
}
static void i915_ring_seqno_info(struct seq_file *m,
				 struct intel_engine_cs *ring)
{
	if (ring->get_seqno) {
		seq_printf(m, "Current sequence (%s): %x\n",
			   ring->name, ring->get_seqno(ring, false));
	}
}
static int i915_gem_seqno_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	int ret, i;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	for_each_ring(ring, dev_priv, i)
		i915_ring_seqno_info(m, ring);

	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
static int i915_interrupt_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	int ret, i, pipe;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	if (IS_CHERRYVIEW(dev)) {
		seq_printf(m, "Master Interrupt Control:\t%08x\n",
			   I915_READ(GEN8_MASTER_IRQ));

		seq_printf(m, "Display IER:\t%08x\n",
			   I915_READ(VLV_IER));
		seq_printf(m, "Display IIR:\t%08x\n",
			   I915_READ(VLV_IIR));
		seq_printf(m, "Display IIR_RW:\t%08x\n",
			   I915_READ(VLV_IIR_RW));
		seq_printf(m, "Display IMR:\t%08x\n",
			   I915_READ(VLV_IMR));
		for_each_pipe(dev_priv, pipe)
			seq_printf(m, "Pipe %c stat:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));

		seq_printf(m, "Port hotplug:\t%08x\n",
			   I915_READ(PORT_HOTPLUG_EN));
		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
			   I915_READ(VLV_DPFLIPSTAT));
		seq_printf(m, "DPINVGTT:\t%08x\n",
			   I915_READ(DPINVGTT));

		for (i = 0; i < 4; i++) {
			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IMR(i)));
			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IIR(i)));
			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IER(i)));
		}

		seq_printf(m, "PCU interrupt mask:\t%08x\n",
			   I915_READ(GEN8_PCU_IMR));
		seq_printf(m, "PCU interrupt identity:\t%08x\n",
			   I915_READ(GEN8_PCU_IIR));
		seq_printf(m, "PCU interrupt enable:\t%08x\n",
			   I915_READ(GEN8_PCU_IER));
	} else if (INTEL_INFO(dev)->gen >= 8) {
		seq_printf(m, "Master Interrupt Control:\t%08x\n",
			   I915_READ(GEN8_MASTER_IRQ));

		for (i = 0; i < 4; i++) {
			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IMR(i)));
			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IIR(i)));
			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IER(i)));
		}

		for_each_pipe(dev_priv, pipe) {
			if (!intel_display_power_is_enabled(dev_priv,
						POWER_DOMAIN_PIPE(pipe))) {
				seq_printf(m, "Pipe %c power disabled\n",
					   pipe_name(pipe));
				continue;
			}
			seq_printf(m, "Pipe %c IMR:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(GEN8_DE_PIPE_IMR(pipe)));
			seq_printf(m, "Pipe %c IIR:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(GEN8_DE_PIPE_IIR(pipe)));
			seq_printf(m, "Pipe %c IER:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(GEN8_DE_PIPE_IER(pipe)));
		}

		seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
			   I915_READ(GEN8_DE_PORT_IMR));
		seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
			   I915_READ(GEN8_DE_PORT_IIR));
		seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
			   I915_READ(GEN8_DE_PORT_IER));

		seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
			   I915_READ(GEN8_DE_MISC_IMR));
		seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
			   I915_READ(GEN8_DE_MISC_IIR));
		seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
			   I915_READ(GEN8_DE_MISC_IER));

		seq_printf(m, "PCU interrupt mask:\t%08x\n",
			   I915_READ(GEN8_PCU_IMR));
		seq_printf(m, "PCU interrupt identity:\t%08x\n",
			   I915_READ(GEN8_PCU_IIR));
		seq_printf(m, "PCU interrupt enable:\t%08x\n",
			   I915_READ(GEN8_PCU_IER));
	} else if (IS_VALLEYVIEW(dev)) {
		seq_printf(m, "Display IER:\t%08x\n",
			   I915_READ(VLV_IER));
		seq_printf(m, "Display IIR:\t%08x\n",
			   I915_READ(VLV_IIR));
		seq_printf(m, "Display IIR_RW:\t%08x\n",
			   I915_READ(VLV_IIR_RW));
		seq_printf(m, "Display IMR:\t%08x\n",
			   I915_READ(VLV_IMR));
		for_each_pipe(dev_priv, pipe)
			seq_printf(m, "Pipe %c stat:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));

		seq_printf(m, "Master IER:\t%08x\n",
			   I915_READ(VLV_MASTER_IER));

		seq_printf(m, "Render IER:\t%08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Render IIR:\t%08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Render IMR:\t%08x\n",
			   I915_READ(GTIMR));

		seq_printf(m, "PM IER:\t\t%08x\n",
			   I915_READ(GEN6_PMIER));
		seq_printf(m, "PM IIR:\t\t%08x\n",
			   I915_READ(GEN6_PMIIR));
		seq_printf(m, "PM IMR:\t\t%08x\n",
			   I915_READ(GEN6_PMIMR));

		seq_printf(m, "Port hotplug:\t%08x\n",
			   I915_READ(PORT_HOTPLUG_EN));
		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
			   I915_READ(VLV_DPFLIPSTAT));
		seq_printf(m, "DPINVGTT:\t%08x\n",
			   I915_READ(DPINVGTT));

	} else if (!HAS_PCH_SPLIT(dev)) {
		seq_printf(m, "Interrupt enable:    %08x\n",
			   I915_READ(IER));
		seq_printf(m, "Interrupt identity:  %08x\n",
			   I915_READ(IIR));
		seq_printf(m, "Interrupt mask:      %08x\n",
			   I915_READ(IMR));
		for_each_pipe(dev_priv, pipe)
			seq_printf(m, "Pipe %c stat:         %08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));
	} else {
		seq_printf(m, "North Display Interrupt enable:		%08x\n",
			   I915_READ(DEIER));
		seq_printf(m, "North Display Interrupt identity:	%08x\n",
			   I915_READ(DEIIR));
		seq_printf(m, "North Display Interrupt mask:		%08x\n",
			   I915_READ(DEIMR));
		seq_printf(m, "South Display Interrupt enable:		%08x\n",
			   I915_READ(SDEIER));
		seq_printf(m, "South Display Interrupt identity:	%08x\n",
			   I915_READ(SDEIIR));
		seq_printf(m, "South Display Interrupt mask:		%08x\n",
			   I915_READ(SDEIMR));
		seq_printf(m, "Graphics Interrupt enable:		%08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Graphics Interrupt identity:		%08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Graphics Interrupt mask:		%08x\n",
			   I915_READ(GTIMR));
	}
	for_each_ring(ring, dev_priv, i) {
		if (INTEL_INFO(dev)->gen >= 6) {
			seq_printf(m,
				   "Graphics Interrupt mask (%s):	%08x\n",
				   ring->name, I915_READ_IMR(ring));
		}
		i915_ring_seqno_info(m, ring);
	}
	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
	for (i = 0; i < dev_priv->num_fence_regs; i++) {
		struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj;

		seq_printf(m, "Fence %d, pin count = %d, object = ",
			   i, dev_priv->fence_regs[i].pin_count);
		if (obj == NULL)
			seq_puts(m, "unused");
		else
			describe_obj(m, obj);
		seq_putc(m, '\n');
	}

	mutex_unlock(&dev->struct_mutex);
	return 0;
}
static int i915_hws_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	const u32 *hws;
	int i;

	ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
	hws = ring->status_page.page_addr;
	if (hws == NULL)
		return 0;

	for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) {
		seq_printf(m, "0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
			   i * 4,
			   hws[i], hws[i + 1], hws[i + 2], hws[i + 3]);
	}
	return 0;
}
static ssize_t
i915_error_state_write(struct file *filp,
		       const char __user *ubuf,
		       size_t cnt,
		       loff_t *ppos)
{
	struct i915_error_state_file_priv *error_priv = filp->private_data;
	struct drm_device *dev = error_priv->dev;
	int ret;

	DRM_DEBUG_DRIVER("Resetting error state\n");

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	i915_destroy_error_state(dev);
	mutex_unlock(&dev->struct_mutex);

	return cnt;
}
static int i915_error_state_open(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;
	struct i915_error_state_file_priv *error_priv;

	error_priv = kzalloc(sizeof(*error_priv), GFP_KERNEL);
	if (!error_priv)
		return -ENOMEM;

	error_priv->dev = dev;

	i915_error_state_get(dev, error_priv);

	file->private_data = error_priv;

	return 0;
}
static int i915_error_state_release(struct inode *inode, struct file *file)
{
	struct i915_error_state_file_priv *error_priv = file->private_data;

	i915_error_state_put(error_priv);
	kfree(error_priv);

	return 0;
}
static ssize_t i915_error_state_read(struct file *file, char __user *userbuf,
				     size_t count, loff_t *pos)
{
	struct i915_error_state_file_priv *error_priv = file->private_data;
	struct drm_i915_error_state_buf error_str;
	loff_t tmp_pos = 0;
	ssize_t ret_count = 0;
	int ret;

	ret = i915_error_state_buf_init(&error_str, to_i915(error_priv->dev), count, *pos);
	if (ret)
		return ret;

	ret = i915_error_state_to_str(&error_str, error_priv);
	if (ret)
		goto out;

	ret_count = simple_read_from_buffer(userbuf, count, &tmp_pos,
					    error_str.buf,
					    error_str.bytes);

	if (ret_count < 0)
		ret = ret_count;
	else
		*pos = error_str.start + ret_count;
out:
	i915_error_state_buf_release(&error_str);
	return ret ?: ret_count;
}
static const struct file_operations i915_error_state_fops = {
	.owner = THIS_MODULE,
	.open = i915_error_state_open,
	.read = i915_error_state_read,
	.write = i915_error_state_write,
	.llseek = default_llseek,
	.release = i915_error_state_release,
};
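
/*
 * Userspace interaction sketch (illustrative): reading the node dumps the
 * last captured error state, and any write clears it through
 * i915_error_state_write() above, e.g.
 *
 *	# cat  /sys/kernel/debug/dri/0/i915_error_state
 *	# echo > /sys/kernel/debug/dri/0/i915_error_state
 */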
static int
i915_next_seqno_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	*val = dev_priv->next_seqno;
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
static int
i915_next_seqno_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	ret = i915_gem_set_seqno(dev, val);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops,
			i915_next_seqno_get, i915_next_seqno_set,
			"0x%llx\n");
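
/*
 * DEFINE_SIMPLE_ATTRIBUTE() (linux/fs.h) generates the open/read/write
 * file_operations glue around the get/set pair, formatting the value with
 * the "0x%llx\n" template; conceptually (sketch):
 *
 *	read  -> i915_next_seqno_get(dev, &val); print as "0x%llx\n"
 *	write -> parse a u64; i915_next_seqno_set(dev, val);
 */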
static int i915_frequency_info(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret = 0;

	intel_runtime_pm_get(dev_priv);

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	if (IS_GEN5(dev)) {
		u16 rgvswctl = I915_READ16(MEMSWCTL);
		u16 rgvstat = I915_READ16(MEMSTAT_ILK);

		seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
		seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
		seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
			   MEMSTAT_VID_SHIFT);
		seq_printf(m, "Current P-state: %d\n",
			   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
	} else if (IS_GEN6(dev) || (IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) ||
		   IS_BROADWELL(dev) || IS_GEN9(dev)) {
		u32 rp_state_limits;
		u32 gt_perf_status;
		u32 rp_state_cap;
		u32 rpmodectl, rpinclimit, rpdeclimit;
		u32 rpstat, cagf, reqf;
		u32 rpupei, rpcurup, rpprevup;
		u32 rpdownei, rpcurdown, rpprevdown;
		u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
		int max_freq;

		rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
		if (IS_BROXTON(dev)) {
			rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
			gt_perf_status = I915_READ(BXT_GT_PERF_STATUS);
		} else {
			rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
			gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
		}

		/* RPSTAT1 is in the GT power well */
		ret = mutex_lock_interruptible(&dev->struct_mutex);
		if (ret)
			goto out;

		intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

		reqf = I915_READ(GEN6_RPNSWREQ);
		if (IS_GEN9(dev))
			reqf >>= 23;
		else {
			reqf &= ~GEN6_TURBO_DISABLE;
			if (IS_HASWELL(dev) || IS_BROADWELL(dev))
				reqf >>= 24;
			else
				reqf >>= 25;
		}
		reqf = intel_gpu_freq(dev_priv, reqf);

		rpmodectl = I915_READ(GEN6_RP_CONTROL);
		rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
		rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD);

		rpstat = I915_READ(GEN6_RPSTAT1);
		rpupei = I915_READ(GEN6_RP_CUR_UP_EI);
		rpcurup = I915_READ(GEN6_RP_CUR_UP);
		rpprevup = I915_READ(GEN6_RP_PREV_UP);
		rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI);
		rpcurdown = I915_READ(GEN6_RP_CUR_DOWN);
		rpprevdown = I915_READ(GEN6_RP_PREV_DOWN);
		if (IS_GEN9(dev))
			cagf = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT;
		else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
			cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
		else
			cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
		cagf = intel_gpu_freq(dev_priv, cagf);

		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
		mutex_unlock(&dev->struct_mutex);

		if (IS_GEN6(dev) || IS_GEN7(dev)) {
			pm_ier = I915_READ(GEN6_PMIER);
			pm_imr = I915_READ(GEN6_PMIMR);
			pm_isr = I915_READ(GEN6_PMISR);
			pm_iir = I915_READ(GEN6_PMIIR);
			pm_mask = I915_READ(GEN6_PMINTRMSK);
		} else {
			pm_ier = I915_READ(GEN8_GT_IER(2));
			pm_imr = I915_READ(GEN8_GT_IMR(2));
			pm_isr = I915_READ(GEN8_GT_ISR(2));
			pm_iir = I915_READ(GEN8_GT_IIR(2));
			pm_mask = I915_READ(GEN6_PMINTRMSK);
		}
		seq_printf(m, "PM IER=0x%08x IMR=0x%08x ISR=0x%08x IIR=0x%08x, MASK=0x%08x\n",
			   pm_ier, pm_imr, pm_isr, pm_iir, pm_mask);
		seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
		seq_printf(m, "Render p-state ratio: %d\n",
			   (gt_perf_status & (IS_GEN9(dev) ? 0x1ff00 : 0xff00)) >> 8);
		seq_printf(m, "Render p-state VID: %d\n",
			   gt_perf_status & 0xff);
		seq_printf(m, "Render p-state limit: %d\n",
			   rp_state_limits & 0xff);
		seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
		seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl);
		seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit);
		seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
		seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
		seq_printf(m, "CAGF: %dMHz\n", cagf);
		seq_printf(m, "RP CUR UP EI: %dus\n", rpupei &
			   GEN6_CURICONT_MASK);
		seq_printf(m, "RP CUR UP: %dus\n", rpcurup &
			   GEN6_CURBSYTAVG_MASK);
		seq_printf(m, "RP PREV UP: %dus\n", rpprevup &
			   GEN6_CURBSYTAVG_MASK);
		seq_printf(m, "Up threshold: %d%%\n",
			   dev_priv->rps.up_threshold);

		seq_printf(m, "RP CUR DOWN EI: %dus\n", rpdownei &
			   GEN6_CURIAVG_MASK);
		seq_printf(m, "RP CUR DOWN: %dus\n", rpcurdown &
			   GEN6_CURBSYTAVG_MASK);
		seq_printf(m, "RP PREV DOWN: %dus\n", rpprevdown &
			   GEN6_CURBSYTAVG_MASK);
		seq_printf(m, "Down threshold: %d%%\n",
			   dev_priv->rps.down_threshold);

		max_freq = (IS_BROXTON(dev) ? rp_state_cap >> 0 :
			    rp_state_cap >> 16) & 0xff;
		max_freq *= (IS_SKYLAKE(dev) ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, max_freq));

		max_freq = (rp_state_cap & 0xff00) >> 8;
		max_freq *= (IS_SKYLAKE(dev) ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, max_freq));

		max_freq = (IS_BROXTON(dev) ? rp_state_cap >> 16 :
			    rp_state_cap >> 0) & 0xff;
		max_freq *= (IS_SKYLAKE(dev) ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, max_freq));
		seq_printf(m, "Max overclocked frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));

		seq_printf(m, "Current freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq));
		seq_printf(m, "Actual freq: %d MHz\n", cagf);
		seq_printf(m, "Idle freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq));
		seq_printf(m, "Min freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.min_freq));
		seq_printf(m, "Max freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
		seq_printf(m,
			   "efficient (RPe) frequency: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
	} else if (IS_VALLEYVIEW(dev)) {
		u32 freq_sts;

		mutex_lock(&dev_priv->rps.hw_lock);
		freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
		seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
		seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);

		seq_printf(m, "actual GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));

		seq_printf(m, "current GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq));

		seq_printf(m, "max GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));

		seq_printf(m, "min GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.min_freq));

		seq_printf(m, "idle GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq));

		seq_printf(m,
			   "efficient (RPe) frequency: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
		mutex_unlock(&dev_priv->rps.hw_lock);
	} else {
		seq_puts(m, "no P-state info available\n");
	}

	seq_printf(m, "Current CD clock frequency: %d kHz\n", dev_priv->cdclk_freq);
	seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq);
	seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq);

out:
	intel_runtime_pm_put(dev_priv);
	return ret;
}
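
/*
 * Worked example (hedged, assuming the usual 50 MHz RP ratio unit that
 * intel_gpu_freq() applies on these parts): a raw ratio of 0x12 (18)
 * read back from RPNSWREQ corresponds to 18 * 50 = 900 MHz; on Skylake
 * the ratio is first multiplied by GEN9_FREQ_SCALER, which is why the
 * RP0/RP1/RPn computations above scale max_freq before conversion.
 */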
static int i915_hangcheck_info(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	u64 acthd[I915_NUM_RINGS];
	u32 seqno[I915_NUM_RINGS];
	int i;

	if (!i915.enable_hangcheck) {
		seq_printf(m, "Hangcheck disabled\n");
		return 0;
	}

	intel_runtime_pm_get(dev_priv);

	for_each_ring(ring, dev_priv, i) {
		seqno[i] = ring->get_seqno(ring, false);
		acthd[i] = intel_ring_get_active_head(ring);
	}

	intel_runtime_pm_put(dev_priv);

	if (delayed_work_pending(&dev_priv->gpu_error.hangcheck_work)) {
		seq_printf(m, "Hangcheck active, fires in %dms\n",
			   jiffies_to_msecs(dev_priv->gpu_error.hangcheck_work.timer.expires -
					    jiffies));
	} else
		seq_printf(m, "Hangcheck inactive\n");

	for_each_ring(ring, dev_priv, i) {
		seq_printf(m, "%s:\n", ring->name);
		seq_printf(m, "\tseqno = %x [current %x]\n",
			   ring->hangcheck.seqno, seqno[i]);
		seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
			   (long long)ring->hangcheck.acthd,
			   (long long)acthd[i]);
		seq_printf(m, "\tmax ACTHD = 0x%08llx\n",
			   (long long)ring->hangcheck.max_acthd);
		seq_printf(m, "\tscore = %d\n", ring->hangcheck.score);
		seq_printf(m, "\taction = %d\n", ring->hangcheck.action);
	}

	return 0;
}
static int ironlake_drpc_info(struct seq_file *m)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 rgvmodectl, rstdbyctl;
	u16 crstandvid;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	rgvmodectl = I915_READ(MEMMODECTL);
	rstdbyctl = I915_READ(RSTDBYCTL);
	crstandvid = I915_READ16(CRSTANDVID);

	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN));
	seq_printf(m, "Boost freq: %d\n",
		   (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
		   MEMMODE_BOOST_FREQ_SHIFT);
	seq_printf(m, "HW control enabled: %s\n",
		   yesno(rgvmodectl & MEMMODE_HWIDLE_EN));
	seq_printf(m, "SW control enabled: %s\n",
		   yesno(rgvmodectl & MEMMODE_SWMODE_EN));
	seq_printf(m, "Gated voltage change: %s\n",
		   yesno(rgvmodectl & MEMMODE_RCLK_GATE));
	seq_printf(m, "Starting frequency: P%d\n",
		   (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
	seq_printf(m, "Max P-state: P%d\n",
		   (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
	seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
	seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
	seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
	seq_printf(m, "Render standby enabled: %s\n",
		   yesno(!(rstdbyctl & RCX_SW_EXIT)));
	seq_puts(m, "Current RS state: ");
	switch (rstdbyctl & RSX_STATUS_MASK) {
	case RSX_STATUS_ON:
		seq_puts(m, "on\n");
		break;
	case RSX_STATUS_RC1:
		seq_puts(m, "RC1\n");
		break;
	case RSX_STATUS_RC1E:
		seq_puts(m, "RC1E\n");
		break;
	case RSX_STATUS_RS1:
		seq_puts(m, "RS1\n");
		break;
	case RSX_STATUS_RS2:
		seq_puts(m, "RS2 (RC6)\n");
		break;
	case RSX_STATUS_RS3:
		seq_puts(m, "RC3 (RC6+)\n");
		break;
	default:
		seq_puts(m, "unknown\n");
		break;
	}

	return 0;
}
static int i915_forcewake_domains(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_uncore_forcewake_domain *fw_domain;
	int i;

	spin_lock_irq(&dev_priv->uncore.lock);
	for_each_fw_domain(fw_domain, dev_priv, i) {
		seq_printf(m, "%s.wake_count = %u\n",
			   intel_uncore_forcewake_domain_to_str(i),
			   fw_domain->wake_count);
	}
	spin_unlock_irq(&dev_priv->uncore.lock);

	return 0;
}
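
/*
 * A non-zero wake_count means some code path currently holds that domain
 * awake; the canonical bracket, as used by i915_frequency_info() above,
 * is (sketch):
 *
 *	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
 *	... I915_READ(...) of GT-power-well registers ...
 *	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 */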
static int vlv_drpc_info(struct seq_file *m)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 rpmodectl1, rcctl1, pw_status;

	intel_runtime_pm_get(dev_priv);

	pw_status = I915_READ(VLV_GTLC_PW_STATUS);
	rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
	rcctl1 = I915_READ(GEN6_RC_CONTROL);

	intel_runtime_pm_put(dev_priv);

	seq_printf(m, "Video Turbo Mode: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
	seq_printf(m, "Turbo enabled: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_ENABLE));
	seq_printf(m, "HW control enabled: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_ENABLE));
	seq_printf(m, "SW control enabled: %s\n",
		   yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) ==
			  GEN6_RP_MEDIA_SW_MODE));
	seq_printf(m, "RC6 Enabled: %s\n",
		   yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
					GEN6_RC_CTL_EI_MODE(1))));
	seq_printf(m, "Render Power Well: %s\n",
		   (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
	seq_printf(m, "Media Power Well: %s\n",
		   (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");

	seq_printf(m, "Render RC6 residency since boot: %u\n",
		   I915_READ(VLV_GT_RENDER_RC6));
	seq_printf(m, "Media RC6 residency since boot: %u\n",
		   I915_READ(VLV_GT_MEDIA_RC6));

	return i915_forcewake_domains(m, NULL);
}
static int gen6_drpc_info(struct seq_file *m)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 rpmodectl1, gt_core_status, rcctl1, rc6vids = 0;
	unsigned forcewake_count;
	int count = 0, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	spin_lock_irq(&dev_priv->uncore.lock);
	forcewake_count = dev_priv->uncore.fw_domain[FW_DOMAIN_ID_RENDER].wake_count;
	spin_unlock_irq(&dev_priv->uncore.lock);

	if (forcewake_count) {
		seq_puts(m, "RC information inaccurate because somebody "
			    "holds a forcewake reference \n");
	} else {
		/* NB: we cannot use forcewake, else we read the wrong values */
		while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
			udelay(10);
		seq_printf(m, "RC information accurate: %s\n", yesno(count < 51));
	}

	gt_core_status = readl(dev_priv->regs + GEN6_GT_CORE_STATUS);
	trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);

	rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
	rcctl1 = I915_READ(GEN6_RC_CONTROL);
	mutex_unlock(&dev->struct_mutex);
	mutex_lock(&dev_priv->rps.hw_lock);
	sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
	mutex_unlock(&dev_priv->rps.hw_lock);

	intel_runtime_pm_put(dev_priv);

	seq_printf(m, "Video Turbo Mode: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
	seq_printf(m, "HW control enabled: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_ENABLE));
	seq_printf(m, "SW control enabled: %s\n",
		   yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) ==
			  GEN6_RP_MEDIA_SW_MODE));
	seq_printf(m, "RC1e Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
	seq_printf(m, "RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
	seq_printf(m, "Deep RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
	seq_printf(m, "Deepest RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
	seq_puts(m, "Current RC state: ");
	switch (gt_core_status & GEN6_RCn_MASK) {
	case GEN6_RC0:
		if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
			seq_puts(m, "Core Power Down\n");
		else
			seq_puts(m, "on\n");
		break;
	case GEN6_RC3:
		seq_puts(m, "RC3\n");
		break;
	case GEN6_RC6:
		seq_puts(m, "RC6\n");
		break;
	case GEN6_RC7:
		seq_puts(m, "RC7\n");
		break;
	default:
		seq_puts(m, "Unknown\n");
		break;
	}

	seq_printf(m, "Core Power Down: %s\n",
		   yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));

	/* Not exactly sure what this is */
	seq_printf(m, "RC6 \"Locked to RPn\" residency since boot: %u\n",
		   I915_READ(GEN6_GT_GFX_RC6_LOCKED));
	seq_printf(m, "RC6 residency since boot: %u\n",
		   I915_READ(GEN6_GT_GFX_RC6));
	seq_printf(m, "RC6+ residency since boot: %u\n",
		   I915_READ(GEN6_GT_GFX_RC6p));
	seq_printf(m, "RC6++ residency since boot: %u\n",
		   I915_READ(GEN6_GT_GFX_RC6pp));

	seq_printf(m, "RC6   voltage: %dmV\n",
		   GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
	seq_printf(m, "RC6+  voltage: %dmV\n",
		   GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
	seq_printf(m, "RC6++ voltage: %dmV\n",
		   GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
	return i915_forcewake_domains(m, NULL);
}
static int i915_drpc_info(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;

	if (IS_VALLEYVIEW(dev))
		return vlv_drpc_info(m);
	else if (INTEL_INFO(dev)->gen >= 6)
		return gen6_drpc_info(m);
	else
		return ironlake_drpc_info(m);
}
static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	seq_printf(m, "FB tracking busy bits: 0x%08x\n",
		   dev_priv->fb_tracking.busy_bits);

	seq_printf(m, "FB tracking flip bits: 0x%08x\n",
		   dev_priv->fb_tracking.flip_bits);

	return 0;
}
static int i915_fbc_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!HAS_FBC(dev)) {
		seq_puts(m, "FBC unsupported on this chipset\n");
		return 0;
	}

	intel_runtime_pm_get(dev_priv);
	mutex_lock(&dev_priv->fbc.lock);

	if (intel_fbc_enabled(dev_priv))
		seq_puts(m, "FBC enabled\n");
	else
		seq_printf(m, "FBC disabled: %s\n",
			   intel_no_fbc_reason_str(dev_priv->fbc.no_fbc_reason));

	if (INTEL_INFO(dev_priv)->gen >= 7)
		seq_printf(m, "Compressing: %s\n",
			   yesno(I915_READ(FBC_STATUS2) &
				 FBC_COMPRESSION_MASK));

	mutex_unlock(&dev_priv->fbc.lock);
	intel_runtime_pm_put(dev_priv);

	return 0;
}
static int i915_fbc_fc_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen < 7 || !HAS_FBC(dev))
		return -ENODEV;

	*val = dev_priv->fbc.false_color;

	return 0;
}
static int i915_fbc_fc_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 reg;

	if (INTEL_INFO(dev)->gen < 7 || !HAS_FBC(dev))
		return -ENODEV;

	mutex_lock(&dev_priv->fbc.lock);

	reg = I915_READ(ILK_DPFC_CONTROL);
	dev_priv->fbc.false_color = val;

	I915_WRITE(ILK_DPFC_CONTROL, val ?
		   (reg | FBC_CTL_FALSE_COLOR) :
		   (reg & ~FBC_CTL_FALSE_COLOR));

	mutex_unlock(&dev_priv->fbc.lock);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_fc_fops,
			i915_fbc_fc_get, i915_fbc_fc_set,
			"%llu\n");
static int i915_ips_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!HAS_IPS(dev)) {
		seq_puts(m, "not supported\n");
		return 0;
	}

	intel_runtime_pm_get(dev_priv);

	seq_printf(m, "Enabled by kernel parameter: %s\n",
		   yesno(i915.enable_ips));

	if (INTEL_INFO(dev)->gen >= 8) {
		seq_puts(m, "Currently: unknown\n");
	} else {
		if (I915_READ(IPS_CTL) & IPS_ENABLE)
			seq_puts(m, "Currently: enabled\n");
		else
			seq_puts(m, "Currently: disabled\n");
	}

	intel_runtime_pm_put(dev_priv);

	return 0;
}
static int i915_sr_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool sr_enabled = false;

	intel_runtime_pm_get(dev_priv);

	if (HAS_PCH_SPLIT(dev))
		sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
	else if (IS_CRESTLINE(dev) || IS_G4X(dev) ||
		 IS_I945G(dev) || IS_I945GM(dev))
		sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
	else if (IS_I915GM(dev))
		sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
	else if (IS_PINEVIEW(dev))
		sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
	else if (IS_VALLEYVIEW(dev))
		sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;

	intel_runtime_pm_put(dev_priv);

	seq_printf(m, "self-refresh: %s\n",
		   sr_enabled ? "enabled" : "disabled");

	return 0;
}
static int i915_emon_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long temp, chipset, gfx;
	int ret;

	if (!IS_GEN5(dev))
		return -ENODEV;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	temp = i915_mch_val(dev_priv);
	chipset = i915_chipset_val(dev_priv);
	gfx = i915_gfx_val(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "GMCH temp: %ld\n", temp);
	seq_printf(m, "Chipset power: %ld\n", chipset);
	seq_printf(m, "GFX power: %ld\n", gfx);
	seq_printf(m, "Total power: %ld\n", chipset + gfx);

	return 0;
}
static int i915_ring_freq_table(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret = 0;
	int gpu_freq, ia_freq;
	unsigned int max_gpu_freq, min_gpu_freq;

	if (!HAS_CORE_RING_FREQ(dev)) {
		seq_puts(m, "unsupported on this chipset\n");
		return 0;
	}

	intel_runtime_pm_get(dev_priv);

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
	if (ret)
		goto out;

	if (IS_SKYLAKE(dev)) {
		/* Convert GT frequency to 50 HZ units */
		min_gpu_freq =
			dev_priv->rps.min_freq_softlimit / GEN9_FREQ_SCALER;
		max_gpu_freq =
			dev_priv->rps.max_freq_softlimit / GEN9_FREQ_SCALER;
	} else {
		min_gpu_freq = dev_priv->rps.min_freq_softlimit;
		max_gpu_freq = dev_priv->rps.max_freq_softlimit;
	}

	seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");

	for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
		ia_freq = gpu_freq;
		sandybridge_pcode_read(dev_priv,
				       GEN6_PCODE_READ_MIN_FREQ_TABLE,
				       &ia_freq);
		seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
			   intel_gpu_freq(dev_priv, (gpu_freq *
				(IS_SKYLAKE(dev) ? GEN9_FREQ_SCALER : 1))),
			   ((ia_freq >> 0) & 0xff) * 100,
			   ((ia_freq >> 8) & 0xff) * 100);
	}

	mutex_unlock(&dev_priv->rps.hw_lock);

out:
	intel_runtime_pm_put(dev_priv);
	return ret;
}
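
/*
 * Worked example for the table above: the pcode mailbox returns the
 * effective CPU and ring ratios packed into one word in 100 MHz units,
 * so ia_freq == 0x0d0c decodes as (0x0c * 100) = 1200 MHz effective CPU
 * and (0x0d * 100) = 1300 MHz effective ring (illustrative values).
 */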
static int i915_opregion(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_opregion *opregion = &dev_priv->opregion;
	void *data = kmalloc(OPREGION_SIZE, GFP_KERNEL);
	int ret;

	if (data == NULL)
		return -ENOMEM;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out;

	if (opregion->header) {
		memcpy(data, opregion->header, OPREGION_SIZE);
		seq_write(m, data, OPREGION_SIZE);
	}

	mutex_unlock(&dev->struct_mutex);

out:
	kfree(data);
	return 0;
}
static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct intel_fbdev *ifbdev = NULL;
	struct intel_framebuffer *fb;
	struct drm_framebuffer *drm_fb;

#ifdef CONFIG_DRM_FBDEV_EMULATION
	struct drm_i915_private *dev_priv = dev->dev_private;

	ifbdev = dev_priv->fbdev;
	fb = to_intel_framebuffer(ifbdev->helper.fb);

	seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
		   fb->base.width,
		   fb->base.height,
		   fb->base.depth,
		   fb->base.bits_per_pixel,
		   fb->base.modifier[0],
		   atomic_read(&fb->base.refcount.refcount));
	describe_obj(m, fb->obj);
	seq_putc(m, '\n');
#endif

	mutex_lock(&dev->mode_config.fb_lock);
	drm_for_each_fb(drm_fb, dev) {
		fb = to_intel_framebuffer(drm_fb);
		if (ifbdev && &fb->base == ifbdev->helper.fb)
			continue;

		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
			   fb->base.width,
			   fb->base.height,
			   fb->base.depth,
			   fb->base.bits_per_pixel,
			   fb->base.modifier[0],
			   atomic_read(&fb->base.refcount.refcount));
		describe_obj(m, fb->obj);
		seq_putc(m, '\n');
	}
	mutex_unlock(&dev->mode_config.fb_lock);

	return 0;
}
static void describe_ctx_ringbuf(struct seq_file *m,
				 struct intel_ringbuffer *ringbuf)
{
	seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, last head: %d)",
		   ringbuf->space, ringbuf->head, ringbuf->tail,
		   ringbuf->last_retired_head);
}
static int i915_context_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	struct intel_context *ctx;
	int ret, i;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	list_for_each_entry(ctx, &dev_priv->context_list, link) {
		if (!i915.enable_execlists &&
		    ctx->legacy_hw_ctx.rcs_state == NULL)
			continue;

		seq_puts(m, "HW context ");
		describe_ctx(m, ctx);
		for_each_ring(ring, dev_priv, i) {
			if (ring->default_context == ctx)
				seq_printf(m, "(default context %s) ",
					   ring->name);
		}

		if (i915.enable_execlists) {
			seq_putc(m, '\n');
			for_each_ring(ring, dev_priv, i) {
				struct drm_i915_gem_object *ctx_obj =
					ctx->engine[i].state;
				struct intel_ringbuffer *ringbuf =
					ctx->engine[i].ringbuf;

				seq_printf(m, "%s: ", ring->name);
				if (ctx_obj)
					describe_obj(m, ctx_obj);
				if (ringbuf)
					describe_ctx_ringbuf(m, ringbuf);
				seq_putc(m, '\n');
			}
		} else {
			describe_obj(m, ctx->legacy_hw_ctx.rcs_state);
		}

		seq_putc(m, '\n');
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
static void i915_dump_lrc_obj(struct seq_file *m,
			      struct intel_engine_cs *ring,
			      struct drm_i915_gem_object *ctx_obj)
{
	struct page *page;
	uint32_t *reg_state;
	int j;
	unsigned long ggtt_offset = 0;

	if (ctx_obj == NULL) {
		seq_printf(m, "Context on %s with no gem object\n",
			   ring->name);
		return;
	}

	seq_printf(m, "CONTEXT: %s %u\n", ring->name,
		   intel_execlists_ctx_id(ctx_obj));

	if (!i915_gem_obj_ggtt_bound(ctx_obj))
		seq_puts(m, "\tNot bound in GGTT\n");
	else
		ggtt_offset = i915_gem_obj_ggtt_offset(ctx_obj);

	if (i915_gem_object_get_pages(ctx_obj)) {
		seq_puts(m, "\tFailed to get pages for context object\n");
		return;
	}

	page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
	if (!WARN_ON(page == NULL)) {
		reg_state = kmap_atomic(page);

		for (j = 0; j < 0x600 / sizeof(u32) / 4; j += 4) {
			seq_printf(m, "\t[0x%08lx] 0x%08x 0x%08x 0x%08x 0x%08x\n",
				   ggtt_offset + 4096 + (j * 4),
				   reg_state[j], reg_state[j + 1],
				   reg_state[j + 2], reg_state[j + 3]);
		}
		kunmap_atomic(reg_state);
	}

	seq_putc(m, '\n');
}
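
/*
 * Layout note: LRC_STATE_PN selects the register-state page of the
 * logical ring context, which is why the printed addresses start at
 * ggtt_offset + 4096 (one page past the per-process HWSP) and walk
 * 0x600 bytes of state, four dwords per line.
 */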
static int i915_dump_lrc(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	struct intel_context *ctx;
	int ret, i;

	if (!i915.enable_execlists) {
		seq_printf(m, "Logical Ring Contexts are disabled\n");
		return 0;
	}

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	list_for_each_entry(ctx, &dev_priv->context_list, link) {
		for_each_ring(ring, dev_priv, i) {
			if (ring->default_context != ctx)
				i915_dump_lrc_obj(m, ring,
						  ctx->engine[i].state);
		}
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
static int i915_execlists(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	u32 status_pointer;
	u8 read_pointer;
	u8 write_pointer;
	u32 status;
	u32 ctx_id;
	struct list_head *cursor;
	int ring_id, i;
	int ret;

	if (!i915.enable_execlists) {
		seq_puts(m, "Logical Ring Contexts are disabled\n");
		return 0;
	}

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	intel_runtime_pm_get(dev_priv);

	for_each_ring(ring, dev_priv, ring_id) {
		struct drm_i915_gem_request *head_req = NULL;
		int count = 0;
		unsigned long flags;

		seq_printf(m, "%s\n", ring->name);

		status = I915_READ(RING_EXECLIST_STATUS_LO(ring));
		ctx_id = I915_READ(RING_EXECLIST_STATUS_HI(ring));
		seq_printf(m, "\tExeclist status: 0x%08X, context: %u\n",
			   status, ctx_id);

		status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(ring));
		seq_printf(m, "\tStatus pointer: 0x%08X\n", status_pointer);

		read_pointer = ring->next_context_status_buffer;
		write_pointer = status_pointer & 0x07;
		if (read_pointer > write_pointer)
			write_pointer += 6;
		seq_printf(m, "\tRead pointer: 0x%08X, write pointer 0x%08X\n",
			   read_pointer, write_pointer);

		for (i = 0; i < 6; i++) {
			status = I915_READ(RING_CONTEXT_STATUS_BUF_LO(ring, i));
			ctx_id = I915_READ(RING_CONTEXT_STATUS_BUF_HI(ring, i));

			seq_printf(m, "\tStatus buffer %d: 0x%08X, context: %u\n",
				   i, status, ctx_id);
		}

		spin_lock_irqsave(&ring->execlist_lock, flags);
		list_for_each(cursor, &ring->execlist_queue)
			count++;
		head_req = list_first_entry_or_null(&ring->execlist_queue,
				struct drm_i915_gem_request, execlist_link);
		spin_unlock_irqrestore(&ring->execlist_lock, flags);

		seq_printf(m, "\t%d requests in queue\n", count);
		if (head_req) {
			struct drm_i915_gem_object *ctx_obj;

			ctx_obj = head_req->ctx->engine[ring_id].state;
			seq_printf(m, "\tHead request id: %u\n",
				   intel_execlists_ctx_id(ctx_obj));
			seq_printf(m, "\tHead request tail: %u\n",
				   head_req->tail);
		}

		seq_putc(m, '\n');
	}

	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
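/*
 * Reading this node gives a per-ring snapshot of execlist submission
 * state. Illustrative usage, assuming the usual debugfs mount point
 * and DRM minor 0:
 *
 *	# cat /sys/kernel/debug/dri/0/i915_execlists
 */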
static const char *swizzle_string(unsigned swizzle)
{
	switch (swizzle) {
	case I915_BIT_6_SWIZZLE_NONE:
		return "none";
	case I915_BIT_6_SWIZZLE_9:
		return "bit9";
	case I915_BIT_6_SWIZZLE_9_10:
		return "bit9/bit10";
	case I915_BIT_6_SWIZZLE_9_11:
		return "bit9/bit11";
	case I915_BIT_6_SWIZZLE_9_10_11:
		return "bit9/bit10/bit11";
	case I915_BIT_6_SWIZZLE_9_17:
		return "bit9/bit17";
	case I915_BIT_6_SWIZZLE_9_10_17:
		return "bit9/bit10/bit17";
	case I915_BIT_6_SWIZZLE_UNKNOWN:
		return "unknown";
	}

	return "bug";
}
static int i915_swizzle_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_x));
	seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_y));

	if (IS_GEN3(dev) || IS_GEN4(dev)) {
		seq_printf(m, "DDC = 0x%08x\n",
			   I915_READ(DCC));
		seq_printf(m, "DDC2 = 0x%08x\n",
			   I915_READ(DCC2));
		seq_printf(m, "C0DRB3 = 0x%04x\n",
			   I915_READ16(C0DRB3));
		seq_printf(m, "C1DRB3 = 0x%04x\n",
			   I915_READ16(C1DRB3));
	} else if (INTEL_INFO(dev)->gen >= 6) {
		seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C0));
		seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C1));
		seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C2));
		seq_printf(m, "TILECTL = 0x%08x\n",
			   I915_READ(TILECTL));
		if (INTEL_INFO(dev)->gen >= 8)
			seq_printf(m, "GAMTARBMODE = 0x%08x\n",
				   I915_READ(GAMTARBMODE));
		else
			seq_printf(m, "ARB_MODE = 0x%08x\n",
				   I915_READ(ARB_MODE));
		seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
			   I915_READ(DISP_ARB_CTL));
	}

	if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
		seq_puts(m, "L-shaped memory detected\n");

	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
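/*
 * Worked example for the modes reported above (illustrative, not used
 * by the driver): with "bit9/bit10" swizzling the hardware XORs address
 * bits 9 and 10 into bit 6 when accessing tiled memory, i.e.
 *
 *	swizzled_bit6 = bit6 ^ bit9 ^ bit10;
 *
 * which is why userspace must apply the matching de-swizzle when it
 * touches tiled buffers through a CPU mapping.
 */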
static int per_file_ctx(int id, void *ptr, void *data)
{
	struct intel_context *ctx = ptr;
	struct seq_file *m = data;
	struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;

	if (!ppgtt) {
		seq_printf(m, "  no ppgtt for context %d\n",
			   ctx->user_handle);
		return 0;
	}

	if (i915_gem_context_is_default(ctx))
		seq_puts(m, "  default context:\n");
	else
		seq_printf(m, "  context %d:\n", ctx->user_handle);
	ppgtt->debug_dump(ppgtt, m);

	return 0;
}

static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
	int unused, i;

	if (!ppgtt)
		return;

	for_each_ring(ring, dev_priv, unused) {
		seq_printf(m, "%s\n", ring->name);
		for (i = 0; i < 4; i++) {
			u64 pdp = I915_READ(GEN8_RING_PDP_UDW(ring, i));
			pdp <<= 32;
			pdp |= I915_READ(GEN8_RING_PDP_LDW(ring, i));
			seq_printf(m, "\tPDP%d 0x%016llx\n", i, pdp);
		}
	}
}

static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	int i;

	if (INTEL_INFO(dev)->gen == 6)
		seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));

	for_each_ring(ring, dev_priv, i) {
		seq_printf(m, "%s\n", ring->name);
		if (INTEL_INFO(dev)->gen == 7)
			seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(RING_MODE_GEN7(ring)));
		seq_printf(m, "PP_DIR_BASE: 0x%08x\n", I915_READ(RING_PP_DIR_BASE(ring)));
		seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n", I915_READ(RING_PP_DIR_BASE_READ(ring)));
		seq_printf(m, "PP_DIR_DCLV: 0x%08x\n", I915_READ(RING_PP_DIR_DCLV(ring)));
	}
	if (dev_priv->mm.aliasing_ppgtt) {
		struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;

		seq_puts(m, "aliasing PPGTT:\n");
		seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd.base.ggtt_offset);

		ppgtt->debug_dump(ppgtt, m);
	}

	seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
}

static int i915_ppgtt_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_file *file;

	int ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	if (INTEL_INFO(dev)->gen >= 8)
		gen8_ppgtt_info(m, dev);
	else if (INTEL_INFO(dev)->gen >= 6)
		gen6_ppgtt_info(m, dev);

	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
		struct drm_i915_file_private *file_priv = file->driver_priv;
		struct task_struct *task;

		task = get_pid_task(file->pid, PIDTYPE_PID);
		if (!task) {
			ret = -ESRCH;
			goto out_put;
		}
		seq_printf(m, "\nproc: %s\n", task->comm);
		put_task_struct(task);
		idr_for_each(&file_priv->context_idr, per_file_ctx,
			     (void *)(unsigned long)m);
	}

out_put:
	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
static int count_irq_waiters(struct drm_i915_private *i915)
{
	struct intel_engine_cs *ring;
	int count = 0;
	int i;

	for_each_ring(ring, i915, i)
		count += ring->irq_refcount;

	return count;
}

static int i915_rps_boost_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_file *file;

	seq_printf(m, "RPS enabled? %d\n", dev_priv->rps.enabled);
	seq_printf(m, "GPU busy? %d\n", dev_priv->mm.busy);
	seq_printf(m, "CPU waiting? %d\n", count_irq_waiters(dev_priv));
	seq_printf(m, "Frequency requested %d; min hard:%d, soft:%d; max soft:%d, hard:%d\n",
		   intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
		   intel_gpu_freq(dev_priv, dev_priv->rps.min_freq),
		   intel_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit),
		   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit),
		   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
	spin_lock(&dev_priv->rps.client_lock);
	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
		struct drm_i915_file_private *file_priv = file->driver_priv;
		struct task_struct *task;

		rcu_read_lock();
		task = pid_task(file->pid, PIDTYPE_PID);
		seq_printf(m, "%s [%d]: %d boosts%s\n",
			   task ? task->comm : "<unknown>",
			   task ? task->pid : -1,
			   file_priv->rps.boosts,
			   list_empty(&file_priv->rps.link) ? "" : ", active");
		rcu_read_unlock();
	}
	seq_printf(m, "Semaphore boosts: %d%s\n",
		   dev_priv->rps.semaphores.boosts,
		   list_empty(&dev_priv->rps.semaphores.link) ? "" : ", active");
	seq_printf(m, "MMIO flip boosts: %d%s\n",
		   dev_priv->rps.mmioflips.boosts,
		   list_empty(&dev_priv->rps.mmioflips.link) ? "" : ", active");
	seq_printf(m, "Kernel boosts: %d\n", dev_priv->rps.boosts);
	spin_unlock(&dev_priv->rps.client_lock);

	return 0;
}
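/*
 * Illustrative read of the node above (values made up; the per-client
 * lines come from pid_task() under rps.client_lock):
 *
 *	# cat /sys/kernel/debug/dri/0/i915_rps_boost_info
 *	RPS enabled? 1
 *	GPU busy? 0
 *	Xorg [501]: 2 boosts
 */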
static int i915_llc(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* Size calculation for LLC is a bit of a pain. Ignore for now. */
	seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev)));
	seq_printf(m, "eLLC: %zuMB\n", dev_priv->ellc_size);

	return 0;
}
static int i915_guc_load_status_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_i915_private *dev_priv = node->minor->dev->dev_private;
	struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
	u32 tmp, i;

	if (!HAS_GUC_UCODE(dev_priv->dev))
		return 0;

	seq_printf(m, "GuC firmware status:\n");
	seq_printf(m, "\tpath: %s\n",
		guc_fw->guc_fw_path);
	seq_printf(m, "\tfetch: %s\n",
		intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status));
	seq_printf(m, "\tload: %s\n",
		intel_guc_fw_status_repr(guc_fw->guc_fw_load_status));
	seq_printf(m, "\tversion wanted: %d.%d\n",
		guc_fw->guc_fw_major_wanted, guc_fw->guc_fw_minor_wanted);
	seq_printf(m, "\tversion found: %d.%d\n",
		guc_fw->guc_fw_major_found, guc_fw->guc_fw_minor_found);

	tmp = I915_READ(GUC_STATUS);

	seq_printf(m, "\nGuC status 0x%08x:\n", tmp);
	seq_printf(m, "\tBootrom status = 0x%x\n",
		(tmp & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT);
	seq_printf(m, "\tuKernel status = 0x%x\n",
		(tmp & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT);
	seq_printf(m, "\tMIA Core status = 0x%x\n",
		(tmp & GS_MIA_MASK) >> GS_MIA_SHIFT);
	seq_puts(m, "\nScratch registers:\n");
	for (i = 0; i < 16; i++)
		seq_printf(m, "\t%2d: \t0x%x\n", i, I915_READ(SOFT_SCRATCH(i)));

	return 0;
}
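/*
 * Example of checking the GuC firmware from userspace (path assumes the
 * usual debugfs mount point; output abbreviated and illustrative):
 *
 *	# cat /sys/kernel/debug/dri/0/i915_guc_load_status
 *	GuC firmware status:
 *	        fetch: SUCCESS
 *	        load: SUCCESS
 */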
static void i915_guc_client_info(struct seq_file *m,
				 struct drm_i915_private *dev_priv,
				 struct i915_guc_client *client)
{
	struct intel_engine_cs *ring;
	uint64_t tot = 0;
	uint32_t i;

	seq_printf(m, "\tPriority %d, GuC ctx index: %u, PD offset 0x%x\n",
		client->priority, client->ctx_index, client->proc_desc_offset);
	seq_printf(m, "\tDoorbell id %d, offset: 0x%x, cookie 0x%x\n",
		client->doorbell_id, client->doorbell_offset, client->cookie);
	seq_printf(m, "\tWQ size %d, offset: 0x%x, tail %d\n",
		client->wq_size, client->wq_offset, client->wq_tail);

	seq_printf(m, "\tFailed to queue: %u\n", client->q_fail);
	seq_printf(m, "\tFailed doorbell: %u\n", client->b_fail);
	seq_printf(m, "\tLast submission result: %d\n", client->retcode);

	for_each_ring(ring, dev_priv, i) {
		seq_printf(m, "\tSubmissions: %llu %s\n",
				client->submissions[i],
				ring->name);
		tot += client->submissions[i];
	}
	seq_printf(m, "\tTotal: %llu\n", tot);
}

static int i915_guc_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_guc guc;
	struct i915_guc_client client = {};
	struct intel_engine_cs *ring;
	enum intel_ring_id i;
	u64 total = 0;

	if (!HAS_GUC_SCHED(dev_priv->dev))
		return 0;

	/* Take a local copy of the GuC data, so we can dump it at leisure */
	spin_lock(&dev_priv->guc.host2guc_lock);
	guc = dev_priv->guc;
	if (guc.execbuf_client) {
		spin_lock(&guc.execbuf_client->wq_lock);
		client = *guc.execbuf_client;
		spin_unlock(&guc.execbuf_client->wq_lock);
	}
	spin_unlock(&dev_priv->guc.host2guc_lock);

	seq_printf(m, "GuC total action count: %llu\n", guc.action_count);
	seq_printf(m, "GuC action failure count: %u\n", guc.action_fail);
	seq_printf(m, "GuC last action command: 0x%x\n", guc.action_cmd);
	seq_printf(m, "GuC last action status: 0x%x\n", guc.action_status);
	seq_printf(m, "GuC last action error code: %d\n", guc.action_err);

	seq_printf(m, "\nGuC submissions:\n");
	for_each_ring(ring, dev_priv, i) {
		seq_printf(m, "\t%-24s: %10llu, last seqno 0x%08x %9d\n",
			ring->name, guc.submissions[i],
			guc.last_seqno[i], guc.last_seqno[i]);
		total += guc.submissions[i];
	}
	seq_printf(m, "\t%s: %llu\n", "Total", total);

	seq_printf(m, "\nGuC execbuf client @ %p:\n", guc.execbuf_client);
	i915_guc_client_info(m, dev_priv, &client);

	/* Add more as required ... */

	return 0;
}
static int i915_guc_log_dump(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *log_obj = dev_priv->guc.log_obj;
	u32 *log;
	int i = 0, pg;

	if (!log_obj)
		return 0;

	for (pg = 0; pg < log_obj->base.size / PAGE_SIZE; pg++) {
		log = kmap_atomic(i915_gem_object_get_page(log_obj, pg));

		for (i = 0; i < PAGE_SIZE / sizeof(u32); i += 4)
			seq_printf(m, "0x%08x 0x%08x 0x%08x 0x%08x\n",
				   *(log + i), *(log + i + 1),
				   *(log + i + 2), *(log + i + 3));

		kunmap_atomic(log);
	}

	seq_putc(m, '\n');

	return 0;
}
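/*
 * The GuC log object is dumped raw, four dwords per line, so the
 * simplest way to capture it for offline decoding is something like
 * (illustrative path):
 *
 *	# cat /sys/kernel/debug/dri/0/i915_guc_log_dump > guc.log
 *
 * The dump is empty when no log object was allocated, since the
 * function returns early in that case.
 */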
static int i915_edp_psr_status(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 psrperf = 0;
	u32 stat[3];
	enum pipe pipe;
	bool enabled = false;

	if (!HAS_PSR(dev)) {
		seq_puts(m, "PSR not supported\n");
		return 0;
	}

	intel_runtime_pm_get(dev_priv);

	mutex_lock(&dev_priv->psr.lock);
	seq_printf(m, "Sink_Support: %s\n", yesno(dev_priv->psr.sink_support));
	seq_printf(m, "Source_OK: %s\n", yesno(dev_priv->psr.source_ok));
	seq_printf(m, "Enabled: %s\n", yesno((bool)dev_priv->psr.enabled));
	seq_printf(m, "Active: %s\n", yesno(dev_priv->psr.active));
	seq_printf(m, "Busy frontbuffer bits: 0x%03x\n",
		   dev_priv->psr.busy_frontbuffer_bits);
	seq_printf(m, "Re-enable work scheduled: %s\n",
		   yesno(work_busy(&dev_priv->psr.work.work)));

	if (HAS_DDI(dev))
		enabled = I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE;
	else {
		for_each_pipe(dev_priv, pipe) {
			stat[pipe] = I915_READ(VLV_PSRSTAT(pipe)) &
				VLV_EDP_PSR_CURR_STATE_MASK;
			if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
			    (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE))
				enabled = true;
		}
	}
	seq_printf(m, "HW Enabled & Active bit: %s", yesno(enabled));

	if (!HAS_DDI(dev))
		for_each_pipe(dev_priv, pipe) {
			if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
			    (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE))
				seq_printf(m, " pipe %c", pipe_name(pipe));
		}
	seq_puts(m, "\n");

	/* CHV PSR has no kind of performance counter */
	if (HAS_DDI(dev)) {
		psrperf = I915_READ(EDP_PSR_PERF_CNT(dev)) &
			EDP_PSR_PERF_CNT_MASK;

		seq_printf(m, "Performance_Counter: %u\n", psrperf);
	}
	mutex_unlock(&dev_priv->psr.lock);

	intel_runtime_pm_put(dev_priv);
	return 0;
}
static int i915_sink_crc(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct intel_encoder *encoder;
	struct intel_connector *connector;
	struct intel_dp *intel_dp = NULL;
	int ret;
	u8 crc[6];

	drm_modeset_lock_all(dev);
	for_each_intel_connector(dev, connector) {

		if (connector->base.dpms != DRM_MODE_DPMS_ON)
			continue;

		if (!connector->base.encoder)
			continue;

		encoder = to_intel_encoder(connector->base.encoder);
		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		intel_dp = enc_to_intel_dp(&encoder->base);

		ret = intel_dp_sink_crc(intel_dp, crc);
		if (ret)
			goto out;

		seq_printf(m, "%02x%02x%02x%02x%02x%02x\n",
			   crc[0], crc[1], crc[2],
			   crc[3], crc[4], crc[5]);
		goto out;
	}
	ret = -ENODEV;
out:
	drm_modeset_unlock_all(dev);
	return ret;
}
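/*
 * The six bytes printed above are the sink-side CRC fetched over DPCD,
 * so an eDP panel test can compare them against the source CRC, e.g.
 * (illustrative value):
 *
 *	# cat /sys/kernel/debug/dri/0/i915_sink_crc_eDP1
 *	000f42a1b3c4
 */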
static int i915_energy_uJ(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u64 power;
	u32 units;

	if (INTEL_INFO(dev)->gen < 6)
		return -ENODEV;

	intel_runtime_pm_get(dev_priv);

	rdmsrl(MSR_RAPL_POWER_UNIT, power);
	power = (power & 0x1f00) >> 8;
	units = 1000000 / (1 << power); /* convert to uJ */
	power = I915_READ(MCH_SECP_NRG_STTS);
	power *= units;

	intel_runtime_pm_put(dev_priv);

	seq_printf(m, "%llu", (long long unsigned)power);
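/*
 * Worked example for the conversion above, assuming a typical Energy
 * Status Unit of 14 read from MSR_RAPL_POWER_UNIT bits 12:8:
 *
 *	units = 1000000 / (1 << 14);	/* ~61 uJ per counter tick */
 *
 * so a raw MCH_SECP_NRG_STTS value of N reads back as roughly N * 61.
 */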
	return 0;
}

static int i915_runtime_pm_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!HAS_RUNTIME_PM(dev)) {
		seq_puts(m, "not supported\n");
		return 0;
	}

	seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->mm.busy));
	seq_printf(m, "IRQs disabled: %s\n",
		   yesno(!intel_irqs_enabled(dev_priv)));
#ifdef CONFIG_PM
	seq_printf(m, "Usage count: %d\n",
		   atomic_read(&dev->dev->power.usage_count));
#else
	seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n");
#endif

	return 0;
}
static const char *power_domain_str(enum intel_display_power_domain domain)
{
	switch (domain) {
	case POWER_DOMAIN_PIPE_A:
		return "PIPE_A";
	case POWER_DOMAIN_PIPE_B:
		return "PIPE_B";
	case POWER_DOMAIN_PIPE_C:
		return "PIPE_C";
	case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
		return "PIPE_A_PANEL_FITTER";
	case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
		return "PIPE_B_PANEL_FITTER";
	case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
		return "PIPE_C_PANEL_FITTER";
	case POWER_DOMAIN_TRANSCODER_A:
		return "TRANSCODER_A";
	case POWER_DOMAIN_TRANSCODER_B:
		return "TRANSCODER_B";
	case POWER_DOMAIN_TRANSCODER_C:
		return "TRANSCODER_C";
	case POWER_DOMAIN_TRANSCODER_EDP:
		return "TRANSCODER_EDP";
	case POWER_DOMAIN_PORT_DDI_A_2_LANES:
		return "PORT_DDI_A_2_LANES";
	case POWER_DOMAIN_PORT_DDI_A_4_LANES:
		return "PORT_DDI_A_4_LANES";
	case POWER_DOMAIN_PORT_DDI_B_2_LANES:
		return "PORT_DDI_B_2_LANES";
	case POWER_DOMAIN_PORT_DDI_B_4_LANES:
		return "PORT_DDI_B_4_LANES";
	case POWER_DOMAIN_PORT_DDI_C_2_LANES:
		return "PORT_DDI_C_2_LANES";
	case POWER_DOMAIN_PORT_DDI_C_4_LANES:
		return "PORT_DDI_C_4_LANES";
	case POWER_DOMAIN_PORT_DDI_D_2_LANES:
		return "PORT_DDI_D_2_LANES";
	case POWER_DOMAIN_PORT_DDI_D_4_LANES:
		return "PORT_DDI_D_4_LANES";
	case POWER_DOMAIN_PORT_DDI_E_2_LANES:
		return "PORT_DDI_E_2_LANES";
	case POWER_DOMAIN_PORT_DSI:
		return "PORT_DSI";
	case POWER_DOMAIN_PORT_CRT:
		return "PORT_CRT";
	case POWER_DOMAIN_PORT_OTHER:
		return "PORT_OTHER";
	case POWER_DOMAIN_VGA:
		return "VGA";
	case POWER_DOMAIN_AUDIO:
		return "AUDIO";
	case POWER_DOMAIN_PLLS:
		return "PLLS";
	case POWER_DOMAIN_AUX_A:
		return "AUX_A";
	case POWER_DOMAIN_AUX_B:
		return "AUX_B";
	case POWER_DOMAIN_AUX_C:
		return "AUX_C";
	case POWER_DOMAIN_AUX_D:
		return "AUX_D";
	case POWER_DOMAIN_GMBUS:
		return "GMBUS";
	case POWER_DOMAIN_INIT:
		return "INIT";
	default:
		MISSING_CASE(domain);
		return "?";
	}
}

static int i915_power_domain_info(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	int i;

	mutex_lock(&power_domains->lock);

	seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
	for (i = 0; i < power_domains->power_well_count; i++) {
		struct i915_power_well *power_well;
		enum intel_display_power_domain power_domain;

		power_well = &power_domains->power_wells[i];
		seq_printf(m, "%-25s %d\n", power_well->name,
			   power_well->count);

		for (power_domain = 0; power_domain < POWER_DOMAIN_NUM;
		     power_domain++) {
			if (!(BIT(power_domain) & power_well->domains))
				continue;

			seq_printf(m, "  %-23s %d\n",
				 power_domain_str(power_domain),
				 power_domains->domain_use_count[power_domain]);
		}
	}

	mutex_unlock(&power_domains->lock);

	return 0;
}
static void intel_seq_print_mode(struct seq_file *m, int tabs,
				 struct drm_display_mode *mode)
{
	int i;

	for (i = 0; i < tabs; i++)
		seq_putc(m, '\t');

	seq_printf(m, "id %d:\"%s\" freq %d clock %d hdisp %d hss %d hse %d htot %d vdisp %d vss %d vse %d vtot %d type 0x%x flags 0x%x\n",
		   mode->base.id, mode->name,
		   mode->vrefresh, mode->clock,
		   mode->hdisplay, mode->hsync_start,
		   mode->hsync_end, mode->htotal,
		   mode->vdisplay, mode->vsync_start,
		   mode->vsync_end, mode->vtotal,
		   mode->type, mode->flags);
}

static void intel_encoder_info(struct seq_file *m,
			       struct intel_crtc *intel_crtc,
			       struct intel_encoder *intel_encoder)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_crtc *crtc = &intel_crtc->base;
	struct intel_connector *intel_connector;
	struct drm_encoder *encoder;

	encoder = &intel_encoder->base;
	seq_printf(m, "\tencoder %d: type: %s, connectors:\n",
		   encoder->base.id, encoder->name);
	for_each_connector_on_encoder(dev, encoder, intel_connector) {
		struct drm_connector *connector = &intel_connector->base;
		seq_printf(m, "\t\tconnector %d: type: %s, status: %s",
			   connector->base.id,
			   connector->name,
			   drm_get_connector_status_name(connector->status));
		if (connector->status == connector_status_connected) {
			struct drm_display_mode *mode = &crtc->mode;
			seq_printf(m, ", mode:\n");
			intel_seq_print_mode(m, 2, mode);
		} else {
			seq_putc(m, '\n');
		}
	}
}

static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_crtc *crtc = &intel_crtc->base;
	struct intel_encoder *intel_encoder;
	struct drm_plane_state *plane_state = crtc->primary->state;
	struct drm_framebuffer *fb = plane_state->fb;

	if (fb)
		seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n",
			   fb->base.id, plane_state->src_x >> 16,
			   plane_state->src_y >> 16, fb->width, fb->height);
	else
		seq_puts(m, "\tprimary plane disabled\n");
	for_each_encoder_on_crtc(dev, crtc, intel_encoder)
		intel_encoder_info(m, intel_crtc, intel_encoder);
}
static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
{
	struct drm_display_mode *mode = panel->fixed_mode;

	seq_printf(m, "\tfixed mode:\n");
	intel_seq_print_mode(m, 2, mode);
}

static void intel_dp_info(struct seq_file *m,
			  struct intel_connector *intel_connector)
{
	struct intel_encoder *intel_encoder = intel_connector->encoder;
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);

	seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
	seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
	if (intel_encoder->type == INTEL_OUTPUT_EDP)
		intel_panel_info(m, &intel_connector->panel);
}

static void intel_hdmi_info(struct seq_file *m,
			    struct intel_connector *intel_connector)
{
	struct intel_encoder *intel_encoder = intel_connector->encoder;
	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);

	seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio));
}

static void intel_lvds_info(struct seq_file *m,
			    struct intel_connector *intel_connector)
{
	intel_panel_info(m, &intel_connector->panel);
}
static void intel_connector_info(struct seq_file *m,
				 struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct intel_encoder *intel_encoder = intel_connector->encoder;
	struct drm_display_mode *mode;

	seq_printf(m, "connector %d: type %s, status: %s\n",
		   connector->base.id, connector->name,
		   drm_get_connector_status_name(connector->status));
	if (connector->status == connector_status_connected) {
		seq_printf(m, "\tname: %s\n", connector->display_info.name);
		seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
			   connector->display_info.width_mm,
			   connector->display_info.height_mm);
		seq_printf(m, "\tsubpixel order: %s\n",
			   drm_get_subpixel_order_name(connector->display_info.subpixel_order));
		seq_printf(m, "\tCEA rev: %d\n",
			   connector->display_info.cea_rev);
	}
	if (intel_encoder) {
		if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
		    intel_encoder->type == INTEL_OUTPUT_EDP)
			intel_dp_info(m, intel_connector);
		else if (intel_encoder->type == INTEL_OUTPUT_HDMI)
			intel_hdmi_info(m, intel_connector);
		else if (intel_encoder->type == INTEL_OUTPUT_LVDS)
			intel_lvds_info(m, intel_connector);
	}

	seq_printf(m, "\tmodes:\n");
	list_for_each_entry(mode, &connector->modes, head)
		intel_seq_print_mode(m, 2, mode);
}

static bool cursor_active(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 state;

	if (IS_845G(dev) || IS_I865G(dev))
		state = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
	else
		state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;

	return state;
}

static bool cursor_position(struct drm_device *dev, int pipe, int *x, int *y)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pos;

	pos = I915_READ(CURPOS(pipe));

	*x = (pos >> CURSOR_X_SHIFT) & CURSOR_POS_MASK;
	if (pos & (CURSOR_POS_SIGN << CURSOR_X_SHIFT))
		*x = -*x;

	*y = (pos >> CURSOR_Y_SHIFT) & CURSOR_POS_MASK;
	if (pos & (CURSOR_POS_SIGN << CURSOR_Y_SHIFT))
		*y = -*y;

	return cursor_active(dev, pipe);
}
static int i915_display_info(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc;
	struct drm_connector *connector;

	intel_runtime_pm_get(dev_priv);
	drm_modeset_lock_all(dev);
	seq_printf(m, "CRTC info\n");
	seq_printf(m, "---------\n");
	for_each_intel_crtc(dev, crtc) {
		bool active;
		struct intel_crtc_state *pipe_config;
		int x, y;

		pipe_config = to_intel_crtc_state(crtc->base.state);

		seq_printf(m, "CRTC %d: pipe: %c, active=%s (size=%dx%d)\n",
			   crtc->base.base.id, pipe_name(crtc->pipe),
			   yesno(pipe_config->base.active),
			   pipe_config->pipe_src_w, pipe_config->pipe_src_h);
		if (pipe_config->base.active) {
			intel_crtc_info(m, crtc);

			active = cursor_position(dev, crtc->pipe, &x, &y);
			seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x, active? %s\n",
				   yesno(crtc->cursor_base),
				   x, y, crtc->base.cursor->state->crtc_w,
				   crtc->base.cursor->state->crtc_h,
				   crtc->cursor_addr, yesno(active));
		}

		seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s \n",
			   yesno(!crtc->cpu_fifo_underrun_disabled),
			   yesno(!crtc->pch_fifo_underrun_disabled));
	}

	seq_printf(m, "\n");
	seq_printf(m, "Connector info\n");
	seq_printf(m, "--------------\n");
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		intel_connector_info(m, connector);
	}
	drm_modeset_unlock_all(dev);
	intel_runtime_pm_put(dev_priv);

	return 0;
}
static int i915_semaphore_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	int num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
	int i, j, ret;

	if (!i915_semaphore_is_enabled(dev)) {
		seq_puts(m, "Semaphores are disabled\n");
		return 0;
	}

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	if (IS_BROADWELL(dev)) {
		struct page *page;
		uint64_t *seqno;

		page = i915_gem_object_get_page(dev_priv->semaphore_obj, 0);

		seqno = (uint64_t *)kmap_atomic(page);
		for_each_ring(ring, dev_priv, i) {
			uint64_t offset;

			seq_printf(m, "%s\n", ring->name);

			seq_puts(m, "  Last signal:");
			for (j = 0; j < num_rings; j++) {
				offset = i * I915_NUM_RINGS + j;
				seq_printf(m, "0x%08llx (0x%02llx) ",
					   seqno[offset], offset * 8);
			}
			seq_putc(m, '\n');

			seq_puts(m, "  Last wait:  ");
			for (j = 0; j < num_rings; j++) {
				offset = i + (j * I915_NUM_RINGS);
				seq_printf(m, "0x%08llx (0x%02llx) ",
					   seqno[offset], offset * 8);
			}
			seq_putc(m, '\n');
		}
		kunmap_atomic(seqno);
	} else {
		seq_puts(m, "  Last signal:");
		for_each_ring(ring, dev_priv, i)
			for (j = 0; j < num_rings; j++)
				seq_printf(m, "0x%08x\n",
					   I915_READ(ring->semaphore.mbox.signal[j]));
		seq_putc(m, '\n');
	}

	seq_puts(m, "\nSync seqno:\n");
	for_each_ring(ring, dev_priv, i) {
		for (j = 0; j < num_rings; j++) {
			seq_printf(m, "  0x%08x ", ring->semaphore.sync_seqno[j]);
		}
		seq_putc(m, '\n');
	}
	seq_putc(m, '\n');

	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);
	return 0;
}
static int i915_shared_dplls_info(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	drm_modeset_lock_all(dev);
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

		seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->name, pll->id);
		seq_printf(m, " crtc_mask: 0x%08x, active: %d, on: %s\n",
			   pll->config.crtc_mask, pll->active, yesno(pll->on));
		seq_printf(m, " tracked hardware state:\n");
		seq_printf(m, " dpll:    0x%08x\n", pll->config.hw_state.dpll);
		seq_printf(m, " dpll_md: 0x%08x\n",
			   pll->config.hw_state.dpll_md);
		seq_printf(m, " fp0:     0x%08x\n", pll->config.hw_state.fp0);
		seq_printf(m, " fp1:     0x%08x\n", pll->config.hw_state.fp1);
		seq_printf(m, " wrpll:   0x%08x\n", pll->config.hw_state.wrpll);
	}
	drm_modeset_unlock_all(dev);

	return 0;
}

static int i915_wa_registers(struct seq_file *m, void *unused)
{
	int i;
	int ret;
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	intel_runtime_pm_get(dev_priv);

	seq_printf(m, "Workarounds applied: %d\n", dev_priv->workarounds.count);
	for (i = 0; i < dev_priv->workarounds.count; ++i) {
		u32 addr, mask, value, read;
		bool ok;

		addr = dev_priv->workarounds.reg[i].addr;
		mask = dev_priv->workarounds.reg[i].mask;
		value = dev_priv->workarounds.reg[i].value;
		read = I915_READ(addr);
		ok = (value & mask) == (read & mask);
		seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X, read: 0x%08x, status: %s\n",
			   addr, value, mask, read, ok ? "OK" : "FAIL");
	}

	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);
	return 0;
}
static int i915_ddb_info(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct skl_ddb_allocation *ddb;
	struct skl_ddb_entry *entry;
	enum pipe pipe;
	int plane;

	if (INTEL_INFO(dev)->gen < 9)
		return 0;

	drm_modeset_lock_all(dev);

	ddb = &dev_priv->wm.skl_hw.ddb;

	seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");

	for_each_pipe(dev_priv, pipe) {
		seq_printf(m, "Pipe %c\n", pipe_name(pipe));

		for_each_plane(dev_priv, pipe, plane) {
			entry = &ddb->plane[pipe][plane];
			seq_printf(m, "  Plane%-8d%8u%8u%8u\n", plane + 1,
				   entry->start, entry->end,
				   skl_ddb_entry_size(entry));
		}

		entry = &ddb->plane[pipe][PLANE_CURSOR];
		seq_printf(m, "  %-13s%8u%8u%8u\n", "Cursor", entry->start,
			   entry->end, skl_ddb_entry_size(entry));
	}

	drm_modeset_unlock_all(dev);

	return 0;
}
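/*
 * Start/End/Size above are in DDB blocks, the allocation granularity of
 * the SKL+ display data buffer, so the table simply mirrors what
 * skl_ddb_entry_size() computes: end - start.
 */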
static void drrs_status_per_crtc(struct seq_file *m,
		struct drm_device *dev, struct intel_crtc *intel_crtc)
{
	struct intel_encoder *intel_encoder;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_drrs *drrs = &dev_priv->drrs;
	int vrefresh = 0;

	for_each_encoder_on_crtc(dev, &intel_crtc->base, intel_encoder) {
		/* Encoder connected on this CRTC */
		switch (intel_encoder->type) {
		case INTEL_OUTPUT_EDP:
			seq_puts(m, "eDP:\n");
			break;
		case INTEL_OUTPUT_DSI:
			seq_puts(m, "DSI:\n");
			break;
		case INTEL_OUTPUT_HDMI:
			seq_puts(m, "HDMI:\n");
			break;
		case INTEL_OUTPUT_DISPLAYPORT:
			seq_puts(m, "DP:\n");
			break;
		default:
			seq_printf(m, "Other encoder (id=%d).\n",
						intel_encoder->type);
			return;
		}
	}

	if (dev_priv->vbt.drrs_type == STATIC_DRRS_SUPPORT)
		seq_puts(m, "\tVBT: DRRS_type: Static");
	else if (dev_priv->vbt.drrs_type == SEAMLESS_DRRS_SUPPORT)
		seq_puts(m, "\tVBT: DRRS_type: Seamless");
	else if (dev_priv->vbt.drrs_type == DRRS_NOT_SUPPORTED)
		seq_puts(m, "\tVBT: DRRS_type: None");
	else
		seq_puts(m, "\tVBT: DRRS_type: FIXME: Unrecognized Value");

	seq_puts(m, "\n\n");

	if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) {
		struct intel_panel *panel;

		mutex_lock(&drrs->mutex);
		/* DRRS Supported */
		seq_puts(m, "\tDRRS Supported: Yes\n");

		/* disable_drrs() will make drrs->dp NULL */
		if (!drrs->dp) {
			seq_puts(m, "Idleness DRRS: Disabled");
			mutex_unlock(&drrs->mutex);
			return;
		}

		panel = &drrs->dp->attached_connector->panel;
		seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X",
					drrs->busy_frontbuffer_bits);

		seq_puts(m, "\n\t\t");
		if (drrs->refresh_rate_type == DRRS_HIGH_RR) {
			seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n");
			vrefresh = panel->fixed_mode->vrefresh;
		} else if (drrs->refresh_rate_type == DRRS_LOW_RR) {
			seq_puts(m, "DRRS_State: DRRS_LOW_RR\n");
			vrefresh = panel->downclock_mode->vrefresh;
		} else {
			seq_printf(m, "DRRS_State: Unknown(%d)\n",
						drrs->refresh_rate_type);
			mutex_unlock(&drrs->mutex);
			return;
		}

		seq_printf(m, "\t\tVrefresh: %d", vrefresh);

		seq_puts(m, "\n\t\t");
		mutex_unlock(&drrs->mutex);
	} else {
		/* DRRS not supported. Print the VBT parameter*/
		seq_puts(m, "\tDRRS Supported : No");
	}
	seq_puts(m, "\n");
}
static int i915_drrs_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct intel_crtc *intel_crtc;
	int active_crtc_cnt = 0;

	for_each_intel_crtc(dev, intel_crtc) {
		drm_modeset_lock(&intel_crtc->base.mutex, NULL);

		if (intel_crtc->base.state->active) {
			active_crtc_cnt++;
			seq_printf(m, "\nCRTC %d:  ", active_crtc_cnt);

			drrs_status_per_crtc(m, dev, intel_crtc);
		}

		drm_modeset_unlock(&intel_crtc->base.mutex);
	}

	if (!active_crtc_cnt)
		seq_puts(m, "No active crtc found\n");

	return 0;
}

struct pipe_crc_info {
	const char *name;
	struct drm_device *dev;
	enum pipe pipe;
};

static int i915_dp_mst_info(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_encoder *encoder;
	struct intel_encoder *intel_encoder;
	struct intel_digital_port *intel_dig_port;

	drm_modeset_lock_all(dev);
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		intel_encoder = to_intel_encoder(encoder);
		if (intel_encoder->type != INTEL_OUTPUT_DISPLAYPORT)
			continue;
		intel_dig_port = enc_to_dig_port(encoder);
		if (!intel_dig_port->dp.can_mst)
			continue;

		drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
	}
	drm_modeset_unlock_all(dev);
	return 0;
}
static int i915_pipe_crc_open(struct inode *inode, struct file *filep)
{
	struct pipe_crc_info *info = inode->i_private;
	struct drm_i915_private *dev_priv = info->dev->dev_private;
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];

	if (info->pipe >= INTEL_INFO(info->dev)->num_pipes)
		return -ENODEV;

	spin_lock_irq(&pipe_crc->lock);

	if (pipe_crc->opened) {
		spin_unlock_irq(&pipe_crc->lock);
		return -EBUSY; /* already open */
	}

	pipe_crc->opened = true;
	filep->private_data = inode->i_private;

	spin_unlock_irq(&pipe_crc->lock);

	return 0;
}

static int i915_pipe_crc_release(struct inode *inode, struct file *filep)
{
	struct pipe_crc_info *info = inode->i_private;
	struct drm_i915_private *dev_priv = info->dev->dev_private;
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];

	spin_lock_irq(&pipe_crc->lock);
	pipe_crc->opened = false;
	spin_unlock_irq(&pipe_crc->lock);

	return 0;
}
/* (6 fields, 8 chars each, space separated (5) + '\n') */
#define PIPE_CRC_LINE_LEN	(6 * 8 + 5 + 1)
/* account for '\0' */
#define PIPE_CRC_BUFFER_LEN	(PIPE_CRC_LINE_LEN + 1)

static int pipe_crc_data_count(struct intel_pipe_crc *pipe_crc)
{
	assert_spin_locked(&pipe_crc->lock);
	return CIRC_CNT(pipe_crc->head, pipe_crc->tail,
			INTEL_PIPE_CRC_ENTRIES_NR);
}
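/*
 * So each record read from a pipe CRC file is a fixed-size ASCII line:
 * 6 fields * 8 chars + 5 separating spaces + '\n' = 54 bytes, and the
 * kernel-side buffer is one byte longer to hold snprintf()'s
 * terminating NUL.
 */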
static ssize_t
i915_pipe_crc_read(struct file *filep, char __user *user_buf, size_t count,
		   loff_t *pos)
{
	struct pipe_crc_info *info = filep->private_data;
	struct drm_device *dev = info->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
	char buf[PIPE_CRC_BUFFER_LEN];
	int n_entries;
	ssize_t bytes_read;

	/*
	 * Don't allow user space to provide buffers not big enough to hold
	 * a line of data.
	 */
	if (count < PIPE_CRC_LINE_LEN)
		return -EINVAL;

	if (pipe_crc->source == INTEL_PIPE_CRC_SOURCE_NONE)
		return 0;

	/* nothing to read */
	spin_lock_irq(&pipe_crc->lock);
	while (pipe_crc_data_count(pipe_crc) == 0) {
		int ret;

		if (filep->f_flags & O_NONBLOCK) {
			spin_unlock_irq(&pipe_crc->lock);
			return -EAGAIN;
		}

		ret = wait_event_interruptible_lock_irq(pipe_crc->wq,
				pipe_crc_data_count(pipe_crc), pipe_crc->lock);
		if (ret) {
			spin_unlock_irq(&pipe_crc->lock);
			return ret;
		}
	}

	/* We now have one or more entries to read */
	n_entries = count / PIPE_CRC_LINE_LEN;

	bytes_read = 0;
	while (n_entries > 0) {
		struct intel_pipe_crc_entry *entry =
			&pipe_crc->entries[pipe_crc->tail];
		int ret;

		if (CIRC_CNT(pipe_crc->head, pipe_crc->tail,
			     INTEL_PIPE_CRC_ENTRIES_NR) < 1)
			break;

		BUILD_BUG_ON_NOT_POWER_OF_2(INTEL_PIPE_CRC_ENTRIES_NR);
		pipe_crc->tail = (pipe_crc->tail + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);

		bytes_read += snprintf(buf, PIPE_CRC_BUFFER_LEN,
				       "%8u %8x %8x %8x %8x %8x\n",
				       entry->frame, entry->crc[0],
				       entry->crc[1], entry->crc[2],
				       entry->crc[3], entry->crc[4]);
		n_entries--;

		spin_unlock_irq(&pipe_crc->lock);

		ret = copy_to_user(user_buf, buf, PIPE_CRC_LINE_LEN);
		if (ret == PIPE_CRC_LINE_LEN)
			return -EFAULT;

		user_buf += PIPE_CRC_LINE_LEN;

		spin_lock_irq(&pipe_crc->lock);
	}

	spin_unlock_irq(&pipe_crc->lock);

	return bytes_read;
}
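/*
 * A minimal userspace reader, as a sketch assuming a CRC source has
 * already been selected through the control file (see
 * display_crc_ctl_write() below):
 *
 *	char line[PIPE_CRC_BUFFER_LEN];
 *	int fd = open("/sys/kernel/debug/dri/0/i915_pipe_A_crc", O_RDONLY);
 *	read(fd, line, sizeof(line) - 1);	// blocks until a CRC arrives
 *
 * Buffers shorter than PIPE_CRC_LINE_LEN are rejected with -EINVAL,
 * and O_NONBLOCK readers get -EAGAIN while the ring is empty, as coded
 * above.
 */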
static const struct file_operations i915_pipe_crc_fops = {
	.owner = THIS_MODULE,
	.open = i915_pipe_crc_open,
	.read = i915_pipe_crc_read,
	.release = i915_pipe_crc_release,
};

static struct pipe_crc_info i915_pipe_crc_data[I915_MAX_PIPES] = {
	{
		.name = "i915_pipe_A_crc",
		.pipe = PIPE_A,
	},
	{
		.name = "i915_pipe_B_crc",
		.pipe = PIPE_B,
	},
	{
		.name = "i915_pipe_C_crc",
		.pipe = PIPE_C,
	},
};

static int i915_pipe_crc_create(struct dentry *root, struct drm_minor *minor,
				enum pipe pipe)
{
	struct drm_device *dev = minor->dev;
	struct dentry *ent;
	struct pipe_crc_info *info = &i915_pipe_crc_data[pipe];

	info->dev = dev;
	ent = debugfs_create_file(info->name, S_IRUGO, root, info,
				  &i915_pipe_crc_fops);
	if (!ent)
		return -ENOMEM;

	return drm_add_fake_info_node(minor, ent, info);
}
static const char * const pipe_crc_sources[] = {
	"none",
	"plane1",
	"plane2",
	"pf",
	"pipe",
	"TV",
	"DP-B",
	"DP-C",
	"DP-D",
	"auto",
};

static const char *pipe_crc_source_name(enum intel_pipe_crc_source source)
{
	BUILD_BUG_ON(ARRAY_SIZE(pipe_crc_sources) != INTEL_PIPE_CRC_SOURCE_MAX);

	return pipe_crc_sources[source];
}

static int display_crc_ctl_show(struct seq_file *m, void *data)
{
	struct drm_device *dev = m->private;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	for (i = 0; i < I915_MAX_PIPES; i++)
		seq_printf(m, "%c %s\n", pipe_name(i),
			   pipe_crc_source_name(dev_priv->pipe_crc[i].source));

	return 0;
}

static int display_crc_ctl_open(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;

	return single_open(file, display_crc_ctl_show, dev);
}

static int i8xx_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
				 uint32_t *val)
{
	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
		*source = INTEL_PIPE_CRC_SOURCE_PIPE;

	switch (*source) {
	case INTEL_PIPE_CRC_SOURCE_PIPE:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_INCLUDE_BORDER_I8XX;
		break;
	case INTEL_PIPE_CRC_SOURCE_NONE:
		*val = 0;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static int i9xx_pipe_crc_auto_source(struct drm_device *dev, enum pipe pipe,
				     enum intel_pipe_crc_source *source)
{
	struct intel_encoder *encoder;
	struct intel_crtc *crtc;
	struct intel_digital_port *dig_port;
	int ret = 0;

	*source = INTEL_PIPE_CRC_SOURCE_PIPE;

	drm_modeset_lock_all(dev);
	for_each_intel_encoder(dev, encoder) {
		if (!encoder->base.crtc)
			continue;

		crtc = to_intel_crtc(encoder->base.crtc);

		if (crtc->pipe != pipe)
			continue;

		switch (encoder->type) {
		case INTEL_OUTPUT_TVOUT:
			*source = INTEL_PIPE_CRC_SOURCE_TV;
			break;
		case INTEL_OUTPUT_DISPLAYPORT:
		case INTEL_OUTPUT_EDP:
			dig_port = enc_to_dig_port(&encoder->base);
			switch (dig_port->port) {
			case PORT_B:
				*source = INTEL_PIPE_CRC_SOURCE_DP_B;
				break;
			case PORT_C:
				*source = INTEL_PIPE_CRC_SOURCE_DP_C;
				break;
			case PORT_D:
				*source = INTEL_PIPE_CRC_SOURCE_DP_D;
				break;
			default:
				WARN(1, "nonexisting DP port %c\n",
				     port_name(dig_port->port));
				break;
			}
			break;
		default:
			break;
		}
	}
	drm_modeset_unlock_all(dev);

	return ret;
}

static int vlv_pipe_crc_ctl_reg(struct drm_device *dev,
				enum pipe pipe,
				enum intel_pipe_crc_source *source,
				uint32_t *val)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool need_stable_symbols = false;

	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
		int ret = i9xx_pipe_crc_auto_source(dev, pipe, source);
		if (ret)
			return ret;
	}

	switch (*source) {
	case INTEL_PIPE_CRC_SOURCE_PIPE:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_VLV;
		break;
	case INTEL_PIPE_CRC_SOURCE_DP_B:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_VLV;
		need_stable_symbols = true;
		break;
	case INTEL_PIPE_CRC_SOURCE_DP_C:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_VLV;
		need_stable_symbols = true;
		break;
	case INTEL_PIPE_CRC_SOURCE_DP_D:
		if (!IS_CHERRYVIEW(dev))
			return -EINVAL;
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_D_VLV;
		need_stable_symbols = true;
		break;
	case INTEL_PIPE_CRC_SOURCE_NONE:
		*val = 0;
		break;
	default:
		return -EINVAL;
	}

	/*
	 * When the pipe CRC tap point is after the transcoders we need
	 * to tweak symbol-level features to produce a deterministic series of
	 * symbols for a given frame. We need to reset those features only once
	 * a frame (instead of every nth symbol):
	 *   - DC-balance: used to ensure a better clock recovery from the data
	 *     link
	 *   - DisplayPort scrambling: used for EMI reduction
	 */
	if (need_stable_symbols) {
		uint32_t tmp = I915_READ(PORT_DFT2_G4X);

		tmp |= DC_BALANCE_RESET_VLV;
		switch (pipe) {
		case PIPE_A:
			tmp |= PIPE_A_SCRAMBLE_RESET;
			break;
		case PIPE_B:
			tmp |= PIPE_B_SCRAMBLE_RESET;
			break;
		case PIPE_C:
			tmp |= PIPE_C_SCRAMBLE_RESET;
			break;
		default:
			return -EINVAL;
		}
		I915_WRITE(PORT_DFT2_G4X, tmp);
	}

	return 0;
}
static int i9xx_pipe_crc_ctl_reg(struct drm_device *dev,
				 enum pipe pipe,
				 enum intel_pipe_crc_source *source,
				 uint32_t *val)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool need_stable_symbols = false;

	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
		int ret = i9xx_pipe_crc_auto_source(dev, pipe, source);
		if (ret)
			return ret;
	}

	switch (*source) {
	case INTEL_PIPE_CRC_SOURCE_PIPE:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_I9XX;
		break;
	case INTEL_PIPE_CRC_SOURCE_TV:
		if (!SUPPORTS_TV(dev))
			return -EINVAL;
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_TV_PRE;
		break;
	case INTEL_PIPE_CRC_SOURCE_DP_B:
		if (!IS_G4X(dev))
			return -EINVAL;
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_G4X;
		need_stable_symbols = true;
		break;
	case INTEL_PIPE_CRC_SOURCE_DP_C:
		if (!IS_G4X(dev))
			return -EINVAL;
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_G4X;
		need_stable_symbols = true;
		break;
	case INTEL_PIPE_CRC_SOURCE_DP_D:
		if (!IS_G4X(dev))
			return -EINVAL;
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_D_G4X;
		need_stable_symbols = true;
		break;
	case INTEL_PIPE_CRC_SOURCE_NONE:
		*val = 0;
		break;
	default:
		return -EINVAL;
	}

	/*
	 * When the pipe CRC tap point is after the transcoders we need
	 * to tweak symbol-level features to produce a deterministic series of
	 * symbols for a given frame. We need to reset those features only once
	 * a frame (instead of every nth symbol):
	 *   - DC-balance: used to ensure a better clock recovery from the data
	 *     link
	 *   - DisplayPort scrambling: used for EMI reduction
	 */
	if (need_stable_symbols) {
		uint32_t tmp = I915_READ(PORT_DFT2_G4X);

		WARN_ON(!IS_G4X(dev));

		I915_WRITE(PORT_DFT_I9XX,
			   I915_READ(PORT_DFT_I9XX) | DC_BALANCE_RESET);

		if (pipe == PIPE_A)
			tmp |= PIPE_A_SCRAMBLE_RESET;
		else
			tmp |= PIPE_B_SCRAMBLE_RESET;

		I915_WRITE(PORT_DFT2_G4X, tmp);
	}

	return 0;
}
static void vlv_undo_pipe_scramble_reset(struct drm_device *dev,
					 enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t tmp = I915_READ(PORT_DFT2_G4X);

	switch (pipe) {
	case PIPE_A:
		tmp &= ~PIPE_A_SCRAMBLE_RESET;
		break;
	case PIPE_B:
		tmp &= ~PIPE_B_SCRAMBLE_RESET;
		break;
	case PIPE_C:
		tmp &= ~PIPE_C_SCRAMBLE_RESET;
		break;
	default:
		return;
	}
	if (!(tmp & PIPE_SCRAMBLE_RESET_MASK))
		tmp &= ~DC_BALANCE_RESET_VLV;
	I915_WRITE(PORT_DFT2_G4X, tmp);
}

static void g4x_undo_pipe_scramble_reset(struct drm_device *dev,
					 enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t tmp = I915_READ(PORT_DFT2_G4X);

	if (pipe == PIPE_A)
		tmp &= ~PIPE_A_SCRAMBLE_RESET;
	else
		tmp &= ~PIPE_B_SCRAMBLE_RESET;
	I915_WRITE(PORT_DFT2_G4X, tmp);

	if (!(tmp & PIPE_SCRAMBLE_RESET_MASK)) {
		I915_WRITE(PORT_DFT_I9XX,
			   I915_READ(PORT_DFT_I9XX) & ~DC_BALANCE_RESET);
	}
}

static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
				uint32_t *val)
{
	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
		*source = INTEL_PIPE_CRC_SOURCE_PIPE;

	switch (*source) {
	case INTEL_PIPE_CRC_SOURCE_PLANE1:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_ILK;
		break;
	case INTEL_PIPE_CRC_SOURCE_PLANE2:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_ILK;
		break;
	case INTEL_PIPE_CRC_SOURCE_PIPE:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_ILK;
		break;
	case INTEL_PIPE_CRC_SOURCE_NONE:
		*val = 0;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static void hsw_trans_edp_pipe_A_crc_wa(struct drm_device *dev, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc =
		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_A]);
	struct intel_crtc_state *pipe_config;
	struct drm_atomic_state *state;
	int ret = 0;

	drm_modeset_lock_all(dev);
	state = drm_atomic_state_alloc(dev);
	if (!state) {
		ret = -ENOMEM;
		goto out;
	}

	state->acquire_ctx = drm_modeset_legacy_acquire_ctx(&crtc->base);
	pipe_config = intel_atomic_get_crtc_state(state, crtc);
	if (IS_ERR(pipe_config)) {
		ret = PTR_ERR(pipe_config);
		goto out;
	}

	pipe_config->pch_pfit.force_thru = enable;
	if (pipe_config->cpu_transcoder == TRANSCODER_EDP &&
	    pipe_config->pch_pfit.enabled != enable)
		pipe_config->base.connectors_changed = true;

	ret = drm_atomic_commit(state);
out:
	drm_modeset_unlock_all(dev);
	WARN(ret, "Toggling workaround to %i returns %i\n", enable, ret);
	if (ret)
		drm_atomic_state_free(state);
}

static int ivb_pipe_crc_ctl_reg(struct drm_device *dev,
				enum pipe pipe,
				enum intel_pipe_crc_source *source,
				uint32_t *val)
{
	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
		*source = INTEL_PIPE_CRC_SOURCE_PF;

	switch (*source) {
	case INTEL_PIPE_CRC_SOURCE_PLANE1:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_IVB;
		break;
	case INTEL_PIPE_CRC_SOURCE_PLANE2:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_IVB;
		break;
	case INTEL_PIPE_CRC_SOURCE_PF:
		if (IS_HASWELL(dev) && pipe == PIPE_A)
			hsw_trans_edp_pipe_A_crc_wa(dev, true);

		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PF_IVB;
		break;
	case INTEL_PIPE_CRC_SOURCE_NONE:
		*val = 0;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
			       enum intel_pipe_crc_source source)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
	struct intel_crtc *crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev,
									pipe));
	u32 val = 0; /* shut up gcc */
	int ret;

	if (pipe_crc->source == source)
		return 0;

	/* forbid changing the source without going back to 'none' */
	if (pipe_crc->source && source)
		return -EINVAL;

	if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PIPE(pipe))) {
		DRM_DEBUG_KMS("Trying to capture CRC while pipe is off\n");
		return -EIO;
	}

	if (IS_GEN2(dev))
		ret = i8xx_pipe_crc_ctl_reg(&source, &val);
	else if (INTEL_INFO(dev)->gen < 5)
		ret = i9xx_pipe_crc_ctl_reg(dev, pipe, &source, &val);
	else if (IS_VALLEYVIEW(dev))
		ret = vlv_pipe_crc_ctl_reg(dev, pipe, &source, &val);
	else if (IS_GEN5(dev) || IS_GEN6(dev))
		ret = ilk_pipe_crc_ctl_reg(&source, &val);
	else
		ret = ivb_pipe_crc_ctl_reg(dev, pipe, &source, &val);

	if (ret != 0)
		return ret;

	/* none -> real source transition */
	if (source) {
		struct intel_pipe_crc_entry *entries;

		DRM_DEBUG_DRIVER("collecting CRCs for pipe %c, %s\n",
				 pipe_name(pipe), pipe_crc_source_name(source));

		entries = kcalloc(INTEL_PIPE_CRC_ENTRIES_NR,
				  sizeof(pipe_crc->entries[0]),
				  GFP_KERNEL);
		if (!entries)
			return -ENOMEM;

		/*
		 * When IPS gets enabled, the pipe CRC changes. Since IPS gets
		 * enabled and disabled dynamically based on package C states,
		 * user space can't make reliable use of the CRCs, so let's just
		 * completely disable it.
		 */
		hsw_disable_ips(crtc);

		spin_lock_irq(&pipe_crc->lock);
		kfree(pipe_crc->entries);
		pipe_crc->entries = entries;
		pipe_crc->head = 0;
		pipe_crc->tail = 0;
		spin_unlock_irq(&pipe_crc->lock);
	}

	pipe_crc->source = source;

	I915_WRITE(PIPE_CRC_CTL(pipe), val);
	POSTING_READ(PIPE_CRC_CTL(pipe));

	/* real source -> none transition */
	if (source == INTEL_PIPE_CRC_SOURCE_NONE) {
		struct intel_pipe_crc_entry *entries;
		struct intel_crtc *crtc =
			to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

		DRM_DEBUG_DRIVER("stopping CRCs for pipe %c\n",
				 pipe_name(pipe));

		drm_modeset_lock(&crtc->base.mutex, NULL);
		if (crtc->base.state->active)
			intel_wait_for_vblank(dev, pipe);
		drm_modeset_unlock(&crtc->base.mutex);

		spin_lock_irq(&pipe_crc->lock);
		entries = pipe_crc->entries;
		pipe_crc->entries = NULL;
		pipe_crc->head = 0;
		pipe_crc->tail = 0;
		spin_unlock_irq(&pipe_crc->lock);

		kfree(entries);

		if (IS_G4X(dev))
			g4x_undo_pipe_scramble_reset(dev, pipe);
		else if (IS_VALLEYVIEW(dev))
			vlv_undo_pipe_scramble_reset(dev, pipe);
		else if (IS_HASWELL(dev) && pipe == PIPE_A)
			hsw_trans_edp_pipe_A_crc_wa(dev, false);

		hsw_enable_ips(crtc);
	}

	return 0;
}
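/*
 * Note the state machine enforced above: a pipe's CRC source may only
 * change by going through "none" first. An illustrative session using
 * the control file parsed below (debugfs mount point may differ):
 *
 *	# echo "pipe A pipe" > /sys/kernel/debug/dri/0/i915_display_crc_ctl
 *	# cat /sys/kernel/debug/dri/0/i915_pipe_A_crc
 *	# echo "pipe A none" > /sys/kernel/debug/dri/0/i915_display_crc_ctl
 */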
3976 * Parse pipe CRC command strings:
3977 * command: wsp* object wsp+ name wsp+ source wsp*
3980 * source: (none | plane1 | plane2 | pf)
3981 * wsp: (#0x20 | #0x9 | #0xA)+
3984 * "pipe A plane1" -> Start CRC computations on plane1 of pipe A
3985 * "pipe A none" -> Stop CRC
3987 static int display_crc_ctl_tokenize(char *buf
, char *words
[], int max_words
)
3994 /* skip leading white space */
3995 buf
= skip_spaces(buf
);
3997 break; /* end of buffer */
3999 /* find end of word */
4000 for (end
= buf
; *end
&& !isspace(*end
); end
++)
4003 if (n_words
== max_words
) {
4004 DRM_DEBUG_DRIVER("too many words, allowed <= %d\n",
4006 return -EINVAL
; /* ran out of words[] before bytes */
4011 words
[n_words
++] = buf
;
4018 enum intel_pipe_crc_object
{
4019 PIPE_CRC_OBJECT_PIPE
,
4022 static const char * const pipe_crc_objects
[] = {
4027 display_crc_ctl_parse_object(const char *buf
, enum intel_pipe_crc_object
*o
)
4031 for (i
= 0; i
< ARRAY_SIZE(pipe_crc_objects
); i
++)
4032 if (!strcmp(buf
, pipe_crc_objects
[i
])) {
4040 static int display_crc_ctl_parse_pipe(const char *buf
, enum pipe
*pipe
)
4042 const char name
= buf
[0];
4044 if (name
< 'A' || name
>= pipe_name(I915_MAX_PIPES
))
4053 display_crc_ctl_parse_source(const char *buf
, enum intel_pipe_crc_source
*s
)
4057 for (i
= 0; i
< ARRAY_SIZE(pipe_crc_sources
); i
++)
4058 if (!strcmp(buf
, pipe_crc_sources
[i
])) {
4066 static int display_crc_ctl_parse(struct drm_device
*dev
, char *buf
, size_t len
)
4070 char *words
[N_WORDS
];
4072 enum intel_pipe_crc_object object
;
4073 enum intel_pipe_crc_source source
;
4075 n_words
= display_crc_ctl_tokenize(buf
, words
, N_WORDS
);
4076 if (n_words
!= N_WORDS
) {
4077 DRM_DEBUG_DRIVER("tokenize failed, a command is %d words\n",
4082 if (display_crc_ctl_parse_object(words
[0], &object
) < 0) {
4083 DRM_DEBUG_DRIVER("unknown object %s\n", words
[0]);
4087 if (display_crc_ctl_parse_pipe(words
[1], &pipe
) < 0) {
4088 DRM_DEBUG_DRIVER("unknown pipe %s\n", words
[1]);
4092 if (display_crc_ctl_parse_source(words
[2], &source
) < 0) {
4093 DRM_DEBUG_DRIVER("unknown source %s\n", words
[2]);
4097 return pipe_crc_set_source(dev
, pipe
, source
);
4100 static ssize_t
display_crc_ctl_write(struct file
*file
, const char __user
*ubuf
,
4101 size_t len
, loff_t
*offp
)
4103 struct seq_file
*m
= file
->private_data
;
4104 struct drm_device
*dev
= m
->private;
4111 if (len
> PAGE_SIZE
- 1) {
4112 DRM_DEBUG_DRIVER("expected <%lu bytes into pipe crc control\n",
4117 tmpbuf
= kmalloc(len
+ 1, GFP_KERNEL
);
4121 if (copy_from_user(tmpbuf
, ubuf
, len
)) {
4127 ret
= display_crc_ctl_parse(dev
, tmpbuf
, len
);
4138 static const struct file_operations i915_display_crc_ctl_fops
= {
4139 .owner
= THIS_MODULE
,
4140 .open
= display_crc_ctl_open
,
4142 .llseek
= seq_lseek
,
4143 .release
= single_release
,
4144 .write
= display_crc_ctl_write
4147 static ssize_t
i915_displayport_test_active_write(struct file
*file
,
4148 const char __user
*ubuf
,
4149 size_t len
, loff_t
*offp
)
4153 struct drm_device
*dev
;
4154 struct drm_connector
*connector
;
4155 struct list_head
*connector_list
;
4156 struct intel_dp
*intel_dp
;
4159 dev
= ((struct seq_file
*)file
->private_data
)->private;
4161 connector_list
= &dev
->mode_config
.connector_list
;
4166 input_buffer
= kmalloc(len
+ 1, GFP_KERNEL
);
4170 if (copy_from_user(input_buffer
, ubuf
, len
)) {
4175 input_buffer
[len
] = '\0';
4176 DRM_DEBUG_DRIVER("Copied %d bytes from user\n", (unsigned int)len
);
4178 list_for_each_entry(connector
, connector_list
, head
) {
4180 if (connector
->connector_type
!=
4181 DRM_MODE_CONNECTOR_DisplayPort
)
4184 if (connector
->status
== connector_status_connected
&&
4185 connector
->encoder
!= NULL
) {
4186 intel_dp
= enc_to_intel_dp(connector
->encoder
);
4187 status
= kstrtoint(input_buffer
, 10, &val
);
4190 DRM_DEBUG_DRIVER("Got %d for test active\n", val
);
4191 /* To prevent erroneous activation of the compliance
4192 * testing code, only accept an actual value of 1 here
4195 intel_dp
->compliance_test_active
= 1;
4197 intel_dp
->compliance_test_active
= 0;
4201 kfree(input_buffer
);
4209 static int i915_displayport_test_active_show(struct seq_file
*m
, void *data
)
4211 struct drm_device
*dev
= m
->private;
4212 struct drm_connector
*connector
;
4213 struct list_head
*connector_list
= &dev
->mode_config
.connector_list
;
4214 struct intel_dp
*intel_dp
;
4216 list_for_each_entry(connector
, connector_list
, head
) {
4218 if (connector
->connector_type
!=
4219 DRM_MODE_CONNECTOR_DisplayPort
)
4222 if (connector
->status
== connector_status_connected
&&
4223 connector
->encoder
!= NULL
) {
4224 intel_dp
= enc_to_intel_dp(connector
->encoder
);
4225 if (intel_dp
->compliance_test_active
)
4236 static int i915_displayport_test_active_open(struct inode
*inode
,
4239 struct drm_device
*dev
= inode
->i_private
;
4241 return single_open(file
, i915_displayport_test_active_show
, dev
);
4244 static const struct file_operations i915_displayport_test_active_fops
= {
4245 .owner
= THIS_MODULE
,
4246 .open
= i915_displayport_test_active_open
,
4248 .llseek
= seq_lseek
,
4249 .release
= single_release
,
4250 .write
= i915_displayport_test_active_write
4253 static int i915_displayport_test_data_show(struct seq_file
*m
, void *data
)
4255 struct drm_device
*dev
= m
->private;
4256 struct drm_connector
*connector
;
4257 struct list_head
*connector_list
= &dev
->mode_config
.connector_list
;
4258 struct intel_dp
*intel_dp
;
4260 list_for_each_entry(connector
, connector_list
, head
) {
4262 if (connector
->connector_type
!=
4263 DRM_MODE_CONNECTOR_DisplayPort
)
4266 if (connector
->status
== connector_status_connected
&&
4267 connector
->encoder
!= NULL
) {
4268 intel_dp
= enc_to_intel_dp(connector
->encoder
);
4269 seq_printf(m
, "%lx", intel_dp
->compliance_test_data
);
4276 static int i915_displayport_test_data_open(struct inode
*inode
,
4279 struct drm_device
*dev
= inode
->i_private
;
4281 return single_open(file
, i915_displayport_test_data_show
, dev
);
4284 static const struct file_operations i915_displayport_test_data_fops
= {
4285 .owner
= THIS_MODULE
,
4286 .open
= i915_displayport_test_data_open
,
4288 .llseek
= seq_lseek
,
4289 .release
= single_release
4292 static int i915_displayport_test_type_show(struct seq_file
*m
, void *data
)
4294 struct drm_device
*dev
= m
->private;
4295 struct drm_connector
*connector
;
4296 struct list_head
*connector_list
= &dev
->mode_config
.connector_list
;
4297 struct intel_dp
*intel_dp
;
4299 list_for_each_entry(connector
, connector_list
, head
) {
4301 if (connector
->connector_type
!=
4302 DRM_MODE_CONNECTOR_DisplayPort
)
4305 if (connector
->status
== connector_status_connected
&&
4306 connector
->encoder
!= NULL
) {
4307 intel_dp
= enc_to_intel_dp(connector
->encoder
);
4308 seq_printf(m
, "%02lx", intel_dp
->compliance_test_type
);
4316 static int i915_displayport_test_type_open(struct inode
*inode
,
4319 struct drm_device
*dev
= inode
->i_private
;
4321 return single_open(file
, i915_displayport_test_type_show
, dev
);
4324 static const struct file_operations i915_displayport_test_type_fops
= {
4325 .owner
= THIS_MODULE
,
4326 .open
= i915_displayport_test_type_open
,
4328 .llseek
= seq_lseek
,
4329 .release
= single_release
static void wm_latency_show(struct seq_file *m, const uint16_t wm[8])
{
	struct drm_device *dev = m->private;
	int level;
	int num_levels;

	if (IS_CHERRYVIEW(dev))
		num_levels = 3;
	else if (IS_VALLEYVIEW(dev))
		num_levels = 1;
	else
		num_levels = ilk_wm_max_level(dev) + 1;

	drm_modeset_lock_all(dev);

	for (level = 0; level < num_levels; level++) {
		unsigned int latency = wm[level];

		/*
		 * - WM1+ latency values in 0.5us units
		 * - latencies are in us on gen9/vlv/chv
		 */
		if (INTEL_INFO(dev)->gen >= 9 || IS_VALLEYVIEW(dev))
			latency *= 10;
		else if (level > 0)
			latency *= 5;

		seq_printf(m, "WM%d %u (%u.%u usec)\n",
			   level, wm[level], latency / 10, latency % 10);
	}

	drm_modeset_unlock_all(dev);
}
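
/*
 * Worked example of the scaling above: on pre-gen9, non-vlv/chv hardware
 * the WM1+ values are stored in 0.5us units, so a raw WM1 value of 4
 * becomes 4 * 5 = 20 tenths of a microsecond and is printed as
 * "WM1 4 (2.0 usec)". On gen9/vlv/chv every level is already in
 * microseconds, hence the uniform *10 conversion to tenths.
 */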
static int pri_wm_latency_show(struct seq_file *m, void *data)
{
	struct drm_device *dev = m->private;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const uint16_t *latencies;

	if (INTEL_INFO(dev)->gen >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = to_i915(dev)->wm.pri_latency;

	wm_latency_show(m, latencies);

	return 0;
}
static int spr_wm_latency_show(struct seq_file *m, void *data)
{
	struct drm_device *dev = m->private;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const uint16_t *latencies;

	if (INTEL_INFO(dev)->gen >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = to_i915(dev)->wm.spr_latency;

	wm_latency_show(m, latencies);

	return 0;
}
static int cur_wm_latency_show(struct seq_file *m, void *data)
{
	struct drm_device *dev = m->private;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const uint16_t *latencies;

	if (INTEL_INFO(dev)->gen >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = to_i915(dev)->wm.cur_latency;

	wm_latency_show(m, latencies);

	return 0;
}
static int pri_wm_latency_open(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;

	if (INTEL_INFO(dev)->gen < 5)
		return -ENODEV;

	return single_open(file, pri_wm_latency_show, dev);
}
static int spr_wm_latency_open(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;

	if (HAS_GMCH_DISPLAY(dev))
		return -ENODEV;

	return single_open(file, spr_wm_latency_show, dev);
}
static int cur_wm_latency_open(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;

	if (HAS_GMCH_DISPLAY(dev))
		return -ENODEV;

	return single_open(file, cur_wm_latency_show, dev);
}
static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
				size_t len, loff_t *offp, uint16_t wm[8])
{
	struct seq_file *m = file->private_data;
	struct drm_device *dev = m->private;
	uint16_t new[8] = { 0 };
	int num_levels;
	int level;
	int ret;
	char tmp[32];

	if (IS_CHERRYVIEW(dev))
		num_levels = 3;
	else if (IS_VALLEYVIEW(dev))
		num_levels = 1;
	else
		num_levels = ilk_wm_max_level(dev) + 1;

	if (len >= sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(tmp, ubuf, len))
		return -EFAULT;

	tmp[len] = '\0';

	ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
		     &new[0], &new[1], &new[2], &new[3],
		     &new[4], &new[5], &new[6], &new[7]);
	if (ret != num_levels)
		return -EINVAL;

	drm_modeset_lock_all(dev);

	for (level = 0; level < num_levels; level++)
		wm[level] = new[level];

	drm_modeset_unlock_all(dev);

	return len;
}
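
/*
 * Usage sketch (path assumes debugfs at /sys/kernel/debug, DRM minor 0):
 * the writer expects exactly num_levels space-separated u16 values, e.g.
 * for a 5-level ILK-style platform:
 *
 *   echo "2 4 10 20 40" > /sys/kernel/debug/dri/0/i915_pri_wm_latency
 *
 * If sscanf() does not match num_levels conversions the write is rejected
 * with -EINVAL.
 */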
static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
				    size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_device *dev = m->private;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint16_t *latencies;

	if (INTEL_INFO(dev)->gen >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = to_i915(dev)->wm.pri_latency;

	return wm_latency_write(file, ubuf, len, offp, latencies);
}
static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
				    size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_device *dev = m->private;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint16_t *latencies;

	if (INTEL_INFO(dev)->gen >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = to_i915(dev)->wm.spr_latency;

	return wm_latency_write(file, ubuf, len, offp, latencies);
}
static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
				    size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_device *dev = m->private;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint16_t *latencies;

	if (INTEL_INFO(dev)->gen >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = to_i915(dev)->wm.cur_latency;

	return wm_latency_write(file, ubuf, len, offp, latencies);
}
static const struct file_operations i915_pri_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = pri_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = pri_wm_latency_write
};
static const struct file_operations i915_spr_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = spr_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = spr_wm_latency_write
};
static const struct file_operations i915_cur_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = cur_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = cur_wm_latency_write
};
static int
i915_wedged_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;

	*val = atomic_read(&dev_priv->gpu_error.reset_counter);

	return 0;
}
static int
i915_wedged_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;

	/*
	 * There is no safeguard against this debugfs entry colliding
	 * with the hangcheck calling same i915_handle_error() in
	 * parallel, causing an explosion. For now we assume that the
	 * test harness is responsible enough not to inject gpu hangs
	 * while it is writing to 'i915_wedged'
	 */

	if (i915_reset_in_progress(&dev_priv->gpu_error))
		return -EAGAIN;

	intel_runtime_pm_get(dev_priv);

	i915_handle_error(dev, val,
			  "Manually setting wedged to %llu", val);

	intel_runtime_pm_put(dev_priv);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
			i915_wedged_get, i915_wedged_set,
			"%llu\n");
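
/*
 * Usage sketch (path assumptions as above): writing any value injects a
 * GPU hang via i915_handle_error(), reading returns the current
 * reset_counter, e.g.
 *
 *   echo 1 > /sys/kernel/debug/dri/0/i915_wedged
 *   cat /sys/kernel/debug/dri/0/i915_wedged
 */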
static int
i915_ring_stop_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;

	*val = dev_priv->gpu_error.stop_rings;

	return 0;
}

static int
i915_ring_stop_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	DRM_DEBUG_DRIVER("Stopping rings 0x%08llx\n", val);

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	dev_priv->gpu_error.stop_rings = val;
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_ring_stop_fops,
			i915_ring_stop_get, i915_ring_stop_set,
			"0x%08llx\n");
static int
i915_ring_missed_irq_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;

	*val = dev_priv->gpu_error.missed_irq_rings;

	return 0;
}

static int
i915_ring_missed_irq_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	/* Lock against concurrent debugfs callers */
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	dev_priv->gpu_error.missed_irq_rings = val;
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_ring_missed_irq_fops,
			i915_ring_missed_irq_get, i915_ring_missed_irq_set,
			"0x%08llx\n");
static int
i915_ring_test_irq_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;

	*val = dev_priv->gpu_error.test_irq_rings;

	return 0;
}

static int
i915_ring_test_irq_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	DRM_DEBUG_DRIVER("Masking interrupts on rings 0x%08llx\n", val);

	/* Lock against concurrent debugfs callers */
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	dev_priv->gpu_error.test_irq_rings = val;
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_ring_test_irq_fops,
			i915_ring_test_irq_get, i915_ring_test_irq_set,
			"0x%08llx\n");
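
/*
 * Note: i915_ring_stop, i915_ring_missed_irq and i915_ring_test_irq all
 * take a bitmask of ring bits. Masking interrupt delivery on a ring via
 * i915_ring_test_irq lets a test harness provoke the missed-interrupt
 * handling in hangcheck deterministically.
 */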
#define DROP_UNBOUND 0x1
#define DROP_BOUND 0x2
#define DROP_RETIRE 0x4
#define DROP_ACTIVE 0x8
#define DROP_ALL (DROP_UNBOUND | \
		  DROP_BOUND | \
		  DROP_RETIRE | \
		  DROP_ACTIVE)
static int
i915_drop_caches_get(void *data, u64 *val)
{
	*val = DROP_ALL;

	return 0;
}

static int
i915_drop_caches_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	DRM_DEBUG("Dropping caches: 0x%08llx\n", val);

	/* No need to check and wait for gpu resets, only libdrm auto-restarts
	 * on ioctls on -EAGAIN. */
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	if (val & DROP_ACTIVE) {
		ret = i915_gpu_idle(dev);
		if (ret)
			goto unlock;
	}

	if (val & (DROP_RETIRE | DROP_ACTIVE))
		i915_gem_retire_requests(dev);

	if (val & DROP_BOUND)
		i915_gem_shrink(dev_priv, LONG_MAX, I915_SHRINK_BOUND);

	if (val & DROP_UNBOUND)
		i915_gem_shrink(dev_priv, LONG_MAX, I915_SHRINK_UNBOUND);

unlock:
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
			i915_drop_caches_get, i915_drop_caches_set,
			"0x%08llx\n");
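
/*
 * Usage sketch (path assumptions as above): the DROP_* flags combine into
 * a bitmask, so dropping everything is
 *
 *   echo 0xf > /sys/kernel/debug/dri/0/i915_gem_drop_caches
 *
 * i.e. DROP_UNBOUND | DROP_BOUND | DROP_RETIRE | DROP_ACTIVE; reading the
 * file reports DROP_ALL as the supported mask.
 */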
static int
i915_max_freq_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	if (INTEL_INFO(dev)->gen < 6)
		return -ENODEV;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
	if (ret)
		return ret;

	*val = intel_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit);
	mutex_unlock(&dev_priv->rps.hw_lock);

	return 0;
}
static int
i915_max_freq_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 hw_max, hw_min;
	int ret;

	if (INTEL_INFO(dev)->gen < 6)
		return -ENODEV;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	DRM_DEBUG_DRIVER("Manually setting max freq to %llu\n", val);

	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
	if (ret)
		return ret;

	/*
	 * Turbo will still be enabled, but won't go above the set value.
	 */
	val = intel_freq_opcode(dev_priv, val);

	hw_max = dev_priv->rps.max_freq;
	hw_min = dev_priv->rps.min_freq;

	if (val < hw_min || val > hw_max || val < dev_priv->rps.min_freq_softlimit) {
		mutex_unlock(&dev_priv->rps.hw_lock);
		return -EINVAL;
	}

	dev_priv->rps.max_freq_softlimit = val;

	intel_set_rps(dev, val);

	mutex_unlock(&dev_priv->rps.hw_lock);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_max_freq_fops,
			i915_max_freq_get, i915_max_freq_set,
			"%llu\n");
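
/*
 * Note: values exchanged through i915_max_freq (and i915_min_freq below)
 * are in MHz; intel_freq_opcode() converts MHz into the hardware units
 * used by the rps limits and intel_gpu_freq() converts back for reads.
 * The new softlimit must lie within [min_freq, max_freq] and must not
 * cross the opposite softlimit.
 */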
static int
i915_min_freq_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	if (INTEL_INFO(dev)->gen < 6)
		return -ENODEV;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
	if (ret)
		return ret;

	*val = intel_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit);
	mutex_unlock(&dev_priv->rps.hw_lock);

	return 0;
}
static int
i915_min_freq_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 hw_max, hw_min;
	int ret;

	if (INTEL_INFO(dev)->gen < 6)
		return -ENODEV;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	DRM_DEBUG_DRIVER("Manually setting min freq to %llu\n", val);

	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
	if (ret)
		return ret;

	/*
	 * Turbo will still be enabled, but won't go below the set value.
	 */
	val = intel_freq_opcode(dev_priv, val);

	hw_max = dev_priv->rps.max_freq;
	hw_min = dev_priv->rps.min_freq;

	if (val < hw_min || val > hw_max || val > dev_priv->rps.max_freq_softlimit) {
		mutex_unlock(&dev_priv->rps.hw_lock);
		return -EINVAL;
	}

	dev_priv->rps.min_freq_softlimit = val;

	intel_set_rps(dev, val);

	mutex_unlock(&dev_priv->rps.hw_lock);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_min_freq_fops,
			i915_min_freq_get, i915_min_freq_set,
			"%llu\n");
static int
i915_cache_sharing_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 snpcr;
	int ret;

	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
		return -ENODEV;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);

	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev_priv->dev->struct_mutex);

	*val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;

	return 0;
}
static int
i915_cache_sharing_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 snpcr;

	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
		return -ENODEV;

	if (val > 3)
		return -EINVAL;

	intel_runtime_pm_get(dev_priv);
	DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);

	/* Update the cache sharing policy here as well */
	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
	snpcr &= ~GEN6_MBC_SNPCR_MASK;
	snpcr |= (val << GEN6_MBC_SNPCR_SHIFT);
	I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);

	intel_runtime_pm_put(dev_priv);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
			i915_cache_sharing_get, i915_cache_sharing_set,
			"%llu\n");
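
/*
 * Note: the cache sharing policy is the field in GEN6_MBCUNIT_SNPCR
 * selected by GEN6_MBC_SNPCR_MASK/GEN6_MBC_SNPCR_SHIFT, which is why the
 * setter only accepts the values 0-3.
 */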
struct sseu_dev_status {
	unsigned int slice_total;
	unsigned int subslice_total;
	unsigned int subslice_per_slice;
	unsigned int eu_total;
	unsigned int eu_per_subslice;
};
static void cherryview_sseu_device_status(struct drm_device *dev,
					  struct sseu_dev_status *stat)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ss_max = 2;
	int ss;
	u32 sig1[ss_max], sig2[ss_max];

	sig1[0] = I915_READ(CHV_POWER_SS0_SIG1);
	sig1[1] = I915_READ(CHV_POWER_SS1_SIG1);
	sig2[0] = I915_READ(CHV_POWER_SS0_SIG2);
	sig2[1] = I915_READ(CHV_POWER_SS1_SIG2);

	for (ss = 0; ss < ss_max; ss++) {
		unsigned int eu_cnt;

		if (sig1[ss] & CHV_SS_PG_ENABLE)
			/* skip disabled subslice */
			continue;

		stat->slice_total = 1;
		stat->subslice_per_slice++;
		eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) +
			 ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) +
			 ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) +
			 ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2);
		stat->eu_total += eu_cnt;
		stat->eu_per_subslice = max(stat->eu_per_subslice, eu_cnt);
	}
	stat->subslice_total = stat->subslice_per_slice;
}
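
/*
 * Worked example: each CHV_EU*_PG_ENABLE bit gates a pair of EUs, so a
 * fully enabled subslice contributes 2 + 2 + 2 + 2 = 8 EUs. With both
 * subslices powered this reports slice_total = 1, subslice_total = 2 and
 * eu_total = 16.
 */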
static void gen9_sseu_device_status(struct drm_device *dev,
				    struct sseu_dev_status *stat)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int s_max = 3, ss_max = 4;
	int s, ss;
	u32 s_reg[s_max], eu_reg[2*s_max], eu_mask[2];

	/* BXT has a single slice and at most 3 subslices. */
	if (IS_BROXTON(dev)) {
		s_max = 1;
		ss_max = 3;
	}

	for (s = 0; s < s_max; s++) {
		s_reg[s] = I915_READ(GEN9_SLICE_PGCTL_ACK(s));
		eu_reg[2*s] = I915_READ(GEN9_SS01_EU_PGCTL_ACK(s));
		eu_reg[2*s + 1] = I915_READ(GEN9_SS23_EU_PGCTL_ACK(s));
	}

	eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
		     GEN9_PGCTL_SSA_EU19_ACK |
		     GEN9_PGCTL_SSA_EU210_ACK |
		     GEN9_PGCTL_SSA_EU311_ACK;
	eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
		     GEN9_PGCTL_SSB_EU19_ACK |
		     GEN9_PGCTL_SSB_EU210_ACK |
		     GEN9_PGCTL_SSB_EU311_ACK;

	for (s = 0; s < s_max; s++) {
		unsigned int ss_cnt = 0;

		if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
			/* skip disabled slice */
			continue;

		stat->slice_total++;

		if (IS_SKYLAKE(dev))
			ss_cnt = INTEL_INFO(dev)->subslice_per_slice;

		for (ss = 0; ss < ss_max; ss++) {
			unsigned int eu_cnt;

			if (IS_BROXTON(dev) &&
			    !(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
				/* skip disabled subslice */
				continue;

			if (IS_BROXTON(dev))
				ss_cnt++;

			eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] &
					       eu_mask[ss%2]);
			stat->eu_total += eu_cnt;
			stat->eu_per_subslice = max(stat->eu_per_subslice,
						    eu_cnt);
		}

		stat->subslice_total += ss_cnt;
		stat->subslice_per_slice = max(stat->subslice_per_slice,
					       ss_cnt);
	}
}
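
/*
 * Note on the indexing above: gen9 exposes two EU ack registers per slice,
 * each covering two subslices, hence eu_reg[2*s + ss/2]; eu_mask[ss%2]
 * selects the SSA vs SSB bitfield within that register, and every set ack
 * bit stands for a pair of EUs, giving the 2 * hweight32() count.
 */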
static void broadwell_sseu_device_status(struct drm_device *dev,
					 struct sseu_dev_status *stat)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int s;
	u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO);

	stat->slice_total = hweight32(slice_info & GEN8_LSLICESTAT_MASK);

	if (stat->slice_total) {
		stat->subslice_per_slice = INTEL_INFO(dev)->subslice_per_slice;
		stat->subslice_total = stat->slice_total *
				       stat->subslice_per_slice;
		stat->eu_per_subslice = INTEL_INFO(dev)->eu_per_subslice;
		stat->eu_total = stat->eu_per_subslice * stat->subslice_total;

		/* subtract fused off EU(s) from enabled slice(s) */
		for (s = 0; s < stat->slice_total; s++) {
			u8 subslice_7eu = INTEL_INFO(dev)->subslice_7eu[s];

			stat->eu_total -= hweight8(subslice_7eu);
		}
	}
}
static int i915_sseu_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct sseu_dev_status stat;

	if (INTEL_INFO(dev)->gen < 8)
		return -ENODEV;

	seq_puts(m, "SSEU Device Info\n");
	seq_printf(m, "  Available Slice Total: %u\n",
		   INTEL_INFO(dev)->slice_total);
	seq_printf(m, "  Available Subslice Total: %u\n",
		   INTEL_INFO(dev)->subslice_total);
	seq_printf(m, "  Available Subslice Per Slice: %u\n",
		   INTEL_INFO(dev)->subslice_per_slice);
	seq_printf(m, "  Available EU Total: %u\n",
		   INTEL_INFO(dev)->eu_total);
	seq_printf(m, "  Available EU Per Subslice: %u\n",
		   INTEL_INFO(dev)->eu_per_subslice);
	seq_printf(m, "  Has Slice Power Gating: %s\n",
		   yesno(INTEL_INFO(dev)->has_slice_pg));
	seq_printf(m, "  Has Subslice Power Gating: %s\n",
		   yesno(INTEL_INFO(dev)->has_subslice_pg));
	seq_printf(m, "  Has EU Power Gating: %s\n",
		   yesno(INTEL_INFO(dev)->has_eu_pg));

	seq_puts(m, "SSEU Device Status\n");
	memset(&stat, 0, sizeof(stat));
	if (IS_CHERRYVIEW(dev)) {
		cherryview_sseu_device_status(dev, &stat);
	} else if (IS_BROADWELL(dev)) {
		broadwell_sseu_device_status(dev, &stat);
	} else if (INTEL_INFO(dev)->gen >= 9) {
		gen9_sseu_device_status(dev, &stat);
	}
	seq_printf(m, "  Enabled Slice Total: %u\n",
		   stat.slice_total);
	seq_printf(m, "  Enabled Subslice Total: %u\n",
		   stat.subslice_total);
	seq_printf(m, "  Enabled Subslice Per Slice: %u\n",
		   stat.subslice_per_slice);
	seq_printf(m, "  Enabled EU Total: %u\n",
		   stat.eu_total);
	seq_printf(m, "  Enabled EU Per Subslice: %u\n",
		   stat.eu_per_subslice);

	return 0;
}
static int i915_forcewake_open(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen < 6)
		return 0;

	intel_runtime_pm_get(dev_priv);
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	return 0;
}

static int i915_forcewake_release(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen < 6)
		return 0;

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
	intel_runtime_pm_put(dev_priv);

	return 0;
}
static const struct file_operations i915_forcewake_fops = {
	.owner = THIS_MODULE,
	.open = i915_forcewake_open,
	.release = i915_forcewake_release,
};
static int i915_forcewake_create(struct dentry *root, struct drm_minor *minor)
{
	struct drm_device *dev = minor->dev;
	struct dentry *ent;

	ent = debugfs_create_file("i915_forcewake_user",
				  S_IRUSR,
				  root, dev,
				  &i915_forcewake_fops);
	if (!ent)
		return -ENOMEM;

	return drm_add_fake_info_node(minor, ent, &i915_forcewake_fops);
}
static int i915_debugfs_create(struct dentry *root,
			       struct drm_minor *minor,
			       const char *name,
			       const struct file_operations *fops)
{
	struct drm_device *dev = minor->dev;
	struct dentry *ent;

	ent = debugfs_create_file(name,
				  S_IRUGO | S_IWUSR,
				  root, dev,
				  fops);
	if (!ent)
		return -ENOMEM;

	return drm_add_fake_info_node(minor, ent, fops);
}
static const struct drm_info_list i915_debugfs_list[] = {
	{"i915_capabilities", i915_capabilities, 0},
	{"i915_gem_objects", i915_gem_object_info, 0},
	{"i915_gem_gtt", i915_gem_gtt_info, 0},
	{"i915_gem_pinned", i915_gem_gtt_info, 0, (void *) PINNED_LIST},
	{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
	{"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
	{"i915_gem_stolen", i915_gem_stolen_list_info },
	{"i915_gem_pageflip", i915_gem_pageflip_info, 0},
	{"i915_gem_request", i915_gem_request_info, 0},
	{"i915_gem_seqno", i915_gem_seqno_info, 0},
	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
	{"i915_gem_interrupt", i915_interrupt_info, 0},
	{"i915_gem_hws", i915_hws_info, 0, (void *)RCS},
	{"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS},
	{"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS},
	{"i915_gem_hws_vebox", i915_hws_info, 0, (void *)VECS},
	{"i915_gem_batch_pool", i915_gem_batch_pool_info, 0},
	{"i915_guc_info", i915_guc_info, 0},
	{"i915_guc_load_status", i915_guc_load_status_info, 0},
	{"i915_guc_log_dump", i915_guc_log_dump, 0},
	{"i915_frequency_info", i915_frequency_info, 0},
	{"i915_hangcheck_info", i915_hangcheck_info, 0},
	{"i915_drpc_info", i915_drpc_info, 0},
	{"i915_emon_status", i915_emon_status, 0},
	{"i915_ring_freq_table", i915_ring_freq_table, 0},
	{"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
	{"i915_fbc_status", i915_fbc_status, 0},
	{"i915_ips_status", i915_ips_status, 0},
	{"i915_sr_status", i915_sr_status, 0},
	{"i915_opregion", i915_opregion, 0},
	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
	{"i915_context_status", i915_context_status, 0},
	{"i915_dump_lrc", i915_dump_lrc, 0},
	{"i915_execlists", i915_execlists, 0},
	{"i915_forcewake_domains", i915_forcewake_domains, 0},
	{"i915_swizzle_info", i915_swizzle_info, 0},
	{"i915_ppgtt_info", i915_ppgtt_info, 0},
	{"i915_llc", i915_llc, 0},
	{"i915_edp_psr_status", i915_edp_psr_status, 0},
	{"i915_sink_crc_eDP1", i915_sink_crc, 0},
	{"i915_energy_uJ", i915_energy_uJ, 0},
	{"i915_runtime_pm_status", i915_runtime_pm_status, 0},
	{"i915_power_domain_info", i915_power_domain_info, 0},
	{"i915_display_info", i915_display_info, 0},
	{"i915_semaphore_status", i915_semaphore_status, 0},
	{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
	{"i915_dp_mst_info", i915_dp_mst_info, 0},
	{"i915_wa_registers", i915_wa_registers, 0},
	{"i915_ddb_info", i915_ddb_info, 0},
	{"i915_sseu_status", i915_sseu_status, 0},
	{"i915_drrs_status", i915_drrs_status, 0},
	{"i915_rps_boost_info", i915_rps_boost_info, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
static const struct i915_debugfs_files {
	const char *name;
	const struct file_operations *fops;
} i915_debugfs_files[] = {
	{"i915_wedged", &i915_wedged_fops},
	{"i915_max_freq", &i915_max_freq_fops},
	{"i915_min_freq", &i915_min_freq_fops},
	{"i915_cache_sharing", &i915_cache_sharing_fops},
	{"i915_ring_stop", &i915_ring_stop_fops},
	{"i915_ring_missed_irq", &i915_ring_missed_irq_fops},
	{"i915_ring_test_irq", &i915_ring_test_irq_fops},
	{"i915_gem_drop_caches", &i915_drop_caches_fops},
	{"i915_error_state", &i915_error_state_fops},
	{"i915_next_seqno", &i915_next_seqno_fops},
	{"i915_display_crc_ctl", &i915_display_crc_ctl_fops},
	{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
	{"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
	{"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
	{"i915_fbc_false_color", &i915_fbc_fc_fops},
	{"i915_dp_test_data", &i915_displayport_test_data_fops},
	{"i915_dp_test_type", &i915_displayport_test_type_fops},
	{"i915_dp_test_active", &i915_displayport_test_active_fops}
};
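
/*
 * Note: two registration mechanisms coexist. i915_debugfs_list holds plain
 * read-only seq_file entries registered in bulk through
 * drm_debugfs_create_files(), while i915_debugfs_files pairs each name
 * with custom file_operations (typically writable control files) created
 * individually by i915_debugfs_create() above.
 */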
void intel_display_crc_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];

		pipe_crc->opened = false;
		spin_lock_init(&pipe_crc->lock);
		init_waitqueue_head(&pipe_crc->wq);
	}
}
int i915_debugfs_init(struct drm_minor *minor)
{
	int ret, i;

	ret = i915_forcewake_create(minor->debugfs_root, minor);
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) {
		ret = i915_pipe_crc_create(minor->debugfs_root, minor, i);
		if (ret)
			return ret;
	}

	for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
		ret = i915_debugfs_create(minor->debugfs_root, minor,
					  i915_debugfs_files[i].name,
					  i915_debugfs_files[i].fops);
		if (ret)
			return ret;
	}

	return drm_debugfs_create_files(i915_debugfs_list,
					I915_DEBUGFS_ENTRIES,
					minor->debugfs_root, minor);
}
void i915_debugfs_cleanup(struct drm_minor *minor)
{
	int i;

	drm_debugfs_remove_files(i915_debugfs_list,
				 I915_DEBUGFS_ENTRIES, minor);

	drm_debugfs_remove_files((struct drm_info_list *) &i915_forcewake_fops,
				 1, minor);

	for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) {
		struct drm_info_list *info_list =
			(struct drm_info_list *)&i915_pipe_crc_data[i];

		drm_debugfs_remove_files(info_list, 1, minor);
	}

	for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
		struct drm_info_list *info_list =
			(struct drm_info_list *) i915_debugfs_files[i].fops;

		drm_debugfs_remove_files(info_list, 1, minor);
	}
}
struct dpcd_block {
	/* DPCD dump start address. */
	unsigned int offset;
	/* DPCD dump end address, inclusive. If unset, .size will be used. */
	unsigned int end;
	/* DPCD dump size. Used if .end is unset. If unset, defaults to 1. */
	size_t size;
	/* Only valid for eDP. */
	bool edp;
};
static const struct dpcd_block i915_dpcd_debug[] = {
	{ .offset = DP_DPCD_REV, .size = DP_RECEIVER_CAP_SIZE },
	{ .offset = DP_PSR_SUPPORT, .end = DP_PSR_CAPS },
	{ .offset = DP_DOWNSTREAM_PORT_0, .size = 16 },
	{ .offset = DP_LINK_BW_SET, .end = DP_EDP_CONFIGURATION_SET },
	{ .offset = DP_SINK_COUNT, .end = DP_ADJUST_REQUEST_LANE2_3 },
	{ .offset = DP_SET_POWER },
	{ .offset = DP_EDP_DPCD_REV },
	{ .offset = DP_EDP_GENERAL_CAP_1, .end = DP_EDP_GENERAL_CAP_3 },
	{ .offset = DP_EDP_DISPLAY_CONTROL_REGISTER, .end = DP_EDP_BACKLIGHT_FREQ_CAP_MAX_LSB },
	{ .offset = DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET, .end = DP_EDP_DBC_MAXIMUM_BRIGHTNESS_SET },
};
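
/*
 * Worked example of the size rule used below
 * (b->end ? b->end - b->offset + 1 : (b->size ?: 1)): the PSR block dumps
 * DP_PSR_CAPS - DP_PSR_SUPPORT + 1 bytes, while DP_SET_POWER sets neither
 * .end nor .size and therefore dumps exactly one byte.
 */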
static int i915_dpcd_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct intel_dp *intel_dp =
		enc_to_intel_dp(&intel_attached_encoder(connector)->base);
	uint8_t buf[16];
	ssize_t err;
	int i;

	if (connector->status != connector_status_connected)
		return -ENODEV;

	for (i = 0; i < ARRAY_SIZE(i915_dpcd_debug); i++) {
		const struct dpcd_block *b = &i915_dpcd_debug[i];
		size_t size = b->end ? b->end - b->offset + 1 : (b->size ?: 1);

		if (b->edp &&
		    connector->connector_type != DRM_MODE_CONNECTOR_eDP)
			continue;

		/* low tech for now */
		if (WARN_ON(size > sizeof(buf)))
			continue;

		err = drm_dp_dpcd_read(&intel_dp->aux, b->offset, buf, size);
		if (err <= 0) {
			DRM_ERROR("dpcd read (%zu bytes at %u) failed (%zd)\n",
				  size, b->offset, err);
			continue;
		}

		seq_printf(m, "%04x: %*ph\n", b->offset, (int) size, buf);
	}

	return 0;
}
static int i915_dpcd_open(struct inode *inode, struct file *file)
{
	return single_open(file, i915_dpcd_show, inode->i_private);
}
static const struct file_operations i915_dpcd_fops = {
	.owner = THIS_MODULE,
	.open = i915_dpcd_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
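
/*
 * Usage sketch (assumes debugfs at /sys/kernel/debug, DRM minor 0 and a
 * connector debugfs directory such as DP-1):
 *
 *   cat /sys/kernel/debug/dri/0/DP-1/i915_dpcd
 *
 * which prints one "offset: hex bytes" line per block read successfully.
 */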
/**
 * i915_debugfs_connector_add - add i915 specific connector debugfs files
 * @connector: pointer to a registered drm_connector
 *
 * Cleanup will be done by drm_connector_unregister() through a call to
 * drm_debugfs_connector_remove().
 *
 * Returns 0 on success, negative error codes on error.
 */
int i915_debugfs_connector_add(struct drm_connector *connector)
{
	struct dentry *root = connector->debugfs_entry;

	/* The connector must have been registered beforehand. */
	if (!root)
		return -ENODEV;

	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
	    connector->connector_type == DRM_MODE_CONNECTOR_eDP)
		debugfs_create_file("i915_dpcd", S_IRUGO, root, connector,