/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *
 */
#include <linux/seq_file.h>
#include <linux/circ_buf.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/list_sort.h>
#include <asm/msr-index.h>
#include <drm/drmP.h>
#include "intel_drv.h"
#include "intel_ringbuffer.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"

/* Selector values passed via drm_info_node->info_ent->data; PINNED_LIST is
 * referenced by i915_gem_gtt_info() below. */
enum {
	ACTIVE_LIST,
	INACTIVE_LIST,
	PINNED_LIST,
};
static const char *yesno(int v)
{
	return v ? "yes" : "no";
}
/* As the drm_debugfs_init() routines are called before dev->dev_private is
 * allocated we need to hook into the minor for release. */
static int
drm_add_fake_info_node(struct drm_minor *minor,
		       struct dentry *ent,
		       const void *key)
{
	struct drm_info_node *node;

	node = kmalloc(sizeof(*node), GFP_KERNEL);
	if (node == NULL) {
		debugfs_remove(ent);
		return -ENOMEM;
	}

	node->minor = minor;
	node->dent = ent;
	node->info_ent = (void *) key;

	mutex_lock(&minor->debugfs_lock);
	list_add(&node->list, &minor->debugfs_list);
	mutex_unlock(&minor->debugfs_lock);

	return 0;
}
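/* Dump the static device capabilities: generation, PCH type, and every
 * feature flag known to DEV_INFO_FOR_EACH_FLAG(), one yes/no line each. */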
static int i915_capabilities(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	const struct intel_device_info *info = INTEL_INFO(dev);

	seq_printf(m, "gen: %d\n", info->gen);
	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev));
#define PRINT_FLAG(x)  seq_printf(m, #x ": %s\n", yesno(info->x))
#define SEP_SEMICOLON ;
	DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_SEMICOLON);
#undef PRINT_FLAG
#undef SEP_SEMICOLON

	return 0;
}
static const char *get_pin_flag(struct drm_i915_gem_object *obj)
{
	if (i915_gem_obj_is_pinned(obj))
		return "p";
	else
		return " ";
}
static const char *get_tiling_flag(struct drm_i915_gem_object *obj)
{
	switch (obj->tiling_mode) {
	default:
	case I915_TILING_NONE: return " ";
	case I915_TILING_X: return "X";
	case I915_TILING_Y: return "Y";
	}
}
static inline const char *get_global_flag(struct drm_i915_gem_object *obj)
{
	return i915_gem_obj_to_ggtt(obj) ? "g" : " ";
}
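/* Emit a one-line summary of a GEM object: pin/tiling/global flags, size,
 * GPU read/write domains, last read/write/fence seqnos, cache level, and
 * any per-VMA GTT bindings, stolen placement or frontbuffer bits. */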
static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;
	int pin_count = 0;

	seq_printf(m, "%pK: %s%s%s %8zdKiB %02x %02x %u %u %u%s%s%s",
		   &obj->base,
		   get_pin_flag(obj),
		   get_tiling_flag(obj),
		   get_global_flag(obj),
		   obj->base.size / 1024,
		   obj->base.read_domains,
		   obj->base.write_domain,
		   i915_gem_request_get_seqno(obj->last_read_req),
		   i915_gem_request_get_seqno(obj->last_write_req),
		   i915_gem_request_get_seqno(obj->last_fenced_req),
		   i915_cache_level_str(to_i915(obj->base.dev), obj->cache_level),
		   obj->dirty ? " dirty" : "",
		   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
	if (obj->base.name)
		seq_printf(m, " (name: %d)", obj->base.name);
	list_for_each_entry(vma, &obj->vma_list, vma_link)
		if (vma->pin_count > 0)
			pin_count++;
	seq_printf(m, " (pinned x %d)", pin_count);
	if (obj->pin_display)
		seq_printf(m, " (display)");
	if (obj->fence_reg != I915_FENCE_REG_NONE)
		seq_printf(m, " (fence: %d)", obj->fence_reg);
	list_for_each_entry(vma, &obj->vma_list, vma_link) {
		if (!i915_is_ggtt(vma->vm))
			seq_puts(m, " (pp");
		else
			seq_puts(m, " (g");
		seq_printf(m, "gtt offset: %08lx, size: %08lx, type: %u)",
			   vma->node.start, vma->node.size,
			   vma->ggtt_view.type);
	}
	if (obj->stolen)
		seq_printf(m, " (stolen: %08lx)", obj->stolen->start);
	if (obj->pin_mappable || obj->fault_mappable) {
		char s[3], *t = s;
		if (obj->pin_mappable)
			*t++ = 'p';
		if (obj->fault_mappable)
			*t++ = 'f';
		*t = '\0';
		seq_printf(m, " (%s mappable)", s);
	}
	if (obj->last_read_req != NULL)
		seq_printf(m, " (%s)",
			   i915_gem_request_get_ring(obj->last_read_req)->name);
	if (obj->frontbuffer_bits)
		seq_printf(m, " (frontbuffer: 0x%03x)", obj->frontbuffer_bits);
}
static void describe_ctx(struct seq_file *m, struct intel_context *ctx)
{
	seq_putc(m, ctx->legacy_hw_ctx.initialized ? 'I' : 'i');
	seq_putc(m, ctx->remap_slice ? 'R' : 'r');
	seq_putc(m, ' ');
}
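/* Walk one of the GGTT object lists (active or inactive, chosen through
 * node->info_ent->data) and describe each object on it. */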
static int i915_gem_object_list_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	uintptr_t list = (uintptr_t) node->info_ent->data;
	struct list_head *head;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_address_space *vm = &dev_priv->gtt.base;
	struct i915_vma *vma;
	size_t total_obj_size, total_gtt_size;
	int count, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	/* FIXME: the user of this interface might want more than just GGTT */
	switch (list) {
	case ACTIVE_LIST:
		seq_puts(m, "Active:\n");
		head = &vm->active_list;
		break;
	case INACTIVE_LIST:
		seq_puts(m, "Inactive:\n");
		head = &vm->inactive_list;
		break;
	default:
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	total_obj_size = total_gtt_size = count = 0;
	list_for_each_entry(vma, head, mm_list) {
		seq_printf(m, "   ");
		describe_obj(m, vma->obj);
		seq_printf(m, "\n");
		total_obj_size += vma->obj->base.size;
		total_gtt_size += vma->node.size;
		count++;
	}
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
		   count, total_obj_size, total_gtt_size);
	return 0;
}
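/* list_sort() comparator: order objects by their start offset within
 * stolen memory. */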
static int obj_rank_by_stolen(void *priv,
			      struct list_head *A, struct list_head *B)
{
	struct drm_i915_gem_object *a =
		container_of(A, struct drm_i915_gem_object, obj_exec_link);
	struct drm_i915_gem_object *b =
		container_of(B, struct drm_i915_gem_object, obj_exec_link);

	return a->stolen->start - b->stolen->start;
}
static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	size_t total_obj_size, total_gtt_size;
	LIST_HEAD(stolen);
	int count, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	total_obj_size = total_gtt_size = count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if (obj->stolen == NULL)
			continue;

		list_add(&obj->obj_exec_link, &stolen);

		total_obj_size += obj->base.size;
		total_gtt_size += i915_gem_obj_ggtt_size(obj);
		count++;
	}
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
		if (obj->stolen == NULL)
			continue;

		list_add(&obj->obj_exec_link, &stolen);

		total_obj_size += obj->base.size;
		count++;
	}
	list_sort(NULL, &stolen, obj_rank_by_stolen);
	seq_puts(m, "Stolen:\n");
	while (!list_empty(&stolen)) {
		obj = list_first_entry(&stolen, typeof(*obj), obj_exec_link);
		seq_puts(m, "   ");
		describe_obj(m, obj);
		seq_putc(m, '\n');
		list_del_init(&obj->obj_exec_link);
	}
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
		   count, total_obj_size, total_gtt_size);
	return 0;
}
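/* Note: this macro accumulates into size/count/mappable_size/
 * mappable_count variables that must be declared by the caller. */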
#define count_objects(list, member) do { \
	list_for_each_entry(obj, list, member) { \
		size += i915_gem_obj_ggtt_size(obj); \
		++count; \
		if (obj->map_and_fenceable) { \
			mappable_size += i915_gem_obj_ggtt_size(obj); \
			++mappable_count; \
		} \
	} \
} while (0)
struct file_stats {
	struct drm_i915_file_private *file_priv;
	int count;
	size_t total, unbound;
	size_t global, shared;
	size_t active, inactive;
};
static int per_file_stats(int id, void *ptr, void *data)
{
	struct drm_i915_gem_object *obj = ptr;
	struct file_stats *stats = data;
	struct i915_vma *vma;

	stats->count++;
	stats->total += obj->base.size;

	if (obj->base.name || obj->base.dma_buf)
		stats->shared += obj->base.size;

	if (USES_FULL_PPGTT(obj->base.dev)) {
		list_for_each_entry(vma, &obj->vma_list, vma_link) {
			struct i915_hw_ppgtt *ppgtt;

			if (!drm_mm_node_allocated(&vma->node))
				continue;

			if (i915_is_ggtt(vma->vm)) {
				stats->global += obj->base.size;
				continue;
			}

			ppgtt = container_of(vma->vm, struct i915_hw_ppgtt, base);
			if (ppgtt->file_priv != stats->file_priv)
				continue;

			if (obj->active) /* XXX per-vma statistic */
				stats->active += obj->base.size;
			else
				stats->inactive += obj->base.size;

			return 0;
		}
	} else {
		if (i915_gem_obj_ggtt_bound(obj)) {
			stats->global += obj->base.size;
			if (obj->active)
				stats->active += obj->base.size;
			else
				stats->inactive += obj->base.size;
			return 0;
		}
	}

	if (!list_empty(&obj->global_list))
		stats->unbound += obj->base.size;

	return 0;
}
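/* Formats one summary line per client from a struct file_stats filled in
 * by per_file_stats() above. */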
#define print_file_stats(m, name, stats) \
	seq_printf(m, "%s: %u objects, %zu bytes (%zu active, %zu inactive, %zu global, %zu shared, %zu unbound)\n", \
		   name, \
		   stats.count, \
		   stats.total, \
		   stats.active, \
		   stats.inactive, \
		   stats.global, \
		   stats.shared, \
		   stats.unbound)
static void print_batch_pool_stats(struct seq_file *m,
				   struct drm_i915_private *dev_priv)
{
	struct drm_i915_gem_object *obj;
	struct file_stats stats;

	memset(&stats, 0, sizeof(stats));

	list_for_each_entry(obj,
			    &dev_priv->mm.batch_pool.cache_list,
			    batch_pool_list)
		per_file_stats(0, obj, &stats);

	print_file_stats(m, "batch pool", stats);
}
#define count_vmas(list, member) do { \
	list_for_each_entry(vma, list, member) { \
		size += i915_gem_obj_ggtt_size(vma->obj); \
		++count; \
		if (vma->obj->map_and_fenceable) { \
			mappable_size += i915_gem_obj_ggtt_size(vma->obj); \
			++mappable_count; \
		} \
	} \
} while (0)
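/* Top-level GEM memory overview: global object counts, GGTT usage split
 * into active/inactive/unbound/purgeable, batch-pool stats, and a
 * per-client breakdown gathered via per_file_stats(). */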
static int i915_gem_object_info(struct seq_file *m, void* data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 count, mappable_count, purgeable_count;
	size_t size, mappable_size, purgeable_size;
	struct drm_i915_gem_object *obj;
	struct i915_address_space *vm = &dev_priv->gtt.base;
	struct drm_file *file;
	struct i915_vma *vma;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "%u objects, %zu bytes\n",
		   dev_priv->mm.object_count,
		   dev_priv->mm.object_memory);

	size = count = mappable_size = mappable_count = 0;
	count_objects(&dev_priv->mm.bound_list, global_list);
	seq_printf(m, "%u [%u] objects, %zu [%zu] bytes in gtt\n",
		   count, mappable_count, size, mappable_size);

	size = count = mappable_size = mappable_count = 0;
	count_vmas(&vm->active_list, mm_list);
	seq_printf(m, "  %u [%u] active objects, %zu [%zu] bytes\n",
		   count, mappable_count, size, mappable_size);

	size = count = mappable_size = mappable_count = 0;
	count_vmas(&vm->inactive_list, mm_list);
	seq_printf(m, "  %u [%u] inactive objects, %zu [%zu] bytes\n",
		   count, mappable_count, size, mappable_size);

	size = count = purgeable_size = purgeable_count = 0;
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
		size += obj->base.size, ++count;
		if (obj->madv == I915_MADV_DONTNEED)
			purgeable_size += obj->base.size, ++purgeable_count;
	}
	seq_printf(m, "%u unbound objects, %zu bytes\n", count, size);

	size = count = mappable_size = mappable_count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if (obj->fault_mappable) {
			size += i915_gem_obj_ggtt_size(obj);
			++count;
		}
		if (obj->pin_mappable) {
			mappable_size += i915_gem_obj_ggtt_size(obj);
			++mappable_count;
		}
		if (obj->madv == I915_MADV_DONTNEED) {
			purgeable_size += obj->base.size;
			++purgeable_count;
		}
	}
	seq_printf(m, "%u purgeable objects, %zu bytes\n",
		   purgeable_count, purgeable_size);
	seq_printf(m, "%u pinned mappable objects, %zu bytes\n",
		   mappable_count, mappable_size);
	seq_printf(m, "%u fault mappable objects, %zu bytes\n",
		   count, size);

	seq_printf(m, "%zu [%lu] gtt total\n",
		   dev_priv->gtt.base.total,
		   dev_priv->gtt.mappable_end - dev_priv->gtt.base.start);

	seq_putc(m, '\n');
	print_batch_pool_stats(m, dev_priv);

	seq_putc(m, '\n');
	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
		struct file_stats stats;
		struct task_struct *task;

		memset(&stats, 0, sizeof(stats));
		stats.file_priv = file->driver_priv;
		spin_lock(&file->table_lock);
		idr_for_each(&file->object_idr, per_file_stats, &stats);
		spin_unlock(&file->table_lock);
		/*
		 * Although we have a valid reference on file->pid, that does
		 * not guarantee that the task_struct who called get_pid() is
		 * still alive (e.g. get_pid(current) => fork() => exit()).
		 * Therefore, we need to protect this ->comm access using RCU.
		 */
		rcu_read_lock();
		task = pid_task(file->pid, PIDTYPE_PID);
		print_file_stats(m, task ? task->comm : "<unknown>", stats);
		rcu_read_unlock();
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
static int i915_gem_gtt_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	uintptr_t list = (uintptr_t) node->info_ent->data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	size_t total_obj_size, total_gtt_size;
	int count, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	total_obj_size = total_gtt_size = count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if (list == PINNED_LIST && !i915_gem_obj_is_pinned(obj))
			continue;

		seq_puts(m, "   ");
		describe_obj(m, obj);
		seq_putc(m, '\n');
		total_obj_size += obj->base.size;
		total_gtt_size += i915_gem_obj_ggtt_size(obj);
		count++;
	}

	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
		   count, total_obj_size, total_gtt_size);

	return 0;
}
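/* Report any page flip pending on each CRTC, including the ring and
 * seqno the flip is waiting on and the current scanout address. */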
static int i915_gem_pageflip_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	for_each_intel_crtc(dev, crtc) {
		const char pipe = pipe_name(crtc->pipe);
		const char plane = plane_name(crtc->plane);
		struct intel_unpin_work *work;

		spin_lock_irq(&dev->event_lock);
		work = crtc->unpin_work;
		if (work == NULL) {
			seq_printf(m, "No flip due on pipe %c (plane %c)\n",
				   pipe, plane);
		} else {
			u32 addr;

			if (atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
				seq_printf(m, "Flip queued on pipe %c (plane %c)\n",
					   pipe, plane);
			} else {
				seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n",
					   pipe, plane);
			}
			if (work->flip_queued_req) {
				struct intel_engine_cs *ring =
					i915_gem_request_get_ring(work->flip_queued_req);

				seq_printf(m, "Flip queued on %s at seqno %u, next seqno %u [current breadcrumb %u], completed? %d\n",
					   ring->name,
					   i915_gem_request_get_seqno(work->flip_queued_req),
					   dev_priv->next_seqno,
					   ring->get_seqno(ring, true),
					   i915_gem_request_completed(work->flip_queued_req, true));
			} else
				seq_printf(m, "Flip not associated with any ring\n");
			seq_printf(m, "Flip queued on frame %d, (was ready on frame %d), now %d\n",
				   work->flip_queued_vblank,
				   work->flip_ready_vblank,
				   drm_vblank_count(dev, crtc->pipe));
			if (work->enable_stall_check)
				seq_puts(m, "Stall check enabled, ");
			else
				seq_puts(m, "Stall check waiting for page flip ioctl, ");
			seq_printf(m, "%d prepares\n", atomic_read(&work->pending));

			if (INTEL_INFO(dev)->gen >= 4)
				addr = I915_HI_DISPBASE(I915_READ(DSPSURF(crtc->plane)));
			else
				addr = I915_READ(DSPADDR(crtc->plane));
			seq_printf(m, "Current scanout address 0x%08x\n", addr);

			if (work->pending_flip_obj) {
				seq_printf(m, "New framebuffer address 0x%08lx\n", (long)work->gtt_offset);
				seq_printf(m, "MMIO update completed? %d\n",  addr == work->gtt_offset);
			}
		}
		spin_unlock_irq(&dev->event_lock);
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	int count = 0;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_puts(m, "cache:\n");
	list_for_each_entry(obj,
			    &dev_priv->mm.batch_pool.cache_list,
			    batch_pool_list) {
		seq_puts(m, "   ");
		describe_obj(m, obj);
		seq_putc(m, '\n');
		count++;
	}

	seq_printf(m, "total: %d\n", count);

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
static int i915_gem_request_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	struct drm_i915_gem_request *gem_request;
	int ret, count, i;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	count = 0;
	for_each_ring(ring, dev_priv, i) {
		if (list_empty(&ring->request_list))
			continue;

		seq_printf(m, "%s requests:\n", ring->name);
		list_for_each_entry(gem_request,
				    &ring->request_list,
				    list) {
			seq_printf(m, "    %d @ %d\n",
				   gem_request->seqno,
				   (int) (jiffies - gem_request->emitted_jiffies));
		}
		count++;
	}
	mutex_unlock(&dev->struct_mutex);

	if (count == 0)
		seq_puts(m, "No requests\n");

	return 0;
}
static void i915_ring_seqno_info(struct seq_file *m,
				 struct intel_engine_cs *ring)
{
	if (ring->get_seqno) {
		seq_printf(m, "Current sequence (%s): %u\n",
			   ring->name, ring->get_seqno(ring, false));
	}
}
static int i915_gem_seqno_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	int ret, i;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	for_each_ring(ring, dev_priv, i)
		i915_ring_seqno_info(m, ring);

	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
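/* Dump interrupt enable/identity/mask registers; the register layout is
 * selected per platform (Cherryview, gen8+, Valleyview, pre-PCH-split,
 * and PCH-split paths below). */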
static int i915_interrupt_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	int ret, i, pipe;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	if (IS_CHERRYVIEW(dev)) {
		seq_printf(m, "Master Interrupt Control:\t%08x\n",
			   I915_READ(GEN8_MASTER_IRQ));

		seq_printf(m, "Display IER:\t%08x\n",
			   I915_READ(VLV_IER));
		seq_printf(m, "Display IIR:\t%08x\n",
			   I915_READ(VLV_IIR));
		seq_printf(m, "Display IIR_RW:\t%08x\n",
			   I915_READ(VLV_IIR_RW));
		seq_printf(m, "Display IMR:\t%08x\n",
			   I915_READ(VLV_IMR));
		for_each_pipe(dev_priv, pipe)
			seq_printf(m, "Pipe %c stat:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));

		seq_printf(m, "Port hotplug:\t%08x\n",
			   I915_READ(PORT_HOTPLUG_EN));
		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
			   I915_READ(VLV_DPFLIPSTAT));
		seq_printf(m, "DPINVGTT:\t%08x\n",
			   I915_READ(DPINVGTT));

		for (i = 0; i < 4; i++) {
			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IMR(i)));
			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IIR(i)));
			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IER(i)));
		}

		seq_printf(m, "PCU interrupt mask:\t%08x\n",
			   I915_READ(GEN8_PCU_IMR));
		seq_printf(m, "PCU interrupt identity:\t%08x\n",
			   I915_READ(GEN8_PCU_IIR));
		seq_printf(m, "PCU interrupt enable:\t%08x\n",
			   I915_READ(GEN8_PCU_IER));
	} else if (INTEL_INFO(dev)->gen >= 8) {
		seq_printf(m, "Master Interrupt Control:\t%08x\n",
			   I915_READ(GEN8_MASTER_IRQ));

		for (i = 0; i < 4; i++) {
			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IMR(i)));
			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IIR(i)));
			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IER(i)));
		}

		for_each_pipe(dev_priv, pipe) {
			if (!intel_display_power_is_enabled(dev_priv,
						POWER_DOMAIN_PIPE(pipe))) {
				seq_printf(m, "Pipe %c power disabled\n",
					   pipe_name(pipe));
				continue;
			}
			seq_printf(m, "Pipe %c IMR:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(GEN8_DE_PIPE_IMR(pipe)));
			seq_printf(m, "Pipe %c IIR:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(GEN8_DE_PIPE_IIR(pipe)));
			seq_printf(m, "Pipe %c IER:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(GEN8_DE_PIPE_IER(pipe)));
		}

		seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
			   I915_READ(GEN8_DE_PORT_IMR));
		seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
			   I915_READ(GEN8_DE_PORT_IIR));
		seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
			   I915_READ(GEN8_DE_PORT_IER));

		seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
			   I915_READ(GEN8_DE_MISC_IMR));
		seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
			   I915_READ(GEN8_DE_MISC_IIR));
		seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
			   I915_READ(GEN8_DE_MISC_IER));

		seq_printf(m, "PCU interrupt mask:\t%08x\n",
			   I915_READ(GEN8_PCU_IMR));
		seq_printf(m, "PCU interrupt identity:\t%08x\n",
			   I915_READ(GEN8_PCU_IIR));
		seq_printf(m, "PCU interrupt enable:\t%08x\n",
			   I915_READ(GEN8_PCU_IER));
	} else if (IS_VALLEYVIEW(dev)) {
		seq_printf(m, "Display IER:\t%08x\n",
			   I915_READ(VLV_IER));
		seq_printf(m, "Display IIR:\t%08x\n",
			   I915_READ(VLV_IIR));
		seq_printf(m, "Display IIR_RW:\t%08x\n",
			   I915_READ(VLV_IIR_RW));
		seq_printf(m, "Display IMR:\t%08x\n",
			   I915_READ(VLV_IMR));
		for_each_pipe(dev_priv, pipe)
			seq_printf(m, "Pipe %c stat:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));

		seq_printf(m, "Master IER:\t%08x\n",
			   I915_READ(VLV_MASTER_IER));

		seq_printf(m, "Render IER:\t%08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Render IIR:\t%08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Render IMR:\t%08x\n",
			   I915_READ(GTIMR));

		seq_printf(m, "PM IER:\t\t%08x\n",
			   I915_READ(GEN6_PMIER));
		seq_printf(m, "PM IIR:\t\t%08x\n",
			   I915_READ(GEN6_PMIIR));
		seq_printf(m, "PM IMR:\t\t%08x\n",
			   I915_READ(GEN6_PMIMR));

		seq_printf(m, "Port hotplug:\t%08x\n",
			   I915_READ(PORT_HOTPLUG_EN));
		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
			   I915_READ(VLV_DPFLIPSTAT));
		seq_printf(m, "DPINVGTT:\t%08x\n",
			   I915_READ(DPINVGTT));
	} else if (!HAS_PCH_SPLIT(dev)) {
		seq_printf(m, "Interrupt enable:    %08x\n",
			   I915_READ(IER));
		seq_printf(m, "Interrupt identity:  %08x\n",
			   I915_READ(IIR));
		seq_printf(m, "Interrupt mask:      %08x\n",
			   I915_READ(IMR));
		for_each_pipe(dev_priv, pipe)
			seq_printf(m, "Pipe %c stat:         %08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));
	} else {
		seq_printf(m, "North Display Interrupt enable:		%08x\n",
			   I915_READ(DEIER));
		seq_printf(m, "North Display Interrupt identity:	%08x\n",
			   I915_READ(DEIIR));
		seq_printf(m, "North Display Interrupt mask:		%08x\n",
			   I915_READ(DEIMR));
		seq_printf(m, "South Display Interrupt enable:		%08x\n",
			   I915_READ(SDEIER));
		seq_printf(m, "South Display Interrupt identity:	%08x\n",
			   I915_READ(SDEIIR));
		seq_printf(m, "South Display Interrupt mask:		%08x\n",
			   I915_READ(SDEIMR));
		seq_printf(m, "Graphics Interrupt enable:		%08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Graphics Interrupt identity:		%08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Graphics Interrupt mask:		%08x\n",
			   I915_READ(GTIMR));
	}
	for_each_ring(ring, dev_priv, i) {
		if (INTEL_INFO(dev)->gen >= 6) {
			seq_printf(m,
				   "Graphics Interrupt mask (%s):	%08x\n",
				   ring->name, I915_READ_IMR(ring));
		}
		i915_ring_seqno_info(m, ring);
	}
	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
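/* List every fence register and the object, if any, currently occupying it. */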
static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start);
	seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
	for (i = 0; i < dev_priv->num_fence_regs; i++) {
		struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj;

		seq_printf(m, "Fence %d, pin count = %d, object = ",
			   i, dev_priv->fence_regs[i].pin_count);
		if (obj == NULL)
			seq_puts(m, "unused");
		else
			describe_obj(m, obj);
		seq_putc(m, '\n');
	}

	mutex_unlock(&dev->struct_mutex);
	return 0;
}
static int i915_hws_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	const u32 *hws;
	int i;

	ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
	hws = ring->status_page.page_addr;
	if (hws == NULL)
		return 0;

	for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) {
		seq_printf(m, "0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
			   i * 4,
			   hws[i], hws[i + 1], hws[i + 2], hws[i + 3]);
	}
	return 0;
}
static ssize_t
i915_error_state_write(struct file *filp,
		       const char __user *ubuf,
		       size_t cnt,
		       loff_t *ppos)
{
	struct i915_error_state_file_priv *error_priv = filp->private_data;
	struct drm_device *dev = error_priv->dev;
	int ret;

	DRM_DEBUG_DRIVER("Resetting error state\n");

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	i915_destroy_error_state(dev);
	mutex_unlock(&dev->struct_mutex);

	return cnt;
}
static int i915_error_state_open(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;
	struct i915_error_state_file_priv *error_priv;

	error_priv = kzalloc(sizeof(*error_priv), GFP_KERNEL);
	if (!error_priv)
		return -ENOMEM;

	error_priv->dev = dev;

	i915_error_state_get(dev, error_priv);

	file->private_data = error_priv;

	return 0;
}
static int i915_error_state_release(struct inode *inode, struct file *file)
{
	struct i915_error_state_file_priv *error_priv = file->private_data;

	i915_error_state_put(error_priv);
	kfree(error_priv);

	return 0;
}
static ssize_t i915_error_state_read(struct file *file, char __user *userbuf,
				     size_t count, loff_t *pos)
{
	struct i915_error_state_file_priv *error_priv = file->private_data;
	struct drm_i915_error_state_buf error_str;
	loff_t tmp_pos = 0;
	ssize_t ret_count = 0;
	int ret;

	ret = i915_error_state_buf_init(&error_str, to_i915(error_priv->dev), count, *pos);
	if (ret)
		return ret;

	ret = i915_error_state_to_str(&error_str, error_priv);
	if (ret)
		goto out;

	ret_count = simple_read_from_buffer(userbuf, count, &tmp_pos,
					    error_str.buf,
					    error_str.bytes);

	if (ret_count < 0)
		ret = ret_count;
	else
		*pos = error_str.start + ret_count;
out:
	i915_error_state_buf_release(&error_str);
	return ret ?: ret_count;
}
static const struct file_operations i915_error_state_fops = {
	.owner = THIS_MODULE,
	.open = i915_error_state_open,
	.read = i915_error_state_read,
	.write = i915_error_state_write,
	.llseek = default_llseek,
	.release = i915_error_state_release,
};
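/* Read, or advance via i915_gem_set_seqno(), the driver's next seqno. */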
static int
i915_next_seqno_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	*val = dev_priv->next_seqno;
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
static int
i915_next_seqno_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	ret = i915_gem_set_seqno(dev, val);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops,
			i915_next_seqno_get, i915_next_seqno_set,
			"0x%llx\n");
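/* Report the requested and current GPU frequency (RPS state); the
 * registers consulted differ per generation (ILK, gen6+, VLV). */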
static int i915_frequency_info(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret = 0;

	intel_runtime_pm_get(dev_priv);

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	if (IS_GEN5(dev)) {
		u16 rgvswctl = I915_READ16(MEMSWCTL);
		u16 rgvstat = I915_READ16(MEMSTAT_ILK);

		seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
		seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
		seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
			   MEMSTAT_VID_SHIFT);
		seq_printf(m, "Current P-state: %d\n",
			   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
	} else if (IS_GEN6(dev) || (IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) ||
		   IS_BROADWELL(dev)) {
		u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
		u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
		u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
		u32 rpmodectl, rpinclimit, rpdeclimit;
		u32 rpstat, cagf, reqf;
		u32 rpupei, rpcurup, rpprevup;
		u32 rpdownei, rpcurdown, rpprevdown;
		u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
		int max_freq;

		/* RPSTAT1 is in the GT power well */
		ret = mutex_lock_interruptible(&dev->struct_mutex);
		if (ret)
			goto out;

		gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);

		reqf = I915_READ(GEN6_RPNSWREQ);
		reqf &= ~GEN6_TURBO_DISABLE;
		if (IS_HASWELL(dev) || IS_BROADWELL(dev))
			reqf >>= 24;
		else
			reqf >>= 25;
		reqf *= GT_FREQUENCY_MULTIPLIER;

		rpmodectl = I915_READ(GEN6_RP_CONTROL);
		rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
		rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD);

		rpstat = I915_READ(GEN6_RPSTAT1);
		rpupei = I915_READ(GEN6_RP_CUR_UP_EI);
		rpcurup = I915_READ(GEN6_RP_CUR_UP);
		rpprevup = I915_READ(GEN6_RP_PREV_UP);
		rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI);
		rpcurdown = I915_READ(GEN6_RP_CUR_DOWN);
		rpprevdown = I915_READ(GEN6_RP_PREV_DOWN);
		if (IS_HASWELL(dev) || IS_BROADWELL(dev))
			cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
		else
			cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
		cagf *= GT_FREQUENCY_MULTIPLIER;

		gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
		mutex_unlock(&dev->struct_mutex);

		if (IS_GEN6(dev) || IS_GEN7(dev)) {
			pm_ier = I915_READ(GEN6_PMIER);
			pm_imr = I915_READ(GEN6_PMIMR);
			pm_isr = I915_READ(GEN6_PMISR);
			pm_iir = I915_READ(GEN6_PMIIR);
			pm_mask = I915_READ(GEN6_PMINTRMSK);
		} else {
			pm_ier = I915_READ(GEN8_GT_IER(2));
			pm_imr = I915_READ(GEN8_GT_IMR(2));
			pm_isr = I915_READ(GEN8_GT_ISR(2));
			pm_iir = I915_READ(GEN8_GT_IIR(2));
			pm_mask = I915_READ(GEN6_PMINTRMSK);
		}
		seq_printf(m, "PM IER=0x%08x IMR=0x%08x ISR=0x%08x IIR=0x%08x, MASK=0x%08x\n",
			   pm_ier, pm_imr, pm_isr, pm_iir, pm_mask);
		seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
		seq_printf(m, "Render p-state ratio: %d\n",
			   (gt_perf_status & 0xff00) >> 8);
		seq_printf(m, "Render p-state VID: %d\n",
			   gt_perf_status & 0xff);
		seq_printf(m, "Render p-state limit: %d\n",
			   rp_state_limits & 0xff);
		seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
		seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl);
		seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit);
		seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
		seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
		seq_printf(m, "CAGF: %dMHz\n", cagf);
		seq_printf(m, "RP CUR UP EI: %dus\n", rpupei &
			   GEN6_CURICONT_MASK);
		seq_printf(m, "RP CUR UP: %dus\n", rpcurup &
			   GEN6_CURBSYTAVG_MASK);
		seq_printf(m, "RP PREV UP: %dus\n", rpprevup &
			   GEN6_CURBSYTAVG_MASK);
		seq_printf(m, "RP CUR DOWN EI: %dus\n", rpdownei &
			   GEN6_CURIAVG_MASK);
		seq_printf(m, "RP CUR DOWN: %dus\n", rpcurdown &
			   GEN6_CURBSYTAVG_MASK);
		seq_printf(m, "RP PREV DOWN: %dus\n", rpprevdown &
			   GEN6_CURBSYTAVG_MASK);

		max_freq = (rp_state_cap & 0xff0000) >> 16;
		seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
			   max_freq * GT_FREQUENCY_MULTIPLIER);

		max_freq = (rp_state_cap & 0xff00) >> 8;
		seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
			   max_freq * GT_FREQUENCY_MULTIPLIER);

		max_freq = rp_state_cap & 0xff;
		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
			   max_freq * GT_FREQUENCY_MULTIPLIER);

		seq_printf(m, "Max overclocked frequency: %dMHz\n",
			   dev_priv->rps.max_freq * GT_FREQUENCY_MULTIPLIER);
	} else if (IS_VALLEYVIEW(dev)) {
		u32 freq_sts;

		mutex_lock(&dev_priv->rps.hw_lock);
		freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
		seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
		seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);

		seq_printf(m, "max GPU freq: %d MHz\n",
			   vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq));

		seq_printf(m, "min GPU freq: %d MHz\n",
			   vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq));

		seq_printf(m, "efficient (RPe) frequency: %d MHz\n",
			   vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));

		seq_printf(m, "current GPU freq: %d MHz\n",
			   vlv_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));
		mutex_unlock(&dev_priv->rps.hw_lock);
	} else {
		seq_puts(m, "no P-state info available\n");
	}

out:
	intel_runtime_pm_put(dev_priv);
	return ret;
}
*m
)
1224 struct drm_info_node
*node
= m
->private;
1225 struct drm_device
*dev
= node
->minor
->dev
;
1226 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
1227 u32 rgvmodectl
, rstdbyctl
;
1231 ret
= mutex_lock_interruptible(&dev
->struct_mutex
);
1234 intel_runtime_pm_get(dev_priv
);
1236 rgvmodectl
= I915_READ(MEMMODECTL
);
1237 rstdbyctl
= I915_READ(RSTDBYCTL
);
1238 crstandvid
= I915_READ16(CRSTANDVID
);
1240 intel_runtime_pm_put(dev_priv
);
1241 mutex_unlock(&dev
->struct_mutex
);
1243 seq_printf(m
, "HD boost: %s\n", (rgvmodectl
& MEMMODE_BOOST_EN
) ?
1245 seq_printf(m
, "Boost freq: %d\n",
1246 (rgvmodectl
& MEMMODE_BOOST_FREQ_MASK
) >>
1247 MEMMODE_BOOST_FREQ_SHIFT
);
1248 seq_printf(m
, "HW control enabled: %s\n",
1249 rgvmodectl
& MEMMODE_HWIDLE_EN
? "yes" : "no");
1250 seq_printf(m
, "SW control enabled: %s\n",
1251 rgvmodectl
& MEMMODE_SWMODE_EN
? "yes" : "no");
1252 seq_printf(m
, "Gated voltage change: %s\n",
1253 rgvmodectl
& MEMMODE_RCLK_GATE
? "yes" : "no");
1254 seq_printf(m
, "Starting frequency: P%d\n",
1255 (rgvmodectl
& MEMMODE_FSTART_MASK
) >> MEMMODE_FSTART_SHIFT
);
1256 seq_printf(m
, "Max P-state: P%d\n",
1257 (rgvmodectl
& MEMMODE_FMAX_MASK
) >> MEMMODE_FMAX_SHIFT
);
1258 seq_printf(m
, "Min P-state: P%d\n", (rgvmodectl
& MEMMODE_FMIN_MASK
));
1259 seq_printf(m
, "RS1 VID: %d\n", (crstandvid
& 0x3f));
1260 seq_printf(m
, "RS2 VID: %d\n", ((crstandvid
>> 8) & 0x3f));
1261 seq_printf(m
, "Render standby enabled: %s\n",
1262 (rstdbyctl
& RCX_SW_EXIT
) ? "no" : "yes");
1263 seq_puts(m
, "Current RS state: ");
1264 switch (rstdbyctl
& RSX_STATUS_MASK
) {
1266 seq_puts(m
, "on\n");
1268 case RSX_STATUS_RC1
:
1269 seq_puts(m
, "RC1\n");
1271 case RSX_STATUS_RC1E
:
1272 seq_puts(m
, "RC1E\n");
1274 case RSX_STATUS_RS1
:
1275 seq_puts(m
, "RS1\n");
1277 case RSX_STATUS_RS2
:
1278 seq_puts(m
, "RS2 (RC6)\n");
1280 case RSX_STATUS_RS3
:
1281 seq_puts(m
, "RC3 (RC6+)\n");
1284 seq_puts(m
, "unknown\n");
static int vlv_drpc_info(struct seq_file *m)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 rpmodectl1, rcctl1, pw_status;
	unsigned fw_rendercount = 0, fw_mediacount = 0;

	intel_runtime_pm_get(dev_priv);

	pw_status = I915_READ(VLV_GTLC_PW_STATUS);
	rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
	rcctl1 = I915_READ(GEN6_RC_CONTROL);

	intel_runtime_pm_put(dev_priv);

	seq_printf(m, "Video Turbo Mode: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
	seq_printf(m, "Turbo enabled: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_ENABLE));
	seq_printf(m, "HW control enabled: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_ENABLE));
	seq_printf(m, "SW control enabled: %s\n",
		   yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) ==
			  GEN6_RP_MEDIA_SW_MODE));
	seq_printf(m, "RC6 Enabled: %s\n",
		   yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
					GEN6_RC_CTL_EI_MODE(1))));
	seq_printf(m, "Render Power Well: %s\n",
		   (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
	seq_printf(m, "Media Power Well: %s\n",
		   (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");

	seq_printf(m, "Render RC6 residency since boot: %u\n",
		   I915_READ(VLV_GT_RENDER_RC6));
	seq_printf(m, "Media RC6 residency since boot: %u\n",
		   I915_READ(VLV_GT_MEDIA_RC6));

	spin_lock_irq(&dev_priv->uncore.lock);
	fw_rendercount = dev_priv->uncore.fw_rendercount;
	fw_mediacount = dev_priv->uncore.fw_mediacount;
	spin_unlock_irq(&dev_priv->uncore.lock);

	seq_printf(m, "Forcewake Render Count = %u\n", fw_rendercount);
	seq_printf(m, "Forcewake Media Count = %u\n", fw_mediacount);

	return 0;
}
static int gen6_drpc_info(struct seq_file *m)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 rpmodectl1, gt_core_status, rcctl1, rc6vids = 0;
	unsigned forcewake_count;
	int count = 0, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	spin_lock_irq(&dev_priv->uncore.lock);
	forcewake_count = dev_priv->uncore.forcewake_count;
	spin_unlock_irq(&dev_priv->uncore.lock);

	if (forcewake_count) {
		seq_puts(m, "RC information inaccurate because somebody "
			    "holds a forcewake reference \n");
	} else {
		/* NB: we cannot use forcewake, else we read the wrong values */
		while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
			udelay(10);
		seq_printf(m, "RC information accurate: %s\n", yesno(count < 51));
	}

	gt_core_status = readl(dev_priv->regs + GEN6_GT_CORE_STATUS);
	trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);

	rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
	rcctl1 = I915_READ(GEN6_RC_CONTROL);
	mutex_unlock(&dev->struct_mutex);
	mutex_lock(&dev_priv->rps.hw_lock);
	sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
	mutex_unlock(&dev_priv->rps.hw_lock);

	intel_runtime_pm_put(dev_priv);

	seq_printf(m, "Video Turbo Mode: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
	seq_printf(m, "HW control enabled: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_ENABLE));
	seq_printf(m, "SW control enabled: %s\n",
		   yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) ==
			  GEN6_RP_MEDIA_SW_MODE));
	seq_printf(m, "RC1e Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
	seq_printf(m, "RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
	seq_printf(m, "Deep RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
	seq_printf(m, "Deepest RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
	seq_puts(m, "Current RC state: ");
	switch (gt_core_status & GEN6_RCn_MASK) {
	case GEN6_RC0:
		if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
			seq_puts(m, "Core Power Down\n");
		else
			seq_puts(m, "on\n");
		break;
	case GEN6_RC3:
		seq_puts(m, "RC3\n");
		break;
	case GEN6_RC6:
		seq_puts(m, "RC6\n");
		break;
	case GEN6_RC7:
		seq_puts(m, "RC7\n");
		break;
	default:
		seq_puts(m, "Unknown\n");
		break;
	}

	seq_printf(m, "Core Power Down: %s\n",
		   yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));

	/* Not exactly sure what this is */
	seq_printf(m, "RC6 \"Locked to RPn\" residency since boot: %u\n",
		   I915_READ(GEN6_GT_GFX_RC6_LOCKED));
	seq_printf(m, "RC6 residency since boot: %u\n",
		   I915_READ(GEN6_GT_GFX_RC6));
	seq_printf(m, "RC6+ residency since boot: %u\n",
		   I915_READ(GEN6_GT_GFX_RC6p));
	seq_printf(m, "RC6++ residency since boot: %u\n",
		   I915_READ(GEN6_GT_GFX_RC6pp));

	seq_printf(m, "RC6   voltage: %dmV\n",
		   GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
	seq_printf(m, "RC6+  voltage: %dmV\n",
		   GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
	seq_printf(m, "RC6++ voltage: %dmV\n",
		   GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
	return 0;
}
static int i915_drpc_info(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;

	if (IS_VALLEYVIEW(dev))
		return vlv_drpc_info(m);
	else if (INTEL_INFO(dev)->gen >= 6)
		return gen6_drpc_info(m);
	else
		return ironlake_drpc_info(m);
}
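/* Report whether framebuffer compression is enabled and, if disabled,
 * decode dev_priv->fbc.no_fbc_reason into a human-readable string. */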
static int i915_fbc_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!HAS_FBC(dev)) {
		seq_puts(m, "FBC unsupported on this chipset\n");
		return 0;
	}

	intel_runtime_pm_get(dev_priv);

	if (intel_fbc_enabled(dev)) {
		seq_puts(m, "FBC enabled\n");
	} else {
		seq_puts(m, "FBC disabled: ");
		switch (dev_priv->fbc.no_fbc_reason) {
		case FBC_OK:
			seq_puts(m, "FBC activated, but currently disabled in hardware");
			break;
		case FBC_UNSUPPORTED:
			seq_puts(m, "unsupported by this chipset");
			break;
		case FBC_NO_OUTPUT:
			seq_puts(m, "no outputs");
			break;
		case FBC_STOLEN_TOO_SMALL:
			seq_puts(m, "not enough stolen memory");
			break;
		case FBC_UNSUPPORTED_MODE:
			seq_puts(m, "mode not supported");
			break;
		case FBC_MODE_TOO_LARGE:
			seq_puts(m, "mode too large");
			break;
		case FBC_BAD_PLANE:
			seq_puts(m, "FBC unsupported on plane");
			break;
		case FBC_NOT_TILED:
			seq_puts(m, "scanout buffer not tiled");
			break;
		case FBC_MULTIPLE_PIPES:
			seq_puts(m, "multiple pipes are enabled");
			break;
		case FBC_MODULE_PARAM:
			seq_puts(m, "disabled per module param (default off)");
			break;
		case FBC_CHIP_DEFAULT:
			seq_puts(m, "disabled per chip default");
			break;
		default:
			seq_puts(m, "unknown reason");
		}
		seq_putc(m, '\n');
	}

	intel_runtime_pm_put(dev_priv);

	return 0;
}
static int i915_fbc_fc_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen < 7 || !HAS_FBC(dev))
		return -ENODEV;

	drm_modeset_lock_all(dev);
	*val = dev_priv->fbc.false_color;
	drm_modeset_unlock_all(dev);

	return 0;
}
static int i915_fbc_fc_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 reg;

	if (INTEL_INFO(dev)->gen < 7 || !HAS_FBC(dev))
		return -ENODEV;

	drm_modeset_lock_all(dev);

	reg = I915_READ(ILK_DPFC_CONTROL);
	dev_priv->fbc.false_color = val;

	I915_WRITE(ILK_DPFC_CONTROL, val ?
		   (reg | FBC_CTL_FALSE_COLOR) :
		   (reg & ~FBC_CTL_FALSE_COLOR));

	drm_modeset_unlock_all(dev);
	return 0;
}
,
1556 i915_fbc_fc_get
, i915_fbc_fc_set
,
static int i915_ips_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!HAS_IPS(dev)) {
		seq_puts(m, "not supported\n");
		return 0;
	}

	intel_runtime_pm_get(dev_priv);

	seq_printf(m, "Enabled by kernel parameter: %s\n",
		   yesno(i915.enable_ips));

	if (INTEL_INFO(dev)->gen >= 8) {
		seq_puts(m, "Currently: unknown\n");
	} else {
		if (I915_READ(IPS_CTL) & IPS_ENABLE)
			seq_puts(m, "Currently: enabled\n");
		else
			seq_puts(m, "Currently: disabled\n");
	}

	intel_runtime_pm_put(dev_priv);

	return 0;
}
static int i915_sr_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool sr_enabled = false;

	intel_runtime_pm_get(dev_priv);

	if (HAS_PCH_SPLIT(dev))
		sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
	else if (IS_CRESTLINE(dev) || IS_I945G(dev) || IS_I945GM(dev))
		sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
	else if (IS_I915GM(dev))
		sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
	else if (IS_PINEVIEW(dev))
		sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;

	intel_runtime_pm_put(dev_priv);

	seq_printf(m, "self-refresh: %s\n",
		   sr_enabled ? "enabled" : "disabled");

	return 0;
}
static int i915_emon_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long temp, chipset, gfx;
	int ret;

	if (!IS_GEN5(dev))
		return -ENODEV;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	temp = i915_mch_val(dev_priv);
	chipset = i915_chipset_val(dev_priv);
	gfx = i915_gfx_val(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "GMCH temp: %ld\n", temp);
	seq_printf(m, "Chipset power: %ld\n", chipset);
	seq_printf(m, "GFX power: %ld\n", gfx);
	seq_printf(m, "Total power: %ld\n", chipset + gfx);

	return 0;
}
static int i915_ring_freq_table(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret = 0;
	int gpu_freq, ia_freq;

	if (!(IS_GEN6(dev) || IS_GEN7(dev))) {
		seq_puts(m, "unsupported on this chipset\n");
		return 0;
	}

	intel_runtime_pm_get(dev_priv);

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
	if (ret)
		goto out;

	seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");

	for (gpu_freq = dev_priv->rps.min_freq_softlimit;
	     gpu_freq <= dev_priv->rps.max_freq_softlimit;
	     gpu_freq++) {
		ia_freq = gpu_freq;
		sandybridge_pcode_read(dev_priv,
				       GEN6_PCODE_READ_MIN_FREQ_TABLE,
				       &ia_freq);
		seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
			   gpu_freq * GT_FREQUENCY_MULTIPLIER,
			   ((ia_freq >> 0) & 0xff) * 100,
			   ((ia_freq >> 8) & 0xff) * 100);
	}

	mutex_unlock(&dev_priv->rps.hw_lock);

out:
	intel_runtime_pm_put(dev_priv);
	return ret;
}
static int i915_opregion(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_opregion *opregion = &dev_priv->opregion;
	void *data = kmalloc(OPREGION_SIZE, GFP_KERNEL);
	int ret;

	if (data == NULL)
		return -ENOMEM;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out;

	if (opregion->header) {
		memcpy_fromio(data, opregion->header, OPREGION_SIZE);
		seq_write(m, data, OPREGION_SIZE);
	}

	mutex_unlock(&dev->struct_mutex);

out:
	kfree(data);
	return 0;
}
static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct intel_fbdev *ifbdev = NULL;
	struct intel_framebuffer *fb;

#ifdef CONFIG_DRM_I915_FBDEV
	struct drm_i915_private *dev_priv = dev->dev_private;

	ifbdev = dev_priv->fbdev;
	fb = to_intel_framebuffer(ifbdev->helper.fb);

	seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, refcount %d, obj ",
		   fb->base.width,
		   fb->base.height,
		   fb->base.depth,
		   fb->base.bits_per_pixel,
		   atomic_read(&fb->base.refcount.refcount));
	describe_obj(m, fb->obj);
	seq_putc(m, '\n');
#endif

	mutex_lock(&dev->mode_config.fb_lock);
	list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) {
		if (ifbdev && &fb->base == ifbdev->helper.fb)
			continue;

		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, refcount %d, obj ",
			   fb->base.width,
			   fb->base.height,
			   fb->base.depth,
			   fb->base.bits_per_pixel,
			   atomic_read(&fb->base.refcount.refcount));
		describe_obj(m, fb->obj);
		seq_putc(m, '\n');
	}
	mutex_unlock(&dev->mode_config.fb_lock);

	return 0;
}
static void describe_ctx_ringbuf(struct seq_file *m,
				 struct intel_ringbuffer *ringbuf)
{
	seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, last head: %d)",
		   ringbuf->space, ringbuf->head, ringbuf->tail,
		   ringbuf->last_retired_head);
}
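/* List every hardware context with its per-ring state objects and, when
 * execlists are enabled, the associated ringbuffers. */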
static int i915_context_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	struct intel_context *ctx;
	int ret, i;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	if (dev_priv->ips.pwrctx) {
		seq_puts(m, "power context ");
		describe_obj(m, dev_priv->ips.pwrctx);
		seq_putc(m, '\n');
	}

	if (dev_priv->ips.renderctx) {
		seq_puts(m, "render context ");
		describe_obj(m, dev_priv->ips.renderctx);
		seq_putc(m, '\n');
	}

	list_for_each_entry(ctx, &dev_priv->context_list, link) {
		if (!i915.enable_execlists &&
		    ctx->legacy_hw_ctx.rcs_state == NULL)
			continue;

		seq_puts(m, "HW context ");
		describe_ctx(m, ctx);
		for_each_ring(ring, dev_priv, i) {
			if (ring->default_context == ctx)
				seq_printf(m, "(default context %s) ",
					   ring->name);
		}

		if (i915.enable_execlists) {
			seq_putc(m, '\n');
			for_each_ring(ring, dev_priv, i) {
				struct drm_i915_gem_object *ctx_obj =
					ctx->engine[i].state;
				struct intel_ringbuffer *ringbuf =
					ctx->engine[i].ringbuf;

				seq_printf(m, "%s: ", ring->name);
				if (ctx_obj)
					describe_obj(m, ctx_obj);
				if (ringbuf)
					describe_ctx_ringbuf(m, ringbuf);
				seq_putc(m, '\n');
			}
		} else {
			describe_obj(m, ctx->legacy_hw_ctx.rcs_state);
		}

		seq_putc(m, '\n');
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
static void i915_dump_lrc_obj(struct seq_file *m,
			      struct intel_engine_cs *ring,
			      struct drm_i915_gem_object *ctx_obj)
{
	struct page *page;
	uint32_t *reg_state;
	int j;
	unsigned long ggtt_offset = 0;

	if (ctx_obj == NULL) {
		seq_printf(m, "Context on %s with no gem object\n",
			   ring->name);
		return;
	}

	seq_printf(m, "CONTEXT: %s %u\n", ring->name,
		   intel_execlists_ctx_id(ctx_obj));

	if (!i915_gem_obj_ggtt_bound(ctx_obj))
		seq_puts(m, "\tNot bound in GGTT\n");
	else
		ggtt_offset = i915_gem_obj_ggtt_offset(ctx_obj);

	if (i915_gem_object_get_pages(ctx_obj)) {
		seq_puts(m, "\tFailed to get pages for context object\n");
		return;
	}

	page = i915_gem_object_get_page(ctx_obj, 1);
	if (!WARN_ON(page == NULL)) {
		reg_state = kmap_atomic(page);

		for (j = 0; j < 0x600 / sizeof(u32) / 4; j += 4) {
			seq_printf(m, "\t[0x%08lx] 0x%08x 0x%08x 0x%08x 0x%08x\n",
				   ggtt_offset + 4096 + (j * 4),
				   reg_state[j], reg_state[j + 1],
				   reg_state[j + 2], reg_state[j + 3]);
		}
		kunmap_atomic(reg_state);
	}

	seq_putc(m, '\n');
}
static int i915_dump_lrc(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	struct intel_context *ctx;
	int ret, i;

	if (!i915.enable_execlists) {
		seq_printf(m, "Logical Ring Contexts are disabled\n");
		return 0;
	}

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	list_for_each_entry(ctx, &dev_priv->context_list, link) {
		for_each_ring(ring, dev_priv, i) {
			if (ring->default_context != ctx)
				i915_dump_lrc_obj(m, ring,
						  ctx->engine[i].state);
		}
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
static int i915_execlists(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	u32 status_pointer;
	u8 read_pointer;
	u8 write_pointer;
	u32 status;
	u32 ctx_id;
	struct list_head *cursor;
	int ring_id, i;
	int ret;

	if (!i915.enable_execlists) {
		seq_puts(m, "Logical Ring Contexts are disabled\n");
		return 0;
	}

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	intel_runtime_pm_get(dev_priv);

	for_each_ring(ring, dev_priv, ring_id) {
		struct intel_ctx_submit_request *head_req = NULL;
		int count = 0;
		unsigned long flags;

		seq_printf(m, "%s\n", ring->name);

		status = I915_READ(RING_EXECLIST_STATUS(ring));
		ctx_id = I915_READ(RING_EXECLIST_STATUS(ring) + 4);
		seq_printf(m, "\tExeclist status: 0x%08X, context: %u\n",
			   status, ctx_id);

		status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(ring));
		seq_printf(m, "\tStatus pointer: 0x%08X\n", status_pointer);

		read_pointer = ring->next_context_status_buffer;
		write_pointer = status_pointer & 0x07;
		if (read_pointer > write_pointer)
			write_pointer += 6;
		seq_printf(m, "\tRead pointer: 0x%08X, write pointer 0x%08X\n",
			   read_pointer, write_pointer);

		for (i = 0; i < 6; i++) {
			status = I915_READ(RING_CONTEXT_STATUS_BUF(ring) + 8*i);
			ctx_id = I915_READ(RING_CONTEXT_STATUS_BUF(ring) + 8*i + 4);

			seq_printf(m, "\tStatus buffer %d: 0x%08X, context: %u\n",
				   i, status, ctx_id);
		}

		spin_lock_irqsave(&ring->execlist_lock, flags);
		list_for_each(cursor, &ring->execlist_queue)
			count++;
		head_req = list_first_entry_or_null(&ring->execlist_queue,
				struct intel_ctx_submit_request, execlist_link);
		spin_unlock_irqrestore(&ring->execlist_lock, flags);

		seq_printf(m, "\t%d requests in queue\n", count);
		if (head_req) {
			struct drm_i915_gem_object *ctx_obj;

			ctx_obj = head_req->ctx->engine[ring_id].state;
			seq_printf(m, "\tHead request id: %u\n",
				   intel_execlists_ctx_id(ctx_obj));
			seq_printf(m, "\tHead request tail: %u\n",
				   head_req->tail);
		}

		seq_putc(m, '\n');
	}

	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
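/* Expose the uncore forcewake reference counts (split into render and
 * media wells on Valleyview). */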
static int i915_gen6_forcewake_count_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned forcewake_count = 0, fw_rendercount = 0, fw_mediacount = 0;

	spin_lock_irq(&dev_priv->uncore.lock);
	if (IS_VALLEYVIEW(dev)) {
		fw_rendercount = dev_priv->uncore.fw_rendercount;
		fw_mediacount = dev_priv->uncore.fw_mediacount;
	} else
		forcewake_count = dev_priv->uncore.forcewake_count;
	spin_unlock_irq(&dev_priv->uncore.lock);

	if (IS_VALLEYVIEW(dev)) {
		seq_printf(m, "fw_rendercount = %u\n", fw_rendercount);
		seq_printf(m, "fw_mediacount = %u\n", fw_mediacount);
	} else
		seq_printf(m, "forcewake count = %u\n", forcewake_count);

	return 0;
}
static const char *swizzle_string(unsigned swizzle)
{
	switch (swizzle) {
	case I915_BIT_6_SWIZZLE_NONE:
		return "none";
	case I915_BIT_6_SWIZZLE_9:
		return "bit9";
	case I915_BIT_6_SWIZZLE_9_10:
		return "bit9/bit10";
	case I915_BIT_6_SWIZZLE_9_11:
		return "bit9/bit11";
	case I915_BIT_6_SWIZZLE_9_10_11:
		return "bit9/bit10/bit11";
	case I915_BIT_6_SWIZZLE_9_17:
		return "bit9/bit17";
	case I915_BIT_6_SWIZZLE_9_10_17:
		return "bit9/bit10/bit17";
	case I915_BIT_6_SWIZZLE_UNKNOWN:
		return "unknown";
	}

	return "bug";
}
static int i915_swizzle_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_x));
	seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_y));

	if (IS_GEN3(dev) || IS_GEN4(dev)) {
		seq_printf(m, "DDC = 0x%08x\n",
			   I915_READ(DCC));
		seq_printf(m, "DDC2 = 0x%08x\n",
			   I915_READ(DCC2));
		seq_printf(m, "C0DRB3 = 0x%04x\n",
			   I915_READ16(C0DRB3));
		seq_printf(m, "C1DRB3 = 0x%04x\n",
			   I915_READ16(C1DRB3));
	} else if (INTEL_INFO(dev)->gen >= 6) {
		seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C0));
		seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C1));
		seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C2));
		seq_printf(m, "TILECTL = 0x%08x\n",
			   I915_READ(TILECTL));
		if (INTEL_INFO(dev)->gen >= 8)
			seq_printf(m, "GAMTARBMODE = 0x%08x\n",
				   I915_READ(GAMTARBMODE));
		else
			seq_printf(m, "ARB_MODE = 0x%08x\n",
				   I915_READ(ARB_MODE));
		seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
			   I915_READ(DISP_ARB_CTL));
	}

	if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
		seq_puts(m, "L-shaped memory detected\n");

	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
static int per_file_ctx(int id, void *ptr, void *data)
{
	struct intel_context *ctx = ptr;
	struct seq_file *m = data;
	struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;

	if (!ppgtt) {
		seq_printf(m, "  no ppgtt for context %d\n",
			   ctx->user_handle);
		return 0;
	}

	if (i915_gem_context_is_default(ctx))
		seq_puts(m, "  default context:\n");
	else
		seq_printf(m, "  context %d:\n", ctx->user_handle);
	ppgtt->debug_dump(ppgtt, m);

	return 0;
}
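/* Dump the aliasing-PPGTT page directory counts and the page-directory
 * pointers programmed into each ring's PDP registers (gen8 layout). */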
static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
	int unused, i;

	if (!ppgtt)
		return;

	seq_printf(m, "Page directories: %d\n", ppgtt->num_pd_pages);
	seq_printf(m, "Page tables: %d\n", ppgtt->num_pd_entries);
	for_each_ring(ring, dev_priv, unused) {
		seq_printf(m, "%s\n", ring->name);
		for (i = 0; i < 4; i++) {
			u32 offset = 0x270 + i * 8;
			u64 pdp = I915_READ(ring->mmio_base + offset + 4);
			pdp <<= 32;
			pdp |= I915_READ(ring->mmio_base + offset);
			seq_printf(m, "\tPDP%d 0x%016llx\n", i, pdp);
		}
	}
}
static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	struct drm_file *file;
	int i;

	if (INTEL_INFO(dev)->gen == 6)
		seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));

	for_each_ring(ring, dev_priv, i) {
		seq_printf(m, "%s\n", ring->name);
		if (INTEL_INFO(dev)->gen == 7)
			seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(RING_MODE_GEN7(ring)));
		seq_printf(m, "PP_DIR_BASE: 0x%08x\n", I915_READ(RING_PP_DIR_BASE(ring)));
		seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n", I915_READ(RING_PP_DIR_BASE_READ(ring)));
		seq_printf(m, "PP_DIR_DCLV: 0x%08x\n", I915_READ(RING_PP_DIR_DCLV(ring)));
	}
	if (dev_priv->mm.aliasing_ppgtt) {
		struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;

		seq_puts(m, "aliasing PPGTT:\n");
		seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd_offset);

		ppgtt->debug_dump(ppgtt, m);
	}

	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
		struct drm_i915_file_private *file_priv = file->driver_priv;

		seq_printf(m, "proc: %s\n",
			   get_pid_task(file->pid, PIDTYPE_PID)->comm);
		idr_for_each(&file_priv->context_idr, per_file_ctx, m);
	}
	seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
}
static int i915_ppgtt_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	int ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	if (INTEL_INFO(dev)->gen >= 8)
		gen8_ppgtt_info(m, dev);
	else if (INTEL_INFO(dev)->gen >= 6)
		gen6_ppgtt_info(m, dev);

	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
static int i915_llc(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* Size calculation for LLC is a bit of a pain. Ignore for now. */
	seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev)));
	seq_printf(m, "eLLC: %zuMB\n", dev_priv->ellc_size);

	return 0;
}
static int i915_edp_psr_status(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 psrperf = 0;
	u32 stat[3];
	enum pipe pipe;
	bool enabled = false;

	intel_runtime_pm_get(dev_priv);

	mutex_lock(&dev_priv->psr.lock);
	seq_printf(m, "Sink_Support: %s\n", yesno(dev_priv->psr.sink_support));
	seq_printf(m, "Source_OK: %s\n", yesno(dev_priv->psr.source_ok));
	seq_printf(m, "Enabled: %s\n", yesno((bool)dev_priv->psr.enabled));
	seq_printf(m, "Active: %s\n", yesno(dev_priv->psr.active));
	seq_printf(m, "Busy frontbuffer bits: 0x%03x\n",
		   dev_priv->psr.busy_frontbuffer_bits);
	seq_printf(m, "Re-enable work scheduled: %s\n",
		   yesno(work_busy(&dev_priv->psr.work.work)));

	if (HAS_DDI(dev))
		enabled = I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE;
	else {
		for_each_pipe(dev_priv, pipe) {
			stat[pipe] = I915_READ(VLV_PSRSTAT(pipe)) &
				VLV_EDP_PSR_CURR_STATE_MASK;
			if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
			    (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE))
				enabled = true;
		}
	}
	seq_printf(m, "HW Enabled & Active bit: %s", yesno(enabled));

	if (!HAS_DDI(dev))
		for_each_pipe(dev_priv, pipe) {
			if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
			    (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE))
				seq_printf(m, " pipe %c", pipe_name(pipe));
		}
	seq_puts(m, "\n");

	seq_printf(m, "Link standby: %s\n",
		   yesno((bool)dev_priv->psr.link_standby));

	/* CHV PSR has no kind of performance counter */
	if (HAS_PSR(dev) && HAS_DDI(dev)) {
		psrperf = I915_READ(EDP_PSR_PERF_CNT(dev)) &
			EDP_PSR_PERF_CNT_MASK;

		seq_printf(m, "Performance_Counter: %u\n", psrperf);
	}
	mutex_unlock(&dev_priv->psr.lock);

	intel_runtime_pm_put(dev_priv);
	return 0;
}
static int i915_sink_crc(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct intel_encoder *encoder;
	struct intel_connector *connector;
	struct intel_dp *intel_dp = NULL;
	int ret;
	u8 crc[6];

	drm_modeset_lock_all(dev);
	list_for_each_entry(connector, &dev->mode_config.connector_list,
			    base.head) {

		if (connector->base.dpms != DRM_MODE_DPMS_ON)
			continue;

		if (!connector->base.encoder)
			continue;

		encoder = to_intel_encoder(connector->base.encoder);
		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		intel_dp = enc_to_intel_dp(&encoder->base);

		ret = intel_dp_sink_crc(intel_dp, crc);
		if (ret)
			goto out;

		seq_printf(m, "%02x%02x%02x%02x%02x%02x\n",
			   crc[0], crc[1], crc[2],
			   crc[3], crc[4], crc[5]);
		goto out;
	}
	ret = -ENODEV;
out:
	drm_modeset_unlock_all(dev);
	return ret;
}
static int i915_energy_uJ(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u64 power;
	u32 units;

	if (INTEL_INFO(dev)->gen < 6)
		return -ENODEV;

	intel_runtime_pm_get(dev_priv);

	rdmsrl(MSR_RAPL_POWER_UNIT, power);
	power = (power & 0x1f00) >> 8;
	units = 1000000 / (1 << power); /* convert to uJ */
	power = I915_READ(MCH_SECP_NRG_STTS);
	power *= units;

	intel_runtime_pm_put(dev_priv);

	seq_printf(m, "%llu", (long long unsigned)power);

	return 0;
}
static int i915_pc8_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) {
		seq_puts(m, "not supported\n");
		return 0;
	}

	seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->mm.busy));
	seq_printf(m, "IRQs disabled: %s\n",
		   yesno(!intel_irqs_enabled(dev_priv)));

	return 0;
}
static const char *power_domain_str(enum intel_display_power_domain domain)
{
	switch (domain) {
	case POWER_DOMAIN_PIPE_A:
		return "PIPE_A";
	case POWER_DOMAIN_PIPE_B:
		return "PIPE_B";
	case POWER_DOMAIN_PIPE_C:
		return "PIPE_C";
	case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
		return "PIPE_A_PANEL_FITTER";
	case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
		return "PIPE_B_PANEL_FITTER";
	case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
		return "PIPE_C_PANEL_FITTER";
	case POWER_DOMAIN_TRANSCODER_A:
		return "TRANSCODER_A";
	case POWER_DOMAIN_TRANSCODER_B:
		return "TRANSCODER_B";
	case POWER_DOMAIN_TRANSCODER_C:
		return "TRANSCODER_C";
	case POWER_DOMAIN_TRANSCODER_EDP:
		return "TRANSCODER_EDP";
	case POWER_DOMAIN_PORT_DDI_A_2_LANES:
		return "PORT_DDI_A_2_LANES";
	case POWER_DOMAIN_PORT_DDI_A_4_LANES:
		return "PORT_DDI_A_4_LANES";
	case POWER_DOMAIN_PORT_DDI_B_2_LANES:
		return "PORT_DDI_B_2_LANES";
	case POWER_DOMAIN_PORT_DDI_B_4_LANES:
		return "PORT_DDI_B_4_LANES";
	case POWER_DOMAIN_PORT_DDI_C_2_LANES:
		return "PORT_DDI_C_2_LANES";
	case POWER_DOMAIN_PORT_DDI_C_4_LANES:
		return "PORT_DDI_C_4_LANES";
	case POWER_DOMAIN_PORT_DDI_D_2_LANES:
		return "PORT_DDI_D_2_LANES";
	case POWER_DOMAIN_PORT_DDI_D_4_LANES:
		return "PORT_DDI_D_4_LANES";
	case POWER_DOMAIN_PORT_DSI:
		return "PORT_DSI";
	case POWER_DOMAIN_PORT_CRT:
		return "PORT_CRT";
	case POWER_DOMAIN_PORT_OTHER:
		return "PORT_OTHER";
	case POWER_DOMAIN_VGA:
		return "VGA";
	case POWER_DOMAIN_AUDIO:
		return "AUDIO";
	case POWER_DOMAIN_PLLS:
		return "PLLS";
	case POWER_DOMAIN_INIT:
		return "INIT";
	default:
		MISSING_CASE(domain);
		return "?";
	}
}
static int i915_power_domain_info(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	int i;

	mutex_lock(&power_domains->lock);

	seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
	for (i = 0; i < power_domains->power_well_count; i++) {
		struct i915_power_well *power_well;
		enum intel_display_power_domain power_domain;

		power_well = &power_domains->power_wells[i];
		seq_printf(m, "%-25s %d\n", power_well->name,
			   power_well->count);

		for (power_domain = 0; power_domain < POWER_DOMAIN_NUM;
		     power_domain++) {
			if (!(BIT(power_domain) & power_well->domains))
				continue;

			seq_printf(m, "  %-23s %d\n",
				   power_domain_str(power_domain),
				   power_domains->domain_use_count[power_domain]);
		}
	}

	mutex_unlock(&power_domains->lock);

	return 0;
}
static void intel_seq_print_mode(struct seq_file *m, int tabs,
				 struct drm_display_mode *mode)
{
	int i;

	for (i = 0; i < tabs; i++)
		seq_putc(m, '\t');

	seq_printf(m, "id %d:\"%s\" freq %d clock %d hdisp %d hss %d hse %d htot %d vdisp %d vss %d vse %d vtot %d type 0x%x flags 0x%x\n",
		   mode->base.id, mode->name,
		   mode->vrefresh, mode->clock,
		   mode->hdisplay, mode->hsync_start,
		   mode->hsync_end, mode->htotal,
		   mode->vdisplay, mode->vsync_start,
		   mode->vsync_end, mode->vtotal,
		   mode->type, mode->flags);
}
static void intel_encoder_info(struct seq_file *m,
			       struct intel_crtc *intel_crtc,
			       struct intel_encoder *intel_encoder)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_crtc *crtc = &intel_crtc->base;
	struct intel_connector *intel_connector;
	struct drm_encoder *encoder;

	encoder = &intel_encoder->base;
	seq_printf(m, "\tencoder %d: type: %s, connectors:\n",
		   encoder->base.id, encoder->name);
	for_each_connector_on_encoder(dev, encoder, intel_connector) {
		struct drm_connector *connector = &intel_connector->base;
		seq_printf(m, "\t\tconnector %d: type: %s, status: %s",
			   connector->base.id,
			   connector->name,
			   drm_get_connector_status_name(connector->status));
		if (connector->status == connector_status_connected) {
			struct drm_display_mode *mode = &crtc->mode;
			seq_printf(m, ", mode:\n");
			intel_seq_print_mode(m, 2, mode);
		} else {
			seq_putc(m, '\n');
		}
	}
}
static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_crtc *crtc = &intel_crtc->base;
	struct intel_encoder *intel_encoder;

	if (crtc->primary->fb)
		seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n",
			   crtc->primary->fb->base.id, crtc->x, crtc->y,
			   crtc->primary->fb->width, crtc->primary->fb->height);
	else
		seq_puts(m, "\tprimary plane disabled\n");
	for_each_encoder_on_crtc(dev, crtc, intel_encoder)
		intel_encoder_info(m, intel_crtc, intel_encoder);
}
static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
{
	struct drm_display_mode *mode = panel->fixed_mode;

	seq_printf(m, "\tfixed mode:\n");
	intel_seq_print_mode(m, 2, mode);
}
static void intel_dp_info(struct seq_file *m,
			  struct intel_connector *intel_connector)
{
	struct intel_encoder *intel_encoder = intel_connector->encoder;
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);

	seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
	seq_printf(m, "\taudio support: %s\n", intel_dp->has_audio ? "yes" :
		   "no");
	if (intel_encoder->type == INTEL_OUTPUT_EDP)
		intel_panel_info(m, &intel_connector->panel);
}
static void intel_hdmi_info(struct seq_file *m,
			    struct intel_connector *intel_connector)
{
	struct intel_encoder *intel_encoder = intel_connector->encoder;
	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);

	seq_printf(m, "\taudio support: %s\n", intel_hdmi->has_audio ? "yes" :
		   "no");
}
static void intel_lvds_info(struct seq_file *m,
			    struct intel_connector *intel_connector)
{
	intel_panel_info(m, &intel_connector->panel);
}
static void intel_connector_info(struct seq_file *m,
				 struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct intel_encoder *intel_encoder = intel_connector->encoder;
	struct drm_display_mode *mode;

	seq_printf(m, "connector %d: type %s, status: %s\n",
		   connector->base.id, connector->name,
		   drm_get_connector_status_name(connector->status));
	if (connector->status == connector_status_connected) {
		seq_printf(m, "\tname: %s\n", connector->display_info.name);
		seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
			   connector->display_info.width_mm,
			   connector->display_info.height_mm);
		seq_printf(m, "\tsubpixel order: %s\n",
			   drm_get_subpixel_order_name(connector->display_info.subpixel_order));
		seq_printf(m, "\tCEA rev: %d\n",
			   connector->display_info.cea_rev);
	}
	if (intel_encoder) {
		if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
		    intel_encoder->type == INTEL_OUTPUT_EDP)
			intel_dp_info(m, intel_connector);
		else if (intel_encoder->type == INTEL_OUTPUT_HDMI)
			intel_hdmi_info(m, intel_connector);
		else if (intel_encoder->type == INTEL_OUTPUT_LVDS)
			intel_lvds_info(m, intel_connector);
	}

	seq_printf(m, "\tmodes:\n");
	list_for_each_entry(mode, &connector->modes, head)
		intel_seq_print_mode(m, 2, mode);
}
static bool cursor_active(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 state;

	if (IS_845G(dev) || IS_I865G(dev))
		state = I915_READ(_CURACNTR) & CURSOR_ENABLE;
	else
		state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;

	return state;
}
static bool cursor_position(struct drm_device *dev, int pipe, int *x, int *y)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pos;

	pos = I915_READ(CURPOS(pipe));

	*x = (pos >> CURSOR_X_SHIFT) & CURSOR_POS_MASK;
	if (pos & (CURSOR_POS_SIGN << CURSOR_X_SHIFT))
		*x = -*x;

	*y = (pos >> CURSOR_Y_SHIFT) & CURSOR_POS_MASK;
	if (pos & (CURSOR_POS_SIGN << CURSOR_Y_SHIFT))
		*y = -*y;

	return cursor_active(dev, pipe);
}
static int i915_display_info(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc;
	struct drm_connector *connector;

	intel_runtime_pm_get(dev_priv);
	drm_modeset_lock_all(dev);
	seq_printf(m, "CRTC info\n");
	seq_printf(m, "---------\n");
	for_each_intel_crtc(dev, crtc) {
		bool active;
		int x, y;

		seq_printf(m, "CRTC %d: pipe: %c, active=%s (size=%dx%d)\n",
			   crtc->base.base.id, pipe_name(crtc->pipe),
			   yesno(crtc->active), crtc->config->pipe_src_w,
			   crtc->config->pipe_src_h);
		if (crtc->active) {
			intel_crtc_info(m, crtc);

			active = cursor_position(dev, crtc->pipe, &x, &y);
			seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x, active? %s\n",
				   yesno(crtc->cursor_base),
				   x, y, crtc->cursor_width, crtc->cursor_height,
				   crtc->cursor_addr, yesno(active));
		}

		seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s \n",
			   yesno(!crtc->cpu_fifo_underrun_disabled),
			   yesno(!crtc->pch_fifo_underrun_disabled));
	}

	seq_printf(m, "\n");
	seq_printf(m, "Connector info\n");
	seq_printf(m, "--------------\n");
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		intel_connector_info(m, connector);
	}
	drm_modeset_unlock_all(dev);
	intel_runtime_pm_put(dev_priv);

	return 0;
}
static int i915_semaphore_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	int num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
	int i, j, ret;

	if (!i915_semaphore_is_enabled(dev)) {
		seq_puts(m, "Semaphores are disabled\n");
		return 0;
	}

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	if (IS_BROADWELL(dev)) {
		struct page *page;
		uint64_t *seqno;

		page = i915_gem_object_get_page(dev_priv->semaphore_obj, 0);

		seqno = (uint64_t *)kmap_atomic(page);
		for_each_ring(ring, dev_priv, i) {
			uint64_t offset;

			seq_printf(m, "%s\n", ring->name);

			seq_puts(m, "  Last signal:");
			for (j = 0; j < num_rings; j++) {
				offset = i * I915_NUM_RINGS + j;
				seq_printf(m, "0x%08llx (0x%02llx) ",
					   seqno[offset], offset * 8);
			}
			seq_putc(m, '\n');

			seq_puts(m, "  Last wait:  ");
			for (j = 0; j < num_rings; j++) {
				offset = i + (j * I915_NUM_RINGS);
				seq_printf(m, "0x%08llx (0x%02llx) ",
					   seqno[offset], offset * 8);
			}
			seq_putc(m, '\n');

		}
		kunmap_atomic(seqno);
	} else {
		seq_puts(m, "  Last signal:");
		for_each_ring(ring, dev_priv, i)
			for (j = 0; j < num_rings; j++)
				seq_printf(m, "0x%08x\n",
					   I915_READ(ring->semaphore.mbox.signal[j]));
		seq_putc(m, '\n');
	}

	seq_puts(m, "\nSync seqno:\n");
	for_each_ring(ring, dev_priv, i) {
		for (j = 0; j < num_rings; j++) {
			seq_printf(m, "  0x%08x ", ring->semaphore.sync_seqno[j]);
		}
		seq_putc(m, '\n');
	}
	seq_putc(m, '\n');

	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);
	return 0;
}
static int i915_shared_dplls_info(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	drm_modeset_lock_all(dev);
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

		seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->name, pll->id);
		seq_printf(m, " crtc_mask: 0x%08x, active: %d, on: %s\n",
			   pll->config.crtc_mask, pll->active, yesno(pll->on));
		seq_printf(m, " tracked hardware state:\n");
		seq_printf(m, " dpll:    0x%08x\n", pll->config.hw_state.dpll);
		seq_printf(m, " dpll_md: 0x%08x\n",
			   pll->config.hw_state.dpll_md);
		seq_printf(m, " fp0:     0x%08x\n", pll->config.hw_state.fp0);
		seq_printf(m, " fp1:     0x%08x\n", pll->config.hw_state.fp1);
		seq_printf(m, " wrpll:   0x%08x\n", pll->config.hw_state.wrpll);
	}
	drm_modeset_unlock_all(dev);

	return 0;
}
static int i915_wa_registers(struct seq_file *m, void *unused)
{
	int i;
	int ret;
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	intel_runtime_pm_get(dev_priv);

	seq_printf(m, "Workarounds applied: %d\n", dev_priv->workarounds.count);
	for (i = 0; i < dev_priv->workarounds.count; ++i) {
		u32 addr, mask, value, read;
		bool ok;

		addr = dev_priv->workarounds.reg[i].addr;
		mask = dev_priv->workarounds.reg[i].mask;
		value = dev_priv->workarounds.reg[i].value;
		read = I915_READ(addr);
		ok = (value & mask) == (read & mask);
		seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X, read: 0x%08x, status: %s\n",
			   addr, value, mask, read, ok ? "OK" : "FAIL");
	}

	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);
	return 0;
}
static int i915_ddb_info(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct skl_ddb_allocation *ddb;
	struct skl_ddb_entry *entry;
	enum pipe pipe;
	int plane;

	if (INTEL_INFO(dev)->gen < 9)
		return 0;

	drm_modeset_lock_all(dev);

	ddb = &dev_priv->wm.skl_hw.ddb;

	seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");

	for_each_pipe(dev_priv, pipe) {
		seq_printf(m, "Pipe %c\n", pipe_name(pipe));

		for_each_plane(pipe, plane) {
			entry = &ddb->plane[pipe][plane];
			seq_printf(m, "  Plane%-8d%8u%8u%8u\n", plane + 1,
				   entry->start, entry->end,
				   skl_ddb_entry_size(entry));
		}

		entry = &ddb->cursor[pipe];
		seq_printf(m, "  %-13s%8u%8u%8u\n", "Cursor", entry->start,
			   entry->end, skl_ddb_entry_size(entry));
	}

	drm_modeset_unlock_all(dev);

	return 0;
}
struct pipe_crc_info {
	const char *name;
	struct drm_device *dev;
	enum pipe pipe;
};
static int i915_dp_mst_info(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_encoder *encoder;
	struct intel_encoder *intel_encoder;
	struct intel_digital_port *intel_dig_port;
	drm_modeset_lock_all(dev);
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		intel_encoder = to_intel_encoder(encoder);
		if (intel_encoder->type != INTEL_OUTPUT_DISPLAYPORT)
			continue;
		intel_dig_port = enc_to_dig_port(encoder);
		if (!intel_dig_port->dp.can_mst)
			continue;

		drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
	}
	drm_modeset_unlock_all(dev);
	return 0;
}
static int i915_pipe_crc_open(struct inode *inode, struct file *filep)
{
	struct pipe_crc_info *info = inode->i_private;
	struct drm_i915_private *dev_priv = info->dev->dev_private;
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];

	if (info->pipe >= INTEL_INFO(info->dev)->num_pipes)
		return -ENODEV;

	spin_lock_irq(&pipe_crc->lock);

	if (pipe_crc->opened) {
		spin_unlock_irq(&pipe_crc->lock);
		return -EBUSY; /* already open */
	}

	pipe_crc->opened = true;
	filep->private_data = inode->i_private;

	spin_unlock_irq(&pipe_crc->lock);

	return 0;
}
static int i915_pipe_crc_release(struct inode *inode, struct file *filep)
{
	struct pipe_crc_info *info = inode->i_private;
	struct drm_i915_private *dev_priv = info->dev->dev_private;
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];

	spin_lock_irq(&pipe_crc->lock);
	pipe_crc->opened = false;
	spin_unlock_irq(&pipe_crc->lock);

	return 0;
}
/* (6 fields, 8 chars each, space separated (5) + '\n') */
#define PIPE_CRC_LINE_LEN	(6 * 8 + 5 + 1)
/* account for '\0' */
#define PIPE_CRC_BUFFER_LEN	(PIPE_CRC_LINE_LEN + 1)

static int pipe_crc_data_count(struct intel_pipe_crc *pipe_crc)
{
	assert_spin_locked(&pipe_crc->lock);
	return CIRC_CNT(pipe_crc->head, pipe_crc->tail,
			INTEL_PIPE_CRC_ENTRIES_NR);
}
static ssize_t
i915_pipe_crc_read(struct file *filep, char __user *user_buf, size_t count,
		   loff_t *pos)
{
	struct pipe_crc_info *info = filep->private_data;
	struct drm_device *dev = info->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
	char buf[PIPE_CRC_BUFFER_LEN];
	int n_entries;
	ssize_t bytes_read;

	/*
	 * Don't allow user space to provide buffers not big enough to hold
	 * a line of data.
	 */
	if (count < PIPE_CRC_LINE_LEN)
		return -EINVAL;

	if (pipe_crc->source == INTEL_PIPE_CRC_SOURCE_NONE)
		return 0;

	/* nothing to read */
	spin_lock_irq(&pipe_crc->lock);
	while (pipe_crc_data_count(pipe_crc) == 0) {
		int ret;

		if (filep->f_flags & O_NONBLOCK) {
			spin_unlock_irq(&pipe_crc->lock);
			return -EAGAIN;
		}

		ret = wait_event_interruptible_lock_irq(pipe_crc->wq,
				pipe_crc_data_count(pipe_crc), pipe_crc->lock);
		if (ret) {
			spin_unlock_irq(&pipe_crc->lock);
			return ret;
		}
	}

	/* We now have one or more entries to read */
	n_entries = count / PIPE_CRC_LINE_LEN;

	bytes_read = 0;
	while (n_entries > 0) {
		struct intel_pipe_crc_entry *entry =
			&pipe_crc->entries[pipe_crc->tail];
		int ret;

		if (CIRC_CNT(pipe_crc->head, pipe_crc->tail,
			     INTEL_PIPE_CRC_ENTRIES_NR) < 1)
			break;

		BUILD_BUG_ON_NOT_POWER_OF_2(INTEL_PIPE_CRC_ENTRIES_NR);
		pipe_crc->tail = (pipe_crc->tail + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);

		bytes_read += snprintf(buf, PIPE_CRC_BUFFER_LEN,
				       "%8u %8x %8x %8x %8x %8x\n",
				       entry->frame, entry->crc[0],
				       entry->crc[1], entry->crc[2],
				       entry->crc[3], entry->crc[4]);
		n_entries--;

		spin_unlock_irq(&pipe_crc->lock);

		ret = copy_to_user(user_buf, buf, PIPE_CRC_LINE_LEN);
		if (ret == PIPE_CRC_LINE_LEN)
			return -EFAULT;

		user_buf += PIPE_CRC_LINE_LEN;
		spin_lock_irq(&pipe_crc->lock);
	}

	spin_unlock_irq(&pipe_crc->lock);

	return bytes_read;
}
static const struct file_operations i915_pipe_crc_fops = {
	.owner = THIS_MODULE,
	.open = i915_pipe_crc_open,
	.read = i915_pipe_crc_read,
	.release = i915_pipe_crc_release,
};
static struct pipe_crc_info i915_pipe_crc_data[I915_MAX_PIPES] = {
	{
		.name = "i915_pipe_A_crc",
		.pipe = PIPE_A,
	},
	{
		.name = "i915_pipe_B_crc",
		.pipe = PIPE_B,
	},
	{
		.name = "i915_pipe_C_crc",
		.pipe = PIPE_C,
	},
};
static int i915_pipe_crc_create(struct dentry *root, struct drm_minor *minor,
				enum pipe pipe)
{
	struct drm_device *dev = minor->dev;
	struct dentry *ent;
	struct pipe_crc_info *info = &i915_pipe_crc_data[pipe];

	info->dev = dev;
	ent = debugfs_create_file(info->name, S_IRUGO, root, info,
				  &i915_pipe_crc_fops);
	if (!ent)
		return -ENOMEM;

	return drm_add_fake_info_node(minor, ent, info);
}
static const char * const pipe_crc_sources[] = {
	"none",
	"plane1",
	"plane2",
	"pf",
	"pipe",
	"TV",
	"DP-B",
	"DP-C",
	"DP-D",
	"auto",
};

static const char *pipe_crc_source_name(enum intel_pipe_crc_source source)
{
	BUILD_BUG_ON(ARRAY_SIZE(pipe_crc_sources) != INTEL_PIPE_CRC_SOURCE_MAX);
	return pipe_crc_sources[source];
}
static int display_crc_ctl_show(struct seq_file *m, void *data)
{
	struct drm_device *dev = m->private;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	for (i = 0; i < I915_MAX_PIPES; i++)
		seq_printf(m, "%c %s\n", pipe_name(i),
			   pipe_crc_source_name(dev_priv->pipe_crc[i].source));

	return 0;
}
static int display_crc_ctl_open(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;

	return single_open(file, display_crc_ctl_show, dev);
}
static int i8xx_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
				 uint32_t *val)
{
	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
		*source = INTEL_PIPE_CRC_SOURCE_PIPE;

	switch (*source) {
	case INTEL_PIPE_CRC_SOURCE_PIPE:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_INCLUDE_BORDER_I8XX;
		break;
	case INTEL_PIPE_CRC_SOURCE_NONE:
		*val = 0;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static int i9xx_pipe_crc_auto_source(struct drm_device *dev, enum pipe pipe,
				     enum intel_pipe_crc_source *source)
{
	struct intel_encoder *encoder;
	struct intel_crtc *crtc;
	struct intel_digital_port *dig_port;
	int ret = 0;

	*source = INTEL_PIPE_CRC_SOURCE_PIPE;

	drm_modeset_lock_all(dev);
	for_each_intel_encoder(dev, encoder) {
		if (!encoder->base.crtc)
			continue;

		crtc = to_intel_crtc(encoder->base.crtc);

		if (crtc->pipe != pipe)
			continue;

		switch (encoder->type) {
		case INTEL_OUTPUT_TVOUT:
			*source = INTEL_PIPE_CRC_SOURCE_TV;
			break;
		case INTEL_OUTPUT_DISPLAYPORT:
		case INTEL_OUTPUT_EDP:
			dig_port = enc_to_dig_port(&encoder->base);
			switch (dig_port->port) {
			case PORT_B:
				*source = INTEL_PIPE_CRC_SOURCE_DP_B;
				break;
			case PORT_C:
				*source = INTEL_PIPE_CRC_SOURCE_DP_C;
				break;
			case PORT_D:
				*source = INTEL_PIPE_CRC_SOURCE_DP_D;
				break;
			default:
				WARN(1, "nonexisting DP port %c\n",
				     port_name(dig_port->port));
				break;
			}
			break;
		default:
			break;
		}
	}
	drm_modeset_unlock_all(dev);

	return ret;
}
static int vlv_pipe_crc_ctl_reg(struct drm_device *dev,
				enum pipe pipe,
				enum intel_pipe_crc_source *source,
				uint32_t *val)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool need_stable_symbols = false;

	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
		int ret = i9xx_pipe_crc_auto_source(dev, pipe, source);
		if (ret)
			return ret;
	}

	switch (*source) {
	case INTEL_PIPE_CRC_SOURCE_PIPE:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_VLV;
		break;
	case INTEL_PIPE_CRC_SOURCE_DP_B:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_VLV;
		need_stable_symbols = true;
		break;
	case INTEL_PIPE_CRC_SOURCE_DP_C:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_VLV;
		need_stable_symbols = true;
		break;
	case INTEL_PIPE_CRC_SOURCE_DP_D:
		if (!IS_CHERRYVIEW(dev))
			return -EINVAL;
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_D_VLV;
		need_stable_symbols = true;
		break;
	case INTEL_PIPE_CRC_SOURCE_NONE:
		*val = 0;
		break;
	default:
		return -EINVAL;
	}

	/*
	 * When the pipe CRC tap point is after the transcoders we need
	 * to tweak symbol-level features to produce a deterministic series of
	 * symbols for a given frame. We need to reset those features only once
	 * a frame (instead of every nth symbol):
	 *   - DC-balance: used to ensure a better clock recovery from the data
	 *     link
	 *   - DisplayPort scrambling: used for EMI reduction
	 */
	if (need_stable_symbols) {
		uint32_t tmp = I915_READ(PORT_DFT2_G4X);

		tmp |= DC_BALANCE_RESET_VLV;
		switch (pipe) {
		case PIPE_A:
			tmp |= PIPE_A_SCRAMBLE_RESET;
			break;
		case PIPE_B:
			tmp |= PIPE_B_SCRAMBLE_RESET;
			break;
		case PIPE_C:
			tmp |= PIPE_C_SCRAMBLE_RESET;
			break;
		default:
			return -EINVAL;
		}
		I915_WRITE(PORT_DFT2_G4X, tmp);
	}

	return 0;
}
static int i9xx_pipe_crc_ctl_reg(struct drm_device *dev,
				 enum pipe pipe,
				 enum intel_pipe_crc_source *source,
				 uint32_t *val)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool need_stable_symbols = false;

	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
		int ret = i9xx_pipe_crc_auto_source(dev, pipe, source);
		if (ret)
			return ret;
	}

	switch (*source) {
	case INTEL_PIPE_CRC_SOURCE_PIPE:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_I9XX;
		break;
	case INTEL_PIPE_CRC_SOURCE_TV:
		if (!SUPPORTS_TV(dev))
			return -EINVAL;
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_TV_PRE;
		break;
	case INTEL_PIPE_CRC_SOURCE_DP_B:
		if (!IS_G4X(dev))
			return -EINVAL;
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_G4X;
		need_stable_symbols = true;
		break;
	case INTEL_PIPE_CRC_SOURCE_DP_C:
		if (!IS_G4X(dev))
			return -EINVAL;
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_G4X;
		need_stable_symbols = true;
		break;
	case INTEL_PIPE_CRC_SOURCE_DP_D:
		if (!IS_G4X(dev))
			return -EINVAL;
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_D_G4X;
		need_stable_symbols = true;
		break;
	case INTEL_PIPE_CRC_SOURCE_NONE:
		*val = 0;
		break;
	default:
		return -EINVAL;
	}

	/*
	 * When the pipe CRC tap point is after the transcoders we need
	 * to tweak symbol-level features to produce a deterministic series of
	 * symbols for a given frame. We need to reset those features only once
	 * a frame (instead of every nth symbol):
	 *   - DC-balance: used to ensure a better clock recovery from the data
	 *     link
	 *   - DisplayPort scrambling: used for EMI reduction
	 */
	if (need_stable_symbols) {
		uint32_t tmp = I915_READ(PORT_DFT2_G4X);

		WARN_ON(!IS_G4X(dev));

		I915_WRITE(PORT_DFT_I9XX,
			   I915_READ(PORT_DFT_I9XX) | DC_BALANCE_RESET);

		if (pipe == PIPE_A)
			tmp |= PIPE_A_SCRAMBLE_RESET;
		else
			tmp |= PIPE_B_SCRAMBLE_RESET;

		I915_WRITE(PORT_DFT2_G4X, tmp);
	}

	return 0;
}
static void vlv_undo_pipe_scramble_reset(struct drm_device *dev,
					 enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t tmp = I915_READ(PORT_DFT2_G4X);

	switch (pipe) {
	case PIPE_A:
		tmp &= ~PIPE_A_SCRAMBLE_RESET;
		break;
	case PIPE_B:
		tmp &= ~PIPE_B_SCRAMBLE_RESET;
		break;
	case PIPE_C:
		tmp &= ~PIPE_C_SCRAMBLE_RESET;
		break;
	default:
		return;
	}
	if (!(tmp & PIPE_SCRAMBLE_RESET_MASK))
		tmp &= ~DC_BALANCE_RESET_VLV;
	I915_WRITE(PORT_DFT2_G4X, tmp);
}
static void g4x_undo_pipe_scramble_reset(struct drm_device *dev,
					 enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t tmp = I915_READ(PORT_DFT2_G4X);

	if (pipe == PIPE_A)
		tmp &= ~PIPE_A_SCRAMBLE_RESET;
	else
		tmp &= ~PIPE_B_SCRAMBLE_RESET;
	I915_WRITE(PORT_DFT2_G4X, tmp);

	if (!(tmp & PIPE_SCRAMBLE_RESET_MASK)) {
		I915_WRITE(PORT_DFT_I9XX,
			   I915_READ(PORT_DFT_I9XX) & ~DC_BALANCE_RESET);
	}
}
static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
				uint32_t *val)
{
	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
		*source = INTEL_PIPE_CRC_SOURCE_PIPE;

	switch (*source) {
	case INTEL_PIPE_CRC_SOURCE_PLANE1:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_ILK;
		break;
	case INTEL_PIPE_CRC_SOURCE_PLANE2:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_ILK;
		break;
	case INTEL_PIPE_CRC_SOURCE_PIPE:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_ILK;
		break;
	case INTEL_PIPE_CRC_SOURCE_NONE:
		*val = 0;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static void hsw_trans_edp_pipe_A_crc_wa(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc =
		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_A]);

	drm_modeset_lock_all(dev);
	/*
	 * If we use the eDP transcoder we need to make sure that we don't
	 * bypass the pfit, since otherwise the pipe CRC source won't work. Only
	 * relevant on hsw with pipe A when using the always-on power well
	 * routing.
	 */
	if (crtc->config->cpu_transcoder == TRANSCODER_EDP &&
	    !crtc->config->pch_pfit.enabled) {
		crtc->config->pch_pfit.force_thru = true;

		intel_display_power_get(dev_priv,
					POWER_DOMAIN_PIPE_PANEL_FITTER(PIPE_A));

		dev_priv->display.crtc_disable(&crtc->base);
		dev_priv->display.crtc_enable(&crtc->base);
	}
	drm_modeset_unlock_all(dev);
}
static void hsw_undo_trans_edp_pipe_A_crc_wa(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc =
		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_A]);

	drm_modeset_lock_all(dev);
	/*
	 * If we use the eDP transcoder we need to make sure that we don't
	 * bypass the pfit, since otherwise the pipe CRC source won't work. Only
	 * relevant on hsw with pipe A when using the always-on power well
	 * routing.
	 */
	if (crtc->config->pch_pfit.force_thru) {
		crtc->config->pch_pfit.force_thru = false;

		dev_priv->display.crtc_disable(&crtc->base);
		dev_priv->display.crtc_enable(&crtc->base);

		intel_display_power_put(dev_priv,
					POWER_DOMAIN_PIPE_PANEL_FITTER(PIPE_A));
	}
	drm_modeset_unlock_all(dev);
}
static int ivb_pipe_crc_ctl_reg(struct drm_device *dev,
				enum pipe pipe,
				enum intel_pipe_crc_source *source,
				uint32_t *val)
{
	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
		*source = INTEL_PIPE_CRC_SOURCE_PF;

	switch (*source) {
	case INTEL_PIPE_CRC_SOURCE_PLANE1:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_IVB;
		break;
	case INTEL_PIPE_CRC_SOURCE_PLANE2:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_IVB;
		break;
	case INTEL_PIPE_CRC_SOURCE_PF:
		if (IS_HASWELL(dev) && pipe == PIPE_A)
			hsw_trans_edp_pipe_A_crc_wa(dev);

		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PF_IVB;
		break;
	case INTEL_PIPE_CRC_SOURCE_NONE:
		*val = 0;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
			       enum intel_pipe_crc_source source)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
	struct intel_crtc *crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev,
									pipe));
	u32 val = 0; /* shut up gcc */
	int ret;

	if (pipe_crc->source == source)
		return 0;

	/* forbid changing the source without going back to 'none' */
	if (pipe_crc->source && source)
		return -EINVAL;

	if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PIPE(pipe))) {
		DRM_DEBUG_KMS("Trying to capture CRC while pipe is off\n");
		return -EIO;
	}

	if (IS_GEN2(dev))
		ret = i8xx_pipe_crc_ctl_reg(&source, &val);
	else if (INTEL_INFO(dev)->gen < 5)
		ret = i9xx_pipe_crc_ctl_reg(dev, pipe, &source, &val);
	else if (IS_VALLEYVIEW(dev))
		ret = vlv_pipe_crc_ctl_reg(dev, pipe, &source, &val);
	else if (IS_GEN5(dev) || IS_GEN6(dev))
		ret = ilk_pipe_crc_ctl_reg(&source, &val);
	else
		ret = ivb_pipe_crc_ctl_reg(dev, pipe, &source, &val);

	if (ret != 0)
		return ret;

	/* none -> real source transition */
	if (source) {
		struct intel_pipe_crc_entry *entries;

		DRM_DEBUG_DRIVER("collecting CRCs for pipe %c, %s\n",
				 pipe_name(pipe), pipe_crc_source_name(source));

		entries = kcalloc(INTEL_PIPE_CRC_ENTRIES_NR,
				  sizeof(pipe_crc->entries[0]),
				  GFP_KERNEL);
		if (!entries)
			return -ENOMEM;

		/*
		 * When IPS gets enabled, the pipe CRC changes. Since IPS gets
		 * enabled and disabled dynamically based on package C states,
		 * user space can't make reliable use of the CRCs, so let's just
		 * completely disable it.
		 */
		hsw_disable_ips(crtc);

		spin_lock_irq(&pipe_crc->lock);
		kfree(pipe_crc->entries);
		pipe_crc->entries = entries;
		pipe_crc->head = 0;
		pipe_crc->tail = 0;
		spin_unlock_irq(&pipe_crc->lock);
	}

	pipe_crc->source = source;

	I915_WRITE(PIPE_CRC_CTL(pipe), val);
	POSTING_READ(PIPE_CRC_CTL(pipe));

	/* real source -> none transition */
	if (source == INTEL_PIPE_CRC_SOURCE_NONE) {
		struct intel_pipe_crc_entry *entries;
		struct intel_crtc *crtc =
			to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

		DRM_DEBUG_DRIVER("stopping CRCs for pipe %c\n",
				 pipe_name(pipe));

		drm_modeset_lock(&crtc->base.mutex, NULL);
		if (crtc->active)
			intel_wait_for_vblank(dev, pipe);
		drm_modeset_unlock(&crtc->base.mutex);

		spin_lock_irq(&pipe_crc->lock);
		entries = pipe_crc->entries;
		pipe_crc->entries = NULL;
		pipe_crc->head = 0;
		pipe_crc->tail = 0;
		spin_unlock_irq(&pipe_crc->lock);

		kfree(entries);

		if (IS_G4X(dev))
			g4x_undo_pipe_scramble_reset(dev, pipe);
		else if (IS_VALLEYVIEW(dev))
			vlv_undo_pipe_scramble_reset(dev, pipe);
		else if (IS_HASWELL(dev) && pipe == PIPE_A)
			hsw_undo_trans_edp_pipe_A_crc_wa(dev);

		hsw_enable_ips(crtc);
	}

	return 0;
}
/*
 * Parse pipe CRC command strings:
 *   command: wsp* object wsp+ name wsp+ source wsp*
 *   object: 'pipe'
 *   name: (A | B | C)
 *   source: (none | plane1 | plane2 | pf)
 *   wsp: (#0x20 | #0x9 | #0xA)+
 *
 * eg.:
 *  "pipe A plane1"  ->  Start CRC computations on plane1 of pipe A
 *  "pipe A none"    ->  Stop CRC
 */
static int display_crc_ctl_tokenize(char *buf, char *words[], int max_words)
{
	int n_words = 0;

	while (*buf) {
		char *end;

		/* skip leading white space */
		buf = skip_spaces(buf);
		if (!*buf)
			break;	/* end of buffer */

		/* find end of word */
		for (end = buf; *end && !isspace(*end); end++)
			;

		if (n_words == max_words) {
			DRM_DEBUG_DRIVER("too many words, allowed <= %d\n",
					 max_words);
			return -EINVAL;	/* ran out of words[] before bytes */
		}

		if (*end)
			*end++ = '\0';
		words[n_words++] = buf;
		buf = end;
	}

	return n_words;
}
enum intel_pipe_crc_object {
	PIPE_CRC_OBJECT_PIPE,
};

static const char * const pipe_crc_objects[] = {
	"pipe",
};

static int
display_crc_ctl_parse_object(const char *buf, enum intel_pipe_crc_object *o)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(pipe_crc_objects); i++)
		if (!strcmp(buf, pipe_crc_objects[i])) {
			*o = i;
			return 0;
		}

	return -EINVAL;
}
static int display_crc_ctl_parse_pipe(const char *buf, enum pipe *pipe)
{
	const char name = buf[0];

	if (name < 'A' || name >= pipe_name(I915_MAX_PIPES))
		return -EINVAL;

	*pipe = name - 'A';

	return 0;
}
static int
display_crc_ctl_parse_source(const char *buf, enum intel_pipe_crc_source *s)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(pipe_crc_sources); i++)
		if (!strcmp(buf, pipe_crc_sources[i])) {
			*s = i;
			return 0;
		}

	return -EINVAL;
}
static int display_crc_ctl_parse(struct drm_device *dev, char *buf, size_t len)
{
#define N_WORDS 3
	int n_words;
	char *words[N_WORDS];
	enum pipe pipe;
	enum intel_pipe_crc_object object;
	enum intel_pipe_crc_source source;

	n_words = display_crc_ctl_tokenize(buf, words, N_WORDS);
	if (n_words != N_WORDS) {
		DRM_DEBUG_DRIVER("tokenize failed, a command is %d words\n",
				 N_WORDS);
		return -EINVAL;
	}

	if (display_crc_ctl_parse_object(words[0], &object) < 0) {
		DRM_DEBUG_DRIVER("unknown object %s\n", words[0]);
		return -EINVAL;
	}

	if (display_crc_ctl_parse_pipe(words[1], &pipe) < 0) {
		DRM_DEBUG_DRIVER("unknown pipe %s\n", words[1]);
		return -EINVAL;
	}

	if (display_crc_ctl_parse_source(words[2], &source) < 0) {
		DRM_DEBUG_DRIVER("unknown source %s\n", words[2]);
		return -EINVAL;
	}

	return pipe_crc_set_source(dev, pipe, source);
}
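/*
 * Usage sketch, matching the grammar comment above (the dri/0 path is
 * an assumption about where debugfs is mounted):
 *
 *   # echo "pipe A plane1" > /sys/kernel/debug/dri/0/i915_display_crc_ctl
 *   # cat /sys/kernel/debug/dri/0/i915_pipe_A_crc
 *   # echo "pipe A none" > /sys/kernel/debug/dri/0/i915_display_crc_ctl
 */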
static ssize_t display_crc_ctl_write(struct file *file, const char __user *ubuf,
				     size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_device *dev = m->private;
	char *tmpbuf;
	int ret;

	if (len == 0)
		return 0;

	if (len > PAGE_SIZE - 1) {
		DRM_DEBUG_DRIVER("expected <%lu bytes into pipe crc control\n",
				 PAGE_SIZE);
		return -E2BIG;
	}

	tmpbuf = kmalloc(len + 1, GFP_KERNEL);
	if (!tmpbuf)
		return -ENOMEM;

	if (copy_from_user(tmpbuf, ubuf, len)) {
		ret = -EFAULT;
		goto out;
	}
	tmpbuf[len] = '\0';

	ret = display_crc_ctl_parse(dev, tmpbuf, len);

out:
	kfree(tmpbuf);
	if (ret < 0)
		return ret;

	*offp += len;
	return len;
}
static const struct file_operations i915_display_crc_ctl_fops = {
	.owner = THIS_MODULE,
	.open = display_crc_ctl_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = display_crc_ctl_write
};
static void wm_latency_show(struct seq_file *m, const uint16_t wm[8])
{
	struct drm_device *dev = m->private;
	int num_levels = ilk_wm_max_level(dev) + 1;
	int level;

	drm_modeset_lock_all(dev);

	for (level = 0; level < num_levels; level++) {
		unsigned int latency = wm[level];

		/*
		 * - WM1+ latency values in 0.5us units
		 * - latencies are in us on gen9
		 */
		if (INTEL_INFO(dev)->gen >= 9)
			latency *= 10;
		else if (level > 0)
			latency *= 5;

		seq_printf(m, "WM%d %u (%u.%u usec)\n",
			   level, wm[level], latency / 10, latency % 10);
	}

	drm_modeset_unlock_all(dev);
}
static int pri_wm_latency_show(struct seq_file *m, void *data)
{
	struct drm_device *dev = m->private;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const uint16_t *latencies;

	if (INTEL_INFO(dev)->gen >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = to_i915(dev)->wm.pri_latency;

	wm_latency_show(m, latencies);

	return 0;
}

static int spr_wm_latency_show(struct seq_file *m, void *data)
{
	struct drm_device *dev = m->private;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const uint16_t *latencies;

	if (INTEL_INFO(dev)->gen >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = to_i915(dev)->wm.spr_latency;

	wm_latency_show(m, latencies);

	return 0;
}

static int cur_wm_latency_show(struct seq_file *m, void *data)
{
	struct drm_device *dev = m->private;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const uint16_t *latencies;

	if (INTEL_INFO(dev)->gen >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = to_i915(dev)->wm.cur_latency;

	wm_latency_show(m, latencies);

	return 0;
}
static int pri_wm_latency_open(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;

	if (HAS_GMCH_DISPLAY(dev))
		return -ENODEV;

	return single_open(file, pri_wm_latency_show, dev);
}

static int spr_wm_latency_open(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;

	if (HAS_GMCH_DISPLAY(dev))
		return -ENODEV;

	return single_open(file, spr_wm_latency_show, dev);
}

static int cur_wm_latency_open(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;

	if (HAS_GMCH_DISPLAY(dev))
		return -ENODEV;

	return single_open(file, cur_wm_latency_show, dev);
}
static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
				size_t len, loff_t *offp, uint16_t wm[8])
{
	struct seq_file *m = file->private_data;
	struct drm_device *dev = m->private;
	uint16_t new[8] = { 0 };
	int num_levels = ilk_wm_max_level(dev) + 1;
	int level;
	int ret;
	char tmp[32];

	if (len >= sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(tmp, ubuf, len))
		return -EFAULT;

	tmp[len] = '\0';

	ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
		     &new[0], &new[1], &new[2], &new[3],
		     &new[4], &new[5], &new[6], &new[7]);
	if (ret != num_levels)
		return -EINVAL;

	drm_modeset_lock_all(dev);

	for (level = 0; level < num_levels; level++)
		wm[level] = new[level];

	drm_modeset_unlock_all(dev);

	return len;
}
static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
				    size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_device *dev = m->private;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint16_t *latencies;

	if (INTEL_INFO(dev)->gen >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = to_i915(dev)->wm.pri_latency;

	return wm_latency_write(file, ubuf, len, offp, latencies);
}

static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
				    size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_device *dev = m->private;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint16_t *latencies;

	if (INTEL_INFO(dev)->gen >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = to_i915(dev)->wm.spr_latency;

	return wm_latency_write(file, ubuf, len, offp, latencies);
}

static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
				    size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_device *dev = m->private;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint16_t *latencies;

	if (INTEL_INFO(dev)->gen >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = to_i915(dev)->wm.cur_latency;

	return wm_latency_write(file, ubuf, len, offp, latencies);
}
static const struct file_operations i915_pri_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = pri_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = pri_wm_latency_write
};

static const struct file_operations i915_spr_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = spr_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = spr_wm_latency_write
};

static const struct file_operations i915_cur_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = cur_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = cur_wm_latency_write
};
static int
i915_wedged_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;

	*val = atomic_read(&dev_priv->gpu_error.reset_counter);

	return 0;
}

static int
i915_wedged_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;

	intel_runtime_pm_get(dev_priv);

	i915_handle_error(dev, val,
			  "Manually setting wedged to %llu", val);

	intel_runtime_pm_put(dev_priv);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
			i915_wedged_get, i915_wedged_set,
			"%llu\n");
static int
i915_ring_stop_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;

	*val = dev_priv->gpu_error.stop_rings;

	return 0;
}

static int
i915_ring_stop_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	DRM_DEBUG_DRIVER("Stopping rings 0x%08llx\n", val);

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	dev_priv->gpu_error.stop_rings = val;
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_ring_stop_fops,
			i915_ring_stop_get, i915_ring_stop_set,
			"0x%08llx\n");
static int
i915_ring_missed_irq_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;

	*val = dev_priv->gpu_error.missed_irq_rings;
	return 0;
}

static int
i915_ring_missed_irq_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	/* Lock against concurrent debugfs callers */
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	dev_priv->gpu_error.missed_irq_rings = val;
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_ring_missed_irq_fops,
			i915_ring_missed_irq_get, i915_ring_missed_irq_set,
			"0x%08llx\n");
static int
i915_ring_test_irq_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;

	*val = dev_priv->gpu_error.test_irq_rings;

	return 0;
}

static int
i915_ring_test_irq_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	DRM_DEBUG_DRIVER("Masking interrupts on rings 0x%08llx\n", val);

	/* Lock against concurrent debugfs callers */
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	dev_priv->gpu_error.test_irq_rings = val;
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_ring_test_irq_fops,
			i915_ring_test_irq_get, i915_ring_test_irq_set,
			"0x%08llx\n");
#define DROP_UNBOUND 0x1
#define DROP_BOUND 0x2
#define DROP_RETIRE 0x4
#define DROP_ACTIVE 0x8
#define DROP_ALL (DROP_UNBOUND | \
		  DROP_BOUND | \
		  DROP_RETIRE | \
		  DROP_ACTIVE)
static int
i915_drop_caches_get(void *data, u64 *val)
{
	*val = DROP_ALL;

	return 0;
}

static int
i915_drop_caches_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	DRM_DEBUG("Dropping caches: 0x%08llx\n", val);

	/* No need to check and wait for gpu resets, only libdrm auto-restarts
	 * on ioctls on -EAGAIN. */
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	if (val & DROP_ACTIVE) {
		ret = i915_gpu_idle(dev);
		if (ret)
			goto unlock;
	}

	if (val & (DROP_RETIRE | DROP_ACTIVE))
		i915_gem_retire_requests(dev);

	if (val & DROP_BOUND)
		i915_gem_shrink(dev_priv, LONG_MAX, I915_SHRINK_BOUND);

	if (val & DROP_UNBOUND)
		i915_gem_shrink(dev_priv, LONG_MAX, I915_SHRINK_UNBOUND);

unlock:
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
			i915_drop_caches_get, i915_drop_caches_set,
			"0x%08llx\n");
static int
i915_max_freq_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	if (INTEL_INFO(dev)->gen < 6)
		return -ENODEV;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
	if (ret)
		return ret;

	if (IS_VALLEYVIEW(dev))
		*val = vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit);
	else
		*val = dev_priv->rps.max_freq_softlimit * GT_FREQUENCY_MULTIPLIER;
	mutex_unlock(&dev_priv->rps.hw_lock);

	return 0;
}

static int
i915_max_freq_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 rp_state_cap, hw_max, hw_min;
	int ret;

	if (INTEL_INFO(dev)->gen < 6)
		return -ENODEV;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	DRM_DEBUG_DRIVER("Manually setting max freq to %llu\n", val);

	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
	if (ret)
		return ret;

	/*
	 * Turbo will still be enabled, but won't go above the set value.
	 */
	if (IS_VALLEYVIEW(dev)) {
		val = vlv_freq_opcode(dev_priv, val);

		hw_max = dev_priv->rps.max_freq;
		hw_min = dev_priv->rps.min_freq;
	} else {
		do_div(val, GT_FREQUENCY_MULTIPLIER);

		rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
		hw_max = dev_priv->rps.max_freq;
		hw_min = (rp_state_cap >> 16) & 0xff;
	}

	if (val < hw_min || val > hw_max || val < dev_priv->rps.min_freq_softlimit) {
		mutex_unlock(&dev_priv->rps.hw_lock);
		return -EINVAL;
	}

	dev_priv->rps.max_freq_softlimit = val;

	if (IS_VALLEYVIEW(dev))
		valleyview_set_rps(dev, val);
	else
		gen6_set_rps(dev, val);

	mutex_unlock(&dev_priv->rps.hw_lock);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_max_freq_fops,
			i915_max_freq_get, i915_max_freq_set,
			"%llu\n");
static int
i915_min_freq_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	if (INTEL_INFO(dev)->gen < 6)
		return -ENODEV;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
	if (ret)
		return ret;

	if (IS_VALLEYVIEW(dev))
		*val = vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit);
	else
		*val = dev_priv->rps.min_freq_softlimit * GT_FREQUENCY_MULTIPLIER;
	mutex_unlock(&dev_priv->rps.hw_lock);

	return 0;
}

static int
i915_min_freq_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 rp_state_cap, hw_max, hw_min;
	int ret;

	if (INTEL_INFO(dev)->gen < 6)
		return -ENODEV;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	DRM_DEBUG_DRIVER("Manually setting min freq to %llu\n", val);

	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
	if (ret)
		return ret;

	/*
	 * Turbo will still be enabled, but won't go below the set value.
	 */
	if (IS_VALLEYVIEW(dev)) {
		val = vlv_freq_opcode(dev_priv, val);

		hw_max = dev_priv->rps.max_freq;
		hw_min = dev_priv->rps.min_freq;
	} else {
		do_div(val, GT_FREQUENCY_MULTIPLIER);

		rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
		hw_max = dev_priv->rps.max_freq;
		hw_min = (rp_state_cap >> 16) & 0xff;
	}

	if (val < hw_min || val > hw_max || val > dev_priv->rps.max_freq_softlimit) {
		mutex_unlock(&dev_priv->rps.hw_lock);
		return -EINVAL;
	}

	dev_priv->rps.min_freq_softlimit = val;

	if (IS_VALLEYVIEW(dev))
		valleyview_set_rps(dev, val);
	else
		gen6_set_rps(dev, val);

	mutex_unlock(&dev_priv->rps.hw_lock);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_min_freq_fops,
			i915_min_freq_get, i915_min_freq_set,
			"%llu\n");
static int
i915_cache_sharing_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 snpcr;
	int ret;

	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
		return -ENODEV;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);

	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev_priv->dev->struct_mutex);

	*val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;

	return 0;
}

static int
i915_cache_sharing_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 snpcr;

	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
		return -ENODEV;

	if (val > 3)
		return -EINVAL;

	intel_runtime_pm_get(dev_priv);
	DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);

	/* Update the cache sharing policy here as well */
	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
	snpcr &= ~GEN6_MBC_SNPCR_MASK;
	snpcr |= (val << GEN6_MBC_SNPCR_SHIFT);
	I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);

	intel_runtime_pm_put(dev_priv);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
			i915_cache_sharing_get, i915_cache_sharing_set,
			"%llu\n");
static int i915_forcewake_open(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen < 6)
		return 0;

	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);

	return 0;
}

static int i915_forcewake_release(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen < 6)
		return 0;

	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);

	return 0;
}

static const struct file_operations i915_forcewake_fops = {
	.owner = THIS_MODULE,
	.open = i915_forcewake_open,
	.release = i915_forcewake_release,
};

static int i915_forcewake_create(struct dentry *root, struct drm_minor *minor)
{
	struct drm_device *dev = minor->dev;
	struct dentry *ent;

	ent = debugfs_create_file("i915_forcewake_user",
				  S_IRUSR,
				  root, dev,
				  &i915_forcewake_fops);
	if (!ent)
		return -ENOMEM;

	return drm_add_fake_info_node(minor, ent, &i915_forcewake_fops);
}
static int i915_debugfs_create(struct dentry *root,
			       struct drm_minor *minor,
			       const char *name,
			       const struct file_operations *fops)
{
	struct drm_device *dev = minor->dev;
	struct dentry *ent;

	ent = debugfs_create_file(name,
				  S_IRUGO | S_IWUSR,
				  root, dev,
				  fops);
	if (!ent)
		return -ENOMEM;

	return drm_add_fake_info_node(minor, ent, fops);
}
static const struct drm_info_list i915_debugfs_list[] = {
	{"i915_capabilities", i915_capabilities, 0},
	{"i915_gem_objects", i915_gem_object_info, 0},
	{"i915_gem_gtt", i915_gem_gtt_info, 0},
	{"i915_gem_pinned", i915_gem_gtt_info, 0, (void *) PINNED_LIST},
	{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
	{"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
	{"i915_gem_stolen", i915_gem_stolen_list_info },
	{"i915_gem_pageflip", i915_gem_pageflip_info, 0},
	{"i915_gem_request", i915_gem_request_info, 0},
	{"i915_gem_seqno", i915_gem_seqno_info, 0},
	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
	{"i915_gem_interrupt", i915_interrupt_info, 0},
	{"i915_gem_hws", i915_hws_info, 0, (void *)RCS},
	{"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS},
	{"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS},
	{"i915_gem_hws_vebox", i915_hws_info, 0, (void *)VECS},
	{"i915_gem_batch_pool", i915_gem_batch_pool_info, 0},
	{"i915_frequency_info", i915_frequency_info, 0},
	{"i915_drpc_info", i915_drpc_info, 0},
	{"i915_emon_status", i915_emon_status, 0},
	{"i915_ring_freq_table", i915_ring_freq_table, 0},
	{"i915_fbc_status", i915_fbc_status, 0},
	{"i915_ips_status", i915_ips_status, 0},
	{"i915_sr_status", i915_sr_status, 0},
	{"i915_opregion", i915_opregion, 0},
	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
	{"i915_context_status", i915_context_status, 0},
	{"i915_dump_lrc", i915_dump_lrc, 0},
	{"i915_execlists", i915_execlists, 0},
	{"i915_gen6_forcewake_count", i915_gen6_forcewake_count_info, 0},
	{"i915_swizzle_info", i915_swizzle_info, 0},
	{"i915_ppgtt_info", i915_ppgtt_info, 0},
	{"i915_llc", i915_llc, 0},
	{"i915_edp_psr_status", i915_edp_psr_status, 0},
	{"i915_sink_crc_eDP1", i915_sink_crc, 0},
	{"i915_energy_uJ", i915_energy_uJ, 0},
	{"i915_pc8_status", i915_pc8_status, 0},
	{"i915_power_domain_info", i915_power_domain_info, 0},
	{"i915_display_info", i915_display_info, 0},
	{"i915_semaphore_status", i915_semaphore_status, 0},
	{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
	{"i915_dp_mst_info", i915_dp_mst_info, 0},
	{"i915_wa_registers", i915_wa_registers, 0},
	{"i915_ddb_info", i915_ddb_info, 0},
};
4447 static const struct i915_debugfs_files
{
4449 const struct file_operations
*fops
;
4450 } i915_debugfs_files
[] = {
4451 {"i915_wedged", &i915_wedged_fops
},
4452 {"i915_max_freq", &i915_max_freq_fops
},
4453 {"i915_min_freq", &i915_min_freq_fops
},
4454 {"i915_cache_sharing", &i915_cache_sharing_fops
},
4455 {"i915_ring_stop", &i915_ring_stop_fops
},
4456 {"i915_ring_missed_irq", &i915_ring_missed_irq_fops
},
4457 {"i915_ring_test_irq", &i915_ring_test_irq_fops
},
4458 {"i915_gem_drop_caches", &i915_drop_caches_fops
},
4459 {"i915_error_state", &i915_error_state_fops
},
4460 {"i915_next_seqno", &i915_next_seqno_fops
},
4461 {"i915_display_crc_ctl", &i915_display_crc_ctl_fops
},
4462 {"i915_pri_wm_latency", &i915_pri_wm_latency_fops
},
4463 {"i915_spr_wm_latency", &i915_spr_wm_latency_fops
},
4464 {"i915_cur_wm_latency", &i915_cur_wm_latency_fops
},
4465 {"i915_fbc_false_color", &i915_fbc_fc_fops
},
void intel_display_crc_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];

		pipe_crc->opened = false;
		spin_lock_init(&pipe_crc->lock);
		init_waitqueue_head(&pipe_crc->wq);
	}
}
int i915_debugfs_init(struct drm_minor *minor)
{
	int ret, i;

	ret = i915_forcewake_create(minor->debugfs_root, minor);
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) {
		ret = i915_pipe_crc_create(minor->debugfs_root, minor, i);
		if (ret)
			return ret;
	}

	for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
		ret = i915_debugfs_create(minor->debugfs_root, minor,
					  i915_debugfs_files[i].name,
					  i915_debugfs_files[i].fops);
		if (ret)
			return ret;
	}

	return drm_debugfs_create_files(i915_debugfs_list,
					I915_DEBUGFS_ENTRIES,
					minor->debugfs_root, minor);
}
void i915_debugfs_cleanup(struct drm_minor *minor)
{
	int i;

	drm_debugfs_remove_files(i915_debugfs_list,
				 I915_DEBUGFS_ENTRIES, minor);

	drm_debugfs_remove_files((struct drm_info_list *) &i915_forcewake_fops,
				 1, minor);

	for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) {
		struct drm_info_list *info_list =
			(struct drm_info_list *)&i915_pipe_crc_data[i];

		drm_debugfs_remove_files(info_list, 1, minor);
	}

	for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
		struct drm_info_list *info_list =
			(struct drm_info_list *) i915_debugfs_files[i].fops;

		drm_debugfs_remove_files(info_list, 1, minor);
	}
}