/*
 * Copyright (c) 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *    Mika Kuoppala <mika.kuoppala@intel.com>
 *
 */

#include <generated/utsrelease.h>
#include "i915_drv.h"
static const char *yesno(int v)
{
	return v ? "yes" : "no";
}
static const char *ring_str(int ring)
{
	switch (ring) {
	case RCS: return "render";
	case VCS: return "bsd";
	case BCS: return "blt";
	case VECS: return "vebox";
	default: return "";
	}
}
static const char *pin_flag(int pinned)
{
	if (pinned > 0)
		return " P";
	else if (pinned < 0)
		return " p";
	else
		return "";
}
static const char *tiling_flag(int tiling)
{
	switch (tiling) {
	default:
	case I915_TILING_NONE: return "";
	case I915_TILING_X: return " X";
	case I915_TILING_Y: return " Y";
	}
}
static const char *dirty_flag(int dirty)
{
	return dirty ? " dirty" : "";
}
static const char *purgeable_flag(int purgeable)
{
	return purgeable ? " purgeable" : "";
}
static bool __i915_error_ok(struct drm_i915_error_state_buf *e)
{
	if (!e->err && WARN(e->bytes > (e->size - 1), "overflow")) {
		e->err = -ENOSPC;
		return false;
	}

	if (e->bytes == e->size - 1 || e->err)
		return false;

	return true;
}
static bool __i915_error_seek(struct drm_i915_error_state_buf *e,
			      unsigned len)
{
	if (e->pos + len <= e->start) {
		e->pos += len;
		return false;
	}

	/* First vsnprintf needs to fit in its entirety for memmove */
	if (len >= e->size) {
		e->err = -EIO;
		return false;
	}

	return true;
}
static void __i915_error_advance(struct drm_i915_error_state_buf *e,
				 unsigned len)
{
	/* If this is first printf in this window, adjust it so that
	 * start position matches start of the buffer
	 */
	if (e->pos < e->start) {
		const size_t off = e->start - e->pos;

		/* Should not happen but be paranoid */
		if (off > len || e->bytes) {
			e->err = -EIO;
			return;
		}

		memmove(e->buf, e->buf + off, len - off);
		e->bytes = len - off;
		e->pos = e->start;
		return;
	}

	e->bytes += len;
	e->pos += len;
}
static void i915_error_vprintf(struct drm_i915_error_state_buf *e,
			       const char *f, va_list args)
{
	unsigned len;

	if (!__i915_error_ok(e))
		return;

	/* Seek the first printf which hits the start position */
	if (e->pos < e->start) {
		va_list tmp;

		va_copy(tmp, args);
		len = vsnprintf(NULL, 0, f, tmp);
		va_end(tmp);

		if (!__i915_error_seek(e, len))
			return;
	}

	len = vsnprintf(e->buf + e->bytes, e->size - e->bytes, f, args);
	if (len >= e->size - e->bytes)
		len = e->size - e->bytes - 1;

	__i915_error_advance(e, len);
}
static void i915_error_puts(struct drm_i915_error_state_buf *e,
			    const char *str)
{
	unsigned len;

	if (!__i915_error_ok(e))
		return;

	len = strlen(str);

	/* Seek the first printf which hits the start position */
	if (e->pos < e->start) {
		if (!__i915_error_seek(e, len))
			return;
	}

	if (len >= e->size - e->bytes)
		len = e->size - e->bytes - 1;
	memcpy(e->buf + e->bytes, str, len);

	__i915_error_advance(e, len);
}
#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
#define err_puts(e, s) i915_error_puts(e, s)
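/* Emit one summary line per captured buffer object, decorated with the
 * flag strings produced by the helpers above. */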
static void print_error_buffers(struct drm_i915_error_state_buf *m,
				const char *name,
				struct drm_i915_error_buffer *err,
				int count)
{
	err_printf(m, "%s [%d]:\n", name, count);

	while (count--) {
		err_printf(m, " %08x %8u %02x %02x %x %x",
			   err->gtt_offset,
			   err->size,
			   err->read_domains,
			   err->write_domain,
			   err->rseqno, err->wseqno);
		err_puts(m, pin_flag(err->pinned));
		err_puts(m, tiling_flag(err->tiling));
		err_puts(m, dirty_flag(err->dirty));
		err_puts(m, purgeable_flag(err->purgeable));
		err_puts(m, err->ring != -1 ? " " : "");
		err_puts(m, ring_str(err->ring));
		err_puts(m, i915_cache_level_str(err->cache_level));

		if (err->name)
			err_printf(m, " (name: %d)", err->name);
		if (err->fence_reg != I915_FENCE_REG_NONE)
			err_printf(m, " (fence: %d)", err->fence_reg);

		err_puts(m, "\n");
		err++;
	}
}
static void i915_ring_error_state(struct drm_i915_error_state_buf *m,
				  struct drm_device *dev,
				  struct drm_i915_error_state *error,
				  unsigned ring)
{
	BUG_ON(ring >= I915_NUM_RINGS); /* shut up confused gcc */
	if (!error->ring[ring].valid)
		return;

	err_printf(m, "%s command stream:\n", ring_str(ring));
	err_printf(m, " HEAD: 0x%08x\n", error->head[ring]);
	err_printf(m, " TAIL: 0x%08x\n", error->tail[ring]);
	err_printf(m, " CTL: 0x%08x\n", error->ctl[ring]);
	err_printf(m, " ACTHD: 0x%08x\n", error->acthd[ring]);
	err_printf(m, " IPEIR: 0x%08x\n", error->ipeir[ring]);
	err_printf(m, " IPEHR: 0x%08x\n", error->ipehr[ring]);
	err_printf(m, " INSTDONE: 0x%08x\n", error->instdone[ring]);
	if (ring == RCS && INTEL_INFO(dev)->gen >= 4)
		err_printf(m, " BBADDR: 0x%08llx\n", error->bbaddr);

	if (INTEL_INFO(dev)->gen >= 4)
		err_printf(m, " INSTPS: 0x%08x\n", error->instps[ring]);
	err_printf(m, " INSTPM: 0x%08x\n", error->instpm[ring]);
	err_printf(m, " FADDR: 0x%08x\n", error->faddr[ring]);
	if (INTEL_INFO(dev)->gen >= 6) {
		err_printf(m, " RC PSMI: 0x%08x\n", error->rc_psmi[ring]);
		err_printf(m, " FAULT_REG: 0x%08x\n", error->fault_reg[ring]);
		err_printf(m, " SYNC_0: 0x%08x [last synced 0x%08x]\n",
			   error->semaphore_mboxes[ring][0],
			   error->semaphore_seqno[ring][0]);
		err_printf(m, " SYNC_1: 0x%08x [last synced 0x%08x]\n",
			   error->semaphore_mboxes[ring][1],
			   error->semaphore_seqno[ring][1]);
		if (HAS_VEBOX(dev)) {
			err_printf(m, " SYNC_2: 0x%08x [last synced 0x%08x]\n",
				   error->semaphore_mboxes[ring][2],
				   error->semaphore_seqno[ring][2]);
		}
	}
	err_printf(m, " seqno: 0x%08x\n", error->seqno[ring]);
	err_printf(m, " waiting: %s\n", yesno(error->waiting[ring]));
	err_printf(m, " ring->head: 0x%08x\n", error->cpu_ring_head[ring]);
	err_printf(m, " ring->tail: 0x%08x\n", error->cpu_ring_tail[ring]);
}
void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
{
	va_list args;

	va_start(args, f);
	i915_error_vprintf(e, f, args);
	va_end(args);
}
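/* Format a previously captured error state into the string buffer; this is
 * what userspace ultimately reads back through debugfs. */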
int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
			    const struct i915_error_state_file_priv *error_priv)
{
	struct drm_device *dev = error_priv->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_error_state *error = error_priv->error;
	int i, j, page, offset, elt;

	if (!error) {
		err_printf(m, "no error state collected\n");
		goto out;
	}

	err_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
		   error->time.tv_usec);
	err_printf(m, "Kernel: " UTS_RELEASE "\n");
	err_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
	err_printf(m, "EIR: 0x%08x\n", error->eir);
	err_printf(m, "IER: 0x%08x\n", error->ier);
	err_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
	err_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake);
	err_printf(m, "DERRMR: 0x%08x\n", error->derrmr);
	err_printf(m, "CCID: 0x%08x\n", error->ccid);

	for (i = 0; i < dev_priv->num_fence_regs; i++)
		err_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]);

	for (i = 0; i < ARRAY_SIZE(error->extra_instdone); i++)
		err_printf(m, " INSTDONE_%d: 0x%08x\n", i,
			   error->extra_instdone[i]);

	if (INTEL_INFO(dev)->gen >= 6) {
		err_printf(m, "ERROR: 0x%08x\n", error->error);
		err_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
	}

	if (INTEL_INFO(dev)->gen == 7)
		err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);

	for (i = 0; i < ARRAY_SIZE(error->ring); i++)
		i915_ring_error_state(m, dev, error, i);

	if (error->active_bo)
		print_error_buffers(m, "Active",
				    error->active_bo[0],
				    error->active_bo_count[0]);

	if (error->pinned_bo)
		print_error_buffers(m, "Pinned",
				    error->pinned_bo[0],
				    error->pinned_bo_count[0]);

	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
		struct drm_i915_error_object *obj;

		if ((obj = error->ring[i].batchbuffer)) {
			err_printf(m, "%s --- gtt_offset = 0x%08x\n",
				   dev_priv->ring[i].name,
				   obj->gtt_offset);
			offset = 0;
			for (page = 0; page < obj->page_count; page++) {
				for (elt = 0; elt < PAGE_SIZE/4; elt++) {
					err_printf(m, "%08x : %08x\n", offset,
						   obj->pages[page][elt]);
					offset += 4;
				}
			}
		}

		if (error->ring[i].num_requests) {
			err_printf(m, "%s --- %d requests\n",
				   dev_priv->ring[i].name,
				   error->ring[i].num_requests);
			for (j = 0; j < error->ring[i].num_requests; j++) {
				err_printf(m, " seqno 0x%08x, emitted %ld, tail 0x%08x\n",
					   error->ring[i].requests[j].seqno,
					   error->ring[i].requests[j].jiffies,
					   error->ring[i].requests[j].tail);
			}
		}

		if ((obj = error->ring[i].ringbuffer)) {
			err_printf(m, "%s --- ringbuffer = 0x%08x\n",
				   dev_priv->ring[i].name,
				   obj->gtt_offset);
			offset = 0;
			for (page = 0; page < obj->page_count; page++) {
				for (elt = 0; elt < PAGE_SIZE/4; elt++) {
					err_printf(m, "%08x : %08x\n",
						   offset,
						   obj->pages[page][elt]);
					offset += 4;
				}
			}
		}

		if ((obj = error->ring[i].ctx)) {
			err_printf(m, "%s --- HW Context = 0x%08x\n",
				   dev_priv->ring[i].name,
				   obj->gtt_offset);
			offset = 0;
			for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
				err_printf(m, "[%04x] %08x %08x %08x %08x\n",
					   offset,
					   obj->pages[0][elt],
					   obj->pages[0][elt+1],
					   obj->pages[0][elt+2],
					   obj->pages[0][elt+3]);
				offset += 16;
			}
		}
	}

	if (error->overlay)
		intel_overlay_print_error_state(m, error->overlay);

	if (error->display)
		intel_display_print_error_state(m, dev, error->display);

out:
	if (m->bytes == 0 && m->err)
		return m->err;

	return 0;
}
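/* Prepare the scratch buffer used to format the error state for a read
 * starting at @pos, falling back to progressively smaller allocations
 * when memory is tight. */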
int i915_error_state_buf_init(struct drm_i915_error_state_buf *ebuf,
			      size_t count, loff_t pos)
{
	memset(ebuf, 0, sizeof(*ebuf));

	/* We need to have enough room to store any i915_error_state printf
	 * so that we can move it to start position.
	 */
	ebuf->size = count + 1 > PAGE_SIZE ? count + 1 : PAGE_SIZE;
	ebuf->buf = kmalloc(ebuf->size,
			    GFP_TEMPORARY | __GFP_NORETRY | __GFP_NOWARN);

	if (ebuf->buf == NULL) {
		ebuf->size = PAGE_SIZE;
		ebuf->buf = kmalloc(ebuf->size, GFP_TEMPORARY);
	}

	if (ebuf->buf == NULL) {
		ebuf->size = 128;
		ebuf->buf = kmalloc(ebuf->size, GFP_TEMPORARY);
	}

	if (ebuf->buf == NULL)
		return -ENOMEM;

	ebuf->start = pos;

	return 0;
}
static void i915_error_object_free(struct drm_i915_error_object *obj)
{
	int page;

	if (obj == NULL)
		return;

	for (page = 0; page < obj->page_count; page++)
		kfree(obj->pages[page]);

	kfree(obj);
}
static void i915_error_state_free(struct kref *error_ref)
{
	struct drm_i915_error_state *error = container_of(error_ref,
							  typeof(*error), ref);
	int i;

	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
		i915_error_object_free(error->ring[i].batchbuffer);
		i915_error_object_free(error->ring[i].ringbuffer);
		i915_error_object_free(error->ring[i].ctx);
		kfree(error->ring[i].requests);
	}

	kfree(error->active_bo);
	kfree(error->overlay);
	kfree(error->display);
	kfree(error);
}
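/* Snapshot the contents of a GEM object, page by page. This runs in atomic
 * context in the middle of a hang, so it copies through the GTT aperture,
 * stolen memory or a CPU kmap into freshly allocated GFP_ATOMIC pages
 * instead of touching the object's own backing store. */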
static struct drm_i915_error_object *
i915_error_object_create_sized(struct drm_i915_private *dev_priv,
			       struct drm_i915_gem_object *src,
			       const int num_pages)
{
	struct drm_i915_error_object *dst;
	int i;
	u32 reloc_offset;

	if (src == NULL || src->pages == NULL)
		return NULL;

	dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), GFP_ATOMIC);
	if (dst == NULL)
		return NULL;

	reloc_offset = dst->gtt_offset = i915_gem_obj_ggtt_offset(src);
	for (i = 0; i < num_pages; i++) {
		unsigned long flags;
		void *d;

		d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
		if (d == NULL)
			goto unwind;

		local_irq_save(flags);
		if (reloc_offset < dev_priv->gtt.mappable_end &&
		    src->has_global_gtt_mapping) {
			void __iomem *s;

			/* Simply ignore tiling or any overlapping fence.
			 * It's part of the error state, and this hopefully
			 * captures what the GPU read.
			 */

			s = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
						     reloc_offset);
			memcpy_fromio(d, s, PAGE_SIZE);
			io_mapping_unmap_atomic(s);
		} else if (src->stolen) {
			unsigned long offset;

			offset = dev_priv->mm.stolen_base;
			offset += src->stolen->start;
			offset += i << PAGE_SHIFT;

			memcpy_fromio(d, (void __iomem *) offset, PAGE_SIZE);
		} else {
			struct page *page;
			void *s;

			page = i915_gem_object_get_page(src, i);

			drm_clflush_pages(&page, 1);

			s = kmap_atomic(page);
			memcpy(d, s, PAGE_SIZE);
			kunmap_atomic(s);

			drm_clflush_pages(&page, 1);
		}
		local_irq_restore(flags);

		dst->pages[i] = d;

		reloc_offset += PAGE_SIZE;
	}
	dst->page_count = num_pages;

	return dst;

unwind:
	while (i--)
		kfree(dst->pages[i]);
	kfree(dst);
	return NULL;
}
#define i915_error_object_create(dev_priv, src) \
	i915_error_object_create_sized((dev_priv), (src), \
				       (src)->base.size>>PAGE_SHIFT)
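/* Record the bookkeeping state of a single GEM object into one flat
 * error-buffer entry, later printed by print_error_buffers(). */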
static void capture_bo(struct drm_i915_error_buffer *err,
		       struct drm_i915_gem_object *obj)
{
	err->size = obj->base.size;
	err->name = obj->base.name;
	err->rseqno = obj->last_read_seqno;
	err->wseqno = obj->last_write_seqno;
	err->gtt_offset = i915_gem_obj_ggtt_offset(obj);
	err->read_domains = obj->base.read_domains;
	err->write_domain = obj->base.write_domain;
	err->fence_reg = obj->fence_reg;
	err->pinned = 0;
	if (obj->pin_count > 0)
		err->pinned = 1;
	if (obj->user_pin_count > 0)
		err->pinned = -1;
	err->tiling = obj->tiling_mode;
	err->dirty = obj->dirty;
	err->purgeable = obj->madv != I915_MADV_WILLNEED;
	err->ring = obj->ring ? obj->ring->id : -1;
	err->cache_level = obj->cache_level;
}
static u32 capture_active_bo(struct drm_i915_error_buffer *err,
			     int count, struct list_head *head)
{
	struct i915_vma *vma;
	int i = 0;

	list_for_each_entry(vma, head, mm_list) {
		capture_bo(err++, vma->obj);
		if (++i == count)
			break;
	}

	return i;
}
static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
			     int count, struct list_head *head)
{
	struct drm_i915_gem_object *obj;
	int i = 0;

	list_for_each_entry(obj, head, global_list) {
		if (obj->pin_count == 0)
			continue;

		capture_bo(err++, obj);
		if (++i == count)
			break;
	}

	return i;
}
static void i915_gem_record_fences(struct drm_device *dev,
				   struct drm_i915_error_state *error)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	/* Fences */
	switch (INTEL_INFO(dev)->gen) {
	case 7:
	case 6:
		for (i = 0; i < dev_priv->num_fence_regs; i++)
			error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
		break;
	case 5:
	case 4:
		for (i = 0; i < 16; i++)
			error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
		break;
	case 3:
		if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
			for (i = 0; i < 8; i++)
				error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
	case 2:
		for (i = 0; i < 8; i++)
			error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
		break;

	default:
		BUG();
	}
}
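/* Make a best-effort guess at the batch buffer that hung: prefer the
 * scratch batch on broken-CS-TLB platforms, otherwise scan the active
 * lists for a command buffer this ring has not yet finished with. */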
static struct drm_i915_error_object *
i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
			     struct intel_ring_buffer *ring)
{
	struct i915_address_space *vm;
	struct i915_vma *vma;
	struct drm_i915_gem_object *obj;
	u32 seqno;

	if (!ring->get_seqno)
		return NULL;

	if (HAS_BROKEN_CS_TLB(dev_priv->dev)) {
		u32 acthd = I915_READ(ACTHD);

		if (WARN_ON(ring->id != RCS))
			return NULL;

		obj = ring->scratch.obj;
		if (obj != NULL &&
		    acthd >= i915_gem_obj_ggtt_offset(obj) &&
		    acthd < i915_gem_obj_ggtt_offset(obj) + obj->base.size)
			return i915_error_object_create(dev_priv, obj);
	}

	seqno = ring->get_seqno(ring, false);
	list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
		list_for_each_entry(vma, &vm->active_list, mm_list) {
			obj = vma->obj;
			if (obj->ring != ring)
				continue;

			if (i915_seqno_passed(seqno, obj->last_read_seqno))
				continue;

			if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
				continue;

			/* We need to copy these to an anonymous buffer as the simplest
			 * method to avoid being overwritten by userspace.
			 */
			return i915_error_object_create(dev_priv, obj);
		}
	}

	return NULL;
}
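/* Latch the per-ring MMIO state (head/tail, IPEIR/IPEHR, INSTDONE,
 * semaphore mailboxes, ...) at the moment of the hang. */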
static void i915_record_ring_state(struct drm_device *dev,
				   struct drm_i915_error_state *error,
				   struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen >= 6) {
		error->rc_psmi[ring->id] = I915_READ(ring->mmio_base + 0x50);
		error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring));
		error->semaphore_mboxes[ring->id][0]
			= I915_READ(RING_SYNC_0(ring->mmio_base));
		error->semaphore_mboxes[ring->id][1]
			= I915_READ(RING_SYNC_1(ring->mmio_base));
		error->semaphore_seqno[ring->id][0] = ring->sync_seqno[0];
		error->semaphore_seqno[ring->id][1] = ring->sync_seqno[1];
	}

	if (HAS_VEBOX(dev)) {
		error->semaphore_mboxes[ring->id][2] =
			I915_READ(RING_SYNC_2(ring->mmio_base));
		error->semaphore_seqno[ring->id][2] = ring->sync_seqno[2];
	}

	if (INTEL_INFO(dev)->gen >= 4) {
		error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base));
		error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base));
		error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base));
		error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base));
		error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base));
		if (ring->id == RCS)
			error->bbaddr = I915_READ64(BB_ADDR);
	} else {
		error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX);
		error->ipeir[ring->id] = I915_READ(IPEIR);
		error->ipehr[ring->id] = I915_READ(IPEHR);
		error->instdone[ring->id] = I915_READ(INSTDONE);
	}

	error->waiting[ring->id] = waitqueue_active(&ring->irq_queue);
	error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base));
	error->seqno[ring->id] = ring->get_seqno(ring, false);
	error->acthd[ring->id] = intel_ring_get_active_head(ring);
	error->head[ring->id] = I915_READ_HEAD(ring);
	error->tail[ring->id] = I915_READ_TAIL(ring);
	error->ctl[ring->id] = I915_READ_CTL(ring);

	error->cpu_ring_head[ring->id] = ring->head;
	error->cpu_ring_tail[ring->id] = ring->tail;
}
static void i915_gem_record_active_context(struct intel_ring_buffer *ring,
					   struct drm_i915_error_state *error,
					   struct drm_i915_error_ring *ering)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	struct drm_i915_gem_object *obj;

	/* Currently render ring is the only HW context user */
	if (ring->id != RCS || !error->ccid)
		return;

	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if ((error->ccid & PAGE_MASK) == i915_gem_obj_ggtt_offset(obj)) {
			ering->ctx = i915_error_object_create_sized(dev_priv,
								    obj, 1);
			break;
		}
	}
}
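/* Walk every ring and capture its register state, the suspect batch, the
 * ringbuffer contents, the active HW context and the list of outstanding
 * requests. */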
static void i915_gem_record_rings(struct drm_device *dev,
				  struct drm_i915_error_state *error)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_request *request;
	int i, count;

	for (i = 0; i < I915_NUM_RINGS; i++) {
		struct intel_ring_buffer *ring = &dev_priv->ring[i];

		if (ring->dev == NULL)
			continue;

		error->ring[i].valid = true;

		i915_record_ring_state(dev, error, ring);

		error->ring[i].batchbuffer =
			i915_error_first_batchbuffer(dev_priv, ring);

		error->ring[i].ringbuffer =
			i915_error_object_create(dev_priv, ring->obj);

		i915_gem_record_active_context(ring, error, &error->ring[i]);

		count = 0;
		list_for_each_entry(request, &ring->request_list, list)
			count++;

		error->ring[i].num_requests = count;
		error->ring[i].requests =
			kmalloc(count*sizeof(struct drm_i915_error_request),
				GFP_ATOMIC);
		if (error->ring[i].requests == NULL) {
			error->ring[i].num_requests = 0;
			continue;
		}

		count = 0;
		list_for_each_entry(request, &ring->request_list, list) {
			struct drm_i915_error_request *erq;

			erq = &error->ring[i].requests[count++];
			erq->seqno = request->seqno;
			erq->jiffies = request->emitted_jiffies;
			erq->tail = request->tail;
		}
	}
}
/* FIXME: Since pin count/bound list is global, we duplicate what we capture per
 * VM.
 */
static void i915_gem_capture_vm(struct drm_i915_private *dev_priv,
				struct drm_i915_error_state *error,
				struct i915_address_space *vm,
				const int ndx)
{
	struct drm_i915_error_buffer *active_bo = NULL, *pinned_bo = NULL;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int i;

	i = 0;
	list_for_each_entry(vma, &vm->active_list, mm_list)
		i++;
	error->active_bo_count[ndx] = i;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
		if (obj->pin_count)
			i++;
	error->pinned_bo_count[ndx] = i - error->active_bo_count[ndx];

	if (i) {
		active_bo = kmalloc(sizeof(*active_bo)*i, GFP_ATOMIC);
		if (active_bo)
			pinned_bo = active_bo + error->active_bo_count[ndx];
	}

	if (active_bo)
		error->active_bo_count[ndx] =
			capture_active_bo(active_bo,
					  error->active_bo_count[ndx],
					  &vm->active_list);

	if (pinned_bo)
		error->pinned_bo_count[ndx] =
			capture_pinned_bo(pinned_bo,
					  error->pinned_bo_count[ndx],
					  &dev_priv->mm.bound_list);
	error->active_bo[ndx] = active_bo;
	error->pinned_bo[ndx] = pinned_bo;
}
static void i915_gem_capture_buffers(struct drm_i915_private *dev_priv,
				     struct drm_i915_error_state *error)
{
	struct i915_address_space *vm;
	int cnt = 0, i = 0;

	list_for_each_entry(vm, &dev_priv->vm_list, global_link)
		cnt++;

	if (WARN(cnt > 1, "Multiple VMs not yet supported\n"))
		cnt = 1;

	vm = &dev_priv->gtt.base;

	error->active_bo = kcalloc(cnt, sizeof(*error->active_bo), GFP_ATOMIC);
	error->pinned_bo = kcalloc(cnt, sizeof(*error->pinned_bo), GFP_ATOMIC);
	error->active_bo_count = kcalloc(cnt, sizeof(*error->active_bo_count),
					 GFP_ATOMIC);
	error->pinned_bo_count = kcalloc(cnt, sizeof(*error->pinned_bo_count),
					 GFP_ATOMIC);

	list_for_each_entry(vm, &dev_priv->vm_list, global_link)
		i915_gem_capture_vm(dev_priv, error, vm, i++);
}
/**
 * i915_capture_error_state - capture an error record for later analysis
 * @dev: drm device
 *
 * Should be called when an error is detected (either a hang or an error
 * interrupt) to capture error state from the time of the error. Fills
 * out a structure which becomes available in debugfs for user level tools
 * to pick up.
 */
void i915_capture_error_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_error_state *error;
	unsigned long flags;
	int pipe;

	spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
	error = dev_priv->gpu_error.first_error;
	spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
	if (error)
		return;

	/* Account for pipe specific data like PIPE*STAT */
	error = kzalloc(sizeof(*error), GFP_ATOMIC);
	if (!error) {
		DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
		return;
	}

	DRM_INFO("capturing error event; look for more information in "
		 "/sys/class/drm/card%d/error\n", dev->primary->index);

	kref_init(&error->ref);
	error->eir = I915_READ(EIR);
	error->pgtbl_er = I915_READ(PGTBL_ER);
	if (HAS_HW_CONTEXTS(dev))
		error->ccid = I915_READ(CCID);

	if (HAS_PCH_SPLIT(dev))
		error->ier = I915_READ(DEIER) | I915_READ(GTIER);
	else if (IS_VALLEYVIEW(dev))
		error->ier = I915_READ(GTIER) | I915_READ(VLV_IER);
	else if (IS_GEN2(dev))
		error->ier = I915_READ16(IER);
	else
		error->ier = I915_READ(IER);

	if (INTEL_INFO(dev)->gen >= 6)
		error->derrmr = I915_READ(DERRMR);

	if (IS_VALLEYVIEW(dev))
		error->forcewake = I915_READ(FORCEWAKE_VLV);
	else if (INTEL_INFO(dev)->gen >= 7)
		error->forcewake = I915_READ(FORCEWAKE_MT);
	else if (INTEL_INFO(dev)->gen == 6)
		error->forcewake = I915_READ(FORCEWAKE);

	if (!HAS_PCH_SPLIT(dev))
		for_each_pipe(pipe)
			error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));

	if (INTEL_INFO(dev)->gen >= 6) {
		error->error = I915_READ(ERROR_GEN6);
		error->done_reg = I915_READ(DONE_REG);
	}

	if (INTEL_INFO(dev)->gen == 7)
		error->err_int = I915_READ(GEN7_ERR_INT);

	i915_get_extra_instdone(dev, error->extra_instdone);

	i915_gem_capture_buffers(dev_priv, error);
	i915_gem_record_fences(dev, error);
	i915_gem_record_rings(dev, error);

	do_gettimeofday(&error->time);

	error->overlay = intel_overlay_capture_error_state(dev);
	error->display = intel_display_capture_error_state(dev);

	spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
	if (dev_priv->gpu_error.first_error == NULL) {
		dev_priv->gpu_error.first_error = error;
		error = NULL;
	}
	spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);

	if (error)
		i915_error_state_free(&error->ref);
}
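/* Userspace access to the captured state is refcounted: _get takes a
 * reference under the error lock, _put drops it, and destroy detaches the
 * record so that a subsequent hang can be captured. */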
void i915_error_state_get(struct drm_device *dev,
			  struct i915_error_state_file_priv *error_priv)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
	error_priv->error = dev_priv->gpu_error.first_error;
	if (error_priv->error)
		kref_get(&error_priv->error->ref);
	spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
}
void i915_error_state_put(struct i915_error_state_file_priv *error_priv)
{
	if (error_priv->error)
		kref_put(&error_priv->error->ref, i915_error_state_free);
}
void i915_destroy_error_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_error_state *error;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
	error = dev_priv->gpu_error.first_error;
	dev_priv->gpu_error.first_error = NULL;
	spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);

	if (error)
		kref_put(&error->ref, i915_error_state_free);
}
const char *i915_cache_level_str(int type)
{
	switch (type) {
	case I915_CACHE_NONE: return " uncached";
	case I915_CACHE_LLC: return " snooped or LLC";
	case I915_CACHE_L3_LLC: return " L3+LLC";
	default: return "";
	}
}
/* NB: please notice the memset */
void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);

	switch (INTEL_INFO(dev)->gen) {
	case 2:
	case 3:
		instdone[0] = I915_READ(INSTDONE);
		break;
	case 4:
	case 5:
	case 6:
		instdone[0] = I915_READ(INSTDONE_I965);
		instdone[1] = I915_READ(INSTDONE1);
		break;
	default:
		WARN_ONCE(1, "Unsupported platform\n");
	case 7:
		instdone[0] = I915_READ(GEN7_INSTDONE_1);
		instdone[1] = I915_READ(GEN7_SC_INSTDONE);
		instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
		instdone[3] = I915_READ(GEN7_ROW_INSTDONE);
		break;
	}
}