/*
 * Copyright (c) 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *    Mika Kuoppala <mika.kuoppala@intel.com>
 *
 */
#include <generated/utsrelease.h>
#include "i915_drv.h"
static const char *yesno(int v)
{
	return v ? "yes" : "no";
}
static const char *ring_str(int ring)
{
	switch (ring) {
	case RCS: return "render";
	case VCS: return "bsd";
	case BCS: return "blt";
	case VECS: return "vebox";
	case VCS2: return "bsd2";
	default: return "";
	}
}
static const char *pin_flag(int pinned)
{
	if (pinned > 0)
		return " P";
	else if (pinned < 0)
		return " p";
	else
		return "";
}
static const char *tiling_flag(int tiling)
{
	switch (tiling) {
	default:
	case I915_TILING_NONE: return "";
	case I915_TILING_X: return " X";
	case I915_TILING_Y: return " Y";
	}
}
static const char *dirty_flag(int dirty)
{
	return dirty ? " dirty" : "";
}
static const char *purgeable_flag(int purgeable)
{
	return purgeable ? " purgeable" : "";
}
static bool __i915_error_ok(struct drm_i915_error_state_buf *e)
{
	if (!e->err && WARN(e->bytes > (e->size - 1), "overflow")) {
		e->err = -ENOSPC;
		return false;
	}

	if (e->bytes == e->size - 1 || e->err)
		return false;

	return true;
}
static bool __i915_error_seek(struct drm_i915_error_state_buf *e,
			      unsigned len)
{
	if (e->pos + len <= e->start) {
		e->pos += len;
		return false;
	}

	/* First vsnprintf needs to fit in its entirety for memmove */
	if (len >= e->size) {
		e->err = -EIO;
		return false;
	}

	return true;
}
static void __i915_error_advance(struct drm_i915_error_state_buf *e,
				 unsigned len)
{
	/* If this is the first printf in this window, adjust it so that
	 * the start position matches the start of the buffer
	 */
	if (e->pos < e->start) {
		const size_t off = e->start - e->pos;

		/* Should not happen but be paranoid */
		if (off > len || e->bytes) {
			e->err = -EIO;
			return;
		}

		memmove(e->buf, e->buf + off, len - off);
		e->bytes = len - off;
		e->pos = e->start;
		return;
	}

	e->bytes += len;
	e->pos += len;
}
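/*
 * Illustrative example of the window arithmetic above (values hypothetical,
 * not from the original source): a reader asking for the chunk at file
 * offset 4096 yields e->start == 4096 while e->pos begins at 0. Formatted
 * output merely advances e->pos until a write straddles e->start; that
 * first straddling write is shifted down with memmove() so the buffer
 * begins exactly at the requested offset, and everything after it appends
 * normally via e->bytes. This is what lets an error state far larger than
 * the buffer be regenerated piecewise, one read window at a time.
 */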
static void i915_error_vprintf(struct drm_i915_error_state_buf *e,
			       const char *f, va_list args)
{
	unsigned len;

	if (!__i915_error_ok(e))
		return;

	/* Seek to the first printf which hits the start position */
	if (e->pos < e->start) {
		va_list tmp;

		va_copy(tmp, args);
		len = vsnprintf(NULL, 0, f, tmp);
		va_end(tmp);

		if (!__i915_error_seek(e, len))
			return;
	}

	len = vsnprintf(e->buf + e->bytes, e->size - e->bytes, f, args);
	if (len >= e->size - e->bytes)
		len = e->size - e->bytes - 1;

	__i915_error_advance(e, len);
}
static void i915_error_puts(struct drm_i915_error_state_buf *e,
			    const char *str)
{
	unsigned len;

	if (!__i915_error_ok(e))
		return;

	len = strlen(str);

	/* Seek to the first output which hits the start position */
	if (e->pos < e->start) {
		if (!__i915_error_seek(e, len))
			return;
	}

	if (len >= e->size - e->bytes)
		len = e->size - e->bytes - 1;
	memcpy(e->buf + e->bytes, str, len);

	__i915_error_advance(e, len);
}
#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
#define err_puts(e, s) i915_error_puts(e, s)
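/*
 * These two wrappers are the only interface the dump code below uses:
 * err_printf() expands to the out-of-line, windowing-aware
 * i915_error_printf() defined later in this file, while err_puts() skips
 * format processing entirely for constant strings such as the flag
 * suffixes built by the helpers above.
 */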
static void print_error_buffers(struct drm_i915_error_state_buf *m,
				const char *name,
				struct drm_i915_error_buffer *err,
				int count)
{
	err_printf(m, "  %s [%d]:\n", name, count);

	while (count--) {
		err_printf(m, "    %08x %8u %02x %02x %x %x",
			   err->gtt_offset,
			   err->size,
			   err->read_domains,
			   err->write_domain,
			   err->rseqno, err->wseqno);
		err_puts(m, pin_flag(err->pinned));
		err_puts(m, tiling_flag(err->tiling));
		err_puts(m, dirty_flag(err->dirty));
		err_puts(m, purgeable_flag(err->purgeable));
		err_puts(m, err->userptr ? " userptr" : "");
		err_puts(m, err->ring != -1 ? " " : "");
		err_puts(m, ring_str(err->ring));
		err_puts(m, i915_cache_level_str(m->i915, err->cache_level));

		if (err->name)
			err_printf(m, " (name: %d)", err->name);
		if (err->fence_reg != I915_FENCE_REG_NONE)
			err_printf(m, " (fence: %d)", err->fence_reg);

		err_puts(m, "\n");
		err++;
	}
}
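/*
 * A single dumped buffer line produced above therefore looks roughly like
 * (values hypothetical):
 *
 *     00bf4000    16384 01 02 0 0 P X dirty render LLC (name: 5) (fence: 2)
 *
 * i.e. gtt_offset, size, read_domains, write_domain, the last read/write
 * seqnos, then the optional flag suffixes assembled by the helpers above.
 */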
static const char *hangcheck_action_to_str(enum intel_ring_hangcheck_action a)
{
	switch (a) {
	case HANGCHECK_IDLE:
		return "idle";
	case HANGCHECK_WAIT:
		return "wait";
	case HANGCHECK_ACTIVE:
		return "active";
	case HANGCHECK_ACTIVE_LOOP:
		return "active (loop)";
	case HANGCHECK_KICK:
		return "kick";
	case HANGCHECK_HUNG:
		return "hung";
	}

	return "unknown";
}
static void i915_ring_error_state(struct drm_i915_error_state_buf *m,
				  struct drm_device *dev,
				  struct drm_i915_error_state *error,
				  int ring_idx)
{
	struct drm_i915_error_ring *ring = &error->ring[ring_idx];

	if (!ring->valid)
		return;

	err_printf(m, "%s command stream:\n", ring_str(ring_idx));
	err_printf(m, "  HEAD: 0x%08x\n", ring->head);
	err_printf(m, "  TAIL: 0x%08x\n", ring->tail);
	err_printf(m, "  CTL: 0x%08x\n", ring->ctl);
	err_printf(m, "  HWS: 0x%08x\n", ring->hws);
	err_printf(m, "  ACTHD: 0x%08x %08x\n", (u32)(ring->acthd >> 32), (u32)ring->acthd);
	err_printf(m, "  IPEIR: 0x%08x\n", ring->ipeir);
	err_printf(m, "  IPEHR: 0x%08x\n", ring->ipehr);
	err_printf(m, "  INSTDONE: 0x%08x\n", ring->instdone);
	if (INTEL_INFO(dev)->gen >= 4) {
		err_printf(m, "  BBADDR: 0x%08x %08x\n", (u32)(ring->bbaddr >> 32), (u32)ring->bbaddr);
		err_printf(m, "  BB_STATE: 0x%08x\n", ring->bbstate);
		err_printf(m, "  INSTPS: 0x%08x\n", ring->instps);
	}
	err_printf(m, "  INSTPM: 0x%08x\n", ring->instpm);
	err_printf(m, "  FADDR: 0x%08x %08x\n", upper_32_bits(ring->faddr),
		   lower_32_bits(ring->faddr));
	if (INTEL_INFO(dev)->gen >= 6) {
		err_printf(m, "  RC PSMI: 0x%08x\n", ring->rc_psmi);
		err_printf(m, "  FAULT_REG: 0x%08x\n", ring->fault_reg);
		err_printf(m, "  SYNC_0: 0x%08x [last synced 0x%08x]\n",
			   ring->semaphore_mboxes[0],
			   ring->semaphore_seqno[0]);
		err_printf(m, "  SYNC_1: 0x%08x [last synced 0x%08x]\n",
			   ring->semaphore_mboxes[1],
			   ring->semaphore_seqno[1]);
		if (HAS_VEBOX(dev)) {
			err_printf(m, "  SYNC_2: 0x%08x [last synced 0x%08x]\n",
				   ring->semaphore_mboxes[2],
				   ring->semaphore_seqno[2]);
		}
	}
	if (USES_PPGTT(dev)) {
		err_printf(m, "  GFX_MODE: 0x%08x\n", ring->vm_info.gfx_mode);

		if (INTEL_INFO(dev)->gen >= 8) {
			int i;
			for (i = 0; i < 4; i++)
				err_printf(m, "  PDP%d: 0x%016llx\n",
					   i, ring->vm_info.pdp[i]);
		} else {
			err_printf(m, "  PP_DIR_BASE: 0x%08x\n",
				   ring->vm_info.pp_dir_base);
		}
	}
	err_printf(m, "  seqno: 0x%08x\n", ring->seqno);
	err_printf(m, "  waiting: %s\n", yesno(ring->waiting));
	err_printf(m, "  ring->head: 0x%08x\n", ring->cpu_ring_head);
	err_printf(m, "  ring->tail: 0x%08x\n", ring->cpu_ring_tail);
	err_printf(m, "  hangcheck: %s [%d]\n",
		   hangcheck_action_to_str(ring->hangcheck_action),
		   ring->hangcheck_score);
}
void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
{
	va_list args;

	va_start(args, f);
	i915_error_vprintf(e, f, args);
	va_end(args);
}
static void print_error_obj(struct drm_i915_error_state_buf *m,
			    struct drm_i915_error_object *obj)
{
	int page, offset, elt;

	for (page = offset = 0; page < obj->page_count; page++) {
		for (elt = 0; elt < PAGE_SIZE/4; elt++) {
			err_printf(m, "%08x : %08x\n", offset,
				   obj->pages[page][elt]);
			offset += 4;
		}
	}
}
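/*
 * Object contents are thus dumped one 32-bit word per line as
 * "offset : value", with the offset running cumulatively across all
 * captured pages (PAGE_SIZE/4 words per page).
 */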
int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
			    const struct i915_error_state_file_priv *error_priv)
{
	struct drm_device *dev = error_priv->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_error_state *error = error_priv->error;
	struct drm_i915_error_object *obj;
	int i, j, offset, elt;
	int max_hangcheck_score;

	if (!error) {
		err_printf(m, "no error state collected\n");
		goto out;
	}

	err_printf(m, "%s\n", error->error_msg);
	err_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
		   error->time.tv_usec);
	err_printf(m, "Kernel: " UTS_RELEASE "\n");
	max_hangcheck_score = 0;
	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
		if (error->ring[i].hangcheck_score > max_hangcheck_score)
			max_hangcheck_score = error->ring[i].hangcheck_score;
	}
	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
		if (error->ring[i].hangcheck_score == max_hangcheck_score &&
		    error->ring[i].pid != -1) {
			err_printf(m, "Active process (on ring %s): %s [%d]\n",
				   ring_str(i),
				   error->ring[i].comm,
				   error->ring[i].pid);
		}
	}
	err_printf(m, "Reset count: %u\n", error->reset_count);
	err_printf(m, "Suspend count: %u\n", error->suspend_count);
	err_printf(m, "PCI ID: 0x%04x\n", dev->pdev->device);
	err_printf(m, "EIR: 0x%08x\n", error->eir);
	err_printf(m, "IER: 0x%08x\n", error->ier);
	if (INTEL_INFO(dev)->gen >= 8) {
		for (i = 0; i < 4; i++)
			err_printf(m, "GTIER gt %d: 0x%08x\n", i,
				   error->gtier[i]);
	} else if (HAS_PCH_SPLIT(dev) || IS_VALLEYVIEW(dev))
		err_printf(m, "GTIER: 0x%08x\n", error->gtier[0]);
	err_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
	err_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake);
	err_printf(m, "DERRMR: 0x%08x\n", error->derrmr);
	err_printf(m, "CCID: 0x%08x\n", error->ccid);
	err_printf(m, "Missed interrupts: 0x%08lx\n", dev_priv->gpu_error.missed_irq_rings);

	for (i = 0; i < dev_priv->num_fence_regs; i++)
		err_printf(m, "  fence[%d] = %08llx\n", i, error->fence[i]);

	for (i = 0; i < ARRAY_SIZE(error->extra_instdone); i++)
		err_printf(m, "  INSTDONE_%d: 0x%08x\n", i,
			   error->extra_instdone[i]);

	if (INTEL_INFO(dev)->gen >= 6) {
		err_printf(m, "ERROR: 0x%08x\n", error->error);

		if (INTEL_INFO(dev)->gen >= 8)
			err_printf(m, "FAULT_TLB_DATA: 0x%08x 0x%08x\n",
				   error->fault_data1, error->fault_data0);

		err_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
	}

	if (INTEL_INFO(dev)->gen == 7)
		err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);

	for (i = 0; i < ARRAY_SIZE(error->ring); i++)
		i915_ring_error_state(m, dev, error, i);

	for (i = 0; i < error->vm_count; i++) {
		err_printf(m, "vm[%d]\n", i);

		print_error_buffers(m, "Active",
				    error->active_bo[i],
				    error->active_bo_count[i]);

		print_error_buffers(m, "Pinned",
				    error->pinned_bo[i],
				    error->pinned_bo_count[i]);
	}

	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
		obj = error->ring[i].batchbuffer;
		if (obj) {
			err_puts(m, dev_priv->ring[i].name);
			if (error->ring[i].pid != -1)
				err_printf(m, " (submitted by %s [%d])",
					   error->ring[i].comm,
					   error->ring[i].pid);
			err_printf(m, " --- gtt_offset = 0x%08x\n",
				   obj->gtt_offset);
			print_error_obj(m, obj);
		}

		obj = error->ring[i].wa_batchbuffer;
		if (obj) {
			err_printf(m, "%s (w/a) --- gtt_offset = 0x%08x\n",
				   dev_priv->ring[i].name, obj->gtt_offset);
			print_error_obj(m, obj);
		}

		if (error->ring[i].num_requests) {
			err_printf(m, "%s --- %d requests\n",
				   dev_priv->ring[i].name,
				   error->ring[i].num_requests);
			for (j = 0; j < error->ring[i].num_requests; j++) {
				err_printf(m, "  seqno 0x%08x, emitted %ld, tail 0x%08x\n",
					   error->ring[i].requests[j].seqno,
					   error->ring[i].requests[j].jiffies,
					   error->ring[i].requests[j].tail);
			}
		}

		if ((obj = error->ring[i].ringbuffer)) {
			err_printf(m, "%s --- ringbuffer = 0x%08x\n",
				   dev_priv->ring[i].name,
				   obj->gtt_offset);
			print_error_obj(m, obj);
		}

		if ((obj = error->ring[i].hws_page)) {
			err_printf(m, "%s --- HW Status = 0x%08x\n",
				   dev_priv->ring[i].name,
				   obj->gtt_offset);
			offset = 0;
			for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
				err_printf(m, "[%04x] %08x %08x %08x %08x\n",
					   offset,
					   obj->pages[0][elt],
					   obj->pages[0][elt+1],
					   obj->pages[0][elt+2],
					   obj->pages[0][elt+3]);
				offset += 16;
			}
		}

		if ((obj = error->ring[i].ctx)) {
			err_printf(m, "%s --- HW Context = 0x%08x\n",
				   dev_priv->ring[i].name,
				   obj->gtt_offset);
			print_error_obj(m, obj);
		}
	}

	if ((obj = error->semaphore_obj)) {
		err_printf(m, "Semaphore page = 0x%08x\n", obj->gtt_offset);
		for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
			err_printf(m, "[%04x] %08x %08x %08x %08x\n",
				   elt * 4,
				   obj->pages[0][elt],
				   obj->pages[0][elt+1],
				   obj->pages[0][elt+2],
				   obj->pages[0][elt+3]);
		}
	}

	if (error->overlay)
		intel_overlay_print_error_state(m, error->overlay);

	if (error->display)
		intel_display_print_error_state(m, dev, error->display);

out:
	if (m->bytes == 0 && m->err)
		return m->err;

	return 0;
}
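/*
 * How this is driven (an assumption based on the start/pos handling above,
 * not on code in this file): the debugfs/sysfs read path is expected to
 * build a buffer with i915_error_state_buf_init(ebuf, i915, count, pos)
 * sized for the requested window and then call i915_error_state_to_str(),
 * regenerating the text from 'pos' onward for every read() so the full
 * dump never has to exist in memory at once.
 */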
int i915_error_state_buf_init(struct drm_i915_error_state_buf *ebuf,
			      struct drm_i915_private *i915,
			      size_t count, loff_t pos)
{
	memset(ebuf, 0, sizeof(*ebuf));
	ebuf->i915 = i915;

	/* We need to have enough room to store any i915_error_state printf
	 * so that we can move it to start position.
	 */
	ebuf->size = count + 1 > PAGE_SIZE ? count + 1 : PAGE_SIZE;
	ebuf->buf = kmalloc(ebuf->size,
			    GFP_TEMPORARY | __GFP_NORETRY | __GFP_NOWARN);

	if (ebuf->buf == NULL) {
		ebuf->size = PAGE_SIZE;
		ebuf->buf = kmalloc(ebuf->size, GFP_TEMPORARY);
	}

	if (ebuf->buf == NULL) {
		ebuf->size = 128;
		ebuf->buf = kmalloc(ebuf->size, GFP_TEMPORARY);
	}

	if (ebuf->buf == NULL)
		return -ENOMEM;

	ebuf->start = pos;

	return 0;
}
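/*
 * The allocation strategy above deliberately degrades: first the exact
 * window size without retry or warning (__GFP_NORETRY | __GFP_NOWARN),
 * then a single page, then a small last-resort buffer, failing with
 * -ENOMEM only if every attempt misses. A short buffer merely truncates
 * the chunk a single read() can return.
 */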
static void i915_error_object_free(struct drm_i915_error_object *obj)
{
	int page;

	if (obj == NULL)
		return;

	for (page = 0; page < obj->page_count; page++)
		kfree(obj->pages[page]);

	kfree(obj);
}
static void i915_error_state_free(struct kref *error_ref)
{
	struct drm_i915_error_state *error = container_of(error_ref,
							  typeof(*error), ref);
	int i;

	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
		i915_error_object_free(error->ring[i].batchbuffer);
		i915_error_object_free(error->ring[i].ringbuffer);
		i915_error_object_free(error->ring[i].hws_page);
		i915_error_object_free(error->ring[i].ctx);
		kfree(error->ring[i].requests);
	}

	i915_error_object_free(error->semaphore_obj);

	for (i = 0; i < error->vm_count; i++)
		kfree(error->active_bo[i]);

	kfree(error->active_bo);
	kfree(error->active_bo_count);
	kfree(error->pinned_bo);
	kfree(error->pinned_bo_count);
	kfree(error->overlay);
	kfree(error->display);
	kfree(error);
}
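/*
 * Lifetime note: captured error state is reference counted through
 * error->ref. i915_error_state_get()/i915_error_state_put() near the end
 * of this file take and drop references on behalf of readers, and
 * i915_error_state_free() above is the kref release callback that tears
 * down every captured object, request array and BO list in one place.
 */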
static struct drm_i915_error_object *
i915_error_object_create(struct drm_i915_private *dev_priv,
			 struct drm_i915_gem_object *src,
			 struct i915_address_space *vm)
{
	struct drm_i915_error_object *dst;
	struct i915_vma *vma = NULL;
	int num_pages;
	bool use_ggtt;
	int i = 0;
	u32 reloc_offset;

	if (src == NULL || src->pages == NULL)
		return NULL;

	num_pages = src->base.size >> PAGE_SHIFT;

	dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), GFP_ATOMIC);
	if (dst == NULL)
		return NULL;

	if (i915_gem_obj_bound(src, vm))
		dst->gtt_offset = i915_gem_obj_offset(src, vm);
	else
		dst->gtt_offset = -1;

	reloc_offset = dst->gtt_offset;
	if (i915_is_ggtt(vm))
		vma = i915_gem_obj_to_ggtt(src);
	use_ggtt = (src->cache_level == I915_CACHE_NONE &&
		    vma && (vma->bound & GLOBAL_BIND) &&
		    reloc_offset + num_pages * PAGE_SIZE <= dev_priv->gtt.mappable_end);

	/* Cannot access stolen address directly, try to use the aperture */
	if (src->stolen) {
		use_ggtt = true;

		if (!(vma && vma->bound & GLOBAL_BIND))
			goto unwind;

		reloc_offset = i915_gem_obj_ggtt_offset(src);
		if (reloc_offset + num_pages * PAGE_SIZE > dev_priv->gtt.mappable_end)
			goto unwind;
	}

	/* Cannot access snooped pages through the aperture */
	if (use_ggtt && src->cache_level != I915_CACHE_NONE && !HAS_LLC(dev_priv->dev))
		goto unwind;

	dst->page_count = num_pages;
	while (num_pages--) {
		unsigned long flags;
		void *d;

		d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
		if (d == NULL)
			goto unwind;

		local_irq_save(flags);
		if (use_ggtt) {
			void __iomem *s;

			/* Simply ignore tiling or any overlapping fence.
			 * It's part of the error state, and this hopefully
			 * captures what the GPU read.
			 */
			s = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
						     reloc_offset);
			memcpy_fromio(d, s, PAGE_SIZE);
			io_mapping_unmap_atomic(s);
		} else {
			struct page *page;
			void *s;

			page = i915_gem_object_get_page(src, i);

			drm_clflush_pages(&page, 1);

			s = kmap_atomic(page);
			memcpy(d, s, PAGE_SIZE);
			kunmap_atomic(s);

			drm_clflush_pages(&page, 1);
		}
		local_irq_restore(flags);

		dst->pages[i++] = d;
		reloc_offset += PAGE_SIZE;
	}

	return dst;

unwind:
	while (i--)
		kfree(dst->pages[i]);
	kfree(dst);
	return NULL;
}
#define i915_error_ggtt_object_create(dev_priv, src) \
	i915_error_object_create((dev_priv), (src), &(dev_priv)->gtt.base)
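/*
 * i915_error_object_create() runs in atomic context (note GFP_ATOMIC and
 * the local_irq_save() around each copy), which is why it has two copy
 * paths: an uncached read through the mappable GGTT aperture via
 * io_mapping_map_atomic_wc() when the object is globally bound, or a
 * clflushed kmap_atomic() copy of the backing page otherwise. The macro
 * above is simply the common "capture through the global GTT" shorthand.
 */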
static void capture_bo(struct drm_i915_error_buffer *err,
		       struct i915_vma *vma)
{
	struct drm_i915_gem_object *obj = vma->obj;

	err->size = obj->base.size;
	err->name = obj->base.name;
	err->rseqno = i915_gem_request_get_seqno(obj->last_read_req);
	err->wseqno = i915_gem_request_get_seqno(obj->last_write_req);
	err->gtt_offset = vma->node.start;
	err->read_domains = obj->base.read_domains;
	err->write_domain = obj->base.write_domain;
	err->fence_reg = obj->fence_reg;
	err->pinned = 0;
	if (i915_gem_obj_is_pinned(obj))
		err->pinned = 1;
	err->tiling = obj->tiling_mode;
	err->dirty = obj->dirty;
	err->purgeable = obj->madv != I915_MADV_WILLNEED;
	err->userptr = obj->userptr.mm != NULL;
	err->ring = obj->last_read_req ?
			i915_gem_request_get_ring(obj->last_read_req)->id : -1;
	err->cache_level = obj->cache_level;
}
static u32 capture_active_bo(struct drm_i915_error_buffer *err,
			     int count, struct list_head *head)
{
	struct i915_vma *vma;
	int i = 0;

	list_for_each_entry(vma, head, mm_list) {
		capture_bo(err++, vma);
		if (++i == count)
			break;
	}

	return i;
}
static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
			     int count, struct list_head *head,
			     struct i915_address_space *vm)
{
	struct drm_i915_gem_object *obj;
	struct drm_i915_error_buffer * const first = err;
	struct drm_i915_error_buffer * const last = err + count;

	list_for_each_entry(obj, head, global_list) {
		struct i915_vma *vma;

		if (err == last)
			break;

		list_for_each_entry(vma, &obj->vma_list, vma_link)
			if (vma->vm == vm && vma->pin_count > 0)
				capture_bo(err++, vma);
	}

	return err - first;
}
/* Generate a semi-unique error code. The code is not meant to have meaning;
 * its only purpose is to try to prevent false duplicated bug reports by
 * grossly estimating a GPU error state.
 *
 * TODO Ideally, hashing the batchbuffer would be a very nice way to determine
 * the hang if we could strip the GTT offset information from it.
 *
 * It's only a small step better than a random number in its current form.
 */
static uint32_t i915_error_generate_code(struct drm_i915_private *dev_priv,
					 struct drm_i915_error_state *error,
					 int *ring_id)
{
	uint32_t error_code = 0;
	int i;

	/* IPEHR would be an ideal way to detect errors, as it's the gross
	 * measure of "the command that hung." However, it has some very
	 * common synchronization commands which almost always appear in
	 * cases that are strictly a client bug. Use instdone to
	 * differentiate those somewhat.
	 */
	for (i = 0; i < I915_NUM_RINGS; i++) {
		if (error->ring[i].hangcheck_action == HANGCHECK_HUNG) {
			if (ring_id)
				*ring_id = i;

			return error->ring[i].ipehr ^ error->ring[i].instdone;
		}
	}

	return error_code;
}
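/*
 * The resulting code is what i915_error_capture_msg() below folds into the
 * "GPU HANG: ecode gen:ring:0x%08x" headline, e.g. a hypothetical
 * "GPU HANG: ecode 7:0:0x85fffffb" for a hung render ring on gen7.
 */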
static void i915_gem_record_fences(struct drm_device *dev,
				   struct drm_i915_error_state *error)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	if (IS_GEN3(dev) || IS_GEN2(dev)) {
		for (i = 0; i < 8; i++)
			error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
		if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
			for (i = 0; i < 8; i++)
				error->fence[i+8] = I915_READ(FENCE_REG_945_8 +
							      (i * 4));
	} else if (IS_GEN5(dev) || IS_GEN4(dev))
		for (i = 0; i < 16; i++)
			error->fence[i] = I915_READ64(FENCE_REG_965_0 +
						      (i * 8));
	else if (INTEL_INFO(dev)->gen >= 6)
		for (i = 0; i < dev_priv->num_fence_regs; i++)
			error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 +
						      (i * 8));
}
static void gen8_record_semaphore_state(struct drm_i915_private *dev_priv,
					struct drm_i915_error_state *error,
					struct intel_engine_cs *ring,
					struct drm_i915_error_ring *ering)
{
	struct intel_engine_cs *to;
	int i;

	if (!i915_semaphore_is_enabled(dev_priv->dev))
		return;

	if (!error->semaphore_obj)
		error->semaphore_obj =
			i915_error_ggtt_object_create(dev_priv,
						      dev_priv->semaphore_obj);

	for_each_ring(to, dev_priv, i) {
		int idx;
		u16 signal_offset;
		u32 *tmp;

		if (ring == to)
			continue;

		signal_offset = (GEN8_SIGNAL_OFFSET(ring, i) & (PAGE_SIZE - 1))
				/ 4;
		tmp = error->semaphore_obj->pages[0];
		idx = intel_ring_sync_index(ring, to);

		ering->semaphore_mboxes[idx] = tmp[signal_offset];
		ering->semaphore_seqno[idx] = ring->semaphore.sync_seqno[idx];
	}
}
static void gen6_record_semaphore_state(struct drm_i915_private *dev_priv,
					struct intel_engine_cs *ring,
					struct drm_i915_error_ring *ering)
{
	ering->semaphore_mboxes[0] = I915_READ(RING_SYNC_0(ring->mmio_base));
	ering->semaphore_mboxes[1] = I915_READ(RING_SYNC_1(ring->mmio_base));
	ering->semaphore_seqno[0] = ring->semaphore.sync_seqno[0];
	ering->semaphore_seqno[1] = ring->semaphore.sync_seqno[1];

	if (HAS_VEBOX(dev_priv->dev)) {
		ering->semaphore_mboxes[2] =
			I915_READ(RING_SYNC_2(ring->mmio_base));
		ering->semaphore_seqno[2] = ring->semaphore.sync_seqno[2];
	}
}
static void i915_record_ring_state(struct drm_device *dev,
				   struct drm_i915_error_state *error,
				   struct intel_engine_cs *ring,
				   struct drm_i915_error_ring *ering)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen >= 6) {
		ering->rc_psmi = I915_READ(ring->mmio_base + 0x50);
		ering->fault_reg = I915_READ(RING_FAULT_REG(ring));
		if (INTEL_INFO(dev)->gen >= 8)
			gen8_record_semaphore_state(dev_priv, error, ring, ering);
		else
			gen6_record_semaphore_state(dev_priv, ring, ering);
	}

	if (INTEL_INFO(dev)->gen >= 4) {
		ering->faddr = I915_READ(RING_DMA_FADD(ring->mmio_base));
		ering->ipeir = I915_READ(RING_IPEIR(ring->mmio_base));
		ering->ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
		ering->instdone = I915_READ(RING_INSTDONE(ring->mmio_base));
		ering->instps = I915_READ(RING_INSTPS(ring->mmio_base));
		ering->bbaddr = I915_READ(RING_BBADDR(ring->mmio_base));
		if (INTEL_INFO(dev)->gen >= 8) {
			ering->faddr |= (u64) I915_READ(RING_DMA_FADD_UDW(ring->mmio_base)) << 32;
			ering->bbaddr |= (u64) I915_READ(RING_BBADDR_UDW(ring->mmio_base)) << 32;
		}
		ering->bbstate = I915_READ(RING_BBSTATE(ring->mmio_base));
	} else {
		ering->faddr = I915_READ(DMA_FADD_I8XX);
		ering->ipeir = I915_READ(IPEIR);
		ering->ipehr = I915_READ(IPEHR);
		ering->instdone = I915_READ(INSTDONE);
	}

	ering->waiting = waitqueue_active(&ring->irq_queue);
	ering->instpm = I915_READ(RING_INSTPM(ring->mmio_base));
	ering->seqno = ring->get_seqno(ring, false);
	ering->acthd = intel_ring_get_active_head(ring);
	ering->head = I915_READ_HEAD(ring);
	ering->tail = I915_READ_TAIL(ring);
	ering->ctl = I915_READ_CTL(ring);

	if (I915_NEED_GFX_HWS(dev)) {
		int mmio;

		if (IS_GEN7(dev)) {
			switch (ring->id) {
			default:
			case RCS:
				mmio = RENDER_HWS_PGA_GEN7;
				break;
			case BCS:
				mmio = BLT_HWS_PGA_GEN7;
				break;
			case VCS:
				mmio = BSD_HWS_PGA_GEN7;
				break;
			case VECS:
				mmio = VEBOX_HWS_PGA_GEN7;
				break;
			}
		} else if (IS_GEN6(ring->dev)) {
			mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
		} else {
			/* XXX: gen8 returns to sanity */
			mmio = RING_HWS_PGA(ring->mmio_base);
		}

		ering->hws = I915_READ(mmio);
	}

	ering->hangcheck_score = ring->hangcheck.score;
	ering->hangcheck_action = ring->hangcheck.action;

	if (USES_PPGTT(dev)) {
		int i;

		ering->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(ring));

		if (IS_GEN6(dev))
			ering->vm_info.pp_dir_base =
				I915_READ(RING_PP_DIR_BASE_READ(ring));
		else if (IS_GEN7(dev))
			ering->vm_info.pp_dir_base =
				I915_READ(RING_PP_DIR_BASE(ring));
		else if (INTEL_INFO(dev)->gen >= 8)
			for (i = 0; i < 4; i++) {
				ering->vm_info.pdp[i] =
					I915_READ(GEN8_RING_PDP_UDW(ring, i));
				ering->vm_info.pdp[i] <<= 32;
				ering->vm_info.pdp[i] |=
					I915_READ(GEN8_RING_PDP_LDW(ring, i));
			}
	}
}
static void i915_gem_record_active_context(struct intel_engine_cs *ring,
					   struct drm_i915_error_state *error,
					   struct drm_i915_error_ring *ering)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	struct drm_i915_gem_object *obj;

	/* Currently the render ring is the only HW context user */
	if (ring->id != RCS || !error->ccid)
		return;

	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if (!i915_gem_obj_ggtt_bound(obj))
			continue;

		if ((error->ccid & PAGE_MASK) == i915_gem_obj_ggtt_offset(obj)) {
			ering->ctx = i915_error_ggtt_object_create(dev_priv, obj);
			break;
		}
	}
}
static void i915_gem_record_rings(struct drm_device *dev,
				  struct drm_i915_error_state *error)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_request *request;
	int i, count;

	for (i = 0; i < I915_NUM_RINGS; i++) {
		struct intel_engine_cs *ring = &dev_priv->ring[i];
		struct intel_ringbuffer *rbuf;

		error->ring[i].pid = -1;

		if (ring->dev == NULL)
			continue;

		error->ring[i].valid = true;

		i915_record_ring_state(dev, error, ring, &error->ring[i]);

		request = i915_gem_find_active_request(ring);
		if (request) {
			struct i915_address_space *vm;

			vm = request->ctx && request->ctx->ppgtt ?
				&request->ctx->ppgtt->base :
				&dev_priv->gtt.base;

			/* We need to copy these to an anonymous buffer
			 * as the simplest method to avoid being overwritten
			 * by userspace.
			 */
			error->ring[i].batchbuffer =
				i915_error_object_create(dev_priv,
							 request->batch_obj,
							 vm);

			if (HAS_BROKEN_CS_TLB(dev_priv->dev))
				error->ring[i].wa_batchbuffer =
					i915_error_ggtt_object_create(dev_priv,
							     ring->scratch.obj);

			if (request->pid) {
				struct task_struct *task;

				rcu_read_lock();
				task = pid_task(request->pid, PIDTYPE_PID);
				if (task) {
					strcpy(error->ring[i].comm, task->comm);
					error->ring[i].pid = task->pid;
				}
				rcu_read_unlock();
			}
		}

		if (i915.enable_execlists) {
			/* TODO: This is only a small fix to keep basic error
			 * capture working, but we need to add more information
			 * for it to be useful (e.g. dump the context being
			 * executed).
			 */
			if (request)
				rbuf = request->ctx->engine[ring->id].ringbuf;
			else
				rbuf = ring->default_context->engine[ring->id].ringbuf;
		} else
			rbuf = ring->buffer;

		error->ring[i].cpu_ring_head = rbuf->head;
		error->ring[i].cpu_ring_tail = rbuf->tail;

		error->ring[i].ringbuffer =
			i915_error_ggtt_object_create(dev_priv, rbuf->obj);

		error->ring[i].hws_page =
			i915_error_ggtt_object_create(dev_priv, ring->status_page.obj);

		i915_gem_record_active_context(ring, error, &error->ring[i]);

		count = 0;
		list_for_each_entry(request, &ring->request_list, list)
			count++;

		error->ring[i].num_requests = count;
		error->ring[i].requests =
			kcalloc(count, sizeof(*error->ring[i].requests),
				GFP_ATOMIC);
		if (error->ring[i].requests == NULL) {
			error->ring[i].num_requests = 0;
			continue;
		}

		count = 0;
		list_for_each_entry(request, &ring->request_list, list) {
			struct drm_i915_error_request *erq;

			erq = &error->ring[i].requests[count++];
			erq->seqno = request->seqno;
			erq->jiffies = request->emitted_jiffies;
			erq->tail = request->postfix;
		}
	}
}
/* FIXME: Since pin count/bound list is global, we duplicate what we capture per
 * VM.
 */
static void i915_gem_capture_vm(struct drm_i915_private *dev_priv,
				struct drm_i915_error_state *error,
				struct i915_address_space *vm,
				const int ndx)
{
	struct drm_i915_error_buffer *active_bo = NULL, *pinned_bo = NULL;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int i;

	i = 0;
	list_for_each_entry(vma, &vm->active_list, mm_list)
		i++;
	error->active_bo_count[ndx] = i;

	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		list_for_each_entry(vma, &obj->vma_list, vma_link)
			if (vma->vm == vm && vma->pin_count > 0)
				i++;
	}
	error->pinned_bo_count[ndx] = i - error->active_bo_count[ndx];

	if (i) {
		active_bo = kcalloc(i, sizeof(*active_bo), GFP_ATOMIC);
		if (active_bo)
			pinned_bo = active_bo + error->active_bo_count[ndx];
	}

	if (active_bo)
		error->active_bo_count[ndx] =
			capture_active_bo(active_bo,
					  error->active_bo_count[ndx],
					  &vm->active_list);

	if (pinned_bo)
		error->pinned_bo_count[ndx] =
			capture_pinned_bo(pinned_bo,
					  error->pinned_bo_count[ndx],
					  &dev_priv->mm.bound_list, vm);
	error->active_bo[ndx] = active_bo;
	error->pinned_bo[ndx] = pinned_bo;
}
static void i915_gem_capture_buffers(struct drm_i915_private *dev_priv,
				     struct drm_i915_error_state *error)
{
	struct i915_address_space *vm;
	int cnt = 0, i = 0;

	list_for_each_entry(vm, &dev_priv->vm_list, global_link)
		cnt++;

	error->active_bo = kcalloc(cnt, sizeof(*error->active_bo), GFP_ATOMIC);
	error->pinned_bo = kcalloc(cnt, sizeof(*error->pinned_bo), GFP_ATOMIC);
	error->active_bo_count = kcalloc(cnt, sizeof(*error->active_bo_count),
					 GFP_ATOMIC);
	error->pinned_bo_count = kcalloc(cnt, sizeof(*error->pinned_bo_count),
					 GFP_ATOMIC);

	if (error->active_bo == NULL ||
	    error->pinned_bo == NULL ||
	    error->active_bo_count == NULL ||
	    error->pinned_bo_count == NULL) {
		kfree(error->active_bo);
		kfree(error->active_bo_count);
		kfree(error->pinned_bo);
		kfree(error->pinned_bo_count);

		error->active_bo = NULL;
		error->active_bo_count = NULL;
		error->pinned_bo = NULL;
		error->pinned_bo_count = NULL;
	} else {
		list_for_each_entry(vm, &dev_priv->vm_list, global_link)
			i915_gem_capture_vm(dev_priv, error, vm, i++);

		error->vm_count = cnt;
	}
}
/* Capture all registers which don't fit into another category. */
static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
				   struct drm_i915_error_state *error)
{
	struct drm_device *dev = dev_priv->dev;
	int i;

	/* General organization
	 * 1. Registers specific to a single generation
	 * 2. Registers which belong to multiple generations
	 * 3. Feature specific registers.
	 * 4. Everything else
	 * Please try to follow the order.
	 */

	/* 1: Registers specific to a single generation */
	if (IS_VALLEYVIEW(dev)) {
		error->gtier[0] = I915_READ(GTIER);
		error->ier = I915_READ(VLV_IER);
		error->forcewake = I915_READ(FORCEWAKE_VLV);
	}

	if (IS_GEN7(dev))
		error->err_int = I915_READ(GEN7_ERR_INT);

	if (INTEL_INFO(dev)->gen >= 8) {
		error->fault_data0 = I915_READ(GEN8_FAULT_TLB_DATA0);
		error->fault_data1 = I915_READ(GEN8_FAULT_TLB_DATA1);
	}

	if (IS_GEN6(dev)) {
		error->forcewake = I915_READ(FORCEWAKE);
		error->gab_ctl = I915_READ(GAB_CTL);
		error->gfx_mode = I915_READ(GFX_MODE);
	}

	/* 2: Registers which belong to multiple generations */
	if (INTEL_INFO(dev)->gen >= 7)
		error->forcewake = I915_READ(FORCEWAKE_MT);

	if (INTEL_INFO(dev)->gen >= 6) {
		error->derrmr = I915_READ(DERRMR);
		error->error = I915_READ(ERROR_GEN6);
		error->done_reg = I915_READ(DONE_REG);
	}

	/* 3: Feature specific registers */
	if (IS_GEN6(dev) || IS_GEN7(dev)) {
		error->gam_ecochk = I915_READ(GAM_ECOCHK);
		error->gac_eco = I915_READ(GAC_ECO_BITS);
	}

	/* 4: Everything else */
	if (HAS_HW_CONTEXTS(dev))
		error->ccid = I915_READ(CCID);

	if (INTEL_INFO(dev)->gen >= 8) {
		error->ier = I915_READ(GEN8_DE_MISC_IER);
		for (i = 0; i < 4; i++)
			error->gtier[i] = I915_READ(GEN8_GT_IER(i));
	} else if (HAS_PCH_SPLIT(dev)) {
		error->ier = I915_READ(DEIER);
		error->gtier[0] = I915_READ(GTIER);
	} else if (IS_GEN2(dev)) {
		error->ier = I915_READ16(IER);
	} else if (!IS_VALLEYVIEW(dev)) {
		error->ier = I915_READ(IER);
	}
	error->eir = I915_READ(EIR);
	error->pgtbl_er = I915_READ(PGTBL_ER);

	i915_get_extra_instdone(dev, error->extra_instdone);
}
static void i915_error_capture_msg(struct drm_device *dev,
				   struct drm_i915_error_state *error,
				   bool wedged,
				   const char *error_msg)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 ecode;
	int ring_id = -1, len;

	ecode = i915_error_generate_code(dev_priv, error, &ring_id);

	len = scnprintf(error->error_msg, sizeof(error->error_msg),
			"GPU HANG: ecode %d:%d:0x%08x",
			INTEL_INFO(dev)->gen, ring_id, ecode);

	if (ring_id != -1 && error->ring[ring_id].pid != -1)
		len += scnprintf(error->error_msg + len,
				 sizeof(error->error_msg) - len,
				 ", in %s [%d]",
				 error->ring[ring_id].comm,
				 error->ring[ring_id].pid);

	scnprintf(error->error_msg + len, sizeof(error->error_msg) - len,
		  ", reason: %s, action: %s",
		  error_msg,
		  wedged ? "reset" : "continue");
}
static void i915_capture_gen_state(struct drm_i915_private *dev_priv,
				   struct drm_i915_error_state *error)
{
	error->reset_count = i915_reset_count(&dev_priv->gpu_error);
	error->suspend_count = dev_priv->suspend_count;
}
/**
 * i915_capture_error_state - capture an error record for later analysis
 * @dev: drm device
 *
 * Should be called when an error is detected (either a hang or an error
 * interrupt) to capture error state from the time of the error. Fills
 * out a structure which becomes available in debugfs for user level tools
 * to pick up.
 */
void i915_capture_error_state(struct drm_device *dev, bool wedged,
			      const char *error_msg)
{
	static bool warned;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_error_state *error;
	unsigned long flags;

	/* Account for pipe specific data like PIPE*STAT */
	error = kzalloc(sizeof(*error), GFP_ATOMIC);
	if (!error) {
		DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
		return;
	}

	kref_init(&error->ref);

	i915_capture_gen_state(dev_priv, error);
	i915_capture_reg_state(dev_priv, error);
	i915_gem_capture_buffers(dev_priv, error);
	i915_gem_record_fences(dev, error);
	i915_gem_record_rings(dev, error);

	do_gettimeofday(&error->time);

	error->overlay = intel_overlay_capture_error_state(dev);
	error->display = intel_display_capture_error_state(dev);

	i915_error_capture_msg(dev, error, wedged, error_msg);
	DRM_INFO("%s\n", error->error_msg);

	spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
	if (dev_priv->gpu_error.first_error == NULL) {
		dev_priv->gpu_error.first_error = error;
		error = NULL;
	}
	spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);

	if (error) {
		i915_error_state_free(&error->ref);
		return;
	}

	if (!warned) {
		DRM_INFO("GPU hangs can indicate a bug anywhere in the entire gfx stack, including userspace.\n");
		DRM_INFO("Please file a _new_ bug report on bugs.freedesktop.org against DRI -> DRM/Intel\n");
		DRM_INFO("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
		DRM_INFO("The gpu crash dump is required to analyze gpu hangs, so please always attach it.\n");
		DRM_INFO("GPU crash dump saved to /sys/class/drm/card%d/error\n", dev->primary->index);
		warned = true;
	}
}
void i915_error_state_get(struct drm_device *dev,
			  struct i915_error_state_file_priv *error_priv)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	spin_lock_irq(&dev_priv->gpu_error.lock);
	error_priv->error = dev_priv->gpu_error.first_error;
	if (error_priv->error)
		kref_get(&error_priv->error->ref);
	spin_unlock_irq(&dev_priv->gpu_error.lock);
}
void i915_error_state_put(struct i915_error_state_file_priv *error_priv)
{
	if (error_priv->error)
		kref_put(&error_priv->error->ref, i915_error_state_free);
}
void i915_destroy_error_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_error_state *error;

	spin_lock_irq(&dev_priv->gpu_error.lock);
	error = dev_priv->gpu_error.first_error;
	dev_priv->gpu_error.first_error = NULL;
	spin_unlock_irq(&dev_priv->gpu_error.lock);

	if (error)
		kref_put(&error->ref, i915_error_state_free);
}
const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
{
	switch (type) {
	case I915_CACHE_NONE: return " uncached";
	case I915_CACHE_LLC: return HAS_LLC(i915) ? " LLC" : " snooped";
	case I915_CACHE_L3_LLC: return " L3+LLC";
	case I915_CACHE_WT: return " WT";
	default: return "";
	}
}
/* NB: please notice the memset */
void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);

	if (IS_GEN2(dev) || IS_GEN3(dev))
		instdone[0] = I915_READ(INSTDONE);
	else if (IS_GEN4(dev) || IS_GEN5(dev) || IS_GEN6(dev)) {
		instdone[0] = I915_READ(INSTDONE_I965);
		instdone[1] = I915_READ(INSTDONE1);
	} else if (INTEL_INFO(dev)->gen >= 7) {
		instdone[0] = I915_READ(GEN7_INSTDONE_1);
		instdone[1] = I915_READ(GEN7_SC_INSTDONE);
		instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
		instdone[3] = I915_READ(GEN7_ROW_INSTDONE);
	}
}