/*
 * Copyright (c) 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *    Mika Kuoppala <mika.kuoppala@intel.com>
 */

#include <generated/utsrelease.h>
#include "i915_drv.h"
static const char *ring_str(int ring)
{
	switch (ring) {
	case RCS: return "render";
	case VCS: return "bsd";
	case BCS: return "blt";
	case VECS: return "vebox";
	case VCS2: return "bsd2";
	default: return "";
	}
}
static const char *pin_flag(int pinned)
{
	if (pinned > 0)
		return " P";
	else if (pinned < 0)
		return " p";
	else
		return "";
}
static const char *tiling_flag(int tiling)
{
	switch (tiling) {
	default:
	case I915_TILING_NONE: return "";
	case I915_TILING_X: return " X";
	case I915_TILING_Y: return " Y";
	}
}
static const char *dirty_flag(int dirty)
{
	return dirty ? " dirty" : "";
}
static const char *purgeable_flag(int purgeable)
{
	return purgeable ? " purgeable" : "";
}
static bool __i915_error_ok(struct drm_i915_error_state_buf *e)
{
	if (!e->err && WARN(e->bytes > (e->size - 1), "overflow")) {
		e->err = -ENOSPC;
		return false;
	}

	if (e->bytes == e->size - 1 || e->err)
		return false;

	return true;
}
static bool __i915_error_seek(struct drm_i915_error_state_buf *e,
			      unsigned len)
{
	if (e->pos + len <= e->start) {
		e->pos += len;
		return false;
	}

	/* First vsnprintf needs to fit in its entirety for memmove */
	if (len >= e->size) {
		WARN(1, "overflow");
		e->err = -EIO;
		return false;
	}

	return true;
}
static void __i915_error_advance(struct drm_i915_error_state_buf *e,
				 unsigned len)
{
	/* If this is the first printf in this window, adjust it so that the
	 * start position matches the start of the buffer.
	 */
	if (e->pos < e->start) {
		const size_t off = e->start - e->pos;

		/* Should not happen but be paranoid */
		if (off > len || e->bytes) {
			e->err = -EIO;
			return;
		}

		memmove(e->buf, e->buf + off, len - off);
		e->bytes = len - off;
		e->pos = e->start;
	} else {
		e->bytes += len;
		e->pos += len;
	}
}
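/*
 * Added annotation (not from the original source): the helpers above
 * implement a simple "window" over the full error-state text so that a
 * bounded buffer can serve reads starting at an arbitrary file offset.
 * A worked example, assuming e->start = 4096 (the reader wants output
 * starting at byte 4096) and an initially empty buffer (e->pos = 0,
 * e->bytes = 0):
 *
 *   - a 100-byte printf at pos 0 ends before e->start, so
 *     __i915_error_seek() just advances e->pos to 100 and the text is
 *     never formatted into the buffer;
 *   - a 200-byte printf arriving at pos 4000 straddles e->start, so it is
 *     formatted at the head of the buffer and __i915_error_advance()
 *     memmove()s away the first 96 bytes, keeping only the 104 bytes that
 *     fall past the window start;
 *   - every later printf lands fully inside the window and is simply
 *     appended, bumping e->bytes and e->pos together.
 */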
static void i915_error_vprintf(struct drm_i915_error_state_buf *e,
			       const char *f, va_list args)
{
	unsigned len;

	if (!__i915_error_ok(e))
		return;

	/* Seek the first printf which hits the start position */
	if (e->pos < e->start) {
		va_list tmp;

		va_copy(tmp, args);
		len = vsnprintf(NULL, 0, f, tmp);
		va_end(tmp);

		if (!__i915_error_seek(e, len))
			return;
	}

	len = vsnprintf(e->buf + e->bytes, e->size - e->bytes, f, args);
	if (len >= e->size - e->bytes)
		len = e->size - e->bytes - 1;

	__i915_error_advance(e, len);
}
static void i915_error_puts(struct drm_i915_error_state_buf *e,
			    const char *str)
{
	unsigned len;

	if (!__i915_error_ok(e))
		return;

	len = strlen(str);

	/* Seek the first string which hits the start position */
	if (e->pos < e->start) {
		if (!__i915_error_seek(e, len))
			return;
	}

	if (len >= e->size - e->bytes)
		len = e->size - e->bytes - 1;
	memcpy(e->buf + e->bytes, str, len);

	__i915_error_advance(e, len);
}
#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
#define err_puts(e, s) i915_error_puts(e, s)
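/*
 * Illustrative sketch (not part of the original file): how a capture helper
 * typically emits text through the windowed buffer via the macros above.
 * dump_example_reg() is a hypothetical name introduced only for this example;
 * err_printf()/err_puts() and struct drm_i915_error_state_buf are the real
 * interfaces defined earlier in this file.
 */
static void dump_example_reg(struct drm_i915_error_state_buf *m,
			     const char *name, u32 val)
{
	/* Formatted and literal output go through the same advance logic. */
	err_printf(m, "  %s: 0x%08x", name, val);
	err_puts(m, val ? " (non-zero)\n" : "\n");
}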
static void print_error_buffers(struct drm_i915_error_state_buf *m,
				const char *name,
				struct drm_i915_error_buffer *err,
				int count)
{
	int i;

	err_printf(m, "  %s [%d]:\n", name, count);

	while (count--) {
		err_printf(m, "    %08x_%08x %8u %02x %02x [ ",
			   upper_32_bits(err->gtt_offset),
			   lower_32_bits(err->gtt_offset),
			   err->size,
			   err->read_domains,
			   err->write_domain);
		for (i = 0; i < I915_NUM_RINGS; i++)
			err_printf(m, "%02x ", err->rseqno[i]);

		err_printf(m, "] %02x", err->wseqno);
		err_puts(m, pin_flag(err->pinned));
		err_puts(m, tiling_flag(err->tiling));
		err_puts(m, dirty_flag(err->dirty));
		err_puts(m, purgeable_flag(err->purgeable));
		err_puts(m, err->userptr ? " userptr" : "");
		err_puts(m, err->ring != -1 ? " " : "");
		err_puts(m, ring_str(err->ring));
		err_puts(m, i915_cache_level_str(m->i915, err->cache_level));

		if (err->name)
			err_printf(m, " (name: %d)", err->name);
		if (err->fence_reg != I915_FENCE_REG_NONE)
			err_printf(m, " (fence: %d)", err->fence_reg);

		err_puts(m, "\n");
		err++;
	}
}
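/*
 * Added annotation (not from the original source): each line emitted above
 * describes one buffer object as
 *
 *   <gtt_offset hi_lo> <size> <read_domains> <write_domain> [ per-ring read
 *   seqnos ] <write seqno> <pinned/tiling/dirty/purgeable/userptr flags>
 *   <last-written ring> <cache level> (name: N) (fence: N)
 *
 * which is what ends up in /sys/class/drm/cardN/error for user-space tools
 * to pick up (see the DRM_INFO hint near the end of this file).
 */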
static const char *hangcheck_action_to_str(enum intel_ring_hangcheck_action a)
{
	switch (a) {
	case HANGCHECK_IDLE:
		return "idle";
	case HANGCHECK_WAIT:
		return "wait";
	case HANGCHECK_ACTIVE:
		return "active";
	case HANGCHECK_ACTIVE_LOOP:
		return "active (loop)";
	case HANGCHECK_KICK:
		return "kick";
	case HANGCHECK_HUNG:
		return "hung";
	}

	return "unknown";
}
static void i915_ring_error_state(struct drm_i915_error_state_buf *m,
				  struct drm_device *dev,
				  struct drm_i915_error_state *error,
				  int ring_idx)
{
	struct drm_i915_error_ring *ring = &error->ring[ring_idx];

	if (!ring->valid)
		return;

	err_printf(m, "%s command stream:\n", ring_str(ring_idx));
	err_printf(m, "  START: 0x%08x\n", ring->start);
	err_printf(m, "  HEAD:  0x%08x\n", ring->head);
	err_printf(m, "  TAIL:  0x%08x\n", ring->tail);
	err_printf(m, "  CTL:   0x%08x\n", ring->ctl);
	err_printf(m, "  HWS:   0x%08x\n", ring->hws);
	err_printf(m, "  ACTHD: 0x%08x %08x\n", (u32)(ring->acthd >> 32), (u32)ring->acthd);
	err_printf(m, "  IPEIR: 0x%08x\n", ring->ipeir);
	err_printf(m, "  IPEHR: 0x%08x\n", ring->ipehr);
	err_printf(m, "  INSTDONE: 0x%08x\n", ring->instdone);
	if (INTEL_INFO(dev)->gen >= 4) {
		err_printf(m, "  BBADDR: 0x%08x %08x\n", (u32)(ring->bbaddr >> 32), (u32)ring->bbaddr);
		err_printf(m, "  BB_STATE: 0x%08x\n", ring->bbstate);
		err_printf(m, "  INSTPS: 0x%08x\n", ring->instps);
	}
	err_printf(m, "  INSTPM: 0x%08x\n", ring->instpm);
	err_printf(m, "  FADDR: 0x%08x %08x\n", upper_32_bits(ring->faddr),
		   lower_32_bits(ring->faddr));
	if (INTEL_INFO(dev)->gen >= 6) {
		err_printf(m, "  RC PSMI: 0x%08x\n", ring->rc_psmi);
		err_printf(m, "  FAULT_REG: 0x%08x\n", ring->fault_reg);
		err_printf(m, "  SYNC_0: 0x%08x [last synced 0x%08x]\n",
			   ring->semaphore_mboxes[0],
			   ring->semaphore_seqno[0]);
		err_printf(m, "  SYNC_1: 0x%08x [last synced 0x%08x]\n",
			   ring->semaphore_mboxes[1],
			   ring->semaphore_seqno[1]);
		if (HAS_VEBOX(dev)) {
			err_printf(m, "  SYNC_2: 0x%08x [last synced 0x%08x]\n",
				   ring->semaphore_mboxes[2],
				   ring->semaphore_seqno[2]);
		}
	}
	if (USES_PPGTT(dev)) {
		err_printf(m, "  GFX_MODE: 0x%08x\n", ring->vm_info.gfx_mode);

		if (INTEL_INFO(dev)->gen >= 8) {
			int i;
			for (i = 0; i < 4; i++)
				err_printf(m, "  PDP%d: 0x%016llx\n",
					   i, ring->vm_info.pdp[i]);
		} else {
			err_printf(m, "  PP_DIR_BASE: 0x%08x\n",
				   ring->vm_info.pp_dir_base);
		}
	}
	err_printf(m, "  seqno: 0x%08x\n", ring->seqno);
	err_printf(m, "  waiting: %s\n", yesno(ring->waiting));
	err_printf(m, "  ring->head: 0x%08x\n", ring->cpu_ring_head);
	err_printf(m, "  ring->tail: 0x%08x\n", ring->cpu_ring_tail);
	err_printf(m, "  hangcheck: %s [%d]\n",
		   hangcheck_action_to_str(ring->hangcheck_action),
		   ring->hangcheck_score);
}
void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
{
	va_list args;

	va_start(args, f);
	i915_error_vprintf(e, f, args);
	va_end(args);
}
static void print_error_obj(struct drm_i915_error_state_buf *m,
			    struct drm_i915_error_object *obj)
{
	int page, offset, elt;

	for (page = offset = 0; page < obj->page_count; page++) {
		for (elt = 0; elt < PAGE_SIZE/4; elt++) {
			err_printf(m, "%08x :  %08x\n", offset,
				   obj->pages[page][elt]);
			offset += 4;
		}
	}
}
int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
			    const struct i915_error_state_file_priv *error_priv)
{
	struct drm_device *dev = error_priv->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_error_state *error = error_priv->error;
	struct drm_i915_error_object *obj;
	int i, j, offset, elt;
	int max_hangcheck_score;

	if (!error) {
		err_printf(m, "no error state collected\n");
		goto out;
	}

	err_printf(m, "%s\n", error->error_msg);
	err_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
		   error->time.tv_usec);
	err_printf(m, "Kernel: " UTS_RELEASE "\n");
	max_hangcheck_score = 0;
	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
		if (error->ring[i].hangcheck_score > max_hangcheck_score)
			max_hangcheck_score = error->ring[i].hangcheck_score;
	}
	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
		if (error->ring[i].hangcheck_score == max_hangcheck_score &&
		    error->ring[i].pid != -1) {
			err_printf(m, "Active process (on ring %s): %s [%d]\n",
				   ring_str(i),
				   error->ring[i].comm,
				   error->ring[i].pid);
		}
	}
	err_printf(m, "Reset count: %u\n", error->reset_count);
	err_printf(m, "Suspend count: %u\n", error->suspend_count);
	err_printf(m, "PCI ID: 0x%04x\n", dev->pdev->device);
	err_printf(m, "PCI Revision: 0x%02x\n", dev->pdev->revision);
	err_printf(m, "PCI Subsystem: %04x:%04x\n",
		   dev->pdev->subsystem_vendor,
		   dev->pdev->subsystem_device);
	err_printf(m, "IOMMU enabled?: %d\n", error->iommu);

	if (HAS_CSR(dev)) {
		struct intel_csr *csr = &dev_priv->csr;

		err_printf(m, "DMC loaded: %s\n",
			   yesno(csr->dmc_payload != NULL));
		err_printf(m, "DMC fw version: %d.%d\n",
			   CSR_VERSION_MAJOR(csr->version),
			   CSR_VERSION_MINOR(csr->version));
	}

	err_printf(m, "EIR: 0x%08x\n", error->eir);
	err_printf(m, "IER: 0x%08x\n", error->ier);
	if (INTEL_INFO(dev)->gen >= 8) {
		for (i = 0; i < 4; i++)
			err_printf(m, "GTIER gt %d: 0x%08x\n", i,
				   error->gtier[i]);
	} else if (HAS_PCH_SPLIT(dev) || IS_VALLEYVIEW(dev))
		err_printf(m, "GTIER: 0x%08x\n", error->gtier[0]);
	err_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
	err_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake);
	err_printf(m, "DERRMR: 0x%08x\n", error->derrmr);
	err_printf(m, "CCID: 0x%08x\n", error->ccid);
	err_printf(m, "Missed interrupts: 0x%08lx\n", dev_priv->gpu_error.missed_irq_rings);

	for (i = 0; i < dev_priv->num_fence_regs; i++)
		err_printf(m, "  fence[%d] = %08llx\n", i, error->fence[i]);

	for (i = 0; i < ARRAY_SIZE(error->extra_instdone); i++)
		err_printf(m, "  INSTDONE_%d: 0x%08x\n", i,
			   error->extra_instdone[i]);

	if (INTEL_INFO(dev)->gen >= 6) {
		err_printf(m, "ERROR: 0x%08x\n", error->error);

		if (INTEL_INFO(dev)->gen >= 8)
			err_printf(m, "FAULT_TLB_DATA: 0x%08x 0x%08x\n",
				   error->fault_data1, error->fault_data0);

		err_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
	}

	if (INTEL_INFO(dev)->gen == 7)
		err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);

	for (i = 0; i < ARRAY_SIZE(error->ring); i++)
		i915_ring_error_state(m, dev, error, i);

	for (i = 0; i < error->vm_count; i++) {
		err_printf(m, "vm[%d]\n", i);

		print_error_buffers(m, "Active",
				    error->active_bo[i],
				    error->active_bo_count[i]);

		print_error_buffers(m, "Pinned",
				    error->pinned_bo[i],
				    error->pinned_bo_count[i]);
	}

	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
		obj = error->ring[i].batchbuffer;
		if (obj) {
			err_puts(m, dev_priv->ring[i].name);
			if (error->ring[i].pid != -1)
				err_printf(m, " (submitted by %s [%d])",
					   error->ring[i].comm,
					   error->ring[i].pid);
			err_printf(m, " --- gtt_offset = 0x%08x %08x\n",
				   upper_32_bits(obj->gtt_offset),
				   lower_32_bits(obj->gtt_offset));
			print_error_obj(m, obj);
		}

		obj = error->ring[i].wa_batchbuffer;
		if (obj) {
			err_printf(m, "%s (w/a) --- gtt_offset = 0x%08x\n",
				   dev_priv->ring[i].name,
				   lower_32_bits(obj->gtt_offset));
			print_error_obj(m, obj);
		}

		if (error->ring[i].num_requests) {
			err_printf(m, "%s --- %d requests\n",
				   dev_priv->ring[i].name,
				   error->ring[i].num_requests);
			for (j = 0; j < error->ring[i].num_requests; j++) {
				err_printf(m, "  seqno 0x%08x, emitted %ld, tail 0x%08x\n",
					   error->ring[i].requests[j].seqno,
					   error->ring[i].requests[j].jiffies,
					   error->ring[i].requests[j].tail);
			}
		}

		if ((obj = error->ring[i].ringbuffer)) {
			err_printf(m, "%s --- ringbuffer = 0x%08x\n",
				   dev_priv->ring[i].name,
				   lower_32_bits(obj->gtt_offset));
			print_error_obj(m, obj);
		}

		if ((obj = error->ring[i].hws_page)) {
			u64 hws_offset = obj->gtt_offset;
			u32 *hws_page = &obj->pages[0][0];

			if (i915.enable_execlists) {
				hws_offset += LRC_PPHWSP_PN * PAGE_SIZE;
				hws_page = &obj->pages[LRC_PPHWSP_PN][0];
			}
			err_printf(m, "%s --- HW Status = 0x%08llx\n",
				   dev_priv->ring[i].name, hws_offset);
			offset = 0;
			for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
				err_printf(m, "[%04x] %08x %08x %08x %08x\n",
					   offset,
					   hws_page[elt],
					   hws_page[elt+1],
					   hws_page[elt+2],
					   hws_page[elt+3]);
				offset += 16;
			}
		}

		if ((obj = error->ring[i].ctx)) {
			err_printf(m, "%s --- HW Context = 0x%08x\n",
				   dev_priv->ring[i].name,
				   lower_32_bits(obj->gtt_offset));
			print_error_obj(m, obj);
		}
	}

	if ((obj = error->semaphore_obj)) {
		err_printf(m, "Semaphore page = 0x%08x\n",
			   lower_32_bits(obj->gtt_offset));
		for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
			err_printf(m, "[%04x] %08x %08x %08x %08x\n",
				   elt * 4,
				   obj->pages[0][elt],
				   obj->pages[0][elt+1],
				   obj->pages[0][elt+2],
				   obj->pages[0][elt+3]);
		}
	}

	if (error->overlay)
		intel_overlay_print_error_state(m, error->overlay);

	if (error->display)
		intel_display_print_error_state(m, dev, error->display);

out:
	if (m->bytes == 0 && m->err)
		return m->err;

	return 0;
}
int i915_error_state_buf_init(struct drm_i915_error_state_buf *ebuf,
			      struct drm_i915_private *i915,
			      size_t count, loff_t pos)
{
	memset(ebuf, 0, sizeof(*ebuf));
	ebuf->i915 = i915;

	/* We need to have enough room to store any i915_error_state printf
	 * so that we can move it to start position.
	 */
	ebuf->size = count + 1 > PAGE_SIZE ? count + 1 : PAGE_SIZE;
	ebuf->buf = kmalloc(ebuf->size,
			    GFP_TEMPORARY | __GFP_NORETRY | __GFP_NOWARN);

	if (ebuf->buf == NULL) {
		ebuf->size = PAGE_SIZE;
		ebuf->buf = kmalloc(ebuf->size, GFP_TEMPORARY);
	}

	if (ebuf->buf == NULL) {
		ebuf->size = 128;
		ebuf->buf = kmalloc(ebuf->size, GFP_TEMPORARY);
	}

	if (ebuf->buf == NULL)
		return -ENOMEM;

	ebuf->start = pos;

	return 0;
}
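/*
 * Illustrative sketch (not from the original file): the intended calling
 * sequence for a sysfs/debugfs style read handler built on the helpers in
 * this file.  read_error_sketch() and its argument list are assumptions made
 * for illustration; i915_error_state_buf_init(), i915_error_state_to_str()
 * and i915_error_state_get()/_put() are the real interfaces defined here.
 */
static ssize_t read_error_sketch(struct drm_device *dev, char __user *ubuf,
				 size_t count, loff_t pos)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_error_state_file_priv error_priv;
	struct drm_i915_error_state_buf error_str;
	ssize_t ret;

	ret = i915_error_state_buf_init(&error_str, dev_priv, count, pos);
	if (ret)
		return ret;

	/* Take a reference on the current error state, if any. */
	error_priv.dev = dev;
	i915_error_state_get(dev, &error_priv);

	ret = i915_error_state_to_str(&error_str, &error_priv);
	if (ret == 0)
		ret = simple_read_from_buffer(ubuf, count, &pos,
					      error_str.buf, error_str.bytes);

	i915_error_state_put(&error_priv);
	kfree(error_str.buf); /* allocated by i915_error_state_buf_init() */

	return ret;
}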
static void i915_error_object_free(struct drm_i915_error_object *obj)
{
	int page;

	if (obj == NULL)
		return;

	for (page = 0; page < obj->page_count; page++)
		kfree(obj->pages[page]);

	kfree(obj);
}
static void i915_error_state_free(struct kref *error_ref)
{
	struct drm_i915_error_state *error = container_of(error_ref,
							  typeof(*error), ref);
	int i;

	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
		i915_error_object_free(error->ring[i].batchbuffer);
		i915_error_object_free(error->ring[i].wa_batchbuffer);
		i915_error_object_free(error->ring[i].ringbuffer);
		i915_error_object_free(error->ring[i].hws_page);
		i915_error_object_free(error->ring[i].ctx);
		kfree(error->ring[i].requests);
	}

	i915_error_object_free(error->semaphore_obj);

	for (i = 0; i < error->vm_count; i++)
		kfree(error->active_bo[i]);

	kfree(error->active_bo);
	kfree(error->active_bo_count);
	kfree(error->pinned_bo);
	kfree(error->pinned_bo_count);
	kfree(error->overlay);
	kfree(error->display);
	kfree(error);
}
static struct drm_i915_error_object *
i915_error_object_create(struct drm_i915_private *dev_priv,
			 struct drm_i915_gem_object *src,
			 struct i915_address_space *vm)
{
	struct drm_i915_error_object *dst;
	struct i915_vma *vma = NULL;
	int num_pages;
	bool use_ggtt;
	int i = 0;
	u64 reloc_offset;

	if (src == NULL || src->pages == NULL)
		return NULL;

	num_pages = src->base.size >> PAGE_SHIFT;

	dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), GFP_ATOMIC);
	if (dst == NULL)
		return NULL;

	if (i915_gem_obj_bound(src, vm))
		dst->gtt_offset = i915_gem_obj_offset(src, vm);
	else
		dst->gtt_offset = -1;

	reloc_offset = dst->gtt_offset;
	if (i915_is_ggtt(vm))
		vma = i915_gem_obj_to_ggtt(src);
	use_ggtt = (src->cache_level == I915_CACHE_NONE &&
		    vma && (vma->bound & GLOBAL_BIND) &&
		    reloc_offset + num_pages * PAGE_SIZE <= dev_priv->gtt.mappable_end);

	/* Cannot access stolen address directly, try to use the aperture */
	if (src->stolen) {
		use_ggtt = true;

		if (!(vma && vma->bound & GLOBAL_BIND))
			goto unwind;

		reloc_offset = i915_gem_obj_ggtt_offset(src);
		if (reloc_offset + num_pages * PAGE_SIZE > dev_priv->gtt.mappable_end)
			goto unwind;
	}

	/* Cannot access snooped pages through the aperture */
	if (use_ggtt && src->cache_level != I915_CACHE_NONE && !HAS_LLC(dev_priv->dev))
		goto unwind;

	dst->page_count = num_pages;
	while (num_pages--) {
		unsigned long flags;
		void *d;

		d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
		if (d == NULL)
			goto unwind;

		local_irq_save(flags);
		if (use_ggtt) {
			void __iomem *s;

			/* Simply ignore tiling or any overlapping fence.
			 * It's part of the error state, and this hopefully
			 * captures what the GPU read.
			 */
			s = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
						     reloc_offset);
			memcpy_fromio(d, s, PAGE_SIZE);
			io_mapping_unmap_atomic(s);
		} else {
			struct page *page;
			void *s;

			page = i915_gem_object_get_page(src, i);

			drm_clflush_pages(&page, 1);

			s = kmap_atomic(page);
			memcpy(d, s, PAGE_SIZE);
			kunmap_atomic(s);

			drm_clflush_pages(&page, 1);
		}
		local_irq_restore(flags);

		dst->pages[i++] = d;
		reloc_offset += PAGE_SIZE;
	}

	return dst;

unwind:
	while (i--)
		kfree(dst->pages[i]);
	kfree(dst);
	return NULL;
}

#define i915_error_ggtt_object_create(dev_priv, src) \
	i915_error_object_create((dev_priv), (src), &(dev_priv)->gtt.base)
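/*
 * Added annotation (not from the original source): i915_error_object_create()
 * snapshots an object's pages while running in atomic context.  When the
 * object is uncached and bound in the global GTT (or lives in stolen memory,
 * which the CPU cannot address directly), the pages are read back through the
 * GGTT aperture with memcpy_fromio(); otherwise they are clflushed and copied
 * via kmap_atomic().  The i915_error_ggtt_object_create() wrapper above is
 * the common case of snapshotting an object through its global-GTT binding.
 */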
static void capture_bo(struct drm_i915_error_buffer *err,
		       struct i915_vma *vma)
{
	struct drm_i915_gem_object *obj = vma->obj;
	int i;

	err->size = obj->base.size;
	err->name = obj->base.name;
	for (i = 0; i < I915_NUM_RINGS; i++)
		err->rseqno[i] = i915_gem_request_get_seqno(obj->last_read_req[i]);
	err->wseqno = i915_gem_request_get_seqno(obj->last_write_req);
	err->gtt_offset = vma->node.start;
	err->read_domains = obj->base.read_domains;
	err->write_domain = obj->base.write_domain;
	err->fence_reg = obj->fence_reg;
	err->pinned = 0;
	if (i915_gem_obj_is_pinned(obj))
		err->pinned = 1;
	err->tiling = obj->tiling_mode;
	err->dirty = obj->dirty;
	err->purgeable = obj->madv != I915_MADV_WILLNEED;
	err->userptr = obj->userptr.mm != NULL;
	err->ring = obj->last_write_req ?
			i915_gem_request_get_ring(obj->last_write_req)->id : -1;
	err->cache_level = obj->cache_level;
}
static u32 capture_active_bo(struct drm_i915_error_buffer *err,
			     int count, struct list_head *head)
{
	struct i915_vma *vma;
	int i = 0;

	list_for_each_entry(vma, head, vm_link) {
		capture_bo(err++, vma);
		if (++i == count)
			break;
	}

	return i;
}
static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
			     int count, struct list_head *head,
			     struct i915_address_space *vm)
{
	struct drm_i915_gem_object *obj;
	struct drm_i915_error_buffer * const first = err;
	struct drm_i915_error_buffer * const last = err + count;

	list_for_each_entry(obj, head, global_list) {
		struct i915_vma *vma;

		if (err == last)
			break;

		list_for_each_entry(vma, &obj->vma_list, obj_link)
			if (vma->vm == vm && vma->pin_count > 0)
				capture_bo(err++, vma);
	}

	return err - first;
}
/* Generate a semi-unique error code.  The code is not meant to have meaning;
 * its only purpose is to try to prevent false duplicate bug reports by
 * grossly estimating a GPU error state.
 *
 * TODO Ideally, hashing the batchbuffer would be a very nice way to determine
 * the hang if we could strip the GTT offset information from it.
 *
 * It's only a small step better than a random number in its current form.
 */
static uint32_t i915_error_generate_code(struct drm_i915_private *dev_priv,
					 struct drm_i915_error_state *error,
					 int *ring_id)
{
	uint32_t error_code = 0;
	int i;

	/* IPEHR would be an ideal way to detect hangs, as it is the gross
	 * measure of "the command that hung."  However, it often holds very
	 * common synchronization commands that also appear when the hang is
	 * strictly a client bug.  XOR in INSTDONE to differentiate those
	 * cases somewhat.
	 */
	for (i = 0; i < I915_NUM_RINGS; i++) {
		if (error->ring[i].hangcheck_action == HANGCHECK_HUNG) {
			if (ring_id)
				*ring_id = i;

			return error->ring[i].ipehr ^ error->ring[i].instdone;
		}
	}

	return error_code;
}
*dev
,
804 struct drm_i915_error_state
*error
)
806 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
809 if (IS_GEN3(dev
) || IS_GEN2(dev
)) {
810 for (i
= 0; i
< dev_priv
->num_fence_regs
; i
++)
811 error
->fence
[i
] = I915_READ(FENCE_REG(i
));
812 } else if (IS_GEN5(dev
) || IS_GEN4(dev
)) {
813 for (i
= 0; i
< dev_priv
->num_fence_regs
; i
++)
814 error
->fence
[i
] = I915_READ64(FENCE_REG_965_LO(i
));
815 } else if (INTEL_INFO(dev
)->gen
>= 6) {
816 for (i
= 0; i
< dev_priv
->num_fence_regs
; i
++)
817 error
->fence
[i
] = I915_READ64(FENCE_REG_GEN6_LO(i
));
822 static void gen8_record_semaphore_state(struct drm_i915_private
*dev_priv
,
823 struct drm_i915_error_state
*error
,
824 struct intel_engine_cs
*ring
,
825 struct drm_i915_error_ring
*ering
)
827 struct intel_engine_cs
*to
;
830 if (!i915_semaphore_is_enabled(dev_priv
->dev
))
833 if (!error
->semaphore_obj
)
834 error
->semaphore_obj
=
835 i915_error_ggtt_object_create(dev_priv
,
836 dev_priv
->semaphore_obj
);
838 for_each_ring(to
, dev_priv
, i
) {
846 signal_offset
= (GEN8_SIGNAL_OFFSET(ring
, i
) & (PAGE_SIZE
- 1))
848 tmp
= error
->semaphore_obj
->pages
[0];
849 idx
= intel_ring_sync_index(ring
, to
);
851 ering
->semaphore_mboxes
[idx
] = tmp
[signal_offset
];
852 ering
->semaphore_seqno
[idx
] = ring
->semaphore
.sync_seqno
[idx
];
static void gen6_record_semaphore_state(struct drm_i915_private *dev_priv,
					struct intel_engine_cs *ring,
					struct drm_i915_error_ring *ering)
{
	ering->semaphore_mboxes[0] = I915_READ(RING_SYNC_0(ring->mmio_base));
	ering->semaphore_mboxes[1] = I915_READ(RING_SYNC_1(ring->mmio_base));
	ering->semaphore_seqno[0] = ring->semaphore.sync_seqno[0];
	ering->semaphore_seqno[1] = ring->semaphore.sync_seqno[1];

	if (HAS_VEBOX(dev_priv->dev)) {
		ering->semaphore_mboxes[2] =
			I915_READ(RING_SYNC_2(ring->mmio_base));
		ering->semaphore_seqno[2] = ring->semaphore.sync_seqno[2];
	}
}
static void i915_record_ring_state(struct drm_device *dev,
				   struct drm_i915_error_state *error,
				   struct intel_engine_cs *ring,
				   struct drm_i915_error_ring *ering)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen >= 6) {
		ering->rc_psmi = I915_READ(RING_PSMI_CTL(ring->mmio_base));
		ering->fault_reg = I915_READ(RING_FAULT_REG(ring));
		if (INTEL_INFO(dev)->gen >= 8)
			gen8_record_semaphore_state(dev_priv, error, ring, ering);
		else
			gen6_record_semaphore_state(dev_priv, ring, ering);
	}

	if (INTEL_INFO(dev)->gen >= 4) {
		ering->faddr = I915_READ(RING_DMA_FADD(ring->mmio_base));
		ering->ipeir = I915_READ(RING_IPEIR(ring->mmio_base));
		ering->ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
		ering->instdone = I915_READ(RING_INSTDONE(ring->mmio_base));
		ering->instps = I915_READ(RING_INSTPS(ring->mmio_base));
		ering->bbaddr = I915_READ(RING_BBADDR(ring->mmio_base));
		if (INTEL_INFO(dev)->gen >= 8) {
			ering->faddr |= (u64) I915_READ(RING_DMA_FADD_UDW(ring->mmio_base)) << 32;
			ering->bbaddr |= (u64) I915_READ(RING_BBADDR_UDW(ring->mmio_base)) << 32;
		}
		ering->bbstate = I915_READ(RING_BBSTATE(ring->mmio_base));
	} else {
		ering->faddr = I915_READ(DMA_FADD_I8XX);
		ering->ipeir = I915_READ(IPEIR);
		ering->ipehr = I915_READ(IPEHR);
		ering->instdone = I915_READ(GEN2_INSTDONE);
	}

	ering->waiting = waitqueue_active(&ring->irq_queue);
	ering->instpm = I915_READ(RING_INSTPM(ring->mmio_base));
	ering->seqno = ring->get_seqno(ring, false);
	ering->acthd = intel_ring_get_active_head(ring);
	ering->start = I915_READ_START(ring);
	ering->head = I915_READ_HEAD(ring);
	ering->tail = I915_READ_TAIL(ring);
	ering->ctl = I915_READ_CTL(ring);

	if (I915_NEED_GFX_HWS(dev)) {
		i915_reg_t mmio;

		if (IS_GEN7(dev)) {
			switch (ring->id) {
			default:
			case RCS:
				mmio = RENDER_HWS_PGA_GEN7;
				break;
			case BCS:
				mmio = BLT_HWS_PGA_GEN7;
				break;
			case VCS:
				mmio = BSD_HWS_PGA_GEN7;
				break;
			case VECS:
				mmio = VEBOX_HWS_PGA_GEN7;
				break;
			}
		} else if (IS_GEN6(ring->dev)) {
			mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
		} else {
			/* XXX: gen8 returns to sanity */
			mmio = RING_HWS_PGA(ring->mmio_base);
		}

		ering->hws = I915_READ(mmio);
	}

	ering->hangcheck_score = ring->hangcheck.score;
	ering->hangcheck_action = ring->hangcheck.action;

	if (USES_PPGTT(dev)) {
		int i;

		ering->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(ring));

		if (IS_GEN6(dev))
			ering->vm_info.pp_dir_base =
				I915_READ(RING_PP_DIR_BASE_READ(ring));
		else if (IS_GEN7(dev))
			ering->vm_info.pp_dir_base =
				I915_READ(RING_PP_DIR_BASE(ring));
		else if (INTEL_INFO(dev)->gen >= 8)
			for (i = 0; i < 4; i++) {
				ering->vm_info.pdp[i] =
					I915_READ(GEN8_RING_PDP_UDW(ring, i));
				ering->vm_info.pdp[i] <<= 32;
				ering->vm_info.pdp[i] |=
					I915_READ(GEN8_RING_PDP_LDW(ring, i));
			}
	}
}
static void i915_gem_record_active_context(struct intel_engine_cs *ring,
					   struct drm_i915_error_state *error,
					   struct drm_i915_error_ring *ering)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	struct drm_i915_gem_object *obj;

	/* Currently render ring is the only HW context user */
	if (ring->id != RCS || !error->ccid)
		return;

	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if (!i915_gem_obj_ggtt_bound(obj))
			continue;

		if ((error->ccid & PAGE_MASK) == i915_gem_obj_ggtt_offset(obj)) {
			ering->ctx = i915_error_ggtt_object_create(dev_priv, obj);
			break;
		}
	}
}
static void i915_gem_record_rings(struct drm_device *dev,
				  struct drm_i915_error_state *error)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_request *request;
	int i, count;

	for (i = 0; i < I915_NUM_RINGS; i++) {
		struct intel_engine_cs *ring = &dev_priv->ring[i];
		struct intel_ringbuffer *rbuf;

		error->ring[i].pid = -1;

		if (ring->dev == NULL)
			continue;

		error->ring[i].valid = true;

		i915_record_ring_state(dev, error, ring, &error->ring[i]);

		request = i915_gem_find_active_request(ring);
		if (request) {
			struct i915_address_space *vm;

			vm = request->ctx && request->ctx->ppgtt ?
				&request->ctx->ppgtt->base :
				&dev_priv->gtt.base;

			/* We need to copy these to an anonymous buffer
			 * as the simplest method to avoid being overwritten
			 * by userspace.
			 */
			error->ring[i].batchbuffer =
				i915_error_object_create(dev_priv,
							 request->batch_obj,
							 vm);

			if (HAS_BROKEN_CS_TLB(dev_priv->dev))
				error->ring[i].wa_batchbuffer =
					i915_error_ggtt_object_create(dev_priv,
							ring->scratch.obj);

			if (request->pid) {
				struct task_struct *task;

				rcu_read_lock();
				task = pid_task(request->pid, PIDTYPE_PID);
				if (task) {
					strcpy(error->ring[i].comm, task->comm);
					error->ring[i].pid = task->pid;
				}
				rcu_read_unlock();
			}
		}

		if (i915.enable_execlists) {
			/* TODO: This is only a small fix to keep basic error
			 * capture working, but we need to add more information
			 * for it to be useful (e.g. dump the context being
			 * executed).
			 */
			if (request)
				rbuf = request->ctx->engine[ring->id].ringbuf;
			else
				rbuf = dev_priv->kernel_context->engine[ring->id].ringbuf;
		} else
			rbuf = ring->buffer;

		error->ring[i].cpu_ring_head = rbuf->head;
		error->ring[i].cpu_ring_tail = rbuf->tail;

		error->ring[i].ringbuffer =
			i915_error_ggtt_object_create(dev_priv, rbuf->obj);

		error->ring[i].hws_page =
			i915_error_ggtt_object_create(dev_priv, ring->status_page.obj);

		i915_gem_record_active_context(ring, error, &error->ring[i]);

		count = 0;
		list_for_each_entry(request, &ring->request_list, list)
			count++;

		error->ring[i].num_requests = count;
		error->ring[i].requests =
			kcalloc(count, sizeof(*error->ring[i].requests),
				GFP_ATOMIC);
		if (error->ring[i].requests == NULL) {
			error->ring[i].num_requests = 0;
			continue;
		}

		count = 0;
		list_for_each_entry(request, &ring->request_list, list) {
			struct drm_i915_error_request *erq;

			if (count >= error->ring[i].num_requests) {
				/*
				 * If the ring's request list changed between
				 * the point where the error request list was
				 * allocated and dimensioned and this point,
				 * just exit early to avoid crashes.
				 *
				 * We don't need to communicate that the
				 * request list changed during error capture
				 * and that the error state is slightly
				 * incorrect as a consequence, since we are
				 * typically only interested in the request
				 * list state at the point of error capture,
				 * not in any changes happening during the
				 * capture itself.
				 */
				break;
			}

			erq = &error->ring[i].requests[count++];
			erq->seqno = request->seqno;
			erq->jiffies = request->emitted_jiffies;
			erq->tail = request->postfix;
		}
	}
}
/* FIXME: Since pin count/bound list is global, we duplicate what we capture per
 * VM.
 */
static void i915_gem_capture_vm(struct drm_i915_private *dev_priv,
				struct drm_i915_error_state *error,
				struct i915_address_space *vm,
				const int ndx)
{
	struct drm_i915_error_buffer *active_bo = NULL, *pinned_bo = NULL;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int i;

	i = 0;
	list_for_each_entry(vma, &vm->active_list, vm_link)
		i++;
	error->active_bo_count[ndx] = i;

	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		list_for_each_entry(vma, &obj->vma_list, obj_link)
			if (vma->vm == vm && vma->pin_count > 0)
				i++;
	}
	error->pinned_bo_count[ndx] = i - error->active_bo_count[ndx];

	if (i) {
		active_bo = kcalloc(i, sizeof(*active_bo), GFP_ATOMIC);
		if (active_bo)
			pinned_bo = active_bo + error->active_bo_count[ndx];
	}

	if (active_bo)
		error->active_bo_count[ndx] =
			capture_active_bo(active_bo,
					  error->active_bo_count[ndx],
					  &vm->active_list);

	if (pinned_bo)
		error->pinned_bo_count[ndx] =
			capture_pinned_bo(pinned_bo,
					  error->pinned_bo_count[ndx],
					  &dev_priv->mm.bound_list, vm);
	error->active_bo[ndx] = active_bo;
	error->pinned_bo[ndx] = pinned_bo;
}
static void i915_gem_capture_buffers(struct drm_i915_private *dev_priv,
				     struct drm_i915_error_state *error)
{
	struct i915_address_space *vm;
	int cnt = 0, i = 0;

	list_for_each_entry(vm, &dev_priv->vm_list, global_link)
		cnt++;

	error->active_bo = kcalloc(cnt, sizeof(*error->active_bo), GFP_ATOMIC);
	error->pinned_bo = kcalloc(cnt, sizeof(*error->pinned_bo), GFP_ATOMIC);
	error->active_bo_count = kcalloc(cnt, sizeof(*error->active_bo_count),
					 GFP_ATOMIC);
	error->pinned_bo_count = kcalloc(cnt, sizeof(*error->pinned_bo_count),
					 GFP_ATOMIC);

	if (error->active_bo == NULL ||
	    error->pinned_bo == NULL ||
	    error->active_bo_count == NULL ||
	    error->pinned_bo_count == NULL) {
		kfree(error->active_bo);
		kfree(error->active_bo_count);
		kfree(error->pinned_bo);
		kfree(error->pinned_bo_count);

		error->active_bo = NULL;
		error->active_bo_count = NULL;
		error->pinned_bo = NULL;
		error->pinned_bo_count = NULL;
	} else {
		list_for_each_entry(vm, &dev_priv->vm_list, global_link)
			i915_gem_capture_vm(dev_priv, error, vm, i++);

		error->vm_count = cnt;
	}
}
/* Capture all registers which don't fit into another category. */
static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
				   struct drm_i915_error_state *error)
{
	struct drm_device *dev = dev_priv->dev;
	int i;

	/* General organization
	 * 1. Registers specific to a single generation
	 * 2. Registers which belong to multiple generations
	 * 3. Feature specific registers.
	 * 4. Everything else
	 * Please try to follow the order.
	 */

	/* 1: Registers specific to a single generation */
	if (IS_VALLEYVIEW(dev)) {
		error->gtier[0] = I915_READ(GTIER);
		error->ier = I915_READ(VLV_IER);
		error->forcewake = I915_READ_FW(FORCEWAKE_VLV);
	}

	if (IS_GEN7(dev))
		error->err_int = I915_READ(GEN7_ERR_INT);

	if (INTEL_INFO(dev)->gen >= 8) {
		error->fault_data0 = I915_READ(GEN8_FAULT_TLB_DATA0);
		error->fault_data1 = I915_READ(GEN8_FAULT_TLB_DATA1);
	}

	if (IS_GEN6(dev)) {
		error->forcewake = I915_READ_FW(FORCEWAKE);
		error->gab_ctl = I915_READ(GAB_CTL);
		error->gfx_mode = I915_READ(GFX_MODE);
	}

	/* 2: Registers which belong to multiple generations */
	if (INTEL_INFO(dev)->gen >= 7)
		error->forcewake = I915_READ_FW(FORCEWAKE_MT);

	if (INTEL_INFO(dev)->gen >= 6) {
		error->derrmr = I915_READ(DERRMR);
		error->error = I915_READ(ERROR_GEN6);
		error->done_reg = I915_READ(DONE_REG);
	}

	/* 3: Feature specific registers */
	if (IS_GEN6(dev) || IS_GEN7(dev)) {
		error->gam_ecochk = I915_READ(GAM_ECOCHK);
		error->gac_eco = I915_READ(GAC_ECO_BITS);
	}

	/* 4: Everything else */
	if (HAS_HW_CONTEXTS(dev))
		error->ccid = I915_READ(CCID);

	if (INTEL_INFO(dev)->gen >= 8) {
		error->ier = I915_READ(GEN8_DE_MISC_IER);
		for (i = 0; i < 4; i++)
			error->gtier[i] = I915_READ(GEN8_GT_IER(i));
	} else if (HAS_PCH_SPLIT(dev)) {
		error->ier = I915_READ(DEIER);
		error->gtier[0] = I915_READ(GTIER);
	} else if (IS_GEN2(dev)) {
		error->ier = I915_READ16(IER);
	} else if (!IS_VALLEYVIEW(dev)) {
		error->ier = I915_READ(IER);
	}
	error->eir = I915_READ(EIR);
	error->pgtbl_er = I915_READ(PGTBL_ER);

	i915_get_extra_instdone(dev, error->extra_instdone);
}
static void i915_error_capture_msg(struct drm_device *dev,
				   struct drm_i915_error_state *error,
				   bool wedged,
				   const char *error_msg)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 ecode;
	int ring_id = -1, len;

	ecode = i915_error_generate_code(dev_priv, error, &ring_id);

	len = scnprintf(error->error_msg, sizeof(error->error_msg),
			"GPU HANG: ecode %d:%d:0x%08x",
			INTEL_INFO(dev)->gen, ring_id, ecode);

	if (ring_id != -1 && error->ring[ring_id].pid != -1)
		len += scnprintf(error->error_msg + len,
				 sizeof(error->error_msg) - len,
				 ", in %s [%d]",
				 error->ring[ring_id].comm,
				 error->ring[ring_id].pid);

	scnprintf(error->error_msg + len, sizeof(error->error_msg) - len,
		  ", reason: %s, action: %s",
		  error_msg,
		  wedged ? "reset" : "continue");
}
static void i915_capture_gen_state(struct drm_i915_private *dev_priv,
				   struct drm_i915_error_state *error)
{
	error->iommu = -1;
#ifdef CONFIG_INTEL_IOMMU
	error->iommu = intel_iommu_gfx_mapped;
#endif
	error->reset_count = i915_reset_count(&dev_priv->gpu_error);
	error->suspend_count = dev_priv->suspend_count;
}
/**
 * i915_capture_error_state - capture an error record for later analysis
 * @dev: drm device
 *
 * Should be called when an error is detected (either a hang or an error
 * interrupt) to capture error state from the time of the error.  Fills
 * out a structure which becomes available in debugfs for user level tools
 * to pick up.
 */
void i915_capture_error_state(struct drm_device *dev, bool wedged,
			      const char *error_msg)
{
	static bool warned;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_error_state *error;
	unsigned long flags;

	/* Account for pipe specific data like PIPE*STAT */
	error = kzalloc(sizeof(*error), GFP_ATOMIC);
	if (!error) {
		DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
		return;
	}

	kref_init(&error->ref);

	i915_capture_gen_state(dev_priv, error);
	i915_capture_reg_state(dev_priv, error);
	i915_gem_capture_buffers(dev_priv, error);
	i915_gem_record_fences(dev, error);
	i915_gem_record_rings(dev, error);

	do_gettimeofday(&error->time);

	error->overlay = intel_overlay_capture_error_state(dev);
	error->display = intel_display_capture_error_state(dev);

	i915_error_capture_msg(dev, error, wedged, error_msg);
	DRM_INFO("%s\n", error->error_msg);

	spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
	if (dev_priv->gpu_error.first_error == NULL) {
		dev_priv->gpu_error.first_error = error;
		error = NULL;
	}
	spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);

	if (error) {
		i915_error_state_free(&error->ref);
		return;
	}

	if (!warned) {
		DRM_INFO("GPU hangs can indicate a bug anywhere in the entire gfx stack, including userspace.\n");
		DRM_INFO("Please file a _new_ bug report on bugs.freedesktop.org against DRI -> DRM/Intel\n");
		DRM_INFO("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
		DRM_INFO("The gpu crash dump is required to analyze gpu hangs, so please always attach it.\n");
		DRM_INFO("GPU crash dump saved to /sys/class/drm/card%d/error\n", dev->primary->index);
		warned = true;
	}
}
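/*
 * Illustrative sketch (not from the original file): the expected call from a
 * hang/error-interrupt handler.  i915_capture_error_state() above is the real
 * entry point; handle_hang_sketch() and its message text are assumptions made
 * for illustration only.
 */
static void handle_hang_sketch(struct drm_device *dev, bool wedged)
{
	/* Capture first, while the hung state is still latched in the HW. */
	i915_capture_error_state(dev, wedged,
				 "hangcheck detected a stuck engine");

	/* The caller then kicks off its reset/recovery work as appropriate. */
}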
void i915_error_state_get(struct drm_device *dev,
			  struct i915_error_state_file_priv *error_priv)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	spin_lock_irq(&dev_priv->gpu_error.lock);
	error_priv->error = dev_priv->gpu_error.first_error;
	if (error_priv->error)
		kref_get(&error_priv->error->ref);
	spin_unlock_irq(&dev_priv->gpu_error.lock);
}
void i915_error_state_put(struct i915_error_state_file_priv *error_priv)
{
	if (error_priv->error)
		kref_put(&error_priv->error->ref, i915_error_state_free);
}
void i915_destroy_error_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_error_state *error;

	spin_lock_irq(&dev_priv->gpu_error.lock);
	error = dev_priv->gpu_error.first_error;
	dev_priv->gpu_error.first_error = NULL;
	spin_unlock_irq(&dev_priv->gpu_error.lock);

	if (error)
		kref_put(&error->ref, i915_error_state_free);
}
const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
{
	switch (type) {
	case I915_CACHE_NONE: return " uncached";
	case I915_CACHE_LLC: return HAS_LLC(i915) ? " LLC" : " snooped";
	case I915_CACHE_L3_LLC: return " L3+LLC";
	case I915_CACHE_WT: return " WT";
	default: return "";
	}
}
/* NB: please notice the memset */
void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);

	if (IS_GEN2(dev) || IS_GEN3(dev))
		instdone[0] = I915_READ(GEN2_INSTDONE);
	else if (IS_GEN4(dev) || IS_GEN5(dev) || IS_GEN6(dev)) {
		instdone[0] = I915_READ(RING_INSTDONE(RENDER_RING_BASE));
		instdone[1] = I915_READ(GEN4_INSTDONE1);
	} else if (INTEL_INFO(dev)->gen >= 7) {
		instdone[0] = I915_READ(RING_INSTDONE(RENDER_RING_BASE));
		instdone[1] = I915_READ(GEN7_SC_INSTDONE);
		instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
		instdone[3] = I915_READ(GEN7_ROW_INSTDONE);
	}
}