/*
 * Copyright (c) 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *    Mika Kuoppala <mika.kuoppala@intel.com>
 */

#include <generated/utsrelease.h>
#include "i915_drv.h"
static const char *yesno(int v)
{
	return v ? "yes" : "no";
}

static const char *ring_str(int ring)
{
	switch (ring) {
	case RCS: return "render";
	case VCS: return "bsd";
	case BCS: return "blt";
	case VECS: return "vebox";
	case VCS2: return "bsd2";
	default: return "";
	}
}

static const char *pin_flag(int pinned)
{
	if (pinned > 0)
		return " P";
	else if (pinned < 0)
		return " p";
	else
		return "";
}

static const char *tiling_flag(int tiling)
{
	switch (tiling) {
	default:
	case I915_TILING_NONE: return "";
	case I915_TILING_X: return " X";
	case I915_TILING_Y: return " Y";
	}
}

static const char *dirty_flag(int dirty)
{
	return dirty ? " dirty" : "";
}

static const char *purgeable_flag(int purgeable)
{
	return purgeable ? " purgeable" : "";
}
static bool __i915_error_ok(struct drm_i915_error_state_buf *e)
{
	if (!e->err && WARN(e->bytes > (e->size - 1), "overflow")) {
		e->err = -ENOSPC;
		return false;
	}

	if (e->bytes == e->size - 1 || e->err)
		return false;

	return true;
}

static bool __i915_error_seek(struct drm_i915_error_state_buf *e,
			      unsigned len)
{
	if (e->pos + len <= e->start) {
		e->pos += len;
		return false;
	}

	/* First vsnprintf needs to fit in its entirety for memmove */
	if (len >= e->size) {
		e->err = -EIO;
		return false;
	}

	return true;
}

static void __i915_error_advance(struct drm_i915_error_state_buf *e,
				 unsigned len)
{
	/* If this is the first printf in this window, adjust it so that
	 * the start position matches the start of the buffer.
	 */
	if (e->pos < e->start) {
		const size_t off = e->start - e->pos;

		/* Should not happen but be paranoid */
		if (off > len || e->bytes) {
			e->err = -EIO;
			return;
		}

		memmove(e->buf, e->buf + off, len - off);
		e->bytes = len - off;
		e->pos = e->start;
		return;
	}

	e->bytes += len;
	e->pos += len;
}
static void i915_error_vprintf(struct drm_i915_error_state_buf *e,
			       const char *f, va_list args)
{
	unsigned len;

	if (!__i915_error_ok(e))
		return;

	/* Seek to the first printf which hits the start position */
	if (e->pos < e->start) {
		va_list tmp;

		va_copy(tmp, args);
		len = vsnprintf(NULL, 0, f, tmp);
		va_end(tmp);

		if (!__i915_error_seek(e, len))
			return;
	}

	len = vsnprintf(e->buf + e->bytes, e->size - e->bytes, f, args);
	if (len >= e->size - e->bytes)
		len = e->size - e->bytes - 1;

	__i915_error_advance(e, len);
}
static void i915_error_puts(struct drm_i915_error_state_buf *e,
			    const char *str)
{
	unsigned len;

	if (!__i915_error_ok(e))
		return;

	len = strlen(str);

	/* Seek to the first printf which hits the start position */
	if (e->pos < e->start) {
		if (!__i915_error_seek(e, len))
			return;
	}

	if (len >= e->size - e->bytes)
		len = e->size - e->bytes - 1;
	memcpy(e->buf + e->bytes, str, len);

	__i915_error_advance(e, len);
}

#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
#define err_puts(e, s) i915_error_puts(e, s)
static void print_error_buffers(struct drm_i915_error_state_buf *m,
				const char *name,
				struct drm_i915_error_buffer *err,
				int count)
{
	int i;

	err_printf(m, "  %s [%d]:\n", name, count);

	while (count--) {
		err_printf(m, "    %08x %8u %02x %02x [ ",
			   err->gtt_offset,
			   err->size,
			   err->read_domains,
			   err->write_domain);
		for (i = 0; i < I915_NUM_RINGS; i++)
			err_printf(m, "%02x ", err->rseqno[i]);

		err_printf(m, "] %02x", err->wseqno);
		err_puts(m, pin_flag(err->pinned));
		err_puts(m, tiling_flag(err->tiling));
		err_puts(m, dirty_flag(err->dirty));
		err_puts(m, purgeable_flag(err->purgeable));
		err_puts(m, err->userptr ? " userptr" : "");
		err_puts(m, err->ring != -1 ? " " : "");
		err_puts(m, ring_str(err->ring));
		err_puts(m, i915_cache_level_str(m->i915, err->cache_level));

		if (err->name)
			err_printf(m, " (name: %d)", err->name);
		if (err->fence_reg != I915_FENCE_REG_NONE)
			err_printf(m, " (fence: %d)", err->fence_reg);

		err_puts(m, "\n");
		err++;
	}
}
static const char *hangcheck_action_to_str(enum intel_ring_hangcheck_action a)
{
	switch (a) {
	case HANGCHECK_IDLE:
		return "idle";
	case HANGCHECK_WAIT:
		return "wait";
	case HANGCHECK_ACTIVE:
		return "active";
	case HANGCHECK_ACTIVE_LOOP:
		return "active (loop)";
	case HANGCHECK_KICK:
		return "kick";
	case HANGCHECK_HUNG:
		return "hung";
	}

	return "unknown";
}
static void i915_ring_error_state(struct drm_i915_error_state_buf *m,
				  struct drm_device *dev,
				  struct drm_i915_error_state *error,
				  int ring_idx)
{
	struct drm_i915_error_ring *ring = &error->ring[ring_idx];

	if (!ring->valid)
		return;

	err_printf(m, "%s command stream:\n", ring_str(ring_idx));
	err_printf(m, "  START: 0x%08x\n", ring->start);
	err_printf(m, "  HEAD:  0x%08x\n", ring->head);
	err_printf(m, "  TAIL:  0x%08x\n", ring->tail);
	err_printf(m, "  CTL:   0x%08x\n", ring->ctl);
	err_printf(m, "  HWS:   0x%08x\n", ring->hws);
	err_printf(m, "  ACTHD: 0x%08x %08x\n", (u32)(ring->acthd>>32), (u32)ring->acthd);
	err_printf(m, "  IPEIR: 0x%08x\n", ring->ipeir);
	err_printf(m, "  IPEHR: 0x%08x\n", ring->ipehr);
	err_printf(m, "  INSTDONE: 0x%08x\n", ring->instdone);
	if (INTEL_INFO(dev)->gen >= 4) {
		err_printf(m, "  BBADDR: 0x%08x %08x\n", (u32)(ring->bbaddr>>32), (u32)ring->bbaddr);
		err_printf(m, "  BB_STATE: 0x%08x\n", ring->bbstate);
		err_printf(m, "  INSTPS: 0x%08x\n", ring->instps);
	}
	err_printf(m, "  INSTPM: 0x%08x\n", ring->instpm);
	err_printf(m, "  FADDR: 0x%08x %08x\n", upper_32_bits(ring->faddr),
		   lower_32_bits(ring->faddr));
	if (INTEL_INFO(dev)->gen >= 6) {
		err_printf(m, "  RC PSMI: 0x%08x\n", ring->rc_psmi);
		err_printf(m, "  FAULT_REG: 0x%08x\n", ring->fault_reg);
		err_printf(m, "  SYNC_0: 0x%08x [last synced 0x%08x]\n",
			   ring->semaphore_mboxes[0],
			   ring->semaphore_seqno[0]);
		err_printf(m, "  SYNC_1: 0x%08x [last synced 0x%08x]\n",
			   ring->semaphore_mboxes[1],
			   ring->semaphore_seqno[1]);
		if (HAS_VEBOX(dev)) {
			err_printf(m, "  SYNC_2: 0x%08x [last synced 0x%08x]\n",
				   ring->semaphore_mboxes[2],
				   ring->semaphore_seqno[2]);
		}
	}
	if (USES_PPGTT(dev)) {
		err_printf(m, "  GFX_MODE: 0x%08x\n", ring->vm_info.gfx_mode);

		if (INTEL_INFO(dev)->gen >= 8) {
			int i;
			for (i = 0; i < 4; i++)
				err_printf(m, "  PDP%d: 0x%016llx\n",
					   i, ring->vm_info.pdp[i]);
		} else {
			err_printf(m, "  PP_DIR_BASE: 0x%08x\n",
				   ring->vm_info.pp_dir_base);
		}
	}
	err_printf(m, "  seqno: 0x%08x\n", ring->seqno);
	err_printf(m, "  waiting: %s\n", yesno(ring->waiting));
	err_printf(m, "  ring->head: 0x%08x\n", ring->cpu_ring_head);
	err_printf(m, "  ring->tail: 0x%08x\n", ring->cpu_ring_tail);
	err_printf(m, "  hangcheck: %s [%d]\n",
		   hangcheck_action_to_str(ring->hangcheck_action),
		   ring->hangcheck_score);
}
void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
{
	va_list args;

	va_start(args, f);
	i915_error_vprintf(e, f, args);
	va_end(args);
}
static void print_error_obj(struct drm_i915_error_state_buf *m,
			    struct drm_i915_error_object *obj)
{
	int page, offset, elt;

	for (page = offset = 0; page < obj->page_count; page++) {
		for (elt = 0; elt < PAGE_SIZE/4; elt++) {
			err_printf(m, "%08x : %08x\n", offset,
				   obj->pages[page][elt]);
			offset += 4;
		}
	}
}
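/*
 * Top-level pretty-printer for a captured error state; the debugfs "error"
 * file renders the whole capture through the err_printf()/err_puts()
 * helpers above.
 */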
int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
			    const struct i915_error_state_file_priv *error_priv)
{
	struct drm_device *dev = error_priv->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_error_state *error = error_priv->error;
	struct drm_i915_error_object *obj;
	int i, j, offset, elt;
	int max_hangcheck_score;

	if (!error) {
		err_printf(m, "no error state collected\n");
		goto out;
	}

	err_printf(m, "%s\n", error->error_msg);
	err_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
		   error->time.tv_usec);
	err_printf(m, "Kernel: " UTS_RELEASE "\n");
	max_hangcheck_score = 0;
	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
		if (error->ring[i].hangcheck_score > max_hangcheck_score)
			max_hangcheck_score = error->ring[i].hangcheck_score;
	}
	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
		if (error->ring[i].hangcheck_score == max_hangcheck_score &&
		    error->ring[i].pid != -1) {
			err_printf(m, "Active process (on ring %s): %s [%d]\n",
				   ring_str(i),
				   error->ring[i].comm,
				   error->ring[i].pid);
		}
	}
	err_printf(m, "Reset count: %u\n", error->reset_count);
	err_printf(m, "Suspend count: %u\n", error->suspend_count);
	err_printf(m, "PCI ID: 0x%04x\n", dev->pdev->device);
	err_printf(m, "EIR: 0x%08x\n", error->eir);
	err_printf(m, "IER: 0x%08x\n", error->ier);
	if (INTEL_INFO(dev)->gen >= 8) {
		for (i = 0; i < 4; i++)
			err_printf(m, "GTIER gt %d: 0x%08x\n", i,
				   error->gtier[i]);
	} else if (HAS_PCH_SPLIT(dev) || IS_VALLEYVIEW(dev))
		err_printf(m, "GTIER: 0x%08x\n", error->gtier[0]);
	err_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
	err_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake);
	err_printf(m, "DERRMR: 0x%08x\n", error->derrmr);
	err_printf(m, "CCID: 0x%08x\n", error->ccid);
	err_printf(m, "Missed interrupts: 0x%08lx\n", dev_priv->gpu_error.missed_irq_rings);

	for (i = 0; i < dev_priv->num_fence_regs; i++)
		err_printf(m, "  fence[%d] = %08llx\n", i, error->fence[i]);

	for (i = 0; i < ARRAY_SIZE(error->extra_instdone); i++)
		err_printf(m, "  INSTDONE_%d: 0x%08x\n", i,
			   error->extra_instdone[i]);

	if (INTEL_INFO(dev)->gen >= 6) {
		err_printf(m, "ERROR: 0x%08x\n", error->error);

		if (INTEL_INFO(dev)->gen >= 8)
			err_printf(m, "FAULT_TLB_DATA: 0x%08x 0x%08x\n",
				   error->fault_data1, error->fault_data0);

		err_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
	}

	if (INTEL_INFO(dev)->gen == 7)
		err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);

	for (i = 0; i < ARRAY_SIZE(error->ring); i++)
		i915_ring_error_state(m, dev, error, i);

	for (i = 0; i < error->vm_count; i++) {
		err_printf(m, "vm[%d]\n", i);

		print_error_buffers(m, "Active",
				    error->active_bo[i],
				    error->active_bo_count[i]);

		print_error_buffers(m, "Pinned",
				    error->pinned_bo[i],
				    error->pinned_bo_count[i]);
	}

	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
		obj = error->ring[i].batchbuffer;
		if (obj) {
			err_puts(m, dev_priv->ring[i].name);
			if (error->ring[i].pid != -1)
				err_printf(m, " (submitted by %s [%d])",
					   error->ring[i].comm,
					   error->ring[i].pid);
			err_printf(m, " --- gtt_offset = 0x%08x\n",
				   obj->gtt_offset);
			print_error_obj(m, obj);
		}

		obj = error->ring[i].wa_batchbuffer;
		if (obj) {
			err_printf(m, "%s (w/a) --- gtt_offset = 0x%08x\n",
				   dev_priv->ring[i].name, obj->gtt_offset);
			print_error_obj(m, obj);
		}

		if (error->ring[i].num_requests) {
			err_printf(m, "%s --- %d requests\n",
				   dev_priv->ring[i].name,
				   error->ring[i].num_requests);
			for (j = 0; j < error->ring[i].num_requests; j++) {
				err_printf(m, "  seqno 0x%08x, emitted %ld, tail 0x%08x\n",
					   error->ring[i].requests[j].seqno,
					   error->ring[i].requests[j].jiffies,
					   error->ring[i].requests[j].tail);
			}
		}

		if ((obj = error->ring[i].ringbuffer)) {
			err_printf(m, "%s --- ringbuffer = 0x%08x\n",
				   dev_priv->ring[i].name,
				   obj->gtt_offset);
			print_error_obj(m, obj);
		}

		if ((obj = error->ring[i].hws_page)) {
			err_printf(m, "%s --- HW Status = 0x%08x\n",
				   dev_priv->ring[i].name,
				   obj->gtt_offset);
			offset = 0;
			for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
				err_printf(m, "[%04x] %08x %08x %08x %08x\n",
					   offset,
					   obj->pages[0][elt],
					   obj->pages[0][elt+1],
					   obj->pages[0][elt+2],
					   obj->pages[0][elt+3]);
				offset += 16;
			}
		}

		if ((obj = error->ring[i].ctx)) {
			err_printf(m, "%s --- HW Context = 0x%08x\n",
				   dev_priv->ring[i].name,
				   obj->gtt_offset);
			print_error_obj(m, obj);
		}
	}

	if ((obj = error->semaphore_obj)) {
		err_printf(m, "Semaphore page = 0x%08x\n", obj->gtt_offset);
		for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
			err_printf(m, "[%04x] %08x %08x %08x %08x\n",
				   elt * 4,
				   obj->pages[0][elt],
				   obj->pages[0][elt+1],
				   obj->pages[0][elt+2],
				   obj->pages[0][elt+3]);
		}
	}

	if (error->overlay)
		intel_overlay_print_error_state(m, error->overlay);

	if (error->display)
		intel_display_print_error_state(m, dev, error->display);

out:
	if (m->bytes == 0 && m->err)
		return m->err;

	return 0;
}
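/*
 * Allocate the output buffer, falling back to progressively smaller
 * allocations (a single page, then a minimal 128 bytes) if the initial
 * allocation fails.
 */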
int i915_error_state_buf_init(struct drm_i915_error_state_buf *ebuf,
			      struct drm_i915_private *i915,
			      size_t count, loff_t pos)
{
	memset(ebuf, 0, sizeof(*ebuf));
	ebuf->i915 = i915;

	/* We need to have enough room to store any i915_error_state printf
	 * so that we can move it to the start position.
	 */
	ebuf->size = count + 1 > PAGE_SIZE ? count + 1 : PAGE_SIZE;
	ebuf->buf = kmalloc(ebuf->size,
			    GFP_TEMPORARY | __GFP_NORETRY | __GFP_NOWARN);

	if (ebuf->buf == NULL) {
		ebuf->size = PAGE_SIZE;
		ebuf->buf = kmalloc(ebuf->size, GFP_TEMPORARY);
	}

	if (ebuf->buf == NULL) {
		ebuf->size = 128;
		ebuf->buf = kmalloc(ebuf->size, GFP_TEMPORARY);
	}

	if (ebuf->buf == NULL)
		return -ENOMEM;

	ebuf->start = pos;

	return 0;
}
static void i915_error_object_free(struct drm_i915_error_object *obj)
{
	int page;

	if (obj == NULL)
		return;

	for (page = 0; page < obj->page_count; page++)
		kfree(obj->pages[page]);

	kfree(obj);
}
static void i915_error_state_free(struct kref *error_ref)
{
	struct drm_i915_error_state *error = container_of(error_ref,
							  typeof(*error), ref);
	int i;

	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
		i915_error_object_free(error->ring[i].batchbuffer);
		i915_error_object_free(error->ring[i].wa_batchbuffer);
		i915_error_object_free(error->ring[i].ringbuffer);
		i915_error_object_free(error->ring[i].hws_page);
		i915_error_object_free(error->ring[i].ctx);
		kfree(error->ring[i].requests);
	}

	i915_error_object_free(error->semaphore_obj);

	for (i = 0; i < error->vm_count; i++)
		kfree(error->active_bo[i]);

	kfree(error->active_bo);
	kfree(error->active_bo_count);
	kfree(error->pinned_bo);
	kfree(error->pinned_bo_count);
	kfree(error->overlay);
	kfree(error->display);
	kfree(error);
}
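/*
 * Snapshot the pages of a GEM object into anonymous memory. Where possible
 * the copy goes through the GGTT aperture with an atomic WC mapping;
 * otherwise the pages are clflushed and copied via kmap_atomic().
 */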
static struct drm_i915_error_object *
i915_error_object_create(struct drm_i915_private *dev_priv,
			 struct drm_i915_gem_object *src,
			 struct i915_address_space *vm)
{
	struct drm_i915_error_object *dst;
	struct i915_vma *vma = NULL;
	int num_pages;
	bool use_ggtt;
	int i = 0;
	u64 reloc_offset;

	if (src == NULL || src->pages == NULL)
		return NULL;

	num_pages = src->base.size >> PAGE_SHIFT;

	dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), GFP_ATOMIC);
	if (dst == NULL)
		return NULL;

	if (i915_gem_obj_bound(src, vm))
		dst->gtt_offset = i915_gem_obj_offset(src, vm);
	else
		dst->gtt_offset = -1;

	reloc_offset = dst->gtt_offset;
	if (i915_is_ggtt(vm))
		vma = i915_gem_obj_to_ggtt(src);
	use_ggtt = (src->cache_level == I915_CACHE_NONE &&
		   vma && (vma->bound & GLOBAL_BIND) &&
		   reloc_offset + num_pages * PAGE_SIZE <= dev_priv->gtt.mappable_end);

	/* Cannot access stolen address directly, try to use the aperture */
	if (src->stolen) {
		use_ggtt = true;

		if (!(vma && vma->bound & GLOBAL_BIND))
			goto unwind;

		reloc_offset = i915_gem_obj_ggtt_offset(src);
		if (reloc_offset + num_pages * PAGE_SIZE > dev_priv->gtt.mappable_end)
			goto unwind;
	}

	/* Cannot access snooped pages through the aperture */
	if (use_ggtt && src->cache_level != I915_CACHE_NONE && !HAS_LLC(dev_priv->dev))
		goto unwind;

	dst->page_count = num_pages;
	while (num_pages--) {
		unsigned long flags;
		void *d;

		d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
		if (d == NULL)
			goto unwind;

		local_irq_save(flags);
		if (use_ggtt) {
			void __iomem *s;

			/* Simply ignore tiling or any overlapping fence.
			 * It's part of the error state, and this hopefully
			 * captures what the GPU read.
			 */
			s = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
						     reloc_offset);
			memcpy_fromio(d, s, PAGE_SIZE);
			io_mapping_unmap_atomic(s);
		} else {
			struct page *page;
			void *s;

			page = i915_gem_object_get_page(src, i);

			drm_clflush_pages(&page, 1);

			s = kmap_atomic(page);
			memcpy(d, s, PAGE_SIZE);
			kunmap_atomic(s);

			drm_clflush_pages(&page, 1);
		}
		local_irq_restore(flags);

		dst->pages[i++] = d;
		reloc_offset += PAGE_SIZE;
	}

	return dst;

unwind:
	while (i--)
		kfree(dst->pages[i]);
	kfree(dst);
	return NULL;
}
#define i915_error_ggtt_object_create(dev_priv, src) \
	i915_error_object_create((dev_priv), (src), &(dev_priv)->gtt.base)
static void capture_bo(struct drm_i915_error_buffer *err,
		       struct i915_vma *vma)
{
	struct drm_i915_gem_object *obj = vma->obj;
	int i;

	err->size = obj->base.size;
	err->name = obj->base.name;
	for (i = 0; i < I915_NUM_RINGS; i++)
		err->rseqno[i] = i915_gem_request_get_seqno(obj->last_read_req[i]);
	err->wseqno = i915_gem_request_get_seqno(obj->last_write_req);
	err->gtt_offset = vma->node.start;
	err->read_domains = obj->base.read_domains;
	err->write_domain = obj->base.write_domain;
	err->fence_reg = obj->fence_reg;
	err->pinned = 0;
	if (i915_gem_obj_is_pinned(obj))
		err->pinned = 1;
	err->tiling = obj->tiling_mode;
	err->dirty = obj->dirty;
	err->purgeable = obj->madv != I915_MADV_WILLNEED;
	err->userptr = obj->userptr.mm != NULL;
	err->ring = obj->last_write_req ?
			i915_gem_request_get_ring(obj->last_write_req)->id : -1;
	err->cache_level = obj->cache_level;
}
static u32 capture_active_bo(struct drm_i915_error_buffer *err,
			     int count, struct list_head *head)
{
	struct i915_vma *vma;
	int i = 0;

	list_for_each_entry(vma, head, mm_list) {
		capture_bo(err++, vma);
		if (++i == count)
			break;
	}

	return i;
}
static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
			     int count, struct list_head *head,
			     struct i915_address_space *vm)
{
	struct drm_i915_gem_object *obj;
	struct drm_i915_error_buffer * const first = err;
	struct drm_i915_error_buffer * const last = err + count;

	list_for_each_entry(obj, head, global_list) {
		struct i915_vma *vma;

		if (err == last)
			break;

		list_for_each_entry(vma, &obj->vma_list, vma_link)
			if (vma->vm == vm && vma->pin_count > 0)
				capture_bo(err++, vma);
	}

	return err - first;
}
/* Generate a semi-unique error code. The code is not meant to have meaning.
 * Its only purpose is to try to prevent false duplicated bug reports by
 * grossly estimating a GPU error state.
 *
 * TODO Ideally, hashing the batchbuffer would be a very nice way to determine
 * the hang if we could strip the GTT offset information from it.
 *
 * It's only a small step better than a random number in its current form.
 */
static uint32_t i915_error_generate_code(struct drm_i915_private *dev_priv,
					 struct drm_i915_error_state *error,
					 int *ring_id)
{
	uint32_t error_code = 0;
	int i;

	/* IPEHR would be an ideal way to detect errors, as it's the gross
	 * measure of "the command that hung." However, it also catches some
	 * very common synchronization commands which almost always appear in
	 * hangs that are strictly a client bug. Use instdone to differentiate
	 * those.
	 */
	for (i = 0; i < I915_NUM_RINGS; i++) {
		if (error->ring[i].hangcheck_action == HANGCHECK_HUNG) {
			if (ring_id)
				*ring_id = i;

			return error->ring[i].ipehr ^ error->ring[i].instdone;
		}
	}

	return error_code;
}
static void i915_gem_record_fences(struct drm_device *dev,
				   struct drm_i915_error_state *error)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	if (IS_GEN3(dev) || IS_GEN2(dev)) {
		for (i = 0; i < 8; i++)
			error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
		if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
			for (i = 0; i < 8; i++)
				error->fence[i+8] = I915_READ(FENCE_REG_945_8 +
							      (i * 4));
	} else if (IS_GEN5(dev) || IS_GEN4(dev))
		for (i = 0; i < 16; i++)
			error->fence[i] = I915_READ64(FENCE_REG_965_0 +
						      (i * 8));
	else if (INTEL_INFO(dev)->gen >= 6)
		for (i = 0; i < dev_priv->num_fence_regs; i++)
			error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 +
						      (i * 8));
}
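/*
 * On gen8 the inter-ring semaphore signal values all live in a single GGTT
 * page, so snapshot that page once and then read each mailbox value back
 * out of the captured copy.
 */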
static void gen8_record_semaphore_state(struct drm_i915_private *dev_priv,
					struct drm_i915_error_state *error,
					struct intel_engine_cs *ring,
					struct drm_i915_error_ring *ering)
{
	struct intel_engine_cs *to;
	int i;

	if (!i915_semaphore_is_enabled(dev_priv->dev))
		return;

	if (!error->semaphore_obj)
		error->semaphore_obj =
			i915_error_ggtt_object_create(dev_priv,
						      dev_priv->semaphore_obj);

	for_each_ring(to, dev_priv, i) {
		int idx;
		u16 signal_offset;
		u32 *tmp;

		if (ring == to)
			continue;

		signal_offset = (GEN8_SIGNAL_OFFSET(ring, i) & (PAGE_SIZE - 1))
				/ 4;
		tmp = error->semaphore_obj->pages[0];
		idx = intel_ring_sync_index(ring, to);

		ering->semaphore_mboxes[idx] = tmp[signal_offset];
		ering->semaphore_seqno[idx] = ring->semaphore.sync_seqno[idx];
	}
}
static void gen6_record_semaphore_state(struct drm_i915_private *dev_priv,
					struct intel_engine_cs *ring,
					struct drm_i915_error_ring *ering)
{
	ering->semaphore_mboxes[0] = I915_READ(RING_SYNC_0(ring->mmio_base));
	ering->semaphore_mboxes[1] = I915_READ(RING_SYNC_1(ring->mmio_base));
	ering->semaphore_seqno[0] = ring->semaphore.sync_seqno[0];
	ering->semaphore_seqno[1] = ring->semaphore.sync_seqno[1];

	if (HAS_VEBOX(dev_priv->dev)) {
		ering->semaphore_mboxes[2] =
			I915_READ(RING_SYNC_2(ring->mmio_base));
		ering->semaphore_seqno[2] = ring->semaphore.sync_seqno[2];
	}
}
static void i915_record_ring_state(struct drm_device *dev,
				   struct drm_i915_error_state *error,
				   struct intel_engine_cs *ring,
				   struct drm_i915_error_ring *ering)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen >= 6) {
		ering->rc_psmi = I915_READ(ring->mmio_base + 0x50);
		ering->fault_reg = I915_READ(RING_FAULT_REG(ring));
		if (INTEL_INFO(dev)->gen >= 8)
			gen8_record_semaphore_state(dev_priv, error, ring, ering);
		else
			gen6_record_semaphore_state(dev_priv, ring, ering);
	}

	if (INTEL_INFO(dev)->gen >= 4) {
		ering->faddr = I915_READ(RING_DMA_FADD(ring->mmio_base));
		ering->ipeir = I915_READ(RING_IPEIR(ring->mmio_base));
		ering->ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
		ering->instdone = I915_READ(RING_INSTDONE(ring->mmio_base));
		ering->instps = I915_READ(RING_INSTPS(ring->mmio_base));
		ering->bbaddr = I915_READ(RING_BBADDR(ring->mmio_base));
		if (INTEL_INFO(dev)->gen >= 8) {
			ering->faddr |= (u64) I915_READ(RING_DMA_FADD_UDW(ring->mmio_base)) << 32;
			ering->bbaddr |= (u64) I915_READ(RING_BBADDR_UDW(ring->mmio_base)) << 32;
		}
		ering->bbstate = I915_READ(RING_BBSTATE(ring->mmio_base));
	} else {
		ering->faddr = I915_READ(DMA_FADD_I8XX);
		ering->ipeir = I915_READ(IPEIR);
		ering->ipehr = I915_READ(IPEHR);
		ering->instdone = I915_READ(INSTDONE);
	}

	ering->waiting = waitqueue_active(&ring->irq_queue);
	ering->instpm = I915_READ(RING_INSTPM(ring->mmio_base));
	ering->seqno = ring->get_seqno(ring, false);
	ering->acthd = intel_ring_get_active_head(ring);
	ering->start = I915_READ_START(ring);
	ering->head = I915_READ_HEAD(ring);
	ering->tail = I915_READ_TAIL(ring);
	ering->ctl = I915_READ_CTL(ring);

	if (I915_NEED_GFX_HWS(dev)) {
		int mmio;

		if (IS_GEN7(dev)) {
			switch (ring->id) {
			default:
			case RCS:
				mmio = RENDER_HWS_PGA_GEN7;
				break;
			case BCS:
				mmio = BLT_HWS_PGA_GEN7;
				break;
			case VCS:
				mmio = BSD_HWS_PGA_GEN7;
				break;
			case VECS:
				mmio = VEBOX_HWS_PGA_GEN7;
				break;
			}
		} else if (IS_GEN6(ring->dev)) {
			mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
		} else {
			/* XXX: gen8 returns to sanity */
			mmio = RING_HWS_PGA(ring->mmio_base);
		}

		ering->hws = I915_READ(mmio);
	}

	ering->hangcheck_score = ring->hangcheck.score;
	ering->hangcheck_action = ring->hangcheck.action;

	if (USES_PPGTT(dev)) {
		int i;

		ering->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(ring));

		if (IS_GEN6(dev))
			ering->vm_info.pp_dir_base =
				I915_READ(RING_PP_DIR_BASE_READ(ring));
		else if (IS_GEN7(dev))
			ering->vm_info.pp_dir_base =
				I915_READ(RING_PP_DIR_BASE(ring));
		else if (INTEL_INFO(dev)->gen >= 8)
			for (i = 0; i < 4; i++) {
				ering->vm_info.pdp[i] =
					I915_READ(GEN8_RING_PDP_UDW(ring, i));
				ering->vm_info.pdp[i] <<= 32;
				ering->vm_info.pdp[i] |=
					I915_READ(GEN8_RING_PDP_LDW(ring, i));
			}
	}
}
static void i915_gem_record_active_context(struct intel_engine_cs *ring,
					   struct drm_i915_error_state *error,
					   struct drm_i915_error_ring *ering)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	struct drm_i915_gem_object *obj;

	/* Currently render ring is the only HW context user */
	if (ring->id != RCS || !error->ccid)
		return;

	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if (!i915_gem_obj_ggtt_bound(obj))
			continue;

		if ((error->ccid & PAGE_MASK) == i915_gem_obj_ggtt_offset(obj)) {
			ering->ctx = i915_error_ggtt_object_create(dev_priv, obj);
			break;
		}
	}
}
static void i915_gem_record_rings(struct drm_device *dev,
				  struct drm_i915_error_state *error)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_request *request;
	int i, count;

	for (i = 0; i < I915_NUM_RINGS; i++) {
		struct intel_engine_cs *ring = &dev_priv->ring[i];
		struct intel_ringbuffer *rbuf;

		error->ring[i].pid = -1;

		if (ring->dev == NULL)
			continue;

		error->ring[i].valid = true;

		i915_record_ring_state(dev, error, ring, &error->ring[i]);

		request = i915_gem_find_active_request(ring);
		if (request) {
			struct i915_address_space *vm;

			vm = request->ctx && request->ctx->ppgtt ?
				&request->ctx->ppgtt->base :
				&dev_priv->gtt.base;

			/* We need to copy these to an anonymous buffer
			 * as the simplest method to avoid being overwritten
			 * by userspace.
			 */
			error->ring[i].batchbuffer =
				i915_error_object_create(dev_priv,
							 request->batch_obj,
							 vm);

			if (HAS_BROKEN_CS_TLB(dev_priv->dev))
				error->ring[i].wa_batchbuffer =
					i915_error_ggtt_object_create(dev_priv,
							     ring->scratch.obj);

			if (request->pid) {
				struct task_struct *task;

				rcu_read_lock();
				task = pid_task(request->pid, PIDTYPE_PID);
				if (task) {
					strcpy(error->ring[i].comm, task->comm);
					error->ring[i].pid = task->pid;
				}
				rcu_read_unlock();
			}
		}

		if (i915.enable_execlists) {
			/* TODO: This is only a small fix to keep basic error
			 * capture working, but we need to add more information
			 * for it to be useful (e.g. dump the context being
			 * executed).
			 */
			if (request)
				rbuf = request->ctx->engine[ring->id].ringbuf;
			else
				rbuf = ring->default_context->engine[ring->id].ringbuf;
		} else
			rbuf = ring->buffer;

		error->ring[i].cpu_ring_head = rbuf->head;
		error->ring[i].cpu_ring_tail = rbuf->tail;

		error->ring[i].ringbuffer =
			i915_error_ggtt_object_create(dev_priv, rbuf->obj);

		error->ring[i].hws_page =
			i915_error_ggtt_object_create(dev_priv, ring->status_page.obj);

		i915_gem_record_active_context(ring, error, &error->ring[i]);

		count = 0;
		list_for_each_entry(request, &ring->request_list, list)
			count++;

		error->ring[i].num_requests = count;
		error->ring[i].requests =
			kcalloc(count, sizeof(*error->ring[i].requests),
				GFP_ATOMIC);
		if (error->ring[i].requests == NULL) {
			error->ring[i].num_requests = 0;
			continue;
		}

		count = 0;
		list_for_each_entry(request, &ring->request_list, list) {
			struct drm_i915_error_request *erq;

			erq = &error->ring[i].requests[count++];
			erq->seqno = request->seqno;
			erq->jiffies = request->emitted_jiffies;
			erq->tail = request->postfix;
		}
	}
}
/* FIXME: Since pin count/bound list is global, we duplicate what we capture per
 * VM.
 */
static void i915_gem_capture_vm(struct drm_i915_private *dev_priv,
				struct drm_i915_error_state *error,
				struct i915_address_space *vm,
				const int ndx)
{
	struct drm_i915_error_buffer *active_bo = NULL, *pinned_bo = NULL;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int i;

	i = 0;
	list_for_each_entry(vma, &vm->active_list, mm_list)
		i++;
	error->active_bo_count[ndx] = i;

	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		list_for_each_entry(vma, &obj->vma_list, vma_link)
			if (vma->vm == vm && vma->pin_count > 0)
				i++;
	}
	error->pinned_bo_count[ndx] = i - error->active_bo_count[ndx];

	if (i) {
		active_bo = kcalloc(i, sizeof(*active_bo), GFP_ATOMIC);
		if (active_bo)
			pinned_bo = active_bo + error->active_bo_count[ndx];
	}

	if (active_bo)
		error->active_bo_count[ndx] =
			capture_active_bo(active_bo,
					  error->active_bo_count[ndx],
					  &vm->active_list);

	if (pinned_bo)
		error->pinned_bo_count[ndx] =
			capture_pinned_bo(pinned_bo,
					  error->pinned_bo_count[ndx],
					  &dev_priv->mm.bound_list, vm);
	error->active_bo[ndx] = active_bo;
	error->pinned_bo[ndx] = pinned_bo;
}
static void i915_gem_capture_buffers(struct drm_i915_private *dev_priv,
				     struct drm_i915_error_state *error)
{
	struct i915_address_space *vm;
	int cnt = 0, i = 0;

	list_for_each_entry(vm, &dev_priv->vm_list, global_link)
		cnt++;

	error->active_bo = kcalloc(cnt, sizeof(*error->active_bo), GFP_ATOMIC);
	error->pinned_bo = kcalloc(cnt, sizeof(*error->pinned_bo), GFP_ATOMIC);
	error->active_bo_count = kcalloc(cnt, sizeof(*error->active_bo_count),
					 GFP_ATOMIC);
	error->pinned_bo_count = kcalloc(cnt, sizeof(*error->pinned_bo_count),
					 GFP_ATOMIC);

	if (error->active_bo == NULL ||
	    error->pinned_bo == NULL ||
	    error->active_bo_count == NULL ||
	    error->pinned_bo_count == NULL) {
		kfree(error->active_bo);
		kfree(error->active_bo_count);
		kfree(error->pinned_bo);
		kfree(error->pinned_bo_count);

		error->active_bo = NULL;
		error->active_bo_count = NULL;
		error->pinned_bo = NULL;
		error->pinned_bo_count = NULL;
	} else {
		list_for_each_entry(vm, &dev_priv->vm_list, global_link)
			i915_gem_capture_vm(dev_priv, error, vm, i++);

		error->vm_count = cnt;
	}
}
/* Capture all registers which don't fit into another category. */
static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
				   struct drm_i915_error_state *error)
{
	struct drm_device *dev = dev_priv->dev;
	int i;

	/* General organization
	 * 1. Registers specific to a single generation
	 * 2. Registers which belong to multiple generations
	 * 3. Feature specific registers.
	 * 4. Everything else
	 * Please try to follow the order.
	 */

	/* 1: Registers specific to a single generation */
	if (IS_VALLEYVIEW(dev)) {
		error->gtier[0] = I915_READ(GTIER);
		error->ier = I915_READ(VLV_IER);
		error->forcewake = I915_READ(FORCEWAKE_VLV);
	}

	if (IS_GEN7(dev))
		error->err_int = I915_READ(GEN7_ERR_INT);

	if (INTEL_INFO(dev)->gen >= 8) {
		error->fault_data0 = I915_READ(GEN8_FAULT_TLB_DATA0);
		error->fault_data1 = I915_READ(GEN8_FAULT_TLB_DATA1);
	}

	if (IS_GEN6(dev)) {
		error->forcewake = I915_READ(FORCEWAKE);
		error->gab_ctl = I915_READ(GAB_CTL);
		error->gfx_mode = I915_READ(GFX_MODE);
	}

	/* 2: Registers which belong to multiple generations */
	if (INTEL_INFO(dev)->gen >= 7)
		error->forcewake = I915_READ(FORCEWAKE_MT);

	if (INTEL_INFO(dev)->gen >= 6) {
		error->derrmr = I915_READ(DERRMR);
		error->error = I915_READ(ERROR_GEN6);
		error->done_reg = I915_READ(DONE_REG);
	}

	/* 3: Feature specific registers */
	if (IS_GEN6(dev) || IS_GEN7(dev)) {
		error->gam_ecochk = I915_READ(GAM_ECOCHK);
		error->gac_eco = I915_READ(GAC_ECO_BITS);
	}

	/* 4: Everything else */
	if (HAS_HW_CONTEXTS(dev))
		error->ccid = I915_READ(CCID);

	if (INTEL_INFO(dev)->gen >= 8) {
		error->ier = I915_READ(GEN8_DE_MISC_IER);
		for (i = 0; i < 4; i++)
			error->gtier[i] = I915_READ(GEN8_GT_IER(i));
	} else if (HAS_PCH_SPLIT(dev)) {
		error->ier = I915_READ(DEIER);
		error->gtier[0] = I915_READ(GTIER);
	} else if (IS_GEN2(dev)) {
		error->ier = I915_READ16(IER);
	} else if (!IS_VALLEYVIEW(dev)) {
		error->ier = I915_READ(IER);
	}
	error->eir = I915_READ(EIR);
	error->pgtbl_er = I915_READ(PGTBL_ER);

	i915_get_extra_instdone(dev, error->extra_instdone);
}
static void i915_error_capture_msg(struct drm_device *dev,
				   struct drm_i915_error_state *error,
				   bool wedged,
				   const char *error_msg)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 ecode;
	int ring_id = -1, len;

	ecode = i915_error_generate_code(dev_priv, error, &ring_id);

	len = scnprintf(error->error_msg, sizeof(error->error_msg),
			"GPU HANG: ecode %d:%d:0x%08x",
			INTEL_INFO(dev)->gen, ring_id, ecode);

	if (ring_id != -1 && error->ring[ring_id].pid != -1)
		len += scnprintf(error->error_msg + len,
				 sizeof(error->error_msg) - len,
				 ", in %s [%d]",
				 error->ring[ring_id].comm,
				 error->ring[ring_id].pid);

	scnprintf(error->error_msg + len, sizeof(error->error_msg) - len,
		  ", reason: %s, action: %s",
		  error_msg,
		  wedged ? "reset" : "continue");
}
static void i915_capture_gen_state(struct drm_i915_private *dev_priv,
				   struct drm_i915_error_state *error)
{
	error->reset_count = i915_reset_count(&dev_priv->gpu_error);
	error->suspend_count = dev_priv->suspend_count;
}
/**
 * i915_capture_error_state - capture an error record for later analysis
 * @dev: drm device
 *
 * Should be called when an error is detected (either a hang or an error
 * interrupt) to capture error state from the time of the error. Fills
 * out a structure which becomes available in debugfs for user level tools
 * to pick up.
 */
void i915_capture_error_state(struct drm_device *dev, bool wedged,
			      const char *error_msg)
{
	static bool warned;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_error_state *error;
	unsigned long flags;

	/* Account for pipe specific data like PIPE*STAT */
	error = kzalloc(sizeof(*error), GFP_ATOMIC);
	if (!error) {
		DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
		return;
	}

	kref_init(&error->ref);

	i915_capture_gen_state(dev_priv, error);
	i915_capture_reg_state(dev_priv, error);
	i915_gem_capture_buffers(dev_priv, error);
	i915_gem_record_fences(dev, error);
	i915_gem_record_rings(dev, error);

	do_gettimeofday(&error->time);

	error->overlay = intel_overlay_capture_error_state(dev);
	error->display = intel_display_capture_error_state(dev);

	i915_error_capture_msg(dev, error, wedged, error_msg);
	DRM_INFO("%s\n", error->error_msg);

	spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
	if (dev_priv->gpu_error.first_error == NULL) {
		dev_priv->gpu_error.first_error = error;
		error = NULL;
	}
	spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);

	if (error) {
		i915_error_state_free(&error->ref);
		return;
	}

	if (!warned) {
		DRM_INFO("GPU hangs can indicate a bug anywhere in the entire gfx stack, including userspace.\n");
		DRM_INFO("Please file a _new_ bug report on bugs.freedesktop.org against DRI -> DRM/Intel\n");
		DRM_INFO("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
		DRM_INFO("The gpu crash dump is required to analyze gpu hangs, so please always attach it.\n");
		DRM_INFO("GPU crash dump saved to /sys/class/drm/card%d/error\n", dev->primary->index);
		warned = true;
	}
}
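/*
 * Debugfs readers take a reference on the current error state so that it
 * stays alive while being dumped; the reference is dropped again via
 * i915_error_state_put().
 */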
void i915_error_state_get(struct drm_device *dev,
			  struct i915_error_state_file_priv *error_priv)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	spin_lock_irq(&dev_priv->gpu_error.lock);
	error_priv->error = dev_priv->gpu_error.first_error;
	if (error_priv->error)
		kref_get(&error_priv->error->ref);
	spin_unlock_irq(&dev_priv->gpu_error.lock);
}
void i915_error_state_put(struct i915_error_state_file_priv *error_priv)
{
	if (error_priv->error)
		kref_put(&error_priv->error->ref, i915_error_state_free);
}
void i915_destroy_error_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_error_state *error;

	spin_lock_irq(&dev_priv->gpu_error.lock);
	error = dev_priv->gpu_error.first_error;
	dev_priv->gpu_error.first_error = NULL;
	spin_unlock_irq(&dev_priv->gpu_error.lock);

	if (error)
		kref_put(&error->ref, i915_error_state_free);
}
const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
{
	switch (type) {
	case I915_CACHE_NONE: return " uncached";
	case I915_CACHE_LLC: return HAS_LLC(i915) ? " LLC" : " snooped";
	case I915_CACHE_L3_LLC: return " L3+LLC";
	case I915_CACHE_WT: return " WT";
	default: return "";
	}
}
/* NB: please notice the memset */
void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);

	if (IS_GEN2(dev) || IS_GEN3(dev))
		instdone[0] = I915_READ(INSTDONE);
	else if (IS_GEN4(dev) || IS_GEN5(dev) || IS_GEN6(dev)) {
		instdone[0] = I915_READ(INSTDONE_I965);
		instdone[1] = I915_READ(INSTDONE1);
	} else if (INTEL_INFO(dev)->gen >= 7) {
		instdone[0] = I915_READ(GEN7_INSTDONE_1);
		instdone[1] = I915_READ(GEN7_SC_INSTDONE);
		instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
		instdone[3] = I915_READ(GEN7_ROW_INSTDONE);
	}
}