/*
 * Copyright (c) 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *    Mika Kuoppala <mika.kuoppala@intel.com>
 *
 */

#include <generated/utsrelease.h>
#include "i915_drv.h"
static const char *ring_str(int ring)
{
	switch (ring) {
	case RCS: return "render";
	case VCS: return "bsd";
	case BCS: return "blt";
	case VECS: return "vebox";
	case VCS2: return "bsd2";
	default: return "";
	}
}
static const char *pin_flag(int pinned)
{
	if (pinned > 0)
		return " P";
	else if (pinned < 0)
		return " p";
	else
		return "";
}
static const char *tiling_flag(int tiling)
{
	switch (tiling) {
	default:
	case I915_TILING_NONE: return "";
	case I915_TILING_X: return " X";
	case I915_TILING_Y: return " Y";
	}
}
static const char *dirty_flag(int dirty)
{
	return dirty ? " dirty" : "";
}
static const char *purgeable_flag(int purgeable)
{
	return purgeable ? " purgeable" : "";
}
static bool __i915_error_ok(struct drm_i915_error_state_buf *e)
{
	if (!e->err && WARN(e->bytes > (e->size - 1), "overflow")) {
		e->err = -ENOSPC;
		return false;
	}

	if (e->bytes == e->size - 1 || e->err)
		return false;

	return true;
}
static bool __i915_error_seek(struct drm_i915_error_state_buf *e,
			      unsigned len)
{
	if (e->pos + len <= e->start) {
		e->pos += len;
		return false;
	}

	/* First vsnprintf needs to fit in its entirety for memmove */
	if (len >= e->size) {
		WARN(1, "overflow");
		e->err = -EIO;
		return false;
	}

	return true;
}
static void __i915_error_advance(struct drm_i915_error_state_buf *e,
				 unsigned len)
{
	/* If this is first printf in this window, adjust it so that
	 * start position matches start of the buffer
	 */

	if (e->pos < e->start) {
		const size_t off = e->start - e->pos;

		/* Should not happen but be paranoid */
		if (off > len || e->bytes) {
			e->err = -EIO;
			return;
		}

		memmove(e->buf, e->buf + off, len - off);
		e->bytes = len - off;
		e->pos = e->start;
		return;
	}

	e->bytes += len;
	e->pos += len;
}
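/*
 * A worked example of the window logic above (the numbers are illustrative):
 * with e->start = 4096 and e->pos = 4000, the first printf that reaches the
 * buffer is 200 bytes long. __i915_error_advance() computes off = 96,
 * memmove()s the last 104 bytes down to the start of the buffer and snaps
 * e->pos up to e->start. Printfs that end before e->start never touch the
 * buffer at all; __i915_error_seek() merely advances e->pos past them.
 */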
static void i915_error_vprintf(struct drm_i915_error_state_buf *e,
			       const char *f, va_list args)
{
	unsigned len;

	if (!__i915_error_ok(e))
		return;

	/* Seek the first printf which hits the start position */
	if (e->pos < e->start) {
		va_list tmp;

		va_copy(tmp, args);
		len = vsnprintf(NULL, 0, f, tmp);
		va_end(tmp);

		if (!__i915_error_seek(e, len))
			return;
	}

	len = vsnprintf(e->buf + e->bytes, e->size - e->bytes, f, args);
	if (len >= e->size - e->bytes)
		len = e->size - e->bytes - 1;

	__i915_error_advance(e, len);
}
static void i915_error_puts(struct drm_i915_error_state_buf *e,
			    const char *str)
{
	unsigned len;

	if (!__i915_error_ok(e))
		return;

	len = strlen(str);

	/* Seek the first printf which hits the start position */
	if (e->pos < e->start) {
		if (!__i915_error_seek(e, len))
			return;
	}

	if (len >= e->size - e->bytes)
		len = e->size - e->bytes - 1;
	memcpy(e->buf + e->bytes, str, len);

	__i915_error_advance(e, len);
}
#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
#define err_puts(e, s) i915_error_puts(e, s)
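/*
 * Sketch of how a reader drives the machinery above; the surrounding
 * debugfs plumbing is elided and the variable names are illustrative:
 *
 *	struct drm_i915_error_state_buf ebuf;
 *
 *	i915_error_state_buf_init(&ebuf, dev_priv, count, pos);
 *	i915_error_state_to_str(&ebuf, error_priv);
 *
 * After this, ebuf.buf holds at most ebuf.size - 1 bytes of the dump
 * starting at offset pos: everything before pos was skipped by
 * __i915_error_seek() and any printf straddling pos was trimmed by
 * __i915_error_advance().
 */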
static void print_error_buffers(struct drm_i915_error_state_buf *m,
				const char *name,
				struct drm_i915_error_buffer *err,
				int count)
{
	int i;

	err_printf(m, " %s [%d]:\n", name, count);

	while (count--) {
		err_printf(m, " %08x_%08x %8u %02x %02x [ ",
			   upper_32_bits(err->gtt_offset),
			   lower_32_bits(err->gtt_offset),
			   err->size,
			   err->read_domains,
			   err->write_domain);
		for (i = 0; i < I915_NUM_RINGS; i++)
			err_printf(m, "%02x ", err->rseqno[i]);

		err_printf(m, "] %02x", err->wseqno);
		err_puts(m, pin_flag(err->pinned));
		err_puts(m, tiling_flag(err->tiling));
		err_puts(m, dirty_flag(err->dirty));
		err_puts(m, purgeable_flag(err->purgeable));
		err_puts(m, err->userptr ? " userptr" : "");
		err_puts(m, err->ring != -1 ? " " : "");
		err_puts(m, ring_str(err->ring));
		err_puts(m, i915_cache_level_str(m->i915, err->cache_level));

		if (err->name)
			err_printf(m, " (name: %d)", err->name);
		if (err->fence_reg != I915_FENCE_REG_NONE)
			err_printf(m, " (fence: %d)", err->fence_reg);

		err_puts(m, "\n");
		err++;
	}
}
static const char *hangcheck_action_to_str(enum intel_ring_hangcheck_action a)
{
	switch (a) {
	case HANGCHECK_IDLE:
		return "idle";
	case HANGCHECK_WAIT:
		return "wait";
	case HANGCHECK_ACTIVE:
		return "active";
	case HANGCHECK_ACTIVE_LOOP:
		return "active (loop)";
	case HANGCHECK_KICK:
		return "kick";
	case HANGCHECK_HUNG:
		return "hung";
	}

	return "unknown";
}
static void i915_ring_error_state(struct drm_i915_error_state_buf *m,
				  struct drm_device *dev,
				  struct drm_i915_error_state *error,
				  int ring_idx)
{
	struct drm_i915_error_ring *ring = &error->ring[ring_idx];

	if (!ring->valid)
		return;

	err_printf(m, "%s command stream:\n", ring_str(ring_idx));
	err_printf(m, " START: 0x%08x\n", ring->start);
	err_printf(m, " HEAD:  0x%08x\n", ring->head);
	err_printf(m, " TAIL:  0x%08x\n", ring->tail);
	err_printf(m, " CTL:   0x%08x\n", ring->ctl);
	err_printf(m, " HWS:   0x%08x\n", ring->hws);
	err_printf(m, " ACTHD: 0x%08x %08x\n", (u32)(ring->acthd>>32), (u32)ring->acthd);
	err_printf(m, " IPEIR: 0x%08x\n", ring->ipeir);
	err_printf(m, " IPEHR: 0x%08x\n", ring->ipehr);
	err_printf(m, " INSTDONE: 0x%08x\n", ring->instdone);
	if (INTEL_INFO(dev)->gen >= 4) {
		err_printf(m, " BBADDR: 0x%08x %08x\n", (u32)(ring->bbaddr>>32), (u32)ring->bbaddr);
		err_printf(m, " BB_STATE: 0x%08x\n", ring->bbstate);
		err_printf(m, " INSTPS: 0x%08x\n", ring->instps);
	}
	err_printf(m, " INSTPM: 0x%08x\n", ring->instpm);
	err_printf(m, " FADDR: 0x%08x %08x\n", upper_32_bits(ring->faddr),
		   lower_32_bits(ring->faddr));
	if (INTEL_INFO(dev)->gen >= 6) {
		err_printf(m, " RC PSMI: 0x%08x\n", ring->rc_psmi);
		err_printf(m, " FAULT_REG: 0x%08x\n", ring->fault_reg);
		err_printf(m, " SYNC_0: 0x%08x [last synced 0x%08x]\n",
			   ring->semaphore_mboxes[0],
			   ring->semaphore_seqno[0]);
		err_printf(m, " SYNC_1: 0x%08x [last synced 0x%08x]\n",
			   ring->semaphore_mboxes[1],
			   ring->semaphore_seqno[1]);
		if (HAS_VEBOX(dev)) {
			err_printf(m, " SYNC_2: 0x%08x [last synced 0x%08x]\n",
				   ring->semaphore_mboxes[2],
				   ring->semaphore_seqno[2]);
		}
	}
	if (USES_PPGTT(dev)) {
		err_printf(m, " GFX_MODE: 0x%08x\n", ring->vm_info.gfx_mode);

		if (INTEL_INFO(dev)->gen >= 8) {
			int i;
			for (i = 0; i < 4; i++)
				err_printf(m, " PDP%d: 0x%016llx\n",
					   i, ring->vm_info.pdp[i]);
		} else {
			err_printf(m, " PP_DIR_BASE: 0x%08x\n",
				   ring->vm_info.pp_dir_base);
		}
	}
	err_printf(m, " seqno: 0x%08x\n", ring->seqno);
	err_printf(m, " waiting: %s\n", yesno(ring->waiting));
	err_printf(m, " ring->head: 0x%08x\n", ring->cpu_ring_head);
	err_printf(m, " ring->tail: 0x%08x\n", ring->cpu_ring_tail);
	err_printf(m, " hangcheck: %s [%d]\n",
		   hangcheck_action_to_str(ring->hangcheck_action),
		   ring->hangcheck_score);
}
void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
{
	va_list args;

	va_start(args, f);
	i915_error_vprintf(e, f, args);
	va_end(args);
}
static void print_error_obj(struct drm_i915_error_state_buf *m,
			    struct drm_i915_error_object *obj)
{
	int page, offset, elt;

	for (page = offset = 0; page < obj->page_count; page++) {
		for (elt = 0; elt < PAGE_SIZE/4; elt++) {
			err_printf(m, "%08x : %08x\n", offset,
				   obj->pages[page][elt]);
			offset += 4;
		}
	}
}
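/*
 * Each object is dumped one dword per line as "<byte offset> : <value>",
 * the plain-text layout that userspace decoders of the error file (such as
 * intel_error_decode from intel-gpu-tools; mentioned for context only, it
 * is not used by this file) expect to parse.
 */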
int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
			    const struct i915_error_state_file_priv *error_priv)
{
	struct drm_device *dev = error_priv->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_error_state *error = error_priv->error;
	struct drm_i915_error_object *obj;
	int i, j, offset, elt;
	int max_hangcheck_score;

	if (!error) {
		err_printf(m, "no error state collected\n");
		goto out;
	}

	err_printf(m, "%s\n", error->error_msg);
	err_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
		   error->time.tv_usec);
	err_printf(m, "Kernel: " UTS_RELEASE "\n");
	max_hangcheck_score = 0;
	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
		if (error->ring[i].hangcheck_score > max_hangcheck_score)
			max_hangcheck_score = error->ring[i].hangcheck_score;
	}
	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
		if (error->ring[i].hangcheck_score == max_hangcheck_score &&
		    error->ring[i].pid != -1) {
			err_printf(m, "Active process (on ring %s): %s [%d]\n",
				   ring_str(i),
				   error->ring[i].comm,
				   error->ring[i].pid);
		}
	}
	err_printf(m, "Reset count: %u\n", error->reset_count);
	err_printf(m, "Suspend count: %u\n", error->suspend_count);
	err_printf(m, "PCI ID: 0x%04x\n", dev->pdev->device);
	err_printf(m, "IOMMU enabled?: %d\n", error->iommu);
	err_printf(m, "EIR: 0x%08x\n", error->eir);
	err_printf(m, "IER: 0x%08x\n", error->ier);
	if (INTEL_INFO(dev)->gen >= 8) {
		for (i = 0; i < 4; i++)
			err_printf(m, "GTIER gt %d: 0x%08x\n", i,
				   error->gtier[i]);
	} else if (HAS_PCH_SPLIT(dev) || IS_VALLEYVIEW(dev))
		err_printf(m, "GTIER: 0x%08x\n", error->gtier[0]);
	err_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
	err_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake);
	err_printf(m, "DERRMR: 0x%08x\n", error->derrmr);
	err_printf(m, "CCID: 0x%08x\n", error->ccid);
	err_printf(m, "Missed interrupts: 0x%08lx\n", dev_priv->gpu_error.missed_irq_rings);

	for (i = 0; i < dev_priv->num_fence_regs; i++)
		err_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]);

	for (i = 0; i < ARRAY_SIZE(error->extra_instdone); i++)
		err_printf(m, " INSTDONE_%d: 0x%08x\n", i,
			   error->extra_instdone[i]);

	if (INTEL_INFO(dev)->gen >= 6) {
		err_printf(m, "ERROR: 0x%08x\n", error->error);

		if (INTEL_INFO(dev)->gen >= 8)
			err_printf(m, "FAULT_TLB_DATA: 0x%08x 0x%08x\n",
				   error->fault_data1, error->fault_data0);

		err_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
	}

	if (INTEL_INFO(dev)->gen == 7)
		err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);

	for (i = 0; i < ARRAY_SIZE(error->ring); i++)
		i915_ring_error_state(m, dev, error, i);

	for (i = 0; i < error->vm_count; i++) {
		err_printf(m, "vm[%d]\n", i);

		print_error_buffers(m, "Active",
				    error->active_bo[i],
				    error->active_bo_count[i]);

		print_error_buffers(m, "Pinned",
				    error->pinned_bo[i],
				    error->pinned_bo_count[i]);
	}

	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
		obj = error->ring[i].batchbuffer;
		if (obj) {
			err_puts(m, dev_priv->ring[i].name);
			if (error->ring[i].pid != -1)
				err_printf(m, " (submitted by %s [%d])",
					   error->ring[i].comm,
					   error->ring[i].pid);
			err_printf(m, " --- gtt_offset = 0x%08x %08x\n",
				   upper_32_bits(obj->gtt_offset),
				   lower_32_bits(obj->gtt_offset));
			print_error_obj(m, obj);
		}

		obj = error->ring[i].wa_batchbuffer;
		if (obj) {
			err_printf(m, "%s (w/a) --- gtt_offset = 0x%08x\n",
				   dev_priv->ring[i].name,
				   lower_32_bits(obj->gtt_offset));
			print_error_obj(m, obj);
		}

		if (error->ring[i].num_requests) {
			err_printf(m, "%s --- %d requests\n",
				   dev_priv->ring[i].name,
				   error->ring[i].num_requests);
			for (j = 0; j < error->ring[i].num_requests; j++) {
				err_printf(m, " seqno 0x%08x, emitted %ld, tail 0x%08x\n",
					   error->ring[i].requests[j].seqno,
					   error->ring[i].requests[j].jiffies,
					   error->ring[i].requests[j].tail);
			}
		}

		if ((obj = error->ring[i].ringbuffer)) {
			err_printf(m, "%s --- ringbuffer = 0x%08x\n",
				   dev_priv->ring[i].name,
				   lower_32_bits(obj->gtt_offset));
			print_error_obj(m, obj);
		}

		if ((obj = error->ring[i].hws_page)) {
			u64 hws_offset = obj->gtt_offset;
			u32 *hws_page = &obj->pages[0][0];

			if (i915.enable_execlists) {
				hws_offset += LRC_PPHWSP_PN * PAGE_SIZE;
				hws_page = &obj->pages[LRC_PPHWSP_PN][0];
			}
			err_printf(m, "%s --- HW Status = 0x%08llx\n",
				   dev_priv->ring[i].name, hws_offset);
			offset = 0;
			for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
				err_printf(m, "[%04x] %08x %08x %08x %08x\n",
					   offset,
					   hws_page[elt],
					   hws_page[elt+1],
					   hws_page[elt+2],
					   hws_page[elt+3]);
				offset += 16;
			}
		}

		if ((obj = error->ring[i].ctx)) {
			err_printf(m, "%s --- HW Context = 0x%08x\n",
				   dev_priv->ring[i].name,
				   lower_32_bits(obj->gtt_offset));
			print_error_obj(m, obj);
		}
	}

	if ((obj = error->semaphore_obj)) {
		err_printf(m, "Semaphore page = 0x%08x\n",
			   lower_32_bits(obj->gtt_offset));
		for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
			err_printf(m, "[%04x] %08x %08x %08x %08x\n",
				   elt * 4,
				   obj->pages[0][elt],
				   obj->pages[0][elt+1],
				   obj->pages[0][elt+2],
				   obj->pages[0][elt+3]);
		}
	}

	if (error->overlay)
		intel_overlay_print_error_state(m, error->overlay);

	if (error->display)
		intel_display_print_error_state(m, dev, error->display);

out:
	if (m->bytes == 0 && m->err)
		return m->err;

	return 0;
}
int i915_error_state_buf_init(struct drm_i915_error_state_buf *ebuf,
			      struct drm_i915_private *i915,
			      size_t count, loff_t pos)
{
	memset(ebuf, 0, sizeof(*ebuf));
	ebuf->i915 = i915;

	/* We need to have enough room to store any i915_error_state printf
	 * so that we can move it to start position.
	 */
	ebuf->size = count + 1 > PAGE_SIZE ? count + 1 : PAGE_SIZE;
	ebuf->buf = kmalloc(ebuf->size,
			    GFP_TEMPORARY | __GFP_NORETRY | __GFP_NOWARN);

	if (ebuf->buf == NULL) {
		ebuf->size = PAGE_SIZE;
		ebuf->buf = kmalloc(ebuf->size, GFP_TEMPORARY);
	}

	if (ebuf->buf == NULL) {
		ebuf->size = 128;
		ebuf->buf = kmalloc(ebuf->size, GFP_TEMPORARY);
	}

	if (ebuf->buf == NULL)
		return -ENOMEM;

	ebuf->start = pos;

	return 0;
}
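/*
 * Sizing note: a single i915_error_vprintf() must fit in the buffer in its
 * entirety for the memmove in __i915_error_advance() to work, hence the
 * "count + 1 or PAGE_SIZE, whichever is larger" first attempt above, with
 * progressively smaller fallbacks if the atomic-friendly allocation fails.
 */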
static void i915_error_object_free(struct drm_i915_error_object *obj)
{
	int page;

	if (obj == NULL)
		return;

	for (page = 0; page < obj->page_count; page++)
		kfree(obj->pages[page]);

	kfree(obj);
}
static void i915_error_state_free(struct kref *error_ref)
{
	struct drm_i915_error_state *error = container_of(error_ref,
							  typeof(*error), ref);
	int i;

	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
		i915_error_object_free(error->ring[i].batchbuffer);
		i915_error_object_free(error->ring[i].wa_batchbuffer);
		i915_error_object_free(error->ring[i].ringbuffer);
		i915_error_object_free(error->ring[i].hws_page);
		i915_error_object_free(error->ring[i].ctx);
		kfree(error->ring[i].requests);
	}

	i915_error_object_free(error->semaphore_obj);

	for (i = 0; i < error->vm_count; i++)
		kfree(error->active_bo[i]);

	kfree(error->active_bo);
	kfree(error->active_bo_count);
	kfree(error->pinned_bo);
	kfree(error->pinned_bo_count);
	kfree(error->overlay);
	kfree(error->display);
	kfree(error);
}
static struct drm_i915_error_object *
i915_error_object_create(struct drm_i915_private *dev_priv,
			 struct drm_i915_gem_object *src,
			 struct i915_address_space *vm)
{
	struct drm_i915_error_object *dst;
	struct i915_vma *vma = NULL;
	int num_pages;
	bool use_ggtt;
	int i = 0;
	u64 reloc_offset;

	if (src == NULL || src->pages == NULL)
		return NULL;

	num_pages = src->base.size >> PAGE_SHIFT;

	dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), GFP_ATOMIC);
	if (dst == NULL)
		return NULL;

	if (i915_gem_obj_bound(src, vm))
		dst->gtt_offset = i915_gem_obj_offset(src, vm);
	else
		dst->gtt_offset = -1;

	reloc_offset = dst->gtt_offset;
	if (i915_is_ggtt(vm))
		vma = i915_gem_obj_to_ggtt(src);
	use_ggtt = (src->cache_level == I915_CACHE_NONE &&
		    vma && (vma->bound & GLOBAL_BIND) &&
		    reloc_offset + num_pages * PAGE_SIZE <= dev_priv->gtt.mappable_end);

	/* Cannot access stolen address directly, try to use the aperture */
	if (src->stolen) {
		use_ggtt = true;

		if (!(vma && vma->bound & GLOBAL_BIND))
			goto unwind;

		reloc_offset = i915_gem_obj_ggtt_offset(src);
		if (reloc_offset + num_pages * PAGE_SIZE > dev_priv->gtt.mappable_end)
			goto unwind;
	}

	/* Cannot access snooped pages through the aperture */
	if (use_ggtt && src->cache_level != I915_CACHE_NONE && !HAS_LLC(dev_priv->dev))
		goto unwind;

	dst->page_count = num_pages;
	while (num_pages--) {
		unsigned long flags;
		void *d;

		d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
		if (d == NULL)
			goto unwind;

		local_irq_save(flags);
		if (use_ggtt) {
			void __iomem *s;

			/* Simply ignore tiling or any overlapping fence.
			 * It's part of the error state, and this hopefully
			 * captures what the GPU read.
			 */

			s = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
						     reloc_offset);
			memcpy_fromio(d, s, PAGE_SIZE);
			io_mapping_unmap_atomic(s);
		} else {
			struct page *page;
			void *s;

			page = i915_gem_object_get_page(src, i);

			drm_clflush_pages(&page, 1);

			s = kmap_atomic(page);
			memcpy(d, s, PAGE_SIZE);
			kunmap_atomic(s);

			drm_clflush_pages(&page, 1);
		}
		local_irq_restore(flags);

		dst->pages[i++] = d;
		reloc_offset += PAGE_SIZE;
	}

	return dst;

unwind:
	while (i--)
		kfree(dst->pages[i]);
	kfree(dst);
	return NULL;
}
#define i915_error_ggtt_object_create(dev_priv, src) \
	i915_error_object_create((dev_priv), (src), &(dev_priv)->gtt.base)
static void capture_bo(struct drm_i915_error_buffer *err,
		       struct i915_vma *vma)
{
	struct drm_i915_gem_object *obj = vma->obj;
	int i;

	err->size = obj->base.size;
	err->name = obj->base.name;
	for (i = 0; i < I915_NUM_RINGS; i++)
		err->rseqno[i] = i915_gem_request_get_seqno(obj->last_read_req[i]);
	err->wseqno = i915_gem_request_get_seqno(obj->last_write_req);
	err->gtt_offset = vma->node.start;
	err->read_domains = obj->base.read_domains;
	err->write_domain = obj->base.write_domain;
	err->fence_reg = obj->fence_reg;
	err->pinned = 0;
	if (i915_gem_obj_is_pinned(obj))
		err->pinned = 1;
	err->tiling = obj->tiling_mode;
	err->dirty = obj->dirty;
	err->purgeable = obj->madv != I915_MADV_WILLNEED;
	err->userptr = obj->userptr.mm != NULL;
	err->ring = obj->last_write_req ?
			i915_gem_request_get_ring(obj->last_write_req)->id : -1;
	err->cache_level = obj->cache_level;
}
static u32 capture_active_bo(struct drm_i915_error_buffer *err,
			     int count, struct list_head *head)
{
	struct i915_vma *vma;
	int i = 0;

	list_for_each_entry(vma, head, mm_list) {
		capture_bo(err++, vma);
		if (++i == count)
			break;
	}

	return i;
}
static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
			     int count, struct list_head *head,
			     struct i915_address_space *vm)
{
	struct drm_i915_gem_object *obj;
	struct drm_i915_error_buffer * const first = err;
	struct drm_i915_error_buffer * const last = err + count;

	list_for_each_entry(obj, head, global_list) {
		struct i915_vma *vma;

		if (err == last)
			break;

		list_for_each_entry(vma, &obj->vma_list, vma_link)
			if (vma->vm == vm && vma->pin_count > 0)
				capture_bo(err++, vma);
	}

	return err - first;
}
/* Generate a semi-unique error code. The code is not meant to have meaning;
 * its only purpose is to try to prevent false duplicated bug reports by
 * grossly estimating a GPU error state.
 *
 * TODO Ideally, hashing the batchbuffer would be a very nice way to determine
 * the hang if we could strip the GTT offset information from it.
 *
 * It's only a small step better than a random number in its current form.
 */
static uint32_t i915_error_generate_code(struct drm_i915_private *dev_priv,
					 struct drm_i915_error_state *error,
					 int *ring_id)
{
	uint32_t error_code = 0;
	int i;

	/* IPEHR would be an ideal way to detect errors, as it's the gross
	 * measure of "the command that hung." However, it contains some very
	 * common synchronization commands which almost always appear in
	 * cases that are strictly a client bug. Use instdone to
	 * differentiate those hangs somewhat.
	 */
	for (i = 0; i < I915_NUM_RINGS; i++) {
		if (error->ring[i].hangcheck_action == HANGCHECK_HUNG) {
			if (ring_id)
				*ring_id = i;

			return error->ring[i].ipehr ^ error->ring[i].instdone;
		}
	}

	return error_code;
}
static void i915_gem_record_fences(struct drm_device *dev,
				   struct drm_i915_error_state *error)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	if (IS_GEN3(dev) || IS_GEN2(dev)) {
		for (i = 0; i < dev_priv->num_fence_regs; i++)
			error->fence[i] = I915_READ(FENCE_REG(i));
	} else if (IS_GEN5(dev) || IS_GEN4(dev)) {
		for (i = 0; i < dev_priv->num_fence_regs; i++)
			error->fence[i] = I915_READ64(FENCE_REG_965_LO(i));
	} else if (INTEL_INFO(dev)->gen >= 6) {
		for (i = 0; i < dev_priv->num_fence_regs; i++)
			error->fence[i] = I915_READ64(FENCE_REG_GEN6_LO(i));
	}
}
*dev_priv
,
808 struct drm_i915_error_state
*error
,
809 struct intel_engine_cs
*ring
,
810 struct drm_i915_error_ring
*ering
)
812 struct intel_engine_cs
*to
;
815 if (!i915_semaphore_is_enabled(dev_priv
->dev
))
818 if (!error
->semaphore_obj
)
819 error
->semaphore_obj
=
820 i915_error_ggtt_object_create(dev_priv
,
821 dev_priv
->semaphore_obj
);
823 for_each_ring(to
, dev_priv
, i
) {
831 signal_offset
= (GEN8_SIGNAL_OFFSET(ring
, i
) & (PAGE_SIZE
- 1))
833 tmp
= error
->semaphore_obj
->pages
[0];
834 idx
= intel_ring_sync_index(ring
, to
);
836 ering
->semaphore_mboxes
[idx
] = tmp
[signal_offset
];
837 ering
->semaphore_seqno
[idx
] = ring
->semaphore
.sync_seqno
[idx
];
static void gen6_record_semaphore_state(struct drm_i915_private *dev_priv,
					struct intel_engine_cs *ring,
					struct drm_i915_error_ring *ering)
{
	ering->semaphore_mboxes[0] = I915_READ(RING_SYNC_0(ring->mmio_base));
	ering->semaphore_mboxes[1] = I915_READ(RING_SYNC_1(ring->mmio_base));
	ering->semaphore_seqno[0] = ring->semaphore.sync_seqno[0];
	ering->semaphore_seqno[1] = ring->semaphore.sync_seqno[1];

	if (HAS_VEBOX(dev_priv->dev)) {
		ering->semaphore_mboxes[2] =
			I915_READ(RING_SYNC_2(ring->mmio_base));
		ering->semaphore_seqno[2] = ring->semaphore.sync_seqno[2];
	}
}
static void i915_record_ring_state(struct drm_device *dev,
				   struct drm_i915_error_state *error,
				   struct intel_engine_cs *ring,
				   struct drm_i915_error_ring *ering)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen >= 6) {
		ering->rc_psmi = I915_READ(ring->mmio_base + 0x50);
		ering->fault_reg = I915_READ(RING_FAULT_REG(ring));
		if (INTEL_INFO(dev)->gen >= 8)
			gen8_record_semaphore_state(dev_priv, error, ring, ering);
		else
			gen6_record_semaphore_state(dev_priv, ring, ering);
	}

	if (INTEL_INFO(dev)->gen >= 4) {
		ering->faddr = I915_READ(RING_DMA_FADD(ring->mmio_base));
		ering->ipeir = I915_READ(RING_IPEIR(ring->mmio_base));
		ering->ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
		ering->instdone = I915_READ(RING_INSTDONE(ring->mmio_base));
		ering->instps = I915_READ(RING_INSTPS(ring->mmio_base));
		ering->bbaddr = I915_READ(RING_BBADDR(ring->mmio_base));
		if (INTEL_INFO(dev)->gen >= 8) {
			ering->faddr |= (u64) I915_READ(RING_DMA_FADD_UDW(ring->mmio_base)) << 32;
			ering->bbaddr |= (u64) I915_READ(RING_BBADDR_UDW(ring->mmio_base)) << 32;
		}
		ering->bbstate = I915_READ(RING_BBSTATE(ring->mmio_base));
	} else {
		ering->faddr = I915_READ(DMA_FADD_I8XX);
		ering->ipeir = I915_READ(IPEIR);
		ering->ipehr = I915_READ(IPEHR);
		ering->instdone = I915_READ(GEN2_INSTDONE);
	}

	ering->waiting = waitqueue_active(&ring->irq_queue);
	ering->instpm = I915_READ(RING_INSTPM(ring->mmio_base));
	ering->seqno = ring->get_seqno(ring, false);
	ering->acthd = intel_ring_get_active_head(ring);
	ering->start = I915_READ_START(ring);
	ering->head = I915_READ_HEAD(ring);
	ering->tail = I915_READ_TAIL(ring);
	ering->ctl = I915_READ_CTL(ring);

	if (I915_NEED_GFX_HWS(dev)) {
		int mmio;

		if (IS_GEN7(dev)) {
			switch (ring->id) {
			default:
			case RCS:
				mmio = RENDER_HWS_PGA_GEN7;
				break;
			case BCS:
				mmio = BLT_HWS_PGA_GEN7;
				break;
			case VCS:
				mmio = BSD_HWS_PGA_GEN7;
				break;
			case VECS:
				mmio = VEBOX_HWS_PGA_GEN7;
				break;
			}
		} else if (IS_GEN6(ring->dev)) {
			mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
		} else {
			/* XXX: gen8 returns to sanity */
			mmio = RING_HWS_PGA(ring->mmio_base);
		}

		ering->hws = I915_READ(mmio);
	}

	ering->hangcheck_score = ring->hangcheck.score;
	ering->hangcheck_action = ring->hangcheck.action;

	if (USES_PPGTT(dev)) {
		int i;

		ering->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(ring));

		if (IS_GEN6(dev))
			ering->vm_info.pp_dir_base =
				I915_READ(RING_PP_DIR_BASE_READ(ring));
		else if (IS_GEN7(dev))
			ering->vm_info.pp_dir_base =
				I915_READ(RING_PP_DIR_BASE(ring));
		else if (INTEL_INFO(dev)->gen >= 8)
			for (i = 0; i < 4; i++) {
				ering->vm_info.pdp[i] =
					I915_READ(GEN8_RING_PDP_UDW(ring, i));
				ering->vm_info.pdp[i] <<= 32;
				ering->vm_info.pdp[i] |=
					I915_READ(GEN8_RING_PDP_LDW(ring, i));
			}
	}
}
static void i915_gem_record_active_context(struct intel_engine_cs *ring,
					   struct drm_i915_error_state *error,
					   struct drm_i915_error_ring *ering)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	struct drm_i915_gem_object *obj;

	/* Currently render ring is the only HW context user */
	if (ring->id != RCS || !error->ccid)
		return;

	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if (!i915_gem_obj_ggtt_bound(obj))
			continue;

		if ((error->ccid & PAGE_MASK) == i915_gem_obj_ggtt_offset(obj)) {
			ering->ctx = i915_error_ggtt_object_create(dev_priv, obj);
			break;
		}
	}
}
static void i915_gem_record_rings(struct drm_device *dev,
				  struct drm_i915_error_state *error)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_request *request;
	int i, count;

	for (i = 0; i < I915_NUM_RINGS; i++) {
		struct intel_engine_cs *ring = &dev_priv->ring[i];
		struct intel_ringbuffer *rbuf;

		error->ring[i].pid = -1;

		if (ring->dev == NULL)
			continue;

		error->ring[i].valid = true;

		i915_record_ring_state(dev, error, ring, &error->ring[i]);

		request = i915_gem_find_active_request(ring);
		if (request) {
			struct i915_address_space *vm;

			vm = request->ctx && request->ctx->ppgtt ?
				&request->ctx->ppgtt->base :
				&dev_priv->gtt.base;

			/* We need to copy these to an anonymous buffer
			 * as the simplest method to avoid being overwritten
			 * by userspace.
			 */
			error->ring[i].batchbuffer =
				i915_error_object_create(dev_priv,
							 request->batch_obj,
							 vm);

			if (HAS_BROKEN_CS_TLB(dev_priv->dev))
				error->ring[i].wa_batchbuffer =
					i915_error_ggtt_object_create(dev_priv,
							     ring->scratch.obj);

			if (request->pid) {
				struct task_struct *task;

				rcu_read_lock();
				task = pid_task(request->pid, PIDTYPE_PID);
				if (task) {
					strcpy(error->ring[i].comm, task->comm);
					error->ring[i].pid = task->pid;
				}
				rcu_read_unlock();
			}
		}

		if (i915.enable_execlists) {
			/* TODO: This is only a small fix to keep basic error
			 * capture working, but we need to add more information
			 * for it to be useful (e.g. dump the context being
			 * executed).
			 */
			if (request)
				rbuf = request->ctx->engine[ring->id].ringbuf;
			else
				rbuf = ring->default_context->engine[ring->id].ringbuf;
		} else
			rbuf = ring->buffer;

		error->ring[i].cpu_ring_head = rbuf->head;
		error->ring[i].cpu_ring_tail = rbuf->tail;

		error->ring[i].ringbuffer =
			i915_error_ggtt_object_create(dev_priv, rbuf->obj);

		error->ring[i].hws_page =
			i915_error_ggtt_object_create(dev_priv, ring->status_page.obj);

		i915_gem_record_active_context(ring, error, &error->ring[i]);

		count = 0;
		list_for_each_entry(request, &ring->request_list, list)
			count++;

		error->ring[i].num_requests = count;
		error->ring[i].requests =
			kcalloc(count, sizeof(*error->ring[i].requests),
				GFP_ATOMIC);
		if (error->ring[i].requests == NULL) {
			error->ring[i].num_requests = 0;
			continue;
		}

		count = 0;
		list_for_each_entry(request, &ring->request_list, list) {
			struct drm_i915_error_request *erq;

			erq = &error->ring[i].requests[count++];
			erq->seqno = request->seqno;
			erq->jiffies = request->emitted_jiffies;
			erq->tail = request->postfix;
		}
	}
}
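/*
 * The request and ring snapshots above are taken without stopping the GPU,
 * so they are best-effort; allocations use GFP_ATOMIC (see the kcalloc
 * above) because capture can be triggered from interrupt context.
 */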
/* FIXME: Since pin count/bound list is global, we duplicate what we capture per
 * VM.
 */
static void i915_gem_capture_vm(struct drm_i915_private *dev_priv,
				struct drm_i915_error_state *error,
				struct i915_address_space *vm,
				const int ndx)
{
	struct drm_i915_error_buffer *active_bo = NULL, *pinned_bo = NULL;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int i;

	i = 0;
	list_for_each_entry(vma, &vm->active_list, mm_list)
		i++;
	error->active_bo_count[ndx] = i;

	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		list_for_each_entry(vma, &obj->vma_list, vma_link)
			if (vma->vm == vm && vma->pin_count > 0)
				i++;
	}
	error->pinned_bo_count[ndx] = i - error->active_bo_count[ndx];

	if (i) {
		active_bo = kcalloc(i, sizeof(*active_bo), GFP_ATOMIC);
		if (active_bo)
			pinned_bo = active_bo + error->active_bo_count[ndx];
	}

	if (active_bo)
		error->active_bo_count[ndx] =
			capture_active_bo(active_bo,
					  error->active_bo_count[ndx],
					  &vm->active_list);

	if (pinned_bo)
		error->pinned_bo_count[ndx] =
			capture_pinned_bo(pinned_bo,
					  error->pinned_bo_count[ndx],
					  &dev_priv->mm.bound_list, vm);
	error->active_bo[ndx] = active_bo;
	error->pinned_bo[ndx] = pinned_bo;
}
*dev_priv
,
1129 struct drm_i915_error_state
*error
)
1131 struct i915_address_space
*vm
;
1134 list_for_each_entry(vm
, &dev_priv
->vm_list
, global_link
)
1137 error
->active_bo
= kcalloc(cnt
, sizeof(*error
->active_bo
), GFP_ATOMIC
);
1138 error
->pinned_bo
= kcalloc(cnt
, sizeof(*error
->pinned_bo
), GFP_ATOMIC
);
1139 error
->active_bo_count
= kcalloc(cnt
, sizeof(*error
->active_bo_count
),
1141 error
->pinned_bo_count
= kcalloc(cnt
, sizeof(*error
->pinned_bo_count
),
1144 if (error
->active_bo
== NULL
||
1145 error
->pinned_bo
== NULL
||
1146 error
->active_bo_count
== NULL
||
1147 error
->pinned_bo_count
== NULL
) {
1148 kfree(error
->active_bo
);
1149 kfree(error
->active_bo_count
);
1150 kfree(error
->pinned_bo
);
1151 kfree(error
->pinned_bo_count
);
1153 error
->active_bo
= NULL
;
1154 error
->active_bo_count
= NULL
;
1155 error
->pinned_bo
= NULL
;
1156 error
->pinned_bo_count
= NULL
;
1158 list_for_each_entry(vm
, &dev_priv
->vm_list
, global_link
)
1159 i915_gem_capture_vm(dev_priv
, error
, vm
, i
++);
1161 error
->vm_count
= cnt
;
/* Capture all registers which don't fit into another category. */
static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
				   struct drm_i915_error_state *error)
{
	struct drm_device *dev = dev_priv->dev;
	int i;

	/* General organization
	 * 1. Registers specific to a single generation
	 * 2. Registers which belong to multiple generations
	 * 3. Feature specific registers.
	 * 4. Everything else
	 * Please try to follow the order.
	 */

	/* 1: Registers specific to a single generation */
	if (IS_VALLEYVIEW(dev)) {
		error->gtier[0] = I915_READ(GTIER);
		error->ier = I915_READ(VLV_IER);
		error->forcewake = I915_READ(FORCEWAKE_VLV);
	}

	if (IS_GEN7(dev))
		error->err_int = I915_READ(GEN7_ERR_INT);

	if (INTEL_INFO(dev)->gen >= 8) {
		error->fault_data0 = I915_READ(GEN8_FAULT_TLB_DATA0);
		error->fault_data1 = I915_READ(GEN8_FAULT_TLB_DATA1);
	}

	if (IS_GEN6(dev)) {
		error->forcewake = I915_READ(FORCEWAKE);
		error->gab_ctl = I915_READ(GAB_CTL);
		error->gfx_mode = I915_READ(GFX_MODE);
	}

	/* 2: Registers which belong to multiple generations */
	if (INTEL_INFO(dev)->gen >= 7)
		error->forcewake = I915_READ(FORCEWAKE_MT);

	if (INTEL_INFO(dev)->gen >= 6) {
		error->derrmr = I915_READ(DERRMR);
		error->error = I915_READ(ERROR_GEN6);
		error->done_reg = I915_READ(DONE_REG);
	}

	/* 3: Feature specific registers */
	if (IS_GEN6(dev) || IS_GEN7(dev)) {
		error->gam_ecochk = I915_READ(GAM_ECOCHK);
		error->gac_eco = I915_READ(GAC_ECO_BITS);
	}

	/* 4: Everything else */
	if (HAS_HW_CONTEXTS(dev))
		error->ccid = I915_READ(CCID);

	if (INTEL_INFO(dev)->gen >= 8) {
		error->ier = I915_READ(GEN8_DE_MISC_IER);
		for (i = 0; i < 4; i++)
			error->gtier[i] = I915_READ(GEN8_GT_IER(i));
	} else if (HAS_PCH_SPLIT(dev)) {
		error->ier = I915_READ(DEIER);
		error->gtier[0] = I915_READ(GTIER);
	} else if (IS_GEN2(dev)) {
		error->ier = I915_READ16(IER);
	} else if (!IS_VALLEYVIEW(dev)) {
		error->ier = I915_READ(IER);
	}
	error->eir = I915_READ(EIR);
	error->pgtbl_er = I915_READ(PGTBL_ER);

	i915_get_extra_instdone(dev, error->extra_instdone);
}
static void i915_error_capture_msg(struct drm_device *dev,
				   struct drm_i915_error_state *error,
				   bool wedged,
				   const char *error_msg)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 ecode;
	int ring_id = -1, len;

	ecode = i915_error_generate_code(dev_priv, error, &ring_id);

	len = scnprintf(error->error_msg, sizeof(error->error_msg),
			"GPU HANG: ecode %d:%d:0x%08x",
			INTEL_INFO(dev)->gen, ring_id, ecode);

	if (ring_id != -1 && error->ring[ring_id].pid != -1)
		len += scnprintf(error->error_msg + len,
				 sizeof(error->error_msg) - len,
				 ", in %s [%d]",
				 error->ring[ring_id].comm,
				 error->ring[ring_id].pid);

	scnprintf(error->error_msg + len, sizeof(error->error_msg) - len,
		  ", reason: %s, action: %s",
		  error_msg,
		  wedged ? "reset" : "continue");
}
static void i915_capture_gen_state(struct drm_i915_private *dev_priv,
				   struct drm_i915_error_state *error)
{
	error->iommu = -1;
#ifdef CONFIG_INTEL_IOMMU
	error->iommu = intel_iommu_gfx_mapped;
#endif
	error->reset_count = i915_reset_count(&dev_priv->gpu_error);
	error->suspend_count = dev_priv->suspend_count;
}
/**
 * i915_capture_error_state - capture an error record for later analysis
 * @dev: drm device
 *
 * Should be called when an error is detected (either a hang or an error
 * interrupt) to capture error state from the time of the error. Fills
 * out a structure which becomes available in debugfs for user level tools
 * to pick up.
 */
void i915_capture_error_state(struct drm_device *dev, bool wedged,
			      const char *error_msg)
{
	static bool warned;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_error_state *error;
	unsigned long flags;

	/* Account for pipe specific data like PIPE*STAT */
	error = kzalloc(sizeof(*error), GFP_ATOMIC);
	if (!error) {
		DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
		return;
	}

	kref_init(&error->ref);

	i915_capture_gen_state(dev_priv, error);
	i915_capture_reg_state(dev_priv, error);
	i915_gem_capture_buffers(dev_priv, error);
	i915_gem_record_fences(dev, error);
	i915_gem_record_rings(dev, error);

	do_gettimeofday(&error->time);

	error->overlay = intel_overlay_capture_error_state(dev);
	error->display = intel_display_capture_error_state(dev);

	i915_error_capture_msg(dev, error, wedged, error_msg);
	DRM_INFO("%s\n", error->error_msg);

	spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
	if (dev_priv->gpu_error.first_error == NULL) {
		dev_priv->gpu_error.first_error = error;
		error = NULL;
	}
	spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);

	if (error) {
		i915_error_state_free(&error->ref);
		return;
	}

	if (!warned) {
		DRM_INFO("GPU hangs can indicate a bug anywhere in the entire gfx stack, including userspace.\n");
		DRM_INFO("Please file a _new_ bug report on bugs.freedesktop.org against DRI -> DRM/Intel\n");
		DRM_INFO("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
		DRM_INFO("The gpu crash dump is required to analyze gpu hangs, so please always attach it.\n");
		DRM_INFO("GPU crash dump saved to /sys/class/drm/card%d/error\n", dev->primary->index);
		warned = true;
	}
}
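/*
 * Typical call site, sketched for orientation (illustrative; the real
 * callers live elsewhere in the driver, e.g. the error-handling paths in
 * i915_irq.c):
 *
 *	i915_capture_error_state(dev, wedged, "Ring hung");
 *
 * The captured state is then held as gpu_error.first_error and exposed
 * through /sys/class/drm/card%d/error until userspace clears it or
 * i915_destroy_error_state() is called.
 */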
void i915_error_state_get(struct drm_device *dev,
			  struct i915_error_state_file_priv *error_priv)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	spin_lock_irq(&dev_priv->gpu_error.lock);
	error_priv->error = dev_priv->gpu_error.first_error;
	if (error_priv->error)
		kref_get(&error_priv->error->ref);
	spin_unlock_irq(&dev_priv->gpu_error.lock);
}
void i915_error_state_put(struct i915_error_state_file_priv *error_priv)
{
	if (error_priv->error)
		kref_put(&error_priv->error->ref, i915_error_state_free);
}
void i915_destroy_error_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_error_state *error;

	spin_lock_irq(&dev_priv->gpu_error.lock);
	error = dev_priv->gpu_error.first_error;
	dev_priv->gpu_error.first_error = NULL;
	spin_unlock_irq(&dev_priv->gpu_error.lock);

	if (error)
		kref_put(&error->ref, i915_error_state_free);
}
const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
{
	switch (type) {
	case I915_CACHE_NONE: return " uncached";
	case I915_CACHE_LLC: return HAS_LLC(i915) ? " LLC" : " snooped";
	case I915_CACHE_L3_LLC: return " L3+LLC";
	case I915_CACHE_WT: return " WT";
	default: return "";
	}
}
/* NB: please notice the memset */
void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);

	if (IS_GEN2(dev) || IS_GEN3(dev))
		instdone[0] = I915_READ(GEN2_INSTDONE);
	else if (IS_GEN4(dev) || IS_GEN5(dev) || IS_GEN6(dev)) {
		instdone[0] = I915_READ(RING_INSTDONE(RENDER_RING_BASE));
		instdone[1] = I915_READ(GEN4_INSTDONE1);
	} else if (INTEL_INFO(dev)->gen >= 7) {
		instdone[0] = I915_READ(RING_INSTDONE(RENDER_RING_BASE));
		instdone[1] = I915_READ(GEN7_SC_INSTDONE);
		instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
		instdone[3] = I915_READ(GEN7_ROW_INSTDONE);
	}
}