/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Keith Packard <keithp@keithp.com>
 *
 */
#if WATCH_INACTIVE
/*
 * i915_verify_inactive - sanity-check the GEM inactive list.
 *
 * Walks dev_priv->mm.inactive_list and DRM_ERRORs about any object that
 * is still pinned, still active, or has a pending GPU write domain other
 * than CPU/GTT — none of which should be true for an inactive object.
 * @file/@line identify the call site so the failing check shows up in
 * the log.  Debug-only code, compiled under WATCH_INACTIVE.
 *
 * NOTE(review): reconstructed from a garbled extraction; the original
 * dropped the return type, the obj assignment inside the loop and the
 * braces.  Verify against upstream i915_gem_debug.c.
 */
void
i915_verify_inactive(struct drm_device *dev, char *file, int line)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;

	list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
		/* presumably the embedded GEM object, matching the
		 * to_intel_bo() usage elsewhere in this file — confirm */
		obj = &obj_priv->base;
		if (obj_priv->pin_count || obj_priv->active ||
		    (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
					   I915_GEM_DOMAIN_GTT)))
			DRM_ERROR("inactive %p (p %d a %d w %x) %s:%d\n",
				  obj,
				  obj_priv->pin_count, obj_priv->active,
				  obj->write_domain, file, line);
	}
}
#endif /* WATCH_INACTIVE */
55 #if WATCH_BUF | WATCH_EXEC | WATCH_PWRITE
57 i915_gem_dump_page(struct page
*page
, uint32_t start
, uint32_t end
,
58 uint32_t bias
, uint32_t mark
)
60 uint32_t *mem
= kmap_atomic(page
, KM_USER0
);
62 for (i
= start
; i
< end
; i
+= 4)
63 DRM_INFO("%08x: %08x%s\n",
64 (int) (bias
+ i
), mem
[i
/ 4],
65 (bias
+ i
== mark
) ? " ********" : "");
66 kunmap_atomic(mem
, KM_USER0
);
67 /* give syslog time to catch up */
72 i915_gem_dump_object(struct drm_gem_object
*obj
, int len
,
73 const char *where
, uint32_t mark
)
75 struct drm_i915_gem_object
*obj_priv
= to_intel_bo(obj
);
78 DRM_INFO("%s: object at offset %08x\n", where
, obj_priv
->gtt_offset
);
79 for (page
= 0; page
< (len
+ PAGE_SIZE
-1) / PAGE_SIZE
; page
++) {
80 int page_len
, chunk
, chunk_len
;
82 page_len
= len
- page
* PAGE_SIZE
;
83 if (page_len
> PAGE_SIZE
)
86 for (chunk
= 0; chunk
< page_len
; chunk
+= 128) {
87 chunk_len
= page_len
- chunk
;
90 i915_gem_dump_page(obj_priv
->pages
[page
],
91 chunk
, chunk
+ chunk_len
,
92 obj_priv
->gtt_offset
+
#if WATCH_LRU
/*
 * i915_dump_lru - dump the GEM active, flushing and inactive lists.
 *
 * @dev:   DRM device whose private mm lists are walked
 * @where: tag string printed in each list header
 *
 * Prints every object pointer together with its last_rendering_seqno.
 * Only the active-list walk takes mm.active_list_lock, matching the
 * original code.  NOTE(review): the flushing/inactive walks are
 * unlocked here — presumably callers hold struct_mutex; confirm.
 */
void
i915_dump_lru(struct drm_device *dev, const char *where)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv;

	DRM_INFO("active list %s {\n", where);
	spin_lock(&dev_priv->mm.active_list_lock);
	list_for_each_entry(obj_priv, &dev_priv->mm.active_list,
			    list)
	{
		DRM_INFO("    %p: %08x\n", obj_priv,
			 obj_priv->last_rendering_seqno);
	}
	spin_unlock(&dev_priv->mm.active_list_lock);
	DRM_INFO("}\n");
	DRM_INFO("flushing list %s {\n", where);
	list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list,
			    list)
	{
		DRM_INFO("    %p: %08x\n", obj_priv,
			 obj_priv->last_rendering_seqno);
	}
	DRM_INFO("}\n");
	DRM_INFO("inactive %s {\n", where);
	list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
		DRM_INFO("    %p: %08x\n", obj_priv,
			 obj_priv->last_rendering_seqno);
	}
	DRM_INFO("}\n");
}
#endif
137 i915_gem_object_check_coherency(struct drm_gem_object
*obj
, int handle
)
139 struct drm_device
*dev
= obj
->dev
;
140 struct drm_i915_gem_object
*obj_priv
= to_intel_bo(obj
);
142 uint32_t *gtt_mapping
;
143 uint32_t *backing_map
= NULL
;
146 DRM_INFO("%s: checking coherency of object %p@0x%08x (%d, %zdkb):\n",
147 __func__
, obj
, obj_priv
->gtt_offset
, handle
,
150 gtt_mapping
= ioremap(dev
->agp
->base
+ obj_priv
->gtt_offset
,
152 if (gtt_mapping
== NULL
) {
153 DRM_ERROR("failed to map GTT space\n");
157 for (page
= 0; page
< obj
->size
/ PAGE_SIZE
; page
++) {
160 backing_map
= kmap_atomic(obj_priv
->pages
[page
], KM_USER0
);
162 if (backing_map
== NULL
) {
163 DRM_ERROR("failed to map backing page\n");
167 for (i
= 0; i
< PAGE_SIZE
/ 4; i
++) {
168 uint32_t cpuval
= backing_map
[i
];
169 uint32_t gttval
= readl(gtt_mapping
+
172 if (cpuval
!= gttval
) {
173 DRM_INFO("incoherent CPU vs GPU at 0x%08x: "
174 "0x%08x vs 0x%08x\n",
175 (int)(obj_priv
->gtt_offset
+
176 page
* PAGE_SIZE
+ i
* 4),
178 if (bad_count
++ >= 8) {
184 kunmap_atomic(backing_map
, KM_USER0
);
189 if (backing_map
!= NULL
)
190 kunmap_atomic(backing_map
, KM_USER0
);
191 iounmap(gtt_mapping
);
193 /* give syslog time to catch up */
196 /* Directly flush the object, since we just loaded values with the CPU
197 * from the backing pages and we don't want to disturb the cache
198 * management that we're trying to observe.
201 i915_gem_clflush_object(obj
);