/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
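/*
 * i915_verify_lists - sanity-check the GEM object lists.
 *
 * Walks each of the driver's object lists and checks that every object
 * on it is still referenced and in a state consistent with the list it
 * sits on.  Only compiled in when the WATCH_LISTS debug switch is set.
 */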
#if WATCH_LISTS
int
i915_verify_lists(struct drm_device *dev)
{
        static int warned;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj;
        int err = 0;

        if (warned)
                return 0;

        list_for_each_entry(obj, &dev_priv->render_ring.active_list, list) {
                if (obj->base.dev != dev ||
                    !atomic_read(&obj->base.refcount.refcount)) {
                        DRM_ERROR("freed render active %p\n", obj);
                        err++;
                        break;
                } else if (!obj->active ||
                           (obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0) {
                        DRM_ERROR("invalid render active %p (a %d r %x)\n",
                                  obj,
                                  obj->active,
                                  obj->base.read_domains);
                        err++;
                } else if (obj->base.write_domain &&
                           list_empty(&obj->gpu_write_list)) {
                        DRM_ERROR("invalid render active %p (w %x, gwl %d)\n",
                                  obj,
                                  obj->base.write_domain,
                                  !list_empty(&obj->gpu_write_list));
                        err++;
                }
        }
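        /*
         * Objects waiting for their GPU writes to be flushed: these must
         * still be active, carry a dirty GPU write domain and be tracked
         * on the gpu_write_list.
         */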
        list_for_each_entry(obj, &dev_priv->mm.flushing_list, list) {
                if (obj->base.dev != dev ||
                    !atomic_read(&obj->base.refcount.refcount)) {
                        DRM_ERROR("freed flushing %p\n", obj);
                        err++;
                        break;
                } else if (!obj->active ||
                           (obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0 ||
                           list_empty(&obj->gpu_write_list)) {
                        DRM_ERROR("invalid flushing %p (a %d w %x gwl %d)\n",
                                  obj,
                                  obj->active,
                                  obj->base.write_domain,
                                  !list_empty(&obj->gpu_write_list));
                        err++;
                }
        }
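        /*
         * Every object with an outstanding GPU write must be active and
         * must actually have a GPU write domain set.
         */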
        list_for_each_entry(obj, &dev_priv->mm.gpu_write_list, gpu_write_list) {
                if (obj->base.dev != dev ||
                    !atomic_read(&obj->base.refcount.refcount)) {
                        DRM_ERROR("freed gpu write %p\n", obj);
                        err++;
                        break;
                } else if (!obj->active ||
                           (obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0) {
                        DRM_ERROR("invalid gpu write %p (a %d w %x)\n",
                                  obj,
                                  obj->active,
                                  obj->base.write_domain);
                        err++;
                }
        }
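        /*
         * Inactive objects must be idle: unpinned, not active and with
         * no pending GPU write domain.
         */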
        list_for_each_entry(obj, &dev_priv->mm.inactive_list, list) {
                if (obj->base.dev != dev ||
                    !atomic_read(&obj->base.refcount.refcount)) {
                        DRM_ERROR("freed inactive %p\n", obj);
                        err++;
                        break;
                } else if (obj->pin_count || obj->active ||
                           (obj->base.write_domain & I915_GEM_GPU_DOMAINS)) {
                        DRM_ERROR("invalid inactive %p (p %d a %d w %x)\n",
                                  obj,
                                  obj->pin_count, obj->active,
                                  obj->base.write_domain);
                        err++;
                }
        }
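        /*
         * Pinned objects must hold a pin reference but otherwise look
         * idle, exactly like inactive objects.
         */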
        list_for_each_entry(obj, &dev_priv->mm.pinned_list, list) {
                if (obj->base.dev != dev ||
                    !atomic_read(&obj->base.refcount.refcount)) {
                        DRM_ERROR("freed pinned %p\n", obj);
                        err++;
                        break;
                } else if (!obj->pin_count || obj->active ||
                           (obj->base.write_domain & I915_GEM_GPU_DOMAINS)) {
                        DRM_ERROR("invalid pinned %p (p %d a %d w %x)\n",
                                  obj,
                                  obj->pin_count, obj->active,
                                  obj->base.write_domain);
                        err++;
                }
        }

        return warned = err;
}
#endif /* WATCH_LISTS */
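/*
 * i915_gem_object_check_coherency - compare CPU and GTT views of an object.
 *
 * Reads every dword of the object twice, once through the CPU mapping of
 * its backing pages and once through an ioremap of its GTT range, and
 * reports any mismatch.  Only compiled in when WATCH_COHERENCY is set.
 */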
#if WATCH_COHERENCY
void
i915_gem_object_check_coherency(struct drm_i915_gem_object *obj, int handle)
{
        struct drm_device *dev = obj->base.dev;
        int page;
        uint32_t *gtt_mapping;
        uint32_t *backing_map = NULL;
        int bad_count = 0;

        DRM_INFO("%s: checking coherency of object %p@0x%08x (%d, %zdkb):\n",
                 __func__, obj, obj->gtt_offset, handle,
                 obj->size / 1024);

        gtt_mapping = ioremap(dev->agp->base + obj->gtt_offset,
                              obj->base.size);
        if (gtt_mapping == NULL) {
                DRM_ERROR("failed to map GTT space\n");
                return;
        }
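        /*
         * Map each backing page in turn and compare it dword by dword
         * against the GTT alias (PAGE_SIZE / 4 dwords per page), bailing
         * out after a handful of mismatches so we don't flood the log.
         */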
        for (page = 0; page < obj->size / PAGE_SIZE; page++) {
                int i;

                backing_map = kmap_atomic(obj->pages[page]);

                if (backing_map == NULL) {
                        DRM_ERROR("failed to map backing page\n");
                        goto out;
                }

                for (i = 0; i < PAGE_SIZE / 4; i++) {
                        uint32_t cpuval = backing_map[i];
                        uint32_t gttval = readl(gtt_mapping +
                                                page * 1024 + i);

                        if (cpuval != gttval) {
                                DRM_INFO("incoherent CPU vs GPU at 0x%08x: "
                                         "0x%08x vs 0x%08x\n",
                                         (int)(obj->gtt_offset +
                                               page * PAGE_SIZE + i * 4),
                                         cpuval, gttval);
                                if (bad_count++ >= 8) {
                                        DRM_INFO("...\n");
                                        goto out;
                                }
                        }
                }
                kunmap_atomic(backing_map);
                backing_map = NULL;
        }

 out:
        if (backing_map != NULL)
                kunmap_atomic(backing_map);
        iounmap(gtt_mapping);
        /* give syslog time to catch up */
        msleep(1);

        /* Directly flush the object, since we just loaded values with the CPU
         * from the backing pages and we don't want to disturb the cache
         * management that we're trying to observe.
         */
        i915_gem_clflush_object(obj);
}
#endif /* WATCH_COHERENCY */
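/*
 * Note: WATCH_LISTS and WATCH_COHERENCY are compile-time debug switches;
 * in this era of the driver they are expected to be defined (normally to 0)
 * in i915_drv.h, so both helpers compile away in production builds.
 */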