/*
 * Copyright © 2010 Daniel Vetter
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
29 #include "i915_trace.h"
30 #include "intel_drv.h"
32 /* XXX kill agp_type! */
33 static unsigned int cache_level_to_agp_type(struct drm_device
*dev
,
34 enum i915_cache_level cache_level
)
36 switch (cache_level
) {
37 case I915_CACHE_LLC_MLC
:
38 if (INTEL_INFO(dev
)->gen
>= 6)
39 return AGP_USER_CACHED_MEMORY_LLC_MLC
;
40 /* Older chipsets do not have this extra level of CPU
41 * cacheing, so fallthrough and request the PTE simply
45 return AGP_USER_CACHED_MEMORY
;
48 return AGP_USER_MEMORY
;
52 static bool do_idling(struct drm_i915_private
*dev_priv
)
54 bool ret
= dev_priv
->mm
.interruptible
;
56 if (unlikely(dev_priv
->mm
.gtt
->do_idle_maps
)) {
57 dev_priv
->mm
.interruptible
= false;
58 if (i915_gpu_idle(dev_priv
->dev
)) {
59 DRM_ERROR("Couldn't idle GPU\n");
60 /* Wait a bit, in hopes it avoids the hang */
68 static void undo_idling(struct drm_i915_private
*dev_priv
, bool interruptible
)
70 if (unlikely(dev_priv
->mm
.gtt
->do_idle_maps
))
71 dev_priv
->mm
.interruptible
= interruptible
;
74 void i915_gem_restore_gtt_mappings(struct drm_device
*dev
)
76 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
77 struct drm_i915_gem_object
*obj
;
79 /* First fill our portion of the GTT with scratch pages */
80 intel_gtt_clear_range(dev_priv
->mm
.gtt_start
/ PAGE_SIZE
,
81 (dev_priv
->mm
.gtt_end
- dev_priv
->mm
.gtt_start
) / PAGE_SIZE
);
83 list_for_each_entry(obj
, &dev_priv
->mm
.gtt_list
, gtt_list
) {
84 i915_gem_clflush_object(obj
);
85 i915_gem_gtt_rebind_object(obj
, obj
->cache_level
);
88 intel_gtt_chipset_flush();
91 int i915_gem_gtt_bind_object(struct drm_i915_gem_object
*obj
)
93 struct drm_device
*dev
= obj
->base
.dev
;
94 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
95 unsigned int agp_type
= cache_level_to_agp_type(dev
, obj
->cache_level
);
98 if (dev_priv
->mm
.gtt
->needs_dmar
) {
99 ret
= intel_gtt_map_memory(obj
->pages
,
100 obj
->base
.size
>> PAGE_SHIFT
,
106 intel_gtt_insert_sg_entries(obj
->sg_list
,
108 obj
->gtt_space
->start
>> PAGE_SHIFT
,
111 intel_gtt_insert_pages(obj
->gtt_space
->start
>> PAGE_SHIFT
,
112 obj
->base
.size
>> PAGE_SHIFT
,
119 void i915_gem_gtt_rebind_object(struct drm_i915_gem_object
*obj
,
120 enum i915_cache_level cache_level
)
122 struct drm_device
*dev
= obj
->base
.dev
;
123 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
124 unsigned int agp_type
= cache_level_to_agp_type(dev
, cache_level
);
126 if (dev_priv
->mm
.gtt
->needs_dmar
) {
127 BUG_ON(!obj
->sg_list
);
129 intel_gtt_insert_sg_entries(obj
->sg_list
,
131 obj
->gtt_space
->start
>> PAGE_SHIFT
,
134 intel_gtt_insert_pages(obj
->gtt_space
->start
>> PAGE_SHIFT
,
135 obj
->base
.size
>> PAGE_SHIFT
,
140 void i915_gem_gtt_unbind_object(struct drm_i915_gem_object
*obj
)
142 struct drm_device
*dev
= obj
->base
.dev
;
143 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
146 interruptible
= do_idling(dev_priv
);
148 intel_gtt_clear_range(obj
->gtt_space
->start
>> PAGE_SHIFT
,
149 obj
->base
.size
>> PAGE_SHIFT
);
152 intel_gtt_unmap_memory(obj
->sg_list
, obj
->num_sg
);
156 undo_idling(dev_priv
, interruptible
);