/*
 * Copyright © 2010 Daniel Vetter
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
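
/* Translate an i915 cache level into the AGP memory type understood by the
 * intel-gtt layer.
 */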
/* XXX kill agp_type! */
static unsigned int cache_level_to_agp_type(struct drm_device *dev,
					    enum i915_cache_level cache_level)
{
	switch (cache_level) {
	case I915_CACHE_LLC_MLC:
		if (INTEL_INFO(dev)->gen >= 6)
			return AGP_USER_CACHED_MEMORY_LLC_MLC;
		/* Older chipsets do not have this extra level of CPU
		 * caching, so fallthrough and request the PTE simply
		 * as cached.
		 */
	case I915_CACHE_LLC:
		return AGP_USER_CACHED_MEMORY;
	default:
	case I915_CACHE_NONE:
		return AGP_USER_MEMORY;
	}
}
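
/* Restore the GTT mappings of every bound object, e.g. after resume when the
 * global GTT contents have been lost: clear the whole range back to scratch
 * pages, then flush and rebind each object on the gtt_list with its current
 * cache level.
 */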
void i915_gem_restore_gtt_mappings(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;

	/* First fill our portion of the GTT with scratch pages */
	intel_gtt_clear_range(dev_priv->mm.gtt_start / PAGE_SIZE,
			      (dev_priv->mm.gtt_end - dev_priv->mm.gtt_start) / PAGE_SIZE);

	list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
		i915_gem_clflush_object(obj);
		i915_gem_gtt_rebind_object(obj, obj->cache_level);
	}

	intel_gtt_chipset_flush();
}
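
/* Insert the object's backing pages into the global GTT at its allocated
 * offset. On platforms that need DMA remapping the pages are first mapped
 * into a scatterlist; otherwise the struct pages are inserted directly.
 */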
int i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned int agp_type = cache_level_to_agp_type(dev, obj->cache_level);
	int ret;

	if (dev_priv->mm.gtt->needs_dmar) {
		ret = intel_gtt_map_memory(obj->pages,
					   obj->base.size >> PAGE_SHIFT,
					   &obj->sg_list,
					   &obj->num_sg);
		if (ret != 0)
			return ret;

		intel_gtt_insert_sg_entries(obj->sg_list,
					    obj->num_sg,
					    obj->gtt_space->start >> PAGE_SHIFT,
					    agp_type);
	} else
		intel_gtt_insert_pages(obj->gtt_space->start >> PAGE_SHIFT,
				       obj->base.size >> PAGE_SHIFT,
				       obj->pages,
				       agp_type);

	return 0;
}
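
/* Rewrite the GTT entries for an already-bound object, e.g. when its cache
 * level changes. Assumes the object's pages (and its sg_list, when DMA
 * remapping is in use) have already been set up by the bind path.
 */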
void i915_gem_gtt_rebind_object(struct drm_i915_gem_object *obj,
				enum i915_cache_level cache_level)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned int agp_type = cache_level_to_agp_type(dev, cache_level);

	if (dev_priv->mm.gtt->needs_dmar) {
		BUG_ON(!obj->sg_list);

		intel_gtt_insert_sg_entries(obj->sg_list,
					    obj->num_sg,
					    obj->gtt_space->start >> PAGE_SHIFT,
					    agp_type);
	} else
		intel_gtt_insert_pages(obj->gtt_space->start >> PAGE_SHIFT,
				       obj->base.size >> PAGE_SHIFT,
				       obj->pages,
				       agp_type);
}
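
/* Remove the object's pages from the GTT by pointing its range back at the
 * scratch page, and tear down any scatterlist DMA mapping created at bind
 * time.
 */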
void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
{
	intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT,
			      obj->base.size >> PAGE_SHIFT);

	if (obj->sg_list) {
		intel_gtt_unmap_memory(obj->sg_list, obj->num_sg);
		obj->sg_list = NULL;
	}
}