/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "huge_gem_object.h"
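
/*
 * huge_gem_object() builds a GEM object whose apparent (GTT/DMA) size is
 * larger than the physical memory actually allocated for it: only the
 * first obj->scratch bytes are backed by unique pages, and the rest of
 * the backing store cycles through those same pages again. This lets
 * tests exercise very large objects without the matching memory cost.
 */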
static void huge_free_pages(struct drm_i915_gem_object *obj,
			    struct sg_table *pages)
{
	unsigned long nreal = obj->scratch / PAGE_SIZE;
	struct scatterlist *sg;

	/* Only the first nreal entries own their pages; free just those. */
	for (sg = pages->sgl; sg && nreal--; sg = __sg_next(sg))
		__free_page(sg_page(sg));

	sg_free_table(pages);
	kfree(pages);
}
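
/*
 * Allocate the real pages (obj->scratch bytes worth), then repeat them
 * across the remainder of the scatterlist until obj->base.size is covered,
 * before handing the table to the GTT via i915_gem_gtt_prepare_pages().
 */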
static int huge_get_pages(struct drm_i915_gem_object *obj)
{
#define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY)
	const unsigned long nreal = obj->scratch / PAGE_SIZE;
	const unsigned long npages = obj->base.size / PAGE_SIZE;
	struct scatterlist *sg, *src, *end;
	struct sg_table *pages;
	unsigned long n;

	pages = kmalloc(sizeof(*pages), GFP);
	if (!pages)
		return -ENOMEM;

	if (sg_alloc_table(pages, npages, GFP)) {
		kfree(pages);
		return -ENOMEM;
	}

	/* Allocate the real backing pages for the first nreal entries. */
	sg = pages->sgl;
	for (n = 0; n < nreal; n++) {
		struct page *page;

		page = alloc_page(GFP | __GFP_HIGHMEM);
		if (!page) {
			sg_mark_end(sg);
			goto err;
		}

		sg_set_page(sg, page, PAGE_SIZE, 0);
		sg = __sg_next(sg);
	}

	/* Fill the remaining entries by cycling over the real pages again. */
	if (nreal < npages) {
		for (end = sg, src = pages->sgl; sg; sg = __sg_next(sg)) {
			sg_set_page(sg, sg_page(src), PAGE_SIZE, 0);
			src = __sg_next(src);
			if (src == end)
				src = pages->sgl;
		}
	}

	if (i915_gem_gtt_prepare_pages(obj, pages))
		goto err;

	__i915_gem_object_set_pages(obj, pages, PAGE_SIZE);

	return 0;

err:
	huge_free_pages(obj, pages);

	return -ENOMEM;
#undef GFP
}
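
/*
 * Undo i915_gem_gtt_prepare_pages() and release the real backing pages;
 * the repeated tail entries alias those pages and need no separate free.
 */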
static void huge_put_pages(struct drm_i915_gem_object *obj,
			   struct sg_table *pages)
{
	i915_gem_gtt_finish_pages(obj, pages);
	huge_free_pages(obj, pages);

	obj->mm.dirty = false;
}
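
/*
 * The object is backed by ordinary struct pages (HAS_STRUCT_PAGE) and may
 * be reclaimed under memory pressure (IS_SHRINKABLE).
 */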
static const struct drm_i915_gem_object_ops huge_ops = {
	.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
		 I915_GEM_OBJECT_IS_SHRINKABLE,
	.get_pages = huge_get_pages,
	.put_pages = huge_put_pages,
};
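
/*
 * huge_gem_object - create an object reporting @dma_size bytes to the GTT
 * while allocating only @phys_size bytes of physical pages. The physically
 * backed size is stashed in obj->scratch for the get/put_pages hooks above.
 */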
struct drm_i915_gem_object *
huge_gem_object(struct drm_i915_private *i915,
		phys_addr_t phys_size,
		dma_addr_t dma_size)
{
	struct drm_i915_gem_object *obj;
	unsigned int cache_level;

	GEM_BUG_ON(!phys_size || phys_size > dma_size);
	GEM_BUG_ON(!IS_ALIGNED(phys_size, PAGE_SIZE));
	GEM_BUG_ON(!IS_ALIGNED(dma_size, I915_GTT_PAGE_SIZE));

	if (overflows_type(dma_size, obj->base.size))
		return ERR_PTR(-E2BIG);

	obj = i915_gem_object_alloc(i915);
	if (!obj)
		return ERR_PTR(-ENOMEM);

	drm_gem_private_object_init(&i915->drm, &obj->base, dma_size);
	i915_gem_object_init(obj, &huge_ops);

	obj->base.read_domains = I915_GEM_DOMAIN_CPU;
	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	cache_level = HAS_LLC(i915) ? I915_CACHE_LLC : I915_CACHE_NONE;
	i915_gem_object_set_cache_coherency(obj, cache_level);
	/* Remember how much of the object is really backed by pages. */
	obj->scratch = phys_size;