// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */
6 #include "intel_memory_region.h"
7 #include "i915_gem_region.h"
9 #include "i915_trace.h"
12 i915_gem_object_put_pages_buddy(struct drm_i915_gem_object
*obj
,
13 struct sg_table
*pages
)
15 __intel_memory_region_put_pages_buddy(obj
->mm
.region
, &obj
->mm
.blocks
);
17 obj
->mm
.dirty
= false;
23 i915_gem_object_get_pages_buddy(struct drm_i915_gem_object
*obj
)
25 struct intel_memory_region
*mem
= obj
->mm
.region
;
26 struct list_head
*blocks
= &obj
->mm
.blocks
;
27 resource_size_t size
= obj
->base
.size
;
28 resource_size_t prev_end
;
29 struct i915_buddy_block
*block
;
32 struct scatterlist
*sg
;
33 unsigned int sg_page_sizes
;
36 st
= kmalloc(sizeof(*st
), GFP_KERNEL
);
40 if (sg_alloc_table(st
, size
>> ilog2(mem
->mm
.chunk_size
), GFP_KERNEL
)) {
45 flags
= I915_ALLOC_MIN_PAGE_SIZE
;
46 if (obj
->flags
& I915_BO_ALLOC_CONTIGUOUS
)
47 flags
|= I915_ALLOC_CONTIGUOUS
;
49 ret
= __intel_memory_region_get_pages_buddy(mem
, size
, flags
, blocks
);
53 GEM_BUG_ON(list_empty(blocks
));
58 prev_end
= (resource_size_t
)-1;
60 list_for_each_entry(block
, blocks
, link
) {
61 u64 block_size
, offset
;
63 block_size
= min_t(u64
, size
,
64 i915_buddy_block_size(&mem
->mm
, block
));
65 offset
= i915_buddy_block_offset(block
);
67 GEM_BUG_ON(overflows_type(block_size
, sg
->length
));
69 if (offset
!= prev_end
||
70 add_overflows_t(typeof(sg
->length
), sg
->length
, block_size
)) {
72 sg_page_sizes
|= sg
->length
;
76 sg_dma_address(sg
) = mem
->region
.start
+ offset
;
77 sg_dma_len(sg
) = block_size
;
79 sg
->length
= block_size
;
83 sg
->length
+= block_size
;
84 sg_dma_len(sg
) += block_size
;
87 prev_end
= offset
+ block_size
;
90 sg_page_sizes
|= sg
->length
;
94 __i915_gem_object_set_pages(obj
, st
, sg_page_sizes
);
104 void i915_gem_object_init_memory_region(struct drm_i915_gem_object
*obj
,
105 struct intel_memory_region
*mem
,
108 INIT_LIST_HEAD(&obj
->mm
.blocks
);
109 obj
->mm
.region
= intel_memory_region_get(mem
);
112 if (obj
->base
.size
<= mem
->min_page_size
)
113 obj
->flags
|= I915_BO_ALLOC_CONTIGUOUS
;
115 mutex_lock(&mem
->objects
.lock
);
117 if (obj
->flags
& I915_BO_ALLOC_VOLATILE
)
118 list_add(&obj
->mm
.region_link
, &mem
->objects
.purgeable
);
120 list_add(&obj
->mm
.region_link
, &mem
->objects
.list
);
122 mutex_unlock(&mem
->objects
.lock
);
125 void i915_gem_object_release_memory_region(struct drm_i915_gem_object
*obj
)
127 struct intel_memory_region
*mem
= obj
->mm
.region
;
129 mutex_lock(&mem
->objects
.lock
);
130 list_del(&obj
->mm
.region_link
);
131 mutex_unlock(&mem
->objects
.lock
);
133 intel_memory_region_put(mem
);
136 struct drm_i915_gem_object
*
137 i915_gem_object_create_region(struct intel_memory_region
*mem
,
138 resource_size_t size
,
141 struct drm_i915_gem_object
*obj
;
144 * NB: Our use of resource_size_t for the size stems from using struct
145 * resource for the mem->region. We might need to revisit this in the
149 GEM_BUG_ON(flags
& ~I915_BO_ALLOC_FLAGS
);
152 return ERR_PTR(-ENODEV
);
154 size
= round_up(size
, mem
->min_page_size
);
157 GEM_BUG_ON(!IS_ALIGNED(size
, I915_GTT_MIN_ALIGNMENT
));
160 * XXX: There is a prevalence of the assumption that we fit the
161 * object's page count inside a 32bit _signed_ variable. Let's document
162 * this and catch if we ever need to fix it. In the meantime, if you do
163 * spot such a local variable, please consider fixing!
166 if (size
>> PAGE_SHIFT
> INT_MAX
)
167 return ERR_PTR(-E2BIG
);
169 if (overflows_type(size
, obj
->base
.size
))
170 return ERR_PTR(-E2BIG
);
172 obj
= mem
->ops
->create_object(mem
, size
, flags
);
174 trace_i915_gem_object_create(obj
);