// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */
6 #include "intel_memory_region.h"
/* XXX: Hysterical raisins. BIT(inst) needs to just be (inst) at some point. */
/*
 * Pack a memory type and instance into a single region-id bitmask.
 * The whole expansion is parenthesized so the macro is safe to use
 * inside any larger expression.
 */
#define REGION_MAP(type, inst) \
	(BIT((type) + INTEL_MEMORY_TYPE_SHIFT) | BIT(inst))
13 const u32 intel_region_map
[] = {
14 [INTEL_REGION_SMEM
] = REGION_MAP(INTEL_MEMORY_SYSTEM
, 0),
15 [INTEL_REGION_LMEM
] = REGION_MAP(INTEL_MEMORY_LOCAL
, 0),
16 [INTEL_REGION_STOLEN
] = REGION_MAP(INTEL_MEMORY_STOLEN
, 0),
19 struct intel_memory_region
*
20 intel_memory_region_by_type(struct drm_i915_private
*i915
,
21 enum intel_memory_type mem_type
)
23 struct intel_memory_region
*mr
;
26 for_each_memory_region(mr
, i915
, id
)
27 if (mr
->type
== mem_type
)
34 intel_memory_region_free_pages(struct intel_memory_region
*mem
,
35 struct list_head
*blocks
)
37 struct i915_buddy_block
*block
, *on
;
40 list_for_each_entry_safe(block
, on
, blocks
, link
) {
41 size
+= i915_buddy_block_size(&mem
->mm
, block
);
42 i915_buddy_free(&mem
->mm
, block
);
44 INIT_LIST_HEAD(blocks
);
50 __intel_memory_region_put_pages_buddy(struct intel_memory_region
*mem
,
51 struct list_head
*blocks
)
53 mutex_lock(&mem
->mm_lock
);
54 mem
->avail
+= intel_memory_region_free_pages(mem
, blocks
);
55 mutex_unlock(&mem
->mm_lock
);
59 __intel_memory_region_put_block_buddy(struct i915_buddy_block
*block
)
61 struct list_head blocks
;
63 INIT_LIST_HEAD(&blocks
);
64 list_add(&block
->link
, &blocks
);
65 __intel_memory_region_put_pages_buddy(block
->private, &blocks
);
69 __intel_memory_region_get_pages_buddy(struct intel_memory_region
*mem
,
72 struct list_head
*blocks
)
74 unsigned int min_order
= 0;
75 unsigned long n_pages
;
77 GEM_BUG_ON(!IS_ALIGNED(size
, mem
->mm
.chunk_size
));
78 GEM_BUG_ON(!list_empty(blocks
));
80 if (flags
& I915_ALLOC_MIN_PAGE_SIZE
) {
81 min_order
= ilog2(mem
->min_page_size
) -
82 ilog2(mem
->mm
.chunk_size
);
85 if (flags
& I915_ALLOC_CONTIGUOUS
) {
86 size
= roundup_pow_of_two(size
);
87 min_order
= ilog2(size
) - ilog2(mem
->mm
.chunk_size
);
90 if (size
> mem
->mm
.size
)
93 n_pages
= size
>> ilog2(mem
->mm
.chunk_size
);
95 mutex_lock(&mem
->mm_lock
);
98 struct i915_buddy_block
*block
;
101 order
= fls(n_pages
) - 1;
102 GEM_BUG_ON(order
> mem
->mm
.max_order
);
103 GEM_BUG_ON(order
< min_order
);
106 block
= i915_buddy_alloc(&mem
->mm
, order
);
110 if (order
-- == min_order
)
111 goto err_free_blocks
;
114 n_pages
-= BIT(order
);
116 block
->private = mem
;
117 list_add_tail(&block
->link
, blocks
);
124 mutex_unlock(&mem
->mm_lock
);
128 intel_memory_region_free_pages(mem
, blocks
);
129 mutex_unlock(&mem
->mm_lock
);
133 struct i915_buddy_block
*
134 __intel_memory_region_get_block_buddy(struct intel_memory_region
*mem
,
135 resource_size_t size
,
138 struct i915_buddy_block
*block
;
142 ret
= __intel_memory_region_get_pages_buddy(mem
, size
, flags
, &blocks
);
146 block
= list_first_entry(&blocks
, typeof(*block
), link
);
147 list_del_init(&block
->link
);
151 int intel_memory_region_init_buddy(struct intel_memory_region
*mem
)
153 return i915_buddy_init(&mem
->mm
, resource_size(&mem
->region
),
157 void intel_memory_region_release_buddy(struct intel_memory_region
*mem
)
159 i915_buddy_fini(&mem
->mm
);
162 struct intel_memory_region
*
163 intel_memory_region_create(struct drm_i915_private
*i915
,
164 resource_size_t start
,
165 resource_size_t size
,
166 resource_size_t min_page_size
,
167 resource_size_t io_start
,
168 const struct intel_memory_region_ops
*ops
)
170 struct intel_memory_region
*mem
;
173 mem
= kzalloc(sizeof(*mem
), GFP_KERNEL
);
175 return ERR_PTR(-ENOMEM
);
178 mem
->region
= (struct resource
)DEFINE_RES_MEM(start
, size
);
179 mem
->io_start
= io_start
;
180 mem
->min_page_size
= min_page_size
;
183 mem
->avail
= mem
->total
;
185 mutex_init(&mem
->objects
.lock
);
186 INIT_LIST_HEAD(&mem
->objects
.list
);
187 INIT_LIST_HEAD(&mem
->objects
.purgeable
);
189 mutex_init(&mem
->mm_lock
);
192 err
= ops
->init(mem
);
197 kref_init(&mem
->kref
);
205 void intel_memory_region_set_name(struct intel_memory_region
*mem
,
206 const char *fmt
, ...)
211 vsnprintf(mem
->name
, sizeof(mem
->name
), fmt
, ap
);
215 static void __intel_memory_region_destroy(struct kref
*kref
)
217 struct intel_memory_region
*mem
=
218 container_of(kref
, typeof(*mem
), kref
);
220 if (mem
->ops
->release
)
221 mem
->ops
->release(mem
);
223 mutex_destroy(&mem
->mm_lock
);
224 mutex_destroy(&mem
->objects
.lock
);
228 struct intel_memory_region
*
229 intel_memory_region_get(struct intel_memory_region
*mem
)
231 kref_get(&mem
->kref
);
235 void intel_memory_region_put(struct intel_memory_region
*mem
)
237 kref_put(&mem
->kref
, __intel_memory_region_destroy
);
240 /* Global memory region registration -- only slight layer inversions! */
242 int intel_memory_regions_hw_probe(struct drm_i915_private
*i915
)
246 for (i
= 0; i
< ARRAY_SIZE(i915
->mm
.regions
); i
++) {
247 struct intel_memory_region
*mem
= ERR_PTR(-ENODEV
);
250 if (!HAS_REGION(i915
, BIT(i
)))
253 type
= MEMORY_TYPE_FROM_REGION(intel_region_map
[i
]);
255 case INTEL_MEMORY_SYSTEM
:
256 mem
= i915_gem_shmem_setup(i915
);
258 case INTEL_MEMORY_STOLEN
:
259 mem
= i915_gem_stolen_setup(i915
);
261 case INTEL_MEMORY_LOCAL
:
262 mem
= intel_setup_fake_lmem(i915
);
269 "Failed to setup region(%d) type=%d\n",
274 mem
->id
= intel_region_map
[i
];
276 mem
->instance
= MEMORY_INSTANCE_FROM_REGION(intel_region_map
[i
]);
278 i915
->mm
.regions
[i
] = mem
;
284 intel_memory_regions_driver_release(i915
);
288 void intel_memory_regions_driver_release(struct drm_i915_private
*i915
)
292 for (i
= 0; i
< ARRAY_SIZE(i915
->mm
.regions
); i
++) {
293 struct intel_memory_region
*region
=
294 fetch_and_zero(&i915
->mm
.regions
[i
]);
297 intel_memory_region_put(region
);
301 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
302 #include "selftests/intel_memory_region.c"
303 #include "selftests/mock_region.c"