revert between 56095 -> 55830 in arch
[AROS.git] / workbench / libs / mesa / src / gallium / drivers / nouveau / nouveau_mm.c
blob80de895bc92a9bf8e95776cbbf81a61c1cd3edf5
2 #include "util/u_inlines.h"
3 #include "util/u_memory.h"
4 #include "util/u_double_list.h"
6 #include "nouveau_screen.h"
7 #include "nouveau_mm.h"
9 #include "nouveau/nouveau_bo.h"
/* Power-of-two sub-allocator configuration: chunk sizes range from
 * 2^MM_MIN_ORDER (128 bytes) to 2^MM_MAX_ORDER (1 MiB); each order gets
 * its own bucket of slabs. */
#define MM_MIN_ORDER 7
#define MM_MAX_ORDER 20

#define MM_NUM_BUCKETS (MM_MAX_ORDER - MM_MIN_ORDER + 1)

#define MM_MIN_SIZE (1 << MM_MIN_ORDER)
#define MM_MAX_SIZE (1 << MM_MAX_ORDER)
/* One bucket per chunk order: slabs live on exactly one of three lists
 * depending on how many of their chunks are currently allocated. */
struct mm_bucket {
   struct list_head free; /* slabs with all chunks free */
   struct list_head used; /* slabs with some chunks allocated */
   struct list_head full; /* slabs with no free chunk left */
   int num_free;          /* NOTE(review): never read or written in this file — confirm users */
};
/* Per-device slab cache for small GPU buffer allocations. */
struct nouveau_mman {
   struct nouveau_device *dev;
   struct mm_bucket bucket[MM_NUM_BUCKETS];
   uint32_t storage_type; /* passed through to nouveau_bo_new_tile */
   uint32_t domain;       /* memory domain for the backing BOs */
   uint64_t allocated;    /* total bytes of slab BO memory obtained so far */
};
/* A slab: one buffer object carved into equally sized chunks, with a
 * bitmap tracking which chunks are free (bit set = chunk free). */
struct mm_slab {
   struct list_head head;      /* link in the bucket's free/used/full list */
   struct nouveau_bo *bo;      /* backing buffer object */
   struct nouveau_mman *cache; /* owning allocator */
   int order;                  /* log2 of the chunk size in bytes */
   int count;                  /* total number of chunks */
   int free;                   /* number of currently free chunks */
   uint32_t bits[0];           /* free-chunk bitmap, trailing variable-size array */
};
44 static int
45 mm_slab_alloc(struct mm_slab *slab)
47 int i, n, b;
49 if (slab->free == 0)
50 return -1;
52 for (i = 0; i < (slab->count + 31) / 32; ++i) {
53 b = ffs(slab->bits[i]) - 1;
54 if (b >= 0) {
55 n = i * 32 + b;
56 assert(n < slab->count);
57 slab->free--;
58 slab->bits[i] &= ~(1 << b);
59 return n;
62 return -1;
65 static INLINE void
66 mm_slab_free(struct mm_slab *slab, int i)
68 assert(i < slab->count);
69 slab->bits[i / 32] |= 1 << (i % 32);
70 slab->free++;
71 assert(slab->free <= slab->count);
74 static INLINE int
75 mm_get_order(uint32_t size)
77 int s = __builtin_clz(size) ^ 31;
79 if (size > (1 << s))
80 s += 1;
81 return s;
84 static struct mm_bucket *
85 mm_bucket_by_order(struct nouveau_mman *cache, int order)
87 if (order > MM_MAX_ORDER)
88 return NULL;
89 return &cache->bucket[MAX2(order, MM_MIN_ORDER) - MM_MIN_ORDER];
/* Bucket for an allocation of @size bytes, or NULL if too large to slab. */
static struct mm_bucket *
mm_bucket_by_size(struct nouveau_mman *cache, unsigned size)
{
   const int order = mm_get_order(size);

   return mm_bucket_by_order(cache, order);
}
98 /* size of bo allocation for slab with chunks of (1 << chunk_order) bytes */
99 static INLINE uint32_t
100 mm_default_slab_size(unsigned chunk_order)
102 static const int8_t slab_order[MM_MAX_ORDER - MM_MIN_ORDER + 1] =
104 12, 12, 13, 14, 14, 17, 17, 17, 17, 19, 19, 20, 21, 22
107 assert(chunk_order <= MM_MAX_ORDER && chunk_order >= MM_MIN_ORDER);
109 return 1 << slab_order[chunk_order - MM_MIN_ORDER];
/* Create a new slab for chunks of (1 << chunk_order) bytes and put it on
 * the matching bucket's free list.
 *
 * Returns PIPE_OK on success, PIPE_ERROR_OUT_OF_MEMORY if either the host
 * allocation or the GPU buffer allocation fails.
 */
static int
mm_slab_new(struct nouveau_mman *cache, int chunk_order)
{
   struct mm_slab *slab;
   int words, ret;
   const uint32_t size = mm_default_slab_size(chunk_order);

   /* one bitmap bit per chunk, rounded up to whole 32-bit words */
   words = ((size >> chunk_order) + 31) / 32;
   assert(words);

   slab = MALLOC(sizeof(struct mm_slab) + words * 4);
   if (!slab)
      return PIPE_ERROR_OUT_OF_MEMORY;

   /* mark every chunk free; surplus bits in the last word are harmless
    * because mm_slab_alloc stops once slab->free reaches 0 and ffs always
    * picks the lowest (in-range) set bit first */
   memset(&slab->bits[0], ~0, words * 4);

   slab->bo = NULL;
   ret = nouveau_bo_new_tile(cache->dev, cache->domain, 0, size,
                             0, cache->storage_type, &slab->bo);
   if (ret) {
      FREE(slab);
      return PIPE_ERROR_OUT_OF_MEMORY;
   }

   LIST_INITHEAD(&slab->head);

   slab->cache = cache;
   slab->order = chunk_order;
   slab->count = slab->free = size >> chunk_order;

   /* brand-new slab is entirely free -> bucket's free list */
   LIST_ADD(&slab->head, &mm_bucket_by_order(cache, chunk_order)->free);

   cache->allocated += size;

   debug_printf("MM: new slab, total memory = %llu KiB\n",
                (unsigned long long)(cache->allocated / 1024));

   return PIPE_OK;
}
152 /* @return token to identify slab or NULL if we just allocated a new bo */
153 struct nouveau_mm_allocation *
154 nouveau_mm_allocate(struct nouveau_mman *cache,
155 uint32_t size, struct nouveau_bo **bo, uint32_t *offset)
157 struct mm_bucket *bucket;
158 struct mm_slab *slab;
159 struct nouveau_mm_allocation *alloc;
160 int ret;
162 bucket = mm_bucket_by_size(cache, size);
163 if (!bucket) {
164 ret = nouveau_bo_new_tile(cache->dev, cache->domain, 0, size,
165 0, cache->storage_type, bo);
166 if (ret)
167 debug_printf("bo_new(%x, %x): %i\n", size, cache->storage_type, ret);
169 *offset = 0;
170 return NULL;
173 if (!LIST_IS_EMPTY(&bucket->used)) {
174 slab = LIST_ENTRY(struct mm_slab, bucket->used.next, head);
175 } else {
176 if (LIST_IS_EMPTY(&bucket->free)) {
177 mm_slab_new(cache, MAX2(mm_get_order(size), MM_MIN_ORDER));
179 slab = LIST_ENTRY(struct mm_slab, bucket->free.next, head);
181 LIST_DEL(&slab->head);
182 LIST_ADD(&slab->head, &bucket->used);
185 *offset = mm_slab_alloc(slab) << slab->order;
187 alloc = MALLOC_STRUCT(nouveau_mm_allocation);
188 if (!alloc)
189 return NULL;
191 nouveau_bo_ref(slab->bo, bo);
193 if (slab->free == 0) {
194 LIST_DEL(&slab->head);
195 LIST_ADD(&slab->head, &bucket->full);
198 alloc->next = NULL;
199 alloc->offset = *offset;
200 alloc->priv = (void *)slab;
202 return alloc;
205 void
206 nouveau_mm_free(struct nouveau_mm_allocation *alloc)
208 struct mm_slab *slab = (struct mm_slab *)alloc->priv;
209 struct mm_bucket *bucket = mm_bucket_by_order(slab->cache, slab->order);
211 mm_slab_free(slab, alloc->offset >> slab->order);
213 if (slab->free == 1) {
214 LIST_DEL(&slab->head);
216 if (slab->count > 1)
217 LIST_ADDTAIL(&slab->head, &bucket->used);
218 else
219 LIST_ADDTAIL(&slab->head, &bucket->free);
222 FREE(alloc);
/* Deferred-work entry point: void* callback signature wrapping
 * nouveau_mm_free(). */
void
nouveau_mm_free_work(void *data)
{
   nouveau_mm_free((struct nouveau_mm_allocation *)data);
}
231 struct nouveau_mman *
232 nouveau_mm_create(struct nouveau_device *dev, uint32_t domain,
233 uint32_t storage_type)
235 struct nouveau_mman *cache = MALLOC_STRUCT(nouveau_mman);
236 int i;
238 if (!cache)
239 return NULL;
241 cache->dev = dev;
242 cache->domain = domain;
243 cache->storage_type = storage_type;
244 cache->allocated = 0;
246 for (i = 0; i < MM_NUM_BUCKETS; ++i) {
247 LIST_INITHEAD(&cache->bucket[i].free);
248 LIST_INITHEAD(&cache->bucket[i].used);
249 LIST_INITHEAD(&cache->bucket[i].full);
252 return cache;
255 static INLINE void
256 nouveau_mm_free_slabs(struct list_head *head)
258 struct mm_slab *slab, *next;
260 LIST_FOR_EACH_ENTRY_SAFE(slab, next, head, head) {
261 LIST_DEL(&slab->head);
262 nouveau_bo_ref(NULL, &slab->bo);
263 FREE(slab);
267 void
268 nouveau_mm_destroy(struct nouveau_mman *cache)
270 int i;
272 if (!cache)
273 return;
275 for (i = 0; i < MM_NUM_BUCKETS; ++i) {
276 if (!LIST_IS_EMPTY(&cache->bucket[i].used) ||
277 !LIST_IS_EMPTY(&cache->bucket[i].full))
278 debug_printf("WARNING: destroying GPU memory cache "
279 "with some buffers still in use\n");
281 nouveau_mm_free_slabs(&cache->bucket[i].free);
282 nouveau_mm_free_slabs(&cache->bucket[i].used);
283 nouveau_mm_free_slabs(&cache->bucket[i].full);
286 FREE(cache);