SCDoc: improved regexps
[supercollider.git] / common / SC_AllocPool.cpp
blob8134978d3b9b3fa42837fdc5b916cb5685b1496c
1 /*
2 SuperCollider real time audio synthesis system
3 Copyright (c) 2002 James McCartney. All rights reserved.
4 http://www.audiosynth.com
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21 #include <string.h>
22 #include <stdexcept>
23 #include "SC_AllocPool.h"
24 #include "SC_BoundsMacros.h"
25 #include <assert.h>
/* Requests are `small' if both the corresponding and the next bin are small */
/* Debug hooks: in DEBUG builds each check_* / garbage_fill macro expands to
   the corresponding DoCheck* / DoGarbageFill validation routine; in release
   builds they all compile away to nothing. */
#if DEBUG
#define check_pool() DoCheckPool()
#define check_free_chunk(P) DoCheckFreeChunk(P)
#define check_inuse_chunk(P) DoCheckInUseChunk(P)
#define check_chunk(P) DoCheckChunk(P)
#define check_malloced_chunk(P,N) DoCheckAllocedChunk(P,N)
#define garbage_fill(P) DoGarbageFill(P)
#else
#define check_pool()
#define check_free_chunk(P)
#define check_inuse_chunk(P)
#define check_chunk(P)
#define check_malloced_chunk(P,N)
#define garbage_fill(P)
#endif

/* True when m sits on a kAlign boundary (low kAlignMask bits clear). */
#define aligned_OK(m) ((((unsigned long)(m)) & kAlignMask) == 0)
50 void* allocmem(AllocPool *pool, int32 size);
51 void* allocmem(AllocPool *pool, int32 size)
53 return pool->Alloc(size);
56 void* reallocmem(AllocPool *pool, void* ptr, int32 size);
57 void* reallocmem(AllocPool *pool, void* ptr, int32 size)
59 return pool->Realloc(ptr, size);
62 void freemem(AllocPool *pool, void* ptr);
63 void freemem(AllocPool *pool, void* ptr)
65 pool->Free(ptr);
68 void AllocPool::InitAlloc()
70 if (mAreaInitSize == 0) return;
72 /* alloc initial area */
73 NewArea(mAreaInitSize);
74 /* get chunk */
75 AllocAreaPtr area = mAreas;
76 AllocChunkPtr chunk = &area->mChunk;
77 LinkFree(chunk);
79 check_pool();
82 void AllocPool::InitBins()
84 for (int i=0; i<kNumAllocBins; ++i) {
85 mBins[i].BeEmpty();
87 for (int i=0; i<4; ++i) {
88 mBinBlocks[i] = 0;
92 AllocPool::AllocPool(NewAreaFunc inAllocArea, FreeAreaFunc inFreeArea,
93 size_t inAreaInitSize, size_t inAreaMoreSize)
95 InitBins();
96 mAreaInitSize = inAreaInitSize;
97 mAreaMoreSize = inAreaMoreSize;
98 mAllocArea = inAllocArea;
99 mFreeArea = inFreeArea;
100 mAreas = 0;
101 check_pool();
103 InitAlloc();
106 AllocPool::~AllocPool()
108 FreeAll();
111 void AllocPool::FreeAll()
113 check_pool();
114 AllocAreaPtr area = mAreas;
115 if (area) {
116 AllocAreaPtr firstArea = area;
117 do {
118 AllocAreaPtr nextarea = area->mNext;
119 (mFreeArea)(area->mUnalignedPointerToThis);
120 area = nextarea;
121 } while (area != firstArea);
122 mAreas = NULL;
124 InitBins();
125 check_pool();
128 void AllocPool::FreeAllInternal()
130 check_pool();
131 InitBins();
133 AllocAreaPtr area = mAreas;
134 if (area) {
135 AllocAreaPtr firstArea = area;
136 do {
137 AllocAreaPtr nextarea = area->mNext;
138 size_t size = area->mSize;
139 AllocChunkPtr chunk = &area->mChunk;
140 chunk->SetSizeFree(size);
141 chunk->SetNeighborsInUse(size);
142 LinkFree(chunk);
143 area = nextarea;
144 } while (area != firstArea);
146 check_pool();
149 void AllocPool::Reinit()
151 FreeAll();
152 InitAlloc();
155 void AllocPool::Free(void *inPtr)
157 #ifdef DISABLE_MEMORY_POOLS
158 free(inPtr);
159 return;
160 #endif
162 check_pool();
163 if (inPtr == 0) return; /* free(0) has no effect */
165 AllocChunkPtr chunk = MemToChunk(inPtr);
167 check_inuse_chunk(chunk);
168 garbage_fill(chunk);
170 size_t size = chunk->Size();
172 if (!chunk->PrevInUse()) /* consolidate backward */
174 size_t prevSize = chunk->PrevSize();
175 chunk = chunk->ChunkAtOffset(0L-prevSize);
176 size += prevSize;
177 UnlinkFree(chunk);
180 AllocChunkPtr next = chunk->ChunkAtOffset(size);
181 if (!next->InUse()) /* consolidate forward */
183 size += next->Size();
184 UnlinkFree(next);
187 chunk->SetSizeFree(size);
188 if (mAreaMoreSize && chunk->IsArea()) {
189 // whole area is free
190 FreeArea(chunk);
191 } else {
192 LinkFree(chunk);
194 check_pool();
199 AllocAreaPtr AllocPool::NewArea(size_t inAreaSize)
201 void *ptr = (AllocAreaPtr)(mAllocArea)(inAreaSize + kAreaOverhead);
203 if (ptr == NULL) {
204 throw std::runtime_error("Could not allocate new area");
206 // AllocAreaPtr area = (AllocAreaPtr)((unsigned long)ptr & ~kAlignMask);
207 AllocAreaPtr area = (AllocAreaPtr)(((unsigned long)ptr + kAlignMask) & ~kAlignMask);
208 assert((area >= ptr) && ((void*)((unsigned long)area & ~kAlignMask) == area));
210 area->mUnalignedPointerToThis = ptr;
212 /* link in area */
213 if (mAreas) {
214 area->mNext = mAreas;
215 area->mPrev = mAreas->mPrev;
216 area->mNext->mPrev = area;
217 area->mPrev->mNext = area;
218 } else {
219 area->mNext = area;
220 area->mPrev = area;
222 mAreas = area;
224 /* set area size */
225 area->mSize = inAreaSize;
226 area->mChunk.BeEmpty();
227 area->mChunk.SetNeighborsInUse(inAreaSize);
228 area->mChunk.SetSizeFree(inAreaSize);
230 return area;
233 void AllocPool::FreeArea(AllocChunkPtr chunk)
235 AllocAreaPtr area = (AllocAreaPtr)((char*)chunk - sizeof(AllocAreaHdr));
237 if (area->mNext == area) {
238 mAreas = NULL;
239 } else {
240 /* unlink area */
241 mAreas = area->mPrev->mNext = area->mNext;
242 area->mNext->mPrev = area->mPrev;
245 (mFreeArea)(area->mUnalignedPointerToThis);
249 size_t AllocPool::TotalFree()
251 size_t total = 0;
252 for (int i=0; i<kNumAllocBins; ++i) {
253 AllocChunkPtr bin = mBins + i;
254 if (bin->Prev() != bin) {
255 for (AllocChunkPtr candidate = bin->Prev(); candidate != bin; candidate = candidate->Prev()) {
256 total += candidate->Size();
260 return total;
263 size_t AllocPool::LargestFreeChunk()
265 int word = 0;
266 for (int i=3; i>=0; --i) {
267 if (mBinBlocks[i]) {
268 word = i;
269 break;
272 int binBits = (int)mBinBlocks[word];
273 int bitPosition = NUMBITS(binBits) - 1;
274 int index = (word << 5) + bitPosition;
275 AllocChunkPtr bin = mBins + index;
276 //postbuf("** %p %p %p %p\n", mBinBlocks[0], mBinBlocks[1], mBinBlocks[2], mBinBlocks[3]);
277 //postbuf("%d %d %d %p %p %p\n", word, bitPosition, index, binBits, bin->Prev(), bin->Next());
279 AllocChunkPtr candidate;
280 size_t maxsize = 0;
281 for (candidate = bin->Prev(); candidate != bin; candidate = candidate->Prev()) {
282 size_t candidate_size = candidate->Size();
283 maxsize = sc_max(maxsize, candidate_size);
284 //postbuf(" %d %d\n", maxsize, candidate_size);
287 /*for (int i=0; i<kNumAllocBins; ++i) {
288 bin = mBins + i;
289 if (bin->Prev() != bin) {
290 postbuf("* %d %d\n", i, bin->Prev()->Size());
293 return maxsize;
296 void* AllocPool::Alloc(size_t inReqSize)
298 #ifdef DISABLE_MEMORY_POOLS
299 return malloc(inReqSize);
300 #endif
302 // OK it has a lot of gotos, but these remove a whole lot of common code
303 // that was obfuscating the original version of this function.
304 // So here I am choosing the OnceAndOnlyOnce principle over the caveats on gotos.
305 // The gotos only jump forward and only to the exit paths of the function
307 // The old bin block scheme has been replaced by 4 x 32 bit words so that each bin has a bit
308 // and the next bin is found using a count leading zeroes instruction. Much faster.
309 // Also now each bin's flag can be kept accurate. This simplifies the searching code quite a bit.
311 // Also fwiw, changed 'victim' in the original code to 'candidate'. 'victim' just bothered me.
314 AllocChunkPtr candidate; /* inspected/selected chunk */
315 size_t candidate_size; /* its size */
316 AllocChunkPtr remainder; /* remainder from a split */
317 int32 remainder_size; /* its size */
318 AllocAreaPtr area;
319 size_t areaSize;
321 size_t size = RequestToSize(inReqSize);
322 int index = BinIndex(size);
323 assert(index < 128);
324 AllocChunkPtr bin = mBins + index;
326 check_pool();
328 /* Check for exact match in a bin */
329 if (index < kMaxSmallBin) { /* Faster version for small requests */
330 /* No traversal or size check necessary for small bins. */
331 candidate = bin->Prev();
333 /* Also scan the next one, since it would have a remainder < kMinAllocSize */
334 if (candidate == bin) candidate = (++bin)->Prev();
335 if (candidate != bin) {
336 candidate_size = candidate->Size();
337 goto found_exact_fit;
340 index += 2; /* Set for bin scan below. We've already scanned 2 bins. */
341 } else {
342 for (candidate = bin->Prev(); candidate != bin; candidate = candidate->Prev()) {
344 candidate_size = candidate->Size();
345 remainder_size = (int)(candidate_size - size);
346 if (remainder_size >= (int32)kMinAllocSize) { /* too big */
347 --index; /* adjust to rescan below after checking last remainder */
348 break;
349 } else if (remainder_size >= 0) { /* exact fit */
350 goto found_exact_fit;
353 ++index;
356 for(; (index = NextFullBin(index)) >= 0; ++index) {
357 bin = mBins + index;
359 /* Find and use first big enough chunk ... */
360 for (candidate = bin->Prev(); candidate != bin; candidate = candidate->Prev()) {
361 candidate_size = candidate->Size();
362 remainder_size = (int)(candidate_size - size);
363 if (remainder_size >= (int32)kMinAllocSize) { /* split */
364 UnlinkFree(candidate);
365 goto found_bigger_fit;
366 } else if (remainder_size >= 0) goto found_exact_fit;
369 check_pool();
371 if (mAreaMoreSize == 0) { /* pool has a non-growable area */
372 if (mAreas != NULL /* fixed size area exhausted */
373 || size > mAreaInitSize) /* too big anyway */
374 goto found_nothing;
375 areaSize = mAreaInitSize;
376 goto split_new_area;
379 if (size > mAreaMoreSize) {
380 areaSize = size;
381 goto whole_new_area;
382 } else {
383 areaSize = mAreaMoreSize;
384 goto split_new_area;
387 // exit paths:
388 found_nothing:
389 //ipostbuf("alloc failed. size: %d\n", inReqSize);
390 throw std::runtime_error("alloc failed, increase server's memory allocation (e.g. via ServerOptions)");
392 whole_new_area:
393 //ipostbuf("whole_new_area\n");
394 area = NewArea(areaSize);
395 if (!area) return 0;
396 candidate = &area->mChunk;
397 candidate_size = candidate->Size();
398 goto return_chunk;
400 split_new_area:
401 //ipostbuf("split_new_area\n");
402 area = NewArea(areaSize);
403 if (!area) return 0;
404 candidate = &area->mChunk;
405 candidate_size = candidate->Size();
406 remainder_size = (int)(areaSize - size);
407 // FALL THROUGH
408 found_bigger_fit:
409 //ipostbuf("found_bigger_fit\n");
410 remainder = candidate->ChunkAtOffset(size);
411 remainder->SetSizeFree(remainder_size);
412 candidate_size -= remainder_size;
413 LinkFree(remainder);
414 goto return_chunk;
416 found_exact_fit:
417 check_pool();
418 UnlinkFree(candidate);
419 // FALL THROUGH
420 return_chunk:
422 candidate->SetSizeInUse(candidate_size);
423 check_malloced_chunk(candidate, candidate_size);
424 check_pool();
425 garbage_fill(candidate);
426 return candidate->ToPtr();
430 void* AllocPool::Realloc(void* inPtr, size_t inReqSize)
432 #ifdef DISABLE_MEMORY_POOLS
433 return realloc(inPtr, inReqSize);
434 #endif
437 void *outPtr;
438 AllocChunkPtr prev;
439 check_pool();
440 bool docopy = false;
442 /* realloc of null is supposed to be same as malloc */
443 if (inPtr == 0) return Alloc(inReqSize);
445 AllocChunkPtr oldChunk = MemToChunk(inPtr);
446 AllocChunkPtr newChunk = oldChunk;
447 size_t oldsize = oldChunk->Size();
448 size_t newsize = oldsize;
449 size_t size = RequestToSize(inReqSize);
450 size_t nextsize, prevsize;
451 check_inuse_chunk(oldChunk);
453 if (oldsize < size) {
454 /* Try expanding forward */
455 AllocChunkPtr next = oldChunk->NextChunk();
456 if (!next->InUse()) {
457 nextsize = next->Size();
458 /* Forward into next chunk */
459 if (nextsize + newsize >= size) {
460 UnlinkFree(next);
461 newsize += nextsize;
462 goto split;
464 } else {
465 next = 0;
466 nextsize = 0;
470 /* Try shifting backwards. */
471 prev = oldChunk->PrevChunk();
472 if (!prev->InUse()) {
473 prevsize = prev->Size();
475 /* try forward + backward first to save a later consolidation */
476 if (next != 0) {
477 /* into next chunk */
478 if (nextsize + prevsize + newsize >= size) {
479 newsize += nextsize + prevsize;
480 UnlinkFree(next);
481 goto alloc_prev;
485 /* backward only */
486 if (prev != 0 && prevsize + newsize >= size) {
487 newsize += prevsize;
488 goto alloc_prev;
492 /* Must allocate */
494 outPtr = Alloc(inReqSize);
496 check_pool();
497 if (outPtr == 0) {
498 //ipostbuf("realloc failed. size: %d\n", inReqSize);
499 throw std::runtime_error("realloc failed, increase server's memory allocation (e.g. via ServerOptions)");
502 /* Otherwise copy, free, and exit */
503 memcpy(outPtr, inPtr, oldsize - sizeof(AllocChunk));
504 Free(inPtr);
505 return outPtr;
506 } else goto split;
508 alloc_prev:
509 UnlinkFree(prev);
510 newChunk = prev;
511 docopy = true;
512 // FALL THROUGH
513 split: /* split off extra room in old or expanded chunk */
514 //check_pool();
515 if (newsize - size >= kMinAllocSize) { /* split off remainder */
516 size_t remainder_size = newsize - size;
517 AllocChunkPtr remainder = newChunk->ChunkAtOffset(size);
518 remainder->SetSizeInUse(remainder_size);
519 newChunk->SetSizeInUse(size);
520 Free(remainder->ToPtr()); /* let free() deal with it */
521 } else {
522 newChunk->SetSizeInUse(newsize);
524 outPtr = newChunk->ToPtr();
525 if (docopy) {
526 memmove(outPtr, inPtr, oldsize - sizeof(AllocChunk));
528 check_inuse_chunk(newChunk);
529 check_pool();
530 garbage_fill(newChunk);
531 return outPtr;
534 void AllocPool::LinkFree(AllocChunkPtr inChunk)
536 size_t size = inChunk->Size();
537 int index = BinIndex(size);
539 AllocChunkPtr bin = mBins + index;
541 if (index < kNumSmallBins || bin->IsEmpty()) {
542 inChunk->InsertAfter(bin);
543 MarkBinBlock(index);
544 } else {
545 AllocChunkPtr link = bin->Next();
546 while (link != bin && size < link->Size()) link = link->Next();
547 inChunk->InsertBefore(link);
551 void AllocPool::DoCheckArea(AllocAreaPtr area)
553 assert(area->mChunk.PrevInUse());
555 AllocChunkPtr p = &area->mChunk;
556 while (p->mSize != kChunkInUse) {
557 if (p->InUse()) {
558 DoCheckInUseChunk(p);
559 } else {
560 DoCheckFreeChunk(p);
562 p = p->NextChunk();
566 void AllocPool::DoCheckBin(AllocChunkPtr bin, long index)
568 AllocChunkPtr p = bin->Next();
570 while (p != bin) {
571 assert(BinIndex(p->Size()) == index);
572 DoCheckFreeChunk(p);
573 p = p->Next();
578 void AllocPool::DoCheckPool()
580 AllocAreaPtr area = mAreas;
581 if (area) {
582 do {
583 AllocAreaPtr nextarea = area->mNext;
584 DoCheckArea(area);
585 area = nextarea;
586 } while (area != mAreas);
589 for (int i=0; i<kNumAllocBins; ++i) {
590 AllocChunkPtr b = mBins + i;
591 DoCheckBin(b, i);
596 void AllocPool::DoCheckChunk(AllocChunkPtr p)
598 #ifndef NDEBUG
599 size_t size = p->Size();
600 //size_t maxsize = mAreaInitSize > mAreaMoreSize ? mAreaInitSize : mAreaMoreSize;
601 // assert(size < maxsize);
603 AllocChunkPtr next = p->ChunkAtOffset(size);
604 #endif
605 assert(p->mSize == next->mPrevSize);
609 void AllocPool::DoCheckFreeChunk(AllocChunkPtr p)
611 size_t size = p->Size();
612 #ifndef NDEBUG
613 AllocChunkPtr next = p->ChunkAtOffset(size);
614 #endif
615 DoCheckChunk(p);
617 /* Check whether it claims to be free ... */
618 assert(!p->InUse());
620 /* Unless an end marker, must have OK fields */
621 if (size >= kMinAllocSize)
623 assert((size & kAlignMask) == 0);
624 assert(aligned_OK(p->ToPtr()));
625 /* ... and is fully consolidated */
626 assert(p->PrevInUse());
627 assert(next->InUse());
629 /* ... and has minimally sane links */
630 assert(p->Next()->Prev() == p);
631 assert(p->Prev()->Next() == p);
633 else /* end markers are always of size 0 */
634 assert(size == 0);
637 void AllocPool::DoCheckInUseChunk(AllocChunkPtr p)
639 size_t size = p->Size();
640 AllocChunkPtr next = p->NextChunk();
642 DoCheckChunk(p);
644 /* Check whether it claims to be in use ... */
645 assert(p->InUse());
647 /* ... and is surrounded by OK chunks.
648 Since more things can be checked with free chunks than inuse ones,
649 if an inuse chunk borders them and debug is on, it's worth doing them.
651 if (!p->PrevInUse()) {
652 size_t prevsize = p->PrevSize();
653 if (prevsize > 0) {
654 AllocChunkPtr prv = p->PrevChunk();
655 assert(prv->NextChunk() == p);
656 DoCheckFreeChunk(prv);
659 if (!p->ChunkAtOffset(size)->InUse()) {
660 DoCheckFreeChunk(next);
664 void AllocPool::DoCheckAllocedChunk(AllocChunkPtr p, size_t s)
666 #ifndef NDEBUG
667 size_t size = p->Size();
668 long room = size - s;
669 #endif
671 DoCheckInUseChunk(p);
673 /* Legal size ... */
674 assert(size >= kMinAllocSize);
675 assert((size & kAlignMask) == 0);
676 assert(room >= 0);
677 assert(room < kMinAllocSize);
679 /* ... and alignment */
680 assert(aligned_OK(p->ToPtr()));
683 /* ... and was allocated at front of an available chunk */
684 assert(p->PrevInUse()); // huh?? - jmc
688 void AllocPool::DoGarbageFill(AllocChunkPtr p)
690 long size = (p->Size() - sizeof(AllocChunk));
691 DoGarbageFill(p, size);
694 void AllocPool::DoGarbageFill(AllocChunkPtr p, long size)
696 size /= sizeof(long);
697 long *ptr = (long*)p->ToPtr();
698 for (int i=0; i<size; ++i) {
699 ptr[i] = 0xA3A56955;