///////////////////////////////////////////////////////////////////////////////
// $Source: x:/prj/tech/libsrc/lgalloc/RCS/poolimp.cpp $
// $Author: TOML $
// $Date: 1997/08/14 12:22:17 $
// $Revision: 1.8 $
//
// Implementation of pools
//

#include <lg.h>
#include <pool.h>
#include <poolimp.h>

#include <stktrace.h>

#include <assert.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>

#pragma code_seg("lgalloc")
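
// Overview: each cPoolAllocator hands out fixed-size elements carved from
// kPageSize pages and linked into a singly-threaded free list, so Alloc and
// Free are normally O(1) pointer swaps. A minimal usage sketch (hedged: the
// public interface is declared in pool.h, not shown here, so the exact call
// pattern below is inferred from this implementation):
//
//     cPoolAllocator NodePool;          // hypothetical pool instance
//     NodePool.Init(sizeof(MyNode));    // pick the fixed element size
//     void *p = NodePool.Alloc();       // pop one element (0 on exhaustion)
//     NodePool.Free(p);                 // push it back on the free list
//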
///////////////////////////////////////////////////////////////////////////////

#ifdef _WIN32

#include <virtmem.h>

#define kPoolCoreGrowSize (1024 * 64)
#define kPoolCoreMaxSize (1024 * 1024 * 64)

class cPoolCore
{
public:
    static void * AllocPage();
    static void FreePage(void *);

private:
    static void * gm_pCoreStack;
    static void * gm_pCoreStackLimit;

#ifdef DEBUG
    static ulong gm_nTotalCoreBytes;
#endif
};

void * cPoolCore::gm_pCoreStack;
void * cPoolCore::gm_pCoreStackLimit;
#ifdef DEBUG
ulong cPoolCore::gm_nTotalCoreBytes;
#endif

void * cPoolCore::AllocPage()
{
    // if (size % 2 != 0)
    //     MessageBox(NULL, "Only even allocations?!", NULL, MB_OK);

    if (!gm_pCoreStack)
    {
        gm_pCoreStack = VirtualAlloc(NULL, kPoolCoreMaxSize, MEM_RESERVE, PAGE_READWRITE);
        AssertMsg(gm_pCoreStack, "VirtualAlloc failed!");
        VirtualAlloc(gm_pCoreStack, kPoolCoreGrowSize, MEM_COMMIT, PAGE_READWRITE);
        gm_pCoreStackLimit = (uchar *)gm_pCoreStack + kPoolCoreGrowSize;
    }

    void * pReturn = gm_pCoreStack;
    gm_pCoreStack = (uchar *)gm_pCoreStack + kPageSize;

    while (gm_pCoreStack > gm_pCoreStackLimit)
    {
        if (VirtualAlloc(gm_pCoreStackLimit, kPoolCoreGrowSize, MEM_COMMIT, PAGE_READWRITE))
        {
            gm_pCoreStackLimit = (uchar *)gm_pCoreStackLimit + kPoolCoreGrowSize;
        }
        else
        {
            CriticalMsg("VirtualAlloc failed!");
            return NULL;
        }
    }

#ifdef DEBUG
    gm_nTotalCoreBytes += kPageSize;
#endif

    return pReturn;
}

#endif
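
// Growth strategy note: cPoolCore reserves kPoolCoreMaxSize (64MB) of address
// space once, then commits it in kPoolCoreGrowSize (64KB) slices as AllocPage
// advances gm_pCoreStack one kPageSize at a time. Pages are never recycled
// here; FreePage is declared above but no definition appears in this file.
// Rough arithmetic, assuming kPageSize is 4096 (its value comes from a header
// not shown here):
//
//     (1024 * 64) / 4096        = 16 AllocPage() calls per committed slice
//     (1024 * 1024 * 64) / 4096 = 16384 pages before the reservation runs out
//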
///////////////////////////////////////////////////////////////////////////////

struct sPoolBlock;

struct sFreePoolPart
{
    sPoolBlock * pNextFree;
};

#ifdef TRACK_ALLOCS

const int nStackLevels = 8;

struct sAllocPoolPart
{
    void *Stack[nStackLevels];
    sPoolBlock *pNextAlloc;
    sPoolBlock *pPrevAlloc;
};

struct sPoolBlock : sAllocPoolPart,
                    sFreePoolPart
#else
struct sPoolBlock : sFreePoolPart
#endif
{
};

#ifdef TRACK_ALLOCS
#define RealElemSize(size) ((size) + sizeof(sAllocPoolPart))
#else
#define RealElemSize(size) (size)
#endif

// Note: Side casts
#define PoolItemToClient(p) ((sFreePoolPart *)(p))
#define ClientToPoolItem(p) (((sPoolBlock *)((sFreePoolPart *)(p))))
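
// Layout note on the side casts: with TRACK_ALLOCS on, sPoolBlock derives
// from sAllocPoolPart first and sFreePoolPart second, so (on compilers that
// lay out non-virtual bases in declaration order) each element looks like:
//
//     +------------------------------+-------------------------------+
//     | sAllocPoolPart               | sFreePoolPart / client data   |
//     | Stack[nStackLevels],         | pNextFree overlays the first  |
//     | pNextAlloc, pPrevAlloc       | bytes of the client's object  |
//     +------------------------------+-------------------------------+
//     ^ sPoolBlock *                 ^ PoolItemToClient(p)
//
// PoolItemToClient casts past the tracking header so the client never sees
// it, and ClientToPoolItem reverses the adjustment. Without TRACK_ALLOCS
// both casts are no-ops and pNextFree simply reuses the client's storage
// while the element sits on the free list.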
#ifdef DUMP_POOLS
struct sPoolDumper
{
    ~sPoolDumper();
};

sPoolDumper::~sPoolDumper()
{
    cPoolAllocator::DumpPools();
}

sPoolDumper PoolDumper;
#endif
///////////////////////////////////////////////////////////////////////////////

cPoolAllocator * cPoolAllocator::m_pPools = 0;

///////////////////////////////////////
//
// Initializer
//

void cPoolAllocator::Init(size_t elemSize)
{
    m_nElementSize = elemSize;
    m_pFreeList = 0;

#ifdef TRACK_ALLOCS
    // Debug Support:
    m_nBlocks = 0;
    m_nInUse = 0;
    m_nAllocs = 0;
    m_nFrees = 0;
    m_nMaxTakes = 0;
    m_pAllocList = 0;
#endif

    if (m_nElementSize < sizeof(sFreePoolPart))
        m_nElementSize = sizeof(sFreePoolPart);

    m_nBlockingFactor = kPageSize / RealElemSize(m_nElementSize);

    m_pNextPool = m_pPools;
    m_pPools = this;
}
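
// Worked example of the blocking factor, assuming kPageSize is 4096 (its
// actual value lives in a header not shown here) and TRACK_ALLOCS is off:
// Init(12) keeps m_nElementSize at 12 (already >= sizeof(sFreePoolPart)),
// giving m_nBlockingFactor = 4096 / 12 = 341 elements per page. Note that
// Init also threads this pool onto the global m_pPools list and nothing
// ever unlinks it, so pools are expected to live for the whole program.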
///////////////////////////////////////
//
// Refill an empty freelist
//

void cPoolAllocator::ThreadNewBlock()
{
    DebugMsg1("Getting new block of %d", kPageSize);

    AssertMsg(!m_pFreeList, "ThreadNew called when not empty");

    // First get a new batch ...
    m_pFreeList = (sPoolBlock *)(PoolCoreAllocPage());

    if (!m_pFreeList)
        return;

    DebugMsg2("Threading New block: BlockSize = %u, elemSize = %u", kPageSize, RealElemSize(m_nElementSize));

#ifdef TRACK_ALLOCS
    m_nBlocks++;
#endif

    // ... Then start threading it, starting with the last element ...
    sPoolBlock *p = (sPoolBlock *)((char *)(m_pFreeList) + (m_nBlockingFactor - 1) * RealElemSize(m_nElementSize));

    sPoolBlock *pPrev = 0;
    for (;;)
    {
        p->pNextFree = pPrev;

        // ... and work back to the first ...
        if (p == m_pFreeList)
            break;

        pPrev = p;
        p = (sPoolBlock *)((char *)(p) - RealElemSize(m_nElementSize));
    }
}
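
// Threading example: for a fresh page holding N = m_nBlockingFactor elements
// e[0..N-1], the loop above starts at e[N-1] with pPrev == 0 and walks
// backward, so when it breaks at e[0] the list reads
//
//     m_pFreeList == e[0] -> e[1] -> ... -> e[N-1] -> 0
//
// i.e. the free list is threaded in ascending address order, and the first
// Alloc after a refill returns the base of the new page.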
///////////////////////////////////////
//
// Allocate one item from the pool
//

void *cPoolAllocator::Alloc()
{
    // Refill the free list if needed
    if (!m_pFreeList)
    {
        ThreadNewBlock();

        // Check for out-of-memory
        if (!m_pFreeList)
            return 0;
    }

    sPoolBlock *p = m_pFreeList;
    DebugMsg1("Alloc'd item @%#p", p);

    m_pFreeList = m_pFreeList->pNextFree;

#ifdef HEAP_CHECK
    // Check that the new node isn't still on the list somehow.
    int i = 0;
    for (sPoolBlock * p1 = m_pFreeList; p1; p1 = p1->pNextFree)
    {
        i++;
        Assert_((void *)(p1->pNextFree) != (void *)0xcdcdcdcd);
        AssertMsg(p1 != p, "Rethreading already freed");
    }
#endif

#ifdef TRACK_ALLOCS
    m_nInUse++;

    m_nAllocs++;
    if (m_nAllocs - m_nFrees > m_nMaxTakes)
        m_nMaxTakes = m_nAllocs - m_nFrees;

    // cross link to next element
    if (m_pAllocList)
        m_pAllocList->pPrevAlloc = p;

    p->pNextAlloc = m_pAllocList;

    // cross link to head
    p->pPrevAlloc = 0;
    m_pAllocList = p;

    // FillStackArray(3, nStackLevels, p->Stack);
#endif

    return PoolItemToClient(p);
}
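
// Caller's note: Alloc reports exhaustion by returning 0 rather than
// asserting, so callers are expected to check the result. A hedged sketch
// (g_NodePool and MyNode are illustrative names, not from this library):
//
//     MyNode *pNode = (MyNode *)g_NodePool.Alloc();
//     if (!pNode)
//         return FALSE;   // pool core exhausted; caller must cope
//
// Also note the HEAP_CHECK walk above makes every Alloc O(length of the
// free list); that cost is deliberate, debug-only paranoia.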
///////////////////////////////////////
//
// Put memory back on the free chain
//

void cPoolAllocator::Free(void *p)
{
    DebugMsg1("Returning item 0x%x to freelist", p);

#if defined(TRACK_ALLOCS) || defined(HEAP_CHECK)
    sPoolBlock *fp = ClientToPoolItem(p);

#ifdef TRACK_ALLOCS
    // Cross link next element to previous element
    if (fp->pNextAlloc)
        fp->pNextAlloc->pPrevAlloc = fp->pPrevAlloc;

    if (fp->pPrevAlloc)
        fp->pPrevAlloc->pNextAlloc = fp->pNextAlloc;
    else
        m_pAllocList = fp->pNextAlloc;

    fp->pPrevAlloc = fp->pNextAlloc = (sPoolBlock *)((void *) -1);

    AssertMsg(m_nInUse, "Freeing once more than Alloc'd");
    m_nInUse--;

    m_nFrees++;
#endif

#ifdef HEAP_CHECK
    // Prevent circular free list (and resulting memleak & corruption)
    for (sPoolBlock * p1 = m_pFreeList; p1; p1 = p1->pNextFree)
        AssertMsg1(p1 != fp, "Rethreading already freed 0x%x", this);
#endif

#endif

    ClientToPoolItem(p)->pNextFree = m_pFreeList;

    m_pFreeList = ClientToPoolItem(p);
}
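
// The HEAP_CHECK scan above catches double frees by walking the whole free
// list before rethreading, and the TRACK_ALLOCS branch poisons the alloc
// links with -1 so a node that is unlinked twice dereferences an obviously
// bad pointer instead of silently corrupting the allocation list.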
///////////////////////////////////////
//
// Dump allocated blocks
//

void cPoolAllocator::DumpAllocs()
{
#ifdef TRACK_ALLOCS
    if (!m_pAllocList)
    {
        DebugMsg("No outstanding allocs");
        return;
    }

    for (sPoolBlock * p = m_pAllocList; p; p = p->pNextAlloc)
    {
        DebugMsg7("[0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x]", p->Stack[0],
                  p->Stack[1], p->Stack[2],
                  p->Stack[3], p->Stack[4],
                  p->Stack[5], p->Stack[6]);
    }
#endif
}
///////////////////////////////////////
//
// Dump out all pools
//

void cPoolAllocator::DumpPools()
{
#ifdef DUMP_POOLS
    DebugMsg("DumpPools()");

    for (cPoolAllocator * p = m_pPools; p; p = p->m_pNextPool)
    {
        DebugMsg5("Pool: ES=%d BF=%d Bs=%lu A=%lu F=%lu",
                  p->m_nElementSize,
                  p->m_nBlockingFactor,
                  p->m_nBlocks,
                  p->m_nAllocs,
                  p->m_nFrees);
#ifdef TRACK_ALLOCS
        p->DumpAllocs();
#endif
    }
#endif
}

///////////////////////////////////////////////////////////////////////////////