/*
 * Copyright 2008, Axel Dörfler, axeld@pinc-software.de.
 * Copyright 2002/03, Thomas Kurschel. All rights reserved.
 *
 * Distributed under the terms of the MIT License.
 */
/*!	Deadlock-safe allocation of locked memory.

	Allocation/freeing is optimized for speed. The count of <sem>
	is the number of free blocks and thus would have to be modified
	by each alloc() and free(). As this count is only crucial if
	an allocation is waiting for a free block, <sem> is only
	updated on demand - the correct number of free blocks is
	stored in <free_blocks>. There are only three cases where
	<sem> is updated:

	- if an allocation fails because there is no free block left;
	  in this case, <num_waiting> is increased by one and then the
	  thread makes <sem> up-to-date and waits for a free block
	  via <sem> in one step; finally, <num_waiting> is decreased
	  by one again
	- if a block is freed and <num_waiting> is non-zero;
	  here, the count of <sem> is updated to release threads waiting
	  for a free block
	- if a new chunk of blocks is allocated;
	  this is handled like the previous case
*/
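/* Usage sketch (not part of this module): a driver would obtain the
 * interface via get_module() and let the pool manage its locked request
 * blocks. The interface follows <drivers/locked_pool.h>; the driver-side
 * structure and the parameter values below are hypothetical.
 *
 *	static locked_pool_interface *sPoolInterface;
 *	static locked_pool_cookie sRequestPool;
 *
 *	typedef struct my_request {
 *		struct my_request *next;	// used by the pool as free-list link
 *		// ... driver-specific data ...
 *	} my_request;
 *
 *	status_t
 *	my_driver_init(void)
 *	{
 *		status_t status = get_module(LOCKED_POOL_MODULE_NAME,
 *			(module_info **)&sPoolInterface);
 *		if (status < B_OK)
 *			return status;
 *
 *		// alignment is passed as a mask: 3 means 4-byte alignment
 *		sRequestPool = sPoolInterface->create(sizeof(my_request), 3,
 *			offsetof(my_request, next), B_PAGE_SIZE, 64, 4,
 *			"my_request pool", B_FULL_LOCK, NULL, NULL, NULL);
 *		return sRequestPool != NULL ? B_OK : B_NO_MEMORY;
 *	}
 *
 *	// my_request *request = sPoolInterface->alloc(sRequestPool);
 *	// sPoolInterface->free(sRequestPool, request);
 *	// ...
 *	// sPoolInterface->destroy(sRequestPool);
 *	// put_module(LOCKED_POOL_MODULE_NAME);
 */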
#include <KernelExport.h>
#include <drivers/locked_pool.h>
#include <lock.h>
#include "dl_list.h"

#include <stdlib.h>
#include <string.h>
#include <module.h>
//#define TRACE_LOCKED_POOL
#ifdef TRACE_LOCKED_POOL
#	define TRACE(x) dprintf x
#else
#	define TRACE(x) ;
#endif
typedef struct locked_pool {
	struct mutex mutex;			// to be used whenever some variable of the first
								// block of this structure is read or modified
	int free_blocks;			// # of free blocks
	int num_waiting;			// # of waiting allocations
	void *free_list;			// list of free blocks
	int next_ofs;				// offset of next-pointer in block

	sem_id sem;					// count = number of free blocks

	char *name;					// name passed to create_area()
	size_t header_size;			// effective size of chunk header
	struct chunk_header *chunks;	// list of chunks
	size_t block_size;			// size of memory block
	uint32 lock_flags;			// flags for lock_memory()
	int min_free_blocks;		// minimum number of free blocks
	int num_blocks;				// current number of blocks
	int max_blocks;				// maximum number of blocks
	int enlarge_by;				// number of blocks to enlarge pool by
	size_t alignment;			// block alignment restrictions
	locked_pool_add_hook add_hook;		// user hooks
	locked_pool_remove_hook remove_hook;
	void *hook_arg;				// argument for user hooks

	struct locked_pool *prev, *next;	// global cyclic list
} locked_pool;
// header of memory chunk
typedef struct chunk_header {
	struct chunk_header *next;	// next chunk in the pool's chunk list
	area_id area;				// underlying area
	int num_blocks;				// size in blocks
} chunk_header;
// global list of pools
static locked_pool *sLockedPools;
// mutex to protect sLockedPools
static mutex sLockedPoolsLock;
// true, if thread should shut down
static bool sShuttingDown;
// background thread to enlarge pools
static thread_id sEnlargerThread;
// semaphore to wake up enlarger thread
static sem_id sEnlargerSemaphore;
// macro to access next-pointer in free block
#define NEXT_PTR(pool, a) ((void **)(((char *)a) + pool->next_ofs))
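// Illustration (hypothetical client structure): if a driver passes
// offsetof(my_request, next) as <next_ofs>, NEXT_PTR(pool, block) yields
// the address of that block's "next" field, i.e. free blocks are chained
// through memory inside the blocks themselves and need no extra storage.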
/*!	Enlarge memory pool by <numBlocks> blocks */
static status_t
enlarge_pool(locked_pool *pool, int numBlocks)
{
	chunk_header *chunk;
	size_t chunkSize;
	area_id area;
	status_t status;
	void *next;
	int numWaiting;
	int i;
	void *block, *lastBlock;
	TRACE(("enlarge_pool()\n"));

	// get memory directly from VM; we don't let user code access memory
	chunkSize = numBlocks * pool->block_size + pool->header_size;
	chunkSize = (chunkSize + B_PAGE_SIZE - 1) & ~(B_PAGE_SIZE - 1);
	status = area = create_area(pool->name,
		(void **)&chunk, B_ANY_KERNEL_ADDRESS, chunkSize,
		pool->lock_flags, 0);
	if (status < B_OK) {
		dprintf("cannot enlarge pool (%s)\n", strerror(status));
		// TODO: we should wait a bit and try again!
		return status;
	}
	chunk->area = area;
	chunk->num_blocks = numBlocks;
	// create free_list and call add-hook
	// very important: we first create a free list within the chunk,
	// going from lower to higher addresses; at the end of the loop,
	// "next" points to the head of the list and "lastBlock" to the
	// last list entry
	next = NULL;

	lastBlock = (char *)chunk + pool->header_size
		+ (numBlocks - 1) * pool->block_size;

	for (i = 0, block = lastBlock; i < numBlocks;
			++i, block = (char *)block - pool->block_size) {
		if (pool->add_hook) {
			if ((status = pool->add_hook(block, pool->hook_arg)) < B_OK)
				break;
		}

		*NEXT_PTR(pool, block) = next;
		next = block;
	}
	if (i < numBlocks) {
		// oops - pre-init failed somehow
		// call remove-hook for all blocks that we called add-hook for
		int j;

		for (block = lastBlock, j = 0; j < i; ++j,
				block = (char *)block - pool->block_size) {
			if (pool->remove_hook)
				pool->remove_hook(block, pool->hook_arg);
		}

		// destroy area and give up
		delete_area(chunk->area);
		return status;
	}
	// add new blocks to pool
	mutex_lock(&pool->mutex);

	// see remarks about initialising list within chunk
	*NEXT_PTR(pool, lastBlock) = pool->free_list;
	pool->free_list = next;

	chunk->next = pool->chunks;
	pool->chunks = chunk;

	pool->num_blocks += numBlocks;
	pool->free_blocks += numBlocks;
	TRACE(("done - num_blocks=%d, free_blocks=%d, num_waiting=%d\n",
		pool->num_blocks, pool->free_blocks, pool->num_waiting));

	numWaiting = min_c(pool->num_waiting, numBlocks);
	pool->num_waiting -= numWaiting;

	mutex_unlock(&pool->mutex);

	// release threads that are waiting for free blocks
	release_sem_etc(pool->sem, numWaiting, 0);

	return B_OK;
}
/*!	Background thread that adjusts pool size */
static int32
enlarger_thread(void *arg)
{
	while (true) {
		locked_pool *pool;

		acquire_sem(sEnlargerSemaphore);

		if (sShuttingDown)
			break;

		// protect traversing of global list and
		// block destroy_pool() so it cannot clean up a pool we are enlarging
		mutex_lock(&sLockedPoolsLock);

		for (pool = sLockedPools; pool; pool = pool->next) {
			int num_free;

			// this mutex is probably not necessary (at least on 80x86)
			// but I'm not sure about atomicity on other architectures
			// (anyway - this routine is not performance critical)
			mutex_lock(&pool->mutex);
			num_free = pool->free_blocks;
			mutex_unlock(&pool->mutex);

			// perhaps blocks got freed meanwhile, i.e. pool is large enough
			if (num_free > pool->min_free_blocks)
				continue;

			// enlarge pool as much as possible;
			// never create more blocks than defined - the caller may have
			// a good reason for choosing the limit
			if (pool->num_blocks < pool->max_blocks) {
				enlarge_pool(pool,
					min(pool->enlarge_by, pool->max_blocks - pool->num_blocks));
			}
		}

		mutex_unlock(&sLockedPoolsLock);
	}

	return 0;
}
/*!	Free all chunks belonging to pool */
static void
free_chunks(locked_pool *pool)
{
	chunk_header *chunk, *next;

	for (chunk = pool->chunks; chunk; chunk = next) {
		int i;
		void *block, *lastBlock;

		next = chunk->next;

		lastBlock = (char *)chunk + pool->header_size
			+ (chunk->num_blocks - 1) * pool->block_size;

		// don't forget to call remove-hook
		// (iterate over this chunk's blocks only, not the whole pool)
		for (i = 0, block = lastBlock; i < chunk->num_blocks;
				++i, block = (char *)block - pool->block_size) {
			if (pool->remove_hook)
				pool->remove_hook(block, pool->hook_arg);
		}

		delete_area(chunk->area);
	}
}
/*!	Global init, executed when module is loaded */
static status_t
init_locked_pool(void)
{
	status_t status;

	mutex_init(&sLockedPoolsLock, "locked_pool_global_list");

	status = sEnlargerSemaphore = create_sem(0,
		"locked_pool_enlarger");
	if (status < B_OK)
		goto err1;

	sShuttingDown = false;

	status = sEnlargerThread = spawn_kernel_thread(enlarger_thread,
		"locked_pool_enlarger", B_NORMAL_PRIORITY, NULL);
	if (status < B_OK)
		goto err2;

	resume_thread(sEnlargerThread);
	return B_OK;

err2:
	delete_sem(sEnlargerSemaphore);
err1:
	mutex_destroy(&sLockedPoolsLock);
	return status;
}
/*!	Global uninit, executed before module is unloaded */
static status_t
uninit_locked_pool(void)
{
	sShuttingDown = true;

	release_sem(sEnlargerSemaphore);

	wait_for_thread(sEnlargerThread, NULL);

	delete_sem(sEnlargerSemaphore);
	mutex_destroy(&sLockedPoolsLock);

	return B_OK;
}
// #pragma mark - Module API
/*!	Alloc memory from pool */
static void *
pool_alloc(locked_pool *pool)
{
	void *block;

	TRACE(("pool_alloc()\n"));

	mutex_lock(&pool->mutex);

	--pool->free_blocks;

	if (pool->free_blocks >= 0) {
		// there are free blocks - grab one

		TRACE(("freeblocks=%d, free_list=%p\n",
			pool->free_blocks, pool->free_list));

		block = pool->free_list;
		pool->free_list = *NEXT_PTR(pool, block);

		TRACE(("new free_list=%p\n", pool->free_list));

		mutex_unlock(&pool->mutex);
		return block;
	}
	// entire pool is in use
	// we should do a ++free_blocks here, but this can lead to a race
	// condition: when we wait for <sem> and a block gets released
	// and another thread calls alloc() before we grab the freshly freed
	// block, the other thread could overtake us and grab the free block
	// instead! by leaving <free_blocks> at a negative value, the other
	// thread cannot see the free block and thus will leave it for us

	// tell them we are waiting on the semaphore
	++pool->num_waiting;

	TRACE(("%d waiting allocs\n", pool->num_waiting));

	mutex_unlock(&pool->mutex);

	// wake up the background thread so it can enlarge the pool if needed
	release_sem_etc(sEnlargerSemaphore, 1, B_DO_NOT_RESCHEDULE);
	// make the semaphore up-to-date and wait until a block is available
	acquire_sem(pool->sem);
	mutex_lock(&pool->mutex);

	TRACE(("continuing alloc (%d free blocks)\n", pool->free_blocks));

	block = pool->free_list;
	pool->free_list = *NEXT_PTR(pool, block);

	mutex_unlock(&pool->mutex);
	return block;
}
/*!	Return a memory block to the pool */
static void
pool_free(locked_pool *pool, void *block)
{
	TRACE(("pool_free()\n"));

	mutex_lock(&pool->mutex);

	// add the block to the free list
	*NEXT_PTR(pool, block) = pool->free_list;
	pool->free_list = block;

	++pool->free_blocks;

	TRACE(("freeblocks=%d, free_list=%p\n", pool->free_blocks,
		pool->free_list));

	if (pool->num_waiting == 0) {
		// if no one is waiting, this is it
		mutex_unlock(&pool->mutex);
		return;
	}

	// someone is waiting on the semaphore

	TRACE(("%d waiting allocs\n", pool->num_waiting));
	pool->num_waiting--;

	mutex_unlock(&pool->mutex);

	// now <sem> is up-to-date and waiting allocations can be continued
	release_sem(pool->sem);
}
static locked_pool *
create_pool(int block_size, int alignment, int next_ofs,
	int chunkSize, int max_blocks, int min_free_blocks,
	const char *name, uint32 lock_flags,
	locked_pool_add_hook add_hook,
	locked_pool_remove_hook remove_hook, void *hook_arg)
{
	locked_pool *pool;
	status_t status;

	TRACE(("create_pool()\n"));
	pool = (locked_pool *)malloc(sizeof(*pool));
	if (pool == NULL)
		return NULL;

	memset(pool, 0, sizeof(*pool));

	mutex_init(&pool->mutex, "locked_pool");

	if ((status = pool->sem = create_sem(0, "locked_pool")) < 0)
		goto err1;

	if ((pool->name = strdup(name)) == NULL) {
		status = B_NO_MEMORY;
		goto err2;
	}
	pool->alignment = alignment;

	// take care that there is always enough space to fulfill alignment
	pool->block_size = (block_size + pool->alignment) & ~pool->alignment;
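	// note: this assumes <alignment> is passed as a mask (2^n - 1, e.g. 3
	// for 4-byte alignment), so the block size is rounded up to a multiple
	// of (alignment + 1) and every block keeps the requested alignment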
	pool->next_ofs = next_ofs;
	pool->lock_flags = lock_flags;

	pool->header_size = max((sizeof(chunk_header) + pool->alignment) & ~pool->alignment,
		pool->alignment + 1);
	pool->enlarge_by = (((chunkSize + B_PAGE_SIZE - 1) & ~(B_PAGE_SIZE - 1))
		- pool->header_size) / pool->block_size;
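	// example with hypothetical sizes: for chunkSize = 4096 (one page),
	// header_size = 32 and block_size = 256, each new chunk provides
	// (4096 - 32) / 256 = 15 blocks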
	pool->max_blocks = max_blocks;
	pool->min_free_blocks = min_free_blocks;
	pool->free_blocks = 0;
	pool->num_blocks = 0;
	pool->num_waiting = 0;
	pool->free_list = NULL;
	pool->add_hook = add_hook;
	pool->remove_hook = remove_hook;
	pool->hook_arg = hook_arg;
	TRACE(("block_size=%d, alignment=%d, next_ofs=%d, wiring_flags=%d, header_size=%d, enlarge_by=%d\n",
		(int)pool->block_size, (int)pool->alignment, (int)pool->next_ofs,
		(int)pool->lock_flags, (int)pool->header_size, pool->enlarge_by));
	// if there is a minimum size, enlarge pool right now
	if (min_free_blocks > 0) {
		if ((status = enlarge_pool(pool,
				min(pool->enlarge_by, pool->max_blocks))) < 0)
			goto err3;
	}

	// add to global list, so enlarger thread takes care of pool
	mutex_lock(&sLockedPoolsLock);
	ADD_DL_LIST_HEAD(pool, sLockedPools, );
	mutex_unlock(&sLockedPoolsLock);

	return pool;
err3:
	free(pool->name);
err2:
	delete_sem(pool->sem);
err1:
	mutex_destroy(&pool->mutex);
	free(pool);
	return NULL;
}
static void
destroy_pool(locked_pool *pool)
{
	TRACE(("destroy_pool()\n"));

	// first, remove from global list, so enlarger thread
	// won't touch this pool anymore
	mutex_lock(&sLockedPoolsLock);
	REMOVE_DL_LIST(pool, sLockedPools, );
	mutex_unlock(&sLockedPoolsLock);

	// then clean up the pool itself
	free_chunks(pool);

	free(pool->name);
	delete_sem(pool->sem);
	mutex_destroy(&pool->mutex);

	free(pool);
}
static status_t
std_ops(int32 op, ...)
{
	switch (op) {
		case B_MODULE_INIT:
			return init_locked_pool();
		case B_MODULE_UNINIT:
			return uninit_locked_pool();

		default:
			return B_ERROR;
	}
}
locked_pool_interface interface = {
	{
		LOCKED_POOL_MODULE_NAME,
		0,
		std_ops
	},

	pool_alloc,
	pool_free,

	create_pool,
	destroy_pool
};


module_info *modules[] = {
	(module_info *)&interface,
	NULL
};