/* ///////////////////////////////////////////////////////////////////////
 * Brief:	basic_pool class
 *
 * Copyright (c) 2008-2020, Waruqi All rights reserved.
 * //////////////////////////////////////////////////////////////////// */
#ifndef EXTL_MEMORY_BASIC_POOL_H
#define EXTL_MEMORY_BASIC_POOL_H
 * \brief basic_pool class

# error basic_pool.h needs to be compiled as C++.
/* ///////////////////////////////////////////////////////////////////////
#include "basic_allocator_selector.h"
#include "../type/traits/limit_traits.h"
#include "../mpl/math/max_min.h"

#ifdef EXTL_MEMORY_BASIC_POOL_TEST_ENABLE
#	include "../counter/clock_counter.h"
#	include "../math/rand.h"
#endif

/* ///////////////////////////////////////////////////////////////////////
/// EXTL_BASIC_POOL_DEFAULT_MAX_FREE_UNLIMITED
#define EXTL_BASIC_POOL_DEFAULT_MAX_FREE_UNLIMITED		(0)

/*!if EXTL_BASIC_POOL_DEFAULT_MAX_FREE_INDEX == 15
 * max_free_chunk_size = (15 + 1) * 4K = 64K
 */
#define EXTL_BASIC_POOL_DEFAULT_MAX_FREE_INDEX			(15)

/// Default alignment boundary = 2^4 = 16B
#define EXTL_BASIC_POOL_DEFAULT_ALIGN_BOUNDARY_POWER	(4)
/* ///////////////////////////////////////////////////////////////////////

/*!\brief basic_pool class
 *
 * \param A The basic allocator type
 * \param MAX_FREE_INDEX The maximum index of a free chunk in the pool
 * \param DEFAULT_ALIGN_BOUNDARY_POWER Default alignment boundary = 2^DEFAULT_ALIGN_BOUNDARY_POWER (B)
 *
 * \ingroup extl_group_memory
 */
template<
#ifdef EXTL_TEMPLATE_CLASS_DEFAULT_ARGUMENT_SUPPORT
			typename_param_k A						= typename_type_def_k basic_allocator_selector<e_byte_t>::allocator_type
		,	e_size_t MAX_FREE_INDEX					= EXTL_BASIC_POOL_DEFAULT_MAX_FREE_INDEX
		,	e_size_t DEFAULT_ALIGN_BOUNDARY_POWER	= EXTL_BASIC_POOL_DEFAULT_ALIGN_BOUNDARY_POWER
#else
			typename_param_k A
		,	e_size_t MAX_FREE_INDEX
		,	e_size_t DEFAULT_ALIGN_BOUNDARY_POWER
#endif
		>
class basic_pool
#ifdef EXTL_EBO_FORM_2_SUPPORT
	: protected A	/* if EBO is supported */
#endif
	typedef basic_pool<A, MAX_FREE_INDEX, DEFAULT_ALIGN_BOUNDARY_POWER>	class_type;
	typedef void															value_type;
	typedef A																allocator_type;
	typedef void*															pointer;
	typedef void const*														const_pointer;
	typedef void															reference;
	typedef void															const_reference;
	typedef typename_type_k allocator_type::size_type						size_type;
	/*!\brief Prohibit copy constructor and copy assignment
	 *
	 * Note: EBO may not be supported if extl::uncopyable is used,
	 * because some compilers do not support EBO_FORM_6 - EXTL_EBO_FORM_6_SUPPORT
	 */
	basic_pool(class_type const&);
	class_type& operator=(class_type const&);
/* ///////////////////////////////////////////////////////////////////////
 * The size of a memory page : 8K
 */
	enum { en_per_page_size				= 8 * 1024 };
/* ///////////////////////////////////////////////////////////////////////

	/* Default boundary alignment : 16B = 2^4 */
	enum { en_default_boundary_power	= DEFAULT_ALIGN_BOUNDARY_POWER };
	enum { en_default_boundary			= 1 << en_default_boundary_power };

	/* The boundary size of one chunk : 4K */
	enum { en_chunk_boundary_size		= en_per_page_size >> 1 };
/* ///////////////////////////////////////////////////////////////////////

	/* Used for the conversion between an index and the size of a chunk in the free pool:
	 * en_boundary_index = log2(en_per_page_size / 2)
	 *
	 * index      = (chunk_size >> en_boundary_index) - 1;	==> index      = (chunk_size / 4K) - 1
	 * chunk_size = (index + 1) << en_boundary_index;		==> chunk_size = (index + 1) * 4K
	 */
	enum { en_boundary_index			= 12 };
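	/* Illustrative arithmetic for the index <-> chunk_size mapping above
	 * (worked examples only, not part of the interface):
	 *
	 *   index 1  -->  chunk_size = (1 + 1) << 12 = 8K
	 *   index 2  -->  chunk_size = (2 + 1) << 12 = 12K
	 *   chunk_size 8K   -->  index = (8192  >> 12) - 1 = 1
	 *   chunk_size 12K  -->  index = (12288 >> 12) - 1 = 2
	 */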
	/* The maximum index of the free pool
	 * if en_fp_max_index == 20 :
	 * max_regular_chunk_size = index_to_size(en_fp_max_index - 1) = en_fp_max_index * 4K = 80K
	 */
	enum { en_fp_max_index				= 20 };
	/* The index of the linked list in the free pool that holds the chunks
	 * larger than the maximum regular chunk size
	 */
	enum { en_fp_non_regular_index		= 0 };

	/* The maximum index of a free chunk
	 * When the size of a chunk is larger than index_to_size(en_fp_max_free_index),
	 * the chunk will be destroyed at once,
	 * and if the index is equal to EXTL_BASIC_POOL_DEFAULT_MAX_FREE_UNLIMITED,
	 * the chunk will not be destroyed until the basic_pool is destroyed
	 */
	enum { en_fp_max_free_index			= MAX_FREE_INDEX };
	/* The current maximum index of the chunk linked lists in the free pool
	 *
	 * if en_fp_max_free_index >= en_fp_max_index (or is unlimited):
	 *		en_fp_cur_max_index = en_fp_max_index
	 * if en_fp_max_free_index < en_fp_max_index:
	 *		en_fp_cur_max_index = en_fp_max_free_index
	 */
	enum { en_fp_cur_max_index			= (en_fp_max_free_index == EXTL_BASIC_POOL_DEFAULT_MAX_FREE_UNLIMITED)
										? en_fp_max_index
										: (EXTL_NS_MPL(min)<en_fp_max_free_index, en_fp_max_index>::value) };
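	/* Worked examples for en_fp_cur_max_index (illustrative only):
	 *
	 *   MAX_FREE_INDEX = 15 (default)    -->  en_fp_cur_max_index = min(15, 20) = 15
	 *   MAX_FREE_INDEX = 0  (unlimited)  -->  en_fp_cur_max_index = en_fp_max_index = 20
	 */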
	/* Used for the conversion between an index and the size of a block in the active pool */
	enum { en_ap_max_index				= (8 * 1024 / en_default_boundary) };
	enum { en_ap_larger_blocks_index	= en_ap_max_index - 1 };
	enum { en_ap_non_free_index			= 0 };
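	/* Worked examples for the active-pool indices, assuming the default
	 * 16B boundary (illustrative only):
	 *
	 *   en_ap_max_index           = 8 * 1024 / 16 = 512
	 *   en_ap_larger_blocks_index = 511
	 *   a 48B block   -->  index = 48 >> 4  = 3
	 *   a 512B block  -->  index = 512 >> 4 = 32
	 */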
	/// Converts an index in the free pool to the size of a chunk
	static size_type index_to_size(size_type index)
	{
		EXTL_ASSERT(0 < index);
		return ((index + 1) << en_boundary_index);
	}

	/// Converts the size of a chunk to an index in the free pool
	static size_type size_to_index(size_type size)
	{
		EXTL_ASSERT(en_per_page_size <= size);
		return ((size >> en_boundary_index) - 1);
	}
	/// Default boundary alignment
	static size_type default_align(size_type size)
	{
		return EXTL_ALIGN(size, en_default_boundary);
	}
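	/* Illustrative results of default_align() with the default 16B boundary:
	 *   default_align(25)  == 32
	 *   default_align(104) == 112
	 *   default_align(112) == 112
	 */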
	enum { en_block_head_size			= sizeof(size_type) };

	/*!\brief The block type
	 *
	 * The linked node of the free blocks in one chunk.
	 * Every block stores the index of its chunk list in the active pool (m_active_pool):
	 *
	 *   --------------
	 *   | ap_index   |
	 *   -------------- <-- data()
	 *   | next       | ---------------> next free block
	 *   --------------
	 */
		pointer data()							{ return reinterpret_cast<pointer>(&next); }
		size_type get_ap_index() const			{ return ap_index; }
		void set_ap_index(size_type index)		{ ap_index = index; }
		static block_head* get_block(pointer p)	{ return reinterpret_cast<block_head*>(static_cast<e_byte_t*>(p) - en_block_head_size); }
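		/* Illustrative round trip through the accessors above (assuming, as
		 * en_block_head_size suggests, that ap_index is the only field that
		 * precedes next):
		 *
		 *   block_head* b  = ...;
		 *   pointer p      = b->data();                  // p == &b->next
		 *   block_head* b2 = block_head::get_block(p);   // b2 == b
		 */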
	friend struct block_head;

	/*!\brief The chunk type
	 *
	 * A memory chunk node:
	 * next:  pointer to the next chunk
	 * index: (chunk_size >> boundary_index) - 1
	 *
	 * chunks are linked through 'next':
	 *
	 *   --------------            --------------
	 *   | chunk_head1 | --------> | chunk_head2 | ----------> ... ----------> NULL
	 *   --------------            --------------
	 *
	 * inside one chunk, first_free/last_free delimit the list of free blocks:
	 *
	 *   --------------
	 *   | chunk_head  |  first_free ---      -
	 *   --------------                |      |
	 *   | free_block1 | <-------------       | block_size
	 *   | ...         |                      -
	 *   | free_block3 |                      |
	 *   | free_block4 | <--- last_free       | chunk size
	 *   --------------                       -
	 */
		size_type block_size;
		size_type free_count;

		block_head* first_free;
		block_head* last_free;
		/// The bit count of the mask in reserve1
		enum
		{
			/// Whether free blocks are allocated from the linked list
			en_is_alloc_by_linked_list = 0
		};

		/// Returns whether free blocks are allocated from the linked list
		e_bool_t is_alloc_by_linked_list() const	{ return 1 == (reserve1 & (1 << en_is_alloc_by_linked_list)); }
		void enable_alloc_by_linked_list()			{ reserve1 |= (1 << en_is_alloc_by_linked_list); }
		void disable_alloc_by_linked_list()			{ reserve1 &= ~(1 << en_is_alloc_by_linked_list); }
		size_type chunk_size() const	{ return index_to_size(index); }
		size_type data_size() const		{ return (chunk_size() - sizeof(chunk_head)); }
		size_type block_count() const	{ return (data_size() / block_size); }
		e_byte_t* data()				{ return reinterpret_cast<e_byte_t*>(&((this)[1])); }
		block_head* first_block()		{ return reinterpret_cast<block_head*>(data()); }
		block_head* last_block()		{ return reinterpret_cast<block_head*>(data() + (block_count() - 1) * block_size); }
		e_bool_t is_free() const		{ return free_count == block_count(); }
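		/* Worked example for the helpers above (illustrative; assumes
		 * sizeof(chunk_head) == 32, which is platform dependent):
		 *
		 *   index = 1, block_size = 112
		 *   chunk_size()  = (1 + 1) << 12 = 8192
		 *   data_size()   = 8192 - 32     = 8160
		 *   block_count() = 8160 / 112    = 72
		 *   is_free()     = (free_count == 72)
		 */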
		/// Returns true if the block lies inside this chunk
		e_bool_t is_in_chunk(block_head const* block) const
		{
			EXTL_ASSERT(NULL != block);
			if (reinterpret_cast<e_byte_t const*>(this) < reinterpret_cast<e_byte_t const*>(block) &&
				reinterpret_cast<e_byte_t const*>(block) < (reinterpret_cast<e_byte_t const*>(this) + chunk_size()))
				return e_true_v;
			else return e_false_v;
		}
	friend struct chunk_head;

	/// The snapshot info of the pool
	struct snapshot_info
	{
		size_type cur_alloc_size_from_system;
		size_type cur_alloc_size_from_pool;
	};
	/*!\brief free pool
	 *
	 * index:       0        1     2             3             ...    en_fp_max_index - 1
	 * chunk_size:  larger   8K    8K + 4K * 1   8K + 4K * 2   ...    8K + 4K * (index - 1)
	 *
	 * every slot is the head of a singly linked list of free chunks:
	 *
	 *   m_free_pool[index] ---> chunk1 ---> chunk2 ---> ... ---> NULL
	 *
	 * Note: index 0 holds the non-regular (over-sized) chunks,
	 *       the other slots hold regular chunks
	 */
	/*!\brief active pool
	 *
	 * en_default_boundary = 16
	 *
	 * index:       0         1      2          3          ...    en_ap_max_index - 1 (en_ap_larger_blocks_index)
	 * block_size:  unknown   16B    16B * 2    16B * 3    ...    larger blocks
	 *
	 * every slot is the head of a singly linked list of chunks:
	 *
	 *   m_active_pool[index] ---> chunk1 ---> chunk2 ---> ... ---> NULL
	 *
	 * Note: index 0 holds the non-free chunks (only a block, already allocated);
	 *       the other slots hold partially free chunks;
	 *       the last slot is kept in ascending order of block size
	 */
	chunk_head*	m_free_pool[en_fp_max_index];
	chunk_head*	m_active_pool[en_ap_max_index];
	/// Initializes a free chunk
	void init_free_chunk(chunk_head* free_chunk, size_type block_size);

	/// Allocates a free block from a partially free chunk
	block_head* alloc_block_from_free_chunk(chunk_head* free_chunk);
	/// Allocates a free chunk from the free pool
	chunk_head* alloc_chunk_from_free_pool(size_type chunk_size);

	/// Deallocates a block to its chunk
	void dealloc_block_to_chunk(block_head* block, chunk_head* chunk);
	/// Deallocates an allocated chunk to the free pool
	void dealloc_chunk_to_free_pool(chunk_head* chunk);
	/// Deallocates a block from the given index of the active pool
	e_bool_t dealloc_block_from_active_index(pointer block, size_type index);

	/// Adds a chunk to the active pool and returns its index in the active pool
	size_type add_chunk_to_active_pool(chunk_head* chunk);
	/// \name Constructors

		EXTL_STATIC_ASSERT(en_ap_max_index >= 2);
		EXTL_STATIC_ASSERT(en_fp_max_index >= 2);

		/* Creates the basic_pool */

		/* Destroys the basic_pool */

	/// Returns a copy of the allocator
	allocator_type allocator() const
	{
	#ifdef EXTL_EBO_FORM_2_SUPPORT
		return *this;
	#else
		return allocator_type();
	#endif
	}
	/// Creates the basic_pool
	void create();
	/// Destroys the basic_pool
	void destroy();
	/// Shrinks the basic_pool
	void shrink();

	/// Allocates a memory block of the specified size
	pointer allocate(size_type size);

	/// Reallocates a memory block of the specified size
	pointer reallocate(pointer block, size_type size);

	/// Deallocates the block back to the pool
	void deallocate(pointer block);

	/// Creates a snapshot of the pool
	snapshot_info get_snapshot() const;
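	/* Illustrative reading of a snapshot (where 'pool' is some basic_pool
	 * instance; member names as declared above):
	 *
	 *   snapshot_info info = pool.get_snapshot();
	 *   // info.cur_alloc_size_from_system : bytes of all chunks currently held from the system allocator
	 *   // info.cur_alloc_size_from_pool   : bytes of the blocks currently handed out by the pool (block heads included)
	 */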
/* ///////////////////////////////////////////////////////////////////////

/* Template declaration */
#ifdef EXTL_BASIC_POOL_TEMPLATE_DECL
#	undef EXTL_BASIC_POOL_TEMPLATE_DECL
#endif

#define EXTL_BASIC_POOL_TEMPLATE_DECL			\
template<	typename_param_k A					\
		,	e_size_t MAX_FREE_INDEX				\
		,	e_size_t DEFAULT_ALIGN_BOUNDARY_POWER	\
		>
/* Class qualification */
#ifdef EXTL_BASIC_POOL_CLASS_QUAL
#	undef EXTL_BASIC_POOL_CLASS_QUAL
#endif

#define EXTL_BASIC_POOL_CLASS_QUAL	basic_pool<A, MAX_FREE_INDEX, DEFAULT_ALIGN_BOUNDARY_POWER>

/* Class qualification with the return type */
#ifdef EXTL_BASIC_POOL_CLASS_RET_QUAL
#	undef EXTL_BASIC_POOL_CLASS_RET_QUAL
#endif

#define EXTL_BASIC_POOL_CLASS_RET_QUAL(ret_type)			\
typename_type_ret_k EXTL_BASIC_POOL_CLASS_QUAL::ret_type	\
EXTL_BASIC_POOL_CLASS_QUAL
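/* For example (illustrative expansion only), the definition header
 *
 *   EXTL_BASIC_POOL_TEMPLATE_DECL
 *   EXTL_BASIC_POOL_CLASS_RET_QUAL(pointer)::allocate(size_type size)
 *
 * expands to
 *
 *   template< typename_param_k A, e_size_t MAX_FREE_INDEX, e_size_t DEFAULT_ALIGN_BOUNDARY_POWER >
 *   typename_type_ret_k basic_pool<A, MAX_FREE_INDEX, DEFAULT_ALIGN_BOUNDARY_POWER>::pointer
 *   basic_pool<A, MAX_FREE_INDEX, DEFAULT_ALIGN_BOUNDARY_POWER>::allocate(size_type size)
 *
 * i.e. an out-of-class member definition with a dependent return type.
 */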
/* ///////////////////////////////////////////////////////////////////////

/* Creates the basic_pool */
EXTL_BASIC_POOL_TEMPLATE_DECL
inline void EXTL_BASIC_POOL_CLASS_QUAL::create()
{
	mem_fill_0(m_free_pool, en_fp_max_index * sizeof(chunk_head*));
	mem_fill_0(m_active_pool, en_ap_max_index * sizeof(chunk_head*));
}
/* Destroys the basic_pool */
EXTL_BASIC_POOL_TEMPLATE_DECL
void EXTL_BASIC_POOL_CLASS_QUAL::destroy()
{
	size_type index;

	/*!Frees all chunks in the active pool to the system */
	for(index = 0; index < en_ap_max_index; index++)
	{
		chunk_head* node = m_active_pool[index];
		while (NULL != node)
		{
			chunk_head* pre_node = node;
			node = node->next;
			allocator().deallocate(reinterpret_cast<e_byte_t*>(pre_node));
		}
		m_active_pool[index] = NULL;
	}

	/*!Frees all chunks in the free pool to the system */
	for(index = 0; index < en_fp_cur_max_index; index++)
	{
		chunk_head* node = m_free_pool[index];
		while (NULL != node)
		{
			chunk_head* pre_node = node;
			node = node->next;
			allocator().deallocate(reinterpret_cast<e_byte_t*>(pre_node));
		}
		m_free_pool[index] = NULL;
	}
}
/* Shrinks the basic_pool */
EXTL_BASIC_POOL_TEMPLATE_DECL
void EXTL_BASIC_POOL_CLASS_QUAL::shrink()
{
	/* Frees all chunks in the free pool to the system */
	for(size_type index = 0; index < en_fp_cur_max_index; index++)
	{
		chunk_head* node = m_free_pool[index];
		while (NULL != node)
		{
			chunk_head* pre_node = node;
			node = node->next;
			allocator().deallocate(reinterpret_cast<e_byte_t*>(pre_node));
		}
		m_free_pool[index] = NULL;
	}
}
/* Allocates a memory block of the specified size */
EXTL_BASIC_POOL_TEMPLATE_DECL
EXTL_BASIC_POOL_CLASS_RET_QUAL(pointer)::allocate(size_type size)
{
	if (0 == size) return NULL;

	/*!Adds the block head and aligns the size to the default boundary */
	size += en_block_head_size;
	if(size < sizeof(block_head)) size = sizeof(block_head);
	size_type block_size = default_align(size);
	EXTL_ASSERT(0 == block_size % en_default_boundary);
	EXTL_ASSERT(size <= block_size);
	/*!index = block_size / 16 if en_default_boundary == 16 */
	size_type index = block_size >> en_default_boundary_power;
	EXTL_ASSERT((0 < index) && (index <= EXTL_LIMIT_TRAITS_UINT32_MAX));
	/*!Allocates a free block from the active pool */
	if (index > en_ap_larger_blocks_index) index = en_ap_larger_blocks_index;
	EXTL_ASSERT(0 != index);
	chunk_head* node = m_active_pool[index];
	if (NULL != node)
	{
		if (NULL != node->first_free && block_size <= node->block_size)
		{
			/* Allocates a free block from the free chunk */
			block_head* block = alloc_block_from_free_chunk(node);
			if(NULL != block) block->set_ap_index(index);

			EXTL_ASSERT(size <= node->block_size);
			return block->data();
		}
	}
#else	/* The optimization of the above code */

	/*!Allocates a free block from the active pool */
	if (index < en_ap_larger_blocks_index)
	{
		EXTL_ASSERT(0 != index);
		chunk_head* node = m_active_pool[index];
		if (NULL != node)
		{
			if (NULL != node->first_free)
			{
				/* Allocates a free block from the free chunk */
				block_head* block = alloc_block_from_free_chunk(node);
				if(NULL != block) block->set_ap_index(index);

				EXTL_ASSERT(size <= node->block_size);
				return block->data();
			}
		}
	}
	else
	{
		chunk_head* node = m_active_pool[en_ap_larger_blocks_index];
		if (NULL != node)
		{
			if (NULL != node->first_free && block_size <= node->block_size)
			{
				/* Allocates a free block from the free chunk */
				block_head* block = alloc_block_from_free_chunk(node);
				if(NULL != block) block->set_ap_index(en_ap_larger_blocks_index);

				EXTL_ASSERT(size <= node->block_size);
				return block->data();
			}
		}
	}
#endif
	/*!Adjusts the size of a free chunk:
	 * 8K, 8K + 4K * 1, 8K + 4K * 2, ..., 8K + 4K * n
	 */
	size_type chunk_size = EXTL_ALIGN(sizeof(chunk_head) + block_size, en_chunk_boundary_size);
	if (chunk_size < en_per_page_size) chunk_size = en_per_page_size;
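	/* Illustrative numbers: for block_size = 112 and a chunk_head smaller than
	 * 4K, EXTL_ALIGN(sizeof(chunk_head) + 112, 4K) = 4K, which is then raised
	 * to en_per_page_size = 8K.
	 */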
	/*!Allocates a free chunk from the free pool
	 * Note: new_chunk->chunk_size() may differ from chunk_size
	 */
	chunk_head* new_chunk = alloc_chunk_from_free_pool(chunk_size);
	EXTL_ASSERT(NULL != new_chunk);

	/* Initializes the free chunk */
	init_free_chunk(new_chunk, block_size);

	/* Allocates a free block from the free chunk */
	block_head* block = alloc_block_from_free_chunk(new_chunk);

#else	/* The optimization of the above code */

	EXTL_ASSERT(block_size <= new_chunk->chunk_size());
	EXTL_ASSERT(sizeof(block_head) <= block_size);

	/*!Initializes the free chunk */
	new_chunk->block_size = block_size;
	new_chunk->free_count = (new_chunk->chunk_size() - sizeof(chunk_head)) / block_size;
	EXTL_ASSERT(0 < new_chunk->free_count);

	/*!Allocates a free block from the free chunk */
	block_head* block = new_chunk->first_block();
	new_chunk->last_free = reinterpret_cast<block_head*>(reinterpret_cast<e_byte_t*>(block) + (new_chunk->free_count - 1) * block_size);
	new_chunk->last_free->next = NULL;

	/*!Delayed segregation of the chunk */
	if (1 == new_chunk->free_count)
	{
		new_chunk->first_free = NULL;
		new_chunk->last_free = NULL;
		new_chunk->enable_alloc_by_linked_list();
	}
	else
	{
		new_chunk->first_free = reinterpret_cast<block_head*>(reinterpret_cast<e_byte_t*>(block) + block_size);
		if (new_chunk->first_free == new_chunk->last_free)
			new_chunk->enable_alloc_by_linked_list();
		else new_chunk->disable_alloc_by_linked_list();
	}
	--new_chunk->free_count;
#endif

	/*!Adds the chunk to the active pool */
	block->set_ap_index(add_chunk_to_active_pool(new_chunk));

	return block->data();
}
/* Adds a chunk to the active pool and returns its index in the active pool */
EXTL_BASIC_POOL_TEMPLATE_DECL
EXTL_BASIC_POOL_CLASS_RET_QUAL(size_type)::add_chunk_to_active_pool(chunk_head* chunk)
{
	EXTL_ASSERT(NULL != chunk);

	/*!Puts the chunk into m_active_pool[en_ap_non_free_index]
	 * if the only block in the chunk has already been allocated
	 * (first_free == NULL, free_count == 0)
	 */
	if (NULL == chunk->first_free)
	{
		EXTL_ASSERT(0 == chunk->free_count);
		chunk->next = m_active_pool[en_ap_non_free_index];
		m_active_pool[en_ap_non_free_index] = chunk;
		return en_ap_non_free_index;
	}
	/* Puts the chunk into m_active_pool[index] */

	/*!index = block_size / 16 if en_default_boundary == 16 */
	size_type index = chunk->block_size >> en_default_boundary_power;
	EXTL_ASSERT((0 < index) && (index <= EXTL_LIMIT_TRAITS_UINT32_MAX));

	/* Smaller blocks in the chunk */
	if (index < en_ap_larger_blocks_index)
	{
		chunk->next = m_active_pool[index];
		m_active_pool[index] = chunk;
		return index;
	}
	/*!Larger blocks in the chunk
	 * The chunks in m_active_pool[en_ap_larger_blocks_index] are sorted by block size in ascending order:
	 *
	 *   m_active_pool[en_ap_larger_blocks_index]
	 *    ---------
	 *    | chunk1 |  -- e.g. block_size = 1
	 *    ---------
	 *    | chunk2 |  -- e.g. block_size = 2
	 *    ---------   <- insert a chunk here
	 *    | chunk3 |  -- e.g. block_size = 3
	 *    ---------
	 *    | chunk4 |  -- e.g. block_size = 4
	 *    ---------
	 *    | chunk5 |  -- e.g. block_size = 5
	 *    ---------
	 */
	chunk_head* node = m_active_pool[en_ap_larger_blocks_index];
	if (NULL != node)
	{
		/* Finds the position where the chunk will be inserted */
		chunk_head* pre_node = node;
		while(NULL != node && node->block_size < chunk->block_size)
		{
			pre_node = node;
			node = node->next;
		}

		/* Inserts the chunk */
		chunk->next = pre_node->next;
		pre_node->next = chunk;
	}
	/* Inserts the chunk at the head of m_active_pool[en_ap_larger_blocks_index] */
	else
	{
		chunk->next = NULL;
		m_active_pool[en_ap_larger_blocks_index] = chunk;
	}

	return en_ap_larger_blocks_index;
}
/* Allocates a free chunk from the free pool */
EXTL_BASIC_POOL_TEMPLATE_DECL
EXTL_BASIC_POOL_CLASS_RET_QUAL(chunk_head*)::alloc_chunk_from_free_pool(size_type chunk_size)
{
	EXTL_ASSERT(sizeof(chunk_head) < chunk_size);

	/*!index = chunk_size / 4K - 1
	 *
	 * chunk_size:	8K,		8K + 4K * 1,	8K + 4K * 2,	...,	8K + 4K * n
	 * index:		1,		2,				3,				...,	n + 1
	 */
	size_type index = size_to_index(chunk_size);
	EXTL_ASSERT((0 < index) && (index <= EXTL_LIMIT_TRAITS_UINT32_MAX));
	/*!Finds a regular chunk of at least the requested size:
	 * 8K, 8K + 4K * 1, 8K + 4K * 2, ..., 8K + 4K * n
	 */
	for(size_type i = index; i < en_fp_cur_max_index; i++)
	{
		/* Non-empty linked list */
		if (NULL != m_free_pool[i])
		{
			chunk_head* node = m_free_pool[i];
			m_free_pool[i] = m_free_pool[i]->next;

			EXTL_ASSERT(node->index == i);
			return node;
		}
	}
	/*!Finds a non-regular chunk:
	 * index = en_fp_non_regular_index = 0
	 * The size of a non-regular chunk is larger than that of any regular chunk
	 */
	chunk_head* node = m_free_pool[en_fp_non_regular_index];
	if (NULL != node && index <= node->index)	/* chunk_size <= node->chunk_size() */
	{
		m_free_pool[en_fp_non_regular_index] = node->next;

		EXTL_ASSERT(index <= node->index);
		return node;
	}
	/*!Allocates a new chunk
	 * if there is no chunk of the appropriate size in the free pool
	 */
	node = reinterpret_cast<chunk_head*>(allocator().allocate(chunk_size));
	EXTL_ASSERT(NULL != node);
	node->index = index;	/* the new chunk records its free-pool index */
	return node;
}
/* Initializes a free chunk */
EXTL_BASIC_POOL_TEMPLATE_DECL
inline void EXTL_BASIC_POOL_CLASS_QUAL::init_free_chunk(chunk_head* free_chunk, size_type block_size)
{
	EXTL_ASSERT(NULL != free_chunk);
	EXTL_ASSERT(block_size <= free_chunk->chunk_size());
	EXTL_ASSERT(sizeof(block_head) <= block_size);

	free_chunk->block_size = block_size;
	free_chunk->free_count = free_chunk->block_count();
	free_chunk->first_free = free_chunk->first_block();
	free_chunk->last_free = free_chunk->last_block();
	free_chunk->last_free->next = NULL;
	free_chunk->disable_alloc_by_linked_list();
}
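/* Illustrative call sequence of alloc_block_from_free_chunk() (below) on a
 * freshly initialized chunk with block_count() == 4 and block_size == 112,
 * showing the delayed segregation:
 *
 *   call 1: returns block 0, first_free advances by 112 to block 1
 *   call 2: returns block 1, first_free advances by 112 to block 2
 *   call 3: returns block 2, first_free reaches last_block() -> linked-list mode on
 *   call 4: returns block 3, first_free = last_free->next = NULL, last_free = NULL
 */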
/* Allocates a free block from a partially free chunk */
EXTL_BASIC_POOL_TEMPLATE_DECL
inline EXTL_BASIC_POOL_CLASS_RET_QUAL(block_head*)::alloc_block_from_free_chunk(chunk_head* free_chunk)
{
	EXTL_ASSERT(NULL != free_chunk);
	EXTL_ASSERT(NULL != free_chunk->first_free);
	EXTL_ASSERT(0 < free_chunk->free_count);

	block_head* block = free_chunk->first_free;

	if (free_chunk->is_alloc_by_linked_list() || free_chunk->free_count <= 1)
	{
		/*!Allocates from the linked list of free blocks:
		 * first_free simply advances to first_free->next
		 */
		free_chunk->first_free = free_chunk->first_free->next;
	}
	else
	{
		/*!Delayed segregation of the chunk, for optimization
		 *
		 * The chunk is not segregated until a block in the chunk is allocated:
		 * first_free advances by block_size instead of following 'next'.
		 * Segregation is complete when first_free reaches the last block;
		 * from then on the blocks in the chunk are allocated from the linked list.
		 */
		EXTL_ASSERT(1 < free_chunk->free_count);
		free_chunk->first_free = reinterpret_cast<block_head*>
						(reinterpret_cast<e_byte_t*>(free_chunk->first_free) + free_chunk->block_size);
		if (free_chunk->first_free == free_chunk->last_block())
			free_chunk->enable_alloc_by_linked_list();
	}

	if (NULL == free_chunk->first_free) free_chunk->last_free = NULL;

	EXTL_ASSERT(free_chunk->free_count > 0);
	--free_chunk->free_count;

	return block;
}
/*!Deallocates the block back to the pool
 *
 * The ap_index stored in the block head selects the chunk linked list in the
 * active pool that the owning chunk is linked into:
 *
 *   ap_index ----> m_active_pool[ap_index] ---> chunk1 ---> chunk2 ---> ...
 */
EXTL_BASIC_POOL_TEMPLATE_DECL
void EXTL_BASIC_POOL_CLASS_QUAL::deallocate(pointer p)
{
	if (NULL == p) return ;
	block_head* block = block_head::get_block(p);
	EXTL_ASSERT(NULL != block);

	size_type index = block->get_ap_index();
	EXTL_ASSERT(index >= 0 && index < en_ap_max_index);

	chunk_head* node = m_active_pool[index];
	chunk_head* pre_node = node;
	while (NULL != node)
	{
		/* Finds the chunk which owns the block */
		if (node->is_in_chunk(block))
		{
			EXTL_ASSERT(((reinterpret_cast<e_byte_t*>(block) -
						reinterpret_cast<e_byte_t*>(node) -
						sizeof(chunk_head)) % node->block_size) == 0);

			dealloc_block_to_chunk(block, node);

			/*!Removes the chunk from the active pool
			 * and puts the chunk into the free pool if the chunk is free.
			 */
			if (node->is_free())
			{
				/*!Removes the chunk from the active pool */
				if (pre_node == node) m_active_pool[index] = node->next;	/* Removes the head node */
				else pre_node->next = node->next;

				/*!Deallocates the chunk to the free pool */
				dealloc_chunk_to_free_pool(node);
			}
			return ;
		}
		pre_node = node;
		node = node->next;
	}
}
/* Deallocates an allocated chunk to the free pool */
EXTL_BASIC_POOL_TEMPLATE_DECL
void EXTL_BASIC_POOL_CLASS_QUAL::dealloc_chunk_to_free_pool(chunk_head* chunk)
{
	EXTL_ASSERT(NULL != chunk);
	EXTL_ASSERT(chunk->is_free());

	size_type index = chunk->index;
	EXTL_ASSERT((0 < index) && (index <= EXTL_LIMIT_TRAITS_UINT32_MAX));
	/*!Frees the chunk to the system if the size of the chunk is not less than index_to_size(en_fp_max_free_index).
	 * Doesn't free the chunk until the pool is destroyed if en_fp_max_free_index == EXTL_BASIC_POOL_DEFAULT_MAX_FREE_UNLIMITED.
	 */
	if ((index >= en_fp_max_free_index) &&
		(en_fp_max_free_index != EXTL_BASIC_POOL_DEFAULT_MAX_FREE_UNLIMITED))
	{
		allocator().deallocate(reinterpret_cast<e_byte_t*>(chunk));
	}
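	/* Illustrative policy with the default MAX_FREE_INDEX = 15:
	 * a free chunk of index 16 (68K) is returned to the system by the branch
	 * above, while a free chunk of index 10 (44K) is cached in m_free_pool[10]
	 * by the branch below.
	 */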
	/*!Frees the regular chunk to the free pool */
	else if (index < en_fp_cur_max_index)
	{
		EXTL_ASSERT(index != en_fp_non_regular_index);
		chunk->next = m_free_pool[index];
		m_free_pool[index] = chunk;
	}
	/*!Frees the non-regular chunk to the free pool.
	 * The non-regular chunks in the free pool are sorted in descending order by chunk size:
	 *
	 *   m_free_pool[en_fp_non_regular_index]
	 *    ---------------
	 *    | free_chunk1 |  -- e.g. size = 10
	 *    ---------------
	 *    | free_chunk2 |  -- e.g. size = 8
	 *    ---------------  <- insert a non-regular chunk here
	 *    | free_chunk3 |  -- e.g. size = 5
	 *    ---------------
	 *    | free_chunk4 |  -- e.g. size = 3
	 *    ---------------
	 *    | free_chunk5 |  -- e.g. size = 1
	 *    ---------------
	 */
	else
	{
		chunk_head* node = m_free_pool[en_fp_non_regular_index];
		if (NULL != node)
		{
			/* Finds the position where the chunk will be inserted */
			chunk_head* pre_node = node;
			while(NULL != node && node->index > index)
			{
				pre_node = node;
				node = node->next;
			}

			/* Inserts the chunk */
			chunk->next = node->next;
			pre_node->next = chunk;
		}
		else
		{
			/* Inserts the chunk at the head of m_free_pool[en_fp_non_regular_index] */
			chunk->next = NULL;
			m_free_pool[en_fp_non_regular_index] = chunk;
		}
	}
}
/* Deallocates a block to the chunk */
EXTL_BASIC_POOL_TEMPLATE_DECL
inline void EXTL_BASIC_POOL_CLASS_QUAL::dealloc_block_to_chunk(block_head* block, chunk_head* chunk)
{
	EXTL_ASSERT(NULL != chunk);
	EXTL_ASSERT(NULL != block);
	EXTL_ASSERT(chunk->is_in_chunk(block));

	/* Appends the block to the linked list of free blocks in the chunk */
	block->next = NULL;
	if (NULL == chunk->last_free)
	{
		EXTL_ASSERT(NULL == chunk->first_free);
		chunk->last_free = block;
		chunk->first_free = block;
	}
	else
	{
		chunk->last_free->next = block;
		chunk->last_free = block;
	}

	EXTL_ASSERT(chunk->free_count < chunk->block_count());
	++chunk->free_count;
}
/* Reallocates a memory block of the specified size */
EXTL_BASIC_POOL_TEMPLATE_DECL
EXTL_BASIC_POOL_CLASS_RET_QUAL(pointer)::reallocate(pointer p, size_type size)
{
	if (NULL == p) return NULL;
	block_head* block = block_head::get_block(p);
	EXTL_ASSERT(NULL != block);

	size_type index = block->get_ap_index();
	EXTL_ASSERT(index >= 0 && index < en_ap_max_index);

	chunk_head* node = m_active_pool[index];
	chunk_head* pre_node = node;
	while (NULL != node)
	{
		/* Finds the chunk which owns the block */
		if (node->is_in_chunk(block))
		{
			/* The existing block is already large enough */
			if (size <= node->block_size - en_block_head_size) return p;
			else if (node->block_count() == 1 &&
					size <= node->data_size() - en_block_head_size) return p;
			else
			{
				/* Allocates a new block */
				pointer new_p = allocate(size);
				EXTL_ASSERT(NULL != new_p);
				mem_copy(new_p, block->data(), node->block_size - en_block_head_size);

				/* Deallocates the old block */
				dealloc_block_to_chunk(block, node);
				if (node->is_free())
				{
					/* Removes the chunk from the active pool */
					if (pre_node == node) m_active_pool[index] = node->next;
					else pre_node->next = node->next;

					/* Deallocates the chunk to the free pool */
					dealloc_chunk_to_free_pool(node);
				}
				return new_p;
			}
		}
		pre_node = node;
		node = node->next;
	}
	return NULL;
}
/* Creates a snapshot of the pool */
EXTL_BASIC_POOL_TEMPLATE_DECL
EXTL_BASIC_POOL_CLASS_RET_QUAL(snapshot_info)::get_snapshot() const
{
	snapshot_info cur_snapshot_info;
	mem_fill_0(&cur_snapshot_info, sizeof(snapshot_info));

	size_type index;

	/* The snapshot info of the active pool */
	for(index = 0; index < en_ap_max_index; index++)
	{
		chunk_head* node = m_active_pool[index];
		while (NULL != node)
		{
			cur_snapshot_info.cur_alloc_size_from_system += node->chunk_size();
			cur_snapshot_info.cur_alloc_size_from_pool += node->block_size * (node->block_count() - node->free_count);
			node = node->next;
		}
	}

	/* The snapshot info of the free pool */
	for(index = 0; index < en_fp_cur_max_index; index++)
	{
		chunk_head* node = m_free_pool[index];
		while (NULL != node)
		{
			cur_snapshot_info.cur_alloc_size_from_system += node->chunk_size();
			node = node->next;
		}
	}

	return cur_snapshot_info;
}
/* ///////////////////////////////////////////////////////////////////////
#undef EXTL_BASIC_POOL_TEMPLATE_DECL
#undef EXTL_BASIC_POOL_CLASS_QUAL
#undef EXTL_BASIC_POOL_CLASS_RET_QUAL

/* ///////////////////////////////////////////////////////////////////////
 * some basic_pool types
 */
/// default_basic_pool
typedef basic_pool<	basic_allocator_selector<e_byte_t>::allocator_type
					,	EXTL_BASIC_POOL_DEFAULT_MAX_FREE_INDEX
					,	EXTL_BASIC_POOL_DEFAULT_ALIGN_BOUNDARY_POWER
					>	default_basic_pool;
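/* Illustrative usage sketch of default_basic_pool (not part of this header;
 * it assumes the constructor/destructor create and destroy the pool as the
 * comments above suggest, otherwise call create()/destroy() explicitly):
 *
 *   default_basic_pool pool;
 *   void* p = pool.allocate(100);
 *   p = pool.reallocate(p, 200);
 *   pool.deallocate(p);
 *   pool.shrink();   // returns the cached free chunks to the system
 */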
typedef basic_pool<	basic_allocator_selector<e_byte_t>::allocator_type
					,	EXTL_BASIC_POOL_DEFAULT_MAX_FREE_UNLIMITED
					,	4	/* Default alignment boundary: 16 = 2^4 */

/// spare_basic_pool
typedef basic_pool<	basic_allocator_selector<e_byte_t>::allocator_type
					,	15	/* max_free_chunk_size = (15 + 1) * 4K = 64K */
					,	3	/* Default alignment boundary: 8 = 2^3 */
					>	spare_basic_pool;
/* ///////////////////////////////////////////////////////////////////////
#ifdef EXTL_MEMORY_BASIC_POOL_TEST_ENABLE
#	include "unit_test/basic_pool_test.h"
#endif

/* ///////////////////////////////////////////////////////////////////////

/* //////////////////////////////////////////////////////////////////// */
#endif	/* EXTL_MEMORY_BASIC_POOL_H */
/* //////////////////////////////////////////////////////////////////// */