//////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
// See http://www.boost.org/libs/interprocess for documentation.
//
//////////////////////////////////////////////////////////////////////////////

#ifndef BOOST_INTERPROCESS_MEM_ALGO_RBTREE_BEST_FIT_HPP
#define BOOST_INTERPROCESS_MEM_ALGO_RBTREE_BEST_FIT_HPP
#if (defined _MSC_VER) && (_MSC_VER >= 1200)
#  pragma once
#endif
#include <boost/interprocess/detail/config_begin.hpp>
#include <boost/interprocess/detail/workaround.hpp>

#include <boost/pointer_to_other.hpp>

#include <boost/interprocess/interprocess_fwd.hpp>
#include <boost/interprocess/mem_algo/detail/mem_algo_common.hpp>
#include <boost/interprocess/containers/allocation_type.hpp>
#include <boost/interprocess/offset_ptr.hpp>
#include <boost/interprocess/sync/interprocess_mutex.hpp>
#include <boost/interprocess/exceptions.hpp>
#include <boost/interprocess/detail/utilities.hpp>
#include <boost/interprocess/detail/min_max.hpp>
#include <boost/interprocess/detail/math_functions.hpp>
#include <boost/interprocess/detail/type_traits.hpp>
#include <boost/interprocess/sync/scoped_lock.hpp>
#include <boost/assert.hpp>
#include <boost/static_assert.hpp>
#include <boost/intrusive/set.hpp>

#include <utility>
#include <climits>
#include <cstring>
#include <cassert>
#include <new>
//!Describes a best-fit algorithm based on an intrusive red-black tree used to allocate
//!objects in shared memory. This class is intended as a base class for single segment
//!and multi-segment implementations.

namespace boost {
namespace interprocess {

namespace bi = boost::intrusive;
//!This class implements an algorithm that stores the free nodes in a red-black tree
//!to have logarithmic search/insert times.
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
class rbtree_best_fit
{
   //Non-copyable and non-assignable
   rbtree_best_fit(const rbtree_best_fit &);
   rbtree_best_fit &operator=(const rbtree_best_fit &);

   public:
   //!Shared interprocess_mutex family used for the rest of the Interprocess framework
   typedef MutexFamily        mutex_family;
   //!Pointer type to be used with the rest of the Interprocess framework
   typedef VoidPointer        void_pointer;
   //typedef detail::basic_multiallocation_cached_counted_slist<void_pointer> multiallocation_chain;
   typedef detail::basic_multiallocation_cached_slist<void_pointer>   multialloc_cached;
   typedef detail::basic_multiallocation_cached_counted_slist
      <multialloc_cached>                                             multiallocation_chain;
   private:
   struct block_ctrl;
   typedef typename boost::
      pointer_to_other<void_pointer, block_ctrl>::type   block_ctrl_ptr;
   typedef typename boost::
      pointer_to_other<void_pointer, char>::type         char_ptr;

   typedef typename bi::make_set_base_hook
      < bi::void_pointer<VoidPointer>
      , bi::optimize_size<true>
      , bi::link_mode<bi::normal_link> >::type           TreeHook;
   struct SizeHolder
   {
      //!This block's memory size (including the block_ctrl
      //!header) in Alignment units
      std::size_t m_prev_size      :  sizeof(std::size_t)*CHAR_BIT;
      std::size_t m_size           :  sizeof(std::size_t)*CHAR_BIT - 2;
      std::size_t m_prev_allocated :  1;
      std::size_t m_allocated      :  1;
   };
   //!Block control structure
   struct block_ctrl
      :  public SizeHolder, public TreeHook
   {
      block_ctrl()
      {  this->m_size = 0; this->m_allocated = 0; this->m_prev_allocated = 0;  }

      friend bool operator<(const block_ctrl &a, const block_ctrl &b)
      {  return a.m_size < b.m_size;  }
      friend bool operator==(const block_ctrl &a, const block_ctrl &b)
      {  return a.m_size == b.m_size;  }
   };
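
   //Illustrative note (not additional interface): both sizes above are stored in
   //Alignment units, so a block_ctrl whose m_size is 8 spans 8*Alignment bytes,
   //including its own header:
   //
   //   std::size_t bytes = blk.m_size * Alignment;   //blk is a hypothetical block_ctrl
   //
   //m_size is two bits narrower than std::size_t because m_allocated and
   //m_prev_allocated share its word; requiring Alignment >= 4 (see the static
   //assertions below) recovers those two bits, since byte counts are always divided
   //by at least 4 before being stored.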
   struct size_block_ctrl_compare
   {
      bool operator()(std::size_t size, const block_ctrl &block) const
      {  return size < block.m_size;  }

      bool operator()(const block_ctrl &block, std::size_t size) const
      {  return block.m_size < size;  }
   };
   //!Shared interprocess_mutex to protect memory allocate/deallocate
   typedef typename MutexFamily::mutex_type                 interprocess_mutex;

   typedef typename bi::make_multiset
      <block_ctrl, bi::base_hook<TreeHook> >::type          Imultiset;

   typedef typename Imultiset::iterator                     imultiset_iterator;

   //!This struct includes needed data and derives from
   //!interprocess_mutex to allow EBO when using null interprocess_mutex
   struct header_t : public interprocess_mutex
   {
      Imultiset            m_imultiset;

      //!The extra size required by the segment
      std::size_t          m_extra_hdr_bytes;
      //!Allocated bytes for internal checking
      std::size_t          m_allocated;
      //!The size of the memory segment
      std::size_t          m_size;
   }  m_header;
   friend class detail::memory_algorithm_common<rbtree_best_fit>;

   typedef detail::memory_algorithm_common<rbtree_best_fit> algo_impl_t;
   public:
   //!Constructor. "size" is the total size of the managed memory segment,
   //!"extra_hdr_bytes" indicates the extra bytes beginning in the sizeof(rbtree_best_fit)
   //!offset that the allocator should not use at all.
   rbtree_best_fit(std::size_t size, std::size_t extra_hdr_bytes);

   //!Destructor
   ~rbtree_best_fit();

   //!Obtains the minimum size needed by the algorithm
   static std::size_t get_min_size (std::size_t extra_hdr_bytes);

   //Functions for single segment management

   //!Allocates bytes, returns 0 if there is no more memory
   void* allocate             (std::size_t nbytes);
   //Experimental. Don't use

   //!Multiple element allocation, same size
   multiallocation_chain allocate_many(std::size_t elem_bytes, std::size_t num_elements)
   {
      //-----------------------
      boost::interprocess::scoped_lock<interprocess_mutex> guard(m_header);
      //-----------------------
      return algo_impl_t::allocate_many(this, elem_bytes, num_elements);
   }

   //!Multiple element allocation, different size
   multiallocation_chain allocate_many
      (const std::size_t *elem_sizes, std::size_t n_elements, std::size_t sizeof_element)
   {
      //-----------------------
      boost::interprocess::scoped_lock<interprocess_mutex> guard(m_header);
      //-----------------------
      return algo_impl_t::allocate_many(this, elem_sizes, n_elements, sizeof_element);
   }

   //!Deallocates elements previously allocated with allocate_many
   void deallocate_many(multiallocation_chain chain);
   //!Deallocates previously allocated bytes
   void   deallocate          (void *addr);

   //!Returns the size of the memory segment
   std::size_t get_size()  const;

   //!Returns the number of free bytes of the segment
   std::size_t get_free_memory()  const;

   //!Initializes to zero all the memory that's not in use.
   //!This function is normally used for security reasons.
   void zero_free_memory();

   //!Increases managed memory by extra_size bytes
   void grow(std::size_t extra_size);

   //!Decreases managed memory as much as possible
   void shrink_to_fit();

   //!Returns true if all allocated memory has been deallocated
   bool all_memory_deallocated();

   //!Makes an internal sanity check
   //!and returns true on success
   bool check_sanity();
   template<class T>
   std::pair<T *, bool>
      allocation_command  (boost::interprocess::allocation_type command, std::size_t limit_size,
                           std::size_t preferred_size, std::size_t &received_size,
                           T *reuse_ptr = 0);

   std::pair<void *, bool>
      raw_allocation_command  (boost::interprocess::allocation_type command, std::size_t limit_object,
                               std::size_t preferred_object, std::size_t &received_object,
                               void *reuse_ptr = 0, std::size_t sizeof_object = 1);
   //!Returns the size of the buffer previously allocated and pointed to by ptr
   std::size_t size(const void *ptr) const;

   //!Allocates aligned bytes, returns 0 if there is no more memory.
   //!Alignment must be a power of 2
   void* allocate_aligned     (std::size_t nbytes, std::size_t alignment);

   private:
   static std::size_t priv_first_block_offset(const void *this_ptr, std::size_t extra_hdr_bytes);
   std::pair<void*, bool>
      priv_allocation_command(boost::interprocess::allocation_type command, std::size_t limit_size,
                              std::size_t preferred_size, std::size_t &received_size,
                              void *reuse_ptr, std::size_t sizeof_object);

   //!Real allocation algorithm with min allocation option
   std::pair<void *, bool> priv_allocate(boost::interprocess::allocation_type command
                                        ,std::size_t limit_size
                                        ,std::size_t preferred_size
                                        ,std::size_t &received_size
                                        ,void *reuse_ptr = 0
                                        ,std::size_t backwards_multiple = 1);
   //!Obtains the block control structure of the user buffer
   static block_ctrl *priv_get_block(const void *ptr);

   //!Obtains the pointer returned to the user from the block control
   static void *priv_get_user_buffer(const block_ctrl *block);

   //!Returns the number of total units that a user buffer
   //!of "userbytes" bytes really occupies (including header)
   static std::size_t priv_get_total_units(std::size_t userbytes);

   //!Real expand function implementation
   bool priv_expand(void *ptr
                   ,const std::size_t min_size, const std::size_t preferred_size
                   ,std::size_t &received_size);

   //!Real expand to both sides implementation
   void* priv_expand_both_sides(boost::interprocess::allocation_type command
                               ,std::size_t min_size
                               ,std::size_t preferred_size
                               ,std::size_t &received_size
                               ,void *reuse_ptr
                               ,bool only_preferred_backwards
                               ,std::size_t backwards_multiple);

   //!Gets a pointer to the previous block (previous block must be free)
   block_ctrl * priv_prev_block(block_ctrl *ptr);

   //!Returns true if the previous block is allocated
   bool priv_is_prev_allocated(block_ctrl *ptr);

   //!Gets a pointer to the "end" block from the first block of the segment
   block_ctrl * priv_end_block(block_ctrl *first_segment_block);

   //!Gets a pointer to the next block
   block_ctrl * priv_next_block(block_ctrl *ptr);

   //!Returns true if this block is allocated (not free)
   bool priv_is_allocated_block(block_ctrl *ptr);

   //!Marks the block as allocated
   void priv_mark_as_allocated_block(block_ctrl *ptr);

   //!Marks the block as free
   void priv_mark_as_free_block(block_ctrl *ptr);

   //!Checks if block has enough memory and splits/unlinks the block
   //!returning the address to the user
   void* priv_check_and_allocate(std::size_t units
                                ,block_ctrl* block
                                ,std::size_t &received_size);
   //!Real deallocation algorithm
   void priv_deallocate(void *addr);

   //!Makes a new memory portion available for allocation
   void priv_add_segment(void *addr, std::size_t size);

   void priv_mark_new_allocated_block(block_ctrl *block);
   public:
   static const std::size_t Alignment = !MemAlignment
      ?  detail::alignment_of<detail::max_align>::value
      :  MemAlignment
      ;

   //Due to embedded bits in size, Alignment must be at least 4
   BOOST_STATIC_ASSERT((Alignment >= 4));
   //Due to rbtree size optimizations, Alignment must have at least pointer alignment
   BOOST_STATIC_ASSERT((Alignment >= detail::alignment_of<void_pointer>::value));
   static const std::size_t AlignmentMask         = (Alignment - 1);
   static const std::size_t BlockCtrlBytes        = detail::ct_rounded_size<sizeof(block_ctrl), Alignment>::value;
   static const std::size_t BlockCtrlUnits        = BlockCtrlBytes/Alignment;
   static const std::size_t AllocatedCtrlBytes    = detail::ct_rounded_size<sizeof(SizeHolder), Alignment>::value;
   static const std::size_t AllocatedCtrlUnits    = AllocatedCtrlBytes/Alignment;
   static const std::size_t EndCtrlBlockBytes     = detail::ct_rounded_size<sizeof(SizeHolder), Alignment>::value;
   static const std::size_t EndCtrlBlockUnits     = EndCtrlBlockBytes/Alignment;
   static const std::size_t MinBlockUnits         = BlockCtrlUnits;
   static const std::size_t UsableByPreviousChunk = sizeof(std::size_t);

   //Make sure the maximum alignment is a power of two
   BOOST_STATIC_ASSERT((0 == (Alignment & (Alignment - std::size_t(1u)))));

   static const std::size_t PayloadPerAllocation  = AllocatedCtrlBytes - UsableByPreviousChunk;
};
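
//Illustrative usage sketch (assumes the usual Interprocess front-end typedefs such as
//basic_managed_shared_memory, mutex_family and iset_index; none of them are defined in
//this header). The algorithm is normally selected as the MemoryAlgorithm parameter of a
//managed memory segment instead of being instantiated directly:
//
//   #include <boost/interprocess/managed_shared_memory.hpp>
//   namespace ipc = boost::interprocess;
//
//   typedef ipc::basic_managed_shared_memory
//      < char
//      , ipc::rbtree_best_fit<ipc::mutex_family, ipc::offset_ptr<void> >
//      , ipc::iset_index
//      > managed_shm;
//
//   managed_shm segment(ipc::create_only, "MySharedMemory", 65536);
//   void *buf = segment.allocate(1024);   //best-fit search in the red-black tree
//   segment.deallocate(buf);              //merges the block back into the free tree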
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline std::size_t rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>
   ::priv_first_block_offset(const void *this_ptr, std::size_t extra_hdr_bytes)
{
   std::size_t uint_this            = (std::size_t)this_ptr;
   std::size_t main_hdr_end         = uint_this + sizeof(rbtree_best_fit) + extra_hdr_bytes;
   std::size_t aligned_main_hdr_end = detail::get_rounded_size(main_hdr_end, Alignment);
   std::size_t block1_off           = aligned_main_hdr_end - uint_this;
   algo_impl_t::assert_alignment(aligned_main_hdr_end);
   algo_impl_t::assert_alignment(uint_this + block1_off);
   return block1_off;
}
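
//Worked example (the figures are illustrative and platform dependent): if this object
//lives at address 0x1000, sizeof(rbtree_best_fit) is 72, extra_hdr_bytes is 4 and
//Alignment is 16, then main_hdr_end is 0x1000 + 76 == 0x104C, get_rounded_size() rounds
//that up to 0x1050, and block1_off is 0x50: the first block starts 80 bytes into the
//segment, on an Alignment boundary.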
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   rbtree_best_fit(std::size_t size, std::size_t extra_hdr_bytes)
{
   //Initialize the header
   m_header.m_allocated       = 0;
   m_header.m_size            = size;
   m_header.m_extra_hdr_bytes = extra_hdr_bytes;

   //Now calculate the offset of the first big block that will
   //cover the whole segment
   assert(get_min_size(extra_hdr_bytes) <= size);
   std::size_t block1_off = priv_first_block_offset(this, extra_hdr_bytes);
   priv_add_segment(reinterpret_cast<char*>(this) + block1_off, size - block1_off);
}
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::~rbtree_best_fit()
{
   //If these asserts fired there would be a memory leak
//   assert(m_header.m_allocated == 0);
//   assert(m_header.m_root.m_next->m_next == block_ctrl_ptr(&m_header.m_root));
}
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::grow(std::size_t extra_size)
{
   //Get the address of the first block
   std::size_t block1_off =
      priv_first_block_offset(this, m_header.m_extra_hdr_bytes);

   block_ctrl *first_block = reinterpret_cast<block_ctrl *>
                                (reinterpret_cast<char*>(this) + block1_off);
   block_ctrl *old_end_block = priv_end_block(first_block);
   assert(priv_is_allocated_block(old_end_block));
   std::size_t old_border_offset = (reinterpret_cast<char*>(old_end_block) -
                                    reinterpret_cast<char*>(this)) + EndCtrlBlockBytes;

   //Update managed buffer's size
   m_header.m_size += extra_size;

   //We need at least MinBlockUnits blocks to create a new block
//   assert((m_header.m_size - old_end) >= MinBlockUnits);
   if((m_header.m_size - old_border_offset) < MinBlockUnits){
      return;
   }

   //Now create a new block between the old end and the new end
   std::size_t align_offset = (m_header.m_size - old_border_offset)/Alignment;
   block_ctrl *new_end_block = reinterpret_cast<block_ctrl*>
      (reinterpret_cast<char*>(old_end_block) + align_offset*Alignment);
   new_end_block->m_size = (reinterpret_cast<char*>(first_block) -
                            reinterpret_cast<char*>(new_end_block))/Alignment;
   first_block->m_prev_size = new_end_block->m_size;
   assert(first_block == priv_next_block(new_end_block));
   priv_mark_new_allocated_block(new_end_block);

   assert(new_end_block == priv_end_block(first_block));

   //The old end block is the new block
   block_ctrl *new_block = old_end_block;
   new_block->m_size = (reinterpret_cast<char*>(new_end_block) -
                        reinterpret_cast<char*>(new_block))/Alignment;
   assert(new_block->m_size >= BlockCtrlUnits);
   priv_mark_new_allocated_block(new_block);
   assert(priv_next_block(new_block) == new_end_block);

   m_header.m_allocated += new_block->m_size*Alignment;

   //Now deallocate the newly created block
   this->priv_deallocate(priv_get_user_buffer(new_block));
}
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::shrink_to_fit()
{
   //Get the address of the first block
   std::size_t block1_off =
      priv_first_block_offset(this, m_header.m_extra_hdr_bytes);

   block_ctrl *first_block = reinterpret_cast<block_ctrl*>
                                (reinterpret_cast<char*>(this) + block1_off);
   algo_impl_t::assert_alignment(first_block);

   block_ctrl *old_end_block = priv_end_block(first_block);
   algo_impl_t::assert_alignment(old_end_block);
   assert(priv_is_allocated_block(old_end_block));

   algo_impl_t::assert_alignment(old_end_block);

   std::size_t old_end_block_size = old_end_block->m_size;

   void *unique_buffer = 0;
   block_ctrl *last_block;
   if(priv_next_block(first_block) == old_end_block){
      //The segment is a single free block: allocate a dummy buffer so that
      //the remaining free block becomes the block to be truncated
      std::size_t ignore;
      unique_buffer = priv_allocate(boost::interprocess::allocate_new, 0, 0, ignore).first;
      if(!unique_buffer)
         return;
      algo_impl_t::assert_alignment(unique_buffer);
      block_ctrl *unique_block = priv_get_block(unique_buffer);
      assert(priv_is_allocated_block(unique_block));
      algo_impl_t::assert_alignment(unique_block);
      last_block = priv_next_block(unique_block);
      assert(!priv_is_allocated_block(last_block));
      algo_impl_t::assert_alignment(last_block);
   }
   else{
      //If the last block is allocated there is nothing to shrink
      if(priv_is_prev_allocated(old_end_block))
         return;
      last_block = priv_prev_block(old_end_block);
   }

   std::size_t last_block_size = last_block->m_size;

   //Erase block from the free tree, since we will erase it
   m_header.m_imultiset.erase(Imultiset::s_iterator_to(*last_block));

   std::size_t shrunk_border_offset = (reinterpret_cast<char*>(last_block) -
                                       reinterpret_cast<char*>(this)) + EndCtrlBlockBytes;

   block_ctrl *new_end_block = last_block;
   algo_impl_t::assert_alignment(new_end_block);
   new_end_block->m_size = old_end_block_size + last_block_size;
   priv_mark_as_allocated_block(new_end_block);

   //Although the first block might be allocated, we'll
   //store the offset to the end block since the previous
   //offset can't be overwritten by a previous block
   first_block->m_prev_size = new_end_block->m_size;
   assert(priv_end_block(first_block) == new_end_block);

   //Update managed buffer's size
   m_header.m_size = shrunk_border_offset;
   if(unique_buffer)
      priv_deallocate(unique_buffer);
}
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   priv_add_segment(void *addr, std::size_t size)
{
   //Check alignment
   algo_impl_t::check_alignment(addr);
   //Check size
   assert(size >= (BlockCtrlBytes + EndCtrlBlockBytes));

   //Initialize the first big block and the "end" node
   block_ctrl *first_big_block = new(addr)block_ctrl;
   first_big_block->m_size = size/Alignment - EndCtrlBlockUnits;
   assert(first_big_block->m_size >= BlockCtrlUnits);

   //The "end" node is just a node of size 0 with the "end" bit set
   block_ctrl *end_block = static_cast<block_ctrl*>
      (new (reinterpret_cast<char*>(addr) + first_big_block->m_size*Alignment)SizeHolder);

   //This will overwrite the prev part of the "end" node
   priv_mark_as_free_block (first_big_block);
   first_big_block->m_prev_size = end_block->m_size =
      (reinterpret_cast<char*>(first_big_block) - reinterpret_cast<char*>(end_block))/Alignment;
   priv_mark_as_allocated_block(end_block);

   assert(priv_next_block(first_big_block) == end_block);
   assert(priv_next_block(end_block) == first_big_block);
   assert(priv_end_block(first_big_block) == end_block);
   assert(priv_prev_block(end_block) == first_big_block);

   //Some checks to validate the algorithm, since it makes some assumptions
   //to optimize the space wasted in bookkeeping:

   //Check that the sizes of the header are placed before the rbtree hook
   assert(static_cast<void*>(static_cast<SizeHolder*>(first_big_block))
          < static_cast<void*>(static_cast<TreeHook*>(first_big_block)));

   //Check that the alignment is power of two (we use some optimizations based on this)
   //assert((Alignment % 2) == 0);
   //Insert it in the intrusive containers
   m_header.m_imultiset.insert(*first_big_block);
}
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   priv_mark_new_allocated_block(block_ctrl *new_block)
{  priv_mark_as_allocated_block(new_block);  }
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline std::size_t rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::get_size() const
{  return m_header.m_size;  }
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline std::size_t rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::get_free_memory() const
{
   return m_header.m_size - m_header.m_allocated -
      priv_first_block_offset(this, m_header.m_extra_hdr_bytes);
}
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline std::size_t rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   get_min_size (std::size_t extra_hdr_bytes)
{
   return (algo_impl_t::ceil_units(sizeof(rbtree_best_fit)) +
           algo_impl_t::ceil_units(extra_hdr_bytes) +
           MinBlockUnits + EndCtrlBlockUnits)*Alignment;
}
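
//Worked example (illustrative 64-bit figures): with Alignment == 16,
//ceil_units(sizeof(rbtree_best_fit)) == 5, extra_hdr_bytes == 0, MinBlockUnits == 3 and
//EndCtrlBlockUnits == 1, the minimum segment size is (5 + 0 + 3 + 1)*16 == 144 bytes;
//constructing the algorithm on anything smaller trips the assert in the constructor.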
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline bool rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   all_memory_deallocated()
{
   //-----------------------
   boost::interprocess::scoped_lock<interprocess_mutex> guard(m_header);
   //-----------------------
   std::size_t block1_off =
      priv_first_block_offset(this, m_header.m_extra_hdr_bytes);

   return m_header.m_allocated == 0 &&
      m_header.m_imultiset.begin() != m_header.m_imultiset.end() &&
      (++m_header.m_imultiset.begin()) == m_header.m_imultiset.end() &&
      m_header.m_imultiset.begin()->m_size ==
         (m_header.m_size - block1_off - EndCtrlBlockBytes)/Alignment;
}
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
bool rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   check_sanity()
{
   //-----------------------
   boost::interprocess::scoped_lock<interprocess_mutex> guard(m_header);
   //-----------------------
   imultiset_iterator ib(m_header.m_imultiset.begin()), ie(m_header.m_imultiset.end());

   std::size_t free_memory = 0;

   //Iterate through all blocks obtaining their size
   for(; ib != ie; ++ib){
      free_memory += ib->m_size*Alignment;
      algo_impl_t::assert_alignment(&*ib);
      if(!algo_impl_t::check_alignment(&*ib))
         return false;
   }

   //Check allocated bytes are less than size
   if(m_header.m_allocated > m_header.m_size){
      return false;
   }

   std::size_t block1_off =
      priv_first_block_offset(this, m_header.m_extra_hdr_bytes);

   //Check free bytes are less than size
   if(free_memory > (m_header.m_size - block1_off)){
      return false;
   }
   return true;
}
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline void* rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   allocate(std::size_t nbytes)
{
   //-----------------------
   boost::interprocess::scoped_lock<interprocess_mutex> guard(m_header);
   //-----------------------
   std::size_t ignore;
   void * ret = priv_allocate(boost::interprocess::allocate_new, nbytes, nbytes, ignore).first;
   return ret;
}
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline void* rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   allocate_aligned(std::size_t nbytes, std::size_t alignment)
{
   //-----------------------
   boost::interprocess::scoped_lock<interprocess_mutex> guard(m_header);
   //-----------------------
   return algo_impl_t::allocate_aligned(this, nbytes, alignment);
}
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
template<class T>
inline std::pair<T*, bool> rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   allocation_command  (boost::interprocess::allocation_type command, std::size_t limit_size,
                        std::size_t preferred_size, std::size_t &received_size,
                        T *reuse_ptr)
{
   std::pair<void*, bool> ret = priv_allocation_command
      (command, limit_size, preferred_size, received_size, static_cast<void*>(reuse_ptr), sizeof(T));

   BOOST_ASSERT(0 == ((std::size_t)ret.first % detail::alignment_of<T>::value));
   return std::pair<T *, bool>(static_cast<T*>(ret.first), ret.second);
}
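
//Illustrative call (algo and old_ptr are placeholder names, not part of this header):
//try to grow an existing buffer of ints in place, falling back to a fresh allocation.
//
//   std::size_t received = 0;
//   std::pair<int*, bool> r = algo->allocation_command<int>
//      ( boost::interprocess::expand_fwd | boost::interprocess::allocate_new
//      , 100       //limit_size: accept anything holding at least 100 ints
//      , 200       //preferred_size: ideally 200 ints
//      , received  //out: how many ints the returned buffer really holds
//      , old_ptr); //buffer we would like to expand
//   //r.second == true means old_ptr was reused/expanded; false means a new buffer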
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline std::pair<void*, bool> rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   raw_allocation_command  (boost::interprocess::allocation_type command, std::size_t limit_objects,
                            std::size_t preferred_objects, std::size_t &received_objects,
                            void *reuse_ptr, std::size_t sizeof_object)
{
   if(!sizeof_object)
      return std::pair<void *, bool>(static_cast<void*>(0), false);
   if(command & boost::interprocess::try_shrink_in_place){
      bool success = algo_impl_t::try_shrink
         ( this, reuse_ptr, limit_objects*sizeof_object
         , preferred_objects*sizeof_object, received_objects);
      received_objects /= sizeof_object;
      return std::pair<void *, bool> ((success ? reuse_ptr : 0), true);
   }
   return priv_allocation_command
      (command, limit_objects, preferred_objects, received_objects, reuse_ptr, sizeof_object);
}
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline std::pair<void*, bool> rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   priv_allocation_command (boost::interprocess::allocation_type command, std::size_t limit_size,
                            std::size_t preferred_size, std::size_t &received_size,
                            void *reuse_ptr, std::size_t sizeof_object)
{
   std::pair<void*, bool> ret;
   std::size_t max_count = m_header.m_size/sizeof_object;
   if(limit_size > max_count || preferred_size > max_count){
      ret.first = 0; return ret;
   }
   std::size_t l_size = limit_size*sizeof_object;
   std::size_t p_size = preferred_size*sizeof_object;
   std::size_t r_size;
   {
      //-----------------------
      boost::interprocess::scoped_lock<interprocess_mutex> guard(m_header);
      //-----------------------
      ret = priv_allocate(command, l_size, p_size, r_size, reuse_ptr, sizeof_object);
   }
   received_size = r_size/sizeof_object;
   return ret;
}
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline std::size_t rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   size(const void *ptr) const
{
   //We need no synchronization since this block's size is not going
   //to be modified by anyone else
   //Obtain the real size of the block
   return (priv_get_block(ptr)->m_size - AllocatedCtrlUnits)*Alignment + UsableByPreviousChunk;
}
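
//Illustrative arithmetic: with Alignment == 16, AllocatedCtrlUnits == 1 and
//UsableByPreviousChunk == 8 (typical 64-bit values), a block whose m_size is 5 units
//reports (5 - 1)*16 + 8 == 72 usable bytes, slightly more than requested whenever
//rounding to Alignment units created slack.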
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::zero_free_memory()
{
   //-----------------------
   boost::interprocess::scoped_lock<interprocess_mutex> guard(m_header);
   //-----------------------
   imultiset_iterator ib(m_header.m_imultiset.begin()), ie(m_header.m_imultiset.end());

   //Iterate through all blocks obtaining their size
   for(; ib != ie; ++ib){
      //Just clear the memory part reserved for the user
      std::memset( reinterpret_cast<char*>(&*ib) + BlockCtrlBytes
                 , 0
                 , ib->m_size*Alignment - BlockCtrlBytes);
   }
}
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
void* rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   priv_expand_both_sides(boost::interprocess::allocation_type command
                         ,std::size_t min_size
                         ,std::size_t preferred_size
                         ,std::size_t &received_size
                         ,void *reuse_ptr
                         ,bool only_preferred_backwards
                         ,std::size_t backwards_multiple)
{
   algo_impl_t::assert_alignment(reuse_ptr);
   if(command & boost::interprocess::expand_fwd){
      if(priv_expand(reuse_ptr, min_size, preferred_size, received_size))
         return reuse_ptr;
   }
   else{
      received_size = this->size(reuse_ptr);
      if(received_size >= preferred_size || received_size >= min_size)
         return reuse_ptr;
   }

   if(backwards_multiple){
      BOOST_ASSERT(0 == (min_size       % backwards_multiple));
      BOOST_ASSERT(0 == (preferred_size % backwards_multiple));
   }

   if(command & boost::interprocess::expand_bwd){
      //Obtain the real size of the block
      block_ctrl *reuse = priv_get_block(reuse_ptr);

      //Sanity check
      //assert(reuse->m_size == priv_tail_size(reuse));
      algo_impl_t::assert_alignment(reuse);

      block_ctrl *prev_block;

      //If the previous block is not free, there is nothing to do
      if(priv_is_prev_allocated(reuse)){
         return 0;
      }

      prev_block = priv_prev_block(reuse);
      assert(!priv_is_allocated_block(prev_block));

      //Some needed checks
      assert(prev_block->m_size == reuse->m_prev_size);
      algo_impl_t::assert_alignment(prev_block);

      std::size_t needs_backwards_aligned;
      std::size_t lcm;
      if(!algo_impl_t::calculate_lcm_and_needs_backwards_lcmed
         ( backwards_multiple
         , received_size
         , only_preferred_backwards ? preferred_size : min_size
         , lcm, needs_backwards_aligned)){
         return 0;
      }

      //Check if previous block has enough size
      if(std::size_t(prev_block->m_size*Alignment) >= needs_backwards_aligned){
         //Now take all next space. This will succeed
         if(command & boost::interprocess::expand_fwd){
            std::size_t received_size2;
            if(!priv_expand(reuse_ptr, received_size, received_size, received_size2)){
               assert(0);
            }
            assert(received_size = received_size2);
         }

         //We need a minimum size to split the previous one
         if(prev_block->m_size >= (needs_backwards_aligned/Alignment + BlockCtrlUnits)){
            block_ctrl *new_block = reinterpret_cast<block_ctrl *>
               (reinterpret_cast<char*>(reuse) - needs_backwards_aligned);

            //Free old previous buffer
            new_block->m_size =
               AllocatedCtrlUnits + (needs_backwards_aligned + (received_size - UsableByPreviousChunk))/Alignment;
            assert(new_block->m_size >= BlockCtrlUnits);
            priv_mark_new_allocated_block(new_block);

            prev_block->m_size = (reinterpret_cast<char*>(new_block) -
                                  reinterpret_cast<char*>(prev_block))/Alignment;
            assert(prev_block->m_size >= BlockCtrlUnits);
            priv_mark_as_free_block(prev_block);

            //Update the old previous block in the free blocks tree
            //If the new size fulfills tree invariants do nothing,
            //otherwise erase() + insert()
            {
               imultiset_iterator prev_block_it(Imultiset::s_iterator_to(*prev_block));
               imultiset_iterator was_smaller_it(prev_block_it);
               if(prev_block_it != m_header.m_imultiset.begin() &&
                  (--(was_smaller_it = prev_block_it))->m_size > prev_block->m_size){
                  m_header.m_imultiset.erase(prev_block_it);
                  m_header.m_imultiset.insert(m_header.m_imultiset.begin(), *prev_block);
               }
            }

            received_size = needs_backwards_aligned + received_size;
            m_header.m_allocated += needs_backwards_aligned;

            //Check alignment
            algo_impl_t::assert_alignment(new_block);

            //If the backwards expansion has remaining bytes in the
            //first bytes, fill them with a pattern
            void *p = priv_get_user_buffer(new_block);
            void *user_ptr = reinterpret_cast<char*>(p);
            assert((static_cast<char*>(reuse_ptr) - static_cast<char*>(user_ptr)) % backwards_multiple == 0);
            algo_impl_t::assert_alignment(user_ptr);
            return user_ptr;
         }
         //Check if there is no place to create a new block and
         //the whole new block is multiple of the backwards expansion multiple
         else if(prev_block->m_size >= needs_backwards_aligned/Alignment &&
                 0 == ((prev_block->m_size*Alignment) % lcm)) {
            //Erase old previous block, since we will change it
            m_header.m_imultiset.erase(Imultiset::s_iterator_to(*prev_block));

            //Just merge the whole previous block
            //prev_block->m_size*Alignment is multiple of lcm (and backwards_multiple)
            received_size = received_size + prev_block->m_size*Alignment;

            m_header.m_allocated += prev_block->m_size*Alignment;
            //Now update sizes
            prev_block->m_size = prev_block->m_size + reuse->m_size;
            assert(prev_block->m_size >= BlockCtrlUnits);
            priv_mark_new_allocated_block(prev_block);

            //If the backwards expansion has remaining bytes in the
            //first bytes, fill them with a pattern
            void *user_ptr = priv_get_user_buffer(prev_block);
            assert((static_cast<char*>(reuse_ptr) - static_cast<char*>(user_ptr)) % backwards_multiple == 0);
            algo_impl_t::assert_alignment(user_ptr);
            return user_ptr;
         }
      }
   }
   return 0;
}
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   deallocate_many(typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::multiallocation_chain chain)
{
   //-----------------------
   boost::interprocess::scoped_lock<interprocess_mutex> guard(m_header);
   //-----------------------
   algo_impl_t::deallocate_many(this, boost::interprocess::move(chain));
}
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
std::pair<void *, bool> rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   priv_allocate(boost::interprocess::allocation_type command
                ,std::size_t limit_size
                ,std::size_t preferred_size
                ,std::size_t &received_size
                ,void *reuse_ptr
                ,std::size_t backwards_multiple)
{
   //Remove me. Forbid backwards allocation
   //command &= (~boost::interprocess::expand_bwd);

   if(command & boost::interprocess::shrink_in_place){
      bool success =
         algo_impl_t::shrink(this, reuse_ptr, limit_size, preferred_size, received_size);
      return std::pair<void *, bool> ((success ? reuse_ptr : 0), true);
   }

   typedef std::pair<void *, bool> return_type;
   received_size = 0;

   if(limit_size > preferred_size)
      return return_type(static_cast<void*>(0), false);

   //Number of units to request (including block_ctrl header)
   std::size_t preferred_units = priv_get_total_units(preferred_size);

   //Number of units to request (including block_ctrl header)
   std::size_t limit_units = priv_get_total_units(limit_size);

   //Expand in place
   if(reuse_ptr && (command & (boost::interprocess::expand_fwd | boost::interprocess::expand_bwd))){
      void *ret = priv_expand_both_sides
         (command, limit_size, preferred_size, received_size, reuse_ptr, true, backwards_multiple);
      if(ret)
         return return_type(ret, true);
   }

   if(command & boost::interprocess::allocate_new){
      size_block_ctrl_compare comp;
      imultiset_iterator it(m_header.m_imultiset.lower_bound(preferred_units, comp));

      if(it != m_header.m_imultiset.end()){
         return return_type(this->priv_check_and_allocate
            (preferred_units, detail::get_pointer(&*it), received_size), false);
      }

      if(it != m_header.m_imultiset.begin() &&
         (--it)->m_size >= limit_units){
         return return_type(this->priv_check_and_allocate
            (it->m_size, detail::get_pointer(&*it), received_size), false);
      }
   }

   //Now try to expand both sides with min size
   if(reuse_ptr && (command & (boost::interprocess::expand_fwd | boost::interprocess::expand_bwd))){
      return return_type(priv_expand_both_sides
         (command, limit_size, preferred_size, received_size, reuse_ptr, false, backwards_multiple), true);
   }
   return return_type(static_cast<void*>(0), false);
}
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *
   rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_get_block(const void *ptr)
{
   return const_cast<block_ctrl*>
      (reinterpret_cast<const block_ctrl*>
         (reinterpret_cast<const char*>(ptr) - AllocatedCtrlBytes));
}
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
void *rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   priv_get_user_buffer(const typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *block)
{  return const_cast<char*>(reinterpret_cast<const char*>(block) + AllocatedCtrlBytes);  }
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
std::size_t rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   priv_get_total_units(std::size_t userbytes)
{
   if(userbytes < UsableByPreviousChunk)
      userbytes = UsableByPreviousChunk;
   std::size_t units = detail::get_rounded_size(userbytes - UsableByPreviousChunk, Alignment)/Alignment + AllocatedCtrlUnits;
   if(units < BlockCtrlUnits) units = BlockCtrlUnits;
   return units;
}
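
//Worked example (illustrative 64-bit figures): a request for 100 user bytes with
//Alignment == 16, UsableByPreviousChunk == 8 and AllocatedCtrlUnits == 1 yields
//get_rounded_size(100 - 8, 16)/16 + 1 == 96/16 + 1 == 7 units, i.e. 112 bytes of
//segment space including the allocation header.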
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
bool rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   priv_expand (void *ptr
               ,const std::size_t min_size
               ,const std::size_t preferred_size
               ,std::size_t &received_size)
{
   //Obtain the real size of the block
   block_ctrl *block = priv_get_block(ptr);
   std::size_t old_block_units = block->m_size;

   //The block must be marked as allocated and the sizes must be equal
   assert(priv_is_allocated_block(block));
   //assert(old_block_units == priv_tail_size(block));

   //Put this to a safe value
   received_size = (old_block_units - AllocatedCtrlUnits)*Alignment + UsableByPreviousChunk;
   if(received_size >= preferred_size || received_size >= min_size)
      return true;

   //Now translate it to Alignment units
   const std::size_t min_user_units = algo_impl_t::ceil_units(min_size - UsableByPreviousChunk);
   const std::size_t preferred_user_units = algo_impl_t::ceil_units(preferred_size - UsableByPreviousChunk);

   //Some parameter checks
   assert(min_user_units <= preferred_user_units);

   block_ctrl *next_block;

   if(priv_is_allocated_block(next_block = priv_next_block(block))){
      return received_size >= min_size ? true : false;
   }
   algo_impl_t::assert_alignment(next_block);

   //Is "block" + "next_block" big enough?
   const std::size_t merged_units = old_block_units + next_block->m_size;

   //Now get the expansion size
   const std::size_t merged_user_units = merged_units - AllocatedCtrlUnits;

   if(merged_user_units < min_user_units){
      received_size = merged_units*Alignment - UsableByPreviousChunk;
      return false;
   }

   //Now get the maximum size the user can allocate
   std::size_t intended_user_units = (merged_user_units < preferred_user_units) ?
      merged_user_units : preferred_user_units;

   //These are total units of the merged block (supposing the next block can be split)
   const std::size_t intended_units = AllocatedCtrlUnits + intended_user_units;

   //Check if we can split the next one in two parts
   if((merged_units - intended_units) >= BlockCtrlUnits){
      //This block is bigger than needed, split it in
      //two blocks, the first one will be merged and
      //the second's size will be the remaining space
      assert(next_block->m_size == priv_next_block(next_block)->m_prev_size);
      const std::size_t rem_units = merged_units - intended_units;

      //Check if we need to update the old next block in the free blocks tree
      //If the new size fulfills tree invariants, we just need to replace the node
      //(the block start has been displaced), otherwise erase() + insert().
      //
      //This fixup must be done in two parts, because the new next block might
      //overwrite the tree hook of the old next block. So we first erase the
      //old if needed and we'll insert the new one after creating the new next
      imultiset_iterator old_next_block_it(Imultiset::s_iterator_to(*next_block));
      const bool size_invariants_broken =
            (next_block->m_size - rem_units) < BlockCtrlUnits ||
            (old_next_block_it != m_header.m_imultiset.begin() &&
            (--imultiset_iterator(old_next_block_it))->m_size > rem_units);
      if(size_invariants_broken){
         m_header.m_imultiset.erase(old_next_block_it);
      }
      //This is the remaining block
      block_ctrl *rem_block = new(reinterpret_cast<block_ctrl*>
                     (reinterpret_cast<char*>(block) + intended_units*Alignment))block_ctrl;
      rem_block->m_size = rem_units;
      algo_impl_t::assert_alignment(rem_block);
      assert(rem_block->m_size >= BlockCtrlUnits);
      priv_mark_as_free_block(rem_block);

      //Now the second part of the fixup
      if(size_invariants_broken)
         m_header.m_imultiset.insert(m_header.m_imultiset.begin(), *rem_block);
      else
         m_header.m_imultiset.replace_node(old_next_block_it, *rem_block);

      //Write the new length
      block->m_size = intended_user_units + AllocatedCtrlUnits;
      assert(block->m_size >= BlockCtrlUnits);
      m_header.m_allocated += (intended_units - old_block_units)*Alignment;
   }
   //There is no free space to create a new node: just merge both blocks
   else{
      //Now we have to update the data in the tree
      m_header.m_imultiset.erase(Imultiset::s_iterator_to(*next_block));

      //Write the new length
      block->m_size = merged_units;
      assert(block->m_size >= BlockCtrlUnits);
      m_header.m_allocated += (merged_units - old_block_units)*Alignment;
   }
   priv_mark_as_allocated_block(block);
   received_size = (block->m_size - AllocatedCtrlUnits)*Alignment + UsableByPreviousChunk;
   return true;
}
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *
   rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_prev_block
      (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *ptr)
{
   assert(!ptr->m_prev_allocated);
   return reinterpret_cast<block_ctrl *>
      (reinterpret_cast<char*>(ptr) - ptr->m_prev_size*Alignment);
}
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
bool rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_is_prev_allocated
   (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *ptr)
{
   if(ptr->m_prev_allocated){
      return true;
   }
   else{
      block_ctrl *prev = priv_prev_block(ptr);
      (void)prev;
      assert(!priv_is_allocated_block(prev));
      return false;
   }
}
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *
   rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_end_block
      (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *first_segment_block)
{
   assert(first_segment_block->m_prev_allocated);
   block_ctrl *end_block = reinterpret_cast<block_ctrl*>
      (reinterpret_cast<char*>(first_segment_block) - first_segment_block->m_prev_size*Alignment);
   assert(priv_is_allocated_block(end_block));
   assert(end_block > first_segment_block);
   return end_block;
}
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *
   rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_next_block
      (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *ptr)
{
   return reinterpret_cast<block_ctrl *>
      (reinterpret_cast<char*>(ptr) + ptr->m_size*Alignment);
}
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
bool rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_is_allocated_block
   (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *block)
{
   bool allocated = block->m_allocated != 0;
   block_ctrl *next_block = reinterpret_cast<block_ctrl *>
      (reinterpret_cast<char*>(block) + block->m_size*Alignment);
   bool next_block_prev_allocated = next_block->m_prev_allocated != 0;
   (void)next_block_prev_allocated;
   assert(allocated == next_block_prev_allocated);
   return allocated;
}
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_mark_as_allocated_block
   (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *block)
{
   //assert(!priv_is_allocated_block(block));
   block->m_allocated = 1;
   reinterpret_cast<block_ctrl *>
      (reinterpret_cast<char*>(block) + block->m_size*Alignment)->m_prev_allocated = 1;
}
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_mark_as_free_block
   (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *block)
{
   block->m_allocated = 0;
   reinterpret_cast<block_ctrl *>
      (reinterpret_cast<char*>(block) + block->m_size*Alignment)->m_prev_allocated = 0;
   //assert(!priv_is_allocated_block(ptr));
   priv_next_block(block)->m_prev_size = block->m_size;
}
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
void* rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_check_and_allocate
   (std::size_t nunits
   ,typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl* block
   ,std::size_t &received_size)
{
   std::size_t upper_nunits = nunits + BlockCtrlUnits;
   imultiset_iterator it_old = Imultiset::s_iterator_to(*block);
   algo_impl_t::assert_alignment(block);

   if (block->m_size >= upper_nunits){
      //This block is bigger than needed, split it in
      //two blocks, the first's size will be "units" and
      //the second's size "block->m_size-units"
      std::size_t block_old_size = block->m_size;
      block->m_size = nunits;
      assert(block->m_size >= BlockCtrlUnits);

      //This is the remaining block
      block_ctrl *rem_block = new(reinterpret_cast<block_ctrl*>
                     (reinterpret_cast<char*>(block) + Alignment*nunits))block_ctrl;
      algo_impl_t::assert_alignment(rem_block);
      rem_block->m_size = block_old_size - nunits;
      assert(rem_block->m_size >= BlockCtrlUnits);
      priv_mark_as_free_block(rem_block);

      imultiset_iterator it_hint;
      if(it_old == m_header.m_imultiset.begin()
         || (--imultiset_iterator(it_old))->m_size < rem_block->m_size){
         //option a: slow but secure
         //m_header.m_imultiset.insert(m_header.m_imultiset.erase(it_old), *rem_block);
         //option b: Construct an empty node and swap
         //Imultiset::init_node(*rem_block);
         //block->swap_nodes(*rem_block);
         //option c: replace the node directly
         m_header.m_imultiset.replace_node(Imultiset::s_iterator_to(*it_old), *rem_block);
      }
      else{
         //Now we have to update the data in the tree
         m_header.m_imultiset.erase(it_old);
         m_header.m_imultiset.insert(m_header.m_imultiset.begin(), *rem_block);
      }
   }
   else if (block->m_size >= nunits){
      m_header.m_imultiset.erase(it_old);
   }
   else{
      assert(0);
      return 0;
   }

   //We need block_ctrl for deallocation stuff, so
   //return memory user can overwrite
   m_header.m_allocated += block->m_size*Alignment;
   received_size = (block->m_size - AllocatedCtrlUnits)*Alignment + UsableByPreviousChunk;

   //Mark the block as allocated
   priv_mark_as_allocated_block(block);

   //Clear the memory occupied by the tree hook, since this won't be
   //cleared with zero_free_memory
   TreeHook *t = static_cast<TreeHook*>(block);
   std::memset(t, 0, sizeof(*t));
   this->priv_next_block(block)->m_prev_size = 0;
   return priv_get_user_buffer(block);
}
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::deallocate(void* addr)
{
   if(!addr)   return;
   //-----------------------
   boost::interprocess::scoped_lock<interprocess_mutex> guard(m_header);
   //-----------------------
   return this->priv_deallocate(addr);
}
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_deallocate(void* addr)
{
   if(!addr)   return;

   block_ctrl *block = priv_get_block(addr);

   //The blocks must be marked as allocated and the sizes must be equal
   assert(priv_is_allocated_block(block));
//   assert(block->m_size == priv_tail_size(block));

   //Check if alignment and block size are right
   algo_impl_t::assert_alignment(addr);

   std::size_t block_old_size = Alignment*block->m_size;
   assert(m_header.m_allocated >= block_old_size);

   //Update used memory count
   m_header.m_allocated -= block_old_size;

   //The block to insert in the tree
   block_ctrl *block_to_insert = block;

   //Get the next block
   block_ctrl *next_block  = priv_next_block(block);
   bool merge_with_prev    = !priv_is_prev_allocated(block);
   bool merge_with_next    = !priv_is_allocated_block(next_block);

   //Merge logic. First just update block sizes, then fix free blocks tree
   if(merge_with_prev || merge_with_next){
      //Merge if the previous is free
      if(merge_with_prev){
         //Get the previous block
         block_ctrl *prev_block = priv_prev_block(block);
         prev_block->m_size += block->m_size;
         assert(prev_block->m_size >= BlockCtrlUnits);
         block_to_insert = prev_block;
      }
      //Merge if the next is free
      if(merge_with_next){
         block_to_insert->m_size += next_block->m_size;
         assert(block_to_insert->m_size >= BlockCtrlUnits);
         if(merge_with_prev)
            m_header.m_imultiset.erase(Imultiset::s_iterator_to(*next_block));
      }

      bool only_merge_next = !merge_with_prev && merge_with_next;
      imultiset_iterator free_block_to_check_it
         (Imultiset::s_iterator_to(only_merge_next ? *next_block : *block_to_insert));
      imultiset_iterator was_bigger_it(free_block_to_check_it);

      //Now try to shortcut erasure + insertion (O(log(N))) with
      //a O(1) operation if merging does not alter tree positions
      if(++was_bigger_it != m_header.m_imultiset.end() &&
         block_to_insert->m_size > was_bigger_it->m_size){
         m_header.m_imultiset.erase(free_block_to_check_it);
         m_header.m_imultiset.insert(m_header.m_imultiset.begin(), *block_to_insert);
      }
      else if(only_merge_next){
         m_header.m_imultiset.replace_node(free_block_to_check_it, *block_to_insert);
      }
   }
   else{
      m_header.m_imultiset.insert(m_header.m_imultiset.begin(), *block_to_insert);
   }
   priv_mark_as_free_block(block_to_insert);
}
}  //namespace interprocess {
}  //namespace boost {

#include <boost/interprocess/detail/config_end.hpp>

#endif   //#ifndef BOOST_INTERPROCESS_MEM_ALGO_RBTREE_BEST_FIT_HPP