//////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
// See http://www.boost.org/libs/interprocess for documentation.
//
//////////////////////////////////////////////////////////////////////////////

#ifndef BOOST_INTERPROCESS_MEM_ALGO_DETAIL_SIMPLE_SEQ_FIT_IMPL_HPP
#define BOOST_INTERPROCESS_MEM_ALGO_DETAIL_SIMPLE_SEQ_FIT_IMPL_HPP

#if (defined _MSC_VER) && (_MSC_VER >= 1200)
#  pragma once
#endif
#include <boost/interprocess/detail/config_begin.hpp>
#include <boost/interprocess/detail/workaround.hpp>

#include <boost/pointer_to_other.hpp>

#include <boost/interprocess/interprocess_fwd.hpp>
#include <boost/interprocess/containers/allocation_type.hpp>
#include <boost/interprocess/offset_ptr.hpp>
#include <boost/interprocess/sync/interprocess_mutex.hpp>
#include <boost/interprocess/exceptions.hpp>
#include <boost/interprocess/detail/utilities.hpp>
#include <boost/interprocess/detail/min_max.hpp>
#include <boost/interprocess/detail/type_traits.hpp>
#include <boost/interprocess/sync/scoped_lock.hpp>
#include <boost/interprocess/mem_algo/detail/mem_algo_common.hpp>
#include <algorithm>
#include <utility>
#include <cstring>
#include <cassert>
#include <new>
//!\file
//!Describes sequential fit algorithm used to allocate objects in shared memory.
//!This class is intended as a base class for single segment and multi-segment
//!implementations.

namespace boost {

namespace interprocess {

namespace detail {
//!This class implements the simple sequential fit algorithm with a simply
//!linked list of free buffers.
//!This class is intended as a base class for single segment and multi-segment
//!implementations.
template<class MutexFamily, class VoidPointer>
class simple_seq_fit_impl
{
   //Non-copyable
   simple_seq_fit_impl();
   simple_seq_fit_impl(const simple_seq_fit_impl &);
   simple_seq_fit_impl &operator=(const simple_seq_fit_impl &);

   public:
   //!Shared interprocess_mutex family used for the rest of the Interprocess framework
   typedef MutexFamily        mutex_family;
   //!Pointer type to be used with the rest of the Interprocess framework
   typedef VoidPointer        void_pointer;
   typedef detail::basic_multiallocation_cached_slist<void_pointer>
      multialloc_cached;
   typedef detail::basic_multiallocation_cached_counted_slist
      <multialloc_cached>     multiallocation_chain;
   private:
   class block_ctrl;
   typedef typename boost::
      pointer_to_other<void_pointer, block_ctrl>::type block_ctrl_ptr;

   friend class block_ctrl;

   //!Block control structure
   class block_ctrl
   {
      public:
      //!Offset pointer to the next block.
      block_ctrl_ptr m_next;
      //!This block's memory size (including block_ctrl
      //!header) in BasicSize units
      std::size_t    m_size;

      std::size_t get_user_bytes() const
      {  return this->m_size*Alignment - BlockCtrlBytes;  }

      std::size_t get_total_bytes() const
      {  return this->m_size*Alignment;  }
   };
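
   //Illustrative example (not part of the original source): on a hypothetical
   //platform where Alignment == 16 and BlockCtrlBytes == 16, a block whose
   //m_size is 8 units spans 8*16 == 128 bytes in total, of which
   //128 - 16 == 112 bytes are usable by the caller:
   //
   //   block_ctrl b;             //stack use is for exposition only
   //   b.m_size = 8;
   //   assert(b.get_total_bytes() == 128);
   //   assert(b.get_user_bytes()  == 112);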
   //!Shared interprocess_mutex to protect memory allocate/deallocate
   typedef typename MutexFamily::mutex_type   interprocess_mutex;

   //!This struct includes needed data and derives from
   //!interprocess_mutex to allow EBO when using null interprocess_mutex
   struct header_t : public interprocess_mutex
   {
      //!Pointer to the first free block
      block_ctrl        m_root;
      //!Allocated bytes for internal checking
      std::size_t       m_allocated;
      //!The size of the memory segment
      std::size_t       m_size;
      //!The extra size required by the segment
      std::size_t       m_extra_hdr_bytes;
   }  m_header;

   friend class detail::memory_algorithm_common<simple_seq_fit_impl>;

   typedef detail::memory_algorithm_common<simple_seq_fit_impl> algo_impl_t;
   public:
   //!Constructor. "size" is the total size of the managed memory segment,
   //!"extra_hdr_bytes" indicates the extra bytes beginning in the sizeof(simple_seq_fit_impl)
   //!offset that the allocator should not use at all.
   simple_seq_fit_impl(std::size_t size, std::size_t extra_hdr_bytes);

   //!Destructor
   ~simple_seq_fit_impl();

   //!Obtains the minimum size needed by the algorithm
   static std::size_t get_min_size(std::size_t extra_hdr_bytes);

   //Functions for single segment management

   //!Allocates bytes, returns 0 if there is not more memory
   void* allocate(std::size_t nbytes);
   //!Multiple element allocation, same size
   multiallocation_chain
      allocate_many(std::size_t elem_bytes, std::size_t num_elements)
   {
      //-----------------------
      boost::interprocess::scoped_lock<interprocess_mutex> guard(m_header);
      //-----------------------
      return algo_impl_t::allocate_many(this, elem_bytes, num_elements);
   }

   //!Multiple element allocation, different size
   multiallocation_chain
      allocate_many(const std::size_t *elem_sizes, std::size_t n_elements, std::size_t sizeof_element)
   {
      //-----------------------
      boost::interprocess::scoped_lock<interprocess_mutex> guard(m_header);
      //-----------------------
      return algo_impl_t::allocate_many(this, elem_sizes, n_elements, sizeof_element);
   }

   //!Multiple element deallocation
   void deallocate_many(multiallocation_chain chain);
   //!Deallocates previously allocated bytes
   void deallocate(void *addr);

   //!Returns the size of the memory segment
   std::size_t get_size() const;

   //!Returns the number of free bytes of the memory segment
   std::size_t get_free_memory() const;

   //!Increases managed memory in extra_size bytes more
   void grow(std::size_t extra_size);

   //!Decreases managed memory as much as possible
   void shrink_to_fit();

   //!Returns true if all allocated memory has been deallocated
   bool all_memory_deallocated();

   //!Makes an internal sanity check and returns true if success
   bool check_sanity();

   //!Initializes to zero all the memory that's not in use.
   //!This function is normally used for security reasons.
   void zero_free_memory();
   template<class T>
   std::pair<T *, bool>
      allocation_command(boost::interprocess::allocation_type command, std::size_t limit_size,
                         std::size_t preferred_size, std::size_t &received_size,
                         T *reuse_ptr = 0);

   std::pair<void *, bool>
      raw_allocation_command(boost::interprocess::allocation_type command, std::size_t limit_size,
                             std::size_t preferred_size, std::size_t &received_size,
                             void *reuse_ptr = 0, std::size_t sizeof_object = 1);

   //!Returns the size of the buffer previously allocated pointed by ptr
   std::size_t size(const void *ptr) const;

   //!Allocates aligned bytes, returns 0 if there is not more memory.
   //!Alignment must be power of 2
   void* allocate_aligned(std::size_t nbytes, std::size_t alignment);

   private:
   //!Obtains the pointer returned to the user from the block control
   static void *priv_get_user_buffer(const block_ctrl *block);

   //!Obtains the block control structure of the user buffer
   static block_ctrl *priv_get_block(const void *ptr);

   //!Real allocation algorithm with min allocation option
   std::pair<void *, bool> priv_allocate(boost::interprocess::allocation_type command
                                        ,std::size_t min_size
                                        ,std::size_t preferred_size
                                        ,std::size_t &received_size
                                        ,void *reuse_ptr = 0);

   std::pair<void *, bool> priv_allocation_command(boost::interprocess::allocation_type command
                                        ,std::size_t min_size
                                        ,std::size_t preferred_size
                                        ,std::size_t &received_size
                                        ,void *reuse_ptr
                                        ,std::size_t sizeof_object);

   //!Returns the number of total units that a user buffer
   //!of "userbytes" bytes really occupies (including header)
   static std::size_t priv_get_total_units(std::size_t userbytes);

   static std::size_t priv_first_block_offset(const void *this_ptr, std::size_t extra_hdr_bytes);

   std::size_t priv_block_end_offset() const;
   //!Returns next block if it's free.
   //!Returns 0 if next block is not free.
   block_ctrl *priv_next_block_if_free(block_ctrl *ptr);

   //!Check if this block is free (not allocated)
   bool priv_is_allocated_block(block_ctrl *ptr);

   //!Returns previous block if it's free.
   //!Returns 0 if previous block is not free.
   std::pair<block_ctrl*, block_ctrl*> priv_prev_block_if_free(block_ctrl *ptr);

   //!Real expand function implementation
   bool priv_expand(void *ptr
                   ,std::size_t min_size, std::size_t preferred_size
                   ,std::size_t &received_size);

   //!Real expand to both sides implementation
   void* priv_expand_both_sides(boost::interprocess::allocation_type command
                               ,std::size_t min_size
                               ,std::size_t preferred_size
                               ,std::size_t &received_size
                               ,void *reuse_ptr
                               ,bool only_preferred_backwards);

   //!Real private aligned allocation function
   //void* priv_allocate_aligned(std::size_t nbytes, std::size_t alignment);

   //!Checks if block has enough memory and splits/unlinks the block
   //!returning the address to the users
   void* priv_check_and_allocate(std::size_t units
                                ,block_ctrl* prev
                                ,block_ctrl* block
                                ,std::size_t &received_size);

   //!Real deallocation algorithm
   void priv_deallocate(void *addr);

   //!Makes a new memory portion available for allocation
   void priv_add_segment(void *addr, std::size_t size);

   void priv_mark_new_allocated_block(block_ctrl *block);
   public:
   static const std::size_t Alignment = detail::alignment_of<detail::max_align>::value;

   private:
   static const std::size_t BlockCtrlBytes = detail::ct_rounded_size<sizeof(block_ctrl), Alignment>::value;
   static const std::size_t BlockCtrlUnits = BlockCtrlBytes/Alignment;
   static const std::size_t MinBlockUnits  = BlockCtrlUnits;
   static const std::size_t MinBlockSize   = MinBlockUnits*Alignment;
   static const std::size_t AllocatedCtrlBytes = BlockCtrlBytes;
   static const std::size_t AllocatedCtrlUnits = BlockCtrlUnits;
   static const std::size_t UsableByPreviousChunk = 0;

   public:
   static const std::size_t PayloadPerAllocation = BlockCtrlBytes;
};
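
//Usage sketch (illustrative, not part of the original source): this class is
//not used directly; it is normally selected through the public simple_seq_fit
//front-end as the memory algorithm of a managed segment. Assuming the usual
//public headers are available, a segment type could be declared like this
//("managed_shm_t" is a hypothetical name):
//
//   #include <boost/interprocess/managed_shared_memory.hpp>
//   #include <boost/interprocess/mem_algo/simple_seq_fit.hpp>
//
//   typedef boost::interprocess::basic_managed_shared_memory
//      < char
//      , boost::interprocess::simple_seq_fit<boost::interprocess::mutex_family>
//      , boost::interprocess::iset_index
//      > managed_shm_t;
//
//   //managed_shm_t segment(boost::interprocess::create_only, "name", 65536);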
template<class MutexFamily, class VoidPointer>
inline std::size_t simple_seq_fit_impl<MutexFamily, VoidPointer>
   ::priv_first_block_offset(const void *this_ptr, std::size_t extra_hdr_bytes)
{
   //First align "this" pointer
   std::size_t uint_this         = (std::size_t)this_ptr;
   std::size_t uint_aligned_this = uint_this/Alignment*Alignment;
   std::size_t this_disalignment = (uint_this - uint_aligned_this);
   std::size_t block1_off =
      detail::get_rounded_size(sizeof(simple_seq_fit_impl) + extra_hdr_bytes + this_disalignment, Alignment)
      - this_disalignment;
   algo_impl_t::assert_alignment(this_disalignment + block1_off);
   return block1_off;
}
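
//Worked example (illustrative, assuming Alignment == 16,
//sizeof(simple_seq_fit_impl) == 24 and extra_hdr_bytes == 0): if "this" lives
//at address 0x1008, then this_disalignment == 8, get_rounded_size(24 + 0 + 8, 16)
//== 32 and block1_off == 32 - 8 == 24, so the first block starts at
//this + 24 == 0x1020, a 16-byte aligned address, as the final assertion checks.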
template<class MutexFamily, class VoidPointer>
inline std::size_t simple_seq_fit_impl<MutexFamily, VoidPointer>
   ::priv_block_end_offset() const
{
   //First align "this" pointer
   std::size_t uint_this         = (std::size_t)this;
   std::size_t uint_aligned_this = uint_this/Alignment*Alignment;
   std::size_t this_disalignment = (uint_this - uint_aligned_this);
   std::size_t old_end =
      detail::get_truncated_size(m_header.m_size + this_disalignment, Alignment)
      - this_disalignment;
   algo_impl_t::assert_alignment(old_end + this_disalignment);
   return old_end;
}
template<class MutexFamily, class VoidPointer>
inline simple_seq_fit_impl<MutexFamily, VoidPointer>::
   simple_seq_fit_impl(std::size_t size, std::size_t extra_hdr_bytes)
{
   //Initialize sizes and counters
   m_header.m_allocated = 0;
   m_header.m_size      = size;
   m_header.m_extra_hdr_bytes = extra_hdr_bytes;

   //Initialize pointers
   std::size_t block1_off = priv_first_block_offset(this, extra_hdr_bytes);

   m_header.m_root.m_next = reinterpret_cast<block_ctrl*>
      ((reinterpret_cast<char*>(this) + block1_off));
   algo_impl_t::assert_alignment(detail::get_pointer(m_header.m_root.m_next));
   m_header.m_root.m_next->m_size = (size - block1_off)/Alignment;
   m_header.m_root.m_next->m_next = &m_header.m_root;
}
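
//Resulting layout (illustrative sketch): after construction the managed
//segment looks like this, with a single free block covering everything past
//the header area:
//
//   [simple_seq_fit_impl][extra_hdr_bytes][alignment pad][first free block....]
//   ^this                                                ^this + block1_off
//
//m_header.m_root.m_next points to the first free block, and that block's
//m_next points back to &m_header.m_root, closing the circular singly linked
//free list.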
template<class MutexFamily, class VoidPointer>
inline simple_seq_fit_impl<MutexFamily, VoidPointer>::~simple_seq_fit_impl()
{
   //There is a memory leak!
//   assert(m_header.m_allocated == 0);
//   assert(m_header.m_root.m_next->m_next == block_ctrl_ptr(&m_header.m_root));
}
template<class MutexFamily, class VoidPointer>
inline void simple_seq_fit_impl<MutexFamily, VoidPointer>::grow(std::size_t extra_size)
{
   //Old highest address block's end offset
   std::size_t old_end = this->priv_block_end_offset();

   //Update managed buffer's size
   m_header.m_size += extra_size;

   //We need at least MinBlockSize bytes to create a new block
   if((m_header.m_size - old_end) < MinBlockSize){
      return;
   }

   //We'll create a new free block with extra_size bytes
   block_ctrl *new_block = reinterpret_cast<block_ctrl*>
      (reinterpret_cast<char*>(this) + old_end);

   algo_impl_t::assert_alignment(new_block);
   new_block->m_next = 0;
   new_block->m_size = (m_header.m_size - old_end)/Alignment;
   m_header.m_allocated += new_block->m_size*Alignment;
   this->priv_deallocate(priv_get_user_buffer(new_block));
}
template<class MutexFamily, class VoidPointer>
void simple_seq_fit_impl<MutexFamily, VoidPointer>::shrink_to_fit()
{
   //Get the root and the first memory block
   block_ctrl *prev  = &m_header.m_root;
   block_ctrl *last  = &m_header.m_root;
   block_ctrl *block = detail::get_pointer(last->m_next);
   block_ctrl *root  = &m_header.m_root;

   //No free block, nothing to shrink
   if(block == root) return;

   //Iterate through the free block list to find the last (highest address) free block
   while(block != root){
      prev  = last;
      last  = block;
      block = detail::get_pointer(block->m_next);
   }

   char *last_free_end_address = reinterpret_cast<char*>(last) + last->m_size*Alignment;
   if(last_free_end_address != (reinterpret_cast<char*>(this) + priv_block_end_offset())){
      //there is an allocated block in the end of this block
      //so no shrinking is possible
      return;
   }

   //Check if have only 1 big free block
   void *unique_block = 0;
   if(!m_header.m_allocated){
      assert(prev == root);
      std::size_t ignore;
      unique_block = priv_allocate(boost::interprocess::allocate_new, 0, 0, ignore).first;
      if(!unique_block)
         return;
      last = detail::get_pointer(m_header.m_root.m_next);
      assert(last_free_end_address == (reinterpret_cast<char*>(last) + last->m_size*Alignment));
   }
   std::size_t last_units = last->m_size;

   std::size_t received_size;
   void *addr = priv_check_and_allocate(last_units, prev, last, received_size);
   (void)addr;
   assert(addr);
   assert(received_size == last_units*Alignment - AllocatedCtrlBytes);

   //Shrink the segment so that it ends just before the last free block
   m_header.m_size /= Alignment;
   m_header.m_size -= last->m_size;
   m_header.m_size *= Alignment;
   m_header.m_allocated -= last->m_size*Alignment;

   if(unique_block)
      priv_deallocate(unique_block);
}
template<class MutexFamily, class VoidPointer>
inline void simple_seq_fit_impl<MutexFamily, VoidPointer>::
   priv_mark_new_allocated_block(block_ctrl *new_block)
{
   //A null m_next marks the block as allocated
   new_block->m_next = 0;
}
template<class MutexFamily, class VoidPointer>
inline typename simple_seq_fit_impl<MutexFamily, VoidPointer>::block_ctrl *
   simple_seq_fit_impl<MutexFamily, VoidPointer>::priv_get_block(const void *ptr)
{
   return const_cast<block_ctrl*>(reinterpret_cast<const block_ctrl*>
      (reinterpret_cast<const char*>(ptr) - AllocatedCtrlBytes));
}
template<class MutexFamily, class VoidPointer>
inline void *simple_seq_fit_impl<MutexFamily, VoidPointer>::
   priv_get_user_buffer(const typename simple_seq_fit_impl<MutexFamily, VoidPointer>::block_ctrl *block)
{
   return const_cast<char*>(reinterpret_cast<const char*>(block) + AllocatedCtrlBytes);
}
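
//Round-trip example (illustrative): the user pointer is always exactly
//AllocatedCtrlBytes past its block_ctrl header, so the two helpers invert
//each other:
//
//   //block_ctrl *b = ...;                      //some allocated block
//   //void *user = priv_get_user_buffer(b);
//   //assert(priv_get_block(user) == b);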
template<class MutexFamily, class VoidPointer>
inline void simple_seq_fit_impl<MutexFamily, VoidPointer>::priv_add_segment(void *addr, std::size_t size)
{
   algo_impl_t::assert_alignment(addr);
   //Check size
   assert(!(size < MinBlockSize));
   if(size < MinBlockSize)
      return;
   //Construct big block using the new segment
   block_ctrl *new_block = static_cast<block_ctrl *>(addr);
   new_block->m_size = size/Alignment;
   new_block->m_next = 0;
   //Simulate this block was previously allocated
   m_header.m_allocated += new_block->m_size*Alignment;
   //Return block and insert it in the free block list
   this->priv_deallocate(priv_get_user_buffer(new_block));
}
template<class MutexFamily, class VoidPointer>
inline std::size_t simple_seq_fit_impl<MutexFamily, VoidPointer>::get_size() const
{  return m_header.m_size;  }
template<class MutexFamily, class VoidPointer>
inline std::size_t simple_seq_fit_impl<MutexFamily, VoidPointer>::get_free_memory() const
{
   return m_header.m_size - m_header.m_allocated -
      algo_impl_t::multiple_of_units(sizeof(*this) + m_header.m_extra_hdr_bytes);
}
template<class MutexFamily, class VoidPointer>
inline std::size_t simple_seq_fit_impl<MutexFamily, VoidPointer>::
   get_min_size(std::size_t extra_hdr_bytes)
{
   return detail::get_rounded_size(sizeof(simple_seq_fit_impl), Alignment) +
          detail::get_rounded_size(extra_hdr_bytes, Alignment) +
          MinBlockSize;
}
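
//Worked example (illustrative, assuming Alignment == 16, MinBlockSize == 16
//and sizeof(simple_seq_fit_impl) == 24): get_min_size(10) returns
//get_rounded_size(24, 16) + get_rounded_size(10, 16) + 16 == 32 + 16 + 16 == 64,
//the smallest segment in which the header, the extra bytes and one minimal
//free block still fit.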
template<class MutexFamily, class VoidPointer>
inline bool simple_seq_fit_impl<MutexFamily, VoidPointer>::
   all_memory_deallocated()
{
   //-----------------------
   boost::interprocess::scoped_lock<interprocess_mutex> guard(m_header);
   //-----------------------
   return m_header.m_allocated == 0 &&
          detail::get_pointer(m_header.m_root.m_next->m_next) == &m_header.m_root;
}
template<class MutexFamily, class VoidPointer>
inline void simple_seq_fit_impl<MutexFamily, VoidPointer>::zero_free_memory()
{
   //-----------------------
   boost::interprocess::scoped_lock<interprocess_mutex> guard(m_header);
   //-----------------------
   block_ctrl *block = detail::get_pointer(m_header.m_root.m_next);

   //Iterate through all free portions
   do{
      //Just clear the memory part reserved for the user
      std::memset( priv_get_user_buffer(block)
                 , 0
                 , block->get_user_bytes());
      block = detail::get_pointer(block->m_next);
   }
   while(block != &m_header.m_root);
}
template<class MutexFamily, class VoidPointer>
inline bool simple_seq_fit_impl<MutexFamily, VoidPointer>::
   check_sanity()
{
   //-----------------------
   boost::interprocess::scoped_lock<interprocess_mutex> guard(m_header);
   //-----------------------
   block_ctrl *block = detail::get_pointer(m_header.m_root.m_next);

   std::size_t free_memory = 0;

   //Iterate through all blocks obtaining their size
   while(block != &m_header.m_root){
      algo_impl_t::assert_alignment(block);
      if(!algo_impl_t::check_alignment(block))
         return false;
      //Free blocks' next must be always valid
      block_ctrl *next = detail::get_pointer(block->m_next);
      if(!next){
         return false;
      }
      free_memory += block->m_size*Alignment;
      block = next;
   }

   //Check allocated bytes are less than size
   if(m_header.m_allocated > m_header.m_size){
      return false;
   }

   //Check free bytes are less than size
   if(free_memory > m_header.m_size){
      return false;
   }
   return true;
}
template<class MutexFamily, class VoidPointer>
inline void* simple_seq_fit_impl<MutexFamily, VoidPointer>::
   allocate(std::size_t nbytes)
{
   //-----------------------
   boost::interprocess::scoped_lock<interprocess_mutex> guard(m_header);
   //-----------------------
   std::size_t ignore;
   return priv_allocate(boost::interprocess::allocate_new, nbytes, nbytes, ignore).first;
}
template<class MutexFamily, class VoidPointer>
inline void* simple_seq_fit_impl<MutexFamily, VoidPointer>::
   allocate_aligned(std::size_t nbytes, std::size_t alignment)
{
   //-----------------------
   boost::interprocess::scoped_lock<interprocess_mutex> guard(m_header);
   //-----------------------
   return algo_impl_t::allocate_aligned(this, nbytes, alignment);
}
template<class MutexFamily, class VoidPointer>
template<class T>
inline std::pair<T*, bool> simple_seq_fit_impl<MutexFamily, VoidPointer>::
   allocation_command(boost::interprocess::allocation_type command, std::size_t limit_size,
                      std::size_t preferred_size, std::size_t &received_size,
                      T *reuse_ptr)
{
   std::pair<void*, bool> ret = priv_allocation_command
      (command, limit_size, preferred_size, received_size, static_cast<void*>(reuse_ptr), sizeof(T));

   BOOST_ASSERT(0 == ((std::size_t)ret.first % detail::alignment_of<T>::value));
   return std::pair<T *, bool>(static_cast<T*>(ret.first), ret.second);
}
template<class MutexFamily, class VoidPointer>
inline std::pair<void*, bool> simple_seq_fit_impl<MutexFamily, VoidPointer>::
   raw_allocation_command(boost::interprocess::allocation_type command, std::size_t limit_objects,
                          std::size_t preferred_objects, std::size_t &received_objects,
                          void *reuse_ptr, std::size_t sizeof_object)
{
   if(!sizeof_object)
      return std::pair<void *, bool>(static_cast<void*>(0), false);
   if(command & boost::interprocess::try_shrink_in_place){
      bool success = algo_impl_t::try_shrink
         ( this, reuse_ptr, limit_objects*sizeof_object
         , preferred_objects*sizeof_object, received_objects);
      received_objects /= sizeof_object;
      return std::pair<void *, bool> ((success ? reuse_ptr : 0), true);
   }
   return priv_allocation_command
      (command, limit_objects, preferred_objects, received_objects, reuse_ptr, sizeof_object);
}
template<class MutexFamily, class VoidPointer>
inline std::pair<void*, bool> simple_seq_fit_impl<MutexFamily, VoidPointer>::
   priv_allocation_command(boost::interprocess::allocation_type command, std::size_t limit_size,
                           std::size_t preferred_size, std::size_t &received_size,
                           void *reuse_ptr, std::size_t sizeof_object)
{
   command &= ~boost::interprocess::expand_bwd;
   if(!command)
      return std::pair<void *, bool>(static_cast<void*>(0), false);

   std::pair<void*, bool> ret;
   std::size_t max_count = m_header.m_size/sizeof_object;
   if(limit_size > max_count || preferred_size > max_count){
      ret.first = 0; return ret;
   }
   std::size_t l_size = limit_size*sizeof_object;
   std::size_t p_size = preferred_size*sizeof_object;
   std::size_t r_size;
   {
      //-----------------------
      boost::interprocess::scoped_lock<interprocess_mutex> guard(m_header);
      //-----------------------
      ret = priv_allocate(command, l_size, p_size, r_size, reuse_ptr);
   }
   received_size = r_size/sizeof_object;
   return ret;
}
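
//Unit conversion example (illustrative): raw_allocation_command and
//priv_allocation_command count in objects while priv_allocate counts in
//bytes. With sizeof_object == 8 and preferred_size == 100 objects,
//priv_allocate is asked for 800 bytes; if it reports r_size == 816 bytes,
//the caller sees received_size == 816/8 == 102 objects.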
template<class MutexFamily, class VoidPointer>
inline std::size_t simple_seq_fit_impl<MutexFamily, VoidPointer>::
   size(const void *ptr) const
{
   //We need no synchronization since this block is not going
   //to be modified
   //Obtain the real size of the block
   const block_ctrl *block = static_cast<const block_ctrl*>(priv_get_block(ptr));
   return block->get_user_bytes();
}
template<class MutexFamily, class VoidPointer>
void* simple_seq_fit_impl<MutexFamily, VoidPointer>::
   priv_expand_both_sides(boost::interprocess::allocation_type command
                         ,std::size_t min_size
                         ,std::size_t preferred_size
                         ,std::size_t &received_size
                         ,void *reuse_ptr
                         ,bool only_preferred_backwards)
{
   typedef std::pair<block_ctrl *, block_ctrl *> prev_block_t;
   block_ctrl *reuse = priv_get_block(reuse_ptr);
   received_size = 0;

   if(this->size(reuse_ptr) > min_size){
      received_size = this->size(reuse_ptr);
      return reuse_ptr;
   }

   if(command & boost::interprocess::expand_fwd){
      if(priv_expand(reuse_ptr, min_size, preferred_size, received_size))
         return reuse_ptr;
   }
   else{
      received_size = this->size(reuse_ptr);
   }

   if(command & boost::interprocess::expand_bwd){
      std::size_t extra_forward = !received_size ? 0 : received_size + BlockCtrlBytes;
      prev_block_t prev_pair = priv_prev_block_if_free(reuse);
      block_ctrl *prev = prev_pair.second;
      if(!prev){
         return 0;
      }

      std::size_t needs_backwards =
         detail::get_rounded_size(preferred_size - extra_forward, Alignment);

      if(!only_preferred_backwards){
         needs_backwards =
            max_value(detail::get_rounded_size(min_size - extra_forward, Alignment)
                     ,min_value(prev->get_user_bytes(), needs_backwards));
      }

      //Check if previous block has enough size
      if((prev->get_user_bytes()) >= needs_backwards){
         //Now take all next space. This will succeed
         if(!priv_expand(reuse_ptr, received_size, received_size, received_size)){
            assert(0);
         }

         //We need a minimum size to split the previous one
         if((prev->get_user_bytes() - needs_backwards) > 2*BlockCtrlBytes){
            block_ctrl *new_block = reinterpret_cast<block_ctrl *>
               (reinterpret_cast<char*>(reuse) - needs_backwards - BlockCtrlBytes);

            new_block->m_next = 0;
            new_block->m_size =
               BlockCtrlUnits + (needs_backwards + extra_forward)/Alignment;
            prev->m_size =
               (prev->get_total_bytes() - needs_backwards)/Alignment - BlockCtrlUnits;
            received_size = needs_backwards + extra_forward;
            m_header.m_allocated += needs_backwards + BlockCtrlBytes;
            return priv_get_user_buffer(new_block);
         }
         else{
            //Just merge the whole previous block
            block_ctrl *prev_2_block = prev_pair.first;
            //Update received size and allocation
            received_size = extra_forward + prev->get_user_bytes();
            m_header.m_allocated += prev->get_total_bytes();
            //Now unlink it from previous block
            prev_2_block->m_next = prev->m_next;
            prev->m_size = reuse->m_size + prev->m_size;
            prev->m_next = 0;
            return priv_get_user_buffer(prev);
         }
      }
   }
   return 0;
}
template<class MutexFamily, class VoidPointer>
inline void simple_seq_fit_impl<MutexFamily, VoidPointer>::
   deallocate_many(typename simple_seq_fit_impl<MutexFamily, VoidPointer>::multiallocation_chain chain)
{
   //-----------------------
   boost::interprocess::scoped_lock<interprocess_mutex> guard(m_header);
   //-----------------------
   while(!chain.empty()){
      void *addr = chain.front();
      chain.pop_front();
      this->priv_deallocate(addr);
   }
}
template<class MutexFamily, class VoidPointer>
inline std::size_t simple_seq_fit_impl<MutexFamily, VoidPointer>::
   priv_get_total_units(std::size_t userbytes)
{
   std::size_t s = detail::get_rounded_size(userbytes, Alignment)/Alignment;
   if(!s)   s = 1;
   return BlockCtrlUnits + s;
}
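
//Worked example (illustrative, assuming Alignment == 16 and
//BlockCtrlUnits == 1): priv_get_total_units(100) rounds 100 bytes up to
//112 bytes (7 units) and adds the header unit, so a 100-byte user request
//really consumes 8 units == 128 bytes of the segment.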
template<class MutexFamily, class VoidPointer>
std::pair<void *, bool> simple_seq_fit_impl<MutexFamily, VoidPointer>::
   priv_allocate(boost::interprocess::allocation_type command
                ,std::size_t limit_size
                ,std::size_t preferred_size
                ,std::size_t &received_size
                ,void *reuse_ptr)
{
   if(command & boost::interprocess::shrink_in_place){
      bool success =
         algo_impl_t::shrink(this, reuse_ptr, limit_size, preferred_size, received_size);
      return std::pair<void *, bool> ((success ? reuse_ptr : 0), true);
   }
   typedef std::pair<void *, bool> return_type;
   received_size = 0;

   if(limit_size > preferred_size)
      return return_type(static_cast<void*>(0), false);

   //Number of units to request (including block_ctrl header)
   std::size_t nunits = detail::get_rounded_size(preferred_size, Alignment)/Alignment + BlockCtrlUnits;

   //Get the root and the first memory block
   block_ctrl *prev               = &m_header.m_root;
   block_ctrl *block              = detail::get_pointer(prev->m_next);
   block_ctrl *root               = &m_header.m_root;
   block_ctrl *biggest_block      = 0;
   block_ctrl *prev_biggest_block = 0;
   std::size_t biggest_size       = 0;

   //First try to expand in place preferring the preferred size:
   //reuse_ptr, limit_size, preferred_size, received_size
   if(reuse_ptr && (command & (boost::interprocess::expand_fwd | boost::interprocess::expand_bwd))){
      void *ret = priv_expand_both_sides
         (command, limit_size, preferred_size, received_size, reuse_ptr, true);
      if(ret){
         algo_impl_t::assert_alignment(ret);
         return return_type(ret, true);
      }
   }

   if(command & boost::interprocess::allocate_new){
      received_size = 0;
      while(block != root){
         //Update biggest block pointers
         if(block->m_size > biggest_size){
            prev_biggest_block = prev;
            biggest_size  = block->m_size;
            biggest_block = block;
         }
         algo_impl_t::assert_alignment(block);
         void *addr = this->priv_check_and_allocate(nunits, prev, block, received_size);
         if(addr){
            algo_impl_t::assert_alignment(addr);
            return return_type(addr, false);
         }
         //Bad luck, let's check next block
         prev  = block;
         block = detail::get_pointer(block->m_next);
      }

      //Bad luck finding preferred_size, now if we have any biggest_block
      //try with this block
      if(biggest_block){
         std::size_t limit_units = detail::get_rounded_size(limit_size, Alignment)/Alignment + BlockCtrlUnits;
         if(biggest_block->m_size < limit_units)
            return return_type(static_cast<void*>(0), false);

         received_size = biggest_block->m_size*Alignment - BlockCtrlBytes;
         void *ret = this->priv_check_and_allocate
            (biggest_block->m_size, prev_biggest_block, biggest_block, received_size);
         assert(ret != 0);
         algo_impl_t::assert_alignment(ret);
         return return_type(ret, false);
      }
   }
   //Now try to expand both sides with min size
   if(reuse_ptr && (command & (boost::interprocess::expand_fwd | boost::interprocess::expand_bwd))){
      return_type ret (priv_expand_both_sides
         (command, limit_size, preferred_size, received_size, reuse_ptr, false), true);
      algo_impl_t::assert_alignment(ret.first);
      return ret;
   }
   return return_type(static_cast<void*>(0), false);
}
template<class MutexFamily, class VoidPointer> inline
bool simple_seq_fit_impl<MutexFamily, VoidPointer>::priv_is_allocated_block
   (typename simple_seq_fit_impl<MutexFamily, VoidPointer>::block_ctrl *block)
{  return block->m_next == 0;  }
template<class MutexFamily, class VoidPointer>
inline typename simple_seq_fit_impl<MutexFamily, VoidPointer>::block_ctrl *
   simple_seq_fit_impl<MutexFamily, VoidPointer>::
      priv_next_block_if_free
         (typename simple_seq_fit_impl<MutexFamily, VoidPointer>::block_ctrl *ptr)
{
   //Take the address where the next block should go
   block_ctrl *next_block = reinterpret_cast<block_ctrl*>
      (reinterpret_cast<char*>(ptr) + ptr->m_size*Alignment);

   //Check if the adjacent block is in the managed segment
   char *this_char_ptr = reinterpret_cast<char*>(this);
   char *next_char_ptr = reinterpret_cast<char*>(next_block);
   std::size_t distance = (next_char_ptr - this_char_ptr)/Alignment;

   if(distance >= (m_header.m_size/Alignment)){
      //"next_block" does not exist so we can't expand "block"
      return 0;
   }

   //An allocated block has a null m_next, so it can't be merged
   if(!next_block->m_next)
      return 0;

   return next_block;
}
template<class MutexFamily, class VoidPointer>
inline
   std::pair<typename simple_seq_fit_impl<MutexFamily, VoidPointer>::block_ctrl *
            ,typename simple_seq_fit_impl<MutexFamily, VoidPointer>::block_ctrl *>
   simple_seq_fit_impl<MutexFamily, VoidPointer>::
      priv_prev_block_if_free
         (typename simple_seq_fit_impl<MutexFamily, VoidPointer>::block_ctrl *ptr)
{
   typedef std::pair<block_ctrl *, block_ctrl *> prev_pair_t;
   //Take the address where the previous block should go
   block_ctrl *root         = &m_header.m_root;
   block_ctrl *prev_2_block = root;
   block_ctrl *prev_block   = detail::get_pointer(root->m_next);

   while((reinterpret_cast<char*>(prev_block) + prev_block->m_size*Alignment)
            != reinterpret_cast<char*>(ptr)
         && prev_block != root){
      prev_2_block = prev_block;
      prev_block   = detail::get_pointer(prev_block->m_next);
   }

   if(prev_block == root || !prev_block->m_next)
      return prev_pair_t(static_cast<block_ctrl*>(0), static_cast<block_ctrl*>(0));

   //Check if the previous block is in the managed segment
   char *this_char_ptr = reinterpret_cast<char*>(this);
   char *prev_char_ptr = reinterpret_cast<char*>(prev_block);
   std::size_t distance = (prev_char_ptr - this_char_ptr)/Alignment;

   if(distance >= (m_header.m_size/Alignment)){
      //"previous_block" does not exist so we can't expand "block"
      return prev_pair_t(static_cast<block_ctrl*>(0), static_cast<block_ctrl*>(0));
   }
   return prev_pair_t(prev_2_block, prev_block);
}
template<class MutexFamily, class VoidPointer>
inline bool simple_seq_fit_impl<MutexFamily, VoidPointer>::
   priv_expand(void *ptr
              ,std::size_t min_size
              ,std::size_t preferred_size
              ,std::size_t &received_size)
{
   //Obtain the real size of the block
   block_ctrl *block = reinterpret_cast<block_ctrl*>(priv_get_block(ptr));
   std::size_t old_block_size = block->m_size;

   //All used blocks' next is marked with 0 so check it
   assert(block->m_next == 0);

   //Put this to a safe value
   received_size = old_block_size*Alignment - BlockCtrlBytes;

   //Now translate it to Alignment units
   min_size       = detail::get_rounded_size(min_size, Alignment)/Alignment;
   preferred_size = detail::get_rounded_size(preferred_size, Alignment)/Alignment;

   //Some parameter checks
   if(min_size > preferred_size)
      return false;

   std::size_t data_size = old_block_size - BlockCtrlUnits;

   if(data_size >= min_size)
      return true;

   block_ctrl *next_block = priv_next_block_if_free(block);
   if(!next_block){
      return false;
   }

   //Is "block" + "next_block" big enough?
   std::size_t merged_size = old_block_size + next_block->m_size;

   //Now we can expand this block further than before
   received_size = merged_size*Alignment - BlockCtrlBytes;

   if(merged_size < (min_size + BlockCtrlUnits)){
      return false;
   }

   //We can expand the block. Merge both blocks,
   block->m_next = next_block->m_next;
   block->m_size = merged_size;

   //Find the previous free block of next_block
   block_ctrl *prev = &m_header.m_root;
   while(detail::get_pointer(prev->m_next) != next_block){
      prev = detail::get_pointer(prev->m_next);
   }

   //Now insert merged block in the free list
   //This allows reusing allocation logic in this function
   m_header.m_allocated -= old_block_size*Alignment;
   prev->m_next = block;

   //Now use check and allocate to do the allocation logic
   preferred_size += BlockCtrlUnits;
   std::size_t nunits = preferred_size < merged_size ? preferred_size : merged_size;

   //This must succeed since nunits is less than merged_size!
   if(!this->priv_check_and_allocate(nunits, prev, block, received_size)){
      //Something very ugly is happening here. This is a bug
      //or there is memory corruption
      assert(0);
      return false;
   }
   return true;
}
template<class MutexFamily, class VoidPointer> inline
void* simple_seq_fit_impl<MutexFamily, VoidPointer>::priv_check_and_allocate
   (std::size_t nunits
   ,typename simple_seq_fit_impl<MutexFamily, VoidPointer>::block_ctrl* prev
   ,typename simple_seq_fit_impl<MutexFamily, VoidPointer>::block_ctrl* block
   ,std::size_t &received_size)
{
   std::size_t upper_nunits = nunits + BlockCtrlUnits;
   bool found = false;

   if (block->m_size > upper_nunits){
      //This block is bigger than needed, split it in
      //two blocks, the first's size will be "units"
      //the second's size will be "block->m_size-units"
      std::size_t total_size = block->m_size;
      block->m_size = nunits;

      block_ctrl *new_block = reinterpret_cast<block_ctrl*>
         (reinterpret_cast<char*>(block) + Alignment*nunits);
      new_block->m_size = total_size - nunits;
      new_block->m_next = block->m_next;
      prev->m_next = new_block;
      found = true;
   }
   else if (block->m_size >= nunits){
      //This block has exactly the right size, or some extra
      //unusable bytes. Just unlink it from the free list
      prev->m_next = block->m_next;
      found = true;
   }

   if(found){
      //We need block_ctrl for deallocation stuff, so
      //return memory user can overwrite
      m_header.m_allocated += block->m_size*Alignment;
      received_size = block->get_user_bytes();
      //Mark the block as allocated
      priv_mark_new_allocated_block(block);
      //Check alignment
      algo_impl_t::assert_alignment(block);
      return priv_get_user_buffer(block);
   }
   return 0;
}
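
//Split example (illustrative, assuming BlockCtrlUnits == 1): asking for
//nunits == 4 from a free block with m_size == 10 takes the m_size > 4 + 1
//branch: the block is cut to m_size == 4 and a new free block with
//m_size == 10 - 4 == 6 is linked in its place. A free block with m_size == 5
//would instead be handed out whole, since splitting it could not leave room
//for another header plus at least one data unit.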
template<class MutexFamily, class VoidPointer>
void simple_seq_fit_impl<MutexFamily, VoidPointer>::deallocate(void* addr)
{
   if(!addr)   return;
   //-----------------------
   boost::interprocess::scoped_lock<interprocess_mutex> guard(m_header);
   //-----------------------
   return this->priv_deallocate(addr);
}
template<class MutexFamily, class VoidPointer>
void simple_seq_fit_impl<MutexFamily, VoidPointer>::priv_deallocate(void* addr)
{
   if(!addr)   return;

   //Let's get free block list. List is always sorted
   //by memory address to allow block merging.
   //Pointer next always points to the first
   //(lower address) block
   block_ctrl * prev  = &m_header.m_root;
   block_ctrl * pos   = detail::get_pointer(m_header.m_root.m_next);
   block_ctrl * block = reinterpret_cast<block_ctrl*>(priv_get_block(addr));

   //All used blocks' next is marked with 0 so check it
   assert(block->m_next == 0);

   //Check if alignment and block size are right
   algo_impl_t::assert_alignment(addr);

   std::size_t total_size = Alignment*block->m_size;
   assert(m_header.m_allocated >= total_size);

   //Update used memory count
   m_header.m_allocated -= total_size;

   //Let's find the previous and the next block of the block to deallocate
   //This ordering comparison must be done with original pointers
   //types since their mapping to raw pointers can be different
   //in each process
   while((detail::get_pointer(pos) != &m_header.m_root) && (block > pos)){
      prev = pos;
      pos  = detail::get_pointer(pos->m_next);
   }

   //Try to combine with upper block
   char *block_char_ptr = reinterpret_cast<char*>(detail::get_pointer(block));

   if ((block_char_ptr + Alignment*block->m_size) ==
         reinterpret_cast<char*>(detail::get_pointer(pos))){
      block->m_size += pos->m_size;
      block->m_next  = pos->m_next;
   }
   else{
      block->m_next = pos;
   }

   //Try to combine with lower block
   if ((reinterpret_cast<char*>(detail::get_pointer(prev))
            + Alignment*prev->m_size) ==
        block_char_ptr){
      prev->m_size += block->m_size;
      prev->m_next  = block->m_next;
   }
   else{
      prev->m_next = block;
   }
}
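
//Coalescing example (illustrative): because the free list is kept sorted by
//address, returning a block at [0x100, 0x140) when free neighbours exist at
//[0x0C0, 0x100) and [0x140, 0x180) fires both merges above, collapsing the
//three blocks into a single free block covering [0x0C0, 0x180).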
}  //namespace detail {

}  //namespace interprocess {

}  //namespace boost {

#include <boost/interprocess/detail/config_end.hpp>

#endif   //#ifndef BOOST_INTERPROCESS_MEM_ALGO_DETAIL_SIMPLE_SEQ_FIT_IMPL_HPP