//////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright Ion Gaztanaga 2005-2008. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
// See http://www.boost.org/libs/interprocess for documentation.
//
//////////////////////////////////////////////////////////////////////////////

#ifndef BOOST_INTERPROCESS_DETAIL_MEM_ALGO_COMMON_HPP
#define BOOST_INTERPROCESS_DETAIL_MEM_ALGO_COMMON_HPP

#if (defined _MSC_VER) && (_MSC_VER >= 1200)
#  pragma once
#endif

#include <boost/interprocess/detail/config_begin.hpp>
#include <boost/interprocess/detail/workaround.hpp>

#include <boost/interprocess/interprocess_fwd.hpp>
#include <boost/interprocess/containers/allocation_type.hpp>
#include <boost/interprocess/detail/utilities.hpp>
#include <boost/interprocess/detail/type_traits.hpp>
#include <boost/interprocess/detail/math_functions.hpp>
#include <boost/interprocess/detail/move.hpp>
#include <boost/assert.hpp>
#include <boost/static_assert.hpp>
#include <algorithm>
#include <utility>
#include <iterator>

//!\file
//!Implements common operations for memory algorithms.
namespace boost {
namespace interprocess {
namespace detail {

template<class VoidPointer>
class basic_multiallocation_slist
{
   public:
   typedef VoidPointer void_pointer;

   private:
   static VoidPointer &priv_get_ref(const VoidPointer &p)
   {  return *static_cast<void_pointer*>(detail::get_pointer(p));  }

   basic_multiallocation_slist(basic_multiallocation_slist &);
   basic_multiallocation_slist &operator=(basic_multiallocation_slist &);

   public:
   BOOST_INTERPROCESS_ENABLE_MOVE_EMULATION(basic_multiallocation_slist)

   //!This iterator is returned by "allocate_many" functions so that
   //!the user can access the multiple buffers allocated in a single call
   class iterator
      :  public std::iterator<std::input_iterator_tag, char>
   {
      friend class basic_multiallocation_slist<void_pointer>;
      void unspecified_bool_type_func() const {}
      typedef void (iterator::*unspecified_bool_type)() const;

      iterator(void_pointer node_range)
         :  next_node_(node_range)
      {}

      public:
      typedef char         value_type;
      typedef value_type & reference;
      typedef value_type * pointer;

      iterator()
         :  next_node_(0)
      {}

      iterator &operator=(const iterator &other)
      {  next_node_ = other.next_node_;  return *this;  }

      public:
      iterator& operator++()
      {
         next_node_ = *static_cast<void_pointer*>(detail::get_pointer(next_node_));
         return *this;
      }

      iterator operator++(int)
      {
         iterator result(*this);
         ++*this;
         return result;
      }

      bool operator== (const iterator& other) const
      {  return next_node_ == other.next_node_;  }

      bool operator!= (const iterator& other) const
      {  return !operator== (other);  }

      reference operator*() const
      {  return *static_cast<char*>(detail::get_pointer(next_node_));  }

      operator unspecified_bool_type() const
      {  return next_node_ ? &iterator::unspecified_bool_type_func : 0;  }

      pointer operator->() const
      {  return &(*(*this));  }

      private:
      void_pointer next_node_;
   };

   private:
   iterator it_;

   public:
   basic_multiallocation_slist()
      :  it_(iterator())
   {}

   basic_multiallocation_slist(void_pointer p)
      :  it_(p ? iterator_to(p) : iterator())
   {}

   basic_multiallocation_slist(BOOST_INTERPROCESS_RV_REF(basic_multiallocation_slist) other)
      :  it_(iterator())
   {  this->swap(other);  }

   basic_multiallocation_slist& operator=(BOOST_INTERPROCESS_RV_REF(basic_multiallocation_slist) other)
   {
      basic_multiallocation_slist tmp(boost::interprocess::move(other));
      this->swap(tmp);
      return *this;
   }

   bool empty() const
   {  return !it_;  }

   iterator before_begin() const
   {  return iterator(void_pointer(const_cast<void*>(static_cast<const void*>(&it_.next_node_))));  }

   iterator begin() const
   {  return it_;  }

   iterator end() const
   {  return iterator();  }

   void clear()
   {  this->it_.next_node_ = void_pointer(0);  }

   iterator insert_after(iterator it, void_pointer m)
   {
      priv_get_ref(m) = priv_get_ref(it.next_node_);
      priv_get_ref(it.next_node_) = m;
      return iterator(m);
   }

   void push_front(void_pointer m)
   {
      priv_get_ref(m) = this->it_.next_node_;
      this->it_.next_node_ = m;
   }

   void pop_front()
   {  ++it_;  }

   void *front() const
   {  return detail::get_pointer(it_.next_node_);  }

   void splice_after(iterator after_this, iterator before_begin, iterator before_end)
   {
      if (after_this != before_begin && after_this != before_end && before_begin != before_end) {
         void_pointer next_b = priv_get_ref(before_begin.next_node_);
         void_pointer next_e = priv_get_ref(before_end.next_node_);
         void_pointer next_p = priv_get_ref(after_this.next_node_);
         priv_get_ref(before_begin.next_node_) = next_e;
         priv_get_ref(before_end.next_node_)   = next_p;
         priv_get_ref(after_this.next_node_)   = next_b;
      }
   }

   void swap(basic_multiallocation_slist &other_chain)
   {  std::swap(this->it_, other_chain.it_);  }

   static iterator iterator_to(void_pointer p)
   {  return iterator(p);  }

   void_pointer extract_data()
   {
      void_pointer ret = empty() ? void_pointer(0) : void_pointer(&*it_);
      it_ = iterator();
      return ret;
   }
};
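
//Illustrative sketch (not part of the original header): basic_multiallocation_slist
//stores no node metadata of its own; the "next" pointer of each element is written
//into the first bytes of the user buffer itself, so walking the chain is just
//chasing those embedded pointers. Assuming a chain that was filled by an
//allocate_many-style call and a plain void* void_pointer, traversal would look
//roughly like this:
//
//   basic_multiallocation_slist<void*> chain(first_buffer);   //first_buffer: hypothetical
//   for(basic_multiallocation_slist<void*>::iterator it = chain.begin();
//       it != chain.end(); ++it){
//      void *user_buffer = &*it;   //each buffer stays usable until it is popped
//      //... use user_buffer ...
//   }
//
//Note that popping or deallocating a buffer releases the memory that holds the
//embedded "next" pointer, so the iterator must be advanced before that happens.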
template<class VoidPointer>
class basic_multiallocation_cached_slist
{
   private:
   basic_multiallocation_slist<VoidPointer>                   slist_;
   typename basic_multiallocation_slist<VoidPointer>::iterator last_;

   basic_multiallocation_cached_slist(basic_multiallocation_cached_slist &);
   basic_multiallocation_cached_slist &operator=(basic_multiallocation_cached_slist &);

   public:
   BOOST_INTERPROCESS_ENABLE_MOVE_EMULATION(basic_multiallocation_cached_slist)

   typedef typename basic_multiallocation_slist<VoidPointer>::void_pointer void_pointer;
   typedef typename basic_multiallocation_slist<VoidPointer>::iterator     iterator;

   basic_multiallocation_cached_slist()
      :  slist_(), last_(slist_.before_begin())
   {}

   basic_multiallocation_cached_slist(iterator first_node)
      :  slist_(first_node), last_(slist_.before_begin())
   {
      iterator end;
      while(first_node != end){
         ++last_;
         ++first_node;
      }
   }

   basic_multiallocation_cached_slist(void_pointer p1, void_pointer p2)
      :  slist_(p1), last_(p2 ? iterator_to(p2) : slist_.before_begin())
   {}

   basic_multiallocation_cached_slist(BOOST_INTERPROCESS_RV_REF(basic_multiallocation_cached_slist) other)
      :  slist_(), last_(slist_.before_begin())
   {  this->swap(other);  }

   basic_multiallocation_cached_slist& operator=(BOOST_INTERPROCESS_RV_REF(basic_multiallocation_cached_slist) other)
   {
      basic_multiallocation_cached_slist tmp(boost::interprocess::move(other));
      this->swap(tmp);
      return *this;
   }

   bool empty() const
   {  return slist_.empty();  }

   iterator before_begin() const
   {  return slist_.before_begin();  }

   iterator begin() const
   {  return slist_.begin();  }

   iterator end() const
   {  return slist_.end();  }

   iterator last() const
   {  return last_;  }

   void clear()
   {
      slist_.clear();
      last_ = slist_.before_begin();
   }

   iterator insert_after(iterator it, void_pointer m)
   {
      slist_.insert_after(it, m);
      if(it == last_){
         last_ = slist_.iterator_to(m);
      }
      return iterator_to(m);
   }

   void push_front(void_pointer m)
   {  this->insert_after(this->before_begin(), m);  }

   void push_back(void_pointer m)
   {  this->insert_after(last_, m);  }

   void pop_front()
   {
      if(last_ == slist_.begin()){
         last_ = slist_.before_begin();
      }
      slist_.pop_front();
   }

   void *front() const
   {  return slist_.front();  }

   void splice_after(iterator after_this, iterator before_begin, iterator before_end)
   {
      if(before_begin == before_end)
         return;
      if(after_this == last_){
         last_ = before_end;
      }
      slist_.splice_after(after_this, before_begin, before_end);
   }

   void swap(basic_multiallocation_cached_slist &x)
   {
      slist_.swap(x.slist_);
      using std::swap;
      swap(last_, x.last_);
      if(last_ == x.before_begin()){
         last_ = this->before_begin();
      }
      if(x.last_ == this->before_begin()){
         x.last_ = x.before_begin();
      }
   }

   static iterator iterator_to(void_pointer p)
   {  return basic_multiallocation_slist<VoidPointer>::iterator_to(p);  }

   std::pair<void_pointer, void_pointer> extract_data()
   {
      if(this->empty()){
         return std::pair<void_pointer, void_pointer>(void_pointer(0), void_pointer(0));
      }
      else{
         void_pointer p1 = slist_.extract_data();
         void_pointer p2 = void_pointer(&*last_);
         last_ = iterator();
         return std::pair<void_pointer, void_pointer>(p1, p2);
      }
   }
};
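
//Illustrative note (not part of the original header): the cached "last_" iterator is
//what turns push_back into a constant-time operation on a singly linked chain. A
//sketch, assuming VoidPointer is a plain void* and buffer0..buffer2 are hypothetical
//buffers obtained elsewhere:
//
//   basic_multiallocation_cached_slist<void*> chain;
//   chain.push_back(buffer0);   //O(1): inserts after the cached last_
//   chain.push_back(buffer1);   //O(1): no traversal of the chain is needed
//   chain.push_front(buffer2);  //O(1): inserts after before_begin()
//
//Every mutating operation above also updates last_ so that it keeps pointing to the
//final node (or to before_begin() when the chain is empty).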
template<class MultiallocatorCachedSlist>
class basic_multiallocation_cached_counted_slist
{
   private:
   MultiallocatorCachedSlist cached_slist_;
   std::size_t               size_;

   basic_multiallocation_cached_counted_slist(basic_multiallocation_cached_counted_slist &);
   basic_multiallocation_cached_counted_slist &operator=(basic_multiallocation_cached_counted_slist &);

   public:
   BOOST_INTERPROCESS_ENABLE_MOVE_EMULATION(basic_multiallocation_cached_counted_slist)

   typedef typename MultiallocatorCachedSlist::void_pointer void_pointer;
   typedef typename MultiallocatorCachedSlist::iterator     iterator;

   basic_multiallocation_cached_counted_slist()
      :  cached_slist_(), size_(0)
   {}

   basic_multiallocation_cached_counted_slist(void_pointer p1, void_pointer p2, std::size_t n)
      :  cached_slist_(p1, p2), size_(n)
   {}

   basic_multiallocation_cached_counted_slist(BOOST_INTERPROCESS_RV_REF(basic_multiallocation_cached_counted_slist) other)
      :  cached_slist_(), size_(0)
   {  this->swap(other);  }

   basic_multiallocation_cached_counted_slist& operator=(BOOST_INTERPROCESS_RV_REF(basic_multiallocation_cached_counted_slist) other)
   {
      basic_multiallocation_cached_counted_slist tmp(boost::interprocess::move(other));
      this->swap(tmp);
      return *this;
   }

   basic_multiallocation_cached_counted_slist (MultiallocatorCachedSlist mem, std::size_t n)
      :  cached_slist_(boost::interprocess::move(mem)), size_(n)
   {}

   bool empty() const
   {  return cached_slist_.empty();  }

   std::size_t size() const
   {  return size_;  }

   iterator before_begin() const
   {  return cached_slist_.before_begin();  }

   iterator begin() const
   {  return cached_slist_.begin();  }

   iterator end() const
   {  return cached_slist_.end();  }

   iterator last() const
   {  return cached_slist_.last();  }

   void clear()
   {
      cached_slist_.clear();
      size_ = 0;
   }

   iterator insert_after(iterator it, void_pointer m)
   {
      iterator ret = cached_slist_.insert_after(it, m);
      ++size_;
      return ret;
   }

   void push_front(void_pointer m)
   {  this->insert_after(this->before_begin(), m);  }

   void push_back(void_pointer m)
   {  this->insert_after(this->last(), m);  }

   void pop_front()
   {
      cached_slist_.pop_front();
      --size_;
   }

   void *front() const
   {  return cached_slist_.front();  }

   void splice_after(iterator after_this, basic_multiallocation_cached_counted_slist &x, iterator before_begin, iterator before_end)
   {
      std::size_t n = static_cast<std::size_t>(std::distance(before_begin, before_end));
      this->splice_after(after_this, x, before_begin, before_end, n);
   }

   void splice_after(iterator after_this, basic_multiallocation_cached_counted_slist &x, iterator before_begin, iterator before_end, std::size_t n)
   {
      cached_slist_.splice_after(after_this, before_begin, before_end);
      size_   += n;
      x.size_ -= n;
   }

   void splice_after(iterator after_this, basic_multiallocation_cached_counted_slist &x)
   {
      cached_slist_.splice_after(after_this, x.before_begin(), x.last());
      size_ += x.size_;
      x.size_ = 0;
   }

   void swap(basic_multiallocation_cached_counted_slist &x)
   {
      cached_slist_.swap(x.cached_slist_);
      using std::swap;
      swap(size_, x.size_);
   }

   static iterator iterator_to(void_pointer p)
   {  return MultiallocatorCachedSlist::iterator_to(p);  }

   std::pair<void_pointer, void_pointer> extract_data()
   {
      size_ = 0;
      return cached_slist_.extract_data();
   }
};

template<class T>
struct cast_functor
{
   typedef typename detail::add_reference<T>::type result_type;
   result_type operator()(char &ptr) const
   {  return *static_cast<T*>(static_cast<void*>(&ptr));  }
};
template<class MultiallocationChain, class T>
class transform_multiallocation_chain
{
   private:
   MultiallocationChain   holder_;
   typedef typename MultiallocationChain::void_pointer   void_pointer;
   typedef typename boost::pointer_to_other
      <void_pointer, T>::type                            pointer;

   transform_multiallocation_chain(transform_multiallocation_chain &);
   transform_multiallocation_chain &operator=(transform_multiallocation_chain &);

   static pointer cast(void_pointer p)
   {
      return pointer(static_cast<T*>(detail::get_pointer(p)));
   }

   public:
   BOOST_INTERPROCESS_ENABLE_MOVE_EMULATION(transform_multiallocation_chain)

   typedef transform_iterator
      < typename MultiallocationChain::iterator
      , detail::cast_functor <T> >                       iterator;

   transform_multiallocation_chain(void_pointer p1, void_pointer p2, std::size_t n)
      :  holder_(p1, p2, n)
   {}

   transform_multiallocation_chain()
      :  holder_()
   {}

   transform_multiallocation_chain(BOOST_INTERPROCESS_RV_REF(transform_multiallocation_chain) other)
      :  holder_()
   {  this->swap(other);  }

   transform_multiallocation_chain(BOOST_INTERPROCESS_RV_REF(MultiallocationChain) other)
      :  holder_(boost::interprocess::move(other))
   {}

   transform_multiallocation_chain& operator=(BOOST_INTERPROCESS_RV_REF(transform_multiallocation_chain) other)
   {
      transform_multiallocation_chain tmp(boost::interprocess::move(other));
      this->swap(tmp);
      return *this;
   }

   void push_front(pointer mem)
   {  holder_.push_front(mem);  }

   void swap(transform_multiallocation_chain &other_chain)
   {  holder_.swap(other_chain.holder_);  }

   void splice_after(iterator after_this, iterator before_begin, iterator before_end)
   {  holder_.splice_after(after_this.base(), before_begin.base(), before_end.base());  }

   void splice_after(iterator after_this, transform_multiallocation_chain &x, iterator before_begin, iterator before_end, std::size_t n)
   {  holder_.splice_after(after_this.base(), x.holder_, before_begin.base(), before_end.base(), n);  }

   void pop_front()
   {  holder_.pop_front();  }

   pointer front() const
   {  return cast(holder_.front());  }

   bool empty() const
   {  return holder_.empty();  }

   iterator before_begin() const
   {  return iterator(holder_.before_begin());  }

   iterator begin() const
   {  return iterator(holder_.begin());  }

   iterator end() const
   {  return iterator(holder_.end());  }

   iterator last() const
   {  return iterator(holder_.last());  }

   std::size_t size() const
   {  return holder_.size();  }

   void clear()
   {  holder_.clear();  }

   iterator insert_after(iterator it, pointer m)
   {  return iterator(holder_.insert_after(it.base(), m));  }

   static iterator iterator_to(pointer p)
   {  return iterator(MultiallocationChain::iterator_to(p));  }

   std::pair<void_pointer, void_pointer> extract_data()
   {  return holder_.extract_data();  }

   MultiallocationChain extract_multiallocation_chain()
   {
      return MultiallocationChain(boost::interprocess::move(holder_));
   }
};
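
//Illustrative sketch (not part of the original header): transform_multiallocation_chain
//adapts a raw, untyped chain into a chain of T buffers. Its iterator is a
//transform_iterator that applies cast_functor<T> to each node, so dereferencing yields
//a T placed at the start of every allocated buffer. Assuming a raw chain built from the
//classes above and a plain void* void_pointer, consuming an int chain could look like:
//
//   typedef basic_multiallocation_cached_counted_slist<
//      basic_multiallocation_cached_slist<void*> >           raw_chain_t;
//   typedef transform_multiallocation_chain<raw_chain_t, int> int_chain_t;
//
//   //int_chain_t chain = ...;   //filled by an allocate_many-style call
//   //while(!chain.empty()){
//   //   int *p = detail::get_pointer(chain.front());   //typed access to the buffer
//   //   chain.pop_front();
//   //}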
//!This class implements several allocation functions shared by different algorithms
//!(aligned allocation, multiple allocation...).
template<class MemoryAlgorithm>
class memory_algorithm_common
{
   public:
   typedef typename MemoryAlgorithm::void_pointer              void_pointer;
   typedef typename MemoryAlgorithm::block_ctrl                block_ctrl;
   typedef typename MemoryAlgorithm::multiallocation_chain     multiallocation_chain;
   typedef memory_algorithm_common<MemoryAlgorithm>            this_type;

   static const std::size_t Alignment              = MemoryAlgorithm::Alignment;
   static const std::size_t MinBlockUnits          = MemoryAlgorithm::MinBlockUnits;
   static const std::size_t AllocatedCtrlBytes     = MemoryAlgorithm::AllocatedCtrlBytes;
   static const std::size_t AllocatedCtrlUnits     = MemoryAlgorithm::AllocatedCtrlUnits;
   static const std::size_t BlockCtrlBytes         = MemoryAlgorithm::BlockCtrlBytes;
   static const std::size_t BlockCtrlUnits         = MemoryAlgorithm::BlockCtrlUnits;
   static const std::size_t UsableByPreviousChunk  = MemoryAlgorithm::UsableByPreviousChunk;

   static void assert_alignment(const void *ptr)
   {  assert_alignment((std::size_t)ptr);  }

   static void assert_alignment(std::size_t uint_ptr)
   {
      (void)uint_ptr;
      BOOST_ASSERT(uint_ptr % Alignment == 0);
   }

   static bool check_alignment(const void *ptr)
   {  return (((std::size_t)ptr) % Alignment == 0);  }

   static std::size_t ceil_units(std::size_t size)
   {  return detail::get_rounded_size(size, Alignment)/Alignment;  }

   static std::size_t floor_units(std::size_t size)
   {  return size/Alignment;  }

   static std::size_t multiple_of_units(std::size_t size)
   {  return detail::get_rounded_size(size, Alignment);  }
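
   //Illustrative note (not part of the original header): the "units" helpers convert
   //byte counts into multiples of Alignment. For example, assuming Alignment == 16:
   //
   //   ceil_units(100)        == 7    //100 bytes rounded up to 112 bytes -> 7 units
   //   floor_units(100)       == 6    //100 bytes rounded down -> 6 whole units
   //   multiple_of_units(100) == 112  //smallest multiple of Alignment >= 100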
   static multiallocation_chain allocate_many
      (MemoryAlgorithm *memory_algo, std::size_t elem_bytes, std::size_t n_elements)
   {
      return this_type::priv_allocate_many(memory_algo, &elem_bytes, n_elements, 0);
   }

   static void deallocate_many(MemoryAlgorithm *memory_algo, multiallocation_chain chain)
   {
      return this_type::priv_deallocate_many(memory_algo, boost::interprocess::move(chain));
   }
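
   //Illustrative sketch (not part of the original header): a memory algorithm would
   //typically forward its own allocate_many/deallocate_many to these helpers. The
   //algorithm type below ("my_algo") is hypothetical; only the memory_algorithm_common
   //calls are taken from this header:
   //
   //   //multiallocation_chain chain =
   //   //   memory_algorithm_common<my_algo>::allocate_many(this, 64, 10);
   //   //...hand out the 10 buffers of 64 bytes each, then give them back:
   //   //memory_algorithm_common<my_algo>::deallocate_many
   //   //   (this, boost::interprocess::move(chain));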
   static bool calculate_lcm_and_needs_backwards_lcmed
      (std::size_t backwards_multiple, std::size_t received_size, std::size_t size_to_achieve,
       std::size_t &lcm_out, std::size_t &needs_backwards_lcmed_out)
   {
      // Now calculate lcm
      std::size_t max = backwards_multiple;
      std::size_t min = Alignment;
      std::size_t needs_backwards;
      std::size_t needs_backwards_lcmed;
      std::size_t lcm;
      std::size_t current_forward;
      //Swap if necessary
      if(max < min){
         std::size_t tmp = min;
         min = max;
         max = tmp;
      }
      //Check if it's a power of two
      if((backwards_multiple & (backwards_multiple-1)) == 0){
         if(0 != (size_to_achieve & ((backwards_multiple-1)))){
            return false;
         }

         lcm = max;
         //If we want to use minbytes data to get a buffer between maxbytes
         //and minbytes if maxbytes can't be achieved, calculate the
         //biggest of all possibilities
         current_forward = detail::get_truncated_size_po2(received_size, backwards_multiple);
         needs_backwards = size_to_achieve - current_forward;
         assert((needs_backwards % backwards_multiple) == 0);
         needs_backwards_lcmed = detail::get_rounded_size_po2(needs_backwards, lcm);
         lcm_out = lcm;
         needs_backwards_lcmed_out = needs_backwards_lcmed;
         return true;
      }
      //Check if it's a multiple of the alignment
      else if((backwards_multiple & (Alignment - 1u)) == 0){
         lcm = backwards_multiple;
         current_forward = detail::get_truncated_size(received_size, backwards_multiple);
         //No need to round needs_backwards because backwards_multiple == lcm
         needs_backwards_lcmed = needs_backwards = size_to_achieve - current_forward;
         assert((needs_backwards_lcmed & (Alignment - 1u)) == 0);
         lcm_out = lcm;
         needs_backwards_lcmed_out = needs_backwards_lcmed;
         return true;
      }
      //Check if it's a multiple of half the alignment
      else if((backwards_multiple & ((Alignment/2u) - 1u)) == 0){
         lcm = backwards_multiple*2u;
         current_forward = detail::get_truncated_size(received_size, backwards_multiple);
         needs_backwards_lcmed = needs_backwards = size_to_achieve - current_forward;
         if(0 != (needs_backwards_lcmed & (Alignment-1)))
         //while(0 != (needs_backwards_lcmed & (Alignment-1)))
            needs_backwards_lcmed += backwards_multiple;
         assert((needs_backwards_lcmed % lcm) == 0);
         lcm_out = lcm;
         needs_backwards_lcmed_out = needs_backwards_lcmed;
         return true;
      }
      //Check if it's a multiple of a quarter of the alignment
      else if((backwards_multiple & ((Alignment/4u) - 1u)) == 0){
         std::size_t remainder;
         lcm = backwards_multiple*4u;
         current_forward = detail::get_truncated_size(received_size, backwards_multiple);
         needs_backwards_lcmed = needs_backwards = size_to_achieve - current_forward;
         //while(0 != (needs_backwards_lcmed & (Alignment-1)))
         //   needs_backwards_lcmed += backwards_multiple;
         if(0 != (remainder = ((needs_backwards_lcmed & (Alignment-1))>>(Alignment/8u)))){
            if(backwards_multiple & Alignment/2u){
               needs_backwards_lcmed += (remainder)*backwards_multiple;
            }
            else{
               needs_backwards_lcmed += (4-remainder)*backwards_multiple;
            }
         }
         assert((needs_backwards_lcmed % lcm) == 0);
         lcm_out = lcm;
         needs_backwards_lcmed_out = needs_backwards_lcmed;
         return true;
      }
      else{
         lcm = detail::lcm(max, min);
      }
      //If we want to use minbytes data to get a buffer between maxbytes
      //and minbytes if maxbytes can't be achieved, calculate the
      //biggest of all possibilities
      current_forward = detail::get_truncated_size(received_size, backwards_multiple);
      needs_backwards = size_to_achieve - current_forward;
      assert((needs_backwards % backwards_multiple) == 0);
      needs_backwards_lcmed = detail::get_rounded_size(needs_backwards, lcm);
      lcm_out = lcm;
      needs_backwards_lcmed_out = needs_backwards_lcmed;
      return true;
   }
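
   //Illustrative worked example (not part of the original header) for the power-of-two
   //branch above, assuming Alignment == 16 and backwards_multiple == 8:
   //
   //   received_size   = 100, size_to_achieve = 152 (a multiple of backwards_multiple)
   //   lcm             = larger of (8, 16)                 = 16
   //   current_forward = 100 truncated down to 8 bytes     = 96
   //   needs_backwards = 152 - 96                          = 56
   //   needs_backwards_lcmed = 56 rounded up to 16 bytes   = 64
   //
   //So the caller learns it must expand the buffer 64 bytes backwards (a multiple of
   //both the alignment and the requested backwards multiple) to reach the target size.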
   static multiallocation_chain allocate_many
      ( MemoryAlgorithm *memory_algo
      , const std::size_t *elem_sizes
      , std::size_t n_elements
      , std::size_t sizeof_element)
   {
      return this_type::priv_allocate_many(memory_algo, elem_sizes, n_elements, sizeof_element);
   }

   static void* allocate_aligned
      (MemoryAlgorithm *memory_algo, std::size_t nbytes, std::size_t alignment)
   {
      //Ensure power of 2
      if ((alignment & (alignment - std::size_t(1u))) != 0){
         //Alignment is not power of two
         BOOST_ASSERT((alignment & (alignment - std::size_t(1u))) == 0);
         return 0;
      }

      std::size_t real_size;
      if(alignment <= Alignment){
         return memory_algo->priv_allocate
            (boost::interprocess::allocate_new, nbytes, nbytes, real_size).first;
      }

      if(nbytes > UsableByPreviousChunk)
         nbytes -= UsableByPreviousChunk;

      //We can find an aligned portion if we allocate a block that has alignment
      //nbytes + alignment bytes or more.
      std::size_t minimum_allocation = max_value
         (nbytes + alignment, std::size_t(MinBlockUnits*Alignment));
      //Since we will split that block, we must request a bit more memory
      //if the alignment is near the beginning of the buffer, because otherwise,
      //there is no space for a new block before the alignment.
      //
      //            ____ Aligned here
      //           |
      //  -----------------------------------------------------
      // | MBU |
      //  -----------------------------------------------------
      std::size_t request =
         minimum_allocation + (2*MinBlockUnits*Alignment - AllocatedCtrlBytes
         //prevsize - UsableByPreviousChunk
         );

      //Now allocate the buffer
      void *buffer = memory_algo->priv_allocate
         (boost::interprocess::allocate_new, request, request, real_size).first;
      if(!buffer){
         return 0;
      }
      else if ((((std::size_t)(buffer)) % alignment) == 0){
         //If we are lucky and the buffer is aligned, just split it and
         //return the high part
         block_ctrl *first  = memory_algo->priv_get_block(buffer);
         std::size_t old_size = first->m_size;
         const std::size_t first_min_units =
            max_value(ceil_units(nbytes) + AllocatedCtrlUnits, std::size_t(MinBlockUnits));
         //We can create a new block in the end of the segment
         if(old_size >= (first_min_units + MinBlockUnits)){
            block_ctrl *second = reinterpret_cast<block_ctrl *>
               (reinterpret_cast<char*>(first) + Alignment*first_min_units);
            first->m_size  = first_min_units;
            second->m_size = old_size - first->m_size;
            BOOST_ASSERT(second->m_size >= MinBlockUnits);
            memory_algo->priv_mark_new_allocated_block(first);
            //memory_algo->priv_tail_size(first, first->m_size);
            memory_algo->priv_mark_new_allocated_block(second);
            memory_algo->priv_deallocate(memory_algo->priv_get_user_buffer(second));
         }
         return buffer;
      }

      //Buffer not aligned, find the aligned part.
      //
      //                    ____ Aligned here
      //                   |
      //  -----------------------------------------------------
      // | MBU +more | ACB |
      //  -----------------------------------------------------
      char *pos = reinterpret_cast<char*>
         (reinterpret_cast<std::size_t>(static_cast<char*>(buffer) +
            //This is the minimum size of (2)
            (MinBlockUnits*Alignment - AllocatedCtrlBytes) +
            //This is the next MBU for the aligned memory
            AllocatedCtrlBytes +
            //This is the alignment trick
            alignment - 1) & -alignment);

      //Now obtain the address of the blocks
      block_ctrl *first  = memory_algo->priv_get_block(buffer);
      block_ctrl *second = memory_algo->priv_get_block(pos);
      assert(pos <= (reinterpret_cast<char*>(first) + first->m_size*Alignment));
      assert(first->m_size >= 2*MinBlockUnits);
      assert((pos + MinBlockUnits*Alignment - AllocatedCtrlBytes + nbytes*Alignment/Alignment) <=
             (reinterpret_cast<char*>(first) + first->m_size*Alignment));
      //Set the new size of the first block
      std::size_t old_size = first->m_size;
      first->m_size = (reinterpret_cast<char*>(second) - reinterpret_cast<char*>(first))/Alignment;
      memory_algo->priv_mark_new_allocated_block(first);

      //Now check if we can create a new buffer in the end
      //
      //              __"second" block
      //             |      __Aligned here
      //             |     |      __"third" block
      //  -----------|-----|-----|------------------------------
      // | MBU +more | ACB | (3) | BCU |
      //  -----------------------------------------------------
      //This size will be the minimum size to be able to create a
      //new block in the end.
      const std::size_t second_min_units = max_value(std::size_t(MinBlockUnits),
                        ceil_units(nbytes) + AllocatedCtrlUnits );

      //Check if we can create a new block (of size MinBlockUnits) in the end of the segment
      if((old_size - first->m_size) >= (second_min_units + MinBlockUnits)){
         //Now obtain the address of the end block
         block_ctrl *third = new (reinterpret_cast<char*>(second) + Alignment*second_min_units)block_ctrl;
         second->m_size = second_min_units;
         third->m_size  = old_size - first->m_size - second->m_size;
         BOOST_ASSERT(third->m_size >= MinBlockUnits);
         memory_algo->priv_mark_new_allocated_block(second);
         memory_algo->priv_mark_new_allocated_block(third);
         memory_algo->priv_deallocate(memory_algo->priv_get_user_buffer(third));
      }
      else{
         second->m_size = old_size - first->m_size;
         assert(second->m_size >= MinBlockUnits);
         memory_algo->priv_mark_new_allocated_block(second);
      }

      memory_algo->priv_deallocate(memory_algo->priv_get_user_buffer(first));
      return memory_algo->priv_get_user_buffer(second);
   }
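
   //Illustrative note (not part of the original header): the expression used for "pos"
   //above is the classic round-up-to-alignment trick: (addr + alignment - 1) & -alignment.
   //A standalone sketch:
   //
   //   std::size_t align_up(std::size_t addr, std::size_t alignment)   //power-of-2 alignment
   //   {  return (addr + alignment - 1) & (std::size_t(0) - alignment);  }
   //
   //   //align_up(0x1001, 0x1000) == 0x2000,  align_up(0x2000, 0x1000) == 0x2000
   //
   //allocate_aligned additionally skips forward far enough that a block control header
   //and a minimum-sized block fit before the aligned address, so the front part of the
   //over-allocated buffer can be split off and returned to the free list.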
   static bool try_shrink
      (MemoryAlgorithm *memory_algo, void *ptr
      ,const std::size_t max_size,   const std::size_t preferred_size
      ,std::size_t &received_size)
   {
      (void)memory_algo;
      //Obtain the real block
      block_ctrl *block = memory_algo->priv_get_block(ptr);
      std::size_t old_block_units = block->m_size;

      //The block must be marked as allocated
      BOOST_ASSERT(memory_algo->priv_is_allocated_block(block));

      //Check if alignment and block size are right
      assert_alignment(ptr);

      //Put this to a safe value
      received_size = (old_block_units - AllocatedCtrlUnits)*Alignment + UsableByPreviousChunk;

      //Now translate it to Alignment units
      const std::size_t max_user_units       = floor_units(max_size - UsableByPreviousChunk);
      const std::size_t preferred_user_units = ceil_units(preferred_size - UsableByPreviousChunk);

      //Check that the rounded max and preferred sizes are consistent
      if(max_user_units < preferred_user_units)
         return false;

      //Check if the block is smaller than the requested minimum
      std::size_t old_user_units = old_block_units - AllocatedCtrlUnits;

      if(old_user_units < preferred_user_units)
         return false;

      //If the block is exactly the requested size, there is nothing to split
      if(old_user_units == preferred_user_units)
         return true;

      std::size_t shrunk_user_units =
         ((BlockCtrlUnits - AllocatedCtrlUnits) > preferred_user_units)
         ? (BlockCtrlUnits - AllocatedCtrlUnits)
         : preferred_user_units;

      //Some parameter checks
      if(max_user_units < shrunk_user_units)
         return false;

      //We must be able to create at least a new empty block
      if((old_user_units - shrunk_user_units) < BlockCtrlUnits ){
         return false;
      }

      //Update new size
      received_size = shrunk_user_units*Alignment + UsableByPreviousChunk;
      return true;
   }

   static bool shrink
      (MemoryAlgorithm *memory_algo, void *ptr
      ,const std::size_t max_size,   const std::size_t preferred_size
      ,std::size_t &received_size)
   {
      //Obtain the real block
      block_ctrl *block = memory_algo->priv_get_block(ptr);
      std::size_t old_block_units = block->m_size;

      if(!try_shrink
         (memory_algo, ptr, max_size, preferred_size, received_size)){
         return false;
      }

      //Check if the old size was just the shrunk size (no splitting)
      if((old_block_units - AllocatedCtrlUnits) == ceil_units(preferred_size - UsableByPreviousChunk))
         return true;

      //Now we can just rewrite the size of the old buffer
      block->m_size = (received_size-UsableByPreviousChunk)/Alignment + AllocatedCtrlUnits;
      BOOST_ASSERT(block->m_size >= BlockCtrlUnits);

      //We create the new block
      block_ctrl *new_block = reinterpret_cast<block_ctrl*>
         (reinterpret_cast<char*>(block) + block->m_size*Alignment);
      //Write control data to simulate this new block was previously allocated
      //and deallocate it
      new_block->m_size = old_block_units - block->m_size;
      BOOST_ASSERT(new_block->m_size >= BlockCtrlUnits);
      memory_algo->priv_mark_new_allocated_block(block);
      memory_algo->priv_mark_new_allocated_block(new_block);
      memory_algo->priv_deallocate(memory_algo->priv_get_user_buffer(new_block));
      return true;
   }
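
   //Illustrative note (not part of the original header): shrink() never moves user data;
   //it only lowers block->m_size and fabricates a second, "allocated" block out of the
   //tail, which is then handed to priv_deallocate so it rejoins the free space. Assuming
   //AllocatedCtrlUnits == 1 and a 64-unit block shrunk to 16 user units:
   //
   //   block->m_size     : 64 -> 17      //16 user units + 1 control unit
   //   new_block->m_size : 64 - 17 = 47  //returned to the memory algorithm as free space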
   private:
   static multiallocation_chain priv_allocate_many
      ( MemoryAlgorithm *memory_algo
      , const std::size_t *elem_sizes
      , std::size_t n_elements
      , std::size_t sizeof_element)
   {
      //Note: sizeof_element == 0 indicates that we want to
      //allocate n_elements of the same size "*elem_sizes"

      //Calculate the total size of all requests
      std::size_t total_request_units = 0;
      std::size_t elem_units = 0;
      const std::size_t ptr_size_units = memory_algo->priv_get_total_units(sizeof(void_pointer));
      if(!sizeof_element){
         elem_units = memory_algo->priv_get_total_units(*elem_sizes);
         elem_units = ptr_size_units > elem_units ? ptr_size_units : elem_units;
         total_request_units = n_elements*elem_units;
      }
      else{
         for(std::size_t i = 0; i < n_elements; ++i){
            elem_units = memory_algo->priv_get_total_units(elem_sizes[i]*sizeof_element);
            elem_units = ptr_size_units > elem_units ? ptr_size_units : elem_units;
            total_request_units += elem_units;
         }
      }

      multiallocation_chain chain;

      std::size_t low_idx = 0;
      while(low_idx < n_elements){
         std::size_t total_bytes = total_request_units*Alignment - AllocatedCtrlBytes + UsableByPreviousChunk;
         std::size_t min_allocation = (!sizeof_element)
            ?  elem_units
            :  memory_algo->priv_get_total_units(elem_sizes[low_idx]*sizeof_element);
         min_allocation = min_allocation*Alignment - AllocatedCtrlBytes + UsableByPreviousChunk;

         std::size_t received_size;
         std::pair<void *, bool> ret = memory_algo->priv_allocate
            (boost::interprocess::allocate_new, min_allocation, total_bytes, received_size, 0);
         if(!ret.first){
            break;
         }

         block_ctrl *block = memory_algo->priv_get_block(ret.first);
         std::size_t received_units = block->m_size;
         char *block_address = reinterpret_cast<char*>(block);

         std::size_t total_used_units = 0;
         // block_ctrl *prev_block = 0;
         while(total_used_units < received_units){
            if(sizeof_element){
               elem_units = memory_algo->priv_get_total_units(elem_sizes[low_idx]*sizeof_element);
               elem_units = ptr_size_units > elem_units ? ptr_size_units : elem_units;
            }
            if(total_used_units + elem_units > received_units)
               break;
            total_request_units -= elem_units;
            //This is the position where the new block must be created
            block_ctrl *new_block = reinterpret_cast<block_ctrl *>(block_address);
            assert_alignment(new_block);

            //The last block should take all the remaining space
            if((low_idx + 1) == n_elements ||
               (total_used_units + elem_units +
               ((!sizeof_element)
                  ? elem_units
                  : memory_algo->priv_get_total_units(elem_sizes[low_idx+1]*sizeof_element))
               ) > received_units){
               //By default, the new block will use the rest of the buffer
               new_block->m_size = received_units - total_used_units;
               memory_algo->priv_mark_new_allocated_block(new_block);

               //If the remaining units are bigger than needed and we can
               //split it obtaining a new free memory block do it.
               if((received_units - total_used_units) >= (elem_units + MemoryAlgorithm::BlockCtrlUnits)){
                  std::size_t shrunk_received;
                  std::size_t shrunk_request = elem_units*Alignment - AllocatedCtrlBytes + UsableByPreviousChunk;
                  bool shrink_ok = shrink
                     (memory_algo
                     ,memory_algo->priv_get_user_buffer(new_block)
                     ,shrunk_request
                     ,shrunk_request
                     ,shrunk_received);
                  (void)shrink_ok;
                  //Shrink must always succeed with passed parameters
                  BOOST_ASSERT(shrink_ok);
                  //Some sanity checks
                  BOOST_ASSERT(shrunk_request == shrunk_received);
                  BOOST_ASSERT(elem_units == ((shrunk_request-UsableByPreviousChunk)/Alignment + AllocatedCtrlUnits));
                  //"new_block->m_size" must have been reduced to elem_units by "shrink"
                  BOOST_ASSERT(new_block->m_size == elem_units);
                  //Now update the total received units with the reduction
                  received_units = elem_units + total_used_units;
               }
            }
            else{
               new_block->m_size = elem_units;
               memory_algo->priv_mark_new_allocated_block(new_block);
            }

            block_address    += new_block->m_size*Alignment;
            total_used_units += new_block->m_size;
            //Check we have enough room to overwrite the intrusive pointer
            assert((new_block->m_size*Alignment - AllocatedCtrlBytes) >= sizeof(void_pointer));
            void_pointer p = new(memory_algo->priv_get_user_buffer(new_block))void_pointer(0);
            chain.push_back(p);
            ++low_idx;
            // prev_block = new_block;
         }
         //Sanity check
         BOOST_ASSERT(total_used_units == received_units);
      }

      if(low_idx != n_elements){
         priv_deallocate_many(memory_algo, boost::interprocess::move(chain));
      }
      return boost::interprocess::move(chain);
   }
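
   //Illustrative note (not part of the original header): priv_allocate_many tries to
   //serve all requests from one underlying block and then carves it up in place, writing
   //a block_ctrl header at the start of every element so each buffer can later be
   //deallocated individually. For example, allocate_many(memory_algo, 100, 3) with
   //Alignment == 16 would reserve roughly 3*8 units and split them into three
   //contiguous 8-unit blocks (7 user units plus control data each, figures assumed for
   //illustration). If the algorithm returns less than the full request, the outer loop
   //issues another allocation for the still-missing elements; if an allocation fails
   //outright, everything allocated so far is released and an empty chain is returned.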
   static void priv_deallocate_many(MemoryAlgorithm *memory_algo, multiallocation_chain chain)
   {
      while(!chain.empty()){
         void *addr = detail::get_pointer(chain.front());
         chain.pop_front();
         memory_algo->priv_deallocate(addr);
      }
   }
};

}  //namespace detail {
}  //namespace interprocess {
}  //namespace boost {

#include <boost/interprocess/detail/config_end.hpp>

#endif   //#ifndef BOOST_INTERPROCESS_DETAIL_MEM_ALGO_COMMON_HPP