// Copyright (C) 2004, 2005 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 2, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License along
// with this library; see the file COPYING.  If not, write to the Free
// Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
// USA.

// As a special exception, you may use this file as part of a free software
// library without restriction.  Specifically, if other files instantiate
// templates or use macros or inline functions from this file, or you compile
// this file and link it with other files to produce an executable, this
// file does not by itself cause the resulting executable to be covered by
// the GNU General Public License.  This exception does not however
// invalidate any other reasons why the executable file might be covered by
// the GNU General Public License.
#include <bits/c++config.h>
#include <bits/concurrence.h>
#include <ext/mt_allocator.h>
namespace __gnu_internal
{
  struct __freelist
  {
    typedef __gnu_cxx::__pool<true>::_Thread_record _Thread_record;
    _Thread_record*   _M_thread_freelist;
    _Thread_record*   _M_thread_freelist_array;
    size_t            _M_max_threads;
    __gthread_key_t   _M_key;

    ~__freelist()
    {
      if (_M_thread_freelist_array)
        {
          __gthread_key_delete(_M_key);
          ::operator delete(static_cast<void*>(_M_thread_freelist_array));
        }
    }
  };

  // Ensure freelist is constructed first.
  static __freelist freelist;
  static __glibcxx_mutex_define_initialized(freelist_mutex);

  static void
  _M_destroy_thread_key(void* __id)
  {
    // Return this thread id record to the front of thread_freelist.
    __gnu_cxx::lock sentry(__gnu_internal::freelist_mutex);
    size_t _M_id = reinterpret_cast<size_t>(__id);

    using namespace __gnu_internal;
    typedef __gnu_cxx::__pool<true>::_Thread_record _Thread_record;
    _Thread_record* __tr = &freelist._M_thread_freelist_array[_M_id - 1];
    __tr->_M_next = freelist._M_thread_freelist;
    freelist._M_thread_freelist = __tr;
  }
} // namespace __gnu_internal
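
// The record for thread id N lives at index N - 1 of
// _M_thread_freelist_array: ids are handed out starting at 1 because the
// global pool reserves id 0 for itself, and _M_destroy_thread_key above
// relies on that offset when it pushes an exiting thread's record back
// onto the front of the free list.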

namespace __gnu_cxx
{
  void
  __pool<false>::_M_destroy() throw()
  {
    if (_M_init && !_M_options._M_force_new)
      {
        for (size_t __n = 0; __n < _M_bin_size; ++__n)
          {
            _Bin_record& __bin = _M_bin[__n];
            while (__bin._M_address)
              {
                _Block_address* __tmp = __bin._M_address->_M_next;
                ::operator delete(__bin._M_address->_M_initial);
                __bin._M_address = __tmp;
              }
            ::operator delete(__bin._M_first);
          }
        ::operator delete(_M_bin);
        ::operator delete(_M_binmap);
      }
  }

  void
  __pool<false>::_M_reclaim_block(char* __p, size_t __bytes)
  {
    // Round up to power of 2 and figure out which bin to use.
    const size_t __which = _M_binmap[__bytes];
    _Bin_record& __bin = _M_bin[__which];

    // The user pointer was handed out _M_align bytes past the
    // _Block_record header, so step back to recover the record.
    char* __c = __p - _M_get_align();
    _Block_record* __block = reinterpret_cast<_Block_record*>(__c);

    // Single threaded application - return to global pool.
    __block->_M_next = __bin._M_first[0];
    __bin._M_first[0] = __block;
  }

  char*
  __pool<false>::_M_reserve_block(size_t __bytes, const size_t __thread_id)
  {
    // Round up to power of 2 and figure out which bin to use.
    const size_t __which = _M_binmap[__bytes];
    _Bin_record& __bin = _M_bin[__which];
    const _Tune& __options = _M_get_options();
    const size_t __bin_size = (__options._M_min_bin << __which)
                              + __options._M_align;
    size_t __block_count = __options._M_chunk_size - sizeof(_Block_address);
    __block_count /= __bin_size;
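
    // For example, carving a 4096-byte chunk into 16-byte blocks (an
    // illustrative bin size: an 8-byte minimum bin plus 8 bytes of
    // alignment) with an 8-byte _Block_address header gives
    // (4096 - 8) / 16 = 255 blocks per chunk; the division simply
    // discards the unusable tail of the chunk.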

    // Get a new block dynamically, set it up for use.
    void* __v = ::operator new(__options._M_chunk_size);
    _Block_address* __address = static_cast<_Block_address*>(__v);
    __address->_M_initial = __v;
    __address->_M_next = __bin._M_address;
    __bin._M_address = __address;

    // Thread the blocks of the new chunk onto this bin's free list.
    char* __c = static_cast<char*>(__v) + sizeof(_Block_address);
    _Block_record* __block = reinterpret_cast<_Block_record*>(__c);
    __bin._M_first[__thread_id] = __block;
    while (--__block_count > 0)
      {
        __c += __bin_size;
        __block->_M_next = reinterpret_cast<_Block_record*>(__c);
        __block = __block->_M_next;
      }
    __block->_M_next = NULL;

    // Pop the first block off the list and hand it to the caller.
    __block = __bin._M_first[__thread_id];
    __bin._M_first[__thread_id] = __block->_M_next;

    // NB: For alignment reasons, we can't use the first _M_align
    // bytes, even when sizeof(_Block_record) < _M_align.
    return reinterpret_cast<char*>(__block) + __options._M_align;
  }

  void
  __pool<false>::_M_initialize()
  {
    // _M_force_new must not change after the first allocate(), which
    // in turn calls this method, so if it's false, it's false forever
    // and we don't need to return here ever again.
    if (_M_options._M_force_new)
      {
        _M_init = true;
        return;
      }

    // Calculate the number of bins required based on _M_max_bytes.
    // _M_bin_size is statically-initialized to one.
    size_t __bin_size = _M_options._M_min_bin;
    while (_M_options._M_max_bytes > __bin_size)
      {
        __bin_size <<= 1;
        ++_M_bin_size;
      }

    // Setup the bin map for quick lookup of the relevant bin.
    const size_t __j = (_M_options._M_max_bytes + 1) * sizeof(_Binmap_type);
    _M_binmap = static_cast<_Binmap_type*>(::operator new(__j));
    _Binmap_type* __bp = _M_binmap;
    _Binmap_type __bin_max = _M_options._M_min_bin;
    _Binmap_type __bint = 0;
    for (_Binmap_type __ct = 0; __ct <= _M_options._M_max_bytes; ++__ct)
      {
        if (__ct > __bin_max)
          {
            __bin_max <<= 1;
            ++__bint;
          }
        *__bp++ = __bint;
      }
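
    // With, say, _M_min_bin == 8 and _M_max_bytes == 128, the map built
    // above reads: bytes 0-8 -> bin 0, 9-16 -> bin 1, 17-32 -> bin 2,
    // 33-64 -> bin 3, 65-128 -> bin 4, so a request size indexes its bin
    // in constant time.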

    // Initialize _M_bin and its members.
    void* __v = ::operator new(sizeof(_Bin_record) * _M_bin_size);
    _M_bin = static_cast<_Bin_record*>(__v);
    for (size_t __n = 0; __n < _M_bin_size; ++__n)
      {
        _Bin_record& __bin = _M_bin[__n];
        __v = ::operator new(sizeof(_Block_record*));
        __bin._M_first = static_cast<_Block_record**>(__v);
        __bin._M_first[0] = NULL;
        __bin._M_address = NULL;
      }
    _M_init = true;
  }

  void
  __pool<true>::_M_destroy() throw()
  {
    if (_M_init && !_M_options._M_force_new)
      {
        if (__gthread_active_p())
          {
            for (size_t __n = 0; __n < _M_bin_size; ++__n)
              {
                _Bin_record& __bin = _M_bin[__n];
                while (__bin._M_address)
                  {
                    _Block_address* __tmp = __bin._M_address->_M_next;
                    ::operator delete(__bin._M_address->_M_initial);
                    __bin._M_address = __tmp;
                  }
                ::operator delete(__bin._M_first);
                ::operator delete(__bin._M_free);
                ::operator delete(__bin._M_used);
                ::operator delete(__bin._M_mutex);
              }
          }
        else
          {
            for (size_t __n = 0; __n < _M_bin_size; ++__n)
              {
                _Bin_record& __bin = _M_bin[__n];
                while (__bin._M_address)
                  {
                    _Block_address* __tmp = __bin._M_address->_M_next;
                    ::operator delete(__bin._M_address->_M_initial);
                    __bin._M_address = __tmp;
                  }
                ::operator delete(__bin._M_first);
              }
          }
        ::operator delete(_M_bin);
        ::operator delete(_M_binmap);
      }
  }

  void
  __pool<true>::_M_reclaim_block(char* __p, size_t __bytes)
  {
    // Round up to power of 2 and figure out which bin to use.
    const size_t __which = _M_binmap[__bytes];
    const _Bin_record& __bin = _M_bin[__which];

    // Know __p not null, assume valid block.
    char* __c = __p - _M_get_align();
    _Block_record* __block = reinterpret_cast<_Block_record*>(__c);
    if (__gthread_active_p())
      {
        // Calculate the number of records to remove from our freelist:
        // in order to avoid too much contention we wait until the
        // number of records is "high enough".
        const size_t __thread_id = _M_get_thread_id();
        const _Tune& __options = _M_get_options();
        const unsigned long __limit = 100 * (_M_bin_size - __which)
                                      * __options._M_freelist_headroom;
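
        // As an illustrative example: with a headroom of 10, 5 bins and
        // __which == 0, __limit is 100 * 5 * 10 = 5000.  A thread holding
        // 600 free blocks of which 200 are in use computes
        // __remove = 600 * 10 - 200 = 5800; that exceeds both __limit and
        // the 600 free blocks, so 5800 / 10 = 580 blocks are handed back
        // to the global list below.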

        unsigned long __remove = __bin._M_free[__thread_id];
        __remove *= __options._M_freelist_headroom;
        if (__remove >= __bin._M_used[__thread_id])
          __remove -= __bin._M_used[__thread_id];
        else
          __remove = 0;
        if (__remove > __limit && __remove > __bin._M_free[__thread_id])
          {
            _Block_record* __first = __bin._M_first[__thread_id];
            _Block_record* __tmp = __first;
            __remove /= __options._M_freelist_headroom;
            const unsigned long __removed = __remove;
            while (--__remove > 0)
              __tmp = __tmp->_M_next;
            __bin._M_first[__thread_id] = __tmp->_M_next;
            __bin._M_free[__thread_id] -= __removed;

            __gthread_mutex_lock(__bin._M_mutex);
            __tmp->_M_next = __bin._M_first[0];
            __bin._M_first[0] = __first;
            __bin._M_free[0] += __removed;
            __gthread_mutex_unlock(__bin._M_mutex);
          }

        // Return this block to our list and update counters and
        // owner id as needed.
        --__bin._M_used[__block->_M_thread_id];

        __block->_M_next = __bin._M_first[__thread_id];
        __bin._M_first[__thread_id] = __block;

        ++__bin._M_free[__thread_id];
      }
    else
      {
        // Not using threads, so single threaded application - return
        // to global pool.
        __block->_M_next = __bin._M_first[0];
        __bin._M_first[0] = __block;
      }
  }

  char*
  __pool<true>::_M_reserve_block(size_t __bytes, const size_t __thread_id)
  {
    // Round up to power of 2 and figure out which bin to use.
    const size_t __which = _M_binmap[__bytes];
    const _Tune& __options = _M_get_options();
    const size_t __bin_size = ((__options._M_min_bin << __which)
                               + __options._M_align);
    size_t __block_count = __options._M_chunk_size - sizeof(_Block_address);
    __block_count /= __bin_size;

    // Are we using threads?
    // - Yes, check if there are free blocks on the global
    //   list. If so, grab up to __block_count blocks in one
    //   lock and change ownership. If the global list is
    //   empty, we allocate a new chunk and add those blocks
    //   directly to our own freelist (with us as owner).
    // - No, all operations are made directly to global pool 0
    //   no need to lock or change ownership but check for free
    //   blocks on global list (and if not add new ones) and
    //   get the first one.
    _Bin_record& __bin = _M_bin[__which];
    _Block_record* __block = NULL;
    if (__gthread_active_p())
      {
        __gthread_mutex_lock(__bin._M_mutex);
        if (__bin._M_first[0] == NULL)
          {
            // Nothing on the global list: allocate a new chunk.
            void* __v = ::operator new(__options._M_chunk_size);
            _Block_address* __address = static_cast<_Block_address*>(__v);
            __address->_M_initial = __v;
            __address->_M_next = __bin._M_address;
            __bin._M_address = __address;
            __gthread_mutex_unlock(__bin._M_mutex);

            // No need to hold the lock when we are adding a whole
            // chunk to our own list.
            char* __c = static_cast<char*>(__v) + sizeof(_Block_address);
            __block = reinterpret_cast<_Block_record*>(__c);
            __bin._M_free[__thread_id] = __block_count;
            __bin._M_first[__thread_id] = __block;
            while (--__block_count > 0)
              {
                __c += __bin_size;
                __block->_M_next = reinterpret_cast<_Block_record*>(__c);
                __block = __block->_M_next;
              }
            __block->_M_next = NULL;
          }
        else
          {
            // Is the number of required blocks greater than or equal
            // to the number that can be provided by the global free
            // list?
            __bin._M_first[__thread_id] = __bin._M_first[0];
            if (__block_count >= __bin._M_free[0])
              {
                // Take everything the global list has.
                __bin._M_free[__thread_id] = __bin._M_free[0];
                __bin._M_free[0] = 0;
                __bin._M_first[0] = NULL;
              }
            else
              {
                // Take only __block_count blocks: walk to the split
                // point and detach our portion from the global list.
                __bin._M_free[__thread_id] = __block_count;
                __bin._M_free[0] -= __block_count;
                __block = __bin._M_first[0];
                while (--__block_count > 0)
                  __block = __block->_M_next;
                __bin._M_first[0] = __block->_M_next;
                __block->_M_next = NULL;
              }
            __gthread_mutex_unlock(__bin._M_mutex);
          }
      }
    else
      {
        // Without threads the global pool 0 is used directly.
        void* __v = ::operator new(__options._M_chunk_size);
        _Block_address* __address = static_cast<_Block_address*>(__v);
        __address->_M_initial = __v;
        __address->_M_next = __bin._M_address;
        __bin._M_address = __address;

        char* __c = static_cast<char*>(__v) + sizeof(_Block_address);
        _Block_record* __block = reinterpret_cast<_Block_record*>(__c);
        __bin._M_first[0] = __block;
        while (--__block_count > 0)
          {
            __c += __bin_size;
            __block->_M_next = reinterpret_cast<_Block_record*>(__c);
            __block = __block->_M_next;
          }
        __block->_M_next = NULL;
      }

    __block = __bin._M_first[__thread_id];
    __bin._M_first[__thread_id] = __block->_M_next;

    if (__gthread_active_p())
      {
        __block->_M_thread_id = __thread_id;
        --__bin._M_free[__thread_id];
        ++__bin._M_used[__thread_id];
      }

    // NB: For alignment reasons, we can't use the first _M_align
    // bytes, even when sizeof(_Block_record) < _M_align.
    return reinterpret_cast<char*>(__block) + __options._M_align;
  }

  void
  __pool<true>::_M_initialize()
  {
    // _M_force_new must not change after the first allocate(),
    // which in turn calls this method, so if it's false, it's false
    // forever and we don't need to return here ever again.
    if (_M_options._M_force_new)
      {
        _M_init = true;
        return;
      }

    // Calculate the number of bins required based on _M_max_bytes.
    // _M_bin_size is statically-initialized to one.
    size_t __bin_size = _M_options._M_min_bin;
    while (_M_options._M_max_bytes > __bin_size)
      {
        __bin_size <<= 1;
        ++_M_bin_size;
      }

    // Setup the bin map for quick lookup of the relevant bin.
    const size_t __j = (_M_options._M_max_bytes + 1) * sizeof(_Binmap_type);
    _M_binmap = static_cast<_Binmap_type*>(::operator new(__j));
    _Binmap_type* __bp = _M_binmap;
    _Binmap_type __bin_max = _M_options._M_min_bin;
    _Binmap_type __bint = 0;
    for (_Binmap_type __ct = 0; __ct <= _M_options._M_max_bytes; ++__ct)
      {
        if (__ct > __bin_max)
          {
            __bin_max <<= 1;
            ++__bint;
          }
        *__bp++ = __bint;
      }

    // Initialize _M_bin and its members.
    void* __v = ::operator new(sizeof(_Bin_record) * _M_bin_size);
    _M_bin = static_cast<_Bin_record*>(__v);

    // If __gthread_active_p() create and initialize the list of
    // free thread ids. Single threaded applications use thread id 0
    // directly and have no need for this.
    if (__gthread_active_p())
      {
        {
          __gnu_cxx::lock sentry(__gnu_internal::freelist_mutex);

          if (!__gnu_internal::freelist._M_thread_freelist_array
              || __gnu_internal::freelist._M_max_threads
                 < _M_options._M_max_threads)
            {
              const size_t __k = sizeof(_Thread_record)
                                 * _M_options._M_max_threads;
              __v = ::operator new(__k);
              _Thread_record* _M_thread_freelist
                = static_cast<_Thread_record*>(__v);

              // NOTE! The first assignable thread id is 1 since the
              // global pool uses id 0
              size_t __i;
              for (__i = 1; __i < _M_options._M_max_threads; ++__i)
                {
                  _Thread_record& __tr = _M_thread_freelist[__i - 1];
                  __tr._M_next = &_M_thread_freelist[__i];
                  __tr._M_id = __i;
                }

              // Set the last record.
              _M_thread_freelist[__i - 1]._M_next = NULL;
              _M_thread_freelist[__i - 1]._M_id = __i;

              if (!__gnu_internal::freelist._M_thread_freelist_array)
                {
                  // Initialize per thread key to hold pointer to
                  // _M_thread_freelist.
                  __gthread_key_create(&__gnu_internal::freelist._M_key,
                                       __gnu_internal::_M_destroy_thread_key);
                  __gnu_internal::freelist._M_thread_freelist
                    = _M_thread_freelist;
                }
              else
                {
                  // A freelist already exists but is too small for the
                  // requested _M_max_threads: rebuild it inside the new,
                  // larger array, preserving the order of the old free
                  // list and keeping already-assigned ids valid.
                  _Thread_record* _M_old_freelist
                    = __gnu_internal::freelist._M_thread_freelist;
                  _Thread_record* _M_old_array
                    = __gnu_internal::freelist._M_thread_freelist_array;
                  __gnu_internal::freelist._M_thread_freelist
                    = &_M_thread_freelist[_M_old_freelist - _M_old_array];
                  while (_M_old_freelist)
                    {
                      size_t next_id;
                      if (_M_old_freelist->_M_next)
                        next_id = _M_old_freelist->_M_next - _M_old_array;
                      else
                        next_id = __gnu_internal::freelist._M_max_threads;
                      _M_thread_freelist[_M_old_freelist->_M_id - 1]._M_next
                        = &_M_thread_freelist[next_id];
                      _M_old_freelist = _M_old_freelist->_M_next;
                    }
                  ::operator delete(static_cast<void*>(_M_old_array));
                }
              __gnu_internal::freelist._M_thread_freelist_array
                = _M_thread_freelist;
              __gnu_internal::freelist._M_max_threads
                = _M_options._M_max_threads;
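
              // E.g. (illustrative): if the old array held 8 records and
              // its free list was 7 -> 3, growing to 16 ids rebuilds the
              // list as 7 -> 3 -> 9 -> 10 -> ... -> 16: the surviving
              // entries keep their order and the newly created ids are
              // appended behind them.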
            }
        }

        const size_t __max_threads = _M_options._M_max_threads + 1;
        for (size_t __n = 0; __n < _M_bin_size; ++__n)
          {
            _Bin_record& __bin = _M_bin[__n];
            __v = ::operator new(sizeof(_Block_record*) * __max_threads);
            __bin._M_first = static_cast<_Block_record**>(__v);

            __bin._M_address = NULL;

            __v = ::operator new(sizeof(size_t) * __max_threads);
            __bin._M_free = static_cast<size_t*>(__v);

            __v = ::operator new(sizeof(size_t) * __max_threads);
            __bin._M_used = static_cast<size_t*>(__v);

            __v = ::operator new(sizeof(__gthread_mutex_t));
            __bin._M_mutex = static_cast<__gthread_mutex_t*>(__v);

#ifdef __GTHREAD_MUTEX_INIT
            {
              // Do not copy a POSIX/gthr mutex once in use.
              __gthread_mutex_t __tmp = __GTHREAD_MUTEX_INIT;
              *__bin._M_mutex = __tmp;
            }
#else
            { __GTHREAD_MUTEX_INIT_FUNCTION(__bin._M_mutex); }
#endif

            for (size_t __threadn = 0; __threadn < __max_threads; ++__threadn)
              {
                __bin._M_first[__threadn] = NULL;
                __bin._M_free[__threadn] = 0;
                __bin._M_used[__threadn] = 0;
              }
          }
      }
    else
      {
        // Without threads a single freelist per bin is enough.
        for (size_t __n = 0; __n < _M_bin_size; ++__n)
          {
            _Bin_record& __bin = _M_bin[__n];
            __v = ::operator new(sizeof(_Block_record*));
            __bin._M_first = static_cast<_Block_record**>(__v);
            __bin._M_first[0] = NULL;
            __bin._M_address = NULL;
          }
      }
    _M_init = true;
  }

  size_t
  __pool<true>::_M_get_thread_id()
  {
    // If we have thread support and it's active we check the thread
    // key value and return its id or if it's not set we take the
    // first record from _M_thread_freelist and sets the key and
    // returns its id.
    if (__gthread_active_p())
      {
        void* v = __gthread_getspecific(__gnu_internal::freelist._M_key);
        size_t _M_id = (size_t)v;
        if (_M_id == 0)
          {
            {
              __gnu_cxx::lock sentry(__gnu_internal::freelist_mutex);
              if (__gnu_internal::freelist._M_thread_freelist)
                {
                  _M_id = __gnu_internal::freelist._M_thread_freelist->_M_id;
                  __gnu_internal::freelist._M_thread_freelist
                    = __gnu_internal::freelist._M_thread_freelist->_M_next;
                }
            }

            __gthread_setspecific(__gnu_internal::freelist._M_key,
                                  (void*)_M_id);
          }
        return _M_id >= _M_options._M_max_threads ? 0 : _M_id;
      }

    // Otherwise (no thread support or inactive) all requests are
    // served from the global pool 0.
    return 0;
  }
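
  // A thread's id is thus assigned lazily on its first allocation: the id
  // is popped from __gnu_internal::freelist, cached in the thread-specific
  // key, and handed back by _M_destroy_thread_key when the thread exits.
  // Threads beyond _M_max_threads simply share pool 0.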

  // XXX GLIBCXX_ABI Deprecated
  void
  __pool<true>::_M_destroy_thread_key(void*) { }

  // XXX GLIBCXX_ABI Deprecated
  void
  __pool<true>::_M_initialize(__destroy_handler)
  {
    // _M_force_new must not change after the first allocate(),
    // which in turn calls this method, so if it's false, it's false
    // forever and we don't need to return here ever again.
    if (_M_options._M_force_new)
      {
        _M_init = true;
        return;
      }

    // Calculate the number of bins required based on _M_max_bytes.
    // _M_bin_size is statically-initialized to one.
    size_t __bin_size = _M_options._M_min_bin;
    while (_M_options._M_max_bytes > __bin_size)
      {
        __bin_size <<= 1;
        ++_M_bin_size;
      }

    // Setup the bin map for quick lookup of the relevant bin.
    const size_t __j = (_M_options._M_max_bytes + 1) * sizeof(_Binmap_type);
    _M_binmap = static_cast<_Binmap_type*>(::operator new(__j));
    _Binmap_type* __bp = _M_binmap;
    _Binmap_type __bin_max = _M_options._M_min_bin;
    _Binmap_type __bint = 0;
    for (_Binmap_type __ct = 0; __ct <= _M_options._M_max_bytes; ++__ct)
      {
        if (__ct > __bin_max)
          {
            __bin_max <<= 1;
            ++__bint;
          }
        *__bp++ = __bint;
      }

    // Initialize _M_bin and its members.
    void* __v = ::operator new(sizeof(_Bin_record) * _M_bin_size);
    _M_bin = static_cast<_Bin_record*>(__v);

    // If __gthread_active_p() create and initialize the list of
    // free thread ids. Single threaded applications use thread id 0
    // directly and have no need for this.
    if (__gthread_active_p())
      {
        {
          __gnu_cxx::lock sentry(__gnu_internal::freelist_mutex);

          if (!__gnu_internal::freelist._M_thread_freelist_array
              || __gnu_internal::freelist._M_max_threads
                 < _M_options._M_max_threads)
            {
              const size_t __k = sizeof(_Thread_record)
                                 * _M_options._M_max_threads;
              __v = ::operator new(__k);
              _Thread_record* _M_thread_freelist
                = static_cast<_Thread_record*>(__v);

              // NOTE! The first assignable thread id is 1 since the
              // global pool uses id 0
              size_t __i;
              for (__i = 1; __i < _M_options._M_max_threads; ++__i)
                {
                  _Thread_record& __tr = _M_thread_freelist[__i - 1];
                  __tr._M_next = &_M_thread_freelist[__i];
                  __tr._M_id = __i;
                }

              // Set the last record.
              _M_thread_freelist[__i - 1]._M_next = NULL;
              _M_thread_freelist[__i - 1]._M_id = __i;

              if (!__gnu_internal::freelist._M_thread_freelist_array)
                {
                  // Initialize per thread key to hold pointer to
                  // _M_thread_freelist.
                  __gthread_key_create(&__gnu_internal::freelist._M_key,
                                       __gnu_internal::_M_destroy_thread_key);
                  __gnu_internal::freelist._M_thread_freelist
                    = _M_thread_freelist;
                }
              else
                {
                  // Grow the existing freelist as in _M_initialize()
                  // above: rebuild the old free list inside the new,
                  // larger array so already-assigned ids stay valid.
                  _Thread_record* _M_old_freelist
                    = __gnu_internal::freelist._M_thread_freelist;
                  _Thread_record* _M_old_array
                    = __gnu_internal::freelist._M_thread_freelist_array;
                  __gnu_internal::freelist._M_thread_freelist
                    = &_M_thread_freelist[_M_old_freelist - _M_old_array];
                  while (_M_old_freelist)
                    {
                      size_t next_id;
                      if (_M_old_freelist->_M_next)
                        next_id = _M_old_freelist->_M_next - _M_old_array;
                      else
                        next_id = __gnu_internal::freelist._M_max_threads;
                      _M_thread_freelist[_M_old_freelist->_M_id - 1]._M_next
                        = &_M_thread_freelist[next_id];
                      _M_old_freelist = _M_old_freelist->_M_next;
                    }
                  ::operator delete(static_cast<void*>(_M_old_array));
                }
              __gnu_internal::freelist._M_thread_freelist_array
                = _M_thread_freelist;
              __gnu_internal::freelist._M_max_threads
                = _M_options._M_max_threads;
            }
        }

        const size_t __max_threads = _M_options._M_max_threads + 1;
        for (size_t __n = 0; __n < _M_bin_size; ++__n)
          {
            _Bin_record& __bin = _M_bin[__n];
            __v = ::operator new(sizeof(_Block_record*) * __max_threads);
            __bin._M_first = static_cast<_Block_record**>(__v);

            __bin._M_address = NULL;

            __v = ::operator new(sizeof(size_t) * __max_threads);
            __bin._M_free = static_cast<size_t*>(__v);

            __v = ::operator new(sizeof(size_t) * __max_threads);
            __bin._M_used = static_cast<size_t*>(__v);

            __v = ::operator new(sizeof(__gthread_mutex_t));
            __bin._M_mutex = static_cast<__gthread_mutex_t*>(__v);

#ifdef __GTHREAD_MUTEX_INIT
            {
              // Do not copy a POSIX/gthr mutex once in use.
              __gthread_mutex_t __tmp = __GTHREAD_MUTEX_INIT;
              *__bin._M_mutex = __tmp;
            }
#else
            { __GTHREAD_MUTEX_INIT_FUNCTION(__bin._M_mutex); }
#endif

            for (size_t __threadn = 0; __threadn < __max_threads; ++__threadn)
              {
                __bin._M_first[__threadn] = NULL;
                __bin._M_free[__threadn] = 0;
                __bin._M_used[__threadn] = 0;
              }
          }
      }
    else
      {
        for (size_t __n = 0; __n < _M_bin_size; ++__n)
          {
            _Bin_record& __bin = _M_bin[__n];
            __v = ::operator new(sizeof(_Block_record*));
            __bin._M_first = static_cast<_Block_record**>(__v);
            __bin._M_first[0] = NULL;
            __bin._M_address = NULL;
          }
      }
    _M_init = true;
  }

  template class __mt_alloc<char>;
  template class __mt_alloc<wchar_t>;
} // namespace __gnu_cxx
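
// Typical use of the allocator implemented above (a sketch, not part of
// this file): __mt_alloc drops in as a standard allocator, e.g.
//
//   #include <vector>
//   #include <ext/mt_allocator.h>
//
//   std::vector<int, __gnu_cxx::__mt_alloc<int> > __v;
//   __v.push_back(42);  // small requests are served from the pool bins
//
// Requests larger than the configured _M_max_bytes bypass the bins and go
// straight to ::operator new.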