1 /*************************************************************************
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 * Copyright 2008 by Sun Microsystems, Inc.
7 * OpenOffice.org - a multi-platform office productivity suite
9 * $RCSfile: alloc_cache.c,v $
12 * This file is part of OpenOffice.org.
14 * OpenOffice.org is free software: you can redistribute it and/or modify
15 * it under the terms of the GNU Lesser General Public License version 3
16 * only, as published by the Free Software Foundation.
18 * OpenOffice.org is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU Lesser General Public License version 3 for more details
22 * (a copy is included in the LICENSE file that accompanied this code).
24 * You should have received a copy of the GNU Lesser General Public License
25 * version 3 along with OpenOffice.org. If not, see
26 * <http://www.openoffice.org/license.html>
27 * for a copy of the LGPLv3 License.
29 ************************************************************************/
#include "alloc_cache.h"
#include "alloc_impl.h"
#include "alloc_arena.h"
#include "internal/once.h"
#include "sal/macros.h"
#include "osl/diagnose.h"

#ifndef INCLUDED_STRING_H
#include <string.h>
#define INCLUDED_STRING_H
#endif

#ifndef INCLUDED_STDIO_H
#include <stdio.h>
#define INCLUDED_STDIO_H
#endif
48 #define OSL_TRACE 1 ? ((void)0) : _OSL_GLOBAL osl_trace
51 /* ================================================================= *
55 * ================================================================= */
60 struct rtl_cache_list_st
62 rtl_memory_lock_type m_lock
;
63 rtl_cache_type m_cache_head
;
65 #if defined(SAL_UNX) || defined(SAL_OS2)
66 pthread_t m_update_thread
;
67 pthread_cond_t m_update_cond
;
68 #elif defined(SAL_W32)
69 HANDLE m_update_thread
;
71 #endif /* SAL_UNX || SAL_W32 */
75 static struct rtl_cache_list_st g_cache_list
;
79 * provided for cache_type allocations, and hash_table resizing.
83 static rtl_arena_type
* gp_cache_arena
= 0;
86 /** gp_cache_magazine_cache
89 static rtl_cache_type
* gp_cache_magazine_cache
= 0;
92 /** gp_cache_slab_cache
95 static rtl_cache_type
* gp_cache_slab_cache
= 0;
98 /** gp_cache_bufctl_cache
101 static rtl_cache_type
* gp_cache_bufctl_cache
= 0;
108 rtl_cache_init (void);
111 /* ================================================================= */
/** RTL_CACHE_HASH_INDEX()
 *  Map a buffer address to a hash bucket: fold the address with two
 *  right-shifted copies of itself (to mix in higher bits), drop the
 *  low 'q' alignment bits, and mask to the (power-of-two) table size.
 */
#define RTL_CACHE_HASH_INDEX_IMPL(a, s, q, m) \
     ((((a) + ((a) >> (s)) + ((a) >> ((s) << 1))) >> (q)) & (m))

#define RTL_CACHE_HASH_INDEX(cache, addr) \
    RTL_CACHE_HASH_INDEX_IMPL((addr), (cache)->m_hash_shift, (cache)->m_type_shift, ((cache)->m_hash_size - 1))
122 /** rtl_cache_hash_rescale()
125 rtl_cache_hash_rescale (
126 rtl_cache_type
* cache
,
130 rtl_cache_bufctl_type
** new_table
;
133 new_bytes
= new_size
* sizeof(rtl_cache_bufctl_type
*);
134 new_table
= (rtl_cache_bufctl_type
**)rtl_arena_alloc(gp_cache_arena
, &new_bytes
);
138 rtl_cache_bufctl_type
** old_table
;
139 sal_Size old_size
, i
;
141 memset (new_table
, 0, new_bytes
);
143 RTL_MEMORY_LOCK_ACQUIRE(&(cache
->m_slab_lock
));
145 old_table
= cache
->m_hash_table
;
146 old_size
= cache
->m_hash_size
;
149 "rtl_cache_hash_rescale(\"%s\"): "
150 "nbuf: % " PRIu64
" (ave: %" PRIu64
"), frees: %" PRIu64
" "
151 "[old_size: %lu, new_size: %lu]",
153 cache
->m_slab_stats
.m_alloc
- cache
->m_slab_stats
.m_free
,
154 (cache
->m_slab_stats
.m_alloc
- cache
->m_slab_stats
.m_free
) >> cache
->m_hash_shift
,
155 cache
->m_slab_stats
.m_free
,
158 cache
->m_hash_table
= new_table
;
159 cache
->m_hash_size
= new_size
;
160 cache
->m_hash_shift
= highbit(cache
->m_hash_size
) - 1;
162 for (i
= 0; i
< old_size
; i
++)
164 rtl_cache_bufctl_type
* curr
= old_table
[i
];
167 rtl_cache_bufctl_type
* next
= curr
->m_next
;
168 rtl_cache_bufctl_type
** head
;
170 head
= &(cache
->m_hash_table
[RTL_CACHE_HASH_INDEX(cache
, curr
->m_addr
)]);
171 curr
->m_next
= (*head
);
179 RTL_MEMORY_LOCK_RELEASE(&(cache
->m_slab_lock
));
181 if (old_table
!= cache
->m_hash_table_0
)
183 sal_Size old_bytes
= old_size
* sizeof(rtl_cache_bufctl_type
*);
184 rtl_arena_free (gp_cache_arena
, old_table
, old_bytes
);
189 /** rtl_cache_hash_insert()
191 static RTL_MEMORY_INLINE sal_uIntPtr
192 rtl_cache_hash_insert (
193 rtl_cache_type
* cache
,
194 rtl_cache_bufctl_type
* bufctl
197 rtl_cache_bufctl_type
** ppHead
;
199 ppHead
= &(cache
->m_hash_table
[RTL_CACHE_HASH_INDEX(cache
, bufctl
->m_addr
)]);
201 bufctl
->m_next
= (*ppHead
);
204 return (bufctl
->m_addr
);
207 #if defined(__SUNPRO_C) || defined(__SUNPRO_CC)
208 #pragma inline(rtl_cache_hash_insert)
209 #endif /* __SUNPRO_C */
212 /** rtl_cache_hash_remove()
214 static rtl_cache_bufctl_type
*
215 rtl_cache_hash_remove (
216 rtl_cache_type
* cache
,
220 rtl_cache_bufctl_type
** ppHead
;
221 rtl_cache_bufctl_type
* bufctl
;
222 sal_Size lookups
= 0;
224 ppHead
= &(cache
->m_hash_table
[RTL_CACHE_HASH_INDEX(cache
, addr
)]);
225 while ((bufctl
= *ppHead
) != 0)
227 if (bufctl
->m_addr
== addr
)
229 *ppHead
= bufctl
->m_next
, bufctl
->m_next
= 0;
234 ppHead
= &(bufctl
->m_next
);
237 OSL_ASSERT (bufctl
!= 0); /* bad free */
241 sal_Size nbuf
= (sal_Size
)(cache
->m_slab_stats
.m_alloc
- cache
->m_slab_stats
.m_free
);
242 if (nbuf
> 4 * cache
->m_hash_size
)
244 if (!(cache
->m_features
& RTL_CACHE_FEATURE_RESCALE
))
246 sal_Size ave
= nbuf
>> cache
->m_hash_shift
;
247 sal_Size new_size
= cache
->m_hash_size
<< (highbit(ave
) - 1);
249 cache
->m_features
|= RTL_CACHE_FEATURE_RESCALE
;
250 RTL_MEMORY_LOCK_RELEASE(&(cache
->m_slab_lock
));
251 rtl_cache_hash_rescale (cache
, new_size
);
252 RTL_MEMORY_LOCK_ACQUIRE(&(cache
->m_slab_lock
));
253 cache
->m_features
&= ~RTL_CACHE_FEATURE_RESCALE
;
261 /* ================================================================= */
/** RTL_CACHE_SLAB()
 *  Locate the slab struct embedded at the end of a slab memory block:
 *  round 'addr' up to the end of the block and step back one struct.
 */
#define RTL_CACHE_SLAB(addr, size) \
    (((rtl_cache_slab_type*)(RTL_MEMORY_P2END((sal_uIntPtr)(addr), (size)))) - 1)
269 /** rtl_cache_slab_constructor()
272 rtl_cache_slab_constructor (void * obj
, void * arg
)
274 rtl_cache_slab_type
* slab
= (rtl_cache_slab_type
*)(obj
);
276 (void) arg
; /* unused */
278 QUEUE_START_NAMED(slab
, slab_
);
/** rtl_cache_slab_destructor()
 *  Debug-only sanity checks before a slab struct is released: it must
 *  be off all queues and no longer referenced.
 */
static void
rtl_cache_slab_destructor (void * obj, void * arg)
{
#if OSL_DEBUG_LEVEL == 0
    (void) obj; /* unused */
#else  /* OSL_DEBUG_LEVEL */
    rtl_cache_slab_type * slab = (rtl_cache_slab_type*)(obj);

    /* assure removed from queue(s) */
    OSL_ASSERT(QUEUE_STARTED_NAMED(slab, slab_));

    /* assure no longer referenced */
    OSL_ASSERT(slab->m_ntypes == 0);
#endif /* OSL_DEBUG_LEVEL */

    (void) arg; /* unused */
}
306 /** rtl_cache_slab_create()
308 * @precond cache->m_slab_lock released.
310 static rtl_cache_slab_type
*
311 rtl_cache_slab_create (
312 rtl_cache_type
* cache
315 rtl_cache_slab_type
* slab
= 0;
319 size
= cache
->m_slab_size
;
320 addr
= rtl_arena_alloc (cache
->m_source
, &size
);
323 OSL_ASSERT(size
>= cache
->m_slab_size
);
325 if (cache
->m_features
& RTL_CACHE_FEATURE_HASH
)
327 /* allocate slab struct from slab cache */
328 OSL_ASSERT (cache
!= gp_cache_slab_cache
);
329 slab
= (rtl_cache_slab_type
*)rtl_cache_alloc (gp_cache_slab_cache
);
333 /* construct embedded slab struct */
334 slab
= RTL_CACHE_SLAB(addr
, cache
->m_slab_size
);
335 (void) rtl_cache_slab_constructor (slab
, 0);
339 slab
->m_data
= (sal_uIntPtr
)(addr
);
341 /* dynamic freelist initialization */
342 slab
->m_bp
= slab
->m_data
;
347 rtl_arena_free (cache
->m_source
, addr
, size
);
354 /** rtl_cache_slab_destroy()
356 * @precond cache->m_slab_lock released.
359 rtl_cache_slab_destroy (
360 rtl_cache_type
* cache
,
361 rtl_cache_slab_type
* slab
364 void * addr
= (void*)(slab
->m_data
);
365 sal_Size refcnt
= slab
->m_ntypes
; slab
->m_ntypes
= 0;
367 if (cache
->m_features
& RTL_CACHE_FEATURE_HASH
)
369 /* cleanup bufctl(s) for free buffer(s) */
370 sal_Size ntypes
= (slab
->m_bp
- slab
->m_data
) / cache
->m_type_size
;
371 for (ntypes
-= refcnt
; slab
->m_sp
!= 0; ntypes
--)
373 rtl_cache_bufctl_type
* bufctl
= slab
->m_sp
;
375 /* pop from freelist */
376 slab
->m_sp
= bufctl
->m_next
, bufctl
->m_next
= 0;
378 /* return bufctl struct to bufctl cache */
379 rtl_cache_free (gp_cache_bufctl_cache
, bufctl
);
381 OSL_ASSERT(ntypes
== 0);
383 /* return slab struct to slab cache */
384 rtl_cache_free (gp_cache_slab_cache
, slab
);
388 /* destruct embedded slab struct */
389 rtl_cache_slab_destructor (slab
, 0);
392 if ((refcnt
== 0) || (cache
->m_features
& RTL_CACHE_FEATURE_BULKDESTROY
))
395 rtl_arena_free (cache
->m_source
, addr
, cache
->m_slab_size
);
400 /** rtl_cache_slab_populate()
402 * @precond cache->m_slab_lock acquired.
405 rtl_cache_slab_populate (
406 rtl_cache_type
* cache
409 rtl_cache_slab_type
* slab
;
411 RTL_MEMORY_LOCK_RELEASE(&(cache
->m_slab_lock
));
412 slab
= rtl_cache_slab_create (cache
);
413 RTL_MEMORY_LOCK_ACQUIRE(&(cache
->m_slab_lock
));
416 /* update buffer start addr w/ current color */
417 slab
->m_bp
+= cache
->m_ncolor
;
419 /* update color for next slab */
420 cache
->m_ncolor
+= cache
->m_type_align
;
421 if (cache
->m_ncolor
> cache
->m_ncolor_max
)
425 cache
->m_slab_stats
.m_mem_total
+= cache
->m_slab_size
;
427 /* insert onto 'free' queue */
428 QUEUE_INSERT_HEAD_NAMED(&(cache
->m_free_head
), slab
, slab_
);
433 /* ================================================================= */
435 /** rtl_cache_slab_alloc()
437 * Allocate a buffer from slab layer; used by magazine layer.
440 rtl_cache_slab_alloc (
441 rtl_cache_type
* cache
445 rtl_cache_slab_type
* head
;
447 RTL_MEMORY_LOCK_ACQUIRE(&(cache
->m_slab_lock
));
449 head
= &(cache
->m_free_head
);
450 if ((head
->m_slab_next
!= head
) || rtl_cache_slab_populate (cache
))
452 rtl_cache_slab_type
* slab
;
453 rtl_cache_bufctl_type
* bufctl
;
455 slab
= head
->m_slab_next
;
456 OSL_ASSERT(slab
->m_ntypes
< cache
->m_ntypes
);
460 /* initialize bufctl w/ current 'slab->m_bp' */
461 OSL_ASSERT (slab
->m_bp
< slab
->m_data
+ cache
->m_ntypes
* cache
->m_type_size
+ cache
->m_ncolor_max
);
462 if (cache
->m_features
& RTL_CACHE_FEATURE_HASH
)
464 /* allocate bufctl */
465 OSL_ASSERT (cache
!= gp_cache_bufctl_cache
);
466 bufctl
= (rtl_cache_bufctl_type
*)rtl_cache_alloc (gp_cache_bufctl_cache
);
470 RTL_MEMORY_LOCK_RELEASE(&(cache
->m_slab_lock
));
474 bufctl
->m_addr
= slab
->m_bp
;
475 bufctl
->m_slab
= (sal_uIntPtr
)(slab
);
479 /* embedded bufctl */
480 bufctl
= (rtl_cache_bufctl_type
*)(slab
->m_bp
);
484 /* update 'slab->m_bp' to next free buffer */
485 slab
->m_bp
+= cache
->m_type_size
;
487 /* assign bufctl to freelist */
493 slab
->m_sp
= bufctl
->m_next
;
495 /* increment usage, check for full slab */
496 if ((slab
->m_ntypes
+= 1) == cache
->m_ntypes
)
498 /* remove from 'free' queue */
499 QUEUE_REMOVE_NAMED(slab
, slab_
);
501 /* insert onto 'used' queue (tail) */
502 QUEUE_INSERT_TAIL_NAMED(&(cache
->m_used_head
), slab
, slab_
);
506 cache
->m_slab_stats
.m_alloc
+= 1;
507 cache
->m_slab_stats
.m_mem_alloc
+= cache
->m_type_size
;
509 if (cache
->m_features
& RTL_CACHE_FEATURE_HASH
)
510 addr
= (void*)rtl_cache_hash_insert (cache
, bufctl
);
515 RTL_MEMORY_LOCK_RELEASE(&(cache
->m_slab_lock
));
520 /** rtl_cache_slab_free()
522 * Return a buffer to slab layer; used by magazine layer.
525 rtl_cache_slab_free (
526 rtl_cache_type
* cache
,
530 rtl_cache_bufctl_type
* bufctl
;
531 rtl_cache_slab_type
* slab
;
533 RTL_MEMORY_LOCK_ACQUIRE(&(cache
->m_slab_lock
));
535 /* determine slab from addr */
536 if (cache
->m_features
& RTL_CACHE_FEATURE_HASH
)
538 bufctl
= rtl_cache_hash_remove (cache
, (sal_uIntPtr
)(addr
));
539 slab
= (bufctl
!= 0) ? (rtl_cache_slab_type
*)(bufctl
->m_slab
) : 0;
543 /* embedded slab struct */
544 bufctl
= (rtl_cache_bufctl_type
*)(addr
);
545 slab
= RTL_CACHE_SLAB(addr
, cache
->m_slab_size
);
550 /* check for full slab */
551 if (slab
->m_ntypes
== cache
->m_ntypes
)
553 /* remove from 'used' queue */
554 QUEUE_REMOVE_NAMED(slab
, slab_
);
556 /* insert onto 'free' queue (head) */
557 QUEUE_INSERT_HEAD_NAMED(&(cache
->m_free_head
), slab
, slab_
);
561 bufctl
->m_next
= slab
->m_sp
;
565 cache
->m_slab_stats
.m_free
+= 1;
566 cache
->m_slab_stats
.m_mem_alloc
-= cache
->m_type_size
;
568 /* decrement usage, check for empty slab */
569 if ((slab
->m_ntypes
-= 1) == 0)
571 /* remove from 'free' queue */
572 QUEUE_REMOVE_NAMED(slab
, slab_
);
575 cache
->m_slab_stats
.m_mem_total
-= cache
->m_slab_size
;
577 /* free 'empty' slab */
578 RTL_MEMORY_LOCK_RELEASE(&(cache
->m_slab_lock
));
579 rtl_cache_slab_destroy (cache
, slab
);
584 RTL_MEMORY_LOCK_RELEASE(&(cache
->m_slab_lock
));
587 /* ================================================================= */
589 /** rtl_cache_magazine_constructor()
592 rtl_cache_magazine_constructor (void * obj
, void * arg
)
594 rtl_cache_magazine_type
* mag
= (rtl_cache_magazine_type
*)(obj
);
595 /* @@@ sal_Size size = (sal_Size)(arg); @@@ */
597 (void) arg
; /* unused */
600 mag
->m_mag_size
= RTL_CACHE_MAGAZINE_SIZE
;
/** rtl_cache_magazine_destructor()
 *  Debug-only sanity checks before a magazine is released: it must be
 *  unlinked and empty.
 */
static void
rtl_cache_magazine_destructor (void * obj, void * arg)
{
#if OSL_DEBUG_LEVEL == 0
    (void) obj; /* unused */
#else  /* OSL_DEBUG_LEVEL */
    rtl_cache_magazine_type * mag = (rtl_cache_magazine_type*)(obj);

    /* assure removed from queue(s) */
    OSL_ASSERT(mag->m_mag_next == 0);

    /* assure no longer referenced */
    OSL_ASSERT(mag->m_mag_used == 0);
#endif /* OSL_DEBUG_LEVEL */

    (void) arg; /* unused */
}
628 /** rtl_cache_magazine_clear()
631 rtl_cache_magazine_clear (
632 rtl_cache_type
* cache
,
633 rtl_cache_magazine_type
* mag
636 for (; mag
->m_mag_used
> 0; --mag
->m_mag_used
)
638 void * obj
= mag
->m_objects
[mag
->m_mag_used
- 1];
639 mag
->m_objects
[mag
->m_mag_used
- 1] = 0;
641 if (cache
->m_destructor
!= 0)
643 /* destruct object */
644 (cache
->m_destructor
)(obj
, cache
->m_userarg
);
647 /* return buffer to slab layer */
648 rtl_cache_slab_free (cache
, obj
);
652 /* ================================================================= */
654 /** rtl_cache_depot_enqueue()
656 * @precond cache->m_depot_lock acquired.
658 static RTL_MEMORY_INLINE
void
659 rtl_cache_depot_enqueue (
660 rtl_cache_depot_type
* depot
,
661 rtl_cache_magazine_type
* mag
664 /* enqueue empty magazine */
665 mag
->m_mag_next
= depot
->m_mag_next
;
666 depot
->m_mag_next
= mag
;
668 /* update depot stats */
669 depot
->m_mag_count
++;
672 #if defined(__SUNPRO_C) || defined(__SUNPRO_CC)
673 #pragma inline(rtl_cache_depot_enqueue)
674 #endif /* __SUNPRO_C */
677 /** rtl_cache_depot_dequeue()
679 * @precond cache->m_depot_lock acquired.
681 static RTL_MEMORY_INLINE rtl_cache_magazine_type
*
682 rtl_cache_depot_dequeue (
683 rtl_cache_depot_type
* depot
686 rtl_cache_magazine_type
* mag
= 0;
687 if (depot
->m_mag_count
> 0)
689 /* dequeue magazine */
690 OSL_ASSERT(depot
->m_mag_next
!= 0);
692 mag
= depot
->m_mag_next
;
693 depot
->m_mag_next
= mag
->m_mag_next
;
696 /* update depot stats */
697 depot
->m_mag_count
--;
698 depot
->m_curr_min
= SAL_MIN(depot
->m_curr_min
, depot
->m_mag_count
);
703 #if defined(__SUNPRO_C) || defined(__SUNPRO_CC)
704 #pragma inline(rtl_cache_depot_dequeue)
705 #endif /* __SUNPRO_C */
708 /** rtl_cache_depot_exchange_alloc()
710 * @precond cache->m_depot_lock acquired.
712 static RTL_MEMORY_INLINE rtl_cache_magazine_type
*
713 rtl_cache_depot_exchange_alloc (
714 rtl_cache_type
* cache
,
715 rtl_cache_magazine_type
* empty
718 rtl_cache_magazine_type
* full
;
720 OSL_ASSERT((empty
== 0) || (empty
->m_mag_used
== 0));
722 /* dequeue full magazine */
723 full
= rtl_cache_depot_dequeue (&(cache
->m_depot_full
));
724 if ((full
!= 0) && (empty
!= 0))
726 /* enqueue empty magazine */
727 rtl_cache_depot_enqueue (&(cache
->m_depot_empty
), empty
);
730 OSL_ASSERT((full
== 0) || (full
->m_mag_used
> 0));
735 #if defined(__SUNPRO_C) || defined(__SUNPRO_CC)
736 #pragma inline(rtl_cache_depot_exchange_alloc)
737 #endif /* __SUNPRO_C */
740 /** rtl_cache_depot_exchange_free()
742 * @precond cache->m_depot_lock acquired.
744 static RTL_MEMORY_INLINE rtl_cache_magazine_type
*
745 rtl_cache_depot_exchange_free (
746 rtl_cache_type
* cache
,
747 rtl_cache_magazine_type
* full
750 rtl_cache_magazine_type
* empty
;
752 OSL_ASSERT((full
== 0) || (full
->m_mag_used
> 0));
754 /* dequeue empty magazine */
755 empty
= rtl_cache_depot_dequeue (&(cache
->m_depot_empty
));
756 if ((empty
!= 0) && (full
!= 0))
758 /* enqueue full magazine */
759 rtl_cache_depot_enqueue (&(cache
->m_depot_full
), full
);
762 OSL_ASSERT((empty
== 0) || (empty
->m_mag_used
== 0));
767 #if defined(__SUNPRO_C) || defined(__SUNPRO_CC)
768 #pragma inline(rtl_cache_depot_exchange_free)
769 #endif /* __SUNPRO_C */
772 /** rtl_cache_depot_populate()
774 * @precond cache->m_depot_lock acquired.
777 rtl_cache_depot_populate (
778 rtl_cache_type
* cache
781 rtl_cache_magazine_type
* empty
= 0;
783 if (cache
->m_magazine_cache
!= 0)
785 /* allocate new empty magazine */
786 RTL_MEMORY_LOCK_RELEASE(&(cache
->m_depot_lock
));
787 empty
= (rtl_cache_magazine_type
*)rtl_cache_alloc (cache
->m_magazine_cache
);
788 RTL_MEMORY_LOCK_ACQUIRE(&(cache
->m_depot_lock
));
791 /* enqueue (new) empty magazine */
792 rtl_cache_depot_enqueue (&(cache
->m_depot_empty
), empty
);
798 /* ================================================================= */
800 /** rtl_cache_constructor()
803 rtl_cache_constructor (void * obj
)
805 rtl_cache_type
* cache
= (rtl_cache_type
*)(obj
);
807 memset (cache
, 0, sizeof(rtl_cache_type
));
810 QUEUE_START_NAMED(cache
, cache_
);
813 (void)RTL_MEMORY_LOCK_INIT(&(cache
->m_slab_lock
));
815 QUEUE_START_NAMED(&(cache
->m_free_head
), slab_
);
816 QUEUE_START_NAMED(&(cache
->m_used_head
), slab_
);
818 cache
->m_hash_table
= cache
->m_hash_table_0
;
819 cache
->m_hash_size
= RTL_CACHE_HASH_SIZE
;
820 cache
->m_hash_shift
= highbit(cache
->m_hash_size
) - 1;
823 (void)RTL_MEMORY_LOCK_INIT(&(cache
->m_depot_lock
));
828 /** rtl_cache_destructor()
831 rtl_cache_destructor (void * obj
)
833 rtl_cache_type
* cache
= (rtl_cache_type
*)(obj
);
836 OSL_ASSERT(QUEUE_STARTED_NAMED(cache
, cache_
));
839 (void)RTL_MEMORY_LOCK_DESTROY(&(cache
->m_slab_lock
));
841 OSL_ASSERT(QUEUE_STARTED_NAMED(&(cache
->m_free_head
), slab_
));
842 OSL_ASSERT(QUEUE_STARTED_NAMED(&(cache
->m_used_head
), slab_
));
844 OSL_ASSERT(cache
->m_hash_table
== cache
->m_hash_table_0
);
845 OSL_ASSERT(cache
->m_hash_size
== RTL_CACHE_HASH_SIZE
);
846 OSL_ASSERT(cache
->m_hash_shift
== (sal_Size
)(highbit(cache
->m_hash_size
) - 1));
849 (void)RTL_MEMORY_LOCK_DESTROY(&(cache
->m_depot_lock
));
852 /* ================================================================= */
854 /** rtl_cache_activate()
856 static rtl_cache_type
*
858 rtl_cache_type
* cache
,
862 int (SAL_CALL
* constructor
)(void * obj
, void * userarg
),
863 void (SAL_CALL
* destructor
) (void * obj
, void * userarg
),
864 void (SAL_CALL
* reclaim
) (void * userarg
),
866 rtl_arena_type
* source
,
870 OSL_ASSERT(cache
!= 0);
875 snprintf (cache
->m_name
, sizeof(cache
->m_name
), "%s", name
);
877 /* ensure minimum size (embedded bufctl linkage) */
878 objsize
= SAL_MAX(objsize
, sizeof(rtl_cache_bufctl_type
*));
882 /* determine default alignment */
883 if (objsize
>= RTL_MEMORY_ALIGNMENT_8
)
884 objalign
= RTL_MEMORY_ALIGNMENT_8
;
886 objalign
= RTL_MEMORY_ALIGNMENT_4
;
890 /* ensure minimum alignment */
891 objalign
= SAL_MAX(objalign
, RTL_MEMORY_ALIGNMENT_4
);
893 OSL_ASSERT(RTL_MEMORY_ISP2(objalign
));
895 cache
->m_type_size
= objsize
= RTL_MEMORY_P2ROUNDUP(objsize
, objalign
);
896 cache
->m_type_align
= objalign
;
897 cache
->m_type_shift
= highbit(cache
->m_type_size
) - 1;
899 cache
->m_constructor
= constructor
;
900 cache
->m_destructor
= destructor
;
901 cache
->m_reclaim
= reclaim
;
902 cache
->m_userarg
= userarg
;
905 cache
->m_source
= source
;
907 slabsize
= source
->m_quantum
; /* minimum slab size */
908 if (flags
& RTL_CACHE_FLAG_QUANTUMCACHE
)
910 /* next power of 2 above 3 * qcache_max */
911 slabsize
= SAL_MAX(slabsize
, (1UL << highbit(3 * source
->m_qcache_max
)));
915 /* waste at most 1/8 of slab */
916 slabsize
= SAL_MAX(slabsize
, cache
->m_type_size
* 8);
919 slabsize
= RTL_MEMORY_P2ROUNDUP(slabsize
, source
->m_quantum
);
920 if (!RTL_MEMORY_ISP2(slabsize
))
921 slabsize
= 1UL << highbit(slabsize
);
922 cache
->m_slab_size
= slabsize
;
924 if (cache
->m_slab_size
> source
->m_quantum
)
926 OSL_ASSERT(gp_cache_slab_cache
!= 0);
927 OSL_ASSERT(gp_cache_bufctl_cache
!= 0);
929 cache
->m_features
|= RTL_CACHE_FEATURE_HASH
;
930 cache
->m_ntypes
= cache
->m_slab_size
/ cache
->m_type_size
;
931 cache
->m_ncolor_max
= cache
->m_slab_size
% cache
->m_type_size
;
935 /* embedded slab struct */
936 cache
->m_ntypes
= (cache
->m_slab_size
- sizeof(rtl_cache_slab_type
)) / cache
->m_type_size
;
937 cache
->m_ncolor_max
= (cache
->m_slab_size
- sizeof(rtl_cache_slab_type
)) % cache
->m_type_size
;
940 OSL_ASSERT(cache
->m_ntypes
> 0);
943 if (flags
& RTL_CACHE_FLAG_BULKDESTROY
)
945 /* allow bulk slab delete upon cache deactivation */
946 cache
->m_features
|= RTL_CACHE_FEATURE_BULKDESTROY
;
950 if (!(flags
& RTL_CACHE_FLAG_NOMAGAZINE
))
952 OSL_ASSERT(gp_cache_magazine_cache
!= 0);
953 cache
->m_magazine_cache
= gp_cache_magazine_cache
;
956 /* insert into cache list */
957 RTL_MEMORY_LOCK_ACQUIRE(&(g_cache_list
.m_lock
));
958 QUEUE_INSERT_TAIL_NAMED(&(g_cache_list
.m_cache_head
), cache
, cache_
);
959 RTL_MEMORY_LOCK_RELEASE(&(g_cache_list
.m_lock
));
964 /** rtl_cache_deactivate()
967 rtl_cache_deactivate (
968 rtl_cache_type
* cache
971 /* remove from cache list */
972 RTL_MEMORY_LOCK_ACQUIRE(&(g_cache_list
.m_lock
));
973 QUEUE_REMOVE_NAMED(cache
, cache_
);
974 RTL_MEMORY_LOCK_RELEASE(&(g_cache_list
.m_lock
));
976 /* cleanup magazine layer */
977 if (cache
->m_magazine_cache
!= 0)
979 rtl_cache_type
* mag_cache
;
980 rtl_cache_magazine_type
* mag
;
982 /* prevent recursion */
983 mag_cache
= cache
->m_magazine_cache
, cache
->m_magazine_cache
= 0;
985 /* cleanup cpu layer */
986 if ((mag
= cache
->m_cpu_curr
) != 0)
988 cache
->m_cpu_curr
= 0;
989 rtl_cache_magazine_clear (cache
, mag
);
990 rtl_cache_free (mag_cache
, mag
);
992 if ((mag
= cache
->m_cpu_prev
) != 0)
994 cache
->m_cpu_prev
= 0;
995 rtl_cache_magazine_clear (cache
, mag
);
996 rtl_cache_free (mag_cache
, mag
);
999 /* cleanup depot layer */
1000 while ((mag
= rtl_cache_depot_dequeue(&(cache
->m_depot_full
))) != 0)
1002 rtl_cache_magazine_clear (cache
, mag
);
1003 rtl_cache_free (mag_cache
, mag
);
1005 while ((mag
= rtl_cache_depot_dequeue(&(cache
->m_depot_empty
))) != 0)
1007 rtl_cache_magazine_clear (cache
, mag
);
1008 rtl_cache_free (mag_cache
, mag
);
1013 "rtl_cache_deactivate(\"%s\"): "
1014 "[slab]: allocs: %"PRIu64
", frees: %"PRIu64
"; total: %lu, used: %lu; "
1015 "[cpu]: allocs: %"PRIu64
", frees: %"PRIu64
"; "
1016 "[total]: allocs: %"PRIu64
", frees: %"PRIu64
"",
1018 cache
->m_slab_stats
.m_alloc
, cache
->m_slab_stats
.m_free
,
1019 cache
->m_slab_stats
.m_mem_total
, cache
->m_slab_stats
.m_mem_alloc
,
1020 cache
->m_cpu_stats
.m_alloc
, cache
->m_cpu_stats
.m_free
,
1021 cache
->m_slab_stats
.m_alloc
+ cache
->m_cpu_stats
.m_alloc
,
1022 cache
->m_slab_stats
.m_free
+ cache
->m_cpu_stats
.m_free
1025 /* cleanup slab layer */
1026 if (cache
->m_slab_stats
.m_alloc
> cache
->m_slab_stats
.m_free
)
1029 "rtl_cache_deactivate(\"%s\"): "
1030 "cleaning up %"PRIu64
" leaked buffer(s) [%lu bytes] [%lu total]",
1032 cache
->m_slab_stats
.m_alloc
- cache
->m_slab_stats
.m_free
,
1033 cache
->m_slab_stats
.m_mem_alloc
, cache
->m_slab_stats
.m_mem_total
1036 if (cache
->m_features
& RTL_CACHE_FEATURE_HASH
)
1038 /* cleanup bufctl(s) for leaking buffer(s) */
1039 sal_Size i
, n
= cache
->m_hash_size
;
1040 for (i
= 0; i
< n
; i
++)
1042 rtl_cache_bufctl_type
* bufctl
;
1043 while ((bufctl
= cache
->m_hash_table
[i
]) != 0)
1045 /* pop from hash table */
1046 cache
->m_hash_table
[i
] = bufctl
->m_next
, bufctl
->m_next
= 0;
1048 /* return to bufctl cache */
1049 rtl_cache_free (gp_cache_bufctl_cache
, bufctl
);
1054 /* force cleanup of remaining slabs */
1055 rtl_cache_slab_type
*head
, *slab
;
1057 head
= &(cache
->m_used_head
);
1058 for (slab
= head
->m_slab_next
; slab
!= head
; slab
= head
->m_slab_next
)
1060 /* remove from 'used' queue */
1061 QUEUE_REMOVE_NAMED(slab
, slab_
);
1064 cache
->m_slab_stats
.m_mem_total
-= cache
->m_slab_size
;
1067 rtl_cache_slab_destroy (cache
, slab
);
1070 head
= &(cache
->m_free_head
);
1071 for (slab
= head
->m_slab_next
; slab
!= head
; slab
= head
->m_slab_next
)
1073 /* remove from 'free' queue */
1074 QUEUE_REMOVE_NAMED(slab
, slab_
);
1077 cache
->m_slab_stats
.m_mem_total
-= cache
->m_slab_size
;
1080 rtl_cache_slab_destroy (cache
, slab
);
1085 if (cache
->m_hash_table
!= cache
->m_hash_table_0
)
1089 cache
->m_hash_table
,
1090 cache
->m_hash_size
* sizeof(rtl_cache_bufctl_type
*));
1092 cache
->m_hash_table
= cache
->m_hash_table_0
;
1093 cache
->m_hash_size
= RTL_CACHE_HASH_SIZE
;
1094 cache
->m_hash_shift
= highbit(cache
->m_hash_size
) - 1;
1098 /* ================================================================= *
1100 * cache implementation.
1102 * ================================================================= */
1104 /** rtl_cache_create()
1107 SAL_CALL
rtl_cache_create (
1111 int (SAL_CALL
* constructor
)(void * obj
, void * userarg
),
1112 void (SAL_CALL
* destructor
) (void * obj
, void * userarg
),
1113 void (SAL_CALL
* reclaim
) (void * userarg
),
1115 rtl_arena_type
* source
,
1117 ) SAL_THROW_EXTERN_C()
1119 rtl_cache_type
* result
= 0;
1120 sal_Size size
= sizeof(rtl_cache_type
);
1123 result
= (rtl_cache_type
*)rtl_arena_alloc (gp_cache_arena
, &size
);
1126 rtl_cache_type
* cache
= result
;
1127 (void) rtl_cache_constructor (cache
);
1131 /* use default arena */
1132 OSL_ASSERT(gp_default_arena
!= 0);
1133 source
= gp_default_arena
;
1136 result
= rtl_cache_activate (
1151 /* activation failed */
1152 rtl_cache_deactivate (cache
);
1153 rtl_cache_destructor (cache
);
1154 rtl_arena_free (gp_cache_arena
, cache
, size
);
1157 else if (gp_cache_arena
== 0)
1159 if (rtl_cache_init())
1168 /** rtl_cache_destroy()
1170 void SAL_CALL
rtl_cache_destroy (
1171 rtl_cache_type
* cache
1172 ) SAL_THROW_EXTERN_C()
1176 rtl_cache_deactivate (cache
);
1177 rtl_cache_destructor (cache
);
1178 rtl_arena_free (gp_cache_arena
, cache
, sizeof(rtl_cache_type
));
1182 /** rtl_cache_alloc()
1185 SAL_CALL
rtl_cache_alloc (
1186 rtl_cache_type
* cache
1187 ) SAL_THROW_EXTERN_C()
1194 if (cache
->m_cpu_curr
!= 0)
1196 RTL_MEMORY_LOCK_ACQUIRE(&(cache
->m_depot_lock
));
1200 /* take object from magazine layer */
1201 rtl_cache_magazine_type
*curr
, *prev
, *temp
;
1203 curr
= cache
->m_cpu_curr
;
1204 if ((curr
!= 0) && (curr
->m_mag_used
> 0))
1206 obj
= curr
->m_objects
[--curr
->m_mag_used
];
1207 cache
->m_cpu_stats
.m_alloc
+= 1;
1208 RTL_MEMORY_LOCK_RELEASE(&(cache
->m_depot_lock
));
1213 prev
= cache
->m_cpu_prev
;
1214 if ((prev
!= 0) && (prev
->m_mag_used
> 0))
1216 temp
= cache
->m_cpu_curr
;
1217 cache
->m_cpu_curr
= cache
->m_cpu_prev
;
1218 cache
->m_cpu_prev
= temp
;
1223 temp
= rtl_cache_depot_exchange_alloc (cache
, prev
);
1226 cache
->m_cpu_prev
= cache
->m_cpu_curr
;
1227 cache
->m_cpu_curr
= temp
;
1232 /* no full magazine: fall through to slab layer */
1236 RTL_MEMORY_LOCK_RELEASE(&(cache
->m_depot_lock
));
1239 /* alloc buffer from slab layer */
1240 obj
= rtl_cache_slab_alloc (cache
);
1241 if ((obj
!= 0) && (cache
->m_constructor
!= 0))
1243 /* construct object */
1244 if (!((cache
->m_constructor
)(obj
, cache
->m_userarg
)))
1246 /* construction failure */
1247 rtl_cache_slab_free (cache
, obj
), obj
= 0;
1254 /** rtl_cache_free()
1257 SAL_CALL
rtl_cache_free (
1258 rtl_cache_type
* cache
,
1260 ) SAL_THROW_EXTERN_C()
1262 if ((obj
!= 0) && (cache
!= 0))
1264 RTL_MEMORY_LOCK_ACQUIRE(&(cache
->m_depot_lock
));
1268 /* return object to magazine layer */
1269 rtl_cache_magazine_type
*curr
, *prev
, *temp
;
1271 curr
= cache
->m_cpu_curr
;
1272 if ((curr
!= 0) && (curr
->m_mag_used
< curr
->m_mag_size
))
1275 for (i
= 0; i
< curr
->m_mag_used
; ++i
)
1277 OSL_ENSURE(curr
->m_objects
[i
] != obj
, "DOUBLE FREE!");
1278 if (curr
->m_objects
[i
] == obj
)
1280 RTL_MEMORY_LOCK_RELEASE(&(cache
->m_depot_lock
));
1285 curr
->m_objects
[curr
->m_mag_used
++] = obj
;
1286 cache
->m_cpu_stats
.m_free
+= 1;
1287 RTL_MEMORY_LOCK_RELEASE(&(cache
->m_depot_lock
));
1292 prev
= cache
->m_cpu_prev
;
1293 if ((prev
!= 0) && (prev
->m_mag_used
== 0))
1295 temp
= cache
->m_cpu_curr
;
1296 cache
->m_cpu_curr
= cache
->m_cpu_prev
;
1297 cache
->m_cpu_prev
= temp
;
1302 temp
= rtl_cache_depot_exchange_free (cache
, prev
);
1305 cache
->m_cpu_prev
= cache
->m_cpu_curr
;
1306 cache
->m_cpu_curr
= temp
;
1311 if (rtl_cache_depot_populate(cache
) != 0)
1316 /* no empty magazine: fall through to slab layer */
1320 RTL_MEMORY_LOCK_RELEASE(&(cache
->m_depot_lock
));
1322 /* no space for constructed object in magazine layer */
1323 if (cache
->m_destructor
!= 0)
1325 /* destruct object */
1326 (cache
->m_destructor
)(obj
, cache
->m_userarg
);
1329 /* return buffer to slab layer */
1330 rtl_cache_slab_free (cache
, obj
);
1334 /* ================================================================= *
1336 * cache wsupdate (machdep) internals.
1338 * ================================================================= */
/** rtl_cache_wsupdate_init()
 *
 *  @precond g_cache_list.m_lock initialized
 */
static void
rtl_cache_wsupdate_init (void);

/** rtl_cache_wsupdate_wait()
 *
 *  @precond g_cache_list.m_lock acquired
 */
static void
rtl_cache_wsupdate_wait (
    unsigned int seconds
);

/** rtl_cache_wsupdate_fini()
 */
static void
rtl_cache_wsupdate_fini (void);
1363 /* ================================================================= */
/* ================================================================= */

#if defined(SAL_UNX) || defined(SAL_OS2)

#include <sys/time.h>

static void *
rtl_cache_wsupdate_all (void * arg);

static void
rtl_cache_wsupdate_init (void)
{
    RTL_MEMORY_LOCK_ACQUIRE(&(g_cache_list.m_lock));
    g_cache_list.m_update_done = 0;
    (void) pthread_cond_init (&(g_cache_list.m_update_cond), NULL);
    if (pthread_create (
            &(g_cache_list.m_update_thread), NULL, rtl_cache_wsupdate_all, (void*)(10)) != 0)
    {
        /* failure: record 'no thread' so fini() won't join */
        g_cache_list.m_update_thread = (pthread_t)(0);
    }
    RTL_MEMORY_LOCK_RELEASE(&(g_cache_list.m_lock));
}

static void
rtl_cache_wsupdate_wait (unsigned int seconds)
{
    if (seconds > 0)
    {
        struct timeval  now;
        struct timespec wakeup;

        gettimeofday(&now, 0);
        wakeup.tv_sec  = now.tv_sec + (seconds);
        wakeup.tv_nsec = now.tv_usec * 1000;

        /* releases g_cache_list.m_lock while waiting */
        (void) pthread_cond_timedwait (
            &(g_cache_list.m_update_cond),
            &(g_cache_list.m_lock),
            &wakeup);
    }
}

static void
rtl_cache_wsupdate_fini (void)
{
    RTL_MEMORY_LOCK_ACQUIRE(&(g_cache_list.m_lock));
    g_cache_list.m_update_done = 1;
    pthread_cond_signal (&(g_cache_list.m_update_cond));
    RTL_MEMORY_LOCK_RELEASE(&(g_cache_list.m_lock));

    if (g_cache_list.m_update_thread != (pthread_t)(0))
        pthread_join (g_cache_list.m_update_thread, NULL);
}

/* ================================================================= */

#elif defined(SAL_W32)

static DWORD WINAPI
rtl_cache_wsupdate_all (void * arg);

static void
rtl_cache_wsupdate_init (void)
{
    DWORD dwThreadId;

    RTL_MEMORY_LOCK_ACQUIRE(&(g_cache_list.m_lock));
    g_cache_list.m_update_done = 0;
    g_cache_list.m_update_cond = CreateEvent (0, TRUE, FALSE, 0);

    g_cache_list.m_update_thread =
        CreateThread (NULL, 0, rtl_cache_wsupdate_all, (LPVOID)(10), 0, &dwThreadId);
    RTL_MEMORY_LOCK_RELEASE(&(g_cache_list.m_lock));
}

static void
rtl_cache_wsupdate_wait (unsigned int seconds)
{
    if (seconds > 0)
    {
        /* drop the list lock for the duration of the timed wait */
        RTL_MEMORY_LOCK_RELEASE(&(g_cache_list.m_lock));
        WaitForSingleObject (g_cache_list.m_update_cond, (DWORD)(seconds * 1000));
        RTL_MEMORY_LOCK_ACQUIRE(&(g_cache_list.m_lock));
    }
}

static void
rtl_cache_wsupdate_fini (void)
{
    RTL_MEMORY_LOCK_ACQUIRE(&(g_cache_list.m_lock));
    g_cache_list.m_update_done = 1;
    SetEvent (g_cache_list.m_update_cond);
    RTL_MEMORY_LOCK_RELEASE(&(g_cache_list.m_lock));

    WaitForSingleObject (g_cache_list.m_update_thread, INFINITE);
}

#endif /* SAL_UNX || SAL_W32 */
1463 /* ================================================================= */
1465 /** rtl_cache_depot_wsupdate()
1466 * update depot stats and purge excess magazines.
1468 * @precond cache->m_depot_lock acquired
1471 rtl_cache_depot_wsupdate (
1472 rtl_cache_type
* cache
,
1473 rtl_cache_depot_type
* depot
1478 depot
->m_prev_min
= depot
->m_curr_min
;
1479 depot
->m_curr_min
= depot
->m_mag_count
;
1481 npurge
= SAL_MIN(depot
->m_curr_min
, depot
->m_prev_min
);
1482 for (; npurge
> 0; npurge
--)
1484 rtl_cache_magazine_type
* mag
= rtl_cache_depot_dequeue (depot
);
1487 RTL_MEMORY_LOCK_RELEASE(&(cache
->m_depot_lock
));
1488 rtl_cache_magazine_clear (cache
, mag
);
1489 rtl_cache_free (cache
->m_magazine_cache
, mag
);
1490 RTL_MEMORY_LOCK_ACQUIRE(&(cache
->m_depot_lock
));
1495 /** rtl_cache_wsupdate()
1497 * @precond cache->m_depot_lock released
1500 rtl_cache_wsupdate (
1501 rtl_cache_type
* cache
1504 if (cache
->m_magazine_cache
!= 0)
1506 RTL_MEMORY_LOCK_ACQUIRE(&(cache
->m_depot_lock
));
1509 "rtl_cache_wsupdate(\"%s\") "
1510 "[depot: count, curr_min, prev_min] "
1511 "full: %lu, %lu, %lu; empty: %lu, %lu, %lu",
1513 cache
->m_depot_full
.m_mag_count
,
1514 cache
->m_depot_full
.m_curr_min
,
1515 cache
->m_depot_full
.m_prev_min
,
1516 cache
->m_depot_empty
.m_mag_count
,
1517 cache
->m_depot_empty
.m_curr_min
,
1518 cache
->m_depot_empty
.m_prev_min
1521 rtl_cache_depot_wsupdate (cache
, &(cache
->m_depot_full
));
1522 rtl_cache_depot_wsupdate (cache
, &(cache
->m_depot_empty
));
1524 RTL_MEMORY_LOCK_RELEASE(&(cache
->m_depot_lock
));
1528 /** rtl_cache_wsupdate_all()
1531 #if defined(SAL_UNX) || defined(SAL_OS2)
1533 #elif defined(SAL_W32)
1535 #endif /* SAL_UNX || SAL_W32 */
1536 rtl_cache_wsupdate_all (void * arg
)
1538 unsigned int seconds
= (unsigned int)SAL_INT_CAST(sal_uIntPtr
, arg
);
1540 RTL_MEMORY_LOCK_ACQUIRE(&(g_cache_list
.m_lock
));
1541 while (!g_cache_list
.m_update_done
)
1543 rtl_cache_wsupdate_wait (seconds
);
1544 if (!g_cache_list
.m_update_done
)
1546 rtl_cache_type
* head
, * cache
;
1548 head
= &(g_cache_list
.m_cache_head
);
1549 for (cache
= head
->m_cache_next
;
1551 cache
= cache
->m_cache_next
)
1553 rtl_cache_wsupdate (cache
);
1557 RTL_MEMORY_LOCK_RELEASE(&(g_cache_list
.m_lock
));
1562 /* ================================================================= *
1564 * cache initialization.
1566 * ================================================================= */
1569 rtl_cache_once_init (void)
1572 /* list of caches */
1573 RTL_MEMORY_LOCK_INIT(&(g_cache_list
.m_lock
));
1574 (void) rtl_cache_constructor (&(g_cache_list
.m_cache_head
));
1577 /* cache: internal arena */
1578 OSL_ASSERT(gp_cache_arena
== 0);
1580 gp_cache_arena
= rtl_arena_create (
1581 "rtl_cache_internal_arena",
1583 0, /* no quantum caching */
1584 NULL
, /* default source */
1589 OSL_ASSERT(gp_cache_arena
!= 0);
1591 /* check 'gp_default_arena' initialization */
1592 OSL_ASSERT(gp_default_arena
!= 0);
1595 /* cache: magazine cache */
1596 static rtl_cache_type g_cache_magazine_cache
;
1598 OSL_ASSERT(gp_cache_magazine_cache
== 0);
1599 (void) rtl_cache_constructor (&g_cache_magazine_cache
);
1601 gp_cache_magazine_cache
= rtl_cache_activate (
1602 &g_cache_magazine_cache
,
1603 "rtl_cache_magazine_cache",
1604 sizeof(rtl_cache_magazine_type
), /* objsize */
1606 rtl_cache_magazine_constructor
,
1607 rtl_cache_magazine_destructor
,
1609 0, /* userarg: NYI */
1610 gp_default_arena
, /* source */
1611 RTL_CACHE_FLAG_NOMAGAZINE
/* during bootstrap; activated below */
1613 OSL_ASSERT(gp_cache_magazine_cache
!= 0);
1615 /* activate magazine layer */
1616 g_cache_magazine_cache
.m_magazine_cache
= gp_cache_magazine_cache
;
1619 /* cache: slab (struct) cache */
1620 static rtl_cache_type g_cache_slab_cache
;
1622 OSL_ASSERT(gp_cache_slab_cache
== 0);
1623 (void) rtl_cache_constructor (&g_cache_slab_cache
);
1625 gp_cache_slab_cache
= rtl_cache_activate (
1626 &g_cache_slab_cache
,
1627 "rtl_cache_slab_cache",
1628 sizeof(rtl_cache_slab_type
), /* objsize */
1630 rtl_cache_slab_constructor
,
1631 rtl_cache_slab_destructor
,
1633 0, /* userarg: none */
1634 gp_default_arena
, /* source */
1637 OSL_ASSERT(gp_cache_slab_cache
!= 0);
1640 /* cache: bufctl cache */
1641 static rtl_cache_type g_cache_bufctl_cache
;
1643 OSL_ASSERT(gp_cache_bufctl_cache
== 0);
1644 (void) rtl_cache_constructor (&g_cache_bufctl_cache
);
1646 gp_cache_bufctl_cache
= rtl_cache_activate (
1647 &g_cache_bufctl_cache
,
1648 "rtl_cache_bufctl_cache",
1649 sizeof(rtl_cache_bufctl_type
), /* objsize */
1651 0, /* constructor */
1655 gp_default_arena
, /* source */
1658 OSL_ASSERT(gp_cache_bufctl_cache
!= 0);
1661 rtl_cache_wsupdate_init();
1665 rtl_cache_init (void)
1667 static sal_once_type g_once
= SAL_ONCE_INIT
;
1668 SAL_ONCE(&g_once
, rtl_cache_once_init
);
1669 return (gp_cache_arena
!= 0);
1672 /* ================================================================= */
#if defined(__GNUC__)
/* run rtl_cache_fini() automatically at image unload */
static void rtl_cache_fini (void) __attribute__((destructor));
#elif defined(__SUNPRO_C) || defined(__SUNPRO_CC)
#pragma fini(rtl_cache_fini)
static void rtl_cache_fini (void);
#endif /* __GNUC__ || __SUNPRO_C */
1682 rtl_cache_fini (void)
1684 if (gp_cache_arena
!= 0)
1686 rtl_cache_type
* cache
, * head
;
1688 rtl_cache_wsupdate_fini();
1690 if (gp_cache_bufctl_cache
!= 0)
1692 cache
= gp_cache_bufctl_cache
, gp_cache_bufctl_cache
= 0;
1693 rtl_cache_deactivate (cache
);
1694 rtl_cache_destructor (cache
);
1696 if (gp_cache_slab_cache
!= 0)
1698 cache
= gp_cache_slab_cache
, gp_cache_slab_cache
= 0;
1699 rtl_cache_deactivate (cache
);
1700 rtl_cache_destructor (cache
);
1702 if (gp_cache_magazine_cache
!= 0)
1704 cache
= gp_cache_magazine_cache
, gp_cache_magazine_cache
= 0;
1705 rtl_cache_deactivate (cache
);
1706 rtl_cache_destructor (cache
);
1708 if (gp_cache_arena
!= 0)
1710 rtl_arena_destroy (gp_cache_arena
);
1714 RTL_MEMORY_LOCK_ACQUIRE(&(g_cache_list
.m_lock
));
1715 head
= &(g_cache_list
.m_cache_head
);
1716 for (cache
= head
->m_cache_next
; cache
!= head
; cache
= cache
->m_cache_next
)
1719 "rtl_cache_fini(\"%s\") "
1720 "[slab]: allocs: %"PRIu64
", frees: %"PRIu64
"; total: %lu, used: %lu; "
1721 "[cpu]: allocs: %"PRIu64
", frees: %"PRIu64
"; "
1722 "[total]: allocs: %"PRIu64
", frees: %"PRIu64
"",
1724 cache
->m_slab_stats
.m_alloc
, cache
->m_slab_stats
.m_free
,
1725 cache
->m_slab_stats
.m_mem_total
, cache
->m_slab_stats
.m_mem_alloc
,
1726 cache
->m_cpu_stats
.m_alloc
, cache
->m_cpu_stats
.m_free
,
1727 cache
->m_slab_stats
.m_alloc
+ cache
->m_cpu_stats
.m_alloc
,
1728 cache
->m_slab_stats
.m_free
+ cache
->m_cpu_stats
.m_free
1731 RTL_MEMORY_LOCK_RELEASE(&(g_cache_list
.m_lock
));
1735 /* ================================================================= */