/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
/*
 * This file is part of the LibreOffice project.
 *
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/.
 *
 * This file incorporates work covered by the following license notice:
 *
 *   Licensed to the Apache Software Foundation (ASF) under one or more
 *   contributor license agreements. See the NOTICE file distributed
 *   with this work for additional information regarding copyright
 *   ownership. The ASF licenses this file to you under the Apache
 *   License, Version 2.0 (the "License"); you may not use this file
 *   except in compliance with the License. You may obtain a copy of
 *   the License at http://www.apache.org/licenses/LICENSE-2.0 .
 */

#include "alloc_cache.hxx"
#include "alloc_impl.hxx"
#include "alloc_arena.hxx"
#include "internal/rtllifecycle.h"
#include "sal/macros.h"
#include "osl/diagnose.h"

#include <cassert>
#include <string.h>
#include <stdio.h>
extern AllocMode alloc_mode;

/* ================================================================= *
 *
 * cache internals.
 *
 * ================================================================= */

/** g_cache_list
 *  @internal
 */
struct rtl_cache_list_st
{
    rtl_memory_lock_type m_lock;
    rtl_cache_type       m_cache_head;

#if defined(SAL_UNX)
    pthread_t            m_update_thread;
    pthread_cond_t       m_update_cond;
#elif defined(SAL_W32)
    HANDLE               m_update_thread;
    HANDLE               m_update_cond;
#endif /* SAL_UNX || SAL_W32 */
    int                  m_update_done;
};

static rtl_cache_list_st g_cache_list;
/** gp_cache_arena
 *
 *  provided for cache_type allocations, and hash_table resizing.
 *
 *  @internal
 */
static rtl_arena_type * gp_cache_arena = 0;

/** gp_cache_magazine_cache
 *  @internal
 */
static rtl_cache_type * gp_cache_magazine_cache = 0;

/** gp_cache_slab_cache
 *  @internal
 */
static rtl_cache_type * gp_cache_slab_cache = 0;

/** gp_cache_bufctl_cache
 *  @internal
 */
static rtl_cache_type * gp_cache_bufctl_cache = 0;
/* ================================================================= */

/** RTL_CACHE_HASH_INDEX()
 */
#define RTL_CACHE_HASH_INDEX_IMPL(a, s, q, m) \
    ((((a) + ((a) >> (s)) + ((a) >> ((s) << 1))) >> (q)) & (m))

#define RTL_CACHE_HASH_INDEX(cache, addr) \
    RTL_CACHE_HASH_INDEX_IMPL((addr), (cache)->m_hash_shift, (cache)->m_type_shift, ((cache)->m_hash_size - 1))
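
/* Worked example (illustrative, values assumed): with m_hash_shift = 5,
 * m_type_shift = 4 and m_hash_size = 32, a buffer address 'a' maps to
 * bucket
 *
 *   ((a + (a >> 5) + (a >> 10)) >> 4) & 31
 *
 * i.e. the address is folded with two shifted copies of itself before
 * the type-granularity bits are shifted off and the bucket mask is
 * applied, so buffers spaced m_type_size apart spread across buckets.
 */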
/** rtl_cache_hash_rescale()
 */
static void
rtl_cache_hash_rescale (
    rtl_cache_type * cache,
    sal_Size         new_size
)
{
    rtl_cache_bufctl_type ** new_table;
    sal_Size                 new_bytes;

    new_bytes = new_size * sizeof(rtl_cache_bufctl_type*);
    new_table = (rtl_cache_bufctl_type**)rtl_arena_alloc(gp_cache_arena, &new_bytes);

    if (new_table != 0)
    {
        rtl_cache_bufctl_type ** old_table;
        sal_Size                 old_size, i;

        memset (new_table, 0, new_bytes);

        RTL_MEMORY_LOCK_ACQUIRE(&(cache->m_slab_lock));

        old_table = cache->m_hash_table;
        old_size  = cache->m_hash_size;

        // SAL_INFO(
        //     "sal.rtl",
        //     "rtl_cache_hash_rescale(" << cache->m_name << "): nbuf: "
        //     << (cache->m_slab_stats.m_alloc - cache->m_slab_stats.m_free)
        //     << " (ave: "
        //     << ((cache->m_slab_stats.m_alloc - cache->m_slab_stats.m_free)
        //            >> cache->m_hash_shift)
        //     << "), frees: " << cache->m_slab_stats.m_free << " [old_size: "
        //     << old_size << ", new_size: " << new_size << ']');

        cache->m_hash_table = new_table;
        cache->m_hash_size  = new_size;
        cache->m_hash_shift = highbit(cache->m_hash_size) - 1;

        for (i = 0; i < old_size; i++)
        {
            rtl_cache_bufctl_type * curr = old_table[i];
            while (curr != 0)
            {
                rtl_cache_bufctl_type  * next = curr->m_next;
                rtl_cache_bufctl_type ** head;

                head = &(cache->m_hash_table[RTL_CACHE_HASH_INDEX(cache, curr->m_addr)]);
                curr->m_next = (*head);
                (*head) = curr;

                curr = next;
            }
            old_table[i] = 0;
        }

        RTL_MEMORY_LOCK_RELEASE(&(cache->m_slab_lock));

        if (old_table != cache->m_hash_table_0)
        {
            sal_Size old_bytes = old_size * sizeof(rtl_cache_bufctl_type*);
            rtl_arena_free (gp_cache_arena, old_table, old_bytes);
        }
    }
}
/** rtl_cache_hash_insert()
 */
inline sal_uIntPtr
rtl_cache_hash_insert (
    rtl_cache_type *        cache,
    rtl_cache_bufctl_type * bufctl
)
{
    rtl_cache_bufctl_type ** ppHead;

    ppHead = &(cache->m_hash_table[RTL_CACHE_HASH_INDEX(cache, bufctl->m_addr)]);

    bufctl->m_next = (*ppHead);
    (*ppHead) = bufctl;

    return (bufctl->m_addr);
}
/** rtl_cache_hash_remove()
 */
static rtl_cache_bufctl_type *
rtl_cache_hash_remove (
    rtl_cache_type * cache,
    sal_uIntPtr      addr
)
{
    rtl_cache_bufctl_type ** ppHead;
    rtl_cache_bufctl_type  * bufctl;
    sal_Size                 lookups = 0;

    ppHead = &(cache->m_hash_table[RTL_CACHE_HASH_INDEX(cache, addr)]);
    while ((bufctl = *ppHead) != 0)
    {
        if (bufctl->m_addr == addr)
        {
            *ppHead = bufctl->m_next, bufctl->m_next = 0;
            break;
        }

        lookups += 1;
        ppHead = &(bufctl->m_next);
    }

    assert(bufctl != 0); // bad free

    if (lookups > 1)
    {
        sal_Size nbuf = (sal_Size)(cache->m_slab_stats.m_alloc - cache->m_slab_stats.m_free);
        if (nbuf > 4 * cache->m_hash_size)
        {
            if (!(cache->m_features & RTL_CACHE_FEATURE_RESCALE))
            {
                sal_Size ave = nbuf >> cache->m_hash_shift;
                sal_Size new_size = cache->m_hash_size << (highbit(ave) - 1);

                cache->m_features |= RTL_CACHE_FEATURE_RESCALE;
                RTL_MEMORY_LOCK_RELEASE(&(cache->m_slab_lock));
                rtl_cache_hash_rescale (cache, new_size);
                RTL_MEMORY_LOCK_ACQUIRE(&(cache->m_slab_lock));
                cache->m_features &= ~RTL_CACHE_FEATURE_RESCALE;
            }
        }
    }

    return (bufctl);
}
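
/* Worked example (illustrative only): with m_hash_size = 8 (so
 * m_hash_shift = highbit(8) - 1 = 3) and nbuf = 64 live buffers, the
 * rescale threshold nbuf > 4 * m_hash_size holds; the average chain
 * length is ave = 64 >> 3 = 8, and the table grows to
 * new_size = 8 << (highbit(8) - 1) = 64 buckets, pushing the expected
 * chain length back toward one.
 */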
/* ================================================================= */

/** RTL_CACHE_SLAB()
 */
#define RTL_CACHE_SLAB(addr, size) \
    (((rtl_cache_slab_type*)(RTL_MEMORY_P2END((sal_uIntPtr)(addr), (size)))) - 1)
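
/* Worked example (illustrative; assumes the slab is slab-size aligned,
 * which the embedded-header case requires): for a slab spanning
 * 0x10000..0x11FFF (size = 0x2000), any buffer address within it,
 * e.g. 0x10300, yields RTL_MEMORY_P2END(0x10300, 0x2000) == 0x12000;
 * subtracting one rtl_cache_slab_type then places the embedded header
 * in the last sizeof(rtl_cache_slab_type) bytes of the slab, so every
 * buffer in the slab recovers the same header.
 */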
/** rtl_cache_slab_constructor()
 */
static int
rtl_cache_slab_constructor (void * obj, SAL_UNUSED_PARAMETER void *)
{
    rtl_cache_slab_type * slab = (rtl_cache_slab_type*)(obj);

    QUEUE_START_NAMED(slab, slab_);
    slab->m_ntypes = 0;

    return (1);
}
/** rtl_cache_slab_destructor()
 */
static void
rtl_cache_slab_destructor (void * obj, SAL_UNUSED_PARAMETER void *)
{
    rtl_cache_slab_type * slab = static_cast< rtl_cache_slab_type * >(obj);
    assert(QUEUE_STARTED_NAMED(slab, slab_)); // assure removed from queue(s)
    assert(slab->m_ntypes == 0); // assure no longer referenced
    (void) slab; // avoid warnings
}
/** rtl_cache_slab_create()
 *
 *  @precond cache->m_slab_lock released.
 */
static rtl_cache_slab_type *
rtl_cache_slab_create (
    rtl_cache_type * cache
)
{
    rtl_cache_slab_type * slab = 0;
    void *                addr;
    sal_Size              size;

    size = cache->m_slab_size;
    addr = rtl_arena_alloc (cache->m_source, &size);
    if (addr != 0)
    {
        assert(size >= cache->m_slab_size);

        if (cache->m_features & RTL_CACHE_FEATURE_HASH)
        {
            /* allocate slab struct from slab cache */
            assert(cache != gp_cache_slab_cache);
            slab = (rtl_cache_slab_type*)rtl_cache_alloc (gp_cache_slab_cache);
        }
        else
        {
            /* construct embedded slab struct */
            slab = RTL_CACHE_SLAB(addr, cache->m_slab_size);
            (void) rtl_cache_slab_constructor (slab, 0);
        }
        if (slab != 0)
        {
            slab->m_data = (sal_uIntPtr)(addr);

            /* dynamic freelist initialization */
            slab->m_bp = slab->m_data;
            slab->m_sp = 0;
        }
        else
        {
            rtl_arena_free (cache->m_source, addr, size);
        }
    }
    return (slab);
}
/** rtl_cache_slab_destroy()
 *
 *  @precond cache->m_slab_lock released.
 */
static void
rtl_cache_slab_destroy (
    rtl_cache_type *      cache,
    rtl_cache_slab_type * slab
)
{
    void *   addr   = (void*)(slab->m_data);
    sal_Size refcnt = slab->m_ntypes; slab->m_ntypes = 0;

    if (cache->m_features & RTL_CACHE_FEATURE_HASH)
    {
        /* cleanup bufctl(s) for free buffer(s) */
        sal_Size ntypes = (slab->m_bp - slab->m_data) / cache->m_type_size;
        for (ntypes -= refcnt; slab->m_sp != 0; ntypes--)
        {
            rtl_cache_bufctl_type * bufctl = slab->m_sp;

            /* pop from freelist */
            slab->m_sp = bufctl->m_next, bufctl->m_next = 0;

            /* return bufctl struct to bufctl cache */
            rtl_cache_free (gp_cache_bufctl_cache, bufctl);
        }
        assert(ntypes == 0);

        /* return slab struct to slab cache */
        rtl_cache_free (gp_cache_slab_cache, slab);
    }
    else
    {
        /* destruct embedded slab struct */
        rtl_cache_slab_destructor (slab, 0);
    }

    if ((refcnt == 0) || (cache->m_features & RTL_CACHE_FEATURE_BULKDESTROY))
    {
        /* free memory */
        rtl_arena_free (cache->m_source, addr, cache->m_slab_size);
    }
}
/** rtl_cache_slab_populate()
 *
 *  @precond cache->m_slab_lock acquired.
 */
static int
rtl_cache_slab_populate (
    rtl_cache_type * cache
)
{
    rtl_cache_slab_type * slab;

    RTL_MEMORY_LOCK_RELEASE(&(cache->m_slab_lock));
    slab = rtl_cache_slab_create (cache);
    RTL_MEMORY_LOCK_ACQUIRE(&(cache->m_slab_lock));
    if (slab != 0)
    {
        /* update buffer start addr w/ current color */
        slab->m_bp += cache->m_ncolor;

        /* update color for next slab */
        cache->m_ncolor += cache->m_type_align;
        if (cache->m_ncolor > cache->m_ncolor_max)
            cache->m_ncolor = 0;

        /* update stats */
        cache->m_slab_stats.m_mem_total += cache->m_slab_size;

        /* insert onto 'free' queue */
        QUEUE_INSERT_HEAD_NAMED(&(cache->m_free_head), slab, slab_);
    }
    return (slab != 0);
}
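
/* Worked example (illustrative only): with m_type_align = 8 and
 * m_ncolor_max = 24, successive slabs offset their first buffer by
 * 0, 8, 16, 24, 0, ... bytes, so the buffer at a given index does not
 * land on the same cache line in every slab.
 */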
/* ================================================================= */

/** rtl_cache_slab_alloc()
 *
 *  Allocate a buffer from slab layer; used by magazine layer.
 */
static void *
rtl_cache_slab_alloc (
    rtl_cache_type * cache
)
{
    void                * addr = 0;
    rtl_cache_slab_type * head;

    RTL_MEMORY_LOCK_ACQUIRE(&(cache->m_slab_lock));

    head = &(cache->m_free_head);
    if ((head->m_slab_next != head) || rtl_cache_slab_populate (cache))
    {
        rtl_cache_slab_type   * slab;
        rtl_cache_bufctl_type * bufctl;

        slab = head->m_slab_next;
        assert(slab->m_ntypes < cache->m_ntypes);

        if (slab->m_sp == 0)
        {
            /* initialize bufctl w/ current 'slab->m_bp' */
            assert(slab->m_bp < slab->m_data + cache->m_ntypes * cache->m_type_size + cache->m_ncolor_max);
            if (cache->m_features & RTL_CACHE_FEATURE_HASH)
            {
                /* allocate bufctl */
                assert(cache != gp_cache_bufctl_cache);
                bufctl = (rtl_cache_bufctl_type*)rtl_cache_alloc (gp_cache_bufctl_cache);
                if (bufctl == 0)
                {
                    /* out of memory */
                    RTL_MEMORY_LOCK_RELEASE(&(cache->m_slab_lock));
                    return (0);
                }

                bufctl->m_addr = slab->m_bp;
                bufctl->m_slab = (sal_uIntPtr)(slab);
            }
            else
            {
                /* embedded bufctl */
                bufctl = (rtl_cache_bufctl_type*)(slab->m_bp);
            }
            bufctl->m_next = 0;

            /* update 'slab->m_bp' to next free buffer */
            slab->m_bp += cache->m_type_size;

            /* assign bufctl to freelist */
            slab->m_sp = bufctl;
        }

        /* pop front */
        bufctl = slab->m_sp;
        slab->m_sp = bufctl->m_next;

        /* increment usage, check for full slab */
        if ((slab->m_ntypes += 1) == cache->m_ntypes)
        {
            /* remove from 'free' queue */
            QUEUE_REMOVE_NAMED(slab, slab_);

            /* insert onto 'used' queue (tail) */
            QUEUE_INSERT_TAIL_NAMED(&(cache->m_used_head), slab, slab_);
        }

        /* update stats */
        cache->m_slab_stats.m_alloc     += 1;
        cache->m_slab_stats.m_mem_alloc += cache->m_type_size;

        if (cache->m_features & RTL_CACHE_FEATURE_HASH)
            addr = (void*)rtl_cache_hash_insert (cache, bufctl);
        else
            addr = bufctl;
    }

    RTL_MEMORY_LOCK_RELEASE(&(cache->m_slab_lock));
    return (addr);
}
/** rtl_cache_slab_free()
 *
 *  Return a buffer to slab layer; used by magazine layer.
 */
static void
rtl_cache_slab_free (
    rtl_cache_type * cache,
    void *           addr
)
{
    rtl_cache_bufctl_type * bufctl;
    rtl_cache_slab_type   * slab;

    RTL_MEMORY_LOCK_ACQUIRE(&(cache->m_slab_lock));

    /* determine slab from addr */
    if (cache->m_features & RTL_CACHE_FEATURE_HASH)
    {
        bufctl = rtl_cache_hash_remove (cache, (sal_uIntPtr)(addr));
        slab = (bufctl != 0) ? (rtl_cache_slab_type*)(bufctl->m_slab) : 0;
    }
    else
    {
        /* embedded slab struct */
        bufctl = (rtl_cache_bufctl_type*)(addr);
        slab = RTL_CACHE_SLAB(addr, cache->m_slab_size);
    }

    if (slab != 0)
    {
        /* check for full slab */
        if (slab->m_ntypes == cache->m_ntypes)
        {
            /* remove from 'used' queue */
            QUEUE_REMOVE_NAMED(slab, slab_);

            /* insert onto 'free' queue (head) */
            QUEUE_INSERT_HEAD_NAMED(&(cache->m_free_head), slab, slab_);
        }

        /* push onto freelist */
        bufctl->m_next = slab->m_sp;
        slab->m_sp = bufctl;

        /* update stats */
        cache->m_slab_stats.m_free      += 1;
        cache->m_slab_stats.m_mem_alloc -= cache->m_type_size;

        /* decrement usage, check for empty slab */
        if ((slab->m_ntypes -= 1) == 0)
        {
            /* remove from 'free' queue */
            QUEUE_REMOVE_NAMED(slab, slab_);

            /* update stats */
            cache->m_slab_stats.m_mem_total -= cache->m_slab_size;

            /* free 'empty' slab */
            RTL_MEMORY_LOCK_RELEASE(&(cache->m_slab_lock));
            rtl_cache_slab_destroy (cache, slab);
            return;
        }
    }

    RTL_MEMORY_LOCK_RELEASE(&(cache->m_slab_lock));
}
/* ================================================================= */

/** rtl_cache_magazine_constructor()
 */
static int
rtl_cache_magazine_constructor (void * obj, SAL_UNUSED_PARAMETER void *)
{
    rtl_cache_magazine_type * mag = (rtl_cache_magazine_type*)(obj);
    /* @@@ sal_Size size = (sal_Size)(arg); @@@ */

    mag->m_mag_next = 0;
    mag->m_mag_size = RTL_CACHE_MAGAZINE_SIZE;
    mag->m_mag_used = 0;

    return (1);
}
/** rtl_cache_magazine_destructor()
 */
static void
rtl_cache_magazine_destructor (void * obj, SAL_UNUSED_PARAMETER void *)
{
    rtl_cache_magazine_type * mag = static_cast< rtl_cache_magazine_type * >(obj);
    assert(mag->m_mag_next == 0); // assure removed from queue(s)
    assert(mag->m_mag_used == 0); // assure no longer referenced
    (void) mag; // avoid warnings
}
/** rtl_cache_magazine_clear()
 */
static void
rtl_cache_magazine_clear (
    rtl_cache_type *          cache,
    rtl_cache_magazine_type * mag
)
{
    for (; mag->m_mag_used > 0; --mag->m_mag_used)
    {
        void * obj = mag->m_objects[mag->m_mag_used - 1];
        mag->m_objects[mag->m_mag_used - 1] = 0;

        if (cache->m_destructor != 0)
        {
            /* destruct object */
            (cache->m_destructor)(obj, cache->m_userarg);
        }

        /* return buffer to slab layer */
        rtl_cache_slab_free (cache, obj);
    }
}
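
/* Note: a magazine is a fixed-size LIFO stack of constructed objects
 * (m_objects[0 .. m_mag_used - 1]). Objects parked in a magazine stay
 * constructed; the client destructor runs only here, when a magazine
 * is cleared and its buffers are handed back to the slab layer.
 */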
/* ================================================================= */

/** rtl_cache_depot_enqueue()
 *
 *  @precond cache->m_depot_lock acquired.
 */
inline void
rtl_cache_depot_enqueue (
    rtl_cache_depot_type *    depot,
    rtl_cache_magazine_type * mag
)
{
    /* enqueue empty magazine */
    mag->m_mag_next = depot->m_mag_next;
    depot->m_mag_next = mag;

    /* update depot stats */
    depot->m_mag_count++;
}

#if defined(__SUNPRO_C) || defined(__SUNPRO_CC)
#pragma inline(rtl_cache_depot_enqueue)
#endif /* __SUNPRO_C */
/** rtl_cache_depot_dequeue()
 *
 *  @precond cache->m_depot_lock acquired.
 */
inline rtl_cache_magazine_type *
rtl_cache_depot_dequeue (
    rtl_cache_depot_type * depot
)
{
    rtl_cache_magazine_type * mag = 0;
    if (depot->m_mag_count > 0)
    {
        /* dequeue magazine */
        assert(depot->m_mag_next != 0);

        mag = depot->m_mag_next;
        depot->m_mag_next = mag->m_mag_next;
        mag->m_mag_next = 0;

        /* update depot stats */
        depot->m_mag_count--;
        if(depot->m_curr_min > depot->m_mag_count)
        {
            depot->m_curr_min = depot->m_mag_count;
        }
    }
    return (mag);
}

#if defined(__SUNPRO_C) || defined(__SUNPRO_CC)
#pragma inline(rtl_cache_depot_dequeue)
#endif /* __SUNPRO_C */
/** rtl_cache_depot_exchange_alloc()
 *
 *  @precond cache->m_depot_lock acquired.
 */
inline rtl_cache_magazine_type *
rtl_cache_depot_exchange_alloc (
    rtl_cache_type *          cache,
    rtl_cache_magazine_type * empty
)
{
    rtl_cache_magazine_type * full;

    assert((empty == 0) || (empty->m_mag_used == 0));

    /* dequeue full magazine */
    full = rtl_cache_depot_dequeue (&(cache->m_depot_full));
    if ((full != 0) && (empty != 0))
    {
        /* enqueue empty magazine */
        rtl_cache_depot_enqueue (&(cache->m_depot_empty), empty);
    }

    assert((full == 0) || (full->m_mag_used > 0));

    return (full);
}

#if defined(__SUNPRO_C) || defined(__SUNPRO_CC)
#pragma inline(rtl_cache_depot_exchange_alloc)
#endif /* __SUNPRO_C */
/** rtl_cache_depot_exchange_free()
 *
 *  @precond cache->m_depot_lock acquired.
 */
inline rtl_cache_magazine_type *
rtl_cache_depot_exchange_free (
    rtl_cache_type *          cache,
    rtl_cache_magazine_type * full
)
{
    rtl_cache_magazine_type * empty;

    assert((full == 0) || (full->m_mag_used > 0));

    /* dequeue empty magazine */
    empty = rtl_cache_depot_dequeue (&(cache->m_depot_empty));
    if ((empty != 0) && (full != 0))
    {
        /* enqueue full magazine */
        rtl_cache_depot_enqueue (&(cache->m_depot_full), full);
    }

    assert((empty == 0) || (empty->m_mag_used == 0));

    return (empty);
}

#if defined(__SUNPRO_C) || defined(__SUNPRO_CC)
#pragma inline(rtl_cache_depot_exchange_free)
#endif /* __SUNPRO_C */
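
/* Sketch of the depot exchange protocol (descriptive comment only):
 * each cache works on two magazines, m_cpu_curr and m_cpu_prev. When
 * the allocation path finds both exhausted, it trades the empty
 * previous magazine for a full one:
 *
 *   full = rtl_cache_depot_exchange_alloc (cache, prev);
 *   // 'prev' (empty) is queued on m_depot_empty; 'full' comes from
 *   // m_depot_full, or is 0 if no full magazine is available.
 *
 * rtl_cache_depot_exchange_free() is the mirror image used by the
 * free path, trading a full magazine for an empty one.
 */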
/** rtl_cache_depot_populate()
 *
 *  @precond cache->m_depot_lock acquired.
 */
static int
rtl_cache_depot_populate (
    rtl_cache_type * cache
)
{
    rtl_cache_magazine_type * empty = 0;

    if (cache->m_magazine_cache != 0)
    {
        /* allocate new empty magazine */
        RTL_MEMORY_LOCK_RELEASE(&(cache->m_depot_lock));
        empty = (rtl_cache_magazine_type*)rtl_cache_alloc (cache->m_magazine_cache);
        RTL_MEMORY_LOCK_ACQUIRE(&(cache->m_depot_lock));
        if (empty != 0)
        {
            /* enqueue (new) empty magazine */
            rtl_cache_depot_enqueue (&(cache->m_depot_empty), empty);
        }
    }
    return (empty != 0);
}
745 /** rtl_cache_constructor()
748 rtl_cache_constructor (void * obj
)
750 rtl_cache_type
* cache
= (rtl_cache_type
*)(obj
);
752 memset (cache
, 0, sizeof(rtl_cache_type
));
755 QUEUE_START_NAMED(cache
, cache_
);
758 (void)RTL_MEMORY_LOCK_INIT(&(cache
->m_slab_lock
));
760 QUEUE_START_NAMED(&(cache
->m_free_head
), slab_
);
761 QUEUE_START_NAMED(&(cache
->m_used_head
), slab_
);
763 cache
->m_hash_table
= cache
->m_hash_table_0
;
764 cache
->m_hash_size
= RTL_CACHE_HASH_SIZE
;
765 cache
->m_hash_shift
= highbit(cache
->m_hash_size
) - 1;
768 (void)RTL_MEMORY_LOCK_INIT(&(cache
->m_depot_lock
));
/** rtl_cache_destructor()
 */
static void
rtl_cache_destructor (void * obj)
{
    rtl_cache_type * cache = (rtl_cache_type*)(obj);

    /* linkage */
    assert(QUEUE_STARTED_NAMED(cache, cache_));

    /* slab layer */
    (void)RTL_MEMORY_LOCK_DESTROY(&(cache->m_slab_lock));

    assert(QUEUE_STARTED_NAMED(&(cache->m_free_head), slab_));
    assert(QUEUE_STARTED_NAMED(&(cache->m_used_head), slab_));

    assert(cache->m_hash_table == cache->m_hash_table_0);
    assert(cache->m_hash_size  == RTL_CACHE_HASH_SIZE);
    assert(cache->m_hash_shift == (sal_Size)(highbit(cache->m_hash_size) - 1));

    /* depot layer */
    (void)RTL_MEMORY_LOCK_DESTROY(&(cache->m_depot_lock));
}
/* ================================================================= */

/** rtl_cache_activate()
 */
static rtl_cache_type *
rtl_cache_activate (
    rtl_cache_type * cache,
    const char *     name,
    sal_Size         objsize,
    sal_Size         objalign,
    int  (SAL_CALL * constructor)(void * obj, void * userarg),
    void (SAL_CALL * destructor) (void * obj, void * userarg),
    void (SAL_CALL * reclaim)    (void * userarg),
    void *           userarg,
    rtl_arena_type * source,
    int              flags
)
{
    assert(cache != 0);
    if (cache != 0)
    {
        sal_Size slabsize;

        snprintf (cache->m_name, sizeof(cache->m_name), "%s", name);

        /* ensure minimum size (embedded bufctl linkage) */
        if(objsize < sizeof(rtl_cache_bufctl_type*))
        {
            objsize = sizeof(rtl_cache_bufctl_type*);
        }

        if (objalign == 0)
        {
            /* determine default alignment */
            if (objsize >= RTL_MEMORY_ALIGNMENT_8)
                objalign = RTL_MEMORY_ALIGNMENT_8;
            else
                objalign = RTL_MEMORY_ALIGNMENT_4;
        }
        else
        {
            /* ensure minimum alignment */
            if(objalign < RTL_MEMORY_ALIGNMENT_4)
            {
                objalign = RTL_MEMORY_ALIGNMENT_4;
            }
        }
        assert(RTL_MEMORY_ISP2(objalign));

        cache->m_type_size  = objsize = RTL_MEMORY_P2ROUNDUP(objsize, objalign);
        cache->m_type_align = objalign;
        cache->m_type_shift = highbit(cache->m_type_size) - 1;

        cache->m_constructor = constructor;
        cache->m_destructor  = destructor;
        cache->m_reclaim     = reclaim;
        cache->m_userarg     = userarg;

        /* slab layer */
        cache->m_source = source;

        slabsize = source->m_quantum; /* minimum slab size */
        if (flags & RTL_CACHE_FLAG_QUANTUMCACHE)
        {
            /* next power of 2 above 3 * qcache_max */
            if(slabsize < (1UL << highbit(3 * source->m_qcache_max)))
            {
                slabsize = (1UL << highbit(3 * source->m_qcache_max));
            }
        }
        else
        {
            /* waste at most 1/8 of slab */
            if(slabsize < cache->m_type_size * 8)
            {
                slabsize = cache->m_type_size * 8;
            }
        }

        slabsize = RTL_MEMORY_P2ROUNDUP(slabsize, source->m_quantum);
        if (!RTL_MEMORY_ISP2(slabsize))
            slabsize = 1UL << highbit(slabsize);
        cache->m_slab_size = slabsize;

        if (cache->m_slab_size > source->m_quantum)
        {
            assert(gp_cache_slab_cache != 0);
            assert(gp_cache_bufctl_cache != 0);

            cache->m_features  |= RTL_CACHE_FEATURE_HASH;
            cache->m_ntypes     = cache->m_slab_size / cache->m_type_size;
            cache->m_ncolor_max = cache->m_slab_size % cache->m_type_size;
        }
        else
        {
            /* embedded slab struct */
            cache->m_ntypes     = (cache->m_slab_size - sizeof(rtl_cache_slab_type)) / cache->m_type_size;
            cache->m_ncolor_max = (cache->m_slab_size - sizeof(rtl_cache_slab_type)) % cache->m_type_size;
        }

        assert(cache->m_ntypes > 0);
        cache->m_ncolor = 0;

        if (flags & RTL_CACHE_FLAG_BULKDESTROY)
        {
            /* allow bulk slab delete upon cache deactivation */
            cache->m_features |= RTL_CACHE_FEATURE_BULKDESTROY;
        }

        /* magazine layer */
        if (!(flags & RTL_CACHE_FLAG_NOMAGAZINE))
        {
            assert(gp_cache_magazine_cache != 0);
            cache->m_magazine_cache = gp_cache_magazine_cache;
        }

        /* insert into cache list */
        RTL_MEMORY_LOCK_ACQUIRE(&(g_cache_list.m_lock));
        QUEUE_INSERT_TAIL_NAMED(&(g_cache_list.m_cache_head), cache, cache_);
        RTL_MEMORY_LOCK_RELEASE(&(g_cache_list.m_lock));
    }
    return (cache);
}
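
/* Worked example (illustrative, values assumed): activating a cache
 * with objsize = 96 on a source arena with m_quantum = 4096 and no
 * RTL_CACHE_FLAG_QUANTUMCACHE: m_type_size stays 96 (a multiple of the
 * default 8-byte alignment); the "waste at most 1/8" rule needs only
 * 96 * 8 = 768 bytes, so the quantum minimum wins and m_slab_size =
 * 4096. Since m_slab_size == m_quantum, the slab struct is embedded
 * and m_ntypes = (4096 - sizeof(rtl_cache_slab_type)) / 96.
 */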
/** rtl_cache_deactivate()
 */
static void
rtl_cache_deactivate (
    rtl_cache_type * cache
)
{
    /* remove from cache list */
    RTL_MEMORY_LOCK_ACQUIRE(&(g_cache_list.m_lock));
    int active = QUEUE_STARTED_NAMED(cache, cache_) == 0;
    QUEUE_REMOVE_NAMED(cache, cache_);
    RTL_MEMORY_LOCK_RELEASE(&(g_cache_list.m_lock));

    assert(active); // orphaned cache
    (void) active; // avoid warnings

    /* cleanup magazine layer */
    if (cache->m_magazine_cache != 0)
    {
        rtl_cache_type *          mag_cache;
        rtl_cache_magazine_type * mag;

        /* prevent recursion */
        mag_cache = cache->m_magazine_cache, cache->m_magazine_cache = 0;

        /* cleanup cpu layer */
        if ((mag = cache->m_cpu_curr) != 0)
        {
            cache->m_cpu_curr = 0;
            rtl_cache_magazine_clear (cache, mag);
            rtl_cache_free (mag_cache, mag);
        }
        if ((mag = cache->m_cpu_prev) != 0)
        {
            cache->m_cpu_prev = 0;
            rtl_cache_magazine_clear (cache, mag);
            rtl_cache_free (mag_cache, mag);
        }

        /* cleanup depot layer */
        while ((mag = rtl_cache_depot_dequeue(&(cache->m_depot_full))) != 0)
        {
            rtl_cache_magazine_clear (cache, mag);
            rtl_cache_free (mag_cache, mag);
        }
        while ((mag = rtl_cache_depot_dequeue(&(cache->m_depot_empty))) != 0)
        {
            rtl_cache_magazine_clear (cache, mag);
            rtl_cache_free (mag_cache, mag);
        }
    }

    // SAL_INFO(
    //     "sal.rtl",
    //     "rtl_cache_deactivate(" << cache->m_name << "): [slab]: allocs: "
    //     << cache->m_slab_stats.m_alloc << ", frees: "
    //     << cache->m_slab_stats.m_free << "; total: "
    //     << cache->m_slab_stats.m_mem_total << ", used: "
    //     << cache->m_slab_stats.m_mem_alloc << "; [cpu]: allocs: "
    //     << cache->m_cpu_stats.m_alloc << ", frees: "
    //     << cache->m_cpu_stats.m_free << "; [total]: allocs: "
    //     << (cache->m_slab_stats.m_alloc + cache->m_cpu_stats.m_alloc)
    //     << ", frees: "
    //     << (cache->m_slab_stats.m_free + cache->m_cpu_stats.m_free));

    /* cleanup slab layer */
    if (cache->m_slab_stats.m_alloc > cache->m_slab_stats.m_free)
    {
        // SAL_INFO(
        //     "sal.rtl",
        //     "rtl_cache_deactivate(" << cache->m_name << "): cleaning up "
        //     << (cache->m_slab_stats.m_alloc - cache->m_slab_stats.m_free)
        //     << " leaked buffer(s) [" << cache->m_slab_stats.m_mem_alloc
        //     << " bytes] [" << cache->m_slab_stats.m_mem_total << " total]");

        if (cache->m_features & RTL_CACHE_FEATURE_HASH)
        {
            /* cleanup bufctl(s) for leaking buffer(s) */
            sal_Size i, n = cache->m_hash_size;
            for (i = 0; i < n; i++)
            {
                rtl_cache_bufctl_type * bufctl;
                while ((bufctl = cache->m_hash_table[i]) != 0)
                {
                    /* pop from hash table */
                    cache->m_hash_table[i] = bufctl->m_next, bufctl->m_next = 0;

                    /* return to bufctl cache */
                    rtl_cache_free (gp_cache_bufctl_cache, bufctl);
                }
            }
        }
        {
            /* force cleanup of remaining slabs */
            rtl_cache_slab_type *head, *slab;

            head = &(cache->m_used_head);
            for (slab = head->m_slab_next; slab != head; slab = head->m_slab_next)
            {
                /* remove from 'used' queue */
                QUEUE_REMOVE_NAMED(slab, slab_);

                /* update stats */
                cache->m_slab_stats.m_mem_total -= cache->m_slab_size;

                /* free slab */
                rtl_cache_slab_destroy (cache, slab);
            }

            head = &(cache->m_free_head);
            for (slab = head->m_slab_next; slab != head; slab = head->m_slab_next)
            {
                /* remove from 'free' queue */
                QUEUE_REMOVE_NAMED(slab, slab_);

                /* update stats */
                cache->m_slab_stats.m_mem_total -= cache->m_slab_size;

                /* free slab */
                rtl_cache_slab_destroy (cache, slab);
            }
        }
    }

    if (cache->m_hash_table != cache->m_hash_table_0)
    {
        rtl_arena_free (
            gp_cache_arena,
            cache->m_hash_table,
            cache->m_hash_size * sizeof(rtl_cache_bufctl_type*));

        cache->m_hash_table = cache->m_hash_table_0;
        cache->m_hash_size  = RTL_CACHE_HASH_SIZE;
        cache->m_hash_shift = highbit(cache->m_hash_size) - 1;
    }
}
/* ================================================================= *
 *
 * cache implementation.
 *
 * ================================================================= */
/** rtl_cache_create()
 */
rtl_cache_type *
SAL_CALL rtl_cache_create (
    const char *     name,
    sal_Size         objsize,
    sal_Size         objalign,
    int  (SAL_CALL * constructor)(void * obj, void * userarg),
    void (SAL_CALL * destructor) (void * obj, void * userarg),
    void (SAL_CALL * reclaim)    (void * userarg),
    void *           userarg,
    rtl_arena_type * source,
    int              flags
) SAL_THROW_EXTERN_C()
{
    rtl_cache_type * result = 0;
    sal_Size         size   = sizeof(rtl_cache_type);

try_alloc:
    result = (rtl_cache_type*)rtl_arena_alloc (gp_cache_arena, &size);
    if (result != 0)
    {
        rtl_cache_type * cache = result;
        (void) rtl_cache_constructor (cache);

        if (!source)
        {
            /* use default arena */
            assert(gp_default_arena != 0);
            source = gp_default_arena;
        }

        result = rtl_cache_activate (
            cache,
            name,
            objsize,
            objalign,
            constructor,
            destructor,
            reclaim,
            userarg,
            source,
            flags
        );

        if (result == 0)
        {
            /* activation failed */
            rtl_cache_deactivate (cache);
            rtl_cache_destructor (cache);
            rtl_arena_free (gp_cache_arena, cache, size);
        }
    }
    else if (gp_cache_arena == 0)
    {
        ensureCacheSingleton();
        if (gp_cache_arena)
        {
            /* try again */
            goto try_alloc;
        }
    }
    return (result);
}
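
/* Usage sketch (illustrative only; my_lock_type and the my_lock_*
 * callbacks are hypothetical, not part of this file):
 */
#if 0
struct my_lock_type { int m_dummy; };

static int  my_lock_init (void * obj, void * userarg); /* returns 1 on success */
static void my_lock_fini (void * obj, void * userarg);

static void example ()
{
    rtl_cache_type * locks = rtl_cache_create (
        "my_lock_cache",
        sizeof(my_lock_type), /* objsize */
        0,                    /* objalign: default */
        my_lock_init,         /* constructor */
        my_lock_fini,         /* destructor */
        0,                    /* reclaim */
        0,                    /* userarg */
        0,                    /* source: default arena */
        0                     /* flags */
    );

    void * p = rtl_cache_alloc (locks); /* constructed object */
    rtl_cache_free (locks, p);          /* may be parked, still constructed */

    rtl_cache_destroy (locks);
}
#endif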
/** rtl_cache_destroy()
 */
void SAL_CALL rtl_cache_destroy (
    rtl_cache_type * cache
) SAL_THROW_EXTERN_C()
{
    if (cache != 0)
    {
        rtl_cache_deactivate (cache);
        rtl_cache_destructor (cache);
        rtl_arena_free (gp_cache_arena, cache, sizeof(rtl_cache_type));
    }
}
/** rtl_cache_alloc()
 */
void *
SAL_CALL rtl_cache_alloc (
    rtl_cache_type * cache
) SAL_THROW_EXTERN_C()
{
    void * obj = 0;

    if (cache == 0)
        return (0);

    if (alloc_mode == AMode_SYSTEM)
    {
        obj = rtl_allocateMemory(cache->m_type_size);
        if ((obj != 0) && (cache->m_constructor != 0))
        {
            if (!((cache->m_constructor)(obj, cache->m_userarg)))
            {
                /* construction failure */
                rtl_freeMemory(obj), obj = 0;
            }
        }
        return obj;
    }

    RTL_MEMORY_LOCK_ACQUIRE(&(cache->m_depot_lock));
    if (cache->m_cpu_curr != 0)
    {
        for (;;)
        {
            /* take object from magazine layer */
            rtl_cache_magazine_type *curr, *prev, *temp;

            curr = cache->m_cpu_curr;
            if ((curr != 0) && (curr->m_mag_used > 0))
            {
                obj = curr->m_objects[--curr->m_mag_used];
                cache->m_cpu_stats.m_alloc += 1;
                RTL_MEMORY_LOCK_RELEASE(&(cache->m_depot_lock));

                return (obj);
            }

            prev = cache->m_cpu_prev;
            if ((prev != 0) && (prev->m_mag_used > 0))
            {
                temp = cache->m_cpu_curr;
                cache->m_cpu_curr = cache->m_cpu_prev;
                cache->m_cpu_prev = temp;

                continue;
            }

            temp = rtl_cache_depot_exchange_alloc (cache, prev);
            if (temp != 0)
            {
                cache->m_cpu_prev = cache->m_cpu_curr;
                cache->m_cpu_curr = temp;

                continue;
            }

            /* no full magazine: fall through to slab layer */
            break;
        }
    }
    RTL_MEMORY_LOCK_RELEASE(&(cache->m_depot_lock));

    /* alloc buffer from slab layer */
    obj = rtl_cache_slab_alloc (cache);
    if ((obj != 0) && (cache->m_constructor != 0))
    {
        /* construct object */
        if (!((cache->m_constructor)(obj, cache->m_userarg)))
        {
            /* construction failure */
            rtl_cache_slab_free (cache, obj), obj = 0;
        }
    }
    return (obj);
}
/** rtl_cache_free()
 */
void
SAL_CALL rtl_cache_free (
    rtl_cache_type * cache,
    void *           obj
) SAL_THROW_EXTERN_C()
{
    if ((obj != 0) && (cache != 0))
    {
        if (alloc_mode == AMode_SYSTEM)
        {
            if (cache->m_destructor != 0)
            {
                /* destruct object */
                (cache->m_destructor)(obj, cache->m_userarg);
            }
            rtl_freeMemory(obj);
            return;
        }

        RTL_MEMORY_LOCK_ACQUIRE(&(cache->m_depot_lock));

        for (;;)
        {
            /* return object to magazine layer */
            rtl_cache_magazine_type *curr, *prev, *temp;

            curr = cache->m_cpu_curr;
            if ((curr != 0) && (curr->m_mag_used < curr->m_mag_size))
            {
                curr->m_objects[curr->m_mag_used++] = obj;
                cache->m_cpu_stats.m_free += 1;
                RTL_MEMORY_LOCK_RELEASE(&(cache->m_depot_lock));

                return;
            }

            prev = cache->m_cpu_prev;
            if ((prev != 0) && (prev->m_mag_used == 0))
            {
                temp = cache->m_cpu_curr;
                cache->m_cpu_curr = cache->m_cpu_prev;
                cache->m_cpu_prev = temp;

                continue;
            }

            temp = rtl_cache_depot_exchange_free (cache, prev);
            if (temp != 0)
            {
                cache->m_cpu_prev = cache->m_cpu_curr;
                cache->m_cpu_curr = temp;

                continue;
            }

            if (rtl_cache_depot_populate(cache) != 0)
            {
                continue;
            }

            /* no empty magazine: fall through to slab layer */
            break;
        }

        RTL_MEMORY_LOCK_RELEASE(&(cache->m_depot_lock));

        /* no space for constructed object in magazine layer */
        if (cache->m_destructor != 0)
        {
            /* destruct object */
            (cache->m_destructor)(obj, cache->m_userarg);
        }

        /* return buffer to slab layer */
        rtl_cache_slab_free (cache, obj);
    }
}
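
/* Note: on this final path the object is destructed before its buffer
 * returns to the slab layer; on the magazine path above it is parked
 * as-is. rtl_cache_alloc() therefore always hands out a constructed
 * object, and rtl_cache_free() must be given one.
 */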
/* ================================================================= *
 *
 * cache wsupdate (machdep) internals.
 *
 * ================================================================= */

/** rtl_cache_wsupdate_init()
 *
 *  @precond g_cache_list.m_lock initialized
 */
static void
rtl_cache_wsupdate_init();

/** rtl_cache_wsupdate_wait()
 *
 *  @precond g_cache_list.m_lock acquired
 */
static void
rtl_cache_wsupdate_wait (
    unsigned int seconds
);

/** rtl_cache_wsupdate_fini()
 *
 */
static void
rtl_cache_wsupdate_fini();
/* ================================================================= */

#if defined(SAL_UNX)

#include <sys/time.h>

static void *
rtl_cache_wsupdate_all (void * arg);

static void
rtl_cache_wsupdate_init()
{
    RTL_MEMORY_LOCK_ACQUIRE(&(g_cache_list.m_lock));
    g_cache_list.m_update_done = 0;
    (void) pthread_cond_init (&(g_cache_list.m_update_cond), NULL);
    if (pthread_create (
            &(g_cache_list.m_update_thread), NULL,
            rtl_cache_wsupdate_all, (void*)(10)) != 0)
    {
        /* failure */
        g_cache_list.m_update_thread = (pthread_t)(0);
    }
    RTL_MEMORY_LOCK_RELEASE(&(g_cache_list.m_lock));
}

static void
rtl_cache_wsupdate_wait (unsigned int seconds)
{
    if (seconds > 0)
    {
        struct timeval  now;
        struct timespec wakeup;

        gettimeofday(&now, 0);
        wakeup.tv_sec  = now.tv_sec + (seconds);
        wakeup.tv_nsec = now.tv_usec * 1000;

        (void) pthread_cond_timedwait (
            &(g_cache_list.m_update_cond),
            &(g_cache_list.m_lock),
            &wakeup);
    }
}

static void
rtl_cache_wsupdate_fini()
{
    RTL_MEMORY_LOCK_ACQUIRE(&(g_cache_list.m_lock));
    g_cache_list.m_update_done = 1;
    pthread_cond_signal (&(g_cache_list.m_update_cond));
    RTL_MEMORY_LOCK_RELEASE(&(g_cache_list.m_lock));

    if (g_cache_list.m_update_thread != (pthread_t)(0))
        pthread_join (g_cache_list.m_update_thread, NULL);
}
/* ================================================================= */

#elif defined(SAL_W32)

static DWORD WINAPI
rtl_cache_wsupdate_all (void * arg);

static void
rtl_cache_wsupdate_init()
{
    DWORD dwThreadId;

    RTL_MEMORY_LOCK_ACQUIRE(&(g_cache_list.m_lock));
    g_cache_list.m_update_done = 0;
    g_cache_list.m_update_cond = CreateEvent (0, TRUE, FALSE, 0);

    g_cache_list.m_update_thread =
        CreateThread (NULL, 0, rtl_cache_wsupdate_all, (LPVOID)(10), 0, &dwThreadId);
    RTL_MEMORY_LOCK_RELEASE(&(g_cache_list.m_lock));
}

static void
rtl_cache_wsupdate_wait (unsigned int seconds)
{
    if (seconds > 0)
    {
        RTL_MEMORY_LOCK_RELEASE(&(g_cache_list.m_lock));
        WaitForSingleObject (g_cache_list.m_update_cond, (DWORD)(seconds * 1000));
        RTL_MEMORY_LOCK_ACQUIRE(&(g_cache_list.m_lock));
    }
}

static void
rtl_cache_wsupdate_fini()
{
    RTL_MEMORY_LOCK_ACQUIRE(&(g_cache_list.m_lock));
    g_cache_list.m_update_done = 1;
    SetEvent (g_cache_list.m_update_cond);
    RTL_MEMORY_LOCK_RELEASE(&(g_cache_list.m_lock));

    WaitForSingleObject (g_cache_list.m_update_thread, INFINITE);
}

#endif /* SAL_UNX || SAL_W32 */
/* ================================================================= */

/** rtl_cache_depot_wsupdate()
 *
 *  update depot stats and purge excess magazines.
 *
 *  @precond cache->m_depot_lock acquired
 */
static void
rtl_cache_depot_wsupdate (
    rtl_cache_type *       cache,
    rtl_cache_depot_type * depot
)
{
    sal_Size npurge;

    depot->m_prev_min = depot->m_curr_min;
    depot->m_curr_min = depot->m_mag_count;

    npurge = depot->m_curr_min < depot->m_prev_min ? depot->m_curr_min : depot->m_prev_min;
    for (; npurge > 0; npurge--)
    {
        rtl_cache_magazine_type * mag = rtl_cache_depot_dequeue (depot);
        if (mag != 0)
        {
            RTL_MEMORY_LOCK_RELEASE(&(cache->m_depot_lock));
            rtl_cache_magazine_clear (cache, mag);
            rtl_cache_free (cache->m_magazine_cache, mag);
            RTL_MEMORY_LOCK_ACQUIRE(&(cache->m_depot_lock));
        }
    }
}
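
/* Worked example (illustrative only): if a depot never dropped below
 * 3 magazines during the previous interval (m_prev_min = 3) and holds
 * at least 3 now, then npurge = 3 and three magazines are treated as
 * excess working set: each is dequeued, cleared and freed with the
 * depot lock dropped around the clear/free pair.
 */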
/** rtl_cache_wsupdate()
 *
 *  @precond cache->m_depot_lock released
 */
static void
rtl_cache_wsupdate (
    rtl_cache_type * cache
)
{
    if (cache->m_magazine_cache != 0)
    {
        RTL_MEMORY_LOCK_ACQUIRE(&(cache->m_depot_lock));

        // SAL_INFO(
        //     "sal.rtl",
        //     "rtl_cache_wsupdate(" << cache->m_name
        //     << ") [depot: count, curr_min, prev_min] full: "
        //     << cache->m_depot_full.m_mag_count << ", "
        //     << cache->m_depot_full.m_curr_min << ", "
        //     << cache->m_depot_full.m_prev_min << "; empty: "
        //     << cache->m_depot_empty.m_mag_count << ", "
        //     << cache->m_depot_empty.m_curr_min << ", "
        //     << cache->m_depot_empty.m_prev_min);

        rtl_cache_depot_wsupdate (cache, &(cache->m_depot_full));
        rtl_cache_depot_wsupdate (cache, &(cache->m_depot_empty));

        RTL_MEMORY_LOCK_RELEASE(&(cache->m_depot_lock));
    }
}
/** rtl_cache_wsupdate_all()
 *
 */
#if defined(SAL_UNX)
static void *
#elif defined(SAL_W32)
static DWORD WINAPI
#endif /* SAL_UNX || SAL_W32 */
rtl_cache_wsupdate_all (void * arg)
{
    unsigned int seconds = sal::static_int_cast< unsigned int >(
        reinterpret_cast< sal_uIntPtr >(arg));

    RTL_MEMORY_LOCK_ACQUIRE(&(g_cache_list.m_lock));
    while (!g_cache_list.m_update_done)
    {
        rtl_cache_wsupdate_wait (seconds);
        if (!g_cache_list.m_update_done)
        {
            rtl_cache_type * head, * cache;

            head = &(g_cache_list.m_cache_head);
            for (cache  = head->m_cache_next;
                 cache != head;
                 cache  = cache->m_cache_next)
            {
                rtl_cache_wsupdate (cache);
            }
        }
    }
    RTL_MEMORY_LOCK_RELEASE(&(g_cache_list.m_lock));

    return (0);
}
/* ================================================================= *
 *
 * cache initialization.
 *
 * ================================================================= */

void
rtl_cache_init()
{
    {
        /* list of caches */
        RTL_MEMORY_LOCK_INIT(&(g_cache_list.m_lock));
        (void) rtl_cache_constructor (&(g_cache_list.m_cache_head));
    }
    {
        /* cache: internal arena */
        assert(gp_cache_arena == 0);

        gp_cache_arena = rtl_arena_create (
            "rtl_cache_internal_arena",
            64,   /* quantum */
            0,    /* no quantum caching */
            NULL, /* default source */
            rtl_arena_alloc,
            rtl_arena_free,
            0     /* flags: none */
        );
        assert(gp_cache_arena != 0);

        /* check 'gp_default_arena' initialization */
        assert(gp_default_arena != 0);
    }
    {
        /* cache: magazine cache */
        static rtl_cache_type g_cache_magazine_cache;

        assert(gp_cache_magazine_cache == 0);
        (void) rtl_cache_constructor (&g_cache_magazine_cache);

        gp_cache_magazine_cache = rtl_cache_activate (
            &g_cache_magazine_cache,
            "rtl_cache_magazine_cache",
            sizeof(rtl_cache_magazine_type), /* objsize  */
            0,                               /* objalign */
            rtl_cache_magazine_constructor,
            rtl_cache_magazine_destructor,
            0, /* reclaim */
            0, /* userarg: NYI */
            gp_default_arena, /* source */
            RTL_CACHE_FLAG_NOMAGAZINE /* during bootstrap; activated below */
        );
        assert(gp_cache_magazine_cache != 0);

        /* activate magazine layer */
        g_cache_magazine_cache.m_magazine_cache = gp_cache_magazine_cache;
    }
    {
        /* cache: slab (struct) cache */
        static rtl_cache_type g_cache_slab_cache;

        assert(gp_cache_slab_cache == 0);
        (void) rtl_cache_constructor (&g_cache_slab_cache);

        gp_cache_slab_cache = rtl_cache_activate (
            &g_cache_slab_cache,
            "rtl_cache_slab_cache",
            sizeof(rtl_cache_slab_type), /* objsize  */
            0,                           /* objalign */
            rtl_cache_slab_constructor,
            rtl_cache_slab_destructor,
            0, /* reclaim */
            0, /* userarg: none */
            gp_default_arena, /* source */
            0  /* flags: none */
        );
        assert(gp_cache_slab_cache != 0);
    }
    {
        /* cache: bufctl cache */
        static rtl_cache_type g_cache_bufctl_cache;

        assert(gp_cache_bufctl_cache == 0);
        (void) rtl_cache_constructor (&g_cache_bufctl_cache);

        gp_cache_bufctl_cache = rtl_cache_activate (
            &g_cache_bufctl_cache,
            "rtl_cache_bufctl_cache",
            sizeof(rtl_cache_bufctl_type), /* objsize */
            0,                             /* objalign */
            0, /* constructor */
            0, /* destructor */
            0, /* reclaim */
            0, /* userarg */
            gp_default_arena, /* source */
            0  /* flags: none */
        );
        assert(gp_cache_bufctl_cache != 0);
    }

    rtl_cache_wsupdate_init();
    // SAL_INFO("sal.rtl", "rtl_cache_init completed");
}
/* ================================================================= */

void
rtl_cache_fini()
{
    if (gp_cache_arena != 0)
    {
        rtl_cache_type * cache, * head;

        rtl_cache_wsupdate_fini();

        if (gp_cache_bufctl_cache != 0)
        {
            cache = gp_cache_bufctl_cache, gp_cache_bufctl_cache = 0;
            rtl_cache_deactivate (cache);
            rtl_cache_destructor (cache);
        }
        if (gp_cache_slab_cache != 0)
        {
            cache = gp_cache_slab_cache, gp_cache_slab_cache = 0;
            rtl_cache_deactivate (cache);
            rtl_cache_destructor (cache);
        }
        if (gp_cache_magazine_cache != 0)
        {
            cache = gp_cache_magazine_cache, gp_cache_magazine_cache = 0;
            rtl_cache_deactivate (cache);
            rtl_cache_destructor (cache);
        }
        if (gp_cache_arena != 0)
        {
            rtl_arena_destroy (gp_cache_arena);
            gp_cache_arena = 0;
        }

        RTL_MEMORY_LOCK_ACQUIRE(&(g_cache_list.m_lock));
        head = &(g_cache_list.m_cache_head);
        for (cache = head->m_cache_next; cache != head; cache = cache->m_cache_next)
        {
            // SAL_INFO(
            //     "sal.rtl",
            //     "rtl_cache_fini(" << cache->m_name << ") [slab]: allocs: "
            //     << cache->m_slab_stats.m_alloc << ", frees: "
            //     << cache->m_slab_stats.m_free << "; total: "
            //     << cache->m_slab_stats.m_mem_total << ", used: "
            //     << cache->m_slab_stats.m_mem_alloc << "; [cpu]: allocs: "
            //     << cache->m_cpu_stats.m_alloc << ", frees: "
            //     << cache->m_cpu_stats.m_free << "; [total]: allocs: "
            //     << (cache->m_slab_stats.m_alloc
            //         + cache->m_cpu_stats.m_alloc)
            //     << ", frees: "
            //     << (cache->m_slab_stats.m_free
            //         + cache->m_cpu_stats.m_free));
        }
        RTL_MEMORY_LOCK_RELEASE(&(g_cache_list.m_lock));
    }
    // SAL_INFO("sal.rtl", "rtl_cache_fini completed");
}

/* vim:set shiftwidth=4 softtabstop=4 expandtab: */