/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
/*
 * This file is part of the LibreOffice project.
 *
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/.
 *
 * This file incorporates work covered by the following license notice:
 *
 *   Licensed to the Apache Software Foundation (ASF) under one or more
 *   contributor license agreements. See the NOTICE file distributed
 *   with this work for additional information regarding copyright
 *   ownership. The ASF licenses this file to you under the Apache
 *   License, Version 2.0 (the "License"); you may not use this file
 *   except in compliance with the License. You may obtain a copy of
 *   the License at http://www.apache.org/licenses/LICENSE-2.0 .
 */
#include "alloc_cache.hxx"
#include "alloc_impl.hxx"
#include "alloc_arena.hxx"
#include "internal/rtllifecycle.h"
#include "sal/macros.h"
#include "osl/diagnose.h"
#include <osl/thread.hxx>

#include <cassert>
#include <string.h>
#include <stdio.h>

extern AllocMode alloc_mode;
/* ================================================================= *
 *
 * cache internals.
 *
 * ================================================================= */
/** g_cache_list
 *  @internal
 */
struct rtl_cache_list_st
{
    rtl_memory_lock_type m_lock;
    rtl_cache_type       m_cache_head;

#if defined(SAL_UNX)
    pthread_t            m_update_thread;
    pthread_cond_t       m_update_cond;
#elif defined(SAL_W32)
    HANDLE               m_update_thread;
    HANDLE               m_update_cond;
#endif /* SAL_UNX || SAL_W32 */
    int                  m_update_done;
};

static rtl_cache_list_st g_cache_list;
/** gp_cache_arena
 *  provided for cache_type allocations, and hash_table resizing.
 *
 *  @internal
 */
static rtl_arena_type * gp_cache_arena = 0;

/** gp_cache_magazine_cache
 *  @internal
 */
static rtl_cache_type * gp_cache_magazine_cache = 0;

/** gp_cache_slab_cache
 *  @internal
 */
static rtl_cache_type * gp_cache_slab_cache = 0;

/** gp_cache_bufctl_cache
 *  @internal
 */
static rtl_cache_type * gp_cache_bufctl_cache = 0;

/* ================================================================= */

/** RTL_CACHE_HASH_INDEX()
 */
#define RTL_CACHE_HASH_INDEX_IMPL(a, s, q, m) \
    ((((a) + ((a) >> (s)) + ((a) >> ((s) << 1))) >> (q)) & (m))

#define RTL_CACHE_HASH_INDEX(cache, addr) \
    RTL_CACHE_HASH_INDEX_IMPL((addr), (cache)->m_hash_shift, (cache)->m_type_shift, ((cache)->m_hash_size - 1))
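/* Illustration (added commentary, with assumed values; not part of the
 * original source): for a cache with m_hash_shift == 2, m_type_shift == 5
 * and m_hash_size == 8, the bucket index of an address expands to
 *
 *     ((addr + (addr >> 2) + (addr >> 4)) >> 5) & 7
 *
 * i.e. the address is folded with two right-shifted copies of itself
 * before the low (type-size) bits are dropped and the result is masked,
 * so buffers that differ only in their size-aligned low bits still
 * spread across buckets.
 */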
namespace
{

void
rtl_cache_hash_rescale (
    rtl_cache_type * cache,
    sal_Size         new_size
)
{
    rtl_cache_bufctl_type ** new_table;
    sal_Size                 new_bytes;

    new_bytes = new_size * sizeof(rtl_cache_bufctl_type*);
    new_table = (rtl_cache_bufctl_type**)rtl_arena_alloc(gp_cache_arena, &new_bytes);

    if (new_table != 0)
    {
        rtl_cache_bufctl_type ** old_table;
        sal_Size                 old_size, i;

        memset (new_table, 0, new_bytes);

        RTL_MEMORY_LOCK_ACQUIRE(&(cache->m_slab_lock));

        old_table = cache->m_hash_table;
        old_size  = cache->m_hash_size;

        // SAL_INFO(
        //     "sal.rtl",
        //     "rtl_cache_hash_rescale(" << cache->m_name << "): nbuf: "
        //         << (cache->m_slab_stats.m_alloc - cache->m_slab_stats.m_free)
        //         << " (ave: "
        //         << ((cache->m_slab_stats.m_alloc - cache->m_slab_stats.m_free)
        //             >> cache->m_hash_shift)
        //         << "), frees: " << cache->m_slab_stats.m_free << " [old_size: "
        //         << old_size << ", new_size: " << new_size << ']');

        cache->m_hash_table = new_table;
        cache->m_hash_size  = new_size;
        cache->m_hash_shift = highbit(cache->m_hash_size) - 1;

        for (i = 0; i < old_size; i++)
        {
            rtl_cache_bufctl_type * curr = old_table[i];
            while (curr != 0)
            {
                rtl_cache_bufctl_type  * next = curr->m_next;
                rtl_cache_bufctl_type ** head;

                head = &(cache->m_hash_table[RTL_CACHE_HASH_INDEX(cache, curr->m_addr)]);
                curr->m_next = (*head);
                (*head) = curr;

                curr = next;
            }
            old_table[i] = 0;
        }

        RTL_MEMORY_LOCK_RELEASE(&(cache->m_slab_lock));

        if (old_table != cache->m_hash_table_0)
        {
            sal_Size old_bytes = old_size * sizeof(rtl_cache_bufctl_type*);
            rtl_arena_free (gp_cache_arena, old_table, old_bytes);
        }
    }
}
inline sal_uIntPtr
rtl_cache_hash_insert (
    rtl_cache_type *        cache,
    rtl_cache_bufctl_type * bufctl
)
{
    rtl_cache_bufctl_type ** ppHead;

    ppHead = &(cache->m_hash_table[RTL_CACHE_HASH_INDEX(cache, bufctl->m_addr)]);

    bufctl->m_next = (*ppHead);
    (*ppHead) = bufctl;

    return (bufctl->m_addr);
}
/** rtl_cache_hash_remove()
 */
rtl_cache_bufctl_type *
rtl_cache_hash_remove (
    rtl_cache_type * cache,
    sal_uIntPtr      addr
)
{
    rtl_cache_bufctl_type ** ppHead;
    rtl_cache_bufctl_type  * bufctl;
    sal_Size                 lookups = 0;

    ppHead = &(cache->m_hash_table[RTL_CACHE_HASH_INDEX(cache, addr)]);
    while ((bufctl = *ppHead) != 0)
    {
        if (bufctl->m_addr == addr)
        {
            *ppHead = bufctl->m_next, bufctl->m_next = 0;
            break;
        }

        lookups += 1;
        ppHead = &(bufctl->m_next);
    }

    assert(bufctl != 0); // bad free

    if (lookups > 1)
    {
        sal_Size nbuf = (sal_Size)(cache->m_slab_stats.m_alloc - cache->m_slab_stats.m_free);
        if (nbuf > 4 * cache->m_hash_size)
        {
            if (!(cache->m_features & RTL_CACHE_FEATURE_RESCALE))
            {
                sal_Size ave = nbuf >> cache->m_hash_shift;
                sal_Size new_size = cache->m_hash_size << (highbit(ave) - 1);

                cache->m_features |= RTL_CACHE_FEATURE_RESCALE;
                RTL_MEMORY_LOCK_RELEASE(&(cache->m_slab_lock));
                rtl_cache_hash_rescale (cache, new_size);
                RTL_MEMORY_LOCK_ACQUIRE(&(cache->m_slab_lock));
                cache->m_features &= ~RTL_CACHE_FEATURE_RESCALE;
            }
        }
    }

    return (bufctl);
}
/* ================================================================= */

/** RTL_CACHE_SLAB()
 */
#define RTL_CACHE_SLAB(addr, size) \
    (((rtl_cache_slab_type*)(RTL_MEMORY_P2END((sal_uIntPtr)(addr), (size)))) - 1)
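/* Illustration (added commentary, not part of the original source): for
 * caches without RTL_CACHE_FEATURE_HASH the rtl_cache_slab_type header
 * is embedded at the very end of the slab's own memory, so the macro
 * rounds 'addr' up to the next 'size' boundary and steps back one
 * header struct:
 *
 *   addr                               RTL_MEMORY_P2END(addr, size)
 *    |<---- colored buffers ... ---->|<-- rtl_cache_slab_type -->|
 */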
/** rtl_cache_slab_constructor()
 */
int
rtl_cache_slab_constructor (void * obj, SAL_UNUSED_PARAMETER void *)
{
    rtl_cache_slab_type * slab = (rtl_cache_slab_type*)(obj);

    QUEUE_START_NAMED(slab, slab_);
    slab->m_ntypes = 0;

    return (1);
}

/** rtl_cache_slab_destructor()
 */
void
rtl_cache_slab_destructor (void * obj, SAL_UNUSED_PARAMETER void *)
{
    rtl_cache_slab_type * slab = static_cast< rtl_cache_slab_type * >(obj);
    assert(QUEUE_STARTED_NAMED(slab, slab_)); // assure removed from queue(s)
    assert(slab->m_ntypes == 0);              // assure no longer referenced
    (void) slab; // avoid warnings
}
/** rtl_cache_slab_create()
 *
 *  @precond cache->m_slab_lock released.
 */
rtl_cache_slab_type *
rtl_cache_slab_create (
    rtl_cache_type * cache
)
{
    rtl_cache_slab_type * slab = 0;
    void *                addr;
    sal_Size              size;

    size = cache->m_slab_size;
    addr = rtl_arena_alloc (cache->m_source, &size);
    if (addr != 0)
    {
        assert(size >= cache->m_slab_size);

        if (cache->m_features & RTL_CACHE_FEATURE_HASH)
        {
            /* allocate slab struct from slab cache */
            assert(cache != gp_cache_slab_cache);
            slab = (rtl_cache_slab_type*)rtl_cache_alloc (gp_cache_slab_cache);
        }
        else
        {
            /* construct embedded slab struct */
            slab = RTL_CACHE_SLAB(addr, cache->m_slab_size);
            (void) rtl_cache_slab_constructor (slab, 0);
        }
        if (slab != 0)
        {
            slab->m_data = (sal_uIntPtr)(addr);

            /* dynamic freelist initialization */
            slab->m_bp = slab->m_data;
            slab->m_sp = 0;
        }
        else
        {
            rtl_arena_free (cache->m_source, addr, size);
        }
    }
    return (slab);
}
/** rtl_cache_slab_destroy()
 *
 *  @precond cache->m_slab_lock released.
 */
void
rtl_cache_slab_destroy (
    rtl_cache_type *      cache,
    rtl_cache_slab_type * slab
)
{
    void *   addr   = (void*)(slab->m_data);
    sal_Size refcnt = slab->m_ntypes; slab->m_ntypes = 0;

    if (cache->m_features & RTL_CACHE_FEATURE_HASH)
    {
        /* cleanup bufctl(s) for free buffer(s) */
        sal_Size ntypes = (slab->m_bp - slab->m_data) / cache->m_type_size;
        for (ntypes -= refcnt; slab->m_sp != 0; ntypes--)
        {
            rtl_cache_bufctl_type * bufctl = slab->m_sp;

            /* pop from freelist */
            slab->m_sp = bufctl->m_next, bufctl->m_next = 0;

            /* return bufctl struct to bufctl cache */
            rtl_cache_free (gp_cache_bufctl_cache, bufctl);
        }
        assert(ntypes == 0);

        /* return slab struct to slab cache */
        rtl_cache_free (gp_cache_slab_cache, slab);
    }
    else
    {
        /* destruct embedded slab struct */
        rtl_cache_slab_destructor (slab, 0);
    }

    if ((refcnt == 0) || (cache->m_features & RTL_CACHE_FEATURE_BULKDESTROY))
    {
        /* free memory */
        rtl_arena_free (cache->m_source, addr, cache->m_slab_size);
    }
}
/** rtl_cache_slab_populate()
 *
 *  @precond cache->m_slab_lock acquired.
 */
bool
rtl_cache_slab_populate (
    rtl_cache_type * cache
)
{
    rtl_cache_slab_type * slab;

    RTL_MEMORY_LOCK_RELEASE(&(cache->m_slab_lock));
    slab = rtl_cache_slab_create (cache);
    RTL_MEMORY_LOCK_ACQUIRE(&(cache->m_slab_lock));
    if (slab != 0)
    {
        /* update buffer start addr w/ current color */
        slab->m_bp += cache->m_ncolor;

        /* update color for next slab */
        cache->m_ncolor += cache->m_type_align;
        if (cache->m_ncolor > cache->m_ncolor_max)
            cache->m_ncolor = 0;

        /* update stats */
        cache->m_slab_stats.m_mem_total += cache->m_slab_size;

        /* insert onto 'free' queue */
        QUEUE_INSERT_HEAD_NAMED(&(cache->m_free_head), slab, slab_);
    }
    return (slab != 0);
}
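/* Note (added commentary): m_ncolor staggers the first buffer of each
 * new slab by one more multiple of the object alignment, wrapping back
 * to 0 past m_ncolor_max, so identically indexed buffers in different
 * slabs do not all land on the same hardware cache lines; this is the
 * "cache coloring" of slab-allocator designs.
 */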
/* ================================================================= */

/** rtl_cache_slab_alloc()
 *
 *  Allocate a buffer from slab layer; used by magazine layer.
 */
void *
rtl_cache_slab_alloc (
    rtl_cache_type * cache
)
{
    void                * addr = 0;
    rtl_cache_slab_type * head;

    RTL_MEMORY_LOCK_ACQUIRE(&(cache->m_slab_lock));

    head = &(cache->m_free_head);
    if ((head->m_slab_next != head) || rtl_cache_slab_populate (cache))
    {
        rtl_cache_slab_type   * slab;
        rtl_cache_bufctl_type * bufctl;

        slab = head->m_slab_next;
        assert(slab->m_ntypes < cache->m_ntypes);

        if (slab->m_sp == 0)
        {
            /* initialize bufctl w/ current 'slab->m_bp' */
            assert(slab->m_bp < slab->m_data + cache->m_ntypes * cache->m_type_size + cache->m_ncolor_max);
            if (cache->m_features & RTL_CACHE_FEATURE_HASH)
            {
                /* allocate bufctl */
                assert(cache != gp_cache_bufctl_cache);
                bufctl = (rtl_cache_bufctl_type*)rtl_cache_alloc (gp_cache_bufctl_cache);
                if (bufctl == 0)
                {
                    /* out of memory */
                    RTL_MEMORY_LOCK_RELEASE(&(cache->m_slab_lock));
                    return (0);
                }

                bufctl->m_addr = slab->m_bp;
                bufctl->m_slab = (sal_uIntPtr)(slab);
            }
            else
            {
                /* embedded bufctl */
                bufctl = (rtl_cache_bufctl_type*)(slab->m_bp);
            }
            bufctl->m_next = 0;

            /* update 'slab->m_bp' to next free buffer */
            slab->m_bp += cache->m_type_size;

            /* assign bufctl to freelist */
            slab->m_sp = bufctl;
        }

        /* pop front */
        bufctl = slab->m_sp;
        slab->m_sp = bufctl->m_next;

        /* increment usage, check for full slab */
        if ((slab->m_ntypes += 1) == cache->m_ntypes)
        {
            /* remove from 'free' queue */
            QUEUE_REMOVE_NAMED(slab, slab_);

            /* insert onto 'used' queue (tail) */
            QUEUE_INSERT_TAIL_NAMED(&(cache->m_used_head), slab, slab_);
        }

        /* update stats */
        cache->m_slab_stats.m_alloc     += 1;
        cache->m_slab_stats.m_mem_alloc += cache->m_type_size;

        if (cache->m_features & RTL_CACHE_FEATURE_HASH)
            addr = (void*)rtl_cache_hash_insert (cache, bufctl);
        else
            addr = bufctl;
    }

    RTL_MEMORY_LOCK_RELEASE(&(cache->m_slab_lock));
    return (addr);
}
/** rtl_cache_slab_free()
 *
 *  Return a buffer to slab layer; used by magazine layer.
 */
void
rtl_cache_slab_free (
    rtl_cache_type * cache,
    void *           addr
)
{
    rtl_cache_bufctl_type * bufctl;
    rtl_cache_slab_type   * slab;

    RTL_MEMORY_LOCK_ACQUIRE(&(cache->m_slab_lock));

    /* determine slab from addr */
    if (cache->m_features & RTL_CACHE_FEATURE_HASH)
    {
        bufctl = rtl_cache_hash_remove (cache, (sal_uIntPtr)(addr));
        slab = (bufctl != 0) ? (rtl_cache_slab_type*)(bufctl->m_slab) : 0;
    }
    else
    {
        /* embedded slab struct */
        bufctl = (rtl_cache_bufctl_type*)(addr);
        slab = RTL_CACHE_SLAB(addr, cache->m_slab_size);
    }

    if (slab != 0)
    {
        /* check for full slab */
        if (slab->m_ntypes == cache->m_ntypes)
        {
            /* remove from 'used' queue */
            QUEUE_REMOVE_NAMED(slab, slab_);

            /* insert onto 'free' queue (head) */
            QUEUE_INSERT_HEAD_NAMED(&(cache->m_free_head), slab, slab_);
        }

        /* push front */
        bufctl->m_next = slab->m_sp;
        slab->m_sp = bufctl;

        /* update stats */
        cache->m_slab_stats.m_free      += 1;
        cache->m_slab_stats.m_mem_alloc -= cache->m_type_size;

        /* decrement usage, check for empty slab */
        if ((slab->m_ntypes -= 1) == 0)
        {
            /* remove from 'free' queue */
            QUEUE_REMOVE_NAMED(slab, slab_);

            /* update stats */
            cache->m_slab_stats.m_mem_total -= cache->m_slab_size;

            /* free 'empty' slab */
            RTL_MEMORY_LOCK_RELEASE(&(cache->m_slab_lock));
            rtl_cache_slab_destroy (cache, slab);
            return;
        }
    }

    RTL_MEMORY_LOCK_RELEASE(&(cache->m_slab_lock));
}
/* ================================================================= */

/** rtl_cache_magazine_constructor()
 */
int
rtl_cache_magazine_constructor (void * obj, SAL_UNUSED_PARAMETER void *)
{
    rtl_cache_magazine_type * mag = (rtl_cache_magazine_type*)(obj);
    /* @@@ sal_Size size = (sal_Size)(arg); @@@ */

    mag->m_mag_next = 0;
    mag->m_mag_size = RTL_CACHE_MAGAZINE_SIZE;
    mag->m_mag_used = 0;

    return (1);
}

/** rtl_cache_magazine_destructor()
 */
void
rtl_cache_magazine_destructor (void * obj, SAL_UNUSED_PARAMETER void *)
{
    rtl_cache_magazine_type * mag = static_cast< rtl_cache_magazine_type * >(
        obj);
    assert(mag->m_mag_next == 0); // assure removed from queue(s)
    assert(mag->m_mag_used == 0); // assure no longer referenced
    (void) mag; // avoid warnings
}

/** rtl_cache_magazine_clear()
 */
void
rtl_cache_magazine_clear (
    rtl_cache_type *          cache,
    rtl_cache_magazine_type * mag
)
{
    for (; mag->m_mag_used > 0; --mag->m_mag_used)
    {
        void * obj = mag->m_objects[mag->m_mag_used - 1];
        mag->m_objects[mag->m_mag_used - 1] = 0;

        if (cache->m_destructor != 0)
        {
            /* destruct object */
            (cache->m_destructor)(obj, cache->m_userarg);
        }

        /* return buffer to slab layer */
        rtl_cache_slab_free (cache, obj);
    }
}
/* ================================================================= */

/** rtl_cache_depot_enqueue()
 *
 *  @precond cache->m_depot_lock acquired.
 */
inline void
rtl_cache_depot_enqueue (
    rtl_cache_depot_type *    depot,
    rtl_cache_magazine_type * mag
)
{
    /* enqueue empty magazine */
    mag->m_mag_next = depot->m_mag_next;
    depot->m_mag_next = mag;

    /* update depot stats */
    depot->m_mag_count++;
}

/** rtl_cache_depot_dequeue()
 *
 *  @precond cache->m_depot_lock acquired.
 */
inline rtl_cache_magazine_type *
rtl_cache_depot_dequeue (
    rtl_cache_depot_type * depot
)
{
    rtl_cache_magazine_type * mag = 0;
    if (depot->m_mag_count > 0)
    {
        /* dequeue magazine */
        assert(depot->m_mag_next != 0);

        mag = depot->m_mag_next;
        depot->m_mag_next = mag->m_mag_next;
        mag->m_mag_next = 0;

        /* update depot stats */
        depot->m_mag_count--;
        if (depot->m_curr_min > depot->m_mag_count)
        {
            depot->m_curr_min = depot->m_mag_count;
        }
    }
    return (mag);
}
/** rtl_cache_depot_exchange_alloc()
 *
 *  @precond cache->m_depot_lock acquired.
 */
inline rtl_cache_magazine_type *
rtl_cache_depot_exchange_alloc (
    rtl_cache_type *          cache,
    rtl_cache_magazine_type * empty
)
{
    rtl_cache_magazine_type * full;

    assert((empty == 0) || (empty->m_mag_used == 0));

    /* dequeue full magazine */
    full = rtl_cache_depot_dequeue (&(cache->m_depot_full));
    if ((full != 0) && (empty != 0))
    {
        /* enqueue empty magazine */
        rtl_cache_depot_enqueue (&(cache->m_depot_empty), empty);
    }

    assert((full == 0) || (full->m_mag_used > 0));

    return (full);
}

/** rtl_cache_depot_exchange_free()
 *
 *  @precond cache->m_depot_lock acquired.
 */
inline rtl_cache_magazine_type *
rtl_cache_depot_exchange_free (
    rtl_cache_type *          cache,
    rtl_cache_magazine_type * full
)
{
    rtl_cache_magazine_type * empty;

    assert((full == 0) || (full->m_mag_used > 0));

    /* dequeue empty magazine */
    empty = rtl_cache_depot_dequeue (&(cache->m_depot_empty));
    if ((empty != 0) && (full != 0))
    {
        /* enqueue full magazine */
        rtl_cache_depot_enqueue (&(cache->m_depot_full), full);
    }

    assert((empty == 0) || (empty->m_mag_used == 0));

    return (empty);
}
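/* Note (added commentary): these two helpers are the depot side of the
 * magazine scheme described by Bonwick and Adams: under the depot lock
 * the caller trades an empty magazine for a full one (alloc path) or a
 * full magazine for an empty one (free path), so a whole magazine of
 * objects moves between the CPU layer and the depot in O(1).
 */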
/** rtl_cache_depot_populate()
 *
 *  @precond cache->m_depot_lock acquired.
 */
bool
rtl_cache_depot_populate (
    rtl_cache_type * cache
)
{
    rtl_cache_magazine_type * empty = 0;

    if (cache->m_magazine_cache != 0)
    {
        /* allocate new empty magazine */
        RTL_MEMORY_LOCK_RELEASE(&(cache->m_depot_lock));
        empty = (rtl_cache_magazine_type*)rtl_cache_alloc (cache->m_magazine_cache);
        RTL_MEMORY_LOCK_ACQUIRE(&(cache->m_depot_lock));
        if (empty != 0)
        {
            /* enqueue (new) empty magazine */
            rtl_cache_depot_enqueue (&(cache->m_depot_empty), empty);
        }
    }
    return (empty != 0);
}
/* ================================================================= */

/** rtl_cache_constructor()
 */
int
rtl_cache_constructor (void * obj)
{
    rtl_cache_type * cache = (rtl_cache_type*)(obj);

    memset (cache, 0, sizeof(rtl_cache_type));

    /* linkage */
    QUEUE_START_NAMED(cache, cache_);

    /* slab layer */
    (void)RTL_MEMORY_LOCK_INIT(&(cache->m_slab_lock));

    QUEUE_START_NAMED(&(cache->m_free_head), slab_);
    QUEUE_START_NAMED(&(cache->m_used_head), slab_);

    cache->m_hash_table = cache->m_hash_table_0;
    cache->m_hash_size  = RTL_CACHE_HASH_SIZE;
    cache->m_hash_shift = highbit(cache->m_hash_size) - 1;

    /* depot layer */
    (void)RTL_MEMORY_LOCK_INIT(&(cache->m_depot_lock));

    return (1);
}

/** rtl_cache_destructor()
 */
void
rtl_cache_destructor (void * obj)
{
    rtl_cache_type * cache = (rtl_cache_type*)(obj);

    /* linkage */
    assert(QUEUE_STARTED_NAMED(cache, cache_));

    /* slab layer */
    (void)RTL_MEMORY_LOCK_DESTROY(&(cache->m_slab_lock));

    assert(QUEUE_STARTED_NAMED(&(cache->m_free_head), slab_));
    assert(QUEUE_STARTED_NAMED(&(cache->m_used_head), slab_));

    assert(cache->m_hash_table == cache->m_hash_table_0);
    assert(cache->m_hash_size == RTL_CACHE_HASH_SIZE);
    assert(cache->m_hash_shift == (sal_Size)(highbit(cache->m_hash_size) - 1));

    /* depot layer */
    (void)RTL_MEMORY_LOCK_DESTROY(&(cache->m_depot_lock));
}
/* ================================================================= */

/** rtl_cache_activate()
 */
rtl_cache_type *
rtl_cache_activate (
    rtl_cache_type * cache,
    const char *     name,
    size_t           objsize,
    size_t           objalign,
    int  (SAL_CALL * constructor)(void * obj, void * userarg),
    void (SAL_CALL * destructor) (void * obj, void * userarg),
    void (SAL_CALL * reclaim)    (void * userarg),
    void *           userarg,
    rtl_arena_type * source,
    int              flags
)
{
    assert(cache != 0);
    if (cache != 0)
    {
        sal_Size slabsize;

        snprintf (cache->m_name, sizeof(cache->m_name), "%s", name);

        /* ensure minimum size (embedded bufctl linkage) */
        if (objsize < sizeof(rtl_cache_bufctl_type*))
        {
            objsize = sizeof(rtl_cache_bufctl_type*);
        }

        if (objalign == 0)
        {
            /* determine default alignment */
            if (objsize >= RTL_MEMORY_ALIGNMENT_8)
                objalign = RTL_MEMORY_ALIGNMENT_8;
            else
                objalign = RTL_MEMORY_ALIGNMENT_4;
        }
        else
        {
            /* ensure minimum alignment */
            if (objalign < RTL_MEMORY_ALIGNMENT_4)
            {
                objalign = RTL_MEMORY_ALIGNMENT_4;
            }
        }
        assert(RTL_MEMORY_ISP2(objalign));

        cache->m_type_size  = objsize = RTL_MEMORY_P2ROUNDUP(objsize, objalign);
        cache->m_type_align = objalign;
        cache->m_type_shift = highbit(cache->m_type_size) - 1;

        cache->m_constructor = constructor;
        cache->m_destructor  = destructor;
        cache->m_reclaim     = reclaim;
        cache->m_userarg     = userarg;

        /* slab layer */
        cache->m_source = source;

        slabsize = source->m_quantum; /* minimum slab size */
        if (flags & RTL_CACHE_FLAG_QUANTUMCACHE)
        {
            /* next power of 2 above 3 * qcache_max */
            if (slabsize < (((sal_Size)1) << highbit(3 * source->m_qcache_max)))
            {
                slabsize = (((sal_Size)1) << highbit(3 * source->m_qcache_max));
            }
        }
        else
        {
            /* waste at most 1/8 of slab */
            if (slabsize < cache->m_type_size * 8)
            {
                slabsize = cache->m_type_size * 8;
            }
        }

        slabsize = RTL_MEMORY_P2ROUNDUP(slabsize, source->m_quantum);
        if (!RTL_MEMORY_ISP2(slabsize))
            slabsize = (((sal_Size)1) << highbit(slabsize));
        cache->m_slab_size = slabsize;

        if (cache->m_slab_size > source->m_quantum)
        {
            assert(gp_cache_slab_cache != 0);
            assert(gp_cache_bufctl_cache != 0);

            cache->m_features  |= RTL_CACHE_FEATURE_HASH;
            cache->m_ntypes     = cache->m_slab_size / cache->m_type_size;
            cache->m_ncolor_max = cache->m_slab_size % cache->m_type_size;
        }
        else
        {
            /* embedded slab struct */
            cache->m_ntypes     = (cache->m_slab_size - sizeof(rtl_cache_slab_type)) / cache->m_type_size;
            cache->m_ncolor_max = (cache->m_slab_size - sizeof(rtl_cache_slab_type)) % cache->m_type_size;
        }

        assert(cache->m_ntypes > 0);
        cache->m_ncolor = 0;

        if (flags & RTL_CACHE_FLAG_BULKDESTROY)
        {
            /* allow bulk slab delete upon cache deactivation */
            cache->m_features |= RTL_CACHE_FEATURE_BULKDESTROY;
        }

        /* magazine layer */
        if (!(flags & RTL_CACHE_FLAG_NOMAGAZINE))
        {
            assert(gp_cache_magazine_cache != 0);
            cache->m_magazine_cache = gp_cache_magazine_cache;
        }

        /* insert into cache list */
        RTL_MEMORY_LOCK_ACQUIRE(&(g_cache_list.m_lock));
        QUEUE_INSERT_TAIL_NAMED(&(g_cache_list.m_cache_head), cache, cache_);
        RTL_MEMORY_LOCK_RELEASE(&(g_cache_list.m_lock));
    }
    return (cache);
}
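/* Worked example (added commentary, with assumed numbers): for a
 * 32-byte type on a source arena with a 4096-byte quantum and no
 * RTL_CACHE_FLAG_QUANTUMCACHE, slabsize starts at 4096, already
 * satisfies the waste-at-most-1/8 rule (32 * 8 == 256 <= 4096), and
 * stays 4096.  Since slabsize then equals the quantum, the slab struct
 * is embedded, giving
 *
 *     m_ntypes     = (4096 - sizeof(rtl_cache_slab_type)) / 32
 *     m_ncolor_max = (4096 - sizeof(rtl_cache_slab_type)) % 32
 *
 * with the remainder consumed incrementally as per-slab coloring.
 */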
/** rtl_cache_deactivate()
 */
void
rtl_cache_deactivate (
    rtl_cache_type * cache
)
{
    /* remove from cache list */
    RTL_MEMORY_LOCK_ACQUIRE(&(g_cache_list.m_lock));
    bool active = !QUEUE_STARTED_NAMED(cache, cache_);
    QUEUE_REMOVE_NAMED(cache, cache_);
    RTL_MEMORY_LOCK_RELEASE(&(g_cache_list.m_lock));

    assert(active); // orphaned cache
    (void)active;

    /* cleanup magazine layer */
    if (cache->m_magazine_cache != 0)
    {
        rtl_cache_type *          mag_cache;
        rtl_cache_magazine_type * mag;

        /* prevent recursion */
        mag_cache = cache->m_magazine_cache, cache->m_magazine_cache = 0;

        /* cleanup cpu layer */
        if ((mag = cache->m_cpu_curr) != 0)
        {
            cache->m_cpu_curr = 0;
            rtl_cache_magazine_clear (cache, mag);
            rtl_cache_free (mag_cache, mag);
        }
        if ((mag = cache->m_cpu_prev) != 0)
        {
            cache->m_cpu_prev = 0;
            rtl_cache_magazine_clear (cache, mag);
            rtl_cache_free (mag_cache, mag);
        }

        /* cleanup depot layer */
        while ((mag = rtl_cache_depot_dequeue(&(cache->m_depot_full))) != 0)
        {
            rtl_cache_magazine_clear (cache, mag);
            rtl_cache_free (mag_cache, mag);
        }
        while ((mag = rtl_cache_depot_dequeue(&(cache->m_depot_empty))) != 0)
        {
            rtl_cache_magazine_clear (cache, mag);
            rtl_cache_free (mag_cache, mag);
        }
    }

    // SAL_INFO(
    //     "sal.rtl",
    //     "rtl_cache_deactivate(" << cache->m_name << "): [slab]: allocs: "
    //         << cache->m_slab_stats.m_alloc << ", frees: "
    //         << cache->m_slab_stats.m_free << "; total: "
    //         << cache->m_slab_stats.m_mem_total << ", used: "
    //         << cache->m_slab_stats.m_mem_alloc << "; [cpu]: allocs: "
    //         << cache->m_cpu_stats.m_alloc << ", frees: "
    //         << cache->m_cpu_stats.m_free << "; [total]: allocs: "
    //         << (cache->m_slab_stats.m_alloc + cache->m_cpu_stats.m_alloc)
    //         << ", frees: "
    //         << (cache->m_slab_stats.m_free + cache->m_cpu_stats.m_free));

    /* cleanup slab layer */
    if (cache->m_slab_stats.m_alloc > cache->m_slab_stats.m_free)
    {
        // SAL_INFO(
        //     "sal.rtl",
        //     "rtl_cache_deactivate(" << cache->m_name << "): cleaning up "
        //         << (cache->m_slab_stats.m_alloc - cache->m_slab_stats.m_free)
        //         << " leaked buffer(s) [" << cache->m_slab_stats.m_mem_alloc
        //         << " bytes] [" << cache->m_slab_stats.m_mem_total << " total]");

        if (cache->m_features & RTL_CACHE_FEATURE_HASH)
        {
            /* cleanup bufctl(s) for leaking buffer(s) */
            sal_Size i, n = cache->m_hash_size;
            for (i = 0; i < n; i++)
            {
                rtl_cache_bufctl_type * bufctl;
                while ((bufctl = cache->m_hash_table[i]) != 0)
                {
                    /* pop from hash table */
                    cache->m_hash_table[i] = bufctl->m_next, bufctl->m_next = 0;

                    /* return to bufctl cache */
                    rtl_cache_free (gp_cache_bufctl_cache, bufctl);
                }
            }
        }
        {
            /* force cleanup of remaining slabs */
            rtl_cache_slab_type *head, *slab;

            head = &(cache->m_used_head);
            for (slab = head->m_slab_next; slab != head; slab = head->m_slab_next)
            {
                /* remove from 'used' queue */
                QUEUE_REMOVE_NAMED(slab, slab_);

                /* update stats */
                cache->m_slab_stats.m_mem_total -= cache->m_slab_size;

                /* free slab */
                rtl_cache_slab_destroy (cache, slab);
            }

            head = &(cache->m_free_head);
            for (slab = head->m_slab_next; slab != head; slab = head->m_slab_next)
            {
                /* remove from 'free' queue */
                QUEUE_REMOVE_NAMED(slab, slab_);

                /* update stats */
                cache->m_slab_stats.m_mem_total -= cache->m_slab_size;

                /* free slab */
                rtl_cache_slab_destroy (cache, slab);
            }
        }
    }

    if (cache->m_hash_table != cache->m_hash_table_0)
    {
        rtl_arena_free (
            gp_cache_arena,
            cache->m_hash_table,
            cache->m_hash_size * sizeof(rtl_cache_bufctl_type*));

        cache->m_hash_table = cache->m_hash_table_0;
        cache->m_hash_size  = RTL_CACHE_HASH_SIZE;
        cache->m_hash_shift = highbit(cache->m_hash_size) - 1;
    }
}
} //namespace

/* ================================================================= *
 *
 * cache implementation.
 *
 * ================================================================= */
/** rtl_cache_create()
 */
rtl_cache_type *
SAL_CALL rtl_cache_create (
    const char *     name,
    sal_Size         objsize,
    sal_Size         objalign,
    int  (SAL_CALL * constructor)(void * obj, void * userarg),
    void (SAL_CALL * destructor) (void * obj, void * userarg),
    void (SAL_CALL * reclaim)    (void * userarg),
    void *           userarg,
    rtl_arena_type * source,
    int              flags
) SAL_THROW_EXTERN_C()
{
    rtl_cache_type * result = 0;
    sal_Size         size   = sizeof(rtl_cache_type);

try_alloc:
    result = (rtl_cache_type*)rtl_arena_alloc (gp_cache_arena, &size);
    if (result != 0)
    {
        rtl_cache_type * cache = result;
        (void) rtl_cache_constructor (cache);

        if (!source)
        {
            /* use default arena */
            assert(gp_default_arena != 0);
            source = gp_default_arena;
        }

        result = rtl_cache_activate (
            cache,
            name,
            objsize,
            objalign,
            constructor,
            destructor,
            reclaim,
            userarg,
            source,
            flags
        );

        if (result == 0)
        {
            /* activation failed */
            rtl_cache_deactivate (cache);
            rtl_cache_destructor (cache);
            rtl_arena_free (gp_cache_arena, cache, size);
        }
    }
    else if (gp_cache_arena == 0)
    {
        ensureCacheSingleton();
        if (gp_cache_arena)
        {
            /* try again */
            goto try_alloc;
        }
    }
    return (result);
}
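/* Usage sketch (added example, not part of the original source): a
 * client creates a cache for fixed-size objects once, then allocates
 * and frees through it.  "example_cache" and MyType are illustrative
 * placeholders only:
 *
 *     rtl_cache_type * cache = rtl_cache_create (
 *         "example_cache", sizeof(MyType), 0,
 *         0, 0, 0, 0,   // constructor, destructor, reclaim, userarg
 *         0,            // source: 0 selects the default arena
 *         0);           // flags
 *     void * p = rtl_cache_alloc (cache);
 *     rtl_cache_free (cache, p);
 *     rtl_cache_destroy (cache);
 */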
/** rtl_cache_destroy()
 */
void SAL_CALL rtl_cache_destroy (
    rtl_cache_type * cache
) SAL_THROW_EXTERN_C()
{
    if (cache != 0)
    {
        rtl_cache_deactivate (cache);
        rtl_cache_destructor (cache);
        rtl_arena_free (gp_cache_arena, cache, sizeof(rtl_cache_type));
    }
}
/** rtl_cache_alloc()
 */
void *
SAL_CALL rtl_cache_alloc (
    rtl_cache_type * cache
) SAL_THROW_EXTERN_C()
{
    void * obj = 0;

    if (cache == 0)
        return (0);

    if (alloc_mode == AMode_SYSTEM)
    {
        obj = rtl_allocateMemory(cache->m_type_size);
        if ((obj != 0) && (cache->m_constructor != 0))
        {
            if (!((cache->m_constructor)(obj, cache->m_userarg)))
            {
                /* construction failure */
                rtl_freeMemory(obj), obj = 0;
            }
        }
        return obj;
    }

    RTL_MEMORY_LOCK_ACQUIRE(&(cache->m_depot_lock));
    if (cache->m_cpu_curr != 0)
    {
        for (;;)
        {
            /* take object from magazine layer */
            rtl_cache_magazine_type *curr, *prev, *temp;

            curr = cache->m_cpu_curr;
            if ((curr != 0) && (curr->m_mag_used > 0))
            {
                obj = curr->m_objects[--curr->m_mag_used];
                cache->m_cpu_stats.m_alloc += 1;
                RTL_MEMORY_LOCK_RELEASE(&(cache->m_depot_lock));

                return (obj);
            }

            prev = cache->m_cpu_prev;
            if ((prev != 0) && (prev->m_mag_used > 0))
            {
                temp = cache->m_cpu_curr;
                cache->m_cpu_curr = cache->m_cpu_prev;
                cache->m_cpu_prev = temp;

                continue;
            }

            temp = rtl_cache_depot_exchange_alloc (cache, prev);
            if (temp != 0)
            {
                cache->m_cpu_prev = cache->m_cpu_curr;
                cache->m_cpu_curr = temp;

                continue;
            }

            /* no full magazine: fall through to slab layer */
            break;
        }
    }
    RTL_MEMORY_LOCK_RELEASE(&(cache->m_depot_lock));

    /* alloc buffer from slab layer */
    obj = rtl_cache_slab_alloc (cache);
    if ((obj != 0) && (cache->m_constructor != 0))
    {
        /* construct object */
        if (!((cache->m_constructor)(obj, cache->m_userarg)))
        {
            /* construction failure */
            rtl_cache_slab_free (cache, obj), obj = 0;
        }
    }
    return (obj);
}
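/* Note (added commentary): the loop above is the magazine-layer fast
 * path: take from the current CPU magazine if it is non-empty, else
 * swap the current and previous magazines, else trade the empty 'prev'
 * for a full magazine at the depot; only when all three steps fail
 * does the allocation fall through to the slab layer below.
 */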
/** rtl_cache_free()
 */
void
SAL_CALL rtl_cache_free (
    rtl_cache_type * cache,
    void *           obj
) SAL_THROW_EXTERN_C()
{
    if ((obj != 0) && (cache != 0))
    {
        if (alloc_mode == AMode_SYSTEM)
        {
            if (cache->m_destructor != 0)
            {
                /* destruct object */
                (cache->m_destructor)(obj, cache->m_userarg);
            }
            rtl_freeMemory(obj);
            return;
        }

        RTL_MEMORY_LOCK_ACQUIRE(&(cache->m_depot_lock));

        for (;;)
        {
            /* return object to magazine layer */
            rtl_cache_magazine_type *curr, *prev, *temp;

            curr = cache->m_cpu_curr;
            if ((curr != 0) && (curr->m_mag_used < curr->m_mag_size))
            {
                curr->m_objects[curr->m_mag_used++] = obj;
                cache->m_cpu_stats.m_free += 1;
                RTL_MEMORY_LOCK_RELEASE(&(cache->m_depot_lock));

                return;
            }

            prev = cache->m_cpu_prev;
            if ((prev != 0) && (prev->m_mag_used == 0))
            {
                temp = cache->m_cpu_curr;
                cache->m_cpu_curr = cache->m_cpu_prev;
                cache->m_cpu_prev = temp;

                continue;
            }

            temp = rtl_cache_depot_exchange_free (cache, prev);
            if (temp != 0)
            {
                cache->m_cpu_prev = cache->m_cpu_curr;
                cache->m_cpu_curr = temp;

                continue;
            }

            if (rtl_cache_depot_populate(cache))
            {
                continue;
            }

            /* no empty magazine: fall through to slab layer */
            break;
        }
        RTL_MEMORY_LOCK_RELEASE(&(cache->m_depot_lock));

        /* no space for constructed object in magazine layer */
        if (cache->m_destructor != 0)
        {
            /* destruct object */
            (cache->m_destructor)(obj, cache->m_userarg);
        }

        /* return buffer to slab layer */
        rtl_cache_slab_free (cache, obj);
    }
}
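/* Note (added commentary): the free path mirrors the alloc path, with
 * one extra step: if no magazine has room, rtl_cache_depot_populate()
 * tries to allocate a fresh empty magazine before the object is
 * destructed and its buffer handed back to the slab layer.
 */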
/* ================================================================= *
 *
 * cache wsupdate (machdep) internals.
 *
 * ================================================================= */

/** rtl_cache_wsupdate_init()
 *
 *  @precond g_cache_list.m_lock initialized
 */
static void
rtl_cache_wsupdate_init();

/** rtl_cache_wsupdate_wait()
 *
 *  @precond g_cache_list.m_lock acquired
 */
static void
rtl_cache_wsupdate_wait (
    unsigned int seconds
);

/** rtl_cache_wsupdate_fini()
 *
 */
static void
rtl_cache_wsupdate_fini();

/* ================================================================= */
#if defined(SAL_UNX)

#include <sys/time.h>

static void *
rtl_cache_wsupdate_all (void * arg);

static void
rtl_cache_wsupdate_init()
{
    RTL_MEMORY_LOCK_ACQUIRE(&(g_cache_list.m_lock));
    g_cache_list.m_update_done = 0;
    (void) pthread_cond_init (&(g_cache_list.m_update_cond), NULL);
    if (pthread_create (
            &(g_cache_list.m_update_thread), NULL, rtl_cache_wsupdate_all, (void*)(10)) != 0)
    {
        /* failure */
        g_cache_list.m_update_thread = (pthread_t)(0);
    }
    RTL_MEMORY_LOCK_RELEASE(&(g_cache_list.m_lock));
}

static void
rtl_cache_wsupdate_wait (unsigned int seconds)
{
    if (seconds > 0)
    {
        timeval  now;
        timespec wakeup;

        gettimeofday(&now, 0);
        wakeup.tv_sec  = now.tv_sec + (seconds);
        wakeup.tv_nsec = now.tv_usec * 1000;

        (void) pthread_cond_timedwait (
            &(g_cache_list.m_update_cond),
            &(g_cache_list.m_lock),
            &wakeup);
    }
}

static void
rtl_cache_wsupdate_fini()
{
    RTL_MEMORY_LOCK_ACQUIRE(&(g_cache_list.m_lock));
    g_cache_list.m_update_done = 1;
    pthread_cond_signal (&(g_cache_list.m_update_cond));
    RTL_MEMORY_LOCK_RELEASE(&(g_cache_list.m_lock));

    if (g_cache_list.m_update_thread != (pthread_t)(0))
        pthread_join (g_cache_list.m_update_thread, NULL);
}
/* ================================================================= */

#elif defined(SAL_W32)

static DWORD WINAPI
rtl_cache_wsupdate_all (void * arg);

static void
rtl_cache_wsupdate_init()
{
    DWORD dwThreadId;

    RTL_MEMORY_LOCK_ACQUIRE(&(g_cache_list.m_lock));
    g_cache_list.m_update_done = 0;
    g_cache_list.m_update_cond = CreateEvent (0, TRUE, FALSE, 0);

    g_cache_list.m_update_thread =
        CreateThread (NULL, 0, rtl_cache_wsupdate_all, (LPVOID)(10), 0, &dwThreadId);
    RTL_MEMORY_LOCK_RELEASE(&(g_cache_list.m_lock));
}

static void
rtl_cache_wsupdate_wait (unsigned int seconds)
{
    if (seconds > 0)
    {
        RTL_MEMORY_LOCK_RELEASE(&(g_cache_list.m_lock));
        WaitForSingleObject (g_cache_list.m_update_cond, (DWORD)(seconds * 1000));
        RTL_MEMORY_LOCK_ACQUIRE(&(g_cache_list.m_lock));
    }
}

static void
rtl_cache_wsupdate_fini()
{
    RTL_MEMORY_LOCK_ACQUIRE(&(g_cache_list.m_lock));
    g_cache_list.m_update_done = 1;
    SetEvent (g_cache_list.m_update_cond);
    RTL_MEMORY_LOCK_RELEASE(&(g_cache_list.m_lock));

    WaitForSingleObject (g_cache_list.m_update_thread, INFINITE);
}

#endif /* SAL_UNX || SAL_W32 */
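/* Note (added commentary): on both platforms the update thread is
 * created with the literal 10 passed through the void* argument of
 * rtl_cache_wsupdate_all(), i.e. the working-set update runs every
 * 10 seconds until rtl_cache_wsupdate_fini() sets m_update_done and
 * signals the condition/event.
 */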
/* ================================================================= */

/** rtl_cache_depot_wsupdate()
 *  update depot stats and purge excess magazines.
 *
 *  @precond cache->m_depot_lock acquired
 */
static void
rtl_cache_depot_wsupdate (
    rtl_cache_type *       cache,
    rtl_cache_depot_type * depot
)
{
    sal_Size npurge;

    depot->m_prev_min = depot->m_curr_min;
    depot->m_curr_min = depot->m_mag_count;

    npurge = depot->m_curr_min < depot->m_prev_min ? depot->m_curr_min : depot->m_prev_min;
    for (; npurge > 0; npurge--)
    {
        rtl_cache_magazine_type * mag = rtl_cache_depot_dequeue (depot);
        if (mag != 0)
        {
            RTL_MEMORY_LOCK_RELEASE(&(cache->m_depot_lock));
            rtl_cache_magazine_clear (cache, mag);
            rtl_cache_free (cache->m_magazine_cache, mag);
            RTL_MEMORY_LOCK_ACQUIRE(&(cache->m_depot_lock));
        }
    }
}
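/* Note (added commentary): the purge count is the smaller of the two
 * most recent minima of the depot's magazine count, so magazines that
 * sat unused through two whole update intervals are treated as outside
 * the cache's working set and are cleared and released.
 */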
/** rtl_cache_wsupdate()
 *
 *  @precond cache->m_depot_lock released
 */
static void
rtl_cache_wsupdate (
    rtl_cache_type * cache
)
{
    if (cache->m_magazine_cache != 0)
    {
        RTL_MEMORY_LOCK_ACQUIRE(&(cache->m_depot_lock));

        // SAL_INFO(
        //     "sal.rtl",
        //     "rtl_cache_wsupdate(" << cache->m_name
        //         << ") [depot: count, curr_min, prev_min] full: "
        //         << cache->m_depot_full.m_mag_count << ", "
        //         << cache->m_depot_full.m_curr_min << ", "
        //         << cache->m_depot_full.m_prev_min << "; empty: "
        //         << cache->m_depot_empty.m_mag_count << ", "
        //         << cache->m_depot_empty.m_curr_min << ", "
        //         << cache->m_depot_empty.m_prev_min);

        rtl_cache_depot_wsupdate (cache, &(cache->m_depot_full));
        rtl_cache_depot_wsupdate (cache, &(cache->m_depot_empty));

        RTL_MEMORY_LOCK_RELEASE(&(cache->m_depot_lock));
    }
}
/** rtl_cache_wsupdate_all()
 *
 */
#if defined(SAL_UNX)
static void *
#elif defined(SAL_W32)
static DWORD WINAPI
#endif /* SAL_UNX || SAL_W32 */
rtl_cache_wsupdate_all (void * arg)
{
    osl::Thread::setName("rtl_cache_wsupdate_all");
    unsigned int seconds = sal::static_int_cast< unsigned int >(
        reinterpret_cast< sal_uIntPtr >(arg));

    RTL_MEMORY_LOCK_ACQUIRE(&(g_cache_list.m_lock));
    while (!g_cache_list.m_update_done)
    {
        rtl_cache_wsupdate_wait (seconds);
        if (!g_cache_list.m_update_done)
        {
            rtl_cache_type * head, * cache;

            head = &(g_cache_list.m_cache_head);
            for (cache = head->m_cache_next;
                 cache != head;
                 cache = cache->m_cache_next)
            {
                rtl_cache_wsupdate (cache);
            }
        }
    }
    RTL_MEMORY_LOCK_RELEASE(&(g_cache_list.m_lock));

    return (0);
}
/* ================================================================= *
 *
 * cache initialization.
 *
 * ================================================================= */

void
rtl_cache_init()
{
    {
        /* list of caches */
        RTL_MEMORY_LOCK_INIT(&(g_cache_list.m_lock));
        (void) rtl_cache_constructor (&(g_cache_list.m_cache_head));
    }
    {
        /* cache: internal arena */
        assert(gp_cache_arena == 0);

        gp_cache_arena = rtl_arena_create (
            "rtl_cache_internal_arena",
            64,   /* quantum */
            0,    /* no quantum caching */
            NULL, /* default source */
            rtl_arena_alloc,
            rtl_arena_free,
            0     /* flags */
        );
        assert(gp_cache_arena != 0);

        /* check 'gp_default_arena' initialization */
        assert(gp_default_arena != 0);
    }
    {
        /* cache: magazine cache */
        static rtl_cache_type g_cache_magazine_cache;

        assert(gp_cache_magazine_cache == 0);
        (void) rtl_cache_constructor (&g_cache_magazine_cache);

        gp_cache_magazine_cache = rtl_cache_activate (
            &g_cache_magazine_cache,
            "rtl_cache_magazine_cache",
            sizeof(rtl_cache_magazine_type), /* objsize  */
            0,                               /* objalign */
            rtl_cache_magazine_constructor,
            rtl_cache_magazine_destructor,
            0, /* reclaim */
            0, /* userarg: NYI */
            gp_default_arena, /* source */
            RTL_CACHE_FLAG_NOMAGAZINE /* during bootstrap; activated below */
        );
        assert(gp_cache_magazine_cache != 0);

        /* activate magazine layer */
        g_cache_magazine_cache.m_magazine_cache = gp_cache_magazine_cache;
    }
    {
        /* cache: slab (struct) cache */
        static rtl_cache_type g_cache_slab_cache;

        assert(gp_cache_slab_cache == 0);
        (void) rtl_cache_constructor (&g_cache_slab_cache);

        gp_cache_slab_cache = rtl_cache_activate (
            &g_cache_slab_cache,
            "rtl_cache_slab_cache",
            sizeof(rtl_cache_slab_type), /* objsize  */
            0,                           /* objalign */
            rtl_cache_slab_constructor,
            rtl_cache_slab_destructor,
            0,                /* reclaim */
            0,                /* userarg: none */
            gp_default_arena, /* source */
            0                 /* flags: none */
        );
        assert(gp_cache_slab_cache != 0);
    }
    {
        /* cache: bufctl cache */
        static rtl_cache_type g_cache_bufctl_cache;

        assert(gp_cache_bufctl_cache == 0);
        (void) rtl_cache_constructor (&g_cache_bufctl_cache);

        gp_cache_bufctl_cache = rtl_cache_activate (
            &g_cache_bufctl_cache,
            "rtl_cache_bufctl_cache",
            sizeof(rtl_cache_bufctl_type), /* objsize */
            0,                /* objalign */
            0,                /* constructor */
            0,                /* destructor */
            0,                /* reclaim */
            0,                /* userarg */
            gp_default_arena, /* source */
            0                 /* flags: none */
        );
        assert(gp_cache_bufctl_cache != 0);
    }

    rtl_cache_wsupdate_init();
    // SAL_INFO("sal.rtl", "rtl_cache_init completed");
}
/* ================================================================= */

void
rtl_cache_fini()
{
    if (gp_cache_arena != 0)
    {
        rtl_cache_type * cache, * head;

        rtl_cache_wsupdate_fini();

        if (gp_cache_bufctl_cache != 0)
        {
            cache = gp_cache_bufctl_cache, gp_cache_bufctl_cache = 0;
            rtl_cache_deactivate (cache);
            rtl_cache_destructor (cache);
        }
        if (gp_cache_slab_cache != 0)
        {
            cache = gp_cache_slab_cache, gp_cache_slab_cache = 0;
            rtl_cache_deactivate (cache);
            rtl_cache_destructor (cache);
        }
        if (gp_cache_magazine_cache != 0)
        {
            cache = gp_cache_magazine_cache, gp_cache_magazine_cache = 0;
            rtl_cache_deactivate (cache);
            rtl_cache_destructor (cache);
        }
        if (gp_cache_arena != 0)
        {
            rtl_arena_destroy (gp_cache_arena);
            gp_cache_arena = 0;
        }

        RTL_MEMORY_LOCK_ACQUIRE(&(g_cache_list.m_lock));
        head = &(g_cache_list.m_cache_head);
        for (cache = head->m_cache_next; cache != head; cache = cache->m_cache_next)
        {
            // SAL_INFO(
            //     "sal.rtl",
            //     "rtl_cache_fini(" << cache->m_name << ") [slab]: allocs: "
            //         << cache->m_slab_stats.m_alloc << ", frees: "
            //         << cache->m_slab_stats.m_free << "; total: "
            //         << cache->m_slab_stats.m_mem_total << ", used: "
            //         << cache->m_slab_stats.m_mem_alloc << "; [cpu]: allocs: "
            //         << cache->m_cpu_stats.m_alloc << ", frees: "
            //         << cache->m_cpu_stats.m_free << "; [total]: allocs: "
            //         << (cache->m_slab_stats.m_alloc
            //             + cache->m_cpu_stats.m_alloc)
            //         << ", frees: "
            //         << (cache->m_slab_stats.m_free
            //             + cache->m_cpu_stats.m_free));
        }
        RTL_MEMORY_LOCK_RELEASE(&(g_cache_list.m_lock));
    }
    // SAL_INFO("sal.rtl", "rtl_cache_fini completed");
}
/* vim:set shiftwidth=4 softtabstop=4 expandtab: */