bump product version to 4.1.6.2
[LibreOffice.git] / sal / rtl / alloc_cache.cxx
blobd514cfc8451490b943a0bd611737681162d41a23
1 /* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
2 /*
3 * This file is part of the LibreOffice project.
5 * This Source Code Form is subject to the terms of the Mozilla Public
6 * License, v. 2.0. If a copy of the MPL was not distributed with this
7 * file, You can obtain one at http://mozilla.org/MPL/2.0/.
9 * This file incorporates work covered by the following license notice:
11 * Licensed to the Apache Software Foundation (ASF) under one or more
12 * contributor license agreements. See the NOTICE file distributed
13 * with this work for additional information regarding copyright
14 * ownership. The ASF licenses this file to you under the Apache
15 * License, Version 2.0 (the "License"); you may not use this file
16 * except in compliance with the License. You may obtain a copy of
17 * the License at http://www.apache.org/licenses/LICENSE-2.0 .
20 #include "alloc_cache.hxx"
21 #include "alloc_impl.hxx"
22 #include "alloc_arena.hxx"
23 #include "internal/rtllifecycle.h"
24 #include "sal/macros.h"
25 #include "osl/diagnose.h"
27 #include <cassert>
28 #include <string.h>
29 #include <stdio.h>
31 extern AllocMode alloc_mode;
33 /* ================================================================= *
35 * cache internals.
37 * ================================================================= */
39 /** g_cache_list
40 * @internal
42 struct rtl_cache_list_st
44 rtl_memory_lock_type m_lock;
45 rtl_cache_type m_cache_head;
47 #if defined(SAL_UNX)
48 pthread_t m_update_thread;
49 pthread_cond_t m_update_cond;
50 #elif defined(SAL_W32)
51 HANDLE m_update_thread;
52 HANDLE m_update_cond;
53 #endif /* SAL_UNX || SAL_W32 */
54 int m_update_done;
57 static rtl_cache_list_st g_cache_list;
60 /** gp_cache_arena
61 * provided for cache_type allocations, and hash_table resizing.
63 * @internal
65 static rtl_arena_type * gp_cache_arena = 0;
68 /** gp_cache_magazine_cache
69 * @internal
71 static rtl_cache_type * gp_cache_magazine_cache = 0;
74 /** gp_cache_slab_cache
75 * @internal
77 static rtl_cache_type * gp_cache_slab_cache = 0;
80 /** gp_cache_bufctl_cache
81 * @internal
83 static rtl_cache_type * gp_cache_bufctl_cache = 0;
86 /* ================================================================= */
/** RTL_CACHE_HASH_INDEX()
 *  Map a buffer address to a hash-table bucket; mixes two shifted copies
 *  of the address before masking to the (power-of-two) table size.
 */
#define RTL_CACHE_HASH_INDEX_IMPL(a, s, q, m) \
    ((((a) + ((a) >> (s)) + ((a) >> ((s) << 1))) >> (q)) & (m))

#define RTL_CACHE_HASH_INDEX(cache, addr) \
    RTL_CACHE_HASH_INDEX_IMPL((addr), (cache)->m_hash_shift, (cache)->m_type_shift, ((cache)->m_hash_size - 1))
96 namespace
99 void
100 rtl_cache_hash_rescale (
101 rtl_cache_type * cache,
102 sal_Size new_size
105 rtl_cache_bufctl_type ** new_table;
106 sal_Size new_bytes;
108 new_bytes = new_size * sizeof(rtl_cache_bufctl_type*);
109 new_table = (rtl_cache_bufctl_type**)rtl_arena_alloc(gp_cache_arena, &new_bytes);
111 if (new_table != 0)
113 rtl_cache_bufctl_type ** old_table;
114 sal_Size old_size, i;
116 memset (new_table, 0, new_bytes);
118 RTL_MEMORY_LOCK_ACQUIRE(&(cache->m_slab_lock));
120 old_table = cache->m_hash_table;
121 old_size = cache->m_hash_size;
123 // SAL_INFO(
124 // "sal.rtl",
125 // "rtl_cache_hash_rescale(" << cache->m_name << "): nbuf: "
126 // << (cache->m_slab_stats.m_alloc - cache->m_slab_stats.m_free)
127 // << " (ave: "
128 // << ((cache->m_slab_stats.m_alloc - cache->m_slab_stats.m_free)
129 // >> cache->m_hash_shift)
130 // << "), frees: " << cache->m_slab_stats.m_free << " [old_size: "
131 // << old_size << ", new_size: " << new_size << ']');
133 cache->m_hash_table = new_table;
134 cache->m_hash_size = new_size;
135 cache->m_hash_shift = highbit(cache->m_hash_size) - 1;
137 for (i = 0; i < old_size; i++)
139 rtl_cache_bufctl_type * curr = old_table[i];
140 while (curr != 0)
142 rtl_cache_bufctl_type * next = curr->m_next;
143 rtl_cache_bufctl_type ** head;
145 head = &(cache->m_hash_table[RTL_CACHE_HASH_INDEX(cache, curr->m_addr)]);
146 curr->m_next = (*head);
147 (*head) = curr;
149 curr = next;
151 old_table[i] = 0;
154 RTL_MEMORY_LOCK_RELEASE(&(cache->m_slab_lock));
156 if (old_table != cache->m_hash_table_0)
158 sal_Size old_bytes = old_size * sizeof(rtl_cache_bufctl_type*);
159 rtl_arena_free (gp_cache_arena, old_table, old_bytes);
164 inline sal_uIntPtr
165 rtl_cache_hash_insert (
166 rtl_cache_type * cache,
167 rtl_cache_bufctl_type * bufctl
170 rtl_cache_bufctl_type ** ppHead;
172 ppHead = &(cache->m_hash_table[RTL_CACHE_HASH_INDEX(cache, bufctl->m_addr)]);
174 bufctl->m_next = (*ppHead);
175 (*ppHead) = bufctl;
177 return (bufctl->m_addr);
180 /** rtl_cache_hash_remove()
182 rtl_cache_bufctl_type *
183 rtl_cache_hash_remove (
184 rtl_cache_type * cache,
185 sal_uIntPtr addr
188 rtl_cache_bufctl_type ** ppHead;
189 rtl_cache_bufctl_type * bufctl;
190 sal_Size lookups = 0;
192 ppHead = &(cache->m_hash_table[RTL_CACHE_HASH_INDEX(cache, addr)]);
193 while ((bufctl = *ppHead) != 0)
195 if (bufctl->m_addr == addr)
197 *ppHead = bufctl->m_next, bufctl->m_next = 0;
198 break;
201 lookups += 1;
202 ppHead = &(bufctl->m_next);
205 assert(bufctl != 0); // bad free
207 if (lookups > 1)
209 sal_Size nbuf = (sal_Size)(cache->m_slab_stats.m_alloc - cache->m_slab_stats.m_free);
210 if (nbuf > 4 * cache->m_hash_size)
212 if (!(cache->m_features & RTL_CACHE_FEATURE_RESCALE))
214 sal_Size ave = nbuf >> cache->m_hash_shift;
215 sal_Size new_size = cache->m_hash_size << (highbit(ave) - 1);
217 cache->m_features |= RTL_CACHE_FEATURE_RESCALE;
218 RTL_MEMORY_LOCK_RELEASE(&(cache->m_slab_lock));
219 rtl_cache_hash_rescale (cache, new_size);
220 RTL_MEMORY_LOCK_ACQUIRE(&(cache->m_slab_lock));
221 cache->m_features &= ~RTL_CACHE_FEATURE_RESCALE;
226 return (bufctl);
229 /* ================================================================= */
/** RTL_CACHE_SLAB()
 *  Locate the slab struct embedded at the end of a power-of-two slab.
 */
#define RTL_CACHE_SLAB(addr, size) \
    (((rtl_cache_slab_type*)(RTL_MEMORY_P2END((sal_uIntPtr)(addr), (size)))) - 1)
237 /** rtl_cache_slab_constructor()
240 rtl_cache_slab_constructor (void * obj, SAL_UNUSED_PARAMETER void *)
242 rtl_cache_slab_type * slab = (rtl_cache_slab_type*)(obj);
244 QUEUE_START_NAMED(slab, slab_);
245 slab->m_ntypes = 0;
247 return (1);
251 /** rtl_cache_slab_destructor()
253 void
254 rtl_cache_slab_destructor (void * obj, SAL_UNUSED_PARAMETER void *)
256 rtl_cache_slab_type * slab = static_cast< rtl_cache_slab_type * >(obj);
257 assert(QUEUE_STARTED_NAMED(slab, slab_)); // assure removed from queue(s)
258 assert(slab->m_ntypes == 0); // assure no longer referenced
259 (void) slab; // avoid warnings
263 /** rtl_cache_slab_create()
265 * @precond cache->m_slab_lock released.
267 rtl_cache_slab_type *
268 rtl_cache_slab_create (
269 rtl_cache_type * cache
272 rtl_cache_slab_type * slab = 0;
273 void * addr;
274 sal_Size size;
276 size = cache->m_slab_size;
277 addr = rtl_arena_alloc (cache->m_source, &size);
278 if (addr != 0)
280 assert(size >= cache->m_slab_size);
282 if (cache->m_features & RTL_CACHE_FEATURE_HASH)
284 /* allocate slab struct from slab cache */
285 assert(cache != gp_cache_slab_cache);
286 slab = (rtl_cache_slab_type*)rtl_cache_alloc (gp_cache_slab_cache);
288 else
290 /* construct embedded slab struct */
291 slab = RTL_CACHE_SLAB(addr, cache->m_slab_size);
292 (void) rtl_cache_slab_constructor (slab, 0);
294 if (slab != 0)
296 slab->m_data = (sal_uIntPtr)(addr);
298 /* dynamic freelist initialization */
299 slab->m_bp = slab->m_data;
300 slab->m_sp = 0;
302 else
304 rtl_arena_free (cache->m_source, addr, size);
307 return (slab);
311 /** rtl_cache_slab_destroy()
313 * @precond cache->m_slab_lock released.
315 void
316 rtl_cache_slab_destroy (
317 rtl_cache_type * cache,
318 rtl_cache_slab_type * slab
321 void * addr = (void*)(slab->m_data);
322 sal_Size refcnt = slab->m_ntypes; slab->m_ntypes = 0;
324 if (cache->m_features & RTL_CACHE_FEATURE_HASH)
326 /* cleanup bufctl(s) for free buffer(s) */
327 sal_Size ntypes = (slab->m_bp - slab->m_data) / cache->m_type_size;
328 for (ntypes -= refcnt; slab->m_sp != 0; ntypes--)
330 rtl_cache_bufctl_type * bufctl = slab->m_sp;
332 /* pop from freelist */
333 slab->m_sp = bufctl->m_next, bufctl->m_next = 0;
335 /* return bufctl struct to bufctl cache */
336 rtl_cache_free (gp_cache_bufctl_cache, bufctl);
338 assert(ntypes == 0);
340 /* return slab struct to slab cache */
341 rtl_cache_free (gp_cache_slab_cache, slab);
343 else
345 /* destruct embedded slab struct */
346 rtl_cache_slab_destructor (slab, 0);
349 if ((refcnt == 0) || (cache->m_features & RTL_CACHE_FEATURE_BULKDESTROY))
351 /* free memory */
352 rtl_arena_free (cache->m_source, addr, cache->m_slab_size);
357 /** rtl_cache_slab_populate()
359 * @precond cache->m_slab_lock acquired.
362 rtl_cache_slab_populate (
363 rtl_cache_type * cache
366 rtl_cache_slab_type * slab;
368 RTL_MEMORY_LOCK_RELEASE(&(cache->m_slab_lock));
369 slab = rtl_cache_slab_create (cache);
370 RTL_MEMORY_LOCK_ACQUIRE(&(cache->m_slab_lock));
371 if (slab != 0)
373 /* update buffer start addr w/ current color */
374 slab->m_bp += cache->m_ncolor;
376 /* update color for next slab */
377 cache->m_ncolor += cache->m_type_align;
378 if (cache->m_ncolor > cache->m_ncolor_max)
379 cache->m_ncolor = 0;
381 /* update stats */
382 cache->m_slab_stats.m_mem_total += cache->m_slab_size;
384 /* insert onto 'free' queue */
385 QUEUE_INSERT_HEAD_NAMED(&(cache->m_free_head), slab, slab_);
387 return (slab != 0);
390 /* ================================================================= */
392 /** rtl_cache_slab_alloc()
394 * Allocate a buffer from slab layer; used by magazine layer.
396 void *
397 rtl_cache_slab_alloc (
398 rtl_cache_type * cache
401 void * addr = 0;
402 rtl_cache_slab_type * head;
404 RTL_MEMORY_LOCK_ACQUIRE(&(cache->m_slab_lock));
406 head = &(cache->m_free_head);
407 if ((head->m_slab_next != head) || rtl_cache_slab_populate (cache))
409 rtl_cache_slab_type * slab;
410 rtl_cache_bufctl_type * bufctl;
412 slab = head->m_slab_next;
413 assert(slab->m_ntypes < cache->m_ntypes);
415 if (slab->m_sp == 0)
417 /* initialize bufctl w/ current 'slab->m_bp' */
418 assert(slab->m_bp < slab->m_data + cache->m_ntypes * cache->m_type_size + cache->m_ncolor_max);
419 if (cache->m_features & RTL_CACHE_FEATURE_HASH)
421 /* allocate bufctl */
422 assert(cache != gp_cache_bufctl_cache);
423 bufctl = (rtl_cache_bufctl_type*)rtl_cache_alloc (gp_cache_bufctl_cache);
424 if (bufctl == 0)
426 /* out of memory */
427 RTL_MEMORY_LOCK_RELEASE(&(cache->m_slab_lock));
428 return (0);
431 bufctl->m_addr = slab->m_bp;
432 bufctl->m_slab = (sal_uIntPtr)(slab);
434 else
436 /* embedded bufctl */
437 bufctl = (rtl_cache_bufctl_type*)(slab->m_bp);
439 bufctl->m_next = 0;
441 /* update 'slab->m_bp' to next free buffer */
442 slab->m_bp += cache->m_type_size;
444 /* assign bufctl to freelist */
445 slab->m_sp = bufctl;
448 /* pop front */
449 bufctl = slab->m_sp;
450 slab->m_sp = bufctl->m_next;
452 /* increment usage, check for full slab */
453 if ((slab->m_ntypes += 1) == cache->m_ntypes)
455 /* remove from 'free' queue */
456 QUEUE_REMOVE_NAMED(slab, slab_);
458 /* insert onto 'used' queue (tail) */
459 QUEUE_INSERT_TAIL_NAMED(&(cache->m_used_head), slab, slab_);
462 /* update stats */
463 cache->m_slab_stats.m_alloc += 1;
464 cache->m_slab_stats.m_mem_alloc += cache->m_type_size;
466 if (cache->m_features & RTL_CACHE_FEATURE_HASH)
467 addr = (void*)rtl_cache_hash_insert (cache, bufctl);
468 else
469 addr = bufctl;
472 RTL_MEMORY_LOCK_RELEASE(&(cache->m_slab_lock));
473 return (addr);
477 /** rtl_cache_slab_free()
479 * Return a buffer to slab layer; used by magazine layer.
481 void
482 rtl_cache_slab_free (
483 rtl_cache_type * cache,
484 void * addr
487 rtl_cache_bufctl_type * bufctl;
488 rtl_cache_slab_type * slab;
490 RTL_MEMORY_LOCK_ACQUIRE(&(cache->m_slab_lock));
492 /* determine slab from addr */
493 if (cache->m_features & RTL_CACHE_FEATURE_HASH)
495 bufctl = rtl_cache_hash_remove (cache, (sal_uIntPtr)(addr));
496 slab = (bufctl != 0) ? (rtl_cache_slab_type*)(bufctl->m_slab) : 0;
498 else
500 /* embedded slab struct */
501 bufctl = (rtl_cache_bufctl_type*)(addr);
502 slab = RTL_CACHE_SLAB(addr, cache->m_slab_size);
505 if (slab != 0)
507 /* check for full slab */
508 if (slab->m_ntypes == cache->m_ntypes)
510 /* remove from 'used' queue */
511 QUEUE_REMOVE_NAMED(slab, slab_);
513 /* insert onto 'free' queue (head) */
514 QUEUE_INSERT_HEAD_NAMED(&(cache->m_free_head), slab, slab_);
517 /* push front */
518 bufctl->m_next = slab->m_sp;
519 slab->m_sp = bufctl;
521 /* update stats */
522 cache->m_slab_stats.m_free += 1;
523 cache->m_slab_stats.m_mem_alloc -= cache->m_type_size;
525 /* decrement usage, check for empty slab */
526 if ((slab->m_ntypes -= 1) == 0)
528 /* remove from 'free' queue */
529 QUEUE_REMOVE_NAMED(slab, slab_);
531 /* update stats */
532 cache->m_slab_stats.m_mem_total -= cache->m_slab_size;
534 /* free 'empty' slab */
535 RTL_MEMORY_LOCK_RELEASE(&(cache->m_slab_lock));
536 rtl_cache_slab_destroy (cache, slab);
537 return;
541 RTL_MEMORY_LOCK_RELEASE(&(cache->m_slab_lock));
544 /* ================================================================= */
546 /** rtl_cache_magazine_constructor()
549 rtl_cache_magazine_constructor (void * obj, SAL_UNUSED_PARAMETER void *)
551 rtl_cache_magazine_type * mag = (rtl_cache_magazine_type*)(obj);
552 /* @@@ sal_Size size = (sal_Size)(arg); @@@ */
554 mag->m_mag_next = 0;
555 mag->m_mag_size = RTL_CACHE_MAGAZINE_SIZE;
556 mag->m_mag_used = 0;
558 return (1);
562 /** rtl_cache_magazine_destructor()
564 void
565 rtl_cache_magazine_destructor (void * obj, SAL_UNUSED_PARAMETER void *)
567 rtl_cache_magazine_type * mag = static_cast< rtl_cache_magazine_type * >(
568 obj);
569 assert(mag->m_mag_next == 0); // assure removed from queue(s)
570 assert(mag->m_mag_used == 0); // assure no longer referenced
571 (void) mag; // avoid warnings
575 /** rtl_cache_magazine_clear()
577 void
578 rtl_cache_magazine_clear (
579 rtl_cache_type * cache,
580 rtl_cache_magazine_type * mag
583 for (; mag->m_mag_used > 0; --mag->m_mag_used)
585 void * obj = mag->m_objects[mag->m_mag_used - 1];
586 mag->m_objects[mag->m_mag_used - 1] = 0;
588 if (cache->m_destructor != 0)
590 /* destruct object */
591 (cache->m_destructor)(obj, cache->m_userarg);
594 /* return buffer to slab layer */
595 rtl_cache_slab_free (cache, obj);
599 /* ================================================================= */
601 /** rtl_cache_depot_enqueue()
603 * @precond cache->m_depot_lock acquired.
605 inline void
606 rtl_cache_depot_enqueue (
607 rtl_cache_depot_type * depot,
608 rtl_cache_magazine_type * mag
611 /* enqueue empty magazine */
612 mag->m_mag_next = depot->m_mag_next;
613 depot->m_mag_next = mag;
615 /* update depot stats */
616 depot->m_mag_count++;
619 #if defined(__SUNPRO_C) || defined(__SUNPRO_CC)
620 #pragma inline(rtl_cache_depot_enqueue)
621 #endif /* __SUNPRO_C */
624 /** rtl_cache_depot_dequeue()
626 * @precond cache->m_depot_lock acquired.
628 inline rtl_cache_magazine_type *
629 rtl_cache_depot_dequeue (
630 rtl_cache_depot_type * depot
633 rtl_cache_magazine_type * mag = 0;
634 if (depot->m_mag_count > 0)
636 /* dequeue magazine */
637 assert(depot->m_mag_next != 0);
639 mag = depot->m_mag_next;
640 depot->m_mag_next = mag->m_mag_next;
641 mag->m_mag_next = 0;
643 /* update depot stats */
644 depot->m_mag_count--;
645 if(depot->m_curr_min > depot->m_mag_count)
647 depot->m_curr_min = depot->m_mag_count;
650 return (mag);
653 #if defined(__SUNPRO_C) || defined(__SUNPRO_CC)
654 #pragma inline(rtl_cache_depot_dequeue)
655 #endif /* __SUNPRO_C */
658 /** rtl_cache_depot_exchange_alloc()
660 * @precond cache->m_depot_lock acquired.
662 inline rtl_cache_magazine_type *
663 rtl_cache_depot_exchange_alloc (
664 rtl_cache_type * cache,
665 rtl_cache_magazine_type * empty
668 rtl_cache_magazine_type * full;
670 assert((empty == 0) || (empty->m_mag_used == 0));
672 /* dequeue full magazine */
673 full = rtl_cache_depot_dequeue (&(cache->m_depot_full));
674 if ((full != 0) && (empty != 0))
676 /* enqueue empty magazine */
677 rtl_cache_depot_enqueue (&(cache->m_depot_empty), empty);
680 assert((full == 0) || (full->m_mag_used > 0));
682 return (full);
685 /** rtl_cache_depot_exchange_free()
687 * @precond cache->m_depot_lock acquired.
689 inline rtl_cache_magazine_type *
690 rtl_cache_depot_exchange_free (
691 rtl_cache_type * cache,
692 rtl_cache_magazine_type * full
695 rtl_cache_magazine_type * empty;
697 assert((full == 0) || (full->m_mag_used > 0));
699 /* dequeue empty magazine */
700 empty = rtl_cache_depot_dequeue (&(cache->m_depot_empty));
701 if ((empty != 0) && (full != 0))
703 /* enqueue full magazine */
704 rtl_cache_depot_enqueue (&(cache->m_depot_full), full);
707 assert((empty == 0) || (empty->m_mag_used == 0));
709 return (empty);
712 #if defined(__SUNPRO_C) || defined(__SUNPRO_CC)
713 #pragma inline(rtl_cache_depot_exchange_free)
714 #endif /* __SUNPRO_C */
717 /** rtl_cache_depot_populate()
719 * @precond cache->m_depot_lock acquired.
722 rtl_cache_depot_populate (
723 rtl_cache_type * cache
726 rtl_cache_magazine_type * empty = 0;
728 if (cache->m_magazine_cache != 0)
730 /* allocate new empty magazine */
731 RTL_MEMORY_LOCK_RELEASE(&(cache->m_depot_lock));
732 empty = (rtl_cache_magazine_type*)rtl_cache_alloc (cache->m_magazine_cache);
733 RTL_MEMORY_LOCK_ACQUIRE(&(cache->m_depot_lock));
734 if (empty != 0)
736 /* enqueue (new) empty magazine */
737 rtl_cache_depot_enqueue (&(cache->m_depot_empty), empty);
740 return (empty != 0);
743 /* ================================================================= */
745 /** rtl_cache_constructor()
748 rtl_cache_constructor (void * obj)
750 rtl_cache_type * cache = (rtl_cache_type*)(obj);
752 memset (cache, 0, sizeof(rtl_cache_type));
754 /* linkage */
755 QUEUE_START_NAMED(cache, cache_);
757 /* slab layer */
758 (void)RTL_MEMORY_LOCK_INIT(&(cache->m_slab_lock));
760 QUEUE_START_NAMED(&(cache->m_free_head), slab_);
761 QUEUE_START_NAMED(&(cache->m_used_head), slab_);
763 cache->m_hash_table = cache->m_hash_table_0;
764 cache->m_hash_size = RTL_CACHE_HASH_SIZE;
765 cache->m_hash_shift = highbit(cache->m_hash_size) - 1;
767 /* depot layer */
768 (void)RTL_MEMORY_LOCK_INIT(&(cache->m_depot_lock));
770 return (1);
773 /** rtl_cache_destructor()
775 void
776 rtl_cache_destructor (void * obj)
778 rtl_cache_type * cache = (rtl_cache_type*)(obj);
780 /* linkage */
781 assert(QUEUE_STARTED_NAMED(cache, cache_));
783 /* slab layer */
784 (void)RTL_MEMORY_LOCK_DESTROY(&(cache->m_slab_lock));
786 assert(QUEUE_STARTED_NAMED(&(cache->m_free_head), slab_));
787 assert(QUEUE_STARTED_NAMED(&(cache->m_used_head), slab_));
789 assert(cache->m_hash_table == cache->m_hash_table_0);
790 assert(cache->m_hash_size == RTL_CACHE_HASH_SIZE);
791 assert(cache->m_hash_shift == (sal_Size)(highbit(cache->m_hash_size) - 1));
793 /* depot layer */
794 (void)RTL_MEMORY_LOCK_DESTROY(&(cache->m_depot_lock));
797 /* ================================================================= */
799 /** rtl_cache_activate()
801 rtl_cache_type *
802 rtl_cache_activate (
803 rtl_cache_type * cache,
804 const char * name,
805 size_t objsize,
806 size_t objalign,
807 int (SAL_CALL * constructor)(void * obj, void * userarg),
808 void (SAL_CALL * destructor) (void * obj, void * userarg),
809 void (SAL_CALL * reclaim) (void * userarg),
810 void * userarg,
811 rtl_arena_type * source,
812 int flags
815 assert(cache != 0);
816 if (cache != 0)
818 sal_Size slabsize;
820 snprintf (cache->m_name, sizeof(cache->m_name), "%s", name);
822 /* ensure minimum size (embedded bufctl linkage) */
823 if(objsize < sizeof(rtl_cache_bufctl_type*))
825 objsize = sizeof(rtl_cache_bufctl_type*);
828 if (objalign == 0)
830 /* determine default alignment */
831 if (objsize >= RTL_MEMORY_ALIGNMENT_8)
832 objalign = RTL_MEMORY_ALIGNMENT_8;
833 else
834 objalign = RTL_MEMORY_ALIGNMENT_4;
836 else
838 /* ensure minimum alignment */
839 if(objalign < RTL_MEMORY_ALIGNMENT_4)
841 objalign = RTL_MEMORY_ALIGNMENT_4;
844 assert(RTL_MEMORY_ISP2(objalign));
846 cache->m_type_size = objsize = RTL_MEMORY_P2ROUNDUP(objsize, objalign);
847 cache->m_type_align = objalign;
848 cache->m_type_shift = highbit(cache->m_type_size) - 1;
850 cache->m_constructor = constructor;
851 cache->m_destructor = destructor;
852 cache->m_reclaim = reclaim;
853 cache->m_userarg = userarg;
855 /* slab layer */
856 cache->m_source = source;
858 slabsize = source->m_quantum; /* minimum slab size */
859 if (flags & RTL_CACHE_FLAG_QUANTUMCACHE)
861 /* next power of 2 above 3 * qcache_max */
862 if(slabsize < (1UL << highbit(3 * source->m_qcache_max)))
864 slabsize = (1UL << highbit(3 * source->m_qcache_max));
867 else
869 /* waste at most 1/8 of slab */
870 if(slabsize < cache->m_type_size * 8)
872 slabsize = cache->m_type_size * 8;
876 slabsize = RTL_MEMORY_P2ROUNDUP(slabsize, source->m_quantum);
877 if (!RTL_MEMORY_ISP2(slabsize))
878 slabsize = 1UL << highbit(slabsize);
879 cache->m_slab_size = slabsize;
881 if (cache->m_slab_size > source->m_quantum)
883 assert(gp_cache_slab_cache != 0);
884 assert(gp_cache_bufctl_cache != 0);
886 cache->m_features |= RTL_CACHE_FEATURE_HASH;
887 cache->m_ntypes = cache->m_slab_size / cache->m_type_size;
888 cache->m_ncolor_max = cache->m_slab_size % cache->m_type_size;
890 else
892 /* embedded slab struct */
893 cache->m_ntypes = (cache->m_slab_size - sizeof(rtl_cache_slab_type)) / cache->m_type_size;
894 cache->m_ncolor_max = (cache->m_slab_size - sizeof(rtl_cache_slab_type)) % cache->m_type_size;
897 assert(cache->m_ntypes > 0);
898 cache->m_ncolor = 0;
900 if (flags & RTL_CACHE_FLAG_BULKDESTROY)
902 /* allow bulk slab delete upon cache deactivation */
903 cache->m_features |= RTL_CACHE_FEATURE_BULKDESTROY;
906 /* magazine layer */
907 if (!(flags & RTL_CACHE_FLAG_NOMAGAZINE))
909 assert(gp_cache_magazine_cache != 0);
910 cache->m_magazine_cache = gp_cache_magazine_cache;
913 /* insert into cache list */
914 RTL_MEMORY_LOCK_ACQUIRE(&(g_cache_list.m_lock));
915 QUEUE_INSERT_TAIL_NAMED(&(g_cache_list.m_cache_head), cache, cache_);
916 RTL_MEMORY_LOCK_RELEASE(&(g_cache_list.m_lock));
918 return (cache);
921 /** rtl_cache_deactivate()
923 void
924 rtl_cache_deactivate (
925 rtl_cache_type * cache
928 /* remove from cache list */
929 RTL_MEMORY_LOCK_ACQUIRE(&(g_cache_list.m_lock));
930 int active = QUEUE_STARTED_NAMED(cache, cache_) == 0;
931 QUEUE_REMOVE_NAMED(cache, cache_);
932 RTL_MEMORY_LOCK_RELEASE(&(g_cache_list.m_lock));
934 assert(active); // orphaned cache
935 (void)active;
937 /* cleanup magazine layer */
938 if (cache->m_magazine_cache != 0)
940 rtl_cache_type * mag_cache;
941 rtl_cache_magazine_type * mag;
943 /* prevent recursion */
944 mag_cache = cache->m_magazine_cache, cache->m_magazine_cache = 0;
946 /* cleanup cpu layer */
947 if ((mag = cache->m_cpu_curr) != 0)
949 cache->m_cpu_curr = 0;
950 rtl_cache_magazine_clear (cache, mag);
951 rtl_cache_free (mag_cache, mag);
953 if ((mag = cache->m_cpu_prev) != 0)
955 cache->m_cpu_prev = 0;
956 rtl_cache_magazine_clear (cache, mag);
957 rtl_cache_free (mag_cache, mag);
960 /* cleanup depot layer */
961 while ((mag = rtl_cache_depot_dequeue(&(cache->m_depot_full))) != 0)
963 rtl_cache_magazine_clear (cache, mag);
964 rtl_cache_free (mag_cache, mag);
966 while ((mag = rtl_cache_depot_dequeue(&(cache->m_depot_empty))) != 0)
968 rtl_cache_magazine_clear (cache, mag);
969 rtl_cache_free (mag_cache, mag);
973 // SAL_INFO(
974 // "sal.rtl",
975 // "rtl_cache_deactivate(" << cache->m_name << "): [slab]: allocs: "
976 // << cache->m_slab_stats.m_alloc << ", frees: "
977 // << cache->m_slab_stats.m_free << "; total: "
978 // << cache->m_slab_stats.m_mem_total << ", used: "
979 // << cache->m_slab_stats.m_mem_alloc << "; [cpu]: allocs: "
980 // << cache->m_cpu_stats.m_alloc << ", frees: "
981 // << cache->m_cpu_stats.m_free << "; [total]: allocs: "
982 // << (cache->m_slab_stats.m_alloc + cache->m_cpu_stats.m_alloc)
983 // << ", frees: "
984 // << (cache->m_slab_stats.m_free + cache->m_cpu_stats.m_free));
986 /* cleanup slab layer */
987 if (cache->m_slab_stats.m_alloc > cache->m_slab_stats.m_free)
989 // SAL_INFO(
990 // "sal.rtl",
991 // "rtl_cache_deactivate(" << cache->m_name << "): cleaning up "
992 // << (cache->m_slab_stats.m_alloc - cache->m_slab_stats.m_free)
993 // << " leaked buffer(s) [" << cache->m_slab_stats.m_mem_alloc
994 // << " bytes] [" << cache->m_slab_stats.m_mem_total << " total]");
996 if (cache->m_features & RTL_CACHE_FEATURE_HASH)
998 /* cleanup bufctl(s) for leaking buffer(s) */
999 sal_Size i, n = cache->m_hash_size;
1000 for (i = 0; i < n; i++)
1002 rtl_cache_bufctl_type * bufctl;
1003 while ((bufctl = cache->m_hash_table[i]) != 0)
1005 /* pop from hash table */
1006 cache->m_hash_table[i] = bufctl->m_next, bufctl->m_next = 0;
1008 /* return to bufctl cache */
1009 rtl_cache_free (gp_cache_bufctl_cache, bufctl);
1014 /* force cleanup of remaining slabs */
1015 rtl_cache_slab_type *head, *slab;
1017 head = &(cache->m_used_head);
1018 for (slab = head->m_slab_next; slab != head; slab = head->m_slab_next)
1020 /* remove from 'used' queue */
1021 QUEUE_REMOVE_NAMED(slab, slab_);
1023 /* update stats */
1024 cache->m_slab_stats.m_mem_total -= cache->m_slab_size;
1026 /* free slab */
1027 rtl_cache_slab_destroy (cache, slab);
1030 head = &(cache->m_free_head);
1031 for (slab = head->m_slab_next; slab != head; slab = head->m_slab_next)
1033 /* remove from 'free' queue */
1034 QUEUE_REMOVE_NAMED(slab, slab_);
1036 /* update stats */
1037 cache->m_slab_stats.m_mem_total -= cache->m_slab_size;
1039 /* free slab */
1040 rtl_cache_slab_destroy (cache, slab);
1045 if (cache->m_hash_table != cache->m_hash_table_0)
1047 rtl_arena_free (
1048 gp_cache_arena,
1049 cache->m_hash_table,
1050 cache->m_hash_size * sizeof(rtl_cache_bufctl_type*));
1052 cache->m_hash_table = cache->m_hash_table_0;
1053 cache->m_hash_size = RTL_CACHE_HASH_SIZE;
1054 cache->m_hash_shift = highbit(cache->m_hash_size) - 1;
1058 } //namespace
1060 /* ================================================================= *
1062 * cache implementation.
1064 * ================================================================= */
1066 /** rtl_cache_create()
1068 rtl_cache_type *
1069 SAL_CALL rtl_cache_create (
1070 const char * name,
1071 sal_Size objsize,
1072 sal_Size objalign,
1073 int (SAL_CALL * constructor)(void * obj, void * userarg),
1074 void (SAL_CALL * destructor) (void * obj, void * userarg),
1075 void (SAL_CALL * reclaim) (void * userarg),
1076 void * userarg,
1077 rtl_arena_type * source,
1078 int flags
1079 ) SAL_THROW_EXTERN_C()
1081 rtl_cache_type * result = 0;
1082 sal_Size size = sizeof(rtl_cache_type);
1084 try_alloc:
1085 result = (rtl_cache_type*)rtl_arena_alloc (gp_cache_arena, &size);
1086 if (result != 0)
1088 rtl_cache_type * cache = result;
1089 (void) rtl_cache_constructor (cache);
1091 if (!source)
1093 /* use default arena */
1094 assert(gp_default_arena != 0);
1095 source = gp_default_arena;
1098 result = rtl_cache_activate (
1099 cache,
1100 name,
1101 objsize,
1102 objalign,
1103 constructor,
1104 destructor,
1105 reclaim,
1106 userarg,
1107 source,
1108 flags
1111 if (result == 0)
1113 /* activation failed */
1114 rtl_cache_deactivate (cache);
1115 rtl_cache_destructor (cache);
1116 rtl_arena_free (gp_cache_arena, cache, size);
1119 else if (gp_cache_arena == 0)
1121 ensureCacheSingleton();
1122 if (gp_cache_arena)
1124 /* try again */
1125 goto try_alloc;
1128 return (result);
1131 /** rtl_cache_destroy()
1133 void SAL_CALL rtl_cache_destroy (
1134 rtl_cache_type * cache
1135 ) SAL_THROW_EXTERN_C()
1137 if (cache != 0)
1139 rtl_cache_deactivate (cache);
1140 rtl_cache_destructor (cache);
1141 rtl_arena_free (gp_cache_arena, cache, sizeof(rtl_cache_type));
1145 /** rtl_cache_alloc()
1147 void *
1148 SAL_CALL rtl_cache_alloc (
1149 rtl_cache_type * cache
1150 ) SAL_THROW_EXTERN_C()
1152 void * obj = 0;
1154 if (cache == 0)
1155 return (0);
1157 if (alloc_mode == AMode_SYSTEM)
1159 obj = rtl_allocateMemory(cache->m_type_size);
1160 if ((obj != 0) && (cache->m_constructor != 0))
1162 if (!((cache->m_constructor)(obj, cache->m_userarg)))
1164 /* construction failure */
1165 rtl_freeMemory(obj), obj = 0;
1168 return obj;
1171 RTL_MEMORY_LOCK_ACQUIRE(&(cache->m_depot_lock));
1172 if (cache->m_cpu_curr != 0)
1174 for (;;)
1176 /* take object from magazine layer */
1177 rtl_cache_magazine_type *curr, *prev, *temp;
1179 curr = cache->m_cpu_curr;
1180 if ((curr != 0) && (curr->m_mag_used > 0))
1182 obj = curr->m_objects[--curr->m_mag_used];
1183 cache->m_cpu_stats.m_alloc += 1;
1184 RTL_MEMORY_LOCK_RELEASE(&(cache->m_depot_lock));
1186 return (obj);
1189 prev = cache->m_cpu_prev;
1190 if ((prev != 0) && (prev->m_mag_used > 0))
1192 temp = cache->m_cpu_curr;
1193 cache->m_cpu_curr = cache->m_cpu_prev;
1194 cache->m_cpu_prev = temp;
1196 continue;
1199 temp = rtl_cache_depot_exchange_alloc (cache, prev);
1200 if (temp != 0)
1202 cache->m_cpu_prev = cache->m_cpu_curr;
1203 cache->m_cpu_curr = temp;
1205 continue;
1208 /* no full magazine: fall through to slab layer */
1209 break;
1212 RTL_MEMORY_LOCK_RELEASE(&(cache->m_depot_lock));
1214 /* alloc buffer from slab layer */
1215 obj = rtl_cache_slab_alloc (cache);
1216 if ((obj != 0) && (cache->m_constructor != 0))
1218 /* construct object */
1219 if (!((cache->m_constructor)(obj, cache->m_userarg)))
1221 /* construction failure */
1222 rtl_cache_slab_free (cache, obj), obj = 0;
1225 return (obj);
1228 /** rtl_cache_free()
1230 void
1231 SAL_CALL rtl_cache_free (
1232 rtl_cache_type * cache,
1233 void * obj
1234 ) SAL_THROW_EXTERN_C()
1236 if ((obj != 0) && (cache != 0))
1238 if (alloc_mode == AMode_SYSTEM)
1240 if (cache->m_destructor != 0)
1242 /* destruct object */
1243 (cache->m_destructor)(obj, cache->m_userarg);
1245 rtl_freeMemory(obj);
1246 return;
1249 RTL_MEMORY_LOCK_ACQUIRE(&(cache->m_depot_lock));
1251 for (;;)
1253 /* return object to magazine layer */
1254 rtl_cache_magazine_type *curr, *prev, *temp;
1256 curr = cache->m_cpu_curr;
1257 if ((curr != 0) && (curr->m_mag_used < curr->m_mag_size))
1259 curr->m_objects[curr->m_mag_used++] = obj;
1260 cache->m_cpu_stats.m_free += 1;
1261 RTL_MEMORY_LOCK_RELEASE(&(cache->m_depot_lock));
1263 return;
1266 prev = cache->m_cpu_prev;
1267 if ((prev != 0) && (prev->m_mag_used == 0))
1269 temp = cache->m_cpu_curr;
1270 cache->m_cpu_curr = cache->m_cpu_prev;
1271 cache->m_cpu_prev = temp;
1273 continue;
1276 temp = rtl_cache_depot_exchange_free (cache, prev);
1277 if (temp != 0)
1279 cache->m_cpu_prev = cache->m_cpu_curr;
1280 cache->m_cpu_curr = temp;
1282 continue;
1285 if (rtl_cache_depot_populate(cache) != 0)
1287 continue;
1290 /* no empty magazine: fall through to slab layer */
1291 break;
1294 RTL_MEMORY_LOCK_RELEASE(&(cache->m_depot_lock));
1296 /* no space for constructed object in magazine layer */
1297 if (cache->m_destructor != 0)
1299 /* destruct object */
1300 (cache->m_destructor)(obj, cache->m_userarg);
1303 /* return buffer to slab layer */
1304 rtl_cache_slab_free (cache, obj);
1308 /* ================================================================= *
1310 * cache wsupdate (machdep) internals.
1312 * ================================================================= */
/** rtl_cache_wsupdate_init()
 *  Start the working-set-update thread.
 *  @precond g_cache_list.m_lock initialized
 */
static void
rtl_cache_wsupdate_init();

/** rtl_cache_wsupdate_wait()
 *  Block the update thread for up to 'seconds'.
 *  @precond g_cache_list.m_lock acquired
 */
static void
rtl_cache_wsupdate_wait (
    unsigned int seconds
);

/** rtl_cache_wsupdate_fini()
 *  Signal termination and join the update thread.
 */
static void
rtl_cache_wsupdate_fini();
1337 /* ================================================================= */
1339 #if defined(SAL_UNX)
1341 #include <sys/time.h>
1343 static void *
1344 rtl_cache_wsupdate_all (void * arg);
1346 static void
1347 rtl_cache_wsupdate_init()
1349 RTL_MEMORY_LOCK_ACQUIRE(&(g_cache_list.m_lock));
1350 g_cache_list.m_update_done = 0;
1351 (void) pthread_cond_init (&(g_cache_list.m_update_cond), NULL);
1352 if (pthread_create (
1353 &(g_cache_list.m_update_thread), NULL, rtl_cache_wsupdate_all, (void*)(10)) != 0)
1355 /* failure */
1356 g_cache_list.m_update_thread = (pthread_t)(0);
1358 RTL_MEMORY_LOCK_RELEASE(&(g_cache_list.m_lock));
/* Sleep on the update condition for at most 'seconds' seconds.
 * Called with g_cache_list.m_lock held; pthread_cond_timedwait()
 * atomically releases it while waiting and reacquires it on return.
 * Spurious or early wakeups are harmless: the caller re-checks
 * m_update_done in its loop.
 *
 * NOTE(review): the absolute deadline is derived from gettimeofday()
 * (wall clock), so a clock step changes the effective wait time —
 * tolerable here, as only the purge cadence is affected.
 */
static void
rtl_cache_wsupdate_wait (unsigned int seconds)
{
    if (seconds > 0)
    {
        timeval  now;
        timespec wakeup;

        gettimeofday(&now, 0);
        wakeup.tv_sec  = now.tv_sec + (seconds);
        wakeup.tv_nsec = now.tv_usec * 1000;

        /* result deliberately ignored: timeout and signal are
           handled identically by the caller */
        (void) pthread_cond_timedwait (
            &(g_cache_list.m_update_cond),
            &(g_cache_list.m_lock),
            &wakeup);
    }
}
1380 static void
1381 rtl_cache_wsupdate_fini()
1383 RTL_MEMORY_LOCK_ACQUIRE(&(g_cache_list.m_lock));
1384 g_cache_list.m_update_done = 1;
1385 pthread_cond_signal (&(g_cache_list.m_update_cond));
1386 RTL_MEMORY_LOCK_RELEASE(&(g_cache_list.m_lock));
1388 if (g_cache_list.m_update_thread != (pthread_t)(0))
1389 pthread_join (g_cache_list.m_update_thread, NULL);
1392 /* ================================================================= */
1394 #elif defined(SAL_W32)
1396 static DWORD WINAPI
1397 rtl_cache_wsupdate_all (void * arg);
/* Start the working-set update thread (Win32 variant).
 *
 * @precond g_cache_list.m_lock initialized
 */
static void
rtl_cache_wsupdate_init()
{
    DWORD dwThreadId;

    RTL_MEMORY_LOCK_ACQUIRE(&(g_cache_list.m_lock));
    g_cache_list.m_update_done = 0;
    /* manual-reset event, initially non-signaled; set exactly once,
       at shutdown, by rtl_cache_wsupdate_fini() */
    g_cache_list.m_update_cond = CreateEvent (0, TRUE, FALSE, 0);

    /* (LPVOID)(10): update interval in seconds, passed as thread arg */
    g_cache_list.m_update_thread =
        CreateThread (NULL, 0, rtl_cache_wsupdate_all, (LPVOID)(10), 0, &dwThreadId);
    /* NOTE(review): CreateEvent/CreateThread results are unchecked;
       a NULL handle would be passed to Wait/SetEvent later — confirm
       this is acceptable during bootstrap. */
    RTL_MEMORY_LOCK_RELEASE(&(g_cache_list.m_lock));
}
/* Sleep for at most 'seconds' seconds (Win32 variant).
 * Unlike pthread_cond_timedwait(), the list lock must be dropped
 * manually around the wait.  The event is manual-reset and is only
 * ever signaled at shutdown, so a signal arriving between the
 * release and the wait stays latched and cannot be lost.
 */
static void
rtl_cache_wsupdate_wait (unsigned int seconds)
{
    if (seconds > 0)
    {
        RTL_MEMORY_LOCK_RELEASE(&(g_cache_list.m_lock));
        WaitForSingleObject (g_cache_list.m_update_cond, (DWORD)(seconds * 1000));
        RTL_MEMORY_LOCK_ACQUIRE(&(g_cache_list.m_lock));
    }
}
/* Terminate the working-set update thread (Win32 variant):
 * latch the done flag, signal the manual-reset event, and wait
 * for the thread to exit.
 */
static void
rtl_cache_wsupdate_fini()
{
    RTL_MEMORY_LOCK_ACQUIRE(&(g_cache_list.m_lock));
    g_cache_list.m_update_done = 1;
    SetEvent (g_cache_list.m_update_cond);
    RTL_MEMORY_LOCK_RELEASE(&(g_cache_list.m_lock));

    WaitForSingleObject (g_cache_list.m_update_thread, INFINITE);
    /* NOTE(review): neither the event nor the thread handle is
       CloseHandle()d — a one-time leak at process shutdown. */
}
1435 #endif /* SAL_UNX || SAL_W32 */
1437 /* ================================================================= */
/** rtl_cache_depot_wsupdate()
 *  update depot stats and purge excess magazines.
 *
 *  min(m_curr_min, m_prev_min) is the number of magazines that were
 *  never needed over the last two update intervals; that many are
 *  dequeued and released.
 *
 *  @precond cache->m_depot_lock acquired
 */
static void
rtl_cache_depot_wsupdate (
    rtl_cache_type *       cache,
    rtl_cache_depot_type * depot
)
{
    sal_Size npurge;

    /* roll the working-set window forward */
    depot->m_prev_min = depot->m_curr_min;
    depot->m_curr_min = depot->m_mag_count;

    npurge = depot->m_curr_min < depot->m_prev_min ? depot->m_curr_min : depot->m_prev_min;
    for (; npurge > 0; npurge--)
    {
        rtl_cache_magazine_type * mag = rtl_cache_depot_dequeue (depot);
        if (mag != 0)
        {
            /* The depot lock must be dropped here: clearing the
               magazine pushes objects back through the slab layer,
               and freeing the magazine re-enters rtl_cache_free()
               on the magazine cache — both may take other locks.
               Depot state can change while unlocked; npurge was
               fixed up front, and dequeue returning 0 is tolerated. */
            RTL_MEMORY_LOCK_RELEASE(&(cache->m_depot_lock));
            rtl_cache_magazine_clear (cache, mag);
            rtl_cache_free (cache->m_magazine_cache, mag);
            RTL_MEMORY_LOCK_ACQUIRE(&(cache->m_depot_lock));
        }
    }
}
1469 /** rtl_cache_wsupdate()
1471 * @precond cache->m_depot_lock released
1473 static void
1474 rtl_cache_wsupdate (
1475 rtl_cache_type * cache
1478 if (cache->m_magazine_cache != 0)
1480 RTL_MEMORY_LOCK_ACQUIRE(&(cache->m_depot_lock));
1482 // SAL_INFO(
1483 // "sal.rtl",
1484 // "rtl_cache_wsupdate(" << cache->m_name
1485 // << ") [depot: count, curr_min, prev_min] full: "
1486 // << cache->m_depot_full.m_mag_count << ", "
1487 // << cache->m_depot_full.m_curr_min << ", "
1488 // << cache->m_depot_full.m_prev_min << "; empty: "
1489 // << cache->m_depot_empty.m_mag_count << ", "
1490 // << cache->m_depot_empty.m_curr_min << ", "
1491 // << cache->m_depot_empty.m_prev_min);
1493 rtl_cache_depot_wsupdate (cache, &(cache->m_depot_full));
1494 rtl_cache_depot_wsupdate (cache, &(cache->m_depot_empty));
1496 RTL_MEMORY_LOCK_RELEASE(&(cache->m_depot_lock));
/** rtl_cache_wsupdate_all()
 *  Body of the background update thread: wake roughly every 'arg'
 *  seconds and run a working-set update over every registered cache,
 *  until rtl_cache_wsupdate_fini() sets m_update_done.
 */
#if defined(SAL_UNX)
static void *
#elif defined(SAL_W32)
static DWORD WINAPI
#endif /* SAL_UNX || SAL_W32 */
rtl_cache_wsupdate_all (void * arg)
{
    /* decode the update interval smuggled in as the thread argument */
    unsigned int seconds = sal::static_int_cast< unsigned int >(
        reinterpret_cast< sal_uIntPtr >(arg));

    RTL_MEMORY_LOCK_ACQUIRE(&(g_cache_list.m_lock));
    while (!g_cache_list.m_update_done)
    {
        /* releases and reacquires g_cache_list.m_lock internally */
        rtl_cache_wsupdate_wait (seconds);
        if (!g_cache_list.m_update_done)
        {
            rtl_cache_type * head, * cache;

            /* walk the circular cache list; holding m_lock for the
               whole traversal serializes against cache creation
               and destruction elsewhere */
            head = &(g_cache_list.m_cache_head);
            for (cache = head->m_cache_next;
                 cache != head;
                 cache = cache->m_cache_next)
            {
                rtl_cache_wsupdate (cache);
            }
        }
    }
    RTL_MEMORY_LOCK_RELEASE(&(g_cache_list.m_lock));

    return (0);
}
1535 /* ================================================================= *
1537 * cache initialization.
1539 * ================================================================= */
/* Bootstrap the slab-cache allocator.  The order of the scoped
 * sections below is load-bearing: the internal arena and the
 * magazine cache must exist (magazine cache initially created with
 * RTL_CACHE_FLAG_NOMAGAZINE to break the self-dependency) before
 * any cache can activate its magazine layer.
 */
void
rtl_cache_init()
{
    {
        /* list of caches */
        RTL_MEMORY_LOCK_INIT(&(g_cache_list.m_lock));
        (void) rtl_cache_constructor (&(g_cache_list.m_cache_head));
    }
    {
        /* cache: internal arena */
        assert(gp_cache_arena == 0);

        gp_cache_arena = rtl_arena_create (
            "rtl_cache_internal_arena",
            64,   /* quantum */
            0,    /* no quantum caching */
            NULL, /* default source */
            rtl_arena_alloc,
            rtl_arena_free,
            0     /* flags */
        );
        assert(gp_cache_arena != 0);

        /* check 'gp_default_arena' initialization */
        assert(gp_default_arena != 0);
    }
    {
        /* cache: magazine cache
           (function-local static: storage lives for process lifetime,
           initialization deferred to this call) */
        static rtl_cache_type g_cache_magazine_cache;

        assert(gp_cache_magazine_cache == 0);
        (void) rtl_cache_constructor (&g_cache_magazine_cache);

        gp_cache_magazine_cache = rtl_cache_activate (
            &g_cache_magazine_cache,
            "rtl_cache_magazine_cache",
            sizeof(rtl_cache_magazine_type), /* objsize  */
            0,                               /* objalign */
            rtl_cache_magazine_constructor,
            rtl_cache_magazine_destructor,
            0, /* reclaim */
            0, /* userarg: NYI */
            gp_default_arena, /* source */
            RTL_CACHE_FLAG_NOMAGAZINE /* during bootstrap; activated below */
        );
        assert(gp_cache_magazine_cache != 0);

        /* activate magazine layer (self-referential: the magazine
           cache now serves its own magazines) */
        g_cache_magazine_cache.m_magazine_cache = gp_cache_magazine_cache;
    }
    {
        /* cache: slab (struct) cache */
        static rtl_cache_type g_cache_slab_cache;

        assert(gp_cache_slab_cache == 0);
        (void) rtl_cache_constructor (&g_cache_slab_cache);

        gp_cache_slab_cache = rtl_cache_activate (
            &g_cache_slab_cache,
            "rtl_cache_slab_cache",
            sizeof(rtl_cache_slab_type), /* objsize  */
            0,                           /* objalign */
            rtl_cache_slab_constructor,
            rtl_cache_slab_destructor,
            0,                /* reclaim */
            0,                /* userarg: none */
            gp_default_arena, /* source */
            0                 /* flags: none */
        );
        assert(gp_cache_slab_cache != 0);
    }
    {
        /* cache: bufctl cache */
        static rtl_cache_type g_cache_bufctl_cache;

        assert(gp_cache_bufctl_cache == 0);
        (void) rtl_cache_constructor (&g_cache_bufctl_cache);

        gp_cache_bufctl_cache = rtl_cache_activate (
            &g_cache_bufctl_cache,
            "rtl_cache_bufctl_cache",
            sizeof(rtl_cache_bufctl_type), /* objsize */
            0,                /* objalign */
            0,                /* constructor */
            0,                /* destructor */
            0,                /* reclaim */
            0,                /* userarg */
            gp_default_arena, /* source */
            0                 /* flags: none */
        );
        assert(gp_cache_bufctl_cache != 0);
    }

    /* start the background working-set update thread last, once all
       internal caches exist */
    rtl_cache_wsupdate_init();
    // SAL_INFO("sal.rtl", "rtl_cache_init completed");
}
1638 /* ================================================================= */
/* Tear down the slab-cache allocator, in reverse order of
 * rtl_cache_init(): stop the update thread first, then destroy the
 * internal caches (bufctl, slab, magazine), then the internal arena.
 * Guarded by gp_cache_arena so a second call is a no-op.
 */
void
rtl_cache_fini()
{
    if (gp_cache_arena != 0)
    {
        rtl_cache_type * cache, * head;

        rtl_cache_wsupdate_fini();

        if (gp_cache_bufctl_cache != 0)
        {
            /* comma-expression: take ownership and clear the global
               before deactivating, so no new use can sneak in */
            cache = gp_cache_bufctl_cache, gp_cache_bufctl_cache = 0;
            rtl_cache_deactivate (cache);
            rtl_cache_destructor (cache);
        }
        if (gp_cache_slab_cache != 0)
        {
            cache = gp_cache_slab_cache, gp_cache_slab_cache = 0;
            rtl_cache_deactivate (cache);
            rtl_cache_destructor (cache);
        }
        if (gp_cache_magazine_cache != 0)
        {
            cache = gp_cache_magazine_cache, gp_cache_magazine_cache = 0;
            rtl_cache_deactivate (cache);
            rtl_cache_destructor (cache);
        }
        /* NOTE(review): this inner check looks redundant — the outer
           'if' already established gp_cache_arena != 0 and nothing
           visible above resets it — confirm before simplifying. */
        if (gp_cache_arena != 0)
        {
            rtl_arena_destroy (gp_cache_arena);
            gp_cache_arena = 0;
        }

        /* any caches still on the list at this point were leaked by
           their owners; formerly reported via SAL_INFO per cache */
        RTL_MEMORY_LOCK_ACQUIRE(&(g_cache_list.m_lock));
        head = &(g_cache_list.m_cache_head);
        for (cache = head->m_cache_next; cache != head; cache = cache->m_cache_next)
        {
            // SAL_INFO(
            //     "sal.rtl",
            //     "rtl_cache_fini(" << cache->m_name << ") [slab]: allocs: "
            //     << cache->m_slab_stats.m_alloc << ", frees: "
            //     << cache->m_slab_stats.m_free << "; total: "
            //     << cache->m_slab_stats.m_mem_total << ", used: "
            //     << cache->m_slab_stats.m_mem_alloc << "; [cpu]: allocs: "
            //     << cache->m_cpu_stats.m_alloc << ", frees: "
            //     << cache->m_cpu_stats.m_free << "; [total]: allocs: "
            //     << (cache->m_slab_stats.m_alloc
            //         + cache->m_cpu_stats.m_alloc)
            //     << ", frees: "
            //     << (cache->m_slab_stats.m_free
            //         + cache->m_cpu_stats.m_free));
        }
        RTL_MEMORY_LOCK_RELEASE(&(g_cache_list.m_lock));
    }
    // SAL_INFO("sal.rtl", "rtl_cache_fini completed");
}
1697 /* vim:set shiftwidth=4 softtabstop=4 expandtab: */