/**
BlkInfo thread-local cache. Used for array appending in the conservative GC to avoid the lock when possible.

Note: this used to be in rt.lifetime, but was moved here to allow GCs to take over array operations.
*/
module core.internal.gc.blkcache;

import core.memory;     // GC.BlkInfo / GC.BlkAttr, aliased below
import core.attribute;  // @standalone, used on the module destructor

debug (PRINTF) import core.stdc.stdio : printf;

alias BlkInfo = GC.BlkInfo;
alias BlkAttr = GC.BlkAttr;

/**
  cache for the lookup of the block info
  */
private enum N_CACHE_BLOCKS = 8;

// note this is TLS, so no need to sync.
BlkInfo *__blkcache_storage;

static if (N_CACHE_BLOCKS == 1)
{
    // a single-entry cache needs no index bookkeeping at all.
    version = single_cache;
}
else
{
    //version=simple_cache; // uncomment to test simple cache strategy
    //version=random_cache; // uncomment to test random cache strategy

    // ensure N_CACHE_BLOCKS is power of 2.
    static assert(!((N_CACHE_BLOCKS - 1) & N_CACHE_BLOCKS));

    version (random_cache)
    {
        // state of the linear congruential generator used by the random
        // eviction strategy in __insertBlkInfoCache.
        int __nextRndNum = 0;
    }
    // index of the cache "head"; TLS, so no synchronization needed.
    int __nextBlkIdx;
}
/**
Lazily allocate and return the calling thread's block-info cache.

Returns: a pointer to this thread's array of `N_CACHE_BLOCKS` `BlkInfo`
entries, or `null` when the thread has no `ThreadBase` object (a detached
thread), in which case no cache is maintained.
*/
@property BlkInfo *__blkcache() nothrow @nogc
{
    if (!__blkcache_storage)
    {
        import core.stdc.stdlib;
        import core.stdc.string;
        import core.thread.threadbase;
        auto tBase = ThreadBase.getThis();
        if (tBase is null)
            // if we don't have a thread object, this is a detached thread, and
            // this won't be properly maintained by the GC.
            return null;

        // allocate the block cache for the first time
        immutable size = BlkInfo.sizeof * N_CACHE_BLOCKS;
        // use C alloc, because this may become a detached thread, and the GC
        // would then clean up the cache without zeroing this pointer.
        __blkcache_storage = cast(BlkInfo*) calloc(size, 1);
        // publish the cache on the thread object so the GC can find it
        // (see processGCMarks, which receives it back as `data`).
        tBase.tlsGCData = __blkcache_storage;
    }
    return __blkcache_storage;
}
// free the allocation on thread exit.
@standalone static ~this()
{
    if (__blkcache_storage)
    {
        import core.stdc.stdlib;
        import core.thread.threadbase;
        auto tBase = ThreadBase.getThis();
        // unhook the cache from the thread object first so nothing can
        // observe a dangling pointer once the storage is freed below.
        tBase.tlsGCData = null;
        free(__blkcache_storage);
        __blkcache_storage = null;
    }
}
/**
 * Indicates whether an address has been marked by the GC.
 */
enum IsMarked : int
{
         no, /// Address is not marked.
        yes, /// Address is marked.
    unknown, /// Address is not managed by the GC.
}

alias IsMarkedDg = IsMarked delegate(void* addr) nothrow; /// The isMarked callback function.
// we expect this to be called with the lock in place
//
// Walks one thread's block cache (`data`, as published via tlsGCData) and
// clears every entry whose base address the mark phase left unmarked, so a
// stale cache entry can never outlive a sweep.
void processGCMarks(void* data, scope IsMarkedDg isMarked) nothrow
{
    // a detached thread may have no cache at all — nothing to scrub.
    if (data is null)
        return;

    auto cache = cast(BlkInfo*) data;
    // called after the mark routine to eliminate block cache data when it
    // might be ready to sweep

    debug(PRINTF) printf("processing GC Marks, %p\n", cache);
    debug(PRINTF) foreach (i; 0 .. N_CACHE_BLOCKS)
    {
        printf("cache entry %d has base ptr %p\tsize %zd\tflags %x\n", i, cache[i].base, cache[i].size, cache[i].attr);
    }
    auto cache_end = cache + N_CACHE_BLOCKS;
    for (;cache < cache_end; ++cache)
    {
        // IsMarked.unknown (non-GC memory) is deliberately left alone;
        // only definitively-unmarked blocks are evicted.
        if (cache.base != null && isMarked(cache.base) == IsMarked.no)
        {
            debug(PRINTF) printf("clearing cache entry at %p\n", cache.base);
            cache.base = null; // clear that data.
        }
    }
}
unittest
{
    // Bugzilla 10701 - segfault in GC
    // NOTE(review): trailing statements of this test were lost in extraction;
    // reconstructed as free-then-collect, which matches the referenced bug —
    // confirm against upstream druntime.
    ubyte[] result; result.length = 4096;
    GC.free(result.ptr);
    GC.collect();
}
/**
Get the cached block info of an interior pointer.  Returns null if the
interior pointer's block is not cached.

NOTE: The following note was not valid, but is retained for historical
purposes. The data cannot be cleared because the stack contains a
reference to the affected block (e.g. through `interior`). Therefore,
the element will not be collected, and the data will remain valid.

ORIGINAL: The base ptr in this struct can be cleared asynchronously by the GC,
so any use of the returned BlkInfo should copy it and then check the
base ptr of the copy before actually using it.
*/
BlkInfo *__getBlkInfo(void *interior) nothrow @nogc
{
    BlkInfo *ptr = __blkcache;
    if (ptr is null)
        // if for some reason we don't have a cache, return null.
        return null;

    version (single_cache)
    {
        if (ptr.base && ptr.base <= interior && (interior - ptr.base) < ptr.size)
            return ptr;
        return null; // not in cache.
    }
    else version (simple_cache)
    {
        foreach (i; 0..N_CACHE_BLOCKS)
        {
            if (ptr.base && ptr.base <= interior && (interior - ptr.base) < ptr.size)
                return ptr;
            ++ptr; // advance to the next cache slot
        }
    }
    else
    {
        // try to do a smart lookup, using __nextBlkIdx as the "head":
        // scan from the head down to the start, then wrap and scan the
        // tail, so the most recently inserted entries are checked first.
        auto curi = ptr + __nextBlkIdx;
        for (auto i = curi; i >= ptr; --i)
        {
            if (i.base && i.base <= interior && cast(size_t)(interior - i.base) < i.size)
                return i;
        }

        for (auto i = ptr + N_CACHE_BLOCKS - 1; i > curi; --i)
        {
            if (i.base && i.base <= interior && cast(size_t)(interior - i.base) < i.size)
                return i;
        }
    }
    return null; // not in cache.
}
/**
Insert the block info `bi` into the thread-local cache.

Params:
    bi     = the block info to cache.
    curpos = the entry previously returned by `__getBlkInfo` for this block,
             or `null` when the block is not currently cached; used by the
             eviction strategies below to decide which slot to overwrite.
*/
void __insertBlkInfoCache(BlkInfo bi, BlkInfo *curpos) nothrow @nogc
{
    auto cache = __blkcache;
    if (cache is null)
        // no cache to use (detached thread).
        return;

    version (single_cache)
    {
        *cache = bi;
    }
    else version (simple_cache)
    {
        if (curpos)
            *curpos = bi;
        else
        {
            // note, this is a super-simple algorithm that does not care about
            // most recently used.  It simply uses a round-robin technique to
            // cache block info.  This means that the ordering of the cache
            // doesn't mean anything.  Certain patterns of allocation may
            // render the cache near-useless.
            cache[__nextBlkIdx] = bi;
            __nextBlkIdx = (__nextBlkIdx + 1) & (N_CACHE_BLOCKS - 1);
        }
    }
    else version (random_cache)
    {
        // strategy: if the block currently is in the cache, move the
        // current block index to the a random element and evict that
        // element.
        if (!curpos)
        {
            __nextBlkIdx = (__nextRndNum = 1664525 * __nextRndNum + 1013904223) & (N_CACHE_BLOCKS - 1);
            curpos = cache + __nextBlkIdx;
        }
        else
        {
            __nextBlkIdx = cast(int)(curpos - cache);
        }
        *curpos = bi;
    }
    else
    {
        // strategy: If the block currently is in the cache, swap it with
        // the head element.  Otherwise, move the head element up by one,
        // and insert it there.
        if (!curpos)
        {
            __nextBlkIdx = (__nextBlkIdx + 1) & (N_CACHE_BLOCKS - 1);
            curpos = cache + __nextBlkIdx;
        }
        else if (curpos !is cache + __nextBlkIdx)
        {
            *curpos = cache[__nextBlkIdx];
            curpos = cache + __nextBlkIdx;
        }
        *curpos = bi;
    }
}
// Debug-only dump of the calling thread's block cache.  Must live under
// debug (PRINTF) because printf is only imported in that configuration.
debug (PRINTF)
{
    extern(C) void printArrayCache()
    {
        auto ptr = __blkcache;
        if (ptr is null)
            // detached thread: __blkcache can return null, nothing to print.
            return;
        foreach (i; 0 .. N_CACHE_BLOCKS)
        {
            printf("  %d\taddr:% .8p\tsize:% .10zd\tflags:% .8x\n", i, ptr[i].base, ptr[i].size, ptr[i].attr);
        }
    }
}