// Copyright (c) 2016 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#ifndef BITCOIN_SUPPORT_LOCKEDPOOL_H
#define BITCOIN_SUPPORT_LOCKEDPOOL_H

#include <stdint.h>
#include <list>
#include <map>
#include <memory>
#include <mutex>

/**
 * OS-dependent allocation and deallocation of locked/pinned memory pages.
 * Abstract base class.
 */
class LockedPageAllocator
{
public:
    virtual ~LockedPageAllocator() {}
    /** Allocate and lock memory pages.
     * If len is not a multiple of the system page size, it is rounded up.
     * Returns 0 in case of allocation failure.
     *
     * If locking the memory pages could not be accomplished, the memory is
     * still returned, but the lockingSuccess flag will be false.
     * lockingSuccess is undefined if the allocation fails.
     */
    virtual void* AllocateLocked(size_t len, bool *lockingSuccess) = 0;
    /** Unlock and free memory pages.
     * Clear the memory before unlocking.
     */
    virtual void FreeLocked(void* addr, size_t len) = 0;
    /** Get the total limit on the amount of memory that may be locked by this
     * process, in bytes. Return size_t max if there is no limit or the limit
     * is unknown. Return 0 if no memory can be locked at all.
     */
    virtual size_t GetLimit() = 0;
};
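
/* Illustrative sketch (an assumption, not part of this header): a POSIX
 * implementation of LockedPageAllocator could look roughly like the class
 * below, using mmap(2)/mlock(2) from <sys/mman.h> and getrlimit(2) from
 * <sys/resource.h>. The concrete, per-OS implementations live in the
 * accompanying .cpp file; the name and details here are for illustration only.
 *
 *   class ExamplePosixAllocator : public LockedPageAllocator
 *   {
 *   public:
 *       void* AllocateLocked(size_t len, bool *lockingSuccess) override
 *       {
 *           size_t page = static_cast<size_t>(sysconf(_SC_PAGESIZE));
 *           len = (len + page - 1) & ~(page - 1);    // round up to page size
 *           void* addr = mmap(nullptr, len, PROT_READ | PROT_WRITE,
 *                             MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *           if (addr == MAP_FAILED) return nullptr;  // allocation failure -> 0
 *           *lockingSuccess = mlock(addr, len) == 0; // pin pages; may fail
 *           return addr;
 *       }
 *       void FreeLocked(void* addr, size_t len) override
 *       {
 *           // Clear before unlocking; a real implementation should use a
 *           // cleanse routine that the compiler cannot optimize away.
 *           memset(addr, 0, len);
 *           munlock(addr, len);
 *           munmap(addr, len);
 *       }
 *       size_t GetLimit() override
 *       {
 *           struct rlimit rlim;
 *           if (getrlimit(RLIMIT_MEMLOCK, &rlim) == 0 && rlim.rlim_cur != RLIM_INFINITY)
 *               return rlim.rlim_cur;
 *           return std::numeric_limits<size_t>::max(); // no or unknown limit
 *       }
 *   };
 */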
/* An arena manages a contiguous region of memory by dividing it into
 * chunks.
 */
class Arena
{
public:
    Arena(void *base, size_t size, size_t alignment);
    virtual ~Arena();

    Arena(const Arena& other) = delete; // non construction-copyable
    Arena& operator=(const Arena&) = delete; // non copyable
    /** Memory statistics. */
    struct Stats
    {
        size_t used;
        size_t free;
        size_t total;
        size_t chunks_used;
        size_t chunks_free;
    };
    /** Allocate size bytes from this arena.
     * Returns pointer on success, or 0 if memory is full or
     * the application tried to allocate 0 bytes.
     */
    void* alloc(size_t size);
    /** Free a previously allocated chunk of memory.
     * Freeing the zero pointer has no effect.
     * Raises std::runtime_error in case of error.
     */
    void free(void *ptr);

    /** Get arena usage statistics */
    Stats stats() const;
    /** Return whether a pointer points inside this arena.
     * This returns base <= ptr < (base+size) so only use it for (inclusive)
     * chunk starting addresses.
     */
    bool addressInArena(void *ptr) const { return ptr >= base && ptr < end; }
private:
    /** Map of chunk address to chunk information. This class makes use of the
     * sorted order to merge previous and next chunks during deallocation.
     */
    std::map<char*, size_t> chunks_free;
    std::map<char*, size_t> chunks_used;
    /** Base address of arena */
    char* base;
    /** End address of arena */
    char* end;
    /** Minimum chunk alignment */
    size_t alignment;
};
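
/* Illustrative usage sketch (an assumption, not part of this header): an Arena
 * can manage any contiguous buffer; plain unlocked memory is used here purely
 * for the example.
 *
 *   static char buf[4096];
 *   Arena arena(buf, sizeof(buf), 16);      // 16-byte minimum chunk alignment
 *   void* p = arena.alloc(100);             // size is rounded up to alignment
 *   assert(p != nullptr && arena.addressInArena(p));
 *   Arena::Stats st = arena.stats();        // used/free byte and chunk counts
 *   arena.free(p);                          // adjacent free chunks are merged
 */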
/** Pool for locked memory chunks.
 *
 * To prevent sensitive key data from being swapped to disk, the memory in this pool
 * is locked/pinned.
 *
 * An arena manages a contiguous region of memory. The pool starts out with one arena
 * but can grow to multiple arenas if the need arises.
 *
 * Unlike a normal C heap, the administrative structures are separate from the managed
 * memory. This has been done because the sizes and bases of objects are not in themselves
 * sensitive information, and keeping them outside conserves precious locked memory. In
 * some operating systems the amount of memory that can be locked is small.
 */
class LockedPool
{
public:
    /** Size of one arena of locked memory. This is a compromise.
     * Do not set this too low, as managing many arenas will increase
     * allocation and deallocation overhead. Setting it too high allocates
     * more locked memory from the OS than strictly necessary.
     */
    static const size_t ARENA_SIZE = 256*1024;
    /** Chunk alignment. Another compromise. Setting this too high will waste
     * memory, setting it too low will facilitate fragmentation.
     */
    static const size_t ARENA_ALIGN = 16;
    /** Callback when allocation succeeds but locking fails. */
    typedef bool (*LockingFailed_Callback)();
    /** Memory statistics. */
    struct Stats
    {
        size_t used;
        size_t free;
        size_t total;
        size_t locked;
        size_t chunks_used;
        size_t chunks_free;
    };
    /** Create a new LockedPool. This takes ownership of the LockedPageAllocator;
     * you can only instantiate this with LockedPool(std::move(...)).
     *
     * The second argument is an optional callback invoked when locking a newly
     * allocated arena fails. If this callback is provided and returns false, the
     * allocation fails (hard fail); if it returns true, the allocation proceeds,
     * but the callback could warn the user.
     */
    explicit LockedPool(std::unique_ptr<LockedPageAllocator> allocator, LockingFailed_Callback lf_cb_in = nullptr);
    ~LockedPool();
    LockedPool(const LockedPool& other) = delete; // non construction-copyable
    LockedPool& operator=(const LockedPool&) = delete; // non copyable
    /** Allocate size bytes from this pool.
     * Returns pointer on success, or 0 if memory is full or
     * the application tried to allocate 0 bytes.
     */
    void* alloc(size_t size);
    /** Free a previously allocated chunk of memory.
     * Freeing the zero pointer has no effect.
     * Raises std::runtime_error in case of error.
     */
    void free(void *ptr);
    /** Get pool usage statistics */
    Stats stats() const;
private:
    std::unique_ptr<LockedPageAllocator> allocator;
    /** Create an arena from locked pages */
    class LockedPageArena: public Arena
    {
    public:
        LockedPageArena(LockedPageAllocator *alloc_in, void *base_in, size_t size, size_t align);
        ~LockedPageArena();
    private:
        void *base;
        size_t size;
        LockedPageAllocator *allocator;
    };
    bool new_arena(size_t size, size_t align);
    std::list<LockedPageArena> arenas;
    LockingFailed_Callback lf_cb;
    size_t cumulative_bytes_locked;
    /** Mutex protects access to this pool's data structures, including arenas. */
    mutable std::mutex mutex;
};
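
/* Illustrative usage sketch (an assumption: ExampleAllocator stands in for a
 * concrete LockedPageAllocator implementation such as the one sketched above).
 * In practice the pool is usually reached through LockedPoolManager below.
 *
 *   bool OnLockingFailed() { return true; } // warn, but allow the allocation
 *
 *   LockedPool pool(std::unique_ptr<LockedPageAllocator>(new ExampleAllocator()),
 *                   &OnLockingFailed);
 *   void* key = pool.alloc(32);             // locked memory for a 32-byte secret
 *   if (key != nullptr) {
 *       // ... store key material ...
 *       pool.free(key);                     // return the chunk to the pool
 *   }
 */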
/**
 * Singleton class to keep track of locked (ie, non-swappable) memory, for use in
 * std::allocator templates.
 *
 * Some implementations of the STL allocate memory in some constructors (e.g., see
 * MSVC's vector<T> implementation, where it allocates 1 byte of memory in the allocator.)
 * Due to the unpredictable order of static initializers, we have to make sure the
 * LockedPoolManager instance exists before any other STL-based objects that use
 * secure_allocator are created. So instead of having LockedPoolManager also be
 * static-initialized, it is created on demand.
 */
class LockedPoolManager : public LockedPool
{
public:
    /** Return the current instance, or create it once */
    static LockedPoolManager& Instance()
    {
        std::call_once(LockedPoolManager::init_flag, LockedPoolManager::CreateInstance);
        return *LockedPoolManager::_instance;
    }

private:
    explicit LockedPoolManager(std::unique_ptr<LockedPageAllocator> allocator);
    /** Create a new LockedPoolManager specialized to the OS */
    static void CreateInstance();
    /** Called when locking fails, warn the user here */
    static bool LockingFailed();
    static LockedPoolManager* _instance;
    static std::once_flag init_flag;
};
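
/* Illustrative usage sketch: the manager is created on first use, so this is
 * safe even from allocators that run during static initialization.
 *
 *   void* p = LockedPoolManager::Instance().alloc(32);
 *   if (p != nullptr) {
 *       // ... store secret key material in p ...
 *       LockedPoolManager::Instance().free(p);
 *   }
 */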
#endif // BITCOIN_SUPPORT_LOCKEDPOOL_H