//===-- sanitizer_flat_map.h ------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Part of the Sanitizer Allocator.
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_FLAT_MAP_H
#define SANITIZER_FLAT_MAP_H

#include "sanitizer_atomic.h"
#include "sanitizer_common.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_local_address_space_view.h"
#include "sanitizer_mutex.h"

namespace __sanitizer {

// Call these callbacks on mmap/munmap.
struct NoOpMapUnmapCallback {
  void OnMap(uptr p, uptr size) const {}
  void OnUnmap(uptr p, uptr size) const {}
};
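
// A minimal sketch of a custom callback, assuming only what this header
// defines: a MapUnmapCallback just needs OnMap/OnUnmap with these signatures.
// The name `MappedBytesCounter` and its static counter are hypothetical,
// for illustration only.
//
//   struct MappedBytesCounter {
//     static atomic_uintptr_t total_bytes;
//     void OnMap(uptr p, uptr size) const {
//       atomic_fetch_add(&total_bytes, size, memory_order_relaxed);
//     }
//     void OnUnmap(uptr p, uptr size) const {
//       atomic_fetch_sub(&total_bytes, size, memory_order_relaxed);
//     }
//   };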

// Maps integers in range [0, kSize) to values.
template <typename T, u64 kSize,
          typename AddressSpaceViewTy = LocalAddressSpaceView>
class FlatMap {
 public:
  using AddressSpaceView = AddressSpaceViewTy;
  void Init() { internal_memset(map_, 0, sizeof(map_)); }

  constexpr uptr size() const { return kSize; }

  bool contains(uptr idx) const {
    CHECK_LT(idx, kSize);
    return true;
  }

  T &operator[](uptr idx) {
    DCHECK_LT(idx, kSize);
    return map_[idx];
  }

  const T &operator[](uptr idx) const {
    DCHECK_LT(idx, kSize);
    return map_[idx];
  }

 private:
  T map_[kSize];
};
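
// A minimal usage sketch (illustrative, not part of the interface): a
// fixed-size table of counters backed by a plain array. Init() must be
// called before first use, since FlatMap has no constructor.
//
//   FlatMap<u32, 1024> counters;
//   counters.Init();                 // Zero-fills the backing array.
//   counters[42] += 1;               // Plain array indexing, no allocation.
//   CHECK(counters.contains(42));    // CHECKs idx < kSize.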

// TwoLevelMap maps integers in range [0, kSize1*kSize2) to values.
// It is implemented as a two-dimensional array: an array of kSize1 pointers
// to arrays of kSize2 values. The secondary arrays are mmapped on demand.
// Each value is initially zero and can be set to something else only once.
// Setting and getting values from multiple threads is safe w/o extra locking.
template <typename T, u64 kSize1, u64 kSize2,
          typename AddressSpaceViewTy = LocalAddressSpaceView,
          class MapUnmapCallback = NoOpMapUnmapCallback>
class TwoLevelMap {
  static_assert(IsPowerOfTwo(kSize2), "Use a power of two for performance.");

 public:
  using AddressSpaceView = AddressSpaceViewTy;
  void Init() {
    mu_.Init();
    internal_memset(map1_, 0, sizeof(map1_));
  }

  void TestOnlyUnmap() {
    for (uptr i = 0; i < kSize1; i++) {
      T *p = Get(i);
      if (!p)
        continue;
      MapUnmapCallback().OnUnmap(reinterpret_cast<uptr>(p), MmapSize());
      // Unmap the same number of bytes that Create() mapped.
      UnmapOrDie(p, MmapSize());
    }
    Init();
  }

  uptr MemoryUsage() const {
    uptr res = 0;
    for (uptr i = 0; i < kSize1; i++) {
      T *p = Get(i);
      if (!p)
        continue;
      res += MmapSize();
    }
    return res;
  }

  constexpr uptr size() const { return kSize1 * kSize2; }
  constexpr uptr size1() const { return kSize1; }
  constexpr uptr size2() const { return kSize2; }

  bool contains(uptr idx) const {
    CHECK_LT(idx, kSize1 * kSize2);
    return Get(idx / kSize2);
  }

  const T &operator[](uptr idx) const {
    DCHECK_LT(idx, kSize1 * kSize2);
    T *map2 = GetOrCreate(idx / kSize2);
    return *AddressSpaceView::Load(&map2[idx % kSize2]);
  }

  T &operator[](uptr idx) {
    DCHECK_LT(idx, kSize1 * kSize2);
    T *map2 = GetOrCreate(idx / kSize2);
    return *AddressSpaceView::LoadWritable(&map2[idx % kSize2]);
  }

 private:
  constexpr uptr MmapSize() const {
    return RoundUpTo(kSize2 * sizeof(T), GetPageSizeCached());
  }

  T *Get(uptr idx) const {
    DCHECK_LT(idx, kSize1);
    return reinterpret_cast<T *>(
        atomic_load(&map1_[idx], memory_order_acquire));
  }

  T *GetOrCreate(uptr idx) const {
    DCHECK_LT(idx, kSize1);
    // This code needs to use memory_order_acquire/consume, but we use
    // memory_order_relaxed for performance reasons (matters for arm64). We
    // expect memory_order_relaxed to be effectively equivalent to
    // memory_order_consume in this case for all relevant architectures: all
    // dependent data is reachable only by dereferencing the resulting pointer.
    // If the relaxed load fails to see the stored pointer, the code falls back
    // to Create() and reloads the value with the mutex locked, which acts as a
    // memory barrier.
    T *res = reinterpret_cast<T *>(atomic_load_relaxed(&map1_[idx]));
    if (LIKELY(res))
      return res;
    return Create(idx);
  }

  NOINLINE T *Create(uptr idx) const {
    SpinMutexLock l(&mu_);
    // Re-check under the lock: another thread may have created the array.
    T *res = Get(idx);
    if (!res) {
      res = reinterpret_cast<T *>(MmapOrDie(MmapSize(), "TwoLevelMap"));
      MapUnmapCallback().OnMap(reinterpret_cast<uptr>(res), MmapSize());
      atomic_store(&map1_[idx], reinterpret_cast<uptr>(res),
                   memory_order_release);
    }
    return res;
  }

  mutable StaticSpinMutex mu_;
  mutable atomic_uintptr_t map1_[kSize1];
};
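
// A minimal usage sketch (illustrative): the first access to an index maps
// the second-level array for idx / kSize2 on demand; later accesses from
// other threads observe the published pointer through the loads above
// without extra locking.
//
//   TwoLevelMap<u32, 128, 4096> m;   // Covers indices [0, 128 * 4096).
//   m.Init();
//   m[5000] = 7;                     // Maps the array for block 5000 / 4096.
//   u32 v = m[5000];                 // Safe from another thread; v == 7.
//   m.TestOnlyUnmap();               // Tests only: unmap all second levels.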

template <u64 kSize, typename AddressSpaceViewTy = LocalAddressSpaceView>
using FlatByteMap = FlatMap<u8, kSize, AddressSpaceViewTy>;

template <u64 kSize1, u64 kSize2,
          typename AddressSpaceViewTy = LocalAddressSpaceView,
          class MapUnmapCallback = NoOpMapUnmapCallback>
using TwoLevelByteMap =
    TwoLevelMap<u8, kSize1, kSize2, AddressSpaceViewTy, MapUnmapCallback>;
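
// A minimal sketch (illustrative) combining the byte-map alias with the
// hypothetical `MappedBytesCounter` callback sketched near the top of this
// header; allocators use maps like this for per-index metadata (one u8 per
// index, set-once semantics).
//
//   TwoLevelByteMap<1 << 12, 1 << 20, LocalAddressSpaceView,
//                   MappedBytesCounter> metadata;
//   metadata.Init();
//   metadata[0x123456] = 42;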
}  // namespace __sanitizer

#endif  // SANITIZER_FLAT_MAP_H