//===-- sanitizer_common.h --------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is shared between run-time libraries of sanitizers.
//
// It declares common functions and classes that are used in both runtimes.
// Implementations of some functions are provided in sanitizer_common, while
// others must be defined by the run-time library itself.
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_COMMON_H
#define SANITIZER_COMMON_H
18 #include "sanitizer_flags.h"
19 #include "sanitizer_internal_defs.h"
20 #include "sanitizer_libc.h"
21 #include "sanitizer_list.h"
22 #include "sanitizer_mutex.h"
24 #if defined(_MSC_VER) && !defined(__clang__)
25 extern "C" void _ReadWriteBarrier();
26 #pragma intrinsic(_ReadWriteBarrier)
27 #endif
namespace __sanitizer {

struct AddressInfo;
struct BufferedStackTrace;
struct SignalContext;
struct StackTrace;
// Constants.
const uptr kWordSize = SANITIZER_WORDSIZE / 8;
const uptr kWordSizeInBits = 8 * kWordSize;

const uptr kCacheLineSize = SANITIZER_CACHE_LINE_SIZE;

const uptr kMaxPathLength = 4096;

const uptr kMaxThreadStackSize = 1 << 30;  // 1 GiB

const uptr kErrorMessageBufferSize = 1 << 16;

// Denotes fake PC values that come from JIT/JAVA/etc.
// For such PC values __tsan_symbolize_external_ex() will be called.
const u64 kExternalPCBit = 1ULL << 60;

extern const char *SanitizerToolName;  // Can be changed by the tool.

extern atomic_uint32_t current_verbosity;
inline void SetVerbosity(int verbosity) {
  atomic_store(&current_verbosity, verbosity, memory_order_relaxed);
}
inline int Verbosity() {
  return atomic_load(&current_verbosity, memory_order_relaxed);
}
#if SANITIZER_ANDROID
inline uptr GetPageSize() {
// Android post-M sysconf(_SC_PAGESIZE) crashes if called from .preinit_array.
  return 4096;
}
inline uptr GetPageSizeCached() {
  return 4096;
}
#else
uptr GetPageSize();
extern uptr PageSizeCached;
inline uptr GetPageSizeCached() {
  if (!PageSizeCached)
    PageSizeCached = GetPageSize();
  return PageSizeCached;
}
#endif
uptr GetMmapGranularity();
uptr GetMaxVirtualAddress();
uptr GetMaxUserVirtualAddress();
// Threads
tid_t GetTid();
int TgKill(pid_t pid, tid_t tid, int sig);
uptr GetThreadSelf();
void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
                                uptr *stack_bottom);
void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
                          uptr *tls_addr, uptr *tls_size);
// Memory management
void *MmapOrDie(uptr size, const char *mem_type, bool raw_report = false);
inline void *MmapOrDieQuietly(uptr size, const char *mem_type) {
  return MmapOrDie(size, mem_type, /*raw_report*/ true);
}
void UnmapOrDie(void *addr, uptr size);
// Behaves just like MmapOrDie, but tolerates an out-of-memory condition, in
// which case it returns nullptr.
void *MmapOrDieOnFatalError(uptr size, const char *mem_type);
bool MmapFixedNoReserve(uptr fixed_addr, uptr size, const char *name = nullptr)
    WARN_UNUSED_RESULT;
bool MmapFixedSuperNoReserve(uptr fixed_addr, uptr size,
                             const char *name = nullptr) WARN_UNUSED_RESULT;
void *MmapNoReserveOrDie(uptr size, const char *mem_type);
void *MmapFixedOrDie(uptr fixed_addr, uptr size, const char *name = nullptr);
// Behaves just like MmapFixedOrDie, but tolerates an out-of-memory condition,
// in which case it returns nullptr.
void *MmapFixedOrDieOnFatalError(uptr fixed_addr, uptr size,
                                 const char *name = nullptr);
void *MmapFixedNoAccess(uptr fixed_addr, uptr size, const char *name = nullptr);
void *MmapNoAccess(uptr size);
// Map aligned chunk of address space; size and alignment are powers of two.
// Dies on all but out of memory errors; in the latter case returns nullptr.
void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
                                   const char *mem_type);
// Disallow access to a memory range. Use MmapFixedNoAccess to allocate an
// inaccessible memory range.
bool MprotectNoAccess(uptr addr, uptr size);
bool MprotectReadOnly(uptr addr, uptr size);

void MprotectMallocZones(void *addr, int prot);
#if SANITIZER_LINUX
// Unmap memory. Currently only used on Linux.
void UnmapFromTo(uptr from, uptr to);
#endif
// Maps shadow_size_bytes of shadow memory and returns the shadow address. It
// will be aligned to the mmap granularity * 2^shadow_scale, or to
// 2^min_shadow_base_alignment if that is larger. The returned address will
// have max(2^min_shadow_base_alignment, mmap granularity) on the left, and
// shadow_size_bytes bytes on the right, which on Linux are mapped inaccessible.
// The high_mem_end may be updated if the original shadow size doesn't fit.
uptr MapDynamicShadow(uptr shadow_size_bytes, uptr shadow_scale,
                      uptr min_shadow_base_alignment, uptr &high_mem_end);
// Let S = max(shadow_size, num_aliases * alias_size, ring_buffer_size).
// Reserves 2*S bytes of address space to the right of the returned address and
// ring_buffer_size bytes to the left. The returned address is aligned to 2*S.
// Also creates num_aliases regions of accessible memory starting at offset S
// from the returned address. Each region has size alias_size and is backed by
// the same physical memory.
uptr MapDynamicShadowAndAliases(uptr shadow_size, uptr alias_size,
                                uptr num_aliases, uptr ring_buffer_size);
// Reserve memory range [beg, end]. If madvise_shadow is true then apply
// madvise (e.g. hugepages, core dumping) requested by options.
void ReserveShadowMemoryRange(uptr beg, uptr end, const char *name,
                              bool madvise_shadow = true);

// Protect size bytes of memory starting at addr. Also try to protect
// several pages at the start of the address space as specified by
// zero_base_shadow_start, at most up to the size or zero_base_max_shadow_start.
void ProtectGap(uptr addr, uptr size, uptr zero_base_shadow_start,
                uptr zero_base_max_shadow_start);
// Find an available address space.
uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding,
                              uptr *largest_gap_found, uptr *max_occupied_addr);

// Used to check if we can map shadow memory to a fixed location.
bool MemoryRangeIsAvailable(uptr range_start, uptr range_end);
// Releases memory pages entirely within the [beg, end] address range. Noop if
// the provided range does not contain at least one entire page.
void ReleaseMemoryPagesToOS(uptr beg, uptr end);
void IncreaseTotalMmap(uptr size);
void DecreaseTotalMmap(uptr size);
uptr GetRSS();
void SetShadowRegionHugePageMode(uptr addr, uptr length);
bool DontDumpShadowMemory(uptr addr, uptr length);
// Check if the built VMA size matches the runtime one.
void CheckVMASize();
void RunMallocHooks(const void *ptr, uptr size);
void RunFreeHooks(const void *ptr);
class ReservedAddressRange {
 public:
  uptr Init(uptr size, const char *name = nullptr, uptr fixed_addr = 0);
  uptr InitAligned(uptr size, uptr align, const char *name = nullptr);
  uptr Map(uptr fixed_addr, uptr size, const char *name = nullptr);
  uptr MapOrDie(uptr fixed_addr, uptr size, const char *name = nullptr);
  void Unmap(uptr addr, uptr size);
  void *base() const { return base_; }
  uptr size() const { return size_; }

 private:
  void* base_;
  uptr size_;
  const char* name_;
  uptr os_handle_;
};
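// Illustrative usage sketch (the size and name are hypothetical): Init
// reserves the address space, Map/MapOrDie commit pieces of it on demand.
//   ReservedAddressRange range;
//   uptr base = range.Init(1 << 20, "demo range");
//   range.MapOrDie(base, GetPageSizeCached());  // make the first page usable
//   range.Unmap(base, GetPageSizeCached());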
typedef void (*fill_profile_f)(uptr start, uptr rss, bool file,
                               /*out*/ uptr *stats);

// Parse the contents of /proc/self/smaps and generate a memory profile.
// |cb| is a tool-specific callback that fills the |stats| array.
void GetMemoryProfile(fill_profile_f cb, uptr *stats);
void ParseUnixMemoryProfile(fill_profile_f cb, uptr *stats, char *smaps,
                            uptr smaps_len);
// Simple low-level (mmap-based) allocator for internal use. Doesn't have a
// constructor, so all instances of LowLevelAllocator should be
// linker initialized.
class LowLevelAllocator {
 public:
  // Requires an external lock.
  void *Allocate(uptr size);

 private:
  char *allocated_end_;
  char *allocated_current_;
};
// Set the min alignment of LowLevelAllocator to at least alignment.
void SetLowLevelAllocateMinAlignment(uptr alignment);
typedef void (*LowLevelAllocateCallback)(uptr ptr, uptr size);
// Allows registering tool-specific callbacks for LowLevelAllocator.
// Passing NULL removes the callback.
void SetLowLevelAllocateCallback(LowLevelAllocateCallback callback);
// IO
void CatastrophicErrorWrite(const char *buffer, uptr length);
void RawWrite(const char *buffer);
bool ColorizeReports();
void RemoveANSIEscapeSequencesFromString(char *buffer);
void Printf(const char *format, ...) FORMAT(1, 2);
void Report(const char *format, ...) FORMAT(1, 2);
void SetPrintfAndReportCallback(void (*callback)(const char *));
#define VReport(level, ...)                                \
  do {                                                     \
    if ((uptr)Verbosity() >= (level)) Report(__VA_ARGS__); \
  } while (0)
#define VPrintf(level, ...)                                \
  do {                                                     \
    if ((uptr)Verbosity() >= (level)) Printf(__VA_ARGS__); \
  } while (0)
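// Illustrative examples (the messages and `shadow_size` are hypothetical):
// with verbosity >= 1 the first line prints; with verbosity >= 2 both do.
//   VReport(1, "%s: initialization done\n", SanitizerToolName);
//   VPrintf(2, "mapped %zd bytes of shadow\n", shadow_size);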
// Locks sanitizer error reporting and protects against nested errors.
class ScopedErrorReportLock {
 public:
  ScopedErrorReportLock() SANITIZER_ACQUIRE(mutex_) { Lock(); }
  ~ScopedErrorReportLock() SANITIZER_RELEASE(mutex_) { Unlock(); }

  static void Lock() SANITIZER_ACQUIRE(mutex_);
  static void Unlock() SANITIZER_RELEASE(mutex_);
  static void CheckLocked() SANITIZER_CHECK_LOCKED(mutex_);

 private:
  static atomic_uintptr_t reporting_thread_;
  static StaticSpinMutex mutex_;
};
extern uptr stoptheworld_tracer_pid;
extern uptr stoptheworld_tracer_ppid;

bool IsAccessibleMemoryRange(uptr beg, uptr size);

// Error report formatting.
const char *StripPathPrefix(const char *filepath,
                            const char *strip_file_prefix);
// Strip the directories from the module name.
const char *StripModuleName(const char *module);
// OS
uptr ReadBinaryName(/*out*/char *buf, uptr buf_len);
uptr ReadBinaryNameCached(/*out*/char *buf, uptr buf_len);
uptr ReadBinaryDir(/*out*/ char *buf, uptr buf_len);
uptr ReadLongProcessName(/*out*/ char *buf, uptr buf_len);
const char *GetProcessName();
void UpdateProcessName();
void CacheBinaryName();
void DisableCoreDumperIfNecessary();
void DumpProcessMap();
const char *GetEnv(const char *name);
bool SetEnv(const char *name, const char *value);

u32 GetUid();
void ReExec();
void CheckASLR();
void CheckMPROTECT();
char **GetArgv();
char **GetEnviron();
void PrintCmdline();
bool StackSizeIsUnlimited();
void SetStackSizeLimitInBytes(uptr limit);
bool AddressSpaceIsUnlimited();
void SetAddressSpaceUnlimited();
void AdjustStackSize(void *attr);
void PlatformPrepareForSandboxing(void *args);
void SetSandboxingCallback(void (*f)());
void InitializeCoverage(bool enabled, const char *coverage_dir);

void InitTlsSize();
uptr GetTlsSize();

// Other
void SleepForSeconds(unsigned seconds);
void SleepForMillis(unsigned millis);
u64 NanoTime();
u64 MonotonicNanoTime();
int Atexit(void (*function)(void));
bool TemplateMatch(const char *templ, const char *str);
// Exit
void NORETURN Abort();
void NORETURN Die();
void NORETURN
CheckFailed(const char *file, int line, const char *cond, u64 v1, u64 v2);
void NORETURN ReportMmapFailureAndDie(uptr size, const char *mem_type,
                                      const char *mmap_type, error_t err,
                                      bool raw_report = false);
// Specific tools may override the behavior of the "Die" function to do
// tool-specific work.
typedef void (*DieCallbackType)(void);

// It's possible to add several callbacks that will be run when "Die" is
// called. The callbacks are run in reverse order of registration. Tools are
// strongly encouraged to set up all callbacks during initialization, while
// there is only a single thread.
bool AddDieCallback(DieCallbackType callback);
bool RemoveDieCallback(DieCallbackType callback);

void SetUserDieCallback(DieCallbackType callback);

void SetCheckUnwindCallback(void (*callback)());
// Functions related to signal handling.
typedef void (*SignalHandlerType)(int, void *, void *);
HandleSignalMode GetHandleSignalMode(int signum);
void InstallDeadlySignalHandlers(SignalHandlerType handler);

// Signal reporting.
// Each sanitizer uses a slightly different implementation of stack unwinding.
typedef void (*UnwindSignalStackCallbackType)(const SignalContext &sig,
                                              const void *callback_context,
                                              BufferedStackTrace *stack);
// Print deadly signal report and die.
void HandleDeadlySignal(void *siginfo, void *context, u32 tid,
                        UnwindSignalStackCallbackType unwind,
                        const void *unwind_context);

// Part of HandleDeadlySignal, exposed for asan.
void StartReportDeadlySignal();
// Part of HandleDeadlySignal, exposed for asan.
void ReportDeadlySignal(const SignalContext &sig, u32 tid,
                        UnwindSignalStackCallbackType unwind,
                        const void *unwind_context);

// Alternative signal stack (POSIX-only).
void SetAlternateSignalStack();
void UnsetAlternateSignalStack();
// Construct a one-line string:
//   SUMMARY: SanitizerToolName: error_message
// and pass it to __sanitizer_report_error_summary.
// If alt_tool_name is provided, it's used in place of SanitizerToolName.
void ReportErrorSummary(const char *error_message,
                        const char *alt_tool_name = nullptr);
// Same as above, but construct error_message as:
//   error_type file:line[:column][ function]
void ReportErrorSummary(const char *error_type, const AddressInfo &info,
                        const char *alt_tool_name = nullptr);
// Same as above, but obtains AddressInfo by symbolizing top stack trace frame.
void ReportErrorSummary(const char *error_type, const StackTrace *trace,
                        const char *alt_tool_name = nullptr);

void ReportMmapWriteExec(int prot, int mflags);
// Math
#if SANITIZER_WINDOWS && !defined(__clang__) && !defined(__GNUC__)
extern "C" {
unsigned char _BitScanForward(unsigned long *index, unsigned long mask);
unsigned char _BitScanReverse(unsigned long *index, unsigned long mask);
#if defined(_WIN64)
unsigned char _BitScanForward64(unsigned long *index, unsigned __int64 mask);
unsigned char _BitScanReverse64(unsigned long *index, unsigned __int64 mask);
#endif
}
#endif
inline uptr MostSignificantSetBitIndex(uptr x) {
  CHECK_NE(x, 0U);
  unsigned long up;
#if !SANITIZER_WINDOWS || defined(__clang__) || defined(__GNUC__)
# ifdef _WIN64
  up = SANITIZER_WORDSIZE - 1 - __builtin_clzll(x);
# else
  up = SANITIZER_WORDSIZE - 1 - __builtin_clzl(x);
# endif
#elif defined(_WIN64)
  _BitScanReverse64(&up, x);
#else
  _BitScanReverse(&up, x);
#endif
  return up;
}
inline uptr LeastSignificantSetBitIndex(uptr x) {
  CHECK_NE(x, 0U);
  unsigned long up;
#if !SANITIZER_WINDOWS || defined(__clang__) || defined(__GNUC__)
# ifdef _WIN64
  up = __builtin_ctzll(x);
# else
  up = __builtin_ctzl(x);
# endif
#elif defined(_WIN64)
  _BitScanForward64(&up, x);
#else
  _BitScanForward(&up, x);
#endif
  return up;
}
inline constexpr bool IsPowerOfTwo(uptr x) { return (x & (x - 1)) == 0; }

inline uptr RoundUpToPowerOfTwo(uptr size) {
  CHECK(size);
  if (IsPowerOfTwo(size)) return size;

  uptr up = MostSignificantSetBitIndex(size);
  CHECK_LT(size, (1ULL << (up + 1)));
  CHECK_GT(size, (1ULL << up));
  return 1ULL << (up + 1);
}

inline constexpr uptr RoundUpTo(uptr size, uptr boundary) {
  RAW_CHECK(IsPowerOfTwo(boundary));
  return (size + boundary - 1) & ~(boundary - 1);
}

inline constexpr uptr RoundDownTo(uptr x, uptr boundary) {
  return x & ~(boundary - 1);
}

inline constexpr bool IsAligned(uptr a, uptr alignment) {
  return (a & (alignment - 1)) == 0;
}

inline uptr Log2(uptr x) {
  CHECK(IsPowerOfTwo(x));
  return LeastSignificantSetBitIndex(x);
}
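// Worked examples for the helpers above (boundary/alignment must be a power
// of two):
//   RoundUpTo(13, 8) == 16     RoundDownTo(13, 8) == 8
//   IsAligned(16, 8) == true   Log2(16) == 4
//   RoundUpToPowerOfTwo(9) == 16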
// Don't use std::min, std::max or std::swap, to minimize dependency
// on libstdc++.
template <class T>
constexpr T Min(T a, T b) {
  return a < b ? a : b;
}
template <class T>
constexpr T Max(T a, T b) {
  return a > b ? a : b;
}
template <class T>
constexpr T Abs(T a) {
  return a < 0 ? -a : a;
}
template<class T> void Swap(T& a, T& b) {
  T tmp = a;
  a = b;
  b = tmp;
}
// Char handling
inline bool IsSpace(int c) {
  return (c == ' ') || (c == '\n') || (c == '\t') ||
         (c == '\f') || (c == '\r') || (c == '\v');
}
inline bool IsDigit(int c) {
  return (c >= '0') && (c <= '9');
}
inline int ToLower(int c) {
  return (c >= 'A' && c <= 'Z') ? (c + 'a' - 'A') : c;
}
// A low-level vector based on mmap. May incur a significant memory overhead
// for small vectors.
// WARNING: The current implementation supports only POD types.
template<typename T>
class InternalMmapVectorNoCtor {
 public:
  using value_type = T;
  void Initialize(uptr initial_capacity) {
    capacity_bytes_ = 0;
    size_ = 0;
    data_ = 0;
    reserve(initial_capacity);
  }
  void Destroy() { UnmapOrDie(data_, capacity_bytes_); }
  T &operator[](uptr i) {
    CHECK_LT(i, size_);
    return data_[i];
  }
  const T &operator[](uptr i) const {
    CHECK_LT(i, size_);
    return data_[i];
  }
  void push_back(const T &element) {
    CHECK_LE(size_, capacity());
    if (size_ == capacity()) {
      uptr new_capacity = RoundUpToPowerOfTwo(size_ + 1);
      Realloc(new_capacity);
    }
    internal_memcpy(&data_[size_++], &element, sizeof(T));
  }
  T &back() {
    CHECK_GT(size_, 0);
    return data_[size_ - 1];
  }
  void pop_back() {
    CHECK_GT(size_, 0);
    size_--;
  }
  uptr size() const {
    return size_;
  }
  const T *data() const {
    return data_;
  }
  T *data() {
    return data_;
  }
  uptr capacity() const { return capacity_bytes_ / sizeof(T); }
  void reserve(uptr new_size) {
    // Never downsize internal buffer.
    if (new_size > capacity())
      Realloc(new_size);
  }
  void resize(uptr new_size) {
    if (new_size > size_) {
      reserve(new_size);
      internal_memset(&data_[size_], 0, sizeof(T) * (new_size - size_));
    }
    size_ = new_size;
  }

  void clear() { size_ = 0; }
  bool empty() const { return size() == 0; }

  const T *begin() const {
    return data();
  }
  T *begin() {
    return data();
  }
  const T *end() const {
    return data() + size();
  }
  T *end() {
    return data() + size();
  }

  void swap(InternalMmapVectorNoCtor &other) {
    Swap(data_, other.data_);
    Swap(capacity_bytes_, other.capacity_bytes_);
    Swap(size_, other.size_);
  }

 private:
  void Realloc(uptr new_capacity) {
    CHECK_GT(new_capacity, 0);
    CHECK_LE(size_, new_capacity);
    uptr new_capacity_bytes =
        RoundUpTo(new_capacity * sizeof(T), GetPageSizeCached());
    T *new_data = (T *)MmapOrDie(new_capacity_bytes, "InternalMmapVector");
    internal_memcpy(new_data, data_, size_ * sizeof(T));
    UnmapOrDie(data_, capacity_bytes_);
    data_ = new_data;
    capacity_bytes_ = new_capacity_bytes;
  }

  T *data_;
  uptr capacity_bytes_;
  uptr size_;
};
template <typename T>
bool operator==(const InternalMmapVectorNoCtor<T> &lhs,
                const InternalMmapVectorNoCtor<T> &rhs) {
  if (lhs.size() != rhs.size()) return false;
  return internal_memcmp(lhs.data(), rhs.data(), lhs.size() * sizeof(T)) == 0;
}

template <typename T>
bool operator!=(const InternalMmapVectorNoCtor<T> &lhs,
                const InternalMmapVectorNoCtor<T> &rhs) {
  return !(lhs == rhs);
}
template<typename T>
class InternalMmapVector : public InternalMmapVectorNoCtor<T> {
 public:
  InternalMmapVector() { InternalMmapVectorNoCtor<T>::Initialize(0); }
  explicit InternalMmapVector(uptr cnt) {
    InternalMmapVectorNoCtor<T>::Initialize(cnt);
    this->resize(cnt);
  }
  ~InternalMmapVector() { InternalMmapVectorNoCtor<T>::Destroy(); }
  // Disallow copies and moves.
  InternalMmapVector(const InternalMmapVector &) = delete;
  InternalMmapVector &operator=(const InternalMmapVector &) = delete;
  InternalMmapVector(InternalMmapVector &&) = delete;
  InternalMmapVector &operator=(InternalMmapVector &&) = delete;
};
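// Illustrative usage sketch (`pc` is a hypothetical uptr value; elements must
// be POD; storage comes from mmap and is released in the destructor):
//   InternalMmapVector<uptr> addrs;
//   addrs.push_back(pc);
//   for (uptr a : addrs) VPrintf(3, "0x%zx\n", a);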
class InternalScopedString {
 public:
  InternalScopedString() : buffer_(1) { buffer_[0] = '\0'; }

  uptr length() const { return buffer_.size() - 1; }
  void clear() {
    buffer_.resize(1);
    buffer_[0] = '\0';
  }
  void append(const char *format, ...) FORMAT(2, 3);
  const char *data() const { return buffer_.data(); }
  char *data() { return buffer_.data(); }

 private:
  InternalMmapVector<char> buffer_;
};
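// Illustrative usage sketch (`n` is a hypothetical int): build a report line
// incrementally, then emit it.
//   InternalScopedString str;
//   str.append("%s: found %d issues\n", SanitizerToolName, n);
//   Printf("%s", str.data());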
template <class T>
struct CompareLess {
  bool operator()(const T &a, const T &b) const { return a < b; }
};

// HeapSort for arrays and InternalMmapVector.
template <class T, class Compare = CompareLess<T>>
void Sort(T *v, uptr size, Compare comp = {}) {
  if (size < 2)
    return;
  // Stage 1: insert elements to the heap.
  for (uptr i = 1; i < size; i++) {
    uptr j, p;
    for (j = i; j > 0; j = p) {
      p = (j - 1) / 2;
      if (comp(v[p], v[j]))
        Swap(v[j], v[p]);
      else
        break;
    }
  }
  // Stage 2: swap largest element with the last one,
  // and sink the new top.
  for (uptr i = size - 1; i > 0; i--) {
    Swap(v[0], v[i]);
    uptr j, max_ind;
    for (j = 0; j < i; j = max_ind) {
      uptr left = 2 * j + 1;
      uptr right = 2 * j + 2;
      max_ind = j;
      if (left < i && comp(v[max_ind], v[left]))
        max_ind = left;
      if (right < i && comp(v[max_ind], v[right]))
        max_ind = right;
      if (max_ind != j)
        Swap(v[j], v[max_ind]);
      else
        break;
    }
  }
}
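// Illustrative example: ascending order by default, or pass a custom
// comparator, e.g. for descending order.
//   uptr a[] = {3, 1, 2};
//   Sort(a, ARRAY_SIZE(a));                                       // {1, 2, 3}
//   Sort(a, ARRAY_SIZE(a), [](uptr x, uptr y) { return x > y; }); // {3, 2, 1}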
// Works like std::lower_bound: finds the first element that is not less
// than val.
template <class Container, class T,
          class Compare = CompareLess<typename Container::value_type>>
uptr InternalLowerBound(const Container &v, const T &val, Compare comp = {}) {
  uptr first = 0;
  uptr last = v.size();
  while (last > first) {
    uptr mid = (first + last) / 2;
    if (comp(v[mid], val))
      first = mid + 1;
    else
      last = mid;
  }
  return first;
}
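// Illustrative example: for a sorted container v holding {1, 3, 5},
// InternalLowerBound(v, 4) == 2 (the index of 5) and
// InternalLowerBound(v, 6) == 3 (one past the end).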
enum ModuleArch {
  kModuleArchUnknown,
  kModuleArchI386,
  kModuleArchX86_64,
  kModuleArchX86_64H,
  kModuleArchARMV6,
  kModuleArchARMV7,
  kModuleArchARMV7S,
  kModuleArchARMV7K,
  kModuleArchARM64,
  kModuleArchRISCV64,
  kModuleArchHexagon
};
// Sorts and removes duplicates from the container.
template <class Container,
          class Compare = CompareLess<typename Container::value_type>>
void SortAndDedup(Container &v, Compare comp = {}) {
  Sort(v.data(), v.size(), comp);
  uptr size = v.size();
  if (size < 2)
    return;
  uptr last = 0;
  for (uptr i = 1; i < size; ++i) {
    if (comp(v[last], v[i])) {
      ++last;
      if (last != i)
        v[last] = v[i];
    } else {
      CHECK(!comp(v[i], v[last]));
    }
  }
  v.resize(last + 1);
}
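// Illustrative example: a vector holding {3, 1, 3, 2} holds {1, 2, 3} after
// SortAndDedup(v).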
constexpr uptr kDefaultFileMaxSize = FIRST_32_SECOND_64(1 << 26, 1 << 28);

// Opens the file 'file_name' and reads up to 'max_len' bytes.
// The resulting buffer is mmaped and stored in '*buff'.
// Returns true if file was successfully opened and read.
bool ReadFileToVector(const char *file_name,
                      InternalMmapVectorNoCtor<char> *buff,
                      uptr max_len = kDefaultFileMaxSize,
                      error_t *errno_p = nullptr);

// Opens the file 'file_name' and reads up to 'max_len' bytes.
// This function is less I/O efficient than ReadFileToVector, as it may reread
// the file multiple times to avoid mmap during read attempts. It's used to
// read procmaps, where short reads with mmap in between can produce an
// inconsistent result.
// The resulting buffer is mmaped and stored in '*buff'.
// The size of the mmaped region is stored in '*buff_size'.
// The total number of read bytes is stored in '*read_len'.
// Returns true if file was successfully opened and read.
bool ReadFileToBuffer(const char *file_name, char **buff, uptr *buff_size,
                      uptr *read_len, uptr max_len = kDefaultFileMaxSize,
                      error_t *errno_p = nullptr);
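// Illustrative usage sketch: the buffer is mmaped by the callee, so release it
// with UnmapOrDie when done.
//   char *buff;
//   uptr buff_size, read_len;
//   if (ReadFileToBuffer("/proc/self/maps", &buff, &buff_size, &read_len)) {
//     // ... use buff[0 .. read_len) ...
//     UnmapOrDie(buff, buff_size);
//   }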
int GetModuleAndOffsetForPc(uptr pc, char *module_name, uptr module_name_len,
                            uptr *pc_offset);

// When adding a new architecture, don't forget to also update
// script/asan_symbolize.py and sanitizer_symbolizer_libcdep.cpp.
inline const char *ModuleArchToString(ModuleArch arch) {
  switch (arch) {
    case kModuleArchUnknown:
      return "";
    case kModuleArchI386:
      return "i386";
    case kModuleArchX86_64:
      return "x86_64";
    case kModuleArchX86_64H:
      return "x86_64h";
    case kModuleArchARMV6:
      return "armv6";
    case kModuleArchARMV7:
      return "armv7";
    case kModuleArchARMV7S:
      return "armv7s";
    case kModuleArchARMV7K:
      return "armv7k";
    case kModuleArchARM64:
      return "arm64";
    case kModuleArchRISCV64:
      return "riscv64";
    case kModuleArchHexagon:
      return "hexagon";
  }
  CHECK(0 && "Invalid module arch");
  return "";
}

const uptr kModuleUUIDSize = 32;
const uptr kMaxSegName = 16;
// Represents a binary loaded into virtual memory (e.g. this can be an
// executable or a shared object).
class LoadedModule {
 public:
  LoadedModule()
      : full_name_(nullptr),
        base_address_(0),
        max_address_(0),
        arch_(kModuleArchUnknown),
        uuid_size_(0),
        instrumented_(false) {
    internal_memset(uuid_, 0, kModuleUUIDSize);
    ranges_.clear();
  }
  void set(const char *module_name, uptr base_address);
  void set(const char *module_name, uptr base_address, ModuleArch arch,
           u8 uuid[kModuleUUIDSize], bool instrumented);
  void setUuid(const char *uuid, uptr size);
  void clear();
  void addAddressRange(uptr beg, uptr end, bool executable, bool writable,
                       const char *name = nullptr);
  bool containsAddress(uptr address) const;

  const char *full_name() const { return full_name_; }
  uptr base_address() const { return base_address_; }
  uptr max_address() const { return max_address_; }
  ModuleArch arch() const { return arch_; }
  const u8 *uuid() const { return uuid_; }
  uptr uuid_size() const { return uuid_size_; }
  bool instrumented() const { return instrumented_; }

  struct AddressRange {
    AddressRange *next;
    uptr beg;
    uptr end;
    bool executable;
    bool writable;
    char name[kMaxSegName];

    AddressRange(uptr beg, uptr end, bool executable, bool writable,
                 const char *name)
        : next(nullptr),
          beg(beg),
          end(end),
          executable(executable),
          writable(writable) {
      internal_strncpy(this->name, (name ? name : ""), ARRAY_SIZE(this->name));
    }
  };

  const IntrusiveList<AddressRange> &ranges() const { return ranges_; }

 private:
  char *full_name_;  // Owned.
  uptr base_address_;
  uptr max_address_;
  ModuleArch arch_;
  uptr uuid_size_;
  u8 uuid_[kModuleUUIDSize];
  bool instrumented_;
  IntrusiveList<AddressRange> ranges_;
};
// List of LoadedModules. OS-dependent implementation is responsible for
// filling this information.
class ListOfModules {
 public:
  ListOfModules() : initialized(false) {}
  ~ListOfModules() { clear(); }
  void init();
  void fallbackInit();  // Uses fallback init if available, otherwise clears
  const LoadedModule *begin() const { return modules_.begin(); }
  LoadedModule *begin() { return modules_.begin(); }
  const LoadedModule *end() const { return modules_.end(); }
  LoadedModule *end() { return modules_.end(); }
  uptr size() const { return modules_.size(); }
  const LoadedModule &operator[](uptr i) const {
    CHECK_LT(i, modules_.size());
    return modules_[i];
  }

 private:
  void clear() {
    for (auto &module : modules_) module.clear();
    modules_.clear();
  }
  void clearOrInit() {
    initialized ? clear() : modules_.Initialize(kInitialCapacity);
    initialized = true;
  }

  InternalMmapVectorNoCtor<LoadedModule> modules_;
  // We rarely have more than 16K loaded modules.
  static const uptr kInitialCapacity = 1 << 14;
  bool initialized;
};
// Callback type for iterating over a set of memory ranges.
typedef void (*RangeIteratorCallback)(uptr begin, uptr end, void *arg);

enum AndroidApiLevel {
  ANDROID_NOT_ANDROID = 0,
  ANDROID_KITKAT = 19,
  ANDROID_LOLLIPOP_MR1 = 22,
  ANDROID_POST_LOLLIPOP = 23
};
void WriteToSyslog(const char *buffer);

#if defined(SANITIZER_WINDOWS) && defined(_MSC_VER) && !defined(__clang__)
#define SANITIZER_WIN_TRACE 1
#else
#define SANITIZER_WIN_TRACE 0
#endif

#if SANITIZER_MAC || SANITIZER_WIN_TRACE
void LogFullErrorReport(const char *buffer);
#else
inline void LogFullErrorReport(const char *buffer) {}
#endif
#if SANITIZER_LINUX || SANITIZER_MAC
void WriteOneLineToSyslog(const char *s);
void LogMessageOnPrintf(const char *str);
#else
inline void WriteOneLineToSyslog(const char *s) {}
inline void LogMessageOnPrintf(const char *str) {}
#endif

#if SANITIZER_LINUX || SANITIZER_WIN_TRACE
// Initialize Android logging. Any writes before this are silently lost.
void AndroidLogInit();
void SetAbortMessage(const char *);
#else
inline void AndroidLogInit() {}
// FIXME: MacOS implementation could use CRSetCrashLogMessage.
inline void SetAbortMessage(const char *) {}
#endif
#if SANITIZER_ANDROID
void SanitizerInitializeUnwinder();
AndroidApiLevel AndroidGetApiLevel();
#else
inline void AndroidLogWrite(const char *buffer_unused) {}
inline void SanitizerInitializeUnwinder() {}
inline AndroidApiLevel AndroidGetApiLevel() { return ANDROID_NOT_ANDROID; }
#endif
inline uptr GetPthreadDestructorIterations() {
#if SANITIZER_ANDROID
  return (AndroidGetApiLevel() == ANDROID_LOLLIPOP_MR1) ? 8 : 4;
#elif SANITIZER_POSIX
  return 4;
#else
// Unused on Windows.
  return 0;
#endif
}
void *internal_start_thread(void *(*func)(void*), void *arg);
void internal_join_thread(void *th);
void MaybeStartBackgroudThread();

// Make the compiler think that something is going on there.
// Use this inside a loop that looks like memset/memcpy/etc to prevent the
// compiler from recognising it and turning it into an actual call to
// memset/memcpy/etc.
static inline void SanitizerBreakOptimization(void *arg) {
#if defined(_MSC_VER) && !defined(__clang__)
  _ReadWriteBarrier();
#else
  __asm__ __volatile__("" : : "r" (arg) : "memory");
#endif
}
struct SignalContext {
  void *siginfo;
  void *context;
  uptr addr;
  uptr pc;
  uptr sp;
  uptr bp;
  bool is_memory_access;
  enum WriteFlag { Unknown, Read, Write } write_flag;

  // In some cases the kernel cannot provide the true faulting address; `addr`
  // will be zero then. This field allows us to distinguish between these
  // cases and dereferences of null.
  bool is_true_faulting_addr;

  // VS2013 doesn't implement unrestricted unions, so we need a trivial default
  // constructor.
  SignalContext() = default;

  // Creates signal context in a platform-specific manner.
  // SignalContext is going to keep pointers to siginfo and context without
  // owning them.
  SignalContext(void *siginfo, void *context)
      : siginfo(siginfo),
        context(context),
        addr(GetAddress()),
        is_memory_access(IsMemoryAccess()),
        write_flag(GetWriteFlag()),
        is_true_faulting_addr(IsTrueFaultingAddress()) {
    InitPcSpBp();
  }

  static void DumpAllRegisters(void *context);

  // Type of signal, e.g. SIGSEGV or EXCEPTION_ACCESS_VIOLATION.
  int GetType() const;

  // String description of the signal.
  const char *Describe() const;

  // Returns true if signal is stack overflow.
  bool IsStackOverflow() const;

 private:
  // Platform specific initialization.
  void InitPcSpBp();
  uptr GetAddress() const;
  WriteFlag GetWriteFlag() const;
  bool IsMemoryAccess() const;
  bool IsTrueFaultingAddress() const;
};
void InitializePlatformEarly();
void MaybeReexec();

template <typename Fn>
class RunOnDestruction {
 public:
  explicit RunOnDestruction(Fn fn) : fn_(fn) {}
  ~RunOnDestruction() { fn_(); }

 private:
  Fn fn_;
};

// A simple scope guard. Usage:
// auto cleanup = at_scope_exit([]{ do_cleanup; });
template <typename Fn>
RunOnDestruction<Fn> at_scope_exit(Fn fn) {
  return RunOnDestruction<Fn>(fn);
}
// Linux on 64-bit s390 had a nasty bug that crashed the whole machine if a
// process used virtual memory over 4TB (as many sanitizers like to do). This
// function will abort the process if it is running on a kernel that looks
// vulnerable.
#if SANITIZER_LINUX && SANITIZER_S390_64
void AvoidCVE_2016_2143();
#else
inline void AvoidCVE_2016_2143() {}
#endif
struct StackDepotStats {
  uptr n_uniq_ids;
  uptr allocated;
};

// The default value for the allocator_release_to_os_interval_ms common flag,
// indicating that the sanitizer allocator should not attempt to release
// memory to the OS.
const s32 kReleaseToOSIntervalNever = -1;
void CheckNoDeepBind(const char *filename, int flag);

// Returns the requested amount of random data (up to 256 bytes) that can then
// be used to seed a PRNG. Defaults to blocking like the underlying syscall.
bool GetRandom(void *buffer, uptr length, bool blocking = true);

// Returns the number of logical processors on the system.
u32 GetNumberOfCPUs();
extern u32 NumberOfCPUsCached;
inline u32 GetNumberOfCPUsCached() {
  if (!NumberOfCPUsCached)
    NumberOfCPUsCached = GetNumberOfCPUs();
  return NumberOfCPUsCached;
}
template <typename T>
class ArrayRef {
 public:
  ArrayRef() {}
  ArrayRef(T *begin, T *end) : begin_(begin), end_(end) {}

  T *begin() { return begin_; }
  T *end() { return end_; }

 private:
  T *begin_ = nullptr;
  T *end_ = nullptr;
};
}  // namespace __sanitizer

inline void *operator new(__sanitizer::operator_new_size_type size,
                          __sanitizer::LowLevelAllocator &alloc) {
  return alloc.Allocate(size);
}
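// Illustrative usage sketch (`MyTool` is a hypothetical type): placement-new
// from a LowLevelAllocator; such allocations are never freed.
//   static __sanitizer::LowLevelAllocator allocator;
//   MyTool *tool = new (allocator) MyTool();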
#endif  // SANITIZER_COMMON_H