//===-- sanitizer_win.cpp -------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries and implements windows-specific functions from
// sanitizer_libc.h.
//===----------------------------------------------------------------------===//

#include "sanitizer_platform.h"
#if SANITIZER_WINDOWS

#define WIN32_LEAN_AND_MEAN
#define NOGDI
#include <windows.h>
#include <io.h>
#include <psapi.h>
#include <stdlib.h>

#include "sanitizer_common.h"
#include "sanitizer_file.h"
#include "sanitizer_libc.h"
#include "sanitizer_mutex.h"
#include "sanitizer_placement_new.h"
#include "sanitizer_win_defs.h"

#if defined(PSAPI_VERSION) && PSAPI_VERSION == 1
#pragma comment(lib, "psapi")
#endif
#if SANITIZER_WIN_TRACE
#include <traceloggingprovider.h>
// Windows trace logging provider init
#pragma comment(lib, "advapi32.lib")
TRACELOGGING_DECLARE_PROVIDER(g_asan_provider);
// GUID must be the same in utils/AddressSanitizerLoggingProvider.wprp
TRACELOGGING_DEFINE_PROVIDER(g_asan_provider, "AddressSanitizerLoggingProvider",
                             (0x6c6c766d, 0x3846, 0x4e6a, 0xa4, 0xfb, 0x5b,
                              0x53, 0x0b, 0xd0, 0xf3, 0xfa));
#else
#define TraceLoggingUnregister(x)
#endif

// For WaitOnAddress
#  pragma comment(lib, "synchronization.lib")

// A macro to tell the compiler that this part of the code cannot be reached,
// if the compiler supports this feature. Since we're using this in
// code that is called when terminating the process, the expansion of the
// macro should not terminate the process to avoid infinite recursion.
#if defined(__clang__)
#  define BUILTIN_UNREACHABLE() __builtin_unreachable()
#elif defined(__GNUC__) && \
    (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5))
#  define BUILTIN_UNREACHABLE() __builtin_unreachable()
#elif defined(_MSC_VER)
#  define BUILTIN_UNREACHABLE() __assume(0)
#else
#  define BUILTIN_UNREACHABLE()
#endif
namespace __sanitizer {

#include "sanitizer_syscall_generic.inc"

// --------------------- sanitizer_common.h

uptr GetMmapGranularity() {
  SYSTEM_INFO si;
  GetSystemInfo(&si);
  return si.dwAllocationGranularity;
}

uptr GetMaxUserVirtualAddress() {
  SYSTEM_INFO si;
  GetSystemInfo(&si);
  return (uptr)si.lpMaximumApplicationAddress;
}

uptr GetMaxVirtualAddress() {
  return GetMaxUserVirtualAddress();
}

bool FileExists(const char *filename) {
  return ::GetFileAttributesA(filename) != INVALID_FILE_ATTRIBUTES;
}

bool DirExists(const char *path) {
  auto attr = ::GetFileAttributesA(path);
  return (attr != INVALID_FILE_ATTRIBUTES) && (attr & FILE_ATTRIBUTE_DIRECTORY);
}

uptr internal_getpid() {
  return GetProcessId(GetCurrentProcess());
}

int internal_dlinfo(void *handle, int request, void *p) {
  UNIMPLEMENTED();
}

// In contrast to POSIX, on Windows GetCurrentThreadId()
// returns a system-unique identifier.
tid_t GetTid() {
  return GetCurrentThreadId();
}

uptr GetThreadSelf() {
  return GetTid();
}
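// Note: the function below determines the current thread's stack bounds by
// calling VirtualQuery on an address that lives on the stack itself; the
// returned MEMORY_BASIC_INFORMATION then describes the containing stack
// allocation (AllocationBase is the reserve base, and BaseAddress plus
// RegionSize is the top of the queried region).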
#if !SANITIZER_GO
void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
                                uptr *stack_bottom) {
  MEMORY_BASIC_INFORMATION mbi;
  CHECK_NE(VirtualQuery(&mbi /* on stack */, &mbi, sizeof(mbi)), 0);
  // FIXME: is it possible for the stack to not be a single allocation?
  // Are these values what ASan expects to get (reserved, not committed;
  // including stack guard page) ?
  *stack_top = (uptr)mbi.BaseAddress + mbi.RegionSize;
  *stack_bottom = (uptr)mbi.AllocationBase;
}
#endif  // #if !SANITIZER_GO

bool ErrorIsOOM(error_t err) {
  // TODO: This should check which `err`s correspond to OOM.
  return false;
}
void *MmapOrDie(uptr size, const char *mem_type, bool raw_report) {
  void *rv = VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
  if (rv == 0)
    ReportMmapFailureAndDie(size, mem_type, "allocate",
                            GetLastError(), raw_report);
  return rv;
}

void UnmapOrDie(void *addr, uptr size, bool raw_report) {
  if (!size || !addr)
    return;

  MEMORY_BASIC_INFORMATION mbi;
  CHECK(VirtualQuery(addr, &mbi, sizeof(mbi)));

  // MEM_RELEASE can only be used to unmap whole regions previously mapped with
  // VirtualAlloc. So we first try MEM_RELEASE since it is better, and if that
  // fails try MEM_DECOMMIT.
  if (VirtualFree(addr, 0, MEM_RELEASE) == 0) {
    if (VirtualFree(addr, size, MEM_DECOMMIT) == 0) {
      ReportMunmapFailureAndDie(addr, size, GetLastError(), raw_report);
    }
  }
}
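// Helper shared by the "OnFatalError" mmap wrappers below: treat
// ERROR_NOT_ENOUGH_MEMORY as a soft failure (return nullptr) and treat every
// other VirtualAlloc error as fatal.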
static void *ReturnNullptrOnOOMOrDie(uptr size, const char *mem_type,
                                     const char *mmap_type) {
  error_t last_error = GetLastError();
  if (last_error == ERROR_NOT_ENOUGH_MEMORY)
    return nullptr;
  ReportMmapFailureAndDie(size, mem_type, mmap_type, last_error);
}

void *MmapOrDieOnFatalError(uptr size, const char *mem_type) {
  void *rv = VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
  if (rv == 0)
    return ReturnNullptrOnOOMOrDie(size, mem_type, "allocate");
  return rv;
}

// We want to map a chunk of address space aligned to 'alignment'.
void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
                                   const char *mem_type) {
  CHECK(IsPowerOfTwo(size));
  CHECK(IsPowerOfTwo(alignment));

  // Windows will align our allocations to at least 64K.
  alignment = Max(alignment, GetMmapGranularity());

  uptr mapped_addr =
      (uptr)VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
  if (!mapped_addr)
    return ReturnNullptrOnOOMOrDie(size, mem_type, "allocate aligned");

  // If we got it right on the first try, return. Otherwise, unmap it and go to
  // the slow path.
  if (IsAligned(mapped_addr, alignment))
    return (void*)mapped_addr;
  if (VirtualFree((void *)mapped_addr, 0, MEM_RELEASE) == 0)
    ReportMmapFailureAndDie(size, mem_type, "deallocate", GetLastError());

  // If we didn't get an aligned address, overallocate, find an aligned address,
  // unmap, and try to allocate at that aligned address.
  int retries = 0;
  const int kMaxRetries = 10;
  for (; retries < kMaxRetries &&
         (mapped_addr == 0 || !IsAligned(mapped_addr, alignment));
       retries++) {
    // Overallocate size + alignment bytes.
    mapped_addr =
        (uptr)VirtualAlloc(0, size + alignment, MEM_RESERVE, PAGE_NOACCESS);
    if (!mapped_addr)
      return ReturnNullptrOnOOMOrDie(size, mem_type, "allocate aligned");

    // Find the aligned address.
    uptr aligned_addr = RoundUpTo(mapped_addr, alignment);

    // Free the overallocation.
    if (VirtualFree((void *)mapped_addr, 0, MEM_RELEASE) == 0)
      ReportMmapFailureAndDie(size, mem_type, "deallocate", GetLastError());

    // Attempt to allocate exactly the number of bytes we need at the aligned
    // address. This may fail for a number of reasons, in which case we continue
    // the loop.
    mapped_addr = (uptr)VirtualAlloc((void *)aligned_addr, size,
                                     MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
  }

  // Fail if we can't make this work quickly.
  if (retries == kMaxRetries && mapped_addr == 0)
    return ReturnNullptrOnOOMOrDie(size, mem_type, "allocate aligned");

  return (void *)mapped_addr;
}
// ZeroMmapFixedRegion zeroes out a region of memory previously returned from a
// call to one of the MmapFixed* helpers. On non-windows systems this would be
// done with another mmap, but on windows remapping is not an option.
// VirtualFree(DECOMMIT)+VirtualAlloc(RECOMMIT) would also be a way to zero the
// memory, but we can't do this atomically, so instead we fall back to
// internal_memset.
bool ZeroMmapFixedRegion(uptr fixed_addr, uptr size) {
  internal_memset((void*) fixed_addr, 0, size);
  return true;
}

bool MmapFixedNoReserve(uptr fixed_addr, uptr size, const char *name) {
  // FIXME: is this really "NoReserve"? On Win32 this does not matter much,
  // but on Win64 it does.
  (void)name;  // unsupported
#if !SANITIZER_GO && SANITIZER_WINDOWS64
  // On asan/Windows64, using MEM_COMMIT would result in error
  // 1455:ERROR_COMMITMENT_LIMIT.
  // Asan uses an exception handler to commit pages on demand.
  void *p = VirtualAlloc((LPVOID)fixed_addr, size, MEM_RESERVE, PAGE_READWRITE);
#else
  void *p = VirtualAlloc((LPVOID)fixed_addr, size, MEM_RESERVE | MEM_COMMIT,
                         PAGE_READWRITE);
#endif
  if (p == 0) {
    Report("ERROR: %s failed to "
           "allocate %p (%zd) bytes at %p (error code: %d)\n",
           SanitizerToolName, size, size, fixed_addr, GetLastError());
    return false;
  }
  return true;
}

bool MmapFixedSuperNoReserve(uptr fixed_addr, uptr size, const char *name) {
  // FIXME: Windows supports large pages too. Might be worth checking.
  return MmapFixedNoReserve(fixed_addr, size, name);
}

// Memory space mapped by 'MmapFixedOrDie' must have been reserved by
// 'MmapFixedNoAccess'.
void *MmapFixedOrDie(uptr fixed_addr, uptr size, const char *name) {
  void *p = VirtualAlloc((LPVOID)fixed_addr, size,
                         MEM_COMMIT, PAGE_READWRITE);
  if (p == 0) {
    char mem_type[30];
    internal_snprintf(mem_type, sizeof(mem_type), "memory at address %p",
                      fixed_addr);
    ReportMmapFailureAndDie(size, mem_type, "allocate", GetLastError());
  }
  return p;
}
// Uses fixed_addr for now.
// Will use offset instead once we've implemented this function for real.
uptr ReservedAddressRange::Map(uptr fixed_addr, uptr size, const char *name) {
  return reinterpret_cast<uptr>(MmapFixedOrDieOnFatalError(fixed_addr, size));
}

uptr ReservedAddressRange::MapOrDie(uptr fixed_addr, uptr size,
                                    const char *name) {
  return reinterpret_cast<uptr>(MmapFixedOrDie(fixed_addr, size));
}

void ReservedAddressRange::Unmap(uptr addr, uptr size) {
  // Only unmap if it covers the entire range.
  CHECK((addr == reinterpret_cast<uptr>(base_)) && (size == size_));
  // We unmap the whole range, just null out the base.
  base_ = nullptr;
  size_ = 0;
  UnmapOrDie(reinterpret_cast<void*>(addr), size);
}

void *MmapFixedOrDieOnFatalError(uptr fixed_addr, uptr size, const char *name) {
  void *p = VirtualAlloc((LPVOID)fixed_addr, size,
                         MEM_COMMIT, PAGE_READWRITE);
  if (p == 0) {
    char mem_type[30];
    internal_snprintf(mem_type, sizeof(mem_type), "memory at address %p",
                      fixed_addr);
    return ReturnNullptrOnOOMOrDie(size, mem_type, "allocate");
  }
  return p;
}

void *MmapNoReserveOrDie(uptr size, const char *mem_type) {
  // FIXME: make this really NoReserve?
  return MmapOrDie(size, mem_type);
}

uptr ReservedAddressRange::Init(uptr size, const char *name, uptr fixed_addr) {
  base_ = fixed_addr ? MmapFixedNoAccess(fixed_addr, size) : MmapNoAccess(size);
  size_ = size;
  name_ = name;
  (void)os_handle_;  // unsupported
  return reinterpret_cast<uptr>(base_);
}
void *MmapFixedNoAccess(uptr fixed_addr, uptr size, const char *name) {
  (void)name; // unsupported
  void *res = VirtualAlloc((LPVOID)fixed_addr, size,
                           MEM_RESERVE, PAGE_NOACCESS);
  if (res == 0)
    Report("WARNING: %s failed to "
           "mprotect %p (%zd) bytes at %p (error code: %d)\n",
           SanitizerToolName, size, size, fixed_addr, GetLastError());
  return res;
}

void *MmapNoAccess(uptr size) {
  void *res = VirtualAlloc(nullptr, size, MEM_RESERVE, PAGE_NOACCESS);
  if (res == 0)
    Report("WARNING: %s failed to "
           "mprotect %p (%zd) bytes (error code: %d)\n",
           SanitizerToolName, size, size, GetLastError());
  return res;
}

bool MprotectNoAccess(uptr addr, uptr size) {
  DWORD old_protection;
  return VirtualProtect((LPVOID)addr, size, PAGE_NOACCESS, &old_protection);
}

bool MprotectReadOnly(uptr addr, uptr size) {
  DWORD old_protection;
  return VirtualProtect((LPVOID)addr, size, PAGE_READONLY, &old_protection);
}

bool MprotectReadWrite(uptr addr, uptr size) {
  DWORD old_protection;
  return VirtualProtect((LPVOID)addr, size, PAGE_READWRITE, &old_protection);
}

void ReleaseMemoryPagesToOS(uptr beg, uptr end) {
  uptr beg_aligned = RoundDownTo(beg, GetPageSizeCached()),
       end_aligned = RoundDownTo(end, GetPageSizeCached());
  CHECK(beg < end);                // make sure the region is sane
  if (beg_aligned == end_aligned)  // make sure we're freeing at least 1 page;
    return;
  UnmapOrDie((void *)beg, end_aligned - beg_aligned);
}

void SetShadowRegionHugePageMode(uptr addr, uptr size) {
  // FIXME: probably similar to ReleaseMemoryToOS.
}

bool DontDumpShadowMemory(uptr addr, uptr length) {
  // This is almost useless on 32-bits.
  // FIXME: add madvise-analog when we move to 64-bits.
  return true;
}
uptr MapDynamicShadow(uptr shadow_size_bytes, uptr shadow_scale,
                      uptr min_shadow_base_alignment, UNUSED uptr &high_mem_end,
                      uptr granularity) {
  const uptr alignment =
      Max<uptr>(granularity << shadow_scale, 1ULL << min_shadow_base_alignment);
  const uptr left_padding =
      Max<uptr>(granularity, 1ULL << min_shadow_base_alignment);
  uptr space_size = shadow_size_bytes + left_padding;
  uptr shadow_start = FindAvailableMemoryRange(space_size, alignment,
                                               granularity, nullptr, nullptr);
  CHECK_NE((uptr)0, shadow_start);
  CHECK(IsAligned(shadow_start, alignment));
  return shadow_start;
}

uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding,
                              uptr *largest_gap_found,
                              uptr *max_occupied_addr) {
  uptr address = 0;
  while (true) {
    MEMORY_BASIC_INFORMATION info;
    if (!::VirtualQuery((void*)address, &info, sizeof(info)))
      return 0;

    if (info.State == MEM_FREE) {
      uptr shadow_address = RoundUpTo((uptr)info.BaseAddress + left_padding,
                                      alignment);
      if (shadow_address + size < (uptr)info.BaseAddress + info.RegionSize)
        return shadow_address;
    }

    // Move to the next region.
    address = (uptr)info.BaseAddress + info.RegionSize;
  }
  return 0;
}

uptr MapDynamicShadowAndAliases(uptr shadow_size, uptr alias_size,
                                uptr num_aliases, uptr ring_buffer_size) {
  CHECK(false && "HWASan aliasing is unimplemented on Windows");
  return 0;
}
bool MemoryRangeIsAvailable(uptr range_start, uptr range_end) {
  MEMORY_BASIC_INFORMATION mbi;
  CHECK(VirtualQuery((void *)range_start, &mbi, sizeof(mbi)));
  return mbi.Protect == PAGE_NOACCESS &&
         (uptr)mbi.BaseAddress + mbi.RegionSize >= range_end;
}

void *MapFileToMemory(const char *file_name, uptr *buff_size) {
  UNIMPLEMENTED();
}

void *MapWritableFileToMemory(void *addr, uptr size, fd_t fd, OFF_T offset) {
  UNIMPLEMENTED();
}
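// Environment variable cache used by GetEnv below. kMaxEnvValueLength matches
// the documented maximum size of a Windows environment variable value
// (32,767 characters, including the terminating null).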
static const int kMaxEnvNameLength = 128;
static const DWORD kMaxEnvValueLength = 32767;

namespace {

struct EnvVariable {
  char name[kMaxEnvNameLength];
  char value[kMaxEnvValueLength];
};

}  // namespace

static const int kEnvVariables = 5;
static EnvVariable env_vars[kEnvVariables];
static int num_env_vars;

const char *GetEnv(const char *name) {
  // Note: this implementation caches the values of the environment variables
  // and limits their quantity.
  for (int i = 0; i < num_env_vars; i++) {
    if (0 == internal_strcmp(name, env_vars[i].name))
      return env_vars[i].value;
  }
  CHECK_LT(num_env_vars, kEnvVariables);
  DWORD rv = GetEnvironmentVariableA(name, env_vars[num_env_vars].value,
                                     kMaxEnvValueLength);
  if (rv > 0 && rv < kMaxEnvValueLength) {
    CHECK_LT(internal_strlen(name), kMaxEnvNameLength);
    internal_strncpy(env_vars[num_env_vars].name, name, kMaxEnvNameLength);
    num_env_vars++;
    return env_vars[num_env_vars - 1].value;
  }
  return 0;
}
const char *GetPwd() {
  UNIMPLEMENTED();
}

namespace {
struct ModuleInfo {
  const char *filepath;
  uptr base_address;
  uptr end_address;
};

int CompareModulesBase(const void *pl, const void *pr) {
  const ModuleInfo *l = (const ModuleInfo *)pl, *r = (const ModuleInfo *)pr;
  if (l->base_address < r->base_address)
    return -1;
  return l->base_address > r->base_address;
}
}  // namespace
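// Print the list of loaded modules, sorted by base address via qsort with
// CompareModulesBase (defined above).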
void DumpProcessMap() {
  Report("Dumping process modules:\n");
  ListOfModules modules;
  modules.init();
  uptr num_modules = modules.size();

  InternalMmapVector<ModuleInfo> module_infos(num_modules);
  for (size_t i = 0; i < num_modules; ++i) {
    module_infos[i].filepath = modules[i].full_name();
    module_infos[i].base_address = modules[i].ranges().front()->beg;
    module_infos[i].end_address = modules[i].ranges().back()->end;
  }
  qsort(module_infos.data(), num_modules, sizeof(ModuleInfo),
        CompareModulesBase);

  for (size_t i = 0; i < num_modules; ++i) {
    const ModuleInfo &mi = module_infos[i];
    if (mi.end_address != 0) {
      Printf("\t%p-%p %s\n", mi.base_address, mi.end_address,
             mi.filepath[0] ? mi.filepath : "[no name]");
    } else if (mi.filepath[0]) {
      Printf("\t??\?-??? %s\n", mi.filepath);
    } else {
      Printf("\t???\n");
    }
  }
}

void DisableCoreDumperIfNecessary() {
  // Do nothing.
}

void PlatformPrepareForSandboxing(void *args) {}

bool StackSizeIsUnlimited() {
  UNIMPLEMENTED();
}

void SetStackSizeLimitInBytes(uptr limit) {
  UNIMPLEMENTED();
}

bool AddressSpaceIsUnlimited() {
  UNIMPLEMENTED();
}

void SetAddressSpaceUnlimited() {
  UNIMPLEMENTED();
}
bool IsPathSeparator(const char c) {
  return c == '\\' || c == '/';
}

static bool IsAlpha(char c) {
  c = ToLower(c);
  return c >= 'a' && c <= 'z';
}

bool IsAbsolutePath(const char *path) {
  return path != nullptr && IsAlpha(path[0]) && path[1] == ':' &&
         IsPathSeparator(path[2]);
}

void internal_usleep(u64 useconds) { Sleep(useconds / 1000); }
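// NanoTime is built on QueryPerformanceCounter; the counter frequency is
// queried once and cached in a function-local static. The scaling below
// converts ticks to nanoseconds before dividing by the frequency.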
u64 NanoTime() {
  static LARGE_INTEGER frequency = {};
  LARGE_INTEGER counter;
  if (UNLIKELY(frequency.QuadPart == 0)) {
    QueryPerformanceFrequency(&frequency);
    CHECK_NE(frequency.QuadPart, 0);
  }
  QueryPerformanceCounter(&counter);
  counter.QuadPart *= 1000ULL * 1000000ULL;
  counter.QuadPart /= frequency.QuadPart;
  return counter.QuadPart;
}

u64 MonotonicNanoTime() { return NanoTime(); }

bool CreateDir(const char *pathname) {
  return CreateDirectoryA(pathname, nullptr) != 0;
}

// Read the file to extract the ImageBase field from the PE header. If ASLR is
// disabled and this virtual address is available, the loader will typically
// load the image at this address. Therefore, we call it the preferred base. Any
// addresses in the DWARF typically assume that the object has been loaded at
// this address.
static uptr GetPreferredBase(const char *modname, char *buf, size_t buf_size) {
  fd_t fd = OpenFile(modname, RdOnly, nullptr);
  if (fd == kInvalidFd)
    return 0;
  FileCloser closer(fd);

  // Read just the DOS header.
  IMAGE_DOS_HEADER dos_header;
  uptr bytes_read;
  if (!ReadFromFile(fd, &dos_header, sizeof(dos_header), &bytes_read) ||
      bytes_read != sizeof(dos_header))
    return 0;

  // The file should start with the right signature.
  if (dos_header.e_magic != IMAGE_DOS_SIGNATURE)
    return 0;

  // The layout at e_lfanew is:
  // "PE\0\0"
  // IMAGE_FILE_HEADER
  // IMAGE_OPTIONAL_HEADER
  // Seek to e_lfanew and read all that data.
  if (::SetFilePointer(fd, dos_header.e_lfanew, nullptr, FILE_BEGIN) ==
      INVALID_SET_FILE_POINTER)
    return 0;
  if (!ReadFromFile(fd, buf, buf_size, &bytes_read) || bytes_read != buf_size)
    return 0;

  // Check for "PE\0\0" before the PE header.
  char *pe_sig = &buf[0];
  if (internal_memcmp(pe_sig, "PE\0\0", 4) != 0)
    return 0;

  // Skip over IMAGE_FILE_HEADER. We could do more validation here if we wanted.
  IMAGE_OPTIONAL_HEADER *pe_header =
      (IMAGE_OPTIONAL_HEADER *)(pe_sig + 4 + sizeof(IMAGE_FILE_HEADER));

  // Check for more magic in the PE header.
  if (pe_header->Magic != IMAGE_NT_OPTIONAL_HDR_MAGIC)
    return 0;

  // Finally, return the ImageBase.
  return (uptr)pe_header->ImageBase;
}
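// Enumerate the modules of the current process with EnumProcessModules and
// record each one as a single address range. Base addresses are rebased
// against the preferred ImageBase (see GetPreferredBase above) so that module
// offsets line up with the addresses the debug info was linked at.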
void ListOfModules::init() {
  clearOrInit();
  HANDLE cur_process = GetCurrentProcess();

  // Query the list of modules. Start by assuming there are no more than 256
  // modules and retry if that's not sufficient.
  HMODULE *hmodules = 0;
  uptr modules_buffer_size = sizeof(HMODULE) * 256;
  DWORD bytes_required;
  while (!hmodules) {
    hmodules = (HMODULE *)MmapOrDie(modules_buffer_size, __FUNCTION__);
    CHECK(EnumProcessModules(cur_process, hmodules, modules_buffer_size,
                             &bytes_required));
    if (bytes_required > modules_buffer_size) {
      // Either there turned out to be more than 256 hmodules, or new hmodules
      // could have loaded since the last try. Retry.
      UnmapOrDie(hmodules, modules_buffer_size);
      hmodules = 0;
      modules_buffer_size = bytes_required;
    }
  }

  InternalMmapVector<char> buf(4 + sizeof(IMAGE_FILE_HEADER) +
                               sizeof(IMAGE_OPTIONAL_HEADER));
  InternalMmapVector<wchar_t> modname_utf16(kMaxPathLength);
  InternalMmapVector<char> module_name(kMaxPathLength);
  // |num_modules| is the number of modules actually present.
  size_t num_modules = bytes_required / sizeof(HMODULE);
  for (size_t i = 0; i < num_modules; ++i) {
    HMODULE handle = hmodules[i];
    MODULEINFO mi;
    if (!GetModuleInformation(cur_process, handle, &mi, sizeof(mi)))
      continue;

    // Get the UTF-16 path and convert to UTF-8.
    int modname_utf16_len =
        GetModuleFileNameW(handle, &modname_utf16[0], kMaxPathLength);
    if (modname_utf16_len == 0)
      modname_utf16[0] = '\0';
    int module_name_len = ::WideCharToMultiByte(
        CP_UTF8, 0, &modname_utf16[0], modname_utf16_len + 1, &module_name[0],
        kMaxPathLength, NULL, NULL);
    module_name[module_name_len] = '\0';

    uptr base_address = (uptr)mi.lpBaseOfDll;
    uptr end_address = (uptr)mi.lpBaseOfDll + mi.SizeOfImage;

    // Adjust the base address of the module so that we get a VA instead of an
    // RVA when computing the module offset. This helps llvm-symbolizer find the
    // right DWARF CU. In the common case that the image is loaded at its
    // preferred address, we will now print normal virtual addresses.
    uptr preferred_base =
        GetPreferredBase(&module_name[0], &buf[0], buf.size());
    uptr adjusted_base = base_address - preferred_base;

    modules_.push_back(LoadedModule());
    LoadedModule &cur_module = modules_.back();
    cur_module.set(&module_name[0], adjusted_base);
    // We add the whole module as one single address range.
    cur_module.addAddressRange(base_address, end_address, /*executable*/ true,
                               /*writable*/ true);
  }
  UnmapOrDie(hmodules, modules_buffer_size);
}

void ListOfModules::fallbackInit() { clear(); }
// We can't use atexit() directly at __asan_init time as the CRT is not fully
// initialized at this point. Place the functions into a vector and use
// atexit() as soon as it is ready for use (i.e. after .CRT$XIC initializers).
InternalMmapVectorNoCtor<void (*)(void)> atexit_functions;

static int queueAtexit(void (*function)(void)) {
  atexit_functions.push_back(function);
  return 0;
}

// If Atexit() is being called after RunAtexit() has already been run, it needs
// to be able to call atexit() directly. Here we use a function pointer to
// switch out its behaviour.
// An example of where this is needed is the asan_dynamic runtime on MinGW-w64.
// On this environment, __asan_init is called during global constructor phase,
// way after calling the .CRT$XID initializer.
static int (*volatile queueOrCallAtExit)(void (*)(void)) = &queueAtexit;

int Atexit(void (*function)(void)) { return queueOrCallAtExit(function); }

static int RunAtexit() {
  TraceLoggingUnregister(g_asan_provider);
  queueOrCallAtExit = &atexit;
  int ret = 0;
  for (uptr i = 0; i < atexit_functions.size(); ++i) {
    ret |= atexit(atexit_functions[i]);
  }
  return ret;
}

#pragma section(".CRT$XID", long, read)
__declspec(allocate(".CRT$XID")) int (*__run_atexit)() = RunAtexit;
// ------------------ sanitizer_libc.h
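// OpenFile maps the sanitizer's FileAccessMode onto CreateFileA: reads are
// opened with generous sharing flags so other processes are not blocked,
// while writes use CREATE_ALWAYS and therefore truncate any existing file.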
fd_t OpenFile(const char *filename, FileAccessMode mode, error_t *last_error) {
  // FIXME: Use the wide variants to handle Unicode filenames.
  fd_t res;
  if (mode == RdOnly) {
    res = CreateFileA(filename, GENERIC_READ,
                      FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE,
                      nullptr, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, nullptr);
  } else if (mode == WrOnly) {
    res = CreateFileA(filename, GENERIC_WRITE, 0, nullptr, CREATE_ALWAYS,
                      FILE_ATTRIBUTE_NORMAL, nullptr);
  } else {
    UNIMPLEMENTED();
  }
  CHECK(res != kStdoutFd || kStdoutFd == kInvalidFd);
  CHECK(res != kStderrFd || kStderrFd == kInvalidFd);
  if (res == kInvalidFd && last_error)
    *last_error = GetLastError();
  return res;
}
void CloseFile(fd_t fd) {
  CloseHandle(fd);
}

bool ReadFromFile(fd_t fd, void *buff, uptr buff_size, uptr *bytes_read,
                  error_t *error_p) {
  CHECK(fd != kInvalidFd);

  // bytes_read can't be passed directly to ReadFile:
  // uptr is unsigned long long on 64-bit Windows.
  unsigned long num_read_long;

  bool success = ::ReadFile(fd, buff, buff_size, &num_read_long, nullptr);
  if (!success && error_p)
    *error_p = GetLastError();
  if (bytes_read)
    *bytes_read = num_read_long;
  return success;
}

bool SupportsColoredOutput(fd_t fd) {
  // FIXME: support colored output.
  return false;
}
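// WriteToFile accepts the pseudo-descriptors kStdoutFd/kStderrFd and resolves
// them to the real console handles with GetStdHandle at the time of the call.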
bool WriteToFile(fd_t fd, const void *buff, uptr buff_size, uptr *bytes_written,
                 error_t *error_p) {
  CHECK(fd != kInvalidFd);

  // Handle null optional parameters.
  error_t dummy_error;
  error_p = error_p ? error_p : &dummy_error;
  uptr dummy_bytes_written;
  bytes_written = bytes_written ? bytes_written : &dummy_bytes_written;

  // Initialize output parameters in case we fail.
  *error_p = 0;
  *bytes_written = 0;

  // Map the conventional Unix fds 1 and 2 to Windows handles. They might be
  // closed, in which case this will fail.
  if (fd == kStdoutFd || fd == kStderrFd) {
    fd = GetStdHandle(fd == kStdoutFd ? STD_OUTPUT_HANDLE : STD_ERROR_HANDLE);
    if (fd == 0) {
      *error_p = ERROR_INVALID_HANDLE;
      return false;
    }
  }

  DWORD bytes_written_32;
  if (!WriteFile(fd, buff, buff_size, &bytes_written_32, 0)) {
    *error_p = GetLastError();
    return false;
  }
  *bytes_written = bytes_written_32;
  return true;
}

uptr internal_sched_yield() {
  Sleep(0);
  return 0;
}

void internal__exit(int exitcode) {
  TraceLoggingUnregister(g_asan_provider);
  // ExitProcess runs some finalizers, so use TerminateProcess to avoid that.
  // The debugger doesn't stop on TerminateProcess like it does on ExitProcess,
  // so add our own breakpoint here.
  if (::IsDebuggerPresent())
    __debugbreak();
  TerminateProcess(GetCurrentProcess(), exitcode);
  BUILTIN_UNREACHABLE();
}

uptr internal_ftruncate(fd_t fd, uptr size) {
  UNIMPLEMENTED();
}

uptr GetRSS() {
  PROCESS_MEMORY_COUNTERS counters;
  if (!GetProcessMemoryInfo(GetCurrentProcess(), &counters, sizeof(counters)))
    return 0;
  return counters.WorkingSetSize;
}

void *internal_start_thread(void *(*func)(void *arg), void *arg) { return 0; }
void internal_join_thread(void *th) { }
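// The futex-style wait/wake API is implemented with WaitOnAddress and
// WakeByAddress*, which require Windows 8 or later and the
// synchronization.lib import library (see the #pragma comment(lib) above).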
void FutexWait(atomic_uint32_t *p, u32 cmp) {
  WaitOnAddress(p, &cmp, sizeof(cmp), INFINITE);
}

void FutexWake(atomic_uint32_t *p, u32 count) {
  if (count == 1)
    WakeByAddressSingle(p);
  else
    WakeByAddressAll(p);
}

void GetThreadStackAndTls(bool main, uptr *stk_begin, uptr *stk_end,
                          uptr *tls_begin, uptr *tls_end) {
  GetThreadStackTopAndBottom(main, stk_end, stk_begin);
  *tls_begin = 0;
  *tls_end = 0;
}
void ReportFile::Write(const char *buffer, uptr length) {
  if (!WriteToFile(fd, buffer, length)) {
    // stderr may be closed, but we may be able to print to the debugger
    // instead. This is the case when launching a program from Visual Studio,
    // and the following routine should write to its console.
    OutputDebugStringA(buffer);
  }
}

void SetAlternateSignalStack() {
  // FIXME: Decide what to do on Windows.
}

void UnsetAlternateSignalStack() {
  // FIXME: Decide what to do on Windows.
}

void InstallDeadlySignalHandlers(SignalHandlerType handler) {
  (void)handler;
  // FIXME: Decide what to do on Windows.
}

HandleSignalMode GetHandleSignalMode(int signum) {
  // FIXME: Decide what to do on Windows.
  return kHandleSignalNo;
}
// Check based on flags if we should handle this exception.
bool IsHandledDeadlyException(DWORD exceptionCode) {
  switch (exceptionCode) {
    case EXCEPTION_ACCESS_VIOLATION:
    case EXCEPTION_ARRAY_BOUNDS_EXCEEDED:
    case EXCEPTION_STACK_OVERFLOW:
    case EXCEPTION_DATATYPE_MISALIGNMENT:
    case EXCEPTION_IN_PAGE_ERROR:
      return common_flags()->handle_segv;
    case EXCEPTION_ILLEGAL_INSTRUCTION:
    case EXCEPTION_PRIV_INSTRUCTION:
    case EXCEPTION_BREAKPOINT:
      return common_flags()->handle_sigill;
    case EXCEPTION_FLT_DENORMAL_OPERAND:
    case EXCEPTION_FLT_DIVIDE_BY_ZERO:
    case EXCEPTION_FLT_INEXACT_RESULT:
    case EXCEPTION_FLT_INVALID_OPERATION:
    case EXCEPTION_FLT_OVERFLOW:
    case EXCEPTION_FLT_STACK_CHECK:
    case EXCEPTION_FLT_UNDERFLOW:
    case EXCEPTION_INT_DIVIDE_BY_ZERO:
    case EXCEPTION_INT_OVERFLOW:
      return common_flags()->handle_sigfpe;
  }
  return false;
}
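// Walks the given range one VirtualQuery region at a time and reports it as
// accessible only if every region is committed with a readable protection.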
bool IsAccessibleMemoryRange(uptr beg, uptr size) {
  SYSTEM_INFO si;
  GetNativeSystemInfo(&si);
  uptr page_size = si.dwPageSize;
  uptr page_mask = ~(page_size - 1);

  for (uptr page = beg & page_mask, end = (beg + size - 1) & page_mask;
       page <= end;) {
    MEMORY_BASIC_INFORMATION info;
    if (VirtualQuery((LPCVOID)page, &info, sizeof(info)) != sizeof(info))
      return false;

    if (info.Protect == 0 || info.Protect == PAGE_NOACCESS ||
        info.Protect == PAGE_EXECUTE)
      return false;

    if (info.RegionSize == 0)
      return false;

    page += info.RegionSize;
  }

  return true;
}
bool TryMemCpy(void *dest, const void *src, uptr n) {
  if (!IsAccessibleMemoryRange((uptr)src, n))
    return false;
  internal_memcpy(dest, src, n);
  return true;
}
bool SignalContext::IsStackOverflow() const {
  return (DWORD)GetType() == EXCEPTION_STACK_OVERFLOW;
}

void SignalContext::InitPcSpBp() {
  EXCEPTION_RECORD *exception_record = (EXCEPTION_RECORD *)siginfo;
  CONTEXT *context_record = (CONTEXT *)context;

  pc = (uptr)exception_record->ExceptionAddress;
# if SANITIZER_WINDOWS64
#  if SANITIZER_ARM64
  bp = (uptr)context_record->Fp;
  sp = (uptr)context_record->Sp;
#  else
  bp = (uptr)context_record->Rbp;
  sp = (uptr)context_record->Rsp;
#  endif
# else
#  if SANITIZER_ARM
  bp = (uptr)context_record->R11;
  sp = (uptr)context_record->Sp;
#  else
  bp = (uptr)context_record->Ebp;
  sp = (uptr)context_record->Esp;
#  endif
# endif
}
uptr SignalContext::GetAddress() const {
  EXCEPTION_RECORD *exception_record = (EXCEPTION_RECORD *)siginfo;
  if (exception_record->ExceptionCode == EXCEPTION_ACCESS_VIOLATION)
    return exception_record->ExceptionInformation[1];
  return (uptr)exception_record->ExceptionAddress;
}

bool SignalContext::IsMemoryAccess() const {
  return ((EXCEPTION_RECORD *)siginfo)->ExceptionCode ==
         EXCEPTION_ACCESS_VIOLATION;
}

bool SignalContext::IsTrueFaultingAddress() const { return true; }

SignalContext::WriteFlag SignalContext::GetWriteFlag() const {
  EXCEPTION_RECORD *exception_record = (EXCEPTION_RECORD *)siginfo;

  // The write flag is only available for access violation exceptions.
  if (exception_record->ExceptionCode != EXCEPTION_ACCESS_VIOLATION)
    return SignalContext::Unknown;

  // The contents of this array are documented at
  // https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-exception_record
  // The first element indicates read as 0, write as 1, or execute as 8. The
  // second element is the faulting address.
  switch (exception_record->ExceptionInformation[0]) {
    case 0:
      return SignalContext::Read;
    case 1:
      return SignalContext::Write;
    case 8:
      return SignalContext::Unknown;
  }
  return SignalContext::Unknown;
}
void SignalContext::DumpAllRegisters(void *context) {
  CONTEXT *ctx = (CONTEXT *)context;
# if defined(_M_X64)
  Report("Register values:\n");
  Printf("rax = %llx ", ctx->Rax);
  Printf("rbx = %llx ", ctx->Rbx);
  Printf("rcx = %llx ", ctx->Rcx);
  Printf("rdx = %llx ", ctx->Rdx);
  Printf("\n");
  Printf("rdi = %llx ", ctx->Rdi);
  Printf("rsi = %llx ", ctx->Rsi);
  Printf("rbp = %llx ", ctx->Rbp);
  Printf("rsp = %llx ", ctx->Rsp);
  Printf("\n");
  Printf("r8 = %llx ", ctx->R8);
  Printf("r9 = %llx ", ctx->R9);
  Printf("r10 = %llx ", ctx->R10);
  Printf("r11 = %llx ", ctx->R11);
  Printf("\n");
  Printf("r12 = %llx ", ctx->R12);
  Printf("r13 = %llx ", ctx->R13);
  Printf("r14 = %llx ", ctx->R14);
  Printf("r15 = %llx ", ctx->R15);
  Printf("\n");
# elif defined(_M_IX86)
  Report("Register values:\n");
  Printf("eax = %lx ", ctx->Eax);
  Printf("ebx = %lx ", ctx->Ebx);
  Printf("ecx = %lx ", ctx->Ecx);
  Printf("edx = %lx ", ctx->Edx);
  Printf("\n");
  Printf("edi = %lx ", ctx->Edi);
  Printf("esi = %lx ", ctx->Esi);
  Printf("ebp = %lx ", ctx->Ebp);
  Printf("esp = %lx ", ctx->Esp);
  Printf("\n");
# elif defined(_M_ARM64)
  Report("Register values:\n");
  for (int i = 0; i <= 30; i++) {
    Printf("x%d%s = %llx ", i, i < 10 ? " " : "", ctx->X[i]);
    if (i % 4 == 3)
      Printf("\n");
  }
# endif
}
int SignalContext::GetType() const {
  return static_cast<const EXCEPTION_RECORD *>(siginfo)->ExceptionCode;
}

const char *SignalContext::Describe() const {
  unsigned code = GetType();
  // Get the string description of the exception if this is a known deadly
  // exception.
  switch (code) {
    case EXCEPTION_ACCESS_VIOLATION:
      return "access-violation";
    case EXCEPTION_ARRAY_BOUNDS_EXCEEDED:
      return "array-bounds-exceeded";
    case EXCEPTION_STACK_OVERFLOW:
      return "stack-overflow";
    case EXCEPTION_DATATYPE_MISALIGNMENT:
      return "datatype-misalignment";
    case EXCEPTION_IN_PAGE_ERROR:
      return "in-page-error";
    case EXCEPTION_ILLEGAL_INSTRUCTION:
      return "illegal-instruction";
    case EXCEPTION_PRIV_INSTRUCTION:
      return "priv-instruction";
    case EXCEPTION_BREAKPOINT:
      return "breakpoint";
    case EXCEPTION_FLT_DENORMAL_OPERAND:
      return "flt-denormal-operand";
    case EXCEPTION_FLT_DIVIDE_BY_ZERO:
      return "flt-divide-by-zero";
    case EXCEPTION_FLT_INEXACT_RESULT:
      return "flt-inexact-result";
    case EXCEPTION_FLT_INVALID_OPERATION:
      return "flt-invalid-operation";
    case EXCEPTION_FLT_OVERFLOW:
      return "flt-overflow";
    case EXCEPTION_FLT_STACK_CHECK:
      return "flt-stack-check";
    case EXCEPTION_FLT_UNDERFLOW:
      return "flt-underflow";
    case EXCEPTION_INT_DIVIDE_BY_ZERO:
      return "int-divide-by-zero";
    case EXCEPTION_INT_OVERFLOW:
      return "int-overflow";
  }
  return "unknown exception";
}
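// The executable path is obtained as UTF-16 from GetModuleFileNameW and
// converted to UTF-8 with WideCharToMultiByte, since the sanitizer
// interfaces deal in narrow strings.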
uptr ReadBinaryName(/*out*/char *buf, uptr buf_len) {
  // Get the UTF-16 path and convert to UTF-8.
  InternalMmapVector<wchar_t> binname_utf16(kMaxPathLength);
  int binname_utf16_len =
      GetModuleFileNameW(NULL, &binname_utf16[0], kMaxPathLength);
  if (binname_utf16_len == 0) {
    buf[0] = '\0';
    return 0;
  }
  int binary_name_len =
      ::WideCharToMultiByte(CP_UTF8, 0, &binname_utf16[0], binname_utf16_len,
                            buf, buf_len, NULL, NULL);
  if ((unsigned)binary_name_len == buf_len)
    --binary_name_len;
  buf[binary_name_len] = '\0';
  return binary_name_len;
}

uptr ReadLongProcessName(/*out*/char *buf, uptr buf_len) {
  return ReadBinaryName(buf, buf_len);
}
void CheckVMASize() {
  // Do nothing.
}

void InitializePlatformEarly() {
  // Do nothing.
}

void CheckMPROTECT() {
  // Do nothing.
}

char **GetArgv() {
  // FIXME: Actually implement this function.
  return 0;
}

char **GetEnviron() {
  // FIXME: Actually implement this function.
  return 0;
}

pid_t StartSubprocess(const char *program, const char *const argv[],
                      const char *const envp[], fd_t stdin_fd, fd_t stdout_fd,
                      fd_t stderr_fd) {
  // FIXME: implement on this platform
  // Should be implemented based on
  // SymbolizerProcess::StartSymbolizerSubprocess
  // from lib/sanitizer_common/sanitizer_symbolizer_win.cpp.
  return -1;
}

bool IsProcessRunning(pid_t pid) {
  // FIXME: implement on this platform.
  return false;
}

int WaitForProcess(pid_t pid) { return -1; }

// FIXME implement on this platform.
void GetMemoryProfile(fill_profile_f cb, uptr *stats) {}

void CheckNoDeepBind(const char *filename, int flag) {
  // Do nothing.
}

// FIXME: implement on this platform.
bool GetRandom(void *buffer, uptr length, bool blocking) {
  UNIMPLEMENTED();
}

u32 GetNumberOfCPUs() {
  SYSTEM_INFO sysinfo = {};
  GetNativeSystemInfo(&sysinfo);
  return sysinfo.dwNumberOfProcessors;
}
#if SANITIZER_WIN_TRACE
// TODO(mcgov): Rename this project-wide to PlatformLogInit
void AndroidLogInit(void) {
  HRESULT hr = TraceLoggingRegister(g_asan_provider);
  if (!SUCCEEDED(hr))
    return;
}

void SetAbortMessage(const char *) {}

void LogFullErrorReport(const char *buffer) {
  if (common_flags()->log_to_syslog) {
    InternalMmapVector<wchar_t> filename;
    DWORD filename_length = 0;
    do {
      filename.resize(filename.size() + 0x100);
      filename_length =
          GetModuleFileNameW(NULL, filename.begin(), filename.size());
    } while (filename_length >= filename.size());
    TraceLoggingWrite(g_asan_provider, "AsanReportEvent",
                      TraceLoggingValue(filename.begin(), "ExecutableName"),
                      TraceLoggingValue(buffer, "AsanReportContents"));
  }
}
#endif // SANITIZER_WIN_TRACE

void InitializePlatformCommonFlags(CommonFlags *cf) {}

}  // namespace __sanitizer

#endif  // _WIN32