//===-- sanitizer_win.cpp -------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries and implements windows-specific functions from
// sanitizer_libc.h.
//===----------------------------------------------------------------------===//

#include "sanitizer_platform.h"
#if SANITIZER_WINDOWS

#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#include <psapi.h>
#include <stdlib.h>

#include "sanitizer_common.h"
#include "sanitizer_file.h"
#include "sanitizer_libc.h"
#include "sanitizer_mutex.h"
#include "sanitizer_placement_new.h"
#include "sanitizer_win_defs.h"

#if defined(PSAPI_VERSION) && PSAPI_VERSION == 1
#pragma comment(lib, "psapi")
#endif
#if SANITIZER_WIN_TRACE
#include <traceloggingprovider.h>
// Windows trace logging provider init
#pragma comment(lib, "advapi32.lib")
TRACELOGGING_DECLARE_PROVIDER(g_asan_provider);
// GUID must be the same in utils/AddressSanitizerLoggingProvider.wprp
TRACELOGGING_DEFINE_PROVIDER(g_asan_provider, "AddressSanitizerLoggingProvider",
                             (0x6c6c766d, 0x3846, 0x4e6a, 0xa4, 0xfb, 0x5b,
                              0x53, 0x0b, 0xd0, 0xf3, 0xfa));
#else
#define TraceLoggingUnregister(x)
#endif

#  pragma comment(lib, "synchronization.lib")

// A macro to tell the compiler that this part of the code cannot be reached,
// if the compiler supports this feature. Since we're using this in
// code that is called when terminating the process, the expansion of the
// macro should not terminate the process to avoid infinite recursion.
#if defined(__clang__)
#  define BUILTIN_UNREACHABLE() __builtin_unreachable()
#elif defined(__GNUC__) && \
    (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5))
#  define BUILTIN_UNREACHABLE() __builtin_unreachable()
#elif defined(_MSC_VER)
#  define BUILTIN_UNREACHABLE() __assume(0)
#else
#  define BUILTIN_UNREACHABLE()
#endif

namespace __sanitizer {

#include "sanitizer_syscall_generic.inc"

// --------------------- sanitizer_common.h
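// Note: GetMmapGranularity() returns the VirtualAlloc allocation granularity
// (dwAllocationGranularity), typically 64 KiB; the aligned mapping helpers
// further down rely on this when rounding alignments up to the granularity.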
uptr GetMmapGranularity() {
  SYSTEM_INFO si;
  GetSystemInfo(&si);
  return si.dwAllocationGranularity;
}

uptr GetMaxUserVirtualAddress() {
  SYSTEM_INFO si;
  GetSystemInfo(&si);
  return (uptr)si.lpMaximumApplicationAddress;
}

uptr GetMaxVirtualAddress() {
  return GetMaxUserVirtualAddress();
}

bool FileExists(const char *filename) {
  return ::GetFileAttributesA(filename) != INVALID_FILE_ATTRIBUTES;
}

bool DirExists(const char *path) {
  auto attr = ::GetFileAttributesA(path);
  return (attr != INVALID_FILE_ATTRIBUTES) && (attr & FILE_ATTRIBUTE_DIRECTORY);
}

uptr internal_getpid() {
  return GetProcessId(GetCurrentProcess());
}

int internal_dlinfo(void *handle, int request, void *p) {
  UNIMPLEMENTED();
}

// In contrast to POSIX, on Windows GetCurrentThreadId()
// returns a system-unique identifier.
tid_t GetTid() {
  return GetCurrentThreadId();
}

uptr GetThreadSelf() {
  return GetTid();
}

#if !SANITIZER_GO
void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
                                uptr *stack_bottom) {
  MEMORY_BASIC_INFORMATION mbi;
  CHECK_NE(VirtualQuery(&mbi /* on stack */, &mbi, sizeof(mbi)), 0);
  // FIXME: is it possible for the stack to not be a single allocation?
  // Are these values what ASan expects to get (reserved, not committed;
  // including stack guard page) ?
  *stack_top = (uptr)mbi.BaseAddress + mbi.RegionSize;
  *stack_bottom = (uptr)mbi.AllocationBase;
}
#endif  // #if !SANITIZER_GO

bool ErrorIsOOM(error_t err) {
  // TODO: This should check which `err`s correspond to OOM.
  return false;
}

void *MmapOrDie(uptr size, const char *mem_type, bool raw_report) {
  void *rv = VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
  if (rv == 0)
    ReportMmapFailureAndDie(size, mem_type, "allocate",
                            GetLastError(), raw_report);
  return rv;
}

void UnmapOrDie(void *addr, uptr size) {
  MEMORY_BASIC_INFORMATION mbi;
  CHECK(VirtualQuery(addr, &mbi, sizeof(mbi)));

  // MEM_RELEASE can only be used to unmap whole regions previously mapped with
  // VirtualAlloc. So we first try MEM_RELEASE since it is better, and if that
  // fails try MEM_DECOMMIT.
  if (VirtualFree(addr, 0, MEM_RELEASE) == 0) {
    if (VirtualFree(addr, size, MEM_DECOMMIT) == 0) {
      Report("ERROR: %s failed to "
             "deallocate 0x%zx (%zd) bytes at address %p (error code: %d)\n",
             SanitizerToolName, size, size, addr, GetLastError());
      CHECK("unable to unmap" && 0);
    }
  }
}

static void *ReturnNullptrOnOOMOrDie(uptr size, const char *mem_type,
                                     const char *mmap_type) {
  error_t last_error = GetLastError();
  if (last_error == ERROR_NOT_ENOUGH_MEMORY)
    return nullptr;
  ReportMmapFailureAndDie(size, mem_type, mmap_type, last_error);
}

void *MmapOrDieOnFatalError(uptr size, const char *mem_type) {
  void *rv = VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
  if (rv == 0)
    return ReturnNullptrOnOOMOrDie(size, mem_type, "allocate");
  return rv;
}

// We want to map a chunk of address space aligned to 'alignment'.
void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
                                   const char *mem_type) {
  CHECK(IsPowerOfTwo(size));
  CHECK(IsPowerOfTwo(alignment));

  // Windows will align our allocations to at least 64K.
  alignment = Max(alignment, GetMmapGranularity());

  uptr mapped_addr =
      (uptr)VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
  if (!mapped_addr)
    return ReturnNullptrOnOOMOrDie(size, mem_type, "allocate aligned");

  // If we got it right on the first try, return. Otherwise, unmap it and go to
  // the slow path.
  if (IsAligned(mapped_addr, alignment))
    return (void*)mapped_addr;
  if (VirtualFree((void *)mapped_addr, 0, MEM_RELEASE) == 0)
    ReportMmapFailureAndDie(size, mem_type, "deallocate", GetLastError());

  // If we didn't get an aligned address, overallocate, find an aligned address,
  // unmap, and try to allocate at that aligned address.
  int retries = 0;
  const int kMaxRetries = 10;
  for (; retries < kMaxRetries &&
         (mapped_addr == 0 || !IsAligned(mapped_addr, alignment));
       retries++) {
    // Overallocate size + alignment bytes.
    mapped_addr =
        (uptr)VirtualAlloc(0, size + alignment, MEM_RESERVE, PAGE_NOACCESS);
    if (!mapped_addr)
      return ReturnNullptrOnOOMOrDie(size, mem_type, "allocate aligned");

    // Find the aligned address.
    uptr aligned_addr = RoundUpTo(mapped_addr, alignment);

    // Free the overallocation.
    if (VirtualFree((void *)mapped_addr, 0, MEM_RELEASE) == 0)
      ReportMmapFailureAndDie(size, mem_type, "deallocate", GetLastError());

    // Attempt to allocate exactly the number of bytes we need at the aligned
    // address. This may fail for a number of reasons, in which case we continue
    // the loop.
    mapped_addr = (uptr)VirtualAlloc((void *)aligned_addr, size,
                                     MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
  }

  // Fail if we can't make this work quickly.
  if (retries == kMaxRetries && mapped_addr == 0)
    return ReturnNullptrOnOOMOrDie(size, mem_type, "allocate aligned");

  return (void *)mapped_addr;
}

// ZeroMmapFixedRegion zeroes out a region of memory previously returned from a
// call to one of the MmapFixed* helpers. On non-windows systems this would be
// done with another mmap, but on windows remapping is not an option.
// VirtualFree(DECOMMIT)+VirtualAlloc(RECOMMIT) would also be a way to zero the
// memory, but we can't do this atomically, so instead we fall back to using
// internal_memset.
bool ZeroMmapFixedRegion(uptr fixed_addr, uptr size) {
  internal_memset((void*) fixed_addr, 0, size);
  return true;
}

bool MmapFixedNoReserve(uptr fixed_addr, uptr size, const char *name) {
  // FIXME: is this really "NoReserve"? On Win32 this does not matter much,
  // but on Win64 it does.
  (void)name;  // unsupported
#if !SANITIZER_GO && SANITIZER_WINDOWS64
  // On asan/Windows64, using MEM_COMMIT would result in error
  // 1455:ERROR_COMMITMENT_LIMIT.
  // ASan uses an exception handler to commit pages on demand.
  void *p = VirtualAlloc((LPVOID)fixed_addr, size, MEM_RESERVE, PAGE_READWRITE);
#else
  void *p = VirtualAlloc((LPVOID)fixed_addr, size, MEM_RESERVE | MEM_COMMIT,
                         PAGE_READWRITE);
#endif
  if (p == 0) {
    Report("ERROR: %s failed to "
           "allocate %p (%zd) bytes at %p (error code: %d)\n",
           SanitizerToolName, size, size, fixed_addr, GetLastError());
    return false;
  }
  return true;
}

bool MmapFixedSuperNoReserve(uptr fixed_addr, uptr size, const char *name) {
  // FIXME: Windows supports large pages too. Might be worth checking.
  return MmapFixedNoReserve(fixed_addr, size, name);
}

// Memory space mapped by 'MmapFixedOrDie' must have been reserved by
// 'MmapFixedNoAccess'.
void *MmapFixedOrDie(uptr fixed_addr, uptr size, const char *name) {
  void *p = VirtualAlloc((LPVOID)fixed_addr, size,
                         MEM_COMMIT, PAGE_READWRITE);
  if (p == 0) {
    char mem_type[30];
    internal_snprintf(mem_type, sizeof(mem_type), "memory at address 0x%zx",
                      fixed_addr);
    ReportMmapFailureAndDie(size, mem_type, "allocate", GetLastError());
  }
  return p;
}

// Uses fixed_addr for now.
// Will use offset instead once we've implemented this function for real.
uptr ReservedAddressRange::Map(uptr fixed_addr, uptr size, const char *name) {
  return reinterpret_cast<uptr>(MmapFixedOrDieOnFatalError(fixed_addr, size));
}

uptr ReservedAddressRange::MapOrDie(uptr fixed_addr, uptr size,
                                    const char *name) {
  return reinterpret_cast<uptr>(MmapFixedOrDie(fixed_addr, size));
}

void ReservedAddressRange::Unmap(uptr addr, uptr size) {
  // Only unmap if it covers the entire range.
  CHECK((addr == reinterpret_cast<uptr>(base_)) && (size == size_));
  // We unmap the whole range, just null out the base.
  base_ = nullptr;
  size_ = 0;
  UnmapOrDie(reinterpret_cast<void*>(addr), size);
}

void *MmapFixedOrDieOnFatalError(uptr fixed_addr, uptr size, const char *name) {
  void *p = VirtualAlloc((LPVOID)fixed_addr, size,
                         MEM_COMMIT, PAGE_READWRITE);
  if (p == 0) {
    char mem_type[30];
    internal_snprintf(mem_type, sizeof(mem_type), "memory at address 0x%zx",
                      fixed_addr);
    return ReturnNullptrOnOOMOrDie(size, mem_type, "allocate");
  }
  return p;
}

void *MmapNoReserveOrDie(uptr size, const char *mem_type) {
  // FIXME: make this really NoReserve?
  return MmapOrDie(size, mem_type);
}

uptr ReservedAddressRange::Init(uptr size, const char *name, uptr fixed_addr) {
  base_ = fixed_addr ? MmapFixedNoAccess(fixed_addr, size) : MmapNoAccess(size);
  size_ = size;
  name_ = name;
  (void)os_handle_;  // unsupported
  return reinterpret_cast<uptr>(base_);
}

void *MmapFixedNoAccess(uptr fixed_addr, uptr size, const char *name) {
  (void)name;  // unsupported
  void *res = VirtualAlloc((LPVOID)fixed_addr, size,
                           MEM_RESERVE, PAGE_NOACCESS);
  if (res == 0)
    Report("WARNING: %s failed to "
           "mprotect %p (%zd) bytes at %p (error code: %d)\n",
           SanitizerToolName, size, size, fixed_addr, GetLastError());
  return res;
}

void *MmapNoAccess(uptr size) {
  void *res = VirtualAlloc(nullptr, size, MEM_RESERVE, PAGE_NOACCESS);
  if (res == 0)
    Report("WARNING: %s failed to "
           "mprotect %p (%zd) bytes (error code: %d)\n",
           SanitizerToolName, size, size, GetLastError());
  return res;
}

bool MprotectNoAccess(uptr addr, uptr size) {
  DWORD old_protection;
  return VirtualProtect((LPVOID)addr, size, PAGE_NOACCESS, &old_protection);
}

bool MprotectReadOnly(uptr addr, uptr size) {
  DWORD old_protection;
  return VirtualProtect((LPVOID)addr, size, PAGE_READONLY, &old_protection);
}

bool MprotectReadWrite(uptr addr, uptr size) {
  DWORD old_protection;
  return VirtualProtect((LPVOID)addr, size, PAGE_READWRITE, &old_protection);
}

void ReleaseMemoryPagesToOS(uptr beg, uptr end) {
  uptr beg_aligned = RoundDownTo(beg, GetPageSizeCached()),
       end_aligned = RoundDownTo(end, GetPageSizeCached());
  CHECK(beg < end);                // make sure the region is sane
  if (beg_aligned == end_aligned)  // make sure we're freeing at least 1 page;
    return;
  UnmapOrDie((void *)beg, end_aligned - beg_aligned);
}

void SetShadowRegionHugePageMode(uptr addr, uptr size) {
  // FIXME: probably similar to ReleaseMemoryToOS.
}

bool DontDumpShadowMemory(uptr addr, uptr length) {
  // This is almost useless on 32-bit.
  // FIXME: add a madvise analog when we move to 64-bit.
  return true;
}

uptr MapDynamicShadow(uptr shadow_size_bytes, uptr shadow_scale,
                      uptr min_shadow_base_alignment,
                      UNUSED uptr &high_mem_end) {
  const uptr granularity = GetMmapGranularity();
  const uptr alignment =
      Max<uptr>(granularity << shadow_scale, 1ULL << min_shadow_base_alignment);
  const uptr left_padding =
      Max<uptr>(granularity, 1ULL << min_shadow_base_alignment);
  uptr space_size = shadow_size_bytes + left_padding;
  uptr shadow_start = FindAvailableMemoryRange(space_size, alignment,
                                               granularity, nullptr, nullptr);
  CHECK_NE((uptr)0, shadow_start);
  CHECK(IsAligned(shadow_start, alignment));
  return shadow_start;
}

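// FindAvailableMemoryRange scans the address space with VirtualQuery, looking
// for a MEM_FREE region large enough to hold |size| bytes at the requested
// alignment (with |left_padding| in front). It returns 0 on failure.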
uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding,
                              uptr *largest_gap_found,
                              uptr *max_occupied_addr) {
  uptr address = 0;
  while (true) {
    MEMORY_BASIC_INFORMATION info;
    if (!::VirtualQuery((void*)address, &info, sizeof(info)))
      return 0;

    if (info.State == MEM_FREE) {
      uptr shadow_address = RoundUpTo((uptr)info.BaseAddress + left_padding,
                                      alignment);
      if (shadow_address + size < (uptr)info.BaseAddress + info.RegionSize)
        return shadow_address;
    }

    // Move to the next region.
    address = (uptr)info.BaseAddress + info.RegionSize;
  }
  return 0;
}

uptr MapDynamicShadowAndAliases(uptr shadow_size, uptr alias_size,
                                uptr num_aliases, uptr ring_buffer_size) {
  CHECK(false && "HWASan aliasing is unimplemented on Windows");
  return 0;
}

bool MemoryRangeIsAvailable(uptr range_start, uptr range_end) {
  MEMORY_BASIC_INFORMATION mbi;
  CHECK(VirtualQuery((void *)range_start, &mbi, sizeof(mbi)));
  return mbi.Protect == PAGE_NOACCESS &&
         (uptr)mbi.BaseAddress + mbi.RegionSize >= range_end;
}

void *MapFileToMemory(const char *file_name, uptr *buff_size) {
  UNIMPLEMENTED();
}

void *MapWritableFileToMemory(void *addr, uptr size, fd_t fd, OFF_T offset) {
  UNIMPLEMENTED();
}

static const int kMaxEnvNameLength = 128;
static const DWORD kMaxEnvValueLength = 32767;

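// 32767 characters is the documented upper bound on the length of a Windows
// environment variable value (including the terminating null), so a buffer of
// kMaxEnvValueLength always suffices for GetEnvironmentVariableA.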
struct EnvVariable {
  char name[kMaxEnvNameLength];
  char value[kMaxEnvValueLength];
};

static const int kEnvVariables = 5;
static EnvVariable env_vars[kEnvVariables];
static int num_env_vars;

const char *GetEnv(const char *name) {
  // Note: this implementation caches the values of the environment variables
  // and limits their quantity.
  for (int i = 0; i < num_env_vars; i++) {
    if (0 == internal_strcmp(name, env_vars[i].name))
      return env_vars[i].value;
  }
  CHECK_LT(num_env_vars, kEnvVariables);
  DWORD rv = GetEnvironmentVariableA(name, env_vars[num_env_vars].value,
                                     kMaxEnvValueLength);
  if (rv > 0 && rv < kMaxEnvValueLength) {
    CHECK_LT(internal_strlen(name), kMaxEnvNameLength);
    internal_strncpy(env_vars[num_env_vars].name, name, kMaxEnvNameLength);
    num_env_vars++;
    return env_vars[num_env_vars - 1].value;
  }
  return 0;
}

const char *GetPwd() {
  UNIMPLEMENTED();
}

struct ModuleInfo {
  const char *filepath;
  uptr base_address;
  uptr end_address;
};

int CompareModulesBase(const void *pl, const void *pr) {
  const ModuleInfo *l = (const ModuleInfo *)pl, *r = (const ModuleInfo *)pr;
  if (l->base_address < r->base_address)
    return -1;
  return l->base_address > r->base_address;
}

void DumpProcessMap() {
  Report("Dumping process modules:\n");
  ListOfModules modules;
  modules.init();
  uptr num_modules = modules.size();

  InternalMmapVector<ModuleInfo> module_infos(num_modules);
  for (size_t i = 0; i < num_modules; ++i) {
    module_infos[i].filepath = modules[i].full_name();
    module_infos[i].base_address = modules[i].ranges().front()->beg;
    module_infos[i].end_address = modules[i].ranges().back()->end;
  }
  qsort(module_infos.data(), num_modules, sizeof(ModuleInfo),
        CompareModulesBase);

  for (size_t i = 0; i < num_modules; ++i) {
    const ModuleInfo &mi = module_infos[i];
    if (mi.end_address != 0) {
      Printf("\t%p-%p %s\n", mi.base_address, mi.end_address,
             mi.filepath[0] ? mi.filepath : "[no name]");
    } else if (mi.filepath[0]) {
      Printf("\t??\?-??? %s\n", mi.filepath);
    }
  }
}

void DisableCoreDumperIfNecessary() {
  // Do nothing.
}

void PlatformPrepareForSandboxing(void *args) {}

bool StackSizeIsUnlimited() {
  UNIMPLEMENTED();
}

void SetStackSizeLimitInBytes(uptr limit) {
  UNIMPLEMENTED();
}

bool AddressSpaceIsUnlimited() {
  UNIMPLEMENTED();
}

void SetAddressSpaceUnlimited() {
  UNIMPLEMENTED();
}

bool IsPathSeparator(const char c) {
  return c == '\\' || c == '/';
}

static bool IsAlpha(char c) {
  c = ToLower(c);
  return c >= 'a' && c <= 'z';
}

bool IsAbsolutePath(const char *path) {
  return path != nullptr && IsAlpha(path[0]) && path[1] == ':' &&
         IsPathSeparator(path[2]);
}

void internal_usleep(u64 useconds) { Sleep(useconds / 1000); }

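// NanoTime converts QueryPerformanceCounter ticks to nanoseconds by scaling
// with 10^9 (1000 * 1000000) before dividing by the counter frequency; the
// frequency is queried once and cached in a function-local static.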
u64 NanoTime() {
  static LARGE_INTEGER frequency = {};
  LARGE_INTEGER counter;
  if (UNLIKELY(frequency.QuadPart == 0)) {
    QueryPerformanceFrequency(&frequency);
    CHECK_NE(frequency.QuadPart, 0);
  }
  QueryPerformanceCounter(&counter);
  counter.QuadPart *= 1000ULL * 1000000ULL;
  counter.QuadPart /= frequency.QuadPart;
  return counter.QuadPart;
}

u64 MonotonicNanoTime() { return NanoTime(); }

bool CreateDir(const char *pathname) {
  return CreateDirectoryA(pathname, nullptr) != 0;
}

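// Note: GetPreferredBase() below parses the on-disk copy of the module rather
// than the in-memory image, so it reports the linker's ImageBase even when the
// loader relocated the module. The caller provides |buf| sized for the PE
// signature plus IMAGE_FILE_HEADER and IMAGE_OPTIONAL_HEADER (see
// ListOfModules::init below).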
// Read the file to extract the ImageBase field from the PE header. If ASLR is
// disabled and this virtual address is available, the loader will typically
// load the image at this address. Therefore, we call it the preferred base. Any
// addresses in the DWARF typically assume that the object has been loaded at
// this address.
static uptr GetPreferredBase(const char *modname, char *buf, size_t buf_size) {
  fd_t fd = OpenFile(modname, RdOnly, nullptr);
  if (fd == kInvalidFd)
    return 0;
  FileCloser closer(fd);

  // Read just the DOS header.
  IMAGE_DOS_HEADER dos_header;
  uptr bytes_read;
  if (!ReadFromFile(fd, &dos_header, sizeof(dos_header), &bytes_read) ||
      bytes_read != sizeof(dos_header))
    return 0;

  // The file should start with the right signature.
  if (dos_header.e_magic != IMAGE_DOS_SIGNATURE)
    return 0;

  // The layout at e_lfanew is:
  // "PE\0\0"
  // IMAGE_FILE_HEADER
  // IMAGE_OPTIONAL_HEADER
  // Seek to e_lfanew and read all that data.
  if (::SetFilePointer(fd, dos_header.e_lfanew, nullptr, FILE_BEGIN) ==
      INVALID_SET_FILE_POINTER)
    return 0;
  if (!ReadFromFile(fd, buf, buf_size, &bytes_read) || bytes_read != buf_size)
    return 0;

  // Check for "PE\0\0" before the PE header.
  char *pe_sig = &buf[0];
  if (internal_memcmp(pe_sig, "PE\0\0", 4) != 0)
    return 0;

  // Skip over IMAGE_FILE_HEADER. We could do more validation here if we wanted.
  IMAGE_OPTIONAL_HEADER *pe_header =
      (IMAGE_OPTIONAL_HEADER *)(pe_sig + 4 + sizeof(IMAGE_FILE_HEADER));

  // Check for more magic in the PE header.
  if (pe_header->Magic != IMAGE_NT_OPTIONAL_HDR_MAGIC)
    return 0;

  // Finally, return the ImageBase.
  return (uptr)pe_header->ImageBase;
}

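// EnumProcessModules reports, via its last argument, how many bytes the
// complete module list requires; when that exceeds the buffer we passed, the
// list was truncated, so the loop below grows the buffer and retries.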
void ListOfModules::init() {
  clearOrInit();
  HANDLE cur_process = GetCurrentProcess();

  // Query the list of modules. Start by assuming there are no more than 256
  // modules and retry if that's not sufficient.
  HMODULE *hmodules = 0;
  uptr modules_buffer_size = sizeof(HMODULE) * 256;
  DWORD bytes_required;
  while (!hmodules) {
    hmodules = (HMODULE *)MmapOrDie(modules_buffer_size, __FUNCTION__);
    CHECK(EnumProcessModules(cur_process, hmodules, modules_buffer_size,
                             &bytes_required));
    if (bytes_required > modules_buffer_size) {
      // Either there turned out to be more than 256 hmodules, or new hmodules
      // could have loaded since the last try. Retry.
      UnmapOrDie(hmodules, modules_buffer_size);
      hmodules = 0;
      modules_buffer_size = bytes_required;
    }
  }

  InternalMmapVector<char> buf(4 + sizeof(IMAGE_FILE_HEADER) +
                               sizeof(IMAGE_OPTIONAL_HEADER));
  InternalMmapVector<wchar_t> modname_utf16(kMaxPathLength);
  InternalMmapVector<char> module_name(kMaxPathLength);
  // |num_modules| is the number of modules actually present.
  size_t num_modules = bytes_required / sizeof(HMODULE);
  for (size_t i = 0; i < num_modules; ++i) {
    HMODULE handle = hmodules[i];
    MODULEINFO mi;
    if (!GetModuleInformation(cur_process, handle, &mi, sizeof(mi)))
      continue;

    // Get the UTF-16 path and convert to UTF-8.
    int modname_utf16_len =
        GetModuleFileNameW(handle, &modname_utf16[0], kMaxPathLength);
    if (modname_utf16_len == 0)
      modname_utf16[0] = '\0';
    int module_name_len = ::WideCharToMultiByte(
        CP_UTF8, 0, &modname_utf16[0], modname_utf16_len + 1, &module_name[0],
        kMaxPathLength, NULL, NULL);
    module_name[module_name_len] = '\0';

    uptr base_address = (uptr)mi.lpBaseOfDll;
    uptr end_address = (uptr)mi.lpBaseOfDll + mi.SizeOfImage;

    // Adjust the base address of the module so that we get a VA instead of an
    // RVA when computing the module offset. This helps llvm-symbolizer find the
    // right DWARF CU. In the common case that the image is loaded at its
    // preferred address, we will now print normal virtual addresses.
    uptr preferred_base =
        GetPreferredBase(&module_name[0], &buf[0], buf.size());
    uptr adjusted_base = base_address - preferred_base;

    modules_.push_back(LoadedModule());
    LoadedModule &cur_module = modules_.back();
    cur_module.set(&module_name[0], adjusted_base);
    // We add the whole module as one single address range.
    cur_module.addAddressRange(base_address, end_address, /*executable*/ true,
                               /*writable*/ true);
  }
  UnmapOrDie(hmodules, modules_buffer_size);
}

void ListOfModules::fallbackInit() { clear(); }

// We can't use atexit() directly at __asan_init time as the CRT is not fully
// initialized at this point. Place the functions into a vector and use
// atexit() as soon as it is ready for use (i.e. after .CRT$XIC initializers).
InternalMmapVectorNoCtor<void (*)(void)> atexit_functions;

static int queueAtexit(void (*function)(void)) {
  atexit_functions.push_back(function);
  return 0;
}

// If Atexit() is being called after RunAtexit() has already been run, it needs
// to be able to call atexit() directly. Here we use a function pointer to
// switch out its behaviour.
// An example of where this is needed is the asan_dynamic runtime on MinGW-w64.
// In this environment, __asan_init is called during the global constructor
// phase, way after calling the .CRT$XID initializer.
static int (*volatile queueOrCallAtExit)(void (*)(void)) = &queueAtexit;

int Atexit(void (*function)(void)) { return queueOrCallAtExit(function); }

static int RunAtexit() {
  TraceLoggingUnregister(g_asan_provider);
  queueOrCallAtExit = &atexit;
  int ret = 0;
  for (uptr i = 0; i < atexit_functions.size(); ++i) {
    ret |= atexit(atexit_functions[i]);
  }
  return ret;
}

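// CRT initializer sections run in alphabetical order, so placing this pointer
// in ".CRT$XID" makes RunAtexit run after the CRT's own ".CRT$XIC"
// initializers have made atexit() safe to call.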
751 #pragma section(".CRT$XID", long, read)
752 __declspec(allocate(".CRT$XID")) int (*__run_atexit
)() = RunAtexit
;
// ------------------ sanitizer_libc.h
fd_t OpenFile(const char *filename, FileAccessMode mode, error_t *last_error) {
  // FIXME: Use the wide variants to handle Unicode filenames.
  fd_t res;
  if (mode == RdOnly) {
    res = CreateFileA(filename, GENERIC_READ,
                      FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE,
                      nullptr, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, nullptr);
  } else if (mode == WrOnly) {
    res = CreateFileA(filename, GENERIC_WRITE, 0, nullptr, CREATE_ALWAYS,
                      FILE_ATTRIBUTE_NORMAL, nullptr);
  } else {
    UNIMPLEMENTED();
  }
  CHECK(res != kStdoutFd || kStdoutFd == kInvalidFd);
  CHECK(res != kStderrFd || kStderrFd == kInvalidFd);
  if (res == kInvalidFd && last_error)
    *last_error = GetLastError();
  return res;
}

void CloseFile(fd_t fd) {
  CloseHandle(fd);
}

bool ReadFromFile(fd_t fd, void *buff, uptr buff_size, uptr *bytes_read,
                  error_t *error_p) {
  CHECK(fd != kInvalidFd);

  // bytes_read can't be passed directly to ReadFile:
  // uptr is unsigned long long on 64-bit Windows.
  unsigned long num_read_long;

  bool success = ::ReadFile(fd, buff, buff_size, &num_read_long, nullptr);
  if (!success && error_p)
    *error_p = GetLastError();
  if (bytes_read)
    *bytes_read = num_read_long;
  return success;
}

bool SupportsColoredOutput(fd_t fd) {
  // FIXME: support colored output.
  return false;
}

bool WriteToFile(fd_t fd, const void *buff, uptr buff_size, uptr *bytes_written,
                 error_t *error_p) {
  CHECK(fd != kInvalidFd);

  // Handle null optional parameters.
  error_t dummy_error;
  error_p = error_p ? error_p : &dummy_error;
  uptr dummy_bytes_written;
  bytes_written = bytes_written ? bytes_written : &dummy_bytes_written;

  // Initialize output parameters in case we fail.
  *error_p = 0;
  *bytes_written = 0;

  // Map the conventional Unix fds 1 and 2 to Windows handles. They might be
  // closed, in which case this will fail.
  if (fd == kStdoutFd || fd == kStderrFd) {
    fd = GetStdHandle(fd == kStdoutFd ? STD_OUTPUT_HANDLE : STD_ERROR_HANDLE);
    if (fd == 0) {
      *error_p = ERROR_INVALID_HANDLE;
      return false;
    }
  }

  DWORD bytes_written_32;
  if (!WriteFile(fd, buff, buff_size, &bytes_written_32, 0)) {
    *error_p = GetLastError();
    return false;
  }
  *bytes_written = bytes_written_32;
  return true;
}

uptr internal_sched_yield() {
  Sleep(0);
  return 0;
}

void internal__exit(int exitcode) {
  TraceLoggingUnregister(g_asan_provider);
  // ExitProcess runs some finalizers, so use TerminateProcess to avoid that.
  // The debugger doesn't stop on TerminateProcess like it does on ExitProcess,
  // so add our own breakpoint here.
  if (::IsDebuggerPresent())
    __debugbreak();
  TerminateProcess(GetCurrentProcess(), exitcode);
  BUILTIN_UNREACHABLE();
}

uptr internal_ftruncate(fd_t fd, uptr size) {
  UNIMPLEMENTED();
}

uptr GetRSS() {
  PROCESS_MEMORY_COUNTERS counters;
  if (!GetProcessMemoryInfo(GetCurrentProcess(), &counters, sizeof(counters)))
    return 0;
  return counters.WorkingSetSize;
}

void *internal_start_thread(void *(*func)(void *arg), void *arg) { return 0; }
void internal_join_thread(void *th) { }

void FutexWait(atomic_uint32_t *p, u32 cmp) {
  WaitOnAddress(p, &cmp, sizeof(cmp), INFINITE);
}

void FutexWake(atomic_uint32_t *p, u32 count) {
  if (count == 1)
    WakeByAddressSingle(p);
  else
    WakeByAddressAll(p);
}

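// WaitOnAddress may return even when the value has not changed (spurious
// wakeups are allowed), so FutexWait callers are expected to re-check the
// condition in a loop, matching the futex contract on other platforms.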
void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
                          uptr *tls_addr, uptr *tls_size) {
  uptr stack_top, stack_bottom;
  GetThreadStackTopAndBottom(main, &stack_top, &stack_bottom);
  *stk_addr = stack_bottom;
  *stk_size = stack_top - stack_bottom;
  *tls_addr = 0;
  *tls_size = 0;
}

void ReportFile::Write(const char *buffer, uptr length) {
  if (!WriteToFile(fd, buffer, length)) {
    // stderr may be closed, but we may be able to print to the debugger
    // instead. This is the case when launching a program from Visual Studio,
    // and the following routine should write to its console.
    OutputDebugStringA(buffer);
  }
}

void SetAlternateSignalStack() {
  // FIXME: Decide what to do on Windows.
}

void UnsetAlternateSignalStack() {
  // FIXME: Decide what to do on Windows.
}

void InstallDeadlySignalHandlers(SignalHandlerType handler) {
  (void)handler;
  // FIXME: Decide what to do on Windows.
}

HandleSignalMode GetHandleSignalMode(int signum) {
  // FIXME: Decide what to do on Windows.
  return kHandleSignalNo;
}

// Check based on flags if we should handle this exception.
bool IsHandledDeadlyException(DWORD exceptionCode) {
  switch (exceptionCode) {
    case EXCEPTION_ACCESS_VIOLATION:
    case EXCEPTION_ARRAY_BOUNDS_EXCEEDED:
    case EXCEPTION_STACK_OVERFLOW:
    case EXCEPTION_DATATYPE_MISALIGNMENT:
    case EXCEPTION_IN_PAGE_ERROR:
      return common_flags()->handle_segv;
    case EXCEPTION_ILLEGAL_INSTRUCTION:
    case EXCEPTION_PRIV_INSTRUCTION:
    case EXCEPTION_BREAKPOINT:
      return common_flags()->handle_sigill;
    case EXCEPTION_FLT_DENORMAL_OPERAND:
    case EXCEPTION_FLT_DIVIDE_BY_ZERO:
    case EXCEPTION_FLT_INEXACT_RESULT:
    case EXCEPTION_FLT_INVALID_OPERATION:
    case EXCEPTION_FLT_OVERFLOW:
    case EXCEPTION_FLT_STACK_CHECK:
    case EXCEPTION_FLT_UNDERFLOW:
    case EXCEPTION_INT_DIVIDE_BY_ZERO:
    case EXCEPTION_INT_OVERFLOW:
      return common_flags()->handle_sigfpe;
  }
  return false;
}

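// IsAccessibleMemoryRange below walks the range page by page with
// VirtualQuery and reports it accessible only if every page is committed with
// a readable protection (not PAGE_NOACCESS and not execute-only).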
bool IsAccessibleMemoryRange(uptr beg, uptr size) {
  SYSTEM_INFO si;
  GetNativeSystemInfo(&si);
  uptr page_size = si.dwPageSize;
  uptr page_mask = ~(page_size - 1);

  for (uptr page = beg & page_mask, end = (beg + size - 1) & page_mask;
       page <= end;) {
    MEMORY_BASIC_INFORMATION info;
    if (VirtualQuery((LPCVOID)page, &info, sizeof(info)) != sizeof(info))
      return false;

    if (info.Protect == 0 || info.Protect == PAGE_NOACCESS ||
        info.Protect == PAGE_EXECUTE)
      return false;

    if (info.RegionSize == 0)
      return false;

    page += info.RegionSize;
  }

  return true;
}

bool SignalContext::IsStackOverflow() const {
  return (DWORD)GetType() == EXCEPTION_STACK_OVERFLOW;
}

void SignalContext::InitPcSpBp() {
  EXCEPTION_RECORD *exception_record = (EXCEPTION_RECORD *)siginfo;
  CONTEXT *context_record = (CONTEXT *)context;

  pc = (uptr)exception_record->ExceptionAddress;
#if SANITIZER_WINDOWS64
#if SANITIZER_ARM64
  bp = (uptr)context_record->Fp;
  sp = (uptr)context_record->Sp;
#else
  bp = (uptr)context_record->Rbp;
  sp = (uptr)context_record->Rsp;
#endif
#else
  bp = (uptr)context_record->Ebp;
  sp = (uptr)context_record->Esp;
#endif
}

uptr SignalContext::GetAddress() const {
  EXCEPTION_RECORD *exception_record = (EXCEPTION_RECORD *)siginfo;
  if (exception_record->ExceptionCode == EXCEPTION_ACCESS_VIOLATION)
    return exception_record->ExceptionInformation[1];
  return (uptr)exception_record->ExceptionAddress;
}

bool SignalContext::IsMemoryAccess() const {
  return ((EXCEPTION_RECORD *)siginfo)->ExceptionCode ==
         EXCEPTION_ACCESS_VIOLATION;
}

bool SignalContext::IsTrueFaultingAddress() const { return true; }

SignalContext::WriteFlag SignalContext::GetWriteFlag() const {
  EXCEPTION_RECORD *exception_record = (EXCEPTION_RECORD *)siginfo;

  // The write flag is only available for access violation exceptions.
  if (exception_record->ExceptionCode != EXCEPTION_ACCESS_VIOLATION)
    return SignalContext::Unknown;

  // The contents of this array are documented at
  // https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-exception_record
  // The first element indicates read as 0, write as 1, or execute as 8. The
  // second element is the faulting address.
  switch (exception_record->ExceptionInformation[0]) {
    case 0:
      return SignalContext::Read;
    case 1:
      return SignalContext::Write;
    case 8:
      return SignalContext::Unknown;
  }
  return SignalContext::Unknown;
}

void SignalContext::DumpAllRegisters(void *context) {
  // FIXME: Implement this.
}

int SignalContext::GetType() const {
  return static_cast<const EXCEPTION_RECORD *>(siginfo)->ExceptionCode;
}

const char *SignalContext::Describe() const {
  unsigned code = GetType();
  // Get the string description of the exception if this is a known deadly
  // exception.
  switch (code) {
    case EXCEPTION_ACCESS_VIOLATION:
      return "access-violation";
    case EXCEPTION_ARRAY_BOUNDS_EXCEEDED:
      return "array-bounds-exceeded";
    case EXCEPTION_STACK_OVERFLOW:
      return "stack-overflow";
    case EXCEPTION_DATATYPE_MISALIGNMENT:
      return "datatype-misalignment";
    case EXCEPTION_IN_PAGE_ERROR:
      return "in-page-error";
    case EXCEPTION_ILLEGAL_INSTRUCTION:
      return "illegal-instruction";
    case EXCEPTION_PRIV_INSTRUCTION:
      return "priv-instruction";
    case EXCEPTION_BREAKPOINT:
      return "breakpoint";
    case EXCEPTION_FLT_DENORMAL_OPERAND:
      return "flt-denormal-operand";
    case EXCEPTION_FLT_DIVIDE_BY_ZERO:
      return "flt-divide-by-zero";
    case EXCEPTION_FLT_INEXACT_RESULT:
      return "flt-inexact-result";
    case EXCEPTION_FLT_INVALID_OPERATION:
      return "flt-invalid-operation";
    case EXCEPTION_FLT_OVERFLOW:
      return "flt-overflow";
    case EXCEPTION_FLT_STACK_CHECK:
      return "flt-stack-check";
    case EXCEPTION_FLT_UNDERFLOW:
      return "flt-underflow";
    case EXCEPTION_INT_DIVIDE_BY_ZERO:
      return "int-divide-by-zero";
    case EXCEPTION_INT_OVERFLOW:
      return "int-overflow";
  }
  return "unknown exception";
}

uptr ReadBinaryName(/*out*/char *buf, uptr buf_len) {
  if (buf_len == 0)
    return 0;

  // Get the UTF-16 path and convert to UTF-8.
  InternalMmapVector<wchar_t> binname_utf16(kMaxPathLength);
  int binname_utf16_len =
      GetModuleFileNameW(NULL, &binname_utf16[0], kMaxPathLength);
  if (binname_utf16_len == 0) {
    buf[0] = '\0';
    return 0;
  }
  int binary_name_len =
      ::WideCharToMultiByte(CP_UTF8, 0, &binname_utf16[0], binname_utf16_len,
                            buf, buf_len, NULL, NULL);
  if ((unsigned)binary_name_len == buf_len)
    --binary_name_len;
  buf[binary_name_len] = '\0';
  return binary_name_len;
}

uptr ReadLongProcessName(/*out*/char *buf, uptr buf_len) {
  return ReadBinaryName(buf, buf_len);
}

void CheckVMASize() {
  // Do nothing.
}

void InitializePlatformEarly() {
  // Do nothing.
}

void CheckMPROTECT() {
  // Do nothing.
}

char **GetArgv() {
  // FIXME: Actually implement this function.
  return 0;
}

char **GetEnviron() {
  // FIXME: Actually implement this function.
  CHECK(0);
  return 0;
}

pid_t StartSubprocess(const char *program, const char *const argv[],
                      const char *const envp[], fd_t stdin_fd, fd_t stdout_fd,
                      fd_t stderr_fd) {
  // FIXME: implement on this platform.
  // Should be implemented based on
  // SymbolizerProcess::StartAtSymbolizerSubprocess
  // from lib/sanitizer_common/sanitizer_symbolizer_win.cpp.
  return -1;
}

bool IsProcessRunning(pid_t pid) {
  // FIXME: implement on this platform.
  return false;
}

int WaitForProcess(pid_t pid) { return -1; }

// FIXME: implement on this platform.
void GetMemoryProfile(fill_profile_f cb, uptr *stats) {}

void CheckNoDeepBind(const char *filename, int flag) {
  // Do nothing.
}

// FIXME: implement on this platform.
bool GetRandom(void *buffer, uptr length, bool blocking) {
  UNIMPLEMENTED();
}

u32 GetNumberOfCPUs() {
  SYSTEM_INFO sysinfo = {};
  GetNativeSystemInfo(&sysinfo);
  return sysinfo.dwNumberOfProcessors;
}

#if SANITIZER_WIN_TRACE
// TODO(mcgov): Rename this project-wide to PlatformLogInit
void AndroidLogInit(void) {
  HRESULT hr = TraceLoggingRegister(g_asan_provider);
  if (!SUCCEEDED(hr))
    return;
}

void SetAbortMessage(const char *) {}

void LogFullErrorReport(const char *buffer) {
  if (common_flags()->log_to_syslog) {
    InternalMmapVector<wchar_t> filename;
    DWORD filename_length = 0;
    do {
      filename.resize(filename.size() + 0x100);
      filename_length =
          GetModuleFileNameW(NULL, filename.begin(), filename.size());
    } while (filename_length >= filename.size());
    TraceLoggingWrite(g_asan_provider, "AsanReportEvent",
                      TraceLoggingValue(filename.begin(), "ExecutableName"),
                      TraceLoggingValue(buffer, "AsanReportContents"));
  }
}
#endif // SANITIZER_WIN_TRACE

void InitializePlatformCommonFlags(CommonFlags *cf) {}

}  // namespace __sanitizer

#endif  // SANITIZER_WINDOWS