[clang] Handle __declspec() attributes in using
[llvm-project.git] / compiler-rt / lib / sanitizer_common / sanitizer_posix.cpp
blob75968ad33ccf5ecefdf8df93a2cbd1e59f36f545
1 //===-- sanitizer_posix.cpp -----------------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file is shared between AddressSanitizer and ThreadSanitizer
10 // run-time libraries and implements POSIX-specific functions from
11 // sanitizer_posix.h.
12 //===----------------------------------------------------------------------===//
14 #include "sanitizer_platform.h"
16 #if SANITIZER_POSIX
18 #include "sanitizer_common.h"
19 #include "sanitizer_file.h"
20 #include "sanitizer_flags.h"
21 #include "sanitizer_libc.h"
22 #include "sanitizer_posix.h"
23 #include "sanitizer_procmaps.h"
25 #include <errno.h>
26 #include <fcntl.h>
27 #include <signal.h>
28 #include <sys/mman.h>
30 #if SANITIZER_FREEBSD
31 // The MAP_NORESERVE define has been removed in FreeBSD 11.x, and even before
32 // that, it was never implemented. So just define it to zero.
33 #undef MAP_NORESERVE
34 #define MAP_NORESERVE 0
35 #endif
37 namespace __sanitizer {
39 // ------------- sanitizer_common.h
40 uptr GetMmapGranularity() {
41 return GetPageSize();
44 bool ErrorIsOOM(error_t err) { return err == ENOMEM; }
46 void *MmapOrDie(uptr size, const char *mem_type, bool raw_report) {
47 size = RoundUpTo(size, GetPageSizeCached());
48 uptr res = MmapNamed(nullptr, size, PROT_READ | PROT_WRITE,
49 MAP_PRIVATE | MAP_ANON, mem_type);
50 int reserrno;
51 if (UNLIKELY(internal_iserror(res, &reserrno)))
52 ReportMmapFailureAndDie(size, mem_type, "allocate", reserrno, raw_report);
53 IncreaseTotalMmap(size);
54 return (void *)res;
57 void UnmapOrDie(void *addr, uptr size) {
58 if (!addr || !size) return;
59 uptr res = internal_munmap(addr, size);
60 if (UNLIKELY(internal_iserror(res))) {
61 Report("ERROR: %s failed to deallocate 0x%zx (%zd) bytes at address %p\n",
62 SanitizerToolName, size, size, addr);
63 CHECK("unable to unmap" && 0);
65 DecreaseTotalMmap(size);
68 void *MmapOrDieOnFatalError(uptr size, const char *mem_type) {
69 size = RoundUpTo(size, GetPageSizeCached());
70 uptr res = MmapNamed(nullptr, size, PROT_READ | PROT_WRITE,
71 MAP_PRIVATE | MAP_ANON, mem_type);
72 int reserrno;
73 if (UNLIKELY(internal_iserror(res, &reserrno))) {
74 if (reserrno == ENOMEM)
75 return nullptr;
76 ReportMmapFailureAndDie(size, mem_type, "allocate", reserrno);
78 IncreaseTotalMmap(size);
79 return (void *)res;
82 // We want to map a chunk of address space aligned to 'alignment'.
83 // We do it by mapping a bit more and then unmapping redundant pieces.
84 // We probably can do it with fewer syscalls in some OS-dependent way.
85 void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
86 const char *mem_type) {
87 CHECK(IsPowerOfTwo(size));
88 CHECK(IsPowerOfTwo(alignment));
89 uptr map_size = size + alignment;
90 // mmap maps entire pages and rounds up map_size needs to be a an integral
91 // number of pages.
92 // We need to be aware of this size for calculating end and for unmapping
93 // fragments before and after the alignment region.
94 map_size = RoundUpTo(map_size, GetPageSizeCached());
95 uptr map_res = (uptr)MmapOrDieOnFatalError(map_size, mem_type);
96 if (UNLIKELY(!map_res))
97 return nullptr;
98 uptr res = map_res;
99 if (!IsAligned(res, alignment)) {
100 res = (map_res + alignment - 1) & ~(alignment - 1);
101 UnmapOrDie((void*)map_res, res - map_res);
103 uptr map_end = map_res + map_size;
104 uptr end = res + size;
105 end = RoundUpTo(end, GetPageSizeCached());
106 if (end != map_end) {
107 CHECK_LT(end, map_end);
108 UnmapOrDie((void*)end, map_end - end);
110 return (void*)res;
113 void *MmapNoReserveOrDie(uptr size, const char *mem_type) {
114 size = RoundUpTo(size, GetPageSizeCached());
115 uptr p = MmapNamed(nullptr, size, PROT_READ | PROT_WRITE,
116 MAP_PRIVATE | MAP_ANON | MAP_NORESERVE, mem_type);
117 int reserrno;
118 if (UNLIKELY(internal_iserror(p, &reserrno)))
119 ReportMmapFailureAndDie(size, mem_type, "allocate noreserve", reserrno);
120 IncreaseTotalMmap(size);
121 return (void *)p;
124 static void *MmapFixedImpl(uptr fixed_addr, uptr size, bool tolerate_enomem,
125 const char *name) {
126 size = RoundUpTo(size, GetPageSizeCached());
127 fixed_addr = RoundDownTo(fixed_addr, GetPageSizeCached());
128 uptr p = MmapNamed((void *)fixed_addr, size, PROT_READ | PROT_WRITE,
129 MAP_PRIVATE | MAP_ANON | MAP_FIXED, name);
130 int reserrno;
131 if (UNLIKELY(internal_iserror(p, &reserrno))) {
132 if (tolerate_enomem && reserrno == ENOMEM)
133 return nullptr;
134 char mem_type[40];
135 internal_snprintf(mem_type, sizeof(mem_type), "memory at address 0x%zx",
136 fixed_addr);
137 ReportMmapFailureAndDie(size, mem_type, "allocate", reserrno);
139 IncreaseTotalMmap(size);
140 return (void *)p;
143 void *MmapFixedOrDie(uptr fixed_addr, uptr size, const char *name) {
144 return MmapFixedImpl(fixed_addr, size, false /*tolerate_enomem*/, name);
147 void *MmapFixedOrDieOnFatalError(uptr fixed_addr, uptr size, const char *name) {
148 return MmapFixedImpl(fixed_addr, size, true /*tolerate_enomem*/, name);
151 bool MprotectNoAccess(uptr addr, uptr size) {
152 return 0 == internal_mprotect((void*)addr, size, PROT_NONE);
155 bool MprotectReadOnly(uptr addr, uptr size) {
156 return 0 == internal_mprotect((void *)addr, size, PROT_READ);
#if !SANITIZER_APPLE
// Malloc zones exist only on Apple platforms; everywhere else this is a
// deliberate no-op (the Apple implementation lives elsewhere).
void MprotectMallocZones(void *addr, int prot) {}
#endif
163 fd_t OpenFile(const char *filename, FileAccessMode mode, error_t *errno_p) {
164 if (ShouldMockFailureToOpen(filename))
165 return kInvalidFd;
166 int flags;
167 switch (mode) {
168 case RdOnly: flags = O_RDONLY; break;
169 case WrOnly: flags = O_WRONLY | O_CREAT | O_TRUNC; break;
170 case RdWr: flags = O_RDWR | O_CREAT; break;
172 fd_t res = internal_open(filename, flags, 0660);
173 if (internal_iserror(res, errno_p))
174 return kInvalidFd;
175 return ReserveStandardFds(res);
178 void CloseFile(fd_t fd) {
179 internal_close(fd);
182 bool ReadFromFile(fd_t fd, void *buff, uptr buff_size, uptr *bytes_read,
183 error_t *error_p) {
184 uptr res = internal_read(fd, buff, buff_size);
185 if (internal_iserror(res, error_p))
186 return false;
187 if (bytes_read)
188 *bytes_read = res;
189 return true;
192 bool WriteToFile(fd_t fd, const void *buff, uptr buff_size, uptr *bytes_written,
193 error_t *error_p) {
194 uptr res = internal_write(fd, buff, buff_size);
195 if (internal_iserror(res, error_p))
196 return false;
197 if (bytes_written)
198 *bytes_written = res;
199 return true;
202 void *MapFileToMemory(const char *file_name, uptr *buff_size) {
203 fd_t fd = OpenFile(file_name, RdOnly);
204 CHECK(fd != kInvalidFd);
205 uptr fsize = internal_filesize(fd);
206 CHECK_NE(fsize, (uptr)-1);
207 CHECK_GT(fsize, 0);
208 *buff_size = RoundUpTo(fsize, GetPageSizeCached());
209 uptr map = internal_mmap(nullptr, *buff_size, PROT_READ, MAP_PRIVATE, fd, 0);
210 return internal_iserror(map) ? nullptr : (void *)map;
213 void *MapWritableFileToMemory(void *addr, uptr size, fd_t fd, OFF_T offset) {
214 uptr flags = MAP_SHARED;
215 if (addr) flags |= MAP_FIXED;
216 uptr p = internal_mmap(addr, size, PROT_READ | PROT_WRITE, flags, fd, offset);
217 int mmap_errno = 0;
218 if (internal_iserror(p, &mmap_errno)) {
219 Printf("could not map writable file (%d, %lld, %zu): %zd, errno: %d\n",
220 fd, (long long)offset, size, p, mmap_errno);
221 return nullptr;
223 return (void *)p;
226 static inline bool IntervalsAreSeparate(uptr start1, uptr end1,
227 uptr start2, uptr end2) {
228 CHECK(start1 <= end1);
229 CHECK(start2 <= end2);
230 return (end1 < start2) || (end2 < start1);
233 // FIXME: this is thread-unsafe, but should not cause problems most of the time.
234 // When the shadow is mapped only a single thread usually exists (plus maybe
235 // several worker threads on Mac, which aren't expected to map big chunks of
236 // memory).
237 bool MemoryRangeIsAvailable(uptr range_start, uptr range_end) {
238 MemoryMappingLayout proc_maps(/*cache_enabled*/true);
239 if (proc_maps.Error())
240 return true; // and hope for the best
241 MemoryMappedSegment segment;
242 while (proc_maps.Next(&segment)) {
243 if (segment.start == segment.end) continue; // Empty range.
244 CHECK_NE(0, segment.end);
245 if (!IntervalsAreSeparate(segment.start, segment.end - 1, range_start,
246 range_end))
247 return false;
249 return true;
252 #if !SANITIZER_APPLE
253 void DumpProcessMap() {
254 MemoryMappingLayout proc_maps(/*cache_enabled*/true);
255 const sptr kBufSize = 4095;
256 char *filename = (char*)MmapOrDie(kBufSize, __func__);
257 MemoryMappedSegment segment(filename, kBufSize);
258 Report("Process memory map follows:\n");
259 while (proc_maps.Next(&segment)) {
260 Printf("\t%p-%p\t%s\n", (void *)segment.start, (void *)segment.end,
261 segment.filename);
263 Report("End of process memory map.\n");
264 UnmapOrDie(filename, kBufSize);
266 #endif
268 const char *GetPwd() {
269 return GetEnv("PWD");
// POSIX paths use '/' as their only separator.
bool IsPathSeparator(const char c) {
  return '/' == c;
}
// An absolute POSIX path is a non-null string beginning with a separator.
bool IsAbsolutePath(const char *path) {
  if (!path)
    return false;
  return path[0] == '/';
}
280 void ReportFile::Write(const char *buffer, uptr length) {
281 SpinMutexLock l(mu);
282 ReopenIfNecessary();
283 internal_write(fd, buffer, length);
286 bool GetCodeRangeForFile(const char *module, uptr *start, uptr *end) {
287 MemoryMappingLayout proc_maps(/*cache_enabled*/false);
288 InternalMmapVector<char> buff(kMaxPathLength);
289 MemoryMappedSegment segment(buff.data(), buff.size());
290 while (proc_maps.Next(&segment)) {
291 if (segment.IsExecutable() &&
292 internal_strcmp(module, segment.filename) == 0) {
293 *start = segment.start;
294 *end = segment.end;
295 return true;
298 return false;
301 uptr SignalContext::GetAddress() const {
302 auto si = static_cast<const siginfo_t *>(siginfo);
303 return (uptr)si->si_addr;
306 bool SignalContext::IsMemoryAccess() const {
307 auto si = static_cast<const siginfo_t *>(siginfo);
308 return si->si_signo == SIGSEGV || si->si_signo == SIGBUS;
311 int SignalContext::GetType() const {
312 return static_cast<const siginfo_t *>(siginfo)->si_signo;
315 const char *SignalContext::Describe() const {
316 switch (GetType()) {
317 case SIGFPE:
318 return "FPE";
319 case SIGILL:
320 return "ILL";
321 case SIGABRT:
322 return "ABRT";
323 case SIGSEGV:
324 return "SEGV";
325 case SIGBUS:
326 return "BUS";
327 case SIGTRAP:
328 return "TRAP";
330 return "UNKNOWN SIGNAL";
333 fd_t ReserveStandardFds(fd_t fd) {
334 CHECK_GE(fd, 0);
335 if (fd > 2)
336 return fd;
337 bool used[3];
338 internal_memset(used, 0, sizeof(used));
339 while (fd <= 2) {
340 used[fd] = true;
341 fd = internal_dup(fd);
343 for (int i = 0; i <= 2; ++i)
344 if (used[i])
345 internal_close(i);
346 return fd;
349 bool ShouldMockFailureToOpen(const char *path) {
350 return common_flags()->test_only_emulate_no_memorymap &&
351 internal_strncmp(path, "/proc/", 6) == 0;
354 #if SANITIZER_LINUX && !SANITIZER_ANDROID && !SANITIZER_GO
355 int GetNamedMappingFd(const char *name, uptr size, int *flags) {
356 if (!common_flags()->decorate_proc_maps || !name)
357 return -1;
358 char shmname[200];
359 CHECK(internal_strlen(name) < sizeof(shmname) - 10);
360 internal_snprintf(shmname, sizeof(shmname), "/dev/shm/%zu [%s]",
361 internal_getpid(), name);
362 int o_cloexec = 0;
363 #if defined(O_CLOEXEC)
364 o_cloexec = O_CLOEXEC;
365 #endif
366 int fd = ReserveStandardFds(
367 internal_open(shmname, O_RDWR | O_CREAT | O_TRUNC | o_cloexec, S_IRWXU));
368 CHECK_GE(fd, 0);
369 int res = internal_ftruncate(fd, size);
370 #if !defined(O_CLOEXEC)
371 res = fcntl(fd, F_SETFD, FD_CLOEXEC);
372 CHECK_EQ(0, res);
373 #endif
374 CHECK_EQ(0, res);
375 res = internal_unlink(shmname);
376 CHECK_EQ(0, res);
377 *flags &= ~(MAP_ANON | MAP_ANONYMOUS);
378 return fd;
380 #else
381 int GetNamedMappingFd(const char *name, uptr size, int *flags) {
382 return -1;
384 #endif
386 #if SANITIZER_ANDROID
387 #define PR_SET_VMA 0x53564d41
388 #define PR_SET_VMA_ANON_NAME 0
389 void DecorateMapping(uptr addr, uptr size, const char *name) {
390 if (!common_flags()->decorate_proc_maps || !name)
391 return;
392 internal_prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, addr, size, (uptr)name);
394 #else
395 void DecorateMapping(uptr addr, uptr size, const char *name) {
397 #endif
399 uptr MmapNamed(void *addr, uptr length, int prot, int flags, const char *name) {
400 int fd = GetNamedMappingFd(name, length, &flags);
401 uptr res = internal_mmap(addr, length, prot, flags, fd, 0);
402 if (!internal_iserror(res))
403 DecorateMapping(res, length, name);
404 return res;
408 } // namespace __sanitizer
410 #endif // SANITIZER_POSIX