1 //===-- sanitizer_posix.cpp -----------------------------------------------===//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries and implements POSIX-specific functions from
// sanitizer_libc.h.
12 //===----------------------------------------------------------------------===//
#include "sanitizer_platform.h"

#if SANITIZER_POSIX

#include "sanitizer_common.h"
#include "sanitizer_file.h"
#include "sanitizer_flags.h"
#include "sanitizer_libc.h"
#include "sanitizer_posix.h"
#include "sanitizer_procmaps.h"

#include <errno.h>
#include <fcntl.h>
#include <signal.h>
#include <sys/mman.h>
// The MAP_NORESERVE define has been removed in FreeBSD 11.x, and even before
// that, it was never implemented. So just define it to zero.
// NOTE: the zero definition must stay FreeBSD-only; defining it
// unconditionally would clobber the real flag on Linux and other platforms.
#if SANITIZER_FREEBSD
#undef MAP_NORESERVE
#define MAP_NORESERVE 0
#endif
37 namespace __sanitizer
{
39 // ------------- sanitizer_common.h
40 uptr
GetMmapGranularity() {
44 bool ErrorIsOOM(error_t err
) { return err
== ENOMEM
; }
46 void *MmapOrDie(uptr size
, const char *mem_type
, bool raw_report
) {
47 size
= RoundUpTo(size
, GetPageSizeCached());
48 uptr res
= MmapNamed(nullptr, size
, PROT_READ
| PROT_WRITE
,
49 MAP_PRIVATE
| MAP_ANON
, mem_type
);
51 if (UNLIKELY(internal_iserror(res
, &reserrno
)))
52 ReportMmapFailureAndDie(size
, mem_type
, "allocate", reserrno
, raw_report
);
53 IncreaseTotalMmap(size
);
57 void UnmapOrDie(void *addr
, uptr size
) {
58 if (!addr
|| !size
) return;
59 uptr res
= internal_munmap(addr
, size
);
60 if (UNLIKELY(internal_iserror(res
))) {
61 Report("ERROR: %s failed to deallocate 0x%zx (%zd) bytes at address %p\n",
62 SanitizerToolName
, size
, size
, addr
);
63 CHECK("unable to unmap" && 0);
65 DecreaseTotalMmap(size
);
68 void *MmapOrDieOnFatalError(uptr size
, const char *mem_type
) {
69 size
= RoundUpTo(size
, GetPageSizeCached());
70 uptr res
= MmapNamed(nullptr, size
, PROT_READ
| PROT_WRITE
,
71 MAP_PRIVATE
| MAP_ANON
, mem_type
);
73 if (UNLIKELY(internal_iserror(res
, &reserrno
))) {
74 if (reserrno
== ENOMEM
)
76 ReportMmapFailureAndDie(size
, mem_type
, "allocate", reserrno
);
78 IncreaseTotalMmap(size
);
82 // We want to map a chunk of address space aligned to 'alignment'.
83 // We do it by mapping a bit more and then unmapping redundant pieces.
84 // We probably can do it with fewer syscalls in some OS-dependent way.
85 void *MmapAlignedOrDieOnFatalError(uptr size
, uptr alignment
,
86 const char *mem_type
) {
87 CHECK(IsPowerOfTwo(size
));
88 CHECK(IsPowerOfTwo(alignment
));
89 uptr map_size
= size
+ alignment
;
90 // mmap maps entire pages and rounds up map_size needs to be a an integral
92 // We need to be aware of this size for calculating end and for unmapping
93 // fragments before and after the alignment region.
94 map_size
= RoundUpTo(map_size
, GetPageSizeCached());
95 uptr map_res
= (uptr
)MmapOrDieOnFatalError(map_size
, mem_type
);
96 if (UNLIKELY(!map_res
))
99 if (!IsAligned(res
, alignment
)) {
100 res
= (map_res
+ alignment
- 1) & ~(alignment
- 1);
101 UnmapOrDie((void*)map_res
, res
- map_res
);
103 uptr map_end
= map_res
+ map_size
;
104 uptr end
= res
+ size
;
105 end
= RoundUpTo(end
, GetPageSizeCached());
106 if (end
!= map_end
) {
107 CHECK_LT(end
, map_end
);
108 UnmapOrDie((void*)end
, map_end
- end
);
113 void *MmapNoReserveOrDie(uptr size
, const char *mem_type
) {
114 size
= RoundUpTo(size
, GetPageSizeCached());
115 uptr p
= MmapNamed(nullptr, size
, PROT_READ
| PROT_WRITE
,
116 MAP_PRIVATE
| MAP_ANON
| MAP_NORESERVE
, mem_type
);
118 if (UNLIKELY(internal_iserror(p
, &reserrno
)))
119 ReportMmapFailureAndDie(size
, mem_type
, "allocate noreserve", reserrno
);
120 IncreaseTotalMmap(size
);
124 static void *MmapFixedImpl(uptr fixed_addr
, uptr size
, bool tolerate_enomem
,
126 size
= RoundUpTo(size
, GetPageSizeCached());
127 fixed_addr
= RoundDownTo(fixed_addr
, GetPageSizeCached());
128 uptr p
= MmapNamed((void *)fixed_addr
, size
, PROT_READ
| PROT_WRITE
,
129 MAP_PRIVATE
| MAP_ANON
| MAP_FIXED
, name
);
131 if (UNLIKELY(internal_iserror(p
, &reserrno
))) {
132 if (tolerate_enomem
&& reserrno
== ENOMEM
)
135 internal_snprintf(mem_type
, sizeof(mem_type
), "memory at address 0x%zx",
137 ReportMmapFailureAndDie(size
, mem_type
, "allocate", reserrno
);
139 IncreaseTotalMmap(size
);
143 void *MmapFixedOrDie(uptr fixed_addr
, uptr size
, const char *name
) {
144 return MmapFixedImpl(fixed_addr
, size
, false /*tolerate_enomem*/, name
);
147 void *MmapFixedOrDieOnFatalError(uptr fixed_addr
, uptr size
, const char *name
) {
148 return MmapFixedImpl(fixed_addr
, size
, true /*tolerate_enomem*/, name
);
151 bool MprotectNoAccess(uptr addr
, uptr size
) {
152 return 0 == internal_mprotect((void*)addr
, size
, PROT_NONE
);
155 bool MprotectReadOnly(uptr addr
, uptr size
) {
156 return 0 == internal_mprotect((void *)addr
, size
, PROT_READ
);
// No-op on generic POSIX; only the Mac implementation needs to mprotect
// malloc zones separately.
void MprotectMallocZones(void *addr, int prot) {}
163 fd_t
OpenFile(const char *filename
, FileAccessMode mode
, error_t
*errno_p
) {
164 if (ShouldMockFailureToOpen(filename
))
168 case RdOnly
: flags
= O_RDONLY
; break;
169 case WrOnly
: flags
= O_WRONLY
| O_CREAT
| O_TRUNC
; break;
170 case RdWr
: flags
= O_RDWR
| O_CREAT
; break;
172 fd_t res
= internal_open(filename
, flags
, 0660);
173 if (internal_iserror(res
, errno_p
))
175 return ReserveStandardFds(res
);
178 void CloseFile(fd_t fd
) {
182 bool ReadFromFile(fd_t fd
, void *buff
, uptr buff_size
, uptr
*bytes_read
,
184 uptr res
= internal_read(fd
, buff
, buff_size
);
185 if (internal_iserror(res
, error_p
))
192 bool WriteToFile(fd_t fd
, const void *buff
, uptr buff_size
, uptr
*bytes_written
,
194 uptr res
= internal_write(fd
, buff
, buff_size
);
195 if (internal_iserror(res
, error_p
))
198 *bytes_written
= res
;
202 void *MapFileToMemory(const char *file_name
, uptr
*buff_size
) {
203 fd_t fd
= OpenFile(file_name
, RdOnly
);
204 CHECK(fd
!= kInvalidFd
);
205 uptr fsize
= internal_filesize(fd
);
206 CHECK_NE(fsize
, (uptr
)-1);
208 *buff_size
= RoundUpTo(fsize
, GetPageSizeCached());
209 uptr map
= internal_mmap(nullptr, *buff_size
, PROT_READ
, MAP_PRIVATE
, fd
, 0);
210 return internal_iserror(map
) ? nullptr : (void *)map
;
213 void *MapWritableFileToMemory(void *addr
, uptr size
, fd_t fd
, OFF_T offset
) {
214 uptr flags
= MAP_SHARED
;
215 if (addr
) flags
|= MAP_FIXED
;
216 uptr p
= internal_mmap(addr
, size
, PROT_READ
| PROT_WRITE
, flags
, fd
, offset
);
218 if (internal_iserror(p
, &mmap_errno
)) {
219 Printf("could not map writable file (%d, %lld, %zu): %zd, errno: %d\n",
220 fd
, (long long)offset
, size
, p
, mmap_errno
);
226 static inline bool IntervalsAreSeparate(uptr start1
, uptr end1
,
227 uptr start2
, uptr end2
) {
228 CHECK(start1
<= end1
);
229 CHECK(start2
<= end2
);
230 return (end1
< start2
) || (end2
< start1
);
233 // FIXME: this is thread-unsafe, but should not cause problems most of the time.
234 // When the shadow is mapped only a single thread usually exists (plus maybe
235 // several worker threads on Mac, which aren't expected to map big chunks of
237 bool MemoryRangeIsAvailable(uptr range_start
, uptr range_end
) {
238 MemoryMappingLayout
proc_maps(/*cache_enabled*/true);
239 if (proc_maps
.Error())
240 return true; // and hope for the best
241 MemoryMappedSegment segment
;
242 while (proc_maps
.Next(&segment
)) {
243 if (segment
.start
== segment
.end
) continue; // Empty range.
244 CHECK_NE(0, segment
.end
);
245 if (!IntervalsAreSeparate(segment
.start
, segment
.end
- 1, range_start
,
253 void DumpProcessMap() {
254 MemoryMappingLayout
proc_maps(/*cache_enabled*/true);
255 const sptr kBufSize
= 4095;
256 char *filename
= (char*)MmapOrDie(kBufSize
, __func__
);
257 MemoryMappedSegment
segment(filename
, kBufSize
);
258 Report("Process memory map follows:\n");
259 while (proc_maps
.Next(&segment
)) {
260 Printf("\t%p-%p\t%s\n", (void *)segment
.start
, (void *)segment
.end
,
263 Report("End of process memory map.\n");
264 UnmapOrDie(filename
, kBufSize
);
268 const char *GetPwd() {
269 return GetEnv("PWD");
// POSIX uses '/' as the only path separator.
bool IsPathSeparator(const char c) {
  return c == '/';
}
276 bool IsAbsolutePath(const char *path
) {
277 return path
!= nullptr && IsPathSeparator(path
[0]);
280 void ReportFile::Write(const char *buffer
, uptr length
) {
283 internal_write(fd
, buffer
, length
);
286 bool GetCodeRangeForFile(const char *module
, uptr
*start
, uptr
*end
) {
287 MemoryMappingLayout
proc_maps(/*cache_enabled*/false);
288 InternalMmapVector
<char> buff(kMaxPathLength
);
289 MemoryMappedSegment
segment(buff
.data(), buff
.size());
290 while (proc_maps
.Next(&segment
)) {
291 if (segment
.IsExecutable() &&
292 internal_strcmp(module
, segment
.filename
) == 0) {
293 *start
= segment
.start
;
301 uptr
SignalContext::GetAddress() const {
302 auto si
= static_cast<const siginfo_t
*>(siginfo
);
303 return (uptr
)si
->si_addr
;
306 bool SignalContext::IsMemoryAccess() const {
307 auto si
= static_cast<const siginfo_t
*>(siginfo
);
308 return si
->si_signo
== SIGSEGV
|| si
->si_signo
== SIGBUS
;
311 int SignalContext::GetType() const {
312 return static_cast<const siginfo_t
*>(siginfo
)->si_signo
;
315 const char *SignalContext::Describe() const {
330 return "UNKNOWN SIGNAL";
333 fd_t
ReserveStandardFds(fd_t fd
) {
338 internal_memset(used
, 0, sizeof(used
));
341 fd
= internal_dup(fd
);
343 for (int i
= 0; i
<= 2; ++i
)
349 bool ShouldMockFailureToOpen(const char *path
) {
350 return common_flags()->test_only_emulate_no_memorymap
&&
351 internal_strncmp(path
, "/proc/", 6) == 0;
354 #if SANITIZER_LINUX && !SANITIZER_ANDROID && !SANITIZER_GO
355 int GetNamedMappingFd(const char *name
, uptr size
, int *flags
) {
356 if (!common_flags()->decorate_proc_maps
|| !name
)
359 CHECK(internal_strlen(name
) < sizeof(shmname
) - 10);
360 internal_snprintf(shmname
, sizeof(shmname
), "/dev/shm/%zu [%s]",
361 internal_getpid(), name
);
363 #if defined(O_CLOEXEC)
364 o_cloexec
= O_CLOEXEC
;
366 int fd
= ReserveStandardFds(
367 internal_open(shmname
, O_RDWR
| O_CREAT
| O_TRUNC
| o_cloexec
, S_IRWXU
));
369 int res
= internal_ftruncate(fd
, size
);
370 #if !defined(O_CLOEXEC)
371 res
= fcntl(fd
, F_SETFD
, FD_CLOEXEC
);
375 res
= internal_unlink(shmname
);
377 *flags
&= ~(MAP_ANON
| MAP_ANONYMOUS
);
381 int GetNamedMappingFd(const char *name
, uptr size
, int *flags
) {
386 #if SANITIZER_ANDROID
387 #define PR_SET_VMA 0x53564d41
388 #define PR_SET_VMA_ANON_NAME 0
389 void DecorateMapping(uptr addr
, uptr size
, const char *name
) {
390 if (!common_flags()->decorate_proc_maps
|| !name
)
392 internal_prctl(PR_SET_VMA
, PR_SET_VMA_ANON_NAME
, addr
, size
, (uptr
)name
);
395 void DecorateMapping(uptr addr
, uptr size
, const char *name
) {
399 uptr
MmapNamed(void *addr
, uptr length
, int prot
, int flags
, const char *name
) {
400 int fd
= GetNamedMappingFd(name
, length
, &flags
);
401 uptr res
= internal_mmap(addr
, length
, prot
, flags
, fd
, 0);
402 if (!internal_iserror(res
))
403 DecorateMapping(res
, length
, name
);
408 } // namespace __sanitizer
410 #endif // SANITIZER_POSIX