//===-- sanitizer_posix.cpp -----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries and implements POSIX-specific functions from
// sanitizer_posix.h.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_platform.h"

#if SANITIZER_POSIX

#include "sanitizer_common.h"
#include "sanitizer_file.h"
#include "sanitizer_flags.h"
#include "sanitizer_libc.h"
#include "sanitizer_posix.h"
#include "sanitizer_procmaps.h"

#include <errno.h>
#include <fcntl.h>
#include <signal.h>
#include <sys/mman.h>
// The MAP_NORESERVE define has been removed in FreeBSD 11.x, and even before
// that, it was never implemented. So just define it to zero.
// Guard the redefinition so platforms that do provide MAP_NORESERVE (e.g.
// Linux) keep their real value.
#ifndef MAP_NORESERVE
#define MAP_NORESERVE 0
#endif
37 namespace __sanitizer
{
39 // ------------- sanitizer_common.h
40 uptr
GetMmapGranularity() {
// Returns true if |err| is the out-of-memory errno.
bool ErrorIsOOM(error_t err) { return err == ENOMEM; }
46 void *MmapOrDie(uptr size
, const char *mem_type
, bool raw_report
) {
47 size
= RoundUpTo(size
, GetPageSizeCached());
48 uptr res
= MmapNamed(nullptr, size
, PROT_READ
| PROT_WRITE
,
49 MAP_PRIVATE
| MAP_ANON
, mem_type
);
51 if (UNLIKELY(internal_iserror(res
, &reserrno
)))
52 ReportMmapFailureAndDie(size
, mem_type
, "allocate", reserrno
, raw_report
);
53 IncreaseTotalMmap(size
);
57 void UnmapOrDie(void *addr
, uptr size
) {
58 if (!addr
|| !size
) return;
59 uptr res
= internal_munmap(addr
, size
);
61 if (UNLIKELY(internal_iserror(res
, &reserrno
)))
62 ReportMunmapFailureAndDie(addr
, size
, reserrno
);
63 DecreaseTotalMmap(size
);
66 void *MmapOrDieOnFatalError(uptr size
, const char *mem_type
) {
67 size
= RoundUpTo(size
, GetPageSizeCached());
68 uptr res
= MmapNamed(nullptr, size
, PROT_READ
| PROT_WRITE
,
69 MAP_PRIVATE
| MAP_ANON
, mem_type
);
71 if (UNLIKELY(internal_iserror(res
, &reserrno
))) {
72 if (reserrno
== ENOMEM
)
74 ReportMmapFailureAndDie(size
, mem_type
, "allocate", reserrno
);
76 IncreaseTotalMmap(size
);
80 // We want to map a chunk of address space aligned to 'alignment'.
81 // We do it by mapping a bit more and then unmapping redundant pieces.
82 // We probably can do it with fewer syscalls in some OS-dependent way.
83 void *MmapAlignedOrDieOnFatalError(uptr size
, uptr alignment
,
84 const char *mem_type
) {
85 CHECK(IsPowerOfTwo(size
));
86 CHECK(IsPowerOfTwo(alignment
));
87 uptr map_size
= size
+ alignment
;
88 // mmap maps entire pages and rounds up map_size needs to be a an integral
90 // We need to be aware of this size for calculating end and for unmapping
91 // fragments before and after the alignment region.
92 map_size
= RoundUpTo(map_size
, GetPageSizeCached());
93 uptr map_res
= (uptr
)MmapOrDieOnFatalError(map_size
, mem_type
);
94 if (UNLIKELY(!map_res
))
97 if (!IsAligned(res
, alignment
)) {
98 res
= (map_res
+ alignment
- 1) & ~(alignment
- 1);
99 UnmapOrDie((void*)map_res
, res
- map_res
);
101 uptr map_end
= map_res
+ map_size
;
102 uptr end
= res
+ size
;
103 end
= RoundUpTo(end
, GetPageSizeCached());
104 if (end
!= map_end
) {
105 CHECK_LT(end
, map_end
);
106 UnmapOrDie((void*)end
, map_end
- end
);
111 void *MmapNoReserveOrDie(uptr size
, const char *mem_type
) {
112 size
= RoundUpTo(size
, GetPageSizeCached());
113 uptr p
= MmapNamed(nullptr, size
, PROT_READ
| PROT_WRITE
,
114 MAP_PRIVATE
| MAP_ANON
| MAP_NORESERVE
, mem_type
);
116 if (UNLIKELY(internal_iserror(p
, &reserrno
)))
117 ReportMmapFailureAndDie(size
, mem_type
, "allocate noreserve", reserrno
);
118 IncreaseTotalMmap(size
);
122 static void *MmapFixedImpl(uptr fixed_addr
, uptr size
, bool tolerate_enomem
,
124 size
= RoundUpTo(size
, GetPageSizeCached());
125 fixed_addr
= RoundDownTo(fixed_addr
, GetPageSizeCached());
126 uptr p
= MmapNamed((void *)fixed_addr
, size
, PROT_READ
| PROT_WRITE
,
127 MAP_PRIVATE
| MAP_ANON
| MAP_FIXED
, name
);
129 if (UNLIKELY(internal_iserror(p
, &reserrno
))) {
130 if (tolerate_enomem
&& reserrno
== ENOMEM
)
133 internal_snprintf(mem_type
, sizeof(mem_type
), "memory at address 0x%zx",
135 ReportMmapFailureAndDie(size
, mem_type
, "allocate", reserrno
);
137 IncreaseTotalMmap(size
);
141 void *MmapFixedOrDie(uptr fixed_addr
, uptr size
, const char *name
) {
142 return MmapFixedImpl(fixed_addr
, size
, false /*tolerate_enomem*/, name
);
145 void *MmapFixedOrDieOnFatalError(uptr fixed_addr
, uptr size
, const char *name
) {
146 return MmapFixedImpl(fixed_addr
, size
, true /*tolerate_enomem*/, name
);
149 bool MprotectNoAccess(uptr addr
, uptr size
) {
150 return 0 == internal_mprotect((void*)addr
, size
, PROT_NONE
);
153 bool MprotectReadOnly(uptr addr
, uptr size
) {
154 return 0 == internal_mprotect((void *)addr
, size
, PROT_READ
);
157 bool MprotectReadWrite(uptr addr
, uptr size
) {
158 return 0 == internal_mprotect((void *)addr
, size
, PROT_READ
| PROT_WRITE
);
// No-op on generic POSIX; only the Mac implementation protects malloc zones.
void MprotectMallocZones(void *addr, int prot) {}
165 fd_t
OpenFile(const char *filename
, FileAccessMode mode
, error_t
*errno_p
) {
166 if (ShouldMockFailureToOpen(filename
))
170 case RdOnly
: flags
= O_RDONLY
; break;
171 case WrOnly
: flags
= O_WRONLY
| O_CREAT
| O_TRUNC
; break;
172 case RdWr
: flags
= O_RDWR
| O_CREAT
; break;
174 fd_t res
= internal_open(filename
, flags
, 0660);
175 if (internal_iserror(res
, errno_p
))
177 return ReserveStandardFds(res
);
180 void CloseFile(fd_t fd
) {
184 bool ReadFromFile(fd_t fd
, void *buff
, uptr buff_size
, uptr
*bytes_read
,
186 uptr res
= internal_read(fd
, buff
, buff_size
);
187 if (internal_iserror(res
, error_p
))
194 bool WriteToFile(fd_t fd
, const void *buff
, uptr buff_size
, uptr
*bytes_written
,
196 uptr res
= internal_write(fd
, buff
, buff_size
);
197 if (internal_iserror(res
, error_p
))
200 *bytes_written
= res
;
204 void *MapFileToMemory(const char *file_name
, uptr
*buff_size
) {
205 fd_t fd
= OpenFile(file_name
, RdOnly
);
206 CHECK(fd
!= kInvalidFd
);
207 uptr fsize
= internal_filesize(fd
);
208 CHECK_NE(fsize
, (uptr
)-1);
210 *buff_size
= RoundUpTo(fsize
, GetPageSizeCached());
211 uptr map
= internal_mmap(nullptr, *buff_size
, PROT_READ
, MAP_PRIVATE
, fd
, 0);
212 return internal_iserror(map
) ? nullptr : (void *)map
;
215 void *MapWritableFileToMemory(void *addr
, uptr size
, fd_t fd
, OFF_T offset
) {
216 uptr flags
= MAP_SHARED
;
217 if (addr
) flags
|= MAP_FIXED
;
218 uptr p
= internal_mmap(addr
, size
, PROT_READ
| PROT_WRITE
, flags
, fd
, offset
);
220 if (internal_iserror(p
, &mmap_errno
)) {
221 Printf("could not map writable file (%d, %lld, %zu): %zd, errno: %d\n",
222 fd
, (long long)offset
, size
, p
, mmap_errno
);
228 static inline bool IntervalsAreSeparate(uptr start1
, uptr end1
,
229 uptr start2
, uptr end2
) {
230 CHECK(start1
<= end1
);
231 CHECK(start2
<= end2
);
232 return (end1
< start2
) || (end2
< start1
);
235 // FIXME: this is thread-unsafe, but should not cause problems most of the time.
236 // When the shadow is mapped only a single thread usually exists (plus maybe
237 // several worker threads on Mac, which aren't expected to map big chunks of
239 bool MemoryRangeIsAvailable(uptr range_start
, uptr range_end
) {
240 MemoryMappingLayout
proc_maps(/*cache_enabled*/true);
241 if (proc_maps
.Error())
242 return true; // and hope for the best
243 MemoryMappedSegment segment
;
244 while (proc_maps
.Next(&segment
)) {
245 if (segment
.start
== segment
.end
) continue; // Empty range.
246 CHECK_NE(0, segment
.end
);
247 if (!IntervalsAreSeparate(segment
.start
, segment
.end
- 1, range_start
,
255 void DumpProcessMap() {
256 MemoryMappingLayout
proc_maps(/*cache_enabled*/true);
257 const sptr kBufSize
= 4095;
258 char *filename
= (char*)MmapOrDie(kBufSize
, __func__
);
259 MemoryMappedSegment
segment(filename
, kBufSize
);
260 Report("Process memory map follows:\n");
261 while (proc_maps
.Next(&segment
)) {
262 Printf("\t%p-%p\t%s\n", (void *)segment
.start
, (void *)segment
.end
,
265 Report("End of process memory map.\n");
266 UnmapOrDie(filename
, kBufSize
);
270 const char *GetPwd() {
271 return GetEnv("PWD");
// POSIX has a single path separator character.
bool IsPathSeparator(const char c) {
  return c == '/';
}
278 bool IsAbsolutePath(const char *path
) {
279 return path
!= nullptr && IsPathSeparator(path
[0]);
282 void ReportFile::Write(const char *buffer
, uptr length
) {
285 internal_write(fd
, buffer
, length
);
288 bool GetCodeRangeForFile(const char *module
, uptr
*start
, uptr
*end
) {
289 MemoryMappingLayout
proc_maps(/*cache_enabled*/false);
290 InternalMmapVector
<char> buff(kMaxPathLength
);
291 MemoryMappedSegment
segment(buff
.data(), buff
.size());
292 while (proc_maps
.Next(&segment
)) {
293 if (segment
.IsExecutable() &&
294 internal_strcmp(module
, segment
.filename
) == 0) {
295 *start
= segment
.start
;
303 uptr
SignalContext::GetAddress() const {
304 auto si
= static_cast<const siginfo_t
*>(siginfo
);
305 return (uptr
)si
->si_addr
;
308 bool SignalContext::IsMemoryAccess() const {
309 auto si
= static_cast<const siginfo_t
*>(siginfo
);
310 return si
->si_signo
== SIGSEGV
|| si
->si_signo
== SIGBUS
;
313 int SignalContext::GetType() const {
314 return static_cast<const siginfo_t
*>(siginfo
)->si_signo
;
317 const char *SignalContext::Describe() const {
332 return "UNKNOWN SIGNAL";
335 fd_t
ReserveStandardFds(fd_t fd
) {
340 internal_memset(used
, 0, sizeof(used
));
343 fd
= internal_dup(fd
);
345 for (int i
= 0; i
<= 2; ++i
)
351 bool ShouldMockFailureToOpen(const char *path
) {
352 return common_flags()->test_only_emulate_no_memorymap
&&
353 internal_strncmp(path
, "/proc/", 6) == 0;
356 #if SANITIZER_LINUX && !SANITIZER_ANDROID && !SANITIZER_GO
357 int GetNamedMappingFd(const char *name
, uptr size
, int *flags
) {
358 if (!common_flags()->decorate_proc_maps
|| !name
)
361 CHECK(internal_strlen(name
) < sizeof(shmname
) - 10);
362 internal_snprintf(shmname
, sizeof(shmname
), "/dev/shm/%zu [%s]",
363 internal_getpid(), name
);
365 #if defined(O_CLOEXEC)
366 o_cloexec
= O_CLOEXEC
;
368 int fd
= ReserveStandardFds(
369 internal_open(shmname
, O_RDWR
| O_CREAT
| O_TRUNC
| o_cloexec
, S_IRWXU
));
371 int res
= internal_ftruncate(fd
, size
);
372 #if !defined(O_CLOEXEC)
373 res
= fcntl(fd
, F_SETFD
, FD_CLOEXEC
);
377 res
= internal_unlink(shmname
);
379 *flags
&= ~(MAP_ANON
| MAP_ANONYMOUS
);
383 int GetNamedMappingFd(const char *name
, uptr size
, int *flags
) {
388 #if SANITIZER_ANDROID
389 #define PR_SET_VMA 0x53564d41
390 #define PR_SET_VMA_ANON_NAME 0
391 void DecorateMapping(uptr addr
, uptr size
, const char *name
) {
392 if (!common_flags()->decorate_proc_maps
|| !name
)
394 internal_prctl(PR_SET_VMA
, PR_SET_VMA_ANON_NAME
, addr
, size
, (uptr
)name
);
397 void DecorateMapping(uptr addr
, uptr size
, const char *name
) {
401 uptr
MmapNamed(void *addr
, uptr length
, int prot
, int flags
, const char *name
) {
402 int fd
= GetNamedMappingFd(name
, length
, &flags
);
403 uptr res
= internal_mmap(addr
, length
, prot
, flags
, fd
, 0);
404 if (!internal_iserror(res
))
405 DecorateMapping(res
, length
, name
);
410 } // namespace __sanitizer
412 #endif // SANITIZER_POSIX