//===-- tsan_fd.cpp -------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

#include "tsan_fd.h"

#include <sanitizer_common/sanitizer_atomic.h>

#include "tsan_interceptors.h"
#include "tsan_rtl.h"

namespace __tsan {
const int kTableSizeL1 = 1024;
const int kTableSizeL2 = 1024;
const int kTableSize = kTableSizeL1 * kTableSizeL2;
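// Every fd is mapped to an FdDesc cell that resides in user memory and
// carries an optional FdSync used for acquire/release synchronization on
// I/O calls. The statically allocated FdSync objects below serve as shared
// synchronization addresses: filesync for regular files, socksync for
// sockets, and globsync for all fds when flags()->io_sync == 2.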
struct FdSync {
  atomic_uint64_t rc;
};

struct FdDesc {
  FdSync *sync;
  // This is used to establish write -> epoll_wait synchronization
  // where epoll_wait receives notification about the write.
  atomic_uintptr_t aux_sync;  // FdSync*
  Tid creation_tid;
  StackID creation_stack;
  bool closed;
};

struct FdContext {
  atomic_uintptr_t tab[kTableSizeL1];
  // Addresses used for synchronization.
  FdSync globsync;
  FdSync filesync;
  FdSync socksync;
  u64 connectsync;
};

static FdContext fdctx;
static bool bogusfd(int fd) {
  // Apparently a bogus fd value.
  return fd < 0 || fd >= kTableSize;
}
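// FdSync objects are reference counted. rc == (u64)-1 marks the statically
// allocated syncs in fdctx (set up in FdInit); ref/unref leave their counter
// untouched, so they are never freed.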
static FdSync *allocsync(ThreadState *thr, uptr pc) {
  FdSync *s = (FdSync *)user_alloc_internal(thr, pc, sizeof(FdSync),
                                            kDefaultAlignment, false);
  atomic_store(&s->rc, 1, memory_order_relaxed);
  return s;
}
static FdSync *ref(FdSync *s) {
  if (s && atomic_load(&s->rc, memory_order_relaxed) != (u64)-1)
    atomic_fetch_add(&s->rc, 1, memory_order_relaxed);
  return s;
}
static void unref(ThreadState *thr, uptr pc, FdSync *s) {
  if (s && atomic_load(&s->rc, memory_order_relaxed) != (u64)-1) {
    if (atomic_fetch_sub(&s->rc, 1, memory_order_acq_rel) == 1) {
      CHECK_NE(s, &fdctx.globsync);
      CHECK_NE(s, &fdctx.filesync);
      CHECK_NE(s, &fdctx.socksync);
      user_free(thr, pc, s, false);
    }
  }
}
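// Returns the descriptor cell for fd. The descriptor table is two-level:
// fdctx.tab[fd / kTableSizeL2] points to a lazily allocated block of
// kTableSizeL2 FdDesc cells; blocks are published with a CAS and the loser
// frees its copy.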
static FdDesc *fddesc(ThreadState *thr, uptr pc, int fd) {
  CHECK_GE(fd, 0);
  CHECK_LT(fd, kTableSize);
  atomic_uintptr_t *pl1 = &fdctx.tab[fd / kTableSizeL2];
  uptr l1 = atomic_load(pl1, memory_order_consume);
  if (l1 == 0) {
    uptr size = kTableSizeL2 * sizeof(FdDesc);
    // We need this to reside in user memory to properly catch races on it.
    void *p = user_alloc_internal(thr, pc, size, kDefaultAlignment, false);
    internal_memset(p, 0, size);
    MemoryResetRange(thr, (uptr)&fddesc, (uptr)p, size);
    if (atomic_compare_exchange_strong(pl1, &l1, (uptr)p, memory_order_acq_rel))
      l1 = (uptr)p;
    else
      user_free(thr, pc, p, false);
  }
  FdDesc *fds = reinterpret_cast<FdDesc *>(l1);
  return &fds[fd % kTableSizeL2];
}
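// flags()->io_sync selects how much synchronization I/O implies:
// 0 - none (the passed sync is dropped), 1 - the fd keeps the passed
// per-fd/per-kind sync, 2 - all fds share fdctx.globsync.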
// s must be already ref'ed.
static void init(ThreadState *thr, uptr pc, int fd, FdSync *s,
                 bool write = true) {
  FdDesc *d = fddesc(thr, pc, fd);
  // As a matter of fact, we don't intercept all close calls.
  // See e.g. libc __res_iclose().
  if (d->sync) {
    unref(thr, pc, d->sync);
    d->sync = 0;
  }
  unref(thr, pc,
        reinterpret_cast<FdSync *>(
            atomic_load(&d->aux_sync, memory_order_relaxed)));
  atomic_store(&d->aux_sync, 0, memory_order_relaxed);
  if (flags()->io_sync == 0) {
    unref(thr, pc, s);
  } else if (flags()->io_sync == 1) {
    d->sync = s;
  } else if (flags()->io_sync == 2) {
    unref(thr, pc, s);
    d->sync = &fdctx.globsync;
  }
  d->creation_tid = thr->tid;
  d->creation_stack = CurrentStackId(thr, pc);
  d->closed = false;
  // This prevents false positives on the fd_close_norace3.cpp test.
  // The mechanics of the false positive are not completely clear,
  // but it happens only if global reset is enabled (flush_memory_ms=1)
  // and may be related to lost writes during asynchronous MADV_DONTNEED.
  SlotLocker locker(thr);
  if (write) {
    // To catch races between fd usage and open.
    MemoryRangeImitateWrite(thr, pc, (uptr)d, 8);
  } else {
    // See the dup-related comment in FdClose.
    MemoryAccess(thr, pc, (uptr)d, 8, kAccessRead | kAccessSlotLocked);
  }
}
void FdInit() {
  atomic_store(&fdctx.globsync.rc, (u64)-1, memory_order_relaxed);
  atomic_store(&fdctx.filesync.rc, (u64)-1, memory_order_relaxed);
  atomic_store(&fdctx.socksync.rc, (u64)-1, memory_order_relaxed);
}
void FdOnFork(ThreadState *thr, uptr pc) {
  // On fork() we need to reset all fd's, because the child is going to
  // close all of them, and that will cause races between previous read/write
  // and the close.
  for (int l1 = 0; l1 < kTableSizeL1; l1++) {
    FdDesc *tab = (FdDesc *)atomic_load(&fdctx.tab[l1], memory_order_relaxed);
    if (tab == 0)
      break;
    for (int l2 = 0; l2 < kTableSizeL2; l2++) {
      FdDesc *d = &tab[l2];
      MemoryResetRange(thr, pc, (uptr)d, 8);
    }
  }
}
bool FdLocation(uptr addr, int *fd, Tid *tid, StackID *stack, bool *closed) {
  for (int l1 = 0; l1 < kTableSizeL1; l1++) {
    FdDesc *tab = (FdDesc *)atomic_load(&fdctx.tab[l1], memory_order_relaxed);
    if (tab == 0)
      break;
    if (addr >= (uptr)tab && addr < (uptr)(tab + kTableSizeL2)) {
      int l2 = (addr - (uptr)tab) / sizeof(FdDesc);
      FdDesc *d = &tab[l2];
      *fd = l1 * kTableSizeL2 + l2;
      *tid = d->creation_tid;
      *stack = d->creation_stack;
      *closed = d->closed;
      return true;
    }
  }
  return false;
}
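// FdAcquire/FdRelease are called from the I/O interceptors: operations that
// consume data from an fd (e.g. read) acquire on the fd's sync object, and
// operations that produce data (e.g. write) release on it, which establishes
// the expected happens-before between a write and the read that observes it.
// Both also issue a shadow read of the FdDesc cell to detect races with
// close().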
void FdAcquire(ThreadState *thr, uptr pc, int fd) {
  if (bogusfd(fd))
    return;
  FdDesc *d = fddesc(thr, pc, fd);
  FdSync *s = d->sync;
  DPrintf("#%d: FdAcquire(%d) -> %p\n", thr->tid, fd, s);
  MemoryAccess(thr, pc, (uptr)d, 8, kAccessRead);
  if (s)
    Acquire(thr, pc, (uptr)s);
}
void FdRelease(ThreadState *thr, uptr pc, int fd) {
  if (bogusfd(fd))
    return;
  FdDesc *d = fddesc(thr, pc, fd);
  FdSync *s = d->sync;
  DPrintf("#%d: FdRelease(%d) -> %p\n", thr->tid, fd, s);
  MemoryAccess(thr, pc, (uptr)d, 8, kAccessRead);
  if (s)
    Release(thr, pc, (uptr)s);
  if (uptr aux_sync = atomic_load(&d->aux_sync, memory_order_acquire))
    Release(thr, pc, aux_sync);
}
void FdAccess(ThreadState *thr, uptr pc, int fd) {
  DPrintf("#%d: FdAccess(%d)\n", thr->tid, fd);
  if (bogusfd(fd))
    return;
  FdDesc *d = fddesc(thr, pc, fd);
  MemoryAccess(thr, pc, (uptr)d, 8, kAccessRead);
}
void FdClose(ThreadState *thr, uptr pc, int fd, bool write) {
  DPrintf("#%d: FdClose(%d)\n", thr->tid, fd);
  if (bogusfd(fd))
    return;
  FdDesc *d = fddesc(thr, pc, fd);
  {
    // Need to lock the slot to make MemoryAccess and MemoryResetRange atomic
    // with respect to global reset. See the comment in MemoryRangeFreed.
    SlotLocker locker(thr);
    if (!MustIgnoreInterceptor(thr)) {
      if (write) {
        // To catch races between fd usage and close.
        MemoryAccess(thr, pc, (uptr)d, 8,
                     kAccessWrite | kAccessCheckOnly | kAccessSlotLocked);
      } else {
        // This path is used only by dup2/dup3 calls.
        // We do a read instead of a write because there are a number of
        // legitimate cases where a write would lead to false positives:
        // 1. Some software dups a closed pipe in place of a socket before
        //    closing the socket (to prevent races actually).
        // 2. Some daemons dup /dev/null in place of stdin/stdout.
        // On the other hand, we have not seen cases when a write here catches
        // real bugs.
        MemoryAccess(thr, pc, (uptr)d, 8,
                     kAccessRead | kAccessCheckOnly | kAccessSlotLocked);
      }
    }
    // We need to clear it, because if we do not intercept any call out there
    // that creates an fd, we will hit false positives.
    MemoryResetRange(thr, pc, (uptr)d, 8);
  }
  unref(thr, pc, d->sync);
  d->sync = 0;
  unref(thr, pc,
        reinterpret_cast<FdSync *>(
            atomic_load(&d->aux_sync, memory_order_relaxed)));
  atomic_store(&d->aux_sync, 0, memory_order_relaxed);
  d->closed = true;
  d->creation_tid = thr->tid;
  d->creation_stack = CurrentStackId(thr, pc);
}
void FdFileCreate(ThreadState *thr, uptr pc, int fd) {
  DPrintf("#%d: FdFileCreate(%d)\n", thr->tid, fd);
  if (bogusfd(fd))
    return;
  init(thr, pc, fd, &fdctx.filesync);
}
void FdDup(ThreadState *thr, uptr pc, int oldfd, int newfd, bool write) {
  DPrintf("#%d: FdDup(%d, %d)\n", thr->tid, oldfd, newfd);
  if (bogusfd(oldfd) || bogusfd(newfd))
    return;
  // Ignore the case when the user dups a not yet connected socket.
  FdDesc *od = fddesc(thr, pc, oldfd);
  MemoryAccess(thr, pc, (uptr)od, 8, kAccessRead);
  FdClose(thr, pc, newfd, write);
  init(thr, pc, newfd, ref(od->sync), write);
}
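// Both ends of a pipe share a single FdSync, so a release on a write to wfd
// pairs with an acquire on a read from rfd.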
void FdPipeCreate(ThreadState *thr, uptr pc, int rfd, int wfd) {
  DPrintf("#%d: FdCreatePipe(%d, %d)\n", thr->tid, rfd, wfd);
  FdSync *s = allocsync(thr, pc);
  init(thr, pc, rfd, ref(s));
  init(thr, pc, wfd, ref(s));
  unref(thr, pc, s);
}
void FdEventCreate(ThreadState *thr, uptr pc, int fd) {
  DPrintf("#%d: FdEventCreate(%d)\n", thr->tid, fd);
  if (bogusfd(fd))
    return;
  init(thr, pc, fd, allocsync(thr, pc));
}
void FdSignalCreate(ThreadState *thr, uptr pc, int fd) {
  DPrintf("#%d: FdSignalCreate(%d)\n", thr->tid, fd);
  if (bogusfd(fd))
    return;
  init(thr, pc, fd, 0);
}
void FdInotifyCreate(ThreadState *thr, uptr pc, int fd) {
  DPrintf("#%d: FdInotifyCreate(%d)\n", thr->tid, fd);
  if (bogusfd(fd))
    return;
  init(thr, pc, fd, 0);
}
void FdPollCreate(ThreadState *thr, uptr pc, int fd) {
  DPrintf("#%d: FdPollCreate(%d)\n", thr->tid, fd);
  if (bogusfd(fd))
    return;
  init(thr, pc, fd, allocsync(thr, pc));
}
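// FdPollAdd stores the epoll fd's FdSync in the watched fd's aux_sync.
// FdRelease on the watched fd then also releases on it, while epoll_wait
// acquires on the epoll fd's sync, which models the write -> epoll_wait
// synchronization described in FdDesc.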
void FdPollAdd(ThreadState *thr, uptr pc, int epfd, int fd) {
  DPrintf("#%d: FdPollAdd(%d, %d)\n", thr->tid, epfd, fd);
  if (bogusfd(epfd) || bogusfd(fd))
    return;
  FdDesc *d = fddesc(thr, pc, fd);
  // Associate fd with epoll fd only once.
  // While an fd can be associated with multiple epolls at the same time,
  // or with different epolls during different phases of its lifetime,
  // the synchronization semantics (and examples) of this are unclear.
  // So we don't support this for now.
  // If we change the association, it will also create a lifetime management
  // problem for FdRelease, which accesses the aux_sync.
  if (atomic_load(&d->aux_sync, memory_order_relaxed))
    return;
  FdDesc *epd = fddesc(thr, pc, epfd);
  FdSync *s = epd->sync;
  if (!s)
    return;
  uptr cmp = 0;
  if (atomic_compare_exchange_strong(
          &d->aux_sync, &cmp, reinterpret_cast<uptr>(s), memory_order_release))
    ref(s);
}
void FdSocketCreate(ThreadState *thr, uptr pc, int fd) {
  DPrintf("#%d: FdSocketCreate(%d)\n", thr->tid, fd);
  if (bogusfd(fd))
    return;
  // It can be a UDP socket.
  init(thr, pc, fd, &fdctx.socksync);
}
void FdSocketAccept(ThreadState *thr, uptr pc, int fd, int newfd) {
  DPrintf("#%d: FdSocketAccept(%d, %d)\n", thr->tid, fd, newfd);
  if (bogusfd(fd))
    return;
  // Synchronize connect->accept.
  Acquire(thr, pc, (uptr)&fdctx.connectsync);
  init(thr, pc, newfd, &fdctx.socksync);
}
void FdSocketConnecting(ThreadState *thr, uptr pc, int fd) {
  DPrintf("#%d: FdSocketConnecting(%d)\n", thr->tid, fd);
  if (bogusfd(fd))
    return;
  // Synchronize connect->accept.
  Release(thr, pc, (uptr)&fdctx.connectsync);
}
void FdSocketConnect(ThreadState *thr, uptr pc, int fd) {
  DPrintf("#%d: FdSocketConnect(%d)\n", thr->tid, fd);
  if (bogusfd(fd))
    return;
  init(thr, pc, fd, &fdctx.socksync);
}
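// File2addr/Dir2addr provide synchronization addresses for operations on
// files and directories; currently all paths collapse to a single static
// address per function.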
uptr File2addr(const char *path) {
  (void)path;
  static u64 addr;
  return (uptr)&addr;
}

uptr Dir2addr(const char *path) {
  (void)path;
  static u64 addr;
  return (uptr)&addr;
}
}  // namespace __tsan