1 //===- Unix/Threading.inc - Unix Threading Implementation ----- -*- C++ -*-===//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
9 // This file provides the Unix specific implementation of Threading functions.
11 //===----------------------------------------------------------------------===//
14 #include "llvm/ADT/ScopeExit.h"
15 #include "llvm/ADT/SmallString.h"
16 #include "llvm/ADT/SmallVector.h"
17 #include "llvm/ADT/StringRef.h"
18 #include "llvm/ADT/Twine.h"
19 #include "llvm/Support/MemoryBuffer.h"
20 #include "llvm/Support/raw_ostream.h"
22 #if defined(__APPLE__)
23 #include <mach/mach_init.h>
24 #include <mach/mach_port.h>
25 #include <pthread/qos.h>
26 #include <sys/sysctl.h>
27 #include <sys/types.h>
32 #if defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__DragonFly__)
33 #include <pthread_np.h> // For pthread_getthreadid_np() / pthread_set_name_np()
36 // Must be included after Threading.inc to provide definition for llvm::thread
37 // because FreeBSD's condvar.h (included by user.h) misuses the "thread"
40 #include "llvm/Support/thread.h"
43 #if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
45 #include <sys/cpuset.h>
46 #include <sys/sysctl.h>
51 #if defined(__NetBSD__)
52 #include <lwp.h> // For _lwp_self()
55 #if defined(__OpenBSD__)
56 #include <unistd.h> // For getthrid()
59 #if defined(__linux__)
60 #include <sched.h> // For sched_getaffinity
61 #include <sys/syscall.h> // For syscall codes
62 #include <unistd.h> // For syscall()
65 #if defined(__HAIKU__)
66 #include <OS.h> // For B_OS_NAME_LENGTH
// Spawn a new thread running ThreadFunc(Arg) and hand back its pthread
// handle.  When StackSizeInBytes is provided, the thread is created with
// that stack size.  Every pthreads failure is fatal via ReportErrnumFatal.
llvm_execute_on_thread_impl(void *(*ThreadFunc)(void *), void *Arg,
                            std::optional<unsigned> StackSizeInBytes) {
  // Construct the attributes object.
  if ((errnum = ::pthread_attr_init(&Attr)) != 0) {
    ReportErrnumFatal("pthread_attr_init failed", errnum);
  // Destroy the attributes object on every exit path once it has been
  // successfully initialized.
  auto AttrGuard = llvm::make_scope_exit([&] {
    if ((errnum = ::pthread_attr_destroy(&Attr)) != 0) {
      ReportErrnumFatal("pthread_attr_destroy failed", errnum);
  // Set the requested stack size, if given.
  if (StackSizeInBytes) {
    if ((errnum = ::pthread_attr_setstacksize(&Attr, *StackSizeInBytes)) != 0) {
      ReportErrnumFatal("pthread_attr_setstacksize failed", errnum);
  // Construct and execute the thread.
  if ((errnum = ::pthread_create(&Thread, &Attr, ThreadFunc, Arg)) != 0)
    ReportErrnumFatal("pthread_create failed", errnum);
// Detach Thread so the OS reclaims its resources automatically when it
// exits; a pthread_detach failure is reported as fatal.
void llvm_thread_detach_impl(pthread_t Thread) {
  if ((errnum = ::pthread_detach(Thread)) != 0) {
    ReportErrnumFatal("pthread_detach failed", errnum);
// Block until Thread terminates; its return value is discarded (nullptr).
// A pthread_join failure is reported as fatal.
void llvm_thread_join_impl(pthread_t Thread) {
  if ((errnum = ::pthread_join(Thread, nullptr)) != 0) {
    ReportErrnumFatal("pthread_join failed", errnum);
// On Unix the pthread handle doubles as the thread id, so this is the
// identity mapping.
pthread_t llvm_thread_get_id_impl(pthread_t Thread) {
  return Thread;
}
// Identify the calling thread by its own pthread handle.
pthread_t llvm_thread_get_current_id_impl() {
  const pthread_t Current = ::pthread_self();
  return Current;
}
// Return an OS-level numeric id for the calling thread, using whichever
// platform facility is available.
uint64_t llvm::get_threadid() {
#if defined(__APPLE__)
  // Calling "mach_thread_self()" bumps the reference count on the thread
  // port, so we need to deallocate it. mach_task_self() doesn't bump the ref
  // Cache the port per thread so the extra reference from mach_thread_self()
  // is only ever taken once.
  static thread_local thread_port_t Self = [] {
    thread_port_t InitSelf = mach_thread_self();
    // NOTE(review): `Self` is still being initialized at this point, so this
    // reads its zero-initialized value (MACH_PORT_NULL) rather than the port
    // just acquired -- presumably the deallocation was intended for the extra
    // reference on InitSelf.  Confirm against the Mach port documentation.
    mach_port_deallocate(mach_task_self(), Self);
#elif defined(__FreeBSD__) || defined(__DragonFly__)
  return uint64_t(pthread_getthreadid_np());
#elif defined(__NetBSD__)
  // NetBSD exposes the kernel lightweight-process id directly.
  return uint64_t(_lwp_self());
#elif defined(__OpenBSD__)
  return uint64_t(getthrid());
#elif defined(__ANDROID__)
  return uint64_t(gettid());
#elif defined(__linux__)
  // Use the raw syscall; a gettid() libc wrapper is not assumed here.
  return uint64_t(syscall(__NR_gettid));
  return uint64_t(pthread_self());
// Per-platform maximum thread-name length, including the terminating NUL
// (see the note in set_thread_name below); the branch results for platforms
// without a named constant are numeric literals.
static constexpr uint32_t get_max_thread_name_length_impl() {
#if defined(PTHREAD_MAX_NAMELEN_NP)
  // Platforms (e.g. NetBSD) that export the limit as a macro.
  return PTHREAD_MAX_NAMELEN_NP;
#elif defined(__HAIKU__)
  return B_OS_NAME_LENGTH;
#elif defined(__APPLE__)
#elif defined(__sun__) && defined(__svr4__)
#elif defined(__linux__) && HAVE_PTHREAD_SETNAME_NP
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || \
    defined(__DragonFly__)
#elif defined(__OpenBSD__)
// Public wrapper over the constexpr per-platform limit computed above.
uint32_t llvm::get_max_thread_name_length() {
  return get_max_thread_name_length_impl();
// Name the calling thread via whichever pthread naming API the platform
// provides.  Over-long names are truncated from the *front* so the more
// distinctive suffix survives (rationale below).
void llvm::set_thread_name(const Twine &Name) {
  // Make sure the input is null terminated.
  SmallString<64> Storage;
  StringRef NameStr = Name.toNullTerminatedStringRef(Storage);
  // Truncate from the beginning, not the end, if the specified name is too
  // long. For one, this ensures that the resulting string is still null
  // terminated, but additionally the end of a long thread name will usually
  // be more unique than the beginning, since a common pattern is for similar
  // threads to share a common prefix.
  // Note that the name length includes the null terminator.
  if (get_max_thread_name_length() > 0)
    NameStr = NameStr.take_back(get_max_thread_name_length() - 1);
#if defined(HAVE_PTHREAD_SET_NAME_NP)
  ::pthread_set_name_np(::pthread_self(), NameStr.data());
#elif defined(HAVE_PTHREAD_SETNAME_NP)
#if defined(__NetBSD__)
  // NetBSD's variant takes a printf-style format plus argument.
  ::pthread_setname_np(::pthread_self(), "%s",
                       const_cast<char *>(NameStr.data()));
#elif defined(__APPLE__)
  // Darwin's variant implicitly names the calling thread only.
  ::pthread_setname_np(NameStr.data());
  ::pthread_setname_np(::pthread_self(), NameStr.data());
// Fetch the current thread's name into Name.  On FreeBSD this walks the
// kernel's per-process thread table via sysctl; elsewhere it uses the
// platform's pthread_get(_)name_np variant when configure detected one.
void llvm::get_thread_name(SmallVectorImpl<char> &Name) {
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
  int pid = ::getpid();
  uint64_t tid = get_threadid();
  struct kinfo_proc *kp = nullptr, *nkp;
  // KERN_PROC_INC_THREAD makes the query return one kinfo_proc per thread.
  int ctl[4] = {CTL_KERN, KERN_PROC, KERN_PROC_PID | KERN_PROC_INC_THREAD,
    // Retry with a bigger buffer while the kernel reports ENOMEM (the
    // thread table can grow between the size probe and the fetch).
    error = sysctl(ctl, 4, kp, &len, nullptr, 0);
    if (kp == nullptr || (error != 0 && errno == ENOMEM)) {
      // Add extra space in case threads are added before next call.
      len += sizeof(*kp) + len / 10;
      nkp = (struct kinfo_proc *)::realloc(kp, len);
      if (nkp == nullptr) {
  // Scan the returned records for the entry matching our thread id.
  for (size_t i = 0; i < len / sizeof(*kp); i++) {
    if (kp[i].ki_tid == (lwpid_t)tid) {
      Name.append(kp[i].ki_tdname, kp[i].ki_tdname + strlen(kp[i].ki_tdname));
#elif defined(__linux__) && HAVE_PTHREAD_GETNAME_NP
  constexpr uint32_t len = get_max_thread_name_length_impl();
  char Buffer[len] = {'\0'}; // FIXME: working around MSan false positive.
  if (0 == ::pthread_getname_np(::pthread_self(), Buffer, len))
    Name.append(Buffer, Buffer + strlen(Buffer));
#elif defined(HAVE_PTHREAD_GET_NAME_NP) && HAVE_PTHREAD_GET_NAME_NP
  constexpr uint32_t len = get_max_thread_name_length_impl();
  ::pthread_get_name_np(::pthread_self(), buf, len);
  Name.append(buf, buf + strlen(buf));
#elif defined(HAVE_PTHREAD_GETNAME_NP) && HAVE_PTHREAD_GETNAME_NP
  constexpr uint32_t len = get_max_thread_name_length_impl();
  ::pthread_getname_np(::pthread_self(), buf, len);
  Name.append(buf, buf + strlen(buf));
// Lower (or restore) the calling thread's scheduling priority.  On Linux
// this switches between SCHED_OTHER and SCHED_IDLE; on Darwin it maps the
// request onto a QoS class.  Returns SUCCESS/FAILURE accordingly.
SetThreadPriorityResult llvm::set_thread_priority(ThreadPriority Priority) {
#if defined(__linux__) && defined(SCHED_IDLE)
  // Some *really* old glibcs are missing SCHED_IDLE.
  // http://man7.org/linux/man-pages/man3/pthread_setschedparam.3.html
  // http://man7.org/linux/man-pages/man2/sched_setscheduler.2.html
  sched_param priority;
  // For each of the above policies, param->sched_priority must be 0.
  priority.sched_priority = 0;
  // SCHED_IDLE for running very low priority background jobs.
  // SCHED_OTHER the standard round-robin time-sharing policy;
  return !pthread_setschedparam(
             // FIXME: consider SCHED_BATCH for Low
             Priority == ThreadPriority::Default ? SCHED_OTHER : SCHED_IDLE,
             ? SetThreadPriorityResult::SUCCESS
             : SetThreadPriorityResult::FAILURE;
#elif defined(__APPLE__)
  // https://developer.apple.com/documentation/apple-silicon/tuning-your-code-s-performance-for-apple-silicon
  // Background - Applies to work that isn’t visible to the user and may take
  // significant time to complete. Examples include indexing, backing up, or
  // synchronizing data. This class emphasizes energy efficiency.
  // Utility - Applies to work that takes anywhere from a few seconds to a few
  // minutes to complete. Examples include downloading a document or importing
  // data. This class offers a balance between responsiveness, performance, and
  // energy efficiency.
  // Map our three priorities onto the QoS classes described above.
  const auto qosClass = [&]() {
    case ThreadPriority::Background:
      return QOS_CLASS_BACKGROUND;
    case ThreadPriority::Low:
      return QOS_CLASS_UTILITY;
    case ThreadPriority::Default:
      return QOS_CLASS_DEFAULT;
  return !pthread_set_qos_class_self_np(qosClass, 0)
             ? SetThreadPriorityResult::SUCCESS
             : SetThreadPriorityResult::FAILURE;
  return SetThreadPriorityResult::FAILURE;
// Number of hardware threads usable by this process: affinity-mask aware on
// FreeBSD and Linux, otherwise std::thread::hardware_concurrency() with a
// guard for its documented "0 = unknown" result.
static int computeHostNumHardwareThreads() {
#if defined(__FreeBSD__)
  if (cpuset_getaffinity(CPU_LEVEL_WHICH, CPU_WHICH_TID, -1, sizeof(mask),
    return CPU_COUNT(&mask);
#elif defined(__linux__)
  if (sched_getaffinity(0, sizeof(Set), &Set) == 0)
    return CPU_COUNT(&Set);
  // Guard against std::thread::hardware_concurrency() returning 0.
  if (unsigned Val = std::thread::hardware_concurrency())
328 void llvm::ThreadPoolStrategy::apply_thread_strategy(
329 unsigned ThreadPoolNum) const {}
// Querying the thread affinity mask is not implemented for Unix hosts;
// reaching this function is a programming error.
llvm::BitVector llvm::get_thread_affinity_mask() {
  llvm_unreachable("Not implemented!");
336 unsigned llvm::get_cpus() { return 1; }
#if defined(__linux__) && (defined(__i386__) || defined(__x86_64__))
// On Linux, the number of physical cores can be computed from /proc/cpuinfo,
// using the number of unique physical/core id pairs. The following
// implementation reads the /proc/cpuinfo format on an x86_64 system.
static int computeHostNumPhysicalCores() {
  // Enabled represents the number of physical id/core id pairs with at least
  // one processor id enabled by the CPU affinity mask.
  cpu_set_t Affinity, Enabled;
  // Without the affinity mask we cannot tell which logical CPUs are usable.
  if (sched_getaffinity(0, sizeof(Affinity), &Affinity) != 0)
  // Read /proc/cpuinfo as a stream (until EOF reached). It cannot be
  // mmapped because it appears to have 0 size.
  llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> Text =
      llvm::MemoryBuffer::getFileAsStream("/proc/cpuinfo");
  if (std::error_code EC = Text.getError()) {
    llvm::errs() << "Can't read "
                 << "/proc/cpuinfo: " << EC.message() << "\n";
  // Split into one StringRef per line, dropping empty lines.
  SmallVector<StringRef, 8> strs;
  (*Text)->getBuffer().split(strs, "\n", /*MaxSplit=*/-1,
                             /*KeepEmpty=*/false);
  int CurProcessor = -1;
  int CurPhysicalId = -1;
  int CurSiblings = -1;
  // Each "key : value" line updates the current processor record; a
  // "core id" line completes one record and feeds the Enabled set.
  // NOTE(review): assumes Enabled has been CPU_ZERO'd before this loop --
  // confirm in the surrounding code.
  for (StringRef Line : strs) {
    std::pair<StringRef, StringRef> Data = Line.split(':');
    auto Name = Data.first.trim();
    auto Val = Data.second.trim();
    // These fields are available if the kernel is configured with CONFIG_SMP.
    if (Name == "processor")
      Val.getAsInteger(10, CurProcessor);
    else if (Name == "physical id")
      Val.getAsInteger(10, CurPhysicalId);
    else if (Name == "siblings")
      Val.getAsInteger(10, CurSiblings);
    else if (Name == "core id") {
      Val.getAsInteger(10, CurCoreId);
      // The processor id corresponds to an index into cpu_set_t.
      if (CPU_ISSET(CurProcessor, &Affinity))
        CPU_SET(CurPhysicalId * CurSiblings + CurCoreId, &Enabled);
  // One bit per distinct (physical id, core id) pair seen above.
  return CPU_COUNT(&Enabled);
#elif (defined(__linux__) && defined(__s390x__)) || defined(_AIX)
// Report the number of logical processors currently online via sysconf.
static int computeHostNumPhysicalCores() {
  return sysconf(_SC_NPROCESSORS_ONLN);
#elif defined(__linux__) && !defined(__ANDROID__)
// Generic Linux: count the CPUs in the process affinity mask.
static int computeHostNumPhysicalCores() {
  if (sched_getaffinity(0, sizeof(Affinity), &Affinity) == 0)
    return CPU_COUNT(&Affinity);
  // The call to sched_getaffinity() may have failed because the Affinity
  // mask is too small for the number of CPU's on the system (i.e. the
  // system has more than 1024 CPUs). Allocate a mask large enough for
  // twice as many CPUs.
  cpu_set_t *DynAffinity;
  DynAffinity = CPU_ALLOC(2048);
  // NOTE(review): DynAffinity appears to be freed only on the success path;
  // confirm the failure path does not leak the CPU_ALLOC'd mask.
  if (sched_getaffinity(0, CPU_ALLOC_SIZE(2048), DynAffinity) == 0) {
    int NumCPUs = CPU_COUNT(DynAffinity);
    CPU_FREE(DynAffinity);
#elif defined(__APPLE__)
// Gets the number of *physical cores* on the machine.
static int computeHostNumPhysicalCores() {
  size_t len = sizeof(count);
  // Primary query: the dedicated physical-CPU sysctl key.
  sysctlbyname("hw.physicalcpu", &count, &len, NULL, 0);
    // Secondary sysctl query for the CPU count.
    sysctl(nm, 2, &count, &len, NULL, 0);
#elif defined(__MVS__)
// z/OS: walk the fixed low-memory control blocks (PSA -> CVT -> CSD) to
// read the number of online standard CPs in this LPAR.
static int computeHostNumPhysicalCores() {
    // Byte offset of the pointer to the Communications Vector Table (CVT) in
    // the Prefixed Save Area (PSA). The table entry is a 31-bit pointer and
    // will be zero-extended to uintptr_t.
    // Byte offset of the pointer to the Common System Data Area (CSD) in the
    // CVT. The table entry is a 31-bit pointer and will be zero-extended to
    // Byte offset to the number of live CPs in the LPAR, stored as a signed
    // 32-bit value in the table.
    CSD_NUMBER_ONLINE_STANDARD_CPS = 264,
  // Chase the two 31-bit pointers, zero-extending each to a full address.
  char *CVT = reinterpret_cast<char *>(
      static_cast<uintptr_t>(reinterpret_cast<unsigned int &>(PSA[FLCCVT])));
  char *CSD = reinterpret_cast<char *>(
      static_cast<uintptr_t>(reinterpret_cast<unsigned int &>(CVT[CVTCSD])));
  return reinterpret_cast<int &>(CSD[CSD_NUMBER_ONLINE_STANDARD_CPS]);
// No platform-specific way to count physical cores is available here;
// -1 tells callers the count is unknown.
static int computeHostNumPhysicalCores() {
  return -1;
}
// Public entry point: compute the physical-core count once and cache it in
// a function-local static for all subsequent calls.
int llvm::get_physical_cores() {
  static int NumCores = computeHostNumPhysicalCores();