// ----------------------------------------------------------------------
// CycleClock
//    A CycleClock tells you the current time in Cycles.  The "time"
//    is actually time since power-on.  This is like time() but doesn't
//    involve a system call and is much more precise.
//
// NOTE: Not all cpu/platform/kernel combinations guarantee that this
// clock increments at a constant rate or is synchronized across all logical
// cpus in a system.
//
// If you need the above guarantees, please consider using a different
// API. There are efforts to provide an interface which provides millisecond
// granularity and is implemented as a memory read. A memory read is generally
// cheaper than the CycleClock for many architectures.
//
// Also, in some out of order CPU implementations, the CycleClock is not
// serializing. So if you're trying to count at cycles granularity, your
// data might be inaccurate due to out of order instruction execution.
// ----------------------------------------------------------------------
#ifndef BENCHMARK_CYCLECLOCK_H_
#define BENCHMARK_CYCLECLOCK_H_

#include <cstdint>

#include "benchmark/benchmark.h"
#include "internal_macros.h"

#if defined(BENCHMARK_OS_MACOSX)
#include <mach/mach_time.h>
#endif

// For MSVC, we want to use '_asm rdtsc' when possible (since it works
// with even ancient MSVC compilers), and when not possible the
// __rdtsc intrinsic, declared in <intrin.h>. Unfortunately, in some
// environments, <windows.h> and <intrin.h> have conflicting
// declarations of some other intrinsics, breaking compilation.
// Therefore, we simply declare __rdtsc ourselves. See also
// http://connect.microsoft.com/VisualStudio/feedback/details/262047
#if defined(COMPILER_MSVC) && !defined(_M_IX86)
extern "C" uint64_t __rdtsc();
#pragma intrinsic(__rdtsc)
#endif

#if !defined(BENCHMARK_OS_WINDOWS) || defined(BENCHMARK_OS_MINGW)
#include <sys/time.h>
#include <time.h>
#endif

#ifdef BENCHMARK_OS_EMSCRIPTEN
#include <emscripten.h>
#endif

namespace benchmark {
// NOTE: only i386 and x86_64 have been well tested.
// PPC, sparc, alpha, and ia64 are based on
//    http://peter.kuscsik.com/wordpress/?p=14
// with modifications by m3b.  See also
//    https://setisvn.ssl.berkeley.edu/svn/lib/fftw-3.0.1/kernel/cycle.h
namespace cycleclock {
// This should return the number of cycles since power-on.  Thread-safe.
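// Illustrative usage (not part of the original header): the difference
// between two calls yields an elapsed count in whatever units Now() returns
// on the current platform (TSC ticks, mach time units, timer ticks, or
// nanoseconds), so only deltas are meaningful, never absolute values:
//   int64_t start = benchmark::cycleclock::Now();
//   // ... code under measurement ...
//   int64_t elapsed = benchmark::cycleclock::Now() - start;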
inline BENCHMARK_ALWAYS_INLINE int64_t Now() {
#if defined(BENCHMARK_OS_MACOSX)
  // this goes at the top because we need ALL Macs, regardless of
  // architecture, to return the number of "mach time units" that
  // have passed since startup.  See sysinfo.cc where
  // InitializeSystemInfo() sets the supposed cpu clock frequency of
  // macs to the number of mach time units per second, not actual
  // CPU clock frequency (which can change in the face of CPU
  // frequency scaling).  Also note that when the Mac sleeps, this
  // counter pauses; it does not continue counting, nor does it
  // reset to zero.
  return mach_absolute_time();
#elif defined(BENCHMARK_OS_EMSCRIPTEN)
  // this goes above x86-specific code because old versions of Emscripten
  // define __x86_64__, although they have nothing to do with it.
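  // emscripten_get_now() reports wall-clock time in milliseconds (as a
  // double), so scaling by 1e6 converts the value to nanoseconds.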
  return static_cast<int64_t>(emscripten_get_now() * 1e+6);
#elif defined(__i386__)
  int64_t ret;
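  // The "A" constraint binds the 64-bit result to the EDX:EAX register pair,
  // which is exactly where rdtsc deposits the timestamp counter.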
  __asm__ volatile("rdtsc" : "=A"(ret));
  return ret;
#elif defined(__x86_64__) || defined(__amd64__)
  uint64_t low, high;
  __asm__ volatile("rdtsc" : "=a"(low), "=d"(high));
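  // On x86-64, rdtsc still splits the counter across EDX (high 32 bits) and
  // EAX (low 32 bits), so the two halves are recombined manually.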
  return (high << 32) | low;
#elif defined(__powerpc__) || defined(__ppc__)
  // This returns a time-base, which is not always precisely a cycle-count.
  int64_t tbl, tbu0, tbu1;
  asm("mftbu %0" : "=r"(tbu0));
  asm("mftb  %0" : "=r"(tbl));
  asm("mftbu %0" : "=r"(tbu1));
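  // The upper word is read twice to detect a carry from the lower word in
  // between; if the two reads differ, tbl may belong to either upper value,
  // so the mask below zeroes it rather than risk a wildly wrong result.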
  tbl &= -static_cast<int64_t>(tbu0 == tbu1);
  // high 32 bits in tbu1; low 32 bits in tbl  (tbu0 is garbage)
  return (tbu1 << 32) | tbl;
#elif defined(__sparc__)
  int64_t tick;
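  // The raw byte sequence 0x83 0x41 0x00 0x00 encodes "rd %tick, %g1", which
  // reads the SPARC tick register into %g1; the mov then copies it out.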
  asm(".byte 0x83, 0x41, 0x00, 0x00");
  asm("mov   %%g1, %0" : "=r"(tick));
  return tick;
#elif defined(__ia64__)
  int64_t itc;
  asm("mov %0 = ar.itc" : "=r"(itc));
  return itc;
#elif defined(COMPILER_MSVC) && defined(_M_IX86)
  // Older MSVC compilers (like 7.x) don't seem to support the
  // __rdtsc intrinsic properly, so I prefer to use _asm instead
  // when I know it will work.  Otherwise, I'll use __rdtsc and hope
  // the code is being compiled with a non-ancient compiler.
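  // Note that there is no explicit return here: rdtsc leaves its result in
  // EDX:EAX, which is also how 32-bit MSVC expects a 64-bit return value to
  // be handed back to the caller.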
  _asm rdtsc
#elif defined(COMPILER_MSVC)
  return __rdtsc();
#elif defined(BENCHMARK_OS_NACL)
  // Native Client validator on x86/x86-64 allows RDTSC instructions,
  // and this case is handled above. Native Client validator on ARM
  // rejects MRC instructions (used in the ARM-specific sequence below),
  // so we handle it here. Portable Native Client compiles to
  // architecture-agnostic bytecode, which doesn't provide any
  // cycle counter access mnemonics.
  // Native Client does not provide any API to access cycle counter.
  // Use clock_gettime(CLOCK_MONOTONIC, ...) instead of gettimeofday
  // because it provides nanosecond resolution (which is noticeable at
  // least for PNaCl modules running on x86 Mac & Linux).
  // Initialize to always return 0 if clock_gettime fails.
  struct timespec ts = {0, 0};
  clock_gettime(CLOCK_MONOTONIC, &ts);
  return static_cast<int64_t>(ts.tv_sec) * 1000000000 + ts.tv_nsec;
#elif defined(__aarch64__)
  // System timer of ARMv8 runs at a different frequency than the CPU's.
  // The frequency is fixed, typically in the range 1-50MHz.  It can be
  // read at CNTFRQ special register.  We assume the OS has set up
  // the virtual timer properly.
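  // Illustrative conversion (not part of this header): ticks from cntvct_el0
  // can be turned into seconds by also reading the fixed counter frequency:
  //   uint64_t freq;
  //   asm volatile("mrs %0, cntfrq_el0" : "=r"(freq));
  //   double seconds = static_cast<double>(ticks) / freq;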
  int64_t virtual_timer_value;
  asm volatile("mrs %0, cntvct_el0" : "=r"(virtual_timer_value));
  return virtual_timer_value;
#elif defined(__ARM_ARCH)
  // V6 is the earliest arch that has a standard cyclecount
  // Native Client validator doesn't allow MRC instructions.
#if (__ARM_ARCH >= 6)
  uint32_t pmccntr;
  uint32_t pmuseren;
  uint32_t pmcntenset;
  // Read the user mode perf monitor counter access permissions.
  asm volatile("mrc p15, 0, %0, c9, c14, 0" : "=r"(pmuseren));
  if (pmuseren & 1) {  // Allows reading perfmon counters for user mode code.
    asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r"(pmcntenset));
    if (pmcntenset & 0x80000000ul) {  // Is it counting?
      asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r"(pmccntr));
      // The counter is set up to count every 64th cycle
      return static_cast<int64_t>(pmccntr) * 64;  // Should optimize to << 6
    }
  }
#endif
  struct timeval tv;
  gettimeofday(&tv, nullptr);
  return static_cast<int64_t>(tv.tv_sec) * 1000000 + tv.tv_usec;
#elif defined(__mips__)
  // mips apparently only allows rdtsc for superusers, so we fall
  // back to gettimeofday.  It's possible clock_gettime would be better.
  struct timeval tv;
  gettimeofday(&tv, nullptr);
  return static_cast<int64_t>(tv.tv_sec) * 1000000 + tv.tv_usec;
#elif defined(__s390__)  // Covers both s390 and s390x.
  // Return the CPU clock.
  uint64_t tsc;
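  // STCK stores the 64-bit TOD (time-of-day) clock, a steadily incrementing
  // hardware counter, rather than a per-CPU cycle counter in the strict sense.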
  asm("stck %0" : "=Q"(tsc) : : "cc");
  return tsc;
#else
  // The soft failover to a generic implementation is automatic only for ARM.
  // For other platforms the developer is expected to make an attempt to
  // create a fast implementation and use the generic version if nothing
  // better is available.
#error You need to define CycleTimer for your OS and CPU
#endif
}
}  // end namespace cycleclock
}  // end namespace benchmark

#endif  // BENCHMARK_CYCLECLOCK_H_