// SPDX-License-Identifier: GPL-2.0
/*
 *	Precise Delay Loops for i386
 *
 *	Copyright (C) 1993 Linus Torvalds
 *	Copyright (C) 1997 Martin Mares <mj@atrey.karlin.mff.cuni.cz>
 *	Copyright (C) 2008 Jiri Hladky <hladky _dot_ jiri _at_ gmail _dot_ com>
 *
 *	The __delay function must _NOT_ be inlined as its execution time
 *	depends wildly on alignment on many x86 processors. The additional
 *	jump magic is needed to get the timing stable on all the CPU's
 *	we have to worry about.
 */
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/timex.h>
#include <linux/preempt.h>
#include <linux/delay.h>

#include <asm/processor.h>
#include <asm/delay.h>
#include <asm/timer.h>
#include <asm/mwait.h>

#ifdef CONFIG_SMP
# include <asm/smp.h>
#endif
static void delay_loop(u64 __loops);
/*
 * Calibration and selection of the delay mechanism happens only once
 * during boot.
 */
static void (*delay_fn)(u64) __ro_after_init = delay_loop;
static void (*delay_halt_fn)(u64 start, u64 cycles) __ro_after_init;
/* simple loop based delay: */
static void delay_loop(u64 __loops)
{
	unsigned long loops = (unsigned long)__loops;

	asm volatile(
		"	test %0,%0	\n"
		"	jz 3f		\n"
		"	jmp 1f		\n"

		".align 16		\n"
		"1:	jmp 2f		\n"

		".align 16		\n"
		"2:	dec %0		\n"
		"	jnz 2b		\n"
		"3:	dec %0		\n"

		: "+a" (loops)
		:
	);
}
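/*
 * Added note: the ".align 16" directives and the jump chain above are
 * the "jump magic" the header comment refers to. They pin the loop
 * entry points to 16-byte boundaries so the timing of the dec/jnz loop
 * does not depend on where this function happens to be placed.
 */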
/* TSC based delay: */
static void delay_tsc(u64 cycles)
{
	u64 bclock, now;
	int cpu;

	preempt_disable();
	cpu = smp_processor_id();
	bclock = rdtsc_ordered();
	for (;;) {
		now = rdtsc_ordered();
		if ((now - bclock) >= cycles)
			break;

		/* Allow RT tasks to run */
		preempt_enable();
		rep_nop();
		preempt_disable();

		/*
		 * It is possible that we moved to another CPU, and
		 * since TSC's are per-cpu we need to calculate
		 * that. The delay must guarantee that we wait "at
		 * least" the amount of time. Being moved to another
		 * CPU could make the wait longer but we just need to
		 * make sure we waited long enough. Rebalance the
		 * counter for this CPU.
		 */
		if (unlikely(cpu != smp_processor_id())) {
			cycles -= (now - bclock);
			cpu = smp_processor_id();
			bclock = rdtsc_ordered();
		}
	}
	preempt_enable();
}
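/*
 * Worked example (made-up numbers): for cycles = 1000, if the task
 * migrates after now - bclock = 400 cycles on the old CPU, the budget
 * is rebalanced to 600 cycles counted from a fresh bclock read on the
 * new CPU. The total wait is at least 1000 cycles, possibly more.
 */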
/*
 * On Intel the TPAUSE instruction waits until any of:
 * 1) the TSC counter exceeds the value provided in EDX:EAX
 * 2) global timeout in IA32_UMWAIT_CONTROL is exceeded
 * 3) an external interrupt occurs
 */
static void delay_halt_tpause(u64 start, u64 cycles)
{
	u64 until = start + cycles;
	u32 eax, edx;

	eax = lower_32_bits(until);
	edx = upper_32_bits(until);

	/*
	 * Hard code the deeper (C0.2) sleep state because exit latency is
	 * small compared to the "microseconds" that usleep() will delay.
	 */
	__tpause(TPAUSE_C02_STATE, edx, eax);
}
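/*
 * Illustrative split of the 64-bit deadline: for until = 0x200000123,
 * lower_32_bits() yields eax = 0x00000123 and upper_32_bits() yields
 * edx = 0x00000002, so TPAUSE waits until the TSC exceeds EDX:EAX
 * (unless the global timeout or an external interrupt ends it early).
 */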
/*
 * On some AMD platforms, MWAITX has a configurable 32-bit timer, that
 * counts with TSC frequency. The input value is the number of TSC cycles
 * to wait. MWAITX will also exit when the timer expires.
 */
static void delay_halt_mwaitx(u64 unused, u64 cycles)
{
	u64 delay;

	delay = min_t(u64, MWAITX_MAX_WAIT_CYCLES, cycles);
	/*
	 * Use cpu_tss_rw as a cacheline-aligned, seldom accessed per-cpu
	 * variable as the monitor target.
	 */
	__monitorx(raw_cpu_ptr(&cpu_tss_rw), 0, 0);

	/*
	 * AMD, like Intel, supports the EAX hint and EAX=0xf means, do not
	 * enter any deep C-state and we use it here in delay() to minimize
	 * wakeup latency.
	 */
	__mwaitx(MWAITX_DISABLE_CSTATES, delay, MWAITX_ECX_TIMER_ENABLE);
}
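/*
 * Added note: the MWAITX timer is only 32 bits wide, hence the clamp to
 * MWAITX_MAX_WAIT_CYCLES above; delay_halt() below keeps calling back
 * until the full requested number of cycles has actually elapsed.
 */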
/*
 * Call a vendor specific function to delay for a given amount of time. Because
 * these functions may return earlier than requested, check for actual elapsed
 * time and call again until done.
 */
static void delay_halt(u64 __cycles)
{
	u64 start, end, cycles = __cycles;

	/*
	 * Timer value of 0 causes MWAITX to wait indefinitely, unless there
	 * is a store on the memory monitored by MONITORX.
	 */
	if (!cycles)
		return;

	start = rdtsc_ordered();

	for (;;) {
		delay_halt_fn(start, cycles);
		end = rdtsc_ordered();

		if (cycles <= end - start)
			break;

		cycles -= end - start;
		start = end;
	}
}
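/*
 * Worked example (made-up numbers): for a request of 1000 cycles, if
 * the first delay_halt_fn() call wakes up after end - start = 600
 * cycles, the loop retries with cycles = 400 and start = end, and only
 * returns once the remainder has elapsed as well.
 */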
void __init use_tsc_delay(void)
{
	if (delay_fn == delay_loop)
		delay_fn = delay_tsc;
}
void __init use_tpause_delay(void)
{
	delay_halt_fn = delay_halt_tpause;
	delay_fn = delay_halt;
}
void use_mwaitx_delay(void)
{
	delay_halt_fn = delay_halt_mwaitx;
	delay_fn = delay_halt;
}
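/*
 * Used by the generic delay calibration code; a meaningful timer value
 * can only be reported once the TSC-based backend has been selected.
 */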
int read_current_timer(unsigned long *timer_val)
{
	if (delay_fn == delay_tsc) {
		*timer_val = rdtsc();
		return 0;
	}
	return -1;
}
void __delay(unsigned long loops)
{
	delay_fn(loops);
}
EXPORT_SYMBOL(__delay);
noinline void __const_udelay(unsigned long xloops)
{
	unsigned long lpj = this_cpu_read(cpu_info.loops_per_jiffy) ? : loops_per_jiffy;
	int d0;

	xloops *= 4;
	asm("mull %%edx"
		:"=d" (xloops), "=&a" (d0)
		:"1" (xloops), "0" (lpj * (HZ / 4)));

	__delay(++xloops);
}
EXPORT_SYMBOL(__const_udelay);
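/*
 * Worked example (made-up numbers): with HZ = 250 and a calibrated
 * lpj = 4,000,000 (a ~1 GHz delay loop), udelay(100) passes in
 * xloops = 100 * 0x10c7 = 429,500. The mull above then computes
 * (429,500 * 4) * (4,000,000 * 250 / 4) >> 32, i.e.
 * xloops * lpj * HZ >> 32 ~= 100,000 loops, which __delay() spins
 * for: roughly 100 us.
 */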
void __udelay(unsigned long usecs)
{
	__const_udelay(usecs * 0x000010c7); /* 2**32 / 1000000 (rounded up) */
}
EXPORT_SYMBOL(__udelay);
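/*
 * Note on the conversion constants: 0x10c7 = 4295 ~= 2^32 / 10^6 and
 * 0x00005 = 5 ~= 2^32 / 10^9. Both are rounded up so the resulting
 * delay errs on the long side and is never shorter than requested.
 */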
void __ndelay(unsigned long nsecs)
{
	__const_udelay(nsecs * 0x00005); /* 2**32 / 1000000000 (rounded up) */
}
EXPORT_SYMBOL(__ndelay);