/*
 *  linux/arch/x86-64/kernel/time.c
 *
 *  "High Precision Event Timer" based timekeeping.
 *
 *  Copyright (c) 1991,1992,1995  Linus Torvalds
 *  Copyright (c) 1994  Alan Modra
 *  Copyright (c) 1995  Markus Kuhn
 *  Copyright (c) 1996  Ingo Molnar
 *  Copyright (c) 1998  Andrea Arcangeli
 *  Copyright (c) 2002  Vojtech Pavlik
 *  Copyright (c) 2003  Andi Kleen
 *  RTC support code taken from arch/i386/kernel/timers/time_hpet.c
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/mc146818rtc.h>
#include <linux/irq.h>
#include <linux/time.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/sysdev.h>
#include <linux/bcd.h>
#include <linux/kallsyms.h>
#include <linux/acpi.h>
#include <acpi/achware.h>	/* for PM timer frequency */
#include <asm/8253pit.h>
#include <asm/pgtable.h>
#include <asm/vsyscall.h>
#include <asm/timex.h>
#include <asm/proto.h>
#include <asm/hpet.h>
#include <asm/sections.h>
#include <linux/cpufreq.h>
#include <linux/hpet.h>
#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/apic.h>
#endif
u64 jiffies_64 = INITIAL_JIFFIES;

EXPORT_SYMBOL(jiffies_64);
#ifdef CONFIG_CPU_FREQ
static void cpufreq_delayed_get(void);
#endif

extern void i8254_timer_resume(void);
extern int using_apic_timer;
DEFINE_SPINLOCK(rtc_lock);
DEFINE_SPINLOCK(i8253_lock);
static int nohpet __initdata = 0;
static int notsc __initdata = 0;
#undef HPET_HACK_ENABLE_DANGEROUS
unsigned int cpu_khz;				/* TSC clocks / usec, not used here */
static unsigned long hpet_period;		/* fsecs / HPET clock */
unsigned long hpet_tick;			/* HPET clocks / interrupt */
unsigned long vxtime_hz = PIT_TICK_RATE;
int report_lost_ticks;				/* command line option */
unsigned long long monotonic_base;
struct vxtime_data __vxtime __section_vxtime;	/* for vsyscalls */

volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES;
unsigned long __wall_jiffies __section_wall_jiffies = INITIAL_JIFFIES;
struct timespec __xtime __section_xtime;
struct timezone __sys_tz __section_sys_tz;
static inline void rdtscll_sync(unsigned long *tsc)
{
#ifdef CONFIG_SMP
	sync_core();
#endif
	rdtscll(*tsc);
}
/*
 * do_gettimeoffset() returns microseconds since last timer interrupt was
 * triggered by hardware. A memory read of HPET is slower than a register read
 * of TSC, but much more reliable. It's also synchronized to the timer
 * interrupt. Note that do_gettimeoffset() may return more than hpet_tick, if a
 * timer interrupt has happened already, but vxtime.trigger wasn't updated yet.
 * This is not a problem, because jiffies hasn't updated either. They are bound
 * together by xtime_lock.
 */
static inline unsigned int do_gettimeoffset_tsc(void)
{
	unsigned long t;
	unsigned long x;
	rdtscll_sync(&t);
	if (t < vxtime.last_tsc) t = vxtime.last_tsc; /* hack */
	x = ((t - vxtime.last_tsc) * vxtime.tsc_quot) >> 32;
	return x;
}
static inline unsigned int do_gettimeoffset_hpet(void)
{
	return ((hpet_readl(HPET_COUNTER) - vxtime.last) * vxtime.quot) >> 32;
}
unsigned int (*do_gettimeoffset)(void) = do_gettimeoffset_tsc;
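/*
 * Worked example of the 32.32 fixed point math above (illustrative
 * numbers, not from this file): tsc_quot = (1000 << 32) / cpu_khz is
 * microseconds per TSC cycle scaled by 2^32. For a hypothetical 2 GHz
 * CPU, cpu_khz = 2000000, so a delta of 2000 cycles gives
 * (2000 * tsc_quot) >> 32 = 1 usec; the shift replaces a division on
 * the hot path.
 */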
/*
 * This version of gettimeofday() has microsecond resolution and better than
 * microsecond precision, as we're using at least a 10 MHz (usually 14.31818
 * MHz) HPET timer.
 */
void do_gettimeofday(struct timeval *tv)
{
	unsigned long seq, t;
	unsigned int sec, usec;

	do {
		seq = read_seqbegin(&xtime_lock);

		sec = xtime.tv_sec;
		usec = xtime.tv_nsec / 1000;

		/* i386 does some correction here to keep the clock
		   monotonic even when ntpd is fixing drift.
		   But those corrections didn't work for me, there is a non
		   monotonic clock anyway with ntp.
		   I dropped all corrections now until a real solution can
		   be found. Note when you fix it here you need to do the same
		   in arch/x86_64/kernel/vsyscall.c and export all needed
		   variables in vmlinux.lds. -AK */

		t = (jiffies - wall_jiffies) * (1000000L / HZ) +
			do_gettimeoffset();
		usec += t;

	} while (read_seqretry(&xtime_lock, seq));

	tv->tv_sec = sec + usec / 1000000;
	tv->tv_usec = usec % 1000000;
}

EXPORT_SYMBOL(do_gettimeofday);
/*
 * settimeofday() first undoes the correction that gettimeofday would do
 * on the time, and then saves it. This is ugly, but has been like this for
 * ages already.
 */
int do_settimeofday(struct timespec *tv)
{
	time_t wtm_sec, sec = tv->tv_sec;
	long wtm_nsec, nsec = tv->tv_nsec;

	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
		return -EINVAL;

	write_seqlock_irq(&xtime_lock);

	nsec -= do_gettimeoffset() * 1000 +
		(jiffies - wall_jiffies) * (NSEC_PER_SEC/HZ);

	wtm_sec  = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
	wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);

	set_normalized_timespec(&xtime, sec, nsec);
	set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);

	time_adjust = 0;		/* stop active adjtime() */
	time_status |= STA_UNSYNC;
	time_maxerror = NTP_PHASE_LIMIT;
	time_esterror = NTP_PHASE_LIMIT;

	write_sequnlock_irq(&xtime_lock);
	clock_was_set();
	return 0;
}

EXPORT_SYMBOL(do_settimeofday);
unsigned long profile_pc(struct pt_regs *regs)
{
	unsigned long pc = instruction_pointer(regs);

	/* Assume the lock function has either no stack frame or only a single word.
	   This checks if the address on the stack looks like a kernel text address.
	   There is a small window for false hits, but in that case the tick
	   is just accounted to the spinlock function.
	   Better would be to write these functions in assembler again
	   and check exactly. */
	if (in_lock_functions(pc)) {
		char *v = *(char **)regs->rsp;
		if ((v >= _stext && v <= _etext) ||
		    (v >= _sinittext && v <= _einittext) ||
		    (v >= (char *)MODULES_VADDR && v <= (char *)MODULES_END))
			return (unsigned long)v;
		return ((unsigned long *)regs->rsp)[1];
	}
	return pc;
}
EXPORT_SYMBOL(profile_pc);
/*
 * In order to set the CMOS clock precisely, set_rtc_mmss has to be called 500
 * ms after the second nowtime has started, because when nowtime is written
 * into the registers of the CMOS clock, it will jump to the next second
 * precisely 500 ms later. Check the Motorola MC146818A or Dallas DS12887 data
 * sheet for details.
 */
static void set_rtc_mmss(unsigned long nowtime)
{
	int real_seconds, real_minutes, cmos_minutes;
	unsigned char control, freq_select;

/*
 * IRQs are disabled when we're called from the timer interrupt,
 * no need for spin_lock_irqsave()
 */

	spin_lock(&rtc_lock);

/*
 * Tell the clock it's being set and stop it.
 */

	control = CMOS_READ(RTC_CONTROL);
	CMOS_WRITE(control | RTC_SET, RTC_CONTROL);

	freq_select = CMOS_READ(RTC_FREQ_SELECT);
	CMOS_WRITE(freq_select | RTC_DIV_RESET2, RTC_FREQ_SELECT);

	cmos_minutes = CMOS_READ(RTC_MINUTES);
	BCD_TO_BIN(cmos_minutes);

/*
 * since we're only adjusting minutes and seconds, don't interfere with hour
 * overflow. This avoids messing with unknown time zones but requires your RTC
 * not to be off by more than 15 minutes. Since we're calling it only when
 * our clock is externally synchronized using NTP, this shouldn't be a problem.
 */

	real_seconds = nowtime % 60;
	real_minutes = nowtime / 60;
	if (((abs(real_minutes - cmos_minutes) + 15) / 30) & 1)
		real_minutes += 30;	/* correct for half hour time zone */
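	/*
	 * Illustrative arithmetic for the test above: ((abs(diff) + 15) / 30)
	 * is odd exactly when diff lies within 15 minutes of an odd multiple
	 * of 30, e.g. diff = 29 -> (29 + 15) / 30 = 1 (adjust by 30), while
	 * diff = 10 -> (10 + 15) / 30 = 0 (no adjustment).
	 */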
	/* AMD 8111 is a really bad time keeper and hits this regularly.
	   It probably was an attempt to avoid screwing up DST, but ignore
	   that for now. */
	if (abs(real_minutes - cmos_minutes) >= 30) {
		printk(KERN_WARNING "time.c: can't update CMOS clock "
		       "from %d to %d\n", cmos_minutes, real_minutes);
	} else {
		BIN_TO_BCD(real_seconds);
		BIN_TO_BCD(real_minutes);
		CMOS_WRITE(real_seconds, RTC_SECONDS);
		CMOS_WRITE(real_minutes, RTC_MINUTES);
	}
/*
 * The following flags have to be released exactly in this order, otherwise the
 * DS12887 (popular MC146818A clone with integrated battery and quartz) will
 * not reset the oscillator and will not update precisely 500 ms later. You
 * won't find this mentioned in the Dallas Semiconductor data sheets, but who
 * believes data sheets anyway ... -- Markus Kuhn
 */

	CMOS_WRITE(control, RTC_CONTROL);
	CMOS_WRITE(freq_select, RTC_FREQ_SELECT);

	spin_unlock(&rtc_lock);
}
/* monotonic_clock(): returns # of nanoseconds passed since time_init()
 *	Note: This function is required to return accurate
 *	time even in the absence of multiple timer ticks.
 */
unsigned long long monotonic_clock(void)
{
	unsigned long seq;
	u32 last_offset, this_offset, offset;
	unsigned long long base;

	if (vxtime.mode == VXTIME_HPET) {
		do {
			seq = read_seqbegin(&xtime_lock);

			last_offset = vxtime.last;
			base = monotonic_base;
			this_offset = hpet_readl(HPET_T0_CMP) - hpet_tick;

		} while (read_seqretry(&xtime_lock, seq));
		offset = (this_offset - last_offset);
		offset *= (NSEC_PER_SEC/HZ) / hpet_tick;
		return base + offset;
	} else {
		do {
			seq = read_seqbegin(&xtime_lock);

			last_offset = vxtime.last_tsc;
			base = monotonic_base;
		} while (read_seqretry(&xtime_lock, seq));
		sync_core();
		rdtscll(this_offset);
		offset = (this_offset - last_offset) * 1000 / cpu_khz;
		return base + offset;
	}
}
EXPORT_SYMBOL(monotonic_clock);
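/*
 * Summary of the scheme above: the timer interrupt folds fully elapsed
 * ticks into monotonic_base (in nanoseconds) and records the counter
 * value at that point in vxtime.last / vxtime.last_tsc; monotonic_clock()
 * then adds the nanoseconds the counter has advanced since that snapshot,
 * so it stays accurate even across multiple missed timer ticks.
 */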
static noinline void handle_lost_ticks(int lost, struct pt_regs *regs)
{
	static long lost_count;
	static int warned;

	if (report_lost_ticks) {
		printk(KERN_WARNING "time.c: Lost %d timer "
		       "tick(s)! ", lost);
		print_symbol("rip %s)\n", regs->rip);
	}

	if (lost_count == 1000 && !warned) {
		printk(KERN_WARNING
		       "warning: many lost ticks.\n"
		       KERN_WARNING "Your time source seems to be unstable or "
		       "some driver is hogging interrupts\n");
		print_symbol("rip %s\n", regs->rip);
		if (vxtime.mode == VXTIME_TSC && vxtime.hpet_address) {
			printk(KERN_WARNING "Falling back to HPET\n");
			vxtime.last = hpet_readl(HPET_T0_CMP) - hpet_tick;
			vxtime.mode = VXTIME_HPET;
			do_gettimeoffset = do_gettimeoffset_hpet;
		}
		/* else should fall back to PIT, but code missing. */
		warned = 1;
	} else
		lost_count++;

#ifdef CONFIG_CPU_FREQ
	/* In some cases the CPU can change frequency without us noticing
	   (like going into thermal throttle).
	   Give cpufreq a chance to catch up. */
	if ((lost_count+1) % 25 == 0) {
		cpufreq_delayed_get();
	}
#endif
}
static irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	static unsigned long rtc_update = 0;
	unsigned long tsc;
	int delay, offset = 0, lost = 0;
/*
 * Here we are in the timer irq handler. We have irqs locally disabled (so we
 * don't need spin_lock_irqsave()) but we don't know if the timer_bh is running
 * on the other CPU, so we need a lock. We also need to lock the vsyscall
 * variables, because both do_timer() and us change them -arca+vojtech
 */

	write_seqlock(&xtime_lock);
	if (vxtime.hpet_address) {
		offset = hpet_readl(HPET_T0_CMP) - hpet_tick;
		delay = hpet_readl(HPET_COUNTER) - offset;
	} else {
		spin_lock(&i8253_lock);
		outb_p(0x00, 0x43);
		delay = inb_p(0x40);
		delay |= inb(0x40) << 8;
		spin_unlock(&i8253_lock);
		delay = LATCH - 1 - delay;
	}

	rdtscll_sync(&tsc);
	if (vxtime.mode == VXTIME_HPET) {
		if (offset - vxtime.last > hpet_tick) {
			lost = (offset - vxtime.last) / hpet_tick - 1;
		}

		monotonic_base +=
			(offset - vxtime.last) * (NSEC_PER_SEC/HZ) / hpet_tick;

		vxtime.last = offset;
#ifdef CONFIG_X86_PM_TIMER
	} else if (vxtime.mode == VXTIME_PMTMR) {
		lost = pmtimer_mark_offset();
#endif
	} else {
		offset = (((tsc - vxtime.last_tsc) *
			   vxtime.tsc_quot) >> 32) - (USEC_PER_SEC / HZ);
		if (offset < 0)
			offset = 0;

		if (offset > (USEC_PER_SEC / HZ)) {
			lost = offset / (USEC_PER_SEC / HZ);
			offset %= (USEC_PER_SEC / HZ);
		}
		monotonic_base += (tsc - vxtime.last_tsc) * 1000000 / cpu_khz;

		vxtime.last_tsc = tsc - vxtime.quot * delay / vxtime.tsc_quot;
		if ((((tsc - vxtime.last_tsc) *
		      vxtime.tsc_quot) >> 32) < offset)
			vxtime.last_tsc = tsc -
				(((long) offset << 32) / vxtime.tsc_quot) - 1;
	}
	if (lost > 0) {
		handle_lost_ticks(lost, regs);
		jiffies += lost;
	}
/*
 * Do the timer stuff.
 */

	do_timer(regs);
#ifndef CONFIG_SMP
	update_process_times(user_mode(regs));
#endif
/*
 * In the SMP case we use the local APIC timer interrupt to do the profiling,
 * except when we simulate SMP mode on a uniprocessor system, in that case we
 * have to call the local interrupt handler.
 */

#ifndef CONFIG_X86_LOCAL_APIC
	profile_tick(CPU_PROFILING, regs);
#else
	if (!using_apic_timer)
		smp_local_timer_interrupt(regs);
#endif
/*
 * If we have an externally synchronized Linux clock, then update CMOS clock
 * accordingly every ~11 minutes. set_rtc_mmss() will be called in the jiffy
 * closest to exactly 500 ms before the next second. If the update fails, we
 * don't care, as it'll be updated on the next turn, and the problem (time way
 * off) isn't likely to go away much sooner anyway.
 */
	if ((~time_status & STA_UNSYNC) && xtime.tv_sec > rtc_update &&
	    abs(xtime.tv_nsec - 500000000) <= tick_nsec / 2) {
		set_rtc_mmss(xtime.tv_sec);
		rtc_update = xtime.tv_sec + 660;
	}
	write_sequnlock(&xtime_lock);

	return IRQ_HANDLED;
}
static unsigned int cyc2ns_scale;
#define CYC2NS_SCALE_FACTOR 10	/* 2^10, carefully chosen */
static inline void set_cyc2ns_scale(unsigned long cpu_mhz)
{
	cyc2ns_scale = (1000 << CYC2NS_SCALE_FACTOR) / cpu_mhz;
}
static inline unsigned long long cycles_2_ns(unsigned long long cyc)
{
	return (cyc * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR;
}
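/*
 * Illustrative arithmetic for the scale above: for a hypothetical 1000 MHz
 * CPU, cyc2ns_scale = (1000 << 10) / 1000 = 1024, so cycles_2_ns(1000) =
 * (1000 * 1024) >> 10 = 1000 ns, i.e. one nanosecond per cycle as expected.
 * The 2^10 scaling keeps roughly 0.1% precision while avoiding a 64 bit
 * division on every call.
 */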
unsigned long long sched_clock(void)
{
	unsigned long a = 0;

#if 0
	/* Don't do a HPET read here. Using TSC always is much faster
	   and HPET may not be mapped yet when the scheduler first runs.
	   Disadvantage is a small drift between CPUs in some configurations,
	   but that should be tolerable. */
	if (__vxtime.mode == VXTIME_HPET)
		return (hpet_readl(HPET_COUNTER) * vxtime.quot) >> 32;
#endif
	/* Could do CPU core sync here. Opteron can execute rdtsc speculatively,
	   which means it is not completely exact and may not be monotonic
	   between CPUs. But the errors should be too small to matter for
	   scheduling purposes. */

	rdtscll(a);
	return cycles_2_ns(a);
}
unsigned long get_cmos_time(void)
{
	unsigned int timeout, year, mon, day, hour, min, sec;
	unsigned char last, this;
	unsigned long flags;
/*
 * The Linux interpretation of the CMOS clock register contents: When the
 * Update-In-Progress (UIP) flag goes from 1 to 0, the RTC registers show the
 * second which has precisely just started. Waiting for this can take up to 1
 * second; we time out after approximately 2.4 seconds on a machine with
 * standard 8.3 MHz ISA bus.
 */
	spin_lock_irqsave(&rtc_lock, flags);

	timeout = 1000000;
	last = this = 0;

	while (timeout && last && !this) {
		last = this;
		this = CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP;
		timeout--;
	}
/*
 * Here we are safe to assume the registers won't change for a whole second, so
 * we just go ahead and read them.
 */
	sec = CMOS_READ(RTC_SECONDS);
	min = CMOS_READ(RTC_MINUTES);
	hour = CMOS_READ(RTC_HOURS);
	day = CMOS_READ(RTC_DAY_OF_MONTH);
	mon = CMOS_READ(RTC_MONTH);
	year = CMOS_READ(RTC_YEAR);

	spin_unlock_irqrestore(&rtc_lock, flags);
/*
 * We know that x86-64 always uses BCD format, no need to check the config
 * register.
 */

	BCD_TO_BIN(sec);
	BCD_TO_BIN(min);
	BCD_TO_BIN(hour);
	BCD_TO_BIN(day);
	BCD_TO_BIN(mon);
	BCD_TO_BIN(year);

/*
 * x86-64 systems have only existed since 2002.
 * This will work up to Dec 31, 2100.
 */
	year += 2000;
	return mktime(year, mon, day, hour, min, sec);
}
#ifdef CONFIG_CPU_FREQ

/* Frequency scaling support. Adjust the TSC based timer when the CPU frequency
   changes.

   RED-PEN: On SMP we assume all CPUs run with the same frequency. It's
   not that important because current Opteron setups do not support
   scaling on SMP anyway.

   Should fix up last_tsc too. Currently gettimeofday in the
   first tick after the change will be slightly wrong. */
#include <linux/workqueue.h>

static unsigned int cpufreq_delayed_issched = 0;
static unsigned int cpufreq_init = 0;
static struct work_struct cpufreq_delayed_get_work;
static void handle_cpufreq_delayed_get(void *v)
{
	unsigned int cpu;

	for_each_online_cpu(cpu) {
		cpufreq_get(cpu);
	}
	cpufreq_delayed_issched = 0;
}
/* If we notice lost ticks, schedule a call to cpufreq_get() as it tries
 * to verify the CPU frequency the timing core thinks the CPU is running
 * at is still correct.
 */
static void cpufreq_delayed_get(void)
{
	static int warned;

	if (cpufreq_init && !cpufreq_delayed_issched) {
		cpufreq_delayed_issched = 1;
		if (!warned) {
			warned = 1;
			printk(KERN_DEBUG "Losing some ticks... checking if CPU frequency changed.\n");
		}
		schedule_work(&cpufreq_delayed_get_work);
	}
}
static unsigned int ref_freq = 0;
static unsigned long loops_per_jiffy_ref = 0;

static unsigned long cpu_khz_ref = 0;
static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
				 void *data)
{
	struct cpufreq_freqs *freq = data;
	unsigned long *lpj, dummy;

	if (cpu_has(&cpu_data[freq->cpu], X86_FEATURE_CONSTANT_TSC))
		return 0;

	lpj = &dummy;
	if (!(freq->flags & CPUFREQ_CONST_LOOPS))
#ifdef CONFIG_SMP
		lpj = &cpu_data[freq->cpu].loops_per_jiffy;
#else
		lpj = &boot_cpu_data.loops_per_jiffy;
#endif

	if (!ref_freq) {
		ref_freq = freq->old;
		loops_per_jiffy_ref = *lpj;
		cpu_khz_ref = cpu_khz;
	}

	if ((val == CPUFREQ_PRECHANGE  && freq->old < freq->new) ||
	    (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
	    (val == CPUFREQ_RESUMECHANGE)) {
		*lpj =
			cpufreq_scale(loops_per_jiffy_ref, ref_freq, freq->new);

		cpu_khz = cpufreq_scale(cpu_khz_ref, ref_freq, freq->new);
		if (!(freq->flags & CPUFREQ_CONST_LOOPS))
			vxtime.tsc_quot = (1000L << 32) / cpu_khz;
	}

	set_cyc2ns_scale(cpu_khz_ref / 1000);

	return 0;
}
static struct notifier_block time_cpufreq_notifier_block = {
	.notifier_call = time_cpufreq_notifier
};
static int __init cpufreq_tsc(void)
{
	INIT_WORK(&cpufreq_delayed_get_work, handle_cpufreq_delayed_get, NULL);
	if (!cpufreq_register_notifier(&time_cpufreq_notifier_block,
				       CPUFREQ_TRANSITION_NOTIFIER))
		cpufreq_init = 1;
	return 0;
}

core_initcall(cpufreq_tsc);

#endif
/*
 * calibrate_tsc() calibrates the processor TSC in a very simple way, comparing
 * it to the HPET timer of known frequency.
 */

#define TICK_COUNT 100000000
static unsigned int __init hpet_calibrate_tsc(void)
{
	int tsc_start, hpet_start;
	int tsc_now, hpet_now;
	unsigned long flags;

	local_irq_save(flags);
	local_irq_disable();

	hpet_start = hpet_readl(HPET_COUNTER);
	rdtscl(tsc_start);

	do {
		local_irq_disable();
		hpet_now = hpet_readl(HPET_COUNTER);
		sync_core();
		rdtscl(tsc_now);
		local_irq_restore(flags);
	} while ((tsc_now - tsc_start) < TICK_COUNT &&
		 (hpet_now - hpet_start) < TICK_COUNT);

	return (tsc_now - tsc_start) * 1000000000L
		/ ((hpet_now - hpet_start) * hpet_period / 1000);
}
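/*
 * Unit check for the return value (illustrative): hpet_period is in
 * femtoseconds per HPET clock, so (hpet ticks) * hpet_period / 1000 is
 * the elapsed time in picoseconds, and (TSC ticks) * 10^9 / (time in ps)
 * is TSC ticks per millisecond, i.e. the TSC frequency in kHz, which is
 * exactly what cpu_khz expects.
 */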
/*
 * pit_calibrate_tsc() uses the speaker output (channel 2) of
 * the PIT. This is better than using the timer interrupt output,
 * because we can read the value of the speaker with just one inb(),
 * where we need three i/o operations for the interrupt channel.
 * We count how many ticks the TSC does in 50 ms.
 */
static unsigned int __init pit_calibrate_tsc(void)
{
	unsigned long start, end;
	unsigned long flags;

	spin_lock_irqsave(&i8253_lock, flags);

	outb((inb(0x61) & ~0x02) | 0x01, 0x61);

	outb(0xb0, 0x43);
	outb((PIT_TICK_RATE / (1000 / 50)) & 0xff, 0x42);
	outb((PIT_TICK_RATE / (1000 / 50)) >> 8, 0x42);
	rdtscll(start);
	sync_core();
	while ((inb(0x61) & 0x20) == 0);
	sync_core();
	rdtscll(end);

	spin_unlock_irqrestore(&i8253_lock, flags);

	return (end - start) / 50;
}
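/*
 * Why / 50 yields kHz: the code above counts TSC cycles while PIT
 * channel 2 counts down 50 ms worth of its 1.193182 MHz clock, so
 * cycles / 50 is cycles per millisecond, i.e. the TSC frequency in kHz.
 */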
static __init int late_hpet_init(void)
{
	struct hpet_data hd;
	unsigned int ntimer;

	if (!vxtime.hpet_address)
		return -1;

	memset(&hd, 0, sizeof(hd));

	ntimer = hpet_readl(HPET_ID);
	ntimer = (ntimer & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT;
	ntimer++;
	/*
	 * Register with driver.
	 * Timer0 and Timer1 are used by the platform.
	 */
	hd.hd_phys_address = vxtime.hpet_address;
	hd.hd_address = (void *)fix_to_virt(FIX_HPET_BASE);
	hd.hd_nirqs = ntimer;
	hd.hd_flags = HPET_DATA_PLATFORM;
	hpet_reserve_timer(&hd, 0);
#ifdef CONFIG_HPET_EMULATE_RTC
	hpet_reserve_timer(&hd, 1);
#endif
	hd.hd_irq[0] = HPET_LEGACY_8254;
	hd.hd_irq[1] = HPET_LEGACY_RTC;
	if (ntimer > 2) {
		struct hpet *hpet;
		struct hpet_timer *timer;
		int i;

		hpet = (struct hpet *) fix_to_virt(FIX_HPET_BASE);

		for (i = 2, timer = &hpet->hpet_timers[2]; i < ntimer;
		     timer++, i++)
			hd.hd_irq[i] = (timer->hpet_config &
					Tn_INT_ROUTE_CNF_MASK) >>
				Tn_INT_ROUTE_CNF_SHIFT;
	}

	hpet_alloc(&hd);

	return 0;
}
fs_initcall(late_hpet_init);
static int hpet_timer_stop_set_go(unsigned long tick)
{
	unsigned int cfg;

/*
 * Stop the timers and reset the main counter.
 */

	cfg = hpet_readl(HPET_CFG);
	cfg &= ~(HPET_CFG_ENABLE | HPET_CFG_LEGACY);
	hpet_writel(cfg, HPET_CFG);
	hpet_writel(0, HPET_COUNTER);
	hpet_writel(0, HPET_COUNTER + 4);

/*
 * Set up timer 0, as periodic with first interrupt to happen at hpet_tick,
 * and period also hpet_tick.
 */

	hpet_writel(HPET_TN_ENABLE | HPET_TN_PERIODIC | HPET_TN_SETVAL |
		    HPET_TN_32BIT, HPET_T0_CFG);
	hpet_writel(hpet_tick, HPET_T0_CMP);
	hpet_writel(hpet_tick, HPET_T0_CMP);	/* AK: why twice? */

/*
 * Go!
 */

	cfg |= HPET_CFG_ENABLE | HPET_CFG_LEGACY;
	hpet_writel(cfg, HPET_CFG);

	return 0;
}
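/*
 * Note (from the HPET specification): enabling HPET_CFG_LEGACY puts the
 * HPET in legacy replacement mode, routing timer 0 to IRQ0 (the 8254's
 * line) and timer 1 to IRQ8 (the RTC's line). That is what lets timer 0
 * drive the kernel tick here and timer 1 emulate the RTC interrupt
 * further below.
 */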
static int hpet_init(void)
{
	unsigned int id;

	if (!vxtime.hpet_address)
		return -1;
	set_fixmap_nocache(FIX_HPET_BASE, vxtime.hpet_address);
	__set_fixmap(VSYSCALL_HPET, vxtime.hpet_address, PAGE_KERNEL_VSYSCALL_NOCACHE);
/*
 * Read the period, compute tick and quotient.
 */

	id = hpet_readl(HPET_ID);

	if (!(id & HPET_ID_VENDOR) || !(id & HPET_ID_NUMBER) ||
	    !(id & HPET_ID_LEGSUP))
		return -1;

	hpet_period = hpet_readl(HPET_PERIOD);
	if (hpet_period < 100000 || hpet_period > 100000000)
		return -1;

	hpet_tick = (1000000000L * (USEC_PER_SEC / HZ) + hpet_period / 2) /
		hpet_period;
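/*
 * Worked example (illustrative numbers): hpet_period is femtoseconds per
 * HPET clock; the common 14.318180 MHz HPET has hpet_period ~= 69841279.
 * At HZ=1000 one tick is 1000 usec = 10^12 fs, so hpet_tick ~=
 * 10^12 / 69841279 ~= 14318 HPET clocks per timer interrupt; adding
 * hpet_period / 2 before dividing rounds to the nearest clock.
 */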
	return hpet_timer_stop_set_go(hpet_tick);
}
static int hpet_reenable(void)
{
	return hpet_timer_stop_set_go(hpet_tick);
}
void __init pit_init(void)
{
	unsigned long flags;

	spin_lock_irqsave(&i8253_lock, flags);
	outb_p(0x34, 0x43);		/* binary, mode 2, LSB/MSB, ch 0 */
	outb_p(LATCH & 0xff, 0x40);	/* LSB */
	outb_p(LATCH >> 8, 0x40);	/* MSB */
	spin_unlock_irqrestore(&i8253_lock, flags);
}
int __init time_setup(char *str)
{
	report_lost_ticks = 1;
	return 1;
}
static struct irqaction irq0 = {
	timer_interrupt, SA_INTERRUPT, CPU_MASK_NONE, "timer", NULL, NULL
};
extern void __init config_acpi_tables(void);
void __init time_init(void)
{
	char *timename;

#ifdef HPET_HACK_ENABLE_DANGEROUS
	if (!vxtime.hpet_address) {
		printk(KERN_WARNING "time.c: WARNING: Enabling HPET base "
		       "manually!\n");
		outl(0x800038a0, 0xcf8);
		outl(0xff000001, 0xcfc);
		outl(0x800038a0, 0xcf8);
		vxtime.hpet_address = inl(0xcfc) & 0xfffffffe;
		printk(KERN_WARNING "time.c: WARNING: Enabled HPET "
		       "at %#lx.\n", vxtime.hpet_address);
	}
#endif

	if (nohpet)
		vxtime.hpet_address = 0;

	xtime.tv_sec = get_cmos_time();
	xtime.tv_nsec = 0;

	set_normalized_timespec(&wall_to_monotonic,
				-xtime.tv_sec, -xtime.tv_nsec);

	if (!hpet_init()) {
		vxtime_hz = (1000000000000000L + hpet_period / 2) /
			hpet_period;
		cpu_khz = hpet_calibrate_tsc();
		timename = "HPET";
#ifdef CONFIG_X86_PM_TIMER
	} else if (pmtmr_ioport) {
		vxtime_hz = PM_TIMER_FREQUENCY;
		timename = "PM";
		pit_init();
		cpu_khz = pit_calibrate_tsc();
#endif
	} else {
		pit_init();
		cpu_khz = pit_calibrate_tsc();
		timename = "PIT";
	}

	printk(KERN_INFO "time.c: Using %ld.%06ld MHz %s timer.\n",
	       vxtime_hz / 1000000, vxtime_hz % 1000000, timename);
	printk(KERN_INFO "time.c: Detected %d.%03d MHz processor.\n",
	       cpu_khz / 1000, cpu_khz % 1000);
	vxtime.mode = VXTIME_TSC;
	vxtime.quot = (1000000L << 32) / vxtime_hz;
	vxtime.tsc_quot = (1000L << 32) / cpu_khz;
	vxtime.hz = vxtime_hz;
	rdtscll_sync(&vxtime.last_tsc);
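	/*
	 * Both quotients above are 32.32 fixed point: quot is microseconds
	 * per timer-source cycle and tsc_quot microseconds per TSC cycle,
	 * each scaled by 2^32 so the gettimeoffset paths can multiply and
	 * shift instead of dividing. Illustrative check: with vxtime_hz =
	 * 14318180, a delta of 14318 cycles gives (14318 * quot) >> 32
	 * ~= 1000 usec, i.e. one tick at HZ=1000.
	 */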
	setup_irq(0, &irq0);

	set_cyc2ns_scale(cpu_khz / 1000);

#ifndef CONFIG_SMP
	time_init_gtod();
#endif
}
/*
 * Make an educated guess if the TSC is trustworthy and synchronized
 * over all CPUs.
 */
static __init int unsynchronized_tsc(void)
{
#ifdef CONFIG_SMP
	if (oem_force_hpet_timer())
		return 1;
	/* Intel systems are normally all synchronized. Exceptions
	   are handled in the OEM check above. */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
		return 0;
	/* All in a single socket - should be synchronized */
	if (cpus_weight(cpu_core_map[0]) == num_online_cpus())
		return 0;
#endif
	/* Assume multi socket systems are not synchronized */
	return num_online_cpus() > 1;
}
/*
 * Decide after all CPUs are booted what mode gettimeofday should use.
 */
void __init time_init_gtod(void)
{
	char *timetype;
	if (unsynchronized_tsc())
		notsc = 1;
	if (vxtime.hpet_address && notsc) {
		timetype = "HPET";
		vxtime.last = hpet_readl(HPET_T0_CMP) - hpet_tick;
		vxtime.mode = VXTIME_HPET;
		do_gettimeoffset = do_gettimeoffset_hpet;
#ifdef CONFIG_X86_PM_TIMER
	/* Using PM for gettimeofday is quite slow, but we have no other
	   choice because the TSC is too unreliable on some systems. */
	} else if (pmtmr_ioport && !vxtime.hpet_address && notsc) {
		timetype = "PM";
		do_gettimeoffset = do_gettimeoffset_pm;
		vxtime.mode = VXTIME_PMTMR;
		sysctl_vsyscall = 0;
		printk(KERN_INFO "Disabling vsyscall due to use of PM timer\n");
#endif
	} else {
		timetype = vxtime.hpet_address ? "HPET/TSC" : "PIT/TSC";
		vxtime.mode = VXTIME_TSC;
	}

	printk(KERN_INFO "time.c: Using %s based timekeeping.\n", timetype);
}
__setup("report_lost_ticks", time_setup);
static long clock_cmos_diff;
static unsigned long sleep_start;
static int timer_suspend(struct sys_device *dev, pm_message_t state)
{
	/*
	 * Estimate time zone so that set_time can update the clock
	 */
	long cmos_time = get_cmos_time();

	clock_cmos_diff = -cmos_time;
	clock_cmos_diff += get_seconds();
	sleep_start = cmos_time;
	return 0;
}
static int timer_resume(struct sys_device *dev)
{
	unsigned long flags;
	unsigned long sec;
	unsigned long ctime = get_cmos_time();
	unsigned long sleep_length = (ctime - sleep_start) * HZ;

	if (vxtime.hpet_address)
		hpet_reenable();
	else
		i8254_timer_resume();

	sec = ctime + clock_cmos_diff;
	write_seqlock_irqsave(&xtime_lock, flags);
	xtime.tv_sec = sec;
	xtime.tv_nsec = 0;
	write_sequnlock_irqrestore(&xtime_lock, flags);
	jiffies += sleep_length;
	wall_jiffies += sleep_length;
	return 0;
}
static struct sysdev_class timer_sysclass = {
	.resume = timer_resume,
	.suspend = timer_suspend,
	set_kset_name("timer"),
};
/* XXX this driverfs stuff should probably go elsewhere later -john */
static struct sys_device device_timer = {
	.id = 0,
	.cls = &timer_sysclass,
};
static int time_init_device(void)
{
	int error = sysdev_class_register(&timer_sysclass);
	if (!error)
		error = sysdev_register(&device_timer);
	return error;
}

device_initcall(time_init_device);
#ifdef CONFIG_HPET_EMULATE_RTC
/* HPET in LegacyReplacement Mode eats up the RTC interrupt line. When HPET
 * is enabled, we support RTC interrupt functionality in software.
 * RTC has 3 kinds of interrupts:
 * 1) Update Interrupt - generate an interrupt, every sec, when the RTC clock
 *    is updated
 * 2) Alarm Interrupt - generate an interrupt at a specific time of day
 * 3) Periodic Interrupt - generate periodic interrupt, with frequencies
 *    2Hz-8192Hz (2Hz-64Hz for non-root user) (all freqs in powers of 2)
 * (1) and (2) above are implemented using polling at a frequency of
 * 64 Hz. The exact frequency is a tradeoff between accuracy and interrupt
 * overhead. (DEFAULT_RTC_INT_FREQ)
 * For (3), we use interrupts at 64 Hz or the user specified periodic
 * frequency, whichever is higher.
 */
#include <linux/rtc.h>
extern irqreturn_t rtc_interrupt(int irq, void *dev_id, struct pt_regs *regs);
#define DEFAULT_RTC_INT_FREQ	64
#define RTC_NUM_INTS		1
static unsigned long UIE_on;
static unsigned long prev_update_sec;

static unsigned long AIE_on;
static struct rtc_time alarm_time;

static unsigned long PIE_on;
static unsigned long PIE_freq = DEFAULT_RTC_INT_FREQ;
static unsigned long PIE_count;

static unsigned long hpet_rtc_int_freq;	/* RTC interrupt frequency */
int is_hpet_enabled(void)
{
	return vxtime.hpet_address != 0;
}
/*
 * Timer 1 for RTC, we do not use periodic interrupt feature,
 * even if HPET supports periodic interrupts on Timer 1.
 * The reason being, to set up a periodic interrupt in HPET, we need to
 * stop the main counter. And if we do that every time someone disables or
 * enables RTC, we will have an adverse effect on the main kernel timer
 * running on Timer 0.
 * So, for the time being, simulate the periodic interrupt in software.
 *
 * hpet_rtc_timer_init() is called for the first time and during subsequent
 * interrupts reinit happens through hpet_rtc_timer_reinit().
 */
int hpet_rtc_timer_init(void)
{
	unsigned int cfg, cnt;
	unsigned long flags;

	if (!is_hpet_enabled())
		return 0;
	/*
	 * Set the counter 1 and enable the interrupts.
	 */
	if (PIE_on && (PIE_freq > DEFAULT_RTC_INT_FREQ))
		hpet_rtc_int_freq = PIE_freq;
	else
		hpet_rtc_int_freq = DEFAULT_RTC_INT_FREQ;

	local_irq_save(flags);
	cnt = hpet_readl(HPET_COUNTER);
	cnt += ((hpet_tick*HZ)/hpet_rtc_int_freq);
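	/*
	 * Arithmetic note (illustrative numbers): hpet_tick * HZ is HPET
	 * clocks per second, so dividing by hpet_rtc_int_freq gives clocks
	 * per simulated RTC interrupt; e.g. 14318 * 1000 / 64 ~= 223719
	 * clocks between interrupts at the default 64 Hz polling rate.
	 */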
	hpet_writel(cnt, HPET_T1_CMP);
	local_irq_restore(flags);

	cfg = hpet_readl(HPET_T1_CFG);
	cfg |= HPET_TN_ENABLE | HPET_TN_SETVAL | HPET_TN_32BIT;
	hpet_writel(cfg, HPET_T1_CFG);

	return 1;
}
static void hpet_rtc_timer_reinit(void)
{
	unsigned int cfg, cnt;

	if (!(PIE_on | AIE_on | UIE_on))
		return;

	if (PIE_on && (PIE_freq > DEFAULT_RTC_INT_FREQ))
		hpet_rtc_int_freq = PIE_freq;
	else
		hpet_rtc_int_freq = DEFAULT_RTC_INT_FREQ;

	/* It is more accurate to use the comparator value than current count.*/
	cnt = hpet_readl(HPET_T1_CMP);
	cnt += hpet_tick*HZ/hpet_rtc_int_freq;
	hpet_writel(cnt, HPET_T1_CMP);

	cfg = hpet_readl(HPET_T1_CFG);
	cfg |= HPET_TN_ENABLE | HPET_TN_SETVAL | HPET_TN_32BIT;
	hpet_writel(cfg, HPET_T1_CFG);
}
/*
 * The functions below are called from the rtc driver.
 * Return 0 if HPET is not being used.
 * Otherwise do the necessary changes and return 1.
 */
int hpet_mask_rtc_irq_bit(unsigned long bit_mask)
{
	if (!is_hpet_enabled())
		return 0;

	if (bit_mask & RTC_UIE)
		UIE_on = 0;
	if (bit_mask & RTC_PIE)
		PIE_on = 0;
	if (bit_mask & RTC_AIE)
		AIE_on = 0;

	return 1;
}
int hpet_set_rtc_irq_bit(unsigned long bit_mask)
{
	int timer_init_reqd = 0;

	if (!is_hpet_enabled())
		return 0;

	if (!(PIE_on | AIE_on | UIE_on))
		timer_init_reqd = 1;

	if (bit_mask & RTC_UIE) {
		UIE_on = 1;
	}
	if (bit_mask & RTC_PIE) {
		PIE_on = 1;
		PIE_count = 0;
	}
	if (bit_mask & RTC_AIE) {
		AIE_on = 1;
	}

	if (timer_init_reqd)
		hpet_rtc_timer_init();

	return 1;
}
int hpet_set_alarm_time(unsigned char hrs, unsigned char min, unsigned char sec)
{
	if (!is_hpet_enabled())
		return 0;

	alarm_time.tm_hour = hrs;
	alarm_time.tm_min = min;
	alarm_time.tm_sec = sec;

	return 1;
}
int hpet_set_periodic_freq(unsigned long freq)
{
	if (!is_hpet_enabled())
		return 0;

	PIE_freq = freq;
	PIE_count = 0;

	return 1;
}
int hpet_rtc_dropped_irq(void)
{
	if (!is_hpet_enabled())
		return 0;

	return 1;
}
irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct rtc_time curr_time;
	unsigned long rtc_int_flag = 0;
	int call_rtc_interrupt = 0;

	hpet_rtc_timer_reinit();

	if (UIE_on | AIE_on) {
		rtc_get_rtc_time(&curr_time);
	}
	if (UIE_on) {
		if (curr_time.tm_sec != prev_update_sec) {
			/* Set update int info, call real rtc int routine */
			call_rtc_interrupt = 1;
			rtc_int_flag = RTC_UF;
			prev_update_sec = curr_time.tm_sec;
		}
	}
	if (PIE_on) {
		PIE_count++;
		if (PIE_count >= hpet_rtc_int_freq/PIE_freq) {
			/* Set periodic int info, call real rtc int routine */
			call_rtc_interrupt = 1;
			rtc_int_flag |= RTC_PF;
			PIE_count = 0;
		}
	}
	if (AIE_on) {
		if ((curr_time.tm_sec == alarm_time.tm_sec) &&
		    (curr_time.tm_min == alarm_time.tm_min) &&
		    (curr_time.tm_hour == alarm_time.tm_hour)) {
			/* Set alarm int info, call real rtc int routine */
			call_rtc_interrupt = 1;
			rtc_int_flag |= RTC_AF;
		}
	}
	if (call_rtc_interrupt) {
		rtc_int_flag |= (RTC_IRQF | (RTC_NUM_INTS << 8));
		rtc_interrupt(rtc_int_flag, dev_id, regs);
	}
	return IRQ_HANDLED;
}
#endif
static int __init nohpet_setup(char *s)
{
	nohpet = 1;
	return 0;
}

__setup("nohpet", nohpet_setup);
static int __init notsc_setup(char *s)
{
	notsc = 1;
	return 0;
}

__setup("notsc", notsc_setup);