// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Common time routines among all ppc machines.
 *
 * Written by Cort Dougan (cort@cs.nmt.edu) to merge
 * Paul Mackerras' version and mine for PReP and Pmac.
 * MPC8xx/MBX changes by Dan Malek (dmalek@jlc.net).
 * Converted for 64-bit by Mike Corrigan (mikejc@us.ibm.com)
 *
 * First round of bugfixes by Gabriel Paubert (paubert@iram.es)
 * to make clock more stable (2.4.0-test5). The only thing
 * that this code assumes is that the timebases have been synchronized
 * by firmware on SMP and are never stopped (never do sleep
 * on SMP then, nap and doze are OK).
 *
 * Speeded up do_gettimeofday by getting rid of references to
 * xtime (which required locks for consistency). (mikejc@us.ibm.com)
 *
 * TODO (not necessarily in this file):
 * - improve precision and reproducibility of timebase frequency
 *   measurement at boot time.
 * - for astronomical applications: add a new function to get
 *   non-ambiguous timestamps even around leap seconds. This needs
 *   a new timestamp format and a good name.
 *
 * 1997-09-10  Updated NTP code according to technical memorandum Jan '96
 *             "A Kernel Model for Precision Timekeeping" by Dave Mills
 */
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/kernel_stat.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/profile.h>
#include <linux/cpu.h>
#include <linux/security.h>
#include <linux/percpu.h>
#include <linux/rtc.h>
#include <linux/jiffies.h>
#include <linux/posix-timers.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <linux/irq_work.h>
#include <linux/of_clk.h>
#include <linux/suspend.h>
#include <linux/sched/cputime.h>
#include <linux/processor.h>
#include <asm/trace.h>

#include <asm/io.h>
#include <asm/nvram.h>
#include <asm/cache.h>
#include <asm/machdep.h>
#include <linux/uaccess.h>
#include <asm/time.h>
#include <asm/prom.h>
#include <asm/irq.h>
#include <asm/div64.h>
#include <asm/smp.h>
#include <asm/vdso_datapage.h>
#include <asm/firmware.h>
#include <asm/asm-prototypes.h>
/* powerpc clocksource/clockevent code */

#include <linux/clockchips.h>
#include <linux/timekeeper_internal.h>
static u64 timebase_read(struct clocksource *);

static struct clocksource clocksource_timebase = {
	.name			= "timebase",
	.rating			= 400,
	.flags			= CLOCK_SOURCE_IS_CONTINUOUS,
	.mask			= CLOCKSOURCE_MASK(64),
	.read			= timebase_read,
	.vdso_clock_mode	= VDSO_CLOCKMODE_ARCHTIMER,
};

#define DECREMENTER_DEFAULT_MAX 0x7FFFFFFF
u64 decrementer_max = DECREMENTER_DEFAULT_MAX;
static int decrementer_set_next_event(unsigned long evt,
				      struct clock_event_device *dev);
static int decrementer_shutdown(struct clock_event_device *evt);

struct clock_event_device decrementer_clockevent = {
	.name			= "decrementer",
	.rating			= 200,
	.irq			= 0,
	.set_next_event		= decrementer_set_next_event,
	.set_state_oneshot_stopped = decrementer_shutdown,
	.set_state_shutdown	= decrementer_shutdown,
	.tick_resume		= decrementer_shutdown,
	.features		= CLOCK_EVT_FEAT_ONESHOT |
				  CLOCK_EVT_FEAT_C3STOP,
};
EXPORT_SYMBOL(decrementer_clockevent);

DEFINE_PER_CPU(u64, decrementers_next_tb);
static DEFINE_PER_CPU(struct clock_event_device, decrementers);
#define XSEC_PER_SEC (1024*1024)

#ifdef CONFIG_PPC64
#define SCALE_XSEC(xsec, max)	(((xsec) * max) / XSEC_PER_SEC)
#else
/* compute ((xsec << 12) * max) >> 32 */
#define SCALE_XSEC(xsec, max)	mulhwu((xsec) << 12, max)
#endif
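/*
 * An "xsec" is a 1/2^20 second unit (XSEC_PER_SEC == 2^20). On 32-bit,
 * mulhwu(a, b) yields the high 32 bits of the 32x32-bit unsigned product,
 * so mulhwu(xsec << 12, max) == ((xsec << 12) * max) >> 32
 *                            == (xsec * max) / 2^20,
 * matching the 64-bit definition without needing a 64-bit divide.
 */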
unsigned long tb_ticks_per_jiffy;
unsigned long tb_ticks_per_usec = 100;	/* sane default */
EXPORT_SYMBOL(tb_ticks_per_usec);
unsigned long tb_ticks_per_sec;
EXPORT_SYMBOL(tb_ticks_per_sec);	/* for cputime_t conversions */

DEFINE_SPINLOCK(rtc_lock);
EXPORT_SYMBOL_GPL(rtc_lock);

static u64 tb_to_ns_scale __read_mostly;
static unsigned tb_to_ns_shift __read_mostly;
static u64 boot_tb __read_mostly;

extern struct timezone sys_tz;
static long timezone_offset;

unsigned long ppc_proc_freq;
EXPORT_SYMBOL_GPL(ppc_proc_freq);
unsigned long ppc_tb_freq;
EXPORT_SYMBOL_GPL(ppc_tb_freq);

bool tb_invalid;
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
/*
 * Factor for converting from cputime_t (timebase ticks) to
 * microseconds. This is stored as 0.64 fixed-point binary fraction.
 */
u64 __cputime_usec_factor;
EXPORT_SYMBOL(__cputime_usec_factor);

#ifdef CONFIG_PPC_SPLPAR
void (*dtl_consumer)(struct dtl_entry *, u64);
#endif

static void calc_cputime_factors(void)
{
	struct div_result res;

	div128_by_32(1000000, 0, tb_ticks_per_sec, &res);
	__cputime_usec_factor = res.result_low;
}
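/*
 * For example, with a 512 MHz timebase the factor is
 * 10^6 * 2^64 / (512 * 10^6), i.e. the 0.64 fixed-point encoding of
 * roughly 0.00195 microseconds per tick; a tick count can then be
 * converted with, e.g., mulhdu(ticks, __cputime_usec_factor), the high
 * 64 bits of the 128-bit product.
 */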
/*
 * Read the SPURR on systems that have it, otherwise the PURR,
 * or if that doesn't exist return the timebase value passed in.
 */
static inline unsigned long read_spurr(unsigned long tb)
{
	if (cpu_has_feature(CPU_FTR_SPURR))
		return mfspr(SPRN_SPURR);
	if (cpu_has_feature(CPU_FTR_PURR))
		return mfspr(SPRN_PURR);
	return tb;
}
#ifdef CONFIG_PPC_SPLPAR

#include <asm/dtl.h>

/*
 * Scan the dispatch trace log and count up the stolen time.
 * Should be called with interrupts disabled.
 */
static u64 scan_dispatch_log(u64 stop_tb)
{
	u64 i = local_paca->dtl_ridx;
	struct dtl_entry *dtl = local_paca->dtl_curr;
	struct dtl_entry *dtl_end = local_paca->dispatch_log_end;
	struct lppaca *vpa = local_paca->lppaca_ptr;
	u64 tb_delta;
	u64 stolen = 0;
	u64 dtb;

	if (!dtl)
		return 0;

	if (i == be64_to_cpu(vpa->dtl_idx))
		return 0;
	while (i < be64_to_cpu(vpa->dtl_idx)) {
		dtb = be64_to_cpu(dtl->timebase);
		tb_delta = be32_to_cpu(dtl->enqueue_to_dispatch_time) +
			be32_to_cpu(dtl->ready_to_enqueue_time);
		barrier();
		if (i + N_DISPATCH_LOG < be64_to_cpu(vpa->dtl_idx)) {
			/* buffer has overflowed */
			i = be64_to_cpu(vpa->dtl_idx) - N_DISPATCH_LOG;
			dtl = local_paca->dispatch_log + (i % N_DISPATCH_LOG);
			continue;
		}
		if (dtb > stop_tb)
			break;
		if (dtl_consumer)
			dtl_consumer(dtl, i);
		stolen += tb_delta;
		++i;
		++dtl;
		if (dtl == dtl_end)
			dtl = local_paca->dispatch_log;
	}
	local_paca->dtl_ridx = i;
	local_paca->dtl_curr = dtl;
	return stolen;
}
/*
 * Accumulate stolen time by scanning the dispatch trace log.
 * Called on entry from user mode.
 */
void notrace accumulate_stolen_time(void)
{
	u64 sst, ust;
	unsigned long save_irq_soft_mask = irq_soft_mask_return();
	struct cpu_accounting_data *acct = &local_paca->accounting;

	/* We are called early in the exception entry, before
	 * soft/hard_enabled are sync'ed to the expected state
	 * for the exception. We are hard disabled but the PACA
	 * needs to reflect that so various debug stuff doesn't
	 * complain.
	 */
	irq_soft_mask_set(IRQS_DISABLED);

	sst = scan_dispatch_log(acct->starttime_user);
	ust = scan_dispatch_log(acct->starttime);
	acct->stime -= sst;
	acct->utime -= ust;
	acct->steal_time += ust + sst;

	irq_soft_mask_set(save_irq_soft_mask);
}
static inline u64 calculate_stolen_time(u64 stop_tb)
{
	if (!firmware_has_feature(FW_FEATURE_SPLPAR))
		return 0;

	if (get_paca()->dtl_ridx != be64_to_cpu(get_lppaca()->dtl_idx))
		return scan_dispatch_log(stop_tb);

	return 0;
}

#else /* CONFIG_PPC_SPLPAR */
static inline u64 calculate_stolen_time(u64 stop_tb)
{
	return 0;
}

#endif /* CONFIG_PPC_SPLPAR */
/*
 * Account time for a transition between system, hard irq
 * or soft irq state.
 */
static unsigned long vtime_delta_scaled(struct cpu_accounting_data *acct,
					unsigned long now, unsigned long stime)
{
	unsigned long stime_scaled = 0;
#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
	unsigned long nowscaled, deltascaled;
	unsigned long utime, utime_scaled;

	nowscaled = read_spurr(now);
	deltascaled = nowscaled - acct->startspurr;
	acct->startspurr = nowscaled;
	utime = acct->utime - acct->utime_sspurr;
	acct->utime_sspurr = acct->utime;

	/*
	 * Because we don't read the SPURR on every kernel entry/exit,
	 * deltascaled includes both user and system SPURR ticks.
	 * Apportion these ticks to system SPURR ticks and user
	 * SPURR ticks in the same ratio as the system time (delta)
	 * and user time (udelta) values obtained from the timebase
	 * over the same interval. The system ticks get accounted here;
	 * the user ticks get saved up in paca->user_time_scaled to be
	 * used by account_process_tick.
	 */
	stime_scaled = stime;
	utime_scaled = utime;
	if (deltascaled != stime + utime) {
		if (utime) {
			stime_scaled = deltascaled * stime / (stime + utime);
			utime_scaled = deltascaled - stime_scaled;
		} else {
			stime_scaled = deltascaled;
		}
	}
	acct->utime_scaled += utime_scaled;
#endif

	return stime_scaled;
}
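/*
 * For example, if the timebase advanced by stime = 30 and utime = 10 ticks
 * since the last SPURR read but the SPURR only advanced by deltascaled = 20
 * (e.g. the core was throttled), the split above gives
 * stime_scaled = 20 * 30 / 40 = 15 and utime_scaled = 5, preserving the
 * 3:1 system/user ratio seen on the timebase.
 */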
static unsigned long vtime_delta(struct cpu_accounting_data *acct,
				 unsigned long *stime_scaled,
				 unsigned long *steal_time)
{
	unsigned long now, stime;

	WARN_ON_ONCE(!irqs_disabled());

	now = mftb();
	stime = now - acct->starttime;
	acct->starttime = now;

	*stime_scaled = vtime_delta_scaled(acct, now, stime);

	*steal_time = calculate_stolen_time(now);

	return stime;
}
static void vtime_delta_kernel(struct cpu_accounting_data *acct,
			       unsigned long *stime, unsigned long *stime_scaled)
{
	unsigned long steal_time;

	*stime = vtime_delta(acct, stime_scaled, &steal_time);
	*stime -= min(*stime, steal_time);
	acct->steal_time += steal_time;
}
void vtime_account_kernel(struct task_struct *tsk)
{
	struct cpu_accounting_data *acct = get_accounting(tsk);
	unsigned long stime, stime_scaled;

	vtime_delta_kernel(acct, &stime, &stime_scaled);

	if (tsk->flags & PF_VCPU) {
		acct->gtime += stime;
#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
		acct->utime_scaled += stime_scaled;
#endif
	} else {
		acct->stime += stime;
#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
		acct->stime_scaled += stime_scaled;
#endif
	}
}
EXPORT_SYMBOL_GPL(vtime_account_kernel);
void vtime_account_idle(struct task_struct *tsk)
{
	unsigned long stime, stime_scaled, steal_time;
	struct cpu_accounting_data *acct = get_accounting(tsk);

	stime = vtime_delta(acct, &stime_scaled, &steal_time);
	acct->idle_time += stime + steal_time;
}
static void vtime_account_irq_field(struct cpu_accounting_data *acct,
				    unsigned long *field)
{
	unsigned long stime, stime_scaled;

	vtime_delta_kernel(acct, &stime, &stime_scaled);
	*field += stime;
#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
	acct->stime_scaled += stime_scaled;
#endif
}

void vtime_account_softirq(struct task_struct *tsk)
{
	struct cpu_accounting_data *acct = get_accounting(tsk);

	vtime_account_irq_field(acct, &acct->softirq_time);
}

void vtime_account_hardirq(struct task_struct *tsk)
{
	struct cpu_accounting_data *acct = get_accounting(tsk);

	vtime_account_irq_field(acct, &acct->hardirq_time);
}
static void vtime_flush_scaled(struct task_struct *tsk,
			       struct cpu_accounting_data *acct)
{
#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
	if (acct->utime_scaled)
		tsk->utimescaled += cputime_to_nsecs(acct->utime_scaled);
	if (acct->stime_scaled)
		tsk->stimescaled += cputime_to_nsecs(acct->stime_scaled);

	acct->utime_scaled = 0;
	acct->utime_sspurr = 0;
	acct->stime_scaled = 0;
#endif
}
/*
 * Account the whole cputime accumulated in the paca.
 * Must be called with interrupts disabled.
 * Assumes that vtime_account_kernel/idle() has been called
 * recently (i.e. since the last entry from usermode) so that
 * get_paca()->user_time_scaled is up to date.
 */
void vtime_flush(struct task_struct *tsk)
{
	struct cpu_accounting_data *acct = get_accounting(tsk);

	if (acct->utime)
		account_user_time(tsk, cputime_to_nsecs(acct->utime));

	if (acct->gtime)
		account_guest_time(tsk, cputime_to_nsecs(acct->gtime));

	if (IS_ENABLED(CONFIG_PPC_SPLPAR) && acct->steal_time) {
		account_steal_time(cputime_to_nsecs(acct->steal_time));
		acct->steal_time = 0;
	}

	if (acct->idle_time)
		account_idle_time(cputime_to_nsecs(acct->idle_time));

	if (acct->stime)
		account_system_index_time(tsk, cputime_to_nsecs(acct->stime),
					  CPUTIME_SYSTEM);

	if (acct->hardirq_time)
		account_system_index_time(tsk, cputime_to_nsecs(acct->hardirq_time),
					  CPUTIME_IRQ);
	if (acct->softirq_time)
		account_system_index_time(tsk, cputime_to_nsecs(acct->softirq_time),
					  CPUTIME_SOFTIRQ);

	vtime_flush_scaled(tsk, acct);

	acct->utime = 0;
	acct->gtime = 0;
	acct->idle_time = 0;
	acct->stime = 0;
	acct->hardirq_time = 0;
	acct->softirq_time = 0;
}

#else /* ! CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
#define calc_cputime_factors()
#endif
void __delay(unsigned long loops)
{
	unsigned long start;

	spin_begin();
	if (tb_invalid) {
		/*
		 * TB is in error state and isn't ticking anymore.
		 * HMI handler was unable to recover from TB error.
		 * Return immediately, so that kernel won't get stuck here.
		 */
		spin_cpu_relax();
	} else {
		start = mftb();
		while (mftb() - start < loops)
			spin_cpu_relax();
	}
	spin_end();
}
EXPORT_SYMBOL(__delay);

void udelay(unsigned long usecs)
{
	__delay(tb_ticks_per_usec * usecs);
}
EXPORT_SYMBOL(udelay);
unsigned long profile_pc(struct pt_regs *regs)
{
	unsigned long pc = instruction_pointer(regs);

	if (in_lock_functions(pc))
		return regs->link;

	return pc;
}
EXPORT_SYMBOL(profile_pc);
#ifdef CONFIG_IRQ_WORK

/*
 * 64-bit uses a byte in the PACA, 32-bit uses a per-cpu variable...
 */
#ifdef CONFIG_PPC64
static inline unsigned long test_irq_work_pending(void)
{
	unsigned long x;

	asm volatile("lbz %0,%1(13)"
	: "=r" (x)
	: "i" (offsetof(struct paca_struct, irq_work_pending)));
	return x;
}

static inline void set_irq_work_pending_flag(void)
{
	asm volatile("stb %0,%1(13)" : :
		"r" (1),
		"i" (offsetof(struct paca_struct, irq_work_pending)));
}

static inline void clear_irq_work_pending(void)
{
	asm volatile("stb %0,%1(13)" : :
		"r" (0),
		"i" (offsetof(struct paca_struct, irq_work_pending)));
}
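/*
 * On 64-bit, GPR 13 holds the per-CPU PACA pointer while in the kernel,
 * so the lbz/stb above access the irq_work_pending byte at a constant
 * offset from r13 in a single instruction, with no per-cpu pointer
 * dereference or address computation.
 */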
#else /* 32-bit */

DEFINE_PER_CPU(u8, irq_work_pending);

#define set_irq_work_pending_flag()	__this_cpu_write(irq_work_pending, 1)
#define test_irq_work_pending()		__this_cpu_read(irq_work_pending)
#define clear_irq_work_pending()	__this_cpu_write(irq_work_pending, 0)

#endif /* 32 vs 64 bit */
void arch_irq_work_raise(void)
{
	/*
	 * 64-bit code that uses irq soft-mask can just cause an immediate
	 * interrupt here that gets soft masked, if this is called under
	 * local_irq_disable(). It might be possible to prevent that happening
	 * by noticing interrupts are disabled and setting decrementer pending
	 * to be replayed when irqs are enabled. The problem there is that
	 * tracing can call irq_work_raise, including in code that does low
	 * level manipulations of irq soft-mask state (e.g., trace_hardirqs_on)
	 * which could get tangled up if we're messing with the same state
	 * here.
	 */
	preempt_disable();
	set_irq_work_pending_flag();
	set_dec(1);
	preempt_enable();
}

#else  /* CONFIG_IRQ_WORK */

#define test_irq_work_pending()	0
#define clear_irq_work_pending()

#endif /* CONFIG_IRQ_WORK */
/*
 * timer_interrupt - gets called when the decrementer overflows,
 * with interrupts disabled.
 */
void timer_interrupt(struct pt_regs *regs)
{
	struct clock_event_device *evt = this_cpu_ptr(&decrementers);
	u64 *next_tb = this_cpu_ptr(&decrementers_next_tb);
	struct pt_regs *old_regs;
	u64 now;

	/*
	 * Some implementations of hotplug will get timer interrupts while
	 * offline, just ignore these.
	 */
	if (unlikely(!cpu_online(smp_processor_id()))) {
		set_dec(decrementer_max);
		return;
	}

	/* Ensure a positive value is written to the decrementer, or else
	 * some CPUs will continue to take decrementer exceptions. When the
	 * PPC_WATCHDOG (decrementer based) is configured, keep this at most
	 * 31 bits, which is about 4 seconds on most systems, which gives
	 * the watchdog a chance of catching timer interrupt hard lockups.
	 */
	if (IS_ENABLED(CONFIG_PPC_WATCHDOG))
		set_dec(0x7fffffff);
	else
		set_dec(decrementer_max);

	/* Conditionally hard-enable interrupts now that the DEC has been
	 * bumped to its maximum value
	 */
	may_hard_irq_enable();

#if defined(CONFIG_PPC32) && defined(CONFIG_PPC_PMAC)
	if (atomic_read(&ppc_n_lost_interrupts) != 0)
		do_IRQ(regs);
#endif

	old_regs = set_irq_regs(regs);
	irq_enter();
	trace_timer_interrupt_entry(regs);

	if (test_irq_work_pending()) {
		clear_irq_work_pending();
		irq_work_run();
	}

	now = get_tb();
	if (now >= *next_tb) {
		*next_tb = ~(u64)0;
		if (evt->event_handler)
			evt->event_handler(evt);
		__this_cpu_inc(irq_stat.timer_irqs_event);
	} else {
		now = *next_tb - now;
		if (now <= decrementer_max)
			set_dec(now);
		/* We may have raced with new irq work */
		if (test_irq_work_pending())
			set_dec(1);
		__this_cpu_inc(irq_stat.timer_irqs_others);
	}

	trace_timer_interrupt_exit(regs);
	irq_exit();
	set_irq_regs(old_regs);
}
EXPORT_SYMBOL(timer_interrupt);
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void timer_broadcast_interrupt(void)
{
	u64 *next_tb = this_cpu_ptr(&decrementers_next_tb);

	*next_tb = ~(u64)0;
	tick_receive_broadcast();
	__this_cpu_inc(irq_stat.broadcast_irqs_event);
}
#endif
#ifdef CONFIG_SUSPEND
static void generic_suspend_disable_irqs(void)
{
	/* Disable the decrementer, so that it doesn't interfere
	 * with suspending.
	 */

	set_dec(decrementer_max);
	local_irq_disable();
	set_dec(decrementer_max);
}

static void generic_suspend_enable_irqs(void)
{
	local_irq_enable();
}

/* Overrides the weak version in kernel/power/main.c */
void arch_suspend_disable_irqs(void)
{
	if (ppc_md.suspend_disable_irqs)
		ppc_md.suspend_disable_irqs();
	generic_suspend_disable_irqs();
}

/* Overrides the weak version in kernel/power/main.c */
void arch_suspend_enable_irqs(void)
{
	generic_suspend_enable_irqs();
	if (ppc_md.suspend_enable_irqs)
		ppc_md.suspend_enable_irqs();
}
#endif
unsigned long long tb_to_ns(unsigned long long ticks)
{
	return mulhdu(ticks, tb_to_ns_scale) << tb_to_ns_shift;
}
EXPORT_SYMBOL_GPL(tb_to_ns);

/*
 * Scheduler clock - returns current time in nanosec units.
 *
 * Note: mulhdu(a, b) (multiply high double unsigned) returns
 * the high 64 bits of a * b, i.e. (a * b) >> 64, where a and b
 * are 64-bit unsigned numbers.
 */
notrace unsigned long long sched_clock(void)
{
	return mulhdu(get_tb() - boot_tb, tb_to_ns_scale) << tb_to_ns_shift;
}
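/*
 * tb_to_ns_scale is a 0.64 fixed-point fraction and tb_to_ns_shift its
 * power-of-two correction, chosen in time_init() so that
 *	ns = mulhdu(ticks, tb_to_ns_scale) << tb_to_ns_shift
 *	   ~= ticks * 10^9 / tb_ticks_per_sec,
 * i.e. mulhdu() supplies the "* scale / 2^64" step without any 128-bit
 * division at runtime.
 */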
#ifdef CONFIG_PPC_PSERIES

/*
 * Running clock - attempts to give a view of time passing for a virtualised
 * kernel.
 * Uses the VTB register if available otherwise a next best guess.
 */
unsigned long long running_clock(void)
{
	/*
	 * Don't read the VTB as a host since KVM does not switch in host
	 * timebase into the VTB when it takes a guest off the CPU, reading the
	 * VTB would result in reading 'last switched out' guest VTB.
	 *
	 * Host kernels are often compiled with CONFIG_PPC_PSERIES checked, it
	 * would be unsafe to rely only on the #ifdef above.
	 */
	if (firmware_has_feature(FW_FEATURE_LPAR) &&
	    cpu_has_feature(CPU_FTR_ARCH_207S))
		return mulhdu(get_vtb() - boot_tb, tb_to_ns_scale) << tb_to_ns_shift;

	/*
	 * This is a next best approximation without a VTB.
	 * On a host which is running bare metal there should never be any stolen
	 * time and on a host which doesn't do any virtualisation TB *should* equal
	 * VTB so it makes no difference anyway.
	 */
	return local_clock() - kcpustat_this_cpu->cpustat[CPUTIME_STEAL];
}
#endif
static int __init get_freq(char *name, int cells, unsigned long *val)
{
	struct device_node *cpu;
	const __be32 *fp;
	int found = 0;

	/* The cpu node should have timebase and clock frequency properties */
	cpu = of_find_node_by_type(NULL, "cpu");

	if (cpu) {
		fp = of_get_property(cpu, name, NULL);
		if (fp) {
			found = 1;
			*val = of_read_ulong(fp, cells);
		}

		of_node_put(cpu);
	}

	return found;
}
static void start_cpu_decrementer(void)
{
#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
	unsigned int tcr;

	/* Clear any pending timer interrupts */
	mtspr(SPRN_TSR, TSR_ENW | TSR_WIS | TSR_DIS | TSR_FIS);

	tcr = mfspr(SPRN_TCR);
	/*
	 * The watchdog may have already been enabled by u-boot. So leave
	 * TCR[WP] (Watchdog Period) alone.
	 */
	tcr &= TCR_WP_MASK;	/* Clear all bits except for TCR[WP] */
	tcr |= TCR_DIE;		/* Enable decrementer */
	mtspr(SPRN_TCR, tcr);
#endif
}
void __init generic_calibrate_decr(void)
{
	ppc_tb_freq = DEFAULT_TB_FREQ;		/* hardcoded default */

	if (!get_freq("ibm,extended-timebase-frequency", 2, &ppc_tb_freq) &&
	    !get_freq("timebase-frequency", 1, &ppc_tb_freq)) {

		printk(KERN_ERR "WARNING: Estimating decrementer frequency "
				"(not found)\n");
	}

	ppc_proc_freq = DEFAULT_PROC_FREQ;	/* hardcoded default */

	if (!get_freq("ibm,extended-clock-frequency", 2, &ppc_proc_freq) &&
	    !get_freq("clock-frequency", 1, &ppc_proc_freq)) {

		printk(KERN_ERR "WARNING: Estimating processor frequency "
				"(not found)\n");
	}
}
int update_persistent_clock64(struct timespec64 now)
{
	struct rtc_time tm;

	if (!ppc_md.set_rtc_time)
		return -ENODEV;

	rtc_time64_to_tm(now.tv_sec + 1 + timezone_offset, &tm);

	return ppc_md.set_rtc_time(&tm);
}
static void __read_persistent_clock(struct timespec64 *ts)
{
	struct rtc_time tm;
	static int first = 1;

	ts->tv_nsec = 0;
	/* XXX this is a little fragile but will work okay in the short term */
	if (first) {
		first = 0;
		if (ppc_md.time_init)
			timezone_offset = ppc_md.time_init();

		/* get_boot_time() isn't guaranteed to be safe to call late */
		if (ppc_md.get_boot_time) {
			ts->tv_sec = ppc_md.get_boot_time() - timezone_offset;
			return;
		}
	}
	if (!ppc_md.get_rtc_time) {
		ts->tv_sec = 0;
		return;
	}
	ppc_md.get_rtc_time(&tm);

	ts->tv_sec = rtc_tm_to_time64(&tm);
}

void read_persistent_clock64(struct timespec64 *ts)
{
	__read_persistent_clock(ts);

	/* Sanitize it in case real time clock is set below EPOCH */
	if (ts->tv_sec < 0) {
		ts->tv_sec = 0;
		ts->tv_nsec = 0;
	}
}
/* clocksource code */
static notrace u64 timebase_read(struct clocksource *cs)
{
	return (u64)get_tb();
}

static void __init clocksource_init(void)
{
	struct clocksource *clock = &clocksource_timebase;

	if (clocksource_register_hz(clock, tb_ticks_per_sec)) {
		printk(KERN_ERR "clocksource: %s is already registered\n",
		       clock->name);
		return;
	}

	printk(KERN_INFO "clocksource: %s mult[%x] shift[%d] registered\n",
	       clock->name, clock->mult, clock->shift);
}
static int decrementer_set_next_event(unsigned long evt,
				      struct clock_event_device *dev)
{
	__this_cpu_write(decrementers_next_tb, get_tb() + evt);
	set_dec(evt);

	/* We may have raced with new irq work */
	if (test_irq_work_pending())
		set_dec(1);

	return 0;
}

static int decrementer_shutdown(struct clock_event_device *dev)
{
	decrementer_set_next_event(decrementer_max, dev);
	return 0;
}
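/*
 * Note that "shutdown" does not stop the hardware; it simply programs the
 * farthest deadline the decrementer supports (decrementer_max ticks from
 * now), parking the tick until a real event is programmed again.
 */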
static void register_decrementer_clockevent(int cpu)
{
	struct clock_event_device *dec = &per_cpu(decrementers, cpu);

	*dec = decrementer_clockevent;
	dec->cpumask = cpumask_of(cpu);

	clockevents_config_and_register(dec, ppc_tb_freq, 2, decrementer_max);

	printk_once(KERN_DEBUG "clockevent: %s mult[%x] shift[%d] cpu[%d]\n",
		    dec->name, dec->mult, dec->shift, cpu);

	/* Set values for KVM, see kvm_emulate_dec() */
	decrementer_clockevent.mult = dec->mult;
	decrementer_clockevent.shift = dec->shift;
}
static void enable_large_decrementer(void)
{
	if (!cpu_has_feature(CPU_FTR_ARCH_300))
		return;

	if (decrementer_max <= DECREMENTER_DEFAULT_MAX)
		return;

	/*
	 * If we're running as the hypervisor we need to enable the LD manually
	 * otherwise firmware should have done it for us.
	 */
	if (cpu_has_feature(CPU_FTR_HVMODE))
		mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) | LPCR_LD);
}
static void __init set_decrementer_max(void)
{
	struct device_node *cpu;
	u32 bits = 32;

	/* Prior to ISAv3 the decrementer is always 32 bit */
	if (!cpu_has_feature(CPU_FTR_ARCH_300))
		return;

	cpu = of_find_node_by_type(NULL, "cpu");

	if (of_property_read_u32(cpu, "ibm,dec-bits", &bits) == 0) {
		if (bits > 64 || bits < 32) {
			pr_warn("time_init: firmware supplied invalid ibm,dec-bits");
			bits = 32;
		}

		/* calculate the signed maximum given this many bits */
		decrementer_max = (1ul << (bits - 1)) - 1;
	}

	of_node_put(cpu);

	pr_info("time_init: %u bit decrementer (max: %llx)\n",
		bits, decrementer_max);
}
static void __init init_decrementer_clockevent(void)
{
	register_decrementer_clockevent(smp_processor_id());
}

void secondary_cpu_time_init(void)
{
	/* Enable and test the large decrementer for this cpu */
	enable_large_decrementer();

	/* Start the decrementer on CPUs that have manual control
	 * such as BookE
	 */
	start_cpu_decrementer();

	/* FIXME: Should make unrelated change to move snapshot_timebase
	 * call here */
	register_decrementer_clockevent(smp_processor_id());
}
/* This function is only called on the boot processor */
void __init time_init(void)
{
	struct div_result res;
	u64 scale;
	unsigned shift;

	/* Normal PowerPC with timebase register */
	ppc_md.calibrate_decr();
	printk(KERN_DEBUG "time_init: decrementer frequency = %lu.%.6lu MHz\n",
	       ppc_tb_freq / 1000000, ppc_tb_freq % 1000000);
	printk(KERN_DEBUG "time_init: processor frequency   = %lu.%.6lu MHz\n",
	       ppc_proc_freq / 1000000, ppc_proc_freq % 1000000);

	tb_ticks_per_jiffy = ppc_tb_freq / HZ;
	tb_ticks_per_sec = ppc_tb_freq;
	tb_ticks_per_usec = ppc_tb_freq / 1000000;
	calc_cputime_factors();

	/*
	 * Compute scale factor for sched_clock.
	 * The calibrate_decr() function has set tb_ticks_per_sec,
	 * which is the timebase frequency.
	 * We compute 1e9 * 2^64 / tb_ticks_per_sec and interpret
	 * the 128-bit result as a 64.64 fixed-point number.
	 * We then shift that number right until it is less than 1.0,
	 * giving us the scale factor and shift count to use in
	 * sched_clock().
	 */
	div128_by_32(1000000000, 0, tb_ticks_per_sec, &res);
	scale = res.result_low;
	for (shift = 0; res.result_high != 0; ++shift) {
		scale = (scale >> 1) | (res.result_high << 63);
		res.result_high >>= 1;
	}
	tb_to_ns_scale = scale;
	tb_to_ns_shift = shift;
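	/*
	 * For example, a 512 MHz timebase gives 10^9 / (512 * 10^6) ~= 1.95 ns
	 * per tick: the 64.64 quotient is >= 1.0, so one right shift leaves
	 * scale ~= 0.977 * 2^64 with shift = 1, and sched_clock() computes
	 * mulhdu(ticks, scale) << 1 == ticks * 1.95 ns.
	 */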
	/* Save the current timebase to pretty up CONFIG_PRINTK_TIME */
	boot_tb = get_tb();

	/* If platform provided a timezone (pmac), we correct the time */
	if (timezone_offset) {
		sys_tz.tz_minuteswest = -timezone_offset / 60;
		sys_tz.tz_dsttime = 0;
	}

	vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;

	/* initialise and enable the large decrementer (if we have one) */
	set_decrementer_max();
	enable_large_decrementer();

	/* Start the decrementer on CPUs that have manual control
	 * such as BookE
	 */
	start_cpu_decrementer();

	/* Register the clocksource */
	clocksource_init();

	init_decrementer_clockevent();
	tick_setup_hrtimer_broadcast();

	of_clk_init(NULL);
}
/*
 * Divide a 128-bit dividend by a 32-bit divisor, leaving a 128 bit
 * result.
 */
void div128_by_32(u64 dividend_high, u64 dividend_low,
		  unsigned divisor, struct div_result *dr)
{
	unsigned long a, b, c, d;
	unsigned long w, x, y, z;
	u64 ra, rb, rc;

	a = dividend_high >> 32;
	b = dividend_high & 0xffffffff;
	c = dividend_low >> 32;
	d = dividend_low & 0xffffffff;

	w = a / divisor;
	ra = ((u64)(a - (w * divisor)) << 32) + b;

	rb = ((u64) do_div(ra, divisor) << 32) + c;
	x = ra;

	rc = ((u64) do_div(rb, divisor) << 32) + d;
	y = rb;

	do_div(rc, divisor);
	z = rc;

	dr->result_high = ((u64)w << 32) + x;
	dr->result_low  = ((u64)y << 32) + z;
}
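/*
 * The routine above is schoolbook long division in base 2^32: the dividend
 * digits are a:b:c:d, and each step produces one quotient digit (w, x, y, z)
 * while do_div() leaves the quotient in its first argument and returns the
 * remainder, which is carried into the next partial dividend (ra, rb, rc).
 */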
/* We don't need to calibrate delay, we use the CPU timebase for that */
void calibrate_delay(void)
{
	/* Some generic code (such as spinlock debug) use loops_per_jiffy
	 * as the number of __delay(1) in a jiffy, so make it so
	 */
	loops_per_jiffy = tb_ticks_per_jiffy;
}
#if IS_ENABLED(CONFIG_RTC_DRV_GENERIC)
static int rtc_generic_get_time(struct device *dev, struct rtc_time *tm)
{
	ppc_md.get_rtc_time(tm);
	return 0;
}

static int rtc_generic_set_time(struct device *dev, struct rtc_time *tm)
{
	if (!ppc_md.set_rtc_time)
		return -EOPNOTSUPP;

	if (ppc_md.set_rtc_time(tm) < 0)
		return -EOPNOTSUPP;

	return 0;
}

static const struct rtc_class_ops rtc_generic_ops = {
	.read_time = rtc_generic_get_time,
	.set_time = rtc_generic_set_time,
};

static int __init rtc_init(void)
{
	struct platform_device *pdev;

	if (!ppc_md.get_rtc_time)
		return -ENODEV;

	pdev = platform_device_register_data(NULL, "rtc-generic", -1,
					     &rtc_generic_ops,
					     sizeof(rtc_generic_ops));

	return PTR_ERR_OR_ZERO(pdev);
}

device_initcall(rtc_init);
#endif