// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2001 MontaVista Software Inc.
 * Author: Jun Sun, jsun@mvista.com or jsun@junsun.net
 * Copyright (c) 2003, 2004 Maciej W. Rozycki
 *
 * Common time service routines for MIPS machines.
 */
#include <linux/clockchips.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/param.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/cpufreq.h>
#include <linux/delay.h>

#include <asm/cpu-features.h>
#include <asm/cpu-type.h>
#include <asm/div64.h>
#include <asm/time.h>

#ifdef CONFIG_CPU_FREQ

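/*
 * Reference loops-per-jiffy values and the CPU frequency at which they
 * were sampled (captured on the first cpufreq transition); used below to
 * rescale the delay-loop counts when the frequency changes.
 */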
static DEFINE_PER_CPU(unsigned long, pcp_lpj_ref);
static DEFINE_PER_CPU(unsigned long, pcp_lpj_ref_freq);
static unsigned long glb_lpj_ref;
static unsigned long glb_lpj_ref_freq;

static int cpufreq_callback(struct notifier_block *nb,
			    unsigned long val, void *data)
{
	struct cpufreq_freqs *freq = data;
	struct cpumask *cpus = freq->policy->cpus;
	unsigned long lpj;
	int cpu;

	/*
	 * Skip lpj numbers adjustment if the CPU-freq transition is safe for
	 * the loops delay. (Is this possible?)
	 */
	if (freq->flags & CPUFREQ_CONST_LOOPS)
		return NOTIFY_OK;

	/* Save the initial values of the lpjes for future scaling. */
	if (!glb_lpj_ref) {
		glb_lpj_ref = boot_cpu_data.udelay_val;
		glb_lpj_ref_freq = freq->old;

		for_each_online_cpu(cpu) {
			per_cpu(pcp_lpj_ref, cpu) =
				cpu_data[cpu].udelay_val;
			per_cpu(pcp_lpj_ref_freq, cpu) = freq->old;
		}
	}

	/*
	 * Adjust global lpj variable and per-CPU udelay_val number in
	 * accordance with the new CPU frequency.
	 */
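	/*
	 * cpufreq_scale(ref, ref_freq, new_freq) computes roughly
	 * ref * new_freq / ref_freq.  The rescaling is done before a
	 * frequency increase and after a decrease, so udelay() never runs
	 * with a loop count that is too small for the current clock.
	 */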
	if ((val == CPUFREQ_PRECHANGE  && freq->old < freq->new) ||
	    (val == CPUFREQ_POSTCHANGE && freq->old > freq->new)) {
		loops_per_jiffy = cpufreq_scale(glb_lpj_ref,
						glb_lpj_ref_freq,
						freq->new);

		for_each_cpu(cpu, cpus) {
			lpj = cpufreq_scale(per_cpu(pcp_lpj_ref, cpu),
					    per_cpu(pcp_lpj_ref_freq, cpu),
					    freq->new);
			cpu_data[cpu].udelay_val = (unsigned int)lpj;
		}
	}

	return NOTIFY_OK;
}

static struct notifier_block cpufreq_notifier = {
	.notifier_call	= cpufreq_callback,
};

static int __init register_cpufreq_notifier(void)
{
	return cpufreq_register_notifier(&cpufreq_notifier,
					 CPUFREQ_TRANSITION_NOTIFIER);
}
core_initcall(register_cpufreq_notifier);

#endif /* CONFIG_CPU_FREQ */

DEFINE_SPINLOCK(rtc_lock);
EXPORT_SYMBOL(rtc_lock);

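/*
 * perf_irq is the hook called from the CP0 timer/performance-counter
 * interrupt path; it stays a no-op until performance monitoring code
 * installs a real handler.
 */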
static int null_perf_irq(void)
{
	return 0;
}

int (*perf_irq)(void) = null_perf_irq;

EXPORT_SYMBOL(perf_irq);

/*
 * time_init() - it does the following things:
 *
 * 1) plat_time_init() -
 *	a) (optional) set up RTC routines,
 *	b) (optional) calibrate and set the mips_hpt_frequency
 *	   (only needed if you intend to use the CPU counter as the timer
 *	    interrupt source)
 *
 * 2) calculate a couple of cached variables for later usage.
 */
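
/*
 * For illustration, a platform with a fixed CPU clock might implement
 * plat_time_init() along these lines (cpu_clock_hz stands in for a
 * platform-specific constant; the divide-by-2 reflects the Count
 * register incrementing every other cycle on R4K-style cores):
 *
 *	void __init plat_time_init(void)
 *	{
 *		mips_hpt_frequency = cpu_clock_hz / 2;
 *	}
 */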

unsigned int mips_hpt_frequency;
EXPORT_SYMBOL_GPL(mips_hpt_frequency);

static __init int cpu_has_mfc0_count_bug(void)
{
	switch (current_cpu_type()) {
	case CPU_R4000PC:
	case CPU_R4000SC:
	case CPU_R4000MC:
		/*
		 * V3.0 is documented as suffering from the mfc0 from count
		 * bug.  Afaik this is the last version of the R4000; later
		 * versions were marketed as R4400.
		 */
		return 1;
	case CPU_R4400PC:
	case CPU_R4400SC:
	case CPU_R4400MC:
		/*
		 * The published errata for the R4400 up to 3.0 say the CPU
		 * has the mfc0 from count bug; we assume newer revisions
		 * are OK.
		 */
		return (current_cpu_data.processor_id & 0xff) <= 0x30;
	}

	return 0;
}

void __init time_init(void)
{
	plat_time_init();

	/*
	 * The use of the R4k timer as a clock event takes precedence;
	 * if reading the Count register might interfere with the timer
	 * interrupt, then we don't use the timer as a clock source.
	 * We may still use the timer as a clock source though if the
	 * timer interrupt isn't reliable; the interference doesn't
	 * matter then, because we don't use the interrupt.
	 */
	if (mips_clockevent_init() != 0 || !cpu_has_mfc0_count_bug())
		init_mips_clocksource();
}