arch/mips/kernel/csrc-r4k.c

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2007 by Ralf Baechle
 */
#include <linux/clocksource.h>
#include <linux/cpufreq.h>
#include <linux/init.h>
#include <linux/sched_clock.h>

#include <asm/time.h>

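/* Clocksource read callback: return the current value of the CP0 Count register. */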
static u64 c0_hpt_read(struct clocksource *cs)
{
	return read_c0_count();
}

static struct clocksource clocksource_mips = {
	.name		= "MIPS",
	.read		= c0_hpt_read,
	.mask		= CLOCKSOURCE_MASK(32),
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
};

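/*
 * sched_clock() backend: notrace because the tracing core takes its
 * timestamps through this path, __maybe_unused because it is only
 * registered when count_can_be_sched_clock() allows it.
 */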
static u64 __maybe_unused notrace r4k_read_sched_clock(void)
{
	return read_c0_count();
}

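/*
 * Read the count via RDHWR $2, the user-mode alias for CP0 Count and the
 * same instruction the VDSO will execute.
 */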
static inline unsigned int rdhwr_count(void)
{
	unsigned int count;

	__asm__ __volatile__(
	"	.set push\n"
	"	.set mips32r2\n"
	"	rdhwr	%0, $2\n"
	"	.set pop\n"
	: "=r" (count));

	return count;
}

static bool rdhwr_count_usable(void)
{
	unsigned int prev, curr, i;

	/*
	 * Older QEMUs have a broken implementation of RDHWR for the CP0 count
	 * which always returns a constant value. Try to identify this and don't
	 * use it in the VDSO if it is broken. This workaround can be removed
	 * once the fix has been in QEMU stable for a reasonable amount of time.
	 */
	for (i = 0, prev = rdhwr_count(); i < 100; i++) {
		curr = rdhwr_count();

		if (curr != prev)
			return true;

		prev = curr;
	}

	pr_warn("Not using R4K clocksource in VDSO due to broken RDHWR\n");
	return false;
}

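/*
 * The count can back sched_clock() only if its rate never changes (no
 * cpufreq) and, on SMP, only if the kernel can cope with counters that
 * are not synchronized across CPUs.
 */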
static inline __init bool count_can_be_sched_clock(void)
{
	if (IS_ENABLED(CONFIG_CPU_FREQ))
		return false;

	if (num_possible_cpus() > 1 &&
	    !IS_ENABLED(CONFIG_HAVE_UNSTABLE_SCHED_CLOCK))
		return false;

	return true;
}

#ifdef CONFIG_CPU_FREQ

static bool __read_mostly r4k_clock_unstable;

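/* Mark the clocksource unstable exactly once; later calls are no-ops. */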
static void r4k_clocksource_unstable(char *reason)
{
	if (r4k_clock_unstable)
		return;

	r4k_clock_unstable = true;

	pr_info("R4K timer is unstable due to %s\n", reason);

	clocksource_mark_unstable(&clocksource_mips);
}

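/*
 * Once a frequency transition has completed, the count no longer ticks
 * at the rate the clocksource was calibrated for.
 */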
static int r4k_cpufreq_callback(struct notifier_block *nb,
				unsigned long val, void *data)
{
	if (val == CPUFREQ_POSTCHANGE)
		r4k_clocksource_unstable("CPU frequency change");

	return 0;
}

static struct notifier_block r4k_cpufreq_notifier = {
	.notifier_call = r4k_cpufreq_callback,
};

static int __init r4k_register_cpufreq_notifier(void)
{
	return cpufreq_register_notifier(&r4k_cpufreq_notifier,
					 CPUFREQ_TRANSITION_NOTIFIER);
}
core_initcall(r4k_register_cpufreq_notifier);

#endif /* CONFIG_CPU_FREQ */

int __init init_r4k_clocksource(void)
{
	if (!cpu_has_counter || !mips_hpt_frequency)
		return -ENXIO;

	/* Calculate a somewhat reasonable rating value */
	clocksource_mips.rating = 200;
	clocksource_mips.rating += clamp(mips_hpt_frequency / 10000000, 0, 99);
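	/* i.e. base 200 plus one point per 10 MHz of count frequency, capped at 299 */
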
	/*
	 * R2 onwards makes the count accessible to user mode so it can be used
	 * by the VDSO (HWREna is configured by configure_hwrena()).
	 */
	if (cpu_has_mips_r2_r6 && rdhwr_count_usable())
		clocksource_mips.vdso_clock_mode = VDSO_CLOCKMODE_R4K;

	clocksource_register_hz(&clocksource_mips, mips_hpt_frequency);

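	/* The count is 32 bits wide; the sched_clock core extends it past wraparound. */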
	if (count_can_be_sched_clock())
		sched_clock_register(r4k_read_sched_clock, 32, mips_hpt_frequency);

	return 0;
}