arch/arm/kernel/sched_clock.c (linux/fpc-iii.git, blob e8edcaa0e4323c304d2b7a0cda4105eb3a1088f8)
/*
 * sched_clock.c: support for extending counters to full 64-bit ns counter
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/syscore_ops.h>
#include <linux/timer.h>

#include <asm/sched_clock.h>
struct clock_data {
	u64 epoch_ns;		/* sched_clock() value at the last update */
	u32 epoch_cyc;		/* counter value at the last update */
	u32 epoch_cyc_copy;	/* copy used to detect in-progress updates */
	unsigned long rate;	/* counter frequency in Hz */
	u32 mult;		/* cycles-to-ns multiplier */
	u32 shift;		/* cycles-to-ns shift */
	bool suspended;
	bool needs_suspend;
};
static void sched_clock_poll(unsigned long wrap_ticks);
static DEFINE_TIMER(sched_clock_timer, sched_clock_poll, 0, 0);
static int irqtime = -1;

core_param(irqtime, irqtime, int, 0400);
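
/*
 * "irqtime" kernel parameter: 1 forces IRQ time accounting on, 0 forces
 * it off, and the default of -1 enables it automatically when the
 * registered clock runs at 1 MHz or better (see setup_sched_clock()).
 */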
static struct clock_data cd = {
	.mult	= NSEC_PER_SEC / HZ,
};

static u32 __read_mostly sched_clock_mask = 0xffffffff;
static u32 notrace jiffy_sched_clock_read(void)
{
	return (u32)(jiffies - INITIAL_JIFFIES);
}

static u32 __read_mostly (*read_sched_clock)(void) = jiffy_sched_clock_read;
static inline u64 notrace cyc_to_ns(u64 cyc, u32 mult, u32 shift)
{
	return (cyc * mult) >> shift;
}
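
/*
 * The conversion above is fixed-point: ns = (cyc * mult) >> shift, with
 * mult/shift chosen by clocks_calc_mult_shift() so that mult / 2^shift
 * approximates NSEC_PER_SEC / rate. Illustrative numbers only (the pair
 * actually chosen may differ): a 24 MHz counter ticks every
 * 1e9 / 24e6 = 41.67ns, which mult = 699050667, shift = 24 approximates,
 * since 699050667 / 2^24 = 41.666...
 */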
static unsigned long long notrace cyc_to_sched_clock(u32 cyc, u32 mask)
{
	u64 epoch_ns;
	u32 epoch_cyc;

	if (cd.suspended)
		return cd.epoch_ns;

	/*
	 * Load the epoch_cyc and epoch_ns atomically. We do this by
	 * ensuring that we always write epoch_cyc, epoch_ns and
	 * epoch_cyc_copy in strict order, and read them in strict order.
	 * If epoch_cyc and epoch_cyc_copy are not equal, then we're in
	 * the middle of an update, and we should repeat the load.
	 */
	do {
		epoch_cyc = cd.epoch_cyc;
		smp_rmb();
		epoch_ns = cd.epoch_ns;
		smp_rmb();
	} while (epoch_cyc != cd.epoch_cyc_copy);

	return epoch_ns + cyc_to_ns((cyc - epoch_cyc) & mask, cd.mult, cd.shift);
}
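
/*
 * The retry loop above works like a hand-rolled seqlock. The writer
 * stores epoch_cyc_copy, then epoch_ns, then epoch_cyc; the reader loads
 * them in the reverse order. If a reader sees
 * epoch_cyc != epoch_cyc_copy, it raced with an update and the
 * (epoch_cyc, epoch_ns) pair it loaded may be mismatched, so it retries
 * until both loads fall on the same side of an update.
 */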
/*
 * Atomically update the sched_clock epoch.
 */
static void notrace update_sched_clock(void)
{
	unsigned long flags;
	u32 cyc;
	u64 ns;

	cyc = read_sched_clock();
	ns = cd.epoch_ns +
		cyc_to_ns((cyc - cd.epoch_cyc) & sched_clock_mask,
			  cd.mult, cd.shift);
	/*
	 * Write epoch_cyc and epoch_ns in a way that the update is
	 * detectable in cyc_to_sched_clock().
	 */
	raw_local_irq_save(flags);
	cd.epoch_cyc_copy = cyc;
	smp_wmb();
	cd.epoch_ns = ns;
	smp_wmb();
	cd.epoch_cyc = cyc;
	raw_local_irq_restore(flags);
}
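
/*
 * Note the write order: epoch_cyc_copy first, epoch_cyc last, with
 * smp_wmb() between each store, so the two markers bracket the epoch_ns
 * update. IRQs are disabled across the stores so a reader on this CPU
 * cannot observe a half-written epoch from interrupt context.
 */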
static void sched_clock_poll(unsigned long wrap_ticks)
{
	mod_timer(&sched_clock_timer, round_jiffies(jiffies + wrap_ticks));
	update_sched_clock();
}
void __init setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate)
{
	unsigned long r, w;
	u64 res, wrap;
	char r_unit;

	if (cd.rate > rate)
		return;

	BUG_ON(bits > 32);
	WARN_ON(!irqs_disabled());
	read_sched_clock = read;
	sched_clock_mask = (1 << bits) - 1;
	cd.rate = rate;

	/* calculate the mult/shift to convert counter ticks to ns. */
	clocks_calc_mult_shift(&cd.mult, &cd.shift, rate, NSEC_PER_SEC, 0);

	r = rate;
	if (r >= 4000000) {
		r /= 1000000;
		r_unit = 'M';
	} else if (r >= 1000) {
		r /= 1000;
		r_unit = 'k';
	} else
		r_unit = ' ';

	/* calculate how many ns until we wrap */
	wrap = cyc_to_ns((1ULL << bits) - 1, cd.mult, cd.shift);
	do_div(wrap, NSEC_PER_MSEC);
	w = wrap;

	/* calculate the ns resolution of this counter */
	res = cyc_to_ns(1ULL, cd.mult, cd.shift);
	pr_info("sched_clock: %u bits at %lu%cHz, resolution %lluns, wraps every %lums\n",
		bits, r, r_unit, res, w);
	/*
	 * Start the timer to keep sched_clock() properly updated and
	 * set the initial epoch.
	 */
	sched_clock_timer.data = msecs_to_jiffies(w - (w / 10));
	update_sched_clock();
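
	/*
	 * The poll period is 90% of the wrap time, so the epoch is always
	 * refreshed before the counter wraps. Illustrative arithmetic: a
	 * 32-bit counter at 24 MHz wraps after 2^32 / 24e6 = ~178957ms,
	 * so the timer would re-arm roughly every 161 seconds.
	 */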
	/*
	 * Ensure that sched_clock() starts off at 0ns
	 */
	cd.epoch_ns = 0;
	/* Enable IRQ time accounting if we have a fast enough sched_clock */
	if (irqtime > 0 || (irqtime == -1 && rate >= 1000000))
		enable_sched_clock_irqtime();

	pr_debug("Registered %pF as sched_clock source\n", read);
}
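
/*
 * Example caller (hypothetical names): a platform with a free-running
 * 32-bit up-counter at 24 MHz would register it from its timer init
 * code, while IRQs are still disabled:
 *
 *	static u32 notrace example_read_sched_clock(void)
 *	{
 *		return readl_relaxed(example_counter_base);
 *	}
 *
 *	setup_sched_clock(example_read_sched_clock, 32, 24000000);
 */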
static unsigned long long notrace sched_clock_32(void)
{
	u32 cyc = read_sched_clock();
	return cyc_to_sched_clock(cyc, sched_clock_mask);
}

unsigned long long __read_mostly (*sched_clock_func)(void) = sched_clock_32;

unsigned long long notrace sched_clock(void)
{
	return sched_clock_func();
}
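
/*
 * sched_clock() dispatches through the sched_clock_func pointer, so
 * platform code can install its own implementation (for example when a
 * wider-than-32-bit counter is available) and bypass the 32-bit
 * extension logic; sched_clock_32 is merely the default.
 */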
void __init sched_clock_postinit(void)
{
	/*
	 * If no sched_clock function has been provided by this point,
	 * make the jiffy-based one the final one.
	 */
	if (read_sched_clock == jiffy_sched_clock_read)
		setup_sched_clock(jiffy_sched_clock_read, 32, HZ);

	sched_clock_poll(sched_clock_timer.data);
}
static int sched_clock_suspend(void)
{
	sched_clock_poll(sched_clock_timer.data);
	cd.suspended = true;
	return 0;
}

static void sched_clock_resume(void)
{
	cd.epoch_cyc = read_sched_clock();
	cd.epoch_cyc_copy = cd.epoch_cyc;
	cd.suspended = false;
}
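
/*
 * Across suspend the counter may stop or reset, so resume re-reads it
 * and re-bases both epoch_cyc and epoch_cyc_copy on the fresh value;
 * cd.epoch_ns is left alone, so sched_clock() continues from the value
 * captured by the final poll in sched_clock_suspend().
 */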
static struct syscore_ops sched_clock_ops = {
	.suspend = sched_clock_suspend,
	.resume = sched_clock_resume,
};

static int __init sched_clock_syscore_init(void)
{
	register_syscore_ops(&sched_clock_ops);
	return 0;
}
device_initcall(sched_clock_syscore_init);