arch/mips/kernel/cevt-smtc.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2007 MIPS Technologies, Inc.
 * Copyright (C) 2007 Ralf Baechle <ralf@linux-mips.org>
 * Copyright (C) 2008 Kevin D. Kissell, Paralogos sarl
 */
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/irq.h>

#include <asm/smtc_ipi.h>
#include <asm/time.h>
#include <asm/cevt-r4k.h>

/*
 * Variant clock event timer support for SMTC on MIPS 34K, 1004K
 * or other MIPS MT cores.
 *
 * Notes on SMTC Support:
 *
 * SMTC has multiple microthread TCs pretending to be Linux CPUs.
 * But there's only one Count/Compare pair per VPE, and Compare
 * interrupts are taken opportunistically by available TCs
 * bound to the VPE with the Count register. The new timer
 * framework provides for global broadcasts, but we really
 * want VPE-level multicasts for best behavior. So instead
 * of invoking the high-level clock-event broadcast code,
 * this version of SMTC support uses the historical SMTC
 * multicast mechanisms "under the hood", appearing to the
 * generic clock layer as if the interrupts are per-CPU.
 *
 * The approach taken here is to maintain a set of NR_CPUS
 * virtual timers, and track which "CPU" needs to be alerted
 * at each event.
 *
 * It's unlikely that we'll see a MIPS MT core with more than
 * 2 VPEs, but we *know* that we won't need to handle more
 * VPEs than we have "CPUs". So NR_CPUS arrays of NR_CPUS
 * elements are always going to be overkill, but always going
 * to be enough.
 */
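
/*
 * smtc_nexttime[vpe][cpu] holds the absolute Count value at which
 * virtual CPU "cpu" on VPE "vpe" next wants an event; zero means no
 * event is pending (see MAKEVALID/ISVALID below). smtc_nextinvpe[vpe]
 * names the virtual CPU whose event on that VPE is due next.
 */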
unsigned long smtc_nexttime[NR_CPUS][NR_CPUS];
static int smtc_nextinvpe[NR_CPUS];

/*
 * Timestamps stored are absolute values to be programmed
 * into the Count register. Valid timestamps will never be zero.
 * If a zero Count value is actually calculated, it is converted
 * to be a 1, which will introduce one or two CPU cycles of error
 * roughly once every four billion events, which at 1000 HZ means
 * about once every 50 days. If that's actually a problem, one
 * could alternate squashing 0 to 1 and to -1.
 */

#define MAKEVALID(x) (((x) == 0L) ? 1L : (x))
#define ISVALID(x) ((x) != 0L)

/*
 * Time comparison is subtle, as it's really truncated
 * modular arithmetic.
 */

#define IS_SOONER(a, b, reference) \
	(((a) - (unsigned long)(reference)) < ((b) - (unsigned long)(reference)))
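
/*
 * Worked example: with reference = 0xfffffff0, a timestamp of
 * 0xfffffff8 is 0x08 ticks away, while a wrapped timestamp of
 * 0x00000010 is 0x20 ticks away; IS_SOONER(0xfffffff8, 0x10,
 * 0xfffffff0) is therefore true, even though 0xfffffff8 is the
 * larger value as a plain unsigned integer.
 */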

/*
 * CATCHUP_INCREMENT is used when the function falls behind the
 * counter. It could be an increasing function instead of a constant.
 */

#define CATCHUP_INCREMENT 64

static int mips_next_event(unsigned long delta,
				struct clock_event_device *evt)
{
	unsigned long flags;
	unsigned int mtflags;
	unsigned long timestamp, reference, previous;
	unsigned long nextcomp = 0L;
	int vpe = current_cpu_data.vpe_id;
	int cpu = smp_processor_id();

	local_irq_save(flags);
	mtflags = dmt();
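	/*
	 * With interrupts off and multithreading on this VPE halted
	 * by dmt(), updates to the smtc_nexttime and smtc_nextinvpe
	 * bookkeeping are effectively serialized until the matching
	 * emt()/local_irq_restore() below.
	 */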

	/*
	 * Maintain the per-TC virtual timer
	 * and program the per-VPE shared Count register
	 * as appropriate here...
	 */
	reference = (unsigned long)read_c0_count();
	timestamp = MAKEVALID(reference + delta);
	/*
	 * To really model the clock, we have to catch the case
	 * where the current next-in-VPE timestamp is the old
	 * timestamp for the calling CPU, but the new value is
	 * in fact later. In that case, we have to do a full
	 * scan and discover the new next-in-VPE CPU id and
	 * timestamp.
	 */
	previous = smtc_nexttime[vpe][cpu];
	if (cpu == smtc_nextinvpe[vpe] && ISVALID(previous)
	    && IS_SOONER(previous, timestamp, reference)) {
		int i;
		int soonest = cpu;

		/*
		 * Update the timestamp array here, so that the new
		 * value gets considered along with those of the
		 * other virtual CPUs on the VPE.
		 */
		smtc_nexttime[vpe][cpu] = timestamp;
		for_each_online_cpu(i) {
			if (ISVALID(smtc_nexttime[vpe][i])
			    && IS_SOONER(smtc_nexttime[vpe][i],
				smtc_nexttime[vpe][soonest], reference)) {
				soonest = i;
			}
		}
		smtc_nextinvpe[vpe] = soonest;
		nextcomp = smtc_nexttime[vpe][soonest];
	/*
	 * Otherwise, we don't have to process the whole array rank,
	 * we just have to see if the event horizon has gotten closer.
	 */
	} else {
		if (!ISVALID(smtc_nexttime[vpe][smtc_nextinvpe[vpe]]) ||
		    IS_SOONER(timestamp,
			smtc_nexttime[vpe][smtc_nextinvpe[vpe]], reference)) {
			smtc_nextinvpe[vpe] = cpu;
			nextcomp = timestamp;
		}
		/*
		 * Since next-in-VPE may be the same as the executing
		 * virtual CPU, we update the array *after* checking
		 * its value.
		 */
		smtc_nexttime[vpe][cpu] = timestamp;
	}

	/*
	 * It may be that, in fact, we don't need to update Compare,
	 * but if we do, we want to make sure we didn't fall into
	 * a crack just behind Count.
	 */
	if (ISVALID(nextcomp)) {
		write_c0_compare(nextcomp);
		ehb();
		/*
		 * We never return an error; we just make sure
		 * that we trigger the handlers as quickly as
		 * we can if we fell behind.
		 */
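		/*
		 * An unsigned difference greater than LONG_MAX is a
		 * negative signed difference: Count has already passed
		 * nextcomp, and the interrupt would only fire after a
		 * full Count wrap, so bump Compare forward until it
		 * lands ahead of Count again.
		 */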
		while ((nextcomp - (unsigned long)read_c0_count())
			> (unsigned long)LONG_MAX) {
			nextcomp += CATCHUP_INCREMENT;
			write_c0_compare(nextcomp);
			ehb();
		}
	}
	emt(mtflags);
	local_irq_restore(flags);
	return 0;
}

void smtc_distribute_timer(int vpe)
{
	unsigned long flags;
	unsigned int mtflags;
	int cpu;
	struct clock_event_device *cd;
	unsigned long nextstamp;
	unsigned long reference;

repeat:
	nextstamp = 0L;
	for_each_online_cpu(cpu) {
		/*
		 * Find virtual CPUs within the current VPE who have
		 * unserviced timer requests whose time is now past.
		 */
		local_irq_save(flags);
		mtflags = dmt();
		if (cpu_data[cpu].vpe_id == vpe &&
		    ISVALID(smtc_nexttime[vpe][cpu])) {
			reference = (unsigned long)read_c0_count();
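			/*
			 * An unsigned difference greater than LONG_MAX
			 * means the stored timestamp is already in the
			 * past: clear the slot and deliver the event,
			 * by IPI if it belongs to another TC.
			 */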
			if ((smtc_nexttime[vpe][cpu] - reference)
			    > (unsigned long)LONG_MAX) {
				smtc_nexttime[vpe][cpu] = 0L;
				emt(mtflags);
				local_irq_restore(flags);
				/*
				 * We don't send IPIs to ourself.
				 */
				if (cpu != smp_processor_id()) {
					smtc_send_ipi(cpu, SMTC_CLOCK_TICK, 0);
				} else {
					cd = &per_cpu(mips_clockevent_device, cpu);
					cd->event_handler(cd);
				}
			} else {
				/* Local to VPE but Valid Time not yet reached. */
				if (!ISVALID(nextstamp) ||
				    IS_SOONER(smtc_nexttime[vpe][cpu], nextstamp,
					reference)) {
					smtc_nextinvpe[vpe] = cpu;
					nextstamp = smtc_nexttime[vpe][cpu];
				}
				emt(mtflags);
				local_irq_restore(flags);
			}
		} else {
			emt(mtflags);
			local_irq_restore(flags);
		}
	}
	/* Reprogram for interrupt at next soonest timestamp for VPE */
	if (ISVALID(nextstamp)) {
		write_c0_compare(nextstamp);
		ehb();
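		/*
		 * If Count slipped past the chosen timestamp while we
		 * were scanning, the Compare value just written may
		 * never fire; rescan and service the expired request.
		 */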
		if ((nextstamp - (unsigned long)read_c0_count())
		    > (unsigned long)LONG_MAX)
			goto repeat;
	}
}

irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
{
	int cpu = smp_processor_id();

	/* If we're running SMTC, we've got MIPS MT and therefore MIPS32R2 */
	handle_perf_irq(1);
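
	/* Cause.TI (bit 30) indicates a pending timer interrupt. */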
	if (read_c0_cause() & (1 << 30)) {
		/* Clear Count/Compare Interrupt */
		write_c0_compare(read_c0_compare());
		smtc_distribute_timer(cpu_data[cpu].vpe_id);
	}
	return IRQ_HANDLED;
}

int __cpuinit smtc_clockevent_init(void)
{
	uint64_t mips_freq = mips_hpt_frequency;
	unsigned int cpu = smp_processor_id();
	struct clock_event_device *cd;
	unsigned int irq;
	int i;
	int j;

	if (!cpu_has_counter || !mips_hpt_frequency)
		return -ENXIO;
	if (cpu == 0) {
		for (i = 0; i < num_possible_cpus(); i++) {
			smtc_nextinvpe[i] = 0;
			for (j = 0; j < num_possible_cpus(); j++)
				smtc_nexttime[i][j] = 0L;
		}
		/*
		 * SMTC also can't have the usability test
		 * run by secondary TCs once Compare is in use.
		 */
		if (!c0_compare_int_usable())
			return -ENXIO;
	}

	/*
	 * With vectored interrupts things are getting platform specific.
	 * get_c0_compare_int is a hook to allow a platform to return the
	 * interrupt number of its liking.
	 */
	irq = MIPS_CPU_IRQ_BASE + cp0_compare_irq;
	if (get_c0_compare_int)
		irq = get_c0_compare_int();

	cd = &per_cpu(mips_clockevent_device, cpu);

	cd->name		= "MIPS";
	cd->features		= CLOCK_EVT_FEAT_ONESHOT;

	/* Calculate the min / max delta */
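	/*
	 * The clockevents core converts nanoseconds to Count ticks as
	 * (ns * cd->mult) >> cd->shift, so mult is the timer frequency
	 * scaled by 2^32 / NSEC_PER_SEC.
	 */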
	cd->mult	= div_sc((unsigned long) mips_freq, NSEC_PER_SEC, 32);
	cd->shift		= 32;
	cd->max_delta_ns	= clockevent_delta2ns(0x7fffffff, cd);
	cd->min_delta_ns	= clockevent_delta2ns(0x300, cd);

	cd->rating		= 300;
	cd->irq			= irq;
	cd->cpumask		= cpumask_of(cpu);
	cd->set_next_event	= mips_next_event;
	cd->set_mode		= mips_set_clock_mode;
	cd->event_handler	= mips_event_handler;

	clockevents_register_device(cd);

	/*
	 * On SMTC we only want to do the data structure
	 * initialization and IRQ setup once.
	 */
	if (cpu)
		return 0;
	/*
	 * And we need the hwmask associated with the c0_compare
	 * vector to be initialized.
	 */
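	/*
	 * 0x100 << n selects the Status.IM bit for CPU interrupt
	 * line n (the IM field occupies Status bits 15:8).
	 */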
	irq_hwmask[irq] = (0x100 << cp0_compare_irq);
	if (cp0_timer_irq_installed)
		return 0;

	cp0_timer_irq_installed = 1;

	setup_irq(irq, &c0_compare_irqaction);

	return 0;
}