/* arch/arm/mach-msm/timer.c
 *
 * Copyright (C) 2007 Google, Inc.
 * Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
17 #include <linux/clocksource.h>
18 #include <linux/clockchips.h>
19 #include <linux/init.h>
20 #include <linux/interrupt.h>
21 #include <linux/irq.h>
22 #include <linux/io.h>
24 #include <asm/mach/time.h>
25 #include <asm/hardware/gic.h>
26 #include <asm/localtimer.h>
28 #include <mach/msm_iomap.h>
29 #include <mach/cpu.h>
30 #include <mach/board.h>
/* Register offsets within one MSM timer block. */
#define TIMER_MATCH_VAL         0x0000	/* comparator: IRQ when COUNT == MATCH */
#define TIMER_COUNT_VAL         0x0004	/* free-running counter */
#define TIMER_ENABLE            0x0008	/* control register, bits below */
#define TIMER_ENABLE_CLR_ON_MATCH_EN    BIT(1)
#define TIMER_ENABLE_EN                 BIT(0)
#define TIMER_CLEAR             0x000C	/* write 0 to reset COUNT to zero */
#define DGT_CLK_CTL             0x0034	/* debug timer clock divider control */
#define DGT_CLK_CTL_DIV_4       0x3	/* divide input clock by 4 */

/* The general-purpose timer ticks off the 32.768 kHz sleep clock. */
#define GPT_HZ 32768

/* Low bits of the 7x01 debug timer are unreliable; discard this many. */
#define MSM_DGT_SHIFT 5

/* Base of the GP timer used for clock events (set per-SoC in msm_timer_init). */
static void __iomem *event_base;
47 static irqreturn_t msm_timer_interrupt(int irq, void *dev_id)
49 struct clock_event_device *evt = *(struct clock_event_device **)dev_id;
50 /* Stop the timer tick */
51 if (evt->mode == CLOCK_EVT_MODE_ONESHOT) {
52 u32 ctrl = readl_relaxed(event_base + TIMER_ENABLE);
53 ctrl &= ~TIMER_ENABLE_EN;
54 writel_relaxed(ctrl, event_base + TIMER_ENABLE);
56 evt->event_handler(evt);
57 return IRQ_HANDLED;
60 static int msm_timer_set_next_event(unsigned long cycles,
61 struct clock_event_device *evt)
63 u32 ctrl = readl_relaxed(event_base + TIMER_ENABLE);
65 writel_relaxed(0, event_base + TIMER_CLEAR);
66 writel_relaxed(cycles, event_base + TIMER_MATCH_VAL);
67 writel_relaxed(ctrl | TIMER_ENABLE_EN, event_base + TIMER_ENABLE);
68 return 0;
/*
 * clock_event set_mode hook.  Every mode change first disables the timer
 * and clears the clear-on-match behaviour; ONESHOT is re-armed lazily by
 * msm_timer_set_next_event(), so each case below intentionally leaves the
 * timer stopped.
 */
static void msm_timer_set_mode(enum clock_event_mode mode,
			       struct clock_event_device *evt)
{
	u32 ctrl;

	ctrl = readl_relaxed(event_base + TIMER_ENABLE);
	ctrl &= ~(TIMER_ENABLE_EN | TIMER_ENABLE_CLR_ON_MATCH_EN);

	switch (mode) {
	case CLOCK_EVT_MODE_RESUME:
	case CLOCK_EVT_MODE_PERIODIC:
		break;
	case CLOCK_EVT_MODE_ONESHOT:
		/* Timer is enabled in set_next_event */
		break;
	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_SHUTDOWN:
		break;
	}
	writel_relaxed(ctrl, event_base + TIMER_ENABLE);
}
/* The general-purpose timer used as the clock event device. */
static struct clock_event_device msm_clockevent = {
	.name = "gp_timer",
	.features = CLOCK_EVT_FEAT_ONESHOT,
	.rating = 200,
	.set_next_event = msm_timer_set_next_event,
	.set_mode = msm_timer_set_mode,
};

/*
 * dev_id handed to the timer IRQ: a single device pointer on UP-style
 * setups, or a per-CPU array of device pointers when per-CPU IRQs are
 * used (8x60/8960).  msm_timer_interrupt() dereferences it as a
 * clock_event_device ** either way.
 */
static union {
	struct clock_event_device *evt;
	struct clock_event_device __percpu **percpu_evt;
} msm_evt;
/* Base of the debug timer used as the clocksource (set in msm_timer_init). */
static void __iomem *source_base;

/* clocksource read hook: return the raw 32-bit count register. */
static cycle_t msm_read_timer_count(struct clocksource *cs)
{
	return readl_relaxed(source_base + TIMER_COUNT_VAL);
}
/* clocksource read hook for 7x01, which has unreliable low count bits. */
static cycle_t msm_read_timer_count_shift(struct clocksource *cs)
{
	/*
	 * Shift timer count down by a constant due to unreliable lower bits
	 * on some targets.
	 */
	return msm_read_timer_count(cs) >> MSM_DGT_SHIFT;
}
/*
 * The debug timer as the system clocksource.  .read and .mask are
 * overridden for 7x01 in msm_timer_init() (shifted read, narrower mask).
 */
static struct clocksource msm_clocksource = {
	.name = "dg_timer",
	.rating = 300,
	.read = msm_read_timer_count,
	.mask = CLOCKSOURCE_MASK(32),
	.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
/*
 * Probe-time setup: pick the per-SoC timer bases and debug-timer rate,
 * register the GP timer as a clock event device (with its IRQ), then
 * start the debug timer and register it as the clocksource.
 *
 * Note the error path: if clock-event IRQ setup fails we only log it and
 * fall through to 'err' — the clocksource is still registered so the
 * system keeps a working timebase.
 */
static void __init msm_timer_init(void)
{
	struct clock_event_device *ce = &msm_clockevent;
	struct clocksource *cs = &msm_clocksource;
	int res;
	u32 dgt_hz;

	if (cpu_is_msm7x01()) {
		event_base = MSM_CSR_BASE;
		source_base = MSM_CSR_BASE + 0x10;
		dgt_hz = 19200000 >> MSM_DGT_SHIFT; /* 600 KHz */
		/* Low count bits are unreliable on 7x01: shifted read, 27-bit mask. */
		cs->read = msm_read_timer_count_shift;
		cs->mask = CLOCKSOURCE_MASK((32 - MSM_DGT_SHIFT));
	} else if (cpu_is_msm7x30()) {
		event_base = MSM_CSR_BASE + 0x04;
		source_base = MSM_CSR_BASE + 0x24;
		dgt_hz = 24576000 / 4;
	} else if (cpu_is_qsd8x50()) {
		event_base = MSM_CSR_BASE;
		source_base = MSM_CSR_BASE + 0x10;
		dgt_hz = 19200000 / 4;
	} else if (cpu_is_msm8x60() || cpu_is_msm8960()) {
		event_base = MSM_TMR_BASE + 0x04;
		/* Use CPU0's timer as the global clock source. */
		source_base = MSM_TMR0_BASE + 0x24;
		dgt_hz = 27000000 / 4;
		writel_relaxed(DGT_CLK_CTL_DIV_4, MSM_TMR_BASE + DGT_CLK_CTL);
	} else
		BUG();

	/* Quiesce the event timer before handing it to the framework. */
	writel_relaxed(0, event_base + TIMER_ENABLE);
	writel_relaxed(0, event_base + TIMER_CLEAR);
	writel_relaxed(~0, event_base + TIMER_MATCH_VAL);
	ce->cpumask = cpumask_of(0);

	ce->irq = INT_GP_TIMER_EXP;
	clockevents_config_and_register(ce, GPT_HZ, 4, 0xffffffff);
	if (cpu_is_msm8x60() || cpu_is_msm8960()) {
		/* Per-CPU local timers: one device pointer slot per CPU. */
		msm_evt.percpu_evt = alloc_percpu(struct clock_event_device *);
		if (!msm_evt.percpu_evt) {
			pr_err("memory allocation failed for %s\n", ce->name);
			goto err;
		}
		*__this_cpu_ptr(msm_evt.percpu_evt) = ce;
		res = request_percpu_irq(ce->irq, msm_timer_interrupt,
					 ce->name, msm_evt.percpu_evt);
		if (!res)
			enable_percpu_irq(ce->irq, 0);
	} else {
		msm_evt.evt = ce;
		res = request_irq(ce->irq, msm_timer_interrupt,
				  IRQF_TIMER | IRQF_NOBALANCING |
				  IRQF_TRIGGER_RISING, ce->name, &msm_evt.evt);
	}

	if (res)
		pr_err("request_irq failed for %s\n", ce->name);
err:
	/* Start the debug timer free-running and publish it as the clocksource. */
	writel_relaxed(TIMER_ENABLE_EN, source_base + TIMER_ENABLE);
	res = clocksource_register_hz(cs, dgt_hz);
	if (res)
		pr_err("clocksource_register failed\n");
}
194 #ifdef CONFIG_LOCAL_TIMERS
/*
 * Bring up the local tick device on a secondary CPU, cloning the
 * configuration of the boot CPU's msm_clockevent.
 *
 * NOTE(review): writes go through the same event_base as CPU0 —
 * presumably the timer registers are banked per-CPU on these SoCs so
 * each CPU touches its own instance; confirm against the SoC docs.
 */
int __cpuinit local_timer_setup(struct clock_event_device *evt)
{
	/* Use existing clock_event for cpu 0 */
	if (!smp_processor_id())
		return 0;

	/* Quiesce this CPU's timer before registration. */
	writel_relaxed(0, event_base + TIMER_ENABLE);
	writel_relaxed(0, event_base + TIMER_CLEAR);
	writel_relaxed(~0, event_base + TIMER_MATCH_VAL);
	evt->irq = msm_clockevent.irq;
	evt->name = "local_timer";
	evt->features = msm_clockevent.features;
	evt->rating = msm_clockevent.rating;
	evt->set_mode = msm_timer_set_mode;
	evt->set_next_event = msm_timer_set_next_event;
	/* Reuse CPU0's shift; derive mult and delta limits from it. */
	evt->shift = msm_clockevent.shift;
	evt->mult = div_sc(GPT_HZ, NSEC_PER_SEC, evt->shift);
	evt->max_delta_ns = clockevent_delta2ns(0xf0000000, evt);
	evt->min_delta_ns = clockevent_delta2ns(4, evt);

	/* Point this CPU's IRQ dev_id slot at the new device, then enable. */
	*__this_cpu_ptr(msm_evt.percpu_evt) = evt;
	clockevents_register_device(evt);
	enable_percpu_irq(evt->irq, 0);
	return 0;
}
/* Tear down this CPU's tick device (CPU hot-unplug path). */
void local_timer_stop(struct clock_event_device *evt)
{
	evt->set_mode(CLOCK_EVT_MODE_UNUSED, evt);
	disable_percpu_irq(evt->irq);
}
226 #endif /* CONFIG_LOCAL_TIMERS */
/* Hooked into the ARM generic time layer via the machine descriptor. */
struct sys_timer msm_timer = {
	.init = msm_timer_init
};