// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2010 Google, Inc.
 *
 * Author:
 *	Colin Cross <ccross@google.com>
 */

#define pr_fmt(fmt) "tegra-timer: " fmt

#include <linux/clk.h>
#include <linux/clockchips.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/percpu.h>
#include <linux/sched_clock.h>
#include <linux/time.h>

#include "timer-of.h"

#define RTC_SECONDS		0x08
#define RTC_SHADOW_SECONDS	0x0c
#define RTC_MILLISECONDS	0x10

#define TIMERUS_CNTR_1US	0x10
#define TIMERUS_USEC_CFG	0x14
#define TIMERUS_CNTR_FREEZE	0x4c

#define TIMER_PTV		0x0
#define TIMER_PTV_EN		BIT(31)
#define TIMER_PTV_PER		BIT(30)
#define TIMER_PCR		0x4
#define TIMER_PCR_INTR_CLR	BIT(30)

#define TIMER1_BASE		0x00
#define TIMER2_BASE		0x08
#define TIMER3_BASE		0x50
#define TIMER4_BASE		0x58
#define TIMER10_BASE		0x90

#define TIMER1_IRQ_IDX		0
#define TIMER10_IRQ_IDX		10

#define TIMER_1MHz		1000000

static u32 usec_config;
static void __iomem *timer_reg_base;

static int tegra_timer_set_next_event(unsigned long cycles,
				      struct clock_event_device *evt)
{
	void __iomem *reg_base = timer_of_base(to_timer_of(evt));

	/*
	 * Tegra's timer uses n+1 scheme for the counter, i.e. timer will
	 * fire after one tick if 0 is loaded.
	 *
	 * The minimum and maximum numbers of oneshot ticks are defined
	 * by clockevents_config_and_register(1, 0x1fffffff + 1) invocation
	 * below in the code. Hence the cycles (ticks) can't be outside of
	 * a range supportable by hardware.
	 */
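	/*
	 * Worked example (added for clarity, assuming the n+1 behaviour
	 * described above): a request for cycles == 1 programs a counter
	 * value of 0, which expires after a single tick; the largest
	 * request, 0x1fffffff + 1, programs 0x1fffffff, the maximum value
	 * the 29-bit counter field can hold.
	 */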
	writel_relaxed(TIMER_PTV_EN | (cycles - 1), reg_base + TIMER_PTV);

	return 0;
}

static int tegra_timer_shutdown(struct clock_event_device *evt)
{
	void __iomem *reg_base = timer_of_base(to_timer_of(evt));

	writel_relaxed(0, reg_base + TIMER_PTV);

	return 0;
}

static int tegra_timer_set_periodic(struct clock_event_device *evt)
{
	void __iomem *reg_base = timer_of_base(to_timer_of(evt));
	unsigned long period = timer_of_period(to_timer_of(evt));

	writel_relaxed(TIMER_PTV_EN | TIMER_PTV_PER | (period - 1),
		       reg_base + TIMER_PTV);

	return 0;
}

static irqreturn_t tegra_timer_isr(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;
	void __iomem *reg_base = timer_of_base(to_timer_of(evt));

	writel_relaxed(TIMER_PCR_INTR_CLR, reg_base + TIMER_PCR);
	evt->event_handler(evt);

	return IRQ_HANDLED;
}

static void tegra_timer_suspend(struct clock_event_device *evt)
{
	void __iomem *reg_base = timer_of_base(to_timer_of(evt));

	writel_relaxed(TIMER_PCR_INTR_CLR, reg_base + TIMER_PCR);
}

static void tegra_timer_resume(struct clock_event_device *evt)
{
	/* restore the microsecond counter configuration on resume */
	writel_relaxed(usec_config, timer_reg_base + TIMERUS_USEC_CFG);
}

static DEFINE_PER_CPU(struct timer_of, tegra_to) = {
	.flags = TIMER_OF_CLOCK | TIMER_OF_BASE,

	.clkevt = {
		.name = "tegra_timer",
		.features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC,
		.set_next_event = tegra_timer_set_next_event,
		.set_state_shutdown = tegra_timer_shutdown,
		.set_state_periodic = tegra_timer_set_periodic,
		.set_state_oneshot = tegra_timer_shutdown,
		.tick_resume = tegra_timer_shutdown,
		.suspend = tegra_timer_suspend,
		.resume = tegra_timer_resume,
	},
};

static int tegra_timer_setup(unsigned int cpu)
{
	struct timer_of *to = per_cpu_ptr(&tegra_to, cpu);

	writel_relaxed(0, timer_of_base(to) + TIMER_PTV);
	writel_relaxed(TIMER_PCR_INTR_CLR, timer_of_base(to) + TIMER_PCR);

	irq_force_affinity(to->clkevt.irq, cpumask_of(cpu));
	enable_irq(to->clkevt.irq);

	/*
	 * Tegra's timer uses an n+1 scheme for the counter, i.e. the timer
	 * fires after one tick if 0 is loaded, so the minimum number of
	 * ticks is 1. As a result, both tick limits passed below are one
	 * higher than the minimum and maximum values the hardware register
	 * can hold; set_next_event() accounts for this by writing
	 * cycles - 1.
	 */
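	/*
	 * Worked example (added for clarity): with the 1 MHz rate used for
	 * the Tegra20 per-CPU timers, the longest oneshot period is
	 * (0x1fffffff + 1) ticks = 536870912 us, roughly 537 seconds.
	 */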
	clockevents_config_and_register(&to->clkevt, timer_of_rate(to),
					1, /* min */
					0x1fffffff + 1); /* max 29 bits + 1 */

	return 0;
}

static int tegra_timer_stop(unsigned int cpu)
{
	struct timer_of *to = per_cpu_ptr(&tegra_to, cpu);

	to->clkevt.set_state_shutdown(&to->clkevt);
	disable_irq_nosync(to->clkevt.irq);

	return 0;
}

static u64 notrace tegra_read_sched_clock(void)
{
	return readl_relaxed(timer_reg_base + TIMERUS_CNTR_1US);
}

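/*
 * Note (added for clarity): on 32-bit ARM the delay_timer below lets
 * udelay() poll the free-running 1 MHz microsecond counter instead of
 * relying on a calibrated software delay loop.
 */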
#ifdef CONFIG_ARM
static unsigned long tegra_delay_timer_read_counter_long(void)
{
	return readl_relaxed(timer_reg_base + TIMERUS_CNTR_1US);
}

static struct delay_timer tegra_delay_timer = {
	.read_current_timer = tegra_delay_timer_read_counter_long,
	.freq = TIMER_1MHz,
};
#endif

static struct timer_of suspend_rtc_to = {
	.flags = TIMER_OF_BASE | TIMER_OF_CLOCK,
};

/*
 * tegra_rtc_read_ms - Reads the Tegra RTC registers.
 * Care must be taken that this function is not called while the
 * tegra_rtc driver could be executing, to avoid race conditions
 * on the RTC shadow register.
 */
static u64 tegra_rtc_read_ms(struct clocksource *cs)
{
	void __iomem *reg_base = timer_of_base(&suspend_rtc_to);

	u32 ms = readl_relaxed(reg_base + RTC_MILLISECONDS);
	u32 s = readl_relaxed(reg_base + RTC_SHADOW_SECONDS);

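	/*
	 * Note (added for clarity): reading RTC_MILLISECONDS latches the
	 * seconds value into RTC_SHADOW_SECONDS, so reading the
	 * milliseconds register first yields a coherent pair of values.
	 */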
	return (u64)s * MSEC_PER_SEC + ms;
}

static struct clocksource suspend_rtc_clocksource = {
	.name = "tegra_suspend_timer",
	.rating = 200,
	.read = tegra_rtc_read_ms,
	.mask = CLOCKSOURCE_MASK(32),
	.flags = CLOCK_SOURCE_IS_CONTINUOUS | CLOCK_SOURCE_SUSPEND_NONSTOP,
};

static inline unsigned int tegra_base_for_cpu(int cpu, bool tegra20)
{
	if (tegra20) {
		switch (cpu) {
		case 0:
			return TIMER1_BASE;
		case 1:
			return TIMER2_BASE;
		case 2:
			return TIMER3_BASE;
		default:
			return TIMER4_BASE;
		}
	}

	return TIMER10_BASE + cpu * 8;
}

static inline unsigned int tegra_irq_idx_for_cpu(int cpu, bool tegra20)
{
	if (tegra20)
		return TIMER1_IRQ_IDX + cpu;

	return TIMER10_IRQ_IDX + cpu;
}

static inline unsigned long tegra_rate_for_timer(struct timer_of *to,
						 bool tegra20)
{
	/*
	 * TIMER1-9 are fixed to 1MHz, TIMER10-13 are running off the
	 * parent clock.
	 */
	if (tegra20)
		return TIMER_1MHz;

	return timer_of_rate(to);
}

static int __init tegra_init_timer(struct device_node *np, bool tegra20,
				   int rating)
{
	struct timer_of *to;
	int cpu, ret;

	to = this_cpu_ptr(&tegra_to);
	ret = timer_of_init(np, to);
	if (ret)
		return ret;

	timer_reg_base = timer_of_base(to);

	/*
	 * Configure the microsecond counter to run at 1MHz.
	 * The config register is 0xqqww, where qq is the "dividend" and
	 * ww is the "divisor"; both use the n+1 scheme.
	 */
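	/*
	 * Worked example (added for clarity): a 19.2 MHz clock is divided
	 * by 96/5 to obtain 1 MHz, so dividend = 96 - 1 = 95 (0x5f) and
	 * divisor = 5 - 1 = 4 (0x04), i.e. usec_config = 0x045f.
	 */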
	switch (timer_of_rate(to)) {
	case 12000000:
		usec_config = 0x000b; /* (11+1)/(0+1) */
		break;
	case 12800000:
		usec_config = 0x043f; /* (63+1)/(4+1) */
		break;
	case 13000000:
		usec_config = 0x000c; /* (12+1)/(0+1) */
		break;
	case 16800000:
		usec_config = 0x0453; /* (83+1)/(4+1) */
		break;
	case 19200000:
		usec_config = 0x045f; /* (95+1)/(4+1) */
		break;
	case 26000000:
		usec_config = 0x0019; /* (25+1)/(0+1) */
		break;
	case 38400000:
		usec_config = 0x04bf; /* (191+1)/(4+1) */
		break;
	case 48000000:
		usec_config = 0x002f; /* (47+1)/(0+1) */
		break;
	default:
		ret = -EINVAL;
		goto out;
	}

	writel_relaxed(usec_config, timer_reg_base + TIMERUS_USEC_CFG);

	for_each_possible_cpu(cpu) {
		struct timer_of *cpu_to = per_cpu_ptr(&tegra_to, cpu);
		unsigned long flags = IRQF_TIMER | IRQF_NOBALANCING;
		unsigned long rate = tegra_rate_for_timer(to, tegra20);
		unsigned int base = tegra_base_for_cpu(cpu, tegra20);
		unsigned int idx = tegra_irq_idx_for_cpu(cpu, tegra20);
		unsigned int irq = irq_of_parse_and_map(np, idx);

310 pr_err("failed to map irq for cpu%d\n", cpu
);
		cpu_to->clkevt.irq = irq;
		cpu_to->clkevt.rating = rating;
		cpu_to->clkevt.cpumask = cpumask_of(cpu);
		cpu_to->of_base.base = timer_reg_base + base;
		cpu_to->of_clk.period = rate / HZ;
		cpu_to->of_clk.rate = rate;

		irq_set_status_flags(cpu_to->clkevt.irq, IRQ_NOAUTOEN);

		ret = request_irq(cpu_to->clkevt.irq, tegra_timer_isr, flags,
				  cpu_to->clkevt.name, &cpu_to->clkevt);
327 pr_err("failed to set up irq for cpu%d: %d\n",
			irq_dispose_mapping(cpu_to->clkevt.irq);
			cpu_to->clkevt.irq = 0;
			goto out_irq;
		}
	}

	sched_clock_register(tegra_read_sched_clock, 32, TIMER_1MHz);

	ret = clocksource_mmio_init(timer_reg_base + TIMERUS_CNTR_1US,
				    "timer_us", TIMER_1MHz, 300, 32,
				    clocksource_mmio_readl_up);
341 pr_err("failed to register clocksource: %d\n", ret
);
#ifdef CONFIG_ARM
	register_current_timer_delay(&tegra_delay_timer);
#endif

	ret = cpuhp_setup_state(CPUHP_AP_TEGRA_TIMER_STARTING,
				"AP_TEGRA_TIMER_STARTING", tegra_timer_setup,
				tegra_timer_stop);
351 pr_err("failed to set up cpu hp state: %d\n", ret
);
	for_each_possible_cpu(cpu) {
		struct timer_of *cpu_to;

		cpu_to = per_cpu_ptr(&tegra_to, cpu);
		if (cpu_to->clkevt.irq) {
			free_irq(cpu_to->clkevt.irq, &cpu_to->clkevt);
			irq_dispose_mapping(cpu_to->clkevt.irq);
		}
	}

	to->of_base.base = timer_reg_base;
out:
	timer_of_cleanup(to);

	return ret;
}

static int __init tegra210_init_timer(struct device_node *np)
{
	/*
	 * The arch-timer does not survive a power cycle of the CPU core or
	 * the CPUPORESET signal due to a system design shortcoming, hence
	 * the tegra-timer is preferable on Tegra210.
	 */
	return tegra_init_timer(np, false, 460);
}

TIMER_OF_DECLARE(tegra210_timer, "nvidia,tegra210-timer", tegra210_init_timer);

static int __init tegra20_init_timer(struct device_node *np)
{
	int rating;

	/*
	 * Tegra20 and Tegra30 have a Cortex-A9 CPU with a TWD timer. That
	 * timer runs off the CPU clock and hence is subject to jitter
	 * caused by DVFS clock-rate changes, so the tegra-timer is
	 * preferable on these older Tegra SoCs. Later SoC generations use
	 * the arch-timer as the main per-CPU timer, which is not affected
	 * by DVFS changes.
	 */
	if (of_machine_is_compatible("nvidia,tegra20") ||
	    of_machine_is_compatible("nvidia,tegra30"))
		rating = 460; /* prefer the tegra-timer over the TWD */
	else
		rating = 330; /* the arch-timer is the preferred clockevent */

	return tegra_init_timer(np, true, rating);
}

TIMER_OF_DECLARE(tegra20_timer, "nvidia,tegra20-timer", tegra20_init_timer);

static int __init tegra20_init_rtc(struct device_node *np)
{
	int ret;

	ret = timer_of_init(np, &suspend_rtc_to);
	if (ret)
		return ret;

	return clocksource_register_hz(&suspend_rtc_clocksource, 1000);
}

TIMER_OF_DECLARE(tegra20_rtc, "nvidia,tegra20-rtc", tegra20_init_rtc);