/*
 * Copyright (C) 2016-17 Synopsys, Inc. (www.synopsys.com)
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

/* ARC700 has two 32bit independent prog Timers: TIMER0 and TIMER1. Each can be
 * programmed to go from @count to @limit and optionally interrupt.
 * We've designated TIMER0 for clockevents and TIMER1 for clocksource
 *
 * ARCv2 based HS38 cores have RTC (in-core) and GFRC (inside ARConnect/MCIP)
 * which are suitable for UP and SMP based clocksources respectively
 */

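/*
 * Illustrative sketch only (not part of this file): based on the compatible
 * strings declared below and on what the setup code reads from each node
 * (interrupt 0 via irq_of_parse_and_map(), clock 0 via of_clk_get()), a board
 * DT might describe the TIMER0/TIMER1 pair roughly as:
 *
 *	timer0 {
 *		compatible = "snps,arc-timer";
 *		interrupts = <TIMER0_IRQ>;	// placeholder, SoC specific
 *		clocks = <&core_clk>;		// placeholder phandle
 *	};
 *
 *	timer1 {
 *		compatible = "snps,arc-timer";
 *		clocks = <&core_clk>;
 *	};
 *
 * Exact cells and properties are governed by the platform's DT bindings.
 */
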
#include <linux/interrupt.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/cpu.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/sched_clock.h>

#include <soc/arc/timers.h>
#include <soc/arc/mcip.h>

static unsigned long arc_timer_freq;

static int noinline arc_get_timer_clk(struct device_node *node)
{
	struct clk *clk;
	int ret;

	clk = of_clk_get(node, 0);
	if (IS_ERR(clk)) {
		pr_err("timer missing clk\n");
		return PTR_ERR(clk);
	}

	ret = clk_prepare_enable(clk);
	if (ret) {
		pr_err("Couldn't enable parent clk\n");
		return ret;
	}

	arc_timer_freq = clk_get_rate(clk);

	return 0;
}

/********** Clock Source Device *********/

#ifdef CONFIG_ARC_TIMERS_64BIT

static u64 arc_read_gfrc(struct clocksource *cs)
{
	unsigned long flags;
	u32 l, h;

	/*
	 * From a programming model pov, there seems to be just one instance of
	 * MCIP_CMD/MCIP_READBACK however micro-architecturally there's
	 * an instance PER ARC CORE (not per cluster), and there is dedicated
	 * hardware decode logic (per core) inside ARConnect to handle
	 * simultaneous read/write accesses from cores via those two registers.
	 * So several concurrent commands to ARConnect are OK if they are
	 * trying to access two different sub-components (like GFRC,
	 * inter-core interrupt, etc...). HW also supports simultaneously
	 * accessing GFRC by multiple cores.
	 * That's why it is safe to disable hard interrupts on the local CPU
	 * before access to GFRC instead of taking global MCIP spinlock
	 * defined in arch/arc/kernel/mcip.c
	 */
	local_irq_save(flags);

	__mcip_cmd(CMD_GFRC_READ_LO, 0);
	l = read_aux_reg(ARC_REG_MCIP_READBACK);

	__mcip_cmd(CMD_GFRC_READ_HI, 0);
	h = read_aux_reg(ARC_REG_MCIP_READBACK);

	local_irq_restore(flags);

	return (((u64)h) << 32) | l;
}

static notrace u64 arc_gfrc_clock_read(void)
{
	return arc_read_gfrc(NULL);
}

static struct clocksource arc_counter_gfrc = {
	.name	= "ARConnect GFRC",
	.read	= arc_read_gfrc,
	.mask	= CLOCKSOURCE_MASK(64),
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
};

static int __init arc_cs_setup_gfrc(struct device_node *node)
{
	struct mcip_bcr mp;
	int ret;

	READ_BCR(ARC_REG_MCIP_BCR, mp);
	if (!mp.gfrc) {
		pr_warn("Global-64-bit-Ctr clocksource not detected\n");
		return -ENXIO;
	}

	ret = arc_get_timer_clk(node);
	if (ret)
		return ret;

	sched_clock_register(arc_gfrc_clock_read, 64, arc_timer_freq);

	return clocksource_register_hz(&arc_counter_gfrc, arc_timer_freq);
}
TIMER_OF_DECLARE(arc_gfrc, "snps,archs-timer-gfrc", arc_cs_setup_gfrc);

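/*
 * Descriptive note (not in the original source): TIMER_OF_DECLARE() ties the
 * init routine above to its DT compatible string, so the timer core's early
 * probe path (timer_probe()) invokes it during boot, before the regular
 * device/driver model is available. The RTC and TIMER0/TIMER1 entries further
 * below use the same mechanism.
 */
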
#define AUX_RTC_CTRL	0x103
#define AUX_RTC_LOW	0x104
#define AUX_RTC_HIGH	0x105

static u64 arc_read_rtc(struct clocksource *cs)
{
	unsigned long status;
	u32 l, h;

	/*
	 * hardware has an internal state machine which tracks readout of
	 * low/high and updates the CTRL.status if
	 *  - interrupt/exception taken between the two reads
	 *  - high increments after low has been read
	 */
	do {
		l = read_aux_reg(AUX_RTC_LOW);
		h = read_aux_reg(AUX_RTC_HIGH);
		status = read_aux_reg(AUX_RTC_CTRL);
	} while (!(status & _BITUL(31)));

	return (((u64)h) << 32) | l;
}

static notrace u64 arc_rtc_clock_read(void)
{
	return arc_read_rtc(NULL);
}

static struct clocksource arc_counter_rtc = {
	.name	= "ARCv2 RTC",
	.read	= arc_read_rtc,
	.mask	= CLOCKSOURCE_MASK(64),
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
};

static int __init arc_cs_setup_rtc(struct device_node *node)
{
	struct bcr_timer timer;
	int ret;

	READ_BCR(ARC_REG_TIMERS_BCR, timer);
	if (!timer.rtc) {
		pr_warn("Local-64-bit-Ctr clocksource not detected\n");
		return -ENXIO;
	}

	/* Local to CPU hence not usable in SMP */
	if (IS_ENABLED(CONFIG_SMP)) {
		pr_warn("Local-64-bit-Ctr not usable in SMP\n");
		return -EINVAL;
	}

	ret = arc_get_timer_clk(node);
	if (ret)
		return ret;

	write_aux_reg(AUX_RTC_CTRL, 1);

	sched_clock_register(arc_rtc_clock_read, 64, arc_timer_freq);

	return clocksource_register_hz(&arc_counter_rtc, arc_timer_freq);
}
TIMER_OF_DECLARE(arc_rtc, "snps,archs-timer-rtc", arc_cs_setup_rtc);

#endif	/* CONFIG_ARC_TIMERS_64BIT */

/*
 * 32bit TIMER1 to keep counting monotonically and wraparound
 */

static u64 arc_read_timer1(struct clocksource *cs)
{
	return (u64) read_aux_reg(ARC_REG_TIMER1_CNT);
}

static notrace u64 arc_timer1_clock_read(void)
{
	return arc_read_timer1(NULL);
}

static struct clocksource arc_counter_timer1 = {
	.name	= "ARC Timer1",
	.read	= arc_read_timer1,
	.mask	= CLOCKSOURCE_MASK(32),
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
};

static int __init arc_cs_setup_timer1(struct device_node *node)
{
	int ret;

	/* Local to CPU hence not usable in SMP */
	if (IS_ENABLED(CONFIG_SMP))
		return -EINVAL;

	ret = arc_get_timer_clk(node);
	if (ret)
		return ret;

	/* Program TIMER1 as a free running counter: full range, no interrupt */
	write_aux_reg(ARC_REG_TIMER1_LIMIT, ARC_TIMERN_MAX);
	write_aux_reg(ARC_REG_TIMER1_CNT, 0);
	write_aux_reg(ARC_REG_TIMER1_CTRL, TIMER_CTRL_NH);

	sched_clock_register(arc_timer1_clock_read, 32, arc_timer_freq);

	return clocksource_register_hz(&arc_counter_timer1, arc_timer_freq);
}

/********** Clock Event Device *********/

static int arc_timer_irq;

/*
 * Arm the timer to interrupt after @cycles
 * The distinction for oneshot/periodic is done in timer_irq_handler() below
 */
static void arc_timer_event_setup(unsigned int cycles)
{
	write_aux_reg(ARC_REG_TIMER0_LIMIT, cycles);
	write_aux_reg(ARC_REG_TIMER0_CNT, 0);	/* start from 0 */

	write_aux_reg(ARC_REG_TIMER0_CTRL, TIMER_CTRL_IE | TIMER_CTRL_NH);
}

static int arc_clkevent_set_next_event(unsigned long delta,
				       struct clock_event_device *dev)
{
	arc_timer_event_setup(delta);
	return 0;
}

static int arc_clkevent_set_periodic(struct clock_event_device *dev)
{
	/*
	 * At X Hz, 1 sec = 1000ms -> X cycles;
	 *		      10ms -> X / 100 cycles
	 */
	arc_timer_event_setup(arc_timer_freq / HZ);
	return 0;
}

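/*
 * Worked example (assumed numbers, not from this file): with a 100 MHz timer
 * clock and HZ = 100, the periodic tick above is programmed as
 * 100000000 / 100 = 1000000 timer cycles, i.e. a 10 ms period.
 */
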
static DEFINE_PER_CPU(struct clock_event_device, arc_clockevent_device) = {
	.name			= "ARC Timer0",
	.features		= CLOCK_EVT_FEAT_ONESHOT |
				  CLOCK_EVT_FEAT_PERIODIC,
	.set_next_event		= arc_clkevent_set_next_event,
	.set_state_periodic	= arc_clkevent_set_periodic,
};

static irqreturn_t timer_irq_handler(int irq, void *dev_id)
{
	/*
	 * Note that generic IRQ core could have passed @evt for @dev_id if
	 * irq_set_chip_and_handler() asked for handle_percpu_devid_irq()
	 */
	struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device);
	int irq_reenable = clockevent_state_periodic(evt);

	/*
	 * 1. ACK the interrupt
	 *    - For ARC700, any write to CTRL reg ACKs it, so just rewrite
	 *      Count when [N]ot [H]alted bit.
	 *    - For HS3x, it is a bit subtle. On taken count-down interrupt,
	 *      IP bit [3] is set, which needs to be cleared for ACK'ing.
	 *      The write below can only update the other two bits, hence
	 *      explicitly clears IP bit
	 * 2. Re-arm interrupt if periodic by writing to IE bit [0]
	 */
	write_aux_reg(ARC_REG_TIMER0_CTRL, irq_reenable | TIMER_CTRL_NH);

	evt->event_handler(evt);

	return IRQ_HANDLED;
}

static int arc_timer_starting_cpu(unsigned int cpu)
{
	struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device);

	evt->cpumask = cpumask_of(smp_processor_id());

	clockevents_config_and_register(evt, arc_timer_freq, 0, ARC_TIMERN_MAX);
	enable_percpu_irq(arc_timer_irq, 0);
	return 0;
}

static int arc_timer_dying_cpu(unsigned int cpu)
{
	disable_percpu_irq(arc_timer_irq);
	return 0;
}

/*
 * clockevent setup for boot CPU
 */
static int __init arc_clockevent_setup(struct device_node *node)
{
	struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device);
	int ret;

	arc_timer_irq = irq_of_parse_and_map(node, 0);
	if (arc_timer_irq <= 0) {
		pr_err("clockevent: missing irq\n");
		return -EINVAL;
	}

	ret = arc_get_timer_clk(node);
	if (ret) {
		pr_err("clockevent: missing clk\n");
		return ret;
	}

	/* Needs apriori irq_set_percpu_devid() done in intc map function */
	ret = request_percpu_irq(arc_timer_irq, timer_irq_handler,
				 "Timer0 (per-cpu-tick)", evt);
	if (ret) {
		pr_err("clockevent: unable to request irq\n");
		return ret;
	}

	ret = cpuhp_setup_state(CPUHP_AP_ARC_TIMER_STARTING,
				"clockevents/arc/timer:starting",
				arc_timer_starting_cpu,
				arc_timer_dying_cpu);
	if (ret) {
		pr_err("Failed to setup hotplug state\n");
		return ret;
	}

	return 0;
}

static int __init arc_of_timer_init(struct device_node *np)
{
	static int init_count = 0;
	int ret;

	if (!init_count) {
		init_count = 1;
		ret = arc_clockevent_setup(np);
	} else {
		ret = arc_cs_setup_timer1(np);
	}

	return ret;
}
TIMER_OF_DECLARE(arc_clkevt, "snps,arc-timer", arc_of_timer_init);