// SPDX-License-Identifier: GPL-2.0-only
/*
 * apb_timer.c: Driver for Langwell APB timers
 *
 * (C) Copyright 2009 Intel Corporation
 * Author: Jacob Pan (jacob.jun.pan@intel.com)
 *
 * Note:
 * Langwell is the south complex of Intel Moorestown MID platform. There are
 * eight external timers in total that can be used by the operating system.
 * The timer information, such as frequency and addresses, is provided to the
 * OS via SFI tables.
 * Timer interrupts are routed via FW/HW emulated IOAPIC independently via
 * individual redirection table entries (RTE).
 * Unlike HPET, there is no master counter, therefore one of the timers is
 * used as the clocksource. The overall allocation looks like:
 *  - timer 0 - NR_CPUs for per cpu timer
 *  - one timer for clocksource
 *  - one timer for watchdog driver.
 * It is also worth noting that the APB timer does not support true one-shot
 * mode; free-running mode is used here to emulate one-shot mode.
 * APB timer can also be used as broadcast timer along with per cpu local APIC
 * timer, but by default APB timer has higher rating than local APIC timers.
 */
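/*
 * For example, on a two-CPU Moorestown part this would typically mean timers
 * 0 and 1 serving as the per-CPU clockevent devices, timer 2 (see
 * APBT_CLOCKSOURCE_NUM below) serving as the clocksource, and one further
 * timer reserved for the watchdog driver; the exact layout is discovered at
 * boot from the SFI MTMR entries.
 */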
#include <linux/delay.h>
#include <linux/dw_apb_timer.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/pm.h>
#include <linux/sfi.h>
#include <linux/interrupt.h>
#include <linux/cpu.h>
#include <linux/irq.h>

#include <asm/fixmap.h>
#include <asm/apb_timer.h>
#include <asm/intel-mid.h>
#define APBT_CLOCKEVENT_RATING		110
#define APBT_CLOCKSOURCE_RATING		250

#define APBT_CLOCKEVENT0_NUM	(0)
#define APBT_CLOCKSOURCE_NUM	(2)
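/*
 * Timer 0 is the boot clockevent timer; timer 2 is reserved as the
 * free-running clocksource (see the "use timer 2 as source, timer 0/1 for
 * event" note in apbt_clocksource_register() below).
 */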
static phys_addr_t apbt_address;
static int apb_timer_block_enabled;
static void __iomem *apbt_virt_address;

/*
 * Common DW APB timer info
 */
static unsigned long apbt_freq;

struct apbt_dev {
	struct dw_apb_clock_event_device	*timer;
	unsigned int				num;
	int					cpu;
	unsigned int				irq;
	char					name[10];
};

static struct dw_apb_clocksource *clocksource_apbt;
static inline void __iomem *adev_virt_addr(struct apbt_dev *adev)
{
	return apbt_virt_address + adev->num * APBTMRS_REG_SIZE;
}

static DEFINE_PER_CPU(struct apbt_dev, cpu_apbt_dev);

#ifdef CONFIG_SMP
static unsigned int apbt_num_timers_used;
#endif
static inline void apbt_set_mapping(void)
{
	struct sfi_timer_table_entry *mtmr;
	int phy_cs_timer_id = 0;

	if (apbt_virt_address) {
		pr_debug("APBT base already mapped\n");
		return;
	}
	mtmr = sfi_get_mtmr(APBT_CLOCKEVENT0_NUM);
	if (mtmr == NULL) {
		printk(KERN_ERR "Failed to get MTMR %d from SFI\n",
		       APBT_CLOCKEVENT0_NUM);
		return;
	}
	apbt_address = (phys_addr_t)mtmr->phys_addr;
	if (!apbt_address) {
		printk(KERN_WARNING "No timer base from SFI, use default\n");
		apbt_address = APBT_DEFAULT_BASE;
	}
	apbt_virt_address = ioremap(apbt_address, APBT_MMAP_SIZE);
	if (!apbt_virt_address) {
		pr_debug("Failed mapping APBT phy address at %lu\n",
			 (unsigned long)apbt_address);
		goto panic_noapbt;
	}
	apbt_freq = mtmr->freq_hz;
	sfi_free_mtmr(mtmr);
	/* Now figure out the physical timer id for the clocksource device */
	mtmr = sfi_get_mtmr(APBT_CLOCKSOURCE_NUM);
	if (mtmr == NULL)
		goto panic_noapbt;

	pr_debug("Use timer %d for clocksource\n",
		 (int)(mtmr->phys_addr & 0xff) / APBTMRS_REG_SIZE);
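	/*
	 * Each timer's register block is APBTMRS_REG_SIZE bytes, so the low
	 * byte of the MTMR physical address identifies which timer within
	 * the block acts as the clocksource.
	 */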
	phy_cs_timer_id = (unsigned int)(mtmr->phys_addr & 0xff) /
		APBTMRS_REG_SIZE;

	clocksource_apbt = dw_apb_clocksource_init(APBT_CLOCKSOURCE_RATING,
		"apbt0", apbt_virt_address + phy_cs_timer_id *
		APBTMRS_REG_SIZE, apbt_freq);
	return;

panic_noapbt:
	panic("Failed to setup APB system timer\n");
}
static inline void apbt_clear_mapping(void)
{
	iounmap(apbt_virt_address);
	apbt_virt_address = NULL;
}
static int __init apbt_clockevent_register(void)
{
	struct sfi_timer_table_entry *mtmr;
	struct apbt_dev *adev = this_cpu_ptr(&cpu_apbt_dev);

	mtmr = sfi_get_mtmr(APBT_CLOCKEVENT0_NUM);
	if (mtmr == NULL) {
		printk(KERN_ERR "Failed to get MTMR %d from SFI\n",
		       APBT_CLOCKEVENT0_NUM);
		return -ENODEV;
	}
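	/*
	 * When the command line selects LAPIC timers with APB broadcast
	 * (INTEL_MID_TIMER_LAPIC_APBT), drop the APB rating by 100 so the
	 * per-CPU local APIC timers win and this device is used only as the
	 * broadcast/global clockevent.
	 */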
	adev->num = smp_processor_id();
	adev->timer = dw_apb_clockevent_init(smp_processor_id(), "apbt0",
		intel_mid_timer_options == INTEL_MID_TIMER_LAPIC_APBT ?
		APBT_CLOCKEVENT_RATING - 100 : APBT_CLOCKEVENT_RATING,
		adev_virt_addr(adev), 0, apbt_freq);
	/* Firmware does EOI handling for us. */
	adev->timer->eoi = NULL;

	if (intel_mid_timer_options == INTEL_MID_TIMER_LAPIC_APBT) {
		global_clock_event = &adev->timer->ced;
		printk(KERN_DEBUG "%s clockevent registered as global\n",
		       global_clock_event->name);
	}

	dw_apb_clockevent_register(adev->timer);

	sfi_free_mtmr(mtmr);
	return 0;
}

#ifdef CONFIG_SMP
static void apbt_setup_irq(struct apbt_dev *adev)
{
	irq_modify_status(adev->irq, 0, IRQ_MOVE_PCNTXT);
	irq_set_affinity(adev->irq, cpumask_of(adev->cpu));
}
/* Should be called with per cpu */
void apbt_setup_secondary_clock(void)
{
	struct apbt_dev *adev;
	int cpu;

	/* Don't register boot CPU clockevent */
	cpu = smp_processor_id();
	if (!cpu)
		return;

	adev = this_cpu_ptr(&cpu_apbt_dev);
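	/*
	 * If this CPU's clockevent was already set up once (the CPU is
	 * coming back online, e.g. after S0i3), just resume the existing
	 * device instead of re-initializing it.
	 */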
	if (!adev->timer) {
		adev->timer = dw_apb_clockevent_init(cpu, adev->name,
			APBT_CLOCKEVENT_RATING, adev_virt_addr(adev),
			adev->irq, apbt_freq);
		adev->timer->eoi = NULL;
	} else {
		dw_apb_clockevent_resume(adev->timer);
	}

	printk(KERN_INFO "Registering CPU %d clockevent device %s, cpu %08x\n",
	       cpu, adev->name, adev->cpu);

	apbt_setup_irq(adev);
	dw_apb_clockevent_register(adev->timer);
}
/*
 * This notify handler processes CPU hotplug events. In case of S0i3, non-boot
 * CPUs are disabled/enabled frequently; for performance reasons, we keep the
 * per-CPU timer IRQ registered so that we do not need to do
 * free_irq/request_irq.
 *
 * TODO: it might be more reliable to directly disable the per-CPU clockevent
 * device without the notifier chain. Currently, CPU 0 may get interrupts from
 * other CPU timers during the offline process due to the ordering of
 * notification. The extra interrupt is harmless.
 */
static int apbt_cpu_dead(unsigned int cpu)
{
	struct apbt_dev *adev = &per_cpu(cpu_apbt_dev, cpu);

	dw_apb_clockevent_pause(adev->timer);
	if (system_state == SYSTEM_RUNNING) {
		pr_debug("skipping APBT CPU %u offline\n", cpu);
	} else {
		pr_debug("APBT clockevent for cpu %u offline\n", cpu);
		dw_apb_clockevent_stop(adev->timer);
	}
	return 0;
}
static __init int apbt_late_init(void)
{
	if (intel_mid_timer_options == INTEL_MID_TIMER_LAPIC_APBT ||
	    !apb_timer_block_enabled)
		return 0;
	return cpuhp_setup_state(CPUHP_X86_APB_DEAD, "x86/apb:dead", NULL,
				 apbt_cpu_dead);
}
fs_initcall(apbt_late_init);

#else

void apbt_setup_secondary_clock(void) {}

#endif /* CONFIG_SMP */
static int apbt_clocksource_register(void)
{
	u64 start, now;
	u64 t1;

	/* Start the counter, use timer 2 as source, timer 0/1 for event */
	dw_apb_clocksource_start(clocksource_apbt);

	/* Verify whether apbt counter works */
	t1 = dw_apb_clocksource_read(clocksource_apbt);
	start = rdtsc();

	/*
	 * We don't know the TSC frequency yet, but waiting for
	 * 200000 TSC cycles is safe:
	 * 4 GHz == 50us
	 * 1 GHz == 200us
	 */
	do {
		rep_nop();
		now = rdtsc();
	} while ((now - start) < 200000UL);

	/* APBT is the only always-on clocksource, it has to work! */
	if (t1 == dw_apb_clocksource_read(clocksource_apbt))
		panic("APBT counter not counting. APBT disabled\n");

	dw_apb_clocksource_register(clocksource_apbt);

	return 0;
}
/*
 * Early setup of the APBT timer: only timer 0 is used for booting, then we
 * switch to per-CPU timers if possible.
 * On success, apb_timer_block_enabled is set and the per-CPU timer data is
 * initialized; panic if setup fails, as this is the only platform timer on
 * Moorestown.
 */
void __init apbt_time_init(void)
{
#ifdef CONFIG_SMP
	int i;
	struct sfi_timer_table_entry *p_mtmr;
	struct apbt_dev *adev;
#endif
	if (apb_timer_block_enabled)
		return;
	apbt_set_mapping();
	if (!apbt_virt_address)
		goto out_noapbt;
	/*
	 * Read the frequency and check for a sane value; for the ESL model
	 * we extend the possible clock range to allow time scaling.
	 */
	if (apbt_freq < APBT_MIN_FREQ || apbt_freq > APBT_MAX_FREQ) {
		pr_debug("APBT has invalid freq 0x%lx\n", apbt_freq);
		goto out_noapbt;
	}
	if (apbt_clocksource_register()) {
		pr_debug("APBT has failed to register clocksource\n");
		goto out_noapbt;
	}
	if (!apbt_clockevent_register())
		apb_timer_block_enabled = 1;
	else {
		pr_debug("APBT has failed to register clockevent\n");
		goto out_noapbt;
	}
#ifdef CONFIG_SMP
	/* kernel cmdline disabled the apb timer, so we will use lapic timers */
	if (intel_mid_timer_options == INTEL_MID_TIMER_LAPIC_APBT) {
		printk(KERN_INFO "apbt: disabled per cpu timer\n");
		return;
	}
	pr_debug("%s: %d CPUs online\n", __func__, num_online_cpus());
	if (num_possible_cpus() <= sfi_mtimer_num)
		apbt_num_timers_used = num_possible_cpus();
	else
		apbt_num_timers_used = 1;
	pr_debug("%s: %d APB timers used\n", __func__, apbt_num_timers_used);

	/* here we set up the per CPU timer data structure */
	for (i = 0; i < apbt_num_timers_used; i++) {
		adev = &per_cpu(cpu_apbt_dev, i);
		adev->num = i;
		adev->cpu = i;
		p_mtmr = sfi_get_mtmr(i);
		if (p_mtmr)
			adev->irq = p_mtmr->irq;
		else
			printk(KERN_ERR "Failed to get timer for cpu %d\n", i);
		snprintf(adev->name, sizeof(adev->name) - 1, "apbt%d", i);
	}
#endif

	return;

out_noapbt:
	apbt_clear_mapping();
	apb_timer_block_enabled = 0;
	panic("failed to enable APB timer\n");
}
/* called before apb_timer_enable, use early map */
unsigned long apbt_quick_calibrate(void)
{
	int i, scale;
	u64 old, new;
	u64 t1, t2;
	unsigned long khz = 0;
	u32 loop, shift;

	apbt_set_mapping();
	dw_apb_clocksource_start(clocksource_apbt);

	/* check if the timer can count down, otherwise return */
	old = dw_apb_clocksource_read(clocksource_apbt);
	i = 10000;
	while (--i) {
		if (old != dw_apb_clocksource_read(clocksource_apbt))
			break;
	}
	if (i == 0)
		goto failed;

	/* count 16 ms */
	loop = (apbt_freq / 1000) << 4;
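	/*
	 * apbt_freq / 1000 is the number of APB ticks per millisecond, so
	 * shifting left by 4 gives a calibration window of roughly 16 ms.
	 */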
	/* restart the timer to ensure it won't reach 0 during the calibration */
	dw_apb_clocksource_start(clocksource_apbt);

	old = dw_apb_clocksource_read(clocksource_apbt);
	old += loop;

	t1 = rdtsc();

	do {
		new = dw_apb_clocksource_read(clocksource_apbt);
	} while (new < old);

	t2 = rdtsc();

	shift = 5;
	if (unlikely(loop >> shift == 0)) {
		printk(KERN_INFO
		       "APBT TSC calibration failed, not enough resolution\n");
		return 0;
	}
	scale = (int)div_u64((t2 - t1), loop >> shift);
	khz = (scale * (apbt_freq / 1000)) >> shift;
	printk(KERN_INFO "TSC freq calculated by APB timer is %lu khz\n", khz);
	return khz;
failed:
	return 0;
}