/*
 * Copyright 2002-2011, Haiku. All rights reserved.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */


/*! Policy info for timers */

#include <timer.h>

#include <OS.h>

#include <arch/timer.h>
#include <boot/kernel_args.h>
#include <cpu.h>
#include <debug.h>
#include <elf.h>
#include <real_time_clock.h>
#include <smp.h>
#include <util/AutoLock.h>

struct per_cpu_timer_data {
	spinlock		lock;
	timer* volatile	events;
	timer* volatile	current_event;
	int32			current_event_in_progress;
	bigtime_t		real_time_offset;
};

static per_cpu_timer_data sPerCPU[SMP_MAX_CPUS];
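
// Design note (summarizing the code below): every CPU owns one event queue,
// kept sorted by ascending schedule_time and guarded by the per-CPU spinlock.
// The queue head is what the hardware timer gets programmed with, so
// timer_interrupt() only ever pops expired events from the front.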

//#define TRACE_TIMER
#ifdef TRACE_TIMER
#	define TRACE(x) dprintf x
#else
#	define TRACE(x) ;
#endif


/*!	Sets the hardware timer to the given absolute time.

	\param scheduleTime The absolute system time for the timer expiration.
	\param now The current system time.
*/
static void
set_hardware_timer(bigtime_t scheduleTime, bigtime_t now)
{
	arch_timer_set_hardware_timer(scheduleTime > now ? scheduleTime - now : 0);
}


/*!	Sets the hardware timer to the given absolute time.

	\param scheduleTime The absolute system time for the timer expiration.
*/
static inline void
set_hardware_timer(bigtime_t scheduleTime)
{
	set_hardware_timer(scheduleTime, system_time());
}


/*! NOTE: expects interrupts to be off */
static void
add_event_to_list(timer* event, timer* volatile* list)
{
	timer* next;
	timer* last = NULL;

	// stick it in the event list, keeping the list sorted by schedule_time
	for (next = *list; next; last = next, next = (timer*)next->next) {
		if ((bigtime_t)next->schedule_time >= (bigtime_t)event->schedule_time)
			break;
	}

	if (last != NULL) {
		// insert after the last event that is due earlier
		event->next = last->next;
		last->next = event;
	} else {
		// the event is due first -- it becomes the new list head
		event->next = next;
		*list = event;
	}
}
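
// Insertion is a linear walk, so queueing a timer is O(n) in the number of
// timers pending on this CPU; in exchange, timer_interrupt() dispatches the
// next expiration in O(1) from the list head.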


static void
per_cpu_real_time_clock_changed(void*, int cpu)
{
	per_cpu_timer_data& cpuData = sPerCPU[cpu];
	SpinLocker cpuDataLocker(cpuData.lock);

	bigtime_t realTimeOffset = rtc_boot_time();
	if (realTimeOffset == cpuData.real_time_offset)
		return;

	// The real time offset has changed. We need to update all affected
	// timers. First find and dequeue them.
	bigtime_t timeDiff = cpuData.real_time_offset - realTimeOffset;
	cpuData.real_time_offset = realTimeOffset;

	timer* affectedTimers = NULL;
	timer* volatile* it = &cpuData.events;
	timer* firstEvent = *it;
	while (timer* event = *it) {
		// check whether it's an absolute real-time timer
		uint32 flags = event->flags;
		if ((flags & ~B_TIMER_FLAGS) != B_ONE_SHOT_ABSOLUTE_TIMER
			|| (flags & B_TIMER_REAL_TIME_BASE) == 0) {
			it = &event->next;
			continue;
		}

		// Yep, remove the timer from the queue and add it to the
		// affectedTimers list.
		*it = event->next;
		event->next = affectedTimers;
		affectedTimers = event;
	}

	// update and requeue the affected timers
	bool firstEventChanged = cpuData.events != firstEvent;
	firstEvent = cpuData.events;

	while (affectedTimers != NULL) {
		timer* event = affectedTimers;
		affectedTimers = event->next;

		bigtime_t oldTime = event->schedule_time;
		event->schedule_time += timeDiff;

		// handle over-/underflows
		if (timeDiff >= 0) {
			if (event->schedule_time < oldTime)
				event->schedule_time = B_INFINITE_TIMEOUT;
		} else {
			if (event->schedule_time < 0)
				event->schedule_time = 0;
		}

		add_event_to_list(event, &cpuData.events);
	}

	firstEventChanged |= cpuData.events != firstEvent;

	// If the first event has changed, reset the hardware timer.
	if (firstEventChanged)
		set_hardware_timer(cpuData.events->schedule_time);
}
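
// Worked example of the shift above (numbers made up for illustration): if
// the wall clock is set forward by 5 seconds, rtc_boot_time() grows by
// 5000000 us, so timeDiff is -5000000 and every absolute real-time timer's
// system-time deadline moves 5 seconds earlier -- it still fires at the same
// wall-clock moment.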


// #pragma mark - debugging


static int
dump_timers(int argc, char** argv)
{
	int32 cpuCount = smp_get_num_cpus();
	for (int32 i = 0; i < cpuCount; i++) {
		kprintf("CPU %" B_PRId32 ":\n", i);

		if (sPerCPU[i].events == NULL) {
			kprintf("  no timers scheduled\n");
			continue;
		}

		for (timer* event = sPerCPU[i].events; event != NULL;
				event = event->next) {
			kprintf("  [%9lld] %p: ", (long long)event->schedule_time, event);
			if ((event->flags & ~B_TIMER_FLAGS) == B_PERIODIC_TIMER)
				kprintf("periodic %9lld, ", (long long)event->period);
			else
				kprintf("one shot, ");

			kprintf("flags: %#x, user data: %p, callback: %p ",
				event->flags, event->user_data, event->hook);

			// look up and print the hook function symbol
			const char* symbol;
			const char* imageName;
			bool exactMatch;

			status_t error = elf_debug_lookup_symbol_address(
				(addr_t)event->hook, NULL, &symbol, &imageName, &exactMatch);
			if (error == B_OK && exactMatch) {
				// strip the path, keep only the image name
				if (const char* slash = strchr(imageName, '/'))
					imageName = slash + 1;

				kprintf(" %s:%s", imageName, symbol);
			}

			kprintf("\n");
		}
	}

	kprintf("current time: %lld\n", (long long)system_time());

	return 0;
}


// #pragma mark - kernel-private


status_t
timer_init(kernel_args* args)
{
	TRACE(("timer_init: entry\n"));

	if (arch_init_timer(args) != B_OK)
		panic("arch_init_timer() failed");

	add_debugger_command_etc("timers", &dump_timers, "List all timers",
		"\n"
		"Prints a list of all scheduled timers.\n", 0);

	return B_OK;
}


void
timer_init_post_rtc(void)
{
	bigtime_t realTimeOffset = rtc_boot_time();

	int32 cpuCount = smp_get_num_cpus();
	for (int32 i = 0; i < cpuCount; i++)
		sPerCPU[i].real_time_offset = realTimeOffset;
}


void
timer_real_time_clock_changed()
{
	call_all_cpus(&per_cpu_real_time_clock_changed, NULL);
}


int32
timer_interrupt()
{
	timer* event;
	spinlock* spinlock;
	per_cpu_timer_data& cpuData = sPerCPU[smp_get_current_cpu()];
	int32 rc = B_HANDLED_INTERRUPT;

	TRACE(("timer_interrupt: time %lld, cpu %ld\n", system_time(),
		smp_get_current_cpu()));

	spinlock = &cpuData.lock;

	acquire_spinlock(spinlock);

	event = cpuData.events;
	while (event != NULL && ((bigtime_t)event->schedule_time < system_time())) {
		// this event needs to happen
		int mode = event->flags;

		cpuData.events = (timer*)event->next;
		cpuData.current_event = event;
		atomic_set(&cpuData.current_event_in_progress, 1);

		release_spinlock(spinlock);

		TRACE(("timer_interrupt: calling hook %p for event %p\n", event->hook,
			event));

		// call the callback
		// note: if the event is not periodic, it is ok
		// to delete the event structure inside the callback
		rc = event->hook(event);

		atomic_set(&cpuData.current_event_in_progress, 0);

		acquire_spinlock(spinlock);

		if ((mode & ~B_TIMER_FLAGS) == B_PERIODIC_TIMER
			&& cpuData.current_event != NULL) {
			// we need to adjust it and add it back to the list
			event->schedule_time += event->period;

			// If the new schedule time is a full interval or more in the
			// past, skip the missed ticks. E.g. with period 10 and
			// schedule_time 100 at now == 135, we reschedule to
			// 135 - (35 % 10) = 130.
			bigtime_t now = system_time();
			if (now >= event->schedule_time + event->period) {
				// pick the closest tick in the past
				event->schedule_time = now
					- (now - event->schedule_time) % event->period;
			}

			add_event_to_list(event, &cpuData.events);
		}

		cpuData.current_event = NULL;

		event = cpuData.events;
	}

	// setup the next hardware timer
	if (cpuData.events != NULL)
		set_hardware_timer(cpuData.events->schedule_time);

	release_spinlock(spinlock);

	return rc;
}


// #pragma mark - public API
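
/*
 * Minimal usage sketch for the API below (hypothetical driver code, not part
 * of this file); the hook runs in interrupt context and must not block:
 *
 *	static timer sMyTimer;
 *
 *	static int32
 *	my_hook(timer* event)
 *	{
 *		// do something quick
 *		return B_HANDLED_INTERRUPT;
 *	}
 *
 *	// fire every 100 ms
 *	add_timer(&sMyTimer, &my_hook, 100000LL, B_PERIODIC_TIMER);
 *	...
 *	cancel_timer(&sMyTimer);
 */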


status_t
add_timer(timer* event, timer_hook hook, bigtime_t period, int32 flags)
{
	bigtime_t currentTime = system_time();
	cpu_status state;

	if (event == NULL || hook == NULL || period < 0)
		return B_BAD_VALUE;

	TRACE(("add_timer: event %p\n", event));

	// compute the schedule time
	bigtime_t scheduleTime;
	if ((flags & B_TIMER_USE_TIMER_STRUCT_TIMES) != 0) {
		scheduleTime = event->schedule_time;
		period = event->period;
	} else {
		scheduleTime = period;
		if ((flags & ~B_TIMER_FLAGS) != B_ONE_SHOT_ABSOLUTE_TIMER)
			scheduleTime += currentTime;
		event->schedule_time = (int64)scheduleTime;
		event->period = period;
	}

	event->hook = hook;
	event->flags = flags;

	state = disable_interrupts();
	int currentCPU = smp_get_current_cpu();
	per_cpu_timer_data& cpuData = sPerCPU[currentCPU];
	acquire_spinlock(&cpuData.lock);

	// If the timer is an absolute real-time base timer, convert the schedule
	// time to system time.
	if ((flags & ~B_TIMER_FLAGS) == B_ONE_SHOT_ABSOLUTE_TIMER
		&& (flags & B_TIMER_REAL_TIME_BASE) != 0) {
		if (event->schedule_time > cpuData.real_time_offset)
			event->schedule_time -= cpuData.real_time_offset;
		else
			event->schedule_time = 0;
	}

	add_event_to_list(event, &cpuData.events);
	event->cpu = currentCPU;

	// if the new event ended up at the head of the list, reprogram the
	// hardware timer
	if (event == cpuData.events)
		set_hardware_timer(scheduleTime, currentTime);

	release_spinlock(&cpuData.lock);
	restore_interrupts(state);

	return B_OK;
}


/*!	Cancels the given timer, if it has not fired yet.
	\return \c true if the timer has already fired or its hook is currently
		being executed, \c false if it was removed before firing.
*/
bool
cancel_timer(timer* event)
{
	TRACE(("cancel_timer: event %p\n", event));

	InterruptsLocker _;

	// lock the right CPU spinlock
	int cpu = event->cpu;
	SpinLocker spinLocker;
	while (true) {
		if (cpu >= SMP_MAX_CPUS)
			return false;

		spinLocker.SetTo(sPerCPU[cpu].lock, false);
		if (cpu == event->cpu)
			break;

		// cpu field changed while we were trying to lock
		spinLocker.Unlock();
		cpu = event->cpu;
	}

	per_cpu_timer_data& cpuData = sPerCPU[cpu];

	if (event != cpuData.current_event) {
		// The timer hook is not yet being executed.
		timer* current = cpuData.events;
		timer* last = NULL;

		while (current != NULL) {
			if (current == event) {
				// unlink the event from the queue
				if (last == NULL)
					cpuData.events = current->next;
				else
					last->next = current->next;
				current->next = NULL;
				// break out of the whole thing
				break;
			}

			last = current;
			current = current->next;
		}

		// If not found, we assume this was a one-shot timer and has already
		// fired.
		if (current == NULL)
			return true;

		// invalidate CPU field
		event->cpu = 0xffff;

		// If on the current CPU, also reset the hardware timer.
		if (cpu == smp_get_current_cpu()) {
			if (cpuData.events == NULL)
				arch_timer_clear_hardware_timer();
			else
				set_hardware_timer(cpuData.events->schedule_time);
		}

		return false;
	}

	// The timer hook is currently being executed. We clear the current
	// event so that timer_interrupt() will not reschedule periodic timers.
	cpuData.current_event = NULL;

	// We have to wait for the hook to complete -- unless we were called from
	// the timer hook itself on this very CPU, of course.
	if (cpu != smp_get_current_cpu()) {
		while (atomic_get(&cpuData.current_event_in_progress) == 1)
			cpu_wait(&cpuData.current_event_in_progress, 0);
	}

	return true;
}


/*!	Busy-waits for the given number of microseconds. */
void
spin(bigtime_t microseconds)
{
	bigtime_t time = system_time();

	while ((system_time() - time) < microseconds)
		cpu_pause();
}