/*
 * Copyright 2002-2011, Haiku. All rights reserved.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */

/*! Policy info for timers */


#include <timer.h>

#include <OS.h>
#include <string.h>
	// strrchr(), used by the "timers" debugger command

#include <arch/timer.h>
#include <boot/kernel_args.h>
#include <cpu.h>
#include <debug.h>
#include <elf.h>
#include <real_time_clock.h>
#include <smp.h>
#include <thread.h>
#include <util/AutoLock.h>

struct per_cpu_timer_data {
	spinlock		lock;
	timer* volatile	events;
	timer* volatile	current_event;
	int32			current_event_in_progress;
	bigtime_t		real_time_offset;
};

static per_cpu_timer_data sPerCPU[SMP_MAX_CPUS];

//#define TRACE_TIMER
#ifdef TRACE_TIMER
#	define TRACE(x) dprintf x
#else
#	define TRACE(x) ;
#endif

/*!	Sets the hardware timer to the given absolute time.

	\param scheduleTime The absolute system time for the timer expiration.
	\param now The current system time.
*/
static void
set_hardware_timer(bigtime_t scheduleTime, bigtime_t now)
{
	arch_timer_set_hardware_timer(scheduleTime > now ? scheduleTime - now : 0);
}

/*!	Sets the hardware timer to the given absolute time, reading the current
	system time itself.

	\param scheduleTime The absolute system time for the timer expiration.
*/
static inline void
set_hardware_timer(bigtime_t scheduleTime)
{
	set_hardware_timer(scheduleTime, system_time());
}

/*!	NOTE: expects interrupts to be off */
static void
add_event_to_list(timer* event, timer* volatile* list)
{
	timer* next;
	timer* last = NULL;

	// stick it in the event list
	for (next = *list; next; last = next, next = (timer*)next->next) {
		if ((bigtime_t)next->schedule_time >= (bigtime_t)event->schedule_time)
			break;
	}

	if (last != NULL) {
		event->next = last->next;
		last->next = event;
	} else {
		event->next = next;
		*list = event;
	}
}
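
// Example (illustrative values): inserting events with schedule times 300,
// 100, and 200 via add_event_to_list() yields a list ordered by ascending
// schedule_time (100, 200, 300), so the head is always the next timer to
// expire. Because the loop stops at the first entry with an equal or later
// time, a new event that ties an existing entry's schedule_time is queued
// in front of it.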

static void
per_cpu_real_time_clock_changed(void*, int cpu)
{
	per_cpu_timer_data& cpuData = sPerCPU[cpu];
	SpinLocker cpuDataLocker(cpuData.lock);

	bigtime_t realTimeOffset = rtc_boot_time();
	if (realTimeOffset == cpuData.real_time_offset)
		return;

	// The real time offset has changed. We need to update all affected
	// timers. First find and dequeue them.
	bigtime_t timeDiff = cpuData.real_time_offset - realTimeOffset;
	cpuData.real_time_offset = realTimeOffset;

	timer* affectedTimers = NULL;
	timer* volatile* it = &cpuData.events;
	timer* firstEvent = *it;
	while (timer* event = *it) {
		// check whether it's an absolute real-time timer
		uint32 flags = event->flags;
		if ((flags & ~B_TIMER_FLAGS) != B_ONE_SHOT_ABSOLUTE_TIMER
			|| (flags & B_TIMER_REAL_TIME_BASE) == 0) {
			it = &event->next;
			continue;
		}

		// Yep, remove the timer from the queue and add it to the
		// affectedTimers list.
		*it = event->next;
		event->next = affectedTimers;
		affectedTimers = event;
	}

	// update and requeue the affected timers
	bool firstEventChanged = cpuData.events != firstEvent;
	firstEvent = cpuData.events;

	while (affectedTimers != NULL) {
		timer* event = affectedTimers;
		affectedTimers = event->next;

		bigtime_t oldTime = event->schedule_time;
		event->schedule_time += timeDiff;

		// handle over-/underflows
		if (timeDiff >= 0) {
			if (event->schedule_time < oldTime)
				event->schedule_time = B_INFINITE_TIMEOUT;
		} else {
			if (event->schedule_time < 0)
				event->schedule_time = 0;
		}

		add_event_to_list(event, &cpuData.events);
	}

	firstEventChanged |= cpuData.events != firstEvent;

	// If the first event has changed, reset the hardware timer.
	if (firstEventChanged)
		set_hardware_timer(cpuData.events->schedule_time);
}
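
// Worked example with invented numbers: say the boot-time offset was 1000
// and a B_TIMER_REAL_TIME_BASE absolute timer is queued at system time 5000
// (real time 6000). If the real-time clock is turned forward so that
// rtc_boot_time() now returns 1200, then timeDiff is 1000 - 1200 = -200 and
// the timer's schedule_time becomes 4800, which is the same real-time
// instant (4800 + 1200 = 6000) under the new offset.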

// #pragma mark - debugging


static int
dump_timers(int argc, char** argv)
{
	int32 cpuCount = smp_get_num_cpus();
	for (int32 i = 0; i < cpuCount; i++) {
		kprintf("CPU %" B_PRId32 ":\n", i);

		if (sPerCPU[i].events == NULL) {
			kprintf("  no timers scheduled\n");
			continue;
		}

		for (timer* event = sPerCPU[i].events; event != NULL;
				event = event->next) {
			kprintf("  [%9lld] %p: ", (long long)event->schedule_time, event);
			if ((event->flags & ~B_TIMER_FLAGS) == B_PERIODIC_TIMER)
				kprintf("periodic %9lld, ", (long long)event->period);
			else
				kprintf("one shot, ");

			kprintf("flags: %#x, user data: %p, callback: %p ",
				event->flags, event->user_data, event->hook);

			// look up and print the hook function symbol
			const char* symbol;
			const char* imageName;
			bool exactMatch;

			status_t error = elf_debug_lookup_symbol_address(
				(addr_t)event->hook, NULL, &symbol, &imageName, &exactMatch);
			if (error == B_OK && exactMatch) {
				// print only the base name of the image
				if (const char* slash = strrchr(imageName, '/'))
					imageName = slash + 1;

				kprintf(" %s:%s", imageName, symbol);
			}

			kprintf("\n");
		}
	}

	kprintf("current time: %lld\n", (long long)system_time());

	return 0;
}
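
// Sample output of the "timers" KDL command (addresses and times invented,
// format derived from the kprintf() calls above; each timer is printed on
// a single line, wrapped here to fit):
//
//	CPU 0:
//	  [ 12345678] 0x82f01234: periodic    100000, flags: 0x3,
//	      user data: 0x00000000, callback: 0x800b1c40  kernel_x86:my_hook
//	current time: 12300000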

// #pragma mark - kernel-private


status_t
timer_init(kernel_args* args)
{
	TRACE(("timer_init: entry\n"));

	if (arch_init_timer(args) != B_OK)
		panic("arch_init_timer() failed");

	add_debugger_command_etc("timers", &dump_timers, "List all timers",
		"\n"
		"Prints a list of all scheduled timers.\n", 0);

	return B_OK;
}

void
timer_init_post_rtc(void)
{
	bigtime_t realTimeOffset = rtc_boot_time();

	int32 cpuCount = smp_get_num_cpus();
	for (int32 i = 0; i < cpuCount; i++)
		sPerCPU[i].real_time_offset = realTimeOffset;
}

void
timer_real_time_clock_changed()
{
	call_all_cpus(&per_cpu_real_time_clock_changed, NULL);
}

int32
timer_interrupt()
{
	timer* event;
	spinlock* spinlock;
	per_cpu_timer_data& cpuData = sPerCPU[smp_get_current_cpu()];
	int32 rc = B_HANDLED_INTERRUPT;

	TRACE(("timer_interrupt: time %lld, cpu %ld\n", system_time(),
		smp_get_current_cpu()));

	spinlock = &cpuData.lock;

	acquire_spinlock(spinlock);

	event = cpuData.events;
	while (event != NULL && ((bigtime_t)event->schedule_time < system_time())) {
		// this event needs to happen
		int mode = event->flags;

		cpuData.events = (timer*)event->next;
		cpuData.current_event = event;
		atomic_set(&cpuData.current_event_in_progress, 1);

		release_spinlock(spinlock);

		TRACE(("timer_interrupt: calling hook %p for event %p\n", event->hook,
			event));

		// call the callback
		// note: if the event is not periodic, it is ok
		// to delete the event structure inside the callback
		if (event->hook)
			rc = event->hook(event);

		atomic_set(&cpuData.current_event_in_progress, 0);

		acquire_spinlock(spinlock);

		if ((mode & ~B_TIMER_FLAGS) == B_PERIODIC_TIMER
			&& cpuData.current_event != NULL) {
			// we need to adjust it and add it back to the list
			event->schedule_time += event->period;

			// If the new schedule time is a full interval or more in the past,
			// skip ticks.
			bigtime_t now = system_time();
			if (now >= event->schedule_time + event->period) {
				// pick the closest tick in the past
				event->schedule_time = now
					- (now - event->schedule_time) % event->period;
			}
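
			// Worked example with invented numbers: period 100 and
			// schedule_time already advanced to 250 at now 1000. Since
			// 1000 >= 250 + 100 the timer is multiple periods late, and
			// 1000 - (1000 - 250) % 100 = 950 picks the latest tick of
			// the 250, 350, ..., 950 series that is not in the future.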

			add_event_to_list(event, &cpuData.events);
		}

		cpuData.current_event = NULL;

		event = cpuData.events;
	}

	// setup the next hardware timer
	if (cpuData.events != NULL)
		set_hardware_timer(cpuData.events->schedule_time);

	release_spinlock(spinlock);

	return rc;
}

// #pragma mark - public API


status_t
add_timer(timer* event, timer_hook hook, bigtime_t period, int32 flags)
{
	bigtime_t currentTime = system_time();
	cpu_status state;

	if (event == NULL || hook == NULL || period < 0)
		return B_BAD_VALUE;

	TRACE(("add_timer: event %p\n", event));

	// compute the schedule time
	bigtime_t scheduleTime;
	if ((flags & B_TIMER_USE_TIMER_STRUCT_TIMES) != 0) {
		scheduleTime = event->schedule_time;
		period = event->period;
	} else {
		scheduleTime = period;
		if ((flags & ~B_TIMER_FLAGS) != B_ONE_SHOT_ABSOLUTE_TIMER)
			scheduleTime += currentTime;
		event->schedule_time = (int64)scheduleTime;
		event->period = period;
	}

	event->hook = hook;
	event->flags = flags;

	state = disable_interrupts();
	int currentCPU = smp_get_current_cpu();
	per_cpu_timer_data& cpuData = sPerCPU[currentCPU];
	acquire_spinlock(&cpuData.lock);

	// If the timer is an absolute real-time base timer, convert the schedule
	// time to system time.
	if ((flags & ~B_TIMER_FLAGS) == B_ONE_SHOT_ABSOLUTE_TIMER
		&& (flags & B_TIMER_REAL_TIME_BASE) != 0) {
		if (event->schedule_time > cpuData.real_time_offset)
			event->schedule_time -= cpuData.real_time_offset;
		else
			event->schedule_time = 0;
	}

	add_event_to_list(event, &cpuData.events);
	event->cpu = currentCPU;

	// if the new event ended up at the head of the list, update the
	// hardware timer
	if (event == cpuData.events)
		set_hardware_timer(scheduleTime, currentTime);

	release_spinlock(&cpuData.lock);
	restore_interrupts(state);

	return B_OK;
}
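
// Usage sketch (hypothetical caller, invented names): a driver typically
// embeds a timer in its own state and schedules it like this:
//
//	static int32
//	my_timer_hook(timer* event)
//	{
//		// runs in interrupt context; keep it short
//		return B_HANDLED_INTERRUPT;
//	}
//
//	static timer sMyTimer;
//	// invoke my_timer_hook() every 100000 us (100 ms)
//	add_timer(&sMyTimer, &my_timer_hook, 100000, B_PERIODIC_TIMER);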

bool
cancel_timer(timer* event)
{
	TRACE(("cancel_timer: event %p\n", event));

	InterruptsLocker _;

	// lock the spinlock of the right CPU
	int cpu = event->cpu;
	SpinLocker spinLocker;
	while (true) {
		if (cpu >= SMP_MAX_CPUS)
			return false;

		spinLocker.SetTo(sPerCPU[cpu].lock, false);
		if (cpu == event->cpu)
			break;

		// the cpu field changed while we were trying to lock
		spinLocker.Unlock();
		cpu = event->cpu;
	}

	per_cpu_timer_data& cpuData = sPerCPU[cpu];

	if (event != cpuData.current_event) {
		// The timer hook is not yet being executed.
		timer* current = cpuData.events;
		timer* last = NULL;

		while (current != NULL) {
			if (current == event) {
				// we found it
				if (last == NULL)
					cpuData.events = current->next;
				else
					last->next = current->next;
				current->next = NULL;
				// break out of the whole thing
				break;
			}

			last = current;
			current = current->next;
		}

		// If not found, we assume this was a one-shot timer that has already
		// fired.
		if (current == NULL)
			return true;

		// invalidate the CPU field
		event->cpu = 0xffff;

		// If on the current CPU, also reset the hardware timer.
		if (cpu == smp_get_current_cpu()) {
			if (cpuData.events == NULL)
				arch_timer_clear_hardware_timer();
			else
				set_hardware_timer(cpuData.events->schedule_time);
		}

		return false;
	}

	// The timer hook is currently being executed. We clear the current
	// event so that timer_interrupt() will not reschedule periodic timers.
	cpuData.current_event = NULL;

	// We'll have to wait for the hook to complete. When called from the
	// timer hook itself we don't wait, of course.
	if (cpu != smp_get_current_cpu()) {
		spinLocker.Unlock();

		while (atomic_get(&cpuData.current_event_in_progress) == 1)
			cpu_wait(&cpuData.current_event_in_progress, 0);
	}

	return true;
}
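
// Usage sketch (hypothetical, continuing the add_timer() example above):
//
//	bool firedOrRunning = cancel_timer(&sMyTimer);
//
// A true result means the hook had already fired or was running when the
// timer was cancelled. When called from outside the hook, the timer struct
// may safely be reused or freed afterwards, since cancel_timer() waits for
// a hook running on another CPU to complete.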

void
spin(bigtime_t microseconds)
{
	bigtime_t time = system_time();

	while ((system_time() - time) < microseconds)
		cpu_pause();
}
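
// Usage sketch (hypothetical values): spin() busy-waits and burns CPU time,
// so it is only appropriate for very short delays, e.g. giving hardware a
// few microseconds to settle:
//
//	my_device_reset(device);	// hypothetical driver call
//	spin(10);					// give the hardware 10 us to settle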