1 /* This file contains the clock task, which handles time related functions.
2 * Important events that are handled by the CLOCK include setting and
3 * monitoring alarm timers and deciding when to (re)schedule processes.
4 * The CLOCK offers a direct interface to kernel processes. System services
5 * can access its services through system calls, such as sys_setalarm(). The
6 * CLOCK task thus is hidden from the outside world.
9 * Aug 18, 2006 removed direct hardware access etc, MinixPPC (Ingmar Alting)
10 * Oct 08, 2005 reordering and comment editing (A. S. Woodhull)
11 * Mar 18, 2004 clock interface moved to SYSTEM task (Jorrit N. Herder)
12 * Sep 30, 2004 source code documentation updated (Jorrit N. Herder)
13 * Sep 24, 2004 redesigned alarm timers (Jorrit N. Herder)
15 * Clock task is notified by the clock's interrupt handler when a timer
* has expired.
18 * In addition to the main clock_task() entry point, which starts the main
19 * loop, there are several other minor entry points:
20 * clock_stop: called just before MINIX shutdown
21 * get_realtime: get wall time since boot in clock ticks
22 * set_realtime: set wall time since boot in clock ticks
23 * set_adjtime_delta: set the number of ticks to adjust realtime
24 * get_monotonic: get monotonic time since boot in clock ticks
25 * set_kernel_timer: set a watchdog timer (+)
26 * reset_kernel_timer: reset a watchdog timer (+)
27 * read_clock: read the counter of channel 0 of the 8253A timer
29 * (+) The CLOCK task keeps track of watchdog timers for the entire kernel.
30 * It is crucial that watchdog functions not block, or the CLOCK task may
31 * be blocked. Do not send() a message when the receiver is not expecting it.
32 * Instead, notify(), which always returns, should be used.
35 #include "kernel/kernel.h"
36 #include <minix/endpoint.h>
45 /* Function prototype for PRIVATE functions.
47 static void load_update(void);
49 /* The CLOCK's timers queue. The functions in <minix/timers.h> operate on this.
50 * Each system process possesses a single synchronous alarm timer. If other
51 * kernel parts want to use additional timers, they must declare their own
52 * persistent (static) timer structure, which can be passed to the clock
53 * via (re)set_kernel_timer().
54 * When a timer expires its watchdog function is run by the CLOCK task.
56 static minix_timer_t
*clock_timers
; /* queue of CLOCK timers */
57 static clock_t next_timeout
; /* monotonic time that next timer expires */
59 /* The time is incremented by the interrupt handler on each clock tick.
61 static clock_t monotonic
= 0;
63 /* Reflects the wall time and may be slowed/sped up by using adjclock()
65 static clock_t realtime
= 0;
67 /* Number of ticks to adjust realtime by. A negative value implies slowing
68 * down realtime, a positive value implies speeding it up.
70 static int32_t adjtime_delta
= 0;
73 * The boot processor's timer interrupt handler. In addition to non-boot cpus
74 * it keeps real time and notifies the clock task if need be.
76 int timer_int_handler(void)
/* Per-tick timer interrupt handler, run on every CPU. On all CPUs it bumps
 * the local watchdog tick counter; on the boot CPU it additionally applies
 * any pending adjtime() correction to the wall clock and runs expired
 * kernel timers. Returns 1 so the caller reenables interrupts.
 * NOTE(review): this extraction is missing several lines (opening braces,
 * the monotonic/realtime tick increments, the p_user_time/p_sys_time
 * accounting and the p_virt_left/p_prof_left decrements, among others) --
 * compare against the upstream source before editing.
 */
78 /* Update user and system accounting times. Charge the current process
79 * for user time. If the current process is not billable, that is, if a
80 * non-user process is running, charge the billable process for system
81 * time as well. Thus the unbillable process' user time is the billable
85 struct proc
* p
, * billp
;
87 /* FIXME watchdog for slave cpus! */
/* Bump the per-CPU tick counter so an external watchdog can tell whether
 * local timer interrupts are still arriving. */
90 * we need to know whether local timer ticks are happening or whether
91 * the kernel is locked up. We don't care about overflows as we only
92 * need to know that it's still ticking or not
94 watchdog_local_timer_ticks
++;
/* Only the boot processor maintains the global clocks. */
97 if (cpu_is_bsp(cpuid
)) {
100 /* if adjtime_delta has ticks remaining, apply one to realtime.
101 * limit changes to every other interrupt.
103 if (adjtime_delta
!= 0 && monotonic
& 0x1) {
104 /* go forward or stay behind */
105 realtime
+= (adjtime_delta
> 0) ? 2 : 0;
106 adjtime_delta
+= (adjtime_delta
> 0) ? -1 : +1;
112 /* Update user and system accounting times. Charge the current process
113 * for user time. If the current process is not billable, that is, if a
114 * non-user process is running, charge the billable process for system
115 * time as well. Thus the unbillable process' user time is the billable
116 * user's system time.
119 p
= get_cpulocal_var(proc_ptr
);
120 billp
= get_cpulocal_var(bill_ptr
);
/* Current process is a system service: its time is billed to bill_ptr. */
124 if (! (priv(p
)->s_flags
& BILLABLE
)) {
128 /* Decrement virtual timers, if applicable. We decrement both the
129 * virtual and the profile timer of the current process, and if the
130 * current process is not billable, the timer of the billed process as
131 * well. If any of the timers expire, do_clocktick() will send out
134 if ((p
->p_misc_flags
& MF_VIRT_TIMER
) && (p
->p_virt_left
> 0)) {
137 if ((p
->p_misc_flags
& MF_PROF_TIMER
) && (p
->p_prof_left
> 0)) {
140 if (! (priv(p
)->s_flags
& BILLABLE
) &&
141 (billp
->p_misc_flags
& MF_PROF_TIMER
) &&
142 (billp
->p_prof_left
> 0)) {
143 billp
->p_prof_left
--;
147 * Check if a process-virtual timer expired. Check current process, but
148 * also bill_ptr - one process's user time is another's system time, and
149 * the profile timer decreases for both!
156 /* Update load average. */
159 if (cpu_is_bsp(cpuid
)) {
160 /* if a timer expired, notify the clock task */
/* Expired kernel timers run their watchdog functions here, in interrupt
 * context; next_timeout is refreshed from the new queue head. */
161 if ((next_timeout
<= monotonic
)) {
162 tmrs_exptimers(&clock_timers
, monotonic
, NULL
);
163 next_timeout
= (clock_timers
== NULL
) ?
164 TMR_NEVER
: clock_timers
->tmr_exp_time
;
168 if (kinfo
.do_serial_debug
)
174 arch_timer_int_handler();
176 return(1); /* reenable interrupts */
179 /*===========================================================================*
181 *===========================================================================*/
182 clock_t get_realtime(void)
184 /* Get and return the current wall time in ticks since boot. */
/* NOTE(review): the function body (presumably 'return realtime;') is not
 * visible in this extraction -- verify against the upstream source. */
188 /*===========================================================================*
190 *===========================================================================*/
191 void set_realtime(clock_t newrealtime
)
193 realtime
= newrealtime
;
196 /*===========================================================================*
197 * set_adjtime_delta *
198 *===========================================================================*/
199 void set_adjtime_delta(int32_t ticks
)
201 adjtime_delta
= ticks
;
204 /*===========================================================================*
206 *===========================================================================*/
207 clock_t get_monotonic(void)
209 /* Get and return the number of ticks since boot. */
/* NOTE(review): the function body (presumably 'return monotonic;') is not
 * visible in this extraction -- verify against the upstream source. */
213 /*===========================================================================*
215 *===========================================================================*/
216 void set_kernel_timer(tp
, exp_time
, watchdog
)
217 minix_timer_t
*tp
; /* pointer to timer structure */
218 clock_t exp_time
; /* expiration monotonic time */
219 tmr_func_t watchdog
; /* watchdog to be called */
221 /* Insert the new timer in the active timers list. Always update the
222 * next timeout time by setting it to the front of the active list.
224 tmrs_settimer(&clock_timers
, tp
, exp_time
, watchdog
, NULL
);
225 next_timeout
= clock_timers
->tmr_exp_time
;
228 /*===========================================================================*
229 * reset_kernel_timer *
230 *===========================================================================*/
231 void reset_kernel_timer(tp
)
232 minix_timer_t
*tp
; /* pointer to timer structure */
234 /* The timer pointed to by 'tp' is no longer needed. Remove it from both the
235 * active and expired lists. Always update the next timeout time by setting
236 * it to the front of the active list.
238 tmrs_clrtimer(&clock_timers
, tp
, NULL
);
239 next_timeout
= (clock_timers
== NULL
) ?
240 TMR_NEVER
: clock_timers
->tmr_exp_time
;
243 /*===========================================================================*
245 *===========================================================================*/
246 static void load_update(void)
/* Accumulate a sample of the run-queue length into the circular load-average
 * history buffer and stamp kloadinfo with the current monotonic time.
 * NOTE(review): this extraction is missing the declarations of 'slot', 'q',
 * 'p' and 'enqueued', the increment of 'enqueued' inside the inner loop, and
 * several closing braces -- compare against the upstream source before
 * editing.
 */
251 struct proc
**rdy_head
;
253 /* Load average data is stored as a list of numbers in a circular
254 * buffer. Each slot accumulates _LOAD_UNIT_SECS of samples of
255 * the number of runnable processes. Computations can then
256 * be made of the load average over variable periods, in the
257 * user library (see getloadavg(3)).
/* Pick the history slot for the current _LOAD_UNIT_SECS interval. */
259 slot
= (monotonic
/ system_hz
/ _LOAD_UNIT_SECS
) % _LOAD_HISTORY
;
/* Entering a new interval: reset its accumulator before sampling into it. */
260 if(slot
!= kloadinfo
.proc_last_slot
) {
261 kloadinfo
.proc_load_history
[slot
] = 0;
262 kloadinfo
.proc_last_slot
= slot
;
/* Walk this CPU's ready queues to count runnable processes. */
265 rdy_head
= get_cpulocal_var(run_q_head
);
266 /* Cumulation. How many processes are ready now? */
267 for(q
= 0; q
< NR_SCHED_QUEUES
; q
++) {
268 for(p
= rdy_head
[q
]; p
!= NULL
; p
= p
->p_nextready
) {
273 kloadinfo
.proc_load_history
[slot
] += enqueued
;
275 /* Up-to-dateness. */
/* Record when this sample was taken. */
276 kloadinfo
.last_clock
= monotonic
;
279 int boot_cpu_init_timer(unsigned freq
)
/* Program the boot CPU's local timer to 'freq' Hz and install
 * timer_int_handler as its interrupt handler.
 * NOTE(review): the braces and return statements are missing from this
 * extraction; each failing call presumably makes the function return an
 * error -- verify against the upstream source before editing.
 */
281 if (init_local_timer(freq
))
284 if (register_local_timer_handler(
285 (irq_handler_t
) timer_int_handler
))
291 int app_cpu_init_timer(unsigned freq
)
293 if (init_local_timer(freq
))