[minix3-old.git] / kernel / clock.c
/* This file contains the clock task, which handles time related functions.
 * Important events that are handled by the CLOCK include setting and
 * monitoring alarm timers and deciding when to (re)schedule processes.
 * The CLOCK offers a direct interface to kernel processes. System services
 * can access its services through system calls, such as sys_setalarm(). The
 * CLOCK task thus is hidden from the outside world.
 *
 * Changes:
 *   Aug 18, 2006   removed direct hardware access etc., MinixPPC (Ingmar Alting)
 *   Oct 08, 2005   reordering and comment editing (A. S. Woodhull)
 *   Mar 18, 2004   clock interface moved to SYSTEM task (Jorrit N. Herder)
 *   Sep 30, 2004   source code documentation updated (Jorrit N. Herder)
 *   Sep 24, 2004   redesigned alarm timers (Jorrit N. Herder)
 *
 * The function do_clocktick() is triggered by the clock's interrupt
 * handler when a watchdog timer has expired or a process must be scheduled.
 *
 * In addition to the main clock_task() entry point, which starts the main
 * loop, there are several other minor entry points:
 *   clock_stop:	called just before MINIX shutdown
 *   get_uptime:	get realtime since boot in clock ticks
 *   set_timer:		set a watchdog timer (+)
 *   reset_timer:	reset a watchdog timer (+)
 *   read_clock:	read the counter of channel 0 of the 8253A timer
 *
 * (+) The CLOCK task keeps track of watchdog timers for the entire kernel.
 * The watchdog functions of expired timers are executed in do_clocktick().
 * It is crucial that watchdog functions not block, or the CLOCK task may
 * be blocked. Do not send() a message when the receiver is not expecting it.
 * Instead, notify(), which always returns, should be used.
 */
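/* Illustrative sketch (editor's addition, not part of the original file):
 * how a system service might request a synchronous alarm through the
 * sys_setalarm() kernel call mentioned above and then wait for the alarm
 * notification. The five-second interval is made up, and the exact syslib
 * signature and notification type should be checked against the headers.
 */
#if 0
  message m;

  sys_setalarm(5 * HZ, 0);	/* alarm in 5 seconds, relative to "now" */
  receive(ANY, &m);		/* the alarm arrives as a notification */
  if (m.m_type == SYN_ALARM) {
	/* handle the expired synchronous alarm here */
  }
#endif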
#include "kernel.h"
#include "proc.h"
#include <signal.h>
#include <minix/com.h>
/* Function prototypes for PRIVATE functions.
 */
FORWARD _PROTOTYPE( void init_clock, (void) );
FORWARD _PROTOTYPE( int clock_handler, (irq_hook_t *hook) );
FORWARD _PROTOTYPE( void do_clocktick, (message *m_ptr) );
FORWARD _PROTOTYPE( void load_update, (void) );
/* The CLOCK's timers queue. The functions in <timers.h> operate on this.
 * Each system process possesses a single synchronous alarm timer. If other
 * kernel parts want to use additional timers, they must declare their own
 * persistent (static) timer structure, which can be passed to the clock
 * via (re)set_timer().
 * When a timer expires its watchdog function is run by the CLOCK task.
 */
PRIVATE timer_t *clock_timers;		/* queue of CLOCK timers */
PRIVATE clock_t next_timeout;		/* realtime that next timer expires */
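/* Illustrative sketch (editor's addition): the usage pattern described in
 * the comment above, for a kernel part that wants its own periodic timer.
 * The names example_timer and example_watchdog are hypothetical; only
 * set_timer(), get_uptime(), HZ and the rule that a watchdog must never
 * block are taken from this file.
 */
#if 0
PRIVATE timer_t example_timer;		/* persistent (static) timer structure */

PRIVATE void example_watchdog(tp)
struct timer *tp;			/* the timer that just expired */
{
/* Runs in the CLOCK task when the timer expires. It must not block, so it
 * only does quick bookkeeping and re-arms itself for one second later.
 */
  set_timer(&example_timer, get_uptime() + HZ, example_watchdog);
}
#endif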
/* The time is incremented by the interrupt handler on each clock tick.
 */
PRIVATE clock_t realtime = 0;		/* real time clock */
PRIVATE irq_hook_t clock_hook;		/* interrupt handler hook */
/*===========================================================================*
 *				clock_task				     *
 *===========================================================================*/
PUBLIC void clock_task()
{
/* Main program of clock task. If the call is not HARD_INT it is an error.
 */
  message m;			/* message buffer for both input and output */
  int result;			/* result returned by the handler */

  init_clock();			/* initialize clock task */

  /* Main loop of the clock task. Get work, process it. Never reply. */
  while (TRUE) {
	/* Go get a message. */
	result = receive(ANY, &m);

	if (result != OK)
		panic("receive() failed", result);

	/* Handle the request. Only clock ticks are expected. */
	switch (m.m_type) {
	case HARD_INT:
		do_clocktick(&m);	/* handle clock tick */
		break;
	default:			/* illegal request type */
		kprintf("CLOCK: illegal request %d from %d.\n",
			m.m_type, m.m_source);
	}
  }
}
/*===========================================================================*
 *				do_clocktick				     *
 *===========================================================================*/
PRIVATE void do_clocktick(m_ptr)
message *m_ptr;				/* pointer to request message */
{
/* Despite its name, this routine is not called on every clock tick. It
 * is called on those clock ticks when a lot of work needs to be done.
 */

  /* A process used up a full quantum. The interrupt handler stored this
   * process in 'prev_ptr'. First make sure that the process is not on the
   * scheduling queues. Then announce the process ready again. Since it has
   * no more time left, it gets a new quantum and is inserted at the right
   * place in the queues. As a side-effect a new process will be scheduled.
   */
  if (prev_ptr->p_ticks_left <= 0 && priv(prev_ptr)->s_flags & PREEMPTIBLE) {
	if (prev_ptr->p_rts_flags == 0) {	/* if it was runnable .. */
		lock_dequeue(prev_ptr);		/* take it off the queues */
		lock_enqueue(prev_ptr);		/* and reinsert it again */
	} else {
		kprintf("CLOCK: %d not runnable; flags: %x\n",
			prev_ptr->p_endpoint, prev_ptr->p_rts_flags);
	}
  }

  /* Check if a clock timer expired and run its watchdog function. */
  if (next_timeout <= realtime) {
	tmrs_exptimers(&clock_timers, realtime, NULL);
	next_timeout = (clock_timers == NULL) ?
		TMR_NEVER : clock_timers->tmr_exp_time;
  }

  return;
}
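/* Illustrative sketch (editor's addition): the expiry semantics that the
 * tmrs_exptimers() call above relies on. This is not the actual <timers.h>
 * library code; it only shows the intended behavior, assuming the queue is
 * kept sorted by tmr_exp_time and every watchdog is non-blocking.
 */
#if 0
PRIVATE void expire_timers_sketch(tmrs, now)
timer_t **tmrs;				/* head of the sorted timer queue */
clock_t now;				/* current realtime in ticks */
{
  timer_t *tp;

  while ((tp = *tmrs) != NULL && tp->tmr_exp_time <= now) {
	*tmrs = tp->tmr_next;		/* unlink the expired timer first */
	tp->tmr_next = NULL;
	(*tp->tmr_func)(tp);		/* run its watchdog; must not block */
  }
}
#endif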
/*===========================================================================*
 *				init_clock				     *
 *===========================================================================*/
PRIVATE void init_clock()
{
/* Initialize the clock system. The clock is set up to produce an interrupt
 * every 1/60th of a second (i.e., at 60 Hz) and starts running right away.
 */
  arch_init_clock();			/* architecture-dependent initialization */

  /* Initialize the CLOCK's interrupt hook. */
  clock_hook.proc_nr_e = CLOCK;

  put_irq_handler(&clock_hook, CLOCK_IRQ, clock_handler);
  enable_irq(&clock_hook);		/* ready for clock interrupts */

  /* Set a watchdog timer to periodically balance the scheduling queues. */
  balance_queues(NULL);			/* side-effect sets new timer */
}
/*===========================================================================*
 *				clock_handler				     *
 *===========================================================================*/
PRIVATE int clock_handler(hook)
irq_hook_t *hook;
{
/* This executes on each clock tick (i.e., every time the timer chip generates
 * an interrupt). It does a little bit of work so the clock task does not have
 * to be called on every tick. The clock task is called when:
 *
 *	(1) the scheduling quantum of the running process has expired, or
 *	(2) a timer has expired and the watchdog function should be run.
 *
 * Many global and static variables are accessed here. The safety of this
 * must be justified. All scheduling and message passing code acquires a
 * lock by temporarily disabling interrupts, so no conflicts with calls from
 * the task level can occur. Furthermore, interrupts are not reentrant, so
 * the interrupt handler cannot be interrupted by other interrupts.
 *
 * Variables that are updated in the clock's interrupt handler:
 *	lost_ticks:
 *		Clock ticks counted outside the clock task. This is used,
 *		for example, when the boot monitor processes a real mode
 *		interrupt.
 *	realtime:
 *		The current uptime is incremented with all outstanding ticks.
 *	proc_ptr, bill_ptr:
 *		These are used for accounting. It does not matter if proc.c
 *		is changing them, provided they are always valid pointers,
 *		since at worst the previous process would be billed.
 */
  register unsigned ticks;

  /* Get number of ticks and update realtime. */
  ticks = lost_ticks + 1;
  lost_ticks = 0;
  realtime += ticks;

  /* Update user and system accounting times. Charge the current process
   * for user time. If the current process is not billable, that is, if a
   * non-user process is running, charge the billable process for system
   * time as well. Thus the unbillable process' user time is the billable
   * user's system time.
   */
  proc_ptr->p_user_time += ticks;
  if (priv(proc_ptr)->s_flags & PREEMPTIBLE) {
	proc_ptr->p_ticks_left -= ticks;
  }
  if (! (priv(proc_ptr)->s_flags & BILLABLE)) {
	bill_ptr->p_sys_time += ticks;
	bill_ptr->p_ticks_left -= ticks;
  }

  /* Update load average. */
  load_update();

  /* Check if do_clocktick() must be called. Done for alarms and scheduling.
   * Some processes, such as the kernel tasks, cannot be preempted.
   */
  if ((next_timeout <= realtime) || (proc_ptr->p_ticks_left <= 0)) {
	prev_ptr = proc_ptr;		/* store running process */
	lock_notify(HARDWARE, CLOCK);	/* send notification */
  }

  if (do_serial_debug)
	do_ser_debug();

  return(1);				/* reenable interrupts */
}
/*===========================================================================*
 *				get_uptime				     *
 *===========================================================================*/
PUBLIC clock_t get_uptime(void)
{
/* Get and return the current clock uptime in ticks. */
  return(realtime);
}
/*===========================================================================*
 *				set_timer				     *
 *===========================================================================*/
PUBLIC void set_timer(tp, exp_time, watchdog)
struct timer *tp;		/* pointer to timer structure */
clock_t exp_time;		/* expiration realtime */
tmr_func_t watchdog;		/* watchdog to be called */
{
/* Insert the new timer in the active timers list. Always update the
 * next timeout time by setting it to the front of the active list.
 */
  tmrs_settimer(&clock_timers, tp, exp_time, watchdog, NULL);
  next_timeout = clock_timers->tmr_exp_time;
}
/*===========================================================================*
 *				reset_timer				     *
 *===========================================================================*/
PUBLIC void reset_timer(tp)
struct timer *tp;		/* pointer to timer structure */
{
/* The timer pointed to by 'tp' is no longer needed. Remove it from both the
 * active and expired lists. Always update the next timeout time by setting
 * it to the front of the active list.
 */
  tmrs_clrtimer(&clock_timers, tp, NULL);
  next_timeout = (clock_timers == NULL) ?
	TMR_NEVER : clock_timers->tmr_exp_time;
}
/*===========================================================================*
 *				load_update				     *
 *===========================================================================*/
PRIVATE void load_update(void)
{
  u16_t slot;
  int enqueued = -1, q;		/* -1: special compensation for IDLE */
  struct proc *p;

  /* Load average data is stored as a list of numbers in a circular buffer.
   * Each slot accumulates _LOAD_UNIT_SECS worth of samples of the number of
   * runnable processes. The load average over variable periods can then be
   * computed from this history in the user library (see getloadavg(3)).
   */
  slot = (realtime / HZ / _LOAD_UNIT_SECS) % _LOAD_HISTORY;
  if (slot != kloadinfo.proc_last_slot) {
	kloadinfo.proc_load_history[slot] = 0;	/* entering a new slot */
	kloadinfo.proc_last_slot = slot;
  }

  /* Accumulation: how many processes are runnable right now? */
  for (q = 0; q < NR_SCHED_QUEUES; q++)
	for (p = rdy_head[q]; p != NIL_PROC; p = p->p_nextready)
		enqueued++;
  kloadinfo.proc_load_history[slot] += enqueued;

  /* Record that the load information is up to date at this clock time. */
  kloadinfo.last_clock = realtime;
}
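/* Illustrative sketch (editor's addition): how the history filled in above
 * could be turned into a load figure in user space, roughly in the spirit of
 * getloadavg(3). Fetching the kernel's kloadinfo into user space is omitted,
 * and the plain arithmetic mean over the last n_slots slots is an assumption;
 * the real library may weight or window the samples differently.
 */
#if 0
double load_average_sketch(ki, n_slots)
struct loadinfo *ki;			/* copy of the kernel's kloadinfo */
int n_slots;				/* how many history slots to average */
{
  int i, slot;
  long samples = 0;

  for (i = 0; i < n_slots; i++) {
	slot = (ki->proc_last_slot - i + _LOAD_HISTORY) % _LOAD_HISTORY;
	samples += ki->proc_load_history[slot];
  }

  /* Each slot gets one sample per clock tick for _LOAD_UNIT_SECS seconds,
   * i.e. about _LOAD_UNIT_SECS * HZ samples, so divide to obtain the
   * average number of runnable processes over the chosen period.
   */
  return (double) samples / ((double) n_slots * _LOAD_UNIT_SECS * HZ);
}
#endif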