make vfs & filesystems use failable copying
[minix3.git] / kernel / arch / i386 / arch_clock.c
blob ba7af4d0f70dbc9a483acf610e64a52625ac2a14

/* i386-specific clock functions. */

#include <machine/ports.h>
#include <minix/portio.h>

#include "kernel/kernel.h"

#include "kernel/clock.h"
#include "kernel/interrupt.h"
#include <minix/u64.h>
#include "glo.h"
#include "kernel/profile.h"

#ifdef USE_APIC
#include "apic.h"
#endif

#include "kernel/spinlock.h"

#ifdef CONFIG_SMP
#include "kernel/smp.h"
#endif

#define CLOCK_ACK_BIT	0x80	/* PS/2 clock interrupt acknowledge bit */

/* Clock parameters. */
#define COUNTER_FREQ (2*TIMER_FREQ)	/* counter frequency using square wave */
#define LATCH_COUNT	0x00	/* cc00xxxx, c = channel, x = any */
#define SQUARE_WAVE	0x36	/* ccaammmb, a = access, m = mode, b = BCD */
				/*   11x11, 11 = LSB then MSB, x11 = sq wave */
#define TIMER_FREQ	1193182	/* clock frequency for timer in PC and AT */
#define TIMER_COUNT(freq) (TIMER_FREQ/(freq)) /* initial value for counter */
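
/*
 * Example: TIMER_COUNT(60) = 1193182 / 60 = 19886 (integer division), so the
 * PIT is reloaded with 19886 to produce roughly 60 interrupts per second.
 */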

static irq_hook_t pic_timer_hook;	/* interrupt handler hook */

static unsigned probe_ticks;
static u64_t tsc0, tsc1;
#define PROBE_TICKS	(system_hz / 10)
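/* PROBE_TICKS is the number of clock ticks in 1/10 second, so the TSC
 * calibration below measures over a window of roughly 100 ms. */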

static unsigned tsc_per_ms[CONFIG_MAX_CPUS];

/*===========================================================================*
 *				init_8253A_timer			     *
 *===========================================================================*/
int init_8253A_timer(const unsigned freq)
{
	/* Initialize channel 0 of the 8253A timer to, e.g., 60 Hz,
	 * and register the CLOCK task's interrupt handler to be run
	 * on every clock tick.
	 */
	outb(TIMER_MODE, SQUARE_WAVE);		/* run continuously */
	outb(TIMER0, (TIMER_COUNT(freq) & 0xff)); /* timer low byte */
	outb(TIMER0, TIMER_COUNT(freq) >> 8);	/* timer high byte */

	return OK;
}

/*===========================================================================*
 *				stop_8253A_timer			     *
 *===========================================================================*/
void stop_8253A_timer(void)
{
	/* Reset the clock to the BIOS rate. (For rebooting.) */
	outb(TIMER_MODE, 0x36);
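	/* A count of 0 is interpreted by the PIT as 65536, which restores the
	 * BIOS default rate of about 18.2 Hz (1193182 / 65536). */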
	outb(TIMER0, 0);
	outb(TIMER0, 0);
}

void arch_timer_int_handler(void)
{
}

static int calib_cpu_handler(irq_hook_t * UNUSED(hook))
{
	u64_t tsc;

	probe_ticks++;
	read_tsc_64(&tsc);

	if (probe_ticks == 1) {
		tsc0 = tsc;
	}
	else if (probe_ticks == PROBE_TICKS) {
		tsc1 = tsc;
	}

	/* just in case we are in an SMP single cpu fallback mode */
	BKL_UNLOCK();
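	/* By MINIX convention a nonzero return from an IRQ hook lets the
	 * IRQ line be reenabled, so the probe keeps receiving ticks. */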
	return 1;
}

static void estimate_cpu_freq(void)
{
	u64_t tsc_delta;
	u64_t cpu_freq;

	irq_hook_t calib_cpu;

	/* set the probe, we use the legacy timer, IRQ 0 */
	put_irq_handler(&calib_cpu, CLOCK_IRQ, calib_cpu_handler);

	/* just in case we are in an SMP single cpu fallback mode */
	BKL_UNLOCK();
	/* set the PIC timer to get some time */
	intr_enable();

	/* loop for some time to get a sample */
	while (probe_ticks < PROBE_TICKS) {
		intr_enable();
	}

	intr_disable();
	/* just in case we are in an SMP single cpu fallback mode */
	BKL_LOCK();

	/* remove the probe */
	rm_irq_handler(&calib_cpu);

	tsc_delta = tsc1 - tsc0;

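	/* tsc0 and tsc1 were taken at ticks 1 and PROBE_TICKS, so tsc_delta
	 * spans PROBE_TICKS - 1 tick intervals.  Cycles per tick, times
	 * system_hz ticks per second, gives cycles per second. */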
	cpu_freq = (tsc_delta / (PROBE_TICKS - 1)) * system_hz;
	cpu_set_freq(cpuid, cpu_freq);
	cpu_info[cpuid].freq = (unsigned long)(cpu_freq / 1000000);
	BOOT_VERBOSE(cpu_print_freq(cpuid));
}

int init_local_timer(unsigned freq)
{
#ifdef USE_APIC
	/* if we know the address, lapic is enabled and we should use it */
	if (lapic_addr) {
		unsigned cpu = cpuid;
		tsc_per_ms[cpu] = (unsigned long)(cpu_get_freq(cpu) / 1000);
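		/* Assuming lapic_set_timer_one_shot() takes its interval in
		 * microseconds, 1000000 / system_hz is one tick period
		 * (16666 us at 60 Hz). */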
		lapic_set_timer_one_shot(1000000 / system_hz);
	} else {
		BOOT_VERBOSE(printf("Initiating legacy i8253 timer\n"));
#else
	{
#endif
		init_8253A_timer(freq);
		estimate_cpu_freq();
		/* always only 1 cpu in the system */
		tsc_per_ms[0] = (unsigned long)(cpu_get_freq(0) / 1000);
	}

	return 0;
}

void stop_local_timer(void)
{
#ifdef USE_APIC
	if (lapic_addr) {
		lapic_stop_timer();
		apic_eoi();
	} else
#endif
	{
		stop_8253A_timer();
	}
}

void restart_local_timer(void)
{
#ifdef USE_APIC
	if (lapic_addr) {
		lapic_restart_timer();
	}
#endif
}

int register_local_timer_handler(const irq_handler_t handler)
{
#ifdef USE_APIC
	if (lapic_addr) {
		/* Using APIC, it is configured in apic_idt_init() */
		BOOT_VERBOSE(printf("Using LAPIC timer as tick source\n"));
	} else
#endif
	{
		/* Using PIC, initialize the CLOCK's interrupt hook. */
		pic_timer_hook.proc_nr_e = NONE;
		pic_timer_hook.irq = CLOCK_IRQ;

		put_irq_handler(&pic_timer_hook, CLOCK_IRQ, handler);
	}

	return 0;
}

void cycles_accounting_init(void)
{
#ifdef CONFIG_SMP
	unsigned cpu = cpuid;
#endif

	read_tsc_64(get_cpu_var_ptr(cpu, tsc_ctr_switch));

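	/* cpu_last_tsc and cpu_last_idle are the snapshots that cpu_load()
	 * diffs against; zero means no snapshot has been taken yet. */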
	get_cpu_var(cpu, cpu_last_tsc) = 0;
	get_cpu_var(cpu, cpu_last_idle) = 0;
}

void context_stop(struct proc * p)
{
	u64_t tsc, tsc_delta;
	u64_t * __tsc_ctr_switch = get_cpulocal_var_ptr(tsc_ctr_switch);
#ifdef CONFIG_SMP
	unsigned cpu = cpuid;
	int must_bkl_unlock = 0;

	/*
	 * This function is called only if we switch from kernel to user or idle
	 * or back. Therefore this is a perfect location to place the big kernel
	 * lock which will hopefully disappear soon.
	 *
	 * If we stop accounting for KERNEL we must unlock the BKL. If we
	 * account for IDLE we must not hold the lock.
	 */
	if (p == proc_addr(KERNEL)) {
		u64_t tmp;

		read_tsc_64(&tsc);
		tmp = tsc - *__tsc_ctr_switch;
		kernel_ticks[cpu] = kernel_ticks[cpu] + tmp;
		p->p_cycles = p->p_cycles + tmp;
		must_bkl_unlock = 1;
	} else {
		u64_t bkl_tsc;
		atomic_t succ;

		read_tsc_64(&bkl_tsc);
		/* this only gives a good estimate */
		succ = big_kernel_lock.val;

		BKL_LOCK();

		read_tsc_64(&tsc);

		bkl_ticks[cpu] = bkl_ticks[cpu] + tsc - bkl_tsc;
		bkl_tries[cpu]++;
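		/* succ sampled the lock word before BKL_LOCK(); zero means
		 * the lock looked free, so bkl_succ appears to approximate
		 * the number of uncontended acquisitions. */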
		bkl_succ[cpu] += !(!(succ == 0));

		p->p_cycles = p->p_cycles + tsc - *__tsc_ctr_switch;

#ifdef CONFIG_SMP
		/*
		 * Since at the time we got a scheduling IPI we might have been
		 * waiting for the BKL already, we may miss it due to a similar
		 * IPI to the cpu which is already waiting for us to handle its
		 * IPI. This results in a live-lock of these two cpus.
		 *
		 * Therefore we always check if there is one pending and if so,
		 * we handle it straight away so the other cpu can continue and
		 * we do not deadlock.
		 */
		smp_sched_handler();
#endif
	}
#else
	read_tsc_64(&tsc);
	p->p_cycles = p->p_cycles + tsc - *__tsc_ctr_switch;
#endif

	tsc_delta = tsc - *__tsc_ctr_switch;

	if (kbill_ipc) {
		kbill_ipc->p_kipc_cycles =
			kbill_ipc->p_kipc_cycles + tsc_delta;
		kbill_ipc = NULL;
	}

	if (kbill_kcall) {
		kbill_kcall->p_kcall_cycles =
			kbill_kcall->p_kcall_cycles + tsc_delta;
		kbill_kcall = NULL;
	}

	/*
	 * Deduct the just consumed cpu cycles from the cpu time left for this
	 * process during its current quantum. Skip IDLE and other pseudo
	 * kernel tasks.
	 */
	if (p->p_endpoint >= 0) {
#if DEBUG_RACE
		p->p_cpu_time_left = 0;
#else
		/* if (tsc_delta < p->p_cpu_time_left) in 64bit */
		if (ex64hi(tsc_delta) < ex64hi(p->p_cpu_time_left) ||
			(ex64hi(tsc_delta) == ex64hi(p->p_cpu_time_left) &&
			 ex64lo(tsc_delta) < ex64lo(p->p_cpu_time_left)))
			p->p_cpu_time_left = p->p_cpu_time_left - tsc_delta;
		else {
			p->p_cpu_time_left = 0;
		}
#endif
	}
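
	/* Restart the accounting window: the next context_stop() on this cpu
	 * will measure from this TSC value. */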
	*__tsc_ctr_switch = tsc;

#ifdef CONFIG_SMP
	if (must_bkl_unlock) {
		BKL_UNLOCK();
	}
#endif
}

void context_stop_idle(void)
{
	int is_idle;
#ifdef CONFIG_SMP
	unsigned cpu = cpuid;
#endif

	is_idle = get_cpu_var(cpu, cpu_is_idle);
	get_cpu_var(cpu, cpu_is_idle) = 0;

	context_stop(get_cpulocal_var_ptr(idle_proc));

	if (is_idle)
		restart_local_timer();
#if SPROFILE
	if (sprofiling)
		get_cpulocal_var(idle_interrupted) = 1;
#endif
}

u64_t ms_2_cpu_time(unsigned ms)
{
	return (u64_t)tsc_per_ms[cpuid] * ms;
}

unsigned cpu_time_2_ms(u64_t cpu_time)
{
	return (unsigned long)(cpu_time / tsc_per_ms[cpuid]);
}

short cpu_load(void)
{
	u64_t current_tsc, *current_idle;
	u64_t tsc_delta, idle_delta, busy;
	struct proc *idle;
	short load;
#ifdef CONFIG_SMP
	unsigned cpu = cpuid;
#endif

	u64_t *last_tsc, *last_idle;

	last_tsc = get_cpu_var_ptr(cpu, cpu_last_tsc);
	last_idle = get_cpu_var_ptr(cpu, cpu_last_idle);

	idle = get_cpu_var_ptr(cpu, idle_proc);
	read_tsc_64(&current_tsc);
	current_idle = &idle->p_cycles; /* ptr to idle proc */

	/* calculate load since last cpu_load invocation */
	if (*last_tsc) {
		tsc_delta = current_tsc - *last_tsc;
		idle_delta = *current_idle - *last_idle;

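		/* Load is the percentage of the elapsed TSC window not spent
		 * in the idle process: 100 * (tsc_delta - idle_delta) /
		 * tsc_delta, clamped to 100 below. */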
		busy = tsc_delta - idle_delta;
		busy = busy * 100;
		load = ex64lo(busy / tsc_delta);

		if (load > 100)
			load = 100;
	} else
		load = 0;

	*last_tsc = current_tsc;
	*last_idle = *current_idle;
	return load;
}

void busy_delay_ms(int ms)
{
	u64_t cycles = ms_2_cpu_time(ms), tsc0, tsc, tsc1;
	read_tsc_64(&tsc0);
	tsc1 = tsc0 + cycles;
	do { read_tsc_64(&tsc); } while (tsc < tsc1);
	return;
}
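
/* The delay loop above spins on the TSC, so its accuracy assumes the TSC
 * keeps advancing at the calibrated per-millisecond rate while spinning. */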