coverity appeasement - redundant check
[minix.git] / kernel / arch / i386 / arch_clock.c
/* i386-specific clock functions. */

#include <machine/ports.h>
#include <minix/portio.h>

#include "kernel/kernel.h"

#include "kernel/clock.h"
#include "kernel/interrupt.h"
#include <minix/u64.h>
#include "glo.h"
#include "profile.h"

#ifdef USE_APIC
#include "apic.h"
#endif

#include "spinlock.h"

#ifdef CONFIG_SMP
#include "kernel/smp.h"
#endif

#define CLOCK_ACK_BIT 0x80 /* PS/2 clock interrupt acknowledge bit */

/* Clock parameters. */
#define COUNTER_FREQ (2*TIMER_FREQ) /* counter frequency using square wave */
#define LATCH_COUNT 0x00 /* cc00xxxx, c = channel, x = any */
#define SQUARE_WAVE 0x36 /* ccaammmb, a = access, m = mode, b = BCD */
                         /*   11x11, 11 = LSB then MSB, x11 = sq wave */
#define TIMER_FREQ 1193182 /* clock frequency for timer in PC and AT */
#define TIMER_COUNT(freq) (TIMER_FREQ/(freq)) /* initial value for counter */
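/* For example, with freq = 60 Hz the divisor is TIMER_FREQ/60 = 1193182/60
 * ~= 19886, which is the count loaded into channel 0 below.
 */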
static irq_hook_t pic_timer_hook; /* interrupt handler hook */

static unsigned probe_ticks;
static u64_t tsc0, tsc1;
#define PROBE_TICKS (system_hz / 10)
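/* The TSC calibration below samples the TSC over PROBE_TICKS clock ticks,
 * i.e. system_hz/10 ticks, or roughly 100 ms of wall-clock time.
 */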
static unsigned tsc_per_ms[CONFIG_MAX_CPUS];

/*===========================================================================*
 *                              init_8253A_timer                            *
 *===========================================================================*/
int init_8253A_timer(const unsigned freq)
{
        /* Initialize channel 0 of the 8253A timer to, e.g., 60 Hz,
         * and register the CLOCK task's interrupt handler to be run
         * on every clock tick.
         */
        outb(TIMER_MODE, SQUARE_WAVE);            /* run continuously */
        outb(TIMER0, (TIMER_COUNT(freq) & 0xff)); /* timer low byte */
        outb(TIMER0, TIMER_COUNT(freq) >> 8);     /* timer high byte */

        return OK;
}
/*===========================================================================*
 *                              stop_8253A_timer                            *
 *===========================================================================*/
void stop_8253A_timer(void)
{
        /* Reset the clock to the BIOS rate. (For rebooting.) */
        outb(TIMER_MODE, 0x36);
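        /* A zero count reloads the full 16-bit range (65536), giving the
         * standard BIOS tick rate of 1193182/65536 ~= 18.2 Hz.
         */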
        outb(TIMER0, 0);
        outb(TIMER0, 0);
}

static int calib_cpu_handler(irq_hook_t * UNUSED(hook))
{
        u64_t tsc;

        probe_ticks++;
        read_tsc_64(&tsc);
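        /* Record the TSC at the first and the last probe tick; the difference
         * then spans PROBE_TICKS - 1 full tick periods.
         */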
        if (probe_ticks == 1) {
                tsc0 = tsc;
        }
        else if (probe_ticks == PROBE_TICKS) {
                tsc1 = tsc;
        }

        /* just in case we are in an SMP single cpu fallback mode */
        BKL_UNLOCK();
        return 1;
}

static void estimate_cpu_freq(void)
{
        u64_t tsc_delta;
        u64_t cpu_freq;

        irq_hook_t calib_cpu;

        /* set the probe, we use the legacy timer, IRQ 0 */
        put_irq_handler(&calib_cpu, CLOCK_IRQ, calib_cpu_handler);

        /* just in case we are in an SMP single cpu fallback mode */
        BKL_UNLOCK();
        /* set the PIC timer to get some time */
        intr_enable();

        /* loop for some time to get a sample */
        while(probe_ticks < PROBE_TICKS) {
                intr_enable();
        }

        intr_disable();
        /* just in case we are in an SMP single cpu fallback mode */
        BKL_LOCK();

        /* remove the probe */
        rm_irq_handler(&calib_cpu);

        tsc_delta = sub64(tsc1, tsc0);
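        /* tsc_delta / (PROBE_TICKS - 1) is the number of TSC cycles per clock
         * tick; multiplying by system_hz (ticks per second) yields the CPU
         * frequency in cycles per second.
         */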
        cpu_freq = mul64(div64u64(tsc_delta, PROBE_TICKS - 1), make64(system_hz, 0));
        cpu_set_freq(cpuid, cpu_freq);
        cpu_info[cpuid].freq = div64u(cpu_freq, 1000000);
        BOOT_VERBOSE(cpu_print_freq(cpuid));
}
int init_local_timer(unsigned freq)
{
#ifdef USE_APIC
        /* if we know the address, lapic is enabled and we should use it */
        if (lapic_addr) {
                unsigned cpu = cpuid;
                tsc_per_ms[cpu] = div64u(cpu_get_freq(cpu), 1000);
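                /* the one-shot value is the tick period in microseconds,
                 * i.e. 1000000/system_hz (e.g. 16666 us at 60 Hz) */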
                lapic_set_timer_one_shot(1000000/system_hz);
        } else
        {
                BOOT_VERBOSE(printf("Initiating legacy i8253 timer\n"));
#else
        {
#endif
                init_8253A_timer(freq);
                estimate_cpu_freq();
                /* always only 1 cpu in the system */
                tsc_per_ms[0] = div64u(cpu_get_freq(0), 1000);
        }

        return 0;
}
void stop_local_timer(void)
{
#ifdef USE_APIC
        if (lapic_addr) {
                lapic_stop_timer();
                apic_eoi();
        } else
#endif
        {
                stop_8253A_timer();
        }
}

void restart_local_timer(void)
{
#ifdef USE_APIC
        if (lapic_addr) {
                lapic_restart_timer();
        }
#endif
}
int register_local_timer_handler(const irq_handler_t handler)
{
#ifdef USE_APIC
        if (lapic_addr) {
                /* Using APIC, it is configured in apic_idt_init() */
                BOOT_VERBOSE(printf("Using LAPIC timer as tick source\n"));
        } else
#endif
        {
                /* Using PIC, initialize the CLOCK's interrupt hook. */
                pic_timer_hook.proc_nr_e = NONE;
                pic_timer_hook.irq = CLOCK_IRQ;

                put_irq_handler(&pic_timer_hook, CLOCK_IRQ, handler);
        }

        return 0;
}
void cycles_accounting_init(void)
{
#ifdef CONFIG_SMP
        unsigned cpu = cpuid;
#endif

        read_tsc_64(get_cpu_var_ptr(cpu, tsc_ctr_switch));

        make_zero64(get_cpu_var(cpu, cpu_last_tsc));
        make_zero64(get_cpu_var(cpu, cpu_last_idle));
}
void context_stop(struct proc * p)
{
        u64_t tsc, tsc_delta;
        u64_t * __tsc_ctr_switch = get_cpulocal_var_ptr(tsc_ctr_switch);
#ifdef CONFIG_SMP
        unsigned cpu = cpuid;
        int must_bkl_unlock = 0;

        /*
         * This function is called only if we switch from kernel to user or idle
         * or back. Therefore this is a perfect location to place the big kernel
         * lock which will hopefully disappear soon.
         *
         * If we stop accounting for KERNEL we must unlock the BKL. If we
         * account for IDLE we must not hold the lock.
         */
        if (p == proc_addr(KERNEL)) {
                u64_t tmp;

                read_tsc_64(&tsc);
                tmp = sub64(tsc, *__tsc_ctr_switch);
                kernel_ticks[cpu] = add64(kernel_ticks[cpu], tmp);
                p->p_cycles = add64(p->p_cycles, tmp);
                must_bkl_unlock = 1;
        } else {
                u64_t bkl_tsc;
                atomic_t succ;

                read_tsc_64(&bkl_tsc);
                /* this only gives a good estimate */
                succ = big_kernel_lock.val;

                BKL_LOCK();

                read_tsc_64(&tsc);

                bkl_ticks[cpu] = add64(bkl_ticks[cpu], sub64(tsc, bkl_tsc));
                bkl_tries[cpu]++;
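                /* succ holds the lock value sampled before BKL_LOCK(): zero
                 * means the lock appeared free, so this counts (approximately)
                 * uncontended acquisitions. */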
                bkl_succ[cpu] += !(!(succ == 0));

                p->p_cycles = add64(p->p_cycles, sub64(tsc, *__tsc_ctr_switch));

#ifdef CONFIG_SMP
                /*
                 * Since at the time we got a scheduling IPI we might have been
                 * waiting for BKL already, we may miss it due to a similar IPI to
                 * the cpu which is already waiting for us to handle its. This
                 * results in a live-lock of these two cpus.
                 *
                 * Therefore we always check if there is one pending and if so,
                 * we handle it straight away so the other cpu can continue and
                 * we do not deadlock.
                 */
                smp_sched_handler();
#endif
        }
#else
        read_tsc_64(&tsc);
        p->p_cycles = add64(p->p_cycles, sub64(tsc, *__tsc_ctr_switch));
#endif

        tsc_delta = sub64(tsc, *__tsc_ctr_switch);

        if(kbill_ipc) {
                kbill_ipc->p_kipc_cycles =
                        add64(kbill_ipc->p_kipc_cycles, tsc_delta);
                kbill_ipc = NULL;
        }

        if(kbill_kcall) {
                kbill_kcall->p_kcall_cycles =
                        add64(kbill_kcall->p_kcall_cycles, tsc_delta);
                kbill_kcall = NULL;
        }

        /*
         * deduct the just consumed cpu cycles from the cpu time left for this
         * process during its current quantum. Skip IDLE and other pseudo kernel
         * tasks
         */
        if (p->p_endpoint >= 0) {
#if DEBUG_RACE
                make_zero64(p->p_cpu_time_left);
#else
                /* if (tsc_delta < p->p_cpu_time_left) in 64bit */
                if (ex64hi(tsc_delta) < ex64hi(p->p_cpu_time_left) ||
                        (ex64hi(tsc_delta) == ex64hi(p->p_cpu_time_left) &&
                         ex64lo(tsc_delta) < ex64lo(p->p_cpu_time_left)))
                        p->p_cpu_time_left = sub64(p->p_cpu_time_left, tsc_delta);
                else {
                        make_zero64(p->p_cpu_time_left);
                }
#endif
        }

        *__tsc_ctr_switch = tsc;

#ifdef CONFIG_SMP
        if(must_bkl_unlock) {
                BKL_UNLOCK();
        }
#endif
}
void context_stop_idle(void)
{
        int is_idle;
#ifdef CONFIG_SMP
        unsigned cpu = cpuid;
#endif

        is_idle = get_cpu_var(cpu, cpu_is_idle);
        get_cpu_var(cpu, cpu_is_idle) = 0;

        context_stop(get_cpulocal_var_ptr(idle_proc));

        if (is_idle)
                restart_local_timer();
#if SPROFILE
        if (sprofiling)
                get_cpulocal_var(idle_interrupted) = 1;
#endif
}
u64_t ms_2_cpu_time(unsigned ms)
{
        return mul64u(tsc_per_ms[cpuid], ms);
}

unsigned cpu_time_2_ms(u64_t cpu_time)
{
        return div64u(cpu_time, tsc_per_ms[cpuid]);
}
short cpu_load(void)
{
        u64_t current_tsc, *current_idle;
        u64_t tsc_delta, idle_delta, busy;
        struct proc *idle;
        short load;
#ifdef CONFIG_SMP
        unsigned cpu = cpuid;
#endif

        u64_t *last_tsc, *last_idle;

        last_tsc = get_cpu_var_ptr(cpu, cpu_last_tsc);
        last_idle = get_cpu_var_ptr(cpu, cpu_last_idle);

        idle = get_cpu_var_ptr(cpu, idle_proc);
        read_tsc_64(&current_tsc);
        current_idle = &idle->p_cycles; /* ptr to idle proc */

        /* calculate load since last cpu_load invocation */
        if (!is_zero64(*last_tsc)) {
                tsc_delta = sub64(current_tsc, *last_tsc);
                idle_delta = sub64(*current_idle, *last_idle);
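                /* busy time is the interval minus the idle process's share:
                 * load = 100 * (tsc_delta - idle_delta) / tsc_delta, clamped
                 * to 100 below. */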
                busy = sub64(tsc_delta, idle_delta);
                busy = mul64(busy, make64(100, 0));
                load = ex64lo(div64(busy, tsc_delta));

                if (load > 100)
                        load = 100;
        } else
                load = 0;

        *last_tsc = current_tsc;
        *last_idle = *current_idle;
        return load;
}
void busy_delay_ms(int ms)
{
        u64_t cycles = ms_2_cpu_time(ms), tsc0, tsc, tsc1;
        read_tsc_64(&tsc0);
        tsc1 = tsc0 + cycles;
        do { read_tsc_64(&tsc); } while(tsc < tsc1);
        return;
}