2 /* i386-specific clock functions. */
4 #include <machine/ports.h>
5 #include <minix/portio.h>
7 #include "kernel/kernel.h"
9 #include "kernel/clock.h"
10 #include "kernel/interrupt.h"
11 #include <minix/u64.h>
23 #include "kernel/smp.h"
26 #define CLOCK_ACK_BIT 0x80 /* PS/2 clock interrupt acknowledge bit */
28 /* Clock parameters. */
29 #define COUNTER_FREQ (2*TIMER_FREQ) /* counter frequency using square wave */
30 #define LATCH_COUNT 0x00 /* cc00xxxx, c = channel, x = any */
31 #define SQUARE_WAVE 0x36 /* ccaammmb, a = access, m = mode, b = BCD */
32 /* 11x11, 11 = LSB then MSB, x11 = sq wave */
33 #define TIMER_FREQ 1193182 /* clock frequency for timer in PC and AT */
34 #define TIMER_COUNT(freq) (TIMER_FREQ/(freq)) /* initial value for counter*/
/* Hook used to register the legacy PIC (i8253) timer interrupt handler. */
static irq_hook_t pic_timer_hook; /* interrupt handler hook */
/* Number of clock ticks observed while calibrating the CPU frequency. */
static unsigned probe_ticks;
/* TSC samples taken at the first and the last calibration tick. */
static u64_t tsc0, tsc1;
/* Number of ticks to sample for calibration: one tenth of a second. */
#define PROBE_TICKS (system_hz / 10)

/* Per-CPU TSC cycles per millisecond, used for time<->cycles conversion. */
static unsigned tsc_per_ms[CONFIG_MAX_CPUS];
44 /*===========================================================================*
46 *===========================================================================*/
47 int init_8253A_timer(const unsigned freq
)
49 /* Initialize channel 0 of the 8253A timer to, e.g., 60 Hz,
50 * and register the CLOCK task's interrupt handler to be run
51 * on every clock tick.
53 outb(TIMER_MODE
, SQUARE_WAVE
); /* run continuously */
54 outb(TIMER0
, (TIMER_COUNT(freq
) & 0xff)); /* timer low byte */
55 outb(TIMER0
, TIMER_COUNT(freq
) >> 8); /* timer high byte */
60 /*===========================================================================*
62 *===========================================================================*/
63 void stop_8253A_timer(void)
65 /* Reset the clock to the BIOS rate. (For rebooting.) */
66 outb(TIMER_MODE
, 0x36);
71 static int calib_cpu_handler(irq_hook_t
* UNUSED(hook
))
79 if (probe_ticks
== 1) {
82 else if (probe_ticks
== PROBE_TICKS
) {
86 /* just in case we are in an SMP single cpu fallback mode */
91 static void estimate_cpu_freq(void)
98 /* set the probe, we use the legacy timer, IRQ 0 */
99 put_irq_handler(&calib_cpu
, CLOCK_IRQ
, calib_cpu_handler
);
101 /* just in case we are in an SMP single cpu fallback mode */
103 /* set the PIC timer to get some time */
106 /* loop for some time to get a sample */
107 while(probe_ticks
< PROBE_TICKS
) {
112 /* just in case we are in an SMP single cpu fallback mode */
115 /* remove the probe */
116 rm_irq_handler(&calib_cpu
);
118 tsc_delta
= sub64(tsc1
, tsc0
);
120 cpu_freq
= mul64(div64u64(tsc_delta
, PROBE_TICKS
- 1), make64(system_hz
, 0));
121 cpu_set_freq(cpuid
, cpu_freq
);
122 cpu_info
[cpuid
].freq
= div64u(cpu_freq
, 1000000);
123 BOOT_VERBOSE(cpu_print_freq(cpuid
));
126 int init_local_timer(unsigned freq
)
129 /* if we know the address, lapic is enabled and we should use it */
131 unsigned cpu
= cpuid
;
132 tsc_per_ms
[cpu
] = div64u(cpu_get_freq(cpu
), 1000);
133 lapic_set_timer_one_shot(1000000/system_hz
);
136 BOOT_VERBOSE(printf("Initiating legacy i8253 timer\n"));
140 init_8253A_timer(freq
);
142 /* always only 1 cpu in the system */
143 tsc_per_ms
[0] = div64u(cpu_get_freq(0), 1000);
/*
 * Stop this CPU's tick source (APIC timer when in use, otherwise the
 * legacy i8253 timer).
 *
 * NOTE(review): body reconstructed -- it was not visible in this extract;
 * verify against the repository copy.
 */
void stop_local_timer(void)
{
#ifdef USE_APIC
	if (lapic_addr) {
		lapic_stop_timer();
		apic_eoi();
	} else
#endif
	{
		stop_8253A_timer();
	}
}
/*
 * Restart this CPU's tick source. Only the APIC timer needs an explicit
 * restart (it runs in one-shot mode); the PIC timer free-runs.
 */
void restart_local_timer(void)
{
#ifdef USE_APIC
	if (lapic_addr) {
		lapic_restart_timer();
	}
#endif
}
171 int register_local_timer_handler(const irq_handler_t handler
)
175 /* Using APIC, it is configured in apic_idt_init() */
176 BOOT_VERBOSE(printf("Using LAPIC timer as tick source\n"));
180 /* Using PIC, Initialize the CLOCK's interrupt hook. */
181 pic_timer_hook
.proc_nr_e
= NONE
;
182 pic_timer_hook
.irq
= CLOCK_IRQ
;
184 put_irq_handler(&pic_timer_hook
, CLOCK_IRQ
, handler
);
190 void cycles_accounting_init(void)
193 unsigned cpu
= cpuid
;
196 read_tsc_64(get_cpu_var_ptr(cpu
, tsc_ctr_switch
));
198 make_zero64(get_cpu_var(cpu
, cpu_last_tsc
));
199 make_zero64(get_cpu_var(cpu
, cpu_last_idle
));
202 void context_stop(struct proc
* p
)
204 u64_t tsc
, tsc_delta
;
205 u64_t
* __tsc_ctr_switch
= get_cpulocal_var_ptr(tsc_ctr_switch
);
207 unsigned cpu
= cpuid
;
208 int must_bkl_unlock
= 0;
211 * This function is called only if we switch from kernel to user or idle
212 * or back. Therefore this is a perfect location to place the big kernel
213 * lock which will hopefully disappear soon.
215 * If we stop accounting for KERNEL we must unlock the BKL. If account
216 * for IDLE we must not hold the lock
218 if (p
== proc_addr(KERNEL
)) {
222 tmp
= sub64(tsc
, *__tsc_ctr_switch
);
223 kernel_ticks
[cpu
] = add64(kernel_ticks
[cpu
], tmp
);
224 p
->p_cycles
= add64(p
->p_cycles
, tmp
);
230 read_tsc_64(&bkl_tsc
);
231 /* this only gives a good estimate */
232 succ
= big_kernel_lock
.val
;
238 bkl_ticks
[cpu
] = add64(bkl_ticks
[cpu
], sub64(tsc
, bkl_tsc
));
240 bkl_succ
[cpu
] += !(!(succ
== 0));
242 p
->p_cycles
= add64(p
->p_cycles
, sub64(tsc
, *__tsc_ctr_switch
));
246 * Since at the time we got a scheduling IPI we might have been
247 * waiting for BKL already, we may miss it due to a similar IPI to
248 * the cpu which is already waiting for us to handle its. This
249 * results in a live-lock of these two cpus.
251 * Therefore we always check if there is one pending and if so,
252 * we handle it straight away so the other cpu can continue and
253 * we do not deadlock.
260 p
->p_cycles
= add64(p
->p_cycles
, sub64(tsc
, *__tsc_ctr_switch
));
263 tsc_delta
= sub64(tsc
, *__tsc_ctr_switch
);
266 kbill_ipc
->p_kipc_cycles
=
267 add64(kbill_ipc
->p_kipc_cycles
, tsc_delta
);
272 kbill_kcall
->p_kcall_cycles
=
273 add64(kbill_kcall
->p_kcall_cycles
, tsc_delta
);
278 * deduct the just consumed cpu cycles from the cpu time left for this
279 * process during its current quantum. Skip IDLE and other pseudo kernel
282 if (p
->p_endpoint
>= 0) {
284 make_zero64(p
->p_cpu_time_left
);
286 /* if (tsc_delta < p->p_cpu_time_left) in 64bit */
287 if (ex64hi(tsc_delta
) < ex64hi(p
->p_cpu_time_left
) ||
288 (ex64hi(tsc_delta
) == ex64hi(p
->p_cpu_time_left
) &&
289 ex64lo(tsc_delta
) < ex64lo(p
->p_cpu_time_left
)))
290 p
->p_cpu_time_left
= sub64(p
->p_cpu_time_left
, tsc_delta
);
292 make_zero64(p
->p_cpu_time_left
);
297 *__tsc_ctr_switch
= tsc
;
300 if(must_bkl_unlock
) {
306 void context_stop_idle(void)
310 unsigned cpu
= cpuid
;
313 is_idle
= get_cpu_var(cpu
, cpu_is_idle
);
314 get_cpu_var(cpu
, cpu_is_idle
) = 0;
316 context_stop(get_cpulocal_var_ptr(idle_proc
));
319 restart_local_timer();
322 get_cpulocal_var(idle_interrupted
) = 1;
326 u64_t
ms_2_cpu_time(unsigned ms
)
328 return mul64u(tsc_per_ms
[cpuid
], ms
);
331 unsigned cpu_time_2_ms(u64_t cpu_time
)
333 return div64u(cpu_time
, tsc_per_ms
[cpuid
]);
338 u64_t current_tsc
, *current_idle
;
339 u64_t tsc_delta
, idle_delta
, busy
;
343 unsigned cpu
= cpuid
;
346 u64_t
*last_tsc
, *last_idle
;
348 last_tsc
= get_cpu_var_ptr(cpu
, cpu_last_tsc
);
349 last_idle
= get_cpu_var_ptr(cpu
, cpu_last_idle
);
351 idle
= get_cpu_var_ptr(cpu
, idle_proc
);;
352 read_tsc_64(¤t_tsc
);
353 current_idle
= &idle
->p_cycles
; /* ptr to idle proc */
355 /* calculate load since last cpu_load invocation */
356 if (!is_zero64(*last_tsc
)) {
357 tsc_delta
= sub64(current_tsc
, *last_tsc
);
358 idle_delta
= sub64(*current_idle
, *last_idle
);
360 busy
= sub64(tsc_delta
, idle_delta
);
361 busy
= mul64(busy
, make64(100, 0));
362 load
= ex64lo(div64(busy
, tsc_delta
));
369 *last_tsc
= current_tsc
;
370 *last_idle
= *current_idle
;
374 void busy_delay_ms(int ms
)
376 u64_t cycles
= ms_2_cpu_time(ms
), tsc0
, tsc
, tsc1
;
378 tsc1
= tsc0
+ cycles
;
379 do { read_tsc_64(&tsc
); } while(tsc
< tsc1
);