1 /* i386-specific clock functions. */
3 #include <machine/ports.h>
5 #include "kernel/clock.h"
6 #include "kernel/interrupt.h"
9 #include <sys/sched.h> /* for CP_*, CPUSTATES */
10 #if CPUSTATES != MINIX_CPUSTATES
11 /* If this breaks, the code in this file may have to be adapted accordingly. */
12 #error "MINIX_CPUSTATES value is out of sync with NetBSD's!"
19 #include "kernel/spinlock.h"
22 #include "kernel/smp.h"
/* Programming constants for the i8253/i8254 Programmable Interval Timer
 * (PIT). The control word SQUARE_WAVE selects channel 0, LSB-then-MSB
 * access, mode 3 (square wave), binary counting; TIMER_COUNT() converts a
 * desired tick frequency into the 16-bit reload value for the counter.
 */
25 #define CLOCK_ACK_BIT 0x80 /* PS/2 clock interrupt acknowledge bit */
27 /* Clock parameters. */
28 #define COUNTER_FREQ (2*TIMER_FREQ) /* counter frequency using square wave */
29 #define LATCH_COUNT 0x00 /* cc00xxxx, c = channel, x = any */
30 #define SQUARE_WAVE 0x36 /* ccaammmb, a = access, m = mode, b = BCD */
31 /* 11x11, 11 = LSB then MSB, x11 = sq wave */
32 #define TIMER_FREQ 1193182 /* clock frequency for timer in PC and AT */
33 #define TIMER_COUNT(freq) (TIMER_FREQ/(freq)) /* initial value for counter*/
/* File-scope state.
 * - pic_timer_hook: IRQ hook used when the legacy PIC timer is the tick
 *   source (registered in register_local_timer_handler()).
 * - probe_ticks/tsc0/tsc1: scratch state for the one-shot CPU frequency
 *   calibration probe (calib_cpu_handler()/estimate_cpu_freq()).
 * - tsc_per_ms/tsc_per_tick: per-CPU TSC cycle counts per millisecond and
 *   per clock tick, derived from the calibrated CPU frequency.
 * - tsc_per_state: per-CPU cycles accumulated per CPUSTATES accounting
 *   bucket (consumed by get_cpu_ticks()).
 */
35 static irq_hook_t pic_timer_hook
; /* interrupt handler hook */
37 static unsigned probe_ticks
;
38 static u64_t tsc0
, tsc1
;
/* Calibration samples the TSC over one tenth of a second worth of ticks. */
39 #define PROBE_TICKS (system_hz / 10)
41 static unsigned tsc_per_ms
[CONFIG_MAX_CPUS
];
42 static unsigned tsc_per_tick
[CONFIG_MAX_CPUS
];
43 static uint64_t tsc_per_state
[CONFIG_MAX_CPUS
][CPUSTATES
];
45 /*===========================================================================*
47 *===========================================================================*/
/* Program PIT channel 0 to fire periodic interrupts at 'freq' Hz.
 * Per the 8253/8254 protocol the control word is written first, then the
 * 16-bit reload count as LSB followed by MSB on the channel 0 data port.
 */
48 int init_8253A_timer(const unsigned freq
)
50 /* Initialize channel 0 of the 8253A timer to, e.g., 60 Hz,
51 * and register the CLOCK task's interrupt handler to be run
52 * on every clock tick.
54 outb(TIMER_MODE
, SQUARE_WAVE
); /* run continuously */
55 outb(TIMER0
, (TIMER_COUNT(freq
) & 0xff)); /* timer low byte */
56 outb(TIMER0
, TIMER_COUNT(freq
) >> 8); /* timer high byte */
61 /*===========================================================================*
63 *===========================================================================*/
/* Reprogram the PIT back to its power-on/BIOS configuration (mode 3,
 * maximum count) so firmware timekeeping works again after reboot.
 */
64 void stop_8253A_timer(void)
66 /* Reset the clock to the BIOS rate. (For rebooting.) */
67 outb(TIMER_MODE
, 0x36);
/* Architecture hook invoked from the generic clock interrupt path.
 * NOTE(review): the body is not visible in this chunk — on i386 this is
 * presumably empty or minimal; confirm against the full file.
 */
72 void arch_timer_int_handler(void)
/* IRQ 0 probe handler used only during CPU frequency calibration.
 * Counts ticks in probe_ticks; visibly it samples the TSC at the first
 * tick (tsc0) and at the PROBE_TICKS-th tick (tsc1), so that
 * estimate_cpu_freq() can derive cycles-per-tick from the delta.
 */
76 static int calib_cpu_handler(irq_hook_t
* UNUSED(hook
))
84 if (probe_ticks
== 1) {
87 else if (probe_ticks
== PROBE_TICKS
) {
91 /* just in case we are in an SMP single cpu fallback mode */
/* Measure this CPU's clock frequency by counting TSC cycles across
 * PROBE_TICKS legacy PIT ticks: install calib_cpu_handler on IRQ 0,
 * busy-wait until enough ticks arrived, remove the probe, then compute
 * cpu_freq = (tsc_delta / (PROBE_TICKS - 1)) * system_hz and publish it
 * via cpu_set_freq()/cpu_info[].
 */
96 static void estimate_cpu_freq(void)
101 irq_hook_t calib_cpu
;
103 /* set the probe, we use the legacy timer, IRQ 0 */
104 put_irq_handler(&calib_cpu
, CLOCK_IRQ
, calib_cpu_handler
);
106 /* just in case we are in an SMP single cpu fallback mode */
108 /* set the PIC timer to get some time */
111 /* loop for some time to get a sample */
112 while(probe_ticks
< PROBE_TICKS
) {
117 /* just in case we are in an SMP single cpu fallback mode */
120 /* remove the probe */
121 rm_irq_handler(&calib_cpu
);
/* PROBE_TICKS - 1 intervals elapsed between the first and last sample. */
123 tsc_delta
= tsc1
- tsc0
;
125 cpu_freq
= (tsc_delta
/ (PROBE_TICKS
- 1)) * system_hz
;
126 cpu_set_freq(cpuid
, cpu_freq
);
/* cpu_info[] stores the frequency in MHz for display purposes. */
127 cpu_info
[cpuid
].freq
= (unsigned long)(cpu_freq
/ 1000000);
128 BOOT_VERBOSE(cpu_print_freq(cpuid
));
/* Start the per-CPU tick source at 'freq' Hz. When the local APIC is
 * available its one-shot timer is used and the per-CPU tsc_per_ms /
 * tsc_per_tick tables are filled from the calibrated frequency;
 * otherwise the legacy i8253 PIT is programmed and only entry 0 of the
 * tables is used (single-CPU case).
 */
131 int init_local_timer(unsigned freq
)
134 /* if we know the address, lapic is enabled and we should use it */
136 unsigned cpu
= cpuid
;
137 tsc_per_ms
[cpu
] = (unsigned)(cpu_get_freq(cpu
) / 1000);
138 tsc_per_tick
[cpu
] = (unsigned)(cpu_get_freq(cpu
) / system_hz
);
/* 1000000/system_hz: the LAPIC one-shot period is given in microseconds. */
139 lapic_set_timer_one_shot(1000000 / system_hz
);
141 DEBUGBASIC(("Initiating legacy i8253 timer\n"));
145 init_8253A_timer(freq
);
147 /* always only 1 cpu in the system */
148 tsc_per_ms
[0] = (unsigned long)(cpu_get_freq(0) / 1000);
149 tsc_per_tick
[0] = (unsigned)(cpu_get_freq(0) / system_hz
);
/* Stop this CPU's tick source (LAPIC timer or legacy PIT).
 * NOTE(review): the body is elided in this chunk; see the full file.
 */
155 void stop_local_timer(void)
/* Re-arm this CPU's tick source; the visible path restarts the LAPIC
 * timer (the non-APIC path, if any, is elided from this chunk).
 */
168 void restart_local_timer(void)
172 lapic_restart_timer();
/* Register 'handler' as the clock tick interrupt handler. With the APIC
 * the vector is already wired in apic_idt_init(), so nothing is hooked
 * here; with the PIC the pic_timer_hook is initialized (proc_nr_e = NONE
 * marks it as a kernel-owned hook, not tied to a process endpoint) and
 * attached to CLOCK_IRQ (IRQ 0).
 */
177 int register_local_timer_handler(const irq_handler_t handler
)
181 /* Using APIC, it is configured in apic_idt_init() */
182 BOOT_VERBOSE(printf("Using LAPIC timer as tick source\n"));
186 /* Using PIC, Initialize the CLOCK's interrupt hook. */
187 pic_timer_hook
.proc_nr_e
= NONE
;
188 pic_timer_hook
.irq
= CLOCK_IRQ
;
190 put_irq_handler(&pic_timer_hook
, CLOCK_IRQ
, handler
);
/* Initialize per-CPU cycle accounting: seed the context-switch TSC
 * reference (tsc_ctr_switch) with the current TSC value, and zero the
 * cpu_load() bookkeeping (cpu_last_tsc / cpu_last_idle).
 */
196 void cycles_accounting_init(void)
199 unsigned cpu
= cpuid
;
202 read_tsc_64(get_cpu_var_ptr(cpu
, tsc_ctr_switch
));
204 get_cpu_var(cpu
, cpu_last_tsc
) = 0;
205 get_cpu_var(cpu
, cpu_last_idle
) = 0;
/* Account the CPU cycles consumed since the last context switch to the
 * outgoing execution context 'p' (a process, the KERNEL pseudo-process,
 * or the idle process), then reset the per-CPU TSC reference.
 * Also the place where the big kernel lock (BKL) is released/acquired on
 * SMP, and where per-tick/per-second CPU utilization and the CPUSTATES
 * buckets (tsc_per_state) are updated.
 * NOTE(review): several lines (BKL lock/unlock calls, branch bodies,
 * closing braces) are elided from this chunk; comments below describe
 * only what is visible.
 */
208 void context_stop(struct proc
* p
)
210 u64_t tsc
, tsc_delta
;
211 u64_t
* __tsc_ctr_switch
= get_cpulocal_var_ptr(tsc_ctr_switch
);
212 unsigned int cpu
, tpt
, counter
;
214 int must_bkl_unlock
= 0;
219 * This function is called only if we switch from kernel to user or idle
220 * or back. Therefore this is a perfect location to place the big kernel
221 * lock which will hopefully disappear soon.
223 * If we stop accounting for KERNEL we must unlock the BKL. If account
224 * for IDLE we must not hold the lock
/* Leaving the kernel context: bill elapsed cycles to the kernel. */
226 if (p
== proc_addr(KERNEL
)) {
230 tmp
= tsc
- *__tsc_ctr_switch
;
231 kernel_ticks
[cpu
] = kernel_ticks
[cpu
] + tmp
;
232 p
->p_cycles
= p
->p_cycles
+ tmp
;
/* Sample the TSC before taking the BKL so the wait time can be billed
 * separately (bkl_ticks) from useful work.
 */
238 read_tsc_64(&bkl_tsc
);
239 /* this only gives a good estimate */
240 succ
= big_kernel_lock
.val
;
246 bkl_ticks
[cpu
] = bkl_ticks
[cpu
] + tsc
- bkl_tsc
;
/* !(!(x)) normalizes to 0/1: count lock acquisitions that did not spin. */
248 bkl_succ
[cpu
] += !(!(succ
== 0));
250 p
->p_cycles
= p
->p_cycles
+ tsc
- *__tsc_ctr_switch
;
254 * Since at the time we got a scheduling IPI we might have been
255 * waiting for BKL already, we may miss it due to a similar IPI to
256 * the cpu which is already waiting for us to handle its. This
257 * results in a live-lock of these two cpus.
259 * Therefore we always check if there is one pending and if so,
260 * we handle it straight away so the other cpu can continue and
261 * we do not deadlock.
268 p
->p_cycles
= p
->p_cycles
+ tsc
- *__tsc_ctr_switch
;
272 tsc_delta
= tsc
- *__tsc_ctr_switch
;
/* Bill the same delta to the process currently charged for IPC /
 * kernel-call handling, when one is designated.
 */
275 kbill_ipc
->p_kipc_cycles
+= tsc_delta
;
280 kbill_kcall
->p_kcall_cycles
+= tsc_delta
;
285 * Perform CPU average accounting here, rather than in the generic
286 * clock handler. Doing it here offers two advantages: 1) we can
287 * account for time spent in the kernel, and 2) we properly account for
288 * CPU time spent by a process that has a lot of short-lasting activity
289 * such that it spends serious CPU time but never actually runs when a
290 * clock tick triggers. Note that clock speed inaccuracy requires that
291 * the code below is a loop, but the loop will in by far most cases not
292 * be executed more than once, and often be skipped at all.
294 tpt
= tsc_per_tick
[cpu
];
296 p
->p_tick_cycles
+= tsc_delta
;
297 while (tpt
> 0 && p
->p_tick_cycles
>= tpt
) {
298 p
->p_tick_cycles
-= tpt
;
301 * The process has spent roughly a whole clock tick worth of
302 * CPU cycles. Update its per-process CPU utilization counter.
303 * Some of the cycles may actually have been spent in a
304 * previous second, but that is not a problem.
306 cpuavg_increment(&p
->p_cpuavg
, kclockinfo
.uptime
, system_hz
);
310 * deduct the just consumed cpu cycles from the cpu time left for this
311 * process during its current quantum. Skip IDLE and other pseudo kernel
312 * tasks, except for global accounting purposes.
/* Non-negative endpoints are real (user/system) processes; negative
 * endpoints are kernel tasks and pseudo-processes.
 */
314 if (p
->p_endpoint
>= 0) {
315 /* On MINIX3, the "system" counter covers system processes. */
316 if (p
->p_priv
!= priv_addr(USER_PRIV_ID
))
318 else if (p
->p_misc_flags
& MF_NICED
)
324 p
->p_cpu_time_left
= 0;
/* Consume the quantum, saturating at zero rather than wrapping. */
326 if (tsc_delta
< p
->p_cpu_time_left
) {
327 p
->p_cpu_time_left
-= tsc_delta
;
329 p
->p_cpu_time_left
= 0;
333 /* On MINIX3, the "interrupts" counter covers the kernel. */
334 if (p
->p_endpoint
== IDLE
)
/* Fold the delta into the CPUSTATES bucket chosen above. */
340 tsc_per_state
[cpu
][counter
] += tsc_delta
;
/* New reference point for the next context_stop() on this CPU. */
342 *__tsc_ctr_switch
= tsc
;
345 if(must_bkl_unlock
) {
/* Called when an interrupt ends an idle period: clear the per-CPU idle
 * flag, bill the idle cycles via context_stop(idle_proc), restart the
 * local timer, and mark that idle was interrupted so the idle loop can
 * re-evaluate its state.
 */
351 void context_stop_idle(void)
355 unsigned cpu
= cpuid
;
358 is_idle
= get_cpu_var(cpu
, cpu_is_idle
);
359 get_cpu_var(cpu
, cpu_is_idle
) = 0;
361 context_stop(get_cpulocal_var_ptr(idle_proc
));
364 restart_local_timer();
367 get_cpulocal_var(idle_interrupted
) = 1;
/* Convert milliseconds to TSC cycles for the current CPU. The cast to
 * u64_t before the multiply avoids 32-bit overflow for large 'ms'.
 */
371 u64_t
ms_2_cpu_time(unsigned ms
)
373 return (u64_t
)tsc_per_ms
[cpuid
] * ms
;
/* Convert TSC cycles to milliseconds for the current CPU (integer
 * division truncates toward zero).
 */
376 unsigned cpu_time_2_ms(u64_t cpu_time
)
378 return (unsigned long)(cpu_time
/ tsc_per_ms
[cpuid
]);
/* Body fragment of the per-CPU load calculation (the function header is
 * outside this chunk). Visible logic: load = busy/total over the window
 * since the previous invocation, where busy = (TSC delta) - (idle
 * process cycle delta); the window bookkeeping (cpu_last_tsc,
 * cpu_last_idle) is then advanced.
 */
383 u64_t current_tsc
, *current_idle
;
384 u64_t tsc_delta
, idle_delta
, busy
;
388 unsigned cpu
= cpuid
;
391 u64_t
*last_tsc
, *last_idle
;
393 last_tsc
= get_cpu_var_ptr(cpu
, cpu_last_tsc
);
394 last_idle
= get_cpu_var_ptr(cpu
, cpu_last_idle
);
/* NOTE(review): trailing double semicolon below is harmless but stray. */
396 idle
= get_cpu_var_ptr(cpu
, idle_proc
);;
/* NOTE(review): '¤t_tsc' below looks like mojibake — '&current_tsc'
 * with '&curren' decoded as the HTML entity for '¤'. Restore to
 * read_tsc_64(&current_tsc) against the upstream file.
 */
397 read_tsc_64(¤t_tsc
);
398 current_idle
= &idle
->p_cycles
; /* ptr to idle proc */
400 /* calculate load since last cpu_load invocation */
402 tsc_delta
= current_tsc
- *last_tsc
;
403 idle_delta
= *current_idle
- *last_idle
;
405 busy
= tsc_delta
- idle_delta
;
407 load
= ex64lo(busy
/ tsc_delta
);
/* Roll the window forward for the next invocation. */
414 *last_tsc
= current_tsc
;
415 *last_idle
= *current_idle
;
/* Busy-wait for approximately 'ms' milliseconds by spinning on the TSC
 * until tsc0 + ms_2_cpu_time(ms) cycles have elapsed.
 * NOTE(review): the initial read_tsc_64(&tsc0) is elided from this
 * chunk; tsc0 must be sampled before computing tsc1.
 */
419 void busy_delay_ms(int ms
)
421 u64_t cycles
= ms_2_cpu_time(ms
), tsc0
, tsc
, tsc1
;
423 tsc1
= tsc0
+ cycles
;
424 do { read_tsc_64(&tsc
); } while(tsc
< tsc1
);
429 * Return the number of clock ticks spent in each of a predefined number of
/* Fill ticks[] with per-CPUSTATES tick counts for 'cpu', derived by
 * dividing the accumulated TSC cycles per state by the cycles-per-tick
 * calibration for that CPU. (The return type line is elided here.)
 */
433 get_cpu_ticks(unsigned int cpu
, uint64_t ticks
[CPUSTATES
])
437 /* TODO: make this inter-CPU safe! */
438 for (i
= 0; i
< CPUSTATES
; i
++)
439 ticks
[i
] = tsc_per_state
[cpu
][i
] / tsc_per_tick
[cpu
];