/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*	Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T	*/
/*	  All Rights Reserved	*/

/*
 * Copyright (c) 1988, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2013, Joyent, Inc. All rights reserved.
 * Copyright (c) 2015, Josef 'Jeff' Sipek <jeffpc@josefsipek.net>
 * Copyright (c) 2016 by Delphix. All rights reserved.
 */

#include <sys/param.h>
#include <sys/t_lock.h>
#include <sys/types.h>
#include <sys/tuneable.h>
#include <sys/sysmacros.h>
#include <sys/systm.h>
#include <sys/cpuvar.h>
#include <sys/callo.h>
#include <sys/cmn_err.h>
#include <sys/vmsystm.h>
#include <sys/class.h>
#include <sys/debug.h>
#include <sys/vtrace.h>
#include <sys/atomic.h>
#include <sys/dumphdr.h>
#include <sys/archsystm.h>
#include <sys/fs/swapnode.h>
#include <sys/panic.h>
#include <sys/msacct.h>
#include <sys/mem_cage.h>
#include <sys/cyclic.h>
#include <sys/cpupart.h>
#include <sys/ddi_periodic.h>
#include <sys/random.h>
#include <sys/modctl.h>
#include <sys/timex.h>
#include <sys/inttypes.h>
#include <sys/sunddi.h>
#include <sys/clock_impl.h>

/*
 * clock() is called straight from the clock cyclic; see clock_init().
 */

extern kcondvar_t	fsflush_cv;
extern sysinfo_t	sysinfo;
extern vminfo_t	vminfo;
extern int	idleswtch;	/* flag set while idle in pswtch() */
extern hrtime_t volatile devinfo_freeze;

/*
 * high-precision avenrun values.  These are needed to make the
 * regular avenrun values accurate.
 */
static uint64_t hp_avenrun[3];
int	avenrun[3];		/* FSCALED average run queue lengths */
time_t	time;	/* time in seconds since 1970 - for compatibility only */

static struct loadavg_s loadavg;
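
/*
 * loadavg_update() below appends one sample per second to the lg_loads
 * ring buffer in the loadavg_s structure above; genloadavg() then averages
 * the most recent S_MOVAVG_SZ entries to produce the 10-second moving
 * average that clock() feeds to calcloadavg().
 */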

/*
 * Phase/frequency-lock loop (PLL/FLL) definitions
 *
 * The following variables are read and set by the ntp_adjtime() system
 * call.
 *
 * time_state shows the state of the system clock, with values defined
 * in the timex.h header file.
 *
 * time_status shows the status of the system clock, with bits defined
 * in the timex.h header file.
 *
 * time_offset is used by the PLL/FLL to adjust the system time in small
 * increments.
 *
 * time_constant determines the bandwidth or "stiffness" of the PLL.
 *
 * time_tolerance determines maximum frequency error or tolerance of the
 * CPU clock oscillator and is a property of the architecture; however,
 * in principle it could change as result of the presence of external
 * discipline signals, for instance.
 *
 * time_precision is usually equal to the kernel tick variable; however,
 * in cases where a precision clock counter or external clock is
 * available, the resolution can be much less than this and depend on
 * whether the external clock is working or not.
 *
 * time_maxerror is initialized by a ntp_adjtime() call and increased by
 * the kernel once each second to reflect the maximum error bound
 * growth.
 *
 * time_esterror is set and read by the ntp_adjtime() call, but
 * otherwise not used by the kernel.
 */
int32_t time_state = TIME_OK;	/* clock state */
int32_t time_status = STA_UNSYNC;	/* clock status bits */
int32_t time_offset = 0;	/* time offset (us) */
int32_t time_constant = 0;	/* pll time constant */
int32_t time_tolerance = MAXFREQ;	/* frequency tolerance (scaled ppm) */
int32_t time_precision = 1;	/* clock precision (us) */
int32_t time_maxerror = MAXPHASE;	/* maximum error (us) */
int32_t time_esterror = MAXPHASE;	/* estimated error (us) */

/*
 * The following variables establish the state of the PLL/FLL and the
 * residual time and frequency offset of the local clock. The scale
 * factors are defined in the timex.h header file.
 *
 * time_phase and time_freq are the phase increment and the frequency
 * increment, respectively, of the kernel time variable.
 *
 * time_freq is set via ntp_adjtime() from a value stored in a file when
 * the synchronization daemon is first started. Its value is retrieved
 * via ntp_adjtime() and written to the file about once per hour by the
 * daemon.
 *
 * time_adj is the adjustment added to the value of tick at each timer
 * interrupt and is recomputed from time_phase and time_freq at each
 * timer interrupt.
 *
 * time_reftime is the second's portion of the system time at the last
 * call to ntp_adjtime(). It is used to adjust the time_freq variable
 * and to increase the time_maxerror as the time since last update
 * increases.
 */
int32_t time_phase = 0;		/* phase offset (scaled us) */
int32_t time_freq = 0;		/* frequency offset (scaled ppm) */
int32_t time_adj = 0;		/* tick adjust (scaled 1 / hz) */
int32_t time_reftime = 0;	/* time at last adjustment (s) */

/*
 * The scale factors of the following variables are defined in the
 * timex.h header file.
 *
 * pps_time contains the time at each calibration interval, as read by
 * microtime(). pps_count counts the seconds of the calibration
 * interval, the duration of which is nominally pps_shift in powers of
 * two.
 *
 * pps_offset is the time offset produced by the time median filter
 * pps_tf[], while pps_jitter is the dispersion (jitter) measured by
 * this filter.
 *
 * pps_freq is the frequency offset produced by the frequency median
 * filter pps_ff[], while pps_stabil is the dispersion (wander) measured
 * by this filter.
 *
 * pps_usec is latched from a high resolution counter or external clock
 * at pps_time. Here we want the hardware counter contents only, not the
 * contents plus the time_tv.usec as usual.
 *
 * pps_valid counts the number of seconds since the last PPS update. It
 * is used as a watchdog timer to disable the PPS discipline should the
 * PPS signal be lost.
 *
 * pps_glitch counts the number of seconds since the beginning of an
 * offset burst more than tick/2 from current nominal offset. It is used
 * mainly to suppress error bursts due to priority conflicts between the
 * PPS interrupt and timer interrupt.
 *
 * pps_intcnt counts the calibration intervals for use in the interval-
 * adaptation algorithm. It's just too complicated for words.
 */
struct timeval pps_time;	/* kernel time at last interval */
int32_t pps_tf[] = {0, 0, 0};	/* pps time offset median filter (us) */
int32_t pps_offset = 0;		/* pps time offset (us) */
int32_t pps_jitter = MAXTIME;	/* time dispersion (jitter) (us) */
int32_t pps_ff[] = {0, 0, 0};	/* pps frequency offset median filter */
int32_t pps_freq = 0;		/* frequency offset (scaled ppm) */
int32_t pps_stabil = MAXFREQ;	/* frequency dispersion (scaled ppm) */
int32_t pps_usec = 0;		/* microsec counter at last interval */
int32_t pps_valid = PPS_VALID;	/* pps signal watchdog counter */
int32_t pps_glitch = 0;		/* pps signal glitch counter */
int32_t pps_count = 0;		/* calibration interval counter (s) */
int32_t pps_shift = PPS_SHIFT;	/* interval duration (s) (shift) */
int32_t pps_intcnt = 0;		/* intervals at current duration */

/*
 * PPS signal quality monitors
 *
 * pps_jitcnt counts the seconds that have been discarded because the
 * jitter measured by the time median filter exceeds the limit MAXTIME
 * (100 us).
 *
 * pps_calcnt counts the frequency calibration intervals, which are
 * variable from 4 s to 256 s.
 *
 * pps_errcnt counts the calibration intervals which have been discarded
 * because the wander exceeds the limit MAXFREQ (100 ppm) or where the
 * calibration interval jitter exceeds two ticks.
 *
 * pps_stbcnt counts the calibration intervals that have been discarded
 * because the frequency wander exceeds the limit MAXFREQ / 4 (25 us).
 */
int32_t pps_jitcnt = 0;		/* jitter limit exceeded */
int32_t pps_calcnt = 0;		/* calibration intervals */
int32_t pps_errcnt = 0;		/* calibration errors */
int32_t pps_stbcnt = 0;		/* stability limit exceeded */

/*
 * Hybrid lbolt implementation:
 *
 * The service historically provided by the lbolt and lbolt64 variables has
 * been replaced by the ddi_get_lbolt() and ddi_get_lbolt64() routines, and the
 * original symbols removed from the system. The once clock driven variables are
 * now implemented in an event driven fashion, backed by gethrtime() coarsened
 * to the appropriate clock resolution. The default event driven implementation
 * is complemented by a cyclic driven one, active only during periods of intense
 * activity around the DDI lbolt routines, when a lbolt specific cyclic is
 * reprogrammed to fire at a clock tick interval to serve consumers of lbolt who
 * rely on the original low cost of consulting a memory position.
 *
 * The implementation uses the number of calls to these routines and the
 * frequency of these to determine when to transition from event to cyclic
 * driven and vice-versa. These values are kept on a per CPU basis for
 * scalability reasons and to prevent CPUs from constantly invalidating a single
 * cache line when modifying a global variable. The transition from event to
 * cyclic mode happens once the thresholds are crossed, and activity on any CPU
 * can cause such transition.
 *
 * The lbolt_hybrid function pointer is called by ddi_get_lbolt() and
 * ddi_get_lbolt64(), and will point to lbolt_event_driven() or
 * lbolt_cyclic_driven() according to the current mode. When the thresholds
 * are exceeded, lbolt_event_driven() will reprogram the lbolt cyclic to
 * fire at a nsec_per_tick interval and increment an internal variable at
 * each firing. lbolt_hybrid will then point to lbolt_cyclic_driven(), which
 * will simply return the value of such variable. lbolt_cyclic() will attempt
 * to shut itself off at each threshold interval (sampling period for calls
 * to the DDI lbolt routines), and return to the event driven mode, but will
 * be prevented from doing so if lbolt_cyclic_driven() is being heavily used.
 *
 * lbolt_bootstrap is used during boot to serve lbolt consumers who don't wait
 * for the cyclic subsystem to be initialized.
 */
int64_t lbolt_bootstrap(void);
int64_t lbolt_event_driven(void);
int64_t lbolt_cyclic_driven(void);
int64_t (*lbolt_hybrid)(void) = lbolt_bootstrap;
uint_t lbolt_ev_to_cyclic(caddr_t, caddr_t);

/*
 * lbolt's cyclic, installed by clock_init().
 */
static void lbolt_cyclic(void);
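
/*
 * While in cyclic driven mode, lbolt_cyclic() runs every nsec_per_tick and
 * increments an internal counter that lbolt_cyclic_driven() returns directly;
 * see the "Hybrid lbolt implementation" block comment above.
 */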

/*
 * Tunable to keep lbolt in cyclic driven mode. This will prevent the system
 * from switching back to event driven, once it reaches cyclic mode.
 */
static boolean_t lbolt_cyc_only = B_FALSE;

/*
 * Cache aligned, per CPU structure with lbolt usage statistics.
 */
static lbolt_cpu_t *lb_cpu;

/*
 * Single, cache aligned, structure with all the information required by
 * the lbolt implementation.
 */
lbolt_info_t *lb_info;

int one_sec = 1;		/* turned on once every second */
static int fsflushcnt;		/* counter for t_fsflushr */
int dosynctodr = 1;	/* patchable; enable/disable sync to TOD chip */
int tod_needsync = 0;	/* need to sync tod chip with software time */
static int tod_broken = 0;	/* clock chip doesn't work */
time_t boot_time = 0;		/* Boot time in seconds since 1970 */
cyclic_id_t clock_cyclic;	/* clock()'s cyclic_id */
cyclic_id_t deadman_cyclic;	/* deadman()'s cyclic_id */

extern void clock_tick_schedule(int);

static int lgrp_ticks;		/* counter to schedule lgrp load calcs */

/*
 * for tod fault detection
 */
#define	TOD_REF_FREQ		((longlong_t)(NANOSEC))
#define	TOD_STALL_THRESHOLD	(TOD_REF_FREQ * 3 / 2)
#define	TOD_JUMP_THRESHOLD	(TOD_REF_FREQ / 2)
#define	TOD_FILTER_N		4
#define	TOD_FILTER_SETTLE	(4 * TOD_FILTER_N)
static enum tod_fault_type tod_faulted = TOD_NOFAULT;

static int tod_status_flag = 0;		/* used by tod_validate() */

static hrtime_t prev_set_tick = 0;	/* gethrtime() prior to tod_set() */
static time_t prev_set_tod = 0;		/* tv_sec value passed to tod_set() */

/* patchable via /etc/system */
int tod_validate_enable = 1;

/* Diagnose/Limit messages about delay(9F) called from interrupt context */
int delay_from_interrupt_diagnose = 0;
volatile uint32_t delay_from_interrupt_msg = 20;

/*
 * On non-SPARC systems, TOD validation must be deferred until gethrtime
 * returns non-zero values (after mach_clkinit's execution).
 * On SPARC systems, it must be deferred until after hrtime_base
 * and hres_last_tick are set (in the first invocation of hres_tick).
 * Since in both cases the prerequisites occur before the invocation of
 * tod_get() in clock(), the deferment is lifted there.
 */
static boolean_t tod_validate_deferred = B_TRUE;

/*
 * tod_fault_table[] must be aligned with
 * enum tod_fault_type in systm.h
 */
static char *tod_fault_table[] = {
	"Reversed",			/* TOD_REVERSED */
	"Stalled",			/* TOD_STALLED */
	"Jumped",			/* TOD_JUMPED */
	"Changed in Clock Rate",	/* TOD_RATECHANGED */
	"Is Read-Only"			/* TOD_RDONLY */
	/*
	 * no strings needed for TOD_NOFAULT
	 */
};

/*
 * test hook for tod broken detection in tod_validate
 */
int tod_unit_test = 0;
time_t tod_test_injector;

#define	CLOCK_ADJ_HIST_SIZE	4

static int adj_hist_entry;

int64_t clock_adj_hist[CLOCK_ADJ_HIST_SIZE];
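
/*
 * clock_adj_hist[] is a small ring buffer: clock() records the current lbolt
 * value each time the TOD chip is used to adjust the software clock, keeping
 * the CLOCK_ADJ_HIST_SIZE most recent adjustments.
 */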

static void calcloadavg(int, uint64_t *);
static int genloadavg(struct loadavg_s *);
static void loadavg_update();

void (*cmm_clock_callout)() = NULL;
void (*cpucaps_clock_callout)() = NULL;

extern clock_t clock_tick_proc_max;

static int64_t deadman_counter = 0;

static void
clock(void)
{
	kthread_t *t;
	uint_t nrunnable;
	uint_t w_io;
	cpu_t *cp;
	cpupart_t *cpupart;
	extern void set_freemem();
	void (*funcp)();
	int32_t ltemp;
	int64_t lltemp;
	int s;
	int do_lgrp_load;
	int drift, absdrift;
	timestruc_t tod;
	int i;
	clock_t now = LBOLT_NO_ACCOUNT;	/* current tick */
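
	/*
	 * Note that LBOLT_NO_ACCOUNT reads the current tick without being
	 * counted against the per-CPU call thresholds that can switch lbolt
	 * into cyclic driven mode (see the lbolt block comment above).
	 */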

	/*
	 * Make sure that 'freemem' does not drift too far from the truth
	 */
	set_freemem();

	/*
	 * Before the section which is repeated is executed, we do
	 * the time delta processing which occurs every clock tick
	 *
	 * There is additional processing which happens every time
	 * the nanosecond counter rolls over which is described
	 * below - see the section which begins with : if (one_sec)
	 *
	 * This section marks the beginning of the precision-kernel
	 * code fragment executed every timer interrupt.
	 *
	 * First, compute the phase adjustment. If the low-order bits
	 * (time_phase) of the update overflow, bump the higher order
	 * bits (time_update).
	 */
	time_phase += time_adj;
	if (time_phase <= -FINEUSEC) {
		ltemp = -time_phase / SCALE_PHASE;
		time_phase += ltemp * SCALE_PHASE;
		timedelta -= ltemp * (NANOSEC/MICROSEC);
	} else if (time_phase >= FINEUSEC) {
		ltemp = time_phase / SCALE_PHASE;
		time_phase -= ltemp * SCALE_PHASE;
		timedelta += ltemp * (NANOSEC/MICROSEC);
	}

	/*
	 * End of precision-kernel code fragment which is processed
	 * every timer interrupt.
	 *
	 * Continue with the interrupt processing as scheduled.
	 */
	/*
	 * Count the number of runnable threads and the number waiting
	 * for some form of I/O to complete -- gets added to
	 * sysinfo.waiting.  To know the state of the system, must add
	 * wait counts from all CPUs.  Also add up the per-partition
	 * statistics.
	 */
	w_io = 0;
	nrunnable = 0;

	/*
	 * keep track of when to update lgrp/part loads
	 */
	do_lgrp_load = 0;
	if (lgrp_ticks++ >= hz / 10) {
		lgrp_ticks = 0;
		do_lgrp_load = 1;
	}

	/*
	 * First count the threads waiting on kpreempt queues in each
	 * CPU partition.
	 */
	cpupart = cp_list_head;
	do {
		uint_t cpupart_nrunnable =
		    cpupart->cp_kp_queue.disp_nrunnable;

		cpupart->cp_updates++;
		nrunnable += cpupart_nrunnable;
		cpupart->cp_nrunnable_cum += cpupart_nrunnable;
		if (one_sec) {
			cpupart->cp_nrunning = 0;
			cpupart->cp_nrunnable = cpupart_nrunnable;
		}
	} while ((cpupart = cpupart->cp_next) != cp_list_head);

	/* Now count the per-CPU statistics. */
	cp = cpu_list;
	do {
		uint_t cpu_nrunnable = cp->cpu_disp->disp_nrunnable;

		nrunnable += cpu_nrunnable;
		cpupart = cp->cpu_part;
		cpupart->cp_nrunnable_cum += cpu_nrunnable;
		if (one_sec) {
			cpupart->cp_nrunnable += cpu_nrunnable;
			/*
			 * Update user, system, and idle cpu times.
			 */
			cpupart->cp_nrunning++;
			/*
			 * w_io is used to update sysinfo.waiting during
			 * one_second processing below. Only gather w_io
			 * information when we walk the list of cpus if we're
			 * going to perform one_second processing.
			 */
			w_io += CPU_STATS(cp, sys.iowait);
		}

		if (one_sec && (cp->cpu_flags & CPU_EXISTS)) {
			int load, change;
			hrtime_t intracct, intrused;
			const hrtime_t maxnsec = 1000000000;
			const int precision = 100;

			/*
			 * Estimate interrupt load on this cpu each second.
			 * Computes cpu_intrload as %utilization (0-99).
			 */

			/* add up interrupt time from all micro states */
			for (intracct = 0, i = 0; i < NCMSTATES; i++)
				intracct += cp->cpu_intracct[i];
			scalehrtime(&intracct);

			/* compute nsec used in the past second */
			intrused = intracct - cp->cpu_intrlast;
			cp->cpu_intrlast = intracct;

			/* limit the value for safety (and the first pass) */
			if (intrused >= maxnsec)
				intrused = maxnsec - 1;

			/* calculate %time in interrupt */
			load = (precision * intrused) / maxnsec;
			ASSERT(load >= 0 && load < precision);
			change = cp->cpu_intrload - load;

			/* jump to new max, or decay the old max */
			if (change < 0)
				cp->cpu_intrload = load;
			else if (change > 0)
				cp->cpu_intrload -= (change + 3) / 4;

			DTRACE_PROBE3(cpu_intrload,
			    cpu_t *, cp,
			    hrtime_t, intracct,
			    hrtime_t, intrused);
		}

		if (do_lgrp_load &&
		    (cp->cpu_flags & CPU_EXISTS)) {
			/*
			 * When updating the lgroup's load average,
			 * account for the thread running on the CPU.
			 * If the CPU is the current one, then we need
			 * to account for the underlying thread which
			 * got the clock interrupt not the thread that is
			 * handling the interrupt and calculating the load
			 * average
			 */
			t = cp->cpu_thread;
			if (CPU == cp)
				t = t->t_intr;

			/*
			 * Account for the load average for this thread if
			 * it isn't the idle thread or it is on the interrupt
			 * stack and not the current CPU handling the clock
			 * interrupt
			 */
			if ((t && t != cp->cpu_idle_thread) || (CPU != cp &&
			    CPU_ON_INTR(cp))) {
				if (t->t_lpl == cp->cpu_lpl) {
					/* local thread */
					cpu_nrunnable++;
				} else {
					/*
					 * This is a remote thread, charge it
					 * against its home lgroup.  Note that
					 * we notice that a thread is remote
					 * only if it's currently executing.
					 * This is a reasonable approximation,
					 * since queued remote threads are rare.
					 * Note also that if we didn't charge
					 * it to its home lgroup, remote
					 * execution would often make a system
					 * appear balanced even though it was
					 * not, and thread placement/migration
					 * would often not be done correctly.
					 */
					lgrp_loadavg(t->t_lpl,
					    LGRP_LOADAVG_IN_THREAD_MAX, 0);
				}
			}
			lgrp_loadavg(cp->cpu_lpl,
			    cpu_nrunnable * LGRP_LOADAVG_IN_THREAD_MAX, 1);
		}
	} while ((cp = cp->cpu_next) != cpu_list);

	clock_tick_schedule(one_sec);

	/*
	 * Check for a callout that needs to be called from the clock
	 * thread to support the membership protocol in a clustered
	 * system.  Copy the function pointer so that we can reset
	 * this to NULL if needed.
	 */
	if ((funcp = cmm_clock_callout) != NULL)
		(*funcp)();

	if ((funcp = cpucaps_clock_callout) != NULL)
		(*funcp)();

	/*
	 * Wakeup the cageout thread waiters once per second.
	 */

	/*
	 * Beginning of precision-kernel code fragment executed
	 * every second.
	 *
	 * On rollover of the second the phase adjustment to be
	 * used for the next second is calculated.  Also, the
	 * maximum error is increased by the tolerance.  If the
	 * PPS frequency discipline code is present, the phase is
	 * increased to compensate for the CPU clock oscillator
	 * frequency error.
	 *
	 * On a 32-bit machine and given parameters in the timex.h
	 * header file, the maximum phase adjustment is +-512 ms
	 * and maximum frequency offset is (a tad less than)
	 * +-512 ppm. On a 64-bit machine, you shouldn't need to ask.
	 */
	time_maxerror += time_tolerance / SCALE_USEC;

	/*
	 * Leap second processing. If in leap-insert state at
	 * the end of the day, the system clock is set back one
	 * second; if in leap-delete state, the system clock is
	 * set ahead one second. The microtime() routine or
	 * external clock driver will ensure that reported time
	 * is always monotonic. The ugly divides should be
	 * replaced.
	 */
	switch (time_state) {

	case TIME_OK:
		if (time_status & STA_INS)
			time_state = TIME_INS;
		else if (time_status & STA_DEL)
			time_state = TIME_DEL;
		break;

	case TIME_INS:
		if (hrestime.tv_sec % 86400 == 0) {
			s = hr_clock_lock();
			hrestime.tv_sec--;
			hr_clock_unlock(s);
			time_state = TIME_OOP;
		}
		break;

	case TIME_DEL:
		if ((hrestime.tv_sec + 1) % 86400 == 0) {
			s = hr_clock_lock();
			hrestime.tv_sec++;
			hr_clock_unlock(s);
			time_state = TIME_WAIT;
		}
		break;

	case TIME_OOP:
		time_state = TIME_WAIT;
		break;

	case TIME_WAIT:
		if (!(time_status & (STA_INS | STA_DEL)))
			time_state = TIME_OK;
		break;
	}

	/*
	 * Compute the phase adjustment for the next second. In
	 * PLL mode, the offset is reduced by a fixed factor
	 * times the time constant. In FLL mode the offset is
	 * used directly. In either mode, the maximum phase
	 * adjustment for each second is clamped so as to spread
	 * the adjustment over not more than the number of
	 * seconds between updates.
	 */
	if (time_offset == 0)
		time_adj = 0;
	else if (time_offset < 0) {
		lltemp = -time_offset;
		if (!(time_status & STA_FLL)) {
			if ((1 << time_constant) >= SCALE_KG)
				lltemp *= (1 << time_constant) /
				    SCALE_KG;
			else
				lltemp = (lltemp / SCALE_KG) >>
				    time_constant;
		}
		if (lltemp > (MAXPHASE / MINSEC) * SCALE_UPDATE)
			lltemp = (MAXPHASE / MINSEC) * SCALE_UPDATE;
		time_offset += lltemp;
		time_adj = -(lltemp * SCALE_PHASE) / hz / SCALE_UPDATE;
	} else {
		lltemp = time_offset;
		if (!(time_status & STA_FLL)) {
			if ((1 << time_constant) >= SCALE_KG)
				lltemp *= (1 << time_constant) /
				    SCALE_KG;
			else
				lltemp = (lltemp / SCALE_KG) >>
				    time_constant;
		}
		if (lltemp > (MAXPHASE / MINSEC) * SCALE_UPDATE)
			lltemp = (MAXPHASE / MINSEC) * SCALE_UPDATE;
		time_offset -= lltemp;
		time_adj = (lltemp * SCALE_PHASE) / hz / SCALE_UPDATE;
	}

	/*
	 * Compute the frequency estimate and additional phase
	 * adjustment due to frequency error for the next
	 * second. When the PPS signal is engaged, gnaw on the
	 * watchdog counter and update the frequency computed by
	 * the pll and the PPS signal.
	 */
	pps_valid++;
	if (pps_valid == PPS_VALID) {
		pps_jitter = MAXTIME;
		pps_stabil = MAXFREQ;
		time_status &= ~(STA_PPSSIGNAL | STA_PPSJITTER |
		    STA_PPSWANDER | STA_PPSERROR);
	}
	lltemp = time_freq + pps_freq;

	if (lltemp)
		time_adj += (lltemp * SCALE_PHASE) / (SCALE_USEC * hz);

	/*
	 * End of precision kernel-code fragment executed
	 * every second.
	 *
	 * The section below should be modified if we are planning
	 * to use NTP for synchronization.
	 *
	 * Note: the clock synchronization code now assumes
	 * the following:
	 *	- if dosynctodr is 1, then compute the drift between
	 *	  the tod chip and software time and adjust one or
	 *	  the other depending on the circumstances
	 *
	 *	- if dosynctodr is 0, then the tod chip is independent
	 *	  of the software clock and should not be adjusted,
	 *	  but allowed to free run.  This allows NTP to sync.
	 *	  hrestime without any interference from the tod chip.
	 */

	tod_validate_deferred = B_FALSE;
	mutex_enter(&tod_lock);
	tod = tod_get();
	drift = tod.tv_sec - hrestime.tv_sec;
	absdrift = (drift >= 0) ? drift : -drift;
	if (tod_needsync || absdrift > 1) {
		if (absdrift > 2) {
			if (!tod_broken && tod_faulted == TOD_NOFAULT) {
				s = hr_clock_lock();
				hrestime = tod;
				membar_enter();	/* hrestime visible */
				timedelta = 0;
				timechanged++;
				tod_needsync = 0;
				hr_clock_unlock(s);
			}
		} else {
			if (tod_needsync || !dosynctodr) {
				gethrestime(&tod);
				tod_set(tod);
				s = hr_clock_lock();
				if (timedelta == 0)
					tod_needsync = 0;
				hr_clock_unlock(s);
			} else {
				/*
				 * If the drift is 2 seconds on the
				 * money, then the TOD is adjusting
				 * the clock;  record that.
				 */
				clock_adj_hist[adj_hist_entry++ %
				    CLOCK_ADJ_HIST_SIZE] = now;
				s = hr_clock_lock();
				timedelta = (int64_t)drift*NANOSEC;
				hr_clock_unlock(s);
			}
		}
	}
	one_sec = 0;
	time = gethrestime_sec();  /* for crusty old kmem readers */
	mutex_exit(&tod_lock);

	/*
	 * Some drivers still depend on this... XXX
	 */
	cv_broadcast(&lbolt_cv);

	vminfo.freemem += freemem;
	{
		pgcnt_t maxswap, resv, free;
		pgcnt_t avail =
		    MAX((spgcnt_t)(availrmem - swapfs_minfree), 0);

		maxswap = k_anoninfo.ani_mem_resv +
		    k_anoninfo.ani_max + avail;
		/* Update ani_free */
		free = k_anoninfo.ani_free + avail;
		resv = k_anoninfo.ani_phys_resv +
		    k_anoninfo.ani_mem_resv;

		vminfo.swap_resv += resv;
		/* number of reserved and allocated pages */
		if (maxswap < free)
			cmn_err(CE_WARN, "clock: maxswap < free");
		if (maxswap < resv)
			cmn_err(CE_WARN, "clock: maxswap < resv");
		vminfo.swap_alloc += maxswap - free;
		vminfo.swap_avail += maxswap - resv;
		vminfo.swap_free += free;
	}

	if (nrunnable) {
		sysinfo.runque += nrunnable;
		sysinfo.runocc++;
	}
	if (nswapped) {
		sysinfo.swpque += nswapped;
		sysinfo.swpocc++;
	}
	sysinfo.waiting += w_io;
	sysinfo.updates++;

	/*
	 * Wake up fsflush to write out DELWRI
	 * buffers, dirty pages and other cached
	 * administrative data, e.g. inodes.
	 */
	if (--fsflushcnt <= 0) {
		fsflushcnt = tune.t_fsflushr;
		cv_signal(&fsflush_cv);
	}

	calcloadavg(genloadavg(&loadavg), hp_avenrun);
	for (i = 0; i < 3; i++)
		/*
		 * At the moment avenrun[] can only hold 31
		 * bits of load average as it is a signed
		 * int in the API. We need to ensure that
		 * hp_avenrun[i] >> (16 - FSHIFT) will not be
		 * too large. If it is, we put the largest value
		 * that we can use into avenrun[i]. This is
		 * kludgey, but about all we can do until
		 * avenrun[] is declared as an array of uint64[].
		 */
		if (hp_avenrun[i] < ((uint64_t)1<<(31+16-FSHIFT)))
			avenrun[i] = (int32_t)(hp_avenrun[i] >>
			    (16 - FSHIFT));
		else
			avenrun[i] = 0x7fffffff;

	cpupart = cp_list_head;
	do {
		calcloadavg(genloadavg(&cpupart->cp_loadavg),
		    cpupart->cp_hp_avenrun);
	} while ((cpupart = cpupart->cp_next) != cp_list_head);
}

void
clock_init(void)
{
	cyc_handler_t clk_hdlr, lbolt_hdlr;
	cyc_time_t clk_when, lbolt_when;
	int i, sz;
	intptr_t buf;

	/*
	 * Setup handler and timer for the clock cyclic.
	 */
	clk_hdlr.cyh_func = (cyc_func_t)clock;
	clk_hdlr.cyh_level = CY_LOCK_LEVEL;
	clk_hdlr.cyh_arg = NULL;

	clk_when.cyt_when = 0;
	clk_when.cyt_interval = nsec_per_tick;

	/*
	 * The lbolt cyclic will be reprogrammed to fire at a nsec_per_tick
	 * interval to satisfy performance needs of the DDI lbolt consumers.
	 * It is off by default.
	 */
	lbolt_hdlr.cyh_func = (cyc_func_t)lbolt_cyclic;
	lbolt_hdlr.cyh_level = CY_LOCK_LEVEL;
	lbolt_hdlr.cyh_arg = NULL;

	lbolt_when.cyt_interval = nsec_per_tick;

	/*
	 * Allocate cache line aligned space for the per CPU lbolt data and
	 * lbolt info structures, and initialize them with their default
	 * values. Note that these structures are also cache line sized.
	 */
	sz = sizeof (lbolt_info_t) + CPU_CACHE_COHERENCE_SIZE;
	buf = (intptr_t)kmem_zalloc(sz, KM_SLEEP);
	lb_info = (lbolt_info_t *)P2ROUNDUP(buf, CPU_CACHE_COHERENCE_SIZE);

	if (hz != HZ_DEFAULT)
		lb_info->lbi_thresh_interval = LBOLT_THRESH_INTERVAL *
		    (hz/HZ_DEFAULT);
	else
		lb_info->lbi_thresh_interval = LBOLT_THRESH_INTERVAL;

	lb_info->lbi_thresh_calls = LBOLT_THRESH_CALLS;

	sz = (sizeof (lbolt_cpu_t) * max_ncpus) + CPU_CACHE_COHERENCE_SIZE;
	buf = (intptr_t)kmem_zalloc(sz, KM_SLEEP);
	lb_cpu = (lbolt_cpu_t *)P2ROUNDUP(buf, CPU_CACHE_COHERENCE_SIZE);

	for (i = 0; i < max_ncpus; i++)
		lb_cpu[i].lbc_counter = lb_info->lbi_thresh_calls;

	/*
	 * Install the softint used to switch between event and cyclic driven
	 * lbolt. We use a soft interrupt to make sure the context of the
	 * cyclic reprogram call is safe.
	 */
	lbolt_softint_add();

	/*
	 * Since the hybrid lbolt implementation is based on a hardware counter
	 * that is reset at every hardware reboot and that we'd like to have
	 * the lbolt value starting at zero after both a hardware and a fast
	 * reboot, we calculate the number of clock ticks the system's been up
	 * and store it in the lbi_debug_time field of the lbolt info structure.
	 * The value of this field will be subtracted from lbolt before
	 * returning it.
	 */
	lb_info->lbi_internal = lb_info->lbi_debug_time =
	    (gethrtime()/nsec_per_tick);

	/*
	 * lbolt_hybrid points at lbolt_bootstrap until now. The LBOLT_* macros
	 * and lbolt_debug_{enter,return} use this value as an indication that
	 * the initialization above hasn't been completed. Setting lbolt_hybrid
	 * to either lbolt_{cyclic,event}_driven here signals those code paths
	 * that the lbolt related structures can be used.
	 */
	if (lbolt_cyc_only) {
		lbolt_when.cyt_when = 0;
		lbolt_hybrid = lbolt_cyclic_driven;
	} else {
		lbolt_when.cyt_when = CY_INFINITY;
		lbolt_hybrid = lbolt_event_driven;
	}

	/*
	 * Grab cpu_lock and install all three cyclics.
	 */
	mutex_enter(&cpu_lock);

	clock_cyclic = cyclic_add(&clk_hdlr, &clk_when);
	lb_info->id.lbi_cyclic_id = cyclic_add(&lbolt_hdlr, &lbolt_when);

	mutex_exit(&cpu_lock);
}

/*
 * Called before calcloadavg to get 10-sec moving loadavg together
 */
static int
genloadavg(struct loadavg_s *avgs)
{
	int avg;
	int spos; /* starting position */
	int cpos; /* moving current position */
	int i;
	int slen;
	hrtime_t hr_avg;

	/* 10-second snapshot, calculate first position */
	if (avgs->lg_len == 0) {
		return (0);
	}
	slen = avgs->lg_len < S_MOVAVG_SZ ? avgs->lg_len : S_MOVAVG_SZ;

	spos = (avgs->lg_cur - 1) >= 0 ? avgs->lg_cur - 1 :
	    S_LOADAVG_SZ + (avgs->lg_cur - 1);
	for (i = hr_avg = 0; i < slen; i++) {
		cpos = (spos - i) >= 0 ? spos - i : S_LOADAVG_SZ + (spos - i);
		hr_avg += avgs->lg_loads[cpos];
	}

	hr_avg = hr_avg / slen;
	avg = hr_avg / (NANOSEC / LGRP_LOADAVG_IN_THREAD_MAX);

	return (avg);
}
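
/*
 * genloadavg() returns its result scaled by LGRP_LOADAVG_IN_THREAD_MAX;
 * clock() feeds it to calcloadavg() once per second to maintain the
 * hp_avenrun[] / avenrun[] values declared above.
 */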

/*
 * Run every second from clock() to update the loadavg count available to the
 * system and cpu-partitions.
 *
 * This works by sampling the previous usr, sys, wait time elapsed,
 * computing a delta, and adding that delta to the elapsed usr, sys,
 * wait increase.
 */
static void
loadavg_update()
{
	cpu_t *cp;
	cpupart_t *cpupart;
	hrtime_t cpu_total;
	int prev;

	loadavg.lg_total = 0;

	/*
	 * first pass totals up per-cpu statistics for system and cpu
	 * partitions
	 */
	cp = cpu_list;
	do {
		struct loadavg_s *lavg;

		lavg = &cp->cpu_loadavg;

		cpu_total = cp->cpu_acct[CMS_USER] +
		    cp->cpu_acct[CMS_SYSTEM] + cp->cpu_waitrq;
		/* compute delta against last total */
		scalehrtime(&cpu_total);
		prev = (lavg->lg_cur - 1) >= 0 ? lavg->lg_cur - 1 :
		    S_LOADAVG_SZ + (lavg->lg_cur - 1);
		if (lavg->lg_loads[prev] <= 0) {
			lavg->lg_loads[lavg->lg_cur] = cpu_total;
			cpu_total = 0;
		} else {
			lavg->lg_loads[lavg->lg_cur] = cpu_total;
			cpu_total = cpu_total - lavg->lg_loads[prev];
			if (cpu_total < 0)
				cpu_total = 0;
		}

		lavg->lg_cur = (lavg->lg_cur + 1) % S_LOADAVG_SZ;
		lavg->lg_len = (lavg->lg_len + 1) < S_LOADAVG_SZ ?
		    lavg->lg_len + 1 : S_LOADAVG_SZ;

		loadavg.lg_total += cpu_total;
		cp->cpu_part->cp_loadavg.lg_total += cpu_total;

	} while ((cp = cp->cpu_next) != cpu_list);

	loadavg.lg_loads[loadavg.lg_cur] = loadavg.lg_total;
	loadavg.lg_cur = (loadavg.lg_cur + 1) % S_LOADAVG_SZ;
	loadavg.lg_len = (loadavg.lg_len + 1) < S_LOADAVG_SZ ?
	    loadavg.lg_len + 1 : S_LOADAVG_SZ;

	/*
	 * Second pass updates counts
	 */
	cpupart = cp_list_head;
	do {
		struct loadavg_s *lavg;

		lavg = &cpupart->cp_loadavg;
		lavg->lg_loads[lavg->lg_cur] = lavg->lg_total;
		lavg->lg_total = 0;
		lavg->lg_cur = (lavg->lg_cur + 1) % S_LOADAVG_SZ;
		lavg->lg_len = (lavg->lg_len + 1) < S_LOADAVG_SZ ?
		    lavg->lg_len + 1 : S_LOADAVG_SZ;

	} while ((cpupart = cpupart->cp_next) != cp_list_head);

	/*
	 * Third pass totals up per-zone statistics.
	 */
	zone_loadavg_update();
}

/*
 * clock_update() - local clock update
 *
 * This routine is called by ntp_adjtime() to update the local clock
 * phase and frequency. The implementation is of an
 * adaptive-parameter, hybrid phase/frequency-lock loop (PLL/FLL). The
 * routine computes new time and frequency offset estimates for each
 * call. The PPS signal itself determines the new time offset,
 * instead of the calling argument. Presumably, calls to
 * ntp_adjtime() occur only when the caller believes the local clock
 * is valid within some bound (+-128 ms with NTP). If the caller's
 * time is far different than the PPS time, an argument will ensue,
 * and it's not clear who will lose.
 *
 * For uncompensated quartz crystal oscillators and nominal update
 * intervals less than 1024 s, operation should be in phase-lock mode
 * (STA_FLL = 0), where the loop is disciplined to phase. For update
 * intervals greater than this, operation should be in frequency-lock
 * mode (STA_FLL = 1), where the loop is disciplined to frequency.
 *
 * Note: mutex(&tod_lock) is in effect.
 */
void
clock_update(int offset)
{
	int ltemp, mtemp, s;

	ASSERT(MUTEX_HELD(&tod_lock));

	if (!(time_status & STA_PLL) && !(time_status & STA_PPSTIME))
		return;

	if ((time_status & STA_PPSTIME) && (time_status & STA_PPSSIGNAL))
		ltemp = pps_offset;
	else
		ltemp = offset;

	/*
	 * Scale the phase adjustment and clamp to the operating range.
	 */
	if (ltemp > MAXPHASE)
		time_offset = MAXPHASE * SCALE_UPDATE;
	else if (ltemp < -MAXPHASE)
		time_offset = -(MAXPHASE * SCALE_UPDATE);
	else
		time_offset = ltemp * SCALE_UPDATE;

	/*
	 * Select whether the frequency is to be controlled and in which
	 * mode (PLL or FLL). Clamp to the operating range. Ugly
	 * multiply/divide should be replaced someday.
	 */
	if (time_status & STA_FREQHOLD || time_reftime == 0)
		time_reftime = hrestime.tv_sec;

	mtemp = hrestime.tv_sec - time_reftime;
	time_reftime = hrestime.tv_sec;

	if (time_status & STA_FLL) {
		if (mtemp >= MINSEC) {
			ltemp = ((time_offset / mtemp) * (SCALE_USEC /
			    SCALE_UPDATE));
			time_freq += ltemp / SCALE_KH;
		}
	} else {
		if (mtemp < MAXSEC) {
			ltemp *= mtemp;
			time_freq += (int)(((int64_t)ltemp *
			    SCALE_USEC) / SCALE_KF)
			    / (1 << (time_constant * 2));
		}
	}
	if (time_freq > time_tolerance)
		time_freq = time_tolerance;
	else if (time_freq < -time_tolerance)
		time_freq = -time_tolerance;

	s = hr_clock_lock();
	tod_needsync = 1;
	hr_clock_unlock(s);
}

/*
 * ddi_hardpps() - discipline CPU clock oscillator to external PPS signal
 *
 * This routine is called at each PPS interrupt in order to discipline
 * the CPU clock oscillator to the PPS signal. It measures the PPS phase
 * and leaves it in a handy spot for the clock() routine. It
 * integrates successive PPS phase differences and calculates the
 * frequency offset. This is used in clock() to discipline the CPU
 * clock oscillator so that intrinsic frequency error is cancelled out.
 * The code requires the caller to capture the time and hardware counter
 * value at the on-time PPS signal transition.
 *
 * Note that, on some Unix systems, this routine runs at an interrupt
 * priority level higher than the timer interrupt routine clock().
 * Therefore, the variables used are distinct from the clock()
 * variables, except for certain exceptions: The PPS frequency pps_freq
 * and phase pps_offset variables are determined by this routine and
 * updated atomically. The time_tolerance variable can be considered a
 * constant, since it is infrequently changed, and then only when the
 * PPS signal is disabled. The watchdog counter pps_valid is updated
 * once per second by clock() and is atomically cleared in this
 * routine.
 *
 * tvp is the time of the last tick; usec is a microsecond count since the
 * last tick.
 *
 * Note: In Solaris systems, the tick value is actually given by
 * usec_per_tick. This is called from the serial driver cdintr(),
 * or equivalent, at a high PIL. Because the kernel keeps a
 * high-resolution time, the following code can accept either
 * the traditional argument pair, or the current highres timestamp
 * in tvp and zero in usec.
 */
void
ddi_hardpps(struct timeval *tvp, int usec)
{
	int u_usec, v_usec, bigtick;
	time_t cal_sec;
	int cal_usec;

	/*
	 * An occasional glitch can be produced when the PPS interrupt
	 * occurs in the clock() routine before the time variable is
	 * updated. Here the offset is discarded when the difference
	 * between it and the last one is greater than tick/2, but not
	 * if the interval since the first discard exceeds 30 s.
	 */
	time_status |= STA_PPSSIGNAL;
	time_status &= ~(STA_PPSJITTER | STA_PPSWANDER | STA_PPSERROR);
	pps_valid = 0;
	u_usec = -tvp->tv_usec;
	if (u_usec < -(MICROSEC/2))
		u_usec += MICROSEC;
	v_usec = pps_offset - u_usec;
	if (v_usec < 0)
		v_usec = -v_usec;
	if (v_usec > (usec_per_tick >> 1)) {
		if (pps_glitch > MAXGLITCH) {
			pps_glitch = 0;
			pps_tf[2] = u_usec;
			pps_tf[1] = u_usec;
		} else {
			pps_glitch++;
			u_usec = pps_offset;
		}
	} else
		pps_glitch = 0;

	/*
	 * A three-stage median filter is used to help deglitch the pps
	 * time. The median sample becomes the time offset estimate; the
	 * difference between the other two samples becomes the time
	 * dispersion (jitter) estimate.
	 */
	pps_tf[2] = pps_tf[1];
	pps_tf[1] = pps_tf[0];
	pps_tf[0] = u_usec;
	if (pps_tf[0] > pps_tf[1]) {
		if (pps_tf[1] > pps_tf[2]) {
			pps_offset = pps_tf[1];		/* 0 1 2 */
			v_usec = pps_tf[0] - pps_tf[2];
		} else if (pps_tf[2] > pps_tf[0]) {
			pps_offset = pps_tf[0];		/* 2 0 1 */
			v_usec = pps_tf[2] - pps_tf[1];
		} else {
			pps_offset = pps_tf[2];		/* 0 2 1 */
			v_usec = pps_tf[0] - pps_tf[1];
		}
	} else {
		if (pps_tf[1] < pps_tf[2]) {
			pps_offset = pps_tf[1];		/* 2 1 0 */
			v_usec = pps_tf[2] - pps_tf[0];
		} else if (pps_tf[2] < pps_tf[0]) {
			pps_offset = pps_tf[0];		/* 1 0 2 */
			v_usec = pps_tf[1] - pps_tf[2];
		} else {
			pps_offset = pps_tf[2];		/* 1 2 0 */
			v_usec = pps_tf[1] - pps_tf[0];
		}
	}
	if (v_usec > MAXTIME)
		pps_jitcnt++;
	v_usec = (v_usec << PPS_AVG) - pps_jitter;
	pps_jitter += v_usec / (1 << PPS_AVG);
	if (pps_jitter > (MAXTIME >> 1))
		time_status |= STA_PPSJITTER;
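
	/*
	 * In the median-filter branches above, the digit comments (e.g.
	 * "0 1 2") list the pps_tf[] indices in descending sample order,
	 * identifying which ordering selected that median.
	 */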

	/*
	 * During the calibration interval adjust the starting time when
	 * the tick overflows. At the end of the interval compute the
	 * duration of the interval and the difference of the hardware
	 * counters at the beginning and end of the interval. This code
	 * is deliciously complicated by the fact valid differences may
	 * exceed the value of tick when using long calibration
	 * intervals and small ticks. Note that the counter can be
	 * greater than tick if caught at just the wrong instant, but
	 * the values returned and used here are correct.
	 */
	bigtick = (int)usec_per_tick * SCALE_USEC;
	pps_usec -= pps_freq;
	if (pps_usec >= bigtick)
		pps_usec -= bigtick;
	if (pps_usec < 0)
		pps_usec += bigtick;
	pps_time.tv_sec++;
	pps_count++;
	if (pps_count < (1 << pps_shift))
		return;
	pps_count = 0;
	pps_calcnt++;
	u_usec = usec * SCALE_USEC;
	v_usec = pps_usec - u_usec;
	if (v_usec >= bigtick >> 1)
		v_usec -= bigtick;
	if (v_usec < -(bigtick >> 1))
		v_usec += bigtick;
	if (v_usec < 0)
		v_usec = -(-v_usec >> pps_shift);
	else
		v_usec = v_usec >> pps_shift;
	pps_usec = u_usec;
	cal_sec = tvp->tv_sec;
	cal_usec = tvp->tv_usec;
	cal_sec -= pps_time.tv_sec;
	cal_usec -= pps_time.tv_usec;
	if (cal_usec < 0) {
		cal_usec += MICROSEC;
		cal_sec--;
	}
	pps_time = *tvp;

	/*
	 * Check for lost interrupts, noise, excessive jitter and
	 * excessive frequency error. The number of timer ticks during
	 * the interval may vary +-1 tick. Add to this a margin of one
	 * tick for the PPS signal jitter and maximum frequency
	 * deviation. If the limits are exceeded, the calibration
	 * interval is reset to the minimum and we start over.
	 */
	u_usec = (int)usec_per_tick << 1;
	if (!((cal_sec == -1 && cal_usec > (MICROSEC - u_usec)) ||
	    (cal_sec == 0 && cal_usec < u_usec)) ||
	    v_usec > time_tolerance || v_usec < -time_tolerance) {
		pps_errcnt++;
		pps_shift = PPS_SHIFT;
		pps_intcnt = 0;
		time_status |= STA_PPSERROR;
		return;
	}

	/*
	 * A three-stage median filter is used to help deglitch the pps
	 * frequency. The median sample becomes the frequency offset
	 * estimate; the difference between the other two samples
	 * becomes the frequency dispersion (stability) estimate.
	 */
	pps_ff[2] = pps_ff[1];
	pps_ff[1] = pps_ff[0];
	pps_ff[0] = v_usec;
	if (pps_ff[0] > pps_ff[1]) {
		if (pps_ff[1] > pps_ff[2]) {
			u_usec = pps_ff[1];		/* 0 1 2 */
			v_usec = pps_ff[0] - pps_ff[2];
		} else if (pps_ff[2] > pps_ff[0]) {
			u_usec = pps_ff[0];		/* 2 0 1 */
			v_usec = pps_ff[2] - pps_ff[1];
		} else {
			u_usec = pps_ff[2];		/* 0 2 1 */
			v_usec = pps_ff[0] - pps_ff[1];
		}
	} else {
		if (pps_ff[1] < pps_ff[2]) {
			u_usec = pps_ff[1];		/* 2 1 0 */
			v_usec = pps_ff[2] - pps_ff[0];
		} else if (pps_ff[2] < pps_ff[0]) {
			u_usec = pps_ff[0];		/* 1 0 2 */
			v_usec = pps_ff[1] - pps_ff[2];
		} else {
			u_usec = pps_ff[2];		/* 1 2 0 */
			v_usec = pps_ff[1] - pps_ff[0];
		}
	}

	/*
	 * Here the frequency dispersion (stability) is updated. If it
	 * is less than one-fourth the maximum (MAXFREQ), the frequency
	 * offset is updated as well, but clamped to the tolerance. It
	 * will be processed later by the clock() routine.
	 */
	v_usec = (v_usec >> 1) - pps_stabil;
	if (v_usec < 0)
		pps_stabil -= -v_usec >> PPS_AVG;
	else
		pps_stabil += v_usec >> PPS_AVG;
	if (pps_stabil > MAXFREQ >> 2) {
		pps_stbcnt++;
		time_status |= STA_PPSWANDER;
		return;
	}
	if (time_status & STA_PPSFREQ) {
		if (u_usec < 0) {
			pps_freq -= -u_usec >> PPS_AVG;
			if (pps_freq < -time_tolerance)
				pps_freq = -time_tolerance;
			u_usec = -u_usec;
		} else {
			pps_freq += u_usec >> PPS_AVG;
			if (pps_freq > time_tolerance)
				pps_freq = time_tolerance;
		}
	}

	/*
	 * Here the calibration interval is adjusted. If the maximum
	 * time difference is greater than tick / 4, reduce the interval
	 * by half. If this is not the case for four consecutive
	 * intervals, double the interval.
	 */
	if (u_usec << pps_shift > bigtick >> 2) {
		pps_intcnt = 0;
		if (pps_shift > PPS_SHIFT)
			pps_shift--;
	} else if (pps_intcnt >= 4) {
		pps_intcnt = 0;
		if (pps_shift < PPS_SHIFTMAX)
			pps_shift++;
	} else
		pps_intcnt++;

	/*
	 * If recovering from kmdb, then make sure the tod chip gets resynced.
	 * If we took an early exit above, then we don't yet have a stable
	 * calibration signal to lock onto, so don't mark the tod for sync
	 * until we get all the way here.
	 */
	{
		int s = hr_clock_lock();

		tod_needsync = 1;
		hr_clock_unlock(s);
	}
}

/*
 * Handle clock tick processing for a thread.
 * Check for timer action, enforce CPU rlimit, do profiling etc.
 */
void
clock_tick(kthread_t *t, int pending)
{
	struct proc *pp;
	klwp_t *lwp;
	clock_t ticks;
	int poke = 0;		/* notify another CPU */
	int user_mode;
	size_t rss;
	int i, total_usec, usec;
	uint64_t secs;

	ASSERT(pending > 0);

	/* Must be operating on a lwp/thread */
	if ((lwp = ttolwp(t)) == NULL) {
		panic("clock_tick: no lwp");
		/*NOTREACHED*/
	}

	for (i = 0; i < pending; i++) {
		CL_TICK(t);	/* Class specific tick processing */
		DTRACE_SCHED1(tick, kthread_t *, t);
	}

	pp = ttoproc(t);

	/* pp->p_lock makes sure that the thread does not exit */
	ASSERT(MUTEX_HELD(&pp->p_lock));

	user_mode = (lwp->lwp_state == LWP_USER);

	ticks = (pp->p_utime + pp->p_stime) % hz;

	/*
	 * Update process times. Should use high res clock and state
	 * changes instead of statistical sampling method. XXX
	 */
	if (user_mode)
		pp->p_utime += pending;
	else
		pp->p_stime += pending;

	pp->p_ttime += pending;

	/*
	 * Update user profiling statistics. Get the pc from the
	 * lwp when the AST happens.
	 */
	if (pp->p_prof.pr_scale) {
		atomic_add_32(&lwp->lwp_oweupc, (int32_t)pending);
		poke = 1;
		aston(t);
	}

	/*
	 * If CPU was in user state, process lwp-virtual time
	 * interval timer. The value passed to itimerdecr() has to be
	 * in microseconds and has to be less than one second. Hence
	 * this loop.
	 */
	total_usec = usec_per_tick * pending;
	while (total_usec > 0) {
		usec = MIN(total_usec, (MICROSEC - 1));
		if (user_mode &&
		    timerisset(&lwp->lwp_timer[ITIMER_VIRTUAL].it_value) &&
		    itimerdecr(&lwp->lwp_timer[ITIMER_VIRTUAL], usec) == 0) {
			poke = 1;
			sigtoproc(pp, t, SIGVTALRM);
		}
		total_usec -= usec;
	}
1553 total_usec
= usec_per_tick
* pending
;
1554 while (total_usec
> 0) {
1555 usec
= MIN(total_usec
, (MICROSEC
- 1));
1556 if (timerisset(&lwp
->lwp_timer
[ITIMER_PROF
].it_value
) &&
1557 itimerdecr(&lwp
->lwp_timer
[ITIMER_PROF
], usec
) == 0) {
1559 sigtoproc(pp
, t
, SIGPROF
);
1565 * Enforce CPU resource controls:
1566 * (a) process.max-cpu-time resource control
1568 * Perform the check only if we have accumulated more a second.
1570 if ((ticks
+ pending
) >= hz
) {
1571 (void) rctl_test(rctlproc_legacy
[RLIMIT_CPU
], pp
->p_rctls
, pp
,
1572 (pp
->p_utime
+ pp
->p_stime
)/hz
, RCA_UNSAFE_SIGINFO
);

	/*
	 *   (b) task.max-cpu-time resource control
	 *
	 * If we have accumulated enough ticks, increment the task CPU
	 * time usage and test for the resource limit. This minimizes the
	 * number of calls to the rctl_test(). The task CPU time mutex
	 * is highly contentious as many processes can be sharing a task.
	 */
	if (pp->p_ttime >= clock_tick_proc_max) {
		secs = task_cpu_time_incr(pp->p_task, pp->p_ttime);
		pp->p_ttime = 0;
		if (secs) {
			(void) rctl_test(rc_task_cpu_time,
			    pp->p_task->tk_rctls,
			    pp, secs, RCA_UNSAFE_SIGINFO);
		}
	}

	/*
	 * Update memory usage for the currently running process.
	 */
	rss = rm_asrss(pp->p_as);
	PTOU(pp)->u_mem += rss;
	if (rss > PTOU(pp)->u_mem_max)
		PTOU(pp)->u_mem_max = rss;

	/*
	 * Notify the CPU the thread is running on.
	 */
	if (poke && t->t_cpu != CPU)
		poke_cpu(t->t_cpu->cpu_id);
}

void
profil_tick(uintptr_t upc)
{
	int ticks;
	proc_t *p = ttoproc(curthread);
	klwp_t *lwp = ttolwp(curthread);
	struct prof *pr = &p->p_prof;

	do {
		ticks = lwp->lwp_oweupc;
	} while (atomic_cas_32(&lwp->lwp_oweupc, ticks, 0) != ticks);

	mutex_enter(&p->p_pflock);
	if (pr->pr_scale >= 2 && upc >= pr->pr_off) {
		/*
		 * Old-style profiling
		 */
		uint16_t *slot = pr->pr_base;
		uint16_t old, new;

		if (pr->pr_scale != 2) {
			uintptr_t delta = upc - pr->pr_off;
			uintptr_t byteoff = ((delta >> 16) * pr->pr_scale) +
			    (((delta & 0xffff) * pr->pr_scale) >> 16);
			if (byteoff >= (uintptr_t)pr->pr_size) {
				mutex_exit(&p->p_pflock);
				return;
			}
			slot += byteoff / sizeof (uint16_t);
		}
		if (fuword16(slot, &old) < 0 ||
		    (new = old + ticks) > SHRT_MAX ||
		    suword16(slot, new) < 0) {
			pr->pr_scale = 0;
		}
	} else if (pr->pr_scale == 1) {
		/*
		 * PC Sampling
		 */
		model_t model = lwp_getdatamodel(lwp);
		int result;

		while (ticks-- > 0) {
			if (pr->pr_samples == pr->pr_size) {
				/* buffer full, turn off sampling */
				pr->pr_scale = 0;
				break;
			}
			switch (SIZEOF_PTR(model)) {
			case sizeof (uint32_t):
				result = suword32(pr->pr_base, (uint32_t)upc);
				break;
			case sizeof (uint64_t):
				result = suword64(pr->pr_base, (uint64_t)upc);
				break;
			default:
				cmn_err(CE_WARN, "profil_tick: unexpected "
				    "data model");
				result = -1;
				break;
			}
			if (result != 0) {
				pr->pr_scale = 0;
				break;
			}
			pr->pr_base = (caddr_t)pr->pr_base + SIZEOF_PTR(model);
			pr->pr_samples++;
		}
	}
	mutex_exit(&p->p_pflock);
}

static void
delay_wakeup(void *arg)
{
	kthread_t *t = arg;

	mutex_enter(&t->t_delay_lock);
	cv_signal(&t->t_delay_cv);
	mutex_exit(&t->t_delay_lock);
}

/*
 * The delay(9F) man page indicates that it can only be called from user or
 * kernel context - detect and diagnose bad calls. The following macro will
 * produce a limited number of messages identifying bad callers.  This is done
 * in a macro so that caller() is meaningful. When a bad caller is identified,
 * switching to 'drv_usecwait(TICK_TO_USEC(ticks));' may be appropriate.
 */
#define	DELAY_CONTEXT_CHECK()	{					\
	uint32_t	m;						\
	char		*f;						\
	ulong_t		off;						\
									\
	m = delay_from_interrupt_msg;					\
	if (delay_from_interrupt_diagnose && servicing_interrupt() &&	\
	    !panicstr && !devinfo_freeze &&				\
	    atomic_cas_32(&delay_from_interrupt_msg, m ? m : 1, m-1)) {	\
		f = modgetsymname((uintptr_t)caller(), &off);		\
		cmn_err(CE_WARN, "delay(9F) called from "		\
		    "interrupt context: %s`%s",				\
		    mod_containing_pc(caller()), f ? f : "...");	\
	}								\
}
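
/*
 * DELAY_CONTEXT_CHECK() is used at the top of delay() and delay_random()
 * below; it only emits a bounded number of diagnostics (limited by
 * delay_from_interrupt_msg) and never changes the delay behavior itself.
 */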

/*
 * delay_common: common delay code.
 */
static void
delay_common(clock_t ticks)
{
	kthread_t	*t = curthread;
	clock_t		deadline;
	clock_t		timeleft;
	callout_id_t	id;

	/* If timeouts aren't running all we can do is spin. */
	if (panicstr || devinfo_freeze) {
		/* Convert delay(9F) call into drv_usecwait(9F) call. */
		if (TICK_TO_USEC(ticks) > 0)
			drv_usecwait(TICK_TO_USEC(ticks));
		return;
	}

	deadline = ddi_get_lbolt() + ticks;
	while ((timeleft = deadline - ddi_get_lbolt()) > 0) {
		mutex_enter(&t->t_delay_lock);
		id = timeout_default(delay_wakeup, t, timeleft);
		cv_wait(&t->t_delay_cv, &t->t_delay_lock);
		mutex_exit(&t->t_delay_lock);
		(void) untimeout_default(id, 0);
	}
}

/*
 * Delay specified number of clock ticks.
 */
void
delay(clock_t ticks)
{
	DELAY_CONTEXT_CHECK();

	delay_common(ticks);
}

/*
 * Delay a random number of clock ticks between 1 and ticks.
 */
void
delay_random(clock_t ticks)
{
	int64_t r;

	DELAY_CONTEXT_CHECK();

	(void) random_get_pseudo_bytes((void *)&r, sizeof (r));
	if (ticks == 0)
		ticks = 1;
	ticks = (r % ticks) + 1;
	delay_common(ticks);
}

/*
 * Like delay, but interruptible by a signal.
 */
int
delay_sig(clock_t ticks)
{
	kthread_t	*t = curthread;
	clock_t		deadline;
	clock_t		rc;

	/* If timeouts aren't running all we can do is spin. */
	if (panicstr || devinfo_freeze) {
		if (TICK_TO_USEC(ticks) > 0)
			drv_usecwait(TICK_TO_USEC(ticks));
		return (0);
	}

	deadline = ddi_get_lbolt() + ticks;
	mutex_enter(&t->t_delay_lock);
	do {
		rc = cv_timedwait_sig(&t->t_delay_cv,
		    &t->t_delay_lock, deadline);
		/* loop until past deadline or signaled */
	} while (rc > 0);
	mutex_exit(&t->t_delay_lock);
	if (rc == 0)
		return (EINTR);
	return (0);
}

static void
ddi_sleep_common(hrtime_t delay, hrtime_t resolution)
{
	kthread_t	*t = curthread;
	callout_id_t	id;
	hrtime_t	tmp = gethrtime();

	/* If timeouts aren't running all we can do is spin. */
	if (panicstr || devinfo_freeze) {
		/* Convert ddi_*sleep(9F) call into drv_usecwait(9F) call. */
		if (NSEC2USEC(delay) > 0)
			drv_usecwait(NSEC2USEC(delay));
		return;
	}

	/*
	 * TODO: does this need to be in a loop checking that we didn't get
	 * woken up too early?
	 */
	mutex_enter(&t->t_delay_lock);
	id = timeout_generic(CALLOUT_NORMAL, delay_wakeup, t, delay,
	    resolution, CALLOUT_FLAG_ROUNDUP);
	cv_wait(&t->t_delay_cv, &t->t_delay_lock);
	mutex_exit(&t->t_delay_lock);
	(void) untimeout_generic(id, 0);
	if (gethrtime() - tmp < delay)
		cmn_err(CE_WARN, "%s returned too soon (wanted %llu, got %llu)",
		    __func__, delay, gethrtime() - tmp);
}

void
ddi_sleep(clock_t secs)
{
	hrtime_t res;

	/*
	 * We don't want to use 1 s resolution unconditionally because of
	 * how it is used for rounding up the deadline.  With 1 s
	 * resolution, a sleep of 1 second can take anywhere from 1 to
	 * 1.999999999 seconds on an idle system.  This seems unacceptable,
	 * and so we use either 100 ms or 10% of sleep interval as the
	 * resolution - whichever is smaller.
	 *
	 * (There is a similar issue with the milli- and micro- sleep
	 * functions, but somehow an extra 1 ms or 1 us doesn't seem as bad.)
	 */
	if (secs > 0)
		res = MIN(100000000 /* 100 ms */, SEC2NSEC(secs) / 10);
	else
		res = 100000000; /* 100 ms */

	ddi_sleep_common(SEC2NSEC(secs), res);
}

void
ddi_msleep(clock_t msecs)
{
	ddi_sleep_common(MSEC2NSEC(msecs), 1000000 /* 1 ms */);
}

void
ddi_usleep(clock_t usecs)
{
	ddi_sleep_common(USEC2NSEC(usecs), 1000 /* 1 us */);
}

#define	SECONDS_PER_DAY	86400

/*
 * Initialize the system time based on the TOD chip.  approx is used as
 * an approximation of time (e.g. from the filesystem) in the event that
 * the TOD chip has been cleared or is unresponsive.  An approx of -1
 * means the filesystem doesn't keep time.
 */
void
clkset(time_t approx)
{
	timestruc_t ts;
	int spl;
	int set_clock = 0;

	mutex_enter(&tod_lock);
	ts = tod_get();

	if (ts.tv_sec > 365 * SECONDS_PER_DAY) {
		/*
		 * If the TOD chip is reporting some time after 1971,
		 * then it probably didn't lose power or become otherwise
		 * cleared in the recent past;  check to assure that
		 * the time coming from the filesystem isn't in the future
		 * according to the TOD chip.
		 */
		if (approx != -1 && approx > ts.tv_sec) {
			cmn_err(CE_WARN, "Last shutdown is later "
			    "than time on time-of-day chip; check date.");
		}
	} else {
		/*
		 * If the TOD chip isn't giving correct time, set it to the
		 * greater of i) approx and ii) 1987. That way if approx
		 * is negative or is earlier than 1987, we set the clock
		 * back to a time when Oliver North, ALF and Dire Straits
		 * were all on the collective brain: 1987.
		 */
		timestruc_t tmp;
		time_t diagnose_date = (1987 - 1970) * 365 * SECONDS_PER_DAY;
		ts.tv_sec = (approx > diagnose_date ? approx : diagnose_date);
		ts.tv_nsec = 0;

		/*
		 * Attempt to write the new time to the TOD chip.  Set spl high
		 * to avoid getting preempted between the tod_set and tod_get.
		 */
		spl = splhi();
		tod_set(ts);
		tmp = tod_get();
		splx(spl);

		if (tmp.tv_sec != ts.tv_sec && tmp.tv_sec != ts.tv_sec + 1) {
			tod_broken = 1;
			dosynctodr = 0;
			cmn_err(CE_WARN, "Time-of-day chip unresponsive.");
		} else {
			cmn_err(CE_WARN, "Time-of-day chip had "
			    "incorrect date; check and reset.");
		}
		set_clock = 1;
	}

	if (!boot_time) {
		boot_time = ts.tv_sec;
		set_clock = 1;
	}

	if (set_clock)
		set_hrestime(&ts);

	mutex_exit(&tod_lock);
}
; /* for testing if the system time has been reset */
1945 set_hrestime(timestruc_t
*ts
)
1947 int spl
= hr_clock_lock();
1949 membar_enter(); /* hrestime must be visible before timechanged++ */
1952 hr_clock_unlock(spl
);

static uint_t deadman_seconds;
static uint32_t deadman_panics;
static int deadman_enabled = 0;
static int deadman_panic_timers = 1;
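
/*
 * deadman() runs from a CY_HIGH_LEVEL cyclic once per second on every CPU
 * (see deadman_online() below) and panics if the clock()-driven
 * deadman_counter stops advancing for deadman_seconds consecutive seconds
 * while deadman_enabled is set.
 */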

static void
deadman(void)
{
	if (panicstr) {
		/*
		 * During panic, other CPUs besides the panic
		 * master continue to handle cyclics and some other
		 * interrupts.  The code below is intended to be
		 * single threaded, so any CPU other than the master
		 * must keep out.
		 */
		if (CPU->cpu_id != panic_cpu.cpu_id)
			return;

		if (!deadman_panic_timers)
			return; /* allow all timers to be manually disabled */

		/*
		 * If we are generating a crash dump or syncing filesystems and
		 * the corresponding timer is set, decrement it and re-enter
		 * the panic code to abort it and advance to the next state.
		 * The panic states and triggers are explained in panic.c.
		 */
		if (dump_timeleft && (--dump_timeleft == 0)) {
			panic("panic dump timeout");
			/*NOTREACHED*/
		}

		return;
	}

	if (deadman_counter != CPU->cpu_deadman_counter) {
		CPU->cpu_deadman_counter = deadman_counter;
		CPU->cpu_deadman_countdown = deadman_seconds;
		return;
	}

	if (--CPU->cpu_deadman_countdown > 0)
		return;

	/*
	 * Regardless of whether or not we actually bring the system down,
	 * bump the deadman_panics variable.
	 *
	 * N.B. deadman_panics is incremented once for each CPU that
	 * passes through here.  It's expected that all the CPUs will
	 * detect this condition within one second of each other, so
	 * when deadman_enabled is off, deadman_panics will
	 * typically be a multiple of the total number of CPUs in
	 * the system.
	 */
	atomic_inc_32(&deadman_panics);

	if (!deadman_enabled) {
		CPU->cpu_deadman_countdown = deadman_seconds;
		return;
	}

	/*
	 * If we're here, we want to bring the system down.
	 */
	panic("deadman: timed out after %d seconds of clock "
	    "inactivity", deadman_seconds);
	/*NOTREACHED*/
}

static void
deadman_online(void *arg, cpu_t *cpu, cyc_handler_t *hdlr, cyc_time_t *when)
{
	cpu->cpu_deadman_counter = 0;
	cpu->cpu_deadman_countdown = deadman_seconds;

	hdlr->cyh_func = (cyc_func_t)deadman;
	hdlr->cyh_level = CY_HIGH_LEVEL;
	hdlr->cyh_arg = NULL;

	/*
	 * Stagger the CPUs so that they don't all run deadman() at
	 * the same time.  Simplest reason to do this is to make it
	 * more likely that only one CPU will panic in case of a
	 * timeout.  This is (strictly speaking) an aesthetic, not a
	 * technical consideration.
	 */
	when->cyt_when = cpu->cpu_id * (NANOSEC / NCPU);
	when->cyt_interval = NANOSEC;
}

void
deadman_init(void)
{
	cyc_omni_handler_t hdlr;

	if (deadman_seconds == 0)
		deadman_seconds = snoop_interval / MICROSEC;

	if (snooping)
		deadman_enabled = 1;

	hdlr.cyo_online = deadman_online;
	hdlr.cyo_offline = NULL;
	hdlr.cyo_arg = NULL;

	mutex_enter(&cpu_lock);
	deadman_cyclic = cyclic_add_omni(&hdlr);
	mutex_exit(&cpu_lock);
}

/*
 * tod_fault() is for updating tod validate mechanism state:
 * (1) TOD_NOFAULT: for resetting the state to 'normal'.
 *     currently used for debugging only
 * (2) The following four cases detected by tod validate mechanism:
 *       TOD_REVERSED: current tod value is less than previous value.
 *       TOD_STALLED: current tod value hasn't advanced.
 *       TOD_JUMPED: current tod value advanced too far from previous value.
 *       TOD_RATECHANGED: the ratio between average tod delta and
 *       average tick delta has changed.
 * (3) TOD_RDONLY: when the TOD clock is not writeable e.g. because it is
 *     a virtual TOD provided by a hypervisor.
 */
enum tod_fault_type
tod_fault(enum tod_fault_type ftype, int off)
{
	ASSERT(MUTEX_HELD(&tod_lock));

	if (tod_faulted != ftype) {
		switch (ftype) {
		case TOD_NOFAULT:
			plat_tod_fault(TOD_NOFAULT);
			cmn_err(CE_NOTE, "Restarted tracking "
			    "Time of Day clock.");
			tod_faulted = ftype;
			break;
		case TOD_REVERSED:
		case TOD_JUMPED:
			if (tod_faulted == TOD_NOFAULT) {
				plat_tod_fault(ftype);
				cmn_err(CE_WARN, "Time of Day clock error: "
				    "reason [%s by 0x%x]. -- "
				    " Stopped tracking Time Of Day clock.",
				    tod_fault_table[ftype], off);
				tod_faulted = ftype;
			}
			break;
		case TOD_STALLED:
		case TOD_RATECHANGED:
			if (tod_faulted == TOD_NOFAULT) {
				plat_tod_fault(ftype);
				cmn_err(CE_WARN, "Time of Day clock error: "
				    "reason [%s]. -- "
				    " Stopped tracking Time Of Day clock.",
				    tod_fault_table[ftype]);
				tod_faulted = ftype;
			}
			break;
		case TOD_RDONLY:
			if (tod_faulted == TOD_NOFAULT) {
				plat_tod_fault(ftype);
				cmn_err(CE_NOTE, "!Time of Day clock is "
				    "Read-Only; set of Date/Time will not "
				    "persist across reboot.");
				tod_faulted = ftype;
			}
			break;
		default:
			break;
		}
	}
	return (tod_faulted);
}

/*
 * Two functions that allow tod_status_flag to be manipulated by functions
 * external to this file.
 */
void
tod_status_set(int tod_flag)
{
	tod_status_flag |= tod_flag;
}

void
tod_status_clear(int tod_flag)
{
	tod_status_flag &= ~tod_flag;
}

/*
 * Record a timestamp and the value passed to tod_set().  The next call to
 * tod_validate() can use these values, prev_set_tick and prev_set_tod,
 * when checking the timestruc_t returned by tod_get().  Ordinarily,
 * tod_validate() will use prev_tick and prev_tod for this task but these
 * become obsolete, and will be re-assigned with the prev_set_* values,
 * in the case when the TOD is re-written.
 */
void
tod_set_prev(timestruc_t ts)
{
	if ((tod_validate_enable == 0) || (tod_faulted != TOD_NOFAULT) ||
	    tod_validate_deferred) {
		return;
	}
	prev_set_tick = gethrtime();
	/*
	 * A negative value will be set to zero in utc_to_tod() so we fake
	 * a zero here in such a case.  This would need to change if the
	 * behavior of utc_to_tod() changes.
	 */
	prev_set_tod = ts.tv_sec < 0 ? 0 : ts.tv_sec;
}
/*
 * tod_validate() is used for checking values returned by tod_get().
 * Four error cases can be detected by this routine:
 *   TOD_REVERSED: current tod value is less than previous.
 *   TOD_STALLED: current tod value hasn't advanced.
 *   TOD_JUMPED: current tod value advanced too far from previous value.
 *   TOD_RATECHANGED: the ratio between average tod delta and
 *   average tick delta has changed.
 */
time_t
tod_validate(time_t tod)
{
	time_t diff_tod;
	hrtime_t diff_tick;

	long dtick;
	int dtick_delta;

	int off = 0;
	enum tod_fault_type tod_bad = TOD_NOFAULT;

	static int firsttime = 1;

	static time_t prev_tod = 0;
	static hrtime_t prev_tick = 0;
	static long dtick_avg = TOD_REF_FREQ;

	int cpr_resume_done = 0;
	int dr_resume_done = 0;

	hrtime_t tick = gethrtime();

	ASSERT(MUTEX_HELD(&tod_lock));

	/*
	 * tod_validate_enable is patchable via /etc/system.
	 * If TOD is already faulted, or if TOD validation is deferred,
	 * there is nothing to do.
	 */
	if ((tod_validate_enable == 0) || (tod_faulted != TOD_NOFAULT) ||
	    tod_validate_deferred) {
		return (tod);
	}

	/*
	 * If this is the first time through, we just need to save the tod
	 * we were called with and hrtime so we can use them next time to
	 * validate tod_get().
	 */
	if (firsttime) {
		firsttime = 0;
		prev_tod = tod;
		prev_tick = tick;
		return (tod);
	}

	/*
	 * Handle any flags that have been turned on by tod_status_set().
	 * In the case where a tod_set() is done and then a subsequent
	 * tod_get() fails (ie, both TOD_SET_DONE and TOD_GET_FAILED are
	 * true), we treat the TOD_GET_FAILED with precedence by switching
	 * off the flag, returning tod and leaving TOD_SET_DONE asserted
	 * until such time as tod_get() completes successfully.
	 */
	if (tod_status_flag & TOD_GET_FAILED) {
		/*
		 * tod_get() has encountered an issue, possibly transitory,
		 * when reading TOD.  We'll just return the incoming tod
		 * value (which is actually hrestime.tv_sec in this case)
		 * and when we get a genuine tod, following a successful
		 * tod_get(), we can validate using prev_tod and prev_tick.
		 */
		tod_status_flag &= ~TOD_GET_FAILED;
		return (tod);
	} else if (tod_status_flag & TOD_SET_DONE) {
		/*
		 * TOD has been modified.  Just before the TOD was written,
		 * tod_set_prev() saved tod and hrtime; we can now use
		 * those values, prev_set_tod and prev_set_tick, to validate
		 * the incoming tod that's just been read.
		 */
		prev_tod = prev_set_tod;
		prev_tick = prev_set_tick;
		dtick_avg = TOD_REF_FREQ;
		tod_status_flag &= ~TOD_SET_DONE;
		/*
		 * If a tod_set() preceded a cpr_suspend() without an
		 * intervening tod_validate(), we need to ensure that a
		 * TOD_JUMPED condition is ignored.
		 * Note this isn't a concern in the case of DR as we've
		 * just reassigned dtick_avg, above.
		 */
		if (tod_status_flag & TOD_CPR_RESUME_DONE) {
			cpr_resume_done = 1;
			tod_status_flag &= ~TOD_CPR_RESUME_DONE;
		}
	} else if (tod_status_flag & TOD_CPR_RESUME_DONE) {
		/*
		 * The system's coming back from a checkpoint resume.
		 */
		cpr_resume_done = 1;
		tod_status_flag &= ~TOD_CPR_RESUME_DONE;
		/*
		 * We need to handle the possibility of a CPR suspend
		 * operation having been initiated whilst a DR event was
		 * in-flight.
		 */
		if (tod_status_flag & TOD_DR_RESUME_DONE) {
			dr_resume_done = 1;
			tod_status_flag &= ~TOD_DR_RESUME_DONE;
		}
	} else if (tod_status_flag & TOD_DR_RESUME_DONE) {
		/*
		 * A Dynamic Reconfiguration event has taken place.
		 */
		dr_resume_done = 1;
		tod_status_flag &= ~TOD_DR_RESUME_DONE;
	}

	/* test hook */
	switch (tod_unit_test) {
	case 1: /* for testing jumping tod */
		tod += tod_test_injector;
		tod_unit_test = 0;
		break;
	case 2: /* for testing stuck tod bit */
		tod |= 1 << tod_test_injector;
		tod_unit_test = 0;
		break;
	case 3: /* for testing stalled tod */
		tod = prev_tod;
		tod_unit_test = 0;
		break;
	case 4: /* reset tod fault status */
		(void) tod_fault(TOD_NOFAULT, 0);
		tod_unit_test = 0;
		break;
	default:
		break;
	}

	diff_tod = tod - prev_tod;
	diff_tick = tick - prev_tick;

	ASSERT(diff_tick >= 0);

	if (diff_tod < 0) {
		/* ERROR - tod reversed */
		tod_bad = TOD_REVERSED;
		off = (int)(prev_tod - tod);
	} else if (diff_tod == 0) {
		/* tod did not advance */
		if (diff_tick > TOD_STALL_THRESHOLD) {
			/* ERROR - tod stalled */
			tod_bad = TOD_STALLED;
		} else {
			/*
			 * Make sure we don't update prev_tick
			 * so that diff_tick is calculated since
			 * the first diff_tod == 0
			 */
			tick = prev_tick;
		}
	} else {
		/* calculate dtick */
		dtick = diff_tick / diff_tod;

		/* update dtick averages */
		dtick_avg += ((dtick - dtick_avg) / TOD_FILTER_N);

		/*
		 * Calculate dtick_delta as
		 * variation from reference freq in quartiles
		 */
		dtick_delta = (dtick_avg - TOD_REF_FREQ) /
		    (TOD_REF_FREQ >> 2);

		/*
		 * Even with a perfectly functioning TOD device,
		 * when the number of elapsed seconds is low the
		 * algorithm can calculate a rate that is beyond
		 * tolerance, causing an error.  The algorithm is
		 * inaccurate when elapsed time is low (less than
		 * 5 seconds).
		 */
		if (diff_tod > 4) {
			if (dtick < TOD_JUMP_THRESHOLD) {
				/*
				 * If we've just done a CPR resume, we detect
				 * a jump in the TOD but, actually, what's
				 * happened is that the TOD has been increasing
				 * whilst the system was suspended and the tick
				 * count hasn't kept up.  We consider the first
				 * occurrence of this after a resume as normal
				 * and ignore it; otherwise, in a non-resume
				 * case, we regard it as a TOD problem.
				 */
				if (!cpr_resume_done) {
					/* ERROR - tod jumped */
					tod_bad = TOD_JUMPED;
					off = (int)diff_tod;
				}
			}
			if (dtick_delta) {
				/*
				 * If we've just done a DR resume, dtick_avg
				 * can go a bit askew so we reset it and carry
				 * on; otherwise, the TOD is in error.
				 */
				if (dr_resume_done) {
					dtick_avg = TOD_REF_FREQ;
				} else {
					/* ERROR - change in clock rate */
					tod_bad = TOD_RATECHANGED;
				}
			}
		}
	}

	if (tod_bad != TOD_NOFAULT) {
		(void) tod_fault(tod_bad, off);

		/*
		 * Disable dosynctodr since we are going to fault
		 * the TOD chip anyway here
		 */
		dosynctodr = 0;

		/*
		 * Set tod to the correct value from hrestime
		 */
		tod = hrestime.tv_sec;
	}

	prev_tod = tod;
	prev_tick = tick;
	return (tod);
}
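/*
 * Editor's illustrative sketch (not part of the original source and guarded
 * out of the build): a user-level model of the rate-change filter above.
 * REF_FREQ and FILTER_N stand in for the kernel's private TOD_REF_FREQ and
 * TOD_FILTER_N; the values below are assumptions for illustration only.
 */
#if 0
#include <stdio.h>

#define	REF_FREQ	1000000000LL	/* assumed ~1e9 ns of hrtime per TOD second */
#define	FILTER_N	4		/* assumed smoothing factor */

int
main(void)
{
	long long dtick_avg = REF_FREQ;
	long long dtick = 1300000000LL;	/* TOD ticking ~30% slow vs. hrtime */
	int i, dtick_delta = 0;

	for (i = 0; i < 32; i++) {
		/* same update and quartile test as tod_validate() */
		dtick_avg += (dtick - dtick_avg) / FILTER_N;
		dtick_delta = (int)((dtick_avg - REF_FREQ) / (REF_FREQ >> 2));
	}
	/* a nonzero delta is what tod_validate() flags as TOD_RATECHANGED */
	printf("dtick_delta = %d\n", dtick_delta);
	return (0);
}
#endif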
void
calcloadavg(int nrun, uint64_t *hp_ave)
{
	static int64_t f[3] = { 135, 27, 9 };
	uint_t i;
	int64_t q, r;

	/*
	 * Compute load average over the last 1, 5, and 15 minutes
	 * (60, 300, and 900 seconds).  The constants in f[3] are for
	 * exponential decay:
	 * (1 - exp(-1/60)) << 13 = 135,
	 * (1 - exp(-1/300)) << 13 = 27,
	 * (1 - exp(-1/900)) << 13 = 9.
	 */

	/*
	 * a little hoop-jumping to avoid integer overflow
	 */
	for (i = 0; i < 3; i++) {
		q = (hp_ave[i] >> 16) << 7;
		r = (hp_ave[i] & 0xffff) << 7;
		hp_ave[i] += ((nrun - q) * f[i] - ((r * f[i]) >> 16)) >> 4;
	}
}
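/*
 * Editor's illustrative check (not part of the original source and guarded
 * out of the build): the decay constants in f[] above are
 * (1 - exp(-1/T)) << 13 for T = 60, 300 and 900 seconds.
 */
#if 0
#include <stdio.h>
#include <math.h>

int
main(void)
{
	double t[3] = { 60.0, 300.0, 900.0 };
	int i;

	for (i = 0; i < 3; i++)
		printf("T = %3.0fs -> %.0f\n", t[i],
		    (1.0 - exp(-1.0 / t[i])) * 8192.0);	/* 135, 27, 9 */
	return (0);
}
#endif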
/*
 * lbolt_hybrid() is used by ddi_get_lbolt() and ddi_get_lbolt64() to
 * calculate the value of lbolt according to the current mode. In the event
 * driven mode (the default), lbolt is calculated by dividing the current hires
 * time by the number of nanoseconds per clock tick. In the cyclic driven mode
 * an internal variable is incremented at each firing of the lbolt cyclic
 * and returned by lbolt_cyclic_driven().
 *
 * The system will transition from event to cyclic driven mode when the number
 * of calls to lbolt_event_driven() exceeds the (per CPU) threshold within a
 * window of time. It does so by reprogramming lbolt_cyclic from CY_INFINITY to
 * nsec_per_tick. The lbolt cyclic will remain ON while at least one CPU is
 * causing enough activity to cross the thresholds.
 */
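/*
 * Editor's illustrative sketch (not part of the original source and guarded
 * out of the build): a user-level model of the hybrid dispatch described
 * above.  All names and the 100Hz tick are assumptions for illustration;
 * the kernel dispatches through the lbolt_hybrid function pointer.
 */
#if 0
#include <stdio.h>

typedef long long lbolt_fn_t(void);

static long long internal;			/* models lb_info->lbi_internal */
static long long hires_now;			/* models gethrtime() */
static const long long nsec_per_tick = 10000000LL;	/* assumed 100Hz */

static long long
event_driven(void)
{
	/* event driven mode: derive lbolt from the hires time */
	return (hires_now / nsec_per_tick);
}

static long long
cyclic_driven(void)
{
	/* cyclic driven mode: return the counter bumped by the cyclic */
	return (internal);
}

static lbolt_fn_t *hybrid = event_driven;	/* models lbolt_hybrid */

int
main(void)
{
	hires_now = 123 * nsec_per_tick;
	printf("event mode lbolt = %lld\n", hybrid());

	/* the mode switch is just a pointer swap, as in lbolt_ev_to_cyclic() */
	internal = hires_now / nsec_per_tick;
	hybrid = cyclic_driven;
	printf("cyclic mode lbolt = %lld\n", hybrid());
	return (0);
}
#endif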
int64_t
lbolt_bootstrap(void)
{
	return (0);
}

uint_t
lbolt_ev_to_cyclic(caddr_t arg1, caddr_t arg2)
{
	hrtime_t ts, exp;
	int ret;

	ASSERT(lbolt_hybrid != lbolt_cyclic_driven);

	kpreempt_disable();

	ts = gethrtime();
	lb_info->lbi_internal = (ts/nsec_per_tick);

	/*
	 * Align the next expiration to a clock tick boundary.
	 */
	exp = ts + nsec_per_tick - 1;
	exp = (exp/nsec_per_tick) * nsec_per_tick;

	ret = cyclic_reprogram(lb_info->id.lbi_cyclic_id, exp);
	ASSERT(ret);

	lbolt_hybrid = lbolt_cyclic_driven;
	lb_info->lbi_cyc_deactivate = B_FALSE;
	lb_info->lbi_cyc_deac_start = lb_info->lbi_internal;

	kpreempt_enable();

	ret = atomic_dec_32_nv(&lb_info->lbi_token);
	ASSERT(ret == 0);

	return (1);
}
int64_t
lbolt_event_driven(void)
{
	hrtime_t ts;
	int64_t lb;
	int ret, cpu = CPU->cpu_seqid;

	ts = gethrtime();
	ASSERT(nsec_per_tick > 0);
	lb = (ts/nsec_per_tick);

	/*
	 * Switch to cyclic mode if the number of calls to this routine
	 * has reached the threshold within the interval.
	 */
	if ((lb - lb_cpu[cpu].lbc_cnt_start) < lb_info->lbi_thresh_interval) {

		if (--lb_cpu[cpu].lbc_counter == 0) {
			/*
			 * Reached the threshold within the interval, reset
			 * the usage statistics.
			 */
			lb_cpu[cpu].lbc_counter = lb_info->lbi_thresh_calls;
			lb_cpu[cpu].lbc_cnt_start = lb;

			/*
			 * Make sure only one thread reprograms the
			 * lbolt cyclic and changes the mode.
			 */
			if (panicstr == NULL &&
			    atomic_cas_32(&lb_info->lbi_token, 0, 1) == 0) {

				if (lbolt_hybrid == lbolt_cyclic_driven) {
					ret = atomic_dec_32_nv(
					    &lb_info->lbi_token);
					ASSERT(ret == 0);
				} else {
					lbolt_softint_post();
				}
			}
		}
	} else {
		/*
		 * Exceeded the interval, reset the usage statistics.
		 */
		lb_cpu[cpu].lbc_counter = lb_info->lbi_thresh_calls;
		lb_cpu[cpu].lbc_cnt_start = lb;
	}

	ASSERT(lb >= lb_info->lbi_debug_time);

	return (lb - lb_info->lbi_debug_time);
}
int64_t
lbolt_cyclic_driven(void)
{
	int64_t lb = lb_info->lbi_internal;
	int cpu;

	/*
	 * If a CPU has already prevented the lbolt cyclic from deactivating
	 * itself, don't bother tracking the usage. Otherwise check if we're
	 * within the interval and how the per CPU counter is doing.
	 */
	if (lb_info->lbi_cyc_deactivate) {
		cpu = CPU->cpu_seqid;
		if ((lb - lb_cpu[cpu].lbc_cnt_start) <
		    lb_info->lbi_thresh_interval) {

			if (lb_cpu[cpu].lbc_counter == 0)
				/*
				 * Reached the threshold within the interval,
				 * prevent the lbolt cyclic from turning itself
				 * off.
				 */
				lb_info->lbi_cyc_deactivate = B_FALSE;
			else
				lb_cpu[cpu].lbc_counter--;
		} else {
			/*
			 * Only reset the usage statistics when we have
			 * exceeded the interval.
			 */
			lb_cpu[cpu].lbc_counter = lb_info->lbi_thresh_calls;
			lb_cpu[cpu].lbc_cnt_start = lb;
		}
	}

	ASSERT(lb >= lb_info->lbi_debug_time);

	return (lb - lb_info->lbi_debug_time);
}
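/*
 * Editor's illustrative sketch (not part of the original source and guarded
 * out of the build): a user-level arithmetic check of the lbi_debug_time
 * subtraction performed by the two routines above.  The 100Hz tick and the
 * timestamps are assumptions for illustration.
 */
#if 0
#include <stdio.h>

int
main(void)
{
	long long nsec_per_tick = 10000000LL;		/* assumed 100Hz */
	long long debug_ts = 500LL * nsec_per_tick;	/* entered the debugger */
	long long ts = 800LL * nsec_per_tick;		/* released 300 ticks later */
	long long internal = ts / nsec_per_tick;
	long long debug_time = (ts - debug_ts) / nsec_per_tick;

	/* callers see lbolt with the time spent in the debugger excluded */
	printf("lbolt seen by callers = %lld\n", internal - debug_time);
	return (0);
}
#endif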
/*
 * The lbolt_cyclic() routine will fire at a nsec_per_tick interval to satisfy
 * the performance needs of ddi_get_lbolt() and ddi_get_lbolt64() consumers.
 * It is inactive by default, and will be activated when switching from event
 * to cyclic driven lbolt. The cyclic will turn itself off unless signaled
 * by lbolt_cyclic_driven().
 */
static void
lbolt_cyclic(void)
{
	int ret;

	lb_info->lbi_internal++;

	if (!lbolt_cyc_only) {

		if (lb_info->lbi_cyc_deactivate) {
			/*
			 * Switching from cyclic to event driven mode.
			 */
			if (panicstr == NULL &&
			    atomic_cas_32(&lb_info->lbi_token, 0, 1) == 0) {

				if (lbolt_hybrid == lbolt_event_driven) {
					ret = atomic_dec_32_nv(
					    &lb_info->lbi_token);
					ASSERT(ret == 0);
					return;
				}

				kpreempt_disable();

				lbolt_hybrid = lbolt_event_driven;
				ret = cyclic_reprogram(
				    lb_info->id.lbi_cyclic_id,
				    CY_INFINITY);
				ASSERT(ret);

				kpreempt_enable();

				ret = atomic_dec_32_nv(&lb_info->lbi_token);
				ASSERT(ret == 0);
			}
		}

		/*
		 * The lbolt cyclic should not try to deactivate itself before
		 * the sampling period has elapsed.
		 */
		if (lb_info->lbi_internal - lb_info->lbi_cyc_deac_start >=
		    lb_info->lbi_thresh_interval) {
			lb_info->lbi_cyc_deactivate = B_TRUE;
			lb_info->lbi_cyc_deac_start = lb_info->lbi_internal;
		}
	}
}
/*
 * Since the lbolt service was historically cyclic driven, it must be 'stopped'
 * when the system drops into the kernel debugger. lbolt_debug_entry() is
 * called by the KDI system claim callbacks to record a hires timestamp at
 * debug enter time. lbolt_debug_return() is called by the system release
 * callbacks to account for the time spent in the debugger. The value is then
 * accumulated in the lb_info structure and used by lbolt_event_driven() and
 * lbolt_cyclic_driven(), as well as the mdb_get_lbolt() routine.
 */
void
lbolt_debug_entry(void)
{
	if (lbolt_hybrid != lbolt_bootstrap) {
		ASSERT(lb_info != NULL);
		lb_info->lbi_debug_ts = gethrtime();
	}
}
/*
 * Calculate the time spent in the debugger and add it to the lbolt info
 * structure. We also update the internal lbolt value in case we were in
 * cyclic driven mode going in.
 */
void
lbolt_debug_return(void)
{
	hrtime_t ts;

	if (lbolt_hybrid != lbolt_bootstrap) {
		ASSERT(lb_info != NULL);
		ASSERT(nsec_per_tick > 0);

		ts = gethrtime();
		lb_info->lbi_internal = (ts/nsec_per_tick);
		lb_info->lbi_debug_time +=
		    ((ts - lb_info->lbi_debug_ts)/nsec_per_tick);

		lb_info->lbi_debug_ts = 0;