/*	$NetBSD: ntp_loopfilter.c,v 1.10 2009/04/05 17:33:11 christos Exp $	*/

/*
 * ntp_loopfilter.c - implements the NTP loop filter algorithm
 *
 * ATTENTION: Get approval from Dave Mills on all changes to this file!
 *
 */
#ifdef HAVE_CONFIG_H
# include <config.h>
#endif

#include "ntpd.h"
#include "ntp_io.h"
#include "ntp_unixtime.h"
#include "ntp_stdlib.h"

#include <stdio.h>
#include <ctype.h>

#include <signal.h>
#include <setjmp.h>
#ifdef __NetBSD__
#include <util.h>
#endif

#if defined(VMS) && defined(VMS_LOCALUNIT)	/*wjm*/
#include "ntp_refclock.h"
#endif /* VMS */

#ifdef KERNEL_PLL
#include "ntp_syscall.h"
#endif /* KERNEL_PLL */

/*
 * This is an implementation of the clock discipline algorithm described
 * in UDel TR 97-4-3, as amended. It operates as an adaptive parameter,
 * hybrid phase/frequency-lock loop. A number of sanity checks are
 * included to protect against timewarps, timespikes and general mayhem.
 * All units are in s and s/s, unless noted otherwise.
 */
#define CLOCK_MAX	.128	/* default step threshold (s) */
#define CLOCK_MINSTEP	900.	/* default stepout threshold (s) */
#define CLOCK_PANIC	1000.	/* default panic threshold (s) */
#define CLOCK_PHI	15e-6	/* max frequency error (s/s) */
#define CLOCK_PLL	16.	/* PLL loop gain (log2) */
#define CLOCK_AVG	8.	/* parameter averaging constant */
#define CLOCK_FLL	(NTP_MAXPOLL + CLOCK_AVG) /* FLL loop gain */
#define CLOCK_ALLAN	1500.	/* compromise Allan intercept (s) */
#define CLOCK_DAY	86400.	/* one day in seconds (s) */
#define CLOCK_JUNE	(CLOCK_DAY * 30) /* June in seconds (s) */
#define CLOCK_LIMIT	30	/* poll-adjust threshold */
#define CLOCK_PGATE	4.	/* poll-adjust gate */
#define PPS_MAXAGE	120	/* kernel pps signal timeout (s) */

/*
 * Clock discipline state machine. This is used to control the
 * synchronization behavior during initialization and following a
 * timewarp.
 *
 *	State	< step		> step		Comments
 *	====================================================
 *	NSET	FREQ		step, FREQ	no ntp.drift
 *
 *	FSET	SYNC		step, SYNC	ntp.drift
 *
 *	FREQ	if (mu < 900)	if (mu < 900)	set freq
 *		    ignore	    ignore
 *		else		else
 *		    freq, SYNC	    freq, step, SYNC
 *
 *	SYNC	SYNC		if (mu < 900)	adjust phase/freq
 *				    ignore
 *				else
 *				    SPIK
 *
 *	SPIK	SYNC		step, SYNC	set phase
 */
#define S_NSET	0		/* clock never set */
#define S_FSET	1		/* frequency set from the drift file */
#define S_SPIK	2		/* spike detected */
#define S_FREQ	3		/* frequency mode */
#define S_SYNC	4		/* clock synchronized */

/*
 * Kernel PLL/PPS state machine. This is used with the kernel PLL
 * modifications described in the README.kernel file.
 *
 * If kernel support for the ntp_adjtime() system call is available, the
 * pll_control flag is set. The ntp_enable and kern_enable flags can be
 * set at configuration time or run time using ntpdc. If ntp_enable is
 * false, the discipline loop is unlocked and no corrections of any kind
 * are made. If both pll_control and kern_enable are set, the kernel
 * support is used as described above; if false, the kernel is bypassed
 * entirely and the daemon discipline used instead.
 *
 * There have been three versions of the kernel discipline code. The
 * first (microkernel), now in Solaris, disciplines the microseconds. The
 * second and third (nanokernel) discipline the clock in nanoseconds.
 * These versions are identified if the symbol STA_PLL is present in the
 * header file /usr/include/sys/timex.h. The third and current version
 * includes TAI offset and is identified by the symbol NTP_API with
 * value 4.
 *
 * Each update to a prefer peer sets pps_stratum if it survives the
 * intersection algorithm and its time is within range. The PPS time
 * discipline is enabled (STA_PPSTIME bit set in the status word) when
 * pps_stratum is true and the PPS frequency discipline is enabled. If
 * the PPS time discipline is enabled and the kernel reports a PPS
 * signal is present, the pps_control variable is set to the current
 * time. If the current time is later than pps_control by PPS_MAXAGE
 * (120 s), this variable is set to zero.
 *
 * If an external clock is present, the clock driver sets STA_CLK in the
 * status word. When the local clock driver sees this bit, it updates
 * via this routine, which then calls ntp_adjtime() with the STA_PLL bit
 * set to zero, in which case the system clock is not adjusted. This is
 * also a signal for the external clock driver to discipline the system
 * clock.
 */
/*
 * Program variables that can be tinkered.
 */
double	clock_max = CLOCK_MAX;	/* step threshold (s) */
double	clock_minstep = CLOCK_MINSTEP; /* stepout threshold (s) */
double	clock_panic = CLOCK_PANIC; /* panic threshold (s) */
double	clock_phi = CLOCK_PHI;	/* dispersion rate (s/s) */
double	allan_xpt = CLOCK_ALLAN; /* Allan intercept (s) */

/*
 * Program variables
 */
static double clock_offset;	/* offset (s) */
double	clock_jitter;		/* offset jitter (s) */
double	drift_comp;		/* frequency (s/s) */
double	clock_stability;	/* frequency stability (wander) (s/s) */
u_long	sys_clocktime;		/* last system clock update */
u_long	pps_control;		/* last pps update */
u_long	sys_tai;		/* UTC offset from TAI (s) */
static void rstclock P((int, u_long, double)); /* transition function */

#ifdef KERNEL_PLL
struct timex ntv;		/* kernel API parameters */
int	pll_status;		/* status bits for kernel pll */
#endif /* KERNEL_PLL */

/*
 * Clock state machine control flags
 */
int	ntp_enable;		/* clock discipline enabled */
int	pll_control;		/* kernel support available */
int	kern_enable;		/* kernel support enabled */
int	pps_enable;		/* kernel PPS discipline enabled */
int	ext_enable;		/* external clock enabled */
int	pps_stratum;		/* pps stratum */
int	allow_panic = FALSE;	/* allow panic correction */
int	mode_ntpdate = FALSE;	/* exit on first clock set */

/*
 * Clock state machine variables
 */
int	state;			/* clock discipline state */
u_char	sys_poll = NTP_MINDPOLL; /* time constant/poll (log2 s) */
int	tc_counter;		/* jiggle counter */
double	last_offset;		/* last offset (s) */

/*
 * Huff-n'-puff filter variables
 */
static double *sys_huffpuff;	/* huff-n'-puff filter */
static int sys_hufflen;		/* huff-n'-puff filter stages */
static int sys_huffptr;		/* huff-n'-puff filter pointer */
static double sys_mindly;	/* huff-n'-puff filter min delay */

#if defined(KERNEL_PLL)
/* Emacs cc-mode goes nuts if we split the next line... */
#define MOD_BITS (MOD_OFFSET | MOD_MAXERROR | MOD_ESTERROR | \
    MOD_STATUS | MOD_TIMECONST)
#ifdef SIGSYS
static void pll_trap P((int));	/* configuration trap */
static struct sigaction sigsys;	/* current sigaction status */
static struct sigaction newsigsys; /* new sigaction status */
static sigjmp_buf env;		/* environment var. for pll_trap() */
#endif /* SIGSYS */
#endif /* KERNEL_PLL */

static void
sync_status(const char *what, int status)
{
	char buf[1024];
#ifdef STA_FMT
	snprintb(buf, sizeof(buf), STA_FMT, status);
#else
	snprintf(buf, sizeof(buf), "%04x", status);
#endif
	NLOG(NLOG_SYNCEVENT | NLOG_SYSEVENT)
	    msyslog(LOG_NOTICE,
		"kernel time sync %s %s", what, buf);
}

/*
 * init_loopfilter - initialize loop filter data
 */
void
init_loopfilter(void)
{
	/*
	 * Initialize state variables. Initially, we expect no drift
	 * file, so set the state to S_NSET. If a drift file is present,
	 * it will be detected later and the state set to S_FSET.
	 */
	rstclock(S_NSET, 0, 0);
	clock_jitter = LOGTOD(sys_precision);
}

/*
 * local_clock - the NTP logical clock loop filter.
 *
 * Return codes:
 * -1	update ignored: exceeds panic threshold
 *  0	update ignored: popcorn or exceeds step threshold
 *  1	clock was slewed
 *  2	clock was stepped
 *
 * LOCKCLOCK: The only thing this routine does is set the
 * sys_rootdispersion variable equal to the peer dispersion.
 */
int
local_clock(
	struct peer *peer,	/* synch source peer structure */
	double fp_offset	/* clock offset (s) */
	)
{
	int	rval;		/* return code */
	u_long	mu;		/* interval since last update (s) */
	double	flladj;		/* FLL frequency adjustment (ppm) */
	double	plladj;		/* PLL frequency adjustment (ppm) */
	double	clock_frequency; /* clock frequency adjustment (ppm) */
	double	dtemp, etemp;	/* double temps */
#ifdef OPENSSL
	u_int32 *tpt;
	int	i;
	u_int	len;
	long	togo;
#endif /* OPENSSL */

	/*
	 * If the loop is opened or the NIST LOCKCLOCK is in use,
	 * monitor and record the offsets anyway in order to determine
	 * the open-loop response and then go home.
	 */
#ifdef DEBUG
	if (debug)
		printf(
		    "local_clock: assocID %d offset %.9f freq %.3f state %d\n",
		    peer->associd, fp_offset, drift_comp * 1e6, state);
#endif
#ifdef LOCKCLOCK
	return (0);

#else /* LOCKCLOCK */
	if (!ntp_enable) {
		record_loop_stats(fp_offset, drift_comp, clock_jitter,
		    clock_stability, sys_poll);
		return (0);
	}

	/*
	 * If the clock is way off, panic is declared. The clock_panic
	 * defaults to 1000 s; if set to zero, the panic will never
	 * occur. The allow_panic defaults to FALSE, so the first panic
	 * will exit. It can be set TRUE by a command line option, in
	 * which case the clock will be set anyway and time marches on.
	 * But, allow_panic will be set FALSE when the update is less
	 * than the step threshold; so, subsequent panics will exit.
	 */
	if (fabs(fp_offset) > clock_panic && clock_panic > 0 &&
	    !allow_panic) {
		msyslog(LOG_ERR,
		    "time correction of %.0f seconds exceeds sanity limit (%.0f); set clock manually to the correct UTC time.",
		    fp_offset, clock_panic);
		return (-1);
	}
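
	/*
	 * Worked example (hypothetical values, not from the original
	 * source): with the default clock_panic of 1000 s, an offset
	 * of an hour (3600 s) trips this check and the daemon exits
	 * after logging the message above, unless allow_panic was
	 * enabled on the command line, in which case the first such
	 * correction is applied and time marches on.
	 */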

	/*
	 * If simulating ntpdate, set the clock directly, rather than
	 * using the discipline. The clock_max defines the step
	 * threshold, above which the clock will be stepped instead of
	 * slewed. The value defaults to 128 ms, but can be set to even
	 * unreasonable values. If set to zero, the clock will never be
	 * stepped. Note that a slew will persist beyond the life of
	 * this program.
	 *
	 * Note that if ntpdate is active, the terminal does not detach,
	 * so the termination comments print directly to the console.
	 */
	if (mode_ntpdate) {
		if (fabs(fp_offset) > clock_max && clock_max > 0) {
			step_systime(fp_offset);
			msyslog(LOG_NOTICE, "time reset %+.6f s",
			    fp_offset);
			printf("ntpd: time set %+.6fs\n", fp_offset);
		} else {
			adj_systime(fp_offset);
			msyslog(LOG_NOTICE, "time slew %+.6f s",
			    fp_offset);
			printf("ntpd: time slew %+.6fs\n", fp_offset);
		}
		record_loop_stats(fp_offset, drift_comp, clock_jitter,
		    clock_stability, sys_poll);
		exit (0);
	}
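
	/*
	 * Worked example (hypothetical values): with the default
	 * clock_max of 128 ms, an initial offset of 2 s is stepped
	 * with step_systime(), while an offset of 50 ms is slewed
	 * gradually with adj_systime().
	 */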

	/*
	 * The huff-n'-puff filter finds the lowest delay in the recent
	 * interval. This is used to correct the offset by one-half the
	 * difference between the sample delay and minimum delay. This
	 * is most effective if the delays are highly asymmetric and
	 * clockhopping is avoided and the clock frequency wander is
	 * relatively small.
	 *
	 * Note either there is no prefer peer or this update is from
	 * the prefer peer.
	 */
	if (sys_huffpuff != NULL && (sys_prefer == NULL || sys_prefer ==
	    peer)) {
		if (peer->delay < sys_huffpuff[sys_huffptr])
			sys_huffpuff[sys_huffptr] = peer->delay;
		if (peer->delay < sys_mindly)
			sys_mindly = peer->delay;
		if (fp_offset > 0)
			dtemp = -(peer->delay - sys_mindly) / 2;
		else
			dtemp = (peer->delay - sys_mindly) / 2;
		fp_offset += dtemp;
#ifdef DEBUG
		if (debug)
			printf(
		    "local_clock: size %d mindly %.6f huffpuff %.6f\n",
			    sys_hufflen, sys_mindly, dtemp);
#endif
	}
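
	/*
	 * Worked example (hypothetical values): if peer->delay is
	 * 120 ms and the remembered minimum delay sys_mindly is 20 ms,
	 * half the excess is 50 ms; a positive offset is reduced by
	 * 50 ms and a negative offset increased by 50 ms, on the
	 * assumption that the extra delay lies mostly on one direction
	 * of the path.
	 */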

	/*
	 * Clock state machine transition function. This is where the
	 * action is and defines how the system reacts to large phase
	 * and frequency errors. There are two main regimes: when the
	 * offset exceeds the step threshold and when it does not.
	 * However, if the step threshold is set to zero, a step will
	 * never occur. See the instruction manual for details on how
	 * these actions interact with the command line options.
	 *
	 * Note the system poll is set to minpoll only if the clock is
	 * stepped. Note also the kernel is disabled if step is
	 * disabled or greater than 0.5 s.
	 */
	clock_frequency = flladj = plladj = 0;
	mu = peer->epoch - sys_clocktime;
	if (clock_max == 0 || clock_max > 0.5)
		kern_enable = 0;
	rval = 1;
	if (fabs(fp_offset) > clock_max && clock_max > 0) {
		switch (state) {

		/*
		 * In S_SYNC state we ignore the first outlyer and
		 * switch to S_SPIK state.
		 */
		case S_SYNC:
			state = S_SPIK;
			return (0);

		/*
		 * In S_FREQ state we ignore outlyers and inlyers. At
		 * the first outlyer after the stepout threshold,
		 * compute the apparent frequency correction and step
		 * the phase.
		 */
		case S_FREQ:
			if (mu < clock_minstep)
				return (0);

			clock_frequency = (fp_offset - clock_offset) /
			    mu;
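
			/*
			 * Worked example (hypothetical values): an
			 * offset change of 5 ms accumulated over
			 * mu = 900 s yields a frequency correction of
			 * about .005 / 900 = 5.6e-6 s/s (5.6 PPM).
			 */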

			/* fall through to S_SPIK */

		/*
		 * In S_SPIK state we ignore succeeding outlyers until
		 * either an inlyer is found or the stepout threshold is
		 * exceeded.
		 */
		case S_SPIK:
			if (mu < clock_minstep)
				return (0);

			/* fall through to default */

		/*
		 * We get here by default in S_NSET and S_FSET states
		 * and from above in S_FREQ or S_SPIK states.
		 *
		 * In S_NSET state an initial frequency correction is
		 * not available, usually because the frequency file has
		 * not yet been written. Since the time is outside the
		 * step threshold, the clock is stepped. The frequency
		 * will be set directly following the stepout interval.
		 *
		 * In S_FSET state the initial frequency has been set
		 * from the frequency file. Since the time is outside
		 * the step threshold, the clock is stepped immediately,
		 * rather than after the stepout interval. Guys get
		 * nervous if it takes 17 minutes to set the clock for
		 * the first time.
		 *
		 * In S_FREQ and S_SPIK states the stepout threshold has
		 * expired and the phase is still above the step
		 * threshold. Note that a single spike greater than the
		 * step threshold is always suppressed, even at the
		 * longer poll intervals.
		 */
		default:
			step_systime(fp_offset);
			msyslog(LOG_NOTICE, "time reset %+.6f s",
			    fp_offset);
			reinit_timer();
			tc_counter = 0;
			sys_poll = NTP_MINPOLL;
			sys_tai = 0;
			clock_jitter = LOGTOD(sys_precision);
			rval = 2;
			if (state == S_NSET) {
				rstclock(S_FREQ, peer->epoch, 0);
				return (rval);
			}
			break;
		}
		rstclock(S_SYNC, peer->epoch, 0);
	} else {

		/*
		 * The offset is less than the step threshold. Calculate
		 * the jitter as the exponentially weighted offset
		 * differences.
		 */
		etemp = SQUARE(clock_jitter);
		dtemp = SQUARE(max(fabs(fp_offset - last_offset),
		    LOGTOD(sys_precision)));
		clock_jitter = SQRT(etemp + (dtemp - etemp) /
		    CLOCK_AVG);
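
		/*
		 * Worked example (hypothetical values): with a previous
		 * jitter of 1 ms and an offset difference of 5 ms, the
		 * new jitter is SQRT(1e-6 + (25e-6 - 1e-6) / 8) = 2 ms,
		 * an exponential average with weight 1 / CLOCK_AVG.
		 */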
		switch (state) {

		/*
		 * In S_NSET state this is the first update received and
		 * the frequency has not been initialized. Adjust the
		 * phase, but do not adjust the frequency until after
		 * the stepout threshold.
		 */
		case S_NSET:
			rstclock(S_FREQ, peer->epoch, fp_offset);
			break;

		/*
		 * In S_FSET state this is the first update received and
		 * the frequency has been initialized. Adjust the phase,
		 * but do not adjust the frequency until the next
		 * update.
		 */
		case S_FSET:
			rstclock(S_SYNC, peer->epoch, fp_offset);
			break;

		/*
		 * In S_FREQ state ignore updates until the stepout
		 * threshold. After that, correct the phase and
		 * frequency and switch to S_SYNC state.
		 */
		case S_FREQ:
			if (mu < clock_minstep)
				return (0);

			clock_frequency = (fp_offset - clock_offset) /
			    mu;
			rstclock(S_SYNC, peer->epoch, fp_offset);
			break;

		/*
		 * We get here by default in S_SYNC and S_SPIK states.
		 * Here we compute the frequency update due to PLL and
		 * FLL contributions.
		 */
		default:
			allow_panic = FALSE;

			/*
			 * The FLL and PLL frequency gain constants
			 * depend on the poll interval and Allan
			 * intercept. The PLL is always used, but
			 * becomes ineffective above the Allan
			 * intercept. The FLL is not used below one-half
			 * the Allan intercept. Above that the loop gain
			 * increases in steps to 1 / CLOCK_AVG.
			 */
			if (ULOGTOD(sys_poll) > allan_xpt / 2) {
				dtemp = CLOCK_FLL - sys_poll;
				flladj = (fp_offset - clock_offset) /
				    (max(mu, allan_xpt) * dtemp);
			}

			/*
			 * For the PLL the integration interval
			 * (numerator) is the minimum of the update
			 * interval and poll interval. This allows
			 * oversampling, but not undersampling.
			 */
			etemp = min(mu, (u_long)ULOGTOD(sys_poll));
			dtemp = 4 * CLOCK_PLL * ULOGTOD(sys_poll);
			plladj = fp_offset * etemp / (dtemp * dtemp);
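
			/*
			 * Worked example (hypothetical values): at
			 * sys_poll 6 (64 s) with mu = 64 s and
			 * fp_offset = 10 ms, etemp = 64 and
			 * dtemp = 4 * 16 * 64 = 4096, so plladj is
			 * .010 * 64 / 4096^2, roughly 4e-8 s/s
			 * (0.04 PPM) per update. The FLL term is zero
			 * here, since 64 s is below one-half the
			 * default 1500 s Allan intercept.
			 */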
			rstclock(S_SYNC, peer->epoch, fp_offset);
			break;
		}
	}

#ifdef OPENSSL
	/*
	 * Scan the leapsecond table to determine the TAI offset. If
	 * there is a scheduled leap in future, set the leap warning,
	 * but only if less than 30 days before the leap.
	 */
	tpt = (u_int32 *)tai_leap.ptr;
	len = ntohl(tai_leap.vallen) / sizeof(u_int32);
	if (tpt != NULL) {
		for (i = 0; i < len; i++) {
			togo = (long)ntohl(tpt[i]) -
			    (long)peer->rec.l_ui;
			if (togo > 0) {
				if (togo < CLOCK_JUNE)
					leap_next |= LEAP_ADDSECOND;
				break;
			}
		}
#if defined(STA_NANO) && NTP_API == 4
		if (pll_control && kern_enable && sys_tai == 0) {
			memset(&ntv, 0, sizeof(ntv));
			ntv.modes = MOD_TAI;
			ntv.constant = i + TAI_1972 - 1;
			ntp_adjtime(&ntv);
		}
#endif /* STA_NANO */
		sys_tai = i + TAI_1972 - 1;
	}
#endif /* OPENSSL */
#ifdef KERNEL_PLL
	/*
	 * This code segment works when clock adjustments are made using
	 * precision time kernel support and the ntp_adjtime() system
	 * call. This support is available in Solaris 2.6 and later,
	 * Digital Unix 4.0 and later, FreeBSD, Linux and specially
	 * modified kernels for HP-UX 9 and Ultrix 4. In the case of the
	 * DECstation 5000/240 and Alpha AXP, additional kernel
	 * modifications provide a true microsecond clock and nanosecond
	 * clock, respectively.
	 *
	 * Important note: The kernel discipline is used only if the
	 * step threshold is less than 0.5 s, as anything higher can
	 * lead to overflow problems. This might occur if some misguided
	 * lad set the step threshold to something ridiculous.
	 */
	if (pll_control && kern_enable) {

		/*
		 * We initialize the structure for the ntp_adjtime()
		 * system call. We have to convert everything to
		 * microseconds or nanoseconds first. Do not update the
		 * system variables if the ext_enable flag is set. In
		 * this case, the external clock driver will update the
		 * variables, which will be read later by the local
		 * clock driver. Afterwards, remember the time and
		 * frequency offsets for jitter and stability values and
		 * to update the frequency file.
		 */
		memset(&ntv, 0, sizeof(ntv));
		if (ext_enable) {
			ntv.modes = MOD_STATUS;
		} else {
			struct tm *tm = NULL;
			time_t tstamp;

#ifdef STA_NANO
			ntv.modes = MOD_BITS | MOD_NANO;
#else /* STA_NANO */
			ntv.modes = MOD_BITS;
#endif /* STA_NANO */
			if (clock_offset < 0)
				dtemp = -.5;
			else
				dtemp = .5;
#ifdef STA_NANO
			ntv.offset = (int32)(clock_offset * 1e9 +
			    dtemp);
			ntv.constant = sys_poll;
#else /* STA_NANO */
			ntv.offset = (int32)(clock_offset * 1e6 +
			    dtemp);
			ntv.constant = sys_poll - 4;
#endif /* STA_NANO */

			/*
			 * The frequency is set directly only if
			 * clock_frequency is nonzero coming out of FREQ
			 * state.
			 */
			if (clock_frequency != 0) {
				ntv.modes |= MOD_FREQUENCY;
				ntv.freq = (int32)((clock_frequency +
				    drift_comp) * 65536e6);
			}
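
			/*
			 * Note (added for illustration): the kernel
			 * freq field is in PPM scaled by 2^16, so the
			 * 65536e6 factor converts s/s to that unit;
			 * e.g. a 10 PPM drift (1e-5 s/s) becomes
			 * 10 * 65536 = 655360.
			 */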
			ntv.esterror = (u_int32)(clock_jitter * 1e6);
			ntv.maxerror = (u_int32)((sys_rootdelay / 2 +
			    sys_rootdispersion) * 1e6);
			ntv.status = STA_PLL;

			/*
			 * Set the leap bits in the status word, but
			 * only on the last day of June or December.
			 */
			tstamp = peer->rec.l_ui - JAN_1970;
			tm = gmtime(&tstamp);
			if (tm != NULL) {
				if ((tm->tm_mon + 1 == 6 &&
				    tm->tm_mday == 30) || (tm->tm_mon +
				    1 == 12 && tm->tm_mday == 31)) {
					if (leap_next & LEAP_ADDSECOND)
						ntv.status |= STA_INS;
					else if (leap_next &
					    LEAP_DELSECOND)
						ntv.status |= STA_DEL;
				}
			}

			/*
			 * If the PPS signal is up and enabled, light
			 * the frequency bit. If the PPS driver is
			 * working, light the phase bit as well. If not,
			 * douse the lights, since somebody else may
			 * have left the switch on.
			 */
			if (pps_enable && pll_status & STA_PPSSIGNAL) {
				ntv.status |= STA_PPSFREQ;
				if (pps_stratum < STRATUM_UNSPEC)
					ntv.status |= STA_PPSTIME;
			} else {
				ntv.status &= ~(STA_PPSFREQ |
				    STA_PPSTIME);
			}
		}

		/*
		 * Pass the stuff to the kernel. If it squeals, turn off
		 * the pig. In any case, fetch the kernel offset and
		 * frequency and pretend we did it here.
		 */
		if (ntp_adjtime(&ntv) == TIME_ERROR) {
			sync_status("error", ntv.status);
			ntv.status &= ~(STA_PPSFREQ | STA_PPSTIME);
		} else {
			if ((ntv.status ^ pll_status) & ~STA_FLL)
				sync_status("status change", ntv.status);
		}
		pll_status = ntv.status;
#ifdef STA_NANO
		clock_offset = ntv.offset / 1e9;
#else /* STA_NANO */
		clock_offset = ntv.offset / 1e6;
#endif /* STA_NANO */
		clock_frequency = ntv.freq / 65536e6;
		flladj = plladj = 0;

		/*
		 * If the kernel PPS is lit, monitor its performance.
		 */
		if (ntv.status & STA_PPSTIME) {
			pps_control = current_time;
#ifdef STA_NANO
			clock_jitter = ntv.jitter / 1e9;
#else /* STA_NANO */
			clock_jitter = ntv.jitter / 1e6;
#endif /* STA_NANO */
		}
	} else {
#endif /* KERNEL_PLL */

		/*
		 * We get here if the kernel discipline is not enabled.
		 * Adjust the clock frequency as the sum of the directly
		 * computed frequency (if measured) and the PLL and FLL
		 * increments.
		 */
		clock_frequency = drift_comp + clock_frequency +
		    flladj + plladj;
#ifdef KERNEL_PLL
	}
#endif /* KERNEL_PLL */

	/*
	 * Clamp the frequency within the tolerance range and calculate
	 * the frequency change since the last update.
	 */
	if (fabs(clock_frequency) > NTP_MAXFREQ)
		NLOG(NLOG_SYNCEVENT | NLOG_SYSEVENT)
		    msyslog(LOG_NOTICE,
		    "frequency error %.0f PPM exceeds tolerance %.0f PPM",
		    clock_frequency * 1e6, NTP_MAXFREQ * 1e6);
	dtemp = SQUARE(clock_frequency - drift_comp);
	if (clock_frequency > NTP_MAXFREQ)
		drift_comp = NTP_MAXFREQ;
	else if (clock_frequency < -NTP_MAXFREQ)
		drift_comp = -NTP_MAXFREQ;
	else
		drift_comp = clock_frequency;

	/*
	 * Calculate the wander as the exponentially weighted frequency
	 * differences.
	 */
	etemp = SQUARE(clock_stability);
	clock_stability = SQRT(etemp + (dtemp - etemp) / CLOCK_AVG);
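
	/*
	 * Worked example (hypothetical values): a 1 PPM frequency
	 * change since the last update with a previous wander of
	 * 0.5 PPM gives SQRT(.25 + (1 - .25) / 8), about 0.59 PPM.
	 */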

	/*
	 * Here we adjust the poll interval by comparing the current
	 * offset with the clock jitter. If the offset is less than the
	 * clock jitter times a constant, then the averaging interval is
	 * increased; otherwise it is decreased. A bit of hysteresis
	 * helps calm the dance. Works best using burst mode.
	 */
	if (fabs(clock_offset) < CLOCK_PGATE * clock_jitter) {
		tc_counter += sys_poll;
		if (tc_counter > CLOCK_LIMIT) {
			tc_counter = CLOCK_LIMIT;
			if (sys_poll < peer->maxpoll) {
				tc_counter = 0;
				sys_poll++;
			}
		}
	} else {
		tc_counter -= sys_poll << 1;
		if (tc_counter < -CLOCK_LIMIT) {
			tc_counter = -CLOCK_LIMIT;
			if (sys_poll > peer->minpoll) {
				tc_counter = 0;
				sys_poll--;
			}
		}
	}
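
	/*
	 * Example (hypothetical values): at sys_poll 6 with
	 * CLOCK_LIMIT 30, roughly five or six consecutive updates
	 * inside the CLOCK_PGATE * jitter gate are needed before the
	 * poll interval doubles, while each miss drives the counter
	 * down twice as fast, so the interval backs off more quickly
	 * than it ramps up.
	 */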

	/*
	 * Yibbidy, yibbbidy, yibbidy; that's all folks.
	 */
	record_loop_stats(clock_offset, drift_comp, clock_jitter,
	    clock_stability, sys_poll);
#ifdef DEBUG
	if (debug)
		printf(
		    "local_clock: mu %lu jitr %.6f freq %.3f stab %.6f poll %d count %d\n",
		    mu, clock_jitter, drift_comp * 1e6,
		    clock_stability * 1e6, sys_poll, tc_counter);
#endif /* DEBUG */
	return (rval);
#endif /* LOCKCLOCK */
}


/*
 * adj_host_clock - Called once every second to update the local clock.
 *
 * LOCKCLOCK: The only thing this routine does is increment the
 * sys_rootdispersion variable.
 */
void
adj_host_clock(
	void
	)
{
	double	adjustment;

	/*
	 * Update the dispersion since the last update. In contrast to
	 * NTPv3, NTPv4 does not declare unsynchronized after one day,
	 * since the dispersion check serves this function. Also,
	 * since the poll interval can exceed one day, the old test
	 * would be counterproductive. Note we do this even with
	 * external clocks, since the clock driver will recompute the
	 * maximum error and the local clock driver will pick it up and
	 * pass to the common refclock routines. Very elegant.
	 */
	sys_rootdispersion += clock_phi;

#ifndef LOCKCLOCK
	/*
	 * If clock discipline is disabled or if the kernel is enabled,
	 * get out of Dodge quick.
	 */
	if (!ntp_enable || mode_ntpdate || (pll_control &&
	    kern_enable))
		return;

	/*
	 * Declare PPS kernel unsync if the pps signal has not been
	 * heard for a few minutes.
	 */
	if (pps_control && current_time - pps_control > PPS_MAXAGE) {
		if (pps_control)
			NLOG(NLOG_SYNCEVENT | NLOG_SYSEVENT)
			    msyslog(LOG_NOTICE, "pps sync disabled");
		pps_control = 0;
	}

	/*
	 * Implement the phase and frequency adjustments. The gain
	 * factor (denominator) is not allowed to increase beyond the
	 * Allan intercept. It doesn't make sense to average phase noise
	 * beyond this point and it helps to damp residual offset at the
	 * longer poll intervals.
	 */
	adjustment = clock_offset / (CLOCK_PLL * min(ULOGTOD(sys_poll),
	    allan_xpt));
	clock_offset -= adjustment;
	adj_systime(adjustment + drift_comp);
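
	/*
	 * Worked example (hypothetical values): at sys_poll 6 (64 s)
	 * the denominator is 16 * 64 = 1024, so about 0.1 percent of
	 * the residual offset is amortized each second, giving a loop
	 * time constant of roughly 1000 s; at poll intervals above the
	 * Allan intercept the denominator is capped at 16 * 1500.
	 */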
#endif /* LOCKCLOCK */
}


/*
 * Clock state machine. Enter new state and set state variables. Note we
 * use the time of the last clock filter sample, which may be earlier
 * than the current time.
 */
static void
rstclock(
	int	trans,		/* new state */
	u_long	update,		/* new update time */
	double	offset		/* new offset */
	)
{
#ifdef DEBUG
	if (debug)
		printf("local_clock: time %lu offset %.6f freq %.3f state %d\n",
		    update, offset, drift_comp * 1e6, trans);
#endif
	state = trans;
	sys_clocktime = update;
	last_offset = clock_offset = offset;
}


/*
 * huff-n'-puff filter
 */
void
huffpuff()
{
	int i;

	if (sys_huffpuff == NULL)
		return;

	sys_huffptr = (sys_huffptr + 1) % sys_hufflen;
	sys_huffpuff[sys_huffptr] = 1e9;
	sys_mindly = 1e9;
	for (i = 0; i < sys_hufflen; i++) {
		if (sys_huffpuff[i] < sys_mindly)
			sys_mindly = sys_huffpuff[i];
	}
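
	/*
	 * At this point the oldest stage has been recycled and the
	 * running minimum recomputed; assuming the usual 900 s segment
	 * interval and, say, 8 stages, sys_mindly then reflects roughly
	 * the last two hours of delay samples.
	 */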
}


/*
 * loop_config - configure the loop filter
 *
 * LOCKCLOCK: The LOOP_DRIFTINIT and LOOP_DRIFTCOMP cases are no-ops.
 */
void
loop_config(
	int item,
	double freq
	)
{
	int i;

	switch (item) {

	case LOOP_DRIFTINIT:

#ifndef LOCKCLOCK
#ifdef KERNEL_PLL
		/*
		 * Assume the kernel supports the ntp_adjtime() syscall.
		 * If that syscall works, initialize the kernel time
		 * variables. Otherwise, continue leaving no harm
		 * behind. While at it, ask to set nanosecond mode. If
		 * the kernel agrees, rejoice; otherwise, it does only
		 * microseconds.
		 */
		if (mode_ntpdate)
			break;

		pll_control = 1;
		memset(&ntv, 0, sizeof(ntv));
#ifdef STA_NANO
		ntv.modes = MOD_BITS | MOD_NANO;
#else /* STA_NANO */
		ntv.modes = MOD_BITS;
#endif /* STA_NANO */
		ntv.maxerror = MAXDISPERSE;
		ntv.esterror = MAXDISPERSE;
		ntv.status = STA_UNSYNC;
#ifdef SIGSYS
		/*
		 * Use sigsetjmp() to save state and then call
		 * ntp_adjtime(); if it fails, then siglongjmp() is used
		 * to return control.
		 */
		newsigsys.sa_handler = pll_trap;
		newsigsys.sa_flags = 0;
		if (sigaction(SIGSYS, &newsigsys, &sigsys)) {
			msyslog(LOG_ERR,
			    "sigaction() fails to save SIGSYS trap: %m");
			pll_control = 0;
		}
		if (sigsetjmp(env, 1) == 0)
			ntp_adjtime(&ntv);
		if ((sigaction(SIGSYS, &sigsys,
		    (struct sigaction *)NULL))) {
			msyslog(LOG_ERR,
			    "sigaction() fails to restore SIGSYS trap: %m");
			pll_control = 0;
		}
#else /* SIGSYS */
		ntp_adjtime(&ntv);
#endif /* SIGSYS */

		/*
		 * Save the result status and light up an external clock
		 * if available.
		 */
		pll_status = ntv.status;
		if (pll_control) {
#ifdef STA_NANO
			if (pll_status & STA_CLK)
				ext_enable = 1;
#endif /* STA_NANO */
			sync_status("status", pll_status);
		}
#endif /* KERNEL_PLL */
#endif /* LOCKCLOCK */
		break;

	case LOOP_DRIFTCOMP:

#ifndef LOCKCLOCK
		/*
		 * If the frequency value is reasonable, set the initial
		 * frequency to the given value and the state to S_FSET.
		 * Otherwise, the drift file may be missing or broken,
		 * so set the frequency to zero. This erases past
		 * history should somebody break something.
		 */
		if (freq <= NTP_MAXFREQ && freq >= -NTP_MAXFREQ) {
			drift_comp = freq;
			rstclock(S_FSET, 0, 0);
		} else {
			drift_comp = 0;
		}

#ifdef KERNEL_PLL
		/*
		 * Sanity check. If the kernel is available, load the
		 * frequency and light up the loop. Make sure the offset
		 * is zero to cancel any previous nonsense. If you don't
		 * want this initialization, remove the ntp.drift file.
		 */
		if (pll_control && kern_enable) {
			memset((char *)&ntv, 0, sizeof(ntv));
			ntv.modes = MOD_OFFSET | MOD_FREQUENCY;
			ntv.freq = (int32)(drift_comp * 65536e6);
			ntp_adjtime(&ntv);
		}
#endif /* KERNEL_PLL */
#endif /* LOCKCLOCK */
		break;

	case LOOP_KERN_CLEAR:
#ifndef LOCKCLOCK
#ifdef KERNEL_PLL
		/* Completely turn off the kernel time adjustments. */
		if (pll_control) {
			memset((char *)&ntv, 0, sizeof(ntv));
			ntv.modes = MOD_BITS | MOD_OFFSET | MOD_FREQUENCY;
			ntv.status = STA_UNSYNC;
			ntp_adjtime(&ntv);
			sync_status("disabled", ntv.status);
		}
#endif /* KERNEL_PLL */
#endif /* LOCKCLOCK */
		break;

	/*
	 * Special tinker variables for Ulrich Windl. Very dangerous.
	 */
	case LOOP_MAX:			/* step threshold */
		clock_max = freq;
		break;

	case LOOP_PANIC:		/* panic threshold */
		clock_panic = freq;
		break;

	case LOOP_PHI:			/* dispersion rate */
		clock_phi = freq;
		break;

	case LOOP_MINSTEP:		/* watchdog bark */
		clock_minstep = freq;
		break;

	case LOOP_ALLAN:		/* Allan intercept */
		allan_xpt = freq;
		break;

	case LOOP_HUFFPUFF:		/* huff-n'-puff filter length */
		if (freq < HUFFPUFF)
			freq = HUFFPUFF;
		sys_hufflen = (int)(freq / HUFFPUFF);
		sys_huffpuff = (double *)emalloc(sizeof(double) *
		    sys_hufflen);
		for (i = 0; i < sys_hufflen; i++)
			sys_huffpuff[i] = 1e9;
		sys_mindly = 1e9;
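
		/*
		 * Example, assuming the stock 900 s HUFFPUFF segment
		 * interval: "tinker huffpuff 7200" allocates 8 stages,
		 * covering the past two hours of delay samples.
		 */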
		break;

	case LOOP_FREQ:			/* initial frequency */
		drift_comp = freq / 1e6;
		rstclock(S_FSET, 0, 0);
		break;
	}
}

#if defined(KERNEL_PLL) && defined(SIGSYS)
/*
 * pll_trap - trap processor for undefined syscalls
 *
 * This nugget is called by the kernel when the SYS_ntp_adjtime()
 * syscall bombs because the silly thing has not been implemented in
 * the kernel. In this case the phase-lock loop is emulated by
 * the stock adjtime() syscall and a lot of indelicate abuse.
 */
static RETSIGTYPE
pll_trap(
	int arg
	)
{
	pll_control = 0;
	siglongjmp(env, 1);
}
#endif /* KERNEL_PLL && SIGSYS */