/* Source: ntpsec, ntpd/ntp_control.c
 * (gitweb scrape header removed; see repository history for provenance) */
1 /*
2 * ntp_control.c - respond to mode 6 control messages.
3 * Provides service to ntpq and others.
4 */
6 #include "config.h"
8 #include <stdio.h>
9 #include <ctype.h>
10 #include <sys/stat.h>
11 #include <netinet/in.h>
12 #include <arpa/inet.h>
13 #include <inttypes.h>
14 #include <stdbool.h>
16 #include <openssl/evp.h> /* provides OpenSSL digest API */
17 #include "hack-ancient-openssl.h"
19 #include "ntpd.h"
20 #include "ntp_io.h"
21 #include "ntp_refclock.h"
22 #include "ntp_control.h"
23 #include "ntp_calendar.h"
24 #include "ntp_stdlib.h"
25 #include "ntp_config.h"
26 #include "ntp_assert.h"
27 #include "ntp_leapsec.h"
28 #include "lib_strbuf.h"
29 #include "ntp_syscall.h"
30 #include "ntp_auth.h"
31 #include "nts.h"
32 #include "timespecops.h"
34 /* Run time constants */
35 # include <sys/utsname.h>
36 struct utsname utsnamebuf;
38 /* Variables that need updating each time. */
39 static leap_signature_t lsig;
40 static struct timex ntx;
42 /* Ugh. timex slots are tough. The man page says "long"
43 * But the actual implementation on Linux uses something else.
44 * On some 32 bit systems, that may not match the size of a long.
45 * The below kludge of using a special slot for each of the 5 places
46 * where that type would get used is simpler than setting up a
47 * #define for SIZEOF_TIMEX_XX that could be used to setup the
48 * correct type of the pointer in the table.
50 * See the discussion at:
51 * https://gitlab.com/NTPsec/ntpsec/-/merge_requests/1403
52 * https://lists.ntpsec.org/pipermail/devel/2024-September/010492.html
/*
 * Statistic counters to keep track of requests and responses.
 */
58 static unsigned long ctltimereset; /* time stats reset */
59 static uint64_t numctlreq; /* # of requests we've received */
60 static uint64_t numctlbadpkts; /* # of bad control packets */
61 static uint64_t numctlresponses; /* # of resp packets sent with data */
62 static uint64_t numctlfrags; /* # of fragments sent */
63 static uint64_t numctlerrors; /* # of error responses sent */
64 static uint64_t numctltooshort; /* # of too short input packets */
65 static uint64_t numctlinputresp; /* # of responses on input */
66 static uint64_t numctlinputfrag; /* # of fragments on input */
67 static uint64_t numctlinputerr; /* # of input pkts with err bit set */
68 static uint64_t numctlbadoffset; /* # of input pkts with nonzero offset */
69 static uint64_t numctlbadversion; /* # of input pkts with unknown version */
70 static uint64_t numctldatatooshort; /* data too short for count */
71 static uint64_t numctlbadop; /* bad op code found in packet */
73 static int log_limit = 0; /* Avoid DDoS to log file */
75 // Refactored C?_VARLIST innards
76 ssize_t CI_VARLIST(char*, char*, const struct ctl_var*, bool*);
77 bool CF_VARLIST(const struct ctl_var*, const struct ctl_var*, const struct ctl_var*);
79 /* undefine to suppress random tags and get fixed emission order */
80 #define USE_RANDOMIZE_RESPONSES
/*
 * Structure to hold request procedure information
 */
86 struct ctl_proc {
87 short control_code; /* defined request code */
88 #define NO_REQUEST (-1)
89 unsigned short flags; /* flags word */
90 /* Only one flag. Authentication required or not. */
91 #define NOAUTH 0
92 #define AUTH 1
93 void (*handler) (struct recvbuf *, int); /* handle request */
98 * Request processing routines
100 static void unmarshall_ntp_control(struct ntp_control *, struct recvbuf *);
101 static uint16_t extract_16bits_from_stream(uint8_t *);
102 static void ctl_error (uint8_t);
103 #ifdef REFCLOCK
104 static unsigned short ctlclkstatus (struct refclockstat *);
105 #endif
106 static void ctl_flushpkt (uint8_t);
107 static void ctl_putdata (const char *, unsigned int, bool);
108 static void ctl_putstr (const char *, const char *, size_t);
109 static void ctl_putdblf (const char *, bool, int, double);
110 #define ctl_putdbl(tag, d) ctl_putdblf(tag, true, 3, d)
111 #define ctl_putdbl6(tag, d) ctl_putdblf(tag, true, 6, d)
112 #define ctl_putsfp(tag, sfp) ctl_putdblf(tag, false, -1, FP_UNSCALE(sfp))
113 static void ctl_putuint (const char *, uint64_t);
114 static void ctl_puthex (const char *, uint64_t);
115 static void ctl_putint (const char *, int64_t);
116 static void ctl_putts (const char *, l_fp);
117 static void ctl_putadr (const char *, refid_t, sockaddr_u *);
118 static void ctl_putrefid (const char *, refid_t);
119 static void ctl_putarray (const char *, double *, int);
120 static void ctl_putpeer (int, struct peer *);
121 static void ctl_puttime (const char *, time_t);
122 #ifdef REFCLOCK
123 static void ctl_putclock (int, struct refclockstat *, bool);
124 #endif /* REFCLOCK */
126 /* 2023-Jan-14 Fedora c compiler barfs on function with
127 * arg of const struct foo * unless that is used as a result previously.
129 static const struct var * ctl_getitem(const struct var *, char **);
130 static void ctl_putsys (const struct var *);
131 static void ctl_putspecial (const struct var *);
132 void do_sys_var_list(const char* name, const struct var* v);
135 static const struct ctl_var *ctl_getitem2(const struct ctl_var *, char **);
136 static unsigned short ctlsysstatus (void);
137 static unsigned short count_var (const struct ctl_var *);
138 static void control_unspec (struct recvbuf *, int);
139 static void read_status (struct recvbuf *, int);
140 static void read_sysvars (void);
141 static void read_peervars (void);
142 static void read_variables (struct recvbuf *, int);
143 static void read_clockstatus(struct recvbuf *, int);
144 static void configure (struct recvbuf *, int);
145 static void send_mru_entry (mon_entry *, int);
146 #ifdef USE_RANDOMIZE_RESPONSES
147 static void send_random_tag_value(int);
148 #endif /* USE_RANDOMIZE_RESPONSES */
149 static void read_mru_list (struct recvbuf *, int);
150 static void send_ifstats_entry(endpt *, unsigned int);
151 static void read_ifstats (struct recvbuf *);
152 static void sockaddrs_from_restrict_u(sockaddr_u *, sockaddr_u *,
153 restrict_u *, int);
154 static void send_restrict_entry(restrict_u *, int, unsigned int);
155 static void send_restrict_list(restrict_u *, int, unsigned int *);
156 static void read_addr_restrictions(struct recvbuf *);
157 static void read_ordlist (struct recvbuf *, int);
158 static uint32_t derive_nonce (sockaddr_u *, uint32_t, uint32_t);
159 static void generate_nonce (struct recvbuf *, char *, size_t);
160 static int validate_nonce (const char *, struct recvbuf *);
161 static void req_nonce (struct recvbuf *, int);
163 static const struct ctl_proc control_codes[] = {
164 { CTL_OP_UNSPEC, NOAUTH, control_unspec },
165 { CTL_OP_READSTAT, NOAUTH, read_status },
166 { CTL_OP_READVAR, NOAUTH, read_variables },
167 { CTL_OP_WRITEVAR, AUTH, NULL },
168 { CTL_OP_READCLOCK, NOAUTH, read_clockstatus },
169 { CTL_OP_WRITECLOCK, NOAUTH, NULL },
170 { CTL_OP_CONFIGURE, AUTH, configure },
171 { CTL_OP_READ_MRU, NOAUTH, read_mru_list },
172 { CTL_OP_READ_ORDLIST_A, AUTH, read_ordlist },
173 { CTL_OP_REQ_NONCE, NOAUTH, req_nonce },
174 { NO_REQUEST, 0, NULL }
/* Discriminator for the value union in struct var (below): selects how
 * the variable's value is fetched and formatted.  The *_P members are
 * getter functions rather than data pointers; the *_r members carry a
 * second "since last stats reset" pointer in p2 (see Var_u64_r /
 * Var_l_fp_r).  Presumably dispatched on in ctl_putsys() — confirm. */
enum var_type {v_time,
	v_str, v_dbl, v_uli, v_li, v_uint, v_int,
	v_u64, v_i64, v_u32, v_i32, v_u8, v_i8, v_bool,
	v_strP, v_u64P, v_u32P, v_uliP,
	v_l_fp, v_l_fp_ms, v_l_fp_sec, v_l_fp_sec6,
	v_u64_r, v_l_fp_sec_r,
	v_mrumem,
	v_since, v_kli, v_special};
/* Sub-discriminator used when var_type == v_special: variables that
 * need one-off handling (current peer, refid formatting, oldest MRU
 * age, the variable-name list, ...).  NOTE(review): presumably handled
 * case-by-case in ctl_putspecial() — confirm against its body. */
enum var_type_special {
	vs_peer, vs_peeradr, vs_peermode,
	vs_systime,
	vs_refid, vs_mruoldest, vs_varlist};
189 struct var {
190 const char* name;
191 const int flags;
192 const enum var_type type;
193 union {
194 const time_t* time;
195 const char* str;
196 const double* dbl;
197 const unsigned long int* uli;
198 const long int* li;
199 const long long* ll;
200 #ifdef NTP_TIMEX_LONG_LONG
201 const long long* timex_li;
202 #else
203 const long* timex_li;
204 #endif
205 const unsigned int* uinnt;
206 const int* innt;
207 const uint64_t* u64;
208 const int64_t* i64;
209 const uint32_t* u32;
210 const int32_t* i32;
211 const uint8_t* u8;
212 const int8_t* i8;
213 const bool* boool;
214 const l_fp* l_fp;
215 const uptime_t* up;
216 const char* (*strP)(void);
217 uint64_t (*u64P)(void);
218 uint32_t (*u32P)(void);
219 unsigned long int (*uliP)(void);
220 const enum var_type_special special;
221 } p;
222 union {
223 /* second pointer for returning recent since-stats-logged */
224 const uint64_t* u64;
225 const uint64_t* l_fp;
226 } p2;
229 #define Var_time(xname, xflags, xlocation) { \
230 .name = xname, .flags = xflags, .type = v_time, .p.time = &xlocation }
231 #define Var_str(xname, xflags, xlocation) { \
232 .name = xname, .flags = xflags, .type = v_str, .p.str = xlocation }
233 #define Var_dbl(xname, xflags, xlocation) { \
234 .name = xname, .flags = xflags, .type = v_dbl, .p.dbl = &xlocation }
235 #define Var_uli(xname, xflags, xlocation) { \
236 .name = xname, .flags = xflags, .type = v_uli, .p.uli = &xlocation }
237 #define Var_li(xname, xflags, xlocation) { \
238 .name = xname, .flags = xflags, .type = v_li, .p.li = &xlocation }
239 #define Var_uint(xname, xflags, xlocation) { \
240 .name = xname, .flags = xflags, .type = v_uint, .p.uinnt = &xlocation }
241 #define Var_int(xname, xflags, xlocation) { \
242 .name = xname, .flags = xflags, .type = v_int, .p.innt = &xlocation }
244 #define Var_strP(xname, xflags, xlocation) { \
245 .name = xname, .flags = xflags, .type = v_strP, .p.strP = xlocation }
246 #define Var_u64P(xname, xflags, xlocation) { \
247 .name = xname, .flags = xflags, .type = v_u64P, .p.u64P = xlocation }
248 #define Var_u32P(xname, xflags, xlocation) { \
249 .name = xname, .flags = xflags, .type = v_u32P, .p.u32P = xlocation }
250 #define Var_uliP(xname, xflags, xlocation) { \
251 .name = xname, .flags = xflags, .type = v_uliP, .p.uliP = xlocation }
253 #define Var_u64(xname, xflags, xlocation) { \
254 .name = xname, .flags = xflags, .type = v_u64, .p.u64 = &xlocation }
255 #define Var_u64_r(xname, xflags, xlocation) { \
256 .name = xname, .flags = xflags, .type = v_u64_r, \
257 .p.u64 = &xlocation, .p2.u64 = &(old_##xlocation) }
258 #define Var_i64(xname, xflags, xlocation) { \
259 .name = xname, .flags = xflags, .type = v_i64, .p.i64 = &xlocation }
260 #define Var_u32(xname, xflags, xlocation) { \
261 .name = xname, .flags = xflags, .type = v_u32, .p.u32 = &xlocation }
262 #define Var_i32(xname, xflags, xlocation) { \
263 .name = xname, .flags = xflags, .type = v_i32, .p.i32 = &xlocation }
264 #define Var_u8(xname, xflags, xlocation) { \
265 .name = xname, .flags = xflags, .type = v_u8, .p.u8 = &xlocation }
266 #define Var_i8(xname, xflags, xlocation) { \
267 .name = xname, .flags = xflags, .type = v_i8, .p.i8 = &xlocation }
268 #define Var_bool(xname, xflags, xlocation) { \
269 .name = xname, .flags = xflags, .type = v_bool, .p.boool = &xlocation }
271 #define Var_l_fp(xname, xflags, xlocation) { \
272 .name = xname, .flags = xflags, .type = v_l_fp, .p.l_fp = &xlocation }
273 #define Var_l_fp_ms(xname, xflags, xlocation) { \
274 .name = xname, .flags = xflags, .type = v_l_fp_ms, .p.l_fp = &xlocation }
275 #define Var_l_fp_sec(xname, xflags, xlocation) { \
276 .name = xname, .flags = xflags, .type = v_l_fp_sec, .p.l_fp = &xlocation }
277 #define Var_l_fp_r(xname, xflags, xlocation) { \
278 .name = xname, .flags = xflags, .type = v_l_fp_sec_r, \
279 .p.l_fp = &xlocation, .p2.l_fp = &(old_##xlocation) }
280 #define Var_l_fp_sec6(xname, xflags, xlocation) { \
281 .name = xname, .flags = xflags, .type = v_l_fp_sec6, .p.l_fp = &xlocation }
282 #define Var_since(xname, xflags, xlocation) { \
283 .name = xname, .flags = xflags, .type = v_since, .p.up = &xlocation }
285 #define Var_mrumem(xname, xflags, xlocation) { \
286 .name = xname, .flags = xflags, .type = v_mrumem, .p.u64 = &xlocation }
287 #define Var_kli(xname, xflags, xlocation) { \
288 .name = xname, .flags = xflags, .type = v_kli, .p.timex_li = &xlocation }
289 #define Var_special(xname, xflags, xspecial) { \
290 .name = xname, .flags = xflags, .type = v_special, .p.special = xspecial }
292 static const struct var sys_var[] = {
293 Var_u8("leap", RO|DEF, sys_vars.sys_leap), // Was RW
294 Var_u8("stratum", RO|DEF, sys_vars.sys_stratum),
295 Var_i8("precision", RO|DEF, sys_vars.sys_precision),
296 Var_dbl("rootdelay", RO|DEF|ToMS, sys_vars.sys_rootdelay),
297 Var_dbl("rootdisp", RO|DEF|ToMS, sys_vars.sys_rootdisp),
298 Var_dbl("rootdist", RO|DEF|ToMS, sys_vars.sys_rootdist),
299 Var_special("refid", RO|DEF, vs_refid),
300 Var_l_fp("reftime", RO|DEF, sys_vars.sys_reftime),
301 Var_u8("tc", RO|DEF, clkstate.sys_poll),
302 Var_special("peer", RO|DEF, vs_peer),
303 Var_dbl("offset", RO|DEF|ToMS|DBL6, clkstate.last_offset),
304 Var_dbl("frequency", RO|DEF|ToPPM, loop_data.drift_comp),
306 Var_dbl("sys_jitter", RO|DEF|ToMS|DBL6, clkstate.sys_jitter),
307 Var_dbl("clk_jitter", RO|DEF|ToMS|DBL6, clkstate.clock_jitter),
308 Var_special("clock", RO|DEF, vs_systime),
309 Var_str("processor", RO|DEF, utsnamebuf.machine),
310 Var_str("system", RO|DEF, utsnamebuf.sysname),
311 // old code appended release to system
312 Var_str("release", RO|DEF, utsnamebuf.release),
313 Var_strP("version", RO|DEF, ntpd_version),
315 Var_dbl("clk_wander", RO|DEF|ToPPM|DBL6, loop_data.clock_stability),
316 Var_special("sys_var_list", RO|DEF, vs_varlist),
317 Var_uint("tai", RO|DEF, sys_tai),
318 Var_time("leapsec", RO|DEF|N_LEAP, lsig.ttime),
319 Var_time("expire", RO|DEF|N_LEAP, lsig.etime),
320 Var_u8("mintc", RO|DEF, rstrct.ntp_minpoll),
322 Var_uint("mru_enabled", RO, mon_data.mon_enabled),
323 Var_u64("mru_hashslots", RO, mon_data.mru_hashslots),
324 Var_u64("mru_depth", RO, mon_data.mru_entries),
325 Var_u64("mru_deepest", RO, mon_data.mru_peakentries),
326 Var_u64("mru_mindepth", RO, mon_data.mru_mindepth),
327 Var_int("mru_maxage", RO, mon_data.mru_maxage),
328 Var_int("mru_minage", RO, mon_data.mru_minage),
329 Var_u64("mru_maxdepth", RO, mon_data.mru_maxdepth),
330 Var_mrumem("mru_mem", RO, mon_data.mru_entries),
331 Var_mrumem("mru_maxmem", RO, mon_data.mru_maxdepth),
332 Var_u64("mru_exists", RO, mon_data.mru_exists),
333 Var_u64("mru_new", RO, mon_data.mru_new),
334 Var_u64("mru_recycleold", RO, mon_data.mru_recycleold),
335 Var_u64("mru_recyclefull", RO, mon_data.mru_recyclefull),
336 Var_u64("mru_none", RO, mon_data.mru_none),
337 Var_special("mru_oldest_age", RO, vs_mruoldest),
339 #define Var_Pair(name, location) \
340 Var_u64P(name, RO, stat_##location), \
341 Var_u64P(name "_r", RO, stat_total_##location)
343 Var_u32("ss_uptime", RO, current_time),
344 Var_u32P("ss_reset", RO, stat_stattime),
345 Var_u32P("ss_reset_r", RO, stat_total_stattime),
346 Var_Pair("ss_received", received),
347 Var_Pair("ss_thisver", newversion),
348 Var_Pair("ss_oldver", oldversion),
349 Var_Pair("ss_ver1", version1),
350 Var_Pair("ss_ver1client", version1client),
351 Var_Pair("ss_ver1zero", version1zero),
352 Var_Pair("ss_ver1symm", version1symm),
353 Var_Pair("ss_badformat", badlength),
354 Var_Pair("ss_badauth", badauth),
355 Var_Pair("ss_declined", declined),
356 Var_Pair("ss_restricted", restricted),
357 Var_Pair("ss_limited", limitrejected),
358 Var_Pair("ss_kodsent", kodsent),
359 Var_Pair("ss_processed", processed),
360 #undef Var_Pair
362 /* We own this one. See above. No proc mode.
363 * Note that lots of others are not (yet?) in this table. */
364 Var_u64("ss_numctlreq", RO, numctlreq),
366 Var_special("peeradr", RO, vs_peeradr),
367 Var_special("peermode", RO, vs_peermode),
369 /* authinfo: Shared Key Authentication */
370 Var_since("authreset", RO, auth_timereset),
371 Var_l_fp_ms("authdelay", RO, sys_authdelay),
372 Var_uint("authkeys", RO, authnumkeys),
373 Var_uint("authfreek", RO, authnumfreekeys),
374 Var_uli("authklookups", RO, authkeylookups),
375 Var_uli("authknotfound", RO, authkeynotfound),
376 Var_uli("authencrypts", RO, authencryptions),
377 Var_uli("authdigestencrypts", RO, authdigestencrypt),
378 Var_uli("authcmacencrypts", RO, authcmacencrypt),
379 Var_uli("authdecrypts", RO, authdecryptions),
380 Var_uli("authdigestdecrypts", RO, authdigestdecrypt),
381 Var_uli("authdigestfails", RO, authdigestfail),
382 Var_uli("authcmacdecrypts", RO, authcmacdecrypt),
383 Var_uli("authcmacfails", RO, authcmacfail),
385 /* kerninfo: Kernel timekeeping info */
386 Var_kli("koffset", RO|N_CLOCK|KNUToMS, ntx.offset),
387 Var_kli("kfreq", RO|N_CLOCK|K_16, ntx.freq),
388 Var_kli("kmaxerr", RO|N_CLOCK|KUToMS, ntx.maxerror),
389 Var_kli("kesterr", RO|N_CLOCK|KUToMS, ntx.esterror),
390 Var_int("kstflags", RO|N_CLOCK, ntx.status), // turn to text
391 Var_kli("ktimeconst", RO|N_CLOCK, ntx.constant),
392 Var_kli("kprecis", RO|N_CLOCK|KUToMS, ntx.precision),
393 Var_kli("kfreqtol", RO|N_CLOCK|K_16, ntx.tolerance), // Not in man page
394 Var_kli("kppsfreq", RO|N_CLOCK|K_16, ntx.ppsfreq),
395 Var_kli("kppsjitter", RO|N_CLOCK|KNUToMS, ntx.jitter),
396 Var_int("kppscalibdur", RO|N_CLOCK, ntx.shift), // 1<<shift
397 Var_kli("kppsstab", RO|N_CLOCK|K_16, ntx.stabil),
398 Var_kli("kppsjitexc", RO|N_CLOCK, ntx.jitcnt),
399 Var_kli("kppscalibs", RO|N_CLOCK, ntx.calcnt),
400 Var_kli("kppscaliberrs", RO|N_CLOCK, ntx.errcnt),
401 Var_kli("kppsstbexc", RO|N_CLOCK, ntx.stbcnt),
404 /* refclock stuff in ntp_io */
405 Var_since("iostats_reset", RO, io_timereset),
406 Var_u64P("io_dropped", RO, dropped_count),
407 Var_u64P("io_ignored", RO, ignored_count),
408 Var_u64P("io_received", RO, received_count),
409 Var_u64P("io_sent", RO, sent_count),
410 Var_u64P("io_sendfailed", RO, notsent_count),
411 Var_u64P("io_wakeups", RO, handler_calls_count),
412 Var_u64P("io_pkt_reads", RO, handler_pkts_count),
413 #ifdef REFCLOCK
414 Var_u64P("io_ref_reads", RO, handler_refrds_count),
415 #endif
417 // receive buffers
418 Var_uliP("total_rbuf", RO, total_recvbuffs),
419 Var_uliP("free_rbuf", RO, free_recvbuffs),
420 Var_uliP("used_rbuf", RO, lowater_additions),
422 Var_since("timerstats_reset", RO, timer_timereset),
423 Var_uli("timer_overruns", RO, alarm_overflow),
424 Var_uli("timer_xmts", RO, timer_xmtcalls),
426 Var_uli("clk_wander_threshold", RO|ToPPM, timer_xmtcalls),
428 #ifdef ENABLE_LEAP_SMEAR
429 /* Old code returned nothing if leap.smear_intv was 0 */
430 Var_uint("leapsmearinterval", RO, leap_smear_intv),
431 Var_dbl("leapsmearoffset", RO|ToMS, leap_smear.doffset),
432 #endif
434 Var_bool("lockclock", RO, loop_data.lockclock),
436 #ifndef DISABLE_NTS
437 #define Var_Pair(name, location) \
438 Var_u64(name, RO, location), \
439 Var_u64_r(name "_r", RO, location)
440 #define Var_PairF(name, location) \
441 Var_l_fp_sec(name, RO, location), \
442 Var_l_fp_r(name "_r", RO, location)
443 /* ntsinfo: NTS statistics */
444 Var_Pair("nts_client_send", nts_cnt.client_send),
445 Var_Pair("nts_client_recv_good", nts_cnt.client_recv_good),
446 Var_Pair("nts_client_recv_bad", nts_cnt.client_recv_bad),
447 Var_Pair("nts_server_send", nts_cnt.server_send),
448 Var_Pair("nts_server_recv_good", nts_cnt.server_recv_good),
449 Var_Pair("nts_server_recv_bad", nts_cnt.server_recv_bad),
450 Var_Pair("nts_cookie_make", nts_cnt.cookie_make),
451 Var_Pair("nts_cookie_not_server", nts_cnt.cookie_not_server),
452 Var_Pair("nts_cookie_decode_total", nts_cnt.cookie_decode_total),
453 Var_Pair("nts_cookie_decode_current", nts_cnt.cookie_decode_current),
454 /* Following line is a hack for old versions of ntpq
455 * nts_cookie_decode is old name for nts_cookie_decode_current */
456 Var_Pair("nts_cookie_decode", nts_cnt.cookie_decode_current),
457 Var_Pair("nts_cookie_decode_old", nts_cnt.cookie_decode_old),
458 Var_Pair("nts_cookie_decode_old2", nts_cnt.cookie_decode_old2),
459 Var_Pair("nts_cookie_decode_older", nts_cnt.cookie_decode_older),
460 Var_Pair("nts_cookie_decode_too_old", nts_cnt.cookie_decode_too_old),
461 Var_Pair("nts_cookie_decode_error", nts_cnt.cookie_decode_error),
462 Var_Pair("nts_ke_serves_good", ntske_cnt.serves_good),
463 Var_PairF("nts_ke_serves_good_wall", ntske_cnt.serves_good_wall),
464 Var_PairF("nts_ke_serves_good_cpu", ntske_cnt.serves_good_cpu),
465 Var_Pair("nts_ke_serves_nossl", ntske_cnt.serves_nossl),
466 Var_PairF("nts_ke_serves_nossl_wall", ntske_cnt.serves_nossl_wall),
467 Var_PairF("nts_ke_serves_nossl_cpu", ntske_cnt.serves_nossl_cpu),
468 Var_Pair("nts_ke_serves_bad", ntske_cnt.serves_bad),
469 Var_PairF("nts_ke_serves_bad_wall", ntske_cnt.serves_bad_wall),
470 Var_PairF("nts_ke_serves_bad_cpu", ntske_cnt.serves_bad_cpu),
471 Var_Pair("nts_ke_probes_good", ntske_cnt.probes_good),
472 Var_Pair("nts_ke_probes_bad", ntske_cnt.probes_bad),
473 #undef Var_Pair
474 #undef Var_PairF
475 #endif
477 #ifdef ENABLE_MSSNTP
478 #define Var_Pair(name, location) \
479 Var_u64(name, RO, location), \
480 Var_u64_r(name "_r", RO, location)
481 #define Var_PairF(name, location) \
482 Var_l_fp_sec(name, RO, location), \
483 Var_l_fp_r(name "_r", RO, location)
484 Var_Pair("mssntp_serves", mssntp_cnt.serves),
485 Var_Pair("mssntp_serves_no", mssntp_cnt.serves_no),
486 Var_Pair("mssntp_serves_err", mssntp_cnt.serves_err),
487 Var_Pair("mssntp_serves_good", mssntp_cnt.serves_good),
488 Var_PairF("mssntp_serves_good_wall", mssntp_cnt.serves_good_wall),
489 Var_l_fp_sec6("mssntp_serves_good_slowest", RO,
490 mssntp_cnt.serves_good_slowest),
491 Var_l_fp_sec6("mssntp_serves_good_slowest_r", RO,
492 old_mssntp_cnt.serves_good_slowest),
493 Var_Pair("mssntp_serves_bad", mssntp_cnt.serves_bad),
494 Var_PairF("mssntp_serves_bad_wall", mssntp_cnt.serves_bad_wall),
495 Var_l_fp_sec6("mssntp_serves_bad_slowest", RO,
496 mssntp_cnt.serves_bad_slowest),
497 Var_l_fp_sec6("mssntp_serves_bad_slowest_r", RO,
498 old_mssntp_cnt.serves_bad_slowest),
499 #undef Var_Pair
500 #undef Var_PairF
501 #endif
504 { .flags=EOV } // end marker for scans
510 static struct ctl_var *ext_sys_var = NULL;
513 * Peer variable list. Not order-sensitive.
515 static const struct ctl_var peer_var2[] = {
516 { 0, PADDING, "" },
517 #define CP_CONFIG 1
518 { CP_CONFIG, RO, "config" },
519 #define CP_AUTHENABLE 2
520 { CP_AUTHENABLE, RO, "authenable" },
521 #define CP_AUTHENTIC 3
522 { CP_AUTHENTIC, RO, "authentic" },
523 #define CP_SRCADR 4
524 { CP_SRCADR, RO|DEF , "srcadr" },
525 #define CP_SRCPORT 5
526 { CP_SRCPORT, RO|DEF, "srcport" },
527 #define CP_DSTADR 6
528 { CP_DSTADR, RO|DEF, "dstadr" },
529 #define CP_DSTPORT 7
530 { CP_DSTPORT, RO|DEF, "dstport" },
531 #define CP_LEAP 8
532 { CP_LEAP, RO|DEF, "leap" },
533 #define CP_HMODE 9
534 { CP_HMODE, RO|DEF, "hmode" },
535 #define CP_STRATUM 10
536 { CP_STRATUM, RO|DEF, "stratum" },
537 #define CP_PPOLL 11
538 { CP_PPOLL, RO|DEF, "ppoll" },
539 #define CP_HPOLL 12
540 { CP_HPOLL, RO|DEF, "hpoll" },
541 #define CP_PRECISION 13
542 { CP_PRECISION, RO|DEF, "precision" },
543 #define CP_ROOTDELAY 14
544 { CP_ROOTDELAY, RO|DEF, "rootdelay" },
545 #define CP_ROOTDISPERSION 15
546 { CP_ROOTDISPERSION, RO|DEF, "rootdisp" },
547 #define CP_REFID 16
548 { CP_REFID, RO|DEF, "refid" },
549 #define CP_REFTIME 17
550 { CP_REFTIME, RO|DEF, "reftime" },
551 /* Placeholder. Reporting of "org" is disabled because
552 leaking it creates a vulnerability */
553 #define CP_ORG 18
554 { CP_ORG, RO, "org" },
555 #define CP_REC 19
556 { CP_REC, RO|DEF, "rec" },
557 #define CP_XMT 20
558 { CP_XMT, RO|DEF, "xmt" },
559 #define CP_REACH 21
560 { CP_REACH, RO|DEF, "reach" },
561 #define CP_UNREACH 22
562 { CP_UNREACH, RO|DEF, "unreach" },
563 #define CP_TIMER 23
564 { CP_TIMER, RO, "timer" },
565 #define CP_DELAY 24
566 { CP_DELAY, RO|DEF, "delay" },
567 #define CP_OFFSET 25
568 { CP_OFFSET, RO|DEF, "offset" },
569 #define CP_JITTER 26
570 { CP_JITTER, RO|DEF, "jitter" },
571 #define CP_DISPERSION 27
572 { CP_DISPERSION, RO|DEF, "dispersion" },
573 #define CP_KEYID 28
574 { CP_KEYID, RO|DEF, "keyid" },
575 #define CP_FILTDELAY 29
576 { CP_FILTDELAY, RO|DEF, "filtdelay" },
577 #define CP_FILTOFFSET 30
578 { CP_FILTOFFSET, RO|DEF, "filtoffset" },
579 #define CP_PMODE 31
580 { CP_PMODE, RO|DEF, "pmode" },
581 #define CP_RECEIVED 32
582 { CP_RECEIVED, RO, "received"},
583 #define CP_SENT 33
584 { CP_SENT, RO, "sent" },
585 #define CP_FILTERROR 34
586 { CP_FILTERROR, RO|DEF, "filtdisp" },
587 #define CP_FLASH 35
588 { CP_FLASH, RO|DEF, "flash" },
589 #define CP_MODE 36
590 { CP_MODE, RO|DEF, "mode" },
591 #define CP_VARLIST 37
592 { CP_VARLIST, RO, "peer_var_list" },
593 #define CP_RATE 38
594 { CP_RATE, RO|DEF, "headway" },
595 #define CP_BIAS 39
596 { CP_BIAS, RO|DEF, "bias" },
597 #define CP_SRCHOST 40
598 { CP_SRCHOST, RO|DEF, "srchost" },
599 #define CP_TIMEREC 41
600 { CP_TIMEREC, RO, "timerec" },
601 #define CP_TIMEREACH 42
602 { CP_TIMEREACH, RO, "timereach" },
603 #define CP_BADAUTH 43
604 { CP_BADAUTH, RO, "badauth" },
605 #define CP_BOGUSORG 44
606 { CP_BOGUSORG, RO, "bogusorg" },
607 #define CP_OLDPKT 45
608 { CP_OLDPKT, RO, "oldpkt" },
609 #define CP_SELDISP 46
610 { CP_SELDISP, RO, "seldisp" },
611 #define CP_SELBROKEN 47
612 { CP_SELBROKEN, RO, "selbroken" },
613 #define CP_CANDIDATE 48
614 { CP_CANDIDATE, RO, "candidate" },
615 /* new in NTPsec */
616 #define CP_NTSCOOKIES 49
617 { CP_NTSCOOKIES, RO|DEF, "ntscookies" },
618 #define CP_MAXCODE ((sizeof(peer_var2)/sizeof(peer_var2[0])) - 1)
619 { 0, EOV, "" }
622 #ifdef REFCLOCK
624 * Clock variable list. Not order-sensitive.
627 static const struct ctl_var clock_var2[] = {
628 { 0, PADDING, "" },
629 #define CC_NAME 1
630 { CC_NAME, RO|DEF, "name" },
631 #define CC_TIMECODE 2
632 { CC_TIMECODE, RO|DEF, "timecode" },
633 #define CC_POLL 3
634 { CC_POLL, RO|DEF, "poll" },
635 #define CC_NOREPLY 4
636 { CC_NOREPLY, RO|DEF, "noreply" },
637 #define CC_BADFORMAT 5
638 { CC_BADFORMAT, RO|DEF, "badformat" },
639 #define CC_BADDATA 6
640 { CC_BADDATA, RO|DEF, "baddata" },
641 #define CC_FUDGETIME1 7
642 { CC_FUDGETIME1, RO|DEF, "fudgetime1" },
643 #define CC_FUDGETIME2 8
644 { CC_FUDGETIME2, RO|DEF, "fudgetime2" },
645 #define CC_FUDGEVAL1 9
646 { CC_FUDGEVAL1, RO|DEF, "stratum" },
647 #define CC_FUDGEVAL2 10
648 { CC_FUDGEVAL2, RO|DEF, "refid" },
649 #define CC_FLAGS 11
650 { CC_FLAGS, RO|DEF, "flags" },
651 #define CC_DEVICE 12
652 { CC_DEVICE, RO|DEF, "device" },
653 #define CC_VARLIST 13
654 { CC_VARLIST, RO, "clock_var_list"},
655 #define CC_MAXCODE CC_VARLIST
656 { 0, EOV, "" }
658 #endif
661 * MRU string constants shared by send_mru_entry() and read_mru_list().
663 static const char addr_fmt[] = "addr.%d";
664 static const char last_fmt[] = "last.%d";
667 * Keyid used for authenticating write requests.
669 keyid_t ctl_auth_keyid;
672 * We keep track of the last error reported by the system internally
674 static uint8_t ctl_sys_last_event;
675 static uint8_t ctl_sys_num_events;
680 * Response packet used by these routines. Also some state information
681 * so that we can handle packet formatting within a common set of
682 * subroutines. Note we try to enter data in place whenever possible,
683 * but the need to set the more bit correctly means we occasionally
684 * use the extra buffer and copy.
686 static struct ntp_control rpkt;
687 static uint8_t res_version;
688 static uint8_t res_opcode;
689 static associd_t res_associd;
690 static unsigned short res_frags; /* datagrams in this response */
691 static int res_offset; /* offset of payload in response */
692 static uint8_t * datapt;
693 static int datalinelen;
694 static bool datasent; /* flag to avoid initial ", " */
695 static bool datanotbinflag;
696 static sockaddr_u *rmt_addr;
697 static endpt *lcl_inter;
699 static auth_info* res_auth; /* !NULL => authenticate */
701 #define MAXDATALINELEN (72)
704 * Pointers for saving state when decoding request packets
706 static char *reqpt;
707 static char *reqend;
710 * init_control - initialize request data
712 void
713 init_control(void) {
714 uname(&utsnamebuf);
716 ctl_clr_stats();
718 ctl_auth_keyid = 0;
719 /* these may be unused with the old trap facility gone */
720 ctl_sys_last_event = EVNT_UNSPEC;
721 ctl_sys_num_events = 0;
723 #ifdef ENABLE_CLASSIC_MODE
724 /* a relic from when there were multiple nonstandard ways to set time */
725 #define PRESET "settimeofday=\"clock_settime\""
726 set_sys_var(PRESET, sizeof(PRESET), RO);
727 #undef PRESET
728 #endif /* ENABLE_CLASSIC_MODE */
733 * unmarshall_ntp_control - unmarshall data stream into a ntp_sontrol struct
735 void
736 unmarshall_ntp_control(struct ntp_control *pkt, struct recvbuf *rbufp) {
737 pkt->li_vn_mode = (uint8_t)rbufp->recv_buffer[0];
738 pkt->r_m_e_op = (uint8_t)rbufp->recv_buffer[1];
739 pkt->sequence = extract_16bits_from_stream(&rbufp->recv_buffer[2]);
740 pkt->status = extract_16bits_from_stream(&rbufp->recv_buffer[4]);
741 pkt->associd = extract_16bits_from_stream(&rbufp->recv_buffer[6]);
742 pkt->offset = extract_16bits_from_stream(&rbufp->recv_buffer[8]);
743 pkt->count = extract_16bits_from_stream(&rbufp->recv_buffer[10]);
744 memcpy(&pkt->data, rbufp->recv_buffer + 12, 480 + MAX_MAC_LEN);
/*
 * extract_16bits_from_stream - fetch two consecutive wire octets as a
 * uint16_t WITHOUT converting to host order.
 *
 * The shift assembles the host-order value of the big-endian pair, and
 * the ntohs() then reverses that conversion: the net effect is the same
 * as memcpy()ing the two bytes, i.e. the result is still in network
 * byte order.  Callers such as process_control() run ntohs() on the
 * individual fields themselves, so do NOT "simplify" the ntohs() away
 * here — that would double-convert on little-endian hosts.
 */
uint16_t
extract_16bits_from_stream(uint8_t *addr) {
	const uint16_t netval =
	    (uint16_t)(((uint16_t)addr[0] << 8) | (uint16_t)addr[1]);

	return ntohs(netval);
}
757 * ctl_error - send an error response for the current request
759 static void
760 ctl_error(
761 uint8_t errcode
764 int maclen;
766 numctlerrors++;
767 DPRINT(3, ("sending control error %u\n", errcode));
770 * Fill in the fields. We assume rpkt.sequence and rpkt.associd
771 * have already been filled in.
773 rpkt.r_m_e_op = (uint8_t)CTL_RESPONSE | CTL_ERROR |
774 (res_opcode & CTL_OP_MASK);
775 rpkt.status = htons((unsigned short)(errcode & 0xff) << 8);
776 rpkt.count = 0;
779 * send packet and bump counters
781 if (NULL != res_auth) {
782 maclen = authencrypt(res_auth, (uint32_t *)&rpkt,
783 CTL_HEADER_LEN);
784 sendpkt(rmt_addr, lcl_inter, &rpkt,
785 (int)CTL_HEADER_LEN + maclen);
786 } else
787 sendpkt(rmt_addr, lcl_inter, &rpkt, CTL_HEADER_LEN);
791 * process_control - process an incoming control message
793 void
794 process_control(
795 struct recvbuf *rbufp,
796 int restrict_mask
799 struct ntp_control *pkt;
800 struct ntp_control pkt_core;
801 int req_count;
802 int req_data;
803 const struct ctl_proc *cc;
804 keyid_t *pkid;
805 int properlen;
806 size_t maclen;
808 DPRINT(3, ("in process_control()\n"));
811 * Save the addresses for error responses
813 numctlreq++;
814 rmt_addr = &rbufp->recv_srcadr;
815 lcl_inter = rbufp->dstadr;
816 unmarshall_ntp_control(&pkt_core, rbufp);
817 pkt = &pkt_core;
820 * If the length is less than required for the header, or
821 * it is a response or a fragment, ignore this.
823 if (rbufp->recv_length < (int)CTL_HEADER_LEN
824 || (CTL_RESPONSE | CTL_MORE | CTL_ERROR) & pkt->r_m_e_op
825 || pkt->offset != 0) {
826 DPRINT(1, ("invalid format in control packet\n"));
827 if (rbufp->recv_length < (int)CTL_HEADER_LEN)
828 numctltooshort++;
829 if (CTL_RESPONSE & pkt->r_m_e_op)
830 numctlinputresp++;
831 if (CTL_MORE & pkt->r_m_e_op)
832 numctlinputfrag++;
833 if (CTL_ERROR & pkt->r_m_e_op)
834 numctlinputerr++;
835 if (pkt->offset != 0)
836 numctlbadoffset++;
837 return;
839 res_version = PKT_VERSION(pkt->li_vn_mode);
840 if (res_version > NTP_VERSION || res_version < NTP_OLDVERSION) {
841 DPRINT(1, ("unknown version %d in control packet\n",
842 res_version));
843 numctlbadversion++;
844 return;
848 * Pull enough data from the packet to make intelligent
849 * responses
851 rpkt.li_vn_mode = PKT_LI_VN_MODE(sys_vars.sys_leap, res_version,
852 MODE_CONTROL);
853 res_opcode = pkt->r_m_e_op;
854 rpkt.sequence = pkt->sequence;
855 rpkt.associd = pkt->associd;
856 rpkt.status = 0;
857 res_frags = 1;
858 res_offset = 0;
859 res_associd = ntohs(pkt->associd);
860 res_auth = NULL;
861 req_count = (int)ntohs(pkt->count);
862 datanotbinflag = false;
863 datalinelen = 0;
864 datasent = false;
865 datapt = rpkt.data;
867 if ((rbufp->recv_length & 0x3) != 0)
868 DPRINT(3, ("Control packet length %zu unrounded\n",
869 rbufp->recv_length));
872 * We're set up now. Make sure we've got at least enough
873 * incoming data space to match the count.
875 req_data = (int)rbufp->recv_length - (int)CTL_HEADER_LEN;
876 if (req_data < req_count || rbufp->recv_length & 0x3) {
877 ctl_error(CERR_BADFMT);
878 numctldatatooshort++;
879 return;
882 if (CTL_MAX_DATA_LEN < req_count) {
883 /* count too big */
884 ctl_error(CERR_BADFMT);
885 numctlbadpkts++;
886 return;
889 properlen = req_count + (int)CTL_HEADER_LEN;
890 /* round up proper len to a 4 octet boundary */
892 properlen = (properlen + 3) & ~3;
893 maclen = rbufp->recv_length - (size_t)properlen;
894 if ((rbufp->recv_length & 3) == 0 &&
895 maclen >= MIN_MAC_LEN && maclen <= MAX_MAC_LEN) {
896 keyid_t keyid;
897 pkid = (void *)((char *)pkt + properlen);
898 keyid = ntohl(*pkid);
899 DPRINT(3, ("recv_len %zu, properlen %d, wants auth with keyid %08x, MAC length=%zu\n",
900 rbufp->recv_length, properlen, keyid,
901 maclen));
903 res_auth = authlookup(keyid, true); // FIXME
904 if (NULL == res_auth)
905 DPRINT(3, ("invalid keyid %08x\n", keyid));
906 else if (authdecrypt(res_auth, (uint32_t *)pkt,
907 (int)rbufp->recv_length - (int)maclen,
908 (int)maclen)) {
909 DPRINT(3, ("authenticated okay\n"));
910 } else {
911 res_auth = NULL;
912 DPRINT(3, ("authentication failed\n"));
917 * Set up translate pointers
919 reqpt = (char *)pkt->data;
920 #ifndef __COVERITY__
921 if (CTL_MAX_DATA_LEN < req_count) {
922 /* count too big - backstop to prevent stack overflow*/
923 /* coverity[deadcode] */
924 ctl_error(CERR_BADFMT);
925 numctlbadpkts++;
926 return;
928 #endif /* __COVERITY__ */
929 reqend = reqpt + req_count;
932 * Look for the opcode processor
934 for (cc = control_codes; cc->control_code != NO_REQUEST; cc++) {
935 if (cc->control_code == res_opcode) {
936 DPRINT(3, ("opcode %d, found command handler\n",
937 res_opcode));
938 if (NULL == cc->handler) {
939 ctl_error(CERR_BADOP); // Not Implemented
940 return;
942 if (cc->flags == AUTH
943 && (NULL == res_auth
944 || res_auth->keyid != ctl_auth_keyid)) {
945 ctl_error(CERR_PERMISSION);
946 return;
948 (cc->handler)(rbufp, restrict_mask);
949 return;
954 * Can't find this one, return an error.
956 numctlbadop++;
957 ctl_error(CERR_BADOP);
958 return;
963 * ctlpeerstatus - return a status word for this peer
965 unsigned short
966 ctlpeerstatus(
967 struct peer *p
970 unsigned short status;
972 status = p->status;
973 if (FLAG_CONFIG & p->cfg.flags)
974 status |= CTL_PST_CONFIG;
975 if ((p->cfg.peerkey) || (FLAG_NTS & p->cfg.flags))
976 status |= CTL_PST_AUTHENABLE;
977 if (FLAG_AUTHENTIC & p->cfg.flags)
978 status |= CTL_PST_AUTHENTIC;
979 if (p->reach)
980 status |= CTL_PST_REACH;
982 return CTL_PEER_STATUS(status, p->num_events, p->last_event);
/*
 * ctlclkstatus - build the mode-6 status word for a reference clock.
 */
#ifdef REFCLOCK
static unsigned short
ctlclkstatus(
	struct refclockstat *pcs
	)
{
	/* no peer-style flag bits apply to a refclock; pack only the
	 * event count and the current clock status code */
	return CTL_PEER_STATUS(0, pcs->lastevent, pcs->currentstatus);
}
#endif
1001 * ctlsysstatus - return the system status word
1003 static unsigned short
1004 ctlsysstatus(void)
1006 uint8_t this_clock;
1008 this_clock = CTL_SST_TS_UNSPEC;
1009 #ifdef REFCLOCK
1010 if (sys_vars.sys_peer != NULL) {
1011 if (CTL_SST_TS_UNSPEC != sys_vars.sys_peer->sstclktype)
1012 this_clock = sys_vars.sys_peer->sstclktype;
1014 #else /* REFCLOCK */
1015 if (sys_vars.sys_peer != 0)
1016 this_clock = CTL_SST_TS_NTP;
1017 #endif /* REFCLOCK */
1018 return CTL_SYS_STATUS(sys_vars.sys_leap, this_clock, ctl_sys_num_events,
1019 ctl_sys_last_event);
/*
 * ctl_flushpkt - write out the current packet and prepare
 * another if necessary.
 *
 * 'more' is either 0 (final fragment) or CTL_MORE.  Uses the
 * module-level response state (rpkt, datapt, res_*) set up by
 * process_control().  If the request authenticated (res_auth set),
 * the response is MAC'd with the same key before sending.
 */
static void
ctl_flushpkt(
	uint8_t more
	)
{
	int dlen;
	int sendlen;
	int maclen;
	int totlen;

	/* bytes of payload accumulated so far */
	dlen = datapt - rpkt.data;
	if (!more && datanotbinflag && dlen + 2 < CTL_MAX_DATA_LEN) {
		/*
		 * Big hack, output a trailing \r\n
		 */
		*datapt++ = '\r';
		*datapt++ = '\n';
		dlen += 2;
	}
	sendlen = dlen + (int)CTL_HEADER_LEN;

	/*
	 * Zero-fill the unused part of the packet. This wasn't needed
	 * when the clients were all in C, for which the first NUL is
	 * a string terminator. But Python allows NULs in strings,
	 * which means Python mode 6 clients might actually see the trailing
	 * garbage.
	 *
	 * NOTE(review): the offset here is 'sendlen' (payload + header)
	 * measured from rpkt.data (payload start), so the zeroed region
	 * begins CTL_HEADER_LEN bytes past the payload end — confirm
	 * rpkt.data is sized to make this both in-bounds and intended.
	 */
	memset(rpkt.data + sendlen, '\0', sizeof(rpkt.data) - (size_t)sendlen);

	/*
	 * Pad to a multiple of 32 bits
	 */
	while (sendlen & 0x3) {
		sendlen++;
	}

	/*
	 * Fill in the packet with the current info
	 */
	rpkt.r_m_e_op = CTL_RESPONSE | more |
			(res_opcode & CTL_OP_MASK);
	rpkt.count = htons((unsigned short)dlen);
	rpkt.offset = htons((unsigned short)res_offset);
	if (NULL != res_auth) {
		keyid_t keyid;
		totlen = sendlen;
		/*
		 * If we are going to authenticate, then there
		 * is an additional requirement that the MAC
		 * begin on a 64 bit boundary.
		 */
		while (totlen & 7) {
			totlen++;
		}
		keyid = htonl(res_auth->keyid);
		memcpy(datapt, &keyid, sizeof(keyid));
		maclen = authencrypt(res_auth,
				     (uint32_t *)&rpkt, totlen);
		sendpkt(rmt_addr, lcl_inter, &rpkt, totlen + maclen);
	} else {
		sendpkt(rmt_addr, lcl_inter, &rpkt, sendlen);
	}
	if (more) {
		numctlfrags++;
	} else {
		numctlresponses++;
	}

	/*
	 * Set us up for another go around.
	 */
	res_frags++;
	res_offset += dlen;
	datapt = rpkt.data;
}
/*
 * ctl_putdata - write data into the packet, fragmenting and starting
 * another if this one is full.
 *
 * dp/dlen is the payload; bin=false means textual data, which gets
 * comma/space/CRLF separators inserted between successive items and
 * a \r\n line break when the current text line would exceed
 * MAXDATALINELEN.  Overflowing data is split across fragments via
 * ctl_flushpkt(CTL_MORE).
 */
static void
ctl_putdata(
	const char *dp,
	unsigned int dlen,
	bool bin		/* set to true when data is binary */
	)
{
	unsigned int overhead;
	unsigned int currentlen;
	const uint8_t * dataend = &rpkt.data[CTL_MAX_DATA_LEN];

	overhead = 0;
	if (!bin) {
		datanotbinflag = true;
		/* room for the ", " or ",\r\n" separator */
		overhead = 3;
		if (datasent) {
			*datapt++ = ',';
			datalinelen++;
			if ((dlen + (unsigned int)datalinelen + 1) >= MAXDATALINELEN) {
				*datapt++ = '\r';
				*datapt++ = '\n';
				datalinelen = 0;
			} else {
				*datapt++ = ' ';
				datalinelen++;
			}
		}
	}

	/*
	 * Save room for trailing junk
	 */
	while (dlen + overhead + datapt > dataend) {
		/*
		 * Not enough room in this one, flush it out.
		 */
		currentlen = (unsigned int)(dataend - datapt);
		if (dlen < currentlen) currentlen = dlen;

		memcpy(datapt, dp, currentlen);

		datapt += currentlen;
		dp += currentlen;
		dlen -= currentlen;
		datalinelen += (int)currentlen;

		ctl_flushpkt(CTL_MORE);
	}

	memcpy(datapt, dp, dlen);
	datapt += dlen;
	datalinelen += (int)dlen;
	datasent = true;
}
1166 * ctl_putstr - write a tagged string into the response packet
1167 * in the form:
1169 * tag="data"
1171 * len is the data length excluding the NUL terminator,
1172 * as in ctl_putstr("var", "value", strlen("value"));
1173 * The write will be truncated if data contains a NUL,
1174 * so don't do that.
1176 static void
1177 ctl_putstr(
1178 const char * tag,
1179 const char * data,
1180 size_t len
1183 char buffer[512];
1185 strlcpy(buffer, tag, sizeof(buffer));
1186 strlcat(buffer, "=\"", sizeof(buffer));
1187 if (0 < len) {
1188 strlcat(buffer, data, sizeof(buffer));
1190 strlcat(buffer, "\"", sizeof(buffer));
1192 ctl_putdata(buffer, strlen(buffer), false);
1197 * ctl_putunqstr - write a tagged string into the response packet
1198 * in the form:
1200 * tag=data
1202 * len is the data length excluding the NUL terminator.
1203 * data must not contain a comma or whitespace.
1205 static void
1206 ctl_putunqstr(
1207 const char * tag,
1208 const char * data,
1209 size_t len
1212 char buffer[512];
1214 if ((strlen(tag) + 2 + len) >= sizeof(buffer)) {
1215 return;
1218 strlcpy(buffer, tag, sizeof(buffer));
1219 strlcat(buffer, "=", sizeof(buffer));
1220 if (len > 0) {
1221 strlcat(buffer, data, sizeof(buffer));
1223 ctl_putdata(buffer, strlen(buffer), false);
/*
 * ctl_putdblf - write a tagged, signed double into the response packet,
 * using %f (use_f true) or %g formatting at the given precision.
 */
static void
ctl_putdblf(
	const char * tag,
	bool use_f,
	int precision,
	double d
	)
{
	char val[50];
	const char *fmt = use_f ? "%.*f" : "%.*g";

	snprintf(val, sizeof(val), fmt, precision, d);
	ctl_putunqstr(tag, val, strlen(val));
}
/*
 * ctl_putuint - write a tagged unsigned integer into the response
 */
static void
ctl_putuint(
	const char *tag,
	uint64_t uval
	)
{
	char val[50];

	snprintf(val, sizeof(val), "%" PRIu64, uval);
	ctl_putunqstr(tag, val, strlen(val));
}
/*
 * ctl_puttime - write a decoded filestamp into the response as an
 * ISO-8601-style UTC timestamp (minute resolution).
 */
static void
ctl_puttime(
	const char *tag,
	time_t uval
	)
{
	struct tm tmbuf;
	struct tm *tm;
	char stamp[50];

	/* gmtime_r: thread-safe UTC breakdown; bail on conversion failure */
	tm = gmtime_r(&uval, &tmbuf);
	if (NULL == tm) {
		return;
	}
	snprintf(stamp, sizeof(stamp), "%04d-%02d-%02dT%02d:%02dZ",
		 tm->tm_year + 1900, tm->tm_mon + 1, tm->tm_mday,
		 tm->tm_hour, tm->tm_min);
	ctl_putunqstr(tag, stamp, strlen(stamp));
}
/*
 * ctl_puthex - write a tagged unsigned integer, in hex, into the
 * response
 */
static void
ctl_puthex(
	const char *tag,
	uint64_t uval
	)
{
	char val[50];

	snprintf(val, sizeof(val), "0x%" PRIx64, uval);
	ctl_putunqstr(tag, val, strlen(val));
}
/*
 * ctl_putint - write a tagged signed integer into the response
 */
static void
ctl_putint(
	const char *tag,
	int64_t ival
	)
{
	char val[50];

	snprintf(val, sizeof(val), "%" PRId64, ival);
	ctl_putunqstr(tag, val, strlen(val));
}
1312 * ctl_putts - write a tagged timestamp, in hex, into the response
1314 static void
1315 ctl_putts(
1316 const char *tag,
1317 l_fp ts
1320 char buf[50];
1321 snprintf(buf, sizeof(buf), "0x%08x.%08x",
1322 (unsigned int)lfpuint(ts), (unsigned int)lfpfrac(ts));
1323 ctl_putunqstr(tag, buf, strlen(buf));
1328 * ctl_putadr - write an IP address into the response
1330 static void
1331 ctl_putadr(
1332 const char *tag,
1333 refid_t addr32,
1334 sockaddr_u *addr
1337 const char *cq;
1338 if (NULL == addr) {
1339 struct in_addr in4;
1340 in4.s_addr = addr32;
1341 cq = inet_ntoa(in4);
1342 } else
1343 cq = socktoa(addr);
1344 ctl_putunqstr(tag, cq, strlen(cq));
1349 * ctl_putrefid - send a refid as printable text
1351 static void
1352 ctl_putrefid(
1353 const char * tag,
1354 refid_t refid
1357 char buf[sizeof(refid) + 1];
1358 char * cp;
1359 unsigned int i;
1361 /* refid is really a 4 byte, un-terminated, string */
1362 cp = (char *)&refid;
1363 /* make sure all printable */
1364 for (i = 0; sizeof(refid) > i && '\0' != cp[i]; i++) {
1365 if (isgraph((int)cp[i]))
1366 buf[i] = cp[i];
1367 else
1368 buf[i] = '.';
1370 buf[i] = '\0';
1371 ctl_putunqstr(tag, buf, strlen(buf));
1376 * ctl_putarray - write a tagged eight element double array into the response
1378 static void
1379 ctl_putarray(
1380 const char *tag,
1381 double *arr,
1382 int start
1385 char buffer[200];
1386 char buf[50];
1387 int i;
1388 buffer[0] = 0;
1389 i = start;
1390 do {
1391 if (i == 0)
1392 i = NTP_SHIFT;
1393 i--;
1394 snprintf(buf, sizeof(buf), " %.2f", arr[i] * MS_PER_S);
1395 strlcat(buffer, buf, sizeof(buffer));
1396 } while (i != start);
1397 ctl_putunqstr(tag, buffer, strlen(buffer));
/*
 * ctl_putsys - output a system variable
 *
 * 'v' describes the variable: name, value pointer(s) in v->p (and
 * v->p2 for "_r" relative types), a type tag selecting the output
 * formatter, and flag bits for unit conversion.  Kernel (ntp_adjtime)
 * and leap-second state are refreshed at most once per current_time
 * tick before being read.
 */
static void
ctl_putsys(const struct var * v) {
	/* timestamps of the last ntx / lsig refresh */
	static unsigned long ntp_adjtime_time;
	static unsigned long ntp_leap_time;

	/* older compilers don't allow declarations on each case without {} */
	double temp_d;
	uptime_t temp_up;
	uint64_t mem;

	/* FIXME: ****
	 * This should get pushed up a layer: flag, once per request
	 * This could get data from 2 samples if the clock ticks while we are working..
	 */
	/* The Kernel clock variables need up-to-date output of ntp_adjtime() */
	if (v->flags&N_CLOCK && current_time != ntp_adjtime_time) {
		ZERO(ntx);
		if (ntp_adjtime(&ntx) < 0)
			msyslog(LOG_ERR,
				"MODE6: ntp_adjtime() for mode 6 query failed: %s", strerror(errno));
		ntp_adjtime_time = current_time;
	}

	/* The leap second variables need up-to-date info */
	if (v->flags&N_LEAP && current_time != ntp_leap_time) {
		leapsec_getsig(&lsig);
		ntp_leap_time = current_time;
	}

	switch (v->type) {

	case v_str: ctl_putstr(v->name, v->p.str, strlen(v->p.str)); break;
	case v_strP: ctl_putstr(v->name, v->p.strP(), strlen(v->p.strP())); break;

	case v_dbl:
		temp_d = *v->p.dbl;
		if (v->flags&ToMS) temp_d *= MS_PER_S;  // to Milliseconds
		if (v->flags&ToPPM) temp_d *= US_PER_S; // to PPM
		if (v->flags&DBL6)
			ctl_putdbl6(v->name, temp_d);
		else
			ctl_putdbl(v->name, temp_d);
		break;

	case v_kli:
		/* kernel timex slot; see the "timex slots are tough"
		 * discussion at the top of this file */
		if (v->flags&K_16) {
			/* value is scaled by 16 bits */
			temp_d = FP_UNSCALE(*v->p.timex_li);
		} else {
			temp_d = (double)*v->p.timex_li;
		}

		if (v->flags & (KNUToMS | KUToMS)) {
			/* value is in nanoseconds or microseconds */
# ifdef STA_NANO
			if ((v->flags & KNUToMS) && (ntx.status & STA_NANO)) {
				temp_d *= 1E-9;
			} else
# endif
			{
				temp_d *= 1E-6;
			}

			temp_d *= MS_PER_S;
		}
		ctl_putdbl(v->name, temp_d);
		break;

	case v_since:
		/* seconds since the recorded uptime event */
		temp_up = current_time - *v->p.up;
		ctl_putuint(v->name, temp_up);
		break;

	/* NB: uinnt/innt/boool are the real union member names
	 * (spelled to dodge keyword clashes), not typos. */
	case v_uli: ctl_putuint(v->name, *v->p.uli); break;
	case v_uint: ctl_putuint(v->name, *v->p.uinnt); break;
	case v_u64: ctl_putuint(v->name, *v->p.u64); break;
	case v_u64_r:
		ctl_putuint(v->name, *v->p.u64-*v->p2.u64); break;
	case v_u32: ctl_putuint(v->name, *v->p.u32); break;
	case v_u8: ctl_putuint(v->name, *v->p.u8); break;

	case v_u64P: ctl_putuint(v->name, v->p.u64P()); break;
	case v_u32P: ctl_putuint(v->name, v->p.u32P()); break;
	case v_uliP: ctl_putuint(v->name, v->p.uliP()); break;

	case v_li: ctl_putint(v->name, *v->p.li); break;
	case v_int: ctl_putint(v->name, *v->p.innt); break;
	case v_i64: ctl_putint(v->name, *v->p.i64); break;
	case v_i32: ctl_putint(v->name, *v->p.i32); break;
	case v_i8: ctl_putint(v->name, *v->p.i8); break;

	case v_bool: ctl_putint(v->name, *v->p.boool); break;

	case v_time: ctl_puttime(v->name, *v->p.time); break;

	/* time of day */
	case v_l_fp: ctl_putts(v->name, *v->p.l_fp); break;

	/* time differences */
	case v_l_fp_ms:
		temp_d = lfptod(*v->p.l_fp);
		temp_d *= MS_PER_S;
		ctl_putdbl(v->name, temp_d);
		break;
	case v_l_fp_sec:
		temp_d = lfptod(*v->p.l_fp);
		ctl_putdbl(v->name, temp_d);
		break;
	case v_l_fp_sec6:
		temp_d = lfptod(*v->p.l_fp);
		ctl_putdbl6(v->name, temp_d);
		break;
	case v_l_fp_sec_r:
		temp_d = lfptod(*v->p.l_fp-*v->p2.l_fp);
		ctl_putdbl(v->name, temp_d);
		break;

	case v_mrumem:
		/* MRU entry count -> KiB, rounded to nearest */
		mem = *v->p.u64 * sizeof(mon_entry);
		mem = (mem + 512) / 1024;
		ctl_putuint(v->name, mem);
		break;

	case v_special: ctl_putspecial(v); break;

	default: {
		/* -Wswitch-enum will warn if this is possible */
		if (log_limit++ > 10) return;	/* Avoid log file clutter/DDoS */
		msyslog(LOG_ERR, "ERR: ctl_putsys() needs work type=%u\n", v->type);
		break;
	}
	}
}
/*
 * ctl_putspecial - output a system variable -- special cases
 *
 * Handles the v_special entries of the system variable table:
 * values that must be computed on the fly rather than read through
 * a pointer (system peer info, current time, refid, MRU age, and
 * the full variable-name list).
 */
static void
ctl_putspecial(const struct var * v) {

	/* older compilers don't allow declarations on each case */
	int64_t i;
	const char *ss = "0.0.0.0:0";	/* default when no system peer */
	l_fp tmp;
	uint64_t u;
	l_fp now;

	switch (v->p.special) {
	case vs_peer:
		/* association ID of the system peer, 0 if none */
		i = 0;
		if (NULL != sys_vars.sys_peer)
			i = sys_vars.sys_peer->associd;
		ctl_putuint(v->name, i);
		break;
	case vs_peeradr:
		if (NULL != sys_vars.sys_peer)
			ss = sockporttoa(&sys_vars.sys_peer->srcadr);
		ctl_putstr(v->name, ss, strlen(ss));
		break;
	case vs_peermode:
		u = MODE_UNSPEC;
		if (NULL != sys_vars.sys_peer)
			u = sys_vars.sys_peer->hmode;
		ctl_putuint(v->name, u);
		break;
	case vs_systime:
		get_systime(&tmp);
		ctl_putts(v->name, tmp);
		break;
	case vs_refid:
		/* stratum 2..15: refid is the upstream IPv4 address;
		 * otherwise it is a 4-char text code */
		if (sys_vars.sys_stratum > 1 &&
		    sys_vars.sys_stratum < STRATUM_UNSPEC)
			ctl_putadr(v->name, sys_vars.sys_refid, NULL);
		else
			ctl_putrefid(v->name, sys_vars.sys_refid);
		break;
	case vs_mruoldest:
		get_systime(&now);
		ctl_putuint(v->name, mon_get_oldest_age(now));
		break;
	case vs_varlist:
		do_sys_var_list(v->name, sys_var);
		break;
	default:
		/* -Wswitch-enum will warn if this is possible */
		if (log_limit++ > 10) return;	/* Avoid log file clutter/DDoS */
		msyslog(LOG_ERR, "ERR: ctl_putspecial() needs work special=%u\n", v->p.special);
		break;
	}
}
/*
 * Helper macros for the ctl_putpeer()/ctl_putclock() switches below.
 * Each expands to a complete "case number: emit variable; break" arm,
 * choosing the formatter by type.  CV_NAME must be #defined to the
 * name lookup for the table currently being emitted.
 */
#define CASE_DBL(number, variable) case number: \
	ctl_putdbl(CV_NAME, variable); \
	break
#define CASE_DBL6(number, variable) case number: \
	ctl_putdbl6(CV_NAME, variable); \
	break
#define CASE_HEX(number, variable) case number: \
	ctl_puthex(CV_NAME, variable); \
	break
#define CASE_INT(number, variable) case number: \
	ctl_putint(CV_NAME, variable); \
	break
#define CASE_UINT(number, variable) case number: \
	ctl_putuint(CV_NAME, variable); \
	break
#define CASE_TS(number, variable) case number: \
	ctl_putts(CV_NAME, variable); \
	break
#define CV_NAME peer_var2[id].text
/*
 * ctl_putpeer - output a peer variable
 *
 * 'id' is a CP_* peer-variable code indexing peer_var2[]; 'p' is the
 * peer whose value is emitted.  Unknown codes are silently ignored.
 */
static void
ctl_putpeer(
	int id,
	struct peer *p
	)
{
	switch (id) {

	case CP_CONFIG:
		/* 1 for permanently configured peers, 0 for preemptable */
		ctl_putuint(CV_NAME,
			    !(FLAG_PREEMPT & p->cfg.flags));
		break;

	CASE_UINT(CP_AUTHENABLE, !(p->cfg.peerkey));

	case CP_AUTHENTIC:
		ctl_putuint(CV_NAME,
			    !!(FLAG_AUTHENTIC & p->cfg.flags));
		break;

	case CP_SRCADR:
		ctl_putadr(CV_NAME, 0, &p->srcadr);
		break;

	CASE_UINT(CP_SRCPORT, SRCPORT(&p->srcadr));

	case CP_SRCHOST:
		if (p->hostname != NULL)
			ctl_putstr(CV_NAME, p->hostname,
				   strlen(p->hostname));
#ifdef REFCLOCK
		/* refclocks report their driver name instead */
		if (p->procptr != NULL) {
			char buf1[256];
			strlcpy(buf1, refclock_name(p), sizeof(buf1));
			ctl_putstr(CV_NAME, buf1, strlen(buf1));
		}
#endif /* REFCLOCK */
		break;

	case CP_DSTADR:
		ctl_putadr(CV_NAME, 0,
			   (p->dstadr != NULL)
			       ? &p->dstadr->sin
			       : NULL);
		break;

	case CP_DSTPORT:
		ctl_putuint(CV_NAME,
			    (p->dstadr != NULL)
				? SRCPORT(&p->dstadr->sin)
				: 0);
		break;

	CASE_UINT(CP_RATE, p->throttle);

	CASE_UINT(CP_LEAP, p->leap);

	CASE_UINT(CP_HMODE, p->hmode);

	CASE_UINT(CP_STRATUM, p->stratum);

	CASE_UINT(CP_PPOLL, p->ppoll);

	CASE_UINT(CP_HPOLL, p->hpoll);

	CASE_INT(CP_PRECISION, p->precision);

	CASE_DBL(CP_ROOTDELAY, p->rootdelay * MS_PER_S);

	CASE_DBL(CP_ROOTDISPERSION, p->rootdisp * MS_PER_S);

	case CP_REFID:
#ifdef REFCLOCK
		if (p->cfg.flags & FLAG_REFCLOCK) {
			ctl_putrefid(CV_NAME, p->refid);
			break;
		}
#endif
		/* stratum 2..15: refid is an IPv4 address, else text */
		if (p->stratum > 1 && p->stratum < STRATUM_UNSPEC)
			ctl_putadr(CV_NAME, p->refid,
				   NULL);
		else
			ctl_putrefid(CV_NAME, p->refid);
		break;

	CASE_TS(CP_REFTIME, p->reftime);

	CASE_TS(CP_REC, p->dst);

	CASE_TS(CP_XMT, p->xmt);

	case CP_BIAS:
		/* only report a bias when one is configured */
		if ( !D_ISZERO_NS(p->cfg.bias) )
			ctl_putdbl(CV_NAME, p->cfg.bias);
		break;

	CASE_HEX(CP_REACH, p->reach);

	CASE_HEX(CP_FLASH, p->flash);

	case CP_MODE:
#ifdef REFCLOCK
		/* only meaningful for refclocks */
		if (p->cfg.flags & FLAG_REFCLOCK) {
			ctl_putuint(CV_NAME, p->cfg.mode);
			break;
		}
#endif
		break;

	CASE_UINT(CP_UNREACH, (unsigned long)p->unreach);

	CASE_UINT(CP_TIMER, p->nextdate - current_time);

	CASE_DBL6(CP_DELAY, p->delay * MS_PER_S);

	CASE_DBL6(CP_OFFSET, p->offset * MS_PER_S);

	CASE_DBL6(CP_JITTER, p->jitter * MS_PER_S);

	CASE_DBL6(CP_DISPERSION, p->disp * MS_PER_S);

	case CP_KEYID:
		/* keys above NTP_MAXKEY read better in hex */
		if (p->cfg.peerkey > NTP_MAXKEY)
			ctl_puthex(CV_NAME, p->cfg.peerkey);
		else
			ctl_putuint(CV_NAME, p->cfg.peerkey);
		break;

	case CP_FILTDELAY:
		ctl_putarray(CV_NAME, p->filter_delay,
			     p->filter_nextpt);
		break;

	case CP_FILTOFFSET:
		ctl_putarray(CV_NAME, p->filter_offset,
			     p->filter_nextpt);
		break;

	case CP_FILTERROR:
		ctl_putarray(CV_NAME, p->filter_disp,
			     p->filter_nextpt);
		break;

	CASE_UINT(CP_PMODE, p->pmode);

	CASE_UINT(CP_RECEIVED, p->received);

	CASE_UINT(CP_SENT, p->sent);

	case CP_VARLIST:
		(void)CF_VARLIST(&peer_var2[id], peer_var2, NULL);
		break;

	CASE_UINT(CP_TIMEREC, current_time - p->timereceived);

	CASE_UINT(CP_TIMEREACH, current_time - p->timereachable);

	CASE_UINT(CP_BADAUTH, p->badauth);

	CASE_UINT(CP_BOGUSORG, p->bogusorg);

	CASE_UINT(CP_OLDPKT, p->oldpkt);

	CASE_UINT(CP_SELDISP, p->seldisptoolarge);

	CASE_UINT(CP_SELBROKEN, p->selbroken);

	CASE_UINT(CP_CANDIDATE, p->status);

	CASE_INT(CP_NTSCOOKIES, p->nts_state.count);

	default:
		break;
	}
}
#undef CV_NAME
/*
 * CF_VARLIST - emit entry->text="name1,name2,..." listing the variable
 * names from up to two ctl_var tables (table2 may be NULL).
 * Returns true on success, false if the list would not fit in the
 * local buffer.
 */
bool CF_VARLIST(
    const struct ctl_var *entry,
    const struct ctl_var *table1,
    const struct ctl_var *table2
    ) {
	char buf[1500];  // Arbitrary length greater than used to be.
	char *buffer_lap, *buffer_end;
	bool first = true;
	ssize_t increment;

	memset(buf, '.', sizeof(buf));
	buf[0] = '\0';
	buffer_lap = buf;
	buffer_end = buf + sizeof(buf);
	if (strlen(entry->text) + 4 > sizeof(buf)) {
		return false;	// really long var name
	}

	snprintf(buffer_lap, sizeof(buf), "%s=\"", entry->text);
	buffer_lap += strlen(buffer_lap);
	/* append names from the first table... */
	increment = CI_VARLIST(buffer_lap, buffer_end,
			       table1, &first);
	if (increment <= 0) {
		return false;
	}
	buffer_lap += increment;
	/* ...then from the optional second table */
	increment = CI_VARLIST(buffer_lap, buffer_end,
			       table2, &first);
	if (increment < 0) {
		return false;
	}
	buffer_lap += increment;
	/* room for closing quote and NUL */
	if (buffer_lap + 2 >= buffer_end)
		return false;

	*buffer_lap++ = '"';
	*buffer_lap = '\0';
	ctl_putdata(buf, (unsigned)(buffer_lap - buf), false);
	return true;
}
/*
 * CI_VARLIST - append the comma-separated names from one ctl_var table
 * to buffer_lap, never writing at or past buf_end.  '*first' tracks
 * whether a leading comma is needed and is shared across calls.
 * Returns the number of bytes appended, 0 for a NULL table, or -1 if
 * a name would not fit.
 */
ssize_t CI_VARLIST(
    char *buffer_lap,
    char *buf_end,
    const struct ctl_var *table,
    bool *first
    ) {
	char *start = buffer_lap;
	char *string_split;
	size_t string_length;
	const struct ctl_var *row;
	if (NULL == table) {
		return 0;
	}
	for (row = table; !(EOV & row->flags); row++) {
		if (PADDING & row->flags)
			continue;
		/* names may carry "=value"; list only the name part */
		string_split = strchr(row->text, '=');
		if (string_split == NULL) {
			string_length = strlen(row->text);
		} else {
			string_length = string_split - row->text;
		}
		/* NOTE(review): ptrdiff_t minus size_t promotes the RHS
		 * to unsigned; fine while buf_end > buffer_lap, which
		 * the callers maintain. */
		if (string_length >= buf_end - buffer_lap - (size_t)1) {
			return -1;
		}
		if (!*first) {
			*buffer_lap++ = ',';
		} else {
			*first = false;
		}
		memcpy(buffer_lap, row->text, string_length);
		buffer_lap+= string_length;
	}
	*buffer_lap = '\0';
	return buffer_lap - start;
}
1874 void do_sys_var_list(const char* name, const struct var* v) {
1875 /* This has to be big enough for the whole answer -- all names.
1876 * On 2024-Jul-21, that was almost 3000 characters.
1877 * We could split this into two: counters and other. */
1878 char buf[5000];
1879 char *buffer;
1880 bool first = true;
1881 int length;
1883 memset(buf, '.', sizeof(buf));
1884 buf[0] = '\0';
1885 buffer = buf;
1886 if (strlen(v->name) + 10 > sizeof(buf)) {
1887 return; // really long var name
1889 snprintf(buffer, sizeof(buf), "%s=\"", name);
1890 buffer += strlen(buffer);
1892 for ( ;!(EOV & v->flags); v++) {
1893 length = strlen(v->name);
1894 if (buffer+length+6 >= buf+sizeof(buf)) {
1895 /* FIXME -- need bigger buffer */
1896 continue;
1898 if (first) {
1899 first = false;
1900 } else {
1901 *buffer++ = ',';
1903 memcpy(buffer, v->name, length);
1904 buffer+= length;
1907 *buffer++ = '"';
1908 *buffer = '\0';
1909 ctl_putdata(buf, (unsigned)(buffer - buf), false);
#ifdef REFCLOCK
#define CV_NAME clock_var2[id].text
/*
 * ctl_putclock - output clock variables
 *
 * 'id' is a CC_* clock-variable code indexing clock_var2[]; 'pcs' is
 * the refclock status block.  When 'mustput' is true a value is
 * emitted even if the driver did not supply one (empty string /
 * unconditional fudge values).
 */
static void
ctl_putclock(
	int id,
	struct refclockstat *pcs,
	bool mustput
	)
{
	switch (id) {

	case CC_NAME:
		if (pcs->clockname == NULL ||
		    *(pcs->clockname) == '\0') {
			if (mustput)
				ctl_putstr(CV_NAME,
					   "", 0);
		} else {
			ctl_putstr(CV_NAME,
				   pcs->clockname,
				   strlen(pcs->clockname));
		}
		break;
	case CC_TIMECODE:
		ctl_putstr(CV_NAME,
			   pcs->p_lastcode,
			   (unsigned)pcs->lencode);
		break;

	CASE_UINT(CC_POLL, pcs->polls);

	CASE_UINT(CC_NOREPLY, pcs->noresponse);

	CASE_UINT(CC_BADFORMAT, pcs->badformat);

	CASE_UINT(CC_BADDATA, pcs->baddata);

	case CC_FUDGETIME1:
		if (mustput || (pcs->haveflags & CLK_HAVETIME1))
			ctl_putdbl(CV_NAME,
				   pcs->fudgetime1 * MS_PER_S);
		break;

	case CC_FUDGETIME2:
		if (mustput || (pcs->haveflags & CLK_HAVETIME2))
			ctl_putdbl(CV_NAME,
				   pcs->fudgetime2 * MS_PER_S);
		break;

	case CC_FUDGEVAL1:
		if (mustput || (pcs->haveflags & CLK_HAVEVAL1))
			ctl_putint(CV_NAME,
				   pcs->fudgeval1);
		break;

	case CC_FUDGEVAL2:
		if (mustput || (pcs->haveflags & CLK_HAVEVAL2)) {
			/* fudgeval2 is an address for stratum > 1,
			 * otherwise a refid string */
			if (pcs->fudgeval1 > 1)
				ctl_putadr(CV_NAME,
					   pcs->fudgeval2, NULL);
			else
				ctl_putrefid(CV_NAME,
					     pcs->fudgeval2);
		}
		break;

	CASE_UINT(CC_FLAGS, pcs->flags);

	case CC_DEVICE:
		if (pcs->clockdesc == NULL ||
		    *(pcs->clockdesc) == '\0') {
			if (mustput)
				ctl_putstr(CV_NAME,
					   "", 0);
		} else {
			ctl_putstr(CV_NAME,
				   pcs->clockdesc,
				   strlen(pcs->clockdesc));
		}
		break;

	case CC_VARLIST:
		(void)CF_VARLIST(&clock_var2[id], clock_var2, pcs->kv_list);
		break;

	default:
		/* huh? */
		break;
	}
}
#undef CV_NAME
#endif
2008 #undef CASE_UINT
2009 #undef CASE_TS
2010 #undef CASE_SFP
2011 #undef CASE_INT
2012 #undef CASE_HEX
2013 #undef CASE_DBL6
2014 #undef CASE_DBL
/*
 * ctl_getitem - get the next data item from the incoming packet
 * Return NULL on error, pointer to EOV on can't find.
 * Advance reqpt on success.
 *
 * Parses one "name" or "name=value" element from [reqpt..reqend).
 * A value, if present, is copied into a static buffer and returned
 * through *data (valid only until the next call).
 */
static const struct var *
ctl_getitem(
	const struct var *var_list,
	char **data	 // for writes
	)
{
	/* static: the returned *data points into this buffer */
	static char buf[128];
	static u_long quiet_until;	/* rate-limit exploit logging */
	const struct var *v;
	size_t len;
	char *cp;
	char *tp;

	/* Old code deleted white space.  Don't send it. */

	/* Scan the string in the packet until we hit comma or
	 * EoB. Register position of first '=' on the fly. */
	for (tp = NULL, cp = reqpt; cp < reqend; ++cp) {
		if (*cp == '=' && tp == NULL) {
			tp = cp;
		}
		if (*cp == ',') {
			break;
		}
	}

	/* Process payload for write requests, if any. */
	*data = NULL;
	if (NULL != tp) {
		const char *plhead = tp + 1; /* skip the '=' */
		const char *pltail = cp;
		size_t plsize;

		/* check payload size, terminate packet on overflow */
		plsize = (size_t)(pltail - plhead);
		if (plsize >= sizeof(buf)-1) {
			goto badpacket;
		}

		/* copy data, NUL terminate, and set result data ptr */
		memcpy(buf, plhead, plsize);
		buf[plsize] = '\0';
		*data = buf;
	} else {
		/* no payload, current end --> current name termination */
		tp = cp;
	}

	len = tp-reqpt;
	for (v = var_list; !(EOV & v->flags); ++v) {
		/* Check if the var name matches the buffer. The
		 * name is bracketed by [reqpt..tp] and not NUL
		 * terminated, and it contains no '=' char.
		 */
		if (len == strlen(v->name)
		    && 0 == memcmp(reqpt, v->name, len)) {
			break;
		}
	}
	if (EOV & v->flags) {
		/* not found: return the table's EOV sentinel,
		 * deliberately without advancing reqpt */
		return v;
	}
	if (cp < reqend) cp++;	// skip over ","
	reqpt = cp;		// advance past this slot
	return v;

  badpacket:
	/*TODO? somehow indicate this packet was bad, apart from syslog? */
	numctlbadpkts++;
	NLOG(NLOG_SYSEVENT)
	    if (quiet_until <= current_time) {
		unsigned int port = SRCPORT(rmt_addr);
		quiet_until = current_time + 300;
		/* The port variable above suppresses a warning on NetBSD 8.0
		 * http://gnats.netbsd.org/cgi-bin/query-pr-single.pl?number=53619
		 * A cast on SRCPORT without the dummy variable didn't work.
		 */
		msyslog(LOG_WARNING,
			"Possible 'ntpdx' exploit from %s#%u"
			" (possibly spoofed)",
			socktoa(rmt_addr), port);
	}
	reqpt = reqend;	/* never again for this packet! */
	return NULL;
}
/*
 * ctl_getitem2 - get the next data item from the incoming packet
 * (legacy ctl_var-table variant; strips whitespace, tolerates '='
 * inside table names).  Returns NULL on malformed/exhausted input,
 * an EOV record when the name is unknown, else the matching entry.
 */
static const struct ctl_var *
ctl_getitem2(
	const struct ctl_var *var_list,
	char **data
	)
{
	/* [Bug 3008] First check the packet data sanity, then search
	 * the key. This improves the consistency of result values: If
	 * the result is NULL once, it will never be EOV again for this
	 * packet; If it's EOV, it will never be NULL again until the
	 * variable is found and processed in a given 'var_list'. (That
	 * is, a result is returned that is neither NULL nor EOV).
	 */
	static const struct ctl_var eol = { 0, EOV, NULL };
	static char buf[128];	/* *data points here on return */
	static u_long quiet_until;	/* rate-limit exploit logging */
	const struct ctl_var *v;
	char *cp;
	char *tp;

	/*
	 * Part One: Validate the packet state
	 */

	/* Delete leading commas and white space */
	while (reqpt < reqend && (*reqpt == ',' ||
				  isspace((unsigned char)*reqpt))) {
		reqpt++;
	}
	if (reqpt >= reqend) {
		return NULL;
	}

	/* Scan the string in the packet until we hit comma or
	 * EoB. Register position of first '=' on the fly. */
	for (tp = NULL, cp = reqpt; cp < reqend; ++cp) {
		if (*cp == '=' && tp == NULL) {
			tp = cp;
		}
		if (*cp == ',') {
			break;
		}
	}

	/* Process payload, if any. */
	*data = NULL;
	if (NULL != tp) {
		/* eventually strip white space from argument. */
		const char *plhead = tp + 1; /* skip the '=' */
		const char *pltail = cp;
		size_t plsize;

		while (plhead != pltail && isspace((u_char)plhead[0])) {
			++plhead;
		}
		while (plhead != pltail && isspace((u_char)pltail[-1])) {
			--pltail;
		}

		/* check payload size, terminate packet on overflow */
		plsize = (size_t)(pltail - plhead);
		if (plsize >= sizeof(buf)) {
			goto badpacket;
		}

		/* copy data, NUL terminate, and set result data ptr */
		memcpy(buf, plhead, plsize);
		buf[plsize] = '\0';
		*data = buf;
	} else {
		/* no payload, current end --> current name termination */
		tp = cp;
	}

	/* Part Two
	 *
	 * Now we're sure that the packet data itself is sane. Scan the
	 * list now. Make sure a NULL list is properly treated by
	 * returning a synthetic End-Of-Values record. We must not
	 * return NULL pointers after this point, or the behaviour would
	 * become inconsistent if called several times with different
	 * variable lists after an EoV was returned. (Such a behavior
	 * actually caused Bug 3008.)
	 */

	if (NULL == var_list)
		return &eol;

	for (v = var_list; !(EOV & v->flags); ++v)
		if (!(PADDING & v->flags)) {
			/* Check if the var name matches the buffer. The
			 * name is bracketed by [reqpt..tp] and not NUL
			 * terminated, and it contains no '=' char. The
			 * lookup value IS NUL-terminated but might
			 * include a '='... We have to look out for
			 * that!
			 */
			const char *sp1 = reqpt;
			const char *sp2 = v->text;

			/* [Bug 3412] do not compare past NUL byte in name */
			while (   (sp1 != tp)
			       && ('\0' != *sp2) && (*sp1 == *sp2)) {
				++sp1;
				++sp2;
			}
			if (sp1 == tp && (*sp2 == '\0' || *sp2 == '='))
				break;
		}

	/* See if we have found a valid entry or not. If found, advance
	 * the request pointer for the next round; if not, clear the
	 * data pointer so we have no dangling garbage here.
	 */
	if (EOV & v->flags)
		*data = NULL;
	else
		reqpt = cp + (cp != reqend);
	return v;

  badpacket:
	/*TODO? somehow indicate this packet was bad, apart from syslog? */
	numctlbadpkts++;
	NLOG(NLOG_SYSEVENT)
	    if (quiet_until <= current_time) {
		unsigned int port = SRCPORT(rmt_addr);
		quiet_until = current_time + 300;
		/* The port variable above suppresses a warning on NetBSD 8.0
		 * http://gnats.netbsd.org/cgi-bin/query-pr-single.pl?number=53619
		 * A cast on SRCPORT without the dummy variable didn't work.
		 */
		msyslog(LOG_WARNING,
			"Possible 'ntpdx' exploit from %s#%u"
			" (possibly spoofed)",
			socktoa(rmt_addr), port);
	}
	reqpt = reqend;	/* never again for this packet! */
	return NULL;
}
2254 * control_unspec - response to an unspecified op-code
2256 /*ARGSUSED*/
2257 static void
2258 control_unspec(
2259 struct recvbuf *rbufp,
2260 int restrict_mask
2263 struct peer *peer;
2265 UNUSED_ARG(rbufp);
2266 UNUSED_ARG(restrict_mask);
2269 * What is an appropriate response to an unspecified op-code?
2270 * I return no errors and no data, unless a specified association
2271 * doesn't exist.
2273 if (res_associd) {
2274 peer = findpeerbyassoc(res_associd);
2275 if (NULL == peer) {
2276 ctl_error(CERR_BADASSOC);
2277 return;
2279 rpkt.status = htons(ctlpeerstatus(peer));
2280 } else
2281 rpkt.status = htons(ctlsysstatus());
2282 ctl_flushpkt(0);
2287 * read_status - return either a list of associd's, or a particular
2288 * peer's status.
2290 /*ARGSUSED*/
2291 static void
2292 read_status(
2293 struct recvbuf *rbufp,
2294 int restrict_mask
2297 struct peer *peer;
2298 size_t n;
2299 /* a_st holds association ID, status pairs alternating */
2300 unsigned short a_st[CTL_MAX_DATA_LEN / sizeof(unsigned short)];
2302 UNUSED_ARG(rbufp);
2303 UNUSED_ARG(restrict_mask);
2304 DPRINT(3, ("read_status: ID %d\n", res_associd));
2306 * Two choices here. If the specified association ID is
2307 * zero we return all known association ID's. Otherwise
2308 * we return a bunch of stuff about the particular peer.
2310 if (res_associd) {
2311 peer = findpeerbyassoc(res_associd);
2312 if (NULL == peer) {
2313 ctl_error(CERR_BADASSOC);
2314 return;
2316 rpkt.status = htons(ctlpeerstatus(peer));
2317 if (NULL != res_auth) /* FIXME: what's this for? */
2318 peer->num_events = 0;
2320 * For now, output everything we know about the
2321 * peer. May be more selective later.
2323 for (const struct ctl_var *kv = peer_var2; kv && !(EOV & kv->flags); kv++)
2324 if (kv->flags & DEF)
2325 ctl_putpeer(kv->code, peer);
2326 ctl_flushpkt(0);
2327 return;
2329 n = 0;
2330 rpkt.status = htons(ctlsysstatus());
2331 for (peer = peer_list; peer != NULL; peer = peer->p_link) {
2332 a_st[n++] = htons(peer->associd);
2333 a_st[n++] = htons(ctlpeerstatus(peer));
2334 /* two entries each loop iteration, so n + 1 */
2335 if (n + 1 >= COUNTOF(a_st)) {
2336 ctl_putdata((void *)a_st, n * sizeof(a_st[0]),
2337 true);
2338 n = 0;
2341 if (n)
2342 ctl_putdata((void *)a_st, n * sizeof(a_st[0]), true);
2343 ctl_flushpkt(0);
2348 * read_peervars - half of read_variables() implementation
2350 static void
2351 read_peervars(void)
2353 const struct ctl_var *v;
2354 struct peer *peer;
2355 size_t i;
2356 char * valuep;
2357 bool wants[CP_MAXCODE + 1];
2358 bool gotvar;
2361 * Wants info for a particular peer. See if we know
2362 * the guy.
2364 peer = findpeerbyassoc(res_associd);
2365 if (NULL == peer) {
2366 ctl_error(CERR_BADASSOC);
2367 return;
2369 rpkt.status = htons(ctlpeerstatus(peer));
2370 if (NULL != res_auth) /* FIXME: What's this for?? */
2371 peer->num_events = 0;
2372 ZERO(wants);
2373 gotvar = false;
2374 while (NULL != (v = ctl_getitem2(peer_var2, &valuep))) {
2375 if (v->flags & EOV) {
2376 ctl_error(CERR_UNKNOWNVAR);
2377 return;
2379 INSIST(v->code < COUNTOF(wants));
2380 wants[v->code] = 1;
2381 gotvar = true;
2383 if (gotvar) {
2384 for (i = 1; i < COUNTOF(wants); i++)
2385 if (wants[i])
2386 ctl_putpeer((int)i, peer);
2387 } else
2388 for (const struct ctl_var *kv = peer_var2; kv && !(EOV & kv->flags); kv++)
2389 if (kv->flags & DEF)
2390 ctl_putpeer(kv->code, peer);
2391 ctl_flushpkt(0);
2396 * read_sysvars - half of read_variables() implementation
2398 static void
2399 read_sysvars(void)
2401 const struct var *v;
2402 const struct ctl_var *v2;
2403 char * valuep;
2404 const char * pch;
2407 * Wants system variables. Figure out which he wants
2408 * and give them to him.
2411 /* Old code had a wants bit map. Two passes.
2412 * Maybe to verify all target names before giving a partial answer.
2414 rpkt.status = htons(ctlsysstatus());
2416 if (reqpt == reqend) {
2417 /* No names provided, send back defaults */
2418 for (v = sys_var; v && !(EOV & v->flags); v++)
2419 if (DEF & v->flags)
2420 ctl_putsys(v);
2421 for (v2 = ext_sys_var; v2 && !(EOV & v2->flags); v2++)
2422 if (DEF & v2->flags)
2423 ctl_putdata(v2->text, strlen(v2->text),
2424 false);
2425 ctl_flushpkt(0);
2426 return;
2429 /* This code structure is ugly.
2430 * The basic problem is that parsing the input stream is burried in ctl_getitem
2431 * and we need to know if parsing failed or it couldn't find a name.
2432 * If it can't find a name, we try ext_sys_var.
2433 * Return NULL on error, pointer to EOV on can't find.
2434 * Advance reqpt on success.
2436 while (reqpt < reqend) {
2437 v = ctl_getitem(sys_var, &valuep);
2438 if (NULL == v)
2439 break; // parsing error
2440 if (!(v->flags & EOV)) {
2441 ctl_putsys(v);
2442 } else {
2443 v2 = ctl_getitem2(ext_sys_var, &valuep);
2444 if (NULL == v2) {
2445 ctl_error(CERR_BADVALUE);
2446 return;
2449 if (EOV & v2->flags) {
2450 ctl_error(CERR_UNKNOWNVAR);
2451 return;
2453 pch = ext_sys_var[v2->code].text;
2454 ctl_putdata(pch, strlen(pch), false);
2458 ctl_flushpkt(0);
2463 * read_variables - return the variables the caller asks for
2465 /*ARGSUSED*/
2466 static void
2467 read_variables(
2468 struct recvbuf *rbufp,
2469 int restrict_mask
2472 UNUSED_ARG(rbufp);
2473 UNUSED_ARG(restrict_mask);
2475 if (res_associd)
2476 read_peervars();
2477 else
2478 read_sysvars();
2483 * configure() processes ntpq :config/config-from-file, allowing
2484 * generic runtime reconfiguration.
2486 static void configure(
2487 struct recvbuf *rbufp,
2488 int restrict_mask
2491 size_t data_count;
2492 int retval;
2493 bool replace_nl;
2495 /* I haven't yet implemented changes to an existing association.
2496 * Hence check if the association id is 0
2498 if (res_associd != 0) {
2499 ctl_error(CERR_BADVALUE);
2500 return;
2503 if (RES_NOMODIFY & restrict_mask) {
2504 snprintf(remote_config.err_msg,
2505 sizeof(remote_config.err_msg),
2506 "runtime configuration prohibited by restrict ..."
2507 " nomodify");
2508 ctl_putdata(remote_config.err_msg,
2509 strlen(remote_config.err_msg), false);
2510 ctl_flushpkt(0);
2511 NLOG(NLOG_SYSINFO)
2512 msyslog(LOG_NOTICE,
2513 "MODE6: runtime config from %s rejected due"
2514 " to nomodify restriction",
2515 socktoa(&rbufp->recv_srcadr));
2516 increment_restricted();
2517 return;
2520 /* Initialize the remote config buffer */
2521 data_count = (size_t)(reqend - reqpt);
2523 if (data_count > sizeof(remote_config.buffer) - 2) {
2524 snprintf(remote_config.err_msg,
2525 sizeof(remote_config.err_msg),
2526 "runtime configuration failed: request too long");
2527 ctl_putdata(remote_config.err_msg,
2528 strlen(remote_config.err_msg), false);
2529 ctl_flushpkt(0);
2530 msyslog(LOG_NOTICE,
2531 "MODE6: runtime config from %s rejected: request"
2532 " too long",
2533 socktoa(&rbufp->recv_srcadr));
2534 return;
2537 memcpy(remote_config.buffer, reqpt, data_count);
2538 if (data_count > 0
2539 && '\n' != remote_config.buffer[data_count - 1])
2540 remote_config.buffer[data_count++] = '\n';
2541 remote_config.buffer[data_count] = '\0';
2542 remote_config.pos = 0;
2543 remote_config.err_pos = 0;
2544 remote_config.no_errors = 0;
2546 /* do not include terminating newline in log */
2547 if (data_count > 0
2548 && '\n' == remote_config.buffer[data_count - 1]) {
2549 remote_config.buffer[data_count - 1] = '\0';
2550 replace_nl = true;
2551 } else {
2552 replace_nl = false;
2555 DPRINT(1, ("Got Remote Configuration Command: %s\n",
2556 remote_config.buffer));
2557 msyslog(LOG_NOTICE, "MODE6: %s config: %s",
2558 socktoa(&rbufp->recv_srcadr),
2559 remote_config.buffer);
2561 if (replace_nl)
2562 remote_config.buffer[data_count - 1] = '\n';
2564 config_remotely(&rbufp->recv_srcadr);
2567 * Check if errors were reported. If not, output 'Config
2568 * Succeeded'. Else output the error count. It would be nice
2569 * to output any parser error messages.
2571 if (0 == remote_config.no_errors) {
2572 retval = snprintf(remote_config.err_msg,
2573 sizeof(remote_config.err_msg),
2574 "Config Succeeded");
2575 if (retval > 0)
2576 remote_config.err_pos += retval;
2579 ctl_putdata(remote_config.err_msg, (unsigned int)remote_config.err_pos,
2580 false);
2581 ctl_flushpkt(0);
2583 DPRINT(1, ("Reply: %s\n", remote_config.err_msg));
2585 if (remote_config.no_errors > 0)
2586 msyslog(LOG_NOTICE, "MODE6: %d error in %s config",
2587 remote_config.no_errors,
2588 socktoa(&rbufp->recv_srcadr));
2593 * derive_nonce - generate client-address-specific nonce value
2594 * associated with a given timestamp.
2596 static uint32_t derive_nonce(
2597 sockaddr_u * addr,
2598 uint32_t ts_i,
2599 uint32_t ts_f
2602 static uint8_t salt[16];
2603 static unsigned long next_salt_update = 0;
2604 union d_tag {
2605 uint8_t digest[EVP_MAX_MD_SIZE];
2606 uint32_t extract;
2607 } d;
2609 static EVP_MD_CTX *ctx;
2610 static const EVP_MD *evp;
2611 unsigned int len;
2613 if (NULL == ctx) {
2614 ctx = EVP_MD_CTX_new();
2615 if (NULL == ctx) {
2616 msyslog(LOG_ERR, "ERR: EVP_MD_CTX_new() failed");
2617 exit(1);
2621 if (NULL == evp) {
2622 /* EVP_md5() doesn't work on FIPS systems.
2623 * Check here in case EVP_sha1() gets demoted.
2624 * This is making a cookie which is only checked by
2625 * this system so the details of how it is made don't matter.
2627 evp = EVP_sha1();
2628 if (NULL == evp) {
2629 msyslog(LOG_ERR, "ERR: EVP_sha1() failed");
2630 exit(1);
2634 if (current_time >= next_salt_update) {
2635 ntp_RAND_bytes(&salt[0], sizeof(salt));
2636 next_salt_update = current_time+SECSPERHR;
2637 if (0) msyslog(LOG_INFO, "derive_nonce: update salt, %lld", \
2638 (long long)next_salt_update);
2641 EVP_DigestInit_ex(ctx, evp, NULL);
2642 EVP_DigestUpdate(ctx, salt, sizeof(salt));
2643 EVP_DigestUpdate(ctx, &ts_i, sizeof(ts_i));
2644 EVP_DigestUpdate(ctx, &ts_f, sizeof(ts_f));
2645 if (IS_IPV4(addr))
2646 EVP_DigestUpdate(ctx, &SOCK_ADDR4(addr),
2647 sizeof(SOCK_ADDR4(addr)));
2648 else
2649 EVP_DigestUpdate(ctx, &SOCK_ADDR6(addr),
2650 sizeof(SOCK_ADDR6(addr)));
2651 EVP_DigestUpdate(ctx, &NSRCPORT(addr), sizeof(NSRCPORT(addr)));
2652 EVP_DigestUpdate(ctx, salt, sizeof(salt));
2653 EVP_DigestFinal_ex(ctx, d.digest, &len);
2655 return d.extract;
2660 * generate_nonce - generate client-address-specific nonce string.
2662 static void generate_nonce(
2663 struct recvbuf * rbufp,
2664 char * nonce,
2665 size_t nonce_octets
2668 uint32_t derived;
2670 derived = derive_nonce(&rbufp->recv_srcadr,
2671 lfpuint(rbufp->recv_time),
2672 lfpfrac(rbufp->recv_time));
2673 snprintf(nonce, nonce_octets, "%08x%08x%08x",
2674 lfpuint(rbufp->recv_time), lfpfrac(rbufp->recv_time), derived);
2679 * validate_nonce - validate client-address-specific nonce string.
2681 * Returns true if the local calculation of the nonce matches the
2682 * client-provided value and the timestamp is recent enough.
2684 static int validate_nonce(
2685 const char * pnonce,
2686 struct recvbuf * rbufp
2689 unsigned int ts_i;
2690 unsigned int ts_f;
2691 l_fp ts;
2692 l_fp now_delta;
2693 unsigned int supposed;
2694 unsigned int derived;
2696 if (3 != sscanf(pnonce, "%08x%08x%08x", &ts_i, &ts_f, &supposed))
2697 return false;
2699 ts = lfpinit_u(ts_i, (uint32_t)ts_f);
2700 derived = derive_nonce(&rbufp->recv_srcadr, lfpuint(ts), lfpfrac(ts));
2701 get_systime(&now_delta);
2702 now_delta -= ts;
2704 return (supposed == derived && lfpuint(now_delta) < NONCE_TIMEOUT);
#ifdef USE_RANDOMIZE_RESPONSES
/*
 * send_random_tag_value - send a randomly-generated three character
 *			   tag prefix, a '.', an index, a '=' and a
 *			   random integer value.
 *
 * To try to force clients to ignore unrecognized tags in mrulist,
 * reslist, and ifstats responses, the first and last rows are spiced
 * with randomly-generated tag names with correct .# index.  Make it
 * three characters knowing that none of the currently-used subscripted
 * tags have that length, avoiding the need to test for
 * tag collision.
 */
static void
send_random_tag_value(
	int indx
	)
{
	char tag[32];
	/* coverity[DC.WEAK_CRYPTO] */
	int bits = random();

	/* three random lowercase letters, consuming 5 bits each */
	for (int i = 0; i < 3; i++) {
		tag[i] = 'a' + bits % 26;
		bits >>= 5;
	}
	tag[3] = '.';
	snprintf(&tag[4], sizeof(tag) - 4, "%d", indx);
	ctl_putuint(tag, (unsigned long)bits);
}
#endif /* USE_RANDOMIZE_RESPONSES */
2745 * Send a MRU list entry in response to a "ntpq -c mrulist" operation.
2747 * To keep clients honest about not depending on the order of values,
2748 * and thereby avoid being locked into ugly workarounds to maintain
2749 * backward compatibility later as new fields are added to the response,
2750 * the order is random.
2752 static void
2753 send_mru_entry(
2754 mon_entry * mon,
2755 int count
2758 const char first_fmt[] = "first.%d";
2759 const char ct_fmt[] = "ct.%d";
2760 const char mv_fmt[] = "mv.%d";
2761 const char rs_fmt[] = "rs.%d";
2762 const char sc_fmt[] = "sc.%d";
2763 const char dr_fmt[] = "dr.%d";
2764 char tag[32];
2765 bool sent[8]; /* 8 tag=value pairs */
2766 uint32_t noise;
2767 unsigned int which = 0;
2768 unsigned int remaining;
2769 const char * pch;
2771 remaining = COUNTOF(sent);
2772 ZERO(sent);
2773 /* coverity[DC.WEAK_CRYPTO] */
2774 noise = (uint32_t)random();
2775 while (remaining > 0) {
2776 #ifdef USE_RANDOMIZE_RESPONSES
2777 which = (noise & 7) % COUNTOF(sent);
2778 #endif /* USE_RANDOMIZE_RESPONSES */
2779 noise >>= 3;
2780 while (sent[which])
2781 which = (which + 1) % COUNTOF(sent);
2783 switch (which) {
2785 case 0:
2786 snprintf(tag, sizeof(tag), addr_fmt, count);
2787 pch = sockporttoa(&mon->rmtadr);
2788 ctl_putunqstr(tag, pch, strlen(pch));
2789 break;
2791 case 1:
2792 snprintf(tag, sizeof(tag), last_fmt, count);
2793 ctl_putts(tag, mon->last);
2794 break;
2796 case 2:
2797 snprintf(tag, sizeof(tag), first_fmt, count);
2798 ctl_putts(tag, mon->first);
2799 break;
2801 case 3:
2802 snprintf(tag, sizeof(tag), ct_fmt, count);
2803 ctl_putint(tag, mon->count);
2804 break;
2806 case 4:
2807 snprintf(tag, sizeof(tag), mv_fmt, count);
2808 ctl_putuint(tag, mon->vn_mode);
2809 break;
2811 case 5:
2812 snprintf(tag, sizeof(tag), rs_fmt, count);
2813 ctl_puthex(tag, mon->flags);
2814 break;
2816 case 6:
2817 snprintf(tag, sizeof(tag), sc_fmt, count);
2818 ctl_putdblf(tag, true, 3, mon->score);
2819 break;
2821 case 7:
2822 snprintf(tag, sizeof(tag), dr_fmt, count);
2823 ctl_putuint(tag, mon->dropped);
2824 break;
2826 default:
2827 /* huh? */
2828 break;
2830 sent[which] = true;
2831 remaining--;
2837 * read_mru_list - supports ntpq's mrulist command.
2839 * The approach was suggested by Ry Jones. A finite and variable number
2840 * of entries are retrieved per request, to avoid having responses with
2841 * such large numbers of packets that socket buffers are overflowed and
2842 * packets lost. The entries are retrieved oldest-first, taking into
2843 * account that the MRU list will be changing between each request. We
2844 * can expect to see duplicate entries for addresses updated in the MRU
2845 * list during the fetch operation. In the end, the client can assemble
2846 * a close approximation of the MRU list at the point in time the last
2847 * response was sent by ntpd. The only difference is it may be longer,
2848 * containing some number of oldest entries which have since been
2849 * reclaimed. If necessary, the protocol could be extended to zap those
2850 * from the client snapshot at the end, but so far that doesn't seem
2851 * useful.
2853 * To accommodate the changing MRU list, the starting point for requests
2854 * after the first request is supplied as a series of last seen
2855 * timestamps and associated addresses, the newest ones the client has
2856 * received. As long as at least one of those entries hasn't been
2857 * bumped to the head of the MRU list, ntpd can pick up at that point.
2858 * Otherwise, the request is failed and it is up to ntpq to back up and
2859 * provide the next newest entry's timestamps and addresses, conceivably
2860 * backing up all the way to the starting point.
2862 * input parameters:
2863 * nonce= Regurgitated nonce retrieved by the client
2864 * previously using CTL_OP_REQ_NONCE, demonstrating
2865 * ability to receive traffic sent to its address.
2866 * frags= Limit on datagrams (fragments) in response. Used
2867 * by newer ntpq versions instead of limit= when
2868 * retrieving multiple entries.
2869 * limit= Limit on MRU entries returned. One of frags= or
2870 * limit= must be provided.
2871 * limit=1 is a special case: Instead of fetching
2872 * beginning with the supplied starting points
2873 * (provided by a last.x and addr.x where 0 <= x
2874 * <= 15, default the beginning of time) newer
2875 * neighbor, fetch the supplied entries (provided
2876 * by addr.x= entries where 0 <= x <= 15), and in
2877 * that case the #.last timestamp can be zero.
2878 * This enables fetching a multiple entries from
2879 * given IP addresses. When limit is not one and
2880 * frags= is provided, the fragment limit controls.
2881 * NOTE: a single mrulist command may cause many
2882 * query/response rounds allowing limits as low as
2883 * 3 to potentially retrieve thousands of entries
2884 * in responses.
2885 * mincount= (decimal) Return entries with count >= mincount.
2886 * mindrop= (decimal) Return entries with drop >= mindrop.
2887 * minscore= (float) Return entries with score >= minscore.
2888 * laddr= Return entries associated with the server's IP
2889 * address given. No port specification is needed,
2890 * and any supplied is ignored.
2891 * recent= Set the reporting start point to retrieve roughly
2892 * a specified number of most recent entries
2893 * 'Roughly' because the logic cannot anticipate
2894 * update volume. Use this to volume-limit the
2895 * response when you are monitoring something like
2896 * a pool server with a very long MRU list.
2897 * resall= 0x-prefixed hex restrict bits which must all be
2898 * lit for an MRU entry to be included.
2899 * Has precedence over any resany=.
2900 * resany= 0x-prefixed hex restrict bits, at least one of
2901 * which must be list for an MRU entry to be
2902 * included.
2903 * last.0= 0x-prefixed hex l_fp timestamp of newest entry
2904 * which client previously received.
2905 * addr.0= text of newest entry's IP address and port,
2906 * IPv6 addresses in bracketed form: [::]:123
2907 * last.1= timestamp of 2nd newest entry client has.
2908 * addr.1= address of 2nd newest entry.
2909 * [...]
2911 * ntpq provides as many last/addr pairs as will fit in a single request
2912 * packet, except for the first request in a MRU fetch operation.
2914 * The response begins with a new nonce value to be used for any
2915 * followup request. Following the nonce is the next newer entry than
2916 * referred to by last.0 and addr.0, if the "0" entry has not been
2917 * bumped to the front. If it has, the first entry returned will be the
2918 * next entry newer than referred to by last.1 and addr.1, and so on.
2919 * If none of the referenced entries remain unchanged, the request fails
2920 * and ntpq backs up to the next earlier set of entries to resync.
2922 * Except for the first response, the response begins with confirmation
2923 * of the entry that precedes the first additional entry provided:
2925 * last.older= hex l_fp timestamp matching one of the input
2926 * .last timestamps, which entry now precedes the
2927 * response 0. entry in the MRU list.
2928 * addr.older= text of address corresponding to older.last.
2930 * And in any case, a successful response contains sets of values
2931 * comprising entries, with the oldest numbered 0 and incrementing from
2932 * there:
2934 * addr.# text of IPv4 or IPv6 address and port
2935 * last.# hex l_fp timestamp of last receipt
2936 * first.# hex l_fp timestamp of first receipt
2937 * ct.# count of packets received
2938 * mv.# mode and version
2939 * rs.# restriction mask (RES_* bits)
2941 * Note the code currently assumes there are no valid three letter
2942 * tags sent with each row, and needs to be adjusted if that changes.
2944 * The client should accept the values in any order, and ignore .#
2945 * values which it does not understand, to allow a smooth path to
2946 * future changes without requiring a new opcode. Clients can rely
2947 * on all *.0 values preceding any *.1 values, that is all values for
2948 * a given index number are together in the response.
2950 * The end of the response list is noted with one or two tag=value
2951 * pairs. Unconditionally:
2953 * now= 0x-prefixed l_fp timestamp at the server marking
2954 * the end of the operation.
2956 * If any entries were returned, now= is followed by:
2958 * last.newest= hex l_fp identical to last.# of the prior
2959 * entry.
2961 static void read_mru_list(
2962 struct recvbuf *rbufp,
2963 int restrict_mask
2966 static const char nulltxt[1] = { '\0' };
2967 static const char nonce_text[] = "nonce";
2968 static const char frags_text[] = "frags";
2969 static const char limit_text[] = "limit";
2970 static const char mincount_text[] = "mincount";
2971 static const char mindrop_text[] = "mindrop";
2972 static const char minscore_text[] = "minscore";
2973 static const char resall_text[] = "resall";
2974 static const char resany_text[] = "resany";
2975 static const char maxlstint_text[] = "maxlstint";
2976 static const char minlstint_text[] = "minlstint";
2977 static const char laddr_text[] = "laddr";
2978 static const char recent_text[] = "recent";
2979 static const char resaxx_fmt[] = "0x%hx";
2981 unsigned int limit;
2982 unsigned short frags;
2983 unsigned short resall;
2984 unsigned short resany;
2985 int mincount;
2986 unsigned int mindrop;
2987 float minscore;
2988 unsigned int maxlstint;
2989 unsigned int minlstint;
2990 sockaddr_u laddr;
2991 unsigned int recent;
2992 endpt * lcladr;
2993 unsigned int count;
2994 static unsigned int countdown;
2995 unsigned int ui;
2996 unsigned int uf;
2997 l_fp last[16];
2998 sockaddr_u addr[COUNTOF(last)];
2999 char buf[128];
3000 struct ctl_var * in_parms;
3001 const struct ctl_var * v;
3002 const char * val;
3003 const char * pch;
3004 char * pnonce;
3005 int nonce_valid;
3006 size_t i;
3007 int priors;
3008 mon_entry * mon;
3009 mon_entry * prior_mon;
3010 l_fp now;
3012 if (RES_NOMRULIST & restrict_mask) {
3013 ctl_error(CERR_PERMISSION);
3014 NLOG(NLOG_SYSINFO)
3015 msyslog(LOG_NOTICE,
3016 "MODE6: mrulist from %s rejected due to"
3017 " nomrulist restriction",
3018 socktoa(&rbufp->recv_srcadr));
3019 increment_restricted();
3020 return;
3023 * fill in_parms var list with all possible input parameters.
3025 in_parms = NULL;
3026 set_var(&in_parms, nonce_text, sizeof(nonce_text), 0);
3027 set_var(&in_parms, frags_text, sizeof(frags_text), 0);
3028 set_var(&in_parms, limit_text, sizeof(limit_text), 0);
3029 set_var(&in_parms, mincount_text, sizeof(mincount_text), 0);
3030 set_var(&in_parms, mindrop_text, sizeof(mindrop_text), 0);
3031 set_var(&in_parms, minscore_text, sizeof(minscore_text), 0);
3032 set_var(&in_parms, resall_text, sizeof(resall_text), 0);
3033 set_var(&in_parms, resany_text, sizeof(resany_text), 0);
3034 set_var(&in_parms, maxlstint_text, sizeof(maxlstint_text), 0);
3035 set_var(&in_parms, minlstint_text, sizeof(minlstint_text), 0);
3036 set_var(&in_parms, laddr_text, sizeof(laddr_text), 0);
3037 set_var(&in_parms, recent_text, sizeof(recent_text), 0);
3038 for (i = 0; i < COUNTOF(last); i++) {
3039 snprintf(buf, sizeof(buf), last_fmt, (int)i);
3040 set_var(&in_parms, buf, strlen(buf) + 1, 0);
3041 snprintf(buf, sizeof(buf), addr_fmt, (int)i);
3042 set_var(&in_parms, buf, strlen(buf) + 1, 0);
3045 /* decode input parms */
3046 pnonce = NULL;
3047 frags = 0;
3048 limit = 0;
3049 mincount = 0;
3050 mindrop = 0;
3051 minscore = 0.0;
3052 resall = 0;
3053 resany = 0;
3054 maxlstint = 0;
3055 minlstint = 0;
3056 recent = 0;
3057 lcladr = NULL;
3058 priors = 0;
3059 ZERO(last);
3060 ZERO(addr);
3062 /* have to go through '(void*)' to drop 'const' property from pointer.
3063 * ctl_getitem2()' needs some cleanup, too.... perlinger@ntp.org
3065 while (NULL != (v = ctl_getitem2(in_parms, (void*)&val)) &&
3066 !(EOV & v->flags)) {
3067 int si;
3069 if (NULL == val)
3070 val = nulltxt;
3072 if (!strcmp(nonce_text, v->text)) {
3073 free(pnonce);
3074 pnonce = (*val) ? estrdup(val) : NULL;
3075 } else if (!strcmp(frags_text, v->text)) {
3076 if (1 != sscanf(val, "%hu", &frags))
3077 goto blooper;
3078 } else if (!strcmp(limit_text, v->text)) {
3079 if (1 != sscanf(val, "%u", &limit))
3080 goto blooper;
3081 } else if (!strcmp(mincount_text, v->text)) {
3082 if (1 != sscanf(val, "%d", &mincount))
3083 goto blooper;
3084 if (mincount < 0)
3085 mincount = 0;
3086 } else if (!strcmp(mindrop_text, v->text)) {
3087 if (1 != sscanf(val, "%u", &mindrop))
3088 goto blooper;
3089 } else if (!strcmp(minscore_text, v->text)) {
3090 if (1 != sscanf(val, "%f", &minscore))
3091 goto blooper;
3092 if (minscore < 0)
3093 minscore = 0.0;
3094 } else if (!strcmp(resall_text, v->text)) {
3095 if (1 != sscanf(val, resaxx_fmt, &resall))
3096 goto blooper;
3097 } else if (!strcmp(resany_text, v->text)) {
3098 if (1 != sscanf(val, resaxx_fmt, &resany))
3099 goto blooper;
3100 } else if (!strcmp(maxlstint_text, v->text)) {
3101 if (1 != sscanf(val, "%u", &maxlstint))
3102 goto blooper;
3103 } else if (!strcmp(minlstint_text, v->text)) {
3104 if (1 != sscanf(val, "%u", &minlstint))
3105 goto blooper;
3106 } else if (!strcmp(laddr_text, v->text)) {
3107 if (decodenetnum(val, &laddr))
3108 goto blooper;
3109 lcladr = getinterface(&laddr, 0);
3110 } else if (!strcmp(recent_text, v->text)) {
3111 if (1 != sscanf(val, "%u", &recent))
3112 goto blooper;
3113 } else if (1 == sscanf(v->text, last_fmt, &si) &&
3114 (size_t)si < COUNTOF(last)) {
3115 if (2 != sscanf(val, "0x%08x.%08x", &ui, &uf))
3116 goto blooper;
3117 last[si] = lfpinit_u(ui, uf);
3118 if (!SOCK_UNSPEC(&addr[si]) && si == priors)
3119 priors++;
3120 } else if (1 == sscanf(v->text, addr_fmt, &si) &&
3121 (size_t)si < COUNTOF(addr)) {
3122 if (decodenetnum(val, &addr[si]))
3123 goto blooper;
3124 if (lfpuint(last[si]) && lfpfrac(last[si]) && si == priors)
3125 priors++;
3126 } else {
3127 DPRINT(1, ("read_mru_list: invalid key item: '%s'"
3128 " (ignored)\n",
3129 v->text));
3130 continue;
3132 blooper:
3133 DPRINT(1, ("read_mru_list: invalid param for '%s'"
3134 ": '%s' (bailing)\n",
3135 v->text, val));
3136 free(pnonce);
3137 pnonce = NULL;
3138 break;
3141 free_varlist(in_parms);
3142 in_parms = NULL;
3144 /* return no responses until the nonce is validated */
3145 if (NULL == pnonce) {
3146 return;
3149 nonce_valid = validate_nonce(pnonce, rbufp);
3150 free(pnonce);
3151 if (!nonce_valid) {
3152 return;
3155 if ((0 == frags && !(0 < limit && limit <= MRU_ROW_LIMIT)) ||
3156 frags > MRU_FRAGS_LIMIT) {
3157 ctl_error(CERR_BADVALUE);
3158 return;
3162 * If either frags or limit is not given, use the max.
3164 if (0 != frags && 0 == limit) {
3165 limit = UINT_MAX;
3166 } else if (0 != limit && 0 == frags)
3167 frags = MRU_FRAGS_LIMIT;
3169 mon = NULL;
3170 if (limit == 1) {
3171 for (i = 0; i < COUNTOF(last); i++) {
3172 mon = mon_get_slot(&addr[i]);
3173 if (mon != NULL) {
3174 send_mru_entry(mon, i);
3177 generate_nonce(rbufp, buf, sizeof(buf));
3178 ctl_putunqstr("nonce", buf, strlen(buf));
3179 get_systime(&now);
3180 ctl_putts("now", now);
3181 ctl_flushpkt(0);
3182 return;
3186 * Find the starting point if one was provided.
3188 for (i = 0; i < (size_t)priors; i++) {
3189 mon = mon_get_slot(&addr[i]);
3190 if (mon != NULL) {
3191 if (mon->last == last[i])
3192 break;
3196 /* If a starting point was provided... */
3197 if (priors) {
3198 /* and none could be found unmodified... */
3199 if (NULL == mon) {
3200 /* tell ntpq to try again with older entries */
3201 ctl_error(CERR_UNKNOWNVAR);
3202 return;
3204 /* confirm the prior entry used as starting point */
3205 ctl_putts("last.older", mon->last);
3206 pch = sockporttoa(&mon->rmtadr);
3207 ctl_putunqstr("addr.older", pch, strlen(pch));
3210 * Move on to the first entry the client doesn't have,
3211 * except in the special case of a limit of one. In
3212 * that case return the starting point entry.
3214 if (limit > 1)
3215 mon = PREV_DLIST(mon_data.mon_mru_list, mon, mru);
3216 } else { /* start with the oldest */
3217 mon = TAIL_DLIST(mon_data.mon_mru_list, mru);
3218 countdown = mon_data.mru_entries;
3222 * send up to limit= entries in up to frags= datagrams
3224 get_systime(&now);
3225 generate_nonce(rbufp, buf, sizeof(buf));
3226 ctl_putunqstr("nonce", buf, strlen(buf));
3227 prior_mon = NULL;
3228 for (count = 0;
3229 mon != NULL && res_frags < frags && count < limit;
3230 mon = PREV_DLIST(mon_data.mon_mru_list, mon, mru)) {
3232 if (mon->count < mincount)
3233 continue;
3234 if (mon->dropped < mindrop)
3235 continue;
3236 if (mon->score < minscore)
3237 continue;
3238 if (resall && resall != (resall & mon->flags))
3239 continue;
3240 if (resany && !(resany & mon->flags))
3241 continue;
3242 if (maxlstint > 0 && lfpuint(now) - lfpuint(mon->last) >
3243 maxlstint)
3244 continue;
3245 if (minlstint > 0 && lfpuint(now) - lfpuint(mon->last) <
3246 minlstint)
3247 continue;
3248 if (lcladr != NULL && mon->lcladr != lcladr)
3249 continue;
3250 if (recent != 0 && countdown-- > recent)
3251 continue;
3252 send_mru_entry(mon, (int)count);
3253 #ifdef USE_RANDOMIZE_RESPONSES
3254 if (!count)
3255 send_random_tag_value(0);
3256 #endif /* USE_RANDOMIZE_RESPONSES */
3257 count++;
3258 prior_mon = mon;
3262 * If this batch completes the MRU list, say so explicitly with
3263 * a now= l_fp timestamp.
3265 if (NULL == mon) {
3266 #ifdef USE_RANDOMIZE_RESPONSES
3267 if (count > 1) {
3268 send_random_tag_value((int)count - 1);
3270 #endif /* USE_RANDOMIZE_RESPONSES */
3271 ctl_putts("now", now);
3272 /* if any entries were returned confirm the last */
3273 if (prior_mon != NULL)
3274 ctl_putts("last.newest", prior_mon->last);
3276 ctl_flushpkt(0);
/*
 * Send an ifstats entry in response to a "ntpq -c ifstats" request.
 *
 * To keep clients honest about not depending on the order of values,
 * and thereby avoid being locked into ugly workarounds to maintain
 * backward compatibility later as new fields are added to the response,
 * the order is random.
 *
 * la    - local endpoint whose statistics are reported
 * ifnum - interface index, used as the numeric suffix of each tag
 *         (e.g. "rx.3", "tx.3")
 */
static void
send_ifstats_entry(
	endpt *		la,
	unsigned int	ifnum
	)
{
	const char addr_fmtu[] =	"addr.%u";
	const char en_fmt[] =		"en.%u";	/* enabled */
	const char name_fmt[] =		"name.%u";
	const char flags_fmt[] =	"flags.%u";
	const char rx_fmt[] =		"rx.%u";
	const char tx_fmt[] =		"tx.%u";
	const char txerr_fmt[] =	"txerr.%u";
	const char pc_fmt[] =		"pc.%u";	/* peer count */
	const char up_fmt[] =		"up.%u";	/* uptime */
	char	tag[32];
	uint8_t	sent[IFSTATS_FIELDS];	/* 9 tag=value pairs */
	int	noisebits;
	uint32_t noise;
	unsigned int which = 0;
	unsigned int remaining;
	const char *pch;

	remaining = COUNTOF(sent);
	ZERO(sent);
	noise = 0;
	noisebits = 0;
	while (remaining > 0) {
		/*
		 * Refill the pool of random bits; each field selection
		 * consumes 4 bits.  random() need not be strong here,
		 * the randomness only shuffles output order.
		 */
		if (noisebits < 4) {
			/* coverity[DC.WEAK_CRYPTO] */
			noise = (uint32_t)random();
			noisebits = 31;
		}
#ifdef USE_RANDOMIZE_RESPONSES
		which = (noise & 0xf) % COUNTOF(sent);
#endif /* USE_RANDOMIZE_RESPONSES */
		noise >>= 4;
		noisebits -= 4;

		/* advance to the next field not yet emitted */
		while (sent[which])
			which = (which + 1) % COUNTOF(sent);

		switch (which) {

		case 0:
			snprintf(tag, sizeof(tag), addr_fmtu, ifnum);
			pch = sockporttoa(&la->sin);
			ctl_putunqstr(tag, pch, strlen(pch));
			break;

		case 1:
			/* "enabled" is the inverse of ignore_packets */
			snprintf(tag, sizeof(tag), en_fmt, ifnum);
			ctl_putint(tag, !la->ignore_packets);
			break;

		case 2:
			snprintf(tag, sizeof(tag), name_fmt, ifnum);
			ctl_putstr(tag, la->name, strlen(la->name));
			break;

		case 3:
			snprintf(tag, sizeof(tag), flags_fmt, ifnum);
			ctl_puthex(tag, la->flags);
			break;

		case 4:
			snprintf(tag, sizeof(tag), rx_fmt, ifnum);
			ctl_putint(tag, la->received);
			break;

		case 5:
			snprintf(tag, sizeof(tag), tx_fmt, ifnum);
			ctl_putint(tag, la->sent);
			break;

		case 6:
			snprintf(tag, sizeof(tag), txerr_fmt, ifnum);
			ctl_putint(tag, la->notsent);
			break;

		case 7:
			snprintf(tag, sizeof(tag), pc_fmt, ifnum);
			ctl_putuint(tag, la->peercnt);
			break;

		case 8:
			/* uptime in units of current_time ticks */
			snprintf(tag, sizeof(tag), up_fmt, ifnum);
			ctl_putuint(tag, current_time - la->starttime);
			break;

		default:
			/* Get here if IFSTATS_FIELDS is too big. */
			break;
		}
		sent[which] = true;
		remaining--;
	}
#ifdef USE_RANDOMIZE_RESPONSES
	send_random_tag_value((int)ifnum);
#endif /* USE_RANDOMIZE_RESPONSES */
}
3391 * read_ifstats - send statistics for each local address, exposed by
3392 * ntpq -c ifstats
3394 static void
3395 read_ifstats(
3396 struct recvbuf * rbufp
3399 unsigned int ifidx;
3400 endpt * la;
3402 UNUSED_ARG(rbufp);
3405 * loop over [0..sys_ifnum] searching ep_list for each
3406 * ifnum in turn.
3408 for (ifidx = 0; ifidx < io_data.sys_ifnum; ifidx++) {
3409 for (la = io_data.ep_list; la != NULL; la = la->elink)
3410 if (ifidx == la->ifnum)
3411 break;
3412 if (NULL == la)
3413 continue;
3414 /* return stats for one local address */
3415 send_ifstats_entry(la, ifidx);
3417 ctl_flushpkt(0);
3420 static void
3421 sockaddrs_from_restrict_u(
3422 sockaddr_u * psaA,
3423 sockaddr_u * psaM,
3424 restrict_u * pres,
3425 int ipv6
3428 ZERO(*psaA);
3429 ZERO(*psaM);
3430 if (!ipv6) {
3431 SET_AF(psaA, AF_INET);
3432 PSOCK_ADDR4(psaA)->s_addr = htonl(pres->u.v4.addr);
3433 SET_AF(psaM, AF_INET);
3434 PSOCK_ADDR4(psaM)->s_addr = htonl(pres->u.v4.mask);
3435 } else {
3436 SET_AF(psaA, AF_INET6);
3437 memcpy(&SOCK_ADDR6(psaA), &pres->u.v6.addr,
3438 sizeof(SOCK_ADDR6(psaA)));
3439 SET_AF(psaM, AF_INET6);
3440 memcpy(&SOCK_ADDR6(psaM), &pres->u.v6.mask,
3441 sizeof(SOCK_ADDR6(psaA)));
/*
 * Send a restrict entry in response to a "ntpq -c reslist" request.
 *
 * To keep clients honest about not depending on the order of values,
 * and thereby avoid being locked into ugly workarounds to maintain
 * backward compatibility later as new fields are added to the response,
 * the order is random.
 *
 * pres - restriction entry to report
 * ipv6 - nonzero if pres holds IPv6 data
 * idx  - running entry number, used as the numeric tag suffix
 */
static void
send_restrict_entry(
	restrict_u *	pres,
	int		ipv6,
	unsigned int	idx
	)
{
	const char addr_fmtu[] =	"addr.%u";
	const char mask_fmtu[] =	"mask.%u";
	const char hits_fmt[] =		"hits.%u";
	const char flags_fmt[] =	"flags.%u";
	char		tag[32];
	uint8_t		sent[RESLIST_FIELDS];	/* 4 tag=value pairs */
	int		noisebits;
	uint32_t	noise;
	unsigned int	which = 0;
	unsigned int	remaining;
	sockaddr_u	addr;
	sockaddr_u	mask;
	const char *	pch;
	char *		buf;
	const char *	match_str;
	const char *	access_str;

	sockaddrs_from_restrict_u(&addr, &mask, pres, ipv6);
	remaining = COUNTOF(sent);
	ZERO(sent);
	noise = 0;
	noisebits = 0;
	while (remaining > 0) {
		/*
		 * Refill the pool of random bits; each field selection
		 * consumes 2 bits (4 fields).
		 */
		if (noisebits < 2) {
			/* coverity[DC.WEAK_CRYPTO] */
			noise = (uint32_t)random();
			noisebits = 31;
		}
#ifdef USE_RANDOMIZE_RESPONSES
		which = (noise & 0x3) % COUNTOF(sent);
#endif /* USE_RANDOMIZE_RESPONSES */
		noise >>= 2;
		noisebits -= 2;

		/* advance to the next field not yet emitted */
		while (sent[which])
			which = (which + 1) % COUNTOF(sent);

		switch (which) {

		case 0:
			snprintf(tag, sizeof(tag), addr_fmtu, idx);
			pch = socktoa(&addr);
			ctl_putunqstr(tag, pch, strlen(pch));
			break;

		case 1:
			snprintf(tag, sizeof(tag), mask_fmtu, idx);
			pch = socktoa(&mask);
			ctl_putunqstr(tag, pch, strlen(pch));
			break;

		case 2:
			snprintf(tag, sizeof(tag), hits_fmt, idx);
			ctl_putuint(tag, pres->hitcount);
			break;

		case 3:
			/*
			 * Combine match flags and access flags into one
			 * space-separated string, omitting the match
			 * part when it is empty.  lib_getbuf() supplies
			 * a LIB_BUFLENGTH-byte scratch buffer.
			 */
			snprintf(tag, sizeof(tag), flags_fmt, idx);
			match_str = res_match_flags(pres->mflags);
			access_str = res_access_flags(pres->flags);
			if ('\0' == match_str[0]) {
				pch = access_str;
			} else {
				buf = lib_getbuf();
				snprintf(buf, LIB_BUFLENGTH, "%s %s",
					 match_str, access_str);
				pch = buf;
			}
			ctl_putunqstr(tag, pch, strlen(pch));
			break;

		default:
			/* huh? */
			break;
		}
		sent[which] = true;
		remaining--;
	}
#ifdef USE_RANDOMIZE_RESPONSES
	send_random_tag_value((int)idx);
#endif /* USE_RANDOMIZE_RESPONSES */
}
3545 static void
3546 send_restrict_list(
3547 restrict_u * pres,
3548 int ipv6,
3549 unsigned int * pidx
3552 for ( ; pres != NULL; pres = pres->link) {
3553 send_restrict_entry(pres, ipv6, *pidx);
3554 (*pidx)++;
3560 * read_addr_restrictions - returns IPv4 and IPv6 access control lists
3562 static void
3563 read_addr_restrictions(
3564 struct recvbuf * rbufp
3567 unsigned int idx;
3569 UNUSED_ARG(rbufp);
3571 idx = 0;
3572 send_restrict_list(rstrct.restrictlist4, false, &idx);
3573 send_restrict_list(rstrct.restrictlist6, true, &idx);
3574 ctl_flushpkt(0);
3579 * read_ordlist - CTL_OP_READ_ORDLIST_A for ntpq -c ifstats & reslist
3581 static void
3582 read_ordlist(
3583 struct recvbuf * rbufp,
3584 int restrict_mask
3587 const char ifstats_s[] = "ifstats";
3588 const size_t ifstatint8_ts = COUNTOF(ifstats_s) - 1;
3589 const char addr_rst_s[] = "addr_restrictions";
3590 const size_t a_r_chars = COUNTOF(addr_rst_s) - 1;
3591 struct ntp_control * cpkt;
3592 struct ntp_control pkt_core;
3593 unsigned short qdata_octets;
3595 UNUSED_ARG(rbufp);
3596 UNUSED_ARG(restrict_mask);
3599 * CTL_OP_READ_ORDLIST_A was first named CTL_OP_READ_IFSTATS and
3600 * used only for ntpq -c ifstats. With the addition of reslist
3601 * the same opcode was generalized to retrieve ordered lists
3602 * which require authentication. The request data is empty or
3603 * contains "ifstats" (not null terminated) to retrieve local
3604 * addresses and associated stats. It is "addr_restrictions"
3605 * to retrieve the IPv4 then IPv6 remote address restrictions,
3606 * which are access control lists. Other request data return
3607 * CERR_UNKNOWNVAR.
3609 unmarshall_ntp_control(&pkt_core, rbufp);
3610 cpkt = &pkt_core;
3611 qdata_octets = ntohs(cpkt->count);
3612 if (0 == qdata_octets || (ifstatint8_ts == qdata_octets &&
3613 !memcmp(ifstats_s, cpkt->data, ifstatint8_ts))) {
3614 read_ifstats(rbufp);
3615 return;
3617 if (a_r_chars == qdata_octets &&
3618 !memcmp(addr_rst_s, cpkt->data, a_r_chars)) {
3619 read_addr_restrictions(rbufp);
3620 return;
3622 ctl_error(CERR_UNKNOWNVAR);
/*
 * req_nonce - CTL_OP_REQ_NONCE for ntpq -c mrulist prerequisite.
 *
 * Generates a nonce string and returns it as the single tag=value
 * pair "nonce=...".  The nonce is produced by generate_nonce() from
 * the request buffer — presumably keyed to the requester's address;
 * verify against generate_nonce()'s definition.
 */
static void req_nonce(
	struct recvbuf *	rbufp,
	int			restrict_mask
	)
{
	char	buf[64];

	UNUSED_ARG(restrict_mask);

	generate_nonce(rbufp, buf, sizeof(buf));
	ctl_putunqstr("nonce", buf, strlen(buf));
	ctl_flushpkt(0);
}
/*
 * read_clockstatus - return clock radio status
 *
 * Responds with refclock status variables for the association given
 * by res_associd, or — when res_associd is 0 — for the system peer if
 * it is a refclock, else the first refclock found on peer_list.
 * Without REFCLOCK support compiled in, always errors CERR_BADASSOC.
 */
/*ARGSUSED*/
static void
read_clockstatus(
	struct recvbuf *rbufp,
	int restrict_mask
	)
{
#ifndef REFCLOCK
	UNUSED_ARG(rbufp);
	UNUSED_ARG(restrict_mask);
	/*
	 * If no refclock support, no data to return
	 */
	ctl_error(CERR_BADASSOC);
#else
	const struct ctl_var *	v;
	int			i;
	struct peer *		peer;
	char *			valuep;
	uint8_t *		wants;	/* flag per known variable code */
	size_t			wants_alloc;
	bool			gotvar;
	struct ctl_var *	kv;	/* driver-specific variable list */
	struct refclockstat	cs;

	UNUSED_ARG(rbufp);
	UNUSED_ARG(restrict_mask);

	if (res_associd != 0) {
		peer = findpeerbyassoc(res_associd);
	} else {
		/*
		 * Find a clock for this jerk. If the system peer
		 * is a clock use it, else search peer_list for one.
		 */
		if (sys_vars.sys_peer != NULL && (FLAG_REFCLOCK &
		    sys_vars.sys_peer->cfg.flags))
			peer = sys_vars.sys_peer;
		else
			for (peer = peer_list;
			     peer != NULL;
			     peer = peer->p_link)
				if (FLAG_REFCLOCK & peer->cfg.flags)
					break;
	}
	if (NULL == peer || !(FLAG_REFCLOCK & peer->cfg.flags)) {
		ctl_error(CERR_BADASSOC);
		return;
	}
	/*
	 * If we got here we have a peer which is a clock. Get his
	 * status.
	 */
	cs.kv_list = NULL;
	refclock_control(&peer->srcadr, NULL, &cs);
	kv = cs.kv_list;
	/*
	 * Look for variables in the packet.  wants[] is indexed first
	 * by the fixed clock variable codes [0..CC_MAXCODE], then by
	 * the driver's kv list codes offset by CC_MAXCODE + 1.
	 */
	rpkt.status = htons(ctlclkstatus(&cs));
	wants_alloc = CC_MAXCODE + 1 + count_var(kv);
	wants = emalloc_zero(wants_alloc);
	gotvar = false;
	while (NULL != (v = ctl_getitem2(clock_var2, &valuep))) {
		if (!(EOV & v->flags)) {
			wants[v->code] = true;
			gotvar = true;
		} else {
			/* not a fixed variable; try the driver's list */
			v = ctl_getitem2(kv, &valuep);
			if (NULL == v) {
				ctl_error(CERR_BADVALUE);
				free(wants);
				free_varlist(cs.kv_list);
				return;
			}
			if (EOV & v->flags) {
				ctl_error(CERR_UNKNOWNVAR);
				free(wants);
				free_varlist(cs.kv_list);
				return;
			}
			wants[CC_MAXCODE + 1 + v->code] = true;
			gotvar = true;
		}
	}

	if (gotvar) {
		/* return only the specifically requested variables */
		for (i = 1; i <= (int)CC_MAXCODE; i++)
			if (wants[i])
				ctl_putclock(i, &cs, true);
		if (kv != NULL)
			for (i = 0; !(EOV & kv[i].flags); i++)
				if (wants[(unsigned int)i + CC_MAXCODE + 1])
					ctl_putdata(kv[i].text,
						    strlen(kv[i].text),
						    false);
	} else {
		/* no variables requested: return the default set */
		for (v = clock_var2; v != NULL && !(EOV & v->flags); v++)
			if (DEF & v->flags)
				ctl_putclock(v->code, &cs, false);
		for ( ; kv != NULL && !(EOV & kv->flags); kv++)
			if (DEF & kv->flags)
				ctl_putdata(kv->text, strlen(kv->text),
					    false);
	}

	free(wants);
	free_varlist(cs.kv_list);

	ctl_flushpkt(0);
#endif
}
/*
 * report_event - report an event to log files
 *
 * Code lives here because in past times it reported through the
 * obsolete trap facility.
 *
 * err  - event code
 * peer - the peer the event concerns, or NULL for a system event
 * str  - optional additional text appended to the protostats record
 *
 * Repeated events of the same type are rate-limited: system events
 * stop after CTL_SYS_MAXEVENTS consecutive repeats, peer events after
 * CTL_PEER_MAXEVENTS per peer; the counters reset when the event type
 * changes.
 */
void
report_event(
	int err,		/* error code */
	struct peer *peer,	/* peer structure pointer */
	const char *str		/* protostats string */
	)
{
#define NTP_MAXSTRLEN	256	/* max string length */
	char	statstr[NTP_MAXSTRLEN];
	size_t	len;

	/*
	 * Report the error to the protostats file and system log
	 */
	if (peer == NULL) {

		/*
		 * Discard a system report if the number of reports of
		 * the same type exceeds the maximum.
		 */
		if (ctl_sys_last_event != (uint8_t)err) {
			ctl_sys_num_events= 0;
		}
		if (ctl_sys_num_events >= CTL_SYS_MAXEVENTS)
			return;

		ctl_sys_last_event = (uint8_t)err;
		ctl_sys_num_events++;
		snprintf(statstr, sizeof(statstr),
			 "0.0.0.0 %04x %02x %s",
			 ctlsysstatus(), (unsigned)err, eventstr(err));
		if (str != NULL) {
			len = strlen(statstr);
			snprintf(statstr + len, sizeof(statstr) - len,
				 " %s", str);
		}
		NLOG(NLOG_SYSEVENT)
			msyslog(LOG_INFO, "PROTO: %s", statstr);
	} else {

		/*
		 * Discard a peer report if the number of reports of
		 * the same type exceeds the maximum for that peer.
		 */
		const char *	src;
		uint8_t		errlast;

		errlast = (uint8_t)err & ~PEER_EVENT;
		if (peer->last_event != errlast)
			peer->num_events = 0;
		if (peer->num_events >= CTL_PEER_MAXEVENTS)
			return;

		peer->last_event = errlast;
		peer->num_events++;
		/* pick the best printable identity for the peer:
		 * refclock name, hostname, or source address */
#ifdef REFCLOCK
		if (IS_PEER_REFCLOCK(peer))
			src = refclock_name(peer);
		else
#endif /* REFCLOCK */
		if (AF_UNSPEC == AF(&peer->srcadr))
			src = peer->hostname;
		else
			src = socktoa(&peer->srcadr);

		snprintf(statstr, sizeof(statstr),
			 "%s %04x %02x %s", src,
			 ctlpeerstatus(peer), (unsigned)err, eventstr(err));
		if (str != NULL) {
			len = strlen(statstr);
			snprintf(statstr + len, sizeof(statstr) - len,
				 " %s", str);
		}
		NLOG(NLOG_PEEREVENT)
			msyslog(LOG_INFO, "PROTO: %s", statstr);
	}
	record_proto_stats(statstr);
	DPRINT(1, ("event at %u %s\n", current_time, statstr));
}
/*
 * mprintf_event - printf-style varargs variant of report_event()
 *
 * evcode - event code, passed through to report_event()
 * p      - peer the event concerns; may be NULL for a system event
 * fmt    - printf-style format for the protostats string
 *
 * Returns the vsnprintf() result: the number of characters the full
 * message would occupy, or a negative value on encoding error.  The
 * message is silently truncated to fit msg[512].
 */
int
mprintf_event(
	int		evcode,		/* event code */
	struct peer *	p,		/* may be NULL */
	const char *	fmt,		/* msnprintf format */
	...
	)
{
	va_list	ap;
	int	rc;
	char	msg[512];

	va_start(ap, fmt);
	rc = vsnprintf(msg, sizeof(msg), fmt, ap);
	va_end(ap);
	report_event(evcode, p, msg);

	return rc;
}
3875 * ctl_clr_stats - clear stat counters
3877 void
3878 ctl_clr_stats(void)
3880 ctltimereset = current_time;
3881 numctlreq = 0;
3882 numctlbadpkts = 0;
3883 numctlresponses = 0;
3884 numctlfrags = 0;
3885 numctlerrors = 0;
3886 numctlfrags = 0;
3887 numctltooshort = 0;
3888 numctlinputresp = 0;
3889 numctlinputfrag = 0;
3890 numctlinputerr = 0;
3891 numctlbadoffset = 0;
3892 numctlbadversion = 0;
3893 numctldatatooshort = 0;
3894 numctlbadop = 0;
3897 static unsigned short
3898 count_var(
3899 const struct ctl_var *k
3902 unsigned int c;
3904 if (NULL == k) {
3905 return 0;
3908 c = 0;
3909 while (!(EOV & (k++)->flags))
3910 c++;
3912 ENSURE(c <= USHRT_MAX);
3913 return (unsigned short)c;
3917 char *
3918 add_var(
3919 struct ctl_var **kv,
3920 unsigned long size,
3921 unsigned short def
3924 unsigned short c;
3925 struct ctl_var *k;
3926 char * buf;
3928 c = count_var(*kv);
3929 *kv = erealloc(*kv, (c + 2U) * sizeof(**kv));
3930 k = *kv;
3931 buf = emalloc(size);
3932 k[c].code = c;
3933 k[c].text = buf;
3934 k[c].flags = def;
3935 k[c + 1].code = 0;
3936 k[c + 1].text = NULL;
3937 k[c + 1].flags = EOV;
3939 return buf;
/*
 * set_var - set a "name=value" entry on a ctl_var list, replacing an
 * existing entry with the same name or appending a new one.
 *
 * kv   - address of the list pointer (may be grown via add_var)
 * data - the full "name=value" string to store
 * size - byte count of data, including its terminator
 * def  - flags for the entry
 *
 * Entries are matched by the name portion up to '=' (or end of
 * string).  NULL data or zero size is a no-op.
 */
void
set_var(
	struct ctl_var **kv,
	const char *data,
	unsigned long size,
	unsigned short def
	)
{
	struct ctl_var *k;
	const char *s;
	const char *t;
	char *td;

	if (NULL == data || !size) {
		return;
	}

	k = *kv;
	if (k != NULL) {
		while (!(EOV & k->flags)) {
			if (NULL == k->text) {
				/* empty slot: claim it for this entry */
				td = emalloc(size);
				memcpy(td, data, size);
				k->text = td;
				k->flags = def;
				return;
			} else {
				/* compare the name portions up to '=' */
				s = data;
				t = k->text;
				while (*t != '=' && *s == *t) {
					s++;
					t++;
				}
				if (*s == *t && ((*t == '=') || !*t)) {
					/* same name: replace the value
					 * in place (text was emalloc'd,
					 * so erealloc is safe; the cast
					 * strips const from text) */
					td = erealloc((void *)(intptr_t)k->text,
						      size);
					memcpy(td, data, size);
					k->text = td;
					k->flags = def;
					return;
				}
			}
			k++;
		}
	}
	/* no match found: append a new entry */
	td = add_var(kv, size, def);
	memcpy(td, data, size);
}
/*
 * set_sys_var - set a system variable: thin wrapper around set_var()
 * operating on the ext_sys_var list.
 */
void
set_sys_var(
	const char *data,
	unsigned long size,
	unsigned short def
	)
{
	set_var(&ext_sys_var, data, size, def);
}
4005 * get_ext_sys_var() retrieves the value of a user-defined variable or
4006 * NULL if the variable has not been setvar'd.
4008 const char *
4009 get_ext_sys_var(const char *tag)
4011 struct ctl_var * v;
4012 size_t c;
4013 const char * val;
4015 val = NULL;
4016 c = strlen(tag);
4017 for (v = ext_sys_var; !(EOV & v->flags); v++) {
4018 if (NULL != v->text && !memcmp(tag, v->text, c)) {
4019 if ('=' == v->text[c]) {
4020 val = v->text + c + 1;
4021 break;
4022 } else if ('\0' == v->text[c]) {
4023 val = "";
4024 break;
4029 return val;
4033 void
4034 free_varlist(
4035 struct ctl_var *kv
4038 struct ctl_var *k;
4039 if (kv) {
4040 for (k = kv; !(k->flags & EOV); k++)
4041 free((void *)(intptr_t)k->text);
4042 free((void *)kv);