/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source.  A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 */

/*
 * Copyright 2016 Joyent, Inc.
 */
16 #include <sys/comm_page.h>
21 * Interrogate if querying the clock via the comm page is possible.
24 __cp_can_gettime(comm_page_t
*cp
)
26 switch (cp
->cp_tsc_type
) {
28 case TSC_RDTSC_MFENCE
:
29 case TSC_RDTSC_LFENCE
:
#ifdef __amd64

/*
 * The functions used for calculating time (both monotonic and wall-clock) are
 * implemented in assembly on amd64.  This is primarily for stack conservation.
 */

#else /* i386 below */
48 * ASM-defined functions.
50 extern hrtime_t
__cp_tsc_read(comm_page_t
*);
/*
 * These are cloned from TSC and time related code in the kernel.  They should
 * be kept in sync in the case that the source values are changed.
 * See: uts/i86pc/os/timestamp.c
 */
#define	NSEC_SHIFT	5
#define	NANOSEC		1000000000LL

/*
 * Convert a TSC delta to nanoseconds and accumulate it into (hrt):
 *   (hrt) += (tsc) * (scale) / 2^(32 - NSEC_SHIFT)
 * The 64-bit multiply is split into high/low 32-bit halves so neither
 * partial product overflows.  Accessing (tsc) through a uint32_t pointer
 * assumes a little-endian layout (x86) -- _l[0] is the low word.
 */
#define	TSC_CONVERT_AND_ADD(tsc, hrt, scale) do {		\
	uint32_t *_l = (uint32_t *)&(tsc);			\
	uint64_t sc = (uint32_t)(scale);			\
	(hrt) += (uint64_t)(_l[1] * sc) << NSEC_SHIFT;		\
	(hrt) += (uint64_t)(_l[0] * sc) >> (32 - NSEC_SHIFT);	\
} while (0)
68 * Userspace version of tsc_gethrtime.
69 * See: uts/i86pc/os/timestamp.c
72 __cp_gethrtime(comm_page_t
*cp
)
74 uint32_t old_hres_lock
;
75 hrtime_t tsc
, hrt
, tsc_last
;
78 * Several precautions must be taken when collecting the data necessary
79 * to perform an accurate gethrtime calculation.
81 * While much of the TSC state stored in the comm page is unchanging
82 * after boot, portions of it are periodically updated during OS ticks.
83 * Changes to hres_lock during the course of the copy indicates a
84 * potentially inconsistent snapshot, necessitating a loop.
86 * Even more complicated is the handling for TSCs which require sync
87 * offsets between different CPUs. Since userspace lacks the luxury of
88 * disabling interrupts, a validation loop checking for CPU migrations
89 * is used. Pathological scheduling could, in theory, "outwit"
90 * this check. Such a possibility is considered an acceptable risk.
94 old_hres_lock
= cp
->cp_hres_lock
;
95 tsc_last
= cp
->cp_tsc_last
;
96 hrt
= cp
->cp_tsc_hrtime_base
;
97 tsc
= __cp_tsc_read(cp
);
98 } while ((old_hres_lock
& ~1) != cp
->cp_hres_lock
);
100 if (tsc
>= tsc_last
) {
102 } else if (tsc
>= tsc_last
- (2 * cp
->cp_tsc_max_delta
)) {
104 } else if (tsc
> cp
->cp_tsc_resume_cap
) {
105 tsc
= cp
->cp_tsc_resume_cap
;
107 TSC_CONVERT_AND_ADD(tsc
, hrt
, cp
->cp_nsec_scale
);
113 * Userspace version of pc_gethrestime.
114 * See: uts/i86pc/os/machdep.c
117 __cp_clock_gettime_realtime(comm_page_t
*cp
, timespec_t
*tsp
)
124 lock_prev
= cp
->cp_hres_lock
;
125 now
.tv_sec
= cp
->cp_hrestime
[0];
126 now
.tv_nsec
= cp
->cp_hrestime
[1];
127 nslt
= (int)(__cp_gethrtime(cp
) - cp
->cp_hres_last_tick
);
128 hres_adj
= cp
->cp_hrestime_adj
;
131 * Tick came between sampling hrtime and hres_last_tick;
138 * Apply hres_adj skew, if needed.
141 nslt
= (nslt
>> ADJ_SHIFT
);
143 nslt
= (int)hres_adj
;
145 } else if (hres_adj
< 0) {
146 nslt
= -(nslt
>> ADJ_SHIFT
);
148 nslt
= (int)hres_adj
;
153 * Rope in tv_nsec from any excessive adjustments.
155 while ((unsigned long)now
.tv_nsec
>= NANOSEC
) {
156 now
.tv_nsec
-= NANOSEC
;
160 if ((cp
->cp_hres_lock
& ~1) != lock_prev
)
168 * The __cp_clock_gettime_monotonic function expects that hrt2ts be present
169 * when the code is finally linked.
170 * (The amd64 version has no such requirement.)
172 extern void hrt2ts(hrtime_t
, timespec_t
*);
175 __cp_clock_gettime_monotonic(comm_page_t
*cp
, timespec_t
*tsp
)
179 hrt
= __cp_gethrtime(cp
);