/* $NetBSD$ */

/*
 * Copyright (C) 2009 Internet Systems Consortium, Inc. ("ISC")
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
 * REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
 * AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
 * INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
 * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
 * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/* Id: stats.c,v 1.3.6.2 2009/01/29 23:47:44 tbox Exp */

/*! \file */

#include <config.h>

#include <string.h>

#include <isc/atomic.h>
#include <isc/buffer.h>
#include <isc/magic.h>
#include <isc/mem.h>
#include <isc/platform.h>
#include <isc/print.h>
#include <isc/rwlock.h>
#include <isc/stats.h>
#include <isc/util.h>

#define ISC_STATS_MAGIC			ISC_MAGIC('S', 't', 'a', 't')
#define ISC_STATS_VALID(x)		ISC_MAGIC_VALID(x, ISC_STATS_MAGIC)

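/*
 * A 64-bit counter ideally wants a 64-bit atomic increment.  If the
 * platform provides only a 32-bit atomic add (XADD, no XADDQ) but an
 * atomic-operation-aware rwlock is available, each counter is instead
 * kept in two 32-bit fields updated with 32-bit atomics; see isc_stat_t
 * below.
 */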
#ifndef ISC_STATS_USEMULTIFIELDS
#if defined(ISC_RWLOCK_USEATOMIC) && defined(ISC_PLATFORM_HAVEXADD) && !defined(ISC_PLATFORM_HAVEXADDQ)
#define ISC_STATS_USEMULTIFIELDS 1
#else
#define ISC_STATS_USEMULTIFIELDS 0
#endif
#endif	/* ISC_STATS_USEMULTIFIELDS */

#if ISC_STATS_USEMULTIFIELDS
typedef struct {
	isc_uint32_t hi;
	isc_uint32_t lo;
} isc_stat_t;
#else
typedef isc_uint64_t isc_stat_t;
#endif

struct isc_stats {
	/*% Unlocked */
	unsigned int	magic;
	isc_mem_t	*mctx;
	int		ncounters;

	isc_mutex_t	lock;
	unsigned int	references; /* locked by lock */

	/*%
	 * Locked by counterlock or unlocked if efficient rwlock is not
	 * available.
	 */
#ifdef ISC_RWLOCK_USEATOMIC
	isc_rwlock_t	counterlock;
#endif
	isc_stat_t	*counters;

	/*%
	 * We don't want to lock the counters while we are dumping, so we first
	 * copy the current counter values into a local array.  This buffer
	 * will be used as the copy destination.  It's allocated on creation
	 * of the stats structure so that the dump operation won't fail due
	 * to memory allocation failure.
	 * XXX: this approach is weird for non-threaded build because the
	 * additional memory and the copy overhead could be avoided.  We prefer
	 * simplicity here, however, under the assumption that this function
	 * should be only rarely called.
	 */
	isc_uint64_t	*copiedcounters;
};

static isc_result_t
create_stats(isc_mem_t *mctx, int ncounters, isc_stats_t **statsp) {
	isc_stats_t *stats;
	isc_result_t result = ISC_R_SUCCESS;

	REQUIRE(statsp != NULL && *statsp == NULL);

	stats = isc_mem_get(mctx, sizeof(*stats));
	if (stats == NULL)
		return (ISC_R_NOMEMORY);

	result = isc_mutex_init(&stats->lock);
	if (result != ISC_R_SUCCESS)
		goto clean_stats;

	stats->counters = isc_mem_get(mctx, sizeof(isc_stat_t) * ncounters);
	if (stats->counters == NULL) {
		result = ISC_R_NOMEMORY;
		goto clean_mutex;
	}
	stats->copiedcounters = isc_mem_get(mctx,
					    sizeof(isc_uint64_t) * ncounters);
	if (stats->copiedcounters == NULL) {
		result = ISC_R_NOMEMORY;
		goto clean_counters;
	}

#ifdef ISC_RWLOCK_USEATOMIC
	result = isc_rwlock_init(&stats->counterlock, 0, 0);
	if (result != ISC_R_SUCCESS)
		goto clean_copiedcounters;
#endif

	stats->references = 1;
	memset(stats->counters, 0, sizeof(isc_stat_t) * ncounters);
	stats->mctx = NULL;
	isc_mem_attach(mctx, &stats->mctx);
	stats->ncounters = ncounters;
	stats->magic = ISC_STATS_MAGIC;

	*statsp = stats;

	return (result);

	/*
	 * Unwind in reverse order of allocation; each label falls through
	 * to release everything allocated before it.
	 */
#ifdef ISC_RWLOCK_USEATOMIC
 clean_copiedcounters:
	isc_mem_put(mctx, stats->copiedcounters,
		    sizeof(isc_uint64_t) * ncounters);
#endif

 clean_counters:
	isc_mem_put(mctx, stats->counters, sizeof(isc_stat_t) * ncounters);

 clean_mutex:
	DESTROYLOCK(&stats->lock);

 clean_stats:
	isc_mem_put(mctx, stats, sizeof(*stats));

	return (result);
}

void
isc_stats_attach(isc_stats_t *stats, isc_stats_t **statsp) {
	REQUIRE(ISC_STATS_VALID(stats));
	REQUIRE(statsp != NULL && *statsp == NULL);

	LOCK(&stats->lock);
	stats->references++;
	UNLOCK(&stats->lock);

	*statsp = stats;
}

void
isc_stats_detach(isc_stats_t **statsp) {
	isc_stats_t *stats;
	unsigned int references;

	REQUIRE(statsp != NULL && ISC_STATS_VALID(*statsp));

	stats = *statsp;
	*statsp = NULL;

	/* Read the updated count while still holding the lock. */
	LOCK(&stats->lock);
	references = --stats->references;
	UNLOCK(&stats->lock);

	if (references == 0) {
		isc_mem_put(stats->mctx, stats->copiedcounters,
			    sizeof(isc_uint64_t) * stats->ncounters);
		isc_mem_put(stats->mctx, stats->counters,
			    sizeof(isc_stat_t) * stats->ncounters);
		DESTROYLOCK(&stats->lock);
#ifdef ISC_RWLOCK_USEATOMIC
		isc_rwlock_destroy(&stats->counterlock);
#endif
		isc_mem_putanddetach(&stats->mctx, stats, sizeof(*stats));
	}
}

int
isc_stats_ncounters(isc_stats_t *stats) {
	REQUIRE(ISC_STATS_VALID(stats));

	return (stats->ncounters);
}

static inline void
incrementcounter(isc_stats_t *stats, int counter) {
	isc_int32_t prev;

#ifdef ISC_RWLOCK_USEATOMIC
	/*
	 * We use a "read" lock to prevent other threads from reading the
	 * counter while we are "writing" a counter field.  The write access
	 * itself is protected by the atomic operation.
	 */
	isc_rwlock_lock(&stats->counterlock, isc_rwlocktype_read);
#endif

#if ISC_STATS_USEMULTIFIELDS
	prev = isc_atomic_xadd((isc_int32_t *)&stats->counters[counter].lo, 1);
	/*
	 * If the lower 32-bit field overflows, increment the higher field.
	 * Note that it's *theoretically* possible that the lower field
	 * overflows again before the higher field is incremented.  It doesn't
	 * matter, however, because we don't read the value until
	 * copy_counters() is called, where the whole process is protected
	 * by the write (exclusive) lock.
	 */
	if (prev == (isc_int32_t)0xffffffff)
		isc_atomic_xadd((isc_int32_t *)&stats->counters[counter].hi, 1);
#elif defined(ISC_PLATFORM_HAVEXADDQ)
	UNUSED(prev);
	isc_atomic_xaddq((isc_int64_t *)&stats->counters[counter], 1);
#else
	UNUSED(prev);
	stats->counters[counter]++;
#endif

#ifdef ISC_RWLOCK_USEATOMIC
	isc_rwlock_unlock(&stats->counterlock, isc_rwlocktype_read);
#endif
}

static inline void
decrementcounter(isc_stats_t *stats, int counter) {
	isc_int32_t prev;

#ifdef ISC_RWLOCK_USEATOMIC
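	/* See incrementcounter() for why a "read" lock suffices here. */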
	isc_rwlock_lock(&stats->counterlock, isc_rwlocktype_read);
#endif

#if ISC_STATS_USEMULTIFIELDS
	prev = isc_atomic_xadd((isc_int32_t *)&stats->counters[counter].lo, -1);
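	/*
	 * If the lower 32-bit field was zero before the decrement, it has
	 * wrapped around, so borrow one from the higher field.  As in
	 * incrementcounter(), a transiently inconsistent pair is harmless
	 * because readers take the write (exclusive) lock.
	 */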
	if (prev == 0)
		isc_atomic_xadd((isc_int32_t *)&stats->counters[counter].hi,
				-1);
#elif defined(ISC_PLATFORM_HAVEXADDQ)
	UNUSED(prev);
	isc_atomic_xaddq((isc_int64_t *)&stats->counters[counter], -1);
#else
	UNUSED(prev);
	stats->counters[counter]--;
#endif

#ifdef ISC_RWLOCK_USEATOMIC
	isc_rwlock_unlock(&stats->counterlock, isc_rwlocktype_read);
#endif
}

static void
copy_counters(isc_stats_t *stats) {
	int i;

#ifdef ISC_RWLOCK_USEATOMIC
	/*
	 * We take the "write" lock before "reading" the statistics counters,
	 * using it as an exclusive lock against the updating threads.
	 */
	isc_rwlock_lock(&stats->counterlock, isc_rwlocktype_write);
#endif

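	/*
	 * With multifields, reassemble each 64-bit value from its two
	 * 32-bit halves as (hi << 32) | lo; the exclusive lock above keeps
	 * both halves stable while we read them.
	 */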
#if ISC_STATS_USEMULTIFIELDS
	for (i = 0; i < stats->ncounters; i++) {
		stats->copiedcounters[i] =
			(isc_uint64_t)(stats->counters[i].hi) << 32 |
			stats->counters[i].lo;
	}
#else
	UNUSED(i);
	memcpy(stats->copiedcounters, stats->counters,
	       stats->ncounters * sizeof(isc_stat_t));
#endif

#ifdef ISC_RWLOCK_USEATOMIC
	isc_rwlock_unlock(&stats->counterlock, isc_rwlocktype_write);
#endif
}

isc_result_t
isc_stats_create(isc_mem_t *mctx, isc_stats_t **statsp, int ncounters) {
	REQUIRE(statsp != NULL && *statsp == NULL);

	return (create_stats(mctx, ncounters, statsp));
}

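/*
 * Illustrative usage sketch (not part of this module; "mctx" and the
 * print_counter callback are hypothetical):
 *
 *	static void
 *	print_counter(isc_statscounter_t id, isc_uint64_t val, void *arg) {
 *		UNUSED(arg);
 *		printf("counter %d = %llu\n", (int)id,
 *		       (unsigned long long)val);
 *	}
 *
 *	isc_stats_t *stats = NULL;
 *	RUNTIME_CHECK(isc_stats_create(mctx, &stats, 4) == ISC_R_SUCCESS);
 *	isc_stats_increment(stats, 0);
 *	isc_stats_dump(stats, print_counter, NULL, ISC_STATSDUMP_VERBOSE);
 *	isc_stats_detach(&stats);
 */
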
void
isc_stats_increment(isc_stats_t *stats, isc_statscounter_t counter) {
	REQUIRE(ISC_STATS_VALID(stats));
	REQUIRE(counter < stats->ncounters);

	incrementcounter(stats, (int)counter);
}

void
isc_stats_decrement(isc_stats_t *stats, isc_statscounter_t counter) {
	REQUIRE(ISC_STATS_VALID(stats));
	REQUIRE(counter < stats->ncounters);

	decrementcounter(stats, (int)counter);
}

void
isc_stats_dump(isc_stats_t *stats, isc_stats_dumper_t dump_fn,
	       void *arg, unsigned int options)
{
	int i;

	REQUIRE(ISC_STATS_VALID(stats));

	copy_counters(stats);

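	/*
	 * Report from the snapshot taken above; unless the caller asked
	 * for a verbose dump, counters that are still zero are skipped.
	 */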
	for (i = 0; i < stats->ncounters; i++) {
		if ((options & ISC_STATSDUMP_VERBOSE) == 0 &&
		    stats->copiedcounters[i] == 0)
			continue;
		dump_fn((isc_statscounter_t)i, stats->copiedcounters[i], arg);
	}
}