/*
 * Copyright (C) 2009  Internet Systems Consortium, Inc. ("ISC")
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
 * REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
 * AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
 * INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
 * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
 * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/* $Id: stats.c,v 1.3.6.2 2009/01/29 23:47:44 tbox Exp $ */

#include <config.h>

#include <string.h>

#include <isc/atomic.h>
#include <isc/buffer.h>
#include <isc/magic.h>
#include <isc/mem.h>
#include <isc/mutex.h>
#include <isc/platform.h>
#include <isc/print.h>
#include <isc/rwlock.h>
#include <isc/stats.h>
#include <isc/util.h>

#define ISC_STATS_MAGIC			ISC_MAGIC('S', 't', 'a', 't')
#define ISC_STATS_VALID(x)		ISC_MAGIC_VALID(x, ISC_STATS_MAGIC)

#ifndef ISC_STATS_USEMULTIFIELDS
#if defined(ISC_RWLOCK_USEATOMIC) && defined(ISC_PLATFORM_HAVEXADD) && \
	!defined(ISC_PLATFORM_HAVEXADDQ)
#define ISC_STATS_USEMULTIFIELDS 1
#else
#define ISC_STATS_USEMULTIFIELDS 0
#endif
#endif	/* ISC_STATS_USEMULTIFIELDS */

#if ISC_STATS_USEMULTIFIELDS
typedef struct {
	isc_uint32_t hi;
	isc_uint32_t lo;
} isc_stat_t;
#else
typedef isc_uint64_t isc_stat_t;
#endif
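
/*
 * Note on the multifield representation (explanatory comment; the
 * rationale is inferred from the #if condition above): platforms that
 * provide a 32-bit atomic xadd but no 64-bit xaddq still need 64-bit
 * counters, so each counter is kept as two 32-bit halves.
 * incrementcounter() updates the halves atomically and copy_counters()
 * reassembles them under an exclusive lock.
 */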

struct isc_stats {
	/*% Unlocked */
	unsigned int	magic;
	isc_mem_t	*mctx;
	int		ncounters;

	isc_mutex_t	lock;
	unsigned int	references; /* locked by lock */

	/*%
	 * Locked by counterlock, or unlocked if an efficient rwlock is not
	 * available.
	 */
#ifdef ISC_RWLOCK_USEATOMIC
	isc_rwlock_t	counterlock;
#endif
	isc_stat_t	*counters;

	/*%
	 * We don't want to lock the counters while we are dumping, so we
	 * first copy the current counter values into a local array.  This
	 * buffer will be used as the copy destination.  It's allocated on
	 * creation of the stats structure so that the dump operation won't
	 * fail due to memory allocation failure.
	 * XXX: this approach is weird for non-threaded builds because the
	 * additional memory and the copy overhead could be avoided.  We
	 * prefer simplicity here, however, under the assumption that dumping
	 * should only rarely be called.
	 */
	isc_uint64_t	*copiedcounters;
};

static isc_result_t
create_stats(isc_mem_t *mctx, int ncounters, isc_stats_t **statsp) {
	isc_stats_t *stats;
	isc_result_t result = ISC_R_SUCCESS;

	REQUIRE(statsp != NULL && *statsp == NULL);

	stats = isc_mem_get(mctx, sizeof(*stats));
	if (stats == NULL)
		return (ISC_R_NOMEMORY);

	result = isc_mutex_init(&stats->lock);
	if (result != ISC_R_SUCCESS)
		goto clean_stats;

	stats->counters = isc_mem_get(mctx, sizeof(isc_stat_t) * ncounters);
	if (stats->counters == NULL) {
		result = ISC_R_NOMEMORY;
		goto clean_mutex;
	}
	stats->copiedcounters = isc_mem_get(mctx,
					    sizeof(isc_uint64_t) * ncounters);
	if (stats->copiedcounters == NULL) {
		result = ISC_R_NOMEMORY;
		goto clean_counters;
	}

#ifdef ISC_RWLOCK_USEATOMIC
	result = isc_rwlock_init(&stats->counterlock, 0, 0);
	if (result != ISC_R_SUCCESS)
		goto clean_copiedcounters;
#endif

	stats->references = 1;
	memset(stats->counters, 0, sizeof(isc_stat_t) * ncounters);
	stats->mctx = NULL;
	isc_mem_attach(mctx, &stats->mctx);
	stats->ncounters = ncounters;
	stats->magic = ISC_STATS_MAGIC;

	*statsp = stats;

	return (result);

	/*
	 * Error cleanup: each label falls through to the next so that a
	 * failure at any step releases exactly what was acquired before it.
	 */
#ifdef ISC_RWLOCK_USEATOMIC
clean_copiedcounters:
	isc_mem_put(mctx, stats->copiedcounters,
		    sizeof(isc_uint64_t) * ncounters);
#endif

clean_counters:
	isc_mem_put(mctx, stats->counters, sizeof(isc_stat_t) * ncounters);

clean_mutex:
	DESTROYLOCK(&stats->lock);

clean_stats:
	isc_mem_put(mctx, stats, sizeof(*stats));

	return (result);
}

void
isc_stats_attach(isc_stats_t *stats, isc_stats_t **statsp) {
	REQUIRE(ISC_STATS_VALID(stats));
	REQUIRE(statsp != NULL && *statsp == NULL);

	LOCK(&stats->lock);
	stats->references++;
	UNLOCK(&stats->lock);

	*statsp = stats;
}

void
isc_stats_detach(isc_stats_t **statsp) {
	isc_stats_t *stats;

	REQUIRE(statsp != NULL && ISC_STATS_VALID(*statsp));

	stats = *statsp;
	*statsp = NULL;

	LOCK(&stats->lock);
	stats->references--;
	UNLOCK(&stats->lock);

	if (stats->references == 0) {
		isc_mem_put(stats->mctx, stats->copiedcounters,
			    sizeof(isc_uint64_t) * stats->ncounters);
		isc_mem_put(stats->mctx, stats->counters,
			    sizeof(isc_stat_t) * stats->ncounters);
		DESTROYLOCK(&stats->lock);
#ifdef ISC_RWLOCK_USEATOMIC
		isc_rwlock_destroy(&stats->counterlock);
#endif
		isc_mem_putanddetach(&stats->mctx, stats, sizeof(*stats));
	}
}

int
isc_stats_ncounters(isc_stats_t *stats) {
	REQUIRE(ISC_STATS_VALID(stats));

	return (stats->ncounters);
}

static inline void
incrementcounter(isc_stats_t *stats, int counter) {
	isc_int32_t prev;

#ifdef ISC_RWLOCK_USEATOMIC
	/*
	 * We use a "read" lock to prevent other threads from reading the
	 * counter while we are "writing" a counter field.  The write access
	 * itself is protected by the atomic operation.
	 */
	isc_rwlock_lock(&stats->counterlock, isc_rwlocktype_read);
#endif

#if ISC_STATS_USEMULTIFIELDS
	prev = isc_atomic_xadd((isc_int32_t *)&stats->counters[counter].lo, 1);
	/*
	 * If the lower 32-bit field overflows, increment the higher field.
	 * Note that it's *theoretically* possible that the lower field
	 * overflows again before the higher field is incremented.  It
	 * doesn't matter, however, because we don't read the value until
	 * copy_counters() is called, where the whole process is protected
	 * by the write (exclusive) lock.
	 */
	if (prev == (isc_int32_t)0xffffffff)
		isc_atomic_xadd((isc_int32_t *)&stats->counters[counter].hi,
				1);
#elif defined(ISC_PLATFORM_HAVEXADDQ)
	UNUSED(prev);
	isc_atomic_xaddq((isc_int64_t *)&stats->counters[counter], 1);
#else
	UNUSED(prev);
	stats->counters[counter]++;
#endif

#ifdef ISC_RWLOCK_USEATOMIC
	isc_rwlock_unlock(&stats->counterlock, isc_rwlocktype_read);
#endif
}

static inline void
decrementcounter(isc_stats_t *stats, int counter) {
	isc_int32_t prev;

#ifdef ISC_RWLOCK_USEATOMIC
	isc_rwlock_lock(&stats->counterlock, isc_rwlocktype_read);
#endif

#if ISC_STATS_USEMULTIFIELDS
	prev = isc_atomic_xadd((isc_int32_t *)&stats->counters[counter].lo,
			       -1);
	/*
	 * If the lower field underflows (wraps from 0 to 0xffffffff),
	 * borrow from the higher field.
	 */
	if (prev == 0)
		isc_atomic_xadd((isc_int32_t *)&stats->counters[counter].hi,
				-1);
#elif defined(ISC_PLATFORM_HAVEXADDQ)
	UNUSED(prev);
	isc_atomic_xaddq((isc_int64_t *)&stats->counters[counter], -1);
#else
	UNUSED(prev);
	stats->counters[counter]--;
#endif

#ifdef ISC_RWLOCK_USEATOMIC
	isc_rwlock_unlock(&stats->counterlock, isc_rwlocktype_read);
#endif
}

static void
copy_counters(isc_stats_t *stats) {
	int i;

#ifdef ISC_RWLOCK_USEATOMIC
	/*
	 * We use a "write" lock before "reading" the statistics counters as
	 * an exclusive lock.
	 */
	isc_rwlock_lock(&stats->counterlock, isc_rwlocktype_write);
#endif

#if ISC_STATS_USEMULTIFIELDS
	for (i = 0; i < stats->ncounters; i++) {
		stats->copiedcounters[i] =
			(isc_uint64_t)(stats->counters[i].hi) << 32 |
			stats->counters[i].lo;
	}
#else
	UNUSED(i);
	memcpy(stats->copiedcounters, stats->counters,
	       stats->ncounters * sizeof(isc_stat_t));
#endif

#ifdef ISC_RWLOCK_USEATOMIC
	isc_rwlock_unlock(&stats->counterlock, isc_rwlocktype_write);
#endif
}

isc_result_t
isc_stats_create(isc_mem_t *mctx, isc_stats_t **statsp, int ncounters) {
	REQUIRE(statsp != NULL && *statsp == NULL);

	return (create_stats(mctx, ncounters, statsp));
}

void
isc_stats_increment(isc_stats_t *stats, isc_statscounter_t counter) {
	REQUIRE(ISC_STATS_VALID(stats));
	REQUIRE(counter < stats->ncounters);

	incrementcounter(stats, (int)counter);
}

void
isc_stats_decrement(isc_stats_t *stats, isc_statscounter_t counter) {
	REQUIRE(ISC_STATS_VALID(stats));
	REQUIRE(counter < stats->ncounters);

	decrementcounter(stats, (int)counter);
}

void
isc_stats_dump(isc_stats_t *stats, isc_stats_dumper_t dump_fn,
	       void *arg, unsigned int options)
{
	int i;

	REQUIRE(ISC_STATS_VALID(stats));

	copy_counters(stats);

	for (i = 0; i < stats->ncounters; i++) {
		if ((options & ISC_STATSDUMP_VERBOSE) == 0 &&
		    stats->copiedcounters[i] == 0)
			continue;
		dump_fn((isc_statscounter_t)i, stats->copiedcounters[i], arg);
	}
}