/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
 */

#include <fmd_alloc.h>
#include <fmd_subr.h>
#include <fmd_conf.h>
#include <fmd_error.h>
#include <fmd_string.h>
#include <fmd_idspace.h>

#include <fmd.h>

fmd_idspace_t *
fmd_idspace_create(const char *name, id_t min, id_t max)
{
	fmd_idspace_t *ids = fmd_alloc(sizeof (fmd_idspace_t), FMD_SLEEP);
	uint_t ids_avg, ids_max, hashlen, hashmax;

	/*
	 * Dynamically size the hash table bucket array based on the desired
	 * chain length.  We hash by indexing on the low-order bits.
	 * Do not permit the hash bucket array to exceed a reasonable size.
	 */
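	/*
	 * For example, with an id range of [0 .. 1023] and ids.avg set to 4
	 * (illustrative values, not necessarily the configured defaults),
	 * hashmax is 1024 and hashmax / ids_avg is 256; 1 << fls() then
	 * rounds that up to a power-of-two bucket count, and the ids.max
	 * cap below keeps a very large id range from producing an
	 * oversized bucket array.
	 */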
	ASSERT(min >= 0 && max >= 0);

	(void) fmd_conf_getprop(fmd.d_conf, "ids.avg", &ids_avg);
	(void) fmd_conf_getprop(fmd.d_conf, "ids.max", &ids_max);

	hashmax = max - min + 1;
	hashlen = 1 << fls(hashmax / ids_avg);
	if (hashlen > ids_max)
		hashlen = ids_max;

	(void) strlcpy(ids->ids_name, name, sizeof (ids->ids_name));
	(void) pthread_mutex_init(&ids->ids_lock, NULL);
	(void) pthread_cond_init(&ids->ids_cv, NULL);

	ids->ids_hash = fmd_zalloc(sizeof (void *) * hashlen, FMD_SLEEP);
	ids->ids_hashlen = hashlen;
	ids->ids_refs = 0;
	ids->ids_nextid = min - 1;
	ids->ids_minid = min;
	ids->ids_maxid = max;
	ids->ids_count = 0;

	return (ids);
}

void
fmd_idspace_destroy(fmd_idspace_t *ids)
{
	fmd_idelem_t *ide, *nde;
	uint_t i;

	(void) pthread_mutex_lock(&ids->ids_lock);

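	/*
	 * Wait for any holds placed by fmd_idspace_hold() to be released
	 * before tearing down the hash table and freeing its elements.
	 */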
	while (ids->ids_refs != 0)
		(void) pthread_cond_wait(&ids->ids_cv, &ids->ids_lock);

	for (i = 0; i < ids->ids_hashlen; i++) {
		for (ide = ids->ids_hash[i]; ide != NULL; ide = nde) {
			nde = ide->ide_next;
			fmd_free(ide, sizeof (fmd_idelem_t));
		}
	}

	fmd_free(ids->ids_hash, sizeof (void *) * ids->ids_hashlen);
	fmd_free(ids, sizeof (fmd_idspace_t));
}

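/*
 * Apply func to every id in the idspace.  The ids are first snapshotted
 * into a temporary array while ids_lock is held so that func can then be
 * invoked without holding the lock.
 */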
void
fmd_idspace_apply(fmd_idspace_t *ids,
    void (*func)(fmd_idspace_t *, id_t, void *), void *arg)
{
	fmd_idelem_t *ide;
	id_t *ida, *idp;
	uint_t i, count;

	(void) pthread_mutex_lock(&ids->ids_lock);
	count = ids->ids_count;
	ida = idp = fmd_alloc(sizeof (id_t) * count, FMD_SLEEP);

	for (i = 0; i < ids->ids_hashlen; i++) {
		for (ide = ids->ids_hash[i]; ide != NULL; ide = ide->ide_next)
			*idp++ = ide->ide_id;
	}

	ASSERT(idp == ida + count);
	(void) pthread_mutex_unlock(&ids->ids_lock);

	for (i = 0; i < count; i++)
		func(ids, ida[i], arg);

	fmd_free(ida, sizeof (id_t) * count);
}

static fmd_idelem_t *
fmd_idspace_lookup(fmd_idspace_t *ids, id_t id)
{
	fmd_idelem_t *ide;

	ASSERT(MUTEX_HELD(&ids->ids_lock));
	ide = ids->ids_hash[id & (ids->ids_hashlen - 1)];

	for (; ide != NULL; ide = ide->ide_next) {
		if (ide->ide_id == id)
			break;
	}

	return (ide);
}

void *
fmd_idspace_getspecific(fmd_idspace_t *ids, id_t id)
{
	fmd_idelem_t *ide;
	void *data;

	(void) pthread_mutex_lock(&ids->ids_lock);
	ide = fmd_idspace_lookup(ids, id);
	data = ide ? ide->ide_data : NULL;
	(void) pthread_mutex_unlock(&ids->ids_lock);

	return (data);
}

void
fmd_idspace_setspecific(fmd_idspace_t *ids, id_t id, void *data)
{
	fmd_idelem_t *ide;

	(void) pthread_mutex_lock(&ids->ids_lock);

	while (ids->ids_refs != 0)
		(void) pthread_cond_wait(&ids->ids_cv, &ids->ids_lock);

	if ((ide = fmd_idspace_lookup(ids, id)) == NULL) {
		fmd_panic("idspace %p (%s) does not contain id %ld",
		    (void *)ids, ids->ids_name, id);
	}

	ide->ide_data = data;
	(void) pthread_mutex_unlock(&ids->ids_lock);
}

int
fmd_idspace_contains(fmd_idspace_t *ids, id_t id)
{
	fmd_idelem_t *ide;

	(void) pthread_mutex_lock(&ids->ids_lock);
	ide = fmd_idspace_lookup(ids, id);
	(void) pthread_mutex_unlock(&ids->ids_lock);

	return (ide != NULL);
}

int
fmd_idspace_valid(fmd_idspace_t *ids, id_t id)
{
	return (id >= ids->ids_minid && id <= ids->ids_maxid);
}

static id_t
fmd_idspace_xalloc_locked(fmd_idspace_t *ids, id_t id, void *data)
{
	fmd_idelem_t *ide;
	uint_t h;

	if (id < ids->ids_minid || id > ids->ids_maxid) {
		fmd_panic("%ld out of range [%ld .. %ld] for idspace %p (%s)\n",
		    id, ids->ids_minid, ids->ids_maxid,
		    (void *)ids, ids->ids_name);
	}

	if (fmd_idspace_lookup(ids, id) != NULL)
		return (fmd_set_errno(EALREADY));

	ide = fmd_alloc(sizeof (fmd_idelem_t), FMD_SLEEP);
	h = id & (ids->ids_hashlen - 1);

	ide->ide_next = ids->ids_hash[h];
	ide->ide_data = data;
	ide->ide_id = id;

	ids->ids_hash[h] = ide;
	ids->ids_count++;

	return (id);
}

id_t
fmd_idspace_xalloc(fmd_idspace_t *ids, id_t id, void *data)
{
	(void) pthread_mutex_lock(&ids->ids_lock);
	id = fmd_idspace_xalloc_locked(ids, id, data);
	(void) pthread_mutex_unlock(&ids->ids_lock);
	return (id);
}

static id_t
fmd_idspace_alloc_locked(fmd_idspace_t *ids, void *data)
{
	id_t id;

	ASSERT(MUTEX_HELD(&ids->ids_lock));

	if (ids->ids_count == ids->ids_maxid - ids->ids_minid + 1)
		return (fmd_set_errno(ENOSPC));

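	/*
	 * Search round-robin for an available id, starting just after the
	 * most recently allocated id and wrapping from ids_maxid back to
	 * ids_minid.  The ENOSPC check above guarantees at least one id is
	 * free, so the loop terminates.
	 */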
	do {
		if (++ids->ids_nextid > ids->ids_maxid)
			ids->ids_nextid = ids->ids_minid;
		id = ids->ids_nextid;
	} while (fmd_idspace_xalloc_locked(ids, id, data) != id);

	return (id);
}

id_t
fmd_idspace_alloc(fmd_idspace_t *ids, void *data)
{
	id_t id;

	(void) pthread_mutex_lock(&ids->ids_lock);
	id = fmd_idspace_alloc_locked(ids, data);
	(void) pthread_mutex_unlock(&ids->ids_lock);

	return (id);
}

/*
 * Allocate the lowest-numbered id that is currently available.  For the
 * moment, we use a simple but slow implementation: reset ids_nextid to
 * the minimum id and search in order from there.  If this becomes performance
 * sensitive we can maintain a freelist of the unallocated identifiers, etc.
 */
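/*
 * For example (illustrative only): in an idspace spanning [1 .. N] where
 * ids 1 and 2 have been freed but higher ids remain allocated,
 * fmd_idspace_alloc() keeps scanning forward from ids_nextid, whereas
 * fmd_idspace_alloc_min() restarts at ids_minid and returns id 1.
 */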
id_t
fmd_idspace_alloc_min(fmd_idspace_t *ids, void *data)
{
	id_t id;

	(void) pthread_mutex_lock(&ids->ids_lock);
	ids->ids_nextid = ids->ids_minid - 1;
	id = fmd_idspace_alloc_locked(ids, data);
	(void) pthread_mutex_unlock(&ids->ids_lock);

	return (id);
}

void *
fmd_idspace_free(fmd_idspace_t *ids, id_t id)
{
	fmd_idelem_t *ide, **pp;
	void *data;

	(void) pthread_mutex_lock(&ids->ids_lock);
	pp = &ids->ids_hash[id & (ids->ids_hashlen - 1)];

	for (ide = *pp; ide != NULL; ide = ide->ide_next) {
		if (ide->ide_id != id)
			pp = &ide->ide_next;
		else
			break;
	}

	if (ide == NULL) {
		(void) pthread_mutex_unlock(&ids->ids_lock);
		return (NULL);
	}

	data = ide->ide_data;
	*pp = ide->ide_next;
	fmd_free(ide, sizeof (fmd_idelem_t));

	ASSERT(ids->ids_count != 0);
	ids->ids_count--;

	(void) pthread_mutex_unlock(&ids->ids_lock);
	return (data);
}

/*
 * Retrieve the id-specific data for the specified id and place a hold on the
 * id so that it cannot be changed or deleted until fmd_idspace_rele(ids, id)
 * is called.  For simplicity, we now use a single global reference count for
 * all holds.  If this feature needs to be used in a place where there is high
 * contention between holders and deleters, the implementation can be modified
 * to use either a per-hash-bucket or a per-id-element condition variable.
 */
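/*
 * Illustrative usage sketch (not from any particular caller): a consumer
 * that must use the id-specific data without racing with
 * fmd_idspace_destroy() or fmd_idspace_setspecific() might do:
 *
 *	void *data;
 *
 *	if ((data = fmd_idspace_hold(ids, id)) != NULL) {
 *		... use data while the hold is in place ...
 *		fmd_idspace_rele(ids, id);
 *	}
 */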
void *
fmd_idspace_hold(fmd_idspace_t *ids, id_t id)
{
	fmd_idelem_t *ide;
	void *data = NULL;

	(void) pthread_mutex_lock(&ids->ids_lock);

	if ((ide = fmd_idspace_lookup(ids, id)) != NULL) {
		ids->ids_refs++;
		ASSERT(ids->ids_refs != 0);
		data = ide->ide_data;
		ASSERT(data != NULL);
	}

	(void) pthread_mutex_unlock(&ids->ids_lock);
	return (data);
}

void
fmd_idspace_rele(fmd_idspace_t *ids, id_t id)
{
	(void) pthread_mutex_lock(&ids->ids_lock);

	ASSERT(fmd_idspace_lookup(ids, id) != NULL);
	ASSERT(ids->ids_refs != 0);
	ids->ids_refs--;

	(void) pthread_cond_broadcast(&ids->ids_cv);
	(void) pthread_mutex_unlock(&ids->ids_lock);
}