/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * This file contains most of the functionality
 * required to support the threads portion of libc_db.
 */
34 #include "thr_uberdata.h"
/*
 * Event-notification stubs.  A debugger plants breakpoints on these
 * empty functions; libc calls the appropriate one (via the tdb_events
 * table below) when the corresponding thread event occurs.  Their
 * bodies are intentionally empty.
 *
 * NOTE(review): the garbled source dropped the "static void" lines,
 * leaving implicit-int declarations; restored here.
 */
static void
tdb_event_ready(void) {}

static void
tdb_event_sleep(void) {}

static void
tdb_event_switchto(void) {}

static void
tdb_event_switchfrom(void) {}

static void
tdb_event_lock_try(void) {}

static void
tdb_event_catchsig(void) {}

static void
tdb_event_idle(void) {}

static void
tdb_event_create(void) {}

static void
tdb_event_death(void) {}

static void
tdb_event_preempt(void) {}

static void
tdb_event_pri_inherit(void) {}

static void
tdb_event_reap(void) {}

static void
tdb_event_concurrency(void) {}

static void
tdb_event_timeout(void) {}
81 * uberflags.uf_tdb_register_sync is set to REGISTER_SYNC_ENABLE by a debugger
82 * to empty the table and then enable synchronization object registration.
84 * uberflags.uf_tdb_register_sync is set to REGISTER_SYNC_DISABLE by a debugger
85 * to empty the table and then disable synchronization object registration.
88 const tdb_ev_func_t tdb_events
[TD_MAX_EVENT_NUM
- TD_MIN_EVENT_NUM
+ 1] = {
99 tdb_event_pri_inherit
,
101 tdb_event_concurrency
,
105 #if TDB_HASH_SHIFT != 15
106 #error "this is all broken because TDB_HASH_SHIFT is not 15"
110 tdb_addr_hash(void *addr
)
113 * This knows for a fact that the hash table has
114 * 32K entries; that is, that TDB_HASH_SHIFT is 15.
117 uint64_t value60
= ((uintptr_t)addr
>> 4); /* 60 bits */
118 uint32_t value30
= (value60
>> 30) ^ (value60
& 0x3fffffff);
120 uint32_t value30
= ((uintptr_t)addr
>> 2); /* 30 bits */
122 return ((value30
>> 15) ^ (value30
& 0x7fff));
125 static tdb_sync_stats_t
*
126 alloc_sync_addr(void *addr
)
128 uberdata_t
*udp
= curthread
->ul_uberdata
;
129 tdb_t
*tdbp
= &udp
->tdb
;
130 tdb_sync_stats_t
*sap
;
132 ASSERT(MUTEX_OWNED(&udp
->tdb_hash_lock
, curthread
));
134 if ((sap
= tdbp
->tdb_sync_addr_free
) == NULL
) {
139 * Don't keep trying after mmap() has already failed.
141 if (tdbp
->tdb_hash_alloc_failed
)
144 /* double the allocation each time */
145 tdbp
->tdb_sync_alloc
*= 2;
146 if ((vaddr
= mmap(NULL
,
147 tdbp
->tdb_sync_alloc
* sizeof (tdb_sync_stats_t
),
148 PROT_READ
|PROT_WRITE
, MAP_PRIVATE
|MAP_ANON
,
149 -1, (off_t
)0)) == MAP_FAILED
) {
150 tdbp
->tdb_hash_alloc_failed
= 1;
153 sap
= tdbp
->tdb_sync_addr_free
= vaddr
;
154 for (i
= 1; i
< tdbp
->tdb_sync_alloc
; sap
++, i
++)
155 sap
->next
= (uintptr_t)(sap
+ 1);
156 sap
->next
= (uintptr_t)0;
157 tdbp
->tdb_sync_addr_last
= sap
;
159 sap
= tdbp
->tdb_sync_addr_free
;
162 tdbp
->tdb_sync_addr_free
= (tdb_sync_stats_t
*)(uintptr_t)sap
->next
;
163 sap
->next
= (uintptr_t)0;
164 sap
->sync_addr
= (uintptr_t)addr
;
165 (void) memset(&sap
->un
, 0, sizeof (sap
->un
));
170 initialize_sync_hash()
172 uberdata_t
*udp
= curthread
->ul_uberdata
;
173 tdb_t
*tdbp
= &udp
->tdb
;
175 tdb_sync_stats_t
*sap
;
179 if (tdbp
->tdb_hash_alloc_failed
)
181 lmutex_lock(&udp
->tdb_hash_lock
);
182 if (udp
->uberflags
.uf_tdb_register_sync
== REGISTER_SYNC_DISABLE
) {
184 * There is no point allocating the hash table
185 * if we are disabling registration.
187 udp
->uberflags
.uf_tdb_register_sync
= REGISTER_SYNC_OFF
;
188 lmutex_unlock(&udp
->tdb_hash_lock
);
191 if (tdbp
->tdb_sync_addr_hash
!= NULL
|| tdbp
->tdb_hash_alloc_failed
) {
192 lmutex_unlock(&udp
->tdb_hash_lock
);
195 /* start with a free list of 2k elements */
196 tdbp
->tdb_sync_alloc
= 2*1024;
197 if ((vaddr
= mmap(NULL
, TDB_HASH_SIZE
* sizeof (uint64_t) +
198 tdbp
->tdb_sync_alloc
* sizeof (tdb_sync_stats_t
),
199 PROT_READ
|PROT_WRITE
, MAP_PRIVATE
|MAP_ANON
,
200 -1, (off_t
)0)) == MAP_FAILED
) {
201 tdbp
->tdb_hash_alloc_failed
= 1;
206 /* initialize the free list */
207 tdbp
->tdb_sync_addr_free
= sap
=
208 (tdb_sync_stats_t
*)&addr_hash
[TDB_HASH_SIZE
];
209 for (i
= 1; i
< tdbp
->tdb_sync_alloc
; sap
++, i
++)
210 sap
->next
= (uintptr_t)(sap
+ 1);
211 sap
->next
= (uintptr_t)0;
212 tdbp
->tdb_sync_addr_last
= sap
;
214 /* insert &udp->tdb_hash_lock itself into the new (empty) table */
215 udp
->tdb_hash_lock_stats
.next
= (uintptr_t)0;
216 udp
->tdb_hash_lock_stats
.sync_addr
= (uintptr_t)&udp
->tdb_hash_lock
;
217 addr_hash
[tdb_addr_hash(&udp
->tdb_hash_lock
)] =
218 (uintptr_t)&udp
->tdb_hash_lock_stats
;
220 tdbp
->tdb_register_count
= 1;
221 /* assign to tdb_sync_addr_hash only after fully initialized */
223 tdbp
->tdb_sync_addr_hash
= addr_hash
;
224 lmutex_unlock(&udp
->tdb_hash_lock
);
228 tdb_sync_obj_register(void *addr
, int *new)
230 ulwp_t
*self
= curthread
;
231 uberdata_t
*udp
= self
->ul_uberdata
;
232 tdb_t
*tdbp
= &udp
->tdb
;
234 tdb_sync_stats_t
*sap
= NULL
;
239 * Don't start statistics collection until
240 * we have initialized the primary link map.
242 if (!self
->ul_primarymap
)
248 * To avoid recursion problems, we must do two things:
249 * 1. Make a special case for tdb_hash_lock (we use it internally).
250 * 2. Deal with the dynamic linker's lock interface:
251 * When calling any external function, we may invoke the
252 * dynamic linker. It grabs a lock, which calls back here.
253 * This only happens on the first call to the external
254 * function, so we can just return NULL if we are called
255 * recursively (and miss the first count).
257 if (addr
== (void *)&udp
->tdb_hash_lock
)
258 return (&udp
->tdb_hash_lock_stats
);
259 if (self
->ul_sync_obj_reg
) /* recursive call */
261 self
->ul_sync_obj_reg
= 1;
264 * On the first time through, initialize the hash table and free list.
266 if (tdbp
->tdb_sync_addr_hash
== NULL
) {
267 initialize_sync_hash();
268 if (tdbp
->tdb_sync_addr_hash
== NULL
) { /* utter failure */
269 udp
->uberflags
.uf_tdb_register_sync
= REGISTER_SYNC_OFF
;
275 sapp
= &tdbp
->tdb_sync_addr_hash
[tdb_addr_hash(addr
)];
276 if (udp
->uberflags
.uf_tdb_register_sync
== REGISTER_SYNC_ON
) {
278 * Look up an address in the synchronization object hash table.
279 * No lock is required since it can only deliver a false
280 * negative, in which case we fall into the locked case below.
282 for (sap
= (tdb_sync_stats_t
*)(uintptr_t)*sapp
; sap
!= NULL
;
283 sap
= (tdb_sync_stats_t
*)(uintptr_t)sap
->next
) {
284 if (sap
->sync_addr
== (uintptr_t)addr
)
290 * The search with no lock held failed or a special action is required.
291 * Grab tdb_hash_lock to do special actions and/or get a precise result.
293 lmutex_lock(&udp
->tdb_hash_lock
);
296 switch (udp
->uberflags
.uf_tdb_register_sync
) {
297 case REGISTER_SYNC_ON
:
299 case REGISTER_SYNC_OFF
:
303 * For all debugger actions, first zero out the
304 * statistics block of every element in the hash table.
306 for (i
= 0; i
< TDB_HASH_SIZE
; i
++)
307 for (sap
= (tdb_sync_stats_t
*)
308 (uintptr_t)tdbp
->tdb_sync_addr_hash
[i
];
310 sap
= (tdb_sync_stats_t
*)(uintptr_t)sap
->next
)
311 (void) memset(&sap
->un
, 0, sizeof (sap
->un
));
313 switch (udp
->uberflags
.uf_tdb_register_sync
) {
314 case REGISTER_SYNC_ENABLE
:
315 udp
->uberflags
.uf_tdb_register_sync
= REGISTER_SYNC_ON
;
317 case REGISTER_SYNC_DISABLE
:
319 udp
->uberflags
.uf_tdb_register_sync
= REGISTER_SYNC_OFF
;
326 * Perform the search while holding tdb_hash_lock.
327 * Keep track of the insertion point.
329 while ((sap
= (tdb_sync_stats_t
*)(uintptr_t)*sapp
) != NULL
) {
330 if (sap
->sync_addr
== (uintptr_t)addr
)
336 * Insert a new element if necessary.
338 if (sap
== NULL
&& (sap
= alloc_sync_addr(addr
)) != NULL
) {
339 *sapp
= (uintptr_t)sap
;
340 tdbp
->tdb_register_count
++;
347 lmutex_unlock(&udp
->tdb_hash_lock
);
348 self
->ul_sync_obj_reg
= 0;
353 tdb_sync_obj_deregister(void *addr
)
355 uberdata_t
*udp
= curthread
->ul_uberdata
;
356 tdb_t
*tdbp
= &udp
->tdb
;
358 tdb_sync_stats_t
*sap
;
362 * tdb_hash_lock is never destroyed.
364 ASSERT(addr
!= &udp
->tdb_hash_lock
);
367 * Avoid acquiring tdb_hash_lock if lock statistics gathering has
368 * never been initiated or there is nothing in the hash bucket.
369 * (Once the hash table is allocated, it is never deallocated.)
371 if (tdbp
->tdb_sync_addr_hash
== NULL
||
372 tdbp
->tdb_sync_addr_hash
[hash
= tdb_addr_hash(addr
)] == 0)
375 lmutex_lock(&udp
->tdb_hash_lock
);
376 sapp
= &tdbp
->tdb_sync_addr_hash
[hash
];
377 while ((sap
= (tdb_sync_stats_t
*)(uintptr_t)*sapp
) != NULL
) {
378 if (sap
->sync_addr
== (uintptr_t)addr
) {
379 /* remove it from the hash table */
381 tdbp
->tdb_register_count
--;
383 sap
->next
= (uintptr_t)NULL
;
384 sap
->sync_addr
= (uintptr_t)NULL
;
385 /* insert it on the tail of the free list */
386 if (tdbp
->tdb_sync_addr_free
== NULL
) {
387 tdbp
->tdb_sync_addr_free
= sap
;
388 tdbp
->tdb_sync_addr_last
= sap
;
390 tdbp
->tdb_sync_addr_last
->next
= (uintptr_t)sap
;
391 tdbp
->tdb_sync_addr_last
= sap
;
397 lmutex_unlock(&udp
->tdb_hash_lock
);
401 * Return a mutex statistics block for the given mutex.
404 tdb_mutex_stats(mutex_t
*mp
)
406 tdb_sync_stats_t
*tssp
;
408 /* avoid stealing the cache line unnecessarily */
409 if (mp
->mutex_magic
!= MUTEX_MAGIC
)
410 mp
->mutex_magic
= MUTEX_MAGIC
;
411 if ((tssp
= tdb_sync_obj_register(mp
, NULL
)) == NULL
)
413 tssp
->un
.type
= TDB_MUTEX
;
414 return (&tssp
->un
.mutex
);
418 * Return a condvar statistics block for the given condvar.
421 tdb_cond_stats(cond_t
*cvp
)
423 tdb_sync_stats_t
*tssp
;
425 /* avoid stealing the cache line unnecessarily */
426 if (cvp
->cond_magic
!= COND_MAGIC
)
427 cvp
->cond_magic
= COND_MAGIC
;
428 if ((tssp
= tdb_sync_obj_register(cvp
, NULL
)) == NULL
)
430 tssp
->un
.type
= TDB_COND
;
431 return (&tssp
->un
.cond
);
435 * Return an rwlock statistics block for the given rwlock.
438 tdb_rwlock_stats(rwlock_t
*rwlp
)
440 tdb_sync_stats_t
*tssp
;
442 /* avoid stealing the cache line unnecessarily */
443 if (rwlp
->magic
!= RWL_MAGIC
)
444 rwlp
->magic
= RWL_MAGIC
;
445 if ((tssp
= tdb_sync_obj_register(rwlp
, NULL
)) == NULL
)
447 tssp
->un
.type
= TDB_RWLOCK
;
448 return (&tssp
->un
.rwlock
);
452 * Return a semaphore statistics block for the given semaphore.
455 tdb_sema_stats(sema_t
*sp
)
457 tdb_sync_stats_t
*tssp
;
460 /* avoid stealing the cache line unnecessarily */
461 if (sp
->magic
!= SEMA_MAGIC
)
462 sp
->magic
= SEMA_MAGIC
;
463 if ((tssp
= tdb_sync_obj_register(sp
, &new)) == NULL
)
465 tssp
->un
.type
= TDB_SEMA
;
467 tssp
->un
.sema
.sema_max_count
= sp
->count
;
468 tssp
->un
.sema
.sema_min_count
= sp
->count
;
470 return (&tssp
->un
.sema
);