/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright (c) 2014, Joyent, Inc. All rights reserved.
 */

#include "thr_uberdata.h"
#include <thread_db.h>

struct td_thragent {
	rwlock_t	rwlock;		/* protects this structure */
	struct ps_prochandle *ph_p;
	int		initialized;	/* 0:no 1:partially 2:fully */
	int		sync_tracking;	/* non-zero for sync tracking */
	int		model;
	int		primary_map;
	psaddr_t	bootstrap_addr;
	psaddr_t	uberdata_addr;
	psaddr_t	tdb_eventmask_addr;
	psaddr_t	tdb_register_sync_addr;
	psaddr_t	tdb_events[TD_MAX_EVENT_NUM - TD_MIN_EVENT_NUM + 1];
	psaddr_t	hash_table_addr;
	int		hash_size;
	lwpid_t		single_lwpid;
	psaddr_t	single_ulwp_addr;
};

/*
 * This is the name of the variable in libc that contains
 * the uberdata address that we will need.
 */
#define	TD_BOOTSTRAP_NAME	"_tdb_bootstrap"
/*
 * This is the actual name of uberdata, used in the event
 * that tdb_bootstrap has not yet been initialized.
 */
#define	TD_UBERDATA_NAME	"_uberdata"
/*
 * The library name should end with ".so.1", but older versions of
 * dbx expect the unadorned name and malfunction if ".1" is specified.
 * Unfortunately, if ".1" is not specified, mdb malfunctions when it
 * is applied to another instance of itself (due to the presence of
 * /usr/lib/mdb/proc/libc.so).  So we try it both ways.
 */
#define	TD_LIBRARY_NAME		"libc.so"
#define	TD_LIBRARY_NAME_1	"libc.so.1"

td_err_e __td_thr_get_info(td_thrhandle_t *th_p, td_thrinfo_t *ti_p);

td_err_e __td_ta_thr_iter(td_thragent_t *ta_p, td_thr_iter_f *cb,
	void *cbdata_p, td_thr_state_e state, int ti_pri,
	sigset_t *ti_sigmask_p, unsigned ti_user_flags);

/*
 * Initialize threads debugging interface.
 */
#pragma weak td_init = __td_init
td_err_e
__td_init(void)
{
	return (TD_OK);
}

/*
 * This function does nothing, and never did.
 * But the symbol is in the ABI, so we can't delete it.
 */
#pragma weak td_log = __td_log
void
__td_log(void)
{
}

/*
 * Short-cut to read just the hash table size from the process,
 * to avoid repeatedly reading the full uberdata structure when
 * dealing with a single-threaded process.
 */
static uint_t
td_read_hash_size(td_thragent_t *ta_p)
{
	psaddr_t addr;
	uint_t hash_size;

	switch (ta_p->initialized) {
	default:	/* uninitialized */
		return (0);
	case 1:		/* partially initialized */
		break;
	case 2:		/* fully initialized */
		return (ta_p->hash_size);
	}

	if (ta_p->model == PR_MODEL_NATIVE) {
		addr = ta_p->uberdata_addr + offsetof(uberdata_t, hash_size);
	} else {
#if defined(_LP64) && defined(_SYSCALL32)
		addr = ta_p->uberdata_addr + offsetof(uberdata32_t, hash_size);
#else
		addr = 0;
#endif
	}
	if (ps_pdread(ta_p->ph_p, addr, &hash_size, sizeof (hash_size))
	    != PS_OK)
		return (0);
	return (hash_size);
}

static td_err_e
td_read_uberdata(td_thragent_t *ta_p)
{
	struct ps_prochandle *ph_p = ta_p->ph_p;
	int i;

	if (ta_p->model == PR_MODEL_NATIVE) {
		uberdata_t uberdata;

		if (ps_pdread(ph_p, ta_p->uberdata_addr,
		    &uberdata, sizeof (uberdata)) != PS_OK)
			return (TD_DBERR);
		ta_p->primary_map = uberdata.primary_map;
		ta_p->tdb_eventmask_addr = ta_p->uberdata_addr +
		    offsetof(uberdata_t, tdb.tdb_ev_global_mask);
		ta_p->tdb_register_sync_addr = ta_p->uberdata_addr +
		    offsetof(uberdata_t, uberflags.uf_tdb_register_sync);
		ta_p->hash_table_addr = (psaddr_t)uberdata.thr_hash_table;
		ta_p->hash_size = uberdata.hash_size;
		if (ps_pdread(ph_p, (psaddr_t)uberdata.tdb.tdb_events,
		    ta_p->tdb_events, sizeof (ta_p->tdb_events)) != PS_OK)
			return (TD_DBERR);
	} else {
#if defined(_LP64) && defined(_SYSCALL32)
		uberdata32_t uberdata;
		caddr32_t tdb_events[TD_MAX_EVENT_NUM - TD_MIN_EVENT_NUM + 1];

		if (ps_pdread(ph_p, ta_p->uberdata_addr,
		    &uberdata, sizeof (uberdata)) != PS_OK)
			return (TD_DBERR);
		ta_p->primary_map = uberdata.primary_map;
		ta_p->tdb_eventmask_addr = ta_p->uberdata_addr +
		    offsetof(uberdata32_t, tdb.tdb_ev_global_mask);
		ta_p->tdb_register_sync_addr = ta_p->uberdata_addr +
		    offsetof(uberdata32_t, uberflags.uf_tdb_register_sync);
		ta_p->hash_table_addr = (psaddr_t)uberdata.thr_hash_table;
		ta_p->hash_size = uberdata.hash_size;
		if (ps_pdread(ph_p, (psaddr_t)uberdata.tdb.tdb_events,
		    tdb_events, sizeof (tdb_events)) != PS_OK)
			return (TD_DBERR);
		for (i = 0; i < TD_MAX_EVENT_NUM - TD_MIN_EVENT_NUM + 1; i++)
			ta_p->tdb_events[i] = tdb_events[i];
#else
		return (TD_DBERR);
#endif
	}

	/*
	 * Unfortunately, we are (implicitly) assuming that our uberdata
	 * definition precisely matches that of our target.  If this is not
	 * true (that is, if we're examining a core file from a foreign
	 * system that has a different definition of uberdata), the failure
	 * modes can be frustratingly non-explicit.  In an effort to catch
	 * this upon initialization (when the debugger may still be able to
	 * opt for another thread model or may be able to fail explicitly), we
	 * check that each of our tdb_events points to valid memory (these are
	 * putatively text upon which a breakpoint can be issued), with the
	 * hope that this is enough of a self-consistency check to lead to
	 * explicit failure on a mismatch.
	 */
	for (i = 0; i < TD_MAX_EVENT_NUM - TD_MIN_EVENT_NUM + 1; i++) {
		uint8_t check;

		if (ps_pdread(ph_p, (psaddr_t)ta_p->tdb_events[i],
		    &check, sizeof (check)) != PS_OK) {
			return (TD_DBERR);
		}
	}

	if (ta_p->hash_size != 1) {	/* multi-threaded */
		ta_p->initialized = 2;
		ta_p->single_lwpid = 0;
		ta_p->single_ulwp_addr = NULL;
	} else {			/* single-threaded */
		ta_p->initialized = 1;
		/*
		 * Get the address and lwpid of the single thread/LWP.
		 * It may not be ulwp_one if this is a child of fork1().
		 */
		if (ta_p->model == PR_MODEL_NATIVE) {
			thr_hash_table_t head;
			lwpid_t lwpid = 0;

			if (ps_pdread(ph_p, ta_p->hash_table_addr,
			    &head, sizeof (head)) != PS_OK)
				return (TD_DBERR);
			if ((psaddr_t)head.hash_bucket == NULL)
				ta_p->initialized = 0;
			else if (ps_pdread(ph_p, (psaddr_t)head.hash_bucket +
			    offsetof(ulwp_t, ul_lwpid),
			    &lwpid, sizeof (lwpid)) != PS_OK)
				return (TD_DBERR);
			ta_p->single_lwpid = lwpid;
			ta_p->single_ulwp_addr = (psaddr_t)head.hash_bucket;
		} else {
#if defined(_LP64) && defined(_SYSCALL32)
			thr_hash_table32_t head;
			lwpid_t lwpid = 0;

			if (ps_pdread(ph_p, ta_p->hash_table_addr,
			    &head, sizeof (head)) != PS_OK)
				return (TD_DBERR);
			if ((psaddr_t)head.hash_bucket == NULL)
				ta_p->initialized = 0;
			else if (ps_pdread(ph_p, (psaddr_t)head.hash_bucket +
			    offsetof(ulwp32_t, ul_lwpid),
			    &lwpid, sizeof (lwpid)) != PS_OK)
				return (TD_DBERR);
			ta_p->single_lwpid = lwpid;
			ta_p->single_ulwp_addr = (psaddr_t)head.hash_bucket;
#else
			return (TD_DBERR);
#endif
		}
	}
	if (!ta_p->primary_map)
		ta_p->initialized = 0;
	return (TD_OK);
}

static td_err_e
td_read_bootstrap_data(td_thragent_t *ta_p)
{
	struct ps_prochandle *ph_p = ta_p->ph_p;
	psaddr_t bootstrap_addr;
	psaddr_t uberdata_addr;
	ps_err_e db_return;
	td_err_e return_val;
	int do_1;

	switch (ta_p->initialized) {
	case 2:			/* fully initialized */
		return (TD_OK);
	case 1:			/* partially initialized */
		if (td_read_hash_size(ta_p) == 1)
			return (TD_OK);
		return (td_read_uberdata(ta_p));
	}

	/*
	 * Uninitialized -- do the startup work.
	 * We set ta_p->initialized to -1 to cut off recursive calls
	 * into libc_db by code in the provider of ps_pglobal_lookup().
	 */
	do_1 = 0;
	ta_p->initialized = -1;
	db_return = ps_pglobal_lookup(ph_p, TD_LIBRARY_NAME,
	    TD_BOOTSTRAP_NAME, &bootstrap_addr);
	if (db_return == PS_NOSYM) {
		do_1 = 1;
		db_return = ps_pglobal_lookup(ph_p, TD_LIBRARY_NAME_1,
		    TD_BOOTSTRAP_NAME, &bootstrap_addr);
	}
	if (db_return == PS_NOSYM)	/* libc is not linked yet */
		return (TD_NOLIBTHREAD);
	if (db_return != PS_OK)
		return (TD_ERR);
	db_return = ps_pglobal_lookup(ph_p,
	    do_1? TD_LIBRARY_NAME_1 : TD_LIBRARY_NAME,
	    TD_UBERDATA_NAME, &uberdata_addr);
	if (db_return == PS_NOSYM)	/* libc is not linked yet */
		return (TD_NOLIBTHREAD);
	if (db_return != PS_OK)
		return (TD_ERR);

	/*
	 * Read the uberdata address into the thread agent structure.
	 */
	if (ta_p->model == PR_MODEL_NATIVE) {
		psaddr_t psaddr;

		if (ps_pdread(ph_p, bootstrap_addr,
		    &psaddr, sizeof (psaddr)) != PS_OK)
			return (TD_DBERR);
		if ((ta_p->bootstrap_addr = psaddr) == NULL)
			psaddr = uberdata_addr;
		else if (ps_pdread(ph_p, psaddr,
		    &psaddr, sizeof (psaddr)) != PS_OK)
			return (TD_DBERR);
		if (psaddr == NULL) {
			/* primary linkmap in the tgt is not initialized */
			ta_p->bootstrap_addr = NULL;
			psaddr = uberdata_addr;
		}
		ta_p->uberdata_addr = psaddr;
	} else {
#if defined(_LP64) && defined(_SYSCALL32)
		caddr32_t psaddr;

		if (ps_pdread(ph_p, bootstrap_addr,
		    &psaddr, sizeof (psaddr)) != PS_OK)
			return (TD_DBERR);
		if ((ta_p->bootstrap_addr = (psaddr_t)psaddr) == NULL)
			psaddr = (caddr32_t)uberdata_addr;
		else if (ps_pdread(ph_p, (psaddr_t)psaddr,
		    &psaddr, sizeof (psaddr)) != PS_OK)
			return (TD_DBERR);
		if (psaddr == NULL) {
			/* primary linkmap in the tgt is not initialized */
			ta_p->bootstrap_addr = NULL;
			psaddr = (caddr32_t)uberdata_addr;
		}
		ta_p->uberdata_addr = (psaddr_t)psaddr;
#else
		return (TD_DBERR);
#endif	/* _SYSCALL32 */
	}

	if ((return_val = td_read_uberdata(ta_p)) != TD_OK)
		return (return_val);
	if (ta_p->bootstrap_addr == NULL)
		ta_p->initialized = 0;
	return (TD_OK);
}

#pragma weak ps_kill
#pragma weak ps_lrolltoaddr

/*
 * Allocate a new agent process handle ("thread agent").
 */
#pragma weak td_ta_new = __td_ta_new
td_err_e
__td_ta_new(struct ps_prochandle *ph_p, td_thragent_t **ta_pp)
{
	td_thragent_t *ta_p;
	int model;
	td_err_e return_val = TD_OK;

	if (ph_p == NULL)
		return (TD_BADPH);
	if (ta_pp == NULL)
		return (TD_ERR);
	*ta_pp = NULL;
	if (ps_pstop(ph_p) != PS_OK)
		return (TD_DBERR);
	/*
	 * ps_pdmodel might not be defined if this is an older client.
	 * Make it a weak symbol and test if it exists before calling.
	 */
#pragma weak ps_pdmodel
	if (ps_pdmodel == NULL) {
		model = PR_MODEL_NATIVE;
	} else if (ps_pdmodel(ph_p, &model) != PS_OK) {
		(void) ps_pcontinue(ph_p);
		return (TD_ERR);
	}

	if ((ta_p = malloc(sizeof (*ta_p))) == NULL) {
		(void) ps_pcontinue(ph_p);
		return (TD_MALLOC);
	}

	/*
	 * Initialize the agent process handle.
	 * Pick up the symbol value we need from the target process.
	 */
	(void) memset(ta_p, 0, sizeof (*ta_p));
	ta_p->ph_p = ph_p;
	(void) rwlock_init(&ta_p->rwlock, USYNC_THREAD, NULL);
	ta_p->model = model;
	return_val = td_read_bootstrap_data(ta_p);

	/*
	 * Because the old libthread_db enabled lock tracking by default,
	 * we must also do it.  However, we do it only if the application
	 * provides the ps_kill() and ps_lrolltoaddr() interfaces.
	 * (dbx provides the ps_kill() and ps_lrolltoaddr() interfaces.)
	 */
	if (return_val == TD_OK && ps_kill != NULL && ps_lrolltoaddr != NULL) {
		register_sync_t oldenable;
		register_sync_t enable = REGISTER_SYNC_ENABLE;
		psaddr_t psaddr = ta_p->tdb_register_sync_addr;

		if (ps_pdread(ph_p, psaddr,
		    &oldenable, sizeof (oldenable)) != PS_OK)
			return_val = TD_DBERR;
		else if (oldenable != REGISTER_SYNC_OFF ||
		    ps_pdwrite(ph_p, psaddr,
		    &enable, sizeof (enable)) != PS_OK) {
			/*
			 * Lock tracking was already enabled or we
			 * failed to enable it, probably because we
			 * are examining a core file.  In either case
			 * set the sync_tracking flag non-zero to
			 * indicate that we should not attempt to
			 * disable lock tracking when we delete the
			 * agent process handle in td_ta_delete().
			 */
			ta_p->sync_tracking = 1;
		}
	}

	if (return_val == TD_OK)
		*ta_pp = ta_p;
	else
		free(ta_p);

	(void) ps_pcontinue(ph_p);
	return (return_val);
}
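
/*
 * Example: typical client usage of td_ta_new()/td_ta_delete().  This is
 * an illustrative sketch only; "my_ph" stands for the client's own
 * ps_prochandle and dbg_retry_later() for whatever retry mechanism the
 * client uses -- neither is part of libc_db:
 *
 *	td_thragent_t *ta;
 *	switch (td_ta_new(my_ph, &ta)) {
 *	case TD_NOLIBTHREAD:	// libc not yet initialized in the target
 *		dbg_retry_later();
 *		break;
 *	case TD_OK:
 *		// ... use the td_ta_*() and td_thr_*() interfaces ...
 *		(void) td_ta_delete(ta);
 *		break;
 *	}
 */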

/*
 * Utility function to grab the readers lock and return the prochandle,
 * given an agent process handle.  Performs standard error checking.
 * Returns non-NULL with the lock held, or NULL with the lock not held.
 */
static struct ps_prochandle *
ph_lock_ta(td_thragent_t *ta_p, td_err_e *err)
{
	struct ps_prochandle *ph_p = NULL;
	td_err_e error;

	if (ta_p == NULL || ta_p->initialized == -1) {
		*err = TD_BADTA;
	} else if (rw_rdlock(&ta_p->rwlock) != 0) {	/* can't happen? */
		*err = TD_BADTA;
	} else if ((ph_p = ta_p->ph_p) == NULL) {
		(void) rw_unlock(&ta_p->rwlock);
		*err = TD_BADPH;
	} else if (ta_p->initialized != 2 &&
	    (error = td_read_bootstrap_data(ta_p)) != TD_OK) {
		(void) rw_unlock(&ta_p->rwlock);
		ph_p = NULL;
		*err = error;
	} else {
		*err = TD_OK;
	}

	return (ph_p);
}

/*
 * Utility function to grab the readers lock and return the prochandle,
 * given an agent thread handle.  Performs standard error checking.
 * Returns non-NULL with the lock held, or NULL with the lock not held.
 */
static struct ps_prochandle *
ph_lock_th(const td_thrhandle_t *th_p, td_err_e *err)
{
	if (th_p == NULL || th_p->th_unique == NULL) {
		*err = TD_BADTH;
		return (NULL);
	}
	return (ph_lock_ta(th_p->th_ta_p, err));
}

/*
 * Utility function to grab the readers lock and return the prochandle,
 * given a synchronization object handle.  Performs standard error checking.
 * Returns non-NULL with the lock held, or NULL with the lock not held.
 */
static struct ps_prochandle *
ph_lock_sh(const td_synchandle_t *sh_p, td_err_e *err)
{
	if (sh_p == NULL || sh_p->sh_unique == NULL) {
		*err = TD_BADSH;
		return (NULL);
	}
	return (ph_lock_ta(sh_p->sh_ta_p, err));
}

/*
 * Unlock the agent process handle obtained from ph_lock_*().
 */
static void
ph_unlock(td_thragent_t *ta_p)
{
	(void) rw_unlock(&ta_p->rwlock);
}

/*
 * De-allocate an agent process handle,
 * releasing all related resources.
 *
 * XXX -- This is hopelessly broken ---
 * Storage for thread agent is not deallocated.  The prochandle
 * in the thread agent is set to NULL so that future uses of
 * the thread agent can be detected and an error value returned.
 * All functions in the external user interface that make
 * use of the thread agent are expected
 * to check for a NULL prochandle in the thread agent.
 * All such functions are also expected to obtain a
 * reader lock on the thread agent while it is using it.
 */
#pragma weak td_ta_delete = __td_ta_delete
td_err_e
__td_ta_delete(td_thragent_t *ta_p)
{
	struct ps_prochandle *ph_p;

	/*
	 * This is the only place we grab the writer lock.
	 * We are going to NULL out the prochandle.
	 */
	if (ta_p == NULL || rw_wrlock(&ta_p->rwlock) != 0)
		return (TD_BADTA);
	if ((ph_p = ta_p->ph_p) == NULL) {
		(void) rw_unlock(&ta_p->rwlock);
		return (TD_BADPH);
	}
	/*
	 * If synch. tracking was disabled when td_ta_new() was called and
	 * if td_ta_sync_tracking_enable() was never called, then disable
	 * synch. tracking (it was enabled by default in td_ta_new()).
	 */
	if (ta_p->sync_tracking == 0 &&
	    ps_kill != NULL && ps_lrolltoaddr != NULL) {
		register_sync_t enable = REGISTER_SYNC_DISABLE;

		(void) ps_pdwrite(ph_p, ta_p->tdb_register_sync_addr,
		    &enable, sizeof (enable));
	}
	ta_p->ph_p = NULL;
	(void) rw_unlock(&ta_p->rwlock);
	return (TD_OK);
}

/*
 * Map an agent process handle to a client prochandle.
 * Currently unused by dbx.
 */
#pragma weak td_ta_get_ph = __td_ta_get_ph
td_err_e
__td_ta_get_ph(td_thragent_t *ta_p, struct ps_prochandle **ph_pp)
{
	td_err_e return_val;

	if (ph_pp != NULL)	/* protect stupid callers */
		*ph_pp = NULL;
	if (ph_pp == NULL)
		return (TD_ERR);
	if ((*ph_pp = ph_lock_ta(ta_p, &return_val)) == NULL)
		return (return_val);
	ph_unlock(ta_p);
	return (TD_OK);
}

/*
 * Set the process's suggested concurrency level.
 * This is a no-op in a one-level model.
 * Currently unused by dbx.
 */
/* ARGSUSED1 */
#pragma weak td_ta_setconcurrency = __td_ta_setconcurrency
td_err_e
__td_ta_setconcurrency(const td_thragent_t *ta_p, int level)
{
	if (ta_p == NULL)
		return (TD_BADTA);
	if (ta_p->ph_p == NULL)
		return (TD_BADPH);
	return (TD_OK);
}

/*
 * Get the number of threads in the process.
 */
#pragma weak td_ta_get_nthreads = __td_ta_get_nthreads
td_err_e
__td_ta_get_nthreads(td_thragent_t *ta_p, int *nthread_p)
{
	struct ps_prochandle *ph_p;
	td_err_e return_val;
	int nthreads;
	int nzombies;
	psaddr_t nthreads_addr;
	psaddr_t nzombies_addr;

	if (ta_p->model == PR_MODEL_NATIVE) {
		nthreads_addr = ta_p->uberdata_addr +
		    offsetof(uberdata_t, nthreads);
		nzombies_addr = ta_p->uberdata_addr +
		    offsetof(uberdata_t, nzombies);
	} else {
#if defined(_LP64) && defined(_SYSCALL32)
		nthreads_addr = ta_p->uberdata_addr +
		    offsetof(uberdata32_t, nthreads);
		nzombies_addr = ta_p->uberdata_addr +
		    offsetof(uberdata32_t, nzombies);
#else
		nthreads_addr = 0;
		nzombies_addr = 0;
#endif	/* _SYSCALL32 */
	}

	if (nthread_p == NULL)
		return (TD_ERR);
	if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
		return (return_val);
	if (ps_pdread(ph_p, nthreads_addr, &nthreads, sizeof (int)) != PS_OK)
		return_val = TD_DBERR;
	if (ps_pdread(ph_p, nzombies_addr, &nzombies, sizeof (int)) != PS_OK)
		return_val = TD_DBERR;
	ph_unlock(ta_p);
	if (return_val == TD_OK)
		*nthread_p = nthreads + nzombies;
	return (return_val);
}

typedef struct {
	thread_t	tid;
	int		found;
	td_thrhandle_t	th;
} td_mapper_param_t;

/*
 * Check the value in data against the thread id.
 * If it matches, return 1 to terminate iterations.
 * This function is used by td_ta_map_id2thr() to map a tid to a thread handle.
 */
static int
td_mapper_id2thr(td_thrhandle_t *th_p, td_mapper_param_t *data)
{
	td_thrinfo_t ti;

	if (__td_thr_get_info(th_p, &ti) == TD_OK &&
	    data->tid == ti.ti_tid) {
		data->found = 1;
		data->th = *th_p;
		return (1);
	}
	return (0);
}

/*
 * Given a thread identifier, return the corresponding thread handle.
 */
#pragma weak td_ta_map_id2thr = __td_ta_map_id2thr
td_err_e
__td_ta_map_id2thr(td_thragent_t *ta_p, thread_t tid,
	td_thrhandle_t *th_p)
{
	td_err_e		return_val;
	td_mapper_param_t	data;

	if (th_p != NULL &&	/* optimize for a single thread */
	    ta_p != NULL &&
	    ta_p->initialized == 1 &&
	    (td_read_hash_size(ta_p) == 1 ||
	    td_read_uberdata(ta_p) == TD_OK) &&
	    ta_p->initialized == 1 &&
	    ta_p->single_lwpid == tid) {
		th_p->th_ta_p = ta_p;
		if ((th_p->th_unique = ta_p->single_ulwp_addr) == 0)
			return (TD_NOTHR);
		return (TD_OK);
	}

	/*
	 * LOCKING EXCEPTION - Locking is not required here because
	 * the locking and checking will be done in __td_ta_thr_iter.
	 */

	if (ta_p == NULL)
		return (TD_BADTA);
	if (th_p == NULL)
		return (TD_BADTH);
	if (tid == 0)
		return (TD_NOTHR);

	data.tid = tid;
	data.found = 0;
	return_val = __td_ta_thr_iter(ta_p,
	    (td_thr_iter_f *)td_mapper_id2thr, (void *)&data,
	    TD_THR_ANY_STATE, TD_THR_LOWEST_PRIORITY,
	    TD_SIGNO_MASK, TD_THR_ANY_USER_FLAGS);
	if (return_val == TD_OK) {
		if (data.found == 0)
			return_val = TD_NOTHR;
		else
			*th_p = data.th;
	}

	return (return_val);
}
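
/*
 * Example: mapping a thread id to a handle and reading its information.
 * A minimal sketch of client usage (dbg_printf() is a stand-in for the
 * client's own output routine, not part of libc_db):
 *
 *	td_thrhandle_t th;
 *	td_thrinfo_t ti;
 *	if (td_ta_map_id2thr(ta, tid, &th) == TD_OK &&
 *	    td_thr_get_info(&th, &ti) == TD_OK)
 *		dbg_printf("tid %d in state %d\n",
 *		    (int)ti.ti_tid, (int)ti.ti_state);
 */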

/*
 * Map the address of a synchronization object to a sync. object handle.
 */
#pragma weak td_ta_map_addr2sync = __td_ta_map_addr2sync
td_err_e
__td_ta_map_addr2sync(td_thragent_t *ta_p, psaddr_t addr, td_synchandle_t *sh_p)
{
	struct ps_prochandle *ph_p;
	td_err_e return_val;
	uint16_t sync_magic;

	if (sh_p == NULL)
		return (TD_BADSH);
	if (addr == NULL)
		return (TD_ERR);
	if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
		return (return_val);
	/*
	 * Check the magic number of the sync. object to make sure it's valid.
	 * The magic number is at the same offset for all sync. objects.
	 */
	if (ps_pdread(ph_p, (psaddr_t)&((mutex_t *)addr)->mutex_magic,
	    &sync_magic, sizeof (sync_magic)) != PS_OK) {
		ph_unlock(ta_p);
		return (TD_BADSH);
	}
	ph_unlock(ta_p);
	if (sync_magic != MUTEX_MAGIC && sync_magic != COND_MAGIC &&
	    sync_magic != SEMA_MAGIC && sync_magic != RWL_MAGIC)
		return (TD_BADSH);
	/*
	 * Just fill in the appropriate fields of the sync. handle.
	 */
	sh_p->sh_ta_p = (td_thragent_t *)ta_p;
	sh_p->sh_unique = addr;
	return (TD_OK);
}

/*
 * Iterate over the set of global TSD keys.
 * The call back function is called with three arguments,
 * a key, a pointer to the destructor function, and the cbdata pointer.
 * Currently unused by dbx.
 */
#pragma weak td_ta_tsd_iter = __td_ta_tsd_iter
td_err_e
__td_ta_tsd_iter(td_thragent_t *ta_p, td_key_iter_f *cb, void *cbdata_p)
{
	struct ps_prochandle *ph_p;
	td_err_e	return_val;
	int		key;
	int		numkeys;
	psaddr_t	dest_addr;
	psaddr_t	*destructors = NULL;
	PFrV		destructor;

	if (cb == NULL)
		return (TD_ERR);
	if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
		return (return_val);
	if (ps_pstop(ph_p) != PS_OK) {
		ph_unlock(ta_p);
		return (TD_DBERR);
	}

	if (ta_p->model == PR_MODEL_NATIVE) {
		tsd_metadata_t tsdm;

		if (ps_pdread(ph_p,
		    ta_p->uberdata_addr + offsetof(uberdata_t, tsd_metadata),
		    &tsdm, sizeof (tsdm)) != PS_OK)
			return_val = TD_DBERR;
		else {
			numkeys = tsdm.tsdm_nused;
			dest_addr = (psaddr_t)tsdm.tsdm_destro;
			if (numkeys > 0)
				destructors =
				    malloc(numkeys * sizeof (psaddr_t));
		}
	} else {
#if defined(_LP64) && defined(_SYSCALL32)
		tsd_metadata32_t tsdm;

		if (ps_pdread(ph_p,
		    ta_p->uberdata_addr + offsetof(uberdata32_t, tsd_metadata),
		    &tsdm, sizeof (tsdm)) != PS_OK)
			return_val = TD_DBERR;
		else {
			numkeys = tsdm.tsdm_nused;
			dest_addr = (psaddr_t)tsdm.tsdm_destro;
			if (numkeys > 0)
				destructors =
				    malloc(numkeys * sizeof (caddr32_t));
		}
#else
		return_val = TD_DBERR;
#endif	/* _SYSCALL32 */
	}

	if (return_val != TD_OK || numkeys <= 0) {
		(void) ps_pcontinue(ph_p);
		ph_unlock(ta_p);
		return (return_val);
	}

	if (destructors == NULL)
		return_val = TD_MALLOC;
	else if (ta_p->model == PR_MODEL_NATIVE) {
		if (ps_pdread(ph_p, dest_addr,
		    destructors, numkeys * sizeof (psaddr_t)) != PS_OK)
			return_val = TD_DBERR;
		else {
			for (key = 1; key < numkeys; key++) {
				destructor = (PFrV)destructors[key];
				if (destructor != TSD_UNALLOCATED &&
				    (*cb)(key, destructor, cbdata_p))
					break;
			}
		}
#if defined(_LP64) && defined(_SYSCALL32)
	} else {
		caddr32_t *destructors32 = (caddr32_t *)destructors;
		caddr32_t destruct32;

		if (ps_pdread(ph_p, dest_addr,
		    destructors32, numkeys * sizeof (caddr32_t)) != PS_OK)
			return_val = TD_DBERR;
		else {
			for (key = 1; key < numkeys; key++) {
				destruct32 = destructors32[key];
				if ((destruct32 !=
				    (caddr32_t)(uintptr_t)TSD_UNALLOCATED) &&
				    (*cb)(key, (PFrV)(uintptr_t)destruct32,
				    cbdata_p))
					break;
			}
		}
#endif	/* _SYSCALL32 */
	}

	if (destructors)
		free(destructors);
	(void) ps_pcontinue(ph_p);
	ph_unlock(ta_p);
	return (return_val);
}

static int
sigequalset(const sigset_t *s1, const sigset_t *s2)
{
	return (
	    s1->__sigbits[0] == s2->__sigbits[0] &&
	    s1->__sigbits[1] == s2->__sigbits[1] &&
	    s1->__sigbits[2] == s2->__sigbits[2] &&
	    s1->__sigbits[3] == s2->__sigbits[3]);
}

/*
 * Iterate over all threads.  For each thread call
 * the function pointed to by "cb" with a pointer
 * to a thread handle, and a pointer to data which
 * can be NULL.  Only call td_thr_iter_f() on threads
 * which match the properties of state, ti_pri,
 * ti_sigmask_p, and ti_user_flags.  If cb returns
 * a non-zero value, terminate iterations.
 *
 * Input:
 *	*ta_p - thread agent
 *	*cb - call back function defined by user.
 *	td_thr_iter_f() takes a thread handle and
 *	cbdata_p as a parameter.
 *	cbdata_p - parameter for td_thr_iter_f().
 *
 *	state - state of threads of interest.  A value of
 *	TD_THR_ANY_STATE from enum td_thr_state_e
 *	does not restrict iterations by state.
 *	ti_pri - lower bound of priorities of threads of
 *	interest.  A value of TD_THR_LOWEST_PRIORITY
 *	defined in thread_db.h does not restrict
 *	iterations by priority.  A thread with priority
 *	less than ti_pri will NOT be passed to the callback.
 *	ti_sigmask_p - signal mask of threads of interest.
 *	A value of TD_SIGNO_MASK defined in thread_db.h
 *	does not restrict iterations by signal mask.
 *	ti_user_flags - user flags of threads of interest.  A
 *	value of TD_THR_ANY_USER_FLAGS defined in thread_db.h
 *	does not restrict iterations by user flags.
 */
#pragma weak td_ta_thr_iter = __td_ta_thr_iter
td_err_e
__td_ta_thr_iter(td_thragent_t *ta_p, td_thr_iter_f *cb,
	void *cbdata_p, td_thr_state_e state, int ti_pri,
	sigset_t *ti_sigmask_p, unsigned ti_user_flags)
{
	struct ps_prochandle *ph_p;
	psaddr_t	first_lwp_addr;
	psaddr_t	first_zombie_addr;
	psaddr_t	curr_lwp_addr;
	psaddr_t	next_lwp_addr;
	td_thrhandle_t	th;
	ps_err_e	db_return;
	ps_err_e	db_return2;
	td_err_e	return_val;

	if (cb == NULL)
		return (TD_ERR);
	/*
	 * If state is not within bound, short circuit.
	 */
	if (state < TD_THR_ANY_STATE || state > TD_THR_STOPPED_ASLEEP)
		return (TD_OK);

	if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
		return (return_val);
	if (ps_pstop(ph_p) != PS_OK) {
		ph_unlock(ta_p);
		return (TD_DBERR);
	}

	/*
	 * For each ulwp_t in the circular linked lists pointed
	 * to by "all_lwps" and "all_zombies":
	 * (1) Filter each thread.
	 * (2) Create the thread_object for each thread that passes.
	 * (3) Call the call back function on each thread.
	 */

	if (ta_p->model == PR_MODEL_NATIVE) {
		db_return = ps_pdread(ph_p,
		    ta_p->uberdata_addr + offsetof(uberdata_t, all_lwps),
		    &first_lwp_addr, sizeof (first_lwp_addr));
		db_return2 = ps_pdread(ph_p,
		    ta_p->uberdata_addr + offsetof(uberdata_t, all_zombies),
		    &first_zombie_addr, sizeof (first_zombie_addr));
	} else {
#if defined(_LP64) && defined(_SYSCALL32)
		caddr32_t addr32;

		db_return = ps_pdread(ph_p,
		    ta_p->uberdata_addr + offsetof(uberdata32_t, all_lwps),
		    &addr32, sizeof (addr32));
		first_lwp_addr = addr32;
		db_return2 = ps_pdread(ph_p,
		    ta_p->uberdata_addr + offsetof(uberdata32_t, all_zombies),
		    &addr32, sizeof (addr32));
		first_zombie_addr = addr32;
#else	/* _SYSCALL32 */
		db_return = PS_ERR;
		db_return2 = PS_ERR;
#endif	/* _SYSCALL32 */
	}
	if (db_return == PS_OK)
		db_return = db_return2;

	/*
	 * If first_lwp_addr and first_zombie_addr are both NULL,
	 * libc must not yet be initialized or all threads have
	 * exited.  Return TD_NOTHR and all will be well.
	 */
	if (db_return == PS_OK &&
	    first_lwp_addr == NULL && first_zombie_addr == NULL) {
		(void) ps_pcontinue(ph_p);
		ph_unlock(ta_p);
		return (TD_NOTHR);
	}
	if (db_return != PS_OK) {
		(void) ps_pcontinue(ph_p);
		ph_unlock(ta_p);
		return (TD_DBERR);
	}

	/*
	 * Run down the lists of all living and dead lwps.
	 */
	if (first_lwp_addr == NULL)
		first_lwp_addr = first_zombie_addr;
	curr_lwp_addr = first_lwp_addr;
	for (;;) {
		td_thr_state_e ts_state;
		int userpri;
		unsigned userflags;
		sigset_t mask;

		/*
		 * Read the ulwp struct.
		 */
		if (ta_p->model == PR_MODEL_NATIVE) {
			ulwp_t ulwp;

			if (ps_pdread(ph_p, curr_lwp_addr,
			    &ulwp, sizeof (ulwp)) != PS_OK &&
			    ((void) memset(&ulwp, 0, sizeof (ulwp)),
			    ps_pdread(ph_p, curr_lwp_addr,
			    &ulwp, REPLACEMENT_SIZE)) != PS_OK) {
				return_val = TD_DBERR;
				break;
			}
			next_lwp_addr = (psaddr_t)ulwp.ul_forw;

			ts_state = ulwp.ul_dead? TD_THR_ZOMBIE :
			    ulwp.ul_stop? TD_THR_STOPPED :
			    ulwp.ul_wchan? TD_THR_SLEEP :
			    TD_THR_ACTIVE;
			userpri = ulwp.ul_pri;
			userflags = ulwp.ul_usropts;
			if (ulwp.ul_dead)
				(void) sigemptyset(&mask);
			else
				mask = *(sigset_t *)&ulwp.ul_sigmask;
		} else {
#if defined(_LP64) && defined(_SYSCALL32)
			ulwp32_t ulwp;

			if (ps_pdread(ph_p, curr_lwp_addr,
			    &ulwp, sizeof (ulwp)) != PS_OK &&
			    ((void) memset(&ulwp, 0, sizeof (ulwp)),
			    ps_pdread(ph_p, curr_lwp_addr,
			    &ulwp, REPLACEMENT_SIZE32)) != PS_OK) {
				return_val = TD_DBERR;
				break;
			}
			next_lwp_addr = (psaddr_t)ulwp.ul_forw;

			ts_state = ulwp.ul_dead? TD_THR_ZOMBIE :
			    ulwp.ul_stop? TD_THR_STOPPED :
			    ulwp.ul_wchan? TD_THR_SLEEP :
			    TD_THR_ACTIVE;
			userpri = ulwp.ul_pri;
			userflags = ulwp.ul_usropts;
			if (ulwp.ul_dead)
				(void) sigemptyset(&mask);
			else
				mask = *(sigset_t *)&ulwp.ul_sigmask;
#else	/* _SYSCALL32 */
			return_val = TD_ERR;
			break;
#endif	/* _SYSCALL32 */
		}

		/*
		 * Filter on state, priority, sigmask, and user flags.
		 */

		if ((state != ts_state) &&
		    (state != TD_THR_ANY_STATE))
			goto advance;

		if (ti_pri > userpri)
			goto advance;

		if (ti_sigmask_p != TD_SIGNO_MASK &&
		    !sigequalset(ti_sigmask_p, &mask))
			goto advance;

		if (ti_user_flags != userflags &&
		    ti_user_flags != (unsigned)TD_THR_ANY_USER_FLAGS)
			goto advance;

		/*
		 * Call back - break if the return
		 * from the call back is non-zero.
		 */
		th.th_ta_p = (td_thragent_t *)ta_p;
		th.th_unique = curr_lwp_addr;
		if ((*cb)(&th, cbdata_p))
			break;

advance:
		if ((curr_lwp_addr = next_lwp_addr) == first_lwp_addr) {
			/*
			 * Switch to the zombie list, unless it is NULL
			 * or we have already been doing the zombie list,
			 * in which case terminate the loop.
			 */
			if (first_zombie_addr == NULL ||
			    first_lwp_addr == first_zombie_addr)
				break;
			curr_lwp_addr = first_lwp_addr = first_zombie_addr;
		}
	}

	(void) ps_pcontinue(ph_p);
	ph_unlock(ta_p);
	return (return_val);
}
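
/*
 * Example: counting threads with td_ta_thr_iter().  A minimal sketch;
 * the callback and counter are hypothetical client code:
 *
 *	static int
 *	count_cb(const td_thrhandle_t *th_p, void *cbdata)
 *	{
 *		(*(int *)cbdata)++;
 *		return (0);	// zero means keep iterating
 *	}
 *
 *	int nthr = 0;
 *	(void) td_ta_thr_iter(ta, count_cb, &nthr, TD_THR_ANY_STATE,
 *	    TD_THR_LOWEST_PRIORITY, TD_SIGNO_MASK, TD_THR_ANY_USER_FLAGS);
 */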

/*
 * Enable or disable process synchronization object tracking.
 * Currently unused by dbx.
 */
#pragma weak td_ta_sync_tracking_enable = __td_ta_sync_tracking_enable
td_err_e
__td_ta_sync_tracking_enable(td_thragent_t *ta_p, int onoff)
{
	struct ps_prochandle *ph_p;
	td_err_e return_val;
	register_sync_t enable;

	if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
		return (return_val);
	/*
	 * Values of tdb_register_sync in the victim process:
	 *	REGISTER_SYNC_ENABLE	enables registration of synch objects
	 *	REGISTER_SYNC_DISABLE	disables registration of synch objects
	 * These cause the table to be cleared and tdb_register_sync set to:
	 *	REGISTER_SYNC_ON	registration in effect
	 *	REGISTER_SYNC_OFF	registration not in effect
	 */
	enable = onoff? REGISTER_SYNC_ENABLE : REGISTER_SYNC_DISABLE;
	if (ps_pdwrite(ph_p, ta_p->tdb_register_sync_addr,
	    &enable, sizeof (enable)) != PS_OK)
		return_val = TD_DBERR;
	/*
	 * Remember that this interface was called (see td_ta_delete()).
	 */
	ta_p->sync_tracking = 1;
	ph_unlock(ta_p);
	return (return_val);
}

/*
 * Iterate over all known synchronization variables.
 * It is very possible that the list generated is incomplete,
 * because the iterator can only find synchronization variables
 * that have been registered by the process since synchronization
 * object registration was enabled.
 * The call back function cb is called for each synchronization
 * variable with two arguments: a pointer to the synchronization
 * handle and the passed-in argument cbdata.
 * If cb returns a non-zero value, iterations are terminated.
 */
#pragma weak td_ta_sync_iter = __td_ta_sync_iter
td_err_e
__td_ta_sync_iter(td_thragent_t *ta_p, td_sync_iter_f *cb, void *cbdata)
{
	struct ps_prochandle *ph_p;
	td_err_e	return_val;
	int		i;
	register_sync_t	enable;
	psaddr_t	next_desc;
	tdb_sync_stats_t sync_stats;
	td_synchandle_t	synchandle;
	psaddr_t	psaddr;
	void		*vaddr;
	uint64_t	*sync_addr_hash = NULL;

	if (cb == NULL)
		return (TD_ERR);
	if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
		return (return_val);
	if (ps_pstop(ph_p) != PS_OK) {
		ph_unlock(ta_p);
		return (TD_DBERR);
	}
	if (ps_pdread(ph_p, ta_p->tdb_register_sync_addr,
	    &enable, sizeof (enable)) != PS_OK) {
		return_val = TD_DBERR;
		goto out;
	}
	if (enable != REGISTER_SYNC_ON)
		goto out;

	/*
	 * First read the hash table.
	 * The hash table is large; allocate with mmap().
	 */
	if ((vaddr = mmap(NULL, TDB_HASH_SIZE * sizeof (uint64_t),
	    PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANON, -1, (off_t)0))
	    == MAP_FAILED) {
		return_val = TD_MALLOC;
		goto out;
	}
	sync_addr_hash = vaddr;

	if (ta_p->model == PR_MODEL_NATIVE) {
		if (ps_pdread(ph_p, ta_p->uberdata_addr +
		    offsetof(uberdata_t, tdb.tdb_sync_addr_hash),
		    &psaddr, sizeof (psaddr)) != PS_OK) {
			return_val = TD_DBERR;
			goto out;
		}
	} else {
#if defined(_LP64) && defined(_SYSCALL32)
		caddr32_t addr;

		if (ps_pdread(ph_p, ta_p->uberdata_addr +
		    offsetof(uberdata32_t, tdb.tdb_sync_addr_hash),
		    &addr, sizeof (addr)) != PS_OK) {
			return_val = TD_DBERR;
			goto out;
		}
		psaddr = addr;
#else
		return_val = TD_ERR;
		goto out;
#endif	/* _SYSCALL32 */
	}

	if (psaddr == NULL)
		goto out;
	if (ps_pdread(ph_p, psaddr, sync_addr_hash,
	    TDB_HASH_SIZE * sizeof (uint64_t)) != PS_OK) {
		return_val = TD_DBERR;
		goto out;
	}

	/*
	 * Now scan the hash table.
	 */
	for (i = 0; i < TDB_HASH_SIZE; i++) {
		for (next_desc = (psaddr_t)sync_addr_hash[i];
		    next_desc != NULL;
		    next_desc = (psaddr_t)sync_stats.next) {
			if (ps_pdread(ph_p, next_desc,
			    &sync_stats, sizeof (sync_stats)) != PS_OK) {
				return_val = TD_DBERR;
				goto out;
			}
			if (sync_stats.un.type == TDB_NONE) {
				/* not registered since registration enabled */
				continue;
			}
			synchandle.sh_ta_p = ta_p;
			synchandle.sh_unique = (psaddr_t)sync_stats.sync_addr;
			if ((*cb)(&synchandle, cbdata) != 0)
				goto out;
		}
	}

out:
	if (sync_addr_hash != NULL)
		(void) munmap((void *)sync_addr_hash,
		    TDB_HASH_SIZE * sizeof (uint64_t));
	(void) ps_pcontinue(ph_p);
	ph_unlock(ta_p);
	return (return_val);
}
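
/*
 * Example: walking the registered synchronization objects.  A minimal
 * sketch; record_sync() is a hypothetical client routine and the
 * td_syncinfo_t fields shown are those reported by td_sync_get_info():
 *
 *	static int
 *	sync_cb(const td_synchandle_t *sh_p, void *cbdata)
 *	{
 *		td_syncinfo_t si;
 *		if (td_sync_get_info(sh_p, &si) == TD_OK)
 *			record_sync(si.si_type, si.si_sv_addr);
 *		return (0);
 *	}
 *
 *	(void) td_ta_sync_tracking_enable(ta, 1);
 *	// ... let the target run for a while ...
 *	(void) td_ta_sync_iter(ta, sync_cb, NULL);
 */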

/*
 * Enable process statistics collection.
 */
/* ARGSUSED */
#pragma weak td_ta_enable_stats = __td_ta_enable_stats
td_err_e
__td_ta_enable_stats(const td_thragent_t *ta_p, int onoff)
{
	return (TD_NOCAPAB);
}

/*
 * Reset process statistics.
 */
/* ARGSUSED */
#pragma weak td_ta_reset_stats = __td_ta_reset_stats
td_err_e
__td_ta_reset_stats(const td_thragent_t *ta_p)
{
	return (TD_NOCAPAB);
}

/*
 * Read process statistics.
 */
/* ARGSUSED */
#pragma weak td_ta_get_stats = __td_ta_get_stats
td_err_e
__td_ta_get_stats(const td_thragent_t *ta_p, td_ta_stats_t *tstats)
{
	return (TD_NOCAPAB);
}

/*
 * Transfer information from lwp struct to thread information struct.
 * XXX -- lots of this needs cleaning up.
 */
static void
td_thr2to(td_thragent_t *ta_p, psaddr_t ts_addr,
	ulwp_t *ulwp, td_thrinfo_t *ti_p)
{
	lwpid_t lwpid;

	if ((lwpid = ulwp->ul_lwpid) == 0)
		lwpid = 1;
	(void) memset(ti_p, 0, sizeof (*ti_p));
	ti_p->ti_ta_p = ta_p;
	ti_p->ti_user_flags = ulwp->ul_usropts;
	ti_p->ti_tid = lwpid;
	ti_p->ti_exitval = ulwp->ul_rval;
	ti_p->ti_startfunc = (psaddr_t)ulwp->ul_startpc;
	if (!ulwp->ul_dead) {
		/*
		 * The bloody fools got this backwards!
		 */
		ti_p->ti_stkbase = (psaddr_t)ulwp->ul_stktop;
		ti_p->ti_stksize = ulwp->ul_stksiz;
	}
	ti_p->ti_ro_area = ts_addr;
	ti_p->ti_ro_size = ulwp->ul_replace?
	    REPLACEMENT_SIZE : sizeof (ulwp_t);
	ti_p->ti_state = ulwp->ul_dead? TD_THR_ZOMBIE :
	    ulwp->ul_stop? TD_THR_STOPPED :
	    ulwp->ul_wchan? TD_THR_SLEEP :
	    TD_THR_ACTIVE;
	ti_p->ti_db_suspended = 0;
	ti_p->ti_type = TD_THR_USER;
	ti_p->ti_sp = ulwp->ul_sp;
	ti_p->ti_flags = 0;
	ti_p->ti_pri = ulwp->ul_pri;
	ti_p->ti_lid = lwpid;
	if (!ulwp->ul_dead)
		ti_p->ti_sigmask = ulwp->ul_sigmask;
	ti_p->ti_traceme = 0;
	ti_p->ti_preemptflag = 0;
	ti_p->ti_pirecflag = 0;
	(void) sigemptyset(&ti_p->ti_pending);
	ti_p->ti_events = ulwp->ul_td_evbuf.eventmask;
}

#if defined(_LP64) && defined(_SYSCALL32)
static void
td_thr2to32(td_thragent_t *ta_p, psaddr_t ts_addr,
	ulwp32_t *ulwp, td_thrinfo_t *ti_p)
{
	lwpid_t lwpid;

	if ((lwpid = ulwp->ul_lwpid) == 0)
		lwpid = 1;
	(void) memset(ti_p, 0, sizeof (*ti_p));
	ti_p->ti_ta_p = ta_p;
	ti_p->ti_user_flags = ulwp->ul_usropts;
	ti_p->ti_tid = lwpid;
	ti_p->ti_exitval = (void *)(uintptr_t)ulwp->ul_rval;
	ti_p->ti_startfunc = (psaddr_t)ulwp->ul_startpc;
	if (!ulwp->ul_dead) {
		/*
		 * The bloody fools got this backwards!
		 */
		ti_p->ti_stkbase = (psaddr_t)ulwp->ul_stktop;
		ti_p->ti_stksize = ulwp->ul_stksiz;
	}
	ti_p->ti_ro_area = ts_addr;
	ti_p->ti_ro_size = ulwp->ul_replace?
	    REPLACEMENT_SIZE32 : sizeof (ulwp32_t);
	ti_p->ti_state = ulwp->ul_dead? TD_THR_ZOMBIE :
	    ulwp->ul_stop? TD_THR_STOPPED :
	    ulwp->ul_wchan? TD_THR_SLEEP :
	    TD_THR_ACTIVE;
	ti_p->ti_db_suspended = 0;
	ti_p->ti_type = TD_THR_USER;
	ti_p->ti_sp = (uint32_t)ulwp->ul_sp;
	ti_p->ti_flags = 0;
	ti_p->ti_pri = ulwp->ul_pri;
	ti_p->ti_lid = lwpid;
	if (!ulwp->ul_dead)
		ti_p->ti_sigmask = *(sigset_t *)&ulwp->ul_sigmask;
	ti_p->ti_traceme = 0;
	ti_p->ti_preemptflag = 0;
	ti_p->ti_pirecflag = 0;
	(void) sigemptyset(&ti_p->ti_pending);
	ti_p->ti_events = ulwp->ul_td_evbuf.eventmask;
}
#endif	/* _SYSCALL32 */

/*
 * Get thread information.
 */
#pragma weak td_thr_get_info = __td_thr_get_info
td_err_e
__td_thr_get_info(td_thrhandle_t *th_p, td_thrinfo_t *ti_p)
{
	struct ps_prochandle *ph_p;
	td_thragent_t	*ta_p;
	td_err_e	return_val;
	psaddr_t	psaddr;

	if (ti_p == NULL)
		return (TD_ERR);
	(void) memset(ti_p, 0, sizeof (*ti_p));

	if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
		return (return_val);
	ta_p = th_p->th_ta_p;
	if (ps_pstop(ph_p) != PS_OK) {
		ph_unlock(ta_p);
		return (TD_DBERR);
	}

	/*
	 * Read the ulwp struct from the process.
	 * Transfer the ulwp struct to the thread information struct.
	 */
	psaddr = th_p->th_unique;
	if (ta_p->model == PR_MODEL_NATIVE) {
		ulwp_t ulwp;

		if (ps_pdread(ph_p, psaddr, &ulwp, sizeof (ulwp)) != PS_OK &&
		    ((void) memset(&ulwp, 0, sizeof (ulwp)),
		    ps_pdread(ph_p, psaddr, &ulwp, REPLACEMENT_SIZE)) != PS_OK)
			return_val = TD_DBERR;
		else
			td_thr2to(ta_p, psaddr, &ulwp, ti_p);
	} else {
#if defined(_LP64) && defined(_SYSCALL32)
		ulwp32_t ulwp;

		if (ps_pdread(ph_p, psaddr, &ulwp, sizeof (ulwp)) != PS_OK &&
		    ((void) memset(&ulwp, 0, sizeof (ulwp)),
		    ps_pdread(ph_p, psaddr, &ulwp, REPLACEMENT_SIZE32)) !=
		    PS_OK)
			return_val = TD_DBERR;
		else
			td_thr2to32(ta_p, psaddr, &ulwp, ti_p);
#else
		return_val = TD_ERR;
#endif	/* _SYSCALL32 */
	}

	(void) ps_pcontinue(ph_p);
	ph_unlock(ta_p);
	return (return_val);
}

/*
 * Given a process and an event number, return information about
 * an address in the process or at which a breakpoint can be set
 * to monitor the event.
 */
#pragma weak td_ta_event_addr = __td_ta_event_addr
td_err_e
__td_ta_event_addr(td_thragent_t *ta_p, td_event_e event, td_notify_t *notify_p)
{
	if (ta_p == NULL)
		return (TD_BADTA);
	if (event < TD_MIN_EVENT_NUM || event > TD_MAX_EVENT_NUM)
		return (TD_NOEVENT);
	if (notify_p == NULL)
		return (TD_ERR);

	notify_p->type = NOTIFY_BPT;
	notify_p->u.bptaddr = ta_p->tdb_events[event - TD_MIN_EVENT_NUM];

	return (TD_OK);
}
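
/*
 * Example: arming an event.  The debugger enables the event in the
 * process-global mask and plants a breakpoint on the reported event
 * stub.  A minimal sketch; set_breakpoint() is the client's own
 * primitive, not part of libc_db:
 *
 *	td_thr_events_t events;
 *	td_notify_t notify;
 *	td_event_emptyset(&events);
 *	td_event_addset(&events, TD_CREATE);
 *	if (td_ta_set_event(ta, &events) == TD_OK &&
 *	    td_ta_event_addr(ta, TD_CREATE, &notify) == TD_OK &&
 *	    notify.type == NOTIFY_BPT)
 *		set_breakpoint(notify.u.bptaddr);
 */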

/*
 * Add the events in eventset 2 to eventset 1.
 */
static void
eventsetaddset(td_thr_events_t *event1_p, td_thr_events_t *event2_p)
{
	int	i;

	for (i = 0; i < TD_EVENTSIZE; i++)
		event1_p->event_bits[i] |= event2_p->event_bits[i];
}

/*
 * Delete the events in eventset 2 from eventset 1.
 */
static void
eventsetdelset(td_thr_events_t *event1_p, td_thr_events_t *event2_p)
{
	int	i;

	for (i = 0; i < TD_EVENTSIZE; i++)
		event1_p->event_bits[i] &= ~event2_p->event_bits[i];
}

/*
 * Either add or delete the given event set from a thread's event mask.
 */
static td_err_e
mod_eventset(td_thrhandle_t *th_p, td_thr_events_t *events, int onoff)
{
	struct ps_prochandle *ph_p;
	td_err_e	return_val = TD_OK;
	uchar_t		enable;
	td_thr_events_t	evset;
	psaddr_t	psaddr_evset;
	psaddr_t	psaddr_enab;

	if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
		return (return_val);
	if (th_p->th_ta_p->model == PR_MODEL_NATIVE) {
		ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;
		psaddr_evset = (psaddr_t)&ulwp->ul_td_evbuf.eventmask;
		psaddr_enab = (psaddr_t)&ulwp->ul_td_events_enable;
	} else {
#if defined(_LP64) && defined(_SYSCALL32)
		ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;
		psaddr_evset = (psaddr_t)&ulwp->ul_td_evbuf.eventmask;
		psaddr_enab = (psaddr_t)&ulwp->ul_td_events_enable;
#else
		ph_unlock(th_p->th_ta_p);
		return (TD_ERR);
#endif	/* _SYSCALL32 */
	}
	if (ps_pstop(ph_p) != PS_OK) {
		ph_unlock(th_p->th_ta_p);
		return (TD_DBERR);
	}

	if (ps_pdread(ph_p, psaddr_evset, &evset, sizeof (evset)) != PS_OK)
		return_val = TD_DBERR;
	else {
		if (onoff)
			eventsetaddset(&evset, events);
		else
			eventsetdelset(&evset, events);
		if (ps_pdwrite(ph_p, psaddr_evset, &evset, sizeof (evset))
		    != PS_OK)
			return_val = TD_DBERR;
		else {
			enable = 0;
			if (td_eventismember(&evset, TD_EVENTS_ENABLE))
				enable = 1;
			if (ps_pdwrite(ph_p, psaddr_enab,
			    &enable, sizeof (enable)) != PS_OK)
				return_val = TD_DBERR;
		}
	}

	(void) ps_pcontinue(ph_p);
	ph_unlock(th_p->th_ta_p);
	return (return_val);
}

/*
 * Enable or disable tracing for a given thread.  Tracing
 * is filtered based on the event mask of each thread.  Tracing
 * can be turned on/off for the thread without changing the
 * thread's event mask.
 * Currently unused by dbx.
 */
#pragma weak td_thr_event_enable = __td_thr_event_enable
td_err_e
__td_thr_event_enable(td_thrhandle_t *th_p, int onoff)
{
	td_thr_events_t	evset;

	td_event_emptyset(&evset);
	td_event_addset(&evset, TD_EVENTS_ENABLE);
	return (mod_eventset(th_p, &evset, onoff));
}

/*
 * Set event mask to enable event.  event is turned on in
 * event mask for thread.  If a thread encounters an event
 * for which its event mask is on, notification will be sent
 * to the debugger.
 * Addresses for each event are provided to the
 * debugger.  It is assumed that a breakpoint of some type will
 * be placed at that address.  If the event mask for the thread
 * is on, the instruction at the address will be executed.
 * Otherwise, the instruction will be skipped.
 */
#pragma weak td_thr_set_event = __td_thr_set_event
td_err_e
__td_thr_set_event(td_thrhandle_t *th_p, td_thr_events_t *events)
{
	return (mod_eventset(th_p, events, 1));
}

/*
 * Enable or disable a set of events in the process-global event mask,
 * depending on the value of onoff.
 */
static td_err_e
td_ta_mod_event(td_thragent_t *ta_p, td_thr_events_t *events, int onoff)
{
	struct ps_prochandle *ph_p;
	td_thr_events_t targ_eventset;
	td_err_e	return_val;

	if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
		return (return_val);
	if (ps_pstop(ph_p) != PS_OK) {
		ph_unlock(ta_p);
		return (TD_DBERR);
	}
	if (ps_pdread(ph_p, ta_p->tdb_eventmask_addr,
	    &targ_eventset, sizeof (targ_eventset)) != PS_OK)
		return_val = TD_DBERR;
	else {
		if (onoff)
			eventsetaddset(&targ_eventset, events);
		else
			eventsetdelset(&targ_eventset, events);
		if (ps_pdwrite(ph_p, ta_p->tdb_eventmask_addr,
		    &targ_eventset, sizeof (targ_eventset)) != PS_OK)
			return_val = TD_DBERR;
	}
	(void) ps_pcontinue(ph_p);
	ph_unlock(ta_p);
	return (return_val);
}

/*
 * Enable a set of events in the process-global event mask.
 */
#pragma weak td_ta_set_event = __td_ta_set_event
td_err_e
__td_ta_set_event(td_thragent_t *ta_p, td_thr_events_t *events)
{
	return (td_ta_mod_event(ta_p, events, 1));
}

/*
 * Set event mask to disable the given event set; these events are cleared
 * from the event mask of the thread.  Events that occur for a thread
 * with the event masked off will not cause notification to be
 * sent to the debugger (see td_thr_set_event for fuller description).
 */
#pragma weak td_thr_clear_event = __td_thr_clear_event
td_err_e
__td_thr_clear_event(td_thrhandle_t *th_p, td_thr_events_t *events)
{
	return (mod_eventset(th_p, events, 0));
}

/*
 * Disable a set of events in the process-global event mask.
 */
#pragma weak td_ta_clear_event = __td_ta_clear_event
td_err_e
__td_ta_clear_event(td_thragent_t *ta_p, td_thr_events_t *events)
{
	return (td_ta_mod_event(ta_p, events, 0));
}

/*
 * This function returns the most recent event message, if any,
 * associated with a thread.  Given a thread handle, return the message
 * corresponding to the event encountered by the thread.  Only one
 * message per thread is saved.  Messages from earlier events are lost
 * when later events occur.
 */
#pragma weak td_thr_event_getmsg = __td_thr_event_getmsg
td_err_e
__td_thr_event_getmsg(td_thrhandle_t *th_p, td_event_msg_t *msg)
{
	struct ps_prochandle *ph_p;
	td_err_e return_val = TD_OK;
	psaddr_t psaddr;

	if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
		return (return_val);
	if (ps_pstop(ph_p) != PS_OK) {
		ph_unlock(th_p->th_ta_p);
		return (TD_DBERR);
	}
	if (th_p->th_ta_p->model == PR_MODEL_NATIVE) {
		ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;
		td_evbuf_t evbuf;

		psaddr = (psaddr_t)&ulwp->ul_td_evbuf;
		if (ps_pdread(ph_p, psaddr, &evbuf, sizeof (evbuf)) != PS_OK) {
			return_val = TD_DBERR;
		} else if (evbuf.eventnum == TD_EVENT_NONE) {
			return_val = TD_NOEVENT;
		} else {
			msg->event = evbuf.eventnum;
			msg->th_p = (td_thrhandle_t *)th_p;
			msg->msg.data = (uintptr_t)evbuf.eventdata;
			/* "Consume" the message */
			evbuf.eventnum = TD_EVENT_NONE;
			evbuf.eventdata = NULL;
			if (ps_pdwrite(ph_p, psaddr, &evbuf, sizeof (evbuf))
			    != PS_OK)
				return_val = TD_DBERR;
		}
	} else {
#if defined(_LP64) && defined(_SYSCALL32)
		ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;
		td_evbuf32_t evbuf;

		psaddr = (psaddr_t)&ulwp->ul_td_evbuf;
		if (ps_pdread(ph_p, psaddr, &evbuf, sizeof (evbuf)) != PS_OK) {
			return_val = TD_DBERR;
		} else if (evbuf.eventnum == TD_EVENT_NONE) {
			return_val = TD_NOEVENT;
		} else {
			msg->event = evbuf.eventnum;
			msg->th_p = (td_thrhandle_t *)th_p;
			msg->msg.data = (uintptr_t)evbuf.eventdata;
			/* "Consume" the message */
			evbuf.eventnum = TD_EVENT_NONE;
			evbuf.eventdata = NULL;
			if (ps_pdwrite(ph_p, psaddr, &evbuf, sizeof (evbuf))
			    != PS_OK)
				return_val = TD_DBERR;
		}
#else
		return_val = TD_ERR;
#endif	/* _SYSCALL32 */
	}

	(void) ps_pcontinue(ph_p);
	ph_unlock(th_p->th_ta_p);
	return (return_val);
}

/*
 * The callback function td_ta_event_getmsg uses when looking for
 * a thread with an event.  A thin wrapper around td_thr_event_getmsg.
 */
static int
event_msg_cb(const td_thrhandle_t *th_p, void *arg)
{
	static td_thrhandle_t th;
	td_event_msg_t *msg = arg;

	if (__td_thr_event_getmsg((td_thrhandle_t *)th_p, msg) == TD_OK) {
		/*
		 * Got an event, stop iterating.
		 *
		 * Because of past mistakes in interface definition,
		 * we are forced to pass back a static local variable
		 * for the thread handle because th_p is a pointer
		 * to a local variable in __td_ta_thr_iter().
		 */
		th = *th_p;
		msg->th_p = &th;
		return (1);
	}
	return (0);
}

/*
 * This function is just like td_thr_event_getmsg, except that it is
 * passed a process handle rather than a thread handle, and returns
 * an event message for some thread in the process that has an event
 * message pending.  If no thread has an event message pending, this
 * routine returns TD_NOEVENT.  Thus, all pending event messages may
 * be collected from a process by repeatedly calling this routine
 * until it returns TD_NOEVENT.
 */
#pragma weak td_ta_event_getmsg = __td_ta_event_getmsg
td_err_e
__td_ta_event_getmsg(td_thragent_t *ta_p, td_event_msg_t *msg)
{
	td_err_e return_val;

	if (ta_p == NULL)
		return (TD_BADTA);
	if (ta_p->ph_p == NULL)
		return (TD_BADPH);
	if (msg == NULL)
		return (TD_ERR);
	msg->event = TD_EVENT_NONE;
	if ((return_val = __td_ta_thr_iter(ta_p, event_msg_cb, msg,
	    TD_THR_ANY_STATE, TD_THR_LOWEST_PRIORITY, TD_SIGNO_MASK,
	    TD_THR_ANY_USER_FLAGS)) != TD_OK)
		return (return_val);
	if (msg->event == TD_EVENT_NONE)
		return (TD_NOEVENT);
	return (TD_OK);
}
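
/*
 * Example: draining all pending event messages by calling this routine
 * until it returns TD_NOEVENT, as described above.  handle_event() is a
 * stand-in for the client's event dispatcher:
 *
 *	td_event_msg_t msg;
 *	while (td_ta_event_getmsg(ta, &msg) == TD_OK)
 *		handle_event(msg.event, msg.th_p, msg.msg.data);
 */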

static lwpid_t
thr_to_lwpid(const td_thrhandle_t *th_p)
{
	struct ps_prochandle *ph_p = th_p->th_ta_p->ph_p;
	lwpid_t lwpid;

	/*
	 * The caller holds the prochandle lock
	 * and has already verified everything.
	 */
	if (th_p->th_ta_p->model == PR_MODEL_NATIVE) {
		ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;

		if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_lwpid,
		    &lwpid, sizeof (lwpid)) != PS_OK)
			lwpid = 0;
		else if (lwpid == 0)
			lwpid = 1;
	} else {
#if defined(_LP64) && defined(_SYSCALL32)
		ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;

		if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_lwpid,
		    &lwpid, sizeof (lwpid)) != PS_OK)
			lwpid = 0;
		else if (lwpid == 0)
			lwpid = 1;
#else
		lwpid = 0;
#endif	/* _SYSCALL32 */
	}

	return (lwpid);
}

/*
 * Suspend a thread.
 * XXX: What does this mean in a one-level model?
 */
#pragma weak td_thr_dbsuspend = __td_thr_dbsuspend
td_err_e
__td_thr_dbsuspend(const td_thrhandle_t *th_p)
{
	struct ps_prochandle *ph_p;
	td_err_e return_val;

	if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
		return (return_val);
	if (ps_lstop(ph_p, thr_to_lwpid(th_p)) != PS_OK)
		return_val = TD_DBERR;
	ph_unlock(th_p->th_ta_p);
	return (return_val);
}

/*
 * Resume a suspended thread.
 * XXX: What does this mean in a one-level model?
 */
#pragma weak td_thr_dbresume = __td_thr_dbresume
td_err_e
__td_thr_dbresume(const td_thrhandle_t *th_p)
{
	struct ps_prochandle *ph_p;
	td_err_e return_val;

	if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
		return (return_val);
	if (ps_lcontinue(ph_p, thr_to_lwpid(th_p)) != PS_OK)
		return_val = TD_DBERR;
	ph_unlock(th_p->th_ta_p);
	return (return_val);
}

/*
 * Set a thread's signal mask.
 * Currently unused by dbx.
 */
/* ARGSUSED */
#pragma weak td_thr_sigsetmask = __td_thr_sigsetmask
td_err_e
__td_thr_sigsetmask(const td_thrhandle_t *th_p, const sigset_t ti_sigmask)
{
	return (TD_NOCAPAB);
}

/*
 * Set a thread's "signals-pending" set.
 * Currently unused by dbx.
 */
/* ARGSUSED */
#pragma weak td_thr_setsigpending = __td_thr_setsigpending
td_err_e
__td_thr_setsigpending(const td_thrhandle_t *th_p,
	uchar_t ti_pending_flag, const sigset_t ti_pending)
{
	return (TD_NOCAPAB);
}

/*
 * Get a thread's general register set.
 */
#pragma weak td_thr_getgregs = __td_thr_getgregs
td_err_e
__td_thr_getgregs(td_thrhandle_t *th_p, prgregset_t regset)
{
	struct ps_prochandle *ph_p;
	td_err_e return_val;

	if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
		return (return_val);
	if (ps_pstop(ph_p) != PS_OK) {
		ph_unlock(th_p->th_ta_p);
		return (TD_DBERR);
	}

	if (ps_lgetregs(ph_p, thr_to_lwpid(th_p), regset) != PS_OK)
		return_val = TD_DBERR;

	(void) ps_pcontinue(ph_p);
	ph_unlock(th_p->th_ta_p);
	return (return_val);
}
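
/*
 * Example: a read-modify-write of a stopped thread's registers.  A
 * minimal sketch; R_PC is the architecture-specific program-counter
 * index from <sys/regset.h> and new_pc is hypothetical client data:
 *
 *	prgregset_t regs;
 *	if (td_thr_getgregs(&th, regs) == TD_OK) {
 *		regs[R_PC] = new_pc;
 *		(void) td_thr_setgregs(&th, regs);
 *	}
 */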

/*
 * Set a thread's general register set.
 */
#pragma weak td_thr_setgregs = __td_thr_setgregs
td_err_e
__td_thr_setgregs(td_thrhandle_t *th_p, const prgregset_t regset)
{
	struct ps_prochandle *ph_p;
	td_err_e return_val;

	if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
		return (return_val);
	if (ps_pstop(ph_p) != PS_OK) {
		ph_unlock(th_p->th_ta_p);
		return (TD_DBERR);
	}

	if (ps_lsetregs(ph_p, thr_to_lwpid(th_p), regset) != PS_OK)
		return_val = TD_DBERR;

	(void) ps_pcontinue(ph_p);
	ph_unlock(th_p->th_ta_p);
	return (return_val);
}

/*
 * Get a thread's floating-point register set.
 */
#pragma weak td_thr_getfpregs = __td_thr_getfpregs
td_err_e
__td_thr_getfpregs(td_thrhandle_t *th_p, prfpregset_t *fpregset)
{
	struct ps_prochandle *ph_p;
	td_err_e return_val;

	if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
		return (return_val);
	if (ps_pstop(ph_p) != PS_OK) {
		ph_unlock(th_p->th_ta_p);
		return (TD_DBERR);
	}

	if (ps_lgetfpregs(ph_p, thr_to_lwpid(th_p), fpregset) != PS_OK)
		return_val = TD_DBERR;

	(void) ps_pcontinue(ph_p);
	ph_unlock(th_p->th_ta_p);
	return (return_val);
}

/*
 * Set a thread's floating-point register set.
 */
#pragma weak td_thr_setfpregs = __td_thr_setfpregs
td_err_e
__td_thr_setfpregs(td_thrhandle_t *th_p, const prfpregset_t *fpregset)
{
	struct ps_prochandle *ph_p;
	td_err_e return_val;

	if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
		return (return_val);
	if (ps_pstop(ph_p) != PS_OK) {
		ph_unlock(th_p->th_ta_p);
		return (TD_DBERR);
	}

	if (ps_lsetfpregs(ph_p, thr_to_lwpid(th_p), fpregset) != PS_OK)
		return_val = TD_DBERR;

	(void) ps_pcontinue(ph_p);
	ph_unlock(th_p->th_ta_p);
	return (return_val);
}

/*
 * Get the size of the extra state register set for this architecture.
 * Currently unused by dbx.
 */
#pragma weak td_thr_getxregsize = __td_thr_getxregsize
td_err_e
__td_thr_getxregsize(td_thrhandle_t *th_p, int *xregsize)
{
#if defined(__sparc)
	struct ps_prochandle *ph_p;
	td_err_e return_val;

	if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
		return (return_val);
	if (ps_pstop(ph_p) != PS_OK) {
		ph_unlock(th_p->th_ta_p);
		return (TD_DBERR);
	}

	if (ps_lgetxregsize(ph_p, thr_to_lwpid(th_p), xregsize) != PS_OK)
		return_val = TD_DBERR;

	(void) ps_pcontinue(ph_p);
	ph_unlock(th_p->th_ta_p);
	return (return_val);
#else	/* __sparc */
	return (TD_NOXREGS);
#endif	/* __sparc */
}

/*
 * Get a thread's extra state register set.
 */
#pragma weak td_thr_getxregs = __td_thr_getxregs
td_err_e
__td_thr_getxregs(td_thrhandle_t *th_p, void *xregset)
{
#if defined(__sparc)
	struct ps_prochandle *ph_p;
	td_err_e return_val;

	if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
		return (return_val);
	if (ps_pstop(ph_p) != PS_OK) {
		ph_unlock(th_p->th_ta_p);
		return (TD_DBERR);
	}

	if (ps_lgetxregs(ph_p, thr_to_lwpid(th_p), (caddr_t)xregset) != PS_OK)
		return_val = TD_DBERR;

	(void) ps_pcontinue(ph_p);
	ph_unlock(th_p->th_ta_p);
	return (return_val);
#else	/* __sparc */
	return (TD_NOXREGS);
#endif	/* __sparc */
}

/*
 * Set a thread's extra state register set.
 */
#pragma weak td_thr_setxregs = __td_thr_setxregs
td_err_e
__td_thr_setxregs(td_thrhandle_t *th_p, const void *xregset)
{
#if defined(__sparc)
	struct ps_prochandle *ph_p;
	td_err_e return_val;

	if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
		return (return_val);
	if (ps_pstop(ph_p) != PS_OK) {
		ph_unlock(th_p->th_ta_p);
		return (TD_DBERR);
	}

	if (ps_lsetxregs(ph_p, thr_to_lwpid(th_p), (caddr_t)xregset) != PS_OK)
		return_val = TD_DBERR;

	(void) ps_pcontinue(ph_p);
	ph_unlock(th_p->th_ta_p);
	return (return_val);
#else	/* __sparc */
	return (TD_NOXREGS);
#endif	/* __sparc */
}

struct searcher {
	psaddr_t	addr;
	int		status;
};

/*
 * Check the struct thread address in *th_p against the first
 * value in "data".  If it matches, set the second value in
 * "data" to 1 and return 1 to terminate iterations.
 * This function is used by td_thr_validate() to verify that
 * a thread handle is valid.
 */
static int
td_searcher(const td_thrhandle_t *th_p, void *data)
{
	struct searcher *searcher_data = (struct searcher *)data;

	if (searcher_data->addr == th_p->th_unique) {
		searcher_data->status = 1;
		return (1);
	}
	return (0);
}

/*
 * Validate the thread handle.  Check that
 * a thread exists in the thread agent/process that
 * corresponds to thread with handle *th_p.
 * Currently unused by dbx.
 */
#pragma weak td_thr_validate = __td_thr_validate
td_err_e
__td_thr_validate(const td_thrhandle_t *th_p)
{
	td_err_e return_val;
	struct searcher searcher_data = {0, 0};

	if (th_p == NULL)
		return (TD_BADTH);
	if (th_p->th_unique == NULL || th_p->th_ta_p == NULL)
		return (TD_BADTH);

	/*
	 * LOCKING EXCEPTION - Locking is not required
	 * here because no use of the thread agent is made (other
	 * than the sanity check) and checking of the thread
	 * agent will be done in __td_ta_thr_iter.
	 */

	searcher_data.addr = th_p->th_unique;
	return_val = __td_ta_thr_iter(th_p->th_ta_p,
	    td_searcher, &searcher_data,
	    TD_THR_ANY_STATE, TD_THR_LOWEST_PRIORITY,
	    TD_SIGNO_MASK, TD_THR_ANY_USER_FLAGS);

	if (return_val == TD_OK && searcher_data.status == 0)
		return_val = TD_NOTHR;

	return (return_val);
}

/*
 * Get a thread's private binding to a given thread specific
 * data(TSD) key(see thr_getspecific(3T).  If the thread doesn't
 * have a binding for a particular key, then NULL is returned.
 */
#pragma weak td_thr_tsd = __td_thr_tsd
td_err_e
__td_thr_tsd(td_thrhandle_t *th_p, thread_key_t key, void **data_pp)
{
	struct ps_prochandle *ph_p;
	td_thragent_t	*ta_p;
	td_err_e	return_val;
	int		maxkey;
	int		nkey;
	psaddr_t	tsd_paddr;

	if (data_pp == NULL)
		return (TD_ERR);
	*data_pp = NULL;
	if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
		return (return_val);
	ta_p = th_p->th_ta_p;
	if (ps_pstop(ph_p) != PS_OK) {
		ph_unlock(ta_p);
		return (TD_DBERR);
	}

	if (ta_p->model == PR_MODEL_NATIVE) {
		ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;
		tsd_metadata_t tsdm;
		tsd_t stsd;

		if (ps_pdread(ph_p,
		    ta_p->uberdata_addr + offsetof(uberdata_t, tsd_metadata),
		    &tsdm, sizeof (tsdm)) != PS_OK)
			return_val = TD_DBERR;
		else if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_stsd,
		    &tsd_paddr, sizeof (tsd_paddr)) != PS_OK)
			return_val = TD_DBERR;
		else if (tsd_paddr != NULL &&
		    ps_pdread(ph_p, tsd_paddr, &stsd, sizeof (stsd)) != PS_OK)
			return_val = TD_DBERR;
		else {
			maxkey = tsdm.tsdm_nused;
			nkey = tsd_paddr == NULL ? TSD_NFAST : stsd.tsd_nalloc;

			if (key < TSD_NFAST)
				tsd_paddr = (psaddr_t)&ulwp->ul_ftsd[0];
		}
	} else {
#if defined(_LP64) && defined(_SYSCALL32)
		ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;
		tsd_metadata32_t tsdm;
		tsd32_t stsd;
		caddr32_t addr;

		if (ps_pdread(ph_p,
		    ta_p->uberdata_addr + offsetof(uberdata32_t, tsd_metadata),
		    &tsdm, sizeof (tsdm)) != PS_OK)
			return_val = TD_DBERR;
		else if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_stsd,
		    &addr, sizeof (addr)) != PS_OK)
			return_val = TD_DBERR;
		else if (addr != NULL &&
		    ps_pdread(ph_p, addr, &stsd, sizeof (stsd)) != PS_OK)
			return_val = TD_DBERR;
		else {
			maxkey = tsdm.tsdm_nused;
			nkey = addr == NULL ? TSD_NFAST : stsd.tsd_nalloc;

			if (key < TSD_NFAST)
				tsd_paddr = (psaddr_t)&ulwp->ul_ftsd[0];
			else
				tsd_paddr = addr;
		}
#else
		return_val = TD_ERR;
#endif	/* _SYSCALL32 */
	}

	if (return_val == TD_OK && (key < 1 || key >= maxkey))
		return_val = TD_NOTSD;
	if (return_val != TD_OK || key >= nkey) {
		/* NULL has already been stored in data_pp */
		(void) ps_pcontinue(ph_p);
		ph_unlock(ta_p);
		return (return_val);
	}

	/*
	 * Read the value from the thread's tsd array.
	 */
	if (ta_p->model == PR_MODEL_NATIVE) {
		void *value;

		if (ps_pdread(ph_p, tsd_paddr + key * sizeof (void *),
		    &value, sizeof (value)) != PS_OK)
			return_val = TD_DBERR;
		else
			*data_pp = value;
#if defined(_LP64) && defined(_SYSCALL32)
	} else {
		caddr32_t value32;

		if (ps_pdread(ph_p, tsd_paddr + key * sizeof (caddr32_t),
		    &value32, sizeof (value32)) != PS_OK)
			return_val = TD_DBERR;
		else
			*data_pp = (void *)(uintptr_t)value32;
#endif	/* _SYSCALL32 */
	}

	(void) ps_pcontinue(ph_p);
	ph_unlock(ta_p);
	return (return_val);
}
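
/*
 * Example: reading one thread's binding for a TSD key (the key would
 * normally have been discovered with td_ta_tsd_iter()).  A minimal
 * sketch:
 *
 *	void *value;
 *	if (td_thr_tsd(&th, key, &value) == TD_OK && value != NULL) {
 *		// value is the target-process pointer that the thread
 *		// stored with thr_setspecific()
 *	}
 */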
/*
 * Get the base address of a thread's thread local storage (TLS) block
 * for the module (executable or shared object) identified by 'moduleid'.
 */
#pragma weak td_thr_tlsbase = __td_thr_tlsbase
td_err_e
__td_thr_tlsbase(td_thrhandle_t *th_p, ulong_t moduleid, psaddr_t *base)
{
	struct ps_prochandle *ph_p;
	td_thragent_t *ta_p;
	td_err_e return_val;

	if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
		return (return_val);
	ta_p = th_p->th_ta_p;
	if (ps_pstop(ph_p) != PS_OK) {
		ph_unlock(ta_p);
		return (TD_DBERR);
	}

	if (ta_p->model == PR_MODEL_NATIVE) {
		ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;
		tls_metadata_t tls_metadata;
		TLS_modinfo tlsmod;
		tls_t tls;

		if (ps_pdread(ph_p,
		    ta_p->uberdata_addr + offsetof(uberdata_t, tls_metadata),
		    &tls_metadata, sizeof (tls_metadata)) != PS_OK)
			return_val = TD_DBERR;
		else if (moduleid >= tls_metadata.tls_modinfo.tls_size)
			return_val = TD_NOTLS;
		else if (ps_pdread(ph_p,
		    (psaddr_t)((TLS_modinfo *)
		    tls_metadata.tls_modinfo.tls_data + moduleid),
		    &tlsmod, sizeof (tlsmod)) != PS_OK)
			return_val = TD_DBERR;
		else if (tlsmod.tm_memsz == 0)
			return_val = TD_NOTLS;
		else if (tlsmod.tm_flags & TM_FLG_STATICTLS)
			*base = (psaddr_t)ulwp - tlsmod.tm_stattlsoffset;
		else if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_tls,
		    &tls, sizeof (tls)) != PS_OK)
			return_val = TD_DBERR;
		else if (moduleid >= tls.tls_size)
			return_val = TD_TLSDEFER;
		else if (ps_pdread(ph_p,
		    (psaddr_t)((tls_t *)tls.tls_data + moduleid),
		    &tls, sizeof (tls)) != PS_OK)
			return_val = TD_DBERR;
		else if (tls.tls_size == 0)
			return_val = TD_TLSDEFER;
		else
			*base = (psaddr_t)tls.tls_data;
	} else {
#if defined(_LP64) && defined(_SYSCALL32)
		ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;
		tls_metadata32_t tls_metadata;
		TLS_modinfo32 tlsmod;
		tls32_t tls;

		if (ps_pdread(ph_p,
		    ta_p->uberdata_addr + offsetof(uberdata32_t, tls_metadata),
		    &tls_metadata, sizeof (tls_metadata)) != PS_OK)
			return_val = TD_DBERR;
		else if (moduleid >= tls_metadata.tls_modinfo.tls_size)
			return_val = TD_NOTLS;
		else if (ps_pdread(ph_p,
		    (psaddr_t)((TLS_modinfo32 *)
		    (uintptr_t)tls_metadata.tls_modinfo.tls_data + moduleid),
		    &tlsmod, sizeof (tlsmod)) != PS_OK)
			return_val = TD_DBERR;
		else if (tlsmod.tm_memsz == 0)
			return_val = TD_NOTLS;
		else if (tlsmod.tm_flags & TM_FLG_STATICTLS)
			*base = (psaddr_t)ulwp - tlsmod.tm_stattlsoffset;
		else if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_tls,
		    &tls, sizeof (tls)) != PS_OK)
			return_val = TD_DBERR;
		else if (moduleid >= tls.tls_size)
			return_val = TD_TLSDEFER;
		else if (ps_pdread(ph_p,
		    (psaddr_t)((tls32_t *)(uintptr_t)tls.tls_data + moduleid),
		    &tls, sizeof (tls)) != PS_OK)
			return_val = TD_DBERR;
		else if (tls.tls_size == 0)
			return_val = TD_TLSDEFER;
		else
			*base = (psaddr_t)tls.tls_data;
#else
		return_val = TD_ERR;
#endif	/* _SYSCALL32 */
	}

	(void) ps_pcontinue(ph_p);
	ph_unlock(ta_p);
	return (return_val);
}
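/*
 * Illustrative sketch (not part of the library; kept under #if 0):
 * how a debugger might combine __td_thr_tlsbase() with ps_pdread()
 * to fetch a TLS variable.  The module id and the variable's offset
 * within the module's TLS block are assumed to come from the object's
 * TLS program header; example_read_tls_var is a hypothetical name.
 */
#if 0
static td_err_e
example_read_tls_var(td_thrhandle_t *th_p, ulong_t moduleid,
	psaddr_t offset, void *buf, size_t size)
{
	psaddr_t base;
	td_err_e err;

	if ((err = __td_thr_tlsbase(th_p, moduleid, &base)) != TD_OK)
		return (err);	/* TD_TLSDEFER: block not yet allocated */
	if (ps_pdread(th_p->th_ta_p->ph_p, base + offset,
	    buf, size) != PS_OK)
		return (TD_DBERR);
	return (TD_OK);
}
#endif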
/*
 * Change a thread's priority to the value specified by ti_pri.
 * Currently unused by dbx.
 */
#pragma weak td_thr_setprio = __td_thr_setprio
/* ARGSUSED */
td_err_e
__td_thr_setprio(td_thrhandle_t *th_p, int ti_pri)
{
	return (TD_NOCAPAB);
}
/*
 * This structure links td_thr_lockowner and the lowner_cb callback function.
 */
typedef struct {
	td_sync_iter_f *owner_cb;
	void *owner_cb_arg;
	td_thrhandle_t *th_p;
} lowner_cb_ctl_t;

static int
lowner_cb(const td_synchandle_t *sh_p, void *arg)
{
	lowner_cb_ctl_t *ocb = arg;
	int trunc = 0;
	union {
		rwlock_t rwl;
		mutex_t mx;
	} rw_m;

	if (ps_pdread(sh_p->sh_ta_p->ph_p, sh_p->sh_unique,
	    &rw_m, sizeof (rw_m)) != PS_OK) {
		trunc = 1;
		if (ps_pdread(sh_p->sh_ta_p->ph_p, sh_p->sh_unique,
		    &rw_m.mx, sizeof (rw_m.mx)) != PS_OK)
			return (0);
	}
	if (rw_m.mx.mutex_magic == MUTEX_MAGIC &&
	    rw_m.mx.mutex_owner == ocb->th_p->th_unique)
		return ((ocb->owner_cb)(sh_p, ocb->owner_cb_arg));
	if (!trunc && rw_m.rwl.magic == RWL_MAGIC) {
		mutex_t *rwlock = &rw_m.rwl.mutex;
		if (rwlock->mutex_owner == ocb->th_p->th_unique)
			return ((ocb->owner_cb)(sh_p, ocb->owner_cb_arg));
	}
	return (0);
}
/*
 * Iterate over the set of locks owned by a specified thread.
 * If cb returns a non-zero value, terminate iterations.
 */
#pragma weak td_thr_lockowner = __td_thr_lockowner
td_err_e
__td_thr_lockowner(const td_thrhandle_t *th_p, td_sync_iter_f *cb,
	void *cb_data)
{
	td_thragent_t *ta_p;
	td_err_e return_val;
	lowner_cb_ctl_t lcb;

	/*
	 * Just sanity checks.
	 */
	if (ph_lock_th((td_thrhandle_t *)th_p, &return_val) == NULL)
		return (return_val);
	ta_p = th_p->th_ta_p;
	ph_unlock(ta_p);

	lcb.owner_cb = cb;
	lcb.owner_cb_arg = cb_data;
	lcb.th_p = (td_thrhandle_t *)th_p;
	return (__td_ta_sync_iter(ta_p, lowner_cb, &lcb));
}
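/*
 * Illustrative sketch (not part of the library; kept under #if 0):
 * a minimal __td_thr_lockowner() callback that counts the locks a
 * thread currently owns.  Returning 0 continues the iteration;
 * a non-zero return would stop it.
 */
#if 0
static int
example_count_owned(const td_synchandle_t *sh_p, void *arg)
{
	(*(int *)arg)++;	/* sh_p->sh_unique is the lock's address */
	return (0);
}
/* usage: int n = 0; (void) __td_thr_lockowner(th_p, example_count_owned, &n); */
#endif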
/*
 * If a thread is asleep on a synchronization variable,
 * then get the synchronization handle.
 */
#pragma weak td_thr_sleepinfo = __td_thr_sleepinfo
td_err_e
__td_thr_sleepinfo(const td_thrhandle_t *th_p, td_synchandle_t *sh_p)
{
	struct ps_prochandle *ph_p;
	td_err_e return_val = TD_OK;
	caddr_t wchan;

	if (sh_p == NULL)
		return (TD_ERR);
	if ((ph_p = ph_lock_th((td_thrhandle_t *)th_p, &return_val)) == NULL)
		return (return_val);

	/*
	 * No need to stop the process for a simple read.
	 */
	if (th_p->th_ta_p->model == PR_MODEL_NATIVE) {
		ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;

		if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_wchan,
		    &wchan, sizeof (wchan)) != PS_OK)
			return_val = TD_DBERR;
	} else {
#if defined(_LP64) && defined(_SYSCALL32)
		ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;
		caddr32_t wchan32;

		if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_wchan,
		    &wchan32, sizeof (wchan32)) != PS_OK)
			return_val = TD_DBERR;
		wchan = (caddr_t)(uintptr_t)wchan32;
#else
		return_val = TD_ERR;
#endif	/* _SYSCALL32 */
	}

	if (return_val != TD_OK || wchan == NULL) {
		sh_p->sh_ta_p = NULL;
		sh_p->sh_unique = NULL;
		if (return_val == TD_OK)
			return_val = TD_ERR;
	} else {
		sh_p->sh_ta_p = th_p->th_ta_p;
		sh_p->sh_unique = (psaddr_t)wchan;
	}

	ph_unlock(th_p->th_ta_p);
	return (return_val);
}
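/*
 * Illustrative sketch (not part of the library; kept under #if 0):
 * answering "what is this thread blocked on?" by chaining
 * __td_thr_sleepinfo() into __td_sync_get_info(); th_p is assumed
 * to be a valid thread handle.
 */
#if 0
	td_synchandle_t sh;
	td_syncinfo_t si;

	if (__td_thr_sleepinfo(th_p, &sh) == TD_OK &&
	    __td_sync_get_info(&sh, &si) == TD_OK)
		(void) printf("asleep on sync. object at %p, type %d\n",
		    (void *)si.si_sv_addr, (int)si.si_type);
#endif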
/*
 * Which thread is running on an lwp?
 */
#pragma weak td_ta_map_lwp2thr = __td_ta_map_lwp2thr
td_err_e
__td_ta_map_lwp2thr(td_thragent_t *ta_p, lwpid_t lwpid,
	td_thrhandle_t *th_p)
{
	return (__td_ta_map_id2thr(ta_p, lwpid, th_p));
}
/*
 * Common code for td_sync_get_info() and td_sync_get_stats()
 */
static td_err_e
sync_get_info_common(const td_synchandle_t *sh_p, struct ps_prochandle *ph_p,
	td_syncinfo_t *si_p)
{
	int trunc = 0;
	td_so_un_t generic_so;

	/*
	 * Determine the sync. object type; a little type fudgery here.
	 * First attempt to read the whole union.  If that fails, attempt
	 * to read just the condvar.  A condvar is the smallest sync. object.
	 */
	if (ps_pdread(ph_p, sh_p->sh_unique,
	    &generic_so, sizeof (generic_so)) != PS_OK) {
		trunc = 1;
		if (ps_pdread(ph_p, sh_p->sh_unique, &generic_so.condition,
		    sizeof (generic_so.condition)) != PS_OK)
			return (TD_DBERR);
	}

	switch (generic_so.condition.cond_magic) {
	case MUTEX_MAGIC:
		if (trunc && ps_pdread(ph_p, sh_p->sh_unique,
		    &generic_so.lock, sizeof (generic_so.lock)) != PS_OK)
			return (TD_DBERR);
		si_p->si_type = TD_SYNC_MUTEX;
		si_p->si_shared_type =
		    (generic_so.lock.mutex_type & USYNC_PROCESS);
		(void) memcpy(si_p->si_flags, &generic_so.lock.mutex_flag,
		    sizeof (generic_so.lock.mutex_flag));
		si_p->si_state.mutex_locked =
		    (generic_so.lock.mutex_lockw != 0);
		si_p->si_size = sizeof (generic_so.lock);
		si_p->si_has_waiters = generic_so.lock.mutex_waiters;
		si_p->si_rcount = generic_so.lock.mutex_rcount;
		si_p->si_prioceiling = generic_so.lock.mutex_ceiling;
		if (si_p->si_state.mutex_locked) {
			if (si_p->si_shared_type & USYNC_PROCESS)
				si_p->si_ownerpid =
				    generic_so.lock.mutex_ownerpid;
			si_p->si_owner.th_ta_p = sh_p->sh_ta_p;
			si_p->si_owner.th_unique = generic_so.lock.mutex_owner;
		}
		break;
	case COND_MAGIC:
		si_p->si_type = TD_SYNC_COND;
		si_p->si_shared_type =
		    (generic_so.condition.cond_type & USYNC_PROCESS);
		(void) memcpy(si_p->si_flags, generic_so.condition.flags.flag,
		    sizeof (generic_so.condition.flags.flag));
		si_p->si_size = sizeof (generic_so.condition);
		si_p->si_has_waiters =
		    (generic_so.condition.cond_waiters_user |
		    generic_so.condition.cond_waiters_kernel)? 1 : 0;
		break;
	case SEMA_MAGIC:
		if (trunc && ps_pdread(ph_p, sh_p->sh_unique,
		    &generic_so.semaphore, sizeof (generic_so.semaphore))
		    != PS_OK)
			return (TD_DBERR);
		si_p->si_type = TD_SYNC_SEMA;
		si_p->si_shared_type =
		    (generic_so.semaphore.type & USYNC_PROCESS);
		si_p->si_state.sem_count = generic_so.semaphore.count;
		si_p->si_size = sizeof (generic_so.semaphore);
		si_p->si_has_waiters =
		    ((lwp_sema_t *)&generic_so.semaphore)->flags[7];
		/* this is useless but the old interface provided it */
		si_p->si_data = (psaddr_t)generic_so.semaphore.count;
		break;
	case RWL_MAGIC:
	{
		uint32_t rwstate;

		if (trunc && ps_pdread(ph_p, sh_p->sh_unique,
		    &generic_so.rwlock, sizeof (generic_so.rwlock)) != PS_OK)
			return (TD_DBERR);
		si_p->si_type = TD_SYNC_RWLOCK;
		si_p->si_shared_type =
		    (generic_so.rwlock.rwlock_type & USYNC_PROCESS);
		si_p->si_size = sizeof (generic_so.rwlock);

		rwstate = (uint32_t)generic_so.rwlock.rwlock_readers;
		if (rwstate & URW_WRITE_LOCKED) {
			si_p->si_state.nreaders = -1;
			si_p->si_is_wlock = 1;
			si_p->si_owner.th_ta_p = sh_p->sh_ta_p;
			si_p->si_owner.th_unique =
			    generic_so.rwlock.rwlock_owner;
			if (si_p->si_shared_type & USYNC_PROCESS)
				si_p->si_ownerpid =
				    generic_so.rwlock.rwlock_ownerpid;
		} else {
			si_p->si_state.nreaders = (rwstate & URW_READERS_MASK);
		}
		si_p->si_has_waiters = ((rwstate & URW_HAS_WAITERS) != 0);

		/* this is useless but the old interface provided it */
		si_p->si_data = (psaddr_t)generic_so.rwlock.readers;
		break;
	}
	default:
		return (TD_BADSH);
	}

	si_p->si_ta_p = sh_p->sh_ta_p;
	si_p->si_sv_addr = sh_p->sh_unique;
	return (TD_OK);
}
/*
 * Given a synchronization handle, fill in the
 * information for the synchronization variable into *si_p.
 */
#pragma weak td_sync_get_info = __td_sync_get_info
td_err_e
__td_sync_get_info(const td_synchandle_t *sh_p, td_syncinfo_t *si_p)
{
	struct ps_prochandle *ph_p;
	td_err_e return_val;

	if (si_p == NULL)
		return (TD_ERR);
	(void) memset(si_p, 0, sizeof (*si_p));
	if ((ph_p = ph_lock_sh(sh_p, &return_val)) == NULL)
		return (return_val);
	if (ps_pstop(ph_p) != PS_OK) {
		ph_unlock(sh_p->sh_ta_p);
		return (TD_DBERR);
	}

	return_val = sync_get_info_common(sh_p, ph_p, si_p);

	(void) ps_pcontinue(ph_p);
	ph_unlock(sh_p->sh_ta_p);
	return (return_val);
}
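/*
 * Illustrative sketch (not part of the library; kept under #if 0):
 * classifying a sync. object and, for a held mutex, naming its owner;
 * sh_p is assumed to be a valid synchandle.
 */
#if 0
	td_syncinfo_t si;

	if (__td_sync_get_info(sh_p, &si) == TD_OK &&
	    si.si_type == TD_SYNC_MUTEX && si.si_state.mutex_locked)
		(void) printf("mutex %p held by thread %p\n",
		    (void *)si.si_sv_addr, (void *)si.si_owner.th_unique);
#endif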
static uint_t
tdb_addr_hash64(uint64_t addr)
{
	uint64_t value60 = (addr >> 4);	/* 60 bits */
	uint32_t value30 = (value60 >> 30) ^ (value60 & 0x3fffffff);
	return ((value30 >> 15) ^ (value30 & 0x7fff));
}

static uint_t
tdb_addr_hash32(uint64_t addr)
{
	uint32_t value30 = (addr >> 2);		/* 30 bits */
	return ((value30 >> 15) ^ (value30 & 0x7fff));
}
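/*
 * Worked example (for illustration only): hashing the 64-bit address
 * 0x1234567890abcdef with tdb_addr_hash64():
 *	value60 = 0x1234567890abcdef >> 4 = 0x01234567890abcde
 *	value30 = (value60 >> 30) ^ (value60 & 0x3fffffff)
 *		= 0x048d159e ^ 0x090abcde = 0x0d87a940
 *	result	= (value30 >> 15) ^ (value30 & 0x7fff)
 *		= 0x1b0f ^ 0x2940 = 0x324f
 * The successive folds yield a 15-bit bucket index in [0, 0x8000).
 */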
static td_err_e
read_sync_stats(td_thragent_t *ta_p, psaddr_t hash_table,
	psaddr_t sync_obj_addr, tdb_sync_stats_t *sync_stats)
{
	psaddr_t next_desc;
	uint64_t first;
	uint_t ix;

	/*
	 * Compute the hash table index from the synch object's address.
	 */
	if (ta_p->model == PR_MODEL_LP64)
		ix = tdb_addr_hash64(sync_obj_addr);
	else
		ix = tdb_addr_hash32(sync_obj_addr);

	/*
	 * Get the address of the first element in the linked list.
	 */
	if (ps_pdread(ta_p->ph_p, hash_table + ix * sizeof (uint64_t),
	    &first, sizeof (first)) != PS_OK)
		return (TD_DBERR);

	/*
	 * Search the linked list for an entry for the synch object.
	 */
	for (next_desc = (psaddr_t)first; next_desc != NULL;
	    next_desc = (psaddr_t)sync_stats->next) {
		if (ps_pdread(ta_p->ph_p, next_desc,
		    sync_stats, sizeof (*sync_stats)) != PS_OK)
			return (TD_DBERR);
		if (sync_stats->sync_addr == sync_obj_addr)
			return (TD_OK);
	}

	(void) memset(sync_stats, 0, sizeof (*sync_stats));
	return (TD_OK);
}
/*
 * Given a synchronization handle, fill in the
 * statistics for the synchronization variable into *ss_p.
 */
#pragma weak td_sync_get_stats = __td_sync_get_stats
td_err_e
__td_sync_get_stats(const td_synchandle_t *sh_p, td_syncstats_t *ss_p)
{
	struct ps_prochandle *ph_p;
	td_thragent_t *ta_p;
	td_err_e return_val;
	register_sync_t enable;
	psaddr_t hashaddr;
	tdb_sync_stats_t sync_stats;
	size_t ix;

	if (ss_p == NULL)
		return (TD_ERR);
	(void) memset(ss_p, 0, sizeof (*ss_p));
	if ((ph_p = ph_lock_sh(sh_p, &return_val)) == NULL)
		return (return_val);
	ta_p = sh_p->sh_ta_p;
	if (ps_pstop(ph_p) != PS_OK) {
		ph_unlock(ta_p);
		return (TD_DBERR);
	}

	if ((return_val = sync_get_info_common(sh_p, ph_p, &ss_p->ss_info))
	    != TD_OK) {
		if (return_val != TD_BADSH)
			goto out;
		/* we can correct TD_BADSH */
		(void) memset(&ss_p->ss_info, 0, sizeof (ss_p->ss_info));
		ss_p->ss_info.si_ta_p = sh_p->sh_ta_p;
		ss_p->ss_info.si_sv_addr = sh_p->sh_unique;
		/* we correct si_type and si_size below */
	}

	if (ps_pdread(ph_p, ta_p->tdb_register_sync_addr,
	    &enable, sizeof (enable)) != PS_OK) {
		return_val = TD_DBERR;
		goto out;
	}
	if (enable != REGISTER_SYNC_ON)
		goto out;

	/*
	 * Get the address of the hash table in the target process.
	 */
	if (ta_p->model == PR_MODEL_NATIVE) {
		if (ps_pdread(ph_p, ta_p->uberdata_addr +
		    offsetof(uberdata_t, tdb.tdb_sync_addr_hash),
		    &hashaddr, sizeof (hashaddr)) != PS_OK) {
			return_val = TD_DBERR;
			goto out;
		}
	} else {
#if defined(_LP64) && defined(_SYSCALL32)
		caddr32_t addr;

		if (ps_pdread(ph_p, ta_p->uberdata_addr +
		    offsetof(uberdata32_t, tdb.tdb_sync_addr_hash),
		    &addr, sizeof (addr)) != PS_OK) {
			return_val = TD_DBERR;
			goto out;
		}
		hashaddr = addr;
#else
		return_val = TD_ERR;
		goto out;
#endif	/* _SYSCALL32 */
	}

	if (hashaddr == 0)
		return_val = TD_BADSH;
	else
		return_val = read_sync_stats(ta_p, hashaddr,
		    sh_p->sh_unique, &sync_stats);
	if (return_val != TD_OK)
		goto out;

	/*
	 * We have the hash table entry.  Transfer the data to
	 * the td_syncstats_t structure provided by the caller.
	 */
	switch (sync_stats.un.type) {
	case TDB_MUTEX:
	{
		td_mutex_stats_t *msp = &ss_p->ss_un.mutex;

		ss_p->ss_info.si_type = TD_SYNC_MUTEX;
		ss_p->ss_info.si_size = sizeof (mutex_t);
		msp->mutex_lock =
		    sync_stats.un.mutex.mutex_lock;
		msp->mutex_sleep =
		    sync_stats.un.mutex.mutex_sleep;
		msp->mutex_sleep_time =
		    sync_stats.un.mutex.mutex_sleep_time;
		msp->mutex_hold_time =
		    sync_stats.un.mutex.mutex_hold_time;
		msp->mutex_try =
		    sync_stats.un.mutex.mutex_try;
		msp->mutex_try_fail =
		    sync_stats.un.mutex.mutex_try_fail;
		if (sync_stats.sync_addr >= ta_p->hash_table_addr &&
		    (ix = sync_stats.sync_addr - ta_p->hash_table_addr)
		    < ta_p->hash_size * sizeof (thr_hash_table_t))
			msp->mutex_internal =
			    ix / sizeof (thr_hash_table_t) + 1;
		break;
	}
	case TDB_COND:
	{
		td_cond_stats_t *csp = &ss_p->ss_un.cond;

		ss_p->ss_info.si_type = TD_SYNC_COND;
		ss_p->ss_info.si_size = sizeof (cond_t);
		csp->cond_wait =
		    sync_stats.un.cond.cond_wait;
		csp->cond_timedwait =
		    sync_stats.un.cond.cond_timedwait;
		csp->cond_wait_sleep_time =
		    sync_stats.un.cond.cond_wait_sleep_time;
		csp->cond_timedwait_sleep_time =
		    sync_stats.un.cond.cond_timedwait_sleep_time;
		csp->cond_timedwait_timeout =
		    sync_stats.un.cond.cond_timedwait_timeout;
		csp->cond_signal =
		    sync_stats.un.cond.cond_signal;
		csp->cond_broadcast =
		    sync_stats.un.cond.cond_broadcast;
		if (sync_stats.sync_addr >= ta_p->hash_table_addr &&
		    (ix = sync_stats.sync_addr - ta_p->hash_table_addr)
		    < ta_p->hash_size * sizeof (thr_hash_table_t))
			csp->cond_internal =
			    ix / sizeof (thr_hash_table_t) + 1;
		break;
	}
	case TDB_RWLOCK:
	{
		td_rwlock_stats_t *rwsp = &ss_p->ss_un.rwlock;

		ss_p->ss_info.si_type = TD_SYNC_RWLOCK;
		ss_p->ss_info.si_size = sizeof (rwlock_t);
		rwsp->rw_rdlock =
		    sync_stats.un.rwlock.rw_rdlock;
		rwsp->rw_rdlock_try =
		    sync_stats.un.rwlock.rw_rdlock_try;
		rwsp->rw_rdlock_try_fail =
		    sync_stats.un.rwlock.rw_rdlock_try_fail;
		rwsp->rw_wrlock =
		    sync_stats.un.rwlock.rw_wrlock;
		rwsp->rw_wrlock_hold_time =
		    sync_stats.un.rwlock.rw_wrlock_hold_time;
		rwsp->rw_wrlock_try =
		    sync_stats.un.rwlock.rw_wrlock_try;
		rwsp->rw_wrlock_try_fail =
		    sync_stats.un.rwlock.rw_wrlock_try_fail;
		break;
	}
	case TDB_SEMA:
	{
		td_sema_stats_t *ssp = &ss_p->ss_un.sema;

		ss_p->ss_info.si_type = TD_SYNC_SEMA;
		ss_p->ss_info.si_size = sizeof (sema_t);
		ssp->sema_wait =
		    sync_stats.un.sema.sema_wait;
		ssp->sema_wait_sleep =
		    sync_stats.un.sema.sema_wait_sleep;
		ssp->sema_wait_sleep_time =
		    sync_stats.un.sema.sema_wait_sleep_time;
		ssp->sema_trywait =
		    sync_stats.un.sema.sema_trywait;
		ssp->sema_trywait_fail =
		    sync_stats.un.sema.sema_trywait_fail;
		ssp->sema_post =
		    sync_stats.un.sema.sema_post;
		ssp->sema_max_count =
		    sync_stats.un.sema.sema_max_count;
		ssp->sema_min_count =
		    sync_stats.un.sema.sema_min_count;
		break;
	}
	default:
		return_val = TD_BADSH;
		break;
	}

out:
	(void) ps_pcontinue(ph_p);
	ph_unlock(ta_p);
	return (return_val);
}
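/*
 * Illustrative sketch (not part of the library; kept under #if 0):
 * sampling mutex contention statistics.  Stats are only collected
 * while sync. object registration (REGISTER_SYNC_ON) is enabled in
 * the target; the casts below are only for printing and make no
 * assumption about the exact field types in td_mutex_stats_t.
 */
#if 0
	td_syncstats_t ss;

	if (__td_sync_get_stats(sh_p, &ss) == TD_OK &&
	    ss.ss_info.si_type == TD_SYNC_MUTEX)
		(void) printf("lock ops %llu, sleeps %llu\n",
		    (u_longlong_t)ss.ss_un.mutex.mutex_lock,
		    (u_longlong_t)ss.ss_un.mutex.mutex_sleep);
#endif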
/*
 * Change the state of a synchronization variable.
 *	1) mutex lock state set to value
 *	2) semaphore's count set to value
 *	3) writer's lock set by value < 0
 *	4) reader's lock number of readers set to value >= 0
 * Currently unused by dbx.
 */
#pragma weak td_sync_setstate = __td_sync_setstate
td_err_e
__td_sync_setstate(const td_synchandle_t *sh_p, long lvalue)
{
	struct ps_prochandle *ph_p;
	int trunc = 0;
	td_err_e return_val;
	td_so_un_t generic_so;
	uint32_t *rwstate;
	int value = (int)lvalue;

	if ((ph_p = ph_lock_sh(sh_p, &return_val)) == NULL)
		return (return_val);
	if (ps_pstop(ph_p) != PS_OK) {
		ph_unlock(sh_p->sh_ta_p);
		return (TD_DBERR);
	}

	/*
	 * Read the synch. variable information.
	 * First attempt to read the whole union and if that fails
	 * fall back to reading only the smallest member, the condvar.
	 */
	if (ps_pdread(ph_p, sh_p->sh_unique, &generic_so,
	    sizeof (generic_so)) != PS_OK) {
		trunc = 1;
		if (ps_pdread(ph_p, sh_p->sh_unique, &generic_so.condition,
		    sizeof (generic_so.condition)) != PS_OK) {
			(void) ps_pcontinue(ph_p);
			ph_unlock(sh_p->sh_ta_p);
			return (TD_DBERR);
		}
	}

	/*
	 * Set the new value in the sync. variable: read the synch. variable
	 * information from the process, reset its value, and write it back.
	 */
	switch (generic_so.condition.mutex_magic) {
	case MUTEX_MAGIC:
		if (trunc && ps_pdread(ph_p, sh_p->sh_unique,
		    &generic_so.lock, sizeof (generic_so.lock)) != PS_OK) {
			return_val = TD_DBERR;
			break;
		}
		generic_so.lock.mutex_lockw = (uint8_t)value;
		if (ps_pdwrite(ph_p, sh_p->sh_unique, &generic_so.lock,
		    sizeof (generic_so.lock)) != PS_OK)
			return_val = TD_DBERR;
		break;
	case SEMA_MAGIC:
		if (trunc && ps_pdread(ph_p, sh_p->sh_unique,
		    &generic_so.semaphore, sizeof (generic_so.semaphore))
		    != PS_OK) {
			return_val = TD_DBERR;
			break;
		}
		generic_so.semaphore.count = value;
		if (ps_pdwrite(ph_p, sh_p->sh_unique, &generic_so.semaphore,
		    sizeof (generic_so.semaphore)) != PS_OK)
			return_val = TD_DBERR;
		break;
	case COND_MAGIC:
		/* Operation not supported on a condition variable */
		return_val = TD_ERR;
		break;
	case RWL_MAGIC:
		if (trunc && ps_pdread(ph_p, sh_p->sh_unique,
		    &generic_so.rwlock, sizeof (generic_so.rwlock)) != PS_OK) {
			return_val = TD_DBERR;
			break;
		}
		rwstate = (uint32_t *)&generic_so.rwlock.readers;
		*rwstate &= URW_HAS_WAITERS;
		if (value < 0)
			*rwstate |= URW_WRITE_LOCKED;
		else
			*rwstate |= (value & URW_READERS_MASK);
		if (ps_pdwrite(ph_p, sh_p->sh_unique, &generic_so.rwlock,
		    sizeof (generic_so.rwlock)) != PS_OK)
			return_val = TD_DBERR;
		break;
	default:
		/* Bad sync. object type */
		return_val = TD_BADSH;
		break;
	}

	(void) ps_pcontinue(ph_p);
	ph_unlock(sh_p->sh_ta_p);
	return (return_val);
}
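/*
 * Illustrative sketch (not part of the library; kept under #if 0):
 * the value encodings accepted by __td_sync_setstate(), per the
 * comment above; sh_p is assumed to be a valid synchandle.
 */
#if 0
	(void) __td_sync_setstate(sh_p, 0);	/* mutex: clear lock word */
	(void) __td_sync_setstate(sh_p, 3);	/* semaphore: count = 3 */
	(void) __td_sync_setstate(sh_p, -1);	/* rwlock: write-locked */
	(void) __td_sync_setstate(sh_p, 2);	/* rwlock: two readers */
#endif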
/*
 * This structure links td_sync_waiters and the waiters_cb callback function.
 */
typedef struct {
	td_thr_iter_f *waiter_cb;
	psaddr_t sync_obj_addr;
	uint16_t sync_magic;
	void *waiter_cb_arg;
	td_err_e errcode;
} waiter_cb_ctl_t;

static int
waiters_cb(const td_thrhandle_t *th_p, void *arg)
{
	td_thragent_t *ta_p = th_p->th_ta_p;
	struct ps_prochandle *ph_p = ta_p->ph_p;
	waiter_cb_ctl_t *wcb = arg;
	caddr_t wchan;

	if (ta_p->model == PR_MODEL_NATIVE) {
		ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;

		if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_wchan,
		    &wchan, sizeof (wchan)) != PS_OK) {
			wcb->errcode = TD_DBERR;
			return (1);
		}
	} else {
#if defined(_LP64) && defined(_SYSCALL32)
		ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;
		caddr32_t wchan32;

		if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_wchan,
		    &wchan32, sizeof (wchan32)) != PS_OK) {
			wcb->errcode = TD_DBERR;
			return (1);
		}
		wchan = (caddr_t)(uintptr_t)wchan32;
#else
		wcb->errcode = TD_ERR;
		return (1);
#endif	/* _SYSCALL32 */
	}

	if (wchan == NULL)
		return (0);

	if (wchan == (caddr_t)wcb->sync_obj_addr)
		return ((*wcb->waiter_cb)(th_p, wcb->waiter_cb_arg));

	return (0);
}
/*
 * For a given synchronization variable, iterate over the
 * set of waiting threads.  The call back function is passed
 * two parameters, a pointer to a thread handle and a pointer
 * to extra call back data.
 */
#pragma weak td_sync_waiters = __td_sync_waiters
td_err_e
__td_sync_waiters(const td_synchandle_t *sh_p, td_thr_iter_f *cb, void *cb_data)
{
	struct ps_prochandle *ph_p;
	waiter_cb_ctl_t wcb;
	td_err_e return_val;

	if ((ph_p = ph_lock_sh(sh_p, &return_val)) == NULL)
		return (return_val);
	if (ps_pdread(ph_p,
	    (psaddr_t)&((mutex_t *)sh_p->sh_unique)->mutex_magic,
	    (caddr_t)&wcb.sync_magic, sizeof (wcb.sync_magic)) != PS_OK) {
		ph_unlock(sh_p->sh_ta_p);
		return (TD_DBERR);
	}
	ph_unlock(sh_p->sh_ta_p);

	switch (wcb.sync_magic) {
	case MUTEX_MAGIC:
	case COND_MAGIC:
	case SEMA_MAGIC:
	case RWL_MAGIC:
		break;
	default:
		return (TD_BADSH);
	}

	wcb.waiter_cb = cb;
	wcb.sync_obj_addr = sh_p->sh_unique;
	wcb.waiter_cb_arg = cb_data;
	wcb.errcode = TD_OK;
	return_val = __td_ta_thr_iter(sh_p->sh_ta_p, waiters_cb, &wcb,
	    TD_THR_SLEEP, TD_THR_LOWEST_PRIORITY,
	    TD_SIGNO_MASK, TD_THR_ANY_USER_FLAGS);

	if (return_val != TD_OK)
		return (return_val);

	return (wcb.errcode);
}
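/*
 * Illustrative sketch (not part of the library; kept under #if 0):
 * printing every thread asleep on a given synchronization variable.
 */
#if 0
static int
example_print_waiter(const td_thrhandle_t *th_p, void *arg)
{
	(void) printf("waiter: thread %p\n", (void *)th_p->th_unique);
	return (0);		/* keep iterating */
}
/* usage: (void) __td_sync_waiters(sh_p, example_print_waiter, NULL); */
#endif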