/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
/*
 * Copyright (c) 2014, Joyent, Inc.  All rights reserved.
 */
#include "thr_uberdata.h"
#include <thread_db.h>
/*
 * The thread agent structure, one per target process.
 */
struct td_thragent {
	rwlock_t	rwlock;		/* protects this structure */
	struct ps_prochandle	*ph_p;
	int		initialized;	/* 0:no, 1:partially, 2:fully */
	int		sync_tracking;
	int		model;
	int		primary_map;
	psaddr_t	bootstrap_addr;
	psaddr_t	uberdata_addr;
	psaddr_t	tdb_eventmask_addr;
	psaddr_t	tdb_register_sync_addr;
	psaddr_t	tdb_events[TD_MAX_EVENT_NUM - TD_MIN_EVENT_NUM + 1];
	psaddr_t	hash_table_addr;
	int		hash_size;
	lwpid_t		single_lwpid;
	psaddr_t	single_ulwp_addr;
};
/*
 * This is the name of the variable in libc that contains
 * the uberdata address that we will need.
 */
#define	TD_BOOTSTRAP_NAME	"_tdb_bootstrap"
/*
 * This is the actual name of uberdata, used in the event
 * that tdb_bootstrap has not yet been initialized.
 */
#define	TD_UBERDATA_NAME	"_uberdata"
/*
 * The library name should end with ".so.1", but older versions of
 * dbx expect the unadorned name and malfunction if ".1" is specified.
 * Unfortunately, if ".1" is not specified, mdb malfunctions when it
 * is applied to another instance of itself (due to the presence of
 * /usr/lib/mdb/proc/libc.so).  So we try it both ways.
 */
#define	TD_LIBRARY_NAME		"libc.so"
#define	TD_LIBRARY_NAME_1	"libc.so.1"
td_err_e __td_thr_get_info(td_thrhandle_t *th_p, td_thrinfo_t *ti_p);

td_err_e __td_ta_thr_iter(td_thragent_t *ta_p, td_thr_iter_f *cb,
	void *cbdata_p, td_thr_state_e state, int ti_pri,
	sigset_t *ti_sigmask_p, unsigned ti_user_flags);
/*
 * Initialize threads debugging interface.
 */
#pragma weak td_init = __td_init
td_err_e
__td_init(void)
{
	return (TD_OK);
}

/*
 * This function does nothing, and never did.
 * But the symbol is in the ABI, so we can't delete it.
 */
#pragma weak td_log = __td_log
void
__td_log(void)
{
}
/*
 * Short-cut to read just the hash table size from the process,
 * to avoid repeatedly reading the full uberdata structure when
 * dealing with a single-threaded process.
 */
static uint_t
td_read_hash_size(td_thragent_t *ta_p)
{
	psaddr_t addr;
	uint_t hash_size;

	switch (ta_p->initialized) {
	default:	/* uninitialized */
		return (0);
	case 1:		/* partially initialized */
		break;
	case 2:		/* fully initialized */
		return (ta_p->hash_size);
	}

	if (ta_p->model == PR_MODEL_NATIVE) {
		addr = ta_p->uberdata_addr + offsetof(uberdata_t, hash_size);
	} else {
#if defined(_LP64) && defined(_SYSCALL32)
		addr = ta_p->uberdata_addr + offsetof(uberdata32_t, hash_size);
#else
		addr = 0;
#endif	/* _SYSCALL32 */
	}
	if (ps_pdread(ta_p->ph_p, addr, &hash_size, sizeof (hash_size))
	    != PS_OK)
		return (0);
	return (hash_size);
}
static td_err_e
td_read_uberdata(td_thragent_t *ta_p)
{
	struct ps_prochandle *ph_p = ta_p->ph_p;
	int i;

	if (ta_p->model == PR_MODEL_NATIVE) {
		uberdata_t uberdata;

		if (ps_pdread(ph_p, ta_p->uberdata_addr,
		    &uberdata, sizeof (uberdata)) != PS_OK)
			return (TD_DBERR);
		ta_p->primary_map = uberdata.primary_map;
		ta_p->tdb_eventmask_addr = ta_p->uberdata_addr +
		    offsetof(uberdata_t, tdb.tdb_ev_global_mask);
		ta_p->tdb_register_sync_addr = ta_p->uberdata_addr +
		    offsetof(uberdata_t, uberflags.uf_tdb_register_sync);
		ta_p->hash_table_addr = (psaddr_t)uberdata.thr_hash_table;
		ta_p->hash_size = uberdata.hash_size;
		if (ps_pdread(ph_p, (psaddr_t)uberdata.tdb.tdb_events,
		    ta_p->tdb_events, sizeof (ta_p->tdb_events)) != PS_OK)
			return (TD_DBERR);
	} else {
#if defined(_LP64) && defined(_SYSCALL32)
		uberdata32_t uberdata;
		caddr32_t tdb_events[TD_MAX_EVENT_NUM - TD_MIN_EVENT_NUM + 1];

		if (ps_pdread(ph_p, ta_p->uberdata_addr,
		    &uberdata, sizeof (uberdata)) != PS_OK)
			return (TD_DBERR);
		ta_p->primary_map = uberdata.primary_map;
		ta_p->tdb_eventmask_addr = ta_p->uberdata_addr +
		    offsetof(uberdata32_t, tdb.tdb_ev_global_mask);
		ta_p->tdb_register_sync_addr = ta_p->uberdata_addr +
		    offsetof(uberdata32_t, uberflags.uf_tdb_register_sync);
		ta_p->hash_table_addr = (psaddr_t)uberdata.thr_hash_table;
		ta_p->hash_size = uberdata.hash_size;
		if (ps_pdread(ph_p, (psaddr_t)uberdata.tdb.tdb_events,
		    tdb_events, sizeof (tdb_events)) != PS_OK)
			return (TD_DBERR);
		for (i = 0; i < TD_MAX_EVENT_NUM - TD_MIN_EVENT_NUM + 1; i++)
			ta_p->tdb_events[i] = tdb_events[i];
#else
		return (TD_DBERR);
#endif	/* _SYSCALL32 */
	}

	/*
	 * Unfortunately, we are (implicitly) assuming that our uberdata
	 * definition precisely matches that of our target.  If this is not
	 * true (that is, if we're examining a core file from a foreign
	 * system that has a different definition of uberdata), the failure
	 * modes can be frustratingly non-explicit.  In an effort to catch
	 * this upon initialization (when the debugger may still be able to
	 * opt for another thread model or may be able to fail explicitly), we
	 * check that each of our tdb_events points to valid memory (these are
	 * putatively text upon which a breakpoint can be issued), with the
	 * hope that this is enough of a self-consistency check to lead to
	 * explicit failure on a mismatch.
	 */
	for (i = 0; i < TD_MAX_EVENT_NUM - TD_MIN_EVENT_NUM + 1; i++) {
		uint8_t check;

		if (ps_pdread(ph_p, (psaddr_t)ta_p->tdb_events[i],
		    &check, sizeof (check)) != PS_OK) {
			return (TD_DBERR);
		}
	}

	if (ta_p->hash_size != 1) {	/* multi-threaded */
		ta_p->initialized = 2;
		ta_p->single_lwpid = 0;
		ta_p->single_ulwp_addr = 0;
	} else {			/* single-threaded */
		ta_p->initialized = 1;
		/*
		 * Get the address and lwpid of the single thread/LWP.
		 * It may not be ulwp_one if this is a child of fork1().
		 */
		if (ta_p->model == PR_MODEL_NATIVE) {
			thr_hash_table_t head;
			lwpid_t lwpid = 0;

			if (ps_pdread(ph_p, ta_p->hash_table_addr,
			    &head, sizeof (head)) != PS_OK)
				return (TD_DBERR);
			if ((psaddr_t)head.hash_bucket == 0)
				ta_p->initialized = 0;
			else if (ps_pdread(ph_p, (psaddr_t)head.hash_bucket +
			    offsetof(ulwp_t, ul_lwpid),
			    &lwpid, sizeof (lwpid)) != PS_OK)
				return (TD_DBERR);
			ta_p->single_lwpid = lwpid;
			ta_p->single_ulwp_addr = (psaddr_t)head.hash_bucket;
		} else {
#if defined(_LP64) && defined(_SYSCALL32)
			thr_hash_table32_t head;
			lwpid_t lwpid = 0;

			if (ps_pdread(ph_p, ta_p->hash_table_addr,
			    &head, sizeof (head)) != PS_OK)
				return (TD_DBERR);
			if ((psaddr_t)head.hash_bucket == 0)
				ta_p->initialized = 0;
			else if (ps_pdread(ph_p, (psaddr_t)head.hash_bucket +
			    offsetof(ulwp32_t, ul_lwpid),
			    &lwpid, sizeof (lwpid)) != PS_OK)
				return (TD_DBERR);
			ta_p->single_lwpid = lwpid;
			ta_p->single_ulwp_addr = (psaddr_t)head.hash_bucket;
#endif	/* _SYSCALL32 */
		}
	}
	if (!ta_p->primary_map)
		ta_p->initialized = 0;
	return (TD_OK);
}
static td_err_e
td_read_bootstrap_data(td_thragent_t *ta_p)
{
	struct ps_prochandle *ph_p = ta_p->ph_p;
	psaddr_t bootstrap_addr;
	psaddr_t uberdata_addr;
	ps_err_e db_return;
	td_err_e return_val;
	int do_1;

	switch (ta_p->initialized) {
	case 2:			/* fully initialized */
		return (TD_OK);
	case 1:			/* partially initialized */
		if (td_read_hash_size(ta_p) == 1)
			return (TD_OK);
		return (td_read_uberdata(ta_p));
	}

	/*
	 * Uninitialized -- do the startup work.
	 * We set ta_p->initialized to -1 to cut off recursive calls
	 * into libc_db by code in the provider of ps_pglobal_lookup().
	 */
	do_1 = 0;
	ta_p->initialized = -1;
	db_return = ps_pglobal_lookup(ph_p, TD_LIBRARY_NAME,
	    TD_BOOTSTRAP_NAME, &bootstrap_addr);
	if (db_return == PS_NOSYM) {
		do_1 = 1;
		db_return = ps_pglobal_lookup(ph_p, TD_LIBRARY_NAME_1,
		    TD_BOOTSTRAP_NAME, &bootstrap_addr);
	}
	if (db_return == PS_NOSYM)	/* libc is not linked yet */
		return (TD_NOLIBTHREAD);
	if (db_return != PS_OK)
		return (TD_ERR);
	db_return = ps_pglobal_lookup(ph_p,
	    do_1? TD_LIBRARY_NAME_1 : TD_LIBRARY_NAME,
	    TD_UBERDATA_NAME, &uberdata_addr);
	if (db_return == PS_NOSYM)	/* libc is not linked yet */
		return (TD_NOLIBTHREAD);
	if (db_return != PS_OK)
		return (TD_ERR);

	/*
	 * Read the uberdata address into the thread agent structure.
	 */
	if (ta_p->model == PR_MODEL_NATIVE) {
		psaddr_t psaddr;

		if (ps_pdread(ph_p, bootstrap_addr,
		    &psaddr, sizeof (psaddr)) != PS_OK)
			return (TD_DBERR);
		if ((ta_p->bootstrap_addr = psaddr) == 0)
			psaddr = uberdata_addr;
		else if (ps_pdread(ph_p, psaddr,
		    &psaddr, sizeof (psaddr)) != PS_OK)
			return (TD_DBERR);
		if (psaddr == 0) {
			/* primary linkmap in the tgt is not initialized */
			ta_p->bootstrap_addr = 0;
			psaddr = uberdata_addr;
		}
		ta_p->uberdata_addr = psaddr;
	} else {
#if defined(_LP64) && defined(_SYSCALL32)
		caddr32_t psaddr;

		if (ps_pdread(ph_p, bootstrap_addr,
		    &psaddr, sizeof (psaddr)) != PS_OK)
			return (TD_DBERR);
		if ((ta_p->bootstrap_addr = (psaddr_t)psaddr) == 0)
			psaddr = (caddr32_t)uberdata_addr;
		else if (ps_pdread(ph_p, (psaddr_t)psaddr,
		    &psaddr, sizeof (psaddr)) != PS_OK)
			return (TD_DBERR);
		if (psaddr == 0) {
			/* primary linkmap in the tgt is not initialized */
			ta_p->bootstrap_addr = 0;
			psaddr = (caddr32_t)uberdata_addr;
		}
		ta_p->uberdata_addr = (psaddr_t)psaddr;
#else
		return (TD_DBERR);
#endif	/* _SYSCALL32 */
	}

	if ((return_val = td_read_uberdata(ta_p)) != TD_OK)
		return (return_val);
	if (ta_p->bootstrap_addr == 0)
		ta_p->initialized = 0;
	return (TD_OK);
}
#pragma weak ps_kill
#pragma weak ps_lrolltoaddr

/*
 * Allocate a new agent process handle ("thread agent").
 */
#pragma weak td_ta_new = __td_ta_new
td_err_e
__td_ta_new(struct ps_prochandle *ph_p, td_thragent_t **ta_pp)
{
	td_thragent_t *ta_p;
	int model;
	td_err_e return_val = TD_OK;

	if (ph_p == NULL)
		return (TD_BADPH);
	if (ta_pp == NULL)
		return (TD_ERR);
	*ta_pp = NULL;
	if (ps_pstop(ph_p) != PS_OK)
		return (TD_DBERR);
	/*
	 * ps_pdmodel might not be defined if this is an older client.
	 * Make it a weak symbol and test if it exists before calling.
	 */
#pragma weak ps_pdmodel
	if (ps_pdmodel == NULL) {
		model = PR_MODEL_NATIVE;
	} else if (ps_pdmodel(ph_p, &model) != PS_OK) {
		(void) ps_pcontinue(ph_p);
		return (TD_ERR);
	}
	if ((ta_p = malloc(sizeof (*ta_p))) == NULL) {
		(void) ps_pcontinue(ph_p);
		return (TD_MALLOC);
	}

	/*
	 * Initialize the agent process handle.
	 * Pick up the symbol value we need from the target process.
	 */
	(void) memset(ta_p, 0, sizeof (*ta_p));
	ta_p->ph_p = ph_p;
	(void) rwlock_init(&ta_p->rwlock, USYNC_THREAD, NULL);
	ta_p->model = model;
	return_val = td_read_bootstrap_data(ta_p);

	/*
	 * Because the old libthread_db enabled lock tracking by default,
	 * we must also do it.  However, we do it only if the application
	 * provides the ps_kill() and ps_lrolltoaddr() interfaces.
	 * (dbx provides the ps_kill() and ps_lrolltoaddr() interfaces.)
	 */
	if (return_val == TD_OK && ps_kill != NULL && ps_lrolltoaddr != NULL) {
		register_sync_t oldenable;
		register_sync_t enable = REGISTER_SYNC_ENABLE;
		psaddr_t psaddr = ta_p->tdb_register_sync_addr;

		if (ps_pdread(ph_p, psaddr,
		    &oldenable, sizeof (oldenable)) != PS_OK)
			return_val = TD_DBERR;
		else if (oldenable != REGISTER_SYNC_OFF ||
		    ps_pdwrite(ph_p, psaddr,
		    &enable, sizeof (enable)) != PS_OK) {
			/*
			 * Lock tracking was already enabled or we
			 * failed to enable it, probably because we
			 * are examining a core file.  In either case
			 * set the sync_tracking flag non-zero to
			 * indicate that we should not attempt to
			 * disable lock tracking when we delete the
			 * agent process handle in td_ta_delete().
			 */
			ta_p->sync_tracking = 1;
		}
	}

	if (return_val == TD_OK)
		*ta_pp = ta_p;
	else
		free(ta_p);

	(void) ps_pcontinue(ph_p);
	return (return_val);
}
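
/*
 * Editor's example -- a minimal usage sketch, not part of the library.
 * A debugger that has built its own ps_prochandle (the hypothetical
 * "my_ph" below) would typically bracket a session like this:
 *
 *	td_thragent_t *ta;
 *
 *	if (td_init() == TD_OK && td_ta_new(my_ph, &ta) == TD_OK) {
 *		... use the agent: td_ta_thr_iter(), td_ta_event_addr() ...
 *		(void) td_ta_delete(ta);
 *	}
 *
 * td_ta_new() returns TD_NOLIBTHREAD until libc is initialized in the
 * target, so debuggers generally retry it as the target runs.
 */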
/*
 * Utility function to grab the readers lock and return the prochandle,
 * given an agent process handle.  Performs standard error checking.
 * Returns non-NULL with the lock held, or NULL with the lock not held.
 */
static struct ps_prochandle *
ph_lock_ta(td_thragent_t *ta_p, td_err_e *err)
{
	struct ps_prochandle *ph_p = NULL;
	td_err_e error;

	if (ta_p == NULL || ta_p->initialized == -1) {
		*err = TD_BADTA;
	} else if (rw_rdlock(&ta_p->rwlock) != 0) {	/* can't happen? */
		*err = TD_BADTA;
	} else if ((ph_p = ta_p->ph_p) == NULL) {
		(void) rw_unlock(&ta_p->rwlock);
		*err = TD_BADPH;
	} else if (ta_p->initialized != 2 &&
	    (error = td_read_bootstrap_data(ta_p)) != TD_OK) {
		(void) rw_unlock(&ta_p->rwlock);
		ph_p = NULL;
		*err = error;
	} else {
		*err = TD_OK;
	}

	return (ph_p);
}
/*
 * Utility function to grab the readers lock and return the prochandle,
 * given an agent thread handle.  Performs standard error checking.
 * Returns non-NULL with the lock held, or NULL with the lock not held.
 */
static struct ps_prochandle *
ph_lock_th(const td_thrhandle_t *th_p, td_err_e *err)
{
	if (th_p == NULL || th_p->th_unique == 0) {
		*err = TD_BADTH;
		return (NULL);
	}
	return (ph_lock_ta(th_p->th_ta_p, err));
}
/*
 * Utility function to grab the readers lock and return the prochandle,
 * given a synchronization object handle.  Performs standard error checking.
 * Returns non-NULL with the lock held, or NULL with the lock not held.
 */
static struct ps_prochandle *
ph_lock_sh(const td_synchandle_t *sh_p, td_err_e *err)
{
	if (sh_p == NULL || sh_p->sh_unique == 0) {
		*err = TD_BADSH;
		return (NULL);
	}
	return (ph_lock_ta(sh_p->sh_ta_p, err));
}
/*
 * Unlock the agent process handle obtained from ph_lock_*().
 */
static void
ph_unlock(td_thragent_t *ta_p)
{
	(void) rw_unlock(&ta_p->rwlock);
}
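
/*
 * Editor's note -- a sketch of the convention the exported functions
 * below follow (reference only, not additional library code):
 *
 *	if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
 *		return (return_val);
 *	... operate on the target process through ph_p ...
 *	ph_unlock(th_p->th_ta_p);
 *	return (return_val);
 */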
/*
 * De-allocate an agent process handle,
 * releasing all related resources.
 *
 * XXX -- This is hopelessly broken ---
 * Storage for thread agent is not deallocated.  The prochandle
 * in the thread agent is set to NULL so that future uses of
 * the thread agent can be detected and an error value returned.
 * All functions in the external user interface that make
 * use of the thread agent are expected
 * to check for a NULL prochandle in the thread agent.
 * All such functions are also expected to obtain a
 * reader lock on the thread agent while it is using it.
 */
#pragma weak td_ta_delete = __td_ta_delete
td_err_e
__td_ta_delete(td_thragent_t *ta_p)
{
	struct ps_prochandle *ph_p;

	/*
	 * This is the only place we grab the writer lock.
	 * We are going to NULL out the prochandle.
	 */
	if (ta_p == NULL || rw_wrlock(&ta_p->rwlock) != 0)
		return (TD_BADTA);
	if ((ph_p = ta_p->ph_p) == NULL) {
		(void) rw_unlock(&ta_p->rwlock);
		return (TD_BADPH);
	}
	/*
	 * If synch. tracking was disabled when td_ta_new() was called and
	 * if td_ta_sync_tracking_enable() was never called, then disable
	 * synch. tracking (it was enabled by default in td_ta_new()).
	 */
	if (ta_p->sync_tracking == 0 &&
	    ps_kill != NULL && ps_lrolltoaddr != NULL) {
		register_sync_t enable = REGISTER_SYNC_DISABLE;

		(void) ps_pdwrite(ph_p, ta_p->tdb_register_sync_addr,
		    &enable, sizeof (enable));
	}
	ta_p->ph_p = NULL;
	(void) rw_unlock(&ta_p->rwlock);
	return (TD_OK);
}
/*
 * Map an agent process handle to a client prochandle.
 * Currently unused by dbx.
 */
#pragma weak td_ta_get_ph = __td_ta_get_ph
td_err_e
__td_ta_get_ph(td_thragent_t *ta_p, struct ps_prochandle **ph_pp)
{
	td_err_e return_val;

	if (ph_pp != NULL)	/* protect stupid callers */
		*ph_pp = NULL;
	if (ph_pp == NULL)
		return (TD_ERR);
	if ((*ph_pp = ph_lock_ta(ta_p, &return_val)) == NULL)
		return (return_val);
	ph_unlock(ta_p);
	return (TD_OK);
}
/*
 * Set the process's suggested concurrency level.
 * This is a no-op in a one-level model.
 * Currently unused by dbx.
 */
#pragma weak td_ta_setconcurrency = __td_ta_setconcurrency
/* ARGSUSED1 */
td_err_e
__td_ta_setconcurrency(const td_thragent_t *ta_p, int level)
{
	if (ta_p == NULL)
		return (TD_BADTA);
	if (ta_p->ph_p == NULL)
		return (TD_BADPH);
	return (TD_OK);
}
/*
 * Get the number of threads in the process.
 */
#pragma weak td_ta_get_nthreads = __td_ta_get_nthreads
td_err_e
__td_ta_get_nthreads(td_thragent_t *ta_p, int *nthread_p)
{
	struct ps_prochandle *ph_p;
	td_err_e return_val;
	int nthreads;
	int nzombies;
	psaddr_t nthreads_addr;
	psaddr_t nzombies_addr;

	if (ta_p->model == PR_MODEL_NATIVE) {
		nthreads_addr = ta_p->uberdata_addr +
		    offsetof(uberdata_t, nthreads);
		nzombies_addr = ta_p->uberdata_addr +
		    offsetof(uberdata_t, nzombies);
	} else {
#if defined(_LP64) && defined(_SYSCALL32)
		nthreads_addr = ta_p->uberdata_addr +
		    offsetof(uberdata32_t, nthreads);
		nzombies_addr = ta_p->uberdata_addr +
		    offsetof(uberdata32_t, nzombies);
#else
		nthreads_addr = 0;
		nzombies_addr = 0;
#endif	/* _SYSCALL32 */
	}

	if (nthread_p == NULL)
		return (TD_ERR);
	if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
		return (return_val);
	if (ps_pdread(ph_p, nthreads_addr, &nthreads, sizeof (int)) != PS_OK)
		return_val = TD_DBERR;
	if (ps_pdread(ph_p, nzombies_addr, &nzombies, sizeof (int)) != PS_OK)
		return_val = TD_DBERR;
	ph_unlock(ta_p);
	if (return_val == TD_OK)
		*nthread_p = nthreads + nzombies;
	return (return_val);
}
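
/*
 * Editor's example (hypothetical caller, not part of the library):
 *
 *	int nthreads;
 *
 *	if (td_ta_get_nthreads(ta, &nthreads) == TD_OK)
 *		(void) printf("%d threads (live + zombies)\n", nthreads);
 */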
typedef struct {
	thread_t	tid;
	int		found;
	td_thrhandle_t	th;
} td_mapper_param_t;

/*
 * Check the value in data against the thread id.
 * If it matches, return 1 to terminate iterations.
 * This function is used by td_ta_map_id2thr() to map a tid to a thread handle.
 */
static int
td_mapper_id2thr(td_thrhandle_t *th_p, td_mapper_param_t *data)
{
	td_thrinfo_t ti;

	if (__td_thr_get_info(th_p, &ti) == TD_OK &&
	    data->tid == ti.ti_tid) {
		data->found = 1;
		data->th = *th_p;
		return (1);
	}
	return (0);
}
/*
 * Given a thread identifier, return the corresponding thread handle.
 */
#pragma weak td_ta_map_id2thr = __td_ta_map_id2thr
td_err_e
__td_ta_map_id2thr(td_thragent_t *ta_p, thread_t tid,
	td_thrhandle_t *th_p)
{
	td_err_e return_val;
	td_mapper_param_t data;

	if (th_p != NULL &&	/* optimize for a single thread */
	    ta_p != NULL &&
	    ta_p->initialized == 1 &&
	    (td_read_hash_size(ta_p) == 1 ||
	    td_read_uberdata(ta_p) == TD_OK) &&
	    ta_p->initialized == 1 &&
	    ta_p->single_lwpid == tid) {
		th_p->th_ta_p = ta_p;
		if ((th_p->th_unique = ta_p->single_ulwp_addr) == 0)
			return (TD_NOTHR);
		return (TD_OK);
	}

	/*
	 * LOCKING EXCEPTION - Locking is not required here because
	 * the locking and checking will be done in __td_ta_thr_iter.
	 */

	if (ta_p == NULL)
		return (TD_BADTA);
	if (th_p == NULL)
		return (TD_BADTH);
	if (tid == 0)
		return (TD_NOTHR);

	data.tid = tid;
	data.found = 0;
	return_val = __td_ta_thr_iter(ta_p,
	    (td_thr_iter_f *)td_mapper_id2thr, (void *)&data,
	    TD_THR_ANY_STATE, TD_THR_LOWEST_PRIORITY,
	    TD_SIGNO_MASK, TD_THR_ANY_USER_FLAGS);
	if (return_val == TD_OK) {
		if (data.found == 0)
			return_val = TD_NOTHR;
		else
			*th_p = data.th;
	}

	return (return_val);
}
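
/*
 * Editor's example (hypothetical caller): given a thread id previously
 * reported by td_thr_get_info(), recover a handle to operate on it.
 *
 *	td_thrhandle_t th;
 *
 *	if (td_ta_map_id2thr(ta, tid, &th) == TD_OK)
 *		(void) td_thr_dbsuspend(&th);
 */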
/*
 * Map the address of a synchronization object to a sync. object handle.
 */
#pragma weak td_ta_map_addr2sync = __td_ta_map_addr2sync
td_err_e
__td_ta_map_addr2sync(td_thragent_t *ta_p, psaddr_t addr, td_synchandle_t *sh_p)
{
	struct ps_prochandle *ph_p;
	td_err_e return_val;
	uint16_t sync_magic;

	if (sh_p == NULL)
		return (TD_BADSH);
	if (addr == 0)
		return (TD_ERR);
	if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
		return (return_val);
	/*
	 * Check the magic number of the sync. object to make sure it's valid.
	 * The magic number is at the same offset for all sync. objects.
	 */
	if (ps_pdread(ph_p, (psaddr_t)&((mutex_t *)addr)->mutex_magic,
	    &sync_magic, sizeof (sync_magic)) != PS_OK) {
		ph_unlock(ta_p);
		return (TD_BADSH);
	}
	ph_unlock(ta_p);
	if (sync_magic != MUTEX_MAGIC && sync_magic != COND_MAGIC &&
	    sync_magic != SEMA_MAGIC && sync_magic != RWL_MAGIC)
		return (TD_BADSH);
	/*
	 * Just fill in the appropriate fields of the sync. handle.
	 */
	sh_p->sh_ta_p = (td_thragent_t *)ta_p;
	sh_p->sh_unique = addr;
	return (TD_OK);
}
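
/*
 * Editor's example (hypothetical caller): wrap the address of a mutex
 * observed in the target so it can be queried through the sync.
 * interfaces.
 *
 *	td_synchandle_t sh;
 *	td_syncinfo_t si;
 *
 *	if (td_ta_map_addr2sync(ta, mutex_addr, &sh) == TD_OK &&
 *	    td_sync_get_info(&sh, &si) == TD_OK)
 *		... examine si.si_type, si.si_state, etc. ...
 */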
/*
 * Iterate over the set of global TSD keys.
 * The call back function is called with three arguments,
 * a key, a pointer to the destructor function, and the cbdata pointer.
 * Currently unused by dbx.
 */
#pragma weak td_ta_tsd_iter = __td_ta_tsd_iter
td_err_e
__td_ta_tsd_iter(td_thragent_t *ta_p, td_key_iter_f *cb, void *cbdata_p)
{
	struct ps_prochandle *ph_p;
	td_err_e	return_val;
	int		key;
	int		numkeys;
	psaddr_t	dest_addr;
	psaddr_t	*destructors = NULL;
	PFrV		destructor;

	if (cb == NULL)
		return (TD_ERR);
	if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
		return (return_val);
	if (ps_pstop(ph_p) != PS_OK) {
		ph_unlock(ta_p);
		return (TD_DBERR);
	}

	if (ta_p->model == PR_MODEL_NATIVE) {
		tsd_metadata_t tsdm;

		if (ps_pdread(ph_p,
		    ta_p->uberdata_addr + offsetof(uberdata_t, tsd_metadata),
		    &tsdm, sizeof (tsdm)) != PS_OK)
			return_val = TD_DBERR;
		else {
			numkeys = tsdm.tsdm_nused;
			dest_addr = (psaddr_t)tsdm.tsdm_destro;
			if (numkeys > 0)
				destructors =
				    malloc(numkeys * sizeof (psaddr_t));
		}
	} else {
#if defined(_LP64) && defined(_SYSCALL32)
		tsd_metadata32_t tsdm;

		if (ps_pdread(ph_p,
		    ta_p->uberdata_addr + offsetof(uberdata32_t, tsd_metadata),
		    &tsdm, sizeof (tsdm)) != PS_OK)
			return_val = TD_DBERR;
		else {
			numkeys = tsdm.tsdm_nused;
			dest_addr = (psaddr_t)tsdm.tsdm_destro;
			if (numkeys > 0)
				destructors =
				    malloc(numkeys * sizeof (caddr32_t));
		}
#else
		return_val = TD_DBERR;
#endif	/* _SYSCALL32 */
	}

	if (return_val != TD_OK || numkeys <= 0) {
		(void) ps_pcontinue(ph_p);
		ph_unlock(ta_p);
		return (return_val);
	}

	if (destructors == NULL)
		return_val = TD_MALLOC;
	else if (ta_p->model == PR_MODEL_NATIVE) {
		if (ps_pdread(ph_p, dest_addr,
		    destructors, numkeys * sizeof (psaddr_t)) != PS_OK)
			return_val = TD_DBERR;
		else {
			for (key = 1; key < numkeys; key++) {
				destructor = (PFrV)destructors[key];
				if (destructor != TSD_UNALLOCATED &&
				    (*cb)(key, destructor, cbdata_p))
					break;
			}
		}
#if defined(_LP64) && defined(_SYSCALL32)
	} else {
		caddr32_t *destructors32 = (caddr32_t *)destructors;
		caddr32_t destruct32;

		if (ps_pdread(ph_p, dest_addr,
		    destructors32, numkeys * sizeof (caddr32_t)) != PS_OK)
			return_val = TD_DBERR;
		else {
			for (key = 1; key < numkeys; key++) {
				destruct32 = destructors32[key];
				if ((destruct32 !=
				    (caddr32_t)(uintptr_t)TSD_UNALLOCATED) &&
				    (*cb)(key, (PFrV)(uintptr_t)destruct32,
				    cbdata_p))
					break;
			}
		}
#endif	/* _SYSCALL32 */
	}

	if (destructors)
		free(destructors);
	(void) ps_pcontinue(ph_p);
	ph_unlock(ta_p);
	return (return_val);
}
static int
sigequalset(const sigset_t *s1, const sigset_t *s2)
{
	return (
	    s1->__sigbits[0] == s2->__sigbits[0] &&
	    s1->__sigbits[1] == s2->__sigbits[1] &&
	    s1->__sigbits[2] == s2->__sigbits[2] &&
	    s1->__sigbits[3] == s2->__sigbits[3]);
}
/*
 * Iterate over all threads.  For each thread call
 * the function pointed to by "cb" with a pointer
 * to a thread handle, and a pointer to data which
 * can be NULL.  Only call td_thr_iter_f() on threads
 * which match the properties of state, ti_pri,
 * ti_sigmask_p, and ti_user_flags.  If cb returns
 * a non-zero value, terminate iterations.
 *
 * Input:
 *	*ta_p - thread agent
 *	*cb - call back function defined by user.
 *	td_thr_iter_f() takes a thread handle and
 *	cbdata_p as a parameter.
 *	cbdata_p - parameter for td_thr_iter_f().
 *
 *	state - state of threads of interest.  A value of
 *	TD_THR_ANY_STATE from enum td_thr_state_e
 *	does not restrict iterations by state.
 *	ti_pri - lower bound of priorities of threads of
 *	interest.  A value of TD_THR_LOWEST_PRIORITY
 *	defined in thread_db.h does not restrict
 *	iterations by priority.  A thread with priority
 *	less than ti_pri will NOT be passed to the callback.
 *	ti_sigmask_p - signal mask of threads of interest.
 *	A value of TD_SIGNO_MASK defined in thread_db.h
 *	does not restrict iterations by signal mask.
 *	ti_user_flags - user flags of threads of interest.  A
 *	value of TD_THR_ANY_USER_FLAGS defined in thread_db.h
 *	does not restrict iterations by user flags.
 */
#pragma weak td_ta_thr_iter = __td_ta_thr_iter
td_err_e
__td_ta_thr_iter(td_thragent_t *ta_p, td_thr_iter_f *cb,
	void *cbdata_p, td_thr_state_e state, int ti_pri,
	sigset_t *ti_sigmask_p, unsigned ti_user_flags)
{
	struct ps_prochandle *ph_p;
	psaddr_t	first_lwp_addr;
	psaddr_t	first_zombie_addr;
	psaddr_t	curr_lwp_addr;
	psaddr_t	next_lwp_addr;
	td_thrhandle_t	th;
	ps_err_e	db_return;
	ps_err_e	db_return2;
	td_err_e	return_val;

	if (cb == NULL)
		return (TD_ERR);
	/*
	 * If state is not within bound, short circuit.
	 */
	if (state < TD_THR_ANY_STATE || state > TD_THR_STOPPED_ASLEEP)
		return (TD_OK);

	if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
		return (return_val);
	if (ps_pstop(ph_p) != PS_OK) {
		ph_unlock(ta_p);
		return (TD_DBERR);
	}

	/*
	 * For each ulwp_t in the circular linked lists pointed
	 * to by "all_lwps" and "all_zombies":
	 * (1) Filter each thread.
	 * (2) Create the thread_object for each thread that passes.
	 * (3) Call the call back function on each thread.
	 */

	if (ta_p->model == PR_MODEL_NATIVE) {
		db_return = ps_pdread(ph_p,
		    ta_p->uberdata_addr + offsetof(uberdata_t, all_lwps),
		    &first_lwp_addr, sizeof (first_lwp_addr));
		db_return2 = ps_pdread(ph_p,
		    ta_p->uberdata_addr + offsetof(uberdata_t, all_zombies),
		    &first_zombie_addr, sizeof (first_zombie_addr));
	} else {
#if defined(_LP64) && defined(_SYSCALL32)
		caddr32_t addr32;

		db_return = ps_pdread(ph_p,
		    ta_p->uberdata_addr + offsetof(uberdata32_t, all_lwps),
		    &addr32, sizeof (addr32));
		first_lwp_addr = addr32;
		db_return2 = ps_pdread(ph_p,
		    ta_p->uberdata_addr + offsetof(uberdata32_t, all_zombies),
		    &addr32, sizeof (addr32));
		first_zombie_addr = addr32;
#else	/* _SYSCALL32 */
		db_return = PS_ERR;
		db_return2 = PS_ERR;
#endif	/* _SYSCALL32 */
	}
	if (db_return == PS_OK)
		db_return = db_return2;

	/*
	 * If first_lwp_addr and first_zombie_addr are both NULL,
	 * libc must not yet be initialized or all threads have
	 * exited.  Return TD_NOTHR and all will be well.
	 */
	if (db_return == PS_OK &&
	    first_lwp_addr == 0 && first_zombie_addr == 0) {
		(void) ps_pcontinue(ph_p);
		ph_unlock(ta_p);
		return (TD_NOTHR);
	}
	if (db_return != PS_OK) {
		(void) ps_pcontinue(ph_p);
		ph_unlock(ta_p);
		return (TD_DBERR);
	}

	/*
	 * Run down the lists of all living and dead lwps.
	 */
	if (first_lwp_addr == 0)
		first_lwp_addr = first_zombie_addr;
	curr_lwp_addr = first_lwp_addr;
	for (;;) {
		td_thr_state_e ts_state;
		int userpri;
		unsigned userflags;
		sigset_t mask;

		/*
		 * Read the ulwp struct.
		 */
		if (ta_p->model == PR_MODEL_NATIVE) {
			ulwp_t ulwp;

			if (ps_pdread(ph_p, curr_lwp_addr,
			    &ulwp, sizeof (ulwp)) != PS_OK &&
			    ((void) memset(&ulwp, 0, sizeof (ulwp)),
			    ps_pdread(ph_p, curr_lwp_addr,
			    &ulwp, REPLACEMENT_SIZE)) != PS_OK) {
				return_val = TD_DBERR;
				break;
			}
			next_lwp_addr = (psaddr_t)ulwp.ul_forw;

			ts_state = ulwp.ul_dead? TD_THR_ZOMBIE :
			    ulwp.ul_stop? TD_THR_STOPPED :
			    ulwp.ul_wchan? TD_THR_SLEEP :
			    TD_THR_ACTIVE;
			userpri = ulwp.ul_pri;
			userflags = ulwp.ul_usropts;
			if (ulwp.ul_dead)
				(void) sigemptyset(&mask);
			else
				mask = *(sigset_t *)&ulwp.ul_sigmask;
		} else {
#if defined(_LP64) && defined(_SYSCALL32)
			ulwp32_t ulwp;

			if (ps_pdread(ph_p, curr_lwp_addr,
			    &ulwp, sizeof (ulwp)) != PS_OK &&
			    ((void) memset(&ulwp, 0, sizeof (ulwp)),
			    ps_pdread(ph_p, curr_lwp_addr,
			    &ulwp, REPLACEMENT_SIZE32)) != PS_OK) {
				return_val = TD_DBERR;
				break;
			}
			next_lwp_addr = (psaddr_t)ulwp.ul_forw;

			ts_state = ulwp.ul_dead? TD_THR_ZOMBIE :
			    ulwp.ul_stop? TD_THR_STOPPED :
			    ulwp.ul_wchan? TD_THR_SLEEP :
			    TD_THR_ACTIVE;
			userpri = ulwp.ul_pri;
			userflags = ulwp.ul_usropts;
			if (ulwp.ul_dead)
				(void) sigemptyset(&mask);
			else
				mask = *(sigset_t *)&ulwp.ul_sigmask;
#else	/* _SYSCALL32 */
			return_val = TD_ERR;
			break;
#endif	/* _SYSCALL32 */
		}

		/*
		 * Filter on state, priority, sigmask, and user flags.
		 */

		if ((state != ts_state) &&
		    (state != TD_THR_ANY_STATE))
			goto advance;

		if (ti_pri > userpri)
			goto advance;

		if (ti_sigmask_p != TD_SIGNO_MASK &&
		    !sigequalset(ti_sigmask_p, &mask))
			goto advance;

		if (ti_user_flags != userflags &&
		    ti_user_flags != (unsigned)TD_THR_ANY_USER_FLAGS)
			goto advance;

		/*
		 * Call back - break if the return
		 * from the call back is non-zero.
		 */
		th.th_ta_p = (td_thragent_t *)ta_p;
		th.th_unique = curr_lwp_addr;
		if ((*cb)(&th, cbdata_p))
			break;

advance:
		if ((curr_lwp_addr = next_lwp_addr) == first_lwp_addr) {
			/*
			 * Switch to the zombie list, unless it is NULL
			 * or we have already been doing the zombie list,
			 * in which case terminate the loop.
			 */
			if (first_zombie_addr == 0 ||
			    first_lwp_addr == first_zombie_addr)
				break;
			curr_lwp_addr = first_lwp_addr = first_zombie_addr;
		}
	}

	(void) ps_pcontinue(ph_p);
	ph_unlock(ta_p);
	return (return_val);
}
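
/*
 * Editor's example (hypothetical caller): enumerate every thread,
 * unfiltered, printing its id.
 *
 *	static int
 *	print_tid(td_thrhandle_t *th_p, void *cbdata)
 *	{
 *		td_thrinfo_t ti;
 *
 *		if (td_thr_get_info(th_p, &ti) == TD_OK)
 *			(void) printf("tid %d\n", (int)ti.ti_tid);
 *		return (0);	// zero means keep iterating
 *	}
 *
 *	(void) td_ta_thr_iter(ta, print_tid, NULL, TD_THR_ANY_STATE,
 *	    TD_THR_LOWEST_PRIORITY, TD_SIGNO_MASK, TD_THR_ANY_USER_FLAGS);
 */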
/*
 * Enable or disable process synchronization object tracking.
 * Currently unused by dbx.
 */
#pragma weak td_ta_sync_tracking_enable = __td_ta_sync_tracking_enable
td_err_e
__td_ta_sync_tracking_enable(td_thragent_t *ta_p, int onoff)
{
	struct ps_prochandle *ph_p;
	td_err_e return_val;
	register_sync_t enable;

	if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
		return (return_val);
	/*
	 * Values of tdb_register_sync in the victim process:
	 *	REGISTER_SYNC_ENABLE	enables registration of synch objects
	 *	REGISTER_SYNC_DISABLE	disables registration of synch objects
	 * These cause the table to be cleared and tdb_register_sync set to:
	 *	REGISTER_SYNC_ON	registration in effect
	 *	REGISTER_SYNC_OFF	registration not in effect
	 */
	enable = onoff? REGISTER_SYNC_ENABLE : REGISTER_SYNC_DISABLE;
	if (ps_pdwrite(ph_p, ta_p->tdb_register_sync_addr,
	    &enable, sizeof (enable)) != PS_OK)
		return_val = TD_DBERR;
	/*
	 * Remember that this interface was called (see td_ta_delete()).
	 */
	ta_p->sync_tracking = 1;
	ph_unlock(ta_p);
	return (return_val);
}
/*
 * Iterate over all known synchronization variables.
 * It is very possible that the list generated is incomplete,
 * because the iterator can only find synchronization variables
 * that have been registered by the process since synchronization
 * object registration was enabled.
 * The call back function cb is called for each synchronization
 * variable with two arguments: a pointer to the synchronization
 * handle and the passed-in argument cbdata.
 * If cb returns a non-zero value, iterations are terminated.
 */
#pragma weak td_ta_sync_iter = __td_ta_sync_iter
td_err_e
__td_ta_sync_iter(td_thragent_t *ta_p, td_sync_iter_f *cb, void *cbdata)
{
	struct ps_prochandle *ph_p;
	td_err_e	return_val;
	int		i;
	register_sync_t	enable;
	psaddr_t	next_desc;
	tdb_sync_stats_t sync_stats;
	td_synchandle_t	synchandle;
	psaddr_t	psaddr;
	void		*vaddr;
	uint64_t	*sync_addr_hash = NULL;

	if (cb == NULL)
		return (TD_ERR);
	if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
		return (return_val);
	if (ps_pstop(ph_p) != PS_OK) {
		ph_unlock(ta_p);
		return (TD_DBERR);
	}
	if (ps_pdread(ph_p, ta_p->tdb_register_sync_addr,
	    &enable, sizeof (enable)) != PS_OK) {
		return_val = TD_DBERR;
		goto out;
	}
	if (enable != REGISTER_SYNC_ON)
		goto out;

	/*
	 * First read the hash table.
	 * The hash table is large; allocate with mmap().
	 */
	if ((vaddr = mmap(NULL, TDB_HASH_SIZE * sizeof (uint64_t),
	    PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANON, -1, (off_t)0))
	    == MAP_FAILED) {
		return_val = TD_MALLOC;
		goto out;
	}
	sync_addr_hash = vaddr;

	if (ta_p->model == PR_MODEL_NATIVE) {
		if (ps_pdread(ph_p, ta_p->uberdata_addr +
		    offsetof(uberdata_t, tdb.tdb_sync_addr_hash),
		    &psaddr, sizeof (psaddr)) != PS_OK) {
			return_val = TD_DBERR;
			goto out;
		}
	} else {
#ifdef	_SYSCALL32
		caddr32_t addr;

		if (ps_pdread(ph_p, ta_p->uberdata_addr +
		    offsetof(uberdata32_t, tdb.tdb_sync_addr_hash),
		    &addr, sizeof (addr)) != PS_OK) {
			return_val = TD_DBERR;
			goto out;
		}
		psaddr = addr;
#else
		return_val = TD_ERR;
		goto out;
#endif	/* _SYSCALL32 */
	}

	if (psaddr == 0)
		goto out;
	if (ps_pdread(ph_p, psaddr, sync_addr_hash,
	    TDB_HASH_SIZE * sizeof (uint64_t)) != PS_OK) {
		return_val = TD_DBERR;
		goto out;
	}

	/*
	 * Now scan the hash table.
	 */
	for (i = 0; i < TDB_HASH_SIZE; i++) {
		for (next_desc = (psaddr_t)sync_addr_hash[i];
		    next_desc != 0;
		    next_desc = (psaddr_t)sync_stats.next) {
			if (ps_pdread(ph_p, next_desc,
			    &sync_stats, sizeof (sync_stats)) != PS_OK) {
				return_val = TD_DBERR;
				goto out;
			}
			if (sync_stats.un.type == TDB_NONE) {
				/* not registered since registration enabled */
				continue;
			}
			synchandle.sh_ta_p = ta_p;
			synchandle.sh_unique = (psaddr_t)sync_stats.sync_addr;
			if ((*cb)(&synchandle, cbdata) != 0)
				goto out;
		}
	}

out:
	if (sync_addr_hash != NULL)
		(void) munmap((void *)sync_addr_hash,
		    TDB_HASH_SIZE * sizeof (uint64_t));
	(void) ps_pcontinue(ph_p);
	ph_unlock(ta_p);
	return (return_val);
}
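
/*
 * Editor's example (hypothetical caller): visit every registered
 * synchronization object.  Registration must already be enabled
 * (see td_ta_sync_tracking_enable() above) or the walk is empty.
 *
 *	static int
 *	count_sync(const td_synchandle_t *sh_p, void *cbdata)
 *	{
 *		(*(int *)cbdata)++;
 *		return (0);
 *	}
 *
 *	int count = 0;
 *	(void) td_ta_sync_iter(ta, count_sync, &count);
 */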
/*
 * Enable process statistics collection.
 */
#pragma weak td_ta_enable_stats = __td_ta_enable_stats
/* ARGSUSED */
td_err_e
__td_ta_enable_stats(const td_thragent_t *ta_p, int onoff)
{
	return (TD_NOCAPAB);
}

/*
 * Reset process statistics.
 */
#pragma weak td_ta_reset_stats = __td_ta_reset_stats
/* ARGSUSED */
td_err_e
__td_ta_reset_stats(const td_thragent_t *ta_p)
{
	return (TD_NOCAPAB);
}

/*
 * Read process statistics.
 */
#pragma weak td_ta_get_stats = __td_ta_get_stats
/* ARGSUSED */
td_err_e
__td_ta_get_stats(const td_thragent_t *ta_p, td_ta_stats_t *tstats)
{
	return (TD_NOCAPAB);
}
/*
 * Transfer information from lwp struct to thread information struct.
 * XXX -- lots of this needs cleaning up.
 */
static void
td_thr2to(td_thragent_t *ta_p, psaddr_t ts_addr,
	ulwp_t *ulwp, td_thrinfo_t *ti_p)
{
	lwpid_t lwpid;

	if ((lwpid = ulwp->ul_lwpid) == 0)
		lwpid = 1;
	(void) memset(ti_p, 0, sizeof (*ti_p));
	ti_p->ti_ta_p = ta_p;
	ti_p->ti_user_flags = ulwp->ul_usropts;
	ti_p->ti_tid = lwpid;
	ti_p->ti_exitval = ulwp->ul_rval;
	ti_p->ti_startfunc = (psaddr_t)ulwp->ul_startpc;
	if (!ulwp->ul_dead) {
		/*
		 * The bloody fools got this backwards!
		 */
		ti_p->ti_stkbase = (psaddr_t)ulwp->ul_stktop;
		ti_p->ti_stksize = ulwp->ul_stksiz;
	}
	ti_p->ti_ro_area = ts_addr;
	ti_p->ti_ro_size = ulwp->ul_replace?
	    REPLACEMENT_SIZE : sizeof (ulwp_t);
	ti_p->ti_state = ulwp->ul_dead? TD_THR_ZOMBIE :
	    ulwp->ul_stop? TD_THR_STOPPED :
	    ulwp->ul_wchan? TD_THR_SLEEP :
	    TD_THR_ACTIVE;
	ti_p->ti_db_suspended = 0;
	ti_p->ti_type = TD_THR_USER;
	ti_p->ti_sp = ulwp->ul_sp;
	ti_p->ti_flags = 0;
	ti_p->ti_pri = ulwp->ul_pri;
	ti_p->ti_lid = lwpid;
	if (!ulwp->ul_dead)
		ti_p->ti_sigmask = ulwp->ul_sigmask;
	ti_p->ti_traceme = 0;
	ti_p->ti_preemptflag = 0;
	ti_p->ti_pirecflag = 0;
	(void) sigemptyset(&ti_p->ti_pending);
	ti_p->ti_events = ulwp->ul_td_evbuf.eventmask;
}
1344 td_thr2to32(td_thragent_t
*ta_p
, psaddr_t ts_addr
,
1345 ulwp32_t
*ulwp
, td_thrinfo_t
*ti_p
)
1349 if ((lwpid
= ulwp
->ul_lwpid
) == 0)
1351 (void) memset(ti_p
, 0, sizeof (*ti_p
));
1352 ti_p
->ti_ta_p
= ta_p
;
1353 ti_p
->ti_user_flags
= ulwp
->ul_usropts
;
1354 ti_p
->ti_tid
= lwpid
;
1355 ti_p
->ti_exitval
= (void *)(uintptr_t)ulwp
->ul_rval
;
1356 ti_p
->ti_startfunc
= (psaddr_t
)ulwp
->ul_startpc
;
1357 if (!ulwp
->ul_dead
) {
1359 * The bloody fools got this backwards!
1361 ti_p
->ti_stkbase
= (psaddr_t
)ulwp
->ul_stktop
;
1362 ti_p
->ti_stksize
= ulwp
->ul_stksiz
;
1364 ti_p
->ti_ro_area
= ts_addr
;
1365 ti_p
->ti_ro_size
= ulwp
->ul_replace
?
1366 REPLACEMENT_SIZE32
: sizeof (ulwp32_t
);
1367 ti_p
->ti_state
= ulwp
->ul_dead
? TD_THR_ZOMBIE
:
1368 ulwp
->ul_stop
? TD_THR_STOPPED
:
1369 ulwp
->ul_wchan
? TD_THR_SLEEP
:
1371 ti_p
->ti_db_suspended
= 0;
1372 ti_p
->ti_type
= TD_THR_USER
;
1373 ti_p
->ti_sp
= (uint32_t)ulwp
->ul_sp
;
1375 ti_p
->ti_pri
= ulwp
->ul_pri
;
1376 ti_p
->ti_lid
= lwpid
;
1378 ti_p
->ti_sigmask
= *(sigset_t
*)&ulwp
->ul_sigmask
;
1379 ti_p
->ti_traceme
= 0;
1380 ti_p
->ti_preemptflag
= 0;
1381 ti_p
->ti_pirecflag
= 0;
1382 (void) sigemptyset(&ti_p
->ti_pending
);
1383 ti_p
->ti_events
= ulwp
->ul_td_evbuf
.eventmask
;
1385 #endif /* _SYSCALL32 */
/*
 * Get thread information.
 */
#pragma weak td_thr_get_info = __td_thr_get_info
td_err_e
__td_thr_get_info(td_thrhandle_t *th_p, td_thrinfo_t *ti_p)
{
	struct ps_prochandle *ph_p;
	td_thragent_t	*ta_p;
	td_err_e	return_val;
	psaddr_t	psaddr;

	if (ti_p == NULL)
		return (TD_ERR);
	(void) memset(ti_p, 0, sizeof (*ti_p));

	if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
		return (return_val);
	ta_p = th_p->th_ta_p;
	if (ps_pstop(ph_p) != PS_OK) {
		ph_unlock(ta_p);
		return (TD_DBERR);
	}

	/*
	 * Read the ulwp struct from the process.
	 * Transfer the ulwp struct to the thread information struct.
	 */
	psaddr = th_p->th_unique;
	if (ta_p->model == PR_MODEL_NATIVE) {
		ulwp_t ulwp;

		if (ps_pdread(ph_p, psaddr, &ulwp, sizeof (ulwp)) != PS_OK &&
		    ((void) memset(&ulwp, 0, sizeof (ulwp)),
		    ps_pdread(ph_p, psaddr, &ulwp, REPLACEMENT_SIZE)) != PS_OK)
			return_val = TD_DBERR;
		else
			td_thr2to(ta_p, psaddr, &ulwp, ti_p);
	} else {
#if defined(_LP64) && defined(_SYSCALL32)
		ulwp32_t ulwp;

		if (ps_pdread(ph_p, psaddr, &ulwp, sizeof (ulwp)) != PS_OK &&
		    ((void) memset(&ulwp, 0, sizeof (ulwp)),
		    ps_pdread(ph_p, psaddr, &ulwp, REPLACEMENT_SIZE32)) !=
		    PS_OK)
			return_val = TD_DBERR;
		else
			td_thr2to32(ta_p, psaddr, &ulwp, ti_p);
#else
		return_val = TD_ERR;
#endif	/* _SYSCALL32 */
	}

	(void) ps_pcontinue(ph_p);
	ph_unlock(ta_p);
	return (return_val);
}
/*
 * Given a process and an event number, return information about
 * an address in the process or at which a breakpoint can be set
 * to monitor the event.
 */
#pragma weak td_ta_event_addr = __td_ta_event_addr
td_err_e
__td_ta_event_addr(td_thragent_t *ta_p, td_event_e event, td_notify_t *notify_p)
{
	if (ta_p == NULL)
		return (TD_BADTA);
	if (event < TD_MIN_EVENT_NUM || event > TD_MAX_EVENT_NUM)
		return (TD_NOEVENT);
	if (notify_p == NULL)
		return (TD_ERR);

	notify_p->type = NOTIFY_BPT;
	notify_p->u.bptaddr = ta_p->tdb_events[event - TD_MIN_EVENT_NUM];

	return (TD_OK);
}
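
/*
 * Editor's example (hypothetical caller): find where to plant a
 * breakpoint so thread-creation events can be observed.
 * plant_breakpoint() stands in for the debugger's own mechanism.
 *
 *	td_notify_t notify;
 *
 *	if (td_ta_event_addr(ta, TD_CREATE, &notify) == TD_OK &&
 *	    notify.type == NOTIFY_BPT)
 *		plant_breakpoint(notify.u.bptaddr);
 */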
/*
 * Add the events in eventset 2 to eventset 1.
 */
static void
eventsetaddset(td_thr_events_t *event1_p, td_thr_events_t *event2_p)
{
	int	i;

	for (i = 0; i < TD_EVENTSIZE; i++)
		event1_p->event_bits[i] |= event2_p->event_bits[i];
}

/*
 * Delete the events in eventset 2 from eventset 1.
 */
static void
eventsetdelset(td_thr_events_t *event1_p, td_thr_events_t *event2_p)
{
	int	i;

	for (i = 0; i < TD_EVENTSIZE; i++)
		event1_p->event_bits[i] &= ~event2_p->event_bits[i];
}
/*
 * Either add or delete the given event set from a thread's event mask.
 */
static td_err_e
mod_eventset(td_thrhandle_t *th_p, td_thr_events_t *events, int onoff)
{
	struct ps_prochandle *ph_p;
	td_err_e	return_val = TD_OK;
	char		enable;
	td_thr_events_t	evset;
	psaddr_t	psaddr_evset;
	psaddr_t	psaddr_enab;

	if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
		return (return_val);
	if (th_p->th_ta_p->model == PR_MODEL_NATIVE) {
		ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;
		psaddr_evset = (psaddr_t)&ulwp->ul_td_evbuf.eventmask;
		psaddr_enab = (psaddr_t)&ulwp->ul_td_events_enable;
	} else {
#if defined(_LP64) && defined(_SYSCALL32)
		ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;
		psaddr_evset = (psaddr_t)&ulwp->ul_td_evbuf.eventmask;
		psaddr_enab = (psaddr_t)&ulwp->ul_td_events_enable;
#else
		ph_unlock(th_p->th_ta_p);
		return (TD_ERR);
#endif	/* _SYSCALL32 */
	}
	if (ps_pstop(ph_p) != PS_OK) {
		ph_unlock(th_p->th_ta_p);
		return (TD_DBERR);
	}

	if (ps_pdread(ph_p, psaddr_evset, &evset, sizeof (evset)) != PS_OK)
		return_val = TD_DBERR;
	else {
		if (onoff)
			eventsetaddset(&evset, events);
		else
			eventsetdelset(&evset, events);
		if (ps_pdwrite(ph_p, psaddr_evset, &evset, sizeof (evset))
		    != PS_OK)
			return_val = TD_DBERR;
		else {
			enable = 0;
			if (td_eventismember(&evset, TD_EVENTS_ENABLE))
				enable = 1;
			if (ps_pdwrite(ph_p, psaddr_enab,
			    &enable, sizeof (enable)) != PS_OK)
				return_val = TD_DBERR;
		}
	}

	(void) ps_pcontinue(ph_p);
	ph_unlock(th_p->th_ta_p);
	return (return_val);
}
/*
 * Enable or disable tracing for a given thread.  Tracing
 * is filtered based on the event mask of each thread.  Tracing
 * can be turned on/off for the thread without changing thread
 * event mask.
 * Currently unused by dbx.
 */
#pragma weak td_thr_event_enable = __td_thr_event_enable
td_err_e
__td_thr_event_enable(td_thrhandle_t *th_p, int onoff)
{
	td_thr_events_t	evset;

	td_event_emptyset(&evset);
	td_event_addset(&evset, TD_EVENTS_ENABLE);
	return (mod_eventset(th_p, &evset, onoff));
}
/*
 * Set event mask to enable event.  event is turned on in
 * event mask for thread.  If a thread encounters an event
 * for which its event mask is on, notification will be sent
 * to the debugger.
 * Addresses for each event are provided to the
 * debugger.  It is assumed that a breakpoint of some type will
 * be placed at that address.  If the event mask for the thread
 * is on, the instruction at the address will be executed.
 * Otherwise, the instruction will be skipped.
 */
#pragma weak td_thr_set_event = __td_thr_set_event
td_err_e
__td_thr_set_event(td_thrhandle_t *th_p, td_thr_events_t *events)
{
	return (mod_eventset(th_p, events, 1));
}
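
/*
 * Editor's example (hypothetical caller): ask for creation and death
 * events from one thread, then turn event reporting on for it.
 *
 *	td_thr_events_t events;
 *
 *	td_event_emptyset(&events);
 *	td_event_addset(&events, TD_CREATE);
 *	td_event_addset(&events, TD_DEATH);
 *	if (td_thr_set_event(th, &events) == TD_OK)
 *		(void) td_thr_event_enable(th, 1);
 */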
/*
 * Enable or disable a set of events in the process-global event mask,
 * depending on the value of onoff.
 */
static td_err_e
td_ta_mod_event(td_thragent_t *ta_p, td_thr_events_t *events, int onoff)
{
	struct ps_prochandle *ph_p;
	td_thr_events_t targ_eventset;
	td_err_e	return_val;

	if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
		return (return_val);
	if (ps_pstop(ph_p) != PS_OK) {
		ph_unlock(ta_p);
		return (TD_DBERR);
	}
	if (ps_pdread(ph_p, ta_p->tdb_eventmask_addr,
	    &targ_eventset, sizeof (targ_eventset)) != PS_OK)
		return_val = TD_DBERR;
	else {
		if (onoff)
			eventsetaddset(&targ_eventset, events);
		else
			eventsetdelset(&targ_eventset, events);
		if (ps_pdwrite(ph_p, ta_p->tdb_eventmask_addr,
		    &targ_eventset, sizeof (targ_eventset)) != PS_OK)
			return_val = TD_DBERR;
	}
	(void) ps_pcontinue(ph_p);
	ph_unlock(ta_p);
	return (return_val);
}
/*
 * Enable a set of events in the process-global event mask.
 */
#pragma weak td_ta_set_event = __td_ta_set_event
td_err_e
__td_ta_set_event(td_thragent_t *ta_p, td_thr_events_t *events)
{
	return (td_ta_mod_event(ta_p, events, 1));
}
/*
 * Set event mask to disable the given event set; these events are cleared
 * from the event mask of the thread.  Events that occur for a thread
 * with the event masked off will not cause notification to be
 * sent to the debugger (see td_thr_set_event for fuller description).
 */
#pragma weak td_thr_clear_event = __td_thr_clear_event
td_err_e
__td_thr_clear_event(td_thrhandle_t *th_p, td_thr_events_t *events)
{
	return (mod_eventset(th_p, events, 0));
}
/*
 * Disable a set of events in the process-global event mask.
 */
#pragma weak td_ta_clear_event = __td_ta_clear_event
td_err_e
__td_ta_clear_event(td_thragent_t *ta_p, td_thr_events_t *events)
{
	return (td_ta_mod_event(ta_p, events, 0));
}
/*
 * This function returns the most recent event message, if any,
 * associated with a thread.  Given a thread handle, return the message
 * corresponding to the event encountered by the thread.  Only one
 * message per thread is saved.  Messages from earlier events are lost
 * when later events occur.
 */
#pragma weak td_thr_event_getmsg = __td_thr_event_getmsg
td_err_e
__td_thr_event_getmsg(td_thrhandle_t *th_p, td_event_msg_t *msg)
{
	struct ps_prochandle *ph_p;
	td_err_e	return_val = TD_OK;
	psaddr_t	psaddr;

	if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
		return (return_val);
	if (ps_pstop(ph_p) != PS_OK) {
		ph_unlock(th_p->th_ta_p);
		return (TD_BADTA);
	}
	if (th_p->th_ta_p->model == PR_MODEL_NATIVE) {
		ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;
		td_evbuf_t evbuf;

		psaddr = (psaddr_t)&ulwp->ul_td_evbuf;
		if (ps_pdread(ph_p, psaddr, &evbuf, sizeof (evbuf)) != PS_OK) {
			return_val = TD_DBERR;
		} else if (evbuf.eventnum == TD_EVENT_NONE) {
			return_val = TD_NOEVENT;
		} else {
			msg->event = evbuf.eventnum;
			msg->th_p = (td_thrhandle_t *)th_p;
			msg->msg.data = (uintptr_t)evbuf.eventdata;
			/* "Consume" the message */
			evbuf.eventnum = TD_EVENT_NONE;
			evbuf.eventdata = 0;
			if (ps_pdwrite(ph_p, psaddr, &evbuf, sizeof (evbuf))
			    != PS_OK)
				return_val = TD_DBERR;
		}
	} else {
#if defined(_LP64) && defined(_SYSCALL32)
		ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;
		td_evbuf32_t evbuf;

		psaddr = (psaddr_t)&ulwp->ul_td_evbuf;
		if (ps_pdread(ph_p, psaddr, &evbuf, sizeof (evbuf)) != PS_OK) {
			return_val = TD_DBERR;
		} else if (evbuf.eventnum == TD_EVENT_NONE) {
			return_val = TD_NOEVENT;
		} else {
			msg->event = evbuf.eventnum;
			msg->th_p = (td_thrhandle_t *)th_p;
			msg->msg.data = (uintptr_t)evbuf.eventdata;
			/* "Consume" the message */
			evbuf.eventnum = TD_EVENT_NONE;
			evbuf.eventdata = (uintptr_t)NULL;
			if (ps_pdwrite(ph_p, psaddr, &evbuf, sizeof (evbuf))
			    != PS_OK)
				return_val = TD_DBERR;
		}
#else
		return_val = TD_ERR;
#endif	/* _SYSCALL32 */
	}

	(void) ps_pcontinue(ph_p);
	ph_unlock(th_p->th_ta_p);
	return (return_val);
}
/*
 * The callback function td_ta_event_getmsg uses when looking for
 * a thread with an event.  A thin wrapper around td_thr_event_getmsg.
 */
static int
event_msg_cb(const td_thrhandle_t *th_p, void *arg)
{
	static td_thrhandle_t th;
	td_event_msg_t *msg = arg;

	if (__td_thr_event_getmsg((td_thrhandle_t *)th_p, msg) == TD_OK) {
		/*
		 * Got an event, stop iterating.
		 *
		 * Because of past mistakes in interface definition,
		 * we are forced to pass back a static local variable
		 * for the thread handle because th_p is a pointer
		 * to a local variable in __td_ta_thr_iter().
		 * Grr...
		 */
		th = *th_p;
		msg->th_p = &th;
		return (1);
	}
	return (0);
}
/*
 * This function is just like td_thr_event_getmsg, except that it is
 * passed a process handle rather than a thread handle, and returns
 * an event message for some thread in the process that has an event
 * message pending.  If no thread has an event message pending, this
 * routine returns TD_NOEVENT.  Thus, all pending event messages may
 * be collected from a process by repeatedly calling this routine
 * until it returns TD_NOEVENT.
 */
#pragma weak td_ta_event_getmsg = __td_ta_event_getmsg
td_err_e
__td_ta_event_getmsg(td_thragent_t *ta_p, td_event_msg_t *msg)
{
	td_err_e return_val;

	if (ta_p == NULL)
		return (TD_BADTA);
	if (ta_p->ph_p == NULL)
		return (TD_BADPH);
	if (msg == NULL)
		return (TD_ERR);
	msg->event = TD_EVENT_NONE;
	if ((return_val = __td_ta_thr_iter(ta_p, event_msg_cb, msg,
	    TD_THR_ANY_STATE, TD_THR_LOWEST_PRIORITY, TD_SIGNO_MASK,
	    TD_THR_ANY_USER_FLAGS)) != TD_OK)
		return (return_val);
	if (msg->event == TD_EVENT_NONE)
		return (TD_NOEVENT);
	return (TD_OK);
}
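
/*
 * Editor's example (hypothetical caller): drain all pending event
 * messages after the target stops on an event breakpoint.
 * handle_event() stands in for the debugger's own dispatch code.
 *
 *	td_event_msg_t msg;
 *
 *	while (td_ta_event_getmsg(ta, &msg) == TD_OK)
 *		handle_event(msg.event, msg.msg.data);
 */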
static lwpid_t
thr_to_lwpid(const td_thrhandle_t *th_p)
{
	struct ps_prochandle *ph_p = th_p->th_ta_p->ph_p;
	lwpid_t lwpid;

	/*
	 * The caller holds the prochandle lock
	 * and has already verified everything.
	 */
	if (th_p->th_ta_p->model == PR_MODEL_NATIVE) {
		ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;

		if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_lwpid,
		    &lwpid, sizeof (lwpid)) != PS_OK)
			lwpid = 0;
		else if (lwpid == 0)
			lwpid = 1;
	} else {
#if defined(_LP64) && defined(_SYSCALL32)
		ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;

		if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_lwpid,
		    &lwpid, sizeof (lwpid)) != PS_OK)
			lwpid = 0;
		else if (lwpid == 0)
			lwpid = 1;
#else
		lwpid = 0;
#endif	/* _SYSCALL32 */
	}

	return (lwpid);
}
/*
 * Suspend a thread.
 * XXX: What does this mean in a one-level model?
 */
#pragma weak td_thr_dbsuspend = __td_thr_dbsuspend
td_err_e
__td_thr_dbsuspend(const td_thrhandle_t *th_p)
{
	struct ps_prochandle *ph_p;
	td_err_e return_val;

	if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
		return (return_val);
	if (ps_lstop(ph_p, thr_to_lwpid(th_p)) != PS_OK)
		return_val = TD_DBERR;
	ph_unlock(th_p->th_ta_p);
	return (return_val);
}
/*
 * Resume a suspended thread.
 * XXX: What does this mean in a one-level model?
 */
#pragma weak td_thr_dbresume = __td_thr_dbresume
td_err_e
__td_thr_dbresume(const td_thrhandle_t *th_p)
{
	struct ps_prochandle *ph_p;
	td_err_e return_val;

	if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
		return (return_val);
	if (ps_lcontinue(ph_p, thr_to_lwpid(th_p)) != PS_OK)
		return_val = TD_DBERR;
	ph_unlock(th_p->th_ta_p);
	return (return_val);
}
/*
 * Set a thread's signal mask.
 * Currently unused by dbx.
 */
#pragma weak td_thr_sigsetmask = __td_thr_sigsetmask
/* ARGSUSED */
td_err_e
__td_thr_sigsetmask(const td_thrhandle_t *th_p, const sigset_t ti_sigmask)
{
	return (TD_NOCAPAB);
}

/*
 * Set a thread's "signals-pending" set.
 * Currently unused by dbx.
 */
#pragma weak td_thr_setsigpending = __td_thr_setsigpending
/* ARGSUSED */
td_err_e
__td_thr_setsigpending(const td_thrhandle_t *th_p,
	uchar_t ti_pending_flag, const sigset_t ti_pending)
{
	return (TD_NOCAPAB);
}
/*
 * Get a thread's general register set.
 */
#pragma weak td_thr_getgregs = __td_thr_getgregs
td_err_e
__td_thr_getgregs(td_thrhandle_t *th_p, prgregset_t regset)
{
	struct ps_prochandle *ph_p;
	td_err_e return_val;

	if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
		return (return_val);
	if (ps_pstop(ph_p) != PS_OK) {
		ph_unlock(th_p->th_ta_p);
		return (TD_DBERR);
	}

	if (ps_lgetregs(ph_p, thr_to_lwpid(th_p), regset) != PS_OK)
		return_val = TD_DBERR;

	(void) ps_pcontinue(ph_p);
	ph_unlock(th_p->th_ta_p);
	return (return_val);
}
/*
 * Set a thread's general register set.
 */
#pragma weak td_thr_setgregs = __td_thr_setgregs
td_err_e
__td_thr_setgregs(td_thrhandle_t *th_p, const prgregset_t regset)
{
	struct ps_prochandle *ph_p;
	td_err_e return_val;

	if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
		return (return_val);
	if (ps_pstop(ph_p) != PS_OK) {
		ph_unlock(th_p->th_ta_p);
		return (TD_DBERR);
	}

	if (ps_lsetregs(ph_p, thr_to_lwpid(th_p), regset) != PS_OK)
		return_val = TD_DBERR;

	(void) ps_pcontinue(ph_p);
	ph_unlock(th_p->th_ta_p);
	return (return_val);
}
/*
 * Get a thread's floating-point register set.
 */
#pragma weak td_thr_getfpregs = __td_thr_getfpregs
td_err_e
__td_thr_getfpregs(td_thrhandle_t *th_p, prfpregset_t *fpregset)
{
	struct ps_prochandle *ph_p;
	td_err_e return_val;

	if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
		return (return_val);
	if (ps_pstop(ph_p) != PS_OK) {
		ph_unlock(th_p->th_ta_p);
		return (TD_DBERR);
	}

	if (ps_lgetfpregs(ph_p, thr_to_lwpid(th_p), fpregset) != PS_OK)
		return_val = TD_DBERR;

	(void) ps_pcontinue(ph_p);
	ph_unlock(th_p->th_ta_p);
	return (return_val);
}
/*
 * Set a thread's floating-point register set.
 */
#pragma weak td_thr_setfpregs = __td_thr_setfpregs
td_err_e
__td_thr_setfpregs(td_thrhandle_t *th_p, const prfpregset_t *fpregset)
{
	struct ps_prochandle *ph_p;
	td_err_e return_val;

	if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
		return (return_val);
	if (ps_pstop(ph_p) != PS_OK) {
		ph_unlock(th_p->th_ta_p);
		return (TD_DBERR);
	}

	if (ps_lsetfpregs(ph_p, thr_to_lwpid(th_p), fpregset) != PS_OK)
		return_val = TD_DBERR;

	(void) ps_pcontinue(ph_p);
	ph_unlock(th_p->th_ta_p);
	return (return_val);
}
/*
 * Get the size of the extra state register set for this architecture.
 * Currently unused by dbx.
 */
#pragma weak td_thr_getxregsize = __td_thr_getxregsize
/* ARGSUSED */
td_err_e
__td_thr_getxregsize(td_thrhandle_t *th_p, int *xregsize)
{
	return (TD_NOXREGS);
}

/*
 * Get a thread's extra state register set.
 */
#pragma weak td_thr_getxregs = __td_thr_getxregs
/* ARGSUSED */
td_err_e
__td_thr_getxregs(td_thrhandle_t *th_p, void *xregset)
{
	return (TD_NOXREGS);
}

/*
 * Set a thread's extra state register set.
 */
#pragma weak td_thr_setxregs = __td_thr_setxregs
/* ARGSUSED */
td_err_e
__td_thr_setxregs(td_thrhandle_t *th_p, const void *xregset)
{
	return (TD_NOXREGS);
}
struct searcher {
	psaddr_t	addr;
	int		status;
};

/*
 * Check the struct thread address in *th_p against the first
 * value in "data".  If a match is found, set the second value in
 * "data" to 1 and return 1 to terminate iterations.
 * This function is used by td_thr_validate() to verify that
 * a thread handle is valid.
 */
static int
td_searcher(const td_thrhandle_t *th_p, void *data)
{
	struct searcher *searcher_data = (struct searcher *)data;

	if (searcher_data->addr == th_p->th_unique) {
		searcher_data->status = 1;
		return (1);
	}
	return (0);
}
/*
 * Validate the thread handle.  Check that
 * a thread exists in the thread agent/process that
 * corresponds to thread with handle *th_p.
 * Currently unused by dbx.
 */
#pragma weak td_thr_validate = __td_thr_validate
td_err_e
__td_thr_validate(const td_thrhandle_t *th_p)
{
	td_err_e return_val;
	struct searcher searcher_data = {0, 0};

	if (th_p == NULL)
		return (TD_BADTH);
	if (th_p->th_unique == 0 || th_p->th_ta_p == NULL)
		return (TD_BADTH);

	/*
	 * LOCKING EXCEPTION - Locking is not required
	 * here because no use of the thread agent is made (other
	 * than the sanity check) and checking of the thread
	 * agent will be done in __td_ta_thr_iter.
	 */

	searcher_data.addr = th_p->th_unique;
	return_val = __td_ta_thr_iter(th_p->th_ta_p,
	    td_searcher, &searcher_data,
	    TD_THR_ANY_STATE, TD_THR_LOWEST_PRIORITY,
	    TD_SIGNO_MASK, TD_THR_ANY_USER_FLAGS);

	if (return_val == TD_OK && searcher_data.status == 0)
		return_val = TD_NOTHR;

	return (return_val);
}
/*
 * Get a thread's private binding to a given thread specific
 * data (TSD) key (see thr_getspecific(3T)).  If the thread doesn't
 * have a binding for a particular key, then NULL is returned.
 */
#pragma weak td_thr_tsd = __td_thr_tsd
td_err_e
__td_thr_tsd(td_thrhandle_t *th_p, thread_key_t key, void **data_pp)
{
	struct ps_prochandle *ph_p;
	td_thragent_t	*ta_p;
	td_err_e	return_val;
	int		maxkey;
	int		nkey;
	psaddr_t	tsd_paddr;

	if (data_pp == NULL)
		return (TD_ERR);
	*data_pp = NULL;
	if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
		return (return_val);
	ta_p = th_p->th_ta_p;
	if (ps_pstop(ph_p) != PS_OK) {
		ph_unlock(ta_p);
		return (TD_DBERR);
	}

	if (ta_p->model == PR_MODEL_NATIVE) {
		ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;
		tsd_metadata_t tsdm;
		tsd_t stsd;

		if (ps_pdread(ph_p,
		    ta_p->uberdata_addr + offsetof(uberdata_t, tsd_metadata),
		    &tsdm, sizeof (tsdm)) != PS_OK)
			return_val = TD_DBERR;
		else if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_stsd,
		    &tsd_paddr, sizeof (tsd_paddr)) != PS_OK)
			return_val = TD_DBERR;
		else if (tsd_paddr != 0 &&
		    ps_pdread(ph_p, tsd_paddr, &stsd, sizeof (stsd)) != PS_OK)
			return_val = TD_DBERR;
		else {
			maxkey = tsdm.tsdm_nused;
			nkey = tsd_paddr == 0 ? TSD_NFAST : stsd.tsd_nalloc;

			if (key < TSD_NFAST)
				tsd_paddr = (psaddr_t)&ulwp->ul_ftsd[0];
		}
	} else {
#if defined(_LP64) && defined(_SYSCALL32)
		ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;
		tsd_metadata32_t tsdm;
		tsd32_t stsd;
		caddr32_t addr;

		if (ps_pdread(ph_p,
		    ta_p->uberdata_addr + offsetof(uberdata32_t, tsd_metadata),
		    &tsdm, sizeof (tsdm)) != PS_OK)
			return_val = TD_DBERR;
		else if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_stsd,
		    &addr, sizeof (addr)) != PS_OK)
			return_val = TD_DBERR;
		else if (addr != 0 &&
		    ps_pdread(ph_p, addr, &stsd, sizeof (stsd)) != PS_OK)
			return_val = TD_DBERR;
		else {
			maxkey = tsdm.tsdm_nused;
			nkey = addr == 0 ? TSD_NFAST : stsd.tsd_nalloc;

			if (key < TSD_NFAST) {
				tsd_paddr = (psaddr_t)&ulwp->ul_ftsd[0];
			} else {
				tsd_paddr = addr;
			}
		}
#else
		return_val = TD_ERR;
#endif	/* _SYSCALL32 */
	}

	if (return_val == TD_OK && (key < 1 || key >= maxkey))
		return_val = TD_NOTSD;
	if (return_val != TD_OK || key >= nkey) {
		/* NULL has already been stored in data_pp */
		(void) ps_pcontinue(ph_p);
		ph_unlock(ta_p);
		return (return_val);
	}

	/*
	 * Read the value from the thread's tsd array.
	 */
	if (ta_p->model == PR_MODEL_NATIVE) {
		void *value;

		if (ps_pdread(ph_p, tsd_paddr + key * sizeof (void *),
		    &value, sizeof (value)) != PS_OK)
			return_val = TD_DBERR;
		else
			*data_pp = value;
#if defined(_LP64) && defined(_SYSCALL32)
	} else {
		caddr32_t value32;

		if (ps_pdread(ph_p, tsd_paddr + key * sizeof (caddr32_t),
		    &value32, sizeof (value32)) != PS_OK)
			return_val = TD_DBERR;
		else
			*data_pp = (void *)(uintptr_t)value32;
#endif	/* _SYSCALL32 */
	}

	(void) ps_pcontinue(ph_p);
	ph_unlock(ta_p);
	return (return_val);
}
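
/*
 * Editor's example (hypothetical caller): fetch a thread's binding
 * for a TSD key the debugger learned from td_ta_tsd_iter().
 *
 *	void *value;
 *
 *	if (td_thr_tsd(th, key, &value) == TD_OK && value != NULL)
 *		... value is the target-side pointer that thread
 *		    passed to thr_setspecific() for this key ...
 */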
/*
 * Get the base address of a thread's thread local storage (TLS) block
 * for the module (executable or shared object) identified by 'moduleid'.
 */
#pragma weak td_thr_tlsbase = __td_thr_tlsbase
td_err_e
__td_thr_tlsbase(td_thrhandle_t *th_p, ulong_t moduleid, psaddr_t *base)
{
	struct ps_prochandle *ph_p;
	td_thragent_t	*ta_p;
	td_err_e	return_val;

	if (base == NULL)
		return (TD_ERR);
	*base = 0;
	if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
		return (return_val);
	ta_p = th_p->th_ta_p;
	if (ps_pstop(ph_p) != PS_OK) {
		ph_unlock(ta_p);
		return (TD_DBERR);
	}

	if (ta_p->model == PR_MODEL_NATIVE) {
		ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;
		tls_metadata_t tls_metadata;
		TLS_modinfo tlsmod;
		tls_t tls;

		if (ps_pdread(ph_p,
		    ta_p->uberdata_addr + offsetof(uberdata_t, tls_metadata),
		    &tls_metadata, sizeof (tls_metadata)) != PS_OK)
			return_val = TD_DBERR;
		else if (moduleid >= tls_metadata.tls_modinfo.tls_size)
			return_val = TD_NOTLS;
		else if (ps_pdread(ph_p,
		    (psaddr_t)((TLS_modinfo *)
		    tls_metadata.tls_modinfo.tls_data + moduleid),
		    &tlsmod, sizeof (tlsmod)) != PS_OK)
			return_val = TD_DBERR;
		else if (tlsmod.tm_memsz == 0)
			return_val = TD_NOTLS;
		else if (tlsmod.tm_flags & TM_FLG_STATICTLS)
			*base = (psaddr_t)ulwp - tlsmod.tm_stattlsoffset;
		else if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_tls,
		    &tls, sizeof (tls)) != PS_OK)
			return_val = TD_DBERR;
		else if (moduleid >= tls.tls_size)
			return_val = TD_TLSDEFER;
		else if (ps_pdread(ph_p,
		    (psaddr_t)((tls_t *)tls.tls_data + moduleid),
		    &tls, sizeof (tls)) != PS_OK)
			return_val = TD_DBERR;
		else if (tls.tls_size == 0)
			return_val = TD_TLSDEFER;
		else
			*base = (psaddr_t)tls.tls_data;
	} else {
#if defined(_LP64) && defined(_SYSCALL32)
		ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;
		tls_metadata32_t tls_metadata;
		TLS_modinfo32 tlsmod;
		tls32_t tls;

		if (ps_pdread(ph_p,
		    ta_p->uberdata_addr + offsetof(uberdata32_t, tls_metadata),
		    &tls_metadata, sizeof (tls_metadata)) != PS_OK)
			return_val = TD_DBERR;
		else if (moduleid >= tls_metadata.tls_modinfo.tls_size)
			return_val = TD_NOTLS;
		else if (ps_pdread(ph_p,
		    (psaddr_t)((TLS_modinfo32 *)
		    (uintptr_t)tls_metadata.tls_modinfo.tls_data + moduleid),
		    &tlsmod, sizeof (tlsmod)) != PS_OK)
			return_val = TD_DBERR;
		else if (tlsmod.tm_memsz == 0)
			return_val = TD_NOTLS;
		else if (tlsmod.tm_flags & TM_FLG_STATICTLS)
			*base = (psaddr_t)ulwp - tlsmod.tm_stattlsoffset;
		else if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_tls,
		    &tls, sizeof (tls)) != PS_OK)
			return_val = TD_DBERR;
		else if (moduleid >= tls.tls_size)
			return_val = TD_TLSDEFER;
		else if (ps_pdread(ph_p,
		    (psaddr_t)((tls32_t *)(uintptr_t)tls.tls_data + moduleid),
		    &tls, sizeof (tls)) != PS_OK)
			return_val = TD_DBERR;
		else if (tls.tls_size == 0)
			return_val = TD_TLSDEFER;
		else
			*base = (psaddr_t)tls.tls_data;
#else
		return_val = TD_ERR;
#endif	/* _SYSCALL32 */
	}

	(void) ps_pcontinue(ph_p);
	ph_unlock(ta_p);
	return (return_val);
}
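
/*
 * Editor's example (hypothetical caller): compute the address of a
 * TLS variable, given its module id and its offset within that
 * module's TLS template (both obtained from the runtime linker).
 *
 *	psaddr_t base, var_addr;
 *
 *	if (td_thr_tlsbase(th, moduleid, &base) == TD_OK)
 *		var_addr = base + tls_offset;
 *
 * TD_TLSDEFER means the thread has not yet allocated that module's
 * block, so the variable does not exist for this thread yet.
 */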
/*
 * Change a thread's priority to the value specified by ti_pri.
 * Currently unused by dbx.
 */
#pragma weak td_thr_setprio = __td_thr_setprio
/* ARGSUSED */
td_err_e
__td_thr_setprio(td_thrhandle_t *th_p, int ti_pri)
{
	return (TD_NOCAPAB);
}

/*
 * This structure links td_thr_lockowner and the lowner_cb callback function.
 */
typedef struct {
	td_sync_iter_f	*owner_cb;
	void		*owner_cb_arg;
	td_thrhandle_t	*th_p;
} lowner_cb_ctl_t;

static int
lowner_cb(const td_synchandle_t *sh_p, void *arg)
{
	lowner_cb_ctl_t *ocb = arg;
	int trunc = 0;
	union {
		rwlock_t rwl;
		mutex_t mx;
	} rw_m;

	if (ps_pdread(sh_p->sh_ta_p->ph_p, sh_p->sh_unique,
	    &rw_m, sizeof (rw_m)) != PS_OK) {
		trunc = 1;
		if (ps_pdread(sh_p->sh_ta_p->ph_p, sh_p->sh_unique,
		    &rw_m.mx, sizeof (rw_m.mx)) != PS_OK)
			return (0);
	}
	if (rw_m.mx.mutex_magic == MUTEX_MAGIC &&
	    rw_m.mx.mutex_owner == ocb->th_p->th_unique)
		return ((ocb->owner_cb)(sh_p, ocb->owner_cb_arg));
	if (!trunc && rw_m.rwl.magic == RWL_MAGIC) {
		mutex_t *rwlock = &rw_m.rwl.mutex;
		if (rwlock->mutex_owner == ocb->th_p->th_unique)
			return ((ocb->owner_cb)(sh_p, ocb->owner_cb_arg));
	}
	return (0);
}

/*
 * Iterate over the set of locks owned by a specified thread.
 * If cb returns a non-zero value, terminate iterations.
 */
#pragma weak td_thr_lockowner = __td_thr_lockowner
td_err_e
__td_thr_lockowner(const td_thrhandle_t *th_p, td_sync_iter_f *cb,
    void *cb_data)
{
	td_thragent_t	*ta_p;
	td_err_e	return_val;
	lowner_cb_ctl_t	lcb;

	/*
	 * Just sanity checks.
	 */
	if (ph_lock_th((td_thrhandle_t *)th_p, &return_val) == NULL)
		return (return_val);
	ta_p = th_p->th_ta_p;
	ph_unlock(ta_p);

	lcb.owner_cb = cb;
	lcb.owner_cb_arg = cb_data;
	lcb.th_p = (td_thrhandle_t *)th_p;
	return (__td_ta_sync_iter(ta_p, lowner_cb, &lcb));
}
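
/*
 * Illustrative sketch, not part of the library: the iteration is driven
 * by a td_sync_iter_f callback; returning non-zero from it stops the
 * walk.  The callback name and the handle th are hypothetical.
 *
 *	static int
 *	note_owned(const td_synchandle_t *sh_p, void *arg)
 *	{
 *		(void) printf("owns sync object at 0x%lx\n",
 *		    (ulong_t)sh_p->sh_unique);
 *		return (0);
 *	}
 *
 *	(void) td_thr_lockowner(&th, note_owned, NULL);
 */
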
/*
 * If a thread is asleep on a synchronization variable,
 * then get the synchronization handle.
 */
#pragma weak td_thr_sleepinfo = __td_thr_sleepinfo
td_err_e
__td_thr_sleepinfo(const td_thrhandle_t *th_p, td_synchandle_t *sh_p)
{
	struct ps_prochandle *ph_p;
	td_err_e	return_val = TD_OK;
	uintptr_t	wchan;

	if (sh_p == NULL)
		return (TD_ERR);
	if ((ph_p = ph_lock_th((td_thrhandle_t *)th_p, &return_val)) == NULL)
		return (return_val);

	/*
	 * No need to stop the process for a simple read.
	 */
	if (th_p->th_ta_p->model == PR_MODEL_NATIVE) {
		ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;

		if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_wchan,
		    &wchan, sizeof (wchan)) != PS_OK)
			return_val = TD_DBERR;
	} else {
#if defined(_LP64) && defined(_SYSCALL32)
		ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;
		caddr32_t wchan32;

		if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_wchan,
		    &wchan32, sizeof (wchan32)) != PS_OK)
			return_val = TD_DBERR;
		wchan = wchan32;
#else
		return_val = TD_ERR;
#endif	/* _SYSCALL32 */
	}

	if (return_val != TD_OK || wchan == 0) {
		sh_p->sh_ta_p = NULL;
		sh_p->sh_unique = 0;
		if (return_val == TD_OK)
			return_val = TD_ERR;
	} else {
		sh_p->sh_ta_p = th_p->th_ta_p;
		sh_p->sh_unique = (psaddr_t)wchan;
	}

	ph_unlock(th_p->th_ta_p);
	return (return_val);
}
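
/*
 * Illustrative sketch, not part of the library: a debugger can go from a
 * sleeping thread to the object it sleeps on, then classify that object
 * with td_sync_get_info() (defined below).  The handle th is hypothetical.
 *
 *	td_synchandle_t sh;
 *	td_syncinfo_t si;
 *	if (td_thr_sleepinfo(&th, &sh) == TD_OK &&
 *	    td_sync_get_info(&sh, &si) == TD_OK)
 *		si.si_type now distinguishes mutex/condvar/sema/rwlock;
 */
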
/*
 * Which thread is running on an lwp?
 */
#pragma weak td_ta_map_lwp2thr = __td_ta_map_lwp2thr
td_err_e
__td_ta_map_lwp2thr(td_thragent_t *ta_p, lwpid_t lwpid,
    td_thrhandle_t *th_p)
{
	return (__td_ta_map_id2thr(ta_p, lwpid, th_p));
}
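
/*
 * Illustrative sketch, not part of the library: because this libc has a
 * strict 1:1 mapping between lwps and threads, an lwpid is usable
 * directly as a thread id, as the delegation above shows.  The names ta
 * and lwpid are hypothetical.
 *
 *	td_thrhandle_t th;
 *	if (td_ta_map_lwp2thr(ta, lwpid, &th) == TD_OK)
 *		th is the handle of the thread running on lwpid;
 */
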
/*
 * Common code for td_sync_get_info() and td_sync_get_stats()
 */
static td_err_e
sync_get_info_common(const td_synchandle_t *sh_p, struct ps_prochandle *ph_p,
    td_syncinfo_t *si_p)
{
	int trunc = 0;
	td_so_un_t generic_so;

	/*
	 * Determine the sync. object type; a little type fudgery here.
	 * First attempt to read the whole union.  If that fails, attempt
	 * to read just the condvar.  A condvar is the smallest sync. object.
	 */
	if (ps_pdread(ph_p, sh_p->sh_unique,
	    &generic_so, sizeof (generic_so)) != PS_OK) {
		trunc = 1;
		if (ps_pdread(ph_p, sh_p->sh_unique, &generic_so.condition,
		    sizeof (generic_so.condition)) != PS_OK)
			return (TD_DBERR);
	}

	switch (generic_so.condition.cond_magic) {
	case MUTEX_MAGIC:
		if (trunc && ps_pdread(ph_p, sh_p->sh_unique,
		    &generic_so.lock, sizeof (generic_so.lock)) != PS_OK)
			return (TD_DBERR);
		si_p->si_type = TD_SYNC_MUTEX;
		si_p->si_shared_type =
		    (generic_so.lock.mutex_type & USYNC_PROCESS);
		(void) memcpy(si_p->si_flags, &generic_so.lock.mutex_flag,
		    sizeof (generic_so.lock.mutex_flag));
		si_p->si_state.mutex_locked =
		    (generic_so.lock.mutex_lockw != 0);
		si_p->si_size = sizeof (generic_so.lock);
		si_p->si_has_waiters = generic_so.lock.mutex_waiters;
		si_p->si_rcount = generic_so.lock.mutex_rcount;
		si_p->si_prioceiling = generic_so.lock.mutex_ceiling;
		if (si_p->si_state.mutex_locked) {
			if (si_p->si_shared_type & USYNC_PROCESS)
				si_p->si_ownerpid =
				    generic_so.lock.mutex_ownerpid;
			si_p->si_owner.th_ta_p = sh_p->sh_ta_p;
			si_p->si_owner.th_unique = generic_so.lock.mutex_owner;
		}
		break;
	case COND_MAGIC:
		si_p->si_type = TD_SYNC_COND;
		si_p->si_shared_type =
		    (generic_so.condition.cond_type & USYNC_PROCESS);
		(void) memcpy(si_p->si_flags, generic_so.condition.flags.flag,
		    sizeof (generic_so.condition.flags.flag));
		si_p->si_size = sizeof (generic_so.condition);
		si_p->si_has_waiters =
		    (generic_so.condition.cond_waiters_user |
		    generic_so.condition.cond_waiters_kernel)? 1 : 0;
		break;
	case SEMA_MAGIC:
		if (trunc && ps_pdread(ph_p, sh_p->sh_unique,
		    &generic_so.semaphore, sizeof (generic_so.semaphore))
		    != PS_OK)
			return (TD_DBERR);
		si_p->si_type = TD_SYNC_SEMA;
		si_p->si_shared_type =
		    (generic_so.semaphore.type & USYNC_PROCESS);
		si_p->si_state.sem_count = generic_so.semaphore.count;
		si_p->si_size = sizeof (generic_so.semaphore);
		si_p->si_has_waiters =
		    ((lwp_sema_t *)&generic_so.semaphore)->flags[7];
		/* this is useless but the old interface provided it */
		si_p->si_data = (psaddr_t)generic_so.semaphore.count;
		break;
	case RWL_MAGIC:
	{
		uint32_t rwstate;

		if (trunc && ps_pdread(ph_p, sh_p->sh_unique,
		    &generic_so.rwlock, sizeof (generic_so.rwlock)) != PS_OK)
			return (TD_DBERR);
		si_p->si_type = TD_SYNC_RWLOCK;
		si_p->si_shared_type =
		    (generic_so.rwlock.rwlock_type & USYNC_PROCESS);
		si_p->si_size = sizeof (generic_so.rwlock);

		rwstate = (uint32_t)generic_so.rwlock.rwlock_readers;
		if (rwstate & URW_WRITE_LOCKED) {
			si_p->si_state.nreaders = -1;
			si_p->si_is_wlock = 1;
			si_p->si_owner.th_ta_p = sh_p->sh_ta_p;
			si_p->si_owner.th_unique =
			    generic_so.rwlock.rwlock_owner;
			if (si_p->si_shared_type & USYNC_PROCESS)
				si_p->si_ownerpid =
				    generic_so.rwlock.rwlock_ownerpid;
		} else {
			si_p->si_state.nreaders = (rwstate & URW_READERS_MASK);
		}
		si_p->si_has_waiters = ((rwstate & URW_HAS_WAITERS) != 0);

		/* this is useless but the old interface provided it */
		si_p->si_data = (psaddr_t)generic_so.rwlock.readers;
		break;
	}
	default:
		return (TD_BADSH);
	}

	si_p->si_ta_p = sh_p->sh_ta_p;
	si_p->si_sv_addr = sh_p->sh_unique;
	return (TD_OK);
}

/*
 * Given a synchronization handle, fill in the
 * information for the synchronization variable into *si_p.
 */
#pragma weak td_sync_get_info = __td_sync_get_info
td_err_e
__td_sync_get_info(const td_synchandle_t *sh_p, td_syncinfo_t *si_p)
{
	struct ps_prochandle *ph_p;
	td_err_e return_val;

	if (si_p == NULL)
		return (TD_ERR);
	(void) memset(si_p, 0, sizeof (*si_p));
	if ((ph_p = ph_lock_sh(sh_p, &return_val)) == NULL)
		return (return_val);
	if (ps_pstop(ph_p) != PS_OK) {
		ph_unlock(sh_p->sh_ta_p);
		return (TD_DBERR);
	}

	return_val = sync_get_info_common(sh_p, ph_p, si_p);

	(void) ps_pcontinue(ph_p);
	ph_unlock(sh_p->sh_ta_p);
	return (return_val);
}
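
/*
 * Illustrative sketch, not part of the library: interpreting *si_p
 * depends on si_type, following the assignments made in
 * sync_get_info_common() above.  The handle sh is hypothetical.
 *
 *	td_syncinfo_t si;
 *	if (td_sync_get_info(&sh, &si) == TD_OK) {
 *		if (si.si_type == TD_SYNC_MUTEX && si.si_state.mutex_locked)
 *			si.si_owner names the owning thread;
 *		else if (si.si_type == TD_SYNC_RWLOCK)
 *			si.si_state.nreaders is -1 when write-locked;
 *	}
 */
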
static uint_t
tdb_addr_hash64(uint64_t addr)
{
	uint64_t value60 = (addr >> 4);	/* 60 bits */
	uint32_t value30 = (value60 >> 30) ^ (value60 & 0x3fffffff);
	return ((value30 >> 15) ^ (value30 & 0x7fff));
}

static uint_t
tdb_addr_hash32(uint64_t addr)
{
	uint32_t value30 = (addr >> 2);	/* 30 bits */
	return ((value30 >> 15) ^ (value30 & 0x7fff));
}
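
/*
 * Worked example (illustrative): both hashes fold a sync object's
 * address down to a 15-bit table index, so the result is always in
 * [0, 0x7fff].  For the 64-bit case with addr == 0x1234567890:
 *
 *	value60 = 0x1234567890 >> 4			= 0x123456789
 *	value30 = (value60 >> 30) ^ (value60 & 0x3fffffff)
 *		= 0x4 ^ 0x23456789			= 0x2345678d
 *	index	= (value30 >> 15) ^ (value30 & 0x7fff)
 *		= 0x468a ^ 0x678d			= 0x2107
 *
 * The 32-bit variant shifts by only 2 because 32-bit sync objects have
 * smaller alignment.
 */
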
static td_err_e
read_sync_stats(td_thragent_t *ta_p, psaddr_t hash_table,
	psaddr_t sync_obj_addr, tdb_sync_stats_t *sync_stats)
{
	psaddr_t next_desc;
	uint64_t first;
	uint_t ix;

	/*
	 * Compute the hash table index from the synch object's address.
	 */
	if (ta_p->model == PR_MODEL_LP64)
		ix = tdb_addr_hash64(sync_obj_addr);
	else
		ix = tdb_addr_hash32(sync_obj_addr);

	/*
	 * Get the address of the first element in the linked list.
	 */
	if (ps_pdread(ta_p->ph_p, hash_table + ix * sizeof (uint64_t),
	    &first, sizeof (first)) != PS_OK)
		return (TD_DBERR);

	/*
	 * Search the linked list for an entry for the synch object.
	 */
	for (next_desc = (psaddr_t)first; next_desc != 0;
	    next_desc = (psaddr_t)sync_stats->next) {
		if (ps_pdread(ta_p->ph_p, next_desc,
		    sync_stats, sizeof (*sync_stats)) != PS_OK)
			return (TD_DBERR);
		if (sync_stats->sync_addr == sync_obj_addr)
			return (TD_OK);
	}

	(void) memset(sync_stats, 0, sizeof (*sync_stats));
	return (TD_OK);
}

/*
 * Given a synchronization handle, fill in the
 * statistics for the synchronization variable into *ss_p.
 */
#pragma weak td_sync_get_stats = __td_sync_get_stats
td_err_e
__td_sync_get_stats(const td_synchandle_t *sh_p, td_syncstats_t *ss_p)
{
	struct ps_prochandle *ph_p;
	td_thragent_t *ta_p;
	td_err_e return_val;
	register_sync_t enable;
	psaddr_t hashaddr;
	tdb_sync_stats_t sync_stats;
	size_t ix;

	if (ss_p == NULL)
		return (TD_ERR);
	(void) memset(ss_p, 0, sizeof (*ss_p));
	if ((ph_p = ph_lock_sh(sh_p, &return_val)) == NULL)
		return (return_val);
	ta_p = sh_p->sh_ta_p;
	if (ps_pstop(ph_p) != PS_OK) {
		ph_unlock(ta_p);
		return (TD_DBERR);
	}

	if ((return_val = sync_get_info_common(sh_p, ph_p, &ss_p->ss_info))
	    != TD_OK) {
		if (return_val != TD_BADSH)
			goto out;
		/* we can correct TD_BADSH */
		(void) memset(&ss_p->ss_info, 0, sizeof (ss_p->ss_info));
		ss_p->ss_info.si_ta_p = sh_p->sh_ta_p;
		ss_p->ss_info.si_sv_addr = sh_p->sh_unique;
		/* we correct si_type and si_size below */
	}

	if (ps_pdread(ph_p, ta_p->tdb_register_sync_addr,
	    &enable, sizeof (enable)) != PS_OK) {
		return_val = TD_DBERR;
		goto out;
	}
	if (enable != REGISTER_SYNC_ON)
		goto out;

	/*
	 * Get the address of the hash table in the target process.
	 */
	if (ta_p->model == PR_MODEL_NATIVE) {
		if (ps_pdread(ph_p, ta_p->uberdata_addr +
		    offsetof(uberdata_t, tdb.tdb_sync_addr_hash),
		    &hashaddr, sizeof (hashaddr)) != PS_OK) {
			return_val = TD_DBERR;
			goto out;
		}
	} else {
#if defined(_LP64) && defined(_SYSCALL32)
		caddr32_t addr;

		if (ps_pdread(ph_p, ta_p->uberdata_addr +
		    offsetof(uberdata32_t, tdb.tdb_sync_addr_hash),
		    &addr, sizeof (addr)) != PS_OK) {
			return_val = TD_DBERR;
			goto out;
		}
		hashaddr = addr;
#else
		return_val = TD_ERR;
		goto out;
#endif	/* _SYSCALL32 */
	}

	if (hashaddr == 0)
		return_val = TD_BADSH;	/* should not happen */
	else
		return_val = read_sync_stats(ta_p, hashaddr,
		    sh_p->sh_unique, &sync_stats);
	if (return_val != TD_OK)
		goto out;

	/*
	 * We have the hash table entry.  Transfer the data to
	 * the td_syncstats_t structure provided by the caller.
	 */
	switch (sync_stats.un.type) {
	case TDB_MUTEX:
	{
		td_mutex_stats_t *msp = &ss_p->ss_un.mutex;

		ss_p->ss_info.si_type = TD_SYNC_MUTEX;
		ss_p->ss_info.si_size = sizeof (mutex_t);
		msp->mutex_lock =
		    sync_stats.un.mutex.mutex_lock;
		msp->mutex_sleep =
		    sync_stats.un.mutex.mutex_sleep;
		msp->mutex_sleep_time =
		    sync_stats.un.mutex.mutex_sleep_time;
		msp->mutex_hold_time =
		    sync_stats.un.mutex.mutex_hold_time;
		msp->mutex_try =
		    sync_stats.un.mutex.mutex_try;
		msp->mutex_try_fail =
		    sync_stats.un.mutex.mutex_try_fail;
		if (sync_stats.sync_addr >= ta_p->hash_table_addr &&
		    (ix = sync_stats.sync_addr - ta_p->hash_table_addr)
		    < ta_p->hash_size * sizeof (thr_hash_table_t))
			msp->mutex_internal =
			    ix / sizeof (thr_hash_table_t) + 1;
		break;
	}
	case TDB_COND:
	{
		td_cond_stats_t *csp = &ss_p->ss_un.cond;

		ss_p->ss_info.si_type = TD_SYNC_COND;
		ss_p->ss_info.si_size = sizeof (cond_t);
		csp->cond_wait =
		    sync_stats.un.cond.cond_wait;
		csp->cond_timedwait =
		    sync_stats.un.cond.cond_timedwait;
		csp->cond_wait_sleep_time =
		    sync_stats.un.cond.cond_wait_sleep_time;
		csp->cond_timedwait_sleep_time =
		    sync_stats.un.cond.cond_timedwait_sleep_time;
		csp->cond_timedwait_timeout =
		    sync_stats.un.cond.cond_timedwait_timeout;
		csp->cond_signal =
		    sync_stats.un.cond.cond_signal;
		csp->cond_broadcast =
		    sync_stats.un.cond.cond_broadcast;
		if (sync_stats.sync_addr >= ta_p->hash_table_addr &&
		    (ix = sync_stats.sync_addr - ta_p->hash_table_addr)
		    < ta_p->hash_size * sizeof (thr_hash_table_t))
			csp->cond_internal =
			    ix / sizeof (thr_hash_table_t) + 1;
		break;
	}
	case TDB_RWLOCK:
	{
		td_rwlock_stats_t *rwsp = &ss_p->ss_un.rwlock;

		ss_p->ss_info.si_type = TD_SYNC_RWLOCK;
		ss_p->ss_info.si_size = sizeof (rwlock_t);
		rwsp->rw_rdlock =
		    sync_stats.un.rwlock.rw_rdlock;
		rwsp->rw_rdlock_try =
		    sync_stats.un.rwlock.rw_rdlock_try;
		rwsp->rw_rdlock_try_fail =
		    sync_stats.un.rwlock.rw_rdlock_try_fail;
		rwsp->rw_wrlock =
		    sync_stats.un.rwlock.rw_wrlock;
		rwsp->rw_wrlock_hold_time =
		    sync_stats.un.rwlock.rw_wrlock_hold_time;
		rwsp->rw_wrlock_try =
		    sync_stats.un.rwlock.rw_wrlock_try;
		rwsp->rw_wrlock_try_fail =
		    sync_stats.un.rwlock.rw_wrlock_try_fail;
		break;
	}
	case TDB_SEMA:
	{
		td_sema_stats_t *ssp = &ss_p->ss_un.sema;

		ss_p->ss_info.si_type = TD_SYNC_SEMA;
		ss_p->ss_info.si_size = sizeof (sema_t);
		ssp->sema_wait =
		    sync_stats.un.sema.sema_wait;
		ssp->sema_wait_sleep =
		    sync_stats.un.sema.sema_wait_sleep;
		ssp->sema_wait_sleep_time =
		    sync_stats.un.sema.sema_wait_sleep_time;
		ssp->sema_trywait =
		    sync_stats.un.sema.sema_trywait;
		ssp->sema_trywait_fail =
		    sync_stats.un.sema.sema_trywait_fail;
		ssp->sema_post =
		    sync_stats.un.sema.sema_post;
		ssp->sema_max_count =
		    sync_stats.un.sema.sema_max_count;
		ssp->sema_min_count =
		    sync_stats.un.sema.sema_min_count;
		break;
	}
	default:
		return_val = TD_BADSH;
		break;
	}

out:
	(void) ps_pcontinue(ph_p);
	ph_unlock(ta_p);
	return (return_val);
}
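
/*
 * Illustrative sketch, not part of the library: statistics exist only
 * while the target has synchronization-object tracking switched on
 * (REGISTER_SYNC_ON), e.g. via td_ta_enable_stats().  The handles ta and
 * sh are hypothetical.
 *
 *	td_syncstats_t ss;
 *	(void) td_ta_enable_stats(ta, 1);
 *	... let the target run for a while, then:
 *	if (td_sync_get_stats(&sh, &ss) == TD_OK &&
 *	    ss.ss_info.si_type == TD_SYNC_MUTEX)
 *		ss.ss_un.mutex.mutex_sleep counts contended acquisitions;
 */
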
/*
 * Change the state of a synchronization variable.
 *	1) mutex lock state set to value
 *	2) semaphore's count set to value
 *	3) writer's lock set by value < 0
 *	4) reader's lock number of readers set to value >= 0
 * Currently unused by dbx.
 */
#pragma weak td_sync_setstate = __td_sync_setstate
td_err_e
__td_sync_setstate(const td_synchandle_t *sh_p, long lvalue)
{
	struct ps_prochandle *ph_p;
	int		trunc = 0;
	td_err_e	return_val;
	td_so_un_t	generic_so;
	uint32_t	*rwstate;
	int		value = (int)lvalue;

	if ((ph_p = ph_lock_sh(sh_p, &return_val)) == NULL)
		return (return_val);
	if (ps_pstop(ph_p) != PS_OK) {
		ph_unlock(sh_p->sh_ta_p);
		return (TD_DBERR);
	}

	/*
	 * Read the synch. variable information.
	 * First attempt to read the whole union and if that fails
	 * fall back to reading only the smallest member, the condvar.
	 */
	if (ps_pdread(ph_p, sh_p->sh_unique, &generic_so,
	    sizeof (generic_so)) != PS_OK) {
		trunc = 1;
		if (ps_pdread(ph_p, sh_p->sh_unique, &generic_so.condition,
		    sizeof (generic_so.condition)) != PS_OK) {
			(void) ps_pcontinue(ph_p);
			ph_unlock(sh_p->sh_ta_p);
			return (TD_DBERR);
		}
	}

	/*
	 * Set the new value in the sync. variable, read the synch. variable
	 * information from the process, reset its value and write it back.
	 */
	switch (generic_so.condition.mutex_magic) {
	case MUTEX_MAGIC:
		if (trunc && ps_pdread(ph_p, sh_p->sh_unique,
		    &generic_so.lock, sizeof (generic_so.lock)) != PS_OK) {
			return_val = TD_DBERR;
			break;
		}
		generic_so.lock.mutex_lockw = (uint8_t)value;
		if (ps_pdwrite(ph_p, sh_p->sh_unique, &generic_so.lock,
		    sizeof (generic_so.lock)) != PS_OK)
			return_val = TD_DBERR;
		break;
	case SEMA_MAGIC:
		if (trunc && ps_pdread(ph_p, sh_p->sh_unique,
		    &generic_so.semaphore, sizeof (generic_so.semaphore))
		    != PS_OK) {
			return_val = TD_DBERR;
			break;
		}
		generic_so.semaphore.count = value;
		if (ps_pdwrite(ph_p, sh_p->sh_unique, &generic_so.semaphore,
		    sizeof (generic_so.semaphore)) != PS_OK)
			return_val = TD_DBERR;
		break;
	case COND_MAGIC:
		/* Operation not supported on a condition variable */
		return_val = TD_ERR;
		break;
	case RWL_MAGIC:
		if (trunc && ps_pdread(ph_p, sh_p->sh_unique,
		    &generic_so.rwlock, sizeof (generic_so.rwlock)) != PS_OK) {
			return_val = TD_DBERR;
			break;
		}
		rwstate = (uint32_t *)&generic_so.rwlock.readers;
		*rwstate &= URW_HAS_WAITERS;
		if (value < 0)
			*rwstate |= URW_WRITE_LOCKED;
		else
			*rwstate |= (value & URW_READERS_MASK);
		if (ps_pdwrite(ph_p, sh_p->sh_unique, &generic_so.rwlock,
		    sizeof (generic_so.rwlock)) != PS_OK)
			return_val = TD_DBERR;
		break;
	default:
		/* Bad sync. object type */
		return_val = TD_BADSH;
		break;
	}

	(void) ps_pcontinue(ph_p);
	ph_unlock(sh_p->sh_ta_p);
	return (return_val);
}
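
/*
 * Illustrative sketch, not part of the library: the meaning of the value
 * argument follows the numbered comment above.  With a hypothetical
 * handle sh:
 *
 *	(void) td_sync_setstate(&sh, 0);	mutex: force-unlock
 *	(void) td_sync_setstate(&sh, 5);	semaphore: count = 5
 *	(void) td_sync_setstate(&sh, -1);	rwlock: write-locked
 *	(void) td_sync_setstate(&sh, 3);	rwlock: three readers
 */
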
typedef struct {
	td_thr_iter_f	*waiter_cb;
	psaddr_t	sync_obj_addr;
	uint16_t	sync_magic;
	void		*waiter_cb_arg;
	td_err_e	errcode;
} waiter_cb_ctl_t;

static int
waiters_cb(const td_thrhandle_t *th_p, void *arg)
{
	td_thragent_t	*ta_p = th_p->th_ta_p;
	struct ps_prochandle *ph_p = ta_p->ph_p;
	waiter_cb_ctl_t	*wcb = arg;
	caddr_t		wchan;

	if (ta_p->model == PR_MODEL_NATIVE) {
		ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;

		if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_wchan,
		    &wchan, sizeof (wchan)) != PS_OK) {
			wcb->errcode = TD_DBERR;
			return (1);
		}
	} else {
#if defined(_LP64) && defined(_SYSCALL32)
		ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;
		caddr32_t wchan32;

		if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_wchan,
		    &wchan32, sizeof (wchan32)) != PS_OK) {
			wcb->errcode = TD_DBERR;
			return (1);
		}
		wchan = (caddr_t)(uintptr_t)wchan32;
#else
		wcb->errcode = TD_ERR;
		return (1);
#endif	/* _SYSCALL32 */
	}

	if (wchan == NULL)
		return (0);

	if (wchan == (caddr_t)wcb->sync_obj_addr)
		return ((*wcb->waiter_cb)(th_p, wcb->waiter_cb_arg));

	return (0);
}

/*
 * For a given synchronization variable, iterate over the
 * set of waiting threads.  The call back function is passed
 * two parameters, a pointer to a thread handle and a pointer
 * to extra call back data.
 */
#pragma weak td_sync_waiters = __td_sync_waiters
td_err_e
__td_sync_waiters(const td_synchandle_t *sh_p, td_thr_iter_f *cb, void *cb_data)
{
	struct ps_prochandle *ph_p;
	waiter_cb_ctl_t	wcb;
	td_err_e	return_val;

	if ((ph_p = ph_lock_sh(sh_p, &return_val)) == NULL)
		return (return_val);
	if (ps_pdread(ph_p,
	    (psaddr_t)&((mutex_t *)sh_p->sh_unique)->mutex_magic,
	    (caddr_t)&wcb.sync_magic, sizeof (wcb.sync_magic)) != PS_OK) {
		ph_unlock(sh_p->sh_ta_p);
		return (TD_DBERR);
	}
	ph_unlock(sh_p->sh_ta_p);

	switch (wcb.sync_magic) {
	case MUTEX_MAGIC:
	case COND_MAGIC:
	case SEMA_MAGIC:
	case RWL_MAGIC:
		break;
	default:
		return (TD_BADSH);
	}

	wcb.waiter_cb = cb;
	wcb.sync_obj_addr = sh_p->sh_unique;
	wcb.waiter_cb_arg = cb_data;
	wcb.errcode = TD_OK;
	return_val = __td_ta_thr_iter(sh_p->sh_ta_p, waiters_cb, &wcb,
	    TD_THR_SLEEP, TD_THR_LOWEST_PRIORITY,
	    TD_SIGNO_MASK, TD_THR_ANY_USER_FLAGS);

	if (return_val != TD_OK)
		return (return_val);

	return (wcb.errcode);
}
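
/*
 * Illustrative sketch, not part of the library: a callback that counts
 * the threads blocked on one synchronization object.  Returning 0 keeps
 * the iteration going; the names below are hypothetical.
 *
 *	static int
 *	count_waiter(const td_thrhandle_t *th_p, void *arg)
 *	{
 *		(*(uint_t *)arg)++;
 *		return (0);
 *	}
 *
 *	uint_t nwaiters = 0;
 *	if (td_sync_waiters(&sh, count_waiter, &nwaiters) == TD_OK)
 *		nwaiters threads were asleep on the object;
 */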