/*
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
#include <sys/types.h>
#include <sys/stack.h>
#include <sys/isa_defs.h>
#include <proc_service.h>
/*
 * Functions supporting library function call tracing.
 */

/*
 * Prototypes for the static functions in this file.
 */
void function_entry(private_t *, struct bkpt *, struct callstack *);
void function_return(private_t *, struct callstack *);
int object_iter(void *, const prmap_t *, const char *);
int object_present(void *, const prmap_t *, const char *);
int symbol_iter(void *, const GElf_Sym *, const char *);
uintptr_t get_return_address(uintptr_t *);
int get_arguments(long *argp);
uintptr_t previous_fp(uintptr_t, uintptr_t *);
int lwp_stack_traps(void *cd, const lwpstatus_t *Lsp);
int thr_stack_traps(const td_thrhandle_t *Thp, void *cd);
struct bkpt *create_bkpt(uintptr_t, int, int);
void set_deferred_breakpoints(void);
#define	DEF_MAXCALL	16	/* initial value of Stk->maxcall */

#define	FAULT_ADDR	((uintptr_t)(0-8))

#define	bpt_hash(addr)	((((addr) >> 13) ^ ((addr) >> 2)) & 0x7ff)
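
/*
 * Note on bpt_hash(): the 0x7ff mask folds a breakpoint address into an
 * index in the range [0, 0x7ff], one for each of the HASHSZ buckets of
 * bpt_hashtable; XORing the address shifted right by 13 and by 2 mixes
 * the high-order and low-order bits of the pc so that nearby text
 * addresses spread across different buckets.
 */

/*
 * Set up the libc_db thread agent and its TD_CREATE event breakpoint.
 * Done only once, the first time libc is seen in the address space.
 */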
setup_thread_agent(void)
{
	td_thr_events_t events;

	if (Thr_agent != NULL)		/* only once */
		return;
	if (td_init() != TD_OK || td_ta_new(Proc, &Thr_agent) != TD_OK)
		Thr_agent = NULL;
	else {
		td_event_emptyset(&events);
		td_event_addset(&events, TD_CREATE);
		if (td_ta_event_addr(Thr_agent, TD_CREATE, &notify) == TD_OK &&
		    notify.type == NOTIFY_BPT &&
		    td_ta_set_event(Thr_agent, &events) == TD_OK &&
		    (Bp = create_bkpt(notify.u.bptaddr, 0, 1)) != NULL)
			Bp->flags |= BPT_TD_CREATE;
	}
}
/*
 * Delete all breakpoints in the range [base .. base+size)
 * from the breakpoint hash table.
 */
delete_breakpoints(uintptr_t base, size_t size)
{
	if (bpt_hashtable == NULL)
		return;
	for (i = 0; i < HASHSZ; i++) {
		Bpp = &bpt_hashtable[i];
		while ((Bp = *Bpp) != NULL) {
			if (Bp->addr < base || Bp->addr >= base + size) {
				Bpp = &Bp->next;
				continue;
			}
			*Bpp = Bp->next;
			free(Bp);
		}
	}
}
/*
 * Establishment of breakpoints on traced library functions.
 */
establish_breakpoints(void)
{
	/* allocate the breakpoint hash table */
	if (bpt_hashtable == NULL) {
		bpt_hashtable = my_malloc(HASHSZ * sizeof (struct bkpt *),
		    NULL);
		(void) memset(bpt_hashtable, 0,
		    HASHSZ * sizeof (struct bkpt *));
	}

	/*
	 * Set special rtld_db event breakpoints, first time only.
	 */
	if (Rdb_agent == NULL &&
	    (Rdb_agent = Prd_agent(Proc)) != NULL) {
		(void) rd_event_enable(Rdb_agent, 1);
		if (rd_event_addr(Rdb_agent, RD_PREINIT, &notify) == RD_OK &&
		    (Bp = create_bkpt(notify.u.bptaddr, 0, 1)) != NULL)
			Bp->flags |= BPT_PREINIT;
		if (rd_event_addr(Rdb_agent, RD_POSTINIT, &notify) == RD_OK &&
		    (Bp = create_bkpt(notify.u.bptaddr, 0, 1)) != NULL)
			Bp->flags |= BPT_POSTINIT;
		if (rd_event_addr(Rdb_agent, RD_DLACTIVITY, &notify) == RD_OK &&
		    (Bp = create_bkpt(notify.u.bptaddr, 0, 1)) != NULL)
			Bp->flags |= BPT_DLACTIVITY;
	}

	/*
	 * Set special thread event breakpoint, first time libc is seen.
	 */
	if (Thr_agent == NULL)
		setup_thread_agent();

	/*
	 * Tell libproc to update its mappings.
	 */
	Pupdate_maps(Proc);

	/*
	 * If rtld_db told us a library was being deleted,
	 * first mark all of the dynlibs as not present, then
	 * iterate over the shared objects, marking only those
	 * present that really are present, and finally delete
	 * all of the not-present dynlibs.
	 */
	if (delete_library) {
		for (Dp = Dynlib; Dp != NULL; Dp = Dp->next)
			Dp->present = FALSE;
		(void) Pobject_iter(Proc, object_present, NULL);
		Dpp = &Dynlib;
		while ((Dp = *Dpp) != NULL) {
			if (Dp->present) {
				Dpp = &Dp->next;
				continue;
			}
			*Dpp = Dp->next;
			delete_breakpoints(Dp->base, Dp->size);
			free(Dp->match_name);
			free(Dp);
		}
		delete_library = FALSE;
	}

	/*
	 * Iterate over the shared objects, creating breakpoints.
	 */
	(void) Pobject_iter(Proc, object_iter, NULL);

	/*
	 * Now actually set all the breakpoints we just created.
	 */
	set_deferred_breakpoints();
}
/*
 * Initial establishment of stacks in a newly-grabbed process.
 * establish_breakpoints() has already been called.
 */
establish_stacks(void)
{
	const pstatus_t *Psp = Pstatus(Proc);
	prmap_t *Pmap = NULL;

	(void) sprintf(mapfile, "/proc/%d/rmap", (int)Psp->pr_pid);
	if ((mapfd = open(mapfile, O_RDONLY)) < 0 ||
	    fstat(mapfd, &statb) != 0 ||
	    statb.st_size < sizeof (prmap_t) ||
	    (Pmap = my_malloc(statb.st_size, NULL)) == NULL ||
	    (nmap = pread(mapfd, Pmap, statb.st_size, 0L)) <= 0 ||
	    (nmap /= sizeof (prmap_t)) == 0) {
		if (Pmap != NULL)
			free(Pmap);
		Pmap = NULL;
		nmap = 0;
	}
	if (mapfd >= 0)
		(void) close(mapfd);

	/*
	 * Iterate over lwps, establishing stacks.
	 */
	ph_map.pmap = Pmap;
	ph_map.nmap = nmap;
	(void) Plwp_iter(Proc, lwp_stack_traps, &ph_map);

	if (Thr_agent == NULL)
		return;

	/*
	 * Iterate over unbound threads, establishing stacks.
	 */
	(void) td_ta_thr_iter(Thr_agent, thr_stack_traps, NULL,
	    TD_THR_ANY_STATE, TD_THR_LOWEST_PRIORITY,
	    TD_SIGNO_MASK, TD_THR_ANY_USER_FLAGS);
}
do_symbol_iter(const char *object_name, struct dynpat *Dyp)
{
	if (*Dyp->Dp->prt_name == '\0')
		object_name = PR_OBJ_EXEC;

	/*
	 * Always search the dynamic symbol table.
	 */
	(void) Psymbol_iter(Proc, object_name,
	    PR_DYNSYM, BIND_WEAK|BIND_GLOBAL|TYPE_FUNC,
	    symbol_iter, Dyp);

	/*
	 * Search the static symbol table if this is the
	 * executable file or if we are being asked to
	 * report internal calls within the library.
	 */
	if (object_name == PR_OBJ_EXEC || Dyp->internal)
		(void) Psymbol_iter(Proc, object_name,
		    PR_SYMTAB, BIND_ANY|TYPE_FUNC,
		    symbol_iter, Dyp);
}
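
/*
 * Pobject_iter() callback, called once for each mapped object:
 * build a struct dynlib for the object (if we don't already have one)
 * and set breakpoints on every symbol that matches a dynlib pattern.
 */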
object_iter(void *cd, const prmap_t *pmp, const char *object_name)
{
	if ((pmp->pr_mflags & MA_WRITE) || !(pmp->pr_mflags & MA_EXEC))
		return (0);

	/*
	 * Set special thread event breakpoint, first time libc is seen.
	 */
	if (Thr_agent == NULL && strstr(object_name, "/libc.so.") != NULL)
		setup_thread_agent();

	for (Dp = Dynlib; Dp != NULL; Dp = Dp->next)
		if (strcmp(object_name, Dp->lib_name) == 0 ||
		    (strcmp(Dp->lib_name, "a.out") == 0 &&
		    strcmp(pmp->pr_mapname, "a.out") == 0))
			break;

	Dp = my_malloc(sizeof (struct dynlib), NULL);
	(void) memset(Dp, 0, sizeof (struct dynlib));
	if (strcmp(pmp->pr_mapname, "a.out") == 0) {
		Dp->lib_name = strdup(pmp->pr_mapname);
		Dp->match_name = strdup(pmp->pr_mapname);
		Dp->prt_name = strdup("");
	} else {
		Dp->lib_name = strdup(object_name);
		if ((str = strrchr(object_name, '/')) != NULL)
			str++;
		else
			str = object_name;
		(void) strncpy(name, str, sizeof (name) - 2);
		name[sizeof (name) - 2] = '\0';
		if ((s = strstr(name, ".so")) != NULL)
			*s = '\0';
		Dp->match_name = strdup(name);
		(void) strcat(name, ":");
		Dp->prt_name = strdup(name);
	}

	if (not_consist && strcmp(Dp->prt_name, "ld:") != 0)	/* kludge */
		return (0);

	if (hflag && not_consist)
		(void) fprintf(stderr, "not_consist is TRUE, building %s\n",
		    Dp->lib_name);

	Dp->base = pmp->pr_vaddr;
	Dp->size = pmp->pr_size;

	/*
	 * For every dynlib pattern that matches this library's name,
	 * iterate through all of the library's symbols looking for
	 * matching symbol name patterns.
	 */
	for (Dyp = Dynpat; Dyp != NULL; Dyp = Dyp->next) {
		if (interrupt|sigusr1)
			break;
		for (i = 0; i < Dyp->nlibpat; i++) {
			if (interrupt|sigusr1)
				break;
			if (fnmatch(Dyp->libpat[i], Dp->match_name, 0) != 0)
				continue;	/* no match */

			/*
			 * Require an exact match for the executable (a.out)
			 * and for the dynamic linker (ld.so.1).
			 */
			if ((strcmp(Dp->match_name, "a.out") == 0 ||
			    strcmp(Dp->match_name, "ld") == 0) &&
			    strcmp(Dyp->libpat[i], Dp->match_name) != 0)
				continue;

			/*
			 * Set Dyp->Dp to Dp so symbol_iter() can use it.
			 */
			Dyp->Dp = Dp;
			do_symbol_iter(object_name, Dyp);
		}
	}

	return (interrupt | sigusr1);
}
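
/*
 * Pobject_iter() callback used while handling a library deletion:
 * mark the dynlib whose mapping is still live as present, so that
 * establish_breakpoints() can discard the rest.
 */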
object_present(void *cd, const prmap_t *pmp, const char *object_name)
{
	for (Dp = Dynlib; Dp != NULL; Dp = Dp->next) {
		if (Dp->base == pmp->pr_vaddr)
			Dp->present = TRUE;
	}

	return (0);
}
/*
 * Search for an existing breakpoint at the 'pc' location.
 */
get_bkpt(uintptr_t pc)
{
	for (Bp = bpt_hashtable[bpt_hash(pc)]; Bp != NULL; Bp = Bp->next)
		if (pc == Bp->addr)
			break;

	return (Bp);
}
/*
 * Create a breakpoint at 'pc', if one is not there already.
 * 'ret' is true when creating a function return breakpoint, in which case
 * fail and return NULL if the breakpoint would be created in writeable data.
 * If 'set' is true, set the breakpoint in the process now.
 */
create_bkpt(uintptr_t pc, int ret, int set)
{
	uint_t hix = bpt_hash(pc);

	for (Bp = bpt_hashtable[hix]; Bp != NULL; Bp = Bp->next)
		if (pc == Bp->addr)
			return (Bp);

	/*
	 * Don't set return breakpoints on writeable data
	 * or on any space other than executable text.
	 * Don't set breakpoints in the child of a vfork()
	 * because that would modify the parent's address space.
	 */
	if (is_vfork_child ||
	    (ret &&
	    ((pmp = Paddr_to_text_map(Proc, pc)) == NULL ||
	    !(pmp->pr_mflags & MA_EXEC) ||
	    (pmp->pr_mflags & MA_WRITE))))
		return (NULL);

	/* create a new unnamed breakpoint */
	Bp = my_malloc(sizeof (struct bkpt), NULL);
	Bp->addr = pc;
	if (set && Psetbkpt(Proc, Bp->addr, &Bp->instr) == 0)
		Bp->flags |= BPT_ACTIVE;
	Bp->next = bpt_hashtable[hix];
	bpt_hashtable[hix] = Bp;

	return (Bp);
}
/*
 * Set all breakpoints that haven't been set yet.
 * Deactivate all breakpoints from modules that are not present any more.
 */
set_deferred_breakpoints(void)
{
	for (i = 0; i < HASHSZ; i++) {
		for (Bp = bpt_hashtable[i]; Bp != NULL; Bp = Bp->next) {
			if (!(Bp->flags & BPT_ACTIVE)) {
				if (!(Bp->flags & BPT_EXCLUDE) &&
				    Psetbkpt(Proc, Bp->addr, &Bp->instr) == 0)
					Bp->flags |= BPT_ACTIVE;
			} else if (Paddr_to_text_map(Proc, Bp->addr) == NULL) {
				Bp->flags &= ~BPT_ACTIVE;
			}
		}
	}
}
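
/*
 * Psymbol_iter() callback, called for each symbol of an object whose name
 * matched a dynlib pattern: create a (deferred) breakpoint at the entry
 * point of every function whose name matches a symbol pattern.
 */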
symbol_iter(void *cd, const GElf_Sym *sym, const char *sym_name)
{
	struct dynpat *Dyp = cd;
	struct dynlib *Dp = Dyp->Dp;
	uintptr_t pc = sym->st_value;

	/* ignore any undefined symbols */
	if (sym->st_shndx == SHN_UNDEF)
		return (0);

	/*
	 * Arbitrarily omit "_start" from the executable.
	 * (Avoid indentation before main().)
	 */
	if (*Dp->prt_name == '\0' && strcmp(sym_name, "_start") == 0)
		return (0);

	/*
	 * Arbitrarily omit "_rt_boot" from the dynamic linker.
	 * (Avoid indentation before main().)
	 */
	if (strcmp(Dp->match_name, "ld") == 0 &&
	    strcmp(sym_name, "_rt_boot") == 0)
		return (0);

	/*
	 * Arbitrarily omit any symbols whose name starts with '.'.
	 * Apparently putting a breakpoint on .umul causes a
	 * fatal error in libthread (%y is not restored correctly
	 * when a single step is taken).  Looks like a /proc bug.
	 */
	if (*sym_name == '.')
		return (0);

	/*
	 * For each pattern in the array of symbol patterns,
	 * if the pattern matches the symbol name, then
	 * create a breakpoint at the function in question.
	 */
	for (i = 0; i < Dyp->nsympat; i++) {
		if (interrupt|sigusr1)
			break;
		if (fnmatch(Dyp->sympat[i], sym_name, 0) != 0)
			continue;

		if ((Bp = create_bkpt(pc, 0, 0)) == NULL)	/* can't fail */
			return (0);

		/*
		 * New breakpoints receive a name now.
		 * For existing breakpoints, prefer the subset name if
		 * possible, else prefer the shorter name.
		 */
		if (Bp->sym_name == NULL) {
			Bp->sym_name = strdup(sym_name);
		} else if (strstr(Bp->sym_name, sym_name) != NULL ||
		    strlen(Bp->sym_name) > strlen(sym_name)) {
			free(Bp->sym_name);
			Bp->sym_name = strdup(sym_name);
		}

		Bp->flags |= Dyp->flag;
		if (Dyp->exclude)
			Bp->flags |= BPT_EXCLUDE;
		else if (Dyp->internal || *Dp->prt_name == '\0')
			Bp->flags |= BPT_INTERNAL;
	}

	return (interrupt | sigusr1);
}
/* For debugging only ---- */
report_htable_stats(void)
{
	const pstatus_t *Psp = Pstatus(Proc);
	struct callstack *Stk;
	uint_t Min = 1000000;
	uint_t bucket[HASHSZ];

	if (Dynpat == NULL || !hflag)
		return;

	(void) memset(bucket, 0, sizeof (bucket));

	for (i = 0; i < HASHSZ; i++) {
		n = 0;
		for (Bp = bpt_hashtable[i]; Bp != NULL; Bp = Bp->next)
			n++;
		if (n < Min)
			Min = n;
		if (n > Max)
			Max = n;
		Total += n;
		bucket[n]++;
	}

	Avg = (Total + HASHSZ / 2) / HASHSZ;
	(void) fprintf(stderr, "truss hash table statistics --------\n");
	(void) fprintf(stderr, "    Total = %u\n", Total);
	(void) fprintf(stderr, "      Min = %u\n", Min);
	(void) fprintf(stderr, "      Max = %u\n", Max);
	(void) fprintf(stderr, "      Avg = %u\n", Avg);
	for (i = 0; i < HASHSZ; i++)
		if (bucket[i])
			(void) fprintf(stderr, "    %3u buckets of size %d\n",
			    bucket[i], i);

	(void) fprintf(stderr, "truss-detected stacks --------\n");
	for (Stk = callstack; Stk != NULL; Stk = Stk->next) {
		(void) fprintf(stderr,
		    "    base = 0x%.8lx  end = 0x%.8lx  size = %ld\n",
		    (ulong_t)Stk->stkbase,
		    (ulong_t)Stk->stkend,
		    (ulong_t)(Stk->stkend - Stk->stkbase));
	}
	(void) fprintf(stderr, "primary unix stack --------\n");
	(void) fprintf(stderr,
	    "    base = 0x%.8lx  end = 0x%.8lx  size = %ld\n",
	    (ulong_t)Psp->pr_stkbase,
	    (ulong_t)(Psp->pr_stkbase + Psp->pr_stksize),
	    (ulong_t)Psp->pr_stksize);
	(void) fprintf(stderr, "nthr_create = %u\n", nthr_create);
}
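
/*
 * Build a callstack structure for an lwp, given its lwpstatus and the raw
 * /proc address-space map.  The stack bounds are taken from the primary
 * stack, the alternate signal stack, the libc_db thread stack, or, failing
 * all of those, from the mapping in the raw memory map that contains sp.
 */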
make_lwp_stack(const lwpstatus_t *Lsp, prmap_t *Pmap, int nmap)
{
	const pstatus_t *Psp = Pstatus(Proc);
	uintptr_t sp = Lsp->pr_reg[R_SP];
	id_t lwpid = Lsp->pr_lwpid;
	struct callstack *Stk;
	td_thrinfo_t thrinfo;

	if (data_model != PR_MODEL_LP64)
		sp = (uint32_t)sp;

	/* check to see if we already have this stack */
	for (Stk = callstack; Stk != NULL; Stk = Stk->next)
		if (sp >= Stk->stkbase && sp < Stk->stkend)
			return;

	Stk = my_malloc(sizeof (struct callstack), NULL);
	Stk->next = callstack;
	callstack = Stk;
	Stk->nthr_create = 0;
	Stk->maxcall = DEF_MAXCALL;
	Stk->stack = my_malloc(DEF_MAXCALL * sizeof (*Stk->stack), NULL);

	if (sp >= Psp->pr_stkbase && sp < Psp->pr_stkbase + Psp->pr_stksize) {
		Stk->stkbase = Psp->pr_stkbase;
		Stk->stkend = Stk->stkbase + Psp->pr_stksize;
		return;
	}

	/* alternate stack */
	if ((Lsp->pr_altstack.ss_flags & SS_ONSTACK) &&
	    sp >= (uintptr_t)Lsp->pr_altstack.ss_sp &&
	    sp < (uintptr_t)Lsp->pr_altstack.ss_sp
	    + Lsp->pr_altstack.ss_size) {
		Stk->stkbase = (uintptr_t)Lsp->pr_altstack.ss_sp;
		Stk->stkend = Stk->stkbase + Lsp->pr_altstack.ss_size;
		return;
	}

	if (Thr_agent != NULL &&
	    td_ta_map_lwp2thr(Thr_agent, lwpid, &th) == TD_OK &&
	    td_thr_get_info(&th, &thrinfo) == TD_OK &&
	    sp >= (uintptr_t)thrinfo.ti_stkbase - thrinfo.ti_stksize &&
	    sp < (uintptr_t)thrinfo.ti_stkbase) {
		/* The bloody fools got this backwards! */
		Stk->stkend = (uintptr_t)thrinfo.ti_stkbase;
		Stk->stkbase = Stk->stkend - thrinfo.ti_stksize;
		return;
	}

	/* last chance -- try the raw memory map */
	for (; nmap; nmap--, Pmap++) {
		if (sp >= Pmap->pr_vaddr &&
		    sp < Pmap->pr_vaddr + Pmap->pr_size) {
			Stk->stkbase = Pmap->pr_vaddr;
			Stk->stkend = Pmap->pr_vaddr + Pmap->pr_size;
			return;
		}
	}

	callstack = Stk->next;
}
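
/*
 * Build a callstack structure for an unbound thread, given its thread
 * handle and the register set reported by td_thr_getgregs().
 */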
make_thr_stack(const td_thrhandle_t *Thp, prgregset_t reg)
{
	const pstatus_t *Psp = Pstatus(Proc);
	td_thrinfo_t thrinfo;
	uintptr_t sp = reg[R_SP];
	struct callstack *Stk;

	if (data_model != PR_MODEL_LP64)
		sp = (uint32_t)sp;

	/* check to see if we already have this stack */
	for (Stk = callstack; Stk != NULL; Stk = Stk->next)
		if (sp >= Stk->stkbase && sp < Stk->stkend)
			return;

	Stk = my_malloc(sizeof (struct callstack), NULL);
	Stk->next = callstack;
	callstack = Stk;
	Stk->nthr_create = 0;
	Stk->maxcall = DEF_MAXCALL;
	Stk->stack = my_malloc(DEF_MAXCALL * sizeof (*Stk->stack), NULL);

	if (sp >= Psp->pr_stkbase && sp < Psp->pr_stkbase + Psp->pr_stksize) {
		Stk->stkbase = Psp->pr_stkbase;
		Stk->stkend = Stk->stkbase + Psp->pr_stksize;
		return;
	}

	if (td_thr_get_info(Thp, &thrinfo) == TD_OK &&
	    sp >= (uintptr_t)thrinfo.ti_stkbase - thrinfo.ti_stksize &&
	    sp < (uintptr_t)thrinfo.ti_stkbase) {
		/* The bloody fools got this backwards! */
		Stk->stkend = (uintptr_t)thrinfo.ti_stkbase;
		Stk->stkbase = Stk->stkend - thrinfo.ti_stksize;
		return;
	}

	callstack = Stk->next;
}
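
/*
 * Last-chance stack determination for an lwp: read the /proc rmap and use
 * the bounds of the mapping that contains the given sp.
 */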
find_lwp_stack(uintptr_t sp)
{
	const pstatus_t *Psp = Pstatus(Proc);
	prmap_t *Pmap = NULL;
	prmap_t *pmap = NULL;
	struct callstack *Stk = NULL;

	/*
	 * Get the address space map.
	 */
	(void) sprintf(mapfile, "/proc/%d/rmap", (int)Psp->pr_pid);
	if ((mapfd = open(mapfile, O_RDONLY)) < 0 ||
	    fstat(mapfd, &statb) != 0 ||
	    statb.st_size < sizeof (prmap_t) ||
	    (Pmap = my_malloc(statb.st_size, NULL)) == NULL ||
	    (nmap = pread(mapfd, Pmap, statb.st_size, 0L)) <= 0 ||
	    (nmap /= sizeof (prmap_t)) == 0) {
		if (mapfd >= 0)
			(void) close(mapfd);
		return (NULL);
	}
	(void) close(mapfd);

	for (pmap = Pmap; nmap--; pmap++) {
		if (sp >= pmap->pr_vaddr &&
		    sp < pmap->pr_vaddr + pmap->pr_size) {
			Stk = my_malloc(sizeof (struct callstack), NULL);
			Stk->next = callstack;
			callstack = Stk;
			Stk->stkbase = pmap->pr_vaddr;
			Stk->stkend = pmap->pr_vaddr + pmap->pr_size;
			Stk->nthr_create = 0;
			Stk->maxcall = DEF_MAXCALL;
			Stk->stack = my_malloc(
			    DEF_MAXCALL * sizeof (*Stk->stack), NULL);
			break;
		}
	}

	free(Pmap);
	return (Stk);
}
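
/*
 * Determine which stack the given sp belongs to -- primary stack,
 * alternate signal stack, or libc_db thread stack -- and create a
 * callstack structure describing it.
 */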
find_stack(uintptr_t sp)
{
	const pstatus_t *Psp = Pstatus(Proc);
	private_t *pri = get_private();
	const lwpstatus_t *Lsp = pri->lwpstat;
	id_t lwpid = Lsp->pr_lwpid;
#if defined(__sparc)
	prgreg_t tref = Lsp->pr_reg[R_G7];
#elif defined(__amd64)
	prgreg_t tref = Lsp->pr_reg[REG_FS];
#elif defined(__i386)
	prgreg_t tref = Lsp->pr_reg[GS];
#endif
	struct callstack *Stk = NULL;
	td_thrinfo_t thrinfo;

	if (sp >= Psp->pr_stkbase && sp < Psp->pr_stkbase + Psp->pr_stksize) {
		Stk = my_malloc(sizeof (struct callstack), NULL);
		Stk->next = callstack;
		callstack = Stk;
		Stk->stkbase = Psp->pr_stkbase;
		Stk->stkend = Stk->stkbase + Psp->pr_stksize;
		Stk->nthr_create = 0;
		Stk->maxcall = DEF_MAXCALL;
		Stk->stack = my_malloc(DEF_MAXCALL * sizeof (*Stk->stack),
		    NULL);
		return (Stk);
	}

	/* alternate stack */
	if ((Lsp->pr_altstack.ss_flags & SS_ONSTACK) &&
	    sp >= (uintptr_t)Lsp->pr_altstack.ss_sp &&
	    sp < (uintptr_t)Lsp->pr_altstack.ss_sp
	    + Lsp->pr_altstack.ss_size) {
		Stk = my_malloc(sizeof (struct callstack), NULL);
		Stk->next = callstack;
		callstack = Stk;
		Stk->stkbase = (uintptr_t)Lsp->pr_altstack.ss_sp;
		Stk->stkend = Stk->stkbase + Lsp->pr_altstack.ss_size;
		Stk->nthr_create = 0;
		Stk->maxcall = DEF_MAXCALL;
		Stk->stack = my_malloc(DEF_MAXCALL * sizeof (*Stk->stack),
		    NULL);
		return (Stk);
	}

	if (Thr_agent == NULL)
		return (find_lwp_stack(sp));

	if ((error = td_ta_map_lwp2thr(Thr_agent, lwpid, &th)) != TD_OK) {
		(void) fprintf(stderr,
		    "cannot get thread handle for "
		    "lwp#%d, error=%d, tref=0x%.8lx\n",
		    (int)lwpid, error, (long)tref);
		return (NULL);
	}

	if ((error = td_thr_get_info(&th, &thrinfo)) != TD_OK) {
		(void) fprintf(stderr,
		    "cannot get thread info for "
		    "lwp#%d, error=%d, tref=0x%.8lx\n",
		    (int)lwpid, error, (long)tref);
		return (NULL);
	}

	if (sp >= (uintptr_t)thrinfo.ti_stkbase - thrinfo.ti_stksize &&
	    sp < (uintptr_t)thrinfo.ti_stkbase) {
		Stk = my_malloc(sizeof (struct callstack), NULL);
		Stk->next = callstack;
		callstack = Stk;
		/* The bloody fools got this backwards! */
		Stk->stkend = (uintptr_t)thrinfo.ti_stkbase;
		Stk->stkbase = Stk->stkend - thrinfo.ti_stksize;
		Stk->tid = thrinfo.ti_tid;
		Stk->nthr_create = nthr_create;
		Stk->maxcall = DEF_MAXCALL;
		Stk->stack = my_malloc(DEF_MAXCALL * sizeof (*Stk->stack),
		    NULL);
		return (Stk);
	}

	/* stack bounds failure -- complain bitterly */
	(void) fprintf(stderr,
	    "sp not within thread stack: "
	    "sp=0x%.8lx stkbase=0x%.8lx stkend=0x%.8lx\n",
	    (ulong_t)sp,
	    /* The bloody fools got this backwards! */
	    (ulong_t)thrinfo.ti_stkbase - thrinfo.ti_stksize,
	    (ulong_t)thrinfo.ti_stkbase);

	return (NULL);
}
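
/*
 * Fill in Stk->tid for the current lwp, using the cached thread reference
 * register when possible to avoid the cost of td_ta_map_lwp2thr().
 */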
get_tid(struct callstack *Stk)
{
	private_t *pri = get_private();
	const lwpstatus_t *Lsp = pri->lwpstat;
	id_t lwpid = Lsp->pr_lwpid;
#if defined(__sparc)
	prgreg_t tref = Lsp->pr_reg[R_G7];
#elif defined(__amd64)
	prgreg_t tref = (data_model == PR_MODEL_LP64) ?
	    Lsp->pr_reg[REG_FS] : Lsp->pr_reg[REG_GS];
#elif defined(__i386)
	prgreg_t tref = Lsp->pr_reg[GS];
#endif
	td_thrinfo_t thrinfo;

	if (Thr_agent == NULL) {
		Stk->nthr_create = 0;
		return;
	}

	/*
	 * If we have a matching tref and no new threads have
	 * been created since the last time we encountered this
	 * stack, then we don't have to go through the overhead
	 * of calling td_ta_map_lwp2thr() to get the thread-id.
	 */
	if (tref == Stk->tref && Stk->nthr_create == nthr_create)
		return;

	if ((error = td_ta_map_lwp2thr(Thr_agent, lwpid, &th)) != TD_OK) {
		(void) fprintf(stderr,
		    "cannot get thread handle for "
		    "lwp#%d, error=%d, tref=0x%.8lx\n",
		    (int)lwpid, error, (long)tref);
		Stk->nthr_create = 0;
	} else if ((error = td_thr_get_info(&th, &thrinfo)) != TD_OK) {
		(void) fprintf(stderr,
		    "cannot get thread info for "
		    "lwp#%d, error=%d, tref=0x%.8lx\n",
		    (int)lwpid, error, (long)tref);
		Stk->nthr_create = 0;
	} else {
		Stk->tref = tref;
		Stk->tid = thrinfo.ti_tid;
		Stk->nthr_create = nthr_create;
	}
}
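
/*
 * Given the sp and fp of a traced lwp, find (or, if makeid is set, create)
 * the callstack structure describing the stack the lwp is running on,
 * growing the recorded stack bounds if the stack itself has grown.
 */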
callstack_info(uintptr_t sp, uintptr_t fp, int makeid)
{
	struct callstack *Stk;

	if (sp == 0 ||
	    Pread(Proc, &trash, sizeof (trash), sp) != sizeof (trash))
		return (NULL);

	for (Stk = callstack; Stk != NULL; Stk = Stk->next)
		if (sp >= Stk->stkbase && sp < Stk->stkend)
			break;

	/*
	 * If we didn't find the stack, do it the hard way.
	 */
	if (Stk == NULL) {
		uintptr_t stkbase = sp;

#if defined(i386) || defined(__amd64)
		if (data_model == PR_MODEL_LP64)
			minsize = 2 * sizeof (uintptr_t);	/* fp + pc */
		else
			minsize = 2 * sizeof (uint32_t);
#else
#ifdef _LP64
		if (data_model != PR_MODEL_LP64)
			minsize = SA32(MINFRAME32);
		else
			minsize = SA64(MINFRAME64);
#else
		minsize = SA(MINFRAME);
#endif
#endif
		stkend = sp + minsize;

		while (Stk == NULL && fp != 0 && fp >= sp) {
			stkend = fp + minsize;
			for (Stk = callstack; Stk != NULL; Stk = Stk->next)
				if ((fp >= Stk->stkbase && fp < Stk->stkend) ||
				    (stkend > Stk->stkbase &&
				    stkend <= Stk->stkend))
					break;
			fp = previous_fp(fp, NULL);
		}

		if (Stk != NULL)	/* the stack grew */
			Stk->stkbase = stkbase;
	}

	if (Stk == NULL && makeid)	/* new stack */
		Stk = find_stack(sp);

	if (Stk == NULL)
		return (NULL);

	/*
	 * Ensure that there is room for at least one more entry.
	 */
	if (Stk->ncall == Stk->maxcall) {
		Stk->maxcall *= 2;
		Stk->stack = my_realloc(Stk->stack,
		    Stk->maxcall * sizeof (*Stk->stack), NULL);
	}

	return (Stk);
}
/*
 * Reset the breakpoint information (called on successful exec()).
 */
reset_breakpoints(void)
{
	struct callstack *Stk;

	/* destroy all previous dynamic library information */
	while ((Dp = Dynlib) != NULL) {
		Dynlib = Dp->next;
		free(Dp->match_name);
		free(Dp);
	}

	/* destroy all previous breakpoint trap information */
	if (bpt_hashtable != NULL) {
		for (i = 0; i < HASHSZ; i++) {
			while ((Bp = bpt_hashtable[i]) != NULL) {
				bpt_hashtable[i] = Bp->next;
				free(Bp);
			}
		}
	}

	/* destroy all the callstack information */
	while ((Stk = callstack) != NULL) {
		callstack = Stk->next;
		free(Stk->stack);
		free(Stk);
	}

	/* we are not a multi-threaded process anymore */
	if (Thr_agent != NULL)
		(void) td_ta_delete(Thr_agent);

	/* tell libproc to clear out its mapping information */
	Preset_maps(Proc);

	/* Reestablish the symbols from the executable */
	(void) establish_breakpoints();
}
/*
 * Clear breakpoints from the process (called before Prelease()).
 * Don't actually destroy the breakpoint table;
 * threads currently fielding breakpoints will need it.
 */
clear_breakpoints(void)
{
	/*
	 * Change all breakpoint traps back to normal instructions.
	 * We attempt to remove a breakpoint from every address which
	 * may have ever contained a breakpoint to protect our victims.
	 */
	report_htable_stats();		/* report stats first */
	for (i = 0; i < HASHSZ; i++) {
		for (Bp = bpt_hashtable[i]; Bp != NULL; Bp = Bp->next) {
			if (Bp->flags & BPT_ACTIVE)
				(void) Pdelbkpt(Proc, Bp->addr, Bp->instr);
			Bp->flags &= ~BPT_ACTIVE;
		}
	}

	if (Thr_agent != NULL) {
		td_thr_events_t events;

		td_event_fillset(&events);
		(void) td_ta_clear_event(Thr_agent, &events);
		(void) td_ta_delete(Thr_agent);
	}
}
/*
 * Reestablish the breakpoint traps in the process.
 * Called after resuming from a vfork() in the parent.
 */
reestablish_traps(void)
{
	if (Dynpat == NULL || is_vfork_child)
		return;

	for (i = 0; i < HASHSZ; i++) {
		for (Bp = bpt_hashtable[i]; Bp != NULL; Bp = Bp->next) {
			if ((Bp->flags & BPT_ACTIVE) &&
			    Psetbkpt(Proc, Bp->addr, &instr) != 0)
				Bp->flags &= ~BPT_ACTIVE;
		}
	}
}
show_function_call(private_t *pri,
    struct callstack *Stk, struct dynlib *Dp, struct bkpt *Bp)
{
	narg = get_arguments(arg);
	make_pname(pri, (Stk != NULL)? Stk->tid : 0);

	for (i = 1; i < Stk->ncall; i++) {
		(void) fputc(' ', stdout);
		(void) fputc(' ', stdout);
	}
	(void) printf("-> %s%s(", Dp->prt_name, Bp->sym_name);
	for (i = 0; i < narg; i++) {
		(void) printf("0x%lx", arg[i]);
		if (i < narg - 1) {
			(void) fputc(',', stdout);
			(void) fputc(' ', stdout);
		}
	}
	(void) printf(")\n");
}
show_function_return(private_t *pri, long rval, int stret,
    struct callstack *Stk, struct dynlib *Dp, struct bkpt *Bp)
{
	make_pname(pri, Stk->tid);

	for (i = 0; i < Stk->ncall; i++) {
		(void) fputc(' ', stdout);
		(void) fputc(' ', stdout);
	}
	(void) printf("<- %s%s() = ", Dp->prt_name, Bp->sym_name);
	if (stret) {
		(void) printf("struct return\n");
	} else if (data_model == PR_MODEL_LP64) {
		if (rval >= (64 * 1024) || -rval >= (64 * 1024))
			(void) printf("0x%lx\n", rval);
		else
			(void) printf("%ld\n", rval);
	} else {
		int rval32 = (int)rval;
		if (rval32 >= (64 * 1024) || -rval32 >= (64 * 1024))
			(void) printf("0x%x\n", rval32);
		else
			(void) printf("%d\n", rval32);
	}
}
/*
 * Called to deal with function-call tracing.
 * Return 0 on normal success, 1 to indicate a BPT_HANG success,
 * and -1 on failure (not tracing functions or unknown breakpoint).
 */
function_trace(private_t *pri, int first, int clear, int dotrace)
{
	struct ps_lwphandle *Lwp = pri->Lwp;
	const lwpstatus_t *Lsp = pri->lwpstat;
	uintptr_t pc = Lsp->pr_reg[R_PC];
	uintptr_t sp = Lsp->pr_reg[R_SP];
	uintptr_t fp = Lsp->pr_reg[R_FP];
	struct callstack *Stk;

	if (data_model != PR_MODEL_LP64) {
		pc = (uint32_t)pc;
		sp = (uint32_t)sp;
		fp = (uint32_t)fp;
	}

	if ((Bp = get_bkpt(pc)) == NULL) {
		(void) fprintf(stderr,
		    "function_trace(): "
		    "cannot find breakpoint for pc: 0x%.8lx\n",
		    (ulong_t)pc);
		return (-1);
	}

	if ((Bp->flags & (BPT_PREINIT|BPT_POSTINIT|BPT_DLACTIVITY)) && !clear) {
		rd_event_msg_t event_msg;

		if (Bp->flags & BPT_PREINIT)
			(void) fprintf(stderr, "function_trace(): "
			    "RD_PREINIT breakpoint\n");
		if (Bp->flags & BPT_POSTINIT)
			(void) fprintf(stderr, "function_trace(): "
			    "RD_POSTINIT breakpoint\n");
		if (Bp->flags & BPT_DLACTIVITY)
			(void) fprintf(stderr, "function_trace(): "
			    "RD_DLACTIVITY breakpoint\n");

		if (rd_event_getmsg(Rdb_agent, &event_msg) == RD_OK) {
			if (event_msg.type == RD_DLACTIVITY) {
				switch (event_msg.u.state) {
				case RD_CONSISTENT:
					establish_breakpoints();
					break;
				case RD_ADD:
					not_consist = TRUE;	/* kludge */
					establish_breakpoints();
					not_consist = FALSE;
					break;
				case RD_DELETE:
					delete_library = TRUE;
					break;
				}
			}
			switch (event_msg.type) {
			case RD_DLACTIVITY:
				et = "RD_DLACTIVITY";
				break;
			default:
				(void) sprintf(buf, "0x%x",
				    event_msg.type);
				et = buf;
				break;
			}
			(void) fprintf(stderr,
			    "event_msg.type = %s ", et);
			switch (event_msg.u.state) {
			case RD_CONSISTENT:
				et = "RD_CONSISTENT";
				break;
			default:
				(void) sprintf(buf, "0x%x",
				    event_msg.u.state);
				et = buf;
				break;
			}
			(void) fprintf(stderr,
			    "event_msg.u.state = %s\n", et);
		}
	}

	if ((Bp->flags & BPT_TD_CREATE) && !clear) {
		nthr_create++;
		(void) fprintf(stderr, "function_trace(): "
		    "BPT_TD_CREATE breakpoint\n");
		/* we don't care about the event message */
	}

	Dp = Bp->dyn;

	if (dotrace) {
		if ((Stk = callstack_info(sp, fp, 1)) == NULL) {
			if (Dp != NULL && !clear) {
				if (cflag) {
					add_fcall(fcall_tbl, Dp->prt_name,
					    Bp->sym_name, (unsigned long)1);
				} else {
					show_function_call(pri, NULL, Dp, Bp);
				}
				if ((Bp->flags & BPT_HANG) && !first)
					rval = 1;
			}
		} else if (!clear) {
			if (Dp != NULL) {
				function_entry(pri, Bp, Stk);
				if ((Bp->flags & BPT_HANG) && !first)
					rval = 1;
			} else {
				function_return(pri, Stk);
			}
		}
	}

	/*
	 * Single-step the traced instruction.  Since it's possible that
	 * another thread has deactivated this breakpoint, we indicate
	 * that we have reactivated it by virtue of executing it.
	 *
	 * To avoid a deadlock with some other thread in the process
	 * performing a fork() or a thr_suspend() operation, we must
	 * drop and later reacquire truss_lock.  Some fancy dancing here.
	 */
	active = (Bp->flags & BPT_ACTIVE);
	Bp->flags |= BPT_ACTIVE;
	instr = Bp->instr;
	(void) mutex_unlock(&truss_lock);
	(void) Lxecbkpt(Lwp, instr);
	(void) mutex_lock(&truss_lock);

	if (rval || clear) {	/* leave process stopped and abandoned */
#if defined(__i386)
		/*
		 * Leave it stopped in a state that a stack trace is
		 * reasonable.
		 */
		/* XX64 needs to be updated for amd64 & gcc */
		if (rval && instr == 0x55) {	/* pushl %ebp */
			/* step it over the movl %esp,%ebp */
			(void) mutex_unlock(&truss_lock);
			(void) Lsetrun(Lwp, 0, PRCFAULT|PRSTEP);
			/* we're wrapping up; wait one second at most */
			(void) Lwait(Lwp, MILLISEC);
			(void) mutex_lock(&truss_lock);
		}
#endif
		if (get_bkpt(pc) != Bp)
			abend("function_trace: lost breakpoint", NULL);
		(void) Pdelbkpt(Proc, Bp->addr, Bp->instr);
		Bp->flags &= ~BPT_ACTIVE;
		(void) mutex_unlock(&truss_lock);
		(void) Lsetrun(Lwp, 0, PRCFAULT|PRSTOP);
		/* we're wrapping up; wait one second at most */
		(void) Lwait(Lwp, MILLISEC);
		(void) mutex_lock(&truss_lock);
	} else {
		if (get_bkpt(pc) != Bp)
			abend("function_trace: lost breakpoint", NULL);
		if (!active || !(Bp->flags & BPT_ACTIVE)) {
			(void) Pdelbkpt(Proc, Bp->addr, Bp->instr);
			Bp->flags &= ~BPT_ACTIVE;
		}
	}

	return (rval);
}
function_entry(private_t *pri, struct bkpt *Bp, struct callstack *Stk)
{
	const lwpstatus_t *Lsp = pri->lwpstat;
	uintptr_t sp = Lsp->pr_reg[R_SP];
	uintptr_t rpc = get_return_address(&sp);
	struct dynlib *Dp = Bp->dyn;
	int oldframe = FALSE;

	if (data_model != PR_MODEL_LP64) {
		sp = (uint32_t)sp;
		rpc = (uint32_t)rpc;
	}

	/*
	 * If the sp is not within the stack bounds, forget it.
	 * If the symbol's 'internal' flag is false,
	 * don't report internal calls within the library.
	 */
	if (!(sp >= Stk->stkbase && sp < Stk->stkend) ||
	    (!(Bp->flags & BPT_INTERNAL) &&
	    rpc >= Dp->base && rpc < Dp->base + Dp->size))
		return;

	for (i = 0; i < Stk->ncall; i++) {
		if (sp >= Stk->stack[i].sp) {
			Stk->ncall = i;
			if (sp == Stk->stack[i].sp)
				oldframe = TRUE;
			break;
		}
	}

	/*
	 * Breakpoints for function returns are set here.
	 * If we're counting function calls, there is no need to set
	 * a breakpoint upon return.
	 */
	if (!oldframe && !cflag) {
		(void) create_bkpt(rpc, 1, 1);	/* may or may not be set */
		Stk->stack[Stk->ncall].sp = sp;	/* record it anyway */
		Stk->stack[Stk->ncall].pc = rpc;
		Stk->stack[Stk->ncall].fcn = Bp;
		Stk->ncall++;
	}

	if (cflag) {
		add_fcall(fcall_tbl, Dp->prt_name, Bp->sym_name,
		    (unsigned long)1);
	} else {
		show_function_call(pri, Stk, Dp, Bp);
	}
}
/*
 * We are here because we hit an unnamed breakpoint.
 * Attempt to match this up with a return pc on the stack
 * and report the function return.
 */
function_return(private_t *pri, struct callstack *Stk)
{
	const lwpstatus_t *Lsp = pri->lwpstat;
	uintptr_t sp = Lsp->pr_reg[R_SP];
	uintptr_t fp = Lsp->pr_reg[R_FP];

	if (data_model != PR_MODEL_LP64) {
		sp = (uint32_t)sp;
		fp = (uint32_t)fp;
	}

	for (i = Stk->ncall - 1; i >= 0; i--) {
		if (sp <= Stk->stack[i].sp && fp > Stk->stack[i].sp) {
			Stk->ncall = i;
			break;
		}
	}

#if defined(i386) || defined(__amd64)
	if (i < 0) {
		/* probably __mul64() or friends -- try harder */
		for (j = 0; i < 0 && j < 8; j++) {	/* up to 8 args */
			for (i = Stk->ncall - 1; i >= 0; i--) {
				if (sp <= Stk->stack[i].sp &&
				    fp > Stk->stack[i].sp) {
					Stk->ncall = i;
					break;
				}
			}
		}
	}
#endif

	if ((i >= 0) && (!cflag)) {
		show_function_return(pri, Lsp->pr_reg[R_R0], 0,
		    Stk, Stk->stack[i].fcn->dyn, Stk->stack[i].fcn);
	}
}
#if defined(__sparc)
#define	FPADJUST	0
#elif defined(__amd64)
#define	FPADJUST	8
#elif defined(__i386)
#define	FPADJUST	4
#endif
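
/*
 * Walk one stack, given a register set: gather its frames with
 * previous_fp(), then plant return breakpoints for every traced function
 * that already has a frame on that stack.
 */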
trap_one_stack(prgregset_t reg)
{
	struct callstack *Stk;
	uintptr_t sp = reg[R_SP];
	uintptr_t pc = reg[R_PC];
	uint_t maxframe = 8;
	struct {
		uintptr_t sp;		/* %sp within called function */
		uintptr_t pc;		/* %pc within called function */
		uintptr_t rsp;		/* the return sp */
		uintptr_t rpc;		/* the return pc */
	} *frame = my_malloc(maxframe * sizeof (*frame), NULL);

	/*
	 * Gather stack frames bottom to top.
	 */
	while (sp != 0) {
		fp = sp;	/* remember highest non-null sp */
		frame[nframe].sp = sp;
		frame[nframe].pc = pc;
		sp = previous_fp(sp, &pc);
		frame[nframe].rsp = sp;
		frame[nframe].rpc = pc;
		if (++nframe == maxframe) {
			maxframe *= 2;
			frame = my_realloc(frame, maxframe * sizeof (*frame),
			    NULL);
		}
	}

	/*
	 * Scan for function return breakpoints top to bottom.
	 */
	while (nframe-- > 0) {
		/* lookup the called function in the symbol tables */
		if (Plookup_by_addr(Proc, frame[nframe].pc, sym_name,
		    sizeof (sym_name), &sym) != 0)
			continue;

		pc = sym.st_value;		/* entry point of the function */
		rpc = frame[nframe].rpc;	/* caller's return pc */

		/* lookup the function in the breakpoint table */
		if ((Bp = get_bkpt(pc)) == NULL || (Dp = Bp->dyn) == NULL)
			continue;

		if (!(Bp->flags & BPT_INTERNAL) &&
		    rpc >= Dp->base && rpc < Dp->base + Dp->size)
			continue;

		sp = frame[nframe].rsp + FPADJUST;	/* %sp at time of call */
		if ((Stk = callstack_info(sp, fp, 0)) == NULL)
			continue;	/* can't happen? */

		if (create_bkpt(rpc, 1, 1) != NULL) {
			Stk->stack[Stk->ncall].sp = sp;
			Stk->stack[Stk->ncall].pc = rpc;
			Stk->stack[Stk->ncall].fcn = Bp;
			Stk->ncall++;
		}
	}

	free(frame);
}
lwp_stack_traps(void *cd, const lwpstatus_t *Lsp)
{
	ph_map_t *ph_map = (ph_map_t *)cd;

	(void) memcpy(reg, Lsp->pr_reg, sizeof (prgregset_t));
	make_lwp_stack(Lsp, ph_map->pmap, ph_map->nmap);
	trap_one_stack(reg);

	return (interrupt | sigusr1);
}
thr_stack_traps(const td_thrhandle_t *Thp, void *cd)
{
	/*
	 * We have already dealt with all the lwps.
	 * We only care about unbound threads here (TD_PARTIALREG).
	 */
	if (td_thr_getgregs(Thp, reg) != TD_PARTIALREG)
		return (0);

	make_thr_stack(Thp, reg);
	trap_one_stack(reg);

	return (interrupt | sigusr1);
}
#if defined(__sparc)

previous_fp(uintptr_t sp, uintptr_t *rpc)
{
	if (data_model == PR_MODEL_LP64) {
		struct rwindow64 rwin;
		if (Pread(Proc, &rwin, sizeof (rwin), sp + STACK_BIAS)
		    == sizeof (rwin)) {
			fp = (uintptr_t)rwin.rw_fp;
			pc = (uintptr_t)rwin.rw_rtn;
		}
		if (fp != 0 &&
		    Pread(Proc, &rwin, sizeof (rwin), fp + STACK_BIAS)
		    != sizeof (rwin))
			fp = pc = 0;
	} else {
#ifdef _LP64
		struct rwindow32 rwin;
#else
		struct rwindow rwin;
#endif
		if (Pread(Proc, &rwin, sizeof (rwin), sp) == sizeof (rwin)) {
			fp = (uint32_t)rwin.rw_fp;
			pc = (uint32_t)rwin.rw_rtn;
		}
		if (fp != 0 &&
		    Pread(Proc, &rwin, sizeof (rwin), fp) != sizeof (rwin))
			fp = pc = 0;
	}

	if (rpc)
		*rpc = pc;
	return (fp);
}
get_return_address(uintptr_t *psp)
{
	private_t *pri = get_private();
	const lwpstatus_t *Lsp = pri->lwpstat;

	rpc = (uintptr_t)Lsp->pr_reg[R_O7] + 8;
	if (data_model != PR_MODEL_LP64)
		rpc = (uint32_t)rpc;

	/* check for structure return (bletch!) */
	if (Pread(Proc, &inst, sizeof (inst), rpc) == sizeof (inst) &&
	    inst < 0x1000)
		rpc += sizeof (instr_t);

	return (rpc);
}
get_arguments(long *argp)
{
	private_t *pri = get_private();
	const lwpstatus_t *Lsp = pri->lwpstat;

	if (data_model != PR_MODEL_LP64)
		for (i = 0; i < 4; i++)
			argp[i] = (uint_t)Lsp->pr_reg[R_O0+i];
	else
		for (i = 0; i < 4; i++)
			argp[i] = (long)Lsp->pr_reg[R_O0+i];

	return (4);
}

#endif	/* __sparc */
#if defined(__i386) || defined(__amd64)

uintptr_t
previous_fp(uintptr_t fp, uintptr_t *rpc)
{
	uintptr_t frame[2];
	uintptr_t trash[2];

	if (Pread(Proc, frame, sizeof (frame), fp) != sizeof (frame) ||
	    (frame[0] != 0 &&
	    Pread(Proc, trash, sizeof (trash), frame[0]) != sizeof (trash)))
		frame[0] = frame[1] = 0;

	if (rpc)
		*rpc = frame[1];
	return (frame[0]);
}

#endif	/* __i386 || __amd64 */
#if defined(__amd64) || defined(__i386)

/*
 * Examine the instruction at the return location of a function call
 * and return the byte count by which the stack is adjusted on return.
 * If the instruction at the return location is an addl, as expected,
 * then adjust the return pc by the size of that instruction so that
 * we will place the return breakpoint on the following instruction.
 * This allows programs that interrogate their own stacks and record
 * function calls and arguments to work correctly even while we interfere.
 * Return the count on success, -1 on failure.
 */
int
return_count32(uint32_t *ppc)
{
	uintptr_t pc = *ppc;
	struct bkpt *Bp;
	int count;
	uchar_t instr[6];	/* instruction at pc */

	if ((count = Pread(Proc, instr, sizeof (instr), pc)) < 0)
		return (-1);

	/* find the replaced instruction at pc (if any) */
	if ((Bp = get_bkpt(pc)) != NULL && (Bp->flags & BPT_ACTIVE))
		instr[0] = (uchar_t)Bp->instr;

	if (count != sizeof (instr) &&
	    (count < 3 || instr[0] != 0x83))
		return (-1);

	/*
	 * A bit of disassembly of the instruction is required here.
	 */
	if (instr[1] != 0xc4) {	/* not an addl mumble,%esp instruction */
		count = 0;
	} else if (instr[0] == 0x81) {	/* count is a longword */
		count = instr[2]+(instr[3]<<8)+(instr[4]<<16)+(instr[5]<<24);
		*ppc += 6;
	} else if (instr[0] == 0x83) {	/* count is a byte */
		count = instr[2];
		*ppc += 3;
	} else {		/* not an addl instruction */
		count = 0;
	}

	return (count);
}
get_return_address32(uintptr_t *psp)
{
	uint32_t sp = *psp;
	uint32_t rpc;
	int count;

	*psp += 4;	/* account for popping the stack on return */
	if (Pread(Proc, &rpc, sizeof (rpc), sp) != sizeof (rpc))
		return (0);
	if ((count = return_count32(&rpc)) < 0)
		count = 0;
	*psp += count;		/* expected sp on return */

	return (rpc);
}
get_return_address(uintptr_t *psp)
{
	uintptr_t rpc;
	uintptr_t sp = *psp;

	if (data_model == PR_MODEL_LP64) {
		if (Pread(Proc, &rpc, sizeof (rpc), sp) != sizeof (rpc))
			return (0);
		/*
		 * Ignore arguments pushed on the stack.  See comments in
		 * get_arguments().
		 */
		*psp += 8;	/* account for popping the stack on return */
		return (rpc);
	}

	return (get_return_address32(psp));
}
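
/*
 * Fetch up to four 32-bit arguments from the stack of the traced lwp,
 * using return_count32() on the return pc to decide how many argument
 * words the caller actually pushed.
 */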
int
get_arguments32(long *argp)
{
	private_t *pri = get_private();
	const lwpstatus_t *Lsp = pri->lwpstat;
	uint32_t frame[5];	/* return pc + 4 args */
	int narg;
	int count;
	int i;

	narg = Pread(Proc, frame, sizeof (frame),
	    (uintptr_t)Lsp->pr_reg[R_SP]);
	narg -= sizeof (greg32_t);
	if (narg <= 0)
		return (0);
	narg /= sizeof (greg32_t);	/* no more than 4 */

	/*
	 * Given the return PC, determine the number of arguments.
	 */
	if ((count = return_count32(&frame[0])) < 0)
		count = 0;
	else
		count /= sizeof (greg32_t);
	if (narg > count)
		narg = count;

	for (i = 0; i < narg; i++)
		argp[i] = (long)frame[i+1];

	return (narg);
}
int
get_arguments(long *argp)
{
	private_t *pri = get_private();
	const lwpstatus_t *Lsp = pri->lwpstat;

	if (data_model == PR_MODEL_LP64) {
		/*
		 * On amd64, we do not know how many arguments are passed to
		 * each function.  While it may be possible to detect if we
		 * have more than 6 arguments, it is of marginal value.
		 * Instead, assume that we always have 6 arguments, which are
		 * passed via registers.
		 */
		argp[0] = Lsp->pr_reg[REG_RDI];
		argp[1] = Lsp->pr_reg[REG_RSI];
		argp[2] = Lsp->pr_reg[REG_RDX];
		argp[3] = Lsp->pr_reg[REG_RCX];
		argp[4] = Lsp->pr_reg[REG_R8];
		argp[5] = Lsp->pr_reg[REG_R9];
		return (6);
	}

	return (get_arguments32(argp));
}

#endif	/* __amd64 || __i386 */