/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 1996, 2010, Oracle and/or its affiliates. All rights reserved.
 */
/*
 * Utilities to handle shared object dependency graph.
 *
 * The algorithms used in this file are taken from the following book:
 *	Algorithms in C
 *		Robert Sedgewick
 *		Addison-Wesley Publishing company
 *
 * From the following chapters:
 *	Chapter 29 Elementary Graph Algorithms
 *	Chapter 32 Directed Graph
 */
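/*
 * In outline, the code below performs a depth-first traversal of the
 * dependency graph, detecting strongly connected components (cyclic
 * dependency groups) by maintaining a stack and a minimum reachable
 * sort value, and emits the objects in a topologically sorted order
 * suitable for firing .init (reverse) or .fini (forward) sections.
 * This is essentially the classic DFS-based strongly connected
 * components technique described in the chapters cited above.
 */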
#include <sys/types.h>
/*
 * Structure for maintaining sorting state.
 */
typedef struct {
    Rt_map      **s_lmpa;       /* link-map[] (returned to caller) */
    Rt_map      *s_lmp;         /* originating link-map */
    Rt_map      **s_stack;      /* strongly connected component stack */
    APlist      *s_scc;         /* cyclic list */
    APlist      *s_queue;       /* depth queue for cyclic components */
    int         s_sndx;         /* present stack index */
    int         s_lndx;         /* present link-map index */
    int         s_num;          /* number of objects to sort */
    int         s_initfirst;    /* no. of INITFIRST entries */
} Sort;
/*
 * qsort(3c) comparison function.
 */
static int
compare(const void *lmpp1, const void *lmpp2)
{
    Rt_map  *lmp1 = *((Rt_map **)lmpp1);
    Rt_map  *lmp2 = *((Rt_map **)lmpp2);

    if (IDX(lmp1) > IDX(lmp2))
        return (-1);
    if (IDX(lmp1) < IDX(lmp2))
        return (1);
    return (0);
}
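/*
 * Note: compare() orders the members of a cycle by descending depth
 * index (IDX), deepest dependencies first, which is what produces the
 * reverse breadth-first ordering that sort_scc() below relies on.
 */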
/*
 * This routine is called when a cyclic dependency is detected between strongly
 * connected components.  The nodes within the cycle are reverse breadth-first
 * sorted.
 */
static int
sort_scc(Sort *sort, int fndx, int flag)
{
    static const char   *tfmt = 0, *ffmt;
    static int          cnt = 1;
    int                 ndx;
    Rt_map              *lmp;
    Lm_list             *lml = LIST(sort->s_lmp);
    Word                lmflags = lml->lm_flags;
    Word                init, unref;
    /*
     * If this is the first cyclic dependency, traverse the new objects that
     * have been added to the link-map list and for each object establish
     * a unique depth index.  We build this dynamically as we have no idea
     * of the number of objects that will be inspected (logic matches that
     * used by dlsym() to traverse lazy dependencies).
     */
    if (sort->s_queue == NULL) {
        Aliste  idx1;
        Rt_map  *lmp = sort->s_lmp, *lmp2;

        IDX(lmp) = 1;

        if (aplist_append(&sort->s_queue, lmp, sort->s_num) == NULL)
            return (0);

        for (APLIST_TRAVERSE(sort->s_queue, idx1, lmp2)) {
            Aliste      idx2;
            Bnd_desc    *bdp;

            for (APLIST_TRAVERSE(DEPENDS(lmp2), idx2, bdp)) {
                Rt_map  *lmp3 = bdp->b_depend;

                if (IDX(lmp3) || (LIST(lmp3) != lml))
                    continue;

                /*
                 * If we're .init processing and this dependency's
                 * .init has been called, skip it.
                 */
                if ((flag & RT_SORT_REV) &&
                    (FLAGS(lmp3) & FLG_RT_INITCALL))
                    continue;

                if (aplist_append(&sort->s_queue, lmp3,
                    sort->s_num) == NULL)
                    return (0);

                /* Record this dependency one level deeper than its caller. */
                IDX(lmp3) = IDX(lmp2) + 1;
            }
        }
    }
    /*
     * Sort the cyclic dependencies.
     */
    qsort(&(sort->s_lmpa[fndx]), sort->s_lndx - fndx, sizeof (Rt_map *),
        compare);
    /*
     * Under ldd -i, or debugging, print this cycle.  Under ldd -i/-U assign
     * each object a group identifier so that cyclic dependent callers can
     * be better traced (see trace_sort()), or analyzed for non-use.
     */
    init = lmflags & LML_FLG_TRC_INIT;
    unref = lmflags & LML_FLG_TRC_UNREF;
    if ((init == 0) && (unref == 0) && (DBG_ENABLED == 0))
        return (1);

    if (init) {
        if (tfmt == 0) {
            tfmt = MSG_INTL(MSG_LDD_INIT_FMT_01);
            ffmt = MSG_ORIG(MSG_LDD_INIT_FMT_FILE);
        }
        (void) printf(tfmt, cnt);
    }
    DBG_CALL(Dbg_util_scc_title(lml, (flag & RT_SORT_REV)));
    /*
     * Identify this cyclic group, and under ldd -i print the cycle in the
     * order its components will be run.
     */
    if (flag & RT_SORT_REV) {
        for (ndx = fndx; ndx < sort->s_lndx; ndx++) {
            lmp = sort->s_lmpa[ndx];
            CYCGROUP(lmp) = cnt;

            if (init)
                (void) printf(ffmt, NAME(lmp));
            DBG_CALL(Dbg_util_scc_entry(lmp, ndx));
        }
        cnt++;

    } else if (DBG_ENABLED) {
        for (ndx = sort->s_lndx - 1; ndx >= fndx; ndx--) {
            lmp = sort->s_lmpa[ndx];
            DBG_CALL(Dbg_util_scc_entry(lmp, ndx));
        }
    }
    /*
     * If we're looking for unused dependencies determine if any of these
     * cyclic components are referenced from outside of the cycle.
     */
    if (unref || DBG_ENABLED) {
        Bnd_desc    *bdp;
        Aliste      idx;

        for (ndx = fndx; ndx < sort->s_lndx; ndx++) {
            lmp = sort->s_lmpa[ndx];

            /*
             * If this object has a handle then it can be in use by
             * anyone.
             */
            if (HANDLES(lmp))
                return (1);

            /*
             * Traverse this object's callers looking for outside
             * references.
             */
            for (APLIST_TRAVERSE(CALLERS(lmp), idx, bdp)) {
                Rt_map  *clmp = bdp->b_caller;

                if ((bdp->b_flags & BND_REFER) == 0)
                    continue;

                if (CYCGROUP(lmp) != CYCGROUP(clmp))
                    return (1);
            }
        }

        /*
         * If we're here then none of the cyclic dependents have been
         * referenced from outside of the cycle, mark them as unused.
         */
        for (ndx = fndx; ndx < sort->s_lndx; ndx++) {
            lmp = sort->s_lmpa[ndx];
            FLAGS1(lmp) &= ~FL1_RT_USED;
        }
    }
    return (1);
}
/*
 * Take elements off of the stack and move them to the link-map array.
 * Typically this routine just pops one strongly connected component
 * (individual link-map) at a time.  When a cyclic dependency has been detected
 * the stack will contain more than just the present object to process, and
 * will trigger the later call to sort_scc() to sort these elements.
 */
static int
visit(Lm_list *lml, Rt_map *lmp, Sort *sort, int flag)
{
    APlist      *alp = NULL;
    Rt_map      *tlmp;
    int         num = sort->s_lndx;
    Word        tracing = lml->lm_flags & LML_FLG_TRC_ENABLE;

    do {
        tlmp = sort->s_stack[--(sort->s_sndx)];
        SORTVAL(tlmp) = sort->s_num;
        DBG_CALL(Dbg_util_collect(tlmp, sort->s_lndx, flag));
        sort->s_lmpa[(sort->s_lndx)++] = tlmp;

        if (flag & RT_SORT_REV) {
            /*
             * Indicate the object has had its .init collected.
             * Note that regardless of the object having a .init,
             * the object is added to the tsort list, as it's from
             * this list that any post-init flags are established.
             */
            FLAGS(tlmp) |= FLG_RT_INITCLCT;
        } else {
            /*
             * Indicate the object has had its .fini collected.
             * Note that regardless of the object having a .fini,
             * the object is added to the tsort list, as it's from
             * this list that any audit_objclose() activity is
             * triggered.
             */
            FLAGS(tlmp) |= FLG_RT_FINICLCT;
        }

        /*
         * If tracing, save the strongly connected component.
         */
        if (tracing && (aplist_append(&alp, tlmp,
            AL_CNT_SCC) == NULL))
            return (0);

    } while (tlmp != lmp);

    /*
     * Determine if there are cyclic dependencies to process.  If so, sort
     * the components, and retain them for tracing output.
     */
    if (sort->s_lndx > (num + 1)) {
        if (sort_scc(sort, num, flag) == 0)
            return (0);

        if (tracing && (aplist_append(&sort->s_scc, alp,
            AL_CNT_SCC) == NULL))
            return (0);
    }
    return (1);
}
static int
dep_visit(Lm_list *, Rt_map *, uint_t, Rt_map *, Sort *, int);
static int
_dep_visit(Lm_list *lml, int min, Rt_map *clmp, Rt_map *dlmp, uint_t bflags,
    Sort *sort, int flag)
{
    int _min;

    /*
     * Only collect objects that belong to the caller's link-map.  Catches
     * cross dependencies (filtering) to ld.so.1.
     */
    if (LIST(dlmp) != lml)
        return (min);

    /*
     * Determine if this object hasn't been inspected.
     */
    if ((_min = SORTVAL(dlmp)) == -1) {
        if (flag & RT_SORT_REV) {
            /*
             * For .init processing, only collect objects that have
             * been relocated and haven't already been collected.
             */
            if ((FLAGS(dlmp) & (FLG_RT_RELOCED |
                FLG_RT_INITCLCT)) != FLG_RT_RELOCED)
                return (min);

            /*
             * If this object contains no .init, there's no need to
             * establish a dependency.
             */
            if ((INIT(dlmp) == 0) && (INITARRAY(dlmp) == 0))
                return (min);

        } else {
            /*
             * For .fini processing only collect objects that have
             * had their .init collected, and haven't already been
             * .fini collected.
             */
            if ((FLAGS(dlmp) & (FLG_RT_INITCLCT |
                FLG_RT_FINICLCT)) != FLG_RT_INITCLCT)
                return (min);

            /*
             * If we're deleting a subset of objects, only collect
             * those marked for deletion.
             */
            if ((flag & RT_SORT_DELETE) &&
                ((FLAGS(dlmp) & FLG_RT_DELETE) == 0))
                return (min);

            /*
             * If this object contains no .fini, there's no need to
             * establish a dependency.
             */
            if ((FINI(dlmp) == 0) && (FINIARRAY(dlmp) == 0))
                return (min);
        }

        /*
         * Inspect this new dependency.
         */
        if ((_min = dep_visit(lml, clmp, bflags, dlmp,
            sort, flag)) == -1)
            return (-1);
    }

    /*
     * Keep track of the smallest SORTVAL that has been encountered.  If
     * this value is smaller than the present object, then the dependency
     * edge has cycled back to objects that have been processed earlier
     * along this dependency edge.
     */
    if (_min < min) {
        DBG_CALL(Dbg_util_edge_out(clmp, sort->s_stack[_min]));
        return (_min);
    }
    return (min);
}
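/*
 * Note: the SORTVAL minimum propagated by _dep_visit() plays the same
 * role as the "low-link" value in the classic DFS-based strongly
 * connected components algorithm: when a dependency edge reaches an
 * object that is still on the sorting stack, the smaller value is
 * returned so the cycle is recognized once the traversal unwinds.
 */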
/*
 * Visit the dependencies of each object.
 */
static int
dep_visit(Lm_list *lml, Rt_map *clmp, uint_t cbflags, Rt_map *lmp, Sort *sort,
    int flag)
{
    int         min;
    Aliste      idx1;
    Bnd_desc    *bdp;
    Dyninfo     *dip;

    min = SORTVAL(lmp) = sort->s_sndx;
    sort->s_stack[(sort->s_sndx)++] = lmp;

    if (FLAGS(lmp) & FLG_RT_INITFRST)
        sort->s_initfirst++;

    DBG_CALL(Dbg_util_edge_in(lml, clmp, cbflags, lmp, min, flag));

    /*
     * Traverse both explicit and implicit dependencies.
     */
    for (APLIST_TRAVERSE(DEPENDS(lmp), idx1, bdp)) {
        if ((min = _dep_visit(lml, min, lmp, bdp->b_depend,
            bdp->b_flags, sort, flag)) == -1)
            return (-1);
    }

    /*
     * Traverse any filtee dependencies.
     */
    if (((dip = DYNINFO(lmp)) != NULL) && (FLAGS1(lmp) & MSK_RT_FILTER)) {
        uint_t  cnt, max = DYNINFOCNT(lmp);

        for (cnt = 0; cnt < max; cnt++, dip++) {
            Alist   *falp;
            Pdesc   *pdp;

            if (((falp = (Alist *)dip->di_info) == NULL) ||
                ((dip->di_flags & MSK_DI_FILTER) == 0))
                continue;

            for (ALIST_TRAVERSE(falp, idx1, pdp)) {
                Aliste      idx2;
                Grp_hdl     *ghp;
                Grp_desc    *gdp;

                if ((pdp->pd_plen == 0) ||
                    ((ghp = (Grp_hdl *)pdp->pd_info) == NULL))
                    continue;

                for (ALIST_TRAVERSE(ghp->gh_depends, idx2,
                    gdp)) {
                    if (gdp->gd_depend == lmp)
                        continue;

                    if ((min = _dep_visit(lml, min, lmp,
                        gdp->gd_depend, BND_FILTER,
                        sort, flag)) == -1)
                        return (-1);
                }
            }
        }
    }

    /*
     * Having returned to where the minimum SORTVAL is equivalent to the
     * object that has just been processed, collect any dependencies that
     * are available on the sorting stack.
     */
    if (min == SORTVAL(lmp)) {
        if (visit(lml, lmp, sort, flag) == 0)
            return (-1);
    }

    return (min);
}
/*
 * Find the corresponding strongly connected component structure.
 */
static APlist *
trace_find_scc(Sort *sort, Rt_map *lmp)
{
    APlist  *alp;
    Aliste  idx1;

    for (APLIST_TRAVERSE(sort->s_scc, idx1, alp)) {
        Rt_map  *lmp2;
        Aliste  idx2;

        for (APLIST_TRAVERSE(alp, idx2, lmp2)) {
            if (lmp == lmp2)
                return (alp);
        }
    }
    return (NULL);
}
/*
 * Print out the .init dependency information (ldd).
 */
static void
trace_sort(Sort *sort)
{
    int     ndx = 0;
    APlist  *alp;
    Rt_map  *lmp1;

    (void) printf(MSG_ORIG(MSG_STR_NL));

    while ((lmp1 = sort->s_lmpa[ndx++]) != NULL) {
        static const char   *ffmt, *cfmt = 0, *sfmt = 0;
        Bnd_desc            *bdp;
        Aliste              idx1;

        if ((INIT(lmp1) == 0) || (FLAGS(lmp1) & FLG_RT_INITCALL))
            continue;

        if (sfmt == 0)
            sfmt = MSG_INTL(MSG_LDD_INIT_FMT_02);

        /*
         * If the only component on the strongly connected list is
         * this link-map, then there are no dependencies.
         */
        if ((alp = trace_find_scc(sort, lmp1)) == NULL) {
            (void) printf(sfmt, NAME(lmp1));
            continue;
        }

        /*
         * Establish message formats for cyclic dependencies.
         */
        if (cfmt == 0) {
            cfmt = MSG_INTL(MSG_LDD_INIT_FMT_03);
            ffmt = MSG_ORIG(MSG_LDD_INIT_FMT_FILE);
        }

        (void) printf(cfmt, NAME(lmp1), CYCGROUP(lmp1));

        for (APLIST_TRAVERSE(CALLERS(lmp1), idx1, bdp)) {
            Rt_map  *lmp3, *lmp2 = bdp->b_caller;
            Aliste  idx2;

            for (APLIST_TRAVERSE(alp, idx2, lmp3)) {
                if (lmp2 != lmp3)
                    continue;

                (void) printf(ffmt, NAME(lmp3));
            }
        }
    }
}
/*
 * A reverse ordered list (for .init's) contains INITFIRST elements.  Move each
 * of these elements to the front of the list.
 */
static void
r_initfirst(Sort *sort, int end)
{
    Rt_map  *tlmp;
    int     bgn, ifst, lifst = 0;

    for (bgn = 0; bgn < sort->s_initfirst; bgn++) {
        for (ifst = lifst; ifst <= end; ifst++) {
            tlmp = sort->s_lmpa[ifst];

            if (!(FLAGS(tlmp) & FLG_RT_INITFRST))
                continue;

            /*
             * If the INITFIRST element is already at the front of
             * the list leave it there.
             */
            if (ifst == bgn) {
                lifst = ifst + 1;
                break;
            }

            /*
             * Move the elements from the front of the list up to
             * the INITFIRST element, back one position.
             */
            (void) memmove(&sort->s_lmpa[bgn + 1],
                &sort->s_lmpa[bgn],
                ((ifst - bgn) * sizeof (Rt_map *)));

            /*
             * Insert the INITFIRST element at the front of the list.
             */
            sort->s_lmpa[bgn] = tlmp;
            lifst = ifst + 1;
            break;
        }
    }
}
/*
 * A forward ordered list (for .fini's) contains INITFIRST elements.  Move each
 * of these elements to the back of the list.
 */
static void
f_initfirst(Sort *sort, int end)
{
    Rt_map  *tlmp;
    int     bgn, ifst, lifst = 0;

    for (bgn = 0; bgn < sort->s_initfirst; bgn++) {
        for (ifst = lifst; ifst <= end; ifst++) {
            tlmp = sort->s_lmpa[ifst];

            if (!(FLAGS(tlmp) & FLG_RT_INITFRST))
                continue;

            /*
             * If the INITFIRST element is already at the end of
             * the list leave it there.
             */
            if (ifst == end) {
                end--;
                break;
            }

            /*
             * Move the elements from after the INITFIRST element
             * up to the back of the list, up one position.
             */
            (void) memmove(&sort->s_lmpa[ifst],
                &sort->s_lmpa[ifst + 1],
                ((end - ifst) * sizeof (Rt_map *)));

            /*
             * Insert the INITFIRST element at the back of the list.
             */
            sort->s_lmpa[end--] = tlmp;
            lifst = ifst;
            break;
        }
    }
}
/*
 * Determine whether .init or .fini processing is required.
 */
static int
initorfini(Lm_list *lml, Rt_map *lmp, int flag, Sort *sort)
{
    if (flag & RT_SORT_REV) {
        /*
         * For .init processing, only collect objects that have been
         * relocated and haven't already been collected.
         */
        if ((FLAGS(lmp) & (FLG_RT_RELOCED | FLG_RT_INITCLCT)) !=
            FLG_RT_RELOCED)
            return (0);

        if (dep_visit(lml, 0, 0, lmp, sort, flag) == -1)
            return (1);

    } else if (!(flag & RT_SORT_DELETE) || (FLAGS(lmp) & FLG_RT_DELETE)) {
        /*
         * Only collect objects that have had their .init collected,
         * and haven't already been .fini collected.
         */
        if (!((FLAGS(lmp) & (FLG_RT_INITCLCT | FLG_RT_FINICLCT)) ==
            FLG_RT_INITCLCT))
            return (0);

        if (dep_visit(lml, 0, 0, lmp, sort, flag) == -1)
            return (1);
    }
    return (0);
}
/*
 * Sort the dependency graph.
 */
Rt_map **
tsort(Rt_map *lmp, int num, int flag)
{
    Rt_map  *_lmp;
    Lm_list *lml = LIST(lmp);
    Word    init = lml->lm_flags & LML_FLG_TRC_INIT;
    Sort    sort = { 0 };
    size_t  size;

    /*
     * Prior to tsorting any .init sections, insure that the `environ'
     * symbol is initialized for this link-map list.
     */
    if ((flag & RT_SORT_REV) && ((lml->lm_flags &
        (LML_FLG_TRC_ENABLE | LML_FLG_ENVIRON)) == 0))
        set_environ(lml);

    /*
     * Allocate memory for the link-map list array.  Calloc the array to
     * insure all elements are zero, as we might find that no objects need
     * processing.  At the same time, allocate a stack for any topological
     * sorting that might be necessary.
     */
    sort.s_lmp = lmp;
    sort.s_num = num + 1;
    size = sort.s_num * sizeof (Rt_map *);
    if ((sort.s_lmpa = calloc(2, size)) == NULL)
        return ((Rt_map **)S_ERROR);
    sort.s_stack = (Rt_map **)((uintptr_t)sort.s_lmpa + size);
    /*
     * Determine where to start searching for tsort() candidates.  Any call
     * to tsort() for .init processing is passed the link-map from which to
     * start searching.  However, if new objects have dependencies on
     * existing objects, or existing objects have been promoted (RTLD_LAZY
     * to RTLD_NOW), then start searching at the head of the link-map list.
     * These previously loaded objects will have been tagged for inclusion
     * in this tsort() pass.  They still remain on an existing tsort() list,
     * which must have been preempted for control to have arrived here.
     * However, they will be ignored when encountered on any previous
     * tsort() list if their .init has already been called.
     */
    if (lml->lm_flags & LML_FLG_OBJREEVAL)
        _lmp = lml->lm_head;
    else
        _lmp = lmp;

    DBG_CALL(Dbg_file_bindings(_lmp, flag));
    lml->lm_flags &=
        ~(LML_FLG_OBJREEVAL | LML_FLG_OBJADDED | LML_FLG_OBJDELETED);
    /*
     * If interposers exist, inspect these objects first.
     *
     * Interposers can provide implicit dependencies - for example, an
     * application that has a dependency on libumem will cause any other
     * dependencies of the application that use the malloc family to have
     * an implicit dependency on libumem.  However, under the default
     * condition of lazy binding, these dependency relationships on libumem
     * are unknown to the tsorting process (ie. a call to one of the malloc
     * family has not occurred to establish the dependency).  This lack of
     * dependency information makes the interposer look "standalone",
     * whereas the interposer's .init/.fini should be analyzed with respect
     * to the dependency relationship libumem will eventually create.
     *
     * By inspecting interposing objects first, we are able to trigger
     * their .init sections to be accounted for before any others.
     * Selecting these .init sections first is important for the malloc
     * libraries, as these libraries need to prepare for pthread_atfork().
     * However, handling interposer libraries in this generic fashion
     * should help provide for other dependency relationships that may
     * exist.
     */
    if ((lml->lm_flags & (LML_FLG_INTRPOSE | LML_FLG_INTRPOSETSORT)) ==
        LML_FLG_INTRPOSE) {
        Rt_map  *ilmp = _lmp;

        /*
         * Unless the executable is tagged as an interposer, skip to
         * the next object.
         */
        if ((FLAGS(ilmp) & MSK_RT_INTPOSE) == 0)
            ilmp = NEXT_RT_MAP(ilmp);

        for (; ilmp; ilmp = NEXT_RT_MAP(ilmp)) {
            if ((FLAGS(ilmp) & MSK_RT_INTPOSE) == 0)
                break;

            if (initorfini(lml, ilmp, (flag | RT_SORT_INTPOSE),
                &sort) != 0)
                return ((Rt_map **)S_ERROR);
        }

        /*
         * Once all interposers are processed, there is no need to
         * look for interposers again.  An interposer can only
         * be introduced before any relocation takes place, thus
         * interposer .init's will be grabbed during the first tsort
         * starting at the head of the link-map list.
         *
         * Interposers can't be unloaded.  Thus interposer .fini's can
         * only be called during atexit() processing.  The interposer
         * tsort flag is removed from each link-map list during
         * atexit_fini() so that the interposers' .fini sections are
         * processed appropriately.
         */
        lml->lm_flags |= LML_FLG_INTRPOSETSORT;
    }
    /*
     * Inspect any standard objects.
     */
    for (; _lmp; _lmp = NEXT_RT_MAP(_lmp)) {
        if (FLAGS(_lmp) & MSK_RT_INTPOSE)
            continue;

        if (initorfini(lml, _lmp, flag, &sort) != 0)
            return ((Rt_map **)S_ERROR);
    }
    /*
     * The dependencies have been collected such that they are appropriate
     * for an .init order; for a .fini order reverse them.
     */
    if (flag & RT_SORT_FWD) {
        int bgn = 0, end = sort.s_lndx - 1;

        while (bgn < end) {
            Rt_map  *tlmp = sort.s_lmpa[end];

            sort.s_lmpa[end] = sort.s_lmpa[bgn];
            sort.s_lmpa[bgn] = tlmp;

            bgn++, end--;
        }
    }
    /*
     * If INITFIRST objects have been collected then move them to the front
     * or end of the list as appropriate.
     */
    if (sort.s_initfirst) {
        if (flag & RT_SORT_REV)
            r_initfirst(&sort, sort.s_lndx - 1);
        else
            f_initfirst(&sort, sort.s_lndx - 1);
    }

    /*
     * If tracing .init sections (only meaningful for RT_SORT_REV), print
     * out the sorted dependencies.
     */
    if (init)
        trace_sort(&sort);
    /*
     * Clean any temporary structures prior to return.
     */
    if (sort.s_queue) {
        Aliste  idx;
        Rt_map  *lmp2;

        /*
         * Traverse the link-maps collected on the sort queue and
         * delete the depth index.  These link-maps may be traversed
         * again to sort other components, either for .inits or,
         * almost certainly, for .finis.
         */
        for (APLIST_TRAVERSE(sort.s_queue, idx, lmp2))
            IDX(lmp2) = 0;

        free(sort.s_queue);
    }

    if (sort.s_scc) {
        Aliste  idx;
        APlist  *alp;

        for (APLIST_TRAVERSE(sort.s_scc, idx, alp))
            free(alp);
        free(sort.s_scc);
    }

    /*
     * The caller is responsible for freeing the sorted link-map list once
     * the associated .init/.fini's have been fired.
     */
    DBG_CALL(Dbg_file_bindings_done(lml));
    return (sort.s_lmpa);
}