/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 1996, 2010, Oracle and/or its affiliates. All rights reserved.
 */
/*
 * Utilities to handle shared object dependency graph.
 *
 * The algorithms used in this file are taken from the following book:
 *	Algorithms in C
 *		Robert Sedgewick
 *	Addison-Wesley Publishing company
 *	ISBN 0-201-51425-7
 *	From the following chapters:
 *		Chapter 29 Elementary Graph Algorithms
 *		Chapter 32 Directed Graph
 */
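/*
 * In outline, the sort proceeds as follows.  dep_visit() performs a
 * depth-first traversal of each object's dependencies.  Every object visited
 * is pushed on a stack (s_stack) and its SORTVAL() records its stack index;
 * a SORTVAL() of -1 marks an object that has not yet been inspected.  The
 * smallest SORTVAL() reachable from an object is propagated back up the
 * recursion, much like the low-link value in Tarjan's strongly connected
 * components algorithm.  When this minimum equals the object's own
 * SORTVAL(), the object is the root of a strongly connected component, and
 * visit() pops the component off the stack onto the output array (s_lmpa).
 * A component of more than one element represents a cyclic dependency,
 * which sort_scc() then orders reverse breadth-first.
 */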
#include	<sys/types.h>
#include	<stdarg.h>
#include	<stdio.h>
#include	<dlfcn.h>
#include	<signal.h>
#include	<locale.h>
#include	<string.h>
#include	<libintl.h>
#include	<debug.h>
#include	"_rtld.h"
#include	"msg.h"
/*
 * Structure for maintaining sorting state.
 */
typedef struct {
	Rt_map		**s_lmpa;	/* link-map[] (returned to caller) */
	Rt_map		*s_lmp;		/* originating link-map */
	Rt_map		**s_stack;	/* strongly connected component stack */
	APlist		*s_scc;		/* cyclic list */
	APlist		*s_queue;	/* depth queue for cyclic components */
	int		s_sndx;		/* present stack index */
	int		s_lndx;		/* present link-map index */
	int		s_num;		/* number of objects to sort */
	int		s_initfirst;	/* no. of INITFIRST entries */
} Sort;

#define	AL_CNT_SCC	10
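/*
 * AL_CNT_SCC is the allocation count hint handed to aplist_append() when
 * growing the per-component and tracing lists built below.
 */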
/*
 * qsort(3c) comparison function.
 */
static int
compare(const void *lmpp1, const void *lmpp2)
{
	Rt_map	*lmp1 = *((Rt_map **)lmpp1);
	Rt_map	*lmp2 = *((Rt_map **)lmpp2);

	if (IDX(lmp1) > IDX(lmp2))
		return (-1);
	if (IDX(lmp1) < IDX(lmp2))
		return (1);
	return (0);
}
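/*
 * The IDX() values compared above are the breadth-first depth indexes
 * assigned in sort_scc() below; sorting on descending IDX() produces the
 * reverse breadth-first order in which a cycle's members are emitted.
 */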
/*
 * This routine is called when a cyclic dependency is detected between strongly
 * connected components.  The nodes within the cycle are reverse breadth-first
 * sorted.
 */
static int
sort_scc(Sort *sort, int fndx, int flag)
{
	static const char	*tfmt = 0, *ffmt;
	static int		cnt = 1;
	int			ndx;
	Rt_map			*lmp;
	Lm_list			*lml = LIST(sort->s_lmp);
	Word			lmflags = lml->lm_flags;
	Word			init, unref;

	/*
	 * If this is the first cyclic dependency, traverse the new objects
	 * that have been added to the link-map list and for each object
	 * establish a unique depth index.  We build this dynamically as we
	 * have no idea of the number of objects that will be inspected (logic
	 * matches that used by dlsym() to traverse lazy dependencies).
	 */
	if (sort->s_queue == NULL) {
		Aliste	idx1;
		Rt_map	*lmp, *lmp2;

		lmp = sort->s_lmp;
		ndx = 1;

		if (aplist_append(&sort->s_queue, lmp, sort->s_num) == NULL)
			return (0);

		IDX(lmp) = ndx++;

		for (APLIST_TRAVERSE(sort->s_queue, idx1, lmp2)) {
			Bnd_desc	*bdp;
			Aliste		idx2;

			for (APLIST_TRAVERSE(DEPENDS(lmp2), idx2, bdp)) {
				Rt_map	*lmp3 = bdp->b_depend;

				if (IDX(lmp3) || (LIST(lmp3) != lml))
					continue;

				/*
				 * If we're .init processing and this
				 * dependency's .init has been called, skip it.
				 */
				if ((flag & RT_SORT_REV) &&
				    (FLAGS(lmp3) & FLG_RT_INITCALL))
					continue;

				if (aplist_append(&sort->s_queue, lmp3,
				    sort->s_num) == NULL)
					return (0);

				IDX(lmp3) = ndx++;
			}
		}
	}

	/*
	 * Sort the cyclics.
	 */
	qsort(&(sort->s_lmpa[fndx]), sort->s_lndx - fndx, sizeof (Rt_map *),
	    compare);

	/*
	 * Under ldd -i, or debugging, print this cycle.  Under ldd -i/-U
	 * assign each object a group identifier so that cyclic dependent
	 * callers can be better traced (see trace_sort()), or analyzed for
	 * non-use.
	 */
	init = lmflags & LML_FLG_TRC_INIT;
	unref = lmflags & LML_FLG_TRC_UNREF;
	if ((init == 0) && (unref == 0) && (DBG_ENABLED == 0))
		return (1);

	if (init) {
		if (tfmt == 0) {
			tfmt = MSG_INTL(MSG_LDD_INIT_FMT_01);
			ffmt = MSG_ORIG(MSG_LDD_INIT_FMT_FILE);
		}
		(void) printf(tfmt, cnt);
	}
	DBG_CALL(Dbg_util_scc_title(lml, (flag & RT_SORT_REV)));

	/*
	 * Identify this cyclic group, and under ldd -i print the cycle in the
	 * order its components will be run.
	 */
	if (flag & RT_SORT_REV) {
		for (ndx = fndx; ndx < sort->s_lndx; ndx++) {
			lmp = sort->s_lmpa[ndx];
			CYCGROUP(lmp) = cnt;

			if (init)
				(void) printf(ffmt, NAME(lmp));
			DBG_CALL(Dbg_util_scc_entry(lmp, ndx));
		}
		cnt++;

	} else if (DBG_ENABLED) {
		for (ndx = sort->s_lndx - 1; ndx >= fndx; ndx--) {
			lmp = sort->s_lmpa[ndx];
			DBG_CALL(Dbg_util_scc_entry(lmp, ndx));
		}
	}

	/*
	 * If we're looking for unused dependencies, determine if any of these
	 * cyclic components are referenced from outside of the cycle.
	 */
	if (unref || DBG_ENABLED) {
		for (ndx = fndx; ndx < sort->s_lndx; ndx++) {
			Bnd_desc	*bdp;
			Aliste		idx;

			lmp = sort->s_lmpa[ndx];

			/*
			 * If this object has a handle then it can be in use
			 * by anyone.
			 */
			if (HANDLES(lmp))
				return (1);

			/*
			 * Traverse this object's callers looking for outside
			 * references.
			 */
			for (APLIST_TRAVERSE(CALLERS(lmp), idx, bdp)) {
				Rt_map	*clmp = bdp->b_caller;

				if ((bdp->b_flags & BND_REFER) == 0)
					continue;

				if (CYCGROUP(lmp) != CYCGROUP(clmp))
					return (1);
			}
		}

		/*
		 * If we're here, then none of the cyclic dependents have been
		 * referenced from outside of the cycle; mark them as unused.
		 */
		for (ndx = fndx; ndx < sort->s_lndx; ndx++) {
			lmp = sort->s_lmpa[ndx];
			FLAGS1(lmp) &= ~FL1_RT_USED;
		}
	}
	return (1);
}
/*
 * Take elements off of the stack and move them to the link-map array.
 * Typically this routine just pops one strongly connected component
 * (individual link-map) at a time.  When a cyclic dependency has been
 * detected the stack will contain more than just the present object to
 * process, and will trigger the later call to sort_scc() to sort these
 * elements.
 */
static int
visit(Lm_list *lml, Rt_map *lmp, Sort *sort, int flag)
{
	APlist		*alp = NULL;
	int		num = sort->s_lndx;
	Word		tracing = lml->lm_flags & LML_FLG_TRC_ENABLE;
	Rt_map		*tlmp;

	do {
		tlmp = sort->s_stack[--(sort->s_sndx)];
		SORTVAL(tlmp) = sort->s_num;
		DBG_CALL(Dbg_util_collect(tlmp, sort->s_lndx, flag));
		sort->s_lmpa[(sort->s_lndx)++] = tlmp;

		if (flag & RT_SORT_REV) {
			/*
			 * Indicate the object has had its .init collected.
			 * Note that, regardless of whether the object has a
			 * .init, the object is added to the tsort list, as
			 * it's from this list that any post-init flags are
			 * established.
			 */
			FLAGS(tlmp) |= FLG_RT_INITCLCT;
			lml->lm_init--;
		} else {
			/*
			 * Indicate the object has had its .fini collected.
			 * Note that, regardless of whether the object has a
			 * .fini, the object is added to the tsort list, as
			 * it's from this list that any audit_objclose()
			 * activity is triggered.
			 */
			FLAGS(tlmp) |= FLG_RT_FINICLCT;
		}

		/*
		 * If tracing, save the strongly connected component.
		 */
		if (tracing && (aplist_append(&alp, tlmp,
		    AL_CNT_SCC) == NULL))
			return (0);
	} while (tlmp != lmp);

	/*
	 * Determine if there are cyclic dependencies to process.  If so, sort
	 * the components, and retain them for tracing output.
	 */
	if (sort->s_lndx > (num + 1)) {
		if (sort_scc(sort, num, flag) == 0)
			return (0);

		if (tracing && (aplist_append(&sort->s_scc, alp,
		    AL_CNT_SCC) == NULL))
			return (0);
	} else
		free(alp);

	return (1);
}
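/*
 * dep_visit() and _dep_visit() implement the recursive descent described at
 * the top of this file: dep_visit() pushes an object on the sorting stack
 * and walks its dependencies, while _dep_visit() filters out dependencies
 * that don't qualify for this pass (wrong link-map list, no .init/.fini,
 * already collected, or not marked for deletion) before recursing.  The
 * minimum SORTVAL() seen along any edge is returned so that cycles can be
 * detected by the caller.
 */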
static int
dep_visit(Lm_list *, Rt_map *, uint_t, Rt_map *, Sort *, int);
static int
_dep_visit(Lm_list *lml, int min, Rt_map *clmp, Rt_map *dlmp, uint_t bflags,
    Sort *sort, int flag)
{
	int	_min;

	/*
	 * Only collect objects that belong to the caller's link-map.  Catches
	 * cross dependencies (filtering) to ld.so.1.
	 */
	if (LIST(dlmp) != lml)
		return (min);

	/*
	 * Determine if this object hasn't been inspected.
	 */
	if ((_min = SORTVAL(dlmp)) == -1) {
		if (flag & RT_SORT_REV) {
			/*
			 * For .init processing, only collect objects that have
			 * been relocated and haven't already been collected.
			 */
			if ((FLAGS(dlmp) & (FLG_RT_RELOCED |
			    FLG_RT_INITCLCT)) != FLG_RT_RELOCED)
				return (min);

			/*
			 * If this object contains no .init, there's no need
			 * to establish a dependency.
			 */
			if ((INIT(dlmp) == 0) && (INITARRAY(dlmp) == 0))
				return (min);
		} else {
			/*
			 * For .fini processing only collect objects that have
			 * had their .init collected, and haven't already been
			 * .fini collected.
			 */
			if ((FLAGS(dlmp) & (FLG_RT_INITCLCT |
			    FLG_RT_FINICLCT)) != FLG_RT_INITCLCT)
				return (min);

			/*
			 * If we're deleting a subset of objects, only collect
			 * those marked for deletion.
			 */
			if ((flag & RT_SORT_DELETE) &&
			    ((FLAGS(dlmp) & FLG_RT_DELETE) == 0))
				return (min);

			/*
			 * If this object contains no .fini, there's no need
			 * to establish a dependency.
			 */
			if ((FINI(dlmp) == 0) && (FINIARRAY(dlmp) == 0))
				return (min);
		}

		/*
		 * Inspect this new dependency.
		 */
		if ((_min = dep_visit(lml, clmp, bflags, dlmp,
		    sort, flag)) == -1)
			return (-1);
	}

	/*
	 * Keep track of the smallest SORTVAL that has been encountered.  If
	 * this value is smaller than the present object, then the dependency
	 * edge has cycled back to objects that have been processed earlier
	 * along this dependency edge.
	 */
	if (_min < min) {
		DBG_CALL(Dbg_util_edge_out(clmp, sort->s_stack[_min]));
		return (_min);
	} else
		return (min);
}
/*
 * Visit the dependencies of each object.
 */
static int
dep_visit(Lm_list *lml, Rt_map *clmp, uint_t cbflags, Rt_map *lmp, Sort *sort,
    int flag)
{
	int		min;
	Aliste		idx1;
	Bnd_desc	*bdp;
	Dyninfo		*dip;

	min = SORTVAL(lmp) = sort->s_sndx;
	sort->s_stack[(sort->s_sndx)++] = lmp;

	if (FLAGS(lmp) & FLG_RT_INITFRST)
		sort->s_initfirst++;

	DBG_CALL(Dbg_util_edge_in(lml, clmp, cbflags, lmp, min, flag));

	/*
	 * Traverse both explicit and implicit dependencies.
	 */
	for (APLIST_TRAVERSE(DEPENDS(lmp), idx1, bdp)) {
		if ((min = _dep_visit(lml, min, lmp, bdp->b_depend,
		    bdp->b_flags, sort, flag)) == -1)
			return (-1);
	}

	/*
	 * Traverse any filtee dependencies.
	 */
	if (((dip = DYNINFO(lmp)) != NULL) && (FLAGS1(lmp) & MSK_RT_FILTER)) {
		uint_t	cnt, max = DYNINFOCNT(lmp);

		for (cnt = 0; cnt < max; cnt++, dip++) {
			Alist	*falp;
			Pdesc	*pdp;

			if (((falp = (Alist *)dip->di_info) == NULL) ||
			    ((dip->di_flags & MSK_DI_FILTER) == 0))
				continue;

			for (ALIST_TRAVERSE(falp, idx1, pdp)) {
				Aliste		idx2;
				Grp_hdl		*ghp;
				Grp_desc	*gdp;

				if ((pdp->pd_plen == 0) ||
				    ((ghp = (Grp_hdl *)pdp->pd_info) == NULL))
					continue;

				for (ALIST_TRAVERSE(ghp->gh_depends, idx2,
				    gdp)) {

					if (gdp->gd_depend == lmp)
						continue;
					if ((min = _dep_visit(lml, min, lmp,
					    gdp->gd_depend, BND_FILTER,
					    sort, flag)) == -1)
						return (-1);
				}
			}
		}
	}

	/*
	 * Having returned to where the minimum SORTVAL is equivalent to the
	 * object that has just been processed, collect any dependencies that
	 * are available on the sorting stack.
	 */
	if (min == SORTVAL(lmp)) {
		if (visit(lml, lmp, sort, flag) == 0)
			return (-1);
	}

	return (min);
}
/*
 * Find corresponding strongly connected component structure.
 */
static APlist *
trace_find_scc(Sort *sort, Rt_map *lmp)
{
	APlist		*alp;
	Aliste		idx1;

	for (APLIST_TRAVERSE(sort->s_scc, idx1, alp)) {
		Rt_map	*lmp2;
		Aliste	idx2;

		for (APLIST_TRAVERSE(alp, idx2, lmp2)) {
			if (lmp == lmp2)
				return (alp);
		}
	}
	return (NULL);
}
/*
 * Print out the .init dependency information (ldd).
 */
static void
trace_sort(Sort *sort)
{
	int		ndx = 0;
	APlist		*alp;
	Rt_map		*lmp1;

	(void) printf(MSG_ORIG(MSG_STR_NL));

	while ((lmp1 = sort->s_lmpa[ndx++]) != NULL) {
		static const char	*ffmt, *cfmt = 0, *sfmt = 0;
		Bnd_desc		*bdp;
		Aliste			idx1;

		if ((INIT(lmp1) == 0) || (FLAGS(lmp1) & FLG_RT_INITCALL))
			continue;

		if (sfmt == 0)
			sfmt = MSG_INTL(MSG_LDD_INIT_FMT_02);

		/*
		 * If the only component on the strongly connected list is
		 * this link-map, then there are no dependencies.
		 */
		if ((alp = trace_find_scc(sort, lmp1)) == NULL) {
			(void) printf(sfmt, NAME(lmp1));
			continue;
		}

		/*
		 * Establish message formats for cyclic dependencies.
		 */
		if (cfmt == 0) {
			cfmt = MSG_INTL(MSG_LDD_INIT_FMT_03);
			ffmt = MSG_ORIG(MSG_LDD_INIT_FMT_FILE);
		}

		(void) printf(cfmt, NAME(lmp1), CYCGROUP(lmp1));

		for (APLIST_TRAVERSE(CALLERS(lmp1), idx1, bdp)) {
			Rt_map	*lmp3, *lmp2 = bdp->b_caller;
			Aliste	idx2;

			for (APLIST_TRAVERSE(alp, idx2, lmp3)) {
				if (lmp2 != lmp3)
					continue;

				(void) printf(ffmt, NAME(lmp3));
			}
		}
	}
}
/*
 * A reverse ordered list (for .init's) contains INITFIRST elements.  Move
 * each of these elements to the front of the list.
 */
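/*
 * For example, with sort->s_initfirst == 1 and a collected list of
 *	[ A, B, C, D ]		(C marked INITFIRST)
 * the loop below shifts A and B back one slot and inserts C at the front,
 * yielding
 *	[ C, A, B, D ]
 */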
static void
r_initfirst(Sort *sort, int end)
{
	Rt_map	*tlmp;
	int	bgn, ifst, lifst = 0;

	for (bgn = 0; bgn < sort->s_initfirst; bgn++) {
		for (ifst = lifst; ifst <= end; ifst++) {
			tlmp = sort->s_lmpa[ifst];

			if (!(FLAGS(tlmp) & FLG_RT_INITFRST))
				continue;

			/*
			 * If the INITFIRST element is already at the front of
			 * the list leave it there.
			 */
			if (ifst == bgn) {
				lifst = ifst + 1;
				break;
			}

			/*
			 * Move the elements from the front of the list up to
			 * the INITFIRST element, back one position.
			 */
			(void) memmove(&sort->s_lmpa[bgn + 1],
			    &sort->s_lmpa[bgn],
			    ((ifst - bgn) * sizeof (Rt_map *)));

			/*
			 * Insert INITFIRST element at the front of the list.
			 */
			sort->s_lmpa[bgn] = tlmp;
			lifst = ifst + 1;
			break;
		}
	}
}
/*
 * A forward ordered list (for .fini's) contains INITFIRST elements.  Move
 * each of these elements to the end of the list.
 */
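/*
 * For example, with sort->s_initfirst == 1 and a collected list of
 *	[ A, B, C, D ]		(B marked INITFIRST)
 * the loop below shifts C and D up one slot and inserts B at the back,
 * yielding
 *	[ A, C, D, B ]
 */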
static void
f_initfirst(Sort *sort, int end)
{
	Rt_map	*tlmp;
	int	bgn, ifst, lifst = 0;

	for (bgn = 0; bgn < sort->s_initfirst; bgn++) {
		for (ifst = lifst; ifst <= end; ifst++) {
			tlmp = sort->s_lmpa[ifst];

			if (!(FLAGS(tlmp) & FLG_RT_INITFRST))
				continue;

			/*
			 * If the INITFIRST element is already at the end of
			 * the list leave it there.
			 */
			if (ifst == end)
				break;

			/*
			 * Move the elements from after the INITFIRST element
			 * up to the back of the list, up one position.
			 */
			(void) memmove(&sort->s_lmpa[ifst],
			    &sort->s_lmpa[ifst + 1],
			    ((end - ifst) * sizeof (Rt_map *)));

			/*
			 * Insert INITFIRST element at the back of the list.
			 */
			sort->s_lmpa[end--] = tlmp;
			lifst = ifst;
			break;
		}
	}
}
/*
 * Determine whether .init or .fini processing is required.
 */
static int
initorfini(Lm_list *lml, Rt_map *lmp, int flag, Sort *sort)
{
	if (flag & RT_SORT_REV) {
		/*
		 * For .init processing, only collect objects that have been
		 * relocated and haven't already been collected.
		 */
		if ((FLAGS(lmp) & (FLG_RT_RELOCED | FLG_RT_INITCLCT)) !=
		    FLG_RT_RELOCED)
			return (0);

		if (dep_visit(lml, 0, 0, lmp, sort, flag) == -1)
			return (1);

	} else if (!(flag & RT_SORT_DELETE) || (FLAGS(lmp) & FLG_RT_DELETE)) {
		/*
		 * Only collect objects that have had their .init collected,
		 * and haven't already been .fini collected.
		 */
		if (!((FLAGS(lmp) & (FLG_RT_INITCLCT | FLG_RT_FINICLCT)) ==
		    (FLG_RT_INITCLCT)))
			return (0);

		if (dep_visit(lml, 0, 0, lmp, sort, flag) == -1)
			return (1);
	}
	return (0);
}
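/*
 * A rough sketch of how a caller might consume the result of tsort()
 * (nlmp and call_init() below stand in for whatever link-map and routine
 * actually fire the .init sections; they are assumptions of this sketch,
 * not defined in this file):
 *
 *	Rt_map	**tobj;
 *
 *	if ((tobj = tsort(nlmp, LIST(nlmp)->lm_init, RT_SORT_REV)) ==
 *	    (Rt_map **)S_ERROR)
 *		return (0);
 *	if (tobj != NULL)
 *		call_init(tobj);
 *
 * The returned array is NULL terminated (it is calloc'ed one element larger
 * than the object count), and the caller is responsible for freeing it once
 * the associated .init/.fini sections have been fired.
 */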
/*
 * Sort the dependency graph.
 */
Rt_map **
tsort(Rt_map *lmp, int num, int flag)
{
	Rt_map		*_lmp;
	Lm_list		*lml = LIST(lmp);
	Word		init = lml->lm_flags & LML_FLG_TRC_INIT;
	Sort		sort = { 0 };
	size_t		size;

	if (num == 0)
		return (0);

	/*
	 * Prior to tsorting any .init sections, ensure that the `environ'
	 * symbol is initialized for this link-map list.
	 */
	if ((flag & RT_SORT_REV) && ((lml->lm_flags &
	    (LML_FLG_TRC_ENABLE | LML_FLG_ENVIRON)) == 0))
		set_environ(lml);

	/*
	 * Allocate memory for the link-map list array.  Calloc the array to
	 * ensure all elements are zero, as we might find that no objects need
	 * processing.  At the same time, allocate a stack for any topological
	 * sorting that might be necessary.
	 */
	sort.s_lmp = lmp;
	sort.s_num = num + 1;
	size = sort.s_num * sizeof (Rt_map *);
	if ((sort.s_lmpa = calloc(2, size)) == NULL)
		return ((Rt_map **)S_ERROR);
	sort.s_stack = (Rt_map **)((uintptr_t)sort.s_lmpa + size);

	/*
	 * Determine where to start searching for tsort() candidates.  Any call
	 * to tsort() for .init processing is passed the link-map from which to
	 * start searching.  However, if new objects have dependencies on
	 * existing objects, or existing objects have been promoted (RTLD_LAZY
	 * to RTLD_NOW), then start searching at the head of the link-map list.
	 * These previously loaded objects will have been tagged for inclusion
	 * in this tsort() pass.  They still remain on an existing tsort()
	 * list, which must have been preempted for control to have arrived
	 * here.  However, they will be ignored when encountered on any
	 * previous tsort() list if their .init has already been called.
	 */
	if (lml->lm_flags & LML_FLG_OBJREEVAL)
		_lmp = lml->lm_head;
	else
		_lmp = lmp;

	DBG_CALL(Dbg_file_bindings(_lmp, flag));
	lml->lm_flags &=
	    ~(LML_FLG_OBJREEVAL | LML_FLG_OBJADDED | LML_FLG_OBJDELETED);

	/*
	 * If interposers exist, inspect these objects first.
	 *
	 * Interposers can provide implicit dependencies - for example, an
	 * application that has a dependency on libumem will cause any other
	 * dependencies of the application that use the malloc family to have
	 * an implicit dependency on libumem.  However, under the default
	 * condition of lazy binding, these dependency relationships on libumem
	 * are unknown to the tsorting process (i.e., a call to one of the
	 * malloc family has not occurred to establish the dependency).  This
	 * lack of dependency information makes the interposer look
	 * "standalone", whereas the interposer's .init/.fini should be
	 * analyzed with respect to the dependency relationship libumem will
	 * eventually create.
	 *
	 * By inspecting interposing objects first, we are able to trigger
	 * their .init sections to be accounted for before any others.
	 * Selecting these .init sections first is important for the malloc
	 * libraries, as these libraries need to prepare for pthread_atfork().
	 * However, handling interposer libraries in this generic fashion
	 * should help provide for other dependency relationships that may
	 * exist.
	 */
	if ((lml->lm_flags & (LML_FLG_INTRPOSE | LML_FLG_INTRPOSETSORT)) ==
	    LML_FLG_INTRPOSE) {
		Rt_map	*ilmp = _lmp;

		/*
		 * Unless the executable is tagged as an interposer, skip to
		 * the next object.
		 */
		if ((FLAGS(ilmp) & MSK_RT_INTPOSE) == 0)
			ilmp = NEXT_RT_MAP(ilmp);

		for (; ilmp; ilmp = NEXT_RT_MAP(ilmp)) {
			if ((FLAGS(ilmp) & MSK_RT_INTPOSE) == 0)
				break;

			if (initorfini(lml, ilmp, (flag | RT_SORT_INTPOSE),
			    &sort) != 0)
				return ((Rt_map **)S_ERROR);
		}

		/*
		 * Once all interposers are processed, there is no need to
		 * look for interposers again.  An interposer can only
		 * be introduced before any relocation takes place, thus
		 * interposer .init's will be grabbed during the first tsort
		 * starting at the head of the link-map list.
		 *
		 * Interposers can't be unloaded.  Thus interposer .fini's can
		 * only be called during atexit() processing.  The interposer
		 * tsort flag is removed from each link-map list during
		 * atexit_fini() so that the interposers' .fini sections are
		 * processed appropriately.
		 */
		lml->lm_flags |= LML_FLG_INTRPOSETSORT;
	}

	/*
	 * Inspect any standard objects.
	 */
	for (; _lmp; _lmp = NEXT_RT_MAP(_lmp)) {
		if (FLAGS(_lmp) & MSK_RT_INTPOSE)
			continue;

		if (initorfini(lml, _lmp, flag, &sort) != 0)
			return ((Rt_map **)S_ERROR);
	}

	/*
	 * The dependencies have been collected such that they are appropriate
	 * for .init order; for .fini order, reverse them.
	 */
	if (flag & RT_SORT_FWD) {
		int	bgn = 0, end = sort.s_lndx - 1;

		while (bgn < end) {
			Rt_map	*tlmp = sort.s_lmpa[end];

			sort.s_lmpa[end] = sort.s_lmpa[bgn];
			sort.s_lmpa[bgn] = tlmp;

			bgn++, end--;
		}
	}

	/*
	 * If INITFIRST objects have been collected then move them to the front
	 * or end of the list as appropriate.
	 */
	if (sort.s_initfirst) {
		if (flag & RT_SORT_REV)
			r_initfirst(&sort, sort.s_lndx - 1);
		else
			f_initfirst(&sort, sort.s_lndx - 1);
	}

	/*
	 * If tracing .init sections (only meaningful for RT_SORT_REV), print
	 * out the sorted dependencies.
	 */
	if (init)
		trace_sort(&sort);

	/*
	 * Clean any temporary structures prior to return.
	 */
	if (sort.s_queue) {
		Aliste	idx;
		Rt_map	*lmp2;

		/*
		 * Traverse the link-maps collected on the sort queue and
		 * delete the depth index.  These link-maps may be traversed
		 * again to sort other components, either for .inits or,
		 * almost certainly, for .finis.
		 */
		for (APLIST_TRAVERSE(sort.s_queue, idx, lmp2))
			IDX(lmp2) = 0;

		free(sort.s_queue);
	}

	if (sort.s_scc) {
		Aliste	idx;
		APlist	*alp;

		for (APLIST_TRAVERSE(sort.s_scc, idx, alp))
			free(alp);
		free(sort.s_scc);
	}

	/*
	 * The caller is responsible for freeing the sorted link-map list once
	 * the associated .init/.fini's have been fired.
	 */
	DBG_CALL(Dbg_file_bindings_done(lml));
	return (sort.s_lmpa);
}