// SPDX-License-Identifier: GPL-2.0
#include "debug.h"
#include "dso.h"
#include "map.h"
#include "maps.h"
#include "rwsem.h"
#include "ui/ui.h"
#include "unwind.h"
#include <assert.h>
#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <linux/zalloc.h>
#include <internal/rc_check.h>
/*
 * Locking/sorting note:
 *
 * Sorting is done under the write lock; iteration and binary searching
 * happen under the read lock and require the maps to be sorted. There is a
 * race between sorting releasing the write lock and acquiring the read lock
 * for iteration/searching, where another thread could insert and break the
 * sorting of the maps. In practice inserting maps should be rare, meaning
 * that the race shouldn't lead to livelock. Removal of maps doesn't break
 * being sorted.
 */
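
/*
 * A sketch of the reader-side retry pattern implied by the note above, as
 * used by the iteration/search functions below (maps__for_each_map(),
 * maps__find()); illustrative only, not part of the original source:
 *
 *	bool done = false;
 *
 *	while (!done) {
 *		down_read(maps__lock(maps));
 *		if (maps__maps_by_address_sorted(maps)) {
 *			// iterate or binary search under the read lock
 *			done = true;
 *		}
 *		up_read(maps__lock(maps));
 *		if (!done)
 *			maps__sort_by_address(maps); // takes the write lock
 *	}
 */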
DECLARE_RC_STRUCT(maps) {
	struct rw_semaphore lock;
	/**
	 * @maps_by_address: array of maps sorted by their starting address if
	 * maps_by_address_sorted is true.
	 */
	struct map	**maps_by_address;
	/**
	 * @maps_by_name: optional array of maps sorted by their dso name if
	 * maps_by_name_sorted is true.
	 */
	struct map	**maps_by_name;
	struct machine	*machine;
#ifdef HAVE_LIBUNWIND_SUPPORT
	void		*addr_space;
	const struct unwind_libunwind_ops *unwind_libunwind_ops;
#endif
	refcount_t	refcnt;
	/**
	 * @nr_maps: number of maps_by_address, and possibly maps_by_name,
	 * entries that contain maps.
	 */
	unsigned int	nr_maps;
	/**
	 * @nr_maps_allocated: number of entries in maps_by_address and
	 * possibly maps_by_name.
	 */
	unsigned int	nr_maps_allocated;
	/**
	 * @last_search_by_name_idx: cache of the last found by-name entry's
	 * index, as frequent searches for the same dso name are common.
	 */
	unsigned int	last_search_by_name_idx;
	/** @maps_by_address_sorted: is maps_by_address sorted. */
	bool		maps_by_address_sorted;
	/** @maps_by_name_sorted: is maps_by_name sorted. */
	bool		maps_by_name_sorted;
	/** @ends_broken: does maps contain a map whose end is unset or out of order? */
	bool		ends_broken;
};
static void check_invariants(const struct maps *maps __maybe_unused)
{
#ifndef NDEBUG
	assert(RC_CHK_ACCESS(maps)->nr_maps <= RC_CHK_ACCESS(maps)->nr_maps_allocated);
	for (unsigned int i = 0; i < RC_CHK_ACCESS(maps)->nr_maps; i++) {
		struct map *map = RC_CHK_ACCESS(maps)->maps_by_address[i];

		/* Check map is well-formed. */
		assert(map__end(map) == 0 || map__start(map) <= map__end(map));
		/* Expect at least 1 reference count. */
		assert(refcount_read(map__refcnt(map)) > 0);

		if (map__dso(map) && dso__kernel(map__dso(map)))
			assert(RC_CHK_EQUAL(map__kmap(map)->kmaps, maps));

		if (i > 0) {
			struct map *prev = RC_CHK_ACCESS(maps)->maps_by_address[i - 1];

			/* If addresses are sorted... */
			if (RC_CHK_ACCESS(maps)->maps_by_address_sorted) {
				/* Maps should be in start address order. */
				assert(map__start(prev) <= map__start(map));
				/*
				 * If the ends of maps aren't broken (during
				 * construction) then they should be ordered
				 * too.
				 */
				if (!RC_CHK_ACCESS(maps)->ends_broken) {
					assert(map__end(prev) <= map__end(map));
					assert(map__end(prev) <= map__start(map) ||
					       map__start(prev) == map__start(map));
				}
			}
		}
	}
	if (RC_CHK_ACCESS(maps)->maps_by_name) {
		for (unsigned int i = 0; i < RC_CHK_ACCESS(maps)->nr_maps; i++) {
			struct map *map = RC_CHK_ACCESS(maps)->maps_by_name[i];

			/*
			 * Maps by name maps should be in maps_by_address, so
			 * the reference count should be higher.
			 */
			assert(refcount_read(map__refcnt(map)) > 1);
		}
	}
#endif
}
static struct map **maps__maps_by_address(const struct maps *maps)
{
	return RC_CHK_ACCESS(maps)->maps_by_address;
}

static void maps__set_maps_by_address(struct maps *maps, struct map **new)
{
	RC_CHK_ACCESS(maps)->maps_by_address = new;
}

static void maps__set_nr_maps_allocated(struct maps *maps, unsigned int nr_maps_allocated)
{
	RC_CHK_ACCESS(maps)->nr_maps_allocated = nr_maps_allocated;
}

static void maps__set_nr_maps(struct maps *maps, unsigned int nr_maps)
{
	RC_CHK_ACCESS(maps)->nr_maps = nr_maps;
}

/* Not in the header, to aid reference counting. */
static struct map **maps__maps_by_name(const struct maps *maps)
{
	return RC_CHK_ACCESS(maps)->maps_by_name;
}

static void maps__set_maps_by_name(struct maps *maps, struct map **new)
{
	RC_CHK_ACCESS(maps)->maps_by_name = new;
}

static bool maps__maps_by_address_sorted(const struct maps *maps)
{
	return RC_CHK_ACCESS(maps)->maps_by_address_sorted;
}

static void maps__set_maps_by_address_sorted(struct maps *maps, bool value)
{
	RC_CHK_ACCESS(maps)->maps_by_address_sorted = value;
}

static bool maps__maps_by_name_sorted(const struct maps *maps)
{
	return RC_CHK_ACCESS(maps)->maps_by_name_sorted;
}

static void maps__set_maps_by_name_sorted(struct maps *maps, bool value)
{
	RC_CHK_ACCESS(maps)->maps_by_name_sorted = value;
}

struct machine *maps__machine(const struct maps *maps)
{
	return RC_CHK_ACCESS(maps)->machine;
}

unsigned int maps__nr_maps(const struct maps *maps)
{
	return RC_CHK_ACCESS(maps)->nr_maps;
}

refcount_t *maps__refcnt(struct maps *maps)
{
	return &RC_CHK_ACCESS(maps)->refcnt;
}

#ifdef HAVE_LIBUNWIND_SUPPORT
void *maps__addr_space(const struct maps *maps)
{
	return RC_CHK_ACCESS(maps)->addr_space;
}

void maps__set_addr_space(struct maps *maps, void *addr_space)
{
	RC_CHK_ACCESS(maps)->addr_space = addr_space;
}

const struct unwind_libunwind_ops *maps__unwind_libunwind_ops(const struct maps *maps)
{
	return RC_CHK_ACCESS(maps)->unwind_libunwind_ops;
}

void maps__set_unwind_libunwind_ops(struct maps *maps, const struct unwind_libunwind_ops *ops)
{
	RC_CHK_ACCESS(maps)->unwind_libunwind_ops = ops;
}
#endif

static struct rw_semaphore *maps__lock(struct maps *maps)
{
	return &RC_CHK_ACCESS(maps)->lock;
}
static void maps__init(struct maps *maps, struct machine *machine)
{
	init_rwsem(maps__lock(maps));
	RC_CHK_ACCESS(maps)->maps_by_address = NULL;
	RC_CHK_ACCESS(maps)->maps_by_name = NULL;
	RC_CHK_ACCESS(maps)->machine = machine;
#ifdef HAVE_LIBUNWIND_SUPPORT
	RC_CHK_ACCESS(maps)->addr_space = NULL;
	RC_CHK_ACCESS(maps)->unwind_libunwind_ops = NULL;
#endif
	refcount_set(maps__refcnt(maps), 1);
	RC_CHK_ACCESS(maps)->nr_maps = 0;
	RC_CHK_ACCESS(maps)->nr_maps_allocated = 0;
	RC_CHK_ACCESS(maps)->last_search_by_name_idx = 0;
	RC_CHK_ACCESS(maps)->maps_by_address_sorted = true;
	RC_CHK_ACCESS(maps)->maps_by_name_sorted = false;
	RC_CHK_ACCESS(maps)->ends_broken = false;
}
static void maps__exit(struct maps *maps)
{
	struct map **maps_by_address = maps__maps_by_address(maps);
	struct map **maps_by_name = maps__maps_by_name(maps);

	for (unsigned int i = 0; i < maps__nr_maps(maps); i++) {
		map__zput(maps_by_address[i]);
		if (maps_by_name)
			map__zput(maps_by_name[i]);
	}
	zfree(&maps_by_address);
	zfree(&maps_by_name);
	unwind__finish_access(maps);
}
struct maps *maps__new(struct machine *machine)
{
	struct maps *result;
	RC_STRUCT(maps) *maps = zalloc(sizeof(*maps));

	if (ADD_RC_CHK(result, maps))
		maps__init(result, machine);

	return result;
}
static void maps__delete(struct maps *maps)
{
	maps__exit(maps);
	RC_CHK_FREE(maps);
}
struct maps *maps__get(struct maps *maps)
{
	struct maps *result;

	if (RC_CHK_GET(result, maps))
		refcount_inc(maps__refcnt(maps));

	return result;
}
void maps__put(struct maps *maps)
{
	if (maps && refcount_dec_and_test(maps__refcnt(maps)))
		maps__delete(maps);
}
static void __maps__free_maps_by_name(struct maps *maps)
{
	if (!maps__maps_by_name(maps))
		return;

	/*
	 * Free everything; the by-name array will be recomputed on the next
	 * by-name search.
	 */
	for (unsigned int i = 0; i < maps__nr_maps(maps); i++)
		map__put(maps__maps_by_name(maps)[i]);

	zfree(&RC_CHK_ACCESS(maps)->maps_by_name);

	/* Consistent with maps__init(). When maps_by_name == NULL, maps_by_name_sorted == false. */
	maps__set_maps_by_name_sorted(maps, false);
}
static int map__start_cmp(const void *a, const void *b)
{
	const struct map *map_a = *(const struct map * const *)a;
	const struct map *map_b = *(const struct map * const *)b;
	u64 map_a_start = map__start(map_a);
	u64 map_b_start = map__start(map_b);

	if (map_a_start == map_b_start) {
		u64 map_a_end = map__end(map_a);
		u64 map_b_end = map__end(map_b);

		if (map_a_end == map_b_end) {
			/* Ensure maps with the same addresses have a fixed order. */
			if (RC_CHK_ACCESS(map_a) == RC_CHK_ACCESS(map_b))
				return 0;
			return (intptr_t)RC_CHK_ACCESS(map_a) > (intptr_t)RC_CHK_ACCESS(map_b)
				? 1 : -1;
		}
		return map_a_end > map_b_end ? 1 : -1;
	}
	return map_a_start > map_b_start ? 1 : -1;
}
static void __maps__sort_by_address(struct maps *maps)
{
	if (maps__maps_by_address_sorted(maps))
		return;

	qsort(maps__maps_by_address(maps),
	      maps__nr_maps(maps),
	      sizeof(struct map *),
	      map__start_cmp);
	maps__set_maps_by_address_sorted(maps, true);
}
static void maps__sort_by_address(struct maps *maps)
{
	down_write(maps__lock(maps));
	__maps__sort_by_address(maps);
	up_write(maps__lock(maps));
}
static int map__strcmp(const void *a, const void *b)
{
	const struct map *map_a = *(const struct map * const *)a;
	const struct map *map_b = *(const struct map * const *)b;
	const struct dso *dso_a = map__dso(map_a);
	const struct dso *dso_b = map__dso(map_b);
	int ret = strcmp(dso__short_name(dso_a), dso__short_name(dso_b));

	if (ret == 0 && RC_CHK_ACCESS(map_a) != RC_CHK_ACCESS(map_b)) {
		/* Ensure distinct but name equal maps have an order. */
		return map__start_cmp(a, b);
	}
	return ret;
}
static int maps__sort_by_name(struct maps *maps)
{
	int err = 0;

	down_write(maps__lock(maps));
	if (!maps__maps_by_name_sorted(maps)) {
		struct map **maps_by_name = maps__maps_by_name(maps);

		if (!maps_by_name) {
			maps_by_name = malloc(RC_CHK_ACCESS(maps)->nr_maps_allocated *
					      sizeof(*maps_by_name));
			if (!maps_by_name)
				err = -ENOMEM;
			else {
				struct map **maps_by_address = maps__maps_by_address(maps);
				unsigned int n = maps__nr_maps(maps);

				maps__set_maps_by_name(maps, maps_by_name);
				for (unsigned int i = 0; i < n; i++)
					maps_by_name[i] = map__get(maps_by_address[i]);
			}
		}
		if (!err) {
			qsort(maps_by_name,
			      maps__nr_maps(maps),
			      sizeof(struct map *),
			      map__strcmp);
			maps__set_maps_by_name_sorted(maps, true);
		}
	}
	check_invariants(maps);
	up_write(maps__lock(maps));
	return err;
}
static unsigned int maps__by_address_index(const struct maps *maps, const struct map *map)
{
	struct map **maps_by_address = maps__maps_by_address(maps);

	if (maps__maps_by_address_sorted(maps)) {
		struct map **mapp =
			bsearch(&map, maps__maps_by_address(maps), maps__nr_maps(maps),
				sizeof(*mapp), map__start_cmp);

		if (mapp)
			return mapp - maps_by_address;
	} else {
		for (unsigned int i = 0; i < maps__nr_maps(maps); i++) {
			if (RC_CHK_ACCESS(maps_by_address[i]) == RC_CHK_ACCESS(map))
				return i;
		}
	}
	pr_err("Map missing from maps");
	return -1;
}
static unsigned int maps__by_name_index(const struct maps *maps, const struct map *map)
{
	struct map **maps_by_name = maps__maps_by_name(maps);

	if (maps__maps_by_name_sorted(maps)) {
		struct map **mapp =
			bsearch(&map, maps_by_name, maps__nr_maps(maps),
				sizeof(*mapp), map__strcmp);

		if (mapp)
			return mapp - maps_by_name;
	} else {
		for (unsigned int i = 0; i < maps__nr_maps(maps); i++) {
			if (RC_CHK_ACCESS(maps_by_name[i]) == RC_CHK_ACCESS(map))
				return i;
		}
	}
	pr_err("Map missing from maps");
	return -1;
}
static int __maps__insert(struct maps *maps, struct map *new)
{
	struct map **maps_by_address = maps__maps_by_address(maps);
	struct map **maps_by_name = maps__maps_by_name(maps);
	const struct dso *dso = map__dso(new);
	unsigned int nr_maps = maps__nr_maps(maps);
	unsigned int nr_allocate = RC_CHK_ACCESS(maps)->nr_maps_allocated;

	if (nr_maps + 1 > nr_allocate) {
		nr_allocate = !nr_allocate ? 32 : nr_allocate * 2;

		maps_by_address = realloc(maps_by_address, nr_allocate * sizeof(new));
		if (!maps_by_address)
			return -ENOMEM;

		maps__set_maps_by_address(maps, maps_by_address);
		if (maps_by_name) {
			maps_by_name = realloc(maps_by_name, nr_allocate * sizeof(new));
			if (!maps_by_name) {
				/*
				 * If by name fails, just disable by name and it will
				 * recompute next time it is required.
				 */
				__maps__free_maps_by_name(maps);
			}
			maps__set_maps_by_name(maps, maps_by_name);
		}
		RC_CHK_ACCESS(maps)->nr_maps_allocated = nr_allocate;
	}
	/* Insert the value at the end. */
	maps_by_address[nr_maps] = map__get(new);
	if (maps_by_name)
		maps_by_name[nr_maps] = map__get(new);

	nr_maps++;
	RC_CHK_ACCESS(maps)->nr_maps = nr_maps;

	/*
	 * Recompute if things are sorted. If things are inserted in a sorted
	 * manner, for example by processing /proc/pid/maps, then no
	 * sorting/resorting will be necessary.
	 */
	if (nr_maps == 1) {
		/* If there's just 1 entry then maps are sorted. */
		maps__set_maps_by_address_sorted(maps, true);
		maps__set_maps_by_name_sorted(maps, maps_by_name != NULL);
	} else {
		/* Sorted if maps were already sorted and this map starts after the last one. */
		maps__set_maps_by_address_sorted(maps,
			maps__maps_by_address_sorted(maps) &&
			map__end(maps_by_address[nr_maps - 2]) <= map__start(new));
		maps__set_maps_by_name_sorted(maps, false);
	}
	if (map__end(new) < map__start(new))
		RC_CHK_ACCESS(maps)->ends_broken = true;
	if (dso && dso__kernel(dso)) {
		struct kmap *kmap = map__kmap(new);

		if (kmap)
			kmap->kmaps = maps;
		else
			pr_err("Internal error: kernel dso with non kernel map\n");
	}
	return 0;
}
int maps__insert(struct maps *maps, struct map *map)
{
	int ret;

	down_write(maps__lock(maps));
	ret = __maps__insert(maps, map);
	check_invariants(maps);
	up_write(maps__lock(maps));
	return ret;
}
static void __maps__remove(struct maps *maps, struct map *map)
{
	struct map **maps_by_address = maps__maps_by_address(maps);
	struct map **maps_by_name = maps__maps_by_name(maps);
	unsigned int nr_maps = maps__nr_maps(maps);
	unsigned int address_idx;

	/* Slide later mappings over the one to remove */
	address_idx = maps__by_address_index(maps, map);
	map__put(maps_by_address[address_idx]);
	memmove(&maps_by_address[address_idx],
		&maps_by_address[address_idx + 1],
		(nr_maps - address_idx - 1) * sizeof(*maps_by_address));

	if (maps_by_name) {
		unsigned int name_idx = maps__by_name_index(maps, map);

		map__put(maps_by_name[name_idx]);
		memmove(&maps_by_name[name_idx],
			&maps_by_name[name_idx + 1],
			(nr_maps - name_idx - 1) * sizeof(*maps_by_name));
	}

	--RC_CHK_ACCESS(maps)->nr_maps;
}
void maps__remove(struct maps *maps, struct map *map)
{
	down_write(maps__lock(maps));
	__maps__remove(maps, map);
	check_invariants(maps);
	up_write(maps__lock(maps));
}
bool maps__empty(struct maps *maps)
{
	bool res;

	down_read(maps__lock(maps));
	res = maps__nr_maps(maps) == 0;
	up_read(maps__lock(maps));

	return res;
}
bool maps__equal(struct maps *a, struct maps *b)
{
	return RC_CHK_EQUAL(a, b);
}
int maps__for_each_map(struct maps *maps, int (*cb)(struct map *map, void *data), void *data)
{
	bool done = false;
	int ret = 0;

	/* See locking/sorting note. */
	while (!done) {
		down_read(maps__lock(maps));
		if (maps__maps_by_address_sorted(maps)) {
			/*
			 * maps__for_each_map callbacks may buggily/unsafely
			 * insert into maps_by_address. Deliberately reload
			 * maps__nr_maps and maps_by_address on each iteration
			 * to avoid using memory freed by maps__insert growing
			 * the array - this may cause maps to be skipped or
			 * repeated.
			 */
			for (unsigned int i = 0; i < maps__nr_maps(maps); i++) {
				struct map **maps_by_address = maps__maps_by_address(maps);
				struct map *map = maps_by_address[i];

				ret = cb(map, data);
				if (ret)
					break;
			}
			done = true;
		}
		up_read(maps__lock(maps));
		if (!done)
			maps__sort_by_address(maps);
	}
	return ret;
}
void maps__remove_maps(struct maps *maps, bool (*cb)(struct map *map, void *data), void *data)
{
	struct map **maps_by_address;

	down_write(maps__lock(maps));

	maps_by_address = maps__maps_by_address(maps);
	for (unsigned int i = 0; i < maps__nr_maps(maps);) {
		if (cb(maps_by_address[i], data))
			__maps__remove(maps, maps_by_address[i]);
		else
			i++;
	}
	check_invariants(maps);
	up_write(maps__lock(maps));
}
struct symbol *maps__find_symbol(struct maps *maps, u64 addr, struct map **mapp)
{
	struct map *map = maps__find(maps, addr);
	struct symbol *result = NULL;

	/* Ensure map is loaded before using map->map_ip */
	if (map != NULL && map__load(map) >= 0)
		result = map__find_symbol(map, map__map_ip(map, addr));

	if (mapp)
		*mapp = map;
	else
		map__put(map);

	return result;
}
struct maps__find_symbol_by_name_args {
	struct map **mapp;
	const char *name;
	struct symbol *sym;
};
static int maps__find_symbol_by_name_cb(struct map *map, void *data)
{
	struct maps__find_symbol_by_name_args *args = data;

	args->sym = map__find_symbol_by_name(map, args->name);
	if (!args->sym)
		return 0;

	if (!map__contains_symbol(map, args->sym)) {
		args->sym = NULL;
		return 0;
	}

	if (args->mapp != NULL)
		*args->mapp = map__get(map);
	return 1;
}
struct symbol *maps__find_symbol_by_name(struct maps *maps, const char *name, struct map **mapp)
{
	struct maps__find_symbol_by_name_args args = {
		.mapp = mapp,
		.name = name,
		.sym = NULL,
	};

	maps__for_each_map(maps, maps__find_symbol_by_name_cb, &args);

	return args.sym;
}
int maps__find_ams(struct maps *maps, struct addr_map_symbol *ams)
{
	if (ams->addr < map__start(ams->ms.map) || ams->addr >= map__end(ams->ms.map)) {
		if (maps == NULL)
			return -1;

		ams->ms.map = maps__find(maps, ams->addr);
		if (ams->ms.map == NULL)
			return -1;
	}

	ams->al_addr = map__map_ip(ams->ms.map, ams->addr);
	ams->ms.sym = map__find_symbol(ams->ms.map, ams->al_addr);

	return ams->ms.sym ? 0 : -1;
}
struct maps__fprintf_args {
	FILE *fp;
	size_t printed;
};
static int maps__fprintf_cb(struct map *map, void *data)
{
	struct maps__fprintf_args *args = data;

	args->printed += fprintf(args->fp, "Map:");
	args->printed += map__fprintf(map, args->fp);
	if (verbose > 2) {
		args->printed += dso__fprintf(map__dso(map), args->fp);
		args->printed += fprintf(args->fp, "--\n");
	}
	return 0;
}
size_t maps__fprintf(struct maps *maps, FILE *fp)
{
	struct maps__fprintf_args args = {
		.fp = fp,
		.printed = 0,
	};

	maps__for_each_map(maps, maps__fprintf_cb, &args);

	return args.printed;
}
/*
 * Find first map where end > map->start.
 * Same as find_vma() in kernel.
 */
static unsigned int first_ending_after(struct maps *maps, const struct map *map)
{
	struct map **maps_by_address = maps__maps_by_address(maps);
	int low = 0, high = (int)maps__nr_maps(maps) - 1, first = high + 1;

	assert(maps__maps_by_address_sorted(maps));
	if (low <= high && map__end(maps_by_address[0]) > map__start(map))
		return 0;

	while (low <= high) {
		int mid = (low + high) / 2;
		struct map *pos = maps_by_address[mid];

		if (map__end(pos) > map__start(map)) {
			first = mid;
			if (map__start(pos) <= map__start(map)) {
				/* Entry overlaps map. */
				break;
			}
			high = mid - 1;
		} else
			low = mid + 1;
	}
	return first;
}
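
/*
 * Worked example for first_ending_after() (illustrative, not from the
 * original source): with sorted entries [0x1000,0x2000) [0x2000,0x3000)
 * [0x4000,0x5000), a map starting at 0x2800 yields index 1, the first entry
 * whose end (0x3000) is greater than 0x2800; a map starting at 0x5000 yields
 * 3 (== nr_maps) as no entry ends after it.
 */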
static int __maps__insert_sorted(struct maps *maps, unsigned int first_after_index,
				 struct map *new1, struct map *new2)
{
	struct map **maps_by_address = maps__maps_by_address(maps);
	struct map **maps_by_name = maps__maps_by_name(maps);
	unsigned int nr_maps = maps__nr_maps(maps);
	unsigned int nr_allocate = RC_CHK_ACCESS(maps)->nr_maps_allocated;
	unsigned int to_add = new2 ? 2 : 1;

	assert(maps__maps_by_address_sorted(maps));
	assert(first_after_index == nr_maps ||
	       map__end(new1) <= map__start(maps_by_address[first_after_index]));
	assert(!new2 || map__end(new1) <= map__start(new2));
	assert(first_after_index == nr_maps || !new2 ||
	       map__end(new2) <= map__start(maps_by_address[first_after_index]));

	if (nr_maps + to_add > nr_allocate) {
		nr_allocate = !nr_allocate ? 32 : nr_allocate * 2;

		maps_by_address = realloc(maps_by_address, nr_allocate * sizeof(new1));
		if (!maps_by_address)
			return -ENOMEM;

		maps__set_maps_by_address(maps, maps_by_address);
		if (maps_by_name) {
			maps_by_name = realloc(maps_by_name, nr_allocate * sizeof(new1));
			if (!maps_by_name) {
				/*
				 * If by name fails, just disable by name and it will
				 * recompute next time it is required.
				 */
				__maps__free_maps_by_name(maps);
			}
			maps__set_maps_by_name(maps, maps_by_name);
		}
		RC_CHK_ACCESS(maps)->nr_maps_allocated = nr_allocate;
	}
	memmove(&maps_by_address[first_after_index + to_add],
		&maps_by_address[first_after_index],
		(nr_maps - first_after_index) * sizeof(new1));
	maps_by_address[first_after_index] = map__get(new1);
	if (maps_by_name)
		maps_by_name[nr_maps] = map__get(new1);
	if (new2) {
		maps_by_address[first_after_index + 1] = map__get(new2);
		if (maps_by_name)
			maps_by_name[nr_maps + 1] = map__get(new2);
	}
	RC_CHK_ACCESS(maps)->nr_maps = nr_maps + to_add;
	maps__set_maps_by_name_sorted(maps, false);
	check_invariants(maps);

	return 0;
}
/*
 * Adds new to maps; if new overlaps existing entries then the existing maps
 * are adjusted or removed so that new fits without overlapping any entries.
 */
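/*
 * Illustration (not from the original source) of the overlap fixup below.
 * Inserting `new` over an existing `pos` that extends past it on both sides
 * splits `pos` into `before` and `after` clones with adjusted boundaries:
 *
 *	existing:  |------------pos------------|
 *	insert:            |----new----|
 *	result:    |before-|----new----|-after-|
 *
 * `after` also has its pgoff adjusted so file offsets stay consistent.
 */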
static int __maps__fixup_overlap_and_insert(struct maps *maps, struct map *new)
{
	int err = 0;
	FILE *fp = debug_file();
	unsigned int i;

	if (!maps__maps_by_address_sorted(maps))
		__maps__sort_by_address(maps);

	/*
	 * Iterate through entries where the end of the existing entry is
	 * greater-than the new map's start.
	 */
	for (i = first_ending_after(maps, new); i < maps__nr_maps(maps); ) {
		struct map **maps_by_address = maps__maps_by_address(maps);
		struct map *pos = maps_by_address[i];
		struct map *before = NULL, *after = NULL;

		/*
		 * Stop if the current map starts after new's end.
		 * Maps are ordered by start: the next one won't overlap for sure.
		 */
		if (map__start(pos) >= map__end(new))
			break;

		if (use_browser) {
			pr_debug("overlapping maps in %s (disable tui for more info)\n",
				 dso__name(map__dso(new)));
		} else if (verbose >= 2) {
			pr_debug("overlapping maps:\n");
			map__fprintf(new, fp);
			map__fprintf(pos, fp);
		}

		/*
		 * Now check if we need to create new maps for areas not
		 * overlapped by the new map:
		 */
		if (map__start(new) > map__start(pos)) {
			/* Map starts within existing map. Need to shorten the existing map. */
			before = map__clone(pos);

			if (before == NULL) {
				err = -ENOMEM;
				goto out_err;
			}
			map__set_end(before, map__start(new));

			if (verbose >= 2 && !use_browser)
				map__fprintf(before, fp);
		}
		if (map__end(new) < map__end(pos)) {
			/* The new map isn't as long as the existing map. */
			after = map__clone(pos);

			if (after == NULL) {
				map__zput(before);
				err = -ENOMEM;
				goto out_err;
			}

			map__set_start(after, map__end(new));
			map__add_pgoff(after, map__end(new) - map__start(pos));
			assert(map__map_ip(pos, map__end(new)) ==
			       map__map_ip(after, map__end(new)));

			if (verbose >= 2 && !use_browser)
				map__fprintf(after, fp);
		}
		/*
		 * If adding one entry, for `before` or `after`, we can replace
		 * the existing entry. If both `before` and `after` are
		 * necessary then an insert is needed. If the new entry
		 * entirely overlaps the existing entry it can just be removed.
		 */
		if (before) {
			map__put(maps_by_address[i]);
			maps_by_address[i] = before;
			/* Maps are still ordered, go to next one. */
			i++;
			if (after) {
				/*
				 * 'before' and 'after' mean 'new' split the
				 * 'pos' mapping and therefore there are no
				 * later mappings.
				 */
				err = __maps__insert_sorted(maps, i, new, after);
				map__put(after);
				check_invariants(maps);
				return err;
			}
			check_invariants(maps);
		} else if (after) {
			/*
			 * 'after' means 'new' split 'pos' and there are no
			 * later mappings.
			 */
			map__put(maps_by_address[i]);
			maps_by_address[i] = map__get(new);
			err = __maps__insert_sorted(maps, i + 1, after, NULL);
			map__put(after);
			check_invariants(maps);
			return err;
		} else {
			struct map *next = NULL;

			if (i + 1 < maps__nr_maps(maps))
				next = maps_by_address[i + 1];

			if (!next || map__start(next) >= map__end(new)) {
				/*
				 * Replace the existing mapping and finish,
				 * knowing no later mappings overlap new.
				 */
				map__put(maps_by_address[i]);
				maps_by_address[i] = map__get(new);
				check_invariants(maps);
				return err;
			}
			__maps__remove(maps, pos);
			check_invariants(maps);
			/*
			 * Maps are ordered but no need to increase `i` as the
			 * later maps were moved down.
			 */
		}
	}
	/* Add the map. */
	err = __maps__insert_sorted(maps, i, new, NULL);
out_err:
	return err;
}
int maps__fixup_overlap_and_insert(struct maps *maps, struct map *new)
{
	int err;

	down_write(maps__lock(maps));
	err = __maps__fixup_overlap_and_insert(maps, new);
	up_write(maps__lock(maps));
	return err;
}
int maps__copy_from(struct maps *dest, struct maps *parent)
{
	/* Note, if struct map were immutable then cloning could use ref counts. */
	struct map **parent_maps_by_address;
	int err = 0;
	unsigned int n;

	down_write(maps__lock(dest));
	down_read(maps__lock(parent));

	parent_maps_by_address = maps__maps_by_address(parent);
	n = maps__nr_maps(parent);
	if (maps__nr_maps(dest) == 0) {
		/* No existing mappings so just copy from parent to avoid reallocs in insert. */
		unsigned int nr_maps_allocated = RC_CHK_ACCESS(parent)->nr_maps_allocated;
		struct map **dest_maps_by_address =
			malloc(nr_maps_allocated * sizeof(struct map *));
		struct map **dest_maps_by_name = NULL;

		if (!dest_maps_by_address)
			err = -ENOMEM;
		else {
			if (maps__maps_by_name(parent)) {
				dest_maps_by_name =
					malloc(nr_maps_allocated * sizeof(struct map *));
			}

			RC_CHK_ACCESS(dest)->maps_by_address = dest_maps_by_address;
			RC_CHK_ACCESS(dest)->maps_by_name = dest_maps_by_name;
			RC_CHK_ACCESS(dest)->nr_maps_allocated = nr_maps_allocated;
		}

		for (unsigned int i = 0; !err && i < n; i++) {
			struct map *pos = parent_maps_by_address[i];
			struct map *new = map__clone(pos);

			if (!new)
				err = -ENOMEM;
			else {
				err = unwind__prepare_access(dest, new, NULL);
				if (!err) {
					dest_maps_by_address[i] = new;
					if (dest_maps_by_name)
						dest_maps_by_name[i] = map__get(new);
					RC_CHK_ACCESS(dest)->nr_maps = i + 1;
				} else
					map__put(new);
			}
		}
		maps__set_maps_by_address_sorted(dest, maps__maps_by_address_sorted(parent));
		if (!err) {
			RC_CHK_ACCESS(dest)->last_search_by_name_idx =
				RC_CHK_ACCESS(parent)->last_search_by_name_idx;
			maps__set_maps_by_name_sorted(dest,
						      dest_maps_by_name &&
						      maps__maps_by_name_sorted(parent));
		} else {
			RC_CHK_ACCESS(dest)->last_search_by_name_idx = 0;
			maps__set_maps_by_name_sorted(dest, false);
		}
	} else {
		/* Unexpected copying to a maps containing entries. */
		for (unsigned int i = 0; !err && i < n; i++) {
			struct map *pos = parent_maps_by_address[i];
			struct map *new = map__clone(pos);

			if (!new)
				err = -ENOMEM;
			else {
				err = unwind__prepare_access(dest, new, NULL);
				if (!err)
					err = __maps__insert(dest, new);
				map__put(new);
			}
		}
	}
	check_invariants(dest);

	up_read(maps__lock(parent));
	up_write(maps__lock(dest));
	return err;
}
static int map__addr_cmp(const void *key, const void *entry)
{
	const u64 ip = *(const u64 *)key;
	const struct map *map = *(const struct map * const *)entry;

	if (ip < map__start(map))
		return -1;
	if (ip >= map__end(map))
		return 1;
	return 0;
}
*maps__find(struct maps
*maps
, u64 ip
)
1038 struct map
*result
= NULL
;
1041 /* See locking/sorting note. */
1043 down_read(maps__lock(maps
));
1044 if (maps__maps_by_address_sorted(maps
)) {
1046 bsearch(&ip
, maps__maps_by_address(maps
), maps__nr_maps(maps
),
1047 sizeof(*mapp
), map__addr_cmp
);
1050 result
= map__get(*mapp
);
1053 up_read(maps__lock(maps
));
1055 maps__sort_by_address(maps
);
static int map__strcmp_name(const void *name, const void *b)
{
	const struct dso *dso = map__dso(*(const struct map **)b);

	return strcmp(name, dso__short_name(dso));
}
struct map *maps__find_by_name(struct maps *maps, const char *name)
{
	struct map *result = NULL;
	bool done = false;

	/* See locking/sorting note. */
	while (!done) {
		unsigned int i;

		down_read(maps__lock(maps));

		/* First check last found entry. */
		i = RC_CHK_ACCESS(maps)->last_search_by_name_idx;
		if (i < maps__nr_maps(maps) && maps__maps_by_name(maps)) {
			struct dso *dso = map__dso(maps__maps_by_name(maps)[i]);

			if (dso && strcmp(dso__short_name(dso), name) == 0) {
				result = map__get(maps__maps_by_name(maps)[i]);
				done = true;
			}
		}

		/* Second search sorted array. */
		if (!done && maps__maps_by_name_sorted(maps)) {
			struct map **mapp =
				bsearch(name, maps__maps_by_name(maps), maps__nr_maps(maps),
					sizeof(*mapp), map__strcmp_name);

			if (mapp) {
				result = map__get(*mapp);
				i = mapp - maps__maps_by_name(maps);
				RC_CHK_ACCESS(maps)->last_search_by_name_idx = i;
			}
			done = true;
		}
		up_read(maps__lock(maps));
		if (!done) {
			/* Sort and retry binary search. */
			if (maps__sort_by_name(maps)) {
				/*
				 * Memory allocation failed, do a linear search
				 * through the address-sorted maps.
				 */
				struct map **maps_by_address;
				unsigned int n;

				down_read(maps__lock(maps));
				maps_by_address = maps__maps_by_address(maps);
				n = maps__nr_maps(maps);
				for (i = 0; i < n; i++) {
					struct map *pos = maps_by_address[i];
					struct dso *dso = map__dso(pos);

					if (dso && strcmp(dso__short_name(dso), name) == 0) {
						result = map__get(pos);
						break;
					}
				}
				up_read(maps__lock(maps));
				done = true;
			}
		}
	}
	return result;
}
struct map *maps__find_next_entry(struct maps *maps, struct map *map)
{
	unsigned int i;
	struct map *result = NULL;

	down_read(maps__lock(maps));
	/* The entry after map's index is the next one. */
	i = maps__by_address_index(maps, map) + 1;
	if (i < maps__nr_maps(maps))
		result = map__get(maps__maps_by_address(maps)[i]);

	up_read(maps__lock(maps));
	return result;
}
void maps__fixup_end(struct maps *maps)
{
	struct map **maps_by_address;
	unsigned int n;

	down_write(maps__lock(maps));
	if (!maps__maps_by_address_sorted(maps))
		__maps__sort_by_address(maps);

	maps_by_address = maps__maps_by_address(maps);
	n = maps__nr_maps(maps);
	for (unsigned int i = 1; i < n; i++) {
		struct map *prev = maps_by_address[i - 1];
		struct map *curr = maps_by_address[i];

		if (!map__end(prev) || map__end(prev) > map__start(curr))
			map__set_end(prev, map__start(curr));
	}

	/*
	 * We still don't have the actual symbols, so guess the
	 * last map's final address.
	 */
	if (n > 0 && !map__end(maps_by_address[n - 1]))
		map__set_end(maps_by_address[n - 1], ~0ULL);

	RC_CHK_ACCESS(maps)->ends_broken = false;
	check_invariants(maps);

	up_write(maps__lock(maps));
}
/*
 * Merges map into maps by splitting the new map within the existing map
 * boundaries.
 */
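/*
 * Illustration (not from the original source): merging a large kernel text
 * map into kmaps that already contain module maps leaves the modules intact
 * and splits new_map around them:
 *
 *	kmaps:    ......|--mod1--|......|--mod2--|......
 *	new_map:  |----------------kernel----------------|
 *	result:   |-km--|--mod1--|--km--|--mod2--|--km---|
 */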
int maps__merge_in(struct maps *kmaps, struct map *new_map)
{
	unsigned int first_after_, kmaps__nr_maps;
	struct map **kmaps_maps_by_address;
	struct map **merged_maps_by_address;
	unsigned int merged_nr_maps_allocated;

	/* First try under a read lock. */
	while (true) {
		down_read(maps__lock(kmaps));
		if (maps__maps_by_address_sorted(kmaps))
			break;

		up_read(maps__lock(kmaps));

		/* First after binary search requires sorted maps. Sort and try again. */
		maps__sort_by_address(kmaps);
	}
	first_after_ = first_ending_after(kmaps, new_map);
	kmaps_maps_by_address = maps__maps_by_address(kmaps);

	if (first_after_ >= maps__nr_maps(kmaps) ||
	    map__start(kmaps_maps_by_address[first_after_]) >= map__end(new_map)) {
		/* No overlap so regular insert suffices. */
		up_read(maps__lock(kmaps));
		return maps__insert(kmaps, new_map);
	}
	up_read(maps__lock(kmaps));

	/* Plain insert with a read-lock failed, try again now with the write lock. */
	down_write(maps__lock(kmaps));
	if (!maps__maps_by_address_sorted(kmaps))
		__maps__sort_by_address(kmaps);

	first_after_ = first_ending_after(kmaps, new_map);
	kmaps_maps_by_address = maps__maps_by_address(kmaps);
	kmaps__nr_maps = maps__nr_maps(kmaps);

	if (first_after_ >= kmaps__nr_maps ||
	    map__start(kmaps_maps_by_address[first_after_]) >= map__end(new_map)) {
		/* No overlap so regular insert suffices. */
		int ret = __maps__insert(kmaps, new_map);

		check_invariants(kmaps);
		up_write(maps__lock(kmaps));
		return ret;
	}
	/* Array to merge into, possibly 1 more for the sake of new_map. */
	merged_nr_maps_allocated = RC_CHK_ACCESS(kmaps)->nr_maps_allocated;
	if (kmaps__nr_maps + 1 == merged_nr_maps_allocated)
		merged_nr_maps_allocated++;

	merged_maps_by_address = malloc(merged_nr_maps_allocated * sizeof(*merged_maps_by_address));
	if (!merged_maps_by_address) {
		up_write(maps__lock(kmaps));
		return -ENOMEM;
	}
	maps__set_maps_by_address(kmaps, merged_maps_by_address);
	maps__set_maps_by_address_sorted(kmaps, true);
	__maps__free_maps_by_name(kmaps);
	maps__set_nr_maps_allocated(kmaps, merged_nr_maps_allocated);

	/* Copy entries before the new_map that can't overlap. */
	for (unsigned int i = 0; i < first_after_; i++)
		merged_maps_by_address[i] = map__get(kmaps_maps_by_address[i]);

	maps__set_nr_maps(kmaps, first_after_);

	/* Add the new map, it will be split when the later overlapping mappings are added. */
	__maps__insert(kmaps, new_map);

	/* Insert mappings after new_map, splitting new_map in the process. */
	for (unsigned int i = first_after_; i < kmaps__nr_maps; i++)
		__maps__fixup_overlap_and_insert(kmaps, kmaps_maps_by_address[i]);

	/* Release the old maps and the old maps_by_address array. */
	for (unsigned int i = 0; i < kmaps__nr_maps; i++)
		map__zput(kmaps_maps_by_address[i]);

	free(kmaps_maps_by_address);
	check_invariants(kmaps);
	up_write(maps__lock(kmaps));
	return 0;
}
void maps__load_first(struct maps *maps)
{
	down_read(maps__lock(maps));

	if (maps__nr_maps(maps) > 0)
		map__load(maps__maps_by_address(maps)[0]);

	up_read(maps__lock(maps));
}
));