1 /* SPDX-License-Identifier: GPL-2.0-only */
5 #include <commonlib/helpers.h>
6 #include <console/console.h>
/* NOTE(review): this chunk is a mangled extract — each fragment carries an
 * embedded original line number, and gaps in the numbering show statements
 * are missing. Code left byte-identical; comments only. */
/* Insert entry r into a singly-linked list at the position referenced by
 * prev_ptr. Body not visible in this extract (original lines 11-14 absent);
 * presumably a head-insert — confirm against the full file. */
9 static inline void range_entry_link(struct range_entry
**prev_ptr
,
10 struct range_entry
*r
)
/* Remove entry r from the singly-linked list position referenced by
 * prev_ptr. Body not visible in this extract (original lines 18-21 absent). */
16 static inline void range_entry_unlink(struct range_entry
**prev_ptr
,
17 struct range_entry
*r
)
/* Unlink r from the list position prev_ptr and push it onto
 * ranges->free_list for later reuse — entries are recycled, not returned
 * to the heap here. */
23 static inline void range_entry_unlink_and_free(struct memranges
*ranges
,
24 struct range_entry
**prev_ptr
,
25 struct range_entry
*r
)
27 range_entry_unlink(prev_ptr
, r
);
28 range_entry_link(&ranges
->free_list
, r
);
/* Obtain a range_entry: pop the head of ranges->free_list if one is
 * available; otherwise, in a payload-loader build (ENV_PAYLOAD_LOADER),
 * fall back to malloc(). The "return r" for the free-list path and the
 * final failure return are not visible in this extract (original lines
 * 38-39 and 42-43 absent) — presumably returns NULL when no entry can be
 * obtained; confirm against the full file. */
31 static struct range_entry
*alloc_range(struct memranges
*ranges
)
33 if (ranges
->free_list
!= NULL
) {
34 struct range_entry
*r
;
36 r
= ranges
->free_list
;
37 range_entry_unlink(&ranges
->free_list
, r
);
40 if (ENV_PAYLOAD_LOADER
)
41 return malloc(sizeof(struct range_entry
));
/* Allocate a new entry covering [begin, end] with the given tag and link
 * it into the list at prev_ptr. On allocation failure an error is logged;
 * the early-return path is not visible in this extract. Only the ->begin
 * assignment survives here — the ->end and ->tag assignments (original
 * lines 57-58) are absent. Presumably returns new_entry on success —
 * confirm against the full file. */
45 static inline struct range_entry
*
46 range_list_add(struct memranges
*ranges
, struct range_entry
**prev_ptr
,
47 resource_t begin
, resource_t end
, unsigned long tag
)
49 struct range_entry
*new_entry
;
51 new_entry
= alloc_range(ranges
);
52 if (new_entry
== NULL
) {
53 printk(BIOS_ERR
, "Could not allocate range_entry!\n");
56 new_entry
->begin
= begin
;
59 range_entry_link(prev_ptr
, new_entry
);
/* Walk the entry list and merge adjacent/overlapping entries that carry
 * the same tag: when prev->end + 1 >= cur->begin and the tags match, cur
 * is unlinked and recycled. The first-iteration prev initialization, the
 * prev->end extension, and the "cur = prev" reset (original lines 73-77,
 * 82, 86+) are absent from this extract. */
64 static void merge_neighbor_entries(struct memranges
*ranges
)
66 struct range_entry
*cur
;
67 struct range_entry
*prev
;
70 /* Merge all neighbors and delete/free the leftover entry. */
71 for (cur
= ranges
->entries
; cur
!= NULL
; cur
= cur
->next
) {
72 /* First entry. Just set prev. */
78 /* If the previous entry merges with the current update the
79 * previous entry to cover full range and delete current from
81 if (prev
->end
+ 1 >= cur
->begin
&& prev
->tag
== cur
->tag
) {
83 range_entry_unlink_and_free(ranges
, &prev
->next
, cur
);
84 /* Set cur to prev so cur->next is valid since cur
85 * was just unlinked and free. */
/* Remove [begin, end] from the range list, handling four overlap cases:
 * full removal of an entry, clipping at the start, clipping at the end,
 * and punching a hole in the middle (which splits the entry via
 * range_list_add). Several statements are absent from this extract: the
 * trailing parameter(s) after "end" (original lines 96-97), the
 * next-caching assignment (~107), the break/continue statements, the
 * tmp_end computation (~138-140), and the tag argument of the split call
 * (~145). Code left byte-identical. */
94 static void remove_memranges(struct memranges
*ranges
,
95 resource_t begin
, resource_t end
,
98 struct range_entry
*cur
;
99 struct range_entry
*next
;
100 struct range_entry
**prev_ptr
;
102 prev_ptr
= &ranges
->entries
;
103 for (cur
= ranges
->entries
; cur
!= NULL
; cur
= next
) {
106 /* Cache the next value to handle unlinks. */
109 /* No other ranges are affected. */
110 if (end
< cur
->begin
)
113 /* The removal range starts after this one. */
114 if (begin
> cur
->end
) {
115 prev_ptr
= &cur
->next
;
119 /* The removal range overlaps with the current entry either
120 * partially or fully. However, we need to adjust the removal
121 * range for any holes. */
122 if (begin
<= cur
->begin
) {
126 if (end
>= cur
->end
) {
127 begin
= cur
->end
+ 1;
128 range_entry_unlink_and_free(ranges
, prev_ptr
,
134 /* prev_ptr can be set now that the unlink path wasn't taken. */
135 prev_ptr
= &cur
->next
;
137 /* Clip the end fragment to do proper splitting. */
142 /* Hole punched in middle of entry. */
143 if (begin
> cur
->begin
&& tmp_end
< cur
->end
) {
144 range_list_add(ranges
, &cur
->next
, end
+ 1, cur
->end
,
146 cur
->end
= begin
- 1;
150 /* Removal at beginning. */
151 if (begin
== cur
->begin
)
152 cur
->begin
= tmp_end
+ 1;
154 /* Removal at end. */
155 if (tmp_end
== cur
->end
)
156 cur
->end
= begin
- 1;
/* Insert [begin, end] into the list: first evict everything the new range
 * covers (remove_memranges with tag -1), then scan for the insertion
 * point, add the entry, and coalesce neighbors. The trailing parameter(s)
 * after "end" (original lines 162-163) and the loop's break statements
 * (~178, ~185) are absent from this extract. */
160 static void merge_add_memranges(struct memranges
*ranges
,
161 resource_t begin
, resource_t end
,
164 struct range_entry
*cur
;
165 struct range_entry
**prev_ptr
;
167 prev_ptr
= &ranges
->entries
;
169 /* Remove all existing entries covered by the range. */
170 remove_memranges(ranges
, begin
, end
, -1);
172 /* Find the entry to place the new entry after. Since
173 * remove_memranges() was called above there is a guaranteed
174 * spot for this new entry. */
175 for (cur
= ranges
->entries
; cur
!= NULL
; cur
= cur
->next
) {
176 /* Found insertion spot before current entry. */
177 if (end
< cur
->begin
)
180 /* Keep track of previous entry to insert new entry after it. */
181 prev_ptr
= &cur
->next
;
183 /* The new entry starts after this one. */
184 if (begin
> cur
->end
)
188 /* Add new entry and merge with neighbors. */
189 range_list_add(ranges
, prev_ptr
, begin
, end
, tag
);
190 merge_neighbor_entries(ranges
);
/* Retag every entry currently carrying old_tag to new_tag, then merge any
 * now-adjacent entries that ended up with the same tag. */
193 void memranges_update_tag(struct memranges
*ranges
, unsigned long old_tag
,
194 unsigned long new_tag
)
196 struct range_entry
*r
;
198 memranges_each_entry(r
, ranges
) {
199 if (range_entry_tag(r
) == old_tag
)
200 range_entry_update_tag(r
, new_tag
);
203 merge_neighbor_entries(ranges
);
/* Callback type used by do_action(); matches remove_memranges() and
 * merge_add_memranges(). The trailing parameter (presumably the tag —
 * original line 208) is absent from this extract. */
206 typedef void (*range_action_t
)(struct memranges
*ranges
,
207 resource_t begin
, resource_t end
,
/* Align [base, base+size) outward to the memranges granularity
 * (1ULL << ranges->align) — begin rounded down, end rounded up — then
 * invoke the given action over the widened inclusive range. The local
 * declarations of begin/end and any size==0 guard (original lines
 * 213-219) are absent from this extract. */
210 static void do_action(struct memranges
*ranges
,
211 resource_t base
, resource_t size
, unsigned long tag
,
212 range_action_t action
)
220 /* The addresses are aligned to (1ULL << ranges->align): the begin address is
221 * aligned down while the end address is aligned up to be conservative
222 * about the full range covered. */
223 begin
= ALIGN_DOWN(base
, POWER_OF_2(ranges
->align
));
224 end
= begin
+ size
+ (base
- begin
);
225 end
= ALIGN_UP(end
, POWER_OF_2(ranges
->align
)) - 1;
226 action(ranges
, begin
, end
, tag
);
/* Punch a hole: remove [base, base+size) from the ranges via do_action()
 * with remove_memranges; tag -1 is passed but unused by removal. */
229 void memranges_create_hole(struct memranges
*ranges
,
230 resource_t base
, resource_t size
)
232 do_action(ranges
, base
, size
, -1, remove_memranges
);
/* Insert [base, base+size) with the given tag via do_action() with
 * merge_add_memranges (alignment-widened, merged with neighbors). */
235 void memranges_insert(struct memranges
*ranges
,
236 resource_t base
, resource_t size
, unsigned long tag
)
238 do_action(ranges
, base
, size
, tag
, merge_add_memranges
);
/* Context handed to collect_ranges() via search_global_resources().
 * collect_ranges() also reads a ->tag member (original line 243), absent
 * from this extract. */
241 struct collect_context
{
242 struct memranges
*ranges
;
244 memrange_filter_t filter
;
/* Resource-walker callback: insert res into ctx->ranges (with ctx->tag)
 * when no filter is set or the filter accepts (dev, res). A zero-size
 * guard (original lines ~250-253) is absent from this extract. */
247 static void collect_ranges(void *gp
, struct device
*dev
, struct resource
*res
)
249 struct collect_context
*ctx
= gp
;
254 if (ctx
->filter
== NULL
|| ctx
->filter(dev
, res
))
255 memranges_insert(ctx
->ranges
, res
->base
, res
->size
, ctx
->tag
);
/* Gather all global MEM resources matching mask/match (IORESOURCE_MEM is
 * forced into both) into ranges, optionally gated by filter. The tag
 * parameter and the context.tag assignment (original lines 260, 270) are
 * absent from this extract. */
258 void memranges_add_resources_filter(struct memranges
*ranges
,
259 unsigned long mask
, unsigned long match
,
261 memrange_filter_t filter
)
263 struct collect_context context
;
265 /* Only deal with MEM resources. */
266 mask
|= IORESOURCE_MEM
;
267 match
|= IORESOURCE_MEM
;
269 context
.ranges
= ranges
;
271 context
.filter
= filter
;
272 search_global_resources(mask
, match
, collect_ranges
, &context
);
/* Convenience wrapper: add matching resources with no filter. The tag
 * parameter line (original lines 277-278) is absent from this extract. */
275 void memranges_add_resources(struct memranges
*ranges
,
276 unsigned long mask
, unsigned long match
,
279 memranges_add_resources_filter(ranges
, mask
, match
, tag
, NULL
);
/* Initialize an empty memranges with the given alignment and seed its
 * free list with the caller-provided to_free[0..num_free-1] entries. The
 * declaration of loop variable i (original lines ~285-287) is absent from
 * this extract. */
282 void memranges_init_empty_with_alignment(struct memranges
*ranges
,
283 struct range_entry
*to_free
,
284 size_t num_free
, unsigned char align
)
288 ranges
->entries
= NULL
;
289 ranges
->free_list
= NULL
;
290 ranges
->align
= align
;
292 for (i
= 0; i
< num_free
; i
++)
293 range_entry_link(&ranges
->free_list
, &to_free
[i
]);
/* Initialize an empty memranges with the given alignment (no preallocated
 * free entries), then populate it from global resources matching
 * mask/match, tagging them with tag. */
296 void memranges_init_with_alignment(struct memranges
*ranges
,
297 unsigned long mask
, unsigned long match
,
298 unsigned long tag
, unsigned char align
)
300 memranges_init_empty_with_alignment(ranges
, NULL
, 0, align
);
301 memranges_add_resources(ranges
, mask
, match
, tag
);
/* Clone a memrange. The new memrange has the same entries as the old one. */
/* Entries are appended in order by carrying prev_ptr forward, so the
 * clone preserves ordering without re-sorting. The tag argument to
 * range_list_add (original line 315) is absent from this extract;
 * allocation failure of range_list_add is presumably unchecked — confirm
 * against the full file. */
305 void memranges_clone(struct memranges
*newranges
, struct memranges
*oldranges
)
307 struct range_entry
*r
, *cur
;
308 struct range_entry
**prev_ptr
;
310 memranges_init_empty_with_alignment(newranges
, NULL
, 0, oldranges
->align
);
312 prev_ptr
= &newranges
->entries
;
313 memranges_each_entry(r
, oldranges
) {
314 cur
= range_list_add(newranges
, prev_ptr
, r
->begin
, r
->end
,
316 prev_ptr
= &cur
->next
;
/* Drain the entry list, recycling every entry onto the free list. The
 * third argument of the call (the entry being freed, original line ~324)
 * is absent from this extract. */
320 void memranges_teardown(struct memranges
*ranges
)
322 while (ranges
->entries
!= NULL
) {
323 range_entry_unlink_and_free(ranges
, &ranges
->entries
,
/* Fill every gap between consecutive entries (and the tail gap up to
 * limit) with new entries carrying tag, then merge neighbors. Note
 * range_entry_end() is treated as exclusive here (compared to cur->begin
 * directly, and new ranges start at range_entry_end(prev)). Absent from
 * this extract: the first-iteration prev setup, the local "end"
 * declaration, the clamp of end to limit (~348-349), the prev update and
 * loop break, and the final range_list_add's end/tag arguments (~366). */
328 void memranges_fill_holes_up_to(struct memranges
*ranges
,
329 resource_t limit
, unsigned long tag
)
331 struct range_entry
*cur
;
332 struct range_entry
*prev
;
335 for (cur
= ranges
->entries
; cur
!= NULL
; cur
= cur
->next
) {
336 /* First entry. Just set prev. */
342 /* If the previous entry does not directly precede the current
343 * entry then add a new entry just after the previous one. */
344 if (range_entry_end(prev
) != cur
->begin
) {
347 end
= cur
->begin
- 1;
350 range_list_add(ranges
, &prev
->next
,
351 range_entry_end(prev
), end
, tag
);
356 /* Hit the requested range limit. No other entries after this
358 if (cur
->begin
>= limit
)
362 /* Handle the case where the limit was never reached. A new entry needs
363 * to be added to cover the range up to the limit. */
364 if (prev
!= NULL
&& range_entry_end(prev
) < limit
)
365 range_list_add(ranges
, &prev
->next
, range_entry_end(prev
),
368 /* Merge all entries that were newly added. */
369 merge_neighbor_entries(ranges
);
/* Return the entry following r in ranges. Body not visible in this
 * extract (original lines 374-376 absent) — presumably "return r->next;"
 * with ranges unused; confirm against the full file. */
372 struct range_entry
*memranges_next_entry(struct memranges
*ranges
,
373 const struct range_entry
*r
)
/* Find a range entry that satisfies the given constraints to fit a hole that matches the
 * required alignment, is big enough, does not exceed the limit and has a matching tag. */
/* Scans in increasing address order; "last" selects the highest matching
 * entry (tracked via last_entry) instead of the first. Absent from this
 * extract: the tag check inside the loop, the overflow/fit checks, the
 * early-exit break on exceeding limit, and the return statements
 * (original lines ~386-389, 391-393, 396-416). */
380 static const struct range_entry
*
381 memranges_find_entry(struct memranges
*ranges
, resource_t limit
, resource_t size
,
382 unsigned char align
, unsigned long tag
, bool last
)
384 const struct range_entry
*r
, *last_entry
= NULL
;
385 resource_t base
, end
;
390 memranges_each_entry(r
, ranges
) {
394 base
= ALIGN_UP(r
->begin
, POWER_OF_2(align
));
395 end
= base
+ size
- 1;
401 * If end for the hole in the current range entry goes beyond the requested
402 * limit, then none of the following ranges can satisfy this request because all
403 * range entries are maintained in increasing order.
/* Carve a size-byte, align-aligned region out of a matching entry (found
 * via memranges_find_entry) and punch a hole for it, reporting the chosen
 * base through *stolen_base. from_top selects stealing from the top of
 * the region (ALIGN_DOWN from limit) versus the bottom (ALIGN_UP from
 * r->begin). Absent from this extract: the from_top parameter line, the
 * NULL check / failure return after the find, the if/else structure
 * around the two *stolen_base assignments, and the final "return true"
 * (original lines ~419-420, 424-427, 434, 436, 438-439). */
417 bool memranges_steal(struct memranges
*ranges
, resource_t limit
, resource_t size
,
418 unsigned char align
, unsigned long tag
, resource_t
*stolen_base
,
421 const struct range_entry
*r
;
423 r
= memranges_find_entry(ranges
, limit
, size
, align
, tag
, from_top
);
428 limit
= MIN(limit
, r
->end
);
429 /* Ensure we're within the range, even aligned down.
430 Proof is simple: If ALIGN_UP(r->begin) would be
431 higher, the stolen range wouldn't fit.*/
432 assert(r
->begin
<= ALIGN_DOWN(limit
- size
+ 1, POWER_OF_2(align
)));
433 *stolen_base
= ALIGN_DOWN(limit
- size
+ 1, POWER_OF_2(align
));
435 *stolen_base
= ALIGN_UP(r
->begin
, POWER_OF_2(align
));
437 memranges_create_hole(ranges
, *stolen_base
, size
);