/* SPDX-License-Identifier: GPL-2.0-only */

#include <commonlib/bsd/helpers.h>
#include <console/console.h>
#include <device/device.h>
#include <memrange.h>
#include <post.h>
#include <types.h>

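/*
 * Note: prefetchable memory resources carry both IORESOURCE_MEM and
 * IORESOURCE_PREFETCH, so the prefetch flag must be tested before the
 * plain memory flag below.
 */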
static const char *resource2str(const struct resource *res)
{
	if (res->flags & IORESOURCE_IO)
		return "io";
	if (res->flags & IORESOURCE_PREFETCH)
		return "prefmem";
	if (res->flags & IORESOURCE_MEM)
		return "mem";
	return "undefined";
}

static void print_domain_res(const struct device *dev,
			     const struct resource *res, const char *suffix)
{
	printk(BIOS_DEBUG, "%s %s: base: %llx size: %llx align: %u gran: %u limit: %llx%s\n",
	       dev_path(dev), resource2str(res), res->base, res->size,
	       res->align, res->gran, res->limit, suffix);
}

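/*
 * The "%*c" prefix pads a single ' ' to a field width of `depth`, so
 * each message is indented to reflect the device's depth in the tree.
 */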
#define res_printk(depth, str, ...)	printk(BIOS_DEBUG, "%*c"str, depth, ' ', __VA_ARGS__)

static void print_bridge_res(const struct device *dev, const struct resource *res,
			     int depth, const char *suffix)
{
	res_printk(depth, "%s %s: size: %llx align: %u gran: %u limit: %llx%s\n", dev_path(dev),
		   resource2str(res), res->size, res->align, res->gran, res->limit, suffix);
}

static void print_child_res(const struct device *dev, const struct resource *res, int depth)
{
	res_printk(depth + 1, "%s %02lx * [0x%llx - 0x%llx] %s\n", dev_path(dev),
		   res->index, res->base, res->base + res->size - 1, resource2str(res));
}

static void print_fixed_res(const struct device *dev,
			    const struct resource *res, const char *prefix)
{
	printk(BIOS_DEBUG, " %s: %s %02lx base %08llx limit %08llx %s (fixed)\n",
	       prefix, dev_path(dev), res->index, res->base, res->base + res->size - 1,
	       resource2str(res));
}

static void print_assigned_res(const struct device *dev, const struct resource *res)
{
	printk(BIOS_DEBUG, " %s %02lx * [0x%llx - 0x%llx] limit: %llx %s\n",
	       dev_path(dev), res->index, res->base, res->base + res->size - 1,
	       res->limit, resource2str(res));
}

static void print_failed_res(const struct device *dev, const struct resource *res)
{
	printk(BIOS_DEBUG, " %s %02lx * size: 0x%llx limit: %llx %s\n",
	       dev_path(dev), res->index, res->size, res->limit, resource2str(res));
}

static void print_resource_ranges(const struct device *dev, const struct memranges *ranges)
{
	const struct range_entry *r;

	printk(BIOS_INFO, " %s: Resource ranges:\n", dev_path(dev));

	if (memranges_is_empty(ranges))
		printk(BIOS_INFO, " * EMPTY!!\n");

	memranges_each_entry(r, ranges) {
		printk(BIOS_INFO, " * Base: %llx, Size: %llx, Tag: %lx\n",
		       range_entry_base(r), range_entry_size(r), range_entry_tag(r));
	}
}

static bool dev_has_children(const struct device *dev)
{
	const struct bus *bus = dev->downstream;
	return bus && bus->children;
}

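/*
 * Compute the limit at which a resource may actually be placed. Unless
 * ALWAYS_ALLOW_ABOVE_4G_ALLOCATION is set, non-bridge resources without
 * IORESOURCE_ABOVE_4G are clamped to the 32-bit space. For example
 * (illustrative numbers), a device BAR with limit 0xffffffffffffffff but
 * without IORESOURCE_ABOVE_4G gets an effective limit of 0xffffffff.
 */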
static resource_t effective_limit(const struct resource *const res)
{
	if (CONFIG(ALWAYS_ALLOW_ABOVE_4G_ALLOCATION))
		return res->limit;

	/* Always allow bridge resources above 4G. */
	if (res->flags & IORESOURCE_BRIDGE)
		return res->limit;

	const resource_t quirk_4g_limit =
		res->flags & IORESOURCE_ABOVE_4G ? UINT64_MAX : UINT32_MAX;
	return MIN(res->limit, quirk_4g_limit);
}

/*
 * During pass 1, once all the requirements for downstream devices of a
 * bridge are gathered, this function calculates the overall resource
 * requirement for the bridge. It starts by picking the largest resource
 * requirement downstream for the given resource type and works by
 * adding requirements in descending order.
 *
 * Additionally, it takes alignment and limits of the downstream devices
 * into consideration and ensures that they get propagated to the bridge
 * resource. This is required to guarantee that the upstream bridge/
 * domain honors the limit and alignment requirements for this bridge
 * based on the tightest constraints downstream.
 *
 * Last but not least, it stores the offset inside the bridge resource
 * for each child resource in its base field. This simplifies pass 2
 * for resources behind a bridge, as we only have to add offsets to the
 * allocated base of the bridge resource.
 */
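/*
 * Illustrative example (hypothetical sizes): two children requesting
 * mem windows of 0x2000 (align 13) and 0x100 (align 8) get offsets 0
 * and 0x2000, so `base` ends at 0x2100. With a bridge granularity of
 * 2^20, the bridge window size is rounded up to 0x100000.
 */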
static void update_bridge_resource(const struct device *bridge, struct resource *bridge_res,
				   int print_depth)
{
	const struct device *child;
	struct resource *child_res;
	resource_t base;
	const unsigned long type_mask = IORESOURCE_TYPE_MASK | IORESOURCE_PREFETCH;
	const unsigned long type_match = bridge_res->flags & type_mask;
	struct bus *bus = bridge->downstream;

	child_res = NULL;

	/*
	 * `base` keeps track of where the next allocation for child resources
	 * can take place from within the bridge resource window. Since the
	 * bridge resource window allocation is not performed yet, it can start
	 * at 0. Base gets updated every time a resource requirement is
	 * accounted for in the loop below. After scanning all these resources,
	 * base will indicate the total size requirement for the current bridge
	 * resource window.
	 */
	base = 0;

	print_bridge_res(bridge, bridge_res, print_depth, "");

	while ((child = largest_resource(bus, &child_res, type_mask, type_match))) {
		/* Size 0 resources can be skipped. */
		if (!child_res->size)
			continue;

		/* Resources with 0 limit can't be assigned anything. */
		if (!child_res->limit)
			continue;

		/*
		 * Propagate the resource alignment to the bridge resource. The
		 * condition can only be true for the first (largest) resource. For all
		 * other child resources, alignment is taken care of by rounding their
		 * base up.
		 */
		if (child_res->align > bridge_res->align)
			bridge_res->align = child_res->align;

		/*
		 * Propagate the resource limit to the bridge resource. If a downstream
		 * device has stricter requirements w.r.t. limits for any resource, that
		 * constraint needs to be propagated back up to the bridges downstream
		 * of the domain. This way, the whole bridge resource fulfills the limit.
		 */
		if (effective_limit(child_res) < bridge_res->limit)
			bridge_res->limit = effective_limit(child_res);

		/*
		 * Alignment value of 0 means that the child resource has no alignment
		 * requirements and so the base value remains unchanged here.
		 */
		base = ALIGN_UP(base, POWER_OF_2(child_res->align));

		/*
		 * Store the relative offset inside the bridge resource for later
		 * consumption in allocate_bridge_resources(), and invalidate flags
		 * related to the base.
		 */
		child_res->base = base;
		child_res->flags &= ~(IORESOURCE_ASSIGNED | IORESOURCE_STORED);

		print_child_res(child, child_res, print_depth);

		base += child_res->size;
	}

	/*
	 * After all downstream device resources are scanned, `base` represents
	 * the total size requirement for the current bridge resource window.
	 * This size needs to be rounded up to the granularity requirement of
	 * the bridge to ensure that the upstream bridge/domain allocates a big
	 * enough window.
	 */
	bridge_res->size = ALIGN_UP(base, POWER_OF_2(bridge_res->gran));

	print_bridge_res(bridge, bridge_res, print_depth, " done");
}

/*
 * During pass 1, at the bridge level, the resource allocator gathers
 * requirements from downstream devices and updates its own resource
 * windows for the provided resource type.
 */
static void compute_bridge_resources(const struct device *bridge, unsigned long type_match,
				     int print_depth)
{
	const struct device *child;
	struct resource *res;
	struct bus *bus = bridge->downstream;
	const unsigned long type_mask = IORESOURCE_TYPE_MASK | IORESOURCE_PREFETCH;

	for (res = bridge->resource_list; res; res = res->next) {
		if (!(res->flags & IORESOURCE_BRIDGE))
			continue;

		if ((res->flags & type_mask) != type_match)
			continue;

		/*
		 * Ensure that the resource requirements for all downstream bridges are
		 * gathered before updating the window for current bridge resource.
		 */
		for (child = bus->children; child; child = child->sibling) {
			if (!dev_has_children(child))
				continue;
			compute_bridge_resources(child, type_match, print_depth + 1);
		}

		/*
		 * Update the window for current bridge resource now that all downstream
		 * requirements are gathered.
		 */
		update_bridge_resource(bridge, res, print_depth);
	}
}

/*
 * During pass 1, the resource allocator walks down the entire sub-tree
 * of a domain. It gathers resource requirements for every downstream
 * bridge by looking at the resource requests of its children. Thus, the
 * requirement gathering begins at the leaf devices and is propagated
 * back up to the downstream bridges of the domain.
 *
 * At the domain level, it identifies every downstream bridge and walks
 * down that bridge to gather requirements for each resource type, i.e.
 * i/o, mem and prefmem. Since bridges have separate windows for mem and
 * prefmem, requirements for each need to be collected separately.
 *
 * Domain resource windows are fixed ranges and hence requirement
 * gathering does not result in any changes to these fixed ranges.
 */
static void compute_domain_resources(const struct device *domain)
{
	const struct device *child;
	const int print_depth = 1;

	if (domain->downstream == NULL)
		return;

	for (child = domain->downstream->children; child; child = child->sibling) {
		/* Skip if this is not a bridge or has no children under it. */
		if (!dev_has_children(child))
			continue;

		compute_bridge_resources(child, IORESOURCE_IO, print_depth);
		compute_bridge_resources(child, IORESOURCE_MEM, print_depth);
		compute_bridge_resources(child, IORESOURCE_MEM | IORESOURCE_PREFETCH,
					 print_depth);
	}
}

/*
 * Scan the entire tree to identify any fixed resources allocated by
 * any device to ensure that the address map for domain resources is
 * appropriately updated.
 *
 * Domains can typically provide a memrange for the entire address space.
 * So, this function punches holes in the address space for all fixed
 * resources that are already defined. Both I/O and normal memory
 * resources are added as fixed. Both need to be removed from the address
 * space where dynamic resource allocations are sourced.
 */
static void avoid_fixed_resources(struct memranges *ranges, const struct device *dev,
				  unsigned long mask_match)
{
	const struct resource *res;
	const struct device *child;
	const struct bus *bus;

	for (res = dev->resource_list; res != NULL; res = res->next) {
		if ((res->flags & mask_match) != mask_match)
			continue;
		if (!res->size)
			continue;
		print_fixed_res(dev, res, __func__);
		memranges_create_hole(ranges, res->base, res->size);
	}

	bus = dev->downstream;
	if (bus == NULL)
		return;

	for (child = bus->children; child != NULL; child = child->sibling)
		avoid_fixed_resources(ranges, child, mask_match);
}

/*
 * This function creates a list of memranges of given type using the
 * resource that is provided. It applies additional constraints to
 * ensure that the memranges do not overlap any of the fixed resources
 * under the domain. The domain typically provides a memrange for the
 * entire address space. Thus, it is up to the chipset to add DRAM and
 * all other windows which cannot be used for resource allocation as
 * fixed resources.
 */
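/*
 * Illustrative example (hypothetical addresses): a domain mem window of
 * [0x0 - 0xffffffff] with fixed DRAM at [0x0 - 0x7fffffff] leaves
 * [0x80000000 - 0xffffffff] available for dynamic allocation.
 */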
static void setup_resource_ranges(const struct device *const domain,
				  const unsigned long type,
				  struct memranges *const ranges)
{
	/* Align mem resources to 2^12 (4KiB pages) at a minimum, so they
	   can be memory-mapped individually (e.g. for virtualization guests). */
	const unsigned char alignment = type == IORESOURCE_MEM ? 12 : 0;
	const unsigned long type_mask = IORESOURCE_TYPE_MASK | IORESOURCE_FIXED;

	memranges_init_empty_with_alignment(ranges, NULL, 0, alignment);

	for (struct resource *res = domain->resource_list; res != NULL; res = res->next) {
		if ((res->flags & type_mask) != type)
			continue;
		print_domain_res(domain, res, "");
		memranges_insert(ranges, res->base, res->limit - res->base + 1, type);
	}

	if (type == IORESOURCE_IO) {
		/*
		 * Don't allow allocations in the VGA I/O range. PCI has special
		 * cases for that.
		 */
		memranges_create_hole(ranges, 0x3b0, 0x3df - 0x3b0 + 1);

		/*
		 * Resource allocator no longer supports the legacy behavior where
		 * I/O resource allocation is guaranteed to avoid aliases over legacy
		 * PCI expansion card addresses.
		 */
	}

	avoid_fixed_resources(ranges, domain, type | IORESOURCE_FIXED);

	print_resource_ranges(domain, ranges);
}

static void cleanup_domain_resource_ranges(const struct device *dev, struct memranges *ranges,
					   unsigned long type)
{
	memranges_teardown(ranges);
	for (struct resource *res = dev->resource_list; res != NULL; res = res->next) {
		if (res->flags & IORESOURCE_FIXED)
			continue;
		if ((res->flags & IORESOURCE_TYPE_MASK) != type)
			continue;
		print_domain_res(dev, res, " done");
	}
}

static void assign_resource(struct resource *const res, const resource_t base,
			    const struct device *const dev)
{
	res->base = base;
	res->limit = res->base + res->size - 1;
	res->flags |= IORESOURCE_ASSIGNED;
	res->flags &= ~IORESOURCE_STORED;

	print_assigned_res(dev, res);
}

/*
 * This is where the actual allocation of resources happens during
 * pass 2. We construct a list of memory ranges corresponding to the
 * resource of a given type, then look for the biggest unallocated
 * resource on the downstream bus. This continues in a descending order
 * until all resources of a given type have space allocated within the
 * domain's resource window.
 */
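/*
 * Note: memranges_steal() removes each allocated window from `ranges`,
 * so every subsequent (smaller) resource is placed in whatever address
 * space is still left over.
 */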
static void allocate_toplevel_resources(const struct device *const domain,
					const unsigned long type)
{
	const unsigned long type_mask = IORESOURCE_TYPE_MASK;
	struct resource *res = NULL;
	const struct device *dev;
	struct memranges ranges;
	resource_t base;

	if (!dev_has_children(domain))
		return;

	setup_resource_ranges(domain, type, &ranges);

	while ((dev = largest_resource(domain->downstream, &res, type_mask, type))) {
		if (!res->size)
			continue;

		if (!memranges_steal(&ranges, effective_limit(res), res->size, res->align,
				     type, &base, CONFIG(RESOURCE_ALLOCATION_TOP_DOWN))) {
			printk(BIOS_ERR, "Resource didn't fit!!!\n");
			print_failed_res(dev, res);
			continue;
		}

		assign_resource(res, base, dev);
	}

	cleanup_domain_resource_ranges(domain, &ranges, type);
}

/*
 * Pass 2 of the resource allocator at the bridge level loops through
 * all the resources for the bridge and assigns all the base addresses
 * of its children's resources of the same type. update_bridge_resource()
 * of pass 1 pre-calculated the offsets of these bases inside the bridge
 * resource. Now that the bridge resource is allocated, all we have to
 * do is to add its final base to these offsets.
 *
 * Once allocation at the current bridge is complete, the resource
 * allocator continues walking down the downstream bridges until it hits
 * the leaf devices.
 */
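/*
 * For example (illustrative numbers), if a bridge's mem window was
 * allocated at 0x80000000 and a child resource got offset 0x2000 in
 * pass 1, the child's final base becomes 0x80002000.
 */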
static void assign_resource_cb(void *param, struct device *dev, struct resource *res)
{
	/* We have to filter the same resources as update_bridge_resource(). */
	if (!res->size || !res->limit)
		return;

	assign_resource(res, *(const resource_t *)param + res->base, dev);
}

static void allocate_bridge_resources(const struct device *bridge)
{
	const unsigned long type_mask =
		IORESOURCE_TYPE_MASK | IORESOURCE_PREFETCH | IORESOURCE_FIXED;
	struct bus *const bus = bridge->downstream;
	struct resource *res;
	struct device *child;

	for (res = bridge->resource_list; res != NULL; res = res->next) {
		if (!res->size)
			continue;

		if (!(res->flags & IORESOURCE_BRIDGE))
			continue;

		if (!(res->flags & IORESOURCE_ASSIGNED))
			continue;

		/* Run assign_resource_cb() for all downstream resources of the same type. */
		search_bus_resources(bus, type_mask, res->flags & type_mask,
				     assign_resource_cb, &res->base);
	}

	for (child = bus->children; child != NULL; child = child->sibling) {
		if (!dev_has_children(child))
			continue;

		allocate_bridge_resources(child);
	}
}

/*
 * Pass 2 of the resource allocator begins at the domain level. Every
 * domain has two types of resources: io and mem. For each of these
 * resources, this function creates a list of memory ranges that can be
 * used for downstream resource allocation. This list is constrained to
 * remove any fixed resources in the domain sub-tree of the given
 * resource type. It then uses the memory ranges to apply best fit on
 * the resource requirements of the downstream devices.
 *
 * Once resources are allocated to all downstream devices of the domain,
 * it walks down each downstream bridge to finish resource assignment
 * of its children's resources within its own window.
 */
static void allocate_domain_resources(const struct device *domain)
{
	/* Resource type I/O */
	allocate_toplevel_resources(domain, IORESOURCE_IO);

	/*
	 * Domain does not distinguish between mem and prefmem resources. Thus,
	 * the resource allocation at domain level considers mem and prefmem
	 * together when finding the best fit based on the biggest resource
	 * requirement.
	 */
	allocate_toplevel_resources(domain, IORESOURCE_MEM);

	struct device *child;
	for (child = domain->downstream->children; child; child = child->sibling) {
		if (!dev_has_children(child))
			continue;

		/* Continue allocation for all downstream bridges. */
		allocate_bridge_resources(child);
	}
}

/*
 * This function forms the guts of the resource allocator. It walks
 * through the entire device tree for each domain two times.
 *
 * Every domain has a fixed set of ranges. These ranges cannot be
 * relaxed based on the requirements of the downstream devices. They
 * represent the available windows from which resources can be allocated
 * to the different devices under the domain.
 *
 * In order to identify the requirements of downstream devices, the
 * resource allocator walks in a DFS fashion. It gathers the requirements
 * from leaf devices and propagates those back up to their upstream
 * bridges until the requirements for all the downstream devices of the
 * domain are gathered. This is referred to as pass 1 of the resource
 * allocator.
 *
 * Once the requirements for all the devices under the domain are
 * gathered, the resource allocator walks a second time to allocate
 * resources to downstream devices as per the requirements. It always
 * picks the biggest resource request as per the type (i/o and mem) to
 * allocate space from its fixed window to the immediate downstream
 * device of the domain. In order to accomplish best fit for the
 * resources, a list of ranges is maintained by each resource type (i/o
 * and mem). At the domain level we don't differentiate between mem and
 * prefmem. Since they are allocated space from the same window, the
 * resource allocator at the domain level ensures that the biggest
 * requirement is selected independent of the prefetch type. Once the
 * resource allocation for all immediate downstream devices is complete
 * at the domain level, the resource allocator walks down the subtree
 * for each downstream bridge to continue the allocation process at the
 * bridge level. Since bridges have either their whole window allocated
 * or nothing, we only need to place downstream resources inside these
 * windows by re-using offsets that were pre-calculated in pass 1. This
 * continues until resource allocation is realized for all downstream
 * bridges in the domain sub-tree. This is referred to as pass 2 of the
 * resource allocator.
 *
 * Some rules that are followed by the resource allocator:
 *  - Allocate resource locations for every device as long as
 *    the requirements can be satisfied.
 *  - Don't overlap with resources in fixed locations.
 *  - Don't overlap and follow the rules of bridges -- downstream
 *    devices of bridges should use parts of the address space
 *    allocated to the bridge.
 */
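/*
 * Illustrative walk (hypothetical tree): for a domain with one bridge
 * that has two leaf children, pass 1 sizes the bridge windows from the
 * children's requests (bottom-up), and pass 2 first places the bridge
 * windows inside the domain's fixed ranges, then places each child at
 * its pre-calculated offset inside the bridge window (top-down).
 */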
void allocate_resources(const struct device *root)
{
	const struct device *child;

	if ((root == NULL) || (root->downstream == NULL))
		return;

	for (child = root->downstream->children; child; child = child->sibling) {
		if (child->path.type != DEVICE_PATH_DOMAIN)
			continue;

		post_log_path(child);

		/* Pass 1 - Relative placement. */
		printk(BIOS_INFO, "=== Resource allocator: %s - Pass 1 (relative placement) ===\n",
		       dev_path(child));
		compute_domain_resources(child);

		/* Pass 2 - Allocate resources as per gathered requirements. */
		printk(BIOS_INFO, "=== Resource allocator: %s - Pass 2 (allocating resources) ===\n",
		       dev_path(child));
		allocate_domain_resources(child);

		printk(BIOS_INFO, "=== Resource allocator: %s - resource allocation complete ===\n",
		       dev_path(child));
	}
}