/* SPDX-License-Identifier: GPL-2.0-only */

#include <commonlib/bsd/helpers.h>
#include <console/console.h>
#include <device/device.h>
#include <memrange.h>
#include <post.h>

static const char *resource2str(const struct resource *res)
{
	if (res->flags & IORESOURCE_IO)
		return "io";
	if (res->flags & IORESOURCE_PREFETCH)
		return "prefmem";
	if (res->flags & IORESOURCE_MEM)
		return "mem";
	return "undefined";
}

static bool dev_has_children(const struct device *dev)
{
	const struct bus *bus = dev->link_list;
	return bus && bus->children;
}
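
/* Indent a debug message by `depth` columns (via the "%*c" width specifier), so the
   pass-1/pass-2 log output below reflects the depth of the device tree being walked. */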
#define res_printk(depth, str, ...)	printk(BIOS_DEBUG, "%*c"str, depth, ' ', __VA_ARGS__)

/*
 * During pass 1, once all the requirements for downstream devices of a
 * bridge are gathered, this function calculates the overall resource
 * requirement for the bridge. It starts by picking the largest resource
 * requirement downstream for the given resource type and works by
 * adding requirements in descending order.
 *
 * Additionally, it takes alignment and limits of the downstream devices
 * into consideration and ensures that they get propagated to the bridge
 * resource. This is required to guarantee that the upstream bridge/
 * domain honors the limit and alignment requirements for this bridge
 * based on the tightest constraints downstream.
 */
static void update_bridge_resource(const struct device *bridge, struct resource *bridge_res,
				   unsigned long type_match, int print_depth)
{
	const struct device *child;
	struct resource *child_res;
	resource_t base;
	const unsigned long type_mask = IORESOURCE_TYPE_MASK | IORESOURCE_PREFETCH;
	struct bus *bus = bridge->link_list;

	child_res = NULL;

	/*
	 * `base` keeps track of where the next allocation for child resources
	 * can take place from within the bridge resource window. Since the
	 * bridge resource window allocation is not performed yet, it can start
	 * at 0. Base gets updated every time a resource requirement is
	 * accounted for in the loop below. After scanning all these resources,
	 * base will indicate the total size requirement for the current bridge
	 * resource window.
	 */
	base = 0;

	res_printk(print_depth, "%s %s: size: %llx align: %d gran: %d limit: %llx\n",
		   dev_path(bridge), resource2str(bridge_res), bridge_res->size,
		   bridge_res->align, bridge_res->gran, bridge_res->limit);

	while ((child = largest_resource(bus, &child_res, type_mask, type_match))) {

		/* Size 0 resources can be skipped. */
		if (!child_res->size)
			continue;

		/* Resources with 0 limit can't be assigned anything. */
		if (!child_res->limit)
			continue;

		/*
		 * Propagate the resource alignment to the bridge resource. The
		 * condition can only be true for the first (largest) resource. For all
		 * other child resources, alignment is taken care of by updating the
		 * base to round up as per the child resource alignment. It is
		 * guaranteed that pass 2 follows the exact same method of picking the
		 * resource for allocation using largest_resource(). Thus, as long as
		 * the alignment for the largest child resource is propagated up to the
		 * bridge resource, it can be guaranteed that the alignment for all
		 * resources is appropriately met.
		 */
		if (child_res->align > bridge_res->align)
			bridge_res->align = child_res->align;

		/*
		 * Propagate the resource limit to the bridge resource. If a downstream
		 * device has stricter requirements w.r.t. limits for any resource, that
		 * constraint needs to be propagated back up to the downstream bridges
		 * of the domain. This guarantees that the resource allocation which
		 * starts at the domain level takes into account all these constraints,
		 * thus working on a global view.
		 */
		if (child_res->limit < bridge_res->limit)
			bridge_res->limit = child_res->limit;

		/*
		 * Propagate the downstream resource request to allocate above 4G
		 * boundary to upstream bridge resource. This ensures that during
		 * pass 2, the resource allocator at domain level has a global view
		 * of all the downstream device requirements and thus address space
		 * is allocated as per updated flags in the bridge resource.
		 *
		 * Since the bridge resource is a single window, all the downstream
		 * resources of this bridge resource will be allocated in space above
		 * the 4G boundary.
		 */
		if (child_res->flags & IORESOURCE_ABOVE_4G)
			bridge_res->flags |= IORESOURCE_ABOVE_4G;

		/*
		 * Alignment value of 0 means that the child resource has no alignment
		 * requirements and so the base value remains unchanged here.
		 */
		base = ALIGN_UP(base, POWER_OF_2(child_res->align));

		res_printk(print_depth + 1, "%s %02lx * [0x%llx - 0x%llx] %s\n",
			   dev_path(child), child_res->index, base, base + child_res->size - 1,
			   resource2str(child_res));

		base += child_res->size;
	}

	/*
	 * After all downstream device resources are scanned, `base` represents
	 * the total size requirement for the current bridge resource window.
	 * This size needs to be rounded up to the granularity requirement of
	 * the bridge to ensure that the upstream bridge/domain allocates a big
	 * enough window.
	 */
	bridge_res->size = ALIGN_UP(base, POWER_OF_2(bridge_res->gran));

	res_printk(print_depth, "%s %s: size: %llx align: %d gran: %d limit: %llx done\n",
		   dev_path(bridge), resource2str(bridge_res), bridge_res->size,
		   bridge_res->align, bridge_res->gran, bridge_res->limit);
}
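
/*
 * Worked example (hypothetical numbers, for illustration only): given two
 * children of sizes 0x2000 (align 13) and 0x1000 (align 12) under a bridge
 * with gran 20 (1MiB), the loop above places the larger child at base 0 and
 * advances base to 0x2000, then aligns base up for the smaller child (already
 * 4KiB-aligned) and advances base to 0x3000. The final window size is then
 * ALIGN_UP(0x3000, 1MiB) = 0x100000, with bridge alignment raised to 13.
 */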

/*
 * During pass 1, at the bridge level, the resource allocator gathers
 * requirements from downstream devices and updates its own resource
 * windows for the provided resource type.
 */
static void compute_bridge_resources(const struct device *bridge, unsigned long type_match,
				     int print_depth)
{
	const struct device *child;
	struct resource *res;
	struct bus *bus = bridge->link_list;
	const unsigned long type_mask = IORESOURCE_TYPE_MASK | IORESOURCE_PREFETCH;

	for (res = bridge->resource_list; res; res = res->next) {
		if (!(res->flags & IORESOURCE_BRIDGE))
			continue;

		if ((res->flags & type_mask) != type_match)
			continue;

		/*
		 * Ensure that the resource requirements for all downstream bridges are
		 * gathered before updating the window for current bridge resource.
		 */
		for (child = bus->children; child; child = child->sibling) {
			if (!dev_has_children(child))
				continue;

			compute_bridge_resources(child, type_match, print_depth + 1);
		}

		/*
		 * Update the window for current bridge resource now that all downstream
		 * requirements are gathered.
		 */
		update_bridge_resource(bridge, res, type_match, print_depth);
	}
}

/*
 * During pass 1, the resource allocator walks down the entire sub-tree
 * of a domain. It gathers resource requirements for every downstream
 * bridge by looking at the resource requests of its children. Thus, the
 * requirement gathering begins at the leaf devices and is propagated
 * back up to the downstream bridges of the domain.
 *
 * At the domain level, it identifies every downstream bridge and walks
 * down that bridge to gather requirements for each resource type, i.e.
 * i/o, mem and prefmem. Since bridges have separate windows for mem and
 * prefmem, requirements for each need to be collected separately.
 *
 * Domain resource windows are fixed ranges and hence requirement
 * gathering does not result in any changes to these fixed ranges.
 */
static void compute_domain_resources(const struct device *domain)
{
	const struct device *child;
	const int print_depth = 1;

	if (domain->link_list == NULL)
		return;

	for (child = domain->link_list->children; child; child = child->sibling) {

		/* Skip if this is not a bridge or has no children under it. */
		if (!dev_has_children(child))
			continue;

		compute_bridge_resources(child, IORESOURCE_IO, print_depth);
		compute_bridge_resources(child, IORESOURCE_MEM, print_depth);
		compute_bridge_resources(child, IORESOURCE_MEM | IORESOURCE_PREFETCH,
					 print_depth);
	}
}

static unsigned char get_alignment_by_resource_type(const struct resource *res)
{
	if (res->flags & IORESOURCE_MEM)
		return 12;	/* Page-aligned --> log2(4KiB) */
	else if (res->flags & IORESOURCE_IO)
		return 0;	/* No special alignment required --> log2(1) */

	die("Unexpected resource type: flags(%lx)!\n", res->flags);
}

/*
 * If the resource is NULL or if the resource is not assigned, then it
 * cannot be used for allocation for downstream devices.
 */
static bool is_resource_invalid(const struct resource *res)
{
	return (res == NULL) || !(res->flags & IORESOURCE_ASSIGNED);
}

static void initialize_domain_io_resource_memranges(struct memranges *ranges,
						    const struct resource *res,
						    unsigned long memrange_type)
{
	memranges_insert(ranges, res->base, res->limit - res->base + 1, memrange_type);
}

static void initialize_domain_mem_resource_memranges(struct memranges *ranges,
						     const struct resource *res,
						     unsigned long memrange_type)
{
	resource_t res_base;
	resource_t res_limit;

	const resource_t limit_4g = 0xffffffff;

	res_base = res->base;
	res_limit = res->limit;

	/*
	 * Split the resource into two separate ranges if it crosses the 4G
	 * boundary. Memrange type is set differently to ensure that memrange
	 * does not merge these two ranges. For the range above 4G boundary,
	 * the given memrange type is ORed with IORESOURCE_ABOVE_4G.
	 */
	if (res_base <= limit_4g) {

		resource_t range_limit;

		/* Clip the resource limit at 4G boundary if necessary. */
		range_limit = MIN(res_limit, limit_4g);
		memranges_insert(ranges, res_base, range_limit - res_base + 1, memrange_type);

		/*
		 * If the resource lies completely below the 4G boundary, nothing more
		 * needs to be done.
		 */
		if (res_limit <= limit_4g)
			return;

		/*
		 * If the resource window crosses the 4G boundary, then update res_base
		 * to add another entry for the range above the boundary.
		 */
		res_base = limit_4g + 1;
	}

	if (res_base > res_limit)
		return;

	/*
	 * If resource lies completely above the 4G boundary or if the resource
	 * was clipped to add two separate ranges, the range above 4G boundary
	 * has the resource flag IORESOURCE_ABOVE_4G set. This allows domain to
	 * handle any downstream requests for resource allocation above 4G
	 * differently.
	 */
	memranges_insert(ranges, res_base, res_limit - res_base + 1,
			 memrange_type | IORESOURCE_ABOVE_4G);
}
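
/*
 * For illustration (hypothetical window): a mem resource with base 0xc0000000
 * and limit 0x17fffffff straddles the 4G boundary, so it is recorded as two
 * memranges -- [0xc0000000, 0xffffffff] tagged with the given type, and
 * [0x100000000, 0x17fffffff] tagged with type | IORESOURCE_ABOVE_4G.
 */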

/*
 * This function initializes memranges for domain device. If the
 * resource crosses 4G boundary, then this function splits it into two
 * ranges -- one for the window below 4G and the other for the window
 * above 4G. The latter range has IORESOURCE_ABOVE_4G flag set to
 * satisfy resource requests from downstream devices for allocations
 * above 4G.
 */
static void initialize_domain_memranges(struct memranges *ranges, const struct resource *res,
					unsigned long memrange_type)
{
	unsigned char align = get_alignment_by_resource_type(res);

	memranges_init_empty_with_alignment(ranges, NULL, 0, align);

	if (is_resource_invalid(res))
		return;

	if (res->flags & IORESOURCE_IO)
		initialize_domain_io_resource_memranges(ranges, res, memrange_type);
	else
		initialize_domain_mem_resource_memranges(ranges, res, memrange_type);
}

/*
 * This function initializes memranges for bridge device. Unlike domain,
 * bridge does not need to care about resource window crossing 4G
 * boundary. This is handled by the resource allocator at domain level
 * to ensure that all downstream bridges are allocated space either
 * above or below 4G boundary as per the state of IORESOURCE_ABOVE_4G
 * for the respective bridge resource.
 *
 * So, this function creates a single range of the entire resource
 * window available for the bridge resource. Thus all downstream
 * resources of the bridge for the given resource type get allocated
 * space from the same window. If there is any downstream resource of
 * the bridge which requests allocation above 4G, then all other
 * downstream resources of the same type under the bridge get allocated
 * above 4G.
 */
static void initialize_bridge_memranges(struct memranges *ranges, const struct resource *res,
					unsigned long memrange_type)
{
	unsigned char align = get_alignment_by_resource_type(res);

	memranges_init_empty_with_alignment(ranges, NULL, 0, align);

	if (is_resource_invalid(res))
		return;

	memranges_insert(ranges, res->base, res->limit - res->base + 1, memrange_type);
}

static void print_resource_ranges(const struct device *dev, const struct memranges *ranges)
{
	const struct range_entry *r;

	printk(BIOS_INFO, " %s: Resource ranges:\n", dev_path(dev));

	if (memranges_is_empty(ranges))
		printk(BIOS_INFO, " * EMPTY!!\n");

	memranges_each_entry(r, ranges) {
		printk(BIOS_INFO, " * Base: %llx, Size: %llx, Tag: %lx\n",
		       range_entry_base(r), range_entry_size(r), range_entry_tag(r));
	}
}

/*
 * This is where the actual allocation of resources happens during
 * pass 2. Given the list of memory ranges corresponding to the
 * resource of given type, it finds the biggest unallocated resource
 * using the type mask on the downstream bus. This continues in
 * descending order until all resources of the given type are allocated
 * address space within the current resource window.
 */
static void allocate_child_resources(struct bus *bus, struct memranges *ranges,
				     unsigned long type_mask, unsigned long type_match)
{
	const bool allocate_top_down =
		bus->dev->path.type == DEVICE_PATH_DOMAIN &&
		CONFIG(RESOURCE_ALLOCATION_TOP_DOWN);
	struct resource *resource = NULL;
	const struct device *dev;

	while ((dev = largest_resource(bus, &resource, type_mask, type_match))) {
		if (!resource->size)
			continue;

		if (memranges_steal(ranges, resource->limit, resource->size, resource->align,
				    type_match, &resource->base, allocate_top_down) == false) {
			printk(BIOS_ERR, " ERROR: Resource didn't fit!!! ");
			printk(BIOS_DEBUG, " %s %02lx * size: 0x%llx limit: %llx %s\n",
			       dev_path(dev), resource->index,
			       resource->size, resource->limit, resource2str(resource));
			continue;
		}

		resource->limit = resource->base + resource->size - 1;
		resource->flags |= IORESOURCE_ASSIGNED;

		printk(BIOS_DEBUG, " %s %02lx * [0x%llx - 0x%llx] limit: %llx %s\n",
		       dev_path(dev), resource->index, resource->base,
		       resource->size ? resource->base + resource->size - 1 :
		       resource->base, resource->limit, resource2str(resource));
	}
}
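
/*
 * Note: memranges_steal() both finds a window satisfying the size, alignment
 * and limit constraints and removes that window from `ranges`, so subsequent
 * (smaller) resources in the loop above cannot be handed overlapping space.
 */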

static void update_constraints(struct memranges *ranges, const struct device *dev,
			       const struct resource *res)
{
	/* Only fixed resources with a non-zero size need to be punched out. */
	if (!res->size)
		return;

	printk(BIOS_DEBUG, " %s: %s %02lx base %08llx limit %08llx %s (fixed)\n",
	       __func__, dev_path(dev), res->index, res->base,
	       res->base + res->size - 1, resource2str(res));

	memranges_create_hole(ranges, res->base, res->size);
}

/*
 * Scan the entire tree to identify any fixed resources allocated by
 * any device to ensure that the address map for domain resources is
 * appropriately updated.
 *
 * Domains can typically provide a memrange for the entire address space.
 * So, this function punches holes in the address space for all fixed
 * resources that are already defined. Both I/O and normal memory
 * resources are added as fixed. Both need to be removed from the address
 * space where dynamic resource allocations are sourced.
 */
static void avoid_fixed_resources(struct memranges *ranges, const struct device *dev,
				  unsigned long mask_match)
{
	const struct resource *res;
	const struct device *child;
	const struct bus *bus;

	for (res = dev->resource_list; res != NULL; res = res->next) {
		if ((res->flags & mask_match) != mask_match)
			continue;

		update_constraints(ranges, dev, res);
	}

	bus = dev->link_list;
	if (bus == NULL)
		return;

	for (child = bus->children; child != NULL; child = child->sibling)
		avoid_fixed_resources(ranges, child, mask_match);
}

static void constrain_domain_resources(const struct device *domain, struct memranges *ranges,
				       unsigned long type)
{
	unsigned long mask_match = type | IORESOURCE_FIXED;

	if (type == IORESOURCE_IO) {
		/*
		 * Don't allow allocations in the VGA I/O range. PCI has special
		 * cases for that.
		 */
		memranges_create_hole(ranges, 0x3b0, 0x3df - 0x3b0 + 1);

		/*
		 * Resource allocator no longer supports the legacy behavior where
		 * I/O resource allocation is guaranteed to avoid aliases over legacy
		 * PCI expansion card addresses.
		 */
	}

	avoid_fixed_resources(ranges, domain, mask_match);
}

/*
 * This function creates a list of memranges of given type using the
 * resource that is provided. If the given resource is NULL or if the
 * resource window size is 0, then it creates an empty list. This
 * results in resource allocation for that resource type failing for
 * all downstream devices since there is nothing to allocate from.
 *
 * In case of domain, it applies additional constraints to ensure that
 * the memranges do not overlap any of the fixed resources under that
 * domain. The domain typically provides a memrange for the entire address
 * space. Thus, it is up to the chipset to add DRAM and all other
 * windows which cannot be used for resource allocation as fixed
 * resources.
 */
static void setup_resource_ranges(const struct device *dev, const struct resource *res,
				  unsigned long type, struct memranges *ranges)
{
	printk(BIOS_DEBUG, "%s %s: base: %llx size: %llx align: %d gran: %d limit: %llx\n",
	       dev_path(dev), resource2str(res), res->base, res->size, res->align,
	       res->gran, res->limit);

	if (dev->path.type == DEVICE_PATH_DOMAIN) {
		initialize_domain_memranges(ranges, res, type);
		constrain_domain_resources(dev, ranges, type);
	} else {
		initialize_bridge_memranges(ranges, res, type);
	}

	print_resource_ranges(dev, ranges);
}

static void cleanup_resource_ranges(const struct device *dev, struct memranges *ranges,
				    const struct resource *res)
{
	memranges_teardown(ranges);

	printk(BIOS_DEBUG, "%s %s: base: %llx size: %llx align: %d gran: %d limit: %llx done\n",
	       dev_path(dev), resource2str(res), res->base, res->size, res->align,
	       res->gran, res->limit);
}

/*
 * Pass 2 of the resource allocator at the bridge level loops through
 * all the resources for the bridge and generates a list of memory
 * ranges similar to that at the domain level. However, there is no need
 * to apply any additional constraints since the window allocated to the
 * bridge is guaranteed to be non-overlapping by the allocator at domain
 * level.
 *
 * Allocation at the bridge level works the same as at domain level
 * (starts with the biggest resource requirement from downstream devices
 * and continues in descending order). One major difference at the
 * bridge level is that it considers prefmem resources separately from
 * mem resources.
 *
 * Once allocation at the current bridge is complete, resource allocator
 * continues walking down the downstream bridges until it hits the leaf
 * devices.
 */
static void allocate_bridge_resources(const struct device *bridge)
{
	struct memranges ranges;
	const struct resource *res;
	struct bus *bus = bridge->link_list;
	unsigned long type_match;
	struct device *child;
	const unsigned long type_mask = IORESOURCE_TYPE_MASK | IORESOURCE_PREFETCH;

	for (res = bridge->resource_list; res; res = res->next) {
		if (!res->size)
			continue;

		if (!(res->flags & IORESOURCE_BRIDGE))
			continue;

		type_match = res->flags & type_mask;

		setup_resource_ranges(bridge, res, type_match, &ranges);
		allocate_child_resources(bus, &ranges, type_mask, type_match);
		cleanup_resource_ranges(bridge, &ranges, res);
	}

	for (child = bus->children; child; child = child->sibling) {
		if (!dev_has_children(child))
			continue;

		allocate_bridge_resources(child);
	}
}
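
/*
 * Note: because type_match is derived as res->flags & type_mask above, a
 * prefetchable bridge window (IORESOURCE_MEM | IORESOURCE_PREFETCH) only
 * serves prefetchable child resources, while the plain mem window serves
 * the non-prefetchable ones.
 */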

static const struct resource *find_domain_resource(const struct device *domain,
						   unsigned long type)
{
	const struct resource *res;

	for (res = domain->resource_list; res; res = res->next) {
		if (res->flags & IORESOURCE_FIXED)
			continue;

		if ((res->flags & IORESOURCE_TYPE_MASK) == type)
			return res;
	}

	return NULL;
}

/*
 * Pass 2 of resource allocator begins at the domain level. Every domain
 * has two types of resources - io and mem. For each of these resources,
 * this function creates a list of memory ranges that can be used for
 * downstream resource allocation. This list is constrained to remove
 * any fixed resources in the domain sub-tree of the given resource
 * type. It then uses the memory ranges to apply best fit on the
 * resource requirements of the downstream devices.
 *
 * Once resources are allocated to all downstream devices of the domain,
 * it walks down each downstream bridge to continue the same process
 * until resources are allocated to all devices under the domain.
 */
static void allocate_domain_resources(const struct device *domain)
{
	struct memranges ranges;
	struct device *child;
	const struct resource *res;

	/* Resource type I/O */
	res = find_domain_resource(domain, IORESOURCE_IO);
	if (res) {
		setup_resource_ranges(domain, res, IORESOURCE_IO, &ranges);
		allocate_child_resources(domain->link_list, &ranges, IORESOURCE_TYPE_MASK,
					 IORESOURCE_IO);
		cleanup_resource_ranges(domain, &ranges, res);
	}

	/*
	 * Resource type Mem:
	 * Domain does not distinguish between mem and prefmem resources. Thus,
	 * the resource allocation at domain level considers mem and prefmem
	 * together when finding the best fit based on the biggest resource
	 * requirement.
	 *
	 * However, resource requests for allocation above 4G boundary need to
	 * be handled separately if the domain resource window crosses this
	 * boundary. There is a single window for resource of type
	 * IORESOURCE_MEM. When creating memranges, this resource is split into
	 * two separate ranges -- one for the window below 4G boundary and other
	 * for the window above 4G boundary (with IORESOURCE_ABOVE_4G flag set).
	 * Thus, when allocating child resources, requests for below and above
	 * the 4G boundary are handled separately by setting the type_mask and
	 * type_match to allocate_child_resources() accordingly.
	 */
	res = find_domain_resource(domain, IORESOURCE_MEM);
	if (res) {
		setup_resource_ranges(domain, res, IORESOURCE_MEM, &ranges);
		allocate_child_resources(domain->link_list, &ranges,
					 IORESOURCE_TYPE_MASK | IORESOURCE_ABOVE_4G,
					 IORESOURCE_MEM);
		allocate_child_resources(domain->link_list, &ranges,
					 IORESOURCE_TYPE_MASK | IORESOURCE_ABOVE_4G,
					 IORESOURCE_MEM | IORESOURCE_ABOVE_4G);
		cleanup_resource_ranges(domain, &ranges, res);
	}

	for (child = domain->link_list->children; child; child = child->sibling) {
		if (!dev_has_children(child))
			continue;

		/* Continue allocation for all downstream bridges. */
		allocate_bridge_resources(child);
	}
}

/*
 * This function forms the guts of the resource allocator. It walks
 * through the entire device tree for each domain two times.
 *
 * Every domain has a fixed set of ranges. These ranges cannot be
 * relaxed based on the requirements of the downstream devices. They
 * represent the available windows from which resources can be allocated
 * to the different devices under the domain.
 *
 * In order to identify the requirements of downstream devices, the
 * resource allocator walks in a DFS fashion. It gathers the requirements
 * from leaf devices and propagates those back up to their upstream bridges
 * until the requirements for all the downstream devices of the domain
 * are gathered. This is referred to as pass 1 of the resource allocator.
 *
 * Once the requirements for all the devices under the domain are
 * gathered, the resource allocator walks a second time to allocate
 * resources to downstream devices as per the requirements. It always
 * picks the biggest resource request as per the type (i/o and mem) to
 * allocate space from its fixed window to the immediate downstream
 * device of the domain. In order to accomplish best fit for the
 * resources, a list of ranges is maintained for each resource type (i/o
 * and mem). At the domain level we don't differentiate between mem and
 * prefmem. Since they are allocated space from the same window, the
 * resource allocator at the domain level ensures that the biggest
 * requirement is selected independent of the prefetch type. Once the
 * resource allocation for all immediate downstream devices is complete
 * at the domain level, the resource allocator walks down the subtree
 * for each downstream bridge to continue the allocation process at the
 * bridge level. Since bridges have separate windows for i/o, mem and
 * prefmem, the best fit algorithm at the bridge level looks for the
 * biggest requirement considering prefmem resources separately from
 * non-prefmem resources. This continues until resource allocation is
 * performed for all downstream bridges in the domain sub-tree. This is
 * referred to as pass 2 of the resource allocator.
 *
 * Some rules that are followed by the resource allocator:
 *  - Allocate resource locations for every device as long as
 *    the requirements can be satisfied.
 *  - Don't overlap with resources in fixed locations.
 *  - Don't overlap and follow the rules of bridges -- downstream
 *    devices of bridges should use parts of the address space
 *    allocated to the bridge.
 */
void allocate_resources(const struct device *root)
{
	const struct device *child;

	if ((root == NULL) || (root->link_list == NULL))
		return;

	for (child = root->link_list->children; child; child = child->sibling) {
		if (child->path.type != DEVICE_PATH_DOMAIN)
			continue;

		post_log_path(child);

		/* Pass 1 - Gather requirements. */
		printk(BIOS_INFO, "=== Resource allocator: %s - Pass 1 (gathering requirements) ===\n",
		       dev_path(child));
		compute_domain_resources(child);

		/* Pass 2 - Allocate resources as per gathered requirements. */
		printk(BIOS_INFO, "=== Resource allocator: %s - Pass 2 (allocating resources) ===\n",
		       dev_path(child));
		allocate_domain_resources(child);

		printk(BIOS_INFO, "=== Resource allocator: %s - resource allocation complete ===\n",
		       dev_path(child));
	}
}