/* SPDX-License-Identifier: GPL-2.0-only */

#include <commonlib/region.h>
#include <console/console.h>
#include <cpu/x86/mtrr.h>
#include <program_loading.h>

/* For now this is a good lowest common denominator for the total CPU cache.
   TODO: fetch the total amount of cache from CPUID leaf 2. */
#define MAX_CPU_CACHE (256 * KiB)
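
/*
 * Sketch of the TODO above: one way to derive the real cache total is to
 * walk CPUID leaf 4 (Intel deterministic cache parameters; AMD exposes the
 * same layout via leaf 0x8000001d) and sum the data/unified caches. This is
 * only an illustration, assuming coreboot's cpuid_ext() helper is in scope;
 * it ignores double-counting on inclusive hierarchies and is kept compiled
 * out.
 */
#if 0
static uint32_t total_cpu_cache(void)
{
	uint32_t total = 0;
	unsigned int idx;

	for (idx = 0; ; idx++) {
		const struct cpuid_result res = cpuid_ext(4, idx);
		const unsigned int type = res.eax & 0x1f;

		if (type == 0)	/* No more cache levels */
			break;
		if (type == 2)	/* Skip instruction caches */
			continue;

		/* Size = ways * partitions * line size * sets */
		const uint32_t ways = ((res.ebx >> 22) & 0x3ff) + 1;
		const uint32_t parts = ((res.ebx >> 12) & 0x3ff) + 1;
		const uint32_t line = (res.ebx & 0xfff) + 1;
		const uint32_t sets = res.ecx + 1;

		total += ways * parts * line * sets;
	}

	return total;
}
#endif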

/* This makes the 'worst' case assumption that all cachelines covered by
   the MTRR, no matter the caching type, are filled and not overlapping. */
static uint32_t max_cache_used(void)
{
	int i, total_mtrrs = get_var_mtrr_count();
	uint32_t total_cache = 0;

	for (i = 0; i < total_mtrrs; i++) {
		msr_t mtrr = rdmsr(MTRR_PHYS_MASK(i));
		if (!(mtrr.lo & MTRR_PHYS_MASK_VALID))
			continue;
		total_cache += ~(mtrr.lo & 0xfffff000) + 1;
	}

	return total_cache;
}
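
/*
 * Example of the mask decoding above: a valid 64KiB MTRR has
 * mask.lo == 0xffff0800 (mask 0xffff0000 | MTRR_PHYS_MASK_VALID), so
 * ~(mask.lo & 0xfffff000) + 1 == ~0xffff0000 + 1 == 0x10000 == 64KiB.
 * Only the low 32 mask bits are decoded, which suffices for the
 * sub-4GiB XIP ranges handled here.
 */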

void platform_prog_run(struct prog *prog)
{
	const uint32_t base = (uintptr_t)prog_start(prog);
	const uint32_t size = prog_size(prog);
	const uint32_t end = base + size;
	const uint32_t cache_used = max_cache_used();
	/* This will accumulate MTRRs as XIP stages are run.
	   For now this includes the bootblock, which sets up its own
	   caching elsewhere, verstage and romstage. */
	int mtrr_num = get_free_var_mtrr();
	uint32_t mtrr_base;
	uint32_t mtrr_size = 4 * KiB;
	struct cpuinfo_x86 cpu_info;

	get_fms(&cpu_info, cpuid_eax(1));
	/*
	 * An unidentified combination of speculative reads and branch
	 * predictions inside WRPROT-cacheable memory can cause invalidation
	 * of cachelines and loss of stack on models based on the NetBurst
	 * microarchitecture. Therefore disable the WRPROT region entirely
	 * for all family F models.
	 */
	if (cpu_info.x86 == 0xf) {
		printk(BIOS_NOTICE,
		       "PROG_RUN: CPU does not support caching ROM\n"
		       "The next stage will run slowly!\n");
		return;
	}
60 "PROG_RUN: No MTRR available to cache ROM!\n"
61 "The next stage will run slowly!\n");

	if (cache_used + mtrr_size > MAX_CPU_CACHE) {
		printk(BIOS_NOTICE,
		       "PROG_RUN: No more cache available for the next stage\n"
		       "The next stage will run slowly!\n");
		return;
	}

	/* Try to size the MTRR to cache the whole stage: double mtrr_size
	   until one naturally aligned region of that size reaches the stage
	   end, or until doubling again would exceed the cache budget. */
	while (1) {
		if (ALIGN_DOWN(base, mtrr_size) + mtrr_size >= end)
			break;
		if (cache_used + mtrr_size * 2 > MAX_CPU_CACHE)
			break;
		mtrr_size *= 2;
	}
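
	/*
	 * Worked example (hypothetical numbers): for base 0x7000 and
	 * end 0xa000, mtrr_size grows 4KiB -> 8KiB -> 16KiB -> 32KiB ->
	 * 64KiB; at 64KiB, ALIGN_DOWN(0x7000, 0x10000) + 0x10000 ==
	 * 0x10000 >= 0xa000, so the loop stops with a single MTRR that
	 * covers the whole stage (budget permitting).
	 */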

	mtrr_base = ALIGN_DOWN(base, mtrr_size);
	if (mtrr_base + mtrr_size < end) {
		printk(BIOS_NOTICE, "PROG_RUN: Limiting XIP cache to %uKiB!\n",
		       mtrr_size / KiB);
		/* Check if we can cover a bigger range by aligning up. */
		const uint32_t alt_base = ALIGN_UP(base, mtrr_size);
		const uint32_t lower_coverage = mtrr_base + mtrr_size - base;
		const uint32_t upper_coverage = MIN(alt_base + mtrr_size, end) - alt_base;
		if (upper_coverage > lower_coverage)
			mtrr_base = alt_base;
	}
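
	/*
	 * Coverage example (hypothetical numbers): base 0x7000, end 0xa000,
	 * mtrr_size capped at 16KiB by the cache budget. Aligning down gives
	 * mtrr_base 0x4000 covering [0x7000, 0x8000) of the stage (4KiB);
	 * aligning up gives alt_base 0x8000 covering [0x8000, 0xa000) (8KiB),
	 * so the upper placement wins.
	 */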
93 "PROG_RUN: Setting MTRR to cache XIP stage. base: 0x%08x, size: 0x%08x\n",
94 mtrr_base
, mtrr_size
);
96 set_var_mtrr(mtrr_num
, mtrr_base
, mtrr_size
, MTRR_TYPE_WRPROT
);