/* SPDX-License-Identifier: GPL-2.0-or-later */

/*
 * mtrr.c: setting MTRR to decent values for cache initialization on P6
 * Derived from intel_set_mtrr in intel_subr.c and mtrr.c in linux kernel
 *
 * Reference: Intel Architecture Software Developer's Manual, Volume 3: System Programming Guide
 */
#include <assert.h>
#include <bootstate.h>
#include <commonlib/helpers.h>
#include <console/console.h>
#include <cpu/amd/mtrr.h>
#include <cpu/cpu.h>
#include <cpu/x86/cache.h>
#include <cpu/x86/lapic.h>
#include <cpu/x86/msr.h>
#include <cpu/x86/mtrr.h>
#include <device/device.h>
#include <device/pci_ids.h>
#include <memrange.h>
#include <string.h>
#include <types.h>
#if CONFIG(X86_AMD_FIXED_MTRRS)
#define MTRR_FIXED_WRBACK_BITS (MTRR_READ_MEM | MTRR_WRITE_MEM)
#else
#define MTRR_FIXED_WRBACK_BITS 0
#endif
/*
 * Static storage size for variable MTRRs. It's sized sufficiently large to
 * handle different types of CPUs. Empirically, 16 variable MTRRs have not
 * yet been observed to be insufficient.
 */
#define NUM_MTRR_STATIC_STORAGE 16
static int total_mtrrs;
static void detect_var_mtrrs(void)
{
	total_mtrrs = get_var_mtrr_count();

	if (total_mtrrs > NUM_MTRR_STATIC_STORAGE) {
		printk(BIOS_WARNING,
		       "MTRRs detected (%d) > NUM_MTRR_STATIC_STORAGE (%d)\n",
		       total_mtrrs, NUM_MTRR_STATIC_STORAGE);
		total_mtrrs = NUM_MTRR_STATIC_STORAGE;
	}
}
void enable_fixed_mtrr(void)
{
	msr_t msr;

	msr = rdmsr(MTRR_DEF_TYPE_MSR);
	msr.lo |= MTRR_DEF_TYPE_EN | MTRR_DEF_TYPE_FIX_EN;
	wrmsr(MTRR_DEF_TYPE_MSR, msr);
}
void fixed_mtrrs_expose_amd_rwdram(void)
{
	msr_t syscfg;

	if (!CONFIG(X86_AMD_FIXED_MTRRS))
		return;

	syscfg = rdmsr(SYSCFG_MSR);
	syscfg.lo |= SYSCFG_MSR_MtrrFixDramModEn;
	wrmsr(SYSCFG_MSR, syscfg);
}
void fixed_mtrrs_hide_amd_rwdram(void)
{
	msr_t syscfg;

	if (!CONFIG(X86_AMD_FIXED_MTRRS))
		return;

	syscfg = rdmsr(SYSCFG_MSR);
	syscfg.lo &= ~SYSCFG_MSR_MtrrFixDramModEn;
	wrmsr(SYSCFG_MSR, syscfg);
}
static void enable_var_mtrr(unsigned char deftype)
{
	msr_t msr;

	msr = rdmsr(MTRR_DEF_TYPE_MSR);
	msr.lo &= ~0xff;
	msr.lo |= MTRR_DEF_TYPE_EN | deftype;
	wrmsr(MTRR_DEF_TYPE_MSR, msr);
}
#define MTRR_VERBOSE_LEVEL BIOS_NEVER

/* MTRRs are at a 4KiB granularity. */
#define RANGE_SHIFT 12
#define ADDR_SHIFT_TO_RANGE_SHIFT(x) \
	(((x) > RANGE_SHIFT) ? ((x) - RANGE_SHIFT) : RANGE_SHIFT)
#define PHYS_TO_RANGE_ADDR(x) ((x) >> RANGE_SHIFT)
#define RANGE_TO_PHYS_ADDR(x) (((resource_t)(x)) << RANGE_SHIFT)

/* Helpful constants. */
#define RANGE_1MB PHYS_TO_RANGE_ADDR(1ULL << 20)
#define RANGE_4GB (1ULL << (ADDR_SHIFT_TO_RANGE_SHIFT(32)))

#define MTRR_ALGO_SHIFT (8)
#define MTRR_TAG_MASK ((1 << MTRR_ALGO_SHIFT) - 1)
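
/*
 * Worked example (illustration added for clarity, not used by the code):
 * with RANGE_SHIFT == 12 all range math happens in 4KiB units, so
 *   PHYS_TO_RANGE_ADDR(0x100000000ULL) == 0x100000 == RANGE_4GB,
 *   RANGE_1MB == 0x100, and
 *   RANGE_TO_PHYS_ADDR(RANGE_1MB) == 0x100000 (1MiB) again.
 * Working in 4KiB units keeps the fixed-range descriptors below within
 * 32-bit arithmetic.
 */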
static inline uint64_t range_entry_base_mtrr_addr(struct range_entry *r)
{
	return PHYS_TO_RANGE_ADDR(range_entry_base(r));
}

static inline uint64_t range_entry_end_mtrr_addr(struct range_entry *r)
{
	return PHYS_TO_RANGE_ADDR(range_entry_end(r));
}

static inline int range_entry_mtrr_type(struct range_entry *r)
{
	return range_entry_tag(r) & MTRR_TAG_MASK;
}
static int filter_vga_wrcomb(struct device *dev, struct resource *res)
{
	/* Only handle PCI devices. */
	if (dev->path.type != DEVICE_PATH_PCI)
		return 0;

	/* Only handle VGA class devices. */
	if (((dev->class >> 8) != PCI_CLASS_DISPLAY_VGA))
		return 0;

	/* Add resource as write-combining in the address space. */
	return 1;
}
static void print_physical_address_space(const struct memranges *addr_space,
					 const char *identifier)
{
	const struct range_entry *r;

	if (identifier)
		printk(BIOS_DEBUG, "MTRR: %s Physical address space:\n",
		       identifier);
	else
		printk(BIOS_DEBUG, "MTRR: Physical address space:\n");

	memranges_each_entry(r, addr_space)
		printk(BIOS_DEBUG,
		       "0x%016llx - 0x%016llx size 0x%08llx type %ld\n",
		       range_entry_base(r), range_entry_end(r) - 1,
		       range_entry_size(r), range_entry_tag(r));
}
static struct memranges *get_physical_address_space(void)
{
	static struct memranges *addr_space;
	static struct memranges addr_space_storage;

	/* In order to handle some chipsets not being able to pre-determine
	 * uncacheable ranges, such as graphics memory, at resource insertion
	 * time remove uncacheable regions from the cacheable ones. */
	if (addr_space == NULL) {
		unsigned long mask;
		unsigned long match;

		addr_space = &addr_space_storage;

		mask = IORESOURCE_CACHEABLE;
		/* Collect cacheable and uncacheable address ranges. The
		 * uncacheable regions take precedence over the cacheable
		 * regions. */
		memranges_init(addr_space, mask, mask, MTRR_TYPE_WRBACK);
		memranges_add_resources(addr_space, mask, 0,
					MTRR_TYPE_UNCACHEABLE);

		/* Handle any write combining resources. Only prefetchable
		 * resources are appropriate for this MTRR type. */
		match = IORESOURCE_PREFETCH;
		mask |= match;
		memranges_add_resources_filter(addr_space, mask, match,
					       MTRR_TYPE_WRCOMB, filter_vga_wrcomb);

		/* The address space below 4GiB is special. It needs to be
		 * covered entirely by range entries so that MTRR calculations
		 * can be properly done for the full 32-bit address space.
		 * Therefore, ensure holes are filled up to 4GiB as
		 * uncacheable. */
		memranges_fill_holes_up_to(addr_space,
					   RANGE_TO_PHYS_ADDR(RANGE_4GB),
					   MTRR_TYPE_UNCACHEABLE);

		print_physical_address_space(addr_space, NULL);
	}

	return addr_space;
}
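
/*
 * Hypothetical example (illustrative values, not produced by this file): on a
 * board with 4GiB of RAM, stolen graphics memory at 0xad000000 and MMIO below
 * 4GiB, the resulting address space would contain entries along the lines of
 *   0x0000000000000000 - 0x00000000acffffff  type 6 (WB)  cacheable RAM
 *   0x00000000ad000000 - 0x00000000ffffffff  type 0 (UC)  hole filled up to 4GiB
 *   0x0000000100000000 - 0x000000014fffffff  type 6 (WB)  RAM remapped above 4GiB
 * with any VGA prefetchable BAR inserted as type 1 (WC) by the filter above.
 */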
/* Fixed MTRR descriptor. This structure defines the step size and begin
 * and end (exclusive) address covered by a set of fixed MTRR MSRs.
 * It also describes the offset in byte intervals to store the calculated MTRR
 * type in an array. */
struct fixed_mtrr_desc {
	uint32_t begin;
	uint32_t end;
	uint32_t step;
	int range_index;
	int msr_index_base;
};

/* Shared MTRR calculations. Can be reused by APs. */
static uint8_t fixed_mtrr_types[NUM_FIXED_RANGES];

/* Fixed MTRR descriptors. */
static const struct fixed_mtrr_desc fixed_mtrr_desc[] = {
	{ PHYS_TO_RANGE_ADDR(0x000000), PHYS_TO_RANGE_ADDR(0x080000),
	  PHYS_TO_RANGE_ADDR(64 * 1024), 0, MTRR_FIX_64K_00000 },
	{ PHYS_TO_RANGE_ADDR(0x080000), PHYS_TO_RANGE_ADDR(0x0C0000),
	  PHYS_TO_RANGE_ADDR(16 * 1024), 8, MTRR_FIX_16K_80000 },
	{ PHYS_TO_RANGE_ADDR(0x0C0000), PHYS_TO_RANGE_ADDR(0x100000),
	  PHYS_TO_RANGE_ADDR(4 * 1024), 24, MTRR_FIX_4K_C0000 },
};
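
/*
 * Coverage summary (derived from the table above): the three descriptors
 * cover the low 1MiB with progressively finer granularity:
 *   0x00000-0x7FFFF in 8 x 64KiB ranges  (range_index 0..7),
 *   0x80000-0xBFFFF in 16 x 16KiB ranges (range_index 8..23),
 *   0xC0000-0xFFFFF in 64 x 4KiB ranges  (range_index 24..87),
 * for 8 + 16 + 64 = 88 entries in fixed_mtrr_types[], i.e. NUM_FIXED_RANGES.
 * Each fixed MTRR MSR packs 8 of these one-byte types (RANGES_PER_FIXED_MTRR).
 */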
static void calc_fixed_mtrrs(void)
{
	static int fixed_mtrr_types_initialized;
	struct memranges *phys_addr_space;
	struct range_entry *r;
	const struct fixed_mtrr_desc *desc;
	const struct fixed_mtrr_desc *last_desc;
	uint32_t begin;
	uint32_t end;
	int type_index;

	if (fixed_mtrr_types_initialized)
		return;

	phys_addr_space = get_physical_address_space();

	/* Set all fixed ranges to uncacheable first. */
	memset(&fixed_mtrr_types[0], MTRR_TYPE_UNCACHEABLE, NUM_FIXED_RANGES);

	desc = &fixed_mtrr_desc[0];
	last_desc = &fixed_mtrr_desc[ARRAY_SIZE(fixed_mtrr_desc) - 1];

	memranges_each_entry(r, phys_addr_space) {
		begin = range_entry_base_mtrr_addr(r);
		end = range_entry_end_mtrr_addr(r);

		if (begin >= last_desc->end)
			break;

		if (end > last_desc->end)
			end = last_desc->end;

		/* Get to the correct fixed mtrr descriptor. */
		while (begin >= desc->end)
			desc++;

		type_index = desc->range_index;
		type_index += (begin - desc->begin) / desc->step;

		while (begin != end) {
			unsigned char type;

			type = range_entry_tag(r);
			printk(MTRR_VERBOSE_LEVEL,
			       "MTRR addr 0x%x-0x%x set to %d type @ %d\n",
			       begin, begin + desc->step - 1, type, type_index);
			if (type == MTRR_TYPE_WRBACK)
				type |= MTRR_FIXED_WRBACK_BITS;
			fixed_mtrr_types[type_index] = type;
			type_index++;
			begin += desc->step;
			if (begin == desc->end)
				desc++;
		}
	}
	fixed_mtrr_types_initialized = 1;
}
static void commit_fixed_mtrrs(void)
{
	int i;
	int j;
	int msr_num;
	int type_index;
	const unsigned int lapic_id = lapicid();
	/* 8 ranges per msr. */
	msr_t fixed_msrs[NUM_FIXED_MTRRS];
	unsigned long msr_index[NUM_FIXED_MTRRS];

	fixed_mtrrs_expose_amd_rwdram();

	memset(&fixed_msrs, 0, sizeof(fixed_msrs));

	msr_num = 0;
	type_index = 0;
	for (i = 0; i < ARRAY_SIZE(fixed_mtrr_desc); i++) {
		const struct fixed_mtrr_desc *desc;
		int num_ranges;

		desc = &fixed_mtrr_desc[i];
		num_ranges = (desc->end - desc->begin) / desc->step;
		for (j = 0; j < num_ranges; j += RANGES_PER_FIXED_MTRR) {
			msr_index[msr_num] = desc->msr_index_base +
				(j / RANGES_PER_FIXED_MTRR);
			fixed_msrs[msr_num].lo |=
				fixed_mtrr_types[type_index++] << 0;
			fixed_msrs[msr_num].lo |=
				fixed_mtrr_types[type_index++] << 8;
			fixed_msrs[msr_num].lo |=
				fixed_mtrr_types[type_index++] << 16;
			fixed_msrs[msr_num].lo |=
				fixed_mtrr_types[type_index++] << 24;
			fixed_msrs[msr_num].hi |=
				fixed_mtrr_types[type_index++] << 0;
			fixed_msrs[msr_num].hi |=
				fixed_mtrr_types[type_index++] << 8;
			fixed_msrs[msr_num].hi |=
				fixed_mtrr_types[type_index++] << 16;
			fixed_msrs[msr_num].hi |=
				fixed_mtrr_types[type_index++] << 24;
			msr_num++;
		}
	}

	/* Ensure that both arrays were fully initialized */
	ASSERT(msr_num == NUM_FIXED_MTRRS)

	for (i = 0; i < ARRAY_SIZE(fixed_msrs); i++)
		printk(BIOS_DEBUG, "apic_id 0x%x: MTRR: Fixed MSR 0x%lx 0x%08x%08x\n",
		       lapic_id, msr_index[i], fixed_msrs[i].hi, fixed_msrs[i].lo);

	disable_cache();
	for (i = 0; i < ARRAY_SIZE(fixed_msrs); i++)
		wrmsr(msr_index[i], fixed_msrs[i]);
	enable_cache();
	fixed_mtrrs_hide_amd_rwdram();
}
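
/*
 * Hypothetical example of the packing above (illustrative, not from a real
 * log): if the eight 4KiB ranges 0xC0000-0xC7FFF all resolved to write-back
 * (type 6) and MTRR_FIXED_WRBACK_BITS is 0, the MSR written for
 * MTRR_FIX_4K_C0000 would be lo = 0x06060606, hi = 0x06060606 - one type
 * byte per 4KiB range, lower four ranges in .lo, upper four in .hi.
 */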
static void x86_setup_fixed_mtrrs_no_enable(void)
{
	calc_fixed_mtrrs();
	commit_fixed_mtrrs();
}

static void x86_setup_fixed_mtrrs(void)
{
	x86_setup_fixed_mtrrs_no_enable();

	printk(BIOS_SPEW, "apic_id 0x%x call enable_fixed_mtrr()\n", lapicid());
	enable_fixed_mtrr();
}
struct var_mtrr_regs {
	msr_t base;
	msr_t mask;
};

struct var_mtrr_solution {
	int mtrr_default_type;
	int num_used;
	struct var_mtrr_regs regs[NUM_MTRR_STATIC_STORAGE];
};

/* Global storage for variable MTRR solution. */
static struct var_mtrr_solution mtrr_global_solution;

struct var_mtrr_state {
	struct memranges *addr_space;
	int above4gb;
	int address_bits;
	int prepare_msrs;
	int mtrr_index;
	int def_mtrr_type;
	struct var_mtrr_regs *regs;
};
static void clear_var_mtrr(int index)
{
	msr_t msr = { .lo = 0, .hi = 0 };

	wrmsr(MTRR_PHYS_BASE(index), msr);
	wrmsr(MTRR_PHYS_MASK(index), msr);
}
static int get_os_reserved_mtrrs(void)
{
	return CONFIG(RESERVE_MTRRS_FOR_OS) ? 2 : 0;
}
static void prep_var_mtrr(struct var_mtrr_state *var_state,
			  uint64_t base, uint64_t size, int mtrr_type)
{
	struct var_mtrr_regs *regs;
	resource_t rbase;
	resource_t rsize;
	resource_t mask;

	if (var_state->mtrr_index >= total_mtrrs) {
		printk(BIOS_ERR, "Not enough MTRRs available! MTRR index is %d with %d MTRRs in total.\n",
		       var_state->mtrr_index, total_mtrrs);
		return;
	}

	/*
	 * If desired, 2 variable MTRRs are attempted to be saved for the OS to
	 * use. However, it's more important to try to map the full address
	 * space properly.
	 */
	if (var_state->mtrr_index >= total_mtrrs - get_os_reserved_mtrrs())
		printk(BIOS_WARNING, "Taking a reserved OS MTRR.\n");

	rbase = base;
	rsize = size;

	rbase = RANGE_TO_PHYS_ADDR(rbase);
	rsize = RANGE_TO_PHYS_ADDR(rsize);
	rsize = -rsize;

	mask = (1ULL << var_state->address_bits) - 1;
	rsize = rsize & mask;

	printk(BIOS_DEBUG, "MTRR: %d base 0x%016llx mask 0x%016llx type %d\n",
	       var_state->mtrr_index, rbase, rsize, mtrr_type);

	regs = &var_state->regs[var_state->mtrr_index];

	regs->base.lo = rbase;
	regs->base.lo |= mtrr_type;
	regs->base.hi = rbase >> 32;

	regs->mask.lo = rsize;
	regs->mask.lo |= MTRR_PHYS_MASK_VALID;
	regs->mask.hi = rsize >> 32;
}
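
/*
 * Worked example of the base/mask encoding above (hypothetical numbers,
 * assuming 36 physical address bits): a 2GiB write-back range at physical 0
 * gives rbase = 0x0 and rsize = (-0x80000000) & 0xFFFFFFFFF = 0xF80000000.
 * The MSR pair then becomes
 *   MTRR_PHYS_BASE: lo = 0x00000006 (type WB), hi = 0x0
 *   MTRR_PHYS_MASK: lo = 0x80000800 (mask | MTRR_PHYS_MASK_VALID), hi = 0xF
 * so an address hits this MTRR when (addr & 0xF80000000) == 0.
 */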
/*
 * fls64: find least significant bit set in a 64-bit word
 * As samples, fls64(0x0) = 64; fls64(0x4400) = 10;
 * fls64(0x40400000000) = 34.
 */
static uint32_t fls64(uint64_t x)
{
	uint32_t lo = (uint32_t)x;
	if (lo)
		return fls(lo);
	uint32_t hi = x >> 32;
	return fls(hi) + 32;
}

/*
 * fms64: find most significant bit set in a 64-bit word
 * As samples, fms64(0x0) = 0; fms64(0x4400) = 14;
 * fms64(0x40400000000) = 42.
 */
static uint32_t fms64(uint64_t x)
{
	uint32_t hi = (uint32_t)(x >> 32);
	if (!hi)
		return fms((uint32_t)x);
	return fms(hi) + 32;
}
static void calc_var_mtrr_range(struct var_mtrr_state *var_state,
				uint64_t base, uint64_t size, int mtrr_type)
{
	while (size != 0) {
		uint32_t addr_lsb;
		uint32_t size_msb;
		uint64_t mtrr_size;

		addr_lsb = fls64(base);
		size_msb = fms64(size);

		/* All MTRR entries need to have their base aligned to the mask
		 * size. The maximum size is calculated by a function of the
		 * min base bit set and maximum size bit set. */
		if (addr_lsb > size_msb)
			mtrr_size = 1ULL << size_msb;
		else
			mtrr_size = 1ULL << addr_lsb;

		if (var_state->prepare_msrs)
			prep_var_mtrr(var_state, base, mtrr_size, mtrr_type);

		size -= mtrr_size;
		base += mtrr_size;
		var_state->mtrr_index++;
	}
}
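
/*
 * Hypothetical example of the splitting above: covering 3GiB of write-back
 * memory starting at 0 (the code works in 4KiB range units, but the
 * arithmetic is the same in bytes) first emits a 2GiB MTRR at 0, then a 1GiB
 * MTRR at 2GiB - two variable MTRRs in total. A range whose base is less
 * aligned than its size, e.g. 1.5GiB starting at 512MiB, is chopped at the
 * base's alignment first: 512MiB at 512MiB, then 1GiB at 1GiB.
 */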
static uint64_t optimize_var_mtrr_hole(const uint64_t base,
				       const uint64_t hole,
				       const uint64_t limit,
				       const int carve_hole)
{
	/*
	 * With default type UC, we can potentially optimize a WB
	 * range with unaligned upper end, by aligning it up and
	 * carving the added "hole" out again.
	 *
	 * To optimize the upper end of the hole, we will test
	 * how many MTRRs calc_var_mtrr_range() will spend for any
	 * alignment of the hole's upper end.
	 *
	 * We take four parameters, the lower end of the WB range
	 * `base`, upper end of the WB range as start of the `hole`,
	 * a `limit` how far we may align the upper end of the hole
	 * up and a flag `carve_hole` whether we should count MTRRs
	 * for carving the hole out. We return the optimal upper end
	 * for the hole (which may be the same as the end of the WB
	 * range in case we don't gain anything by aligning up).
	 */

	const int dont_care = 0;
	struct var_mtrr_state var_state = { 0, };

	unsigned int align, best_count;
	uint32_t best_end = hole;

	/* calculate MTRR count for the WB range alone (w/o a hole) */
	calc_var_mtrr_range(&var_state, base, hole - base, dont_care);
	best_count = var_state.mtrr_index;
	var_state.mtrr_index = 0;

	for (align = fls(hole) + 1; align <= fms(hole); ++align) {
		const uint64_t hole_end = ALIGN_UP((uint64_t)hole, 1 << align);
		if (hole_end > limit)
			break;

		/* calculate MTRR count for this alignment */
		calc_var_mtrr_range(
			&var_state, base, hole_end - base, dont_care);
		if (carve_hole)
			calc_var_mtrr_range(
				&var_state, hole, hole_end - hole, dont_care);

		if (var_state.mtrr_index < best_count) {
			best_count = var_state.mtrr_index;
			best_end = hole_end;
		}
		var_state.mtrr_index = 0;
	}

	return best_end;
}
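
/*
 * Hypothetical example of the optimization above: a write-back range from 0
 * to 3GiB-8MiB (0xBF800000) needs 8 MTRRs if covered exactly
 * (2GiB + 512MiB + 256MiB + 128MiB + 64MiB + 32MiB + 16MiB + 8MiB). Aligning
 * the end up to 3GiB and carving the 8MiB back out as uncacheable needs only
 * 3 MTRRs (2GiB + 1GiB write-back plus one 8MiB UC hole), so the aligned end
 * wins the MTRR count comparison.
 */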
static void calc_var_mtrrs_with_hole(struct var_mtrr_state *var_state,
				     struct range_entry *r)
{
	uint64_t a1, a2, b1, b2;
	int mtrr_type, carve_hole;

	/*
	 * Determine MTRRs based on the following algorithm for the given entry:
	 * +------------------+ b2 = ALIGN_UP(end)
	 * |  0 or more bytes | <-- hole is carved out between b1 and b2
	 * +------------------+ a2 = b1 = original end
	 * |                  |
	 * +------------------+ a1 = begin
	 *
	 * Thus, there are up to 2 sub-ranges to configure variable MTRRs for.
	 */
	mtrr_type = range_entry_mtrr_type(r);

	a1 = range_entry_base_mtrr_addr(r);
	a2 = range_entry_end_mtrr_addr(r);

	/* The end address is within the first 1MiB. The fixed MTRRs take
	 * precedence over the variable ones. Therefore this range
	 * can be ignored. */
	if (a2 <= RANGE_1MB)
		return;

	/* Again, the fixed MTRRs take precedence so the beginning
	 * of the range can be set to 0 if it starts at or below 1MiB. */
	if (a1 <= RANGE_1MB)
		a1 = 0;

	/* If the range starts above 4GiB the processing is done. */
	if (!var_state->above4gb && a1 >= RANGE_4GB)
		return;

	/* Clip the upper address to 4GiB if addresses above 4GiB
	 * are not being processed. */
	if (!var_state->above4gb && a2 > RANGE_4GB)
		a2 = RANGE_4GB;

	b1 = a2;
	b2 = a2;
	carve_hole = 0;

	/* We only consider WB type ranges for hole-carving. */
	if (mtrr_type == MTRR_TYPE_WRBACK) {
		struct range_entry *next;
		uint64_t b2_limit;

		/*
		 * Depending on the type of the next range, there are three
		 * different situations to handle:
		 *
		 * 1. WB range is last in address space:
		 *    Aligning up, up to the next power of 2, may gain us
		 *    something.
		 *
		 * 2. The next range is of type UC:
		 *    We may align up, up to the _end_ of the next range. If
		 *    there is a gap between the current and the next range,
		 *    it would have been covered by the default type UC anyway.
		 *
		 * 3. The next range is not of type UC:
		 *    We may align up, up to the _base_ of the next range. This
		 *    may either be the end of the current range (if the next
		 *    range follows immediately) or the end of the gap between
		 *    the ranges.
		 */
		next = memranges_next_entry(var_state->addr_space, r);
		if (next == NULL) {
			b2_limit = ALIGN_UP((uint64_t)b1, 1 << fms(b1));
			/* If it's the last range above 4GiB, we won't carve
			   the hole out. If an OS wanted to move MMIO there,
			   it would have to override the MTRR setting using
			   PAT just like it would with WB as default type. */
			carve_hole = a1 < RANGE_4GB;
		} else if (range_entry_mtrr_type(next)
				== MTRR_TYPE_UNCACHEABLE) {
			b2_limit = range_entry_end_mtrr_addr(next);
			carve_hole = 1;
		} else {
			b2_limit = range_entry_base_mtrr_addr(next);
			carve_hole = 1;
		}
		b2 = optimize_var_mtrr_hole(a1, b1, b2_limit, carve_hole);
	}

	calc_var_mtrr_range(var_state, a1, b2 - a1, mtrr_type);
	if (carve_hole && b2 != b1) {
		calc_var_mtrr_range(var_state, b1, b2 - b1,
				    MTRR_TYPE_UNCACHEABLE);
	}
}
static void __calc_var_mtrrs(struct memranges *addr_space,
			     int above4gb, int address_bits,
			     int *num_def_wb_mtrrs, int *num_def_uc_mtrrs)
{
	int wb_deftype_count;
	int uc_deftype_count;
	struct range_entry *r;
	struct var_mtrr_state var_state;

	/* The default MTRR cacheability type is determined by calculating
	 * the number of MTRRs required for each MTRR type as if it was the
	 * default. */
	var_state.addr_space = addr_space;
	var_state.above4gb = above4gb;
	var_state.address_bits = address_bits;
	var_state.prepare_msrs = 0;

	wb_deftype_count = 0;
	uc_deftype_count = 0;

	/*
	 * For each range do 2 calculations:
	 *   1. UC as default type with possible holes at top of range.
	 *   2. WB as default.
	 * The lowest count is then used as default after totaling all
	 * MTRRs. UC takes precedence in the MTRR architecture. There-
	 * fore, only holes can be used when the type of the region is
	 * MTRR_TYPE_WRBACK with MTRR_TYPE_UNCACHEABLE as the default
	 * type.
	 */
	memranges_each_entry(r, var_state.addr_space) {
		int mtrr_type;

		mtrr_type = range_entry_mtrr_type(r);

		if (mtrr_type != MTRR_TYPE_UNCACHEABLE) {
			var_state.mtrr_index = 0;
			var_state.def_mtrr_type = MTRR_TYPE_UNCACHEABLE;
			calc_var_mtrrs_with_hole(&var_state, r);
			uc_deftype_count += var_state.mtrr_index;
		}

		if (mtrr_type != MTRR_TYPE_WRBACK) {
			var_state.mtrr_index = 0;
			var_state.def_mtrr_type = MTRR_TYPE_WRBACK;
			calc_var_mtrrs_with_hole(&var_state, r);
			wb_deftype_count += var_state.mtrr_index;
		}
	}

	*num_def_wb_mtrrs = wb_deftype_count;
	*num_def_uc_mtrrs = uc_deftype_count;
}
static int calc_var_mtrrs(struct memranges *addr_space,
			  int above4gb, int address_bits)
{
	int wb_deftype_count = 0;
	int uc_deftype_count = 0;

	__calc_var_mtrrs(addr_space, above4gb, address_bits, &wb_deftype_count,
			 &uc_deftype_count);

	const int bios_mtrrs = total_mtrrs - get_os_reserved_mtrrs();
	if (wb_deftype_count > bios_mtrrs && uc_deftype_count > bios_mtrrs) {
		printk(BIOS_DEBUG, "MTRR: Removing WRCOMB type. "
		       "WB/UC MTRR counts: %d/%d > %d.\n",
		       wb_deftype_count, uc_deftype_count, bios_mtrrs);
		memranges_update_tag(addr_space, MTRR_TYPE_WRCOMB,
				     MTRR_TYPE_UNCACHEABLE);
		__calc_var_mtrrs(addr_space, above4gb, address_bits,
				 &wb_deftype_count, &uc_deftype_count);
	}

	printk(BIOS_DEBUG, "MTRR: default type WB/UC MTRR counts: %d/%d.\n",
	       wb_deftype_count, uc_deftype_count);

	if (wb_deftype_count < uc_deftype_count) {
		printk(BIOS_DEBUG, "MTRR: WB selected as default type.\n");
		return MTRR_TYPE_WRBACK;
	}
	printk(BIOS_DEBUG, "MTRR: UC selected as default type.\n");
	return MTRR_TYPE_UNCACHEABLE;
}
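
/*
 * Hypothetical example of the selection above (illustrative counts): if
 * covering the address space with UC as the default type would need 7
 * variable MTRRs for the cacheable ranges, but WB as the default would need
 * only 4 for the uncacheable holes, WB is returned and prepare_var_mtrrs()
 * will later skip every WB range entirely. The WRCOMB-removal fallback only
 * triggers when both counts exceed the MTRRs left after the optional OS
 * reservation.
 */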
static void prepare_var_mtrrs(struct memranges *addr_space, int def_type,
			      int above4gb, int address_bits,
			      struct var_mtrr_solution *sol)
{
	struct range_entry *r;
	struct var_mtrr_state var_state;

	var_state.addr_space = addr_space;
	var_state.above4gb = above4gb;
	var_state.address_bits = address_bits;
	/* Prepare the MSRs. */
	var_state.prepare_msrs = 1;
	var_state.mtrr_index = 0;
	var_state.def_mtrr_type = def_type;
	var_state.regs = &sol->regs[0];

	memranges_each_entry(r, var_state.addr_space) {
		if (range_entry_mtrr_type(r) == def_type)
			continue;
		calc_var_mtrrs_with_hole(&var_state, r);
	}

	/* Update the solution. */
	sol->num_used = var_state.mtrr_index;
}
static int commit_var_mtrrs(const struct var_mtrr_solution *sol)
{
	int i;

	if (sol->num_used > total_mtrrs) {
		printk(BIOS_WARNING, "Not enough MTRRs: %d vs %d\n",
		       sol->num_used, total_mtrrs);
		return -1;
	}

	/* Write out the variable MTRRs. */
	disable_cache();
	for (i = 0; i < sol->num_used; i++) {
		wrmsr(MTRR_PHYS_BASE(i), sol->regs[i].base);
		wrmsr(MTRR_PHYS_MASK(i), sol->regs[i].mask);
	}
	/* Clear the ones that are unused. */
	for (; i < total_mtrrs; i++)
		clear_var_mtrr(i);
	enable_var_mtrr(sol->mtrr_default_type);
	enable_cache();

	return 0;
}
void x86_setup_var_mtrrs(unsigned int address_bits, unsigned int above4gb)
{
	static struct var_mtrr_solution *sol = NULL;
	struct memranges *addr_space;

	addr_space = get_physical_address_space();

	if (sol == NULL) {
		sol = &mtrr_global_solution;
		sol->mtrr_default_type =
			calc_var_mtrrs(addr_space, !!above4gb, address_bits);
		prepare_var_mtrrs(addr_space, sol->mtrr_default_type,
				  !!above4gb, address_bits, sol);
	}

	commit_var_mtrrs(sol);
}
static void _x86_setup_mtrrs(unsigned int above4gb)
{
	int address_size;

	x86_setup_fixed_mtrrs();
	address_size = cpu_phys_address_size();
	printk(BIOS_DEBUG, "apic_id 0x%x setup mtrr for CPU physical address size: %d bits\n",
	       lapicid(), address_size);
	x86_setup_var_mtrrs(address_size, above4gb);
}
void x86_setup_mtrrs(void)
{
	/* Without detect, assume the minimum */
	total_mtrrs = MIN_MTRRS;
	/* Always handle addresses above 4GiB. */
	_x86_setup_mtrrs(1);
}

void x86_setup_mtrrs_with_detect(void)
{
	detect_var_mtrrs();
	/* Always handle addresses above 4GiB. */
	_x86_setup_mtrrs(1);
}

void x86_setup_mtrrs_with_detect_no_above_4gb(void)
{
	detect_var_mtrrs();
	_x86_setup_mtrrs(0);
}
void x86_mtrr_check(void)
{
	/* Only Pentium Pro and later have MTRR */
	msr_t msr;
	printk(BIOS_DEBUG, "\nMTRR check\n");

	msr = rdmsr(MTRR_DEF_TYPE_MSR);

	printk(BIOS_DEBUG, "Fixed MTRRs   : ");
	if (msr.lo & MTRR_DEF_TYPE_FIX_EN)
		printk(BIOS_DEBUG, "Enabled\n");
	else
		printk(BIOS_DEBUG, "Disabled\n");

	printk(BIOS_DEBUG, "Variable MTRRs: ");
	if (msr.lo & MTRR_DEF_TYPE_EN)
		printk(BIOS_DEBUG, "Enabled\n");
	else
		printk(BIOS_DEBUG, "Disabled\n");

	printk(BIOS_DEBUG, "\n");
}
static bool put_back_original_solution;
void mtrr_use_temp_range(uintptr_t begin, size_t size, int type)
{
	const struct range_entry *r;
	const struct memranges *orig;
	struct var_mtrr_solution sol;
	struct memranges addr_space;
	const int above4gb = 1; /* Cover above 4GiB by default. */
	int address_bits;
	static struct temp_range {
		uintptr_t begin;
		size_t size;
		int type;
	} temp_ranges[10];
	size_t i;

	/* Record the temporary range so later calls re-apply all of them. */
	for (i = 0; i < ARRAY_SIZE(temp_ranges); i++) {
		if (temp_ranges[i].size == 0) {
			temp_ranges[i].begin = begin;
			temp_ranges[i].size = size;
			temp_ranges[i].type = type;
			break;
		}
	}

	if (i == ARRAY_SIZE(temp_ranges)) {
		printk(BIOS_ERR, "Out of temporary ranges for MTRR use\n");
		return;
	}

	/* Make a copy of the original address space and tweak it with the
	 * provided range. */
	memranges_init_empty(&addr_space, NULL, 0);
	orig = get_physical_address_space();
	memranges_each_entry(r, orig) {
		unsigned long tag = range_entry_tag(r);

		/* Remove any write combining MTRRs from the temporary
		 * solution as it just fragments the address space. */
		if (tag == MTRR_TYPE_WRCOMB)
			tag = MTRR_TYPE_UNCACHEABLE;

		memranges_insert(&addr_space, range_entry_base(r),
				 range_entry_size(r), tag);
	}

	/* Place new range into the address space. */
	for (i = 0; i < ARRAY_SIZE(temp_ranges); i++) {
		if (temp_ranges[i].size != 0)
			memranges_insert(&addr_space, temp_ranges[i].begin,
					 temp_ranges[i].size, temp_ranges[i].type);
	}

	print_physical_address_space(&addr_space, "TEMPORARY");

	/* Calculate a new solution with the updated address space. */
	address_bits = cpu_phys_address_size();
	memset(&sol, 0, sizeof(sol));
	sol.mtrr_default_type =
		calc_var_mtrrs(&addr_space, above4gb, address_bits);
	prepare_var_mtrrs(&addr_space, sol.mtrr_default_type,
			  above4gb, address_bits, &sol);

	if (commit_var_mtrrs(&sol) < 0)
		printk(BIOS_WARNING, "Unable to insert temporary MTRR range: 0x%016llx - 0x%016llx size 0x%08llx type %d\n",
		       (long long)begin, (long long)begin + size - 1,
		       (long long)size, type);
	else
		put_back_original_solution = true;

	memranges_teardown(&addr_space);
}
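
/*
 * Hypothetical usage of mtrr_use_temp_range() (illustrative values only):
 * a caller that wants to stream a large payload from memory-mapped flash
 * could temporarily mark the window write-protected cacheable, e.g.
 *
 *   mtrr_use_temp_range(0xff000000, 16 * MiB, MTRR_TYPE_WRPROT);
 *
 * The boot-state hooks below put the original solution back before the
 * payload is booted or the OS is resumed.
 */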
static void remove_temp_solution(void *unused)
{
	if (put_back_original_solution)
		commit_var_mtrrs(&mtrr_global_solution);
}
BOOT_STATE_INIT_ENTRY(BS_OS_RESUME, BS_ON_ENTRY, remove_temp_solution, NULL);
BOOT_STATE_INIT_ENTRY(BS_PAYLOAD_BOOT, BS_ON_ENTRY, remove_temp_solution, NULL);