// SPDX-License-Identifier: GPL-2.0-only
/*
 * This only handles 32bit MTRR on 32bit hosts. This is strictly wrong
 * because MTRRs can span up to 40 bits (36 bits on most modern x86).
 */
#include <linux/export.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/cc_platform.h>

#include <asm/processor-flags.h>
#include <asm/cacheinfo.h>
#include <asm/cpufeature.h>
#include <asm/hypervisor.h>
#include <asm/mshyperv.h>
#include <asm/tlbflush.h>
#include <asm/mtrr.h>
#include <asm/msr.h>
#include <asm/memtype.h>

#include "mtrr.h"
struct fixed_range_block {
	int base_msr;		/* start address of an MTRR block */
	int ranges;		/* number of MTRRs in this block  */
};

static struct fixed_range_block fixed_range_blocks[] = {
	{ MSR_MTRRfix64K_00000, 1 }, /* one   64k MTRR  */
	{ MSR_MTRRfix16K_80000, 2 }, /* two   16k MTRRs */
	{ MSR_MTRRfix4K_C0000,  8 }, /* eight  4k MTRRs */
	{}
};
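/*
 * Layout of one cache_map entry.  The definition is reconstructed here from
 * how cache_map is used below (range start/end, its cache mode and a "fixed"
 * marker for entries stemming from fixed MTRRs); the exact field widths of
 * the upstream structure may differ.
 */
struct cache_map {
	u64 start;
	u64 end;
	u64 type:8;
	u64 fixed:1;
};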
bool mtrr_debug;

static int __init mtrr_param_setup(char *str)
{
	int rc = 0;

	if (!str)
		return -EINVAL;
	if (!strcmp(str, "debug"))
		mtrr_debug = true;
	else
		rc = -EINVAL;

	return rc;
}
early_param("mtrr", mtrr_param_setup);
/*
 * CACHE_MAP_MAX is the maximum number of memory ranges in cache_map, where
 * no 2 adjacent ranges have the same cache mode (those would be merged).
 * The number is based on the worst case:
 * - no two adjacent fixed MTRRs share the same cache mode
 * - one variable MTRR is spanning a huge area with mode WB
 * - 255 variable MTRRs with mode UC all overlap with the WB MTRR, creating 2
 *   additional ranges each (result like "ababababa...aba" with a = WB, b = UC),
 *   accounting for MTRR_MAX_VAR_RANGES * 2 - 1 range entries
 * - a TOP_MEM2 area (even with overlapping an UC MTRR can't add 2 range entries
 *   to the possible maximum, as it always starts at 4GB, thus it can't be in
 *   the middle of that MTRR, unless that MTRR starts at 0, which would remove
 *   the initial "a" from the "abababa" pattern above)
 * The map won't contain ranges with no matching MTRR (those fall back to the
 * default cache mode).
 */
#define CACHE_MAP_MAX	(MTRR_NUM_FIXED_RANGES + MTRR_MAX_VAR_RANGES * 2)
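/*
 * Worked example of the bound above (illustrative, using the current
 * constants): MTRR_NUM_FIXED_RANGES is 88 and MTRR_MAX_VAR_RANGES is 256,
 * so CACHE_MAP_MAX allows for 88 + 2 * 256 = 600 map entries.
 */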
static struct cache_map init_cache_map[CACHE_MAP_MAX] __initdata;

static struct cache_map *cache_map __refdata = init_cache_map;
static unsigned int cache_map_size = CACHE_MAP_MAX;
static unsigned int cache_map_n;
static unsigned int cache_map_fixed;

static unsigned long smp_changes_mask;
static int mtrr_state_set;
u64 mtrr_tom2;

struct mtrr_state_type mtrr_state;
EXPORT_SYMBOL_GPL(mtrr_state);
/* Reserved bits in the high portion of the MTRRphysBaseN MSR. */
static u32 phys_hi_rsvd;

/*
 * BIOS is expected to clear MtrrFixDramModEn bit, see for example
 * "BIOS and Kernel Developer's Guide for the AMD Athlon 64 and AMD
 * Opteron Processors" (26094 Rev. 3.30 February 2006), section
 * "13.2.1.2 SYSCFG Register": "The MtrrFixDramModEn bit should be set
 * to 1 during BIOS initialization of the fixed MTRRs, then cleared to
 * 0 for operation."
 */
static inline void k8_check_syscfg_dram_mod_en(void)
{
	u32 lo, hi;

	if (!((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) &&
	      (boot_cpu_data.x86 >= 0x0f)))
		return;

	if (cc_platform_has(CC_ATTR_HOST_SEV_SNP))
		return;

	rdmsr(MSR_AMD64_SYSCFG, lo, hi);
	if (lo & K8_MTRRFIXRANGE_DRAM_MODIFY) {
		pr_err(FW_WARN "MTRR: CPU %u: SYSCFG[MtrrFixDramModEn]"
		       " not cleared by BIOS, clearing this bit\n",
		       smp_processor_id());
		lo &= ~K8_MTRRFIXRANGE_DRAM_MODIFY;
		mtrr_wrmsr(MSR_AMD64_SYSCFG, lo, hi);
	}
}
/* Get the size of contiguous MTRR range */
static u64 get_mtrr_size(u64 mask)
{
	u64 size;

	mask |= (u64)phys_hi_rsvd << 32;
	size = -mask;

	return size;
}
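/*
 * Illustration of the size computation above (hypothetical values, assuming
 * 36 physical address bits): a variable MTRR mask with bits 35:30 set
 * describes a 1 GiB range.  OR-ing in the reserved high bits yields
 * mask = 0xffffffffc0000000, and -mask = 0x40000000 = 1 GiB.
 */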
static u8 get_var_mtrr_state(unsigned int reg, u64 *start, u64 *size)
{
	struct mtrr_var_range *mtrr = mtrr_state.var_ranges + reg;

	if (!(mtrr->mask_lo & MTRR_PHYSMASK_V))
		return MTRR_TYPE_INVALID;

	*start = (((u64)mtrr->base_hi) << 32) + (mtrr->base_lo & PAGE_MASK);
	*size = get_mtrr_size((((u64)mtrr->mask_hi) << 32) +
			      (mtrr->mask_lo & PAGE_MASK));

	return mtrr->base_lo & MTRR_PHYSBASE_TYPE;
}
static u8 get_effective_type(u8 type1, u8 type2)
{
	if (type1 == MTRR_TYPE_UNCACHABLE || type2 == MTRR_TYPE_UNCACHABLE)
		return MTRR_TYPE_UNCACHABLE;

	if ((type1 == MTRR_TYPE_WRBACK && type2 == MTRR_TYPE_WRTHROUGH) ||
	    (type1 == MTRR_TYPE_WRTHROUGH && type2 == MTRR_TYPE_WRBACK))
		return MTRR_TYPE_WRTHROUGH;

	if (type1 != type2)
		return MTRR_TYPE_UNCACHABLE;

	return type1;
}
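/*
 * Examples of the combination rules above: UC wins over everything,
 * WB combined with WT degrades to WT, any other mix of two different
 * types (e.g. WC and WB) falls back to UC, and two identical types are
 * returned unchanged.
 */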
static void rm_map_entry_at(int idx)
{
	cache_map_n--;
	if (cache_map_n > idx) {
		memmove(cache_map + idx, cache_map + idx + 1,
			sizeof(*cache_map) * (cache_map_n - idx));
	}
}
/*
 * Add an entry into cache_map at a specific index. Merges adjacent entries if
 * appropriate. Return the number of merges for correcting the scan index
 * (this is needed as merging will reduce the number of entries, which will
 * result in skipping entries in future iterations if the scan index isn't
 * corrected).
 * Note that the corrected index can never go below -1 (resulting in being 0 in
 * the next scan iteration), as "2" is returned only if the current index is
 * larger than or equal to 1.
 */
static int add_map_entry_at(u64 start, u64 end, u8 type, int idx)
{
	bool merge_prev = false, merge_next = false;

	if (start >= end)
		return 0;

	if (idx > 0) {
		struct cache_map *prev = cache_map + idx - 1;

		if (!prev->fixed && start == prev->end && type == prev->type)
			merge_prev = true;
	}

	if (idx < cache_map_n) {
		struct cache_map *next = cache_map + idx;

		if (!next->fixed && end == next->start && type == next->type)
			merge_next = true;
	}

	if (merge_prev && merge_next) {
		cache_map[idx - 1].end = cache_map[idx].end;
		rm_map_entry_at(idx);
		return 2;
	}
	if (merge_prev) {
		cache_map[idx - 1].end = end;
		return 1;
	}
	if (merge_next) {
		cache_map[idx].start = start;
		return 1;
	}

	/* Sanity check: the array should NEVER be too small! */
	if (cache_map_n == cache_map_size) {
		WARN(1, "MTRR cache mode memory map exhausted!\n");
		cache_map_n = cache_map_fixed;
		return 0;
	}

	if (cache_map_n > idx) {
		memmove(cache_map + idx + 1, cache_map + idx,
			sizeof(*cache_map) * (cache_map_n - idx));
	}

	cache_map[idx].start = start;
	cache_map[idx].end = end;
	cache_map[idx].type = type;
	cache_map[idx].fixed = 0;
	cache_map_n++;

	return 0;
}
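/*
 * Example of the merge accounting above (illustrative): inserting [2G, 3G)
 * WB at an index whose predecessor is a non-fixed [1G, 2G) WB entry simply
 * extends that entry to [1G, 3G) and returns 1, so the caller steps its scan
 * index back by one to compensate for the merged entry.
 */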
/* Clear a part of an entry. Return 1 if start of entry is still valid. */
static int clr_map_range_at(u64 start, u64 end, int idx)
{
	int ret = start != cache_map[idx].start;
	u64 tmp;

	if (start == cache_map[idx].start && end == cache_map[idx].end) {
		rm_map_entry_at(idx);
	} else if (start == cache_map[idx].start) {
		cache_map[idx].start = end;
	} else if (end == cache_map[idx].end) {
		cache_map[idx].end = start;
	} else {
		tmp = cache_map[idx].end;
		cache_map[idx].end = start;
		add_map_entry_at(end, tmp, cache_map[idx].type, idx + 1);
	}

	return ret;
}
/*
 * Add MTRR to the map. The current map is scanned and each part of the MTRR
 * either overlapping with an existing entry or with a hole in the map is
 * handled separately.
 */
static void add_map_entry(u64 start, u64 end, u8 type)
{
	u8 new_type, old_type;
	u64 tmp;
	int i;

	for (i = 0; i < cache_map_n && start < end; i++) {
		if (start >= cache_map[i].end)
			continue;

		if (start < cache_map[i].start) {
			/* Region start has no overlap. */
			tmp = min(end, cache_map[i].start);
			i -= add_map_entry_at(start, tmp, type, i);
			start = tmp;
			continue;
		}

		new_type = get_effective_type(type, cache_map[i].type);
		old_type = cache_map[i].type;

		if (cache_map[i].fixed || new_type == old_type) {
			/* Cut off start of new entry. */
			start = cache_map[i].end;
			continue;
		}

		/* Handle only overlapping part of region. */
		tmp = min(end, cache_map[i].end);
		i += clr_map_range_at(start, tmp, i);
		i -= add_map_entry_at(start, tmp, new_type, i);
		start = tmp;
	}

	/* Add rest of region after last map entry (rest might be empty). */
	add_map_entry_at(start, end, type, i);
}
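/*
 * Illustrative example: with a single WB entry covering [0, 4G) already in
 * the map, adding a UC MTRR for [1G, 2G) splits it into the three entries
 * [0, 1G) WB, [1G, 2G) UC and [2G, 4G) WB - the "aba" pattern referred to
 * in the CACHE_MAP_MAX comment above.
 */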
/* Add variable MTRRs to cache map. */
static void map_add_var(void)
{
	u64 start, size;
	unsigned int i;
	u8 type;

	/*
	 * Add AMD TOP_MEM2 area. Can't be added in mtrr_build_map(), as it
	 * needs to be added again when rebuilding the map due to potentially
	 * having moved as a result of variable MTRRs for memory below 4GB.
	 */
	if (mtrr_tom2) {
		add_map_entry(BIT_ULL(32), mtrr_tom2, MTRR_TYPE_WRBACK);
		cache_map[cache_map_n - 1].fixed = 1;
	}

	for (i = 0; i < num_var_ranges; i++) {
		type = get_var_mtrr_state(i, &start, &size);
		if (type != MTRR_TYPE_INVALID)
			add_map_entry(start, start + size, type);
	}
}
/*
 * Rebuild map by replacing variable entries. Needs to be called when MTRR
 * registers are being changed after boot, as such changes could include
 * removals of registers, which are complicated to handle without rebuild of
 * the map.
 */
void generic_rebuild_map(void)
{
	if (mtrr_if != &generic_mtrr_ops)
		return;

	cache_map_n = cache_map_fixed;

	map_add_var();
}
static unsigned int __init get_cache_map_size(void)
{
	return cache_map_fixed + 2 * num_var_ranges + (mtrr_tom2 != 0);
}
/* Build the cache_map containing the cache modes per memory range. */
void __init mtrr_build_map(void)
{
	u64 start, end, size;
	unsigned int i;
	u8 type;

	/* Add fixed MTRRs, optimize for adjacent entries with same type. */
	if (mtrr_state.enabled & MTRR_STATE_MTRR_FIXED_ENABLED) {
		/*
		 * Start with 64k size fixed entries, preset 1st one (hence the
		 * loop below is starting with index 1).
		 */
		start = 0;
		end = size = 0x10000;
		type = mtrr_state.fixed_ranges[0];

		for (i = 1; i < MTRR_NUM_FIXED_RANGES; i++) {
			/* 8 64k entries, then 16 16k ones, rest 4k. */
			if (i == 8 || i == 24)
				size >>= 2;

			if (mtrr_state.fixed_ranges[i] != type) {
				add_map_entry(start, end, type);
				start = end;
				type = mtrr_state.fixed_ranges[i];
			}
			end += size;
		}
		add_map_entry(start, end, type);
	}

	/* Mark fixed, they take precedence. */
	for (i = 0; i < cache_map_n; i++)
		cache_map[i].fixed = 1;
	cache_map_fixed = cache_map_n;

	map_add_var();

	pr_info("MTRR map: %u entries (%u fixed + %u variable; max %u), built from %u variable MTRRs\n",
		cache_map_n, cache_map_fixed, cache_map_n - cache_map_fixed,
		get_cache_map_size(), num_var_ranges + (mtrr_tom2 != 0));

	if (mtrr_debug) {
		for (i = 0; i < cache_map_n; i++) {
			pr_info("%3u: %016llx-%016llx %s\n", i,
				cache_map[i].start, cache_map[i].end - 1,
				mtrr_attrib_to_str(cache_map[i].type));
		}
	}
}
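/*
 * The per-entry dump above is only emitted when booting with "mtrr=debug".
 * A resulting line looks like (hypothetical values):
 *
 *	  0: 0000000000000000-000000000009ffff write-back
 */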
/* Copy the cache_map from __initdata memory to dynamically allocated one. */
void __init mtrr_copy_map(void)
{
	unsigned int new_size = get_cache_map_size();

	if (!mtrr_state.enabled || !new_size) {
		cache_map = NULL;
		return;
	}

	mutex_lock(&mtrr_mutex);

	cache_map = kcalloc(new_size, sizeof(*cache_map), GFP_KERNEL);
	if (cache_map) {
		memmove(cache_map, init_cache_map,
			cache_map_n * sizeof(*cache_map));
		cache_map_size = new_size;
	} else {
		mtrr_state.enabled = 0;
		pr_err("MTRRs disabled due to allocation failure for lookup map.\n");
	}

	mutex_unlock(&mtrr_mutex);
}
/**
 * mtrr_overwrite_state - set static MTRR state
 *
 * Used to set MTRR state via different means (e.g. with data obtained from
 * a hypervisor).
 * Is allowed only for special cases when running virtualized. Must be called
 * from the x86_init.hyper.init_platform() hook. It can be called only once.
 * The MTRR state can't be changed afterwards. To ensure that, X86_FEATURE_MTRR
 * is cleared.
 *
 * @var: MTRR variable range array to use
 * @num_var: length of the @var array
 * @def_type: default caching type
 */
void mtrr_overwrite_state(struct mtrr_var_range *var, unsigned int num_var,
			  mtrr_type def_type)
{
	unsigned int i;

	/* Only allowed to be called once before mtrr_bp_init(). */
	if (WARN_ON_ONCE(mtrr_state_set))
		return;

	/* Only allowed when running virtualized. */
	if (!cpu_feature_enabled(X86_FEATURE_HYPERVISOR))
		return;

	/*
	 * Only allowed for special virtualization cases:
	 * - when running as Hyper-V, SEV-SNP guest using vTOM
	 * - when running as Xen PV guest
	 * - when running as SEV-SNP or TDX guest to avoid unnecessary
	 *   VMM communication/Virtualization exceptions (#VC, #VE)
	 */
	if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP) &&
	    !hv_is_isolation_supported() &&
	    !cpu_feature_enabled(X86_FEATURE_XENPV) &&
	    !cpu_feature_enabled(X86_FEATURE_TDX_GUEST))
		return;

	/* Disable MTRR in order to disable MTRR modifications. */
	setup_clear_cpu_cap(X86_FEATURE_MTRR);

	if (var) {
		if (num_var > MTRR_MAX_VAR_RANGES) {
			pr_warn("Trying to overwrite MTRR state with %u variable entries\n",
				num_var);
			num_var = MTRR_MAX_VAR_RANGES;
		}
		for (i = 0; i < num_var; i++)
			mtrr_state.var_ranges[i] = var[i];
		num_var_ranges = num_var;
	}

	mtrr_state.def_type = def_type;
	mtrr_state.enabled |= MTRR_STATE_MTRR_ENABLED;

	mtrr_state_set = 1;
}
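/*
 * Illustrative call sites (hypothetical, from a platform's
 * x86_init.hyper.init_platform() hook): a guest that only wants a static
 * write-back default and no variable ranges would use
 *
 *	mtrr_overwrite_state(NULL, 0, MTRR_TYPE_WRBACK);
 *
 * while a single variable range obtained from the hypervisor could be
 * passed as
 *
 *	mtrr_overwrite_state(&var_range, 1, MTRR_TYPE_WRBACK);
 */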
static u8 type_merge(u8 type, u8 new_type, u8 *uniform)
{
	u8 effective_type;

	if (type == MTRR_TYPE_INVALID)
		return new_type;

	effective_type = get_effective_type(type, new_type);
	if (type != effective_type)
		*uniform = 0;

	return effective_type;
}
/**
 * mtrr_type_lookup - look up memory type in MTRR
 *
 * @start: Begin of the physical address range
 * @end: End of the physical address range
 * @uniform: output argument:
 *  - 1: the returned MTRR type is valid for the whole region
 *  - 0: otherwise
 *
 * Return Values:
 * MTRR_TYPE_(type)  - The effective MTRR type for the region
 * MTRR_TYPE_INVALID - MTRR is disabled
 */
u8 mtrr_type_lookup(u64 start, u64 end, u8 *uniform)
{
	u8 type = MTRR_TYPE_INVALID;
	unsigned int i;

	if (!mtrr_state_set) {
		/* Uniformity is unknown. */
		*uniform = 0;
		return MTRR_TYPE_UNCACHABLE;
	}

	*uniform = 1;

	if (!(mtrr_state.enabled & MTRR_STATE_MTRR_ENABLED))
		return MTRR_TYPE_UNCACHABLE;

	for (i = 0; i < cache_map_n && start < end; i++) {
		/* Region after current map entry? -> continue with next one. */
		if (start >= cache_map[i].end)
			continue;

		/* Start of region not covered by current map entry? */
		if (start < cache_map[i].start) {
			/* At least some part of region has default type. */
			type = type_merge(type, mtrr_state.def_type, uniform);
			/* End of region not covered, too? -> lookup done. */
			if (end <= cache_map[i].start)
				return type;
		}

		/* At least part of region covered by map entry. */
		type = type_merge(type, cache_map[i].type, uniform);

		start = cache_map[i].end;
	}

	/* End of region past last entry in map? -> use default type. */
	if (start < end)
		type = type_merge(type, mtrr_state.def_type, uniform);

	return type;
}
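/*
 * Example use (illustrative): querying the legacy VGA hole with
 *
 *	u8 uniform;
 *	u8 type = mtrr_type_lookup(0xa0000, 0xc0000, &uniform);
 *
 * returns the effective type of 0xa0000-0xbffff and sets @uniform to 1 only
 * if the whole range is mapped with that single type.
 */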
/* Get the MSR pair relating to a var range */
static void
get_mtrr_var_range(unsigned int index, struct mtrr_var_range *vr)
{
	rdmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
	rdmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
}
/* Fill the MSR pair relating to a var range */
void fill_mtrr_var_range(unsigned int index,
		u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi)
{
	struct mtrr_var_range *vr;

	vr = mtrr_state.var_ranges;

	vr[index].base_lo = base_lo;
	vr[index].base_hi = base_hi;
	vr[index].mask_lo = mask_lo;
	vr[index].mask_hi = mask_hi;
}
static void get_fixed_ranges(mtrr_type *frs)
{
	unsigned int *p = (unsigned int *)frs;
	int i;

	k8_check_syscfg_dram_mod_en();

	rdmsr(MSR_MTRRfix64K_00000, p[0], p[1]);

	for (i = 0; i < 2; i++)
		rdmsr(MSR_MTRRfix16K_80000 + i, p[2 + i * 2], p[3 + i * 2]);
	for (i = 0; i < 8; i++)
		rdmsr(MSR_MTRRfix4K_C0000 + i, p[6 + i * 2], p[7 + i * 2]);
}
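/*
 * Layout note: the reads above fill frs as 22 consecutive 32-bit words -
 * p[0..1] from the 64K MSR, p[2..5] from the two 16K MSRs and p[6..21] from
 * the eight 4K MSRs - i.e. one mtrr_type byte per fixed range,
 * MTRR_NUM_FIXED_RANGES (88) bytes in total.
 */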
void mtrr_save_fixed_ranges(void *info)
{
	if (boot_cpu_has(X86_FEATURE_MTRR))
		get_fixed_ranges(mtrr_state.fixed_ranges);
}
static unsigned __initdata last_fixed_start;
static unsigned __initdata last_fixed_end;
static mtrr_type __initdata last_fixed_type;
static void __init print_fixed_last(void)
{
	if (!last_fixed_end)
		return;

	pr_info(" %05X-%05X %s\n", last_fixed_start,
		last_fixed_end - 1, mtrr_attrib_to_str(last_fixed_type));

	last_fixed_end = 0;
}
static void __init update_fixed_last(unsigned base, unsigned end,
				     mtrr_type type)
{
	last_fixed_start = base;
	last_fixed_end = end;
	last_fixed_type = type;
}
static void __init
print_fixed(unsigned base, unsigned step, const mtrr_type *types)
{
	unsigned i;

	for (i = 0; i < 8; ++i, ++types, base += step) {
		if (last_fixed_end == 0) {
			update_fixed_last(base, base + step, *types);
			continue;
		}
		if (last_fixed_end == base && last_fixed_type == *types) {
			last_fixed_end = base + step;
			continue;
		}
		/* new segments: gap or different type */
		print_fixed_last();
		update_fixed_last(base, base + step, *types);
	}
}
static void __init print_mtrr_state(void)
{
	unsigned int i;
	int high_width;

	pr_info("MTRR default type: %s\n",
		mtrr_attrib_to_str(mtrr_state.def_type));
	if (mtrr_state.have_fixed) {
		pr_info("MTRR fixed ranges %sabled:\n",
			((mtrr_state.enabled & MTRR_STATE_MTRR_ENABLED) &&
			 (mtrr_state.enabled & MTRR_STATE_MTRR_FIXED_ENABLED)) ?
			"en" : "dis");
		print_fixed(0x00000, 0x10000, mtrr_state.fixed_ranges + 0);
		for (i = 0; i < 2; ++i)
			print_fixed(0x80000 + i * 0x20000, 0x04000,
				    mtrr_state.fixed_ranges + (i + 1) * 8);
		for (i = 0; i < 8; ++i)
			print_fixed(0xC0000 + i * 0x08000, 0x01000,
				    mtrr_state.fixed_ranges + (i + 3) * 8);

		/* tail */
		print_fixed_last();
	}
	pr_info("MTRR variable ranges %sabled:\n",
		mtrr_state.enabled & MTRR_STATE_MTRR_ENABLED ? "en" : "dis");
	high_width = (boot_cpu_data.x86_phys_bits - (32 - PAGE_SHIFT) + 3) / 4;

	for (i = 0; i < num_var_ranges; ++i) {
		if (mtrr_state.var_ranges[i].mask_lo & MTRR_PHYSMASK_V)
			pr_info(" %u base %0*X%05X000 mask %0*X%05X000 %s\n",
				i,
				high_width,
				mtrr_state.var_ranges[i].base_hi,
				mtrr_state.var_ranges[i].base_lo >> 12,
				high_width,
				mtrr_state.var_ranges[i].mask_hi,
				mtrr_state.var_ranges[i].mask_lo >> 12,
				mtrr_attrib_to_str(mtrr_state.var_ranges[i].base_lo &
						   MTRR_PHYSBASE_TYPE));
		else
			pr_info(" %u disabled\n", i);
	}
	if (mtrr_tom2)
		pr_info("TOM2: %016llx aka %lldM\n", mtrr_tom2, mtrr_tom2 >> 20);
}
/* Grab all of the MTRR state for this CPU into *state */
bool __init get_mtrr_state(void)
{
	struct mtrr_var_range *vrs;
	unsigned lo, dummy;
	unsigned int i;

	vrs = mtrr_state.var_ranges;

	rdmsr(MSR_MTRRcap, lo, dummy);
	mtrr_state.have_fixed = lo & MTRR_CAP_FIX;

	for (i = 0; i < num_var_ranges; i++)
		get_mtrr_var_range(i, &vrs[i]);
	if (mtrr_state.have_fixed)
		get_fixed_ranges(mtrr_state.fixed_ranges);

	rdmsr(MSR_MTRRdefType, lo, dummy);
	mtrr_state.def_type = lo & MTRR_DEF_TYPE_TYPE;
	mtrr_state.enabled = (lo & MTRR_DEF_TYPE_ENABLE) >> MTRR_STATE_SHIFT;

	if (amd_special_default_mtrr()) {
		unsigned low, high;

		/* TOP_MEM2 */
		rdmsr(MSR_K8_TOP_MEM2, low, high);
		mtrr_tom2 = high;
		mtrr_tom2 <<= 32;
		mtrr_tom2 |= low;
		mtrr_tom2 &= 0xffffff800000ULL;
	}

	if (mtrr_debug)
		print_mtrr_state();

	mtrr_state_set = 1;

	return !!(mtrr_state.enabled & MTRR_STATE_MTRR_ENABLED);
}
/* Some BIOS's are messed up and don't set all MTRRs the same! */
void __init mtrr_state_warn(void)
{
	unsigned long mask = smp_changes_mask;

	if (!mask)
		return;
	if (mask & MTRR_CHANGE_MASK_FIXED)
		pr_warn("mtrr: your CPUs had inconsistent fixed MTRR settings\n");
	if (mask & MTRR_CHANGE_MASK_VARIABLE)
		pr_warn("mtrr: your CPUs had inconsistent variable MTRR settings\n");
	if (mask & MTRR_CHANGE_MASK_DEFTYPE)
		pr_warn("mtrr: your CPUs had inconsistent MTRRdefType settings\n");

	pr_info("mtrr: probably your BIOS does not setup all CPUs.\n");
	pr_info("mtrr: corrected configuration.\n");
}
/*
 * Doesn't attempt to pass an error out to MTRR users
 * because it's quite complicated in some cases and probably not
 * worth it because the best error handling is to ignore it.
 */
void mtrr_wrmsr(unsigned msr, unsigned a, unsigned b)
{
	if (wrmsr_safe(msr, a, b) < 0) {
		pr_err("MTRR: CPU %u: Writing MSR %x to %x:%x failed\n",
			smp_processor_id(), msr, a, b);
	}
}
/**
 * set_fixed_range - checks & updates a fixed-range MTRR if it
 *	differs from the value it should have
 * @msr: MSR address of the MTRR which should be checked and updated
 * @changed: pointer which indicates whether the MTRR needed to be changed
 * @msrwords: pointer to the MSR values which the MSR should have
 */
static void set_fixed_range(int msr, bool *changed, unsigned int *msrwords)
{
	unsigned lo, hi;

	rdmsr(msr, lo, hi);

	if (lo != msrwords[0] || hi != msrwords[1]) {
		mtrr_wrmsr(msr, msrwords[0], msrwords[1]);
		*changed = true;
	}
}
/**
 * generic_get_free_region - Get a free MTRR.
 * @base: The starting (base) address of the region.
 * @size: The size (in bytes) of the region.
 * @replace_reg: mtrr index to be replaced; set to invalid value if none.
 *
 * Returns: The index of the region on success, else negative on error.
 */
static int
generic_get_free_region(unsigned long base, unsigned long size, int replace_reg)
{
	unsigned long lbase, lsize;
	mtrr_type ltype;
	int i, max;

	max = num_var_ranges;
	if (replace_reg >= 0 && replace_reg < max)
		return replace_reg;

	for (i = 0; i < max; ++i) {
		mtrr_if->get(i, &lbase, &lsize, &ltype);
		if (lsize == 0)
			return i;
	}

	return -ENOSPC;
}
static void generic_get_mtrr(unsigned int reg, unsigned long *base,
			     unsigned long *size, mtrr_type *type)
{
	u32 mask_lo, mask_hi, base_lo, base_hi;
	unsigned int hi;
	u64 tmp, mask;

	/*
	 * get_mtrr doesn't need to update mtrr_state, also it could be called
	 * from any cpu, so try to print it out directly.
	 */
	get_cpu();

	rdmsr(MTRRphysMask_MSR(reg), mask_lo, mask_hi);

	if (!(mask_lo & MTRR_PHYSMASK_V)) {
		/* Invalid (i.e. free) range */
		*base = 0;
		*size = 0;
		*type = 0;
		goto out_put_cpu;
	}

	rdmsr(MTRRphysBase_MSR(reg), base_lo, base_hi);

	/* Work out the shifted address mask: */
	tmp = (u64)mask_hi << 32 | (mask_lo & PAGE_MASK);
	mask = (u64)phys_hi_rsvd << 32 | tmp;

	/* Expand tmp with high bits to all 1s: */
	hi = fls64(tmp);
	if (hi > 0) {
		tmp |= ~((1ULL << (hi - 1)) - 1);

		if (tmp != mask) {
			pr_warn("mtrr: your BIOS has configured an incorrect mask, fixing it.\n");
			add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
			mask = tmp;
		}
	}

	/*
	 * This works correctly if size is a power of two, i.e. a
	 * contiguous range:
	 */
	*size = -mask >> PAGE_SHIFT;
	*base = (u64)base_hi << (32 - PAGE_SHIFT) | base_lo >> PAGE_SHIFT;
	*type = base_lo & MTRR_PHYSBASE_TYPE;

out_put_cpu:
	put_cpu();
}
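/*
 * Worked example for the size calculation above (hypothetical values,
 * assuming 36 physical address bits): a mask MSR value of 0xfc0000800
 * gives mask = 0xffffffffc0000000 once the reserved high bits are added,
 * so *size = -mask >> PAGE_SHIFT = 0x40000 pages, i.e. a 1 GiB range.
 */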
/**
 * set_fixed_ranges - checks & updates the fixed-range MTRRs if they
 *	differ from the saved set
 * @frs: pointer to fixed-range MTRR values, saved by get_fixed_ranges()
 */
static int set_fixed_ranges(mtrr_type *frs)
{
	unsigned long long *saved = (unsigned long long *)frs;
	bool changed = false;
	int block = -1, range;

	k8_check_syscfg_dram_mod_en();

	while (fixed_range_blocks[++block].ranges) {
		for (range = 0; range < fixed_range_blocks[block].ranges; range++)
			set_fixed_range(fixed_range_blocks[block].base_msr + range,
					&changed, (unsigned int *)saved++);
	}

	return changed;
}
/*
 * Set the MSR pair relating to a var range.
 * Returns true if changes are made.
 */
static bool set_mtrr_var_ranges(unsigned int index, struct mtrr_var_range *vr)
{
	unsigned int lo, hi;
	bool changed = false;

	rdmsr(MTRRphysBase_MSR(index), lo, hi);
	if ((vr->base_lo & ~MTRR_PHYSBASE_RSVD) != (lo & ~MTRR_PHYSBASE_RSVD)
	    || (vr->base_hi & ~phys_hi_rsvd) != (hi & ~phys_hi_rsvd)) {

		mtrr_wrmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
		changed = true;
	}

	rdmsr(MTRRphysMask_MSR(index), lo, hi);

	if ((vr->mask_lo & ~MTRR_PHYSMASK_RSVD) != (lo & ~MTRR_PHYSMASK_RSVD)
	    || (vr->mask_hi & ~phys_hi_rsvd) != (hi & ~phys_hi_rsvd)) {
		mtrr_wrmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
		changed = true;
	}

	return changed;
}
static u32 deftype_lo, deftype_hi;
/**
 * set_mtrr_state - Set the MTRR state for this CPU.
 *
 * NOTE: The CPU must already be in a safe state for MTRR changes, including
 *       measures that only a single CPU can be active in set_mtrr_state() in
 *       order to not be subject to races for usage of deftype_lo. This is
 *       accomplished by taking cache_disable_lock.
 * RETURNS: 0 if no changes made, else a mask indicating what was changed.
 */
static unsigned long set_mtrr_state(void)
{
	unsigned long change_mask = 0;
	unsigned int i;

	for (i = 0; i < num_var_ranges; i++) {
		if (set_mtrr_var_ranges(i, &mtrr_state.var_ranges[i]))
			change_mask |= MTRR_CHANGE_MASK_VARIABLE;
	}

	if (mtrr_state.have_fixed && set_fixed_ranges(mtrr_state.fixed_ranges))
		change_mask |= MTRR_CHANGE_MASK_FIXED;

	/*
	 * Set_mtrr_restore restores the old value of MTRRdefType,
	 * so to set it we fiddle with the saved value:
	 */
	if ((deftype_lo & MTRR_DEF_TYPE_TYPE) != mtrr_state.def_type ||
	    ((deftype_lo & MTRR_DEF_TYPE_ENABLE) >> MTRR_STATE_SHIFT) != mtrr_state.enabled) {

		deftype_lo = (deftype_lo & MTRR_DEF_TYPE_DISABLE) |
			     mtrr_state.def_type |
			     (mtrr_state.enabled << MTRR_STATE_SHIFT);
		change_mask |= MTRR_CHANGE_MASK_DEFTYPE;
	}

	return change_mask;
}
void mtrr_disable(void)
{
	/* Save MTRR state */
	rdmsr(MSR_MTRRdefType, deftype_lo, deftype_hi);

	/* Disable MTRRs, and set the default type to uncached */
	mtrr_wrmsr(MSR_MTRRdefType, deftype_lo & MTRR_DEF_TYPE_DISABLE, deftype_hi);
}
void mtrr_enable(void)
{
	/* Intel (P6) standard MTRRs */
	mtrr_wrmsr(MSR_MTRRdefType, deftype_lo, deftype_hi);
}
void mtrr_generic_set_state(void)
{
	unsigned long mask, count;

	/* Actually set the state */
	mask = set_mtrr_state();

	/* Use the atomic bitops to update the global mask */
	for (count = 0; count < sizeof(mask) * 8; ++count) {
		if (mask & 0x01)
			set_bit(count, &smp_changes_mask);
		mask >>= 1;
	}
}
/**
 * generic_set_mtrr - set variable MTRR register on the local CPU.
 *
 * @reg: The register to set.
 * @base: The base address of the region.
 * @size: The size of the region. If this is 0 the region is disabled.
 * @type: The type of the region.
 *
 * Returns nothing.
 */
static void generic_set_mtrr(unsigned int reg, unsigned long base,
			     unsigned long size, mtrr_type type)
{
	unsigned long flags;
	struct mtrr_var_range *vr;

	vr = &mtrr_state.var_ranges[reg];

	local_irq_save(flags);
	cache_disable();

	if (size == 0) {
		/*
		 * The invalid bit is kept in the mask, so we simply
		 * clear the relevant mask register to disable a range.
		 */
		mtrr_wrmsr(MTRRphysMask_MSR(reg), 0, 0);
		memset(vr, 0, sizeof(struct mtrr_var_range));
	} else {
		vr->base_lo = base << PAGE_SHIFT | type;
		vr->base_hi = (base >> (32 - PAGE_SHIFT)) & ~phys_hi_rsvd;
		vr->mask_lo = -size << PAGE_SHIFT | MTRR_PHYSMASK_V;
		vr->mask_hi = (-size >> (32 - PAGE_SHIFT)) & ~phys_hi_rsvd;

		mtrr_wrmsr(MTRRphysBase_MSR(reg), vr->base_lo, vr->base_hi);
		mtrr_wrmsr(MTRRphysMask_MSR(reg), vr->mask_lo, vr->mask_hi);
	}

	cache_enable();
	local_irq_restore(flags);
}
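/*
 * Encoding example (hypothetical values, 64-bit host with 36 physical
 * address bits): reg = 0, base = 0x40000 pages (1 GiB), size = 0x40000
 * pages and type = MTRR_TYPE_WRCOMB programs MTRRphysBase0 = 0x40000001
 * and MTRRphysMask0 = 0xfc0000800 (mask of the 1 GiB range plus the
 * valid bit).
 */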
int generic_validate_add_page(unsigned long base, unsigned long size,
			      unsigned int type)
{
	unsigned long lbase, last;

	/*
	 * For Intel PPro stepping <= 7
	 * must be 4 MiB aligned and not touch 0x70000000 -> 0x7003FFFF
	 */
	if (mtrr_if == &generic_mtrr_ops && boot_cpu_data.x86 == 6 &&
	    boot_cpu_data.x86_model == 1 &&
	    boot_cpu_data.x86_stepping <= 7) {
		if (base & ((1 << (22 - PAGE_SHIFT)) - 1)) {
			pr_warn("mtrr: base(0x%lx000) is not 4 MiB aligned\n", base);
			return -EINVAL;
		}
		if (!(base + size < 0x70000 || base > 0x7003F) &&
		    (type == MTRR_TYPE_WRCOMB
		     || type == MTRR_TYPE_WRBACK)) {
			pr_warn("mtrr: writable mtrr between 0x70000000 and 0x7003FFFF may hang the CPU.\n");
			return -EINVAL;
		}
	}

	/*
	 * Check upper bits of base and last are equal and lower bits are 0
	 * for base and 1 for last
	 */
	last = base + size - 1;
	for (lbase = base; !(lbase & 1) && (last & 1);
	     lbase = lbase >> 1, last = last >> 1)
		;
	if (lbase != last) {
		pr_warn("mtrr: base(0x%lx000) is not aligned on a size(0x%lx000) boundary\n", base, size);
		return -EINVAL;
	}
	return 0;
}
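/*
 * Example of the alignment check above (illustrative): base = 0x40000 and
 * size = 0x40000 (both in pages) strip matching low bits until lbase == last
 * and the request is accepted, while base = 0x60000 with size = 0x40000
 * stops early with lbase != last and is rejected as not size-aligned.
 */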
static int generic_have_wrcomb(void)
{
	unsigned long config, dummy;
	rdmsr(MSR_MTRRcap, config, dummy);
	return config & MTRR_CAP_WC;
}
int positive_have_wrcomb(void)
{
	return 1;
}
/*
 * Generic structure...
 */
const struct mtrr_ops generic_mtrr_ops = {
	.get			= generic_get_mtrr,
	.get_free_region	= generic_get_free_region,
	.set			= generic_set_mtrr,
	.validate_add_page	= generic_validate_add_page,
	.have_wrcomb		= generic_have_wrcomb,
};
,