/* This only handles 32-bit MTRRs on 32-bit hosts. This is strictly wrong
   because MTRRs can span up to 40 bits (36 bits on most modern x86) */
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <asm/mtrr.h>
#include <asm/msr.h>
#include <asm/system.h>
#include <asm/cpufeature.h>
#include <asm/processor-flags.h>
#include <asm/tlbflush.h>
#include <asm/pat.h>
#include "mtrr.h"
struct mtrr_state {
	struct mtrr_var_range var_ranges[MAX_VAR_RANGES];
	mtrr_type fixed_ranges[NUM_FIXED_RANGES];
	unsigned char enabled;
	unsigned char have_fixed;
	mtrr_type def_type;
};
struct fixed_range_block {
	int base_msr;	/* start address of an MTRR block */
	int ranges;	/* number of MTRRs in this block */
};
static struct fixed_range_block fixed_range_blocks[] = {
	{ MTRRfix64K_00000_MSR, 1 },	/* one 64k MTRR */
	{ MTRRfix16K_80000_MSR, 2 },	/* two 16k MTRRs */
	{ MTRRfix4K_C0000_MSR, 8 },	/* eight 4k MTRRs */
	{}
};
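/*
 * Together these eleven fixed-range MSRs describe the first 1 MiB of the
 * physical address space: 0x00000-0x7FFFF in 64 KiB chunks, 0x80000-0xBFFFF
 * in 16 KiB chunks and 0xC0000-0xFFFFF in 4 KiB chunks. Each MSR holds eight
 * memory-type bytes, giving the 88 entries of mtrr_state.fixed_ranges.
 */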
static unsigned long smp_changes_mask;
static struct mtrr_state mtrr_state = {};
static int mtrr_state_set;
u64 mtrr_tom2;
#undef MODULE_PARAM_PREFIX
#define MODULE_PARAM_PREFIX "mtrr."

static int mtrr_show;
module_param_named(show, mtrr_show, bool, 0);
/*
 * BIOS is expected to clear MtrrFixDramModEn bit, see for example
 * "BIOS and Kernel Developer's Guide for the AMD Athlon 64 and AMD
 * Opteron Processors" (26094 Rev. 3.30 February 2006), section
 * "13.2.1.2 SYSCFG Register": "The MtrrFixDramModEn bit should be set
 * to 1 during BIOS initialization of the fixed MTRRs, then cleared to
 * 0 for operation."
 */
static inline void k8_check_syscfg_dram_mod_en(void)
{
	u32 lo, hi;

	if (!((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) &&
	      (boot_cpu_data.x86 >= 0x0f)))
		return;

	rdmsr(MSR_K8_SYSCFG, lo, hi);
	if (lo & K8_MTRRFIXRANGE_DRAM_MODIFY) {
		printk(KERN_ERR "MTRR: CPU %u: SYSCFG[MtrrFixDramModEn]"
		       " not cleared by BIOS, clearing this bit\n",
		       smp_processor_id());
		lo &= ~K8_MTRRFIXRANGE_DRAM_MODIFY;
		mtrr_wrmsr(MSR_K8_SYSCFG, lo, hi);
	}
}
/*
 * Returns the effective MTRR type for the region.
 * Error returns:
 * - 0xFE - when the range is "not entirely covered" by _any_ var range MTRR
 * - 0xFF - when MTRR is not enabled
 */
u8 mtrr_type_lookup(u64 start, u64 end)
{
	int i;
	u64 base, mask;
	u8 prev_match, curr_match;

	if (!mtrr_state_set)
		return 0xFF;

	if (!mtrr_state.enabled)
		return 0xFF;

	/* Make end inclusive instead of exclusive */
	end--;

	/* Look in fixed ranges. Just return the type as per start */
	if (mtrr_state.have_fixed && (start < 0x100000)) {
		int idx;

		if (start < 0x80000) {
			idx = 0;
			idx += (start >> 16);
			return mtrr_state.fixed_ranges[idx];
		} else if (start < 0xC0000) {
			idx = 1 * 8;
			idx += ((start - 0x80000) >> 14);
			return mtrr_state.fixed_ranges[idx];
		} else if (start < 0x1000000) {
			idx = 3 * 8;
			idx += ((start - 0xC0000) >> 12);
			return mtrr_state.fixed_ranges[idx];
		}
	}

	/*
	 * Look in variable ranges.
	 * Look for multiple ranges matching this address and pick type
	 * as per MTRR precedence.
	 */
	if (!(mtrr_state.enabled & 2)) {
		return mtrr_state.def_type;
	}

	prev_match = 0xFF;
	for (i = 0; i < num_var_ranges; ++i) {
		unsigned short start_state, end_state;

		if (!(mtrr_state.var_ranges[i].mask_lo & (1 << 11)))
			continue;

		base = (((u64)mtrr_state.var_ranges[i].base_hi) << 32) +
		       (mtrr_state.var_ranges[i].base_lo & PAGE_MASK);
		mask = (((u64)mtrr_state.var_ranges[i].mask_hi) << 32) +
		       (mtrr_state.var_ranges[i].mask_lo & PAGE_MASK);

		start_state = ((start & mask) == (base & mask));
		end_state = ((end & mask) == (base & mask));
		if (start_state != end_state)
			return 0xFE;

		if ((start & mask) != (base & mask)) {
			continue;
		}

		curr_match = mtrr_state.var_ranges[i].base_lo & 0xff;
		if (prev_match == 0xFF) {
			prev_match = curr_match;
			continue;
		}

		if (prev_match == MTRR_TYPE_UNCACHABLE ||
		    curr_match == MTRR_TYPE_UNCACHABLE) {
			return MTRR_TYPE_UNCACHABLE;
		}

		if ((prev_match == MTRR_TYPE_WRBACK &&
		     curr_match == MTRR_TYPE_WRTHROUGH) ||
		    (prev_match == MTRR_TYPE_WRTHROUGH &&
		     curr_match == MTRR_TYPE_WRBACK)) {
			prev_match = MTRR_TYPE_WRTHROUGH;
			curr_match = MTRR_TYPE_WRTHROUGH;
		}

		if (prev_match != curr_match) {
			return MTRR_TYPE_UNCACHABLE;
		}
	}

	if (start >= (1ULL << 32) && (end < mtrr_tom2))
		return MTRR_TYPE_WRBACK;

	if (prev_match != 0xFF)
		return prev_match;

	return mtrr_state.def_type;
}
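/*
 * Note on the precedence applied above: UNCACHABLE always wins, a mix of
 * WRBACK and WRTHROUGH resolves to WRTHROUGH, and any other disagreement
 * between overlapping variable ranges is treated as UNCACHABLE; if no
 * variable range matches, the default type is used.
 */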
/* Get the MSR pair relating to a var range */
static void
get_mtrr_var_range(unsigned int index, struct mtrr_var_range *vr)
{
	rdmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
	rdmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
}
/* Fill the MSR pair relating to a var range */
void fill_mtrr_var_range(unsigned int index,
		u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi)
{
	struct mtrr_var_range *vr;

	vr = mtrr_state.var_ranges;

	vr[index].base_lo = base_lo;
	vr[index].base_hi = base_hi;
	vr[index].mask_lo = mask_lo;
	vr[index].mask_hi = mask_hi;
}
static void
get_fixed_ranges(mtrr_type *frs)
{
	unsigned int *p = (unsigned int *) frs;
	int i;

	k8_check_syscfg_dram_mod_en();

	rdmsr(MTRRfix64K_00000_MSR, p[0], p[1]);

	for (i = 0; i < 2; i++)
		rdmsr(MTRRfix16K_80000_MSR + i, p[2 + i * 2], p[3 + i * 2]);
	for (i = 0; i < 8; i++)
		rdmsr(MTRRfix4K_C0000_MSR + i, p[6 + i * 2], p[7 + i * 2]);
}
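/*
 * The eleven fixed-range MSRs are read above as 22 consecutive 32-bit words,
 * so *frs ends up holding all 88 fixed-range type bytes in address order;
 * set_fixed_ranges() below walks the same layout when writing them back.
 */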
void mtrr_save_fixed_ranges(void *info)
{
	get_fixed_ranges(mtrr_state.fixed_ranges);
}
static void print_fixed(unsigned base, unsigned step, const mtrr_type *types)
{
	unsigned i;

	for (i = 0; i < 8; ++i, ++types, base += step)
		printk(KERN_INFO "MTRR %05X-%05X %s\n",
			base, base + step - 1, mtrr_attrib_to_str(*types));
}
static void prepare_set(void);
static void post_set(void);
/* Grab all of the MTRR state for this CPU into *state */
void __init get_mtrr_state(void)
{
	unsigned int i;
	struct mtrr_var_range *vrs;
	unsigned lo, dummy;
	unsigned long flags;

	vrs = mtrr_state.var_ranges;

	rdmsr(MTRRcap_MSR, lo, dummy);
	mtrr_state.have_fixed = (lo >> 8) & 1;

	for (i = 0; i < num_var_ranges; i++)
		get_mtrr_var_range(i, &vrs[i]);
	if (mtrr_state.have_fixed)
		get_fixed_ranges(mtrr_state.fixed_ranges);

	rdmsr(MTRRdefType_MSR, lo, dummy);
	mtrr_state.def_type = (lo & 0xff);
	mtrr_state.enabled = (lo & 0xc00) >> 10;

	if (amd_special_default_mtrr()) {
		unsigned low, high;

		/* TOP_MEM2 */
		rdmsr(MSR_K8_TOP_MEM2, low, high);
		mtrr_tom2 = high;
		mtrr_tom2 <<= 32;
		mtrr_tom2 |= low;
		mtrr_tom2 &= 0xffffff800000ULL;
	}

	if (mtrr_show) {
		int high_width;

		printk(KERN_INFO "MTRR default type: %s\n",
		       mtrr_attrib_to_str(mtrr_state.def_type));
		if (mtrr_state.have_fixed) {
			printk(KERN_INFO "MTRR fixed ranges %sabled:\n",
			       mtrr_state.enabled & 1 ? "en" : "dis");
			print_fixed(0x00000, 0x10000, mtrr_state.fixed_ranges + 0);
			for (i = 0; i < 2; ++i)
				print_fixed(0x80000 + i * 0x20000, 0x04000,
					    mtrr_state.fixed_ranges + (i + 1) * 8);
			for (i = 0; i < 8; ++i)
				print_fixed(0xC0000 + i * 0x08000, 0x01000,
					    mtrr_state.fixed_ranges + (i + 3) * 8);
		}
		printk(KERN_INFO "MTRR variable ranges %sabled:\n",
		       mtrr_state.enabled & 2 ? "en" : "dis");
		/* Hex digits needed to print the address bits above bit 31 */
		high_width = ((size_or_mask ? ffs(size_or_mask) - 1 : 32) -
			      (32 - PAGE_SHIFT) + 3) / 4;
		for (i = 0; i < num_var_ranges; ++i) {
			if (mtrr_state.var_ranges[i].mask_lo & (1 << 11))
				printk(KERN_INFO "MTRR %u base %0*X%05X000 mask %0*X%05X000 %s\n",
				       i,
				       high_width,
				       mtrr_state.var_ranges[i].base_hi,
				       mtrr_state.var_ranges[i].base_lo >> 12,
				       high_width,
				       mtrr_state.var_ranges[i].mask_hi,
				       mtrr_state.var_ranges[i].mask_lo >> 12,
				       mtrr_attrib_to_str(mtrr_state.var_ranges[i].base_lo & 0xff));
			else
				printk(KERN_INFO "MTRR %u disabled\n", i);
		}
		if (mtrr_tom2)
			printk(KERN_INFO "TOM2: %016llx aka %lldM\n",
			       mtrr_tom2, mtrr_tom2 >> 20);
	}

	mtrr_state_set = 1;

	/* PAT setup for BP. We need to go through sync steps here */
	local_irq_save(flags);
	prepare_set();

	pat_init();

	post_set();
	local_irq_restore(flags);
}
/* Some BIOSes are broken and don't set all MTRRs the same! */
void __init mtrr_state_warn(void)
{
	unsigned long mask = smp_changes_mask;

	if (!mask)
		return;
	if (mask & MTRR_CHANGE_MASK_FIXED)
		printk(KERN_WARNING "mtrr: your CPUs had inconsistent fixed MTRR settings\n");
	if (mask & MTRR_CHANGE_MASK_VARIABLE)
		printk(KERN_WARNING "mtrr: your CPUs had inconsistent variable MTRR settings\n");
	if (mask & MTRR_CHANGE_MASK_DEFTYPE)
		printk(KERN_WARNING "mtrr: your CPUs had inconsistent MTRRdefType settings\n");
	printk(KERN_INFO "mtrr: probably your BIOS does not set up all CPUs.\n");
	printk(KERN_INFO "mtrr: corrected configuration.\n");
}
/* Doesn't attempt to pass an error out to MTRR users
   because it's quite complicated in some cases and probably not
   worth it because the best error handling is to ignore it. */
void mtrr_wrmsr(unsigned msr, unsigned a, unsigned b)
{
	if (wrmsr_safe(msr, a, b) < 0)
		printk(KERN_ERR
			"MTRR: CPU %u: Writing MSR %x to %x:%x failed\n",
			smp_processor_id(), msr, a, b);
}
/**
 * set_fixed_range - checks & updates a fixed-range MTRR if it differs from the value it should have
 * @msr: MSR address of the MTRR which should be checked and updated
 * @changed: pointer which indicates whether the MTRR needed to be changed
 * @msrwords: pointer to the MSR values which the MSR should have
 */
static void set_fixed_range(int msr, bool *changed, unsigned int *msrwords)
{
	unsigned lo, hi;

	rdmsr(msr, lo, hi);

	if (lo != msrwords[0] || hi != msrwords[1]) {
		mtrr_wrmsr(msr, msrwords[0], msrwords[1]);
		*changed = true;
	}
}
/**
 * generic_get_free_region - Get a free MTRR.
 * @base: The starting (base) address of the region.
 * @size: The size (in bytes) of the region.
 * @replace_reg: mtrr index to be replaced; set to invalid value if none.
 *
 * Returns: The index of the region on success, else negative on error.
 */
int generic_get_free_region(unsigned long base, unsigned long size, int replace_reg)
{
	int i, max;
	mtrr_type ltype;
	unsigned long lbase, lsize;

	max = num_var_ranges;
	if (replace_reg >= 0 && replace_reg < max)
		return replace_reg;
	for (i = 0; i < max; ++i) {
		mtrr_if->get(i, &lbase, &lsize, &ltype);
		if (lsize == 0)
			return i;
	}
	return -ENOSPC;
}
static void generic_get_mtrr(unsigned int reg, unsigned long *base,
			     unsigned long *size, mtrr_type *type)
{
	unsigned int mask_lo, mask_hi, base_lo, base_hi;
	unsigned int tmp, hi;

	rdmsr(MTRRphysMask_MSR(reg), mask_lo, mask_hi);
	if ((mask_lo & 0x800) == 0) {
		/* Invalid (i.e. free) range */
		*base = 0;
		*size = 0;
		*type = 0;
		return;
	}

	rdmsr(MTRRphysBase_MSR(reg), base_lo, base_hi);

	/* Work out the shifted address mask. */
	tmp = mask_hi << (32 - PAGE_SHIFT) | mask_lo >> PAGE_SHIFT;
	mask_lo = size_or_mask | tmp;

	/* Expand tmp with high bits to all 1s */
	hi = fls(tmp);
	if (hi > 0) {
		tmp |= ~((1<<(hi - 1)) - 1);

		if (tmp != mask_lo) {
			printk(KERN_INFO "mtrr: your BIOS has set up an incorrect mask, fixing it up.\n");
			mask_lo = tmp;
		}
	}

	/* This works correctly if size is a power of two, i.e. a
	   contiguous range. */
	*size = -mask_lo;
	*base = base_hi << (32 - PAGE_SHIFT) | base_lo >> PAGE_SHIFT;
	*type = base_lo & 0xff;
}
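/*
 * The size recovery above relies on the mask being contiguous: for a
 * properly aligned power-of-two region, the mask in page units (with the
 * bits above the supported physical-address width already forced to 1 via
 * size_or_mask) has every bit set from log2(size in pages) upward, so
 * negating it yields the size in pages.
 */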
/**
 * set_fixed_ranges - checks & updates the fixed-range MTRRs if they differ from the saved set
 * @frs: pointer to fixed-range MTRR values, saved by get_fixed_ranges()
 */
static int set_fixed_ranges(mtrr_type *frs)
{
	unsigned long long *saved = (unsigned long long *) frs;
	bool changed = false;
	int block = -1, range;

	k8_check_syscfg_dram_mod_en();

	while (fixed_range_blocks[++block].ranges)
		for (range = 0; range < fixed_range_blocks[block].ranges; range++)
			set_fixed_range(fixed_range_blocks[block].base_msr + range,
					&changed, (unsigned int *) saved++);

	return changed;
}
/* Set the MSR pair relating to a var range. Returns true if
   changes are made */
static bool set_mtrr_var_ranges(unsigned int index, struct mtrr_var_range *vr)
{
	unsigned int lo, hi;
	bool changed = false;

	rdmsr(MTRRphysBase_MSR(index), lo, hi);
	if ((vr->base_lo & 0xfffff0ffUL) != (lo & 0xfffff0ffUL)
	    || (vr->base_hi & (size_and_mask >> (32 - PAGE_SHIFT))) !=
		(hi & (size_and_mask >> (32 - PAGE_SHIFT)))) {
		mtrr_wrmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
		changed = true;
	}

	rdmsr(MTRRphysMask_MSR(index), lo, hi);

	if ((vr->mask_lo & 0xfffff800UL) != (lo & 0xfffff800UL)
	    || (vr->mask_hi & (size_and_mask >> (32 - PAGE_SHIFT))) !=
		(hi & (size_and_mask >> (32 - PAGE_SHIFT)))) {
		mtrr_wrmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
		changed = true;
	}

	return changed;
}
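/*
 * The comparison masks above follow the MTRR register layout: in
 * MTRRphysBase, 0xfffff0ff keeps the type field (bits 7:0) and the low base
 * address bits (31:12) while ignoring the reserved bits 11:8; in
 * MTRRphysMask, 0xfffff800 keeps the valid bit (11) and the low mask bits
 * (31:12). size_and_mask limits the high-word comparison to implemented
 * physical address bits.
 */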
static u32 deftype_lo, deftype_hi;
/**
 * set_mtrr_state - Set the MTRR state for this CPU.
 *
 * NOTE: The CPU must already be in a safe state for MTRR changes.
 * RETURNS: 0 if no changes made, else a mask indicating what was changed.
 */
static unsigned long set_mtrr_state(void)
{
	unsigned int i;
	unsigned long change_mask = 0;

	for (i = 0; i < num_var_ranges; i++)
		if (set_mtrr_var_ranges(i, &mtrr_state.var_ranges[i]))
			change_mask |= MTRR_CHANGE_MASK_VARIABLE;

	if (mtrr_state.have_fixed && set_fixed_ranges(mtrr_state.fixed_ranges))
		change_mask |= MTRR_CHANGE_MASK_FIXED;

	/* Set_mtrr_restore restores the old value of MTRRdefType,
	   so to set it we fiddle with the saved value */
	if ((deftype_lo & 0xff) != mtrr_state.def_type
	    || ((deftype_lo & 0xc00) >> 10) != mtrr_state.enabled) {
		deftype_lo = (deftype_lo & ~0xcff) | mtrr_state.def_type |
			     (mtrr_state.enabled << 10);
		change_mask |= MTRR_CHANGE_MASK_DEFTYPE;
	}

	return change_mask;
}
static unsigned long cr4;
static DEFINE_SPINLOCK(set_atomicity_lock);
/*
 * Since we are disabling the cache don't allow any interrupts - they
 * would run extremely slow and would only increase the pain. The caller must
 * ensure that local interrupts are disabled and are reenabled after post_set()
 * has been called.
 */
static void prepare_set(void) __acquires(set_atomicity_lock)
{
	unsigned long cr0;

	/* Note that this is not ideal, since the cache is only flushed/disabled
	   for this CPU while the MTRRs are changed, but changing this requires
	   more invasive changes to the way the kernel boots */

	spin_lock(&set_atomicity_lock);

	/* Enter the no-fill (CD=1, NW=0) cache mode and flush caches. */
	cr0 = read_cr0() | X86_CR0_CD;
	write_cr0(cr0);
	wbinvd();

	/* Save value of CR4 and clear Page Global Enable (bit 7) */
	if (cpu_has_pge) {
		cr4 = read_cr4();
		write_cr4(cr4 & ~X86_CR4_PGE);
	}

	/* Flush all TLBs via a mov %cr3, %reg; mov %reg, %cr3 */
	__flush_tlb();

	/* Save MTRR state */
	rdmsr(MTRRdefType_MSR, deftype_lo, deftype_hi);

	/* Disable MTRRs, and set the default type to uncached */
	mtrr_wrmsr(MTRRdefType_MSR, deftype_lo & ~0xcff, deftype_hi);
}
static void post_set(void) __releases(set_atomicity_lock)
{
	/* Flush TLBs (no need to flush caches - they are disabled) */
	__flush_tlb();

	/* Intel (P6) standard MTRRs */
	mtrr_wrmsr(MTRRdefType_MSR, deftype_lo, deftype_hi);

	/* Enable caches by clearing CD (bit 30) in CR0 */
	write_cr0(read_cr0() & 0xbfffffff);

	/* Restore value of CR4 */
	if (cpu_has_pge)
		write_cr4(cr4);
	spin_unlock(&set_atomicity_lock);
}
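/*
 * Taken together, prepare_set() and post_set() implement the usual safe
 * MTRR update sequence: serialize on set_atomicity_lock, enter no-fill
 * cache mode (CD=1) and flush caches and TLBs, disable the MTRRs while they
 * are rewritten, then re-enable MTRRs, re-enable the cache and flush the
 * TLBs again before releasing the lock.
 */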
static void generic_set_all(void)
{
	unsigned long mask, count;
	unsigned long flags;

	local_irq_save(flags);
	prepare_set();

	/* Actually set the state */
	mask = set_mtrr_state();

	post_set();
	local_irq_restore(flags);

	/* Use the atomic bitops to update the global mask */
	for (count = 0; count < sizeof mask * 8; ++count) {
		if (mask & 0x01)
			set_bit(count, &smp_changes_mask);
		mask >>= 1;
	}
}
static void generic_set_mtrr(unsigned int reg, unsigned long base,
			     unsigned long size, mtrr_type type)
/* [SUMMARY] Set variable MTRR register on the local CPU.
   <reg> The register to set.
   <base> The base address of the region.
   <size> The size of the region. If this is 0 the region is disabled.
   <type> The type of the region.
*/
{
	unsigned long flags;
	struct mtrr_var_range *vr;

	vr = &mtrr_state.var_ranges[reg];

	local_irq_save(flags);
	prepare_set();

	if (size == 0) {
		/* The invalid bit is kept in the mask, so we simply clear the
		   relevant mask register to disable a range. */
		mtrr_wrmsr(MTRRphysMask_MSR(reg), 0, 0);
		memset(vr, 0, sizeof(struct mtrr_var_range));
	} else {
		vr->base_lo = base << PAGE_SHIFT | type;
		vr->base_hi = (base & size_and_mask) >> (32 - PAGE_SHIFT);
		vr->mask_lo = -size << PAGE_SHIFT | 0x800;
		vr->mask_hi = (-size & size_and_mask) >> (32 - PAGE_SHIFT);

		mtrr_wrmsr(MTRRphysBase_MSR(reg), vr->base_lo, vr->base_hi);
		mtrr_wrmsr(MTRRphysMask_MSR(reg), vr->mask_lo, vr->mask_hi);
	}

	post_set();
	local_irq_restore(flags);
}
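/*
 * Note on the encoding above: base and size are in pages, so shifting left
 * by PAGE_SHIFT rebuilds the low 32 address bits; for a power-of-two size,
 * -size gives a mask with ones in every bit from log2(size) upward, and
 * bit 11 (0x800) marks the range valid.
 */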
int generic_validate_add_page(unsigned long base, unsigned long size, unsigned int type)
{
	unsigned long lbase, last;

	/* For Intel PPro stepping <= 7, must be 4 MiB aligned
	   and not touch 0x70000000->0x7003FFFF */
	if (is_cpu(INTEL) && boot_cpu_data.x86 == 6 &&
	    boot_cpu_data.x86_model == 1 &&
	    boot_cpu_data.x86_mask <= 7) {
		if (base & ((1 << (22 - PAGE_SHIFT)) - 1)) {
			printk(KERN_WARNING "mtrr: base(0x%lx000) is not 4 MiB aligned\n", base);
			return -EINVAL;
		}
		if (!(base + size < 0x70000 || base > 0x7003F) &&
		    (type == MTRR_TYPE_WRCOMB
		     || type == MTRR_TYPE_WRBACK)) {
			printk(KERN_WARNING "mtrr: writable mtrr between 0x70000000 and 0x7003FFFF may hang the CPU.\n");
			return -EINVAL;
		}
	}

	/* Check upper bits of base and last are equal and lower bits are 0
	   for base and 1 for last */
	last = base + size - 1;
	for (lbase = base; !(lbase & 1) && (last & 1);
	     lbase = lbase >> 1, last = last >> 1)
		;
	if (lbase != last) {
		printk(KERN_WARNING "mtrr: base(0x%lx000) is not aligned on a size(0x%lx000) boundary\n",
		       base, size);
		return -EINVAL;
	}
	return 0;
}
static int generic_have_wrcomb(void)
{
	unsigned long config, dummy;

	rdmsr(MTRRcap_MSR, config, dummy);
	return (config & (1 << 10));
}

int positive_have_wrcomb(void)
{
	return 1;
}
/* generic structure... */
struct mtrr_ops generic_mtrr_ops = {
	.set_all           = generic_set_all,
	.get               = generic_get_mtrr,
	.get_free_region   = generic_get_free_region,
	.set               = generic_set_mtrr,
	.validate_add_page = generic_validate_add_page,
	.have_wrcomb       = generic_have_wrcomb,
};