 * This only handles 32-bit MTRR on 32-bit hosts. This is strictly wrong
 * because MTRRs can span up to 40 bits (36 bits on most modern x86).
#include <linux/export.h>
#include <linux/init.h>

#include <asm/processor-flags.h>
#include <asm/cpufeature.h>
#include <asm/tlbflush.h>
struct fixed_range_block {
	int base_msr;	/* start address of an MTRR block */
	int ranges;	/* number of MTRRs in this block */
};

static struct fixed_range_block fixed_range_blocks[] = {
	{ MSR_MTRRfix64K_00000, 1 }, /* one 64k MTRR */
	{ MSR_MTRRfix16K_80000, 2 }, /* two 16k MTRRs */
	{ MSR_MTRRfix4K_C0000,  8 }, /* eight 4k MTRRs */
	{}
};

static unsigned long smp_changes_mask;
static int mtrr_state_set;

struct mtrr_state_type mtrr_state;
EXPORT_SYMBOL_GPL(mtrr_state);
/*
 * BIOS is expected to clear MtrrFixDramModEn bit, see for example
 * "BIOS and Kernel Developer's Guide for the AMD Athlon 64 and AMD
 * Opteron Processors" (26094 Rev. 3.30 February 2006), section
 * "13.2.1.2 SYSCFG Register": "The MtrrFixDramModEn bit should be set
 * to 1 during BIOS initialization of the fixed MTRRs, then cleared to
 * 0 for operation."
 */
static inline void k8_check_syscfg_dram_mod_en(void)
{
	u32 lo, hi;

	if (!((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) &&
	      (boot_cpu_data.x86 >= 0x0f)))
		return;

	rdmsr(MSR_K8_SYSCFG, lo, hi);
	if (lo & K8_MTRRFIXRANGE_DRAM_MODIFY) {
		pr_err(FW_WARN "MTRR: CPU %u: SYSCFG[MtrrFixDramModEn]"
		       " not cleared by BIOS, clearing this bit\n",
		       smp_processor_id());
		lo &= ~K8_MTRRFIXRANGE_DRAM_MODIFY;
		mtrr_wrmsr(MSR_K8_SYSCFG, lo, hi);
	}
}
/* Get the size of contiguous MTRR range */
static u64 get_mtrr_size(u64 mask)
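{
	/*
	 * The body is elided in this excerpt; what follows is a minimal
	 * reconstruction sketch, assuming size_or_mask has 1s in every
	 * page-frame bit at or above the CPU's physical address width
	 * (that assumption comes from how size_or_mask is used elsewhere
	 * in this file, not from the elided lines).
	 */
	u64 size;

	mask >>= PAGE_SHIFT;	/* switch the mask to page-frame units */
	mask |= size_or_mask;	/* force the unimplemented high bits to 1 */
	size = -mask;		/* negating the mask yields the range size in pages */
	size <<= PAGE_SHIFT;	/* back to bytes */
	return size;
}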
/*
 * Check and return the effective type for MTRR-MTRR type overlap.
 * Returns 1 if the effective type is UNCACHEABLE, else returns 0.
 */
static int check_type_overlap(u8 *prev, u8 *curr)
{
	if (*prev == MTRR_TYPE_UNCACHABLE || *curr == MTRR_TYPE_UNCACHABLE) {
		*prev = MTRR_TYPE_UNCACHABLE;
		*curr = MTRR_TYPE_UNCACHABLE;
		return 1;
	}

	if ((*prev == MTRR_TYPE_WRBACK && *curr == MTRR_TYPE_WRTHROUGH) ||
	    (*prev == MTRR_TYPE_WRTHROUGH && *curr == MTRR_TYPE_WRBACK)) {
		*prev = MTRR_TYPE_WRTHROUGH;
		*curr = MTRR_TYPE_WRTHROUGH;
		return 0;
	}

	if (*prev != *curr) {
		*prev = MTRR_TYPE_UNCACHABLE;
		*curr = MTRR_TYPE_UNCACHABLE;
		return 1;
	}

	return 0;
}
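/*
 * Worked example (editor's illustration, not part of the original file):
 * for two entries covering the same address, WRBACK plus WRTHROUGH
 * resolves to WRTHROUGH (return 0), anything plus UNCACHABLE resolves to
 * UNCACHABLE (return 1), and any other mismatch is conservatively treated
 * as UNCACHABLE as well.
 */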
/**
 * mtrr_type_lookup_fixed - look up memory type in MTRR fixed entries
 *
 * Return the MTRR fixed memory type of 'start'.
 *
 * MTRR fixed entries are divided up as follows:
 *  0x00000 - 0x7FFFF : This range is divided into eight 64KB sub-ranges
 *  0x80000 - 0xBFFFF : This range is divided into sixteen 16KB sub-ranges
 *  0xC0000 - 0xFFFFF : This range is divided into sixty-four 4KB sub-ranges
 *
 * Return Values:
 * MTRR_TYPE_(type)  - Matched memory type
 * MTRR_TYPE_INVALID - Unmatched
 */
static u8 mtrr_type_lookup_fixed(u64 start, u64 end)
{
	int idx;

	if (start >= 0x100000)
		return MTRR_TYPE_INVALID;

	/* 0x0 - 0x7FFFF */
	if (start < 0x80000) {
		idx = 0;
		idx += (start >> 16);
		return mtrr_state.fixed_ranges[idx];
	/* 0x80000 - 0xBFFFF */
	} else if (start < 0xC0000) {
		idx = 1 * 8;
		idx += ((start - 0x80000) >> 14);
		return mtrr_state.fixed_ranges[idx];
	/* 0xC0000 - 0xFFFFF */
	} else {
		idx = 3 * 8;
		idx += ((start - 0xC0000) >> 12);
		return mtrr_state.fixed_ranges[idx];
	}
}
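/*
 * Worked example (editor's illustration, not part of the original file):
 * start = 0xC8000 falls in the 4KB block, so
 * idx = 3 * 8 + ((0xC8000 - 0xC0000) >> 12) = 24 + 8 = 32, i.e. the first
 * entry of MSR_MTRRfix4K_C8000.  The offsets 8 and 24 are the number of
 * fixed_ranges[] entries consumed by the preceding 64KB block (1 MSR x 8
 * entries) and 16KB block (2 MSRs x 8 entries).
 */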
/**
 * mtrr_type_lookup_variable - look up memory type in MTRR variable entries
 *
 * Return Value:
 * MTRR_TYPE_(type) - Matched memory type or default memory type (unmatched)
 *
 * Output Arguments:
 * repeat - Set to 1 when [start:end] spanned across MTRR range and type
 *	    returned corresponds only to [start:*partial_end].  Caller has
 *	    to lookup again for [*partial_end:end].
 *
 * uniform - Set to 1 when an MTRR covers the region uniformly, i.e. the
 *	     region is fully covered by a single MTRR entry or the default
 *	     type.
 */
static u8 mtrr_type_lookup_variable(u64 start, u64 end, u64 *partial_end,
				    int *repeat, u8 *uniform)
{
	int i;
	u64 base, mask;
	u8 prev_match, curr_match;

	*repeat = 0;
	*uniform = 1;

	/* Make end inclusive instead of exclusive */
	end--;

	prev_match = MTRR_TYPE_INVALID;
	for (i = 0; i < num_var_ranges; ++i) {
		unsigned short start_state, end_state, inclusive;

		if (!(mtrr_state.var_ranges[i].mask_lo & (1 << 11)))
			continue;

		base = (((u64)mtrr_state.var_ranges[i].base_hi) << 32) +
		       (mtrr_state.var_ranges[i].base_lo & PAGE_MASK);
		mask = (((u64)mtrr_state.var_ranges[i].mask_hi) << 32) +
		       (mtrr_state.var_ranges[i].mask_lo & PAGE_MASK);

		start_state = ((start & mask) == (base & mask));
		end_state = ((end & mask) == (base & mask));
		inclusive = ((start < base) && (end > base));

		if ((start_state != end_state) || inclusive) {
			/*
			 * We have start:end spanning across an MTRR.
			 * We split the region into either
			 *
			 * - start_state:1
			 * (start:mtrr_end)(mtrr_end:end)
			 * - end_state:1
			 * (start:mtrr_start)(mtrr_start:end)
			 * - inclusive:1
			 * (start:mtrr_start)(mtrr_start:mtrr_end)(mtrr_end:end)
			 *
			 * depending on kind of overlap.
			 *
			 * Return the type of the first region and a pointer
			 * to the start of next region so that caller will be
			 * advised to lookup again after having adjusted start
			 * and end.
			 *
			 * Note: This way we handle overlaps with multiple
			 * entries and the default type properly.
			 */
			if (start_state)
				*partial_end = base + get_mtrr_size(mask);
			else
				*partial_end = base;

			if (unlikely(*partial_end <= start))
				*partial_end = start + PAGE_SIZE;

			end = *partial_end - 1; /* end is inclusive */
			*repeat = 1;
			*uniform = 0;
		}

		if ((start & mask) != (base & mask))
			continue;

		curr_match = mtrr_state.var_ranges[i].base_lo & 0xff;
		if (prev_match == MTRR_TYPE_INVALID) {
			prev_match = curr_match;
			continue;
		}

		*uniform = 0;
		if (check_type_overlap(&prev_match, &curr_match))
			return curr_match;
	}

	if (prev_match != MTRR_TYPE_INVALID)
		return prev_match;

	return mtrr_state.def_type;
}
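/*
 * Worked example (editor's illustration, not part of the original file):
 * with a single WRBACK MTRR covering 0x80000000-0x80FFFFFF, a lookup for
 * [0x80F00000:0x81100000) spans past the MTRR end.  The function returns
 * WRBACK for [0x80F00000:0x81000000), sets *partial_end = 0x81000000 and
 * *repeat = 1, and the caller is expected to look up the remainder
 * [0x81000000:0x81100000), which then falls through to the default type.
 */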
/**
 * mtrr_type_lookup - look up memory type in MTRR
 *
 * Return Values:
 * MTRR_TYPE_(type)  - The effective MTRR type for the region
 * MTRR_TYPE_INVALID - MTRR is disabled
 *
 * Output Argument:
 * uniform - Set to 1 when an MTRR covers the region uniformly, i.e. the
 *	     region is fully covered by a single MTRR entry or the default
 *	     type.
 */
u8 mtrr_type_lookup(u64 start, u64 end, u8 *uniform)
{
	u8 type, prev_type, is_uniform = 1, dummy;
	int repeat;
	u64 partial_end;

	if (!mtrr_state_set)
		return MTRR_TYPE_INVALID;

	if (!(mtrr_state.enabled & MTRR_STATE_MTRR_ENABLED))
		return MTRR_TYPE_INVALID;

	/*
	 * Look up the fixed ranges first, which take priority over
	 * the variable ranges.
	 */
	if ((start < 0x100000) &&
	    (mtrr_state.have_fixed) &&
	    (mtrr_state.enabled & MTRR_STATE_MTRR_FIXED_ENABLED)) {
		is_uniform = 0;
		type = mtrr_type_lookup_fixed(start, end);
		goto out;
	}

	/*
	 * Look up the variable ranges.  Look for multiple ranges matching
	 * this address and pick the type as per MTRR precedence.
	 */
	type = mtrr_type_lookup_variable(start, end, &partial_end,
					 &repeat, &is_uniform);

	/*
	 * Common path is with repeat = 0.
	 * However, we can have cases where [start:end] spans across some
	 * MTRR ranges and/or the default type.  Do repeated lookups for
	 * that case here.
	 */
	while (repeat) {
		prev_type = type;
		start = partial_end;
		is_uniform = 0;
		type = mtrr_type_lookup_variable(start, end, &partial_end,
						 &repeat, &dummy);

		if (check_type_overlap(&prev_type, &type))
			goto out;
	}

	if (mtrr_tom2 && (start >= (1ULL<<32)) && (end < mtrr_tom2))
		type = MTRR_TYPE_WRBACK;

out:
	*uniform = is_uniform;
	return type;
}
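/*
 * Usage sketch (editor's illustration, not part of the original file):
 * a caller such as the PAT code can ask for the effective type of a
 * physical range and whether one entry covers it uniformly.  The names
 * paddr and size below are placeholders:
 *
 *	u8 uniform;
 *	u8 type = mtrr_type_lookup(paddr, paddr + size, &uniform);
 *
 *	if (type != MTRR_TYPE_INVALID && !uniform)
 *		;	// range straddles MTRRs; caller may want to split it
 */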
/* Get the MSR pair relating to a var range */
static void
get_mtrr_var_range(unsigned int index, struct mtrr_var_range *vr)
{
	rdmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
	rdmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
}
/* Fill the MSR pair relating to a var range */
void fill_mtrr_var_range(unsigned int index,
			 u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi)
{
	struct mtrr_var_range *vr;

	vr = mtrr_state.var_ranges;

	vr[index].base_lo = base_lo;
	vr[index].base_hi = base_hi;
	vr[index].mask_lo = mask_lo;
	vr[index].mask_hi = mask_hi;
}
static void get_fixed_ranges(mtrr_type *frs)
{
	unsigned int *p = (unsigned int *)frs;
	int i;

	k8_check_syscfg_dram_mod_en();

	rdmsr(MSR_MTRRfix64K_00000, p[0], p[1]);

	for (i = 0; i < 2; i++)
		rdmsr(MSR_MTRRfix16K_80000 + i, p[2 + i * 2], p[3 + i * 2]);
	for (i = 0; i < 8; i++)
		rdmsr(MSR_MTRRfix4K_C0000 + i, p[6 + i * 2], p[7 + i * 2]);
}
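/*
 * Layout note (editor's illustration, not part of the original file):
 * each fixed-range MSR packs eight one-byte MTRR types, so the eleven
 * MSRs read above fill 11 * 8 = 88 mtrr_type entries, i.e. 22 u32 words:
 * p[0..1] for MTRRfix64K_00000, p[2..5] for the two MTRRfix16K MSRs and
 * p[6..21] for the eight MTRRfix4K MSRs.
 */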
void mtrr_save_fixed_ranges(void *info)
{
	if (boot_cpu_has(X86_FEATURE_MTRR))
		get_fixed_ranges(mtrr_state.fixed_ranges);
}
static unsigned __initdata last_fixed_start;
static unsigned __initdata last_fixed_end;
static mtrr_type __initdata last_fixed_type;

static void __init print_fixed_last(void)
{
	pr_debug(" %05X-%05X %s\n", last_fixed_start,
		 last_fixed_end - 1, mtrr_attrib_to_str(last_fixed_type));
}
static void __init update_fixed_last(unsigned base, unsigned end,
				     mtrr_type type)
{
	last_fixed_start = base;
	last_fixed_end = end;
	last_fixed_type = type;
}
static void __init
print_fixed(unsigned base, unsigned step, const mtrr_type *types)
{
	unsigned i;

	for (i = 0; i < 8; ++i, ++types, base += step) {
		if (last_fixed_end == 0) {
			update_fixed_last(base, base + step, *types);
			continue;
		}
		if (last_fixed_end == base && last_fixed_type == *types) {
			last_fixed_end = base + step;
			continue;
		}
		/* new segments: gap or different type */
		print_fixed_last();
		update_fixed_last(base, base + step, *types);
	}
}
static void prepare_set(void);
static void post_set(void);
static void __init print_mtrr_state(void)
{
	unsigned int i;
	int high_width;

	pr_debug("MTRR default type: %s\n",
		 mtrr_attrib_to_str(mtrr_state.def_type));
	if (mtrr_state.have_fixed) {
		pr_debug("MTRR fixed ranges %sabled:\n",
			 ((mtrr_state.enabled & MTRR_STATE_MTRR_ENABLED) &&
			  (mtrr_state.enabled & MTRR_STATE_MTRR_FIXED_ENABLED)) ?
			 "en" : "dis");
		print_fixed(0x00000, 0x10000, mtrr_state.fixed_ranges + 0);
		for (i = 0; i < 2; ++i)
			print_fixed(0x80000 + i * 0x20000, 0x04000,
				    mtrr_state.fixed_ranges + (i + 1) * 8);
		for (i = 0; i < 8; ++i)
			print_fixed(0xC0000 + i * 0x08000, 0x01000,
				    mtrr_state.fixed_ranges + (i + 3) * 8);
	}
	pr_debug("MTRR variable ranges %sabled:\n",
		 mtrr_state.enabled & MTRR_STATE_MTRR_ENABLED ? "en" : "dis");
	high_width = (__ffs64(size_or_mask) - (32 - PAGE_SHIFT) + 3) / 4;

	for (i = 0; i < num_var_ranges; ++i) {
		if (mtrr_state.var_ranges[i].mask_lo & (1 << 11))
			pr_debug(" %u base %0*X%05X000 mask %0*X%05X000 %s\n",
				 i,
				 high_width,
				 mtrr_state.var_ranges[i].base_hi,
				 mtrr_state.var_ranges[i].base_lo >> 12,
				 high_width,
				 mtrr_state.var_ranges[i].mask_hi,
				 mtrr_state.var_ranges[i].mask_lo >> 12,
				 mtrr_attrib_to_str(mtrr_state.var_ranges[i].base_lo & 0xff));
		else
			pr_debug(" %u disabled\n", i);
	}
	pr_debug("TOM2: %016llx aka %lldM\n", mtrr_tom2, mtrr_tom2 >> 20);
}
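/*
 * Worked example (editor's illustration, not part of the original file):
 * __ffs64(size_or_mask) equals the implemented physical address width
 * minus PAGE_SHIFT, so with a 36-bit address space high_width =
 * (24 - 20 + 3) / 4 = 7 / 4 = 1: one hex digit covers address bits 35:32,
 * which is exactly what the %0*X field above prints in front of the
 * five-digit, page-aligned low part of each base and mask.
 */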
/* PAT setup for BP. We need to go through sync steps here */
void __init mtrr_bp_pat_init(void)
{
	unsigned long flags;

	local_irq_save(flags);
	prepare_set();

	pat_init();

	post_set();
	local_irq_restore(flags);
}
/* Grab all of the MTRR state for this CPU into *state */
bool __init get_mtrr_state(void)
{
	struct mtrr_var_range *vrs;
	unsigned int i;
	unsigned lo, dummy;

	vrs = mtrr_state.var_ranges;

	rdmsr(MSR_MTRRcap, lo, dummy);
	mtrr_state.have_fixed = (lo >> 8) & 1;

	for (i = 0; i < num_var_ranges; i++)
		get_mtrr_var_range(i, &vrs[i]);
	if (mtrr_state.have_fixed)
		get_fixed_ranges(mtrr_state.fixed_ranges);

	rdmsr(MSR_MTRRdefType, lo, dummy);
	mtrr_state.def_type = (lo & 0xff);
	mtrr_state.enabled = (lo & 0xc00) >> 10;

	if (amd_special_default_mtrr()) {
		unsigned low, high;

		/* TOP_MEM2 */
		rdmsr(MSR_K8_TOP_MEM2, low, high);
		mtrr_tom2 = high;
		mtrr_tom2 <<= 32;
		mtrr_tom2 |= low;
		mtrr_tom2 &= 0xffffff800000ULL;
	}

	return !!(mtrr_state.enabled & MTRR_STATE_MTRR_ENABLED);
}
/* Some BIOS's are messed up and don't set all MTRRs the same! */
void __init mtrr_state_warn(void)
{
	unsigned long mask = smp_changes_mask;

	if (mask & MTRR_CHANGE_MASK_FIXED)
		pr_warn("mtrr: your CPUs had inconsistent fixed MTRR settings\n");
	if (mask & MTRR_CHANGE_MASK_VARIABLE)
		pr_warn("mtrr: your CPUs had inconsistent variable MTRR settings\n");
	if (mask & MTRR_CHANGE_MASK_DEFTYPE)
		pr_warn("mtrr: your CPUs had inconsistent MTRRdefType settings\n");

	pr_info("mtrr: probably your BIOS does not setup all CPUs.\n");
	pr_info("mtrr: corrected configuration.\n");
}
/*
 * Doesn't attempt to pass an error out to MTRR users
 * because it's quite complicated in some cases and probably not
 * worth it; the best error handling here is simply to ignore it.
 */
void mtrr_wrmsr(unsigned msr, unsigned a, unsigned b)
{
	if (wrmsr_safe(msr, a, b) < 0) {
		pr_err("MTRR: CPU %u: Writing MSR %x to %x:%x failed\n",
		       smp_processor_id(), msr, a, b);
	}
}
/**
 * set_fixed_range - checks & updates a fixed-range MTRR if it
 *	differs from the value it should have
 * @msr: MSR address of the MTRR which should be checked and updated
 * @changed: pointer which indicates whether the MTRR needed to be changed
 * @msrwords: pointer to the MSR values which the MSR should have
 */
static void set_fixed_range(int msr, bool *changed, unsigned int *msrwords)
{
	unsigned lo, hi;

	rdmsr(msr, lo, hi);

	if (lo != msrwords[0] || hi != msrwords[1]) {
		mtrr_wrmsr(msr, msrwords[0], msrwords[1]);
		*changed = true;
	}
}
/**
 * generic_get_free_region - Get a free MTRR.
 * @base: The starting (base) address of the region.
 * @size: The size (in bytes) of the region.
 * @replace_reg: mtrr index to be replaced; set to invalid value if none.
 *
 * Returns: The index of the region on success, else negative on error.
 */
int
generic_get_free_region(unsigned long base, unsigned long size, int replace_reg)
{
	unsigned long lbase, lsize;
	mtrr_type ltype;
	int i, max;

	max = num_var_ranges;
	if (replace_reg >= 0 && replace_reg < max)
		return replace_reg;

	for (i = 0; i < max; ++i) {
		mtrr_if->get(i, &lbase, &lsize, &ltype);
		if (lsize == 0)
			return i;
	}

	return -ENOSPC;
}
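/*
 * Usage note (editor's illustration, not part of the original file): the
 * mtrr_add/mtrr_del front end (mtrr.c, outside this excerpt) reaches this
 * through mtrr_if->get_free_region, passing replace_reg = -1 when a brand
 * new region is wanted, so the scan above runs and the first disabled slot
 * (lsize == 0) is handed out; when an existing register is being replaced,
 * its index is returned directly.
 */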
static void generic_get_mtrr(unsigned int reg, unsigned long *base,
			     unsigned long *size, mtrr_type *type)
{
	u32 mask_lo, mask_hi, base_lo, base_hi;
	unsigned int hi;
	u64 tmp, mask;

	/*
	 * get_mtrr doesn't need to update mtrr_state, also it could be called
	 * from any cpu, so try to print it out directly.
	 */
	rdmsr(MTRRphysMask_MSR(reg), mask_lo, mask_hi);

	if ((mask_lo & 0x800) == 0) {
		/* Invalid (i.e. free) range */
		*base = 0;
		*size = 0;
		*type = 0;
		return;
	}

	rdmsr(MTRRphysBase_MSR(reg), base_lo, base_hi);

	/* Work out the shifted address mask: */
	tmp = (u64)mask_hi << (32 - PAGE_SHIFT) | mask_lo >> PAGE_SHIFT;
	mask = size_or_mask | tmp;

	/* Expand tmp with high bits to all 1s: */
	hi = fls64(tmp);
	if (hi > 0) {
		tmp |= ~((1ULL << (hi - 1)) - 1);

		if (tmp != mask) {
			pr_warn("mtrr: your BIOS has configured an incorrect mask, fixing it.\n");
			add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
			mask = tmp;
		}
	}

	/*
	 * This works correctly if size is a power of two, i.e. a
	 * contiguous range:
	 */
	*size = -mask;
	*base = (u64)base_hi << (32 - PAGE_SHIFT) | base_lo >> PAGE_SHIFT;
	*type = base_lo & 0xff;
}
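/*
 * Worked example (editor's illustration, not part of the original file):
 * with a 36-bit physical address space, a variable MTRR covering 16MB has
 * physical mask bits 35:24 set, i.e. tmp = 0xFFF000 after the PAGE_SHIFT
 * shift.  OR-ing in size_or_mask sets every page-frame bit above bit 23,
 * so *size = -mask = 0x1000 pages = 16MB.  As the comment above notes,
 * this only yields the right answer when the range size is a power of two.
 */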
/**
 * set_fixed_ranges - checks & updates the fixed-range MTRRs if they
 *	differ from the saved set
 * @frs: pointer to fixed-range MTRR values, saved by get_fixed_ranges()
 */
static int set_fixed_ranges(mtrr_type *frs)
{
	unsigned long long *saved = (unsigned long long *)frs;
	bool changed = false;
	int block = -1, range;

	k8_check_syscfg_dram_mod_en();

	while (fixed_range_blocks[++block].ranges) {
		for (range = 0; range < fixed_range_blocks[block].ranges; range++)
			set_fixed_range(fixed_range_blocks[block].base_msr + range,
					&changed, (unsigned int *)saved++);
	}

	return changed;
}
/*
 * Set the MSR pair relating to a var range.
 * Returns true if changes are made.
 */
static bool set_mtrr_var_ranges(unsigned int index, struct mtrr_var_range *vr)
{
	unsigned int lo, hi;
	bool changed = false;

	rdmsr(MTRRphysBase_MSR(index), lo, hi);
	if ((vr->base_lo & 0xfffff0ffUL) != (lo & 0xfffff0ffUL)
	    || (vr->base_hi & (size_and_mask >> (32 - PAGE_SHIFT))) !=
	       (hi & (size_and_mask >> (32 - PAGE_SHIFT)))) {
		mtrr_wrmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
		changed = true;
	}

	rdmsr(MTRRphysMask_MSR(index), lo, hi);

	if ((vr->mask_lo & 0xfffff800UL) != (lo & 0xfffff800UL)
	    || (vr->mask_hi & (size_and_mask >> (32 - PAGE_SHIFT))) !=
	       (hi & (size_and_mask >> (32 - PAGE_SHIFT)))) {
		mtrr_wrmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
		changed = true;
	}

	return changed;
}
static u32 deftype_lo, deftype_hi;
/**
 * set_mtrr_state - Set the MTRR state for this CPU.
 *
 * NOTE: The CPU must already be in a safe state for MTRR changes.
 * RETURNS: 0 if no changes made, else a mask indicating what was changed.
 */
static unsigned long set_mtrr_state(void)
{
	unsigned long change_mask = 0;
	unsigned int i;

	for (i = 0; i < num_var_ranges; i++) {
		if (set_mtrr_var_ranges(i, &mtrr_state.var_ranges[i]))
			change_mask |= MTRR_CHANGE_MASK_VARIABLE;
	}

	if (mtrr_state.have_fixed && set_fixed_ranges(mtrr_state.fixed_ranges))
		change_mask |= MTRR_CHANGE_MASK_FIXED;

	/*
	 * Set_mtrr_restore restores the old value of MTRRdefType,
	 * so to set it we fiddle with the saved value:
	 */
	if ((deftype_lo & 0xff) != mtrr_state.def_type
	    || ((deftype_lo & 0xc00) >> 10) != mtrr_state.enabled) {
		deftype_lo = (deftype_lo & ~0xcff) | mtrr_state.def_type |
			     (mtrr_state.enabled << 10);
		change_mask |= MTRR_CHANGE_MASK_DEFTYPE;
	}

	return change_mask;
}
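/*
 * Bit-layout note (editor's illustration, not part of the original file):
 * in IA32_MTRR_DEF_TYPE the default type field occupies the low byte and
 * the FE/E enable bits are bits 10 and 11, so the mask ~0xcff above clears
 * exactly the type field plus the two enable bits before OR-ing in
 * mtrr_state.def_type and mtrr_state.enabled << 10.
 */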
static unsigned long cr4;
static DEFINE_RAW_SPINLOCK(set_atomicity_lock);
/*
 * Since we are disabling the cache don't allow any interrupts,
 * they would run extremely slow and would only increase the pain.
 *
 * The caller must ensure that local interrupts are disabled and
 * are reenabled after post_set() has been called.
 */
static void prepare_set(void) __acquires(set_atomicity_lock)
{
	unsigned long cr0;

	/*
	 * Note that this is not ideal
	 * since the cache is only flushed/disabled for this CPU while the
	 * MTRRs are changed, but changing this requires more invasive
	 * changes to the way the kernel boots
	 */

	raw_spin_lock(&set_atomicity_lock);

	/* Enter the no-fill (CD=1, NW=0) cache mode and flush caches. */
	cr0 = read_cr0() | X86_CR0_CD;
	write_cr0(cr0);
	wbinvd();

	/* Save value of CR4 and clear Page Global Enable (bit 7) */
	if (boot_cpu_has(X86_FEATURE_PGE)) {
		cr4 = __read_cr4();
		__write_cr4(cr4 & ~X86_CR4_PGE);
	}

	/* Flush all TLBs via a mov %cr3, %reg; mov %reg, %cr3 */
	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
	__flush_tlb();

	/* Save MTRR state */
	rdmsr(MSR_MTRRdefType, deftype_lo, deftype_hi);

	/* Disable MTRRs, and set the default type to uncached */
	mtrr_wrmsr(MSR_MTRRdefType, deftype_lo & ~0xcff, deftype_hi);
}
static void post_set(void) __releases(set_atomicity_lock)
{
	/* Flush TLBs (no need to flush caches - they are disabled) */
	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
	__flush_tlb();

	/* Intel (P6) standard MTRRs */
	mtrr_wrmsr(MSR_MTRRdefType, deftype_lo, deftype_hi);

	/* Enable caches */
	write_cr0(read_cr0() & ~X86_CR0_CD);

	/* Restore value of CR4 */
	if (boot_cpu_has(X86_FEATURE_PGE))
		__write_cr4(cr4);
	raw_spin_unlock(&set_atomicity_lock);
}
static void generic_set_all(void)
{
	unsigned long mask, count;
	unsigned long flags;

	local_irq_save(flags);
	prepare_set();

	/* Actually set the state */
	mask = set_mtrr_state();

	post_set();
	local_irq_restore(flags);

	/* Use the atomic bitops to update the global mask */
	for (count = 0; count < sizeof mask * 8; ++count) {
		if (mask & 0x01)
			set_bit(count, &smp_changes_mask);
		mask >>= 1;
	}
}
/**
 * generic_set_mtrr - set variable MTRR register on the local CPU.
 *
 * @reg: The register to set.
 * @base: The base address of the region.
 * @size: The size of the region. If this is 0 the region is disabled.
 * @type: The type of the region.
 */
static void generic_set_mtrr(unsigned int reg, unsigned long base,
			     unsigned long size, mtrr_type type)
{
	unsigned long flags;
	struct mtrr_var_range *vr;

	vr = &mtrr_state.var_ranges[reg];

	local_irq_save(flags);
	prepare_set();

	if (size == 0) {
		/*
		 * The invalid bit is kept in the mask, so we simply
		 * clear the relevant mask register to disable a range.
		 */
		mtrr_wrmsr(MTRRphysMask_MSR(reg), 0, 0);
		memset(vr, 0, sizeof(struct mtrr_var_range));
	} else {
		vr->base_lo = base << PAGE_SHIFT | type;
		vr->base_hi = (base & size_and_mask) >> (32 - PAGE_SHIFT);
		vr->mask_lo = -size << PAGE_SHIFT | 0x800;
		vr->mask_hi = (-size & size_and_mask) >> (32 - PAGE_SHIFT);

		mtrr_wrmsr(MTRRphysBase_MSR(reg), vr->base_lo, vr->base_hi);
		mtrr_wrmsr(MTRRphysMask_MSR(reg), vr->mask_lo, vr->mask_hi);
	}

	post_set();
	local_irq_restore(flags);
}
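/*
 * Worked example (editor's illustration, not part of the original file):
 * base and size arrive in units of pages.  For a 16MB WRBACK region at
 * 0x80000000, base = 0x80000 and size = 0x1000 pages, so
 * base_lo = 0x80000000 | MTRR_TYPE_WRBACK (= 0x80000006) and mask_lo
 * keeps the low 32 bits of -size << PAGE_SHIFT plus the valid bit
 * (bit 11), i.e. 0xFF000800; the *_hi halves carry whatever physical
 * address bits exist above bit 31.
 */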
int generic_validate_add_page(unsigned long base, unsigned long size,
			      unsigned int type)
{
	unsigned long lbase, last;

	/*
	 * For Intel PPro stepping <= 7, the region must be 4 MiB aligned
	 * and must not touch 0x70000000 -> 0x7003FFFF
	 */
	if (is_cpu(INTEL) && boot_cpu_data.x86 == 6 &&
	    boot_cpu_data.x86_model == 1 &&
	    boot_cpu_data.x86_stepping <= 7) {
		if (base & ((1 << (22 - PAGE_SHIFT)) - 1)) {
			pr_warn("mtrr: base(0x%lx000) is not 4 MiB aligned\n", base);
			return -EINVAL;
		}
		if (!(base + size < 0x70000 || base > 0x7003F) &&
		    (type == MTRR_TYPE_WRCOMB
		     || type == MTRR_TYPE_WRBACK)) {
			pr_warn("mtrr: writable mtrr between 0x70000000 and 0x7003FFFF may hang the CPU.\n");
			return -EINVAL;
		}
	}

	/*
	 * Check that the upper bits of base and last are equal and that the
	 * lower bits are 0 for base and 1 for last:
	 */
	last = base + size - 1;
	for (lbase = base; !(lbase & 1) && (last & 1);
	     lbase = lbase >> 1, last = last >> 1)
		;
	if (lbase != last) {
		pr_warn("mtrr: base(0x%lx000) is not aligned on a size(0x%lx000) boundary\n",
			base, size);
		return -EINVAL;
	}
	return 0;
}
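/*
 * Worked example (editor's illustration, not part of the original file):
 * base and size are in pages.  For base = 0x4000 and size = 0x1000 the
 * loop strips twelve trailing (0,1) bit pairs from (lbase, last) =
 * (0x4000, 0x4FFF) until lbase == last == 0x4, so the region is accepted.
 * For base = 0x4800 with the same size, lbase becomes odd (9) after eleven
 * shifts while last is still 10, so lbase != last and the size/alignment
 * warning above fires.
 */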
static int generic_have_wrcomb(void)
{
	unsigned long config, dummy;

	rdmsr(MSR_MTRRcap, config, dummy);
	return config & (1 << 10);
}
int positive_have_wrcomb(void)
{
	return 1;
}
/*
 * Generic structure...
 */
const struct mtrr_ops generic_mtrr_ops = {
	.set_all		= generic_set_all,
	.get			= generic_get_mtrr,
	.get_free_region	= generic_get_free_region,
	.set			= generic_set_mtrr,
	.validate_add_page	= generic_validate_add_page,
	.have_wrcomb		= generic_have_wrcomb,
};