// SPDX-License-Identifier: GPL-2.0-only
/*
 * vMTRR implementation
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 * Copyright (C) 2015 Intel Corporation.
 *
 * Authors:
 *   Yaniv Kamay <yaniv@qumranet.com>
 *   Avi Kivity <avi@qumranet.com>
 *   Marcelo Tosatti <mtosatti@redhat.com>
 *   Paolo Bonzini <pbonzini@redhat.com>
 *   Xiao Guangrong <guangrong.xiao@linux.intel.com>
 */

#include <linux/kvm_host.h>
#include <asm/mtrr.h>

#include "cpuid.h"
#include "mmu.h"
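
/*
 * Layout of the IA32_MTRR_DEF_TYPE MSR: bit 11 (E) enables MTRRs
 * globally, bit 10 (FE) enables the fixed-range MTRRs, and bits 7:0
 * hold the default memory type.
 */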
#define IA32_MTRR_DEF_TYPE_E		(1ULL << 11)
#define IA32_MTRR_DEF_TYPE_FE		(1ULL << 10)
#define IA32_MTRR_DEF_TYPE_TYPE_MASK	(0xff)
static bool msr_mtrr_valid(unsigned msr)
{
	switch (msr) {
	case 0x200 ... 0x200 + 2 * KVM_NR_VAR_MTRR - 1:
	case MSR_MTRRfix64K_00000:
	case MSR_MTRRfix16K_80000:
	case MSR_MTRRfix16K_A0000:
	case MSR_MTRRfix4K_C0000:
	case MSR_MTRRfix4K_C8000:
	case MSR_MTRRfix4K_D0000:
	case MSR_MTRRfix4K_D8000:
	case MSR_MTRRfix4K_E0000:
	case MSR_MTRRfix4K_E8000:
	case MSR_MTRRfix4K_F0000:
	case MSR_MTRRfix4K_F8000:
	case MSR_MTRRdefType:
	case MSR_IA32_CR_PAT:
		return true;
	}
	return false;
}
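
/*
 * Memory types defined by the SDM: 0 (UC), 1 (WC), 4 (WT), 5 (WP) and
 * 6 (WB); encodings 2, 3 and 7 are reserved, hence the 0x73 bitmap
 * below.
 */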
static bool valid_mtrr_type(unsigned t)
{
	return t < 8 && (1 << t) & 0x73; /* 0, 1, 4, 5, 6 */
}
bool kvm_mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	int i;
	u64 mask;

	if (!msr_mtrr_valid(msr))
		return false;

	if (msr == MSR_IA32_CR_PAT) {
		return kvm_pat_valid(data);
	} else if (msr == MSR_MTRRdefType) {
		if (data & ~0xcff)
			return false;
		return valid_mtrr_type(data & 0xff);
	} else if (msr >= MSR_MTRRfix64K_00000 && msr <= MSR_MTRRfix4K_F8000) {
		for (i = 0; i < 8; i++)
			if (!valid_mtrr_type((data >> (i * 8)) & 0xff))
				return false;
		return true;
	}

	/* variable MTRRs */
	WARN_ON(!(msr >= 0x200 && msr < 0x200 + 2 * KVM_NR_VAR_MTRR));

	mask = (~0ULL) << cpuid_maxphyaddr(vcpu);
	if ((msr & 1) == 0) {
		/* MTRR base */
		if (!valid_mtrr_type(data & 0xff))
			return false;
		mask |= 0xf00;
	} else {
		/* MTRR mask */
		mask |= 0x7ff;
	}

	if (data & mask) {
		kvm_inject_gp(vcpu, 0);
		return false;
	}

	return true;
}
EXPORT_SYMBOL_GPL(kvm_mtrr_valid);
static bool mtrr_is_enabled(struct kvm_mtrr *mtrr_state)
{
	return !!(mtrr_state->deftype & IA32_MTRR_DEF_TYPE_E);
}
static bool fixed_mtrr_is_enabled(struct kvm_mtrr *mtrr_state)
{
	return !!(mtrr_state->deftype & IA32_MTRR_DEF_TYPE_FE);
}
static u8 mtrr_default_type(struct kvm_mtrr *mtrr_state)
{
	return mtrr_state->deftype & IA32_MTRR_DEF_TYPE_TYPE_MASK;
}
static u8 mtrr_disabled_type(struct kvm_vcpu *vcpu)
{
	/*
	 * Intel SDM 11.11.2.2: all MTRRs are disabled when
	 * IA32_MTRR_DEF_TYPE.E bit is cleared, and the UC
	 * memory type is applied to all of physical memory.
	 *
	 * However, virtual machines can be run with CPUID such that
	 * there are no MTRRs.  In that case, the firmware will never
	 * enable MTRRs and it is obviously undesirable to run the
	 * guest entirely with UC memory, so WB is used instead.
	 */
	if (guest_cpuid_has(vcpu, X86_FEATURE_MTRR))
		return MTRR_TYPE_UNCACHABLE;
	else
		return MTRR_TYPE_WRBACK;
}
/*
 * Three terms are used in the following code:
 * - segment, an address region covered by fixed MTRRs of the same range size.
 * - unit, the portion of a segment covered by a single MSR.
 * - range, the smallest region covered by one memory cache type.
 */
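
/*
 * For example, physical address 0xd4000 lies in the 4K segment
 * [0xc0000, 0x100000), in the unit owned by MSR_MTRRfix4K_D0000
 * (each 4K unit spans 8 * 4K = 32K), and in range
 * (0xd4000 - 0xc0000) >> 12 = 20 of that segment.
 */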
struct fixed_mtrr_segment {
	u64 start;
	u64 end;

	int range_shift;

	/* the start position in kvm_mtrr.fixed_ranges[]. */
	int range_start;
};
static struct fixed_mtrr_segment fixed_seg_table[] = {
	/* MSR_MTRRfix64K_00000, 1 unit. 64K fixed mtrr. */
	{
		.start = 0x0,
		.end = 0x80000,
		.range_shift = 16, /* 64K */
		.range_start = 0,
	},

	/*
	 * MSR_MTRRfix16K_80000 ... MSR_MTRRfix16K_A0000, 2 units,
	 * 16K fixed mtrr.
	 */
	{
		.start = 0x80000,
		.end = 0xc0000,
		.range_shift = 14, /* 16K */
		.range_start = 8,
	},

	/*
	 * MSR_MTRRfix4K_C0000 ... MSR_MTRRfix4K_F8000, 8 units,
	 * 4K fixed mtrr.
	 */
	{
		.start = 0xc0000,
		.end = 0x100000,
		.range_shift = 12, /* 4K */
		.range_start = 24,
	}
};
/*
 * One unit is covered by a single MSR, and each MSR entry contains
 * 8 ranges, so the unit size is always 8 * 2^range_shift
 * (512K, 128K and 32K for the three segments above).
 */
static u64 fixed_mtrr_seg_unit_size(int seg)
{
	return 8 << fixed_seg_table[seg].range_shift;
}
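
/*
 * Map a fixed MTRR MSR to its (segment, unit) pair.  The MSRs within
 * each group have consecutive indices, so the unit number is simply
 * the offset from the group's first MSR.
 */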
static bool fixed_msr_to_seg_unit(u32 msr, int *seg, int *unit)
{
	switch (msr) {
	case MSR_MTRRfix64K_00000:
		*seg = 0;
		*unit = 0;
		break;
	case MSR_MTRRfix16K_80000 ... MSR_MTRRfix16K_A0000:
		*seg = 1;
		*unit = msr - MSR_MTRRfix16K_80000;
		break;
	case MSR_MTRRfix4K_C0000 ... MSR_MTRRfix4K_F8000:
		*seg = 2;
		*unit = msr - MSR_MTRRfix4K_C0000;
		break;
	default:
		return false;
	}

	return true;
}
static void fixed_mtrr_seg_unit_range(int seg, int unit, u64 *start, u64 *end)
{
	struct fixed_mtrr_segment *mtrr_seg = &fixed_seg_table[seg];
	u64 unit_size = fixed_mtrr_seg_unit_size(seg);

	*start = mtrr_seg->start + unit * unit_size;
	*end = *start + unit_size;
	WARN_ON(*end > mtrr_seg->end);
}
static int fixed_mtrr_seg_unit_range_index(int seg, int unit)
{
	struct fixed_mtrr_segment *mtrr_seg = &fixed_seg_table[seg];

	WARN_ON(mtrr_seg->start + unit * fixed_mtrr_seg_unit_size(seg)
		> mtrr_seg->end);

	/* each unit has 8 ranges. */
	return mtrr_seg->range_start + 8 * unit;
}
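
/*
 * The last range index in a segment: e.g. the 4K segment holds
 * (0x100000 - 0xc0000) >> 12 = 64 ranges, so it ends at index
 * 24 + 64 - 1 = 87.
 */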
static int fixed_mtrr_seg_end_range_index(int seg)
{
	struct fixed_mtrr_segment *mtrr_seg = &fixed_seg_table[seg];
	int n;

	n = (mtrr_seg->end - mtrr_seg->start) >> mtrr_seg->range_shift;
	return mtrr_seg->range_start + n - 1;
}
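
/*
 * Altogether the three segments provide 8 + 16 + 64 = 88 fixed ranges,
 * matching the size of kvm_mtrr.fixed_ranges[].
 */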
static bool fixed_msr_to_range(u32 msr, u64 *start, u64 *end)
{
	int seg, unit;

	if (!fixed_msr_to_seg_unit(msr, &seg, &unit))
		return false;

	fixed_mtrr_seg_unit_range(seg, unit, start, end);
	return true;
}
static int fixed_msr_to_range_index(u32 msr)
{
	int seg, unit;

	if (!fixed_msr_to_seg_unit(msr, &seg, &unit))
		return -1;

	return fixed_mtrr_seg_unit_range_index(seg, unit);
}
static int fixed_mtrr_addr_to_seg(u64 addr)
{
	struct fixed_mtrr_segment *mtrr_seg;
	int seg, seg_num = ARRAY_SIZE(fixed_seg_table);

	for (seg = 0; seg < seg_num; seg++) {
		mtrr_seg = &fixed_seg_table[seg];
		if (mtrr_seg->start <= addr && addr < mtrr_seg->end)
			return seg;
	}

	return -1;
}
static int fixed_mtrr_addr_seg_to_range_index(u64 addr, int seg)
{
	struct fixed_mtrr_segment *mtrr_seg;
	int index;

	mtrr_seg = &fixed_seg_table[seg];
	index = mtrr_seg->range_start;
	index += (addr - mtrr_seg->start) >> mtrr_seg->range_shift;
	return index;
}
static u64 fixed_mtrr_range_end_addr(int seg, int index)
{
	struct fixed_mtrr_segment *mtrr_seg = &fixed_seg_table[seg];
	int pos = index - mtrr_seg->range_start;

	return mtrr_seg->start + ((pos + 1) << mtrr_seg->range_shift);
}
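
/*
 * Decode a variable MTRR base/mask pair into the [start, end) physical
 * range it covers.  For example, with a 36-bit guest physical address
 * width, a mask covering bits 35:32 selects a 4G-aligned, 4G-sized
 * range: the mask stored by set_var_mtrr_msr() below has all bits
 * above the address width set, so *start | ~mask is the range's last
 * byte.
 */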
static void var_mtrr_range(struct kvm_mtrr_range *range, u64 *start, u64 *end)
{
	u64 mask;

	*start = range->base & PAGE_MASK;

	mask = range->mask & PAGE_MASK;

	/* This cannot overflow because writing to the reserved bits of
	 * variable MTRRs causes a #GP.
	 */
	*end = (*start | ~mask) + 1;
}
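
/*
 * Zap the shadow pages covering the updated range so they are rebuilt
 * with the new memory type.  Guest MTRRs only affect the effective
 * memory type when TDP is enabled and the VM has noncoherent DMA,
 * hence the early returns below.
 */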
static void update_mtrr(struct kvm_vcpu *vcpu, u32 msr)
{
	struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state;
	gfn_t start, end;
	int index;

	if (msr == MSR_IA32_CR_PAT || !tdp_enabled ||
	      !kvm_arch_has_noncoherent_dma(vcpu->kvm))
		return;

	if (!mtrr_is_enabled(mtrr_state) && msr != MSR_MTRRdefType)
		return;

	/* fixed MTRRs. */
	if (fixed_msr_to_range(msr, &start, &end)) {
		if (!fixed_mtrr_is_enabled(mtrr_state))
			return;
	} else if (msr == MSR_MTRRdefType) {
		start = 0x0;
		end = ~0ULL;
	} else {
		/* variable range MTRRs. */
		index = (msr - 0x200) / 2;
		var_mtrr_range(&mtrr_state->var_ranges[index], &start, &end);
	}

	kvm_zap_gfn_range(vcpu->kvm, gpa_to_gfn(start), gpa_to_gfn(end));
}
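
/* Bit 11 of the variable MTRR's PHYSMASK MSR is the V (valid) flag. */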
static bool var_mtrr_range_is_valid(struct kvm_mtrr_range *range)
{
	return (range->mask & (1 << 11)) != 0;
}
static void set_var_mtrr_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state;
	struct kvm_mtrr_range *tmp, *cur;
	int index, is_mtrr_mask;

	index = (msr - 0x200) / 2;
	is_mtrr_mask = msr - 0x200 - 2 * index;
	cur = &mtrr_state->var_ranges[index];

	/* remove the entry if it's in the list. */
	if (var_mtrr_range_is_valid(cur))
		list_del(&mtrr_state->var_ranges[index].node);

	/* Extend the mask with all 1 bits to the left, since those
	 * bits must implicitly be 0.  The bits are then cleared
	 * when reading them.
	 */
	if (!is_mtrr_mask)
		cur->base = data;
	else
		cur->mask = data | (-1LL << cpuid_maxphyaddr(vcpu));

	/* add it to the list if it's enabled. */
	if (var_mtrr_range_is_valid(cur)) {
		list_for_each_entry(tmp, &mtrr_state->head, node)
			if (cur->base >= tmp->base)
				break;
		list_add_tail(&cur->node, &tmp->node);
	}
}
int kvm_mtrr_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	int index;

	if (!kvm_mtrr_valid(vcpu, msr, data))
		return 1;

	index = fixed_msr_to_range_index(msr);
	if (index >= 0)
		*(u64 *)&vcpu->arch.mtrr_state.fixed_ranges[index] = data;
	else if (msr == MSR_MTRRdefType)
		vcpu->arch.mtrr_state.deftype = data;
	else if (msr == MSR_IA32_CR_PAT)
		vcpu->arch.pat = data;
	else
		/* variable range MTRRs. */
		set_var_mtrr_msr(vcpu, msr, data);

	update_mtrr(vcpu, msr);
	return 0;
}
int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
	int index;

	/* MSR_MTRRcap is a readonly MSR. */
	if (msr == MSR_MTRRcap) {
		/*
		 * SMRR = 0
		 * WC = 1
		 * FIX = 1
		 * VCNT = KVM_NR_VAR_MTRR
		 */
		*pdata = 0x500 | KVM_NR_VAR_MTRR;
		return 0;
	}

	if (!msr_mtrr_valid(msr))
		return 1;

	index = fixed_msr_to_range_index(msr);
	if (index >= 0)
		*pdata = *(u64 *)&vcpu->arch.mtrr_state.fixed_ranges[index];
	else if (msr == MSR_MTRRdefType)
		*pdata = vcpu->arch.mtrr_state.deftype;
	else if (msr == MSR_IA32_CR_PAT)
		*pdata = vcpu->arch.pat;
	else {	/* Variable MTRRs */
		int is_mtrr_mask;

		index = (msr - 0x200) / 2;
		is_mtrr_mask = msr - 0x200 - 2 * index;
		if (!is_mtrr_mask)
			*pdata = vcpu->arch.mtrr_state.var_ranges[index].base;
		else
			*pdata = vcpu->arch.mtrr_state.var_ranges[index].mask;

		*pdata &= (1ULL << cpuid_maxphyaddr(vcpu)) - 1;
	}

	return 0;
}
void kvm_vcpu_mtrr_init(struct kvm_vcpu *vcpu)
{
	INIT_LIST_HEAD(&vcpu->arch.mtrr_state.head);
}
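
/*
 * Iterator over every fixed or variable MTRR range that overlaps
 * [start, end).  Fixed ranges, which take precedence over variable
 * ranges, are walked first; iteration then falls through to the
 * variable-range list.
 */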
struct mtrr_iter {
	/* input fields. */
	struct kvm_mtrr *mtrr_state;
	u64 start;
	u64 end;

	/* output fields. */
	int mem_type;
	/* mtrr is completely disabled? */
	bool mtrr_disabled;
	/* [start, end) is not fully covered in MTRRs? */
	bool partial_map;

	/* private fields. */
	union {
		/* used for fixed MTRRs. */
		struct {
			int index;
			int seg;
		};

		/* used for var MTRRs. */
		struct {
			struct kvm_mtrr_range *range;
			/* max address that has been covered in var MTRRs. */
			u64 start_max;
		};
	};

	bool fixed;
};
static bool mtrr_lookup_fixed_start(struct mtrr_iter *iter)
{
	int seg, index;

	if (!fixed_mtrr_is_enabled(iter->mtrr_state))
		return false;

	seg = fixed_mtrr_addr_to_seg(iter->start);
	if (seg < 0)
		return false;

	iter->fixed = true;
	index = fixed_mtrr_addr_seg_to_range_index(iter->start, seg);
	iter->index = index;
	iter->seg = seg;
	return true;
}
static bool match_var_range(struct mtrr_iter *iter,
			    struct kvm_mtrr_range *range)
{
	u64 start, end;

	var_mtrr_range(range, &start, &end);
	if (!(start >= iter->end || end <= iter->start)) {
		iter->range = range;

		/*
		 * This function is called while walking kvm_mtrr.head,
		 * so this range has the minimum base address that
		 * overlaps [iter->start_max, iter->end).
		 */
		iter->partial_map |= iter->start_max < start;

		/* update the max address that has been covered. */
		iter->start_max = max(iter->start_max, end);
		return true;
	}

	return false;
}
static void __mtrr_lookup_var_next(struct mtrr_iter *iter)
{
	struct kvm_mtrr *mtrr_state = iter->mtrr_state;

	list_for_each_entry_continue(iter->range, &mtrr_state->head, node)
		if (match_var_range(iter, iter->range))
			return;

	iter->range = NULL;
	iter->partial_map |= iter->start_max < iter->end;
}
static void mtrr_lookup_var_start(struct mtrr_iter *iter)
{
	struct kvm_mtrr *mtrr_state = iter->mtrr_state;

	iter->fixed = false;
	iter->start_max = iter->start;
	iter->range = NULL;
	iter->range = list_prepare_entry(iter->range, &mtrr_state->head, node);

	__mtrr_lookup_var_next(iter);
}
static void mtrr_lookup_fixed_next(struct mtrr_iter *iter)
{
	/* terminate the lookup. */
	if (fixed_mtrr_range_end_addr(iter->seg, iter->index) >= iter->end) {
		iter->fixed = false;
		iter->range = NULL;
		return;
	}

	iter->index++;

	/* have looked up for all fixed MTRRs. */
	if (iter->index >= ARRAY_SIZE(iter->mtrr_state->fixed_ranges))
		return mtrr_lookup_var_start(iter);

	/* switch to next segment. */
	if (iter->index > fixed_mtrr_seg_end_range_index(iter->seg))
		iter->seg++;
}
static void mtrr_lookup_var_next(struct mtrr_iter *iter)
{
	__mtrr_lookup_var_next(iter);
}
static void mtrr_lookup_start(struct mtrr_iter *iter)
{
	if (!mtrr_is_enabled(iter->mtrr_state)) {
		iter->mtrr_disabled = true;
		return;
	}

	if (!mtrr_lookup_fixed_start(iter))
		mtrr_lookup_var_start(iter);
}
static void mtrr_lookup_init(struct mtrr_iter *iter,
			     struct kvm_mtrr *mtrr_state, u64 start, u64 end)
{
	iter->mtrr_state = mtrr_state;
	iter->start = start;
	iter->end = end;
	iter->mtrr_disabled = false;
	iter->partial_map = false;
	iter->fixed = false;
	iter->range = NULL;

	mtrr_lookup_start(iter);
}
static bool mtrr_lookup_okay(struct mtrr_iter *iter)
{
	if (iter->fixed) {
		iter->mem_type = iter->mtrr_state->fixed_ranges[iter->index];
		return true;
	}

	if (iter->range) {
		iter->mem_type = iter->range->base & 0xff;
		return true;
	}

	return false;
}
static void mtrr_lookup_next(struct mtrr_iter *iter)
{
	if (iter->fixed)
		mtrr_lookup_fixed_next(iter);
	else
		mtrr_lookup_var_next(iter);
}
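
/* Loop over each memory type that applies to some part of [_gpa_start_, _gpa_end_). */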
#define mtrr_for_each_mem_type(_iter_, _mtrr_, _gpa_start_, _gpa_end_) \
	for (mtrr_lookup_init(_iter_, _mtrr_, _gpa_start_, _gpa_end_); \
	     mtrr_lookup_okay(_iter_); mtrr_lookup_next(_iter_))
u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state;
	struct mtrr_iter iter;
	u64 start, end;
	int type = -1;
	const int wt_wb_mask = (1 << MTRR_TYPE_WRBACK)
			       | (1 << MTRR_TYPE_WRTHROUGH);

	start = gfn_to_gpa(gfn);
	end = start + PAGE_SIZE;

	mtrr_for_each_mem_type(&iter, mtrr_state, start, end) {
		int curr_type = iter.mem_type;

		/*
		 * Please refer to Intel SDM Volume 3: 11.11.4.1 MTRR
		 * Precedences.
		 */

		if (type == -1) {
			type = curr_type;
			continue;
		}

		/*
		 * If two or more variable memory ranges match and the
		 * memory types are identical, then that memory type is
		 * used.
		 */
		if (type == curr_type)
			continue;

		/*
		 * If two or more variable memory ranges match and one of
		 * the memory types is UC, the UC memory type is used.
		 */
		if (curr_type == MTRR_TYPE_UNCACHABLE)
			return MTRR_TYPE_UNCACHABLE;

		/*
		 * If two or more variable memory ranges match and the
		 * memory types are WT and WB, the WT memory type is used.
		 */
		if (((1 << type) & wt_wb_mask) &&
		      ((1 << curr_type) & wt_wb_mask)) {
			type = MTRR_TYPE_WRTHROUGH;
			continue;
		}

		/*
		 * For overlaps not defined by the above rules, processor
		 * behavior is undefined.
		 */

		/* We use WB for this undefined behavior. :( */
		return MTRR_TYPE_WRBACK;
	}

	if (iter.mtrr_disabled)
		return mtrr_disabled_type(vcpu);

	/* not contained in any MTRRs. */
	if (type == -1)
		return mtrr_default_type(mtrr_state);

	/*
	 * We just check one page; a page partially covered by MTRRs is
	 * impossible.
	 */
	WARN_ON(iter.partial_map);

	return type;
}
EXPORT_SYMBOL_GPL(kvm_mtrr_get_guest_memory_type);
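
/*
 * Check that every page in [gfn, gfn + page_num) has the same memory
 * type; the MMU relies on this when deciding whether a range can be
 * mapped with a large page.
 */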
bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn,
					  int page_num)
{
	struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state;
	struct mtrr_iter iter;
	u64 start, end;
	int type = -1;

	start = gfn_to_gpa(gfn);
	end = gfn_to_gpa(gfn + page_num);
	mtrr_for_each_mem_type(&iter, mtrr_state, start, end) {
		if (type == -1) {
			type = iter.mem_type;
			continue;
		}

		if (type != iter.mem_type)
			return false;
	}

	if (iter.mtrr_disabled)
		return true;

	if (!iter.partial_map)
		return true;

	if (type == -1)
		return true;

	return type == mtrr_default_type(mtrr_state);
}