/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>

#include <asm/tlbflush.h>
#include <asm/mmu-44x.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_44x.h>
#include "timing.h"

#include "44x_tlb.h"
#include "trace.h"

#ifndef PPC44x_TLBE_SIZE
#define PPC44x_TLBE_SIZE	PPC44x_TLB_4K
#endif

#define PAGE_SIZE_4K (1<<12)
#define PAGE_MASK_4K (~(PAGE_SIZE_4K - 1))

#define PPC44x_TLB_UATTR_MASK \
	(PPC44x_TLB_U0|PPC44x_TLB_U1|PPC44x_TLB_U2|PPC44x_TLB_U3)
#define PPC44x_TLB_USER_PERM_MASK (PPC44x_TLB_UX|PPC44x_TLB_UR|PPC44x_TLB_UW)
#define PPC44x_TLB_SUPER_PERM_MASK (PPC44x_TLB_SX|PPC44x_TLB_SR|PPC44x_TLB_SW)

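/* The supervisor-to-user translation in kvmppc_44x_tlb_shadow_attrib()
 * below relies on each U* permission bit sitting exactly three bit
 * positions above its S* counterpart in word2, e.g.
 * PPC44x_TLB_SX << 3 == PPC44x_TLB_UX. */
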
void kvmppc_dump_tlbs(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
	struct kvmppc_44x_tlbe *tlbe;
	int i;

	printk("vcpu %d TLB dump:\n", vcpu->vcpu_id);
	printk("| %2s | %3s | %8s | %8s | %8s |\n",
			"nr", "tid", "word0", "word1", "word2");

	for (i = 0; i < ARRAY_SIZE(vcpu_44x->guest_tlb); i++) {
		tlbe = &vcpu_44x->guest_tlb[i];
		if (tlbe->word0 & PPC44x_TLB_VALID)
			printk(" G%2d | %02X | %08X | %08X | %08X |\n",
			       i, tlbe->tid, tlbe->word0, tlbe->word1,
			       tlbe->word2);
	}
}

static inline void kvmppc_44x_tlbie(unsigned int index)
{
	/* 0 <= index < 64, so the V bit is clear and we can use the index as
	 * word0. */
	asm volatile(
		"tlbwe %[index], %[index], 0\n"
	:
	: [index] "r"(index)
	);
}

static inline void kvmppc_44x_tlbre(unsigned int index,
                                    struct kvmppc_44x_tlbe *tlbe)
{
	asm volatile(
		"tlbre %[word0], %[index], 0\n"
		"mfspr %[tid], %[sprn_mmucr]\n"
		"andi. %[tid], %[tid], 0xff\n"
		"tlbre %[word1], %[index], 1\n"
		"tlbre %[word2], %[index], 2\n"
		: [word0] "=r"(tlbe->word0),
		  [word1] "=r"(tlbe->word1),
		  [word2] "=r"(tlbe->word2),
		  [tid]   "=r"(tlbe->tid)
		: [index] "r"(index),
		  [sprn_mmucr] "i"(SPRN_MMUCR)
		: "cc"
	);
}

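/* tlbre returns only the three entry words; the hardware deposits the
 * entry's TID into MMUCR[STID] as a side effect, which the mfspr/andi.
 * pair above recovers. The "cc" clobber accounts for andi. updating CR0. */
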
static inline void kvmppc_44x_tlbwe(unsigned int index,
                                    struct kvmppc_44x_tlbe *stlbe)
{
	unsigned long tmp;

	asm volatile(
		"mfspr %[tmp], %[sprn_mmucr]\n"
		"rlwimi %[tmp], %[tid], 0, 0xff\n"
		"mtspr %[sprn_mmucr], %[tmp]\n"
		"tlbwe %[word0], %[index], 0\n"
		"tlbwe %[word1], %[index], 1\n"
		"tlbwe %[word2], %[index], 2\n"
		: [tmp]   "=&r"(tmp)
		: [word0] "r"(stlbe->word0),
		  [word1] "r"(stlbe->word1),
		  [word2] "r"(stlbe->word2),
		  [tid]   "r"(stlbe->tid),
		  [index] "r"(index),
		  [sprn_mmucr] "i"(SPRN_MMUCR)
	);
}

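/* Conversely, tlbwe takes the new entry's TID from MMUCR[STID], so the
 * rlwimi above must insert stlbe->tid into the low byte of MMUCR before
 * the three word writes. */
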
static u32 kvmppc_44x_tlb_shadow_attrib(u32 attrib, int usermode)
{
	/* We only care about the guest's permission and user bits. */
	attrib &= PPC44x_TLB_PERM_MASK|PPC44x_TLB_UATTR_MASK;

	if (!usermode) {
		/* Guest is in supervisor mode, so we need to translate guest
		 * supervisor permissions into user permissions. */
		attrib &= ~PPC44x_TLB_USER_PERM_MASK;
		attrib |= (attrib & PPC44x_TLB_SUPER_PERM_MASK) << 3;
	}

	/* Make sure host can always access this memory. */
	attrib |= PPC44x_TLB_SX|PPC44x_TLB_SR|PPC44x_TLB_SW;

	/* WIMGE = 0b00100 */
	attrib |= PPC44x_TLB_M;

	return attrib;
}

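/* For example: the guest always executes in host user mode, so a guest
 * supervisor mapping with SR|SW set is shadowed (while the guest is in
 * supervisor mode) by clearing the U* bits and shifting SR|SW up into
 * UR|UW; SX|SR|SW and M are then forced so the host kernel itself can
 * always reach the page. */
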
/* Load shadow TLB back into hardware. */
void kvmppc_44x_tlb_load(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
	int i;

	for (i = 0; i <= tlb_44x_hwater; i++) {
		struct kvmppc_44x_tlbe *stlbe = &vcpu_44x->shadow_tlb[i];

		if (get_tlb_v(stlbe) && get_tlb_ts(stlbe))
			kvmppc_44x_tlbwe(i, stlbe);
	}
}

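/* Only valid TS=1 entries are rewritten because kvmppc_mmu_map() forces
 * TS=1 on every shadow guest mapping; the TS bit thus doubles as a
 * "belongs to the guest" tag, and host (TS=0) entries are left alone. */
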
static void kvmppc_44x_tlbe_set_modified(struct kvmppc_vcpu_44x *vcpu_44x,
                                         unsigned int i)
{
	vcpu_44x->shadow_tlb_mod[i] = 1;
}

/* Save hardware TLB to the vcpu, and invalidate all guest mappings. */
void kvmppc_44x_tlb_put(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
	int i;

	for (i = 0; i <= tlb_44x_hwater; i++) {
		struct kvmppc_44x_tlbe *stlbe = &vcpu_44x->shadow_tlb[i];

		if (vcpu_44x->shadow_tlb_mod[i])
			kvmppc_44x_tlbre(i, stlbe);

		if (get_tlb_v(stlbe) && get_tlb_ts(stlbe))
			kvmppc_44x_tlbie(i);
	}
}

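/* The shadow_tlb_mod[] flags (set via kvmppc_44x_tlbe_set_modified()) let
 * us skip the tlbre for entries that were never rewritten, since the
 * cached shadow_tlb[] copy is still current for those. */
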
/* Search the guest TLB for a matching entry. */
int kvmppc_44x_tlb_index(struct kvm_vcpu *vcpu, gva_t eaddr, unsigned int pid,
                         unsigned int as)
{
	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
	int i;

	/* XXX Replace loop with fancy data structures. */
	for (i = 0; i < ARRAY_SIZE(vcpu_44x->guest_tlb); i++) {
		struct kvmppc_44x_tlbe *tlbe = &vcpu_44x->guest_tlb[i];
		unsigned int tid;

		if (eaddr < get_tlb_eaddr(tlbe))
			continue;

		if (eaddr > get_tlb_end(tlbe))
			continue;

		tid = get_tlb_tid(tlbe);
		if (tid && (tid != pid))
			continue;

		if (!get_tlb_v(tlbe))
			continue;

		if (get_tlb_ts(tlbe) != as)
			continue;

		return i;
	}

	return -1;
}

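/* As on real 440 hardware, a guest entry with TID=0 is global and matches
 * any search PID; hence the "tid && (tid != pid)" test rather than a plain
 * inequality. */
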
gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int gtlb_index,
                       gva_t eaddr)
{
	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
	struct kvmppc_44x_tlbe *gtlbe = &vcpu_44x->guest_tlb[gtlb_index];
	unsigned int pgmask = get_tlb_bytes(gtlbe) - 1;

	return get_tlb_raddr(gtlbe) | (eaddr & pgmask);
}

int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
{
	unsigned int as = !!(vcpu->arch.shared->msr & MSR_IS);

	return kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as);
}

int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
{
	unsigned int as = !!(vcpu->arch.shared->msr & MSR_DS);

	return kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as);
}

void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu)
{
}

void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu)
{
}

static void kvmppc_44x_shadow_release(struct kvmppc_vcpu_44x *vcpu_44x,
                                      unsigned int stlb_index)
{
	struct kvmppc_44x_shadow_ref *ref = &vcpu_44x->shadow_refs[stlb_index];

	if (!ref->page)
		return;

	/* Discard from the TLB. */
	/* Note: we could actually invalidate a host mapping, if the host overwrote
	 * this TLB entry since we inserted a guest mapping. */
	kvmppc_44x_tlbie(stlb_index);

	/* Now release the page. */
	if (ref->writeable)
		kvm_release_page_dirty(ref->page);
	else
		kvm_release_page_clean(ref->page);

	ref->page = NULL;

	/* XXX set tlb_44x_index to stlb_index? */

	trace_kvm_stlb_inval(stlb_index);
}

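/* The dirty/clean distinction matters on release: a mapping inserted with
 * UW set may have been written by the guest, so the page must be marked
 * dirty or the host could drop the guest's data on writeback. */
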
void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
	int i;

	for (i = 0; i <= tlb_44x_hwater; i++)
		kvmppc_44x_shadow_release(vcpu_44x, i);
}

/**
 * kvmppc_mmu_map -- create a host mapping for guest memory
 *
 * If the guest wanted a larger page than the host supports, only the first
 * host page is mapped here and the rest are demand faulted.
 *
 * If the guest wanted a smaller page than the host page size, we map only the
 * guest-size page (i.e. not a full host page mapping).
 *
 * Caller must ensure that the specified guest TLB entry is safe to insert into
 * the shadow TLB.
 */
void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr,
                    unsigned int gtlb_index)
{
	struct kvmppc_44x_tlbe stlbe;
	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
	struct kvmppc_44x_tlbe *gtlbe = &vcpu_44x->guest_tlb[gtlb_index];
	struct kvmppc_44x_shadow_ref *ref;
	struct page *new_page;
	hpa_t hpaddr;
	gfn_t gfn;
	u32 asid = gtlbe->tid;
	u32 flags = gtlbe->word2;
	u32 max_bytes = get_tlb_bytes(gtlbe);
	unsigned int victim;

	/* Select TLB entry to clobber. Indirectly guard against races with the TLB
	 * miss handler by disabling interrupts. */
	local_irq_disable();
	victim = ++tlb_44x_index;
	if (victim > tlb_44x_hwater)
		victim = 0;
	tlb_44x_index = victim;
	local_irq_enable();

	/* Get reference to new page. */
	gfn = gpaddr >> PAGE_SHIFT;
	new_page = gfn_to_page(vcpu->kvm, gfn);
	if (is_error_page(new_page)) {
		printk(KERN_ERR "Couldn't get guest page for gfn %llx!\n",
		       (unsigned long long)gfn);
		kvm_release_page_clean(new_page);
		return;
	}
	hpaddr = page_to_phys(new_page);

	/* Invalidate any previous shadow mappings. */
	kvmppc_44x_shadow_release(vcpu_44x, victim);

	/* XXX Make sure (va, size) doesn't overlap any other
	 * entries. 440x6 user manual says the result would be
	 * "undefined." */

	/* XXX what about AS? */

	/* Force TS=1 for all guest mappings. */
	stlbe.word0 = PPC44x_TLB_VALID | PPC44x_TLB_TS;

	if (max_bytes >= PAGE_SIZE) {
		/* Guest mapping is larger than or equal to host page size. We can use
		 * a "native" host mapping. */
		stlbe.word0 |= (gvaddr & PAGE_MASK) | PPC44x_TLBE_SIZE;
	} else {
		/* Guest mapping is smaller than host page size. We must restrict the
		 * size of the mapping to be at most the smaller of the two, but for
		 * simplicity we fall back to a 4K mapping (this is probably what the
		 * guest is using anyway). */
		stlbe.word0 |= (gvaddr & PAGE_MASK_4K) | PPC44x_TLB_4K;

		/* 'hpaddr' is a host page, which is larger than the mapping we're
		 * inserting here. To compensate, we must add the in-page offset to the
		 * sub-page. */
		hpaddr |= gpaddr & (PAGE_MASK ^ PAGE_MASK_4K);
	}

	stlbe.word1 = (hpaddr & 0xfffffc00) | ((hpaddr >> 32) & 0xf);
	stlbe.word2 = kvmppc_44x_tlb_shadow_attrib(flags,
	                                           vcpu->arch.shared->msr & MSR_PR);
	stlbe.tid = !(asid & 0xff);

	/* Keep track of the reference so we can properly release it later. */
	ref = &vcpu_44x->shadow_refs[victim];
	ref->page = new_page;
	ref->gtlb_index = gtlb_index;
	ref->writeable = !!(stlbe.word2 & PPC44x_TLB_UW);
	ref->tid = stlbe.tid;

	/* Insert shadow mapping into hardware TLB. */
	kvmppc_44x_tlbe_set_modified(vcpu_44x, victim);
	kvmppc_44x_tlbwe(victim, &stlbe);
	trace_kvm_stlb_write(victim, stlbe.tid, stlbe.word0, stlbe.word1,
	                     stlbe.word2);
}

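/* word1 encoding, per the 440 TLB format: bits 0xfffffc00 hold the RPN
 * (host real page number) and the low nibble holds ERPN, i.e. physical
 * address bits 35:32, covering the 36-bit real addresses the 440 supports. */
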
/* For a particular guest TLB entry, invalidate the corresponding host TLB
 * mappings and release the host pages. */
static void kvmppc_44x_invalidate(struct kvm_vcpu *vcpu,
                                  unsigned int gtlb_index)
{
	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
	int i;

	for (i = 0; i < ARRAY_SIZE(vcpu_44x->shadow_refs); i++) {
		struct kvmppc_44x_shadow_ref *ref = &vcpu_44x->shadow_refs[i];

		if (ref->gtlb_index == gtlb_index)
			kvmppc_44x_shadow_release(vcpu_44x, i);
	}
}

void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode)
{
	vcpu->arch.shadow_pid = !usermode;
}

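/* This is the other half of the PID trick described in kvmppc_set_pid()
 * below: the guest runs under host PID 0 in guest user mode and PID 1 in
 * guest supervisor mode. Shadow entries built from guest TID=0 (global,
 * typically guest kernel) mappings get shadow TID=1 (see stlbe.tid in
 * kvmppc_mmu_map()), so guest userspace can never match them. */
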
void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 new_pid)
{
	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
	int i;

	if (unlikely(vcpu->arch.pid == new_pid))
		return;

	vcpu->arch.pid = new_pid;

	/* Guest userspace runs with TID=0 mappings and PID=0, to make sure it
	 * can't access guest kernel mappings (TID=1). When we switch to a new
	 * guest PID, which will also use host PID=0, we must discard the old guest
	 * userspace mappings. */
	for (i = 0; i < ARRAY_SIZE(vcpu_44x->shadow_refs); i++) {
		struct kvmppc_44x_shadow_ref *ref = &vcpu_44x->shadow_refs[i];

		if (ref->tid == 0)
			kvmppc_44x_shadow_release(vcpu_44x, i);
	}
}

static int tlbe_is_host_safe(const struct kvm_vcpu *vcpu,
                             const struct kvmppc_44x_tlbe *tlbe)
{
	gpa_t gpa;

	if (!get_tlb_v(tlbe))
		return 0;

	/* Does it match current guest AS? */
	/* XXX what about IS != DS? */
	if (get_tlb_ts(tlbe) != !!(vcpu->arch.shared->msr & MSR_IS))
		return 0;

	gpa = get_tlb_raddr(tlbe);
	if (!gfn_to_memslot(vcpu->kvm, gpa >> PAGE_SHIFT))
		/* Mapping is not for RAM. */
		return 0;

	return 1;
}

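/* A "no" here only means the entry is not shadowed eagerly; the mapping is
 * still architecturally valid for the guest, and any access through it is
 * handled later via the TLB miss path. */
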
int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws)
{
	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
	struct kvmppc_44x_tlbe *tlbe;
	unsigned int gtlb_index;

	gtlb_index = kvmppc_get_gpr(vcpu, ra);
	if (gtlb_index >= KVM44x_GUEST_TLB_SIZE) {
		printk("%s: index %d\n", __func__, gtlb_index);
		kvmppc_dump_vcpu(vcpu);
		return EMULATE_FAIL;
	}

	tlbe = &vcpu_44x->guest_tlb[gtlb_index];

	/* Invalidate shadow mappings for the about-to-be-clobbered TLB entry. */
	if (tlbe->word0 & PPC44x_TLB_VALID)
		kvmppc_44x_invalidate(vcpu, gtlb_index);

	switch (ws) {
	case PPC44x_TLB_PAGEID:
		tlbe->tid = get_mmucr_stid(vcpu);
		tlbe->word0 = kvmppc_get_gpr(vcpu, rs);
		break;

	case PPC44x_TLB_XLAT:
		tlbe->word1 = kvmppc_get_gpr(vcpu, rs);
		break;

	case PPC44x_TLB_ATTRIB:
		tlbe->word2 = kvmppc_get_gpr(vcpu, rs);
		break;

	default:
		return EMULATE_FAIL;
	}

	if (tlbe_is_host_safe(vcpu, tlbe)) {
		gva_t eaddr;
		gpa_t gpaddr;
		u32 bytes;

		eaddr = get_tlb_eaddr(tlbe);
		gpaddr = get_tlb_raddr(tlbe);

		/* Use the advertised page size to mask effective and real addrs. */
		bytes = get_tlb_bytes(tlbe);
		eaddr &= ~(bytes - 1);
		gpaddr &= ~(bytes - 1);

		kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
	}

	trace_kvm_gtlb_write(gtlb_index, tlbe->tid, tlbe->word0, tlbe->word1,
	                     tlbe->word2);

	kvmppc_set_exit_type(vcpu, EMULATED_TLBWE_EXITS);
	return EMULATE_DONE;
}

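/* 'ws' mirrors the WS field of the hardware tlbwe instruction:
 * PPC44x_TLB_PAGEID (0), PPC44x_TLB_XLAT (1) and PPC44x_TLB_ATTRIB (2)
 * select which word of the entry is written. A guest typically issues
 * three tlbwe traps in a row, one per word, and each lands here. */
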
int kvmppc_44x_emul_tlbsx(struct kvm_vcpu *vcpu, u8 rt, u8 ra, u8 rb, u8 rc)
{
	u32 ea;
	int gtlb_index;
	unsigned int as = get_mmucr_sts(vcpu);
	unsigned int pid = get_mmucr_stid(vcpu);

	ea = kvmppc_get_gpr(vcpu, rb);
	if (ra)
		ea += kvmppc_get_gpr(vcpu, ra);

	gtlb_index = kvmppc_44x_tlb_index(vcpu, ea, pid, as);
	if (rc) {
		u32 cr = kvmppc_get_cr(vcpu);

		if (gtlb_index < 0)
			kvmppc_set_cr(vcpu, cr & ~0x20000000);
		else
			kvmppc_set_cr(vcpu, cr | 0x20000000);
	}
	kvmppc_set_gpr(vcpu, rt, gtlb_index);

	kvmppc_set_exit_type(vcpu, EMULATED_TLBSX_EXITS);
	return EMULATE_DONE;
}
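
/* 0x20000000 is CR0[EQ]. Like the hardware tlbsx. instruction, a hit sets
 * EQ and a miss clears it when the trapped instruction's Rc bit ('rc') is
 * set; on a miss, rt receives the -1 returned by kvmppc_44x_tlb_index(). */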