"[PATCH] Fix leaks on /proc/{*/sched,sched_debug,timer_list,timer_stats}" and
[mmotm.git] / arch / powerpc / kvm / 44x_tlb.c
blobff3cb63b8117f3e1285be25a0dce34a7194f1328
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>

#include <asm/tlbflush.h>
#include <asm/mmu-44x.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_44x.h>
#include "timing.h"

#include "44x_tlb.h"
#include "trace.h"
#ifndef PPC44x_TLBE_SIZE
#define PPC44x_TLBE_SIZE	PPC44x_TLB_4K
#endif

#define PAGE_SIZE_4K (1<<12)
#define PAGE_MASK_4K (~(PAGE_SIZE_4K - 1))

#define PPC44x_TLB_UATTR_MASK \
	(PPC44x_TLB_U0|PPC44x_TLB_U1|PPC44x_TLB_U2|PPC44x_TLB_U3)
#define PPC44x_TLB_USER_PERM_MASK (PPC44x_TLB_UX|PPC44x_TLB_UR|PPC44x_TLB_UW)
#define PPC44x_TLB_SUPER_PERM_MASK (PPC44x_TLB_SX|PPC44x_TLB_SR|PPC44x_TLB_SW)
#ifdef DEBUG
void kvmppc_dump_tlbs(struct kvm_vcpu *vcpu)
{
	/* Declare vcpu_44x, which the loop below uses but the original code
	 * never defined (this block only compiles with DEBUG set). */
	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
	struct kvmppc_44x_tlbe *tlbe;
	int i;

	printk("vcpu %d TLB dump:\n", vcpu->vcpu_id);
	printk("| %2s | %3s | %8s | %8s | %8s |\n",
	       "nr", "tid", "word0", "word1", "word2");

	for (i = 0; i < ARRAY_SIZE(vcpu_44x->guest_tlb); i++) {
		tlbe = &vcpu_44x->guest_tlb[i];
		if (tlbe->word0 & PPC44x_TLB_VALID)
			printk(" G%2d |  %02X | %08X | %08X | %08X |\n",
			       i, tlbe->tid, tlbe->word0, tlbe->word1,
			       tlbe->word2);
	}
}
#endif
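
/* Invalidate hardware TLB entry 'index'. */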
static inline void kvmppc_44x_tlbie(unsigned int index)
{
	/* 0 <= index < 64, so the V bit is clear and we can use the index as
	 * word0. */
	asm volatile(
		"tlbwe %[index], %[index], 0\n"
	:
	: [index] "r"(index)
	);
}
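
/* Read hardware TLB entry 'index' back into *tlbe; the entry's TID is
 * recovered from MMUCR, where the hardware deposits it on tlbre. */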
static inline void kvmppc_44x_tlbre(unsigned int index,
                                    struct kvmppc_44x_tlbe *tlbe)
{
	asm volatile(
		"tlbre %[word0], %[index], 0\n"
		"mfspr %[tid], %[sprn_mmucr]\n"
		"andi. %[tid], %[tid], 0xff\n"
		"tlbre %[word1], %[index], 1\n"
		"tlbre %[word2], %[index], 2\n"
		: [word0] "=r"(tlbe->word0),
		  [word1] "=r"(tlbe->word1),
		  [word2] "=r"(tlbe->word2),
		  [tid]   "=r"(tlbe->tid)
		: [index] "r"(index),
		  [sprn_mmucr] "i"(SPRN_MMUCR)
		: "cc"
	);
}
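
/* Write *stlbe to hardware TLB entry 'index', loading the entry's TID into
 * MMUCR first so the hardware tags the entry with it. */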
static inline void kvmppc_44x_tlbwe(unsigned int index,
                                    struct kvmppc_44x_tlbe *stlbe)
{
	unsigned long tmp;

	asm volatile(
		"mfspr %[tmp], %[sprn_mmucr]\n"
		"rlwimi %[tmp], %[tid], 0, 0xff\n"
		"mtspr %[sprn_mmucr], %[tmp]\n"
		"tlbwe %[word0], %[index], 0\n"
		"tlbwe %[word1], %[index], 1\n"
		"tlbwe %[word2], %[index], 2\n"
		: [tmp]   "=&r"(tmp)
		: [word0] "r"(stlbe->word0),
		  [word1] "r"(stlbe->word1),
		  [word2] "r"(stlbe->word2),
		  [tid]   "r"(stlbe->tid),
		  [index] "r"(index),
		  [sprn_mmucr] "i"(SPRN_MMUCR)
	);
}
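
/* Compute shadow (host) TLB attributes from the guest's attribute word. */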
static u32 kvmppc_44x_tlb_shadow_attrib(u32 attrib, int usermode)
{
	/* We only care about the guest's permission and user bits. */
	attrib &= PPC44x_TLB_PERM_MASK|PPC44x_TLB_UATTR_MASK;

	if (!usermode) {
		/* Guest is in supervisor mode, so we need to translate guest
		 * supervisor permissions into user permissions. */
		attrib &= ~PPC44x_TLB_USER_PERM_MASK;
		attrib |= (attrib & PPC44x_TLB_SUPER_PERM_MASK) << 3;
	}

	/* Make sure host can always access this memory. */
	attrib |= PPC44x_TLB_SX|PPC44x_TLB_SR|PPC44x_TLB_SW;

	/* WIMGE = 0b00100 */
	attrib |= PPC44x_TLB_M;

	return attrib;
}
/* Load shadow TLB back into hardware. */
void kvmppc_44x_tlb_load(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
	int i;

	for (i = 0; i <= tlb_44x_hwater; i++) {
		struct kvmppc_44x_tlbe *stlbe = &vcpu_44x->shadow_tlb[i];

		if (get_tlb_v(stlbe) && get_tlb_ts(stlbe))
			kvmppc_44x_tlbwe(i, stlbe);
	}
}
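
/* Mark hardware TLB entry 'i' as modified, so its contents are read back into
 * the vcpu the next time the hardware TLB is saved. */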
static void kvmppc_44x_tlbe_set_modified(struct kvmppc_vcpu_44x *vcpu_44x,
                                         unsigned int i)
{
	vcpu_44x->shadow_tlb_mod[i] = 1;
}
/* Save hardware TLB to the vcpu, and invalidate all guest mappings. */
void kvmppc_44x_tlb_put(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
	int i;

	for (i = 0; i <= tlb_44x_hwater; i++) {
		struct kvmppc_44x_tlbe *stlbe = &vcpu_44x->shadow_tlb[i];

		if (vcpu_44x->shadow_tlb_mod[i])
			kvmppc_44x_tlbre(i, stlbe);

		if (get_tlb_v(stlbe) && get_tlb_ts(stlbe))
			kvmppc_44x_tlbie(i);
	}
}
/* Search the guest TLB for a matching entry. */
int kvmppc_44x_tlb_index(struct kvm_vcpu *vcpu, gva_t eaddr, unsigned int pid,
                         unsigned int as)
{
	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
	int i;

	/* XXX Replace loop with fancy data structures. */
	for (i = 0; i < ARRAY_SIZE(vcpu_44x->guest_tlb); i++) {
		struct kvmppc_44x_tlbe *tlbe = &vcpu_44x->guest_tlb[i];
		unsigned int tid;

		if (eaddr < get_tlb_eaddr(tlbe))
			continue;

		if (eaddr > get_tlb_end(tlbe))
			continue;

		tid = get_tlb_tid(tlbe);
		if (tid && (tid != pid))
			continue;

		if (!get_tlb_v(tlbe))
			continue;

		if (get_tlb_ts(tlbe) != as)
			continue;

		return i;
	}

	return -1;
}
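
/* Translate a guest virtual address to a guest physical address via the given
 * guest TLB entry. */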
gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int gtlb_index,
                       gva_t eaddr)
{
	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
	struct kvmppc_44x_tlbe *gtlbe = &vcpu_44x->guest_tlb[gtlb_index];
	unsigned int pgmask = get_tlb_bytes(gtlbe) - 1;

	return get_tlb_raddr(gtlbe) | (eaddr & pgmask);
}
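
/* Find the guest TLB entry (if any) translating 'eaddr' in the instruction or
 * data address space, respectively; returns -1 if there is no match. */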
int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
{
	unsigned int as = !!(vcpu->arch.msr & MSR_IS);

	return kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as);
}

int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
{
	unsigned int as = !!(vcpu->arch.msr & MSR_DS);

	return kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as);
}
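
/* Guest TLB misses require no extra bookkeeping on 44x; these stubs exist to
 * satisfy the common BookE MMU interface. */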
void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu)
{
}

void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu)
{
}
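
/* Invalidate shadow TLB entry 'stlb_index' and drop the reference on the host
 * page backing it, marking the page dirty if the mapping was writable. */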
static void kvmppc_44x_shadow_release(struct kvmppc_vcpu_44x *vcpu_44x,
                                      unsigned int stlb_index)
{
	struct kvmppc_44x_shadow_ref *ref = &vcpu_44x->shadow_refs[stlb_index];

	if (!ref->page)
		return;

	/* Discard from the TLB. */
	/* Note: we could actually invalidate a host mapping, if the host overwrote
	 * this TLB entry since we inserted a guest mapping. */
	kvmppc_44x_tlbie(stlb_index);

	/* Now release the page. */
	if (ref->writeable)
		kvm_release_page_dirty(ref->page);
	else
		kvm_release_page_clean(ref->page);

	ref->page = NULL;

	/* XXX set tlb_44x_index to stlb_index? */

	trace_kvm_stlb_inval(stlb_index);
}
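
/* Release all shadow TLB entries and their page references. */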
void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
	int i;

	for (i = 0; i <= tlb_44x_hwater; i++)
		kvmppc_44x_shadow_release(vcpu_44x, i);
}
/**
 * kvmppc_mmu_map -- create a host mapping for guest memory
 *
 * If the guest wanted a larger page than the host supports, only the first
 * host page is mapped here and the rest are demand faulted.
 *
 * If the guest wanted a smaller page than the host page size, we map only the
 * guest-size page (i.e. not a full host page mapping).
 *
 * Caller must ensure that the specified guest TLB entry is safe to insert into
 * the shadow TLB.
 */
void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr,
                    unsigned int gtlb_index)
{
	struct kvmppc_44x_tlbe stlbe;
	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
	struct kvmppc_44x_tlbe *gtlbe = &vcpu_44x->guest_tlb[gtlb_index];
	struct kvmppc_44x_shadow_ref *ref;
	struct page *new_page;
	hpa_t hpaddr;
	gfn_t gfn;
	u32 asid = gtlbe->tid;
	u32 flags = gtlbe->word2;
	u32 max_bytes = get_tlb_bytes(gtlbe);
	unsigned int victim;

	/* Select TLB entry to clobber. Indirectly guard against races with the
	 * TLB miss handler by disabling interrupts. */
	local_irq_disable();
	victim = ++tlb_44x_index;
	if (victim > tlb_44x_hwater)
		victim = 0;
	tlb_44x_index = victim;
	local_irq_enable();

	/* Get reference to new page. */
	gfn = gpaddr >> PAGE_SHIFT;
	new_page = gfn_to_page(vcpu->kvm, gfn);
	if (is_error_page(new_page)) {
		/* gfn_t is 64-bit, so use %llx rather than %lx here. */
		printk(KERN_ERR "Couldn't get guest page for gfn %llx!\n",
		       (unsigned long long)gfn);
		kvm_release_page_clean(new_page);
		return;
	}
	hpaddr = page_to_phys(new_page);

	/* Invalidate any previous shadow mappings. */
	kvmppc_44x_shadow_release(vcpu_44x, victim);

	/* XXX Make sure (va, size) doesn't overlap any other
	 * entries. 440x6 user manual says the result would be
	 * "undefined." */

	/* XXX what about AS? */

	/* Force TS=1 for all guest mappings. */
	stlbe.word0 = PPC44x_TLB_VALID | PPC44x_TLB_TS;

	if (max_bytes >= PAGE_SIZE) {
		/* Guest mapping is larger than or equal to host page size. We can use
		 * a "native" host mapping. */
		stlbe.word0 |= (gvaddr & PAGE_MASK) | PPC44x_TLBE_SIZE;
	} else {
		/* Guest mapping is smaller than host page size. We must restrict the
		 * size of the mapping to be at most the smaller of the two, but for
		 * simplicity we fall back to a 4K mapping (this is probably what the
		 * guest is using anyway). */
		stlbe.word0 |= (gvaddr & PAGE_MASK_4K) | PPC44x_TLB_4K;

		/* 'hpaddr' is a host page, which is larger than the mapping we're
		 * inserting here. To compensate, we must add the in-page offset to
		 * the sub-page. */
		hpaddr |= gpaddr & (PAGE_MASK ^ PAGE_MASK_4K);
	}

	stlbe.word1 = (hpaddr & 0xfffffc00) | ((hpaddr >> 32) & 0xf);
	stlbe.word2 = kvmppc_44x_tlb_shadow_attrib(flags,
	                                           vcpu->arch.msr & MSR_PR);
	stlbe.tid = !(asid & 0xff);

	/* Keep track of the reference so we can properly release it later. */
	ref = &vcpu_44x->shadow_refs[victim];
	ref->page = new_page;
	ref->gtlb_index = gtlb_index;
	ref->writeable = !!(stlbe.word2 & PPC44x_TLB_UW);
	ref->tid = stlbe.tid;

	/* Insert shadow mapping into hardware TLB. */
	kvmppc_44x_tlbe_set_modified(vcpu_44x, victim);
	kvmppc_44x_tlbwe(victim, &stlbe);
	trace_kvm_stlb_write(victim, stlbe.tid, stlbe.word0, stlbe.word1,
	                     stlbe.word2);
}
/* For a particular guest TLB entry, invalidate the corresponding host TLB
 * mappings and release the host pages. */
static void kvmppc_44x_invalidate(struct kvm_vcpu *vcpu,
                                  unsigned int gtlb_index)
{
	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
	int i;

	for (i = 0; i < ARRAY_SIZE(vcpu_44x->shadow_refs); i++) {
		struct kvmppc_44x_shadow_ref *ref = &vcpu_44x->shadow_refs[i];
		if (ref->gtlb_index == gtlb_index)
			kvmppc_44x_shadow_release(vcpu_44x, i);
	}
}
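
/* Track guest privilege transitions: guest supervisor code runs with host
 * shadow PID 1, guest user code with shadow PID 0 (see kvmppc_set_pid()
 * below). */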
void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode)
{
	vcpu->arch.shadow_pid = !usermode;
}
void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 new_pid)
{
	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
	int i;

	if (unlikely(vcpu->arch.pid == new_pid))
		return;

	vcpu->arch.pid = new_pid;

	/* Guest userspace runs with TID=0 mappings and PID=0, to make sure it
	 * can't access guest kernel mappings (TID=1). When we switch to a new
	 * guest PID, which will also use host PID=0, we must discard the old guest
	 * userspace mappings. */
	for (i = 0; i < ARRAY_SIZE(vcpu_44x->shadow_refs); i++) {
		struct kvmppc_44x_shadow_ref *ref = &vcpu_44x->shadow_refs[i];

		if (ref->tid == 0)
			kvmppc_44x_shadow_release(vcpu_44x, i);
	}
}
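
/* Return 1 if the guest TLB entry is safe to map into the shadow TLB right
 * away: it must be valid, match the current guest address space, and map
 * guest RAM. */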
static int tlbe_is_host_safe(const struct kvm_vcpu *vcpu,
                             const struct kvmppc_44x_tlbe *tlbe)
{
	gpa_t gpa;

	if (!get_tlb_v(tlbe))
		return 0;

	/* Does it match current guest AS? */
	/* XXX what about IS != DS? */
	if (get_tlb_ts(tlbe) != !!(vcpu->arch.msr & MSR_IS))
		return 0;

	gpa = get_tlb_raddr(tlbe);
	if (!gfn_to_memslot(vcpu->kvm, gpa >> PAGE_SHIFT))
		/* Mapping is not for RAM. */
		return 0;

	return 1;
}
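
/* Emulate the guest 'tlbwe' instruction, updating the guest TLB entry indexed
 * by GPR[ra] and eagerly shadow-mapping it when it is host-safe. */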
int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws)
{
	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
	struct kvmppc_44x_tlbe *tlbe;
	unsigned int gtlb_index;

	gtlb_index = vcpu->arch.gpr[ra];
	/* Valid indices are 0..KVM44x_GUEST_TLB_SIZE-1, so reject index ==
	 * KVM44x_GUEST_TLB_SIZE as well (the original '>' was off by one). */
	if (gtlb_index >= KVM44x_GUEST_TLB_SIZE) {
		printk("%s: index %d\n", __func__, gtlb_index);
		kvmppc_dump_vcpu(vcpu);
		return EMULATE_FAIL;
	}

	tlbe = &vcpu_44x->guest_tlb[gtlb_index];

	/* Invalidate shadow mappings for the about-to-be-clobbered TLB entry. */
	if (tlbe->word0 & PPC44x_TLB_VALID)
		kvmppc_44x_invalidate(vcpu, gtlb_index);

	switch (ws) {
	case PPC44x_TLB_PAGEID:
		tlbe->tid = get_mmucr_stid(vcpu);
		tlbe->word0 = vcpu->arch.gpr[rs];
		break;

	case PPC44x_TLB_XLAT:
		tlbe->word1 = vcpu->arch.gpr[rs];
		break;

	case PPC44x_TLB_ATTRIB:
		tlbe->word2 = vcpu->arch.gpr[rs];
		break;

	default:
		return EMULATE_FAIL;
	}

	if (tlbe_is_host_safe(vcpu, tlbe)) {
		gva_t eaddr;
		gpa_t gpaddr;
		u32 bytes;

		eaddr = get_tlb_eaddr(tlbe);
		gpaddr = get_tlb_raddr(tlbe);

		/* Use the advertised page size to mask effective and real addrs. */
		bytes = get_tlb_bytes(tlbe);
		eaddr &= ~(bytes - 1);
		gpaddr &= ~(bytes - 1);

		kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
	}

	trace_kvm_gtlb_write(gtlb_index, tlbe->tid, tlbe->word0, tlbe->word1,
	                     tlbe->word2);

	kvmppc_set_exit_type(vcpu, EMULATED_TLBWE_EXITS);
	return EMULATE_DONE;
}
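
/* Emulate the guest 'tlbsx' instruction: search the guest TLB for the
 * effective address GPR[rb] (+ GPR[ra] if ra is nonzero). The matching index
 * goes to GPR[rt]; with rc set, CR0[EQ] reflects whether a match was found. */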
int kvmppc_44x_emul_tlbsx(struct kvm_vcpu *vcpu, u8 rt, u8 ra, u8 rb, u8 rc)
{
	u32 ea;
	int gtlb_index;
	unsigned int as = get_mmucr_sts(vcpu);
	unsigned int pid = get_mmucr_stid(vcpu);

	ea = vcpu->arch.gpr[rb];
	if (ra)
		ea += vcpu->arch.gpr[ra];

	gtlb_index = kvmppc_44x_tlb_index(vcpu, ea, pid, as);
	if (rc) {
		if (gtlb_index < 0)
			vcpu->arch.cr &= ~0x20000000;
		else
			vcpu->arch.cr |= 0x20000000;
	}
	vcpu->arch.gpr[rt] = gtlb_index;

	kvmppc_set_exit_type(vcpu, EMULATED_TLBSX_EXITS);
	return EMULATE_DONE;
}