/*
 * Copyright (C) 2010 SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *     Alexander Graf <agraf@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */
#include <linux/kvm_host.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu-hash32.h>
#include <asm/machdep.h>
#include <asm/mmu_context.h>
#include <asm/hw_irq.h>
/* #define DEBUG_MMU */
/* #define DEBUG_SR */

#ifdef DEBUG_MMU
#define dprintk_mmu(a, ...) printk(KERN_INFO a, __VA_ARGS__)
#else
#define dprintk_mmu(a, ...) do { } while(0)
#endif

#ifdef DEBUG_SR
#define dprintk_sr(a, ...) printk(KERN_INFO a, __VA_ARGS__)
#else
#define dprintk_sr(a, ...) do { } while(0)
#endif

#if PAGE_SHIFT != 12
#error Unknown page size
#endif

#ifdef CONFIG_SMP
#error XXX need to grab mmu_hash_lock
#endif

#ifdef CONFIG_PTE_64BIT
#error Only 32 bit pages are supported for now
#endif
static ulong htab;
static u32 htabmask;

void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
{
	volatile u32 *pteg;

	/* Remove from host HTAB */
	pteg = (u32 *)pte->slot;
	pteg[0] = 0;

	/* And make sure it's gone from the TLB too */
	asm volatile ("sync");
	asm volatile ("tlbie %0" : : "r" (pte->pte.eaddr) : "memory");
	asm volatile ("sync");
	asm volatile ("tlbsync");
}
/* We keep 512 gvsid->hvsid entries, mapping the guest ones to the array using
 * a hash, so we don't waste cycles on looping */
static u16 kvmppc_sid_hash(struct kvm_vcpu *vcpu, u64 gvsid)
{
	return (u16)(((gvsid >> (SID_MAP_BITS * 7)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 6)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 5)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 4)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 3)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 2)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 1)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 0)) & SID_MAP_MASK));
}
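/*
 * Illustration of the fold above (assuming SID_MAP_BITS is 9, which matches
 * the 512-entry table mentioned in the comment): the 64-bit guest VSID is cut
 * into eight 9-bit slices that are XORed into one 9-bit index, so VSIDs that
 * differ only in high-order bits still tend to land in different slots.  For
 * example, gvsid = 0x200 has a zero low slice but a next slice of 0x1, giving
 * index 0x001 rather than 0x000.
 */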
static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, u64 gvsid)
{
	struct kvmppc_sid_map *map;
	u16 sid_map_mask;

	if (vcpu->arch.shared->msr & MSR_PR)
		gvsid |= VSID_PR;

	sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
	map = &to_book3s(vcpu)->sid_map[sid_map_mask];
	if (map->guest_vsid == gvsid) {
		dprintk_sr("SR: Searching 0x%llx -> 0x%llx\n",
			    gvsid, map->host_vsid);
		return map;
	}

	map = &to_book3s(vcpu)->sid_map[SID_MAP_MASK - sid_map_mask];
	if (map->guest_vsid == gvsid) {
		dprintk_sr("SR: Searching 0x%llx -> 0x%llx\n",
			    gvsid, map->host_vsid);
		return map;
	}

	dprintk_sr("SR: Searching 0x%llx -> not found\n", gvsid);
	return NULL;
}
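/*
 * The helper below locates the host PTEG that a (vsid, eaddr) pair hashes to.
 * On the classic 32-bit hash MMU a PTEG is 64 bytes (eight 8-byte PTEs), which
 * is why the vsid ^ page hash is shifted left by 6 before being masked into
 * the HTAB; the secondary PTEG (primary == false) uses the complemented hash.
 */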
static u32 *kvmppc_mmu_get_pteg(struct kvm_vcpu *vcpu, u32 vsid, u32 eaddr,
				bool primary)
{
	u32 page, hash;
	ulong pteg = htab;

	page = (eaddr & ~ESID_MASK) >> 12;

	hash = ((vsid ^ page) << 6);
	if (!primary)
		hash = ~hash;

	hash &= htabmask;

	pteg |= hash;

	dprintk_mmu("htab: %lx | hash: %x | htabmask: %x | pteg: %lx\n",
		htab, hash, htabmask, pteg);

	return (u32 *)pteg;
}
int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
{
	pfn_t hpaddr;
	u64 va;
	u64 vsid;
	struct kvmppc_sid_map *map;
	volatile u32 *pteg;
	u32 eaddr = orig_pte->eaddr;
	u32 pteg0, pteg1;
	register int rr = 0;
	bool primary = false;
	bool evict = false;
	struct hpte_cache *pte;
	int r = 0;

	/* Get host physical address for gpa */
	hpaddr = kvmppc_gfn_to_pfn(vcpu, orig_pte->raddr >> PAGE_SHIFT);
	if (is_error_pfn(hpaddr)) {
		printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n",
				 orig_pte->eaddr);
		r = -EINVAL;
		goto out;
	}
	hpaddr <<= PAGE_SHIFT;

	/* and write the mapping ea -> hpa into the pt */
	vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid);
	map = find_sid_vsid(vcpu, vsid);
	if (!map) {
		kvmppc_mmu_map_segment(vcpu, eaddr);
		map = find_sid_vsid(vcpu, vsid);
	}
	BUG_ON(!map);

	vsid = map->host_vsid;
	va = (vsid << SID_SHIFT) | (eaddr & ~ESID_MASK);

next_pteg:
	if (rr == 16) {
		primary = !primary;
		evict = true;
		rr = 0;
	}

	pteg = kvmppc_mmu_get_pteg(vcpu, vsid, eaddr, primary);
	/* not evicting yet */
	if (!evict && (pteg[rr] & PTE_V)) {
		rr += 2;
		goto next_pteg;
	}

	dprintk_mmu("KVM: old PTEG: %p (%d)\n", pteg, rr);
	dprintk_mmu("KVM:   %08x - %08x\n", pteg[0], pteg[1]);
	dprintk_mmu("KVM:   %08x - %08x\n", pteg[2], pteg[3]);
	dprintk_mmu("KVM:   %08x - %08x\n", pteg[4], pteg[5]);
	dprintk_mmu("KVM:   %08x - %08x\n", pteg[6], pteg[7]);
	dprintk_mmu("KVM:   %08x - %08x\n", pteg[8], pteg[9]);
	dprintk_mmu("KVM:   %08x - %08x\n", pteg[10], pteg[11]);
	dprintk_mmu("KVM:   %08x - %08x\n", pteg[12], pteg[13]);
	dprintk_mmu("KVM:   %08x - %08x\n", pteg[14], pteg[15]);

	pteg0 = ((eaddr & 0x0fffffff) >> 22) | (vsid << 7) | PTE_V |
		(primary ? 0 : PTE_SEC);
	pteg1 = hpaddr | PTE_M | PTE_R | PTE_C;
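	/*
	 * Reading of the two words just built (architectural background, not
	 * new behaviour): pteg0 is the upper HPTE word carrying the valid
	 * bit, the host VSID, the secondary-hash flag and the abbreviated
	 * page index from the effective address; pteg1 is the lower word
	 * carrying the host physical page number plus coherence (M),
	 * referenced (R), changed (C) and, below, the page-protection bits.
	 */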
	if (orig_pte->may_write) {
		pteg1 |= PP_RWRW;
		mark_page_dirty(vcpu->kvm, orig_pte->raddr >> PAGE_SHIFT);
	} else {
		pteg1 |= PP_RWRX;
	}

	if (orig_pte->may_execute)
		kvmppc_mmu_flush_icache(hpaddr >> PAGE_SHIFT);

	local_irq_disable();

	if (pteg[rr]) {
		pteg[rr] = 0;
		asm volatile ("sync");
	}
	pteg[rr + 1] = pteg1;
	pteg[rr] = pteg0;
	asm volatile ("sync");

	local_irq_enable();

	dprintk_mmu("KVM: new PTEG: %p\n", pteg);
	dprintk_mmu("KVM:   %08x - %08x\n", pteg[0], pteg[1]);
	dprintk_mmu("KVM:   %08x - %08x\n", pteg[2], pteg[3]);
	dprintk_mmu("KVM:   %08x - %08x\n", pteg[4], pteg[5]);
	dprintk_mmu("KVM:   %08x - %08x\n", pteg[6], pteg[7]);
	dprintk_mmu("KVM:   %08x - %08x\n", pteg[8], pteg[9]);
	dprintk_mmu("KVM:   %08x - %08x\n", pteg[10], pteg[11]);
	dprintk_mmu("KVM:   %08x - %08x\n", pteg[12], pteg[13]);
	dprintk_mmu("KVM:   %08x - %08x\n", pteg[14], pteg[15]);
	/* Now tell our Shadow PTE code about the new page */

	pte = kvmppc_mmu_hpte_cache_next(vcpu);

	dprintk_mmu("KVM: %c%c Map 0x%llx: [%lx] 0x%llx (0x%llx) -> %lx\n",
		    orig_pte->may_write ? 'w' : '-',
		    orig_pte->may_execute ? 'x' : '-',
		    orig_pte->eaddr, (ulong)pteg, va,
		    orig_pte->vpage, hpaddr);

	pte->slot = (ulong)&pteg[rr];
	pte->host_va = va;
	pte->pte = *orig_pte;
	pte->pfn = hpaddr >> PAGE_SHIFT;

	kvmppc_mmu_hpte_cache_map(vcpu, pte);

out:
	return r;
}
static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
{
	struct kvmppc_sid_map *map;
	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
	u16 sid_map_mask;
	static int backwards_map = 0;

	if (vcpu->arch.shared->msr & MSR_PR)
		gvsid |= VSID_PR;

	/* We might get collisions that trap in preceding order, so let's
	   map them differently */

	sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
	if (backwards_map)
		sid_map_mask = SID_MAP_MASK - sid_map_mask;

	map = &to_book3s(vcpu)->sid_map[sid_map_mask];

	/* Make sure we're taking the other map next time */
	backwards_map = !backwards_map;

	/* Uh-oh ... out of mappings. Let's flush! */
	if (vcpu_book3s->vsid_next >= VSID_POOL_SIZE) {
		vcpu_book3s->vsid_next = 0;
		memset(vcpu_book3s->sid_map, 0,
		       sizeof(struct kvmppc_sid_map) * SID_MAP_NUM);
		kvmppc_mmu_pte_flush(vcpu, 0, 0);
		kvmppc_mmu_flush_segments(vcpu);
	}
	map->host_vsid = vcpu_book3s->vsid_pool[vcpu_book3s->vsid_next];
	vcpu_book3s->vsid_next++;

	map->guest_vsid = gvsid;
	map->valid = true;

	return map;
}
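/*
 * Note on the scheme above (describing the existing code, not adding
 * behaviour): each guest VSID may live in one of two sid_map slots, its hash
 * or the mirrored SID_MAP_MASK - hash slot; create_sid_map alternates between
 * them via backwards_map while find_sid_vsid probes both.  When the host VSID
 * pool wraps, all cached mappings are dropped and rebuilt lazily on the next
 * fault.
 */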
int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr)
{
	u32 esid = eaddr >> SID_SHIFT;
	u64 gvsid;
	u32 sr;
	struct kvmppc_sid_map *map;
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	int r = 0;

	if (vcpu->arch.mmu.esid_to_vsid(vcpu, esid, &gvsid)) {
		/* Invalidate an entry */
		svcpu->sr[esid] = SR_INVALID;
		r = -ENOENT;
		goto out;
	}

	map = find_sid_vsid(vcpu, gvsid);
	if (!map)
		map = create_sid_map(vcpu, gvsid);

	map->guest_esid = esid;
	sr = map->host_vsid | SR_KP;
	svcpu->sr[esid] = sr;

	dprintk_sr("MMU: mtsr %d, 0x%x\n", esid, sr);

out:
	svcpu_put(svcpu);
	return r;
}
void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);

	dprintk_sr("MMU: flushing all segments (%d)\n", ARRAY_SIZE(svcpu->sr));
	for (i = 0; i < ARRAY_SIZE(svcpu->sr); i++)
		svcpu->sr[i] = SR_INVALID;

	svcpu_put(svcpu);
}
void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
{
	int i;

	kvmppc_mmu_hpte_destroy(vcpu);
	preempt_disable();
	for (i = 0; i < SID_CONTEXTS; i++)
		__destroy_context(to_book3s(vcpu)->context_id[i]);
	preempt_enable();
}
/* From mm/mmu_context_hash32.c */
#define CTX_TO_VSID(c, id)	((((c) * (897 * 16)) + (id * 0x111)) & 0xffffff)
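/*
 * Worked example (illustration only): the host derives 16 VSIDs per MMU
 * context with this same formula, so for context id c = 1 the pool entries
 * run from CTX_TO_VSID(1, 0) = 0x3810 up to CTX_TO_VSID(1, 15) = 0x480f, and
 * the final mask keeps every VSID within 24 bits.
 */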
int kvmppc_mmu_init(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	int err;
	ulong sdr1;
	int i;
	int j;

	for (i = 0; i < SID_CONTEXTS; i++) {
		err = __init_new_context();
		if (err < 0)
			goto init_fail;
		vcpu3s->context_id[i] = err;

		/* Remember context id for this combination */
		for (j = 0; j < 16; j++)
			vcpu3s->vsid_pool[(i * 16) + j] = CTX_TO_VSID(err, j);
	}

	vcpu3s->vsid_next = 0;
	/* Remember where the HTAB is */
	asm ( "mfsdr1 %0" : "=r"(sdr1) );
	htabmask = ((sdr1 & 0x1FF) << 16) | 0xFFC0;
	htab = (ulong)__va(sdr1 & 0xffff0000);
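	/*
	 * Background on the lines above (architectural note, not new
	 * behaviour): on the 32-bit hash MMU, SDR1 holds the physical base
	 * of the hashed page table in its upper bits and HTABMASK in its low
	 * 9 bits, so htabmask is widened into a byte-address mask for
	 * kvmppc_mmu_get_pteg() and htab becomes the table's kernel virtual
	 * address.
	 */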
	kvmppc_mmu_hpte_init(vcpu);

	return 0;

init_fail:
	for (j = 0; j < i; j++) {
		if (!vcpu3s->context_id[j])
			continue;

		__destroy_context(to_book3s(vcpu)->context_id[j]);
	}

	return -1;
}