// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2010 SUSE Linux Products GmbH. All rights reserved.
 *
 * Alexander Graf <agraf@suse.de>
 */
#include <linux/kvm_host.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/book3s/32/mmu-hash.h>
#include <asm/machdep.h>
#include <asm/mmu_context.h>
#include <asm/hw_irq.h>
/* #define DEBUG_MMU */
/* #define DEBUG_SR */

#ifdef DEBUG_MMU
#define dprintk_mmu(a, ...) printk(KERN_INFO a, __VA_ARGS__)
#else
#define dprintk_mmu(a, ...) do { } while(0)
#endif

#ifdef DEBUG_SR
#define dprintk_sr(a, ...) printk(KERN_INFO a, __VA_ARGS__)
#else
#define dprintk_sr(a, ...) do { } while(0)
#endif
#if PAGE_SIZE != 4096
#error Unknown page size
#endif

#ifdef CONFIG_SMP
#error XXX need to grab mmu_hash_lock
#endif

#ifdef CONFIG_PTE_64BIT
#error Only 32 bit pages are supported for now
#endif
static ulong htab;
static u32 htabmask;

void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
{
	volatile u32 *pteg;

	/* Remove from host HTAB */
	pteg = (u32 *)pte->slot;
	pteg[0] = 0;

	/* And make sure it's gone from the TLB too */
	asm volatile ("sync");
	asm volatile ("tlbie %0" : : "r" (pte->pte.eaddr) : "memory");
	asm volatile ("sync");
	asm volatile ("tlbsync");
}
/* We keep 512 gvsid->hvsid entries, mapping the guest ones to the array using
 * a hash, so we don't waste cycles on looping */
static u16 kvmppc_sid_hash(struct kvm_vcpu *vcpu, u64 gvsid)
{
	return (u16)(((gvsid >> (SID_MAP_BITS * 7)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 6)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 5)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 4)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 3)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 2)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 1)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 0)) & SID_MAP_MASK));
}
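/*
 * With 512 sid_map entries, SID_MAP_MASK is a 9-bit mask, so the fold above
 * is roughly equivalent to:
 *
 *	u16 index = 0;
 *	int i;
 *
 *	for (i = 0; i < 8; i++)
 *		index ^= (gvsid >> (SID_MAP_BITS * i)) & SID_MAP_MASK;
 *
 * i.e. eight 9-bit slices of the guest VSID XOR-ed into a single array
 * index, giving a cheap and reasonably uniform spread without any searching.
 */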
static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, u64 gvsid)
{
	struct kvmppc_sid_map *map;
	u16 sid_map_mask;

	if (kvmppc_get_msr(vcpu) & MSR_PR)
		gvsid |= VSID_PR;

	sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
	map = &to_book3s(vcpu)->sid_map[sid_map_mask];
	if (map->guest_vsid == gvsid) {
		dprintk_sr("SR: Searching 0x%llx -> 0x%llx\n",
			   gvsid, map->host_vsid);
		return map;
	}

	map = &to_book3s(vcpu)->sid_map[SID_MAP_MASK - sid_map_mask];
	if (map->guest_vsid == gvsid) {
		dprintk_sr("SR: Searching 0x%llx -> 0x%llx\n",
			   gvsid, map->host_vsid);
		return map;
	}

	dprintk_sr("SR: Searching 0x%llx -> not found\n", gvsid);
	return NULL;
}
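/*
 * The lookup probes two slots: the one selected by the hash and its mirror
 * at SID_MAP_MASK - hash. create_sid_map() below alternates between the same
 * two slots (via backwards_map), so a pair of colliding guest VSIDs can
 * coexist instead of repeatedly evicting each other.
 */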
static u32 *kvmppc_mmu_get_pteg(struct kvm_vcpu *vcpu, u32 vsid, u32 eaddr,
				bool primary)
{
	u32 page, hash;
	ulong pteg = htab;

	page = (eaddr & ~ESID_MASK) >> 12;

	hash = ((vsid ^ page) << 6);
	if (!primary)
		hash = ~hash;
	hash &= htabmask;

	pteg |= hash;

	dprintk_mmu("htab: %lx | hash: %x | htabmask: %x | pteg: %lx\n",
		    htab, hash, htabmask, pteg);

	return (u32 *)pteg;
}
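/*
 * This mirrors the classic 32-bit hashed page table lookup: the primary hash
 * is VSID XOR page index, shifted left by 6 because each PTE group (PTEG) is
 * 64 bytes (8 PTEs of 8 bytes each). htabmask limits the hash to the table
 * size programmed into SDR1, the result is OR-ed onto the HTAB base, and the
 * secondary PTEG (primary == false) uses the complemented hash, as the
 * architecture specifies.
 */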
int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte,
			bool iswrite)
{
	kvm_pfn_t hpaddr;
	u64 vpn;
	u64 vsid;
	struct kvmppc_sid_map *map;
	volatile u32 *pteg;
	u32 eaddr = orig_pte->eaddr;
	u32 pteg0, pteg1;
	int rr = 0;
	bool primary = false;
	bool evict = false;
	struct hpte_cache *pte;
	int r = 0;
	bool writable;
	/* Get host physical address for gpa */
	hpaddr = kvmppc_gpa_to_pfn(vcpu, orig_pte->raddr, iswrite, &writable);
	if (is_error_noslot_pfn(hpaddr)) {
		printk(KERN_INFO "Couldn't get guest page for gpa %lx!\n",
				 orig_pte->raddr);
		r = -EINVAL;
		goto out;
	}
	hpaddr <<= PAGE_SHIFT;
	/* and write the mapping ea -> hpa into the pt */
	vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid);
	map = find_sid_vsid(vcpu, vsid);
	if (!map) {
		kvmppc_mmu_map_segment(vcpu, eaddr);
		map = find_sid_vsid(vcpu, vsid);
	}

	vsid = map->host_vsid;
	vpn = (vsid << (SID_SHIFT - VPN_SHIFT)) |
		((eaddr & ~ESID_MASK) >> VPN_SHIFT);
next_pteg:
	if (rr == 16) {
		/* scanned all 8 slots without a free entry: switch PTEG and evict */
		primary = !primary;
		evict = true;
		rr = 0;
	}

	pteg = kvmppc_mmu_get_pteg(vcpu, vsid, eaddr, primary);

	/* not evicting yet */
	if (!evict && (pteg[rr] & PTE_V)) {
		rr += 2;
		goto next_pteg;
	}
	dprintk_mmu("KVM: old PTEG: %p (%d)\n", pteg, rr);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[0], pteg[1]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[2], pteg[3]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[4], pteg[5]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[6], pteg[7]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[8], pteg[9]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[10], pteg[11]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[12], pteg[13]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[14], pteg[15]);
	pteg0 = ((eaddr & 0x0fffffff) >> 22) | (vsid << 7) | PTE_V |
		(primary ? 0 : PTE_SEC);
	pteg1 = hpaddr | PTE_M | PTE_R | PTE_C;

	if (orig_pte->may_write && writable) {
		pteg1 |= PP_RWRW;
		mark_page_dirty(vcpu->kvm, orig_pte->raddr >> PAGE_SHIFT);
	} else {
		pteg1 |= PP_RWRX;
	}

	if (orig_pte->may_execute)
		kvmppc_mmu_flush_icache(hpaddr >> PAGE_SHIFT);
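	/*
	 * pteg0 is the first HPTE word: the valid bit, the VSID at bit 7, the
	 * H bit (PTE_SEC) for secondary-PTEG entries and the abbreviated page
	 * index taken from the effective address. pteg1 is the second word:
	 * the real page address plus the M/R/C attribute bits and the PP
	 * protection bits chosen above. The icache flush keeps the instruction
	 * cache coherent with the new page before the guest may execute from it.
	 */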
	local_irq_disable();

	if (pteg[rr]) {
		pteg[rr] = 0;
		asm volatile ("sync");
	}
	pteg[rr + 1] = pteg1;
	pteg[rr] = pteg0;
	asm volatile ("sync");

	local_irq_enable();
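	/*
	 * Ordering matters when updating the HTAB in place: the old slot is
	 * cleared and sync-ed before reuse, the second word is written before
	 * the word carrying the valid bit, and the final sync ensures the
	 * hardware table walker never observes a half-written HPTE.
	 */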
	dprintk_mmu("KVM: new PTEG: %p\n", pteg);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[0], pteg[1]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[2], pteg[3]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[4], pteg[5]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[6], pteg[7]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[8], pteg[9]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[10], pteg[11]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[12], pteg[13]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[14], pteg[15]);
	/* Now tell our Shadow PTE code about the new page */

	pte = kvmppc_mmu_hpte_cache_next(vcpu);
	if (!pte) {
		kvm_release_pfn_clean(hpaddr >> PAGE_SHIFT);
		r = -EAGAIN;
		goto out;
	}

	dprintk_mmu("KVM: %c%c Map 0x%llx: [%lx] 0x%llx (0x%llx) -> %lx\n",
		    orig_pte->may_write ? 'w' : '-',
		    orig_pte->may_execute ? 'x' : '-',
		    orig_pte->eaddr, (ulong)pteg, vpn,
		    orig_pte->vpage, hpaddr);

	pte->slot = (ulong)&pteg[rr];
	pte->host_vpn = vpn;
	pte->pte = *orig_pte;
	pte->pfn = hpaddr >> PAGE_SHIFT;

	kvmppc_mmu_hpte_cache_map(vcpu, pte);

	kvm_release_pfn_clean(hpaddr >> PAGE_SHIFT);

out:
	return r;
}
void kvmppc_mmu_unmap_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
{
	kvmppc_mmu_pte_vflush(vcpu, pte->vpage, 0xfffffffffULL);
}
static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
{
	struct kvmppc_sid_map *map;
	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
	u16 sid_map_mask;
	static int backwards_map = 0;

	if (kvmppc_get_msr(vcpu) & MSR_PR)
		gvsid |= VSID_PR;

	/* We might get collisions that trap in preceding order, so let's
	   map them differently */

	sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
	if (backwards_map)
		sid_map_mask = SID_MAP_MASK - sid_map_mask;

	map = &to_book3s(vcpu)->sid_map[sid_map_mask];

	/* Make sure we're taking the other map next time */
	backwards_map = !backwards_map;

	/* Uh-oh ... out of mappings. Let's flush! */
	if (vcpu_book3s->vsid_next >= VSID_POOL_SIZE) {
		vcpu_book3s->vsid_next = 0;
		memset(vcpu_book3s->sid_map, 0,
		       sizeof(struct kvmppc_sid_map) * SID_MAP_NUM);
		kvmppc_mmu_pte_flush(vcpu, 0, 0);
		kvmppc_mmu_flush_segments(vcpu);
	}
	map->host_vsid = vcpu_book3s->vsid_pool[vcpu_book3s->vsid_next];
	vcpu_book3s->vsid_next++;

	map->guest_vsid = gvsid;
	map->valid = true;

	return map;
}
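/*
 * Host VSIDs come from a small per-vcpu pool (filled by kvmppc_mmu_init_pr()
 * below), so once vsid_next reaches VSID_POOL_SIZE the whole sid_map, all
 * shadow PTEs and all shadow segment registers are dropped and allocation
 * restarts from the beginning of the pool.
 */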
int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr)
{
	u32 esid = eaddr >> SID_SHIFT;
	u64 gvsid;
	u32 sr;
	struct kvmppc_sid_map *map;
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	int r = 0;

	if (vcpu->arch.mmu.esid_to_vsid(vcpu, esid, &gvsid)) {
		/* Invalidate an entry */
		svcpu->sr[esid] = SR_INVALID;
		r = -ENOENT;
		goto out;
	}

	map = find_sid_vsid(vcpu, gvsid);
	if (!map)
		map = create_sid_map(vcpu, gvsid);

	map->guest_esid = esid;
	sr = map->host_vsid | SR_KP;
	svcpu->sr[esid] = sr;

	dprintk_sr("MMU: mtsr %d, 0x%x\n", esid, sr);

out:
	svcpu_put(svcpu);
	return r;
}
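/*
 * svcpu->sr[] holds the shadow segment register values used while the guest
 * runs: the allocated host VSID stands in for the guest VSID, and SR_KP is
 * set so the PP protection bits written by kvmppc_mmu_map_page() apply.
 */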
void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);

	dprintk_sr("MMU: flushing all segments (%d)\n", ARRAY_SIZE(svcpu->sr));
	for (i = 0; i < ARRAY_SIZE(svcpu->sr); i++)
		svcpu->sr[i] = SR_INVALID;

	svcpu_put(svcpu);
}
void kvmppc_mmu_destroy_pr(struct kvm_vcpu *vcpu)
{
	int i;

	kvmppc_mmu_hpte_destroy(vcpu);
	preempt_disable();
	for (i = 0; i < SID_CONTEXTS; i++)
		__destroy_context(to_book3s(vcpu)->context_id[i]);
	preempt_enable();
}
/* From mm/mmu_context_hash32.c */
#define CTX_TO_VSID(c, id)	((((c) * (897 * 16)) + (id * 0x111)) & 0xffffff)
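/*
 * Each host MMU context supplies 16 VSIDs (one per segment register),
 * computed with the same formula the host uses in mm/mmu_context_hash32.c,
 * so the VSIDs handed to the guest cannot collide with other host contexts.
 * kvmppc_mmu_init_pr() below fills vsid_pool with SID_CONTEXTS * 16 of them.
 */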
int kvmppc_mmu_init_pr(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	int err;
	ulong sdr1;
	int i;
	int j;

	for (i = 0; i < SID_CONTEXTS; i++) {
		err = __init_new_context();
		if (err < 0)
			goto init_fail;
		vcpu3s->context_id[i] = err;

		/* Remember context id for this combination */
		for (j = 0; j < 16; j++)
			vcpu3s->vsid_pool[(i * 16) + j] = CTX_TO_VSID(err, j);
	}

	vcpu3s->vsid_next = 0;
	/* Remember where the HTAB is */
	asm ( "mfsdr1 %0" : "=r"(sdr1) );
	htabmask = ((sdr1 & 0x1FF) << 16) | 0xFFC0;
	htab = (ulong)__va(sdr1 & 0xffff0000);
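	/*
	 * SDR1 on 32-bit hash MMUs: the upper 16 bits (HTABORG) are the
	 * physical base of the hashed page table, the low 9 bits (HTABMASK)
	 * say how many additional hash bits select a PTEG. htabmask as built
	 * here is the byte-offset mask that kvmppc_mmu_get_pteg() ANDs the
	 * shifted hash with, and htab is the kernel-virtual base of the table.
	 */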
	kvmppc_mmu_hpte_init(vcpu);

	return 0;

init_fail:
	for (j = 0; j < i; j++) {
		if (!vcpu3s->context_id[j])
			continue;

		__destroy_context(to_book3s(vcpu)->context_id[j]);
	}

	return -1;
}