// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2009 SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *     Alexander Graf <agraf@suse.de>
 *     Kevin Wolf <mail@kevin-wolf.de>
 */

#include <linux/kvm_host.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/machdep.h>
#include <asm/mmu_context.h>
#include <asm/hw_irq.h>
#include "trace_pr.h"
#include "book3s.h"

void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
{
        mmu_hash_ops.hpte_invalidate(pte->slot, pte->host_vpn,
                                     pte->pagesize, pte->pagesize,
                                     MMU_SEGSIZE_256M, false);
}

/* We keep 512 gvsid->hvsid entries, mapping the guest ones to the array using
 * a hash, so we don't waste cycles on looping */
static u16 kvmppc_sid_hash(struct kvm_vcpu *vcpu, u64 gvsid)
{
        return (u16)(((gvsid >> (SID_MAP_BITS * 7)) & SID_MAP_MASK) ^
                     ((gvsid >> (SID_MAP_BITS * 6)) & SID_MAP_MASK) ^
                     ((gvsid >> (SID_MAP_BITS * 5)) & SID_MAP_MASK) ^
                     ((gvsid >> (SID_MAP_BITS * 4)) & SID_MAP_MASK) ^
                     ((gvsid >> (SID_MAP_BITS * 3)) & SID_MAP_MASK) ^
                     ((gvsid >> (SID_MAP_BITS * 2)) & SID_MAP_MASK) ^
                     ((gvsid >> (SID_MAP_BITS * 1)) & SID_MAP_MASK) ^
                     ((gvsid >> (SID_MAP_BITS * 0)) & SID_MAP_MASK));
}
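
/* Look up the shadow (host) VSID for a guest VSID.  Both candidate hash
 * slots are checked, since create_sid_map() alternates between the forward
 * and backward slot when it installs new entries. */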
static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, u64 gvsid)
{
        struct kvmppc_sid_map *map;
        u16 sid_map_mask;

        if (kvmppc_get_msr(vcpu) & MSR_PR)
                gvsid |= VSID_PR;

        sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
        map = &to_book3s(vcpu)->sid_map[sid_map_mask];
        if (map->valid && (map->guest_vsid == gvsid)) {
                trace_kvm_book3s_slb_found(gvsid, map->host_vsid);
                return map;
        }

        map = &to_book3s(vcpu)->sid_map[SID_MAP_MASK - sid_map_mask];
        if (map->valid && (map->guest_vsid == gvsid)) {
                trace_kvm_book3s_slb_found(gvsid, map->host_vsid);
                return map;
        }

        trace_kvm_book3s_slb_fail(sid_map_mask, gvsid);
        return NULL;
}
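
/* Map one guest page into the host hash page table: resolve the host pfn,
 * find (or create) the shadow VSID for the segment, and insert an HPTE via
 * mmu_hash_ops, falling back to the secondary hash group if the primary
 * group is full. */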
int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte,
                        bool iswrite)
{
        unsigned long vpn;
        kvm_pfn_t hpaddr;
        ulong hash, hpteg;
        u64 vsid;
        int ret;
        int rflags = 0x192;
        int vflags = 0;
        int attempt = 0;
        struct kvmppc_sid_map *map;
        int r = 0;
        int hpsize = MMU_PAGE_4K;
        bool writable;
        unsigned long mmu_seq;
        struct kvm *kvm = vcpu->kvm;
        struct hpte_cache *cpte;
        unsigned long gfn = orig_pte->raddr >> PAGE_SHIFT;
        unsigned long pfn;

        /* used to check for invalidations in progress */
        mmu_seq = kvm->mmu_notifier_seq;
        smp_rmb();

        /* Get host physical address for gpa */
        pfn = kvmppc_gpa_to_pfn(vcpu, orig_pte->raddr, iswrite, &writable);
        if (is_error_noslot_pfn(pfn)) {
                printk(KERN_INFO "Couldn't get guest page for gpa %lx!\n",
                       orig_pte->raddr);
                r = -EINVAL;
                goto out;
        }
        hpaddr = pfn << PAGE_SHIFT;

        /* and write the mapping ea -> hpa into the pt */
        vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid);
        map = find_sid_vsid(vcpu, vsid);
        if (!map) {
                ret = kvmppc_mmu_map_segment(vcpu, orig_pte->eaddr);
                WARN_ON(ret < 0);
                map = find_sid_vsid(vcpu, vsid);
        }
        if (!map) {
                printk(KERN_ERR "KVM: Segment map for 0x%llx (0x%lx) failed\n",
                                vsid, orig_pte->eaddr);
                WARN_ON(true);
                r = -EINVAL;
                goto out;
        }

        vpn = hpt_vpn(orig_pte->eaddr, map->host_vsid, MMU_SEGSIZE_256M);

        kvm_set_pfn_accessed(pfn);
        if (!orig_pte->may_write || !writable)
                rflags |= PP_RXRX;
        else {
                mark_page_dirty(vcpu->kvm, gfn);
                kvm_set_pfn_dirty(pfn);
        }

        if (!orig_pte->may_execute)
                rflags |= HPTE_R_N;
        else
                kvmppc_mmu_flush_icache(pfn);

        rflags = (rflags & ~HPTE_R_WIMG) | orig_pte->wimg;

        /*
         * Use 64K pages if possible; otherwise, on 64K page kernels,
         * we need to transfer 4 more bits from guest real to host real addr.
         */
        if (vsid & VSID_64K)
                hpsize = MMU_PAGE_64K;
        else
                hpaddr |= orig_pte->raddr & (~0xfffULL & ~PAGE_MASK);

        hash = hpt_hash(vpn, mmu_psize_defs[hpsize].shift, MMU_SEGSIZE_256M);

        cpte = kvmppc_mmu_hpte_cache_next(vcpu);

        spin_lock(&kvm->mmu_lock);
        if (!cpte || mmu_notifier_retry(kvm, mmu_seq)) {
                r = -EAGAIN;
                goto out_unlock;
        }

map_again:
        hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);

        /* In case we tried normal mapping already, let's nuke old entries */
        if (attempt > 1)
                if (mmu_hash_ops.hpte_remove(hpteg) < 0) {
                        r = -1;
                        goto out_unlock;
                }

        ret = mmu_hash_ops.hpte_insert(hpteg, vpn, hpaddr, rflags, vflags,
                                       hpsize, hpsize, MMU_SEGSIZE_256M);

        if (ret == -1) {
                /* If we couldn't map a primary PTE, try a secondary */
                hash = ~hash;
                vflags ^= HPTE_V_SECONDARY;
                attempt++;
                goto map_again;
        } else if (ret < 0) {
                r = -EIO;
                goto out_unlock;
        } else {
                trace_kvm_book3s_64_mmu_map(rflags, hpteg,
                                            vpn, hpaddr, orig_pte);

                /*
                 * The mmu_hash_ops code may give us a secondary entry even
                 * though we asked for a primary. Fix up.
                 */
                if ((ret & _PTEIDX_SECONDARY) && !(vflags & HPTE_V_SECONDARY)) {
                        hash = ~hash;
                        hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
                }

                cpte->slot = hpteg + (ret & 7);
                cpte->host_vpn = vpn;
                cpte->pte = *orig_pte;
                cpte->pfn = pfn;
                cpte->pagesize = hpsize;

                kvmppc_mmu_hpte_cache_map(vcpu, cpte);
                cpte = NULL;
        }

out_unlock:
        spin_unlock(&kvm->mmu_lock);
        kvm_release_pfn_clean(pfn);
        if (cpte)
                kvmppc_mmu_hpte_cache_free(cpte);

out:
        return r;
}
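
/* Flush the cached shadow PTEs backing a guest translation.  64K-backed
 * mappings use a coarser vpage mask so every sub-page of the segment page
 * is hit. */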
void kvmppc_mmu_unmap_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
{
        u64 mask = 0xfffffffffULL;
        u64 vsid;

        vcpu->arch.mmu.esid_to_vsid(vcpu, pte->eaddr >> SID_SHIFT, &vsid);
        if (vsid & VSID_64K)
                mask = 0xffffffff0ULL;
        kvmppc_mmu_pte_vflush(vcpu, pte->vpage, mask);
}
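
/* Allocate a new guest->host VSID mapping.  If the proto-VSID range for this
 * context is exhausted, all shadow state (sid_map, PTEs, segments) is flushed
 * and allocation restarts from the beginning of the range. */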
static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
{
        unsigned long vsid_bits = VSID_BITS_65_256M;
        struct kvmppc_sid_map *map;
        struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
        u16 sid_map_mask;
        static int backwards_map = 0;

        if (kvmppc_get_msr(vcpu) & MSR_PR)
                gvsid |= VSID_PR;

        /* We might get collisions that trap in preceding order, so let's
           map them differently */

        sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
        if (backwards_map)
                sid_map_mask = SID_MAP_MASK - sid_map_mask;

        map = &to_book3s(vcpu)->sid_map[sid_map_mask];

        /* Make sure we're taking the other map next time */
        backwards_map = !backwards_map;

        /* Uh-oh ... out of mappings. Let's flush! */
        if (vcpu_book3s->proto_vsid_next == vcpu_book3s->proto_vsid_max) {
                vcpu_book3s->proto_vsid_next = vcpu_book3s->proto_vsid_first;
                memset(vcpu_book3s->sid_map, 0,
                       sizeof(struct kvmppc_sid_map) * SID_MAP_NUM);
                kvmppc_mmu_pte_flush(vcpu, 0, 0);
                kvmppc_mmu_flush_segments(vcpu);
        }

        if (mmu_has_feature(MMU_FTR_68_BIT_VA))
                vsid_bits = VSID_BITS_256M;

        map->host_vsid = vsid_scramble(vcpu_book3s->proto_vsid_next++,
                                       VSID_MULTIPLIER_256M, vsid_bits);

        map->guest_vsid = gvsid;
        map->valid = true;

        trace_kvm_book3s_slb_map(sid_map_mask, gvsid, map->host_vsid);

        return map;
}
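
/* Pick a shadow SLB slot for the given ESID: reuse a slot that already maps
 * it or one that was invalidated earlier, otherwise take a fresh slot,
 * purging all segments first if the shadow SLB is full. */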
static int kvmppc_mmu_next_segment(struct kvm_vcpu *vcpu, ulong esid)
{
        struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
        int i;
        int max_slb_size = 64;
        int found_inval = -1;
        int r;

        /* Are we overwriting? */
        for (i = 0; i < svcpu->slb_max; i++) {
                if (!(svcpu->slb[i].esid & SLB_ESID_V))
                        found_inval = i;
                else if ((svcpu->slb[i].esid & ESID_MASK) == esid) {
                        r = i;
                        goto out;
                }
        }

        /* Found a spare entry that was invalidated before */
        if (found_inval >= 0) {
                r = found_inval;
                goto out;
        }

        /* No spare invalid entry, so create one */

        if (mmu_slb_size < 64)
                max_slb_size = mmu_slb_size;

        /* Overflowing -> purge */
        if ((svcpu->slb_max) == max_slb_size)
                kvmppc_mmu_flush_segments(vcpu);

        r = svcpu->slb_max;
        svcpu->slb_max++;

out:
        svcpu_put(svcpu);
        return r;
}
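
/* Install a shadow SLB entry for the guest effective address: translate the
 * guest ESID to a guest VSID, map that to a host VSID (creating a mapping if
 * needed) and program the chosen shadow SLB slot. */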
int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr)
{
        struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
        u64 esid = eaddr >> SID_SHIFT;
        u64 slb_esid = (eaddr & ESID_MASK) | SLB_ESID_V;
        u64 slb_vsid = SLB_VSID_USER;
        u64 gvsid;
        int slb_index;
        struct kvmppc_sid_map *map;
        int r = 0;

        slb_index = kvmppc_mmu_next_segment(vcpu, eaddr & ESID_MASK);

        if (vcpu->arch.mmu.esid_to_vsid(vcpu, esid, &gvsid)) {
                /* Invalidate an entry */
                svcpu->slb[slb_index].esid = 0;
                r = -ENOENT;
                goto out;
        }

        map = find_sid_vsid(vcpu, gvsid);
        if (!map)
                map = create_sid_map(vcpu, gvsid);

        map->guest_esid = esid;

        slb_vsid |= (map->host_vsid << 12);
        slb_vsid &= ~SLB_VSID_KP;
        slb_esid |= slb_index;

#ifdef CONFIG_PPC_64K_PAGES
        /* Set host segment base page size to 64K if possible */
        if (gvsid & VSID_64K)
                slb_vsid |= mmu_psize_defs[MMU_PAGE_64K].sllp;
#endif

        svcpu->slb[slb_index].esid = slb_esid;
        svcpu->slb[slb_index].vsid = slb_vsid;

        trace_kvm_book3s_slbmte(slb_vsid, slb_esid);

out:
        svcpu_put(svcpu);
        return r;
}
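
/* Invalidate any shadow SLB entry that covers the effective address 'ea'
 * within a segment of the given size. */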
void kvmppc_mmu_flush_segment(struct kvm_vcpu *vcpu, ulong ea, ulong seg_size)
{
        struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
        ulong seg_mask = -seg_size;
        int i;

        for (i = 0; i < svcpu->slb_max; i++) {
                if ((svcpu->slb[i].esid & SLB_ESID_V) &&
                    (svcpu->slb[i].esid & seg_mask) == ea) {
                        /* Invalidate this entry */
                        svcpu->slb[i].esid = 0;
                }
        }

        svcpu_put(svcpu);
}

void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu)
{
        struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
        svcpu->slb_max = 0;
        svcpu->slb[0].esid = 0;
        svcpu->slb[0].vsid = 0;
        svcpu_put(svcpu);
}

void kvmppc_mmu_destroy_pr(struct kvm_vcpu *vcpu)
{
        kvmppc_mmu_hpte_destroy(vcpu);
        __destroy_context(to_book3s(vcpu)->context_id[0]);
}
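
/* Allocate a hash MMU context for this vcpu and carve a proto-VSID range
 * out of it for the guest->host VSID mappings. */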
int kvmppc_mmu_init(struct kvm_vcpu *vcpu)
{
        struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
        int err;

        err = hash__alloc_context_id();
        if (err < 0)
                return -1;
        vcpu3s->context_id[0] = err;

        vcpu3s->proto_vsid_max = ((u64)(vcpu3s->context_id[0] + 1)
                                  << ESID_BITS) - 1;
        vcpu3s->proto_vsid_first = (u64)vcpu3s->context_id[0] << ESID_BITS;
        vcpu3s->proto_vsid_next = vcpu3s->proto_vsid_first;

        kvmppc_mmu_hpte_init(vcpu);

        return 0;
}