/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>

#include <asm/tlbflush.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu-hash64.h>

/* #define DEBUG_MMU */

#ifdef DEBUG_MMU
#define dprintk(X...) printk(KERN_INFO X)
#else
#define dprintk(X...) do { } while(0)
#endif

static void kvmppc_mmu_book3s_64_reset_msr(struct kvm_vcpu *vcpu)
{
	kvmppc_set_msr(vcpu, MSR_SF);
}

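/*
 * Find the guest SLB entry that maps @eaddr, or NULL if there is none.
 * 1T-segment entries are matched on the 1T ESID, all others on the
 * 256MB ESID.
 */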
static struct kvmppc_slb *kvmppc_mmu_book3s_64_find_slbe(
				struct kvm_vcpu *vcpu,
				gva_t eaddr)
{
	int i;
	u64 esid = GET_ESID(eaddr);
	u64 esid_1t = GET_ESID_1T(eaddr);

	for (i = 0; i < vcpu->arch.slb_nr; i++) {
		u64 cmp_esid = esid;

		if (!vcpu->arch.slb[i].valid)
			continue;

		if (vcpu->arch.slb[i].tb)
			cmp_esid = esid_1t;

		if (vcpu->arch.slb[i].esid == cmp_esid)
			return &vcpu->arch.slb[i];
	}

	dprintk("KVM: No SLB entry found for 0x%lx [%llx | %llx]\n",
		eaddr, esid, esid_1t);
	for (i = 0; i < vcpu->arch.slb_nr; i++) {
		if (vcpu->arch.slb[i].vsid)
			dprintk("  %d: %c%c%c %llx %llx\n", i,
				vcpu->arch.slb[i].valid ? 'v' : ' ',
				vcpu->arch.slb[i].large ? 'l' : ' ',
				vcpu->arch.slb[i].tb    ? 't' : ' ',
				vcpu->arch.slb[i].esid,
				vcpu->arch.slb[i].vsid);
	}

	return NULL;
}

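/*
 * Segment geometry helpers: the ESID/offset boundary sits at bit 40
 * for 1T segments and at bit 28 (SID_SHIFT) for 256MB segments.
 */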
static int kvmppc_slb_sid_shift(struct kvmppc_slb *slbe)
{
	return slbe->tb ? SID_SHIFT_1T : SID_SHIFT;
}

static u64 kvmppc_slb_offset_mask(struct kvmppc_slb *slbe)
{
	return (1ul << kvmppc_slb_sid_shift(slbe)) - 1;
}

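/*
 * Build the virtual page number for @eaddr: the in-segment offset
 * shifted down by VPN_SHIFT, with the VSID spliced in above it.
 */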
static u64 kvmppc_slb_calc_vpn(struct kvmppc_slb *slb, gva_t eaddr)
{
	eaddr &= kvmppc_slb_offset_mask(slb);

	return (eaddr >> VPN_SHIFT) |
	       ((slb->vsid) << (kvmppc_slb_sid_shift(slb) - VPN_SHIFT));
}

static u64 kvmppc_mmu_book3s_64_ea_to_vp(struct kvm_vcpu *vcpu, gva_t eaddr,
					 bool data)
{
	struct kvmppc_slb *slb;

	slb = kvmppc_mmu_book3s_64_find_slbe(vcpu, eaddr);
	if (!slb)
		return 0;

	return kvmppc_slb_calc_vpn(slb, eaddr);
}

static int kvmppc_mmu_book3s_64_get_pagesize(struct kvmppc_slb *slbe)
{
	return slbe->large ? 24 : 12;
}

static u32 kvmppc_mmu_book3s_64_get_page(struct kvmppc_slb *slbe, gva_t eaddr)
{
	int p = kvmppc_mmu_book3s_64_get_pagesize(slbe);

	return ((eaddr & kvmppc_slb_offset_mask(slbe)) >> p);
}

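/*
 * Return the host virtual address of the guest PTEG that can hold a
 * mapping for @eaddr: hash the VPN (inverted for the secondary PTEG),
 * clip the hash to the HTAB size encoded in SDR1 and add it, scaled by
 * the 128-byte PTEG size, to the HTAB base address.
 */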
static hva_t kvmppc_mmu_book3s_64_get_pteg(
				struct kvmppc_vcpu_book3s *vcpu_book3s,
				struct kvmppc_slb *slbe, gva_t eaddr,
				bool second)
{
	u64 hash, pteg, htabsize;
	u32 ssize;
	hva_t r;
	u64 vpn;

	htabsize = ((1 << ((vcpu_book3s->sdr1 & 0x1f) + 11)) - 1);

	vpn = kvmppc_slb_calc_vpn(slbe, eaddr);
	ssize = slbe->tb ? MMU_SEGSIZE_1T : MMU_SEGSIZE_256M;
	hash = hpt_hash(vpn, kvmppc_mmu_book3s_64_get_pagesize(slbe), ssize);
	if (second)
		hash = ~hash;
	hash &= ((1ULL << 39ULL) - 1ULL);
	hash &= htabsize;
	hash <<= 7ULL;

	pteg = vcpu_book3s->sdr1 & 0xfffffffffffc0000ULL;
	pteg |= hash;

	dprintk("MMU: page=0x%x sdr1=0x%llx pteg=0x%llx vsid=0x%llx\n",
		kvmppc_mmu_book3s_64_get_page(slbe, eaddr),
		vcpu_book3s->sdr1, pteg, slbe->vsid);

	/* When running a PAPR guest, SDR1 contains a HVA address instead
	   of a GPA */
	if (vcpu_book3s->vcpu.arch.papr_enabled)
		r = pteg;
	else
		r = gfn_to_hva(vcpu_book3s->vcpu.kvm, pteg >> PAGE_SHIFT);

	if (kvm_is_error_hva(r))
		return r;
	return r | (pteg & ~PAGE_MASK);
}

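/*
 * Compute the AVPN (abbreviated virtual page number) in the position
 * it occupies in the first doubleword of an HPTE, for matching against
 * guest HPTEs.
 */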
static u64 kvmppc_mmu_book3s_64_get_avpn(struct kvmppc_slb *slbe, gva_t eaddr)
{
	int p = kvmppc_mmu_book3s_64_get_pagesize(slbe);
	u64 avpn;

	avpn = kvmppc_mmu_book3s_64_get_page(slbe, eaddr);
	avpn |= slbe->vsid << (kvmppc_slb_sid_shift(slbe) - p);

	if (p < 16)
		avpn >>= ((80 - p) - 56) - 8;	/* 16 - p */
	else
		avpn <<= p - 16;

	return avpn;
}

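/*
 * Translate an effective address into a guest physical address by
 * walking the guest SLB and hashed page table, mirroring what the
 * real MMU would do.
 */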
static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
				      struct kvmppc_pte *gpte, bool data)
{
	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
	struct kvmppc_slb *slbe;
	hva_t ptegp;
	u64 pteg[16];
	u64 avpn = 0;
	u64 r;
	u64 v_val, v_mask;
	u64 eaddr_mask;
	int i;
	u8 pp, key = 0;
	bool found = false;
	bool second = false;
	ulong mp_ea = vcpu->arch.magic_page_ea;

	/* Magic page override */
	if (unlikely(mp_ea) &&
	    unlikely((eaddr & ~0xfffULL) == (mp_ea & ~0xfffULL)) &&
	    !(vcpu->arch.shared->msr & MSR_PR)) {
		gpte->eaddr = eaddr;
		gpte->vpage = kvmppc_mmu_book3s_64_ea_to_vp(vcpu, eaddr, data);
		gpte->raddr = vcpu->arch.magic_page_pa | (gpte->raddr & 0xfff);
		gpte->raddr &= KVM_PAM;
		gpte->may_execute = true;
		gpte->may_read = true;
		gpte->may_write = true;

		return 0;
	}

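	/* No magic page: translate through the guest SLB and HTAB */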
	slbe = kvmppc_mmu_book3s_64_find_slbe(vcpu, eaddr);
	if (!slbe)
		goto no_seg_found;

	avpn = kvmppc_mmu_book3s_64_get_avpn(slbe, eaddr);
	v_val = avpn & HPTE_V_AVPN;

	if (slbe->tb)
		v_val |= SLB_VSID_B_1T;
	if (slbe->large)
		v_val |= HPTE_V_LARGE;
	v_val |= HPTE_V_VALID;

	v_mask = SLB_VSID_B | HPTE_V_AVPN | HPTE_V_LARGE | HPTE_V_VALID |
		HPTE_V_SECONDARY;

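	/*
	 * A mapping may reside in either the primary or the secondary
	 * PTEG; search the primary first, then retry with
	 * HPTE_V_SECONDARY set.
	 */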
do_second:
	ptegp = kvmppc_mmu_book3s_64_get_pteg(vcpu_book3s, slbe, eaddr, second);
	if (kvm_is_error_hva(ptegp))
		goto no_page_found;

	if (copy_from_user(pteg, (void __user *)ptegp, sizeof(pteg))) {
		printk(KERN_ERR "KVM can't copy data from 0x%lx!\n", ptegp);
		goto no_page_found;
	}

	if ((vcpu->arch.shared->msr & MSR_PR) && slbe->Kp)
		key = 4;
	else if (!(vcpu->arch.shared->msr & MSR_PR) && slbe->Ks)
		key = 4;

	for (i = 0; i < 16; i += 2) {
		/* Check all relevant fields of 1st dword */
		if ((pteg[i] & v_mask) == v_val) {
			found = true;
			break;
		}
	}

	if (!found) {
		if (second)
			goto no_page_found;
		v_val |= HPTE_V_SECONDARY;
		second = true;
		goto do_second;
	}

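	/*
	 * pp combines the two PP bits with the supervisor/user key (4)
	 * from the SLB entry; values 0-2 and 6 permit writes, 3, 5 and
	 * 7 are read-only, 4 grants no access.
	 */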
	r = pteg[i+1];
	pp = (r & HPTE_R_PP) | key;
	eaddr_mask = 0xFFF;

	gpte->eaddr = eaddr;
	gpte->vpage = kvmppc_mmu_book3s_64_ea_to_vp(vcpu, eaddr, data);
	if (slbe->large)
		eaddr_mask = 0xFFFFFF;
	gpte->raddr = (r & HPTE_R_RPN & ~eaddr_mask) | (eaddr & eaddr_mask);
	gpte->may_execute = ((r & HPTE_R_N) ? false : true);
	gpte->may_read = false;
	gpte->may_write = false;

	switch (pp) {
	case 0:
	case 1:
	case 2:
	case 6:
		gpte->may_write = true;
		/* fall through */
	case 3:
	case 5:
	case 7:
		gpte->may_read = true;
		break;
	}

284 dprintk("KVM MMU: Translated 0x%lx [0x%llx] -> 0x%llx "
286 eaddr
, avpn
, gpte
->vpage
, gpte
->raddr
);
288 /* Update PTE R and C bits, so the guest's swapper knows we used the
290 if (gpte
->may_read
) {
291 /* Set the accessed flag */
294 if (data
&& gpte
->may_write
) {
295 /* Set the dirty flag -- XXX even if not writing */
299 /* Write back into the PTEG */
300 if (pteg
[i
+1] != r
) {
302 copy_to_user((void __user
*)ptegp
, pteg
, sizeof(pteg
));
314 dprintk("KVM MMU: Trigger segment fault\n");
static void kvmppc_mmu_book3s_64_slbmte(struct kvm_vcpu *vcpu, u64 rs, u64 rb)
{
	struct kvmppc_vcpu_book3s *vcpu_book3s;
	u64 esid, esid_1t;
	int slb_nr;
	struct kvmppc_slb *slbe;

	dprintk("KVM MMU: slbmte(0x%llx, 0x%llx)\n", rs, rb);

	vcpu_book3s = to_book3s(vcpu);

	esid = GET_ESID(rb);
	esid_1t = GET_ESID_1T(rb);
	slb_nr = rb & 0xfff;

	if (slb_nr > vcpu->arch.slb_nr)
		return;

	slbe = &vcpu->arch.slb[slb_nr];

	slbe->large = (rs & SLB_VSID_L) ? 1 : 0;
	slbe->tb    = (rs & SLB_VSID_B_1T) ? 1 : 0;
	slbe->esid  = slbe->tb ? esid_1t : esid;
	slbe->vsid  = (rs & ~SLB_VSID_B) >> (kvmppc_slb_sid_shift(slbe) - 16);
	slbe->valid = (rb & SLB_ESID_V) ? 1 : 0;
	slbe->Ks    = (rs & SLB_VSID_KS) ? 1 : 0;
	slbe->Kp    = (rs & SLB_VSID_KP) ? 1 : 0;
	slbe->nx    = (rs & SLB_VSID_N) ? 1 : 0;
	slbe->class = (rs & SLB_VSID_C) ? 1 : 0;

	slbe->orige = rb & (ESID_MASK | SLB_ESID_V);
	slbe->origv = rs;

	/* Map the new segment */
	kvmppc_mmu_map_segment(vcpu, esid << SID_SHIFT);
}

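/*
 * slbmfee/slbmfev read back the ESID and VSID doublewords originally
 * written to the given SLB slot.
 */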
static u64 kvmppc_mmu_book3s_64_slbmfee(struct kvm_vcpu *vcpu, u64 slb_nr)
{
	struct kvmppc_slb *slbe;

	if (slb_nr > vcpu->arch.slb_nr)
		return 0;

	slbe = &vcpu->arch.slb[slb_nr];

	return slbe->orige;
}

static u64 kvmppc_mmu_book3s_64_slbmfev(struct kvm_vcpu *vcpu, u64 slb_nr)
{
	struct kvmppc_slb *slbe;

	if (slb_nr > vcpu->arch.slb_nr)
		return 0;

	slbe = &vcpu->arch.slb[slb_nr];

	return slbe->origv;
}

static void kvmppc_mmu_book3s_64_slbie(struct kvm_vcpu *vcpu, u64 ea)
{
	struct kvmppc_slb *slbe;
	u64 seg_size;

	dprintk("KVM MMU: slbie(0x%llx)\n", ea);

	slbe = kvmppc_mmu_book3s_64_find_slbe(vcpu, ea);

	if (!slbe)
		return;

	dprintk("KVM MMU: slbie(0x%llx, 0x%llx)\n", ea, slbe->esid);

	slbe->valid = false;

	seg_size = 1ull << kvmppc_slb_sid_shift(slbe);
	kvmppc_mmu_flush_segment(vcpu, ea & ~(seg_size - 1), seg_size);
}

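/*
 * slbia invalidates all SLB entries except entry 0; like the hardware
 * instruction, entry 0 is left intact.
 */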
static void kvmppc_mmu_book3s_64_slbia(struct kvm_vcpu *vcpu)
{
	int i;

	dprintk("KVM MMU: slbia()\n");

	for (i = 1; i < vcpu->arch.slb_nr; i++) {
		vcpu->arch.slb[i].valid = false;
		vcpu->arch.slb[i].orige = 0;
		vcpu->arch.slb[i].origv = 0;
	}

	if (vcpu->arch.shared->msr & MSR_IR) {
		kvmppc_mmu_flush_segments(vcpu);
		kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
	}
}

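/*
 * Emulate a 32-bit style mtsrin on the 64-bit MMU by synthesizing
 * equivalent slbmte operands, per the table below.
 */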
static void kvmppc_mmu_book3s_64_mtsrin(struct kvm_vcpu *vcpu, u32 srnum,
					ulong value)
{
	u64 rb = 0, rs = 0;

	/*
	 * According to Book3 2.01 mtsrin is implemented as:
	 *
	 * The SLB entry specified by (RB)32:35 is loaded from register
	 * RS, as follows.
	 *
	 * SLBE Bit	Source			SLB Field
	 *
	 * 0:31		0x0000_0000		ESID-0:31
	 * 32:35	(RB)32:35		ESID-32:35
	 * 36		0b1			V
	 * 37:61	0x00_0000 || 0b0	VSID-0:24
	 * 62:88	(RS)37:63		VSID-25:51
	 * 89:91	(RS)33:35		Ks Kp N
	 * 92		(RS)36			L ((RS)36 must be 0b0)
	 * 93		0b0			C
	 */

	dprintk("KVM MMU: mtsrin(0x%x, 0x%lx)\n", srnum, value);

	/* ESID = srnum */
	rb |= (srnum & 0xf) << 28;
	/* Set the valid bit */
	rb |= 1 << 27;
	/* Index = ESID */
	rb |= srnum;

	/* VSID = VSID */
	rs |= (value & 0xfffffff) << 12;
	/* flags = flags */
	rs |= ((value >> 28) & 0x7) << 9;

	kvmppc_mmu_book3s_64_slbmte(vcpu, rs, rb);
}

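/*
 * Emulate tlbie by flushing shadow PTEs that match the given virtual
 * address; for large pages the low bits of the page index are ignored.
 */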
static void kvmppc_mmu_book3s_64_tlbie(struct kvm_vcpu *vcpu, ulong va,
				       bool large)
{
	u64 mask = 0xFFFFFFFFFULL;

	dprintk("KVM MMU: tlbie(0x%lx)\n", va);

	if (large)
		mask = 0xFFFFFF000ULL;
	kvmppc_mmu_pte_vflush(vcpu, va >> 12, mask);
}

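/*
 * Translate a guest ESID to the VSID to use for the shadow MMU:
 * real-mode accesses get a synthetic VSID, translated accesses take
 * the VSID from the matching SLB entry.
 */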
static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
					     u64 *vsid)
{
	ulong ea = esid << SID_SHIFT;
	struct kvmppc_slb *slb;
	u64 gvsid = esid;
	ulong mp_ea = vcpu->arch.magic_page_ea;

	if (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
		slb = kvmppc_mmu_book3s_64_find_slbe(vcpu, ea);
		if (slb) {
			gvsid = slb->vsid;
			if (slb->tb) {
				gvsid <<= SID_SHIFT_1T - SID_SHIFT;
				gvsid |= esid & ((1ul << (SID_SHIFT_1T - SID_SHIFT)) - 1);
				gvsid |= VSID_1T;
			}
		}
	}

	switch (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
	case 0:
		*vsid = VSID_REAL | esid;
		break;
	case MSR_IR:
		*vsid = VSID_REAL_IR | gvsid;
		break;
	case MSR_DR:
		*vsid = VSID_REAL_DR | gvsid;
		break;
	case MSR_DR|MSR_IR:
		if (!slb)
			goto no_slb;

		*vsid = gvsid;
		break;
	default:
		BUG();
		break;
	}

	if (vcpu->arch.shared->msr & MSR_PR)
		*vsid |= VSID_PR;

	return 0;

no_slb:
	/* Catch magic page case */
	if (unlikely(mp_ea) &&
	    unlikely(esid == (mp_ea >> SID_SHIFT)) &&
	    !(vcpu->arch.shared->msr & MSR_PR)) {
		*vsid = VSID_REAL | esid;
		return 0;
	}

	return -EINVAL;
}

static bool kvmppc_mmu_book3s_64_is_dcbz32(struct kvm_vcpu *vcpu)
{
	return (to_book3s(vcpu)->hid[5] & 0x80);
}

void kvmppc_mmu_book3s_64_init(struct kvm_vcpu *vcpu)
{
	struct kvmppc_mmu *mmu = &vcpu->arch.mmu;

	mmu->mtsrin = kvmppc_mmu_book3s_64_mtsrin;
	mmu->slbmte = kvmppc_mmu_book3s_64_slbmte;
	mmu->slbmfee = kvmppc_mmu_book3s_64_slbmfee;
	mmu->slbmfev = kvmppc_mmu_book3s_64_slbmfev;
	mmu->slbie = kvmppc_mmu_book3s_64_slbie;
	mmu->slbia = kvmppc_mmu_book3s_64_slbia;
	mmu->xlate = kvmppc_mmu_book3s_64_xlate;
	mmu->reset_msr = kvmppc_mmu_book3s_64_reset_msr;
	mmu->tlbie = kvmppc_mmu_book3s_64_tlbie;
	mmu->esid_to_vsid = kvmppc_mmu_book3s_64_esid_to_vsid;
	mmu->ea_to_vp = kvmppc_mmu_book3s_64_ea_to_vp;
	mmu->is_dcbz32 = kvmppc_mmu_book3s_64_is_dcbz32;

	vcpu->arch.hflags |= BOOK3S_HFLAG_SLB;
}