/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * Copyright 2010-2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/hugetlb.h>

#include <asm/tlbflush.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu-hash64.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
/* For now use fixed-size 16MB page table */
#define HPT_ORDER	24
#define HPT_NPTEG	(1ul << (HPT_ORDER - 7))	/* 128B per pteg */
#define HPT_HASH_MASK	(HPT_NPTEG - 1)

#define HPTE_V_HVLOCK	0x40UL
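/*
 * HPTE_V_HVLOCK is a hypervisor-private software lock bit in the first
 * doubleword of an HPTE.  The real-mode handlers below set it while they
 * update an entry and always clear it (and mask it out of any value
 * returned to the guest) before releasing the entry.
 */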
static inline long lock_hpte(unsigned long *hpte, unsigned long bits)
{
	unsigned long tmp, old;

	/*
	 * Atomically set HPTE_V_HVLOCK in *hpte provided none of @bits
	 * are already set; returns non-zero on success.
	 */
	asm volatile("	ldarx	%0,0,%2\n"
		     "	and.	%1,%0,%3\n"
		     "	bne	2f\n"
		     "	ori	%0,%0,%4\n"
		     "	stdcx.	%0,0,%2\n"
		     "	beq+	2f\n"
		     "	li	%1,%4\n"
		     "2:	isync"
		     : "=&r" (tmp), "=&r" (old)
		     : "r" (hpte), "r" (bits), "i" (HPTE_V_HVLOCK)
		     : "cc", "memory");
	return old == 0;
}
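/*
 * H_ENTER: install a guest HPTE into the hashed page table.  On success
 * the index of the slot actually used is returned to the guest in GPR4.
 */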
long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
		    long pte_index, unsigned long pteh, unsigned long ptel)
{
	unsigned long porder;
	struct kvm *kvm = vcpu->kvm;
	unsigned long i, lpn, pa;
	unsigned long *hpte;

	/* only handle 4k, 64k and 16M pages for now */
	porder = 12;
	if (pteh & HPTE_V_LARGE) {
		if (cpu_has_feature(CPU_FTR_ARCH_206) &&
		    (ptel & 0xf000) == 0x1000) {
			/* 64k page */
			porder = 16;
		} else if ((ptel & 0xff000) == 0) {
			/* 16M page */
			porder = 24;
			/* lowest AVA bit must be 0 for 16M pages */
			if (pteh & 0x80)
				return H_PARAMETER;
		} else
			return H_PARAMETER;
	}
	lpn = (ptel & HPTE_R_RPN) >> kvm->arch.ram_porder;
	if (lpn >= kvm->arch.ram_npages || porder > kvm->arch.ram_porder)
		return H_PARAMETER;
	pa = kvm->arch.ram_pginfo[lpn].pfn << PAGE_SHIFT;
	if (!pa)
		return H_PARAMETER;
	/* Check WIMG */
	if ((ptel & HPTE_R_WIMG) != HPTE_R_M &&
	    (ptel & HPTE_R_WIMG) != (HPTE_R_W | HPTE_R_I | HPTE_R_M))
		return H_PARAMETER;
	pteh &= ~0x60UL;
	ptel &= ~(HPTE_R_PP0 - kvm->arch.ram_psize);
	ptel |= pa;
	if (pte_index >= (HPT_NPTEG << 3))
		return H_PARAMETER;
	if (likely((flags & H_EXACT) == 0)) {
		/* H_EXACT not set: use any free slot in the PTEG */
		pte_index &= ~7UL;
		hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
		for (i = 0; ; ++i) {
			if (i == 8)
				return H_PTEG_FULL;
			if ((*hpte & HPTE_V_VALID) == 0 &&
			    lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID))
				break;
			hpte += 2;
		}
	} else {
		i = 0;
		hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
		if (!lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID))
			return H_PTEG_FULL;
	}
	hpte[1] = ptel;
	eieio();
	hpte[0] = pteh;
	asm volatile("ptesync" : : : "memory");
	atomic_inc(&kvm->arch.ram_pginfo[lpn].refcnt);
	vcpu->arch.gpr[4] = pte_index + i;
	return H_SUCCESS;
}
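/*
 * Global tlbie invalidations have to be serialized; kvm->arch.tlbie_lock
 * is taken around them below, using the per-cpu paca lock token as the
 * lock value.
 */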
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->lock_token))
static inline int try_lock_tlbie(unsigned int *lock)
{
	unsigned int tmp, old;
	unsigned int token = LOCK_TOKEN;

	asm volatile("1:lwarx	%1,0,%2\n"
		     "	cmpwi	cr0,%1,0\n"
		     "	bne	2f\n"
		     "	stwcx.	%3,0,%2\n"
		     "	bne-	1b\n"
		     "	isync\n"
		     "2:"
		     : "=&r" (tmp), "=&r" (old)
		     : "r" (lock), "r" (token)
		     : "cc", "memory");
	return old == 0;
}
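/*
 * H_REMOVE: invalidate the HPTE at pte_index, optionally only if it
 * matches avpn (H_AVPN) or if none of the bits in avpn are set in it
 * (H_ANDCOND).  The old first and second doublewords are returned to
 * the guest in GPR4 and GPR5.
 */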
long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
		     unsigned long pte_index, unsigned long avpn,
		     unsigned long va)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long *hpte;
	unsigned long v, r, rb;

	if (pte_index >= (HPT_NPTEG << 3))
		return H_PARAMETER;
	hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
	while (!lock_hpte(hpte, HPTE_V_HVLOCK))
		cpu_relax();
	if ((hpte[0] & HPTE_V_VALID) == 0 ||
	    ((flags & H_AVPN) && (hpte[0] & ~0x7fUL) != avpn) ||
	    ((flags & H_ANDCOND) && (hpte[0] & avpn) != 0)) {
		hpte[0] &= ~HPTE_V_HVLOCK;
		return H_NOT_FOUND;
	}
	if (atomic_read(&kvm->online_vcpus) == 1)
		flags |= H_LOCAL;
	vcpu->arch.gpr[4] = v = hpte[0] & ~HPTE_V_HVLOCK;
	vcpu->arch.gpr[5] = r = hpte[1];
	rb = compute_tlbie_rb(v, r, pte_index);
	hpte[0] = 0;
	if (!(flags & H_LOCAL)) {
		while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
			cpu_relax();
		asm volatile("ptesync" : : : "memory");
		asm volatile(PPC_TLBIE(%1,%0)"; eieio; tlbsync"
			     : : "r" (rb), "r" (kvm->arch.lpid));
		asm volatile("ptesync" : : : "memory");
		kvm->arch.tlbie_lock = 0;
	} else {
		asm volatile("ptesync" : : : "memory");
		asm volatile("tlbiel %0" : : "r" (rb));
		asm volatile("ptesync" : : : "memory");
	}
	return H_SUCCESS;
}
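/*
 * H_BULK_REMOVE: process up to four remove requests passed in GPR4-GPR11
 * as (control, match-value) pairs.  The top byte of each control word
 * encodes the request type and flags; a completion code (0x80 success,
 * 0x90 not found, 0xa0 parameter error) is written back into that byte.
 */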
long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long *args = &vcpu->arch.gpr[4];
	unsigned long *hp, tlbrb[4];
	long int i, found;
	long int n_inval = 0;
	unsigned long flags, req, pte_index;
	long int local = 0;
	long int ret = H_SUCCESS;

	if (atomic_read(&kvm->online_vcpus) == 1)
		local = 1;
	for (i = 0; i < 4; ++i) {
		pte_index = args[i * 2];
		flags = pte_index >> 56;
		pte_index &= ((1ul << 56) - 1);
		req = flags >> 6;
		flags &= 3;
		if (req == 3)		/* end of request list */
			break;
		if (req != 1 || flags == 3 ||
		    pte_index >= (HPT_NPTEG << 3)) {
			/* parameter error */
			args[i * 2] = ((0xa0 | flags) << 56) + pte_index;
			ret = H_PARAMETER;
			break;
		}
		hp = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
		while (!lock_hpte(hp, HPTE_V_HVLOCK))
			cpu_relax();
		found = 0;
		if (hp[0] & HPTE_V_VALID) {
			switch (flags & 3) {
			case 0:		/* absolute */
				found = 1;
				break;
			case 1:		/* andcond */
				if (!(hp[0] & args[i * 2 + 1]))
					found = 1;
				break;
			case 2:		/* AVPN */
				if ((hp[0] & ~0x7fUL) == args[i * 2 + 1])
					found = 1;
				break;
			}
		}
		if (!found) {
			hp[0] &= ~HPTE_V_HVLOCK;
			args[i * 2] = ((0x90 | flags) << 56) + pte_index;
			continue;
		}
		/* insert R and C bits from PTE */
		flags |= (hp[1] >> 5) & 0x0c;
		args[i * 2] = ((0x80 | flags) << 56) + pte_index;
		tlbrb[n_inval++] = compute_tlbie_rb(hp[0], hp[1], pte_index);
		hp[0] = 0;
	}
	if (n_inval == 0)
		return ret;

	if (!local) {
		while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
			cpu_relax();
		asm volatile("ptesync" : : : "memory");
		for (i = 0; i < n_inval; ++i)
			asm volatile(PPC_TLBIE(%1,%0)
				     : : "r" (tlbrb[i]), "r" (kvm->arch.lpid));
		asm volatile("eieio; tlbsync; ptesync" : : : "memory");
		kvm->arch.tlbie_lock = 0;
	} else {
		asm volatile("ptesync" : : : "memory");
		for (i = 0; i < n_inval; ++i)
			asm volatile("tlbiel %0" : : "r" (tlbrb[i]));
		asm volatile("ptesync" : : : "memory");
	}
	return ret;
}
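/*
 * H_PROTECT: change the pp, N and key bits of an existing HPTE.  The
 * entry is temporarily invalidated and the TLB entry flushed before the
 * updated second doubleword is written back.
 */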
long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
		      unsigned long pte_index, unsigned long avpn,
		      unsigned long va)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long *hpte;
	unsigned long v, r, rb;

	if (pte_index >= (HPT_NPTEG << 3))
		return H_PARAMETER;
	hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
	while (!lock_hpte(hpte, HPTE_V_HVLOCK))
		cpu_relax();
	if ((hpte[0] & HPTE_V_VALID) == 0 ||
	    ((flags & H_AVPN) && (hpte[0] & ~0x7fUL) != avpn)) {
		hpte[0] &= ~HPTE_V_HVLOCK;
		return H_NOT_FOUND;
	}
	if (atomic_read(&kvm->online_vcpus) == 1)
		flags |= H_LOCAL;
	v = hpte[0];
	r = hpte[1] & ~(HPTE_R_PP0 | HPTE_R_PP | HPTE_R_N |
			HPTE_R_KEY_HI | HPTE_R_KEY_LO);
	r |= (flags << 55) & HPTE_R_PP0;
	r |= (flags << 48) & HPTE_R_KEY_HI;
	r |= flags & (HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_LO);
	rb = compute_tlbie_rb(v, r, pte_index);
	hpte[0] = v & ~HPTE_V_VALID;
	if (!(flags & H_LOCAL)) {
		while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
			cpu_relax();
		asm volatile("ptesync" : : : "memory");
		asm volatile(PPC_TLBIE(%1,%0)"; eieio; tlbsync"
			     : : "r" (rb), "r" (kvm->arch.lpid));
		asm volatile("ptesync" : : : "memory");
		kvm->arch.tlbie_lock = 0;
	} else {
		asm volatile("ptesync" : : : "memory");
		asm volatile("tlbiel %0" : : "r" (rb));
		asm volatile("ptesync" : : : "memory");
	}
	hpte[1] = r;
	eieio();
	hpte[0] = v & ~HPTE_V_HVLOCK;
	asm volatile("ptesync" : : : "memory");
	return H_SUCCESS;
}
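/*
 * Translate a real address back to a guest logical address by searching
 * the ram_pginfo array for the matching page frame.
 */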
static unsigned long reverse_xlate(struct kvm *kvm, unsigned long realaddr)
{
	long int i;
	unsigned long offset, rpn;

	offset = realaddr & (kvm->arch.ram_psize - 1);
	rpn = (realaddr - offset) >> PAGE_SHIFT;
	for (i = 0; i < kvm->arch.ram_npages; ++i)
		if (rpn == kvm->arch.ram_pginfo[i].pfn)
			return (i << PAGE_SHIFT) + offset;
	return HPTE_R_RPN;	/* all 1s in the RPN field */
}
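/*
 * H_READ: return the contents of one HPTE (or four with H_READ_4) in
 * GPR4 onwards.  With H_R_XLATE, the real page number in a valid entry
 * is converted back to the guest's logical address.
 */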
long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
		   unsigned long pte_index)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long *hpte, r;
	int i, n = 1;

	if (pte_index >= (HPT_NPTEG << 3))
		return H_PARAMETER;
	if (flags & H_READ_4) {
		pte_index &= ~3;
		n = 4;
	}
	for (i = 0; i < n; ++i, ++pte_index) {
		hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
		r = hpte[1];
		if ((flags & H_R_XLATE) && (hpte[0] & HPTE_V_VALID))
			r = reverse_xlate(kvm, r & HPTE_R_RPN) |
				(r & ~HPTE_R_RPN);
		vcpu->arch.gpr[4 + i * 2] = hpte[0];
		vcpu->arch.gpr[5 + i * 2] = r;
	}
	return H_SUCCESS;
}