/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright SUSE Linux Products GmbH 2010
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */
#ifndef __ASM_KVM_BOOK3S_64_H__
#define __ASM_KVM_BOOK3S_64_H__

#include <linux/string.h>
#include <asm/bitops.h>
#include <asm/book3s/64/mmu-hash.h>
/* Power architecture requires HPT is at least 256kiB, at most 64TiB */
#define PPC_MIN_HPT_ORDER	18
#define PPC_MAX_HPT_ORDER	46
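/*
 * The order is log2 of the HPT size in bytes: 1UL << 18 = 256 kiB and
 * 1UL << 46 = 64 TiB, matching the limits in the comment above.
 */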
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
static inline struct kvmppc_book3s_shadow_vcpu *svcpu_get(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	return &get_paca()->shadow_vcpu;
}

static inline void svcpu_put(struct kvmppc_book3s_shadow_vcpu *svcpu)
{
	preempt_enable();
}
#endif
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE

static inline bool kvm_is_radix(struct kvm *kvm)
{
	return kvm->arch.radix;
}

#define KVM_DEFAULT_HPT_ORDER	24	/* 16MB HPT by default */
#endif
/*
 * We use a lock bit in HPTE dword 0 to synchronize updates and
 * accesses to each HPTE, and another bit to indicate non-present
 * HPTEs.
 */
#define HPTE_V_HVLOCK	0x40UL
#define HPTE_V_ABSENT	0x20UL

/*
 * We use this bit in the guest_rpte field of the revmap entry
 * to indicate a modified HPTE.
 */
#define HPTE_GR_MODIFIED	(1ul << 62)

/* These bits are reserved in the guest view of the HPTE */
#define HPTE_GR_RESERVED	HPTE_GR_MODIFIED
static inline long try_lock_hpte(__be64 *hpte, unsigned long bits)
{
	unsigned long tmp, old;
	__be64 be_lockbit, be_bits;

	/*
	 * We load/store in native endian, but the HTAB is in big endian. If
	 * we byte swap all data we apply on the HPTE we're implicitly correct
	 * again.
	 */
	be_lockbit = cpu_to_be64(HPTE_V_HVLOCK);
	be_bits = cpu_to_be64(bits);

	/* Test-and-set of the lock bit via a ldarx/stdcx. sequence */
	asm volatile("	ldarx	%0,0,%2\n"
		     "	and.	%1,%0,%3\n"
		     "	bne	2f\n"
		     "	or	%0,%0,%4\n"
		     "	stdcx.	%0,0,%2\n"
		     "	beq+	2f\n"
		     "	mr	%1,%3\n"
		     "2:	isync"
		     : "=&r" (tmp), "=&r" (old)
		     : "r" (hpte), "r" (be_bits), "r" (be_lockbit)
		     : "cc", "memory");
	return old == 0;
}
static inline void unlock_hpte(__be64 *hpte, unsigned long hpte_v)
{
	hpte_v &= ~HPTE_V_HVLOCK;
	asm volatile(PPC_RELEASE_BARRIER "" : : : "memory");
	hpte[0] = cpu_to_be64(hpte_v);
}
/* Without barrier */
static inline void __unlock_hpte(__be64 *hpte, unsigned long hpte_v)
{
	hpte_v &= ~HPTE_V_HVLOCK;
	hpte[0] = cpu_to_be64(hpte_v);
}
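/*
 * Illustrative usage sketch ("hptep" and "new_v" are hypothetical names):
 * callers in the HV MMU code typically spin on try_lock_hpte() before
 * rewriting an HPTE, then drop the lock with unlock_hpte(), or with
 * __unlock_hpte() when no release barrier is needed:
 *
 *	while (!try_lock_hpte(hptep, HPTE_V_HVLOCK))
 *		cpu_relax();
 *	... examine or update the HPTE ...
 *	__unlock_hpte(hptep, new_v);
 */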
/*
 * These functions encode knowledge of the POWER7/8/9 hardware
 * interpretations of the HPTE LP (large page size) field.
 */
static inline int kvmppc_hpte_page_shifts(unsigned long h, unsigned long l)
{
	unsigned int lphi;

	if (!(h & HPTE_V_LARGE))
		return 12;		/* 4kB */
	lphi = (l >> 16) & 0xf;
	switch ((l >> 12) & 0xf) {
	case 0:
		return !lphi ? 24 : 0;	/* 16MB */
	case 1:
		return 16;		/* 64kB */
	case 3:
		return !lphi ? 34 : 0;	/* 16GB */
	case 7:
		return (16 << 8) + 12;	/* 64kB in 4kB */
	case 8:
		if (!lphi)
			return (24 << 8) + 16;	/* 16MB in 64kB */
		if (lphi == 3)
			return (24 << 8) + 12;	/* 16MB in 4kB */
		break;
	}
	return 0;
}
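/*
 * The return value packs both shifts: the low byte is the base page shift
 * and, when base and actual sizes differ, the high byte is the actual page
 * shift.  For example, (24 << 8) + 12 means a 16MB actual page mapped with a
 * 4kB base page size, while a plain 16 means base and actual are both 64kB.
 * The helpers below unpack this encoding.
 */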
static inline int kvmppc_hpte_base_page_shift(unsigned long h, unsigned long l)
{
	return kvmppc_hpte_page_shifts(h, l) & 0xff;
}
static inline int kvmppc_hpte_actual_page_shift(unsigned long h, unsigned long l)
{
	int tmp = kvmppc_hpte_page_shifts(h, l);

	if (tmp > 0xff)
		return tmp >> 8;
	return tmp;
}
static inline unsigned long kvmppc_actual_pgsz(unsigned long v, unsigned long r)
{
	int shift = kvmppc_hpte_actual_page_shift(v, r);

	if (shift)
		return 1ul << shift;
	return 0;
}
static inline int kvmppc_pgsize_lp_encoding(int base_shift, int actual_shift)
{
	switch (base_shift) {
	case 12:
		switch (actual_shift) {
		case 12:
			return 0;
		case 16:
			return 7;
		case 24:
			return 0x38;
		}
		break;
	case 16:
		switch (actual_shift) {
		case 16:
			return 1;
		case 24:
			return 8;
		}
		break;
	case 24:
		return 0;
	}
	return -1;
}
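/*
 * These LP values mirror the decode in kvmppc_hpte_page_shifts(): for
 * example, 0x38 is lphi = 3 in the upper nibble and 8 in the lower nibble,
 * i.e. a 16MB page with a 4kB base page size, while 8 is a 16MB page with a
 * 64kB base page size.
 */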
static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
					     unsigned long pte_index)
{
	int a_pgshift, b_pgshift;
	unsigned long rb = 0, va_low, sllp;

	b_pgshift = a_pgshift = kvmppc_hpte_page_shifts(v, r);
	if (a_pgshift >= 0x100) {
		b_pgshift &= 0xff;
		a_pgshift >>= 8;
	}

	/*
	 * Ignore the top 14 bits of va.
	 * v has the top two bits covering segment size, hence move
	 * by 16 bits; also clear the lower HPTE_V_AVPN_SHIFT (7) bits.
	 * The AVA field in v also has the lower 23 bits ignored.
	 * For base page size 4K we need bits 14..65 (so we need to
	 * collect 11 extra bits); for others we need 14..14+i.
	 */
	/* This covers bits 14..54 of va */
	rb = (v & ~0x7fUL) << 16;		/* AVA field */

	/*
	 * The AVA in v has the lower 23 bits cleared. We need to derive
	 * them from the pteg index.
	 */
	va_low = pte_index >> 3;
	if (v & HPTE_V_SECONDARY)
		va_low = ~va_low;
	/*
	 * Get the vpn bits from va_low using the reverse of hashing.
	 * In v we have the va with 23 bits dropped and then left shifted
	 * by HPTE_V_AVPN_SHIFT (7) bits. To find the vsid we therefore
	 * right shift it by (SID_SHIFT - (23 - 7)).
	 */
	if (!(v & HPTE_V_1TB_SEG))
		va_low ^= v >> (SID_SHIFT - 16);
	else
		va_low ^= v >> (SID_SHIFT_1T - 16);
	va_low &= 0x7ff;

	if (b_pgshift <= 12) {
		if (a_pgshift > 12) {
			sllp = (a_pgshift == 16) ? 5 : 4;
			rb |= sllp << 5;	/* AP field */
		}
		rb |= (va_low & 0x7ff) << 12;	/* remaining 11 bits of AVA */
	} else {
		int aval_shift;
		/*
		 * Remaining bits of the AVA/LP fields;
		 * these also contain the rr bits of LP.
		 */
		rb |= (va_low << b_pgshift) & 0x7ff000;
		/*
		 * Now clear the LP bits that are not needed, based on the
		 * actual psize.
		 */
		rb &= ~((1ul << a_pgshift) - 1);
		/*
		 * AVAL field is bits 58..(77 - base_page_shift) of va;
		 * we have space for bits 58..64, and missing bits should
		 * be zero filled. The +1 takes care of the L bit shift.
		 */
		aval_shift = 64 - (77 - b_pgshift) + 1;
		rb |= ((va_low << aval_shift) & 0xfe);

		rb |= 1;		/* L field */
		rb |= r & 0xff000 & ((1ul << a_pgshift) - 1); /* LP field */
	}
	rb |= (v >> HPTE_V_SSIZE_SHIFT) << 8;	/* B field */
	return rb;
}
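/*
 * The value built above is the RB operand for the tlbie instruction that
 * invalidates this HPTE's translation; the real-mode hcall handlers (e.g.
 * H_REMOVE and H_PROTECT) compute it and pass it to their TLB-invalidation
 * helper.
 */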
static inline unsigned long hpte_rpn(unsigned long ptel, unsigned long psize)
{
	return ((ptel & HPTE_R_RPN) & ~(psize - 1)) >> PAGE_SHIFT;
}
static inline int hpte_is_writable(unsigned long ptel)
{
	unsigned long pp = ptel & (HPTE_R_PP0 | HPTE_R_PP);

	return pp != PP_RXRX && pp != PP_RXXX;
}
static inline unsigned long hpte_make_readonly(unsigned long ptel)
{
	if ((ptel & HPTE_R_PP0) || (ptel & HPTE_R_PP) == PP_RWXX)
		ptel = (ptel & ~HPTE_R_PP) | PP_RXXX;
	else
		ptel |= PP_RXRX;
	return ptel;
}
static inline bool hpte_cache_flags_ok(unsigned long hptel, bool is_ci)
{
	unsigned int wimg = hptel & HPTE_R_WIMG;

	/* Handle SAO */
	if (wimg == (HPTE_R_W | HPTE_R_I | HPTE_R_M) &&
	    cpu_has_feature(CPU_FTR_ARCH_206))
		wimg = HPTE_R_M;

	if (!is_ci)
		return wimg == HPTE_R_M;
	/*
	 * If the host mapping is cache inhibited, make sure hptel also has
	 * cache inhibited set.
	 */
	if (wimg & HPTE_R_W) /* FIXME!! is this ok for all guests? */
		return false;
	return !!(wimg & HPTE_R_I);
}
/*
 * If it's present and writable, atomically set dirty and referenced bits and
 * return the PTE, otherwise return 0.
 */
static inline pte_t kvmppc_read_update_linux_pte(pte_t *ptep, int writing)
{
	pte_t old_pte, new_pte = __pte(0);

	while (1) {
		/*
		 * Make sure we don't reload from ptep
		 */
		old_pte = READ_ONCE(*ptep);
		/*
		 * wait until H_PAGE_BUSY is clear then set it atomically
		 */
		if (unlikely(pte_val(old_pte) & H_PAGE_BUSY)) {
			cpu_relax();
			continue;
		}
		/* If pte is not present return None */
		if (unlikely(!(pte_val(old_pte) & _PAGE_PRESENT)))
			return __pte(0);

		new_pte = pte_mkyoung(old_pte);
		if (writing && pte_write(old_pte))
			new_pte = pte_mkdirty(new_pte);

		if (pte_xchg(ptep, old_pte, new_pte))
			break;
	}
	return new_pte;
}
static inline bool hpte_read_permission(unsigned long pp, unsigned long key)
{
	if (key)
		return PP_RWRX <= pp && pp <= PP_RXRX;
	return true;
}
static inline bool hpte_write_permission(unsigned long pp, unsigned long key)
{
	if (key)
		return pp == PP_RWRW;
	return pp <= PP_RWRW;
}
static inline int hpte_get_skey_perm(unsigned long hpte_r, unsigned long amr)
{
	unsigned long skey;

	skey = ((hpte_r & HPTE_R_KEY_HI) >> 57) |
		((hpte_r & HPTE_R_KEY_LO) >> 9);
	return (amr >> (62 - 2 * skey)) & 3;
}
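/*
 * The AMR holds a 2-bit access-control field per storage key, with key 0 in
 * the two most significant bits.  Worked example: for skey = 5 the shift is
 * 62 - 2 * 5 = 52, so the expression above returns bits 53:52 of the AMR.
 */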
static inline void lock_rmap(unsigned long *rmap)
{
	do {
		while (test_bit(KVMPPC_RMAP_LOCK_BIT, rmap))
			cpu_relax();
	} while (test_and_set_bit_lock(KVMPPC_RMAP_LOCK_BIT, rmap));
}
static inline void unlock_rmap(unsigned long *rmap)
{
	__clear_bit_unlock(KVMPPC_RMAP_LOCK_BIT, rmap);
}
static inline bool slot_is_aligned(struct kvm_memory_slot *memslot,
				   unsigned long pagesize)
{
	unsigned long mask = (pagesize >> PAGE_SHIFT) - 1;

	if (pagesize <= PAGE_SIZE)
		return true;
	return !(memslot->base_gfn & mask) && !(memslot->npages & mask);
}
/*
 * This works for 4k, 64k and 16M pages on POWER7,
 * and 4k and 16M pages on PPC970.
 */
static inline unsigned long slb_pgsize_encoding(unsigned long psize)
{
	unsigned long senc = 0;

	if (psize > 0x1000) {
		senc = SLB_VSID_L;
		if (psize == 0x10000)
			senc |= SLB_VSID_LP_01;
	}
	return senc;
}
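/*
 * For example, a 16M psize (0x1000000) yields SLB_VSID_L alone, a 64k psize
 * (0x10000) yields SLB_VSID_L | SLB_VSID_LP_01, and a 4k psize returns 0.
 */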
static inline int is_vrma_hpte(unsigned long hpte_v)
{
	return (hpte_v & ~0xffffffUL) ==
		(HPTE_V_1TB_SEG | (VRMA_VSID << (40 - 16)));
}
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
/*
 * Note modification of an HPTE; set the HPTE modified bit
 * if anyone is interested.
 */
static inline void note_hpte_modification(struct kvm *kvm,
					  struct revmap_entry *rev)
{
	if (atomic_read(&kvm->arch.hpte_mod_interest))
		rev->guest_rpte |= HPTE_GR_MODIFIED;
}
/*
 * Like kvm_memslots(), but for use in real mode when we can't do
 * any RCU stuff (since the secondary threads are offline from the
 * kernel's point of view), and we can't print anything.
 * Thus we use rcu_dereference_raw() rather than rcu_dereference_check().
 */
static inline struct kvm_memslots *kvm_memslots_raw(struct kvm *kvm)
{
	return rcu_dereference_raw_notrace(kvm->memslots[0]);
}
extern void kvmppc_mmu_debugfs_init(struct kvm *kvm);

extern void kvmhv_rm_send_ipi(int cpu);
static inline unsigned long kvmppc_hpt_npte(struct kvm_hpt_info *hpt)
{
	/* HPTEs are 2**4 bytes long */
	return 1UL << (hpt->order - 4);
}
static inline unsigned long kvmppc_hpt_mask(struct kvm_hpt_info *hpt)
{
	/* 128 (2**7) bytes in each HPTEG */
	return (1UL << (hpt->order - 7)) - 1;
}
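/*
 * For the default order of 24 (a 16MB HPT) these give 1UL << 20 HPTEs and a
 * hash mask of (1UL << 17) - 1, i.e. 2^17 HPTE groups of 8 entries each.
 */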
/* Set bits in a dirty bitmap, which is in LE format */
static inline void set_dirty_bits(unsigned long *map, unsigned long i,
				  unsigned long npages)
{
	if (npages >= 8)
		memset((char *)map + i / 8, 0xff, npages / 8);
	else
		for (; npages; ++i, --npages)
			__set_bit_le(i, map);
}
static inline void set_dirty_bits_atomic(unsigned long *map, unsigned long i,
					 unsigned long npages)
{
	if (npages >= 8)
		memset((char *)map + i / 8, 0xff, npages / 8);
	else
		for (; npages; ++i, --npages)
			set_bit_le(i, map);
}
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */

#endif /* __ASM_KVM_BOOK3S_64_H__ */