 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright SUSE Linux Products GmbH 2010
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#ifndef __ASM_KVM_BOOK3S_64_H__
#define __ASM_KVM_BOOK3S_64_H__

#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
static inline struct kvmppc_book3s_shadow_vcpu *svcpu_get(struct kvm_vcpu *vcpu)
{
        preempt_disable();
        return &get_paca()->shadow_vcpu;
}

static inline void svcpu_put(struct kvmppc_book3s_shadow_vcpu *svcpu)
{
        preempt_enable();
}
#endif

#define SPAPR_TCE_SHIFT         12

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
#define KVM_DEFAULT_HPT_ORDER   24      /* 16MB HPT by default */
#endif

#define VRMA_VSID       0x1ffffffUL     /* 1TB VSID reserved for VRMA */

/*
 * We use a lock bit in HPTE dword 0 to synchronize updates and
 * accesses to each HPTE, and another bit to indicate non-present
 * HPTEs.
 */
#define HPTE_V_HVLOCK   0x40UL
#define HPTE_V_ABSENT   0x20UL

/*
 * We use this bit in the guest_rpte field of the revmap entry
 * to indicate a modified HPTE.
 */
#define HPTE_GR_MODIFIED        (1ul << 62)

/* These bits are reserved in the guest view of the HPTE */
#define HPTE_GR_RESERVED        HPTE_GR_MODIFIED

static inline long try_lock_hpte(__be64 *hpte, unsigned long bits)
{
        unsigned long tmp, old;
        __be64 be_lockbit, be_bits;

        /*
         * We load/store in native endian, but the HTAB is in big endian. If
         * we byte swap all data we apply on the PTE we're implicitly correct
         * again.
         */
        be_lockbit = cpu_to_be64(HPTE_V_HVLOCK);
        be_bits = cpu_to_be64(bits);

        asm volatile("  ldarx   %0,0,%2\n"
                     "  and.    %1,%0,%3\n"
                     "  bne     2f\n"
                     "  or      %0,%0,%4\n"
                     "  stdcx.  %0,0,%2\n"
                     "  beq+    2f\n"
                     "  mr      %1,%3\n"
                     "2:        isync"
                     : "=&r" (tmp), "=&r" (old)
                     : "r" (hpte), "r" (be_bits), "r" (be_lockbit)
                     : "cc", "memory");
        return old == 0;
}

static inline void unlock_hpte(__be64 *hpte, unsigned long hpte_v)
{
        hpte_v &= ~HPTE_V_HVLOCK;
        asm volatile(PPC_RELEASE_BARRIER "" : : : "memory");
        hpte[0] = cpu_to_be64(hpte_v);
}

/* Without barrier */
static inline void __unlock_hpte(__be64 *hpte, unsigned long hpte_v)
{
        hpte_v &= ~HPTE_V_HVLOCK;
        hpte[0] = cpu_to_be64(hpte_v);
}

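/*
 * Illustrative usage sketch (not part of the original header; hptep is an
 * assumed name for a pointer to HPTE dword 0): callers take the per-HPTE lock
 * with try_lock_hpte() and release it with unlock_hpte()/__unlock_hpte(), e.g.
 *
 *      while (!try_lock_hpte(hptep, HPTE_V_HVLOCK))
 *              cpu_relax();
 *      // ... inspect or update the HPTE under the lock ...
 *      __unlock_hpte(hptep, be64_to_cpu(hptep[0]));
 */
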
static inline int __hpte_actual_psize(unsigned int lp, int psize)
{
        int i, shift;
        unsigned int mask;

        /* start from 1 ignoring MMU_PAGE_4K */
        for (i = 1; i < MMU_PAGE_COUNT; i++) {

                /* invalid penc */
                if (mmu_psize_defs[psize].penc[i] == -1)
                        continue;
                /*
                 * encoding bits per actual page size
                 *        PTE LP     actual page size
                 *    rrrr rrrz         >=8KB
                 *    rrrr rrzz         >=16KB
                 *    rrrr rzzz         >=32KB
                 *    rrrr zzzz         >=64KB
                 * .......
                 */
                shift = mmu_psize_defs[i].shift - LP_SHIFT;
                if (shift > LP_BITS)
                        shift = LP_BITS;
                mask = (1 << shift) - 1;
                if ((lp & mask) == mmu_psize_defs[psize].penc[i])
                        return i;
        }
        return -1;
}

static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
                                             unsigned long pte_index)
{
        int b_psize = MMU_PAGE_4K, a_psize = MMU_PAGE_4K;
        unsigned int penc;
        unsigned long rb = 0, va_low, sllp;
        unsigned int lp = (r >> LP_SHIFT) & ((1 << LP_BITS) - 1);

        if (v & HPTE_V_LARGE) {
                for (b_psize = 0; b_psize < MMU_PAGE_COUNT; b_psize++) {

                        /* valid entries have a shift value */
                        if (!mmu_psize_defs[b_psize].shift)
                                continue;

                        a_psize = __hpte_actual_psize(lp, b_psize);
                        if (a_psize != -1)
                                break;
                }
        }
        /*
         * Ignore the top 14 bits of va
         * v have top two bits covering segment size, hence move
         * by 16 bits, Also clear the lower HPTE_V_AVPN_SHIFT (7) bits.
         * AVA field in v also have the lower 23 bits ignored.
         * For base page size 4K we need 14 .. 65 bits (so need to
         * collect extra 11 bits)
         * For others we need 14..14+i
         */
        /* This covers 14..54 bits of va*/
        rb = (v & ~0x7fUL) << 16;               /* AVA field */

        rb |= (v >> HPTE_V_SSIZE_SHIFT) << 8;   /*  B field */
        /*
         * AVA in v had cleared lower 23 bits. We need to derive
         * that from pteg index
         */
        va_low = pte_index >> 3;
        if (v & HPTE_V_SECONDARY)
                va_low = ~va_low;
        /*
         * get the vpn bits from va_low using reverse of hashing.
         * In v we have va with 23 bits dropped and then left shifted
         * HPTE_V_AVPN_SHIFT (7) bits. Now to find vsid we need
         * right shift it with (SID_SHIFT - (23 - 7))
         */
        if (!(v & HPTE_V_1TB_SEG))
                va_low ^= v >> (SID_SHIFT - 16);
        else
                va_low ^= v >> (SID_SHIFT_1T - 16);
        va_low &= 0x7ff;

        switch (b_psize) {
        case MMU_PAGE_4K:
                sllp = ((mmu_psize_defs[a_psize].sllp & SLB_VSID_L) >> 6) |
                        ((mmu_psize_defs[a_psize].sllp & SLB_VSID_LP) >> 4);
                rb |= sllp << 5;        /*  AP field */
                rb |= (va_low & 0x7ff) << 12;   /* remaining 11 bits of AVA */
                break;
        default:
        {
                int aval_shift;
                /*
                 * remaining bits of AVA/LP fields
                 * Also contain the rr bits of LP
                 */
                rb |= (va_low << mmu_psize_defs[b_psize].shift) & 0x7ff000;
                /*
                 * Now clear not needed LP bits based on actual psize
                 */
                rb &= ~((1ul << mmu_psize_defs[a_psize].shift) - 1);
                /*
                 * AVAL field 58..77 - base_page_shift bits of va
                 * we have space for 58..64 bits, Missing bits should
                 * be zero filled. +1 is to take care of L bit shift
                 */
                aval_shift = 64 - (77 - mmu_psize_defs[b_psize].shift) + 1;
                rb |= ((va_low << aval_shift) & 0xfe);

                rb |= 1;                /* L field */
                penc = mmu_psize_defs[b_psize].penc[a_psize];
                rb |= penc << 12;       /* LP field */
                break;
        }
        }
        rb |= (v >> 54) & 0x300;                /* B field */
        return rb;
}

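/*
 * Illustrative sketch (an assumption, not taken from this header): the value
 * built by compute_tlbie_rb() is intended as the RB operand of a tlbie
 * instruction, roughly along the lines of
 *
 *      rb = compute_tlbie_rb(v, r, pte_index);
 *      asm volatile(PPC_TLBIE(%1, %0) : : "r" (rb), "r" (lpid) : "memory");
 *
 * where v and r are the HPTE doublewords and lpid is the guest partition ID.
 */
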
static inline unsigned long __hpte_page_size(unsigned long h, unsigned long l,
                                             bool is_base_size)
{
        int size, a_psize;
        /* Look at the 8 bit LP value */
        unsigned int lp = (l >> LP_SHIFT) & ((1 << LP_BITS) - 1);

        /* only handle 4k, 64k and 16M pages for now */
        if (!(h & HPTE_V_LARGE))
                return 1ul << 12;

        for (size = 0; size < MMU_PAGE_COUNT; size++) {
                /* valid entries have a shift value */
                if (!mmu_psize_defs[size].shift)
                        continue;

                a_psize = __hpte_actual_psize(lp, size);
                if (a_psize != -1) {
                        if (is_base_size)
                                return 1ul << mmu_psize_defs[size].shift;
                        return 1ul << mmu_psize_defs[a_psize].shift;
                }
        }
        return 0;
}

static inline unsigned long hpte_page_size(unsigned long h, unsigned long l)
{
        return __hpte_page_size(h, l, 0);
}

static inline unsigned long hpte_base_page_size(unsigned long h, unsigned long l)
{
        return __hpte_page_size(h, l, 1);
}

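/*
 * Worked example (illustrative): a non-large HPTE makes both helpers return
 * 1ul << 12 (4K). For a 16M HPTE (HPTE_V_LARGE set, LP encoding 16M) whose
 * base and actual page sizes match, hpte_page_size() and
 * hpte_base_page_size() both return 0x1000000.
 */
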
static inline unsigned long hpte_rpn(unsigned long ptel, unsigned long psize)
{
        return ((ptel & HPTE_R_RPN) & ~(psize - 1)) >> PAGE_SHIFT;
}

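/*
 * Worked example (illustrative): with psize = 0x10000 (64K), ~(psize - 1)
 * clears the low 16 bits of the real address held in ptel, and the shift by
 * PAGE_SHIFT converts the result into a host page frame number.
 */
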
static inline int hpte_is_writable(unsigned long ptel)
{
        unsigned long pp = ptel & (HPTE_R_PP0 | HPTE_R_PP);

        return pp != PP_RXRX && pp != PP_RXXX;
}

static inline unsigned long hpte_make_readonly(unsigned long ptel)
{
        if ((ptel & HPTE_R_PP0) || (ptel & HPTE_R_PP) == PP_RWXX)
                ptel = (ptel & ~HPTE_R_PP) | PP_RXXX;
        else
                ptel |= PP_RXRX;
        return ptel;
}

static inline int hpte_cache_flags_ok(unsigned long ptel, unsigned long io_type)
{
        unsigned int wimg = ptel & HPTE_R_WIMG;

        /* Handle SAO */
        if (wimg == (HPTE_R_W | HPTE_R_I | HPTE_R_M) &&
            cpu_has_feature(CPU_FTR_ARCH_206))
                wimg = HPTE_R_M;

        if (!io_type)
                return wimg == HPTE_R_M;

        return (wimg & (HPTE_R_W | HPTE_R_I)) == io_type;
}

/*
 * If it's present and writable, atomically set dirty and referenced bits and
 * return the PTE, otherwise return 0.
 */
static inline pte_t kvmppc_read_update_linux_pte(pte_t *ptep, int writing)
{
        pte_t old_pte, new_pte = __pte(0);

        while (1) {
                /*
                 * Make sure we don't reload from ptep
                 */
                old_pte = READ_ONCE(*ptep);
                /*
                 * wait until _PAGE_BUSY is clear then set it atomically
                 */
                if (unlikely(pte_val(old_pte) & _PAGE_BUSY)) {
                        cpu_relax();
                        continue;
                }
                /* If pte is not present return None */
                if (unlikely(!(pte_val(old_pte) & _PAGE_PRESENT)))
                        return __pte(0);

                new_pte = pte_mkyoung(old_pte);
                if (writing && pte_write(old_pte))
                        new_pte = pte_mkdirty(new_pte);

                if (pte_val(old_pte) == __cmpxchg_u64((unsigned long *)ptep,
                                                      pte_val(old_pte),
                                                      pte_val(new_pte)))
                        break;
        }
        return new_pte;
}

/* Return HPTE cache control bits corresponding to Linux pte bits */
static inline unsigned long hpte_cache_bits(unsigned long pte_val)
{
#if _PAGE_NO_CACHE == HPTE_R_I && _PAGE_WRITETHRU == HPTE_R_W
        return pte_val & (HPTE_R_W | HPTE_R_I);
#else
        return ((pte_val & _PAGE_NO_CACHE) ? HPTE_R_I : 0) +
                ((pte_val & _PAGE_WRITETHRU) ? HPTE_R_W : 0);
#endif
}

static inline bool hpte_read_permission(unsigned long pp, unsigned long key)
{
        if (key)
                return PP_RWRX <= pp && pp <= PP_RXRX;
        return true;
}

static inline bool hpte_write_permission(unsigned long pp, unsigned long key)
{
        if (key)
                return pp == PP_RWRW;
        return pp <= PP_RWRW;
}

static inline int hpte_get_skey_perm(unsigned long hpte_r, unsigned long amr)
{
        unsigned long skey;

        skey = ((hpte_r & HPTE_R_KEY_HI) >> 57) |
                ((hpte_r & HPTE_R_KEY_LO) >> 9);
        return (amr >> (62 - 2 * skey)) & 3;
}

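/*
 * Worked example (illustrative): HPTE_R_KEY_HI and HPTE_R_KEY_LO combine into
 * a 5-bit storage key. The AMR holds two bits per key starting from the most
 * significant end, so for skey = 5 the shift is 62 - 2 * 5 = 52 and the low
 * two bits of the shifted AMR are that key's access-control bits.
 */
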
static inline void lock_rmap(unsigned long *rmap)
{
        do {
                while (test_bit(KVMPPC_RMAP_LOCK_BIT, rmap))
                        cpu_relax();
        } while (test_and_set_bit_lock(KVMPPC_RMAP_LOCK_BIT, rmap));
}

static inline void unlock_rmap(unsigned long *rmap)
{
        __clear_bit_unlock(KVMPPC_RMAP_LOCK_BIT, rmap);
}

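/*
 * Illustrative usage sketch (the memslot/gfn indexing is an assumption, not
 * taken from this header): the rmap lock is held while a reverse-mapping
 * chain is walked or modified, e.g.
 *
 *      unsigned long *rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];
 *      lock_rmap(rmapp);
 *      // ... walk or update the rmap chain ...
 *      unlock_rmap(rmapp);
 */
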
static inline bool slot_is_aligned(struct kvm_memory_slot *memslot,
                                   unsigned long pagesize)
{
        unsigned long mask = (pagesize >> PAGE_SHIFT) - 1;

        if (pagesize <= PAGE_SIZE)
                return true;
        return !(memslot->base_gfn & mask) && !(memslot->npages & mask);
}

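/*
 * Worked example (illustrative, assuming a 4K host PAGE_SIZE): for 16M guest
 * pages the mask is (0x1000000 >> 12) - 1 = 0xfff, so base_gfn and npages
 * must both be multiples of 4096 small pages for the slot to be aligned.
 */
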
/*
 * This works for 4k, 64k and 16M pages on POWER7,
 * and 4k and 16M pages on PPC970.
 */
static inline unsigned long slb_pgsize_encoding(unsigned long psize)
{
        unsigned long senc = 0;

        if (psize > 0x1000) {
                senc = SLB_VSID_L;
                if (psize == 0x10000)
                        senc |= SLB_VSID_LP_01;
        }
        return senc;
}

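/*
 * Worked example (illustrative): 16M (psize = 0x1000000) encodes as
 * SLB_VSID_L alone, 64K (psize = 0x10000) as SLB_VSID_L | SLB_VSID_LP_01,
 * and 4K (psize = 0x1000) leaves the encoding at 0.
 */
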
static inline int is_vrma_hpte(unsigned long hpte_v)
{
        return (hpte_v & ~0xffffffUL) ==
                (HPTE_V_1TB_SEG | (VRMA_VSID << (40 - 16)));
}

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
/*
 * Note modification of an HPTE; set the HPTE modified bit
 * if anyone is interested.
 */
static inline void note_hpte_modification(struct kvm *kvm,
                                          struct revmap_entry *rev)
{
        if (atomic_read(&kvm->arch.hpte_mod_interest))
                rev->guest_rpte |= HPTE_GR_MODIFIED;
}

/*
 * Like kvm_memslots(), but for use in real mode when we can't do
 * any RCU stuff (since the secondary threads are offline from the
 * kernel's point of view), and we can't print anything.
 * Thus we use rcu_dereference_raw() rather than rcu_dereference_check().
 */
static inline struct kvm_memslots *kvm_memslots_raw(struct kvm *kvm)
{
        return rcu_dereference_raw_notrace(kvm->memslots[0]);
}

extern void kvmppc_mmu_debugfs_init(struct kvm *kvm);

extern void kvmhv_rm_send_ipi(int cpu);

#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */

#endif /* __ASM_KVM_BOOK3S_64_H__ */