/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright SUSE Linux Products GmbH 2010
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#ifndef __ASM_KVM_BOOK3S_64_H__
#define __ASM_KVM_BOOK3S_64_H__

#include <asm/book3s/64/mmu-hash.h>

/* Power architecture requires HPT is at least 256kiB, at most 64TiB */
#define PPC_MIN_HPT_ORDER	18
#define PPC_MAX_HPT_ORDER	46

#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
static inline struct kvmppc_book3s_shadow_vcpu *svcpu_get(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	return &get_paca()->shadow_vcpu;
}

static inline void svcpu_put(struct kvmppc_book3s_shadow_vcpu *svcpu)
{
	preempt_enable();
}
#endif

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE

static inline bool kvm_is_radix(struct kvm *kvm)
{
	return kvm->arch.radix;
}

#define KVM_DEFAULT_HPT_ORDER	24	/* 16MB HPT by default */
#endif

#define VRMA_VSID	0x1ffffffUL	/* 1TB VSID reserved for VRMA */

/*
 * We use a lock bit in HPTE dword 0 to synchronize updates and
 * accesses to each HPTE, and another bit to indicate non-present
 * HPTEs.
 */
#define HPTE_V_HVLOCK	0x40UL
#define HPTE_V_ABSENT	0x20UL

/*
 * We use this bit in the guest_rpte field of the revmap entry
 * to indicate a modified HPTE.
 */
#define HPTE_GR_MODIFIED	(1ul << 62)

/* These bits are reserved in the guest view of the HPTE */
#define HPTE_GR_RESERVED	HPTE_GR_MODIFIED

static inline long try_lock_hpte(__be64 *hpte, unsigned long bits)
{
	unsigned long tmp, old;
	__be64 be_lockbit, be_bits;

	/*
	 * We load/store in native endian, but the HTAB is in big endian. If
	 * we byte-swap all data we apply to the PTE, we are implicitly
	 * correct again.
	 */
	be_lockbit = cpu_to_be64(HPTE_V_HVLOCK);
	be_bits = cpu_to_be64(bits);

	asm volatile("	ldarx	%0,0,%2\n"
		     "	and.	%1,%0,%3\n"
		     "	bne	2f\n"
		     "	or	%0,%0,%4\n"
		     "	stdcx.	%0,0,%2\n"
		     "	beq+	2f\n"
		     "	mr	%1,%3\n"
		     "2:	isync"
		     : "=&r" (tmp), "=&r" (old)
		     : "r" (hpte), "r" (be_bits), "r" (be_lockbit)
		     : "cc", "memory");
	return old == 0;
}
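
/*
 * Illustrative usage (not part of the original header): callers in the
 * HV MMU code typically spin until the lock is taken, along the lines of
 *
 *	while (!try_lock_hpte(hptep, HPTE_V_HVLOCK))
 *		cpu_relax();
 *	... examine or update the HPTE ...
 *	unlock_hpte(hptep, be64_to_cpu(hptep[0]));
 */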

static inline void unlock_hpte(__be64 *hpte, unsigned long hpte_v)
{
	hpte_v &= ~HPTE_V_HVLOCK;
	asm volatile(PPC_RELEASE_BARRIER "" : : : "memory");
	hpte[0] = cpu_to_be64(hpte_v);
}

/* Without barrier */
static inline void __unlock_hpte(__be64 *hpte, unsigned long hpte_v)
{
	hpte_v &= ~HPTE_V_HVLOCK;
	hpte[0] = cpu_to_be64(hpte_v);
}

static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
					     unsigned long pte_index)
{
	int i, b_psize = MMU_PAGE_4K, a_psize = MMU_PAGE_4K;
	unsigned int penc;
	unsigned long rb = 0, va_low, sllp;
	unsigned int lp = (r >> LP_SHIFT) & ((1 << LP_BITS) - 1);

	if (v & HPTE_V_LARGE) {
		i = hpte_page_sizes[lp];
		b_psize = i & 0xf;
		a_psize = i >> 4;
	}

	/*
	 * Ignore the top 14 bits of va.
	 * v has its top two bits covering segment size, hence move
	 * by 16 bits; also clear the lower HPTE_V_AVPN_SHIFT (7) bits.
	 * The AVA field in v also has its lower 23 bits ignored.
	 * For base page size 4K we need bits 14..65 (so we must
	 * collect 11 extra bits); for others we need bits 14..14+i.
	 */
	/* This covers bits 14..54 of va */
	rb = (v & ~0x7fUL) << 16;		/* AVA field */

	/*
	 * AVA in v had its lower 23 bits cleared. We need to derive
	 * those from the PTEG index.
	 */
	va_low = pte_index >> 3;
	if (v & HPTE_V_SECONDARY)
		va_low = ~va_low;
	/*
	 * Get the vpn bits from va_low using the reverse of the hashing.
	 * In v we have the va with 23 bits dropped and then left shifted
	 * by HPTE_V_AVPN_SHIFT (7) bits. To recover the vsid we must
	 * right shift it by (SID_SHIFT - (23 - 7)).
	 */
	if (!(v & HPTE_V_1TB_SEG))
		va_low ^= v >> (SID_SHIFT - 16);
	else
		va_low ^= v >> (SID_SHIFT_1T - 16);
	va_low &= 0x7ff;

	switch (b_psize) {
	case MMU_PAGE_4K:
		sllp = get_sllp_encoding(a_psize);
		rb |= sllp << 5;		/* AP field */
		rb |= (va_low & 0x7ff) << 12;	/* remaining 11 bits of AVA */
		break;
	default:
	{
		int aval_shift;
		/*
		 * Remaining bits of the AVA/LP fields;
		 * these also contain the rr bits of LP.
		 */
		rb |= (va_low << mmu_psize_defs[b_psize].shift) & 0x7ff000;
		/*
		 * Now clear the LP bits that are not needed for the
		 * actual page size.
		 */
		rb &= ~((1ul << mmu_psize_defs[a_psize].shift) - 1);
		/*
		 * AVAL field is bits 58..(77 - base_page_shift) of va.
		 * We have space for bits 58..64; missing bits should
		 * be zero filled. The +1 takes care of the L bit shift.
		 */
		aval_shift = 64 - (77 - mmu_psize_defs[b_psize].shift) + 1;
		rb |= ((va_low << aval_shift) & 0xfe);

		rb |= 1;		/* L field */
		penc = mmu_psize_defs[b_psize].penc[a_psize];
		rb |= penc << 12;	/* LP field */
		break;
	}
	}
	rb |= (v >> HPTE_V_SSIZE_SHIFT) << 8;	/* B field */
	return rb;
}
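
/*
 * For context (an addition, not in the original file): the value built
 * above is the RB operand for the tlbie instruction, combining the AVA
 * bits of the virtual address, the B (segment size) field and the
 * page-size encoding (AP for a 4K base page size, L/LP otherwise).
 */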

static inline unsigned long hpte_rpn(unsigned long ptel, unsigned long psize)
{
	return ((ptel & HPTE_R_RPN) & ~(psize - 1)) >> PAGE_SHIFT;
}
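
/*
 * Worked example (illustrative, assuming 4K host pages, PAGE_SHIFT = 12):
 * for a 16M guest page, psize = 0x1000000, so the low 24 bits of the
 * real address are cleared and the result is the frame number of the
 * start of the 16M region, in 4K units.
 */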

static inline int hpte_is_writable(unsigned long ptel)
{
	unsigned long pp = ptel & (HPTE_R_PP0 | HPTE_R_PP);

	return pp != PP_RXRX && pp != PP_RXXX;
}

static inline unsigned long hpte_make_readonly(unsigned long ptel)
{
	if ((ptel & HPTE_R_PP0) || (ptel & HPTE_R_PP) == PP_RWXX)
		ptel = (ptel & ~HPTE_R_PP) | PP_RXXX;
	else
		ptel |= PP_RXRX;
	return ptel;
}

static inline bool hpte_cache_flags_ok(unsigned long hptel, bool is_ci)
{
	unsigned int wimg = hptel & HPTE_R_WIMG;

	/* Handle SAO */
	if (wimg == (HPTE_R_W | HPTE_R_I | HPTE_R_M) &&
	    cpu_has_feature(CPU_FTR_ARCH_206))
		wimg = HPTE_R_M;

	if (!is_ci)
		return wimg == HPTE_R_M;
	/*
	 * If the host mapping is cache-inhibited, make sure the guest
	 * HPTE is cache-inhibited too.
	 */
	if (wimg & HPTE_R_W) /* FIXME!! is this OK for all guests? */
		return false;
	return !!(wimg & HPTE_R_I);
}
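
/*
 * Background note (an addition): WIMG stands for Write-through, cache
 * Inhibited, Memory coherence and Guarded. W|I|M is not an ordinary
 * combination; on ISA 2.06 (POWER7) and later it encodes Strong Access
 * Ordering (SAO), which is why it is folded to plain HPTE_R_M above.
 */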

/*
 * If it's present and writable, atomically set the dirty and referenced
 * bits and return the PTE, otherwise return a zero PTE.
 */
static inline pte_t kvmppc_read_update_linux_pte(pte_t *ptep, int writing)
{
	pte_t old_pte, new_pte = __pte(0);

	while (1) {
		/*
		 * Make sure we don't reload from ptep
		 */
		old_pte = READ_ONCE(*ptep);
		/*
		 * Wait until H_PAGE_BUSY is clear, then set it atomically
		 */
		if (unlikely(pte_val(old_pte) & H_PAGE_BUSY)) {
			cpu_relax();
			continue;
		}
		/* If the PTE is not present, return a zero PTE */
		if (unlikely(!(pte_val(old_pte) & _PAGE_PRESENT)))
			return __pte(0);

		new_pte = pte_mkyoung(old_pte);
		if (writing && pte_write(old_pte))
			new_pte = pte_mkdirty(new_pte);

		if (pte_xchg(ptep, old_pte, new_pte))
			break;
	}
	return new_pte;
}

static inline bool hpte_read_permission(unsigned long pp, unsigned long key)
{
	if (key)
		return PP_RWRX <= pp && pp <= PP_RXRX;
	return true;
}

static inline bool hpte_write_permission(unsigned long pp, unsigned long key)
{
	if (key)
		return pp == PP_RWRW;
	return pp <= PP_RWRW;
}
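
/*
 * For reference (an addition): per asm/book3s/64/mmu-hash.h the PP
 * values used above are PP_RWXX = 0, PP_RWRX = 1, PP_RWRW = 2 and
 * PP_RXRX = 3, so with a non-zero key a read needs pp 1..3 and a
 * write needs exactly PP_RWRW; with key 0 a write needs pp <= 2.
 */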

static inline int hpte_get_skey_perm(unsigned long hpte_r, unsigned long amr)
{
	unsigned long skey;

	skey = ((hpte_r & HPTE_R_KEY_HI) >> 57) |
		((hpte_r & HPTE_R_KEY_LO) >> 9);
	return (amr >> (62 - 2 * skey)) & 3;
}
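
/*
 * Worked example (illustrative): for storage key 5 this computes
 * (amr >> 52) & 3, i.e. the two AMR bits for key 5 (bits 10:11 in
 * IBM bit numbering, where bit 0 is the MSB).
 */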

static inline void lock_rmap(unsigned long *rmap)
{
	do {
		while (test_bit(KVMPPC_RMAP_LOCK_BIT, rmap))
			cpu_relax();
	} while (test_and_set_bit_lock(KVMPPC_RMAP_LOCK_BIT, rmap));
}

static inline void unlock_rmap(unsigned long *rmap)
{
	__clear_bit_unlock(KVMPPC_RMAP_LOCK_BIT, rmap);
}
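
/*
 * Illustrative usage (not part of the original header): reverse-mapping
 * chain updates are bracketed by this pair, e.g.
 *
 *	lock_rmap(rmap);
 *	... walk or modify the rmap chain ...
 *	unlock_rmap(rmap);
 */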

static inline bool slot_is_aligned(struct kvm_memory_slot *memslot,
				   unsigned long pagesize)
{
	unsigned long mask = (pagesize >> PAGE_SHIFT) - 1;

	if (pagesize <= PAGE_SIZE)
		return true;
	return !(memslot->base_gfn & mask) && !(memslot->npages & mask);
}
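
/*
 * Worked example (illustrative, assuming 4K host pages, PAGE_SHIFT = 12):
 * for a 16M backing page size, mask = (0x1000000 >> 12) - 1 = 0xfff,
 * so both the slot's base_gfn and its npages must be multiples of 4096
 * host pages, i.e. the slot must be 16M-aligned and a multiple of 16M
 * in size.
 */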

/*
 * This works for 4k, 64k and 16M pages on POWER7,
 * and 4k and 16M pages on PPC970.
 */
static inline unsigned long slb_pgsize_encoding(unsigned long psize)
{
	unsigned long senc = 0;

	if (psize > 0x1000) {
		senc = SLB_VSID_L;
		if (psize == 0x10000)
			senc |= SLB_VSID_LP_01;
	}
	return senc;
}
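
/*
 * Resulting encodings (derived from the code above): 4K -> 0,
 * 64K -> SLB_VSID_L | SLB_VSID_LP_01, and any other size above 4K
 * (e.g. 16M) -> SLB_VSID_L alone.
 */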

static inline int is_vrma_hpte(unsigned long hpte_v)
{
	return (hpte_v & ~0xffffffUL) ==
		(HPTE_V_1TB_SEG | (VRMA_VSID << (40 - 16)));
}

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
/*
 * Note modification of an HPTE; set the HPTE modified bit
 * if anyone is interested.
 */
static inline void note_hpte_modification(struct kvm *kvm,
					  struct revmap_entry *rev)
{
	if (atomic_read(&kvm->arch.hpte_mod_interest))
		rev->guest_rpte |= HPTE_GR_MODIFIED;
}

/*
 * Like kvm_memslots(), but for use in real mode when we can't do
 * any RCU stuff (since the secondary threads are offline from the
 * kernel's point of view), and we can't print anything.
 * Thus we use rcu_dereference_raw() rather than rcu_dereference_check().
 */
static inline struct kvm_memslots *kvm_memslots_raw(struct kvm *kvm)
{
	return rcu_dereference_raw_notrace(kvm->memslots[0]);
}

extern void kvmppc_mmu_debugfs_init(struct kvm *kvm);

extern void kvmhv_rm_send_ipi(int cpu);

static inline unsigned long kvmppc_hpt_npte(struct kvm_hpt_info *hpt)
{
	/* HPTEs are 2**4 bytes long */
	return 1UL << (hpt->order - 4);
}

static inline unsigned long kvmppc_hpt_mask(struct kvm_hpt_info *hpt)
{
	/* 128 (2**7) bytes in each HPTEG */
	return (1UL << (hpt->order - 7)) - 1;
}
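
/*
 * Worked example (illustrative): with KVM_DEFAULT_HPT_ORDER = 24 (a
 * 16MB HPT), kvmppc_hpt_npte() yields 1UL << 20 HPTEs and
 * kvmppc_hpt_mask() yields (1UL << 17) - 1, the mask applied to the
 * hash value to index an HPTE group.
 */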

#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */

#endif /* __ASM_KVM_BOOK3S_64_H__ */