/* arch/powerpc/include/asm/kvm_book3s_64.h */

/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright SUSE Linux Products GmbH 2010
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#ifndef __ASM_KVM_BOOK3S_64_H__
#define __ASM_KVM_BOOK3S_64_H__

#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
static inline struct kvmppc_book3s_shadow_vcpu *svcpu_get(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	return &get_paca()->shadow_vcpu;
}

static inline void svcpu_put(struct kvmppc_book3s_shadow_vcpu *svcpu)
{
	preempt_enable();
}
#endif

#define SPAPR_TCE_SHIFT		12

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
#define KVM_DEFAULT_HPT_ORDER	24	/* 16MB HPT by default */
extern unsigned long kvm_rma_pages;
#endif

#define VRMA_VSID	0x1ffffffUL	/* 1TB VSID reserved for VRMA */

/*
 * We use a lock bit in HPTE dword 0 to synchronize updates and
 * accesses to each HPTE, and another bit to indicate non-present
 * HPTEs.
 */
#define HPTE_V_HVLOCK	0x40UL
#define HPTE_V_ABSENT	0x20UL

/*
 * We use this bit in the guest_rpte field of the revmap entry
 * to indicate a modified HPTE.
 */
#define HPTE_GR_MODIFIED	(1ul << 62)

/* These bits are reserved in the guest view of the HPTE */
#define HPTE_GR_RESERVED	HPTE_GR_MODIFIED

static inline long try_lock_hpte(unsigned long *hpte, unsigned long bits)
{
	unsigned long tmp, old;

	asm volatile("	ldarx	%0,0,%2\n"
		     "	and.	%1,%0,%3\n"
		     "	bne	2f\n"
		     "	ori	%0,%0,%4\n"
		     "	stdcx.	%0,0,%2\n"
		     "	beq+	2f\n"
		     "	mr	%1,%3\n"
		     "2:	isync"
		     : "=&r" (tmp), "=&r" (old)
		     : "r" (hpte), "r" (bits), "i" (HPTE_V_HVLOCK)
		     : "cc", "memory");
	return old == 0;
}
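
/*
 * Illustrative sketch (added for exposition, not part of the original
 * header): the intended pairing for try_lock_hpte().  The caller spins
 * until the lock bit is free, edits the HPTE, then drops the lock by
 * clearing HPTE_V_HVLOCK in dword 0.  The smp_mb() is a simplification
 * of the release barrier the real HV code uses; "hptep" and "new_r"
 * are hypothetical.
 */
static inline void example_locked_hpte_update(unsigned long *hptep,
					      unsigned long new_r)
{
	while (!try_lock_hpte(hptep, HPTE_V_HVLOCK))
		cpu_relax();			/* spin until unlocked */
	hptep[1] = new_r;			/* update HPTE dword 1 */
	smp_mb();				/* order update before unlock */
	hptep[0] &= ~HPTE_V_HVLOCK;		/* drop the lock bit */
}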

static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
					     unsigned long pte_index)
{
	unsigned long rb, va_low;

	rb = (v & ~0x7fUL) << 16;		/* AVA field */
	va_low = pte_index >> 3;
	if (v & HPTE_V_SECONDARY)
		va_low = ~va_low;
	/* xor vsid from AVA */
	if (!(v & HPTE_V_1TB_SEG))
		va_low ^= v >> 12;
	else
		va_low ^= v >> 24;
	va_low &= 0x7ff;
	if (v & HPTE_V_LARGE) {
		rb |= 1;			/* L field */
		if (cpu_has_feature(CPU_FTR_ARCH_206) &&
		    (r & 0xff000)) {
			/* non-16MB large page, must be 64k */
			/* (masks depend on page size) */
			rb |= 0x1000;		/* page encoding in LP field */
			rb |= (va_low & 0x7f) << 16; /* 7b of VA in AVA/LP field */
			rb |= ((va_low << 4) & 0xf0); /* AVAL field (P7 doesn't seem to care) */
		}
	} else {
		/* 4kB page */
		rb |= (va_low & 0x7ff) << 12;	/* remaining 11b of VA */
	}
	rb |= (v >> 54) & 0x300;		/* B field */
	return rb;
}
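
/*
 * Illustrative sketch (added, not in the original file): how the RB
 * image from compute_tlbie_rb() would feed a global tlbie.  The real
 * HV code batches invalidations, serializes on a tlbie lock, and uses
 * the PPC_TLBIE() opcode macro for old assemblers; this shows only the
 * bare instruction sequence, with the RS operand (LPID) left as 0.
 */
static inline void example_tlbie_one(unsigned long v, unsigned long r,
				     unsigned long pte_index)
{
	unsigned long rb = compute_tlbie_rb(v, r, pte_index);

	asm volatile("ptesync" : : : "memory");
	asm volatile("tlbie %0,%1" : : "r" (rb), "r" (0UL) : "memory");
	asm volatile("eieio; tlbsync; ptesync" : : : "memory");
}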

static inline unsigned long hpte_page_size(unsigned long h, unsigned long l)
{
	/* only handle 4k, 64k and 16M pages for now */
	if (!(h & HPTE_V_LARGE))
		return 1ul << 12;		/* 4k page */
	if ((l & 0xf000) == 0x1000 && cpu_has_feature(CPU_FTR_ARCH_206))
		return 1ul << 16;		/* 64k page */
	if ((l & 0xff000) == 0)
		return 1ul << 24;		/* 16M page */
	return 0;				/* error */
}

static inline unsigned long hpte_rpn(unsigned long ptel, unsigned long psize)
{
	return ((ptel & HPTE_R_RPN) & ~(psize - 1)) >> PAGE_SHIFT;
}
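
/*
 * Illustrative sketch (added): combining hpte_page_size() and
 * hpte_rpn() to turn an HPTE pair (v, r) into the guest frame number
 * of the first small page of the mapping.
 */
static inline unsigned long example_hpte_first_gfn(unsigned long v,
						   unsigned long r)
{
	unsigned long psize = hpte_page_size(v, r);

	if (!psize)
		return 0;	/* unrecognized page-size encoding */
	return hpte_rpn(r, psize);
}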

static inline int hpte_is_writable(unsigned long ptel)
{
	unsigned long pp = ptel & (HPTE_R_PP0 | HPTE_R_PP);

	return pp != PP_RXRX && pp != PP_RXXX;
}

static inline unsigned long hpte_make_readonly(unsigned long ptel)
{
	if ((ptel & HPTE_R_PP0) || (ptel & HPTE_R_PP) == PP_RWXX)
		ptel = (ptel & ~HPTE_R_PP) | PP_RXXX;
	else
		ptel |= PP_RXRX;
	return ptel;
}
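
/*
 * Illustrative sketch (added): write-protecting an HPTE the way dirty
 * tracking would.  In the real flow the caller holds the HPTE lock
 * (see try_lock_hpte() above) and invalidates the old translation
 * before the downgraded pte_r becomes visible to the guest.
 */
static inline unsigned long example_hpte_wrprotect(unsigned long ptel)
{
	if (hpte_is_writable(ptel))
		ptel = hpte_make_readonly(ptel);
	return ptel;
}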

static inline int hpte_cache_flags_ok(unsigned long ptel, unsigned long io_type)
{
	unsigned int wimg = ptel & HPTE_R_WIMG;

	/* Handle SAO */
	if (wimg == (HPTE_R_W | HPTE_R_I | HPTE_R_M) &&
	    cpu_has_feature(CPU_FTR_ARCH_206))
		wimg = HPTE_R_M;

	if (!io_type)
		return wimg == HPTE_R_M;

	return (wimg & (HPTE_R_W | HPTE_R_I)) == io_type;
}
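
/*
 * Illustrative sketch (added): validating guest-supplied WIMG bits.
 * For ordinary RAM io_type is 0 and only M (coherent) is accepted;
 * for cache-inhibited I/O the caller would pass HPTE_R_I.
 */
static inline int example_wimg_ok(unsigned long ptel, int is_io)
{
	return hpte_cache_flags_ok(ptel, is_io ? HPTE_R_I : 0);
}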

/*
 * If it's present and writable, atomically set the dirty and referenced
 * bits and return the PTE, otherwise return __pte(0).  If we find a
 * transparent hugepage that is marked as splitting, also return __pte(0).
 */
static inline pte_t kvmppc_read_update_linux_pte(pte_t *ptep, int writing,
						 unsigned int hugepage)
{
	pte_t old_pte, new_pte = __pte(0);

	while (1) {
		old_pte = pte_val(*ptep);
		/*
		 * wait until _PAGE_BUSY is clear then set it atomically
		 */
		if (unlikely(old_pte & _PAGE_BUSY)) {
			cpu_relax();
			continue;
		}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
		/* If hugepage and is trans splitting return None */
		if (unlikely(hugepage &&
			     pmd_trans_splitting(pte_pmd(old_pte))))
			return __pte(0);
#endif
		/* If pte is not present return None */
		if (unlikely(!(old_pte & _PAGE_PRESENT)))
			return __pte(0);

		new_pte = pte_mkyoung(old_pte);
		if (writing && pte_write(old_pte))
			new_pte = pte_mkdirty(new_pte);

		if (old_pte == __cmpxchg_u64((unsigned long *)ptep, old_pte,
					     new_pte))
			break;
	}
	return new_pte;
}
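
/*
 * Illustrative sketch (added): a simplified caller of
 * kvmppc_read_update_linux_pte().  In the real HV fault path the ptep
 * comes from a Linux page-table walk and "writing" reflects the fault
 * type; the helper returns __pte(0) when the caller must fall back to
 * the slow path.
 */
static inline unsigned long example_update_and_get_pfn(pte_t *ptep,
						       int writing)
{
	pte_t pte = kvmppc_read_update_linux_pte(ptep, writing, 0);

	if (!(pte_val(pte) & _PAGE_PRESENT))
		return 0;		/* absent or splitting hugepage */
	return pte_pfn(pte);
}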

/* Return HPTE cache control bits corresponding to Linux pte bits */
static inline unsigned long hpte_cache_bits(unsigned long pte_val)
{
#if _PAGE_NO_CACHE == HPTE_R_I && _PAGE_WRITETHRU == HPTE_R_W
	return pte_val & (HPTE_R_W | HPTE_R_I);
#else
	return ((pte_val & _PAGE_NO_CACHE) ? HPTE_R_I : 0) +
		((pte_val & _PAGE_WRITETHRU) ? HPTE_R_W : 0);
#endif
}

static inline bool hpte_read_permission(unsigned long pp, unsigned long key)
{
	if (key)
		return PP_RWRX <= pp && pp <= PP_RXRX;
	return 1;
}

static inline bool hpte_write_permission(unsigned long pp, unsigned long key)
{
	if (key)
		return pp == PP_RWRW;
	return pp <= PP_RWRW;
}
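
/*
 * Illustrative sketch (added): dispatching between the two PP checks
 * the way a page-fault handler would, given the PP bits and the key
 * bit selected by the faulting segment.
 */
static inline bool example_hpte_access_permitted(unsigned long pp,
						 unsigned long key,
						 int writing)
{
	return writing ? hpte_write_permission(pp, key)
		       : hpte_read_permission(pp, key);
}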

static inline int hpte_get_skey_perm(unsigned long hpte_r, unsigned long amr)
{
	unsigned long skey;

	skey = ((hpte_r & HPTE_R_KEY_HI) >> 57) |
		((hpte_r & HPTE_R_KEY_LO) >> 9);
	return (amr >> (62 - 2 * skey)) & 3;
}
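
/*
 * Illustrative sketch (added): interpreting the two AMR bits returned
 * above.  As we read the ISA, bit 1 of the result disallows stores and
 * bit 0 disallows loads for the HPTE's storage key.
 */
static inline int example_skey_denies_access(unsigned long hpte_r,
					     unsigned long amr, int writing)
{
	int perm = hpte_get_skey_perm(hpte_r, amr);

	return writing ? (perm & 2) : (perm & 1);
}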

static inline void lock_rmap(unsigned long *rmap)
{
	do {
		while (test_bit(KVMPPC_RMAP_LOCK_BIT, rmap))
			cpu_relax();
	} while (test_and_set_bit_lock(KVMPPC_RMAP_LOCK_BIT, rmap));
}

static inline void unlock_rmap(unsigned long *rmap)
{
	__clear_bit_unlock(KVMPPC_RMAP_LOCK_BIT, rmap);
}
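
/*
 * Illustrative sketch (added): the critical section lock_rmap() and
 * unlock_rmap() are meant to bracket.  KVMPPC_RMAP_REFERENCED is the
 * flag the HV code keeps in the same word, defined alongside
 * KVMPPC_RMAP_LOCK_BIT in kvm_host.h.
 */
static inline void example_rmap_note_referenced(unsigned long *rmap)
{
	lock_rmap(rmap);
	*rmap |= KVMPPC_RMAP_REFERENCED;
	unlock_rmap(rmap);
}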

static inline bool slot_is_aligned(struct kvm_memory_slot *memslot,
				   unsigned long pagesize)
{
	unsigned long mask = (pagesize >> PAGE_SHIFT) - 1;

	if (pagesize <= PAGE_SIZE)
		return 1;
	return !(memslot->base_gfn & mask) && !(memslot->npages & mask);
}
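
/*
 * Illustrative sketch (added): a memslot can back 16M guest pages only
 * if its base gfn and size are both 16M-aligned.
 */
static inline bool example_slot_supports_16M(struct kvm_memory_slot *memslot)
{
	return slot_is_aligned(memslot, 1ul << 24);
}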

/*
 * This works for 4k, 64k and 16M pages on POWER7,
 * and 4k and 16M pages on PPC970.
 */
static inline unsigned long slb_pgsize_encoding(unsigned long psize)
{
	unsigned long senc = 0;

	if (psize > 0x1000) {
		senc = SLB_VSID_L;
		if (psize == 0x10000)
			senc |= SLB_VSID_LP_01;
	}
	return senc;
}
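
/*
 * Illustrative sketch (added): the encodings this yields.  A 64k base
 * page size selects SLB_VSID_L | SLB_VSID_LP_01, 16M selects just
 * SLB_VSID_L, and anything <= 4k leaves the L/LP bits clear.
 */
static inline unsigned long example_slb_encoding_for_64k(void)
{
	return slb_pgsize_encoding(0x10000);	/* SLB_VSID_L | SLB_VSID_LP_01 */
}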

static inline int is_vrma_hpte(unsigned long hpte_v)
{
	return (hpte_v & ~0xffffffUL) ==
		(HPTE_V_1TB_SEG | (VRMA_VSID << (40 - 16)));
}

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
/*
 * Note modification of an HPTE; set the HPTE modified bit
 * if anyone is interested.
 */
static inline void note_hpte_modification(struct kvm *kvm,
					  struct revmap_entry *rev)
{
	if (atomic_read(&kvm->arch.hpte_mod_interest))
		rev->guest_rpte |= HPTE_GR_MODIFIED;
}
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */

#endif /* __ASM_KVM_BOOK3S_64_H__ */