/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright SUSE Linux Products GmbH 2010
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#ifndef __ASM_KVM_BOOK3S_64_H__
#define __ASM_KVM_BOOK3S_64_H__

#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
static inline struct kvmppc_book3s_shadow_vcpu *svcpu_get(struct kvm_vcpu *vcpu)
{
        preempt_disable();
        return &get_paca()->shadow_vcpu;
}

static inline void svcpu_put(struct kvmppc_book3s_shadow_vcpu *svcpu)
{
        preempt_enable();
}
#endif

#define SPAPR_TCE_SHIFT		12

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
#define KVM_DEFAULT_HPT_ORDER	24	/* 16MB HPT by default */
extern unsigned long kvm_rma_pages;
#endif

#define VRMA_VSID	0x1ffffffUL	/* 1TB VSID reserved for VRMA */

/*
 * We use a lock bit in HPTE dword 0 to synchronize updates and
 * accesses to each HPTE, and another bit to indicate non-present
 * HPTEs.
 */
#define HPTE_V_HVLOCK	0x40UL
#define HPTE_V_ABSENT	0x20UL

/*
 * We use this bit in the guest_rpte field of the revmap entry
 * to indicate a modified HPTE.
 */
#define HPTE_GR_MODIFIED	(1ul << 62)

/* These bits are reserved in the guest view of the HPTE */
#define HPTE_GR_RESERVED	HPTE_GR_MODIFIED

static inline long try_lock_hpte(unsigned long *hpte, unsigned long bits)
{
        unsigned long tmp, old;

        asm volatile("  ldarx   %0,0,%2\n"
                     "  and.    %1,%0,%3\n"
                     "  bne     2f\n"
                     "  ori     %0,%0,%4\n"
                     "  stdcx.  %0,0,%2\n"
                     "  beq+    2f\n"
                     "  mr      %1,%3\n"
                     "2:        isync"
                     : "=&r" (tmp), "=&r" (old)
                     : "r" (hpte), "r" (bits), "i" (HPTE_V_HVLOCK)
                     : "cc", "memory");
        return old == 0;
}
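
/*
 * Illustrative sketch (not part of the original header): how a caller is
 * expected to take and release the HPTE lock bit.  The helper names below
 * are hypothetical; the real users live in book3s_hv_rm_mmu.c and may use
 * different barriers around the HPTE update.
 */
static inline void kvmppc_example_lock_hpte(unsigned long *hptep)
{
        /* spin until we own HPTE_V_HVLOCK in dword 0 of the HPTE */
        while (!try_lock_hpte(hptep, HPTE_V_HVLOCK))
                cpu_relax();
}

static inline void kvmppc_example_unlock_hpte(unsigned long *hptep)
{
        /* publish any HPTE update, then drop the lock bit */
        smp_wmb();
        *hptep &= ~HPTE_V_HVLOCK;
}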

static inline int __hpte_actual_psize(unsigned int lp, int psize)
{
        int i, shift;
        unsigned int mask;

        /* start from 1 ignoring MMU_PAGE_4K */
        for (i = 1; i < MMU_PAGE_COUNT; i++) {

                /* invalid penc */
                if (mmu_psize_defs[psize].penc[i] == -1)
                        continue;
                /*
                 * encoding bits per actual page size
                 *        PTE LP     actual page size
                 *    rrrr rrrz	>=8KB
                 *    rrrr rrzz	>=16KB
                 *    rrrr rzzz	>=32KB
                 *    rrrr zzzz	>=64KB
                 * .......
                 */
                shift = mmu_psize_defs[i].shift - LP_SHIFT;
                if (shift > LP_BITS)
                        shift = LP_BITS;
                mask = (1 << shift) - 1;
                if ((lp & mask) == mmu_psize_defs[psize].penc[i])
                        return i;
        }
        return -1;
}
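
/*
 * Illustrative sketch (not in the original header): decoding the actual
 * page size for a 64K base-page HPTE.  The LP value is whatever the guest
 * put in the low bits of HPTE dword 1; mmu_psize_defs[] supplies the
 * per-platform penc values, so the result depends on the running CPU.
 */
static inline unsigned long example_actual_page_shift(unsigned long hpte_r)
{
        unsigned int lp = (hpte_r >> LP_SHIFT) & ((1 << LP_BITS) - 1);
        int a_psize = __hpte_actual_psize(lp, MMU_PAGE_64K);

        /* -1 means the LP bits do not match any penc for a 64K base page */
        return (a_psize < 0) ? 0 : mmu_psize_defs[a_psize].shift;
}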

static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
					     unsigned long pte_index)
{
        int b_psize, a_psize;
        unsigned int penc;
        unsigned long rb = 0, va_low, sllp;
        unsigned int lp = (r >> LP_SHIFT) & ((1 << LP_BITS) - 1);

        if (!(v & HPTE_V_LARGE)) {
                /* both base and actual psize are 4k */
                b_psize = MMU_PAGE_4K;
                a_psize = MMU_PAGE_4K;
        } else {
                for (b_psize = 0; b_psize < MMU_PAGE_COUNT; b_psize++) {

                        /* valid entries have a shift value */
                        if (!mmu_psize_defs[b_psize].shift)
                                continue;

                        a_psize = __hpte_actual_psize(lp, b_psize);
                        if (a_psize != -1)
                                break;
                }
        }
        /*
         * Ignore the top 14 bits of va.
         * v has its top two bits covering segment size, hence move
         * by 16 bits.  Also clear the lower HPTE_V_AVPN_SHIFT (7) bits.
         * The AVA field in v also has the lower 23 bits ignored.
         * For base page size 4K we need bits 14..65 (so we need to
         * collect 11 extra bits); for others we need 14..14+i.
         */
        /* This covers bits 14..54 of va */
        rb = (v & ~0x7fUL) << 16;		/* AVA field */
        /*
         * AVA in v had its lower 23 bits cleared.  We need to derive
         * them from the pteg index.
         */
        va_low = pte_index >> 3;
        if (v & HPTE_V_SECONDARY)
                va_low = ~va_low;
        /*
         * Get the vpn bits from va_low using the reverse of hashing.
         * In v we have va with 23 bits dropped and then left shifted
         * HPTE_V_AVPN_SHIFT (7) bits.  Now to find the vsid we need to
         * right shift it by (SID_SHIFT - (23 - 7)).
         */
        if (!(v & HPTE_V_1TB_SEG))
                va_low ^= v >> (SID_SHIFT - 16);
        else
                va_low ^= v >> (SID_SHIFT_1T - 16);
        va_low &= 0x7ff;

        switch (b_psize) {
        case MMU_PAGE_4K:
                sllp = ((mmu_psize_defs[a_psize].sllp & SLB_VSID_L) >> 6) |
                        ((mmu_psize_defs[a_psize].sllp & SLB_VSID_LP) >> 4);
                rb |= sllp << 5;	/* AP field */
                rb |= (va_low & 0x7ff) << 12;	/* remaining 11 bits of AVA */
                break;
        default:
        {
                int aval_shift;
                /*
                 * remaining 7 bits of AVA/LP fields
                 * Also contain the rr bits of LP
                 */
                rb |= (va_low & 0x7f) << 16;
                /*
                 * Now clear the LP bits that are not needed, based on the
                 * actual psize.
                 */
                rb &= ~((1ul << mmu_psize_defs[a_psize].shift) - 1);
                /*
                 * AVAL field 58..77 - base_page_shift bits of va.
                 * We have space for bits 58..64; missing bits should
                 * be zero filled.  +1 is to take care of the L bit shift.
                 */
                aval_shift = 64 - (77 - mmu_psize_defs[b_psize].shift) + 1;
                rb |= ((va_low << aval_shift) & 0xfe);

                rb |= 1;		/* L field */
                penc = mmu_psize_defs[b_psize].penc[a_psize];
                rb |= penc << 12;	/* LP field */
                break;
        }
        }
        rb |= (v >> 54) & 0x300;		/* B field */
        return rb;
}
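
/*
 * Illustrative sketch (not in the original header): how the RB value from
 * compute_tlbie_rb() is typically consumed.  The real invalidation
 * sequences live in book3s_hv_rm_mmu.c; LPID handling, batching and the
 * surrounding HPTE locking are omitted here.
 */
static inline void example_tlbie_one_hpte(unsigned long v, unsigned long r,
					  unsigned long pte_index,
					  unsigned int lpid)
{
        unsigned long rb = compute_tlbie_rb(v, r, pte_index);

        asm volatile("ptesync" : : : "memory");
        asm volatile(PPC_TLBIE(%1,%0) : : "r" (rb), "r" (lpid));
        asm volatile("eieio; tlbsync; ptesync" : : : "memory");
}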

static inline unsigned long hpte_page_size(unsigned long h, unsigned long l)
{
        int size, a_psize;
        /* Look at the 8 bit LP value */
        unsigned int lp = (l >> LP_SHIFT) & ((1 << LP_BITS) - 1);

        /* only handle 4k, 64k and 16M pages for now */
        if (!(h & HPTE_V_LARGE))
                return 1ul << 12;
        else {
                for (size = 0; size < MMU_PAGE_COUNT; size++) {
                        /* valid entries have a shift value */
                        if (!mmu_psize_defs[size].shift)
                                continue;

                        a_psize = __hpte_actual_psize(lp, size);
                        if (a_psize != -1)
                                return 1ul << mmu_psize_defs[a_psize].shift;
                }
        }
        return 0;	/* not a supported page size */
}

static inline unsigned long hpte_rpn(unsigned long ptel, unsigned long psize)
{
        return ((ptel & HPTE_R_RPN) & ~(psize - 1)) >> PAGE_SHIFT;
}
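
/*
 * Illustrative sketch (not in the original header): hpte_page_size() and
 * hpte_rpn() are normally used together to turn a guest HPTE pair into a
 * frame number, e.g. when validating an H_ENTER request against a memslot.
 */
static inline unsigned long example_hpte_to_gfn(unsigned long hpte_v,
						unsigned long hpte_r)
{
        unsigned long psize = hpte_page_size(hpte_v, hpte_r);

        /* 0 means the LP encoding did not match a supported page size */
        if (!psize)
                return 0;
        return hpte_rpn(hpte_r, psize);
}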

static inline int hpte_is_writable(unsigned long ptel)
{
        unsigned long pp = ptel & (HPTE_R_PP0 | HPTE_R_PP);

        return pp != PP_RXRX && pp != PP_RXXX;
}

static inline unsigned long hpte_make_readonly(unsigned long ptel)
{
        if ((ptel & HPTE_R_PP0) || (ptel & HPTE_R_PP) == PP_RWXX)
                ptel = (ptel & ~HPTE_R_PP) | PP_RXXX;
        else
                ptel |= PP_RXRX;
        return ptel;
}

static inline int hpte_cache_flags_ok(unsigned long ptel, unsigned long io_type)
{
        unsigned int wimg = ptel & HPTE_R_WIMG;

        /* Handle SAO */
        if (wimg == (HPTE_R_W | HPTE_R_I | HPTE_R_M) &&
            cpu_has_feature(CPU_FTR_ARCH_206))
                wimg = HPTE_R_M;

        if (!io_type)
                return wimg == HPTE_R_M;

        return (wimg & (HPTE_R_W | HPTE_R_I)) == io_type;
}
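
/*
 * Illustrative sketch (not in the original header): io_type is 0 for
 * ordinary cacheable memory and otherwise carries the expected I/O
 * attribute bits (some combination of HPTE_R_I and HPTE_R_W) for MMIO
 * mappings.  The helper name below is hypothetical.
 */
static inline int example_cache_flags_for_ram(unsigned long ptel)
{
        /* plain RAM must be mapped cacheable, i.e. WIMG has only M set */
        return hpte_cache_flags_ok(ptel, 0);
}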

/*
 * If it's present and writable, atomically set the dirty and referenced
 * bits and return the PTE, otherwise return 0.  If we find a transparent
 * hugepage that is marked splitting, we return 0.
 */
static inline pte_t kvmppc_read_update_linux_pte(pte_t *ptep, int writing,
						 unsigned int hugepage)
{
        pte_t old_pte, new_pte = __pte(0);

        while (1) {
                old_pte = pte_val(*ptep);
                /*
                 * wait until _PAGE_BUSY is clear then set it atomically
                 */
                if (unlikely(old_pte & _PAGE_BUSY)) {
                        cpu_relax();
                        continue;
                }
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
                /* If it's a hugepage that is being split, return None */
                if (unlikely(hugepage &&
                             pmd_trans_splitting(pte_pmd(old_pte))))
                        return __pte(0);
#endif
                /* If the pte is not present, return None */
                if (unlikely(!(old_pte & _PAGE_PRESENT)))
                        return __pte(0);

                new_pte = pte_mkyoung(old_pte);
                if (writing && pte_write(old_pte))
                        new_pte = pte_mkdirty(new_pte);

                if (old_pte == __cmpxchg_u64((unsigned long *)ptep, old_pte,
                                             new_pte))
                        break;
        }
        return new_pte;
}

/* Return HPTE cache control bits corresponding to Linux pte bits */
static inline unsigned long hpte_cache_bits(unsigned long pte_val)
{
#if _PAGE_NO_CACHE == HPTE_R_I && _PAGE_WRITETHRU == HPTE_R_W
        return pte_val & (HPTE_R_W | HPTE_R_I);
#else
        return ((pte_val & _PAGE_NO_CACHE) ? HPTE_R_I : 0) +
                ((pte_val & _PAGE_WRITETHRU) ? HPTE_R_W : 0);
#endif
}

static inline bool hpte_read_permission(unsigned long pp, unsigned long key)
{
        if (key)
                return PP_RWRX <= pp && pp <= PP_RXRX;
        return 1;
}

static inline bool hpte_write_permission(unsigned long pp, unsigned long key)
{
        if (key)
                return pp == PP_RWRW;
        return pp <= PP_RWRW;
}

static inline int hpte_get_skey_perm(unsigned long hpte_r, unsigned long amr)
{
        unsigned long skey;

        skey = ((hpte_r & HPTE_R_KEY_HI) >> 57) |
                ((hpte_r & HPTE_R_KEY_LO) >> 9);
        return (amr >> (62 - 2 * skey)) & 3;
}
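
/*
 * Worked example (not in the original header): the storage key is
 * reassembled from HPTE_R_KEY_HI/LO and selects a 2-bit field of the AMR,
 * with key 0 living in the two most-significant bits.  For key 5 the shift
 * is 62 - 2 * 5 = 52, so the result is (amr >> 52) & 3.
 */
static inline int example_key5_perm(unsigned long amr)
{
        /* equivalent to hpte_get_skey_perm() for an HPTE carrying key 5 */
        return (amr >> 52) & 3;
}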

static inline void lock_rmap(unsigned long *rmap)
{
        do {
                while (test_bit(KVMPPC_RMAP_LOCK_BIT, rmap))
                        cpu_relax();
        } while (test_and_set_bit_lock(KVMPPC_RMAP_LOCK_BIT, rmap));
}

static inline void unlock_rmap(unsigned long *rmap)
{
        __clear_bit_unlock(KVMPPC_RMAP_LOCK_BIT, rmap);
}
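
/*
 * Illustrative sketch (not in the original header): the rmap lock protects
 * the reverse-map chain for one guest page while HPTEs are added to or
 * removed from it.  "rmap" below stands for the per-memslot array entry
 * that real callers pass in.
 */
static inline void example_modify_rmap(unsigned long *rmap)
{
        lock_rmap(rmap);
        /* ... walk or update the revmap chain encoded in *rmap ... */
        unlock_rmap(rmap);
}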

static inline bool slot_is_aligned(struct kvm_memory_slot *memslot,
				   unsigned long pagesize)
{
        unsigned long mask = (pagesize >> PAGE_SHIFT) - 1;

        if (pagesize <= PAGE_SIZE)
                return 1;
        return !(memslot->base_gfn & mask) && !(memslot->npages & mask);
}

/*
 * This works for 4k, 64k and 16M pages on POWER7,
 * and 4k and 16M pages on PPC970.
 */
static inline unsigned long slb_pgsize_encoding(unsigned long psize)
{
        unsigned long senc = 0;

        if (psize > 0x1000) {
                senc = SLB_VSID_L;
                if (psize == 0x10000)
                        senc |= SLB_VSID_LP_01;
        }
        return senc;
}
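
/*
 * Worked example (not in the original header): 4k (0x1000) yields 0,
 * 64k (0x10000) yields SLB_VSID_L | SLB_VSID_LP_01, and 16M (0x1000000)
 * yields SLB_VSID_L alone.  A typical use is assembling a VRMA SLB value;
 * the helper below is an illustrative sketch of that pattern, not the
 * actual setup code.
 */
static inline unsigned long example_vrma_slb_v(unsigned long psize)
{
        return slb_pgsize_encoding(psize) | SLB_VSID_B_1T |
               (VRMA_VSID << SLB_VSID_SHIFT_1T);
}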

static inline int is_vrma_hpte(unsigned long hpte_v)
{
        return (hpte_v & ~0xffffffUL) ==
                (HPTE_V_1TB_SEG | (VRMA_VSID << (40 - 16)));
}

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
/*
 * Note modification of an HPTE; set the HPTE modified bit
 * if anyone is interested.
 */
static inline void note_hpte_modification(struct kvm *kvm,
					  struct revmap_entry *rev)
{
        if (atomic_read(&kvm->arch.hpte_mod_interest))
                rev->guest_rpte |= HPTE_GR_MODIFIED;
}

/*
 * Like kvm_memslots(), but for use in real mode when we can't do
 * any RCU stuff (since the secondary threads are offline from the
 * kernel's point of view), and we can't print anything.
 * Thus we use rcu_dereference_raw() rather than rcu_dereference_check().
 */
static inline struct kvm_memslots *kvm_memslots_raw(struct kvm *kvm)
{
        return rcu_dereference_raw_notrace(kvm->memslots);
}

#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */

#endif /* __ASM_KVM_BOOK3S_64_H__ */