/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright SUSE Linux Products GmbH 2010
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#ifndef __ASM_KVM_BOOK3S_64_H__
#define __ASM_KVM_BOOK3S_64_H__

#include <linux/string.h>
#include <asm/bitops.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/cpu_has_feature.h>
#include <asm/ppc-opcode.h>

#ifdef CONFIG_PPC_PSERIES
static inline bool kvmhv_on_pseries(void)
{
	return !cpu_has_feature(CPU_FTR_HVMODE);
}
#else
static inline bool kvmhv_on_pseries(void)
{
	return false;
}
#endif

/*
 * Structure for a nested guest, that is, for a guest that is managed by
 * one of our guests.
 */
struct kvm_nested_guest {
	struct kvm *l1_host;		/* L1 VM that owns this nested guest */
	int l1_lpid;			/* lpid L1 guest thinks this guest is */
	int shadow_lpid;		/* real lpid of this nested guest */
	pgd_t *shadow_pgtable;		/* our page table for this guest */
	u64 l1_gr_to_hr;		/* L1's addr of part'n-scoped table */
	u64 process_table;		/* process table entry for this guest */
	long refcnt;			/* number of pointers to this struct */
	struct mutex tlb_lock;		/* serialize page faults and tlbies */
	struct kvm_nested_guest *next;
	cpumask_t need_tlb_flush;
	cpumask_t cpu_in_guest;
	short prev_cpu[NR_CPUS];
	u8 radix;			/* is this nested guest radix */
};

/*
 * We define a nested rmap entry as a single 64-bit quantity
 * 0xFFF0000000000000	12-bit lpid field
 * 0x000FFFFFFFFFF000	40-bit guest 4k page frame number
 * 0x0000000000000001	1-bit  single entry flag
 */
#define RMAP_NESTED_LPID_MASK		0xFFF0000000000000UL
#define RMAP_NESTED_LPID_SHIFT		(52)
#define RMAP_NESTED_GPA_MASK		0x000FFFFFFFFFF000UL
#define RMAP_NESTED_IS_SINGLE_ENTRY	0x0000000000000001UL

/* Structure for a nested guest rmap entry */
struct rmap_nested {
	struct llist_node list;
	u64 rmap;
};
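
/*
 * For illustration, a nested rmap entry for a given lpid and guest physical
 * address (gpa) can be packed and unpacked with the masks above, e.g.
 *
 *	n_rmap = (((u64) lpid << RMAP_NESTED_LPID_SHIFT) & RMAP_NESTED_LPID_MASK) |
 *		 (gpa & RMAP_NESTED_GPA_MASK);
 *	lpid   = (n_rmap & RMAP_NESTED_LPID_MASK) >> RMAP_NESTED_LPID_SHIFT;
 *	gpa    = n_rmap & RMAP_NESTED_GPA_MASK;
 */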

/*
 * for_each_nest_rmap_safe - iterate over the list of nested rmap entries
 *			     safe against removal of the list entry or NULL list
 * @pos:	a (struct rmap_nested *) to use as a loop cursor
 * @node:	pointer to the first entry
 *		NOTE: this can be NULL
 * @rmapp:	an (unsigned long *) in which to return the rmap entries on each
 *		iteration
 *		NOTE: this must point to already allocated memory
 *
 * The nested_rmap is a llist of (struct rmap_nested) entries pointed to by the
 * rmap entry in the memslot. The list is always terminated by a "single entry"
 * stored in the list element of the final entry of the llist. If there is ONLY
 * a single entry then this is itself in the rmap entry of the memslot, not a
 * llist head pointer.
 *
 * Note that the iterator below assumes that a nested rmap entry is always
 * non-zero. This is true for our usage because the LPID field is always
 * non-zero (zero is reserved for the host).
 *
 * This should be used to iterate over the list of rmap_nested entries with
 * processing done on the u64 rmap value given by each iteration. This is safe
 * against removal of list entries and it is always safe to call free on (pos).
 *
 * e.g.
 * struct rmap_nested *cursor;
 * struct llist_node *first;
 *
 * for_each_nest_rmap_safe(cursor, first, &rmap) {
 *	do_something(rmap);
 *	free(cursor);
 * }
 */
#define for_each_nest_rmap_safe(pos, node, rmapp)			       \
	for ((pos) = llist_entry((node), typeof(*(pos)), list);	       \
	     (node) &&							       \
	     (*(rmapp) = ((RMAP_NESTED_IS_SINGLE_ENTRY & ((u64) (node))) ?    \
			  ((u64) (node)) : ((pos)->rmap))) &&		       \
	     (((node) = ((RMAP_NESTED_IS_SINGLE_ENTRY & ((u64) (node))) ?     \
			 ((struct llist_node *) ((pos) = NULL)) :	       \
			 (pos)->list.next)), true);			       \
	     (pos) = llist_entry((node), typeof(*(pos)), list))

struct kvm_nested_guest *kvmhv_get_nested(struct kvm *kvm, int l1_lpid,
					  bool create);
void kvmhv_put_nested(struct kvm_nested_guest *gp);
int kvmhv_nested_next_lpid(struct kvm *kvm, int lpid);
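
/*
 * A minimal usage sketch (assuming, as in the prototype above, that the final
 * parameter selects whether a missing nested guest should be created): the
 * lookup takes a reference on the nested-guest state (see refcnt above) which
 * must be dropped again with kvmhv_put_nested(), e.g.
 *
 *	struct kvm_nested_guest *gp;
 *
 *	gp = kvmhv_get_nested(kvm, l1_lpid, false);
 *	if (gp) {
 *		... use gp->shadow_lpid, gp->shadow_pgtable, etc. ...
 *		kvmhv_put_nested(gp);
 *	}
 */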

/* Encoding of first parameter for H_TLB_INVALIDATE */
#define H_TLBIE_P1_ENC(ric, prs, r)	(___PPC_RIC(ric) | ___PPC_PRS(prs) | \
					 ___PPC_R(r))

/* Power architecture requires HPT is at least 256kiB, at most 64TiB */
#define PPC_MIN_HPT_ORDER	18
#define PPC_MAX_HPT_ORDER	46

#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
static inline struct kvmppc_book3s_shadow_vcpu *svcpu_get(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	return &get_paca()->shadow_vcpu;
}

static inline void svcpu_put(struct kvmppc_book3s_shadow_vcpu *svcpu)
{
	preempt_enable();
}
#endif

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE

static inline bool kvm_is_radix(struct kvm *kvm)
{
	return kvm->arch.radix;
}

static inline bool kvmhv_vcpu_is_radix(struct kvm_vcpu *vcpu)
{
	bool radix;

	if (vcpu->arch.nested)
		radix = vcpu->arch.nested->radix;
	else
		radix = kvm_is_radix(vcpu->kvm);

	return radix;
}

#define KVM_DEFAULT_HPT_ORDER	24	/* 16MB HPT by default */
#endif

/*
 * We use a lock bit in HPTE dword 0 to synchronize updates and
 * accesses to each HPTE, and another bit to indicate non-present
 * HPTEs.
 */
#define HPTE_V_HVLOCK	0x40UL
#define HPTE_V_ABSENT	0x20UL

/*
 * We use this bit in the guest_rpte field of the revmap entry
 * to indicate a modified HPTE.
 */
#define HPTE_GR_MODIFIED	(1ul << 62)

/* These bits are reserved in the guest view of the HPTE */
#define HPTE_GR_RESERVED	HPTE_GR_MODIFIED

static inline long try_lock_hpte(__be64 *hpte, unsigned long bits)
{
	unsigned long tmp, old;
	__be64 be_lockbit, be_bits;

	/*
	 * We load/store in native endian, but the HTAB is in big endian. If
	 * we byte swap all data we apply on the PTE we're implicitly correct
	 * again.
	 */
	be_lockbit = cpu_to_be64(HPTE_V_HVLOCK);
	be_bits = cpu_to_be64(bits);

	asm volatile("	ldarx	%0,0,%2\n"
		     "	and.	%1,%0,%3\n"
		     "	bne	2f\n"
		     "	or	%0,%0,%4\n"
		     "	stdcx.	%0,0,%2\n"
		     "	beq+	2f\n"
		     "	mr	%1,%3\n"
		     "2:	isync"
		     : "=&r" (tmp), "=&r" (old)
		     : "r" (hpte), "r" (be_bits), "r" (be_lockbit)
		     : "cc", "memory");
	return old == 0;
}

static inline void unlock_hpte(__be64 *hpte, unsigned long hpte_v)
{
	hpte_v &= ~HPTE_V_HVLOCK;
	asm volatile(PPC_RELEASE_BARRIER "" : : : "memory");
	hpte[0] = cpu_to_be64(hpte_v);
}

/* Without barrier */
static inline void __unlock_hpte(__be64 *hpte, unsigned long hpte_v)
{
	hpte_v &= ~HPTE_V_HVLOCK;
	hpte[0] = cpu_to_be64(hpte_v);
}
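
/*
 * A minimal sketch of the locking pattern these helpers support: spin on
 * try_lock_hpte() to take the per-HPTE lock bit, then release it with
 * unlock_hpte() (or __unlock_hpte() when no barrier is needed), e.g.
 *
 *	while (!try_lock_hpte(hptep, HPTE_V_HVLOCK))
 *		cpu_relax();
 *	... examine or update the HPTE ...
 *	unlock_hpte(hptep, be64_to_cpu(hptep[0]));
 */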

/*
 * These functions encode knowledge of the POWER7/8/9 hardware
 * interpretations of the HPTE LP (large page size) field.
 */
static inline int kvmppc_hpte_page_shifts(unsigned long h, unsigned long l)
{
	unsigned int lphi;

	if (!(h & HPTE_V_LARGE))
		return 12;	/* 4kB */
	lphi = (l >> 16) & 0xf;
	switch ((l >> 12) & 0xf) {
	case 0:
		return !lphi ? 24 : 0;		/* 16MB */
	case 1:
		return 16;			/* 64kB */
	case 3:
		return !lphi ? 34 : 0;		/* 16GB */
	case 7:
		return (16 << 8) + 12;		/* 64kB in 4kB */
	case 8:
		if (!lphi)
			return (24 << 8) + 16;	/* 16MB in 64kB */
		if (lphi == 3)
			return (24 << 8) + 12;	/* 16MB in 4kB */
		break;
	}
	return 0;
}
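
/*
 * When the base and actual page sizes differ, the value returned above packs
 * the actual page shift into bits 8-15 and the base page shift into bits 0-7.
 * For example, (24 << 8) + 12 describes a 16MB page mapped with a 4kB base
 * page size, so the helpers below give:
 *
 *	kvmppc_hpte_base_page_shift(h, l)   == 12
 *	kvmppc_hpte_actual_page_shift(h, l) == 24
 */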

static inline int kvmppc_hpte_base_page_shift(unsigned long h, unsigned long l)
{
	return kvmppc_hpte_page_shifts(h, l) & 0xff;
}

static inline int kvmppc_hpte_actual_page_shift(unsigned long h, unsigned long l)
{
	int tmp = kvmppc_hpte_page_shifts(h, l);

	if (tmp >= 0x100)
		tmp >>= 8;
	return tmp;
}

static inline unsigned long kvmppc_actual_pgsz(unsigned long v, unsigned long r)
{
	int shift = kvmppc_hpte_actual_page_shift(v, r);

	if (shift)
		return 1ul << shift;
	return 0;
}

static inline int kvmppc_pgsize_lp_encoding(int base_shift, int actual_shift)
{
	switch (base_shift) {
	case 12:
		switch (actual_shift) {
		case 12:
			return 0;
		case 16:
			return 7;
		case 24:
			return 0x38;
		}
		break;
	case 16:
		switch (actual_shift) {
		case 16:
			return 1;
		case 24:
			return 8;
		}
		break;
	case 24:
		return 0;
	}
	return -1;
}

static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
					     unsigned long pte_index)
{
	int a_pgshift, b_pgshift;
	unsigned long rb = 0, va_low, sllp;

	b_pgshift = a_pgshift = kvmppc_hpte_page_shifts(v, r);
	if (a_pgshift >= 0x100) {
		b_pgshift &= 0xff;
		a_pgshift >>= 8;
	}

	/*
	 * Ignore the top 14 bits of va.
	 * v has its top two bits covering segment size, hence shift
	 * by 16 bits; also clear the lower HPTE_V_AVPN_SHIFT (7) bits.
	 * The AVA field in v also has its lower 23 bits ignored.
	 * For base page size 4K we need bits 14..65 (so we need to
	 * collect an extra 11 bits); for others we need 14..14+i.
	 */
	/* This covers bits 14..54 of va */
	rb = (v & ~0x7fUL) << 16;		/* AVA field */

	/*
	 * AVA in v had its lower 23 bits cleared. We need to derive
	 * those from the pteg index.
	 */
	va_low = pte_index >> 3;
	if (v & HPTE_V_SECONDARY)
		va_low = ~va_low;
	/*
	 * Get the vpn bits from va_low using the reverse of hashing.
	 * In v we have va with 23 bits dropped and then left shifted
	 * HPTE_V_AVPN_SHIFT (7) bits. Now to find the vsid we need to
	 * right shift it by (SID_SHIFT - (23 - 7)).
	 */
	if (!(v & HPTE_V_1TB_SEG))
		va_low ^= v >> (SID_SHIFT - 16);
	else
		va_low ^= v >> (SID_SHIFT_1T - 16);
	va_low &= 0x7ff;

	if (b_pgshift <= 12) {
		if (a_pgshift > 12) {
			sllp = (a_pgshift == 16) ? 5 : 4;
			rb |= sllp << 5;	/* AP field */
		}
		rb |= (va_low & 0x7ff) << 12;	/* remaining 11 bits of AVA */
	} else {
		int aval_shift;
		/*
		 * Remaining bits of the AVA/LP fields.
		 * These also contain the rr bits of LP.
		 */
		rb |= (va_low << b_pgshift) & 0x7ff000;
		/*
		 * Now clear the LP bits that are not needed, based on
		 * the actual page size.
		 */
		rb &= ~((1ul << a_pgshift) - 1);
		/*
		 * AVAL field takes bits 58..77 - base_page_shift of va;
		 * we have space for bits 58..64, missing bits should be
		 * zero filled. The +1 accounts for the L bit shift.
		 */
		aval_shift = 64 - (77 - b_pgshift) + 1;
		rb |= ((va_low << aval_shift) & 0xfe);

		rb |= 1;		/* L field */
		rb |= r & 0xff000 & ((1ul << a_pgshift) - 1); /* LP field */
	}
	rb |= (v >> HPTE_V_SSIZE_SHIFT) << 8;	/* B field */
	return rb;
}

static inline unsigned long hpte_rpn(unsigned long ptel, unsigned long psize)
{
	return ((ptel & HPTE_R_RPN) & ~(psize - 1)) >> PAGE_SHIFT;
}

static inline int hpte_is_writable(unsigned long ptel)
{
	unsigned long pp = ptel & (HPTE_R_PP0 | HPTE_R_PP);

	return pp != PP_RXRX && pp != PP_RXXX;
}

static inline unsigned long hpte_make_readonly(unsigned long ptel)
{
	if ((ptel & HPTE_R_PP0) || (ptel & HPTE_R_PP) == PP_RWXX)
		ptel = (ptel & ~HPTE_R_PP) | PP_RXXX;
	else
		ptel |= PP_RXRX;
	return ptel;
}

static inline bool hpte_cache_flags_ok(unsigned long hptel, bool is_ci)
{
	unsigned int wimg = hptel & HPTE_R_WIMG;

	/* Handle SAO */
	if (wimg == (HPTE_R_W | HPTE_R_I | HPTE_R_M) &&
	    cpu_has_feature(CPU_FTR_ARCH_206))
		wimg = HPTE_R_M;

	if (!is_ci)
		return wimg == HPTE_R_M;
	/*
	 * If the host mapping is cache inhibited, make sure hptel is
	 * cache inhibited too.
	 */
	if (wimg & HPTE_R_W) /* FIXME!! is this ok for all guests? */
		return false;
	return !!(wimg & HPTE_R_I);
}

/*
 * If it's present and writable, atomically set dirty and referenced bits and
 * return the PTE, otherwise return 0.
 */
static inline pte_t kvmppc_read_update_linux_pte(pte_t *ptep, int writing)
{
	pte_t old_pte, new_pte = __pte(0);

	while (1) {
		/*
		 * Make sure we don't reload from ptep
		 */
		old_pte = READ_ONCE(*ptep);
		/*
		 * Wait until H_PAGE_BUSY is clear, then set it atomically
		 */
		if (unlikely(pte_val(old_pte) & H_PAGE_BUSY)) {
			cpu_relax();
			continue;
		}
		/* If pte is not present return None */
		if (unlikely(!(pte_val(old_pte) & _PAGE_PRESENT)))
			return __pte(0);

		new_pte = pte_mkyoung(old_pte);
		if (writing && pte_write(old_pte))
			new_pte = pte_mkdirty(new_pte);

		if (pte_xchg(ptep, old_pte, new_pte))
			break;
	}
	return new_pte;
}

static inline bool hpte_read_permission(unsigned long pp, unsigned long key)
{
	if (key)
		return PP_RWRX <= pp && pp <= PP_RXRX;
	return true;
}

static inline bool hpte_write_permission(unsigned long pp, unsigned long key)
{
	if (key)
		return pp == PP_RWRW;
	return pp <= PP_RWRW;
}

static inline int hpte_get_skey_perm(unsigned long hpte_r, unsigned long amr)
{
	unsigned long skey;

	skey = ((hpte_r & HPTE_R_KEY_HI) >> 57) |
		((hpte_r & HPTE_R_KEY_LO) >> 9);
	return (amr >> (62 - 2 * skey)) & 3;
}
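
/*
 * Worked example for hpte_get_skey_perm(): for storage key 5, (62 - 2 * skey)
 * is 52, so the function returns bits 52-53 of the AMR (counting from the
 * least-significant bit), i.e. the two protection bits for that key.
 */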

static inline void lock_rmap(unsigned long *rmap)
{
	do {
		while (test_bit(KVMPPC_RMAP_LOCK_BIT, rmap))
			cpu_relax();
	} while (test_and_set_bit_lock(KVMPPC_RMAP_LOCK_BIT, rmap));
}

static inline void unlock_rmap(unsigned long *rmap)
{
	__clear_bit_unlock(KVMPPC_RMAP_LOCK_BIT, rmap);
}
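
/*
 * A minimal sketch of how the rmap lock is meant to be used: take the lock
 * bit around any walk or update of the rmap chain, e.g.
 *
 *	lock_rmap(rmapp);
 *	... walk or modify the rmap entry ...
 *	unlock_rmap(rmapp);
 */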

static inline bool slot_is_aligned(struct kvm_memory_slot *memslot,
				   unsigned long pagesize)
{
	unsigned long mask = (pagesize >> PAGE_SHIFT) - 1;

	if (pagesize <= PAGE_SIZE)
		return true;
	return !(memslot->base_gfn & mask) && !(memslot->npages & mask);
}

/*
 * This works for 4k, 64k and 16M pages on POWER7,
 * and 4k and 16M pages on PPC970.
 */
static inline unsigned long slb_pgsize_encoding(unsigned long psize)
{
	unsigned long senc = 0;

	if (psize > 0x1000) {
		senc = SLB_VSID_L;
		if (psize == 0x10000)
			senc |= SLB_VSID_LP_01;
	}
	return senc;
}
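
/*
 * With the encoding above, for example, a 16MB psize (0x1000000) yields
 * SLB_VSID_L, a 64kB psize (0x10000) yields SLB_VSID_L | SLB_VSID_LP_01,
 * and a 4kB psize yields 0.
 */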

static inline int is_vrma_hpte(unsigned long hpte_v)
{
	return (hpte_v & ~0xffffffUL) ==
		(HPTE_V_1TB_SEG | (VRMA_VSID << (40 - 16)));
}

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
/*
 * Note modification of an HPTE; set the HPTE modified bit
 * if anyone is interested.
 */
static inline void note_hpte_modification(struct kvm *kvm,
					  struct revmap_entry *rev)
{
	if (atomic_read(&kvm->arch.hpte_mod_interest))
		rev->guest_rpte |= HPTE_GR_MODIFIED;
}

/*
 * Like kvm_memslots(), but for use in real mode when we can't do
 * any RCU stuff (since the secondary threads are offline from the
 * kernel's point of view), and we can't print anything.
 * Thus we use rcu_dereference_raw() rather than rcu_dereference_check().
 */
static inline struct kvm_memslots *kvm_memslots_raw(struct kvm *kvm)
{
	return rcu_dereference_raw_check(kvm->memslots[0]);
}

extern void kvmppc_mmu_debugfs_init(struct kvm *kvm);
extern void kvmhv_radix_debugfs_init(struct kvm *kvm);

extern void kvmhv_rm_send_ipi(int cpu);

static inline unsigned long kvmppc_hpt_npte(struct kvm_hpt_info *hpt)
{
	/* HPTEs are 2**4 bytes long */
	return 1UL << (hpt->order - 4);
}

static inline unsigned long kvmppc_hpt_mask(struct kvm_hpt_info *hpt)
{
	/* 128 (2**7) bytes in each HPTEG */
	return (1UL << (hpt->order - 7)) - 1;
}
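
/*
 * Worked example: with KVM_DEFAULT_HPT_ORDER (24, i.e. a 16MB HPT),
 * kvmppc_hpt_npte() gives 1UL << 20 (1M HPTEs) and kvmppc_hpt_mask() gives
 * (1UL << 17) - 1, i.e. 128K HPTEGs of 8 HPTEs each.
 */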

/* Set bits in a dirty bitmap, which is in LE format */
static inline void set_dirty_bits(unsigned long *map, unsigned long i,
				  unsigned long npages)
{
	if (npages >= 8)
		memset((char *)map + i / 8, 0xff, npages / 8);
	else
		for (; npages; ++i, --npages)
			__set_bit_le(i, map);
}

static inline void set_dirty_bits_atomic(unsigned long *map, unsigned long i,
					 unsigned long npages)
{
	if (npages >= 8)
		memset((char *)map + i / 8, 0xff, npages / 8);
	else
		for (; npages; ++i, --npages)
			set_bit_le(i, map);
}

static inline u64 sanitize_msr(u64 msr)
{
	msr &= ~MSR_HV;
	msr |= MSR_ME;
	return msr;
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
static inline void copy_from_checkpoint(struct kvm_vcpu *vcpu)
{
	vcpu->arch.regs.ccr = vcpu->arch.cr_tm;
	vcpu->arch.regs.xer = vcpu->arch.xer_tm;
	vcpu->arch.regs.link = vcpu->arch.lr_tm;
	vcpu->arch.regs.ctr = vcpu->arch.ctr_tm;
	vcpu->arch.amr = vcpu->arch.amr_tm;
	vcpu->arch.ppr = vcpu->arch.ppr_tm;
	vcpu->arch.dscr = vcpu->arch.dscr_tm;
	vcpu->arch.tar = vcpu->arch.tar_tm;
	memcpy(vcpu->arch.regs.gpr, vcpu->arch.gpr_tm,
	       sizeof(vcpu->arch.regs.gpr));
	vcpu->arch.fp = vcpu->arch.fp_tm;
	vcpu->arch.vr = vcpu->arch.vr_tm;
	vcpu->arch.vrsave = vcpu->arch.vrsave_tm;
}

static inline void copy_to_checkpoint(struct kvm_vcpu *vcpu)
{
	vcpu->arch.cr_tm = vcpu->arch.regs.ccr;
	vcpu->arch.xer_tm = vcpu->arch.regs.xer;
	vcpu->arch.lr_tm = vcpu->arch.regs.link;
	vcpu->arch.ctr_tm = vcpu->arch.regs.ctr;
	vcpu->arch.amr_tm = vcpu->arch.amr;
	vcpu->arch.ppr_tm = vcpu->arch.ppr;
	vcpu->arch.dscr_tm = vcpu->arch.dscr;
	vcpu->arch.tar_tm = vcpu->arch.tar;
	memcpy(vcpu->arch.gpr_tm, vcpu->arch.regs.gpr,
	       sizeof(vcpu->arch.regs.gpr));
	vcpu->arch.fp_tm = vcpu->arch.fp;
	vcpu->arch.vr_tm = vcpu->arch.vr;
	vcpu->arch.vrsave_tm = vcpu->arch.vrsave;
}
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */

extern int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte,
			     unsigned long gpa, unsigned int level,
			     unsigned long mmu_seq, unsigned int lpid,
			     unsigned long *rmapp, struct rmap_nested **n_rmap);
extern void kvmhv_insert_nest_rmap(struct kvm *kvm, unsigned long *rmapp,
				   struct rmap_nested **n_rmap);
extern void kvmhv_update_nest_rmap_rc_list(struct kvm *kvm, unsigned long *rmapp,
					   unsigned long clr, unsigned long set,
					   unsigned long hpa, unsigned long nbytes);
extern void kvmhv_remove_nest_rmap_range(struct kvm *kvm,
					 const struct kvm_memory_slot *memslot,
					 unsigned long gpa, unsigned long hpa,
					 unsigned long nbytes);

#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */

#endif /* __ASM_KVM_BOOK3S_64_H__ */